aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--=announce-1.040
-rw-r--r--=announce-1.134
-rw-r--r--=announce-1.1.136
-rw-r--r--=announce-1.227
-rw-r--r--=announce-1.347
-rw-r--r--AUTHORS4
-rw-r--r--COPYING340
-rw-r--r--COPYING3674
-rw-r--r--ChangeLog8
-rw-r--r--DEVELOPMENT84
-rw-r--r--Makefile.am265
-rw-r--r--Makefile.in.dep.patch19
-rw-r--r--Makefrag.am611
-rw-r--r--Makerules.am54
-rw-r--r--Makerules.mig.am127
-rw-r--r--NEWS161
-rw-r--r--README56
-rw-r--r--chips/busses.c232
-rw-r--r--chips/busses.h154
-rw-r--r--config.status.dep.patch18
-rw-r--r--configfrag-first.ac29
-rw-r--r--configfrag.ac183
-rw-r--r--configure.ac270
-rw-r--r--ddb/db_access.c136
-rw-r--r--ddb/db_access.h79
-rw-r--r--ddb/db_break.c746
-rw-r--r--ddb/db_break.h111
-rw-r--r--ddb/db_command.c594
-rw-r--r--ddb/db_command.h80
-rw-r--r--ddb/db_cond.c185
-rw-r--r--ddb/db_cond.h39
-rw-r--r--ddb/db_elf.c232
-rw-r--r--ddb/db_elf.h52
-rw-r--r--ddb/db_examine.c664
-rw-r--r--ddb/db_examine.h92
-rw-r--r--ddb/db_expr.c382
-rw-r--r--ddb/db_expr.h26
-rw-r--r--ddb/db_ext_symtab.c123
-rw-r--r--ddb/db_input.c414
-rw-r--r--ddb/db_input.h33
-rw-r--r--ddb/db_lex.c454
-rw-r--r--ddb/db_lex.h99
-rw-r--r--ddb/db_macro.c197
-rw-r--r--ddb/db_macro.h53
-rw-r--r--ddb/db_mp.c339
-rw-r--r--ddb/db_mp.h35
-rw-r--r--ddb/db_output.c217
-rw-r--r--ddb/db_output.h46
-rw-r--r--ddb/db_print.c573
-rw-r--r--ddb/db_print.h68
-rw-r--r--ddb/db_run.c430
-rw-r--r--ddb/db_run.h94
-rw-r--r--ddb/db_sym.c532
-rw-r--r--ddb/db_sym.h264
-rw-r--r--ddb/db_task_thread.c326
-rw-r--r--ddb/db_task_thread.h73
-rw-r--r--ddb/db_trap.c115
-rw-r--r--ddb/db_trap.h34
-rw-r--r--ddb/db_variables.c224
-rw-r--r--ddb/db_variables.h88
-rw-r--r--ddb/db_watch.c329
-rw-r--r--ddb/db_watch.h80
-rw-r--r--ddb/db_write_cmd.c111
-rw-r--r--ddb/db_write_cmd.h34
-rw-r--r--ddb/nlist.h63
-rw-r--r--ddb/stab.h73
-rw-r--r--ddb/tr.h117
-rw-r--r--device/blkio.c66
-rw-r--r--device/blkio.h26
-rw-r--r--device/buf.h96
-rw-r--r--device/chario.c1060
-rw-r--r--device/chario.h37
-rw-r--r--device/cirbuf.c277
-rw-r--r--device/cirbuf.h61
-rw-r--r--device/conf.h127
-rw-r--r--device/cons.c183
-rw-r--r--device/cons.h68
-rw-r--r--device/dev_hdr.h160
-rw-r--r--device/dev_lookup.c364
-rw-r--r--device/dev_master.h65
-rw-r--r--device/dev_name.c242
-rw-r--r--device/dev_pager.c662
-rw-r--r--device/dev_pager.h26
-rw-r--r--device/device.srv27
-rw-r--r--device/device_emul.h64
-rw-r--r--device/device_init.c67
-rw-r--r--device/device_init.h24
-rw-r--r--device/device_pager.srv43
-rw-r--r--device/device_port.h41
-rw-r--r--device/device_reply.cli27
-rw-r--r--device/device_types_kernel.h43
-rw-r--r--device/ds_routines.c1901
-rw-r--r--device/ds_routines.h86
-rw-r--r--device/if_ether.h52
-rw-r--r--device/if_hdr.h165
-rw-r--r--device/intr.c372
-rw-r--r--device/intr.h62
-rw-r--r--device/io_req.h145
-rw-r--r--device/kmsg.c254
-rw-r--r--device/kmsg.h18
-rw-r--r--device/memory_object_reply.cli27
-rw-r--r--device/net_io.c2153
-rw-r--r--device/net_io.h164
-rw-r--r--device/param.h49
-rw-r--r--device/subrs.c86
-rw-r--r--device/subrs.h37
-rw-r--r--device/tty.h237
-rw-r--r--doc/.gitignore4
-rw-r--r--doc/Makefrag.am119
-rw-r--r--doc/fdl.texi452
-rw-r--r--doc/gpl.texi383
-rw-r--r--doc/mach.texi7417
-rw-r--r--gensym.awk78
-rwxr-xr-xgitlog-to-changelog432
-rw-r--r--i386/Makefrag.am215
-rw-r--r--i386/Makefrag_x86.am84
-rw-r--r--i386/README-Drivers122
-rw-r--r--i386/configfrag.ac124
-rw-r--r--i386/i386/.gitignore1
-rw-r--r--i386/i386/_setjmp.S63
-rw-r--r--i386/i386/apic.c453
-rw-r--r--i386/i386/apic.h337
-rw-r--r--i386/i386/ast.h47
-rw-r--r--i386/i386/ast_check.c56
-rw-r--r--i386/i386/ast_types.h36
-rw-r--r--i386/i386/copy_user.h100
-rw-r--r--i386/i386/cpu.h110
-rw-r--r--i386/i386/cpu_number.h119
-rw-r--r--i386/i386/cpuboot.S245
-rw-r--r--i386/i386/cswitch.S139
-rw-r--r--i386/i386/db_disasm.c1437
-rw-r--r--i386/i386/db_interface.c865
-rw-r--r--i386/i386/db_interface.h149
-rw-r--r--i386/i386/db_machdep.h105
-rw-r--r--i386/i386/db_trace.c586
-rw-r--r--i386/i386/db_trace.h33
-rw-r--r--i386/i386/debug.h73
-rw-r--r--i386/i386/debug_i386.c178
-rw-r--r--i386/i386/debug_trace.S56
-rw-r--r--i386/i386/eflags.h35
-rw-r--r--i386/i386/fpu.c948
-rw-r--r--i386/i386/fpu.h250
-rw-r--r--i386/i386/gdt.c166
-rw-r--r--i386/i386/gdt.h121
-rw-r--r--i386/i386/hardclock.c81
-rw-r--r--i386/i386/hardclock.h28
-rw-r--r--i386/i386/i386asm.sym194
-rw-r--r--i386/i386/idt-gen.h47
-rw-r--r--i386/i386/idt.c87
-rw-r--r--i386/i386/idt_inittab.S140
-rw-r--r--i386/i386/io_perm.c329
-rw-r--r--i386/i386/io_perm.h63
-rw-r--r--i386/i386/ipl.h83
-rw-r--r--i386/i386/irq.c73
-rw-r--r--i386/i386/irq.h31
-rw-r--r--i386/i386/ktss.c92
-rw-r--r--i386/i386/ktss.h33
-rw-r--r--i386/i386/kttd_interface.c574
-rw-r--r--i386/i386/kttd_machdep.h59
-rw-r--r--i386/i386/ldt.c117
-rw-r--r--i386/i386/ldt.h77
-rw-r--r--i386/i386/lock.h132
-rw-r--r--i386/i386/locore.S1603
-rw-r--r--i386/i386/locore.h98
-rw-r--r--i386/i386/loose_ends.c49
-rw-r--r--i386/i386/loose_ends.h33
-rw-r--r--i386/i386/mach_i386.srv27
-rw-r--r--i386/i386/mach_param.h31
-rw-r--r--i386/i386/machine_routines.h38
-rw-r--r--i386/i386/machine_task.c80
-rw-r--r--i386/i386/machspl.h29
-rw-r--r--i386/i386/model_dep.h68
-rw-r--r--i386/i386/mp_desc.c357
-rw-r--r--i386/i386/mp_desc.h98
-rw-r--r--i386/i386/msr.h56
-rw-r--r--i386/i386/pcb.c958
-rw-r--r--i386/i386/pcb.h90
-rw-r--r--i386/i386/percpu.c33
-rw-r--r--i386/i386/percpu.h98
-rw-r--r--i386/i386/phys.c187
-rw-r--r--i386/i386/pic.c262
-rw-r--r--i386/i386/pic.h191
-rw-r--r--i386/i386/pio.h61
-rw-r--r--i386/i386/pit.c140
-rw-r--r--i386/i386/pit.h98
-rw-r--r--i386/i386/pmap.h27
-rw-r--r--i386/i386/proc_reg.h407
-rw-r--r--i386/i386/sched_param.h40
-rw-r--r--i386/i386/seg.h264
-rw-r--r--i386/i386/setjmp.h44
-rw-r--r--i386/i386/smp.c199
-rw-r--r--i386/i386/smp.h34
-rw-r--r--i386/i386/spl.S264
-rw-r--r--i386/i386/spl.h78
-rw-r--r--i386/i386/strings.c96
-rw-r--r--i386/i386/task.h61
-rw-r--r--i386/i386/thread.h276
-rw-r--r--i386/i386/time_stamp.h30
-rw-r--r--i386/i386/trap.c675
-rw-r--r--i386/i386/trap.h71
-rw-r--r--i386/i386/tss.h109
-rw-r--r--i386/i386/user_ldt.c451
-rw-r--r--i386/i386/user_ldt.h50
-rw-r--r--i386/i386/vm_param.h200
-rw-r--r--i386/i386/xen.h412
-rw-r--r--i386/i386/xpr.h32
-rw-r--r--i386/i386at/acpi_parse_apic.c650
-rw-r--r--i386/i386at/acpi_parse_apic.h201
-rw-r--r--i386/i386at/autoconf.c149
-rw-r--r--i386/i386at/autoconf.h43
-rw-r--r--i386/i386at/biosmem.c1070
-rw-r--r--i386/i386at/biosmem.h109
-rw-r--r--i386/i386at/boothdr.S179
-rw-r--r--i386/i386at/com.c900
-rw-r--r--i386/i386at/com.h86
-rw-r--r--i386/i386at/comreg.h139
-rw-r--r--i386/i386at/conf.c172
-rw-r--r--i386/i386at/cons_conf.c63
-rw-r--r--i386/i386at/cram.h86
-rw-r--r--i386/i386at/disk.h89
-rw-r--r--i386/i386at/elf.h61
-rw-r--r--i386/i386at/i8250.h134
-rw-r--r--i386/i386at/idt.h53
-rw-r--r--i386/i386at/immc.c134
-rw-r--r--i386/i386at/immc.h31
-rw-r--r--i386/i386at/int_init.c82
-rw-r--r--i386/i386at/int_init.h35
-rw-r--r--i386/i386at/interrupt.S142
-rw-r--r--i386/i386at/ioapic.c463
-rw-r--r--i386/i386at/kd.c3059
-rw-r--r--i386/i386at/kd.h744
-rw-r--r--i386/i386at/kd_event.c392
-rw-r--r--i386/i386at/kd_event.h62
-rw-r--r--i386/i386at/kd_mouse.c800
-rw-r--r--i386/i386at/kd_mouse.h72
-rw-r--r--i386/i386at/kd_queue.c109
-rw-r--r--i386/i386at/kd_queue.h86
-rw-r--r--i386/i386at/kdasm.S145
-rw-r--r--i386/i386at/kdsoft.h209
-rw-r--r--i386/i386at/lpr.c285
-rw-r--r--i386/i386at/lpr.h66
-rw-r--r--i386/i386at/mem.c42
-rw-r--r--i386/i386at/mem.h24
-rw-r--r--i386/i386at/model_dep.c674
-rw-r--r--i386/i386at/model_dep.h39
-rw-r--r--i386/i386at/pic_isa.c56
-rw-r--r--i386/i386at/rtc.c242
-rw-r--r--i386/i386at/rtc.h143
-rw-r--r--i386/include/mach/i386/asm.h146
-rw-r--r--i386/include/mach/i386/boolean.h37
-rw-r--r--i386/include/mach/i386/eflags.h53
-rw-r--r--i386/include/mach/i386/exception.h85
-rw-r--r--i386/include/mach/i386/exec/elf.h53
-rw-r--r--i386/include/mach/i386/fp_reg.h140
-rw-r--r--i386/include/mach/i386/ioccom.h32
-rw-r--r--i386/include/mach/i386/kern_return.h40
-rw-r--r--i386/include/mach/i386/mach_i386.defs113
-rw-r--r--i386/include/mach/i386/mach_i386_types.h57
-rwxr-xr-xi386/include/mach/i386/machine_types.defs107
-rw-r--r--i386/include/mach/i386/multiboot.h208
-rw-r--r--i386/include/mach/i386/syscall_sw.h39
-rw-r--r--i386/include/mach/i386/thread_status.h190
-rw-r--r--i386/include/mach/i386/trap.h60
-rw-r--r--i386/include/mach/i386/vm_param.h90
-rw-r--r--i386/include/mach/i386/vm_types.h173
-rw-r--r--i386/include/mach/sa/stdarg.h58
-rw-r--r--i386/intel/pmap.c3325
-rw-r--r--i386/intel/pmap.h574
-rw-r--r--i386/intel/read_fault.c178
-rw-r--r--i386/intel/read_fault.h35
-rw-r--r--i386/ldscript201
-rw-r--r--i386/linux/Makefrag.am25
-rw-r--r--i386/linux/dev/include/linux/autoconf.h284
-rw-r--r--i386/xen/Makefrag.am34
-rw-r--r--i386/xen/xen.c69
-rw-r--r--i386/xen/xen_boothdr.S208
-rw-r--r--i386/xen/xen_locore.S110
-rw-r--r--include/alloca.h25
-rw-r--r--include/cache.h25
-rw-r--r--include/device/audio_status.h164
-rw-r--r--include/device/bpf.h244
-rw-r--r--include/device/device.defs183
-rw-r--r--include/device/device_reply.defs110
-rw-r--r--include/device/device_request.defs95
-rw-r--r--include/device/device_types.defs92
-rw-r--r--include/device/device_types.h148
-rw-r--r--include/device/disk_status.h318
-rw-r--r--include/device/input.h106
-rw-r--r--include/device/net_status.h201
-rw-r--r--include/device/notify.defs36
-rw-r--r--include/device/notify.h34
-rw-r--r--include/device/tape_status.h140
-rw-r--r--include/device/tty_status.h134
-rw-r--r--include/inttypes.h64
-rw-r--r--include/mach/alert.h37
-rw-r--r--include/mach/boolean.h63
-rw-r--r--include/mach/boot.h93
-rw-r--r--include/mach/default_pager.defs65
-rw-r--r--include/mach/default_pager_types.defs53
-rw-r--r--include/mach/default_pager_types.h59
-rw-r--r--include/mach/error.h93
-rw-r--r--include/mach/exc.defs47
-rw-r--r--include/mach/exception.h58
-rw-r--r--include/mach/exec/a.out.h68
-rw-r--r--include/mach/exec/elf.h364
-rw-r--r--include/mach/exec/exec.h130
-rw-r--r--include/mach/experimental.defs15
-rw-r--r--include/mach/gnumach.defs217
-rw-r--r--include/mach/host_info.h90
-rw-r--r--include/mach/inline.h27
-rw-r--r--include/mach/kern_return.h166
-rw-r--r--include/mach/mach.defs724
-rw-r--r--include/mach/mach4.defs131
-rw-r--r--include/mach/mach_host.defs388
-rw-r--r--include/mach/mach_param.h39
-rw-r--r--include/mach/mach_port.defs360
-rw-r--r--include/mach/mach_traps.h43
-rw-r--r--include/mach/mach_types.defs299
-rw-r--r--include/mach/mach_types.h90
-rw-r--r--include/mach/machine.h268
-rw-r--r--include/mach/macro_help.h18
-rw-r--r--include/mach/memory_object.defs333
-rw-r--r--include/mach/memory_object.h90
-rw-r--r--include/mach/memory_object_default.defs118
-rw-r--r--include/mach/message.h540
-rw-r--r--include/mach/mig_errors.h89
-rw-r--r--include/mach/mig_support.h57
-rw-r--r--include/mach/notify.defs112
-rw-r--r--include/mach/notify.h92
-rw-r--r--include/mach/pc_sample.h66
-rw-r--r--include/mach/policy.h45
-rw-r--r--include/mach/port.h159
-rw-r--r--include/mach/processor_info.h104
-rw-r--r--include/mach/profil.h212
-rw-r--r--include/mach/profilparam.h62
-rw-r--r--include/mach/std_types.defs101
-rw-r--r--include/mach/std_types.h44
-rw-r--r--include/mach/syscall_sw.h121
-rw-r--r--include/mach/task_info.h126
-rw-r--r--include/mach/task_notify.defs59
-rw-r--r--include/mach/task_special_ports.h66
-rw-r--r--include/mach/thread_info.h124
-rw-r--r--include/mach/thread_special_ports.h59
-rw-r--r--include/mach/thread_status.h55
-rw-r--r--include/mach/thread_switch.h40
-rw-r--r--include/mach/time_value.h201
-rw-r--r--include/mach/version.h73
-rw-r--r--include/mach/vm_attributes.h63
-rw-r--r--include/mach/vm_cache_statistics.h41
-rw-r--r--include/mach/vm_inherit.h55
-rw-r--r--include/mach/vm_param.h102
-rw-r--r--include/mach/vm_prot.h79
-rw-r--r--include/mach/vm_statistics.h75
-rw-r--r--include/mach/vm_sync.h45
-rw-r--r--include/mach/vm_wire.h30
-rw-r--r--include/mach/xen.h95
-rw-r--r--include/mach_debug/hash_info.h41
-rw-r--r--include/mach_debug/mach_debug.defs228
-rw-r--r--include/mach_debug/mach_debug_types.defs121
-rw-r--r--include/mach_debug/mach_debug_types.h52
-rw-r--r--include/mach_debug/slab_info.h56
-rw-r--r--include/mach_debug/vm_info.h143
-rw-r--r--include/string.h55
-rw-r--r--include/sys/reboot.h135
-rw-r--r--include/sys/types.h88
-rw-r--r--ipc/.gitignore2
-rw-r--r--ipc/ipc_entry.c214
-rw-r--r--ipc/ipc_entry.h110
-rw-r--r--ipc/ipc_init.c117
-rw-r--r--ipc/ipc_init.h50
-rw-r--r--ipc/ipc_kmsg.c2904
-rw-r--r--ipc/ipc_kmsg.h345
-rw-r--r--ipc/ipc_kmsg_queue.h31
-rwxr-xr-xipc/ipc_machdep.h39
-rw-r--r--ipc/ipc_marequest.c437
-rw-r--r--ipc/ipc_marequest.h99
-rw-r--r--ipc/ipc_mqueue.c695
-rw-r--r--ipc/ipc_mqueue.h112
-rw-r--r--ipc/ipc_notify.c449
-rw-r--r--ipc/ipc_notify.h58
-rw-r--r--ipc/ipc_object.c969
-rw-r--r--ipc/ipc_object.h169
-rw-r--r--ipc/ipc_port.c1290
-rw-r--r--ipc/ipc_port.h354
-rw-r--r--ipc/ipc_print.h39
-rw-r--r--ipc/ipc_pset.c350
-rw-r--r--ipc/ipc_pset.h92
-rw-r--r--ipc/ipc_right.c2115
-rw-r--r--ipc/ipc_right.h112
-rw-r--r--ipc/ipc_space.c215
-rw-r--r--ipc/ipc_space.h324
-rw-r--r--ipc/ipc_table.c135
-rw-r--r--ipc/ipc_table.h101
-rw-r--r--ipc/ipc_target.c78
-rw-r--r--ipc/ipc_target.h67
-rw-r--r--ipc/ipc_thread.c107
-rw-r--r--ipc/ipc_thread.h129
-rw-r--r--ipc/ipc_types.h31
-rw-r--r--ipc/mach_debug.c288
-rw-r--r--ipc/mach_msg.c1709
-rw-r--r--ipc/mach_msg.h60
-rw-r--r--ipc/mach_port.c1578
-rw-r--r--ipc/mach_port.h37
-rw-r--r--ipc/mach_port.srv27
-rw-r--r--ipc/notify.defs22
-rw-r--r--ipc/port.h106
-rw-r--r--kern/.gitignore2
-rw-r--r--kern/act.c1118
-rw-r--r--kern/act.h192
-rw-r--r--kern/assert.h54
-rw-r--r--kern/ast.c235
-rw-r--r--kern/ast.h139
-rw-r--r--kern/atomic.h54
-rw-r--r--kern/boot_script.c791
-rw-r--r--kern/boot_script.h111
-rw-r--r--kern/bootstrap.c918
-rw-r--r--kern/bootstrap.h26
-rw-r--r--kern/counters.c82
-rw-r--r--kern/counters.h107
-rw-r--r--kern/cpu_number.h47
-rw-r--r--kern/debug.c207
-rw-r--r--kern/debug.h72
-rw-r--r--kern/elf-load.c97
-rw-r--r--kern/eventcount.c361
-rw-r--r--kern/eventcount.h66
-rw-r--r--kern/exc.defs22
-rw-r--r--kern/exception.c1023
-rw-r--r--kern/exception.h66
-rw-r--r--kern/experimental.srv3
-rw-r--r--kern/gnumach.srv23
-rw-r--r--kern/gsync.c517
-rw-r--r--kern/gsync.h41
-rw-r--r--kern/host.c389
-rw-r--r--kern/host.h48
-rw-r--r--kern/ipc_host.c451
-rw-r--r--kern/ipc_host.h72
-rw-r--r--kern/ipc_kobject.c365
-rw-r--r--kern/ipc_kobject.h123
-rw-r--r--kern/ipc_mig.c984
-rw-r--r--kern/ipc_mig.h143
-rw-r--r--kern/ipc_sched.c283
-rw-r--r--kern/ipc_sched.h32
-rw-r--r--kern/ipc_tt.c1113
-rw-r--r--kern/ipc_tt.h92
-rw-r--r--kern/kalloc.h38
-rw-r--r--kern/kern_types.h70
-rw-r--r--kern/kmutex.c76
-rw-r--r--kern/kmutex.h52
-rw-r--r--kern/list.h357
-rw-r--r--kern/lock.c689
-rw-r--r--kern/lock.h316
-rw-r--r--kern/lock_mon.c364
-rw-r--r--kern/log2.h50
-rw-r--r--kern/mach.srv40
-rw-r--r--kern/mach4.srv32
-rw-r--r--kern/mach_clock.c657
-rw-r--r--kern/mach_clock.h112
-rw-r--r--kern/mach_debug.srv26
-rw-r--r--kern/mach_factor.c150
-rw-r--r--kern/mach_factor.h31
-rw-r--r--kern/mach_host.srv37
-rw-r--r--kern/machine.c672
-rw-r--r--kern/machine.h59
-rw-r--r--kern/macros.h93
-rw-r--r--kern/pc_sample.c306
-rw-r--r--kern/pc_sample.h94
-rw-r--r--kern/printf.c656
-rw-r--r--kern/printf.h68
-rw-r--r--kern/priority.c223
-rw-r--r--kern/priority.h28
-rw-r--r--kern/processor.c1034
-rw-r--r--kern/processor.h326
-rw-r--r--kern/profile.c408
-rw-r--r--kern/queue.c121
-rw-r--r--kern/queue.h391
-rw-r--r--kern/rbtree.c483
-rw-r--r--kern/rbtree.h306
-rw-r--r--kern/rbtree_i.h186
-rw-r--r--kern/rdxtree.c830
-rw-r--r--kern/rdxtree.h209
-rw-r--r--kern/rdxtree_i.h74
-rw-r--r--kern/refcount.h68
-rw-r--r--kern/sched.h186
-rw-r--r--kern/sched_prim.c2059
-rw-r--r--kern/sched_prim.h189
-rw-r--r--kern/shuttle.h71
-rw-r--r--kern/slab.c1686
-rw-r--r--kern/slab.h243
-rw-r--r--kern/smp.c49
-rw-r--r--kern/smp.h24
-rw-r--r--kern/startup.c316
-rw-r--r--kern/startup.h28
-rw-r--r--kern/strings.c275
-rw-r--r--kern/syscall_emulation.c453
-rw-r--r--kern/syscall_emulation.h67
-rw-r--r--kern/syscall_subr.c386
-rw-r--r--kern/syscall_subr.h42
-rw-r--r--kern/syscall_sw.c224
-rw-r--r--kern/syscall_sw.h57
-rw-r--r--kern/task.c1351
-rw-r--r--kern/task.h197
-rw-r--r--kern/task_notify.cli7
-rw-r--r--kern/thread.c2646
-rw-r--r--kern/thread.h437
-rw-r--r--kern/thread_swap.c200
-rw-r--r--kern/thread_swap.h43
-rw-r--r--kern/timer.c501
-rw-r--r--kern/timer.h195
-rw-r--r--kern/xpr.c197
-rw-r--r--kern/xpr.h97
-rw-r--r--linux/Makefrag.am788
-rw-r--r--linux/configfrag.ac664
-rw-r--r--linux/dev/README8
-rw-r--r--linux/dev/arch/i386/kernel/irq.c775
-rw-r--r--linux/dev/arch/i386/kernel/setup.c13
-rw-r--r--linux/dev/drivers/block/ahci.c1038
-rw-r--r--linux/dev/drivers/block/floppy.c4288
-rw-r--r--linux/dev/drivers/block/genhd.c1080
-rw-r--r--linux/dev/drivers/net/Space.c582
-rw-r--r--linux/dev/drivers/net/auto_irq.c123
-rw-r--r--linux/dev/drivers/net/net_init.c446
-rw-r--r--linux/dev/drivers/net/wavelan.p.h639
-rw-r--r--linux/dev/drivers/scsi/eata_dma.c1607
-rw-r--r--linux/dev/drivers/scsi/g_NCR5380.c735
-rw-r--r--linux/dev/glue/block.c1770
-rw-r--r--linux/dev/glue/glue.h42
-rw-r--r--linux/dev/glue/kmem.c589
-rw-r--r--linux/dev/glue/misc.c248
-rw-r--r--linux/dev/glue/net.c670
-rw-r--r--linux/dev/include/ahci.h268
-rw-r--r--linux/dev/include/asm-i386/page.h59
-rw-r--r--linux/dev/include/asm-i386/smp.h8
-rw-r--r--linux/dev/include/asm-i386/string.h487
-rw-r--r--linux/dev/include/asm-i386/system.h356
-rw-r--r--linux/dev/include/asm-i386/uaccess.h1
-rw-r--r--linux/dev/include/linux/blk.h471
-rw-r--r--linux/dev/include/linux/blkdev.h73
-rw-r--r--linux/dev/include/linux/compile.h6
-rw-r--r--linux/dev/include/linux/etherdevice.h62
-rw-r--r--linux/dev/include/linux/fs.h803
-rw-r--r--linux/dev/include/linux/genhd.h208
-rw-r--r--linux/dev/include/linux/if.h184
-rw-r--r--linux/dev/include/linux/kernel.h107
-rw-r--r--linux/dev/include/linux/locks.h66
-rw-r--r--linux/dev/include/linux/malloc.h18
-rw-r--r--linux/dev/include/linux/mm.h378
-rw-r--r--linux/dev/include/linux/modversions.h1
-rw-r--r--linux/dev/include/linux/netdevice.h339
-rw-r--r--linux/dev/include/linux/notifier.h96
-rw-r--r--linux/dev/include/linux/pagemap.h150
-rw-r--r--linux/dev/include/linux/pm.h1
-rw-r--r--linux/dev/include/linux/proc_fs.h292
-rw-r--r--linux/dev/include/linux/sched.h521
-rw-r--r--linux/dev/include/linux/skbuff.h466
-rw-r--r--linux/dev/include/linux/threads.h1
-rw-r--r--linux/dev/include/linux/types.h117
-rw-r--r--linux/dev/init/main.c261
-rw-r--r--linux/dev/init/version.c32
-rw-r--r--linux/dev/kernel/dma.c109
-rw-r--r--linux/dev/kernel/printk.c83
-rw-r--r--linux/dev/kernel/resource.c145
-rw-r--r--linux/dev/kernel/sched.c630
-rw-r--r--linux/dev/kernel/softirq.c48
-rw-r--r--linux/dev/lib/vsprintf.c354
-rw-r--r--linux/dev/net/core/dev.c1648
-rw-r--r--linux/pcmcia-cs/clients/3c574_cs.c1349
-rw-r--r--linux/pcmcia-cs/clients/3c589_cs.c1107
-rw-r--r--linux/pcmcia-cs/clients/ax8390.h165
-rw-r--r--linux/pcmcia-cs/clients/axnet_cs.c1936
-rw-r--r--linux/pcmcia-cs/clients/fmvj18x_cs.c1322
-rw-r--r--linux/pcmcia-cs/clients/nmclan_cs.c1744
-rw-r--r--linux/pcmcia-cs/clients/ositech.h358
-rw-r--r--linux/pcmcia-cs/clients/pcnet_cs.c1702
-rw-r--r--linux/pcmcia-cs/clients/smc91c92_cs.c2135
-rw-r--r--linux/pcmcia-cs/clients/xirc2ps_cs.c2091
-rw-r--r--linux/pcmcia-cs/glue/ds.c454
-rw-r--r--linux/pcmcia-cs/glue/pcmcia.c121
-rw-r--r--linux/pcmcia-cs/glue/pcmcia_glue.h264
-rw-r--r--linux/pcmcia-cs/glue/wireless_glue.h158
-rw-r--r--linux/pcmcia-cs/include/linux/crc32.h49
-rw-r--r--linux/pcmcia-cs/include/linux/slab.h12
-rw-r--r--linux/pcmcia-cs/include/pcmcia/bulkmem.h195
-rw-r--r--linux/pcmcia-cs/include/pcmcia/bus_ops.h157
-rw-r--r--linux/pcmcia-cs/include/pcmcia/ciscode.h138
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cisreg.h135
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cistpl.h604
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cs.h441
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cs_types.h70
-rw-r--r--linux/pcmcia-cs/include/pcmcia/driver_ops.h73
-rw-r--r--linux/pcmcia-cs/include/pcmcia/ds.h148
-rw-r--r--linux/pcmcia-cs/include/pcmcia/mem_op.h133
-rw-r--r--linux/pcmcia-cs/include/pcmcia/ss.h133
-rw-r--r--linux/pcmcia-cs/include/pcmcia/version.h9
-rw-r--r--linux/pcmcia-cs/modules/bulkmem.c626
-rw-r--r--linux/pcmcia-cs/modules/cirrus.h188
-rw-r--r--linux/pcmcia-cs/modules/cistpl.c1502
-rw-r--r--linux/pcmcia-cs/modules/cs.c2399
-rw-r--r--linux/pcmcia-cs/modules/cs_internal.h300
-rw-r--r--linux/pcmcia-cs/modules/ds.c1039
-rw-r--r--linux/pcmcia-cs/modules/ene.h59
-rw-r--r--linux/pcmcia-cs/modules/i82365.c2588
-rw-r--r--linux/pcmcia-cs/modules/i82365.h154
-rw-r--r--linux/pcmcia-cs/modules/o2micro.h160
-rw-r--r--linux/pcmcia-cs/modules/pci_fixup.c677
-rw-r--r--linux/pcmcia-cs/modules/ricoh.h161
-rw-r--r--linux/pcmcia-cs/modules/rsrc_mgr.c877
-rw-r--r--linux/pcmcia-cs/modules/smc34c90.h58
-rw-r--r--linux/pcmcia-cs/modules/ti113x.h264
-rw-r--r--linux/pcmcia-cs/modules/topic.h123
-rw-r--r--linux/pcmcia-cs/modules/vg468.h112
-rw-r--r--linux/pcmcia-cs/modules/yenta.h156
-rw-r--r--linux/pcmcia-cs/wireless/hermes.c552
-rw-r--r--linux/pcmcia-cs/wireless/hermes.h456
-rw-r--r--linux/pcmcia-cs/wireless/hermes_rid.h153
-rw-r--r--linux/pcmcia-cs/wireless/ieee802_11.h79
-rw-r--r--linux/pcmcia-cs/wireless/orinoco.c4230
-rw-r--r--linux/pcmcia-cs/wireless/orinoco.h166
-rw-r--r--linux/pcmcia-cs/wireless/orinoco_cs.c705
-rw-r--r--linux/src/COPYING351
-rw-r--r--linux/src/arch/i386/kernel/bios32.c916
-rw-r--r--linux/src/arch/i386/kernel/irq.c582
-rw-r--r--linux/src/arch/i386/lib/delay.c45
-rw-r--r--linux/src/arch/i386/lib/semaphore.S35
-rw-r--r--linux/src/drivers/block/cmd640.c850
-rw-r--r--linux/src/drivers/block/floppy.c4284
-rw-r--r--linux/src/drivers/block/genhd.c761
-rw-r--r--linux/src/drivers/block/ide-cd.c2802
-rw-r--r--linux/src/drivers/block/ide.c3926
-rw-r--r--linux/src/drivers/block/ide.h750
-rw-r--r--linux/src/drivers/block/ide_modes.h226
-rw-r--r--linux/src/drivers/block/rz1000.c59
-rw-r--r--linux/src/drivers/block/triton.c996
-rw-r--r--linux/src/drivers/net/3c501.c856
-rw-r--r--linux/src/drivers/net/3c503.c690
-rw-r--r--linux/src/drivers/net/3c503.h91
-rw-r--r--linux/src/drivers/net/3c505.c1732
-rw-r--r--linux/src/drivers/net/3c505.h245
-rw-r--r--linux/src/drivers/net/3c507.c924
-rw-r--r--linux/src/drivers/net/3c509.c842
-rw-r--r--linux/src/drivers/net/3c515.c1501
-rw-r--r--linux/src/drivers/net/3c59x.c2648
-rw-r--r--linux/src/drivers/net/8390.c829
-rw-r--r--linux/src/drivers/net/8390.h175
-rw-r--r--linux/src/drivers/net/Space.c541
-rw-r--r--linux/src/drivers/net/ac3200.c385
-rw-r--r--linux/src/drivers/net/apricot.c1046
-rw-r--r--linux/src/drivers/net/at1700.c756
-rw-r--r--linux/src/drivers/net/atp.c977
-rw-r--r--linux/src/drivers/net/atp.h274
-rw-r--r--linux/src/drivers/net/auto_irq.c123
-rw-r--r--linux/src/drivers/net/cb_shim.c296
-rw-r--r--linux/src/drivers/net/de4x5.c5942
-rw-r--r--linux/src/drivers/net/de4x5.h1028
-rw-r--r--linux/src/drivers/net/de600.c853
-rw-r--r--linux/src/drivers/net/de620.c1045
-rw-r--r--linux/src/drivers/net/de620.h117
-rw-r--r--linux/src/drivers/net/depca.c1890
-rw-r--r--linux/src/drivers/net/depca.h185
-rw-r--r--linux/src/drivers/net/e2100.c456
-rw-r--r--linux/src/drivers/net/eepro.c1407
-rw-r--r--linux/src/drivers/net/eepro100.c2155
-rw-r--r--linux/src/drivers/net/eexpress.c1285
-rw-r--r--linux/src/drivers/net/epic100.c1560
-rw-r--r--linux/src/drivers/net/eth16i.c1604
-rw-r--r--linux/src/drivers/net/eth82586.h172
-rw-r--r--linux/src/drivers/net/ewrk3.c1920
-rw-r--r--linux/src/drivers/net/ewrk3.h322
-rw-r--r--linux/src/drivers/net/fmv18x.c664
-rw-r--r--linux/src/drivers/net/hamachi.c1315
-rw-r--r--linux/src/drivers/net/hp-plus.c483
-rw-r--r--linux/src/drivers/net/hp.c451
-rw-r--r--linux/src/drivers/net/hp100.c3121
-rw-r--r--linux/src/drivers/net/hp100.h626
-rw-r--r--linux/src/drivers/net/i82586.h413
-rw-r--r--linux/src/drivers/net/intel-gige.c1450
-rw-r--r--linux/src/drivers/net/iow.h6
-rw-r--r--linux/src/drivers/net/kern_compat.h285
-rw-r--r--linux/src/drivers/net/lance.c1293
-rw-r--r--linux/src/drivers/net/myson803.c1650
-rw-r--r--linux/src/drivers/net/natsemi.c1448
-rw-r--r--linux/src/drivers/net/ne.c812
-rw-r--r--linux/src/drivers/net/ne2k-pci.c647
-rw-r--r--linux/src/drivers/net/net_init.c439
-rw-r--r--linux/src/drivers/net/ni52.c1387
-rw-r--r--linux/src/drivers/net/ni52.h310
-rw-r--r--linux/src/drivers/net/ni65.c1228
-rw-r--r--linux/src/drivers/net/ni65.h130
-rw-r--r--linux/src/drivers/net/ns820.c1547
-rw-r--r--linux/src/drivers/net/pci-scan.c659
-rw-r--r--linux/src/drivers/net/pci-scan.h90
-rw-r--r--linux/src/drivers/net/pcnet32.c970
-rw-r--r--linux/src/drivers/net/rtl8139.c1737
-rw-r--r--linux/src/drivers/net/seeq8005.c760
-rw-r--r--linux/src/drivers/net/seeq8005.h156
-rw-r--r--linux/src/drivers/net/sis900.c1803
-rw-r--r--linux/src/drivers/net/sis900.h284
-rw-r--r--linux/src/drivers/net/sk_g16.c2110
-rw-r--r--linux/src/drivers/net/sk_g16.h164
-rw-r--r--linux/src/drivers/net/smc-ultra.c496
-rw-r--r--linux/src/drivers/net/smc-ultra32.c413
-rw-r--r--linux/src/drivers/net/smc9194.c1779
-rw-r--r--linux/src/drivers/net/smc9194.h240
-rw-r--r--linux/src/drivers/net/starfire.c1535
-rw-r--r--linux/src/drivers/net/sundance.c1556
-rw-r--r--linux/src/drivers/net/tlan.c2863
-rw-r--r--linux/src/drivers/net/tlan.h525
-rw-r--r--linux/src/drivers/net/tulip.c3685
-rw-r--r--linux/src/drivers/net/via-rhine.c1427
-rw-r--r--linux/src/drivers/net/wavelan.c4373
-rw-r--r--linux/src/drivers/net/wavelan.h346
-rw-r--r--linux/src/drivers/net/wavelan.p.h635
-rw-r--r--linux/src/drivers/net/wd.c513
-rw-r--r--linux/src/drivers/net/winbond-840.c1558
-rw-r--r--linux/src/drivers/net/yellowfin.c1482
-rw-r--r--linux/src/drivers/net/znet.c746
-rw-r--r--linux/src/drivers/pci/pci.c1322
-rw-r--r--linux/src/drivers/scsi/53c7,8xx.h1584
-rw-r--r--linux/src/drivers/scsi/53c78xx.c6401
-rw-r--r--linux/src/drivers/scsi/53c8xx_d.h2677
-rw-r--r--linux/src/drivers/scsi/53c8xx_u.h97
-rw-r--r--linux/src/drivers/scsi/AM53C974.c2270
-rw-r--r--linux/src/drivers/scsi/AM53C974.h409
-rw-r--r--linux/src/drivers/scsi/BusLogic.c5003
-rw-r--r--linux/src/drivers/scsi/BusLogic.h1775
-rw-r--r--linux/src/drivers/scsi/FlashPoint.c12156
-rw-r--r--linux/src/drivers/scsi/NCR5380.c3246
-rw-r--r--linux/src/drivers/scsi/NCR5380.h369
-rw-r--r--linux/src/drivers/scsi/NCR53c406a.c1079
-rw-r--r--linux/src/drivers/scsi/NCR53c406a.h83
-rw-r--r--linux/src/drivers/scsi/advansys.c15554
-rw-r--r--linux/src/drivers/scsi/advansys.h174
-rw-r--r--linux/src/drivers/scsi/aha152x.c3280
-rw-r--r--linux/src/drivers/scsi/aha152x.h357
-rw-r--r--linux/src/drivers/scsi/aha1542.c1325
-rw-r--r--linux/src/drivers/scsi/aha1542.h170
-rw-r--r--linux/src/drivers/scsi/aha1740.c614
-rw-r--r--linux/src/drivers/scsi/aha1740.h196
-rw-r--r--linux/src/drivers/scsi/aic7xxx.c11404
-rw-r--r--linux/src/drivers/scsi/aic7xxx.h114
-rw-r--r--linux/src/drivers/scsi/aic7xxx/scsi_message.h41
-rw-r--r--linux/src/drivers/scsi/aic7xxx/sequencer.h135
-rw-r--r--linux/src/drivers/scsi/aic7xxx_proc.c384
-rw-r--r--linux/src/drivers/scsi/aic7xxx_reg.h587
-rw-r--r--linux/src/drivers/scsi/aic7xxx_seq.c769
-rw-r--r--linux/src/drivers/scsi/constants.c683
-rw-r--r--linux/src/drivers/scsi/constants.h6
-rw-r--r--linux/src/drivers/scsi/dc390.h147
-rw-r--r--linux/src/drivers/scsi/dtc.c400
-rw-r--r--linux/src/drivers/scsi/dtc.h169
-rw-r--r--linux/src/drivers/scsi/eata.c2331
-rw-r--r--linux/src/drivers/scsi/eata.h60
-rw-r--r--linux/src/drivers/scsi/eata_dma.c1603
-rw-r--r--linux/src/drivers/scsi/eata_dma.h128
-rw-r--r--linux/src/drivers/scsi/eata_dma_proc.c493
-rw-r--r--linux/src/drivers/scsi/eata_dma_proc.h260
-rw-r--r--linux/src/drivers/scsi/eata_generic.h414
-rw-r--r--linux/src/drivers/scsi/eata_pio.c1042
-rw-r--r--linux/src/drivers/scsi/eata_pio.h116
-rw-r--r--linux/src/drivers/scsi/eata_pio_proc.c135
-rw-r--r--linux/src/drivers/scsi/fdomain.c2082
-rw-r--r--linux/src/drivers/scsi/fdomain.h61
-rw-r--r--linux/src/drivers/scsi/g_NCR5380.c729
-rw-r--r--linux/src/drivers/scsi/g_NCR5380.h162
-rw-r--r--linux/src/drivers/scsi/gdth.c3598
-rw-r--r--linux/src/drivers/scsi/gdth.h819
-rw-r--r--linux/src/drivers/scsi/gdth_ioctl.h86
-rw-r--r--linux/src/drivers/scsi/gdth_proc.c656
-rw-r--r--linux/src/drivers/scsi/gdth_proc.h24
-rw-r--r--linux/src/drivers/scsi/hosts.c554
-rw-r--r--linux/src/drivers/scsi/hosts.h405
-rw-r--r--linux/src/drivers/scsi/in2000.c2379
-rw-r--r--linux/src/drivers/scsi/in2000.h465
-rw-r--r--linux/src/drivers/scsi/ncr53c8xx.c10795
-rw-r--r--linux/src/drivers/scsi/ncr53c8xx.h1220
-rw-r--r--linux/src/drivers/scsi/pas16.c576
-rw-r--r--linux/src/drivers/scsi/pas16.h196
-rw-r--r--linux/src/drivers/scsi/ppa.c1464
-rw-r--r--linux/src/drivers/scsi/ppa.h176
-rw-r--r--linux/src/drivers/scsi/qlogicfas.c679
-rw-r--r--linux/src/drivers/scsi/qlogicfas.h43
-rw-r--r--linux/src/drivers/scsi/qlogicisp.c1767
-rw-r--r--linux/src/drivers/scsi/qlogicisp.h98
-rw-r--r--linux/src/drivers/scsi/scripts.h1357
-rw-r--r--linux/src/drivers/scsi/scsi.c3585
-rw-r--r--linux/src/drivers/scsi/scsi.h650
-rw-r--r--linux/src/drivers/scsi/scsi_ioctl.c452
-rw-r--r--linux/src/drivers/scsi/scsi_proc.c302
-rw-r--r--linux/src/drivers/scsi/scsicam.c229
-rw-r--r--linux/src/drivers/scsi/scsiio.c1537
-rw-r--r--linux/src/drivers/scsi/scsiiom.c1540
-rw-r--r--linux/src/drivers/scsi/sd.c1691
-rw-r--r--linux/src/drivers/scsi/sd.h65
-rw-r--r--linux/src/drivers/scsi/sd_ioctl.c128
-rw-r--r--linux/src/drivers/scsi/seagate.c1679
-rw-r--r--linux/src/drivers/scsi/seagate.h139
-rw-r--r--linux/src/drivers/scsi/sr.c1290
-rw-r--r--linux/src/drivers/scsi/sr.h40
-rw-r--r--linux/src/drivers/scsi/sr_ioctl.c607
-rw-r--r--linux/src/drivers/scsi/sym53c8xx.c14696
-rw-r--r--linux/src/drivers/scsi/sym53c8xx.h116
-rw-r--r--linux/src/drivers/scsi/sym53c8xx_comm.h2717
-rw-r--r--linux/src/drivers/scsi/sym53c8xx_defs.h1767
-rw-r--r--linux/src/drivers/scsi/t128.c400
-rw-r--r--linux/src/drivers/scsi/t128.h169
-rw-r--r--linux/src/drivers/scsi/tmscsim.c1930
-rw-r--r--linux/src/drivers/scsi/tmscsim.h680
-rw-r--r--linux/src/drivers/scsi/u14-34f.c1996
-rw-r--r--linux/src/drivers/scsi/u14-34f.h60
-rw-r--r--linux/src/drivers/scsi/ultrastor.c1165
-rw-r--r--linux/src/drivers/scsi/ultrastor.h102
-rw-r--r--linux/src/drivers/scsi/wd7000.c1452
-rw-r--r--linux/src/drivers/scsi/wd7000.h446
-rw-r--r--linux/src/include/asm-i386/atomic.h69
-rw-r--r--linux/src/include/asm-i386/bitops.h201
-rw-r--r--linux/src/include/asm-i386/byteorder.h90
-rw-r--r--linux/src/include/asm-i386/cache.h18
-rw-r--r--linux/src/include/asm-i386/checksum.h121
-rw-r--r--linux/src/include/asm-i386/delay.h18
-rw-r--r--linux/src/include/asm-i386/dma.h271
-rw-r--r--linux/src/include/asm-i386/errno.h132
-rw-r--r--linux/src/include/asm-i386/fcntl.h59
-rw-r--r--linux/src/include/asm-i386/floppy.h289
-rw-r--r--linux/src/include/asm-i386/hardirq.h66
-rw-r--r--linux/src/include/asm-i386/io.h216
-rw-r--r--linux/src/include/asm-i386/ioctl.h75
-rw-r--r--linux/src/include/asm-i386/ioctls.h74
-rw-r--r--linux/src/include/asm-i386/irq.h421
-rw-r--r--linux/src/include/asm-i386/math_emu.h57
-rw-r--r--linux/src/include/asm-i386/page.h62
-rw-r--r--linux/src/include/asm-i386/param.h20
-rw-r--r--linux/src/include/asm-i386/posix_types.h63
-rw-r--r--linux/src/include/asm-i386/processor.h204
-rw-r--r--linux/src/include/asm-i386/ptrace.h60
-rw-r--r--linux/src/include/asm-i386/resource.h39
-rw-r--r--linux/src/include/asm-i386/segment.h380
-rw-r--r--linux/src/include/asm-i386/semaphore.h133
-rw-r--r--linux/src/include/asm-i386/sigcontext.h54
-rw-r--r--linux/src/include/asm-i386/signal.h97
-rw-r--r--linux/src/include/asm-i386/socket.h27
-rw-r--r--linux/src/include/asm-i386/sockios.h12
-rw-r--r--linux/src/include/asm-i386/spinlock.h262
-rw-r--r--linux/src/include/asm-i386/stat.h41
-rw-r--r--linux/src/include/asm-i386/statfs.h25
-rw-r--r--linux/src/include/asm-i386/string.h487
-rw-r--r--linux/src/include/asm-i386/system.h334
-rw-r--r--linux/src/include/asm-i386/termbits.h160
-rw-r--r--linux/src/include/asm-i386/termios.h92
-rw-r--r--linux/src/include/asm-i386/types.h46
-rw-r--r--linux/src/include/asm-i386/unaligned.h16
-rw-r--r--linux/src/include/asm-i386/unistd.h328
-rw-r--r--linux/src/include/asm-i386/vm86.h175
-rw-r--r--linux/src/include/linux/affs_hardblocks.h66
-rw-r--r--linux/src/include/linux/atalk.h157
-rw-r--r--linux/src/include/linux/ax25.h96
-rw-r--r--linux/src/include/linux/binfmts.h65
-rw-r--r--linux/src/include/linux/bios32.h61
-rw-r--r--linux/src/include/linux/blk.h454
-rw-r--r--linux/src/include/linux/blkdev.h66
-rw-r--r--linux/src/include/linux/cdrom.h453
-rw-r--r--linux/src/include/linux/compatmac.h153
-rw-r--r--linux/src/include/linux/compiler-gcc.h112
-rw-r--r--linux/src/include/linux/compiler-gcc3.h23
-rw-r--r--linux/src/include/linux/compiler-gcc4.h57
-rw-r--r--linux/src/include/linux/compiler-gcc5.h67
-rw-r--r--linux/src/include/linux/compiler.h315
-rw-r--r--linux/src/include/linux/config.h43
-rw-r--r--linux/src/include/linux/ctype.h64
-rw-r--r--linux/src/include/linux/delay.h14
-rw-r--r--linux/src/include/linux/errno.h16
-rw-r--r--linux/src/include/linux/etherdevice.h46
-rw-r--r--linux/src/include/linux/fcntl.h6
-rw-r--r--linux/src/include/linux/fd.h377
-rw-r--r--linux/src/include/linux/fddidevice.h42
-rw-r--r--linux/src/include/linux/fdreg.h143
-rw-r--r--linux/src/include/linux/fs.h728
-rw-r--r--linux/src/include/linux/genhd.h136
-rw-r--r--linux/src/include/linux/hdreg.h240
-rw-r--r--linux/src/include/linux/head.h20
-rw-r--r--linux/src/include/linux/icmp.h85
-rw-r--r--linux/src/include/linux/if.h155
-rw-r--r--linux/src/include/linux/if_arp.h130
-rw-r--r--linux/src/include/linux/if_ether.h119
-rw-r--r--linux/src/include/linux/if_fddi.h202
-rw-r--r--linux/src/include/linux/if_tr.h102
-rw-r--r--linux/src/include/linux/igmp.h119
-rw-r--r--linux/src/include/linux/in.h149
-rw-r--r--linux/src/include/linux/inet.h52
-rw-r--r--linux/src/include/linux/init.h30
-rw-r--r--linux/src/include/linux/interrupt.h120
-rw-r--r--linux/src/include/linux/ioctl.h7
-rw-r--r--linux/src/include/linux/ioport.h31
-rw-r--r--linux/src/include/linux/ip.h112
-rw-r--r--linux/src/include/linux/ipc.h67
-rw-r--r--linux/src/include/linux/ipx.h80
-rw-r--r--linux/src/include/linux/kcomp.h52
-rw-r--r--linux/src/include/linux/kdev_t.h114
-rw-r--r--linux/src/include/linux/kernel.h97
-rw-r--r--linux/src/include/linux/kernel_stat.h32
-rw-r--r--linux/src/include/linux/limits.h17
-rw-r--r--linux/src/include/linux/linkage.h59
-rw-r--r--linux/src/include/linux/list.h112
-rw-r--r--linux/src/include/linux/locks.h65
-rw-r--r--linux/src/include/linux/major.h88
-rw-r--r--linux/src/include/linux/malloc.h11
-rw-r--r--linux/src/include/linux/mc146818rtc.h149
-rw-r--r--linux/src/include/linux/md.h275
-rw-r--r--linux/src/include/linux/mm.h375
-rw-r--r--linux/src/include/linux/module.h116
-rw-r--r--linux/src/include/linux/mount.h30
-rw-r--r--linux/src/include/linux/net.h130
-rw-r--r--linux/src/include/linux/netdevice.h313
-rw-r--r--linux/src/include/linux/netrom.h34
-rw-r--r--linux/src/include/linux/notifier.h96
-rw-r--r--linux/src/include/linux/pagemap.h146
-rw-r--r--linux/src/include/linux/param.h6
-rw-r--r--linux/src/include/linux/pci.h1116
-rw-r--r--linux/src/include/linux/personality.h55
-rw-r--r--linux/src/include/linux/posix_types.h50
-rw-r--r--linux/src/include/linux/proc_fs.h292
-rw-r--r--linux/src/include/linux/ptrace.h26
-rw-r--r--linux/src/include/linux/quota.h221
-rw-r--r--linux/src/include/linux/random.h70
-rw-r--r--linux/src/include/linux/resource.h60
-rw-r--r--linux/src/include/linux/rose.h88
-rw-r--r--linux/src/include/linux/route.h79
-rw-r--r--linux/src/include/linux/sched.h496
-rw-r--r--linux/src/include/linux/sem.h112
-rw-r--r--linux/src/include/linux/signal.h6
-rw-r--r--linux/src/include/linux/skbuff.h467
-rw-r--r--linux/src/include/linux/smp.h54
-rw-r--r--linux/src/include/linux/socket.h147
-rw-r--r--linux/src/include/linux/sockios.h98
-rw-r--r--linux/src/include/linux/spinlock.h4
-rw-r--r--linux/src/include/linux/stat.h53
-rw-r--r--linux/src/include/linux/stddef.h15
-rw-r--r--linux/src/include/linux/string.h53
-rw-r--r--linux/src/include/linux/symtab_begin.h45
-rw-r--r--linux/src/include/linux/symtab_end.h15
-rw-r--r--linux/src/include/linux/tasks.h17
-rw-r--r--linux/src/include/linux/tcp.h71
-rw-r--r--linux/src/include/linux/termios.h7
-rw-r--r--linux/src/include/linux/time.h53
-rw-r--r--linux/src/include/linux/timer.h100
-rw-r--r--linux/src/include/linux/tqueue.h143
-rw-r--r--linux/src/include/linux/trdevice.h40
-rw-r--r--linux/src/include/linux/tty.h351
-rw-r--r--linux/src/include/linux/tty_driver.h189
-rw-r--r--linux/src/include/linux/tty_ldisc.h46
-rw-r--r--linux/src/include/linux/types.h96
-rw-r--r--linux/src/include/linux/ucdrom.h96
-rw-r--r--linux/src/include/linux/udp.h29
-rw-r--r--linux/src/include/linux/uio.h26
-rw-r--r--linux/src/include/linux/unistd.h11
-rw-r--r--linux/src/include/linux/utsname.h35
-rw-r--r--linux/src/include/linux/version.h2
-rw-r--r--linux/src/include/linux/vfs.h6
-rw-r--r--linux/src/include/linux/wait.h53
-rw-r--r--linux/src/include/linux/wireless.h479
-rw-r--r--linux/src/include/net/af_unix.h14
-rw-r--r--linux/src/include/net/arp.h17
-rw-r--r--linux/src/include/net/atalkcall.h2
-rw-r--r--linux/src/include/net/ax25.h292
-rw-r--r--linux/src/include/net/ax25call.h2
-rw-r--r--linux/src/include/net/br.h270
-rw-r--r--linux/src/include/net/checksum.h25
-rw-r--r--linux/src/include/net/datalink.h16
-rw-r--r--linux/src/include/net/gc.h46
-rw-r--r--linux/src/include/net/icmp.h43
-rw-r--r--linux/src/include/net/ip.h159
-rw-r--r--linux/src/include/net/ip_alias.h23
-rw-r--r--linux/src/include/net/ip_forward.h11
-rw-r--r--linux/src/include/net/ip_masq.h205
-rw-r--r--linux/src/include/net/ipip.h4
-rw-r--r--linux/src/include/net/ipx.h88
-rw-r--r--linux/src/include/net/ipxcall.h2
-rw-r--r--linux/src/include/net/netlink.h32
-rw-r--r--linux/src/include/net/netrom.h166
-rw-r--r--linux/src/include/net/nrcall.h2
-rw-r--r--linux/src/include/net/p8022.h7
-rw-r--r--linux/src/include/net/p8022call.h2
-rw-r--r--linux/src/include/net/p8022tr.h8
-rw-r--r--linux/src/include/net/p8022trcall.h3
-rw-r--r--linux/src/include/net/protocol.h55
-rw-r--r--linux/src/include/net/psnap.h7
-rw-r--r--linux/src/include/net/psnapcall.h2
-rw-r--r--linux/src/include/net/rarp.h12
-rw-r--r--linux/src/include/net/raw.h44
-rw-r--r--linux/src/include/net/rose.h233
-rw-r--r--linux/src/include/net/rosecall.h2
-rw-r--r--linux/src/include/net/route.h189
-rw-r--r--linux/src/include/net/slhc.h6
-rw-r--r--linux/src/include/net/slhc_vj.h187
-rw-r--r--linux/src/include/net/snmp.h107
-rw-r--r--linux/src/include/net/sock.h613
-rw-r--r--linux/src/include/net/spx.h38
-rw-r--r--linux/src/include/net/tcp.h374
-rw-r--r--linux/src/include/net/udp.h63
-rw-r--r--linux/src/include/scsi/scsi.h205
-rw-r--r--linux/src/include/scsi/scsi_ioctl.h28
-rw-r--r--linux/src/include/scsi/scsicam.h17
-rw-r--r--linux/src/init/main.c1135
-rw-r--r--linux/src/init/version.c30
-rw-r--r--linux/src/kernel/dma.c99
-rw-r--r--linux/src/kernel/printk.c253
-rw-r--r--linux/src/kernel/resource.c129
-rw-r--r--linux/src/kernel/sched.c1747
-rw-r--r--linux/src/kernel/softirq.c54
-rw-r--r--linux/src/lib/ctype.c36
-rw-r--r--linux/src/lib/vsprintf.c306
-rw-r--r--linux/src/net/core/dev.c1629
-rw-r--r--tests/.gitignore1
-rw-r--r--tests/Makefrag.am34
-rw-r--r--tests/README37
-rw-r--r--tests/configfrag.ac43
-rw-r--r--tests/grub.cfg.single.template4
-rw-r--r--tests/include/device/cons.h27
l---------tests/include/kern/printf.h1
-rw-r--r--tests/include/mach/mig_support.h71
-rw-r--r--tests/include/syscalls.h83
-rw-r--r--tests/include/testlib.h75
l---------tests/include/util/atoi.h1
-rw-r--r--tests/run-qemu.sh.template38
-rw-r--r--tests/start.S28
-rw-r--r--tests/syscalls.S4
-rw-r--r--tests/test-gsync.c122
-rw-r--r--tests/test-hello.c26
-rw-r--r--tests/test-mach_host.c81
-rw-r--r--tests/test-mach_port.c121
-rw-r--r--tests/test-machmsg.c405
-rw-r--r--tests/test-multiboot.in30
-rw-r--r--tests/test-syscalls.c166
-rw-r--r--tests/test-task.c171
-rw-r--r--tests/test-threads.c104
-rw-r--r--tests/test-vm.c85
-rw-r--r--tests/testlib.c114
-rw-r--r--tests/testlib_thread_start.c86
-rw-r--r--tests/user-qemu.mk221
-rw-r--r--util/atoi.c106
-rw-r--r--util/atoi.h67
-rw-r--r--util/byteorder.c53
-rw-r--r--util/byteorder.h32
-rw-r--r--version.c.in2
-rw-r--r--version.m44
-rw-r--r--vm/memory_object.c1090
-rw-r--r--vm/memory_object.h39
-rw-r--r--vm/memory_object_default.cli28
-rw-r--r--vm/memory_object_proxy.c228
-rw-r--r--vm/memory_object_proxy.h39
-rw-r--r--vm/memory_object_user.cli28
-rw-r--r--vm/pmap.h241
-rw-r--r--vm/vm_debug.c548
-rw-r--r--vm/vm_external.c151
-rw-r--r--vm/vm_external.h95
-rw-r--r--vm/vm_fault.c2136
-rw-r--r--vm/vm_fault.h81
-rw-r--r--vm/vm_init.c88
-rw-r--r--vm/vm_init.h25
-rw-r--r--vm/vm_kern.c1099
-rw-r--r--vm/vm_kern.h100
-rw-r--r--vm/vm_map.c5237
-rw-r--r--vm/vm_map.h585
-rw-r--r--vm/vm_object.c2994
-rw-r--r--vm/vm_object.h415
-rw-r--r--vm/vm_page.c2164
-rw-r--r--vm/vm_page.h567
-rw-r--r--vm/vm_pageout.c515
-rw-r--r--vm/vm_pageout.h53
-rw-r--r--vm/vm_print.h41
-rw-r--r--vm/vm_resident.c1116
-rw-r--r--vm/vm_resident.h45
-rw-r--r--vm/vm_types.h42
-rw-r--r--vm/vm_user.c803
-rw-r--r--vm/vm_user.h60
-rw-r--r--x86_64/Makefrag.am245
-rw-r--r--x86_64/_setjmp.S65
-rw-r--r--x86_64/boothdr.S222
-rw-r--r--x86_64/configfrag.ac63
-rw-r--r--x86_64/copy_user.c613
-rw-r--r--x86_64/cswitch.S148
-rw-r--r--x86_64/debug_trace.S56
-rw-r--r--x86_64/idt_inittab.S148
l---------x86_64/include/mach/x86_641
-rw-r--r--x86_64/include/syscall_sw.h40
-rw-r--r--x86_64/interrupt.S140
-rw-r--r--x86_64/kdasm.S133
-rw-r--r--x86_64/ldscript227
-rw-r--r--x86_64/locore.S1640
-rw-r--r--x86_64/spl.S265
l---------x86_64/x86_641
-rw-r--r--x86_64/xen_boothdr.S190
-rw-r--r--x86_64/xen_locore.S146
-rw-r--r--xen/Makefrag.am83
-rw-r--r--xen/block.c730
-rw-r--r--xen/block.h24
-rw-r--r--xen/configfrag.ac76
-rw-r--r--xen/console.c230
-rw-r--r--xen/console.h53
-rw-r--r--xen/evt.c119
-rw-r--r--xen/evt.h29
-rw-r--r--xen/grant.c143
-rw-r--r--xen/grant.h33
-rw-r--r--xen/net.c767
-rw-r--r--xen/net.h24
-rw-r--r--xen/public/COPYING38
-rw-r--r--xen/public/arch-x86/xen-mca.h279
-rw-r--r--xen/public/arch-x86/xen-x86_32.h180
-rw-r--r--xen/public/arch-x86/xen-x86_64.h212
-rw-r--r--xen/public/arch-x86/xen.h204
-rw-r--r--xen/public/arch-x86_32.h27
-rw-r--r--xen/public/arch-x86_64.h27
-rw-r--r--xen/public/callback.h121
-rw-r--r--xen/public/dom0_ops.h120
-rw-r--r--xen/public/domctl.h680
-rw-r--r--xen/public/elfnote.h233
-rw-r--r--xen/public/elfstructs.h526
-rw-r--r--xen/public/event_channel.h264
-rw-r--r--xen/public/features.h83
-rw-r--r--xen/public/grant_table.h438
-rw-r--r--xen/public/io/blkif.h141
-rw-r--r--xen/public/io/console.h51
-rw-r--r--xen/public/io/fbif.h176
-rw-r--r--xen/public/io/fsif.h191
-rw-r--r--xen/public/io/kbdif.h132
-rw-r--r--xen/public/io/netif.h205
-rw-r--r--xen/public/io/pciif.h101
-rw-r--r--xen/public/io/protocols.h40
-rw-r--r--xen/public/io/ring.h313
-rw-r--r--xen/public/io/tpmif.h77
-rw-r--r--xen/public/io/xenbus.h80
-rw-r--r--xen/public/io/xs_wire.h132
-rw-r--r--xen/public/kexec.h189
-rw-r--r--xen/public/libelf.h265
-rw-r--r--xen/public/memory.h312
-rw-r--r--xen/public/nmi.h78
-rw-r--r--xen/public/physdev.h219
-rw-r--r--xen/public/platform.h346
-rw-r--r--xen/public/sched.h121
-rw-r--r--xen/public/sysctl.h308
-rw-r--r--xen/public/trace.h206
-rw-r--r--xen/public/vcpu.h213
-rw-r--r--xen/public/version.h91
-rw-r--r--xen/public/xen-compat.h44
-rw-r--r--xen/public/xen.h657
-rw-r--r--xen/public/xencomm.h41
-rw-r--r--xen/public/xenoprof.h138
-rw-r--r--xen/ring.c61
-rw-r--r--xen/ring.h34
-rw-r--r--xen/store.c337
-rw-r--r--xen/store.h54
-rw-r--r--xen/time.c144
-rw-r--r--xen/time.h25
-rw-r--r--xen/xen.c73
-rw-r--r--xen/xen.h41
1153 files changed, 513598 insertions, 0 deletions
diff --git a/=announce-1.0 b/=announce-1.0
new file mode 100644
index 0000000..673dc99
--- /dev/null
+++ b/=announce-1.0
@@ -0,0 +1,40 @@
+I am pleased to announce version 1.0 of the GNU distribution of the
+Mach kernel. It may be found in the file (about 2.54 MB compressed)
+ftp://prep.ai.mit.edu/pub/gnu/gnumach-1.0.tar.gz.
+
+This distribution was prepared in order to install some bug fixes and
+minor improvements (not worth noting here) to the Utah microkernel,
+and to make the package conform to the GNU coding standards, at least
+minimally, as regards configuration. This was based upon the UK22
+distribution, with some modifications made at Utah as well.
+
+This kernel will be in the forthcoming complete binary distribution of
+GNU. The release is being made now, in the hopes that any serious
+problems we didn't notice might be found in advance of that
+distribution. Volunteers who are interested in compiling this release
+should do so, and please report any problems asap.
+
+All the non-kernel pieces of the Utah distribution are not present in
+this distribution, except for the MiG interface generator which is
+still supplied. These other pieces were never used for GNU, and are
+not really part of the kernel at all. We may separate MiG into a
+separate distribution for convenience at some later date.
+
+This distribution is only for i386, i486, i586 (pentium), and i686
+(sexium [pentium pro]) processors on PC-AT compatible machines.
+Volunteers interested in ports to other architectures are eagerly
+sought.
+
+Most ethernet cards and disk controllers are supported by this kernel;
+ones for which Linux drivers exist can be ported easily. Non-network
+devices for which BSD drivers exist can be ported with a little
+effort, but not much.
+
+Bug reports relating to this distribution should be sent to
+bug-hurd@prep.ai.mit.edu. Requests for assistance should be made on
+help-hurd@prep.ai.mit.edu.
+
+The md5sum checksum for gnumach-1.0.tar.gz is:
+
+62ac22cbe695a058243673427a264745 gnumach-1.0.tar.gz
+
diff --git a/=announce-1.1 b/=announce-1.1
new file mode 100644
index 0000000..ef5fa0b
--- /dev/null
+++ b/=announce-1.1
@@ -0,0 +1,34 @@
+I am pleased to announce version 1.1 of the GNU distribution of the
+Mach kernel. It may be found in the file (about 254 MB compressed)
+ftp://prep.ai.mit.edu/pub/gnu/gnumach-1.1.tar.gz.
+
+Diffs from version 1.0 of this distribution are available at
+ftp://prep.ai.mit.edu/pub/gnu/gnumach-1.0-1.1.diff (42 KB
+uncompressed).
+
+This is a bug-fixing release; the only new feature over version 1.0 is
+the --enable-kdb configure option which has been added to turn on
+kernel debugging support.
+
+Several bugs have been fixed in this distribution, particularly
+relating to cross-compilation support.
+
+This distribution is only for i386, i486, i586 (pentium), and i686
+(sexium [pentium pro]) processors on PC-AT compatible machines.
+Volunteers interested in ports to other architectures are eagerly
+sought.
+
+Most ethernet cards and disk controllers are supported by this kernel;
+ones for which Linux drivers exist can be ported easily. Non-network
+devices for which BSD drivers exist can be ported with a little
+effort, but not much.
+
+Bug reports relating to this distribution should be sent to
+bug-hurd@prep.ai.mit.edu. Requests for assistance should be made on
+help-hurd@prep.ai.mit.edu.
+
+The md5sum checksums for the files mentioned in this message are:
+
+6bb809d81198fd28078c8ac9ccb55965 gnumach-1.1.tar.gz
+089b95de887c69c9f1f28b1b1dcb00f7 gnumach-1.0-1.1.diff
+
diff --git a/=announce-1.1.1 b/=announce-1.1.1
new file mode 100644
index 0000000..244141d
--- /dev/null
+++ b/=announce-1.1.1
@@ -0,0 +1,36 @@
+I am pleased to announce version 1.1.1 of the GNU distribution of the
+Mach kernel. It may be found in the file (about 254 MB compressed)
+ftp://prep.ai.mit.edu/pub/gnu/gnumach-1.1.1.tar.gz.
+
+Diffs from version 1.0 of this distribution are available at
+ftp://prep.ai.mit.edu/pub/gnu/gnumach-1.0-1.1.1.diff.gz (about 169 MB
+compressed).
+
+Version 1.1 had some minor accidental problems and is being removed in
+favor of this minor update.
+
+This is a bug-fixing release; the only new feature over version 1.0 is
+the --enable-kdb configure option which has been added to turn on
+kernel debugging support.
+
+Several bugs have been fixed in this distribution, particularly
+relating to cross-compilation support.
+
+This distribution is only for i386, i486, i586 (pentium), and i686
+(sexium [pentium pro]) processors on PC-AT compatible machines.
+Volunteers interested in ports to other architectures are eagerly
+sought.
+
+Most ethernet cards and disk controllers are supported by this kernel;
+ones for which Linux drivers exist can be ported easily. Non-network
+devices for which BSD drivers exist can be ported with a little
+effort, but not much.
+
+Bug reports relating to this distribution should be sent to
+bug-hurd@prep.ai.mit.edu. Requests for assistance should be made on
+help-hurd@prep.ai.mit.edu.
+
+The md5sum checksums for the files mentioned in this message are:
+
+45147839691e40b2e67412579e0fcc5d gnumach-1.1.1.tar.gz
+35e69cedb4d23b4058be26bd58345a02 gnumach-1.0-1.1.1.diff.Z
diff --git a/=announce-1.2 b/=announce-1.2
new file mode 100644
index 0000000..6fc987f
--- /dev/null
+++ b/=announce-1.2
@@ -0,0 +1,27 @@
+I am pleased to announce version 1.2 of the GNU distribution of the
+Mach kernel. It may be found in the file (about 3.64 MB compressed)
+ftp://ftp.gnu.org/gnu/gnumach/gnumach-1.2.tar.gz.
+
+Because of extensive source reorganization, it is not profitable to
+provide diffs against previous versions.
+
+Most notably, this release updates the Linux-derived device drivers to
+those found in Linux version 2.0.36.
+
+This distribution is only for i386, i486, i586 (pentium), and i686
+(sexium [pentium pro]) processors on PC-AT compatible machines.
+Volunteers interested in ports to other architectures are eagerly
+sought.
+
+MiG (the Mach Interface Generator) is no longer in this distribution;
+it is now distributed separately. You will need MiG in order to be
+able to compile this release.
+
+Bug reports relating to this distribution should be sent to
+bug-hurd@gnu.org. Requests for assistance should be made on
+help-hurd@gnu.org.
+
+The md5sum checksum for this distribution is
+
+85e898a1753270e63a1cc69028043c68 gnumach-1.2.tar.gz
+
diff --git a/=announce-1.3 b/=announce-1.3
new file mode 100644
index 0000000..2f7edd0
--- /dev/null
+++ b/=announce-1.3
@@ -0,0 +1,47 @@
+We are pleased to announce version 1.3 of the GNU distribution of the
+Mach kernel. It may be found in the file (about 3.6 MB compressed)
+ftp://ftp.gnu.org/gnu/gnumach/gnumach-1.3.tar.gz;
+unidiffs from version 1.2 (about 310 KB compressed) are in
+ftp://ftp.gnu.org/gnu/gnumach/gnumach-1.2-1.3.diff.gz.
+
+This distribution is only for x86 PC machines.
+Volunteers interested in ports to other architectures are eagerly sought.
+
+We are no longer actively developing version 1.x of GNU Mach. We plan to
+make only necessary bug fixes or trivial enhancements in the 1.x line,
+and make further 1.x releases only as necessary for those purposes.
+New development efforts have been underway for some time on a new version
+of GNU Mach using the OSKit from the University of Utah for hardware
+support. Those efforts previously called OSKit-Mach are now working
+towards the future version 2.0 of GNU Mach.
+
+Aside from bug fixes, major changes from 1.2 (from the NEWS file) are:
+
+The kernel now directly supports "boot scripts" in the form of multiboot
+module names with the same syntax as the Hurd's `serverboot' program.
+That is, instead of telling GRUB "module /boot/serverboot", you can give
+GRUB a series of command like "module /hurd/ext2fs ${...}" where the
+syntax after "module" is the same as in boot scripts for Hurd's `serverboot'.
+
+The kernel message device `kmsg' is now enabled by default.
+--disable-kmsg turns it off.
+
+Large disks (>= 10GB) are now correctly supported, the new get_status
+call DEV_GET_RECORDS can return the number of records of a device.
+
+Lots of tweaks have been done to the virtual memory management to make
+it perform better on today's machines.
+
+The console supports ANSI escape sequences for colors and attributes.
+
+Support for the terminal speeds B57600 and B115200 has been added.
+
+
+Bug reports relating to this distribution should be sent to
+bug-hurd@gnu.org. Requests for assistance should be made on
+help-hurd@gnu.org.
+
+The md5sum checksums for this distribution are:
+
+61e90803889b079a380e30056b21d076 gnumach-1.3.tar.gz
+56ca6aa9040c4d4c4ef7a9757bb0509c gnumach-1.2-1.3.diff.gz
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..246225e
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,4 @@
+-*- Text -*-
+
+The original source of this code is the Mach 3.0 distribution from CMU.
+It was subsequently modified by the University of Utah and the GNU Project.
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..623b625
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/COPYING3 b/COPYING3
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/COPYING3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..9ed3b47
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,8 @@
+e227045b06d62ee7d2fbab9d5ade9030ff43170b is the last commit imported from CVS.
+All commits after that one have valid author and committer information.
+
+Use this to examine the change log for earlier changes:
+
+ $ git show e227045b06d62ee7d2fbab9d5ade9030ff43170b:ChangeLog
+ $ git show e227045b06d62ee7d2fbab9d5ade9030ff43170b:ChangeLog.0
+ $ git show e227045b06d62ee7d2fbab9d5ade9030ff43170b:ChangeLog.00
diff --git a/DEVELOPMENT b/DEVELOPMENT
new file mode 100644
index 0000000..0311cce
--- /dev/null
+++ b/DEVELOPMENT
@@ -0,0 +1,84 @@
+-*- Text -*-
+
+If you're ``just'' looking for something to work on, have a look at the
+ * bug list, <http://savannah.gnu.org/bugs/?group=hurd> and
+ * task list, <http://savannah.gnu.org/task/?group=hurd>.
+
+HOW TO CONTRIBUTE LARGER CHUNKS
+===============================
+
+If you want to help the maintainers to be quickly able to evaluate and
+check in your contribution, please try to follow these suggestions:
+
+Try to mark in the code stuff (i.e. whole functions, parts of header
+files) that you've just copied (and then perhaps modified; also note that
+briefly) from somewhere else and stuff that you've actually written
+yourself. Either do that by simply writing a ChangeLog in parallel (an
+informal one is fine as well) or put notes in the modified / imported
+files. The one who will be checking in your patches will then probably
+remove most of these notes, as soon as everything is written down in the
+real ChangeLog. Logging your changes right from the beginning makes it
+much easier for the maintainers to track down where which chunk is coming
+from, so that they can be handled appropriately.
+
+HISTORY
+=======
+
+`gnumach-1-branch-before_removing_unused_and_unsupported_code' was tagged
+on 2006-02-20. After creating that tag, code for unused and unsupported
+device driver for ISA cards and a good deal of i386 dependent, also
+unused and unsupported code was removed.
+
+On 2006-03-19, support was removed for FIPC, which only ever was used
+within the native Mach NE2000 NIC device driver, see
+<http://www.cs.utah.edu/flux/mach4-i386/html/mach4-UK22.html#FIPC>.
+<http://lists.gnu.org/archive/html/bug-hurd/2006-01/msg00162.html>.
+
+Support for NORMA was removed on 2006-03-20.
+<http://lists.gnu.org/archive/html/bug-hurd/2006-03/msg00007.html>.
+
+Support for PS2, i860, iPSC 386/860 and MB1/MB2/EXL was removed on
+2006-11-05.
+<http://lists.gnu.org/archive/html/bug-hurd/2006-11/msg00001.html>.
+
+Support for the old ipc interface, MACH_IPC_COMPAT, was removed on 2006-12-03.
+<http://savannah.gnu.org/patch/?5017>.
+
+Support for building without CONTINUATIONS was removed on 2006-12-03.
+<http://savannah.gnu.org/patch/?5019>.
+
+Support for FP emulation was removed on 2006-12-13.
+<http://lists.gnu.org/archive/html/bug-hurd/2006-12/msg00031.html>.
+
+Support for Olivetti XP7 & XP9 was removed on 2007-01-02.
+<http://lists.gnu.org/archive/html/bug-hurd/2006-12/msg00107.html>.
+
+Support for the `iopl' device and some i/o emulation code (that might be useful
+for DOSEMU) was removed on 2007-04-02.
+<http://lists.gnu.org/archive/html/bug-hurd/2007-04/msg00002.html>.
+
+
+Be sure to check the ChangeLog and have a look at the repository at those states
+if you want to work on those parts of GNU Mach.
+
+LAYOUT OF THE SOURCE TREE (very incomplete)
+
+ * include/
+
+[TODO: Check.]
+
+ ... is mainly for installed header and definition files, but it also holds
+ pseudo-clones of C library headers, which don't get installed because the C
+ library has better versions. In that category are <mach/error.h>,
+ <mach/mach_traps.h>, <mach/mig_support.h>, <sys/ioctl.h>, <sys/reboot.h>,
+ <sys/time.h>, <sys/types.h>, <alloca.h> and <string.h>. By putting such
+ headers into there, the relevant kernel code is easier to understand,
+ because the user will expect that the file named <alloca.h> or
+ <sys/types.h> does more or less what the normal C library file does, and
+ calling those <kern/alloca.h> or <kern/types.h> would make the reader have
+ to wonder or remember what they are. The directory is, essentially, a
+ special `/usr/include' for use by the kernel itself when compiling. It
+ only should get things which belong in `/usr/include'. The reason for
+ <alloca.h> and <sys/types.h> is because those are files found in
+ `/usr/include', even if on an actual installed system the versions in
+ `/usr/include' are provided by a different package.
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..ad38249
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,265 @@
+# Makefile for GNU Mach.
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Various definitions of the Automake environment.
+#
+# These will be augmented below.
+#
+
+SUBDIRS =
+DIST_SUBDIRS =
+EXTRA_DIST =
+DISTCHECK_CONFIGURE_FLAGS =
+noinst_LIBRARIES =
+noinst_PROGRAMS =
+TESTS=
+info_TEXINFOS =
+MOSTLYCLEANFILES =
+DISTCLEANFILES =
+AM_CPPFLAGS =
+AM_CCASFLAGS =
+AM_CFLAGS =
+AM_LDFLAGS =
+
+#
+# Compilation flags
+#
+
+GCC_INSTALL = $(shell LANG=C $(CC) -print-search-dirs | sed -n -e 's/install: \(.*\)/\1/p')
+AM_CPPFLAGS += \
+ -nostdinc -imacros config.h -I $(GCC_INSTALL)/include
+
+AM_CPPFLAGS += \
+ -I$(systype) \
+ -I. \
+ -I$(top_srcdir)/$(systype) \
+ -I$(top_srcdir)/$(systype)/include/mach/sa \
+ -I$(top_srcdir)/include
+
+AM_CFLAGS += \
+ -fno-builtin-log
+
+AM_CCASFLAGS += \
+ -D__ASSEMBLY__
+
+# Yes, this makes the eyes hurt. But perhaps someone will finally take care of
+# all that scruffy Mach code... Also see <http://savannah.gnu.org/task/?5726>.
+AM_CFLAGS += \
+ -Wall -Wstrict-prototypes -Wold-style-definition -Wmissing-prototypes
+
+# We need the GNU-style inline
+AM_CFLAGS += \
+ -fgnu89-inline
+
+# Much of the Mach code predates C99 and makes invalid assumptions about
+# type punning.
+AM_CFLAGS += \
+ -fno-strict-aliasing
+
+# The stack-smashing protector might be enabled by default, but might emit
+# unsuitable code.
+if disable_smashing_stack_protector
+AM_CFLAGS += \
+ -fno-stack-protector
+endif
+
+# We do not support or need position-independent code
+AM_CFLAGS += \
+ -no-pie -fno-PIE -fno-pie -fno-pic
+
+# This must be the same size as port names, see e.g. ipc/ipc_entry.c
+AM_CFLAGS += -DRDXTREE_KEY_32
+
+#
+# Silent build support.
+#
+
+AWK_V = $(AWK_V_$(V))
+AWK_V_ = $(AWK_V_$(AM_DEFAULT_VERBOSITY))
+AWK_V_0 = @echo " AWK $@";
+
+GZIP_V = $(GZIP_V_$(V))
+GZIP_V_ = $(GZIP_V_$(AM_DEFAULT_VERBOSITY))
+GZIP_V_0 = @echo " GZIP $@";
+
+NM_V = $(NM_V_$(V))
+NM_V_ = $(NM_V_$(AM_DEFAULT_VERBOSITY))
+NM_V_0 = @echo " NM $@";
+
+MIGCOM_V = $(MIGCOM_V_$(V))
+MIGCOM_V_ = $(MIGCOM_V_$(AM_DEFAULT_VERBOSITY))
+MIGCOM_V_0 = @echo " MIG $@";
+
+#
+# MIG Setup.
+#
+
+# MIGCOM.
+MIGCOM = $(MIG) -n -cc cat - /dev/null
+
+# We need this because we use $(CPP) to preprocess MIG .defs files.
+CPP = @CPP@ -x c
+
+#
+# Other Tools' Configuration.
+#
+
+# Don't needlessly overwrite files whose contents haven't changed.
+# This helps avoid unnecessary recompilation cycles when keeping
+# cross-compilation toolchains up-to-date. Thus, unconditionally use the
+# `install-sh' that is supplied by GNU Automake 1.10.1, as the GNU Coreutils
+# one doesn't provide this functionality yet (TODO: change that). TODO:
+# `build-aux' is hardcoded.
+install_sh = $(SHELL) $(abs_srcdir)/build-aux/install-sh -C
+INSTALL = $(install_sh)
+
+#
+# The main kernel functionality.
+#
+
+noinst_LIBRARIES += \
+ libkernel.a
+libkernel_a_SOURCES =
+nodist_libkernel_a_SOURCES =
+MOSTLYCLEANFILES += \
+ $(nodist_libkernel_a_SOURCES)
+
+gnumach_o_LDADD = \
+ libkernel.a
+
+gnumach_SOURCES =
+gnumach_LINKFLAGS =
+
+# Makerules: how to do some things.
+include Makerules.am
+
+# Main Makefile fragment.
+include Makefrag.am
+
+# Test suite.
+include tests/Makefrag.am
+
+# Documentation.
+include doc/Makefrag.am
+
+#
+# Kernel Image
+#
+
+# We need the following junk because of the include-files-from-libc.a magic.
+# TODO. Is the following kosher from a Automake point of view? (I.e. a
+# program `gnumach.o' that is then later used again as an object file.)
+gnumach_o_SOURCES =
+# TODO. ``-u _start''. System dependent?
+gnumach_o_LINK = $(LD) $(LDFLAGS) -u _start -r -o $@
+noinst_PROGRAMS += \
+ gnumach.o
+
+# This is the list of routines we use from libgcc.
+libgcc_routines := udivdi3 __udivdi3 __udivmoddi4 __umoddi3 __divdi3 __divmoddi4 __moddi3 __ffsdi2
+# References generated by ld.
+ld_magic_routines := __rel_iplt_start __rel_iplt_end __rela_iplt_start __rela_iplt_end _START etext _edata _end
+gnumach-undef: gnumach.$(OBJEXT)
+ $(NM_V) $(NM) -u $< | sed 's/ *U *//' | sort -u > $@
+MOSTLYCLEANFILES += gnumach-undef
+gnumach-undef-bad: gnumach-undef Makefile
+ $(AM_V_GEN) sed '$(foreach r,$(libgcc_routines) $(ld_magic_routines),/^$r$$/d;)' $< > $@
+MOSTLYCLEANFILES += gnumach-undef-bad
+libgcc-routines.o: gnumach-undef gnumach-undef-bad
+ $(AM_V_at) if test -s gnumach-undef-bad; \
+ then cat gnumach-undef-bad; exit 2; else true; fi
+ $(AM_V_CCLD) $(CCLD) $(LDFLAGS) -r -static \
+ -o $@ `sed 's/^/-Wl,-u,/' < $<` -x c /dev/null -lgcc
+ @if nm $@ | grep __init_cpu_features; \
+ then echo "Please install a 32bit libc without multiarch support (on Debian systems, the libc6-dev:i386 package containing /usr/lib/i386-linux-gnu/libc.a)". ; \
+ false ; fi
+
+gnumach_LINK = $(LD) $(LDFLAGS) $(LINKFLAGS) $(gnumach_LINKFLAGS) -o $@
+gnumach_LDADD = gnumach.o libgcc-routines.o
+
+#
+# Installation.
+#
+
+exec_bootdir = \
+ $(exec_prefix)/boot
+exec_boot_PROGRAMS = \
+ gnumach
+
+#
+# Building a distribution.
+#
+
+EXTRA_DIST += \
+ config.status.dep.patch \
+ Makefile.in.dep.patch
+
+EXTRA_DIST += \
+ DEVELOPMENT
+
+dist-hook: dist-rm-CVS gen-ChangeLog
+
+.PHONY: dist-rm-CVS
+dist-rm-CVS:
+# Try to be very safe with respect to spuriously removing various directories
+# in case of an error.
+ find $(distdir)/ -type d -name CVS | while read d; do \
+ rm -f "$$d"/{Entries,Repository,Root,Tag} && \
+ rmdir "$$d"; \
+ done
+
+gen_start_commit = e227045b06d62ee7d2fbab9d5ade9030ff43170b
+ChangeLog_files = ChangeLog ChangeLog.0 ChangeLog.00
+.PHONY: gen-ChangeLog
+gen-ChangeLog:
+ $(AM_V_GEN)if test -d $(top_srcdir)/.git; then \
+ (cd $(top_srcdir)/ && \
+ ./gitlog-to-changelog --strip-tab \
+ $(gen_start_commit).. && \
+ echo) >> $(distdir)/cl-t && \
+ for f in $(ChangeLog_files); do \
+ (cd $(top_srcdir)/ && \
+ git show $(gen_start_commit):$$f) >> $(distdir)/cl-t && \
+ rm -f $(distdir)/$$f && \
+ mv $(distdir)/cl-t $(distdir)/$$f \
+ || exit $$?; \
+ done; \
+ fi
+
+DISTCLEANFILES += \
+ Makefile.orig \
+ config.status.orig
+
+#
+# Legacy support.
+#
+
+install-headers: install-data
+ @echo '*****************************************************'
+ @echo '* As you can see above, I was so kind to rewrite your'
+ @echo '* `make $@'\'
+ @echo '* into'
+ @echo '* `make $^'\'
+ @echo '* which is how it is to be spelled these days.'
+ @echo '*'
+ @echo '* Please get your instructions fixed.'
+ @echo '*****************************************************'
+ @echo
+ @echo 'Penalty:'
+ sleep 20
diff --git a/Makefile.in.dep.patch b/Makefile.in.dep.patch
new file mode 100644
index 0000000..72fb65f
--- /dev/null
+++ b/Makefile.in.dep.patch
@@ -0,0 +1,19 @@
+--- Makefile.in
++++ Makefile.in
+@@ -4785,7 +4785,15 @@ distclean-compile:
+
+ $(am__depfiles_remade):
+ @$(MKDIR_P) $(@D)
+- @echo '# dummy' >$@-t && $(am__mv) $@-t $@
++ # Ugly bootstrap hack to get to-be-generated files created
++ # Try to guess what file this dependency file is from...
++ @f=$(srcdir)/`dirname "$(@D)"`/`basename "$@" .Po | sed s/lib[^-]\*-//` ; \
++ for f in "$$f"*; do \
++ case $$f in \
++ *.c | *.S) echo "$$f"': $$(filter-out $$(DIST_SOURCES),$$(SOURCES))' ;; \
++ *) echo '# dummy';; \
++ esac ; \
++ done >$@-t && $(am__mv) $@-t $@
+
+ am--depfiles: $(am__depfiles_remade)
+
diff --git a/Makefrag.am b/Makefrag.am
new file mode 100644
index 0000000..5b61a1d
--- /dev/null
+++ b/Makefrag.am
@@ -0,0 +1,611 @@
+# Main Makefile fragment for GNU Mach.
+
+# Copyright (C) 1997, 1999, 2004, 2006, 2007, 2009 Free Software
+# Foundation, Inc.
+
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+# "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+# LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+# USE OF THIS SOFTWARE.
+
+#
+# DDB support --- eventually to die. Please.
+#
+
+# Do we want the icky kernel debugger?
+if enable_kdb
+libkernel_a_SOURCES += \
+ ddb/db_access.c \
+ ddb/db_access.h \
+ ddb/db_elf.c \
+ ddb/db_elf.h \
+ ddb/db_break.c \
+ ddb/db_break.h \
+ ddb/db_command.c \
+ ddb/db_command.h \
+ ddb/db_cond.c \
+ ddb/db_cond.h \
+ ddb/db_examine.c \
+ ddb/db_examine.h \
+ ddb/db_expr.c \
+ ddb/db_expr.h \
+ ddb/db_ext_symtab.c \
+ ddb/db_input.c \
+ ddb/db_input.h \
+ ddb/db_lex.c \
+ ddb/db_lex.h \
+ ddb/db_macro.c \
+ ddb/db_macro.h \
+ ddb/db_mp.c \
+ ddb/db_mp.h \
+ ddb/db_output.c \
+ ddb/db_output.h \
+ ddb/db_print.c \
+ ddb/db_print.h \
+ ddb/db_run.c \
+ ddb/db_run.h \
+ ddb/db_sym.c \
+ ddb/db_sym.h \
+ ddb/db_task_thread.c \
+ ddb/db_task_thread.h \
+ ddb/db_trap.c \
+ ddb/db_trap.h \
+ ddb/db_variables.c \
+ ddb/db_variables.h \
+ ddb/db_watch.c \
+ ddb/db_watch.h \
+ ddb/db_write_cmd.c \
+ ddb/db_write_cmd.h \
+ ddb/nlist.h \
+ ddb/stab.h \
+ ddb/tr.h
+
+# We need frame pointers for trace to work properly.
+AM_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
+endif
+
+
+#
+# IPC implementation.
+#
+
+libkernel_a_SOURCES += \
+ ipc/ipc_entry.c \
+ ipc/ipc_entry.h \
+ ipc/ipc_init.c \
+ ipc/ipc_init.h \
+ ipc/ipc_kmsg.c \
+ ipc/ipc_kmsg.h \
+ ipc/ipc_kmsg_queue.h \
+ ipc/ipc_machdep.h \
+ ipc/ipc_marequest.c \
+ ipc/ipc_marequest.h \
+ ipc/ipc_mqueue.c \
+ ipc/ipc_mqueue.h \
+ ipc/ipc_notify.c \
+ ipc/ipc_notify.h \
+ ipc/ipc_object.c \
+ ipc/ipc_object.h \
+ ipc/ipc_port.c \
+ ipc/ipc_port.h \
+ ipc/ipc_print.h \
+ ipc/ipc_pset.c \
+ ipc/ipc_pset.h \
+ ipc/ipc_right.c \
+ ipc/ipc_right.h \
+ ipc/ipc_space.c \
+ ipc/ipc_space.h \
+ ipc/ipc_table.c \
+ ipc/ipc_table.h \
+ ipc/ipc_target.c \
+ ipc/ipc_target.h \
+ ipc/ipc_thread.c \
+ ipc/ipc_thread.h \
+ ipc/ipc_types.h \
+ ipc/mach_msg.c \
+ ipc/mach_msg.h \
+ ipc/mach_port.c \
+ ipc/mach_port.h \
+ ipc/mach_debug.c \
+ ipc/port.h
+EXTRA_DIST += \
+ ipc/mach_port.srv \
+ ipc/notify.defs
+
+
+#
+# `kernel' implementation (tasks, threads, trivia, etc.).
+#
+
+libkernel_a_SOURCES += \
+ kern/act.c \
+ kern/act.h \
+ kern/assert.h \
+ kern/ast.c \
+ kern/ast.h \
+ kern/atomic.h \
+ kern/boot_script.h \
+ kern/bootstrap.c \
+ kern/bootstrap.h \
+ kern/counters.c \
+ kern/counters.h \
+ kern/cpu_number.h \
+ kern/debug.c \
+ kern/debug.h \
+ kern/eventcount.c \
+ kern/eventcount.h \
+ kern/exception.c \
+ kern/exception.h \
+ kern/gsync.c \
+ kern/gsync.h \
+ kern/host.c \
+ kern/host.h \
+ kern/ipc_host.c \
+ kern/ipc_host.h \
+ kern/ipc_kobject.c \
+ kern/ipc_kobject.h \
+ kern/ipc_mig.c \
+ kern/ipc_mig.h \
+ kern/ipc_sched.c \
+ kern/ipc_sched.h \
+ kern/ipc_tt.c \
+ kern/ipc_tt.h \
+ kern/kalloc.h \
+ kern/kern_types.h \
+ kern/kmutex.c \
+ kern/kmutex.h \
+ kern/list.h \
+ kern/lock.c \
+ kern/lock.h \
+ kern/lock_mon.c \
+ kern/log2.h \
+ kern/mach_clock.c \
+ kern/mach_clock.h \
+ kern/mach_factor.c \
+ kern/mach_factor.h \
+ kern/machine.c \
+ kern/machine.h \
+ kern/macros.h \
+ kern/pc_sample.c \
+ kern/pc_sample.h \
+ kern/printf.c \
+ kern/printf.h \
+ kern/priority.c \
+ kern/priority.h \
+ kern/processor.c \
+ kern/processor.h \
+ kern/profile.c \
+ kern/queue.c \
+ kern/queue.h \
+ kern/rbtree.c \
+ kern/rbtree.h \
+ kern/rbtree_i.h \
+ kern/rdxtree.c \
+ kern/rdxtree.h \
+ kern/rdxtree_i.h \
+ kern/refcount.h \
+ kern/slab.c \
+ kern/slab.h \
+ kern/smp.h \
+ kern/smp.c \
+ kern/sched.h \
+ kern/sched_prim.c \
+ kern/sched_prim.h \
+ kern/shuttle.h \
+ kern/startup.c \
+ kern/startup.h \
+ kern/strings.c \
+ kern/syscall_emulation.c \
+ kern/syscall_emulation.h \
+ kern/syscall_subr.c \
+ kern/syscall_subr.h \
+ kern/syscall_sw.c \
+ kern/syscall_sw.h \
+ kern/task.c \
+ kern/task.h \
+ kern/thread.c \
+ kern/thread.h \
+ kern/thread_swap.c \
+ kern/thread_swap.h \
+ kern/timer.c \
+ kern/timer.h \
+ kern/xpr.c \
+ kern/xpr.h \
+ kern/elf-load.c \
+ kern/boot_script.c
+EXTRA_DIST += \
+ kern/exc.defs \
+ kern/mach.srv \
+ kern/mach4.srv \
+ kern/gnumach.srv \
+ kern/experimental.srv \
+ kern/mach_debug.srv \
+ kern/mach_host.srv \
+ kern/task_notify.cli
+
+
+#
+# Still more trivia.
+#
+
+libkernel_a_SOURCES += \
+ util/atoi.c \
+ util/atoi.h \
+ util/byteorder.h \
+ util/byteorder.c
+
+#
+# Virtual memory implementation.
+#
+
+libkernel_a_SOURCES += \
+ vm/memory_object_proxy.c \
+ vm/memory_object_proxy.h \
+ vm/memory_object.c \
+ vm/memory_object.h \
+ vm/pmap.h \
+ vm/vm_debug.c \
+ vm/vm_external.c \
+ vm/vm_external.h \
+ vm/vm_fault.c \
+ vm/vm_fault.h \
+ vm/vm_init.c \
+ vm/vm_init.h \
+ vm/vm_kern.c \
+ vm/vm_kern.h \
+ vm/vm_map.c \
+ vm/vm_map.h \
+ vm/vm_object.c \
+ vm/vm_object.h \
+ vm/vm_page.c \
+ vm/vm_page.h \
+ vm/vm_pageout.c \
+ vm/vm_pageout.h \
+ vm/vm_print.h \
+ vm/vm_resident.c \
+ vm/vm_resident.h \
+ vm/vm_types.h \
+ vm/vm_user.c \
+ vm/vm_user.h
+EXTRA_DIST += \
+ vm/memory_object_default.cli \
+ vm/memory_object_user.cli
+
+
+#
+# Device driver support.
+#
+
+# These device support files are always needed; the others are needed only if
+# particular drivers want the routines.
+# TODO. Functions in device/subrs.c should each be moved elsewhere.
+libkernel_a_SOURCES += \
+ device/blkio.c \
+ device/blkio.h \
+ device/buf.h \
+ device/chario.c \
+ device/chario.h \
+ device/cirbuf.h \
+ device/conf.h \
+ device/cons.c \
+ device/cons.h \
+ device/device_emul.h \
+ device/dev_hdr.h \
+ device/dev_lookup.c \
+ device/dev_master.h \
+ device/dev_name.c \
+ device/dev_pager.c \
+ device/dev_pager.h \
+ device/device_init.c \
+ device/device_init.h \
+ device/device_port.h \
+ device/device_types_kernel.h \
+ device/ds_routines.c \
+ device/ds_routines.h \
+ device/if_ether.h \
+ device/if_hdr.h \
+ device/intr.c \
+ device/intr.h \
+ device/io_req.h \
+ device/net_io.c \
+ device/net_io.h \
+ device/param.h \
+ device/subrs.c \
+ device/subrs.h \
+ device/tty.h
+EXTRA_DIST += \
+ device/device.srv \
+ device/device_pager.srv \
+ device/device_reply.cli \
+ device/memory_object_reply.cli
+
+
+#
+# `kmsg' device.
+#
+
+if enable_kmsg
+libkernel_a_SOURCES += \
+ device/kmsg.c \
+ device/kmsg.h
+endif
+
+
+#
+# Version number.
+#
+
+nodist_libkernel_a_SOURCES += \
+ version.c
+
+#
+# Installation.
+#
+
+include_devicedir = $(includedir)/device
+include_device_HEADERS = \
+ include/device/audio_status.h \
+ include/device/bpf.h \
+ include/device/device.defs \
+ include/device/device_reply.defs \
+ include/device/device_request.defs \
+ include/device/device_types.defs \
+ include/device/device_types.h \
+ include/device/disk_status.h \
+ include/device/input.h \
+ include/device/net_status.h \
+ include/device/notify.defs \
+ include/device/notify.h \
+ include/device/tape_status.h \
+ include/device/tty_status.h
+
+include_machdir = $(includedir)/mach
+include_mach_HEADERS = \
+ include/mach/default_pager.defs \
+ include/mach/default_pager_types.defs \
+ include/mach/exc.defs \
+ include/mach/mach.defs \
+ include/mach/mach4.defs \
+ include/mach/gnumach.defs \
+ include/mach/task_notify.defs \
+ include/mach/mach_host.defs \
+ include/mach/mach_port.defs \
+ include/mach/mach_types.defs \
+ include/mach/memory_object.defs \
+ include/mach/memory_object_default.defs \
+ include/mach/notify.defs \
+ include/mach/std_types.defs \
+ include/mach/experimental.defs \
+ include/mach/alert.h \
+ include/mach/boolean.h \
+ include/mach/boot.h \
+ include/mach/default_pager_types.h \
+ include/mach/exception.h \
+ include/mach/host_info.h \
+ include/mach/kern_return.h \
+ include/mach/mach_param.h \
+ include/mach/mach_types.h \
+ include/mach/machine.h \
+ include/mach/macro_help.h \
+ include/mach/memory_object.h \
+ include/mach/message.h \
+ include/mach/mig_errors.h \
+ include/mach/notify.h \
+ include/mach/pc_sample.h \
+ include/mach/policy.h \
+ include/mach/port.h \
+ include/mach/processor_info.h \
+ include/mach/profil.h \
+ include/mach/profilparam.h \
+ include/mach/std_types.h \
+ include/mach/syscall_sw.h \
+ include/mach/task_info.h \
+ include/mach/task_special_ports.h \
+ include/mach/thread_info.h \
+ include/mach/thread_special_ports.h \
+ include/mach/thread_status.h \
+ include/mach/thread_switch.h \
+ include/mach/time_value.h \
+ include/mach/version.h \
+ include/mach/vm_attributes.h \
+ include/mach/vm_cache_statistics.h \
+ include/mach/vm_inherit.h \
+ include/mach/vm_param.h \
+ include/mach/vm_prot.h \
+ include/mach/vm_statistics.h \
+ include/mach/vm_sync.h \
+ include/mach/vm_wire.h \
+ include/mach/inline.h \
+ include/mach/xen.h
+
+# If we name this `*_execdir', Automake won't add it to `install-data'...
+include_mach_eXecdir = $(includedir)/mach/exec
+include_mach_eXec_HEADERS = \
+ include/mach/exec/a.out.h \
+ include/mach/exec/elf.h \
+ include/mach/exec/exec.h
+
+include_mach_debugdir = $(includedir)/mach_debug
+include_mach_debug_HEADERS = \
+ $(addprefix include/mach_debug/, \
+ hash_info.h \
+ mach_debug.defs \
+ mach_debug_types.defs \
+ mach_debug_types.h \
+ vm_info.h \
+ slab_info.h \
+ )
+
+# Other headers for the distribution. We don't install these, because the
+# GNU C library has correct versions for users to use.
+# other-sys-headers := types.h reboot.h ioctl.h
+# other-mach-headers := mig_support.h mach_traps.h error.h
+# other-headers := alloca.h
+
+install-data-hook:
+ rm -f '$(DESTDIR)$(include_machdir)'/machine
+ ln -s '$(systype)' '$(DESTDIR)$(include_machdir)'/machine
+
+#
+# Building a distribution.
+#
+
+# Enable all available features.
+DISTCHECK_CONFIGURE_FLAGS += \
+ --enable-kdb
+
+# Instead of listing each file individually...
+EXTRA_DIST += \
+ include
+
+#
+# Automatically generated source files.
+#
+# See Makerules.mig.am.
+#
+
+# User stubs.
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ vm/memory_object_user.user.defs.c \
+ vm/memory_object_default.user.defs.c
+nodist_libkernel_a_SOURCES += \
+ vm/memory_object_user.user.h \
+ vm/memory_object_user.user.c \
+ vm/memory_object_user.user.msgids \
+ vm/memory_object_default.user.h \
+ vm/memory_object_default.user.c \
+ vm/memory_object_default.user.msgids
+# vm/memory_object_user.user.defs
+# vm/memory_object_default.user.defs
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ device/device_reply.user.defs.c \
+ device/memory_object_reply.user.defs.c
+nodist_libkernel_a_SOURCES += \
+ device/device_reply.user.h \
+ device/device_reply.user.c \
+ device/device_reply.user.msgids \
+ device/memory_object_reply.user.h \
+ device/memory_object_reply.user.c \
+ device/memory_object_reply.user.msgids
+# device/device_reply.user.defs
+# device/memory_object_reply.user.defs
+
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ kern/task_notify.user.defs.c
+nodist_libkernel_a_SOURCES += \
+ kern/task_notify.user.h \
+ kern/task_notify.user.c \
+ kern/task_notify.user.msgids
+
+# Server stubs.
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ device/device.server.defs.c \
+ device/device_pager.server.defs.c
+nodist_libkernel_a_SOURCES += \
+ device/device.server.h \
+ device/device.server.c \
+ device/device.server.msgids \
+ device/device_pager.server.h \
+ device/device_pager.server.c \
+ device/device_pager.server.msgids
+# device/device.server.defs
+# device/device_pager.server.defs
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ ipc/mach_port.server.defs.c
+nodist_libkernel_a_SOURCES += \
+ ipc/mach_port.server.h \
+ ipc/mach_port.server.c \
+ ipc/mach_port.server.msgids
+# ipc/mach_port.server.defs
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ kern/mach.server.defs.c \
+ kern/mach4.server.defs.c \
+ kern/gnumach.server.defs.c \
+ kern/experimental.server.defs.c \
+ kern/mach_debug.server.defs.c \
+ kern/mach_host.server.defs.c
+nodist_libkernel_a_SOURCES += \
+ kern/mach.server.h \
+ kern/mach.server.c \
+ kern/mach.server.msgids \
+ kern/mach4.server.h \
+ kern/mach4.server.c \
+ kern/mach4.server.msgids \
+ kern/gnumach.server.h \
+ kern/gnumach.server.c \
+ kern/gnumach.server.msgids \
+ kern/experimental.server.h \
+ kern/experimental.server.c \
+ kern/experimental.server.msgids \
+ kern/mach_debug.server.h \
+ kern/mach_debug.server.c \
+ kern/mach_debug.server.msgids \
+ kern/mach_host.server.h \
+ kern/mach_host.server.c \
+ kern/mach_host.server.msgids
+# kern/mach.server.defs
+# kern/mach4.server.defs
+# kern/gnumach.server.defs
+# kern/experimental.server.defs
+# kern/mach_debug.server.defs
+# kern/mach_host.server.defs
+
+# Stand-alone rule to generate the list of message ids when neither
+# the client nor the server stubs are required.
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ ipc/notify.none.defs.c \
+ kern/exc.none.defs.c
+nodist_libkernel_a_SOURCES += \
+ ipc/notify.none.msgids \
+ kern/exc.none.msgids
+# ipc/notify.none.defs
+
+# rpctrace can make use of that.
+MOSTLYCLEANFILES += \
+ gnumach.msgids
+gnumach.msgids: $(filter %.msgids,$(nodist_libkernel_a_SOURCES))
+ $(AM_V_at) cat $^ > $@.new
+ $(AM_V_GEN) mv $@.new $@
+# `exec_' prefix, so that we don't try to build that file when running
+# `make install-data', as it may fail there, but isn't needed there either.
+exec_msgidsdir = $(datadir)/msgids
+exec_msgids_DATA = gnumach.msgids
+
+#
+# Specific code.
+#
+
+# Linux device drivers and the glue code.
+include linux/Makefrag.am
+
+#
+# Platform specific parts.
+#
+
+# Xen.
+if PLATFORM_xen
+include xen/Makefrag.am
+endif
+
+#
+# Architecture specific parts.
+#
+
+if HOST_ix86
+include i386/Makefrag_x86.am
+endif
+if HOST_x86_64
+include i386/Makefrag_x86.am
+endif
+
+# ix86.
+include i386/Makefrag.am
+
+# x86_64.
+include x86_64/Makefrag.am
diff --git a/Makerules.am b/Makerules.am
new file mode 100644
index 0000000..5106fef
--- /dev/null
+++ b/Makerules.am
@@ -0,0 +1,54 @@
+# Makerules: how to do some things.
+
+# Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
+
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+# "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+# LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+# USE OF THIS SOFTWARE.
+
+#
+# Building foo.h from foo.sym.
+#
+
+EXTRA_DIST += \
+ gensym.awk
+%.symc: %.sym gensym.awk
+ $(AWK_V) $(AWK) -f $(word 2,$^) $< > $@
+%.symc.o: %.symc config.h
+ $(AM_V_CC) $(COMPILE) -S -x c -o $@ $<
+%.h: %.symc.o
+ $(AM_V_GEN) sed < $< > $@ \
+ -e 's/^[^*].*$$//' \
+ -e 's/^[*]/#define/' \
+ -e 's/mAgIc[^-0-9]*//'
+
+# Makerules.mig: how to do some MIG-related things.
+include Makerules.mig.am
+
+#
+# gzip files.
+#
+
+%.gz: %
+ $(GZIP_V) $(GZIP) -9 < $< > $@
+
+#
+# strip files.
+#
+
+%.stripped: %
+ $(STRIP) -o $@ $<
+
+#
+# Echo target.
+#
+
+echo-%:
+ @echo '$* = `$($*)'\'
diff --git a/Makerules.mig.am b/Makerules.mig.am
new file mode 100644
index 0000000..8ae6555
--- /dev/null
+++ b/Makerules.mig.am
@@ -0,0 +1,127 @@
+# Makerules.mig: how to do some MIG-related things.
+
+# Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Written by Thomas Schwinge.
+
+# serial 0
+
+# TODO. This file should probably be distributed with GNU MIG and then there
+# should be some mechanism so that every package using it is automagically
+# using the latest available (or best-matching) version of it. Which is not
+# trivial, as the file is already needed to build the build system. But then,
+# this file does not really depend on GNU Automake. Hmm...
+
+# USAGE.
+
+# Before `include'ing this file, `noinst_LIBRARIES' and `MOSTLYCLEANFILES' have
+# to be initialized.
+
+# For using these rules, `AM_CPPFLAGS', `MIGCOM', `MIGCOMFLAGS', `MIGCOMSFLAGS'
+# and `MIGCOMUFLAGS' have to be defined as desired.
+
+# Then you can (read: currently ``have to''; see below for comments) use
+# constructs like:
+#
+# # User stubs.
+# nodist_lib_dep_tr_for_defs_a_SOURCES += \
+# vm/memory_object_user.user.defs.c
+# nodist_libkernel_a_SOURCES += \
+# vm/memory_object_user.user.h \
+# vm/memory_object_user.user.c \
+# vm/memory_object_user.user.msgids
+#
+# # Server stubs.
+# nodist_lib_dep_tr_for_defs_a_SOURCES += \
+# device/device.server.defs.c
+# nodist_libkernel_a_SOURCES += \
+# device/device.server.h \
+# device/device.server.c \
+# device/device.server.msgids
+
+#
+# Building RPC stubs.
+#
+
+# TODO. Get rid of that stuff, lib_dep_tr_for_defs.a and the four following
+# rules. See the thread at
+# <http://lists.gnu.org/archive/html/automake/2006-10/msg00039.html> about what
+# we really want to do. This requires work on GNU Automake.
+
+noinst_LIBRARIES += \
+ lib_dep_tr_for_defs.a
+nodist_lib_dep_tr_for_defs_a_SOURCES =
+MOSTLYCLEANFILES += \
+ $(nodist_lib_dep_tr_for_defs_a_SOURCES)
+# Preprocess only.
+lib_dep_tr_for_defs_a_CPPFLAGS = $(AM_CPPFLAGS) \
+ -E
+
+%.server.defs.c: %.srv
+ $(AM_V_at) rm -f $@
+ $(AM_V_GEN) cp -p $< $@
+%.server.h %.server.c %.server.msgids: lib_dep_tr_for_defs_a-%.server.defs.$(OBJEXT)
+ $(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMSFLAGS) \
+ -sheader $*.server.h -server $*.server.c \
+ -list $*.server.msgids \
+ < $<
+%.user.defs.c: %.cli
+ $(AM_V_at) rm -f $@
+ $(AM_V_GEN) cp -p $< $@
+%.user.h %.user.c %.user.msgids: lib_dep_tr_for_defs_a-%.user.defs.$(OBJEXT)
+ $(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMUFLAGS) \
+ -user $*.user.c -header $*.user.h \
+ -list $*.user.msgids \
+ < $<
+# Stand-alone rule to generate the list of message ids when neither
+# the client nor the server stubs are required.
+%.none.defs.c: %.defs
+ $(AM_V_at) rm -f $@
+ $(AM_V_GEN) cp -p $< $@
+%.none.msgids: lib_dep_tr_for_defs_a-%.none.defs.$(OBJEXT)
+ $(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) \
+ -list $*.none.msgids \
+ < $<
+
+# This is how it should be done, but this is not integrated into GNU Automake
+# and is missing automatic inter-file dependency management because of that.
+
+# These chained rules could be (and used to be) single rules using pipes or
+# could even --- if you dare to --- use the `mig' shell script, but it's
+# convenient to be able to explicitly make the intermediate files when you want
+# to deal with a problem in the MIG stub generator.
+
+# TODO. Get rid of the .srv files and rather use .defs files and MIG*SFLAGS?
+#%.server.defs: %.srv
+# $(CPP) $(AM_CPPFLAGS) $(CPPFLAGS) -o $@ $<
+#%.server.defs: %.defs
+# $(CPP) $(AM_CPPFLAGS) $(CPPFLAGS) $(MIGSFLAGS) -o $@ $<
+#%.server.h %.server.c %.server.msgids: %.server.defs
+# $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMSFLAGS) \
+# -sheader $*.server.h -server $*.server.c \
+# -list $*.server.msgids \
+# < $<
+# TODO. Get rid of the .cli files and rather use .defs files and MIG*UFLAGS?
+#%.user.defs: %.cli
+# $(CPP) $(AM_CPPFLAGS) $(CPPFLAGS) -o $@ $<
+#%.user.defs: %.defs
+# $(CPP) $(AM_CPPFLAGS) $(CPPFLAGS) $(MIGUFLAGS) -o $@ $<
+#%.user.h %.user.c %.user.msgids: %.user.defs
+# $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMUFLAGS) \
+# -user $*.user.c -header $*.user.h \
+# -list $*.user.msgids \
+# < $<
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..8349550
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,161 @@
+Version 1.8 (2016-12-18)
+
+The memory management system was extensively reworked. A new type for
+physical addresses is now used where appropriate, and the system can
+make use of the high memory segment. Many paging issues have been
+addressed, and as a result the system handles low memory situations
+more gracefully now.
+
+The virtual memory system now uses a red-black tree for allocations,
+and as a result it now supports tasks with tens of thousands of
+mappings.
+
+Debugging and error reporting has been improved. Among other things
+the VM maps are now augmented with names that are used in error
+messages, panics and assertions point to their locations, the lock
+debugging mechanism has been fixed, and the kernel debugger can now
+inspect stack traces reaching into the machine-dependent bits
+implemented in assembler.
+
+As usual, bugs have been fixed throughout the code, including minor
+issues with the gsync synchronization mechanism which is now used for
+the internal locks in the GNU C Library (glibc).
+
+The deprecated external memory management interface has been removed.
+
+The partial ACPI support has been removed.
+
+Version 1.7 (2016-05-18)
+
+The code has been updated to work with newer versions of GCC, and numerous bugs
+have been fixed throughout the code, including a pageout deadlock. The code
+uses integer types from <stdint.h> now instead of the old Mach types.
+
+The VM cache policy change has been merged. The kernel now caches
+unreferenced VM objects unconditionally instead of using a fixed
+limit.
+
+The physical page allocator of the X15 kernel has been integrated, and
+is now used directly by the slab allocator. This increases the kernel
+heap, addressing important scalability issues.
+
+The gsync synchronization mechanism was added, similar to the Linux kernel's
+futexes, to allow efficient and powerful userland synchronization.
+
+Support for profiling kernel code from userland through sampling was added.
+
+Version 1.6 (2015-10-31)
+
+The code has been updated to work with newer versions of the compiler,
+and numerous bugs have been fixed throughout the code.
+
+The lock debugging infrastructure has been revived and improved, and
+many locking issues have been fixed.
+
+The IPC tables and the hash table mapping objects to IPC entries have
+been replaced by radix trees. This addresses a scalability issue, as
+IPC tables required huge amounts of continuous virtual kernel memory.
+
+The kernel now allows non-privileged users to wire a small amount of
+memory.
+
+A bug hindering the eviction of inactive pages by the pageout daemon
+has been identified and fixed.
+
+The kernel now keeps timestamps relative to the system boot time.
+Among other things this fixes bogus uptime readings if the system time
+is altered.
+
+A reference leak in the exception handling mechanism has been
+identified and fixed.
+
+ANSI escape sequences are now handled when using `printf'. This fixes
+the formatting of messages printed by various Linux drivers.
+
+Version 1.5 (2015-04-10)
+
+Numerous cleanups and stylistic fixes of the code base. Several
+problems have been identified using static analysis tools and
+subsequently been fixed.
+
+A protected payload can now be associated with capabilities. This
+payload is attached by the kernel to delivered messages and can be
+used to speed up the object lookup in the receiving task.
+
+The kernel debugger can now parse ELF symbol tables, can be invoked
+over serial lines, gained two new commands and has received usability
+improvements.
+
+The VM pageout policy has been tuned to accommodate modern hardware.
+
+The kernel gained partial ACPI support on x86, enough to power down
+the system.
+
+Version 1.4 (2013-09-27)
+
+Really too many to list them individually. Highlights include numerous bug and
+stability fixes, a Xen port for 32-bit x86 including basic support for Physical
+Address Extension (PAE), an initial AHCI driver (SATA hard disks), a new SLAB
+memory allocator to replace the previous zone allocator, support for memory
+object proxies, access restrictions for x86 I/O ports, support for some PCMCIA
+devices based on the pcmcia-cs package.
+
+Version 1.3
+
+The kernel now directly supports "boot scripts" in the form of multiboot
+module names with the same syntax as the Hurd's `serverboot' program.
+That is, instead of telling GRUB "module /boot/serverboot", you can give
+GRUB a series of command like "module /hurd/ext2fs ${...}" where the
+syntax after "module" is the same as in boot scripts for Hurd's `serverboot'.
+
+The kernel message device `kmsg' is now enabled by default.
+--disable-kmsg turns it off.
+
+Large disks (>= 10GB) are now correctly supported, the new get_status
+call DEV_GET_RECORDS can return the number of records of a device.
+
+Lots of tweaks have been done to the virtual memory management to make
+it perform better on today's machines.
+
+The console supports ANSI escape sequences for colors and attributes.
+
+Support for the terminal speeds B57600 and B115200 has been added.
+
+Version 1.2
+
+Many bug fixes.
+
+The task_basic_info RPC now has an additional field, holding the
+creation time of the task. Likewise for thread_basic_info.
+
+The interface generator `MiG' has been split out.
+
+Partition names for disks are now printed in the correct way.
+
+Linux drivers are updated to 2.0.36. Many thanks to Okuji Yoshinori
+for great work here. The Linux emulation support is much improved.
+
+The kernel message device `kmsg' is supported. --enable-kmsg turns on
+the device.
+
+The parallel driver is enabled by --enable-lpr.
+
+New make targets, install-kernel and install-headers are added. The
+former will install only the kernel, and the latter will install only
+the header files.
+
+Print out Mach device names instead of Linux ones.
+
+Version 1.1
+
+Cross-compilation support is much improved. Any of various popular
+libc's is now sufficient for building clib-routines.o.
+
+New configure option --enable-kdb asks for kernel debugger to be
+compiled in.
+
+Bug in --enable-ncr53c7xx has been fixed.
+
+Many thanks go to Marcus G. Daniels (marcus@cathcart.sysc.pdx.edu) for
+his very helpful testing of the 1.0 release and for his many
+improvements to the cross-compilation support.
diff --git a/README b/README
new file mode 100644
index 0000000..108452a
--- /dev/null
+++ b/README
@@ -0,0 +1,56 @@
+This is GNU Mach, the GNU distribution of the Mach microkernel,
+<http://www.gnu.org/software/hurd/microkernel/mach/gnumach.html>. Welcome.
+
+GNU Mach is the microkernel upon which a GNU Hurd system is based. It
+provides an Inter Process Communication (IPC) mechanism that the Hurd
+uses to define interfaces for implementing in a distributed multi-server
+fashion the services a traditional operating system kernel provides.
+
+GNU Mach runs on 32-bit x86 machines. A version running on 64-bit x86
+(x86_64) machines is in progress. Volunteers interested in ports to
+other architectures are sought; please contact us (see below) if you'd
+like to help.
+
+libmach, bootloaders, default pagers, and the like are not part of
+this distribution. For libraries, we refer you to the GNU C Library,
+which has Mach support. For bootloaders, we refer you to GRUB. (This
+kernel can be loaded by any bootloader that uses the multiboot
+standard.) For default pagers, we refer you to your particular system
+that you will run on top of Mach.
+
+The Mach Interface Generator (MIG) is no longer part of this distribution, and
+instead is packaged separately: GNU MIG.
+
+Generic installation instructions may be found in the file INSTALL.
+
+By default, most drivers for network boards are included, as well as
+drivers for IDE, SCSI and AHCI disks.
+
+If you want the in-kernel debugger compiled in, specify --enable-kdb
+to configure. This is only useful if you actually anticipate
+debugging the kernel, of course. We don't turn it on by default
+because it adds considerably to the unpageable memory footprint of the
+kernel.
+
+GNU Mach can be cross-built. No specific options need to be given when
+building on a 32-bit x86 ELF userland such as GNU/Linux. Manually switch the
+compiler to 32-bit mode when using a 64-bit x86 (x86_64) ELF toolchain:
+
+ $ [...]/configure --host=i686-gnu CC='gcc -m32' LD='ld -melf_i386'
+
+or point to a 32-bit ELF toolchain:
+
+ $ [...]/configure --host=i686-gnu CC=i686-linux-gnu-gcc LD=i686-linux-gnu-ld
+
+Also, GNU MIG needs to be a 32-bit version to properly compile the interfaces,
+you can specify for instance
+
+ $ [...]/configure --host=i686-gnu CC=i686-linux-gnu-gcc LD=i686-linux-gnu-ld MIG=i686-linux-gnu-mig
+
+
+Please read the FAQ at <http://www.gnu.org/software/hurd/faq.html>.
+Bug reports should be sent to <bug-hurd@gnu.org> or filed on
+<http://savannah.gnu.org/bugs/?group=hurd>. Requests for assistance
+should be sent to <help-hurd@gnu.org> or filed on
+<http://savannah.gnu.org/support/?group=hurd>. You can also find us on
+the Freenode IRC network in the #hurd channel.
diff --git a/chips/busses.c b/chips/busses.c
new file mode 100644
index 0000000..3811d0c
--- /dev/null
+++ b/chips/busses.c
@@ -0,0 +1,232 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: busses.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/90
+ *
+ * Generic autoconfiguration functions,
+ * usable to probe and attach devices
+ * on any bus that suits the generic bus
+ * structure, such as VME, TURBOChannel,
+ * and all the VAX busses.
+ *
+ */
+
+#include <string.h>
+#include <kern/printf.h>
+#include <mach/boolean.h>
+#include <mach/std_types.h>
+#include <chips/busses.h>
+
+
+
+
+/*
+ * configure_bus_master
+ *
+ * Given the name of a bus_ctlr, look it up in the
+ * init table. If found, probe it. If there can be
+ * slaves attached, walk the device's init table
+ * for those that might be attached to this controller.
+ * Call the 'slave' function on each one to see if
+ * ok, then the 'attach' one.
+ *
+ * Returns 0 if the controller is not there.
+ *
+ */
+boolean_t configure_bus_master(
+ const char *name,
+ vm_offset_t virt,
+ vm_offset_t phys,
+ int adpt_no,
+ const char *bus_name)
+{
+ struct bus_device *device;
+ struct bus_ctlr *master;
+ struct bus_driver *driver;
+
+ boolean_t found = FALSE;
+
+ /*
+ * Match the name in the table, then pick the entry that has the
+ * right adaptor number, or one that has it wildcarded. Entries
+ * already allocated are marked alive, skip them.
+ */
+ for (master = bus_master_init; master->driver; master++) {
+ if (master->alive)
+ continue;
+ if (((master->adaptor == adpt_no) || (master->adaptor == '?')) &&
+ (strcmp(master->name, name) == 0)) {
+ found = TRUE;
+ break;
+ }
+ }
+
+ if (!found)
+ return FALSE;
+
+ /*
+ * Found a match, probe it
+ */
+ driver = master->driver;
+ if ((*driver->probe) (virt, master) == 0)
+ return FALSE;
+
+ master->alive = 1;
+ master->adaptor = adpt_no;
+
+ /*
+ * Remember which controller this device is attached to
+ */
+ driver->minfo[master->unit] = master;
+
+ printf("%s%d: at %s%d\n", master->name, master->unit, bus_name, adpt_no);
+
+ /*
+ * Now walk all devices to check those that might be attached to this
+ * controller. We match the unallocated ones that have the right
+	 * controller number, or that have a wildcarded controller number.
+ */
+ for (device = bus_device_init; device->driver; device++) {
+ int ctlr;
+ if (device->alive || device->driver != driver ||
+ (device->adaptor != '?' && device->adaptor != adpt_no))
+ continue;
+ ctlr = device->ctlr;
+ if (ctlr == '?') device->ctlr = master->unit;
+ /*
+ * A matching entry. See if the slave-probing routine is
+ * happy.
+ */
+ if ((device->ctlr != master->unit) ||
+ ((*driver->slave) (device, virt) == 0)) {
+ device->ctlr = ctlr;
+ continue;
+ }
+
+ device->alive = 1;
+ device->adaptor = adpt_no;
+ device->ctlr = master->unit;
+
+ /*
+ * Save a backpointer to the controller
+ */
+ device->mi = master;
+
+ /*
+ * ..and to the device
+ */
+ driver->dinfo[device->unit] = device;
+
+ if (device->slave >= 0)
+ printf(" %s%d: at %s%d slave %d",
+ device->name, device->unit,
+ driver->mname, master->unit, device->slave);
+ else
+ printf(" %s%d: at %s%d",
+ device->name, device->unit,
+ driver->mname, master->unit);
+
+ /*
+ * Now attach this slave
+ */
+ (*driver->attach) (device);
+ printf("\n");
+ }
+ return TRUE;
+}
+
+/*
+ * configure_bus_device
+ *
+ * Given the name of a bus_device, look it up in the
+ * init table. If found, probe it. If it is present,
+ * call the driver's 'attach' function.
+ *
+ * Returns 0 if the device is not there.
+ *
+ */
+boolean_t configure_bus_device(
+ const char *name,
+ vm_offset_t virt,
+ vm_offset_t phys,
+ int adpt_no,
+ const char *bus_name)
+{
+ struct bus_device *device;
+ struct bus_driver *driver;
+
+ boolean_t found = FALSE;
+
+ /*
+ * Walk all devices to find one with the right name
+ * and adaptor number (or wildcard). The entry should
+ * be unallocated, and also the slave number should
+ * be wildcarded.
+ */
+ for (device = bus_device_init; device->driver; device++) {
+ if (device->alive)
+ continue;
+ if (((device->adaptor == adpt_no) || (device->adaptor == '?')) &&
+ (device->slave == -1) &&
+ ((!device->phys_address) ||
+ ((device->phys_address == phys) && (device->address == virt))) &&
+ (strcmp(device->name, name) == 0)) {
+ found = TRUE;
+ break;
+ }
+ }
+
+ if (!found)
+ return FALSE;
+
+ /*
+ * Found an entry, probe the device
+ */
+ driver = device->driver;
+ if ((*driver->probe) (virt, (struct bus_ctlr *)device) == 0)
+ return FALSE;
+
+ device->alive = 1;
+ device->adaptor = adpt_no;
+
+ printf("%s%d: at %s%d", device->name, device->unit, bus_name, adpt_no);
+
+ /*
+ * Remember which driver this device is attached to
+ */
+ driver->dinfo[device->unit] = device;
+
+ /*
+ * Attach the device
+ */
+ (*driver->attach) (device);
+ printf("\n");
+
+ return TRUE;
+}
+
diff --git a/chips/busses.h b/chips/busses.h
new file mode 100644
index 0000000..90eebc6
--- /dev/null
+++ b/chips/busses.h
@@ -0,0 +1,154 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: busses.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/90
+ *
+ * Structures used by configuration routines to
+ * explore a given bus structure.
+ */
+
+#ifndef _CHIPS_BUSSES_H_
+#define _CHIPS_BUSSES_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ *
+ * This is mildly modeled after the Unibus on Vaxen,
+ * one of the most complicated bus structures.
+ * Therefore, let's hope this can be done once and for all.
+ *
+ * At the bottom level there is a "bus_device", which
+ * might exist in isolation (e.g. a clock on the CPU
+ * board) or be a standard component of an architecture
+ * (e.g. the bitmap display on some workstations).
+ *
+ * Disk devices and communication lines support multiple
+ * units, hence the "bus_driver" structure which is more
+ * flexible and allows probing and dynamic configuration
+ * of the number and type of attached devices.
+ *
+ * At the top level there is a "bus_ctlr" structure, used
+ * in systems where the I/O bus(ses) are separate from
+ * the memory bus(ses), and/or when memory boards can be
+ * added to the main bus (and they must be config-ed
+ * and/or can interrupt the processor for ECC errors).
+ *
+ * The autoconfiguration process typically starts at
+ * the top level and walks down tables that are
+ * defined either in a generic file or are specially
+ * created by config.
+ */
+
+/*
+ * Per-controller structure.
+ */
+struct bus_ctlr {
+ struct bus_driver *driver; /* myself, as a device */
+ char *name; /* readability */
+ int unit; /* index in driver */
+ void (*intr)(int); /* interrupt handler(s) */
+ vm_offset_t address; /* device virtual address */
+ int am; /* address modifier */
+ vm_offset_t phys_address;/* device phys address */
+ char adaptor; /* slot where found */
+ char alive; /* probed successfully */
+ char flags; /* any special conditions */
+ vm_offset_t sysdep; /* On some systems, queue of
+ * operations in-progress */
+ natural_t sysdep1; /* System dependent */
+};
+
+
+/*
+ * Per-``device'' structure
+ */
+struct bus_device {
+ struct bus_driver *driver; /* autoconf info */
+ char *name; /* my name */
+ int unit;
+ void (*intr)(int);
+ vm_offset_t address; /* device address */
+ int am; /* address modifier */
+ vm_offset_t phys_address;/* device phys address */
+ char adaptor;
+ char alive;
+ char ctlr;
+ char slave;
+ int flags;
+ struct bus_ctlr *mi; /* backpointer to controller */
+ struct bus_device *next; /* optional chaining */
+ vm_offset_t sysdep; /* System dependent */
+ natural_t sysdep1; /* System dependent */
+};
+
+/*
+ * General flag definitions
+ */
+#define BUS_INTR_B4_PROBE 0x01 /* enable interrupts before probe */
+#define BUS_INTR_DISABLED 0x02 /* ignore all interrupts */
+#define BUS_CTLR 0x04 /* descriptor for a bus adaptor */
+#define BUS_XCLU 0x80 /* want exclusive use of bdp's */
+
+/*
+ * Per-driver structure.
+ *
+ * Each bus driver defines entries for a set of routines
+ * that are used at boot time by the configuration program.
+ */
+struct bus_driver {
+ int (*probe)( /* see if the driver is there */
+ vm_offset_t address,
+ struct bus_ctlr *);
+ int (*slave)( /* see if any slave is there */
+ struct bus_device *,
+ vm_offset_t);
+ void (*attach)( /* setup driver after probe */
+ struct bus_device *);
+ int (*dgo)(struct bus_device *); /* start transfer */
+ vm_offset_t *addr; /* device csr addresses */
+ char *dname; /* name of a device */
+ struct bus_device **dinfo; /* backpointers to init structs */
+ char *mname; /* name of a controller */
+ struct bus_ctlr **minfo; /* backpointers to init structs */
+ int flags;
+};
+
+#ifdef KERNEL
+extern struct bus_ctlr bus_master_init[];
+extern struct bus_device bus_device_init[];
+
+extern boolean_t configure_bus_master(const char *, vm_offset_t, vm_offset_t,
+ int, const char * );
+extern boolean_t configure_bus_device(const char *, vm_offset_t, vm_offset_t,
+ int, const char * );
+#endif /* KERNEL */
+
+
+#endif /* _CHIPS_BUSSES_H_ */
diff --git a/config.status.dep.patch b/config.status.dep.patch
new file mode 100644
index 0000000..868737b
--- /dev/null
+++ b/config.status.dep.patch
@@ -0,0 +1,18 @@
+--- config.status 2009-10-26 23:57:14.000000000 +0100
++++ config.status.new 2009-10-27 00:04:26.000000000 +0100
+@@ -1553,7 +1553,14 @@
+ s/.*/./; q'`
+ as_dir=$dirpart/$fdir; as_fn_mkdir_p
+ # echo "creating $dirpart/$file"
+- echo '# dummy' > "$dirpart/$file"
++ # Try to guess what file this dependency file is from...
++ f=$srcdir/`dirname "$fdir"`/`basename "$file" .Po | sed s/lib[^-]\*-//`
++ for f in "$f"*; do
++ case $f in
++ *.c | *.S) echo "$f"': $(filter-out $(DIST_SOURCES),$(SOURCES))';;
++ *) echo '# dummy';;
++ esac
++ done > "$dirpart/$file"
+ done
+ done
+ }
diff --git a/configfrag-first.ac b/configfrag-first.ac
new file mode 100644
index 0000000..5dc0db2
--- /dev/null
+++ b/configfrag-first.ac
@@ -0,0 +1,29 @@
+dnl Configure fragment for general options.
+
+dnl Copyright (C) 2020 Free Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+#
+# Common options
+#
+
+ncom=0
+nlpr=0
+
+AC_ARG_ENABLE([pae],
+ AS_HELP_STRING([--enable-pae], [PAE support (ix86-only); on i386-at disabled
+ by default, otherwise enabled by default]))
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/configfrag.ac b/configfrag.ac
new file mode 100644
index 0000000..b8b4126
--- /dev/null
+++ b/configfrag.ac
@@ -0,0 +1,183 @@
+dnl Configure fragment for general options.
+
+dnl Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+#
+# Definitions.
+#
+
+AC_DEFINE([MACH], [1], [MACH])
+AC_DEFINE([CMU], [1], [CMU])
+AC_DEFINE([MACH_KERNEL], [1], [Standalone MACH kernel])
+AC_DEFINE([KERNEL], [1], [KERNEL])
+
+#
+# Formerly in `bogus/'.
+#
+
+# i386/bogus/com.h
+AC_DEFINE_UNQUOTED([NCOM], [$ncom], [NCOM])
+
+# i386/bogus/lpr.h
+AC_DEFINE_UNQUOTED([NLPR], [$nlpr], [NLPR])
+
+[if [ x"$enable_pae" = xyes ]; then]
+ AC_DEFINE([PAE], [1], [PAE support])
+ AM_CONDITIONAL([enable_pae], [true])
+[else]
+ AM_CONDITIONAL([enable_pae], [false])
+[fi]
+
+# When set, the bootstrap task symbols are preserved by the kernel debugger.
+# Used in `kern/bootstrap.c'.
+AC_DEFINE([BOOTSTRAP_SYMBOLS], [0], [BOOTSTRAP_SYMBOLS])
+
+# Multiprocessor support is still broken.
+AH_TEMPLATE([MULTIPROCESSOR], [set things up for a uniprocessor])
+AC_ARG_ENABLE([ncpus],
+ AS_HELP_STRING([--enable-ncpus=N], [specify the maximum number of cpus to be supported]),
+ [mach_ncpus=$enable_ncpus],
+ [mach_ncpus=1])
+AC_DEFINE_UNQUOTED([NCPUS], [$mach_ncpus], [number of CPUs])
+[if [ $mach_ncpus -gt 1 ]; then]
+ AC_DEFINE([MULTIPROCESSOR], [1], [set things up for a multiprocessor])
+[fi]
+
+# Restartable Atomic Sequences to get a really fast test-n-set. Can't be
+# enabled, as the `void recover_ras()' function is missing.
+AC_DEFINE([FAST_TAS], [0], [FAST_TAS])
+
+# Cache footprint support.
+AC_DEFINE([HW_FOOTPRINT], [0], [HW_FOOTPRINT])
+
+# Counters.
+AC_DEFINE([MACH_COUNTERS], [0], [MACH_COUNTERS])
+
+# IPC debugging interface.
+AC_DEFINE([MACH_DEBUG], [1], [MACH_DEBUG])
+
+# Fixed priority threads.
+AC_DEFINE([MACH_FIXPRI], [1], [MACH_FIXPRI])
+
+# Mach host (cpu resource alloc.).
+[if [ $mach_ncpus -gt 1 ]; then]
+ AC_DEFINE([MACH_HOST], [1], [MACH_HOST])
+[else]
+ AC_DEFINE([MACH_HOST], [0], [MACH_HOST])
+[fi]
+
+# IPC debugging calls.
+AC_DEFINE([MACH_IPC_DEBUG], [1], [MACH_IPC_DEBUG])
+
+# Testing code/printfs.
+AC_DEFINE([MACH_IPC_TEST], [0], [MACH_IPC_TEST])
+
+# Sanity-check locking.
+AC_DEFINE([MACH_LDEBUG], [0], [MACH_LDEBUG])
+
+# MP lock monitoring. Registers use of locks, contention. Depending on
+# hardware also records time spent with locks held. Used in `kern/lock_mon.c'.
+AC_DEFINE([MACH_LOCK_MON], [0], [MACH_LOCK_MON])
+
+# Does the architecture provide machine-specific interfaces?
+mach_machine_routines=${mach_machine_routines-0}
+AC_DEFINE_UNQUOTED([MACH_MACHINE_ROUTINES], [$mach_machine_routines],
+ [MACH_MACHINE_ROUTINES])
+
+# MP debugging. Use alternate locking routines to detect deadlocks. Used in
+# `kern/lock_mon.c'.
+AC_DEFINE([MACH_MP_DEBUG], [0], [MACH_MP_DEBUG])
+
+# Paged-out page map hints.
+AC_DEFINE([MACH_PAGEMAP], [1], [MACH_PAGEMAP])
+
+# Do pc sample histogram.
+[if [ $mach_ncpus -gt 1 ]; then]
+ # Apparently not MP-safe yet.
+ AC_DEFINE([MACH_PCSAMPLE], [0], [MACH_PCSAMPLE])
+[else]
+ AC_DEFINE([MACH_PCSAMPLE], [1], [MACH_PCSAMPLE])
+[fi]
+
+# Sample kernel too.
+AC_ARG_ENABLE([kernsample],
+ AS_HELP_STRING([--enable-kernsample], [enable sampling kernel]))
+[if [ x"$enable_kernsample" = xyes ]; then]
+ AC_DEFINE([MACH_KERNSAMPLE], [1], [MACH_KERNSAMPLE])
+[else]
+ AC_DEFINE([MACH_KERNSAMPLE], [0], [MACH_KERNSAMPLE])
+[fi]
+
+# TTD Remote Kernel Debugging.
+AC_DEFINE([MACH_TTD], [0], [MACH_TTD])
+
+# VM debugging calls.
+AC_DEFINE([MACH_VM_DEBUG], [1], [MACH_VM_DEBUG])
+
+# Mach-dep power conservation.
+AC_DEFINE([POWER_SAVE], [1], [POWER_SAVE])
+
+# Use statistical timing.
+AC_DEFINE([STAT_TIME], [1], [STAT_TIME])
+
+# Kernel tracing.
+AC_DEFINE([XPR_DEBUG], [0], [XPR_DEBUG])
+
+# Slab allocator debugging facilities.
+AC_DEFINE([SLAB_VERIFY], [0], [SLAB_VERIFY])
+
+# Enable the CPU pool layer in the slab allocator.
+AC_DEFINE([SLAB_USE_CPU_POOLS], [0], [SLAB_USE_CPU_POOLS])
+
+#
+# Options.
+#
+
+AC_HEADER_ASSERT()
+
+AC_ARG_ENABLE([kdb],
+ AS_HELP_STRING([--enable-kdb], [enable use of in-kernel debugger]))
+[if [ x"$enable_kdb" = xyes ]; then]
+ AC_DEFINE([MACH_KDB], [1], [Use the in-kernel debugger?])
+ AM_CONDITIONAL([enable_kdb], [true])
+[else]
+ # We need to be long winded here: bogus/mach_kdb.h made it default to zero,
+ # unless overridden.
+ AC_DEFINE([MACH_KDB], [0], [Use the in-kernel debugger?])
+ AM_CONDITIONAL([enable_kdb], [false])
+[fi]
+
+
+AC_ARG_ENABLE([kmsg],
+ AS_HELP_STRING([--disable-kmsg], [disable use of kmsg device]))
+[if [ x"$enable_kmsg" != xno ]; then]
+ AC_DEFINE([MACH_KMSG], [], [enable use of kmsg device])
+ AM_CONDITIONAL([enable_kmsg], [true])
+[else]
+ AM_CONDITIONAL([enable_kmsg], [false])
+[fi]
+
+#
+# Set up `SYSTYPE/SYSTYPE' and `SYSTYPE/include/mach/SYSTYPE' links.
+#
+
+# `${file}' and `$file' have different meanings here with respect to having the
+# files in the referenced directory considered for `make dist' or not. See
+# <http://lists.gnu.org/archive/html/bug-automake/2006-11/msg00027.html>.
+AC_CONFIG_LINKS([machine:$srcdir/$systype/$systype
+ mach/machine:$systype/include/mach/$systype])
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..69f75cf
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,270 @@
+dnl Configure script for GNU Mach.
+
+dnl Copyright (C) 1997, 1998, 1999, 2004, 2006, 2007, 2008, 2010, 2013 Free
+dnl Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+AC_PREREQ([2.57])
+
+m4_include([version.m4])
+AC_INIT([AC_PACKAGE_NAME], [AC_PACKAGE_VERSION], [AC_PACKAGE_BUGREPORT],
+ [AC_PACKAGE_TARNAME])
+AC_CONFIG_SRCDIR([kern/ipc_kobject.c])
+
+if test -z "${CFLAGS+set}"; then
+ # Use these CFLAGS by default if nothing is set.
+ CFLAGS="-g -O2"
+fi
+# We don't need glibc to compile gnumach.
+CFLAGS="$CFLAGS -ffreestanding -nostdlib"
+
+AC_CONFIG_AUX_DIR([build-aux])
+
+AM_INIT_AUTOMAKE(
+ [1.10.2]
+ [dist-xz]
+dnl Don't define `PACKAGE' and `VERSION'.
+ [no-define]
+dnl Do not clutter the main build directory.
+ [subdir-objects]
+dnl We require GNU make.
+ [-Wall -Wno-portability]
+)
+
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([no])],
+ [AC_SUBST([AM_DEFAULT_VERBOSITY], [1])])
+
+#
+# Deduce the architecture we're building for.
+#
+# TODO: Should we also support constructs like `i686_xen-pc-gnu' or
+# `i686-pc_xen-gnu'?
+
+AC_CANONICAL_HOST
+
+AC_ARG_ENABLE([platform],
+ AS_HELP_STRING([--enable-platform=PLATFORM], [specify the platform to build a
+ kernel for. Defaults to `at' for `i?86'. The other possibility is
+ `xen'.]),
+ [host_platform=$enable_platform],
+ [host_platform=default])
+[# Supported configurations.
+case $host_platform:$host_cpu in
+ default:i?86)
+ host_platform=at;;
+ default:x86_64)]
+ [host_platform=at;;
+ at:i?86 | xen:i?86 | at:x86_64 | xen:x86_64)
+ :;;
+ *)]
+ AC_MSG_ERROR([unsupported combination of cpu type `$host_cpu' and platform
+ `$host_platform'.])[;;
+esac]
+AC_SUBST([host_platform])
+
+[# This is used in a few places.
+case $host_cpu in
+ i?86)
+ systype=i386;;
+ *)
+ systype=$host_cpu;;
+esac]
+AC_SUBST([systype])
+
+#
+# Programs.
+#
+
+AC_PROG_AWK
+# Temporarily force cross compiling mode to make sure the configure script
+# does not try to run compiled binaries.
+save_cross_compiling=$cross_compiling
+cross_compiling=yes
+AM_PROG_AS
+cross_compiling=$save_cross_compiling
+AC_PROG_CC
+AC_PROG_CPP
+AC_PROG_INSTALL
+AC_PROG_RANLIB
+AC_CHECK_TOOL([AR], [ar])
+AC_CHECK_TOOL([LD], [ld])
+AC_CHECK_TOOL([NM], [nm])
+
+AC_CHECK_TOOL([MIG], [mig], [no])
+AC_ARG_VAR([MIG], [Path to the mig tool])
+
+if test x$MIG = xno
+then
+ AC_MSG_WARN([mig was not found, we will not be able to build a kernel, only install headers. Install or build mig against them, and run configure again. If you already did so, perhaps you need to specify the path with MIG=])
+ MIG=mig
+fi
+
+dnl Needed for the Automake option `subdir-objects'.
+AM_PROG_CC_C_O
+
+dnl Makerules can make use of these.
+AC_CHECK_PROG([GZIP], [gzip], [gzip], [gzip-not-found])
+AC_CHECK_TOOL([STRIP], [strip])
+
+dnl See below why we need to patch stuff during build...
+AC_CHECK_PROG([PATCH], [patch], [patch], [patch-not-found])
+
+#
+# configure fragments.
+#
+
+# Default set of device drivers.
+AC_ARG_ENABLE([device-drivers],
+ AS_HELP_STRING([--enable-device-drivers=WHICH], [specify WHICH (on `ix86-at'
+ one of `default', `qemu', `none') to preset a certain subset of all
+	    available device drivers, as indicated by the below-mentioned ``enabled
+ ...'' comments; you can then still use further `--enable-*' or
+ `--disable-*' options to refine the selection of drivers to include in
+ order to choose only those you actually want to have enabled]))
+[case $enable_device_drivers in
+ '')
+ enable_device_drivers=default;;
+ no)
+ enable_device_drivers=none;;
+ default | none | qemu)
+ :;;
+ *)]
+ AC_MSG_ERROR([invalid choice of]
+ [`--enable-device-drivers=$enable_device_drivers'.])
+ [;;
+esac]
+
+AC_ARG_ENABLE([user32],
+AS_HELP_STRING([--enable-user32], [enable 32-bit user space on a 64-bit kernel]))
+[if [ x"$enable_user32" = xyes ]; then]
+ AC_DEFINE([USER32], [], [enable 32-bit user on 64-bit kernel])
+ AM_CONDITIONAL([enable_user32], [true])
+[else]
+ AM_CONDITIONAL([enable_user32], [false])
+[fi]
+
+
+# Platform-specific configuration.
+
+# PC AT.
+# TODO. Currently handled in `i386/configfrag.ac'.
+
+# General options.
+m4_include([configfrag-first.ac])
+
+# Xen.
+m4_include([xen/configfrag.ac])
+
+# Machine-specific configuration.
+
+# ix86.
+m4_include([i386/configfrag.ac])
+
+# x86_64
+m4_include([x86_64/configfrag.ac])
+
+# General options.
+m4_include([configfrag.ac])
+
+# Linux code snarfed into GNU Mach.
+m4_include([linux/configfrag.ac])
+
+# The test suite.
+m4_include([tests/configfrag.ac])
+
+#
+# Compiler features.
+#
+
+# Smashing stack protector.
+[ssp_possible=yes]
+AC_MSG_CHECKING([whether the compiler accepts `-fstack-protector'])
+# Is this a reliable test case?
+AC_LANG_CONFTEST(
+ [AC_LANG_SOURCE([[void foo (void) { volatile char a[8]; a[3]; }]])])
+[# `$CC -c -o ...' might not be portable. But, oh, well... Is calling
+# `ac_compile' like this correct, after all?
+if eval "$ac_compile -S -fstack-protector -o conftest.s" 2> /dev/null; then]
+ AC_MSG_RESULT([yes])
+ [# Should we clear up other files as well, having called `AC_LANG_CONFTEST'?
+ rm -f conftest.s
+else
+ ssp_possible=no]
+ AC_MSG_RESULT([no])
+[fi
+# Need that, because some distributions ship compilers that include
+# `-fstack-protector' in the default specs.]
+AM_CONDITIONAL([disable_smashing_stack_protector],
+ [[[ x"$ssp_possible" = xyes ]]])
+
+#
+# Output.
+#
+
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_FILES([Makefile version.c])
+
+#
+# The remaining ugly, dark corners...
+#
+# Attention, parents: don't show this to your children...
+#
+
+#
+# config.status.dep.patch
+#
+# This is a (ugly --- I admit) bootstrap hack to get to-be-generated files
+# created before any other source files are compiled.
+#
+# See <http://lists.gnu.org/archive/html/automake/2006-05/msg00038.html>.
+#
+# We don't use `BUILT_SOURCES' (as it was suggested in the follow-up message),
+# as we also want things like `make SPECIFIC_TARGET' to work.
+#
+# This affair is especially ugly because internals are used (the `# dummy'
+# tag): internals that may be subject to changes. That's the reason why a
+# real patch is being used here and not some `sed' magic: to make it fail
+# loudly in case.
+#
+# For all shipped source files a dependency file is tried to be created where
+# it is simply stated that the respective source file depends on _all_
+# to-be-generated files. Depending on all of them doesn't do any harm, as they
+# will nevertheless have to be created, sooner or later. The problem is, that
+# `config.status' doesn't know about the source file of the file it is
+# currently creating the dependency file for. So we have it do an educated
+# guess... Later, when compiling the source files, these dependency files will
+# be rewritten to contain the files' actual dependencies.  From then on this
+# bootstrap hack will be forgotten.
+#
+
+dnl AC_CONFIG_COMMANDS_POST([
+dnl sed -i -e \
+dnl 's%#\ dummy%Makefile: $(filter-out $(DIST_SOURCES),$(SOURCES))%' \
+dnl config.status
+dnl ])
+AC_CONFIG_COMMANDS_POST([
+ if "$PATCH" -f < "$srcdir"/config.status.dep.patch > /dev/null 2>&1 ||
+ ( cd "$srcdir" && "$PATCH" -f < Makefile.in.dep.patch ||
+ grep "Ugly bootstrap hack to get to-be-generated files created" Makefile.in ) > /dev/null 2>&1
+ then] AC_MSG_NOTICE([Applied a patch to work around a deficiency in]
+ [Automake. See `configure.ac' for details.])
+ [else] AC_MSG_ERROR([failed to patch using `config.status.dep.patch'.]
+ [You have a serious problem. Please contact <$PACKAGE_BUGREPORT>.])
+ [fi
+])
+
+#
+# Fire.
+#
+
+AC_OUTPUT
diff --git a/ddb/db_access.c b/ddb/db_access.c
new file mode 100644
index 0000000..509c1ba
--- /dev/null
+++ b/ddb/db_access.c
@@ -0,0 +1,136 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h> /* type definitions */
+#include <machine/db_interface.h> /* function definitions */
+#include <machine/setjmp.h>
+#include <kern/task.h>
+#include <ddb/db_access.h>
+
+
+
+/*
+ * Access unaligned data items on aligned (longword)
+ * boundaries.
+ */
+
+int db_access_level = DB_ACCESS_LEVEL;
+
+/*
+ * This table is for sign-extending things.
+ * Therefore its entries are signed, and yes
+ * they are infact negative numbers.
+ * So don't you put no more Us in it. Or Ls either.
+ * Otherwise there is no point having it, n'est pas ?
+ */
+static int db_extend[sizeof(int)+1] = { /* table for sign-extending */
+ 0,
+ 0xFFFFFF80,
+ 0xFFFF8000,
+ 0xFF800000,
+ 0x80000000
+};
+
+db_expr_t
+db_get_task_value(
+ db_addr_t addr,
+ int size,
+ boolean_t is_signed,
+ task_t task)
+{
+ char data[sizeof(db_expr_t)];
+ db_expr_t value;
+ int i;
+
+ if (!db_read_bytes(addr, size, data, task))
+ return 0;
+
+ value = 0;
+#if BYTE_MSF
+ for (i = 0; i < size; i++)
+#else /* BYTE_LSF */
+ for (i = size - 1; i >= 0; i--)
+#endif
+ {
+ value = (value << 8) + (data[i] & 0xFF);
+ }
+
+ if (size <= sizeof(int)) {
+ if (is_signed && (value & db_extend[size]) != 0)
+ value |= db_extend[size];
+ }
+ return (value);
+}
+
+void
+db_put_task_value(
+ db_addr_t addr,
+ int size,
+ db_expr_t value,
+ task_t task)
+{
+ char data[sizeof(db_expr_t)];
+ int i;
+
+#if BYTE_MSF
+ for (i = size - 1; i >= 0; i--)
+#else /* BYTE_LSF */
+ for (i = 0; i < size; i++)
+#endif
+ {
+ data[i] = value & 0xFF;
+ value >>= 8;
+ }
+
+ db_write_bytes(addr, size, data, task);
+}
+
+db_expr_t
+db_get_value(
+ db_addr_t addr,
+ int size,
+ boolean_t is_signed)
+{
+ return(db_get_task_value(addr, size, is_signed, TASK_NULL));
+}
+
+void
+db_put_value(
+ db_addr_t addr,
+ int size,
+ db_expr_t value)
+{
+ db_put_task_value(addr, size, value, TASK_NULL);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_access.h b/ddb/db_access.h
new file mode 100644
index 0000000..3bda5a4
--- /dev/null
+++ b/ddb/db_access.h
@@ -0,0 +1,79 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+/*
+ * Data access functions for debugger.
+ */
+
+#ifndef _DDB_DB_ACCESS_H_
+#define _DDB_DB_ACCESS_H_
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_task_thread.h>
+#include <machine/vm_param.h>
+
+/* implementation dependent access capability */
+#define DB_ACCESS_KERNEL 0 /* only kernel space */
+#define DB_ACCESS_CURRENT 1 /* kernel or current task space */
+#define DB_ACCESS_ANY 2 /* any space */
+
+#ifndef DB_ACCESS_LEVEL
+#define DB_ACCESS_LEVEL DB_ACCESS_KERNEL
+#endif /* DB_ACCESS_LEVEL */
+
+#ifndef DB_VALID_KERN_ADDR
+#define DB_VALID_KERN_ADDR(addr) ((addr) >= VM_MIN_KERNEL_ADDRESS \
+ && (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) ((user != 0) ^ DB_VALID_KERN_ADDR(addr))
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) 0
+#define DB_CHECK_ACCESS(addr,size,task) db_is_current_task(task)
+#endif /* DB_VALID_KERN_ADDR */
+
+extern int db_access_level;
+
+extern db_expr_t db_get_value( db_addr_t addr,
+ int size,
+ boolean_t is_signed );
+
+extern void db_put_value( db_addr_t addr,
+ int size,
+ db_expr_t value );
+
+extern db_expr_t db_get_task_value( db_addr_t addr,
+ int size,
+ boolean_t is_signed,
+ task_t task );
+
+extern void db_put_task_value( db_addr_t addr,
+ int size,
+ db_expr_t value,
+ task_t task );
+
+#endif /* _DDB_DB_ACCESS_H_ */
diff --git a/ddb/db_break.c b/ddb/db_break.c
new file mode 100644
index 0000000..374dc6a
--- /dev/null
+++ b/ddb/db_break.c
@@ -0,0 +1,746 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+
+/*
+ * Breakpoints.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <machine/db_interface.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_break.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_command.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_output.h>
+#include <ddb/db_cond.h>
+#include <ddb/db_expr.h>
+
+#define NBREAKPOINTS 100
+#define NTHREAD_LIST (NBREAKPOINTS*3)
+
+struct db_breakpoint db_break_table[NBREAKPOINTS];
+db_breakpoint_t db_next_free_breakpoint = &db_break_table[0];
+db_breakpoint_t db_free_breakpoints = 0;
+db_breakpoint_t db_breakpoint_list = 0;
+
+static struct db_thread_breakpoint db_thread_break_list[NTHREAD_LIST];
+static db_thread_breakpoint_t db_free_thread_break_list = 0;
+static boolean_t db_thread_break_init = FALSE;
+static int db_breakpoint_number = 0;
+
+static db_breakpoint_t
+db_breakpoint_alloc(void)
+{
+ db_breakpoint_t bkpt;
+
+ if ((bkpt = db_free_breakpoints) != 0) {
+ db_free_breakpoints = bkpt->link;
+ return (bkpt);
+ }
+ if (db_next_free_breakpoint == &db_break_table[NBREAKPOINTS]) {
+ db_printf("All breakpoints used.\n");
+ return (0);
+ }
+ bkpt = db_next_free_breakpoint;
+ db_next_free_breakpoint++;
+
+ return (bkpt);
+}
+
+static void
+db_breakpoint_free(db_breakpoint_t bkpt)
+{
+ bkpt->link = db_free_breakpoints;
+ db_free_breakpoints = bkpt;
+}
+
+static int
+db_add_thread_breakpoint(
+ const db_breakpoint_t bkpt,
+ vm_offset_t task_thd,
+ int count,
+ boolean_t task_bpt)
+{
+ db_thread_breakpoint_t tp;
+
+ if (db_thread_break_init == FALSE) {
+ for (tp = db_thread_break_list;
+ tp < &db_thread_break_list[NTHREAD_LIST-1]; tp++)
+ tp->tb_next = tp+1;
+ tp->tb_next = 0;
+ db_free_thread_break_list = db_thread_break_list;
+ db_thread_break_init = TRUE;
+ }
+ if (db_free_thread_break_list == 0)
+ return (-1);
+ tp = db_free_thread_break_list;
+ db_free_thread_break_list = tp->tb_next;
+ tp->tb_is_task = task_bpt;
+ tp->tb_task_thd = task_thd;
+ tp->tb_count = count;
+ tp->tb_init_count = count;
+ tp->tb_cond = 0;
+ tp->tb_number = ++db_breakpoint_number;
+ tp->tb_next = bkpt->threads;
+ bkpt->threads = tp;
+ return(0);
+}
+
+static int
+db_delete_thread_breakpoint(
+ db_breakpoint_t bkpt,
+ vm_offset_t task_thd)
+{
+ db_thread_breakpoint_t tp;
+ db_thread_breakpoint_t *tpp;
+
+ if (task_thd == 0) {
+ /* delete all the thread-breakpoints */
+
+ for (tpp = &bkpt->threads; (tp = *tpp) != 0; tpp = &tp->tb_next)
+ db_cond_free(tp);
+
+ *tpp = db_free_thread_break_list;
+ db_free_thread_break_list = bkpt->threads;
+ bkpt->threads = 0;
+ return 0;
+ } else {
+ /* delete the specified thread-breakpoint */
+
+ for (tpp = &bkpt->threads; (tp = *tpp) != 0; tpp = &tp->tb_next)
+ if (tp->tb_task_thd == task_thd) {
+ db_cond_free(tp);
+ *tpp = tp->tb_next;
+ tp->tb_next = db_free_thread_break_list;
+ db_free_thread_break_list = tp;
+ return 0;
+ }
+
+ return -1; /* not found */
+ }
+}
+
+static db_thread_breakpoint_t __attribute__ ((pure))
+db_find_thread_breakpoint(
+ const db_breakpoint_t bkpt,
+ const thread_t thread)
+{
+ db_thread_breakpoint_t tp;
+ task_t task = (thread == THREAD_NULL)? TASK_NULL: thread->task;
+
+ for (tp = bkpt->threads; tp; tp = tp->tb_next) {
+ if (tp->tb_is_task) {
+ if (tp->tb_task_thd == (vm_offset_t)task)
+ break;
+ continue;
+ }
+ if (tp->tb_task_thd == (vm_offset_t)thread || tp->tb_task_thd == 0)
+ break;
+ }
+ return(tp);
+}
+
+db_thread_breakpoint_t
+db_find_thread_breakpoint_here(
+ const task_t task,
+ db_addr_t addr)
+{
+ db_breakpoint_t bkpt;
+
+ bkpt = db_find_breakpoint(task, addr);
+ if (bkpt == 0)
+ return(0);
+ return(db_find_thread_breakpoint(bkpt, current_thread()));
+}
+
+db_thread_breakpoint_t
+db_find_breakpoint_number(
+ int num,
+ db_breakpoint_t *bkptp)
+{
+ db_thread_breakpoint_t tp;
+ db_breakpoint_t bkpt;
+
+ for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+ for (tp = bkpt->threads; tp; tp = tp->tb_next) {
+ if (tp->tb_number == num) {
+ if (bkptp)
+ *bkptp = bkpt;
+ return(tp);
+ }
+ }
+ }
+ return(0);
+}
+
+static void
+db_force_delete_breakpoint(
+ db_breakpoint_t bkpt,
+ vm_offset_t task_thd,
+ boolean_t is_task)
+{
+ db_printf("deleted a stale breakpoint at ");
+ if (bkpt->task == TASK_NULL || db_lookup_task(bkpt->task) >= 0)
+ db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task);
+ else
+ db_printf("%#X", bkpt->address);
+ if (bkpt->task)
+ db_printf(" in task %X", bkpt->task);
+ if (task_thd)
+ db_printf(" for %s %X", (is_task)? "task": "thread", task_thd);
+ db_printf("\n");
+ db_delete_thread_breakpoint(bkpt, task_thd);
+}
+
+void
+db_check_breakpoint_valid(void)
+{
+ db_thread_breakpoint_t tbp, tbp_next;
+ db_breakpoint_t bkpt, *bkptp;
+
+ bkptp = &db_breakpoint_list;
+ for (bkpt = *bkptp; bkpt; bkpt = *bkptp) {
+ if (bkpt->task != TASK_NULL) {
+ if (db_lookup_task(bkpt->task) < 0) {
+ db_force_delete_breakpoint(bkpt, 0, FALSE);
+ *bkptp = bkpt->link;
+ db_breakpoint_free(bkpt);
+ continue;
+ }
+ } else {
+ for (tbp = bkpt->threads; tbp; tbp = tbp_next) {
+ tbp_next = tbp->tb_next;
+ if (tbp->tb_task_thd == 0)
+ continue;
+ if ((tbp->tb_is_task &&
+ db_lookup_task((task_t)(tbp->tb_task_thd)) < 0) ||
+ (!tbp->tb_is_task &&
+ db_lookup_thread((thread_t)(tbp->tb_task_thd)) < 0)) {
+ db_force_delete_breakpoint(bkpt,
+ tbp->tb_task_thd, tbp->tb_is_task);
+ }
+ }
+ if (bkpt->threads == 0) {
+ db_put_task_value(bkpt->address, BKPT_SIZE,
+ bkpt->bkpt_inst, bkpt->task);
+ *bkptp = bkpt->link;
+ db_breakpoint_free(bkpt);
+ continue;
+ }
+ }
+ bkptp = &bkpt->link;
+ }
+}
+
+db_breakpoint_t
+db_set_breakpoint(
+ const task_t task,
+ db_addr_t addr,
+ int count,
+ const thread_t thread,
+ boolean_t task_bpt)
+{
+ db_breakpoint_t bkpt;
+ db_breakpoint_t alloc_bkpt = 0;
+ vm_offset_t task_thd;
+
+ bkpt = db_find_breakpoint(task, addr);
+ if (bkpt) {
+ if (thread == THREAD_NULL
+ || db_find_thread_breakpoint(bkpt, thread)) {
+ db_printf("Already set.\n");
+ return NULL;
+ }
+ } else {
+ if (!DB_CHECK_ACCESS(addr, BKPT_SIZE, task)) {
+ db_printf("Cannot set break point at %X\n", addr);
+ return NULL;
+ }
+ alloc_bkpt = bkpt = db_breakpoint_alloc();
+ if (bkpt == 0) {
+ db_printf("Too many breakpoints.\n");
+ return NULL;
+ }
+ bkpt->task = task;
+ bkpt->flags = (task && thread == THREAD_NULL)?
+ (BKPT_USR_GLOBAL|BKPT_1ST_SET): 0;
+ bkpt->address = addr;
+ bkpt->threads = 0;
+ }
+ if (db_breakpoint_list == 0)
+ db_breakpoint_number = 0;
+ task_thd = (task_bpt)? (vm_offset_t)(thread->task): (vm_offset_t)thread;
+ if (db_add_thread_breakpoint(bkpt, task_thd, count, task_bpt) < 0) {
+ if (alloc_bkpt)
+ db_breakpoint_free(alloc_bkpt);
+ db_printf("Too many thread_breakpoints.\n");
+ return NULL;
+ } else {
+ db_printf("set breakpoint #%d\n", db_breakpoint_number);
+ if (alloc_bkpt) {
+ bkpt->link = db_breakpoint_list;
+ db_breakpoint_list = bkpt;
+ }
+ return bkpt;
+ }
+}
+
+static void
+db_delete_breakpoint(
+ const task_t task,
+ db_addr_t addr,
+ vm_offset_t task_thd)
+{
+ db_breakpoint_t bkpt;
+ db_breakpoint_t *prev;
+
+ for (prev = &db_breakpoint_list; (bkpt = *prev) != 0;
+ prev = &bkpt->link) {
+ if ((bkpt->task == task
+ || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL)))
+ && bkpt->address == addr)
+ break;
+ }
+ if (bkpt && (bkpt->flags & BKPT_SET_IN_MEM)) {
+ db_printf("cannot delete it now.\n");
+ return;
+ }
+ if (bkpt == 0
+ || db_delete_thread_breakpoint(bkpt, task_thd) < 0) {
+ db_printf("Not set.\n");
+ return;
+ }
+ if (bkpt->threads == 0) {
+ *prev = bkpt->link;
+ db_breakpoint_free(bkpt);
+ }
+}
+
+db_breakpoint_t __attribute__ ((pure))
+db_find_breakpoint(
+ const task_t task,
+ db_addr_t addr)
+{
+ db_breakpoint_t bkpt;
+
+ for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+ if ((bkpt->task == task
+ || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL)))
+ && bkpt->address == addr)
+ return (bkpt);
+ }
+ return (0);
+}
+
+boolean_t
+db_find_breakpoint_here(
+ const task_t task,
+ db_addr_t addr)
+{
+ db_breakpoint_t bkpt;
+
+ for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+ if ((bkpt->task == task
+ || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL)))
+ && bkpt->address == addr)
+ return(TRUE);
+ if ((bkpt->flags & BKPT_USR_GLOBAL) == 0 &&
+ DB_PHYS_EQ(task, addr, bkpt->task, bkpt->address))
+ return (TRUE);
+ }
+ return(FALSE);
+}
+
+/* TRUE while breakpoint instructions are actually written into memory. */
+boolean_t	db_breakpoints_inserted = TRUE;
+
+/*
+ * Write the breakpoint instruction (BKPT_SET) at every listed
+ * breakpoint not yet set in memory, saving the original instruction
+ * in bkpt_inst.  User-global breakpoints are set in the current
+ * task's space; BKPT_1ST_SET preserves the original target task for
+ * the very first insertion.  Inaccessible addresses only produce a
+ * warning, so the debugger keeps running.
+ */
+void
+db_set_breakpoints(void)
+{
+	db_breakpoint_t	bkpt;
+	task_t		task;
+	db_expr_t	inst;
+	task_t		cur_task;
+
+	cur_task = (current_thread())? current_thread()->task: TASK_NULL;
+	if (!db_breakpoints_inserted) {
+	    for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+		if (bkpt->flags & BKPT_SET_IN_MEM)
+		    continue;
+		task = bkpt->task;
+		if (bkpt->flags & BKPT_USR_GLOBAL) {
+		    if ((bkpt->flags & BKPT_1ST_SET) == 0) {
+		        if (cur_task == TASK_NULL)
+		            continue;
+		        task = cur_task;
+		    } else
+		        bkpt->flags &= ~BKPT_1ST_SET;
+		}
+		if (DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) {
+		    inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE,
+								task);
+		    /* already a breakpoint instruction: don't save it as
+		       the "original", or the real code would be lost */
+		    if (inst == BKPT_SET(inst))
+			continue;
+		    bkpt->bkpt_inst = inst;
+		    db_put_task_value(bkpt->address,
+				BKPT_SIZE,
+				BKPT_SET(bkpt->bkpt_inst), task);
+		    bkpt->flags |= BKPT_SET_IN_MEM;
+		} else {
+		    db_printf("Warning: cannot set breakpoint at %X ",
+				bkpt->address);
+		    if (task)
+			db_printf("in task %X\n", task);
+		    else
+			db_printf("in kernel space\n");
+		}
+	    }
+	    db_breakpoints_inserted = TRUE;
+	}
+}
+
+/*
+ * Restore the saved instruction at every breakpoint currently set in
+ * memory.  If memory no longer holds the breakpoint instruction, the
+ * breakpoint is stale: non-global ones are force-deleted and freed;
+ * user-global ones are left on the list (they may belong to another
+ * task's address space).
+ */
+void
+db_clear_breakpoints(void)
+{
+	db_breakpoint_t	bkpt, *bkptp;
+	task_t	 task;
+	task_t	 cur_task;
+	db_expr_t inst;
+
+	cur_task = (current_thread())? current_thread()->task: TASK_NULL;
+	if (db_breakpoints_inserted) {
+	    bkptp = &db_breakpoint_list;
+	    for (bkpt = *bkptp; bkpt; bkpt = *bkptp) {
+		task = bkpt->task;
+		if (bkpt->flags & BKPT_USR_GLOBAL) {
+		    if (cur_task == TASK_NULL) {
+			bkptp = &bkpt->link;
+			continue;
+		    }
+		    task = cur_task;
+		}
+		if ((bkpt->flags & BKPT_SET_IN_MEM)
+		    && DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) {
+		    inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE,
+								task);
+		    /* breakpoint instruction is gone: entry is stale */
+		    if (inst != BKPT_SET(inst)) {
+			if (bkpt->flags & BKPT_USR_GLOBAL) {
+			    bkptp = &bkpt->link;
+			    continue;
+			}
+			db_force_delete_breakpoint(bkpt, 0, FALSE);
+			*bkptp = bkpt->link;
+		 	db_breakpoint_free(bkpt);
+			continue;
+		    }
+		    db_put_task_value(bkpt->address, BKPT_SIZE,
+				 bkpt->bkpt_inst, task);
+		    bkpt->flags &= ~BKPT_SET_IN_MEM;
+		}
+		bkptp = &bkpt->link;
+	    }
+	    db_breakpoints_inserted = FALSE;
+	}
+}
+
+/*
+ * Set a temporary breakpoint.
+ * The instruction is changed immediately,
+ * so the breakpoint does not have to be on the breakpoint list.
+ * Returns 0 (with a diagnostic) when the breakpoint or thread-record
+ * pools are exhausted.
+ */
+db_breakpoint_t
+db_set_temp_breakpoint(
+	task_t		task,
+	db_addr_t	addr)
+{
+	db_breakpoint_t	bkpt;
+
+	bkpt = db_breakpoint_alloc();
+	if (bkpt == 0) {
+	    db_printf("Too many breakpoints.\n");
+	    return 0;
+	}
+	bkpt->task = task;
+	bkpt->address = addr;
+	bkpt->flags = BKPT_TEMP;
+	bkpt->threads = 0;
+	if (db_add_thread_breakpoint(bkpt, 0, 1, FALSE) < 0) {
+	    /* NOTE(review): bkpt is known non-null here; this guard is
+	       redundant but harmless */
+	    if (bkpt)
+		db_breakpoint_free(bkpt);
+	    db_printf("Too many thread_breakpoints.\n");
+	    return 0;
+	}
+	bkpt->bkpt_inst = db_get_task_value(bkpt->address, BKPT_SIZE,
+						FALSE, task);
+	db_put_task_value(bkpt->address, BKPT_SIZE,
+				BKPT_SET(bkpt->bkpt_inst), task);
+	return bkpt;
+}
+
+/*
+ * Undo db_set_temp_breakpoint: restore the saved instruction, drop
+ * the thread record, and free the breakpoint structure.
+ */
+void
+db_delete_temp_breakpoint(
+	task_t		task,
+	db_breakpoint_t	bkpt)
+{
+	db_put_task_value(bkpt->address, BKPT_SIZE, bkpt->bkpt_inst, task);
+	db_delete_thread_breakpoint(bkpt, 0);
+	db_breakpoint_free(bkpt);
+}
+
+/*
+ * List breakpoints.
+ * One line per thread-record, showing number, address space (user /
+ * kernel / task), qualifying thread or task, skip count, symbolic
+ * address, and any attached condition.
+ */
+static void
+db_list_breakpoints(void)
+{
+	db_breakpoint_t	bkpt;
+
+	if (db_breakpoint_list == 0) {
+	    db_printf("No breakpoints set\n");
+	    return;
+	}
+
+	db_printf(" No Space    Thread      Cnt  Address(Cond)\n");
+	for (bkpt = db_breakpoint_list;
+	     bkpt != 0;
+	     bkpt = bkpt->link)
+	{
+	    db_thread_breakpoint_t tp;
+	    int task_id;
+	    int thread_id;
+
+	    if (bkpt->threads) {
+		for (tp = bkpt->threads; tp; tp = tp->tb_next) {
+		    db_printf("%3d ", tp->tb_number);
+		    if (bkpt->flags & BKPT_USR_GLOBAL)
+			db_printf("user     ");
+		    else if (bkpt->task == TASK_NULL)
+			db_printf("kernel   ");
+		    else if ((task_id = db_lookup_task(bkpt->task)) < 0)
+			/* task not in the debugger's task table: print
+			   the raw task pointer instead of an index */
+			db_printf("%0*X ", 2*sizeof(vm_offset_t), bkpt->task);
+		    else
+			db_printf("task%-3d  ", task_id);
+		    if (tp->tb_task_thd == 0) {
+			db_printf("all         ");
+		    } else {
+			if (tp->tb_is_task) {
+			    task_id = db_lookup_task((task_t)(tp->tb_task_thd));
+			    if (task_id < 0)
+				db_printf("%0*X ", 2*sizeof(vm_offset_t),
+					  tp->tb_task_thd);
+			    else
+				db_printf("task%03d     ", task_id);
+			} else {
+			    thread_t thd = (thread_t)(tp->tb_task_thd);
+			    task_id = db_lookup_task(thd->task);
+			    thread_id = db_lookup_task_thread(thd->task, thd);
+			    if (task_id < 0 || thread_id < 0)
+				db_printf("%0*X ", 2*sizeof(vm_offset_t),
+					   tp->tb_task_thd);
+			    else
+				db_printf("task%03d.%-3d ", task_id, thread_id);
+			}
+		    }
+		    db_printf("%3d ", tp->tb_init_count);
+		    db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task);
+		    if (tp->tb_cond > 0) {
+			db_printf("(");
+			db_cond_print(tp);
+			db_printf(")");
+		    }
+		    db_printf("\n");
+		}
+	    } else {
+		/* breakpoint with no thread records (e.g. external) */
+		if (bkpt->task == TASK_NULL)
+		    db_printf(" ?  kernel ");
+		else
+		    /* NOTE(review): other branches use "%0*X" (zero-padded);
+		       this one uses "%*X" — possibly an inconsistency */
+		    db_printf("%*X ", 2*sizeof(vm_offset_t), bkpt->task);
+		db_printf("(?)              ");
+		db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task);
+		db_printf("\n");
+	    }
+	}
+}
+
+/* Delete breakpoint.
+ *
+ * Syntax:  d[/modif] [#number | address]
+ * Modifiers: U = user-global, u = user space, T = per-task, t = per-thread.
+ * With "#n", the breakpoint is found by its listed number; otherwise by
+ * address (defaulting to db_dot if no expression is given).
+ */
+/*ARGSUSED*/
+void
+db_delete_cmd(
+	db_expr_t	addr_,
+	int		have_addr,
+	db_expr_t	count,
+	const char *	modif)
+{
+	int	n;
+	thread_t	 thread;
+	vm_offset_t	 task_thd;
+	boolean_t	 user_global = FALSE;
+	boolean_t	 task_bpt = FALSE;
+	boolean_t	 user_space = FALSE;
+	boolean_t	 thd_bpt = FALSE;
+	db_expr_t	addr;
+	int	 t;
+
+	t = db_read_token();
+	if (t == tSLASH) {
+	    t = db_read_token();
+	    if (t != tIDENT) {
+		db_printf("Bad modifier \"%s\"\n", db_tok_string);
+		db_error(0);
+	    }
+	    user_global = db_option(db_tok_string, 'U');
+	    user_space = (user_global)? TRUE: db_option(db_tok_string, 'u');
+	    task_bpt = db_option(db_tok_string, 'T');
+	    thd_bpt = db_option(db_tok_string, 't');
+	    if (task_bpt && user_global)
+		db_error("Cannot specify both 'T' and 'U' option\n");
+	    t = db_read_token();
+	}
+	if (t == tHASH) {
+	    /* delete by breakpoint number, e.g. "d #3" */
+	    db_thread_breakpoint_t tbp;
+	    db_breakpoint_t bkpt;
+
+	    if (db_read_token() != tNUMBER) {
+		db_printf("Bad break point number #%s\n", db_tok_string);
+		db_error(0);
+	    }
+	    if ((tbp = db_find_breakpoint_number(db_tok_number, &bkpt)) == 0) {
+		db_printf("No such break point #%d\n", db_tok_number);
+		db_error(0);
+	    }
+	    db_delete_breakpoint(bkpt->task, bkpt->address, tbp->tb_task_thd);
+	    return;
+	}
+	db_unread_token(t);
+	if (!db_expression(&addr)) {
+	    /*
+	     * We attempt to pick up the user_space indication from db_dot,
+	     * so that a plain "d" always works.
+	     */
+	    addr = (db_expr_t)db_dot;
+	    if (!user_space && !DB_VALID_ADDRESS((vm_offset_t)addr, FALSE))
+		user_space = TRUE;
+	}
+	if (!DB_VALID_ADDRESS((vm_offset_t) addr, user_space)) {
+	    db_printf("Address %#X is not in %s space\n", addr,
+			(user_space)? "user": "kernel");
+	    db_error(0);
+	}
+	if (thd_bpt || task_bpt) {
+	    /* delete the breakpoint for every selected thread/task */
+	    for (n = 0; db_get_next_thread(&thread, n); n++) {
+		if (thread == THREAD_NULL)
+		    db_error("No active thread\n");
+		if (task_bpt) {
+		    if (thread->task == TASK_NULL)
+			db_error("No task\n");
+		    task_thd = (vm_offset_t) (thread->task);
+		} else
+		    task_thd = (user_global)? 0: (vm_offset_t) thread;
+		db_delete_breakpoint(db_target_space(thread, user_space),
+					(db_addr_t)addr, task_thd);
+	    }
+	} else {
+	    db_delete_breakpoint(db_target_space(THREAD_NULL, user_space),
+				 (db_addr_t)addr, 0);
+	}
+}
+
+/* Set breakpoint with skip count.
+ *
+ * Syntax:  break[/modif] address[,count]
+ * Modifiers: U = user-global, u = user space, T/t = per-task.
+ * count (default 1) is the number of hits to skip before stopping.
+ */
+/*ARGSUSED*/
+void
+db_breakpoint_cmd(
+	db_expr_t	addr,
+	int		have_addr,
+	db_expr_t	count,
+	const char *	modif)
+{
+	int	n;
+	thread_t thread;
+	boolean_t user_global = db_option(modif, 'U');
+	boolean_t task_bpt = db_option(modif, 'T');
+	boolean_t user_space;
+
+	if (count == -1)
+	    count = 1;
+
+	if (!task_bpt && db_option(modif,'t'))
+	  task_bpt = TRUE;
+
+	if (task_bpt && user_global)
+	    db_error("Cannot specify both 'T' and 'U'\n");
+	user_space = (user_global)? TRUE: db_option(modif, 'u');
+	if (user_space && db_access_level < DB_ACCESS_CURRENT)
+	    db_error("User space break point is not supported\n");
+	if (!task_bpt && !DB_VALID_ADDRESS((vm_offset_t)addr, user_space)) {
+	    /* if the user has explicitly specified user space,
+	       do not insert a breakpoint into the kernel */
+	    if (user_space)
+		db_error("Invalid user space address\n");
+	    user_space = TRUE;
+	    db_printf("%#X is in user space\n", addr);
+	}
+	if (db_option(modif, 't') || task_bpt) {
+	    /* set one breakpoint per selected thread/task */
+	    for (n = 0; db_get_next_thread(&thread, n); n++) {
+		if (thread == THREAD_NULL)
+		    db_error("No active thread\n");
+		if (task_bpt && thread->task == TASK_NULL)
+		    db_error("No task\n");
+		if (db_access_level <= DB_ACCESS_CURRENT && user_space
+			 && thread->task != db_current_task())
+		    db_error("Cannot set break point in inactive user space\n");
+		db_set_breakpoint(db_target_space(thread, user_space),
+				  (db_addr_t)addr, count,
+				  (user_global)? THREAD_NULL: thread,
+				  task_bpt);
+	    }
+	} else {
+	    db_set_breakpoint(db_target_space(THREAD_NULL, user_space),
+				 (db_addr_t)addr,
+				 count, THREAD_NULL, FALSE);
+	}
+}
+
+/* list breakpoints — thin command wrapper; all four standard command
+ * arguments are unused. */
+void
+db_listbreak_cmd(
+	db_expr_t	addr,
+	int		have_addr,
+	db_expr_t	count,
+	const char *	modif)
+{
+	db_list_breakpoints();
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_break.h b/ddb/db_break.h
new file mode 100644
index 0000000..9f0ee95
--- /dev/null
+++ b/ddb/db_break.h
@@ -0,0 +1,111 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#ifndef _DDB_DB_BREAK_H_
+#define _DDB_DB_BREAK_H_
+
+#include <machine/db_machdep.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <mach/boolean.h>
+
+/*
+ * thread list at the same breakpoint address
+ */
+struct db_thread_breakpoint {
+	vm_offset_t tb_task_thd;	/* target task or thread */
+	boolean_t tb_is_task;		/* task qualified */
+	short	 tb_number;		/* breakpoint number */
+	short	 tb_init_count;		/* skip count(initial value) */
+	short	 tb_count;		/* current skip count */
+	short	 tb_cond;		/* break condition: 1-based index
+					   into db_cond[] (db_cond.c);
+					   <= 0 means no condition */
+	struct	 db_thread_breakpoint *tb_next;	/* next chain */
+};
+
+typedef struct db_thread_breakpoint *db_thread_breakpoint_t;
+
+/*
+ * Breakpoint.
+ */
+
+struct db_breakpoint {
+	task_t	  task;			/* target task */
+	db_addr_t address;		/* set here */
+	db_thread_breakpoint_t threads; /* thread */
+	int	flags;			/* flags: */
+#define	BKPT_SINGLE_STEP	0x2	/* to simulate single step */
+#define	BKPT_TEMP		0x4	/* temporary */
+#define BKPT_USR_GLOBAL		0x8	/* global user space break point */
+#define BKPT_SET_IN_MEM		0x10	/* break point is set in memory */
+#define BKPT_1ST_SET		0x20	/* 1st time set of user global bkpt */
+#define BKPT_EXTERNAL		0x40	/* break point is not from ddb */
+	vm_size_t	bkpt_inst;	/* saved instruction at bkpt */
+	struct db_breakpoint *link;	/* link in in-use or free chain */
+};
+
+typedef struct db_breakpoint *db_breakpoint_t;
+
+extern db_breakpoint_t	db_find_breakpoint( const task_t task, db_addr_t addr) __attribute__ ((pure));
+extern boolean_t db_find_breakpoint_here( const task_t task, db_addr_t addr);
+extern void db_set_breakpoints(void);
+extern void db_clear_breakpoints(void);
+extern db_thread_breakpoint_t db_find_thread_breakpoint_here
+	( const task_t task, db_addr_t addr );
+extern db_thread_breakpoint_t db_find_breakpoint_number
+	( int num, db_breakpoint_t *bkptp);
+
+extern db_breakpoint_t db_set_temp_breakpoint( task_t task, db_addr_t addr);
+extern void db_delete_temp_breakpoint
+	( task_t task, db_breakpoint_t bkpt);
+
+extern db_breakpoint_t db_set_breakpoint(const task_t task, db_addr_t addr,
+					 int count, const thread_t thread,
+					 boolean_t task_bpt);
+
+/* NOTE(review): the three command prototypes below declare have_addr as
+   boolean_t (first two) or int (third), while db_break.c defines all of
+   them with int — presumably boolean_t is int so this is benign, but
+   the declarations should agree; confirm against machine/db_machdep.h. */
+void db_listbreak_cmd(
+	db_expr_t	addr,
+	boolean_t	have_addr,
+	db_expr_t	count,
+	const char *	modif);
+
+void db_delete_cmd(
+	db_expr_t	addr,
+	boolean_t	have_addr,
+	db_expr_t	count,
+	const char *	modif);
+
+void db_breakpoint_cmd(
+	db_expr_t	addr,
+	int		have_addr,
+	db_expr_t	count,
+	const char *	modif);
+
+extern void db_check_breakpoint_valid(void);
+
+#endif /* _DDB_DB_BREAK_H_ */
diff --git a/ddb/db_command.c b/ddb/db_command.c
new file mode 100644
index 0000000..4671fe8
--- /dev/null
+++ b/ddb/db_command.c
@@ -0,0 +1,594 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Command dispatcher.
+ */
+
+#include <string.h>
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+#include <ddb/db_command.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_macro.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_print.h>
+#include <ddb/db_break.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_write_cmd.h>
+#include <ddb/db_run.h>
+#include <ddb/db_cond.h>
+
+#include <machine/setjmp.h>
+#include <machine/db_interface.h>
+#include <kern/debug.h>
+#include <kern/thread.h>
+#include <kern/slab.h>
+#include <ipc/ipc_pset.h> /* 4proto */
+#include <ipc/ipc_port.h> /* 4proto */
+
+#include <vm/vm_print.h>
+#include <vm/vm_page.h>
+#include <ipc/ipc_print.h>
+#include <ipc/mach_port.h>
+#include <kern/lock.h>
+
+/*
+ * Exported global variables
+ */
+boolean_t db_cmd_loop_done;
+jmp_buf_t *db_recover = 0;
+db_addr_t db_dot;
+db_addr_t db_last_addr;
+db_addr_t db_prev;
+db_addr_t db_next;
+
+/*
+ * if 'ed' style: 'dot' is set at start of last item printed,
+ * and '+' points to next line.
+ * Otherwise: 'dot' points to next item, '..' points to last.
+ */
+boolean_t db_ed_style = TRUE;
+
+/*
+ * Results of command search.
+ */
+#define CMD_UNIQUE 0
+#define CMD_FOUND 1
+#define CMD_NONE 2
+#define CMD_AMBIGUOUS 3
+#define CMD_HELP 4
+
+/*
+ * Search for command prefix.
+ * Matches 'name' against 'table': returns CMD_UNIQUE on an exact match,
+ * CMD_FOUND on a single prefix match (stored in *cmdp), CMD_AMBIGUOUS
+ * on several prefix matches, CMD_HELP when 'name' is a prefix of
+ * "help", and CMD_NONE otherwise.
+ */
+static int
+db_cmd_search(
+	const char *		name,
+	const struct db_command	*table,
+	const struct db_command	**cmdp	/* out */)
+{
+	const struct db_command	*cmd;
+	int		result = CMD_NONE;
+
+	for (cmd = table; cmd->name != 0; cmd++) {
+	    const char *lp;
+	    char *rp;
+	    int  c;
+
+	    lp = name;
+	    rp = cmd->name;
+	    while ((c = *lp) == *rp) {
+		if (c == 0) {
+		    /* complete match */
+		    *cmdp = cmd;
+		    return (CMD_UNIQUE);
+		}
+		lp++;
+		rp++;
+	    }
+	    if (c == 0) {
+		/* end of name, not end of command -
+		   partial match */
+		if (result == CMD_FOUND) {
+		    result = CMD_AMBIGUOUS;
+		    /* but keep looking for a full match -
+		       this lets us match single letters */
+		}
+		else {
+		    *cmdp = cmd;
+		    result = CMD_FOUND;
+		}
+	    }
+	}
+	if (result == CMD_NONE) {
+	    /* check for 'help' */
+	    if (!strncmp(name, "help", strlen(name)))
+		result = CMD_HELP;
+	}
+	return (result);
+}
+
+/* Print every command name in 'table', paginating via db_end_line(). */
+static void
+db_cmd_list(const struct db_command *table)
+{
+	const struct db_command *cmd;
+
+	for (cmd = table; cmd->name != 0; cmd++) {
+	    db_printf("%-12s", cmd->name);
+	    db_end_line();
+	}
+}
+
+/*
+ * Read and execute one command.
+ * An empty line repeats *last_cmdp at the 'next' address.  '!' calls an
+ * arbitrary function (db_fncall).  Otherwise the token is looked up in
+ * cmd_table (descending into sub-tables via cmd->more); unknown names
+ * are tried as macros.  For non-CS_OWN commands the standard
+ * "[/modifier] [addr] [,count]" syntax is parsed here before the
+ * handler runs.  db_dot/db_prev/db_next bookkeeping follows CS_SET_DOT
+ * and the 'ed'-style setting.
+ */
+static void
+db_command(
+	const struct db_command	**last_cmdp,	/* IN_OUT */
+	struct db_command	*cmd_table)
+{
+	const struct db_command	*cmd = NULL;
+	int		t;
+	char		modif[TOK_STRING_SIZE];
+	db_expr_t	addr, count;
+	boolean_t	have_addr = FALSE;
+	int		result;
+
+	t = db_read_token();
+	if (t == tEOL || t == tSEMI_COLON) {
+	    /* empty line repeats last command, at 'next' */
+	    cmd = *last_cmdp;
+	    addr = (db_expr_t)db_next;
+	    have_addr = FALSE;
+	    count = 1;
+	    modif[0]  = '\0';
+	    if (t == tSEMI_COLON)
+		db_unread_token(t);
+	}
+	else if (t == tEXCL) {
+	    db_fncall();
+	    return;
+	}
+	else if (t != tIDENT) {
+	    db_printf("?\n");
+	    db_flush_lex();
+	    return;
+	}
+	else {
+	    /*
+	     * Search for command
+	     */
+	    while (cmd_table) {
+		result = db_cmd_search(db_tok_string,
+				       cmd_table,
+				       &cmd);
+		switch (result) {
+		    case CMD_NONE:
+			if (db_exec_macro(db_tok_string) == 0)
+			    return;
+			db_printf("No such command \"%s\"\n", db_tok_string);
+			db_flush_lex();
+			return;
+		    case CMD_AMBIGUOUS:
+			db_printf("Ambiguous\n");
+			db_flush_lex();
+			return;
+		    case CMD_HELP:
+			db_cmd_list(cmd_table);
+			db_flush_lex();
+			return;
+		    default:
+			break;
+		}
+		/* descend into a sub-command table, if any */
+		if ((cmd_table = cmd->more) != 0) {
+		    t = db_read_token();
+		    if (t != tIDENT) {
+			db_cmd_list(cmd_table);
+			db_flush_lex();
+			return;
+		    }
+		}
+	    }
+
+	    if ((cmd->flag & CS_OWN) == 0) {
+		/*
+		 * Standard syntax:
+		 * command [/modifier] [addr] [,count]
+		 */
+		t = db_read_token();
+		if (t == tSLASH) {
+		    t = db_read_token();
+		    if (t != tIDENT) {
+			db_printf("Bad modifier \"/%s\"\n", db_tok_string);
+			db_flush_lex();
+			return;
+		    }
+		    db_strcpy(modif, db_tok_string);
+		}
+		else {
+		    db_unread_token(t);
+		    modif[0] = '\0';
+		}
+
+		if (db_expression(&addr)) {
+		    db_dot = (db_addr_t) addr;
+		    db_last_addr = db_dot;
+		    have_addr = TRUE;
+		}
+		else {
+		    addr = (db_expr_t) db_dot;
+		    have_addr = FALSE;
+		}
+		t = db_read_token();
+		if (t == tCOMMA) {
+		    if (!db_expression(&count)) {
+			db_printf("Count missing after ','\n");
+			db_flush_lex();
+			return;
+		    }
+		}
+		else {
+		    db_unread_token(t);
+		    count = -1;
+		}
+	    }
+	}
+	*last_cmdp = cmd;
+	if (cmd != NULL) {
+	    /*
+	     * Execute the command.
+	     */
+	    (*cmd->fcn)(addr, have_addr, count, modif);
+
+	    if (cmd->flag & CS_SET_DOT) {
+		/*
+		 * If command changes dot, set dot to
+		 * previous address displayed (if 'ed' style).
+		 */
+		if (db_ed_style) {
+		    db_dot = db_prev;
+		}
+		else {
+		    db_dot = db_next;
+		}
+	    }
+	    else {
+		/*
+		 * If command does not change dot,
+		 * set 'next' location to be the same.
+		 */
+		db_next = db_dot;
+	    }
+	}
+}
+
+/* Execute a ';'-separated list of commands from the current input line,
+ * stopping early if a command sets db_cmd_loop_done (e.g. continue). */
+static void
+db_command_list(
+	const struct db_command	**last_cmdp,	/* IN_OUT */
+	struct db_command	*cmd_table)
+{
+	do {
+	    db_command(last_cmdp, cmd_table);
+	    db_skip_to_eol();
+	} while (db_read_token() == tSEMI_COLON && db_cmd_loop_done == FALSE);
+}
+
+/* "show all" sub-commands. */
+struct db_command db_show_all_cmds[] = {
+	{ "tasks",	db_show_all_tasks,	0,	0 },
+	{ "threads",	db_show_all_threads,	0,	0 },
+	{ "slocks",	(db_command_fun_t)db_show_all_slocks,	0,	0 },
+	{ "runqs",	(db_command_fun_t)db_show_all_runqs,	0,	0 },
+	{ (char *)0 }
+};
+
+/* "show" sub-commands; handlers with non-standard signatures are cast
+   to db_command_fun_t. */
+struct db_command db_show_cmds[] = {
+	{ "all",	0,	0,	db_show_all_cmds },
+	{ "registers",	(db_command_fun_t)db_show_regs,	0,	0 },
+	{ "breaks",	db_listbreak_cmd,	0,	0 },
+	{ "watches",	db_listwatch_cmd,	0,	0 },
+	{ "thread",	db_show_one_thread,	0,	0 },
+	{ "task",	db_show_one_task,	0,	0 },
+	{ "macro",	db_show_macro,	CS_OWN,	0 },
+	{ "map",	vm_map_print,	0,	0 },
+	{ "object",	(db_command_fun_t)vm_object_print,	0,	0 },
+	{ "page",	(db_command_fun_t)vm_page_print,	0,	0 },
+	{ "copy",	(db_command_fun_t)vm_map_copy_print,	0,	0 },
+	{ "port",	(db_command_fun_t)ipc_port_print,	0,	0 },
+	{ "pset",	(db_command_fun_t)ipc_pset_print,	0,	0 },
+	{ "kmsg",	(db_command_fun_t)ipc_kmsg_print,	0,	0 },
+	{ "msg",	(db_command_fun_t)ipc_msg_print,	0,	0 },
+	{ "ipc_port",	db_show_port_id,	0,	0 },
+	{ "slabinfo",	(db_command_fun_t)db_show_slab_info,	0,	0 },
+	{ "vmstat",	(db_command_fun_t)db_show_vmstat,	0,	0 },
+	{ (char *)0, }
+};
+
+void
+db_debug_all_traps_cmd(db_expr_t addr,
+		       int have_addr,
+		       db_expr_t count,
+		       const char *modif);
+void
+db_debug_port_references_cmd(db_expr_t addr,
+			     int have_addr,
+			     db_expr_t count,
+			     const char *modif);
+
+/* "debug" sub-commands. */
+struct db_command db_debug_cmds[] = {
+	{ "traps",	db_debug_all_traps_cmd,	0,	0 },
+	{ "references",	db_debug_port_references_cmd,	0,	0 },
+	{ (char *)0, }
+};
+
+/* Top-level command table. */
+struct db_command db_command_table[] = {
+#ifdef DB_MACHINE_COMMANDS
+	/* this must be the first entry, if it exists */
+	{ "machine",    0,                      0,     		0},
+#endif
+	{ "print",	(db_command_fun_t)db_print_cmd,	CS_OWN,	0 },
+	{ "examine",	db_examine_cmd,		CS_MORE|CS_SET_DOT, 0 },
+	{ "x",		db_examine_cmd,		CS_MORE|CS_SET_DOT, 0 },
+	{ "xf",		db_examine_forward,	CS_SET_DOT,	0 },
+	{ "xb",		db_examine_backward,	CS_SET_DOT,	0 },
+	{ "whatis",	db_whatis_cmd,		CS_MORE,	0 },
+	{ "search",	db_search_cmd,		CS_OWN|CS_SET_DOT, 0 },
+	{ "set",	(db_command_fun_t)db_set_cmd,		CS_OWN,	0 },
+	{ "write",	db_write_cmd,		CS_MORE|CS_SET_DOT, 0 },
+	{ "w",		db_write_cmd,		CS_MORE|CS_SET_DOT, 0 },
+	{ "delete",	db_delete_cmd,		CS_OWN,	0 },
+	{ "d",		db_delete_cmd,		CS_OWN,	0 },
+	{ "break",	db_breakpoint_cmd,	CS_MORE,	0 },
+	{ "dwatch",	db_deletewatch_cmd,	CS_MORE,	0 },
+	{ "watch",	db_watchpoint_cmd,	CS_MORE,	0 },
+	{ "step",	db_single_step_cmd,	0,		0 },
+	{ "s",		db_single_step_cmd,	0,		0 },
+	{ "continue",	db_continue_cmd,	0,		0 },
+	{ "c",		db_continue_cmd,	0,		0 },
+	{ "until",	db_trace_until_call_cmd,0,		0 },
+	{ "next",	db_trace_until_matching_cmd,0,		0 },
+	{ "match",	db_trace_until_matching_cmd,0,		0 },
+	{ "trace",	db_stack_trace_cmd,	0,		0 },
+	{ "cond",	db_cond_cmd,		CS_OWN,		0 },
+	{ "call",	(db_command_fun_t)db_fncall,		CS_OWN,		0 },
+	{ "macro",	db_def_macro_cmd,	CS_OWN,		0 },
+	{ "dmacro",	db_del_macro_cmd,	CS_OWN,		0 },
+	{ "show",	0,			0,	db_show_cmds },
+	{ "debug",	0,			0,	db_debug_cmds },
+	{ "reset",	(db_command_fun_t)db_reset_cpu,		0,		0 },
+	{ "reboot",	(db_command_fun_t)db_reset_cpu,		0,		0 },
+	{ "halt",	(db_command_fun_t)db_halt_cpu,		0,		0 },
+	{ (char *)0, }
+};
+
+#ifdef DB_MACHINE_COMMANDS
+
+/* this function should be called to install the machine dependent
+   commands. It should be called before the debugger is enabled  */
+void db_machine_commands_install(struct db_command *ptr)
+{
+	/* slot 0 is the reserved "machine" entry (see db_command_table) */
+	db_command_table[0].more = ptr;
+	return;
+}
+
+#endif /* DB_MACHINE_COMMANDS */
+
+
+/* Most recently executed command, used for empty-line repeat. */
+const struct db_command	*db_last_command = 0;
+
+/* Print the names of all top-level commands. */
+void
+db_help_cmd(void)
+{
+	struct db_command *cmd = db_command_table;
+
+	while (cmd->name != 0) {
+	    db_printf("%-12s", cmd->name);
+	    db_end_line();
+	    cmd++;
+	}
+}
+
+/*
+ * Main debugger read-eval loop.  Installs a setjmp recovery point so
+ * db_error() can longjmp back to the prompt, then reads and executes
+ * command lines until a command (e.g. continue) sets db_cmd_loop_done.
+ * The previous db_recover is restored on exit to support nesting.
+ */
+void
+db_command_loop(void)
+{
+	jmp_buf_t db_jmpbuf;
+	jmp_buf_t *prev = db_recover;
+	extern int db_output_line;
+	extern int db_macro_level;
+
+	/*
+	 * Initialize 'prev' and 'next' to dot.
+	 */
+	db_prev = db_dot;
+	db_next = db_dot;
+
+	db_cmd_loop_done = FALSE;
+	while (!db_cmd_loop_done) {
+	    (void) _setjmp(db_recover = &db_jmpbuf);
+	    db_macro_level = 0;
+	    if (db_print_position() != 0)
+		db_printf("\n");
+	    db_output_line = 0;
+	    db_printf("db%s", (db_default_thread)? "t": "");
+#if	NCPUS > 1
+	    db_printf("{%d}", cpu_number());
+#endif
+	    db_printf("> ");
+
+	    (void) db_read_line("!!");
+	    db_command_list(&db_last_command, db_command_table);
+	}
+
+	db_recover = prev;
+}
+
+/*
+ * Execute a command string 'cmd' (or the current input when cmd == 0)
+ * with the lexer context saved and restored around it.  Returns TRUE
+ * unless a command requested loop exit (db_cmd_loop_done).
+ */
+boolean_t
+db_exec_cmd_nest(
+	char *cmd,
+	int  size)
+{
+	struct db_lex_context lex_context;
+
+	db_cmd_loop_done = FALSE;
+	if (cmd) {
+	    db_save_lex_context(&lex_context);
+	    db_switch_input(cmd, size /**OLD, &lex_context OLD**/);
+	}
+	db_command_list(&db_last_command, db_command_table);
+	if (cmd)
+	    db_restore_lex_context(&lex_context);
+	return(db_cmd_loop_done == FALSE);
+}
+
+/*
+ * Report an error and abort the current command: longjmp back to the
+ * prompt when a recovery point exists, otherwise panic.  Never returns
+ * (declared noreturn in db_command.h).
+ *
+ * NOTE(review): 's' is passed to db_printf as the format string, so
+ * callers must only pass trusted, literal messages (CERT FIO30-C);
+ * "%"-containing text would be misinterpreted.
+ */
+void db_error(const char *s)
+{
+	extern int db_macro_level;
+
+	db_macro_level = 0;
+	if (db_recover) {
+	    if (s)
+		db_printf(s);
+	    db_flush_lex();
+	    _longjmp(db_recover, 1);
+	}
+	else
+	{
+	    if (s)
+		db_printf(s);
+	    panic("db_error");
+	}
+}
+
+/*
+ * Call random function:
+ * !expr(arg,arg,arg)
+ * Evaluates 'expr' as a function address, parses up to MAXARGS
+ * arguments, zero-fills the rest, calls through a 10-argument
+ * prototype, and prints the return value.
+ *
+ * NOTE(review): MAXARGS is 11 but 'func' takes and is passed only 10
+ * arguments (args[0..9]); an 11th argument is parsed and silently
+ * ignored — confirm whether MAXARGS should be 10.
+ */
+void
+db_fncall(void)
+{
+	db_expr_t	fn_addr;
+#define MAXARGS		11
+	db_expr_t	args[MAXARGS];
+	int		nargs = 0;
+	db_expr_t	retval;
+	typedef db_expr_t(*function_t)(db_expr_t, db_expr_t, db_expr_t,
+				       db_expr_t, db_expr_t, db_expr_t,
+				       db_expr_t, db_expr_t, db_expr_t,
+				       db_expr_t);
+	function_t	func;
+	int		t;
+
+	if (!db_expression(&fn_addr)) {
+	    db_printf("Bad function \"%s\"\n", db_tok_string);
+	    db_flush_lex();
+	    return;
+	}
+	func = (function_t) fn_addr;
+
+	t = db_read_token();
+	if (t == tLPAREN) {
+	    if (db_expression(&args[0])) {
+		nargs++;
+		while ((t = db_read_token()) == tCOMMA) {
+		    if (nargs == MAXARGS) {
+			db_printf("Too many arguments\n");
+			db_flush_lex();
+			return;
+		    }
+		    if (!db_expression(&args[nargs])) {
+			db_printf("Argument missing\n");
+			db_flush_lex();
+			return;
+		    }
+		    nargs++;
+		}
+		db_unread_token(t);
+	    }
+	    if (db_read_token() != tRPAREN) {
+		db_printf("?\n");
+		db_flush_lex();
+		return;
+	    }
+	}
+	/* zero-fill unused argument slots */
+	while (nargs < MAXARGS) {
+	    args[nargs++] = 0;
+	}
+
+	retval = (*func)(args[0], args[1], args[2], args[3], args[4],
+			 args[5], args[6], args[7], args[8], args[9] );
+	db_printf(" %#N\n", retval);
+}
+
+/* Return TRUE if character 'option' appears in the modifier string. */
+boolean_t __attribute__ ((pure))
+db_option(
+	const char	*modif,
+	int		option)
+{
+	const char *p;
+
+	for (p = modif; *p; p++)
+	    if (*p == option)
+		return(TRUE);
+	return(FALSE);
+}
+
+/* "debug traps /on|/off" — toggle tracing of all traps. */
+void
+db_debug_all_traps_cmd(db_expr_t addr,
+		       int have_addr,
+		       db_expr_t count,
+		       const char *modif)
+{
+	if (strcmp (modif, "on") == 0)
+	    db_debug_all_traps (TRUE);
+	else if (strcmp (modif, "off") == 0)
+	    db_debug_all_traps (FALSE);
+	else
+	    db_error ("debug traps /on|/off\n");
+}
+
+/* "debug references /on|/off" — toggle port-reference debugging. */
+void
+db_debug_port_references_cmd(db_expr_t addr,
+			     int have_addr,
+			     db_expr_t count,
+			     const char *modif)
+{
+	if (strcmp (modif, "on") == 0)
+	    db_debug_port_references (TRUE);
+	else if (strcmp (modif, "off") == 0)
+	    db_debug_port_references (FALSE);
+	else
+	    db_error ("debug references /on|/off\n");
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_command.h b/ddb/db_command.h
new file mode 100644
index 0000000..73690a4
--- /dev/null
+++ b/ddb/db_command.h
@@ -0,0 +1,80 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#ifndef _DDB_DB_COMMAND_H_
+#define _DDB_DB_COMMAND_H_
+
+#if MACH_KDB
+
+/*
+ * Command loop declarations.
+ */
+
+#include <machine/db_machdep.h>
+#include <machine/setjmp.h>
+
+extern void db_command_loop(void);
+extern boolean_t db_option(const char *, int) __attribute__ ((pure));
+
+extern void db_error(const char *) __attribute__ ((noreturn)); /* report error */
+
+extern db_addr_t db_dot; /* current location */
+extern db_addr_t db_last_addr; /* last explicit address typed */
+extern db_addr_t db_prev; /* last address examined
+ or written */
+extern db_addr_t db_next; /* next address to be examined
+ or written */
+extern jmp_buf_t * db_recover; /* error recovery */
+
+typedef void (*db_command_fun_t)(db_expr_t, boolean_t, db_expr_t, const char *);
+
+/*
+ * Command table
+ */
+struct db_command {
+ char * name; /* command name */
+ db_command_fun_t fcn; /* function to call */
+ int flag; /* extra info: */
+#define CS_OWN 0x1 /* non-standard syntax */
+#define CS_MORE 0x2 /* standard syntax, but may have other
+ words at end */
+#define CS_SET_DOT 0x100 /* set dot after command */
+ struct db_command *more; /* another level of command */
+};
+
+extern boolean_t db_exec_cmd_nest(char *cmd, int size);
+
+void db_fncall(void);
+
+void db_help_cmd(void);
+
+#endif /* MACH_KDB */
+
+#endif /* _DDB_DB_COMMAND_H_ */
diff --git a/ddb/db_cond.c b/ddb/db_cond.c
new file mode 100644
index 0000000..d45d9b8
--- /dev/null
+++ b/ddb/db_cond.c
@@ -0,0 +1,185 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+#include <machine/db_machdep.h>
+#include <machine/setjmp.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_break.h>
+#include <ddb/db_command.h>
+#include <ddb/db_cond.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_output.h>
+
+#include <kern/debug.h>
+
+
+#define DB_MAX_COND	10	/* maximum conditions to be set */
+
+int   db_ncond_free = DB_MAX_COND;	/* free condition */
+/* Fixed pool of condition slots; a slot with c_size == 0 is free.
+   tb_cond in db_thread_breakpoint is a 1-based index into this array. */
+struct db_cond {
+	int	c_size;					/* size of cond */
+	char	c_cond_cmd[DB_LEX_LINE_SIZE];		/* cond & cmd */
+} db_cond[DB_MAX_COND];
+
+/* Release the condition slot attached to 'bkpt', if any, and mark the
+ * breakpoint unconditional. */
+void
+db_cond_free(db_thread_breakpoint_t bkpt)
+{
+	if (bkpt->tb_cond > 0) {
+	    db_cond[bkpt->tb_cond-1].c_size = 0;
+	    db_ncond_free++;
+	    bkpt->tb_cond = 0;
+	}
+}
+
+/*
+ * Evaluate the condition attached to 'bkpt' at breakpoint-hit time.
+ * Returns TRUE when the debugger should stop (no condition, condition
+ * true with skip count exhausted, or any evaluation error), FALSE to
+ * continue silently.  When the condition carries a trailing command
+ * list, it is executed and its result decides whether to stop.
+ */
+boolean_t
+db_cond_check(db_thread_breakpoint_t bkpt)
+{
+	struct db_cond *cp;
+	db_expr_t value;
+	int t;
+	jmp_buf_t db_jmpbuf;
+	extern jmp_buf_t *db_recover;
+
+	if (bkpt->tb_cond <= 0) {		/* no condition */
+	    return(TRUE);
+	}
+	db_dot = PC_REGS(DDB_REGS);
+	db_prev = db_dot;
+	db_next = db_dot;
+	if (_setjmp(db_recover = &db_jmpbuf)) {
+	    /*
+	     * in case of error, return true to enter interactive mode
+	     */
+	    return(TRUE);
+	}
+
+	/*
+	 * switch input, and evalutate condition
+	 */
+	cp = &db_cond[bkpt->tb_cond - 1];
+	db_switch_input(cp->c_cond_cmd, cp->c_size);
+	if (!db_expression(&value)) {
+	    db_printf("error: condition evaluation error\n");
+	    return(TRUE);
+	}
+	if (value == 0 || --(bkpt->tb_count) > 0)
+	    return(FALSE);
+
+	/*
+	 * execute a command list if exist
+	 */
+	bkpt->tb_count = bkpt->tb_init_count;
+	if ((t = db_read_token()) != tEOL) {
+	    db_unread_token(t);
+	    return(db_exec_cmd_nest(0, 0));
+	}
+	return(TRUE);
+}
+
+/* Print the condition text attached to 'bkpt' (up to the first newline
+ * or NUL), for use in breakpoint listings.  No-op if unconditional. */
+void
+db_cond_print(const db_thread_breakpoint_t bkpt)
+{
+	char *p, *ep;
+	struct db_cond *cp;
+
+	if (bkpt->tb_cond <= 0)
+	    return;
+	cp = &db_cond[bkpt->tb_cond-1];
+	p = cp->c_cond_cmd;
+	ep = p + cp->c_size;
+	while (p < ep) {
+	    if (*p == '\n' || *p == 0)
+		break;
+	    db_putchar(*p++);
+	}
+}
+
+/*
+ * "cond #n [expr[;cmds]]" — attach a condition (and optional command
+ * list) to breakpoint #n, or clear it when no expression is given.
+ *
+ * NOTE(review): db_error is declared noreturn in db_command.h, so the
+ * 'return's after db_error(0) below appear unreachable (harmless).
+ * NOTE(review): the input loop below copies into c_cond_cmd without a
+ * bound against DB_LEX_LINE_SIZE — an over-long line would overflow
+ * the slot; confirm db_read_char() limits line length upstream.
+ */
+void
+db_cond_cmd(
+	db_expr_t	addr,
+	int		have_addr,
+	db_expr_t	count,
+	const char *	modif)
+{
+	int c;
+	struct db_cond *cp;
+	char *p;
+	db_expr_t value;
+	db_thread_breakpoint_t bkpt;
+
+	if (db_read_token() != tHASH || db_read_token() != tNUMBER) {
+	    db_printf("#<number> expected instead of \"%s\"\n", db_tok_string);
+	    db_error(0);
+	    return;
+	}
+	if ((bkpt = db_find_breakpoint_number(db_tok_number, 0)) == 0) {
+	    db_printf("No such break point #%d\n", db_tok_number);
+	    db_error(0);
+	    return;
+	}
+	/*
+	 * if the break point already has a condition, free it first
+	 */
+	if (bkpt->tb_cond > 0) {
+	    cp = &db_cond[bkpt->tb_cond - 1];
+	    db_cond_free(bkpt);
+	} else {
+	    if (db_ncond_free <= 0) {
+		db_error("Too many conditions\n");
+		return;
+	    }
+	    for (cp = db_cond; cp < &db_cond[DB_MAX_COND]; cp++)
+		if (cp->c_size == 0)
+		    break;
+	    if (cp >= &db_cond[DB_MAX_COND])
+		panic("bad db_cond_free");
+	}
+	/* skip leading blanks, then capture the rest of the line */
+	for (c = db_read_char(); c == ' ' || c == '\t'; c = db_read_char());
+	for (p = cp->c_cond_cmd; c >= 0; c = db_read_char())
+	    *p++ = c;
+	/*
+	 * switch to saved data and call db_expression to check the condition.
+	 * If no condition is supplied, db_expression will return false.
+	 * In this case, clear previous condition of the break point.
+	 * If condition is supplied, set the condition to the permanent area.
+	 * Note: db_expression will not return here, if the condition
+	 *       expression is wrong.
+	 */
+	db_switch_input(cp->c_cond_cmd, p - cp->c_cond_cmd);
+	if (!db_expression(&value)) {
+	    /* since condition is already freed, do nothing */
+	    db_flush_lex();
+	    return;
+	}
+	db_flush_lex();
+	db_ncond_free--;
+	cp->c_size = p - cp->c_cond_cmd;
+	bkpt->tb_cond = (cp - db_cond) + 1;
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_cond.h b/ddb/db_cond.h
new file mode 100644
index 0000000..c867c6e
--- /dev/null
+++ b/ddb/db_cond.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+
+#ifndef _DDB_DB_COND_H_
+#define _DDB_DB_COND_H_
+
+#include <sys/types.h>
+#include <machine/db_machdep.h>
+
+extern void db_cond_free (const db_thread_breakpoint_t bkpt);
+
+extern boolean_t db_cond_check (db_thread_breakpoint_t bkpt);
+
+extern void db_cond_print (db_thread_breakpoint_t bkpt);
+
+extern void db_cond_cmd (
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+#endif /* _DDB_DB_COND_H_ */
diff --git a/ddb/db_elf.c b/ddb/db_elf.c
new file mode 100644
index 0000000..5ccfdd5
--- /dev/null
+++ b/ddb/db_elf.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2014 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Symbol table routines for ELF format files.
+ */
+
+#include <string.h>
+#include <mach/std_types.h>
+#include <mach/exec/elf.h>
+#include <machine/db_machdep.h> /* data types */
+#include <machine/vm_param.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_elf.h>
+
+#ifndef DB_NO_ELF
+
+struct db_symtab_elf {
+ int type;
+ Elf_Sym *start;
+ Elf_Sym *end;
+ char *strings;
+ char *map_pointer; /* symbols are for this map only,
+ if not null */
+ char name[SYMTAB_NAME_LEN];
+ /* symtab name */
+};
+
+boolean_t
+elf_db_sym_init (unsigned shdr_num,
+ vm_size_t shdr_size,
+ vm_offset_t shdr_addr,
+ unsigned shdr_shndx,
+ char *name,
+ char *task_addr)
+{
+ Elf_Shdr *shdr, *symtab, *strtab;
+ const char *shstrtab;
+ unsigned i;
+
+ if (shdr_num == 0)
+ return FALSE;
+
+ if (shdr_size != sizeof *shdr)
+ return FALSE;
+
+ shdr = (Elf_Shdr *) shdr_addr;
+
+ if (shdr[shdr_shndx].sh_type != SHT_STRTAB)
+ return FALSE;
+
+ shstrtab = (const char *) phystokv (shdr[shdr_shndx].sh_addr);
+
+ symtab = strtab = NULL;
+ for (i = 0; i < shdr_num; i++)
+ switch (shdr[i].sh_type) {
+ case SHT_SYMTAB:
+ if (symtab)
+ db_printf ("Ignoring additional ELF symbol table at %d\n", i);
+ else
+ symtab = &shdr[i];
+ break;
+
+ case SHT_STRTAB:
+ if (strcmp (&shstrtab[shdr[i].sh_name], ".strtab") == 0) {
+ if (strtab)
+ db_printf ("Ignoring additional ELF string table at %d\n", i);
+ else
+ strtab = &shdr[i];
+ }
+ break;
+ }
+
+ if (symtab == NULL || strtab == NULL)
+ return FALSE;
+
+ if (db_add_symbol_table (SYMTAB_ELF,
+ (char *) phystokv (symtab->sh_addr),
+ (char *) phystokv (symtab->sh_addr)+symtab->sh_size,
+ name,
+ (char *) phystokv (strtab->sh_addr),
+ task_addr)) {
+ db_printf ("Loaded ELF symbol table for %s (%d symbols)\n",
+ name, symtab->sh_size / sizeof (Elf_Sym));
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * lookup symbol by name
+ */
+db_sym_t
+elf_db_lookup (db_symtab_t *stab,
+ char *symstr)
+{
+ struct db_symtab_elf *self = (struct db_symtab_elf *) stab;
+ Elf_Sym *s;
+
+ for (s = self->start; s < self->end; s++)
+ if (strcmp (symstr, &self->strings[s->st_name]) == 0)
+ return (db_sym_t) s;
+
+ return NULL;
+}
+
+db_sym_t
+elf_db_search_symbol (db_symtab_t *stab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp) /* in/out */
+{
+ struct db_symtab_elf *self = (struct db_symtab_elf *) stab;
+ unsigned long diff = *diffp;
+ Elf_Sym *s, *symp = NULL;
+
+ for (s = self->start; s < self->end; s++) {
+ if (s->st_name == 0)
+ continue;
+
+ if (strategy == DB_STGY_XTRN && (ELF_ST_BIND(s->st_info) != STB_GLOBAL))
+ continue;
+
+ if (off >= s->st_value) {
+ if (ELF_ST_TYPE(s->st_info) != STT_FUNC)
+ continue;
+
+ if (off - s->st_value < diff) {
+ diff = off - s->st_value;
+ symp = s;
+ if (diff == 0 && (ELF_ST_BIND(s->st_info) == STB_GLOBAL))
+ break;
+ } else if (off - s->st_value == diff) {
+ if (symp == NULL)
+ symp = s;
+ else if ((ELF_ST_BIND(symp->st_info) != STB_GLOBAL)
+ && (ELF_ST_BIND(s->st_info) == STB_GLOBAL))
+ symp = s; /* pick the external symbol */
+ }
+ }
+ }
+
+ if (symp == NULL)
+ *diffp = off;
+ else
+ *diffp = diff;
+
+ return (db_sym_t) symp;
+}
+
+/*
+ * Return the name and value for a symbol.
+ */
+void
+elf_db_symbol_values (db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep)
+{
+ struct db_symtab_elf *self = (struct db_symtab_elf *) stab;
+ Elf_Sym *s = (Elf_Sym *) sym;
+
+ if (namep)
+ *namep = &self->strings[s->st_name];
+ if (valuep)
+ *valuep = s->st_value;
+}
+
+/*
+ * Find filename and lineno within, given the current pc.
+ */
+boolean_t
+elf_db_line_at_pc (db_symtab_t *stab,
+ db_sym_t sym,
+ char **file,
+ int *line,
+ db_addr_t pc)
+{
+ /* XXX Parse DWARF information. */
+ return FALSE;
+}
+
+#endif /* DB_NO_ELF */
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_elf.h b/ddb/db_elf.h
new file mode 100644
index 0000000..12b8286
--- /dev/null
+++ b/ddb/db_elf.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DDB_DB_ELF_H_
+#define _DDB_DB_ELF_H_
+
+#include <ddb/db_sym.h>
+#include <machine/db_machdep.h>
+
+extern boolean_t
+elf_db_line_at_pc(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **file,
+ int *line,
+ db_addr_t pc);
+
+extern db_sym_t
+elf_db_lookup(
+ db_symtab_t *stab,
+ char * symstr);
+
+extern db_sym_t
+elf_db_search_symbol(
+ db_symtab_t * symtab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp);
+
+extern void
+elf_db_symbol_values(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep);
+
+#endif /* _DDB_DB_ELF_H_ */
diff --git a/ddb/db_examine.c b/ddb/db_examine.c
new file mode 100644
index 0000000..1941fc3
--- /dev/null
+++ b/ddb/db_examine.c
@@ -0,0 +1,664 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <machine/db_interface.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+#include <ddb/db_command.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_print.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/smp.h>
+#include <mach/vm_param.h>
+#include <vm/vm_map.h>
+
+#define db_thread_to_task(thread) ((thread)? thread->task: TASK_NULL)
+
+char db_examine_format[TOK_STRING_SIZE] = "x";
+int db_examine_count = 1;
+db_addr_t db_examine_prev_addr = 0;
+thread_t db_examine_thread = THREAD_NULL;
+
+/*
+ * Examine (print) data.
+ */
+/*ARGSUSED*/
+void
+db_examine_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ thread_t thread;
+
+ if (modif[0] != '\0')
+ db_strcpy(db_examine_format, modif);
+
+ if (count == -1)
+ count = 1;
+ db_examine_count = count;
+ if (db_option(modif, 't'))
+ {
+ if (!db_get_next_thread(&thread, 0))
+ return;
+ }
+ else
+ if (db_option(modif, 'u'))
+ thread = current_thread();
+ else
+ thread = THREAD_NULL;
+
+ db_examine_thread = thread;
+ db_examine((db_addr_t) addr, db_examine_format, count,
+ db_thread_to_task(thread));
+}
+
+/* ARGSUSED */
+void
+db_examine_forward(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ db_examine(db_next, db_examine_format, db_examine_count,
+ db_thread_to_task(db_examine_thread));
+}
+
+/* ARGSUSED */
+void
+db_examine_backward(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+
+ db_examine(db_examine_prev_addr - (db_next - db_examine_prev_addr),
+ db_examine_format, db_examine_count,
+ db_thread_to_task(db_examine_thread));
+}
+
+void
+db_examine(
+ db_addr_t addr,
+ const char * fmt, /* format string */
+ int count, /* repeat count */
+ task_t task)
+{
+ int c;
+ db_expr_t value;
+ int size; /* in bytes */
+ int width;
+ const char * fp;
+
+ db_examine_prev_addr = addr;
+ while (--count >= 0) {
+ fp = fmt;
+ size = 4;
+ width = 4*size;
+ while ((c = *fp++) != 0) {
+ switch (c) {
+ case 'b':
+ size = 1;
+ width = 4*size;
+ break;
+ case 'h':
+ size = 2;
+ width = 4*size;
+ break;
+ case 'l':
+ size = 4;
+ width = 4*size;
+ break;
+ case 'q':
+ size = 8;
+ width = 4*size;
+ break;
+ case 'a': /* address */
+ case 'A': /* function address */
+ /* always forces a new line */
+ if (db_print_position() != 0)
+ db_printf("\n");
+ db_prev = addr;
+ db_task_printsym(addr,
+ (c == 'a')?DB_STGY_ANY:DB_STGY_PROC,
+ task);
+ db_printf(":\t");
+ break;
+ case 'm':
+ db_next = db_xcdump(addr, size, count + 1, task);
+ return;
+ default:
+ if (db_print_position() == 0) {
+ /* If we hit a new symbol, print it */
+ char * name;
+ db_addr_t off;
+
+ db_find_task_sym_and_offset(addr, &name, &off, task);
+ if (off == 0)
+ db_printf("%s:\t", name);
+ else
+ db_printf("\t\t");
+
+ db_prev = addr;
+ }
+
+ switch (c) {
+ case ',': /* skip one unit w/o printing */
+ addr += size;
+ break;
+
+ case 'r': /* signed, current radix */
+ value = db_get_task_value(addr,size,TRUE,task);
+ addr += size;
+ db_printf("%-*R", width, value);
+ break;
+ case 'x': /* unsigned hex */
+ value = db_get_task_value(addr,size,FALSE,task);
+ addr += size;
+ db_printf("%-*X", width, value);
+ break;
+ case 'z': /* signed hex */
+ value = db_get_task_value(addr,size,TRUE,task);
+ addr += size;
+ db_printf("%-*Z", width, value);
+ break;
+ case 'd': /* signed decimal */
+ value = db_get_task_value(addr,size,TRUE,task);
+ addr += size;
+ db_printf("%-*D", width, value);
+ break;
+ case 'U': /* unsigned decimal */
+ value = db_get_task_value(addr,size,FALSE,task);
+ addr += size;
+ db_printf("%-*U", width, value);
+ break;
+ case 'o': /* unsigned octal */
+ value = db_get_task_value(addr,size,FALSE,task);
+ addr += size;
+ db_printf("%-*O", width, value);
+ break;
+ case 'c': /* character */
+ value = db_get_task_value(addr,1,FALSE,task);
+ addr += 1;
+ if (value >= ' ' && value <= '~')
+ db_printf("%c", value);
+ else
+ db_printf("\\%03o", value);
+ break;
+ case 's': /* null-terminated string */
+ for (;;) {
+ value = db_get_task_value(addr,1,FALSE,task);
+ addr += 1;
+ if (value == 0)
+ break;
+ if (value >= ' ' && value <= '~')
+ db_printf("%c", value);
+ else
+ db_printf("\\%03o", value);
+ }
+ break;
+ case 'i': /* instruction */
+ addr = db_disasm(addr, FALSE, task);
+ break;
+ case 'I': /* instruction, alternate form */
+ addr = db_disasm(addr, TRUE, task);
+ break;
+ default:
+ break;
+ }
+ if (db_print_position() != 0)
+ db_end_line();
+ break;
+ }
+ }
+ }
+ db_next = addr;
+}
+
+/*
+ * Find out what this address may be
+ */
+/*ARGSUSED*/
+void
+db_whatis_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ /* TODO: Add whatever you can think of */
+
+ int i;
+
+ {
+ /* tasks */
+
+ task_t task;
+ int task_id = 0;
+ processor_set_t pset;
+ thread_t thread;
+ int thread_id;
+ vm_map_entry_t entry;
+
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets)
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (addr >= (vm_offset_t) task
+ && addr < (vm_offset_t) task + sizeof(*task))
+ db_printf("%3d %0*X %s [%d]\n",
+ task_id,
+ 2*sizeof(vm_offset_t),
+ task,
+ task->name,
+ task->thread_count);
+
+ if (addr >= (vm_offset_t) task->map
+ && addr < (vm_offset_t) task->map + sizeof(*(task->map)))
+ db_printf("$map%d %X for $task%d %s\n",
+ task_id, (vm_offset_t) task->map, task_id, task->name);
+
+ for (entry = vm_map_first_entry(task->map);
+ entry != vm_map_to_entry(task->map);
+ entry = entry->vme_next)
+ if (addr >= (vm_offset_t) entry
+ && addr < (vm_offset_t) entry + sizeof(*entry))
+ db_printf("$map%d %X for $task%d %s entry 0x%X: ",
+ task_id, (vm_offset_t) task->map, task_id, task->name,
+ (vm_offset_t) entry);
+
+ if (pmap_whatis(task->map->pmap, addr))
+ db_printf(" in $task%d %s\n", task_id, task->name);
+
+ if ((task == current_task() || task == kernel_task)
+ && addr >= vm_map_min(task->map)
+ && addr < vm_map_max(task->map)) {
+ db_printf("inside $map%d of $task%d %s\n", task_id, task_id, task->name);
+
+ for (entry = vm_map_first_entry(task->map);
+ entry != vm_map_to_entry(task->map);
+ entry = entry->vme_next)
+ if (addr >= entry->vme_start
+ && addr < entry->vme_end) {
+ db_printf(" entry 0x%X: ", (vm_offset_t) entry);
+ if (entry->is_sub_map)
+ db_printf("submap=0x%X, offset=0x%X\n",
+ (vm_offset_t) entry->object.sub_map,
+ (vm_offset_t) entry->offset);
+ else
+ db_printf("object=0x%X, offset=0x%X\n",
+ (vm_offset_t) entry->object.vm_object,
+ (vm_offset_t) entry->offset);
+ }
+ }
+
+ thread_id = 0;
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ if (addr >= (vm_offset_t) thread
+ && addr < (vm_offset_t) thread + sizeof(*thread)) {
+ db_printf("In $task%d %s\n", task_id, task->name);
+ db_print_thread(thread, thread_id, 0);
+ }
+ if (addr >= thread->kernel_stack
+ && addr < thread->kernel_stack + KERNEL_STACK_SIZE) {
+ db_printf("In $task%d %s\n", task_id, task->name);
+ db_printf(" on stack of $thread%d.%d\n", task_id, thread_id);
+ db_print_thread(thread, thread_id, 0);
+ }
+ thread_id++;
+ }
+ task_id++;
+ }
+ }
+
+ pmap_whatis(kernel_pmap, addr);
+
+ {
+ /* runqs */
+ if (addr >= (vm_offset_t) &default_pset.runq
+ && addr < (vm_offset_t) &default_pset.runq + sizeof(default_pset.runq))
+ db_printf("default runq %p\n", &default_pset.runq);
+ for (i = 0; i < smp_get_numcpus(); i++) {
+ processor_t proc = cpu_to_processor(i);
+ if (addr >= (vm_offset_t) &proc->runq
+ && addr < (vm_offset_t) &proc->runq + sizeof(proc->runq))
+ db_printf("Processor #%d runq %p\n", &proc->runq);
+ }
+ }
+
+ {
+ /* stacks */
+ for (i = 0; i < smp_get_numcpus(); i++) {
+ if (addr >= percpu_array[i].active_stack
+ && addr < percpu_array[i].active_stack + KERNEL_STACK_SIZE)
+ db_printf("Processor #%d active stack\n", i);
+ }
+ }
+
+ db_whatis_slab(addr);
+
+ {
+ /* page */
+ phys_addr_t pa;
+ if (DB_VALID_KERN_ADDR(addr))
+ pa = kvtophys(addr);
+ else
+ pa = pmap_extract(current_task()->map->pmap, addr);
+
+ if (pa) {
+ struct vm_page *page = vm_page_lookup_pa(pa);
+ db_printf("phys %llx, page %p\n", (unsigned long long) pa, page);
+ if (page) {
+ const char *types[] = {
+ [VM_PT_FREE] = "free",
+ [VM_PT_RESERVED] = "reserved",
+ [VM_PT_TABLE] = "table",
+ [VM_PT_KERNEL] = "kernel",
+ };
+ db_printf(" %s\n", types[page->type]);
+ db_printf(" free %u\n", page->free);
+ db_printf(" external %u\n", page->external);
+ db_printf(" busy %u\n", page->busy);
+ db_printf(" private %u\n", page->private);
+ db_printf(" object %lx\n", page->object);
+ db_printf(" offset %lx\n", page->offset);
+ db_printf(" wired %u\n", page->wire_count);
+ db_printf(" segment %u\n", page->seg_index);
+ db_printf(" order %u\n", page->order);
+ }
+ }
+ }
+}
+
+/*
+ * Print value.
+ */
+char db_print_format = 'x';
+
+/*ARGSUSED*/
+void
+db_print_cmd(void)
+{
+ db_expr_t value;
+ int t;
+ task_t task = TASK_NULL;
+
+ if ((t = db_read_token()) == tSLASH) {
+ if (db_read_token() != tIDENT) {
+ db_printf("Bad modifier \"/%s\"\n", db_tok_string);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ if (db_tok_string[0])
+ db_print_format = db_tok_string[0];
+ if (db_option(db_tok_string, 't') && db_default_thread)
+ task = db_default_thread->task;
+ } else
+ db_unread_token(t);
+
+ for ( ; ; ) {
+ t = db_read_token();
+ if (t == tSTRING) {
+ db_printf("%s", db_tok_string);
+ continue;
+ }
+ db_unread_token(t);
+ if (!db_expression(&value))
+ break;
+ switch (db_print_format) {
+ case 'a':
+ db_task_printsym((db_addr_t)value, DB_STGY_ANY, task);
+ break;
+ case 'r':
+ db_printf("%*r", 3+2*sizeof(db_expr_t), value);
+ break;
+ case 'x':
+ db_printf("%*x", 2*sizeof(db_expr_t), value);
+ break;
+ case 'z':
+ db_printf("%*z", 2*sizeof(db_expr_t), value);
+ break;
+ case 'd':
+ db_printf("%*d", 3+2*sizeof(db_expr_t), value);
+ break;
+ case 'u':
+ db_printf("%*u", 3+2*sizeof(db_expr_t), value);
+ break;
+ case 'o':
+ db_printf("%o", 4*sizeof(db_expr_t), value);
+ break;
+ case 'c':
+ value = value & 0xFF;
+ if (value >= ' ' && value <= '~')
+ db_printf("%c", value);
+ else
+ db_printf("\\%03o", value);
+ break;
+ default:
+ db_printf("Unknown format %c\n", db_print_format);
+ db_print_format = 'x';
+ db_error(0);
+ }
+ }
+}
+
+void
+db_print_loc_and_inst(
+ db_addr_t loc,
+ task_t task)
+{
+ db_task_printsym(loc, DB_STGY_PROC, task);
+ db_printf(":\t");
+ (void) db_disasm(loc, TRUE, task);
+}
+
+void
+db_strcpy(char *dst, const char *src)
+{
+ while ((*dst++ = *src++))
+ ;
+}
+
+/*
+ * Search for a value in memory.
+ * Syntax: search [/bhl] addr value [mask] [,count] [thread]
+ */
+void
+db_search_cmd(
+ db_expr_t e,
+ boolean_t b,
+ db_expr_t e2,
+ const char * cc)
+{
+ int t;
+ db_addr_t addr;
+ int size = 0;
+ db_expr_t value;
+ db_expr_t mask;
+ db_addr_t count;
+ thread_t thread;
+ boolean_t thread_flag = FALSE;
+ char *p;
+
+ t = db_read_token();
+ if (t == tSLASH) {
+ t = db_read_token();
+ if (t != tIDENT) {
+ bad_modifier:
+ db_printf("Bad modifier \"/%s\"\n", db_tok_string);
+ db_flush_lex();
+ return;
+ }
+
+ for (p = db_tok_string; *p; p++) {
+ switch(*p) {
+ case 'b':
+ size = sizeof(char);
+ break;
+ case 'h':
+ size = sizeof(short);
+ break;
+ case 'l':
+ size = sizeof(long);
+ break;
+ case 't':
+ thread_flag = TRUE;
+ break;
+ default:
+ goto bad_modifier;
+ }
+ }
+ } else {
+ db_unread_token(t);
+ size = sizeof(int);
+ }
+
+ if (!db_expression((db_expr_t *)&addr)) {
+ db_printf("Address missing\n");
+ db_flush_lex();
+ return;
+ }
+
+ if (!db_expression(&value)) {
+ db_printf("Value missing\n");
+ db_flush_lex();
+ return;
+ }
+
+ if (!db_expression(&mask))
+ mask = ~0;
+
+ t = db_read_token();
+ if (t == tCOMMA) {
+ if (!db_expression((db_expr_t *)&count)) {
+ db_printf("Count missing\n");
+ db_flush_lex();
+ return;
+ }
+ } else {
+ db_unread_token(t);
+ count = -1; /* effectively forever */
+ }
+ if (thread_flag) {
+ if (!db_get_next_thread(&thread, 0))
+ return;
+ } else
+ thread = THREAD_NULL;
+
+ db_search(addr, size, value, mask, count, db_thread_to_task(thread));
+}
+
+void
+db_search(
+ db_addr_t addr,
+ int size,
+ db_expr_t value,
+ db_expr_t mask,
+ unsigned int count,
+ task_t task)
+{
+ while (count-- != 0) {
+ db_prev = addr;
+ if ((db_get_task_value(addr, size, FALSE, task) & mask) == value)
+ break;
+ addr += size;
+ }
+ db_next = addr;
+}
+
+#define DB_XCDUMP_NC 16
+
+int
+db_xcdump(
+ db_addr_t addr,
+ int size,
+ int count,
+ task_t task)
+{
+ int i, n;
+ db_expr_t value;
+ int bcount;
+ db_addr_t off;
+ char *name;
+ char data[DB_XCDUMP_NC];
+
+ db_find_task_sym_and_offset(addr, &name, &off, task);
+ for (n = count*size; n > 0; n -= bcount) {
+ db_prev = addr;
+ if (off == 0) {
+ db_printf("%s:\n", name);
+ off = -1;
+ }
+ db_printf("%0*X:%s", 2*sizeof(db_addr_t), addr,
+ (size != 1)? " ": "");
+ bcount = ((n > DB_XCDUMP_NC)? DB_XCDUMP_NC: n);
+ if (trunc_page(addr) != trunc_page(addr+bcount-1)) {
+ db_addr_t next_page_addr = trunc_page(addr+bcount-1);
+ if (!DB_CHECK_ACCESS(next_page_addr, sizeof(int), task))
+ bcount = next_page_addr - addr;
+ }
+ if (!db_read_bytes(addr, bcount, data, task)) {
+ db_printf("*\n");
+ continue;
+ }
+ for (i = 0; i < bcount && off != 0; i += size) {
+ if (i % 4 == 0)
+ db_printf(" ");
+ value = db_get_task_value(addr, size, FALSE, task);
+ db_printf("%0*x ", size*2, value);
+ addr += size;
+ db_find_task_sym_and_offset(addr, &name, &off, task);
+ }
+ db_printf("%*s",
+ ((DB_XCDUMP_NC-i)/size)*(size*2+1)+(DB_XCDUMP_NC-i)/4,
+ "");
+ bcount = i;
+ db_printf("%s*", (size != 1)? " ": "");
+ for (i = 0; i < bcount; i++) {
+ value = data[i];
+ db_printf("%c", (value >= ' ' && value <= '~')? value: '.');
+ }
+ db_printf("*\n");
+ }
+ return(addr);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_examine.h b/ddb/db_examine.h
new file mode 100644
index 0000000..c76fa2a
--- /dev/null
+++ b/ddb/db_examine.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+
+#ifndef _DDB_DB_EXAMINE_H_
+#define _DDB_DB_EXAMINE_H_
+
+#include <sys/types.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_expr.h>
+
+extern void db_examine_cmd (
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char *modif);
+
+extern void db_strcpy (char *dst, const char *src);
+
+extern void db_examine (
+ db_addr_t addr,
+ const char *fmt,
+ int count,
+ task_t task);
+
+void db_examine_forward(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_examine_backward(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+extern void db_print_loc_and_inst (
+ db_addr_t loc,
+ task_t task);
+
+int db_xcdump(
+ db_addr_t addr,
+ int size,
+ int count,
+ task_t task);
+
+extern void db_whatis_cmd (
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char *modif);
+
+void db_print_cmd(void);
+
+void db_search_cmd(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_search(
+ db_addr_t addr,
+ int size,
+ db_expr_t value,
+ db_expr_t mask,
+ unsigned int count,
+ task_t task);
+
+/* instruction disassembler */
+extern db_addr_t db_disasm(
+ db_addr_t pc,
+ boolean_t altform,
+ task_t task);
+
+#endif /* _DDB_DB_EXAMINE_H_ */
diff --git a/ddb/db_expr.c b/ddb/db_expr.c
new file mode 100644
index 0000000..90edb6f
--- /dev/null
+++ b/ddb/db_expr.c
@@ -0,0 +1,382 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <kern/task.h>
+
+static boolean_t
+db_term(db_expr_t *valuep)
+{
+ int t;
+
+ switch(t = db_read_token()) {
+ case tIDENT:
+ if (!db_value_of_name(db_tok_string, valuep)) {
+ db_printf("Symbol \"%s\" not found\n", db_tok_string);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ return (TRUE);
+ case tNUMBER:
+ *valuep = db_tok_number;
+ return (TRUE);
+ case tDOT:
+ *valuep = (db_expr_t)db_dot;
+ return (TRUE);
+ case tDOTDOT:
+ *valuep = (db_expr_t)db_prev;
+ return (TRUE);
+ case tPLUS:
+ *valuep = (db_expr_t) db_next;
+ return (TRUE);
+ case tQUOTE:
+ *valuep = (db_expr_t)db_last_addr;
+ return (TRUE);
+ case tDOLLAR:
+ if (!db_get_variable(valuep))
+ return (FALSE);
+ return (TRUE);
+ case tLPAREN:
+ if (!db_expression(valuep)) {
+ db_error("Unmached ()s\n");
+ /*NOTREACHED*/
+ }
+ t = db_read_token();
+ if (t != tRPAREN) {
+ db_printf("')' expected at \"%s...\"\n", db_tok_string);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ return (TRUE);
+ default:
+ db_unread_token(t);
+ return (FALSE);
+ }
+}
+
+int
+db_size_option(const char *modif, boolean_t *u_option, boolean_t *t_option)
+{
+ const char *p;
+ int size = sizeof(int);
+
+ *u_option = FALSE;
+ *t_option = FALSE;
+ for (p = modif; *p; p++) {
+ switch(*p) {
+ case 'b':
+ size = sizeof(char);
+ break;
+ case 'h':
+ size = sizeof(short);
+ break;
+ case 'l':
+ size = sizeof(long);
+ break;
+ case 'u':
+ *u_option = TRUE;
+ break;
+ case 't':
+ *t_option = TRUE;
+ break;
+ }
+ }
+ return(size);
+}
+
+static boolean_t
+db_unary(db_expr_t *valuep)
+{
+ int t;
+ int size;
+ boolean_t u_opt, t_opt;
+ task_t task;
+ extern task_t db_default_task;
+
+ t = db_read_token();
+ if (t == tMINUS) {
+ if (!db_unary(valuep)) {
+ db_error("Expression syntax error after '-'\n");
+ /*NOTREACHED*/
+ }
+ *valuep = -*valuep;
+ return (TRUE);
+ }
+ if (t == tSTAR) {
+ /* indirection */
+ if (!db_unary(valuep)) {
+ db_error("Expression syntax error after '*'\n");
+ /*NOTREACHED*/
+ }
+ task = TASK_NULL;
+ size = sizeof(db_addr_t);
+ u_opt = FALSE;
+ t = db_read_token();
+ if (t == tIDENT && db_tok_string[0] == ':') {
+ size = db_size_option(&db_tok_string[1], &u_opt, &t_opt);
+ if (t_opt)
+ task = db_default_task;
+ } else
+ db_unread_token(t);
+ *valuep = db_get_task_value((db_addr_t)*valuep, size, !u_opt, task);
+ return (TRUE);
+ }
+ if (t == tEXCL) {
+ if (!db_unary(valuep)) {
+ db_error("Expression syntax error after '!'\n");
+ /*NOTREACHED*/
+ }
+ *valuep = (!(*valuep));
+ return (TRUE);
+ }
+ db_unread_token(t);
+ return (db_term(valuep));
+}
+
+static boolean_t
+db_mult_expr(db_expr_t *valuep)
+{
+ db_expr_t lhs = 0, rhs;
+ int t;
+ char c;
+
+ if (!db_unary(&lhs))
+ return (FALSE);
+
+ t = db_read_token();
+ while (t == tSTAR || t == tSLASH || t == tPCT || t == tHASH
+ || t == tBIT_AND) {
+ c = db_tok_string[0];
+ if (!db_term(&rhs)) {
+ db_printf("Expression syntax error after '%c'\n", c);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ switch(t) {
+ case tSTAR:
+ lhs *= rhs;
+ break;
+ case tBIT_AND:
+ lhs &= rhs;
+ break;
+ default:
+ if (rhs == 0) {
+ db_error("Divide by 0\n");
+ /*NOTREACHED*/
+ }
+ if (t == tSLASH)
+ lhs /= rhs;
+ else if (t == tPCT)
+ lhs %= rhs;
+ else
+ lhs = ((lhs+rhs-1)/rhs)*rhs;
+ }
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+static boolean_t
+db_add_expr(db_expr_t *valuep)
+{
+ db_expr_t lhs, rhs;
+ int t;
+ char c;
+
+ if (!db_mult_expr(&lhs))
+ return (FALSE);
+
+ t = db_read_token();
+ while (t == tPLUS || t == tMINUS || t == tBIT_OR) {
+ c = db_tok_string[0];
+ if (!db_mult_expr(&rhs)) {
+ db_printf("Expression syntax error after '%c'\n", c);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ if (t == tPLUS)
+ lhs += rhs;
+ else if (t == tMINUS)
+ lhs -= rhs;
+ else
+ lhs |= rhs;
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+static boolean_t
+db_shift_expr(db_expr_t *valuep)
+{
+ db_expr_t lhs, rhs;
+ int t;
+
+ if (!db_add_expr(&lhs))
+ return (FALSE);
+
+ t = db_read_token();
+ while (t == tSHIFT_L || t == tSHIFT_R) {
+ if (!db_add_expr(&rhs)) {
+ db_printf("Expression syntax error after \"%s\"\n",
+ (t == tSHIFT_L)? "<<": ">>");
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ if (rhs < 0) {
+ db_error("Negative shift amount\n");
+ /*NOTREACHED*/
+ }
+ if (t == tSHIFT_L)
+ lhs <<= rhs;
+ else {
+ /* Shift right is unsigned */
+ lhs = (natural_t) lhs >> rhs;
+ }
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+static boolean_t
+db_logical_relation_expr(db_expr_t *valuep)
+{
+ db_expr_t lhs, rhs;
+ int t;
+ char op[3];
+
+ if (!db_shift_expr(&lhs))
+ return(FALSE);
+
+ t = db_read_token();
+ while (t == tLOG_EQ || t == tLOG_NOT_EQ
+ || t == tGREATER || t == tGREATER_EQ
+ || t == tLESS || t == tLESS_EQ) {
+ op[0] = db_tok_string[0];
+ op[1] = db_tok_string[1];
+ op[2] = 0;
+ if (!db_shift_expr(&rhs)) {
+ db_printf("Expression syntax error after \"%s\"\n", op);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ switch(t) {
+ case tLOG_EQ:
+ lhs = (lhs == rhs);
+ break;
+ case tLOG_NOT_EQ:
+ lhs = (lhs != rhs);
+ break;
+ case tGREATER:
+ lhs = (lhs > rhs);
+ break;
+ case tGREATER_EQ:
+ lhs = (lhs >= rhs);
+ break;
+ case tLESS:
+ lhs = (lhs < rhs);
+ break;
+ case tLESS_EQ:
+ lhs = (lhs <= rhs);
+ break;
+ }
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+static boolean_t
+db_logical_and_expr(db_expr_t *valuep)
+{
+ db_expr_t lhs, rhs;
+ int t;
+
+ if (!db_logical_relation_expr(&lhs))
+ return(FALSE);
+
+ t = db_read_token();
+ while (t == tLOG_AND) {
+ if (!db_logical_relation_expr(&rhs)) {
+ db_error("Expression syntax error after \"&&\"\n");
+ /*NOTREACHED*/
+ }
+ lhs = (lhs && rhs);
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+static boolean_t
+db_logical_or_expr(db_expr_t *valuep)
+{
+ db_expr_t lhs, rhs;
+ int t;
+
+ if (!db_logical_and_expr(&lhs))
+ return(FALSE);
+
+ t = db_read_token();
+ while (t == tLOG_OR) {
+ if (!db_logical_and_expr(&rhs)) {
+ db_error("Expression syntax error after \"||\"\n");
+ /*NOTREACHED*/
+ }
+ lhs = (lhs || rhs);
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+int
+db_expression(db_expr_t *valuep)
+{
+ return (db_logical_or_expr(valuep));
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_expr.h b/ddb/db_expr.h
new file mode 100644
index 0000000..9c304e6
--- /dev/null
+++ b/ddb/db_expr.h
@@ -0,0 +1,26 @@
+/*
+ * (c) Copyright 1992, 1993, 1994, 1995 OPEN SOFTWARE FOUNDATION, INC.
+ * ALL RIGHTS RESERVED
+ */
+/*
+ * OSF RI nmk19b2 5/2/95
+ */
+
+#ifndef _DDB_DB_EXPR_H_
+#define _DDB_DB_EXPR_H_
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+
+/* Prototypes for functions exported by this module.
+ */
+
+int db_size_option(
+ const char *modif,
+ boolean_t *u_option,
+ boolean_t *t_option);
+
+int db_expression(db_expr_t *valuep);
+
+#endif /* !_DDB_DB_EXPR_H_ */
diff --git a/ddb/db_ext_symtab.c b/ddb/db_ext_symtab.c
new file mode 100644
index 0000000..db7bec2
--- /dev/null
+++ b/ddb/db_ext_symtab.c
@@ -0,0 +1,123 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+#if MACH_DEBUG
+
+#include <mach/mach_types.h> /* vm_address_t */
+#include <mach/std_types.h> /* pointer_t */
+#include <mach/vm_param.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <kern/host.h>
+#include <kern/mach_debug.server.h>
+#include <kern/task.h>
+#include <ddb/db_sym.h>
+
+
+
+/*
+ * Loads a symbol table for an external file into the kernel debugger.
+ * The symbol table data is an array of characters. It is assumed that
+ * the caller and the kernel debugger agree on its format.
+ */
+/*
+ * host: privileged host port (validated against HOST_NULL).
+ * task: task the symbols describe, or TASK_NULL.
+ * name: symbol-table name passed through to X_db_sym_init.
+ * symtab/symtab_count: out-of-line data (a vm_map_copy_t in
+ * disguise) holding the raw symbol-table bytes and length.
+ */
+kern_return_t
+host_load_symbol_table(
+ host_t host,
+ task_t task,
+ const char * name,
+ pointer_t symtab,
+ unsigned int symtab_count)
+{
+ kern_return_t result;
+ vm_offset_t symtab_start;
+ vm_offset_t symtab_end;
+ vm_map_t map;
+ vm_map_copy_t symtab_copy_object;
+
+ if (host == HOST_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ /*
+ * Copy the symbol table array into the kernel.
+ * We make a copy of the copy object, and clear
+ * the old one, so that returning error will not
+ * deallocate the data twice.
+ */
+ symtab_copy_object = (vm_map_copy_t) symtab;
+ result = vm_map_copyout(
+ kernel_map,
+ &symtab_start,
+ vm_map_copy_copy(symtab_copy_object));
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ symtab_end = symtab_start + symtab_count;
+
+ /*
+ * Add the symbol table.
+ * Do not keep a reference for the task map. XXX
+ */
+ if (task == TASK_NULL)
+ map = VM_MAP_NULL;
+ else
+ map = task->map;
+ if (!X_db_sym_init((char *)symtab_start,
+ (char *)symtab_end,
+ name,
+ (char *)map))
+ {
+ /*
+ * Not enough room for symbol table - failure.
+ */
+ (void) vm_deallocate(kernel_map,
+ symtab_start,
+ symtab_count);
+ return (KERN_FAILURE);
+ }
+
+ /*
+ * Wire down the symbol table
+ */
+ /*
+ * NOTE(review): assumes vm_map_copyout returned a
+ * page-aligned symtab_start, so rounding only the end
+ * wires the whole table - confirm.
+ */
+ (void) vm_map_pageable(kernel_map,
+ symtab_start,
+ round_page(symtab_end),
+ VM_PROT_READ|VM_PROT_WRITE,
+ TRUE, TRUE);
+
+ /*
+ * Discard the original copy object
+ */
+ vm_map_copy_discard(symtab_copy_object);
+
+ return (KERN_SUCCESS);
+}
+
+#endif /* MACH_DEBUG */
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_input.c b/ddb/db_input.c
new file mode 100644
index 0000000..357474b
--- /dev/null
+++ b/ddb/db_input.c
@@ -0,0 +1,414 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <device/cons.h>
+#include <ddb/db_command.h>
+#include <ddb/db_input.h>
+#include <ddb/db_output.h>
+
+#ifndef DB_HISTORY_SIZE
+#define DB_HISTORY_SIZE 4000
+#endif /* DB_HISTORY_SIZE */
+
+/*
+ * Character input and editing.
+ */
+
+/*
+ * We don't track output position while editing input,
+ * since input always ends with a new-line. We just
+ * reset the line position at the end.
+ */
+char * db_lbuf_start; /* start of input line buffer */
+char * db_lbuf_end; /* end of input line buffer */
+char * db_lc; /* current character */
+char * db_le; /* one past last character */
+#if DB_HISTORY_SIZE != 0
+char db_history[DB_HISTORY_SIZE]; /* start of history buffer */
+int db_history_size = DB_HISTORY_SIZE;/* size of history buffer */
+char * db_history_curr = db_history; /* start of current line */
+char * db_history_last = db_history; /* start of last line */
+char * db_history_prev = (char *) 0; /* start of previous line */
+#endif
+
+#define CTRL(c) ((c) & 0x1f)
+#define isspace(c) ((c) == ' ' || (c) == '\t')
+#define BLANK ' '
+#define BACKUP '\b'
+
+/* Write count characters of s to the console, one at a time. */
+static void
+db_putstring(const char *s, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ cnputc(s[i]);
+}
+
+/* Write the character c to the console count times. */
+static void
+db_putnchars(int c, int count)
+{
+ for (; count > 0; count--)
+ cnputc(c);
+}
+
+/*
+ * Delete N characters, forward or backward
+ */
+#define DEL_FWD 0
+#define DEL_BWD 1
+/*
+ * Delete n characters at the cursor and redraw the tail of
+ * the line. bwd is DEL_BWD to delete the n characters before
+ * the cursor (backspace style), DEL_FWD to delete at/after it.
+ */
+static void
+db_delete(
+ int n,
+ int bwd)
+{
+ char *p;
+
+ if (bwd) {
+ db_lc -= n;
+ db_putnchars(BACKUP, n);
+ }
+ /* shift the rest of the line left by n, echoing as we go */
+ for (p = db_lc; p < db_le-n; p++) {
+ *p = *(p+n);
+ cnputc(*p);
+ }
+ /* blank the vacated columns, then move the cursor back */
+ db_putnchars(BLANK, n);
+ db_putnchars(BACKUP, db_le - db_lc);
+ db_le -= n;
+}
+
+/* Erase the whole input line and reset cursor/end pointers. */
+static void
+db_delete_line(void)
+{
+ db_delete(db_le - db_lc, DEL_FWD);
+ db_delete(db_lc - db_lbuf_start, DEL_BWD);
+ db_le = db_lc = db_lbuf_start;
+}
+
+#if DB_HISTORY_SIZE != 0
+#define INC_DB_CURR() \
+ do { \
+ db_history_curr++; \
+ if (db_history_curr > \
+ db_history + db_history_size - 1) \
+ db_history_curr = db_history; \
+ } while (0)
+#define DEC_DB_CURR() \
+ do { \
+ db_history_curr--; \
+ if (db_history_curr < db_history) \
+ db_history_curr = db_history + \
+ db_history_size - 1; \
+ } while (0)
+#endif /* DB_HISTORY_SIZE */
+
+/* returns TRUE at end-of-line */
+/*
+ * Process one input character c, doing emacs-style editing on
+ * the line buffer between db_lbuf_start and db_le (cursor at
+ * db_lc). The static escaped/csi flags carry "ESC [" state
+ * across calls so arrow-key escape sequences can be decoded.
+ */
+static boolean_t
+db_inputchar(int c)
+{
+ static int escaped, csi;
+ int was_escaped = escaped, was_csi = csi;
+ escaped = 0;
+ csi = 0;
+
+ switch (c) {
+ case CTRL('b'):
+ left:
+ /* back up one character */
+ if (db_lc > db_lbuf_start) {
+ cnputc(BACKUP);
+ db_lc--;
+ }
+ break;
+ case CTRL('f'):
+ right:
+ /* forward one character */
+ if (db_lc < db_le) {
+ cnputc(*db_lc);
+ db_lc++;
+ }
+ break;
+ case CTRL('a'):
+ /* beginning of line */
+ while (db_lc > db_lbuf_start) {
+ cnputc(BACKUP);
+ db_lc--;
+ }
+ break;
+ case CTRL('e'):
+ /* end of line */
+ while (db_lc < db_le) {
+ cnputc(*db_lc);
+ db_lc++;
+ }
+ break;
+ case CTRL('h'):
+ case 0177:
+ /* erase previous character */
+ if (db_lc > db_lbuf_start)
+ db_delete(1, DEL_BWD);
+ break;
+ case CTRL('d'):
+ /* erase next character */
+ if (db_lc < db_le)
+ db_delete(1, DEL_FWD);
+ break;
+ case CTRL('k'):
+ /* delete to end of line */
+ if (db_lc < db_le)
+ db_delete(db_le - db_lc, DEL_FWD);
+ break;
+ case CTRL('u'):
+ /* delete line */
+ db_delete_line();
+ break;
+ case CTRL('t'):
+ /* twiddle last 2 characters */
+ if (db_lc >= db_lbuf_start + 2) {
+ c = db_lc[-2];
+ db_lc[-2] = db_lc[-1];
+ db_lc[-1] = c;
+ cnputc(BACKUP);
+ cnputc(BACKUP);
+ cnputc(db_lc[-2]);
+ cnputc(db_lc[-1]);
+ }
+ break;
+#if DB_HISTORY_SIZE != 0
+ case CTRL('p'):
+ up:
+ /* previous history line (circular buffer walk) */
+ DEC_DB_CURR();
+ while (db_history_curr != db_history_last) {
+ DEC_DB_CURR();
+ if (*db_history_curr == '\0')
+ break;
+ }
+ db_delete_line();
+ if (db_history_curr == db_history_last) {
+ INC_DB_CURR();
+ db_le = db_lc = db_lbuf_start;
+ } else {
+ char *p;
+ INC_DB_CURR();
+ for (p = db_history_curr, db_le = db_lbuf_start;
+ *p; ) {
+ *db_le++ = *p++;
+ if (p == db_history + db_history_size) {
+ p = db_history;
+ }
+ }
+ db_lc = db_le;
+ }
+ db_putstring(db_lbuf_start, db_le - db_lbuf_start);
+ break;
+ case CTRL('n'):
+ down:
+ /* next history line */
+ while (db_history_curr != db_history_last) {
+ if (*db_history_curr == '\0')
+ break;
+ INC_DB_CURR();
+ }
+ if (db_history_curr != db_history_last) {
+ INC_DB_CURR();
+ db_delete_line();
+ if (db_history_curr != db_history_last) {
+ char *p;
+ for (p = db_history_curr,
+ db_le = db_lbuf_start; *p;) {
+ *db_le++ = *p++;
+ if (p == db_history +
+ db_history_size) {
+ p = db_history;
+ }
+ }
+ db_lc = db_le;
+ }
+ db_putstring(db_lbuf_start, db_le - db_lbuf_start);
+ }
+ break;
+#endif /* DB_HISTORY_SIZE */
+ case CTRL('r'):
+ /* redraw the current line */
+ db_putstring("^R\n", 3);
+ if (db_le > db_lbuf_start) {
+ db_putstring(db_lbuf_start, db_le - db_lbuf_start);
+ db_putnchars(BACKUP, db_le - db_lc);
+ }
+ break;
+ case '\n':
+ case '\r':
+#if DB_HISTORY_SIZE != 0
+ /*
+ * Check whether current line is the same
+ * as previous saved line. If it is, don`t
+ * save it.
+ */
+ if (db_history_curr == db_history_prev) {
+ char *pp, *pc;
+
+ /*
+ * Is it the same?
+ */
+ for (pp = db_history_prev, pc = db_lbuf_start;
+ pc != db_le && *pp; ) {
+ if (*pp != *pc)
+ break;
+ if (++pp == db_history + db_history_size) {
+ pp = db_history;
+ }
+ pc++;
+ }
+ if (!*pp && pc == db_le) {
+ /*
+ * Repeated previous line. Don`t save.
+ */
+ db_history_curr = db_history_last;
+ *db_le++ = c;
+ return (TRUE);
+ }
+ }
+ if (db_le != db_lbuf_start) {
+ char *p;
+ db_history_prev = db_history_last;
+ for (p = db_lbuf_start; p != db_le; p++) {
+ *db_history_last++ = *p;
+ if (db_history_last == db_history +
+ db_history_size) {
+ db_history_last = db_history;
+ }
+ }
+ *db_history_last++ = '\0';
+ }
+ db_history_curr = db_history_last;
+#endif /* DB_HISTORY_SIZE */
+ *db_le++ = c;
+ return (TRUE);
+ /* ESC: may begin an ANSI escape sequence */
+ case '\033':
+ escaped = 1;
+ break;
+ /* '[' right after ESC: control sequence introducer */
+ case '[':
+ if (was_escaped)
+ csi = 1;
+ else
+ goto plain;
+ break;
+ /* ESC [ A/B/C/D are the arrow keys */
+ case 'A':
+ if (was_csi)
+ goto up;
+ else
+ goto plain;
+ case 'B':
+ if (was_csi)
+ goto down;
+ else
+ goto plain;
+ case 'C':
+ if (was_csi)
+ goto right;
+ else
+ goto plain;
+ case 'D':
+ if (was_csi)
+ goto left;
+ else
+ goto plain;
+
+ default:
+ plain:
+ /* insert printable characters at the cursor */
+ if (db_le == db_lbuf_end) {
+ cnputc('\007');
+ }
+ else if (c >= ' ' && c <= '~') {
+ char *p;
+
+ for (p = db_le; p > db_lc; p--)
+ *p = *(p-1);
+ *db_lc++ = c;
+ db_le++;
+ cnputc(c);
+ db_putstring(db_lc, db_le - db_lc);
+ db_putnchars(BACKUP, db_le - db_lc);
+ }
+ break;
+ }
+ return (FALSE);
+}
+
+/*
+ * Read one edited line of console input into lstart (at most
+ * lsize-1 characters including the trailing newline), NUL
+ * terminate it, and return its length. Polls cngetc until
+ * db_inputchar reports end-of-line.
+ */
+int
+db_readline(
+ char * lstart,
+ int lsize)
+{
+ db_force_whitespace(); /* synch output position */
+
+ db_lbuf_start = lstart;
+ db_lbuf_end = lstart + lsize - 1;
+ db_lc = lstart;
+ db_le = lstart;
+
+ while (!db_inputchar(cngetc()))
+ continue;
+
+ db_putchar('\n'); /* synch output position */
+
+ *db_le = 0;
+ return (db_le - db_lbuf_start);
+}
+
+/*
+ * Poll the console for a pending character: ^C aborts the
+ * current command via db_error, ^S suspends until ^Q (with
+ * ^C still honored), anything else is discarded.
+ */
+void
+db_check_interrupt(void)
+{
+ int c;
+
+ c = cnmaygetc();
+ switch (c) {
+ case -1: /* no character */
+ return;
+
+ case CTRL('c'):
+ db_error((char *)0);
+ /*NOTREACHED*/
+
+ case CTRL('s'):
+ do {
+ c = cnmaygetc();
+ if (c == CTRL('c'))
+ db_error((char *)0);
+ } while (c != CTRL('q'));
+ break;
+
+ default:
+ /* drop on floor */
+ break;
+ }
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_input.h b/ddb/db_input.h
new file mode 100644
index 0000000..352f035
--- /dev/null
+++ b/ddb/db_input.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+
+#ifndef _DDB_DB_INPUT_H_
+#define _DDB_DB_INPUT_H_
+
+#include <sys/types.h>
+
+/* Needs to be implemented by each arch. */
+extern void kdb_kintr(void);
+
+extern int db_readline (char *lstart, int lsize);
+
+extern void db_check_interrupt(void);
+
+#endif /* _DDB_DB_INPUT_H_ */
diff --git a/ddb/db_lex.c b/ddb/db_lex.c
new file mode 100644
index 0000000..49063e1
--- /dev/null
+++ b/ddb/db_lex.c
@@ -0,0 +1,454 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Lexical analyzer.
+ */
+#include <string.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_command.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_input.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+
+char db_line[DB_LEX_LINE_SIZE];
+char db_last_line[DB_LEX_LINE_SIZE];
+char *db_lp, *db_endlp;
+char *db_last_lp;
+int db_look_char = 0;
+db_expr_t db_look_token = 0;
+
+/*
+ * Read a command line into db_line and prime the lexer
+ * pointers. If repeat_last is non-NULL and the typed line
+ * begins with it, the previously saved line is substituted
+ * and echoed instead. Returns the number of characters
+ * available, or 0 on end-of-input.
+ */
+int
+db_read_line(const char *repeat_last)
+{
+ int i;
+
+ i = db_readline(db_line, sizeof(db_line));
+ if (i == 0)
+ return (0); /* EOI */
+ if (repeat_last) {
+ if (strncmp(db_line, repeat_last, strlen(repeat_last)) == 0) {
+ db_strcpy(db_line, db_last_line);
+ db_printf("%s", db_line);
+ i = strlen(db_line);
+ } else if (db_line[0] != '\n' && db_line[0] != 0)
+ db_strcpy(db_last_line, db_line);
+ }
+ db_lp = db_line;
+ db_endlp = db_lp + i;
+ db_last_lp = db_lp;
+ db_look_char = 0;
+ db_look_token = 0;
+ return (i);
+}
+
+/* Discard whatever remains of the current input line. */
+void
+db_flush_line(void)
+{
+ db_endlp = db_lp = db_last_lp = db_line;
+}
+
+/*
+ * Redirect the lexer to read from an external buffer of the
+ * given size; any pushed-back character or token is dropped.
+ */
+void
+db_switch_input(
+ char *buffer,
+ int size)
+{
+ db_lp = buffer;
+ db_last_lp = db_lp;
+ db_endlp = buffer + size;
+ db_look_char = 0;
+ db_look_token = 0;
+}
+
+/* Capture the lexer's position and pushed-back state in *lp. */
+void
+db_save_lex_context(struct db_lex_context *lp)
+{
+ lp->l_ptr = db_lp;
+ lp->l_eptr = db_endlp;
+ lp->l_char = db_look_char;
+ lp->l_token = db_look_token;
+}
+
+/* Restore lexer state captured by db_save_lex_context. */
+void
+db_restore_lex_context(const struct db_lex_context *lp)
+{
+ db_lp = lp->l_ptr;
+ db_last_lp = db_lp;
+ db_endlp = lp->l_eptr;
+ db_look_char = lp->l_char;
+ db_look_token = lp->l_token;
+}
+
+/*
+ * Return the next raw input character, or -1 at the end of
+ * the input buffer. A character pushed back with
+ * db_unread_char is returned first.
+ */
+int
+db_read_char(void)
+{
+ int c = db_look_char;
+
+ if (c != 0) {
+ db_look_char = 0;
+ return (c);
+ }
+ if (db_lp >= db_endlp)
+ return (-1);
+ return (*db_lp++);
+}
+
+/* Push back one character; the next db_read_char returns it. */
+void
+db_unread_char(int c)
+{
+ db_look_char = c;
+}
+
+/* Push back one token; the next db_read_token returns it. */
+void
+db_unread_token(int t)
+{
+ db_look_token = t;
+}
+
+/*
+ * Return the next token, honoring any pushed-back token
+ * first. db_last_lp is left pointing at the token's start so
+ * that skipped input can be reported later.
+ */
+int
+db_read_token(void)
+{
+ int t;
+
+ if (db_look_token) {
+ t = db_look_token;
+ db_look_token = 0;
+ }
+ else {
+ db_last_lp = db_lp;
+ if (db_look_char)
+ db_last_lp--;
+ t = db_lex();
+ }
+ return (t);
+}
+
+db_expr_t db_tok_number;
+char db_tok_string[TOK_STRING_SIZE];
+db_expr_t db_radix = 16;
+
+/* Drop the rest of the line plus any pushed-back char/token. */
+void
+db_flush_lex(void)
+{
+ db_flush_line();
+ db_look_char = 0;
+ db_look_token = 0;
+}
+
+#define DB_DISP_SKIP 40 /* number of chars to display skip */
+
+/*
+ * Consume tokens up to end of line (a ';' is pushed back so
+ * the command loop still sees it), printing a warning that
+ * shows up to DB_DISP_SKIP characters of the skipped input.
+ */
+void
+db_skip_to_eol(void)
+{
+ int skip;
+ int t;
+ int n;
+ char *p;
+
+ t = db_read_token();
+ p = db_last_lp;
+ for (skip = 0; t != tEOL && t != tSEMI_COLON && t != tEOF; skip++)
+ t = db_read_token();
+ if (t == tSEMI_COLON)
+ db_unread_token(t);
+ if (skip != 0) {
+ /* trim leading blanks from the echoed text */
+ while (p < db_last_lp && (*p == ' ' || *p == '\t'))
+ p++;
+ db_printf("Warning: Skipped input data \"");
+ for (n = 0; n < DB_DISP_SKIP && p < db_last_lp; n++)
+ db_printf("%c", *p++);
+ if (n >= DB_DISP_SKIP)
+ db_printf("....");
+ db_printf("\"\n");
+ }
+}
+
+/*
+ * Tokenize the next lexeme from the input line and return
+ * one of the t* token codes. Numbers leave their value in
+ * db_tok_number; identifiers and strings leave their text in
+ * db_tok_string. The default radix is db_radix; a leading
+ * 0o/0t/0x selects octal/decimal/hex explicitly.
+ */
+int
+db_lex(void)
+{
+ char *cp;
+ int c;
+
+ /* skip blanks and other non-printing characters */
+ c = db_read_char();
+ while (c <= ' ' || c > '~') {
+ if (c == '\n' || c == -1)
+ return (tEOL);
+ c = db_read_char();
+ }
+
+ cp = db_tok_string;
+ *cp++ = c;
+
+ if (c >= '0' && c <= '9') {
+ /* number */
+ int r, digit;
+
+ if (c > '0')
+ r = db_radix;
+ else {
+ /* leading 0: look for an explicit radix letter */
+ c = db_read_char();
+ if (c == 'O' || c == 'o')
+ r = 8;
+ else if (c == 'T' || c == 't')
+ r = 10;
+ else if (c == 'X' || c == 'x')
+ r = 16;
+ else {
+ cp--;
+ r = db_radix;
+ db_unread_char(c);
+ }
+ c = db_read_char();
+ *cp++ = c;
+ }
+ db_tok_number = 0;
+ for (;;) {
+ if (c >= '0' && c <= ((r == 8) ? '7' : '9'))
+ digit = c - '0';
+ else if (r == 16 && ((c >= 'A' && c <= 'F') ||
+ (c >= 'a' && c <= 'f'))) {
+ if (c >= 'a')
+ digit = c - 'a' + 10;
+ else
+ digit = c - 'A' + 10;
+ }
+ else
+ break;
+ db_tok_number = db_tok_number * r + digit;
+ c = db_read_char();
+ if (cp < &db_tok_string[sizeof(db_tok_string)-1])
+ *cp++ = c;
+ }
+ cp[-1] = 0;
+ /* a trailing letter/underscore makes the number invalid */
+ if ((c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ (c == '_'))
+ {
+ db_printf("Bad character '%c' after number %s\n",
+ c, db_tok_string);
+ db_error(0);
+ db_flush_lex();
+ return (tEOF);
+ }
+ db_unread_char(c);
+ return (tNUMBER);
+ }
+ if ((c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ c == '_' || c == '\\' || c == ':')
+ {
+ /* identifier */
+ /* '\\' quotes the next character into the identifier */
+ if (c == '\\') {
+ c = db_read_char();
+ if (c == '\n' || c == -1)
+ db_error("Bad '\\' at the end of line\n");
+ cp[-1] = c;
+ }
+ while (1) {
+ c = db_read_char();
+ if ((c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ c == '_' || c == '\\' || c == ':' || c == '.')
+ {
+ if (c == '\\') {
+ c = db_read_char();
+ if (c == '\n' || c == -1)
+ db_error("Bad '\\' at the end of line\n");
+ }
+ *cp++ = c;
+ if (cp == db_tok_string+sizeof(db_tok_string)) {
+ db_error("String too long\n");
+ db_flush_lex();
+ return (tEOF);
+ }
+ continue;
+ }
+ else {
+ *cp = '\0';
+ break;
+ }
+ }
+ db_unread_char(c);
+ return (tIDENT);
+ }
+
+ /* single- and two-character operators */
+ *cp = 0;
+ switch (c) {
+ case '+':
+ return (tPLUS);
+ case '-':
+ return (tMINUS);
+ case '.':
+ c = db_read_char();
+ if (c == '.') {
+ *cp++ = c;
+ *cp = 0;
+ return (tDOTDOT);
+ }
+ db_unread_char(c);
+ return (tDOT);
+ case '*':
+ return (tSTAR);
+ case '/':
+ return (tSLASH);
+ case '=':
+ c = db_read_char();
+ if (c == '=') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_EQ);
+ }
+ db_unread_char(c);
+ return (tEQ);
+ case '%':
+ return (tPCT);
+ case '#':
+ return (tHASH);
+ case '(':
+ return (tLPAREN);
+ case ')':
+ return (tRPAREN);
+ case ',':
+ return (tCOMMA);
+ case '\'':
+ return (tQUOTE);
+ case '"':
+ /* string */
+ cp = db_tok_string;
+ c = db_read_char();
+ while (c != '"' && c > 0 && c != '\n') {
+ if (cp >= &db_tok_string[sizeof(db_tok_string)-1]) {
+ db_error("Too long string\n");
+ db_flush_lex();
+ return (tEOF);
+ }
+ if (c == '\\') {
+ c = db_read_char();
+ switch(c) {
+ case 'n':
+ c = '\n'; break;
+ case 't':
+ c = '\t'; break;
+ case '\\':
+ case '"':
+ break;
+ default:
+ db_printf("Bad escape sequence '\\%c'\n", c);
+ db_error(0);
+ db_flush_lex();
+ return (tEOF);
+ }
+ }
+ *cp++ = c;
+ c = db_read_char();
+ }
+ *cp = 0;
+ if (c != '"') {
+ db_error("Non terminated string constant\n");
+ db_flush_lex();
+ return (tEOF);
+ }
+ return (tSTRING);
+ case '$':
+ return (tDOLLAR);
+ case '!':
+ c = db_read_char();
+ if (c == '=') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_NOT_EQ);
+ }
+ db_unread_char(c);
+ return (tEXCL);
+ case '&':
+ c = db_read_char();
+ if (c == '&') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_AND);
+ }
+ db_unread_char(c);
+ return(tBIT_AND);
+ case '|':
+ c = db_read_char();
+ if (c == '|') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_OR);
+ }
+ db_unread_char(c);
+ return(tBIT_OR);
+ case '<':
+ c = db_read_char();
+ *cp++ = c;
+ *cp = 0;
+ if (c == '<')
+ return (tSHIFT_L);
+ if (c == '=')
+ return (tLESS_EQ);
+ cp[-1] = 0;
+ db_unread_char(c);
+ return(tLESS);
+ break;
+ case '>':
+ c = db_read_char();
+ *cp++ = c;
+ *cp = 0;
+ if (c == '>')
+ return (tSHIFT_R);
+ if (c == '=')
+ return (tGREATER_EQ);
+ cp[-1] = 0;
+ db_unread_char(c);
+ return (tGREATER);
+ break;
+ case ';':
+ return (tSEMI_COLON);
+ case '?':
+ return (tQUESTION);
+ case -1:
+ db_strcpy(db_tok_string, "<EOL>");
+ return (tEOF);
+ }
+ db_printf("Bad character '%c'\n", c);
+ db_flush_lex();
+ return (tEOF);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_lex.h b/ddb/db_lex.h
new file mode 100644
index 0000000..f7677df
--- /dev/null
+++ b/ddb/db_lex.h
@@ -0,0 +1,99 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+/*
+ * Lexical analyzer.
+ */
+
+#ifndef _DDB_DB_LEX_H_
+#define _DDB_DB_LEX_H_
+
+#define TOK_STRING_SIZE 64
+#define DB_LEX_LINE_SIZE 256
+
+struct db_lex_context {
+ int l_char; /* peek char */
+ int l_token; /* peek token */
+ char *l_ptr; /* line pointer */
+ char *l_eptr; /* line end pointer */
+};
+
+extern int db_lex(void);
+extern int db_read_line(const char *rep_str);
+extern void db_flush_line(void);
+extern int db_read_char(void);
+extern void db_unread_char(int c);
+extern int db_read_token(void);
+extern void db_unread_token(int t);
+extern void db_flush_lex(void);
+extern void db_switch_input(char *, int);
+extern void db_save_lex_context(struct db_lex_context *);
+extern void db_restore_lex_context(const struct db_lex_context *);
+extern void db_skip_to_eol(void);
+
+extern db_expr_t db_tok_number;
+extern char db_tok_string[TOK_STRING_SIZE];
+extern db_expr_t db_radix;
+
+#define tEOF (-1)
+#define tEOL 1
+#define tNUMBER 2
+#define tIDENT 3
+#define tPLUS 4
+#define tMINUS 5
+#define tDOT 6
+#define tSTAR 7
+#define tSLASH 8
+#define tEQ 9
+#define tLPAREN 10
+#define tRPAREN 11
+#define tPCT 12
+#define tHASH 13
+#define tCOMMA 14
+#define tQUOTE 15
+#define tDOLLAR 16
+#define tEXCL 17
+#define tSHIFT_L 18
+#define tSHIFT_R 19
+#define tDOTDOT 20
+#define tSEMI_COLON 21
+#define tLOG_EQ 22
+#define tLOG_NOT_EQ 23
+#define tLESS 24
+#define tLESS_EQ 25
+#define tGREATER 26
+#define tGREATER_EQ 27
+#define tBIT_AND 28
+#define tBIT_OR 29
+#define tLOG_AND 30
+#define tLOG_OR 31
+#define tSTRING 32
+#define tQUESTION 33
+
+#endif /* _DDB_DB_LEX_H_ */
diff --git a/ddb/db_macro.c b/ddb/db_macro.c
new file mode 100644
index 0000000..63159d7
--- /dev/null
+++ b/ddb/db_macro.c
@@ -0,0 +1,197 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+#include <string.h>
+#include <kern/thread.h>
+
+#include <machine/db_machdep.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_command.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_macro.h>
+#include <ddb/db_output.h>
+
+
+/*
+ * debugger macro support
+ */
+
+#define DB_MACRO_LEVEL 5 /* max macro nesting */
+#define DB_NARGS 10 /* max args */
+#define DB_NUSER_MACRO 10 /* max user macros */
+
+int db_macro_free = DB_NUSER_MACRO;
+struct db_user_macro {
+ char m_name[TOK_STRING_SIZE];
+ char m_lbuf[DB_LEX_LINE_SIZE];
+ int m_size;
+} db_user_macro[DB_NUSER_MACRO];
+
+int db_macro_level = 0;
+db_expr_t db_macro_args[DB_MACRO_LEVEL][DB_NARGS];
+
+/* Find a defined user macro by name; returns 0 if absent. */
+static struct db_user_macro *
+db_lookup_macro(const char *name)
+{
+ struct db_user_macro *mp;
+ struct db_user_macro *end = &db_user_macro[DB_NUSER_MACRO];
+
+ for (mp = db_user_macro; mp < end; mp++) {
+ if (mp->m_name[0] != 0 && strcmp(mp->m_name, name) == 0)
+ return(mp);
+ }
+ return(0);
+}
+
+/*
+ * "macro" command: define (or redefine) a user macro. The
+ * next token is the macro name and the rest of the input
+ * line becomes its body. addr/have_addr/count/modif are the
+ * standard command arguments, unused here. db_error aborts
+ * back to the command loop and does not return.
+ */
+void
+db_def_macro_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ char *p;
+ int c;
+ struct db_user_macro *mp, *ep;
+
+ if (db_read_token() != tIDENT) {
+ db_printf("Bad macro name \"%s\"\n", db_tok_string);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ if ((mp = db_lookup_macro(db_tok_string)) == 0) {
+ /* new macro: claim the first free slot */
+ if (db_macro_free <= 0)
+ db_error("Too many macros\n");
+ /* NOTREACHED */
+ ep = &db_user_macro[DB_NUSER_MACRO];
+ for (mp = db_user_macro; mp < ep && mp->m_name[0]; mp++);
+ if (mp >= ep)
+ db_error("ddb: internal error(macro)\n");
+ /* NOTREACHED */
+ db_macro_free--;
+ db_strcpy(mp->m_name, db_tok_string);
+ }
+ /* skip blanks, then copy the remainder of the line as the body */
+ for (c = db_read_char(); c == ' ' || c == '\t'; c = db_read_char());
+ for (p = mp->m_lbuf; c > 0; c = db_read_char())
+ *p++ = c;
+ *p = 0;
+ mp->m_size = p - mp->m_lbuf;
+}
+
+/*
+ * "dmacro" command: delete the user macro named by the next
+ * token and return its slot to the free pool.
+ */
+void
+db_del_macro_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ struct db_user_macro *mp;
+
+ if (db_read_token() != tIDENT
+ || (mp = db_lookup_macro(db_tok_string)) == 0) {
+ db_printf("No such macro \"%s\"\n", db_tok_string);
+ db_error(0);
+ /* NOTREACHED */
+ } else {
+ mp->m_name[0] = 0;
+ db_macro_free++;
+ }
+}
+
+/*
+ * "show macro" command: list defined user macros. With an
+ * identifier argument only that macro is shown; otherwise
+ * all are listed.
+ */
+void
+db_show_macro(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ struct db_user_macro *mp;
+ int t;
+ char *name = 0;
+
+ if ((t = db_read_token()) == tIDENT)
+ name = db_tok_string;
+ else
+ db_unread_token(t);
+ for (mp = db_user_macro; mp < &db_user_macro[DB_NUSER_MACRO]; mp++) {
+ if (mp->m_name[0] == 0)
+ continue;
+ if (name && strcmp(mp->m_name, name))
+ continue;
+ db_printf("%s: %s", mp->m_name, mp->m_lbuf);
+ }
+}
+
+/*
+ * Execute the user macro called name. Up to DB_NARGS
+ * expression arguments are evaluated into the next nesting
+ * level of db_macro_args (unused slots zeroed), then the
+ * macro body is run as a nested command line. Returns 0 on
+ * success, -1 if no such macro exists.
+ */
+int
+db_exec_macro(const char *name)
+{
+ struct db_user_macro *mp;
+ int n;
+
+ if ((mp = db_lookup_macro(name)) == 0)
+ return(-1);
+ if (db_macro_level+1 >= DB_MACRO_LEVEL) {
+ db_macro_level = 0;
+ db_error("Too many macro nest\n");
+ /* NOTREACHED */
+ }
+ for (n = 0;
+ n < DB_NARGS &&
+ db_expression(&db_macro_args[db_macro_level+1][n]);
+ n++);
+ while (n < DB_NARGS)
+ db_macro_args[db_macro_level+1][n++] = 0;
+ db_macro_level++;
+ db_exec_cmd_nest(mp->m_lbuf, mp->m_size);
+ db_macro_level--;
+ return(0);
+}
+
+/*
+ * Accessor for the $argN debugger variables: reads or writes
+ * (per flag, DB_VAR_GET vs. set) argument N of the current
+ * macro nesting level. ap->suffix[0] carries N (1-based, at
+ * most DB_NARGS); any other form is rejected via db_error.
+ */
+void
+/* ARGSUSED */
+db_arg_variable(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
+{
+ if (ap->level != 1 || ap->suffix[0] < 1 || ap->suffix[0] > DB_NARGS) {
+ db_error("Bad $arg variable\n");
+ /* NOTREACHED */
+ }
+ if (flag == DB_VAR_GET)
+ *valuep = db_macro_args[db_macro_level][ap->suffix[0]-1];
+ else
+ db_macro_args[db_macro_level][ap->suffix[0]-1] = *valuep;
+ return;
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_macro.h b/ddb/db_macro.h
new file mode 100644
index 0000000..9188247
--- /dev/null
+++ b/ddb/db_macro.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+
+#ifndef _DDB_DB_MACRO_H_
+#define _DDB_DB_MACRO_H_
+
+#include <sys/types.h>
+#include <ddb/db_variables.h>
+
+extern void db_def_macro_cmd (
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+extern void db_del_macro_cmd (
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+extern void db_show_macro (
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+extern int db_exec_macro (const char *name);
+
+extern void db_arg_variable (
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap);
+
+#endif /* _DDB_DB_MACRO_H_ */
diff --git a/ddb/db_mp.c b/ddb/db_mp.c
new file mode 100644
index 0000000..5cf800c
--- /dev/null
+++ b/ddb/db_mp.c
@@ -0,0 +1,339 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+#if NCPUS > 1
+
+#include <mach/boolean.h>
+#include <mach/machine.h>
+
+#include <kern/cpu_number.h>
+#include <kern/lock.h>
+
+#include <machine/db_machdep.h>
+#include <machine/db_interface.h>
+
+#include <ddb/db_command.h>
+#include <ddb/db_input.h>
+#include <ddb/db_run.h>
+#include <ddb/db_mp.h>
+#include <ddb/db_output.h>
+
+/*
+ * Routines to interlock access to the kernel debugger on
+ * multiprocessors.
+ */
+
+int db_spl;
+def_simple_lock_irq_data(static,db_lock) /* lock to enter debugger */
+volatile int db_cpu = -1; /* CPU currently in debugger */
+ /* -1 if none */
+int db_active[NCPUS] = { 0 }; /* count recursive entries
+ into debugger */
+int db_slave[NCPUS] = { 0 }; /* nonzero if cpu interrupted
+ by another cpu in debugger */
+
+boolean_t db_enter_debug = FALSE;
+
+/*
+ * Called when entering kernel debugger.
+ * Takes db lock. If we were called remotely (slave state) we just
+ * wait for db_cpu to be equal to cpu_number(). Otherwise enter debugger
+ * if not active on another cpu
+ */
+
+boolean_t
+db_enter(void)
+{
+ int mycpu = cpu_number();
+
+ /*
+ * Count recursive entries to debugger.
+ */
+ db_active[mycpu]++;
+
+ /*
+ * Wait for other CPUS to leave debugger.
+ */
+ db_spl = lock_db();
+
+ if (db_enter_debug)
+ db_printf(
+ "db_enter: cpu %d[%d], master %d, db_cpu %d, run mode %d\n",
+ mycpu, db_slave[mycpu], master_cpu, db_cpu, db_run_mode);
+
+ /*
+ * If no CPU in debugger, and I am not being stopped,
+ * enter the debugger.
+ */
+ if (db_cpu == -1 && !db_slave[mycpu]) {
+ remote_db(); /* stop other cpus */
+ db_cpu = mycpu;
+ return TRUE;
+ }
+ /*
+ * If I am already in the debugger (recursive entry
+ * or returning from single step), enter debugger.
+ */
+ else if (db_cpu == mycpu)
+ return TRUE;
+ /*
+ * Otherwise, cannot enter debugger.
+ */
+ else
+ return FALSE;
+}
+
+/*
+ * Leave debugger.
+ */
+void
+db_leave(void)
+{
+ int mycpu = cpu_number();
+
+ /*
+ * If continuing, give up debugger
+ */
+ if (db_run_mode == STEP_CONTINUE)
+ db_cpu = -1;
+
+ /*
+ * If I am a slave, drop my slave count.
+ */
+ if (db_slave[mycpu])
+ db_slave[mycpu]--;
+ if (db_enter_debug)
+ db_printf("db_leave: cpu %d[%d], db_cpu %d, run_mode %d\n",
+ mycpu, db_slave[mycpu], db_cpu, db_run_mode);
+ /*
+ * Unlock debugger.
+ */
+ unlock_db(db_spl);
+
+ /*
+ * Drop recursive entry count.
+ */
+ db_active[mycpu]--;
+}
+
+
+/*
+ * invoke kernel debugger on slave processors
+ */
+
+void
+remote_db(void) {
+ int my_cpu = cpu_number();
+ int i;
+
+ for (i = 0; i < NCPUS; i++) {
+ if (i != my_cpu &&
+ machine_slot[i].is_cpu &&
+ machine_slot[i].running)
+ {
+ cpu_interrupt_to_db(i);
+ }
+ }
+}
+
+/*
+ * Save and restore DB global registers.
+ *
+ * DB_SAVE_CTXT must be at the start of a block, and
+ * DB_RESTORE_CTXT must be in the same block.
+ */
+
+#ifdef __STDC__
+#define DB_SAVE(type, name) extern type name; type name##_save = name
+#define DB_RESTORE(name) name = name##_save
+#else /* __STDC__ */
+#define DB_SAVE(type, name) extern type name; type name/**/_save = name
+#define DB_RESTORE(name) name = name/**/_save
+#endif /* __STDC__ */
+
+#define DB_SAVE_CTXT() \
+ DB_SAVE(int, db_run_mode); \
+ DB_SAVE(boolean_t, db_sstep_print); \
+ DB_SAVE(int, db_loop_count); \
+ DB_SAVE(int, db_call_depth); \
+ DB_SAVE(int, db_inst_count); \
+ DB_SAVE(int, db_last_inst_count); \
+ DB_SAVE(int, db_load_count); \
+ DB_SAVE(int, db_store_count); \
+ DB_SAVE(boolean_t, db_cmd_loop_done); \
+ DB_SAVE(jmp_buf_t *, db_recover); \
+ DB_SAVE(db_addr_t, db_dot); \
+ DB_SAVE(db_addr_t, db_last_addr); \
+ DB_SAVE(db_addr_t, db_prev); \
+ DB_SAVE(db_addr_t, db_next); \
+ SAVE_DDB_REGS
+
+#define DB_RESTORE_CTXT() \
+ DB_RESTORE(db_run_mode); \
+ DB_RESTORE(db_sstep_print); \
+ DB_RESTORE(db_loop_count); \
+ DB_RESTORE(db_call_depth); \
+ DB_RESTORE(db_inst_count); \
+ DB_RESTORE(db_last_inst_count); \
+ DB_RESTORE(db_load_count); \
+ DB_RESTORE(db_store_count); \
+ DB_RESTORE(db_cmd_loop_done); \
+ DB_RESTORE(db_recover); \
+ DB_RESTORE(db_dot); \
+ DB_RESTORE(db_last_addr); \
+ DB_RESTORE(db_prev); \
+ DB_RESTORE(db_next); \
+ RESTORE_DDB_REGS
+
+/*
+ * switch to another cpu
+ */
+void
+db_on(int cpu)
+{
+ /*
+ * Save ddb global variables
+ */
+ DB_SAVE_CTXT();
+
+ /*
+	 * Don't do it if the CPU number is bad.
+ * CPU must also be spinning in db_entry.
+ */
+ if (cpu < 0 || cpu >= NCPUS || !db_active[cpu])
+ return;
+
+ /*
+ * Give debugger to that CPU
+ */
+ db_cpu = cpu;
+ unlock_db(db_spl);
+
+ /*
+ * Wait for it to come back again
+ */
+ db_spl = lock_db();
+
+ /*
+ * Restore ddb globals
+ */
+ DB_RESTORE_CTXT();
+
+ if (db_cpu == -1) /* someone continued */
+ db_continue_cmd(0, 0, 0, "");
+}
+
+/*
+ * Called by interprocessor interrupt when one CPU is
+ * in kernel debugger and wants to stop other CPUs
+ */
+void
+remote_db_enter(void)
+{
+ db_slave[cpu_number()]++;
+ kdb_kintr();
+}
+
+/*
+ * Acquire kernel debugger.
+ * Conditional code for forwarding characters from slave to console
+ * if console on master only.
+ */
+
+/*
+ * As long as db_cpu is neither -1 nor cpu_number(), we know that the
+ * debugger is active on another cpu.
+ */
+int
+lock_db(void)
+{
+ int my_cpu = cpu_number();
+ int s;
+
+ for (;;) {
+#if CONSOLE_ON_MASTER
+ if (my_cpu == master_cpu) {
+ db_console();
+ }
+#endif /* CONSOLE_ON_MASTER */
+ if (db_cpu != -1 && db_cpu != my_cpu)
+ continue;
+
+#if CONSOLE_ON_MASTER
+ if (my_cpu == master_cpu) {
+ if (!(s = simple_lock_try_irq(&db_lock)))
+ continue;
+ }
+ else {
+ s = simple_lock_irq(&db_lock);
+ }
+#else /* CONSOLE_ON_MASTER */
+ s = simple_lock_irq(&db_lock);
+#endif /* CONSOLE_ON_MASTER */
+ if (db_cpu == -1 || db_cpu == my_cpu)
+ break;
+ unlock_db(s);
+ }
+
+ return s;
+}
+
+void
+unlock_db(int s)
+{
+ simple_unlock_irq(s, &db_lock);
+}
+
+#if CONSOLE_ON_MASTER
+void
+db_console(void)
+{
+ if (i_bit(CBUS_PUT_CHAR, my_word)) {
+ volatile u_char c = cbus_ochar;
+ i_bit_clear(CBUS_PUT_CHAR, my_word);
+ cnputc(c);
+ } else if (i_bit(CBUS_GET_CHAR, my_word)) {
+ if (cbus_wait_char)
+ cbus_ichar = cngetc();
+ else
+ cbus_ichar = cnmaygetc();
+ i_bit_clear(CBUS_GET_CHAR, my_word);
+#ifndef notdef
+ } else if (!cnmaygetc()) {
+#else /* notdef */
+ } else if (com_is_char() && !com_getc(TRUE)) {
+#endif /* notdef */
+ simple_unlock(&db_lock);
+ db_cpu = my_cpu;
+ }
+}
+#endif /* CONSOLE_ON_MASTER */
+
+#endif /* NCPUS > 1 */
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_mp.h b/ddb/db_mp.h
new file mode 100644
index 0000000..8a0a9e1
--- /dev/null
+++ b/ddb/db_mp.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DDB_DB_MP_H_
+#define _DDB_DB_MP_H_
+
+void remote_db(void);
+int lock_db(void);
+void unlock_db(int);
+void db_on(int i);
+
+#if CONSOLE_ON_MASTER
+void db_console(void);
+#endif /* CONSOLE_ON_MASTER */
+
+boolean_t db_enter(void);
+void remote_db_enter(void);
+void db_leave(void);
+
+#endif /* _DDB_DB_MP_H_ */
diff --git a/ddb/db_output.c b/ddb/db_output.c
new file mode 100644
index 0000000..9a76f54
--- /dev/null
+++ b/ddb/db_output.c
@@ -0,0 +1,217 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Printf and character output for debugger.
+ */
+
+#include <kern/printf.h>
+#include <stdarg.h>
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <device/cons.h>
+#include <ddb/db_command.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+#include <ddb/db_input.h>
+
+/*
+ * Character output - tracks position in line.
+ * To do this correctly, we should know how wide
+ * the output device is - then we could zero
+ * the line position when the output device wraps
+ * around to the start of the next line.
+ *
+ * Instead, we count the number of spaces printed
+ * since the last printing character so that we
+ * don't print trailing spaces. This avoids most
+ * of the wraparounds.
+ */
+
+#ifndef DB_MAX_LINE
+#define DB_MAX_LINE 24 /* maximum line */
+#define DB_MAX_WIDTH 80 /* maximum width */
+#endif /* DB_MAX_LINE */
+
+#define DB_MIN_MAX_WIDTH 20 /* minimum max width */
+#define DB_MIN_MAX_LINE 3 /* minimum max line */
+#define CTRL(c) ((c) & 0xff)
+
+int db_output_position = 0; /* output column */
+int db_output_line = 0; /* output line number */
+int db_last_non_space = 0; /* last non-space character */
+int db_tab_stop_width = 8; /* how wide are tab stops? */
+#define NEXT_TAB(i) \
+ ((((i) + db_tab_stop_width) / db_tab_stop_width) * db_tab_stop_width)
+int db_max_line = DB_MAX_LINE; /* output max lines */
+int db_max_width = DB_MAX_WIDTH; /* output line width */
+
+/*
+ * Force pending whitespace.
+ */
+void
+db_force_whitespace(void)
+{
+ int last_print, next_tab;
+
+ last_print = db_last_non_space;
+ while (last_print < db_output_position) {
+ next_tab = NEXT_TAB(last_print);
+ if (next_tab <= db_output_position) {
+ cnputc('\t');
+ last_print = next_tab;
+ }
+ else {
+ cnputc(' ');
+ last_print++;
+ }
+ }
+ db_last_non_space = db_output_position;
+}
+
+static void
+db_more(void)
+{
+ char *p;
+ boolean_t quit_output = FALSE;
+
+ for (p = "--db_more--"; *p; p++)
+ cnputc(*p);
+ switch(cngetc()) {
+ case ' ':
+ db_output_line = 0;
+ break;
+ case 'q':
+ case CTRL('c'):
+ db_output_line = 0;
+ quit_output = TRUE;
+ break;
+ default:
+ db_output_line--;
+ break;
+ }
+ p = "\b\b\b\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b\b\b\b";
+ while (*p)
+ cnputc(*p++);
+ if (quit_output) {
+ db_error(0);
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Output character. Buffer whitespace.
+ */
+void
+db_putchar(int c) /* character to output */
+{
+ if (db_max_line >= DB_MIN_MAX_LINE && db_output_line >= db_max_line-1)
+ db_more();
+ if (c > ' ' && c <= '~') {
+ /*
+ * Printing character.
+ * If we have spaces to print, print them first.
+ * Use tabs if possible.
+ */
+ db_force_whitespace();
+ cnputc(c);
+ db_output_position++;
+ if (db_max_width >= DB_MIN_MAX_WIDTH
+ && db_output_position >= db_max_width) {
+ /* auto new line */
+ cnputc('\n');
+ db_output_position = 0;
+ db_last_non_space = 0;
+ db_output_line++;
+ }
+ db_last_non_space = db_output_position;
+ }
+ else if (c == '\n') {
+ /* Return */
+ cnputc(c);
+ db_output_position = 0;
+ db_last_non_space = 0;
+ db_output_line++;
+ db_check_interrupt();
+ }
+ else if (c == '\t') {
+ /* assume tabs every 8 positions */
+ db_output_position = NEXT_TAB(db_output_position);
+ }
+ else if (c == ' ') {
+ /* space */
+ db_output_position++;
+ }
+ else if (c == '\007') {
+ /* bell */
+ cnputc(c);
+ }
+ /* other characters are assumed non-printing */
+}
+
+static void
+db_id_putc(char c, vm_offset_t dummy)
+{
+ db_putchar(c);
+}
+
+/*
+ * Return output position
+ */
+int __attribute__ ((pure))
+db_print_position(void)
+{
+ return (db_output_position);
+}
+
+/*
+ * End line if too long.
+ */
+void db_end_line(void)
+{
+ if (db_output_position >= db_max_width-1)
+ db_printf("\n");
+}
+
+/*VARARGS1*/
+int
+db_printf(const char *fmt, ...)
+{
+ va_list listp;
+
+ va_start(listp, fmt);
+ _doprnt(fmt, listp, db_id_putc, db_radix, 0);
+ va_end(listp);
+ return 0;
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_output.h b/ddb/db_output.h
new file mode 100644
index 0000000..7920179
--- /dev/null
+++ b/ddb/db_output.h
@@ -0,0 +1,46 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/90
+ */
+
+/*
+ * Printing routines for kernel debugger.
+ */
+
+#ifndef _DDB_DB_OUTPUT_H_
+#define _DDB_DB_OUTPUT_H_
+
+extern void db_force_whitespace(void);
+extern int db_print_position(void) __attribute__ ((pure));
+extern void db_end_line(void);
+extern int db_printf(const char *fmt, ...);
+/* alternate name */
+#define kdbprintf db_printf
+extern void db_putchar(int c);
+
+#endif /* _DDB_DB_OUTPUT_H_ */
diff --git a/ddb/db_print.c b/ddb/db_print.c
new file mode 100644
index 0000000..f08dd6c
--- /dev/null
+++ b/ddb/db_print.c
@@ -0,0 +1,573 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Miscellaneous printing.
+ */
+#include <string.h>
+#include <mach/policy.h>
+#include <mach/port.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/processor.h>
+#include <kern/smp.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <machine/db_interface.h>
+#include <machine/db_machdep.h>
+#include <machine/thread.h>
+
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_print.h>
+
+extern unsigned long db_maxoff;
+
+/* ARGSUSED */
+void
+db_show_regs(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char *modif)
+{
+ struct db_variable *regp;
+ db_expr_t value;
+ db_addr_t offset;
+ char * name;
+ int i;
+ struct db_var_aux_param aux_param;
+ task_t task = TASK_NULL;
+
+ aux_param.modif = modif;
+ aux_param.thread = THREAD_NULL;
+ if (db_option(modif, 't')) {
+ if (have_addr) {
+ if (!db_check_thread_address_valid((thread_t)addr))
+ return;
+ aux_param.thread = (thread_t)addr;
+ } else
+ aux_param.thread = db_default_thread;
+ if (aux_param.thread != THREAD_NULL)
+ task = aux_param.thread->task;
+ }
+ for (regp = db_regs; regp < db_eregs; regp++) {
+ if (regp->max_level > 1) {
+ db_printf("bad multi-suffixed register %s\n", regp->name);
+ continue;
+ }
+ aux_param.level = regp->max_level;
+ for (i = regp->low; i <= regp->high; i++) {
+ aux_param.suffix[0] = i;
+ db_read_write_variable(regp, &value, DB_VAR_GET, &aux_param);
+ if (regp->max_level > 0)
+ db_printf("%s%d%*s", regp->name, i,
+ 12-strlen(regp->name)-((i<10)?1:2), "");
+ else
+ db_printf("%-12s", regp->name);
+ db_printf("%#*N", 2+2*sizeof(vm_offset_t), value);
+ db_find_xtrn_task_sym_and_offset((db_addr_t)value, &name,
+ &offset, task);
+ if (name != 0 && offset <= db_maxoff && offset != value) {
+ db_printf("\t%s", name);
+ if (offset != 0)
+ db_printf("+%#r", offset);
+ }
+ db_printf("\n");
+ }
+ }
+}
+
+#define OPTION_LONG 0x001 /* long print option */
+#define OPTION_USER 0x002 /* print ps-like stuff */
+#define OPTION_SCHED 0x004 /* print scheduling info */
+#define OPTION_INDENT 0x100 /* print with indent */
+#define OPTION_THREAD_TITLE 0x200 /* print thread title */
+#define	OPTION_TASK_TITLE	0x400		/* print task title */
+
+#ifndef DB_TASK_NAME
+#define DB_TASK_NAME(task) /* no task name */
+#define DB_TASK_NAME_TITLE "" /* no task name */
+#endif /* DB_TASK_NAME */
+
+#ifndef db_thread_fp_used
+#define db_thread_fp_used(thread) FALSE
+#endif
+
+static char *
+db_thread_stat(
+ const thread_t thread,
+ char *status)
+{
+ char *p = status;
+
+ *p++ = (thread->state & TH_RUN) ? 'R' : '.';
+ *p++ = (thread->state & TH_WAIT) ? 'W' : '.';
+ *p++ = (thread->state & TH_SUSP) ? 'S' : '.';
+ *p++ = (thread->state & TH_SWAPPED) ? 'O' : '.';
+ *p++ = (thread->state & TH_UNINT) ? 'N' : '.';
+ /* show if the FPU has been used */
+ *p++ = db_thread_fp_used(thread) ? 'F' : '.';
+ *p++ = 0;
+ return(status);
+}
+
+void
+db_print_thread(
+ thread_t thread,
+ int thread_id,
+ int flag)
+{
+ if (flag & OPTION_USER) {
+ char status[8];
+ char *indent = "";
+ if (flag & OPTION_INDENT)
+ indent = " ";
+
+ if (flag & OPTION_LONG) {
+ if (flag & OPTION_THREAD_TITLE) {
+ db_printf("%s ID: THREAD STAT STACK PCB", indent);
+ db_printf(" SUS PRI CONTINUE,WAIT_FUNC\n");
+ }
+ db_printf("%s%3d%c %0*X %s %0*X %0*X %3d %3d ",
+ indent, thread_id,
+ (thread == current_thread())? '#': ':',
+ 2*sizeof(vm_offset_t), thread,
+ db_thread_stat(thread, status),
+ 2*sizeof(vm_offset_t), thread->kernel_stack,
+ 2*sizeof(vm_offset_t), thread->pcb,
+ thread->suspend_count, thread->sched_pri);
+ if ((thread->state & TH_SWAPPED) && thread->swap_func) {
+ db_task_printsym((db_addr_t)thread->swap_func,
+ DB_STGY_ANY, kernel_task);
+ db_printf(", ");
+ }
+ if (thread->state & TH_WAIT)
+ db_task_printsym((db_addr_t)thread->wait_event,
+ DB_STGY_ANY, kernel_task);
+ db_printf("\n");
+ } else if (flag & OPTION_SCHED) {
+ if (flag & OPTION_THREAD_TITLE) {
+ db_printf("%s "
+ "STAT PRIORITY POLICY USAGE LAST\n",
+ indent);
+ db_printf("%s ID: "
+ "RWSONF SET MAX COMP DEPR P DATA CPU SCHED UPDATED\n",
+ indent);
+ db_printf(" \n");
+ }
+ db_printf("%s%3d%c %s %4d %4d %4d %4d %c %4d %10d %10d %10d\n",
+ indent, thread_id,
+ (thread == current_thread())? '#': ':',
+ db_thread_stat(thread, status),
+ thread->priority,
+ thread->max_priority,
+ thread->sched_pri,
+ thread->depress_priority,
+#if MACH_FIXPRI
+ thread->policy == POLICY_TIMESHARE ? 'T' : 'F',
+ thread->sched_data,
+#else /* MACH_FIXPRI */
+ 'T', 0,
+#endif /* MACH_FIXPRI */
+ thread->cpu_usage,
+ thread->sched_usage,
+ thread->sched_stamp);
+ } else {
+ if (thread_id % 3 == 0) {
+ if (flag & OPTION_INDENT)
+ db_printf("\n ");
+ } else
+ db_printf(" ");
+ db_printf("%3d%c(%0*X,%s)", thread_id,
+ (thread == current_thread())? '#': ':',
+ 2*sizeof(vm_offset_t), thread,
+ db_thread_stat(thread, status));
+ }
+ } else {
+ if (flag & OPTION_INDENT)
+ db_printf(" %3d ", thread_id);
+ if (thread->name[0] &&
+ strncmp (thread->name, thread->task->name, THREAD_NAME_SIZE))
+ db_printf("%s ", thread->name);
+ db_printf("(%0*X) ", 2*sizeof(vm_offset_t), thread);
+ char status[8];
+ db_printf("%s", db_thread_stat(thread, status));
+ if (thread->state & TH_SWAPPED) {
+ if (thread->swap_func) {
+ db_printf("(");
+ db_task_printsym((db_addr_t)thread->swap_func,
+ DB_STGY_ANY, kernel_task);
+ db_printf(")");
+ } else {
+ db_printf("(swapped)");
+ }
+ }
+ if (thread->state & TH_WAIT) {
+ db_printf(" ");
+ db_task_printsym((db_addr_t)thread->wait_event,
+ DB_STGY_ANY, kernel_task);
+ }
+ db_printf("\n");
+ }
+}
+
+static void
+db_print_task(
+ task_t task,
+ int task_id,
+ int flag)
+{
+ thread_t thread;
+ int thread_id;
+
+ if (flag & OPTION_USER) {
+ if (flag & OPTION_TASK_TITLE) {
+ db_printf(" ID: TASK MAP THD SUS PR %s",
+ DB_TASK_NAME_TITLE);
+ if ((flag & (OPTION_LONG|OPTION_SCHED)) == 0)
+ db_printf(" THREADS");
+ db_printf("\n");
+ }
+ db_printf("%3d: %0*X %0*X %3d %3d %2d ",
+ task_id, 2*sizeof(vm_offset_t), task,
+ 2*sizeof(vm_offset_t), task->map, task->thread_count,
+ task->suspend_count, task->priority);
+ DB_TASK_NAME(task);
+ if (flag & (OPTION_LONG|OPTION_SCHED)) {
+ if (flag & OPTION_TASK_TITLE)
+ flag |= OPTION_THREAD_TITLE;
+ db_printf("\n");
+ } else if (task->thread_count <= 1)
+ flag &= ~OPTION_INDENT;
+ thread_id = 0;
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ db_print_thread(thread, thread_id, flag);
+ flag &= ~OPTION_THREAD_TITLE;
+ thread_id++;
+ }
+ if ((flag & (OPTION_LONG|OPTION_SCHED)) == 0)
+ db_printf("\n");
+ } else {
+ if (flag & OPTION_TASK_TITLE)
+ db_printf(" TASK THREADS\n");
+ if (task->name[0])
+ db_printf("%3d %s (%0*X): ", task_id, task->name,
+ 2*sizeof(vm_offset_t), task);
+ else
+ db_printf("%3d (%0*X): ", task_id,
+ 2*sizeof(vm_offset_t), task);
+ if (task->thread_count == 0) {
+ db_printf("no threads\n");
+ } else {
+ if (task->thread_count > 1) {
+ db_printf("%d threads: \n", task->thread_count);
+ flag |= OPTION_INDENT;
+ } else
+ flag &= ~OPTION_INDENT;
+ thread_id = 0;
+ queue_iterate(&task->thread_list, thread,
+ thread_t, thread_list)
+ db_print_thread(thread, thread_id++, flag);
+ }
+ }
+}
+
+void
+db_show_all_tasks(db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char *modif)
+{
+ task_t task;
+ int task_id = 0;
+ processor_set_t pset;
+
+ db_printf(" ID %-*s NAME [THREADS]\n", 2*sizeof(vm_offset_t), "TASK");
+
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets)
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ db_printf("%3d %0*X %s [%d]\n",
+ task_id,
+ 2*sizeof(vm_offset_t),
+ task,
+ task->name,
+ task->thread_count);
+ task_id++;
+ }
+}
+
+static void showrq(run_queue_t rq)
+{
+ db_printf("count(%d) low(%d)\n", rq->count, rq->low);
+}
+
+/*ARGSUSED*/
+void
+db_show_all_runqs(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ int i = 0;
+ processor_set_t pset;
+
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ db_printf("Processor set #%d runq:\t", i);
+ showrq(&pset->runq);
+ i++;
+ }
+ for (i = 0; i < smp_get_numcpus(); i++) {
+ db_printf("Processor #%d runq:\t", i);
+ showrq(&cpu_to_processor(i)->runq);
+ }
+ db_printf("Stuck threads:\t%d", stuck_count);
+}
+
+/*ARGSUSED*/
+void
+db_show_all_threads(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ task_t task;
+ int task_id;
+ int flag;
+ processor_set_t pset;
+
+ flag = OPTION_TASK_TITLE|OPTION_INDENT;
+ if (db_option(modif, 'u'))
+ flag |= OPTION_USER;
+ if (db_option(modif, 'l'))
+ flag |= OPTION_LONG;
+ if (db_option(modif, 's'))
+ flag |= OPTION_SCHED;
+
+ task_id = 0;
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ db_print_task(task, task_id, flag);
+ flag &= ~OPTION_TASK_TITLE;
+ task_id++;
+ }
+ }
+}
+
+db_addr_t
+db_task_from_space(
+ ipc_space_t space,
+ int *task_id)
+{
+ task_t task;
+ int tid = 0;
+ processor_set_t pset;
+
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (task->itk_space == space) {
+ *task_id = tid;
+ return (db_addr_t)task;
+ }
+ tid++;
+ }
+ }
+ *task_id = 0;
+ return (0);
+}
+
+/*ARGSUSED*/
+void
+db_show_one_thread(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ int flag;
+ int thread_id;
+ thread_t thread;
+
+ flag = OPTION_THREAD_TITLE;
+ if (db_option(modif, 'u'))
+ flag |= OPTION_USER;
+ if (db_option(modif, 'l'))
+ flag |= OPTION_LONG;
+ if (db_option(modif, 's'))
+ flag |= OPTION_SCHED;
+
+ if (!have_addr) {
+ thread = current_thread();
+ if (thread == THREAD_NULL) {
+ db_error("No thread\n");
+ /*NOTREACHED*/
+ }
+ } else
+ thread = (thread_t) addr;
+
+ if ((thread_id = db_lookup_thread(thread)) < 0) {
+ db_printf("bad thread address %#X\n", addr);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+
+ if (flag & OPTION_USER) {
+ db_printf("TASK%d(%0*X):\n",
+ db_lookup_task(thread->task),
+ 2*sizeof(vm_offset_t), thread->task);
+ db_print_thread(thread, thread_id, flag);
+ } else {
+ db_printf("task %d(%0*X): thread %d",
+ db_lookup_task(thread->task),
+ 2*sizeof(vm_offset_t), thread->task, thread_id);
+ db_print_thread(thread, thread_id, flag);
+ }
+}
+
+/*ARGSUSED*/
+void
+db_show_one_task(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ int flag;
+ int task_id;
+ task_t task;
+
+ flag = OPTION_TASK_TITLE;
+ if (db_option(modif, 'u'))
+ flag |= OPTION_USER;
+ if (db_option(modif, 'l'))
+ flag |= OPTION_LONG;
+
+ if (!have_addr) {
+ task = db_current_task();
+ if (task == TASK_NULL) {
+ db_error("No task\n");
+ /*NOTREACHED*/
+ }
+ } else
+ task = (task_t) addr;
+
+ if ((task_id = db_lookup_task(task)) < 0) {
+ db_printf("bad task address %#X\n", addr);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+
+ db_print_task(task, task_id, flag);
+}
+
+static int
+db_port_iterate(const thread_t thread, void (*func)(int, const ipc_port_t, unsigned, int))
+{
+ ipc_entry_t entry;
+ int n = 0;
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&thread->task->itk_space->is_map, &iter, entry) {
+ if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+ (*func)(entry->ie_name, (ipc_port_t) entry->ie_object,
+ entry->ie_bits, n++);
+ }
+ return(n);
+}
+
+static void
+db_print_port_id(int id, const ipc_port_t port, unsigned bits, int n)
+{
+ if (n != 0 && n % 3 == 0)
+ db_printf("\n");
+ db_printf("\tport%d(%s,%x)", id,
+ (bits & MACH_PORT_TYPE_RECEIVE)? "r":
+ (bits & MACH_PORT_TYPE_SEND)? "s": "S", port);
+}
+
+static void
+db_print_port_id_long(
+ int id,
+ const ipc_port_t port,
+ unsigned bits,
+ int n)
+{
+ if (n != 0)
+ db_printf("\n");
+ db_printf("\tport%d(%s, port=0x%x", id,
+ (bits & MACH_PORT_TYPE_RECEIVE)? "r":
+ (bits & MACH_PORT_TYPE_SEND)? "s": "S", port);
+ db_printf(", receiver_name=0x%x)", port->ip_receiver_name);
+}
+
+/* ARGSUSED */
+void
+db_show_port_id(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ thread_t thread;
+
+ if (!have_addr) {
+ thread = current_thread();
+ if (thread == THREAD_NULL) {
+ db_error("No thread\n");
+ /*NOTREACHED*/
+ }
+ } else
+ thread = (thread_t) addr;
+ if (db_lookup_thread(thread) < 0) {
+ db_printf("Bad thread address %#X\n", addr);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ if (db_option(modif, 'l'))
+ {
+ if (db_port_iterate(thread, db_print_port_id_long))
+ db_printf("\n");
+ return;
+ }
+ if (db_port_iterate(thread, db_print_port_id))
+ db_printf("\n");
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_print.h b/ddb/db_print.h
new file mode 100644
index 0000000..b86c696
--- /dev/null
+++ b/ddb/db_print.h
@@ -0,0 +1,68 @@
+/*
+ * (c) Copyright 1992, 1993, 1994, 1995 OPEN SOFTWARE FOUNDATION, INC.
+ * ALL RIGHTS RESERVED
+ */
+/*
+ * OSF RI nmk19b2 5/2/95
+ */
+
+#ifndef _DDB_DB_PRINT_H_
+#define _DDB_DB_PRINT_H_
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+/* Prototypes for functions exported by this module.
+ */
+void db_show_regs(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char *modif);
+
+void db_show_one_task(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_show_port_id(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_show_one_thread(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_show_all_tasks(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_show_all_threads(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_show_all_runqs(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+db_addr_t db_task_from_space(
+ ipc_space_t space,
+ int *task_id);
+
+void db_print_thread(
+ thread_t thread,
+ int thread_id,
+ int flag);
+
+#endif /* !_DDB_DB_PRINT_H_ */
diff --git a/ddb/db_run.c b/ddb/db_run.c
new file mode 100644
index 0000000..0c8c12f
--- /dev/null
+++ b/ddb/db_run.c
@@ -0,0 +1,430 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Commands to run process.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_break.h>
+#include <ddb/db_access.h>
+#include <ddb/db_run.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_command.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_output.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_cond.h>
+
+
+int db_run_mode;
+
+boolean_t db_sstep_print;
+int db_loop_count;
+int db_call_depth;
+
+int db_inst_count;
+int db_last_inst_count;
+int db_load_count;
+int db_store_count;
+
+boolean_t
+db_stop_at_pc(
+ boolean_t *is_breakpoint,
+ task_t task)
+{
+ db_addr_t pc;
+ db_thread_breakpoint_t bkpt;
+
+ db_clear_task_single_step(DDB_REGS, task);
+ db_clear_breakpoints();
+ db_clear_watchpoints();
+ pc = PC_REGS(DDB_REGS);
+
+#ifdef FIXUP_PC_AFTER_BREAK
+ if (*is_breakpoint) {
+ /*
+ * Breakpoint trap. Fix up the PC if the
+ * machine requires it.
+ */
+ FIXUP_PC_AFTER_BREAK
+ pc = PC_REGS(DDB_REGS);
+ }
+#endif /* FIXUP_PC_AFTER_BREAK */
+
+ /*
+ * Now check for a breakpoint at this address.
+ */
+ bkpt = db_find_thread_breakpoint_here(task, pc);
+ if (bkpt) {
+ if (db_cond_check(bkpt)) {
+ *is_breakpoint = TRUE;
+ return (TRUE); /* stop here */
+ }
+ }
+ *is_breakpoint = FALSE;
+
+ if (db_run_mode == STEP_INVISIBLE) {
+ db_run_mode = STEP_CONTINUE;
+ return (FALSE); /* continue */
+ }
+ if (db_run_mode == STEP_COUNT) {
+ return (FALSE); /* continue */
+ }
+ if (db_run_mode == STEP_ONCE) {
+ if (--db_loop_count > 0) {
+ if (db_sstep_print) {
+ db_print_loc_and_inst(pc, task);
+ }
+ return (FALSE); /* continue */
+ }
+ }
+ if (db_run_mode == STEP_RETURN) {
+ /* WARNING: the following assumes an instruction fits an int */
+ db_expr_t ins = db_get_task_value(pc, sizeof(int), FALSE, task);
+
+ /* continue until matching return */
+
+ if (!inst_trap_return(ins) &&
+ (!inst_return(ins) || --db_call_depth != 0)) {
+ if (db_sstep_print) {
+ if (inst_call(ins) || inst_return(ins)) {
+ int i;
+
+ db_printf("[after %6d /%4d] ",
+ db_inst_count,
+ db_inst_count - db_last_inst_count);
+ db_last_inst_count = db_inst_count;
+ for (i = db_call_depth; --i > 0; )
+ db_printf(" ");
+ db_print_loc_and_inst(pc, task);
+ db_printf("\n");
+ }
+ }
+ if (inst_call(ins))
+ db_call_depth++;
+ return (FALSE); /* continue */
+ }
+ }
+ if (db_run_mode == STEP_CALLT) {
+ /* WARNING: the following assumes an instruction fits an int */
+ db_expr_t ins = db_get_task_value(pc, sizeof(int), FALSE, task);
+
+ /* continue until call or return */
+
+ if (!inst_call(ins) &&
+ !inst_return(ins) &&
+ !inst_trap_return(ins)) {
+ return (FALSE); /* continue */
+ }
+ }
+ if (db_find_breakpoint_here(task, pc))
+ return(FALSE);
+ db_run_mode = STEP_NONE;
+ return (TRUE);
+}
+
+/*
+ * Resume execution: in the counting modes, account for the instruction
+ * about to run; then re-arm breakpoints or single-step as appropriate.
+ */
+void
+db_restart_at_pc(
+	boolean_t	watchpt,
+	task_t		task)
+{
+	db_addr_t pc = PC_REGS(DDB_REGS);
+
+	if ((db_run_mode == STEP_COUNT) ||
+	    (db_run_mode == STEP_RETURN) ||
+	    (db_run_mode == STEP_CALLT)) {
+	    /*
+	     * We are about to execute this instruction, so count it now.
+	     * WARNING: the following assumes an instruction fits an int.
+	     */
+	    db_expr_t ins = db_get_task_value(pc, sizeof(int), FALSE, task);
+	    db_inst_count++;
+	    db_load_count += inst_load(ins);
+	    db_store_count += inst_store(ins);
+#ifdef	SOFTWARE_SSTEP
+	    db_addr_t brpc;
+	    /* Account for instructions in delay slots */
+	    brpc = next_instr_address(pc, 1, task);
+	    if ((brpc != pc) && (inst_branch(ins) || inst_call(ins))) {
+		/* Note: this ~assumes an instruction <= sizeof(int) */
+		db_get_task_value(brpc, sizeof(int), FALSE, task);
+		db_inst_count++;
+		db_load_count += inst_load(ins);
+		db_store_count += inst_store(ins);
+	    }
+#endif	/* SOFTWARE_SSTEP */
+	}
+
+	if (db_run_mode == STEP_CONTINUE) {
+	    if (watchpt || db_find_breakpoint_here(task, pc)) {
+		/* Step over breakpoint/watchpoint. */
+		db_run_mode = STEP_INVISIBLE;
+		db_set_task_single_step(DDB_REGS, task);
+	    } else {
+		db_set_breakpoints();
+		db_set_watchpoints();
+	    }
+	} else {
+	    db_set_task_single_step(DDB_REGS, task);
+	}
+}
+
+void
+db_single_step(
+ db_regs_t *regs,
+ task_t task)
+{
+ if (db_run_mode == STEP_CONTINUE) {
+ db_run_mode = STEP_INVISIBLE;
+ db_set_task_single_step(regs, task);
+ }
+}
+
+#ifdef SOFTWARE_SSTEP
+/*
+ * Software implementation of single-stepping.
+ * If your machine does not have a trace mode
+ * similar to the vax or sun ones you can use
+ * this implementation, done for the mips.
+ * Just define the above conditional and provide
+ * the functions/macros defined below.
+ *
+ * extern boolean_t
+ * inst_branch(), returns true if the instruction might branch
+ * extern unsigned
+ * branch_taken(), return the address the instruction might
+ * branch to
+ * db_getreg_val(); return the value of a user register,
+ * as indicated in the hardware instruction
+ * encoding, e.g. 8 for r8
+ *
+ * next_instr_address(pc,bd,task) returns the address of the first
+ * instruction following the one at "pc",
+ * which is either in the taken path of
+ * the branch (bd==1) or not. This is
+ * for machines (mips) with branch delays.
+ *
+ * A single-step may involve at most 2 breakpoints -
+ * one for branch-not-taken and one for branch taken.
+ * If one of these addresses does not already have a breakpoint,
+ * we allocate a breakpoint and save it here.
+ * These breakpoints are deleted on return.
+ */
+db_breakpoint_t db_not_taken_bkpt = 0;
+db_breakpoint_t db_taken_bkpt = 0;
+
+db_breakpoint_t __attribute__ ((pure))
+db_find_temp_breakpoint(const task_t task, db_addr_t addr)
+{
+ if (db_taken_bkpt && (db_taken_bkpt->address == addr) &&
+ db_taken_bkpt->task == task)
+ return db_taken_bkpt;
+ if (db_not_taken_bkpt && (db_not_taken_bkpt->address == addr) &&
+ db_not_taken_bkpt->task == task)
+ return db_not_taken_bkpt;
+ return 0;
+}
+
+void
+db_set_task_single_step(
+ db_regs_t *regs,
+ task_t task)
+{
+ db_addr_t pc = PC_REGS(regs), brpc;
+ unsigned int inst;
+ boolean_t unconditional;
+
+ /*
+ * User was stopped at pc, e.g. the instruction
+ * at pc was not executed.
+ */
+ inst = db_get_task_value(pc, sizeof(int), FALSE, task);
+ if (inst_branch(inst) || inst_call(inst)) {
+ extern db_expr_t getreg_val();
+
+ brpc = branch_taken(inst, pc, getreg_val, regs);
+ if (brpc != pc) { /* self-branches are hopeless */
+ db_taken_bkpt = db_set_temp_breakpoint(task, brpc);
+ } else
+ db_taken_bkpt = 0;
+ pc = next_instr_address(pc,1,task);
+ }
+
+ /* check if this control flow instruction is an unconditional transfer */
+ unconditional = inst_unconditional_flow_transfer(inst);
+
+ pc = next_instr_address(pc,0,task);
+ /*
+ We only set the sequential breakpoint if previous instruction was not
+ an unconditional change of flow of control. If the previous instruction
+ is an unconditional change of flow of control, setting a breakpoint in the
+ next sequential location may set a breakpoint in data or in another routine,
+ which could screw up either the program or the debugger.
+ (Consider, for instance, that the next sequential instruction is the
+ start of a routine needed by the debugger.)
+ */
+ if (!unconditional && db_find_breakpoint_here(task, pc) == 0) {
+ db_not_taken_bkpt = db_set_temp_breakpoint(task, pc);
+ }
+ else
+ db_not_taken_bkpt = 0;
+}
+
+void
+db_clear_task_single_step(const db_regs_t *regs, task_t task)
+{
+ if (db_taken_bkpt != 0) {
+ db_delete_temp_breakpoint(task, db_taken_bkpt);
+ db_taken_bkpt = 0;
+ }
+ if (db_not_taken_bkpt != 0) {
+ db_delete_temp_breakpoint(task, db_not_taken_bkpt);
+ db_not_taken_bkpt = 0;
+ }
+}
+
+#endif /* SOFTWARE_SSTEP */
+
+
+extern int db_cmd_loop_done;
+
+/* single-step */
+/*ARGSUSED*/
+void
+db_single_step_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ boolean_t print = FALSE;
+
+ if (count == -1)
+ count = 1;
+
+ if (modif[0] == 'p')
+ print = TRUE;
+
+ db_run_mode = STEP_ONCE;
+ db_loop_count = count;
+ db_sstep_print = print;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+/* trace and print until call/return */
+/*ARGSUSED*/
+void
+db_trace_until_call_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ boolean_t print = FALSE;
+
+ if (modif[0] == 'p')
+ print = TRUE;
+
+ db_run_mode = STEP_CALLT;
+ db_sstep_print = print;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+/*ARGSUSED*/
+void
+db_trace_until_matching_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ boolean_t print = FALSE;
+
+ if (modif[0] == 'p')
+ print = TRUE;
+
+ db_run_mode = STEP_RETURN;
+ db_call_depth = 1;
+ db_sstep_print = print;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+/* continue */
+/*ARGSUSED*/
+void
+db_continue_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ if (modif[0] == 'c')
+ db_run_mode = STEP_COUNT;
+ else
+ db_run_mode = STEP_CONTINUE;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+boolean_t
+db_in_single_step(void)
+{
+ return(db_run_mode != STEP_NONE && db_run_mode != STEP_CONTINUE);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_run.h b/ddb/db_run.h
new file mode 100644
index 0000000..c042d4c
--- /dev/null
+++ b/ddb/db_run.h
@@ -0,0 +1,94 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _DDB_DB_RUN_H_
+#define _DDB_DB_RUN_H_
+
+#include <kern/task.h>
+#include <machine/db_machdep.h>
+
+extern int db_run_mode;
+
+/* modes the system may be running in */
+
+#define STEP_NONE 0
+#define STEP_ONCE 1
+#define STEP_RETURN 2
+#define STEP_CALLT 3
+#define STEP_CONTINUE 4
+#define STEP_INVISIBLE 5
+#define STEP_COUNT 6
+
+extern void db_single_step(db_regs_t *regs, task_t task);
+
+extern void db_single_step_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char *modif);
+
+void db_trace_until_call_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_trace_until_matching_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+void db_continue_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
+
+#ifndef db_set_single_step
+void db_set_task_single_step(db_regs_t *, task_t);
+#else
+#define db_set_task_single_step(regs, task) db_set_single_step(regs)
+#endif
+#ifndef db_clear_single_step
+void db_clear_task_single_step(const db_regs_t *, task_t);
+#else
+#define db_clear_task_single_step(regs, task) db_clear_single_step(regs)
+#endif
+
+extern boolean_t db_in_single_step(void);
+
+extern void
+db_restart_at_pc(
+ boolean_t watchpt,
+ task_t task);
+
+extern boolean_t
+db_stop_at_pc(
+ boolean_t *is_breakpoint,
+ task_t task);
+
+#endif /* _DDB_DB_RUN_H_ */
diff --git a/ddb/db_sym.c b/ddb/db_sym.c
new file mode 100644
index 0000000..f0adb0c
--- /dev/null
+++ b/ddb/db_sym.c
@@ -0,0 +1,532 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+#include <string.h>
+#include <mach/std_types.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_elf.h>
+
+#include <vm/vm_map.h> /* vm_map_t */
+
+/*
+ * Multiple symbol tables
+ */
+#define MAXNOSYMTABS 5 /* mach, bootstrap, ux, emulator, 1 spare */
+
+db_symtab_t db_symtabs[MAXNOSYMTABS] = {{0,},};
+int db_nsymtab = 0;
+
+db_symtab_t *db_last_symtab;
+
+/*
+ * Add symbol table, with given name, to list of symbol tables.
+ */
+boolean_t
+db_add_symbol_table(
+ int type,
+ char *start,
+ char *end,
+ const char *name,
+ char *ref,
+ char *map_pointer)
+{
+ db_symtab_t *st;
+ extern vm_map_t kernel_map;
+
+ if (db_nsymtab >= MAXNOSYMTABS)
+ return (FALSE);
+
+ st = &db_symtabs[db_nsymtab];
+ st->type = type;
+ st->start = start;
+ st->end = end;
+ st->private = ref;
+ st->map_pointer = (map_pointer == (char *)kernel_map)? 0: map_pointer;
+ strncpy(st->name, name, sizeof st->name - 1);
+ st->name[sizeof st->name - 1] = '\0';
+
+ db_nsymtab++;
+
+ return (TRUE);
+}
+
+/*
+ * db_qualify("vm_map", "ux") returns "ux::vm_map".
+ *
+ * Note: return value points to static data whose content is
+ * overwritten by each call... but in practice this seems okay.
+ */
+static char * __attribute__ ((pure))
+db_qualify(const char *symname, const char *symtabname)
+{
+ static char tmp[256];
+ char *s;
+
+ s = tmp;
+ while ((*s++ = *symtabname++)) {
+ }
+ s[-1] = ':';
+ *s++ = ':';
+ while ((*s++ = *symname++)) {
+ }
+ return tmp;
+}
+
+
+boolean_t
+db_eqname( const char* src, const char* dst, char c )
+{
+ if (!strcmp(src, dst))
+ return (TRUE);
+ if (src[0] == c)
+ return (!strcmp(src+1,dst));
+ return (FALSE);
+}
+
+boolean_t
+db_value_of_name(
+ char *name,
+ db_expr_t *valuep)
+{
+ db_sym_t sym;
+
+ sym = db_lookup(name);
+ if (sym == DB_SYM_NULL)
+ return (FALSE);
+ db_symbol_values(0, sym, &name, valuep);
+
+ db_free_symbol(sym);
+ return (TRUE);
+}
+
+/*
+ * Lookup a symbol.
+ * If the symbol has a qualifier (e.g., ux::vm_map),
+ * then only the specified symbol table will be searched;
+ * otherwise, all symbol tables will be searched.
+ */
+db_sym_t
+db_lookup(char *symstr)
+{
+ db_sym_t sp;
+ int i;
+ int symtab_start = 0;
+ int symtab_end = db_nsymtab;
+ char *cp;
+
+ /*
+ * Look for, remove, and remember any symbol table specifier.
+ */
+ for (cp = symstr; *cp; cp++) {
+ if (*cp == ':' && cp[1] == ':') {
+ *cp = '\0';
+ for (i = 0; i < db_nsymtab; i++) {
+ if (! strcmp(symstr, db_symtabs[i].name)) {
+ symtab_start = i;
+ symtab_end = i + 1;
+ break;
+ }
+ }
+ *cp = ':';
+ if (i == db_nsymtab)
+ db_error("Invalid symbol table name\n");
+ symstr = cp+2;
+ }
+ }
+
+ /*
+ * Look in the specified set of symbol tables.
+ * Return on first match.
+ */
+ for (i = symtab_start; i < symtab_end; i++) {
+ if ((sp = X_db_lookup(&db_symtabs[i], symstr))) {
+ db_last_symtab = &db_symtabs[i];
+ return sp;
+ }
+ db_free_symbol(sp);
+ }
+ return 0;
+}
+
+/*
+ * Common utility routine to parse a symbol string into a file
+ * name, a symbol name and line number.
+ * This routine is called from X_db_lookup if the object dependent
+ * handler supports qualified search with a file name or a line number.
+ * It parses the symbol string, and calls an object-dependent routine
+ * with parsed file name, symbol name and line number.
+ */
+db_sym_t
+db_sym_parse_and_lookup(
+ db_sym_t (*func) (db_symtab_t *, const char*, const char*, int),
+ db_symtab_t *symtab,
+ char *symstr)
+{
+ char *p;
+ int n;
+ int n_name;
+ int line_number;
+ char *file_name = 0;
+ char *sym_name = 0;
+ char *component[3];
+ db_sym_t found = DB_SYM_NULL;
+
+ /*
+ * disassemble the symbol into components:
+ * [file_name:]symbol[:line_number]
+ */
+ component[0] = symstr;
+ component[1] = component[2] = 0;
+ for (p = symstr, n = 1; *p; p++) {
+ if (*p == ':') {
+ if (n >= 3)
+ break;
+ *p = 0;
+ component[n++] = p+1;
+ }
+ }
+ if (*p != 0)
+ goto out;
+ line_number = 0;
+ n_name = n;
+ p = component[n-1];
+ if (*p >= '0' && *p <= '9') {
+ if (n == 1)
+ goto out;
+ for (line_number = 0; *p; p++) {
+ if (*p < '0' || *p > '9')
+ goto out;
+ line_number = line_number*10 + *p - '0';
+ }
+ n_name--;
+ } else if (n >= 3)
+ goto out;
+ if (n_name == 1) {
+ for (p = component[0]; *p && *p != '.'; p++);
+ if (*p == '.') {
+ file_name = component[0];
+ sym_name = 0;
+ } else {
+ file_name = 0;
+ sym_name = component[0];
+ }
+ } else {
+ file_name = component[0];
+ sym_name = component[1];
+ }
+ found = func(symtab, file_name, sym_name, line_number);
+
+out:
+ while (--n >= 1)
+ component[n][-1] = ':';
+ return(found);
+}
+
+/*
+ * Does this symbol name appear in more than one symbol table?
+ * Used by db_symbol_values to decide whether to qualify a symbol.
+ */
+boolean_t db_qualify_ambiguous_names = FALSE;
+
+static boolean_t
+db_name_is_ambiguous(char *sym_name)
+{
+ int i;
+ boolean_t found_once = FALSE;
+
+ if (!db_qualify_ambiguous_names)
+ return FALSE;
+
+ for (i = 0; i < db_nsymtab; i++) {
+ db_sym_t sp = X_db_lookup(&db_symtabs[i], sym_name);
+ if (sp) {
+ if (found_once)
+ {
+ db_free_symbol(sp);
+ return TRUE;
+ }
+ found_once = TRUE;
+ }
+ db_free_symbol(sp);
+ }
+ return FALSE;
+}
+
+/*
+ * Find the closest symbol to val, and return its name
+ * and the difference between val and the symbol found.
+ *
+ * Logic change. If the task argument is non NULL and a
+ * matching symbol is found in a symbol table which explicitly
+ * specifies its map to be task->map, that symbol will have
+ * precedence over any symbol from a symbol table with a null
+ * map. This allows overlapping kernel/user maps to work correctly.
+ *
+ */
+db_sym_t
+db_search_task_symbol(
+ db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp, /* better be unsigned */
+ task_t task)
+{
+ db_sym_t ret;
+
+ if (task != TASK_NULL)
+ ret = db_search_in_task_symbol(val, strategy, offp, task);
+ else
+ {
+ ret = db_search_in_task_symbol(val, strategy, offp, task);
+ /*
+ db_search_in_task_symbol will return success with
+ a very large offset when it should have failed.
+ */
+ if (ret == DB_SYM_NULL || (*offp) > 0x1000000)
+ {
+ db_free_symbol(ret);
+ task = db_current_task();
+ ret = db_search_in_task_symbol(val, strategy, offp, task);
+ }
+ }
+
+ return ret;
+}
+
+db_sym_t
+db_search_in_task_symbol(
+ db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp,
+ task_t task)
+{
+ vm_size_t diff;
+ vm_size_t newdiff;
+ int i;
+ db_symtab_t *sp;
+ db_sym_t ret = DB_SYM_NULL, sym;
+ vm_map_t map_for_val;
+
+ map_for_val = (task == TASK_NULL)? VM_MAP_NULL: task->map;
+ newdiff = diff = ~0;
+ db_last_symtab = (db_symtab_t *) 0;
+ for (sp = &db_symtabs[0], i = 0; i < db_nsymtab; sp++, i++)
+ {
+ newdiff = ~0;
+ if ((vm_map_t)sp->map_pointer == VM_MAP_NULL ||
+ (vm_map_t)sp->map_pointer == map_for_val)
+ {
+ sym = X_db_search_symbol(sp, val, strategy, (db_expr_t*)&newdiff);
+ if (sym == DB_SYM_NULL)
+ continue;
+ if (db_last_symtab == (db_symtab_t *) 0)
+ { /* first hit */
+ db_last_symtab = sp;
+ diff = newdiff;
+ db_free_symbol(ret);
+ ret = sym;
+ continue;
+ }
+ if ((vm_map_t) sp->map_pointer == VM_MAP_NULL &&
+ (vm_map_t) db_last_symtab->map_pointer == VM_MAP_NULL &&
+ newdiff < diff )
+ { /* closer null map match */
+ db_last_symtab = sp;
+ diff = newdiff;
+ db_free_symbol(ret);
+ ret = sym;
+ continue;
+ }
+ if ((vm_map_t) sp->map_pointer != VM_MAP_NULL &&
+ (newdiff < 0x100000) &&
+ ((vm_map_t) db_last_symtab->map_pointer == VM_MAP_NULL ||
+ newdiff < diff ))
+ { /* update if new is in matching map and symbol is "close",
+ and
+ old is VM_MAP_NULL or old in is matching map but is further away
+ */
+ db_last_symtab = sp;
+ diff = newdiff;
+ db_free_symbol(ret);
+ ret = sym;
+ continue;
+ }
+ }
+ }
+
+ *offp = diff;
+ return ret;
+}
+
+/*
+ * Return name and value of a symbol
+ */
+void
+db_symbol_values(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep)
+{
+ db_expr_t value;
+ char *name;
+
+ if (sym == DB_SYM_NULL) {
+ *namep = 0;
+ return;
+ }
+ if (stab == 0)
+ stab = db_last_symtab;
+
+ X_db_symbol_values(stab, sym, &name, &value);
+
+ if (db_name_is_ambiguous(name))
+ *namep = db_qualify(name, db_last_symtab->name);
+ else
+ *namep = name;
+ if (valuep)
+ *valuep = value;
+}
+
+
+/*
+ * Print the closest symbol to value
+ *
+ * After matching the symbol according to the given strategy
+ * we print it in the name+offset format, provided the symbol's
+ * value is close enough (eg smaller than db_maxoff).
+ * We also attempt to print [filename:linenum] when applicable
+ * (eg for procedure names).
+ *
+ * If we could not find a reasonable name+offset representation,
+ * then we just print the value in hex. Small values might get
+ * bogus symbol associations, e.g. 3 might get some absolute
+ * value like _INCLUDE_VERSION or something, therefore we do
+ * not accept symbols whose value is zero (and use plain hex).
+ */
+
+unsigned long db_maxoff = 0x4000;
+
+void
+db_task_printsym(
+ db_addr_t off,
+ db_strategy_t strategy,
+ task_t task)
+{
+ db_addr_t d;
+ char *filename;
+ char *name;
+ db_expr_t value;
+ int linenum;
+ db_sym_t cursym;
+
+ cursym = db_search_task_symbol(off, strategy, &d, task);
+ db_symbol_values(0, cursym, &name, &value);
+ if (name == 0 || d >= db_maxoff || value == 0 || *name == 0) {
+ db_printf("%#n", off);
+ db_free_symbol(cursym);
+ return;
+ }
+ db_printf("%s", name);
+ if (d)
+ db_printf("+0x%x", d);
+ if (strategy == DB_STGY_PROC) {
+ if (db_line_at_pc(cursym, &filename, &linenum, off)) {
+ db_printf(" [%s", filename);
+ if (linenum > 0)
+ db_printf(":%d", linenum);
+ db_printf("]");
+ }
+ }
+ db_free_symbol(cursym);
+}
+
+void
+db_printsym(
+ db_expr_t off,
+ db_strategy_t strategy)
+{
+ db_task_printsym(off, strategy, TASK_NULL);
+}
+
+boolean_t
+db_line_at_pc(
+ db_sym_t sym,
+ char **filename,
+ int *linenum,
+ db_addr_t pc)
+{
+ return (db_last_symtab) ?
+ X_db_line_at_pc( db_last_symtab, sym, filename, linenum, pc) :
+ FALSE;
+}
+
+/* Release a symbol handle via the active symtab's deallocator, if any. */
+void db_free_symbol(db_sym_t s)
+{
+	if (db_last_symtab)
+		X_db_free_symbol(db_last_symtab, s);
+}
+
+/*
+ * Switch into symbol-table specific routines
+ */
+
+static void dummy_db_free_symbol(db_sym_t symbol) { }
+static boolean_t dummy_db_sym_init(char *a, char *b, const char *c, char *d) {
+ return FALSE;
+}
+
+
+struct db_sym_switch x_db[] = {
+
+ /* BSD a.out format (really, sdb/dbx(1) symtabs) not supported */
+ { 0,},
+
+ { 0,},
+
+ /* Machdep, not inited here */
+ { 0,},
+
+#ifdef DB_NO_ELF
+ { 0,},
+#else /* DB_NO_ELF */
+ { dummy_db_sym_init, elf_db_lookup, elf_db_search_symbol,
+ elf_db_line_at_pc, elf_db_symbol_values, dummy_db_free_symbol },
+#endif /* DB_NO_ELF */
+
+};
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_sym.h b/ddb/db_sym.h
new file mode 100644
index 0000000..f4fb528
--- /dev/null
+++ b/ddb/db_sym.h
@@ -0,0 +1,264 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/90
+ */
+
+#ifndef _DDB_DB_SYM_H_
+#define _DDB_DB_SYM_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <machine/db_machdep.h>
+
+/*
+ * This module can handle multiple symbol tables,
+ * of multiple types, at the same time
+ */
+#define SYMTAB_NAME_LEN 32
+
+typedef struct {
+ int type;
+#define SYMTAB_AOUT 0
+#define SYMTAB_COFF 1
+#define SYMTAB_MACHDEP 2
+#define SYMTAB_ELF 3
+ char *start; /* symtab location */
+ char *end;
+ char *private; /* optional machdep pointer */
+ char *map_pointer; /* symbols are for this map only,
+ if not null */
+ char name[SYMTAB_NAME_LEN];
+ /* symtab name */
+} db_symtab_t;
+
+extern db_symtab_t *db_last_symtab; /* where last symbol was found */
+
+/*
+ * Symbol representation is specific to the symtab style:
+ * BSD compilers use dbx' nlist, other compilers might use
+ * a different one
+ */
+typedef char * db_sym_t; /* opaque handle on symbols */
+#define DB_SYM_NULL ((db_sym_t)0)
+
+/*
+ * Non-stripped symbol tables will have duplicates, for instance
+ * the same string could match a parameter name, a local var, a
+ * global var, etc.
+ * We are most concerned with the following matches.
+ */
+typedef int db_strategy_t; /* search strategy */
+
+#define DB_STGY_ANY 0 /* anything goes */
+#define DB_STGY_XTRN 1 /* only external symbols */
+#define DB_STGY_PROC 2 /* only procedures */
+
+extern boolean_t db_qualify_ambiguous_names;
+ /* if TRUE, check across symbol tables
+ * for multiple occurrences of a name.
+ * Might slow down quite a bit
+ * ..but the machine has nothing
+ * else to do, now does it ? */
+
+/*
+ * Functions exported by the symtable module
+ */
+
+/* extend the list of symbol tables */
+
+extern boolean_t db_add_symbol_table( int type,
+ char * start,
+ char * end,
+ const char *name,
+ char *ref,
+ char *map_pointer );
+
+/* find symbol value given name */
+
+extern int db_value_of_name( char* name, db_expr_t* valuep);
+
+/* find symbol given value */
+
+extern db_sym_t db_search_task_symbol( db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp,
+ task_t task );
+
+/* return name and value of symbol */
+
+extern void db_symbol_values( db_symtab_t *stab,
+ db_sym_t sym,
+ char** namep,
+ db_expr_t* valuep);
+
+/* find symbol in current task */
+#define db_search_symbol(val,strgy,offp) \
+ db_search_task_symbol(val,strgy,offp,0)
+
+/* find name&value given approx val */
+
+#define db_find_sym_and_offset(val,namep,offp) \
+ do { \
+ db_sym_t s; \
+ db_symbol_values(0, s = db_search_symbol(val,DB_STGY_ANY,offp) \
+ ,namep,0); \
+ db_free_symbol(s); \
+ } while(0);
+
+
+/* ditto, but no locals */
+#define db_find_xtrn_sym_and_offset(val,namep,offp) \
+ do { \
+ db_sym_t s; \
+ db_symbol_values(0, s = db_search_symbol(val,DB_STGY_XTRN,offp) \
+ ,namep,0); \
+ db_free_symbol(s); \
+ } while(0);
+
+/* find name&value given approx val */
+
+#define db_find_task_sym_and_offset(val,namep,offp,task) \
+ do { \
+ db_sym_t s; \
+ db_symbol_values(0, s = db_search_task_symbol(val,DB_STGY_ANY \
+ ,offp,task), \
+ namep, 0); \
+ db_free_symbol(s); \
+ } while(0);
+
+/* ditto, but no locals */
+#define db_find_xtrn_task_sym_and_offset(val,namep,offp,task) \
+ do { \
+ db_sym_t s; \
+ db_symbol_values(0, s = db_search_task_symbol(val,DB_STGY_XTRN \
+ ,offp,task), \
+ namep,0); \
+ db_free_symbol(s); \
+ } while(0);
+
+/* strcmp, modulo leading char */
+extern boolean_t db_eqname( const char* src, const char* dst, char c );
+
+/* print closest symbol to a value */
+extern void db_task_printsym( db_addr_t off,
+ db_strategy_t strategy,
+ task_t task);
+
+/* print closest symbol to a value */
+extern void db_printsym( db_expr_t off, db_strategy_t strategy);
+
+/* free a symbol */
+extern void db_free_symbol(db_sym_t s);
+
+
+/*
+ * Symbol table switch, defines the interface
+ * to symbol-table specific routines.
+ */
+
+extern struct db_sym_switch {
+
+ boolean_t (*init)(
+ char *start,
+ char *end,
+ const char *name,
+ char *task_addr
+ );
+
+ db_sym_t (*lookup)(
+ db_symtab_t *stab,
+ char *symstr
+ );
+ db_sym_t (*search_symbol)(
+ db_symtab_t *stab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp
+ );
+
+ boolean_t (*line_at_pc)(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **file,
+ int *line,
+ db_addr_t pc
+ );
+
+ void (*symbol_values)(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep
+ );
+
+ void (*free_symbol)(
+ db_sym_t sym
+ );
+} x_db[];
+
+#ifndef symtab_type
+#define symtab_type(s) SYMTAB_ELF
+#endif
+
+#define X_db_sym_init(s,e,n,t) x_db[symtab_type(s)].init(s,e,n,t)
+#define X_db_lookup(s,n) x_db[(s)->type].lookup(s,n)
+#define X_db_search_symbol(s,o,t,d) x_db[(s)->type].search_symbol(s,o,t,d)
+#define X_db_line_at_pc(s,p,f,l,a) x_db[(s)->type].line_at_pc(s,p,f,l,a)
+#define X_db_symbol_values(s,p,n,v) x_db[(s)->type].symbol_values(s,p,n,v)
+#define X_db_free_symbol(s,m) x_db[(s)->type].free_symbol(m)
+
+extern boolean_t db_line_at_pc(
+ db_sym_t sym,
+ char **filename,
+ int *linenum,
+ db_addr_t pc);
+
+extern boolean_t elf_db_sym_init (
+ unsigned shdr_num,
+ vm_size_t shdr_size,
+ vm_offset_t shdr_addr,
+ unsigned shdr_shndx,
+ char *name,
+ char *task_addr);
+
+db_sym_t db_lookup(char *);
+
+db_sym_t
+db_search_in_task_symbol(
+ db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp,
+ task_t task);
+
+extern db_sym_t
+db_sym_parse_and_lookup(
+ db_sym_t (*func) (db_symtab_t *, const char*, const char*, int),
+ db_symtab_t *symtab,
+ char *symstr);
+
+#endif /* _DDB_DB_SYM_H_ */
diff --git a/ddb/db_task_thread.c b/ddb/db_task_thread.c
new file mode 100644
index 0000000..fe742c2
--- /dev/null
+++ b/ddb/db_task_thread.c
@@ -0,0 +1,326 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+#include <machine/db_machdep.h>
+#include <ddb/db_command.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_variables.h>
+
+
+
+/*
+ * Following constants are used to prevent infinite loop of task
+ * or thread search due to the incorrect list.
+ */
+#define DB_MAX_TASKID 0x10000 /* max # of tasks */
+#define DB_MAX_THREADID 0x10000 /* max # of threads in a task */
+#define DB_MAX_PSETS 0x10000 /* max # of processor sets */
+
+task_t db_default_task; /* default target task */
+thread_t db_default_thread; /* default target thread */
+
+/*
+ * search valid task queue, and return the queue position as the task id
+ */
+int
+db_lookup_task(const task_t target_task)
+{
+ task_t task;
+ int task_id;
+ processor_set_t pset;
+ int npset = 0;
+
+ task_id = 0;
+ if (queue_first(&all_psets) == 0)
+ return(-1);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if (npset++ >= DB_MAX_PSETS)
+ return(-1);
+ if (queue_first(&pset->tasks) == 0)
+ continue;
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (target_task == task)
+ return(task_id);
+ if (task_id++ >= DB_MAX_TASKID)
+ return(-1);
+ }
+ }
+ return(-1);
+}
+
+/*
+ * search thread queue of the task, and return the queue position
+ */
+int
+db_lookup_task_thread(const task_t task, const thread_t target_thread)
+{
+ thread_t thread;
+ int thread_id;
+
+ thread_id = 0;
+ if (queue_first(&task->thread_list) == 0)
+ return(-1);
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ if (target_thread == thread)
+ return(thread_id);
+ if (thread_id++ >= DB_MAX_THREADID)
+ return(-1);
+ }
+ return(-1);
+}
+
+/*
+ * search thread queue of every valid task, and return the queue position
+ * as the thread id.
+ */
+int
+db_lookup_thread(const thread_t target_thread)
+{
+ int thread_id;
+ task_t task;
+ processor_set_t pset;
+ int ntask = 0;
+ int npset = 0;
+
+ if (queue_first(&all_psets) == 0)
+ return(-1);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if (npset++ >= DB_MAX_PSETS)
+ return(-1);
+ if (queue_first(&pset->tasks) == 0)
+ continue;
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (ntask++ > DB_MAX_TASKID)
+ return(-1);
+ if (task->thread_count == 0)
+ continue;
+ thread_id = db_lookup_task_thread(task, target_thread);
+ if (thread_id >= 0)
+ return(thread_id);
+ }
+ }
+ return(-1);
+}
+
+/*
+ * check the address is a valid thread address
+ */
+boolean_t
+db_check_thread_address_valid(const thread_t thread)
+{
+ if (db_lookup_thread(thread) < 0) {
+ db_printf("Bad thread address 0x%x\n", thread);
+ db_flush_lex();
+ return(FALSE);
+ } else
+ return(TRUE);
+}
+
+/*
+ * convert task_id(queue position) to task address
+ */
+static task_t
+db_lookup_task_id(int task_id)
+{
+ task_t task;
+ processor_set_t pset;
+ int npset = 0;
+
+ if (task_id > DB_MAX_TASKID)
+ return(TASK_NULL);
+ if (queue_first(&all_psets) == 0)
+ return(TASK_NULL);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if (npset++ >= DB_MAX_PSETS)
+ return(TASK_NULL);
+ if (queue_first(&pset->tasks) == 0)
+ continue;
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (task_id-- <= 0)
+ return(task);
+ }
+ }
+ return(TASK_NULL);
+}
+
+/*
+ * convert (task_id, thread_id) pair to thread address
+ */
+static thread_t
+db_lookup_thread_id(
+ task_t task,
+ int thread_id)
+{
+ thread_t thread;
+
+
+ if (thread_id > DB_MAX_THREADID)
+ return(THREAD_NULL);
+ if (queue_first(&task->thread_list) == 0)
+ return(THREAD_NULL);
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ if (thread_id-- <= 0)
+ return(thread);
+ }
+ return(THREAD_NULL);
+}
+
+/*
+ * get next parameter from a command line, and check it as a valid
+ * thread address
+ */
+boolean_t
+db_get_next_thread(
+ thread_t *threadp,
+ int position)
+{
+ db_expr_t value;
+ thread_t thread;
+
+ *threadp = THREAD_NULL;
+ if (db_expression(&value)) {
+ thread = (thread_t) value;
+ if (!db_check_thread_address_valid(thread)) {
+ db_flush_lex();
+ return(FALSE);
+ }
+ } else if (position <= 0) {
+ thread = db_default_thread;
+ } else
+ return(FALSE);
+ *threadp = thread;
+ return(TRUE);
+}
+
+/*
+ * check the default thread is still valid
+ * ( it is called in entering DDB session )
+ */
+void
+db_init_default_thread(void)
+{
+ if (db_lookup_thread(db_default_thread) < 0) {
+ db_default_thread = THREAD_NULL;
+ db_default_task = TASK_NULL;
+ } else
+ db_default_task = db_default_thread->task;
+}
+
+/*
+ * set or get default thread which is used when /t or :t option is specified
+ * in the command line
+ */
+/* ARGSUSED */
+void
+db_set_default_thread(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
+{
+ thread_t thread;
+
+ if (flag != DB_VAR_SET) {
+ *valuep = (db_expr_t) db_default_thread;
+ return;
+ }
+ thread = (thread_t) *valuep;
+ if (thread != THREAD_NULL && !db_check_thread_address_valid(thread))
+ db_error(0);
+ /* NOTREACHED */
+ db_default_thread = thread;
+ if (thread)
+ db_default_task = thread->task;
+ return;
+}
+
+/*
+ * convert $taskXXX[.YYY] type DDB variable to task or thread address
+ */
+void
+db_get_task_thread(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
+{
+ task_t task;
+ thread_t thread;
+
+ if (flag != DB_VAR_GET) {
+ db_error("Cannot set to $task variable\n");
+ /* NOTREACHED */
+ }
+ if ((task = db_lookup_task_id(ap->suffix[0])) == TASK_NULL) {
+ db_printf("no such task($task%d)\n", ap->suffix[0]);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ if (ap->level <= 1) {
+ *valuep = (db_expr_t) task;
+ return;
+ }
+ if ((thread = db_lookup_thread_id(task, ap->suffix[1])) == THREAD_NULL){
+ db_printf("no such thread($task%d.%d)\n",
+ ap->suffix[0], ap->suffix[1]);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ *valuep = (db_expr_t) thread;
+ return;
+}
+
+/*
+ * convert $mapXXX type DDB variable to map address
+ */
+void
+db_get_map(struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
+{
+ task_t task;
+
+ if (flag != DB_VAR_GET) {
+ db_error("Cannot set to $map variable\n");
+ /* NOTREACHED */
+ }
+
+ if ((task = db_lookup_task_id(ap->suffix[0])) == TASK_NULL) {
+ db_printf("no such map($map%d)\n", ap->suffix[0]);
+ db_error(0);
+ /* NOTREACHED */
+ }
+
+ *valuep = (db_expr_t) task->map;
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_task_thread.h b/ddb/db_task_thread.h
new file mode 100644
index 0000000..55ab4f5
--- /dev/null
+++ b/ddb/db_task_thread.h
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
#ifndef _DDB_DB_TASK_THREAD_H_
#define _DDB_DB_TASK_THREAD_H_

#include <ddb/db_variables.h>

#include <kern/task.h>
#include <kern/thread.h>

/* Task of the currently running thread, or TASK_NULL if there is none. */
#define db_current_task()						\
		((current_thread())? current_thread()->task: TASK_NULL)
/* Address space a command should examine: TASK_NULL selects the kernel;
   otherwise the given thread's task, falling back to the current task. */
#define db_target_space(thread, user_space)				\
		((!(user_space))? TASK_NULL:				\
		(thread)? (thread)->task: db_current_task())
/* TRUE when task denotes the current address space (or the kernel). */
#define db_is_current_task(task)					\
		((task) == TASK_NULL || (task) == db_current_task())

extern task_t	db_default_task;	/* default target task */
extern thread_t	db_default_thread;	/* default target thread */

/* Map task/thread addresses to small queue-position ids and validate
   them; the lookups return -1 when the object cannot be found. */
extern int db_lookup_task(const task_t);
extern int db_lookup_thread(const thread_t);
extern int db_lookup_task_thread(const task_t, const thread_t);
extern boolean_t db_check_thread_address_valid(const thread_t);
extern boolean_t db_get_next_thread(thread_t *, int);
extern void db_init_default_thread(void);

/* Handlers for the $thread, $taskXXX[.YYY] and $mapXXX debugger
   variables (see struct db_variable in db_variables.h). */
extern void
db_set_default_thread(
	struct db_variable	*vp,
	db_expr_t		*valuep,
	int			flag,
	db_var_aux_param_t	ap);

extern void
db_get_task_thread(
	struct db_variable	*vp,
	db_expr_t		*valuep,
	int			flag,
	db_var_aux_param_t	ap);

extern void
db_get_map(struct db_variable	*vp,
	   db_expr_t		*valuep,
	   int			flag,
	   db_var_aux_param_t	ap);

#endif	/* _DDB_DB_TASK_THREAD_H_ */
diff --git a/ddb/db_trap.c b/ddb/db_trap.c
new file mode 100644
index 0000000..cbb6bde
--- /dev/null
+++ b/ddb/db_trap.c
@@ -0,0 +1,115 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Trap entry point to kernel debugger.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <machine/setjmp.h>
+#include <ddb/db_command.h>
+#include <ddb/db_access.h>
+#include <ddb/db_break.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_output.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_trap.h>
+#include <ddb/db_run.h>
+#include <machine/db_interface.h>
+#include <kern/lock.h>
+
+
+extern jmp_buf_t *db_recover;
+
+extern int db_inst_count;
+extern int db_load_count;
+extern int db_store_count;
+
/*
 * Common trap entry into the kernel debugger: report why we stopped
 * (breakpoint, watchpoint, or otherwise), print the stop location, and
 * run the command loop until the user resumes execution.
 */
void
db_task_trap(
	int	type,
	int	code,
	boolean_t user_space)
{
	jmp_buf_t db_jmpbuf;
	jmp_buf_t *prev;
	boolean_t bkpt;
	boolean_t watchpt;
	task_t	  task_space;

	/* lock checking would trip over locks held by the stopped code */
	check_simple_locks_disable();

	task_space = db_target_space(current_thread(), user_space);
	bkpt = IS_BREAKPOINT_TRAP(type, code);
	watchpt = IS_WATCHPOINT_TRAP(type, code);

	db_init_default_thread();
	db_check_breakpoint_valid();
	if (db_stop_at_pc(&bkpt, task_space)) {
		if (db_inst_count) {
			db_printf("After %d instructions (%d loads, %d stores),\n",
				  db_inst_count, db_load_count, db_store_count);
		}
		if (bkpt)
			db_printf("Breakpoint at ");
		else if (watchpt)
			db_printf("Watchpoint at ");
		else
			db_printf("Stopped at ");
		db_dot = PC_REGS(DDB_REGS);

		/* catch faults while printing the stop location so a bad
		   pc cannot crash the debugger itself */
		prev = db_recover;
		if (_setjmp(db_recover = &db_jmpbuf) == 0)
			db_print_loc_and_inst(db_dot, task_space);
		else
			db_printf("Trouble printing location %#X.\n", db_dot);

		/* for an unexpected stop, also show a stack trace */
		if (!bkpt && !watchpt && _setjmp(db_recover = &db_jmpbuf) == 0)
			db_stack_trace_cmd(0, 0, -1, "");
		db_recover = prev;

		db_command_loop();
	}

	check_simple_locks_enable();
	db_restart_at_pc(watchpt, task_space);
}
+
/*
 * Trap hook: decide whether the faulting pc lies in user or kernel
 * space and hand off to db_task_trap().
 */
void
db_trap(
	int	type,
	int	code)
{
	db_task_trap(type, code, !DB_VALID_KERN_ADDR(PC_REGS(DDB_REGS)));
}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_trap.h b/ddb/db_trap.h
new file mode 100644
index 0000000..933fcd3
--- /dev/null
+++ b/ddb/db_trap.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+
#ifndef _DDB_DB_TRAP_H_
#define _DDB_DB_TRAP_H_

#include <sys/types.h>
#include <machine/db_machdep.h>

/* Enter the debugger for a trap of the given type/code; user_space
   selects whether the user task's address space is examined. */
extern void db_task_trap (
	int	type,
	int	code,
	boolean_t user_space);

/* As db_task_trap(), deciding user vs. kernel space from the trapped pc. */
extern void db_trap (int type, int code);

#endif /* _DDB_DB_TRAP_H_ */
diff --git a/ddb/db_variables.c b/ddb/db_variables.c
new file mode 100644
index 0000000..40f2d4d
--- /dev/null
+++ b/ddb/db_variables.c
@@ -0,0 +1,224 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+#include <machine/db_machdep.h>
+
+#include <ddb/db_command.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_macro.h>
+
+extern unsigned long db_maxoff;
+
+extern db_expr_t db_radix;
+extern db_expr_t db_max_width;
+extern db_expr_t db_tab_stop_width;
+extern db_expr_t db_max_line;
+
+#define DB_NWORK 32 /* number of work variable */
+
+db_expr_t db_work[DB_NWORK]; /* work variable */
+
/* Built-in debugger variables; trailing numbers are min_level,
   max_level, low, high (see struct db_variable in db_variables.h). */
struct db_variable db_vars[] = {
	{ "radix",	&db_radix,	FCN_NULL },	/* number radix */
	{ "maxoff",	(db_expr_t*)&db_maxoff, FCN_NULL },	/* max symbol offset */
	{ "maxwidth",	&db_max_width,	FCN_NULL },	/* output line width */
	{ "tabstops",	&db_tab_stop_width, FCN_NULL },	/* tab stop width */
	{ "lines",	&db_max_line,	FCN_NULL },	/* lines per page */
	{ "thread",	0,	db_set_default_thread },	/* default thread */
	{ "task",	0,	db_get_task_thread,
	  1,	2,	-1,	-1 },			/* $taskXXX[.YYY] */
	{ "map",	0,	db_get_map,
	  1,	1,	-1,	-1 },			/* $mapXXX */
	{ "work",	&db_work[0],	FCN_NULL,
	  1,	1,	0,	DB_NWORK-1 },		/* scratch variables */
	{ "arg",	0,	db_arg_variable,
	  1,	1,	-1,	-1 },			/* macro arguments */
};
/* one past the last entry of db_vars[] */
struct db_variable *db_evars = db_vars + sizeof(db_vars)/sizeof(db_vars[0]);
+
/*
 * Parse a decimal suffix ("12" in "$task12.3") into *suffix_value.
 * Returns a pointer past the suffix (skipping one '.' separator), or
 * 0 when a non-digit is found before the end/'.'/':' delimiter.
 */
static const char *
db_get_suffix(
	const char *suffix,
	short	   *suffix_value)
{
	int v = 0;

	while (*suffix != '\0' && *suffix != '.' && *suffix != ':') {
		if (*suffix < '0' || *suffix > '9')
			return(0);
		v = v * 10 + (*suffix - '0');
		suffix++;
	}
	*suffix_value = v;
	/* step over a '.' so the caller sees the next suffix level */
	if (*suffix == '.')
		suffix++;
	return(suffix);
}
+
/*
 * Match a token against variable vp and, on success, parse its numeric
 * suffixes ("$task2.1") into ap->suffix[] and its modifier string
 * (after ':') into ap->modif.  Returns TRUE on a full, valid match.
 */
static boolean_t
db_cmp_variable_name(
	struct db_variable *vp,
	char		   *name,
	const db_var_aux_param_t ap)
{
	char *var_np;
	const char *np;
	int level;

	/* the token must begin with the variable's name */
	for (np = name, var_np = vp->name; *var_np; ) {
		if (*np++ != *var_np++)
			return(FALSE);
	}
	/* collect up to max_level dot-separated decimal suffixes */
	for (level = 0; *np && *np != ':' && level < vp->max_level; level++){
		if ((np = db_get_suffix(np, &ap->suffix[level])) == 0)
			return(FALSE);
	}
	/* reject trailing junk, too few suffix levels, or a first suffix
	   outside [low, high] (a negative high means no upper bound) */
	if ((*np && *np != ':') || level < vp->min_level
	    || (level > 0 && (ap->suffix[0] < vp->low
			      || (vp->high >= 0 && ap->suffix[0] > vp->high))))
		return(FALSE);
	/* everything after ':' is the modifier string; the 't' option
	   selects the default thread as target */
	db_strcpy(ap->modif, (*np)? np+1: "");
	ap->thread = (db_option(ap->modif, 't')?db_default_thread: THREAD_NULL);
	ap->level = level;
	return(TRUE);
}
+
+static int
+db_find_variable(
+ struct db_variable **varp,
+ db_var_aux_param_t ap)
+{
+ int t;
+ struct db_variable *vp;
+
+ t = db_read_token();
+ if (t == tIDENT) {
+ for (vp = db_vars; vp < db_evars; vp++) {
+ if (db_cmp_variable_name(vp, db_tok_string, ap)) {
+ *varp = vp;
+ return (1);
+ }
+ }
+ for (vp = db_regs; vp < db_eregs; vp++) {
+ if (db_cmp_variable_name(vp, db_tok_string, ap)) {
+ *varp = vp;
+ return (1);
+ }
+ }
+ }
+ db_printf("Unknown variable \"$%s\"\n", db_tok_string);
+ db_error(0);
+ return (0);
+}
+
+int
+db_get_variable(db_expr_t *valuep)
+{
+ struct db_variable *vp;
+ struct db_var_aux_param aux_param;
+ char modif[TOK_STRING_SIZE];
+
+ aux_param.modif = modif;
+ if (!db_find_variable(&vp, &aux_param))
+ return (0);
+
+ db_read_write_variable(vp, valuep, DB_VAR_GET, &aux_param);
+
+ return (1);
+}
+
+void
+db_read_write_variable(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int rw_flag,
+ db_var_aux_param_t ap)
+{
+ void (*func)(struct db_variable *, db_expr_t *, int, db_var_aux_param_t) = vp->fcn;
+ struct db_var_aux_param aux_param;
+
+ if (ap == 0) {
+ ap = &aux_param;
+ ap->modif = "";
+ ap->level = 0;
+ ap->thread = THREAD_NULL;
+ }
+ if (func == FCN_NULL) {
+ if (rw_flag == DB_VAR_SET)
+ vp->valuep[(ap->level)? (ap->suffix[0] - vp->low): 0] = *valuep;
+ else
+ *valuep = vp->valuep[(ap->level)? (ap->suffix[0] - vp->low): 0];
+ } else
+ (*func)(vp, valuep, rw_flag, ap);
+}
+
+void
+db_set_cmd(void)
+{
+ db_expr_t value;
+ int t;
+ struct db_variable *vp;
+ struct db_var_aux_param aux_param;
+ char modif[TOK_STRING_SIZE];
+
+ aux_param.modif = modif;
+ t = db_read_token();
+ if (t != tDOLLAR) {
+ db_error("Variable name should be prefixed with $\n");
+ return;
+ }
+ if (!db_find_variable(&vp, &aux_param)) {
+ db_error("Unknown variable\n");
+ return;
+ }
+
+ t = db_read_token();
+ if (t != tEQ)
+ db_unread_token(t);
+
+ if (!db_expression(&value)) {
+ db_error("No value\n");
+ return;
+ }
+ if ((t = db_read_token()) == tSEMI_COLON)
+ db_unread_token(t);
+ else if (t != tEOL)
+ db_error("?\n");
+
+ db_read_write_variable(vp, &value, DB_VAR_SET, &aux_param);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_variables.h b/ddb/db_variables.h
new file mode 100644
index 0000000..9880d50
--- /dev/null
+++ b/ddb/db_variables.h
@@ -0,0 +1,88 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
#ifndef _DB_VARIABLES_H_
#define _DB_VARIABLES_H_

#include <kern/thread.h>
#include <machine/db_machdep.h>

/*
 * Debugger variables.
 */
struct db_var_aux_param;	/* forward */
typedef struct db_var_aux_param *db_var_aux_param_t;	/* forward */
struct db_variable {
	char	*name;		/* Name of variable */
	db_expr_t *valuep;	/* pointer to value of variable */
	/* function to call when reading/writing */
	void	(*fcn)(struct db_variable *, db_expr_t *, int, db_var_aux_param_t);
	short	min_level;	/* number of minimum suffix levels */
	short	max_level;	/* number of maximum suffix levels */
	short	low;		/* low value of level 1 suffix */
	short	high;		/* high value of level 1 suffix */
#define DB_VAR_GET	0
#define DB_VAR_SET	1
};
/* fcn value meaning "no handler: access *valuep directly" */
#define FCN_NULL	((void (*)())0)

#define DB_VAR_LEVEL	3	/* maximum number of suffix level */

#define db_read_variable(vp, valuep)	\
	db_read_write_variable(vp, valuep, DB_VAR_GET, 0)
#define db_write_variable(vp, valuep)	\
	db_read_write_variable(vp, valuep, DB_VAR_SET, 0)

/*
 * auxiliary parameters passed to a variable handler
 */
struct db_var_aux_param {
	char	*modif;			/* option strings */
	short	level;			/* number of levels */
	short	suffix[DB_VAR_LEVEL];	/* suffix */
	thread_t thread;		/* target thread */
};

/* Already defined above. */
/* typedef struct db_var_aux_param *db_var_aux_param_t; */


extern struct db_variable db_vars[];	/* debugger variables */
extern struct db_variable *db_evars;
extern struct db_variable db_regs[];	/* machine registers */
extern struct db_variable *db_eregs;

/* Parse a "$var" reference from the input and read its value;
   returns 0 on failure. */
extern int db_get_variable(db_expr_t *valuep);

/* The "set" command: set $var [=] value. */
void db_set_cmd(void);

void db_read_write_variable(struct db_variable *, db_expr_t *, int, struct db_var_aux_param *);

#endif /* _DB_VARIABLES_H_ */
diff --git a/ddb/db_watch.c b/ddb/db_watch.c
new file mode 100644
index 0000000..c3d2835
--- /dev/null
+++ b/ddb/db_watch.c
@@ -0,0 +1,329 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: Richard P. Draves, Carnegie Mellon University
+ * Date: 10/90
+ */
+
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/vm_param.h>
+#include <vm/vm_map.h>
+
+#include <machine/db_machdep.h>
+#include <machine/db_interface.h>
+#include <ddb/db_command.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_access.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_output.h>
+#include <ddb/db_run.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+
+
+
+/*
+ * Watchpoints.
+ */
+
+boolean_t db_watchpoints_inserted = TRUE;
+
+#define NWATCHPOINTS 100
+struct db_watchpoint db_watch_table[NWATCHPOINTS];
+db_watchpoint_t db_next_free_watchpoint = &db_watch_table[0];
+db_watchpoint_t db_free_watchpoints = 0;
+db_watchpoint_t db_watchpoint_list = 0;
+
+extern vm_map_t kernel_map;
+
+static db_watchpoint_t
+db_watchpoint_alloc(void)
+{
+ db_watchpoint_t watch;
+
+ if ((watch = db_free_watchpoints) != 0) {
+ db_free_watchpoints = watch->link;
+ return (watch);
+ }
+ if (db_next_free_watchpoint == &db_watch_table[NWATCHPOINTS]) {
+ db_printf("All watchpoints used.\n");
+ return (0);
+ }
+ watch = db_next_free_watchpoint;
+ db_next_free_watchpoint++;
+
+ return (watch);
+}
+
+static void
+db_watchpoint_free(db_watchpoint_t watch)
+{
+ watch->link = db_free_watchpoints;
+ db_free_watchpoints = watch;
+}
+
+void
+db_set_watchpoint(
+ const task_t task,
+ db_addr_t addr,
+ vm_size_t size)
+{
+ db_watchpoint_t watch;
+
+ /*
+ * Should we do anything fancy with overlapping regions?
+ */
+
+ for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
+ if (watch->task == task &&
+ (watch->loaddr == addr) &&
+ (watch->hiaddr == addr+size)) {
+ db_printf("Already set.\n");
+ return;
+ }
+ }
+
+ watch = db_watchpoint_alloc();
+ if (watch == 0) {
+ db_printf("Too many watchpoints.\n");
+ return;
+ }
+
+ watch->task = task;
+ watch->loaddr = addr;
+ watch->hiaddr = addr+size;
+
+ watch->link = db_watchpoint_list;
+ db_watchpoint_list = watch;
+
+ db_watchpoints_inserted = FALSE;
+}
+
+void
+db_delete_watchpoint(const task_t task, db_addr_t addr)
+{
+ db_watchpoint_t watch;
+ db_watchpoint_t *prev;
+
+ for (prev = &db_watchpoint_list; (watch = *prev) != 0;
+ prev = &watch->link) {
+ if (watch->task == task &&
+ (watch->loaddr <= addr) &&
+ (addr < watch->hiaddr)) {
+ *prev = watch->link;
+ db_watchpoint_free(watch);
+ return;
+ }
+ }
+
+ db_printf("Not set.\n");
+}
+
+void
+db_list_watchpoints(void)
+{
+ db_watchpoint_t watch;
+ int task_id;
+
+ if (db_watchpoint_list == 0) {
+ db_printf("No watchpoints set\n");
+ return;
+ }
+
+ db_printf("Space Address Size\n");
+ for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
+ if (watch->task == TASK_NULL)
+ db_printf("kernel ");
+ else {
+ task_id = db_lookup_task(watch->task);
+ if (task_id < 0)
+ db_printf("%*X", 2*sizeof(vm_offset_t), watch->task);
+ else
+ db_printf("task%-3d ", task_id);
+ }
+ db_printf(" %*X %X\n", 2*sizeof(vm_offset_t), watch->loaddr,
+ watch->hiaddr - watch->loaddr);
+ }
+}
+
+static int
+db_get_task(const char *modif, task_t *taskp, db_addr_t addr)
+{
+ task_t task = TASK_NULL;
+ db_expr_t value;
+ boolean_t user_space;
+
+ user_space = db_option(modif, 'T');
+ if (user_space) {
+ if (db_expression(&value)) {
+ task = (task_t)value;
+ if (db_lookup_task(task) < 0) {
+ db_printf("bad task address %X\n", task);
+ return(-1);
+ }
+ } else {
+ task = db_default_task;
+ if (task == TASK_NULL) {
+ if ((task = db_current_task()) == TASK_NULL) {
+ db_printf("no task\n");
+ return(-1);
+ }
+ }
+ }
+ }
+ if (!DB_VALID_ADDRESS(addr, user_space)) {
+ db_printf("Address %#X is not in %s space\n", addr,
+ (user_space)? "user": "kernel");
+ return(-1);
+ }
+ *taskp = task;
+ return(0);
+}
+
+/* Delete watchpoint */
+/*ARGSUSED*/
+void
+db_deletewatch_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ task_t task;
+
+ if (db_get_task(modif, &task, addr) < 0)
+ return;
+ db_delete_watchpoint(task, addr);
+}
+
+/* Set watchpoint */
+/*ARGSUSED*/
+void
+db_watchpoint_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ vm_size_t size;
+ db_expr_t value;
+ task_t task;
+
+ if (db_get_task(modif, &task, addr) < 0)
+ return;
+ if (db_expression(&value))
+ size = (vm_size_t) value;
+ else
+ size = sizeof(int);
+ db_set_watchpoint(task, addr, size);
+}
+
+/* list watchpoints */
+void
+db_listwatch_cmd(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ db_list_watchpoints();
+}
+
+void
+db_set_watchpoints(void)
+{
+ db_watchpoint_t watch;
+ vm_map_t map;
+ unsigned hw_idx = 0;
+
+ if (!db_watchpoints_inserted) {
+ for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
+ if (db_set_hw_watchpoint(watch, hw_idx)) {
+ hw_idx++;
+ continue;
+ }
+ map = (watch->task)? watch->task->map: kernel_map;
+ pmap_protect(map->pmap,
+ trunc_page(watch->loaddr),
+ round_page(watch->hiaddr),
+ VM_PROT_READ);
+ }
+ db_watchpoints_inserted = TRUE;
+ }
+}
+
+void
+db_clear_watchpoints(void)
+{
+ unsigned hw_idx = 0;
+
+ while (db_clear_hw_watchpoint(hw_idx))
+ hw_idx++;
+
+ db_watchpoints_inserted = FALSE;
+}
+
/*
 * Decide whether a fault at addr was caused by one of our watchpoints.
 * Returns TRUE only for an exact hit inside a watched range; a fault
 * that is merely within a protected page arms single-stepping so the
 * access can complete and the page be re-protected afterwards.
 */
boolean_t
db_find_watchpoint(
	vm_map_t map,
	db_addr_t addr,
	db_regs_t *regs)
{
	db_watchpoint_t watch;
	db_watchpoint_t found = 0;
	task_t task_space;

	/* kernel-map faults match watchpoints with task == TASK_NULL */
	task_space = (map == kernel_map)? TASK_NULL: db_current_task();
	for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
		if (watch->task == task_space) {
			if ((watch->loaddr <= addr) && (addr < watch->hiaddr))
				return (TRUE);
			else if ((trunc_page(watch->loaddr) <= addr) &&
				 (addr < round_page(watch->hiaddr)))
				found = watch;
		}
	}

	/*
	 * We didn't hit exactly on a watchpoint, but we are
	 * in a protected region.  We want to single-step
	 * and then re-protect.
	 */

	if (found) {
		db_watchpoints_inserted = FALSE;
		db_single_step(regs, task_space);
	}

	return (FALSE);
}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_watch.h b/ddb/db_watch.h
new file mode 100644
index 0000000..86f07fb
--- /dev/null
+++ b/ddb/db_watch.h
@@ -0,0 +1,80 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 10/90
+ */
+
#if MACH_KDB

#ifndef _DDB_DB_WATCH_
#define _DDB_DB_WATCH_

#include <mach/machine/vm_types.h>
#include <kern/task.h>
#include <machine/db_machdep.h>

/*
 * Watchpoint.
 */

typedef struct db_watchpoint {
	task_t task;			/* in this map */
	db_addr_t loaddr;		/* from this address */
	db_addr_t hiaddr;		/* to this address */
	struct db_watchpoint *link;	/* link in in-use or free chain */
} *db_watchpoint_t;

extern boolean_t db_find_watchpoint(vm_map_t map, db_addr_t addr,
				    db_regs_t *regs);
extern void db_set_watchpoints(void);
extern void db_clear_watchpoints(void);

extern void db_set_watchpoint(const task_t task, db_addr_t addr, vm_size_t size);
extern void db_delete_watchpoint(const task_t task, db_addr_t addr);
extern void db_list_watchpoints(void);

/*
 * Debugger commands.  have_addr is declared int to match the
 * definitions in db_watch.c (db_listwatch_cmd was previously
 * declared with boolean_t, inconsistently with its definition
 * and with the other two commands).
 */
void db_listwatch_cmd(
	db_expr_t	addr,
	int		have_addr,
	db_expr_t	count,
	const char *	modif);

void db_deletewatch_cmd(
	db_expr_t	addr,
	int		have_addr,
	db_expr_t	count,
	const char *	modif);

void db_watchpoint_cmd(
	db_expr_t	addr,
	int		have_addr,
	db_expr_t	count,
	const char *	modif);

#endif	/* _DDB_DB_WATCH_ */

#endif	/* MACH_KDB */
diff --git a/ddb/db_write_cmd.c b/ddb/db_write_cmd.c
new file mode 100644
index 0000000..cfc2b70
--- /dev/null
+++ b/ddb/db_write_cmd.c
@@ -0,0 +1,111 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+
+#include <machine/db_machdep.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_expr.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_write_cmd.h>
+
+
+
+/*
+ * Write to task or kernel memory.
+ */
+/*ARGSUSED*/
+void
+db_write_cmd(
+ db_expr_t address,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif)
+{
+ db_addr_t addr;
+ db_expr_t old_value;
+ db_expr_t new_value;
+ int size;
+ boolean_t wrote_one = FALSE;
+ boolean_t t_opt, u_opt;
+ thread_t thread;
+ task_t task;
+
+ addr = (db_addr_t) address;
+
+ size = db_size_option(modif, &u_opt, &t_opt);
+ if (t_opt)
+ {
+ if (!db_get_next_thread(&thread, 0))
+ return;
+ task = thread->task;
+ }
+ else
+ task = db_current_task();
+
+ /* if user space is not explicitly specified,
+ look in the kernel */
+ if (!u_opt)
+ task = TASK_NULL;
+
+ if (!DB_VALID_ADDRESS(addr, u_opt)) {
+ db_printf("Bad address %#*X\n", 2*sizeof(vm_offset_t), addr);
+ return;
+ }
+
+ while (db_expression(&new_value)) {
+ old_value = db_get_task_value(addr, size, FALSE, task);
+ db_task_printsym(addr, DB_STGY_ANY, task);
+ db_printf("\t\t%#*N\t=\t%#*N\n",
+ 2*sizeof(db_expr_t), old_value,
+ 2*sizeof(db_expr_t), new_value);
+ db_put_task_value(addr, size, new_value, task);
+ addr += size;
+
+ wrote_one = TRUE;
+ }
+
+ if (!wrote_one)
+ db_error("Nothing written.\n");
+
+ db_next = addr;
+ db_prev = addr - size;
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_write_cmd.h b/ddb/db_write_cmd.h
new file mode 100644
index 0000000..3a1d057
--- /dev/null
+++ b/ddb/db_write_cmd.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DDB_DB_WRITE_CMD_H_
+#define _DDB_DB_WRITE_CMD_H_
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+/* Prototypes for functions exported by this module.
+ */
+
+void db_write_cmd(
+ db_expr_t address,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char * modif);
+
+#endif /* !_DDB_DB_WRITE_CMD_H_ */
diff --git a/ddb/nlist.h b/ddb/nlist.h
new file mode 100644
index 0000000..b948dfd
--- /dev/null
+++ b/ddb/nlist.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * nlist.h - symbol table entry structure for an a.out file
+ * derived from FSF's a.out.gnu.h
+ *
+ */
+
+#ifndef _DDB_NLIST_H_
+#define _DDB_NLIST_H_
+
+struct nlist {
+ union n_un {
+ char *n_name; /* symbol name */
+ long n_strx; /* index into file string table */
+ } n_un;
+ unsigned char n_type; /* type flag, i.e. N_TEXT etc; see below */
+ unsigned char n_other; /* machdep uses */
+ short n_desc; /* see <stab.h> */
+#if alpha
+ int n_pad; /* alignment, used to carry framesize info */
+#endif
+ vm_offset_t n_value; /* value of this symbol (or sdb offset) */
+};
+
+/*
+ * Simple values for n_type.
+ */
+#define N_UNDF 0 /* undefined */
+#define N_ABS 2 /* absolute */
+#define N_TEXT 4 /* text */
+#define N_DATA 6 /* data */
+#define N_BSS 8 /* bss */
+#define N_FN 0x1f /* file name symbol */
+#define N_EXT 1 /* external bit, or'ed in */
+#define N_TYPE 0x1e /* mask for all the type bits */
+#define N_STAB 0xe0 /* if any of these bits set, a SDB entry */
+
+
+#endif /* _DDB_NLIST_H_ */
diff --git a/ddb/stab.h b/ddb/stab.h
new file mode 100644
index 0000000..55e9d45
--- /dev/null
+++ b/ddb/stab.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stab.h 5.2 (Berkeley) 4/4/91
+ */
+
+#ifndef _DDB_STAB_H_
+#define _DDB_STAB_H_
+
+/*
+ * The following are symbols used by various debuggers and by the Pascal
+ * compiler. Each of them must have one (or more) of the bits defined by
+ * the N_STAB mask set.
+ */
+
+#define N_GSYM 0x20 /* global symbol */
+#define N_FNAME 0x22 /* F77 function name */
+#define N_FUN 0x24 /* procedure name */
+#define N_STSYM 0x26 /* data segment variable */
+#define N_LCSYM 0x28 /* bss segment variable */
+#define N_MAIN 0x2a /* main function name */
+#define N_PC 0x30 /* global Pascal symbol */
+#define N_FRAME 0x34 /* stack frame descriptor */
+#define N_RSYM 0x40 /* register variable */
+#define N_SLINE 0x44 /* text segment line number */
+#define N_DSLINE 0x46 /* data segment line number */
+#define N_BSLINE 0x48 /* bss segment line number */
+#define N_SSYM 0x60 /* structure/union element */
+#define N_SO 0x64 /* main source file name */
+#define N_LSYM 0x80 /* stack variable */
+#define N_BINCL 0x82 /* include file beginning */
+#define N_SOL 0x84 /* included source file name */
+#define N_PSYM 0xa0 /* parameter variable */
+#define N_EINCL 0xa2 /* include file end */
+#define N_ENTRY 0xa4 /* alternate entry point */
+#define N_LBRAC 0xc0 /* left bracket */
+#define N_EXCL 0xc2 /* deleted include file */
+#define N_RBRAC 0xe0 /* right bracket */
+#define N_BCOMM 0xe2 /* begin common */
+#define N_ECOMM 0xe4 /* end common */
+#define N_ECOML 0xe8 /* end common (local name) */
+#define N_LENG 0xfe /* length of preceding entry */
+
+#endif /* _DDB_STAB_H_ */
diff --git a/ddb/tr.h b/ddb/tr.h
new file mode 100644
index 0000000..2d058ca
--- /dev/null
+++ b/ddb/tr.h
@@ -0,0 +1,117 @@
+/*
+ * (c) Copyright 1992, 1993, 1994, 1995 OPEN SOFTWARE FOUNDATION, INC.
+ * ALL RIGHTS RESERVED
+ */
+/*
+ * OSF RI nmk19b2 5/2/95
+ */
+
+/*
+ * File: ddb/tr.h
+ * Author: Alan Langerman, Jeffrey Heller
+ * Date: 1992
+ *
+ * Internal trace routines. Like old-style XPRs but
+ * less formatting.
+ */
+
+#ifndef NDEBUG
+#define MACH_ASSERT 1
+#else
+#define MACH_ASSERT 0
+#endif
+
+#include <mach_tr.h>
+
+/*
+ * Originally, we only wanted tracing when
+ * MACH_TR and MACH_ASSERT were turned on
+ * together. Now, there's no reason why
+ * MACH_TR and MACH_ASSERT can't be completely
+ * orthogonal.
+ */
+#define TRACE_BUFFER (MACH_TR)
+
+/*
+ * Log events in a circular trace buffer for future debugging.
+ * Events are unsigned integers. Each event has a descriptive
+ * message.
+ *
+ * TR_DECL must be used at the beginning of a routine using
+ * one of the tr calls. The macro should be passed the name
+ * of the function surrounded by quotation marks, e.g.,
+ * TR_DECL("netipc_recv_intr");
+ * and should be terminated with a semi-colon. The TR_DECL
+ * must be the *last* declaration in the variable declaration
+ * list, or syntax errors will be introduced when TRACE_BUFFER
+ * is turned off.
+ */
+#ifndef _DDB_TR_H_
+#define _DDB_TR_H_
+
+#if TRACE_BUFFER
+
+#include <machine/db_machdep.h>
+
+#define __ui__ (unsigned int)
+#define TR_INIT() tr_init()
+#define TR_SHOW(a,b,c) show_tr((a),(b),(c))
+#define TR_DECL(funcname) char *__ntr_func_name__ = funcname
+#define tr1(msg) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ 0,0,0,0)
+#define tr2(msg,tag1) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),0,0,0)
+#define tr3(msg,tag1,tag2) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),__ui__(tag2),0,0)
+#define tr4(msg,tag1,tag2,tag3) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),__ui__(tag2),__ui__(tag3),0)
+#define tr5(msg,tag1,tag2,tag3,tag4) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),__ui__(tag2),__ui__(tag3),__ui__(tag4))
+
+/*
+ * Adjust tr log indentation based on function
+ * call graph; this method is quick-and-dirty
+ * and only works safely on a uniprocessor.
+ */
+extern int tr_indent;
+#define tr_start() tr_indent++
+#define tr_stop() tr_indent--
+
+extern void tr_init(void);
+extern void tr(
+ char *funcname,
+ char *file,
+ unsigned int lineno,
+ char *fmt,
+ unsigned int tag1,
+ unsigned int tag2,
+ unsigned int tag3,
+ unsigned int tag4);
+
+extern void db_show_tr(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+#else /* TRACE_BUFFER */
+
+#define TR_INIT()
+#define TR_SHOW(a,b,c)
+#define TR_DECL(funcname)
+#define tr1(msg)
+#define tr2(msg, tag1)
+#define tr3(msg, tag1, tag2)
+#define tr4(msg, tag1, tag2, tag3)
+#define tr5(msg, tag1, tag2, tag3, tag4)
+#define tr_start()
+#define tr_stop()
+
+#endif /* TRACE_BUFFER */
+
+#endif /* _DDB_TR_H_ */
diff --git a/device/blkio.c b/device/blkio.c
new file mode 100644
index 0000000..0dfa33c
--- /dev/null
+++ b/device/blkio.c
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/89
+ *
+ * Block IO driven from generic kernel IO interface.
+ */
+#include <mach/kern_return.h>
+
+#include <device/blkio.h>
+#include <device/buf.h>
+#include <device/param.h>
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+
+
+/*
+ * 'standard' max_count routine. VM continuations mean that this
+ * code can cope with arbitrarily-sized write operations (they won't be
+ * atomic, but any caller that cares will do the op synchronously).
+ */
+#define MAX_PHYS (256 * 1024)
+
+void minphys(io_req_t ior)
+{
+ if ((ior->io_op & (IO_WRITE | IO_READ | IO_OPEN)) == IO_WRITE)
+ return;
+
+ if (ior->io_count > MAX_PHYS)
+ ior->io_count = MAX_PHYS;
+}
+
+/*
+ * Dummy routine placed in device switch entries to indicate that
+ * block device may be mapped.
+ */
+vm_offset_t block_io_mmap(dev_t dev, vm_offset_t off, int prot)
+{
+ return (0);
+}
+
diff --git a/device/blkio.h b/device/blkio.h
new file mode 100644
index 0000000..b188f38
--- /dev/null
+++ b/device/blkio.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_BLKIO_H_
+#define _DEVICE_BLKIO_H_
+
+#include <sys/types.h>
+
+extern vm_offset_t block_io_mmap(dev_t dev, vm_offset_t off, int prot);
+
+#endif /* _DEVICE_BLKIO_H_ */
diff --git a/device/buf.h b/device/buf.h
new file mode 100644
index 0000000..7c8a436
--- /dev/null
+++ b/device/buf.h
@@ -0,0 +1,96 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/90
+ *
+ * Definitions to make new IO structures look like old ones
+ */
+
+#ifndef _DEVICE_BUF_H_
+#define _DEVICE_BUF_H_
+
+/*
+ * io_req and fields
+ */
+#include <device/io_req.h>
+
+#define buf io_req
+
+/*
+ * Redefine fields for drivers using old names
+ */
+#define b_flags io_op
+#define b_bcount io_count
+#define b_error io_error
+#define b_dev io_unit
+#define b_blkno io_recnum
+#define b_resid io_residual
+#define b_un io_un
+#define b_addr data
+#define av_forw io_next
+#define av_back io_prev
+#define b_physblock io_physrec
+#define b_blocktotal io_rectotal
+
+/*
+ * Redefine fields for driver request list heads, using old names.
+ */
+#define b_actf io_next
+#define b_actl io_prev
+#define b_forw io_link
+#define b_back io_rlink
+#define b_active io_count
+#define b_errcnt io_residual
+#define b_bufsize io_alloc_size
+
+/*
+ * Redefine flags
+ */
+#define B_WRITE IO_WRITE
+#define B_READ IO_READ
+#define B_OPEN IO_OPEN
+#define B_DONE IO_DONE
+#define B_ERROR IO_ERROR
+#define B_BUSY IO_BUSY
+#define B_WANTED IO_WANTED
+#define B_BAD IO_BAD
+#define B_CALL IO_CALL
+
+#define B_MD1 IO_SPARE_START
+
+/*
+ * Export standard minphys routine.
+ */
+extern void minphys(io_req_t);
+
+/*
+ * Alternate name for iodone
+ */
+#define biodone iodone
+#define biowait iowait
+
+#endif /* _DEVICE_BUF_H_ */
diff --git a/device/chario.c b/device/chario.c
new file mode 100644
index 0000000..3fe93cc
--- /dev/null
+++ b/device/chario.c
@@ -0,0 +1,1060 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/88
+ *
+ * TTY io.
+ * Compatibility with old TTY device drivers.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/vm_param.h>
+#include <machine/machspl.h> /* spl definitions */
+
+#include <ipc/ipc_port.h>
+
+#include <kern/lock.h>
+#include <kern/queue.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+#include <device/device_reply.user.h>
+#include <device/chario.h>
+
+#include <device/tty.h>
+
+/* If you change these, check that tty_outq_size and tty_inq_size
+ * are greater than the largest tthiwat entry.
+ */
+short tthiwat[NSPEEDS] =
+ { 100,100,100,100,100,100,100,200,200,400,400,400,650,650,1300,2000,
+ 2000,2000 };
+short ttlowat[NSPEEDS] =
+ { 30, 30, 30, 30, 30, 30, 30, 50, 50,120,120,120,125,125, 125, 125,
+ 125,125 };
+
+/*
+ * Fake 'line discipline' switch for the benefit of old code
+ * that wants to call through it.
+ */
+struct ldisc_switch linesw[] = {
+ {
+ char_read,
+ char_write,
+ ttyinput,
+ ttymodem,
+ tty_output
+ }
+};
+
+/*
+ * Sizes for input and output circular buffers.
+ */
+const unsigned int tty_inq_size = 4096;	/* big enough */
+const unsigned int tty_outq_size = 2048;	/* Must be bigger than tthiwat */
+boolean_t pdma_default = TRUE; /* turn pseudo dma on by default */
+
+/*
+ * compute pseudo-dma tables
+ */
+
+int pdma_timeouts[NSPEEDS]; /* how many ticks in timeout */
+int pdma_water_mark[NSPEEDS];
+
+
+void chario_init(void)
+{
+	/* the basic idea with the timeouts is to allow enough
+	   time for a character to show up if data is coming in at full data rate
+	   plus a little slack. 2 ticks are considered slack.
+	   Below 300 baud we just glob a character at a time */
+#define _PR(x) ((hz/x) + 2)
+
+ int i;
+
+ for (i = B0; i < B300; i++)
+ pdma_timeouts[i] = 0;
+
+ pdma_timeouts[B300] = _PR(30);
+ pdma_timeouts[B600] = _PR(60);
+ pdma_timeouts[B1200] = _PR(120);
+ pdma_timeouts[B1800] = _PR(180);
+ pdma_timeouts[B2400] = _PR(240);
+ pdma_timeouts[B4800] = _PR(480);
+ pdma_timeouts[B9600] = _PR(960);
+ pdma_timeouts[EXTA] = _PR(1440); /* >14400 baud */
+ pdma_timeouts[EXTB] = _PR(1920); /* >19200 baud */
+ pdma_timeouts[B57600] = _PR(5760);
+ pdma_timeouts[B115200] = _PR(11520);
+
+ for (i = B0; i < B300; i++)
+ pdma_water_mark[i] = 0;
+
+ /* for the slow speeds, we try to buffer 0.02 of the baud rate
+ (20% of the character rate). For the faster lines,
+ we try to buffer 1/2 the input queue size */
+
+#undef _PR
+#define _PR(x) (0.20 * x)
+
+ pdma_water_mark[B300] = _PR(120);
+ pdma_water_mark[B600] = _PR(120);
+ pdma_water_mark[B1200] = _PR(120);
+ pdma_water_mark[B1800] = _PR(180);
+ pdma_water_mark[B2400] = _PR(240);
+ pdma_water_mark[B4800] = _PR(480);
+ i = tty_inq_size/2;
+ pdma_water_mark[B9600] = i;
+ pdma_water_mark[EXTA] = i; /* >14400 baud */
+ pdma_water_mark[EXTB] = i; /* >19200 baud */
+ pdma_water_mark[B57600] = i;
+ pdma_water_mark[B115200] = i;
+
+ return;
+}
+
+/*
+ * Open TTY, waiting for CARR_ON.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+io_return_t char_open(
+ int dev,
+ struct tty * tp,
+ dev_mode_t mode,
+ io_req_t ior)
+{
+ spl_t s;
+ io_return_t rc = D_SUCCESS;
+
+ s = simple_lock_irq(&tp->t_lock);
+
+ tp->t_dev = dev;
+
+ if (tp->t_mctl)
+ (*tp->t_mctl)(tp, TM_DTR, DMSET);
+
+ if (pdma_default)
+ tp->t_state |= TS_MIN;
+
+ if ((tp->t_state & TS_CARR_ON) == 0) {
+ /*
+ * No carrier.
+ */
+ if (mode & D_NODELAY) {
+ tp->t_state |= TS_ONDELAY;
+ }
+ else {
+ /*
+ * Don`t return from open until carrier detected.
+ */
+ tp->t_state |= TS_WOPEN;
+
+ ior->io_dev_ptr = (char *)tp;
+
+ queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done);
+ rc = D_IO_QUEUED;
+ goto out;
+ }
+ }
+ tp->t_state |= TS_ISOPEN;
+ if (tp->t_mctl)
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+out:
+ simple_unlock_irq(s, &tp->t_lock);
+ return rc;
+}
+
+/*
+ * Retry wait for CARR_ON for open.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+boolean_t char_open_done(
+ io_req_t ior)
+{
+ struct tty *tp = (struct tty *)ior->io_dev_ptr;
+ spl_t s;
+
+ s = simple_lock_irq(&tp->t_lock);
+ if ((tp->t_state & TS_ISOPEN) == 0) {
+ queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done);
+ simple_unlock_irq(s, &tp->t_lock);
+ return FALSE;
+ }
+
+ tp->t_state |= TS_ISOPEN;
+ tp->t_state &= ~TS_WOPEN;
+
+ if (tp->t_mctl)
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+
+ simple_unlock_irq(s, &tp->t_lock);
+
+ ior->io_error = D_SUCCESS;
+ (void) ds_open_done(ior);
+ return TRUE;
+}
+
+static boolean_t tty_close_open_reply(
+ io_req_t ior)
+{
+ ior->io_error = D_DEVICE_DOWN;
+ (void) ds_open_done(ior);
+ return TRUE;
+}
+
+/*
+ * Write to TTY.
+ * No locks may be held.
+ * Calls device start routine; must already be on master if
+ * device needs to run on master.
+ */
+io_return_t char_write(
+ struct tty * tp,
+ io_req_t ior)
+{
+ spl_t s;
+ int count;
+ char *data;
+ vm_offset_t addr = 0;
+ io_return_t rc = D_SUCCESS;
+
+ data = ior->io_data;
+ count = ior->io_count;
+ if (count == 0)
+ return rc;
+
+ if (!(ior->io_op & IO_INBAND)) {
+ /*
+ * Copy out-of-line data into kernel address space.
+ * Since data is copied as page list, it will be
+ * accessible.
+ */
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ kern_return_t kr;
+
+ kr = vm_map_copyout(device_io_map, &addr, copy);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ data = (char *) addr;
+ }
+
+ /*
+ * Check for tty operating.
+ */
+ s = simple_lock_irq(&tp->t_lock);
+
+ if ((tp->t_state & TS_CARR_ON) == 0) {
+
+ if ((tp->t_state & TS_ONDELAY) == 0) {
+ /*
+ * No delayed writes - tell caller that device is down
+ */
+ rc = D_IO_ERROR;
+ goto out;
+ }
+
+ if (ior->io_mode & D_NOWAIT) {
+ rc = D_WOULD_BLOCK;
+ goto out;
+ }
+ }
+
+ /*
+ * Copy data into the output buffer.
+ * Report the amount not copied.
+ */
+
+ ior->io_residual = b_to_q(data, count, &tp->t_outq);
+
+ /*
+ * Start hardware output.
+ */
+
+ tp->t_state &= ~TS_TTSTOP;
+ tty_output(tp);
+
+ if (tp->t_outq.c_cc > TTHIWAT(tp) ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ /*
+ * Do not send reply until some characters have been sent.
+ */
+ ior->io_dev_ptr = (char *)tp;
+ queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
+
+ rc = D_IO_QUEUED;
+ }
+out:
+ simple_unlock_irq(s, &tp->t_lock);
+
+ if (!(ior->io_op & IO_INBAND))
+ (void) vm_deallocate(device_io_map, addr, ior->io_count);
+ return rc;
+}
+
+/*
+ * Retry wait for output queue emptied, for write.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+boolean_t char_write_done(
+ io_req_t ior)
+{
+ struct tty *tp = (struct tty *)ior->io_dev_ptr;
+ spl_t s;
+
+ s = simple_lock_irq(&tp->t_lock);
+ if (tp->t_outq.c_cc > TTHIWAT(tp) ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
+ simple_unlock_irq(s, &tp->t_lock);
+ return FALSE;
+ }
+ simple_unlock_irq(s, &tp->t_lock);
+
+ if (IP_VALID(ior->io_reply_port)) {
+ (void) (*((ior->io_op & IO_INBAND) ?
+ ds_device_write_reply_inband :
+ ds_device_write_reply))(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (int) (ior->io_total -
+ ior->io_residual));
+ }
+ mach_device_deallocate(ior->io_device);
+ return TRUE;
+}
+
+static boolean_t tty_close_write_reply(
+ io_req_t ior)
+{
+ ior->io_residual = ior->io_count;
+ ior->io_error = D_DEVICE_DOWN;
+ (void) ds_write_done(ior);
+ return TRUE;
+}
+
+/*
+ * Read from TTY.
+ * No locks may be held.
+ * May run on any CPU - does not talk to device driver.
+ */
+io_return_t char_read(
+ struct tty *tp,
+ io_req_t ior)
+{
+ spl_t s;
+ kern_return_t rc;
+
+ /*
+ * Allocate memory for read buffer.
+ */
+ rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ if (rc != KERN_SUCCESS)
+ return rc;
+
+ s = simple_lock_irq(&tp->t_lock);
+ if ((tp->t_state & TS_CARR_ON) == 0) {
+
+ if ((tp->t_state & TS_ONDELAY) == 0) {
+ /*
+			 * No delayed reads - tell caller that device is down
+ */
+ rc = D_IO_ERROR;
+ goto out;
+ }
+
+ if (ior->io_mode & D_NOWAIT) {
+ rc = D_WOULD_BLOCK;
+ goto out;
+ }
+
+ }
+
+ if (tp->t_inq.c_cc <= 0 ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ ior->io_dev_ptr = (char *)tp;
+ queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
+ rc = D_IO_QUEUED;
+ goto out;
+ }
+
+ ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
+ ior->io_data,
+ (int)ior->io_count);
+ if (tp->t_state & TS_RTS_DOWN) {
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+ tp->t_state &= ~TS_RTS_DOWN;
+ }
+
+ out:
+ simple_unlock_irq(s, &tp->t_lock);
+ return rc;
+}
+
+/*
+ * Retry wait for characters, for read.
+ * No locks may be held.
+ * May run on any CPU - does not talk to device driver.
+ */
+boolean_t char_read_done(
+ io_req_t ior)
+{
+ struct tty *tp = (struct tty *)ior->io_dev_ptr;
+ spl_t s;
+
+ s = simple_lock_irq(&tp->t_lock);
+
+ if (tp->t_inq.c_cc <= 0 ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
+ simple_unlock_irq(s, &tp->t_lock);
+ return FALSE;
+ }
+
+ ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
+ ior->io_data,
+ (int)ior->io_count);
+ if (tp->t_state & TS_RTS_DOWN) {
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+ tp->t_state &= ~TS_RTS_DOWN;
+ }
+
+ simple_unlock_irq(s, &tp->t_lock);
+
+ (void) ds_read_done(ior);
+ return TRUE;
+}
+
+static boolean_t tty_close_read_reply(
+ io_req_t ior)
+{
+ ior->io_residual = ior->io_count;
+ ior->io_error = D_DEVICE_DOWN;
+ (void) ds_read_done(ior);
+ return TRUE;
+}
+
+/*
+ * Close the tty.
+ * Tty must be locked (at spltty).
+ * Iff modem control should run on master.
+ */
+void ttyclose(
+ struct tty *tp)
+{
+ io_req_t ior;
+
+ /*
+ * Flush the read and write queues. Signal
+ * the open queue so that those waiting for open
+ * to complete will see that the tty is closed.
+ */
+ while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_read)) != 0) {
+ ior->io_done = tty_close_read_reply;
+ iodone(ior);
+ }
+ while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_write)) != 0) {
+ ior->io_done = tty_close_write_reply;
+ iodone(ior);
+ }
+ while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_open)) != 0) {
+ ior->io_done = tty_close_open_reply;
+ iodone(ior);
+ }
+
+ /* Close down modem */
+ if (tp->t_mctl) {
+ (*tp->t_mctl)(tp, TM_BRK|TM_RTS, DMBIC);
+ if ((tp->t_state&(TS_HUPCLS|TS_WOPEN)) || (tp->t_state&TS_ISOPEN)==0)
+ (*tp->t_mctl)(tp, TM_HUP, DMSET);
+ }
+
+ /* only save buffering bit, and carrier */
+ tp->t_state = tp->t_state & (TS_MIN|TS_CARR_ON);
+}
+
+/*
+ * Port-death routine to clean up reply messages.
+ */
+static boolean_t
+tty_queue_clean(
+ queue_t q,
+ const ipc_port_t port,
+ boolean_t (*routine)(io_req_t) )
+{
+ io_req_t ior;
+
+ ior = (io_req_t)queue_first(q);
+ while (!queue_end(q, (queue_entry_t)ior)) {
+ if (ior->io_reply_port == port) {
+ remqueue(q, (queue_entry_t)ior);
+ ior->io_done = routine;
+ iodone(ior);
+ return TRUE;
+ }
+ ior = ior->io_next;
+ }
+ return FALSE;
+}
+
+/*
+ * Handle port-death (dead reply port) for tty.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+boolean_t
+tty_portdeath(
+ struct tty * tp,
+ const ipc_port_t port)
+{
+ spl_t spl;
+ boolean_t result;
+
+ spl = simple_lock_irq(&tp->t_lock);
+
+ /*
+ * The queues may never have been initialized
+ */
+ if (tp->t_delayed_read.next == 0) {
+ result = FALSE;
+ }
+ else {
+ result =
+ tty_queue_clean(&tp->t_delayed_read, port,
+ tty_close_read_reply)
+ || tty_queue_clean(&tp->t_delayed_write, port,
+ tty_close_write_reply)
+ || tty_queue_clean(&tp->t_delayed_open, port,
+ tty_close_open_reply);
+ }
+ simple_unlock_irq(spl, &tp->t_lock);
+
+ return result;
+}
+
+/*
+ * Get TTY status.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+io_return_t tty_get_status(
+	struct tty	*tp,
+	dev_flavor_t	flavor,
+	int *		data,		/* pointer to OUT array */
+	natural_t	*count)		/* out */
+{
+	spl_t	s;
+
+	switch (flavor) {
+	    case TTY_STATUS:
+	    {
+		struct tty_status *tsp =
+			(struct tty_status *) data;
+
+		/* Caller's buffer must be large enough for the reply. */
+		if (*count < TTY_STATUS_COUNT)
+		    return (D_INVALID_OPERATION);
+
+		/* Snapshot the tty state under the lock. */
+		s = simple_lock_irq(&tp->t_lock);
+
+		tsp->tt_ispeed	= tp->t_ispeed;
+		tsp->tt_ospeed	= tp->t_ospeed;
+		tsp->tt_breakc	= tp->t_breakc;
+		tsp->tt_flags	= tp->t_flags;
+		/* TS_HUPCLS lives in t_state; surface it as a flag bit. */
+		if (tp->t_state & TS_HUPCLS)
+		    tsp->tt_flags |= TF_HUPCLS;
+
+		simple_unlock_irq(s, &tp->t_lock);
+
+		*count = TTY_STATUS_COUNT;
+		break;
+
+	    }
+	    default:
+		return D_INVALID_OPERATION;
+	}
+	return D_SUCCESS;
+}
+
+/*
+ * Set TTY status.
+ * No locks may be held.
+ * Calls device start or stop routines; must already be on master if
+ * device needs to run on master.
+ */
+io_return_t tty_set_status(
+	struct tty	*tp,
+	dev_flavor_t	flavor,
+	int *		data,
+	natural_t	count)
+{
+	spl_t	s;	/* simple_lock_irq returns spl_t; 'int' lost that
+			   intent and was inconsistent with tty_get_status */
+
+	switch (flavor) {
+	    case TTY_FLUSH:
+	    {
+		int	flags;
+		if (count < TTY_FLUSH_COUNT)
+		    return D_INVALID_OPERATION;
+
+		flags = *data;
+		/* Zero means "flush both directions". */
+		if (flags == 0)
+		    flags = D_READ | D_WRITE;
+
+		s = simple_lock_irq(&tp->t_lock);
+		tty_flush(tp, flags);
+		simple_unlock_irq(s, &tp->t_lock);
+
+		break;
+	    }
+	    case TTY_STOP:
+		/* stop output */
+		s = simple_lock_irq(&tp->t_lock);
+		if ((tp->t_state & TS_TTSTOP) == 0) {
+		    tp->t_state |= TS_TTSTOP;
+		    (*tp->t_stop)(tp, 0);
+		}
+		simple_unlock_irq(s, &tp->t_lock);
+		break;
+
+	    case TTY_START:
+		/* start output */
+		s = simple_lock_irq(&tp->t_lock);
+		if (tp->t_state & TS_TTSTOP) {
+		    tp->t_state &= ~TS_TTSTOP;
+		    tty_output(tp);
+		}
+		simple_unlock_irq(s, &tp->t_lock);
+		break;
+
+	    case TTY_STATUS:
+		/* set special characters and speed */
+	    {
+		struct tty_status *tsp;
+
+		if (count < TTY_STATUS_COUNT)
+		    return D_INVALID_OPERATION;
+
+		tsp = (struct tty_status *)data;
+
+		/* Reject speeds outside the speed table. */
+		if (tsp->tt_ispeed < 0 ||
+		    tsp->tt_ispeed >= NSPEEDS ||
+		    tsp->tt_ospeed < 0 ||
+		    tsp->tt_ospeed >= NSPEEDS)
+		{
+		    return D_INVALID_OPERATION;
+		}
+
+		s = simple_lock_irq(&tp->t_lock);
+
+		tp->t_ispeed = tsp->tt_ispeed;
+		tp->t_ospeed = tsp->tt_ospeed;
+		tp->t_breakc = tsp->tt_breakc;
+		/* TF_HUPCLS is carried in t_state, not t_flags;
+		   mirror of the translation done in tty_get_status. */
+		tp->t_flags = tsp->tt_flags & ~TF_HUPCLS;
+		if (tsp->tt_flags & TF_HUPCLS)
+		    tp->t_state |= TS_HUPCLS;
+
+		simple_unlock_irq(s, &tp->t_lock);
+		break;
+	    }
+	    default:
+		return D_INVALID_OPERATION;
+	}
+	return D_SUCCESS;
+}
+
+
+/*
+ * [internal]
+ * Queue IOR on reply queue, to wait for TTY operation.
+ * TTY must be locked (at spltty).
+ */
+void queue_delayed_reply(
+	queue_t		qh,		/* tty delayed-I/O queue */
+	io_req_t	ior,		/* request to park */
+	boolean_t	(*io_done)(io_req_t) )	/* called when retried */
+{
+	/* Record the completion routine, then park the request at the
+	   tail so requests complete in FIFO order. */
+	ior->io_done = io_done;
+	enqueue_tail(qh, (queue_entry_t)ior);
+}
+
+/*
+ * Retry delayed IO operations for TTY.
+ * TTY containing queue must be locked (at spltty).
+ */
+void tty_queue_completion(
+	queue_t	qh)
+{
+	io_req_t	ior;
+
+	/* Drain the queue, handing each parked request to the
+	   io_done thread for retry. */
+	while ((ior = (io_req_t)dequeue_head(qh)) != 0) {
+	    iodone(ior);
+	}
+}
+
+/*
+ * Set the default special characters.
+ * Since this routine is called whenever a tty has never been opened,
+ * we can initialize the queues here.
+ */
+void ttychars(
+	struct tty *tp)
+{
+	/*
+	 * One-time initialization.  TS_INIT is a t_state bit — it is set
+	 * on t_state below — so it must be tested on t_state, not
+	 * t_flags.  Testing t_flags meant the mark was never seen and
+	 * the queues and character buffers were re-initialized (leaking
+	 * the previously cb_alloc'd space) on every call.
+	 */
+	if ((tp->t_state & TS_INIT) == 0) {
+	    /*
+	     * Initialize queues
+	     */
+	    queue_init(&tp->t_delayed_open);
+	    queue_init(&tp->t_delayed_read);
+	    queue_init(&tp->t_delayed_write);
+
+	    /*
+	     * Initialize character buffers
+	     */
+	    cb_alloc(&tp->t_inq, tty_inq_size);
+
+	    /* if we might do modem flow control */
+	    if (tp->t_mctl && tp->t_inq.c_hog > 30)
+		tp->t_inq.c_hog -= 30;
+
+	    cb_alloc(&tp->t_outq, tty_outq_size);
+
+	    /*
+	     * Mark initialized
+	     */
+	    tp->t_state |= TS_INIT;
+	}
+
+	tp->t_breakc = 0;
+}
+
+/*
+ * Flush all TTY queues.
+ * Called at spltty, tty already locked.
+ * Calls device STOP routine; must already be on master if
+ * device needs to run on master.
+ */
+void tty_flush(
+	struct tty *tp,
+	int	rw)		/* D_READ and/or D_WRITE */
+{
+	if (rw & D_READ) {
+	    /* Discard pending input and wake any delayed readers. */
+	    cb_clear(&tp->t_inq);
+	    tty_queue_completion(&tp->t_delayed_read);
+	}
+	if (rw & D_WRITE) {
+	    /* Re-enable output, halt the device, then discard the
+	       output buffer and wake any delayed writers. */
+	    tp->t_state &= ~TS_TTSTOP;
+	    (*tp->t_stop)(tp, rw);
+	    cb_clear(&tp->t_outq);
+	    tty_queue_completion(&tp->t_delayed_write);
+	}
+}
+
+/*
+ * Restart character output after a delay timeout.
+ * Calls device start routine - must be on master CPU.
+ *
+ * Timeout routines are called only on master CPU.
+ * What if device runs on a different CPU?
+ */
+void ttrstrt(
+	struct tty *tp)
+{
+	spl_t	s;
+
+	s = simple_lock_irq(&tp->t_lock);
+
+	/* The delay has expired; clear the timeout flag and restart
+	   output from the top. */
+	tp->t_state &= ~TS_TIMEOUT;
+	ttstart (tp);
+
+	simple_unlock_irq(s, &tp->t_lock);
+}
+
+/*
+ * Start output on the typewriter. It is used from the top half
+ * after some characters have been put on the output queue,
+ * from the interrupt routine to transmit the next
+ * character, and after a timeout has finished.
+ *
+ * Called at spltty, tty already locked.
+ * Must be on master CPU if device runs on master.
+ */
+void ttstart(struct tty *tp)
+{
+	/* Only start if not already delaying, stopped, or transmitting. */
+	if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) {
+	    /*
+	     * Start up the hardware again
+	     */
+	    (*tp->t_start)(tp);
+
+	    /*
+	     * Wake up those waiting for write completion
+	     * once the output queue has drained below the low-water mark.
+	     */
+	    if (tp->t_outq.c_cc <= TTLOWAT(tp))
+		tty_queue_completion(&tp->t_delayed_write);
+	}
+}
+
+/*
+ * Start character output, if the device is not busy or
+ * stopped or waiting for a timeout.
+ *
+ * Called at spltty, tty already locked.
+ * Must be on master CPU if device runs on master.
+ */
+void tty_output(
+	struct tty *tp)
+{
+	/* NOTE(review): body is identical to ttstart() above —
+	   presumably kept separate as the exported name. */
+	if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) {
+	    /*
+	     * Not busy.  Start output.
+	     */
+	    (*tp->t_start)(tp);
+
+	    /*
+	     * Wake up those waiting for write completion
+	     * once the output queue has drained below the low-water mark.
+	     */
+	    if (tp->t_outq.c_cc <= TTLOWAT(tp))
+		tty_queue_completion(&tp->t_delayed_write);
+	}
+}
+
+/*
+ * Send any buffered recvd chars up to user
+ */
+static void ttypush(void * _tp)	/* timeout callback; arg is the tty */
+{
+	struct tty	*tp = _tp;
+	spl_t	s;
+	int	state;
+
+	s = simple_lock_irq(&tp->t_lock);
+
+	/*
+	  The pdma timeout has gone off.
+	  If no character has been received since the timeout
+	  was set, push any pending characters up.
+	  If any characters were received in the last interval
+	  then just reset the timeout and the character received bit.
+	 */
+
+	state = tp->t_state;
+
+	if (state & TS_MIN_TO)
+	  {
+	    if (state & TS_MIN_TO_RCV)
+	      {	/* a character was received: re-arm the timer for
+		   another interval (indexed by input speed) */
+		tp->t_state = state & ~TS_MIN_TO_RCV;
+		timeout(ttypush, tp, pdma_timeouts[tp->t_ispeed]);
+	      }
+	    else
+	      {
+		/* quiet interval: deliver whatever is buffered */
+		tp->t_state = state & ~TS_MIN_TO;
+		if (tp->t_inq.c_cc)	/* pending characters */
+		  tty_queue_completion(&tp->t_delayed_read);
+	      }
+	  }
+	else
+	  {
+	    tp->t_state = state & ~TS_MIN_TO_RCV;/* sanity */
+	  }
+
+	simple_unlock_irq(s, &tp->t_lock);
+}
+
+/*
+ * Put input character on input queue.
+ *
+ * Called at spltty, tty already locked.
+ */
+void ttyinput(
+	unsigned int	c,	/* received character (low 8 bits used) */
+	struct tty	*tp)
+{
+	if (tp->t_inq.c_cc >= tp->t_inq.c_hog) {
+	    /*
+	     * Do not want to overflow input queue:
+	     * drop RTS to ask the remote side to pause, and push
+	     * what we have up to the readers.  The character is lost.
+	     */
+	    if (tp->t_mctl) {
+		(*tp->t_mctl)(tp, TM_RTS, DMBIC);
+		tp->t_state |= TS_RTS_DOWN;
+	    }
+	    tty_queue_completion(&tp->t_delayed_read);
+	    return;
+
+	}
+
+	c &= 0xff;
+
+	(void) putc(c, &tp->t_inq);
+	if ((tp->t_state & TS_MIN) == 0 ||
+	    tp->t_inq.c_cc > pdma_water_mark[tp->t_ispeed])
+	{
+	    /*
+	     * No input buffering, or input minimum exceeded.
+	     * Grab a request from input queue and queue it
+	     * to io_done thread.
+	     */
+	    if (tp->t_state & TS_MIN_TO) {
+		/* cancel a pending push timer; we deliver now */
+		tp->t_state &= ~(TS_MIN_TO|TS_MIN_TO_RCV);
+		untimeout(ttypush, tp);
+	    }
+	    tty_queue_completion(&tp->t_delayed_read);
+	}
+	else {
+	    /*
+	     * Not enough characters.
+	     * If no timeout is set, initiate the timeout
+	     * Otherwise set the character received during timeout interval
+	     * flag.
+	     * One alternative approach would be just to reset the timeout
+	     * into the future, but this involves making a timeout/untimeout
+	     * call on every character.
+	     */
+	    int ptime = pdma_timeouts[tp->t_ispeed];
+	    if (ptime > 0)
+	      {
+		if ((tp->t_state & TS_MIN_TO) == 0)
+		  {
+		    tp->t_state |= TS_MIN_TO;
+		    timeout(ttypush, tp, ptime);
+		  }
+		else
+		  {
+		    /* timer already armed; ttypush will re-arm it */
+		    tp->t_state |= TS_MIN_TO_RCV;
+		  }
+	      }
+	}
+}
+
+/*
+ * Put many characters on input queue.
+ *
+ * Called at spltty, tty already locked.
+ */
+void ttyinput_many(
+	struct tty	*tp,
+	char		*chars,	/* characters to enqueue */
+	int		count)	/* number of characters */
+{
+	/*
+	 * Do not want to overflow input queue:
+	 * only enqueue while under the hog mark; b_to_q returns the
+	 * number NOT entered, so 'count' becomes the number accepted
+	 * (result is unused here).
+	 */
+	if (tp->t_inq.c_cc < tp->t_inq.c_hog)
+	    count -= b_to_q(chars, count, &tp->t_inq);
+
+	tty_queue_completion(&tp->t_delayed_read);
+}
+
+
+/*
+ * Handle modem control transition on a tty.
+ * Flag indicates new state of carrier.
+ * Returns FALSE if the line should be turned off.
+ *
+ * Called at spltty, tty already locked.
+ */
+boolean_t ttymodem(
+	struct tty *	tp,
+	boolean_t	carrier_up)	/* new carrier state */
+{
+	if ((tp->t_state&TS_WOPEN) == 0 && (tp->t_flags & MDMBUF)) {
+	    /*
+	     * Flow control by carrier.  Carrier down stops
+	     * output; carrier up restarts output.
+	     */
+	    if (carrier_up) {
+		tp->t_state &= ~TS_TTSTOP;
+		tty_output(tp);
+	    }
+	    else if ((tp->t_state&TS_TTSTOP) == 0) {
+		tp->t_state |= TS_TTSTOP;
+		(*tp->t_stop)(tp, 0);
+	    }
+	}
+	else if (carrier_up) {
+	    /*
+	     * Carrier now on: wake anyone blocked in open.
+	     */
+	    tp->t_state |= TS_CARR_ON;
+	    tt_open_wakeup(tp);
+	}
+	else {
+	    /*
+	     * Lost carrier.
+	     */
+	    tp->t_state &= ~TS_CARR_ON;
+	    if (tp->t_state & TS_ISOPEN &&
+		(tp->t_flags & NOHANG) == 0)
+	    {
+		/*
+		 * Hang up TTY if carrier drops.
+		 * Need to alert users, somehow...
+		 */
+		tty_flush(tp, D_READ|D_WRITE);
+		/* tell the caller to turn the line off */
+		return FALSE;
+	    }
+	}
+	return TRUE;
+}
+
+/*
+ * Similarly, handle transitions on the ClearToSend
+ * signal. Nowadays, it is used by many modems as
+ * a flow-control device: they turn it down to stop
+ * us from sending more chars. We do the same with
+ * the RequestToSend signal. [Yes, that is exactly
+ * why those signals are defined in the standard.]
+ *
+ * Tty must be locked and on master.
+ */
+void
+tty_cts(
+	struct tty *	tp,
+	boolean_t	cts_up)		/* new ClearToSend state */
+{
+	/* Only react while the line is open. */
+	if (tp->t_state & TS_ISOPEN){
+	    if (cts_up) {
+		/* modem says go: clear stop/busy and resume output */
+		tp->t_state &= ~(TS_TTSTOP|TS_BUSY);
+		tty_output(tp);
+	    } else {
+		/* modem says pause: mark stopped and halt the device */
+		tp->t_state |= (TS_TTSTOP|TS_BUSY);
+		(*tp->t_stop)(tp, D_WRITE);
+	    }
+	}
+}
diff --git a/device/chario.h b/device/chario.h
new file mode 100644
index 0000000..52105a2
--- /dev/null
+++ b/device/chario.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_CHARIO_H_
+#define _DEVICE_CHARIO_H_
+
+#include <device/tty.h>
+
+extern void chario_init(void);
+
+void queue_delayed_reply(
+ queue_t qh,
+ io_req_t ior,
+ boolean_t (*io_done)(io_req_t));
+
+void tty_output(struct tty *tp);
+
+boolean_t char_open_done(io_req_t);
+boolean_t char_read_done(io_req_t);
+boolean_t char_write_done(io_req_t);
+
+#endif /* _DEVICE_CHARIO_H_ */
diff --git a/device/cirbuf.c b/device/cirbuf.c
new file mode 100644
index 0000000..ed09f3d
--- /dev/null
+++ b/device/cirbuf.c
@@ -0,0 +1,277 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ *
+ * Circular buffers for TTY
+ */
+
+#include <string.h>
+#include <device/cirbuf.h>
+#include <kern/debug.h>
+#include <kern/kalloc.h>
+
+
+
+/* read at c_cf, write at c_cl */
+/* if c_cf == c_cl, buffer is empty */
+/* if c_cl == c_cf - 1, buffer is full */
+
+#if DEBUG
+#include <mach/boolean.h>
+
+boolean_t cb_check_enable = FALSE;
+#define CB_CHECK(cb) if (cb_check_enable) cb_check(cb)
+
+void
+cb_check(struct cirbuf *cb)
+{
+	/* Pointers must lie within [c_start, c_end). */
+	if (!(cb->c_cf >= cb->c_start && cb->c_cf < cb->c_end))
+	    panic("cf %p out of range [%p..%p)",
+		  cb->c_cf, cb->c_start, cb->c_end);
+	if (!(cb->c_cl >= cb->c_start && cb->c_cl < cb->c_end))
+	    panic("cl %p out of range [%p..%p)",
+		  cb->c_cl, cb->c_start, cb->c_end);
+	/* c_cc must match the pointer distance, accounting for wrap. */
+	if (cb->c_cf <= cb->c_cl) {
+	    if (!(cb->c_cc == cb->c_cl - cb->c_cf))
+		panic("cc %x should be %x",
+		      cb->c_cc,
+		      cb->c_cl - cb->c_cf);
+	}
+	else {
+	    if (!(cb->c_cc == cb->c_end - cb->c_cf
+			    + cb->c_cl - cb->c_start))
+		panic("cc %x should be %x",
+		      cb->c_cc,
+		      cb->c_end - cb->c_cf +
+		      cb->c_cl - cb->c_start);
+	}
+}
+#else /* DEBUG */
+#define CB_CHECK(cb)
+#endif /* DEBUG */
+
+/*
+ * Put one character in circular buffer.
+ */
+int putc(
+	int		c,	/* character to store (low byte) */
+	struct cirbuf	*cb)
+{
+	char	*ow, *nw;
+
+	ow = cb->c_cl;
+	nw = ow+1;
+	if (nw == cb->c_end)
+	    nw = cb->c_start;
+	/* Writing would make c_cl == c_cf, i.e. look empty: buffer is
+	   full, so one slot is always left unused. */
+	if (nw == cb->c_cf)
+	    return 1;		/* not entered */
+	*ow = c;
+	cb->c_cl = nw;
+
+	cb->c_cc++;
+
+	CB_CHECK(cb);
+
+	/* 0 = entered, 1 = buffer full (note: inverse of stdio putc) */
+	return 0;
+}
+
+/*
+ * Get one character from circular buffer.
+ */
+int getc(struct cirbuf *cb)
+{
+	unsigned char	*nr;
+	int		c;
+
+	nr = (unsigned char *)cb->c_cf;
+	if (nr == (unsigned char *)cb->c_cl) {
+	    CB_CHECK(cb);
+	    return -1;		/* empty */
+	}
+	/* Read through unsigned char so the returned value is 0..255,
+	   never sign-extended. */
+	c = *nr;
+	nr++;
+	if (nr == (unsigned char *)cb->c_end)
+	    nr = (unsigned char *)cb->c_start;
+	cb->c_cf = (char *)nr;
+
+	cb->c_cc--;
+
+	CB_CHECK(cb);
+
+	return c;
+}
+
+/*
+ * Get lots of characters.
+ * Return number moved.
+ */
+int
+q_to_b( struct cirbuf	*cb,
+	char		*cp,	/* destination buffer */
+	int		count)	/* max characters to move */
+{
+	char	*ocp = cp;
+	int	i;
+
+	while (count != 0) {
+	    if (cb->c_cl == cb->c_cf)
+		break;		/* empty */
+	    /* Copy the largest contiguous run: up to the end of the
+	       ring if wrapped, else up to the write pointer. */
+	    if (cb->c_cl < cb->c_cf)
+		i = cb->c_end - cb->c_cf;
+	    else
+		i = cb->c_cl - cb->c_cf;
+	    if (i > count)
+		i = count;
+	    memcpy(cp, cb->c_cf, i);
+	    cp += i;
+	    count -= i;
+	    cb->c_cf += i;
+	    cb->c_cc -= i;
+	    if (cb->c_cf == cb->c_end)
+		cb->c_cf = cb->c_start;
+
+	    CB_CHECK(cb);
+	}
+	CB_CHECK(cb);
+
+	/* number of characters actually moved */
+	return cp - ocp;
+}
+
+/*
+ * Add character array to buffer and return number of characters
+ * NOT entered.
+ */
+int
+b_to_q( char	*cp,		/* source characters */
+	int	count,		/* number offered */
+	struct cirbuf	*cb)
+{
+	int	i;
+	char	*lim;
+
+	while (count != 0) {
+	    /* 'lim' is the last writable slot: one before the read
+	       pointer (wrapping), since a completely full ring would
+	       be indistinguishable from an empty one. */
+	    lim = cb->c_cf - 1;
+	    if (lim < cb->c_start)
+		lim = cb->c_end - 1;
+
+	    if (cb->c_cl == lim)
+		break;		/* full */
+	    /* Copy the largest contiguous run toward 'lim' or the
+	       physical end of the buffer. */
+	    if (cb->c_cl < lim)
+		i = lim - cb->c_cl;
+	    else
+		i = cb->c_end - cb->c_cl;
+
+	    if (i > count)
+		i = count;
+	    memcpy(cb->c_cl, cp, i);
+	    cp += i;
+	    count -= i;
+	    cb->c_cc += i;
+	    cb->c_cl += i;
+	    if (cb->c_cl == cb->c_end)
+		cb->c_cl = cb->c_start;
+
+	    CB_CHECK(cb);
+	}
+	CB_CHECK(cb);
+	/* number of characters NOT entered */
+	return count;
+}
+
+/*
+ * Flush characters from circular buffer.
+ */
+void
+ndflush(struct cirbuf	*cb,
+	int		count)	/* number of characters to discard */
+{
+	int	i;
+
+	/* Advance the read pointer by up to 'count' characters,
+	   one contiguous run at a time. */
+	while (count != 0) {
+	    if (cb->c_cl == cb->c_cf)
+		break;		/* empty */
+	    if (cb->c_cl < cb->c_cf)
+		i = cb->c_end - cb->c_cf;
+	    else
+		i = cb->c_cl - cb->c_cf;
+	    if (i > count)
+		i = count;
+	    count -= i;
+	    cb->c_cf += i;
+	    cb->c_cc -= i;
+	    if (cb->c_cf == cb->c_end)
+		cb->c_cf = cb->c_start;
+	    CB_CHECK(cb);
+	}
+
+	CB_CHECK(cb);
+}
+
+/*
+ * Empty a circular buffer.
+ */
+void cb_clear(struct cirbuf *cb)
+{
+	/* Reset both pointers to the start; contents are abandoned. */
+	cb->c_cf = cb->c_start;
+	cb->c_cl = cb->c_start;
+	cb->c_cc = 0;
+}
+
+/*
+ * Allocate character space for a circular buffer.
+ */
+void
+cb_alloc(
+	struct cirbuf	*cb,
+	vm_size_t	buf_size)	/* bytes of buffer space */
+{
+	char	*buf;
+
+	/* NOTE(review): kalloc result is not checked; a NULL return
+	   would leave the cirbuf pointing at address 0 — confirm
+	   callers only run where kalloc cannot fail or panics. */
+	buf = (char *)kalloc(buf_size);
+
+	cb->c_start = buf;
+	cb->c_end = buf + buf_size;
+	cb->c_cf = buf;
+	cb->c_cl = buf;
+	cb->c_cc = 0;
+	/* One slot is always unused (full/empty disambiguation). */
+	cb->c_hog = buf_size - 1;
+
+	CB_CHECK(cb);
+}
+
+/*
+ * Free character space for a circular buffer.
+ */
+void
+cb_free(struct cirbuf *cb)
+{
+	vm_size_t	size;
+
+	/* Size was fixed at cb_alloc time as c_end - c_start. */
+	size = cb->c_end - cb->c_start;
+	kfree((vm_offset_t)cb->c_start, size);
+}
+
diff --git a/device/cirbuf.h b/device/cirbuf.h
new file mode 100644
index 0000000..64771ce
--- /dev/null
+++ b/device/cirbuf.h
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#ifndef _DEVICE_CIRBUF_H_
+#define _DEVICE_CIRBUF_H_
+
+/*
+ * Circular buffers for TTY
+ */
+
+struct cirbuf {
+ char * c_start; /* start of buffer */
+ char * c_end; /* end of buffer + 1*/
+ char * c_cf; /* read pointer */
+ char * c_cl; /* write pointer */
+ short c_cc; /* current number of characters
+ (compatibility) */
+ short c_hog; /* max ever */
+};
+
+/*
+ * Exported routines
+ */
+extern int putc(int, struct cirbuf *);
+extern int getc(struct cirbuf *);
+extern int q_to_b(struct cirbuf *, char *, int);
+extern int b_to_q(char *, int, struct cirbuf *);
+extern void ndflush(struct cirbuf *, int);
+extern void cb_clear(struct cirbuf *);
+
+extern void cb_alloc(struct cirbuf *, vm_size_t);
+extern void cb_free(struct cirbuf *);
+
+#endif /* _DEVICE_CIRBUF_H_ */
diff --git a/device/conf.h b/device/conf.h
new file mode 100644
index 0000000..8177966
--- /dev/null
+++ b/device/conf.h
@@ -0,0 +1,127 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/88
+ */
+
+#ifndef _DEVICE_CONF_H_
+#define _DEVICE_CONF_H_
+
+#include <mach/machine/vm_types.h>
+#include <sys/types.h>
+#include <mach/port.h>
+#include <mach/vm_prot.h>
+#include <device/device_types.h>
+#include <device/net_status.h>
+
+struct io_req;
+typedef struct io_req *io_req_t;
+
+typedef int io_return_t;
+
+/*
+ * Operations list for major device types.
+ */
+struct dev_ops {
+ char * d_name; /* name for major device */
+ int (*d_open)(dev_t, int, io_req_t);/* open device */
+ void (*d_close)(dev_t, int); /* close device */
+ int (*d_read)(dev_t, io_req_t); /* read */
+ int (*d_write)(dev_t, io_req_t); /* write */
+ int (*d_getstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t *); /* get status/control */
+ int (*d_setstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t); /* set status/control */
+ vm_offset_t (*d_mmap)(dev_t, vm_offset_t, vm_prot_t); /* map memory */
+ int (*d_async_in)(dev_t, const ipc_port_t, int, filter_t*, unsigned int); /* asynchronous input setup */
+ int (*d_reset)(dev_t); /* reset device */
+ int (*d_port_death)(dev_t, mach_port_t);
+ /* clean up reply ports */
+ int d_subdev; /* number of sub-devices per
+ unit */
+ int (*d_dev_info)(dev_t, int, int*); /* driver info for kernel */
+};
+typedef struct dev_ops *dev_ops_t;
+
+/*
+ * Routines for null entries.
+ */
+extern int nulldev_reset(dev_t dev);
+extern int nulldev_open(dev_t dev, int flag, io_req_t ior);
+extern void nulldev_close(dev_t dev, int flags);
+extern int nulldev_read(dev_t dev, io_req_t ior);
+extern int nulldev_write(dev_t dev, io_req_t ior);
+extern io_return_t nulldev_getstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count);
+extern io_return_t nulldev_setstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count);
+extern io_return_t nulldev_portdeath(dev_t dev, mach_port_t port);
+extern int nodev_async_in(dev_t, const ipc_port_t, int, filter_t*, unsigned int); /* no operation - error */
+extern int nodev_info(dev_t, int, int*); /* no operation - error */
+extern vm_offset_t nomap(dev_t dev, vm_offset_t off, int prot); /* no operation - error */
+
+/*
+ * Flavor constants for d_dev_info routine
+ */
+#define D_INFO_BLOCK_SIZE 1
+
+/*
+ * Head of list of attached devices
+ */
+extern struct dev_ops dev_name_list[];
+extern int dev_name_count;
+
+/*
+ * Macro to search device list
+ */
+#define dev_search(dp) \
+ for (dp = dev_name_list; \
+ dp < &dev_name_list[dev_name_count]; \
+ dp++)
+
+/*
+ * Indirection vectors for certain devices.
+ */
+struct dev_indirect {
+ char * d_name; /* name for device */
+ dev_ops_t d_ops; /* operations (major device) */
+ int d_unit; /* and unit number */
+};
+typedef struct dev_indirect *dev_indirect_t;
+
+/*
+ * List of indirect devices.
+ */
+extern struct dev_indirect dev_indirect_list[];
+extern int dev_indirect_count;
+
+/*
+ * Macro to search indirect list
+ */
+#define dev_indirect_search(di) \
+ for (di = dev_indirect_list; \
+ di < &dev_indirect_list[dev_indirect_count]; \
+ di++)
+
+#endif /* _DEVICE_CONF_H_ */
+
diff --git a/device/cons.c b/device/cons.c
new file mode 100644
index 0000000..3f7cb9d
--- /dev/null
+++ b/device/cons.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 1988-1994, The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: cons.c 1.14 94/12/14$
+ */
+
+#include <string.h>
+#include <kern/debug.h>
+#include <sys/types.h>
+#include <device/conf.h>
+#include <mach/boolean.h>
+#include <device/cons.h>
+
+#ifdef MACH_KMSG
+#include <device/io_req.h>
+#include <device/kmsg.h>
+#endif /* MACH_KMSG */
+
+static boolean_t cn_inited = FALSE;
+static struct consdev *cn_tab = 0; /* physical console device info */
+
+/*
+ * ROM getc/putc primitives.
+ * On some architectures, the boot ROM provides basic character input/output
+ * routines that can be used before devices are configured or virtual memory
+ * is enabled. This can be useful to debug (or catch panics from) code early
+ * in the bootstrap procedure.
+ */
+int (*romgetc)(char c) = 0;
+void (*romputc)(char c) = 0;
+
+#if CONSBUFSIZE > 0
+/*
+ * Temporary buffer to store console output before a console is selected.
+ * This is statically allocated so it can be called before malloc/kmem_alloc
+ * have been initialized. It is initialized so it won't be clobbered as
+ * part of the zeroing of BSS (on PA/Mach).
+ */
+static char consbuf[CONSBUFSIZE] = { 0 };
+static char *consbp = consbuf;
+static boolean_t consbufused = FALSE;
+#endif /* CONSBUFSIZE > 0 */
+
+void
+cninit(void)
+{
+	struct consdev *cp;
+	dev_ops_t cn_ops;
+	int x;
+
+	/* Idempotent: safe to call more than once. */
+	if (cn_inited)
+		return;
+
+	/*
+	 * Collect information about all possible consoles
+	 * and find the one with highest priority
+	 */
+	for (cp = constab; cp->cn_probe; cp++) {
+		(*cp->cn_probe)(cp);
+		if (cp->cn_pri > CN_DEAD &&
+		    (cn_tab == NULL || cp->cn_pri > cn_tab->cn_pri))
+			cn_tab = cp;
+	}
+
+	/*
+	 * Found a console, initialize it.
+	 */
+	if ((cp = cn_tab)) {
+		/*
+		 * Initialize as console
+		 */
+		(*cp->cn_init)(cp);
+		/*
+		 * Look up its dev_ops pointer in the device table and
+		 * place it in the device indirection table so the
+		 * device can also be opened as "console".
+		 */
+		if (dev_name_lookup(cp->cn_name, &cn_ops, &x) == FALSE)
+			panic("cninit: dev_name_lookup failed");
+		dev_set_indirection("console", cn_ops, minor(cp->cn_dev));
+#if CONSBUFSIZE > 0
+		/*
+		 * Now that the console is initialized, dump any chars in
+		 * the temporary console buffer.  The buffer is circular:
+		 * start at consbp and wrap, skipping NUL slots.
+		 */
+		if (consbufused) {
+			char *cbp = consbp;
+			do {
+				if (*cbp)
+					cnputc(*cbp);
+				if (++cbp == &consbuf[CONSBUFSIZE])
+					cbp = consbuf;
+			} while (cbp != consbp);
+			consbufused = FALSE;
+		}
+#endif /* CONSBUFSIZE > 0 */
+		cn_inited = TRUE;
+		return;
+	}
+	/*
+	 * No console device found, not a problem for BSD, fatal for Mach
+	 */
+	panic("can't find a console device");
+}
+
+
+int
+cngetc(void)
+{
+	/* Blocking read: prefer the configured console, fall back to
+	   the boot-ROM primitive, else return 0. */
+	if (cn_tab)
+		return ((*cn_tab->cn_getc)(cn_tab->cn_dev, 1));
+	if (romgetc)
+		return ((*romgetc)(1));
+	return (0);
+}
+
+int
+cnmaygetc(void)
+{
+	/* Non-blocking variant of cngetc (wait flag = 0). */
+	if (cn_tab)
+		return((*cn_tab->cn_getc)(cn_tab->cn_dev, 0));
+	if (romgetc)
+		return ((*romgetc)(0));
+	return (0);
+}
+
+void
+cnputc(char c)
+{
+	/* NUL is used as the "empty slot" marker in consbuf; never emit it. */
+	if (c == 0)
+		return;
+
+#ifdef MACH_KMSG
+	/* XXX: Assume that All output routines always use cnputc. */
+	kmsg_putchar (c);
+#endif
+
+#if defined(MACH_HYP) && 0
+	{
+		/* Also output on hypervisor's emergency console, for
+		 * debugging */
+		unsigned char d = c;
+		hyp_console_write(&d, 1);
+	}
+#endif	/* MACH_HYP */
+
+	if (cn_tab) {
+		(*cn_tab->cn_putc)(cn_tab->cn_dev, c);
+		/* LF -> CRLF translation for terminals */
+		if (c == '\n')
+			(*cn_tab->cn_putc)(cn_tab->cn_dev, '\r');
+	} else if (romputc) {
+		(*romputc)(c);
+		if (c == '\n')
+			(*romputc)('\r');
+	}
+#if CONSBUFSIZE > 0
+	else {
+		/* No console yet: stash the character in the circular
+		   holding buffer; cninit() replays it later. */
+		if (consbufused == FALSE) {
+			consbp = consbuf;
+			consbufused = TRUE;
+			memset(consbuf, 0, CONSBUFSIZE);
+		}
+		*consbp++ = c;
+		if (consbp >= &consbuf[CONSBUFSIZE])
+			consbp = consbuf;
+	}
+#endif /* CONSBUFSIZE > 0 */
+}
diff --git a/device/cons.h b/device/cons.h
new file mode 100644
index 0000000..34f3bc5
--- /dev/null
+++ b/device/cons.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1988-1994, The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: cons.h 1.10 94/12/14$
+ */
+
+#ifndef _DEVICE_CONS_H
+#define _DEVICE_CONS_H
+#include <sys/types.h>
+
+struct consdev {
+ char *cn_name; /* name of device in dev_name_list */
+ int (*cn_probe)(struct consdev *cp); /* probe hardware and fill in consdev info */
+ int (*cn_init)(struct consdev *cp); /* turn on as console */
+ int (*cn_getc)(dev_t dev, int wait); /* kernel getchar interface */
+ int (*cn_putc)(dev_t dev, int c); /* kernel putchar interface */
+ dev_t cn_dev; /* major/minor of device */
+ short cn_pri; /* pecking order; the higher the better */
+};
+
+/* values for cn_pri - reflect our policy for console selection */
+#define CN_DEAD 0 /* device doesn't exist */
+#define CN_NORMAL 1 /* device exists but is nothing special */
+#define CN_INTERNAL 2 /* "internal" bit-mapped display */
+#define CN_REMOTE 3 /* serial interface with remote bit set */
+
+#define CONSBUFSIZE 1024
+
+#ifdef KERNEL
+extern struct consdev constab[];
+#endif
+
+extern void cninit(void);
+
+extern int cngetc(void);
+
+extern int cnmaygetc(void);
+
+extern void cnputc(char);
+
+/*
+ * ROM getc/putc primitives.
+ * On some architectures, the boot ROM provides basic character input/output
+ * routines that can be used before devices are configured or virtual memory
+ * is enabled. This can be useful to debug (or catch panics from) code early
+ * in the bootstrap procedure.
+ */
+extern int (*romgetc)(char c);
+extern void (*romputc)(char c);
+
+#endif /* _DEVICE_CONS_H */
diff --git a/device/dev_hdr.h b/device/dev_hdr.h
new file mode 100644
index 0000000..ac6ce7e
--- /dev/null
+++ b/device/dev_hdr.h
@@ -0,0 +1,160 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+/*
+ * Mach device emulation definitions (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#ifndef _DEVICE_DEV_HDR_H_
+#define _DEVICE_DEV_HDR_H_
+
+#include <ipc/ipc_types.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+
+typedef struct dev_ops *dev_ops_t;
+
+/* This structure is associated with each open device port.
+   The port representing the device points to this structure. */
+struct device
+{
+	/* Emulation-specific operations vector; for native Mach devices
+	   this is mach_device_emulation_ops (set in dev_port_enter). */
+	struct device_emulation_ops *emul_ops;
+	/* Private data for emul_ops; for native devices this points back
+	   to the enclosing struct mach_device. */
+	void *emul_data;
+};
+
+typedef struct device *device_t;
+
+#define DEVICE_NULL ((device_t) 0)
+
+/*
+ * Generic device header. May be allocated with the device,
+ * or built when the device is opened.
+ */
+struct mach_device {
+	/* Lock for reference count.  NOTE: when both are needed,
+	   dev_number_lock (dev_lookup.c) must be taken first. */
+	decl_simple_lock_data(,ref_lock)
+	int		ref_count;	/* reference count */
+	decl_simple_lock_data(, lock)	/* lock for rest of state */
+	short		state;		/* state: */
+#define	DEV_STATE_INIT		0	/* not open */
+#define	DEV_STATE_OPENING	1	/* being opened */
+#define	DEV_STATE_OPEN		2	/* open */
+#define	DEV_STATE_CLOSING	3	/* being closed */
+	short		flag;		/* random flags: */
+#define	D_EXCL_OPEN		0x0001	/* open only once */
+	short		open_count;	/* number of times open */
+	short		io_in_progress;	/* number of IOs in progress */
+	boolean_t	io_wait;	/* someone waiting for IO to finish */
+
+	struct ipc_port *port;		/* open port */
+	queue_chain_t	number_chain;	/* chain for lookup by number */
+	int		dev_number;	/* device number */
+	int		bsize;		/* replacement for DEV_BSIZE */
+	struct dev_ops	*dev_ops;	/* and operations vector */
+	struct device	dev;		/* the real device structure */
+};
+typedef	struct mach_device *mach_device_t;
+#define	MACH_DEVICE_NULL ((mach_device_t)0)
+
+/*
+ * To find and remove device entries
+ */
+mach_device_t device_lookup(const char *); /* by name */
+
+void mach_device_reference(mach_device_t);
+void mach_device_deallocate(mach_device_t);
+
+/*
+ * To find and remove port-to-device mappings
+ */
+device_t dev_port_lookup(ipc_port_t);
+void dev_port_enter(mach_device_t);
+void dev_port_remove(mach_device_t);
+
+typedef boolean_t (*dev_map_fn)(mach_device_t, mach_port_t);
+
+/*
+ * To call a routine on each device
+ */
+boolean_t dev_map(dev_map_fn, mach_port_t);
+
+/*
+ * To lock and unlock state and open-count
+ */
+#define device_lock(device) simple_lock(&(device)->lock)
+#define device_unlock(device) simple_unlock(&(device)->lock)
+
+/*
+ * device name lookup
+ */
+extern boolean_t dev_name_lookup(
+ const char * name,
+ dev_ops_t *ops, /* out */
+ int *unit); /* out */
+
+/*
+ * Change an entry in the indirection list.
+ */
+extern void dev_set_indirection(
+ const char *name,
+ dev_ops_t ops,
+ int unit);
+
+/*
+ * compare device name
+ */
+extern boolean_t __attribute__ ((pure))
+name_equal(
+ const char *src,
+ int len,
+ const char *target);
+
+#endif /* _DEVICE_DEV_HDR_H_ */
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
new file mode 100644
index 0000000..c9c39f8
--- /dev/null
+++ b/device/dev_lookup.c
@@ -0,0 +1,364 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+#include <mach/port.h>
+#include <mach/vm_param.h>
+
+#include <kern/queue.h>
+#include <kern/slab.h>
+
+#include <device/device_types.h>
+#include <device/dev_hdr.h>
+#include <device/conf.h>
+#include <device/param.h> /* DEV_BSIZE, as default */
+
+#include <ipc/ipc_port.h>
+#include <kern/ipc_kobject.h>
+
+#include <device/device_emul.h>
+#include <device/ds_routines.h>
+
+/*
+ * Device structure routines: reference counting, port->device.
+ */
+
+/*
+ * Lookup/enter by device number.
+ */
+#define NDEVHASH 8
+#define DEV_NUMBER_HASH(dev) ((dev) & (NDEVHASH-1))
+queue_head_t dev_number_hash_table[NDEVHASH];
+
+/*
+ * Lock for device-number to device lookup.
+ * Must be held before device-ref_count lock.
+ */
+def_simple_lock_data(static, dev_number_lock)
+
+struct kmem_cache dev_hdr_cache;
+
+/*
+ * Enter device in the number lookup table.
+ * The number table lock must be held.
+ */
+static void
+dev_number_enter(const mach_device_t device)
+{
+	/* Hash on the device number to select the bucket. */
+	queue_t bucket;
+
+	bucket = &dev_number_hash_table[DEV_NUMBER_HASH(device->dev_number)];
+	queue_enter(bucket, device, mach_device_t, number_chain);
+}
+
+/*
+ * Remove device from the device-number lookup table.
+ * The device-number table lock must be held.
+ */
+static void
+dev_number_remove(const mach_device_t device)
+{
+	/* Unchain from the same bucket dev_number_enter picked. */
+	queue_t bucket;
+
+	bucket = &dev_number_hash_table[DEV_NUMBER_HASH(device->dev_number)];
+	queue_remove(bucket, device, mach_device_t, number_chain);
+}
+
+/*
+ * Lookup a device by device operations and minor number.
+ * The number table lock must be held.
+ */
+static mach_device_t
+dev_number_lookup(const dev_ops_t ops, int devnum)
+{
+	queue_t		bucket;
+	mach_device_t	entry;
+
+	/* Scan the hash chain for a matching (ops, number) pair. */
+	bucket = &dev_number_hash_table[DEV_NUMBER_HASH(devnum)];
+	queue_iterate(bucket, entry, mach_device_t, number_chain) {
+		if (entry->dev_ops == ops && entry->dev_number == devnum)
+			return (entry);
+	}
+	return (MACH_DEVICE_NULL);
+}
+
+/*
+ * Look up a device by name, and create the device structure
+ * if it does not exist. Enter it in the dev_number lookup
+ * table.
+ */
+mach_device_t
+device_lookup(const char *name)
+{
+	dev_ops_t	dev_ops;
+	int		dev_minor;
+	mach_device_t	device;
+	mach_device_t	new_device;
+
+	/*
+	 * Get the device and unit number from the name.
+	 */
+	if (!dev_name_lookup(name, &dev_ops, &dev_minor))
+	    return (MACH_DEVICE_NULL);
+
+	/*
+	 * Look up the device in the hash table.  If it is
+	 * not there, enter it.
+	 */
+	new_device = MACH_DEVICE_NULL;
+	simple_lock(&dev_number_lock);
+	while ((device = dev_number_lookup(dev_ops, dev_minor))
+		== MACH_DEVICE_NULL) {
+	    /*
+	     * Must unlock to allocate the structure.  If
+	     * the structure has appeared after we have allocated,
+	     * release the new structure.
+	     */
+	    if (new_device != MACH_DEVICE_NULL)
+		break;		/* allocated */
+
+	    simple_unlock(&dev_number_lock);
+
+	    new_device = (mach_device_t) kmem_cache_alloc(&dev_hdr_cache);
+	    if (new_device == MACH_DEVICE_NULL)
+		/*
+		 * Allocation failed: report the device as not found
+		 * rather than initializing through a null pointer.
+		 * (dev_pager.c checks this allocator the same way.)
+		 */
+		return (MACH_DEVICE_NULL);
+	    simple_lock_init(&new_device->ref_lock);
+	    new_device->ref_count = 1;
+	    simple_lock_init(&new_device->lock);
+	    new_device->state = DEV_STATE_INIT;
+	    new_device->flag = 0;
+	    new_device->open_count = 0;
+	    new_device->io_in_progress = 0;
+	    new_device->io_wait = FALSE;
+	    new_device->port = IP_NULL;
+	    new_device->dev_ops = dev_ops;
+	    new_device->dev_number = dev_minor;
+	    new_device->bsize = DEV_BSIZE;	/* change later */
+
+	    simple_lock(&dev_number_lock);
+	}
+
+	if (device == MACH_DEVICE_NULL) {
+	    /*
+	     * No existing device structure.  Insert the
+	     * new one.
+	     */
+	    assert(new_device != MACH_DEVICE_NULL);
+	    device = new_device;
+
+	    dev_number_enter(device);
+	    simple_unlock(&dev_number_lock);
+	}
+	else {
+	    /*
+	     * Have existing device.  Take a reference for the caller
+	     * and discard the speculatively allocated structure.
+	     */
+	    mach_device_reference(device);
+	    simple_unlock(&dev_number_lock);
+
+	    if (new_device != MACH_DEVICE_NULL)
+		kmem_cache_free(&dev_hdr_cache, (vm_offset_t)new_device);
+	}
+
+	return (device);
+}
+
+/*
+ * Add a reference to the device.
+ */
+void
+mach_device_reference(mach_device_t device)
+{
+	/* ref_lock serializes all updates to ref_count. */
+	simple_lock(&device->ref_lock);
+	device->ref_count++;
+	simple_unlock(&device->ref_lock);
+}
+
+/*
+ * Remove a reference to the device, and deallocate the
+ * structure if no references are left.
+ */
+void
+mach_device_deallocate(mach_device_t device)
+{
+	/* Fast path: drop one reference; if others remain, done. */
+	simple_lock(&device->ref_lock);
+	if (--device->ref_count > 0) {
+	    simple_unlock(&device->ref_lock);
+	    return;
+	}
+	/*
+	 * Count hit zero.  Restore it to 1 and retry with
+	 * dev_number_lock held first (lock order: number lock before
+	 * ref_lock -- see the comment at dev_number_lock), so the
+	 * device can be unhashed atomically with the final release.
+	 * Another thread may re-reference the device in the window.
+	 */
+	device->ref_count = 1;
+	simple_unlock(&device->ref_lock);
+
+	simple_lock(&dev_number_lock);
+	simple_lock(&device->ref_lock);
+	if (--device->ref_count > 0) {
+	    /* Someone re-referenced it meanwhile; keep it alive. */
+	    simple_unlock(&device->ref_lock);
+	    simple_unlock(&dev_number_lock);
+	    return;
+	}
+
+	/* Truly the last reference: unhash and free. */
+	dev_number_remove(device);
+	simple_unlock(&device->ref_lock);
+	simple_unlock(&dev_number_lock);
+
+	kmem_cache_free(&dev_hdr_cache, (vm_offset_t)device);
+}
+
+/*
+
+ */
+/*
+ * port-to-device lookup routines.
+ */
+
+/*
+ * Enter a port-to-device mapping.
+ */
+void
+dev_port_enter(mach_device_t device)
+{
+	/* The port-to-device mapping holds its own device reference,
+	   released by dev_port_remove. */
+	mach_device_reference(device);
+
+	/* Attach the generic device struct as the port's kernel object
+	   so dev_port_lookup can translate port -> device. */
+	ipc_kobject_set(device->port,
+			(ipc_kobject_t) &device->dev, IKOT_DEVICE);
+	device->dev.emul_data = device;
+	{
+	    /* Native (mach_device) emulation vector, defined elsewhere;
+	       declared locally to keep the dependency contained. */
+	    extern struct device_emulation_ops mach_device_emulation_ops;
+
+	    device->dev.emul_ops = &mach_device_emulation_ops;
+	}
+}
+
+/*
+ * Remove a port-to-device mapping.
+ */
+void
+dev_port_remove(mach_device_t device)
+{
+	/* Detach the kernel object from the port and drop the
+	   reference taken by dev_port_enter. */
+	ipc_kobject_set(device->port, IKO_NULL, IKOT_NONE);
+	mach_device_deallocate(device);
+}
+
+/*
+ * Lookup a device by its port.
+ * Doesn't consume the naked send right; produces a device reference.
+ */
+device_t
+dev_port_lookup(ipc_port_t port)
+{
+	device_t device;
+
+	if (!IP_VALID(port))
+	    return (DEVICE_NULL);
+
+	/* Inspect the kernel object under the port lock so the
+	   binding cannot change while we read it. */
+	ip_lock(port);
+	if (ip_active(port) && (ip_kotype(port) == IKOT_DEVICE)) {
+	    device = (device_t) port->ip_kobject;
+	    /* Produce a device reference via the emulation layer,
+	       if it provides a reference operation. */
+	    if (device->emul_ops->reference)
+		(*device->emul_ops->reference)(device->emul_data);
+	}
+	else
+	    device = DEVICE_NULL;
+
+	ip_unlock(port);
+	return (device);
+}
+
+/*
+ * Get the port for a device.
+ * Consumes a device reference; produces a naked send right.
+ */
+ipc_port_t
+convert_device_to_port(const device_t device)
+{
+	/* Delegate to the emulation layer, which consumes the
+	   device reference and produces the send right. */
+	if (device == DEVICE_NULL)
+		return IP_NULL;
+	return (*device->emul_ops->dev_to_port)(device->emul_data);
+}
+
+/*
+ * Call a supplied routine on each device, passing it
+ * the port as an argument. If the routine returns TRUE,
+ * stop the search and return TRUE. If none returns TRUE,
+ * return FALSE.
+ */
+boolean_t
+dev_map(
+	dev_map_fn	routine,
+	mach_port_t	port)
+{
+	int		i;
+	queue_t		q;
+	mach_device_t	dev, prev_dev;
+
+	for (i = 0, q = &dev_number_hash_table[0];
+	     i < NDEVHASH;
+	     i++, q++) {
+	    prev_dev = MACH_DEVICE_NULL;
+	    simple_lock(&dev_number_lock);
+	    queue_iterate(q, dev, mach_device_t, number_chain) {
+		/*
+		 * Take a reference so 'dev' cannot be freed (and thus
+		 * unchained -- see mach_device_deallocate) while the
+		 * table lock is dropped for the callback.  The previous
+		 * element's reference is released only after relocking,
+		 * keeping our position in the chain valid.
+		 */
+		mach_device_reference(dev);
+		simple_unlock(&dev_number_lock);
+		if (prev_dev != MACH_DEVICE_NULL)
+		    mach_device_deallocate(prev_dev);
+
+		if ((*routine)(dev, port)) {
+		    /*
+		     * Done
+		     */
+		    mach_device_deallocate(dev);
+		    return (TRUE);
+		}
+
+		simple_lock(&dev_number_lock);
+		prev_dev = dev;
+	    }
+	    simple_unlock(&dev_number_lock);
+	    if (prev_dev != MACH_DEVICE_NULL)
+		mach_device_deallocate(prev_dev);
+	}
+	return (FALSE);
+}
+
+/*
+ * Initialization
+ */
+void
+dev_lookup_init(void)
+{
+	int i;
+
+	/* Set up the number-lookup lock, the hash buckets, and the
+	   cache from which mach_device structures are allocated. */
+	simple_lock_init(&dev_number_lock);
+	for (i = 0; i < NDEVHASH; i++)
+		queue_init(&dev_number_hash_table[i]);
+	kmem_cache_init(&dev_hdr_cache, "mach_device",
+			sizeof(struct mach_device), 0, NULL, 0);
+}
diff --git a/device/dev_master.h b/device/dev_master.h
new file mode 100644
index 0000000..70d4c63
--- /dev/null
+++ b/device/dev_master.h
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 11/89
+ *
+ * Bind an IO operation to the master CPU.
+ */
+
+#ifndef _DEVICE_DEV_MASTER_H_
+#define _DEVICE_DEV_MASTER_H_
+
+#include <cpus.h>
+
+#if NCPUS > 1
+
+#include <kern/macros.h>
+#include <kern/cpu_number.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+
+/* Bind the current thread to the master processor and block until
+   it is actually running there. */
+#define io_grab_master() \
+	MACRO_BEGIN \
+	thread_bind(current_thread(), master_processor); \
+	if (current_processor() != master_processor) \
+	    thread_block((void (*)()) 0); \
+	MACRO_END
+
+/* Undo io_grab_master: allow the thread to run on any processor. */
+#define io_release_master() \
+	MACRO_BEGIN \
+	thread_bind(current_thread(), PROCESSOR_NULL); \
+	MACRO_END
+
+#else /* NCPUS > 1 */
+
+#define io_grab_master()
+#define io_release_master()
+
+#endif /* NCPUS > 1 */
+
+#endif /* _DEVICE_DEV_MASTER_H_ */
diff --git a/device/dev_name.c b/device/dev_name.c
new file mode 100644
index 0000000..abd525c
--- /dev/null
+++ b/device/dev_name.c
@@ -0,0 +1,242 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+#include <device/device_types.h>
+#include <device/dev_hdr.h>
+#include <device/conf.h>
+
+
+
+/*
+ * Routines placed in empty entries in the device tables
+ */
+/* No-op reset; always succeeds. */
+int nulldev_reset(dev_t dev)
+{
+	return (D_SUCCESS);
+}
+
+/* No-op open; always succeeds. */
+int nulldev_open(dev_t dev, int flags, io_req_t ior)
+{
+	return (D_SUCCESS);
+}
+
+/* No-op close. */
+void nulldev_close(dev_t dev, int flags)
+{
+}
+
+/* No-op read; succeeds without transferring any data. */
+int nulldev_read(dev_t dev, io_req_t ior)
+{
+	return (D_SUCCESS);
+}
+
+/* No-op write; succeeds without transferring any data. */
+int nulldev_write(dev_t dev, io_req_t ior)
+{
+	return (D_SUCCESS);
+}
+
+/* No-op getstat; succeeds without filling in data or count. */
+io_return_t nulldev_getstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count)
+{
+	return (D_SUCCESS);
+}
+
+/* No-op setstat; accepts and ignores the new status. */
+io_return_t nulldev_setstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count)
+{
+	return (D_SUCCESS);
+}
+
+/* No-op port-death handler; always succeeds. */
+int nulldev_portdeath(dev_t dev, mach_port_t port)
+{
+	return (D_SUCCESS);
+}
+
+/* Rejecting stub: asynchronous input is not supported. */
+int nodev_async_in(dev_t dev, const ipc_port_t port, int x, filter_t* filter, unsigned int j)
+{
+	return (D_INVALID_OPERATION);
+}
+
+/* Rejecting stub: device info is not supported. */
+int nodev_info(dev_t dev, int a, int* b)
+{
+	return (D_INVALID_OPERATION);
+}
+
+/* Rejecting stub for d_mmap: always returns -1.  Used as a sentinel
+   too -- device_pager_setup refuses devices whose d_mmap is nomap. */
+vm_offset_t
+nomap(dev_t dev, vm_offset_t off, int prot)
+{
+	return -1;
+}
+
+/*
+ * Name comparison routine.
+ * Compares first 'len' characters of 'src'
+ * with 'target', which is zero-terminated.
+ * Returns TRUE if strings are equal:
+ * src and target are equal in first 'len' characters
+ * next character of target is 0 (end of string).
+ */
+boolean_t __attribute__ ((pure))
+name_equal(const char *src,
+	   int len,
+	   const char *target)
+{
+	/* Match the first 'len' characters, then require that the
+	   zero-terminated 'target' ends exactly there. */
+	for (; len > 0; len--)
+		if (*src++ != *target++)
+			return FALSE;
+	return *target == 0;
+}
+
+/*
+ * device name lookup
+ */
+boolean_t dev_name_lookup(
+	const char	*name,
+	dev_ops_t	*ops,	/* out */
+	int		*unit)	/* out */
+{
+	/*
+	 * Assume that block device names are of the form
+	 *
+	 * <device_name><unit_number>[[<slice num>]<partition>]
+	 *
+	 * where
+	 * <device_name>	is the name in the device table
+	 * <unit_number>	is an integer
+	 * <slice num>		is 's' followed by a number (disks only!)
+	 * <partition>		is a letter in [a-h] (disks only?)
+	 */
+
+	const char		*cp = name;
+	int		len;
+	int		j = 0;
+	int		c;
+	dev_ops_t	dev;
+	boolean_t	found;
+
+	int slice_num = 0;
+
+	/*
+	 * Find device type name (characters before digit)
+	 */
+	while ((c = *cp) != '\0' &&
+		!(c >= '0' && c <= '9'))
+	    cp++;
+
+	len = cp - name;
+	if (c != '\0') {
+	    /*
+	     * Find unit number
+	     */
+	    while ((c = *cp) != '\0' &&
+		    c >= '0' && c <= '9') {
+		j = j * 10 + (c - '0');
+		cp++;
+	    }
+	}
+
+	/* Match the name prefix against the static device table. */
+	found = FALSE;
+	dev_search(dev) {
+	    if (name_equal(name, len, dev->d_name)) {
+		found = TRUE;
+		break;
+	    }
+	}
+	if (!found) {
+	    /* name not found - try indirection list */
+	    dev_indirect_t	di;
+
+	    dev_indirect_search(di) {
+		if (name_equal(name, len, di->d_name)) {
+		    /*
+		     * Return device and unit from indirect vector.
+		     */
+		    *ops = di->d_ops;
+		    *unit = di->d_unit;
+		    return (TRUE);
+		}
+	    }
+	    /* Not found in either list. */
+	    return (FALSE);
+	}
+
+	*ops = dev;
+	*unit = j;
+
+	/*
+	 * Find sub-device number
+	 */
+
+	j = dev->d_subdev;
+	if (j > 0) {
+	    /* if no slice string, slice num = 0 */
+
+	    /* <subdev_count>*unit + <slice_number>*16 -- I know it's bad */
+	    *unit *= j;
+
+	    /* find slice ? */
+	    if (c == 's') {
+		cp++;
+		while ((c = *cp) != '\0' &&
+			c >= '0' && c <= '9') {
+		    slice_num = slice_num * 10 + (c - '0');
+		    cp++;
+		}
+	    }
+
+	    /* Fold the slice number into bits 4+ of the minor. */
+	    *unit += (slice_num << 4);
+	    /* if slice==0, it is either compatibility or whole device */
+
+	    if (c >= 'a' && c < 'a' + j) {	/* note: w/o this -> whole slice */
+		/*
+		 * Minor number is <subdev_count>*unit + letter.
+		 * NOW it is slice result + letter
+		 */
+		*unit += (c - 'a' +1);
+	    }
+	}
+	return (TRUE);
+}
+
+/*
+ * Change an entry in the indirection list.
+ */
+void
+dev_set_indirection(const char *name, dev_ops_t ops, int unit)
+{
+	dev_indirect_t entry;
+
+	/* Rebind the first indirection entry with a matching name. */
+	dev_indirect_search(entry) {
+		if (strcmp(entry->d_name, name) == 0) {
+			entry->d_ops = ops;
+			entry->d_unit = unit;
+			break;
+		}
+	}
+}
diff --git a/device/dev_pager.c b/device/dev_pager.c
new file mode 100644
index 0000000..1cd7406
--- /dev/null
+++ b/device/dev_pager.c
@@ -0,0 +1,662 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ *
+ * Device pager.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/std_types.h>
+#include <mach/mach_types.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/queue.h>
+#include <kern/slab.h>
+
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+
+#include <device/device_pager.server.h>
+#include <device/device_types.h>
+#include <device/ds_routines.h>
+#include <device/dev_hdr.h>
+#include <device/io_req.h>
+#include <device/memory_object_reply.user.h>
+#include <device/dev_pager.h>
+#include <device/blkio.h>
+#include <device/conf.h>
+
+/*
+ * The device pager routines are called directly from the message
+ * system (via mach_msg), and thus run in the kernel-internal
+ * environment. All ports are in internal form (ipc_port_t),
+ * and must be correctly reference-counted in order to be saved
+ * in other data structures. Kernel routines may be called
+ * directly. Kernel types are used for data objects (tasks,
+ * memory objects, ports). The only IPC routines that may be
+ * called are ones that masquerade as the kernel task (via
+ * msg_send_from_kernel).
+ *
+ * Port rights and references are maintained as follows:
+ * Memory object port:
+ * The device_pager task has all rights.
+ * Memory object control port:
+ * The device_pager task has only send rights.
+ * Memory object name port:
+ * The device_pager task has only send rights.
+ * The name port is not even recorded.
+ * Regardless how the object is created, the control and name
+ * ports are created by the kernel and passed through the memory
+ * management interface.
+ *
+ * The device_pager assumes that access to its memory objects
+ * will not be propagated to more that one host, and therefore
+ * provides no consistency guarantees beyond those made by the
+ * kernel.
+ *
+ * In the event that more than one host attempts to use a device
+ * memory object, the device_pager will only record the last set
+ * of port names. [This can happen with only one host if a new
+ * mapping is being established while termination of all previous
+ * mappings is taking place.] Currently, the device_pager assumes
+ * that its clients adhere to the initialization and termination
+ * protocols in the memory management interface; otherwise, port
+ * rights or out-of-line memory from erroneous messages may be
+ * allowed to accumulate.
+ *
+ * [The phrase "currently" has been used above to denote aspects of
+ * the implementation that could be altered without changing the rest
+ * of the basic documentation.]
+ */
+
+/*
+ * Basic device pager structure.
+ */
+struct dev_pager {
+	decl_simple_lock_data(, lock)	/* lock for reference count */
+	int		ref_count;	/* reference count */
+	int		client_count;	/* How many memory_object_create
+					 * calls have we received */
+	ipc_port_t	pager;		/* pager port */
+	ipc_port_t	pager_request;	/* Known request port */
+	ipc_port_t	pager_name;	/* Known name port */
+	mach_device_t	device;		/* Device handle */
+	vm_offset_t	offset;		/* offset within the pager, in bytes*/
+	int		type;		/* to distinguish */
+#define DEV_PAGER_TYPE	0
+#define CHAR_PAGER_TYPE	1
+	/* char pager specifics */
+	int		prot;		/* protection requested at setup */
+	vm_size_t	size;		/* size of mapping, page-rounded */
+};
+typedef struct dev_pager *dev_pager_t;
+#define	DEV_PAGER_NULL	((dev_pager_t)0)
+
+
+struct kmem_cache dev_pager_cache;
+
+/* Add a reference to a pager record; ds->lock protects ref_count. */
+static void dev_pager_reference(dev_pager_t ds)
+{
+	simple_lock(&ds->lock);
+	ds->ref_count++;
+	simple_unlock(&ds->lock);
+}
+
+/* Drop one reference; free the record on the last release. */
+static void dev_pager_deallocate(dev_pager_t ds)
+{
+	simple_lock(&ds->lock);
+	if (--ds->ref_count > 0) {
+	    simple_unlock(&ds->lock);
+	    return;
+	}
+
+	/* Last reference gone.  NOTE(review): the ports and the device
+	   reference stored in 'ds' are not released here -- presumably
+	   the caller has already done so before the final deallocate;
+	   verify against the termination path. */
+	simple_unlock(&ds->lock);
+	kmem_cache_free(&dev_pager_cache, (vm_offset_t)ds);
+}
+
+/*
+ * A hash table of ports for device_pager backed objects.
+ */
+
+#define DEV_HASH_COUNT 127
+
+struct dev_pager_entry {
+ queue_chain_t links;
+ ipc_port_t name;
+ dev_pager_t pager_rec;
+};
+typedef struct dev_pager_entry *dev_pager_entry_t;
+
+/*
+ * Indexed by port name, each element contains a queue of all dev_pager_entry_t
+ * which name shares the same hash
+ */
+queue_head_t dev_pager_hashtable[DEV_HASH_COUNT];
+struct kmem_cache dev_pager_hash_cache;
+def_simple_lock_data(static, dev_pager_hash_lock)
+
+struct dev_device_entry {
+ queue_chain_t links;
+ mach_device_t device;
+ vm_offset_t offset;
+ dev_pager_t pager_rec;
+};
+typedef struct dev_device_entry *dev_device_entry_t;
+
+/*
+ * Indexed by device + offset, each element contains a queue of all
+ * dev_device_entry_t which device + offset shares the same hash
+ */
+queue_head_t dev_device_hashtable[DEV_HASH_COUNT];
+struct kmem_cache dev_device_hash_cache;
+def_simple_lock_data(static, dev_device_hash_lock)
+
+#define dev_hash(name_port) \
+ (((vm_offset_t)(name_port) & 0xffffff) % DEV_HASH_COUNT)
+
+static void dev_pager_hash_init(void)
+{
+	int i;
+
+	/* Cache for hash entries, empty buckets, and the bucket lock. */
+	kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry",
+			sizeof(struct dev_pager_entry), 0, NULL, 0);
+	for (i = 0; i < DEV_HASH_COUNT; i++)
+		queue_init(&dev_pager_hashtable[i]);
+	simple_lock_init(&dev_pager_hash_lock);
+}
+
+static void dev_pager_hash_insert(
+	const ipc_port_t name_port,
+	const dev_pager_t rec)
+{
+	dev_pager_entry_t entry;
+
+	/* Build the entry before taking the hash lock. */
+	entry = (dev_pager_entry_t) kmem_cache_alloc(&dev_pager_hash_cache);
+	entry->name = name_port;
+	entry->pager_rec = rec;
+
+	simple_lock(&dev_pager_hash_lock);
+	queue_enter(&dev_pager_hashtable[dev_hash(name_port)],
+		    entry, dev_pager_entry_t, links);
+	simple_unlock(&dev_pager_hash_lock);
+}
+
+/* Remove and free the hash entry for a pager port, if present. */
+static void dev_pager_hash_delete(const ipc_port_t name_port)
+{
+	queue_t	bucket;
+	dev_pager_entry_t	entry;
+
+	bucket = &dev_pager_hashtable[dev_hash(name_port)];
+
+	simple_lock(&dev_pager_hash_lock);
+	for (entry = (dev_pager_entry_t)queue_first(bucket);
+	     !queue_end(bucket, &entry->links);
+	     entry = (dev_pager_entry_t)queue_next(&entry->links)) {
+	    if (entry->name == name_port) {
+		queue_remove(bucket, entry, dev_pager_entry_t, links);
+		break;
+	    }
+	}
+	simple_unlock(&dev_pager_hash_lock);
+	/* If the loop broke early, 'entry' is the removed element and
+	   must be freed; if it ran off the end (queue_end is true),
+	   nothing matched and there is nothing to free. */
+	if (!queue_end(bucket, &entry->links))
+	    kmem_cache_free(&dev_pager_hash_cache, (vm_offset_t)entry);
+}
+
+/* Find the pager record for a pager port; returns it with an added
+   reference, or DEV_PAGER_NULL if no entry matches. */
+static dev_pager_t dev_pager_hash_lookup(const ipc_port_t name_port)
+{
+	queue_t	bucket;
+	dev_pager_entry_t	entry;
+	dev_pager_t	pager;
+
+	bucket = &dev_pager_hashtable[dev_hash(name_port)];
+
+	simple_lock(&dev_pager_hash_lock);
+	for (entry = (dev_pager_entry_t)queue_first(bucket);
+	     !queue_end(bucket, &entry->links);
+	     entry = (dev_pager_entry_t)queue_next(&entry->links)) {
+	    if (entry->name == name_port) {
+		pager = entry->pager_rec;
+		/* Reference while still holding the hash lock, so the
+		   record cannot be freed between lookup and return. */
+		dev_pager_reference(pager);
+		simple_unlock(&dev_pager_hash_lock);
+		return (pager);
+	    }
+	}
+	simple_unlock(&dev_pager_hash_lock);
+	return (DEV_PAGER_NULL);
+}
+
+static void dev_device_hash_init(void)
+{
+	int i;
+
+	/* Cache for hash entries, empty buckets, and the bucket lock. */
+	kmem_cache_init(&dev_device_hash_cache, "dev_device_entry",
+			sizeof(struct dev_device_entry), 0, NULL, 0);
+	for (i = 0; i < DEV_HASH_COUNT; i++)
+		queue_init(&dev_device_hashtable[i]);
+	simple_lock_init(&dev_device_hash_lock);
+}
+
+static void dev_device_hash_insert(
+	const mach_device_t device,
+	const vm_offset_t offset,
+	const dev_pager_t rec)
+{
+	dev_device_entry_t entry;
+
+	/* Build the entry before taking the hash lock. */
+	entry = (dev_device_entry_t) kmem_cache_alloc(&dev_device_hash_cache);
+	entry->device = device;
+	entry->offset = offset;
+	entry->pager_rec = rec;
+
+	simple_lock(&dev_device_hash_lock);
+	queue_enter(&dev_device_hashtable[dev_hash(device + offset)],
+		    entry, dev_device_entry_t, links);
+	simple_unlock(&dev_device_hash_lock);
+}
+
+/* Remove and free the hash entry for (device, offset), if present. */
+static void dev_device_hash_delete(
+	const mach_device_t device,
+	const vm_offset_t offset)
+{
+	queue_t	bucket;
+	dev_device_entry_t	entry;
+
+	bucket = &dev_device_hashtable[dev_hash(device + offset)];
+
+	simple_lock(&dev_device_hash_lock);
+	for (entry = (dev_device_entry_t)queue_first(bucket);
+	     !queue_end(bucket, &entry->links);
+	     entry = (dev_device_entry_t)queue_next(&entry->links)) {
+	    if (entry->device == device && entry->offset == offset) {
+		queue_remove(bucket, entry, dev_device_entry_t, links);
+		break;
+	    }
+	}
+	simple_unlock(&dev_device_hash_lock);
+	/* Free only if the loop broke early on a match; otherwise
+	   'entry' ran off the end of the bucket (queue_end is true). */
+	if (!queue_end(bucket, &entry->links))
+	    kmem_cache_free(&dev_device_hash_cache, (vm_offset_t)entry);
+}
+
+/* Find the pager record for (device, offset); returns it with an
+   added reference, or DEV_PAGER_NULL if no entry matches. */
+static dev_pager_t dev_device_hash_lookup(
+	const mach_device_t device,
+	const vm_offset_t offset)
+{
+	queue_t	bucket;
+	dev_device_entry_t	entry;
+	dev_pager_t	pager;
+
+	bucket = &dev_device_hashtable[dev_hash(device + offset)];
+
+	simple_lock(&dev_device_hash_lock);
+	for (entry = (dev_device_entry_t)queue_first(bucket);
+	     !queue_end(bucket, &entry->links);
+	     entry = (dev_device_entry_t)queue_next(&entry->links)) {
+	    if (entry->device == device && entry->offset == offset) {
+		pager = entry->pager_rec;
+		/* Reference under the hash lock so the record cannot
+		   be freed between lookup and return. */
+		dev_pager_reference(pager);
+		simple_unlock(&dev_device_hash_lock);
+		return (pager);
+	    }
+	}
+	simple_unlock(&dev_device_hash_lock);
+	return (DEV_PAGER_NULL);
+}
+
+/*
+ * Create (or find) the device pager for mapping the given device
+ * range, returning a send right to its memory-object port in *pager.
+ * A second setup for the same (device, offset) returns a new send
+ * right to the existing pager.
+ *
+ * NOTE(review): the lookup and the hash insertions below are not
+ * covered by a single lock, so two concurrent setups for the same
+ * (device, offset) could presumably both create pagers -- verify
+ * that callers serialize this path.
+ */
+kern_return_t	device_pager_setup(
+	const mach_device_t	device,
+	int		prot,
+	vm_offset_t	offset,
+	vm_size_t	size,
+	mach_port_t	*pager)
+{
+	dev_pager_t	d;
+
+	/*
+	 * Verify the device is indeed mappable
+	 */
+	if (!device->dev_ops->d_mmap || (device->dev_ops->d_mmap == nomap))
+	    return (D_INVALID_OPERATION);
+
+	/*
+	 * Allocate a structure to hold the arguments
+	 * and port to represent this object.
+	 */
+
+	d = dev_device_hash_lookup(device, offset);
+	if (d != DEV_PAGER_NULL) {
+	    /* Existing pager: hand out another send right. */
+	    *pager = (mach_port_t) ipc_port_make_send(d->pager);
+	    dev_pager_deallocate(d);
+	    return (D_SUCCESS);
+	}
+
+	d = (dev_pager_t) kmem_cache_alloc(&dev_pager_cache);
+	if (d == DEV_PAGER_NULL)
+	    return (KERN_RESOURCE_SHORTAGE);
+
+	simple_lock_init(&d->lock);
+	d->ref_count = 1;
+
+	/*
+	 * Allocate the pager port.
+	 */
+	d->pager = ipc_port_alloc_kernel();
+	if (d->pager == IP_NULL) {
+	    dev_pager_deallocate(d);
+	    return (KERN_RESOURCE_SHORTAGE);
+	}
+
+	d->client_count = 0;
+	d->pager_request = IP_NULL;
+	d->pager_name = IP_NULL;
+	d->device = device;
+	mach_device_reference(device);
+	d->offset = offset;
+	d->prot = prot;
+	d->size = round_page(size);
+	/* Block devices (those mapped through block_io_mmap) use the
+	   device pager type; everything else is the char pager type. */
+	if (device->dev_ops->d_mmap == block_io_mmap) {
+	    d->type = DEV_PAGER_TYPE;
+	} else {
+	    d->type = CHAR_PAGER_TYPE;
+	}
+
+	/* Make the pager findable by port and by (device, offset). */
+	dev_pager_hash_insert(d->pager, d);
+	dev_device_hash_insert(d->device, d->offset, d);
+
+	*pager = (mach_port_t) ipc_port_make_send(d->pager);
+	return (KERN_SUCCESS);
+}
+
+/* Set to TRUE to trace pager requests on the console. */
+boolean_t device_pager_debug = FALSE;
+
+/*
+ * device_pager_data_request: memory_object_data_request handler for
+ * device pagers.
+ *
+ * For CHAR_PAGER_TYPE objects the requested range is satisfied by
+ * mapping device pages directly into the object via
+ * vm_object_page_map() with device_map_page() as the translation
+ * callback; failures are reported back with
+ * r_memory_object_data_error.  DEV (block) pagers are not expected to
+ * reach this path and panic.
+ *
+ * Always returns KERN_SUCCESS to the IPC layer; errors travel via the
+ * data_error reply instead.
+ */
+kern_return_t device_pager_data_request(
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ vm_offset_t offset,
+ vm_size_t length,
+ vm_prot_t protection_required)
+{
+ dev_pager_t ds;
+ kern_return_t ret;
+
+ if (device_pager_debug)
+ printf("(device_pager)data_request: pager=%p, offset=0x%lx, length=0x%lx\n",
+ pager, (unsigned long) offset, (unsigned long) length);
+
+ ds = dev_pager_hash_lookup(pager);
+ if (ds == DEV_PAGER_NULL)
+ panic("(device_pager)data_request: lookup failed");
+
+ if (ds->pager_request != pager_request)
+ panic("(device_pager)data_request: bad pager_request");
+
+ if (ds->type == CHAR_PAGER_TYPE) {
+ vm_object_t object;
+
+ object = vm_object_lookup(pager_request);
+ if (object == VM_OBJECT_NULL) {
+ (void) r_memory_object_data_error(pager_request,
+ offset, length,
+ KERN_FAILURE);
+ dev_pager_deallocate(ds);
+ return (KERN_SUCCESS);
+ }
+
+ ret = vm_object_page_map(object,
+ offset, length,
+ device_map_page, (void *)ds);
+
+ if (ret != KERN_SUCCESS) {
+ (void) r_memory_object_data_error(pager_request,
+ offset, length,
+ ret);
+ vm_object_deallocate(object);
+ dev_pager_deallocate(ds);
+ return (KERN_SUCCESS);
+ }
+
+ vm_object_deallocate(object);
+ }
+ else {
+ panic("(device_pager)data_request: dev pager");
+ }
+
+ /* Balance the reference taken by dev_pager_hash_lookup. */
+ dev_pager_deallocate(ds);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * device_pager_copy: memory_object_copy is not supported for device
+ * pagers; reaching this handler is a kernel bug, hence the panic.
+ */
+kern_return_t device_pager_copy(
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ vm_offset_t offset,
+ vm_size_t length,
+ const ipc_port_t new_pager)
+{
+ panic("(device_pager)copy: called");
+}
+
+/*
+ * device_pager_supply_completed: unsupported for device pagers;
+ * the device pager never supplies data this way, so any call is a bug.
+ */
+kern_return_t
+device_pager_supply_completed(
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ vm_offset_t offset,
+ vm_size_t length,
+ kern_return_t result,
+ vm_offset_t error_offset)
+{
+ panic("(device_pager)supply_completed: called");
+}
+
+/*
+ * device_pager_data_return: unsupported; device-backed objects are
+ * never paged out back to the pager, so any call is a bug.
+ */
+kern_return_t
+device_pager_data_return(
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ vm_offset_t offset,
+ pointer_t addr,
+ mach_msg_type_number_t data_cnt,
+ boolean_t dirty,
+ boolean_t kernel_copy)
+{
+ panic("(device_pager)data_return: called");
+}
+
+/*
+ * device_pager_change_completed: unsupported; the device pager never
+ * issues memory_object_change_attributes, so any reply is a bug.
+ */
+kern_return_t
+device_pager_change_completed(
+ const ipc_port_t pager,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
+{
+ panic("(device_pager)change_completed: called");
+}
+
+/*
+ * The mapping function takes a byte offset, but returns
+ * a machine-dependent page frame number. We convert
+ * that into something that the pmap module will
+ * accept later.
+ */
+/*
+ * device_map_page: translate a byte offset within a device mapping
+ * into a physical address, by way of the driver's d_mmap entry point.
+ *
+ * dsp:    the dev_pager_t passed opaquely through vm_object_page_map.
+ * offset: byte offset relative to the pager's base offset.
+ *
+ * d_mmap returns a machine-dependent page frame number, or -1 when the
+ * offset is not mappable, in which case the fictitious page address is
+ * returned so the VM system can fault appropriately.
+ */
+phys_addr_t device_map_page(
+ void *dsp,
+ vm_offset_t offset)
+{
+ dev_pager_t ds = (dev_pager_t) dsp;
+ vm_offset_t pagenum =
+ (*(ds->device->dev_ops->d_mmap))
+ (ds->device->dev_number,
+ ds->offset + offset,
+ ds->prot);
+
+ if (pagenum == -1)
+ return vm_page_fictitious_addr;
+
+ return pmap_phys_address(pagenum);
+}
+
+/*
+ * device_pager_init_pager: memory_object_init handler.  The kernel's
+ * VM system calls this when it first maps the pager; we record the
+ * request and name ports (keeping the send rights for later release in
+ * device_pager_terminate) and reply that the object is ready.
+ *
+ * Char pagers are declared uncacheable with COPY_NONE; block pagers
+ * are cacheable with COPY_DELAY.
+ */
+kern_return_t device_pager_init_pager(
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ const ipc_port_t pager_name,
+ vm_size_t pager_page_size)
+{
+ dev_pager_t ds;
+
+ if (device_pager_debug)
+ printf("(device_pager)init: pager=%p, request=%p, name=%p\n",
+ pager, pager_request, pager_name);
+
+ assert(pager_page_size == PAGE_SIZE);
+ assert(IP_VALID(pager_request));
+ assert(IP_VALID(pager_name));
+
+ ds = dev_pager_hash_lookup(pager);
+ assert(ds != DEV_PAGER_NULL);
+
+ /* init must be the first and only client interaction so far. */
+ assert(ds->client_count == 0);
+ assert(ds->pager_request == IP_NULL);
+ assert(ds->pager_name == IP_NULL);
+
+ ds->client_count = 1;
+
+ /*
+ * We save the send rights for the request and name ports.
+ */
+
+ ds->pager_request = pager_request;
+ ds->pager_name = pager_name;
+
+ if (ds->type == CHAR_PAGER_TYPE) {
+ /*
+ * Reply that the object is ready
+ */
+ (void) r_memory_object_ready(pager_request,
+ FALSE, /* do not cache */
+ MEMORY_OBJECT_COPY_NONE);
+ } else {
+ (void) r_memory_object_ready(pager_request,
+ TRUE, /* cache */
+ MEMORY_OBJECT_COPY_DELAY);
+ }
+
+ /* Balance the reference taken by dev_pager_hash_lookup. */
+ dev_pager_deallocate(ds);
+ return (KERN_SUCCESS);
+}
+
+/*
+ * device_pager_terminate: memory_object_terminate handler.  Tears down
+ * the pager: removes it from both hash tables, drops the device
+ * reference, releases all port rights involved, and finally destroys
+ * the dev_pager structure itself.
+ *
+ * The terminate message carries the receive rights for the request and
+ * name ports ("naked" receive rights), which is why both a send-right
+ * release and a receive-right release are performed on each.
+ */
+kern_return_t device_pager_terminate(
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ const ipc_port_t pager_name)
+{
+ dev_pager_t ds;
+
+ assert(IP_VALID(pager_request));
+ assert(IP_VALID(pager_name));
+
+ ds = dev_pager_hash_lookup(pager);
+ assert(ds != DEV_PAGER_NULL);
+
+ /* terminate must match exactly one prior init. */
+ assert(ds->client_count == 1);
+ assert(ds->pager_request == pager_request);
+ assert(ds->pager_name == pager_name);
+
+ dev_pager_hash_delete(ds->pager);
+ dev_device_hash_delete(ds->device, ds->offset);
+ mach_device_deallocate(ds->device);
+
+ /* release the send rights we have saved from the init call */
+
+ ipc_port_release_send(pager_request);
+ ipc_port_release_send(pager_name);
+
+ /* release the naked receive rights we just acquired */
+
+ ipc_port_release_receive(pager_request);
+ ipc_port_release_receive(pager_name);
+
+ /* release the kernel's receive right for the pager port */
+
+ ipc_port_dealloc_kernel(pager);
+
+ /* once for ref from lookup, once to make it go away */
+ dev_pager_deallocate(ds);
+ dev_pager_deallocate(ds);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * device_pager_data_unlock: unsupported; device pagers never lock
+ * data, so any unlock request is a bug.  (The return after panic is
+ * unreachable but satisfies the prototype.)
+ */
+kern_return_t device_pager_data_unlock(
+ const ipc_port_t memory_object,
+ const ipc_port_t memory_control_port,
+ vm_offset_t offset,
+ vm_size_t length,
+ vm_prot_t desired_access)
+{
+ panic("(device_pager)data_unlock: called");
+ return (KERN_FAILURE);
+}
+
+/*
+ * device_pager_lock_completed: unsupported; the device pager never
+ * issues memory_object_lock_request, so any completion reply is a bug.
+ */
+kern_return_t device_pager_lock_completed(
+ const ipc_port_t memory_object,
+ const ipc_port_t pager_request_port,
+ vm_offset_t offset,
+ vm_size_t length)
+{
+ panic("(device_pager)lock_completed: called");
+ return (KERN_FAILURE);
+}
+
+/*
+ * device_pager_init: one-time bootstrap of the device pager module.
+ * Sets up the dev_pager slab cache and both lookup hash tables.
+ * Called once from device_service_create during system startup.
+ */
+void device_pager_init(void)
+{
+ vm_size_t size;
+
+ /*
+ * Initialize cache of paging structures.
+ */
+ size = sizeof(struct dev_pager);
+ kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
+ NULL, 0);
+
+ /*
+ * Initialize the name port hashing stuff.
+ */
+ dev_pager_hash_init();
+ dev_device_hash_init();
+}
diff --git a/device/dev_pager.h b/device/dev_pager.h
new file mode 100644
index 0000000..dc4b202
--- /dev/null
+++ b/device/dev_pager.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_DEV_PAGER_H_
+#define _DEVICE_DEV_PAGER_H_
+
+phys_addr_t device_map_page(void *dsp, vm_offset_t offset);
+
+boolean_t device_pager_data_request_done(io_req_t ior);
+
+#endif /* _DEVICE_DEV_PAGER_H_ */
diff --git a/device/device.srv b/device/device.srv
new file mode 100644
index 0000000..f63813f
--- /dev/null
+++ b/device/device.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <device/device.defs>
diff --git a/device/device_emul.h b/device/device_emul.h
new file mode 100644
index 0000000..873d7f5
--- /dev/null
+++ b/device/device_emul.h
@@ -0,0 +1,64 @@
+/*
+ * Mach device emulation definitions (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#ifndef _I386AT_DEVICE_EMUL_H_
+#define _I386AT_DEVICE_EMUL_H_
+
+#include <mach/notify.h>
+#include <device/net_status.h>
+
+/* Each emulation layer provides these operations. */
+/* Each emulation layer provides these operations. */
+/*
+ * A device emulation (native Mach, Linux glue, Xen hypervisor, ...)
+ * registers one of these operation vectors; the generic ds_device_*
+ * RPC handlers dispatch through it.  Any member may be NULL, in which
+ * case the dispatcher returns D_INVALID_OPERATION (or a no-op default).
+ * The void * argument is the emulation-private device data
+ * (dev->emul_data).
+ */
+struct device_emulation_ops
+{
+ /* Reference counting of the emulation-private device object. */
+ void (*reference) (void *);
+ void (*dealloc) (void *);
+ /* Convert the private device object to its IPC port. */
+ ipc_port_t (*dev_to_port) (void *);
+ /* Open by name; probes whether this emulation owns the device. */
+ io_return_t (*open) (ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, const char *, device_t *);
+ io_return_t (*close) (void *);
+ /* Out-of-line and inband data transfer entry points. */
+ io_return_t (*write) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, io_buf_ptr_t, unsigned, int *);
+ io_return_t (*write_inband) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, const io_buf_ptr_inband_t,
+ unsigned, int *);
+ io_return_t (*read) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, int, io_buf_ptr_t *, unsigned *);
+ io_return_t (*read_inband) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, int, char *, unsigned *);
+ /* Device status control. */
+ io_return_t (*set_status) (void *, dev_flavor_t, dev_status_t,
+ mach_msg_type_number_t);
+ io_return_t (*get_status) (void *, dev_flavor_t, dev_status_t,
+ mach_msg_type_number_t *);
+ /* Packet filter installation (network devices). */
+ io_return_t (*set_filter) (void *, ipc_port_t, int, filter_t [], unsigned);
+ /* Memory-mapped access (device pager setup). */
+ io_return_t (*map) (void *, vm_prot_t, vm_offset_t,
+ vm_size_t, ipc_port_t *, boolean_t);
+ /* No-more-senders notification on the device port. */
+ void (*no_senders) (mach_no_senders_notification_t *);
+ /* Fast-path write traps (no full IPC round trip). */
+ io_return_t (*write_trap) (void *, dev_mode_t,
+ rpc_recnum_t, rpc_vm_offset_t, rpc_vm_size_t);
+ io_return_t (*writev_trap) (void *, dev_mode_t,
+ rpc_recnum_t, rpc_io_buf_vec_t *, rpc_vm_size_t);
+};
+
+#endif /* _I386AT_DEVICE_EMUL_H_ */
diff --git a/device/device_init.c b/device/device_init.c
new file mode 100644
index 0000000..287d0a2
--- /dev/null
+++ b/device/device_init.c
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Initialize device service as part of kernel task.
+ */
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <kern/debug.h>
+#include <kern/task.h>
+#include <xen/xen.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/tty.h>
+#include <device/device_init.h>
+#include <device/ds_routines.h>
+#include <device/net_io.h>
+#include <device/chario.h>
+
+
+/* Master privileged port for this host's device service. */
+ipc_port_t master_device_port;
+
+/*
+ * device_service_create: initialize the kernel device service.
+ * Allocates the master device port, initializes every device
+ * subsystem (native devices, hypervisor devices when MACH_HYP,
+ * lookup tables, network I/O, the device pager, and character I/O),
+ * and spawns the io_done and net service threads in the kernel task.
+ * Called once during bootstrap.
+ */
+void
+device_service_create(void)
+{
+ master_device_port = ipc_port_alloc_kernel();
+ if (master_device_port == IP_NULL)
+ panic("can't allocate master device port");
+
+ mach_device_init();
+#ifdef MACH_HYP
+ hyp_dev_init();
+#endif
+ dev_lookup_init();
+ net_io_init();
+ device_pager_init();
+ chario_init();
+
+ (void) kernel_thread(kernel_task, io_done_thread, 0);
+ (void) kernel_thread(kernel_task, net_thread, 0);
+}
diff --git a/device/device_init.h b/device/device_init.h
new file mode 100644
index 0000000..175b34d
--- /dev/null
+++ b/device/device_init.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_DEVICE_INIT_H_
+#define _DEVICE_DEVICE_INIT_H_
+
+extern void device_service_create(void);
+
+#endif /* _DEVICE_DEVICE_INIT_H_ */
diff --git a/device/device_pager.srv b/device/device_pager.srv
new file mode 100644
index 0000000..410323d
--- /dev/null
+++ b/device/device_pager.srv
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#define memory_object device_pager
+
+/*
+ * Rename all of the functions in the pager interface, to avoid
+ * confusing them with the kernel interface.
+ */
+#define memory_object_init device_pager_init_pager
+#define memory_object_terminate device_pager_terminate
+#define memory_object_copy device_pager_copy
+#define memory_object_data_request device_pager_data_request
+#define memory_object_data_unlock device_pager_data_unlock
+#define memory_object_lock_completed device_pager_lock_completed
+#define memory_object_supply_completed device_pager_supply_completed
+#define memory_object_data_return device_pager_data_return
+#define memory_object_change_completed device_pager_change_completed
+
+#include <mach/memory_object.defs>
diff --git a/device/device_port.h b/device/device_port.h
new file mode 100644
index 0000000..8f8aaaa
--- /dev/null
+++ b/device/device_port.h
@@ -0,0 +1,41 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#ifndef _DEVICE_DEVICE_PORT_H_
+#define _DEVICE_DEVICE_PORT_H_
+
+#include <ipc/ipc_port.h>
+
+/*
+ * Master privileged port for this host's device service
+ */
+extern ipc_port_t master_device_port;
+
+#endif /* _DEVICE_DEVICE_PORT_H_ */
diff --git a/device/device_reply.cli b/device/device_reply.cli
new file mode 100644
index 0000000..956540c
--- /dev/null
+++ b/device/device_reply.cli
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+
+#include <device/device_reply.defs>
diff --git a/device/device_types_kernel.h b/device/device_types_kernel.h
new file mode 100644
index 0000000..e17055c
--- /dev/null
+++ b/device/device_types_kernel.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#ifndef _DEVICE_DEVICE_TYPES_KERNEL_H_
+#define _DEVICE_DEVICE_TYPES_KERNEL_H_
+
+/*
+ * Kernel-only type definitions for device server.
+ */
+
+#include <mach/port.h>
+#include <device/dev_hdr.h>
+
+extern ipc_port_t convert_device_to_port(device_t);
+
+#endif /* _DEVICE_DEVICE_TYPES_KERNEL_H_ */
diff --git a/device/ds_routines.c b/device/ds_routines.c
new file mode 100644
index 0000000..d97d229
--- /dev/null
+++ b/device/ds_routines.c
@@ -0,0 +1,1901 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+/*
+ * Mach device server routines (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+#include <machine/locore.h>
+#include <machine/machspl.h> /* spl definitions */
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/queue.h>
+#include <kern/slab.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/sched_prim.h>
+
+#include <vm/memory_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+
+#include <device/device_types.h>
+#include <device/device.server.h>
+#include <device/dev_hdr.h>
+#include <device/conf.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+#include <device/net_status.h>
+#include <device/device_port.h>
+#include <device/device_reply.user.h>
+#include <device/device_emul.h>
+#include <device/intr.h>
+
+#include <machine/machspl.h>
+
+#ifdef LINUX_DEV
+extern struct device_emulation_ops linux_block_emulation_ops;
+#ifdef CONFIG_INET
+extern struct device_emulation_ops linux_net_emulation_ops;
+extern void free_skbuffs (void);
+#ifdef CONFIG_PCMCIA
+extern struct device_emulation_ops linux_pcmcia_emulation_ops;
+#endif /* CONFIG_PCMCIA */
+#endif /* CONFIG_INET */
+#endif /* LINUX_DEV */
+#ifdef MACH_HYP
+extern struct device_emulation_ops hyp_block_emulation_ops;
+extern struct device_emulation_ops hyp_net_emulation_ops;
+#endif /* MACH_HYP */
+extern struct device_emulation_ops mach_device_emulation_ops;
+
+/* List of emulations. */
+/*
+ * Probed in order by ds_device_open; the native Mach emulation is
+ * deliberately last so it acts as the fallback when no other layer
+ * claims the device name.
+ */
+static struct device_emulation_ops *emulation_list[] =
+{
+#ifdef LINUX_DEV
+ &linux_block_emulation_ops,
+#ifdef CONFIG_INET
+ &linux_net_emulation_ops,
+#ifdef CONFIG_PCMCIA
+ &linux_pcmcia_emulation_ops,
+#endif /* CONFIG_PCMCIA */
+#endif /* CONFIG_INET */
+#endif /* LINUX_DEV */
+#ifdef MACH_HYP
+ &hyp_block_emulation_ops,
+ &hyp_net_emulation_ops,
+#endif /* MACH_HYP */
+ &mach_device_emulation_ops,
+};
+
+/* Kernel submap used for wiring device I/O buffers. */
+static struct vm_map device_io_map_store;
+vm_map_t device_io_map = &device_io_map_store;
+/* Slab cache for small inband I/O buffers. */
+struct kmem_cache io_inband_cache;
+
+#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0]))
+
+/*
+ * ds_device_open: device_open RPC handler.  Must be invoked on the
+ * master device port; requires a valid reply port.  Each registered
+ * emulation's open routine is tried in order until one recognizes the
+ * device name (anything other than D_NO_SUCH_DEVICE ends the probe).
+ */
+io_return_t
+ds_device_open (ipc_port_t open_port, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ const_dev_name_t name, device_t *devp)
+{
+ unsigned i;
+ io_return_t err;
+
+ /* Open must be called on the master device port. */
+ if (open_port != master_device_port)
+ return D_INVALID_OPERATION;
+
+ /* There must be a reply port. */
+ if (! IP_VALID (reply_port))
+ {
+ printf ("ds_* invalid reply port\n");
+ SoftDebugger ("ds_* reply_port");
+ return MIG_NO_REPLY;
+ }
+
+ /* Call each emulation's open routine to find the device. */
+ for (i = 0; i < NUM_EMULATION; i++)
+ {
+ err = (*emulation_list[i]->open) (reply_port, reply_port_type,
+ mode, name, devp);
+ if (err != D_NO_SUCH_DEVICE)
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * ds_device_open_new: newer open RPC variant; identical semantics,
+ * simply forwards to ds_device_open.
+ */
+io_return_t
+ds_device_open_new (ipc_port_t open_port, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ const_dev_name_t name, device_t *devp)
+{
+ return ds_device_open (open_port, reply_port, reply_port_type, mode, name, devp);
+}
+
+/*
+ * ds_device_close: device_close RPC handler.  Delegates to the
+ * emulation's close op; a missing op counts as success.
+ */
+io_return_t
+ds_device_close (device_t dev)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ return (dev->emul_ops->close
+ ? (*dev->emul_ops->close) (dev->emul_data)
+ : D_SUCCESS);
+}
+
+/*
+ * ds_device_write: out-of-line write RPC.  Validates the device and
+ * data pointer, then delegates to the emulation's write op.
+ */
+io_return_t
+ds_device_write (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (data == 0)
+ return D_INVALID_SIZE;
+
+ if (! dev->emul_ops->write)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->write) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ data, count, bytes_written);
+}
+
+/*
+ * ds_device_write_inband: write RPC with data carried inline in the
+ * message; delegates to the emulation's write_inband op.
+ */
+io_return_t
+ds_device_write_inband (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, recnum_t recnum,
+ const io_buf_ptr_inband_t data, unsigned count,
+ int *bytes_written)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (data == 0)
+ return D_INVALID_SIZE;
+
+ if (! dev->emul_ops->write_inband)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->write_inband) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ data, count, bytes_written);
+}
+
+/*
+ * ds_device_read: out-of-line read RPC; delegates to the emulation's
+ * read op, which allocates and returns the data buffer.
+ */
+io_return_t
+ds_device_read (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int count, io_buf_ptr_t *data,
+ unsigned *bytes_read)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->read)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->read) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ count, data, bytes_read);
+}
+
+/*
+ * ds_device_read_inband: read RPC returning data inline in the reply
+ * message; delegates to the emulation's read_inband op.
+ */
+io_return_t
+ds_device_read_inband (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int count, io_buf_ptr_inband_t data,
+ unsigned *bytes_read)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->read_inband)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->read_inband) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ count, data, bytes_read);
+}
+
+/*
+ * ds_device_set_status: device_set_status RPC; delegates to the
+ * emulation's set_status op.
+ */
+io_return_t
+ds_device_set_status (device_t dev, dev_flavor_t flavor,
+ dev_status_t status, mach_msg_type_number_t status_count)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->set_status)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->set_status) (dev->emul_data, flavor, status,
+ status_count);
+}
+
+/*
+ * ds_device_get_status: device_get_status RPC; delegates to the
+ * emulation's get_status op.
+ */
+io_return_t
+ds_device_get_status (device_t dev, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->get_status)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->get_status) (dev->emul_data, flavor, status,
+ status_count);
+}
+
+/*
+ * ds_device_set_filter: install a packet filter delivering matching
+ * packets to receive_port; delegates to the emulation's set_filter op.
+ */
+io_return_t
+ds_device_set_filter (device_t dev, ipc_port_t receive_port, int priority,
+ filter_t *filter, unsigned filter_count)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->set_filter)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->set_filter) (dev->emul_data, receive_port,
+ priority, filter, filter_count);
+}
+
+/*
+ * ds_device_map: device_map RPC; returns a memory-object pager port
+ * for mapping the device.  Delegates to the emulation's map op.
+ */
+io_return_t
+ds_device_map (device_t dev, vm_prot_t prot, vm_offset_t offset,
+ vm_size_t size, ipc_port_t *pager, boolean_t unmap)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->map)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->map) (dev->emul_data, prot,
+ offset, size, pager, unmap);
+}
+
+/* TODO: missing deregister support */
+/*
+ * ds_device_intr_register: let a user task receive interrupt
+ * notifications for IRQ `id' on `receive_port'.  Only valid on the
+ * "irq" pseudo-device; no flags are defined yet.  On success the port
+ * gains a reference so it survives its task's termination.  Not
+ * available under Xen.
+ */
+io_return_t
+ds_device_intr_register (device_t dev, int id,
+ int flags, ipc_port_t receive_port)
+{
+#if defined(MACH_XEN)
+ return D_INVALID_OPERATION;
+#else /* MACH_XEN */
+ kern_return_t err;
+ mach_device_t mdev;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ mdev = dev->emul_data;
+
+ /* No flag is defined for now */
+ if (flags != 0)
+ return D_INVALID_OPERATION;
+
+ /* Must be called on the irq device only */
+ if (! name_equal(mdev->dev_ops->d_name, 3, "irq"))
+ return D_INVALID_OPERATION;
+
+ user_intr_t *e = insert_intr_entry (&irqtab, id, receive_port);
+ if (!e)
+ return D_NO_MEMORY;
+
+ // TODO detect when the port get destroyed because the driver crashes and
+ // restart, to replace it when the same device driver calls it again.
+ err = install_user_intr_handler (&irqtab, id, flags, e);
+ if (err == D_SUCCESS)
+ {
+ /* If the port is installed successfully, increase its reference by 1.
+ * Thus, the port won't be destroyed after its task is terminated. */
+ ip_reference (receive_port);
+ }
+ return err;
+#endif /* MACH_XEN */
+}
+
+/* RPC entry: acknowledge a delivered interrupt so the line can be
+ * re-enabled.  Releases the send right that carried the notification
+ * on success.  Only valid on the "irq" device; unsupported under Xen. */
+kern_return_t
+ds_device_intr_ack (device_t dev, ipc_port_t receive_port)
+{
+#if defined(MACH_XEN)
+ return D_INVALID_OPERATION;
+#else /* MACH_XEN */
+ mach_device_t mdev;
+ kern_return_t ret;
+
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ mdev = dev->emul_data;
+
+ /* Must be called on the irq device only */
+ if (! name_equal(mdev->dev_ops->d_name, 3, "irq"))
+ return D_INVALID_OPERATION;
+
+ ret = irq_acknowledge(receive_port);
+
+ if (ret == D_SUCCESS)
+ ipc_port_release_send(receive_port);
+
+ return ret;
+#endif /* MACH_XEN */
+}
+
+/* Handle a kernel notification aimed at a device port.  Only no-senders
+ * notifications are expected; they are forwarded to the emulation layer's
+ * no_senders hook.  Returns TRUE if the message was consumed. */
+boolean_t
+ds_notify (mach_msg_header_t *msg)
+{
+ if (msg->msgh_id == MACH_NOTIFY_NO_SENDERS)
+ {
+ device_t dev;
+ mach_no_senders_notification_t *ns;
+
+ ns = (mach_no_senders_notification_t *) msg;
+ dev = dev_port_lookup((ipc_port_t) ns->not_header.msgh_remote_port);
+ assert(dev);
+ if (dev->emul_ops->no_senders)
+ (*dev->emul_ops->no_senders) (ns);
+ return TRUE;
+ }
+
+ printf ("ds_notify: strange notification %d\n", msg->msgh_id);
+ return FALSE;
+}
+
+/* Trap entry: device_write with the data still in user space (no IPC
+ * copyin of the buffer; the emulation hook does the copy). */
+io_return_t
+ds_device_write_trap (device_t dev, dev_mode_t mode,
+ rpc_recnum_t recnum, rpc_vm_offset_t data, rpc_vm_size_t count)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->write_trap)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->write_trap) (dev->emul_data,
+ mode, recnum, data, count);
+}
+
+/* Trap entry: scatter-gather variant of ds_device_write_trap; iovec is a
+ * user-space array of (address, count) pairs. */
+io_return_t
+ds_device_writev_trap (device_t dev, dev_mode_t mode,
+ rpc_recnum_t recnum, rpc_io_buf_vec_t *iovec, rpc_vm_size_t count)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+
+ if (! dev->emul_ops->writev_trap)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->writev_trap) (dev->emul_data,
+ mode, recnum, iovec, count);
+}
+
+/* Take a reference on a device, via the emulation layer's hook. */
+void
+device_reference (device_t dev)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return;
+
+ if (dev->emul_ops->reference)
+ (*dev->emul_ops->reference) (dev->emul_data);
+}
+
+/* Drop a reference on a device, via the emulation layer's hook. */
+void
+device_deallocate (device_t dev)
+{
+ /* Refuse if device is dead or not completely open. */
+ if (dev == DEVICE_NULL)
+ return;
+
+ if (dev->emul_ops->dealloc)
+ (*dev->emul_ops->dealloc) (dev->emul_data);
+}
+
+/*
+ * What follows is the interface for the native Mach devices.
+ */
+
+/* Convert a native device to a send right on its port, or IP_NULL if the
+ * device is not fully open.  Consumes the caller's device reference
+ * (mach_device_deallocate at the end). */
+static ipc_port_t
+mach_convert_device_to_port (mach_device_t device)
+{
+ ipc_port_t port;
+
+ if (! device)
+ return IP_NULL;
+
+ device_lock(device);
+
+ if (device->state == DEV_STATE_OPEN)
+ port = ipc_port_make_send(device->port);
+ else
+ port = IP_NULL;
+
+ device_unlock(device);
+
+ mach_device_deallocate(device);
+
+ return port;
+}
+
+/* Open a native device by name.  Serializes against concurrent opens and
+ * closes, allocates the device port on first open, and invokes the
+ * driver's d_open through an io_req.  The reply is always sent through
+ * ds_open_done (possibly asynchronously), hence the MIG_NO_REPLY
+ * returns. */
+static io_return_t
+device_open(const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ const char * name,
+ device_t *device_p)
+{
+ mach_device_t device;
+ kern_return_t result;
+ io_req_t ior;
+ ipc_port_t notify;
+
+ /*
+ * Find the device.
+ */
+ device = device_lookup(name);
+ if (device == MACH_DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+
+ /*
+ * If the device is being opened or closed,
+ * wait for that operation to finish.
+ */
+ device_lock(device);
+ while (device->state == DEV_STATE_OPENING ||
+ device->state == DEV_STATE_CLOSING) {
+ device->io_wait = TRUE;
+ thread_sleep((event_t)device, simple_lock_addr(device->lock), TRUE);
+ device_lock(device);
+ }
+
+ /*
+ * If the device is already open, increment the open count
+ * and return.
+ */
+ if (device->state == DEV_STATE_OPEN) {
+
+ if (device->flag & D_EXCL_OPEN) {
+ /*
+ * Cannot open a second time.
+ */
+ device_unlock(device);
+ mach_device_deallocate(device);
+ return (D_ALREADY_OPEN);
+ }
+
+ device->open_count++;
+ device_unlock(device);
+ *device_p = &device->dev;
+ return (D_SUCCESS);
+ /*
+ * Return deallocates device reference while acquiring
+ * port.
+ */
+ }
+
+ /*
+ * Allocate the device port and register the device before
+ * opening it.
+ */
+ device->state = DEV_STATE_OPENING;
+ device_unlock(device);
+
+ /*
+ * Allocate port, keeping a reference for it.
+ */
+ device->port = ipc_port_alloc_kernel();
+ if (device->port == IP_NULL) {
+ /* Roll back to INIT and wake anyone blocked in the wait loop above. */
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ device->port = IP_NULL;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+ mach_device_deallocate(device);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+
+ dev_port_enter(device);
+
+ /*
+ * Request no-senders notifications on device port.
+ */
+ notify = ipc_port_make_sonce(device->port);
+ ip_lock(device->port);
+ /* NOTE(review): ipc_port_nsrequest is expected to consume the port
+ * lock taken just above -- confirm against its definition. */
+ ipc_port_nsrequest(device->port, 1, notify, &notify);
+ assert(notify == IP_NULL);
+
+ /*
+ * Open the device.
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_OPEN | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_error = 0;
+ ior->io_done = ds_open_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ result = (*device->dev_ops->d_open)(device->dev_number, (int)mode, ior);
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result via ds_open_done.
+ */
+ ior->io_error = result;
+ (void) ds_open_done(ior);
+
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply already sent */
+}
+
+/* io_req completion routine for device_open.  On failure, tears down the
+ * port/device association and resets the device to INIT; on success,
+ * marks the device OPEN.  In both cases wakes waiters and sends the
+ * open reply to the caller's reply port. */
+boolean_t
+ds_open_done(const io_req_t ior)
+{
+ kern_return_t result;
+ mach_device_t device;
+
+ device = ior->io_device;
+ result = ior->io_error;
+
+ if (result != D_SUCCESS) {
+ /*
+ * Open failed. Deallocate port and device.
+ */
+ dev_port_remove(device);
+ ipc_port_dealloc_kernel(device->port);
+ device->port = IP_NULL;
+
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ mach_device_deallocate(device);
+ device = MACH_DEVICE_NULL;
+ }
+ else {
+ /*
+ * Open succeeded.
+ */
+ device_lock(device);
+ device->state = DEV_STATE_OPEN;
+ device->open_count = 1;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ /* donate device reference to get port */
+ }
+ /*
+ * Must explicitly convert device to port, since
+ * device_reply interface is built as 'user' side
+ * (thus cannot get translation).
+ */
+ if (IP_VALID(ior->io_reply_port)) {
+ (void) ds_device_open_reply(ior->io_reply_port,
+ ior->io_reply_port_type,
+ result,
+ mach_convert_device_to_port(device));
+ } else if (device)
+ mach_device_deallocate(device);
+
+ return (TRUE);
+}
+
+/* Close one reference to a native device.  Only the last close (open
+ * count reaching zero) tears down the port association, calls the
+ * driver's d_close and returns the device to INIT state, waking any
+ * thread blocked waiting to open it. */
+static io_return_t
+device_close(void *dev)
+{
+ mach_device_t device = dev;
+
+ device_lock(device);
+
+ /*
+ * If device will remain open, do nothing.
+ */
+ if (--device->open_count > 0) {
+ device_unlock(device);
+ return (D_SUCCESS);
+ }
+
+ /*
+ * If device is being closed, do nothing.
+ */
+ if (device->state == DEV_STATE_CLOSING) {
+ device_unlock(device);
+ return (D_SUCCESS);
+ }
+
+ /*
+ * Mark device as closing, to prevent new IO.
+ * Outstanding IO will still be in progress.
+ */
+ device->state = DEV_STATE_CLOSING;
+ device_unlock(device);
+
+ /*
+ * ? wait for IO to end ?
+ * only if device wants to
+ */
+
+ /*
+ * Remove the device-port association.
+ */
+ dev_port_remove(device);
+ ipc_port_dealloc_kernel(device->port);
+
+ /*
+ * Close the device
+ */
+ (*device->dev_ops->d_close)(device->dev_number, 0);
+
+ /*
+ * Finally mark it closed. If someone else is trying
+ * to open it, the open can now proceed.
+ */
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ return (D_SUCCESS);
+}
+
+/*
+ * Write to a device.
+ */
+static io_return_t
+device_write(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ const io_buf_ptr_t data,
+ unsigned int data_count,
+ int *bytes_written)
+{
+ mach_device_t device = dev;
+ io_req_t ior;
+ io_return_t result;
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /*
+ * XXX Need logic to reject ridiculously big requests.
+ */
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Package the write request for the device driver
+ */
+ io_req_alloc(ior, data_count);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = data;
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_write_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+ ior->io_copy = VM_MAP_COPY_NULL;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write ...
+ *
+ * device_write_dealoc returns false if there's more
+ * to do; it has updated the ior appropriately and expects
+ * its caller to reinvoke it on the device.
+ */
+
+ do {
+
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Discard the local mapping of the data.
+ */
+
+ } while (!device_write_dealloc(ior));
+
+ /*
+ * Return the number of bytes actually written.
+ */
+ *bytes_written = ior->io_total - ior->io_residual;
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ io_req_free(ior);
+ return (result);
+}
+
+/*
+ * Write to a device, but memory is in message.
+ */
+static io_return_t
+device_write_inband(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ const io_buf_ptr_inband_t data,
+ unsigned int data_count,
+ int *bytes_written)
+{
+ mach_device_t device = dev;
+ io_req_t ior;
+ io_return_t result;
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Package the write request for the device driver.
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL | IO_INBAND;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = (io_buf_ptr_t)data;
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_write_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write.
+ */
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return the number of bytes actually written.
+ */
+ *bytes_written = ior->io_total - ior->io_residual;
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ io_req_free(ior);
+ return (result);
+}
+
+/*
+ * Wire down incoming memory to give to device.
+ */
+kern_return_t
+device_write_get(
+ io_req_t ior,
+ boolean_t *wait)
+{
+ vm_map_copy_t io_copy;
+ vm_offset_t new_addr;
+ kern_return_t result;
+ int bsize;
+ vm_size_t min_size;
+
+ /*
+ * By default, caller does not have to wait.
+ */
+ *wait = FALSE;
+
+ /*
+ * Nothing to do if no data.
+ */
+ if (ior->io_count == 0)
+ return (KERN_SUCCESS);
+
+ /*
+ * Loaned iors already have valid data.
+ */
+ if (ior->io_op & IO_LOANED)
+ return (KERN_SUCCESS);
+
+ /*
+ * Inband case.
+ */
+ if (ior->io_op & IO_INBAND) {
+ assert(ior->io_count <= sizeof (io_buf_ptr_inband_t));
+ new_addr = kmem_cache_alloc(&io_inband_cache);
+ memcpy((void*)new_addr, ior->io_data, ior->io_count);
+ ior->io_data = (io_buf_ptr_t)new_addr;
+ ior->io_alloc_size = sizeof (io_buf_ptr_inband_t);
+
+ return (KERN_SUCCESS);
+ }
+
+ /*
+ * Figure out how much data to move this time. If the device
+ * won't return a block size, then we have to do the whole
+ * request in one shot (ditto if this is a block fragment),
+ * otherwise, move at least one block's worth.
+ */
+ result = (*ior->io_device->dev_ops->d_dev_info)(
+ ior->io_device->dev_number,
+ D_INFO_BLOCK_SIZE,
+ &bsize);
+
+ if (result != KERN_SUCCESS || ior->io_count < (vm_size_t) bsize)
+ min_size = (vm_size_t) ior->io_count;
+ else
+ min_size = (vm_size_t) bsize;
+
+ /*
+ * Map the pages from this page list into memory.
+ * io_data records location of data.
+ * io_alloc_size is the vm size of the region to deallocate.
+ */
+ io_copy = (vm_map_copy_t) ior->io_data;
+ result = kmem_io_map_copyout(device_io_map,
+ (vm_offset_t*)&ior->io_data, &new_addr,
+ &ior->io_alloc_size, io_copy, min_size);
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ if ((ior->io_data + ior->io_count) >
+ (((char *)new_addr) + ior->io_alloc_size)) {
+
+ /*
+ * Operation has to be split. Reset io_count for how
+ * much we can do this time.
+ */
+ assert(vm_map_copy_has_cont(io_copy));
+ assert(ior->io_count == io_copy->size);
+ ior->io_count = ior->io_alloc_size -
+ (ior->io_data - ((char *)new_addr));
+
+ /*
+ * Caller must wait synchronously.
+ */
+ ior->io_op &= ~IO_CALL;
+ *wait = TRUE;
+ }
+
+ ior->io_copy = io_copy; /* vm_map_copy to discard */
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Clean up memory allocated for IO.
+ */
+boolean_t
+device_write_dealloc(io_req_t ior)
+{
+ vm_map_copy_t new_copy = VM_MAP_COPY_NULL;
+ vm_map_copy_t io_copy;
+ kern_return_t result;
+ vm_offset_t size_to_do;
+ int bsize;
+
+ if (ior->io_alloc_size == 0)
+ return (TRUE);
+
+ /*
+ * Inband case.
+ */
+ if (ior->io_op & IO_INBAND) {
+ kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
+
+ return (TRUE);
+ }
+
+ if ((io_copy = ior->io_copy) == VM_MAP_COPY_NULL)
+ return (TRUE);
+
+ /*
+ * To prevent a possible deadlock with the default pager,
+ * we have to release space in the device_io_map before
+ * we allocate any memory. (Which vm_map_copy_invoke_cont
+ * might do.) See the discussion in mach_device_init.
+ */
+
+ kmem_io_map_deallocate(device_io_map,
+ trunc_page(ior->io_data),
+ ior->io_alloc_size);
+
+ if (vm_map_copy_has_cont(io_copy)) {
+
+ /*
+ * Remember how much is left, then
+ * invoke or abort the continuation.
+ */
+ size_to_do = io_copy->size - ior->io_count;
+ if (ior->io_error == 0) {
+ vm_map_copy_invoke_cont(io_copy, &new_copy, &result);
+ }
+ else {
+ vm_map_copy_abort_cont(io_copy);
+ result = KERN_FAILURE;
+ }
+
+ if (result == KERN_SUCCESS && new_copy != VM_MAP_COPY_NULL) {
+ int res;
+
+ /*
+ * We have a new continuation, reset the ior to
+ * represent the remainder of the request. Must
+ * adjust the recnum because drivers assume
+ * that the residual is zero.
+ */
+ ior->io_op &= ~IO_DONE;
+ ior->io_op |= IO_CALL;
+
+ res = (*ior->io_device->dev_ops->d_dev_info)(
+ ior->io_device->dev_number,
+ D_INFO_BLOCK_SIZE,
+ &bsize);
+
+ if (res != D_SUCCESS)
+ panic("device_write_dealloc: No block size");
+
+ ior->io_recnum += ior->io_count/bsize;
+ ior->io_count = new_copy->size;
+ }
+ else {
+
+ /*
+ * No continuation. Add amount we didn't get
+ * to into residual.
+ */
+ ior->io_residual += size_to_do;
+ }
+ }
+
+ /*
+ * Clean up the state for the IO that just completed.
+ */
+ vm_map_copy_discard(ior->io_copy);
+ ior->io_copy = VM_MAP_COPY_NULL;
+ ior->io_data = (char *) new_copy;
+
+ /*
+ * Return FALSE if there's more IO to do.
+ */
+
+ return(new_copy == VM_MAP_COPY_NULL);
+}
+
+/*
+ * Send write completion message to client, and discard the data.
+ */
+boolean_t
+ds_write_done(const io_req_t ior)
+{
+ /*
+ * device_write_dealloc discards the data that has been
+ * written, but may decide that there is more to write.
+ */
+ while (!device_write_dealloc(ior)) {
+ io_return_t result;
+ mach_device_t device;
+
+ /*
+ * More IO to do -- invoke it.
+ */
+ device = ior->io_device;
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, return FALSE -- not done yet.
+ */
+ if (result == D_IO_QUEUED)
+ return (FALSE);
+ }
+
+ /*
+ * Now the write is really complete. Send reply.
+ */
+
+ if (IP_VALID(ior->io_reply_port)) {
+ (void) (*((ior->io_op & IO_INBAND) ?
+ ds_device_write_reply_inband :
+ ds_device_write_reply))(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (int) (ior->io_total -
+ ior->io_residual));
+ }
+ mach_device_deallocate(ior->io_device);
+
+ return (TRUE);
+}
+
+/*
+ * Read from a device.
+ */
+static io_return_t
+device_read(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ int bytes_wanted,
+ io_buf_ptr_t *data,
+ unsigned int *data_count)
+{
+ mach_device_t device = dev;
+ io_req_t ior;
+ io_return_t result;
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * There must be a reply port.
+ */
+ if (!IP_VALID(reply_port)) {
+ printf("ds_* invalid reply port\n");
+ SoftDebugger("ds_* reply_port");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /*
+ * Package the read request for the device driver
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_READ | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0; /* driver must allocate data */
+ ior->io_count = bytes_wanted;
+ ior->io_alloc_size = 0; /* no data allocated yet */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the read.
+ */
+ result = (*device->dev_ops->d_read)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+/*
+ * Read from a device, but return the data 'inband.'
+ */
+static io_return_t
+device_read_inband(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ int bytes_wanted,
+ char *data,
+ unsigned int *data_count)
+{
+ mach_device_t device = dev;
+ io_req_t ior;
+ io_return_t result;
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * There must be a reply port.
+ */
+ if (!IP_VALID(reply_port)) {
+ printf("ds_* invalid reply port\n");
+ SoftDebugger("ds_* reply_port");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /*
+ * Package the read for the device driver
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_READ | IO_CALL | IO_INBAND;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0; /* driver must allocate data */
+ ior->io_count =
+ ((bytes_wanted < sizeof(io_buf_ptr_inband_t)) ?
+ bytes_wanted : sizeof(io_buf_ptr_inband_t));
+ ior->io_alloc_size = 0; /* no data allocated yet */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * Do the read.
+ */
+ result = (*device->dev_ops->d_read)(device->dev_number, ior);
+
+ /*
+ * If the io was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result, via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+/*
+ * Allocate wired-down memory for device read.
+ */
+kern_return_t device_read_alloc(
+ io_req_t ior,
+ vm_size_t size)
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ /*
+ * Nothing to do if no data.
+ */
+ if (ior->io_count == 0)
+ return (KERN_SUCCESS);
+
+ if (ior->io_op & IO_INBAND) {
+ ior->io_data = (io_buf_ptr_t) kmem_cache_alloc(&io_inband_cache);
+ ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
+ } else {
+ size = round_page(size);
+ kr = kmem_alloc(kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return (kr);
+
+ ior->io_data = (io_buf_ptr_t) addr;
+ ior->io_alloc_size = size;
+ }
+
+ return (KERN_SUCCESS);
+}
+
+/* io_req completion routine for reads: zero-fills the unread tail of the
+ * buffer, dirties the returned pages, sends the reply (inband copy or
+ * out-of-line page list), frees any surplus allocation, and drops the
+ * ior's device reference. */
+boolean_t ds_read_done(const io_req_t ior)
+{
+ vm_offset_t start_data, end_data;
+ vm_offset_t start_sent, end_sent;
+ vm_size_t size_read;
+
+ if (ior->io_error)
+ size_read = 0;
+ else
+ size_read = ior->io_count - ior->io_residual;
+
+ start_data = (vm_offset_t)ior->io_data;
+ end_data = start_data + size_read;
+
+ /* Out-of-line replies are sent in whole pages; inband replies send
+ * the exact allocation. */
+ start_sent = (ior->io_op & IO_INBAND) ? start_data :
+ trunc_page(start_data);
+ end_sent = (ior->io_op & IO_INBAND) ?
+ start_data + ior->io_alloc_size : round_page(end_data);
+
+ /*
+ * Zero memory that the device did not fill.
+ */
+ if (start_sent < start_data)
+ memset((void *)start_sent, 0, start_data - start_sent);
+ if (end_sent > end_data)
+ memset((void *)end_data, 0, end_sent - end_data);
+
+
+ /*
+ * Touch the data being returned, to mark it dirty.
+ * If the pages were filled by DMA, the pmap module
+ * may think that they are clean.
+ */
+ {
+ vm_offset_t touch;
+ int c;
+
+ for (touch = start_sent; touch < end_sent; touch += PAGE_SIZE) {
+ c = *(volatile char *)touch;
+ *(volatile char *)touch = c;
+ }
+ }
+
+ /*
+ * Send the data to the reply port - this
+ * unwires and deallocates it.
+ */
+ if (ior->io_op & IO_INBAND) {
+ (void)ds_device_read_reply_inband(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (char *) start_data,
+ size_read);
+ } else {
+ vm_map_copy_t copy;
+ kern_return_t kr;
+
+ kr = vm_map_copyin_page_list(kernel_map, start_data,
+ size_read, TRUE, TRUE,
+ &copy, FALSE);
+
+ if (kr != KERN_SUCCESS)
+ panic("read_done: vm_map_copyin_page_list failed");
+
+ (void)ds_device_read_reply(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (char *) copy,
+ size_read);
+ }
+
+ /*
+ * Free any memory that was allocated but not sent.
+ */
+ if (ior->io_count != 0) {
+ if (ior->io_op & IO_INBAND) {
+ if (ior->io_alloc_size > 0)
+ kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
+ } else {
+ vm_offset_t end_alloc;
+
+ end_alloc = start_sent + round_page(ior->io_alloc_size);
+ if (end_alloc > end_sent)
+ (void) vm_deallocate(kernel_map,
+ end_sent,
+ end_alloc - end_sent);
+ }
+ }
+
+ mach_device_deallocate(ior->io_device);
+
+ return (TRUE);
+}
+
+/* Native set-status: forward directly to the driver's d_setstat. */
+static io_return_t
+device_set_status(
+ void *dev,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ mach_msg_type_number_t status_count)
+{
+ mach_device_t device = dev;
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ return ((*device->dev_ops->d_setstat)(device->dev_number,
+ flavor,
+ status,
+ status_count));
+}
+
+/* Native get-status: forward directly to the driver's d_getstat. */
+static io_return_t
+mach_device_get_status(
+ void *dev,
+ dev_flavor_t flavor,
+ dev_status_t status, /* pointer to OUT array */
+ mach_msg_type_number_t *status_count) /* out */
+{
+ mach_device_t device = dev;
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ return ((*device->dev_ops->d_getstat)(device->dev_number,
+ flavor,
+ status,
+ status_count));
+}
+
+/* Native set-filter: forward to the driver's d_async_in after checking
+ * that a valid receive port was supplied. */
+static io_return_t
+device_set_filter(void *dev,
+ const ipc_port_t receive_port,
+ int priority,
+ filter_t filter[],
+ unsigned int filter_count)
+{
+ mach_device_t device = dev;
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Request is absurd if no receive port is specified.
+ */
+ if (!IP_VALID(receive_port))
+ return (D_INVALID_OPERATION);
+
+ return ((*device->dev_ops->d_async_in)(device->dev_number,
+ receive_port,
+ priority,
+ filter,
+ filter_count));
+}
+
+/* Native device_map: validate the protection bits and hand off to the
+ * device pager, which returns a memory-object port in *pager. */
+static io_return_t
+device_map(
+ void *dev,
+ vm_prot_t protection,
+ vm_offset_t offset,
+ vm_size_t size,
+ ipc_port_t *pager, /* out */
+ boolean_t unmap) /* ? */
+{
+ mach_device_t device = dev;
+ if (protection & ~VM_PROT_ALL)
+ return (KERN_INVALID_ARGUMENT);
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /* NOTE(review): the `unmap' argument is ignored here. */
+ return (device_pager_setup(device, protection, offset, size,
+ (mach_port_t*)pager));
+}
+
+/*
+ * Doesn't do anything (yet).
+ */
+static void
+ds_no_senders(mach_no_senders_notification_t *notification)
+{
+ printf("ds_no_senders called! device_port=0x%zx count=%d\n",
+ notification->not_header.msgh_remote_port,
+ notification->not_count);
+}
+
+/* Shall be taken at splio only */
+def_simple_lock_irq_data(static, io_done_list_lock) /* Lock for... */
+queue_head_t io_done_list;
+
+#define splio splsched /* XXX must block ALL io devices */
+
+/* Mark an io_req complete.  Loaned iors run their completion inline;
+ * synchronous waiters (no IO_CALL) are woken directly; otherwise the
+ * ior is queued for the io_done thread to run its completion routine.
+ * Must be callable from interrupt level, hence splio. */
+void iodone(io_req_t ior)
+{
+ spl_t s;
+
+ /*
+ * If this ior was loaned to us, return it directly.
+ */
+ if (ior->io_op & IO_LOANED) {
+ (*ior->io_done)(ior);
+ return;
+ }
+ /*
+ * If !IO_CALL, some thread is waiting for this. Must lock
+ * structure to interlock correctly with iowait(). Else can
+ * toss on queue for io_done thread to call completion.
+ */
+ s = splio();
+ if ((ior->io_op & IO_CALL) == 0) {
+ ior_lock(ior);
+ ior->io_op |= IO_DONE;
+ ior->io_op &= ~IO_WANTED;
+ ior_unlock(ior);
+ thread_wakeup((event_t)ior);
+ } else {
+ ior->io_op |= IO_DONE;
+ simple_lock_nocheck(&io_done_list_lock.slock);
+ enqueue_tail(&io_done_list, (queue_entry_t)ior);
+ thread_wakeup((event_t)&io_done_list);
+ simple_unlock_nocheck(&io_done_list_lock.slock);
+ }
+ splx(s);
+}
+
+/* Body of the io_done kernel thread: drain io_done_list, running each
+ * ior's completion routine with the list lock dropped, then block until
+ * iodone() wakes us again.  Never returns. */
+static void __attribute__ ((noreturn)) io_done_thread_continue(void)
+{
+ for (;;) {
+ spl_t s;
+ io_req_t ior;
+
+#if defined (LINUX_DEV) && defined (CONFIG_INET)
+ free_skbuffs ();
+#endif
+ s = simple_lock_irq(&io_done_list_lock);
+ while ((ior = (io_req_t)dequeue_head(&io_done_list)) != 0) {
+ simple_unlock_irq(s, &io_done_list_lock);
+
+ if ((*ior->io_done)(ior)) {
+ /*
+ * IO done - free io_req_elt
+ */
+ io_req_free(ior);
+ }
+ /* else routine has re-queued it somewhere */
+
+ s = simple_lock_irq(&io_done_list_lock);
+ }
+
+ assert_wait(&io_done_list, FALSE);
+ simple_unlock_irq(s, &io_done_list_lock);
+ counter(c_io_done_thread_block++);
+ thread_block(io_done_thread_continue);
+ }
+}
+
+/* Entry point of the io_done kernel thread: grant VM/stack privilege and
+ * top priority, then loop forever in io_done_thread_continue. */
+void io_done_thread(void)
+{
+ /*
+ * Set thread privileges and highest priority.
+ */
+ current_thread()->vm_privilege = 1;
+ stack_privilege(current_thread());
+ thread_set_own_priority(0);
+
+ io_done_thread_continue();
+ /*NOTREACHED*/
+}
+
+#define DEVICE_IO_MAP_SIZE (16 * 1024 * 1024)
+
+static void mach_device_trap_init(void); /* forward */
+
+/* One-time initialization of the device layer: io_done queue and lock,
+ * the device_io_map submap, the inband buffer cache, and the trap-path
+ * allocator. */
+void mach_device_init(void)
+{
+ vm_offset_t device_io_min, device_io_max;
+
+ queue_init(&io_done_list);
+ simple_lock_init_irq(&io_done_list_lock);
+
+ kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
+ DEVICE_IO_MAP_SIZE);
+
+ /*
+ * If the kernel receives many device_write requests, the
+ * device_io_map might run out of space. To prevent
+ * device_write_get from failing in this case, we enable
+ * wait_for_space on the map. This causes kmem_io_map_copyout
+ * to block until there is sufficient space.
+ * (XXX Large writes may be starved by small writes.)
+ *
+ * There is a potential deadlock problem with this solution,
+ * if a device_write from the default pager has to wait
+ * for the completion of a device_write which needs to wait
+ * for memory allocation. Hence, once device_write_get
+ * allocates space in device_io_map, no blocking memory
+ * allocations should happen until device_write_dealloc
+ * frees the space. (XXX A large write might starve
+ * a small write from the default pager.)
+ */
+ device_io_map->wait_for_space = TRUE;
+
+ kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
+ sizeof(io_buf_ptr_inband_t), 0, NULL, 0);
+
+ mach_device_trap_init();
+}
+
+/* Block the calling thread until iodone() marks the ior IO_DONE.
+ * Pairs with the !IO_CALL branch of iodone(). */
+void iowait(io_req_t ior)
+{
+ spl_t s;
+
+ s = splio();
+ ior_lock(ior);
+ while ((ior->io_op&IO_DONE)==0) {
+ assert_wait((event_t)ior, FALSE);
+ ior_unlock(ior);
+ thread_block((void (*)()) 0);
+ ior_lock(ior);
+ }
+ ior_unlock(ior);
+ splx(s);
+}
+
+
+/*
+ * Device trap support.
+ */
+
+/*
+ * Memory Management
+ *
+ * This currently has a single pool of 2k wired buffers
+ * since we only handle writes to an ethernet device.
+ * Should be more general.
+ */
+#define IOTRAP_REQSIZE 2048
+
+struct kmem_cache io_trap_cache;
+
+/*
+ * Initialization. Called from mach_device_init().
+ */
+static void
+mach_device_trap_init(void)
+{
+ kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
+ NULL, 0);
+}
+
+/*
+ * Allocate an io_req_t.
+ * Currently allocates from io_trap_cache.
+ *
+ * Could have lists of different size caches.
+ * Could call a device-specific routine.
+ */
+static io_req_t
+ds_trap_req_alloc(const mach_device_t device, vm_size_t data_size)
+{
+ return (io_req_t) kmem_cache_alloc(&io_trap_cache);
+}
+
+/*
+ * Called by iodone to release ior.
+ */
+static boolean_t
+ds_trap_write_done(const io_req_t ior)
+{
+ mach_device_t dev;
+
+ dev = ior->io_device;
+
+ /*
+ * Should look at reply port and maybe send a message.
+ */
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
+
+ /*
+ * Give up device reference from ds_write_trap.
+ */
+ mach_device_deallocate(dev);
+ return TRUE;
+}
+
+/*
+ * Like device_write except that data is in user space.
+ */
+static io_return_t
+device_write_trap (mach_device_t device, dev_mode_t mode,
+ rpc_recnum_t recnum, rpc_vm_offset_t data, rpc_vm_size_t data_count)
+{
+ io_req_t ior;
+ io_return_t result;
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Get a buffer to hold the ioreq.
+ */
+ ior = ds_trap_req_alloc(device, data_count);
+
+ /*
+ * Package the write request for the device driver.
+ */
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL | IO_LOANED;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = (io_buf_ptr_t)
+ (vm_offset_t)ior + sizeof(struct io_req);
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_trap_write_done;
+ ior->io_reply_port = IP_NULL; /* XXX */
+ ior->io_reply_port_type = 0; /* XXX */
+
+ /*
+ * Copy the data from user space.
+ */
+ if (data_count > 0)
+ copyin((void*)(vm_offset_t)data, ior->io_data, data_count);
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write.
+ */
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
+ return (result);
+}
+
+/* Scatter-gather variant of device_write_trap: copies in the user iovec
+ * (at most 16 entries), then gathers all segments into the loaned ior's
+ * trailing buffer before handing it to the driver. */
+static io_return_t
+device_writev_trap (mach_device_t device, dev_mode_t mode,
+ rpc_recnum_t recnum, rpc_io_buf_vec_t *iovec, rpc_vm_size_t iocount)
+{
+ io_req_t ior;
+ io_return_t result;
+ io_buf_vec_t stack_iovec[16]; /* XXX */
+ vm_size_t data_count;
+ unsigned i;
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Copyin user addresses.
+ */
+ if (iocount > 16)
+ return KERN_INVALID_VALUE; /* lame */
+
+ for (data_count = 0, i=0; i<iocount; i++) {
+ rpc_io_buf_vec_t riov;
+ if (copyin(iovec + i, &riov, sizeof(riov)))
+ return KERN_INVALID_ARGUMENT;
+ stack_iovec[i].data = riov.data;
+ stack_iovec[i].count = riov.count;
+ data_count += stack_iovec[i].count;
+ }
+
+ /* NOTE(review): data_count is not checked against the trailing buffer
+ * capacity (IOTRAP_REQSIZE - sizeof(struct io_req)) -- confirm the
+ * gather loop below cannot overrun the cache allocation. */
+
+ /*
+ * Get a buffer to hold the ioreq.
+ */
+ ior = ds_trap_req_alloc(device, data_count);
+
+ /*
+ * Package the write request for the device driver.
+ */
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL | IO_LOANED;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = (io_buf_ptr_t)
+ (vm_offset_t)ior + sizeof(struct io_req);
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_trap_write_done;
+ ior->io_reply_port = IP_NULL; /* XXX */
+ ior->io_reply_port_type = 0; /* XXX */
+
+ /*
+ * Copy the data from user space.
+ */
+ if (data_count > 0) {
+ vm_offset_t p;
+
+ p = (vm_offset_t) ior->io_data;
+ for (i = 0; i < iocount; i++) {
+ copyin((void *) stack_iovec[i].data,
+ (void *) p,
+ stack_iovec[i].count);
+ p += stack_iovec[i].count;
+ }
+ }
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write.
+ */
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
+ return (result);
+}
+
+/* Emulation vtable for native Mach devices; entries correspond, in
+ * order, to the hooks dispatched by the ds_* routines above. */
+struct device_emulation_ops mach_device_emulation_ops =
+{
+ (void*) mach_device_reference,
+ (void*) mach_device_deallocate,
+ (void*) mach_convert_device_to_port,
+ device_open,
+ device_close,
+ device_write,
+ device_write_inband,
+ device_read,
+ device_read_inband,
+ device_set_status,
+ mach_device_get_status,
+ device_set_filter,
+ device_map,
+ ds_no_senders,
+ (void*) device_write_trap,
+ (void*) device_writev_trap
+};
diff --git a/device/ds_routines.h b/device/ds_routines.h
new file mode 100644
index 0000000..48d85dd
--- /dev/null
+++ b/device/ds_routines.h
@@ -0,0 +1,86 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Device service utility routines.
+ */
+
+#ifndef DS_ROUTINES_H
+#define DS_ROUTINES_H
+
+#include <vm/vm_map.h>
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * Map for device IO memory.
+ */
+extern vm_map_t device_io_map;
+
+/* Queue of completed io_req's awaiting reply, serviced by
+   io_done_thread. */
+extern queue_head_t io_done_list;
+
+/* Reply-buffer management for device read/write requests. */
+kern_return_t device_read_alloc(io_req_t, vm_size_t);
+kern_return_t device_write_get(io_req_t, boolean_t *);
+boolean_t device_write_dealloc(io_req_t);
+void device_reference(device_t);
+
+/* Completion handlers.  NOTE(review): the boolean result appears to
+   tell the caller whether the io_req is fully finished -- confirm in
+   ds_routines.c. */
+boolean_t ds_notify(mach_msg_header_t *msg);
+boolean_t ds_open_done(io_req_t);
+boolean_t ds_read_done(io_req_t);
+boolean_t ds_write_done(io_req_t);
+
+/* Wait for IOR to complete. */
+void iowait (io_req_t ior);
+
+/* Set up a memory object pager backed by DEVICE (for device_map). */
+kern_return_t device_pager_setup(
+	const mach_device_t	device,
+	int			prot,
+	vm_offset_t		offset,
+	vm_size_t		size,
+	mach_port_t		*pager);
+
+/* Subsystem initialization entry points. */
+extern void mach_device_init(void);
+extern void dev_lookup_init(void);
+extern void device_pager_init(void);
+extern void io_done_thread(void) __attribute__ ((noreturn));
+
+/* Trap-based (non-IPC) device write entry points. */
+io_return_t ds_device_write_trap(
+	device_t 	dev,
+	dev_mode_t 	mode,
+	rpc_recnum_t 	recnum,
+	rpc_vm_offset_t data,
+	rpc_vm_size_t 	count);
+
+io_return_t ds_device_writev_trap(
+	device_t 	dev,
+	dev_mode_t 	mode,
+	rpc_recnum_t 	recnum,
+	rpc_io_buf_vec_t *iovec,
+	rpc_vm_size_t 	count);
+
+#endif /* DS_ROUTINES_H */
diff --git a/device/if_ether.h b/device/if_ether.h
new file mode 100644
index 0000000..91d4d9a
--- /dev/null
+++ b/device/if_ether.h
@@ -0,0 +1,52 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Ethernet definitions.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#ifndef _DEVICE_IF_ETHER_H_
+#define _DEVICE_IF_ETHER_H_
+
+#include <sys/types.h>
+
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+struct ether_header {
+	u_char	ether_dhost[6];		/* destination MAC address */
+	u_char	ether_shost[6];		/* source MAC address */
+	u_short	ether_type;		/* payload type; NOTE(review):
+					   presumably network byte order --
+					   confirm in callers */
+};
+
+#ifdef KERNEL
+/* Format a 6-byte hardware address as a printable string.
+   NOTE(review): likely returns a static buffer (not reentrant) --
+   confirm in the implementation. */
+extern char *	ether_sprintf(const u_char *);
+#endif /* KERNEL */
+
+#endif /*_DEVICE_IF_ETHER_H_*/
diff --git a/device/if_hdr.h b/device/if_hdr.h
new file mode 100644
index 0000000..e53983b
--- /dev/null
+++ b/device/if_hdr.h
@@ -0,0 +1,165 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Taken from (bsd)net/if.h. Modified for MACH kernel.
+ */
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if.h 7.3 (Berkeley) 6/27/88
+ */
+
+#ifndef _IF_HDR_
+#define _IF_HDR_
+
+#include <kern/lock.h>
+#include <kern/queue.h>
+
+/*
+ * Queue for network output and filter input.
+ * Holds io_req_t packets; ifq_lock protects the queue head and the
+ * counters.
+ */
+struct ifqueue {
+	queue_head_t	ifq_head;	/* queue of io_req_t */
+	int		ifq_len;	/* length of queue */
+	int		ifq_maxlen;	/* maximum length of queue */
+	int		ifq_drops;	/* number of packets dropped
+					   because queue full */
+	decl_simple_lock_data(,
+			ifq_lock)	/* lock for queue and counters */
+};
+
+/*
+ * Header for network interface drivers.
+ * NOTE(review): presumably embedded in each driver's per-device
+ * softc -- confirm per driver.
+ */
+struct ifnet {
+	short	if_unit;		/* unit number */
+	short	if_flags;		/* up/down, broadcast, etc. */
+	short	if_timer;		/* time until if_watchdog called */
+	short	if_mtu;			/* maximum transmission unit */
+	short	if_header_size;		/* length of header */
+	short	if_header_format;	/* format of hardware header */
+	short	if_address_size;	/* length of hardware address */
+	short	if_alloc_size;		/* size of read buffer to allocate */
+	char	*if_address;		/* pointer to hardware address */
+	struct ifqueue	if_snd;		/* output queue */
+	queue_head_t	if_rcv_port_list;	/* input filter list */
+	queue_head_t	if_snd_port_list;	/* output filter list */
+	decl_simple_lock_data(,
+		if_rcv_port_list_lock)	/* lock for input filter list */
+	decl_simple_lock_data(,
+		if_snd_port_list_lock)	/* lock for output filter list */
+/* statistics */
+	int	if_ipackets;		/* packets received */
+	int	if_ierrors;		/* input errors */
+	int	if_opackets;		/* packets sent */
+	int	if_oerrors;		/* output errors */
+	int	if_collisions;		/* collisions on csma interfaces */
+	int	if_rcvdrops;		/* packets received but dropped */
+};
+
+#define IFF_UP 0x0001 /* interface is up */
+#define IFF_BROADCAST 0x0002 /* interface can broadcast */
+#define IFF_DEBUG 0x0004 /* turn on debugging */
+#define IFF_LOOPBACK 0x0008 /* is a loopback net */
+#define IFF_POINTOPOINT 0x0010 /* point-to-point link */
+#define IFF_RUNNING 0x0040 /* resources allocated */
+#define IFF_NOARP 0x0080 /* no address resolution protocol */
+#define IFF_PROMISC 0x0100 /* receive all packets */
+#define IFF_ALLMULTI 0x0200 /* receive all multicast packets */
+#define IFF_BRIDGE 0x0100 /* support token ring routing field */
+#define IFF_SNAP 0x0200 /* support extended sap header */
+
+/* internal flags only: */
+#define IFF_CANTCHANGE (IFF_BROADCAST | IFF_POINTOPOINT | IFF_RUNNING)
+
+/*
+ * Output queues (ifp->if_snd)
+ * have queues of messages stored on ifqueue structures. Entries
+ * are added to and deleted from these structures by these macros, which
+ * should be called with ipl raised to splimp().
+ * XXX locking XXX
+ *
+ * Note: every macro below evaluates its (ifq) argument more than
+ * once -- pass a simple lvalue, never an expression with side
+ * effects.  IF_DEQUEUE leaves (ior) == 0 when the queue is empty.
+ */
+
+#define	IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
+#define	IF_DROP(ifq)		((ifq)->ifq_drops++)
+#define	IF_ENQUEUE(ifq, ior) { \
+	simple_lock(&(ifq)->ifq_lock); \
+	enqueue_tail(&(ifq)->ifq_head, (queue_entry_t)ior); \
+	(ifq)->ifq_len++; \
+	simple_unlock(&(ifq)->ifq_lock); \
+}
+#define	IF_PREPEND(ifq, ior) { \
+	simple_lock(&(ifq)->ifq_lock); \
+	enqueue_head(&(ifq)->ifq_head, (queue_entry_t)ior); \
+	(ifq)->ifq_len++; \
+	simple_unlock(&(ifq)->ifq_lock); \
+}
+
+#define	IF_DEQUEUE(ifq, ior) { \
+	simple_lock(&(ifq)->ifq_lock); \
+	if (((ior) = (io_req_t)dequeue_head(&(ifq)->ifq_head)) != 0) \
+	    (ifq)->ifq_len--; \
+	simple_unlock(&(ifq)->ifq_lock); \
+}
+
+#define	IFQ_MAXLEN	50
+
+#define	IFQ_INIT(ifq) { \
+	queue_init(&(ifq)->ifq_head); \
+	simple_lock_init(&(ifq)->ifq_lock); \
+	(ifq)->ifq_len = 0; \
+	(ifq)->ifq_maxlen = IFQ_MAXLEN; \
+	(ifq)->ifq_drops = 0; \
+}
+
+#define	IFNET_SLOWHZ	1		/* granularity is 1 second */
+
+#endif /* _IF_HDR_ */
diff --git a/device/intr.c b/device/intr.c
new file mode 100644
index 0000000..9035c03
--- /dev/null
+++ b/device/intr.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2010, 2011, 2016, 2019 Free Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#include <kern/assert.h>
+#include <device/intr.h>
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/notify.h>
+#include <kern/printf.h>
+#include <machine/spl.h>
+#include <machine/irq.h>
+#include <ipc/ipc_space.h>
+
+#ifndef MACH_XEN
+
+queue_head_t main_intr_queue;
+static boolean_t deliver_intr (int id, ipc_port_t dst_port);
+
+#ifndef LINUX_DEV
+#define SA_SHIRQ 0x04000000
+
+struct intr_list {
+ user_intr_t *user_intr;
+ unsigned long flags;
+ struct intr_list *next;
+};
+static struct intr_list *user_intr_handlers[NINTR];
+#endif
+
+/*
+ * Look up the user interrupt entry on DEV whose notification port is
+ * DST_PORT; NULL when no such registration exists.  The caller must
+ * hold splhigh so the queue cannot change underneath us.
+ */
+static user_intr_t *
+search_intr (struct irqdev *dev, ipc_port_t dst_port)
+{
+  user_intr_t *entry;
+
+  queue_iterate (dev->intr_queue, entry, user_intr_t *, chain)
+    if (entry->dst_port == dst_port)
+      return entry;
+
+  return NULL;
+}
+
+
+/*
+ * Interrupt handling logic:
+ *
+ * interrupt.S raises spl (thus IF cleared)
+ * interrupt.S EOI
+ * interrupt.S calls the handler
+ * - for pure in-kernel handlers, they do whatever they want with IF cleared.
+ * - when a userland handler is registered, queue_intr masks the irq.
+ * interrupt.S lowers spl with splx_cli, thus IF still cleared
+ * iret, that also sets IF
+ *
+ * later on, (irq_acknowledge), userland acks the IRQ, that unmasks the irq
+ */
+/*
+ * Acknowledge, from userland, the interrupt delivered to RECEIVE_PORT
+ * and re-enable the associated irq line (queue_intr masked it until
+ * the user-level driver finished handling the interrupt).
+ *
+ * Returns KERN_INVALID_ARGUMENT when no entry matches the port,
+ * D_INVALID_OPERATION when there is no outstanding unacked interrupt,
+ * D_SUCCESS otherwise.
+ */
+kern_return_t
+irq_acknowledge (ipc_port_t receive_port)
+{
+  user_intr_t *e;
+  kern_return_t ret = 0;
+  irq_t irq = 0;
+
+  spl_t s = splhigh ();
+  e = search_intr (&irqtab, receive_port);
+
+  if (!e)
+    {
+      printf("didn't find user intr for interrupt !?\n");
+      ret = KERN_INVALID_ARGUMENT;
+    }
+  else
+    {
+      if (!e->n_unacked)
+	ret = D_INVALID_OPERATION;
+      else
+	{
+	  e->n_unacked--;
+	  /* Fix: capture the irq while still at splhigh.  Once spl is
+	     dropped, E may be reaped by intr_thread, so e->id must not
+	     be dereferenced after splx as the old code did. */
+	  irq = irqtab.irq[e->id];
+	}
+    }
+  splx (s);
+
+  if (ret)
+    return ret;
+
+  __enable_irq (irq);
+
+  return D_SUCCESS;
+}
+
+/* This function can only be used in the interrupt handler. */
+static void
+queue_intr (struct irqdev *dev, int id, user_intr_t *e)
+{
+  /* Until userland has handled the IRQ in the driver, we have to keep it
+   * disabled. Level-triggered interrupts would keep raising otherwise. */
+  __disable_irq (dev->irq[id]);
+
+  /* These counters are shared with intr_thread and irq_acknowledge,
+     so bump them at splhigh. */
+  spl_t s = splhigh ();
+  e->n_unacked++;		/* balanced by irq_acknowledge, or by
+				   intr_thread's dead-port cleanup */
+  e->interrupts++;
+  dev->tot_num_intr++;
+  splx (s);
+
+  /* Let intr_thread send the notification message. */
+  thread_wakeup ((event_t) &intr_thread);
+}
+
+/*
+ * Hand one hardware interrupt on DEV/ID to the userland registration E.
+ * Returns 1 when the interrupt was queued for intr_thread to deliver,
+ * 0 when the destination port is gone and the entry needs reaping.
+ */
+int
+deliver_user_intr (struct irqdev *dev, int id, user_intr_t *e)
+{
+  /* The reference of the port was increased when the port was
+   * installed.  If the reference is down to 1, userland deallocated
+   * the port: wake intr_thread so it can clean up the stale entry
+   * instead of queueing the interrupt. */
+  boolean_t port_dead = !e->dst_port || e->dst_port->ip_references == 1;
+
+  if (port_dead)
+    {
+      thread_wakeup ((event_t) &intr_thread);
+      return 0;
+    }
+
+  queue_intr (dev, id, e);
+  return 1;
+}
+
+/* Insert an interrupt entry in the queue.
+ * The entry stays in the queue until the corresponding interrupt
+ * port is removed.  Returns the new entry, or NULL on allocation
+ * failure or when the (irq, port) pair is already registered. */
+user_intr_t *
+insert_intr_entry (struct irqdev *dev, int id, ipc_port_t dst_port)
+{
+  user_intr_t *entry, *existing;
+  spl_t s;
+
+  entry = (user_intr_t *) kalloc (sizeof (*entry));
+  if (entry == NULL)
+    return NULL;
+
+  /* Reject a duplicate registration for the same delivery port. */
+  s = splhigh ();
+  existing = search_intr (dev, dst_port);
+  if (existing)
+    {
+      printf ("the interrupt entry for irq[%d] and port %p has already been inserted\n", id, dst_port);
+      splx (s);
+      kfree ((vm_offset_t) entry, sizeof (*entry));
+      return NULL;
+    }
+
+  printf("irq handler [%d]: new delivery port %p entry %p\n", id, dst_port, entry);
+  entry->id = id;
+  entry->dst_port = dst_port;
+  entry->interrupts = 0;
+  entry->n_unacked = 0;
+  queue_enter (dev->intr_queue, entry, user_intr_t *, chain);
+
+  splx (s);
+  return entry;
+}
+
+#ifndef LINUX_DEV
+
+/*
+ * Machine-level handler installed on a vector shared by userland
+ * drivers: offer the interrupt to every registered handler, unlinking
+ * the ones whose delivery port has died.
+ */
+static void
+user_irq_handler (int id)
+{
+  struct intr_list *handler, *next;
+  struct intr_list **prev = &user_intr_handlers[id];
+  user_intr_t *e;
+  spl_t s;
+
+  s = splhigh();
+
+  for (handler = *prev; handler; handler = next)
+    {
+      next = handler->next;
+      e = handler->user_intr;
+      if (!deliver_user_intr(&irqtab, id, e))
+	{
+	  /* We failed to deliver this interrupt, remove handler from
+	     list.  Fix: do NOT advance PREV here -- the old code
+	     advanced it into the just-unlinked node, so a subsequent
+	     removal wrote into the dead node and left the stale entry
+	     linked.  PREV keeps pointing at the live predecessor link.
+	     XXX the intr_list node itself is still leaked, as before;
+	     freeing in interrupt context is left for a separate
+	     change. */
+	  *prev = next;
+	}
+      else
+	prev = &handler->next;
+    }
+  splx(s);
+}
+
+/*
+ * Register a userland interrupt handler for irq slot ID on DEV.
+ *
+ * The handler is prepended to the per-irq chain serviced by
+ * user_irq_handler; all userland handlers are implicitly shared
+ * (SA_SHIRQ).  Fails with D_ALREADY_OPEN when the vector is owned by
+ * a fixed in-kernel handler or sharing is refused, and with
+ * D_NO_MEMORY when the bookkeeping node cannot be allocated.
+ */
+int
+install_user_intr_handler (struct irqdev *dev, int id, unsigned long flags,
+			  user_intr_t *user_intr)
+{
+  unsigned int irq = dev->irq[id];
+  struct intr_list **head = &user_intr_handlers[id];
+  struct intr_list *new, *old = *head;
+  spl_t s;
+
+  flags |= SA_SHIRQ;
+
+  assert (irq < NINTR);
+
+  /* Don't allow overriding hardclock/kdintr etc */
+  if ((ivect[irq] != user_irq_handler) && (ivect[irq] != intnull))
+    {
+      printf("You can't have this interrupt\n");
+      return D_ALREADY_OPEN;
+    }
+
+  if (old)
+    {
+      /* The already-installed head must also accept sharing. */
+      if (!(old->flags & flags & SA_SHIRQ))
+	{
+	  printf ("Cannot share irq\n");
+	  return D_ALREADY_OPEN;
+	}
+    }
+
+  new = (struct intr_list *)kalloc (sizeof (struct intr_list));
+  if (new == NULL)
+    /* Fix: the allocation was previously used without a NULL check. */
+    return D_NO_MEMORY;
+  new->user_intr = user_intr;
+  new->flags = flags;
+
+  /* Publish the handler and unmask the line at splhigh so the list
+     update and vector swap appear atomic to interrupts. */
+  s = splhigh();
+  new->next = *head;
+  *head = new;
+  ivect[irq] = user_irq_handler;
+  iunit[irq] = (int)irq;
+  unmask_irq (irq);
+  splx(s);
+
+  return D_SUCCESS;
+}
+#endif
+
+/*
+ * Kernel thread that drains main_intr_queue: for every pending
+ * userland interrupt it sends a notification message to the
+ * registered delivery port, and it reaps entries whose delivery port
+ * has died.  The queue is walked at splhigh; spl is dropped only
+ * around the actual message send.
+ */
+void
+intr_thread (void)
+{
+  user_intr_t *e;
+  int id;
+  ipc_port_t dst_port;
+  queue_init (&main_intr_queue);
+
+  for (;;)
+    {
+      assert_wait ((event_t) &intr_thread, FALSE);
+      /* Make sure we wake up from times to times to check for aborted processes */
+      thread_set_timeout (hz);
+      spl_t s = splhigh ();
+
+      /* Now check for interrupts */
+      int del;	/* set when an entry with a dead port must be removed */
+      do
+	{
+	  del = 0;
+
+	  queue_iterate (&main_intr_queue, e, user_intr_t *, chain)
+	    {
+	      /* The reference of the port was increased
+	       * when the port was installed. If the reference is 1, it means
+	       * the port was deallocated and we should clean after it. */
+	      if (e->dst_port->ip_references == 1)
+		{
+		  /* Removal invalidates the iterator: break out,
+		     handle it below, then rescan the queue. */
+		  clear_wait (current_thread (), 0, 0);
+		  del = 1;
+		  break;
+		}
+
+	      if (e->interrupts)
+		{
+		  /* There is work to do: cancel the assert_wait so the
+		     thread_block below will not sleep. */
+		  clear_wait (current_thread (), 0, 0);
+		  id = e->id;
+		  dst_port = e->dst_port;
+		  e->interrupts--;
+		  irqtab.tot_num_intr--;
+
+		  /* Drop spl while sending the notification. */
+		  splx (s);
+		  deliver_intr (id, dst_port);
+		  s = splhigh ();
+		}
+	    }
+
+	  /* remove the entry without dest port from the queue and free it. */
+	  if (del)
+	    {
+	      /*
+	       * We clear unacked irqs now, so the Linux handling can trigger,
+	       * and we will cleanup later after the Linux handler is cleared.
+	       */
+	      assert (!queue_empty (&main_intr_queue));
+	      queue_remove (&main_intr_queue, e, user_intr_t *, chain);
+
+	      printf ("irq handler [%d]: release a dead delivery port %p entry %p\n", e->id, e->dst_port, e);
+	      ipc_port_release (e->dst_port);
+	      e->dst_port = MACH_PORT_NULL;
+
+	      /* Re-enable the line once for each interrupt the dead
+		 client never acknowledged. */
+	      if (e->n_unacked)
+		printf("irq handler [%d]: still %d unacked irqs in entry %p\n", e->id, e->n_unacked, e);
+	      while (e->n_unacked)
+		{
+		  __enable_irq (irqtab.irq[e->id]);
+		  e->n_unacked--;
+		}
+
+#if 0
+#ifndef LINUX_DEV
+	      // TODO: remove from the action list
+#else
+	      // FIXME: with the Linux irq handler we don't actually control the action list
+#endif
+	      splx (s);
+	      kfree ((vm_offset_t) e, sizeof (*e));
+	      s = splhigh ();
+#endif
+	    }
+	}
+      while (del || irqtab.tot_num_intr);
+      splx (s);
+      thread_block (NULL);
+    }
+}
+
+/*
+ * Build and send a DEVICE_INTR_NOTIFY message carrying ID to DST_PORT.
+ * Returns TRUE when a message was queued, FALSE when the port is null
+ * or no kernel message buffer could be allocated.
+ */
+static boolean_t
+deliver_intr (int id, ipc_port_t dst_port)
+{
+  ipc_kmsg_t kmsg;
+  device_intr_notification_t *notification;
+  mach_port_t dest = (mach_port_t) dst_port;
+
+  if (dest == MACH_PORT_NULL)
+    return FALSE;
+
+  kmsg = ikm_alloc(sizeof *notification);
+  if (kmsg == IKM_NULL)
+    return FALSE;
+
+  ikm_init(kmsg, sizeof *notification);
+  notification = (device_intr_notification_t *) &kmsg->ikm_header;
+
+  /* Header: one send right to the driver's notification port. */
+  notification->intr_header.msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+  notification->intr_header.msgh_size = sizeof *notification;
+  notification->intr_header.msgh_seqno = DEVICE_NOTIFY_MSGH_SEQNO;
+  notification->intr_header.msgh_local_port = MACH_PORT_NULL;
+  notification->intr_header.msgh_remote_port = MACH_PORT_NULL;
+  notification->intr_header.msgh_id = DEVICE_INTR_NOTIFY;
+
+  /* Body: a single inline 32-bit integer, the irq id. */
+  notification->intr_type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
+  notification->intr_type.msgt_size = 32;
+  notification->intr_type.msgt_number = 1;
+  notification->intr_type.msgt_inline = TRUE;
+  notification->intr_type.msgt_longform = FALSE;
+  notification->intr_type.msgt_deallocate = FALSE;
+  notification->intr_type.msgt_unused = 0;
+
+  notification->intr_header.msgh_remote_port = dest;
+  notification->id = id;
+
+  ipc_port_copy_send (dst_port);
+  ipc_mqueue_send_always(kmsg);
+
+  return TRUE;
+}
+
+#endif /* MACH_XEN */
diff --git a/device/intr.h b/device/intr.h
new file mode 100644
index 0000000..cd3e0bc
--- /dev/null
+++ b/device/intr.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2010, 2011, 2019 Free Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#ifndef __INTR_H__
+#define __INTR_H__
+
+#ifndef MACH_XEN
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/queue.h>
+#include <ipc/ipc_port.h>
+#include <device/conf.h>
+
+#define DEVICE_NOTIFY_MSGH_SEQNO 0
+
+#include <sys/types.h>
+
+struct irqdev;
+#include <machine/irq.h>
+
+/* One userland interrupt registration: a (delivery port, irq slot)
+   pair, linked on the owning irqdev's intr_queue via `chain'. */
+typedef struct {
+  queue_chain_t chain;
+  int interrupts; /* Number of interrupts occurred since last run of intr_thread */
+  int n_unacked;  /* Number of times irqs were disabled for this */
+  ipc_port_t dst_port; /* Notification port */
+  int id; /* Mapping to machine dependent irq_t array elem */
+} user_intr_t;
+
+/* A source of interrupts that can be delivered to userland drivers. */
+struct irqdev {
+  char *name;
+  void (*irqdev_ack)(struct irqdev *dev, int id);
+
+  queue_head_t *intr_queue;	/* registered user_intr_t entries */
+  int tot_num_intr; /* Total number of unprocessed interrupts */
+
+  /* Machine dependent */
+  irq_t irq[NINTR];
+};
+
+extern queue_head_t main_intr_queue;
+extern int install_user_intr_handler (struct irqdev *dev, int id, unsigned long flags, user_intr_t *e);
+extern int deliver_user_intr (struct irqdev *dev, int id, user_intr_t *e);
+extern user_intr_t *insert_intr_entry (struct irqdev *dev, int id, ipc_port_t receive_port);
+
+void intr_thread (void);
+kern_return_t irq_acknowledge (ipc_port_t receive_port);
+
+#endif /* MACH_XEN */
+
+#endif
diff --git a/device/io_req.h b/device/io_req.h
new file mode 100644
index 0000000..fb63696
--- /dev/null
+++ b/device/io_req.h
@@ -0,0 +1,145 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 10/88
+ */
+
+#ifndef _IO_REQ_
+#define _IO_REQ_
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/vm_param.h>
+#include <kern/slab.h>
+#include <kern/kalloc.h>
+#include <kern/lock.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <device/device_types.h>
+#include <device/dev_hdr.h>
+
+#include <kern/macros.h>
+
+/*
+ * IO request element, queued on device for delayed replies.
+ */
+typedef struct io_req *	io_req_t;
+struct io_req {
+	struct io_req *	io_next;	/* next, ... */
+	struct io_req *	io_prev;	/* prev pointers: link in done,
+					   deferred, or in-progress list */
+	mach_device_t	io_device;	/* pointer to open-device structure */
+	char *		io_dev_ptr;	/* pointer to driver structure -
+					   filled in by driver if necessary */
+	int		io_unit;	/* unit number ('minor') of device */
+	int		io_op;		/* IO operation */
+	dev_mode_t	io_mode;	/* operation mode (wait, truncate) */
+	recnum_t	io_recnum;	/* starting record number for
+					   random-access devices */
+
+	union io_un {
+	    io_buf_ptr_t data;		/* data, for IO requests */
+	} io_un;
+#define io_data io_un.data		/* shorthand for the data buffer */
+
+	long		io_count;	/* amount requested */
+	vm_size_t	io_alloc_size;	/* amount allocated */
+	long		io_residual;	/* amount NOT done */
+	io_return_t	io_error;	/* error code */
+	/* call when done - returns TRUE if IO really finished */
+	boolean_t	(*io_done)(io_req_t);
+	struct ipc_port	*io_reply_port;	/* reply port, for asynchronous
+					   messages */
+	mach_msg_type_name_t io_reply_port_type;
+					/* send or send-once right? */
+	struct io_req *	io_link;	/* forward link (for driver header) */
+	struct io_req *	io_rlink;	/* reverse link (for driver header) */
+	vm_map_copy_t	io_copy;	/* vm_map_copy obj. for this op. */
+	long		io_total;	/* total op size, for write */
+	decl_simple_lock_data(,io_req_lock)
+					/* Lock for this structure */
+	long		io_physrec;	/* mapping to the physical block
+					   number */
+	long		io_rectotal;	/* total number of blocks to move */
+};
+
+/*
+ * LOCKING NOTE: Operations on io_req's are in general single threaded by
+ * the invoking code, obviating the need for a lock. The usual IO_CALL
+ * path through the code is: Initiating thread hands io_req to device driver,
+ * driver passes it to io_done thread, io_done thread sends reply message. No
+ * locking is needed in this sequence. Unfortunately, a synchronous wait
+ * for a buffer requires a lock to avoid problems if the wait and interrupt
+ * happen simultaneously on different processors.
+ *
+ * Shall be taken at splio only
+ */
+
+#define ior_lock(ior) simple_lock(&(ior)->io_req_lock)
+#define ior_unlock(ior) simple_unlock(&(ior)->io_req_lock)
+
+/*
+ * Flags and operations
+ */
+
+#define IO_WRITE 0x00000000 /* operation is write */
+#define IO_READ 0x00000001 /* operation is read */
+#define IO_OPEN 0x00000002 /* operation is open */
+#define IO_DONE 0x00000100 /* operation complete */
+#define IO_ERROR 0x00000200 /* error on operation */
+#define IO_BUSY 0x00000400 /* operation in progress */
+#define IO_WANTED 0x00000800 /* wakeup when no longer BUSY */
+#define IO_BAD 0x00001000 /* bad disk block */
+#define IO_CALL 0x00002000 /* call io_done_thread when done */
+#define IO_INBAND 0x00004000 /* mig call was inband */
+#define IO_INTERNAL 0x00008000 /* internal, device-driver specific */
+#define IO_LOANED 0x00010000 /* ior loaned by another module */
+
+#define IO_SPARE_START 0x00020000 /* start of spare flags */
+
+/*
+ * Standard completion routine for io_requests.
+ */
+void iodone(io_req_t);
+
+/*
+ * Macros to allocate and free IORs - will convert to caches later.
+ */
+#define io_req_alloc(ior,size) \
+ MACRO_BEGIN \
+ (ior) = (io_req_t)kalloc(sizeof(struct io_req)); \
+ simple_lock_init(&(ior)->io_req_lock); \
+ MACRO_END
+
+#define io_req_free(ior) \
+ (kfree((vm_offset_t)(ior), sizeof(struct io_req)))
+
+
+extern struct kmem_cache io_inband_cache; /* for inband reads */
+
+#endif /* _IO_REQ_ */
diff --git a/device/kmsg.c b/device/kmsg.c
new file mode 100644
index 0000000..e5b518e
--- /dev/null
+++ b/device/kmsg.c
@@ -0,0 +1,254 @@
+/* GNU Mach Kernel Message Device.
+
+ Copyright (C) 1998, 1999, 2007 Free Software Foundation, Inc.
+
+ Written by OKUJI Yoshinori.
+
+This is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This software is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with the software; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* kmsg provides a stream interface. */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <device/conf.h>
+#include <device/ds_routines.h>
+#include <device/io_req.h>
+#include <mach/boolean.h>
+#include <kern/lock.h>
+#include <device/kmsg.h>
+
+
+#define KMSGBUFSIZE (4096) /* XXX */
+
+/* Simple array for buffering messages */
+static char kmsg_buffer[KMSGBUFSIZE];
+/* Point to the offset to write */
+static int kmsg_write_offset;
+/* Point to the offset to read */
+static int kmsg_read_offset;
+/* I/O request queue for blocking read */
+static queue_head_t kmsg_read_queue;
+/* Used for exclusive access to the device */
+static boolean_t kmsg_in_use;
+/* Used for exclusive access to the routines */
+def_simple_lock_irq_data (static, kmsg_lock);
+/* If already initialized or not */
+static boolean_t kmsg_init_done = FALSE;
+
+/* Reset the kmsg ring buffer, reader queue, and lock to their
+   pristine state.  Also called lazily from kmsg_putchar, since
+   console output can happen before the device is opened. */
+static void
+kmsginit (void)
+{
+  kmsg_read_offset = 0;
+  kmsg_write_offset = 0;
+  kmsg_in_use = FALSE;
+  queue_init (&kmsg_read_queue);
+  simple_lock_init_irq (&kmsg_lock);
+}
+
+/* Kernel Message Open Handler.  The kmsg device is exclusive-open:
+   a second open while the first is active fails. */
+io_return_t
+kmsgopen (dev_t dev, int flag, const io_req_t ior)
+{
+  io_return_t rc = D_SUCCESS;
+
+  spl_t s = simple_lock_irq (&kmsg_lock);
+  if (kmsg_in_use)
+    rc = D_ALREADY_OPEN;
+  else
+    kmsg_in_use = TRUE;
+  simple_unlock_irq (s, &kmsg_lock);
+
+  return rc;
+}
+
+/* Kernel Message Close Handler: drop the exclusive-open claim. */
+void
+kmsgclose (dev_t dev, int flag)
+{
+  spl_t s = simple_lock_irq (&kmsg_lock);
+
+  kmsg_in_use = FALSE;
+  simple_unlock_irq (s, &kmsg_lock);
+}
+
+static boolean_t kmsg_read_done (io_req_t ior);
+
+/* Kernel Message Read Handler.
+
+   Copies up to io_count bytes out of the circular kmsg_buffer into a
+   freshly allocated reply buffer.  When the ring is empty the request
+   either fails immediately (D_NOWAIT) or is parked on kmsg_read_queue
+   until kmsg_putchar produces data (D_IO_QUEUED, finished later by
+   kmsg_read_done). */
+io_return_t
+kmsgread (dev_t dev, io_req_t ior)
+{
+  int err;
+  int amt, len;
+
+  err = device_read_alloc (ior, ior->io_count);
+  if (err != KERN_SUCCESS)
+    return err;
+
+  spl_t s = simple_lock_irq (&kmsg_lock);
+  /* Equal offsets mean the ring is empty. */
+  if (kmsg_read_offset == kmsg_write_offset)
+    {
+      /* The queue is empty. */
+      if (ior->io_mode & D_NOWAIT)
+	{
+	  simple_unlock_irq (s, &kmsg_lock);
+	  return D_WOULD_BLOCK;
+	}
+
+      ior->io_done = kmsg_read_done;
+      enqueue_tail (&kmsg_read_queue, (queue_entry_t) ior);
+      simple_unlock_irq (s, &kmsg_lock);
+      return D_IO_QUEUED;
+    }
+
+  /* Number of readable bytes, accounting for wrap-around. */
+  len = kmsg_write_offset - kmsg_read_offset;
+  if (len < 0)
+    len += KMSGBUFSIZE;
+
+  amt = ior->io_count;
+  if (amt > len)
+    amt = len;
+
+  /* Copy in one piece, or two if the region wraps past the end. */
+  if (kmsg_read_offset + amt <= KMSGBUFSIZE)
+    {
+      memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, amt);
+    }
+  else
+    {
+      int cnt;
+
+      cnt = KMSGBUFSIZE - kmsg_read_offset;
+      memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, cnt);
+      memcpy (ior->io_data + cnt, kmsg_buffer, amt - cnt);
+    }
+
+  kmsg_read_offset += amt;
+  if (kmsg_read_offset >= KMSGBUFSIZE)
+    kmsg_read_offset -= KMSGBUFSIZE;
+
+  /* Report any shortfall to the caller. */
+  ior->io_residual = ior->io_count - amt;
+
+  simple_unlock_irq (s, &kmsg_lock);
+  return D_SUCCESS;
+}
+
+/* Completion callback for a queued kmsg read: invoked (via iodone)
+   once kmsg_putchar has produced data.  Mirrors the copy logic in
+   kmsgread; if the ring raced back to empty, the request is re-queued
+   and FALSE is returned so the reply stays pending. */
+static boolean_t
+kmsg_read_done (io_req_t ior)
+{
+  int amt, len;
+
+  spl_t s = simple_lock_irq (&kmsg_lock);
+  if (kmsg_read_offset == kmsg_write_offset)
+    {
+      /* The queue is empty. */
+      ior->io_done = kmsg_read_done;
+      enqueue_tail (&kmsg_read_queue, (queue_entry_t) ior);
+      simple_unlock_irq (s, &kmsg_lock);
+      return FALSE;
+    }
+
+  /* Number of readable bytes, accounting for wrap-around. */
+  len = kmsg_write_offset - kmsg_read_offset;
+  if (len < 0)
+    len += KMSGBUFSIZE;
+
+  amt = ior->io_count;
+  if (amt > len)
+    amt = len;
+
+  /* Copy in one piece, or two if the region wraps past the end. */
+  if (kmsg_read_offset + amt <= KMSGBUFSIZE)
+    {
+      memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, amt);
+    }
+  else
+    {
+      int cnt;
+
+      cnt = KMSGBUFSIZE - kmsg_read_offset;
+      memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, cnt);
+      memcpy (ior->io_data + cnt, kmsg_buffer, amt - cnt);
+    }
+
+  kmsg_read_offset += amt;
+  if (kmsg_read_offset >= KMSGBUFSIZE)
+    kmsg_read_offset -= KMSGBUFSIZE;
+
+  ior->io_residual = ior->io_count - amt;
+
+  simple_unlock_irq (s, &kmsg_lock);
+  /* Send the reply message now that data has been copied. */
+  ds_read_done (ior);
+
+  return TRUE;
+}
+
+/* Report device status.  Only DEV_GET_SIZE is supported: the kmsg
+   device is a byte stream, so the device size is 0 and the record
+   size is one byte. */
+io_return_t
+kmsggetstat (dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count)
+{
+  if (flavor != DEV_GET_SIZE)
+    return D_INVALID_OPERATION;
+
+  data[DEV_GET_SIZE_DEVICE_SIZE] = 0;
+  data[DEV_GET_SIZE_RECORD_SIZE] = 1;
+  *count = DEV_GET_SIZE_COUNT;
+
+  return D_SUCCESS;
+}
+
+/* Write to Kernel Message Buffer.
+
+   Appends C to the ring buffer and wakes any reader blocked in
+   kmsgread.  When the buffer is full the character is silently
+   dropped.  May be called very early in boot, before the device (or
+   even spl handling) is initialized. */
+void
+kmsg_putchar (int c)
+{
+  io_req_t ior;
+  int offset;
+  spl_t s = -1;		/* placate compilers; only used when spl_init */
+
+  /* XXX: cninit is not called before cnputc is used. So call kmsginit
+     here if not initialized yet. */
+  if (!kmsg_init_done)
+    {
+      kmsginit ();
+      kmsg_init_done = TRUE;
+    }
+
+  /* NOTE(review): spl_init appears to be FALSE before interrupt
+     priorities are set up, in which case we run lockless -- confirm. */
+  if (spl_init)
+    s = simple_lock_irq (&kmsg_lock);
+  offset = kmsg_write_offset + 1;
+  if (offset == KMSGBUFSIZE)
+    offset = 0;
+
+  /* The next write position hitting the read offset means the ring
+     is full. */
+  if (offset == kmsg_read_offset)
+    {
+      /* Discard C. */
+      if (spl_init)
+	simple_unlock_irq (s, &kmsg_lock);
+      return;
+    }
+
+  kmsg_buffer[kmsg_write_offset++] = c;
+  if (kmsg_write_offset == KMSGBUFSIZE)
+    kmsg_write_offset = 0;
+
+  /* Unblock every queued reader; each re-checks the ring in
+     kmsg_read_done. */
+  while ((ior = (io_req_t) dequeue_head (&kmsg_read_queue)) != NULL)
+    iodone (ior);
+
+  if (spl_init)
+    simple_unlock_irq (s, &kmsg_lock);
+}
diff --git a/device/kmsg.h b/device/kmsg.h
new file mode 100644
index 0000000..00a3505
--- /dev/null
+++ b/device/kmsg.h
@@ -0,0 +1,18 @@
+#ifndef _DEVICE_KMSG_H_
+#define _DEVICE_KMSG_H_ 1
+
+
+#include <sys/types.h>
+
+#include <device/device_types.h>
+#include <device/io_req.h>
+
+io_return_t kmsgopen (dev_t dev, int flag, io_req_t ior);
+void kmsgclose (dev_t dev, int flag);
+io_return_t kmsgread (dev_t dev, io_req_t ior);
+io_return_t kmsggetstat (dev_t dev, dev_flavor_t flavor,
+ dev_status_t data, mach_msg_type_number_t *count);
+void kmsg_putchar (int c);
+
+
+#endif /* !_DEVICE_KMSG_H_ */
diff --git a/device/memory_object_reply.cli b/device/memory_object_reply.cli
new file mode 100644
index 0000000..f2cd480
--- /dev/null
+++ b/device/memory_object_reply.cli
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+
+#include <mach/mach.defs>
diff --git a/device/net_io.c b/device/net_io.c
new file mode 100644
index 0000000..ee9435d
--- /dev/null
+++ b/device/net_io.c
@@ -0,0 +1,2153 @@
+ /*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ *	Date:	3/89
+ *
+ * Network IO.
+ *
+ *	Packet filter code taken from vaxif/enet.c written by
+ *	CMU and Stanford.
+ */
+
+/*
+ * Note: don't depend on anything in this file.
+ * It may change a lot real soon. -cmaeda 11 June 1993
+ */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <device/net_status.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <device/net_io.h>
+#include <device/if_hdr.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_mqueue.h>
+
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/lock.h>
+#include <kern/printf.h>
+#include <kern/queue.h>
+#include <kern/sched_prim.h>
+#include <kern/slab.h>
+#include <kern/thread.h>
+
+#include <machine/machspl.h>
+
+#if MACH_TTD
+#include <ttd/ttd_stub.h>
+#endif /* MACH_TTD */
+
+#if MACH_TTD
+int kttd_async_counter= 0;
+#endif /* MACH_TTD */
+
+
+/*
+ * Packet Buffer Management
+ *
+ * This module manages a private pool of kmsg buffers.
+ */
+
+/*
+ * List of net kmsgs queued to be sent to users.
+ * Messages can be high priority or low priority.
+ * The network thread processes high priority messages first.
+ */
+def_simple_lock_data(static,net_queue_lock)
+boolean_t net_thread_awake = FALSE;
+struct ipc_kmsg_queue net_queue_high;
+int net_queue_high_size = 0;
+int net_queue_high_max = 0; /* for debugging */
+struct ipc_kmsg_queue net_queue_low;
+int net_queue_low_size = 0;
+int net_queue_low_max = 0; /* for debugging */
+
+/*
+ * List of net kmsgs that can be touched at interrupt level.
+ * If it is empty, we will also steal low priority messages.
+ */
+def_simple_lock_data(static,net_queue_free_lock)
+struct ipc_kmsg_queue net_queue_free;
+int net_queue_free_size = 0; /* on free list */
+int net_queue_free_max = 0; /* for debugging */
+
+/*
+ * This value is critical to network performance.
+ * At least this many buffers should be sitting in net_queue_free.
+ * If this is set too small, we will drop network packets.
+ * Even a low drop rate (<1%) can cause severe network throughput problems.
+ * We add one to net_queue_free_min for every filter.
+ */
+int net_queue_free_min = 3;
+
+int net_queue_free_hits = 0; /* for debugging */
+int net_queue_free_steals = 0; /* for debugging */
+int net_queue_free_misses = 0; /* for debugging */
+
+int net_kmsg_send_high_hits = 0; /* for debugging */
+int net_kmsg_send_low_hits = 0; /* for debugging */
+int net_kmsg_send_high_misses = 0; /* for debugging */
+int net_kmsg_send_low_misses = 0; /* for debugging */
+
+int net_thread_awaken = 0; /* for debugging */
+int net_ast_taken = 0; /* for debugging */
+
+def_simple_lock_data(static,net_kmsg_total_lock)
+int net_kmsg_total = 0; /* total allocated */
+int net_kmsg_max; /* initialized below */
+
+vm_size_t net_kmsg_size; /* initialized below */
+
+/*
+ * We want more buffers when there aren't enough in the free queue
+ * and the low priority queue. However, we don't want to allocate
+ * more than net_kmsg_max.
+ */
+
+#define net_kmsg_want_more() \
+ (((net_queue_free_size + net_queue_low_size) < net_queue_free_min) && \
+ (net_kmsg_total < net_kmsg_max))
+
+ipc_kmsg_t
+net_kmsg_get(void)
+{
+ ipc_kmsg_t kmsg;
+ spl_t s;
+
+ /*
+ * First check the list of free buffers.
+ */
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ kmsg = ipc_kmsg_queue_first(&net_queue_free);
+ if (kmsg != IKM_NULL) {
+ ipc_kmsg_rmqueue_first_macro(&net_queue_free, kmsg);
+ net_queue_free_size--;
+ net_queue_free_hits++;
+ }
+ simple_unlock(&net_queue_free_lock);
+
+ if (kmsg == IKM_NULL) {
+ /*
+ * Try to steal from the low priority queue.
+ */
+ simple_lock(&net_queue_lock);
+ kmsg = ipc_kmsg_queue_first(&net_queue_low);
+ if (kmsg != IKM_NULL) {
+ ipc_kmsg_rmqueue_first_macro(&net_queue_low, kmsg);
+ net_queue_low_size--;
+ net_queue_free_steals++;
+ }
+ simple_unlock(&net_queue_lock);
+ }
+
+ if (kmsg == IKM_NULL)
+ net_queue_free_misses++;
+ (void) splx(s);
+
+ if (net_kmsg_want_more() || (kmsg == IKM_NULL)) {
+ boolean_t awake;
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ awake = net_thread_awake;
+ net_thread_awake = TRUE;
+ simple_unlock(&net_queue_lock);
+ (void) splx(s);
+
+ if (!awake)
+ thread_wakeup((event_t) &net_thread_awake);
+ }
+
+ return kmsg;
+}
+
+void
+net_kmsg_put(const ipc_kmsg_t kmsg)
+{
+ spl_t s;
+
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ ipc_kmsg_enqueue_macro(&net_queue_free, kmsg);
+ if (++net_queue_free_size > net_queue_free_max)
+ net_queue_free_max = net_queue_free_size;
+ simple_unlock(&net_queue_free_lock);
+ (void) splx(s);
+}
+
+void
+net_kmsg_collect(void)
+{
+ ipc_kmsg_t kmsg;
+ spl_t s;
+
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ while (net_queue_free_size > net_queue_free_min) {
+ kmsg = ipc_kmsg_dequeue(&net_queue_free);
+ net_queue_free_size--;
+ simple_unlock(&net_queue_free_lock);
+ (void) splx(s);
+
+ net_kmsg_free(kmsg);
+ simple_lock(&net_kmsg_total_lock);
+ net_kmsg_total--;
+ simple_unlock(&net_kmsg_total_lock);
+
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ }
+ simple_unlock(&net_queue_free_lock);
+ (void) splx(s);
+}
+
+static void
+net_kmsg_more(void)
+{
+ ipc_kmsg_t kmsg;
+
+ /*
+ * Replenish net kmsg pool if low. We don't have the locks
+ * necessary to look at these variables, but that's OK because
+ * misread values aren't critical. The danger in this code is
+ * that while we allocate buffers, interrupts are happening
+ * which take buffers out of the free list. If we are not
+ * careful, we will sit in the loop and allocate a zillion
+ * buffers while a burst of packets arrives. So we count
+ * buffers in the low priority queue as available, because
+ * net_kmsg_get will make use of them, and we cap the total
+ * number of buffers we are willing to allocate.
+ */
+
+ while (net_kmsg_want_more()) {
+ simple_lock(&net_kmsg_total_lock);
+ net_kmsg_total++;
+ simple_unlock(&net_kmsg_total_lock);
+ kmsg = net_kmsg_alloc();
+ net_kmsg_put(kmsg);
+ }
+}
+
+/*
+ * Packet Filter Data Structures
+ *
+ * Each network interface has a set of packet filters
+ * that are run on incoming packets.
+ *
+ * Each packet filter may represent a single network
+ * session or multiple network sessions. For example,
+ * all application level TCP sessions would be represented
+ * by a single packet filter data structure.
+ *
+ * If a packet filter has a single session, we use a
+ * struct net_rcv_port to represent it. If the packet
+ * filter represents multiple sessions, we use a
+ * struct net_hash_header to represent it.
+ */
+
+/*
+ * Each interface has a write port and a set of read ports.
+ * Each read port has one or more filters to determine what packets
+ * should go to that port.
+ */
+
+/*
+ * Receive port for net, with packet filter.
+ * This data structure by itself represents a packet
+ * filter for a single session.
+ */
+struct net_rcv_port {
+ queue_chain_t input; /* list of input open_descriptors */
+ queue_chain_t output; /* list of output open_descriptors */
+ ipc_port_t rcv_port; /* port to send packet to */
+ int rcv_qlimit; /* port's qlimit */
+ int rcv_count; /* number of packets received */
+ int priority; /* priority for filter */
+ filter_t *filter_end; /* pointer to end of filter */
+ filter_t filter[NET_MAX_FILTER];
+ /* filter operations */
+};
+
+struct kmem_cache net_rcv_cache; /* cache of net_rcv_port structs */
+
+#define NET_HASH_SIZE 256
+#define N_NET_HASH 4
+#define N_NET_HASH_KEYS 4
+
+/*
+ * A single hash entry.
+ */
+struct net_hash_entry {
+ queue_chain_t chain; /* list of entries with same hval */
+#define he_next chain.next
+#define he_prev chain.prev
+ ipc_port_t rcv_port; /* destination port */
+ int rcv_qlimit; /* qlimit for the port */
+ unsigned int keys[N_NET_HASH_KEYS];
+};
+
+struct kmem_cache net_hash_entry_cache;
+
+/*
+ * This structure represents a packet filter with multiple sessions.
+ *
+ * For example, all application level TCP sessions might be
+ * represented by one of these structures. It looks like a
+ * net_rcv_port struct so that both types can live on the
+ * same packet filter queues.
+ */
+struct net_hash_header {
+ struct net_rcv_port rcv;
+ int n_keys; /* zero if not used */
+ int ref_count; /* reference count */
+ net_hash_entry_t table[NET_HASH_SIZE];
+} filter_hash_header[N_NET_HASH];
+
+def_simple_lock_data(static,net_hash_header_lock)
+
+#define HASH_ITERATE(head, elt) (elt) = (net_hash_entry_t) (head); do {
+#define HASH_ITERATE_END(head, elt) \
+ (elt) = (net_hash_entry_t) queue_next((queue_entry_t) (elt)); \
+ } while ((elt) != (head));
+
+#define FILTER_ITERATE(if_port_list, fp, nextfp, chain) \
+ for ((fp) = (net_rcv_port_t) queue_first(if_port_list); \
+ !queue_end(if_port_list, (queue_entry_t)(fp)); \
+ (fp) = (nextfp)) { \
+ (nextfp) = (net_rcv_port_t) queue_next(chain);
+#define FILTER_ITERATE_END }
+
+/* entry_p must be net_rcv_port_t or net_hash_entry_t */
+#define ENQUEUE_DEAD(dead, entry_p, chain) { \
+ (entry_p)->chain.next = (queue_entry_t) (dead); \
+ (dead) = (queue_entry_t)(entry_p); \
+}
+
+/*
+ * ethernet_priority:
+ *
+ * This function properly belongs in the ethernet interfaces;
+ * it should not be called by this module. (We get packet
+ * priorities as an argument to net_filter.) It is here
+ * to avoid massive code duplication.
+ *
+ * Returns TRUE for high-priority packets.
+ */
+
+boolean_t ethernet_priority(const ipc_kmsg_t kmsg)
+{
+ unsigned char *addr =
+ (unsigned char *) net_kmsg(kmsg)->header;
+
+ /*
+ * A simplistic check for broadcast packets.
+ */
+
+ if ((addr[0] == 0xff) && (addr[1] == 0xff) &&
+ (addr[2] == 0xff) && (addr[3] == 0xff) &&
+ (addr[4] == 0xff) && (addr[5] == 0xff))
+ return FALSE;
+ else
+ return TRUE;
+}
+
+mach_msg_type_t header_type = {
+ .msgt_name = MACH_MSG_TYPE_BYTE,
+ .msgt_size = 8,
+ .msgt_number = NET_HDW_HDR_MAX,
+ .msgt_inline = TRUE,
+ .msgt_longform = FALSE,
+ .msgt_deallocate = FALSE,
+ .msgt_unused = 0
+};
+
+mach_msg_type_t packet_type = {
+ .msgt_name = MACH_MSG_TYPE_BYTE,
+ .msgt_size = 8,
+ .msgt_number = 0,
+ .msgt_inline = TRUE,
+ .msgt_longform = FALSE,
+ .msgt_deallocate = FALSE,
+ .msgt_unused = 0
+};
+
+/*
+ * net_deliver:
+ *
+ * Called and returns holding net_queue_lock, at splimp.
+ * Dequeues a message and delivers it at spl0.
+ * Returns FALSE if no messages.
+ */
+static boolean_t net_deliver(boolean_t nonblocking)
+{
+ ipc_kmsg_t kmsg;
+ boolean_t high_priority;
+ struct ipc_kmsg_queue send_list;
+
+ /*
+ * Pick up a pending network message and deliver it.
+ * Deliver high priority messages before low priority.
+ */
+
+ if ((kmsg = ipc_kmsg_dequeue(&net_queue_high)) != IKM_NULL) {
+ net_queue_high_size--;
+ high_priority = TRUE;
+ } else if ((kmsg = ipc_kmsg_dequeue(&net_queue_low)) != IKM_NULL) {
+ net_queue_low_size--;
+ high_priority = FALSE;
+ } else
+ return FALSE;
+ simple_unlock(&net_queue_lock);
+ (void) spl0();
+
+ /*
+ * Run the packet through the filters,
+ * getting back a queue of packets to send.
+ */
+ net_filter(kmsg, &send_list);
+
+ if (!nonblocking) {
+ /*
+ * There is a danger of running out of available buffers
+ * because they all get moved into the high priority queue
+ * or a port queue. In particular, we might need to
+ * allocate more buffers as we pull (previously available)
+ * buffers out of the low priority queue. But we can only
+ * allocate if we are allowed to block.
+ */
+ net_kmsg_more();
+ }
+
+ while ((kmsg = ipc_kmsg_dequeue(&send_list)) != IKM_NULL) {
+ int count;
+
+ /*
+ * Fill in the rest of the kmsg.
+ */
+ count = net_kmsg(kmsg)->net_rcv_msg_packet_count;
+
+ ikm_init_special(kmsg, IKM_SIZE_NETWORK);
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+ /* remember message sizes must be rounded up */
+ kmsg->ikm_header.msgh_size =
+ (mach_msg_size_t) P2ROUND(sizeof(struct net_rcv_msg)
+ - sizeof net_kmsg(kmsg)->sent
+ - NET_RCV_MAX + count,
+ __alignof__ (uintptr_t));
+ kmsg->ikm_header.msgh_local_port = MACH_PORT_NULL;
+ kmsg->ikm_header.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ kmsg->ikm_header.msgh_id = NET_RCV_MSG_ID;
+
+ net_kmsg(kmsg)->header_type = header_type;
+ net_kmsg(kmsg)->packet_type = packet_type;
+ net_kmsg(kmsg)->net_rcv_msg_packet_count = count;
+
+ /*
+ * Send the packet to the destination port. Drop it
+ * if the destination port is over its backlog.
+ */
+
+ if (ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT, 0) ==
+ MACH_MSG_SUCCESS) {
+ if (high_priority)
+ net_kmsg_send_high_hits++;
+ else
+ net_kmsg_send_low_hits++;
+ /* the receiver is responsible for the message now */
+ } else {
+ if (high_priority)
+ net_kmsg_send_high_misses++;
+ else
+ net_kmsg_send_low_misses++;
+ ipc_kmsg_destroy(kmsg);
+ }
+ }
+
+ (void) splimp();
+ simple_lock(&net_queue_lock);
+ return TRUE;
+}
+
+/*
+ * We want to deliver packets using ASTs, so we can avoid the
+ * thread_wakeup/thread_block needed to get to the network
+ * thread. However, we can't allocate memory in the AST handler,
+ * because memory allocation might block. Hence we have the
+ * network thread to allocate memory. The network thread also
+ * delivers packets, so it can be allocating and delivering for a
+ * burst. net_thread_awake is protected by net_queue_lock
+ * (instead of net_queue_free_lock) so that net_packet and
+ * net_ast can safely determine if the network thread is running.
+ * This prevents a race that might leave a packet sitting without
+ * being delivered. It is possible for net_kmsg_get to think
+ * the network thread is awake, and so avoid a wakeup, and then
+ * have the network thread sleep without allocating. The next
+ * net_kmsg_get will do a wakeup.
+ */
+
+void net_ast(void)
+{
+ spl_t s;
+
+ net_ast_taken++;
+
+ /*
+ * If the network thread is awake, then we would
+ * rather deliver messages from it, because
+ * it can also allocate memory.
+ */
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ while (!net_thread_awake && net_deliver(TRUE))
+ continue;
+
+ /*
+ * Prevent an unnecessary AST. Either the network
+ * thread will deliver the messages, or there are
+ * no messages left to deliver.
+ */
+
+ simple_unlock(&net_queue_lock);
+ (void) splsched();
+ ast_off(cpu_number(), AST_NETWORK);
+ (void) splx(s);
+}
+
+static void __attribute__ ((noreturn)) net_thread_continue(void)
+{
+ for (;;) {
+ spl_t s;
+
+ net_thread_awaken++;
+
+ /*
+ * First get more buffers.
+ */
+ net_kmsg_more();
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ while (net_deliver(FALSE))
+ continue;
+
+ net_thread_awake = FALSE;
+ assert_wait(&net_thread_awake, FALSE);
+ simple_unlock(&net_queue_lock);
+ (void) splx(s);
+ counter(c_net_thread_block++);
+ thread_block(net_thread_continue);
+ }
+}
+
+void net_thread(void)
+{
+ spl_t s;
+
+ /*
+ * We should be very high priority.
+ */
+
+ thread_set_own_priority(0);
+
+ /*
+ * We sleep initially, so that we don't allocate any buffers
+ * unless the network is really in use and they are needed.
+ */
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ net_thread_awake = FALSE;
+ assert_wait(&net_thread_awake, FALSE);
+ simple_unlock(&net_queue_lock);
+ (void) splx(s);
+ counter(c_net_thread_block++);
+ thread_block(net_thread_continue);
+ net_thread_continue();
+ /*NOTREACHED*/
+}
+
+static void
+reorder_queue(
+ queue_t first,
+ queue_t last)
+{
+ queue_entry_t prev, next;
+
+ prev = first->prev;
+ next = last->next;
+
+ prev->next = last;
+ next->prev = first;
+
+ last->prev = prev;
+ last->next = first;
+
+ first->next = next;
+ first->prev = last;
+}
+
+/*
+ * Incoming packet. Header has already been moved to proper place.
+ * We are already at splimp.
+ */
+void
+net_packet(
+ struct ifnet *ifp,
+ ipc_kmsg_t kmsg,
+ unsigned int count,
+ boolean_t priority)
+{
+ boolean_t awake;
+
+#if MACH_TTD
+ /*
+ * Do a quick check to see if it is a kernel TTD packet.
+ *
+ * Only check if KernelTTD is enabled, ie. the current
+ * device driver supports TTD, and the bootp succeeded.
+ */
+ if (kttd_enabled && kttd_handle_async(kmsg)) {
+ /*
+ * Packet was a valid ttd packet and
+ * doesn't need to be passed up to filter.
+ * The ttd code put the used kmsg buffer
+ * back onto the free list.
+ */
+ if (kttd_debug)
+ printf("**%x**", kttd_async_counter++);
+ return;
+ }
+#endif /* MACH_TTD */
+
+ kmsg->ikm_header.msgh_remote_port = (mach_port_t) ifp;
+ net_kmsg(kmsg)->net_rcv_msg_packet_count = count;
+
+ simple_lock(&net_queue_lock);
+ if (priority) {
+ ipc_kmsg_enqueue(&net_queue_high, kmsg);
+ if (++net_queue_high_size > net_queue_high_max)
+ net_queue_high_max = net_queue_high_size;
+ } else {
+ ipc_kmsg_enqueue(&net_queue_low, kmsg);
+ if (++net_queue_low_size > net_queue_low_max)
+ net_queue_low_max = net_queue_low_size;
+ }
+ /*
+ * If the network thread is awake, then we don't
+ * need to take an AST, because the thread will
+ * deliver the packet.
+ */
+ awake = net_thread_awake;
+ simple_unlock(&net_queue_lock);
+
+ if (!awake) {
+ spl_t s = splsched();
+ ast_on(cpu_number(), AST_NETWORK);
+ (void) splx(s);
+ }
+}
+
+int net_filter_queue_reorder = 0; /* non-zero to enable reordering */
+
+/*
+ * Run a packet through the filters, returning a list of messages.
+ * We are *not* called at interrupt level.
+ */
+void
+net_filter(const ipc_kmsg_t kmsg,
+ ipc_kmsg_queue_t send_list)
+{
+ struct ifnet *ifp;
+ net_rcv_port_t infp, nextfp;
+ ipc_kmsg_t new_kmsg;
+
+ net_hash_entry_t entp, *hash_headp;
+ ipc_port_t dest;
+ queue_entry_t dead_infp = (queue_entry_t) 0;
+ queue_entry_t dead_entp = (queue_entry_t) 0;
+ unsigned int ret_count;
+
+ queue_head_t *if_port_list;
+
+ int count = net_kmsg(kmsg)->net_rcv_msg_packet_count;
+ ifp = (struct ifnet *) kmsg->ikm_header.msgh_remote_port;
+ ipc_kmsg_queue_init(send_list);
+
+ if (net_kmsg(kmsg)->sent)
+ if_port_list = &ifp->if_snd_port_list;
+ else
+ if_port_list = &ifp->if_rcv_port_list;
+
+ /*
+ * Unfortunately we can't allocate or deallocate memory
+ * while holding these locks. And we can't drop the locks
+ * while examining the filter lists.
+	 * Both locks are held in case a filter is removed from both
+ * queues.
+ */
+ simple_lock(&ifp->if_rcv_port_list_lock);
+ simple_lock(&ifp->if_snd_port_list_lock);
+ FILTER_ITERATE(if_port_list, infp, nextfp,
+ net_kmsg(kmsg)->sent ? &infp->output : &infp->input)
+ {
+ entp = (net_hash_entry_t) 0;
+ if ((infp->filter[0] & NETF_TYPE_MASK) == NETF_BPF) {
+ ret_count = bpf_do_filter(infp, net_kmsg(kmsg)->packet
+ + sizeof(struct packet_header),
+ count - sizeof(struct packet_header),
+ net_kmsg(kmsg)->header,
+ ifp->if_header_size, &hash_headp,
+ &entp);
+ if (entp == (net_hash_entry_t) 0)
+ dest = infp->rcv_port;
+ else
+ dest = entp->rcv_port;
+ if (ret_count)
+ ret_count += sizeof(struct packet_header);
+ } else {
+ ret_count = net_do_filter(infp, net_kmsg(kmsg)->packet, count,
+ net_kmsg(kmsg)->header);
+ if (ret_count)
+ ret_count = count;
+ dest = infp->rcv_port;
+ }
+
+ if (ret_count) {
+
+ /*
+ * Make a send right for the destination.
+ */
+
+ dest = ipc_port_copy_send(dest);
+ if (!IP_VALID(dest)) {
+ /*
+ * This filter is dead. We remove it from the
+ * filter list and set it aside for deallocation.
+ */
+
+ if (entp == (net_hash_entry_t) 0) {
+ if (infp->filter[0] & NETF_IN)
+ queue_remove(&ifp->if_rcv_port_list, infp,
+ net_rcv_port_t, input);
+ if (infp->filter[0] & NETF_OUT)
+ queue_remove(&ifp->if_snd_port_list, infp,
+ net_rcv_port_t, output);
+
+ /* Use input only for queues of dead filters. */
+ ENQUEUE_DEAD(dead_infp, infp, input);
+ continue;
+ } else {
+ hash_ent_remove (ifp,
+ (net_hash_header_t)infp,
+ FALSE, /* no longer used */
+ hash_headp,
+ entp,
+ &dead_entp);
+ continue;
+ }
+ }
+
+ /*
+ * Deliver copy of packet to this channel.
+ */
+ if (ipc_kmsg_queue_empty(send_list)) {
+ /*
+ * Only receiver, so far
+ */
+ new_kmsg = kmsg;
+ } else {
+ /*
+ * Other receivers - must allocate message and copy.
+ */
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ ipc_port_release_send(dest);
+ break;
+ }
+
+ memcpy(
+ net_kmsg(new_kmsg)->packet,
+ net_kmsg(kmsg)->packet,
+ ret_count);
+ memcpy(
+ net_kmsg(new_kmsg)->header,
+ net_kmsg(kmsg)->header,
+ NET_HDW_HDR_MAX);
+ }
+ net_kmsg(new_kmsg)->net_rcv_msg_packet_count = ret_count;
+ new_kmsg->ikm_header.msgh_remote_port = (mach_port_t) dest;
+ ipc_kmsg_enqueue(send_list, new_kmsg);
+
+ {
+ net_rcv_port_t prevfp;
+ int rcount = ++infp->rcv_count;
+
+ /*
+ * See if ordering of filters is wrong
+ */
+ if (infp->priority >= NET_HI_PRI) {
+#define REORDER_PRIO(chain) \
+ prevfp = (net_rcv_port_t) queue_prev(&infp->chain); \
+ /* \
+ * If infp is not the first element on the queue, \
+ * and the previous element is at equal priority \
+ * but has a lower count, then promote infp to \
+ * be in front of prevfp. \
+ */ \
+ if ((queue_t)prevfp != if_port_list && \
+ infp->priority == prevfp->priority) { \
+ /* \
+ * Threshold difference to prevent thrashing \
+ */ \
+ if (net_filter_queue_reorder \
+ && (100 + prevfp->rcv_count < rcount)) \
+ reorder_queue(&prevfp->chain, &infp->chain);\
+ }
+
+ REORDER_PRIO(input);
+ REORDER_PRIO(output);
+
+ /*
+ * High-priority filter -> no more deliveries
+ */
+ break;
+ }
+ }
+ }
+ }
+ FILTER_ITERATE_END
+ simple_unlock(&ifp->if_snd_port_list_lock);
+ simple_unlock(&ifp->if_rcv_port_list_lock);
+
+ /*
+ * Deallocate dead filters.
+ */
+ if (dead_infp != 0)
+ net_free_dead_infp(dead_infp);
+ if (dead_entp != 0)
+ net_free_dead_entp(dead_entp);
+
+ if (ipc_kmsg_queue_empty(send_list)) {
+ /* Not sent - recycle */
+ net_kmsg_put(kmsg);
+ }
+}
+
+boolean_t
+net_do_filter(net_rcv_port_t infp,
+ const char * data,
+ unsigned int data_count,
+ const char * header)
+{
+ int stack[NET_FILTER_STACK_DEPTH+1];
+ int *sp;
+ filter_t *fp, *fpe;
+ unsigned int op, arg;
+
+ /*
+ * The filter accesses the header and data
+ * as unsigned short words.
+ */
+ data_count /= sizeof(unsigned short);
+
+#define data_word ((unsigned short *)data)
+#define header_word ((unsigned short *)header)
+
+ sp = &stack[NET_FILTER_STACK_DEPTH];
+ fp = &infp->filter[1]; /* filter[0] used for flags */
+ fpe = infp->filter_end;
+
+ *sp = TRUE;
+
+ while (fp < fpe) {
+ arg = *fp++;
+ op = NETF_OP(arg);
+ arg = NETF_ARG(arg);
+
+ switch (arg) {
+ case NETF_NOPUSH:
+ arg = *sp++;
+ break;
+ case NETF_PUSHZERO:
+ arg = 0;
+ break;
+ case NETF_PUSHLIT:
+ arg = *fp++;
+ break;
+ case NETF_PUSHIND:
+ arg = *sp++;
+ if (arg >= data_count)
+ return FALSE;
+ arg = data_word[arg];
+ break;
+ case NETF_PUSHHDRIND:
+ arg = *sp++;
+ if (arg >= NET_HDW_HDR_MAX/sizeof(unsigned short))
+ return FALSE;
+ arg = header_word[arg];
+ break;
+ default:
+ if (arg >= NETF_PUSHSTK) {
+ arg = sp[arg - NETF_PUSHSTK];
+ }
+ else if (arg >= NETF_PUSHHDR) {
+ arg = header_word[arg - NETF_PUSHHDR];
+ }
+ else {
+ arg -= NETF_PUSHWORD;
+ if (arg >= data_count)
+ return FALSE;
+ arg = data_word[arg];
+ }
+ break;
+
+ }
+ switch (op) {
+ case NETF_OP(NETF_NOP):
+ *--sp = arg;
+ break;
+ case NETF_OP(NETF_AND):
+ *sp &= arg;
+ break;
+ case NETF_OP(NETF_OR):
+ *sp |= arg;
+ break;
+ case NETF_OP(NETF_XOR):
+ *sp ^= arg;
+ break;
+ case NETF_OP(NETF_EQ):
+ *sp = (*sp == arg);
+ break;
+ case NETF_OP(NETF_NEQ):
+ *sp = (*sp != arg);
+ break;
+ case NETF_OP(NETF_LT):
+ *sp = (*sp < arg);
+ break;
+ case NETF_OP(NETF_LE):
+ *sp = (*sp <= arg);
+ break;
+ case NETF_OP(NETF_GT):
+ *sp = (*sp > arg);
+ break;
+ case NETF_OP(NETF_GE):
+ *sp = (*sp >= arg);
+ break;
+ case NETF_OP(NETF_COR):
+ if (*sp++ == arg)
+ return (TRUE);
+ break;
+ case NETF_OP(NETF_CAND):
+ if (*sp++ != arg)
+ return (FALSE);
+ break;
+ case NETF_OP(NETF_CNOR):
+ if (*sp++ == arg)
+ return (FALSE);
+ break;
+ case NETF_OP(NETF_CNAND):
+ if (*sp++ != arg)
+ return (TRUE);
+ break;
+ case NETF_OP(NETF_LSH):
+ *sp <<= arg;
+ break;
+ case NETF_OP(NETF_RSH):
+ *sp >>= arg;
+ break;
+ case NETF_OP(NETF_ADD):
+ *sp += arg;
+ break;
+ case NETF_OP(NETF_SUB):
+ *sp -= arg;
+ break;
+ }
+ }
+ return ((*sp) ? TRUE : FALSE);
+
+#undef data_word
+#undef header_word
+}
+
+/*
+ * Check filter for invalid operations or stack over/under-flow.
+ */
+static boolean_t
+parse_net_filter(
+ filter_t *filter,
+ unsigned int count)
+{
+ int sp;
+ filter_t *fpe = &filter[count];
+ filter_t op, arg;
+
+ /*
+ * count is at least 1, and filter[0] is used for flags.
+ */
+ filter++;
+ sp = NET_FILTER_STACK_DEPTH;
+
+ for (; filter < fpe; filter++) {
+ op = NETF_OP(*filter);
+ arg = NETF_ARG(*filter);
+
+ switch (arg) {
+ case NETF_NOPUSH:
+ break;
+ case NETF_PUSHZERO:
+ sp--;
+ break;
+ case NETF_PUSHLIT:
+ filter++;
+ if (filter >= fpe)
+ return (FALSE); /* literal value not in filter */
+ sp--;
+ break;
+ case NETF_PUSHIND:
+ case NETF_PUSHHDRIND:
+ break;
+ default:
+ if (arg >= NETF_PUSHSTK) {
+ if (arg - NETF_PUSHSTK + sp > NET_FILTER_STACK_DEPTH)
+ return FALSE;
+ }
+ else if (arg >= NETF_PUSHHDR) {
+ if (arg - NETF_PUSHHDR >=
+ NET_HDW_HDR_MAX/sizeof(unsigned short))
+ return FALSE;
+ }
+ /* else... cannot check for packet bounds
+ without packet */
+ sp--;
+ break;
+ }
+ if (sp < 2) {
+ return (FALSE); /* stack overflow */
+ }
+ if (op == NETF_OP(NETF_NOP))
+ continue;
+
+ /*
+ * all non-NOP operators are binary.
+ */
+ if (sp > NET_MAX_FILTER-2)
+ return (FALSE);
+
+ sp++;
+ switch (op) {
+ case NETF_OP(NETF_AND):
+ case NETF_OP(NETF_OR):
+ case NETF_OP(NETF_XOR):
+ case NETF_OP(NETF_EQ):
+ case NETF_OP(NETF_NEQ):
+ case NETF_OP(NETF_LT):
+ case NETF_OP(NETF_LE):
+ case NETF_OP(NETF_GT):
+ case NETF_OP(NETF_GE):
+ case NETF_OP(NETF_COR):
+ case NETF_OP(NETF_CAND):
+ case NETF_OP(NETF_CNOR):
+ case NETF_OP(NETF_CNAND):
+ case NETF_OP(NETF_LSH):
+ case NETF_OP(NETF_RSH):
+ case NETF_OP(NETF_ADD):
+ case NETF_OP(NETF_SUB):
+ break;
+ default:
+ return (FALSE);
+ }
+ }
+ return (TRUE);
+}
+
+/*
+ * Set a filter for a network interface.
+ *
+ * We are given a naked send right for the rcv_port.
+ * If we are successful, we must consume that right.
+ */
+io_return_t
+net_set_filter(
+ struct ifnet *ifp,
+ ipc_port_t rcv_port,
+ int priority,
+ filter_t *filter,
+ unsigned int filter_count)
+{
+ int filter_bytes;
+ bpf_insn_t match;
+ net_rcv_port_t infp, my_infp;
+ net_rcv_port_t nextfp;
+ net_hash_header_t hhp;
+ net_hash_entry_t entp;
+ net_hash_entry_t *head, nextentp;
+ queue_entry_t dead_infp, dead_entp;
+ int i;
+ int ret, is_new_infp;
+ io_return_t rval;
+ boolean_t in, out;
+
+ /* Initialize hash_entp to NULL to quiet GCC
+ * warning about uninitialized variable. hash_entp is only
+ * used when match != 0; in that case it is properly initialized
+ * by kmem_cache_alloc().
+ */
+ net_hash_entry_t hash_entp = NULL;
+
+ /*
+ * Check the filter syntax.
+ */
+
+ filter_bytes = CSPF_BYTES(filter_count);
+ match = (bpf_insn_t) 0;
+
+ if (filter_count == 0) {
+ return (D_INVALID_OPERATION);
+ } else if (!((filter[0] & NETF_IN) || (filter[0] & NETF_OUT))) {
+ return (D_INVALID_OPERATION); /* NETF_IN or NETF_OUT required */
+ } else if ((filter[0] & NETF_TYPE_MASK) == NETF_BPF) {
+ ret = bpf_validate((bpf_insn_t)filter, filter_bytes, &match);
+ if (!ret)
+ return (D_INVALID_OPERATION);
+ } else if ((filter[0] & NETF_TYPE_MASK) == 0) {
+ if (!parse_net_filter(filter, filter_count))
+ return (D_INVALID_OPERATION);
+ } else {
+ return (D_INVALID_OPERATION);
+ }
+
+ rval = D_SUCCESS; /* default return value */
+ dead_infp = dead_entp = 0;
+
+ if (match == (bpf_insn_t) 0) {
+ /*
+ * If there is no match instruction, we allocate
+ * a normal packet filter structure.
+ */
+ my_infp = (net_rcv_port_t) kmem_cache_alloc(&net_rcv_cache);
+ my_infp->rcv_port = rcv_port;
+ is_new_infp = TRUE;
+ } else {
+ /*
+ * If there is a match instruction, we assume there will be
+ * multiple sessions with a common substructure and allocate
+ * a hash table to deal with them.
+ */
+ my_infp = 0;
+ hash_entp = (net_hash_entry_t) kmem_cache_alloc(&net_hash_entry_cache);
+ is_new_infp = FALSE;
+ }
+
+ /*
+ * Look for an existing filter on the same reply port.
+ * Look for filters with dead ports (for GC).
+ * Look for a filter with the same code except KEY insns.
+ */
+ void check_filter_list(queue_head_t *if_port_list)
+ {
+ FILTER_ITERATE(if_port_list, infp, nextfp,
+ (if_port_list == &ifp->if_rcv_port_list)
+ ? &infp->input : &infp->output)
+ {
+ if (infp->rcv_port == MACH_PORT_NULL) {
+ if (match != 0
+ && infp->priority == priority
+ && my_infp == 0
+ && (infp->filter_end - infp->filter) == filter_count
+ && bpf_eq((bpf_insn_t)infp->filter,
+ (bpf_insn_t)filter, filter_bytes))
+ my_infp = infp;
+
+ for (i = 0; i < NET_HASH_SIZE; i++) {
+ head = &((net_hash_header_t) infp)->table[i];
+ if (*head == 0)
+ continue;
+
+ /*
+ * Check each hash entry to make sure the
+ * destination port is still valid. Remove
+ * any invalid entries.
+ */
+ entp = *head;
+ do {
+ nextentp = (net_hash_entry_t) entp->he_next;
+
+ /* checked without
+ ip_lock(entp->rcv_port) */
+ if (entp->rcv_port == rcv_port
+ || !IP_VALID(entp->rcv_port)
+ || !ip_active(entp->rcv_port)) {
+ ret = hash_ent_remove (ifp,
+ (net_hash_header_t)infp,
+ (my_infp == infp),
+ head,
+ entp,
+ &dead_entp);
+ if (ret)
+ goto hash_loop_end;
+ }
+
+ entp = nextentp;
+ /* While test checks head since hash_ent_remove
+ might modify it.
+ */
+ } while (*head != 0 && entp != *head);
+ }
+
+ hash_loop_end:
+ ;
+ } else if (infp->rcv_port == rcv_port
+ || !IP_VALID(infp->rcv_port)
+ || !ip_active(infp->rcv_port)) {
+
+ /* Remove the old filter from lists */
+ if (infp->filter[0] & NETF_IN)
+ queue_remove(&ifp->if_rcv_port_list, infp,
+ net_rcv_port_t, input);
+ if (infp->filter[0] & NETF_OUT)
+ queue_remove(&ifp->if_snd_port_list, infp,
+ net_rcv_port_t, output);
+
+ ENQUEUE_DEAD(dead_infp, infp, input);
+ }
+ }
+ FILTER_ITERATE_END
+ }
+
+ in = (filter[0] & NETF_IN) != 0;
+ out = (filter[0] & NETF_OUT) != 0;
+
+ simple_lock(&ifp->if_rcv_port_list_lock);
+ simple_lock(&ifp->if_snd_port_list_lock);
+
+ if (in)
+ check_filter_list(&ifp->if_rcv_port_list);
+ if (out)
+ check_filter_list(&ifp->if_snd_port_list);
+
+ if (my_infp == 0) {
+ /* Allocate a dummy infp */
+ simple_lock(&net_hash_header_lock);
+ for (i = 0; i < N_NET_HASH; i++) {
+ if (filter_hash_header[i].n_keys == 0)
+ break;
+ }
+ if (i == N_NET_HASH) {
+ simple_unlock(&net_hash_header_lock);
+ simple_unlock(&ifp->if_snd_port_list_lock);
+ simple_unlock(&ifp->if_rcv_port_list_lock);
+
+ ipc_port_release_send(rcv_port);
+ if (match != 0)
+ kmem_cache_free(&net_hash_entry_cache,
+ (vm_offset_t)hash_entp);
+
+ rval = D_NO_MEMORY;
+ goto clean_and_return;
+ }
+
+ hhp = &filter_hash_header[i];
+ hhp->n_keys = match->jt;
+ simple_unlock(&net_hash_header_lock);
+
+ hhp->ref_count = 0;
+ for (i = 0; i < NET_HASH_SIZE; i++)
+ hhp->table[i] = 0;
+
+ my_infp = (net_rcv_port_t)hhp;
+ my_infp->rcv_port = MACH_PORT_NULL; /* indication of dummy */
+ is_new_infp = TRUE;
+ }
+
+ if (is_new_infp) {
+ my_infp->priority = priority;
+ my_infp->rcv_count = 0;
+
+ /* Copy filter program. */
+ memcpy (my_infp->filter, filter, filter_bytes);
+ my_infp->filter_end =
+ (filter_t *)((char *)my_infp->filter + filter_bytes);
+
+ if (match == 0) {
+ my_infp->rcv_qlimit = net_add_q_info(rcv_port);
+ } else {
+ my_infp->rcv_qlimit = 0;
+ }
+
+ /* Insert my_infp according to priority */
+ if (in) {
+ queue_iterate(&ifp->if_rcv_port_list, infp, net_rcv_port_t, input)
+ if (priority > infp->priority)
+ break;
+
+ queue_enter(&ifp->if_rcv_port_list, my_infp, net_rcv_port_t, input);
+ }
+
+ if (out) {
+ queue_iterate(&ifp->if_snd_port_list, infp, net_rcv_port_t, output)
+ if (priority > infp->priority)
+ break;
+
+ queue_enter(&ifp->if_snd_port_list, my_infp, net_rcv_port_t, output);
+ }
+ }
+
+ if (match != 0)
+ { /* Insert to hash list */
+ net_hash_entry_t *p;
+
+ hash_entp->rcv_port = rcv_port;
+ for (i = 0; i < match->jt; i++) /* match->jt is n_keys */
+ hash_entp->keys[i] = match[i+1].k;
+ p = &((net_hash_header_t)my_infp)->
+ table[bpf_hash(match->jt, hash_entp->keys)];
+
+ /* Not checking for the same key values */
+ if (*p == 0) {
+ queue_init (&hash_entp->chain);
+ *p = hash_entp;
+ } else {
+ enqueue_tail(&(*p)->chain, &hash_entp->chain);
+ }
+
+ ((net_hash_header_t)my_infp)->ref_count++;
+ hash_entp->rcv_qlimit = net_add_q_info(rcv_port);
+ }
+
+ simple_unlock(&ifp->if_snd_port_list_lock);
+ simple_unlock(&ifp->if_rcv_port_list_lock);
+
+clean_and_return:
+ /* No locks are held at this point. */
+
+ if (dead_infp != 0)
+ net_free_dead_infp(dead_infp);
+ if (dead_entp != 0)
+ net_free_dead_entp(dead_entp);
+
+ return (rval);
+}
+
+/*
+ * Other network operations
+ */
+/*
+ * Return status information about a network interface.
+ *
+ * flavor NET_STATUS:  fill in a struct net_status (packet size limits,
+ *                     header format/size, address size, flags).
+ * flavor NET_ADDRESS: copy the hardware address into 'status', padded
+ *                     with zero bytes to a whole number of ints, then
+ *                     convert each int to network byte order.
+ *
+ * On entry *count is the capacity of 'status' in ints; on success it is
+ * set to the number of ints written.  Returns D_INVALID_OPERATION for an
+ * unknown flavor or an undersized buffer, D_SUCCESS otherwise.
+ */
+io_return_t
+net_getstat(
+        struct ifnet    *ifp,
+        dev_flavor_t    flavor,
+        dev_status_t    status,         /* pointer to OUT array */
+        mach_msg_type_number_t  *count)         /* OUT */
+{
+        switch (flavor) {
+            case NET_STATUS:
+            {
+                struct net_status *ns = (struct net_status *)status;
+
+                if (*count < NET_STATUS_COUNT)
+                    return (D_INVALID_OPERATION);
+
+                ns->min_packet_size = ifp->if_header_size;
+                ns->max_packet_size = ifp->if_header_size + ifp->if_mtu;
+                ns->header_format   = ifp->if_header_format;
+                ns->header_size     = ifp->if_header_size;
+                ns->address_size    = ifp->if_address_size;
+                ns->flags           = ifp->if_flags;
+                ns->mapped_size     = 0;
+
+                *count = NET_STATUS_COUNT;
+                break;
+            }
+            case NET_ADDRESS:
+            {
+                int     addr_byte_count;
+                int     addr_int_count;
+                int     i;
+
+                /* Round the address size up to whole ints. */
+                addr_byte_count = ifp->if_address_size;
+                addr_int_count = (addr_byte_count + (sizeof(int)-1))
+                        / sizeof(int);
+
+                if (*count < addr_int_count)
+                {
+/* XXX debug hack. */
+printf ("net_getstat: count: %d, addr_int_count: %d\n",
+        *count, addr_int_count);
+                    return (D_INVALID_OPERATION);
+                }
+
+                memcpy(status, ifp->if_address, addr_byte_count);
+                /* Zero the pad bytes of the last int, if any. */
+                if (addr_byte_count < addr_int_count * sizeof(int))
+                    memset((char *)status + addr_byte_count, 0,
+                           (addr_int_count * sizeof(int)
+                            - addr_byte_count));
+
+                /* Convert each int of the address to network order. */
+                for (i = 0; i < addr_int_count; i++) {
+                    int word;
+
+                    word = status[i];
+                    status[i] = htonl(word);
+                }
+                *count = addr_int_count;
+                break;
+            }
+            default:
+                return (D_INVALID_OPERATION);
+        }
+        return (D_SUCCESS);
+}
+
+/*
+ * Queue an outgoing packet (ior) on interface ifp and kick the device's
+ * start routine.
+ *
+ * Returns D_DEVICE_DOWN if the interface is not both up and running,
+ * D_INVALID_SIZE if io_count is outside [header_size, header_size + mtu],
+ * any error from device_write_get (wiring the caller's memory), or
+ * D_IO_QUEUED on success: the I/O completes asynchronously when the
+ * driver finishes transmitting.
+ */
+io_return_t
+net_write(
+        struct ifnet    *ifp,
+        net_write_start_device_fn start,
+        io_req_t        ior)
+{
+        spl_t   s;
+        kern_return_t   rc;
+        boolean_t       wait;
+
+        /*
+         * Reject the write if the interface is down.
+         */
+        if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+            return (D_DEVICE_DOWN);
+
+        /*
+         * Reject the write if the packet is too large or too small.
+         */
+        if (ior->io_count < ifp->if_header_size ||
+            ior->io_count > ifp->if_header_size + ifp->if_mtu)
+            return (D_INVALID_SIZE);
+
+        /*
+         * Wire down the memory.
+         */
+
+        rc = device_write_get(ior, &wait);
+        if (rc != KERN_SUCCESS)
+            return (rc);
+
+        /*
+         * Network interfaces can't cope with VM continuations.
+         * If wait is set, just panic.
+         */
+        if (wait) {
+                panic("net_write: VM continuation");
+        }
+
+        /*
+         * Queue the packet on the output queue, and
+         * start the device.
+         */
+        s = splimp();
+        IF_ENQUEUE(&ifp->if_snd, ior);
+        (*start)(ifp->if_unit);
+        splx(s);
+
+        return (D_IO_QUEUED);
+}
+
+/*
+ * Initialize the whole package.
+ */
+/*
+ * Initialize the network I/O package: the object caches for filter and
+ * hash-entry structures, the kmsg buffer size/accounting globals, and
+ * the free/high/low incoming-packet queues with their locks.  Called
+ * once at boot, before any interface registers filters.
+ */
+void
+net_io_init(void)
+{
+        vm_size_t       size;
+
+        size = sizeof(struct net_rcv_port);
+        kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
+                        NULL, 0);
+
+        size = sizeof(struct net_hash_entry);
+        kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
+                        NULL, 0);
+
+        /* A network kmsg must hold a whole net_rcv_msg plus kmsg overhead,
+           rounded up to a page. */
+        size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
+        net_kmsg_size = round_page(size);
+
+        /*
+         * net_kmsg_max caps the number of buffers
+         * we are willing to allocate. By default,
+         * we allow for net_queue_free_min plus
+         * the queue limit for each filter.
+         * (Added as the filters are added.)
+         */
+
+        simple_lock_init(&net_kmsg_total_lock);
+        if (net_kmsg_max == 0)
+            net_kmsg_max = net_queue_free_min;
+
+        simple_lock_init(&net_queue_free_lock);
+        ipc_kmsg_queue_init(&net_queue_free);
+
+        simple_lock_init(&net_queue_lock);
+        ipc_kmsg_queue_init(&net_queue_high);
+        ipc_kmsg_queue_init(&net_queue_low);
+
+        simple_lock_init(&net_hash_header_lock);
+}
+
+
+/* ======== BPF: Berkeley Packet Filter ======== */
+
+/*-
+ * Copyright (c) 1990-1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.c 7.5 (Berkeley) 7/15/91
+ */
+
+/*
+ * Architectures listed here fault on misaligned memory access and must
+ * extract multi-byte fields a byte at a time; the others can load
+ * directly and byte-swap with ntohs/ntohl.  Both variants read the
+ * field in network (big-endian) byte order.
+ */
+#if defined(sparc) || defined(mips) || defined(ibm032) || defined(alpha)
+#define BPF_ALIGN
+#endif
+
+#ifndef BPF_ALIGN
+#define EXTRACT_SHORT(p)        ((u_short)ntohs(*(u_short *)p))
+#define EXTRACT_LONG(p)         (ntohl(*(u_int *)p))
+#else
+#define EXTRACT_SHORT(p)\
+        ((u_short)\
+                ((u_short)*((u_char *)p+0)<<8|\
+                 (u_short)*((u_char *)p+1)<<0))
+#define EXTRACT_LONG(p)\
+                ((u_int)*((u_char *)p+0)<<24|\
+                 (u_int)*((u_char *)p+1)<<16|\
+                 (u_int)*((u_char *)p+2)<<8|\
+                 (u_int)*((u_char *)p+3)<<0)
+#endif
+
+/*
+ * Execute the filter program starting at pc on the packet p
+ * wirelen is the length of the original packet
+ * buflen is the amount of data present
+ */
+
+int
+bpf_do_filter(
+ net_rcv_port_t infp,
+ char * p, /* packet data */
+ unsigned int wirelen, /* data_count (in bytes) */
+ char * header,
+ unsigned int hlen, /* header len (in bytes) */
+ net_hash_entry_t **hash_headpp,
+ net_hash_entry_t *entpp) /* out */
+{
+ bpf_insn_t pc, pc_end;
+ unsigned int buflen;
+
+ unsigned int A, X;
+ int k;
+ unsigned int mem[BPF_MEMWORDS];
+
+ /* Generic pointer to either HEADER or P according to the specified offset. */
+ char *data = NULL;
+
+ pc = ((bpf_insn_t) infp->filter) + 1;
+ /* filter[0].code is (NETF_BPF | flags) */
+ pc_end = (bpf_insn_t)infp->filter_end;
+ buflen = NET_RCV_MAX;
+ *entpp = 0; /* default */
+
+ A = 0;
+ X = 0;
+
+ for (; pc < pc_end; ++pc) {
+ switch (pc->code) {
+
+ default:
+#ifdef KERNEL
+ return 0;
+#else
+ abort();
+#endif
+ case BPF_RET|BPF_K:
+ if (infp->rcv_port == MACH_PORT_NULL &&
+ *entpp == 0) {
+ return 0;
+ }
+ return ((u_int)pc->k <= wirelen) ?
+ pc->k : wirelen;
+
+ case BPF_RET|BPF_A:
+ if (infp->rcv_port == MACH_PORT_NULL &&
+ *entpp == 0) {
+ return 0;
+ }
+ return ((u_int)A <= wirelen) ?
+ A : wirelen;
+
+ case BPF_RET|BPF_MATCH_IMM:
+ if (bpf_match ((net_hash_header_t)infp, pc->jt, mem,
+ hash_headpp, entpp)) {
+ return ((u_int)pc->k <= wirelen) ?
+ pc->k : wirelen;
+ }
+ return 0;
+
+ case BPF_LD|BPF_W|BPF_ABS:
+ k = pc->k;
+
+ load_word:
+ if ((u_int)k + sizeof(int) <= hlen)
+ data = header;
+ else if ((u_int)k + sizeof(int) <= buflen) {
+ k -= hlen;
+ data = p;
+ } else
+ return 0;
+
+#ifdef BPF_ALIGN
+ if (((int)(data + k) & 3) != 0)
+ A = EXTRACT_LONG(&data[k]);
+ else
+#endif
+ A = ntohl(*(int *)(data + k));
+ continue;
+
+ case BPF_LD|BPF_H|BPF_ABS:
+ k = pc->k;
+
+ load_half:
+ if ((u_int)k + sizeof(short) <= hlen)
+ data = header;
+ else if ((u_int)k + sizeof(short) <= buflen) {
+ k -= hlen;
+ data = p;
+ } else
+ return 0;
+
+ A = EXTRACT_SHORT(&data[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_ABS:
+ k = pc->k;
+
+ load_byte:
+ if ((u_int)k < hlen)
+ data = header;
+ else if ((u_int)k < buflen) {
+ data = p;
+ k -= hlen;
+ } else
+ return 0;
+
+ A = data[k];
+ continue;
+
+ case BPF_LD|BPF_W|BPF_LEN:
+ A = wirelen;
+ continue;
+
+ case BPF_LDX|BPF_W|BPF_LEN:
+ X = wirelen;
+ continue;
+
+ case BPF_LD|BPF_W|BPF_IND:
+ k = X + pc->k;
+ goto load_word;
+
+ case BPF_LD|BPF_H|BPF_IND:
+ k = X + pc->k;
+ goto load_half;
+
+ case BPF_LD|BPF_B|BPF_IND:
+ k = X + pc->k;
+ goto load_byte;
+
+ case BPF_LDX|BPF_MSH|BPF_B:
+ k = pc->k;
+ if (k < hlen)
+ data = header;
+ else if (k < buflen) {
+ data = p;
+ k -= hlen;
+ } else
+ return 0;
+
+ X = (data[k] & 0xf) << 2;
+ continue;
+
+ case BPF_LD|BPF_IMM:
+ A = pc->k;
+ continue;
+
+ case BPF_LDX|BPF_IMM:
+ X = pc->k;
+ continue;
+
+ case BPF_LD|BPF_MEM:
+ A = mem[pc->k];
+ continue;
+
+ case BPF_LDX|BPF_MEM:
+ X = mem[pc->k];
+ continue;
+
+ case BPF_ST:
+ mem[pc->k] = A;
+ continue;
+
+ case BPF_STX:
+ mem[pc->k] = X;
+ continue;
+
+ case BPF_JMP|BPF_JA:
+ pc += pc->k;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_K:
+ pc += (A > pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_K:
+ pc += (A >= pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ pc += (A == pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_K:
+ pc += (A & pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_X:
+ pc += (A > X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_X:
+ pc += (A >= X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ pc += (A == X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_X:
+ pc += (A & X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_X:
+ if (X == 0)
+ return 0;
+ A /= X;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_K:
+ A += pc->k;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_K:
+ A -= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_K:
+ A *= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_K:
+ A /= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_K:
+ A &= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_K:
+ A |= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_K:
+ A <<= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_K:
+ A >>= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_NEG:
+ A = -A;
+ continue;
+
+ case BPF_MISC|BPF_TAX:
+ X = A;
+ continue;
+
+ case BPF_MISC|BPF_TXA:
+ A = X;
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Return 1 if the 'f' is a valid filter program without a MATCH
+ * instruction. Return 2 if it is a valid filter program with a MATCH
+ * instruction. Otherwise, return 0.
+ * The constraints are that each jump be forward and to a valid
+ * code. The code must terminate with either an accept or reject.
+ * 'valid' is an array for use by the routine (it must be at least
+ * 'len' bytes long).
+ *
+ * The kernel needs to be able to verify an application's filter code.
+ * Otherwise, a bogus program could easily crash the system.
+ */
+int
+bpf_validate(
+ bpf_insn_t f,
+ int bytes,
+ bpf_insn_t *match)
+{
+ int i, j, len;
+ bpf_insn_t p;
+
+ len = BPF_BYTES2LEN(bytes);
+
+ /*
+ * f[0].code is already checked to be (NETF_BPF | flags).
+ * So skip f[0].
+ */
+
+ for (i = 1; i < len; ++i) {
+ /*
+ * Check that that jumps are forward, and within
+ * the code block.
+ */
+ p = &f[i];
+ if (BPF_CLASS(p->code) == BPF_JMP) {
+ int from = i + 1;
+
+ if (BPF_OP(p->code) == BPF_JA) {
+ if (from + p->k >= len)
+ return 0;
+ }
+ else if (from + p->jt >= len || from + p->jf >= len)
+ return 0;
+ }
+ /*
+ * Check that memory operations use valid addresses.
+ */
+ if ((BPF_CLASS(p->code) == BPF_ST ||
+ (BPF_CLASS(p->code) == BPF_LD &&
+ (p->code & 0xe0) == BPF_MEM)) &&
+ (p->k >= BPF_MEMWORDS || p->k < 0))
+ return 0;
+ /*
+ * Check for constant division by 0.
+ */
+ if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0)
+ return 0;
+ /*
+ * Check for match instruction.
+ * Only one match instruction per filter is allowed.
+ */
+ if (p->code == (BPF_RET|BPF_MATCH_IMM)) {
+ if (*match != 0 ||
+ p->jt == 0 ||
+ p->jt > N_NET_HASH_KEYS)
+ return 0;
+ i += p->jt; /* skip keys */
+ if (i + 1 > len)
+ return 0;
+
+ for (j = 1; j <= p->jt; j++) {
+ if (p[j].code != (BPF_MISC|BPF_KEY))
+ return 0;
+ }
+
+ *match = p;
+ }
+ }
+ if (BPF_CLASS(f[len - 1].code) == BPF_RET)
+ return ((*match == 0) ? 1 : 2);
+ else
+ return 0;
+}
+
+int
+bpf_eq(
+ bpf_insn_t f1,
+ bpf_insn_t f2,
+ int bytes)
+{
+ int count;
+
+ count = BPF_BYTES2LEN(bytes);
+ for (; count--; f1++, f2++) {
+ if (!BPF_INSN_EQ(f1, f2)) {
+ if ( f1->code == (BPF_MISC|BPF_KEY) &&
+ f2->code == (BPF_MISC|BPF_KEY) )
+ continue;
+ return FALSE;
+ }
+ };
+ return TRUE;
+}
+
+unsigned int
+bpf_hash (int n,
+ const unsigned int *keys)
+{
+ unsigned int hval = 0;
+
+ while (n--) {
+ hval += *keys++;
+ }
+ return (hval % NET_HASH_SIZE);
+}
+
+
+/*
+ * Look up the key vector KEYS in filter HASH's table.  Returns TRUE and
+ * sets *entpp to the entry whose keys are all equal to KEYS.  Whenever
+ * the key count matches, *hash_headpp is set to the bucket for KEYS
+ * (even on FALSE), so the caller may insert a new entry there.
+ */
+int
+bpf_match (net_hash_header_t hash,
+           int n_keys,
+           const unsigned int *keys,
+           net_hash_entry_t **hash_headpp,
+           net_hash_entry_t *entpp)
+{
+        net_hash_entry_t head, entp;
+        int i;
+
+        /* A filter with a different key count cannot match. */
+        if (n_keys != hash->n_keys)
+                return FALSE;
+
+        *hash_headpp = &hash->table[bpf_hash(n_keys, keys)];
+        head = **hash_headpp;
+
+        if (head == 0)
+                return FALSE;
+
+        HASH_ITERATE (head, entp)
+        {
+                /* Entry matches only if every key word is equal. */
+                for (i = 0; i < n_keys; i++) {
+                        if (keys[i] != entp->keys[i])
+                                break;
+                }
+                if (i == n_keys) {
+                        *entpp = entp;
+                        return TRUE;
+                }
+        }
+        HASH_ITERATE_END (head, entp)
+        return FALSE;
+}
+
+
+/*
+ * Removes a hash entry (ENTP) from its queue (HEAD).
+ * If the reference count of filter (HP) becomes zero and not USED,
+ * HP is removed from the corresponding port lists and is freed.
+ */
+
+int
+hash_ent_remove(
+        struct ifnet    *ifp,
+        net_hash_header_t hp,
+        int             used,
+        net_hash_entry_t *head,
+        net_hash_entry_t entp,
+        queue_entry_t   *dead_p)
+{
+        hp->ref_count--;
+
+        if (*head == entp) {
+                /* entp is the bucket head. */
+                if (queue_empty((queue_t) entp)) {
+                        /* It was the only entry: empty the bucket. */
+                        *head = 0;
+                        ENQUEUE_DEAD(*dead_p, entp, chain);
+                        if (hp->ref_count == 0 && !used) {
+                                /*
+                                 * Last entry of an otherwise unused
+                                 * filter: unlink the filter from the
+                                 * interface's port lists, mark it free
+                                 * (n_keys = 0) and tell the caller HP
+                                 * is gone by returning TRUE.
+                                 */
+                                if (((net_rcv_port_t)hp)->filter[0] & NETF_IN)
+                                        queue_remove(&ifp->if_rcv_port_list,
+                                                     (net_rcv_port_t)hp,
+                                                     net_rcv_port_t, input);
+                                if (((net_rcv_port_t)hp)->filter[0] & NETF_OUT)
+                                        queue_remove(&ifp->if_snd_port_list,
+                                                     (net_rcv_port_t)hp,
+                                                     net_rcv_port_t, output);
+                                hp->n_keys = 0;
+                                return TRUE;
+                        }
+                        return FALSE;
+                } else {
+                        /* Advance the bucket head past entp. */
+                        *head = (net_hash_entry_t)queue_next((queue_t) entp);
+                }
+        }
+
+        /* Unlink entp from its chain and queue it for later freeing. */
+        remqueue((queue_t)*head, (queue_entry_t)entp);
+        ENQUEUE_DEAD(*dead_p, entp, chain);
+        return FALSE;
+}
+
+/*
+ * Account for a new receive port: bump net_queue_free_min and raise
+ * net_kmsg_max by the port's queue limit plus one so enough kmsg
+ * buffers can be kept around for it.  Returns the port's queue limit
+ * (0 if the port is invalid or inactive); the caller saves it so
+ * net_del_q_info can undo this accounting later.
+ */
+int
+net_add_q_info(ipc_port_t rcv_port)
+{
+        mach_port_msgcount_t qlimit = 0;
+
+        /*
+         * We use a new port, so increase net_queue_free_min
+         * and net_kmsg_max to allow for more queued messages.
+         */
+
+        if (IP_VALID(rcv_port)) {
+            ip_lock(rcv_port);
+            if (ip_active(rcv_port))
+                qlimit = rcv_port->ip_qlimit;
+            ip_unlock(rcv_port);
+        }
+
+        simple_lock(&net_kmsg_total_lock);
+        net_queue_free_min++;
+        net_kmsg_max += qlimit + 1;
+        simple_unlock(&net_kmsg_total_lock);
+
+        return (int)qlimit;
+}
+
+/*
+ * Undo net_add_q_info's accounting for a departing port; qlimit must be
+ * the value net_add_q_info returned for that port.
+ */
+static void
+net_del_q_info(int qlimit)
+{
+        simple_lock(&net_kmsg_total_lock);
+        net_queue_free_min--;
+        net_kmsg_max -= qlimit + 1;
+        simple_unlock(&net_kmsg_total_lock);
+}
+
+
+/*
+ * net_free_dead_infp (dead_infp)
+ * queue_entry_t dead_infp; list of dead net_rcv_port_t.
+ *
+ * Deallocates dead net_rcv_port_t.
+ * No locks should be held when called.
+ */
+void
+net_free_dead_infp(queue_entry_t dead_infp)
+{
+ net_rcv_port_t infp, nextfp;
+
+ for (infp = (net_rcv_port_t) dead_infp; infp != 0; infp = nextfp)
+ {
+ nextfp = (net_rcv_port_t) queue_next(&infp->input);
+ ipc_port_release_send(infp->rcv_port);
+ net_del_q_info(infp->rcv_qlimit);
+ kmem_cache_free(&net_rcv_cache, (vm_offset_t) infp);
+ }
+}
+
+/*
+ * net_free_dead_entp (dead_entp)
+ * queue_entry_t dead_entp; list of dead net_hash_entry_t.
+ *
+ * Deallocates dead net_hash_entry_t.
+ * No locks should be held when called.
+ */
+void
+net_free_dead_entp(queue_entry_t dead_entp)
+{
+ net_hash_entry_t entp, nextentp;
+
+ for (entp = (net_hash_entry_t)dead_entp; entp != 0; entp = nextentp)
+ {
+ nextentp = (net_hash_entry_t) queue_next(&entp->chain);
+
+ ipc_port_release_send(entp->rcv_port);
+ net_del_q_info(entp->rcv_qlimit);
+ kmem_cache_free(&net_hash_entry_cache, (vm_offset_t) entp);
+ }
+}
+
diff --git a/device/net_io.h b/device/net_io.h
new file mode 100644
index 0000000..c9af85e
--- /dev/null
+++ b/device/net_io.h
@@ -0,0 +1,164 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ *	Date: 	11/89
+ */
+
+#ifndef _DEVICE_NET_IO_H_
+#define _DEVICE_NET_IO_H_
+
+/*
+ * Utilities for playing with network messages.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <ipc/ipc_kmsg.h>
+
+#include <kern/macros.h>
+#include <kern/lock.h>
+#include <kern/kalloc.h>
+
+#include <device/if_hdr.h>
+#include <device/io_req.h>
+#include <device/net_status.h>
+
+struct net_rcv_port;
+typedef struct net_rcv_port *net_rcv_port_t;
+
+struct net_hash_entry;
+typedef struct net_hash_entry *net_hash_entry_t;
+
+struct net_hash_header;
+typedef struct net_hash_header *net_hash_header_t;
+
+/*
+ * A network packet is wrapped in a kernel message while in
+ * the kernel.
+ */
+
+#define net_kmsg(kmsg) ((net_rcv_msg_t)&(kmsg)->ikm_header)
+
+/*
+ * Interrupt routines may allocate and free net_kmsgs with these
+ * functions. net_kmsg_get may return IKM_NULL.
+ */
+
+extern ipc_kmsg_t net_kmsg_get(void);
+extern void net_kmsg_put(ipc_kmsg_t);
+
+/*
+ * Network utility routines.
+ */
+
+extern void net_ast(void);
+extern void net_packet(struct ifnet *, ipc_kmsg_t, unsigned int, boolean_t);
+extern void net_filter(ipc_kmsg_t, ipc_kmsg_queue_t);
+extern io_return_t net_getstat(struct ifnet *, dev_flavor_t, dev_status_t,
+ mach_msg_type_number_t *);
+
+typedef int (*net_write_start_device_fn)(short);
+extern io_return_t net_write(struct ifnet *, net_write_start_device_fn, io_req_t);
+
+/*
+ * Non-interrupt code may allocate and free net_kmsgs with these functions.
+ */
+
+extern vm_size_t net_kmsg_size;
+
+extern void net_kmsg_collect (void);
+
+extern void net_io_init(void);
+extern void net_thread(void) __attribute__ ((noreturn));
+
+#define net_kmsg_alloc() ((ipc_kmsg_t) kalloc(net_kmsg_size))
+#define net_kmsg_free(kmsg) kfree((vm_offset_t) (kmsg), net_kmsg_size)
+
+extern unsigned int ntohl(unsigned int);
+extern unsigned short int ntohs(unsigned short int);
+extern unsigned int htonl(unsigned int);
+extern unsigned short int htons(unsigned short int);
+
+unsigned int bpf_hash(int n, const unsigned int *keys);
+
+extern boolean_t
+net_do_filter(
+ net_rcv_port_t infp,
+ const char * data,
+ unsigned int data_count,
+ const char * header); /* CSPF */
+
+io_return_t
+net_set_filter(
+ struct ifnet *ifp,
+ ipc_port_t rcv_port,
+ int priority,
+ filter_t *filter,
+ unsigned int filter_count);
+
+extern int
+bpf_do_filter(
+ net_rcv_port_t infp,
+ char * p,
+ unsigned int wirelen,
+ char * header,
+ unsigned int hlen,
+ net_hash_entry_t **hash_headpp,
+ net_hash_entry_t *entpp); /* BPF */
+
+int hash_ent_remove(
+ struct ifnet *ifp,
+ net_hash_header_t hp,
+ int used,
+ net_hash_entry_t *head,
+ net_hash_entry_t entp,
+ queue_entry_t *dead_p);
+
+void net_free_dead_infp(queue_entry_t dead_infp);
+void net_free_dead_entp (queue_entry_t dead_entp);
+
+int bpf_validate(
+ bpf_insn_t f,
+ int bytes,
+ bpf_insn_t *match);
+
+int bpf_eq(
+ bpf_insn_t f1,
+ bpf_insn_t f2,
+ int bytes);
+
+int net_add_q_info(ipc_port_t rcv_port);
+
+int bpf_match (
+ net_hash_header_t hash,
+ int n_keys,
+ const unsigned int *keys,
+ net_hash_entry_t **hash_headpp,
+ net_hash_entry_t *entpp);
+
+boolean_t ethernet_priority(const ipc_kmsg_t kmsg);
+
+#endif /* _DEVICE_NET_IO_H_ */
diff --git a/device/param.h b/device/param.h
new file mode 100644
index 0000000..41b4793
--- /dev/null
+++ b/device/param.h
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#ifndef _DEVICE_PARAM_H_
+#define _DEVICE_PARAM_H_
+
+/*
+ * Compatibility definitions for disk IO.
+ */
+
+/*
+ * Disk devices do all IO in 512-byte blocks.
+ */
+#define DEV_BSIZE       512
+
+/*
+ * Conversion between bytes and disk blocks.
+ * The shift count 9 is log2(DEV_BSIZE); keep the two in sync.
+ */
+#define btodb(byte_offset)      ((byte_offset) >> 9)
+#define dbtob(block_number)     ((block_number) << 9)
+
+#endif /* _DEVICE_PARAM_H_ */
diff --git a/device/subrs.c b/device/subrs.c
new file mode 100644
index 0000000..6e90a81
--- /dev/null
+++ b/device/subrs.c
@@ -0,0 +1,86 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Random device subroutines and stubs.
+ */
+
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <device/buf.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/subrs.h>
+
+
+
+/*
+ * Convert Ethernet address to printable (loggable) representation.
+ */
/*
 * Convert Ethernet address to printable (loggable) representation.
 * Returns a pointer to a static buffer ("xx:xx:xx:xx:xx:xx"), which is
 * overwritten by the next call.
 */
char *
ether_sprintf(const u_char *ap)
{
        static char etherbuf[18];
        static char digits[] = "0123456789abcdef";
        char *out = etherbuf;
        int octet;

        for (octet = 0; octet < 6; octet++) {
                u_char byte = *ap++;

                *out++ = digits[byte >> 4];
                *out++ = digits[byte & 0xf];
                *out++ = ':';
        }
        out[-1] = 0;            /* overwrite the trailing ':' */
        return (etherbuf);
}
+
+/*
+ * Initialize send and receive queues on an interface.
+ */
+void if_init_queues(struct ifnet *ifp)
+{
+ IFQ_INIT(&ifp->if_snd);
+ queue_init(&ifp->if_rcv_port_list);
+ queue_init(&ifp->if_snd_port_list);
+ simple_lock_init(&ifp->if_rcv_port_list_lock);
+ simple_lock_init(&ifp->if_snd_port_list_lock);
+}
+
+
+/*
+ * Compatibility with BSD device drivers.
+ */
+/*
+ * BSD-compatibility sleep: block the current thread on 'channel' until
+ * a matching wakeup().  The BSD 'priority' argument is accepted but
+ * ignored, and the wait is never interruptible.
+ */
+void sleep(vm_offset_t channel, int priority)
+{
+        assert_wait((event_t) channel, FALSE);  /* not interruptible XXX */
+        thread_block((void (*)()) 0);
+}
+
+/*
+ * BSD-compatibility wakeup: unblock every thread sleep()ing on 'channel'.
+ */
+void wakeup(vm_offset_t channel)
+{
+        thread_wakeup((event_t) channel);
+}
diff --git a/device/subrs.h b/device/subrs.h
new file mode 100644
index 0000000..60ea651
--- /dev/null
+++ b/device/subrs.h
@@ -0,0 +1,37 @@
+/*
+ * Random device functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Random device functions.
+ *
+ */
+
+#ifndef _SUBRS_H_
+#define _SUBRS_H_
+
+#include <mach/std_types.h>
+#include <device/if_hdr.h>
+
+extern void if_init_queues(struct ifnet *ifp);
+
+extern void sleep (vm_offset_t channel, int priority);
+extern void wakeup (vm_offset_t channel);
+
+#endif /* _SUBRS_H_ */
diff --git a/device/tty.h b/device/tty.h
new file mode 100644
index 0000000..3f8b2f6
--- /dev/null
+++ b/device/tty.h
@@ -0,0 +1,237 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ *
+ * Compatibility TTY structure for existing TTY device drivers.
+ */
+
+#ifndef _DEVICE_TTY_H_
+#define _DEVICE_TTY_H_
+
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <mach/port.h>
+
+#include <device/device_types.h>
+#include <device/tty_status.h>
+#include <device/cirbuf.h>
+#include <device/io_req.h>
+
+struct tty {
+ decl_simple_lock_irq_data(,t_lock) /* Shall be taken at spltty only */
+ struct cirbuf t_inq; /* input buffer */
+ struct cirbuf t_outq; /* output buffer */
+ char * t_addr; /* device pointer */
+ int t_dev; /* device number */
+ void (*t_start)(struct tty *);
+ /* routine to start output */
+#define t_oproc t_start
+ void (*t_stop)(struct tty *, int);
+ /* routine to stop output */
+ int (*t_mctl)(struct tty *, int, int);
+ /* (optional) routine to control
+ modem signals */
+ unsigned char t_ispeed; /* input speed */
+ unsigned char t_ospeed; /* output speed */
+ char t_breakc; /* character to deliver when 'break'
+ condition received */
+ int t_flags; /* mode flags */
+ int t_state; /* current state */
+ int t_line; /* fake line discipline number,
+ for old drivers - always 0 */
+ queue_head_t t_delayed_read; /* pending read requests */
+ queue_head_t t_delayed_write;/* pending write requests */
+ queue_head_t t_delayed_open; /* pending open requests */
+
+/*
+ * Items beyond this point should be removed to device-specific
+ * extension structures.
+ */
+ io_return_t (*t_getstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t *); /* routine to get status */
+ io_return_t (*t_setstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t); /* routine to set status */
+ dev_ops_t t_tops; /* another device to possibly
+ push through */
+};
+typedef struct tty *tty_t;
+
+/*
+ * Common TTY service routines
+ */
+extern io_return_t char_open(
+ int dev,
+ struct tty * tp,
+ dev_mode_t mode,
+ io_req_t ior);
+
+extern io_return_t char_read(
+ struct tty * tp,
+ io_req_t ior);
+
+extern io_return_t char_write(
+ struct tty * tp,
+ io_req_t ior);
+
+extern void ttyinput(
+ unsigned int c,
+ struct tty * tp);
+
+extern void ttyinput_many(
+ struct tty * tp,
+ char * chars,
+ int count);
+
+extern boolean_t ttymodem(
+ struct tty * tp,
+ boolean_t carrier_up);
+
+extern void tty_cts(
+ struct tty * tp,
+ boolean_t cts_up);
+
+extern void tty_queue_completion(
+ queue_t queue);
+#define tt_open_wakeup(tp) \
+ (tty_queue_completion(&(tp)->t_delayed_open))
+#define tt_write_wakeup(tp) \
+ (tty_queue_completion(&(tp)->t_delayed_write))
+
+extern void ttychars(
+ struct tty * tp);
+
+#define TTMINBUF 90
+
+extern short tthiwat[NSPEEDS], ttlowat[NSPEEDS];
+#define TTHIWAT(tp) tthiwat[(tp)->t_ospeed]
+#define TTLOWAT(tp) ttlowat[(tp)->t_ospeed]
+
+extern io_return_t tty_get_status(
+ struct tty * tp,
+ dev_flavor_t flavor,
+ int * data,
+ natural_t * count);
+
+extern io_return_t tty_set_status(
+ struct tty * tp,
+ dev_flavor_t flavor,
+ int * data,
+ natural_t count);
+
+extern void tty_flush(
+ struct tty * tp,
+ int rw);
+
+extern void ttrstrt(
+ struct tty * tp);
+
+extern void ttstart(
+ struct tty * tp);
+
+extern void ttyclose(
+ struct tty * tp);
+
+extern boolean_t tty_portdeath(
+ struct tty * tp,
+ ipc_port_t port);
+
+/* internal state bits */
+#define TS_INIT 0x00000001 /* tty structure initialized */
+#define TS_TIMEOUT 0x00000002 /* delay timeout in progress */
+#define TS_WOPEN 0x00000004 /* waiting for open to complete */
+#define TS_ISOPEN 0x00000008 /* device is open */
+#define TS_FLUSH 0x00000010 /* outq has been flushed during DMA */
+#define TS_CARR_ON 0x00000020 /* software copy of carrier-present */
+#define TS_BUSY 0x00000040 /* output in progress */
+#define TS_ASLEEP 0x00000080 /* wakeup when output done */
+
+#define TS_TTSTOP 0x00000100 /* output stopped by ctl-s */
+#define TS_HUPCLS 0x00000200 /* hang up upon last close */
+#define TS_TBLOCK 0x00000400 /* tandem queue blocked */
+
+#define TS_NBIO 0x00001000 /* tty in non-blocking mode */
+#define TS_ONDELAY 0x00002000 /* device is open; software copy of
+ * carrier is not present */
+#define TS_MIN 0x00004000 /* buffer input chars, if possible */
+#define TS_MIN_TO 0x00008000 /* timeout for the above is active */
+
+#define TS_OUT 0x00010000 /* tty in use for dialout only */
+#define TS_RTS_DOWN 0x00020000 /* modem pls stop */
+
+#define TS_TRANSLATE 0x00100000 /* translation device enabled */
+#define TS_KDB 0x00200000 /* should enter kdb on ALT */
+
+#define TS_MIN_TO_RCV 0x00400000 /* character received during
+ receive timeout interval */
+
+/* flags - old names defined in terms of new ones */
+
+#define TANDEM TF_TANDEM
+#define ODDP TF_ODDP
+#define EVENP TF_EVENP
+#define ANYP (ODDP|EVENP)
+#define MDMBUF TF_MDMBUF
+#define LITOUT TF_LITOUT
+#define NOHANG TF_NOHANG
+
+#define ECHO TF_ECHO
+#define CRMOD TF_CRMOD
+#define XTABS TF_XTABS
+
+/* these are here only to let old code compile - they are never set */
+#define RAW LITOUT
+#define PASS8 LITOUT
+
+/*
+ * Hardware bits.
+ * SHOULD NOT BE HERE.
+ */
+#define DONE 0200
+#define IENABLE 0100
+
+/*
+ * Modem control commands.
+ */
+#define DMSET 0
+#define DMBIS 1
+#define DMBIC 2
+#define DMGET 3
+
+/*
+ * Fake 'line discipline' switch, for the benefit of old code
+ * that wants to call through it.
+ */
+struct ldisc_switch {
+ int (*l_read) (struct tty *, io_req_t); /* read */
+ int (*l_write)(struct tty *, io_req_t); /* write */
+ void (*l_rint) (unsigned int, struct tty *); /* character input */
+ boolean_t (*l_modem)(struct tty *, boolean_t); /* modem change */
+ void (*l_start)(struct tty *); /* start output */
+};
+
+extern struct ldisc_switch linesw[];
+
+#endif /* _DEVICE_TTY_H_ */
diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 0000000..829355b
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,4 @@
+/*.info
+/*.info-*
+/stamp-vti
+/version.texi
diff --git a/doc/Makefrag.am b/doc/Makefrag.am
new file mode 100644
index 0000000..490ebf5
--- /dev/null
+++ b/doc/Makefrag.am
@@ -0,0 +1,119 @@
+# Makefile fragment for the documentation.
+
+# Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# The GNU Mach Reference Manual.
+#
+
+info_TEXINFOS += \
+ doc/mach.texi
+mach_TEXINFOS = \
+ doc/fdl.texi doc/gpl.texi
+EXTRA_DIST += \
+ $(mach_TEXINFOS)
+
+#
+# Web pages of the GNU Mach Reference Manual.
+#
+
+web = doc/web
+
+# Prepare a checkout in `$(web)/' of the web pages of the GNU Mach Reference
+# Manual, using the same account that was used for the source code. Then
+# install the potentially updated files into `$(web)/'.
+.PHONY: $(web)
+$(web):
+ if test -d $@/CVS; then :; else \
+ mkdir -p $@ $@/CVS && \
+ sed -e s%cvsroot%web% \
+ < $(top_srcdir)/CVS/Root \
+ > $@/CVS/Root && \
+ echo hurd/gnumach-doc \
+ > $@/CVS/Repository && \
+ : > $@/CVS/Entries; \
+ fi
+ cd $@/ && \
+ cvs update
+ $(MAKE) $(AM_MAKEFLAGS) \
+ html \
+ ps \
+ pdf
+
+# Update the files, if such a checkout exists.
+html-local:
+ if test -d $(web); then \
+ ( cd $(web)/ && \
+ for f in *.html; do \
+ if test -f ../../$(HTMLS)/"$$f"; then :; else \
+ echo "\`$$f' isn't anymore. Removing." && \
+ rm "$$f" && \
+ cvs remove "$$f"; \
+ fi; \
+ done ) && \
+ cp $(HTMLS)/*.html $(web)/ && \
+ cd $(web)/ && \
+ { cvs add *.html || :; }; \
+ fi
+ps-local:
+ if test -d $(web); then \
+ ( cd $(web)/ && \
+ for f in *.ps; do \
+ case \ $(PSS)\ in \
+ \ doc/"$$f"\ ) :;; \
+ *) echo "\`$$f' isn't anymore. Removing." && \
+ rm -f "$$f" "$$f".ps && \
+ cvs remove "$$f" "$$f".ps ;; \
+ esac; \
+ done ) && \
+ cp $(PSS) $(web)/ && \
+ cd $(web)/ && \
+ for f in *.ps; do \
+ gzip -9 < "$$f" > "$$f".gz; \
+ done && \
+ { cvs add *.ps *.ps.gz || :; }; \
+ fi
+pdf-local:
+ if test -d $(web); then \
+ ( cd $(web)/ && \
+ for f in *.pdf; do \
+ case \ $(PDFS)\ in \
+ \ doc/"$$f"\ ) :;; \
+ *) echo "\`$$f' isn't anymore. Removing." && \
+ rm "$$f" && \
+ cvs remove "$$f";; \
+ esac; \
+ done ) && \
+ cp $(PDFS) $(web)/ && \
+ cd $(web)/ && \
+ { cvs add *.pdf || :; }; \
+ fi
+# TODO. There doesn't seem to be a hook or `-local' target suitable for this.
+$(srcdir)/doc/version.texi: $(srcdir)/doc/stamp-vti
+ @if test -d $(web); then :; \
+ elif grep 2> /dev/null \
+ -q :ext: $(top_srcdir)/CVS/Root 2> /dev/null && \
+ grep 2> /dev/null \
+ -q ^Tgnumach-1-branch$$ $(top_srcdir)/CVS/Tag; \
+ then \
+ echo "*** As it seems that you'd be allowed to check in the" \
+ "possible resulting fixes, you may consider running" \
+ " \`make $(web)' to get a checkout of the web pages of the" \
+ "GNU Mach manual and have possible changes installed into" \
+ "\`$(web)/', ready for checking them in in there." && \
+ sleep 2; \
+ fi
diff --git a/doc/fdl.texi b/doc/fdl.texi
new file mode 100644
index 0000000..9c6d9af
--- /dev/null
+++ b/doc/fdl.texi
@@ -0,0 +1,452 @@
+
+@node GNU Free Documentation License
+@appendixsec GNU Free Documentation License
+
+@cindex FDL, GNU Free Documentation License
+@center Version 1.2, November 2002
+
+@display
+Copyright @copyright{} 2000,2001,2002 Free Software Foundation, Inc.
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+@end display
+
+@enumerate 0
+@item
+PREAMBLE
+
+The purpose of this License is to make a manual, textbook, or other
+functional and useful document @dfn{free} in the sense of freedom: to
+assure everyone the effective freedom to copy and redistribute it,
+with or without modifying it, either commercially or noncommercially.
+Secondarily, this License preserves for the author and publisher a way
+to get credit for their work, while not being considered responsible
+for modifications made by others.
+
+This License is a kind of ``copyleft'', which means that derivative
+works of the document must themselves be free in the same sense. It
+complements the GNU General Public License, which is a copyleft
+license designed for free software.
+
+We have designed this License in order to use it for manuals for free
+software, because free software needs free documentation: a free
+program should come with manuals providing the same freedoms that the
+software does. But this License is not limited to software manuals;
+it can be used for any textual work, regardless of subject matter or
+whether it is published as a printed book. We recommend this License
+principally for works whose purpose is instruction or reference.
+
+@item
+APPLICABILITY AND DEFINITIONS
+
+This License applies to any manual or other work, in any medium, that
+contains a notice placed by the copyright holder saying it can be
+distributed under the terms of this License. Such a notice grants a
+world-wide, royalty-free license, unlimited in duration, to use that
+work under the conditions stated herein. The ``Document'', below,
+refers to any such manual or work. Any member of the public is a
+licensee, and is addressed as ``you''. You accept the license if you
+copy, modify or distribute the work in a way requiring permission
+under copyright law.
+
+A ``Modified Version'' of the Document means any work containing the
+Document or a portion of it, either copied verbatim, or with
+modifications and/or translated into another language.
+
+A ``Secondary Section'' is a named appendix or a front-matter section
+of the Document that deals exclusively with the relationship of the
+publishers or authors of the Document to the Document's overall
+subject (or to related matters) and contains nothing that could fall
+directly within that overall subject. (Thus, if the Document is in
+part a textbook of mathematics, a Secondary Section may not explain
+any mathematics.) The relationship could be a matter of historical
+connection with the subject or with related matters, or of legal,
+commercial, philosophical, ethical or political position regarding
+them.
+
+The ``Invariant Sections'' are certain Secondary Sections whose titles
+are designated, as being those of Invariant Sections, in the notice
+that says that the Document is released under this License. If a
+section does not fit the above definition of Secondary then it is not
+allowed to be designated as Invariant. The Document may contain zero
+Invariant Sections. If the Document does not identify any Invariant
+Sections then there are none.
+
+The ``Cover Texts'' are certain short passages of text that are listed,
+as Front-Cover Texts or Back-Cover Texts, in the notice that says that
+the Document is released under this License. A Front-Cover Text may
+be at most 5 words, and a Back-Cover Text may be at most 25 words.
+
+A ``Transparent'' copy of the Document means a machine-readable copy,
+represented in a format whose specification is available to the
+general public, that is suitable for revising the document
+straightforwardly with generic text editors or (for images composed of
+pixels) generic paint programs or (for drawings) some widely available
+drawing editor, and that is suitable for input to text formatters or
+for automatic translation to a variety of formats suitable for input
+to text formatters. A copy made in an otherwise Transparent file
+format whose markup, or absence of markup, has been arranged to thwart
+or discourage subsequent modification by readers is not Transparent.
+An image format is not Transparent if used for any substantial amount
+of text. A copy that is not ``Transparent'' is called ``Opaque''.
+
+Examples of suitable formats for Transparent copies include plain
+@sc{ascii} without markup, Texinfo input format, La@TeX{} input
+format, @acronym{SGML} or @acronym{XML} using a publicly available
+@acronym{DTD}, and standard-conforming simple @acronym{HTML},
+PostScript or @acronym{PDF} designed for human modification. Examples
+of transparent image formats include @acronym{PNG}, @acronym{XCF} and
+@acronym{JPG}. Opaque formats include proprietary formats that can be
+read and edited only by proprietary word processors, @acronym{SGML} or
+@acronym{XML} for which the @acronym{DTD} and/or processing tools are
+not generally available, and the machine-generated @acronym{HTML},
+PostScript or @acronym{PDF} produced by some word processors for
+output purposes only.
+
+The ``Title Page'' means, for a printed book, the title page itself,
+plus such following pages as are needed to hold, legibly, the material
+this License requires to appear in the title page. For works in
+formats which do not have any title page as such, ``Title Page'' means
+the text near the most prominent appearance of the work's title,
+preceding the beginning of the body of the text.
+
+A section ``Entitled XYZ'' means a named subunit of the Document whose
+title either is precisely XYZ or contains XYZ in parentheses following
+text that translates XYZ in another language. (Here XYZ stands for a
+specific section name mentioned below, such as ``Acknowledgements'',
+``Dedications'', ``Endorsements'', or ``History''.) To ``Preserve the Title''
+of such a section when you modify the Document means that it remains a
+section ``Entitled XYZ'' according to this definition.
+
+The Document may include Warranty Disclaimers next to the notice which
+states that this License applies to the Document. These Warranty
+Disclaimers are considered to be included by reference in this
+License, but only as regards disclaiming warranties: any other
+implication that these Warranty Disclaimers may have is void and has
+no effect on the meaning of this License.
+
+@item
+VERBATIM COPYING
+
+You may copy and distribute the Document in any medium, either
+commercially or noncommercially, provided that this License, the
+copyright notices, and the license notice saying this License applies
+to the Document are reproduced in all copies, and that you add no other
+conditions whatsoever to those of this License. You may not use
+technical measures to obstruct or control the reading or further
+copying of the copies you make or distribute. However, you may accept
+compensation in exchange for copies. If you distribute a large enough
+number of copies you must also follow the conditions in section 3.
+
+You may also lend copies, under the same conditions stated above, and
+you may publicly display copies.
+
+@item
+COPYING IN QUANTITY
+
+If you publish printed copies (or copies in media that commonly have
+printed covers) of the Document, numbering more than 100, and the
+Document's license notice requires Cover Texts, you must enclose the
+copies in covers that carry, clearly and legibly, all these Cover
+Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
+the back cover. Both covers must also clearly and legibly identify
+you as the publisher of these copies. The front cover must present
+the full title with all words of the title equally prominent and
+visible. You may add other material on the covers in addition.
+Copying with changes limited to the covers, as long as they preserve
+the title of the Document and satisfy these conditions, can be treated
+as verbatim copying in other respects.
+
+If the required texts for either cover are too voluminous to fit
+legibly, you should put the first ones listed (as many as fit
+reasonably) on the actual cover, and continue the rest onto adjacent
+pages.
+
+If you publish or distribute Opaque copies of the Document numbering
+more than 100, you must either include a machine-readable Transparent
+copy along with each Opaque copy, or state in or with each Opaque copy
+a computer-network location from which the general network-using
+public has access to download using public-standard network protocols
+a complete Transparent copy of the Document, free of added material.
+If you use the latter option, you must take reasonably prudent steps,
+when you begin distribution of Opaque copies in quantity, to ensure
+that this Transparent copy will remain thus accessible at the stated
+location until at least one year after the last time you distribute an
+Opaque copy (directly or through your agents or retailers) of that
+edition to the public.
+
+It is requested, but not required, that you contact the authors of the
+Document well before redistributing any large number of copies, to give
+them a chance to provide you with an updated version of the Document.
+
+@item
+MODIFICATIONS
+
+You may copy and distribute a Modified Version of the Document under
+the conditions of sections 2 and 3 above, provided that you release
+the Modified Version under precisely this License, with the Modified
+Version filling the role of the Document, thus licensing distribution
+and modification of the Modified Version to whoever possesses a copy
+of it. In addition, you must do these things in the Modified Version:
+
+@enumerate A
+@item
+Use in the Title Page (and on the covers, if any) a title distinct
+from that of the Document, and from those of previous versions
+(which should, if there were any, be listed in the History section
+of the Document). You may use the same title as a previous version
+if the original publisher of that version gives permission.
+
+@item
+List on the Title Page, as authors, one or more persons or entities
+responsible for authorship of the modifications in the Modified
+Version, together with at least five of the principal authors of the
+Document (all of its principal authors, if it has fewer than five),
+unless they release you from this requirement.
+
+@item
+State on the Title page the name of the publisher of the
+Modified Version, as the publisher.
+
+@item
+Preserve all the copyright notices of the Document.
+
+@item
+Add an appropriate copyright notice for your modifications
+adjacent to the other copyright notices.
+
+@item
+Include, immediately after the copyright notices, a license notice
+giving the public permission to use the Modified Version under the
+terms of this License, in the form shown in the Addendum below.
+
+@item
+Preserve in that license notice the full lists of Invariant Sections
+and required Cover Texts given in the Document's license notice.
+
+@item
+Include an unaltered copy of this License.
+
+@item
+Preserve the section Entitled ``History'', Preserve its Title, and add
+to it an item stating at least the title, year, new authors, and
+publisher of the Modified Version as given on the Title Page. If
+there is no section Entitled ``History'' in the Document, create one
+stating the title, year, authors, and publisher of the Document as
+given on its Title Page, then add an item describing the Modified
+Version as stated in the previous sentence.
+
+@item
+Preserve the network location, if any, given in the Document for
+public access to a Transparent copy of the Document, and likewise
+the network locations given in the Document for previous versions
+it was based on. These may be placed in the ``History'' section.
+You may omit a network location for a work that was published at
+least four years before the Document itself, or if the original
+publisher of the version it refers to gives permission.
+
+@item
+For any section Entitled ``Acknowledgements'' or ``Dedications'', Preserve
+the Title of the section, and preserve in the section all the
+substance and tone of each of the contributor acknowledgements and/or
+dedications given therein.
+
+@item
+Preserve all the Invariant Sections of the Document,
+unaltered in their text and in their titles. Section numbers
+or the equivalent are not considered part of the section titles.
+
+@item
+Delete any section Entitled ``Endorsements''. Such a section
+may not be included in the Modified Version.
+
+@item
+Do not retitle any existing section to be Entitled ``Endorsements'' or
+to conflict in title with any Invariant Section.
+
+@item
+Preserve any Warranty Disclaimers.
+@end enumerate
+
+If the Modified Version includes new front-matter sections or
+appendices that qualify as Secondary Sections and contain no material
+copied from the Document, you may at your option designate some or all
+of these sections as invariant. To do this, add their titles to the
+list of Invariant Sections in the Modified Version's license notice.
+These titles must be distinct from any other section titles.
+
+You may add a section Entitled ``Endorsements'', provided it contains
+nothing but endorsements of your Modified Version by various
+parties---for example, statements of peer review or that the text has
+been approved by an organization as the authoritative definition of a
+standard.
+
+You may add a passage of up to five words as a Front-Cover Text, and a
+passage of up to 25 words as a Back-Cover Text, to the end of the list
+of Cover Texts in the Modified Version. Only one passage of
+Front-Cover Text and one of Back-Cover Text may be added by (or
+through arrangements made by) any one entity. If the Document already
+includes a cover text for the same cover, previously added by you or
+by arrangement made by the same entity you are acting on behalf of,
+you may not add another; but you may replace the old one, on explicit
+permission from the previous publisher that added the old one.
+
+The author(s) and publisher(s) of the Document do not by this License
+give permission to use their names for publicity for or to assert or
+imply endorsement of any Modified Version.
+
+@item
+COMBINING DOCUMENTS
+
+You may combine the Document with other documents released under this
+License, under the terms defined in section 4 above for modified
+versions, provided that you include in the combination all of the
+Invariant Sections of all of the original documents, unmodified, and
+list them all as Invariant Sections of your combined work in its
+license notice, and that you preserve all their Warranty Disclaimers.
+
+The combined work need only contain one copy of this License, and
+multiple identical Invariant Sections may be replaced with a single
+copy. If there are multiple Invariant Sections with the same name but
+different contents, make the title of each such section unique by
+adding at the end of it, in parentheses, the name of the original
+author or publisher of that section if known, or else a unique number.
+Make the same adjustment to the section titles in the list of
+Invariant Sections in the license notice of the combined work.
+
+In the combination, you must combine any sections Entitled ``History''
+in the various original documents, forming one section Entitled
+``History''; likewise combine any sections Entitled ``Acknowledgements'',
+and any sections Entitled ``Dedications''. You must delete all
+sections Entitled ``Endorsements.''
+
+@item
+COLLECTIONS OF DOCUMENTS
+
+You may make a collection consisting of the Document and other documents
+released under this License, and replace the individual copies of this
+License in the various documents with a single copy that is included in
+the collection, provided that you follow the rules of this License for
+verbatim copying of each of the documents in all other respects.
+
+You may extract a single document from such a collection, and distribute
+it individually under this License, provided you insert a copy of this
+License into the extracted document, and follow this License in all
+other respects regarding verbatim copying of that document.
+
+@item
+AGGREGATION WITH INDEPENDENT WORKS
+
+A compilation of the Document or its derivatives with other separate
+and independent documents or works, in or on a volume of a storage or
+distribution medium, is called an ``aggregate'' if the copyright
+resulting from the compilation is not used to limit the legal rights
+of the compilation's users beyond what the individual works permit.
+When the Document is included in an aggregate, this License does not
+apply to the other works in the aggregate which are not themselves
+derivative works of the Document.
+
+If the Cover Text requirement of section 3 is applicable to these
+copies of the Document, then if the Document is less than one half of
+the entire aggregate, the Document's Cover Texts may be placed on
+covers that bracket the Document within the aggregate, or the
+electronic equivalent of covers if the Document is in electronic form.
+Otherwise they must appear on printed covers that bracket the whole
+aggregate.
+
+@item
+TRANSLATION
+
+Translation is considered a kind of modification, so you may
+distribute translations of the Document under the terms of section 4.
+Replacing Invariant Sections with translations requires special
+permission from their copyright holders, but you may include
+translations of some or all Invariant Sections in addition to the
+original versions of these Invariant Sections. You may include a
+translation of this License, and all the license notices in the
+Document, and any Warranty Disclaimers, provided that you also include
+the original English version of this License and the original versions
+of those notices and disclaimers. In case of a disagreement between
+the translation and the original version of this License or a notice
+or disclaimer, the original version will prevail.
+
+If a section in the Document is Entitled ``Acknowledgements'',
+``Dedications'', or ``History'', the requirement (section 4) to Preserve
+its Title (section 1) will typically require changing the actual
+title.
+
+@item
+TERMINATION
+
+You may not copy, modify, sublicense, or distribute the Document except
+as expressly provided for under this License. Any other attempt to
+copy, modify, sublicense or distribute the Document is void, and will
+automatically terminate your rights under this License. However,
+parties who have received copies, or rights, from you under this
+License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+@item
+FUTURE REVISIONS OF THIS LICENSE
+
+The Free Software Foundation may publish new, revised versions
+of the GNU Free Documentation License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns. See
+@uref{http://www.gnu.org/copyleft/}.
+
+Each version of the License is given a distinguishing version number.
+If the Document specifies that a particular numbered version of this
+License ``or any later version'' applies to it, you have the option of
+following the terms and conditions either of that specified version or
+of any later version that has been published (not as a draft) by the
+Free Software Foundation. If the Document does not specify a version
+number of this License, you may choose any version ever published (not
+as a draft) by the Free Software Foundation.
+@end enumerate
+
+@page
+@heading ADDENDUM: How to use this License for your documents
+
+To use this License in a document you have written, include a copy of
+the License in the document and put the following copyright and
+license notices just after the title page:
+
+@smallexample
+@group
+ Copyright (C) @var{year} @var{your name}.
+ Permission is granted to copy, distribute and/or modify this document
+ under the terms of the GNU Free Documentation License, Version 1.2
+ or any later version published by the Free Software Foundation;
+ with no Invariant Sections, no Front-Cover Texts, and no Back-Cover
+ Texts. A copy of the license is included in the section entitled ``GNU
+ Free Documentation License''.
+@end group
+@end smallexample
+
+If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
+replace the ``with...Texts.'' line with this:
+
+@smallexample
+@group
+ with the Invariant Sections being @var{list their titles}, with
+ the Front-Cover Texts being @var{list}, and with the Back-Cover Texts
+ being @var{list}.
+@end group
+@end smallexample
+
+If you have Invariant Sections without Cover Texts, or some other
+combination of the three, merge those two alternatives to suit the
+situation.
+
+If your document contains nontrivial examples of program code, we
+recommend releasing these examples in parallel under your choice of
+free software license, such as the GNU General Public License,
+to permit their use in free software.
+
+@c Local Variables:
+@c ispell-local-pdict: "ispell-dict"
+@c End:
+
diff --git a/doc/gpl.texi b/doc/gpl.texi
new file mode 100644
index 0000000..c1f025e
--- /dev/null
+++ b/doc/gpl.texi
@@ -0,0 +1,383 @@
+@node Copying
+@unnumbered GNU General Public License
+@cindex GPL, GNU General Public License
+@center Version 2, June 1991
+
+@c This file is intended to be included in another file.
+
+@display
+Copyright @copyright{} 1989, 1991 Free Software Foundation, Inc.
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+@end display
+
+@heading Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software---to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+@heading TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+@enumerate 0
+@item
+This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The ``Program'', below,
+refers to any such program or work, and a ``work based on the Program''
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term ``modification''.) Each licensee is addressed as ``you''.
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+@item
+You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+@item
+You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+@enumerate a
+@item
+You must cause the modified files to carry prominent notices
+stating that you changed the files and the date of any change.
+
+@item
+You must cause any work that you distribute or publish, that in
+whole or in part contains or is derived from the Program or any
+part thereof, to be licensed as a whole at no charge to all third
+parties under the terms of this License.
+
+@item
+If the modified program normally reads commands interactively
+when run, you must cause it, when started running for such
+interactive use in the most ordinary way, to print or display an
+announcement including an appropriate copyright notice and a
+notice that there is no warranty (or else, saying that you provide
+a warranty) and that users may redistribute the program under
+these conditions, and telling the user how to view a copy of this
+License. (Exception: if the Program itself is interactive but
+does not normally print such an announcement, your work based on
+the Program is not required to print an announcement.)
+@end enumerate
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+@item
+You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+@enumerate a
+@item
+Accompany it with the complete corresponding machine-readable
+source code, which must be distributed under the terms of Sections
+1 and 2 above on a medium customarily used for software interchange; or,
+
+@item
+Accompany it with a written offer, valid for at least three
+years, to give any third party, for a charge no more than your
+cost of physically performing source distribution, a complete
+machine-readable copy of the corresponding source code, to be
+distributed under the terms of Sections 1 and 2 above on a medium
+customarily used for software interchange; or,
+
+@item
+Accompany it with the information you received as to the offer
+to distribute corresponding source code. (This alternative is
+allowed only for noncommercial distribution and only if you
+received the program in object code or executable form with such
+an offer, in accord with Subsection b above.)
+@end enumerate
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+@item
+You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+@item
+You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+@item
+Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+@item
+If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+@item
+If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+@item
+The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and ``any
+later version'', you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+@item
+If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+@item NO WARRANTY
+
+BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM ``AS IS'' WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+@item
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+@end enumerate
+
+@iftex
+@heading END OF TERMS AND CONDITIONS
+@end iftex
+@ifinfo
+@center END OF TERMS AND CONDITIONS
+
+@end ifinfo
+
+@page
+@heading Appendix: How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the ``copyright'' line and a pointer to where the full notice is found.
+
+@smallexample
+@var{one line to give the program's name and a brief idea of what it does.}
+Copyright (C) @var{yyyy} @var{name of author}
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+@end smallexample
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+@smallexample
+Gnomovision version 69, Copyright (C) @var{year} @var{name of author}
+Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+This is free software, and you are welcome to redistribute it
+under certain conditions; type `show c' for details.
+@end smallexample
+
+The hypothetical commands @samp{show w} and @samp{show c} should show
+the appropriate parts of the General Public License. Of course, the
+commands you use may be called something other than @samp{show w} and
+@samp{show c}; they could even be mouse-clicks or menu items---whatever
+suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a ``copyright disclaimer'' for the program, if
+necessary. Here is a sample; alter the names:
+
+@example
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+`Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+@var{signature of Ty Coon}, 1 April 1989
+Ty Coon, President of Vice
+@end example
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/doc/mach.texi b/doc/mach.texi
new file mode 100644
index 0000000..f85288e
--- /dev/null
+++ b/doc/mach.texi
@@ -0,0 +1,7417 @@
+\input texinfo @c -*- Texinfo -*-
+@documentencoding ISO-8859-1
+@setfilename mach.info
+@settitle The GNU Mach Reference Manual
+@setchapternewpage odd
+
+@comment Tell install-info what to do.
+@dircategory Kernel
+@direntry
+* GNUMach: (mach). Using and programming the GNU Mach microkernel.
+@end direntry
+
+@c Should have a glossary.
+@c Unify some of our indices.
+@syncodeindex pg cp
+@syncodeindex vr fn
+@syncodeindex tp fn
+
+@c Get the Mach version we are documenting.
+@include version.texi
+@set EDITION 0.4
+@c @set ISBN X-XXXXXX-XX-X
+
+@copying
+This file documents the GNU Mach microkernel.
+
+This is edition @value{EDITION}, last updated on @value{UPDATED}, of @cite{The
+GNU Mach Reference Manual}, for version @value{VERSION}.
+
+Copyright @copyright{} 2001, 2002, 2006, 2007, 2008 Free Software
+Foundation, Inc.
+
+@c @sp 2
+@c Published by the Free Software Foundation @*
+@c 59 Temple Place -- Suite 330, @*
+@c Boston, MA 02111-1307 USA @*
+@c ISBN @value{ISBN} @*
+
+@quotation
+Permission is granted to copy, distribute and/or modify this document
+under the terms of the GNU Free Documentation License, Version 1.2 or
+any later version published by the Free Software Foundation; with no
+Invariant Section, with no Front-Cover Texts, and with no Back-Cover
+Texts. A copy of the license is included in the section entitled
+``GNU Free Documentation License''.
+
+This work is based on manual pages under the following copyright and license:
+
+@noindent
+Mach Operating System@*
+Copyright @copyright{} 1991,1990 Carnegie Mellon University@*
+All Rights Reserved.
+
+Permission to use, copy, modify and distribute this software and its
+documentation is hereby granted, provided that both the copyright
+notice and this permission notice appear in all copies of the
+software, derivative works or modified versions, and any portions
+thereof, and that both notices appear in supporting documentation.
+
+CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+@end quotation
+@end copying
+
+@iftex
+@shorttitlepage The GNU Mach Reference Manual
+@end iftex
+@titlepage
+@center @titlefont{The GNU Mach}
+@sp 1
+@center @titlefont{Reference Manual}
+@sp 2
+@center Marcus Brinkmann
+@center with
+@center Gordon Matzigkeit, Gibran Hasnaoui,
+@center Robert V. Baron, Richard P. Draves, Mary R. Thompson, Joseph S. Barrera
+@sp 3
+@center Edition @value{EDITION}
+@sp 1
+@center last updated @value{UPDATED}
+@sp 1
+@center for version @value{VERSION}
+@page
+@vskip 0pt plus 1filll
+@insertcopying
+@end titlepage
+@c @titlepage
+@c @finalout
+@c @title The GNU Mach Reference Manual
+@c @author Marcus Brinkmann
+@c @author Gordon Matzigkeit
+@c @author Gibran Hasnaoui
+
+@c @author Robert V. Baron @c (rvb)
+@c @author Richard P. Draves @c (rpd)
+@c @author Mary R. Thompson @c (mrt)
+@c @author Joseph S. Barrera @c (jsb)
+@c @c The following occur rarely in the rcs commit logs of the man pages:
+@c @c Dan Stodolsky, (danner)
+@c @c David B. Golub, (dbg)
+@c @c Terri Watson, (elf)
+@c @c Lori Iannamico, (lli) [distribution coordinator]
+@c @c Further authors of kernel_interfaces.ps:
+@c @c David Black [OSF]
+@c @c William Bolosky
+@c @c Jonathan Chew
+@c @c Alessandro Forin
+@c @c Richard F. Rashid
+@c @c Avadis Tevanian Jr.
+@c @c Michael W. Young
+@c @c See also
+@c @c http://www.cs.cmu.edu/afs/cs/project/mach/public/www/people-former.html
+@page
+
+@ifnottex
+@node Top
+@top Main Menu
+
+@insertcopying
+@end ifnottex
+
+@menu
+* Introduction:: How to use this manual.
+* Installing:: Setting up GNU Mach on your computer.
+* Bootstrap:: Running GNU Mach on your machine.
+* Inter Process Communication:: Communication between processes.
+* Virtual Memory Interface:: Allocating and deallocating virtual memory.
+* External Memory Management:: Handling memory pages in user space.
+* Threads and Tasks:: Handling of threads and tasks.
+* Host Interface:: Interface to a Mach host.
+* Processors and Processor Sets:: Handling processors and sets of processors.
+* Device Interface:: Accessing kernel devices.
+* Kernel Debugger:: How to use the built-in kernel debugger.
+
+Appendices
+
+* Copying:: The GNU General Public License says how you
+ can copy and share the GNU Mach microkernel.
+* Documentation License:: This manual is under the GNU Free
+ Documentation License.
+
+Indices
+
+* Concept Index:: Index of concepts and programs.
+* Function and Data Index:: Index of functions, variables and data types.
+
+
+@detailmenu
+ --- The Detailed Node Listing ---
+
+Introduction
+
+* Audience:: The people for whom this manual is written.
+* Features:: Reasons to install and use GNU Mach.
+* Overview:: Basic architecture of the Mach microkernel.
+* History:: The story about Mach.
+
+Installing
+
+* Binary Distributions:: Obtaining ready-to-run GNU distributions.
+* Compilation:: Building GNU Mach from its source code.
+* Configuration:: Configuration options at compilation time.
+* Cross-Compilation:: Building GNU Mach from another system.
+
+Bootstrap
+
+* Bootloader:: Starting the microkernel, or other OSes.
+* Modules:: Starting the first task of the OS.
+
+Inter Process Communication
+
+* Major Concepts:: The concepts behind the Mach IPC system.
+* Messaging Interface:: Composing, sending and receiving messages.
+* Port Manipulation Interface:: Manipulating ports, port rights, port sets.
+
+Messaging Interface
+
+* Mach Message Call:: Sending and receiving messages.
+* Message Format:: The format of Mach messages.
+* Exchanging Port Rights:: Sending and receiving port rights.
+* Memory:: Passing memory regions in messages.
+* Message Send:: Sending messages.
+* Message Receive:: Receiving messages.
+* Atomicity:: Atomicity of port rights.
+
+Port Manipulation Interface
+
+* Port Creation:: How to create new ports and port sets.
+* Port Destruction:: How to destroy ports and port sets.
+* Port Names:: How to query and manipulate port names.
+* Port Rights:: How to work with port rights.
+* Ports and other Tasks:: How to move rights between tasks.
+* Receive Rights:: How to work with receive rights.
+* Port Sets:: How to work with port sets.
+* Request Notifications:: How to request notifications for events.
+* Inherited Ports:: How to work with the inherited system ports.
+
+Virtual Memory Interface
+
+* Memory Allocation:: Allocation of new virtual memory.
+* Memory Deallocation:: Freeing unused virtual memory.
+* Data Transfer:: Reading, writing and copying memory.
+* Memory Attributes:: Tweaking memory regions.
+* Mapping Memory Objects:: How to map memory objects.
+* Memory Statistics:: How to get statistics about memory usage.
+
+External Memory Management
+
+* Memory Object Server:: The basics of external memory management.
+* Memory Object Creation:: How new memory objects are created.
+* Memory Object Termination:: How memory objects are terminated.
+* Memory Objects and Data:: Data transfer to and from memory objects.
+* Memory Object Locking:: How memory objects are locked.
+* Memory Object Attributes:: Manipulating attributes of memory objects.
+* Default Memory Manager:: Setting and using the default memory manager.
+
+Threads and Tasks
+
+* Thread Interface:: Manipulating threads.
+* Task Interface:: Manipulating tasks.
+* Profiling:: Profiling threads and tasks.
+
+Thread Interface
+
+* Thread Creation:: Creating threads.
+* Thread Termination:: Terminating threads.
+* Thread Information:: How to get information on threads.
+* Thread Settings:: How to set thread-related information.
+* Thread Execution:: How to control the thread's machine state.
+* Scheduling:: Operations on thread scheduling.
+* Thread Special Ports:: How to handle the thread's special ports.
+* Exceptions:: Managing exceptions.
+
+Scheduling
+
+* Thread Priority:: Changing the priority of a thread.
+* Hand-Off Scheduling:: Switch to a new thread.
+* Scheduling Policy:: Setting the scheduling policy.
+
+Task Interface
+
+* Task Creation:: Creating tasks.
+* Task Termination:: Terminating tasks.
+* Task Information:: Information on tasks.
+* Task Execution:: Thread scheduling in a task.
+* Task Special Ports:: How to get and set the task's special ports.
+* Syscall Emulation:: How to emulate system calls.
+
+Host Interface
+
+* Host Ports:: Ports representing a host.
+* Host Information:: Query information about a host.
+* Host Time:: Functions to query and manipulate the host time.
+* Host Reboot:: Rebooting the system.
+
+Processors and Processor Sets
+
+* Processor Set Interface:: How to work with processor sets.
+* Processor Interface:: How to work with individual processors.
+
+Processor Set Interface
+
+* Processor Set Ports:: Ports representing a processor set.
+* Processor Set Access:: How the processor sets are accessed.
+* Processor Set Creation:: How new processor sets are created.
+* Processor Set Destruction:: How processor sets are destroyed.
+* Tasks and Threads on Sets:: Assigning tasks or threads to processor sets.
+* Processor Set Priority:: Specifying the priority of a processor set.
+* Processor Set Policy:: Changing the processor set policies.
+* Processor Set Info:: Obtaining information about a processor set.
+
+Processor Interface
+
+* Hosted Processors:: Getting a list of all processors on a host.
+* Processor Control:: Starting, stopping, controlling processors.
+* Processors and Sets:: Combining processors into processor sets.
+* Processor Info:: Obtaining information on processors.
+
+Device Interface
+
+* Device Open:: Opening hardware devices.
+* Device Close:: Closing hardware devices.
+* Device Read:: Reading data from the device.
+* Device Write:: Writing data to the device.
+* Device Map:: Mapping devices into virtual memory.
+* Device Status:: Querying and manipulating a device.
+* Device Filter:: Filtering packets arriving on a device.
+* Device Interrupt:: Getting hardware interrupt notifications.
+
+Kernel Debugger
+
+* Operation:: Basic architecture of the kernel debugger.
+* Commands:: Available commands in the kernel debugger.
+* Variables:: Access of variables from the kernel debugger.
+* Expressions:: Usage of expressions in the kernel debugger.
+
+Documentation License
+
+* GNU Free Documentation License:: The GNU Free Documentation License.
+* CMU License:: The CMU license applies to the original Mach
+ kernel and its documentation.
+
+@end detailmenu
+@end menu
+
+
+@node Introduction
+@chapter Introduction
+
+GNU Mach is the microkernel of the GNU Project. It is the base of the
+operating system, and provides its functionality to the Hurd servers,
+the GNU C Library and all user applications. The microkernel itself
+does not provide much functionality of the system, just enough to make
+it possible for the Hurd servers and the C library to implement the missing
+features you would expect from a POSIX compatible operating system.
+
+@menu
+* Audience:: The people for whom this manual is written.
+* Features:: Reasons to install and use GNU Mach.
+* Overview:: Basic architecture of the Mach microkernel.
+* History:: The story about Mach.
+@end menu
+
+
+@node Audience
+@section Audience
+
+This manual is designed to be useful to everybody who is interested in
+using, administering, or programming the Mach microkernel.
+
+If you are an end-user and you are looking for help on running the Mach
+kernel, the first few chapters of this manual describe the essential
+parts of installing and using the kernel in the GNU operating system.
+
+The rest of this manual is a technical discussion of the Mach
+programming interface and its implementation, and would not be helpful
+until you want to learn how to extend the system or modify the kernel.
+
+This manual is organized according to the subsystems of Mach, and each
+chapter begins with descriptions of conceptual ideas that are related to
+that subsystem. If you are a programmer and want to learn more about,
+say, the Mach IPC subsystem, you can skip to the IPC chapter
+(@pxref{Inter Process Communication}), and read about the related
+concepts and interface definitions.
+
+
+@node Features
+@section Features
+
+GNU Mach is not the most advanced microkernel known to the planet,
+nor is it the fastest or smallest, but it has a rich set of interfaces and
+some features which make it useful as the base of the Hurd system.
+
+@table @asis
+@item it's free software
+Anybody can use, modify, and redistribute it under the terms of the GNU
+General Public License (@pxref{Copying}). GNU Mach is part of the GNU
+system, which is a complete operating system licensed under the GPL.
+
+@item it's built to survive
+As a microkernel, GNU Mach doesn't implement a lot of the features
+commonly found in an operating system, but only the bare minimum
+that is required to implement a full operating system on top of it.
+This means that a lot of the operating system code is maintained outside
+of GNU Mach, and while this code may go through a complete redesign, the
+code of the microkernel can remain comparatively stable.
+
+@item it's scalable
+Mach is particularly well suited for SMP and network cluster techniques.
+Thread support is provided at the kernel level, and the kernel itself
+takes advantage of that. Network transparency at the IPC level makes
+resources of the system available across machine boundaries (with NORMA
+IPC, currently not available in GNU Mach).
+
+@item it exists
+The Mach microkernel is real software that works Right Now.
+It is not a research project or a proposal. You don't have to wait at all
+before you can start using and developing it. Mach has been used in
+many operating systems in the past, usually as the base for a single
+UNIX server. In the GNU system, Mach is the base of a functional
+multi-server operating system, the Hurd.
+@end table
+
+
+@node Overview
+@section Overview
+
+@c This paragraph by Gordon Matzigkeit from the Hurd manual.
+An operating system kernel provides a framework for programs to share a
+computer's hardware resources securely and efficiently. This requires
+that the programs are separated and protected from each other. To make
+running multiple programs in parallel useful, there also needs to be a
+facility for programs to exchange information by communication.
+
+The Mach microkernel provides abstractions of the underlying hardware
+resources like devices and memory. It organizes the running programs
+into tasks and threads (points of execution in the tasks). In addition,
+Mach provides a rich interface for inter-process communication.
+
+What Mach does not provide is a POSIX compatible programming interface.
+In fact, it has no understanding of file systems, POSIX process semantics,
+network protocols and many more. All this is implemented in tasks
+running on top of the microkernel. In the GNU operating system, the Hurd
+servers and the C library share the responsibility to implement the POSIX
+interface, and the additional interfaces which are specific to the GNU
+system.
+
+
+@node History
+@section History
+
+XXX A few lines about the history of Mach here.
+
+
+@node Installing
+@chapter Installing
+
+Before you can use the Mach microkernel in your system you'll need to install
+it and all components you want to use with it, e.g. the rest of the operating
+system. You also need a bootloader to load the kernel from the storage
+medium and run it when the computer is started.
+
+GNU Mach is only available for Intel i386-compatible architectures
+(such as the Pentium) currently. If you have a different architecture
+and want to run the GNU Mach microkernel, you will need to port the
+kernel and all other software of the system to your machine's architecture.
+Porting is an involved process which requires considerable programming skills,
+and it is not recommended for the faint-of-heart.
+If you have the talent and desire to do a port, contact
+@email{bug-hurd@@gnu.org} in order to coordinate the effort.
+
+@menu
+* Binary Distributions:: Obtaining ready-to-run GNU distributions.
+* Compilation:: Building GNU Mach from its source code.
+* Configuration:: Configuration options at compile time.
+* Cross-Compilation:: Building GNU Mach from another system.
+@end menu
+
+
+@node Binary Distributions
+@section Binary Distributions
+
+By far the easiest and best way to install GNU Mach and the operating
+system is to obtain a GNU binary distribution. The GNU operating
+system consists of GNU Mach, the Hurd, the C library and many applications.
+Without the GNU operating system, you will only have a microkernel, which
+is not very useful by itself, without the other programs.
+
+Building the whole operating system takes a huge effort, and you are well
+advised to not do it yourself, but to get a binary distribution of the
+GNU operating system. The distribution also includes a binary of the
+GNU Mach microkernel.
+
+Information on how to obtain the GNU system can be found in the Hurd
+info manual.
+
+
+@node Compilation
+@section Compilation
+
+If you already have a running GNU system, and only want to recompile
+the kernel, for example to select a different set of included hardware
+drivers, you can easily do this. You need the GNU C compiler and
+MIG, the Mach interface generator, which both come in their own
+packages.
+
+Building and installing the kernel is as easy as with any other GNU
+software package. The configure script is used to configure the source
+and set the compile time options. The compilation is done by running:
+
+@example
+make
+@end example
+
+To install the kernel and its header files, just enter the command:
+
+@example
+make install
+@end example
+
+This will install the kernel as @file{EXEC_PREFIX/boot/gnumach}, the header
+files into @file{PREFIX/include/}, the list of message ids as
+@file{PREFIX/share/msgids/gnumach.msgids} and the documentation into
+@file{PREFIX/share/info/}.
+
+Note that there is also a way to only install the header and documentation
+files without having to actually build the whole package: run @command{make
+install-data} after having run @command{configure} to do so.  (This is needed
+for bootstrapping a cross compiler and similar procedures.)
+
+@node Configuration
+@section Configuration
+
+See the following tables for the options that can be passed to the
+@command{configure} script as command line arguments to control what components
+are built into the kernel, how certain things are configured and so on.
+
+See the top-level @file{INSTALL} file for information about generic
+@command{configure} options, like under which paths to install the package's
+components. It also describes how to control the process by setting
+environment variables.
+
+The file @file{i386/README-Drivers} has some i386-specific information for
+device drivers. You should only need to consult this file in case a device
+driver is not working for you.
+
+@subsection Table of configure switches not related to device drivers
+
+@table @code
+@item --enable-kdb
+In-kernel debugger. This is only useful if you actually anticipate debugging
+the kernel. It is not enabled by default because it adds considerably to the
+unpageable memory footprint of the kernel. @xref{Kernel Debugger}.
+@end table
+
+@table @code
+@item --enable-pae
+@acronym{PAE, Physical Address Extension} feature (@samp{ix86}-only),
+which is available on modern @samp{ix86} processors; on @samp{ix86-at} disabled
+by default, on @samp{ix86-xen} enabled by default.
+@end table
+
+@subsection Turning device drivers on or off
+
+Each device driver has an associated configure switch. The following table
+indicates whether a device driver is enabled by default or not. It also gives
+--- if possible at all --- the configure switches to use for disabling or
+enabling device drivers, in case you're not satisfied with the default choices.
+You can specify @samp{--enable-device-drivers=WHICH} (where WHICH on
+@samp{ix86-at} must be one of @samp{default}, @samp{qemu}, @samp{none}) to
+preset a certain subset of all available device drivers.
+@samp{--enable-device-drivers} is sugar for
+@samp{--enable-device-drivers=default} (and is the implicit default
+nevertheless) and @samp{--disable-device-drivers} is short for
+@samp{--enable-device-drivers=none}. @samp{qemu} will include only the set of
+device drivers that is useful when using the resulting kernel binary to drive a
+Hurd system in the @acronym{QEMU} system emulator. This is only useful for
+reducing the kernel build time and the kernel image size.
+
+@subsection What the configure switches do
+
+Each configure switch has two effects. First, it defines a @acronym{CPP}
+symbol that turns on or off the hooks that autoconfigure the device and add it
+to the list of available devices. Second, it adds the source code for the
+driver to a make variable so that the code for the driver is compiled and
+linked into the kernel. Also follow this route to find the file(s) which are
+implementing a certain device driver.
+
+@subsection Table of configure switches related to device drivers
+
+(@samp{%d} in the following denotes a unit number, starting with @samp{0}.)
+
+@table @code
+@item --disable-kmsg
+Kernel message device @samp{kmsg}.
+
+@item --enable-lpr
+Parallel port device driver for the @samp{lpr%d} devices. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-floppy
+PC floppy disk controller device driver for the @samp{fd%d} devices. On
+@samp{ix86-at} enabled by @samp{default} and for @samp{qemu}.
+
+@item --enable-ide
+IDE controller device driver for the @samp{hd%d} and @samp{hd%ds%d} (disks and
+their partitions) devices. On @samp{ix86-at} enabled by @samp{default} and for
+@samp{qemu}.
+@end table
+
+The following options control drivers for various SCSI controllers.  SCSI
+devices are named @samp{sd%d} and @samp{sd%ds%d} (disks and their partitions)
+or @samp{cd%d} (CD ROMs).
+
+@table @code
+@item --enable-advansys
+AdvanSys SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-buslogic
+BusLogic SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-flashpoint
+Only meaningful in conjunction with the above BusLogic SCSI controller device
+driver. Enable the FlashPoint support.
+
+@item --enable-u14-34f
+UltraStor 14F/34F SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-ultrastor
+UltraStor SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-aha152x
+Adaptec AHA-152x/2825 SCSI controller device driver. On @samp{ix86-at} enabled
+by @samp{default}.
+
+@item --enable-aha1542
+Adaptec AHA-1542 SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-aha1740
+Adaptec AHA-1740 SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-aic7xxx
+Adaptec AIC7xxx SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-fdomain
+Future Domain 16xx SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-in2000
+Always IN 2000 SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-g_NCR5380
+Generic NCR5380/53c400 SCSI controller device driver.
+
+@item --enable-NCR53c406a
+NCR53c406a SCSI controller device driver.
+
+@item --enable-pas16
+PAS16 SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-seagate
+Seagate ST02 and Future Domain TMC-8xx SCSI controller device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-t128
+Trantor T128/T128F/T228 SCSI controller device driver. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-53c78xx
+NCR53C7,8xx SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-eata_dma
+EATA-DMA (DPT, NEC, AT&T, SNI, AST, Olivetti, Alphatronix) SCSI controller
+device driver.
+
+@item --enable-eata_pio
+EATA-PIO (old DPT PM2001, PM2012A) SCSI controller device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-wd7000
+WD 7000 SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-eata
+EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) SCSI controller
+device driver. On @samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-am53c974
+AM53/79C974 SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-dtc
+DTC3180/3280 SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-ncr53c8xx
+NCR53C8XX, dc390w, dc390u, dc390f SCSI controller device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-tmscsim
+Tekram DC-390(T) SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-ppa
+IOMEGA Parallel Port ZIP drive device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-qlogicfas
+Qlogic FAS SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-qlogicisp
+Qlogic ISP SCSI controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-gdth
+GDT SCSI Disk Array controller device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+@end table
+
+The following options enable drivers for various ethernet cards. NIC devices
+are usually named @samp{eth%d}, except for the pocket adaptors.
+
+@c GNU Mach does only autodetect one ethernet card. To enable any further
+@c cards, the source code has to be edited.
+@c XXX Reference to the source code.
+
+@table @code
+@item --enable-ne
+NE2000/NE1000 ISA network card device driver. On @samp{ix86-at} enabled by
+@samp{default} and for @samp{qemu}.
+
+@item --enable-3c503
+3Com 503 (Etherlink II) network card device driver. On @samp{ix86-at} enabled
+by @samp{default}.
+
+@item --enable-3c509
+3Com 509/579 (Etherlink III) network card device driver. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-wd
+WD80X3 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-3c501
+3COM 501/Etherlink I network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-smc-ultra
+SMC Ultra network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-smc-ultra32
+SMC Ultra 32 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-hp-plus
+HP PCLAN+ (27247B and 27252A) network card device driver. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-hp
+HP PCLAN (27245 and other 27xxx series) network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-3c59x
+3Com 590/900 series (592/595/597/900/905) "Vortex/Boomerang" network card
+device driver. On @samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-seeq8005
+Seeq8005 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-hp100
+HP 10/100VG PCLAN (ISA, EISA, PCI) network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-ac3200
+Ansel Communications EISA 3200 network card device driver. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-e2100
+Cabletron E21xx network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-at1700
+AT1700 (Fujitsu 86965) network card device driver. On @samp{ix86-at} enabled
+by @samp{default}.
+
+@item --enable-eth16i
+ICL EtherTeam 16i/32 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-znet
+Zenith Z-Note network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-eexpress
+EtherExpress 16 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-eepro
+EtherExpressPro network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-eepro100
+Intel EtherExpressPro PCI 10+/100B/100+ network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-depca
+DEPCA, DE10x, DE200, DE201, DE202, DE210, DE422 network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-ewrk3
+EtherWORKS 3 (DE203, DE204, DE205) network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-de4x5
+DE425, DE434, DE435, DE450, DE500 network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-apricot
+Apricot XEN-II on board ethernet network card device driver. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-wavelan
+AT&T WaveLAN & DEC RoamAbout DS network card device driver.
+
+@item --enable-3c507
+3Com 507 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-3c505
+3Com 505/Etherlink II network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-de600
+D-Link DE-600 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-de620
+D-Link DE-620 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-sk_g16
+Schneider & Koch G16 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-ni52
+NI5210 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-ni65
+NI6510 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-atp
+AT-LAN-TEC/RealTek pocket adaptor network card device driver for the
+@samp{atp%d} devices.
+
+@item --enable-lance
+AMD LANCE and PCnet (AT1500 and NE2100) network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-tulip
+DECchip Tulip (dc21x4x) PCI network card device driver. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-fmv18x
+FMV-181/182/183/184 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-3c515
+3Com 515 ISA Fast EtherLink network card device driver. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-pcnet32
+AMD PCI PCnet32 (PCI bus NE2100 cards) network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-ne2k-pci
+PCI NE2000 network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-yellowfin
+Packet Engines Yellowfin Gigabit-NIC network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-rtl8139
+RealTek 8129/8139 (not 8019/8029!) network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-epic100
+SMC 83c170/175 EPIC/100 (EtherPower II) network card device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-tlan
+TI ThunderLAN network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-via-rhine
+VIA Rhine network card device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-hamachi
+Packet Engines "Hamachi" GNIC-2 Gigabit Ethernet device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-intel-gige
+Intel PCI Gigabit Ethernet device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-myson803
+Myson MTD803 Ethernet adapter series device driver. On @samp{ix86-at} enabled
+by @samp{default}.
+
+@item --enable-natsemi
+National Semiconductor DP8381x series PCI Ethernet device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-ns820
+National Semiconductor DP8382x series PCI Ethernet device driver. On
+@samp{ix86-at} enabled by @samp{default}.
+
+@item --enable-starfire
+Adaptec Starfire network adapter device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-sundance
+Sundance ST201 "Alta" PCI Ethernet device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-winbond-840
+Winbond W89c840 PCI Ethernet device driver. On @samp{ix86-at} enabled by
+@samp{default}.
+@end table
+
+The following options either control device drivers for supported PCMCIA
+bridges or control the overall behaviour of the GNU Mach PCMCIA core. To make
+use of GNU Mach PCMCIA support you need to have the corresponding userland
+applications (GNU Mach Card Services) installed.
+
+@table @code
+@item --enable-i82365
+Device driver for the Intel 82365 and compatible PC Card controllers, and
+Yenta-compatible PCI-to-CardBus controllers. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-pcmcia-isa
+ISA bus related bits in the GNU Mach PCMCIA core. Keeping it enabled is
+generally a good idea, since it does not only have effect if your PC Card
+bridge is attached to the ISA bus, but provides more (ISA) interrupts to the
+Card Services for it to assign to the cards in turn. On @samp{ix86-at} enabled
+by @samp{default}.
+@end table
+
+The following options control device drivers for supported PCMCIA Ethernet
+controllers. NIC devices are usually named @samp{eth%d}.
+
+@table @code
+@item --enable-3c574_cs
+PCMCIA ethernet driver for the 3Com 3c574 ``RoadRunner''. On @samp{ix86-at}
+enabled by @samp{default}.
+
+@item --enable-3c589_cs
+Driver for the 3Com 3c589 PCMCIA card. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-axnet_cs
+Driver for the Asix AX88190-based PCMCIA cards. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-fmvj18x_cs
+Driver for PCMCIA cards with the fmvj18x chipset. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-nmclan_cs
+Driver for the New Media Ethernet LAN PCMCIA cards. On @samp{ix86-at} enabled
+by @samp{default}.
+
+@item --enable-pcnet_cs
+Driver for NS8390-based PCMCIA cards. This driver supports the D-Link DE-650
+and Linksys EthernetCard cards, the newer D-Link and Linksys combo cards,
+Accton EN2212 cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory
+mode, and the IBM Credit Card Adapter, the NE4100, the Thomas Conrad ethernet
+card, and the Kingston KNE-PCM/x in shared-memory mode. It will also handle
+the Socket EA card in either mode. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-smc91c92_cs
+Driver for SMC91c92-based PCMCIA cards. On @samp{ix86-at} enabled by
+@samp{default}.
+
+@item --enable-xirc2ps_cs
+Driver for Xircom CreditCard and Realport PCMCIA ethernet adapters. On
+@samp{ix86-at} enabled by @samp{default}.
+@end table
+
+The following options control device drivers for supported PCMCIA Wireless LAN
+network controllers. NIC devices are usually named @samp{eth%d}.
+
+Please note that you need to have some userland applications (the GNU Mach
+Wireless Tools) installed in order to make use of these devices.
+
+@table @code
+@item --enable-orinoco_cs
+Driver for the Hermes or Prism 2 chipset based PCMCIA wireless adapters, with
+Lucent/Agere, Intersil or Symbol firmware. This driver is suitable for PCMCIA
+wireless adapters, such as the Lucent WavelanIEEE/Orinoco cards and their OEM
+(Cabletron/EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and
+others). It should also be usable on various Prism II based cards such as the
+Linksys, D-Link and Farallon Skyline. It should also work on Symbol cards such
+as the 3Com AirConnect and Ericsson WLAN. On @samp{ix86-at} enabled by
+@samp{default}.
+@end table
+
+
+@node Cross-Compilation
+@section Cross-Compilation
+
+Another way to install the kernel is to use an existing operating system
+in order to compile the kernel binary.
+This is called @dfn{cross-compiling}, because it is done between two
+different platforms. If the pre-built kernels are not working for
+you, and you can't ask someone to compile a custom kernel for your
+machine, this is your last chance to get a kernel that boots on your
+hardware.
+
+Luckily, the kernel has only light dependencies.  You don't even
+need a cross compiler if your build machine has a compiler and is
+the same architecture as the system you want to run GNU Mach on.
+
+You need a cross-mig, though.
+
+XXX More info needed.
+
+
+@node Bootstrap
+@chapter Bootstrap
+
+Bootstrapping@footnote{The term @dfn{bootstrapping} refers to a Dutch
+legend about a boy who was able to fly by pulling himself up by his
+bootstraps. In computers, this term refers to any process where a
+simple system activates a more complicated system.} is the procedure by
+which your machine loads the microkernel and transfers control to the
+operating system.
+
+
+@menu
+* Bootloader:: Starting the microkernel, or other OSes.
+* Modules:: Starting the first task of the OS.
+@end menu
+
+@node Bootloader
+@section Bootloader
+
+The @dfn{bootloader} is the first software that runs on your machine.
+Many hardware architectures have a very simple startup routine which
+reads a very simple bootloader from the beginning of the internal hard
+disk, then transfers control to it. Other architectures have startup
+routines which are able to understand more of the contents of the hard
+disk, and directly start a more advanced bootloader.
+
+@cindex GRUB
+@cindex GRand Unified Bootloader
+@dfn{GRUB}@footnote{The GRand Unified Bootloader, available
+from @uref{http://gnu.org/software/grub/}.} is the GNU bootloader.
+GRUB provides advanced functionality, and is capable of loading several
+different kernels (such as Mach, Linux, DOS, and the *BSD family).
+@xref{Top, , Introduction, grub, GRUB Manual}.
+
+GNU Mach conforms to the Multiboot specification which defines an
+interface between the bootloader and the components that run very early
+at startup. GNU Mach can be started by any bootloader which supports
+the multiboot standard.  After the bootloader has loaded the kernel image to
+a designated address in the system memory, it jumps into the startup
+code of the kernel. This code initializes the kernel and detects the
+available hardware devices. Afterwards, the first system task is
+started. @xref{Top, , Overview, multiboot, Multiboot Specification}.
+
+
+@node Modules
+@section Modules
+@pindex serverboot
+
+This is outdated.
+
+Because the microkernel does not provide filesystem support and other
+features necessary to load the first system task from a storage medium,
+the first task is loaded by the bootloader as a module to a specified
+address. In the GNU system, this first program is the @code{serverboot}
+executable. GNU Mach inserts the host control port and the device
+master port into this task and appends the port numbers to the command
+line before executing it.
+
+The @code{serverboot} program is responsible for loading and executing
+the rest of the Hurd servers. Rather than containing specific
+instructions for starting the Hurd, it follows general steps given in a
+user-supplied boot script.
+
+XXX More about boot scripts.
+
+
+@node Inter Process Communication
+@chapter Inter Process Communication
+
+This chapter describes the details of the Mach IPC system. First the
+actual calls concerned with sending and receiving messages are
+discussed, then the details of the port system are described in detail.
+
+@menu
+* Major Concepts:: The concepts behind the Mach IPC system.
+* Messaging Interface:: Composing, sending and receiving messages.
+* Port Manipulation Interface:: Manipulating ports, port rights, port sets.
+@end menu
+
+
+@node Major Concepts
+@section Major Concepts
+@cindex interprocess communication (IPC)
+@cindex IPC (interprocess communication)
+@cindex communication between tasks
+@cindex remote procedure calls (RPC)
+@cindex RPC (remote procedure calls)
+@cindex messages
+
+The Mach kernel provides message-oriented, capability-based interprocess
+communication. The interprocess communication (IPC) primitives
+efficiently support many different styles of interaction, including
+remote procedure calls (RPC), object-oriented distributed programming,
+streaming of data, and sending very large amounts of data.
+
+The IPC primitives operate on three abstractions: messages, ports, and
+port sets. User tasks access all other kernel services and abstractions
+via the IPC primitives.
+
+The message primitives let tasks send and receive messages. Tasks send
+messages to ports. Messages sent to a port are delivered reliably
+(messages may not be lost) and are received in the order in which they
+were sent. Messages contain a fixed-size header and a variable amount
+of typed data following the header. The header describes the
+destination and size of the message.
+
+The IPC implementation makes use of the VM system to efficiently
+transfer large amounts of data. The message body can contain the
+address of a region in the sender's address space which should be
+transferred as part of the message. When a task receives a message
+containing an out-of-line region of data, the data appears in an unused
+portion of the receiver's address space. This transmission of
+out-of-line data is optimized so that sender and receiver share the
+physical pages of data copy-on-write, and no actual data copy occurs
+unless the pages are written. Regions of memory up to the size of a
+full address space may be sent in this manner.
+
+Ports hold a queue of messages. Tasks operate on a port to send and
+receive messages by exercising capabilities for the port. Multiple
+tasks can hold send capabilities, or rights, for a port. Tasks can also
+hold send-once rights, which grant the ability to send a single message.
+Only one task can hold the receive capability, or receive right, for a
+port. Port rights can be transferred between tasks via messages. The
+sender of a message can specify in the message body that the message
+contains a port right. If a message contains a receive right for a
+port, then the receive right is removed from the sender of the message
+and the right is transferred to the receiver of the message. While the
+receive right is in transit, tasks holding send rights can still send
+messages to the port, and they are queued until a task acquires the
+receive right and uses it to receive the messages.
+
+Tasks can receive messages from ports and port sets. The port set
+abstraction allows a single thread to wait for a message from any of
+several ports. Tasks manipulate port sets with a capability, or
+port-set right, which is taken from the same space as the port
+capabilities. The port-set right may not be transferred in a message.
+A port set holds receive rights, and a receive operation on a port set
+blocks waiting for a message sent to any of the constituent ports. A
+port may not belong to more than one port set, and if a port is a member
+of a port set, the holder of the receive right can't receive directly
+from the port.
+
+Port rights are a secure, location-independent way of naming ports. The
+port queue is a protected data structure, only accessible via the
+kernel's exported message primitives. Rights are also protected by the
+kernel; there is no way for a malicious user task to guess a port name
+and send a message to a port to which it shouldn't have access. Port
+rights do not carry any location information. When a receive right for
+a port moves from task to task, and even between tasks on different
+machines, the send rights for the port remain unchanged and continue to
+function.
+
+@node Messaging Interface
+@section Messaging Interface
+
+This section describes how messages are composed, sent and received
+within the Mach IPC system.
+
+@menu
+* Mach Message Call:: Sending and receiving messages.
+* Message Format:: The format of Mach messages.
+* Exchanging Port Rights:: Sending and receiving port rights.
+* Memory:: Passing memory regions in messages.
+* Message Send:: Sending messages.
+* Message Receive:: Receiving messages.
+* Atomicity:: Atomicity of port rights.
+@end menu
+
+
+@node Mach Message Call
+@subsection Mach Message Call
+
+To use the @code{mach_msg} call, you can include the header files
+@file{mach/port.h} and @file{mach/message.h}.
+
+@deftypefun mach_msg_return_t mach_msg (@w{mach_msg_header_t *@var{msg}}, @w{mach_msg_option_t @var{option}}, @w{mach_msg_size_t @var{send_size}}, @w{mach_msg_size_t @var{rcv_size}}, @w{mach_port_t @var{rcv_name}}, @w{mach_msg_timeout_t @var{timeout}}, @w{mach_port_t @var{notify}})
+The @code{mach_msg} function is used to send and receive messages. Mach
+messages contain typed data, which can include port rights and
+references to large regions of memory.
+
+@var{msg} is the address of a buffer in the caller's address space.
+Message buffers should be aligned on long-word boundaries. The message
+options @var{option} are bit values, combined with bitwise-or. One or
+both of @code{MACH_SEND_MSG} and @code{MACH_RCV_MSG} should be used.
+Other options act as modifiers. When sending a message, @var{send_size}
+specifies the size of the message buffer. Otherwise zero should be
+supplied. When receiving a message, @var{rcv_size} specifies the size
+of the message buffer. Otherwise zero should be supplied. When
+receiving a message, @var{rcv_name} specifies the port or port set.
+Otherwise @code{MACH_PORT_NULL} should be supplied. When using the
+@code{MACH_SEND_TIMEOUT} and @code{MACH_RCV_TIMEOUT} options,
+@var{timeout} specifies the time in milliseconds to wait before giving
+up. Otherwise @code{MACH_MSG_TIMEOUT_NONE} should be supplied. When
+using the @code{MACH_SEND_NOTIFY}, @code{MACH_SEND_CANCEL}, and
+@code{MACH_RCV_NOTIFY} options, @var{notify} specifies the port used for
+the notification. Otherwise @code{MACH_PORT_NULL} should be supplied.
+
+If the option argument is @code{MACH_SEND_MSG}, it sends a message. The
+@var{send_size} argument specifies the size of the message to send. The
+@code{msgh_remote_port} field of the message header specifies the
+destination of the message.
+
+If the option argument is @code{MACH_RCV_MSG}, it receives a message.
+The @var{rcv_size} argument specifies the size of the message buffer
+that will receive the message; messages larger than @var{rcv_size} are
+not received. The @var{rcv_name} argument specifies the port or port
+set from which to receive.
+
+If the option argument is @code{MACH_SEND_MSG|MACH_RCV_MSG}, then
+@code{mach_msg} does both send and receive operations. If the send
+operation encounters an error (any return code other than
+@code{MACH_MSG_SUCCESS}), then the call returns immediately without
+attempting the receive operation. Semantically the combined call is
+equivalent to separate send and receive calls, but it saves a system
+call and enables other internal optimizations.
+
+If the option argument specifies neither @code{MACH_SEND_MSG} nor
+@code{MACH_RCV_MSG}, then @code{mach_msg} does nothing.
+
+Some options, like @code{MACH_SEND_TIMEOUT} and @code{MACH_RCV_TIMEOUT},
+share a supporting argument. If these options are used together, they
+make independent use of the supporting argument's value.
+@end deftypefun
+
+@deftp {Data type} mach_msg_timeout_t
+This is a @code{natural_t} used by the timeout mechanism. The units are
+milliseconds. The value to be used when there is no timeout is
+@code{MACH_MSG_TIMEOUT_NONE}.
+@end deftp
+
+
+@node Message Format
+@subsection Message Format
+@cindex message format
+@cindex format of a message
+@cindex composing messages
+@cindex message composition
+
+A Mach message consists of a fixed size message header, a
+@code{mach_msg_header_t}, followed by zero or more data items. Data
+items are typed. Each item has a type descriptor followed by the actual
+data (or the address of the data, for out-of-line memory regions).
+
+The following data types are related to Mach ports:
+
+@deftp {Data type} mach_port_t
+The @code{mach_port_t} data type is an unsigned integer type which
+represents a port name in the task's port name space. In GNU Mach, this
+is an @code{unsigned int}.
+@end deftp
+
+@c This is defined elsewhere.
+@c @deftp {Data type} mach_port_seqno_t
+@c The @code{mach_port_seqno_t} data type is an unsigned integer type which
+@c represents a sequence number of a message. In GNU Mach, this is an
+@c @code{unsigned int}.
+@c @end deftp
+
+The following data types are related to Mach messages:
+
+@deftp {Data type} mach_msg_bits_t
+The @code{mach_msg_bits_t} data type is an @code{unsigned int} used to
+store various flags for a message.
+@end deftp
+
+@deftp {Data type} mach_msg_size_t
+The @code{mach_msg_size_t} data type is an @code{unsigned int} used to
+store the size of a message.
+@end deftp
+
+@deftp {Data type} mach_msg_id_t
+The @code{mach_msg_id_t} data type is an @code{integer_t} typically used to
+convey a function or operation id for the receiver.
+@end deftp
+
+@deftp {Data type} mach_msg_header_t
+This structure is the start of every message in the Mach IPC system. It
+has the following members:
+
+@table @code
+@item mach_msg_bits_t msgh_bits
+The @code{msgh_bits} field has the following bits defined, all other
+bits should be zero:
+
+@table @code
+@item MACH_MSGH_BITS_REMOTE_MASK
+@itemx MACH_MSGH_BITS_LOCAL_MASK
+The remote and local bits encode @code{mach_msg_type_name_t} values that
+specify the port rights in the @code{msgh_remote_port} and
+@code{msgh_local_port} fields. The remote value must specify a send or
+send-once right for the destination of the message. If the local value
+doesn't specify a send or send-once right for the message's reply port,
+it must be zero and @code{msgh_local_port} must be @code{MACH_PORT_NULL}.
+
+@item MACH_MSGH_BITS_COMPLEX
+The complex bit must be specified if the message body contains port
+rights or out-of-line memory regions. If it is not specified, then the
+message body carries no port rights or memory, no matter what the type
+descriptors may seem to indicate.
+@end table
+
+@code{MACH_MSGH_BITS_REMOTE} and @code{MACH_MSGH_BITS_LOCAL} macros
+return the appropriate @code{mach_msg_type_name_t} values, given a
+@code{msgh_bits} value. The @code{MACH_MSGH_BITS} macro constructs a
+value for @code{msgh_bits}, given two @code{mach_msg_type_name_t}
+values.
+
+@item mach_msg_size_t msgh_size
+The @code{msgh_size} field in the header of a received message contains
+the message's size. The message size, a byte quantity, includes the
+message header, type descriptors, and in-line data. For out-of-line
+memory regions, the message size includes the size of the in-line
+address, not the size of the actual memory region. There are no
+arbitrary limits on the size of a Mach message, the number of data items
+in a message, or the size of the data items.
+
+@item mach_port_t msgh_remote_port
+The @code{msgh_remote_port} field specifies the destination port of the
+message. The field must carry a legitimate send or send-once right for
+a port.
+
+@item mach_port_t msgh_local_port
+The @code{msgh_local_port} field specifies an auxiliary port right,
+which is conventionally used as a reply port by the recipient of the
+message. The field must carry a send right, a send-once right,
+@code{MACH_PORT_NULL}, or @code{MACH_PORT_DEAD}.
+
+@item unsigned long msgh_protected_payload
+The @code{msgh_protected_payload} field carries a payload that is set
+by the kernel during message delivery. The payload is an opaque
+identifier that can be used by the receiver to lookup the associated
+data structure.
+
+It is only valid in received messages. See @ref{Message Receive} for
+further information.
+
+@item mach_port_seqno_t msgh_seqno
+The @code{msgh_seqno} field provides a sequence number for the message.
+It is only valid in received messages; its value in sent messages is
+overwritten.
+@c XXX The "MESSAGE RECEIVE" section discusses message sequence numbers.
+
+@item mach_msg_id_t msgh_id
+The @code{mach_msg} call doesn't use the @code{msgh_id} field, but it
+conventionally conveys an operation or function id.
+@end table
+@end deftp
+
+@deftypefn Macro mach_msg_bits_t MACH_MSGH_BITS (@w{mach_msg_type_name_t @var{remote}}, @w{mach_msg_type_name_t @var{local}})
+This macro composes two @code{mach_msg_type_name_t} values that specify
+the port rights in the @code{msgh_remote_port} and
+@code{msgh_local_port} fields of a @code{mach_msg} call into an
+appropriate @code{mach_msg_bits_t} value.
+@end deftypefn
+
+@deftypefn Macro mach_msg_type_name_t MACH_MSGH_BITS_REMOTE (@w{mach_msg_bits_t @var{bits}})
+This macro extracts the @code{mach_msg_type_name_t} value for the remote
+port right in a @code{mach_msg_bits_t} value.
+@end deftypefn
+
+@deftypefn Macro mach_msg_type_name_t MACH_MSGH_BITS_LOCAL (@w{mach_msg_bits_t @var{bits}})
+This macro extracts the @code{mach_msg_type_name_t} value for the local
+port right in a @code{mach_msg_bits_t} value.
+@end deftypefn
+
+@deftypefn Macro mach_msg_bits_t MACH_MSGH_BITS_PORTS (@w{mach_msg_bits_t @var{bits}})
+This macro extracts the @code{mach_msg_bits_t} component consisting of
+the @code{mach_msg_type_name_t} values for the remote and local port
+right in a @code{mach_msg_bits_t} value.
+@end deftypefn
+
+@deftypefn Macro mach_msg_bits_t MACH_MSGH_BITS_OTHER (@w{mach_msg_bits_t @var{bits}})
+This macro extracts the @code{mach_msg_bits_t} component consisting of
+everything except the @code{mach_msg_type_name_t} values for the remote
+and local port right in a @code{mach_msg_bits_t} value.
+@end deftypefn
+
+Each data item has a type descriptor, a @code{mach_msg_type_t} or a
+@code{mach_msg_type_long_t}. The @code{mach_msg_type_long_t} type
+descriptor allows larger values for some fields. The
+@code{msgtl_header} field in the long descriptor is only used for its
+inline, longform, and deallocate bits.
+
+@deftp {Data type} mach_msg_type_name_t
+This is an @code{unsigned int} and can be used to hold the
+@code{msgt_name} component of the @code{mach_msg_type_t} and
+@code{mach_msg_type_long_t} structure.
+@end deftp
+
+@deftp {Data type} mach_msg_type_size_t
+This is an @code{unsigned int} and can be used to hold the
+@code{msgt_size} component of the @code{mach_msg_type_t} and
+@code{mach_msg_type_long_t} structure.
+@end deftp
+
+@deftp {Data type} mach_msg_type_number_t
+This is a @code{natural_t} and can be used to hold the
+@code{msgt_number} component of the @code{mach_msg_type_t} and
+@code{mach_msg_type_long_t} structure.
+@c XXX This is used for the size of arrays, too. Mmh?
+@end deftp
+
+@deftp {Data type} mach_msg_type_t
+This structure has the following members:
+
+@table @code
+@item unsigned int msgt_name : 8
+The @code{msgt_name} field specifies the data's type. The following
+types are predefined:
+
+@table @code
+@item MACH_MSG_TYPE_UNSTRUCTURED
+@item MACH_MSG_TYPE_BIT
+@item MACH_MSG_TYPE_BOOLEAN
+@item MACH_MSG_TYPE_INTEGER_16
+@item MACH_MSG_TYPE_INTEGER_32
+@item MACH_MSG_TYPE_CHAR
+@item MACH_MSG_TYPE_BYTE
+@item MACH_MSG_TYPE_INTEGER_8
+@item MACH_MSG_TYPE_REAL
+@item MACH_MSG_TYPE_STRING
+@item MACH_MSG_TYPE_STRING_C
+@item MACH_MSG_TYPE_PORT_NAME
+@item MACH_MSG_TYPE_PROTECTED_PAYLOAD
+@end table
+
+The following predefined types specify port rights, and receive special
+treatment. The next section discusses these types in detail. The type
+@c XXX cross ref
+@code{MACH_MSG_TYPE_PORT_NAME} describes port right names, when no
+rights are being transferred, but just names. For this purpose, it
+should be used in preference to @code{MACH_MSG_TYPE_INTEGER_32}.
+
+@table @code
+@item MACH_MSG_TYPE_MOVE_RECEIVE
+@item MACH_MSG_TYPE_MOVE_SEND
+@item MACH_MSG_TYPE_MOVE_SEND_ONCE
+@item MACH_MSG_TYPE_COPY_SEND
+@item MACH_MSG_TYPE_MAKE_SEND
+@item MACH_MSG_TYPE_MAKE_SEND_ONCE
+@end table
+
+The type @code{MACH_MSG_TYPE_PROTECTED_PAYLOAD} is used by the kernel
+to indicate that a delivered message carries a payload in the
+@code{msgh_protected_payload} field. See @ref{Message Receive} for
+more information.
+
+@item msgt_size : 8
+The @code{msgt_size} field specifies the size of each datum, in bits. For
+example, the msgt_size of @code{MACH_MSG_TYPE_INTEGER_32} data is 32.
+
+@item msgt_number : 12
+The @code{msgt_number} field specifies how many data elements comprise
+the data item. Zero is a legitimate number.
+
+The total length specified by a type descriptor is @w{@code{(msgt_size *
+msgt_number)}}, rounded up to an integral number of bytes. In-line data
+is then padded to an integral number of long-words. This ensures that
+type descriptors always start on long-word boundaries. It implies that
+message sizes are always an integral multiple of a long-word's size.
+
+@item msgt_inline : 1
+The @code{msgt_inline} bit specifies, when @code{FALSE}, that the data
+actually resides in an out-of-line region. The address of the memory
+region (a @code{vm_offset_t} or @code{vm_address_t}) follows the type
+descriptor in the message body. The @code{msgt_name}, @code{msgt_size},
+and @code{msgt_number} fields describe the memory region, not the
+address.
+
+@item msgt_longform : 1
+The @code{msgt_longform} bit specifies, when @code{TRUE}, that this type
+descriptor is a @code{mach_msg_type_long_t} instead of a
+@code{mach_msg_type_t}. The @code{msgt_name}, @code{msgt_size}, and
+@code{msgt_number} fields should be zero. Instead, @code{mach_msg} uses
+the following @code{msgtl_name}, @code{msgtl_size}, and
+@code{msgtl_number} fields.
+
+@item msgt_deallocate : 1
+The @code{msgt_deallocate} bit is used with out-of-line regions. When
+@code{TRUE}, it specifies that the memory region should be deallocated
+from the sender's address space (as if with @code{vm_deallocate}) when
+the message is sent.
+
+@item msgt_unused : 1
+The @code{msgt_unused} bit should be zero.
+@end table
+@end deftp
+
+@deftypefn Macro boolean_t MACH_MSG_TYPE_PORT_ANY (mach_msg_type_name_t type)
+This macro returns @code{TRUE} if the given type name specifies a port
+type, otherwise it returns @code{FALSE}.
+@end deftypefn
+
+@deftypefn Macro boolean_t MACH_MSG_TYPE_PORT_ANY_SEND (mach_msg_type_name_t type)
+This macro returns @code{TRUE} if the given type name specifies a port
+type with a send or send-once right, otherwise it returns @code{FALSE}.
+@end deftypefn
+
+@deftypefn Macro boolean_t MACH_MSG_TYPE_PORT_ANY_RIGHT (mach_msg_type_name_t type)
+This macro returns @code{TRUE} if the given type name specifies a port
+right type which is moved, otherwise it returns @code{FALSE}.
+@end deftypefn
+
+@deftp {Data type} mach_msg_type_long_t
+This structure has the following members:
+
+@table @code
+@item mach_msg_type_t msgtl_header
+This is a @code{mach_msg_type_t}; only its @code{msgt_inline},
+@code{msgt_longform}, and @code{msgt_deallocate} bits are used.
+@c XXX cross ref
+
+@item unsigned short msgtl_name
+Same meaning as @code{msgt_name}.
+
+@item unsigned short msgtl_size
+Same meaning as @code{msgt_size}.
+
+@item unsigned int msgtl_number
+Same meaning as @code{msgt_number}.
+@end table
+@end deftp
+
+
+@node Exchanging Port Rights
+@subsection Exchanging Port Rights
+@cindex sending port rights
+@cindex receiving port rights
+@cindex moving port rights
+
+Each task has its own space of port rights. Port rights are named with
+positive integers. Except for the reserved values
+@w{@code{MACH_PORT_NULL (0)}@footnote{In the Hurd system, we don't make
+the assumption that @code{MACH_PORT_NULL} is zero and evaluates to
+false, but rather compare port names to @code{MACH_PORT_NULL}
+explicitly.}} and @w{@code{MACH_PORT_DEAD (~0)}}, this is a full 32-bit
+name space. When the kernel chooses a name for a new right, it is free
+to pick any unused name (one which denotes no right) in the space.
+
+There are five basic kinds of rights: receive rights, send rights,
+send-once rights, port-set rights, and dead names. Dead names are not
+capabilities. They act as place-holders to prevent a name from being
+otherwise used.
+
+A port is destroyed, or dies, when its receive right is deallocated.
+When a port dies, send and send-once rights for the port turn into dead
+names. Any messages queued at the port are destroyed, which deallocates
+the port rights and out-of-line memory in the messages.
+
+Tasks may hold multiple user-references for send rights and dead names.
+When a task receives a send right which it already holds, the kernel
+increments the right's user-reference count. When a task deallocates a
+send right, the kernel decrements its user-reference count, and the task
+only loses the send right when the count goes to zero.
+
+Send-once rights always have a user-reference count of one, although a
+port can have multiple send-once rights, because each send-once right
+held by a task has a different name. In contrast, when a task holds
+send rights or a receive right for a port, the rights share a single
+name.
+
+A message body can carry port rights; the @code{msgt_name}
+(@code{msgtl_name}) field in a type descriptor specifies the type of
+port right and how the port right is to be extracted from the caller.
+The values @code{MACH_PORT_NULL} and @code{MACH_PORT_DEAD} are always
+valid in place of a port right in a message body. In a sent message,
+the following @code{msgt_name} values denote port rights:
+
+@table @code
+@item MACH_MSG_TYPE_MAKE_SEND
+The message will carry a send right, but the caller must supply a
+receive right. The send right is created from the receive right, and
+the receive right's make-send count is incremented.
+
+@item MACH_MSG_TYPE_COPY_SEND
+The message will carry a send right, and the caller should supply a send
+right. The user reference count for the supplied send right is not
+changed. The caller may also supply a dead name and the receiving task
+will get @code{MACH_PORT_DEAD}.
+
+@item MACH_MSG_TYPE_MOVE_SEND
+The message will carry a send right, and the caller should supply a send
+right. The user reference count for the supplied send right is
+decremented, and the right is destroyed if the count becomes zero.
+Unless a receive right remains, the name becomes available for
+recycling. The caller may also supply a dead name, which loses a user
+reference, and the receiving task will get @code{MACH_PORT_DEAD}.
+
+@item MACH_MSG_TYPE_MAKE_SEND_ONCE
+The message will carry a send-once right, but the caller must supply a
+receive right. The send-once right is created from the receive right.
+
+@item MACH_MSG_TYPE_MOVE_SEND_ONCE
+The message will carry a send-once right, and the caller should supply a
+send-once right. The caller loses the supplied send-once right. The
+caller may also supply a dead name, which loses a user reference, and
+the receiving task will get @code{MACH_PORT_DEAD}.
+
+@item MACH_MSG_TYPE_MOVE_RECEIVE
+The message will carry a receive right, and the caller should supply a
+receive right. The caller loses the supplied receive right, but retains
+any send rights with the same name.
+@end table
+
+If a message carries a send or send-once right, and the port dies while
+the message is in transit, then the receiving task will get
+@code{MACH_PORT_DEAD} instead of a right. The following
+@code{msgt_name} values in a received message indicate that it carries
+port rights:
+
+@table @code
+@item MACH_MSG_TYPE_PORT_SEND
+This name is an alias for @code{MACH_MSG_TYPE_MOVE_SEND}. The message
+carried a send right. If the receiving task already has send and/or
+receive rights for the port, then that name for the port will be reused.
+Otherwise, the new right will have a new name. If the task already has
+send rights, it gains a user reference for the right (unless this would
+cause the user-reference count to overflow). Otherwise, it acquires the
+send right, with a user-reference count of one.
+
+@item MACH_MSG_TYPE_PORT_SEND_ONCE
+This name is an alias for @code{MACH_MSG_TYPE_MOVE_SEND_ONCE}. The
+message carried a send-once right. The right will have a new name.
+
+@item MACH_MSG_TYPE_PORT_RECEIVE
+This name is an alias for @code{MACH_MSG_TYPE_MOVE_RECEIVE}. The
+message carried a receive right. If the receiving task already has send
+rights for the port, then that name for the port will be reused.
+Otherwise, the right will have a new name. The make-send count of the
+receive right is reset to zero, but the port retains other attributes
+like queued messages, extant send and send-once rights, and requests for
+port-destroyed and no-senders notifications.
+@end table
+
+When the kernel chooses a new name for a port right, it can choose any
+name, other than @code{MACH_PORT_NULL} and @code{MACH_PORT_DEAD}, which
+is not currently being used for a port right or dead name. It might
+choose a name which at some previous time denoted a port right, but is
+currently unused.
+
+
+@node Memory
+@subsection Memory
+@cindex sending memory
+@cindex receiving memory
+
+A message body can contain the address of a region in the sender's
+address space which should be transferred as part of the message. The
+message carries a logical copy of the memory, but the kernel uses VM
+techniques to defer any actual page copies. Unless the sender or the
+receiver modifies the data, the physical pages remain shared.
+
+An out-of-line transfer occurs when the data's type descriptor specifies
+@code{msgt_inline} as @code{FALSE}. The address of the memory region (a
+@code{vm_offset_t} or @code{vm_address_t}) should follow the type
+descriptor in the message body. The type descriptor and the address
+contribute to the message's size (@code{send_size}, @code{msgh_size}).
+The out-of-line data does not contribute to the message's size.
+
+The name, size, and number fields in the type descriptor describe the
+type and length of the out-of-line data, not the in-line address.
+Out-of-line memory frequently requires long type descriptors
+(@code{mach_msg_type_long_t}), because the @code{msgt_number} field is
+too small to describe a page of 4K bytes.
+
+Out-of-line memory arrives somewhere in the receiver's address space as
+new memory. It has the same inheritance and protection attributes as
+newly @code{vm_allocate}'d memory. The receiver has the responsibility
+of deallocating (with @code{vm_deallocate}) the memory when it is no
+longer needed. Security-conscious receivers should exercise caution
+when using out-of-line memory from untrustworthy sources, because the
+memory may be backed by an unreliable memory manager.
+
+Null out-of-line memory is legal. If the out-of-line region size is
+zero (for example, because @code{msgtl_number} is zero), then the
+region's specified address is ignored. A received null out-of-line
+memory region always has a zero address.
+
+Unaligned addresses and region sizes that are not page multiples are
+legal. A received message can also contain memory with unaligned
+addresses and funny sizes. In the general case, the first and last
+pages in the new memory region in the receiver do not contain only data
+from the sender, but are partly zero.@footnote{Sending out-of-line
+memory with a non-page-aligned address, or a size which is not a page
+multiple, works but with a caveat. The extra bytes in the first and
+last page of the received memory are not zeroed, so the receiver can
+peek at more data than the sender intended to transfer. This might be a
+security problem for the sender.} The received address points to the
+start of the data in the first page. This possibility doesn't
+complicate deallocation, because @code{vm_deallocate} does the right
+thing, rounding the start address down and the end address up to
+deallocate all arrived pages.
+
+Out-of-line memory has a deallocate option, controlled by the
+@code{msgt_deallocate} bit. If it is @code{TRUE} and the out-of-line
+memory region is not null, then the region is implicitly deallocated
+from the sender, as if by @code{vm_deallocate}. In particular, the
+start and end addresses are rounded so that every page overlapped by the
+memory region is deallocated. The use of @code{msgt_deallocate}
+effectively changes the memory copy into a memory movement. In a
+received message, @code{msgt_deallocate} is @code{TRUE} in type
+descriptors for out-of-line memory.
+
+Out-of-line memory can carry port rights.
+
+
+@node Message Send
+@subsection Message Send
+@cindex sending messages
+
+The send operation queues a message to a port. The message carries a
+copy of the caller's data. After the send, the caller can freely modify
+the message buffer or the out-of-line memory regions and the message
+contents will remain unchanged.
+
+Message delivery is reliable and sequenced. Messages are not lost, and
+messages sent to a port, from a single thread, are received in the order
+in which they were sent.
+
+If the destination port's queue is full, then several things can happen.
+If the message is sent to a send-once right (@code{msgh_remote_port}
+carries a send-once right), then the kernel ignores the queue limit and
+delivers the message. Otherwise the caller blocks until there is room
+in the queue, unless the @code{MACH_SEND_TIMEOUT} or
+@code{MACH_SEND_NOTIFY} options are used. If a port has several blocked
+senders, then any of them may queue the next message when space in the
+queue becomes available, with the proviso that a blocked sender will not
+be indefinitely starved.
+
+These options modify @code{MACH_SEND_MSG}. If @code{MACH_SEND_MSG} is
+not also specified, they are ignored.
+
+@table @code
+@item MACH_SEND_TIMEOUT
+The timeout argument should specify a maximum time (in milliseconds) for
+the call to block before giving up.@footnote{If @code{MACH_SEND_TIMEOUT} is
+used without @code{MACH_SEND_INTERRUPT}, then the timeout duration might not be
+accurate. When the call is interrupted and automatically retried, the
+original timeout is used. If interrupts occur frequently enough, the
+timeout interval might never expire.} If the message can't be queued
+before the timeout interval elapses, then the call returns
+@code{MACH_SEND_TIMED_OUT}. A zero timeout is legitimate.
+
+@item MACH_SEND_NOTIFY
+The notify argument should specify a receive right for a notify port.
+If the send were to block, then instead the message is queued,
+@code{MACH_SEND_WILL_NOTIFY} is returned, and a msg-accepted
+notification is requested. If @code{MACH_SEND_TIMEOUT} is also
+specified, then @code{MACH_SEND_NOTIFY} doesn't take effect until the
+timeout interval elapses.
+
+With @code{MACH_SEND_NOTIFY}, a task can forcibly queue to a send right
+one message at a time. A msg-accepted notification is sent to the
+notify port when another message can be forcibly queued. If an attempt
+is made to use @code{MACH_SEND_NOTIFY} before then, the call returns a
+@code{MACH_SEND_NOTIFY_IN_PROGRESS} error.
+
+The msg-accepted notification carries the name of the send right. If
+the send right is deallocated before the msg-accepted notification is
+generated, then the msg-accepted notification carries the value
+@code{MACH_PORT_NULL}. If the destination port is destroyed before the
+notification is generated, then a send-once notification is generated
+instead.
+
+@item MACH_SEND_INTERRUPT
+If specified, the @code{mach_msg} call will return
+@code{MACH_SEND_INTERRUPTED} if a software interrupt aborts the call.
+Otherwise, the send operation will be retried.
+
+@item MACH_SEND_CANCEL
+The notify argument should specify a receive right for a notify port.
+If the send operation removes the destination port right from the
+caller, and the removed right had a dead-name request registered for it,
+and notify is the notify port for the dead-name request, then the
+dead-name request may be silently canceled (instead of resulting in a
+port-deleted notification).
+
+This option is typically used to cancel a dead-name request made with
+the @code{MACH_RCV_NOTIFY} option. It should only be used as an optimization.
+@end table
+
+The send operation can generate the following return codes. These
+return codes imply that the call did nothing:
+
+@table @code
+@item MACH_SEND_MSG_TOO_SMALL
+The specified send_size was smaller than the minimum size for a message.
+
+@item MACH_SEND_NO_BUFFER
+A resource shortage prevented the kernel from allocating a message
+buffer.
+
+@item MACH_SEND_INVALID_DATA
+The supplied message buffer was not readable.
+
+@item MACH_SEND_INVALID_HEADER
+The @code{msgh_bits} value was invalid.
+
+@item MACH_SEND_INVALID_DEST
+The @code{msgh_remote_port} value was invalid.
+
+@item MACH_SEND_INVALID_REPLY
+The @code{msgh_local_port} value was invalid.
+
+@item MACH_SEND_INVALID_NOTIFY
+When using @code{MACH_SEND_CANCEL}, the notify argument did not denote a
+valid receive right.
+@end table
+
+These return codes imply that some or all of the message was destroyed:
+
+@table @code
+@item MACH_SEND_INVALID_MEMORY
+The message body specified out-of-line data that was not readable.
+
+@item MACH_SEND_INVALID_RIGHT
+The message body specified a port right which the caller didn't possess.
+
+@item MACH_SEND_INVALID_TYPE
+A type descriptor was invalid.
+
+@item MACH_SEND_MSG_TOO_SMALL
+The last data item in the message ran over the end of the message.
+@end table
+
+These return codes imply that the message was returned to the caller
+with a pseudo-receive operation:
+
+@table @code
+@item MACH_SEND_TIMED_OUT
+The timeout interval expired.
+
+@item MACH_SEND_INTERRUPTED
+A software interrupt occurred.
+
+@item MACH_SEND_INVALID_NOTIFY
+When using @code{MACH_SEND_NOTIFY}, the notify argument did not denote a
+valid receive right.
+
+@item MACH_SEND_NO_NOTIFY
+A resource shortage prevented the kernel from setting up a msg-accepted
+notification.
+
+@item MACH_SEND_NOTIFY_IN_PROGRESS
+A msg-accepted notification was already requested, and hasn't yet been
+generated.
+@end table
+
+These return codes imply that the message was queued:
+
+@table @code
+@item MACH_SEND_WILL_NOTIFY
+The message was forcibly queued, and a msg-accepted notification was
+requested.
+
+@item MACH_MSG_SUCCESS
+The message was queued.
+@end table
+
+Some return codes, like @code{MACH_SEND_TIMED_OUT}, imply that the
+message was almost sent, but could not be queued. In these situations,
+the kernel tries to return the message contents to the caller with a
+pseudo-receive operation. This prevents the loss of port rights or
+memory which only exist in the message. For example, a receive right
+which was moved into the message, or out-of-line memory sent with the
+deallocate bit.
+
+The pseudo-receive operation is very similar to a normal receive
+operation. The pseudo-receive handles the port rights in the message
+header as if they were in the message body. They are not reversed.
+After the pseudo-receive, the message is ready to be resent. If the
+message is not resent, note that out-of-line memory regions may have
+moved and some port rights may have changed names.
+
+The pseudo-receive operation may encounter resource shortages. This is
+similar to a @code{MACH_RCV_BODY_ERROR} return code from a receive
+operation. When this happens, the normal send return codes are
+augmented with the @code{MACH_MSG_IPC_SPACE}, @code{MACH_MSG_VM_SPACE},
+@code{MACH_MSG_IPC_KERNEL}, and @code{MACH_MSG_VM_KERNEL} bits to
+indicate the nature of the resource shortage.
+
+The queueing of a message carrying receive rights may create a circular
+loop of receive rights and messages, which can never be received. For
+example, a message carrying a receive right can be sent to that receive
+right. This situation is not an error, but the kernel will
+garbage-collect such loops, destroying the messages and ports involved.
+
+
+@node Message Receive
+@subsection Message Receive
+
+The receive operation dequeues a message from a port. The receiving
+task acquires the port rights and out-of-line memory regions carried in
+the message.
+
+The @code{rcv_name} argument specifies a port or port set from which to
+receive. If a port is specified, the caller must possess the receive
+right for the port and the port must not be a member of a port set. If
+no message is present, then the call blocks, subject to the
+@code{MACH_RCV_TIMEOUT} option.
+
+If a port set is specified, the call will receive a message sent to any
+of the member ports. It is permissible for the port set to have no
+member ports, and ports may be added and removed while a receive from
+the port set is in progress. The received message can come from any of
+the member ports which have messages, with the proviso that a member
+port with messages will not be indefinitely starved. The
+@code{msgh_local_port} field in the received message header specifies
+from which port in the port set the message came.
+
+The @code{rcv_size} argument specifies the size of the caller's message
+buffer. The @code{mach_msg} call will not receive a message larger than
+@code{rcv_size}. Messages that are too large are destroyed, unless the
+@code{MACH_RCV_LARGE} option is used.
+
+The destination and reply ports are reversed in a received message
+header. The @code{msgh_local_port} field names the destination port,
+from which the message was received, and the @code{msgh_remote_port}
+field names the reply port right. The bits in @code{msgh_bits} are also
+reversed. The @code{MACH_MSGH_BITS_LOCAL} bits have the value
+@code{MACH_MSG_TYPE_PORT_SEND} if the message was sent to a send right,
+and the value @code{MACH_MSG_TYPE_PORT_SEND_ONCE} if it was sent to a
+send-once right. The @code{MACH_MSGH_BITS_REMOTE} bits describe the
+reply port right.
+
+A received message can contain port rights and out-of-line memory. The
+@code{msgh_local_port} field does not receive a port right; the act of
+receiving the message destroys the send or send-once right for the
+destination port. The @code{msgh_remote_port} field does name a
+received port right, the reply port right, and the message body can
+carry port rights and memory if @code{MACH_MSGH_BITS_COMPLEX} is
+present in @code{msgh_bits}.
+Received port rights and memory should be consumed or deallocated in
+some fashion.
+
+In almost all cases, @code{msgh_local_port} will specify the name of a
+receive right, either @code{rcv_name} or, if @code{rcv_name} is a port
+set, a member of @code{rcv_name}. If other threads are concurrently
+manipulating the receive right, the situation is more complicated. If
+the receive right is renamed during the call, then
+@code{msgh_local_port} specifies the right's new name. If the caller
+loses the receive right after the message was dequeued from it, then
+@code{mach_msg} will proceed instead of returning
+@code{MACH_RCV_PORT_DIED}. If the receive right was destroyed, then
+@code{msgh_local_port} specifies @code{MACH_PORT_DEAD}. If the receive
+right still exists, but isn't held by the caller, then
+@code{msgh_local_port} specifies @code{MACH_PORT_NULL}.
+
+Servers usually associate some state with a receive right. To that
+end, they might use a hash table to look up the state for the port a
+message was sent to. To optimize this, a task may associate an opaque
+@var{payload} with a receive right using the
+@code{mach_port_set_protected_payload} function. Once this is done,
+the kernel will set the @code{msgh_protected_payload} field to
+@var{payload} when delivering a message to this right and indicate
+this by setting the local part of @code{msgh_bits} to
+@code{MACH_MSG_TYPE_PROTECTED_PAYLOAD}.
+
+The support for protected payloads was added to GNU Mach. To preserve
+binary compatibility, the @code{msgh_local_port} and
+@code{msgh_protected_payload} fields share the same location. This makes it
+possible to add the payload information without increasing the size of
+@code{mach_msg_header_t}. This is an implementation detail. Which
+field is valid is determined by the local part of the
+@code{msgh_bits}. Existing software is not affected. When a receive
+right is transferred to another task, its payload is cleared.
+
+Received messages are stamped with a sequence number, taken from the
+port from which the message was received. (Messages received from a
+port set are stamped with a sequence number from the appropriate member
+port.) Newly created ports start with a zero sequence number, and the
+sequence number is reset to zero whenever the port's receive right moves
+between tasks. When a message is dequeued from the port, it is stamped
+with the port's sequence number and the port's sequence number is then
+incremented. The dequeue and increment operations are atomic, so that
+multiple threads receiving messages from a port can use the
+@code{msgh_seqno} field to reconstruct the original order of the
+messages.
+
+These options modify @code{MACH_RCV_MSG}. If @code{MACH_RCV_MSG} is not
+also specified, they are ignored.
+
+@table @code
+@item MACH_RCV_TIMEOUT
+The timeout argument should specify a maximum time (in milliseconds) for
+the call to block before giving up.@footnote{If @code{MACH_RCV_TIMEOUT}
+is used without @code{MACH_RCV_INTERRUPT}, then the timeout duration
+might not be
+accurate. When the call is interrupted and automatically retried, the
+original timeout is used. If interrupts occur frequently enough, the
+timeout interval might never expire.} If no message arrives before the
+timeout interval elapses, then the call returns
+@code{MACH_RCV_TIMED_OUT}. A zero timeout is legitimate.
+
+@item MACH_RCV_NOTIFY
+The notify argument should specify a receive right for a notify port.
+If receiving the reply port creates a new port right in the caller, then
+the notify port is used to request a dead-name notification for the new
+port right.
+
+@item MACH_RCV_INTERRUPT
+If specified, the @code{mach_msg} call will return
+@code{MACH_RCV_INTERRUPTED} if a software interrupt aborts the call.
+Otherwise, the receive operation will be retried.
+
+@item MACH_RCV_LARGE
+If the message is larger than @code{rcv_size}, then the message remains
+queued instead of being destroyed. The call returns
+@code{MACH_RCV_TOO_LARGE} and the actual size of the message is returned
+in the @code{msgh_size} field of the message header.
+@end table
+
+The receive operation can generate the following return codes. These
+return codes imply that the call did not dequeue a message:
+
+@table @code
+@item MACH_RCV_INVALID_NAME
+The specified @code{rcv_name} was invalid.
+
+@item MACH_RCV_IN_SET
+The specified port was a member of a port set.
+
+@item MACH_RCV_TIMED_OUT
+The timeout interval expired.
+
+@item MACH_RCV_INTERRUPTED
+A software interrupt occurred.
+
+@item MACH_RCV_PORT_DIED
+The caller lost the rights specified by @code{rcv_name}.
+
+@item MACH_RCV_PORT_CHANGED
+@code{rcv_name} specified a receive right which was moved into a port
+set during the call.
+
+@item MACH_RCV_TOO_LARGE
+When using @code{MACH_RCV_LARGE}, and the message was larger than
+@code{rcv_size}. The message is left queued, and its actual size is
+returned in the @code{msgh_size} field of the message buffer.
+@end table
+
+These return codes imply that a message was dequeued and destroyed:
+
+@table @code
+@item MACH_RCV_HEADER_ERROR
+A resource shortage prevented the reception of the port rights in the
+message header.
+
+@item MACH_RCV_INVALID_NOTIFY
+When using @code{MACH_RCV_NOTIFY}, the notify argument did not denote a
+valid receive right.
+
+@item MACH_RCV_TOO_LARGE
+When not using @code{MACH_RCV_LARGE}, a message larger than
+@code{rcv_size} was dequeued and destroyed.
+@end table
+
+In these situations, when a message is dequeued and then destroyed, the
+reply port and all port rights and memory in the message body are
+destroyed. However, the caller receives the message's header, with all
+fields correct, including the destination port but excepting the reply
+port, which is @code{MACH_PORT_NULL}.
+
+These return codes imply that a message was received:
+
+@table @code
+@item MACH_RCV_BODY_ERROR
+A resource shortage prevented the reception of a port right or
+out-of-line memory region in the message body. The message header,
+including the reply port, is correct. The kernel attempts to transfer
+all port rights and memory regions in the body, and only destroys those
+that can't be transferred.
+
+@item MACH_RCV_INVALID_DATA
+The specified message buffer was not writable. The calling task did
+successfully receive the port rights and out-of-line memory regions in
+the message.
+
+@item MACH_MSG_SUCCESS
+A message was received.
+@end table
+
+Resource shortages can occur after a message is dequeued, while
+transferring port rights and out-of-line memory regions to the receiving
+task. The @code{mach_msg} call returns @code{MACH_RCV_HEADER_ERROR} or
+@code{MACH_RCV_BODY_ERROR} in this situation. These return codes always
+carry extra bits (bitwise-ored) that indicate the nature of the resource
+shortage:
+
+@table @code
+@item MACH_MSG_IPC_SPACE
+There was no room in the task's IPC name space for another port name.
+
+@item MACH_MSG_VM_SPACE
+There was no room in the task's VM address space for an out-of-line
+memory region.
+
+@item MACH_MSG_IPC_KERNEL
+A kernel resource shortage prevented the reception of a port right.
+
+@item MACH_MSG_VM_KERNEL
+A kernel resource shortage prevented the reception of an out-of-line
+memory region.
+@end table
+
+If a resource shortage prevents the reception of a port right, the port
+right is destroyed and the caller sees the name @code{MACH_PORT_NULL}.
+If a resource shortage prevents the reception of an out-of-line memory
+region, the region is destroyed and the caller receives a zero address.
+In addition, the @code{msgt_size} (@code{msgtl_size}) field in the
+data's type descriptor is changed to zero. If a resource shortage
+prevents the reception of out-of-line memory carrying port rights, then
+the port rights are always destroyed if the memory region can not be
+received. A task never receives port rights or memory regions that it
+isn't told about.
+
+
+@node Atomicity
+@subsection Atomicity
+
+The @code{mach_msg} call handles port rights in a message header
+atomically. Port rights and out-of-line memory in a message body do not
+enjoy this atomicity guarantee. The message body may be processed
+front-to-back, back-to-front, first out-of-line memory then port rights,
+in some random order, or even atomically.
+
+For example, consider sending a message with the destination port
+specified as @code{MACH_MSG_TYPE_MOVE_SEND} and the reply port specified
+as @code{MACH_MSG_TYPE_COPY_SEND}. The same send right, with one
+user-reference, is supplied for both the @code{msgh_remote_port} and
+@code{msgh_local_port} fields. Because @code{mach_msg} processes the
+message header atomically, this succeeds. If @code{msgh_remote_port}
+were processed before @code{msgh_local_port}, then @code{mach_msg} would
+return @code{MACH_SEND_INVALID_REPLY} in this situation.
+
+On the other hand, suppose the destination and reply port are both
+specified as @code{MACH_MSG_TYPE_MOVE_SEND}, and again the same send
+right with one user-reference is supplied for both. Now the send
+operation fails, but because it processes the header atomically,
+@code{mach_msg} can return either @code{MACH_SEND_INVALID_DEST} or
+@code{MACH_SEND_INVALID_REPLY}.
+
+For example, consider receiving a message at the same time another
+thread is deallocating the destination receive right. Suppose the reply
+port field carries a send right for the destination port. If the
+deallocation happens before the dequeuing, then the receiver gets
+@code{MACH_RCV_PORT_DIED}. If the deallocation happens after the
+receive, then the @code{msgh_local_port} and the @code{msgh_remote_port}
+fields both specify the same right, which becomes a dead name when the
+receive right is deallocated. If the deallocation happens between the
+dequeue and the receive, then the @code{msgh_local_port} and
+@code{msgh_remote_port} fields both specify @code{MACH_PORT_DEAD}.
+Because the header is processed atomically, it is not possible for just
+one of the two fields to hold @code{MACH_PORT_DEAD}.
+
+The @code{MACH_RCV_NOTIFY} option provides a more likely example.
+Suppose a message carrying a send-once right reply port is received with
+@code{MACH_RCV_NOTIFY} at the same time the reply port is destroyed. If
+the reply port is destroyed first, then @code{msgh_remote_port}
+specifies @code{MACH_PORT_DEAD} and the kernel does not generate a
+dead-name notification. If the reply port is destroyed after it is
+received, then @code{msgh_remote_port} specifies a dead name for which
+the kernel generates a dead-name notification. It is not possible to
+receive the reply port right and have it turn into a dead name before
+the dead-name notification is requested; as part of the message header
+the reply port is received atomically.
+
+
+@node Port Manipulation Interface
+@section Port Manipulation Interface
+
+This section describes the interface to create, destroy and manipulate
+ports, port rights and port sets.
+
+@cindex IPC space port
+@cindex port representing an IPC space
+@deftp {Data type} ipc_space_t
+This is a @code{task_t} (and as such a @code{mach_port_t}), which holds
+a port name associated with a port that represents an IPC space in the
+kernel. An IPC space is used by the kernel to manage the port names and
+rights available to a task. The IPC space doesn't get a port name of
+its own. Instead the port name of the task containing the IPC space is
+used to name the IPC space of the task (as is indicated by the fact that
+the type of @code{ipc_space_t} is actually @code{task_t}).
+
+The IPC spaces of tasks are the only ones accessible outside of
+the kernel.
+@end deftp
+
+@menu
+* Port Creation:: How to create new ports and port sets.
+* Port Destruction:: How to destroy ports and port sets.
+* Port Names:: How to query and manipulate port names.
+* Port Rights:: How to work with port rights.
+* Ports and other Tasks:: How to move rights between tasks.
+* Receive Rights:: How to work with receive rights.
+* Port Sets:: How to work with port sets.
+* Request Notifications:: How to request notifications for events.
+* Inherited Ports:: How to work with the inherited system ports.
+@end menu
+
+
+@node Port Creation
+@subsection Port Creation
+
+@deftypefun kern_return_t mach_port_allocate (@w{ipc_space_t @var{task}}, @w{mach_port_right_t @var{right}}, @w{mach_port_t *@var{name}})
+The @code{mach_port_allocate} function creates a new right in the
+specified task. The new right's name is returned in @var{name}, which
+may be any name that wasn't in use.
+
+The @var{right} argument takes the following values:
+
+@table @code
+@item MACH_PORT_RIGHT_RECEIVE
+@code{mach_port_allocate} creates a port. The new port is not a member
+of any port set. It doesn't have any extant send or send-once rights.
+Its make-send count is zero, its sequence number is zero, its queue
+limit is @code{MACH_PORT_QLIMIT_DEFAULT}, and it has no queued messages.
+@var{name} denotes the receive right for the new port.
+
+@var{task} does not hold send rights for the new port, only the receive
+right. @code{mach_port_insert_right} and @code{mach_port_extract_right}
+can be used to convert the receive right into a combined send/receive
+right.
+
+@item MACH_PORT_RIGHT_PORT_SET
+@code{mach_port_allocate} creates a port set. The new port set has no
+members.
+
+@item MACH_PORT_RIGHT_DEAD_NAME
+@code{mach_port_allocate} creates a dead name. The new dead name has
+one user reference.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_VALUE} if @var{right} was invalid, @code{KERN_NO_SPACE} if
+there was no room in @var{task}'s IPC name space for another right and
+@code{KERN_RESOURCE_SHORTAGE} if the kernel ran out of memory.
+
+The @code{mach_port_allocate} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun mach_port_t mach_reply_port ()
+The @code{mach_reply_port} system call creates a reply port in the
+calling task.
+
+@code{mach_reply_port} creates a port, giving the calling task the
+receive right for the port. The call returns the name of the new
+receive right.
+
+This is very much like creating a receive right with the
+@code{mach_port_allocate} call, with two differences. First,
+@code{mach_reply_port} is a system call and not an RPC (which requires a
+reply port). Second, the port created by @code{mach_reply_port} may be
+optimized for use as a reply port.
+
+The function returns @code{MACH_PORT_NULL} if a resource shortage
+prevented the creation of the receive right.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_allocate_name (@w{ipc_space_t @var{task}}, @w{mach_port_right_t @var{right}}, @w{mach_port_t @var{name}})
+The function @code{mach_port_allocate_name} creates a new right in the
+specified task, with a specified name for the new right. @var{name}
+must not already be in use for some right, and it can't be the reserved
+values @code{MACH_PORT_NULL} and @code{MACH_PORT_DEAD}.
+
+The @var{right} argument takes the following values:
+
+@table @code
+@item MACH_PORT_RIGHT_RECEIVE
+@code{mach_port_allocate_name} creates a port. The new port is not a
+member of any port set. It doesn't have any extant send or send-once
+rights. Its make-send count is zero, its sequence number is zero, its
+queue limit is @code{MACH_PORT_QLIMIT_DEFAULT}, and it has no queued
+messages. @var{name} denotes the receive right for the new port.
+
+@var{task} does not hold send rights for the new port, only the receive
+right. @code{mach_port_insert_right} and @code{mach_port_extract_right}
+can be used to convert the receive right into a combined send/receive
+right.
+
+@item MACH_PORT_RIGHT_PORT_SET
+@code{mach_port_allocate_name} creates a port set. The new port set has
+no members.
+
+@item MACH_PORT_RIGHT_DEAD_NAME
+@code{mach_port_allocate_name} creates a new dead name. The new dead
+name has one user reference.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_VALUE} if @var{right} was invalid or @var{name} was
+@code{MACH_PORT_NULL} or @code{MACH_PORT_DEAD}, @code{KERN_NAME_EXISTS}
+if @var{name} was already in use for a port right and
+@code{KERN_RESOURCE_SHORTAGE} if the kernel ran out of memory.
+
+The @code{mach_port_allocate_name} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from the
+call's server (normally the kernel), the call may return @code{mach_msg}
+return codes.
+@end deftypefun
+
+
+@node Port Destruction
+@subsection Port Destruction
+
+@deftypefun kern_return_t mach_port_deallocate (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}})
+The function @code{mach_port_deallocate} releases a user reference for a
+right in @var{task}'s IPC name space. It allows a task to release a
+user reference for a send or send-once right without failing if the port
+has died and the right is now actually a dead name.
+
+If @var{name} denotes a dead name, send right, or send-once right, then
+the right loses one user reference. If it only had one user reference,
+then the right is destroyed.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted an invalid right.
+
+The @code{mach_port_deallocate} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from the
+call's server (normally the kernel), the call may return @code{mach_msg}
+return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_destroy (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}})
+The function @code{mach_port_destroy} deallocates all rights denoted by
+a name. The name becomes immediately available for reuse.
+
+For most purposes, @code{mach_port_mod_refs} and
+@code{mach_port_deallocate} are preferable.
+
+If @var{name} denotes a port set, then all members of the port set are
+implicitly removed from the port set.
+
+If @var{name} denotes a receive right that is a member of a port set,
+the receive right is implicitly removed from the port set. If there is
+a port-destroyed request registered for the port, then the receive right
+is not actually destroyed, but instead is sent in a port-destroyed
+notification to the backup port. If there is no registered
+port-destroyed request, remaining messages queued to the port are
+destroyed and extant send and send-once rights turn into dead names. If
+those send and send-once rights have dead-name requests registered, then
+dead-name notifications are generated for them.
+
+If @var{name} denotes a send-once right, then the send-once right is
+used to produce a send-once notification for the port.
+
+If @var{name} denotes a send-once, send, and/or receive right, and it
+has a dead-name request registered, then the registered send-once right
+is used to produce a port-deleted notification for the name.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right.
+
+The @code{mach_port_destroy} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from the
+call's server (normally the kernel), the call may return @code{mach_msg}
+return codes.
+@end deftypefun
+
+
+@node Port Names
+@subsection Port Names
+
+@deftypefun kern_return_t mach_port_names (@w{ipc_space_t @var{task}}, @w{mach_port_array_t *@var{names}}, @w{mach_msg_type_number_t *@var{ncount}}, @w{mach_port_type_array_t *@var{types}}, @w{mach_msg_type_number_t *@var{tcount}})
+The function @code{mach_port_names} returns information about
+@var{task}'s port name space. For each name, it also returns what type
+of rights @var{task} holds. (The same information returned by
+@code{mach_port_type}.) @var{names} and @var{types} are arrays that are
+automatically allocated when the reply message is received. The user
+should @code{vm_deallocate} them when the data is no longer needed.
+
+@code{mach_port_names} will return in @var{names} the names of the
+ports, port sets, and dead names in the task's port name space, in no
+particular order and in @var{ncount} the number of names returned. It
+will return in @var{types} the type of each corresponding name, which
+indicates what kind of rights the task holds with that name.
+@var{tcount} should be the same as @var{ncount}.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_RESOURCE_SHORTAGE} if the kernel ran out of memory.
+
+The @code{mach_port_names} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_type (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_type_t *@var{ptype}})
+The function @code{mach_port_type} returns information about
+@var{task}'s rights for a specific name in its port name space. The
+returned @var{ptype} is a bitmask indicating what rights @var{task}
+holds for the port, port set or dead name. The bitmask is composed of
+the following bits:
+
+@table @code
+@item MACH_PORT_TYPE_SEND
+The name denotes a send right.
+
+@item MACH_PORT_TYPE_RECEIVE
+The name denotes a receive right.
+
+@item MACH_PORT_TYPE_SEND_ONCE
+The name denotes a send-once right.
+
+@item MACH_PORT_TYPE_PORT_SET
+The name denotes a port set.
+
+@item MACH_PORT_TYPE_DEAD_NAME
+The name is a dead name.
+
+@item MACH_PORT_TYPE_DNREQUEST
+A dead-name request has been registered for the right.
+
+@item MACH_PORT_TYPE_MAREQUEST
+A msg-accepted request for the right is pending.
+
+@item MACH_PORT_TYPE_COMPAT
+The port right was created in the compatibility mode.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid and
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right.
+
+The @code{mach_port_type} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_rename (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{old_name}}, @w{mach_port_t @var{new_name}})
+The function @code{mach_port_rename} changes the name by which a port,
+port set, or dead name is known to @var{task}. @var{old_name} is the
+original name and @var{new_name} the new name for the port right.
+@var{new_name} must not already be in use, and it can't be the
+distinguished values @code{MACH_PORT_NULL} and @code{MACH_PORT_DEAD}.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{old_name} did not denote a right,
+@code{KERN_INVALID_VALUE} if @var{new_name} was @code{MACH_PORT_NULL} or
+@code{MACH_PORT_DEAD}, @code{KERN_NAME_EXISTS} if @var{new_name}
+already denoted a right and @code{KERN_RESOURCE_SHORTAGE} if the kernel
+ran out of memory.
+
+The @code{mach_port_rename} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+
+@node Port Rights
+@subsection Port Rights
+
+@deftypefun kern_return_t mach_port_get_refs (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_right_t @var{right}}, @w{mach_port_urefs_t *@var{refs}})
+The function @code{mach_port_get_refs} returns the number of user
+references a task has for a right.
+
+The @var{right} argument takes the following values:
+@itemize @bullet
+@item @code{MACH_PORT_RIGHT_SEND}
+@item @code{MACH_PORT_RIGHT_RECEIVE}
+@item @code{MACH_PORT_RIGHT_SEND_ONCE}
+@item @code{MACH_PORT_RIGHT_PORT_SET}
+@item @code{MACH_PORT_RIGHT_DEAD_NAME}
+@end itemize
+
+If @var{name} denotes a right, but not the type of right specified, then
+zero is returned. Otherwise a positive number of user references is
+returned. Note that a name may simultaneously denote send and receive
+rights.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_VALUE} if @var{right} was invalid and
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right.
+
+The @code{mach_port_get_refs} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_mod_refs (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_right_t @var{right}}, @w{mach_port_delta_t @var{delta}})
+The function @code{mach_port_mod_refs} requests that the number of user
+references a task has for a right be changed. This results in the right
+being destroyed, if the number of user references is changed to zero.
+The task holding the right is @var{task}, @var{name} should denote the
+specified right. @var{right} denotes the type of right being modified.
+@var{delta} is the signed change to the number of user references.
+
+The @var{right} argument takes the following values:
+@itemize @bullet
+@item @code{MACH_PORT_RIGHT_SEND}
+@item @code{MACH_PORT_RIGHT_RECEIVE}
+@item @code{MACH_PORT_RIGHT_SEND_ONCE}
+@item @code{MACH_PORT_RIGHT_PORT_SET}
+@item @code{MACH_PORT_RIGHT_DEAD_NAME}
+@end itemize
+
+The number of user references for the right is changed by the amount
+@var{delta}, subject to the following restrictions: port sets, receive
+rights, and send-once rights may only have one user reference. The
+resulting number of user references can't be negative. If the resulting
+number of user references is zero, the effect is to deallocate the
+right. For dead names and send rights, there is an
+implementation-defined maximum number of user references.
+
+If the call destroys the right, then the effect is as described for
+@code{mach_port_destroy}, with the exception that
+@code{mach_port_destroy} simultaneously destroys all the rights denoted
+by a name, while @code{mach_port_mod_refs} can only destroy one right.
+The name will be available for reuse if it only denoted the one right.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_VALUE} if @var{right} was invalid or the
+user-reference count would become negative, @code{KERN_INVALID_NAME} if
+@var{name} did not denote a right, @code{KERN_INVALID_RIGHT} if
+@var{name} denoted a right, but not the specified right and
+@code{KERN_UREFS_OVERFLOW} if the user-reference count would overflow.
+
+The @code{mach_port_mod_refs} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+
+@node Ports and other Tasks
+@subsection Ports and other Tasks
+
+@deftypefun kern_return_t mach_port_insert_right (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_t @var{right}}, @w{mach_msg_type_name_t @var{right_type}})
+The function @code{mach_port_insert_right} inserts into @var{task} the
+caller's right for a port, using a specified name for the right in the
+target task.
+
+The specified @var{name} can't be one of the reserved values
+@code{MACH_PORT_NULL} or @code{MACH_PORT_DEAD}. The @var{right} can't
+be @code{MACH_PORT_NULL} or @code{MACH_PORT_DEAD}.
+
+The argument @var{right_type} specifies a right to be inserted and how
+that right should be extracted from the caller. It should be a value
+appropriate for @var{msgt_name}; see @code{mach_msg}. @c XXX cross ref
+
+If @var{right_type} is @code{MACH_MSG_TYPE_MAKE_SEND},
+@code{MACH_MSG_TYPE_MOVE_SEND}, or @code{MACH_MSG_TYPE_COPY_SEND}, then
+a send right is inserted. If the target already holds send or receive
+rights for the port, then @var{name} should denote those rights in the
+target. Otherwise, @var{name} should be unused in the target. If the
+target already has send rights, then those send rights gain an
+additional user reference. Otherwise, the target gains a send right,
+with a user reference count of one.
+
+If @var{right_type} is @code{MACH_MSG_TYPE_MAKE_SEND_ONCE} or
+@code{MACH_MSG_TYPE_MOVE_SEND_ONCE}, then a send-once right is inserted.
+The name should be unused in the target. The target gains a send-once
+right.
+
+If @var{right_type} is @code{MACH_MSG_TYPE_MOVE_RECEIVE}, then a receive
+right is inserted. If the target already holds send rights for the
+port, then name should denote those rights in the target. Otherwise,
+name should be unused in the target. The receive right is moved into
+the target task.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_VALUE} if @var{right} was not a port right or
+@var{name} was @code{MACH_PORT_NULL} or @code{MACH_PORT_DEAD},
+@code{KERN_NAME_EXISTS} if @var{name} already denoted a right,
+@code{KERN_INVALID_CAPABILITY} if @var{right} was @code{MACH_PORT_NULL}
+or @code{MACH_PORT_DEAD}, @code{KERN_RIGHT_EXISTS} if @var{task} already
+had rights for the port, with a different name,
+@code{KERN_UREFS_OVERFLOW} if the user-reference count would overflow
+and @code{KERN_RESOURCE_SHORTAGE} if the kernel ran out of memory.
+
+The @code{mach_port_insert_right} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_extract_right (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_msg_type_name_t @var{desired_type}}, @w{mach_port_t *@var{right}}, @w{mach_msg_type_name_t *@var{acquired_type}})
+The function @code{mach_port_extract_right} extracts a port right from
+the target @var{task} and returns it to the caller as if the task sent
+the right voluntarily, using @var{desired_type} as the value of
+@var{msgt_name}. @xref{Mach Message Call}.
+
+The returned value of @var{acquired_type} will be
+@code{MACH_MSG_TYPE_PORT_SEND} if a send right is extracted,
+@code{MACH_MSG_TYPE_PORT_RECEIVE} if a receive right is extracted, and
+@code{MACH_MSG_TYPE_PORT_SEND_ONCE} if a send-once right is extracted.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right,
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but an invalid one,
+@code{KERN_INVALID_VALUE} if @var{desired_type} was invalid.
+
+The @code{mach_port_extract_right} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from the
+call's server (normally the kernel), the call may return @code{mach_msg}
+return codes.
+@end deftypefun
+
+
+@node Receive Rights
+@subsection Receive Rights
+
+@deftp {Data type} mach_port_seqno_t
+The @code{mach_port_seqno_t} data type is an @code{unsigned int} which
+contains the sequence number of a port.
+@end deftp
+
+@deftp {Data type} mach_port_mscount_t
+The @code{mach_port_mscount_t} data type is an @code{unsigned int} which
+contains the make-send count for a port.
+@end deftp
+
+@deftp {Data type} mach_port_msgcount_t
+The @code{mach_port_msgcount_t} data type is an @code{unsigned int} which
+contains a number of messages.
+@end deftp
+
+@deftp {Data type} mach_port_rights_t
+The @code{mach_port_rights_t} data type is an @code{unsigned int} which
+contains a number of rights for a port.
+@end deftp
+
+@deftp {Data type} mach_port_status_t
+This structure contains some status information about a port, which can
+be queried with @code{mach_port_get_receive_status}. It has the following
+members:
+
+@table @code
+@item mach_port_t mps_pset
+The containing port set.
+
+@item mach_port_seqno_t mps_seqno
+The sequence number.
+
+@item mach_port_mscount_t mps_mscount
+The make-send count.
+
+@item mach_port_msgcount_t mps_qlimit
+The maximum number of messages in the queue.
+
+@item mach_port_msgcount_t mps_msgcount
+The current number of messages in the queue.
+
+@item mach_port_rights_t mps_sorights
+The number of send-once rights that exist.
+
+@item boolean_t mps_srights
+@code{TRUE} if send rights exist.
+
+@item boolean_t mps_pdrequest
+@code{TRUE} if port-deleted notification is requested.
+
+@item boolean_t mps_nsrequest
+@code{TRUE} if no-senders notification is requested.
+@end table
+@end deftp
+
+@deftypefun kern_return_t mach_port_get_receive_status (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_status_t *@var{status}})
+The function @code{mach_port_get_receive_status} returns the current
+status of the specified receive right.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right.
+
+The @code{mach_port_get_receive_status} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_set_mscount (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_mscount_t @var{mscount}})
+The function @code{mach_port_set_mscount} changes the make-send count of
+@var{task}'s receive right named @var{name} to @var{mscount}. All
+values for @var{mscount} are valid.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right.
+
+The @code{mach_port_set_mscount} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_set_qlimit (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_msgcount_t @var{qlimit}})
+The function @code{mach_port_set_qlimit} changes the queue limit of
+@var{task}'s receive right named @var{name} to @var{qlimit}. Valid
+values for @var{qlimit} are between zero and
+@code{MACH_PORT_QLIMIT_MAX}, inclusive.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right,
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right and @code{KERN_INVALID_VALUE} if @var{qlimit} was invalid.
+
+The @code{mach_port_set_qlimit} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_set_seqno (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_seqno_t @var{seqno}})
+The function @code{mach_port_set_seqno} changes the sequence number of
+@var{task}'s receive right named @var{name} to @var{seqno}. All
+sequence number values are valid. The next message received from the
+port will be stamped with the specified sequence number.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right.
+
+The @code{mach_port_set_seqno} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_set_protected_payload (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{unsigned long @var{payload}})
+The function @code{mach_port_set_protected_payload} sets the protected
+payload associated with the right @var{name} to @var{payload}.
+Section @ref{Message Receive} describes how setting a protected
+payload affects the messages delivered to @var{name}.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right.
+
+The @code{mach_port_set_protected_payload} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from
+the call's server (normally the kernel), the call may return
+@code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_clear_protected_payload (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{unsigned long @var{payload}})
+The function @code{mach_port_clear_protected_payload} clears the
+protected payload associated with the right @var{name}.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right.
+
+The @code{mach_port_clear_protected_payload} call is actually an RPC
+to @var{task}, normally a send right for a task port, but potentially
+any send right. In addition to the normal diagnostic return codes
+from the call's server (normally the kernel), the call may return
+@code{mach_msg} return codes.
+@end deftypefun
+
+@node Port Sets
+@subsection Port Sets
+
+@deftypefun kern_return_t mach_port_get_set_status (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_port_array_t *@var{members}}, @w{mach_msg_type_number_t *@var{count}})
+The function @code{mach_port_get_set_status} returns the members of a
+port set. @var{members} is an array that is automatically allocated
+when the reply message is received. The user should
+@code{vm_deallocate} it when the data is no longer needed.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right,
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right and @code{KERN_RESOURCE_SHORTAGE} if the kernel ran out of
+memory.
+
+The @code{mach_port_get_set_status} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from the
+call's server (normally the kernel), the call may return @code{mach_msg}
+return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_move_member (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{member}}, @w{mach_port_t @var{after}})
+The function @code{mach_port_move_member} moves the receive right
+@var{member} into the port set @var{after}. If the receive right is
+already a member of another port set, it is removed from that set first
+(the whole operation is atomic). If the port set is
+@code{MACH_PORT_NULL}, then the receive right is not put into a port
+set, but removed from its current port set.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{member} or @var{after} did not denote a
+right, @code{KERN_INVALID_RIGHT} if @var{member} denoted a right, but
+not a receive right or @var{after} denoted a right, but not a port set,
+and @code{KERN_NOT_IN_SET} if @var{after} was @code{MACH_PORT_NULL}, but
+@code{member} wasn't currently in a port set.
+
+The @code{mach_port_move_member} call is actually an RPC to @var{task},
+normally a send right for a task port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+
+@node Request Notifications
+@subsection Request Notifications
+
+@deftypefun kern_return_t mach_port_request_notification (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{mach_msg_id_t @var{variant}}, @w{mach_port_mscount_t @var{sync}}, @w{mach_port_t @var{notify}}, @w{mach_msg_type_name_t @var{notify_type}}, @w{mach_port_t *@var{previous}})
+The function @code{mach_port_request_notification} registers a request
+for a notification and supplies the send-once right @var{notify} to
+which the notification will be sent. The @var{notify_type} denotes the
+IPC type for the send-once right, which can be
+@code{MACH_MSG_TYPE_MAKE_SEND_ONCE} or
+@code{MACH_MSG_TYPE_MOVE_SEND_ONCE}. It is an atomic swap, returning
+the previously registered send-once right (or @code{MACH_PORT_NULL} for
+none) in @var{previous}. A previous notification request may be
+cancelled by providing @code{MACH_PORT_NULL} for @var{notify}.
+
+The @var{variant} argument takes the following values:
+
+@table @code
+@item MACH_NOTIFY_PORT_DESTROYED
+@var{sync} must be zero. The @var{name} must specify a receive right,
+and the call requests a port-destroyed notification for the receive
+right. If the receive right were to have been destroyed, say by
+@code{mach_port_destroy}, then instead the receive right will be sent in
+a port-destroyed notification to the registered send-once right.
+
+@item MACH_NOTIFY_DEAD_NAME
+The call requests a dead-name notification. @var{name} specifies send,
+receive, or send-once rights for a port. If the port is destroyed (and
+the right remains, becoming a dead name), then a dead-name notification
+which carries the name of the right will be sent to the registered
+send-once right. If @var{notify} is not null and @var{sync} is non-zero, the
+name may specify a dead name, and a dead-name notification is
+immediately generated.
+
+Whenever a dead-name notification is generated, the user reference count
+of the dead name is incremented. For example, a send right with two
+user refs has a registered dead-name request. If the port is destroyed,
+the send right turns into a dead name with three user refs (instead of
+two), and a dead-name notification is generated.
+
+If the name is made available for reuse, perhaps because of
+@code{mach_port_destroy} or @code{mach_port_mod_refs}, or the name
+denotes a send-once right which has a message sent to it, then the
+registered send-once right is used to generate a port-deleted
+notification.
+
+@item MACH_NOTIFY_NO_SENDERS
+The call requests a no-senders notification. @var{name} must specify a
+receive right. If @var{notify} is not null, and the receive right's
+make-send count is greater than or equal to the @var{sync} value, and it has
+no extant send rights, then an immediate no-senders notification is
+generated. Otherwise the notification is generated when the receive
+right next loses its last extant send right. In either case, any
+previously registered send-once right is returned.
+
+The no-senders notification carries the value the port's make-send count
+had when it was generated. The make-send count is incremented whenever
+@code{MACH_MSG_TYPE_MAKE_SEND} is used to create a new send right from
+the receive right. The make-send count is reset to zero when the
+receive right is carried in a message.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_VALUE} if @var{variant} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right,
+@code{KERN_INVALID_RIGHT} if @var{name} denoted an invalid right and
+@code{KERN_INVALID_CAPABILITY} if @var{notify} was invalid.
+
+When using @code{MACH_NOTIFY_PORT_DESTROYED}, the function returns
+@code{KERN_INVALID_VALUE} if @var{sync} wasn't zero.
+
+When using @code{MACH_NOTIFY_DEAD_NAME}, the function returns
+@code{KERN_RESOURCE_SHORTAGE} if the kernel ran out of memory,
+@code{KERN_INVALID_ARGUMENT} if @var{name} denotes a dead name, but
+@var{sync} is zero or @var{notify} is @code{MACH_PORT_NULL}, and
+@code{KERN_UREFS_OVERFLOW} if @var{name} denotes a dead name, but
+generating an immediate dead-name notification would overflow the name's
+user-reference count.
+
+The @code{mach_port_request_notification} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from the
+call's server (normally the kernel), the call may return @code{mach_msg}
+return codes.
+@end deftypefun
+
+@node Inherited Ports
+@subsection Inherited Ports
+
+The inherited ports concept is not used in the Hurd, and so the _SLOT
+macros are not defined in GNU Mach.
+
+The following section documents how @code{mach_ports_register} and
+@code{mach_ports_lookup} were originally intended to be used.
+
+@deftypefun kern_return_t mach_ports_register (@w{task_t @var{target_task}}, @w{port_array_t @var{init_port_set}}, @w{int @var{init_port_array_count}})
+@deftypefunx kern_return_t mach_ports_lookup (@w{task_t @var{target_task}}, @w{port_array_t *@var{init_port_set}}, @w{int *@var{init_port_array_count}})
+@code{mach_ports_register} manipulates the inherited ports array,
+@code{mach_ports_lookup} is used to acquire specific parent ports.
+@var{target_task} is the task to be affected. @var{init_port_set} is an
+array of system ports to be registered, or returned. Although the array
+size is given as variable, the kernel will only accept a limited number
+of ports. @var{init_port_array_count} is the number of ports returned
+in @var{init_port_set}.
+
+@code{mach_ports_register} registers an array of well-known system ports
+with the kernel on behalf of a specific task. Currently the ports to be
+registered are: the port to the Network Name Server, the port to the
+Environment Manager, and a port to the Service server. These port
+values must be placed in specific slots in the init_port_set. The slot
+numbers are given by the global constants defined in @file{mach_init.h}:
+@code{NAME_SERVER_SLOT}, @code{ENVIRONMENT_SLOT}, and
+@code{SERVICE_SLOT}. These ports may later be retrieved with
+@code{mach_ports_lookup}.
+
+When a new task is created (see @code{task_create}), the child task will
+be given access to these ports. Only port send rights may be
+registered. Furthermore, the number of ports which may be registered is
+fixed and given by the global constant @code{MACH_PORT_SLOTS_USED}.
+Attempts to register too many ports will fail.
+
+It is intended that this mechanism be used only for task initialization,
+and then only by runtime support modules. A parent task has three
+choices in passing these system ports to a child task. Most commonly it
+can do nothing and its child will inherit access to the same
+@var{init_port_set} that the parent has; or a parent task may register a
+set of ports it wishes to have passed to all of its children by calling
+@code{mach_ports_register} using its task port; or it may make necessary
+modifications to the set of ports it wishes its child to see, and then
+register those ports using the child's task port prior to starting the
+child's thread(s). The @code{mach_ports_lookup} call which is done by
+@code{mach_init} in the child task will acquire these initial ports for
+the child.
+
+Tasks other than the Network Name Server and the Environment Manager
+should not need access to the Service port. The Network Name Server port
+is the same for all tasks on a given machine. The Environment port is
+the only port likely to have different values for different tasks.
+
+Since the number of ports which may be registered is limited, ports
+other than those used by the runtime system to initialize a task should
+be passed to children either through an initial message, or through the
+Network Name Server for public ports, or the Environment Manager for
+private ports.
+
+The function returns @code{KERN_SUCCESS} if the memory was allocated,
+and @code{KERN_INVALID_ARGUMENT} if an attempt was made to register more
+ports than the current kernel implementation allows.
+@end deftypefun
+
+
+@node Virtual Memory Interface
+@chapter Virtual Memory Interface
+
+@cindex virtual memory map port
+@cindex port representing a virtual memory map
+@deftp {Data type} vm_task_t
+This is a @code{task_t} (and as such a @code{mach_port_t}), which holds
+a port name associated with a port that represents a virtual memory map
+in the kernel. A virtual memory map is used by the kernel to manage
+the address space of a task. The virtual memory map doesn't get a port
+name of its own. Instead the port name of the task provided with the
+virtual memory is used to name the virtual memory map of the task (as is
+indicated by the fact that the type of @code{vm_task_t} is actually
+@code{task_t}).
+
+The virtual memory maps of tasks are the only ones accessible outside of
+the kernel.
+@end deftp
+
+@menu
+* Memory Allocation:: Allocation of new virtual memory.
+* Memory Deallocation:: Freeing unused virtual memory.
+* Data Transfer:: Reading, writing and copying memory.
+* Memory Attributes:: Tweaking memory regions.
+* Mapping Memory Objects:: How to map memory objects.
+* Memory Statistics:: How to get statistics about memory usage.
+* Memory physical addresses:: How to get physical addresses of memory.
+@end menu
+
+@node Memory Allocation
+@section Memory Allocation
+
+@deftypefun kern_return_t vm_allocate (@w{vm_task_t @var{target_task}}, @w{vm_address_t *@var{address}}, @w{vm_size_t @var{size}}, @w{boolean_t @var{anywhere}})
+The function @code{vm_allocate} allocates a region of virtual memory,
+placing it in the specified @var{task}'s address space.
+
+The starting address is @var{address}. If the @var{anywhere} option is
+false, an attempt is made to allocate virtual memory starting at this
+virtual address. If this address is not at the beginning of a virtual
+page, it will be rounded down to one. If there is not enough space at
+this address, no memory will be allocated. If the @var{anywhere} option
+is true, the input value of this address will be ignored, and the space
+will be allocated wherever it is available. In either case, the address
+at which memory was actually allocated will be returned in
+@var{address}.
+
+@var{size} is the number of bytes to allocate (rounded by the system in
+a machine dependent way to an integral number of virtual pages).
+
+If @var{anywhere} is true, the kernel should find and allocate any
+region of the specified size, and return the address of the resulting
+region in @var{address}, rounded to a virtual page boundary if there
+is sufficient space.
+
+The physical memory is not actually allocated until the new virtual
+memory is referenced. By default, the kernel rounds all addresses down
+to the nearest page boundary and all memory sizes up to the nearest page
+size. The global variable @code{vm_page_size} contains the page size.
+@code{mach_task_self} returns the value of the current task port which
+should be used as the @var{target_task} argument in order to allocate
+memory in the caller's address space. For languages other than C, these
+values can be obtained by the calls @code{vm_statistics} and
+@code{mach_task_self}. Initially, the pages of allocated memory will be
+protected to allow all forms of access, and will be inherited in child
+tasks as a copy. Subsequent calls to @code{vm_protect} and
+@code{vm_inherit} may be used to change these properties. The allocated
+region is always zero-filled.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+allocated, @code{KERN_INVALID_ADDRESS} if an invalid address was
+specified and @code{KERN_NO_SPACE} if there was not enough space left to
+satisfy the request.
+@end deftypefun
+
+@deftypefun kern_return_t vm_allocate_contiguous (@w{host_priv_t @var{host_priv}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t *@var{vaddr}}, @w{phys_addr_t *@var{paddr}}, @w{vm_size_t @var{size}}, @w{phys_addr_t @var{pmin}}, @w{phys_addr_t @var{pmax}}, @w{phys_addr_t @var{palign}})
+The function @code{vm_allocate_contiguous} allocates a region of physical memory,
+placing virtual mapping of the physical pages in the specified @var{task}'s
+address space.
+
+The virtual space will be allocated wherever it is available. The virtual
+address at which the physical memory was mapped will be returned in
+@var{vaddr}. The physical address of the start of the allocated physical
+memory will be returned in @var{paddr}.
+
+@var{size} is the number of bytes to allocate (rounded by the system in
+a machine dependent way to an integral number of virtual pages).
+
+Constraints can be set on the physical address, to cope with hardware physical
+memory access constraints, e.g. DMAs. @var{pmin} is the minimum physical address
+at which the allocated memory should start. @var{pmax} is the maximum physical
+address at which the allocated memory should end. @var{palign} is the alignment
+restriction, which has to be a power of two.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+allocated, @code{KERN_RESOURCE_SHORTAGE} if there was not enough physical memory
+left to satisfy the request, and @code{KERN_NO_SPACE} if there was not enough
+virtual space left to satisfy the request.
+@end deftypefun
+
+@node Memory Deallocation
+@section Memory Deallocation
+
+@deftypefun kern_return_t vm_deallocate (@w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}})
+@code{vm_deallocate} relinquishes access to a region of a @var{task}'s
+address space, causing further access to that memory to fail. This
+address range will be available for reallocation. @var{address} is the
+starting address, which will be rounded down to a page boundary.
+@var{size} is the number of bytes to deallocate, which will be rounded
+up to give a page boundary. Note, that because of the rounding to
+virtual page boundaries, more than @var{size} bytes may be deallocated.
+Use @code{vm_page_size} or @code{vm_statistics} to find out the current
+virtual page size.
+
+This call may be used to deallocate memory that was passed to a task in a
+message (via out of line data). In that case, the rounding should cause
+no trouble, since the region of memory was allocated as a set of pages.
+
+The @code{vm_deallocate} call affects only the task specified by the
+@var{target_task}. Other tasks which may have access to this memory may
+continue to reference it.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+deallocated and @code{KERN_INVALID_ADDRESS} if an invalid or
+non-allocated address was specified.
+@end deftypefun
+
+
+@node Data Transfer
+@section Data Transfer
+
+@deftypefun kern_return_t vm_read (@w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_offset_t *@var{data}}, @w{mach_msg_type_number_t *@var{data_count}})
+The function @code{vm_read} allows one task's virtual memory to be read
+by another task. The @var{target_task} is the task whose memory is to
+be read. @var{address} is the first address to be read and must be on a
+page boundary. @var{size} is the number of bytes of data to be read and
+must be an integral number of pages. @var{data} is the array of data
+copied from the given task, and @var{data_count} is the size of the data
+array in bytes (will be an integral number of pages).
+
+Note that the data array is returned in a newly allocated region; the
+task reading the data should @code{vm_deallocate} this region when it is
+done with the data.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+read, @code{KERN_INVALID_ADDRESS} if an invalid or non-allocated address
+was specified or there was not @var{size} bytes of data following the
+address, @code{KERN_INVALID_ARGUMENT} if the address does not start on a
+page boundary or the size is not an integral number of pages,
+@code{KERN_PROTECTION_FAILURE} if the address region in the target task
+is protected against reading and @code{KERN_NO_SPACE} if there was not
+enough room in the callers virtual memory to allocate space for the data
+to be returned.
+@end deftypefun
+
+@deftypefun kern_return_t vm_write (@w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_offset_t @var{data}}, @w{mach_msg_type_number_t @var{data_count}})
+The function @code{vm_write} allows a task to write to the virtual memory
+of @var{target_task}. @var{address} is the starting address in task to
+be affected. @var{data} is an array of bytes to be written, and
+@var{data_count} the size of the @var{data} array.
+
+The current implementation requires that @var{address}, @var{data} and
+@var{data_count} all be page-aligned. Otherwise,
+@code{KERN_INVALID_ARGUMENT} is returned.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+written, @code{KERN_INVALID_ADDRESS} if an invalid or non-allocated
+address was specified or there was not @var{data_count} bytes of
+allocated memory starting at @var{address} and
+@code{KERN_PROTECTION_FAILURE} if the address region in the target task
+is protected against writing.
+@end deftypefun
+
+@deftypefun kern_return_t vm_copy (@w{vm_task_t @var{target_task}}, @w{vm_address_t @var{source_address}}, @w{vm_size_t @var{count}}, @w{vm_offset_t @var{dest_address}})
+The function @code{vm_copy} causes the source memory range to be copied
+to the destination address. The source and destination memory ranges
+may overlap. The destination address range must already be allocated
+and writable; the source range must be readable.
+
+@code{vm_copy} is equivalent to @code{vm_read} followed by
+@code{vm_write}.
+
+The current implementation requires that @var{source_address},
+@var{dest_address} and @var{count} all be page-aligned. Otherwise,
+@code{KERN_INVALID_ARGUMENT} is returned.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+written, @code{KERN_INVALID_ADDRESS} if an invalid or non-allocated
+address was specified or there was insufficient memory allocated at one
+of the addresses and @code{KERN_PROTECTION_FAILURE} if the destination
+region was not writable or the source region was not readable.
+@end deftypefun
+
+
+@node Memory Attributes
+@section Memory Attributes
+
+@deftypefun kern_return_t vm_region (@w{vm_task_t @var{target_task}}, @w{vm_address_t *@var{address}}, @w{vm_size_t *@var{size}}, @w{vm_prot_t *@var{protection}}, @w{vm_prot_t *@var{max_protection}}, @w{vm_inherit_t *@var{inheritance}}, @w{boolean_t *@var{shared}}, @w{memory_object_name_t *@var{object_name}}, @w{vm_offset_t *@var{offset}})
+The function @code{vm_region} returns a description of the specified
+region of @var{target_task}'s virtual address space. @code{vm_region}
+begins at @var{address} and looks forward through memory until it comes
+to an allocated region. If address is within a region, then that region
+is used. Various bits of information about the region are returned. If
+@var{address} was not within a region, then @var{address} is set to the
+start of the first region which follows the incoming value. In this way
+an entire address space can be scanned.
+
+The @var{size} returned is the size of the located region in bytes.
+@var{protection} is the current protection of the region,
+@var{max_protection} is the maximum allowable protection for this
+region. @var{inheritance} is the inheritance attribute for this region.
+@var{shared} tells if the region is shared or not. The port
+@var{object_name} identifies the memory object associated with this
+region, and @var{offset} is the offset into the pager object that this
+region begins at.
+@c XXX cross ref pager_init
+
+The function returns @code{KERN_SUCCESS} if the memory region was
+successfully located and the information returned and @code{KERN_NO_SPACE} if
+there is no region at or above @var{address} in the specified task.
+@end deftypefun
+
+@deftypefun kern_return_t vm_protect (@w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{boolean_t @var{set_maximum}}, @w{vm_prot_t @var{new_protection}})
+The function @code{vm_protect} sets the virtual memory access privileges
+for a range of allocated addresses in @var{target_task}'s virtual
+address space. The protection argument describes a combination of read,
+write, and execute accesses that should be @emph{permitted}.
+
+@var{address} is the starting address, which will be rounded down to a
+page boundary. @var{size} is the size in bytes of the region for which
+protection is to change, and will be rounded up to give a page boundary.
+If @var{set_maximum} is set, make the protection change apply to the
+maximum protection associated with this address range; otherwise, the
+current protection on this range is changed. If the maximum protection
+is reduced below the current protection, both will be changed to reflect
+the new maximum. @var{new_protection} is the new protection value for
+this region; a set of: @code{VM_PROT_READ}, @code{VM_PROT_WRITE},
+@code{VM_PROT_EXECUTE}.
+
+The enforcement of virtual memory protection is machine-dependent.
+Nominally read access requires @code{VM_PROT_READ} permission, write
+access requires @code{VM_PROT_WRITE} permission, and execute access
+requires @code{VM_PROT_EXECUTE} permission. However, some combinations
+of access rights may not be supported. In particular, the kernel
+interface allows write access to require @code{VM_PROT_READ} and
+@code{VM_PROT_WRITE} permission and execute access to require
+@code{VM_PROT_READ} permission.
+
+If a region is wired, changing its protection also updates the
+access types for which no page faults must occur.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+protected, @code{KERN_INVALID_ADDRESS} if an invalid or non-allocated
+address was specified and @code{KERN_PROTECTION_FAILURE} if an attempt
+was made to increase the current or maximum protection beyond the
+existing maximum protection value.
+@end deftypefun
+
+@deftypefun kern_return_t vm_inherit (@w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_inherit_t @var{new_inheritance}})
+The function @code{vm_inherit} specifies how a region of
+@var{target_task}'s address space is to be passed to child tasks at the
+time of task creation. Inheritance is an attribute of virtual pages, so
+@var{address} to start from will be rounded down to a page boundary and
+@var{size}, the size in bytes of the region for which inheritance is to
+change, will be rounded up to give a page boundary. How this memory is
+to be inherited in child tasks is specified by @var{new_inheritance}.
+Inheritance is specified by using one of these following three values:
+
+@table @code
+@item VM_INHERIT_SHARE
+Child tasks will share this memory with this task.
+
+@item VM_INHERIT_COPY
+Child tasks will receive a copy of this region.
+
+@item VM_INHERIT_NONE
+This region will be absent from child tasks.
+@end table
+
+Setting @code{vm_inherit} to @code{VM_INHERIT_SHARE} and forking a child
+task is the only way two Mach tasks can share physical memory. Remember
+that all the threads of a given task share all the same memory.
+
+The function returns @code{KERN_SUCCESS} if the memory inheritance was
+successfully set and @code{KERN_INVALID_ADDRESS} if an invalid or
+non-allocated address was specified.
+@end deftypefun
+
+@deftypefun kern_return_t vm_wire (@w{host_t @var{host}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_prot_t @var{access}})
+The function @code{vm_wire} allows applications to control
+memory pageability. @var{host} is the host port for the
+host on which @var{target_task} resides. @var{address} is the starting
+address, which will be rounded down to a page boundary. @var{size} is
+the size in bytes of the region for which protection is to change, and
+will be rounded up to give a page boundary. @var{access} specifies the
+types of accesses that must not cause page faults. If the host port is
+not privileged, the amount of memory is limited per task.
+
+The semantics of a successful @code{vm_wire} operation are that memory
+in the specified range will not cause page faults for any accesses
+included in @var{access}. Data memory can be made non-pageable (wired) with an
+@var{access} argument of @code{VM_PROT_READ | VM_PROT_WRITE}. A special case
+is that @code{VM_PROT_NONE} makes the memory pageable.
+
+Wiring doesn't stack, i.e. a single call to @code{vm_wire} with
+@var{access} @code{VM_PROT_NONE} unwires the specified range,
+regardless of how many times it was previously wired. Conversely,
+a single call to @code{vm_wire} with @var{access}
+@code{VM_PROT_READ | VM_PROT_WRITE} wires the specified range,
+regardless of how many times it was previously unwired.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_HOST} if @var{host} was not a valid host
+port, @code{KERN_INVALID_TASK} if @var{target_task} was not a valid task,
+@code{KERN_INVALID_VALUE} if @var{access} specified an invalid access
+mode, and @code{KERN_NO_SPACE} if some memory in the specified range
+is not present or has an inappropriate protection value.
+
+The @code{vm_wire} call is actually an RPC to @var{host}, normally
+a send right for a privileged host port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t vm_wire_all (@w{host_t @var{host}}, @w{vm_task_t @var{target_task}}, @w{vm_wire_t @var{flags}})
+The function @code{vm_wire_all} allows applications to control
+memory pageability, as with @code{vm_wire}, but applies to all
+current and/or future mappings.
+
+The argument @var{flags} is a bitwise-or combination of the following bit values.
+
+@table @code
+@item VM_WIRE_CURRENT
+All currently existing entries are wired, with access types matching
+their protection.
+
+@item VM_WIRE_FUTURE
+All future entries are automatically wired, with access types matching
+their protection.
+@end table
+
+If @var{flags} specifies no bits (@code{VM_WIRE_NONE}), all current entries
+are unwired, and future entries are no longer automatically wired.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_HOST} if @var{host} was not a valid host port,
+@code{KERN_INVALID_TASK} if @var{target_task} was not a valid task,
+and @code{KERN_INVALID_VALUE} if @var{flags} specifies invalid bits.
+
+The @code{vm_wire_all} call is actually an RPC to @var{host}, normally
+a send right for a privileged host port, but potentially any send right.
+In addition to the normal diagnostic return codes from the call's server
+(normally the kernel), the call may return @code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t vm_machine_attribute (@w{vm_task_t @var{task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_prot_t @var{access}}, @w{vm_machine_attribute_t @var{attribute}}, @w{vm_machine_attribute_val_t @var{value}})
+The function @code{vm_machine_attribute} specifies machine-specific
+attributes for a VM mapping, such as cachability, migrability,
+replicability. This is used on machines that allow the user control
+over the cache (this is the case for MIPS architectures) or placement of
+memory pages as in NUMA architectures (Non-Uniform Memory Access time)
+such as the IBM ACE multiprocessor.
+
+Machine-specific attributes can be considered additions to the
+machine-independent ones such as protection and inheritance, but they
+are not guaranteed to be supported by any given machine. Moreover,
+implementations of Mach on new architectures might find the need for new
+attribute types and/or values besides the ones defined in the initial
+implementation.
+
+The types currently defined are
+@table @code
+@item MATTR_CACHE
+Controls caching of memory pages
+
+@item MATTR_MIGRATE
+Controls migrability of memory pages
+
+@item MATTR_REPLICATE
+Controls replication of memory pages
+@end table
+
+The corresponding values, and the meaning of a specific call to
+@code{vm_machine_attribute}, are:
+@table @code
+@item MATTR_VAL_ON
+Enables the attribute. Being enabled is the default value for any
+applicable attribute.
+
+@item MATTR_VAL_OFF
+Disables the attribute, making memory non-cached, or non-migratable, or
+non-replicatable.
+
+@item MATTR_VAL_GET
+Returns the current value of the attribute for the memory segment. If
+the attribute does not apply uniformly to the given range the value
+returned applies to the initial portion of the segment only.
+
+@item MATTR_VAL_CACHE_FLUSH
+Flush the memory pages from the Cache. The size value in this case
+might be meaningful even if not a multiple of the page size, depending
+on the implementation.
+
+@item MATTR_VAL_ICACHE_FLUSH
+Same as above, applied to the Instruction Cache alone.
+
+@item MATTR_VAL_DCACHE_FLUSH
+Same as above, applied to the Data Cache alone.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded, and
+@code{KERN_INVALID_ARGUMENT} if @var{task} is not a task, or
+@var{address} and @var{size} do not define a valid address range in
+task, or @var{attribute} is not a valid attribute type, or it is not
+implemented, or @var{value} is not a permissible value for attribute.
+@end deftypefun
+
+
+@node Mapping Memory Objects
+@section Mapping Memory Objects
+
+@deftypefun kern_return_t vm_map (@w{vm_task_t @var{target_task}}, @w{vm_address_t *@var{address}}, @w{vm_size_t @var{size}}, @w{vm_address_t @var{mask}}, @w{boolean_t @var{anywhere}}, @w{memory_object_t @var{memory_object}}, @w{vm_offset_t @var{offset}}, @w{boolean_t @var{copy}}, @w{vm_prot_t @var{cur_protection}}, @w{vm_prot_t @var{max_protection}}, @w{vm_inherit_t @var{inheritance}})
+The function @code{vm_map} maps a region of virtual memory at the
+specified address, for which data is to be supplied by the given memory
+object, starting at the given offset within that object. In addition to
+the arguments used in @code{vm_allocate}, the @code{vm_map} call allows
+the specification of an address alignment parameter, and of the initial
+protection and inheritance values.
+@c XXX See the descriptions of vm_allocate, vm_protect , and vm_inherit
+
+If the memory object in question is not currently in use, the kernel
+will perform a @code{memory_object_init} call at this time. If the copy
+parameter is asserted, the specified region of the memory object will be
+copied to this address space; changes made to this object by other tasks
+will not be visible in this mapping, and changes made in this mapping
+will not be visible to others (or returned to the memory object).
+
+The @code{vm_map} call returns once the mapping is established.
+Completion of the call does not require any action on the part of the
+memory manager.
+
+Warning: Only memory objects that are provided by bona fide memory
+managers should be used in the @code{vm_map} call. A memory manager
+must implement the memory object interface described elsewhere in this
+manual. If other ports are used, a thread that accesses the mapped
+virtual memory may become permanently hung or may receive a memory
+exception.
+
+@var{target_task} is the task to be affected. The starting address is
+@var{address}. If the @var{anywhere} option is used, this address is
+ignored. The address actually allocated will be returned in
+@var{address}. @var{size} is the number of bytes to allocate (rounded by
+the system in a machine dependent way). The alignment and maximum address
+restrictions are specified by @var{mask}. Bits asserted in this mask must not be
+asserted in the address returned. If @var{anywhere} is set, the kernel
+should find and allocate any region of the specified size, and return
+the address of the resulting region in @var{address}.
+
+@var{memory_object} is the port that represents the memory object: used
+by user tasks in @code{vm_map}; used by the kernel to make requests for
+data or other management actions. If this port is @code{MEMORY_OBJECT_NULL},
+then zero-filled memory is allocated instead. Within a memory object,
+@var{offset} specifies an offset in bytes. This must be page aligned.
+If @var{copy} is set, the range of the memory object should be copied to
+the target task, rather than mapped read-write.
+
+The function returns @code{KERN_SUCCESS} if the object is mapped,
+@code{KERN_NO_SPACE} if no unused region of the task's virtual address
+space that meets the address, size, and alignment criteria could be
+found, and @code{KERN_INVALID_ARGUMENT} if an invalid argument was provided.
+@end deftypefun
+
+
+@node Memory Statistics
+@section Memory Statistics
+
+@deftp {Data type} vm_statistics_data_t
+This structure is returned in @var{vm_stats} by the @code{vm_statistics}
+function and provides virtual memory statistics for the system. It has
+the following members:
+
+@table @code
+@item long pagesize
+The page size in bytes.
+
+@item long free_count
+The number of free pages.
+
+@item long active_count
+The number of active pages.
+
+@item long inactive_count
+The number of inactive pages.
+
+@item long wire_count
+The number of pages wired down.
+
+@item long zero_fill_count
+The number of zero filled pages.
+
+@item long reactivations
+The number of reactivated pages.
+
+@item long pageins
+The number of pageins.
+
+@item long pageouts
+The number of pageouts.
+
+@item long faults
+The number of faults.
+
+@item long cow_faults
+The number of copy-on-writes.
+
+@item long lookups
+The number of object cache lookups.
+
+@item long hits
+The number of object cache hits.
+@end table
+@end deftp
+
+@deftypefun kern_return_t vm_statistics (@w{vm_task_t @var{target_task}}, @w{vm_statistics_data_t *@var{vm_stats}})
+The function @code{vm_statistics} returns the statistics about the
+kernel's use of virtual memory since the kernel was booted.
+@code{pagesize} can also be found as a global variable
+@code{vm_page_size} which is set at task initialization and remains
+constant for the life of the task.
+@end deftypefun
+
+
+@node Memory physical addresses
+@section Memory physical addresses
+
+@deftypefun kern_return_t vm_pages_phys (@w{host_t @var{host}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{rpc_phys_addr_array_t *@var{pages}}, @w{mach_msg_type_number_t *@var{pagesCnt}})
+The function @code{vm_pages_phys} retrieves the physical addresses of the
+specified region (@var{size} bytes starting from @var{address}) of
+@var{target_task}'s virtual address space.
+
+Both @var{address} and @var{size} have to be aligned on @code{vm_page_size}.
+
+@var{pages} is an array of @code{rpc_phys_addr_t} that is supplied by the
+caller and returned filled with the physical page addresses. @var{pagesCnt} is
+supplied as the maximum number of elements in the @var{pages} array. On
+return, it contains the actual number of integers in @var{pages}.
+@end deftypefun
+
+
+@node External Memory Management
+@chapter External Memory Management
+
+@menu
+* Memory Object Server:: The basics of external memory management.
+* Memory Object Creation:: How new memory objects are created.
+* Memory Object Termination:: How memory objects are terminated.
+* Memory Objects and Data:: Data transfer to and from memory objects.
+* Memory Object Locking:: How memory objects are locked.
+* Memory Object Attributes:: Manipulating attributes of memory objects.
+* Default Memory Manager:: Setting and using the default memory manager.
+@end menu
+
+
+@node Memory Object Server
+@section Memory Object Server
+
+@deftypefun boolean_t memory_object_server (@w{msg_header_t *@var{in_msg}}, @w{msg_header_t *@var{out_msg}})
+@deftypefunx boolean_t memory_object_default_server (@w{msg_header_t *@var{in_msg}}, @w{msg_header_t *@var{out_msg}})
+@deftypefunx boolean_t seqnos_memory_object_server (@w{msg_header_t *@var{in_msg}}, @w{msg_header_t *@var{out_msg}})
+@deftypefunx boolean_t seqnos_memory_object_default_server (@w{msg_header_t *@var{in_msg}}, @w{msg_header_t *@var{out_msg}})
+A memory manager is a server task that responds to specific messages
+from the kernel in order to handle memory management functions for the
+kernel.
+
+In order to isolate the memory manager from the specifics of message
+formatting, the remote procedure call generator produces a procedure,
+@code{memory_object_server}, to handle a received message. This
+function does all necessary argument handling, and actually calls one of
+the following functions: @code{memory_object_init},
+@code{memory_object_data_return},
+@code{memory_object_data_request}, @code{memory_object_data_unlock},
+@code{memory_object_lock_completed}, @code{memory_object_copy},
+@code{memory_object_terminate}. The @strong{default memory manager} may
+get two additional requests from the kernel: @code{memory_object_create}
+and @code{memory_object_data_initialize}. The remote procedure call
+generator produces a procedure @code{memory_object_default_server} to
+handle those functions specific to the default memory manager.
+
+The @code{seqnos_memory_object_server} and
+@code{seqnos_memory_object_default_server} differ from
+@code{memory_object_server} and @code{memory_object_default_server} in
+that they supply message sequence numbers to the server interfaces.
+They call the @code{seqnos_memory_object_*} functions, which complement
+the @code{memory_object_*} set of functions.
+
+The return value from the @code{memory_object_server} function indicates
+that the message was appropriate to the memory management interface
+(returning @code{TRUE}), or that it could not handle this message
+(returning @code{FALSE}).
+
+The @var{in_msg} argument is the message that has been received from the
+kernel. The @var{out_msg} is a reply message, but this is not used for
+this server.
+
+The function returns @code{TRUE} to indicate that the message in
+question was applicable to this interface, and that the appropriate
+routine was called to interpret the message. It returns @code{FALSE} to
+indicate that the message did not apply to this interface, and that no
+other action was taken.
+@end deftypefun
+
+
+@node Memory Object Creation
+@section Memory Object Creation
+
+@deftypefun kern_return_t memory_object_init (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{memory_object_name_t @var{memory_object_name}}, @w{vm_size_t @var{memory_object_page_size}})
+@deftypefunx kern_return_t seqnos_memory_object_init (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{memory_object_name_t @var{memory_object_name}}, @w{vm_size_t @var{memory_object_page_size}})
+The function @code{memory_object_init} serves as a notification that the
+kernel has been asked to map the given memory object into a task's
+virtual address space. Additionally, it provides a port on which the
+memory manager may issue cache management requests, and a port which the
+kernel will use to name this data region. In the event that different
+kernels ask to map the same memory object, each kernel will perform a
+@code{memory_object_init} call with new request and name ports. The
+virtual page size that is used by the calling kernel is
+included for planning purposes.
+
+When the memory manager is prepared to accept requests for data for
+this object, it must call @code{memory_object_ready}.
+Otherwise the kernel will not process requests on this object. To
+reject all mappings of this object, the memory manager may use
+@code{memory_object_destroy}.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied to the kernel in a @code{vm_map} call.
+@var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this port identifies the kernel that has made the
+request.)
+@var{memory_object_name} is a port used by the kernel to refer to the
+memory object data in response to @code{vm_region} calls.
+@var{memory_object_page_size} is the page size to be used by this
+kernel. All data sizes in calls involving this kernel must be an
+integral multiple of the page size. Note that different kernels,
+indicated by a different @code{memory_control}, may have different page
+sizes.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_ready (@w{memory_object_control_t @var{memory_control}}, @w{boolean_t @var{may_cache_object}}, @w{memory_object_copy_strategy_t @var{copy_strategy}})
+The function @code{memory_object_ready} informs the kernel that the
+memory manager is ready to receive data or unlock requests on behalf of
+the clients. The argument @var{memory_control} is the port, provided by
+the kernel in a @code{memory_object_init} call, to which cache
+management requests may be issued. If @var{may_cache_object} is set,
+the kernel may keep data associated with this memory object, even after
+virtual memory references to it are gone.
+
+@var{copy_strategy} tells how the kernel should copy regions of the
+associated memory object. There are three possible caching strategies:
+@code{MEMORY_OBJECT_COPY_NONE} which specifies that nothing special
+should be done when data in the object is copied;
+@code{MEMORY_OBJECT_COPY_CALL} which specifies that the memory manager
+should be notified via a @code{memory_object_copy} call before any part
+of the object is copied; and @code{MEMORY_OBJECT_COPY_DELAY} which
+guarantees that the memory manager does not externally modify the data
+so that the kernel can use its normal copy-on-write algorithms.
+@code{MEMORY_OBJECT_COPY_DELAY} is the strategy most commonly used.
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+
+@node Memory Object Termination
+@section Memory Object Termination
+
+@deftypefun kern_return_t memory_object_terminate (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{memory_object_name_t @var{memory_object_name}})
+@deftypefunx kern_return_t seqnos_memory_object_terminate (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{memory_object_name_t @var{memory_object_name}})
+The function @code{memory_object_terminate} indicates that the kernel
+has completed its use of the given memory object. All rights to the
+memory object control and name ports are included, so that the memory
+manager can destroy them (using @code{mach_port_deallocate}) after doing
+appropriate bookkeeping. The kernel will terminate a memory object only
+after all address space mappings of that memory object have been
+deallocated, or upon explicit request by the memory manager.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied to the kernel in a @code{vm_map} call.
+@var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this port identifies the kernel that has made the
+request.)
+@var{memory_object_name} is a port used by the kernel to refer to the
+memory object data in response to @code{vm_region} calls.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_destroy (@w{memory_object_control_t @var{memory_control}}, @w{kern_return_t @var{reason}})
+The function @code{memory_object_destroy} tells the kernel to shut down
+the memory object. As a result of this call the kernel will no longer
+support paging activity or any @code{memory_object} calls on this
+object, and all rights to the memory object port, the memory control
+port and the memory name port will be returned to the memory manager in
+a memory_object_terminate call. If the memory manager is concerned that
+any modified cached data be returned to it before the object is
+terminated, it should call @code{memory_object_lock_request} with
+@var{should_flush} set and a lock value of @code{VM_PROT_WRITE} before
+making this call.
+
+The argument @var{memory_control} is the port, provided by the kernel in
+a @code{memory_object_init} call, to which cache management requests may
+be issued. @var{reason} is an error code indicating why the object
+must be destroyed.
+@c The error code is currently ignored.
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+
+@node Memory Objects and Data
+@section Memory Objects and Data
+
+@deftypefun kern_return_t memory_object_data_return (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_offset_t @var{data}}, @w{vm_size_t @var{data_count}}, @w{boolean_t @var{dirty}}, @w{boolean_t @var{kernel_copy}})
+@deftypefunx kern_return_t seqnos_memory_object_data_return (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_offset_t @var{data}}, @w{vm_size_t @var{data_count}}, @w{boolean_t @var{dirty}}, @w{boolean_t @var{kernel_copy}})
+The function @code{memory_object_data_return} provides the memory
+manager with data that has been modified while cached in physical
+memory. Once the memory manager no longer needs this data (e.g., it has
+been written to another storage medium), it should be deallocated using
+@code{vm_deallocate}.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied to the kernel in a @code{vm_map} call.
+@var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this port identifies the kernel that has made the
+request.) @var{offset} is the
+offset within a memory object to which this call refers. This will be
+page aligned. @var{data} is the data which has been modified while
+cached in physical memory. @var{data_count} is the amount of data to be
+written, in bytes. This will be an integral number of memory object
+pages.
+
+The kernel will also use this call to return precious pages. If an
+unmodified precious page is returned, @var{dirty} is set to @code{FALSE},
+otherwise it is @code{TRUE}. If @var{kernel_copy} is @code{TRUE}, the
+kernel kept a copy of the page. Precious data remains precious if the
+kernel keeps a copy. The indication that the kernel kept a copy is only
+a hint if the data is not precious; the cleaned copy may be discarded
+without further notifying the manager.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_data_request (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_offset_t @var{length}}, @w{vm_prot_t @var{desired_access}})
+@deftypefunx kern_return_t seqnos_memory_object_data_request (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_offset_t @var{length}}, @w{vm_prot_t @var{desired_access}})
+The function @code{memory_object_data_request} is a request for data
+from the specified memory object, for at least the access specified.
+The memory manager is expected to return at least the specified data,
+with as much access as it can allow, using
+@code{memory_object_data_supply}. If the memory manager is unable to
+provide the data (for example, because of a hardware error), it may use
+the @code{memory_object_data_error} call. The
+@code{memory_object_data_unavailable} call may be used to tell the
+kernel to supply zero-filled memory for this region.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied to the kernel in a @code{vm_map} call.
+@var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this port identifies the kernel that has made the
+request.) @var{offset} is the
+offset within a memory object to which this call refers. This will be
+page aligned. @var{length} is the number of bytes of data, starting at
+@var{offset}, to which this call refers. This will be an integral
+number of memory object pages. @var{desired_access} is a protection
+value describing the memory access modes which must be permitted on the
+specified cached data. One or more of: @code{VM_PROT_READ},
+@code{VM_PROT_WRITE} or @code{VM_PROT_EXECUTE}.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_data_supply (@w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_offset_t @var{data}}, @w{vm_size_t @var{data_count}}, @w{vm_prot_t @var{lock_value}}, @w{boolean_t @var{precious}}, @w{mach_port_t @var{reply}})
+The function @code{memory_object_data_supply} supplies the kernel with
+data for the specified memory object. Ordinarily, memory managers
+should only provide data in response to @code{memory_object_data_request}
+calls from the kernel (but they may provide data in advance as desired).
+When data already held by this kernel is provided again, the new data is
+ignored. The kernel may not provide any data (or protection)
+consistency among pages with different virtual page alignments within
+the same object.
+
+The argument @var{memory_control} is the port, provided by the kernel in
+a @code{memory_object_init} call, to which cache management requests may
+be issued. @var{offset} is an offset within a memory object in bytes.
+This must be page aligned. @var{data} is the data that is being
+provided to the kernel. This is a pointer to the data.
+@var{data_count} is the amount of data to be provided. Only whole
+virtual pages of data can be accepted; partial pages will be discarded.
+
+@var{lock_value} is a protection value indicating those forms of access
+that should @strong{not} be permitted to the specified cached data. The
+lock values must be one or more of the set: @code{VM_PROT_NONE},
+@code{VM_PROT_READ}, @code{VM_PROT_WRITE}, @code{VM_PROT_EXECUTE} and
+@code{VM_PROT_ALL} as defined in @file{mach/vm_prot.h}.
+
+If @var{precious} is @code{FALSE}, the kernel treats the data as a
+temporary and may throw it away if it hasn't been changed. If the
+@var{precious} value is @code{TRUE}, the kernel treats its copy as a
+data repository and promises to return it to the manager; the manager
+may tell the kernel to throw it away instead by flushing and not
+cleaning the data (see @code{memory_object_lock_request}).
+
+If @var{reply} is not @code{MACH_PORT_NULL}, the kernel will send a
+completion message to the provided port (see
+@code{memory_object_supply_completed}).
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_supply_completed (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}}, @w{kern_return_t @var{result}}, @w{vm_offset_t @var{error_offset}})
+@deftypefunx kern_return_t seqnos_memory_object_supply_completed (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}}, @w{kern_return_t @var{result}}, @w{vm_offset_t @var{error_offset}})
+The function @code{memory_object_supply_completed} indicates that a
+previous @code{memory_object_data_supply} has been completed. Note that
+this call is made on whatever port was specified in the
+@code{memory_object_data_supply} call; that port need not be the memory
+object port itself. No reply is expected after this call.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied to the kernel in a @code{vm_map} call.
+@var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this port identifies the kernel that has made the
+request.) @var{offset} is the
+offset within a memory object to which this call refers. @var{length}
+is the length of the data covered by the lock request. The @var{result}
+parameter indicates what happened during the supply. If it is not
+@code{KERN_SUCCESS}, then @var{error_offset} identifies the first offset
+at which a problem occurred. The pagein operation stopped at this
+point. Note that the only failures reported by this mechanism are
+@code{KERN_MEMORY_PRESENT}. All other failures (invalid argument, error
+on pagein of supplied data in manager's address space) cause the entire
+operation to fail.
+
+
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_data_error (@w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{size}}, @w{kern_return_t @var{reason}})
+The function @code{memory_object_data_error} indicates that the memory
+manager cannot return the data requested for the given region,
+specifying a reason for the error. This is typically used when a
+hardware error is encountered.
+
+The argument @var{memory_control} is the port, provided by the kernel in
+a @code{memory_object_init} call, to which cache management requests may
+be issued. @var{offset} is an offset within a memory object in bytes.
+This must be page aligned. @var{size} is
+the amount of cached data (starting at @var{offset}) to be handled.
+This must be an integral number of the memory object page size.
+@var{reason} is an error code indicating what type of error occurred.
+@c The error code is currently ignored.
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_data_unavailable (@w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{size}})
+The function @code{memory_object_data_unavailable} indicates that the
+memory object does not have data for the given region and that the
+kernel should provide the data for this range. The memory manager may
+use this call in three different situations.
+
+@enumerate
+@item
+The object was created by @code{memory_object_create} and the kernel has
+not yet provided data for this range (either via a
+@code{memory_object_data_initialize} or a
+@code{memory_object_data_return}) for the object.
+
+@item
+The object was created by a @code{memory_object_copy} and the
+kernel should copy this region from the original memory object.
+
+@item
+The object is a normal user-created memory object and the kernel should
+supply unlocked zero-filled pages for the range.
+@end enumerate
+
+The argument @var{memory_control} is the port, provided by the kernel in
+a @code{memory_object_init} call, to which cache management requests may
+be issued. @var{offset} is an offset within a memory object, in bytes.
+This must be page aligned. @var{size} is the amount of cached data
+(starting at @var{offset}) to be handled. This must be an integral
+number of the memory object page size.
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_copy (@w{memory_object_t @var{old_memory_object}}, @w{memory_object_control_t @var{old_memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}}, @w{memory_object_t @var{new_memory_object}})
+@deftypefunx kern_return_t seqnos_memory_object_copy (@w{memory_object_t @var{old_memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{old_memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}}, @w{memory_object_t @var{new_memory_object}})
+The function @code{memory_object_copy} indicates that a copy has been
+made of the specified range of the given original memory object. This
+call includes only the new memory object itself; a
+@code{memory_object_init} call will be made on the new memory object
+after the currently cached pages of the original object are prepared.
+After the memory manager receives the init call, it must reply with the
+@code{memory_object_ready} call to assert the "ready" attribute. The
+kernel will use the new memory object, control and name ports to refer
+to the new copy.
+
+This call is made when the original memory object had the caching
+parameter set to @code{MEMORY_OBJECT_COPY_CALL} and a user of the object
+has asked the kernel to copy it.
+
+Cached pages from the original memory object at the time of the copy
+operation are handled as follows: Readable pages may be silently copied
+to the new memory object (with all access permissions). Pages not
+copied are locked to prevent write access.
+
+The new memory object is @strong{temporary}, meaning that the memory
+manager should not change its contents or allow the memory object to be
+mapped in another client. The memory manager may use the
+@code{memory_object_data_unavailable} call to indicate that the
+appropriate pages of the original memory object may be used to fulfill
+the data request.
+
+The argument @var{old_memory_object} is the port that represents the old
+memory object data. @var{old_memory_control} is the kernel port for the
+old object. @var{offset} is the offset within a memory object to which
+this call refers. This will be page aligned. @var{length} is the
+number of bytes of data, starting at @var{offset}, to which this call
+refers. This will be an integral number of memory object pages.
+@var{new_memory_object} is a new memory object created by the kernel;
+see synopsis for further description. Note that all port rights
+(including receive rights) are included for the new memory object.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+
+@node Memory Object Locking
+@section Memory Object Locking
+
+@deftypefun kern_return_t memory_object_lock_request (@w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{size}}, @w{memory_object_return_t @var{should_clean}}, @w{boolean_t @var{should_flush}}, @w{vm_prot_t @var{lock_value}}, @w{mach_port_t @var{reply_to}})
+The function @code{memory_object_lock_request} allows a memory manager
+to make cache management requests. As specified in arguments to the
+call, the kernel will:
+@itemize
+@item
+clean (i.e., write back using @code{memory_object_data_supply}) any
+cached data which has been modified since the last time it was written
+
+@item
+flush (i.e., remove any uses of) that data from memory
+
+@item
+lock (i.e., prohibit the specified uses of) the cached data
+@end itemize
+
+Locks applied to cached data are not cumulative; new lock values
+override previous ones. Thus, data may also be unlocked using this
+primitive. The lock values must be one or more of the following values:
+@code{VM_PROT_NONE}, @code{VM_PROT_READ}, @code{VM_PROT_WRITE},
+@code{VM_PROT_EXECUTE} and @code{VM_PROT_ALL} as defined in
+@file{mach/vm_prot.h}.
+
+Only data which is cached at the time of this call is affected. When a
+running thread requires a prohibited access to cached data, the kernel
+will issue a @code{memory_object_data_unlock} call specifying the forms
+of access required.
+
+Once all of the actions requested by this call have been completed, the
+kernel issues a @code{memory_object_lock_completed} call on the
+specified reply port.
+
+The argument @var{memory_control} is the port, provided by the kernel in
+a @code{memory_object_init} call, to which cache management requests may
+be issued. @var{offset} is an offset within a memory object, in bytes.
+This must be page aligned. @var{size} is the amount of cached data
+(starting at @var{offset}) to be handled. This must be an integral
+number of the memory object page size. If @var{should_clean} is set,
+modified data should be written back to the memory manager. If
+@var{should_flush} is set, the specified cached data should be
+invalidated, and all uses of that data should be revoked.
+@var{lock_value} is a protection value indicating those forms of access
+that should @strong{not} be permitted to the specified cached data.
+@var{reply_to} is a port on which a @code{memory_object_lock_completed}
+call should be issued, or @code{MACH_PORT_NULL} if no acknowledgement is
+desired.
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_lock_completed (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}})
+@deftypefunx kern_return_t seqnos_memory_object_lock_completed (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}})
+The function @code{memory_object_lock_completed} indicates that a
+previous @code{memory_object_lock_request} has been completed. Note
+that this call is made on whatever port was specified in the
+@code{memory_object_lock_request} call; that port need not be the memory
+object port itself. No reply is expected after this call.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied to the kernel in a @code{vm_map} call.
+@var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this parameter identifies the kernel that has made the
+request.) @var{offset} is the
+offset within a memory object to which this call refers. @var{length}
+is the length of the data covered by the lock request.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_data_unlock (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}}, @w{vm_prot_t @var{desired_access}})
+@deftypefunx kern_return_t seqnos_memory_object_data_unlock (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{length}}, @w{vm_prot_t @var{desired_access}})
+The function @code{memory_object_data_unlock} is a request that the
+memory manager permit at least the desired access to the specified data
+cached by the kernel. A call to @code{memory_object_lock_request} is
+expected in response.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied to the kernel in a @code{vm_map} call.
+@var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this parameter identifies the kernel that has made the
+request.) @var{offset} is the
+offset within a memory object to which this call refers. This will be
+page aligned. @var{length} is the number of bytes of data, starting at
+@var{offset}, to which this call refers. This will be an integral
+number of memory object pages. @var{desired_access} is a protection value
+describing the memory access modes which must be permitted on the
+specified cached data. One or more of: @code{VM_PROT_READ},
+@code{VM_PROT_WRITE} or @code{VM_PROT_EXECUTE}.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+
+@node Memory Object Attributes
+@section Memory Object Attributes
+
+@deftypefun kern_return_t memory_object_get_attributes (@w{memory_object_control_t @var{memory_control}}, @w{boolean_t *@var{object_ready}}, @w{boolean_t *@var{may_cache_object}}, @w{memory_object_copy_strategy_t *@var{copy_strategy}})
+The function @code{memory_object_get_attributes} retrieves the current
+attributes associated with the memory object.
+
+The argument @var{memory_control} is the port, provided by the kernel in
+a @code{memory_object_init} call, to which cache management requests may
+be issued. If @var{object_ready} is set, the kernel may issue new data
+and unlock requests on the associated memory object. If
+@var{may_cache_object} is set, the kernel may keep data associated with
+this memory object, even after virtual memory references to it are gone.
+@var{copy_strategy} tells how the kernel should copy regions of the
+associated memory object.
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_change_attributes (@w{memory_object_control_t @var{memory_control}}, @w{boolean_t @var{may_cache_object}}, @w{memory_object_copy_strategy_t @var{copy_strategy}}, @w{mach_port_t @var{reply_to}})
+The function @code{memory_object_change_attributes} informs the kernel
+that the memory manager is ready to receive data or unlock requests on
+behalf of the clients and sets performance-related attributes for the
+specified memory object. If the caching attribute is asserted, the
+kernel is permitted (and encouraged) to maintain cached data for this
+memory object even after no virtual address space contains this data.
+
+There are three possible caching strategies:
+@code{MEMORY_OBJECT_COPY_NONE} which specifies that nothing special
+should be done when data in the object is copied;
+@code{MEMORY_OBJECT_COPY_CALL} which specifies that the memory manager
+should be notified via a @code{memory_object_copy} call before any part
+of the object is copied; and @code{MEMORY_OBJECT_COPY_DELAY} which
+guarantees that the memory manager does not externally modify the data
+so that the kernel can use its normal copy-on-write algorithms.
+@code{MEMORY_OBJECT_COPY_DELAY} is the strategy most commonly used.
+
+The argument @var{memory_control} is the port, provided by the kernel in
+a @code{memory_object_init} call, to which cache management requests may
+be issued. If @var{may_cache_object} is set, the kernel may keep data
+associated with this memory object, even after virtual memory references
+to it are gone. @var{copy_strategy} tells how the kernel should copy
+regions of the associated memory object. @var{reply_to} is a port on
+which a @code{memory_object_change_completed} call will be issued upon
+completion of the attribute change, or @code{MACH_PORT_NULL} if no
+acknowledgement is desired.
+
+This routine does not receive a reply message (and consequently has no
+return value), so only message transmission errors apply.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_change_completed (@w{memory_object_t @var{memory_object}}, @w{boolean_t @var{may_cache_object}}, @w{memory_object_copy_strategy_t @var{copy_strategy}})
+@deftypefunx kern_return_t seqnos_memory_object_change_completed (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{boolean_t @var{may_cache_object}}, @w{memory_object_copy_strategy_t @var{copy_strategy}})
+The function @code{memory_object_change_completed} indicates the
+completion of an attribute change call.
+
+@c Warning: This routine does NOT contain a memory_object_control_t because
+@c the memory_object_change_attributes call may cause memory object
+@c termination (by uncaching the object). This would yield an invalid
+@c port.
+@end deftypefun
+
+
+@node Default Memory Manager
+@section Default Memory Manager
+
+@deftypefun kern_return_t vm_set_default_memory_manager (@w{host_t @var{host}}, @w{mach_port_t *@var{default_manager}})
+The function @code{vm_set_default_memory_manager} sets the kernel's
+default memory manager. It sets the port to which newly-created
+temporary memory objects are delivered by @code{memory_object_create} to
+the host. The old memory manager port is returned. If
+@var{default_manager} is @code{MACH_PORT_NULL} then this routine just returns
+the current default manager port without changing it.
+
+The argument @var{host} is a task port to the kernel whose default
+memory manager is to be changed. @var{default_manager} is an in/out
+parameter. As input, @var{default_manager} is the port that the new
+memory manager is listening on for @code{memory_object_create} calls.
+As output, it is the old default memory manager's port.
+
+The function returns @code{KERN_SUCCESS} if the new memory manager is
+installed, and @code{KERN_INVALID_ARGUMENT} if this task does not have
+the privileges required for this call.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_create (@w{memory_object_t @var{old_memory_object}}, @w{memory_object_t @var{new_memory_object}}, @w{vm_size_t @var{new_object_size}}, @w{memory_object_control_t @var{new_control}}, @w{memory_object_name_t @var{new_name}}, @w{vm_size_t @var{new_page_size}})
+@deftypefunx kern_return_t seqnos_memory_object_create (@w{memory_object_t @var{old_memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_t @var{new_memory_object}}, @w{vm_size_t @var{new_object_size}}, @w{memory_object_control_t @var{new_control}}, @w{memory_object_name_t @var{new_name}}, @w{vm_size_t @var{new_page_size}})
+The function @code{memory_object_create} is a request that the given
+memory manager accept responsibility for the given memory object created
+by the kernel. This call will only be made to the system
+@strong{default memory manager}. The memory object in question
+initially consists of zero-filled memory; only memory pages that are
+actually written will ever be provided to
+@code{memory_object_data_request} calls; the default memory manager must
+use @code{memory_object_data_unavailable} for any pages that have not
+previously been written.
+
+No reply is expected after this call. Since this call is directed to
+the default memory manager, the kernel assumes that it will be ready to
+handle data requests to this object and does not need the confirmation
+of a @code{memory_object_ready} call.
+
+The argument @var{old_memory_object} is a memory object provided by the
+default memory manager on which the kernel can make
+@code{memory_object_create} calls. @var{new_memory_object} is a new
+memory object created by the kernel; see synopsis for further
+description. Note that all port rights (including receive rights) are
+included for the new memory object. @var{new_object_size} is the
+maximum size of the new object. @var{new_control} is a port, created by
+the kernel, on which a memory manager may issue cache management
+requests for the new object. @var{new_name} is a port used by the kernel
+to refer to the new memory object data in response to @code{vm_region}
+calls. @var{new_page_size} is the page size to be used by this kernel.
+All data sizes in calls involving this kernel must be an integral
+multiple of the page size. Note that different kernels, indicated by
+different @code{memory_control} ports, may have different page sizes.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+@deftypefun kern_return_t memory_object_data_initialize (@w{memory_object_t @var{memory_object}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_offset_t @var{data}}, @w{vm_size_t @var{data_count}})
+@deftypefunx kern_return_t seqnos_memory_object_data_initialize (@w{memory_object_t @var{memory_object}}, @w{mach_port_seqno_t @var{seqno}}, @w{memory_object_control_t @var{memory_control}}, @w{vm_offset_t @var{offset}}, @w{vm_offset_t @var{data}}, @w{vm_size_t @var{data_count}})
+The function @code{memory_object_data_initialize} provides the memory
+manager with initial data for a kernel-created memory object. If the
+memory manager already has been supplied data (by a previous
+@code{memory_object_data_initialize}, or
+@code{memory_object_data_return}), then this data should be ignored.
+Otherwise, this call behaves exactly as does
+@code{memory_object_data_return} on memory objects created by the kernel
+via @code{memory_object_create} and thus will only be made to default
+memory managers. This call will not be made on objects created via
+@code{memory_object_copy}.
+
+The argument @var{memory_object} is the port that represents the memory
+object data, as supplied by the kernel in a @code{memory_object_create}
+call. @var{memory_control} is the request port to which a response is
+requested. (In the event that a memory object has been supplied to more
+than one kernel, this parameter identifies the kernel that has made the
+request.) @var{offset} is the
+offset within a memory object to which this call refers. This will be
+page aligned. @var{data} is the data which has been modified while
+cached in physical memory. @var{data_count} is the amount of data to be
+written, in bytes. This will be an integral number of memory object
+pages.
+
+The function should return @code{KERN_SUCCESS}, but since this routine
+is called by the kernel, which does not wait for a reply message, this
+value is ignored.
+@end deftypefun
+
+
+@node Threads and Tasks
+@chapter Threads and Tasks
+
+@menu
+* Thread Interface:: Manipulating threads.
+* Task Interface:: Manipulating tasks.
+* Profiling:: Profiling threads and tasks.
+@end menu
+
+
+@node Thread Interface
+@section Thread Interface
+
+@cindex thread port
+@cindex port representing a thread
+@deftp {Data type} thread_t
+This is a @code{mach_port_t} and used to hold the port name of a
+thread port that represents the thread. Manipulations of the thread are
+implemented as remote procedure calls to the thread port. A thread can
+get a port to itself with the @code{mach_thread_self} system call.
+@end deftp
+
+@menu
+* Thread Creation:: Creating new threads.
+* Thread Termination:: Terminating existing threads.
+* Thread Information::          How to get information on threads.
+* Thread Settings::             How to set thread related information.
+* Thread Execution:: How to control the thread's machine state.
+* Scheduling:: Operations on thread scheduling.
+* Thread Special Ports:: How to handle the thread's special ports.
+* Exceptions:: Managing exceptions.
+@end menu
+
+
+@node Thread Creation
+@subsection Thread Creation
+
+@deftypefun kern_return_t thread_create (@w{task_t @var{parent_task}}, @w{thread_t *@var{child_thread}})
+The function @code{thread_create} creates a new thread within the task
+specified by @var{parent_task}. The new thread has no processor state,
+and has a suspend count of 1. To get a new thread to run, first
+@code{thread_create} is called to get the new thread's identifier,
+(@var{child_thread}). Then @code{thread_set_state} is called to set a
+processor state, and finally @code{thread_resume} is called to get the
+thread scheduled to execute.
+
+When the thread is created, send rights to its thread kernel port are
+given to it and returned to the caller in @var{child_thread}. The new
+thread's exception port is set to @code{MACH_PORT_NULL}.
+
+The function returns @code{KERN_SUCCESS} if a new thread has been
+created, @code{KERN_INVALID_ARGUMENT} if @var{parent_task} is not a
+valid task and @code{KERN_RESOURCE_SHORTAGE} if some critical kernel
+resource is not available.
+@end deftypefun
+
+
+@node Thread Termination
+@subsection Thread Termination
+
+@deftypefun kern_return_t thread_terminate (@w{thread_t @var{target_thread}})
+The function @code{thread_terminate} destroys the thread specified by
+@var{target_thread}.
+
+The function returns @code{KERN_SUCCESS} if the thread has been killed
+and @code{KERN_INVALID_ARGUMENT} if @var{target_thread} is not a thread.
+@end deftypefun
+
+
+@node Thread Information
+@subsection Thread Information
+
+@deftypefun thread_t mach_thread_self ()
+The @code{mach_thread_self} system call returns the calling thread's
+thread port.
+
+@code{mach_thread_self} has an effect equivalent to receiving a send
+right for the thread port. @code{mach_thread_self} returns the name of
+the send right. In particular, successive calls will increase the
+calling task's user-reference count for the send right.
+
+@c author{marcus}
+As a special exception, the kernel will overrun the user reference count
+of the thread name port, so that this function can not fail for that
+reason. Because of this, the user should not deallocate the port right
+if an overrun might have happened. Otherwise the reference count could
+drop to zero and the send right be destroyed while the user still
+expects to be able to use it. As the kernel does not make use of the
+number of extant send rights anyway, this is safe to do (the thread port
+itself is not destroyed, even when there are no send rights anymore).
+
+The function returns @code{MACH_PORT_NULL} if a resource shortage
+prevented the reception of the send right or if the thread port is
+currently null and @code{MACH_PORT_DEAD} if the thread port is currently
+dead.
+@end deftypefun
+
+@deftypefun kern_return_t thread_info (@w{thread_t @var{target_thread}}, @w{int @var{flavor}}, @w{thread_info_t @var{thread_info}}, @w{mach_msg_type_number_t *@var{thread_infoCnt}})
+The function @code{thread_info} returns the selected information array
+for a thread, as specified by @var{flavor}.
+
+@var{thread_info} is an array of integers that is supplied by the caller
+and returned filled with specified information. @var{thread_infoCnt} is
+supplied as the maximum number of integers in @var{thread_info}. On
+return, it contains the actual number of integers in @var{thread_info}.
+The maximum number of integers returned by any flavor is
+@code{THREAD_INFO_MAX}.
+
+The type of information returned is defined by @var{flavor}, which can
+be one of the following:
+
+@table @code
+@item THREAD_BASIC_INFO
+The function returns basic information about the thread, as defined by
+@code{thread_basic_info_t}. This includes the user and system time, the
+run state, and scheduling priority. The number of integers returned is
+@code{THREAD_BASIC_INFO_COUNT}.
+
+@item THREAD_SCHED_INFO
+The function returns information about the scheduling policy for the
+thread as defined by @code{thread_sched_info_t}. The number of integers
+returned is @code{THREAD_SCHED_INFO_COUNT}.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{target_thread} is not a thread or
+@var{flavor} is not recognized. The function returns
+@code{MIG_ARRAY_TOO_LARGE} if the returned info array is too large for
+@var{thread_info}. In this case, @var{thread_info} is filled as much as
+possible and @var{thread_infoCnt} is set to the number of elements that
+would have been returned if there were enough room.
+@end deftypefun
+
+@deftp {Data type} {struct thread_basic_info}
+This structure is returned in @var{thread_info} by the
+@code{thread_info} function and provides basic information about the
+thread. You can cast a variable of type @code{thread_info_t} to a
+pointer of this type if you provided it as the @var{thread_info}
+parameter for the @code{THREAD_BASIC_INFO} flavor of @code{thread_info}.
+It has the following members:
+
+@table @code
+@item time_value_t user_time
+user run time
+
+@item time_value_t system_time
+system run time
+@item int cpu_usage
+Scaled cpu usage percentage. The scale factor is @code{TH_USAGE_SCALE}.
+
+@item int base_priority
+The base scheduling priority of the thread.
+
+@item int cur_priority
+The current scheduling priority of the thread.
+
+@item integer_t run_state
+The run state of the thread. The possible values of this field are:
+@table @code
+@item TH_STATE_RUNNING
+The thread is running normally.
+
+@item TH_STATE_STOPPED
+The thread is suspended.
+
+@item TH_STATE_WAITING
+The thread is waiting normally.
+
+@item TH_STATE_UNINTERRUPTIBLE
+The thread is in an uninterruptible wait.
+
+@item TH_STATE_HALTED
+The thread is halted at a clean point.
+@end table
+
+@item int flags
+Various flags. The possible values of this field are:
+@table @code
+@item TH_FLAGS_SWAPPED
+The thread is swapped out.
+
+@item TH_FLAGS_IDLE
+The thread is an idle thread.
+@end table
+
+@item int suspend_count
+The suspend count for the thread.
+
+@item int sleep_time
+The number of seconds that the thread has been sleeping.
+
+@item time_value_t creation_time
+The time stamp of creation.
+@end table
+@end deftp
+
+@deftp {Data type} thread_basic_info_t
+This is a pointer to a @code{struct thread_basic_info}.
+@end deftp
+
+@deftp {Data type} {struct thread_sched_info}
+This structure is returned in @var{thread_info} by the
+@code{thread_info} function and provides schedule information about the
+thread. You can cast a variable of type @code{thread_info_t} to a
+pointer of this type if you provided it as the @var{thread_info}
+parameter for the @code{THREAD_SCHED_INFO} flavor of @code{thread_info}.
+It has the following members:
+
+@table @code
+@item int policy
+The scheduling policy of the thread, @ref{Scheduling Policy}.
+
+@item integer_t data
+Policy-dependent scheduling information, @ref{Scheduling Policy}.
+
+@item int base_priority
+The base scheduling priority of the thread.
+
+@item int max_priority
+The maximum scheduling priority of the thread.
+
+@item int cur_priority
+The current scheduling priority of the thread.
+
+@item int depressed
+@code{TRUE} if the thread is depressed.
+
+@item int depress_priority
+The priority the thread was depressed from.
+
+@item int last_processor
+The last processor used by the thread.
+@end table
+@end deftp
+
+@deftp {Data type} thread_sched_info_t
+This is a pointer to a @code{struct thread_sched_info}.
+@end deftp
+
+@deftypefun kern_return_t thread_set_name (@w{thread_t @var{target_thread}}, @w{const_kernel_debug_name_t @var{name}})
+
+The function @code{thread_set_name} sets the name of @var{target_thread}
+to @var{name}, truncating it if necessary.
+
+This is a debugging aid. The name is used in diagnostic messages
+printed by the kernel.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded.
+@end deftypefun
+
+
+@node Thread Settings
+@subsection Thread Settings
+
+@deftypefun kern_return_t thread_wire (@w{host_priv_t @var{host_priv}}, @w{thread_t @var{thread}}, @w{boolean_t @var{wired}})
+The function @code{thread_wire} controls the VM privilege level of the
+thread @var{thread}. A VM-privileged thread never waits inside the
+kernel for memory allocation from the kernel's free list of pages or for
+allocation of a kernel stack.
+
+Threads that are part of the default pageout path should be
+VM-privileged, to prevent system deadlocks. Threads that are not part
+of the default pageout path should not be VM-privileged, to prevent the
+kernel's free list of pages from being exhausted.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_ARGUMENT} if @var{host_priv} or @var{thread} was
+invalid.
+
+The @code{thread_wire} call is actually an RPC to @var{host_priv},
+normally a send right for a privileged host port, but potentially any
+send right. In addition to the normal diagnostic return codes from the
+call's server (normally the kernel), the call may return @code{mach_msg}
+return codes.
+@c See also: vm_wire(2), vm_set_default_memory_manager(2).
+@end deftypefun
+
+
+@node Thread Execution
+@subsection Thread Execution
+
+@deftypefun kern_return_t thread_suspend (@w{thread_t @var{target_thread}})
+Increments the thread's suspend count and prevents the thread from
+executing any more user level instructions. In this context a user
+level instruction is either a machine instruction executed in user mode
+or a system trap instruction including page faults. Thus if a thread is
+currently executing within a system trap the kernel code may continue to
+execute until it reaches the system return code or it may suspend within
+the kernel code. In either case, when the thread is resumed the system
+trap will return. This could cause unpredictable results if the user
+did a suspend and then altered the user state of the thread in order to
+change its direction upon a resume. The call @code{thread_abort} is
+provided to allow the user to abort any system call that is in progress
+in a predictable way.
+
+The suspend count may become greater than one with the effect that it
+will take more than one resume call to restart the thread.
+
+The function returns @code{KERN_SUCCESS} if the thread has been
+suspended and @code{KERN_INVALID_ARGUMENT} if @var{target_thread} is not
+a thread.
+@end deftypefun
+
+@deftypefun kern_return_t thread_resume (@w{thread_t @var{target_thread}})
+Decrements the thread's suspend count. If the count becomes zero the
+thread is resumed. If it is still positive, the thread is left
+suspended. The suspend count may not become negative.
+
+The function returns @code{KERN_SUCCESS} if the thread has been resumed,
+@code{KERN_FAILURE} if the suspend count is already zero and
+@code{KERN_INVALID_ARGUMENT} if @var{target_thread} is not a thread.
+@end deftypefun
+
+@deftypefun kern_return_t thread_abort (@w{thread_t @var{target_thread}})
+The function @code{thread_abort} aborts the kernel primitives:
+@code{mach_msg}, @code{msg_send}, @code{msg_receive} and @code{msg_rpc}
+and page-faults, making the call return a code indicating that it was
+interrupted. The call is interrupted whether or not the thread (or task
+containing it) is currently suspended. If it is suspended, the thread
+receives the interrupt when it is resumed.
+
+A thread will retry an aborted page-fault if its state is not modified
+before it is resumed. @code{msg_send} returns @code{SEND_INTERRUPTED};
+@code{msg_receive} returns @code{RCV_INTERRUPTED}; @code{msg_rpc}
+returns either @code{SEND_INTERRUPTED} or @code{RCV_INTERRUPTED},
+depending on which half of the RPC was interrupted.
+
+The main reason for this primitive is to allow one thread to cleanly
+stop another thread in a manner that will allow the future execution of
+the target thread to be controlled in a predictable way.
+@code{thread_suspend} keeps the target thread from executing any further
+instructions at the user level, including the return from a system call.
+@code{thread_get_state}/@code{thread_set_state} allows the examination
+or modification of the user state of a target thread. However, if a
+suspended thread was executing within a system call, it also has
+associated with it a kernel state. This kernel state can not be
+modified by @code{thread_set_state} with the result that when the thread
+is resumed the system call may return changing the user state and
+possibly user memory. @code{thread_abort} aborts the kernel call from
+the target thread's point of view by resetting the kernel state so that
+the thread will resume execution at the system call return with the
+return code value set to one of the interrupted codes. The system call
+itself will either be entirely completed or entirely aborted, depending
+on the precise moment at which the abort was received. Thus if the
+thread's user state has been changed by @code{thread_set_state}, it will
+not be modified by any unexpected system call side effects.
+
+For example to simulate a Unix signal, the following sequence of calls
+may be used:
+
+@enumerate
+@item
+@code{thread_suspend}: Stops the thread.
+
+@item
+@code{thread_abort}: Interrupts any system call in progress, setting the
+return value to `interrupted'. Since the thread is stopped, it will not
+return to user code.
+
+@item
+@code{thread_set_state}: Alters thread's state to simulate a procedure
+call to the signal handler.
+
+@item
+@code{thread_resume}: Resumes execution at the signal handler. If the
+thread's stack has been correctly set up, the thread may return to the
+interrupted system call. (Of course, the code to push an extra stack
+frame and change the registers is VERY machine-dependent.)
+@end enumerate
+
+Calling @code{thread_abort} on a non-suspended thread is pretty risky,
+since it is very difficult to know exactly what system trap, if any, the
+thread might be executing and whether an interrupt return would cause
+the thread to do something useful.
+
+The function returns @code{KERN_SUCCESS} if the thread received an
+interrupt and @code{KERN_INVALID_ARGUMENT} if @var{target_thread} is not
+a thread.
+@end deftypefun
+
+@deftypefun kern_return_t thread_get_state (@w{thread_t @var{target_thread}}, @w{int @var{flavor}}, @w{thread_state_t @var{old_state}}, @w{mach_msg_type_number_t *@var{old_stateCnt}})
+The function @code{thread_get_state} returns the execution state
+(e.g. the machine registers) of @var{target_thread} as specified by
+@var{flavor}. The @var{old_state} is an array of integers that is
+provided by the caller and returned filled with the specified
+information. @var{old_stateCnt} is input set to the maximum number of
+integers in @var{old_state} and returned equal to the actual number of
+integers in @var{old_state}.
+
+@var{target_thread} may not be @code{mach_thread_self()}.
+
+The definition of the state structures can be found in
+@file{machine/thread_status.h}.
+
+The function returns @code{KERN_SUCCESS} if the state has been returned,
+@code{KERN_INVALID_ARGUMENT} if @var{target_thread} is not a thread or
+is @code{mach_thread_self} or @var{flavor} is unrecognized for this machine.
+The function returns @code{MIG_ARRAY_TOO_LARGE} if the returned state is
+too large for @var{old_state}. In this case, @var{old_state} is filled
+as much as possible and @var{old_stateCnt} is set to the number of
+elements that would have been returned if there were enough room.
+@end deftypefun
+
+@deftypefun kern_return_t thread_set_state (@w{thread_t @var{target_thread}}, @w{int @var{flavor}}, @w{thread_state_t @var{new_state}}, @w{mach_msg_type_number_t @var{new_state_count}})
+The function @code{thread_set_state} sets the execution state (e.g. the
+machine registers) of @var{target_thread} as specified by @var{flavor}.
+The @var{new_state} is an array of integers. @var{new_state_count} is
+the number of elements in @var{new_state}. The entire set of registers
+is reset. This will do unpredictable things if @var{target_thread} is
+not suspended.
+
+@var{target_thread} may not be @code{mach_thread_self}.
+
+The definition of the state structures can be found in
+@file{machine/thread_status.h}.
+
+The function returns @code{KERN_SUCCESS} if the state has been set and
+@code{KERN_INVALID_ARGUMENT} if @var{target_thread} is not a thread or
+is @code{mach_thread_self} or @var{flavor} is unrecognized for this
+machine.
+@end deftypefun
+
+
+@node Scheduling
+@subsection Scheduling
+
+@menu
+* Thread Priority:: Changing the priority of a thread.
+* Hand-Off Scheduling:: Switching to a new thread.
+* Scheduling Policy:: Setting the scheduling policy.
+@end menu
+
+
+@node Thread Priority
+@subsubsection Thread Priority
+
+Threads have three priorities associated with them by the system, a
+priority, a maximum priority, and a scheduled priority. The scheduled
+priority is used to make scheduling decisions about the thread. It is
+determined from the priority by the policy (for timesharing, this means
+adding an increment derived from cpu usage). The priority can be set
+under user control, but may never exceed the maximum priority. Changing
+the maximum priority requires presentation of the control port for the
+thread's processor set; since the control port for the default processor
+set is privileged, users cannot raise their maximum priority to unfairly
+compete with other users on that set. Newly created threads obtain
+their priority from their task and their max priority from the thread.
+
+@deftypefun kern_return_t thread_priority (@w{thread_t @var{thread}}, @w{int @var{priority}}, @w{boolean_t @var{set_max}})
+The function @code{thread_priority} changes the priority and optionally
+the maximum priority of @var{thread}. Priorities range from 0 to 49,
+where lower numbers denote higher priorities. If the new priority is
+higher than the priority of the current thread, preemption may occur as
+a result of this call. The maximum priority of the thread is also set
+if @var{set_max} is @code{TRUE}. This call will fail if @var{priority}
+is greater than the current maximum priority of the thread. As a
+result, this call can only lower the value of a thread's maximum
+priority.
+
+The function returns @code{KERN_SUCCESS} if the operation completed
+successfully, @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a
+thread or @var{priority} is out of range (not in 0..49), and
+@code{KERN_FAILURE} if the requested operation would violate the
+thread's maximum priority (thread_priority).
+@end deftypefun
+
+@deftypefun kern_return_t thread_max_priority (@w{thread_t @var{thread}}, @w{processor_set_t @var{processor_set}}, @w{int @var{priority}})
+The function @code{thread_max_priority} changes the maximum priority of
+the thread. Because it requires presentation of the corresponding
+processor set port, this call can reset the maximum priority to any
+legal value.
+
+The function returns @code{KERN_SUCCESS} if the operation completed
+successfully, @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a
+thread or @var{processor_set} is not a control port for a processor set
+or @var{priority} is out of range (not in 0..49), and
+@code{KERN_FAILURE} if the thread is not assigned to the processor set
+whose control port was presented.
+@end deftypefun
+
+
+@node Hand-Off Scheduling
+@subsubsection Hand-Off Scheduling
+
+@deftypefun kern_return_t thread_switch (@w{thread_t @var{new_thread}}, @w{int @var{option}}, @w{int @var{time}})
+The function @code{thread_switch} provides low-level access to the
+scheduler's context switching code. @var{new_thread} is a hint that
+implements hand-off scheduling. The operating system will attempt to
+switch directly to the new thread (bypassing the normal logic that
+selects the next thread to run) if possible. Since this is a hint, it
+may be incorrect; it is ignored if it doesn't specify a thread on the
+same host as the current thread or if that thread can't be switched to
+(i.e., not runnable or already running on another processor or giving
+a plainly invalid hint, such as @code{MACH_PORT_NULL}). In this case,
+the normal logic to select the next thread to run is used; the current
+thread may continue running if there is no other appropriate thread to
+run.
+
+Options for @var{option} are defined in @file{mach/thread_switch.h} and
+specify the interpretation of @var{time}. The possible values for
+@var{option} are:
+
+@table @code
+@item SWITCH_OPTION_NONE
+No options, the time argument is ignored.
+
+@item SWITCH_OPTION_WAIT
+The thread is blocked for the specified time (in milliseconds;
+specifying @code{0} will wait for the next tick). This can be aborted
+by @code{thread_abort}.
+
+@item SWITCH_OPTION_DEPRESS
+The thread's priority is depressed to the lowest possible value for the
+specified time. This can be aborted by @code{thread_depress_abort}.
+This depression is independent of operations that change the thread's
+priority (e.g. @code{thread_priority} will not abort the depression).
+The minimum time and units of time can be obtained as the
+@code{min_timeout} value from @code{host_info}. The depression is also
+aborted when the current thread is next run (either via hand-off
+scheduling or because the processor set has nothing better to do).
+@end table
+
+@code{thread_switch} is often called when the current thread can proceed
+no further for some reason; the various options and arguments allow
+information about this reason to be transmitted to the kernel. The
+@var{new_thread} argument (handoff scheduling) is useful when the
+identity of the thread that must make progress before the current thread
+runs again is known. The @code{WAIT} option is used when the amount of
+time that the current thread must wait before it can do anything useful
+can be estimated and is fairly long. The @code{DEPRESS} option is used
+when the amount of time that must be waited is fairly short, especially
+when the identity of the thread that is being waited for is not known.
+
+Users should beware of calling @code{thread_switch} with an invalid hint
+(e.g. @code{MACH_PORT_NULL}) and no option. Because the time-sharing
+scheduler varies the priority of threads based on usage, this may result
+in a waste of cpu time if the thread that must be run is of lower
+priority. The use of the @code{DEPRESS} option in this situation is
+highly recommended.
+
+@code{thread_switch} ignores policies. Users relying on the preemption
+semantics of a fixed time policy should be aware that
+@code{thread_switch} ignores these semantics; it will run the specified
+@var{new_thread} independent of its priority and the priority of any other
+threads that could be run instead.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_ARGUMENT} if @var{thread} is not a thread or
+@var{option} is not a recognized option, and @code{KERN_FAILURE} if
+@code{thread_depress_abort} failed because the thread was not depressed.
+@end deftypefun
+
+@deftypefun kern_return_t thread_depress_abort (@w{thread_t @var{thread}})
+The function @code{thread_depress_abort} cancels any priority depression
+for @var{thread} caused by a @code{swtch_pri} or @code{thread_switch}
+call.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{thread} is not a valid thread.
+@end deftypefun
+
+@deftypefun boolean_t swtch ()
+@c XXX Clear up wording.
+The system trap @code{swtch} attempts to switch the current thread off
+the processor. The return value indicates if more than the current
+thread is running in the processor set. This is useful for lock
+management routines.
+
+The call returns @code{FALSE} if the thread is justified in becoming a
+resource hog by continuing to spin because there's nothing else useful
+that the processor could do. @code{TRUE} is returned if the thread
+should make one more check on the lock and then be a good citizen and
+really suspend.
+@end deftypefun
+
+@deftypefun boolean_t swtch_pri (@w{int @var{priority}})
+The system trap @code{swtch_pri} attempts to switch the current thread
+off the processor as @code{swtch} does, but depressing the priority of
+the thread to the minimum possible value during the time.
+@var{priority} is not used currently.
+
+The return value is as for @code{swtch}.
+@end deftypefun
+
+
+@node Scheduling Policy
+@subsubsection Scheduling Policy
+
+@deftypefun kern_return_t thread_policy (@w{thread_t @var{thread}}, @w{int @var{policy}}, @w{int @var{data}})
+The function @code{thread_policy} changes the scheduling policy for
+@var{thread} to @var{policy}.
+
+@var{data} is policy-dependent scheduling information. There are
+currently two supported policies: @code{POLICY_TIMESHARE} and
+@code{POLICY_FIXEDPRI} defined in @file{mach/policy.h}; this file is
+included by @file{mach.h}. @var{data} is meaningless for timesharing,
+but is the quantum to be used (in milliseconds) for the fixed priority
+policy. To be meaningful, this quantum must be a multiple of the basic
+system quantum (min_quantum) which can be obtained from
+@code{host_info}. The system will always round up to the next multiple
+of the quantum.
+
+Processor sets may restrict the allowed policies, so this call will fail
+if the processor set to which @var{thread} is currently assigned does
+not permit @var{policy}.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded.
+@code{KERN_INVALID_ARGUMENT} if @var{thread} is not a thread or
+@var{policy} is not a recognized policy, and @code{KERN_FAILURE} if the
+processor set to which @var{thread} is currently assigned does not
+permit @var{policy}.
+@end deftypefun
+
+
+@node Thread Special Ports
+@subsection Thread Special Ports
+
+@deftypefun kern_return_t thread_get_special_port (@w{thread_t @var{thread}}, @w{int @var{which_port}}, @w{mach_port_t *@var{special_port}})
+The function @code{thread_get_special_port} returns send rights to one
+of a set of special ports for the thread specified by @var{thread}.
+
+The possible values for @var{which_port} are @code{THREAD_KERNEL_PORT}
+and @code{THREAD_EXCEPTION_PORT}. A thread also has access to its
+task's special ports.
+
+The function returns @code{KERN_SUCCESS} if the port was returned and
+@code{KERN_INVALID_ARGUMENT} if @var{thread} is not a thread or
+@var{which_port} is an invalid port selector.
+@end deftypefun
+
+@deftypefun kern_return_t thread_get_kernel_port (@w{thread_t @var{thread}}, @w{mach_port_t *@var{kernel_port}})
+The function @code{thread_get_kernel_port} is equivalent to the function
+@code{thread_get_special_port} with the @var{which_port} argument set to
+@code{THREAD_KERNEL_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t thread_get_exception_port (@w{thread_t @var{thread}}, @w{mach_port_t *@var{exception_port}})
+The function @code{thread_get_exception_port} is equivalent to the
+function @code{thread_get_special_port} with the @var{which_port}
+argument set to @code{THREAD_EXCEPTION_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t thread_set_special_port (@w{thread_t @var{thread}}, @w{int @var{which_port}}, @w{mach_port_t @var{special_port}})
+The function @code{thread_set_special_port} sets one of a set of special
+ports for the thread specified by @var{thread}.
+
+The possible values for @var{which_port} are @code{THREAD_KERNEL_PORT}
+and @code{THREAD_EXCEPTION_PORT}. A thread also has access to its
+task's special ports.
+
+The function returns @code{KERN_SUCCESS} if the port was set and
+@code{KERN_INVALID_ARGUMENT} if @var{thread} is not a thread or
+@var{which_port} is an invalid port selector.
+@end deftypefun
+
+@deftypefun kern_return_t thread_set_kernel_port (@w{thread_t @var{thread}}, @w{mach_port_t @var{kernel_port}})
+The function @code{thread_set_kernel_port} is equivalent to the function
+@code{thread_set_special_port} with the @var{which_port} argument set to
+@code{THREAD_KERNEL_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t thread_set_exception_port (@w{thread_t @var{thread}}, @w{mach_port_t @var{exception_port}})
+The function @code{thread_set_exception_port} is equivalent to the
+function @code{thread_set_special_port} with the @var{which_port}
+argument set to @code{THREAD_EXCEPTION_PORT}.
+@end deftypefun
+
+
+@node Exceptions
+@subsection Exceptions
+
+@deftypefun kern_return_t catch_exception_raise (@w{mach_port_t @var{exception_port}}, @w{thread_t @var{thread}}, @w{task_t @var{task}}, @w{int @var{exception}}, @w{int @var{code}}, @w{long @var{subcode}})
+XXX Fixme
+@end deftypefun
+
+@deftypefun kern_return_t exception_raise (@w{mach_port_t @var{exception_port}}, @w{mach_port_t @var{thread}}, @w{mach_port_t @var{task}}, @w{integer_t @var{exception}}, @w{integer_t @var{code}}, @w{long_integer_t @var{subcode}})
+XXX Fixme
+@end deftypefun
+
+@deftypefun kern_return_t evc_wait (@w{unsigned int @var{event}})
+@c XXX This is for user space drivers, the description is incomplete.
+The system trap @code{evc_wait} makes the calling thread wait for the
+event specified by @var{event}.
+
+The call returns @code{KERN_SUCCESS} if the event has occurred,
+@code{KERN_NO_SPACE} if another thread is waiting for the same event and
+@code{KERN_INVALID_ARGUMENT} if the event object is invalid.
+@end deftypefun
+
+
+@node Task Interface
+@section Task Interface
+
+@cindex task port
+@cindex port representing a task
+@deftp {Data type} task_t
+This is a @code{mach_port_t} and is used to hold the port name of a task
+port that represents the task. Manipulations of the task are
+implemented as remote procedure calls to the task port. A task can get
+a port to itself with the @code{mach_task_self} system call.
+
+The task port name is also used to identify the task's IPC space
+(@pxref{Port Manipulation Interface}) and the task's virtual memory map
+(@pxref{Virtual Memory Interface}).
+@end deftp
+
+@menu
+* Task Creation:: Creating tasks.
+* Task Termination:: Terminating tasks.
+* Task Information:: Information on tasks.
+* Task Execution:: Thread scheduling in a task.
+* Task Special Ports:: How to get and set the task's special ports.
+* Syscall Emulation:: How to emulate system calls.
+@end menu
+
+
+@node Task Creation
+@subsection Task Creation
+
+@deftypefun kern_return_t task_create (@w{task_t @var{parent_task}}, @w{boolean_t @var{inherit_memory}}, @w{task_t *@var{child_task}})
+The function @code{task_create} creates a new task from
+@var{parent_task}; the resulting task (@var{child_task}) acquires shared
+or copied parts of the parent's address space (see @code{vm_inherit}).
+The child task initially contains no threads.
+
+If @var{inherit_memory} is set, the child task's address space is built
+from the parent task according to its memory inheritance values;
+otherwise, the child task is given an empty address space.
+
+The child task gets the three special ports created or copied for it at
+task creation. The @code{TASK_KERNEL_PORT} is created and send rights
+for it are given to the child and returned to the caller.
+@c The following is only relevant if MACH_IPC_COMPAT is used.
+@c The @code{TASK_NOTIFY_PORT} is created and receive, ownership and send rights
+@c for it are given to the child. The caller has no access to it.
+The @code{TASK_BOOTSTRAP_PORT} and the @code{TASK_EXCEPTION_PORT} are
+inherited from the parent task. The new task can get send rights to
+these ports with the call @code{task_get_special_port}.
+
+The function returns @code{KERN_SUCCESS} if a new task has been created,
+@code{KERN_INVALID_ARGUMENT} if @var{parent_task} is not a valid task
+port and @code{KERN_RESOURCE_SHORTAGE} if some critical kernel resource
+is unavailable.
+@end deftypefun
+
+
+@node Task Termination
+@subsection Task Termination
+
+@deftypefun kern_return_t task_terminate (@w{task_t @var{target_task}})
+The function @code{task_terminate} destroys the task specified by
+@var{target_task} and all its threads. All resources that are used only
+by this task are freed. Any port to which this task has receive and
+ownership rights is destroyed.
+
+The function returns @code{KERN_SUCCESS} if the task has been killed,
+@code{KERN_INVALID_ARGUMENT} if @var{target_task} is not a task.
+@end deftypefun
+
+
+@node Task Information
+@subsection Task Information
+@deftypefun task_t mach_task_self ()
+The @code{mach_task_self} system call returns the calling thread's task
+port.
+
+@code{mach_task_self} has an effect equivalent to receiving a send right
+for the task port. @code{mach_task_self} returns the name of the send
+right. In particular, successive calls will increase the calling task's
+user-reference count for the send right.
+
+As a special exception, the kernel will overrun the user reference count
+of the task name port, so that this function can not fail for that
+reason. Because of this, the user should not deallocate the port right
+if an overrun might have happened. Otherwise the reference count could
+drop to zero and the send right be destroyed while the user still
+expects to be able to use it. As the kernel does not make use of the
+number of extant send rights anyway, this is safe to do (the task port
+itself is not destroyed, even when there are no send rights anymore).
+
+The function returns @code{MACH_PORT_NULL} if a resource shortage
+prevented the reception of the send right, @code{MACH_PORT_NULL} if the
+task port is currently null, @code{MACH_PORT_DEAD} if the task port is
+currently dead.
+@end deftypefun
+
+@deftypefun kern_return_t task_threads (@w{task_t @var{target_task}}, @w{thread_array_t *@var{thread_list}}, @w{mach_msg_type_number_t *@var{thread_count}})
+The function @code{task_threads} gets send rights to the kernel port for
+each thread contained in @var{target_task}. @var{thread_list} is an
+array that is created as a result of this call. The caller may wish to
+@code{vm_deallocate} this array when the data is no longer needed.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{target_task} is not a task.
+@end deftypefun
+
+@deftypefun kern_return_t task_info (@w{task_t @var{target_task}}, @w{int @var{flavor}}, @w{task_info_t @var{task_info}}, @w{mach_msg_type_number_t *@var{task_info_count}})
+The function @code{task_info} returns the selected information array for
+a task, as specified by @var{flavor}. @var{task_info} is an array of
+integers that is supplied by the caller, and filled with specified
+information. @var{task_info_count} is supplied as the maximum number of
+integers in @var{task_info}. On return, it contains the actual number
+of integers in @var{task_info}. The maximum number of integers returned
+by any flavor is @code{TASK_INFO_MAX}.
+
+The type of information returned is defined by @var{flavor}, which can
+be one of the following:
+
+@table @code
+@item TASK_BASIC_INFO
+The function returns basic information about the task, as defined by
+@code{task_basic_info_t}. This includes the user and system time and
+memory consumption. The number of integers returned is
+@code{TASK_BASIC_INFO_COUNT}.
+
+@item TASK_EVENTS_INFO
+The function returns information about events for the task as defined by
+@code{task_events_info_t}. This includes statistics about virtual
+memory and IPC events like pageouts, pageins and messages sent and
+received. The number of integers returned is
+@code{TASK_EVENTS_INFO_COUNT}.
+
+@item TASK_THREAD_TIMES_INFO
+The function returns information about the total time for live threads
+as defined by @code{task_thread_times_info_t}. The number of integers
+returned is @code{TASK_THREAD_TIMES_INFO_COUNT}.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{target_task} is not a thread or
+@var{flavor} is not recognized. The function returns
+@code{MIG_ARRAY_TOO_LARGE} if the returned info array is too large for
+@var{task_info}. In this case, @var{task_info} is filled as much as
+possible and @var{task_info_count} is set to the number of elements that
+would have been returned if there were enough room.
+@end deftypefun
+
+@deftp {Data type} {struct task_basic_info}
+This structure is returned in @var{task_info} by the @code{task_info}
+function and provides basic information about the task. You can cast a
+variable of type @code{task_info_t} to a pointer of this type if you
+provided it as the @var{task_info} parameter for the
+@code{TASK_BASIC_INFO} flavor of @code{task_info}. It has the following
+members:
+
+@table @code
+@item integer_t suspend_count
+suspend count for task
+
+@item integer_t base_priority
+base scheduling priority
+
+@item rpc_vm_size_t virtual_size
+number of virtual pages
+
+@item rpc_vm_size_t resident_size
+number of resident pages
+
+@item time_value_t user_time
+total user run time for terminated threads
+
+@item time_value_t system_time
+total system run time for terminated threads
+
+@item time_value_t creation_time
+creation time stamp
+@end table
+@end deftp
+
+@deftp {Data type} task_basic_info_t
+This is a pointer to a @code{struct task_basic_info}.
+@end deftp
+
+@deftp {Data type} {struct task_events_info}
+This structure is returned in @var{task_info} by the @code{task_info}
+function and provides event statistics for the task. You can cast a
+variable of type @code{task_info_t} to a pointer of this type if you
+provided it as the @var{task_info} parameter for the
+@code{TASK_EVENTS_INFO} flavor of @code{task_info}. It has the
+following members:
+
+@table @code
+@item rpc_long_natural_t faults
+number of page faults
+
+@item rpc_long_natural_t zero_fills
+number of zero fill pages
+
+@item rpc_long_natural_t reactivations
+number of reactivated pages
+
+@item rpc_long_natural_t pageins
+number of actual pageins
+
+@item rpc_long_natural_t cow_faults
+number of copy-on-write faults
+
+@item rpc_long_natural_t messages_sent
+number of messages sent
+
+@item rpc_long_natural_t messages_received
+number of messages received
+@end table
+@end deftp
+
+@deftp {Data type} task_events_info_t
+This is a pointer to a @code{struct task_events_info}.
+@end deftp
+
+@deftp {Data type} {struct task_thread_times_info}
+This structure is returned in @var{task_info} by the @code{task_info}
+function and provides the total run times of the task's live threads. You can cast a
+variable of type @code{task_info_t} to a pointer of this type if you
+provided it as the @var{task_info} parameter for the
+@code{TASK_THREAD_TIMES_INFO} flavor of @code{task_info}. It has the
+following members:
+
+@table @code
+@item time_value_t user_time
+total user run time for live threads
+
+@item time_value_t system_time
+total system run time for live threads
+@end table
+@end deftp
+
+@deftp {Data type} task_thread_times_info_t
+This is a pointer to a @code{struct task_thread_times_info}.
+@end deftp
+
+@deftypefun kern_return_t task_set_name (@w{task_t @var{target_task}}, @w{const_kernel_debug_name_t @var{name}})
+
+The function @code{task_set_name} sets the name of @var{target_task}
+to @var{name}, truncating it if necessary.
+
+This is a debugging aid. The name is used in diagnostic messages
+printed by the kernel.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded.
+@end deftypefun
+
+@deftypefun kern_return_t task_set_essential (@w{task_t @var{target_task}}, @w{boolean_t @var{essential}})
+
+The function @code{task_set_essential} sets whether @var{target_task} is
+essential for the system, i.e. the system will completely crash and reboot if
+that task crashes. This means that when the debugger is enabled, it should be
+triggered on the crash, so as to get the opportunity to debug the issue instead
+of just rebooting.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded.
+@end deftypefun
+
+
+@node Task Execution
+@subsection Task Execution
+
+@deftypefun kern_return_t task_suspend (@w{task_t @var{target_task}})
+The function @code{task_suspend} increments the task's suspend count and
+stops all threads in the task. As long as the suspend count is positive
+newly created threads will not run. This call does not return until all
+threads are suspended.
+
+The count may become greater than one, with the effect that it will take
+more than one resume call to restart the task.
+
+The function returns @code{KERN_SUCCESS} if the task has been suspended
+and @code{KERN_INVALID_ARGUMENT} if @var{target_task} is not a task.
+@end deftypefun
+
+@deftypefun kern_return_t task_resume (@w{task_t @var{target_task}})
+The function @code{task_resume} decrements the task's suspend count. If
+it becomes zero, all threads with zero suspend counts in the task are
+resumed. The count may not become negative.
+
+The function returns @code{KERN_SUCCESS} if the task has been resumed,
+@code{KERN_FAILURE} if the suspend count is already at zero and
+@code{KERN_INVALID_ARGUMENT} if @var{target_task} is not a task.
+@end deftypefun
+
+@c XXX Should probably be in the "Scheduling" node of the Thread Interface.
+@deftypefun kern_return_t task_priority (@w{task_t @var{task}}, @w{int @var{priority}}, @w{boolean_t @var{change_threads}})
+The priority of a task is used only for creation of new threads; a new
+thread's priority is set to the enclosing task's priority.
+@code{task_priority} changes this task priority. It also sets the
+priorities of all threads in the task to this new priority if
+@var{change_threads} is @code{TRUE}. Existing threads are not affected
+otherwise. If this priority change violates the maximum priority of
+some threads, as many threads as possible will be changed and an error
+code will be returned.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_ARGUMENT} if @var{task} is not a task, or
+@var{priority} is not a valid priority and @code{KERN_FAILURE} if
+@var{change_threads} was @code{TRUE} and the attempt to change the
+priority of at least one existing thread failed because the new priority
+would have exceeded that thread's maximum priority.
+@end deftypefun
+
+@deftypefun kern_return_t task_ras_control (@w{task_t @var{target_task}}, @w{vm_address_t @var{start_pc}}, @w{vm_address_t @var{end_pc}}, @w{int @var{flavor}})
+The function @code{task_ras_control} manipulates a task's set of
+restartable atomic sequences. If a sequence is installed, and any
+thread in the task is preempted within the range
+[@var{start_pc},@var{end_pc}], then the thread is resumed at
+@var{start_pc}. This enables applications to build atomic sequences
+which, when executed to completion, will have executed atomically.
+Restartable atomic sequences are intended to be used on systems that do
+not have hardware support for low-overhead atomic primitives.
+
+As a thread can be rolled-back, the code in the sequence should have no
+side effects other than a final store at @var{end_pc}. The kernel does
+not guarantee that the sequence is restartable. It assumes the
+application knows what it's doing.
+
+A task may have a finite number of atomic sequences that is defined at
+compile time.
+
+The flavor specifies the particular operation that should be applied to
+this restartable atomic sequence. Possible values for flavor can be:
+
+@table @code
+@item TASK_RAS_CONTROL_PURGE_ALL
+Remove all registered sequences for this task.
+
+@item TASK_RAS_CONTROL_PURGE_ONE
+Remove the named registered sequence for this task.
+
+@item TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE
+Atomically remove all registered sequences and install the named
+sequence.
+
+@item TASK_RAS_CONTROL_INSTALL_ONE
+Install this sequence.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the operation has been
+performed, @code{KERN_INVALID_ADDRESS} if the @var{start_pc} or
+@var{end_pc} values are not a valid address for the requested operation
+(for example, it is invalid to purge a sequence that has not been
+registered), @code{KERN_RESOURCE_SHORTAGE} if an attempt was made to
+install more restartable atomic sequences for a task than can be
+supported by the kernel, @code{KERN_INVALID_VALUE} if a bad flavor was
+specified, @code{KERN_INVALID_ARGUMENT} if @var{target_task} is not a
+task and @code{KERN_FAILURE} if the call is not supported on this
+configuration.
+@end deftypefun
+
+
+@node Task Special Ports
+@subsection Task Special Ports
+
+@deftypefun kern_return_t task_get_special_port (@w{task_t @var{task}}, @w{int @var{which_port}}, @w{mach_port_t *@var{special_port}})
+The function @code{task_get_special_port} returns send rights to one of
+a set of special ports for the task specified by @var{task}.
+
+The special ports associated with a task are the kernel port
+(@code{TASK_KERNEL_PORT}), the bootstrap port
+(@code{TASK_BOOTSTRAP_PORT}) and the exception port
+(@code{TASK_EXCEPTION_PORT}). The bootstrap port is a port to which a
+task may send a message requesting other system service ports. This
+port is not used by the kernel. The task's exception port is the port
+to which messages are sent by the kernel when an exception occurs and
+the thread causing the exception has no exception port of its own.
+
+The following macros to call @code{task_get_special_port} for a specific
+port are defined in @code{mach/task_special_ports.h}:
+@code{task_get_exception_port} and @code{task_get_bootstrap_port}.
+
+The function returns @code{KERN_SUCCESS} if the port was returned and
+@code{KERN_INVALID_ARGUMENT} if @var{task} is not a task or
+@var{which_port} is an invalid port selector.
+@end deftypefun
+
+@deftypefun kern_return_t task_get_kernel_port (@w{task_t @var{task}}, @w{mach_port_t *@var{kernel_port}})
+The function @code{task_get_kernel_port} is equivalent to the function
+@code{task_get_special_port} with the @var{which_port} argument set to
+@code{TASK_KERNEL_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t task_get_exception_port (@w{task_t @var{task}}, @w{mach_port_t *@var{exception_port}})
+The function @code{task_get_exception_port} is equivalent to the
+function @code{task_get_special_port} with the @var{which_port} argument
+set to @code{TASK_EXCEPTION_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t task_get_bootstrap_port (@w{task_t @var{task}}, @w{mach_port_t *@var{bootstrap_port}})
+The function @code{task_get_bootstrap_port} is equivalent to the
+function @code{task_get_special_port} with the @var{which_port} argument
+set to @code{TASK_BOOTSTRAP_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t task_set_special_port (@w{task_t @var{task}}, @w{int @var{which_port}}, @w{mach_port_t @var{special_port}})
+The function @code{task_set_special_port} sets one of a set of special
+ports for the task specified by @var{task}.
+
+The special ports associated with a task are the kernel port
+(@code{TASK_KERNEL_PORT}), the bootstrap port
+(@code{TASK_BOOTSTRAP_PORT}) and the exception port
+(@code{TASK_EXCEPTION_PORT}). The bootstrap port is a port to which a
+task may send a message requesting other system service ports. This
+port is not used by the kernel. The task's exception port is the port
+to which messages are sent by the kernel when an exception occurs and
+the thread causing the exception has no exception port of its own.
+
+The function returns @code{KERN_SUCCESS} if the port was set and
+@code{KERN_INVALID_ARGUMENT} if @var{task} is not a task or
+@var{which_port} is an invalid port selector.
+@end deftypefun
+
+@deftypefun kern_return_t task_set_kernel_port (@w{task_t @var{task}}, @w{mach_port_t @var{kernel_port}})
+The function @code{task_set_kernel_port} is equivalent to the function
+@code{task_set_special_port} with the @var{which_port} argument set to
+@code{TASK_KERNEL_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t task_set_exception_port (@w{task_t @var{task}}, @w{mach_port_t @var{exception_port}})
+The function @code{task_set_exception_port} is equivalent to the
+function @code{task_set_special_port} with the @var{which_port} argument
+set to @code{TASK_EXCEPTION_PORT}.
+@end deftypefun
+
+@deftypefun kern_return_t task_set_bootstrap_port (@w{task_t @var{task}}, @w{mach_port_t @var{bootstrap_port}})
+The function @code{task_set_bootstrap_port} is equivalent to the
+function @code{task_set_special_port} with the @var{which_port} argument
+set to @code{TASK_BOOTSTRAP_PORT}.
+@end deftypefun
+
+
+@node Syscall Emulation
+@subsection Syscall Emulation
+
+@deftypefun kern_return_t task_get_emulation_vector (@w{task_t @var{task}}, @w{int *@var{vector_start}}, @w{emulation_vector_t *@var{emulation_vector}}, @w{mach_msg_type_number_t *@var{emulation_vector_count}})
+The function @code{task_get_emulation_vector} gets the user-level
+handler entry points for all emulated system calls.
+@c XXX Fixme
+@end deftypefun
+
+@deftypefun kern_return_t task_set_emulation_vector (@w{task_t @var{task}}, @w{int @var{vector_start}}, @w{emulation_vector_t @var{emulation_vector}}, @w{mach_msg_type_number_t @var{emulation_vector_count}})
+The function @code{task_set_emulation_vector} establishes user-level
+handlers for the specified system calls. Non-emulated system calls are
+specified with an entry of @code{EML_ROUTINE_NULL}. System call
+emulation handlers are inherited by the children of @var{task}.
+@c XXX Fixme
+@end deftypefun
+
+@deftypefun kern_return_t task_set_emulation (@w{task_t @var{task}}, @w{vm_address_t @var{routine_entry_pt}}, @w{int @var{routine_number}})
+The function @code{task_set_emulation} establishes a user-level handler
+for the specified system call. System call emulation handlers are
+inherited by the children of @var{task}.
+@c XXX Fixme
+@end deftypefun
+
+@c XXX Fixme datatype emulation_vector_t
+
+
+@node Profiling
+@section Profiling
+
+@deftypefun kern_return_t task_enable_pc_sampling (@w{task_t @var{task}}, @w{int *@var{ticks}}, @w{sampled_pc_flavor_t @var{flavor}})
+@deftypefunx kern_return_t thread_enable_pc_sampling (@w{thread_t @var{thread}}, @w{int *@var{ticks}}, @w{sampled_pc_flavor_t @var{flavor}})
+The function @code{task_enable_pc_sampling} enables PC sampling for
+@var{task}, the function @code{thread_enable_pc_sampling} enables PC
+sampling for @var{thread}. The kernel's idea of clock granularity is
+returned in @var{ticks} in microseconds (this value should not be trusted). The
+sampling flavor is specified by @var{flavor}.
+
+The function returns @code{KERN_SUCCESS} if the operation is completed successfully
+and @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a valid thread.
+@end deftypefun
+
+@deftypefun kern_return_t task_disable_pc_sampling (@w{task_t @var{task}}, @w{int *@var{sample_count}})
+@deftypefunx kern_return_t thread_disable_pc_sampling (@w{thread_t @var{thread}}, @w{int *@var{sample_count}})
+The function @code{task_disable_pc_sampling} disables PC sampling for
+@var{task}, the function @code{thread_disable_pc_sampling} disables PC
+sampling for @var{thread}. The number of sample elements in the kernel
+for the thread is returned in @var{sample_count}.
+
+The function returns @code{KERN_SUCCESS} if the operation is completed successfully
+and @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a valid thread.
+@end deftypefun
+
+@deftypefun kern_return_t task_get_sampled_pcs (@w{task_t @var{task}}, @w{sampled_pc_seqno_t *@var{seqno}}, @w{sampled_pc_array_t @var{sampled_pcs}}, @w{mach_msg_type_number_t *@var{sample_count}})
+@deftypefunx kern_return_t thread_get_sampled_pcs (@w{thread_t @var{thread}}, @w{sampled_pc_seqno_t *@var{seqno}}, @w{sampled_pc_array_t @var{sampled_pcs}}, @w{int *@var{sample_count}})
+The function @code{task_get_sampled_pcs} extracts the PC samples for
+@var{task}, the function @code{thread_get_sampled_pcs} extracts the PC
+samples for @var{thread}. @var{seqno} is the sequence number of the
+sampled PCs. This is useful for determining when a collector thread has
+missed a sample. The sampled PCs for the thread are returned in
+@var{sampled_pcs}. @var{sample_count} contains the number of sample
+elements returned.
+
+The function returns @code{KERN_SUCCESS} if the operation is completed successfully,
+@code{KERN_INVALID_ARGUMENT} if @var{thread} is not a valid thread and
+@code{KERN_FAILURE} if @var{thread} is not sampled.
+@end deftypefun
+
+
+@deftp {Data type} sampled_pc_t
+This structure is returned in @var{sampled_pcs} by the
+@code{thread_get_sampled_pcs} and @code{task_get_sampled_pcs} functions
+and provides pc samples for threads or tasks. It has the following
+members:
+
+@table @code
+@item natural_t id
+A thread-specific unique identifier.
+
+@item vm_offset_t pc
+A pc value.
+
+@item sampled_pc_flavor_t sampletype
+The type of the sample as per flavor.
+@end table
+@end deftp
+
+
+@deftp {Data type} sampled_pc_flavor_t
+This data type specifies a pc sample flavor, either as argument passed
+in @var{flavor} to the @code{thread_enable_pc_sampling} and
+@code{thread_disable_pc_sampling} functions, or as member
+@code{sampletype} in the @code{sampled_pc_t} data type. The flavor is a
+bitwise-or of the possible flavors defined in @file{mach/pc_sample.h}:
+
+@table @code
+@item SAMPLED_PC_PERIODIC
+default
+@item SAMPLED_PC_VM_ZFILL_FAULTS
+zero filled fault
+@item SAMPLED_PC_VM_REACTIVATION_FAULTS
+reactivation fault
+@item SAMPLED_PC_VM_PAGEIN_FAULTS
+pagein fault
+@item SAMPLED_PC_VM_COW_FAULTS
+copy-on-write fault
+@item SAMPLED_PC_VM_FAULTS_ANY
+any fault
+@item SAMPLED_PC_VM_FAULTS
+the bitwise-or of @code{SAMPLED_PC_VM_ZFILL_FAULTS},
+@code{SAMPLED_PC_VM_REACTIVATION_FAULTS},
+@code{SAMPLED_PC_VM_PAGEIN_FAULTS} and @code{SAMPLED_PC_VM_COW_FAULTS}.
+@end table
+@end deftp
+
+@c XXX sampled_pc_array_t, sampled_pc_seqno_t
+
+
+@node Host Interface
+@chapter Host Interface
+@cindex host interface
+
+This section describes the Mach interface to a host executing a Mach
+kernel. The interface allows to query statistics about a host and
+control its behaviour.
+
+A host is represented by two ports, a name port @var{host} used to query
+information about the host accessible to everyone, and a control port
+@var{host_priv} used to manipulate it. For example, you can query the
+current time using the name port, but to change the time you need to
+send a message to the host control port.
+
+Everything described in this section is declared in the header file
+@file{mach.h}.
+
+@menu
+* Host Ports:: Ports representing a host.
+* Host Information:: Retrieval of information about a host.
+* Host Time:: Operations on the time as seen by a host.
+* Host Reboot:: Rebooting the system.
+@end menu
+
+
+@node Host Ports
+@section Host Ports
+@cindex host ports
+@cindex ports representing a host
+
+@cindex host name port
+@deftp {Data type} host_t
+This is a @code{mach_port_t} and used to hold the port name of a host
+name port (or short: host port). Any task can get a send right to the
+name port of the host running the task using the @code{mach_host_self}
+system call. The name port can be used to query information about the
+host, for example the current time.
+@end deftp
+
+@deftypefun host_t mach_host_self ()
+The @code{mach_host_self} system call returns the calling thread's host
+name port. It has an effect equivalent to receiving a send right for
+the host port. @code{mach_host_self} returns the name of the send
+right. In particular, successive calls will increase the calling task's
+user-reference count for the send right.
+
+As a special exception, the kernel will overrun the user reference count
+of the host name port, so that this function can not fail for that
+reason. Because of this, the user should not deallocate the port right
+if an overrun might have happened. Otherwise the reference count could
+drop to zero and the send right be destroyed while the user still
+expects to be able to use it. As the kernel does not make use of the
+number of extant send rights anyway, this is safe to do (the host port
+itself is never destroyed).
+
+The function returns @code{MACH_PORT_NULL} if a resource shortage
+prevented the reception of the send right.
+
+This function is also available in @file{mach/mach_traps.h}.
+@end deftypefun
+
+@cindex host control port
+@deftp {Data type} host_priv_t
+This is a @code{mach_port_t} and used to hold the port name of a
+privileged host control port. A send right to the host control port is
+inserted into the first task at bootstrap (@pxref{Modules}). This is
+the only way to get access to the host control port in Mach, so the
+initial task has to preserve the send right carefully, moving a copy of
+it to other privileged tasks if necessary and denying access to
+unprivileged tasks.
+@end deftp
+
+
+@node Host Information
+@section Host Information
+
+@deftypefun kern_return_t host_info (@w{host_t @var{host}}, @w{int @var{flavor}}, @w{host_info_t @var{host_info}}, @w{mach_msg_type_number_t *@var{host_info_count}})
+The @code{host_info} function returns various information about
+@var{host}. @var{host_info} is an array of integers that is supplied by
+the caller. It will be filled with the requested information.
+@var{host_info_count} is supplied as the maximum number of integers in
+@var{host_info}. On return, it contains the actual number of integers
+in @var{host_info}. The maximum number of integers returned by any
+flavor is @code{HOST_INFO_MAX}.
+
+The type of information returned is defined by @var{flavor}, which can
+be one of the following:
+
+@table @code
+@item HOST_BASIC_INFO
+The function returns basic information about the host, as defined by
+@code{host_basic_info_t}. This includes the number of processors, their
+type, and the amount of memory installed in the system. The number of
+integers returned is @code{HOST_BASIC_INFO_COUNT}. For how to get more
+information about the processor, see @ref{Processor Interface}.
+
+@item HOST_PROCESSOR_SLOTS
+The function returns the numbers of the slots with active processors in
+them. The number of integers returned can be up to @code{max_cpus}, as
+returned by the @code{HOST_BASIC_INFO} flavor of @code{host_info}.
+
+@item HOST_SCHED_INFO
+The function returns information of interest to schedulers as defined by
+@code{host_sched_info_t}. The number of integers returned is
+@code{HOST_SCHED_INFO_COUNT}.
+@end table
+
+The function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{host} is not a host or @var{flavor}
+is not recognized. The function returns @code{MIG_ARRAY_TOO_LARGE} if
+the returned info array is too large for @var{host_info}. In this case,
+@var{host_info} is filled as much as possible and @var{host_info_count}
+is set to the number of elements that would be returned if there were
+enough room.
+@c BUGS Availability limited. Systems without this call support a
+@c host_info call with an incompatible calling sequence.
+@end deftypefun
+
+@deftp {Data type} {struct host_basic_info}
+A pointer to this structure is returned in @var{host_info} by the
+@code{host_info} function and provides basic information about the host.
+You can cast a variable of type @code{host_info_t} to a pointer of this
+type if you provided it as the @var{host_info} parameter for the
+@code{HOST_BASIC_INFO} flavor of @code{host_info}. It has the following
+members:
+
+@table @code
+@item int max_cpus
+The maximum number of possible processors for which the kernel is
+configured.
+
+@item int avail_cpus
+The number of cpus currently available.
+
+@item vm_size_t memory_size
+The size of physical memory in bytes.
+
+@item cpu_type_t cpu_type
+The type of the master processor.
+
+@item cpu_subtype_t cpu_subtype
+The subtype of the master processor.
+@end table
+
+The type and subtype of the individual processors are also available
+by @code{processor_info}, see @ref{Processor Interface}.
+@end deftp
+
+@deftp {Data type} host_basic_info_t
+This is a pointer to a @code{struct host_basic_info}.
+@end deftp
+
+@deftp {Data type} {struct host_sched_info}
+A pointer to this structure is returned in @var{host_info} by the
+@code{host_info} function and provides information of interest to
+schedulers. You can cast a variable of type @code{host_info_t} to a
+pointer of this type if you provided it as the @var{host_info} parameter
+for the @code{HOST_SCHED_INFO} flavor of @code{host_info}. It has the
+following members:
+
+@table @code
+@item int min_timeout
+The minimum timeout and unit of time in milliseconds.
+
+@item int min_quantum
+The minimum quantum and unit of quantum in milliseconds.
+@end table
+@end deftp
+
+@deftp {Data type} host_sched_info_t
+This is a pointer to a @code{struct host_sched_info}.
+@end deftp
+
+@deftypefun kern_return_t host_get_kernel_version (@w{host_t @var{host}}, @w{kernel_version_t *@var{version}})
+The @code{host_get_kernel_version} function returns the version string
+compiled into the kernel executing on @var{host} at the time it was
+built in the character string @var{version}. This string describes the
+version of the kernel. The constant @code{KERNEL_VERSION_MAX} should be
+used to dimension storage for the returned string if the
+@code{kernel_version_t} declaration is not used.
+
+If the version string compiled into the kernel is longer than
+@code{KERNEL_VERSION_MAX}, the result is truncated and not necessarily
+null-terminated.
+
+If @var{host} is not a valid send right to a host port, the function
+returns @code{KERN_INVALID_ARGUMENT}. If @var{version} points to
+inaccessible memory, it returns @code{KERN_INVALID_ADDRESS}, and
+@code{KERN_SUCCESS} otherwise.
+@end deftypefun
+
+@node Host Time
+@section Host Time
+
+@deftp {Data type} time_value64_t
+This is the representation of a time in Mach. It is a @code{struct
+time_value64} and consists of the following members:
+
+@table @code
+@item int64_t seconds
+The number of seconds.
+@item int64_t nanoseconds
+The number of nanoseconds.
+@end table
+@end deftp
+
+The number of nanoseconds should always be smaller than
+@code{TIME_NANOS_MAX} (1000000000). A time with this property is
+@dfn{normalized}. Normalized time values can be manipulated with the
+following macros:
+
+@defmac time_value64_add_nanos (@w{time_value64_t *@var{val}}, @w{int64_t *@var{nanos}})
+Add @var{nanos} nanoseconds to @var{val}. If @var{val} is normalized
+and @var{nanos} smaller than @code{TIME_NANOS_MAX}, @var{val} will be
+normalized afterwards.
+@end defmac
+
+@defmac time_value64_add (@w{time_value64_t *@var{result}}, @w{time_value64_t *@var{addend}})
+Add the values in @var{addend} to @var{result}. If both are normalized,
+@var{result} will be normalized afterwards.
+@end defmac
+
+A variable of type @code{time_value64_t} can either represent a duration
+or a fixed point in time. In the latter case, it shall be interpreted as
+the number of seconds and nanoseconds since the epoch, January 1, 1970.
+
+@deftypefun kern_return_t host_get_time64 (@w{host_t @var{host}}, @w{time_value64_t *@var{current_time}})
+Get the current time as seen by @var{host}. On success, the time passed
+since the epoch is returned in @var{current_time}.
+@end deftypefun
+
+@deftypefun kern_return_t host_set_time64 (@w{host_priv_t @var{host_priv}}, @w{time_value64_t @var{new_time}})
+Set the time of @var{host_priv} to @var{new_time}.
+@end deftypefun
+
+@deftypefun kern_return_t host_adjust_time64 (@w{host_priv_t @var{host_priv}}, @w{time_value64_t @var{new_adjustment}}, @w{time_value64_t *@var{old_adjustment}})
+Arrange for the current time as seen by @var{host_priv} to be gradually
+changed by the adjustment value @var{new_adjustment}, and return the old
+adjustment value in @var{old_adjustment}.
+@end deftypefun
+
+For efficiency, the current time is available through a mapped-time
+interface.
+
+@deftp {Data type} mapped_time_value_t
+This structure defines the mapped-time interface. It has the following
+members:
+
+@table @code
+@item integer_t seconds
+The number of seconds.
+
+@item integer_t microseconds
+The number of microseconds.
+
+@item integer_t check_seconds
+This is a copy of the seconds value, which must be checked to protect
+against a race condition when reading out the two time values. This
+should only be used when getting the 32 bit version of @code{time_value64_t}.
+
+@item time_value64_t time_value
+The current time.
+
+@item int64_t check_seconds64
+This is a copy of the seconds value in @var{time_value}, which must be checked to protect
+against a race condition when reading out the two time values.
+@end table
+@end deftp
+
+Here is an example how to read out the current time using the
+mapped-time interface:
+
+@c XXX Complete the example.
+@example
+do
+ @{
+ secs = mtime->time_value.seconds;
+ __sync_synchronize();
+ nanos = mtime->time_value.nanoseconds;
+ __sync_synchronize();
+ @}
+while (secs != mtime->check_seconds64);
+@end example
+
+
+@node Host Reboot
+@section Host Reboot
+
+@deftypefun kern_return_t host_reboot (@w{host_priv_t @var{host_priv}}, @w{int @var{options}})
+Reboot the host specified by @var{host_priv}. The argument
+@var{options} specifies the flags. The available flags are defined in
+@file{sys/reboot.h}:
+
+@table @code
+@item RB_HALT
+Do not reboot, but halt the machine.
+
+@item RB_DEBUGGER
+Do not reboot, but enter kernel debugger from user space.
+@end table
+
+If successful, the function might not return.
+@end deftypefun
+
+
+@node Processors and Processor Sets
+@chapter Processors and Processor Sets
+
+This section describes the Mach interface to processor sets and
+individual processors. The interface allows to group processors into
+sets and control the processors and processor sets.
+
+A processor is not a central part of the interface. It is mostly of
+relevance as a part of a processor set. Threads are always assigned to
+processor sets, and all processors in a set are equally involved in
+executing all threads assigned to that set.
+
+The processor set is represented by two ports, a name port
+@var{processor_set_name} used to query information about the host
+accessible to everyone, and a control port @var{processor_set} used to
+manipulate it.
+
+@menu
+* Processor Set Interface:: How to work with processor sets.
+* Processor Interface:: How to work with individual processors.
+@end menu
+
+
+@node Processor Set Interface
+@section Processor Set Interface
+
+@menu
+* Processor Set Ports:: Ports representing a processor set.
+* Processor Set Access:: How the processor sets are accessed.
+* Processor Set Creation:: How new processor sets are created.
+* Processor Set Destruction:: How processor sets are destroyed.
+* Tasks and Threads on Sets:: Assigning tasks, threads to processor sets.
+* Processor Set Priority:: Specifying the priority of a processor set.
+* Processor Set Policy:: Changing the processor set policies.
+* Processor Set Info:: Obtaining information about a processor set.
+@end menu
+
+
+@node Processor Set Ports
+@subsection Processor Set Ports
+@cindex processor set ports
+@cindex ports representing a processor set
+
+@cindex processor set name port
+@cindex port representing a processor set name
+@deftp {Data type} processor_set_name_t
+This is a @code{mach_port_t} and used to hold the port name of a
+processor set name port that names the processor set. Any task can get
+a send right to the name port of a processor set. The processor set
+name port allows to get information about the processor set.
+@end deftp
+
+@cindex processor set port
+@deftp {Data type} processor_set_t
+This is a @code{mach_port_t} and used to hold the port name of a
+privileged processor set control port that represents the processor set.
+Operations on the processor set are implemented as remote procedure
+calls to the processor set port. The processor set port allows to
+manipulate the processor set.
+@end deftp
+
+
+@node Processor Set Access
+@subsection Processor Set Access
+
+@deftypefun kern_return_t host_processor_sets (@w{host_t @var{host}}, @w{processor_set_name_array_t *@var{processor_sets}}, @w{mach_msg_type_number_t *@var{processor_sets_count}})
+The function @code{host_processor_sets} gets send rights to the name
+port for each processor set currently assigned to @var{host}.
+
+@code{host_processor_set_priv} can be used to obtain the control ports
+from these if desired. @var{processor_sets} is an array that is
+created as a result of this call. The caller may wish to
+@code{vm_deallocate} this array when the data is no longer needed.
+@var{processor_sets_count} is set to the number of processor sets in the
+@var{processor_sets}.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{host} is not a host.
+@end deftypefun
+
+@deftypefun kern_return_t host_processor_set_priv (@w{host_priv_t @var{host_priv}}, @w{processor_set_name_t @var{set_name}}, @w{processor_set_t *@var{set}})
+The function @code{host_processor_set_priv} allows a privileged
+application to obtain the control port @var{set} for an existing
+processor set from its name port @var{set_name}. The privileged host
+port @var{host_priv} is required.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{host_priv} is not a valid host
+control port.
+@end deftypefun
+
+@deftypefun kern_return_t processor_set_default (@w{host_t @var{host}}, @w{processor_set_name_t *@var{default_set}})
+The function @code{processor_set_default} returns the default processor
+set of @var{host} in @var{default_set}. The default processor set is
+used by all threads, tasks, and processors that are not explicitly
+assigned to other sets. @code{processor_set_default} returns a port that can
+be used to obtain information about this set (e.g. how many threads are
+assigned to it). This port cannot be used to perform operations on that
+set.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_ARGUMENT} if @var{host} is not a host and
+@code{KERN_INVALID_ADDRESS} if @var{default_set} points to
+inaccessible memory.
+@end deftypefun
+
+
+@node Processor Set Creation
+@subsection Processor Set Creation
+
+@deftypefun kern_return_t processor_set_create (@w{host_t @var{host}}, @w{processor_set_t *@var{new_set}}, @w{processor_set_name_t *@var{new_name}})
+The function @code{processor_set_create} creates a new processor set on
+@var{host} and returns the two ports associated with it. The port
+returned in @var{new_set} is the actual port representing the set. It
+is used to perform operations such as assigning processors, tasks, or
+threads. The port returned in @var{new_name} identifies the set, and is
+used to obtain information about the set.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_ARGUMENT} if @var{host} is not a host,
+@code{KERN_INVALID_ADDRESS} if @var{new_set} or @var{new_name} points to
+inaccessible memory and @code{KERN_FAILURE} if the operating system does
+not support processor allocation.
+@end deftypefun
+
+
+@node Processor Set Destruction
+@subsection Processor Set Destruction
+
+@deftypefun kern_return_t processor_set_destroy (@w{processor_set_t @var{processor_set}})
+The function @code{processor_set_destroy} destroys the specified
+processor set. Any assigned processors, tasks, or threads are
+reassigned to the default set. The object port for the processor set is
+required (not the name port). The default processor set cannot be
+destroyed.
+
+This function returns @code{KERN_SUCCESS} if the set was destroyed,
+@code{KERN_FAILURE} if an attempt was made to destroy the default
+processor set, or the operating system does not support processor
+allocation, and @code{KERN_INVALID_ARGUMENT} if @var{processor_set} is
+not a valid processor set control port.
+@end deftypefun
+
+
+@node Tasks and Threads on Sets
+@subsection Tasks and Threads on Sets
+
+@deftypefun kern_return_t processor_set_tasks (@w{processor_set_t @var{processor_set}}, @w{task_array_t *@var{task_list}}, @w{mach_msg_type_number_t *@var{task_count}})
+The function @code{processor_set_tasks} gets send rights to the kernel
+port for each task currently assigned to @var{processor_set}.
+
+@var{task_list} is an array that is created as a result of this call.
+The caller may wish to @code{vm_deallocate} this array when the data is
+no longer needed. @var{task_count} is set to the number of tasks in the
+@var{task_list}.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{processor_set} is not a processor
+set.
+@end deftypefun
+
+@deftypefun kern_return_t processor_set_threads (@w{processor_set_t @var{processor_set}}, @w{thread_array_t *@var{thread_list}}, @w{mach_msg_type_number_t *@var{thread_count}})
+The function @code{processor_set_threads} gets send rights to the kernel
+port for each thread currently assigned to @var{processor_set}.
+
+@var{thread_list} is an array that is created as a result of this call.
+The caller may wish to @code{vm_deallocate} this array when the data is
+no longer needed. @var{thread_count} is set to the number of threads in
+the @var{thread_list}.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{processor_set} is not a processor
+set.
+@end deftypefun
+
+@deftypefun kern_return_t task_assign (@w{task_t @var{task}}, @w{processor_set_t @var{processor_set}}, @w{boolean_t @var{assign_threads}})
+The function @code{task_assign} assigns @var{task} the set
+@var{processor_set}. This assignment is for the purposes of determining
+the initial assignment of newly created threads in task. Any previous
+assignment of the task is nullified. Existing threads within the task
+are also reassigned if @var{assign_threads} is @code{TRUE}. They are
+not affected if it is @code{FALSE}.
+
+This function returns @code{KERN_SUCCESS} if the assignment has been
+performed and @code{KERN_INVALID_ARGUMENT} if @var{task} is not a task,
+or @var{processor_set} is not a processor set on the same host as
+@var{task}.
+@end deftypefun
+
+@deftypefun kern_return_t task_assign_default (@w{task_t @var{task}}, @w{boolean_t @var{assign_threads}})
+The function @code{task_assign_default} is a variant of
+@code{task_assign} that assigns the task to the default processor set on
+that task's host. This variant exists because the control port for the
+default processor set is privileged and not usually available to users.
+
+This function returns @code{KERN_SUCCESS} if the assignment has been
+performed and @code{KERN_INVALID_ARGUMENT} if @var{task} is not a task.
+@end deftypefun
+
+@deftypefun kern_return_t task_get_assignment (@w{task_t @var{task}}, @w{processor_set_name_t *@var{assigned_set}})
+The function @code{task_get_assignment} returns the name of the
+processor set to which @var{task} is currently assigned in
+@var{assigned_set}. This port can only be used to obtain information
+about the processor set.
+
+This function returns @code{KERN_SUCCESS} if the assignment has been
+performed, @code{KERN_INVALID_ADDRESS} if @var{assigned_set} points to
+inaccessible memory, and @code{KERN_INVALID_ARGUMENT} if @var{task} is
+not a task.
+@end deftypefun
+
+@deftypefun kern_return_t thread_assign (@w{thread_t @var{thread}}, @w{processor_set_t @var{processor_set}})
+The function @code{thread_assign} assigns @var{thread} the set
+@var{processor_set}. After the assignment is completed, the thread only
+executes on processors assigned to the designated processor set. If
+there are no such processors, then the thread is unable to execute. Any
+previous assignment of the thread is nullified. Unix system call
+compatibility code may temporarily force threads to execute on the
+master processor.
+
+This function returns @code{KERN_SUCCESS} if the assignment has been
+performed and @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a
+thread, or @var{processor_set} is not a processor set on the same host
+as @var{thread}.
+@end deftypefun
+
+@deftypefun kern_return_t thread_assign_default (@w{thread_t @var{thread}})
+The function @code{thread_assign_default} is a variant of
+@code{thread_assign} that assigns the thread to the default processor
+set on that thread's host. This variant exists because the control port
+for the default processor set is privileged and not usually available
+to users.
+
+This function returns @code{KERN_SUCCESS} if the assignment has been
+performed and @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a
+thread.
+@end deftypefun
+
+@deftypefun kern_return_t thread_get_assignment (@w{thread_t @var{thread}}, @w{processor_set_name_t *@var{assigned_set}})
+The function @code{thread_get_assignment} returns the name of the
+processor set to which the thread is currently assigned in
+@var{assigned_set}. This port can only be used to obtain information
+about the processor set.
+
+This function returns @code{KERN_SUCCESS} if the assignment has been
+performed, @code{KERN_INVALID_ADDRESS} if @var{assigned_set} points to
+inaccessible memory, and @code{KERN_INVALID_ARGUMENT} if @var{thread} is
+not a thread.
+@end deftypefun
+
+
+@node Processor Set Priority
+@subsection Processor Set Priority
+
+@deftypefun kern_return_t processor_set_max_priority (@w{processor_set_t @var{processor_set}}, @w{int @var{max_priority}}, @w{boolean_t @var{change_threads}})
+The function @code{processor_set_max_priority} is used to set the
+maximum priority for a processor set. The priority of a processor set
+is used only for newly created threads (thread's maximum priority is set
+to processor set's) and the assignment of threads to the set (thread's
+maximum priority is reduced if it exceeds the set's maximum priority,
+thread's priority is similarly reduced).
+@code{processor_set_max_priority} changes this priority. It also sets
+the maximum priority of all threads assigned to the processor set to
+this new priority if @var{change_threads} is @code{TRUE}. If this
+maximum priority is less than the priorities of any of these threads,
+their priorities will also be set to this new value.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{processor_set} is not a processor
+set or @var{max_priority} is not a valid priority.
+@end deftypefun
+
+
+@node Processor Set Policy
+@subsection Processor Set Policy
+
+@deftypefun kern_return_t processor_set_policy_enable (@w{processor_set_t @var{processor_set}}, @w{int @var{policy}})
+@deftypefunx kern_return_t processor_set_policy_disable (@w{processor_set_t @var{processor_set}}, @w{int @var{policy}}, @w{boolean_t @var{change_threads}})
+Processor sets may restrict the scheduling policies to be used for
+threads assigned to them. These two calls provide the mechanism for
+designating permitted and forbidden policies. The current set of
+permitted policies can be obtained from @code{processor_set_info}.
+Timesharing may not be forbidden by any processor set. This is a
+compromise to reduce the complexity of the assign operation; any thread
+whose policy is forbidden by the target processor set has its policy
+reset to timesharing. If the @var{change_threads} argument to
+@code{processor_set_policy_disable} is true, threads currently assigned
+to this processor set and using the newly disabled policy will have
+their policy reset to timesharing.
+
+@file{mach/policy.h} contains the allowed policies; it is included by
+@file{mach.h}. Not all policies (e.g. fixed priority) are supported by
+all systems.
+
+This function returns @code{KERN_SUCCESS} if the operation was completed
+successfully and @code{KERN_INVALID_ARGUMENT} if @var{processor_set} is
+not a processor set or @var{policy} is not a valid policy, or an attempt
+was made to disable timesharing.
+@end deftypefun
+
+
+@node Processor Set Info
+@subsection Processor Set Info
+
+@deftypefun kern_return_t processor_set_info (@w{processor_set_name_t @var{set_name}}, @w{int @var{flavor}}, @w{host_t *@var{host}}, @w{processor_set_info_t @var{processor_set_info}}, @w{mach_msg_type_number_t *@var{processor_set_info_count}})
+The function @code{processor_set_info} returns the selected information array
+for a processor set, as specified by @var{flavor}.
+
+@var{host} is set to the host on which the processor set resides. This
+is the non-privileged host port.
+
+@var{processor_set_info} is an array of integers that is supplied by the
+caller and returned filled with specified information.
+@var{processor_set_info_count} is supplied as the maximum number of
+integers in @var{processor_set_info}. On return, it contains the actual
+number of integers in @var{processor_set_info}. The maximum number of
+integers returned by any flavor is @code{PROCESSOR_SET_INFO_MAX}.
+
+The type of information returned is defined by @var{flavor}, which can
+be one of the following:
+
+@table @code
+@item PROCESSOR_SET_BASIC_INFO
+The function returns basic information about the processor set, as
+defined by @code{processor_set_basic_info_t}. This includes the number
+of tasks and threads assigned to the processor set. The number of
+integers returned is @code{PROCESSOR_SET_BASIC_INFO_COUNT}.
+
+@item PROCESSOR_SET_SCHED_INFO
+The function returns information about the scheduling policy for the
+processor set as defined by @code{processor_set_sched_info_t}. The
+number of integers returned is @code{PROCESSOR_SET_SCHED_INFO_COUNT}.
+@end table
+
+Some machines may define additional (machine-dependent) flavors.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{processor_set} is not a processor
+set or @var{flavor} is not recognized. The function returns
+@code{MIG_ARRAY_TOO_LARGE} if the returned info array is too large for
+@var{processor_set_info}. In this case, @var{processor_set_info} is
+filled as much as possible and @var{processor_set_info_count} is set to the
+number of elements that would have been returned if there were enough
+room.
+@end deftypefun
+
+@deftp {Data type} {struct processor_set_basic_info}
+This structure is returned in @var{processor_set_info} by the
+@code{processor_set_info} function and provides basic information about
+the processor set. You can cast a variable of type
+@code{processor_set_info_t} to a pointer of this type if you provided it
+as the @var{processor_set_info} parameter for the
+@code{PROCESSOR_SET_BASIC_INFO} flavor of @code{processor_set_info}. It
+has the following members:
+
+@table @code
+@item int processor_count
+number of processors
+
+@item int task_count
+number of tasks
+
+@item int thread_count
+number of threads
+
+@item int load_average
+scaled load average
+
+@item int mach_factor
+scaled mach factor
+@end table
+@end deftp
+
+@deftp {Data type} processor_set_basic_info_t
+This is a pointer to a @code{struct processor_set_basic_info}.
+@end deftp
+
+@deftp {Data type} {struct processor_set_sched_info}
+This structure is returned in @var{processor_set_info} by the
+@code{processor_set_info} function and provides schedule information
+about the processor set. You can cast a variable of type
+@code{processor_set_info_t} to a pointer of this type if you provided it
+as the @var{processor_set_info} parameter for the
+@code{PROCESSOR_SET_SCHED_INFO} flavor of @code{processor_set_info}. It
+has the following members:
+
+@table @code
+@item int policies
+allowed policies
+
+@item int max_priority
+max priority for new threads
+@end table
+@end deftp
+
+@deftp {Data type} processor_set_sched_info_t
+This is a pointer to a @code{struct processor_set_sched_info}.
+@end deftp
+
+
+@node Processor Interface
+@section Processor Interface
+
+@cindex processor port
+@cindex port representing a processor
+@deftp {Data type} processor_t
+This is a @code{mach_port_t} and used to hold the port name of a
+processor port that represents the processor. Operations on the
+processor are implemented as remote procedure calls to the processor
+port.
+@end deftp
+
+@menu
+* Hosted Processors:: Getting a list of all processors on a host.
+* Processor Control:: Starting, stopping, controlling processors.
+* Processors and Sets:: Combining processors into processor sets.
+* Processor Info:: Obtaining information on processors.
+@end menu
+
+
+@node Hosted Processors
+@subsection Hosted Processors
+
+@deftypefun kern_return_t host_processors (@w{host_priv_t @var{host_priv}}, @w{processor_array_t *@var{processor_list}}, @w{mach_msg_type_number_t *@var{processor_count}})
+The function @code{host_processors} gets send rights to the processor
+port for each processor existing on @var{host_priv}. This is the
+privileged port that allows its holder to control a processor.
+
+@var{processor_list} is an array that is created as a result of this
+call. The caller may wish to @code{vm_deallocate} this array when the
+data is no longer needed. @var{processor_count} is set to the number of
+processors in the @var{processor_list}.
+
+This function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_ARGUMENT} if @var{host_priv} is not a privileged host
+port, and @code{KERN_INVALID_ADDRESS} if @var{processor_count} points to
+inaccessible memory.
+@end deftypefun
+
+
+@node Processor Control
+@subsection Processor Control
+
+@deftypefun kern_return_t processor_start (@w{processor_t @var{processor}})
+@deftypefunx kern_return_t processor_exit (@w{processor_t @var{processor}})
+@deftypefunx kern_return_t processor_control (@w{processor_t @var{processor}}, @w{processor_info_t *@var{cmd}}, @w{mach_msg_type_number_t @var{count}})
+Some multiprocessors may allow privileged software to control
+processors. The @code{processor_start}, @code{processor_exit}, and
+@code{processor_control} operations implement this. The interpretation
+of the command in @var{cmd} is machine dependent. A newly started
+processor is assigned to the default processor set. An exited processor
+is removed from the processor set to which it was assigned and ceases to
+be active.
+
+@var{count} contains the length of the command @var{cmd} as a number of
+ints.
+
+Availability limited. All of these operations are machine-dependent.
+They may do nothing. The ability to restart an exited processor is also
+machine-dependent.
+
+This function returns @code{KERN_SUCCESS} if the operation was
+performed, @code{KERN_FAILURE} if the operation was not performed (a
+likely reason is that it is not supported on this processor),
+@code{KERN_INVALID_ARGUMENT} if @var{processor} is not a processor, and
+@code{KERN_INVALID_ADDRESS} if @var{cmd} points to inaccessible memory.
+@end deftypefun
+
+@node Processors and Sets
+@subsection Processors and Sets
+
+@deftypefun kern_return_t processor_assign (@w{processor_t @var{processor}}, @w{processor_set_t @var{processor_set}}, @w{boolean_t @var{wait}})
+The function @code{processor_assign} assigns @var{processor} to the
+set @var{processor_set}. After the assignment is completed, the
+processor only executes threads that are assigned to that processor set.
+Any previous assignment of the processor is nullified. The master
+processor cannot be reassigned. All processors take clock interrupts at
+all times. The @var{wait} argument indicates whether the caller should
+wait for the assignment to be completed or should return immediately.
+Dedicated kernel threads are used to perform processor assignment, so
+setting wait to @code{FALSE} allows assignment requests to be queued and
+performed faster, especially if the kernel has more than one dedicated
+internal thread for processor assignment. Redirection of other device
+interrupts away from processors assigned to other than the default
+processor set is machine-dependent. Intermediaries that interpose on
+ports must be sure to interpose on both ports involved in this call if
+they interpose on either.
+
+This function returns @code{KERN_SUCCESS} if the assignment has been
+performed, @code{KERN_INVALID_ARGUMENT} if @var{processor} is not a
+processor, or @var{processor_set} is not a processor set on the same
+host as @var{processor}.
+@end deftypefun
+
+@deftypefun kern_return_t processor_get_assignment (@w{processor_t @var{processor}}, @w{processor_set_name_t *@var{assigned_set}})
+The function @code{processor_get_assignment} obtains the current
+assignment of a processor. The name port of the processor set is
+returned in @var{assigned_set}.
+@end deftypefun
+
+@node Processor Info
+@subsection Processor Info
+
+@deftypefun kern_return_t processor_info (@w{processor_t @var{processor}}, @w{int @var{flavor}}, @w{host_t *@var{host}}, @w{processor_info_t @var{processor_info}}, @w{mach_msg_type_number_t *@var{processor_info_count}})
+The function @code{processor_info} returns the selected information array
+for a processor, as specified by @var{flavor}.
+
+@var{host} is set to the host on which the processor resides. This
+is the non-privileged host port.
+
+@var{processor_info} is an array of integers that is supplied by the
+caller and returned filled with specified information.
+@var{processor_info_count} is supplied as the maximum number of integers in
+@var{processor_info}. On return, it contains the actual number of
+integers in @var{processor_info}. The maximum number of integers
+returned by any flavor is @code{PROCESSOR_INFO_MAX}.
+
+The type of information returned is defined by @var{flavor}, which can
+be one of the following:
+
+@table @code
+@item PROCESSOR_BASIC_INFO
+The function returns basic information about the processor, as defined
+by @code{processor_basic_info_t}. This includes the slot number of the
+processor. The number of integers returned is
+@code{PROCESSOR_BASIC_INFO_COUNT}.
+@end table
+
+Machines which require more configuration information beyond the slot
+number are expected to define additional (machine-dependent) flavors.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded and
+@code{KERN_INVALID_ARGUMENT} if @var{processor} is not a processor or
+@var{flavor} is not recognized. The function returns
+@code{MIG_ARRAY_TOO_LARGE} if the returned info array is too large for
+@var{processor_info}. In this case, @var{processor_info} is filled as
+much as possible and @var{processor_info_count} is set to the number of
+elements that would have been returned if there were enough room.
+@end deftypefun
+
+@deftp {Data type} {struct processor_basic_info}
+This structure is returned in @var{processor_info} by the
+@code{processor_info} function and provides basic information about the
+processor. You can cast a variable of type @code{processor_info_t} to a
+pointer of this type if you provided it as the @var{processor_info}
+parameter for the @code{PROCESSOR_BASIC_INFO} flavor of
+@code{processor_info}. It has the following members:
+
+@table @code
+@item cpu_type_t cpu_type
+cpu type
+
+@item cpu_subtype_t cpu_subtype
+cpu subtype
+
+@item boolean_t running
+is processor running?
+
+@item int slot_num
+slot number
+
+@item boolean_t is_master
+is this the master processor
+@end table
+@end deftp
+
+@deftp {Data type} processor_basic_info_t
+This is a pointer to a @code{struct processor_basic_info}.
+@end deftp
+
+
+@node Device Interface
+@chapter Device Interface
+
+The GNU Mach microkernel provides a simple device interface that allows
+the user space programs to access the underlying hardware devices. Each
+device has a unique name, which is a string up to 127 characters long.
+To open a device, the device master port has to be supplied. The device
+master port is only available through the bootstrap port. Anyone who
+has control over the device master port can use all hardware devices.
+@c XXX FIXME bootstrap port, bootstrap
+
+@cindex device port
+@cindex port representing a device
+@deftp {Data type} device_t
+This is a @code{mach_port_t} and used to hold the port name of a
+device port that represents the device. Operations on the device are
+implemented as remote procedure calls to the device port. Each device
+provides a sequence of records. The length of a record is specific to
+the device. Data can be transferred ``out-of-line'' or ``in-line''
+(@pxref{Memory}).
+@end deftp
+
+All constants and functions in this chapter are defined in
+@file{device/device.h}.
+
+@menu
+* Device Reply Server:: Handling device reply messages.
+* Device Open:: Opening hardware devices.
+* Device Close:: Closing hardware devices.
+* Device Read:: Reading data from the device.
+* Device Write:: Writing data to the device.
+* Device Map:: Mapping devices into virtual memory.
+* Device Status:: Querying and manipulating a device.
+* Device Filter:: Filtering packets arriving on a device.
+* Device Interrupt:: Getting hardware interrupt notifications.
+@end menu
+
+
+@node Device Reply Server
+@section Device Reply Server
+
+Beside the usual synchronous interface, an asynchronous interface is
+provided. For this, the caller has to receive and handle the reply
+messages separately from the function call.
+
+@deftypefun boolean_t device_reply_server (@w{msg_header_t *@var{in_msg}}, @w{msg_header_t *@var{out_msg}})
+The function @code{device_reply_server} is produced by the
+remote procedure call generator to handle a received message. This
+function does all necessary argument handling, and actually calls one of
+the following functions: @code{ds_device_open_reply},
+@code{ds_device_read_reply}, @code{ds_device_read_reply_inband},
+@code{ds_device_write_reply} and @code{ds_device_write_reply_inband}.
+
+The @var{in_msg} argument is the message that has been received from the
+kernel. The @var{out_msg} is a reply message, but this is not used for
+this server.
+
+The function returns @code{TRUE} to indicate that the message in
+question was applicable to this interface, and that the appropriate
+routine was called to interpret the message. It returns @code{FALSE} to
+indicate that the message did not apply to this interface, and that no
+other action was taken.
+@end deftypefun
+
+
+@node Device Open
+@section Device Open
+
+@deftypefun kern_return_t device_open (@w{mach_port_t @var{master_port}}, @w{dev_mode_t @var{mode}}, @w{dev_name_t @var{name}}, @w{device_t *@var{device}})
+The function @code{device_open} opens the device @var{name} and returns
+a port to it in @var{device}. The open count for the device is
+incremented by one. If the open count was 0, the open handler for the
+device is invoked.
+
+@var{master_port} must hold the master device port. @var{name}
+specifies the device to open, and is a string up to 127 characters long.
+@var{mode} is the open mode. It is a bitwise-or of the following
+constants:
+
+@table @code
+@item D_READ
+Request read access for the device.
+
+@item D_WRITE
+Request write access for the device.
+
+@item D_NODELAY
+Do not delay an open.
+@c XXX Is this really used at all? Maybe for tape drives? What does it mean?
+@end table
+
+The function returns @code{D_SUCCESS} if the device was successfully
+opened, @code{D_INVALID_OPERATION} if @var{master_port} is not the
+master device port, @code{D_WOULD_BLOCK} if the device is busy and
+@code{D_NOWAIT} was specified in mode, @code{D_ALREADY_OPEN} if the
+device is already open in an incompatible mode and
+@code{D_NO_SUCH_DEVICE} if @var{name} does not denote a known device.
+@end deftypefun
+
+@deftypefun kern_return_t device_open_request (@w{mach_port_t @var{master_port}}, @w{mach_port_t @var{reply_port}}, @w{dev_mode_t @var{mode}}, @w{dev_name_t @var{name}})
+@deftypefunx kern_return_t ds_device_open_reply (@w{mach_port_t @var{reply_port}}, @w{kern_return_t @var{return_code}}, @w{device_t *@var{device}})
+This is the asynchronous form of the @code{device_open} function.
+@code{device_open_request} performs the open request. The meaning for
+the parameters is as in @code{device_open}. Additionally, the caller
+has to supply a reply port to which the @code{ds_device_open_reply}
+message is sent by the kernel when the open has been performed. The
+return value of the open operation is stored in @var{return_code}.
+
+As neither function receives a reply message, only message transmission
+errors apply. If no error occurs, @code{KERN_SUCCESS} is returned.
+@end deftypefun
+
+
+@node Device Close
+@section Device Close
+
+@deftypefun kern_return_t device_close (@w{device_t @var{device}})
+The function @code{device_close} decrements the open count of the device
+by one. If the open count drops to zero, the close handler for the
+device is called. The device to close is specified by its port
+@var{device}.
+
+The function returns @code{D_SUCCESS} if the device was successfully
+closed and @code{D_NO_SUCH_DEVICE} if @var{device} does not denote a
+device port.
+@end deftypefun
+
+
+@node Device Read
+@section Device Read
+
+@deftypefun kern_return_t device_read (@w{device_t @var{device}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{int @var{bytes_wanted}}, @w{io_buf_ptr_t *@var{data}}, @w{mach_msg_type_number_t *@var{data_count}})
+The function @code{device_read} reads @var{bytes_wanted} bytes from
+@var{device}, and stores them in a buffer allocated with
+@code{vm_allocate}, whose address is returned in @var{data}. The caller
+must deallocate it if it is no longer needed. The number of bytes
+actually returned is stored in @var{data_count}.
+
+If @var{mode} is @code{D_NOWAIT}, the operation does not block.
+Otherwise @var{mode} should be 0. @var{recnum} is the record number to
+be read, its meaning is device specific.
+
+The function returns @code{D_SUCCESS} if some data was successfully
+read, @code{D_WOULD_BLOCK} if no data is currently available and
+@code{D_NOWAIT} is specified, and @code{D_NO_SUCH_DEVICE} if
+@var{device} does not denote a device port.
+@end deftypefun
+
+@deftypefun kern_return_t device_read_inband (@w{device_t @var{device}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{int @var{bytes_wanted}}, @w{io_buf_ptr_inband_t *@var{data}}, @w{mach_msg_type_number_t *@var{data_count}})
+The @code{device_read_inband} function works as the @code{device_read}
+function, except that the data is returned ``in-line'' in the reply IPC
+message (@pxref{Memory}).
+@end deftypefun
+
+@deftypefun kern_return_t device_read_request (@w{device_t @var{device}}, @w{mach_port_t @var{reply_port}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{int @var{bytes_wanted}})
+@deftypefunx kern_return_t ds_device_read_reply (@w{mach_port_t @var{reply_port}}, @w{kern_return_t @var{return_code}}, @w{io_buf_ptr_t @var{data}}, @w{mach_msg_type_number_t @var{data_count}})
+This is the asynchronous form of the @code{device_read} function.
+@code{device_read_request} performs the read request. The meaning for
+the parameters is as in @code{device_read}. Additionally, the caller
+has to supply a reply port to which the @code{ds_device_read_reply}
+message is sent by the kernel when the read has been performed. The
+return value of the read operation is stored in @var{return_code}.
+
+As neither function receives a reply message, only message transmission
+errors apply. If no error occurs, @code{KERN_SUCCESS} is returned.
+@end deftypefun
+
+@deftypefun kern_return_t device_read_request_inband (@w{device_t @var{device}}, @w{mach_port_t @var{reply_port}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{int @var{bytes_wanted}})
+@deftypefunx kern_return_t ds_device_read_reply_inband (@w{mach_port_t @var{reply_port}}, @w{kern_return_t @var{return_code}}, @w{io_buf_ptr_t @var{data}}, @w{mach_msg_type_number_t @var{data_count}})
+The @code{device_read_request_inband} and
+@code{ds_device_read_reply_inband} functions work as the
+@code{device_read_request} and @code{ds_device_read_reply} functions,
+except that the data is returned ``in-line'' in the reply IPC message
+(@pxref{Memory}).
+@end deftypefun
+
+
+@node Device Write
+@section Device Write
+
+@deftypefun kern_return_t device_write (@w{device_t @var{device}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{io_buf_ptr_t @var{data}}, @w{mach_msg_type_number_t @var{data_count}}, @w{int *@var{bytes_written}})
+The function @code{device_write} writes @var{data_count} bytes from the
+buffer @var{data} to @var{device}. The number of bytes actually written
+is returned in @var{bytes_written}.
+
+If @var{mode} is @code{D_NOWAIT}, the function returns without waiting
+for I/O completion. Otherwise @var{mode} should be 0. @var{recnum} is
+the record number to be written, its meaning is device specific.
+
+The function returns @code{D_SUCCESS} if some data was successfully
+written and @code{D_NO_SUCH_DEVICE} if @var{device} does not denote a
+device port or the device is dead or not completely open.
+@end deftypefun
+
+@deftypefun kern_return_t device_write_inband (@w{device_t @var{device}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{io_buf_ptr_inband_t @var{data}}, @w{mach_msg_type_number_t @var{data_count}}, @w{int *@var{bytes_written}})
+The @code{device_write_inband} function works as the @code{device_write}
+function, except that the data is sent ``in-line'' in the request IPC
+message (@pxref{Memory}).
+@end deftypefun
+
+@deftypefun kern_return_t device_write_request (@w{device_t @var{device}}, @w{mach_port_t @var{reply_port}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{io_buf_ptr_t @var{data}}, @w{mach_msg_type_number_t @var{data_count}})
+@deftypefunx kern_return_t ds_device_write_reply (@w{mach_port_t @var{reply_port}}, @w{kern_return_t @var{return_code}}, @w{int @var{bytes_written}})
+This is the asynchronous form of the @code{device_write} function.
+@code{device_write_request} performs the write request. The meaning for
+the parameters is as in @code{device_write}. Additionally, the caller
+has to supply a reply port to which the @code{ds_device_write_reply}
+message is sent by the kernel when the write has been performed. The
+return value of the write operation is stored in @var{return_code}.
+
+As neither function receives a reply message, only message transmission
+errors apply. If no error occurs, @code{KERN_SUCCESS} is returned.
+@end deftypefun
+
+@deftypefun kern_return_t device_write_request_inband (@w{device_t @var{device}}, @w{mach_port_t @var{reply_port}}, @w{dev_mode_t @var{mode}}, @w{recnum_t @var{recnum}}, @w{io_buf_ptr_t @var{data}}, @w{mach_msg_type_number_t @var{data_count}})
+@deftypefunx kern_return_t ds_device_write_reply_inband (@w{mach_port_t @var{reply_port}}, @w{kern_return_t @var{return_code}}, @w{int @var{bytes_written}})
+The @code{device_write_request_inband} and
+@code{ds_device_write_reply_inband} functions work as the
+@code{device_write_request} and @code{ds_device_write_reply} functions,
+except that the data is sent ``in-line'' in the request IPC message
+(@pxref{Memory}).
+@end deftypefun
+
+
+@node Device Map
+@section Device Map
+
+@deftypefun kern_return_t device_map (@w{device_t @var{device}}, @w{vm_prot_t @var{prot}}, @w{vm_offset_t @var{offset}}, @w{vm_size_t @var{size}}, @w{mach_port_t *@var{pager}}, @w{int @var{unmap}})
+The function @code{device_map} creates a new memory manager for
+@var{device} and returns a port to it in @var{pager}. The memory
+manager is usable as a memory object in a @code{vm_map} call. The call
+is device dependent.
+
+The protection for the memory object is specified by @var{prot}. The
+memory object starts at @var{offset} within the device and extends
+@var{size} bytes. @var{unmap} is currently unused.
+@c XXX I suppose the caller should set it to 0.
+
+The function returns @code{D_SUCCESS} if the memory manager was
+successfully created and @code{D_NO_SUCH_DEVICE} if @var{device} does
+not denote a device port or the device is dead or not completely open.
+@end deftypefun
+
+
+@node Device Status
+@section Device Status
+
+@deftypefun kern_return_t device_set_status (@w{device_t @var{device}}, @w{dev_flavor_t @var{flavor}}, @w{dev_status_t @var{status}}, @w{mach_msg_type_number_t @var{status_count}})
+The function @code{device_set_status} sets the status of a device. The
+possible values for @var{flavor} and their interpretation is device
+specific.
+
+The function returns @code{D_SUCCESS} if the status was successfully
+set and @code{D_NO_SUCH_DEVICE} if @var{device} does not denote a
+device port or the device is dead or not completely open.
+@end deftypefun
+
+@deftypefun kern_return_t device_get_status (@w{device_t @var{device}}, @w{dev_flavor_t @var{flavor}}, @w{dev_status_t @var{status}}, @w{mach_msg_type_number_t *@var{status_count}})
+The function @code{device_get_status} gets the status of a device. The
+possible values for @var{flavor} and their interpretation is device
+specific.
+
+The function returns @code{D_SUCCESS} if the status was successfully
+retrieved and @code{D_NO_SUCH_DEVICE} if @var{device} does not denote
+a device port or the device is dead or not completely open.
+@end deftypefun
+
+
+@node Device Filter
+@section Device Filter
+
+@deftypefun kern_return_t device_set_filter (@w{device_t @var{device}}, @w{mach_port_t @var{receive_port}}, @w{mach_msg_type_name_t @var{receive_port_type}}, @w{int @var{priority}}, @w{filter_array_t @var{filter}}, @w{mach_msg_type_number_t @var{filter_count}})
+The function @code{device_set_filter} makes it possible to filter out
+selected data arriving at or leaving the device and forward it to a port.
+@var{filter} is a list of filter commands, which are applied to incoming
+data to determine if the data should be sent to @var{receive_port}. The
+IPC type of the send right is specified by @var{receive_port_type}; it
+is either @code{MACH_MSG_TYPE_MAKE_SEND} or
+@code{MACH_MSG_TYPE_MOVE_SEND}. The @var{priority} value is used to
+order multiple filters.
+
+There can be up to @code{NET_MAX_FILTER} commands in @var{filter}. The
+actual number of commands is passed in @var{filter_count}. For the
+purpose of the filter test, an internal stack is provided. After all
+commands have been processed, the value on the top of the stack
+determines if the data is forwarded or the next filter is tried.
+
+The first command is a header which contains two fields: one for flags
+and the other for the type of interpreter used to run the rest of the
+commands.
+
+Any combination of the following flags is allowed but at least one of them
+must be specified.
+
+@table @code
+@item NETF_IN
+The filter will be applied to data received by the device.
+
+@item NETF_OUT
+The filter will be applied to data transmitted by the device.
+@end table
+
+Unless the type is given explicitly the native NETF interpreter will be used.
+To select an alternative implementation use one of the following types:
+
+@table @code
+@item NETF_BPF
+Use Berkeley Packet Filter.
+@end table
+
+For the listener to know what kind of packet is being received, when the
+filter code accepts a packet the message sent to @var{receive_port} is
+tagged with either NETF_IN or NETF_OUT.
+
+@c XXX The following description was taken verbatim from the
+@c kernel_interface.pdf document.
+Each word of the command list specifies a data (push) operation (high
+order NETF_NBPO bits) as well as a binary operator (low order NETF_NBPA
+bits). The value to be pushed onto the stack is chosen as follows.
+
+@table @code
+@item NETF_PUSHLIT
+Use the next short word of the filter as the value.
+
+@item NETF_PUSHZERO
+Use 0 as the value.
+
+@item NETF_PUSHWORD+N
+Use short word N of the ``data'' portion of the message as the value.
+
+@item NETF_PUSHHDR+N
+Use short word N of the ``header'' portion of the message as the value.
+
+@item NETF_PUSHIND+N
+Pops the top long word from the stack and then uses short word N of the
+``data'' portion of the message as the value.
+
+@item NETF_PUSHHDRIND+N
+Pops the top long word from the stack and then uses short word N of the
+``header'' portion of the message as the value.
+
+@item NETF_PUSHSTK+N
+Use long word N of the stack (where the top of stack is long word 0) as
+the value.
+
+@item NETF_NOPUSH
+Don't push a value.
+@end table
+
+The unsigned value so chosen is promoted to a long word before being
+pushed. Once a value is pushed (except for the case of
+@code{NETF_NOPUSH}), the top two long words of the stack are popped and
+a binary operator applied to them (with the old top of stack as the
+second operand). The result of the operator is pushed on the stack.
+These operators are:
+
+@table @code
+@item NETF_NOP
+Don't pop off any values and do no operation.
+
+@item NETF_EQ
+Perform an equal comparison.
+
+@item NETF_LT
+Perform a less than comparison.
+
+@item NETF_LE
+Perform a less than or equal comparison.
+
+@item NETF_GT
+Perform a greater than comparison.
+
+@item NETF_GE
+Perform a greater than or equal comparison.
+
+@item NETF_AND
+Perform a bitwise boolean AND operation.
+
+@item NETF_OR
+Perform a bitwise boolean inclusive OR operation.
+
+@item NETF_XOR
+Perform a bitwise boolean exclusive OR operation.
+
+@item NETF_NEQ
+Perform a not equal comparison.
+
+@item NETF_LSH
+Perform a left shift operation.
+
+@item NETF_RSH
+Perform a right shift operation.
+
+@item NETF_ADD
+Perform an addition.
+
+@item NETF_SUB
+Perform a subtraction.
+
+@item NETF_COR
+Perform an equal comparison. If the comparison is @code{TRUE}, terminate
+the filter list. Otherwise, pop the result of the comparison off the
+stack.
+
+@item NETF_CAND
+Perform an equal comparison. If the comparison is @code{FALSE},
+terminate the filter list. Otherwise, pop the result of the comparison
+off the stack.
+
+@item NETF_CNOR
+Perform a not equal comparison. If the comparison is @code{FALSE},
+terminate the filter list. Otherwise, pop the result of the comparison
+off the stack.
+
+@item NETF_CNAND
+Perform a not equal comparison. If the comparison is @code{TRUE},
+terminate the filter list. Otherwise, pop the result of the comparison
+off the stack. The scan of the filter list terminates when the filter
+list is emptied, or a @code{NETF_C...} operation terminates the list. At
+this time, if the final value of the top of the stack is @code{TRUE},
+then the message is accepted for the filter.
+@end table
+
+The function returns @code{D_SUCCESS} if the filter was successfully
+set, @code{D_INVALID_OPERATION} if @var{receive_port} is not a valid
+send right, and @code{D_NO_SUCH_DEVICE} if @var{device} does not denote
+a device port or the device is dead or not completely open.
+@end deftypefun
+
+
+@node Device Interrupt
+@section Device Interrupt
+
+@deftypefun kern_return_t device_intr_register (@w{device_t @var{device}}, @w{int @var{id}}, @w{int @var{flags}}, @w{mach_port_t @var{receive_port}})
+The function @code{device_intr_register} registers for receiving hardware
+interrupt events through @var{device_intr_notify} notifications. The hardware
+interrupt identifier is specified by @var{id}. @var{flags} must be set to 0. The
+notifications will be sent on the @var{receive_port} send right.
+@code{device_intr_register} is only available on the dedicated @code{irq} device.
+@end deftypefun
+
+@deftypefun kern_return_t device_intr_ack (@w{device_t @var{device}}, @w{mach_port_t @var{receive_port}})
+On a hardware interrupt, the kernel disables the interrupt line before sending
+notifications. To prevent from interrupt losses, the interrupt is kept disabled
+until @code{device_intr_ack} is called to acknowledge the interrupt.
+@var{receive_port} is the send right on which the interrupt notification was
+received.
+@end deftypefun
+
+
+@node Kernel Debugger
+@chapter Kernel Debugger
+
+The GNU Mach kernel debugger @code{ddb} is a powerful built-in debugger
+with a gdb like syntax. It is enabled at compile time using the
+@option{--enable-kdb} option. Whenever you want to enter the debugger
+while running the kernel, you can press the key combination
+@key{Ctrl-Alt-D}.
+
+@menu
+* Operation:: Basic architecture of the kernel debugger.
+* Commands:: Available commands in the kernel debugger.
+* Variables:: Access of variables from the kernel debugger.
+* Expressions:: Usage of expressions in the kernel debugger.
+@end menu
+
+
+@node Operation
+@section Operation
+
+The current location is called @dfn{dot}. The dot is displayed with a
+hexadecimal format at a prompt. Examine and write commands update dot
+to the address of the last line examined or the last location modified,
+and set @dfn{next} to the address of the next location to be examined or
+changed. Other commands don't change dot, and set next to be the same
+as dot.
+
+The general command syntax is:
+
+@example
+@var{command}[/@var{modifier}] @var{address} [,@var{count}]
+@end example
+
+@kbd{!!} repeats the previous command, and a blank line repeats from the
+address next with count 1 and no modifiers. Specifying @var{address} sets
+dot to the address. Omitting @var{address} uses dot. A missing @var{count}
+is taken to be 1 for printing commands or infinity for stack traces.
+
+Current @code{ddb} is enhanced to support multi-thread debugging. A
+break point can be set only for a specific thread, and the address space
+or registers of non current thread can be examined or modified if
+supported by machine dependent routines. For example,
+
+@example
+break/t mach_msg_trap $task11.0
+@end example
+
+sets a break point at @code{mach_msg_trap} for the first thread of task
+11 listed by a @code{show all threads} command.
+
+In the above example, @code{$task11.0} is translated to the
+corresponding thread structure's address by variable translation
+mechanism described later. If a default target thread is set in a
+variable @code{$thread}, the @code{$task11.0} can be omitted. In
+general, if @code{t} is specified in a modifier of a command line, a
+specified thread or a default target thread is used as a target thread
+instead of the current one. The @code{t} modifier in a command line is
+not valid in evaluating expressions in a command line. If you want to
+get a value indirectly from a specific thread's address space or access
+to its registers within an expression, you have to specify a default
+target thread in advance, and to use @code{:t} modifier immediately
+after the indirect access or the register reference like as follows:
+
+@example
+set $thread $task11.0
+print $eax:t *(0x100):tuh
+@end example
+
+No sign extension and indirection @code{size(long, half word, byte)} can
+be specified with @code{u}, @code{l}, @code{h} and @code{b} respectively
+for the indirect access.
+
+Note: Support of non current space/register access and user space break
+point depend on the machines. If not supported, attempts of such
+operation may provide incorrect information or may cause strange
+behavior. Even if supported, the user space access is limited to the
+pages resident in the main memory at that time. If a target page is not
+in the main memory, an error will be reported.
+
+@code{ddb} has a feature like a command @code{more} for the output. If
+an output line exceeds the number set in the @code{$lines} variable, it
+displays @samp{--db_more--} and waits for a response. The valid
+responses for it are:
+
+@table @kbd
+@item @key{SPC}
+one more page
+
+@item @key{RET}
+one more line
+
+@item q
+abort the current command, and return to the command input mode
+@end table
+
+
+@node Commands
+@section Commands
+
+@table @code
+@item examine(x) [/@var{modifier}] @var{addr}[,@var{count}] [ @var{thread} ]
+Display the addressed locations according to the formats in the
+modifier. Multiple modifier formats display multiple locations. If no
+format is specified, the last formats specified for this command is
+used. Address space other than that of the current thread can be
+specified with @code{t} option in the modifier and @var{thread}
+parameter. The format characters are
+
+@table @code
+@item b
+look at by bytes(8 bits)
+
+@item h
+look at by half words(16 bits)
+
+@item l
+look at by long words(32 bits)
+
+@item q
+look at by quad words(64 bits)
+
+@item a
+print the location being displayed
+
+@item ,
+skip one unit producing no output
+
+@item A
+print the location with a line number if possible
+
+@item x
+display in unsigned hex
+
+@item z
+display in signed hex
+
+@item o
+display in unsigned octal
+
+@item d
+display in signed decimal
+
+@item u
+display in unsigned decimal
+
+@item r
+display in current radix, signed
+
+@item c
+display low 8 bits as a character. Non-printing characters are
+displayed as an octal escape code (e.g. '\000').
+
+@item s
+display the null-terminated string at the location. Non-printing
+characters are displayed as octal escapes.
+
+@item m
+display in unsigned hex with character dump at the end of each line.
+The location is also displayed in hex at the beginning of each line.
+
+@item i
+display as an instruction
+
+@item I
+display as an instruction with possible alternate formats depending on
+the machine:
+
+@table @code
+@item vax
+don't assume that each external label is a procedure entry mask
+
+@item i386
+don't round to the next long word boundary
+
+@item mips
+print register contents
+@end table
+@end table
+
+@item xf
+Examine forward. It executes an examine command with the last specified
+parameters to it except that the next address displayed by it is used as
+the start address.
+
+@item xb
+Examine backward. It executes an examine command with the last
+specified parameters to it except that the last start address subtracted
+by the size displayed by it is used as the start address.
+
+@item whatis @var{addr}
+Try to find what this address is. This looks up in the various tasks, threads,
+maps, caches etc. to give an idea what is behind this address.
+
+@item print[/axzodurc] @var{addr1} [ @var{addr2} @dots{} ]
+Print @var{addr}'s according to the modifier character. Valid formats
+are: @code{a} @code{x} @code{z} @code{o} @code{d} @code{u} @code{r}
+@code{c}. If no modifier is specified, the last one specified to it is
+used. @var{addr} can be a string, and it is printed as it is. For
+example,
+
+@example
+print/x "eax = " $eax "\necx = " $ecx "\n"
+@end example
+
+will print like
+
+@example
+eax = xxxxxx
+ecx = yyyyyy
+@end example
+
+@item write[/bhlt] @var{addr} [ @var{thread} ] @var{expr1} [ @var{expr2} @dots{} ]
+Write the expressions at succeeding locations. The write unit size can
+be specified in the modifier with a letter b (byte), h (half word) or
+l(long word) respectively. If omitted, long word is assumed. Target
+address space can also be specified with @code{t} option in the modifier
+and @var{thread} parameter. Warning: since there is no delimiter
+between expressions, strange things may happen. It's best to enclose
+each expression in parentheses.
+
+@item set $@var{variable} [=] @var{expr}
+Set the named variable or register with the value of @var{expr}. Valid
+variable names are described below.
+
+@item break[/tuTU] @var{addr}[,@var{count}] [ @var{thread1} @dots{} ]
+Set a break point at @var{addr}. If count is supplied, continues
+(@var{count}-1) times before stopping at the break point. If the break
+point is set, a break point number is printed with @samp{#}. This
+number can be used in deleting the break point or adding conditions to
+it.
+
+@table @code
+@item t
+Set a break point only for a specific thread. The thread is specified
+by @var{thread} parameter, or default one is used if the parameter is
+omitted.
+
+@item u
+Set a break point in user space address. It may be combined with
+@code{t} or @code{T} option to specify the non-current target user
+space. Without @code{u} option, the address is considered in the kernel
+space, and wrong space address is rejected with an error message. This
+option can be used only if it is supported by machine dependent
+routines.
+
+@item T
+Set a break point only for threads in a specific task. It is like
+@code{t} option except that the break point is valid for all threads
+which belong to the same task as the specified target thread.
+
+@item U
+Set a break point in shared user space address. It is like @code{u}
+option, except that the break point is valid for all threads which share
+the same address space even if @code{t} option is specified. @code{t}
+option is used only to specify the target shared space. Without
+@code{t} option, @code{u} and @code{U} have the same meanings. @code{U}
+is useful for setting a user space break point in non-current address
+space with @code{t} option such as in an emulation library space. This
+option can be used only if it is supported by machine dependent
+routines.
+@end table
+
+Warning: if a user text is shadowed by a normal user space debugger,
+user space break points may not work correctly. Setting a break point
+at the low-level code paths may also cause strange behavior.
+
+@item delete[/tuTU] @var{addr}|#@var{number} [ @var{thread1} @dots{} ]
+Delete the break point. The target break point can be specified by a
+break point number with @code{#}, or by @var{addr} like specified in
+@code{break} command.
+
+@item cond #@var{number} [ @var{condition} @var{commands} ]
+Set or delete a condition for the break point specified by the
+@var{number}. If the @var{condition} and @var{commands} are null, the
+condition is deleted. Otherwise the condition is set for it. When the
+break point is hit, the @var{condition} is evaluated. The
+@var{commands} will be executed if the condition is true and the break
+point count set by a break point command becomes zero. @var{commands}
+is a list of commands separated by semicolons. Each command in the list
+is executed in that order, but if a @code{continue} command is executed,
+the command execution stops there, and the stopped thread resumes
+execution.  If the command execution reaches the end of the list, it
+enters into a command input mode. For example,
+
+@example
+set $work0 0
+break/Tu xxx_start $task7.0
+cond #1 (1) set $work0 1; set $work1 0; cont
+break/T vm_fault $task7.0
+cond #2 ($work0) set $work1 ($work1+1); cont
+break/Tu xxx_end $task7.0
+cond #3 ($work0) print $work1 " faults\n"; set $work0 0
+cont
+@end example
+
+will print page fault counts from @code{xxx_start} to @code{xxx_end} in
+@code{task7}.
+
+@item step[/p] [,@var{count}]
+Single step @var{count} times. If @code{p} option is specified, print
+each instruction at each step. Otherwise, only print the last
+instruction.
+
+Warning: depending on machine type, it may not be possible to
+single-step through some low-level code paths or user space code. On
+machines with software-emulated single-stepping (e.g., pmax), stepping
+through code executed by interrupt handlers will probably do the wrong
+thing.
+
+@item continue[/c]
+Continue execution until a breakpoint or watchpoint. If @code{/c},
+count instructions while executing. Some machines (e.g., pmax) also
+count loads and stores.
+
+Warning: when counting, the debugger is really silently single-stepping.
+This means that single-stepping on low-level code may cause strange
+behavior.
+
+@item until
+Stop at the next call or return instruction.
+
+@item next[/p]
+Stop at the matching return instruction. If @code{p} option is
+specified, print the call nesting depth and the cumulative instruction
+count at each call or return. Otherwise, only print when the matching
+return is hit.
+
+@item match[/p]
+A synonym for @code{next}.
+
+@item trace[/tu] [ @var{frame_addr}|@var{thread} ][,@var{count}]
+Stack trace. @code{u} option traces user space; if omitted, only traces
+kernel space. If @code{t} option is specified, it shows the stack trace
+of the specified thread or a default target thread. Otherwise, it shows
+the stack trace of the current thread from the frame address specified
+by a parameter or from the current frame. @var{count} is the number of
+frames to be traced. If the @var{count} is omitted, all frames are
+printed.
+
+Warning: If the target thread's stack is not in the main memory at that
+time, the stack trace will fail. User space stack trace is valid only
+if the machine dependent code supports it.
+
+@item search[/bhl] @var{addr} @var{value} [@var{mask}] [,@var{count}]
+Search memory for a value. This command might fail in interesting ways
+if it doesn't find the searched-for value. This is because @code{ddb}
+doesn't always recover from touching bad memory. The optional count
+argument limits the search.
+
+@item macro @var{name} @var{commands}
+Define a debugger macro as @var{name}. @var{commands} is a list of
+commands to be associated with the macro. In the expressions of the
+command list, a variable @code{$argxx} can be used to get a parameter
+passed to the macro. When a macro is called, each argument is evaluated
+as an expression, and the value is assigned to each parameter,
+@code{$arg1}, @code{$arg2}, @dots{} respectively. 10 @code{$arg}
+variables are reserved to each level of macros, and they can be used as
+local variables. The nesting of macro can be allowed up to 5 levels.
+For example,
+
+@example
+macro xinit set $work0 $arg1
+macro xlist examine/m $work0,4; set $work0 *($work0)
+xinit *(xxx_list)
+xlist
+@enddots{}
+@end example
+
+will print the contents of a list starting from @code{xxx_list} by each
+@code{xlist} command.
+
+@item dmacro @var{name}
+Delete the macro named @var{name}.
+
+@item show all threads[/uls]
+Display all tasks and threads information. This version of @code{ddb}
+prints more information than the previous one.  It shows UNIX process
+information like @command{ps} for each task. The UNIX process
+information may not be shown if it is not supported in the machine, or
+the bottom of the stack of the target task is not in the main memory at
+that time. It also shows task and thread identification numbers. These
+numbers can be used to specify a task or a thread symbolically in
+various commands. The numbers are valid only in the same debugger
+session. If the execution is resumed again, the numbers may change.
+The current thread can be distinguished from others by a @code{#} after
+the thread id instead of @code{:}. Without @code{l} option, it only
+shows thread id, thread structure address and the status for each
+thread. The status consists of 6 letters, R(run), W(wait), S(suspended),
+O(swapped out), N(interruptible), and F(loating) point arithmetic used (if
+supported by the platform). If the corresponding
+status bit is off, @code{.} is printed instead. If @code{l} option is
+specified, more detail information is printed for each thread. If the
+@code{s} option is given, scheduling information is displayed.
+
+@item show all tasks
+Displays all tasks similar to @code{show all threads}, but omits
+information about the individual threads.
+
+@item show task [ @var{addr} ]
+Display the information of a task specified by @var{addr}. If
+@var{addr} is omitted, current task information is displayed.
+
+@code{show task $taskxx} can notably be used to show task number
+@var{xx}
+
+@item show thread [ @var{addr} ]
+Display the information of a thread specified by @var{addr}. If
+@var{addr} is omitted, current thread information is displayed.
+
+@code{show thread $taskxx.yy} can notably be used to show thread
+number @var{yy} of task number @var{xx}.
+
+@item show registers[/tu [ @var{thread} ]]
+Display the register set. Target thread can be specified with @code{t}
+option and @var{thread} parameter. If @code{u} option is specified, it
+displays user registers instead of kernel or currently saved one.
+
+Warning: The support of @code{t} and @code{u} option depends on the
+machine. If not supported, incorrect information will be displayed.
+
+@item show map @var{addr}
+Prints the @code{vm_map} at @var{addr}.
+
+@code{show map $mapxx} can notably be used to show the map of task
+number @var{xx}.
+
+@item show object @var{addr}
+Prints the @code{vm_object} at @var{addr}.
+
+@item show page @var{addr}
+Prints the @code{vm_page} structure at @var{addr}.
+
+@item show port @var{addr}
+Prints the @code{ipc_port} structure at @var{addr}.
+
+@item show ipc_port[/t [ @var{thread} ]]
+Prints all @code{ipc_port} structure's addresses the target thread has.
+The target thread is a current thread or that specified by a parameter.
+
+@item show macro [ @var{name} ]
+Show the definitions of macros. If @var{name} is specified, only the
+definition of it is displayed. Otherwise, definitions of all macros are
+displayed.
+
+@item show watches
+Displays all watchpoints.
+
+@item watch[/T] @var{addr},@var{size} [ @var{task} ]
+Set a watchpoint for a region. Execution stops when an attempt to
+modify the region occurs. The @var{size} argument defaults to 4.
+Without @code{T} option, @var{addr} is assumed to be a kernel address.
+If you want to set a watch point in user space, specify @code{T} and
+@var{task} parameter where the address belongs to. If the @var{task}
+parameter is omitted, a task of the default target thread or a current
+task is assumed. If you specify a wrong space address, the request is
+rejected with an error message.
+
+Warning: Attempts to watch wired kernel memory may cause unrecoverable
+error in some systems such as i386. Watchpoints on user addresses work
+best.
+
+@item dwatch[/T] @var{addr} [ @var{task} ]
+Clears a watchpoint previously set for a region.
+Without @code{T} option, @var{addr} is assumed to be a kernel address.
+If you want to clear a watch point in user space, specify @code{T} and
+@var{task} parameter where the address belongs to. If the @var{task}
+parameter is omitted, a task of the default target thread or a current
+task is assumed. If you specify a wrong space address, the request is
+rejected with an error message.
+
+@item debug traps /on|/off
+Enables or disables debugging of all traps with @code{ddb}.
+
+@item debug references /on|/off
+Enables or disables debugging of all port reference counting errors
+with @code{ddb}.
+
+@end table
+
+
+@node Variables
+@section Variables
+
+The debugger accesses registers and variables as $@var{name}. Register
+names are as in the @code{show registers} command. Some variables are
+suffixed with numbers, and may have some modifier following a colon
+immediately after the variable name. For example, register variables
+can have @code{u} and @code{t} modifier to indicate user register and
+that of a default target thread instead of that of the current thread
+(e.g. @code{$eax:tu}).
+
+Built-in variables currently supported are:
+
+@table @code
+@item task@var{xx}[.@var{yy}]
+Task or thread structure address. @var{xx} and @var{yy} are task and
+thread identification numbers printed by a @code{show all threads}
+command respectively. This variable is read only.
+
+@item map@var{xx}
+VM map structure address. @var{xx} is a task identification number
+printed by a @code{show all tasks} command. This variable is read
+only.
+
+@item thread
+The default target thread. The value is used when @code{t} option is
+specified without explicit thread structure address parameter in command
+lines or expression evaluation.
+
+@item radix
+Input and output radix
+
+@item maxoff
+Addresses are printed as @var{symbol}+@var{offset} unless offset is greater than
+maxoff.
+
+@item maxwidth
+The width of the displayed line.
+
+@item lines
+The number of lines. It is used by @code{more} feature.
+
+@item tabstops
+Tab stop width.
+
+@item arg@var{xx}
+Parameters passed to a macro. @var{xx} can be 1 to 10.
+
+@item work@var{xx}
+Work variable. @var{xx} can be 0 to 31.
+@end table
+
+
+@node Expressions
+@section Expressions
+
+Almost all expression operators in C are supported except @code{~},
+@code{^}, and unary @code{&}. Special rules in @code{ddb} are:
+
+@table @code
+@item @var{identifier}
+name of a symbol. It is translated to the address(or value) of it.
+@code{.} and @code{:} can be used in the identifier. If supported by
+an object format dependent routine,
+[@var{file_name}:]@var{func}[:@var{line_number}]
+[@var{file_name}:]@var{variable}, and
+@var{file_name}[:@var{line_number}] can be accepted as a symbol. The
+symbol may be prefixed with @code{@var{symbol_table_name}::} like
+@code{emulator::mach_msg_trap} to specify other than kernel symbols.
+
+@item @var{number}
+radix is determined by the first two letters:
+@table @code
+@item 0x
+hex
+@item 0o
+octal
+@item 0t
+decimal
+@end table
+
+otherwise, follow current radix.
+
+@item .
+dot
+
+@item +
+next
+
+@item ..
+address of the start of the last line examined. Unlike dot or next,
+this is only changed by @code{examine} or @code{write} command.
+
+@item "
+last address explicitly specified.
+
+@item $@var{variable}
+register name or variable. It is translated to the value of it. It may
+be followed by a @code{:} and modifiers as described above.
+
+@item #
+round up the left hand side to the next multiple of the right hand side.
+
+@item *@var{expr}
+indirection. It may be followed by a @code{:} and modifiers as
+described above.
+@end table
+
+@include gpl.texi
+
+@node Documentation License
+@appendix Documentation License
+
+This manual is copyrighted and licensed under the GNU Free Documentation
+license.
+
+Parts of this manual are derived from the Mach manual packages
+originally provided by Carnegie Mellon University.
+
+@menu
+* GNU Free Documentation License:: The GNU Free Documentation License.
+* CMU License:: The CMU license applies to the original Mach
+ kernel and its documentation.
+@end menu
+
+@include fdl.texi
+
+@node CMU License
+@appendixsec CMU License
+
+@quotation
+@display
+Mach Operating System
+Copyright @copyright{} 1991,1990,1989 Carnegie Mellon University
+All Rights Reserved.
+@end display
+
+Permission to use, copy, modify and distribute this software and its
+documentation is hereby granted, provided that both the copyright
+notice and this permission notice appear in all copies of the
+software, derivative works or modified versions, and any portions
+thereof, and that both notices appear in supporting documentation.
+
+@sc{carnegie mellon allows free use of this software in its ``as is''
+condition. carnegie mellon disclaims any liability of any kind for
+any damages whatsoever resulting from the use of this software.}
+
+Carnegie Mellon requests users of this software to return to
+
+@display
+ Software Distribution Coordinator
+ School of Computer Science
+ Carnegie Mellon University
+ Pittsburgh PA 15213-3890
+@end display
+
+@noindent
+or @email{Software.Distribution@@CS.CMU.EDU} any improvements or
+extensions that they make and grant Carnegie Mellon the rights to
+redistribute these changes.
+@end quotation
+
+@node Concept Index
+@unnumbered Concept Index
+
+@printindex cp
+
+
+@node Function and Data Index
+@unnumbered Function and Data Index
+
+@printindex fn
+
+
+@summarycontents
+@contents
+@bye
diff --git a/gensym.awk b/gensym.awk
new file mode 100644
index 0000000..609d927
--- /dev/null
+++ b/gensym.awk
@@ -0,0 +1,78 @@
+#
+# Copyright (c) 1994 The University of Utah and
+# the Computer Systems Laboratory (CSL). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSL requests users of this software to return to csl-dist@cs.utah.edu any
+# improvements that they make and grant CSL redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSL
+#
+
+BEGIN {
+ bogus_printed = "no"
+}
+
+# Start the bogus function just before the first sym directive,
+# so that any #includes higher in the file don't get stuffed inside it.
+/^[a-z]/ {
+ if (bogus_printed == "no")
+ {
+ print "void bogus(void);"
+ print "void bogus(void) {";
+ bogus_printed = "yes";
+ }
+}
+
+# Take an arbitrarily complex C symbol or expression and constantize it.
+/^expr/ {
+ print "__asm (\"\\n\\";
+ if ($3 == "")
+ printf "* %s mAgIc%%0\" : : \"i\" (%s));\n", $2, $2;
+ else
+ printf "* %s mAgIc%%0\" : : \"i\" (%s));\n", $3, $2;
+}
+
+# Output a symbol defining the size of a C structure.
+/^size/ {
+ print "__asm (\"\\n\\";
+ if ($4 == "")
+ printf "* %s_SIZE mAgIc%%0\" : : \"i\" (sizeof(struct %s)));\n",
+ toupper($3), $2;
+ else
+ printf "* %s mAgIc%%0\" : : \"i\" (sizeof(struct %s)));\n",
+ $4, $2;
+}
+
+# Output a symbol defining the byte offset of an element of a C structure.
+/^offset/ {
+ print "__asm (\"\\n\\";
+ if ($5 == "")
+ {
+ printf "* %s_%s mAgIc%%0\" : : \"i\" (&((struct %s*)0)->%s));\n",
+ toupper($3), toupper($4), $2, $4;
+ }
+ else
+ {
+ printf "* %s mAgIc%%0\" : : \"i\" (&((struct %s*)0)->%s));\n",
+ toupper($5), $2, $4;
+ }
+}
+
+# Copy through all preprocessor directives.
+/^#/ {
+ print
+}
+
+END {
+ print "}"
+}
diff --git a/gitlog-to-changelog b/gitlog-to-changelog
new file mode 100755
index 0000000..e02d34c
--- /dev/null
+++ b/gitlog-to-changelog
@@ -0,0 +1,432 @@
+eval '(exit $?0)' && eval 'exec perl -wS "$0" ${1+"$@"}'
+ & eval 'exec perl -wS "$0" $argv:q'
+ if 0;
+# Convert git log output to ChangeLog format.
+
+my $VERSION = '2012-07-29 06:11'; # UTC
+# The definition above must lie within the first 8 lines in order
+# for the Emacs time-stamp write hook (at end) to update it.
+# If you change this file with Emacs, please let the write hook
+# do its job. Otherwise, update this string manually.
+
+# Copyright (C) 2008-2013 Free Software Foundation, Inc.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Written by Jim Meyering
+
+use strict;
+use warnings;
+use Getopt::Long;
+use POSIX qw(strftime);
+
+(my $ME = $0) =~ s|.*/||;
+
+# use File::Coda; # http://meyering.net/code/Coda/
+END {
+ defined fileno STDOUT or return;
+ close STDOUT and return;
+ warn "$ME: failed to close standard output: $!\n";
+ $? ||= 1;
+}
+
+sub usage ($)
+{
+ my ($exit_code) = @_;
+ my $STREAM = ($exit_code == 0 ? *STDOUT : *STDERR);
+ if ($exit_code != 0)
+ {
+ print $STREAM "Try '$ME --help' for more information.\n";
+ }
+ else
+ {
+ print $STREAM <<EOF;
+Usage: $ME [OPTIONS] [ARGS]
+
+Convert git log output to ChangeLog format. If present, any ARGS
+are passed to "git log". To avoid ARGS being parsed as options to
+$ME, they may be preceded by '--'.
+
+OPTIONS:
+
+ --amend=FILE FILE maps from an SHA1 to perl code (i.e., s/old/new/) that
+ makes a change to SHA1's commit log text or metadata.
+ --append-dot append a dot to the first line of each commit message if
+ there is no other punctuation or blank at the end.
+ --no-cluster never cluster commit messages under the same date/author
+ header; the default is to cluster adjacent commit messages
+ if their headers are the same and neither commit message
+ contains multiple paragraphs.
+ --srcdir=DIR the root of the source tree, from which the .git/
+ directory can be derived.
+ --since=DATE convert only the logs since DATE;
+ the default is to convert all log entries.
+ --format=FMT set format string for commit subject and body;
+ see 'man git-log' for the list of format metacharacters;
+ the default is '%s%n%b%n'
+ --strip-tab remove one additional leading TAB from commit message lines.
+ --strip-cherry-pick remove data inserted by "git cherry-pick";
+ this includes the "cherry picked from commit ..." line,
+ and the possible final "Conflicts:" paragraph.
+ --help display this help and exit
+ --version output version information and exit
+
+EXAMPLE:
+
+ $ME --since=2008-01-01 > ChangeLog
+ $ME -- -n 5 foo > last-5-commits-to-branch-foo
+
+SPECIAL SYNTAX:
+
+The following types of strings are interpreted specially when they appear
+at the beginning of a log message line. They are not copied to the output.
+
+ Copyright-paperwork-exempt: Yes
+ Append the "(tiny change)" notation to the usual "date name email"
+ ChangeLog header to mark a change that does not require a copyright
+ assignment.
+ Co-authored-by: Joe User <user\@example.com>
+ List the specified name and email address on a second
+ ChangeLog header, denoting a co-author.
+ Signed-off-by: Joe User <user\@example.com>
+ These lines are simply elided.
+
+In a FILE specified via --amend, comment lines (starting with "#") are ignored.
+FILE must consist of <SHA,CODE+> pairs where SHA is a 40-byte SHA1 (alone on
+a line) referring to a commit in the current project, and CODE refers to one
+or more consecutive lines of Perl code. Pairs must be separated by one or
+more blank lines.
+
+Here is sample input for use with --amend=FILE, from coreutils:
+
+3a169f4c5d9159283548178668d2fae6fced3030
+# fix typo in title:
+s/all tile types/all file types/
+
+1379ed974f1fa39b12e2ffab18b3f7a607082202
+# Due to a bug in vc-dwim, I mis-attributed a patch by Paul to myself.
+# Change the author to be Paul. Note the escaped "@":
+s,Jim .*>,Paul Eggert <eggert\\\@cs.ucla.edu>,
+
+EOF
+ }
+ exit $exit_code;
+}
+
+# If the string $S is a well-behaved file name, simply return it.
+# If it contains white space, quotes, etc., quote it, and return the new string.
+sub shell_quote($)
+{
+ my ($s) = @_;
+ if ($s =~ m![^\w+/.,-]!)
+ {
+ # Convert each single quote to '\''
+ $s =~ s/\'/\'\\\'\'/g;
+ # Then single quote the string.
+ $s = "'$s'";
+ }
+ return $s;
+}
+
+sub quoted_cmd(@)
+{
+ return join (' ', map {shell_quote $_} @_);
+}
+
+# Parse file F.
+# Comment lines (starting with "#") are ignored.
+# F must consist of <SHA,CODE+> pairs where SHA is a 40-byte SHA1
+# (alone on a line) referring to a commit in the current project, and
+# CODE refers to one or more consecutive lines of Perl code.
+# Pairs must be separated by one or more blank lines.
+sub parse_amend_file($)
+{
+ my ($f) = @_;
+
+ open F, '<', $f
+ or die "$ME: $f: failed to open for reading: $!\n";
+
+ my $fail;
+ my $h = {};
+ my $in_code = 0;
+ my $sha;
+ while (defined (my $line = <F>))
+ {
+ $line =~ /^\#/
+ and next;
+ chomp $line;
+ $line eq ''
+ and $in_code = 0, next;
+
+ if (!$in_code)
+ {
+ $line =~ /^([0-9a-fA-F]{40})$/
+ or (warn "$ME: $f:$.: invalid line; expected an SHA1\n"),
+ $fail = 1, next;
+ $sha = lc $1;
+ $in_code = 1;
+ exists $h->{$sha}
+ and (warn "$ME: $f:$.: duplicate SHA1\n"),
+ $fail = 1, next;
+ }
+ else
+ {
+ $h->{$sha} ||= '';
+ $h->{$sha} .= "$line\n";
+ }
+ }
+ close F;
+
+ $fail
+ and exit 1;
+
+ return $h;
+}
+
+# git_dir_option $SRCDIR
+#
+# From $SRCDIR, the --git-dir option to pass to git (none if $SRCDIR
+# is undef). Return as a list (0 or 1 element).
+sub git_dir_option($)
+{
+ my ($srcdir) = @_;
+ my @res = ();
+ if (defined $srcdir)
+ {
+ my $qdir = shell_quote $srcdir;
+ my $cmd = "cd $qdir && git rev-parse --show-toplevel";
+ my $qcmd = shell_quote $cmd;
+ my $git_dir = qx($cmd);
+ defined $git_dir
+ or die "$ME: cannot run $qcmd: $!\n";
+ $? == 0
+ or die "$ME: $qcmd had unexpected exit code or signal ($?)\n";
+ chomp $git_dir;
+ push @res, "--git-dir=$git_dir/.git";
+ }
+ @res;
+}
+
+{
+ my $since_date;
+ my $format_string = '%s%n%b%n';
+ my $amend_file;
+ my $append_dot = 0;
+ my $cluster = 1;
+ my $strip_tab = 0;
+ my $strip_cherry_pick = 0;
+ my $srcdir;
+ GetOptions
+ (
+ help => sub { usage 0 },
+ version => sub { print "$ME version $VERSION\n"; exit },
+ 'since=s' => \$since_date,
+ 'format=s' => \$format_string,
+ 'amend=s' => \$amend_file,
+ 'append-dot' => \$append_dot,
+ 'cluster!' => \$cluster,
+ 'strip-tab' => \$strip_tab,
+ 'strip-cherry-pick' => \$strip_cherry_pick,
+ 'srcdir=s' => \$srcdir,
+ ) or usage 1;
+
+ defined $since_date
+ and unshift @ARGV, "--since=$since_date";
+
+ # This is a hash that maps an SHA1 to perl code (i.e., s/old/new/)
+ # that makes a correction in the log or attribution of that commit.
+ my $amend_code = defined $amend_file ? parse_amend_file $amend_file : {};
+
+ my @cmd = ('git',
+ git_dir_option $srcdir,
+ qw(log --log-size),
+ '--pretty=format:%H:%ct %an <%ae>%n%n'.$format_string, @ARGV);
+ open PIPE, '-|', @cmd
+ or die ("$ME: failed to run '". quoted_cmd (@cmd) ."': $!\n"
+ . "(Is your Git too old? Version 1.5.1 or later is required.)\n");
+
+ my $prev_multi_paragraph;
+ my $prev_date_line = '';
+ my @prev_coauthors = ();
+ while (1)
+ {
+ defined (my $in = <PIPE>)
+ or last;
+ $in =~ /^log size (\d+)$/
+ or die "$ME:$.: Invalid line (expected log size):\n$in";
+ my $log_nbytes = $1;
+
+ my $log;
+ my $n_read = read PIPE, $log, $log_nbytes;
+ $n_read == $log_nbytes
+ or die "$ME:$.: unexpected EOF\n";
+
+ # Extract leading hash.
+ my ($sha, $rest) = split ':', $log, 2;
+ defined $sha
+ or die "$ME:$.: malformed log entry\n";
+ $sha =~ /^[0-9a-fA-F]{40}$/
+ or die "$ME:$.: invalid SHA1: $sha\n";
+
+ # If this commit's log requires any transformation, do it now.
+ my $code = $amend_code->{$sha};
+ if (defined $code)
+ {
+ eval 'use Safe';
+ my $s = new Safe;
+ # Put the unpreprocessed entry into "$_".
+ $_ = $rest;
+
+ # Let $code operate on it, safely.
+ my $r = $s->reval("$code")
+ or die "$ME:$.:$sha: failed to eval \"$code\":\n$@\n";
+
+ # Note that we've used this entry.
+ delete $amend_code->{$sha};
+
+ # Update $rest upon success.
+ $rest = $_;
+ }
+
+ # Remove lines inserted by "git cherry-pick".
+ if ($strip_cherry_pick)
+ {
+ $rest =~ s/^\s*Conflicts:\n.*//sm;
+ $rest =~ s/^\s*\(cherry picked from commit [\da-f]+\)\n//m;
+ }
+
+ my @line = split "\n", $rest;
+ my $author_line = shift @line;
+ defined $author_line
+ or die "$ME:$.: unexpected EOF\n";
+ $author_line =~ /^(\d+) (.*>)$/
+ or die "$ME:$.: Invalid line "
+ . "(expected date/author/email):\n$author_line\n";
+
+ # Format 'Copyright-paperwork-exempt: Yes' as a standard ChangeLog
+ # `(tiny change)' annotation.
+ my $tiny = (grep (/^Copyright-paperwork-exempt:\s+[Yy]es$/, @line)
+ ? ' (tiny change)' : '');
+
+ my $date_line = sprintf "%s %s$tiny\n",
+ strftime ("%F", localtime ($1)), $2;
+
+ my @coauthors = grep /^Co-authored-by:.*$/, @line;
+ # Omit meta-data lines we've already interpreted.
+ @line = grep !/^(?:Signed-off-by:[ ].*>$
+ |Co-authored-by:[ ]
+ |Copyright-paperwork-exempt:[ ]
+ )/x, @line;
+
+ # Remove leading and trailing blank lines.
+ if (@line)
+ {
+ while ($line[0] =~ /^\s*$/) { shift @line; }
+ while ($line[$#line] =~ /^\s*$/) { pop @line; }
+ }
+
+ # Record whether there are two or more paragraphs.
+ my $multi_paragraph = grep /^\s*$/, @line;
+
+ # Format 'Co-authored-by: A U Thor <email@example.com>' lines in
+ # standard multi-author ChangeLog format.
+ for (@coauthors)
+ {
+ s/^Co-authored-by:\s*/\t /;
+ s/\s*</ </;
+
+ /<.*?@.*\..*>/
+ or warn "$ME: warning: missing email address for "
+ . substr ($_, 5) . "\n";
+ }
+
+ # If clustering of commit messages has been disabled, if this header
+ # would be different from the previous date/name/email/coauthors header,
+ # or if this or the previous entry consists of two or more paragraphs,
+ # then print the header.
+ if ( ! $cluster
+ || $date_line ne $prev_date_line
+ || "@coauthors" ne "@prev_coauthors"
+ || $multi_paragraph
+ || $prev_multi_paragraph)
+ {
+ $prev_date_line eq ''
+ or print "\n";
+ print $date_line;
+ @coauthors
+ and print join ("\n", @coauthors), "\n";
+ }
+ $prev_date_line = $date_line;
+ @prev_coauthors = @coauthors;
+ $prev_multi_paragraph = $multi_paragraph;
+
+ # If there were any lines
+ if (@line == 0)
+ {
+ warn "$ME: warning: empty commit message:\n $date_line\n";
+ }
+ else
+ {
+ if ($append_dot)
+ {
+ # If the first line of the message has enough room, then
+ if (length $line[0] < 72)
+ {
+ # append a dot if there is no other punctuation or blank
+ # at the end.
+ $line[0] =~ /[[:punct:]\s]$/
+ or $line[0] .= '.';
+ }
+ }
+
+ # Remove one additional leading TAB from each line.
+ $strip_tab
+ and map { s/^\t// } @line;
+
+ # Prefix each non-empty line with a TAB.
+ @line = map { length $_ ? "\t$_" : '' } @line;
+
+ print "\n", join ("\n", @line), "\n";
+ }
+
+ defined ($in = <PIPE>)
+ or last;
+ $in ne "\n"
+ and die "$ME:$.: unexpected line:\n$in";
+ }
+
+ close PIPE
+ or die "$ME: error closing pipe from " . quoted_cmd (@cmd) . "\n";
+ # FIXME-someday: include $PROCESS_STATUS in the diagnostic
+
+ # Complain about any unused entry in the --amend=F specified file.
+ my $fail = 0;
+ foreach my $sha (keys %$amend_code)
+ {
+ warn "$ME:$amend_file: unused entry: $sha\n";
+ $fail = 1;
+ }
+
+ exit $fail;
+}
+
+# Local Variables:
+# mode: perl
+# indent-tabs-mode: nil
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "my $VERSION = '"
+# time-stamp-format: "%:y-%02m-%02d %02H:%02M"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "'; # UTC"
+# End:
diff --git a/i386/Makefrag.am b/i386/Makefrag.am
new file mode 100644
index 0000000..58ee327
--- /dev/null
+++ b/i386/Makefrag.am
@@ -0,0 +1,215 @@
+# Makefile fragment for i386.
+
+# Copyright (C) 1997, 1999, 2006, 2007 Free Software Foundation, Inc.
+
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+# "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+# LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+# USE OF THIS SOFTWARE.
+
+#
+# Building a distribution.
+#
+EXTRA_DIST += \
+ i386/i386/mach_i386.srv \
+ i386/i386/i386asm.sym \
+ i386/ldscript \
+ i386/README-Drivers \
+ i386/include
+
+if HOST_ix86
+
+#
+# Source files for any i386 kernel.
+#
+
+libkernel_a_SOURCES += \
+ i386/i386at/acpi_parse_apic.h \
+ i386/i386at/acpi_parse_apic.c \
+ i386/i386at/autoconf.c \
+ i386/i386at/autoconf.h \
+ i386/i386at/biosmem.c \
+ i386/i386at/biosmem.h \
+ i386/i386at/conf.c \
+ i386/i386at/cons_conf.c \
+ i386/i386at/elf.h \
+ i386/i386at/idt.h \
+ i386/i386at/model_dep.c \
+ i386/i386at/model_dep.h \
+ i386/include/mach/sa/stdarg.h
+
+if PLATFORM_at
+libkernel_a_SOURCES += \
+ i386/i386at/acpi_parse_apic.h \
+ i386/i386at/acpi_parse_apic.c \
+ i386/i386at/boothdr.S \
+ i386/i386at/com.c \
+ i386/i386at/com.h \
+ i386/i386at/comreg.h \
+ i386/i386at/cram.h \
+ i386/i386at/disk.h \
+ i386/i386at/i8250.h \
+ i386/i386at/immc.c \
+ i386/i386at/int_init.c \
+ i386/i386at/int_init.h \
+ i386/i386at/interrupt.S \
+ i386/i386at/kd.c \
+ i386/i386at/kd.h \
+ i386/i386at/kd_event.c \
+ i386/i386at/kd_event.h \
+ i386/i386at/kd_queue.c \
+ i386/i386at/kd_queue.h \
+ i386/i386at/kd_mouse.c \
+ i386/i386at/kd_mouse.h \
+ i386/i386at/kdasm.S \
+ i386/i386at/kdsoft.h \
+ i386/i386at/mem.c \
+ i386/i386at/mem.h \
+ i386/i386at/rtc.c \
+ i386/i386at/rtc.h
+endif
+
+#
+# `lpr' device support.
+#
+
+if enable_lpr
+libkernel_a_SOURCES += \
+ i386/i386at/lpr.c \
+ i386/i386at/lpr.h
+endif
+
+
+#
+# Further source files for any i386 kernel.
+#
+
+libkernel_a_SOURCES += \
+ i386/i386/copy_user.h \
+ i386/i386/cswitch.S \
+ i386/i386/debug_trace.S \
+ i386/i386/idt_inittab.S \
+ i386/i386/locore.S \
+ i386/i386/percpu.c \
+ i386/i386/percpu.h \
+ i386/i386/spl.S \
+ i386/i386/cpuboot.S
+
+if PLATFORM_at
+libkernel_a_SOURCES += \
+ i386/i386/apic.h \
+ i386/i386/apic.c \
+ i386/i386/hardclock.c \
+ i386/i386/hardclock.h \
+ i386/i386/irq.c \
+ i386/i386/irq.h \
+ i386/i386/msr.h \
+ i386/i386/pit.c \
+ i386/i386/pit.h
+
+if enable_apic
+libkernel_a_SOURCES += \
+ i386/i386at/ioapic.c
+else
+libkernel_a_SOURCES += \
+ i386/i386/pic.c \
+ i386/i386/pic.h \
+ i386/i386at/pic_isa.c
+endif
+endif
+
+#
+# KDB support.
+#
+
+if enable_kdb
+libkernel_a_SOURCES += \
+ i386/i386/_setjmp.S
+endif
+
+
+#
+# Files from the generic sources that we want.
+#
+
+libkernel_a_SOURCES += \
+ chips/busses.c \
+ chips/busses.h \
+ device/cirbuf.c
+
+#
+# Automatically generated source files.
+#
+# See Makerules.mig.am.
+#
+
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ i386/i386/mach_i386.server.defs.c
+nodist_libkernel_a_SOURCES += \
+ i386/i386/mach_i386.server.h \
+ i386/i386/mach_i386.server.c \
+ i386/i386/mach_i386.server.msgids
+# i386/i386/mach_i386.server.defs
+
+nodist_libkernel_a_SOURCES += \
+ i386/i386/i386asm.h
+
+#
+# Architecture specialities.
+#
+
+if PLATFORM_at
+gnumach_LINKFLAGS += \
+ --defsym _START_MAP=$(_START_MAP) \
+ --defsym _START=_START_MAP+0xC0000000 \
+ -T '$(srcdir)'/i386/ldscript
+endif
+
+AM_CFLAGS += \
+ -mno-3dnow \
+ -mno-mmx \
+ -mno-sse \
+ -mno-sse2
+
+#
+# Installation.
+#
+
+include_mach_i386dir = $(includedir)/mach/i386
+include_mach_i386_HEADERS = \
+ i386/include/mach/i386/asm.h \
+ i386/include/mach/i386/boolean.h \
+ i386/include/mach/i386/eflags.h \
+ i386/include/mach/i386/exception.h \
+ i386/include/mach/i386/fp_reg.h \
+ i386/include/mach/i386/ioccom.h \
+ i386/include/mach/i386/kern_return.h \
+ i386/include/mach/i386/mach_i386.defs \
+ i386/include/mach/i386/mach_i386_types.h \
+ i386/include/mach/i386/machine_types.defs \
+ i386/include/mach/i386/multiboot.h \
+ i386/include/mach/i386/syscall_sw.h \
+ i386/include/mach/i386/thread_status.h \
+ i386/include/mach/i386/trap.h \
+ i386/include/mach/i386/vm_param.h \
+ i386/include/mach/i386/vm_types.h
+
+#
+# Platform specific parts.
+#
+
+if PLATFORM_xen
+include i386/xen/Makefrag.am
+
+libkernel_a_SOURCES += \
+ i386/i386/xen.h
+
+endif
+
+endif # HOST_ix86
diff --git a/i386/Makefrag_x86.am b/i386/Makefrag_x86.am
new file mode 100644
index 0000000..272de02
--- /dev/null
+++ b/i386/Makefrag_x86.am
@@ -0,0 +1,84 @@
+# Copyright (C) 2023 Free Software Foundation, Inc.
+
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+# "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+# LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+# USE OF THIS SOFTWARE.
+
+# Shared files for all x86.
+
+libkernel_a_SOURCES += \
+ i386/i386/ast.h \
+ i386/i386/ast_check.c \
+ i386/i386/ast_types.h \
+ i386/i386/cpu.h \
+ i386/i386/cpu_number.h \
+ i386/i386/db_disasm.c \
+ i386/i386/db_interface.c \
+ i386/i386/db_interface.h \
+ i386/i386/db_machdep.h \
+ i386/i386/db_trace.c \
+ i386/i386/db_trace.h \
+ i386/i386/debug.h \
+ i386/i386/debug_i386.c \
+ i386/i386/eflags.h \
+ i386/i386/fpu.c \
+ i386/i386/fpu.h \
+ i386/i386/gdt.c \
+ i386/i386/gdt.h \
+ i386/i386/idt-gen.h \
+ i386/i386/idt.c \
+ i386/i386/io_perm.c \
+ i386/i386/io_perm.h \
+ i386/i386/ipl.h \
+ i386/i386/ktss.c \
+ i386/i386/ktss.h \
+ i386/i386/kttd_interface.c \
+ i386/i386/kttd_machdep.h \
+ i386/i386/ldt.c \
+ i386/i386/ldt.h \
+ i386/i386/lock.h \
+ i386/i386/locore.h \
+ i386/i386/loose_ends.c \
+ i386/i386/loose_ends.h \
+ i386/i386/mach_param.h \
+ i386/i386/machine_routines.h \
+ i386/i386/machine_task.c \
+ i386/i386/machspl.h \
+ i386/i386/model_dep.h \
+ i386/i386/mp_desc.c \
+ i386/i386/mp_desc.h \
+ i386/i386/pcb.c \
+ i386/i386/pcb.h \
+ i386/i386/phys.c \
+ i386/i386/pio.h \
+ i386/i386/pmap.h \
+ i386/i386/proc_reg.h \
+ i386/i386/sched_param.h \
+ i386/i386/seg.h \
+ i386/i386/setjmp.h \
+ i386/i386/smp.c \
+ i386/i386/smp.h \
+ i386/i386/spl.h \
+ i386/i386/strings.c \
+ i386/i386/task.h \
+ i386/i386/thread.h \
+ i386/i386/time_stamp.h \
+ i386/i386/trap.c \
+ i386/i386/trap.h \
+ i386/i386/tss.h \
+ i386/i386/user_ldt.c \
+ i386/i386/user_ldt.h \
+ i386/i386/vm_param.h \
+ i386/i386/xpr.h \
+ i386/intel/pmap.c \
+ i386/intel/pmap.h \
+ i386/intel/read_fault.c \
+ i386/intel/read_fault.h
+
diff --git a/i386/README-Drivers b/i386/README-Drivers
new file mode 100644
index 0000000..3d1066c
--- /dev/null
+++ b/i386/README-Drivers
@@ -0,0 +1,122 @@
+-*- text -*-
+
+Here some i386 specific details of the device drivers are explained.
+
+Each driver is followed by one or more triplets of three numbers. These
+triplets specify combinations of I/O address, spl, and pic that are believed
+to work.
+
+Then comes the name of the device to users. `%d' is a unit number.
+
+
+** Table
+
+*** Serial devices and similar equivalents
+
+PC com ports (always enabled)
+ 0x3f8,2f8,3e8
+ com%d
+
+Parallel port
+ lpr%d
+
+System Console (always enabled)
+ (indirect name for kd or first com line)
+ console
+
+PC keyboard/display (always enabled)
+ kd
+
+
+*** Special devices
+
+Mappable time device (always enabled)
+ time
+
+Mouse interface to PC (always enabled)
+ (Piggy backs horribly on COM devices)
+ mouse%d
+
+X Window System interface to keyboard (always enabled)
+ kbd%d
+
+Interface to setting up IO port access for users (always enabled)
+ iopl%d
+
+
+*** Disk controllers (except for SCSI)
+
+PC floppy
+ 0x3f0, 370
+ fd%d
+
+
+*** Ethernet controllers
+These all show up as `eth%d' except the atp device.
+
+NE2000/NE1000 ISA (ne, ne1000, ne2000)
+ 0x300,280,320,340,360
+
+3Com 503 (3c503) / Etherlink II
+ 0x300,310,330,350,250,280,2a0,2e0
+
+WD80x3
+ 0x300,280,380,240
+
+3COM 501 (3c501) / Etherlink I
+ 0x280,300
+
+SMC Ultra
+ 0x200,220,240,280,300,340,380
+
+HP PCLAN+ (27247B and 27252A)
+ 0x200,240,280,2c0,300,320,340
+
+HP PCLAN (27245 and other 27xxx series)
+ 0x300,320,340,280,2c0,200,240
+
+Seeq8005
+ 0x300,320,340,360
+
+Cabletron E21xx
+ 0x300,280,380,220
+
+AT1700 (Fujitsu 86965)
+ 0x260,280,2a0,240,340,320,380,300
+
+ICL EtherTeam 16i/32 (eth16i, eth32)
+ 0x260,280,2a0,240,340,320,380,300 (16i)
+
+EtherExpress 16
+ 0x300,270,320,340
+
+EtherExpressPro
+ 0x200,240,280,2c0,300,320,340,360
+
+AT&T WaveLAN & DEC RoamAbout DS
+ 0x390
+
+3Com 507 (3c507, el16)
+ 0x300,320,340,280
+
+3Com 505 (3c505, elplus)
+ 0x300,280,310
+
+D-Link DE-600
+ 0x378
+
+D-Link DE-620
+ 0x378
+
+Schneider & Koch G16
+ 0x100,180,208,220,288,320,328,390
+
+NI5210
+ 0x300,280,360,320,340
+
+NI6510
+ 0x300,320,340,360
+
+AT-LAN-TEC/RealTek pocket adaptor
+ 0x378,278,3bc
+ atp%d
diff --git a/i386/configfrag.ac b/i386/configfrag.ac
new file mode 100644
index 0000000..f07a98c
--- /dev/null
+++ b/i386/configfrag.ac
@@ -0,0 +1,124 @@
+dnl Configure fragment for i386.
+
+dnl Copyright (C) 1999, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+#
+# Definitions.
+#
+
+[case $host_cpu in
+ i?86)]
+ AM_CONDITIONAL([HOST_ix86], [true])
+
+ # Some of the i386-specific code checks for these.
+ AC_DEFINE([__ELF__], [1], [__ELF__])
+
+ # Determines the size of the CPU cache line.
+ AC_DEFINE([CPU_L1_SHIFT], [6], [CPU_L1_SHIFT])
+
+ [# Does the architecture provide machine-specific interfaces?
+ mach_machine_routines=1;;
+ *)]
+ AM_CONDITIONAL([HOST_ix86], [false])[;;
+esac
+
+case $host_platform in
+ at)]
+ AM_CONDITIONAL([PLATFORM_at], [true])[;;
+ *)]
+ AM_CONDITIONAL([PLATFORM_at], [false])[;;
+esac
+
+#
+# Formerly in `i386/bogus/'.
+#
+
+case $host_platform:$host_cpu in
+ at:i?86)
+ # should be 4, but we do not support shared IRQ for these
+ ncom=2
+ nlpr=1
+
+ # i386/bogus/platforms.h]
+ AC_DEFINE([AT386], [1], [AT386])[;;
+ xen:i?86)
+ # TODO. That should probably not be needed.
+ ncom=1
+ # TODO. That should probably not be needed.
+ # i386/bogus/platforms.h]
+ AC_DEFINE([AT386], [1], [AT386])[;;
+ *)
+ :;;
+esac]
+
+#
+# Options.
+#
+
+# The immediate console, useful for debugging early system
+# initialization. Disabled by default.
+AC_DEFINE([ENABLE_IMMEDIATE_CONSOLE], [0], [ENABLE_IMMEDIATE_CONSOLE])
+
+AC_ARG_ENABLE([lpr],
+ AS_HELP_STRING([--enable-lpr], [lpr device; on ix86-at enabled by default]))
+[case $host_platform:$host_cpu in
+ at:i?86)
+ case $enable_device_drivers in
+ default)
+ enable_lpr=${enable_lpr-yes};;
+ *)
+ enable_lpr=${enable_lpr-no};;
+ esac;;
+ *)
+ if [ x"$enable_lpr" = xyes ]; then]
+ AC_MSG_ERROR([cannot enable `lpr' in this configuration.])
+ [fi;;
+esac
+if [ x"$enable_lpr" = xyes ]; then]
+ AC_DEFINE([MACH_LPR], [], [lpr device])
+ AM_CONDITIONAL([enable_lpr], [true])
+[else]
+ AM_CONDITIONAL([enable_lpr], [false])
+[fi]
+
+AC_ARG_ENABLE([apic],
+ AS_HELP_STRING([--enable-apic], [LAPIC/IOAPIC support]))
+[if [ x"$enable_apic" = xyes ]; then]
+ AC_DEFINE([APIC], [1], [APIC support])
+ AM_CONDITIONAL([enable_apic], [true])
+[else]
+ AM_CONDITIONAL([enable_apic], [false])
+[fi]
+
+[case $host_platform:$host_cpu in
+ xen:i?86)
+ enable_pae=${enable_pae-yes};;
+ *:i?86)
+ :;;
+ *:x86_64)
+ enable_pae=${enable_pae-yes};;
+ *)
+ if [ x"$enable_pae" = xyes ]; then]
+ AC_MSG_ERROR([can only enable the `PAE' feature on ix86.])
+ [fi;;
+esac]
+
+AC_ARG_WITH([_START_MAP],
+ AS_HELP_STRING([--with-_START_MAP=0x1000000], [specify kernel mapping start address]),
+ [_START_MAP="$withval"], [_START_MAP=0x1000000])
+AC_SUBST(_START_MAP)
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/i386/i386/.gitignore b/i386/i386/.gitignore
new file mode 100644
index 0000000..4520a2a
--- /dev/null
+++ b/i386/i386/.gitignore
@@ -0,0 +1 @@
+/i386asm.h
diff --git a/i386/i386/_setjmp.S b/i386/i386/_setjmp.S
new file mode 100644
index 0000000..efabeb6
--- /dev/null
+++ b/i386/i386/_setjmp.S
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * C library -- _setjmp, _longjmp
+ *
+ * _longjmp(a,v)
+ * will generate a "return(v)" from
+ * the last call to
+ * _setjmp(a)
+ * by restoring registers from the stack,
+ * The previous signal state is NOT restored.
+ *
+ */
+
+#include <mach/machine/asm.h>
+
+ENTRY(_setjmp)
+ movl 4(%esp),%ecx /* fetch buffer */
+ movl %ebx,0(%ecx)
+ movl %esi,4(%ecx)
+ movl %edi,8(%ecx)
+ movl %ebp,12(%ecx) /* save frame pointer of caller */
+ popl %edx
+ movl %esp,16(%ecx) /* save stack pointer of caller */
+ movl %edx,20(%ecx) /* save pc of caller */
+ xorl %eax,%eax
+ jmp *%edx
+
+ENTRY(_longjmp)
+ movl 8(%esp),%eax /* return(v) */
+ movl 4(%esp),%ecx /* fetch buffer */
+ movl 0(%ecx),%ebx
+ movl 4(%ecx),%esi
+ movl 8(%ecx),%edi
+ movl 12(%ecx),%ebp
+ movl 16(%ecx),%esp
+ orl %eax,%eax
+ jnz 0f
+ incl %eax
+0: jmp *20(%ecx) /* done, return.... */
diff --git a/i386/i386/apic.c b/i386/i386/apic.c
new file mode 100644
index 0000000..0b5be50
--- /dev/null
+++ b/i386/i386/apic.c
@@ -0,0 +1,453 @@
+/* apic.c - APIC controller management for Mach.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <i386/apic.h>
+#include <i386/cpu.h>
+#include <i386at/idt.h>
+#include <string.h>
+#include <vm/vm_kern.h>
+#include <kern/printf.h>
+#include <kern/kalloc.h>
+
+/*
+ * Period of HPET timer in nanoseconds
+ */
+uint32_t hpet_period_nsec;
+
+/*
+ * This dummy structure is needed so that CPU_NUMBER can be called
+ * before the lapic pointer is initialized to point to the real Local Apic.
+ * It causes the apic_id to be faked as 0, which is the master processor.
+ */
+static ApicLocalUnit dummy_lapic = {0};
+volatile ApicLocalUnit* lapic = &dummy_lapic;
+
+/* This lookup table of [apic_id] -> kernel_id is initially populated with zeroes
+ * so every lookup results in master processor until real kernel ids are populated.
+ */
+int cpu_id_lut[UINT8_MAX + 1] = {0};
+
+ApicInfo apic_data;
+
+/*
+ * apic_data_init: initialize the apic_data structures to preliminary values.
+ * Reserve memory to the lapic list dynamic vector.
+ * Returns 0 if success, -1 if error.
+ */
+int
+apic_data_init(void)
+{
+	apic_data.ncpus = 0;
+	apic_data.nioapics = 0;
+	apic_data.nirqoverride = 0;
+
+	/* Reserve room for the maximum number of processors up front;
+	 * apic_refit_cpulist() shrinks it once the real count is known. */
+	apic_data.cpu_lapic_list = (uint16_t*) kalloc(NCPUS*sizeof(uint16_t));
+
+	/* Report the allocation failure, if any, back to the caller. */
+	return (apic_data.cpu_lapic_list == NULL) ? -1 : 0;
+}
+
+/*
+ * apic_lapic_init: initialize lapic pointer to the memory common address.
+ * Receives as input a pointer to the virtual memory address, previously mapped in a page.
+ * Until this runs, lapic points at a zero-filled dummy so early
+ * CPU_NUMBER lookups resolve to the master processor (apic_id 0).
+ */
+void
+apic_lapic_init(ApicLocalUnit* lapic_ptr)
+{
+	lapic = lapic_ptr;
+}
+
+/*
+ * apic_add_cpu: add a new lapic/cpu entry to the cpu_lapic list.
+ * Receives as input the lapic's APIC ID.
+ */
+void
+apic_add_cpu(uint16_t apic_id)
+{
+	/* cpu_lapic_list was sized for NCPUS entries in apic_data_init();
+	 * drop any excess cpu instead of writing past the allocation. */
+	if (apic_data.ncpus >= NCPUS) {
+		printf("apic_add_cpu: dropping APIC ID %d, only %d cpus supported\n",
+		       apic_id, NCPUS);
+		return;
+	}
+
+	apic_data.cpu_lapic_list[apic_data.ncpus] = apic_id;
+	apic_data.ncpus++;
+}
+
+/*
+ * apic_add_ioapic: add a new ioapic entry to the ioapic list.
+ * Receives as input an ioapic_data structure, filled with the IOAPIC entry's data.
+ */
+void
+apic_add_ioapic(IoApicData ioapic)
+{
+	/* ioapic_list is a fixed array of MAX_IOAPICS entries;
+	 * drop extra IOAPICs instead of overflowing it. */
+	if (apic_data.nioapics >= MAX_IOAPICS) {
+		printf("apic_add_ioapic: dropping IOAPIC %d, only %d supported\n",
+		       ioapic.apic_id, MAX_IOAPICS);
+		return;
+	}
+
+	apic_data.ioapic_list[apic_data.nioapics] = ioapic;
+	apic_data.nioapics++;
+}
+
+/*
+ * apic_add_irq_override: add a new IRQ to the irq_override list.
+ * Receives as input an irq_override_data structure, filled with the IRQ entry's data.
+ */
+void
+apic_add_irq_override(IrqOverrideData irq_over)
+{
+	/* irq_override_list is a fixed array of MAX_IRQ_OVERRIDE entries;
+	 * drop extra overrides instead of overflowing it. */
+	if (apic_data.nirqoverride >= MAX_IRQ_OVERRIDE) {
+		printf("apic_add_irq_override: dropping override for irq %d\n",
+		       irq_over.irq);
+		return;
+	}
+
+	apic_data.irq_override_list[apic_data.nirqoverride] = irq_over;
+	apic_data.nirqoverride++;
+}
+
+/*
+ * acpi_get_irq_override: look up the interrupt override entry whose
+ * source IRQ matches `pin`, or return NULL when none was registered.
+ */
+IrqOverrideData *
+acpi_get_irq_override(uint8_t pin)
+{
+	IrqOverrideData *over = apic_data.irq_override_list;
+	IrqOverrideData *end = over + apic_data.nirqoverride;
+
+	for (; over < end; over++) {
+		if (over->irq == pin)
+			return over;
+	}
+	return NULL;
+}
+
+/*
+ * apic_get_cpu_apic_id: returns the apic_id of a cpu.
+ * Receives as input the kernel ID of a CPU.
+ * Returns -1 when the kernel ID is out of range.
+ */
+int
+apic_get_cpu_apic_id(int kernel_id)
+{
+	/* Reject negative ids too: indexing cpu_lapic_list with a
+	 * negative value would read before the allocation. */
+	if (kernel_id < 0 || kernel_id >= NCPUS)
+		return -1;
+
+	return apic_data.cpu_lapic_list[kernel_id];
+}
+
+
+/*
+ * apic_get_cpu_kernel_id: returns the kernel_id of a cpu.
+ * Receives as input the APIC ID of a CPU.
+ * Unknown APIC IDs map to 0 (the master processor), since the lookup
+ * table is zero-initialized.
+ */
+int
+apic_get_cpu_kernel_id(uint16_t apic_id)
+{
+	return cpu_id_lut[apic_id];
+}
+
+/* apic_get_lapic: returns a reference to the common memory address for
+ * Local APIC (or the zeroed dummy before apic_lapic_init runs). */
+volatile ApicLocalUnit*
+apic_get_lapic(void)
+{
+	return lapic;
+}
+
+/*
+ * apic_get_ioapic: returns the IOAPIC identified by its kernel ID.
+ * Receives as input the IOAPIC's Kernel ID.
+ * Returns a ioapic_data structure pointer with the IOAPIC's data,
+ * or NULL when the ID is out of range.
+ */
+struct IoApicData *
+apic_get_ioapic(int kernel_id)
+{
+	/* Check both bounds: a negative id would index before the list. */
+	if (kernel_id >= 0 && kernel_id < MAX_IOAPICS)
+		return &apic_data.ioapic_list[kernel_id];
+	return NULL;
+}
+
+/* apic_get_numcpus: returns the current number of cpus
+ * registered via apic_add_cpu(). */
+uint8_t
+apic_get_numcpus(void)
+{
+	return apic_data.ncpus;
+}
+
+/* apic_get_num_ioapics: returns the current number of ioapics
+ * registered via apic_add_ioapic(). */
+uint8_t
+apic_get_num_ioapics(void)
+{
+	return apic_data.nioapics;
+}
+
+/* apic_get_total_gsis: returns the total number of GSIs in the system,
+ * i.e. the sum of the GSI counts of every registered IOAPIC. */
+int
+apic_get_total_gsis(void)
+{
+	int total = 0;
+	int nioapics = apic_get_num_ioapics();
+
+	for (int id = 0; id < nioapics; id++)
+		total += apic_get_ioapic(id)->ngsis;
+
+	return total;
+}
+
+/*
+ * apic_get_current_cpu: returns the apic_id of current cpu,
+ * read from CPUID leaf 1 (initial APIC ID in EBX bits 31:24).
+ */
+int
+apic_get_current_cpu(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	eax = 1;
+	ecx = 0;
+	cpuid(eax, ebx, ecx, edx);
+	return (ebx >> 24);
+}
+
+
+/*
+ * apic_refit_cpulist: shrink the cpu_lapic array from its initial
+ * NCPUS-sized allocation down to the number of cpus actually found.
+ *
+ * Returns 0 if success, -1 if error.
+ */
+int apic_refit_cpulist(void)
+{
+	uint16_t* current = apic_data.cpu_lapic_list;
+	uint16_t* shrunk;
+
+	if (current == NULL)
+		return -1;
+
+	shrunk = (uint16_t*) kalloc(apic_data.ncpus*sizeof(uint16_t));
+	if (shrunk == NULL)
+		return -1;
+
+	memcpy(shrunk, current, apic_data.ncpus*sizeof(uint16_t));
+
+	apic_data.cpu_lapic_list = shrunk;
+	kfree((vm_offset_t) current, NCPUS*sizeof(uint16_t));
+
+	return 0;
+}
+
+/*
+ * apic_generate_cpu_id_lut: fill the apic_id -> kernel_id lookup table
+ * from the cpu_lapic list built during discovery.
+ */
+void apic_generate_cpu_id_lut(void)
+{
+	int kid = 0;
+
+	while (kid < apic_data.ncpus) {
+		int aid = apic_get_cpu_apic_id(kid);
+
+		if (aid >= 0)
+			cpu_id_lut[aid] = kid;
+		else
+			printf("apic_get_cpu_apic_id(%d) failed...\n", kid);
+		kid++;
+	}
+}
+
+/*
+ * apic_print_info: dump the known Local APICs and IOAPICs,
+ * pairing each kernel ID with its APIC ID.
+ */
+void apic_print_info(void)
+{
+	int i;
+	int ncpus = apic_get_numcpus();
+	int nioapics = apic_get_num_ioapics();
+
+	printf("CPUS:\n");
+	for (i = 0; i < ncpus; i++) {
+		uint16_t lapic_id = apic_get_cpu_apic_id(i);
+
+		printf(" CPU %d - APIC ID %x - addr=0x%p\n", i, lapic_id, apic_get_lapic());
+	}
+
+	printf("IOAPICS:\n");
+	for (i = 0; i < nioapics; i++) {
+		IoApicData *ioapic = apic_get_ioapic(i);
+
+		if (!ioapic) {
+			printf("ERROR: invalid IOAPIC ID %x\n", i);
+		} else {
+			uint16_t ioapic_id = ioapic->apic_id;
+
+			printf(" IOAPIC %d - APIC ID %x - addr=0x%p\n", i, ioapic_id, ioapic->ioapic);
+		}
+	}
+}
+
+/*
+ * apic_send_ipi: send an Inter-Processor Interrupt by programming the
+ * LAPIC Interrupt Command Register (ICR).
+ * The high half (destination) is written before the low half;
+ * NOTE(review): presumably because the write to icr_low is what
+ * triggers delivery -- confirm against the Intel SDM.
+ */
+void apic_send_ipi(unsigned dest_shorthand, unsigned deliv_mode, unsigned dest_mode, unsigned level, unsigned trig_mode, unsigned vector, unsigned dest_id)
+{
+	IcrLReg icrl_values;
+	IcrHReg icrh_values;
+
+	/* Keep previous values and only overwrite known fields */
+	icrl_values.r = lapic->icr_low.r;
+	icrh_values.r = lapic->icr_high.r;
+
+	icrl_values.destination_shorthand = dest_shorthand;
+	icrl_values.delivery_mode = deliv_mode;
+	icrl_values.destination_mode = dest_mode;
+	icrl_values.level = level;
+	icrl_values.trigger_mode = trig_mode;
+	icrl_values.vector = vector;
+	icrh_values.destination_field = dest_id;
+
+	lapic->icr_high.r = icrh_values.r;
+	lapic->icr_low.r = icrl_values.r;
+}
+
+/* Set the software-enable bit in the spurious-vector register. */
+void
+lapic_enable(void)
+{
+	lapic->spurious_vector.r |= LAPIC_ENABLE;
+}
+
+/* Clear the software-enable bit in the spurious-vector register. */
+void
+lapic_disable(void)
+{
+	lapic->spurious_vector.r &= ~LAPIC_ENABLE;
+}
+
+/*
+ * lapic_setup: program this cpu's Local APIC: flat logical destination
+ * model targeting itself, LINT0/LINT1 and the performance-monitor LVT
+ * entries masked, task priority zeroed, and the spurious-interrupt
+ * vector register programmed (with directed EOI enabled).
+ * Runs with interrupts disabled for the duration.
+ * NOTE(review): each register is read into `dummy` before being
+ * written -- presumably to serialize the MMIO accesses; confirm.
+ */
+void
+lapic_setup(void)
+{
+	unsigned long flags;
+	int apic_id;
+	volatile uint32_t dummy;
+
+	cpu_intr_save(&flags);
+
+	apic_id = apic_get_current_cpu();
+
+	dummy = lapic->dest_format.r;
+	lapic->dest_format.r = 0xffffffff;	/* flat model */
+	dummy = lapic->logical_dest.r;
+	lapic->logical_dest.r = lapic->apic_id.r;	/* target self */
+	dummy = lapic->lvt_lint0.r;
+	lapic->lvt_lint0.r = dummy | LAPIC_DISABLE;
+	dummy = lapic->lvt_lint1.r;
+	lapic->lvt_lint1.r = dummy | LAPIC_DISABLE;
+	dummy = lapic->lvt_performance_monitor.r;
+	lapic->lvt_performance_monitor.r = dummy | LAPIC_DISABLE;
+	/* Only the cpu with apic_id 0 (the master) keeps its LVT timer. */
+	if (apic_id != 0)
+	{
+		dummy = lapic->lvt_timer.r;
+		lapic->lvt_timer.r = dummy | LAPIC_DISABLE;
+	}
+	dummy = lapic->task_pri.r;
+	lapic->task_pri.r = 0;	/* accept interrupts of any priority */
+
+	/* Enable LAPIC to send or receive IPI/SIPIs */
+	dummy = lapic->spurious_vector.r;
+	lapic->spurious_vector.r = IOAPIC_SPURIOUS_BASE
+		| LAPIC_ENABLE_DIRECTED_EOI;
+
+	lapic->error_status.r = 0;
+
+	cpu_intr_restore(flags);
+}
+
+/* Signal end-of-interrupt by writing 0 to the LAPIC EOI register. */
+void
+lapic_eoi(void)
+{
+	lapic->eoi.r = 0;
+}
+
+#define HPET32(x) *((volatile uint32_t *)((uint8_t *)hpet_addr + x))
+#define HPET_CAP_PERIOD 0x04
+#define HPET_CFG 0x10
+# define HPET_CFG_ENABLE (1 << 0)
+# define HPET_LEGACY_ROUTE (1 << 1)
+#define HPET_COUNTER 0xf0
+#define HPET_T0_CFG 0x100
+# define HPET_T0_32BIT_MODE (1 << 8)
+# define HPET_T0_VAL_SET (1 << 6)
+# define HPET_T0_TYPE_PERIODIC (1 << 3)
+# define HPET_T0_INT_ENABLE (1 << 2)
+#define HPET_T0_COMPARATOR 0x108
+
+#define FSEC_PER_NSEC 1000000
+#define NSEC_PER_USEC 1000
+
+/* This function sets up the HPET timer to be in
+ * 32 bit periodic mode and not generating any interrupts.
+ * The timer counts upwards and when it reaches 0xffffffff it
+ * wraps to zero. The timer ticks at a constant rate in nanoseconds which
+ * is stored in hpet_period_nsec variable.
+ */
+void
+hpet_init(void)
+{
+	uint32_t period;
+	uint32_t val;
+
+	/* hpet_addr must have been mapped by the platform code first. */
+	assert(hpet_addr != 0);
+
+	/* Find out how often the HPET ticks in nanoseconds.
+	 * The capability register reports the period in femtoseconds,
+	 * hence the FSEC_PER_NSEC division (integer, rounds down). */
+	period = HPET32(HPET_CAP_PERIOD);
+	hpet_period_nsec = period / FSEC_PER_NSEC;
+	printf("HPET ticks every %d nanoseconds\n", hpet_period_nsec);
+
+	/* Disable HPET and legacy interrupt routing mode */
+	val = HPET32(HPET_CFG);
+	val = val & ~(HPET_LEGACY_ROUTE | HPET_CFG_ENABLE);
+	HPET32(HPET_CFG) = val;
+
+	/* Clear the counter */
+	HPET32(HPET_COUNTER) = 0;
+
+	/* Set up 32 bit periodic timer with no interrupts */
+	val = HPET32(HPET_T0_CFG);
+	val = (val & ~HPET_T0_INT_ENABLE) | HPET_T0_32BIT_MODE | HPET_T0_TYPE_PERIODIC | HPET_T0_VAL_SET;
+	HPET32(HPET_T0_CFG) = val;
+
+	/* Set comparator to max so the counter wraps at 0xffffffff */
+	HPET32(HPET_T0_COMPARATOR) = 0xffffffff;
+
+	/* Enable the HPET */
+	HPET32(HPET_CFG) |= HPET_CFG_ENABLE;
+
+	printf("HPET enabled\n");
+}
+
+/*
+ * hpet_udelay: busy-wait for (at least) `us` microseconds using the
+ * free-running HPET main counter programmed by hpet_init().
+ */
+void
+hpet_udelay(uint32_t us)
+{
+	uint32_t start, now;
+	/* Longest delay whose us -> ns conversion below fits 32 bits. */
+	uint32_t max_delay_us = 0xffffffff / NSEC_PER_USEC;
+
+	if (us > max_delay_us) {
+		printf("HPET ERROR: Delay too long, %d usec, truncating to %d usec\n",
+			us, max_delay_us);
+		us = max_delay_us;
+	}
+
+	/* Convert us to HPET ticks */
+	us = (us * NSEC_PER_USEC) / hpet_period_nsec;
+
+	start = HPET32(HPET_COUNTER);
+	do {
+		now = HPET32(HPET_COUNTER);
+		/* Unsigned subtraction stays correct across counter wrap. */
+	} while (now - start < us);
+}
+
+/* hpet_mdelay: busy-wait for (at least) `ms` milliseconds. */
+void
+hpet_mdelay(uint32_t ms)
+{
+	/* ms * 1000 must not wrap around uint32_t before hpet_udelay()
+	 * gets a chance to apply its own maximum-delay clamp. */
+	if (ms > 0xffffffff / 1000)
+		ms = 0xffffffff / 1000;
+	hpet_udelay(ms * 1000);
+}
+
diff --git a/i386/i386/apic.h b/i386/i386/apic.h
new file mode 100644
index 0000000..9eef0d8
--- /dev/null
+++ b/i386/i386/apic.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _IMPS_APIC_
+#define _IMPS_APIC_
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+typedef struct ApicReg {
+ uint32_t r; /* the actual register */
+ uint32_t p[3]; /* pad to the next 128-bit boundary */
+} ApicReg;
+
+typedef struct ApicIoUnit {
+ ApicReg select;
+ ApicReg window;
+ ApicReg unused[2];
+ ApicReg eoi; /* write the vector you wish to EOI to this reg */
+} ApicIoUnit;
+
+struct ioapic_route_entry {
+ uint32_t vector : 8,
+ delvmode : 3, /* 000=fixed 001=lowest 111=ExtInt */
+ destmode : 1, /* 0=physical 1=logical */
+ delvstatus : 1,
+ polarity : 1, /* 0=activehigh 1=activelow */
+ irr : 1,
+ trigger : 1, /* 0=edge 1=level */
+ mask : 1, /* 0=enabled 1=disabled */
+ reserved1 : 15;
+ uint32_t reserved2 : 24,
+ dest : 8;
+} __attribute__ ((packed));
+
+union ioapic_route_entry_union {
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ };
+ struct ioapic_route_entry both;
+};
+
+
+/* Grateful to trasterlabs for this snippet */
+
+typedef union u_icr_low
+{
+ uint32_t value[4];
+ struct
+ {
+ uint32_t r; // FEE0 0300H - 4 bytes
+ unsigned :32; // FEE0 0304H
+ unsigned :32; // FEE0 0308H
+ unsigned :32; // FEE0 030CH
+ };
+ struct
+ {
+ unsigned vector: 8; /* Vector of interrupt. Lowest 8 bits of routine address */
+ unsigned delivery_mode : 3;
+ unsigned destination_mode: 1;
+ unsigned delivery_status: 1;
+ unsigned :1;
+ unsigned level: 1;
+ unsigned trigger_mode: 1;
+ unsigned remote_read_status: 2; /* Read-only field */
+ unsigned destination_shorthand: 2;
+ unsigned :12;
+ };
+} IcrLReg;
+
+typedef union u_icr_high
+{
+ uint32_t value[4];
+ struct
+ {
+ uint32_t r; // FEE0 0310H - 4 bytes
+ unsigned :32; // FEE0 0314H
+ unsigned :32; // FEE0 0318H
+ unsigned :32; // FEE0 031CH
+ };
+ struct
+ {
+ unsigned :24; // FEE0 0310H - 4 bytes
+ unsigned destination_field :8; /* APIC ID (in physical mode) or MDA (in logical) of destination processor */
+ };
+} IcrHReg;
+
+
+typedef enum e_icr_dest_shorthand
+{
+ NO_SHORTHAND = 0,
+ SELF = 1,
+ ALL_INCLUDING_SELF = 2,
+ ALL_EXCLUDING_SELF = 3
+} icr_dest_shorthand;
+
+typedef enum e_icr_deliv_mode
+{
+ FIXED = 0,
+ LOWEST_PRIORITY = 1,
+ SMI = 2,
+ NMI = 4,
+ INIT = 5,
+ STARTUP = 6,
+} icr_deliv_mode;
+
+typedef enum e_icr_dest_mode
+{
+ PHYSICAL = 0,
+ LOGICAL = 1
+} icr_dest_mode;
+
+typedef enum e_icr_deliv_status
+{
+ IDLE = 0,
+ SEND_PENDING = 1
+} icr_deliv_status;
+
+typedef enum e_icr_level
+{
+ DE_ASSERT = 0,
+ ASSERT = 1
+} icr_level;
+
+/* ICR trigger-mode values.
+ * NOTE(review): "irc" looks like a typo for "icr"; renaming the type
+ * would touch all users, so it is only flagged here. */
+typedef enum e_irc_trigger_mode
+{
+    EDGE = 0,
+    LEVEL = 1
+} irc_trigger_mode;
+
+
+typedef struct ApicLocalUnit {
+ ApicReg reserved0; /* 0x000 */
+ ApicReg reserved1; /* 0x010 */
+ ApicReg apic_id; /* 0x020. Hardware ID of current processor */
+ ApicReg version; /* 0x030 */
+ ApicReg reserved4; /* 0x040 */
+ ApicReg reserved5; /* 0x050 */
+ ApicReg reserved6; /* 0x060 */
+ ApicReg reserved7; /* 0x070 */
+ ApicReg task_pri; /* 0x080 */
+ ApicReg arbitration_pri; /* 0x090 */
+ ApicReg processor_pri; /* 0x0a0 */
+ ApicReg eoi; /* 0x0b0 */
+ ApicReg remote; /* 0x0c0 */
+ ApicReg logical_dest; /* 0x0d0 */
+ ApicReg dest_format; /* 0x0e0 */
+ ApicReg spurious_vector; /* 0x0f0 */
+ ApicReg isr[8]; /* 0x100 */
+ ApicReg tmr[8]; /* 0x180 */
+ ApicReg irr[8]; /* 0x200 */
+ ApicReg error_status; /* 0x280 */
+ ApicReg reserved28[6]; /* 0x290 */
+ ApicReg lvt_cmci; /* 0x2f0 */
+ IcrLReg icr_low; /* 0x300. Store the information to send an IPI (Inter-processor Interrupt) */
+ IcrHReg icr_high; /* 0x310. Store the IPI destination */
+ ApicReg lvt_timer; /* 0x320 */
+ ApicReg lvt_thermal; /* 0x330 */
+ ApicReg lvt_performance_monitor; /* 0x340 */
+ ApicReg lvt_lint0; /* 0x350 */
+ ApicReg lvt_lint1; /* 0x360 */
+ ApicReg lvt_error; /* 0x370 */
+ ApicReg init_count; /* 0x380 */
+ ApicReg cur_count; /* 0x390 */
+ ApicReg reserved3a; /* 0x3a0 */
+ ApicReg reserved3b; /* 0x3b0 */
+ ApicReg reserved3c; /* 0x3c0 */
+ ApicReg reserved3d; /* 0x3d0 */
+ ApicReg divider_config; /* 0x3e0 */
+ ApicReg reserved3f; /* 0x3f0 */
+} ApicLocalUnit;
+
+/* Per-IOAPIC bookkeeping kept in apic_data.ioapic_list. */
+typedef struct IoApicData {
+    uint8_t  apic_id;	/* hardware ID of this IOAPIC */
+    uint8_t  ngsis;	/* number of GSIs (interrupt pins) it serves */
+    uint32_t addr;	/* base address; NOTE(review): physical? confirm at the map site */
+    uint32_t gsi_base;	/* first global system interrupt it handles */
+    ApicIoUnit *ioapic;	/* mapped register window */
+} IoApicData;
+
+#define APIC_IRQ_OVERRIDE_POLARITY_MASK 1
+#define APIC_IRQ_OVERRIDE_ACTIVE_LOW 2
+#define APIC_IRQ_OVERRIDE_TRIGGER_MASK 4
+#define APIC_IRQ_OVERRIDE_LEVEL_TRIGGERED 8
+
+/* Interrupt source override: redirects a legacy IRQ to a GSI. */
+typedef struct IrqOverrideData {
+    uint8_t  bus;	/* source bus */
+    uint8_t  irq;	/* source IRQ number (matched by acpi_get_irq_override) */
+    uint32_t gsi;	/* global system interrupt it is routed to */
+    uint16_t flags;	/* APIC_IRQ_OVERRIDE_* polarity/trigger bits */
+} IrqOverrideData;
+
+#define MAX_IOAPICS 16
+#define MAX_IRQ_OVERRIDE 24
+
+/* Aggregate of everything discovered about the APIC hardware. */
+typedef struct ApicInfo {
+    uint8_t   ncpus;		/* entries used in cpu_lapic_list */
+    uint8_t   nioapics;		/* entries used in ioapic_list */
+    int       nirqoverride;	/* entries used in irq_override_list */
+    uint16_t* cpu_lapic_list;	/* kernel cpu id -> local APIC id */
+    struct    IoApicData ioapic_list[MAX_IOAPICS];
+    struct    IrqOverrideData irq_override_list[MAX_IRQ_OVERRIDE];
+} ApicInfo;
+
+int apic_data_init(void);
+void apic_add_cpu(uint16_t apic_id);
+void apic_lapic_init(ApicLocalUnit* lapic_ptr);
+void apic_add_ioapic(struct IoApicData);
+void apic_add_irq_override(struct IrqOverrideData irq_over);
+void apic_send_ipi(unsigned dest_shorthand, unsigned deliv_mode, unsigned dest_mode, unsigned level, unsigned trig_mode, unsigned vector, unsigned dest_id);
+IrqOverrideData *acpi_get_irq_override(uint8_t pin); /* pin = source IRQ, matched against IrqOverrideData.irq */
+int apic_get_cpu_apic_id(int kernel_id);
+int apic_get_cpu_kernel_id(uint16_t apic_id);
+volatile ApicLocalUnit* apic_get_lapic(void);
+struct IoApicData *apic_get_ioapic(int kernel_id);
+uint8_t apic_get_numcpus(void);
+uint8_t apic_get_num_ioapics(void);
+int apic_get_current_cpu(void);
+void apic_print_info(void);
+int apic_refit_cpulist(void);
+void apic_generate_cpu_id_lut(void);
+int apic_get_total_gsis(void);
+void picdisable(void);
+void lapic_eoi(void);
+void ioapic_irq_eoi(int pin);
+void lapic_setup(void);
+void lapic_disable(void);
+void lapic_enable(void);
+void lapic_enable_timer(void);
+void calibrate_lapic_timer(void);
+void ioapic_toggle(int pin, int mask);
+void ioapic_configure(void);
+
+void hpet_init(void);
+void hpet_udelay(uint32_t us);
+void hpet_mdelay(uint32_t ms);
+
+extern int timer_pin;
+extern void intnull(int unit);
+extern volatile ApicLocalUnit* lapic;
+extern int cpu_id_lut[];
+extern uint32_t *hpet_addr;
+
+#endif
+
+#define APIC_IO_UNIT_ID 0x00
+#define APIC_IO_VERSION 0x01
+# define APIC_IO_VERSION_SHIFT 0
+# define APIC_IO_ENTRIES_SHIFT 16
+#define APIC_IO_REDIR_LOW(int_pin) (0x10+(int_pin)*2)
+#define APIC_IO_REDIR_HIGH(int_pin) (0x11+(int_pin)*2)
+
+#define IMCR_SELECT 0x22
+#define IMCR_DATA 0x23
+#define MODE_IMCR 0x70
+# define IMCR_USE_PIC 0
+# define IMCR_USE_APIC 1
+
+#define LAPIC_LOW_PRIO 0x100
+#define LAPIC_NMI 0x400
+#define LAPIC_EXTINT 0x700
+#define LAPIC_LEVEL_TRIGGERED 0x8000
+
+#define LAPIC_ENABLE 0x100
+#define LAPIC_FOCUS 0x200
+#define LAPIC_ENABLE_DIRECTED_EOI 0x1000
+#define LAPIC_DISABLE 0x10000
+#define LAPIC_TIMER_PERIODIC 0x20000
+#define LAPIC_TIMER_DIVIDE_2 0
+#define LAPIC_TIMER_DIVIDE_4 1
+#define LAPIC_TIMER_DIVIDE_8 2
+#define LAPIC_TIMER_DIVIDE_16 3
+#define LAPIC_TIMER_BASEDIV 0x100000
+#define LAPIC_HAS_DIRECTED_EOI 0x1000000
+
+#define NINTR 64 /* Max 32 GSIs on each of two IOAPICs */
+#define IOAPIC_FIXED 0
+#define IOAPIC_PHYSICAL 0
+#define IOAPIC_LOGICAL 1
+#define IOAPIC_NMI 4
+#define IOAPIC_EXTINT 7
+#define IOAPIC_ACTIVE_HIGH 0
+#define IOAPIC_ACTIVE_LOW 1
+#define IOAPIC_EDGE_TRIGGERED 0
+#define IOAPIC_LEVEL_TRIGGERED 1
+#define IOAPIC_MASK_ENABLED 0
+#define IOAPIC_MASK_DISABLED 1
+
+#define APIC_MSR 0x1b
+#define APIC_MSR_BSP 0x100 /* Processor is a BSP */
+#define APIC_MSR_X2APIC 0x400 /* LAPIC is in x2APIC mode */
+#define APIC_MSR_ENABLE 0x800 /* LAPIC is enabled */
+
+/* Set or clear a bit in a 255-bit APIC mask register.
+ These registers are spread through eight 32-bit registers. */
+#define APIC_SET_MASK_BIT(reg, bit) \
+ ((reg)[(bit) >> 5].r |= 1 << ((bit) & 0x1f))
+#define APIC_CLEAR_MASK_BIT(reg, bit) \
+ ((reg)[(bit) >> 5].r &= ~(1 << ((bit) & 0x1f)))
+
+#ifndef __ASSEMBLER__
+
+#ifdef APIC
+/* Mask (disable) the IOAPIC pin feeding irq_nr. */
+static inline void mask_irq (unsigned int irq_nr) {
+    ioapic_toggle(irq_nr, IOAPIC_MASK_DISABLED);
+}
+
+/* Unmask (enable) the IOAPIC pin feeding irq_nr. */
+static inline void unmask_irq (unsigned int irq_nr) {
+    ioapic_toggle(irq_nr, IOAPIC_MASK_ENABLED);
+}
+#endif
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /*_IMPS_APIC_*/
+
diff --git a/i386/i386/ast.h b/i386/i386/ast.h
new file mode 100644
index 0000000..7afaa41
--- /dev/null
+++ b/i386/i386/ast.h
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_AST_H_
+#define _I386_AST_H_
+
+/*
+ * Machine-dependent AST file for machines with no hardware AST support.
+ *
+ * For the I386, we define AST_I386_FP to handle delayed
+ * floating-point exceptions. The FPU may interrupt on errors
+ * while the user is not running (in kernel or other thread running).
+ */
+
+#define AST_I386_FP 0x80000000
+
+#define MACHINE_AST_PER_THREAD AST_I386_FP
+
+
+/* Chain to the machine-independent header. */
+/* #include_next "ast.h" */
+
+
+#endif /* _I386_AST_H_ */
diff --git a/i386/i386/ast_check.c b/i386/i386/ast_check.c
new file mode 100644
index 0000000..61cd5e8
--- /dev/null
+++ b/i386/i386/ast_check.c
@@ -0,0 +1,56 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if NCPUS > 1
+
+/*
+ * Handle signalling ASTs on other processors.
+ *
+ * Initial i386 implementation does nothing.
+ */
+
+#include <kern/ast.h>
+#include <kern/processor.h>
+#include <kern/smp.h>
+#include <machine/cpu_number.h>
+#include <machine/apic.h>
+
+/*
+ * Initialize for remote invocation of ast_check.
+ */
+void init_ast_check(const processor_t processor)
+{
+	/* Nothing to set up: cause_ast_check() keeps no per-processor state. */
+}
+
+/*
+ * Cause remote invocation of ast_check. Caller is at splsched().
+ */
+void cause_ast_check(const processor_t processor)
+{
+	/* Kick the target cpu with an IPI so it re-examines its ASTs. */
+	smp_remote_ast(apic_get_cpu_apic_id(processor->slot_num));
+}
+
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/ast_types.h b/i386/i386/ast_types.h
new file mode 100644
index 0000000..89e3182
--- /dev/null
+++ b/i386/i386/ast_types.h
@@ -0,0 +1,36 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_AST_TYPES_H_
+#define _I386_AST_TYPES_H_
+
+/*
+ * Data type for remote ast_check() invocation support. Currently
+ * not implemented. Do this first to avoid include problems.
+ */
+typedef int ast_check_t;
+
+#endif /* _I386_AST_TYPES_H_ */
diff --git a/i386/i386/copy_user.h b/i386/i386/copy_user.h
new file mode 100644
index 0000000..3d1c727
--- /dev/null
+++ b/i386/i386/copy_user.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2023 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef COPY_USER_H
+#define COPY_USER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <machine/locore.h>
+#include <mach/message.h>
+
+/*
+ * The copyin_32to64() and copyout_64to32() routines are meant for data types
+ * that have different size in kernel and user space. They should be independent
+ * of endianness and hopefully can be reused in the future on other archs.
+ * These types are e.g.:
+ * - port names vs port pointers, on a 64-bit kernel
+ * - memory addresses, on a 64-bit kernel and 32-bit user
+ */
+
+/* Copy a 32-bit user value into a 64-bit kernel variable, widening it.
+ * Returns 0 on success, the copyin() error code otherwise. */
+static inline int copyin_32to64(const uint32_t *uaddr, uint64_t *kaddr)
+{
+    uint32_t narrow;
+    int err = copyin(uaddr, &narrow, sizeof(uint32_t));
+
+    if (err != 0)
+        return err;
+
+    *kaddr = narrow;
+    return 0;
+}
+
+/* Copy the low 32 bits of a 64-bit kernel value out to user space. */
+static inline int copyout_64to32(const uint64_t *kaddr, uint32_t *uaddr)
+{
+    uint32_t narrow = *kaddr;
+
+    return copyout(&narrow, uaddr, sizeof(uint32_t));
+}
+
+/* Copy a user virtual address into a kernel vm_offset_t; with a 32-bit
+ * userland (USER32) the user representation is narrower and must be
+ * widened. */
+static inline int copyin_address(const rpc_vm_offset_t *uaddr, vm_offset_t *kaddr)
+{
+#ifdef USER32
+  return copyin_32to64(uaddr, kaddr);
+#else /* USER32 */
+  return copyin(uaddr, kaddr, sizeof(*uaddr));
+#endif /* USER32 */
+}
+
+/* Copy a kernel vm_offset_t out as a user virtual address; with a
+ * 32-bit userland (USER32) it is narrowed to the user representation. */
+static inline int copyout_address(const vm_offset_t *kaddr, rpc_vm_offset_t *uaddr)
+{
+#ifdef USER32
+  return copyout_64to32(kaddr, uaddr);
+#else /* USER32 */
+  return copyout(kaddr, uaddr, sizeof(*kaddr));
+#endif /* USER32 */
+}
+
+/* Copy a user port name into a kernel port value; on a 64-bit kernel
+ * names are 32-bit while kernel ports are wider. */
+static inline int copyin_port(const mach_port_name_t *uaddr, mach_port_t *kaddr)
+{
+#ifdef __x86_64__
+  return copyin_32to64(uaddr, kaddr);
+#else /* __x86_64__ */
+  return copyin(uaddr, kaddr, sizeof(*uaddr));
+#endif /* __x86_64__ */
+}
+
+/* Copy a kernel port value out as a user port name; on a 64-bit kernel
+ * it is narrowed to the 32-bit name representation. */
+static inline int copyout_port(const mach_port_t *kaddr, mach_port_name_t *uaddr)
+{
+#ifdef __x86_64__
+  return copyout_64to32(kaddr, uaddr);
+#else /* __x86_64__ */
+  return copyout(kaddr, uaddr, sizeof(*kaddr));
+#endif /* __x86_64__ */
+}
+
+#if defined(__x86_64__) && defined(USER32)
+/* For 32 bit userland, kernel and user land messages are not the same size. */
+size_t msg_usize(const mach_msg_header_t *kmsg);
+#else
+/* Kernel and user messages share a layout, so the user-visible size is
+ * exactly the size recorded in the message header. */
+static inline size_t msg_usize(const mach_msg_header_t *kmsg)
+{
+  return kmsg->msgh_size;
+}
+#endif /* __x86_64__ && USER32 */
+
+#endif /* COPY_USER_H */
diff --git a/i386/i386/cpu.h b/i386/i386/cpu.h
new file mode 100644
index 0000000..1bf40dc
--- /dev/null
+++ b/i386/i386/cpu.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_CPU_H
+#define _X86_CPU_H
+
+#include <kern/macros.h>
+
+/*
+ * EFLAGS register flags.
+ */
+#define CPU_EFL_ONE 0x00000002
+#define CPU_EFL_IF 0x00000200
+
+/*
+ * Return the content of the EFLAGS register.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline unsigned long
+cpu_get_eflags(void)
+{
+    unsigned long eflags;
+
+    /* pushf/pop pair reads EFLAGS; the "memory" clobber makes this
+     * a compiler barrier. */
+    asm volatile("pushf\n"
+                 "pop %0\n"
+                 : "=r" (eflags)
+                 : : "memory");
+
+    return eflags;
+}
+
+/*
+ * Enable local interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_enable(void)
+{
+    /* sti; the "memory" clobber makes this a compiler barrier. */
+    asm volatile("sti" : : : "memory");
+}
+
+/*
+ * Disable local interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_disable(void)
+{
+    /* cli; the "memory" clobber makes this a compiler barrier. */
+    asm volatile("cli" : : : "memory");
+}
+
+/*
+ * Restore the content of the EFLAGS register, possibly enabling interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_restore(unsigned long flags)
+{
+    /* push/popf writes the whole EFLAGS register, restoring IF to
+     * whatever cpu_intr_save() captured. */
+    asm volatile("push %0\n"
+                 "popf\n"
+                 : : "r" (flags)
+                 : "memory");
+}
+
+/*
+ * Disable local interrupts, returning the previous content of the EFLAGS
+ * register.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_save(unsigned long *flags)
+{
+    /* Capture EFLAGS before disabling, so the caller can later hand
+     * *flags to cpu_intr_restore(). */
+    *flags = cpu_get_eflags();
+    cpu_intr_disable();
+}
+
+/*
+ * Return true (1) if the interrupt flag is set in EFLAGS, 0 otherwise.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline int
+cpu_intr_enabled(void)
+{
+    return (cpu_get_eflags() & CPU_EFL_IF) != 0;
+}
+
+#endif /* _X86_CPU_H */
diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
new file mode 100644
index 0000000..67c19e9
--- /dev/null
+++ b/i386/i386/cpu_number.h
@@ -0,0 +1,119 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent definitions for cpu identification.
+ *
+ */
+#ifndef _I386_CPU_NUMBER_H_
+#define _I386_CPU_NUMBER_H_
+
+#if NCPUS > 1
+
+#define MY(stm) %gs:PERCPU_##stm
+
+#ifdef __i386__
+#define CX(addr, reg) addr(,reg,4)
+#endif
+#ifdef __x86_64__
+#define CX(addr, reg) addr(,reg,8)
+#endif
+
+#define CPU_NUMBER_NO_STACK(reg) \
+ movl %cs:lapic, reg ;\
+ movl %cs:APIC_ID(reg), reg ;\
+ shrl $24, reg ;\
+ movl %cs:CX(cpu_id_lut, reg), reg ;\
+
+#ifdef __i386__
+/* Never call CPU_NUMBER_NO_GS(%esi) */
+#define CPU_NUMBER_NO_GS(reg) \
+ pushl %esi ;\
+ pushl %eax ;\
+ pushl %ebx ;\
+ pushl %ecx ;\
+ pushl %edx ;\
+ movl $1, %eax ;\
+ cpuid ;\
+ shrl $24, %ebx ;\
+ movl %cs:CX(cpu_id_lut, %ebx), %esi ;\
+ popl %edx ;\
+ popl %ecx ;\
+ popl %ebx ;\
+ popl %eax ;\
+ movl %esi, reg ;\
+ popl %esi
+#endif
+#ifdef __x86_64__
+/* Never call CPU_NUMBER_NO_GS(%esi) */
+#define CPU_NUMBER_NO_GS(reg) \
+ pushq %rsi ;\
+ pushq %rax ;\
+ pushq %rbx ;\
+ pushq %rcx ;\
+ pushq %rdx ;\
+ movl $1, %eax ;\
+ cpuid ;\
+ shrl $24, %ebx ;\
+ movl %cs:CX(cpu_id_lut, %ebx), %esi ;\
+ popq %rdx ;\
+ popq %rcx ;\
+ popq %rbx ;\
+ popq %rax ;\
+ movl %esi, reg ;\
+ popq %rsi
+#endif
+
+#define CPU_NUMBER(reg) \
+ movl MY(CPU_ID), reg;
+
+#ifndef __ASSEMBLER__
+#include <kern/cpu_number.h>
+#include <i386/apic.h>
+#include <i386/percpu.h>
+
+static inline int cpu_number_slow(void)
+{
+ return cpu_id_lut[apic_get_current_cpu()];
+}
+
+static inline int cpu_number(void)
+{
+ return percpu_get(int, cpu_id);
+}
+#endif
+
+#else /* NCPUS == 1 */
+
+#define MY(stm) (percpu_array + PERCPU_##stm)
+
+#define CPU_NUMBER_NO_STACK(reg)
+#define CPU_NUMBER_NO_GS(reg)
+#define CPU_NUMBER(reg)
+#define CX(addr,reg) addr
+
+#endif /* NCPUS == 1 */
+
+#endif /* _I386_CPU_NUMBER_H_ */
diff --git a/i386/i386/cpuboot.S b/i386/i386/cpuboot.S
new file mode 100644
index 0000000..7e6c477
--- /dev/null
+++ b/i386/i386/cpuboot.S
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2022 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#if NCPUS > 1
+#include <mach/machine/asm.h>
+#include <i386/i386asm.h>
+#include <i386/proc_reg.h>
+#include <i386/apic.h>
+#include <i386/cpu_number.h>
+#include <i386/seg.h>
+#include <i386/gdt.h>
+
+#define M(addr) (addr - apboot)
+#define CR0_CLEAR_FLAGS_CACHE_ENABLE (CR0_CD | CR0_NW)
+#define CR0_SET_FLAGS (CR0_CLEAR_FLAGS_CACHE_ENABLE | CR0_PE)
+#define CR0_CLEAR_FLAGS (CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP)
+#define BOOT_CS 0x8
+#define BOOT_DS 0x10
+
+.data
+
+.align 16
+apboot_idt_ptr:
+ .long 0
+.align 16
+ .word 0
+apboot_gdt_descr:
+ .word 14*8-1
+ .long apboot_gdt - KERNELBASE
+.align 16
+apboot_gdt:
+ /* NULL segment = 0x0 */
+ .quad 0
+
+ /* KERNEL_CS = 0x8 */
+ .word 0xffff /* Segment limit first 0-15 bits*/
+ .word (-KERNELBASE) & 0xffff /*Base first 0-15 bits*/
+ .byte ((-KERNELBASE) >> 16) & 0xff /*Base 16-23 bits */
+ .byte ACC_PL_K | ACC_CODE_R | ACC_P /*Access byte */
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf /* High 4 bits */
+ .byte ((-KERNELBASE) >> 24) & 0xff /*Base 24-31 bits */
+
+ /* KERNEL_DS = 0x10 */
+ .word 0xffff /*Segment limit */
+ .word (-KERNELBASE) & 0xffff /*Base first 0-15 bits*/
+ .byte ((-KERNELBASE) >> 16) & 0xff
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P /*Access byte*/
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf /* High 4 bits */
+ .byte ((-KERNELBASE) >> 24) & 0xff /*Base 24-31 bits */
+
+ /* LDT = 0x18 */
+ .quad 0
+
+ /* TSS = 0x20 */
+ .quad 0
+
+ /* USER_LDT = 0x28 */
+ .quad 0
+
+ /* USER_TSS = 0x30 */
+ .quad 0
+
+ /* LINEAR = 0x38 */
+ .quad 0
+
+ /* FPREGS = 0x40 */
+ .quad 0
+
+ /* USER_GDT = 0x48 and 0x50 */
+ .quad 0
+ .quad 0
+
+ /* USER_TSS64 = 0x58 */
+ .quad 0
+
+ /* USER_TSS64 upper half = 0x60 (64-bit TSS descriptors span two GDT slots) */
+ .quad 0
+
+ /* boot GS = 0x68 */
+ .word 0xffff
+apboot_percpu_low:
+ .word 0
+apboot_percpu_med:
+ .byte 0
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+apboot_percpu_high:
+ .byte 0
+
+.globl apboot, apbootend, gdt_descr_tmp, apboot_jmp_offset
+.align 16
+.code16
+
+apboot:
+_apboot:
+ /* This is now address CS:0 in real mode */
+
+ /* Set data seg same as code seg */
+ mov %cs, %dx
+ mov %dx, %ds
+
+ cli
+ xorl %eax, %eax
+ movl %eax, %cr3
+
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
+ mov %ax, %ss
+
+ lgdt M(gdt_descr_tmp)
+
+ movl %cr0, %eax
+ andl $~CR0_CLEAR_FLAGS, %eax
+ orl $CR0_SET_FLAGS, %eax
+ movl %eax, %cr0
+
+ /* ljmpl with relocation from machine_init */
+ .byte 0x66
+ .byte 0xea
+apboot_jmp_offset:
+ .long M(0f)
+ .word BOOT_CS
+
+0:
+ .code32
+ /* Protected mode! */
+ movw $BOOT_DS, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %ss
+
+ lgdtl apboot_gdt_descr - KERNELBASE
+ ljmpl $KERNEL_CS, $1f
+1:
+ xorl %eax, %eax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movw $KERNEL_DS, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movw %ax, %ss
+
+ /* Get CPU number */
+ movl $1, %eax
+ cpuid
+ shrl $24, %ebx
+ movl %cs:CX(cpu_id_lut, %ebx), %ecx
+
+ /* Access per_cpu area */
+ movl %ecx,%eax
+ movl $PC_SIZE,%ebx
+ mul %ebx
+ addl $percpu_array - KERNELBASE, %eax
+
+ /* Record our cpu number */
+ movl %ecx, (PERCPU_CPU_ID + KERNELBASE)(%eax)
+
+ /* Set up temporary percpu descriptor */
+ movw %ax, apboot_percpu_low
+ shr $16, %eax
+ movb %al, apboot_percpu_med
+ shr $8, %ax
+ movb %al, apboot_percpu_high
+
+ movw $PERCPU_DS, %ax
+ movw %ax, %gs
+
+ /* Load null Interrupt descriptor table */
+ mov apboot_idt_ptr, %ebx
+ lidt (%ebx)
+
+ /* Enable local apic in xAPIC mode */
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $APIC_MSR, %ecx
+ rdmsr
+ orl $APIC_MSR_ENABLE, %eax
+ andl $(~(APIC_MSR_BSP | APIC_MSR_X2APIC)), %eax
+ movl $APIC_MSR, %ecx
+ wrmsr
+
+ /* Load int_stack_top[cpu] -> esp */
+ CPU_NUMBER_NO_STACK(%edx)
+ movl CX(EXT(int_stack_top), %edx), %esp
+
+ /* Ensure stack alignment */
+ andl $0xfffffff0, %esp
+
+ /* Reset EFLAGS to a known state */
+ pushl $0
+ popfl
+
+ /* Finish the cpu configuration */
+ call EXT(cpu_ap_main)
+
+ /* NOT REACHED */
+ hlt
+
+.align 16
+ .word 0
+gdt_descr_tmp:
+ .short 3*8-1
+ .long M(gdt_tmp)
+
+.align 16
+gdt_tmp:
+ /* 0 */
+ .quad 0
+ /* BOOT_CS */
+ .word 0xffff
+ .word 0x0000
+ .byte 0x00
+ .byte ACC_PL_K | ACC_CODE_R | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+ .byte 0x00
+ /* BOOT_DS */
+ .word 0xffff
+ .word 0x0000
+ .byte 0x00
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+ .byte 0x00
+
+_apbootend:
+apbootend:
+#endif
diff --git a/i386/i386/cswitch.S b/i386/i386/cswitch.S
new file mode 100644
index 0000000..2dee309
--- /dev/null
+++ b/i386/i386/cswitch.S
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/proc_reg.h>
+#include <i386/i386asm.h>
+#include <i386/cpu_number.h>
+#include <i386/gdt.h>
+
+/*
+ * Context switch routines for i386.
+ */
+
+ENTRY(Load_context)
+ movl S_ARG0,%ecx /* get thread */
+ movl TH_KERNEL_STACK(%ecx),%ecx /* get kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%edx
+ /* point to stack top */
+ CPU_NUMBER(%eax)
+ movl %ecx,MY(ACTIVE_STACK) /* store stack address */
+ movl %edx,CX(EXT(kernel_stack),%eax) /* store stack top */
+
+ movl KSS_ESP(%ecx),%esp /* switch stacks */
+ movl KSS_ESI(%ecx),%esi /* restore registers */
+ movl KSS_EDI(%ecx),%edi
+ movl KSS_EBP(%ecx),%ebp
+ movl KSS_EBX(%ecx),%ebx
+ xorl %eax,%eax /* return zero (no old thread) */
+ jmp *KSS_EIP(%ecx) /* resume thread */
+
+/*
+ * This really only has to save registers
+ * when there is no explicit continuation.
+ */
+
+ENTRY(Switch_context)
+ movl MY(ACTIVE_STACK),%ecx /* get old kernel stack */
+
+ movl %ebx,KSS_EBX(%ecx) /* save registers */
+ movl %ebp,KSS_EBP(%ecx)
+ movl %edi,KSS_EDI(%ecx)
+ movl %esi,KSS_ESI(%ecx)
+ popl KSS_EIP(%ecx) /* save return PC */
+ movl %esp,KSS_ESP(%ecx) /* save SP */
+
+ movl 0(%esp),%eax /* get old thread */
+ movl %ecx,TH_KERNEL_STACK(%eax) /* save old stack */
+ movl 4(%esp),%ebx /* get continuation */
+ movl %ebx,TH_SWAP_FUNC(%eax) /* save continuation */
+
+ movl 8(%esp),%esi /* get new thread */
+
+ movl TH_KERNEL_STACK(%esi),%ecx /* get its kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%ebx
+ /* point to stack top */
+
+ CPU_NUMBER(%edx)
+ movl %esi,MY(ACTIVE_THREAD) /* new thread is active */
+ movl %ecx,MY(ACTIVE_STACK) /* set current stack */
+ movl %ebx,CX(EXT(kernel_stack),%edx) /* set stack top */
+
+ movl KSS_ESP(%ecx),%esp /* switch stacks */
+ movl KSS_ESI(%ecx),%esi /* restore registers */
+ movl KSS_EDI(%ecx),%edi
+ movl KSS_EBP(%ecx),%ebp
+ movl KSS_EBX(%ecx),%ebx
+ jmp *KSS_EIP(%ecx) /* return old thread */
+
+ENTRY(Thread_continue)
+ pushl %eax /* push the thread argument */
+ xorl %ebp,%ebp /* zero frame pointer */
+ call *%ebx /* call real continuation */
+
+#if NCPUS > 1
+/*
+ * void switch_to_shutdown_context(thread_t thread,
+ * void (*routine)(processor_t),
+ * processor_t processor)
+ *
+ * saves the kernel context of the thread,
+ * switches to the interrupt stack,
+ * continues the thread (with thread_continue),
+ * then runs routine on the interrupt stack.
+ *
+ * Assumes that the thread is a kernel thread (thus
+ * has no FPU state)
+ */
+ENTRY(switch_to_shutdown_context)
+ movl MY(ACTIVE_STACK),%ecx /* get old kernel stack */
+ movl %ebx,KSS_EBX(%ecx) /* save registers */
+ movl %ebp,KSS_EBP(%ecx)
+ movl %edi,KSS_EDI(%ecx)
+ movl %esi,KSS_ESI(%ecx)
+ popl KSS_EIP(%ecx) /* save return PC */
+ movl %esp,KSS_ESP(%ecx) /* save SP */
+
+ movl 0(%esp),%eax /* get old thread */
+ movl %ecx,TH_KERNEL_STACK(%eax) /* save old stack */
+ movl $0,TH_SWAP_FUNC(%eax) /* clear continuation */
+ movl 4(%esp),%ebx /* get routine to run next */
+ movl 8(%esp),%esi /* get its argument */
+
+ CPU_NUMBER(%edx)
+ movl CX(EXT(int_stack_base),%edx),%ecx /* point to its interrupt stack */
+ lea -4+INTSTACK_SIZE(%ecx),%esp /* switch to it (top) */
+
+ pushl %eax /* push thread */
+ call EXT(thread_dispatch) /* reschedule thread */
+ addl $4,%esp /* clean stack */
+
+ pushl %esi /* push argument */
+ call *%ebx /* call routine to run */
+ hlt /* (should never return) */
+
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/db_disasm.c b/i386/i386/db_disasm.c
new file mode 100644
index 0000000..303b462
--- /dev/null
+++ b/i386/i386/db_disasm.c
@@ -0,0 +1,1437 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+/*
+ * Instruction disassembler.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+
+#include <kern/task.h>
+
+/*
+ * Switch to disassemble 16-bit code.
+ */
+boolean_t db_disasm_16 = FALSE;
+
+/*
+ * Size attributes
+ */
+#define BYTE 0
+#define WORD 1
+#define LONG 2
+#define QUAD 3
+#define SNGL 4
+#define DBLR 5
+#define EXTR 6
+#define SDEP 7
+#define NONE 8
+
+/*
+ * Addressing modes
+ */
+#define E 1 /* general effective address */
+#define Eind 2 /* indirect address (jump, call) */
+#define El 3 /* address, long size */
+#define Ew 4 /* address, word size */
+#define Eb 5 /* address, byte size */
+#define R 6 /* register, in 'reg' field */
+#define Rw 7 /* word register, in 'reg' field */
+#define Ri 8 /* register in instruction */
+#define S 9 /* segment reg, in 'reg' field */
+#define Si 10 /* segment reg, in instruction */
+#define A 11 /* accumulator */
+#define BX 12 /* (bx) */
+#define CL 13 /* cl, for shifts */
+#define DX 14 /* dx, for IO */
+#define SI 15 /* si */
+#define DI 16 /* di */
+#define CR 17 /* control register */
+#define DR 18 /* debug register */
+#define TR 19 /* test register */
+#define I 20 /* immediate, unsigned */
+#define Is 21 /* immediate, signed */
+#define Ib 22 /* byte immediate, unsigned */
+#define Ibs 23 /* byte immediate, signed */
+#define Iw 24 /* word immediate, unsigned */
+#define Il 25 /* long immediate */
+#define O 26 /* direct address */
+#define Db 27 /* byte displacement from EIP */
+#define Dl 28 /* long displacement from EIP */
+#define o1 29 /* constant 1 */
+#define o3 30 /* constant 3 */
+#define OS 31 /* immediate offset/segment */
+#define ST 32 /* FP stack top */
+#define STI 33 /* FP stack */
+#define X 34 /* extended FP op */
+#define XA 35 /* for 'fstcw %ax' */
+#define Iba 36 /* byte immediate, don't print if 0xa */
+
+struct inst {
+ char * i_name; /* name */
+ short i_has_modrm; /* has regmodrm byte */
+ short i_size; /* operand size */
+ int i_mode; /* addressing modes */
+ char * i_extra; /* pointer to extra opcode table */
+};
+
+#define op1(x) (x)
+#define op2(x,y) ((x)|((y)<<8))
+#define op3(x,y,z) ((x)|((y)<<8)|((z)<<16))
+
+struct finst {
+ char * f_name; /* name for memory instruction */
+ int f_size; /* size for memory instruction */
+ int f_rrmode; /* mode for rr instruction */
+ char * f_rrname; /* name for rr instruction
+ (or pointer to table) */
+};
+
+char * db_Grp6[] = {
+ "sldt",
+ "str",
+ "lldt",
+ "ltr",
+ "verr",
+ "verw",
+ "",
+ ""
+};
+
+char * db_Grp7[] = {
+ "sgdt",
+ "sidt",
+ "lgdt",
+ "lidt",
+ "smsw",
+ "",
+ "lmsw",
+ "invlpg"
+};
+
+char * db_Grp8[] = {
+ "",
+ "",
+ "",
+ "",
+ "bt",
+ "bts",
+ "btr",
+ "btc"
+};
+
+struct inst db_inst_0f0x[] = {
+/*00*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp6 },
+/*01*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp7 },
+/*02*/ { "lar", TRUE, LONG, op2(E,R), 0 },
+/*03*/ { "lsl", TRUE, LONG, op2(E,R), 0 },
+/*04*/ { "", FALSE, NONE, 0, 0 },
+/*05*/ { "", FALSE, NONE, 0, 0 },
+/*06*/ { "clts", FALSE, NONE, 0, 0 },
+/*07*/ { "", FALSE, NONE, 0, 0 },
+
+/*08*/ { "invd", FALSE, NONE, 0, 0 },
+/*09*/ { "wbinvd",FALSE, NONE, 0, 0 },
+/*0a*/ { "", FALSE, NONE, 0, 0 },
+/*0b*/ { "ud2", FALSE, NONE, 0, 0 },
+/*0c*/ { "", FALSE, NONE, 0, 0 },
+/*0d*/ { "", FALSE, NONE, 0, 0 },
+/*0e*/ { "", FALSE, NONE, 0, 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f2x[] = {
+/*20*/ { "mov", TRUE, LONG, op2(CR,El), 0 }, /* use El for reg */
+/*21*/ { "mov", TRUE, LONG, op2(DR,El), 0 }, /* since mod == 11 */
+/*22*/ { "mov", TRUE, LONG, op2(El,CR), 0 },
+/*23*/ { "mov", TRUE, LONG, op2(El,DR), 0 },
+/*24*/ { "mov", TRUE, LONG, op2(TR,El), 0 },
+/*25*/ { "", FALSE, NONE, 0, 0 },
+/*26*/ { "mov", TRUE, LONG, op2(El,TR), 0 },
+/*27*/ { "", FALSE, NONE, 0, 0 },
+
+/*28*/ { "", FALSE, NONE, 0, 0 },
+/*29*/ { "", FALSE, NONE, 0, 0 },
+/*2a*/ { "", FALSE, NONE, 0, 0 },
+/*2b*/ { "", FALSE, NONE, 0, 0 },
+/*2c*/ { "", FALSE, NONE, 0, 0 },
+/*2d*/ { "", FALSE, NONE, 0, 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f8x[] = {
+/*80*/ { "jo", FALSE, NONE, op1(Dl), 0 },
+/*81*/ { "jno", FALSE, NONE, op1(Dl), 0 },
+/*82*/ { "jb", FALSE, NONE, op1(Dl), 0 },
+/*83*/ { "jnb", FALSE, NONE, op1(Dl), 0 },
+/*84*/ { "jz", FALSE, NONE, op1(Dl), 0 },
+/*85*/ { "jnz", FALSE, NONE, op1(Dl), 0 },
+/*86*/ { "jbe", FALSE, NONE, op1(Dl), 0 },
+/*87*/ { "jnbe", FALSE, NONE, op1(Dl), 0 },
+
+/*88*/ { "js", FALSE, NONE, op1(Dl), 0 },
+/*89*/ { "jns", FALSE, NONE, op1(Dl), 0 },
+/*8a*/ { "jp", FALSE, NONE, op1(Dl), 0 },
+/*8b*/ { "jnp", FALSE, NONE, op1(Dl), 0 },
+/*8c*/ { "jl", FALSE, NONE, op1(Dl), 0 },
+/*8d*/ { "jnl", FALSE, NONE, op1(Dl), 0 },
+/*8e*/ { "jle", FALSE, NONE, op1(Dl), 0 },
+/*8f*/ { "jnle", FALSE, NONE, op1(Dl), 0 },
+};
+
+struct inst db_inst_0f9x[] = {
+/*90*/ { "seto", TRUE, NONE, op1(Eb), 0 },
+/*91*/ { "setno", TRUE, NONE, op1(Eb), 0 },
+/*92*/ { "setb", TRUE, NONE, op1(Eb), 0 },
+/*93*/ { "setnb", TRUE, NONE, op1(Eb), 0 },
+/*94*/ { "setz", TRUE, NONE, op1(Eb), 0 },
+/*95*/ { "setnz", TRUE, NONE, op1(Eb), 0 },
+/*96*/ { "setbe", TRUE, NONE, op1(Eb), 0 },
+/*97*/ { "setnbe",TRUE, NONE, op1(Eb), 0 },
+
+/*98*/ { "sets", TRUE, NONE, op1(Eb), 0 },
+/*99*/ { "setns", TRUE, NONE, op1(Eb), 0 },
+/*9a*/ { "setp", TRUE, NONE, op1(Eb), 0 },
+/*9b*/ { "setnp", TRUE, NONE, op1(Eb), 0 },
+/*9c*/ { "setl", TRUE, NONE, op1(Eb), 0 },
+/*9d*/ { "setnl", TRUE, NONE, op1(Eb), 0 },
+/*9e*/ { "setle", TRUE, NONE, op1(Eb), 0 },
+/*9f*/ { "setnle",TRUE, NONE, op1(Eb), 0 },
+};
+
struct inst db_inst_0fax[] = {
/*a0*/ { "push", FALSE, NONE, op1(Si), 0 },
/*a1*/ { "pop", FALSE, NONE, op1(Si), 0 },
/*a2*/ { "", FALSE, NONE, 0, 0 },
/*a3*/ { "bt", TRUE, LONG, op2(R,E), 0 },
/*a4*/ { "shld", TRUE, LONG, op3(Ib,E,R), 0 },
/*a5*/ { "shld", TRUE, LONG, op3(CL,E,R), 0 },
/*a6*/ { "", FALSE, NONE, 0, 0 },
/*a7*/ { "", FALSE, NONE, 0, 0 },

/*a8*/ { "push", FALSE, NONE, op1(Si), 0 },
/*a9*/ { "pop", FALSE, NONE, op1(Si), 0 },
/*aa*/ { "", FALSE, NONE, 0, 0 },
/*ab*/ { "bts", TRUE, LONG, op2(R,E), 0 },
/*ac*/ { "shrd", TRUE, LONG, op3(Ib,E,R), 0 },
/*ad*/ { "shrd", TRUE, LONG, op3(CL,E,R), 0 },
/*ae*/ { "", FALSE, NONE, 0, 0 },
/*af*/ { "imul", TRUE, LONG, op2(E,R), 0 },
};
+
+struct inst db_inst_0fbx[] = {
+/*b0*/ { "", FALSE, NONE, 0, 0 },
+/*b1*/ { "", FALSE, NONE, 0, 0 },
+/*b2*/ { "lss", TRUE, LONG, op2(E, R), 0 },
+/*b3*/ { "btr", TRUE, LONG, op2(R, E), 0 },
+/*b4*/ { "lfs", TRUE, LONG, op2(E, R), 0 },
+/*b5*/ { "lgs", TRUE, LONG, op2(E, R), 0 },
+/*b6*/ { "movzb", TRUE, LONG, op2(Eb,R), 0 },
+/*b7*/ { "movzw", TRUE, LONG, op2(Ew,R), 0 },
+
+/*b8*/ { "", FALSE, NONE, 0, 0 },
+/*b9*/ { "", FALSE, NONE, 0, 0 },
+/*ba*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp8 },
+/*bb*/ { "btc", TRUE, LONG, op2(R, E), 0 },
+/*bc*/ { "bsf", TRUE, LONG, op2(E, R), 0 },
+/*bd*/ { "bsr", TRUE, LONG, op2(E, R), 0 },
+/*be*/ { "movsb", TRUE, LONG, op2(Eb,R), 0 },
+/*bf*/ { "movsw", TRUE, LONG, op2(Ew,R), 0 },
+};
+
+struct inst db_inst_0fcx[] = {
+/*c0*/ { "xadd", TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "xadd", TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*c9*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ca*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cb*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cc*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cd*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ce*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cf*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+};
+
struct inst db_inst_0fdx[] = {
/*d0*/ { "cmpxchg",TRUE, BYTE, op2(R, E), 0 },
/*d1*/ { "cmpxchg",TRUE, LONG, op2(R, E), 0 },
/*d2*/ { "", FALSE, NONE, 0, 0 },
/*d3*/ { "", FALSE, NONE, 0, 0 },
/*d4*/ { "", FALSE, NONE, 0, 0 },
/*d5*/ { "", FALSE, NONE, 0, 0 },
/*d6*/ { "", FALSE, NONE, 0, 0 },
/*d7*/ { "", FALSE, NONE, 0, 0 },
/*d8*/ { "", FALSE, NONE, 0, 0 },
/*d9*/ { "", FALSE, NONE, 0, 0 },
/*da*/ { "", FALSE, NONE, 0, 0 },
/*db*/ { "", FALSE, NONE, 0, 0 },
/*dc*/ { "", FALSE, NONE, 0, 0 },
/*dd*/ { "", FALSE, NONE, 0, 0 },
/*de*/ { "", FALSE, NONE, 0, 0 },
/*df*/ { "", FALSE, NONE, 0, 0 },
};
+
+struct inst *db_inst_0f[] = {
+ db_inst_0f0x,
+ 0,
+ db_inst_0f2x,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ db_inst_0f8x,
+ db_inst_0f9x,
+ db_inst_0fax,
+ db_inst_0fbx,
+ db_inst_0fcx,
+ db_inst_0fdx,
+ 0,
+ 0
+};
+
+char * db_Esc92[] = {
+ "fnop", "", "", "", "", "", "", ""
+};
+char * db_Esc93[] = {
+ "", "", "", "", "", "", "", ""
+};
+char * db_Esc94[] = {
+ "fchs", "fabs", "", "", "ftst", "fxam", "", ""
+};
+char * db_Esc95[] = {
+ "fld1", "fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz",""
+};
+char * db_Esc96[] = {
+ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp",
+ "fincstp"
+};
+char * db_Esc97[] = {
+ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"
+};
+
+char * db_Esca4[] = {
+ "", "fucompp","", "", "", "", "", ""
+};
+
+char * db_Escb4[] = {
+ "", "", "fnclex","fninit","", "", "", ""
+};
+
+char * db_Esce3[] = {
+ "", "fcompp","", "", "", "", "", ""
+};
+
+char * db_Escf4[] = {
+ "fnstsw","", "", "", "", "", "", ""
+};
+
+struct finst db_Esc8[] = {
+/*0*/ { "fadd", SNGL, op2(STI,ST), 0 },
+/*1*/ { "fmul", SNGL, op2(STI,ST), 0 },
+/*2*/ { "fcom", SNGL, op2(STI,ST), 0 },
+/*3*/ { "fcomp", SNGL, op2(STI,ST), 0 },
+/*4*/ { "fsub", SNGL, op2(STI,ST), 0 },
+/*5*/ { "fsubr", SNGL, op2(STI,ST), 0 },
+/*6*/ { "fdiv", SNGL, op2(STI,ST), 0 },
+/*7*/ { "fdivr", SNGL, op2(STI,ST), 0 },
+};
+
+struct finst db_Esc9[] = {
+/*0*/ { "fld", SNGL, op1(STI), 0 },
+/*1*/ { "", NONE, op1(STI), "fxch" },
+/*2*/ { "fst", SNGL, op1(X), (char *)db_Esc92 },
+/*3*/ { "fstp", SNGL, op1(X), (char *)db_Esc93 },
+/*4*/ { "fldenv", NONE, op1(X), (char *)db_Esc94 },
+/*5*/ { "fldcw", NONE, op1(X), (char *)db_Esc95 },
+/*6*/ { "fnstenv",NONE, op1(X), (char *)db_Esc96 },
+/*7*/ { "fnstcw", NONE, op1(X), (char *)db_Esc97 },
+};
+
+struct finst db_Esca[] = {
+/*0*/ { "fiadd", WORD, 0, 0 },
+/*1*/ { "fimul", WORD, 0, 0 },
+/*2*/ { "ficom", WORD, 0, 0 },
+/*3*/ { "ficomp", WORD, 0, 0 },
+/*4*/ { "fisub", WORD, op1(X), (char *)db_Esca4 },
+/*5*/ { "fisubr", WORD, 0, 0 },
+/*6*/ { "fidiv", WORD, 0, 0 },
+/*7*/ { "fidivr", WORD, 0, 0 }
+};
+
+struct finst db_Escb[] = {
+/*0*/ { "fild", WORD, 0, 0 },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fist", WORD, 0, 0 },
+/*3*/ { "fistp", WORD, 0, 0 },
+/*4*/ { "", WORD, op1(X), (char *)db_Escb4 },
+/*5*/ { "fld", EXTR, 0, 0 },
+/*6*/ { "", WORD, 0, 0 },
+/*7*/ { "fstp", EXTR, 0, 0 },
+};
+
+struct finst db_Escc[] = {
+/*0*/ { "fadd", DBLR, op2(ST,STI), 0 },
+/*1*/ { "fmul", DBLR, op2(ST,STI), 0 },
+/*2*/ { "fcom", DBLR, op2(ST,STI), 0 },
+/*3*/ { "fcomp", DBLR, op2(ST,STI), 0 },
+/*4*/ { "fsub", DBLR, op2(ST,STI), "fsubr" },
+/*5*/ { "fsubr", DBLR, op2(ST,STI), "fsub" },
+/*6*/ { "fdiv", DBLR, op2(ST,STI), "fdivr" },
+/*7*/ { "fdivr", DBLR, op2(ST,STI), "fdiv" },
+};
+
+struct finst db_Escd[] = {
+/*0*/ { "fld", DBLR, op1(STI), "ffree" },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fst", DBLR, op1(STI), 0 },
+/*3*/ { "fstp", DBLR, op1(STI), 0 },
+/*4*/ { "frstor", NONE, op1(STI), "fucom" },
+/*5*/ { "", NONE, op1(STI), "fucomp" },
+/*6*/ { "fnsave", NONE, 0, 0 },
+/*7*/ { "fnstsw", NONE, 0, 0 },
+};
+
+struct finst db_Esce[] = {
+/*0*/ { "fiadd", LONG, op2(ST,STI), "faddp" },
+/*1*/ { "fimul", LONG, op2(ST,STI), "fmulp" },
+/*2*/ { "ficom", LONG, 0, 0 },
+/*3*/ { "ficomp", LONG, op1(X), (char *)db_Esce3 },
+/*4*/ { "fisub", LONG, op2(ST,STI), "fsubrp" },
+/*5*/ { "fisubr", LONG, op2(ST,STI), "fsubp" },
+/*6*/ { "fidiv", LONG, op2(ST,STI), "fdivrp" },
+/*7*/ { "fidivr", LONG, op2(ST,STI), "fdivp" },
+};
+
+struct finst db_Escf[] = {
+/*0*/ { "fild", LONG, 0, 0 },
+/*1*/ { "", LONG, 0, 0 },
+/*2*/ { "fist", LONG, 0, 0 },
+/*3*/ { "fistp", LONG, 0, 0 },
+/*4*/ { "fbld", NONE, op1(XA), (char *)db_Escf4 },
+/*5*/ { "fld", QUAD, 0, 0 },
+/*6*/ { "fbstp", NONE, 0, 0 },
+/*7*/ { "fstp", QUAD, 0, 0 },
+};
+
+struct finst *db_Esc_inst[] = {
+ db_Esc8, db_Esc9, db_Esca, db_Escb,
+ db_Escc, db_Escd, db_Esce, db_Escf
+};
+
+char * db_Grp1[] = {
+ "add",
+ "or",
+ "adc",
+ "sbb",
+ "and",
+ "sub",
+ "xor",
+ "cmp"
+};
+
+char * db_Grp2[] = {
+ "rol",
+ "ror",
+ "rcl",
+ "rcr",
+ "shl",
+ "shr",
+ "shl",
+ "sar"
+};
+
+struct inst db_Grp3[] = {
+ { "test", TRUE, NONE, op2(I,E), 0 },
+ { "test", TRUE, NONE, op2(I,E), 0 },
+ { "not", TRUE, NONE, op1(E), 0 },
+ { "neg", TRUE, NONE, op1(E), 0 },
+ { "mul", TRUE, NONE, op2(E,A), 0 },
+ { "imul", TRUE, NONE, op2(E,A), 0 },
+ { "div", TRUE, NONE, op2(E,A), 0 },
+ { "idiv", TRUE, NONE, op2(E,A), 0 },
+};
+
+struct inst db_Grp4[] = {
+ { "inc", TRUE, BYTE, op1(E), 0 },
+ { "dec", TRUE, BYTE, op1(E), 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 }
+};
+
+struct inst db_Grp5[] = {
+ { "inc", TRUE, LONG, op1(E), 0 },
+ { "dec", TRUE, LONG, op1(E), 0 },
+ { "call", TRUE, NONE, op1(Eind),0 },
+ { "lcall", TRUE, NONE, op1(Eind),0 },
+ { "jmp", TRUE, NONE, op1(Eind),0 },
+ { "ljmp", TRUE, NONE, op1(Eind),0 },
+ { "push", TRUE, LONG, op1(E), 0 },
+ { "", TRUE, NONE, 0, 0 }
+};
+
+struct inst db_inst_table[256] = {
+/*00*/ { "add", TRUE, BYTE, op2(R, E), 0 },
+/*01*/ { "add", TRUE, LONG, op2(R, E), 0 },
+/*02*/ { "add", TRUE, BYTE, op2(E, R), 0 },
+/*03*/ { "add", TRUE, LONG, op2(E, R), 0 },
+/*04*/ { "add", FALSE, BYTE, op2(Is, A), 0 },
+/*05*/ { "add", FALSE, LONG, op2(Is, A), 0 },
+/*06*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*07*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*08*/ { "or", TRUE, BYTE, op2(R, E), 0 },
+/*09*/ { "or", TRUE, LONG, op2(R, E), 0 },
+/*0a*/ { "or", TRUE, BYTE, op2(E, R), 0 },
+/*0b*/ { "or", TRUE, LONG, op2(E, R), 0 },
+/*0c*/ { "or", FALSE, BYTE, op2(I, A), 0 },
+/*0d*/ { "or", FALSE, LONG, op2(I, A), 0 },
+/*0e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+
+/*10*/ { "adc", TRUE, BYTE, op2(R, E), 0 },
+/*11*/ { "adc", TRUE, LONG, op2(R, E), 0 },
+/*12*/ { "adc", TRUE, BYTE, op2(E, R), 0 },
+/*13*/ { "adc", TRUE, LONG, op2(E, R), 0 },
+/*14*/ { "adc", FALSE, BYTE, op2(Is, A), 0 },
+/*15*/ { "adc", FALSE, LONG, op2(Is, A), 0 },
+/*16*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*17*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*18*/ { "sbb", TRUE, BYTE, op2(R, E), 0 },
+/*19*/ { "sbb", TRUE, LONG, op2(R, E), 0 },
+/*1a*/ { "sbb", TRUE, BYTE, op2(E, R), 0 },
+/*1b*/ { "sbb", TRUE, LONG, op2(E, R), 0 },
+/*1c*/ { "sbb", FALSE, BYTE, op2(Is, A), 0 },
+/*1d*/ { "sbb", FALSE, LONG, op2(Is, A), 0 },
+/*1e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*1f*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*20*/ { "and", TRUE, BYTE, op2(R, E), 0 },
+/*21*/ { "and", TRUE, LONG, op2(R, E), 0 },
+/*22*/ { "and", TRUE, BYTE, op2(E, R), 0 },
+/*23*/ { "and", TRUE, LONG, op2(E, R), 0 },
+/*24*/ { "and", FALSE, BYTE, op2(I, A), 0 },
+/*25*/ { "and", FALSE, LONG, op2(I, A), 0 },
+/*26*/ { "", FALSE, NONE, 0, 0 },
+/*27*/ { "aaa", FALSE, NONE, 0, 0 },
+
+/*28*/ { "sub", TRUE, BYTE, op2(R, E), 0 },
+/*29*/ { "sub", TRUE, LONG, op2(R, E), 0 },
+/*2a*/ { "sub", TRUE, BYTE, op2(E, R), 0 },
+/*2b*/ { "sub", TRUE, LONG, op2(E, R), 0 },
+/*2c*/ { "sub", FALSE, BYTE, op2(Is, A), 0 },
+/*2d*/ { "sub", FALSE, LONG, op2(Is, A), 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "das", FALSE, NONE, 0, 0 },
+
+/*30*/ { "xor", TRUE, BYTE, op2(R, E), 0 },
+/*31*/ { "xor", TRUE, LONG, op2(R, E), 0 },
+/*32*/ { "xor", TRUE, BYTE, op2(E, R), 0 },
+/*33*/ { "xor", TRUE, LONG, op2(E, R), 0 },
+/*34*/ { "xor", FALSE, BYTE, op2(I, A), 0 },
+/*35*/ { "xor", FALSE, LONG, op2(I, A), 0 },
+/*36*/ { "", FALSE, NONE, 0, 0 },
+/*37*/ { "daa", FALSE, NONE, 0, 0 },
+
+/*38*/ { "cmp", TRUE, BYTE, op2(R, E), 0 },
+/*39*/ { "cmp", TRUE, LONG, op2(R, E), 0 },
+/*3a*/ { "cmp", TRUE, BYTE, op2(E, R), 0 },
+/*3b*/ { "cmp", TRUE, LONG, op2(E, R), 0 },
+/*3c*/ { "cmp", FALSE, BYTE, op2(Is, A), 0 },
+/*3d*/ { "cmp", FALSE, LONG, op2(Is, A), 0 },
+/*3e*/ { "", FALSE, NONE, 0, 0 },
+/*3f*/ { "aas", FALSE, NONE, 0, 0 },
+
+/*40*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*41*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*42*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*43*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*44*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*45*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*46*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*47*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+
+/*48*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*49*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4a*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4b*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4c*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4d*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4e*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4f*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+
+/*50*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*51*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*52*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*53*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*54*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*55*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*56*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*57*/ { "push", FALSE, LONG, op1(Ri), 0 },
+
+/*58*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*59*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5a*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5b*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5c*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5d*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5e*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5f*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+
+/*60*/ { "pusha", FALSE, LONG, 0, 0 },
+/*61*/ { "popa", FALSE, LONG, 0, 0 },
+/*62*/ { "bound", TRUE, LONG, op2(E, R), 0 },
+/*63*/ { "arpl", TRUE, NONE, op2(Ew,Rw), 0 },
+
+/*64*/ { "", FALSE, NONE, 0, 0 },
+/*65*/ { "", FALSE, NONE, 0, 0 },
+/*66*/ { "", FALSE, NONE, 0, 0 },
+/*67*/ { "", FALSE, NONE, 0, 0 },
+
+/*68*/ { "push", FALSE, LONG, op1(I), 0 },
+/*69*/ { "imul", TRUE, LONG, op3(I,E,R), 0 },
+/*6a*/ { "push", FALSE, LONG, op1(Ib), 0 },
+/*6b*/ { "imul", TRUE, LONG, op3(Ibs,E,R),0 },
+/*6c*/ { "ins", FALSE, BYTE, op2(DX, DI), 0 },
+/*6d*/ { "ins", FALSE, LONG, op2(DX, DI), 0 },
+/*6e*/ { "outs", FALSE, BYTE, op2(SI, DX), 0 },
+/*6f*/ { "outs", FALSE, LONG, op2(SI, DX), 0 },
+
+/*70*/ { "jo", FALSE, NONE, op1(Db), 0 },
+/*71*/ { "jno", FALSE, NONE, op1(Db), 0 },
+/*72*/ { "jb", FALSE, NONE, op1(Db), 0 },
+/*73*/ { "jnb", FALSE, NONE, op1(Db), 0 },
+/*74*/ { "jz", FALSE, NONE, op1(Db), 0 },
+/*75*/ { "jnz", FALSE, NONE, op1(Db), 0 },
+/*76*/ { "jbe", FALSE, NONE, op1(Db), 0 },
+/*77*/ { "jnbe", FALSE, NONE, op1(Db), 0 },
+
+/*78*/ { "js", FALSE, NONE, op1(Db), 0 },
+/*79*/ { "jns", FALSE, NONE, op1(Db), 0 },
+/*7a*/ { "jp", FALSE, NONE, op1(Db), 0 },
+/*7b*/ { "jnp", FALSE, NONE, op1(Db), 0 },
+/*7c*/ { "jl", FALSE, NONE, op1(Db), 0 },
+/*7d*/ { "jnl", FALSE, NONE, op1(Db), 0 },
+/*7e*/ { "jle", FALSE, NONE, op1(Db), 0 },
+/*7f*/ { "jnle", FALSE, NONE, op1(Db), 0 },
+
+/*80*/ { "", TRUE, BYTE, op2(I, E), (char *)db_Grp1 },
+/*81*/ { "", TRUE, LONG, op2(I, E), (char *)db_Grp1 },
+/*82*/ { "", TRUE, BYTE, op2(Is,E), (char *)db_Grp1 },
+/*83*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp1 },
+/*84*/ { "test", TRUE, BYTE, op2(R, E), 0 },
+/*85*/ { "test", TRUE, LONG, op2(R, E), 0 },
+/*86*/ { "xchg", TRUE, BYTE, op2(R, E), 0 },
+/*87*/ { "xchg", TRUE, LONG, op2(R, E), 0 },
+
+/*88*/ { "mov", TRUE, BYTE, op2(R, E), 0 },
+/*89*/ { "mov", TRUE, LONG, op2(R, E), 0 },
+/*8a*/ { "mov", TRUE, BYTE, op2(E, R), 0 },
+/*8b*/ { "mov", TRUE, LONG, op2(E, R), 0 },
+/*8c*/ { "mov", TRUE, NONE, op2(S, Ew), 0 },
+/*8d*/ { "lea", TRUE, LONG, op2(E, R), 0 },
+/*8e*/ { "mov", TRUE, NONE, op2(Ew, S), 0 },
+/*8f*/ { "pop", TRUE, LONG, op1(E), 0 },
+
+/*90*/ { "nop", FALSE, NONE, 0, 0 },
+/*91*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*92*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*93*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*94*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*95*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*96*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*97*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+
+/*98*/ { "cbw", FALSE, SDEP, 0, "cwde" }, /* cbw/cwde */
+/*99*/ { "cwd", FALSE, SDEP, 0, "cdq" }, /* cwd/cdq */
+/*9a*/ { "lcall", FALSE, NONE, op1(OS), 0 },
+/*9b*/ { "wait", FALSE, NONE, 0, 0 },
+/*9c*/ { "pushf", FALSE, LONG, 0, 0 },
+/*9d*/ { "popf", FALSE, LONG, 0, 0 },
+/*9e*/ { "sahf", FALSE, NONE, 0, 0 },
+/*9f*/ { "lahf", FALSE, NONE, 0, 0 },
+
+/*a0*/ { "mov", FALSE, BYTE, op2(O, A), 0 },
+/*a1*/ { "mov", FALSE, LONG, op2(O, A), 0 },
+/*a2*/ { "mov", FALSE, BYTE, op2(A, O), 0 },
+/*a3*/ { "mov", FALSE, LONG, op2(A, O), 0 },
+/*a4*/ { "movs", FALSE, BYTE, op2(SI,DI), 0 },
+/*a5*/ { "movs", FALSE, LONG, op2(SI,DI), 0 },
+/*a6*/ { "cmps", FALSE, BYTE, op2(SI,DI), 0 },
+/*a7*/ { "cmps", FALSE, LONG, op2(SI,DI), 0 },
+
+/*a8*/ { "test", FALSE, BYTE, op2(I, A), 0 },
+/*a9*/ { "test", FALSE, LONG, op2(I, A), 0 },
+/*aa*/ { "stos", FALSE, BYTE, op1(DI), 0 },
+/*ab*/ { "stos", FALSE, LONG, op1(DI), 0 },
+/*ac*/ { "lods", FALSE, BYTE, op1(SI), 0 },
+/*ad*/ { "lods", FALSE, LONG, op1(SI), 0 },
+/*ae*/ { "scas", FALSE, BYTE, op1(DI), 0 },
+/*af*/ { "scas", FALSE, LONG, op1(DI), 0 },
+
+/*b0*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b1*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b2*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b3*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b4*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b5*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b6*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b7*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+
+/*b8*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*b9*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*ba*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bb*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bc*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bd*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*be*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bf*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+
+/*c0*/ { "", TRUE, BYTE, op2(Ib, E), (char *)db_Grp2 },
+/*c1*/ { "", TRUE, LONG, op2(Ib, E), (char *)db_Grp2 },
+/*c2*/ { "ret", FALSE, NONE, op1(Iw), 0 },
+/*c3*/ { "ret", FALSE, NONE, 0, 0 },
+/*c4*/ { "les", TRUE, LONG, op2(E, R), 0 },
+/*c5*/ { "lds", TRUE, LONG, op2(E, R), 0 },
+/*c6*/ { "mov", TRUE, BYTE, op2(I, E), 0 },
+/*c7*/ { "mov", TRUE, LONG, op2(I, E), 0 },
+
+/*c8*/ { "enter", FALSE, NONE, op2(Ib, Iw), 0 },
+/*c9*/ { "leave", FALSE, NONE, 0, 0 },
+/*ca*/ { "lret", FALSE, NONE, op1(Iw), 0 },
+/*cb*/ { "lret", FALSE, NONE, 0, 0 },
+/*cc*/ { "int", FALSE, NONE, op1(o3), 0 },
+/*cd*/ { "int", FALSE, NONE, op1(Ib), 0 },
+/*ce*/ { "into", FALSE, NONE, 0, 0 },
+/*cf*/ { "iret", FALSE, NONE, 0, 0 },
+
+/*d0*/ { "", TRUE, BYTE, op2(o1, E), (char *)db_Grp2 },
+/*d1*/ { "", TRUE, LONG, op2(o1, E), (char *)db_Grp2 },
+/*d2*/ { "", TRUE, BYTE, op2(CL, E), (char *)db_Grp2 },
+/*d3*/ { "", TRUE, LONG, op2(CL, E), (char *)db_Grp2 },
+/*d4*/ { "aam", FALSE, NONE, op1(Iba), 0 },
+/*d5*/ { "aad", FALSE, NONE, op1(Iba), 0 },
+/*d6*/ { "", FALSE, NONE, 0, 0 },
+/*d7*/ { "xlat", FALSE, BYTE, op1(BX), 0 },
+
+/*d8*/ { "", TRUE, NONE, 0, (char *)db_Esc8 },
+/*d9*/ { "", TRUE, NONE, 0, (char *)db_Esc9 },
+/*da*/ { "", TRUE, NONE, 0, (char *)db_Esca },
+/*db*/ { "", TRUE, NONE, 0, (char *)db_Escb },
+/*dc*/ { "", TRUE, NONE, 0, (char *)db_Escc },
+/*dd*/ { "", TRUE, NONE, 0, (char *)db_Escd },
+/*de*/ { "", TRUE, NONE, 0, (char *)db_Esce },
+/*df*/ { "", TRUE, NONE, 0, (char *)db_Escf },
+
+/*e0*/ { "loopne",FALSE, NONE, op1(Db), 0 },
+/*e1*/ { "loope", FALSE, NONE, op1(Db), 0 },
+/*e2*/ { "loop", FALSE, NONE, op1(Db), 0 },
+/*e3*/ { "jcxz", FALSE, SDEP, op1(Db), "jecxz" },
+/*e4*/ { "in", FALSE, BYTE, op2(Ib, A), 0 },
+/*e5*/ { "in", FALSE, LONG, op2(Ib, A) , 0 },
+/*e6*/ { "out", FALSE, BYTE, op2(A, Ib), 0 },
+/*e7*/ { "out", FALSE, LONG, op2(A, Ib) , 0 },
+
+/*e8*/ { "call", FALSE, NONE, op1(Dl), 0 },
+/*e9*/ { "jmp", FALSE, NONE, op1(Dl), 0 },
+/*ea*/ { "ljmp", FALSE, NONE, op1(OS), 0 },
+/*eb*/ { "jmp", FALSE, NONE, op1(Db), 0 },
+/*ec*/ { "in", FALSE, BYTE, op2(DX, A), 0 },
+/*ed*/ { "in", FALSE, LONG, op2(DX, A) , 0 },
+/*ee*/ { "out", FALSE, BYTE, op2(A, DX), 0 },
+/*ef*/ { "out", FALSE, LONG, op2(A, DX) , 0 },
+
+/*f0*/ { "", FALSE, NONE, 0, 0 },
+/*f1*/ { "", FALSE, NONE, 0, 0 },
+/*f2*/ { "", FALSE, NONE, 0, 0 },
+/*f3*/ { "", FALSE, NONE, 0, 0 },
+/*f4*/ { "hlt", FALSE, NONE, 0, 0 },
+/*f5*/ { "cmc", FALSE, NONE, 0, 0 },
+/*f6*/ { "", TRUE, BYTE, 0, (char *)db_Grp3 },
+/*f7*/ { "", TRUE, LONG, 0, (char *)db_Grp3 },
+
+/*f8*/ { "clc", FALSE, NONE, 0, 0 },
+/*f9*/ { "stc", FALSE, NONE, 0, 0 },
+/*fa*/ { "cli", FALSE, NONE, 0, 0 },
+/*fb*/ { "sti", FALSE, NONE, 0, 0 },
+/*fc*/ { "cld", FALSE, NONE, 0, 0 },
+/*fd*/ { "std", FALSE, NONE, 0, 0 },
+/*fe*/ { "", TRUE, NONE, 0, (char *)db_Grp4 },
+/*ff*/ { "", TRUE, NONE, 0, (char *)db_Grp5 },
+};
+
+/* Catch-all table entry used when an opcode has no valid decoding. */
+struct inst db_bad_inst =
+ { "???", FALSE, NONE, 0, 0 }
+;
+
+/* modrm byte fields: mod (2 bits), reg (3 bits), r/m (3 bits). */
+#define f_mod(byte) ((byte)>>6)
+#define f_reg(byte) (((byte)>>3)&0x7)
+#define f_rm(byte) ((byte)&0x7)
+
+/* SIB byte fields: scale (2 bits), index (3 bits), base (3 bits). */
+#define sib_ss(byte) ((byte)>>6)
+#define sib_index(byte) (((byte)>>3)&0x7)
+#define sib_base(byte) ((byte)&0x7)
+
+/* Decoded effective-address operand, filled in by db_read_address(). */
+struct i_addr {
+ int is_reg; /* if reg, reg number is in 'disp' */
+ int disp; /* displacement (or register number, see above) */
+ char * base; /* base register name, 0 if none */
+ char * index; /* index register name, 0 if none */
+ int ss; /* scale shift (0-3) from the SIB byte */
+};
+
+/* 16-bit addressing base/index register combinations, indexed by r/m. */
+char * db_index_reg_16[8] = {
+ "%bx,%si",
+ "%bx,%di",
+ "%bp,%si",
+ "%bp,%di",
+ "%si",
+ "%di",
+ "%bp",
+ "%bx"
+};
+
+/* Register names indexed by operand size (BYTE/WORD/LONG) and reg number. */
+char * db_reg[3][8] = {
+ { "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh" },
+ { "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di" },
+ { "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi" }
+};
+
+/* Segment register names, indexed by the reg field of the modrm byte. */
+char * db_seg_reg[8] = {
+ "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "", ""
+};
+
+/*
+ * lengths for size attributes
+ */
+int db_lengths[] = {
+ 1, /* BYTE */
+ 2, /* WORD */
+ 4, /* LONG */
+ 8, /* QUAD */
+ 4, /* SNGL */
+ 8, /* DBLR */
+ 10, /* EXTR */
+};
+
+#define get_value_inc(result, loc, size, is_signed, task) \
+ result = db_get_task_value((loc), (size), (is_signed), (task)); \
+ (loc) += (size);
+
+/*
+ * Read address at location and return updated location.
+ *
+ * Decodes the effective-address part of an instruction (modrm byte,
+ * optional SIB byte, optional displacement) into *addrp.  'short_addr'
+ * selects 16-bit addressing; displacement bytes are fetched from 'task'.
+ */
+static db_addr_t
+db_read_address(
+ db_addr_t loc,
+ int short_addr,
+ int regmodrm,
+ struct i_addr *addrp, /* out */
+ task_t task)
+{
+ int mod, rm, sib, index, disp;
+
+ mod = f_mod(regmodrm);
+ rm = f_rm(regmodrm);
+
+ /* mod == 3: the operand is a register, not memory. */
+ if (mod == 3) {
+ addrp->is_reg = TRUE;
+ addrp->disp = rm;
+ return loc;
+ }
+ addrp->is_reg = FALSE;
+ addrp->index = 0;
+
+ if (short_addr) {
+ /* 16-bit addressing: fixed base/index pairs from db_index_reg_16. */
+ addrp->index = 0;
+ addrp->ss = 0;
+ switch (mod) {
+ case 0:
+ if (rm == 6) {
+ /* mod=0, rm=6 is a bare 16-bit displacement. */
+ get_value_inc(disp, loc, 2, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = 0;
+ }
+ else {
+ addrp->disp = 0;
+ addrp->base = db_index_reg_16[rm];
+ }
+ break;
+ case 1:
+ get_value_inc(disp, loc, 1, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = db_index_reg_16[rm];
+ break;
+ case 2:
+ get_value_inc(disp, loc, 2, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = db_index_reg_16[rm];
+ break;
+ }
+ }
+ else {
+ /* 32-bit addressing: rm == 4 means a SIB byte follows. */
+ if (mod != 3 && rm == 4) {
+ get_value_inc(sib, loc, 1, FALSE, task);
+ rm = sib_base(sib);
+ index = sib_index(sib);
+ /* index == 4 encodes "no index register". */
+ if (index != 4)
+ addrp->index = db_reg[LONG][index];
+ addrp->ss = sib_ss(sib);
+ }
+
+ switch (mod) {
+ case 0:
+ if (rm == 5) {
+ /* mod=0, base=5 is a bare 32-bit displacement. */
+ get_value_inc(addrp->disp, loc, 4, FALSE, task);
+ addrp->base = 0;
+ }
+ else {
+ addrp->disp = 0;
+ addrp->base = db_reg[LONG][rm];
+ }
+ break;
+
+ case 1:
+ get_value_inc(disp, loc, 1, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = db_reg[LONG][rm];
+ break;
+
+ case 2:
+ get_value_inc(disp, loc, 4, FALSE, task);
+ addrp->disp = disp;
+ addrp->base = db_reg[LONG][rm];
+ break;
+ }
+ }
+ return loc;
+}
+
+/*
+ * Print a decoded effective address in AT&T syntax:
+ * register, or "seg:disp(base,index,scale)", or a bare symbol
+ * when there is neither base nor index register.
+ * ("%#n" / "%#r" are ddb's radix-aware number formats.)
+ */
+static void
+db_print_address(
+ const char * seg,
+ int size,
+ const struct i_addr *addrp,
+ task_t task)
+{
+ if (addrp->is_reg) {
+ db_printf("%s", db_reg[size][addrp->disp]);
+ return;
+ }
+
+ if (seg) {
+ db_printf("%s:", seg);
+ }
+
+ if (addrp->base != 0 || addrp->index != 0) {
+ db_printf("%#n", addrp->disp);
+ db_printf("(");
+ if (addrp->base)
+ db_printf("%s", addrp->base);
+ if (addrp->index)
+ db_printf(",%s,%d", addrp->index, 1<<addrp->ss);
+ db_printf(")");
+ } else
+ /* No registers: try to print the displacement symbolically. */
+ db_task_printsym((db_addr_t)addrp->disp, DB_STGY_ANY, task);
+}
+
+/*
+ * Disassemble floating-point ("escape") instruction
+ * and return updated location.
+ *
+ * 'inst' is the escape opcode (0xd8-0xdf); the modrm byte selects the
+ * entry in db_Esc_inst.  mod != 3 is a memory operand (suffix chosen
+ * from the operand size); mod == 3 uses the special reg-reg formats.
+ */
+static db_addr_t
+db_disasm_esc(
+ db_addr_t loc,
+ int inst,
+ int short_addr,
+ int size,
+ const char * seg,
+ task_t task)
+{
+ int regmodrm;
+ struct finst *fp;
+ int mod;
+ struct i_addr address;
+ char * name;
+
+ get_value_inc(regmodrm, loc, 1, FALSE, task);
+ fp = &db_Esc_inst[inst - 0xd8][f_reg(regmodrm)];
+ mod = f_mod(regmodrm);
+ if (mod != 3) {
+ /*
+ * Normal address modes.
+ */
+ loc = db_read_address(loc, short_addr, regmodrm, &address, task);
+ /* Print through "%s" so a stray '%' in a table name can
+ * never be interpreted as a format directive. */
+ db_printf("%s", fp->f_name);
+ /* Size suffix: s = single/short, l = long/double, t = extended,
+ * q = quad. (SNGL/WORD and DBLR/LONG share a suffix.) */
+ switch(fp->f_size) {
+ case SNGL:
+ case WORD:
+ db_printf("s");
+ break;
+ case DBLR:
+ case LONG:
+ db_printf("l");
+ break;
+ case EXTR:
+ db_printf("t");
+ break;
+ case QUAD:
+ db_printf("q");
+ break;
+ default:
+ break;
+ }
+ db_printf("\t");
+ db_print_address(seg, BYTE, &address, task);
+ }
+ else {
+ /*
+ * 'reg-reg' - special formats
+ */
+ switch (fp->f_rrmode) {
+ case op2(ST,STI):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st,%%st(%d)",name,f_rm(regmodrm));
+ break;
+ case op2(STI,ST):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st(%d),%%st",name, f_rm(regmodrm));
+ break;
+ case op1(STI):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st(%d)",name, f_rm(regmodrm));
+ break;
+ case op1(X):
+ db_printf("%s", ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+ break;
+ case op1(XA):
+ db_printf("%s\t%%ax",
+ ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+ break;
+ default:
+ db_printf("<bad instruction>");
+ break;
+ }
+ }
+
+ return loc;
+}
+
+/*
+ * Disassemble instruction at 'loc'.  'altfmt' specifies an
+ * (optional) alternate format.  Return address of start of
+ * next instruction.
+ *
+ * Pipeline: consume prefix bytes, dispatch escape (fp) and 0x0f
+ * two-byte opcodes, resolve group tables via the modrm reg field,
+ * print the mnemonic with its size suffix, then format each operand
+ * from the i_mode descriptor (one byte per operand, low byte first).
+ *
+ * Fix: mnemonic strings are printed through "%s" rather than being
+ * passed as the format string themselves.
+ */
+db_addr_t
+db_disasm(
+ db_addr_t loc,
+ boolean_t altfmt,
+ task_t task)
+{
+ int inst;
+ int size;
+ int short_addr;
+ char * seg;
+ struct inst * ip;
+ char * i_name;
+ int i_size;
+ int i_mode;
+ int regmodrm;
+ boolean_t first;
+ int displ;
+ int prefix;
+ int imm;
+ int imm2;
+ int len;
+ struct i_addr address;
+
+#ifdef __x86_64__
+ /* The instruction set decoding needs an update, avoid showing bogus output. */
+ db_printf("TODO\n");
+ return loc+1;
+#endif
+
+ get_value_inc(inst, loc, 1, FALSE, task);
+ if (db_disasm_16) {
+ short_addr = TRUE;
+ size = WORD;
+ }
+ else {
+ short_addr = FALSE;
+ size = LONG;
+ }
+ seg = 0;
+ regmodrm = 0;
+
+ /*
+ * Get prefixes
+ */
+ prefix = TRUE;
+ do {
+ switch (inst) {
+ case 0x66: /* data16 */
+ if (size == LONG)
+ size = WORD;
+ else
+ size = LONG;
+ break;
+ case 0x67:
+ short_addr = !short_addr;
+ break;
+ case 0x26:
+ seg = "%es";
+ break;
+ case 0x36:
+ seg = "%ss";
+ break;
+ case 0x2e:
+ seg = "%cs";
+ break;
+ case 0x3e:
+ seg = "%ds";
+ break;
+ case 0x64:
+ seg = "%fs";
+ break;
+ case 0x65:
+ seg = "%gs";
+ break;
+ case 0xf0:
+ db_printf("lock ");
+ break;
+ case 0xf2:
+ db_printf("repne ");
+ break;
+ case 0xf3:
+ db_printf("repe "); /* XXX repe VS rep */
+ break;
+ default:
+ prefix = FALSE;
+ break;
+ }
+ if (prefix) {
+ get_value_inc(inst, loc, 1, FALSE, task);
+ }
+ } while (prefix);
+
+ /* Floating-point escape opcodes have their own decoder. */
+ if (inst >= 0xd8 && inst <= 0xdf) {
+ loc = db_disasm_esc(loc, inst, short_addr, size, seg, task);
+ db_printf("\n");
+ return loc;
+ }
+
+ /* Two-byte (0x0f-prefixed) opcodes: rows of 16 in db_inst_0f. */
+ if (inst == 0x0f) {
+ get_value_inc(inst, loc, 1, FALSE, task);
+ ip = db_inst_0f[inst>>4];
+ if (ip == 0) {
+ ip = &db_bad_inst;
+ }
+ else {
+ ip = &ip[inst&0xf];
+ }
+ }
+ else
+ ip = &db_inst_table[inst];
+
+ if (ip->i_has_modrm) {
+ get_value_inc(regmodrm, loc, 1, FALSE, task);
+ loc = db_read_address(loc, short_addr, regmodrm, &address, task);
+ }
+
+ i_name = ip->i_name;
+ i_size = ip->i_size;
+ i_mode = ip->i_mode;
+
+ /* Group opcodes: the modrm reg field selects the real entry. */
+ if (ip->i_extra == (char *)db_Grp1 ||
+ ip->i_extra == (char *)db_Grp2 ||
+ ip->i_extra == (char *)db_Grp6 ||
+ ip->i_extra == (char *)db_Grp7 ||
+ ip->i_extra == (char *)db_Grp8) {
+ i_name = ((char **)ip->i_extra)[f_reg(regmodrm)];
+ }
+ else if (ip->i_extra == (char *)db_Grp3) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ }
+ else if (ip->i_extra == (char *)db_Grp4 ||
+ ip->i_extra == (char *)db_Grp5) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ i_size = ip->i_size;
+ }
+
+ /* Mnemonic, with operand-size suffix.  Always print table strings
+ * through "%s" so they cannot act as format strings. */
+ if (i_size == SDEP) {
+ /* Size-dependent mnemonic: i_extra holds the 32-bit name. */
+ if (size == WORD)
+ db_printf("%s", i_name);
+ else
+ db_printf("%s", ip->i_extra);
+ }
+ else {
+ db_printf("%s", i_name);
+ if (i_size != NONE) {
+ if (i_size == BYTE) {
+ db_printf("b");
+ size = BYTE;
+ }
+ else if (i_size == WORD) {
+ db_printf("w");
+ size = WORD;
+ }
+ else if (size == WORD)
+ db_printf("w");
+ else
+ db_printf("l");
+ }
+ }
+ db_printf("\t");
+ /* Operands: i_mode packs up to three descriptors, one per byte. */
+ for (first = TRUE;
+ i_mode != 0;
+ i_mode >>= 8, first = FALSE)
+ {
+ if (!first)
+ db_printf(",");
+
+ switch (i_mode & 0xFF) {
+
+ case E:
+ db_print_address(seg, size, &address, task);
+ break;
+
+ case Eind:
+ db_printf("*");
+ db_print_address(seg, size, &address, task);
+ break;
+
+ case El:
+ db_print_address(seg, LONG, &address, task);
+ break;
+
+ case Ew:
+ db_print_address(seg, WORD, &address, task);
+ break;
+
+ case Eb:
+ db_print_address(seg, BYTE, &address, task);
+ break;
+
+ case R:
+ db_printf("%s", db_reg[size][f_reg(regmodrm)]);
+ break;
+
+ case Rw:
+ db_printf("%s", db_reg[WORD][f_reg(regmodrm)]);
+ break;
+
+ case Ri:
+ db_printf("%s", db_reg[size][f_rm(inst)]);
+ break;
+
+ case S:
+ db_printf("%s", db_seg_reg[f_reg(regmodrm)]);
+ break;
+
+ case Si:
+ db_printf("%s", db_seg_reg[f_reg(inst)]);
+ break;
+
+ case A:
+ db_printf("%s", db_reg[size][0]); /* acc */
+ break;
+
+ case BX:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%bx" : "%ebx");
+ break;
+
+ case CL:
+ db_printf("%%cl");
+ break;
+
+ case DX:
+ db_printf("%%dx");
+ break;
+
+ case SI:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%si" : "%esi");
+ break;
+
+ case DI:
+ db_printf("%%es:(%s)", short_addr ? "%di" : "%edi");
+ break;
+
+ case CR:
+ db_printf("%%cr%d", f_reg(regmodrm));
+ break;
+
+ case DR:
+ db_printf("%%dr%d", f_reg(regmodrm));
+ break;
+
+ case TR:
+ db_printf("%%tr%d", f_reg(regmodrm));
+ break;
+
+ case I:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, FALSE, task);/* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Is:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, TRUE, task); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Ib:
+ get_value_inc(imm, loc, 1, FALSE, task); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Iba:
+ get_value_inc(imm, loc, 1, FALSE, task);
+ if (imm != 0x0a)
+ db_printf("$%#r", imm);
+ break;
+
+ case Ibs:
+ get_value_inc(imm, loc, 1, TRUE, task); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Iw:
+ get_value_inc(imm, loc, 2, FALSE, task); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Il:
+ get_value_inc(imm, loc, 4, FALSE, task);
+ db_printf("$%#n", imm);
+ break;
+
+ case O:
+ if (short_addr) {
+ get_value_inc(displ, loc, 2, TRUE, task);
+ }
+ else {
+ get_value_inc(displ, loc, 4, TRUE, task);
+ }
+ if (seg)
+ db_printf("%s:%#r",seg, displ);
+ else
+ db_task_printsym((db_addr_t)displ, DB_STGY_ANY, task);
+ break;
+
+ case Db:
+ get_value_inc(displ, loc, 1, TRUE, task);
+ if (short_addr) {
+ /* offset only affects low 16 bits */
+ displ = (loc & 0xffff0000)
+ | ((loc + displ) & 0xffff);
+ }
+ else
+ displ = displ + loc;
+ db_task_printsym((db_addr_t)displ,DB_STGY_XTRN,task);
+ break;
+
+ case Dl:
+ if (short_addr) {
+ get_value_inc(displ, loc, 2, TRUE, task);
+ /* offset only affects low 16 bits */
+ displ = (loc & 0xffff0000)
+ | ((loc + displ) & 0xffff);
+ }
+ else {
+ get_value_inc(displ, loc, 4, TRUE, task);
+ displ = displ + loc;
+ }
+ db_task_printsym((db_addr_t)displ, DB_STGY_XTRN, task);
+ break;
+
+ case o1:
+ db_printf("$1");
+ break;
+
+ case o3:
+ db_printf("$3");
+ break;
+
+ case OS:
+ if (short_addr) {
+ get_value_inc(imm, loc, 2, FALSE, task); /* offset */
+ }
+ else {
+ get_value_inc(imm, loc, 4, FALSE, task); /* offset */
+ }
+ get_value_inc(imm2, loc, 2, FALSE, task); /* segment */
+ db_printf("$%#n,%#n", imm2, imm);
+ break;
+ }
+ }
+
+ if (altfmt == 0 && !db_disasm_16) {
+ if (inst == 0xe9 || inst == 0xeb) {
+ /*
+ * GAS pads to longword boundary after unconditional jumps.
+ */
+ loc = (loc + (4-1)) & ~(4-1);
+ }
+ }
+ db_printf("\n");
+ return loc;
+}
+
+#endif /* MACH_KDB */
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
new file mode 100644
index 0000000..483991d
--- /dev/null
+++ b/i386/i386/db_interface.c
@@ -0,0 +1,865 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Interface to new debugger.
+ */
+
+#include <string.h>
+#include <sys/reboot.h>
+#include <vm/pmap.h>
+
+#include <i386/thread.h>
+#include <i386/db_machdep.h>
+#include <i386/seg.h>
+#include <i386/trap.h>
+#include <i386/setjmp.h>
+#include <i386/pmap.h>
+#include <i386/proc_reg.h>
+#include <i386/locore.h>
+#include <i386at/biosmem.h>
+#include "gdt.h"
+#include "trap.h"
+
+#include "vm_param.h"
+#include <vm/vm_map.h>
+#include <vm/vm_fault.h>
+#include <kern/cpu_number.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_run.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_trap.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_mp.h>
+#include <machine/db_interface.h>
+#include <machine/machspl.h>
+
+#if MACH_KDB
+/* Whether the kernel uses any debugging register. */
+static boolean_t kernel_dr;
+#endif
+/* Whether the current debug registers are zero. */
+static boolean_t zero_dr;
+
+/* Register snapshot the debugger reads and edits while stopped. */
+db_regs_t ddb_regs;
+
+/*
+ * Load the thread's debug registers from its pcb on context switch.
+ * Skipped entirely while the kernel debugger owns the registers
+ * (kernel_dr); the zero_dr flag avoids rewriting DR0-DR3/DR7 when
+ * both the old and the new contents are all zero.
+ */
+void db_load_context(pcb_t pcb)
+{
+#if MACH_KDB
+ int s = splhigh();
+
+ if (kernel_dr) {
+ splx(s);
+ return;
+ }
+#endif
+ /* Else set user debug registers, if any */
+ unsigned int *dr = pcb->ims.ids.dr;
+ boolean_t will_zero_dr = !dr[0] && !dr[1] && !dr[2] && !dr[3] && !dr[7];
+
+ if (!(zero_dr && will_zero_dr))
+ {
+ set_dr0(dr[0]);
+ set_dr1(dr[1]);
+ set_dr2(dr[2]);
+ set_dr3(dr[3]);
+ set_dr7(dr[7]);
+ zero_dr = will_zero_dr;
+ }
+
+#if MACH_KDB
+ splx(s);
+#endif
+}
+
+/* Ask processor 'i' to drop into the kernel debugger (MP builds only;
+ * a no-op when KDB or SMP support is compiled out). */
+void cpu_interrupt_to_db(int i)
+{
+#if MACH_KDB && NCPUS > 1
+	db_on(i);
+#endif
+}
+
+/* Copy the thread's saved debug-register state out of its pcb. */
+void db_get_debug_state(
+ pcb_t pcb,
+ struct i386_debug_state *state)
+{
+ *state = pcb->ims.ids;
+}
+
+/*
+ * Install new debug-register state into a thread's pcb after
+ * validating that every breakpoint address lies in user space;
+ * reloads the hardware registers if the thread is current.
+ * NOTE(review): a zero (disabled) dr[i] fails the range check unless
+ * VM_MIN_USER_ADDRESS is 0 — confirm callers always pass addresses.
+ */
+kern_return_t db_set_debug_state(
+ pcb_t pcb,
+ const struct i386_debug_state *state)
+{
+ int i;
+
+ for (i = 0; i <= 3; i++)
+ if (state->dr[i] < VM_MIN_USER_ADDRESS
+ || state->dr[i] >= VM_MAX_USER_ADDRESS)
+ return KERN_INVALID_ARGUMENT;
+
+ pcb->ims.ids = *state;
+
+ /* Make the new state take effect immediately for the running thread. */
+ if (pcb == current_thread()->pcb)
+ db_load_context(pcb);
+
+ return KERN_SUCCESS;
+}
+
+#if MACH_KDB
+
+/* Saved-state pointer from the most recent kdb entry. */
+struct i386_saved_state *i386_last_saved_statep;
+/* Copy of the register state for traps taken inside the debugger. */
+struct i386_saved_state i386_nested_saved_state;
+/* Kernel stack pointer at the last kdb entry. */
+uintptr_t i386_last_kdb_sp;
+
+extern thread_t db_default_thread;
+
+/* Debug-register contents while the kernel debugger owns DR0-DR7. */
+static struct i386_debug_state ids;
+
+/*
+ * Program kernel debug register 'num' to watch 'linear_addr' with the
+ * given DR7 type/len/persistence bits.  A zero address clears the slot.
+ * The first kernel use takes the registers away from user state; when
+ * the last slot is cleared they are handed back via db_load_context.
+ */
+void db_dr (
+ int num,
+ vm_offset_t linear_addr,
+ int type,
+ int len,
+ int persistence)
+{
+ int s = splhigh();
+ unsigned long dr7;
+
+ if (!kernel_dr) {
+ /* Clearing a slot while the kernel owns nothing: no work. */
+ if (!linear_addr) {
+ splx(s);
+ return;
+ }
+ kernel_dr = TRUE;
+ /* Clear user debugging registers */
+ set_dr7(0);
+ set_dr0(0);
+ set_dr1(0);
+ set_dr2(0);
+ set_dr3(0);
+ }
+
+ ids.dr[num] = linear_addr;
+ switch (num) {
+ case 0: set_dr0(linear_addr); break;
+ case 1: set_dr1(linear_addr); break;
+ case 2: set_dr2(linear_addr); break;
+ case 3: set_dr3(linear_addr); break;
+ }
+
+ /* Replace type/len/persistence for DRnum in dr7 */
+ dr7 = get_dr7 ();
+ dr7 &= ~(0xfUL << (4*num+16)) & ~(0x3UL << (2*num));
+ dr7 |= (((len << 2) | type) << (4*num+16)) | (persistence << (2*num));
+ set_dr7 (dr7);
+
+ if (kernel_dr) {
+ if (!ids.dr[0] && !ids.dr[1] && !ids.dr[2] && !ids.dr[3]) {
+ /* Not used any more, switch back to user debugging registers */
+ set_dr7 (0);
+ kernel_dr = FALSE;
+ zero_dr = TRUE;
+ db_load_context(current_thread()->pcb);
+ }
+ }
+ splx(s);
+}
+
+/*
+ * Arm hardware watchpoint 'num' (0-3) for the watched range.
+ * The range must be exactly 1, 2 or 4 bytes and naturally aligned.
+ * User-task addresses are translated to their kernel mapping first.
+ * Returns TRUE on success.
+ */
+boolean_t
+db_set_hw_watchpoint(
+ const db_watchpoint_t watch,
+ unsigned num)
+{
+ vm_size_t size = watch->hiaddr - watch->loaddr;
+ db_addr_t addr = watch->loaddr;
+ vm_offset_t kern_addr;
+
+ if (num >= 4)
+ return FALSE;
+ if (size != 1 && size != 2 && size != 4)
+ return FALSE;
+
+ if (addr & (size-1))
+ /* Unaligned */
+ return FALSE;
+
+ if (watch->task) {
+ if (db_user_to_kernel_address(watch->task, addr, &kern_addr, 1) < 0)
+ return FALSE;
+ addr = kern_addr;
+ }
+ addr = kvtolin(addr);
+
+ /* DR7 len field encodes size-1; watch for writes, local+global. */
+ db_dr (num, addr, I386_DB_TYPE_W, size-1, I386_DB_LOCAL|I386_DB_GLOBAL);
+
+ /* NOTE(review): "%x" with a db_addr_t may truncate on 64-bit — confirm. */
+ db_printf("Hardware watchpoint %d set for %x\n", num, addr);
+ return TRUE;
+}
+
+/*
+ * Disarm hardware watchpoint 'num'.  Returns TRUE on success,
+ * FALSE when 'num' is not a valid debug-register index.
+ */
+boolean_t
+db_clear_hw_watchpoint(
+ unsigned num)
+{
+ if (num > 3)
+ return FALSE;
+
+ db_dr(num, 0, 0, 0, 0);
+ return TRUE;
+}
+
+/*
+ * Report on the console why a kernel trap landed us in the debugger.
+ */
+static void
+kdbprinttrap(
+ int type,
+ int code)
+{
+ printf("kernel: %s (%d), code=%x\n", trap_name(type), type, code);
+}
+
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ */
+
+/* Recovery point for faults taken while the debugger probes memory. */
+extern jmp_buf_t *db_recover;
+spl_t saved_ipl[NCPUS]; /* just to know what was IPL before trap */
+
+/*
+ * Main debugger entry from a kernel trap.  Classifies the trap,
+ * snapshots the register state into ddb_regs, runs the interactive
+ * debugger, then writes any register edits back into *regs.
+ * Returns 1 to indicate the trap was handled.
+ */
+boolean_t
+kdb_trap(
+ int type,
+ int code,
+ struct i386_saved_state *regs)
+{
+ spl_t s;
+
+ s = splhigh();
+ saved_ipl[cpu_number()] = s;
+
+ switch (type) {
+ case T_DEBUG: /* single_step */
+ {
+ int addr;
+ int status = get_dr6();
+
+ if (status & 0xf) { /* hmm hdw break */
+ /* DR6 low bits tell which debug register fired. */
+ addr = status & 0x8 ? get_dr3() :
+ status & 0x4 ? get_dr2() :
+ status & 0x2 ? get_dr1() :
+ get_dr0();
+ /* RF keeps the breakpoint from re-firing on resume. */
+ regs->efl |= EFL_RF;
+ db_single_step_cmd(addr, 0, 1, "p");
+ }
+ }
+ /* FALLTHROUGH */
+ case T_INT3: /* breakpoint */
+ case T_WATCHPOINT: /* watchpoint */
+ case -1: /* keyboard interrupt */
+ break;
+
+ default:
+ if (db_recover) {
+ /* Fault inside the debugger: unwind to its recovery point. */
+ i386_nested_saved_state = *regs;
+ db_printf("Caught %s (%d), code = %x, pc = %x\n",
+ trap_name(type), type, code, regs->eip);
+ db_error("");
+ /*NOTREACHED*/
+ }
+ kdbprinttrap(type, code);
+ }
+
+#if NCPUS > 1
+ if (db_enter())
+#endif /* NCPUS > 1 */
+ {
+ i386_last_saved_statep = regs;
+ i386_last_kdb_sp = (uintptr_t) &type;
+
+ /* XXX Should switch to ddb`s own stack here. */
+
+ ddb_regs = *regs;
+ if ((regs->cs & 0x3) == KERNEL_RING) {
+ /*
+ * Kernel mode - esp and ss not saved
+ */
+ ddb_regs.uesp = (uintptr_t)&regs->uesp; /* kernel stack pointer */
+ ddb_regs.ss = KERNEL_DS;
+ }
+
+ cnpollc(TRUE);
+ db_task_trap(type, code, (regs->cs & 0x3) != 0);
+ cnpollc(FALSE);
+
+ /* Propagate any register edits made in the debugger. */
+ regs->eip = ddb_regs.eip;
+ regs->efl = ddb_regs.efl;
+ regs->eax = ddb_regs.eax;
+ regs->ecx = ddb_regs.ecx;
+ regs->edx = ddb_regs.edx;
+ regs->ebx = ddb_regs.ebx;
+ if ((regs->cs & 0x3) != KERNEL_RING) {
+ /*
+ * user mode - saved esp and ss valid
+ */
+ regs->uesp = ddb_regs.uesp; /* user stack pointer */
+ regs->ss = ddb_regs.ss & 0xffff; /* user stack segment */
+ }
+ regs->ebp = ddb_regs.ebp;
+ regs->esi = ddb_regs.esi;
+ regs->edi = ddb_regs.edi;
+ regs->cs = ddb_regs.cs & 0xffff;
+#if !defined(__x86_64__) || defined(USER32)
+ regs->es = ddb_regs.es & 0xffff;
+ regs->ds = ddb_regs.ds & 0xffff;
+ regs->fs = ddb_regs.fs & 0xffff;
+ regs->gs = ddb_regs.gs & 0xffff;
+#endif
+ /* If stopped on our own breakpoint instruction, step past it. */
+ if ((type == T_INT3) &&
+ (db_get_task_value(regs->eip, BKPT_SIZE, FALSE, TASK_NULL)
+ == BKPT_INST))
+ regs->eip += BKPT_SIZE;
+ }
+#if NCPUS > 1
+ db_leave();
+#endif /* NCPUS > 1 */
+
+ splx(s);
+ return 1;
+}
+
+/*
+ * Enter KDB through a keyboard trap.
+ * We show the registers as of the keyboard interrupt
+ * instead of those at its call to KDB.
+ */
+/* NOTE(review): field order presumably mirrors what the keyboard-entry
+ * assembly stub pushes before calling kdb_kentry — verify against locore. */
+struct int_regs {
+#ifdef __i386__
+ long edi;
+ long esi;
+#endif
+ long ebp;
+ long ebx;
+ struct i386_interrupt_state *is;
+};
+
+/*
+ * Debugger entry from the keyboard interrupt.  Builds ddb_regs from
+ * the interrupt frame (*is) plus the callee-saved registers in
+ * *int_regs, runs the debugger, then writes edits back to both.
+ */
+void
+kdb_kentry(
+ struct int_regs *int_regs)
+{
+ struct i386_interrupt_state *is = int_regs->is;
+ spl_t s = splhigh();
+
+#if NCPUS > 1
+ if (db_enter())
+#endif /* NCPUS > 1 */
+ {
+ if ((is->cs & 0x3) != KERNEL_RING) {
+ /* User mode: esp/ss were pushed just past the frame. */
+ ddb_regs.uesp = *(uintptr_t *)(is+1);
+ ddb_regs.ss = *(int *)((uintptr_t *)(is+1)+1);
+ }
+ else {
+ ddb_regs.ss = KERNEL_DS;
+ ddb_regs.uesp= (uintptr_t)(is+1);
+ }
+ ddb_regs.efl = is->efl;
+ ddb_regs.cs = is->cs;
+ ddb_regs.eip = is->eip;
+ ddb_regs.eax = is->eax;
+ ddb_regs.ecx = is->ecx;
+ ddb_regs.edx = is->edx;
+ ddb_regs.ebx = int_regs->ebx;
+ ddb_regs.ebp = int_regs->ebp;
+#ifdef __i386__
+ ddb_regs.esi = int_regs->esi;
+ ddb_regs.edi = int_regs->edi;
+#endif
+#ifdef __x86_64__
+ ddb_regs.esi = is->rsi;
+ ddb_regs.edi = is->rdi;
+#endif
+#if !defined(__x86_64__) || defined(USER32)
+ ddb_regs.ds = is->ds;
+ ddb_regs.es = is->es;
+ ddb_regs.fs = is->fs;
+ ddb_regs.gs = is->gs;
+#endif
+ cnpollc(TRUE);
+ db_task_trap(-1, 0, (ddb_regs.cs & 0x3) != 0);
+ cnpollc(FALSE);
+
+ /* Write any debugger edits back into the interrupt frame. */
+ if ((ddb_regs.cs & 0x3) != KERNEL_RING) {
+ ((int *)(is+1))[0] = ddb_regs.uesp;
+ ((int *)(is+1))[1] = ddb_regs.ss & 0xffff;
+ }
+ is->efl = ddb_regs.efl;
+ is->cs = ddb_regs.cs & 0xffff;
+ is->eip = ddb_regs.eip;
+ is->eax = ddb_regs.eax;
+ is->ecx = ddb_regs.ecx;
+ is->edx = ddb_regs.edx;
+ int_regs->ebx = ddb_regs.ebx;
+ int_regs->ebp = ddb_regs.ebp;
+#ifdef __i386__
+ int_regs->esi = ddb_regs.esi;
+ int_regs->edi = ddb_regs.edi;
+#endif
+#ifdef __x86_64__
+ is->rsi = ddb_regs.esi;
+ is->rdi = ddb_regs.edi;
+#endif
+#if !defined(__x86_64__) || defined(USER32)
+ is->ds = ddb_regs.ds & 0xffff;
+ is->es = ddb_regs.es & 0xffff;
+ is->fs = ddb_regs.fs & 0xffff;
+ is->gs = ddb_regs.gs & 0xffff;
+#endif
+ }
+#if NCPUS > 1
+ db_leave();
+#endif /* NCPUS > 1 */
+
+ (void) splx(s);
+}
+
+boolean_t db_no_vm_fault = TRUE;
+
+/*
+ * Translate a user virtual address in 'task' to a physical address.
+ * If the page is absent and db_no_vm_fault is off, one fault-in is
+ * attempted.  Returns 0 on success, -1 on failure ('flag' enables a
+ * diagnostic message).
+ */
+static int
+db_user_to_phys_address(
+ const task_t task,
+ vm_offset_t addr,
+ phys_addr_t *paddr,
+ int flag)
+{
+ pt_entry_t *ptp;
+ boolean_t faulted = FALSE;
+
+ retry:
+ ptp = pmap_pte(task->map->pmap, addr);
+ if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) {
+ /* At most one fault-in attempt (faulted guards the retry). */
+ if (!faulted && !db_no_vm_fault) {
+ kern_return_t err;
+
+ faulted = TRUE;
+ err = vm_fault( task->map,
+ trunc_page(addr),
+ VM_PROT_READ,
+ FALSE, FALSE, 0);
+ if (err == KERN_SUCCESS)
+ goto retry;
+ }
+ if (flag) {
+ /* NOTE(review): "%08x" may truncate vm_offset_t on 64-bit. */
+ db_printf("\nno memory is assigned to address %08x\n", addr);
+ }
+ return(-1);
+ }
+
+ /* Page frame from the PTE plus the offset within the page. */
+ *paddr = pte_to_pa(*ptp) + (addr & (INTEL_PGBYTES-1));
+ return(0);
+}
+
+/*
+ * Translate a user virtual address in 'task' to a kernel virtual
+ * address.  Returns 0 on success; -1 if the page is unmapped or only
+ * reachable through highmem ('flag' enables diagnostics on failure).
+ */
+int
+db_user_to_kernel_address(
+ const task_t task,
+ vm_offset_t addr,
+ vm_offset_t *kaddr,
+ int flag)
+{
+ phys_addr_t pa;
+
+ if (db_user_to_phys_address(task, addr, &pa, flag) != 0)
+ return -1;
+
+ /* Only the directly-mapped portion of physical memory is reachable. */
+ if (pa >= biosmem_directmap_end()) {
+ db_printf("\naddr %016llx is stored in highmem at physical %016llx, accessing it is not supported yet\n", (unsigned long long) addr, (unsigned long long) pa);
+ return -1;
+ }
+
+ *kaddr = phystokv(pa);
+ return 0;
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ *
+ * Kernel-range (or task-less) reads go straight through the current
+ * mapping; user-task reads are translated page by page and copied
+ * from physical memory.  Returns TRUE on success.
+ */
+boolean_t
+db_read_bytes(
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task)
+{
+ char *src;
+ int n;
+ phys_addr_t phys_addr;
+
+ src = (char *)addr;
+ if ((addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) || task == TASK_NULL) {
+ if (task == TASK_NULL)
+ task = db_current_task();
+ while (--size >= 0) {
+ /* NOTE(review): task was just defaulted above, so this
+ * condition looks unreachable unless db_current_task()
+ * returned TASK_NULL — confirm intent. */
+ if (addr < VM_MIN_KERNEL_ADDRESS && task == TASK_NULL) {
+ db_printf("\nbad address %x\n", addr);
+ return FALSE;
+ }
+ addr++;
+ *data++ = *src++;
+ }
+ return TRUE;
+ }
+ /* User task: translate and copy one page-bounded chunk at a time. */
+ while (size > 0) {
+ if (db_user_to_phys_address(task, addr, &phys_addr, 1) < 0)
+ return FALSE;
+ n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+ if (n > size)
+ n = size;
+ size -= n;
+ addr += n;
+ copy_from_phys(phys_addr, (vm_offset_t) data, n);
+ data += n;
+ }
+ return TRUE;
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ *
+ * User addresses are delegated to db_write_bytes_user_space.  Writes
+ * into the kernel text are enabled by temporarily setting the write
+ * bit in the covering PTE(s) and flushing the TLB (PGE is toggled off
+ * so global text mappings are actually flushed), then restored.
+ */
+void
+db_write_bytes(
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task)
+{
+ char *dst;
+
+ pt_entry_t *ptep0 = 0;
+ pt_entry_t oldmap0 = 0;
+ vm_offset_t addr1;
+ pt_entry_t *ptep1 = 0;
+ pt_entry_t oldmap1 = 0;
+ extern char etext;
+
+ /* Refuse a range straddling the user/kernel boundary. */
+ if ((addr < VM_MIN_KERNEL_ADDRESS) ^
+ ((addr + size) <= VM_MIN_KERNEL_ADDRESS)) {
+ db_error("\ncannot write data into mixed space\n");
+ /* NOTREACHED */
+ }
+ if (addr < VM_MIN_KERNEL_ADDRESS) {
+ if (task) {
+ db_write_bytes_user_space(addr, size, data, task);
+ return;
+ } else if (db_current_task() == TASK_NULL) {
+ db_printf("\nbad address %x\n", addr);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ }
+
+ if (addr >= VM_MIN_KERNEL_ADDRESS &&
+ addr <= (vm_offset_t)&etext)
+ {
+ /* Target is in (read-only) kernel text: make it writable. */
+ ptep0 = pmap_pte(kernel_pmap, addr);
+ oldmap0 = *ptep0;
+ *ptep0 |= INTEL_PTE_WRITE;
+
+ addr1 = i386_trunc_page(addr + size - 1);
+ if (i386_trunc_page(addr) != addr1) {
+ /* data crosses a page boundary */
+
+ ptep1 = pmap_pte(kernel_pmap, addr1);
+ oldmap1 = *ptep1;
+ *ptep1 |= INTEL_PTE_WRITE;
+ }
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ set_cr4(get_cr4() & ~CR4_PGE);
+ flush_tlb();
+ }
+
+ dst = (char *)addr;
+
+ while (--size >= 0)
+ *dst++ = *data++;
+
+ if (ptep0) {
+ /* Restore original protections and flush again. */
+ *ptep0 = oldmap0;
+ if (ptep1) {
+ *ptep1 = oldmap1;
+ }
+ flush_tlb();
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ set_cr4(get_cr4() | CR4_PGE);
+ }
+}
+
+void
+db_write_bytes_user_space(
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task)
+{
+ int n;
+ phys_addr_t phys_addr;
+
+ while (size > 0) {
+ if (db_user_to_phys_address(task, addr, &phys_addr, 1) < 0)
+ return;
+ n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+ if (n > size)
+ n = size;
+ size -= n;
+ addr += n;
+ copy_to_phys((vm_offset_t) data, phys_addr, n);
+ }
+}
+
+boolean_t
+db_check_access(
+ vm_offset_t addr,
+ int size,
+ task_t task)
+{
+ int n;
+ phys_addr_t phys_addr;
+
+ if (addr >= VM_MIN_KERNEL_ADDRESS) {
+ if (kernel_task == TASK_NULL)
+ return TRUE;
+ task = kernel_task;
+ } else if (task == TASK_NULL) {
+ if (current_thread() == THREAD_NULL)
+ return FALSE;
+ task = current_thread()->task;
+ }
+ while (size > 0) {
+ if (db_user_to_phys_address(task, addr, &phys_addr, 0) < 0)
+ return FALSE;
+ n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+ if (n > size)
+ n = size;
+ size -= n;
+ addr += n;
+ }
+ return TRUE;
+}
+
+boolean_t
+db_phys_eq(
+ task_t task1,
+ vm_offset_t addr1,
+ const task_t task2,
+ vm_offset_t addr2)
+{
+ phys_addr_t phys_addr1, phys_addr2;
+
+ if (addr1 >= VM_MIN_KERNEL_ADDRESS || addr2 >= VM_MIN_KERNEL_ADDRESS)
+ return FALSE;
+ if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1)))
+ return FALSE;
+ if (task1 == TASK_NULL) {
+ if (current_thread() == THREAD_NULL)
+ return FALSE;
+ task1 = current_thread()->task;
+ }
+ if (db_user_to_phys_address(task1, addr1, &phys_addr1, 0) < 0
+ || db_user_to_phys_address(task2, addr2, &phys_addr2, 0) < 0)
+ return FALSE;
+ return(phys_addr1 == phys_addr2);
+}
+
+#define DB_USER_STACK_ADDR (VM_MIN_KERNEL_ADDRESS)
+#define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3))
+
+#define GNU
+
+#ifndef GNU
+static boolean_t
+db_search_null(
+ const task_t task,
+ vm_offset_t *svaddr,
+ vm_offset_t evaddr,
+ vm_offset_t *skaddr,
+ int flag)
+{
+ unsigned vaddr;
+ unsigned *kaddr;
+
+ kaddr = (unsigned *)*skaddr;
+ for (vaddr = *svaddr; vaddr > evaddr; ) {
+ if (vaddr % INTEL_PGBYTES == 0) {
+ vaddr -= sizeof(unsigned);
+ if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0)
+ return FALSE;
+ kaddr = (vm_offset_t *)*skaddr;
+ } else {
+ vaddr -= sizeof(unsigned);
+ kaddr--;
+ }
+ if ((*kaddr == 0) ^ (flag == 0)) {
+ *svaddr = vaddr;
+ *skaddr = (unsigned)kaddr;
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+#endif /* GNU */
+
+#ifdef GNU
+static boolean_t
+looks_like_command(
+ const task_t task,
+ char* kaddr)
+{
+ char *c;
+
+ assert(!((vm_offset_t) kaddr & (INTEL_PGBYTES-1)));
+
+ /*
+ * Must be the environment.
+ */
+ if (!memcmp(kaddr, "PATH=", 5) || !memcmp(kaddr, "TERM=", 5) || !memcmp(kaddr, "SHELL=", 6) || !memcmp(kaddr, "LOCAL_PART=", 11) || !memcmp(kaddr, "LC_ALL=", 7))
+ return FALSE;
+
+ /*
+ * This is purely heuristical but works quite nicely.
+ * We know that it should look like words separated by \0, and
+ * eventually only \0s.
+ */
+ c = kaddr;
+ while (c < kaddr + INTEL_PGBYTES) {
+ if (!*c) {
+ if (c == kaddr)
+ /* Starts by \0. */
+ return FALSE;
+ break;
+ }
+ while (c < kaddr + INTEL_PGBYTES && *c)
+ c++;
+ if (c < kaddr + INTEL_PGBYTES)
+ c++; /* Skip \0 */
+ }
+ /*
+ * Check that the remainder is just \0s.
+ */
+ while (c < kaddr + INTEL_PGBYTES)
+ if (*c++)
+ return FALSE;
+
+ return TRUE;
+}
+#endif /* GNU */
+
/*
 * Print TASK's command name on the debugger console, padded to
 * DB_TASK_NAME_LEN columns.  If the task carries no explicit name,
 * try to recover the argument vector from the task's user address
 * space and print that instead.
 */
void
db_task_name(
	const task_t task)
{
	char *p;
	int n;
	vm_offset_t vaddr, kaddr;
	unsigned sp;

	if (task->name[0]) {
		/* Task has a recorded name; no heuristics needed. */
		db_printf("%s", task->name);
		return;
	}

#ifdef GNU
	/*
	 * GNU Hurd-specific heuristics.
	 */

	/* Heuristical address first. */
	/* NOTE(review): 0x1026000 appears to be a known location of the
	   argument page on Hurd — confirm against the exec server. */
	vaddr = 0x1026000;
	if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) >= 0 &&
		looks_like_command(task, (char*) kaddr))
		goto ok;

	/* Try to catch SP of the main thread. */
	thread_t thread;

	task_lock(task);
	thread = (thread_t) queue_first(&task->thread_list);
	if (!thread) {
		task_unlock(task);
		db_printf(DB_NULL_TASK_NAME);
		return;
	}
	sp = thread->pcb->iss.uesp;
	task_unlock(task);

	/* Walk pages upward from the stack page looking for a page
	   that resembles a command line. */
	vaddr = (sp & ~(INTEL_PGBYTES - 1)) + INTEL_PGBYTES;
	while (1) {
		if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) < 0)
			return;
		if (looks_like_command(task, (char*) kaddr))
			break;
		vaddr += INTEL_PGBYTES;
	}
#else /* GNU */
	vaddr = DB_USER_STACK_ADDR;
	kaddr = 0;

	/*
	 * skip nulls at the end
	 */
	if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0)) {
		db_printf(DB_NULL_TASK_NAME);
		return;
	}
	/*
	 * search start of args
	 */
	if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1)) {
		db_printf(DB_NULL_TASK_NAME);
		return;
	}
#endif /* GNU */

ok:
	n = DB_TASK_NAME_LEN-1;
#ifdef GNU
	p = (char *)kaddr;
	for (; n > 0; vaddr++, p++, n--) {
#else /* GNU */
	p = (char *)kaddr + sizeof(unsigned);
	for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0;
	     vaddr++, p++, n--) {
#endif /* GNU */
		/* Crossed a page boundary: re-translate and continue
		   from the new kernel alias. */
		if (vaddr % INTEL_PGBYTES == 0) {
			(void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
			p = (char*)kaddr;
		}
		/* Replace non-printable bytes by spaces. */
		db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p);
	}
	while (n-- >= 0)	/* compare with >= 0 for one more space */
		db_printf(" ");
}
+
+#endif /* MACH_KDB */
diff --git a/i386/i386/db_interface.h b/i386/i386/db_interface.h
new file mode 100644
index 0000000..69a277a
--- /dev/null
+++ b/i386/i386/db_interface.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+
+#ifndef _I386_DB_INTERFACE_H_
+#define _I386_DB_INTERFACE_H_
+
+#include <sys/types.h>
+#include <kern/task.h>
+#include <machine/thread.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_variables.h>
+
+extern boolean_t kdb_trap (
+ int type,
+ int code,
+ struct i386_saved_state *regs);
+
+struct int_regs;
+
+extern void kdb_kentry(struct int_regs *int_regs);
+
+extern boolean_t db_read_bytes (
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task);
+
+extern void db_write_bytes (
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task);
+
+extern boolean_t db_check_access (
+ vm_offset_t addr,
+ int size,
+ task_t task);
+
+extern boolean_t db_phys_eq (
+ task_t task1,
+ vm_offset_t addr1,
+ task_t task2,
+ vm_offset_t addr2);
+
+extern int db_user_to_kernel_address(
+ task_t task,
+ vm_offset_t addr,
+ vm_offset_t *kaddr,
+ int flag);
+
+extern void db_task_name (task_t task);
+
+extern void cpu_interrupt_to_db(int i);
+
+#define I386_DB_TYPE_X 0
+#define I386_DB_TYPE_W 1
+#define I386_DB_TYPE_RW 3
+
+#define I386_DB_LEN_1 0
+#define I386_DB_LEN_2 1
+#define I386_DB_LEN_4 3
+#define I386_DB_LEN_8 2 /* For >= Pentium4 and Xen CPUID >= 15 only */
+
+#define I386_DB_LOCAL 1
+#define I386_DB_GLOBAL 2
+
+#if MACH_KDB
+extern boolean_t db_set_hw_watchpoint(
+ db_watchpoint_t watch,
+ unsigned num);
+
+extern boolean_t db_clear_hw_watchpoint(
+ unsigned num);
+
+extern void db_dr (
+ int num,
+ vm_offset_t linear_addr,
+ int type,
+ int len,
+ int persistence);
+
+extern void
+db_stack_trace_cmd(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char *modif);
+
+extern void
+db_halt_cpu(void);
+extern void
+db_reset_cpu(void);
+
+void
+db_i386_reg_value(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ struct db_var_aux_param *ap);
+
+void feep(void);
+
+/*
+ * Put a debugging character on the screen.
+ * LOC=0 means put it in the bottom right corner, LOC=1 means put it
+ * one column to the left, etc.
+ */
+void kd_debug_put(int loc, char c);
+
+#endif
+
+extern void db_get_debug_state(
+ pcb_t pcb,
+ struct i386_debug_state *state);
+extern kern_return_t db_set_debug_state(
+ pcb_t pcb,
+ const struct i386_debug_state *state);
+
+extern void db_load_context(pcb_t pcb);
+
+extern void cnpollc(boolean_t on);
+
+void
+db_write_bytes_user_space(
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task);
+
+void db_debug_all_traps (boolean_t enable);
+
+#endif /* _I386_DB_INTERFACE_H_ */
diff --git a/i386/i386/db_machdep.h b/i386/i386/db_machdep.h
new file mode 100644
index 0000000..04c874b
--- /dev/null
+++ b/i386/i386/db_machdep.h
@@ -0,0 +1,105 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_DB_MACHDEP_H_
+#define _I386_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <mach/machine/vm_param.h>
+#include <mach/machine/eflags.h>
+#include <i386/thread.h> /* for thread_status */
+#include <i386/trap.h>
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef long db_expr_t; /* expression - signed */
+
+typedef struct i386_saved_state db_regs_t;
+extern db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+#define SAVE_DDB_REGS DB_SAVE(db_regs_t, ddb_regs)
+#define RESTORE_DDB_REGS DB_RESTORE(ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->eip)
+
+#define BKPT_INST 0xcc /* breakpoint instruction */
+#define BKPT_SIZE (1) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK ddb_regs.eip -= 1;
+
+#define db_clear_single_step(regs) ((regs)->efl &= ~EFL_TF)
+#define db_set_single_step(regs) ((regs)->efl |= EFL_TF)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_INT3)
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT)
+
+#define I_CALL 0xe8
+#define I_CALLI 0xff
+#define I_RET 0xc3
+#define I_IRET 0xcf
+
+#define inst_trap_return(ins) (((ins)&0xff) == I_IRET)
+#define inst_return(ins) (((ins)&0xff) == I_RET)
+#define inst_call(ins) (((ins)&0xff) == I_CALL || \
+ (((ins)&0xff) == I_CALLI && \
+ ((ins)&0x3800) == 0x1000))
+#define inst_load(ins) 0
+#define inst_store(ins) 0
+
+/* access capability and access macros */
+
+#define DB_ACCESS_LEVEL 2 /* access any space */
+#define DB_CHECK_ACCESS(addr,size,task) \
+ db_check_access(addr,size,task)
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) \
+ db_phys_eq(task1,addr1,task2,addr2)
+#define DB_VALID_KERN_ADDR(addr) \
+ ((addr) >= VM_MIN_KERNEL_ADDRESS && \
+ (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) \
+ ((!(user) && DB_VALID_KERN_ADDR(addr)) || \
+ ((user) && (addr) < VM_MIN_KERNEL_ADDRESS))
+
+/* macros for printing OS server dependent task name */
+
+#define DB_TASK_NAME(task) db_task_name(task)
+#define DB_TASK_NAME_TITLE "COMMAND "
+#define DB_TASK_NAME_LEN 23
+#define DB_NULL_TASK_NAME "? "
+
+/* macro for checking if a thread has used floating-point */
+
+#define db_thread_fp_used(thread) ((thread)->pcb->ims.ifps != 0)
+
+/* only a.out symbol tables */
+
+#define DB_NO_COFF 1
+
+#endif /* _I386_DB_MACHDEP_H_ */
diff --git a/i386/i386/db_trace.c b/i386/i386/db_trace.c
new file mode 100644
index 0000000..0ef7251
--- /dev/null
+++ b/i386/i386/db_trace.c
@@ -0,0 +1,586 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <vm/vm_map.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+
+#include <machine/db_machdep.h>
+#include <machine/machspl.h>
+#include <machine/db_interface.h>
+#include <machine/db_trace.h>
+#include <machine/cpu_number.h>
+#include <i386at/model_dep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_task_thread.h>
+
+#include "trap.h"
+
+/*
+ * Machine register set.
+ */
+struct db_variable db_regs[] = {
+ { "cs", (long *)&ddb_regs.cs, db_i386_reg_value },
+#if !defined(__x86_64__) || defined(USER32)
+ { "ds", (long *)&ddb_regs.ds, db_i386_reg_value },
+ { "es", (long *)&ddb_regs.es, db_i386_reg_value },
+ { "fs", (long *)&ddb_regs.fs, db_i386_reg_value },
+ { "gs", (long *)&ddb_regs.gs, db_i386_reg_value },
+#endif
+ { "ss", (long *)&ddb_regs.ss, db_i386_reg_value },
+ { "eax",(long *)&ddb_regs.eax, db_i386_reg_value },
+ { "ecx",(long *)&ddb_regs.ecx, db_i386_reg_value },
+ { "edx",(long *)&ddb_regs.edx, db_i386_reg_value },
+ { "ebx",(long *)&ddb_regs.ebx, db_i386_reg_value },
+ { "esp",(long *)&ddb_regs.uesp,db_i386_reg_value },
+ { "ebp",(long *)&ddb_regs.ebp, db_i386_reg_value },
+ { "esi",(long *)&ddb_regs.esi, db_i386_reg_value },
+ { "edi",(long *)&ddb_regs.edi, db_i386_reg_value },
+ { "eip",(long *)&ddb_regs.eip, db_i386_reg_value },
+ { "efl",(long *)&ddb_regs.efl, db_i386_reg_value },
+#ifdef __x86_64__
+ { "r8", (long *)&ddb_regs.r8, db_i386_reg_value },
+ { "r9", (long *)&ddb_regs.r9, db_i386_reg_value },
+ { "r10",(long *)&ddb_regs.r10, db_i386_reg_value },
+ { "r11",(long *)&ddb_regs.r11, db_i386_reg_value },
+ { "r12",(long *)&ddb_regs.r12, db_i386_reg_value },
+ { "r13",(long *)&ddb_regs.r13, db_i386_reg_value },
+ { "r14",(long *)&ddb_regs.r14, db_i386_reg_value },
+ { "r15",(long *)&ddb_regs.r15, db_i386_reg_value },
+#endif
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * Stack trace.
+ */
+#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
+
+struct i386_frame {
+ struct i386_frame *f_frame;
+ long f_retaddr;
+ long f_arg0;
+};
+
+#define TRAP 1
+#define INTERRUPT 2
+#define SYSCALL 3
+
+db_addr_t db_user_trap_symbol_value = 0;
+db_addr_t db_kernel_trap_symbol_value = 0;
+db_addr_t db_interrupt_symbol_value = 0;
+db_addr_t db_return_to_iret_symbol_value = 0;
+db_addr_t db_syscall_symbol_value = 0;
+boolean_t db_trace_symbols_found = FALSE;
+
+struct i386_kregs {
+ char *name;
+ long offset;
+} i386_kregs[] = {
+ { "ebx", (long)(&((struct i386_kernel_state *)0)->k_ebx) },
+ { "esp", (long)(&((struct i386_kernel_state *)0)->k_esp) },
+ { "ebp", (long)(&((struct i386_kernel_state *)0)->k_ebp) },
+#ifdef __i386__
+ { "edi", (long)(&((struct i386_kernel_state *)0)->k_edi) },
+ { "esi", (long)(&((struct i386_kernel_state *)0)->k_esi) },
+#endif
+#ifdef __x86_64__
+ { "r12", (long)(&((struct i386_kernel_state *)0)->k_r12) },
+ { "r13", (long)(&((struct i386_kernel_state *)0)->k_r13) },
+ { "r14", (long)(&((struct i386_kernel_state *)0)->k_r14) },
+ { "r15", (long)(&((struct i386_kernel_state *)0)->k_r15) },
+#endif
+ { "eip", (long)(&((struct i386_kernel_state *)0)->k_eip) },
+ { 0 },
+};
+
+static long *
+db_lookup_i386_kreg(
+ const char *name,
+ const long *kregp)
+{
+ struct i386_kregs *kp;
+
+ for (kp = i386_kregs; kp->name; kp++) {
+ if (strcmp(name, kp->name) == 0)
+ return (long *)((long)kregp + kp->offset);
+ }
+ return 0;
+}
+
/*
 * Get or set (per FLAG) a machine-register debugger variable for the
 * thread named in AP, reading from the live ddb register block, a
 * blocked thread's saved kernel state, or the thread's pcb.
 */
void
db_i386_reg_value(
	struct db_variable	*vp,
	db_expr_t		*valuep,
	int			flag,
	db_var_aux_param_t	ap)
{
	long			*dp = 0;
	db_expr_t		null_reg = 0;
	thread_t		thread = ap->thread;

	if (db_option(ap->modif, 'u')) {
	    /* 'u' modifier: the user-mode registers were requested. */
	    if (thread == THREAD_NULL) {
		if ((thread = current_thread()) == THREAD_NULL)
		    db_error("no user registers\n");
	    }
	    if (thread == current_thread()) {
		/* Low cs bits set => trapped from user mode, so the
		   live ddb registers are the user registers. */
		if (ddb_regs.cs & 0x3)
		    dp = vp->valuep;
		else if (ON_INT_STACK(ddb_regs.ebp, cpu_number()))
		    db_error("cannot get/set user registers in nested interrupt\n");
	    }
	} else {
	    if (thread == THREAD_NULL || thread == current_thread()) {
		/* Current (or unspecified) thread: live registers. */
		dp = vp->valuep;
	    } else if ((thread->state & TH_SWAPPED) == 0 &&
			thread->kernel_stack) {
		/* Blocked thread with a kernel stack: callee-saved
		   registers live in the saved kernel state; others
		   read back as 0 via null_reg. */
		dp = db_lookup_i386_kreg(vp->name,
			(long *)(STACK_IKS(thread->kernel_stack)));
		if (dp == 0)
		    dp = &null_reg;
	    } else if ((thread->state & TH_SWAPPED) &&
			thread->swap_func != thread_exception_return) {
/*.....this breaks t/t $taskN.0...*/
		/* only EIP is valid */
		if (vp->valuep == (long *) &ddb_regs.eip) {
		    dp = (long *)(&thread->swap_func);
		} else {
		    dp = &null_reg;
		}
	    }
	}
	if (dp == 0) {
	    /* Fall back to the interrupt-time state in the pcb, at the
	       same offset this variable occupies within ddb_regs. */
	    if (thread->pcb == 0)
		db_error("no pcb\n");
	    dp = (long *)((long)(&thread->pcb->iss) +
			  ((long)vp->valuep - (long)&ddb_regs));
	}
	if (flag == DB_VAR_SET)
	    *dp = *valuep;
	else
	    *valuep = *dp;
}
+
+static void
+db_find_trace_symbols(void)
+{
+ db_expr_t value;
+#ifdef __ELF__
+#define P
+#else
+#define P "_"
+#endif
+ if (db_value_of_name(P"user_trap", &value))
+ db_user_trap_symbol_value = (db_addr_t) value;
+ if (db_value_of_name(P"kernel_trap", &value))
+ db_kernel_trap_symbol_value = (db_addr_t) value;
+ if (db_value_of_name(P"interrupt", &value))
+ db_interrupt_symbol_value = (db_addr_t) value;
+ if (db_value_of_name(P"return_to_iret", &value))
+ db_return_to_iret_symbol_value = (db_addr_t) value;
+ if (db_value_of_name(P"syscall", &value))
+ db_syscall_symbol_value = (db_addr_t) value;
+#undef P
+ db_trace_symbols_found = TRUE;
+}
+
+/*
+ * Figure out how many arguments were passed into the frame at "fp".
+ */
+const int db_numargs_default = 5;
+
+#ifdef __x86_64
+/* Args are in registers */
+#define db_numargs(fp, task) -1
+#else
/*
 * Heuristically count the arguments passed to the function whose
 * frame is FP by decoding the instruction at the caller's return
 * address — the stack-pointer adjustment the caller performs after
 * the call.  Falls back to db_numargs_default when undecodable.
 */
static int
db_numargs(
	struct i386_frame *fp,
	task_t	task)
{
	long	*argp;
	long	inst;
	long	args;
	extern char	etext[];

	/* argp here is the caller's return address, not an argument. */
	argp = (long *)db_get_task_value((long)&fp->f_retaddr, sizeof(long), FALSE, task);
	/* Only decode return addresses inside kernel text. */
	if (argp < (long *)VM_MIN_KERNEL_ADDRESS || argp > (long *)etext)
	    args = db_numargs_default;
	else if (!DB_CHECK_ACCESS((long)argp, sizeof(long), task))
	    args = db_numargs_default;
	else {
	    inst = db_get_task_value((long)argp, sizeof(long), FALSE, task);
	    if ((inst & 0xff) == 0x59)	/* popl %ecx */
		args = 1;
	    else if ((inst & 0xffff) == 0xc483)	/* addl %n, %esp */
		/* The immediate byte is the number of stack bytes
		   popped; divide by 4 to get the argument count. */
		args = ((inst >> 16) & 0xff) / 4;
	    else
		args = db_numargs_default;
	}
	return args;
}
+#endif
+
+struct interrupt_frame {
+ struct i386_frame *if_frame; /* point to next frame */
+ long if_retaddr; /* return address to _interrupt */
+ long if_unit; /* unit number */
+ spl_t if_spl; /* saved spl */
+ long if_iretaddr; /* _return_to_{iret,iret_i} */
+ long if_edx; /* old sp(iret) or saved edx(iret_i) */
+ long if_ecx; /* saved ecx(iret_i) */
+ long if_eax; /* saved eax(iret_i) */
+ long if_eip; /* saved eip(iret_i) */
+ long if_cs; /* saved cs(iret_i) */
+ long if_efl; /* saved efl(iret_i) */
+};
+
+/*
+ * Figure out the next frame up in the call stack.
+ * For trap(), we print the address of the faulting instruction and
+ * proceed with the calling frame. We return the ip that faulted.
+ * If the trap was caused by jumping through a bogus pointer, then
+ * the next line in the backtrace will list some random function as
+ * being called. It should get the argument list correct, though.
+ * It might be possible to dig out from the next frame up the name
+ * of the function that faulted, but that could get hairy.
+ */
/*
 * Advance one level up the call stack, decoding the frame according
 * to FRAME_TYPE (TRAP/INTERRUPT/SYSCALL or ordinary).  Updates the
 * caller's frame pointer, stack pointer and instruction pointer.
 */
static void
db_nextframe(
	struct i386_frame **lfp,	/* in/out */
	struct i386_frame **fp,		/* in/out */
	db_addr_t	*sp,		/* out */
	db_addr_t	*ip,		/* out */
	long		frame_type,	/* in */
	const thread_t	thread)		/* in */
{
	struct i386_saved_state *saved_regs;
	struct interrupt_frame *ifp;
	task_t task = (thread != THREAD_NULL)? thread->task: TASK_NULL;

	switch(frame_type) {
	case TRAP:
	    /*
	     * We know that trap() has 1 argument and we know that
	     * it is an (struct i386_saved_state *).
	     */
	    saved_regs = (struct i386_saved_state *)
		db_get_task_value((long)&((*fp)->f_arg0),sizeof(long),FALSE,task);
	    db_printf(">>>>> %s (%d)",
		      trap_name(saved_regs->trapno), saved_regs->trapno);
	    if (saved_regs->trapno == T_PAGE_FAULT)
		/* Decode the page-fault error code bits. */
		db_printf(" for %s%s%s %lx",
			  saved_regs->err & T_PF_PROT ? "P" : "",
			  saved_regs->err & T_PF_WRITE ? "W" : "",
			  saved_regs->err & T_PF_USER ? "U" : "",
			  lintokv(saved_regs->cr2));
	    db_printf(" at ");
	    db_task_printsym(saved_regs->eip, DB_STGY_PROC, task);
	    db_printf(" <<<<<\n");
	    /* Resume unwinding from the interrupted context. */
	    *fp = (struct i386_frame *)saved_regs->ebp;
	    *sp = (db_addr_t)saved_regs->uesp;
	    *ip = (db_addr_t)saved_regs->eip;
	    break;
	case INTERRUPT:
	    if (*lfp == 0) {
		db_printf(">>>>> interrupt <<<<<\n");
		goto miss_frame;
	    }
	    db_printf(">>>>> interrupt at ");
	    /* The previous frame is actually an interrupt frame. */
	    ifp = (struct interrupt_frame *)(*lfp);
	    *fp = ifp->if_frame;
	    *sp = (db_addr_t) ifp->if_frame;
	    /* Two return paths: via return_to_iret the saved state is
	       reached through if_edx; otherwise if_eip holds it. */
	    if (ifp->if_iretaddr == db_return_to_iret_symbol_value)
		*ip = ((struct i386_interrupt_state *) ifp->if_edx)->eip;
	    else
		*ip = (db_addr_t) ifp->if_eip;
	    db_task_printsym(*ip, DB_STGY_PROC, task);
	    db_printf(" <<<<<\n");
	    break;
	case SYSCALL:
	    if (thread != THREAD_NULL && thread->pcb) {
		/* Continue from the user state saved in the pcb. */
		*ip = (db_addr_t) thread->pcb->iss.eip;
		*sp = (db_addr_t) thread->pcb->iss.uesp;
		*fp = (struct i386_frame *) thread->pcb->iss.ebp;
		break;
	    }
	    /* falling down for unknown case */
	default:
	miss_frame:
	    /* Ordinary frame: follow the saved ebp chain. */
	    *ip = (db_addr_t)
		db_get_task_value((long)&(*fp)->f_retaddr, sizeof(long), FALSE, task);
	    *lfp = *fp;
	    *fp = (struct i386_frame *)
		db_get_task_value((long)&(*fp)->f_frame, sizeof(long), FALSE, task);
	    *sp = (db_addr_t) *fp;
	    break;
	}
}
+
+#define F_USER_TRACE 1
+#define F_TRACE_THREAD 2
+
/*
 * ddb "trace" command.  Modifiers: 't' traces a thread (ADDR is a
 * thread pointer, or the default/current thread); 'u' continues the
 * trace into user space.  Without 't', ADDR (if given) is a frame
 * pointer; otherwise tracing starts from the ddb-saved registers.
 */
void
db_stack_trace_cmd(
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	const char	*modif)
{
	boolean_t	trace_thread = FALSE;
	struct i386_frame *frame;
	db_addr_t	callpc, sp;
	int		flags = 0;
	thread_t	th;

	{
	    /* Parse the command modifiers. */
	    const char *cp = modif;
	    char c;

	    while ((c = *cp++) != 0) {
		if (c == 't')
		    trace_thread = TRUE;
		if (c == 'u')
		    flags |= F_USER_TRACE;
	    }
	}

	if (!have_addr && !trace_thread) {
	    /* No argument: trace from where the debugger was entered. */
	    frame = (struct i386_frame *)ddb_regs.ebp;
	    sp = (db_addr_t)ddb_regs.uesp;
	    callpc = (db_addr_t)ddb_regs.eip;
	    th = current_thread();
	} else if (trace_thread) {
	    if (have_addr) {
		th = (thread_t) addr;
		if (!db_check_thread_address_valid(th))
		    return;
	    } else {
		th = db_default_thread;
		if (th == THREAD_NULL)
		    th = current_thread();
		if (th == THREAD_NULL) {
		    db_printf("no active thread\n");
		    return;
		}
	    }
	    if (th == current_thread()) {
		frame = (struct i386_frame *)ddb_regs.ebp;
		sp = (db_addr_t)ddb_regs.uesp;
		callpc = (db_addr_t)ddb_regs.eip;
	    } else {
		if (th->pcb == 0) {
		    db_printf("thread has no pcb\n");
		    return;
		}
		if ((th->state & TH_SWAPPED) || th->kernel_stack == 0) {
		    /* No kernel stack: start from the pcb-saved state
		       and report the continuation function. */
		    struct i386_saved_state *iss = &th->pcb->iss;

		    db_printf("Continuation ");
		    db_task_printsym((db_addr_t)th->swap_func,
				     DB_STGY_PROC,
				     th->task);
		    db_printf("\n");

		    frame = (struct i386_frame *) (iss->ebp);
		    sp = (db_addr_t) (iss->uesp);
		    callpc = (db_addr_t) (iss->eip);
		} else {
		    /* Blocked thread: start from the saved kernel state
		       at the base of its kernel stack. */
		    struct i386_kernel_state *iks;
		    iks = STACK_IKS(th->kernel_stack);
		    frame = (struct i386_frame *) (iks->k_ebp);
		    sp = (db_addr_t) (iks->k_esp);
		    callpc = (db_addr_t) (iks->k_eip);
		}
	    }
	} else {
	    /* ADDR is a raw frame pointer. */
	    frame = (struct i386_frame *)addr;
	    sp = (db_addr_t)addr;
	    th = (db_default_thread)? db_default_thread: current_thread();
	    callpc = (db_addr_t)db_get_task_value((long)&frame->f_retaddr, sizeof(long),
						  FALSE,
						  (th == THREAD_NULL) ? TASK_NULL : th->task);
	}

	db_i386_stack_trace( th, frame, sp, callpc, count, flags );
}
+
+
/*
 * Walk and print up to COUNT stack frames of thread TH, starting at
 * FRAME/SP/CALLPC.  Recognizes trap, interrupt and syscall frames
 * (via the symbols resolved in db_find_trace_symbols) and optionally
 * continues into user space when F_USER_TRACE is set in FLAGS.
 */
void
db_i386_stack_trace(
	const thread_t	th,
	struct i386_frame *frame,
	db_addr_t	sp,
	db_addr_t	callpc,
	db_expr_t	count,
	int		flags)
{
	task_t		task;
	boolean_t	kernel_only;
	long		*argp;
	long		user_frame = 0;
	struct i386_frame *lastframe;
	int		frame_type;
	char		*filename;
	int		linenum;
	extern unsigned long db_maxoff;

	if (count == -1)
	    count = 65535;

	kernel_only = (flags & F_USER_TRACE) == 0;

	task = (th == THREAD_NULL) ? TASK_NULL : th->task;

	if (!db_trace_symbols_found)
	    db_find_trace_symbols();

	if (!INKERNEL(callpc) && !INKERNEL(frame)) {
	    db_printf(">>>>> user space <<<<<\n");
	    user_frame++;
	}

	lastframe = 0;
	while (count--) {
	    int narg;
	    char * name;
	    db_expr_t offset;

	    if (INKERNEL(callpc) && user_frame == 0) {
		db_addr_t call_func = 0;

		/* Identify the function containing callpc so that
		   trap/interrupt/syscall frames can be special-cased. */
		db_sym_t sym_tmp;
		db_symbol_values(0,
				 sym_tmp = db_search_task_symbol(callpc,
								 DB_STGY_XTRN,
								 (db_addr_t *)&offset,
								 TASK_NULL),
				 &name, (db_expr_t *)&call_func);
		db_free_symbol(sym_tmp);
		if ((db_user_trap_symbol_value && call_func == db_user_trap_symbol_value) ||
		    (db_kernel_trap_symbol_value && call_func == db_kernel_trap_symbol_value)) {
		    frame_type = TRAP;
		    narg = 1;
		} else if (db_interrupt_symbol_value && call_func == db_interrupt_symbol_value) {
		    frame_type = INTERRUPT;
		    goto next_frame;
		} else if (db_syscall_symbol_value && call_func == db_syscall_symbol_value) {
		    frame_type = SYSCALL;
		    goto next_frame;
		} else {
		    frame_type = 0;
		    if (frame)
			narg = db_numargs(frame, task);
		    else
			narg = -1;
		}
	    } else if (!frame || INKERNEL(callpc) ^ INKERNEL(frame)) {
		/* pc and frame disagree about the address space:
		   argument count is unknowable. */
		frame_type = 0;
		narg = -1;
	    } else {
		frame_type = 0;
		narg = db_numargs(frame, task);
	    }

	    db_find_task_sym_and_offset(callpc, &name,
					(db_addr_t *)&offset, task);
	    /* NOTE(review): %x with pointer-sized values below — verify
	       db_printf's format handling on 64-bit configurations. */
	    if (name == 0 || offset > db_maxoff) {
		db_printf("0x%x(", callpc);
		offset = 0;
	    } else
		db_printf("%s(", name);

	    if (!frame) {
		db_printf(")\n");
	    }

	    if (sp) {
		/* Peek at the instruction at callpc: if it is a bare
		   RET, unwind through the stack pointer directly. */
		unsigned char inst = db_get_task_value(callpc, sizeof(char), FALSE, task);
		if (inst == 0xc3) {
		    /* RET, unwind this directly */
		    callpc = db_get_task_value(sp, sizeof(callpc), FALSE, task);
		    sp += sizeof(callpc);
		    continue;
		}
	    }

	    if (!frame) {
		break;
	    }

	    /* Print the (heuristically counted) arguments. */
	    argp = &frame->f_arg0;
	    while (narg > 0) {
		db_printf("%x", db_get_task_value((long)argp,sizeof(long),FALSE,task));
		argp++;
		if (--narg != 0)
		    db_printf(",");
	    }
	    if (narg < 0)
		db_printf("...");
	    db_printf(")");
	    if (offset) {
		db_printf("+0x%x", offset);
	    }
	    if (db_line_at_pc(0, &filename, &linenum, callpc)) {
		db_printf(" [%s", filename);
		if (linenum > 0)
		    db_printf(":%d", linenum);
		db_printf("]");
	    }
	    db_printf("\n");

	next_frame:
	    db_nextframe(&lastframe, &frame, &sp, &callpc, frame_type, th);

	    /* Detect the transition from kernel to user frames. */
	    if (!INKERNEL(lastframe) ||
		(!INKERNEL(callpc) && !INKERNEL(frame)))
		user_frame++;
	    if (user_frame == 1) {
		db_printf(">>>>> user space <<<<<\n");
		if (kernel_only)
		    break;
	    }
	    /* Frame pointers must strictly decrease up the stack;
	       anything else means a corrupt chain. */
	    if (frame && frame <= lastframe) {
		if (INKERNEL(lastframe) && !INKERNEL(frame))
		    continue;
		db_printf("Bad frame pointer: 0x%x\n", frame);
		break;
	    }
	}
}
+
+#endif /* MACH_KDB */
diff --git a/i386/i386/db_trace.h b/i386/i386/db_trace.h
new file mode 100644
index 0000000..4684f57
--- /dev/null
+++ b/i386/i386/db_trace.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _I386_DB_TRACE_H_
+#define _I386_DB_TRACE_H_
+
+struct i386_frame;
+
+void
+db_i386_stack_trace(
+ thread_t th,
+ struct i386_frame *frame,
+ db_addr_t sp,
+ db_addr_t callpc,
+ db_expr_t count,
+ int flags);
+
+#endif /* _I386_DB_TRACE_H_ */
diff --git a/i386/i386/debug.h b/i386/i386/debug.h
new file mode 100644
index 0000000..84397ba
--- /dev/null
+++ b/i386/i386/debug.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_DEBUG_
+#define _I386_DEBUG_
+
+#ifndef __ASSEMBLER__
+/* Dump a saved state.
+ Probably a good idea to have this around
+ even when DEBUG isn't turned on. */
+void dump_ss(const struct i386_saved_state *st);
+#endif /* __ASSEMBLER__ */
+
+#ifdef DEBUG
+
+
+/* Maximum number of entries in a debug trace.
+ If the buffer overflows, the oldest entries are forgotten. */
+#define DEBUG_TRACE_LEN 512
+
+/* Add the caller's current position to the debug trace buffer.
+ Only the kernel stack needs to be valid;
+ the other data segment registers are not needed
+ and all registers are saved. */
+#ifndef __ASSEMBLER__
+
+#define DEBUG_TRACE _debug_trace(__FILE__,__LINE__)
+
+/* Reset the debug trace buffer so it contains no valid entries. */
+void debug_trace_reset(void);
+
+/* Dump the contents of the trace buffer to the console.
+ Also clears the trace buffer. */
+void debug_trace_dump(void);
+
+#else /* __ASSEMBLER__ */
+
+#define DEBUG_TRACE \
+ pushl $__LINE__ ;\
+ pushl $9f ;\
+ call __debug_trace ;\
+ addl $8,%esp ;\
+ .data ;\
+9: .ascii __FILE__"\0" ;\
+ .text
+
+#endif /* __ASSEMBLER__ */
+
+
+#endif /* DEBUG */
+
+/* XXX #include_next "debug.h" */
+
+#endif /* _I386_DEBUG_ */
diff --git a/i386/i386/debug_i386.c b/i386/i386/debug_i386.c
new file mode 100644
index 0000000..41d032e
--- /dev/null
+++ b/i386/i386/debug_i386.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <kern/printf.h>
+
+#include "thread.h"
+#include "trap.h"
+#include "debug.h"
+#include "spl.h"
+
+void dump_ss(const struct i386_saved_state *st)
+{
+ printf("Dump of i386_saved_state %p:\n", st);
+#if defined(__x86_64__) && ! defined(USER32)
+ printf("RAX %016lx RBX %016lx RCX %016lx RDX %016lx\n",
+ st->eax, st->ebx, st->ecx, st->edx);
+ printf("RSI %016lx RDI %016lx RBP %016lx RSP %016lx\n",
+ st->esi, st->edi, st->ebp, st->uesp);
+ printf("R8 %016lx R9 %016lx R10 %016lx R11 %016lx\n",
+ st->r8, st->r9, st->r10, st->r11);
+ printf("R12 %016lx R13 %016lx R14 %016lx R15 %016lx\n",
+ st->r12, st->r13, st->r14, st->r15);
+ printf("RIP %016lx EFLAGS %08lx\n", st->eip, st->efl);
+#else
+ printf("EAX %08lx EBX %08lx ECX %08lx EDX %08lx\n",
+ st->eax, st->ebx, st->ecx, st->edx);
+ printf("ESI %08lx EDI %08lx EBP %08lx ESP %08lx\n",
+ st->esi, st->edi, st->ebp, st->uesp);
+ printf("CS %04lx SS %04lx "
+ "DS %04lx ES %04lx "
+ "FS %04lx GS %04lx\n",
+ st->cs & 0xffff, st->ss & 0xffff,
+ st->ds & 0xffff, st->es & 0xffff,
+ st->fs & 0xffff, st->gs & 0xffff);
+ printf("v86: DS %04lx ES %04lx FS %04lx GS %04lx\n",
+ st->v86_segs.v86_ds & 0xffff, st->v86_segs.v86_es & 0xffff,
+ st->v86_segs.v86_fs & 0xffff, st->v86_segs.v86_gs & 0xffff);
+ printf("EIP %08lx EFLAGS %08lx\n", st->eip, st->efl);
+#endif
+ printf("trapno %ld: %s, error %08lx\n",
+ st->trapno, trap_name(st->trapno),
+ st->err);
+}
+
+#ifdef DEBUG
+
+struct debug_trace_entry
+{
+ char *filename;
+ int linenum;
+};
+struct debug_trace_entry debug_trace_buf[DEBUG_TRACE_LEN];
+int debug_trace_pos;
+
+void
+debug_trace_reset(void)
+{
+ int s = splhigh();
+ debug_trace_pos = 0;
+ debug_trace_buf[DEBUG_TRACE_LEN-1].filename = 0;
+ splx(s);
+}
+
+static void
+print_entry(int i, int *col)
+{
+ char *fn, *p;
+
+ /* Strip off the path from the filename. */
+ fn = debug_trace_buf[i].filename;
+ for (p = fn; *p; p++)
+ if (*p == '/')
+ fn = p+1;
+
+ printf(" %9s:%-4d", fn, debug_trace_buf[i].linenum);
+ if (++*col == 5)
+ {
+ printf("\n");
+ *col = 0;
+ }
+}
+
+void
+debug_trace_dump(void)
+{
+ int s = splhigh();
+ int i;
+ int col = 0;
+
+ printf("Debug trace dump ");
+
+ /* If the last entry is nonzero,
+ the trace probably wrapped around.
+ Print out all the entries after the current position
+ before all the entries before it,
+ so we get a total of DEBUG_TRACE_LEN entries
+ in correct time order. */
+ if (debug_trace_buf[DEBUG_TRACE_LEN-1].filename != 0)
+ {
+ printf("(full):\n");
+
+ for (i = debug_trace_pos; i < DEBUG_TRACE_LEN; i++)
+ {
+ print_entry(i, &col);
+ }
+ }
+ else
+ printf("(%d entries):\n", debug_trace_pos);
+
+ /* Print the entries before the current position. */
+ for (i = 0; i < debug_trace_pos; i++)
+ {
+ print_entry(i, &col);
+ }
+
+ if (col != 0)
+ printf("\n");
+
+ debug_trace_reset();
+
+ splx(s);
+}
+
+#include <kern/syscall_sw.h>
+
+int syscall_trace = 0;
+task_t syscall_trace_task;
+
+int
+syscall_trace_print(int syscallvec, ...)
+{
+ int syscallnum = syscallvec >> 4;
+ int i;
+ const mach_trap_t *trap = &mach_trap_table[syscallnum];
+
+ if (syscall_trace_task && syscall_trace_task != current_task())
+ goto out;
+
+ printf("%p:%p:%s(",
+ current_task(), current_thread(), trap->mach_trap_name);
+ for (i = 0; i < trap->mach_trap_arg_count; i++) {
+ unsigned long value = (&syscallvec)[1+i];
+ /* Use a crude heuristic to format pointers. */
+ if (value > 1024)
+ printf("0x%08lx", value);
+ else
+ printf("%ld", value);
+
+ if (i + 1 < trap->mach_trap_arg_count)
+ printf(", ");
+ }
+ printf(")\n");
+
+ out:
+ return syscallvec;
+}
+
+#endif /* DEBUG */
diff --git a/i386/i386/debug_trace.S b/i386/i386/debug_trace.S
new file mode 100644
index 0000000..f275e1b
--- /dev/null
+++ b/i386/i386/debug_trace.S
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifdef DEBUG
+
+#include <mach/machine/asm.h>
+#include <i386/xen.h>
+
+#include "debug.h"
+
+ .text
+ENTRY(_debug_trace)
+ pushf
+ cli
+ pushl %eax
+ pushl %ebx
+ .byte 0x36 /* SS: bug in gas? */
+ movl %ss:EXT(debug_trace_pos),%eax
+ movl 16(%esp),%ebx
+ movl %ebx,%ss:EXT(debug_trace_buf)(,%eax,8)
+ movl 20(%esp),%ebx
+ movl %ebx,%ss:EXT(debug_trace_buf)+4(,%eax,8)
+ incl %eax
+ andl $DEBUG_TRACE_LEN-1,%eax
+ .byte 0x36 /* SS: bug in gas? */
+ movl %eax,%ss:EXT(debug_trace_pos)
+ popl %ebx
+ popl %eax
+ popf
+ ret
+
+#endif /* DEBUG */
+
+/* XXX gas bug? need at least one symbol... */
+foo:
+
diff --git a/i386/i386/eflags.h b/i386/i386/eflags.h
new file mode 100644
index 0000000..58ad968
--- /dev/null
+++ b/i386/i386/eflags.h
@@ -0,0 +1,35 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _KERNEL_I386_EFLAGS_H_
+#define _KERNEL_I386_EFLAGS_H_
+
+#include <mach/machine/eflags.h>
+
+/* Eflags bit combinations used by the Mach kernel. */
+#define EFL_USER_SET (EFL_IF)
+#define EFL_USER_CLEAR (EFL_IOPL|EFL_NT|EFL_RF)
+
+#endif /* _KERNEL_I386_EFLAGS_H_ */
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
new file mode 100644
index 0000000..4cd31dd
--- /dev/null
+++ b/i386/i386/fpu.c
@@ -0,0 +1,948 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * Support for 80387 floating point or FP emulator.
+ */
+
+#include <string.h>
+
+#include <mach/exception.h>
+#include <mach/machine/thread_status.h>
+#include <mach/machine/fp_reg.h>
+
+#include <kern/debug.h>
+#include <machine/machspl.h> /* spls */
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/slab.h>
+
+#include <i386/thread.h>
+#include <i386/fpu.h>
+#include <i386/pio.h>
+#include <i386/irq.h>
+#include <i386/locore.h>
+#include <i386/trap.h>
+#include "cpu_number.h"
+
+#if 0
+#include <i386/ipl.h>
+#define ASSERT_IPL(L) \
+{ \
+ if (curr_ipl[cpu_number()] != L) { \
+ printf("IPL is %d, expected %d\n", curr_ipl[cpu_number()], L); \
+ panic("fpu: wrong ipl"); \
+ } \
+}
+#else
+#define ASSERT_IPL(L)
+#endif
+
+_Static_assert(sizeof(struct i386_xfp_xstate_header) == 8*8,
+ "struct i386_xfp_xstate_header size");
+_Static_assert(sizeof(struct i386_xfp_save) == 512 + 8*8,
+ "struct i386_xfp_save size");
+
+int fp_kind = FP_387; /* 80387 present */
+enum fp_save_kind fp_save_kind = FP_FNSAVE; /* Which instruction we use to save/restore FPU state */
+uint64_t fp_xsave_support; /* Bitmap of supported XSAVE save areas */
+unsigned fp_xsave_size = sizeof(struct i386_fpsave_state);
+struct i386_fpsave_state *fp_default_state;
+struct kmem_cache ifps_cache; /* cache for FPU save area */
+static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
+
+#if NCPUS == 1
+volatile thread_t fp_thread = THREAD_NULL;
+ /* thread whose state is in FPU */
+ /* always THREAD_NULL if emulating
+ FPU */
+volatile thread_t fp_intr_thread = THREAD_NULL;
+
+
+#define clear_fpu() \
+ { \
+ set_ts(); \
+ fp_thread = THREAD_NULL; \
+ }
+
+#else /* NCPUS > 1 */
+#define clear_fpu() \
+ { \
+ set_ts(); \
+ }
+
+#endif
+
+
+/*
+ * Look for FPU and initialize it.
+ * Called on each CPU.
+ */
+void
+init_fpu(void)
+{
+ unsigned short status, control;
+
+#ifdef MACH_RING1
+ clear_ts();
+#else /* MACH_RING1 */
+ unsigned int native = 0;
+
+ if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486)
+ native = CR0_NE;
+
+ /*
+ * Check for FPU by initializing it,
+ * then trying to read the correct bit patterns from
+ * the control and status registers.
+ */
+ set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | native); /* allow use of FPU */
+#endif /* MACH_RING1 */
+
+ fninit();
+ status = fnstsw();
+ fnstcw(&control);
+
+ if ((status & 0xff) == 0 &&
+ (control & 0x103f) == 0x3f)
+ {
+ /*
+ * We have a FPU of some sort.
+ * Compare -infinity against +infinity
+ * to check whether we have a 287 or a 387.
+ */
+ volatile double fp_infinity, fp_one, fp_zero;
+ fp_one = 1.0;
+ fp_zero = 0.0;
+ fp_infinity = fp_one / fp_zero;
+ if (fp_infinity == -fp_infinity) {
+ /*
+ * We have an 80287.
+ */
+ fp_kind = FP_287;
+ fp_save_kind = FP_FNSAVE;
+ asm volatile(".byte 0xdb; .byte 0xe4"); /* fnsetpm */
+ }
+ else {
+ /*
+ * We have a 387.
+ */
+ fp_kind = FP_387;
+ fp_save_kind = FP_FNSAVE;
+
+ if (CPU_HAS_FEATURE(CPU_FEATURE_XSAVE)) {
+ unsigned eax, ebx, ecx, edx;
+ unsigned xsave_cpu_features;
+
+ eax = 0xd;
+ ecx = 0x0;
+ cpuid(eax, ebx, ecx, edx);
+ fp_xsave_support = eax + (((uint64_t) edx) << 32);
+
+#ifndef MACH_RING1
+ set_cr4(get_cr4() | CR4_OSFXSR | CR4_OSXSAVE);
+ set_xcr0(fp_xsave_support);
+#endif /* MACH_RING1 */
+
+ eax = 0xd;
+ ecx = 0x1;
+ cpuid(eax, ebx, ecx, edx);
+ xsave_cpu_features = eax;
+
+ if (xsave_cpu_features & CPU_FEATURE_XSAVES) {
+ // all states enabled by XCR0|IA32_XSS
+ fp_xsave_size = offsetof(struct i386_fpsave_state, xfp_save_state) + ebx;
+ if (fp_xsave_size < sizeof(struct i386_fpsave_state))
+ panic("CPU-provided xstate size %d "
+ "is smaller than our minimum %d!\n",
+ fp_xsave_size,
+ (int) sizeof(struct i386_fpsave_state));
+
+ fp_save_kind = FP_XSAVES;
+ } else {
+ eax = 0xd;
+ ecx = 0x0;
+ cpuid(eax, ebx, ecx, edx);
+ // all states enabled by XCR0
+ fp_xsave_size = offsetof(struct i386_fpsave_state, xfp_save_state) + ebx;
+ if(fp_xsave_size < sizeof(struct i386_fpsave_state))
+ panic("CPU-provided xstate size %d "
+ "is smaller than our minimum %d!\n",
+ fp_xsave_size,
+ (int) sizeof(struct i386_fpsave_state));
+
+ if (xsave_cpu_features & CPU_FEATURE_XSAVEOPT)
+ fp_save_kind = FP_XSAVEOPT;
+ else if (xsave_cpu_features & CPU_FEATURE_XSAVEC)
+ fp_save_kind = FP_XSAVEC;
+ else
+ fp_save_kind = FP_XSAVE;
+ }
+
+ fp_kind = FP_387X;
+ }
+
+ else if (CPU_HAS_FEATURE(CPU_FEATURE_FXSR)) {
+#ifndef MACH_RING1
+ set_cr4(get_cr4() | CR4_OSFXSR);
+#endif /* MACH_RING1 */
+ fp_kind = FP_387FX;
+ fp_save_kind = FP_FXSAVE;
+ }
+
+ if (fp_save_kind != FP_FNSAVE) {
+ /* Compute mxcsr_feature_mask. */
+ static /* because we _need_ alignment */
+ struct i386_xfp_save save;
+ unsigned long mask;
+ fxsave(&save);
+ mask = save.fp_mxcsr_mask;
+ if (!mask)
+ mask = 0x0000ffbf;
+ mxcsr_feature_mask &= mask;
+ }
+ }
+#ifdef MACH_RING1
+ set_ts();
+#else /* MACH_RING1 */
+ /*
+ * Trap wait instructions. Turn off FPU for now.
+ */
+ set_cr0(get_cr0() | CR0_TS | CR0_MP);
+#endif /* MACH_RING1 */
+ }
+ else {
+ /*
+ * NO FPU.
+ */
+ panic("No FPU!");
+ }
+}
+
+/*
+ * Initialize FP handling.
+ */
+void
+fpu_module_init(void)
+{
+ kmem_cache_init(&ifps_cache, "i386_fpsave_state",
+ fp_xsave_size,
+ alignof(struct i386_fpsave_state),
+ NULL, 0);
+
+ fp_default_state = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
+ memset(fp_default_state, 0, fp_xsave_size);
+
+ /* Get default state from CPU. */
+ clear_ts();
+ fninit();
+ switch (fp_save_kind) {
+ case FP_XSAVEC:
+ case FP_XSAVES:
+ /* XRSTORS requires compact format, a bit faster anyway */
+ fp_default_state->xfp_save_state.header.xcomp_bv = XSAVE_XCOMP_BV_COMPACT;
+ /* Fallthrough */
+ case FP_XSAVE:
+ case FP_XSAVEOPT:
+ case FP_FXSAVE:
+ fxsave(&fp_default_state->xfp_save_state);
+ break;
+ case FP_FNSAVE:
+ fnsave(&fp_default_state->fp_save_state);
+ break;
+ }
+ set_ts();
+
+ fp_default_state->fp_valid = TRUE;
+}
+
+/*
+ * Free a FPU save area.
+ * Called only when thread terminating - no locking necessary.
+ */
+void
+fp_free(struct i386_fpsave_state *fps)
+{
+ASSERT_IPL(SPL0);
+#if NCPUS == 1
+ if ((fp_thread != THREAD_NULL) && (fp_thread->pcb->ims.ifps == fps)) {
+ /*
+ * Make sure we don't get FPU interrupts later for
+ * this thread
+ */
+ clear_ts();
+ fwait();
+
+ /* Mark it free and disable access */
+ clear_fpu();
+ }
+#endif /* NCPUS == 1 */
+ kmem_cache_free(&ifps_cache, (vm_offset_t) fps);
+}
+
+/* The two following functions were stolen from Linux's i387.c */
+static inline unsigned short
+twd_i387_to_fxsr (unsigned short twd)
+{
+ unsigned int tmp; /* to avoid 16 bit prefixes in the code */
+
+ /* Transform each pair of bits into 01 (valid) or 00 (empty) */
+ tmp = ~twd;
+ tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+ /* and move the valid bits to the lower byte. */
+ tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
+ tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
+ tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+ return tmp;
+}
+
+static inline unsigned long
+twd_fxsr_to_i387 (struct i386_xfp_save *fxsave)
+{
+ struct {
+ unsigned short significand[4];
+ unsigned short exponent;
+ unsigned short padding[3];
+ } *st = NULL;
+ unsigned long tos = (fxsave->fp_status >> 11) & 7;
+ unsigned long twd = (unsigned long) fxsave->fp_tag;
+ unsigned long tag;
+ unsigned long ret = 0xffff0000u;
+ int i;
+
+#define FPREG_ADDR(f, n) ((void *)&(f)->fp_reg_word + (n) * 16)
+
+ for (i = 0 ; i < 8 ; i++) {
+ if (twd & 0x1) {
+ st = FPREG_ADDR (fxsave, (i - tos) & 7);
+
+ switch (st->exponent & 0x7fff) {
+ case 0x7fff:
+ tag = 2; /* Special */
+ break;
+ case 0x0000:
+ if (!st->significand[0] &&
+ !st->significand[1] &&
+ !st->significand[2] &&
+ !st->significand[3] ) {
+ tag = 1; /* Zero */
+ } else {
+ tag = 2; /* Special */
+ }
+ break;
+ default:
+ if (st->significand[3] & 0x8000) {
+ tag = 0; /* Valid */
+ } else {
+ tag = 2; /* Special */
+ }
+ break;
+ }
+ } else {
+ tag = 3; /* Empty */
+ }
+ ret |= (tag << (2 * i));
+ twd = twd >> 1;
+ }
+ return ret;
+}
+
+/*
+ * Set the floating-point state for a thread.
+ * If the thread is not the current thread, it is
+ * not running (held). Locking needed against
+ * concurrent fpu_set_state or fpu_get_state.
+ */
+kern_return_t
+fpu_set_state(const thread_t thread,
+ struct i386_float_state *state)
+{
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps;
+ struct i386_fpsave_state *new_ifps;
+
+ASSERT_IPL(SPL0);
+ if (fp_kind == FP_NO)
+ return KERN_FAILURE;
+
+#if NCPUS == 1
+
+ /*
+ * If this thread`s state is in the FPU,
+ * discard it; we are replacing the entire
+ * FPU state.
+ */
+ if (fp_thread == thread) {
+ clear_ts();
+ fwait(); /* wait for possible interrupt */
+ clear_fpu(); /* no state in FPU */
+ }
+#endif
+
+ if (state->initialized == 0) {
+ /*
+ * new FPU state is 'invalid'.
+ * Deallocate the fp state if it exists.
+ */
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ pcb->ims.ifps = 0;
+ simple_unlock(&pcb->lock);
+
+ if (ifps != 0) {
+ kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
+ }
+ }
+ else {
+ /*
+ * Valid state. Allocate the fp state if there is none.
+ */
+ struct i386_fp_save *user_fp_state;
+ struct i386_fp_regs *user_fp_regs;
+
+ user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
+ user_fp_regs = (struct i386_fp_regs *)
+ &state->hw_state[sizeof(struct i386_fp_save)];
+
+ new_ifps = 0;
+ Retry:
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ if (ifps == 0) {
+ if (new_ifps == 0) {
+ simple_unlock(&pcb->lock);
+ new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
+ goto Retry;
+ }
+ ifps = new_ifps;
+ new_ifps = 0;
+ pcb->ims.ifps = ifps;
+ }
+
+ /*
+ * Ensure that reserved parts of the environment are 0.
+ */
+ memset(ifps, 0, fp_xsave_size);
+
+ if (fp_save_kind != FP_FNSAVE) {
+ int i;
+
+ ifps->xfp_save_state.fp_control = user_fp_state->fp_control;
+ ifps->xfp_save_state.fp_status = user_fp_state->fp_status;
+ ifps->xfp_save_state.fp_tag = twd_i387_to_fxsr(user_fp_state->fp_tag);
+ ifps->xfp_save_state.fp_eip = user_fp_state->fp_eip;
+ ifps->xfp_save_state.fp_cs = user_fp_state->fp_cs;
+ ifps->xfp_save_state.fp_opcode = user_fp_state->fp_opcode;
+ ifps->xfp_save_state.fp_dp = user_fp_state->fp_dp;
+ ifps->xfp_save_state.fp_ds = user_fp_state->fp_ds;
+ for (i=0; i<8; i++)
+ memcpy(&ifps->xfp_save_state.fp_reg_word[i], &user_fp_regs->fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
+ } else {
+ ifps->fp_save_state.fp_control = user_fp_state->fp_control;
+ ifps->fp_save_state.fp_status = user_fp_state->fp_status;
+ ifps->fp_save_state.fp_tag = user_fp_state->fp_tag;
+ ifps->fp_save_state.fp_eip = user_fp_state->fp_eip;
+ ifps->fp_save_state.fp_cs = user_fp_state->fp_cs;
+ ifps->fp_save_state.fp_opcode = user_fp_state->fp_opcode;
+ ifps->fp_save_state.fp_dp = user_fp_state->fp_dp;
+ ifps->fp_save_state.fp_ds = user_fp_state->fp_ds;
+ ifps->fp_regs = *user_fp_regs;
+ }
+
+ simple_unlock(&pcb->lock);
+ if (new_ifps != 0)
+ kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Get the floating-point state for a thread.
+ * If the thread is not the current thread, it is
+ * not running (held). Locking needed against
+ * concurrent fpu_set_state or fpu_get_state.
+ */
+kern_return_t
+fpu_get_state(const thread_t thread,
+ struct i386_float_state *state)
+{
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps;
+
+ASSERT_IPL(SPL0);
+ if (fp_kind == FP_NO)
+ return KERN_FAILURE;
+
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ if (ifps == 0) {
+ /*
+ * No valid floating-point state.
+ */
+ simple_unlock(&pcb->lock);
+ memset(state, 0, sizeof(struct i386_float_state));
+ return KERN_SUCCESS;
+ }
+
+ /* Make sure we`ve got the latest fp state info */
+ /* If the live fpu state belongs to our target */
+#if NCPUS == 1
+ if (thread == fp_thread)
+#else
+ if (thread == current_thread())
+#endif
+ {
+ clear_ts();
+ fp_save(thread);
+ clear_fpu();
+ }
+
+ state->fpkind = fp_kind;
+ state->exc_status = 0;
+
+ {
+ struct i386_fp_save *user_fp_state;
+ struct i386_fp_regs *user_fp_regs;
+
+ state->initialized = ifps->fp_valid;
+
+ user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
+ user_fp_regs = (struct i386_fp_regs *)
+ &state->hw_state[sizeof(struct i386_fp_save)];
+
+ /*
+ * Ensure that reserved parts of the environment are 0.
+ */
+ memset(user_fp_state, 0, sizeof(struct i386_fp_save));
+
+ if (fp_save_kind != FP_FNSAVE) {
+ int i;
+
+ user_fp_state->fp_control = ifps->xfp_save_state.fp_control;
+ user_fp_state->fp_status = ifps->xfp_save_state.fp_status;
+ user_fp_state->fp_tag = twd_fxsr_to_i387(&ifps->xfp_save_state);
+ user_fp_state->fp_eip = ifps->xfp_save_state.fp_eip;
+ user_fp_state->fp_cs = ifps->xfp_save_state.fp_cs;
+ user_fp_state->fp_opcode = ifps->xfp_save_state.fp_opcode;
+ user_fp_state->fp_dp = ifps->xfp_save_state.fp_dp;
+ user_fp_state->fp_ds = ifps->xfp_save_state.fp_ds;
+ for (i=0; i<8; i++)
+ memcpy(&user_fp_regs->fp_reg_word[i], &ifps->xfp_save_state.fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
+ } else {
+ user_fp_state->fp_control = ifps->fp_save_state.fp_control;
+ user_fp_state->fp_status = ifps->fp_save_state.fp_status;
+ user_fp_state->fp_tag = ifps->fp_save_state.fp_tag;
+ user_fp_state->fp_eip = ifps->fp_save_state.fp_eip;
+ user_fp_state->fp_cs = ifps->fp_save_state.fp_cs;
+ user_fp_state->fp_opcode = ifps->fp_save_state.fp_opcode;
+ user_fp_state->fp_dp = ifps->fp_save_state.fp_dp;
+ user_fp_state->fp_ds = ifps->fp_save_state.fp_ds;
+ *user_fp_regs = ifps->fp_regs;
+ }
+ }
+ simple_unlock(&pcb->lock);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Initialize FPU for an already-running thread.
+ */
+static void fpinit(thread_t thread)
+{
+ unsigned short control;
+
+ASSERT_IPL(SPL0);
+ clear_ts();
+ fpu_rstor(fp_default_state);
+
+ control = thread->pcb->init_control;
+ if (control)
+ fldcw(control);
+}
+
+/*
+ * Inherit FPU state from a parent to a child, if any
+ */
+void fpinherit(thread_t parent_thread, thread_t thread)
+{
+ pcb_t pcb = parent_thread->pcb;
+ struct i386_fpsave_state *ifps;
+
+ ifps = pcb->ims.ifps;
+ if (ifps) {
+ /* Parent does have a state, inherit it */
+ if (ifps->fp_valid == TRUE)
+ thread->pcb->init_control = ifps->fp_save_state.fp_control;
+ else
+ /* State is in the FPU, fetch from there */
+ fnstcw(&thread->pcb->init_control);
+ }
+}
+
+/*
+ * Coprocessor not present.
+ */
+void
+fpnoextflt(void)
+{
+ /*
+ * Enable FPU use.
+ */
+ASSERT_IPL(SPL0);
+ clear_ts();
+#if NCPUS == 1
+
+ /*
+ * If this thread`s state is in the FPU, we are done.
+ */
+ if (fp_thread == current_thread())
+ return;
+
+ /* Make sure we don't do fpsave() in fp_intr while doing fpsave()
+ * here if the current fpu instruction generates an error.
+ */
+ fwait();
+ /*
+ * If another thread`s state is in the FPU, save it.
+ */
+ if (fp_thread != THREAD_NULL) {
+ fp_save(fp_thread);
+ }
+
+ /*
+ * Give this thread the FPU.
+ */
+ fp_thread = current_thread();
+
+#endif /* NCPUS == 1 */
+
+ /*
+ * Load this thread`s state into the FPU.
+ */
+ fp_load(current_thread());
+}
+
+/*
+ * FPU overran end of segment.
+ * Re-initialize FPU. Floating point state is not valid.
+ */
+void
+fpextovrflt(void)
+{
+ thread_t thread = current_thread();
+ pcb_t pcb;
+ struct i386_fpsave_state *ifps;
+
+#if NCPUS == 1
+
+ /*
+ * Is exception for the currently running thread?
+ */
+ if (fp_thread != thread) {
+ /* Uh oh... */
+ panic("fpextovrflt");
+ }
+#endif
+
+ /*
+ * This is a non-recoverable error.
+ * Invalidate the thread`s FPU state.
+ */
+ pcb = thread->pcb;
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ pcb->ims.ifps = 0;
+ simple_unlock(&pcb->lock);
+
+ /*
+ * Re-initialize the FPU.
+ */
+ clear_ts();
+ fninit();
+
+ /*
+ * And disable access.
+ */
+ clear_fpu();
+
+ if (ifps)
+ kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
+
+ /*
+ * Raise exception.
+ */
+ i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0);
+ /*NOTREACHED*/
+}
+
+static int
+fphandleerr(void)
+{
+ thread_t thread = current_thread();
+
+ /*
+ * Save the FPU context to the thread using it.
+ */
+#if NCPUS == 1
+ if (fp_thread == THREAD_NULL) {
+ printf("fphandleerr: FPU not belonging to anyone!\n");
+ clear_ts();
+ fninit();
+ clear_fpu();
+ return 1;
+ }
+
+ if (fp_thread != thread) {
+ /*
+ * FPU exception is for a different thread.
+ * When that thread again uses the FPU an exception will be
+ * raised in fp_load. Remember the condition in fp_valid (== 2).
+ */
+ clear_ts();
+ fp_save(fp_thread);
+ fp_thread->pcb->ims.ifps->fp_valid = 2;
+ fninit();
+ clear_fpu();
+ /* leave fp_intr_thread THREAD_NULL */
+ return 1;
+ }
+#endif /* NCPUS == 1 */
+
+ /*
+ * Save the FPU state and turn off the FPU.
+ */
+ clear_ts();
+ fp_save(thread);
+ fninit();
+ clear_fpu();
+
+ return 0;
+}
+
+/*
+ * FPU error. Called by exception handler.
+ */
+void
+fpexterrflt(void)
+{
+ thread_t thread = current_thread();
+
+ if (fphandleerr())
+ return;
+
+ /*
+ * Raise FPU exception.
+ * Locking not needed on pcb->ims.ifps,
+ * since thread is running.
+ */
+ i386_exception(EXC_ARITHMETIC,
+ EXC_I386_EXTERR,
+ fp_save_kind != FP_FNSAVE ?
+ thread->pcb->ims.ifps->xfp_save_state.fp_status :
+ thread->pcb->ims.ifps->fp_save_state.fp_status);
+ /*NOTREACHED*/
+}
+
+#ifndef MACH_RING1
+/*
+ * FPU error. Called by AST.
+ */
+void
+fpastintr(void)
+{
+ thread_t thread = current_thread();
+
+ASSERT_IPL(SPL0);
+#if NCPUS == 1
+ /*
+ * Since FPU errors only occur on ESC or WAIT instructions,
+ * the current thread should own the FPU. If it didn`t,
+ * we should have gotten the task-switched interrupt first.
+ */
+ if (fp_thread != THREAD_NULL) {
+ panic("fpexterrflt");
+ return;
+ }
+
+ /*
+ * Check if we got a context switch between the interrupt and the AST
+ * This can happen if the interrupt arrived after the FPU AST was
+ * checked. In this case, raise the exception in fp_load when this
+ * thread next time uses the FPU. Remember exception condition in
+ * fp_valid (extended boolean 2).
+ */
+ if (fp_intr_thread != thread) {
+ if (fp_intr_thread == THREAD_NULL) {
+ panic("fpexterrflt: fp_intr_thread == THREAD_NULL");
+ return;
+ }
+ fp_intr_thread->pcb->ims.ifps->fp_valid = 2;
+ fp_intr_thread = THREAD_NULL;
+ return;
+ }
+ fp_intr_thread = THREAD_NULL;
+#else /* NCPUS == 1 */
+ /*
+ * Save the FPU state and turn off the FPU.
+ */
+ fp_save(thread);
+#endif /* NCPUS == 1 */
+
+ /*
+ * Raise FPU exception.
+ * Locking not needed on pcb->ims.ifps,
+ * since thread is running.
+ */
+ i386_exception(EXC_ARITHMETIC,
+ EXC_I386_EXTERR,
+ fp_save_kind != FP_FNSAVE ?
+ thread->pcb->ims.ifps->xfp_save_state.fp_status :
+ thread->pcb->ims.ifps->fp_save_state.fp_status);
+ /*NOTREACHED*/
+}
+#endif /* MACH_RING1 */
+
+/*
+ * Save FPU state.
+ *
+ * Locking not needed:
+ * . if called from fpu_get_state, pcb already locked.
+ * . if called from fpnoextflt or fp_intr, we are single-cpu
+ * . otherwise, thread is running.
+ */
+void
+fp_save(thread_t thread)
+{
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps = pcb->ims.ifps;
+
+ if (ifps != 0 && !ifps->fp_valid)
+ /* registers are in FPU */
+ fpu_save(ifps);
+}
+
+/*
+ * Restore FPU state from PCB.
+ *
+ * Locking not needed; always called on the current thread.
+ */
+void
+fp_load(thread_t thread)
+{
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps;
+
+ASSERT_IPL(SPL0);
+ ifps = pcb->ims.ifps;
+ if (ifps == 0) {
+ ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
+ memcpy(ifps, fp_default_state, fp_xsave_size);
+ pcb->ims.ifps = ifps;
+ fpinit(thread);
+#if 1
+/*
+ * I'm not sure this is needed. Does the fpu regenerate the interrupt in
+ * frstor or not? Without this code we may miss some exceptions, with it
+ * we might send too many exceptions.
+ */
+ } else if (ifps->fp_valid == 2) {
+ /* delayed exception pending */
+
+ ifps->fp_valid = TRUE;
+ clear_fpu();
+ /*
+ * Raise FPU exception.
+ * Locking not needed on pcb->ims.ifps,
+ * since thread is running.
+ */
+ i386_exception(EXC_ARITHMETIC,
+ EXC_I386_EXTERR,
+ fp_save_kind != FP_FNSAVE ?
+ thread->pcb->ims.ifps->xfp_save_state.fp_status :
+ thread->pcb->ims.ifps->fp_save_state.fp_status);
+ /*NOTREACHED*/
+#endif
+ } else if (! ifps->fp_valid) {
+ printf("fp_load: invalid FPU state!\n");
+ fninit ();
+ } else {
+ fpu_rstor(ifps);
+ }
+ ifps->fp_valid = FALSE; /* in FPU */
+}
+
+#if (defined(AT386) || defined(ATX86_64)) && !defined(MACH_XEN)
+/*
+ * Handle a coprocessor error interrupt on the AT386.
+ * This comes in on line 5 of the slave PIC at SPL1.
+ */
+void
+fpintr(int unit)
+{
+ spl_t s;
+#if NCPUS == 1
+ thread_t thread = current_thread();
+#endif /* NCPUS == 1 */
+
+ASSERT_IPL(SPL1);
+ /*
+ * Turn off the extended 'busy' line.
+ */
+ outb(0xf0, 0);
+
+ if (fphandleerr())
+ return;
+
+#if NCPUS == 1
+ if (fp_intr_thread != THREAD_NULL && fp_intr_thread != thread)
+ panic("fp_intr: already caught intr");
+ fp_intr_thread = thread;
+#endif /* NCPUS == 1 */
+
+ /*
+ * Since we are running on the interrupt stack, we must
+ * signal the thread to take the exception when we return
+ * to user mode. Use an AST to do this.
+ *
+ * Don`t set the thread`s AST field. If the thread is
+ * descheduled before it takes the AST, it will notice
+ * the FPU error when it reloads its FPU state.
+ */
+ s = splsched();
+ ast_on(cpu_number(), AST_I386_FP);
+ splx(s);
+}
+#endif /* AT386 */
diff --git a/i386/i386/fpu.h b/i386/i386/fpu.h
new file mode 100644
index 0000000..51e0f31
--- /dev/null
+++ b/i386/i386/fpu.h
@@ -0,0 +1,250 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_FPU_H_
+#define _I386_FPU_H_
+
+/*
+ * Macro definitions for routines to manipulate the
+ * floating-point processor.
+ */
+
+#include <sys/types.h>
+#include <i386/proc_reg.h>
+#include <kern/thread.h>
+
+/*
+ * FPU instructions.
+ */
+#define fninit() \
+ asm volatile("fninit")
+
+#define fnstcw(control) \
+ asm("fnstcw %0" : "=m" (*(unsigned short *)(control)))
+
+#define fstcw(control) \
+ asm volatile("fstcw %0" : "=m" (*(unsigned short *)(control)))
+
+#define fldcw(control) \
+ asm volatile("fldcw %0" : : "m" (*(unsigned short *) &(control)) )
+
+#define fnstsw() \
+ ({ \
+ unsigned short _status__; \
+ asm("fnstsw %0" : "=ma" (_status__)); \
+ _status__; \
+ })
+
+#define fnclex() \
+ asm volatile("fnclex")
+
+#define fnsave(state) \
+ asm volatile("fnsave %0" : "=m" (*state))
+
+#define frstor(state) \
+ asm volatile("frstor %0" : : "m" (state))
+
+#define fxsave(state) \
+ asm volatile("fxsave %0" : "=m" (*state))
+
+#define fxrstor(state) \
+ asm volatile("fxrstor %0" : : "m" (state))
+
+static inline uint64_t xgetbv(uint32_t n) {
+ uint32_t eax, edx;
+ asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (n));
+ return eax + ((uint64_t) edx << 32);
+}
+
+static inline uint64_t get_xcr0(void) {
+ return xgetbv(0);
+}
+
+static inline void xsetbv(uint32_t n, uint64_t value) {
+ uint32_t eax, edx;
+
+ eax = value;
+ edx = value >> 32;
+
+ asm volatile("xsetbv" : : "c" (n), "a" (eax), "d" (edx));
+}
+
+static inline void set_xcr0(uint64_t value) {
+ xsetbv(0, value);
+}
+
+#define CPU_XCR0_X87 (1 << 0)
+#define CPU_XCR0_SSE (1 << 1)
+#define CPU_XCR0_AVX (1 << 2)
+#define CPU_XCR0_MPX (3 << 3)
+#define CPU_XCR0_AVX512 (7 << 5)
+
+#define CPU_FEATURE_XSAVEOPT (1 << 0)
+#define CPU_FEATURE_XSAVEC (1 << 1)
+#define CPU_FEATURE_XGETBV1 (1 << 2)
+#define CPU_FEATURE_XSAVES (1 << 3)
+
+#define xsave(state) \
+ asm volatile("xsave %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xsaveopt(state) \
+ asm volatile("xsaveopt %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xsavec(state) \
+ asm volatile("xsavec %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xsaves(state) \
+ asm volatile("xsaves %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xrstor(state) \
+ asm volatile("xrstor %0" : : "m" (state) \
+ , "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xrstors(state) \
+ asm volatile("xrstors %0" : : "m" (state) \
+ , "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define fwait() \
+ asm("fwait");
+
+#define fpu_load_context(pcb)
+
+#define fpu_save(ifps) \
+ do { \
+ switch (fp_save_kind) { \
+ case FP_XSAVE: \
+ xsave(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVEOPT: \
+ xsaveopt(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVEC: \
+ xsavec(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVES: \
+ xsaves(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_FXSAVE: \
+ fxsave(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_FNSAVE: \
+ fnsave(&(ifps)->fp_save_state); \
+ break; \
+ } \
+ (ifps)->fp_valid = TRUE; \
+ } while (0)
+
+#define fpu_rstor(ifps) \
+ do { \
+ switch (fp_save_kind) { \
+ case FP_XSAVE: \
+ case FP_XSAVEOPT: \
+ case FP_XSAVEC: \
+ xrstor((ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVES: \
+ xrstors((ifps)->xfp_save_state); \
+ break; \
+ case FP_FXSAVE: \
+ fxrstor((ifps)->xfp_save_state); \
+ break; \
+ case FP_FNSAVE: \
+ frstor((ifps)->fp_save_state); \
+ break; \
+ } \
+ } while (0)
+
+/*
+ * Save thread`s FPU context.
+ * If only one CPU, we just set the task-switched bit,
+ * to keep the new thread from using the coprocessor.
+ * If multiple CPUs, we save the entire state.
+ */
+#if NCPUS > 1
+#define fpu_save_context(thread) \
+ { \
+ struct i386_fpsave_state *ifps; \
+ ifps = (thread)->pcb->ims.ifps; \
+ if (ifps != 0 && !ifps->fp_valid) { \
+ /* registers are in FPU - save to memory */ \
+ fpu_save(ifps); \
+ set_ts(); \
+ } \
+ }
+
+#else /* NCPUS == 1 */
+#define fpu_save_context(thread) \
+ { \
+ set_ts(); \
+ }
+
+#endif /* NCPUS == 1 */
+
+enum fp_save_kind {
+ FP_FNSAVE,
+ FP_FXSAVE,
+ FP_XSAVE,
+ FP_XSAVEOPT,
+ FP_XSAVEC,
+ FP_XSAVES,
+};
+extern int fp_kind;
+extern enum fp_save_kind fp_save_kind;
+extern struct i386_fpsave_state *fp_default_state;
+extern uint64_t fp_xsave_support;
+extern void fp_save(thread_t thread);
+extern void fp_load(thread_t thread);
+extern void fp_free(struct i386_fpsave_state *fps);
+extern void fpu_module_init(void);
+extern kern_return_t fpu_set_state(
+ thread_t thread,
+ struct i386_float_state *state);
+extern kern_return_t fpu_get_state(
+ thread_t thread,
+ struct i386_float_state *state);
+extern void fpnoextflt(void);
+extern void fpextovrflt(void);
+extern void fpexterrflt(void);
+extern void fpastintr(void);
+extern void init_fpu(void);
+extern void fpintr(int unit);
+extern void fpinherit(thread_t parent_thread, thread_t thread);
+
+#endif /* _I386_FPU_H_ */
diff --git a/i386/i386/gdt.c b/i386/i386/gdt.c
new file mode 100644
index 0000000..4edd3ec
--- /dev/null
+++ b/i386/i386/gdt.c
@@ -0,0 +1,166 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Global descriptor table.
+ */
+#include <mach/machine/vm_types.h>
+#include <mach/xen.h>
+
+#include <kern/assert.h>
+#include <intel/pmap.h>
+#include <kern/cpu_number.h>
+#include <machine/percpu.h>
+
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "mp_desc.h"
+
+#ifdef MACH_PV_DESCRIPTORS
+/* It is actually defined in xen_boothdr.S */
+extern
+#endif /* MACH_PV_DESCRIPTORS */
+struct real_descriptor gdt[GDTSZ];
+
+static void
+gdt_fill(int cpu, struct real_descriptor *mygdt)
+{
+ /* Initialize the kernel code and data segment descriptors. */
+#ifdef __x86_64__
+ assert(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS == 0);
+ _fill_gdt_descriptor(mygdt, KERNEL_CS, 0, 0, ACC_PL_K|ACC_CODE_R, SZ_64);
+ _fill_gdt_descriptor(mygdt, KERNEL_DS, 0, 0, ACC_PL_K|ACC_DATA_W, SZ_64);
+#ifndef MACH_PV_DESCRIPTORS
+ _fill_gdt_descriptor(mygdt, LINEAR_DS, 0, 0, ACC_PL_K|ACC_DATA_W, SZ_64);
+#endif /* MACH_PV_DESCRIPTORS */
+#else
+ _fill_gdt_descriptor(mygdt, KERNEL_CS,
+ LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+ LINEAR_MAX_KERNEL_ADDRESS - (LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) - 1,
+ ACC_PL_K|ACC_CODE_R, SZ_32);
+ _fill_gdt_descriptor(mygdt, KERNEL_DS,
+ LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+ LINEAR_MAX_KERNEL_ADDRESS - (LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) - 1,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+#ifndef MACH_PV_DESCRIPTORS
+ _fill_gdt_descriptor(mygdt, LINEAR_DS,
+ 0,
+ 0xffffffff,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+#endif /* MACH_PV_DESCRIPTORS */
+ vm_offset_t thiscpu = kvtolin(&percpu_array[cpu]);
+ _fill_gdt_descriptor(mygdt, PERCPU_DS,
+ thiscpu,
+ thiscpu + sizeof(struct percpu) - 1,
+#ifdef __x86_64__
+ ACC_PL_K|ACC_DATA_W, SZ_64
+#else
+ ACC_PL_K|ACC_DATA_W, SZ_32
+#endif
+ );
+#endif
+
+#ifdef MACH_PV_DESCRIPTORS
+ unsigned long frame = kv_to_mfn(mygdt);
+ pmap_set_page_readonly(mygdt);
+ if (hyp_set_gdt(kv_to_la(&frame), GDTSZ))
+ panic("couldn't set gdt\n");
+#endif
+#ifdef MACH_PV_PAGETABLES
+ if (hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments))
+ panic("couldn't set 4gb segments vm assist");
+#if 0
+ if (hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify))
+ panic("couldn't set 4gb segments vm assist notify");
+#endif
+#endif /* MACH_PV_PAGETABLES */
+
+#ifndef MACH_PV_DESCRIPTORS
+ /* Load the new GDT. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = (GDTSZ * sizeof(struct real_descriptor))-1;
+ pdesc.linear_base = kvtolin(mygdt);
+ lgdt(&pdesc);
+ }
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+static void
+reload_segs(void)
+{
+ /* Reload all the segment registers from the new GDT.
+ We must load ds and es with 0 before loading them with KERNEL_DS
+ because some processors will "optimize out" the loads
+ if the previous selector values happen to be the same. */
+#ifndef __x86_64__
+ asm volatile("ljmp %0,$1f\n"
+ "1:\n"
+ "movw %w2,%%ds\n"
+ "movw %w2,%%es\n"
+ "movw %w2,%%fs\n"
+ "movw %w2,%%gs\n"
+
+ "movw %w1,%%ds\n"
+ "movw %w1,%%es\n"
+ "movw %w3,%%gs\n"
+ "movw %w1,%%ss\n"
+ : : "i" (KERNEL_CS), "r" (KERNEL_DS), "r" (0), "r" (PERCPU_DS));
+#endif
+}
+
+void
+gdt_init(void)
+{
+ gdt_fill(0, gdt);
+
+ reload_segs();
+
+#ifdef MACH_PV_PAGETABLES
+#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+ /* things now get shifted */
+#ifdef MACH_PSEUDO_PHYS
+ pfn_list = (void*) pfn_list + VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
+#endif /* MACH_PSEUDO_PHYS */
+ la_shift += LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+#endif
+#endif /* MACH_PV_PAGETABLES */
+}
+
+#if NCPUS > 1
+void
+ap_gdt_init(int cpu)
+{
+ gdt_fill(cpu, mp_gdt[cpu]);
+
+ reload_segs();
+}
+#endif
diff --git a/i386/i386/gdt.h b/i386/i386/gdt.h
new file mode 100644
index 0000000..c7da012
--- /dev/null
+++ b/i386/i386/gdt.h
@@ -0,0 +1,121 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON, IBM, AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON, IBM, AND CSL DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_GDT_
+#define _I386_GDT_
+
+#include "seg.h"
+
+/*
+ * Kernel descriptors for Mach - 32-bit flat address space.
+ */
+#define KERNEL_CS (0x08 | KERNEL_RING) /* kernel code */
+#define KERNEL_DS (0x10 | KERNEL_RING) /* kernel data */
+
+
+#ifndef MACH_PV_DESCRIPTORS
+#define KERNEL_LDT 0x18 /* master LDT */
+#endif /* MACH_PV_DESCRIPTORS */
+
+#ifdef __x86_64__
+/* LDT needs two entries */
+#define KERNEL_TSS 0x40 /* master TSS (uniprocessor) */
+#else
+#define KERNEL_TSS 0x20 /* master TSS (uniprocessor) */
+#endif
+
+
+#define USER_LDT 0x28 /* place for per-thread LDT */
+
+#ifdef __x86_64__
+/* LDT needs two entries */
+#define USER_TSS 0x58 /* place for per-thread TSS
+ that holds IO bitmap */
+#else
+#define USER_TSS 0x30 /* place for per-thread TSS
+ that holds IO bitmap */
+#endif
+
+
+#ifndef MACH_PV_DESCRIPTORS
+#define LINEAR_DS 0x38 /* linear mapping */
+#endif /* MACH_PV_DESCRIPTORS */
+
+/* 0x40 was USER_FPREGS, now used by TSS in 64bit mode */
+
+#define USER_GDT 0x48 /* user-defined 32bit GDT entries */
+#define USER_GDT_SLOTS 2
+
+/* 0x58 used by user TSS in 64bit mode */
+
+#define PERCPU_DS 0x68 /* per-cpu data mapping */
+
+#define GDTSZ sel_idx(0x70)
+
+#ifndef __ASSEMBLER__
+
+extern struct real_descriptor gdt[GDTSZ];
+
+/* Fill a segment descriptor in the GDT. */
+#define _fill_gdt_descriptor(_gdt, segment, base, limit, access, sizebits) \
+ fill_descriptor(&_gdt[sel_idx(segment)], base, limit, access, sizebits)
+
+#define fill_gdt_descriptor(segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor(gdt, segment, base, limit, access, sizebits)
+
+/* 64bit variant */
+#ifdef __x86_64__
+#define _fill_gdt_descriptor64(_gdt, segment, base, limit, access, sizebits) \
+ fill_descriptor64((struct real_descriptor64 *) &_gdt[sel_idx(segment)], base, limit, access, sizebits)
+
+#define fill_gdt_descriptor64(segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor64(gdt, segment, base, limit, access, sizebits)
+#endif
+
+/* System descriptor variants */
+#ifdef __x86_64__
+#define _fill_gdt_sys_descriptor(_gdt, segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor64(_gdt, segment, base, limit, access, sizebits)
+#define fill_gdt_sys_descriptor(segment, base, limit, access, sizebits) \
+ fill_gdt_descriptor64(segment, base, limit, access, sizebits)
+#else
+#define _fill_gdt_sys_descriptor(_gdt, segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor(_gdt, segment, base, limit, access, sizebits)
+#define fill_gdt_sys_descriptor(segment, base, limit, access, sizebits) \
+ fill_gdt_descriptor(segment, base, limit, access, sizebits)
+#endif
+
+extern void gdt_init(void);
+extern void ap_gdt_init(int cpu);
+
+#endif /* __ASSEMBLER__ */
+#endif /* _I386_GDT_ */
diff --git a/i386/i386/hardclock.c b/i386/i386/hardclock.c
new file mode 100644
index 0000000..9ac4f51
--- /dev/null
+++ b/i386/i386/hardclock.c
@@ -0,0 +1,81 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Clock interrupt.
+ */
+#include <mach/machine/eflags.h>
+
+#include <kern/mach_clock.h>
+#include <i386/thread.h>
+#include <i386/hardclock.h>
+
+#if defined(AT386) || defined(ATX86_64)
+#include <i386/ipl.h>
+#endif
+
+#ifdef LINUX_DEV
+#include <linux/dev/glue/glue.h>
+#endif
+
+extern char return_to_iret[];
+
+void
+hardclock(int iunit, /* 'unit' number */
+ int old_ipl, /* old interrupt level */
+ const char *ret_addr, /* return address in interrupt handler */
+ struct i386_interrupt_state *regs /* saved registers */
+ )
+{
+ if (ret_addr == return_to_iret)
+ /*
+ * Interrupt from user mode or from thread stack.
+ */
+ clock_interrupt(tick, /* usec per tick */
+ (regs->efl & EFL_VM) || /* user mode */
+ ((regs->cs & 0x03) != 0), /* user mode */
+#if defined(LINUX_DEV)
+ FALSE, /* ignore SPL0 */
+#else /* LINUX_DEV */
+ old_ipl == SPL0, /* base priority */
+#endif /* LINUX_DEV */
+ regs->eip); /* interrupted eip */
+ else
+ /*
+ * Interrupt from interrupt stack.
+ */
+ clock_interrupt(tick, /* usec per tick */
+ FALSE, /* kernel mode */
+ FALSE, /* not SPL0 */
+ 0); /* interrupted eip */
+
+#ifdef LINUX_DEV
+ linux_timer_intr();
+#endif /* LINUX_DEV */
+}
diff --git a/i386/i386/hardclock.h b/i386/i386/hardclock.h
new file mode 100644
index 0000000..b326c3c
--- /dev/null
+++ b/i386/i386/hardclock.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _I386_HARDCLOCK_H_
+#define _I386_HARDCLOCK_H_
+
+void hardclock(
+ int iunit,
+ int old_ipl,
+ const char *ret_addr,
+ struct i386_interrupt_state *regs);
+
+#endif /* _I386_HARDCLOCK_H_ */
diff --git a/i386/i386/i386asm.sym b/i386/i386/i386asm.sym
new file mode 100644
index 0000000..e1f5c6b
--- /dev/null
+++ b/i386/i386/i386asm.sym
@@ -0,0 +1,194 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Pass field offsets to assembly code.
+ */
+#include <sys/reboot.h>
+
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/syscall_emulation.h>
+#include <i386/thread.h>
+#include <i386/pmap.h>
+#include <i386/vm_param.h>
+#include <i386/seg.h>
+#include <i386/tss.h>
+#include <i386at/idt.h>
+#include <i386/gdt.h>
+#include <i386/ldt.h>
+#include <i386/mp_desc.h>
+#include <i386/apic.h>
+#include <i386/xen.h>
+
+expr CALL_AST_CHECK
+expr CALL_PMAP_UPDATE
+
+offset ApicLocalUnit lu apic_id APIC_ID
+
+offset percpu pc cpu_id PERCPU_CPU_ID
+offset percpu pc active_thread PERCPU_ACTIVE_THREAD
+offset percpu pc active_stack PERCPU_ACTIVE_STACK
+
+offset pcb pcb iss
+
+size percpu pc
+
+offset thread th pcb
+offset thread th task
+offset thread th recover
+offset thread th kernel_stack
+offset thread th swap_func
+
+offset task task eml_dispatch TASK_EMUL
+
+offset eml_dispatch eml disp_min DISP_MIN
+offset eml_dispatch eml disp_count DISP_COUNT
+offset eml_dispatch eml disp_vector DISP_VECTOR
+
+expr &STACK_IKS(0)->k_ebx KSS_EBX
+expr &STACK_IKS(0)->k_esp KSS_ESP
+expr &STACK_IKS(0)->k_ebp KSS_EBP
+#ifdef __i386__
+expr &STACK_IKS(0)->k_esi KSS_ESI
+expr &STACK_IKS(0)->k_edi KSS_EDI
+#endif
+expr &STACK_IKS(0)->k_eip KSS_EIP
+#ifdef __x86_64__
+expr &STACK_IKS(0)->k_r12 KSS_R12
+expr &STACK_IKS(0)->k_r13 KSS_R13
+expr &STACK_IKS(0)->k_r14 KSS_R14
+expr &STACK_IKS(0)->k_r15 KSS_R15
+#endif
+size i386_kernel_state iks
+
+size i386_exception_link iel
+
+#if !defined(__x86_64__) || defined(USER32)
+offset i386_saved_state r gs
+offset i386_saved_state r fs
+#endif
+offset i386_saved_state r cs
+offset i386_saved_state r uesp
+offset i386_saved_state r eax
+offset i386_saved_state r ebx
+offset i386_saved_state r ecx
+offset i386_saved_state r edx
+offset i386_saved_state r ebp
+offset i386_saved_state r trapno
+offset i386_saved_state r err
+offset i386_saved_state r efl R_EFLAGS
+offset i386_saved_state r eip
+offset i386_saved_state r cr2
+offset i386_saved_state r edi
+offset i386_saved_state r esi
+#ifdef __x86_64__
+offset i386_saved_state r r8
+offset i386_saved_state r r9
+offset i386_saved_state r r10
+offset i386_saved_state r r12
+offset i386_saved_state r r13
+offset i386_saved_state r r14
+offset i386_saved_state r r15
+#endif
+
+offset i386_interrupt_state i eip
+offset i386_interrupt_state i cs
+offset i386_interrupt_state i efl
+
+#ifdef __x86_64__
+offset i386_tss tss rsp0
+#else
+offset i386_tss tss esp0
+offset i386_tss tss ss0
+#endif
+
+offset machine_slot sub_type cpu_type
+
+expr I386_PGBYTES NBPG
+expr VM_MIN_ADDRESS
+expr VM_MAX_ADDRESS
+expr VM_MIN_KERNEL_ADDRESS KERNELBASE
+expr KERNEL_STACK_SIZE
+#if defined MACH_PSEUDO_PHYS && (VM_MIN_KERNEL_ADDRESS == LINEAR_MIN_KERNEL_ADDRESS)
+expr PFN_LIST pfn_list
+#endif
+
+#if PAE
+expr PDPSHIFT
+#endif /* PAE */
+expr PDESHIFT
+expr PDEMASK
+expr PTESHIFT
+expr PTEMASK
+
+expr sizeof(pt_entry_t) PTE_SIZE
+
+expr INTEL_PTE_PFN PTE_PFN
+expr INTEL_PTE_VALID PTE_V
+expr INTEL_PTE_WRITE PTE_W
+expr INTEL_PTE_PS PTE_S
+expr ~INTEL_PTE_VALID PTE_INVALID
+expr NPTES PTES_PER_PAGE
+expr INTEL_PTE_VALID|INTEL_PTE_WRITE INTEL_PTE_KERNEL
+
+expr IDTSZ
+
+expr KERNEL_RING
+
+expr (VM_MIN_KERNEL_ADDRESS>>PDESHIFT)*sizeof(pt_entry_t) KERNELBASEPDE
+
+#if MACH_KDB
+expr RB_KDB
+#endif /* MACH_KDB */
+
+expr INTSTACK_SIZE
+
+#if !STAT_TIME
+offset timer tm low_bits LOW_BITS
+offset timer tm high_bits HIGH_BITS
+offset timer tm high_bits_check HIGH_BITS_CHECK
+expr TIMER_HIGH_UNIT
+offset thread th system_timer
+offset thread th user_timer
+#endif
+
+#ifdef MACH_XEN
+offset shared_info si vcpu_info[0].evtchn_upcall_mask CPU_CLI
+offset shared_info si vcpu_info[0].evtchn_upcall_pending CPU_PENDING
+offset shared_info si vcpu_info[0].evtchn_pending_sel CPU_PENDING_SEL
+offset shared_info si evtchn_pending PENDING
+offset shared_info si evtchn_mask EVTMASK
+#ifdef MACH_PV_PAGETABLES
+offset shared_info si vcpu_info[0].arch.cr2 CR2
+#endif /* MACH_PV_PAGETABLES */
+#endif /* MACH_XEN */
+
+offset mach_msg_header msgh msgh_size
diff --git a/i386/i386/idt-gen.h b/i386/i386/idt-gen.h
new file mode 100644
index 0000000..daa6aaf
--- /dev/null
+++ b/i386/i386/idt-gen.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifndef _I386_IDT_
+#define _I386_IDT_
+
+#include <mach/vm_param.h>
+
+#include "seg.h"
+
+/*
+ * Interrupt table must always be at least 32 entries long,
+ * to cover the basic i386 exception vectors.
+ * More-specific code will probably define it to be longer,
+ * to allow separate entrypoints for hardware interrupts.
+ */
+#ifndef IDTSZ
+#error you need to define IDTSZ
+#endif
+
+extern struct real_gate idt[IDTSZ];
+
+/* Fill a gate in the IDT. */
+#define fill_idt_gate(_idt, int_num, entry, selector, access, dword_count) \
+ fill_gate(&_idt[int_num], entry, selector, access, dword_count)
+
+#endif /* _I386_IDT_ */
diff --git a/i386/i386/idt.c b/i386/i386/idt.c
new file mode 100644
index 0000000..caa44d7
--- /dev/null
+++ b/i386/i386/idt.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <i386/vm_param.h>
+#include <i386/seg.h>
+#include <i386at/idt.h>
+#include <i386/gdt.h>
+#include <i386/mp_desc.h>
+
+struct real_gate idt[IDTSZ];
+
+struct idt_init_entry
+{
+ unsigned long entrypoint;
+ unsigned short vector;
+ unsigned short type;
+#ifdef __x86_64__
+ unsigned short ist;
+ unsigned short pad_0;
+#endif
+};
+extern struct idt_init_entry idt_inittab[];
+
+static void
+idt_fill(struct real_gate *myidt)
+{
+#ifdef MACH_PV_DESCRIPTORS
+ if (hyp_set_trap_table(kvtolin(idt_inittab)))
+ panic("couldn't set trap table\n");
+#else /* MACH_PV_DESCRIPTORS */
+ struct idt_init_entry *iie = idt_inittab;
+
+ /* Initialize the exception vectors from the idt_inittab. */
+ while (iie->entrypoint)
+ {
+ fill_idt_gate(myidt, iie->vector, iie->entrypoint, KERNEL_CS, iie->type,
+#ifdef __x86_64__
+ iie->ist
+#else
+ 0
+#endif
+ );
+ iie++;
+ }
+
+ /* Load the IDT pointer into the processor. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = (IDTSZ * sizeof(struct real_gate))-1;
+ pdesc.linear_base = kvtolin(myidt);
+ lidt(&pdesc);
+ }
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+void idt_init(void)
+{
+ idt_fill(idt);
+}
+
+#if NCPUS > 1
+void ap_idt_init(int cpu)
+{
+ idt_fill(mp_desc_table[cpu]->idt);
+}
+#endif
diff --git a/i386/i386/idt_inittab.S b/i386/i386/idt_inittab.S
new file mode 100644
index 0000000..fc80e21
--- /dev/null
+++ b/i386/i386/idt_inittab.S
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include <mach/machine/asm.h>
+
+#include <i386/seg.h>
+#include <i386/i386asm.h>
+#include <i386/gdt.h>
+
+
+/* We'll be using macros to fill in a table in data hunk 2
+ while writing trap entrypoint routines at the same time.
+ Here's the header that comes before everything else. */
+ .data 2
+ENTRY(idt_inittab)
+ .text
+
+/*
+ * Interrupt descriptor table and code vectors for it.
+ */
+#ifdef MACH_PV_DESCRIPTORS
+#define IDT_ENTRY(n,entry,type) \
+ .data 2 ;\
+ .byte n ;\
+ .byte (((type)&ACC_PL)>>5)|((((type)&(ACC_TYPE|ACC_A))==ACC_INTR_GATE)<<2) ;\
+ .word KERNEL_CS ;\
+ .long entry ;\
+ .text
+#else /* MACH_PV_DESCRIPTORS */
+#define IDT_ENTRY(n,entry,type) \
+ .data 2 ;\
+ .long entry ;\
+ .word n ;\
+ .word type ;\
+ .text
+#endif /* MACH_PV_DESCRIPTORS */
+
+/*
+ * No error code. Clear error code and push trap number.
+ */
+#define EXCEPTION(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * User-accessible exception. Otherwise, same as above.
+ */
+#define EXCEP_USR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_U|ACC_TRAP_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Error code has been pushed. Just push trap number.
+ */
+#define EXCEP_ERR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_INTR_GATE);\
+ENTRY(name) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Special interrupt code: dispatches to a unique entrypoint,
+ * not defined automatically here.
+ */
+#define EXCEP_SPC(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE)
+
+
+EXCEPTION(0x00,t_zero_div)
+EXCEP_SPC(0x01,t_debug)
+/* skip NMI interrupt - let more specific code figure that out. */
+EXCEP_USR(0x03,t_int3)
+EXCEP_USR(0x04,t_into)
+EXCEP_USR(0x05,t_bounds)
+EXCEPTION(0x06,t_invop)
+EXCEPTION(0x07,t_nofpu)
+EXCEPTION(0x08,a_dbl_fault)
+EXCEPTION(0x09,a_fpu_over)
+EXCEPTION(0x0a,a_inv_tss)
+EXCEP_SPC(0x0b,t_segnp)
+EXCEP_ERR(0x0c,t_stack_fault)
+EXCEP_SPC(0x0d,t_gen_prot)
+EXCEP_SPC(0x0e,t_page_fault)
+#ifdef MACH_PV_DESCRIPTORS
+EXCEP_ERR(0x0f,t_trap_0f)
+#else
+EXCEPTION(0x0f,t_trap_0f)
+#endif
+EXCEPTION(0x10,t_fpu_err)
+EXCEPTION(0x11,t_trap_11)
+EXCEPTION(0x12,t_trap_12)
+EXCEPTION(0x13,t_trap_13)
+EXCEPTION(0x14,t_trap_14)
+EXCEPTION(0x15,t_trap_15)
+EXCEPTION(0x16,t_trap_16)
+EXCEPTION(0x17,t_trap_17)
+EXCEPTION(0x18,t_trap_18)
+EXCEPTION(0x19,t_trap_19)
+EXCEPTION(0x1a,t_trap_1a)
+EXCEPTION(0x1b,t_trap_1b)
+EXCEPTION(0x1c,t_trap_1c)
+EXCEPTION(0x1d,t_trap_1d)
+EXCEPTION(0x1e,t_trap_1e)
+EXCEPTION(0x1f,t_trap_1f)
+
+/* Terminator */
+ .data 2
+ .long 0
+#ifdef MACH_PV_DESCRIPTORS
+ .long 0
+#endif /* MACH_PV_DESCRIPTORS */
+
diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c
new file mode 100644
index 0000000..aabff49
--- /dev/null
+++ b/i386/i386/io_perm.c
@@ -0,0 +1,329 @@
+/* Manipulate I/O permission bitmap objects.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <kern/slab.h>
+#include <kern/kalloc.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/thread.h>
+
+#include <device/dev_hdr.h>
+#include <device/device_emul.h>
+#include <device/device_port.h>
+
+#include <i386/i386/mach_i386.server.h>
+
+#include "io_perm.h"
+#include "gdt.h"
+#include "pcb.h"
+
+#define PCI_CFG1_START 0xcf8
+#define PCI_CFG1_END 0xcff
+
+#define CONTAINS_PCI_CFG(from, to) \
+ ( ( from <= PCI_CFG1_END ) && ( to >= PCI_CFG1_START ) )
+
+
+/* Our device emulation ops. See below, at the bottom of this file. */
+static struct device_emulation_ops io_perm_device_emulation_ops;
+
+/* Flag to hold PCI io cfg access lock */
+static boolean_t taken_pci_cfg = FALSE;
+
+/* The outtran which allows MIG to convert an io_perm_t object to a port
+ representing it. */
+ipc_port_t
+convert_io_perm_to_port (io_perm_t io_perm)
+{
+ if (io_perm == IO_PERM_NULL)
+ return IP_NULL;
+
+ ipc_port_t port;
+
+ port = ipc_port_make_send (io_perm->port);
+
+ return port;
+}
+
+
+/* The intran which allows MIG to convert a port representing an
+ io_perm_t object to the object itself. */
+io_perm_t
+convert_port_to_io_perm (ipc_port_t port)
+{
+ device_t device;
+
+ device = dev_port_lookup (port);
+
+ if (device == DEVICE_NULL)
+ return IO_PERM_NULL;
+
+ io_perm_t io_perm;
+
+ io_perm = device->emul_data;
+
+ return io_perm;
+}
+
+/* The destructor which is called when the last send right to a port
+ representing an io_perm_t object vanishes. */
+void
+io_perm_deallocate (io_perm_t io_perm)
+{
+ /* We need to check if the io_perm was a PCI cfg one and release it */
+ if (CONTAINS_PCI_CFG(io_perm->from, io_perm->to))
+ taken_pci_cfg = FALSE;
+}
+
+/* Our ``no senders'' handling routine. Deallocate the object. */
+static
+void
+no_senders (mach_no_senders_notification_t *notification)
+{
+ io_perm_t io_perm;
+
+ io_perm = convert_port_to_io_perm
+ ((ipc_port_t) notification->not_header.msgh_remote_port);
+
+ assert (io_perm != IO_PERM_NULL);
+
+ ipc_kobject_set (io_perm->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (io_perm->port);
+
+ kfree ((vm_offset_t) io_perm, sizeof *io_perm);
+}
+
+
+/* Initialize bitmap by setting all bits to OFF == 1. */
+static inline void
+io_bitmap_init (unsigned char *iopb)
+{
+ memset (iopb, ~0, IOPB_BYTES);
+}
+
+
+/* Set selected bits in bitmap to ON == 0. */
+static inline void
+io_bitmap_set (unsigned char *iopb, io_port_t from, io_port_t to)
+{
+ do
+ iopb[from >> 3] &= ~(1 << (from & 0x7));
+ while (from++ != to);
+}
+
+
+/* Set selected bits in bitmap to OFF == 1. */
+static inline void
+io_bitmap_clear (unsigned char *iopb, io_port_t from, io_port_t to)
+{
+ do
+ iopb[from >> 3] |= (1 << (from & 0x7));
+ while (from++ != to);
+}
+
+
+/* Request a new port IO_PERM that represents the capability to access
+ the I/O ports [FROM; TO] directly. MASTER_PORT is the master device port.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a task,
+ or FROM is greater than TO.
+
+ The function is exported. */
+kern_return_t
+i386_io_perm_create (const ipc_port_t master_port, io_port_t from, io_port_t to,
+ io_perm_t *new)
+{
+ if (master_port != master_device_port)
+ return KERN_INVALID_ARGUMENT;
+
+ /* We do not have to check FROM and TO for the limits [0;IOPB_MAX], as
+ they're short integers and all values are within these very limits. */
+ if (from > to)
+ return KERN_INVALID_ARGUMENT;
+
+ /* Only one process may take a range that includes PCI cfg registers */
+ if (taken_pci_cfg && CONTAINS_PCI_CFG(from, to))
+ return KERN_PROTECTION_FAILURE;
+
+ io_perm_t io_perm;
+
+ io_perm = (io_perm_t) kalloc (sizeof *io_perm);
+ if (io_perm == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ io_perm->from = from;
+ io_perm->to = to;
+
+ io_perm->port = ipc_port_alloc_kernel ();
+ if (io_perm->port == IP_NULL)
+ {
+ kfree ((vm_offset_t) io_perm, sizeof *io_perm);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* Set up the dummy device. */
+ ipc_kobject_set(io_perm->port,
+ (ipc_kobject_t) &io_perm->device, IKOT_DEVICE);
+ io_perm->device.emul_data = io_perm;
+ io_perm->device.emul_ops = &io_perm_device_emulation_ops;
+
+ ipc_port_t notify;
+
+ notify = ipc_port_make_sonce(io_perm->port);
+ ip_lock(io_perm->port);
+ ipc_port_nsrequest(io_perm->port, 1, notify, &notify);
+ assert(notify == IP_NULL);
+
+ *new = io_perm;
+
+ if (CONTAINS_PCI_CFG(from, to))
+ taken_pci_cfg = TRUE;
+
+ return KERN_SUCCESS;
+}
+
+/* Modify the I/O permissions for TARGET_TASK. If ENABLE is TRUE, the
+ permission to access the I/O ports specified by IO_PERM is granted,
+ otherwise it is withdrawn.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a valid
+ task or IO_PERM not a valid I/O permission port.
+
+ The function is exported. */
+kern_return_t
+i386_io_perm_modify (task_t target_task, io_perm_t io_perm, boolean_t enable)
+{
+ io_port_t from, to;
+ unsigned char *iopb;
+ io_port_t iopb_size;
+
+ if (target_task == TASK_NULL || io_perm == IO_PERM_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ from = io_perm->from;
+ to = io_perm->to;
+
+ simple_lock (&target_task->machine.iopb_lock);
+ iopb = target_task->machine.iopb;
+ iopb_size = target_task->machine.iopb_size;
+
+ if (!enable && !iopb_size)
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_SUCCESS;
+ }
+
+ if (!iopb)
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ iopb = (unsigned char *) kmem_cache_alloc (&machine_task_iopb_cache);
+ simple_lock (&target_task->machine.iopb_lock);
+ if (target_task->machine.iopb)
+ {
+ if (iopb)
+ kmem_cache_free (&machine_task_iopb_cache, (vm_offset_t) iopb);
+ iopb = target_task->machine.iopb;
+ iopb_size = target_task->machine.iopb_size;
+ }
+ else if (iopb)
+ {
+ target_task->machine.iopb = iopb;
+ io_bitmap_init (iopb);
+ }
+ else
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ }
+
+ if (enable)
+ {
+ io_bitmap_set (iopb, from, to);
+ if ((to >> 3) + 1 > iopb_size)
+ target_task->machine.iopb_size = (to >> 3) + 1;
+ }
+ else
+ {
+ if ((from >> 3) + 1 > iopb_size)
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_SUCCESS;
+ }
+
+ io_bitmap_clear (iopb, from, to);
+ while (iopb_size > 0 && iopb[iopb_size - 1] == 0xff)
+ iopb_size--;
+ target_task->machine.iopb_size = iopb_size;
+ }
+
+#if NCPUS>1
+#warning SMP support missing (notify all CPUs running threads in that of the I/O bitmap change).
+#endif
+ if (target_task == current_task())
+ update_ktss_iopb (iopb, target_task->machine.iopb_size);
+
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_SUCCESS;
+}
+
+/* We are some sort of Mach device... */
+static struct device_emulation_ops io_perm_device_emulation_ops =
+{
+ /* ... in order to be easily able to receive a ``no senders'' notification
+ which we then use to deallocate ourselves. */
+ .no_senders = no_senders
+};
diff --git a/i386/i386/io_perm.h b/i386/i386/io_perm.h
new file mode 100644
index 0000000..b97cf97
--- /dev/null
+++ b/i386/i386/io_perm.h
@@ -0,0 +1,63 @@
+/* Data types for I/O permission bitmap objects.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _I386_IO_PERM_H_
+#define _I386_IO_PERM_H_
+
+#include <device/dev_hdr.h>
+#include <ipc/ipc_types.h>
+
+
+/* The highest possible I/O port. */
+#define IOPB_MAX 0xffff
+
+/* The number of bytes needed to hold all permission bits. */
+#define IOPB_BYTES (((IOPB_MAX + 1) + 7) / 8)
+
+/* An offset that points outside of the permission bitmap, used to
+ disable all permission. */
+#define IOPB_INVAL 0x2fff
+
+
+/* The type of an I/O port address. */
+typedef unsigned short io_port_t;
+
+
+struct io_perm
+{
+ /* We use a ``struct device'' for easy management. */
+ struct device device;
+
+ ipc_port_t port;
+
+ io_port_t from, to;
+};
+
+typedef struct io_perm *io_perm_t;
+
+#define IO_PERM_NULL ((io_perm_t) 0)
+
+extern io_perm_t convert_port_to_io_perm (ipc_port_t);
+extern ipc_port_t convert_io_perm_to_port (io_perm_t);
+extern void io_perm_deallocate (io_perm_t);
+
+#endif /* _I386_IO_PERM_H_ */
diff --git a/i386/i386/ipl.h b/i386/i386/ipl.h
new file mode 100644
index 0000000..6e59b36
--- /dev/null
+++ b/i386/i386/ipl.h
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_IPL_H_
+#define _I386_IPL_H_
+
+#define SPL0 0
+#define SPL1 1
+#define SPL2 2
+#define SPL3 3
+#define SPL4 4
+#define SPL5 5
+#define SPL6 6
+#define SPL7 7
+
+#define SPLPP 5
+#define SPLTTY 6
+#define SPLNI 6
+#define SPLHI 7
+#define IPLHI SPLHI
+
+#define NSPL (SPL7 + 1)
+
+#ifdef KERNEL
+#ifndef __ASSEMBLER__
+#include <machine/machspl.h>
+/* Note that interrupts have varying signatures */
+typedef void (*interrupt_handler_fn)(int);
+extern interrupt_handler_fn ivect[];
+extern int iunit[];
+extern spl_t curr_ipl[NCPUS];
+#endif /* __ASSEMBLER__ */
+#endif /* KERNEL */
+
+#endif /* _I386_IPL_H_ */
diff --git a/i386/i386/irq.c b/i386/i386/irq.c
new file mode 100644
index 0000000..a7c9889
--- /dev/null
+++ b/i386/i386/irq.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 1995 Shantanu Goel
+ * Copyright (C) 2020 Free Software Foundation, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <i386/irq.h>
+#include <device/intr.h>
+#include <mach/kern_return.h>
+#include <kern/queue.h>
+#include <kern/assert.h>
+#include <machine/machspl.h>
+
+extern queue_head_t main_intr_queue;
+
+static void
+irq_eoi (struct irqdev *dev, int id)
+{
+#ifdef APIC
+ ioapic_irq_eoi (dev->irq[id]);
+#endif
+}
+
+static unsigned int ndisabled_irq[NINTR];
+
+void
+__disable_irq (irq_t irq_nr)
+{
+ assert (irq_nr < NINTR);
+
+ spl_t s = splhigh();
+ ndisabled_irq[irq_nr]++;
+ assert (ndisabled_irq[irq_nr] > 0);
+ if (ndisabled_irq[irq_nr] == 1)
+ mask_irq (irq_nr);
+ splx(s);
+}
+
+void
+__enable_irq (irq_t irq_nr)
+{
+ assert (irq_nr < NINTR);
+
+ spl_t s = splhigh();
+ assert (ndisabled_irq[irq_nr] > 0);
+ ndisabled_irq[irq_nr]--;
+ if (ndisabled_irq[irq_nr] == 0)
+ unmask_irq (irq_nr);
+ splx(s);
+}
+
+struct irqdev irqtab = {
+ "irq", irq_eoi, &main_intr_queue, 0,
+#ifdef APIC
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
+#else
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+#endif
+};
+
diff --git a/i386/i386/irq.h b/i386/i386/irq.h
new file mode 100644
index 0000000..72bbe57
--- /dev/null
+++ b/i386/i386/irq.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 Free Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#ifndef _I386_IRQ_H
+#define _I386_IRQ_H
+
+#ifdef APIC
+# include <i386/apic.h>
+#else
+# include <i386/pic.h>
+#endif
+
+typedef unsigned int irq_t;
+
+void __enable_irq (irq_t irq);
+void __disable_irq (irq_t irq);
+
+extern struct irqdev irqtab;
+
+#endif
diff --git a/i386/i386/ktss.c b/i386/i386/ktss.c
new file mode 100644
index 0000000..34cb6df
--- /dev/null
+++ b/i386/i386/ktss.c
@@ -0,0 +1,92 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Kernel task state segment.
+ *
+ * We don't use the i386 task switch mechanism. We need a TSS
+ * only to hold the kernel stack pointer for the current thread.
+ *
+ * XXX multiprocessor??
+ */
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "ktss.h"
+#include "mp_desc.h"
+
+/* A kernel TSS with a complete I/O bitmap. */
+struct task_tss ktss;
+
+void
+ktss_fill(struct task_tss *myktss, struct real_descriptor *mygdt)
+{
+ /* XXX temporary exception stacks */
+ /* FIXME: make it per-processor */
+ static int exception_stack[1024];
+ static int double_fault_stack[1024];
+
+#ifdef MACH_RING1
+ /* Xen won't allow us to do any I/O by default anyway, just register
+ * exception stack */
+ if (hyp_stack_switch(KERNEL_DS, (unsigned long)(exception_stack+1024)))
+ panic("couldn't register exception stack\n");
+#else /* MACH_RING1 */
+ /* Initialize the master TSS descriptor. */
+ _fill_gdt_sys_descriptor(mygdt, KERNEL_TSS,
+ kvtolin(myktss), sizeof(struct task_tss) - 1,
+ ACC_PL_K|ACC_TSS, 0);
+
+ /* Initialize the master TSS. */
+#ifdef __x86_64__
+ myktss->tss.rsp0 = (unsigned long)(exception_stack+1024);
+ myktss->tss.ist1 = (unsigned long)(double_fault_stack+1024);
+#else /* ! __x86_64__ */
+ myktss->tss.ss0 = KERNEL_DS;
+ myktss->tss.esp0 = (unsigned long)(exception_stack+1024);
+#endif /* __x86_64__ */
+
+ myktss->tss.io_bit_map_offset = IOPB_INVAL;
+ /* Set the last byte in the I/O bitmap to all 1's. */
+ myktss->barrier = 0xff;
+
+ /* Load the TSS. */
+ ltr(KERNEL_TSS);
+#endif /* MACH_RING1 */
+}
+
+void
+ktss_init(void)
+{
+ ktss_fill(&ktss, gdt);
+}
+
+#if NCPUS > 1
+void
+ap_ktss_init(int cpu)
+{
+ ktss_fill(&mp_desc_table[cpu]->ktss, mp_gdt[cpu]);
+}
+#endif
diff --git a/i386/i386/ktss.h b/i386/i386/ktss.h
new file mode 100644
index 0000000..171332d
--- /dev/null
+++ b/i386/i386/ktss.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KTSS_
+#define _I386_KTSS_
+
+#include "tss.h"
+
+extern struct task_tss ktss;
+
+extern void ktss_init(void);
+extern void ap_ktss_init(int cpu);
+
+#endif /* _I386_KTSS_ */
diff --git a/i386/i386/kttd_interface.c b/i386/i386/kttd_interface.c
new file mode 100644
index 0000000..f48fe8e
--- /dev/null
+++ b/i386/i386/kttd_interface.c
@@ -0,0 +1,574 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_TTD
+
+#include <sys/types.h>
+#include <kern/printf.h>
+
+#include <mach/machine/eflags.h>
+
+#include <kern/thread.h>
+#include <kern/processor.h>
+#include <mach/thread_status.h>
+#include <mach/vm_param.h>
+#include <i386/seg.h>
+
+#include <ttd/ttd_types.h>
+#include <ttd/ttd_stub.h>
+#include <machine/kttd_machdep.h>
+
+/*
+ * Shamelessly copied from the ddb sources:
+ */
+struct i386_saved_state *kttd_last_saved_statep;
+struct i386_saved_state kttd_nested_saved_state;
+unsigned last_kttd_sp;
+
+struct i386_saved_state kttd_regs; /* was ddb_regs */
+
+extern int kttd_debug;
+extern boolean_t kttd_enabled;
+extern vm_offset_t virtual_end;
+
+#define I386_BREAKPOINT 0xcc
+
+/*
+ * kernel map
+ */
+extern vm_map_t kernel_map;
+
+boolean_t kttd_console_init(void)
+{
+ /*
+ * Get local machine's IP address via bootp.
+ */
+ return(ttd_ip_bootp());
+}
+
+/*
+ * Execute a break instruction that will invoke ttd
+ */
+void kttd_break(void)
+{
+ if (!kttd_enabled)
+ return;
+ asm("int3");
+}
+
+/*
+ * Halt all processors on the 386at (not really applicable).
+ */
+void kttd_halt_processors(void)
+{
+ /* XXX Fix for Sequent!!! */
+ /* Only one on AT386, so ignore for now... */
+}
+
+/*
+ * Determine whether or not the ethernet device driver supports
+ * ttd.
+ */
+boolean_t kttd_supported(void)
+{
+ return ((int)ttd_get_packet != NULL);
+}
+
+/*
+ * Return the ttd machine type for the i386at
+ */
+ttd_machine_type get_ttd_machine_type(void)
+{
+ return TTD_AT386;
+}
+
+void kttd_machine_getregs(struct i386_gdb_register_state *ttd_state)
+{
+ ttd_state->gs = kttd_regs.gs;
+ ttd_state->fs = kttd_regs.fs;
+ ttd_state->es = kttd_regs.es;
+ ttd_state->ds = kttd_regs.ds;
+ ttd_state->edi = kttd_regs.edi;
+ ttd_state->esi = kttd_regs.esi;
+ ttd_state->ebp = kttd_regs.ebp;
+
+ /*
+ * This is set up to point to the right place in
+	 * kttd_trap.
+ */
+ ttd_state->esp = kttd_regs.uesp;
+
+ ttd_state->ebx = kttd_regs.ebx;
+ ttd_state->edx = kttd_regs.edx;
+ ttd_state->ecx = kttd_regs.ecx;
+ ttd_state->eax = kttd_regs.eax;
+ ttd_state->eip = kttd_regs.eip;
+ ttd_state->cs = kttd_regs.cs;
+ ttd_state->efl = kttd_regs.efl;
+ ttd_state->ss = kttd_regs.ss;
+}
+
+void kttd_machine_setregs(struct i386_gdb_register_state *ttd_state)
+{
+ if (kttd_regs.gs != ttd_state->gs) {
+ if (kttd_debug)
+ printf("gs 0x%x:0x%x, ", kttd_regs.gs, ttd_state->gs);
+ kttd_regs.gs = ttd_state->gs;
+ }
+ if (kttd_regs.fs != ttd_state->fs) {
+ if (kttd_debug)
+ printf("fs 0x%x:0x%x, ", kttd_regs.fs, ttd_state->fs);
+ kttd_regs.fs = ttd_state->fs;
+ }
+ if (kttd_regs.es != ttd_state->es) {
+ if (kttd_debug)
+ printf("es 0x%x:0x%x, ", kttd_regs.es, ttd_state->es);
+ kttd_regs.es = ttd_state->es;
+ }
+ if (kttd_regs.ds != ttd_state->ds) {
+ if (kttd_debug)
+ printf("ds 0x%x:0x%x, ", kttd_regs.ds, ttd_state->ds);
+ kttd_regs.ds = ttd_state->ds;
+ }
+ if (kttd_regs.edi != ttd_state->edi) {
+ if (kttd_debug)
+ printf("edi 0x%x:0x%x, ", kttd_regs.edi, ttd_state->edi);
+ kttd_regs.edi = ttd_state->edi;
+ }
+ if (kttd_regs.esi != ttd_state->esi) {
+ if (kttd_debug)
+ printf("esi 0x%x:0x%x, ", kttd_regs.esi, ttd_state->esi);
+ kttd_regs.esi = ttd_state->esi;
+ }
+ if (kttd_regs.ebp != ttd_state->ebp) {
+ if (kttd_debug)
+ printf("ebp 0x%x:0x%x, ", kttd_regs.ebp, ttd_state->ebp);
+ kttd_regs.ebp = ttd_state->ebp;
+ }
+ if (kttd_regs.ebx != ttd_state->ebx) {
+ if (kttd_debug)
+ printf("ebx 0x%x:0x%x, ", kttd_regs.ebx, ttd_state->ebx);
+ kttd_regs.ebx = ttd_state->ebx;
+ }
+ if (kttd_regs.edx != ttd_state->edx) {
+ if (kttd_debug)
+ printf("edx 0x%x:0x%x, ", kttd_regs.edx, ttd_state->edx);
+ kttd_regs.edx = ttd_state->edx;
+ }
+ if (kttd_regs.ecx != ttd_state->ecx) {
+ if (kttd_debug)
+ printf("ecx 0x%x:0x%x, ", kttd_regs.ecx, ttd_state->ecx);
+ kttd_regs.ecx = ttd_state->ecx;
+ }
+ if (kttd_regs.eax != ttd_state->eax) {
+ if (kttd_debug)
+ printf("eax 0x%x:0x%x, ", kttd_regs.eax, ttd_state->eax);
+ kttd_regs.eax = ttd_state->eax;
+ }
+ if (kttd_regs.eip != ttd_state->eip) {
+ if (kttd_debug)
+ printf("eip 0x%x:0x%x, ", kttd_regs.eip, ttd_state->eip);
+ kttd_regs.eip = ttd_state->eip;
+ }
+ if (kttd_regs.cs != ttd_state->cs) {
+ if (kttd_debug)
+ printf("cs 0x%x:0x%x, ", kttd_regs.cs, ttd_state->cs);
+ kttd_regs.cs = ttd_state->cs;
+ }
+ if (kttd_regs.efl != ttd_state->efl) {
+ if (kttd_debug)
+ printf("efl 0x%x:0x%x, ", kttd_regs.efl, ttd_state->efl);
+ kttd_regs.efl = ttd_state->efl;
+ }
+#if 0
+ /*
+ * We probably shouldn't mess with the uesp or the ss? XXX
+ */
+ if (kttd_regs.ss != ttd_state->ss) {
+ if (kttd_debug)
+ printf("ss 0x%x:0x%x, ", kttd_regs.ss, ttd_state->ss);
+ kttd_regs.ss = ttd_state->ss;
+ }
+#endif /* 0 */
+
+}
+
+/*
+ * Enable a page for access, faulting it in if necessary
+ */
+boolean_t kttd_mem_access(vm_offset_t offset, vm_prot_t access)
+{
+ kern_return_t code;
+
+ /*
+	 * VM_MIN_KERNEL_ADDRESS is the beginning of equiv
+ * mapped kernel memory. virtual_end is the end.
+ * If it's in between it's always accessible
+ */
+ if (offset >= VM_MIN_KERNEL_ADDRESS && offset < virtual_end)
+ return TRUE;
+
+ if (offset >= virtual_end) {
+ /*
+ * fault in the memory just to make sure we can access it
+ */
+ if (kttd_debug)
+ printf(">>>>>>>>>>Faulting in memory: 0x%x, 0x%x\n",
+ trunc_page(offset), access);
+ code = vm_fault(kernel_map, trunc_page(offset), access, FALSE,
+ FALSE, (void (*)()) 0);
+ } else {
+ /*
+ * Check for user thread
+ */
+#if 1
+ if ((current_thread() != THREAD_NULL) &&
+ (current_thread()->task->map->pmap != kernel_pmap) &&
+ (current_thread()->task->map->pmap != PMAP_NULL)) {
+ code = vm_fault(current_thread()->task->map,
+ trunc_page(offset), access, FALSE,
+ FALSE, (void (*)()) 0);
+ }else{
+ /*
+ * Invalid kernel address (below VM_MIN_KERNEL_ADDRESS)
+ */
+ return FALSE;
+ }
+#else
+ if (kttd_debug)
+ printf("==========Would've tried to map in user area 0x%x\n",
+ trunc_page(offset));
+ return FALSE;
+#endif /* 0 */
+ }
+
+ return (code == KERN_SUCCESS);
+}
+
+/*
+ * See if we modified the kernel text and if so flush the caches.
+ * This routine is never called with a range that crosses a page
+ * boundary.
+ */
+void kttd_flush_cache(vm_offset_t offset, vm_size_t length)
+{
+ /* 386 doesn't need this */
+ return;
+}
+
+/*
+ * Insert a breakpoint into memory.
+ */
+boolean_t kttd_insert_breakpoint(vm_address_t address,
+ ttd_saved_inst *saved_inst)
+{
+ /*
+ * Saved old memory data:
+ */
+ *saved_inst = *(unsigned char *)address;
+
+ /*
+ * Put in a Breakpoint:
+ */
+ *(unsigned char *)address = I386_BREAKPOINT;
+
+ return TRUE;
+}
+
+/*
+ * Remove breakpoint from memory.
+ */
+boolean_t kttd_remove_breakpoint(vm_address_t address,
+ ttd_saved_inst saved_inst)
+{
+ /*
+ * replace it:
+ */
+ *(unsigned char *)address = (saved_inst & 0xff);
+
+ return TRUE;
+}
+
+/*
+ * Set single stepping mode. Assumes that program counter is set
+ * to the location where single stepping is to begin. The 386 is
+ * an easy single stepping machine, ie. built into the processor.
+ */
+boolean_t kttd_set_machine_single_step(void)
+{
+ /* Turn on Single Stepping */
+ kttd_regs.efl |= EFL_TF;
+
+ return TRUE;
+}
+
+/*
+ * Clear single stepping mode.
+ */
+boolean_t kttd_clear_machine_single_step(void)
+{
+ /* Turn off the trace flag */
+ kttd_regs.efl &= ~EFL_TF;
+
+ return TRUE;
+}
+
+
+/*
+ * kttd_type_to_ttdtrap:
+ *
+ * Fills in the task and thread info structures with the reason
+ * for entering the Teledebugger (bp, single step, pg flt, etc.)
+ *
+ */
+void kttd_type_to_ttdtrap(int type)
+{
+ /* XXX Fill this in sometime for i386 */
+}
+
+/*
+ * kttd_trap:
+ *
+ * This routine is called from the trap or interrupt handler when a
+ * breakpoint instruction is encountered or a single step operation
+ * completes. The argument is a pointer to a machine dependent
+ * saved_state structure that was built on the interrupt or kernel stack.
+ *
+ */
+boolean_t kttd_trap(int type, int code, struct i386_saved_state *regs)
+{
+ int s;
+
+ if (kttd_debug)
+ printf("kttd_TRAP, before splhigh()\n");
+
+ /*
+ * TTD isn't supported by the driver.
+ *
+ * Try to switch off to kdb if it is resident.
+ * Otherwise just hang (this might be panic).
+ *
+ * Check to make sure that TTD is supported.
+ * (Both by the machine's driver's, and bootp if using ether).
+ */
+ if (!kttd_supported()) {
+ kttd_enabled = FALSE;
+ return FALSE;
+ }
+
+ s = splhigh();
+
+ /*
+ * We are already in TTD!
+ */
+ if (++kttd_active > MAX_KTTD_ACTIVE) {
+ printf("kttd_trap: RE-ENTERED!!!\n");
+ }
+
+ if (kttd_debug)
+ printf("kttd_TRAP, after splhigh()\n");
+
+ /* Should switch to kttd's own stack here. */
+
+ kttd_regs = *regs;
+
+ if ((regs->cs & 0x3) == KERNEL_RING) {
+ /*
+ * Kernel mode - esp and ss not saved
+ */
+ kttd_regs.uesp = (int)&regs->uesp; /* kernel stack pointer */
+ kttd_regs.ss = KERNEL_DS;
+ }
+
+ /*
+ * If this was not entered via an interrupt (type != -1)
+ * then we've entered via a bpt, single, etc. and must
+ * set the globals.
+ *
+ * Setup the kttd globals for entry....
+ */
+ if (type != -1) {
+ kttd_current_request = NULL;
+ kttd_current_length = 0;
+ kttd_current_kmsg = NULL;
+ kttd_run_status = FULL_STOP;
+ }else{
+ /*
+ * We know that we can only get here if we did a kttd_intr
+ * since it's the way that we are called with type -1 (via
+ * the trampoline), so we don't have to worry about entering
+ * from Cntl-Alt-D like the mips does.
+ */
+ /*
+ * Perform sanity check!
+ */
+ if ((kttd_current_request == NULL) ||
+ (kttd_current_length == 0) ||
+ (kttd_current_kmsg == NULL) ||
+ (kttd_run_status != ONE_STOP)) {
+
+ printf("kttd_trap: INSANITY!!!\n");
+ }
+ }
+
+ kttd_task_trap(type, code, (regs->cs & 0x3) != 0);
+
+ regs->eip = kttd_regs.eip;
+ regs->efl = kttd_regs.efl;
+ regs->eax = kttd_regs.eax;
+ regs->ecx = kttd_regs.ecx;
+ regs->edx = kttd_regs.edx;
+ regs->ebx = kttd_regs.ebx;
+ if ((regs->cs & 0x3) != KERNEL_RING) {
+ /*
+ * user mode - saved esp and ss valid
+ */
+ regs->uesp = kttd_regs.uesp; /* user stack pointer */
+ regs->ss = kttd_regs.ss & 0xffff; /* user stack segment */
+ }
+ regs->ebp = kttd_regs.ebp;
+ regs->esi = kttd_regs.esi;
+ regs->edi = kttd_regs.edi;
+ regs->es = kttd_regs.es & 0xffff;
+ regs->cs = kttd_regs.cs & 0xffff;
+ regs->ds = kttd_regs.ds & 0xffff;
+ regs->fs = kttd_regs.fs & 0xffff;
+ regs->gs = kttd_regs.gs & 0xffff;
+
+ if (--kttd_active < MIN_KTTD_ACTIVE)
+ printf("ttd_trap: kttd_active < 0\n");
+
+ if (kttd_debug) {
+ printf("Leaving kttd_trap, kttd_active = %d\n", kttd_active);
+ }
+
+ /*
+ * Only reset this if we entered kttd_trap via an async trampoline.
+ */
+ if (type == -1) {
+ if (kttd_run_status == RUNNING)
+ printf("kttd_trap: $$$$$ run_status already RUNNING! $$$$$\n");
+ kttd_run_status = RUNNING;
+ }
+
+ /* Is this right? XXX */
+ kttd_run_status = RUNNING;
+
+ (void) splx(s);
+
+ /*
+ * Return true, that yes we handled the trap.
+ */
+ return TRUE;
+}
+
+/*
+ * Enter KTTD through a network packet trap.
+ * We show the registers as of the network interrupt
+ * instead of those at its call to KDB.
+ */
+struct int_regs {
+ int edi;
+ int esi;
+ int ebp;
+ int ebx;
+ struct i386_interrupt_state *is;
+};
+
+void
+kttd_netentry(struct int_regs *int_regs)
+{
+ struct i386_interrupt_state *is = int_regs->is;
+ int s;
+
+ if (kttd_debug)
+ printf("kttd_NETENTRY before slphigh()\n");
+
+ s = splhigh();
+
+ if (kttd_debug)
+ printf("kttd_NETENTRY after slphigh()\n");
+
+ if ((is->cs & 0x3) != KERNEL_RING) {
+ /*
+ * Interrupted from User Space
+ */
+ kttd_regs.uesp = ((int *)(is+1))[0];
+ kttd_regs.ss = ((int *)(is+1))[1];
+ }
+ else {
+ /*
+ * Interrupted from Kernel Space
+ */
+ kttd_regs.ss = KERNEL_DS;
+ kttd_regs.uesp= (int)(is+1);
+ }
+ kttd_regs.efl = is->efl;
+ kttd_regs.cs = is->cs;
+ kttd_regs.eip = is->eip;
+ kttd_regs.eax = is->eax;
+ kttd_regs.ecx = is->ecx;
+ kttd_regs.edx = is->edx;
+ kttd_regs.ebx = int_regs->ebx;
+ kttd_regs.ebp = int_regs->ebp;
+ kttd_regs.esi = int_regs->esi;
+ kttd_regs.edi = int_regs->edi;
+ kttd_regs.ds = is->ds;
+ kttd_regs.es = is->es;
+ kttd_regs.fs = is->fs;
+ kttd_regs.gs = is->gs;
+
+ kttd_active++;
+ kttd_task_trap(-1, 0, (kttd_regs.cs & 0x3) != 0);
+ kttd_active--;
+
+ if ((kttd_regs.cs & 0x3) != KERNEL_RING) {
+ ((int *)(is+1))[0] = kttd_regs.uesp;
+ ((int *)(is+1))[1] = kttd_regs.ss & 0xffff;
+ }
+ is->efl = kttd_regs.efl;
+ is->cs = kttd_regs.cs & 0xffff;
+ is->eip = kttd_regs.eip;
+ is->eax = kttd_regs.eax;
+ is->ecx = kttd_regs.ecx;
+ is->edx = kttd_regs.edx;
+ int_regs->ebx = kttd_regs.ebx;
+ int_regs->ebp = kttd_regs.ebp;
+ int_regs->esi = kttd_regs.esi;
+ int_regs->edi = kttd_regs.edi;
+ is->ds = kttd_regs.ds & 0xffff;
+ is->es = kttd_regs.es & 0xffff;
+ is->fs = kttd_regs.fs & 0xffff;
+ is->gs = kttd_regs.gs & 0xffff;
+
+ if (kttd_run_status == RUNNING)
+ printf("kttd_netentry: %%%%% run_status already RUNNING! %%%%%\n");
+ kttd_run_status = RUNNING;
+
+ (void) splx(s);
+}
+
+#endif /* MACH_TTD */
diff --git a/i386/i386/kttd_machdep.h b/i386/i386/kttd_machdep.h
new file mode 100644
index 0000000..8ac7de1
--- /dev/null
+++ b/i386/i386/kttd_machdep.h
@@ -0,0 +1,59 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KTTD_MACHDEP_H_
+#define _KTTD_MACHDEP_H_
+
+/* Presumably bounds for kttd_active nesting (recursive debugger
+ * entries) -- TODO confirm against the kttd_active users. */
+#define MAX_KTTD_ACTIVE 2
+#define MIN_KTTD_ACTIVE 0
+
+/*
+ * Register state for gdb.
+ * NOTE(review): the numbers in the comments below look like gdb's
+ * i386 register indices -- confirm against the gdb remote-protocol
+ * register layout before relying on them.
+ */
+struct i386_gdb_register_state {
+ int eax;
+ int ecx;
+ int edx;
+ int ebx;
+ int esp; /* 4 */
+ int ebp; /* 5 */
+ int esi;
+ int edi;
+ int eip; /* 8 */
+ int efl; /* 9 */
+ int cs;
+ int ss;
+ int ds;
+ int es;
+ int fs;
+ int gs;
+};
+
+/* Machine state exchanged with the remote debugger. */
+typedef struct i386_gdb_register_state ttd_machine_state;
+
+/* Storage for one instruction saved by the debugger -- presumably
+ * the original bytes replaced by a breakpoint; verify in kttd code. */
+typedef unsigned long ttd_saved_inst;
+
+#endif /* _KTTD_MACHDEP_H_ */
diff --git a/i386/i386/ldt.c b/i386/i386/ldt.c
new file mode 100644
index 0000000..5db3642
--- /dev/null
+++ b/i386/i386/ldt.c
@@ -0,0 +1,117 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * "Local" descriptor table. At the moment, all tasks use the
+ * same LDT.
+ */
+#include <mach/machine/eflags.h>
+#include <mach/machine/vm_types.h>
+#include <mach/xen.h>
+
+#include <intel/pmap.h>
+#include <kern/debug.h>
+
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "ldt.h"
+#include "locore.h"
+#include "mp_desc.h"
+#include "msr.h"
+
+#ifdef MACH_PV_DESCRIPTORS
+/* It is actually defined in xen_boothdr.S */
+extern
+#endif /* MACH_PV_DESCRIPTORS */
+struct real_descriptor ldt[LDTSZ];
+
+#if defined(__x86_64__) && ! defined(USER32)
+#define USER_SEGMENT_SIZEBITS SZ_64
+#else
+#define USER_SEGMENT_SIZEBITS SZ_32
+#endif
+
+/*
+ * ldt_fill: fill in and activate an LDT.
+ *
+ * myldt: LDT to initialize.  On non-Xen builds it is also registered
+ * in mygdt's KERNEL_LDT slot.  Installs the system-call entry
+ * point and the flat USER_CS/USER_DS user segments, then loads
+ * the LDT on the calling CPU.
+ * mygdt: GDT receiving the KERNEL_LDT descriptor (unused under
+ * MACH_PV_DESCRIPTORS).
+ */
+void
+ldt_fill(struct real_descriptor *myldt, struct real_descriptor *mygdt)
+{
+#ifdef MACH_PV_DESCRIPTORS
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readwrite(myldt);
+#endif /* MACH_PV_PAGETABLES */
+#else /* MACH_PV_DESCRIPTORS */
+ /* Initialize the master LDT descriptor in the GDT. */
+ _fill_gdt_sys_descriptor(mygdt, KERNEL_LDT,
+ kvtolin(myldt), (LDTSZ * sizeof(struct real_descriptor))-1,
+ ACC_PL_K|ACC_LDT, 0);
+#endif /* MACH_PV_DESCRIPTORS */
+
+ /* Initialize the syscall entry point */
+#if defined(__x86_64__) && ! defined(USER32)
+ /* NOTE(review): this tests CPU_FEATURE_SEP (sysenter) but then
+ * programs the SYSCALL MSRs (EFER.SCE/LSTAR/STAR/FMASK) --
+ * confirm the intended feature bit. */
+ if (!CPU_HAS_FEATURE(CPU_FEATURE_SEP))
+ panic("syscall support is missing on 64 bit");
+ /* Enable 64-bit syscalls */
+ wrmsr(MSR_REG_EFER, rdmsr(MSR_REG_EFER) | MSR_EFER_SCE);
+ wrmsr(MSR_REG_LSTAR, (vm_offset_t)syscall64);
+ wrmsr(MSR_REG_STAR, ((((long)USER_CS - 16) << 16) | (long)KERNEL_CS) << 32);
+ wrmsr(MSR_REG_FMASK, EFL_IF | EFL_IOPL_USER)
+
+/* Initialize and activate the boot processor's LDT. */
+void
+ldt_init(void)
+{
+ ldt_fill(ldt, gdt);
+}
+
+#if NCPUS > 1
+/* Initialize and activate the per-CPU LDT of application processor
+ * `cpu', using its private descriptor tables. */
+void
+ap_ldt_init(int cpu)
+{
+ ldt_fill(mp_desc_table[cpu]->ldt, mp_gdt[cpu]);
+}
+#endif
diff --git a/i386/i386/ldt.h b/i386/i386/ldt.h
new file mode 100644
index 0000000..51867f4
--- /dev/null
+++ b/i386/i386/ldt.h
@@ -0,0 +1,77 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON, IBM, AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON, IBM, AND CSL DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * This file describes the standard LDT provided by default
+ * to all user-level Mach tasks.
+ */
+#ifndef _I386_LDT_
+#define _I386_LDT_
+
+#include "seg.h"
+
+/*
+ * User descriptors for Mach - 32-bit flat address space
+ */
+#define USER_SCALL 0x07 /* system call gate */
+#if defined(__x86_64__) && ! defined(USER32)
+/* Call gate needs two entries */
+
+/* The sysret instruction puts some constraints on the user segment indexes */
+#define USER_CS 0x1f /* user code segment */
+#define USER_DS 0x17 /* user data segment */
+#else
+#define USER_CS 0x17 /* user code segment */
+#define USER_DS 0x1f /* user data segment */
+#endif
+
+#define LDTSZ 4
+
+
+#ifndef __ASSEMBLER__
+
+extern struct real_descriptor ldt[LDTSZ];
+
+/* Fill a 32bit segment descriptor in the LDT. */
+#define fill_ldt_descriptor(_ldt, selector, base, limit, access, sizebits) \
+ fill_descriptor(&_ldt[sel_idx(selector)], base, limit, access, sizebits)
+
+#define fill_ldt_gate(_ldt, selector, offset, dest_selector, access, word_count) \
+ fill_gate((struct real_gate*)&_ldt[sel_idx(selector)], \
+ offset, dest_selector, access, word_count)
+
+void ldt_init(void);
+void ap_ldt_init(int cpu);
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* _I386_LDT_ */
diff --git a/i386/i386/lock.h b/i386/i386/lock.h
new file mode 100644
index 0000000..b325ae0
--- /dev/null
+++ b/i386/i386/lock.h
@@ -0,0 +1,132 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent simple locks for the i386.
+ */
+#ifndef _I386_LOCK_H_
+#define _I386_LOCK_H_
+
+#if NCPUS > 1
+#include <i386/smp.h>
+
+/*
+ * All of the locking routines are built from calls on
+ * a locked-exchange operation. Values of the lock are
+ * 0 for unlocked, 1 for locked.
+ */
+
+#ifdef __GNUC__
+
+/*
+ * The code here depends on the GNU C compiler.
+ */
+
+#define _simple_lock_xchg_(lock, new_val) \
+({ natural_t _old_val_; \
+ asm volatile("xchg %0, %2" \
+ : "=r" (_old_val_) \
+ : "0" ((natural_t)(new_val)), "m" (*(lock)) : "memory" \
+ ); \
+ _old_val_; \
+ })
+
+#define simple_lock_init(l) \
+ ((l)->lock_data = 0)
+
+#define SIMPLE_LOCK_INITIALIZER(l) \
+ {.lock_data = 0}
+
+#define _simple_lock(l) \
+ ({ \
+ while(_simple_lock_xchg_(l, 1)) \
+ while (*(volatile natural_t *)&(l)->lock_data) \
+ cpu_pause(); \
+ 0; \
+ })
+
+/* Release the lock: store 0 via locked exchange. */
+#define _simple_unlock(l) \
+ (_simple_lock_xchg_(l, 0))
+
+/* Try to take the lock once, without spinning; non-zero on success. */
+#define _simple_lock_try(l) \
+ (!_simple_lock_xchg_(l, 1))
+
+/*
+ * General bit-lock routines.
+ */
+#define bit_lock(bit, l) \
+ ({ \
+ asm volatile(" jmp 1f \n\
+ 0: btl %0, %1 \n\
+ jb 0b \n\
+ 1: lock \n\
+ btsl %0, %1 \n\
+ jb 0b" \
+ : \
+ : "r" ((int)(bit)), "m" (*(volatile int *)(l)) : "memory"); \
+ 0; \
+ })
+
+#define bit_unlock(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" ((int)(bit)), "m" (*(volatile int *)(l)) : "memory"); \
+ 0; \
+ })
+
+/*
+ * Set or clear individual bits in a long word.
+ * The locked access is needed only to lock access
+ * to the word, not to individual bits.
+ */
+#define i_bit_set(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btsl %0, %1" \
+ : \
+ : "r" ((int)(bit)), "m" (*(l)) ); \
+ 0; \
+ })
+
+#define i_bit_clear(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" ((int)(bit)), "m" (*(l)) ); \
+ 0; \
+ })
+
+#endif /* __GNUC__ */
+
+extern void simple_lock_pause(void);
+
+#endif /* NCPUS > 1 */
+
+
+
+#endif /* _I386_LOCK_H_ */
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
new file mode 100644
index 0000000..9d0513a
--- /dev/null
+++ b/i386/i386/locore.S
@@ -0,0 +1,1603 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+#include <i386/proc_reg.h>
+#include <i386/trap.h>
+#include <i386/seg.h>
+#include <i386/gdt.h>
+#include <i386/ldt.h>
+#include <i386/i386asm.h>
+#include <i386/cpu_number.h>
+#include <i386/xen.h>
+
+#define PUSH_REGS_ISR \
+ pushl %ecx ;\
+ pushl %edx
+
+#define PUSH_AREGS_ISR \
+ pushl %eax ;\
+ PUSH_REGS_ISR
+
+
+#define POP_REGS_ISR \
+ popl %edx ;\
+ popl %ecx
+
+#define POP_AREGS_ISR \
+ POP_REGS_ISR ;\
+ popl %eax
+
+/*
+ * Note that we have to load the kernel segment registers even if this
+ * is a trap from the kernel, because the kernel uses user segment
+ * registers for copyin/copyout.
+ * (XXX Would it be smarter just to use fs or gs for that?)
+ */
+#define PUSH_SEGMENTS \
+ pushl %ds ;\
+ pushl %es ;\
+ pushl %fs ;\
+ pushl %gs
+
+#define POP_SEGMENTS \
+ popl %gs ;\
+ popl %fs ;\
+ popl %es ;\
+ popl %ds
+
+#define PUSH_SEGMENTS_ISR \
+ pushl %ds ;\
+ pushl %es ;\
+ pushl %fs ;\
+ pushl %gs
+
+#define POP_SEGMENTS_ISR \
+ popl %gs ;\
+ popl %fs ;\
+ popl %es ;\
+ popl %ds
+
+#define SET_KERNEL_SEGMENTS(reg) \
+ mov %ss,reg /* switch to kernel segments */ ;\
+ mov reg,%ds /* (same as kernel stack segment) */ ;\
+ mov reg,%es ;\
+ mov reg,%fs ;\
+ mov $(PERCPU_DS),reg ;\
+ mov reg,%gs
+
+/*
+ * Fault recovery.
+ */
+#define RECOVER_TABLE_START \
+ .text 2 ;\
+DATA(recover_table) ;\
+ .text
+
+#define RECOVER(addr) \
+ .text 2 ;\
+ .long 9f ;\
+ .long addr ;\
+ .text ;\
+9:
+
+#define RECOVER_TABLE_END \
+ .text 2 ;\
+ .globl EXT(recover_table_end) ;\
+LEXT(recover_table_end) ;\
+ .text
+
+/*
+ * Retry table for certain successful faults.
+ */
+#define RETRY_TABLE_START \
+ .text 3 ;\
+DATA(retry_table) ;\
+ .text
+
+#define RETRY(addr) \
+ .text 3 ;\
+ .long 9f ;\
+ .long addr ;\
+ .text ;\
+9:
+
+#define RETRY_TABLE_END \
+ .text 3 ;\
+ .globl EXT(retry_table_end) ;\
+LEXT(retry_table_end) ;\
+ .text
+
+/*
+ * Allocate recovery and retry tables.
+ */
+ RECOVER_TABLE_START
+ RETRY_TABLE_START
+
+/*
+ * Timing routines.
+ */
+#if STAT_TIME
+
+#define TIME_TRAP_UENTRY
+#define TIME_TRAP_SENTRY
+#define TIME_TRAP_UEXIT
+#define TIME_INT_ENTRY
+#define TIME_INT_EXIT
+
+#else /* microsecond timing */
+
+/*
+ * Microsecond timing.
+ * Assumes a free-running microsecond counter.
+ * no TIMER_MAX check needed.
+ */
+
+/*
+ * There is only one current time-stamp per CPU, since only
+ * the time-stamp in the current timer is used.
+ * To save time, we allocate the current time-stamps here.
+ */
+ .comm EXT(current_tstamp), 4*NCPUS
+
+/*
+ * Update time on user trap entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %eax, %ebx, %ecx.
+ */
+#define TIME_TRAP_UENTRY \
+ pushf /* Save flags */ ;\
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
+ popf /* allow interrupts */
+
+/*
+ * Update time on system call entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ * Same as TIME_TRAP_UENTRY, but preserves %eax.
+ */
+#define TIME_TRAP_SENTRY \
+ pushf /* Save flags */ ;\
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ pushl %eax /* save %eax */ ;\
+ call timer_normalize /* normalize timer */ ;\
+ popl %eax /* restore %eax */ ;\
+0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
+ popf /* allow interrupts */
+
+/*
+ * update time on user trap exit.
+ * 10 instructions.
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ */
+#define TIME_TRAP_UEXIT \
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: addl $(TH_USER_TIMER-TH_SYSTEM_TIMER),%ecx ;\
+ /* switch to user timer */;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* make it current */
+
+/*
+ * update time on interrupt entry.
+ * 9 instructions.
+ * Assumes CPU number in %edx.
+ * Leaves old timer in %ebx.
+ * Uses %ecx.
+ */
+#define TIME_INT_ENTRY \
+ movl VA_ETC,%ecx /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ebx /* get old time stamp */;\
+ movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ebx,%ecx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ebx /* get current timer */ ;\
+ addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\
+ leal CX(0,%edx),%ecx /* timer is 16 bytes */ ;\
+ lea CX(EXT(kernel_timer),%edx),%ecx /* get interrupt timer*/;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
+
+/*
+ * update time on interrupt exit.
+ * 11 instructions
+ * Assumes CPU number in %edx, old timer in %ebx.
+ * Uses %eax, %ecx.
+ */
+#define TIME_INT_EXIT \
+ movl VA_ETC,%eax /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %eax,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%eax /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: testb $0x80,LOW_BITS+3(%ebx) /* old timer overflow? */;\
+ jz 0f /* if overflow, */ ;\
+ movl %ebx,%ecx /* get old timer */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: movl %ebx,CX(EXT(current_timer),%edx) /* set timer */
+
+
+/*
+ * Normalize timer in ecx.
+ * Preserves edx; clobbers eax.
+ */
+ .align 2
+timer_high_unit:
+ .long TIMER_HIGH_UNIT /* div has no immediate opnd */
+
+timer_normalize:
+ pushl %edx /* save register */
+ xorl %edx,%edx /* clear divisor high */
+ movl LOW_BITS(%ecx),%eax /* get divisor low */
+ divl timer_high_unit,%eax /* quotient in eax */
+ /* remainder in edx */
+ addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
+ movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */
+ addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */
+ popl %edx /* restore register */
+ ret
+
+/*
+ * Switch to a new timer.
+ */
+ENTRY(timer_switch)
+ CPU_NUMBER(%edx) /* get this CPU */
+ movl VA_ETC,%ecx /* get timer */
+ movl CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */
+ movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */
+ subl %ecx,%eax /* elapsed = new - old */
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */
+ jns 0f /* if overflow, */
+ call timer_normalize /* normalize timer */
+0:
+ movl S_ARG0,%ecx /* get new timer */
+ movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
+ ret
+
+/*
+ * Initialize the first timer for a CPU.
+ */
+ENTRY(start_timer)
+ CPU_NUMBER(%edx) /* get this CPU */
+ movl VA_ETC,%ecx /* get timer */
+ movl %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
+ movl S_ARG0,%ecx /* get timer */
+ movl %ecx,CX(EXT(current_timer),%edx) /* set initial timer */
+ ret
+
+#endif /* accurate timing */
+
+/* */
+
+/*
+ * Trap/interrupt entry points.
+ *
+ * All traps must create the following save area on the kernel stack:
+ *
+ * gs
+ * fs
+ * es
+ * ds
+ * edi
+ * esi
+ * ebp
+ * cr2 if page fault - otherwise unused
+ * ebx
+ * edx
+ * ecx
+ * eax
+ * trap number
+ * error code
+ * eip
+ * cs
+ * eflags
+ * user esp - if from user
+ * user ss - if from user
+ * es - if from V86 thread
+ * ds - if from V86 thread
+ * fs - if from V86 thread
+ * gs - if from V86 thread
+ *
+ */
+
+/*
+ * General protection or segment-not-present fault.
+ * Check for a GP/NP fault in the kernel_return
+ * sequence; if there, report it as a GP/NP fault on the user's instruction.
+ *
+ * esp-> 0: trap code (NP or GP)
+ * 4: segment number in error
+ * 8 eip
+ * 12 cs
+ * 16 eflags
+ * 20 old registers (trap is from kernel)
+ */
+ENTRY(t_gen_prot)
+ pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
+ jmp trap_check_kernel_exit /* check for kernel exit sequence */
+
+ENTRY(t_segnp)
+ pushl $(T_SEGMENT_NOT_PRESENT)
+ /* indicate fault type */
+
+trap_check_kernel_exit:
+ testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
+ jnz EXT(alltraps) /* isn`t kernel trap if so */
+ /* Note: handling KERNEL_RING value by hand */
+ testl $2,12(%esp) /* is trap from kernel mode? */
+ jnz EXT(alltraps) /* if so: */
+ /* check for the kernel exit sequence */
+ cmpl $_kret_iret,8(%esp) /* on IRET? */
+ je fault_iret
+ cmpl $_kret_popl_ds,8(%esp) /* popping DS? */
+ je fault_popl_ds
+ cmpl $_kret_popl_es,8(%esp) /* popping ES? */
+ je fault_popl_es
+ cmpl $_kret_popl_fs,8(%esp) /* popping FS? */
+ je fault_popl_fs
+ cmpl $_kret_popl_gs,8(%esp) /* popping GS? */
+ je fault_popl_gs
+take_fault: /* if none of the above: */
+ jmp EXT(alltraps) /* treat as normal trap. */
+
+/*
+ * GP/NP fault on IRET: CS or SS is in error.
+ * All registers contain the user's values.
+ *
+ * on SP is
+ * 0 trap number
+ * 4 errcode
+ * 8 eip
+ * 12 cs --> trapno
+ * 16 efl --> errcode
+ * 20 user eip
+ * 24 user cs
+ * 28 user eflags
+ * 32 user esp
+ * 36 user ss
+ */
+fault_iret:
+ movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
+ popl %eax /* get trap number */
+ movl %eax,12-4(%esp) /* put in user trap number */
+ popl %eax /* get error code */
+ movl %eax,16-8(%esp) /* put in user errcode */
+ popl %eax /* restore eax */
+ jmp EXT(alltraps) /* take fault */
+
+/*
+ * Fault restoring a segment register. The user's registers are still
+ * saved on the stack. The offending segment register has not been
+ * popped.
+ */
+fault_popl_ds:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_es /* (DS on top of stack) */
+fault_popl_es:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_fs /* (ES on top of stack) */
+fault_popl_fs:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_gs /* (FS on top of stack) */
+fault_popl_gs:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_segregs /* (GS on top of stack) */
+
+push_es:
+ pushl %es /* restore es, */
+push_fs:
+ pushl %fs /* restore fs, */
+push_gs:
+ pushl %gs /* restore gs. */
+push_segregs:
+ movl %eax,R_TRAPNO(%esp) /* set trap number */
+ movl %edx,R_ERR(%esp) /* set error code */
+ jmp trap_set_segs /* take trap */
+
+/*
+ * Debug trap. Check for single-stepping across system call into
+ * kernel. If this is the case, taking the debug trap has turned
+ * off single-stepping - save the flags register with the trace
+ * bit set.
+ */
+ENTRY(t_debug)
+ testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
+ jnz 0f /* isn`t kernel trap if so */
+ /* Note: handling KERNEL_RING value by hand */
+ testl $2,4(%esp) /* is trap from kernel mode? */
+ jnz 0f /* if so: */
+ cmpl $syscall_entry,(%esp) /* system call entry? */
+ jne 0f /* if so: */
+ /* flags are sitting where syscall */
+ /* wants them */
+ addl $8,%esp /* remove eip/cs */
+ jmp syscall_entry_2 /* continue system call entry */
+
+0: pushl $0 /* otherwise: */
+ pushl $(T_DEBUG) /* handle as normal */
+ jmp EXT(alltraps) /* debug fault */
+
+/*
+ * Page fault traps save cr2.
+ */
+ENTRY(t_page_fault)
+ pushl $(T_PAGE_FAULT) /* mark a page fault trap */
+ pusha /* save the general registers */
+#ifdef MACH_PV_PAGETABLES
+ movl %ss:hyp_shared_info+CR2,%eax
+#else /* MACH_PV_PAGETABLES */
+ movl %cr2,%eax /* get the faulting address */
+#endif /* MACH_PV_PAGETABLES */
+ movl %eax,R_CR2-R_EDI(%esp) /* save in esp save slot */
+ jmp trap_push_segs /* continue fault */
+
+/*
+ * All 'exceptions' enter here with:
+ * esp-> trap number
+ * error code
+ * old eip
+ * old cs
+ * old eflags
+ * old esp if trapped from user
+ * old ss if trapped from user
+ */
+ENTRY(alltraps)
+ pusha /* save the general registers */
+trap_push_segs:
+ PUSH_SEGMENTS /* and the segment registers */
+ SET_KERNEL_SEGMENTS(%ax) /* switch to kernel data segment */
+trap_set_segs:
+ cld /* clear direction flag */
+ testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
+ jnz trap_from_user /* user mode trap if so */
+ /* Note: handling KERNEL_RING value by hand */
+ testb $2,R_CS(%esp) /* user mode trap? */
+ jz trap_from_kernel /* kernel trap if not */
+trap_from_user:
+
+ CPU_NUMBER(%edx)
+ TIME_TRAP_UENTRY
+
+ movl CX(EXT(kernel_stack),%edx),%ebx
+ xchgl %ebx,%esp /* switch to kernel stack */
+ /* user regs pointer already set */
+_take_trap:
+ pushl %ebx /* pass register save area to trap */
+ call EXT(user_trap) /* call user trap routine */
+ movl 4(%esp),%esp /* switch back to PCB stack */
+
+ orl %eax,%eax /* emulated syscall? */
+ jz _return_from_trap /* no, just return */
+ movl R_EAX(%ebx),%eax /* yes, get syscall number */
+ jmp syscall_entry_3 /* and emulate it */
+
+/*
+ * Return from trap or system call, checking for ASTs.
+ * On PCB stack.
+ */
+
+_return_from_trap:
+ CPU_NUMBER(%edx)
+ cmpl $0,CX(EXT(need_ast),%edx)
+ jz _return_to_user /* if we need an AST: */
+
+ movl CX(EXT(kernel_stack),%edx),%esp
+ /* switch to kernel stack */
+ call EXT(i386_astintr) /* take the AST */
+ popl %esp /* switch back to PCB stack */
+ jmp _return_from_trap /* and check again (rare) */
+ /* ASTs after this point will */
+ /* have to wait */
+
+_return_to_user:
+ TIME_TRAP_UEXIT
+
+/*
+ * Return from kernel mode to interrupted thread.
+ */
+
+_return_from_kernel:
+_kret_popl_gs:
+ popl %gs /* restore segment registers */
+_kret_popl_fs:
+ popl %fs
+_kret_popl_es:
+ popl %es
+_kret_popl_ds:
+ popl %ds
+ popa /* restore general registers */
+ addl $8,%esp /* discard trap number and error code */
+_kret_iret:
+ iret /* return from interrupt */
+
+
+/*
+ * Trap from kernel mode. No need to switch stacks.
+ */
+trap_from_kernel:
+#if MACH_KDB || MACH_TTD
+ movl %esp,%ebx /* save current stack */
+ movl %esp,%edx /* on an interrupt stack? */
+
+ CPU_NUMBER(%ecx)
+ and $(~(INTSTACK_SIZE-1)),%edx
+ cmpl CX(EXT(int_stack_base),%ecx),%edx
+ je 1f /* OK if so */
+
+ movl %ecx,%edx
+ cmpl CX(EXT(kernel_stack),%edx),%esp
+ /* already on kernel stack? */
+ ja 0f
+ cmpl MY(ACTIVE_STACK),%esp
+ ja 1f /* switch if not */
+0:
+ movl CX(EXT(kernel_stack),%edx),%esp
+1:
+ pushl %ebx /* save old stack */
+ pushl %ebx /* pass as parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+ addl $4,%esp /* pop parameter */
+ popl %esp /* return to old stack */
+#else /* MACH_KDB || MACH_TTD */
+
+ pushl %esp /* pass parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+ addl $4,%esp /* pop parameter */
+#endif /* MACH_KDB || MACH_TTD */
+
+ jmp _return_from_kernel
+
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from an exception.
+ */
+
+ENTRY(thread_exception_return)
+ENTRY(thread_bootstrap_return)
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
+ jmp _return_from_trap
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from a syscall.
+ * Takes the syscall's return code as an argument.
+ */
+
+ENTRY(thread_syscall_return)
+ movl S_ARG0,%eax /* get return value */
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
+ movl %eax,R_EAX(%esp) /* save return value */
+ jmp _return_from_trap
+
+ENTRY(call_continuation)
+ movl S_ARG0,%eax /* get continuation */
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ addl $(-3-IKS_SIZE),%ecx
+ movl %ecx,%esp /* pop the stack */
+ xorl %ebp,%ebp /* zero frame pointer */
+ pushl $0 /* Dummy return address */
+ jmp *%eax /* goto continuation */
+
+
+/* IOAPIC has 24 interrupts, put spurious in the same array */
+
+#define INTERRUPT(n) \
+ .data 2 ;\
+ .long 0f ;\
+ .text ;\
+ P2ALIGN(TEXT_ALIGN) ;\
+0: ;\
+ pushl %eax ;\
+ movl $(n),%eax ;\
+ jmp EXT(all_intrs)
+
+ .data 2
+DATA(int_entry_table)
+ .text
+/* Legacy APIC interrupts or PIC interrupts */
+INTERRUPT(0)
+INTERRUPT(1)
+INTERRUPT(2)
+INTERRUPT(3)
+INTERRUPT(4)
+INTERRUPT(5)
+INTERRUPT(6)
+INTERRUPT(7)
+INTERRUPT(8)
+INTERRUPT(9)
+INTERRUPT(10)
+INTERRUPT(11)
+INTERRUPT(12)
+INTERRUPT(13)
+INTERRUPT(14)
+INTERRUPT(15)
+#ifdef APIC
+/* APIC PCI interrupts PIRQ A-H */
+INTERRUPT(16)
+INTERRUPT(17)
+INTERRUPT(18)
+INTERRUPT(19)
+INTERRUPT(20)
+INTERRUPT(21)
+INTERRUPT(22)
+INTERRUPT(23)
+/* Possibly 8 more GSIs */
+INTERRUPT(24)
+INTERRUPT(25)
+INTERRUPT(26)
+INTERRUPT(27)
+INTERRUPT(28)
+INTERRUPT(29)
+INTERRUPT(30)
+INTERRUPT(31)
+/* ... APIC IOAPIC #2 */
+INTERRUPT(32)
+INTERRUPT(33)
+INTERRUPT(34)
+INTERRUPT(35)
+INTERRUPT(36)
+INTERRUPT(37)
+INTERRUPT(38)
+INTERRUPT(39)
+INTERRUPT(40)
+INTERRUPT(41)
+INTERRUPT(42)
+INTERRUPT(43)
+INTERRUPT(44)
+INTERRUPT(45)
+INTERRUPT(46)
+INTERRUPT(47)
+INTERRUPT(48)
+INTERRUPT(49)
+INTERRUPT(50)
+INTERRUPT(51)
+INTERRUPT(52)
+INTERRUPT(53)
+INTERRUPT(54)
+INTERRUPT(55)
+/* Possibly 8 more GSIs */
+INTERRUPT(56)
+INTERRUPT(57)
+INTERRUPT(58)
+INTERRUPT(59)
+INTERRUPT(60)
+INTERRUPT(61)
+INTERRUPT(62)
+INTERRUPT(63)
+#endif
+#if NCPUS > 1
+INTERRUPT(CALL_AST_CHECK)
+INTERRUPT(CALL_PMAP_UPDATE)
+#endif
+#ifdef APIC
+/* Spurious interrupt, set irq number to vect number */
+INTERRUPT(255)
+#endif
+
+/* XXX handle NMI - at least print a warning like Linux does. */
+
+/*
+ * All interrupts enter here.
+ * old %eax on stack; interrupt number in %eax.
+ */
+ENTRY(all_intrs)
+	PUSH_REGS_ISR			/* save registers */
+	cld				/* clear direction flag */
+
+	CPU_NUMBER_NO_GS(%ecx)
+	movl	%esp,%edx		/* on an interrupt stack? */
+	and	$(~(INTSTACK_SIZE-1)),%edx
+	cmpl	%ss:CX(EXT(int_stack_base),%ecx),%edx
+	je	int_from_intstack	/* if not: */
+
+	PUSH_SEGMENTS_ISR		/* save segment registers */
+	SET_KERNEL_SEGMENTS(%dx)	/* switch to kernel segments */
+
+	CPU_NUMBER(%edx)
+
+	movl	CX(EXT(int_stack_top),%edx),%ecx
+
+	xchgl	%ecx,%esp		/* switch to interrupt stack */
+
+#if	STAT_TIME
+	pushl	%ecx			/* save pointer to old stack */
+#else
+	pushl	%ebx			/* save %ebx - out of the way */
+					/* so stack looks the same */
+	pushl	%ecx			/* save pointer to old stack */
+	TIME_INT_ENTRY			/* do timing */
+#endif
+
+#ifdef MACH_LDEBUG
+	incl	CX(EXT(in_interrupt),%edx)
+#endif
+
+	call	EXT(interrupt)		/* call generic interrupt routine */
+	.globl	EXT(return_to_iret)	/* ( label for kdb_kintr and hardclock */
+LEXT(return_to_iret)		/* to find the return from calling interrupt) */
+
+	CPU_NUMBER(%edx)
+#ifdef MACH_LDEBUG
+	decl	CX(EXT(in_interrupt),%edx)
+#endif
+
+#if	STAT_TIME
+#else
+	TIME_INT_EXIT			/* do timing */
+	movl	4(%esp),%ebx		/* restore the extra reg we saved */
+#endif
+
+	popl	%esp			/* switch back to old stack */
+
+	/* Only check for ASTs when the interrupt came from V86 mode or
+	 * from user mode; a kernel-mode interruptee resumes untouched. */
+	testl	$(EFL_VM),I_EFL(%esp)	/* if in V86 */
+	jnz	0f			/* or */
+	/* Note: handling KERNEL_RING value by hand */
+	testb	$2,I_CS(%esp)		/* user mode, */
+	jz	1f			/* check for ASTs */
+0:
+	cmpl	$0,CX(EXT(need_ast),%edx)
+	jnz	ast_from_interrupt	/* take it if so */
+1:
+	POP_SEGMENTS_ISR		/* restore segment regs */
+	POP_AREGS_ISR			/* restore registers */
+
+	iret				/* return to caller */
+
+/* Interrupt arrived while already running on the interrupt stack:
+ * stay on that stack, but guard against recursion running off its base. */
+int_from_intstack:
+	CPU_NUMBER_NO_GS(%edx)
+	cmpl	CX(EXT(int_stack_base),%edx),%esp	/* seemingly looping? */
+	jb	stack_overflowed	/* if not: */
+	call	EXT(interrupt)		/* call interrupt routine */
+_return_to_iret_i:	/* ( label for kdb_kintr) */
+	/* must have been on kernel segs */
+	POP_AREGS_ISR			/* restore registers */
+	/* no ASTs */
+
+	iret
+
+stack_overflowed:
+	ud2				/* invalid-opcode trap: die loudly rather than corrupt memory */
+
+/*
+ * Take an AST from an interrupt.
+ * On PCB stack.
+ * sp-> gs -> edx
+ * fs -> ecx
+ * es -> eax
+ * ds -> trapno
+ * edx -> code
+ * ecx
+ * eax
+ * eip
+ * cs
+ * efl
+ * esp
+ * ss
+ */
+ast_from_interrupt:
+	/* Rebuild a trap-style frame (zero code + trap number) on the PCB
+	 * stack so the AST can be delivered through the normal trap path. */
+	POP_SEGMENTS_ISR		/* restore all registers ... */
+	POP_AREGS_ISR
+	pushl	$0			/* zero code */
+	pushl	$0			/* zero trap number */
+	pusha				/* save general registers */
+	PUSH_SEGMENTS_ISR		/* save segment registers */
+	SET_KERNEL_SEGMENTS(%dx)	/* switch to kernel segments */
+	CPU_NUMBER(%edx)
+	TIME_TRAP_UENTRY
+
+	movl	CX(EXT(kernel_stack),%edx),%esp
+					/* switch to kernel stack */
+	call	EXT(i386_astintr)	/* take the AST */
+	popl	%esp			/* back to PCB stack */
+	jmp	_return_from_trap	/* return */
+
+#if MACH_KDB
+/*
+ * kdb_kintr: enter kdb from keyboard interrupt.
+ * Chase down the stack frames until we find one whose return
+ * address is the interrupt handler. At that point, we have:
+ *
+ * frame-> saved %ebp
+ * return address in interrupt handler
+ * #ifndef MACH_XEN
+ * 1st parameter iunit
+ * 2nd parameter saved SPL
+ *	3rd parameter	return address
+ * 4th parameter registers
+ * saved SPL
+ * saved IRQ
+ * #endif
+ * return address == return_to_iret_i
+ * saved %edx
+ * saved %ecx
+ * saved %eax
+ * saved %eip
+ * saved %cs
+ * saved %efl
+ *
+ * OR:
+ * frame-> saved %ebp
+ * return address in interrupt handler
+ * #ifndef MACH_XEN
+ * iunit
+ * saved SPL
+ * irq
+ * #endif
+ * return address == return_to_iret
+ * pointer to save area on old stack
+ * [ saved %ebx, if accurate timing ]
+ *
+ * old stack: saved %gs
+ * saved %fs
+ * saved %es
+ * saved %ds
+ * saved %edx
+ * saved %ecx
+ * saved %eax
+ * saved %eip
+ * saved %cs
+ * saved %efl
+ *
+ * Call kdb, passing it that register save area.
+ */
+
+#ifdef MACH_XEN
+#define RET_OFFSET 8
+#else /* MACH_XEN */
+#define RET_OFFSET 32
+#endif /* MACH_XEN */
+
+ENTRY(kdb_kintr)
+	movl	%ebp,%eax		/* save caller`s frame pointer */
+	movl	$EXT(return_to_iret),%ecx	/* interrupt return address 1 */
+	movl	$_return_to_iret_i,%edx	/* interrupt return address 2 */
+
+	/* Walk the %ebp frame chain looking for the frame that will return
+	 * into the interrupt handler, then patch its return address so the
+	 * unwind lands in kdb instead. */
+0:	cmpl	RET_OFFSET(%eax),%ecx	/* does this frame return to */
+					/* interrupt handler (1)? */
+	je	1f
+	cmpl	RET_OFFSET(%eax),%edx	/* interrupt handler (2)? */
+	je	2f			/* if not: */
+	movl	(%eax),%eax		/* try next frame */
+	testl	%eax,%eax		/* NULL frame pointer terminates the walk */
+	jnz	0b
+	ud2				/* oops, didn't find frame, fix me :/ */
+
+1:	movl	$kdb_from_iret,RET_OFFSET(%eax)
+	ret				/* returns to kernel/user stack */
+
+2:	movl	$kdb_from_iret_i,RET_OFFSET(%eax)
+					/* returns to interrupt stack */
+	ret
+
+/*
+ * On return from keyboard interrupt, we will execute
+ * kdb_from_iret_i
+ * if returning to an interrupt on the interrupt stack
+ * kdb_from_iret
+ * if returning to an interrupt on the user or kernel stack
+ */
+kdb_from_iret:
+	/* save regs in known locations */
+	/* With accurate timing (!STAT_TIME), %ebx was saved on the old stack
+	 * at 4(%esp); fetch it from there so the layout kdb sees is the same
+	 * in both configurations. */
+#if	STAT_TIME
+	pushl	%ebx			/* caller`s %ebx is in reg */
+#else
+	movl	4(%esp),%eax		/* get caller`s %ebx */
+	pushl	%eax			/* push on stack */
+#endif
+	pushl	%ebp
+	pushl	%esi
+	pushl	%edi
+	pushl	%esp			/* pass regs */
+	call	EXT(kdb_kentry)		/* to kdb */
+	addl	$4,%esp			/* pop parameters */
+	popl	%edi			/* restore registers */
+	popl	%esi
+	popl	%ebp
+#if	STAT_TIME
+	popl	%ebx
+#else
+	popl	%eax
+	movl	%eax,4(%esp)
+#endif
+	jmp	EXT(return_to_iret)	/* normal interrupt return */
+
+kdb_from_iret_i:			/* on interrupt stack */
+	/* Reconstruct a full trap frame (zero error code/trap number) and
+	 * enter kdb via kdb_trap with type -1 (keyboard entry). */
+	pop	%edx			/* restore saved registers */
+	pop	%ecx
+	pop	%eax
+	pushl	$0			/* zero error code */
+	pushl	$0			/* zero trap number */
+	pusha				/* save general registers */
+	PUSH_SEGMENTS			/* save segment registers */
+	pushl	%esp			/* pass regs, */
+	pushl	$0			/* code, */
+	pushl	$-1			/* type to kdb */
+	call	EXT(kdb_trap)
+	addl	$12,%esp		/* remove parameters */
+	POP_SEGMENTS			/* restore segment registers */
+	popa				/* restore general registers */
+	addl	$8,%esp			/* drop error code and trap number */
+	iret
+
+#endif /* MACH_KDB */
+
+#if MACH_TTD
+/*
+ * Same code as that above for the keyboard entry into kdb.
+ */
+ENTRY(kttd_intr)
+	/* Same frame walk as kdb_kintr: find the frame returning into the
+	 * interrupt handler and redirect it into the TTD entry points.
+	 * NOTE(review): this uses a hard-coded return-address offset of 16,
+	 * while kdb_kintr uses RET_OFFSET (32 non-Xen) - confirm which frame
+	 * layout TTD builds actually produce. */
+	movl	%ebp,%eax		/* save caller`s frame pointer */
+	movl	$EXT(return_to_iret),%ecx	/* interrupt return address 1 */
+	movl	$_return_to_iret_i,%edx	/* interrupt return address 2 */
+
+0:	cmpl	16(%eax),%ecx		/* does this frame return to */
+					/* interrupt handler (1)? */
+	je	1f
+	cmpl	16(%eax),%edx		/* interrupt handler (2)? */
+	je	2f			/* if not: */
+	movl	(%eax),%eax		/* try next frame */
+	testl	%eax,%eax		/* NULL frame pointer ends the walk; */
+	jnz	0b			/* previously this looped forever */
+	ud2				/* frame not found - die loudly */
+
+1:	movl	$ttd_from_iret,16(%eax)	/* returns to kernel/user stack */
+	ret
+
+2:	movl	$ttd_from_iret_i,16(%eax)
+					/* returns to interrupt stack */
+	ret
+
+/*
+ * On return from keyboard interrupt, we will execute
+ * ttd_from_iret_i
+ * if returning to an interrupt on the interrupt stack
+ * ttd_from_iret
+ * if returning to an interrupt on the user or kernel stack
+ */
+ttd_from_iret:
+	/* save regs in known locations */
+	/* Mirror of kdb_from_iret for the TTD debugger.
+	 * NOTE(review): calls _kttd_netentry directly instead of using the
+	 * EXT() wrapper used elsewhere - confirm the symbol name is right
+	 * for this build's underscore convention. */
+#if	STAT_TIME
+	pushl	%ebx			/* caller`s %ebx is in reg */
+#else
+	movl	4(%esp),%eax		/* get caller`s %ebx */
+	pushl	%eax			/* push on stack */
+#endif
+	pushl	%ebp
+	pushl	%esi
+	pushl	%edi
+	pushl	%esp			/* pass regs */
+	call	_kttd_netentry		/* to kdb */
+	addl	$4,%esp			/* pop parameters */
+	popl	%edi			/* restore registers */
+	popl	%esi
+	popl	%ebp
+#if	STAT_TIME
+	popl	%ebx
+#else
+	popl	%eax
+	movl	%eax,4(%esp)
+#endif
+	jmp	EXT(return_to_iret)	/* normal interrupt return */
+
+ttd_from_iret_i:			/* on interrupt stack */
+	/* Mirror of kdb_from_iret_i, entering TTD via _kttd_trap.
+	 * NOTE(review): uses PUSH_SEGMENTS_ISR/POP_SEGMENTS_ISR where the
+	 * kdb twin uses PUSH_SEGMENTS/POP_SEGMENTS - verify which variant
+	 * is correct here. */
+	pop	%edx			/* restore saved registers */
+	pop	%ecx
+	pop	%eax
+	pushl	$0			/* zero error code */
+	pushl	$0			/* zero trap number */
+	pusha				/* save general registers */
+	PUSH_SEGMENTS_ISR		/* save segment registers */
+	pushl	%esp			/* pass regs, */
+	pushl	$0			/* code, */
+	pushl	$-1			/* type to kdb */
+	call	_kttd_trap
+	addl	$12,%esp		/* remove parameters */
+	POP_SEGMENTS_ISR		/* restore segment registers */
+	popa				/* restore general registers */
+	addl	$8,%esp			/* drop error code and trap number */
+	iret
+
+#endif /* MACH_TTD */
+
+/*
+ * System call enters through a call gate. Flags are not saved -
+ * we must shuffle stack to look like trap save area.
+ *
+ * esp-> old eip
+ * old cs
+ * old esp
+ * old ss
+ *
+ * eax contains system call number.
+ */
+ENTRY(syscall)
+syscall_entry:
+	pushf				/* save flags as soon as possible */
+syscall_entry_2:
+	cld				/* clear direction flag */
+
+	pushl	%eax			/* save system call number */
+	pushl	$0			/* clear trap number slot */
+
+	pusha				/* save the general registers */
+	PUSH_SEGMENTS			/* and the segment registers */
+	SET_KERNEL_SEGMENTS(%dx)	/* switch to kernel data segment */
+
+/*
+ * Shuffle eflags,eip,cs into proper places
+ * (the call gate pushed them in a different order than a trap would).
+ */
+
+	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
+	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
+	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
+	movl	%ecx,R_EIP(%esp)	/* fix eip */
+	movl	%edx,R_CS(%esp)		/* fix cs */
+	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */
+
+	CPU_NUMBER(%edx)
+	TIME_TRAP_SENTRY
+
+	movl	CX(EXT(kernel_stack),%edx),%ebx
+					/* get current kernel stack */
+	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
+					/* user registers. */
+					/* user regs pointer already set */
+
+/*
+ * Check for MACH or emulated system call
+ * (%eax = syscall number, %ebx = user register save area from here on).
+ */
+syscall_entry_3:
+	movl	MY(ACTIVE_THREAD),%edx
+					/* point to current thread */
+	movl	TH_TASK(%edx),%edx	/* point to task */
+	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
+	orl	%edx,%edx		/* if none, */
+	je	syscall_native		/* do native system call */
+	movl	%eax,%ecx		/* copy system call number */
+	subl	DISP_MIN(%edx),%ecx	/* get displacement into syscall */
+					/* vector table */
+	jl	syscall_native		/* too low - native system call */
+	cmpl	DISP_COUNT(%edx),%ecx	/* check range */
+	jnl	syscall_native		/* too high - native system call */
+	movl	DISP_VECTOR(%edx,%ecx,4),%edx
+					/* get the emulation vector */
+	orl	%edx,%edx		/* emulated system call if not zero */
+	jnz	syscall_emul
+
+/*
+ * Native system call.
+ */
+syscall_native:
+	/* Mach syscall numbers are negative; negate and bounds-check against
+	 * mach_trap_count, then copy the user arguments onto the kernel
+	 * stack and dispatch through mach_trap_table. */
+	negl	%eax			/* get system call number */
+	jl	mach_call_range		/* out of range if it was positive */
+	cmpl	EXT(mach_trap_count),%eax /* check system call table bounds */
+	jg	mach_call_range		/* error if out of range */
+#if 0 /* debug hack to show the syscall number on the screen */
+	movb	%al,%dl
+	shrb	$4,%dl
+	orb	$0x30,%dl
+	movb	$0x0f,%dh
+	movw	%dx,0xb800a
+	movb	%al,%dl
+	andb	$0xf,%dl
+	orb	$0x30,%dl
+	movb	$0xf,%dh
+	movw	%dx,0xb800c
+#endif
+	shll	$4,%eax			/* manual indexing of mach_trap_t */
+					/* (assumes 16-byte table entries, */
+					/* matching the +4 offset below) */
+	movl	EXT(mach_trap_table)(%eax),%ecx
+					/* get number of arguments */
+	jecxz	mach_call_call		/* skip argument copy if none */
+
+	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
+	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
+					/* and point past last argument */
+	movl	$USER_DS,%edx		/* use user data segment for accesses */
+	mov	%dx,%fs
+	movl	%esp,%edx		/* save kernel ESP for error recovery */
+
+0:	subl	$4,%esi
+	RECOVER(mach_call_addr_push)
+	pushl	%fs:(%esi)		/* push argument on stack */
+	loop	0b			/* loop for all arguments */
+
+mach_call_call:
+
+#ifdef DEBUG
+	testb	$0xff,EXT(syscall_trace)
+	jz	0f
+	pushl	%eax
+	call	EXT(syscall_trace_print)
+	/* will return with syscallofs still (or again) in eax */
+	addl	$4,%esp
+0:
+#endif /* DEBUG */
+
+	call	*EXT(mach_trap_table)+4(%eax)
+					/* call procedure */
+	movl	%esp,%ecx		/* get kernel stack */
+	or	$(KERNEL_STACK_SIZE-1),%ecx
+	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
+	movl	%eax,R_EAX(%esp)	/* save return value */
+	jmp	_return_from_trap	/* return to user */
+
+/*
+ * Address out of range. Change to page fault.
+ * %esi holds failing address.
+ */
+mach_call_addr_push:
+	/* Fault while copying arguments: %edx still holds the kernel ESP
+	 * saved before the copy loop, %esi the faulting user address. */
+	movl	%edx,%esp		/* clean parameters from stack */
+mach_call_addr:
+	movl	%esi,R_CR2(%ebx)	/* set fault address */
+	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
+					/* set page-fault trap */
+	movl	$(T_PF_USER),R_ERR(%ebx)
+					/* set error code - read user space */
+	jmp	_take_trap		/* treat as a trap */
+
+/*
+ * System call out of range.  Treat as invalid-instruction trap.
+ * (? general protection?)
+ */
+mach_call_range:
+	movl	$(T_INVALID_OPCODE),R_TRAPNO(%ebx)
+					/* set invalid-operation trap */
+	movl	$0,R_ERR(%ebx)		/* clear error code */
+	jmp	_take_trap		/* treat as a trap */
+
+/*
+ * User space emulation of system calls.
+ * edx - user address to handle syscall
+ *
+ * User stack will become:
+ * uesp-> eflags
+ * eip
+ * eax still contains syscall number.
+ */
+syscall_emul:
+	/* Push eflags and the original eip onto the USER stack, then point
+	 * the saved eip at the task's emulation handler so the "return to
+	 * user" actually enters the emulator. */
+	movl	$USER_DS,%edi		/* use user data segment for accesses */
+	mov	%di,%fs
+
+/* XXX what about write-protected pages? */
+	movl	R_UESP(%ebx),%edi	/* get user stack pointer */
+	subl	$8,%edi			/* push space for new arguments */
+	movl	R_EFLAGS(%ebx),%eax	/* move flags */
+	RECOVER(syscall_addr)
+	movl	%eax,%fs:0(%edi)	/* to user stack */
+	movl	R_EIP(%ebx),%eax	/* move eip */
+	RECOVER(syscall_addr)
+	movl	%eax,%fs:4(%edi)	/* to user stack */
+	movl	%edi,R_UESP(%ebx)	/* set new user stack pointer */
+	movl	%edx,R_EIP(%ebx)	/* change return address to trap */
+	movl	%ebx,%esp		/* back to PCB stack */
+	jmp	_return_from_trap	/* return to user */
+
+/*
+ * Address error - address is in %edi.
+ */
+syscall_addr:
+	movl	%edi,R_CR2(%ebx)	/* set fault address */
+	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
+					/* set page-fault trap */
+	movl	$(T_PF_USER),R_ERR(%ebx)
+					/* set error code - read user space */
+	jmp	_take_trap		/* treat as a trap */
+
+
+	.data
+/*
+ * cpu_features and cpu_features_edx label the SAME address (no storage
+ * between the two DATA directives), so the C-side array
+ * `unsigned int cpu_features[2]` reads {edx word, ecx word} as stored
+ * by discover_x86_cpu_type below.
+ */
+DATA(cpu_features)
+DATA(cpu_features_edx)
+	.long	0
+DATA(cpu_features_ecx)
+	.long	0
+	.text
+
+END(syscall)
+
+/* Discover what kind of cpu we have; return the family number
+ (3, 4, 5, 6, for 386, 486, 586, 686 respectively). */
+ENTRY(discover_x86_cpu_type)
+	pushl	%ebp			/* Save frame pointer */
+	movl	%esp,%ebp		/* Save stack pointer */
+	pushl	%ebx			/* %ebx is callee-saved in the i386 */
+					/* ABI but cpuid clobbers it */
+	and	$~0x3,%esp		/* Align stack pointer */
+
+#if 0
+/* Seems to hang with kvm linux 4.3.0 */
+#ifdef	MACH_HYP
+#warning Assuming not Cyrix CPU
+#else	/* MACH_HYP */
+	inb	$0xe8,%al		/* Enable ID flag for Cyrix CPU ... */
+	andb	$0x80,%al		/* ... in CCR4 reg bit7 */
+	outb	%al,$0xe8
+#endif	/* MACH_HYP */
+#endif
+
+	pushfl				/* Fetch flags ... */
+	popl	%eax			/* ... into eax */
+	movl	%eax,%ecx		/* Save original flags for return */
+	xorl	$(EFL_AC+EFL_ID),%eax	/* Attempt to toggle ID and AC bits */
+	pushl	%eax			/* Save flags... */
+	popfl				/* ... In EFLAGS */
+	pushfl				/* Fetch flags back ... */
+	popl	%eax			/* ... into eax */
+	pushl	%ecx			/* From ecx... */
+	popfl				/* ... restore original flags */
+
+	xorl	%ecx,%eax		/* See if any bits didn't change */
+	testl	$EFL_AC,%eax		/* Test AC bit */
+	jnz	0f			/* Skip next bit if AC toggled */
+	movl	$3,%eax			/* Return value is 386 */
+	jmp	9f			/* And RETURN */
+
+0:	testl	$EFL_ID,%eax		/* Test ID bit */
+	jnz	0f			/* Skip next bit if ID toggled */
+	movl	$4,%eax			/* Return value is 486 */
+	jmp	9f			/* And RETURN */
+
+	/* We are a modern enough processor to have the CPUID instruction;
+	   use it to find out what we are. */
+0:	movl	$1,%eax			/* Fetch CPU type info ... */
+	cpuid				/* ... into eax (also writes %ebx) */
+	movl	%ecx,cpu_features_ecx	/* Keep a copy */
+	movl	%edx,cpu_features_edx	/* Keep a copy */
+	shrl	$8,%eax			/* Slide family bits down */
+	andl	$15,%eax		/* And select them */
+
+9:	movl	-4(%ebp),%ebx		/* Restore callee-saved %ebx */
+	movl	%ebp,%esp		/* Restore stack pointer */
+	popl	%ebp			/* Restore frame pointer */
+	ret				/* And return */
+
+
+/* */
+/*
+ * Utility routines.
+ */
+
+/*
+ * Copy from user address space - generic version.
+ * arg0: user address
+ * arg1: kernel address
+ * arg2: byte count
+ */
+ENTRY(copyin)
+	/* Faults during the user access jump to copyin_fail via the
+	 * RECOVER table; returns 0 on success, 1 on fault. */
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get user start address */
+	movl	8+S_ARG1,%edi		/* get kernel destination address */
+	movl	8+S_ARG2,%edx		/* get count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%ds
+
+	/*cld*/				/* count up: default mode in all GCC code */
+	movl	%edx,%ecx		/* move by longwords first */
+	shrl	$2,%ecx
+	RECOVER(copyin_fail)
+	rep
+	movsl				/* move longwords */
+	movl	%edx,%ecx		/* now move remaining bytes */
+	andl	$3,%ecx
+	RECOVER(copyin_fail)
+	rep
+	movsb
+	xorl	%eax,%eax		/* return 0 for success */
+
+copyin_ret:
+	mov	%ss,%di			/* restore DS to kernel segment */
+	mov	%di,%ds
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyin_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyin_ret		/* pop frame and return */
+
+/*
+ * Copy from user address space - version for copying messages.
+ * arg0: user address
+ * arg1: kernel address
+ * arg2: byte count - must be a multiple of four
+ * arg3: kernel byte count
+ */
+ENTRY(copyinmsg)
+	/* Longword-only copy (arg2 must be a multiple of four).
+	 * NOTE(review): arg3 (kernel byte count) is never read here, and
+	 * msgh_size is set from the USER-supplied count in %edx - confirm
+	 * callers validate/clamp the size beforehand. */
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get user start address */
+	movl	8+S_ARG1,%edi		/* get kernel destination address */
+	movl	8+S_ARG2,%ecx		/* get count */
+	movl	%ecx,%edx		/* save count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%ds
+
+	/*cld*/				/* count up: default mode in all GCC code */
+	shrl	$2,%ecx
+	RECOVER(copyinmsg_fail)
+	rep
+	movsl				/* move longwords */
+	xorl	%eax,%eax		/* return 0 for success */
+
+	movl	8+S_ARG1,%edi
+	movl	%edx,%es:MSGH_MSGH_SIZE(%edi)	/* set msgh_size */
+
+copyinmsg_ret:
+	mov	%ss,%di			/* restore DS to kernel segment */
+	mov	%di,%ds
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyinmsg_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyinmsg_ret		/* pop frame and return */
+
+/*
+ * Copy to user address space - generic version.
+ * arg0: kernel address
+ * arg1: user address
+ * arg2: byte count
+ */
+ENTRY(copyout)
+	/* Returns 0 on success, 1 on fault.  On plain i386 (family 3,
+	 * non-PAE) falls through to copyout_retry, which checks the ptes by
+	 * hand because that CPU ignores write protection in kernel mode. */
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get kernel start address */
+	movl	8+S_ARG1,%edi		/* get user start address */
+	movl	8+S_ARG2,%edx		/* get count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%es
+
+#if !defined(MACH_HYP) && !PAE
+	cmpl	$3,machine_slot+SUB_TYPE_CPU_TYPE
+	jbe	copyout_retry		/* Use slow version on i386 */
+#endif	/* !defined(MACH_HYP) && !PAE */
+
+	/*cld*/				/* count up: always this way in GCC code */
+	movl	%edx,%ecx		/* move by longwords first */
+	shrl	$2,%ecx
+	RECOVER(copyout_fail)
+	rep
+	movsl
+	movl	%edx,%ecx		/* now move remaining bytes */
+	andl	$3,%ecx
+	RECOVER(copyout_fail)
+	rep
+	movsb				/* move */
+	xorl	%eax,%eax		/* return 0 for success */
+
+copyout_ret:
+	mov	%ss,%di			/* restore ES to kernel segment */
+	mov	%di,%es
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyout_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyout_ret		/* pop frame and return */
+
+/*
+ * Copy to user address space - version for copying messages.
+ * arg0: kernel address
+ * arg1: user address
+ * arg2: byte count - must be a multiple of four
+ */
+ENTRY(copyoutmsg)
+	/* Longword-only variant of copyout (arg2 must be a multiple of
+	 * four); shares copyout's i386 write-protect workaround path. */
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get kernel start address */
+	movl	8+S_ARG1,%edi		/* get user start address */
+	movl	8+S_ARG2,%ecx		/* get count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%es
+
+#if !defined(MACH_HYP) && !PAE
+	movl	8+S_ARG2,%edx		/* copyout_retry expects count here */
+	cmpl	$3,machine_slot+SUB_TYPE_CPU_TYPE
+	jbe	copyout_retry		/* Use slow version on i386 */
+#endif	/* !defined(MACH_HYP) && !PAE */
+
+	shrl	$2,%ecx			/* move by longwords */
+	RECOVER(copyoutmsg_fail)
+	rep
+	movsl
+	xorl	%eax,%eax		/* return 0 for success */
+
+copyoutmsg_ret:
+	mov	%ss,%di			/* restore ES to kernel segment */
+	mov	%di,%es
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyoutmsg_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyoutmsg_ret		/* pop frame and return */
+
+#if !defined(MACH_HYP) && !PAE
+/*
+ * Check whether user address space is writable
+ * before writing to it - i386 hardware is broken.
+ */
+copyout_retry:
+	/* Walk the page tables by hand for the destination page; if it is
+	 * not writable, clear its valid bit and flush the TLB so the copy
+	 * below takes a genuine page fault. */
+	movl	%cr3,%ecx		/* point to page directory */
+	movl	%edi,%eax		/* get page directory bits */
+	shrl	$(PDESHIFT),%eax	/* from user address */
+	movl	KERNELBASE(%ecx,%eax,PTE_SIZE),%ecx
+					/* get page directory pointer */
+	testl	$(PTE_V),%ecx		/* present? */
+	jz	0f			/* if not, fault is OK */
+	andl	$(PTE_PFN),%ecx		/* isolate page frame address */
+	movl	%edi,%eax		/* get page table bits */
+	shrl	$(PTESHIFT),%eax
+	andl	$(PTEMASK),%eax		/* from user address */
+	leal	KERNELBASE(%ecx,%eax,PTE_SIZE),%ecx
+					/* point to page table entry */
+	movl	(%ecx),%eax		/* get it */
+	testl	$(PTE_V),%eax		/* present? */
+	jz	0f			/* if not, fault is OK */
+	testl	$(PTE_W),%eax		/* writable? */
+	jnz	0f			/* OK if so */
+/*
+ * Not writable - must fake a fault.  Turn off access to the page.
+ */
+	andl	$(PTE_INVALID),(%ecx)	/* turn off valid bit */
+	movl	%cr3,%eax		/* invalidate TLB */
+	movl	%eax,%cr3
+0:
+
+/*
+ * Copy only what fits on the current destination page.
+ * Check for write-fault again on the next page.
+ * (%edx = total bytes remaining, %eax = bytes copied this iteration.)
+ */
+	leal	NBPG(%edi),%eax		/* point to */
+	andl	$(-NBPG),%eax		/* start of next page */
+	subl	%edi,%eax		/* get number of bytes to that point */
+	cmpl	%edx,%eax		/* bigger than count? */
+	jle	1f			/* if so, */
+	movl	%edx,%eax		/* use count */
+1:
+
+	/*cld*/				/* count up: always this way in GCC code */
+	movl	%eax,%ecx		/* move by longwords first */
+	shrl	$2,%ecx
+	RECOVER(copyout_fail)
+	RETRY(copyout_retry)
+	rep
+	movsl
+	movl	%eax,%ecx		/* now move remaining bytes */
+	andl	$3,%ecx
+	RECOVER(copyout_fail)
+	RETRY(copyout_retry)
+	rep
+	movsb				/* move */
+	subl	%eax,%edx		/* decrement count */
+	jg	copyout_retry		/* restart on next page if not done */
+	xorl	%eax,%eax		/* return 0 for success */
+	jmp	copyout_ret
+#endif /* !defined(MACH_HYP) && !PAE */
+
+/*
+ * int inst_fetch(int eip, int cs);
+ *
+ * Fetch instruction byte. Return -1 if invalid address.
+ */
+ENTRY(inst_fetch)
+	/* Zero-extends the fetched byte; a fault lands in
+	 * _inst_fetch_fault via the RECOVER table and returns -1. */
+	movl	S_ARG1, %eax		/* get segment */
+	movw	%ax,%fs			/* into FS */
+	movl	S_ARG0, %eax		/* get offset */
+	RETRY(EXT(inst_fetch))		/* re-load FS on retry */
+	RECOVER(_inst_fetch_fault)
+	movzbl	%fs:(%eax),%eax		/* load instruction byte */
+	ret
+
+_inst_fetch_fault:
+	movl	$-1,%eax		/* return -1 if error */
+	ret
+
+
+/*
+ * Done with recovery and retry tables.
+ */
+ RECOVER_TABLE_END
+ RETRY_TABLE_END
+
+
+
+/*
+ * cpu_shutdown()
+ * Force reboot
+ */
+/* An all-zero IDT: any exception taken with it loaded cannot be
+ * delivered, which escalates to a triple fault and resets the CPU. */
+null_idt:
+	.space	8 * 32
+
+null_idtr:
+	.word	8 * 32 - 1
+	.long	null_idt
+
+Entry(cpu_shutdown)
+	lidt	null_idtr		/* disable the interrupt handler */
+	xor	%ecx,%ecx		/* generate a divide by zero */
+	div	%ecx,%eax		/* reboot now */
+	ret				/* this will "never" be executed */
diff --git a/i386/i386/locore.h b/i386/i386/locore.h
new file mode 100644
index 0000000..374c8cf
--- /dev/null
+++ b/i386/i386/locore.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2006, 2011 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _MACHINE_LOCORE_H_
+#define _MACHINE_LOCORE_H_
+
+#include <sys/types.h>
+
+#include <kern/sched_prim.h>
+
+/*
+ * Fault recovery in copyin/copyout routines.
+ * Each entry pairs a potentially-faulting instruction address with the
+ * address to resume at when that instruction faults (presumably emitted
+ * by the RECOVER() macro used in locore.S - confirm against the macro
+ * definition).
+ */
+struct recovery {
+	vm_offset_t	fault_addr;
+	vm_offset_t	recover_addr;
+};
+
+extern struct recovery	recover_table[];
+extern struct recovery	recover_table_end[];
+
+/*
+ * Recovery from a successful fault in copyout does not
+ * return directly - it retries the pte check, since
+ * the 386 ignores write protection in kernel mode.
+ */
+extern struct recovery retry_table[];
+extern struct recovery retry_table_end[];
+
+
+extern int call_continuation (continuation_t continuation);
+
+extern int discover_x86_cpu_type (void);
+
+extern int copyin (const void *userbuf, void *kernelbuf, size_t cn);
+extern int copyinmsg (const void *userbuf, void *kernelbuf, size_t cn, size_t kn);
+extern int copyout (const void *kernelbuf, void *userbuf, size_t cn);
+extern int copyoutmsg (const void *kernelbuf, void *userbuf, size_t cn);
+
+extern int inst_fetch (int eip, int cs);
+
+extern void cpu_shutdown (void);
+
+extern int syscall (void);
+extern int syscall64 (void);
+
+extern unsigned int cpu_features[2];
+
+#define CPU_FEATURE_FPU 0
+#define CPU_FEATURE_VME 1
+#define CPU_FEATURE_DE 2
+#define CPU_FEATURE_PSE 3
+#define CPU_FEATURE_TSC 4
+#define CPU_FEATURE_MSR 5
+#define CPU_FEATURE_PAE 6
+#define CPU_FEATURE_MCE 7
+#define CPU_FEATURE_CX8 8
+#define CPU_FEATURE_APIC 9
+#define CPU_FEATURE_SEP 11
+#define CPU_FEATURE_MTRR 12
+#define CPU_FEATURE_PGE 13
+#define CPU_FEATURE_MCA 14
+#define CPU_FEATURE_CMOV 15
+#define CPU_FEATURE_PAT 16
+#define CPU_FEATURE_PSE_36 17
+#define CPU_FEATURE_PSN 18
+#define CPU_FEATURE_CFLSH 19
+#define CPU_FEATURE_DS 21
+#define CPU_FEATURE_ACPI 22
+#define CPU_FEATURE_MMX 23
+#define CPU_FEATURE_FXSR 24
+#define CPU_FEATURE_SSE 25
+#define CPU_FEATURE_SSE2 26
+#define CPU_FEATURE_SS 27
+#define CPU_FEATURE_HTT 28
+#define CPU_FEATURE_TM 29
+#define CPU_FEATURE_PBE 31
+#define CPU_FEATURE_XSAVE (1*32 + 26)
+
+#define CPU_HAS_FEATURE(feature) (cpu_features[(feature) / 32] & (1 << ((feature) % 32)))
+
+#endif /* _MACHINE_LOCORE_H_ */
+
diff --git a/i386/i386/loose_ends.c b/i386/i386/loose_ends.c
new file mode 100644
index 0000000..7e7f943
--- /dev/null
+++ b/i386/i386/loose_ends.c
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+
+#include <i386/i386/loose_ends.h>
+
+#ifndef NDEBUG
+#define MACH_ASSERT 1
+#else
+#define MACH_ASSERT 0
+#endif /* NDEBUG */
+
+ /*
+ * For now we will always go to single user mode, since there is
+ * no way to pass this request through the boot.
+ */
+
+/* Someone with time should write code to set cpuspeed automagically */
+int cpuspeed = 4;
+/* Calibrated busy-wait: spins cpuspeed * n iterations of a volatile
+ * countdown.  Wall-clock accuracy depends entirely on the (fixed)
+ * cpuspeed value above. */
+#define DELAY(n) { volatile int N = cpuspeed * (n); while (--N > 0); }
+/* Busy-wait for approximately n delay units (see DELAY above). */
+void
+delay(int n)
+{
+	DELAY(n);
+}
diff --git a/i386/i386/loose_ends.h b/i386/i386/loose_ends.h
new file mode 100644
index 0000000..c085527
--- /dev/null
+++ b/i386/i386/loose_ends.h
@@ -0,0 +1,33 @@
+/*
+ * Other useful functions?
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Other useful functions?
+ *
+ */
+
+#ifndef _LOOSE_ENDS_H_
+#define _LOOSE_ENDS_H_
+
+#include <mach/std_types.h>
+
+extern void delay (int n);
+
+#endif /* _LOOSE_ENDS_H_ */
diff --git a/i386/i386/mach_i386.srv b/i386/i386/mach_i386.srv
new file mode 100644
index 0000000..48d16ba
--- /dev/null
+++ b/i386/i386/mach_i386.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/machine/mach_i386.defs>
diff --git a/i386/i386/mach_param.h b/i386/i386/mach_param.h
new file mode 100644
index 0000000..d7d4dee
--- /dev/null
+++ b/i386/i386/mach_param.h
@@ -0,0 +1,31 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent parameters for i386.
+ */
+
+#define HZ (100)
+ /* clock tick each 10 ms. */
diff --git a/i386/i386/machine_routines.h b/i386/i386/machine_routines.h
new file mode 100644
index 0000000..d9dd94b
--- /dev/null
+++ b/i386/i386/machine_routines.h
@@ -0,0 +1,38 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MACHINE_ROUTINES_H_
+#define _I386_MACHINE_ROUTINES_H_
+
+/*
+ * The i386 has a set of machine-dependent interfaces.
+ */
+#define MACHINE_SERVER mach_i386_server
+#define MACHINE_SERVER_HEADER "i386/i386/mach_i386.server.h"
+#define MACHINE_SERVER_ROUTINE mach_i386_server_routine
+
+#endif /* _I386_MACHINE_ROUTINES_H_ */
+
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
new file mode 100644
index 0000000..8bebf36
--- /dev/null
+++ b/i386/i386/machine_task.c
@@ -0,0 +1,80 @@
+/* Machine specific data for a task on i386.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <kern/lock.h>
+#include <mach/mach_types.h>
+#include <kern/slab.h>
+#include <kern/task.h>
+#include <machine/task.h>
+
+#include <machine/io_perm.h>
+
+
+/* The cache which holds our IO permission bitmaps. */
+struct kmem_cache machine_task_iopb_cache;
+
+
+/* Initialize the machine task module. The function is called once at
+ start up by task_init in kern/task.c. */
+void
+machine_task_module_init (void)
+{
+ kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0,
+ NULL, 0);
+}
+
+
+/* Initialize the machine specific part of task TASK. */
+void
+machine_task_init (task_t task)
+{
+ task->machine.iopb_size = 0;
+ task->machine.iopb = 0;
+ simple_lock_init (&task->machine.iopb_lock);
+}
+
+
+/* Destroy the machine specific part of task TASK and release all
+ associated resources. */
+void
+machine_task_terminate (const task_t task)
+{
+ if (task->machine.iopb)
+ kmem_cache_free (&machine_task_iopb_cache,
+ (vm_offset_t) task->machine.iopb);
+}
+
+
+/* Try to release as much memory from the machine specific data in
+ task TASK. */
+void
+machine_task_collect (task_t task)
+{
+ simple_lock (&task->machine.iopb_lock);
+ if (task->machine.iopb_size == 0 && task->machine.iopb)
+ {
+ kmem_cache_free (&machine_task_iopb_cache,
+ (vm_offset_t) task->machine.iopb);
+ task->machine.iopb = 0;
+ }
+ simple_unlock (&task->machine.iopb_lock);
+}
diff --git a/i386/i386/machspl.h b/i386/i386/machspl.h
new file mode 100644
index 0000000..bbb2675
--- /dev/null
+++ b/i386/i386/machspl.h
@@ -0,0 +1,29 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/* XXX replaced by... */
+#include <i386/spl.h>
+
diff --git a/i386/i386/model_dep.h b/i386/i386/model_dep.h
new file mode 100644
index 0000000..5369e28
--- /dev/null
+++ b/i386/i386/model_dep.h
@@ -0,0 +1,68 @@
+/*
+ * Arch dependent functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Arch dependent functions.
+ *
+ */
+
+#ifndef _I386AT_MODEL_DEP_H_
+#define _I386AT_MODEL_DEP_H_
+
+#include <mach/std_types.h>
+
+/*
+ * Address to hold AP boot code, held in ASM
+ */
+extern phys_addr_t apboot_addr;
+
+/*
+ * Find devices. The system is alive.
+ */
+extern void machine_init (void);
+
+/* Conserve power on processor CPU. */
+extern void machine_idle (int cpu);
+
+extern void resettodr (void);
+
+extern void startrtclock (void);
+
+/*
+ * Halt a cpu.
+ */
+extern void halt_cpu (void) __attribute__ ((noreturn));
+
+/*
+ * Halt the system or reboot.
+ */
+extern void halt_all_cpus (boolean_t reboot) __attribute__ ((noreturn));
+
+/*
+ * Make cpu pause a bit.
+ */
+extern void machine_relax (void);
+
+/*
+ * C boot entrypoint - called by boot_entry in boothdr.S.
+ */
+extern void c_boot_entry(vm_offset_t bi);
+
+#endif /* _I386AT_MODEL_DEP_H_ */
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
new file mode 100644
index 0000000..61a7607
--- /dev/null
+++ b/i386/i386/mp_desc.c
@@ -0,0 +1,357 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/smp.h>
+#include <kern/startup.h>
+#include <kern/kmutex.h>
+#include <mach/machine.h>
+#include <mach/xen.h>
+#include <vm/vm_kern.h>
+
+#include <i386/mp_desc.h>
+#include <i386/lock.h>
+#include <i386/apic.h>
+#include <i386/locore.h>
+#include <i386/fpu.h>
+#include <i386/gdt.h>
+#include <i386at/idt.h>
+#include <i386at/int_init.h>
+#include <i386/cpu.h>
+#include <i386/smp.h>
+
+#include <i386at/model_dep.h>
+#include <machine/ktss.h>
+#include <machine/smp.h>
+#include <machine/tss.h>
+#include <machine/io_perm.h>
+#include <machine/vm_param.h>
+
+#include <i386at/acpi_parse_apic.h>
+#include <string.h>
+
+/*
+ * The i386 needs an interrupt stack to keep the PCB stack from being
+ * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
+ * than any thread`s kernel stack.
+ */
+
+/*
+ * Addresses of bottom and top of interrupt stacks.
+ */
+vm_offset_t int_stack_top[NCPUS];
+vm_offset_t int_stack_base[NCPUS];
+
+/*
+ * Whether we are currently handling an interrupt.
+ * To catch code erroneously taking non-irq-safe locks.
+ */
+#ifdef MACH_LDEBUG
+unsigned long in_interrupt[NCPUS];
+#endif
+
+/* Interrupt stack allocation */
+uint8_t solid_intstack[NCPUS*INTSTACK_SIZE] __aligned(NCPUS*INTSTACK_SIZE);
+
+void
+interrupt_stack_alloc(void)
+{
+ int i;
+
+ /*
+ * Set up pointers to the top of the interrupt stack.
+ */
+
+ for (i = 0; i < NCPUS; i++) {
+ int_stack_base[i] = (vm_offset_t) &solid_intstack[i * INTSTACK_SIZE];
+ int_stack_top[i] = (vm_offset_t) &solid_intstack[(i + 1) * INTSTACK_SIZE] - 4;
+ }
+}
+
+#if NCPUS > 1
+/*
+ * Flag to mark SMP init by BSP complete
+ */
+int bspdone;
+
+phys_addr_t apboot_addr;
+extern void *apboot, *apbootend;
+extern volatile ApicLocalUnit* lapic;
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+/*
+ * Descriptor tables.
+ */
+struct mp_desc_table *mp_desc_table[NCPUS] = { 0 };
+
+/*
+ * Pointer to TSS for access in load_context.
+ */
+struct task_tss *mp_ktss[NCPUS] = { 0 };
+
+/*
+ * Pointer to GDT to reset the KTSS busy bit.
+ */
+struct real_descriptor *mp_gdt[NCPUS] = { 0 };
+
+/*
+ * Boot-time tables, for initialization and master processor.
+ */
+extern struct real_gate idt[IDTSZ];
+extern struct real_descriptor gdt[GDTSZ];
+extern struct real_descriptor ldt[LDTSZ];
+
+/*
+ * Allocate and initialize the per-processor descriptor tables.
+ */
+
+int
+mp_desc_init(int mycpu)
+{
+ struct mp_desc_table *mpt;
+ vm_offset_t mem;
+
+ if (mycpu == 0) {
+ /*
+ * Master CPU uses the tables built at boot time.
+ * Just set the TSS and GDT pointers.
+ */
+ mp_ktss[mycpu] = (struct task_tss *) &ktss;
+ mp_gdt[mycpu] = gdt;
+ return 0;
+ }
+ else {
+ /*
+ * Allocate tables for other CPUs
+ */
+ if (!init_alloc_aligned(sizeof(struct mp_desc_table), &mem))
+ panic("not enough memory for descriptor tables");
+ mpt = (struct mp_desc_table *)phystokv(mem);
+
+ mp_desc_table[mycpu] = mpt;
+ mp_ktss[mycpu] = &mpt->ktss;
+ mp_gdt[mycpu] = mpt->gdt;
+
+ /*
+ * Zero the tables
+ */
+ memset(mpt->idt, 0, sizeof(idt));
+ memset(mpt->gdt, 0, sizeof(gdt));
+ memset(mpt->ldt, 0, sizeof(ldt));
+ memset(&mpt->ktss, 0, sizeof(struct task_tss));
+
+ return mycpu;
+ }
+}
+
+/* XXX should be adjusted per CPU speed */
+int simple_lock_pause_loop = 100;
+
+unsigned int simple_lock_pause_count = 0; /* debugging */
+
+void
+simple_lock_pause(void)
+{
+ static volatile int dummy;
+ int i;
+
+ simple_lock_pause_count++;
+
+ /*
+ * Used in loops that are trying to acquire locks out-of-order.
+ */
+
+ for (i = 0; i < simple_lock_pause_loop; i++)
+ dummy++; /* keep the compiler from optimizing the loop away */
+}
+
+kern_return_t
+cpu_control(int cpu, const int *info, unsigned int count)
+{
+ printf("cpu_control(%d, %p, %d) not implemented\n",
+ cpu, info, count);
+ return KERN_FAILURE;
+}
+
+void
+interrupt_processor(int cpu)
+{
+ smp_pmap_update(apic_get_cpu_apic_id(cpu));
+}
+
+static void
+paging_enable(void)
+{
+#ifndef MACH_HYP
+ /* Turn paging on.
+ * TODO: Why does setting the WP bit here cause a crash?
+ */
+#if PAE
+ set_cr4(get_cr4() | CR4_PAE);
+#endif
+ set_cr0(get_cr0() | CR0_PG /* | CR0_WP */);
+ set_cr0(get_cr0() & ~(CR0_CD | CR0_NW));
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ set_cr4(get_cr4() | CR4_PGE);
+#endif /* MACH_HYP */
+}
+
+void
+cpu_setup(int cpu)
+{
+ pmap_make_temporary_mapping();
+ printf("AP=(%u) tempmap done\n", cpu);
+
+ paging_enable();
+ flush_instr_queue();
+ printf("AP=(%u) paging done\n", cpu);
+
+ init_percpu(cpu);
+ mp_desc_init(cpu);
+ printf("AP=(%u) mpdesc done\n", cpu);
+
+ ap_gdt_init(cpu);
+ printf("AP=(%u) gdt done\n", cpu);
+
+ ap_idt_init(cpu);
+ printf("AP=(%u) idt done\n", cpu);
+
+ ap_int_init(cpu);
+ printf("AP=(%u) int done\n", cpu);
+
+ ap_ldt_init(cpu);
+ printf("AP=(%u) ldt done\n", cpu);
+
+ ap_ktss_init(cpu);
+ printf("AP=(%u) ktss done\n", cpu);
+
+ pmap_remove_temporary_mapping();
+ printf("AP=(%u) remove tempmap done\n", cpu);
+
+ pmap_set_page_dir();
+ flush_tlb();
+ printf("AP=(%u) reset page dir done\n", cpu);
+
+ /* Initialize machine_slot fields with the cpu data */
+ machine_slot[cpu].cpu_subtype = CPU_SUBTYPE_AT386;
+ machine_slot[cpu].cpu_type = machine_slot[0].cpu_type;
+
+ init_fpu();
+ lapic_setup();
+ lapic_enable();
+ cpu_launch_first_thread(THREAD_NULL);
+}
+
+void
+cpu_ap_main()
+{
+ int cpu = cpu_number();
+
+ do {
+ cpu_pause();
+ } while (bspdone != cpu);
+
+ __sync_synchronize();
+
+ cpu_setup(cpu);
+}
+
+kern_return_t
+cpu_start(int cpu)
+{
+ int err;
+
+ assert(machine_slot[cpu].running != TRUE);
+
+ uint16_t apic_id = apic_get_cpu_apic_id(cpu);
+
+ printf("Trying to enable: %d at 0x%lx\n", apic_id, apboot_addr);
+
+ err = smp_startup_cpu(apic_id, apboot_addr);
+
+ if (!err) {
+ printf("Started cpu %d (lapic id %04x)\n", cpu, apic_id);
+ return KERN_SUCCESS;
+ }
+ printf("FATAL: Cannot init AP %d\n", cpu);
+ for (;;);
+}
+
+void
+start_other_cpus(void)
+{
+ int ncpus = smp_get_numcpus();
+
+ //Copy cpu initialization assembly routine
+ memcpy((void*) phystokv(apboot_addr), (void*) &apboot,
+ (uint32_t)&apbootend - (uint32_t)&apboot);
+
+ unsigned cpu;
+
+ splhigh();
+
+ /* Disable IOAPIC interrupts (IPIs not affected).
+ * Clearing this flag is similar to masking all
+ * IOAPIC interrupts individually.
+ *
+ * This is done to prevent IOAPIC interrupts from
+ * interfering with SMP startup. splhigh() may be enough for BSP,
+ * but I'm not sure. We cannot control the lapic
+ * on APs because we don't have execution on them yet.
+ */
+ lapic_disable();
+
+ bspdone = 0;
+ for (cpu = 1; cpu < ncpus; cpu++) {
+ machine_slot[cpu].running = FALSE;
+
+ //Start cpu
+ printf("Starting AP %d\n", cpu);
+ cpu_start(cpu);
+
+ bspdone++;
+ do {
+ cpu_pause();
+ } while (machine_slot[cpu].running == FALSE);
+
+ __sync_synchronize();
+ }
+ printf("BSP: Completed SMP init\n");
+
+ /* Re-enable IOAPIC interrupts as per setup */
+ lapic_enable();
+}
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/mp_desc.h b/i386/i386/mp_desc.h
new file mode 100644
index 0000000..dc3a7dc
--- /dev/null
+++ b/i386/i386/mp_desc.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MP_DESC_H_
+#define _I386_MP_DESC_H_
+
+#include <mach/kern_return.h>
+
+#if MULTIPROCESSOR
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+#include "seg.h"
+#include "tss.h"
+#include <i386at/idt.h>
+#include "gdt.h"
+#include "ldt.h"
+
+/*
+ * The descriptor tables are together in a structure
+ * allocated one per processor (except for the boot processor).
+ */
+struct mp_desc_table {
+ struct real_gate idt[IDTSZ]; /* IDT */
+ struct real_descriptor gdt[GDTSZ]; /* GDT */
+ struct real_descriptor ldt[LDTSZ]; /* LDT */
+ struct task_tss ktss;
+};
+
+/*
+ * They are pointed to by a per-processor array.
+ */
+extern struct mp_desc_table *mp_desc_table[NCPUS];
+
+/*
+ * The kernel TSS gets its own pointer.
+ */
+extern struct task_tss *mp_ktss[NCPUS];
+
+/*
+ * So does the GDT.
+ */
+extern struct real_descriptor *mp_gdt[NCPUS];
+
+extern uint8_t solid_intstack[];
+
+extern int bspdone;
+
+/*
+ * Each CPU calls this routine to set up its descriptor tables.
+ */
+extern int mp_desc_init(int);
+
+
+extern void interrupt_processor(int cpu);
+
+
+#endif /* MULTIPROCESSOR */
+
+extern void start_other_cpus(void);
+
+extern kern_return_t cpu_start(int cpu);
+
+extern kern_return_t cpu_control(int cpu, const int *info, unsigned int count);
+
+extern void interrupt_stack_alloc(void);
+
+#endif /* _I386_MP_DESC_H_ */
diff --git a/i386/i386/msr.h b/i386/i386/msr.h
new file mode 100644
index 0000000..8f09b80
--- /dev/null
+++ b/i386/i386/msr.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2023 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _MACHINE_MSR_H_
+#define _MACHINE_MSR_H_
+
+#define MSR_REG_EFER 0xC0000080
+#define MSR_REG_STAR 0xC0000081
+#define MSR_REG_LSTAR 0xC0000082
+#define MSR_REG_CSTAR 0xC0000083
+#define MSR_REG_FMASK 0xC0000084
+#define MSR_REG_FSBASE 0xC0000100
+#define MSR_REG_GSBASE 0xC0000101
+
+#define MSR_EFER_SCE 0x00000001
+
+#ifndef __ASSEMBLER__
+
+static inline void wrmsr(uint32_t regaddr, uint64_t value)
+{
+ uint32_t low = (uint32_t) value, high = ((uint32_t) (value >> 32));
+ asm volatile("wrmsr"
+ :
+ : "c" (regaddr), "a" (low), "d" (high)
+ : "memory" /* wrmsr may cause a read from memory, so
+ * make the compiler flush any changes */
+ );
+}
+
+static inline uint64_t rdmsr(uint32_t regaddr)
+{
+ uint32_t low, high;
+ asm volatile("rdmsr"
+ : "=a" (low), "=d" (high)
+ : "c" (regaddr)
+ );
+ return ((uint64_t)high << 32) | low;
+}
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MACHINE_MSR_H_ */
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
new file mode 100644
index 0000000..e890155
--- /dev/null
+++ b/i386/i386/pcb.c
@@ -0,0 +1,958 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include <mach/std_types.h>
+#include <mach/kern_return.h>
+#include <mach/thread_status.h>
+#include <mach/exec/exec.h>
+#include <mach/xen.h>
+
+#include "vm_param.h"
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <kern/slab.h>
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+
+#include <i386/thread.h>
+#include <i386/proc_reg.h>
+#include <i386/seg.h>
+#include <i386/user_ldt.h>
+#include <i386/db_interface.h>
+#include <i386/fpu.h>
+#include "eflags.h"
+#include "gdt.h"
+#include "ldt.h"
+#include "msr.h"
+#include "ktss.h"
+#include "pcb.h"
+
+#include <machine/tss.h>
+
+#if NCPUS > 1
+#include <i386/mp_desc.h>
+#endif
+
+struct kmem_cache pcb_cache;
+
+vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
+
+/*
+ * stack_attach:
+ *
+ * Attach a kernel stack to a thread.
+ */
+
+void stack_attach(
+ thread_t thread,
+ vm_offset_t stack,
+ void (*continuation)(thread_t))
+{
+ counter(if (++c_stacks_current > c_stacks_max)
+ c_stacks_max = c_stacks_current);
+
+ thread->kernel_stack = stack;
+
+ /*
+ * We want to run continuation, giving it as an argument
+ * the return value from Load_context/Switch_context.
+ * Thread_continue takes care of the mismatch between
+ * the argument-passing/return-value conventions.
+ * This function will not return normally,
+ * so we don`t have to worry about a return address.
+ */
+ STACK_IKS(stack)->k_eip = (long) Thread_continue;
+ STACK_IKS(stack)->k_ebx = (long) continuation;
+ STACK_IKS(stack)->k_esp = (long) STACK_IEL(stack);
+ STACK_IKS(stack)->k_ebp = (long) 0;
+
+ /*
+ * Point top of kernel stack to user`s registers.
+ */
+ STACK_IEL(stack)->saved_state = USER_REGS(thread);
+}
+
+/*
+ * stack_detach:
+ *
+ * Detaches a kernel stack from a thread, returning the old stack.
+ */
+
+vm_offset_t stack_detach(thread_t thread)
+{
+ vm_offset_t stack;
+
+ counter(if (--c_stacks_current < c_stacks_min)
+ c_stacks_min = c_stacks_current);
+
+ stack = thread->kernel_stack;
+ thread->kernel_stack = 0;
+
+ return stack;
+}
+
+#if NCPUS > 1
+#define curr_gdt(mycpu) (mp_gdt[mycpu])
+#define curr_ktss(mycpu) (mp_ktss[mycpu])
+#else
+#define curr_gdt(mycpu) ((void)(mycpu), gdt)
+#define curr_ktss(mycpu) ((void)(mycpu), (struct task_tss *)&ktss)
+#endif
+
+#define gdt_desc_p(mycpu,sel) \
+ ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
+
+void switch_ktss(pcb_t pcb)
+{
+ int mycpu = cpu_number();
+ {
+ vm_offset_t pcb_stack_top;
+
+ /*
+ * Save a pointer to the top of the "kernel" stack -
+ * actually the place in the PCB where a trap into
+ * kernel mode will push the registers.
+ * The location depends on V8086 mode. If we are
+ * not in V8086 mode, then a trap into the kernel
+ * won`t save the v86 segments, so we leave room.
+ */
+
+#if !defined(__x86_64__) || defined(USER32)
+ pcb_stack_top = (pcb->iss.efl & EFL_VM)
+ ? (long) (&pcb->iss + 1)
+ : (long) (&pcb->iss.v86_segs);
+#else
+ pcb_stack_top = (vm_offset_t) (&pcb->iss + 1);
+#endif
+
+#ifdef __x86_64__
+ assert((pcb_stack_top & 0xF) == 0);
+#endif
+
+#ifdef MACH_RING1
+ /* No IO mask here */
+ if (hyp_stack_switch(KERNEL_DS, pcb_stack_top))
+ panic("stack_switch");
+#else /* MACH_RING1 */
+#ifdef __x86_64__
+ curr_ktss(mycpu)->tss.rsp0 = pcb_stack_top;
+#else /* __x86_64__ */
+ curr_ktss(mycpu)->tss.esp0 = pcb_stack_top;
+#endif /* __x86_64__ */
+#endif /* MACH_RING1 */
+ }
+
+ {
+ user_ldt_t tldt = pcb->ims.ldt;
+ /*
+ * Set the thread`s LDT.
+ */
+ if (tldt == 0) {
+ /*
+ * Use system LDT.
+ */
+#ifdef MACH_PV_DESCRIPTORS
+ hyp_set_ldt(&ldt, LDTSZ);
+#else /* MACH_PV_DESCRIPTORS */
+ if (get_ldt() != KERNEL_LDT)
+ set_ldt(KERNEL_LDT);
+#endif /* MACH_PV_DESCRIPTORS */
+ }
+ else {
+ /*
+ * Thread has its own LDT.
+ */
+#ifdef MACH_PV_DESCRIPTORS
+ hyp_set_ldt(tldt->ldt,
+ (tldt->desc.limit_low|(tldt->desc.limit_high<<16)) /
+ sizeof(struct real_descriptor));
+#else /* MACH_PV_DESCRIPTORS */
+ *gdt_desc_p(mycpu,USER_LDT) = tldt->desc;
+ set_ldt(USER_LDT);
+#endif /* MACH_PV_DESCRIPTORS */
+ }
+ }
+
+#ifdef MACH_PV_DESCRIPTORS
+ {
+ int i;
+ for (i=0; i < USER_GDT_SLOTS; i++) {
+ if (memcmp(gdt_desc_p (mycpu, USER_GDT + (i << 3)),
+ &pcb->ims.user_gdt[i], sizeof pcb->ims.user_gdt[i])) {
+ union {
+ struct real_descriptor real_descriptor;
+ uint64_t descriptor;
+ } user_gdt;
+ user_gdt.real_descriptor = pcb->ims.user_gdt[i];
+
+ if (hyp_do_update_descriptor(kv_to_ma(gdt_desc_p (mycpu, USER_GDT + (i << 3))),
+ user_gdt.descriptor))
+ panic("couldn't set user gdt %d\n",i);
+ }
+ }
+ }
+#else /* MACH_PV_DESCRIPTORS */
+
+ /* Copy in the per-thread GDT slots. No reloading is necessary
+ because just restoring the segment registers on the way back to
+ user mode reloads the shadow registers from the in-memory GDT. */
+ memcpy (gdt_desc_p (mycpu, USER_GDT),
+ pcb->ims.user_gdt, sizeof pcb->ims.user_gdt);
+#endif /* MACH_PV_DESCRIPTORS */
+
+#if defined(__x86_64__) && !defined(USER32)
+ wrmsr(MSR_REG_FSBASE, pcb->ims.sbs.fsbase);
+ wrmsr(MSR_REG_GSBASE, pcb->ims.sbs.gsbase);
+#endif
+
+ db_load_context(pcb);
+
+ /*
+ * Load the floating-point context, if necessary.
+ */
+ fpu_load_context(pcb);
+
+}
+
+/* If NEW_IOPB is not null, the SIZE denotes the number of bytes in
+ the new bitmap. Expects iopb_lock to be held. */
+void
+update_ktss_iopb (unsigned char *new_iopb, io_port_t size)
+{
+ struct task_tss *tss = curr_ktss (cpu_number ());
+
+ if (new_iopb && size > 0)
+ {
+ tss->tss.io_bit_map_offset
+ = offsetof (struct task_tss, barrier) - size;
+ memcpy (((char *) tss) + tss->tss.io_bit_map_offset,
+ new_iopb, size);
+ }
+ else
+ tss->tss.io_bit_map_offset = IOPB_INVAL;
+}
+
+/*
+ * stack_handoff:
+ *
+ * Move the current thread's kernel stack to the new thread.
+ */
+
+void stack_handoff(
+ thread_t old,
+ thread_t new)
+{
+ int mycpu = cpu_number();
+ vm_offset_t stack;
+
+ /*
+ * Save FP registers if in use.
+ */
+ fpu_save_context(old);
+
+ /*
+ * Switch address maps if switching tasks.
+ */
+ {
+ task_t old_task, new_task;
+
+ if ((old_task = old->task) != (new_task = new->task)) {
+ PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
+ old, mycpu);
+ PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
+ new, mycpu);
+
+ simple_lock (&new_task->machine.iopb_lock);
+#if NCPUS>1
+#warning SMP support missing (avoid races with io_perm_modify).
+#else
+ /* This optimization only works on a single processor
+ machine, where old_task's iopb can not change while
+ we are switching. */
+ if (old_task->machine.iopb || new_task->machine.iopb)
+#endif
+ update_ktss_iopb (new_task->machine.iopb,
+ new_task->machine.iopb_size);
+ simple_unlock (&new_task->machine.iopb_lock);
+ }
+ }
+
+ /*
+ * Load the rest of the user state for the new thread
+ */
+ switch_ktss(new->pcb);
+
+ /*
+ * Switch to new thread
+ */
+ stack = current_stack();
+ old->kernel_stack = 0;
+ new->kernel_stack = stack;
+ percpu_assign(active_thread, new);
+
+ /*
+ * Switch exception link to point to new
+ * user registers.
+ */
+
+ STACK_IEL(stack)->saved_state = USER_REGS(new);
+
+}
+
+/*
+ * Switch to the first thread on a CPU.
+ */
+void load_context(thread_t new)
+{
+ switch_ktss(new->pcb);
+ Load_context(new);
+}
+
+/*
+ * Switch to a new thread.
+ * Save the old thread`s kernel state or continuation,
+ * and return it.
+ */
+thread_t switch_context(
+ thread_t old,
+ continuation_t continuation,
+ thread_t new)
+{
+ /*
+ * Save FP registers if in use.
+ */
+ fpu_save_context(old);
+
+ /*
+ * Switch address maps if switching tasks.
+ */
+ {
+ task_t old_task, new_task;
+ int mycpu = cpu_number();
+
+ if ((old_task = old->task) != (new_task = new->task)) {
+ PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
+ old, mycpu);
+ PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
+ new, mycpu);
+
+ simple_lock (&new_task->machine.iopb_lock);
+#if NCPUS>1
+#warning SMP support missing (avoid races with io_perm_modify).
+#else
+ /* This optimization only works on a single processor
+ machine, where old_task's iopb can not change while
+ we are switching. */
+ if (old_task->machine.iopb || new_task->machine.iopb)
+#endif
+ update_ktss_iopb (new_task->machine.iopb,
+ new_task->machine.iopb_size);
+ simple_unlock (&new_task->machine.iopb_lock);
+ }
+ }
+
+ /*
+ * Load the rest of the user state for the new thread
+ */
+ switch_ktss(new->pcb);
+ return Switch_context(old, continuation, new);
+}
+
+void pcb_module_init(void)
+{
+ kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb),
+ KERNEL_STACK_ALIGN, NULL, 0);
+
+ fpu_module_init();
+}
+
+void pcb_init(task_t parent_task, thread_t thread)
+{
+ pcb_t pcb;
+
+ pcb = (pcb_t) kmem_cache_alloc(&pcb_cache);
+ if (pcb == 0)
+ panic("pcb_init");
+
+ counter(if (++c_threads_current > c_threads_max)
+ c_threads_max = c_threads_current);
+
+ /*
+ * We can't let random values leak out to the user.
+ */
+ memset(pcb, 0, sizeof *pcb);
+ simple_lock_init(&pcb->lock);
+
+ /*
+ * Guarantee that the bootstrapped thread will be in user
+ * mode.
+ */
+ pcb->iss.cs = USER_CS;
+ pcb->iss.ss = USER_DS;
+#if !defined(__x86_64__) || defined(USER32)
+ pcb->iss.ds = USER_DS;
+ pcb->iss.es = USER_DS;
+ pcb->iss.fs = USER_DS;
+ pcb->iss.gs = USER_DS;
+#endif
+ pcb->iss.efl = EFL_USER_SET;
+
+ thread->pcb = pcb;
+
+ /* This is a new thread for the current task, make it inherit our FPU
+ state. */
+ if (current_thread() && parent_task == current_task())
+ fpinherit(current_thread(), thread);
+}
+
+void pcb_terminate(thread_t thread)
+{
+ pcb_t pcb = thread->pcb;
+
+ counter(if (--c_threads_current < c_threads_min)
+ c_threads_min = c_threads_current);
+
+ if (pcb->ims.ifps != 0)
+ fp_free(pcb->ims.ifps);
+ if (pcb->ims.ldt != 0)
+ user_ldt_free(pcb->ims.ldt);
+ kmem_cache_free(&pcb_cache, (vm_offset_t) pcb);
+ thread->pcb = 0;
+}
+
+/*
+ * pcb_collect:
+ *
+ * Attempt to free excess pcb memory.
+ */
+
+void pcb_collect(__attribute__((unused)) const thread_t thread)
+{
+}
+
+
+/*
+ * thread_setstatus:
+ *
+ * Set the status of the specified thread.
+ */
+
+kern_return_t thread_setstatus(
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ unsigned int count)
+{
+ switch (flavor) {
+ case i386_THREAD_STATE:
+ case i386_REGS_SEGS_STATE:
+ {
+ struct i386_thread_state *state;
+ struct i386_saved_state *saved_state;
+
+ if (count < i386_THREAD_STATE_COUNT) {
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ state = (struct i386_thread_state *) tstate;
+
+ if (flavor == i386_REGS_SEGS_STATE) {
+ /*
+ * Code and stack selectors must not be null,
+ * and must have user protection levels.
+ * Only the low 16 bits are valid.
+ */
+ state->cs &= 0xffff;
+ state->ss &= 0xffff;
+#if !defined(__x86_64__) || defined(USER32)
+ state->ds &= 0xffff;
+ state->es &= 0xffff;
+ state->fs &= 0xffff;
+ state->gs &= 0xffff;
+#endif
+
+ if (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
+ || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U)
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ saved_state = USER_REGS(thread);
+
+ /*
+ * General registers
+ */
+#if defined(__x86_64__) && !defined(USER32)
+ saved_state->r8 = state->r8;
+ saved_state->r9 = state->r9;
+ saved_state->r10 = state->r10;
+ saved_state->r11 = state->r11;
+ saved_state->r12 = state->r12;
+ saved_state->r13 = state->r13;
+ saved_state->r14 = state->r14;
+ saved_state->r15 = state->r15;
+ saved_state->edi = state->rdi;
+ saved_state->esi = state->rsi;
+ saved_state->ebp = state->rbp;
+ saved_state->uesp = state->ursp;
+ saved_state->ebx = state->rbx;
+ saved_state->edx = state->rdx;
+ saved_state->ecx = state->rcx;
+ saved_state->eax = state->rax;
+ saved_state->eip = state->rip;
+ saved_state->efl = (state->rfl & ~EFL_USER_CLEAR)
+ | EFL_USER_SET;
+#else
+ saved_state->edi = state->edi;
+ saved_state->esi = state->esi;
+ saved_state->ebp = state->ebp;
+ saved_state->uesp = state->uesp;
+ saved_state->ebx = state->ebx;
+ saved_state->edx = state->edx;
+ saved_state->ecx = state->ecx;
+ saved_state->eax = state->eax;
+ saved_state->eip = state->eip;
+ saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
+ | EFL_USER_SET;
+#endif /* __x86_64__ && !USER32 */
+
+#if !defined(__x86_64__) || defined(USER32)
+ /*
+ * Segment registers. Set differently in V8086 mode.
+ */
+ if (saved_state->efl & EFL_VM) {
+ /*
+ * Set V8086 mode segment registers.
+ */
+ saved_state->cs = state->cs & 0xffff;
+ saved_state->ss = state->ss & 0xffff;
+ saved_state->v86_segs.v86_ds = state->ds & 0xffff;
+ saved_state->v86_segs.v86_es = state->es & 0xffff;
+ saved_state->v86_segs.v86_fs = state->fs & 0xffff;
+ saved_state->v86_segs.v86_gs = state->gs & 0xffff;
+
+ /*
+ * Zero protected mode segment registers.
+ */
+ saved_state->ds = 0;
+ saved_state->es = 0;
+ saved_state->fs = 0;
+ saved_state->gs = 0;
+
+ if (thread->pcb->ims.v86s.int_table) {
+ /*
+ * Hardware assist on.
+ */
+ thread->pcb->ims.v86s.flags =
+ saved_state->efl & (EFL_TF | EFL_IF);
+ }
+ } else
+#endif
+ if (flavor == i386_THREAD_STATE) {
+ /*
+ * 386 mode. Set segment registers for flat
+ * 32-bit address space.
+ */
+ saved_state->cs = USER_CS;
+ saved_state->ss = USER_DS;
+#if !defined(__x86_64__) || defined(USER32)
+ saved_state->ds = USER_DS;
+ saved_state->es = USER_DS;
+ saved_state->fs = USER_DS;
+ saved_state->gs = USER_DS;
+#endif
+ }
+ else {
+ /*
+ * User setting segment registers.
+ * Code and stack selectors have already been
+ * checked. Others will be reset by 'iret'
+ * if they are not valid.
+ */
+ saved_state->cs = state->cs;
+ saved_state->ss = state->ss;
+#if !defined(__x86_64__) || defined(USER32)
+ saved_state->ds = state->ds;
+ saved_state->es = state->es;
+ saved_state->fs = state->fs;
+ saved_state->gs = state->gs;
+#endif
+ }
+ break;
+ }
+
+ case i386_FLOAT_STATE: {
+
+ if (count < i386_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ return fpu_set_state(thread,
+ (struct i386_float_state *) tstate);
+ }
+
+ /*
+ * Temporary - replace by i386_io_map
+ */
+ case i386_ISA_PORT_MAP_STATE: {
+ //register struct i386_isa_port_map_state *state;
+
+ if (count < i386_ISA_PORT_MAP_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+#if 0
+ /*
+ * If the thread has no ktss yet,
+ * we must allocate one.
+ */
+
+ state = (struct i386_isa_port_map_state *) tstate;
+ tss = thread->pcb->ims.io_tss;
+ if (tss == 0) {
+ tss = iopb_create();
+ thread->pcb->ims.io_tss = tss;
+ }
+
+ memcpy(tss->bitmap,
+ state->pm,
+ sizeof state->pm);
+#endif
+ break;
+ }
+#if !defined(__x86_64__) || defined(USER32)
+ case i386_V86_ASSIST_STATE:
+ {
+ struct i386_v86_assist_state *state;
+ vm_offset_t int_table;
+ int int_count;
+
+ if (count < i386_V86_ASSIST_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_v86_assist_state *) tstate;
+ int_table = state->int_table;
+ int_count = state->int_count;
+
+ if (int_table >= VM_MAX_USER_ADDRESS ||
+ int_table +
+ int_count * sizeof(struct v86_interrupt_table)
+ > VM_MAX_USER_ADDRESS)
+ return KERN_INVALID_ARGUMENT;
+
+ thread->pcb->ims.v86s.int_table = int_table;
+ thread->pcb->ims.v86s.int_count = int_count;
+
+ thread->pcb->ims.v86s.flags =
+ USER_REGS(thread)->efl & (EFL_TF | EFL_IF);
+ break;
+ }
+#endif
+ case i386_DEBUG_STATE:
+ {
+ struct i386_debug_state *state;
+ kern_return_t ret;
+
+ if (count < i386_DEBUG_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_debug_state *) tstate;
+ ret = db_set_debug_state(thread->pcb, state);
+ if (ret)
+ return ret;
+ break;
+ }
+#if defined(__x86_64__) && !defined(USER32)
+ case i386_FSGS_BASE_STATE:
+ {
+ struct i386_fsgs_base_state *state;
+ if (count < i386_FSGS_BASE_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_fsgs_base_state *) tstate;
+ thread->pcb->ims.sbs.fsbase = state->fs_base;
+ thread->pcb->ims.sbs.gsbase = state->gs_base;
+ if (thread == current_thread()) {
+ wrmsr(MSR_REG_FSBASE, state->fs_base);
+ wrmsr(MSR_REG_GSBASE, state->gs_base);
+ }
+ break;
+ }
+#endif
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * thread_getstatus:
+ *
+ * Get the status of the specified thread.
+ */
+
+kern_return_t thread_getstatus(
+	thread_t	thread,
+	int		flavor,
+	thread_state_t	tstate,	/* pointer to OUT array */
+	unsigned int	*count)	/* IN/OUT */
+{
+	switch (flavor) {
+	    case THREAD_STATE_FLAVOR_LIST:
+	    {
+		/*
+		 * Braced block: a declaration may not immediately follow
+		 * a case label before C23, so ncount needs its own scope.
+		 */
+#if !defined(__x86_64__) || defined(USER32)
+		unsigned int ncount = 4;
+#else
+		unsigned int ncount = 3;
+#endif
+		if (*count < ncount)
+		    return (KERN_INVALID_ARGUMENT);
+		tstate[0] = i386_THREAD_STATE;
+		tstate[1] = i386_FLOAT_STATE;
+		tstate[2] = i386_ISA_PORT_MAP_STATE;
+#if !defined(__x86_64__) || defined(USER32)
+		tstate[3] = i386_V86_ASSIST_STATE;
+#endif
+		*count = ncount;
+		break;
+	    }
+
+	    case i386_THREAD_STATE:
+	    case i386_REGS_SEGS_STATE:
+	    {
+		struct i386_thread_state	*state;
+		struct i386_saved_state	*saved_state;
+
+		if (*count < i386_THREAD_STATE_COUNT)
+		    return(KERN_INVALID_ARGUMENT);
+
+		state = (struct i386_thread_state *) tstate;
+		saved_state = USER_REGS(thread);
+
+		/*
+		 * General registers.
+		 */
+#if defined(__x86_64__) && !defined(USER32)
+		state->r8 = saved_state->r8;
+		state->r9 = saved_state->r9;
+		state->r10 = saved_state->r10;
+		state->r11 = saved_state->r11;
+		state->r12 = saved_state->r12;
+		state->r13 = saved_state->r13;
+		state->r14 = saved_state->r14;
+		state->r15 = saved_state->r15;
+		state->rdi = saved_state->edi;
+		state->rsi = saved_state->esi;
+		state->rbp = saved_state->ebp;
+		state->rbx = saved_state->ebx;
+		state->rdx = saved_state->edx;
+		state->rcx = saved_state->ecx;
+		state->rax = saved_state->eax;
+		state->rip = saved_state->eip;
+		state->ursp = saved_state->uesp;
+		state->rfl = saved_state->efl;
+		state->rsp = 0; /* unused */
+#else
+		state->edi = saved_state->edi;
+		state->esi = saved_state->esi;
+		state->ebp = saved_state->ebp;
+		state->ebx = saved_state->ebx;
+		state->edx = saved_state->edx;
+		state->ecx = saved_state->ecx;
+		state->eax = saved_state->eax;
+		state->eip = saved_state->eip;
+		state->uesp = saved_state->uesp;
+		state->efl = saved_state->efl;
+		state->esp = 0; /* unused */
+#endif	/* __x86_64__ && !USER32 */
+
+		state->cs = saved_state->cs;
+		state->ss = saved_state->ss;
+#if !defined(__x86_64__) || defined(USER32)
+		if (saved_state->efl & EFL_VM) {
+		    /*
+		     * V8086 mode.
+		     */
+		    state->ds = saved_state->v86_segs.v86_ds & 0xffff;
+		    state->es = saved_state->v86_segs.v86_es & 0xffff;
+		    state->fs = saved_state->v86_segs.v86_fs & 0xffff;
+		    state->gs = saved_state->v86_segs.v86_gs & 0xffff;
+
+		    if (thread->pcb->ims.v86s.int_table) {
+			/*
+			 * Hardware assist on.  NOTE(review): this get
+			 * path also clears EFL_IF in the saved state
+			 * when no interrupt is pending — intentional
+			 * side effect inherited from Mach; confirm.
+			 */
+			if ((thread->pcb->ims.v86s.flags &
+			     (EFL_IF|V86_IF_PENDING))
+			    == 0)
+			    saved_state->efl &= ~EFL_IF;
+		    }
+		} else {
+		    /*
+		     * 386 mode.
+		     */
+		    state->ds = saved_state->ds & 0xffff;
+		    state->es = saved_state->es & 0xffff;
+		    state->fs = saved_state->fs & 0xffff;
+		    state->gs = saved_state->gs & 0xffff;
+		}
+#endif
+		*count = i386_THREAD_STATE_COUNT;
+		break;
+	    }
+
+	    case i386_FLOAT_STATE: {
+
+		if (*count < i386_FLOAT_STATE_COUNT)
+		    return(KERN_INVALID_ARGUMENT);
+
+		*count = i386_FLOAT_STATE_COUNT;
+		return fpu_get_state(thread,
+				(struct i386_float_state *)tstate);
+	    }
+
+	    /*
+	     * Temporary - replace by i386_io_map
+	     */
+	    case i386_ISA_PORT_MAP_STATE: {
+		struct i386_isa_port_map_state *state;
+
+		if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
+		    return(KERN_INVALID_ARGUMENT);
+
+		state = (struct i386_isa_port_map_state *) tstate;
+
+		simple_lock (&thread->task->machine.iopb_lock);
+		if (thread->task->machine.iopb == 0)
+		    /* No I/O bitmap allocated: report all ports masked.  */
+		    memset (state->pm, 0xff, sizeof state->pm);
+		else
+		    memcpy(state->pm,
+			   thread->task->machine.iopb,
+			   sizeof state->pm);
+		simple_unlock (&thread->task->machine.iopb_lock);
+
+		*count = i386_ISA_PORT_MAP_STATE_COUNT;
+		break;
+	    }
+#if !defined(__x86_64__) || defined(USER32)
+	    case i386_V86_ASSIST_STATE:
+	    {
+		struct i386_v86_assist_state *state;
+
+		if (*count < i386_V86_ASSIST_STATE_COUNT)
+		    return KERN_INVALID_ARGUMENT;
+
+		state = (struct i386_v86_assist_state *) tstate;
+		state->int_table = thread->pcb->ims.v86s.int_table;
+		state->int_count = thread->pcb->ims.v86s.int_count;
+
+		*count = i386_V86_ASSIST_STATE_COUNT;
+		break;
+	    }
+#endif
+	    case i386_DEBUG_STATE:
+	    {
+		struct i386_debug_state *state;
+
+		if (*count < i386_DEBUG_STATE_COUNT)
+		    return KERN_INVALID_ARGUMENT;
+
+		state = (struct i386_debug_state *) tstate;
+		db_get_debug_state(thread->pcb, state);
+
+		*count = i386_DEBUG_STATE_COUNT;
+		break;
+	    }
+#if defined(__x86_64__) && !defined(USER32)
+	    case i386_FSGS_BASE_STATE:
+	    {
+		struct i386_fsgs_base_state *state;
+
+		if (*count < i386_FSGS_BASE_STATE_COUNT)
+		    return KERN_INVALID_ARGUMENT;
+
+		state = (struct i386_fsgs_base_state *) tstate;
+		state->fs_base = thread->pcb->ims.sbs.fsbase;
+		state->gs_base = thread->pcb->ims.sbs.gsbase;
+		*count = i386_FSGS_BASE_STATE_COUNT;
+		break;
+	    }
+#endif
+	    default:
+		return(KERN_INVALID_ARGUMENT);
+	}
+
+	return(KERN_SUCCESS);
+}
+
+/*
+ * Alter the thread`s state so that a following thread_exception_return
+ * will make the thread return 'retval' from a syscall.
+ */
+void
+thread_set_syscall_return(
+	thread_t	thread,
+	kern_return_t	retval)
+{
+	/*
+	 * The return value is placed in the saved `eax' slot (the field
+	 * keeps that name on both 32- and 64-bit saved-state layouts, as
+	 * thread_setstatus above shows), which thread_exception_return
+	 * restores into the user's accumulator register.
+	 */
+	thread->pcb->iss.eax = retval;
+}
+
+/*
+ * Return preferred address of user stack.
+ * Always returns low address. If stack grows up,
+ * the stack grows away from this address;
+ * if stack grows down, the stack grows towards this
+ * address.
+ */
+vm_offset_t
+user_stack_low(vm_size_t stack_size)
+{
+	/*
+	 * Hand back the lowest address the stack will occupy, i.e.
+	 * stack_size bytes below the top of user space.
+	 */
+	return VM_MAX_USER_ADDRESS - stack_size;
+}
+
+/*
+ * Allocate argument area and set registers for first user thread.
+ */
+vm_offset_t
+set_user_regs(vm_offset_t stack_base, /* low address */
+	vm_offset_t stack_size,
+	const struct exec_info *exec_info,
+	vm_size_t arg_size)
+{
+	vm_offset_t arg_addr;
+	struct i386_saved_state *saved_state;
+
+	/* Stack base/size must already respect the user stack alignment. */
+	assert(P2ALIGNED(stack_size, USER_STACK_ALIGN));
+	assert(P2ALIGNED(stack_base, USER_STACK_ALIGN));
+	arg_size = P2ROUND(arg_size, USER_STACK_ALIGN);
+	/* Arguments sit at the very top of the stack region. */
+	arg_addr = stack_base + stack_size - arg_size;
+
+	/* Point the current thread's user stack pointer just below the
+	   argument area and its program counter at the entry point. */
+	saved_state = USER_REGS(current_thread());
+	saved_state->uesp = (rpc_vm_offset_t)arg_addr;
+	saved_state->eip = exec_info->entry;
+
+	return (arg_addr);
+}
diff --git a/i386/i386/pcb.h b/i386/i386/pcb.h
new file mode 100644
index 0000000..4d48b9f
--- /dev/null
+++ b/i386/i386/pcb.h
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ *	Machine-dependent PCB (process control block) interface for i386:
+ *	thread state get/set, context switching and kernel stack handling.
+ */
+
+#ifndef _I386_PCB_H_
+#define _I386_PCB_H_
+
+#include <sys/types.h>
+#include <mach/exec/exec.h>
+#include <mach/thread_status.h>
+#include <machine/thread.h>
+#include <machine/io_perm.h>
+
+extern void pcb_init (task_t parent_task, thread_t thread);
+
+extern void pcb_terminate (thread_t thread);
+
+extern void pcb_collect (thread_t thread);
+
+extern kern_return_t thread_setstatus (
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ unsigned int count);
+
+extern kern_return_t thread_getstatus (
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ unsigned int *count);
+
+extern void thread_set_syscall_return (
+ thread_t thread,
+ kern_return_t retval);
+
+extern vm_offset_t user_stack_low (vm_size_t stack_size);
+
+extern vm_offset_t set_user_regs (
+ vm_offset_t stack_base,
+ vm_offset_t stack_size,
+ const struct exec_info *exec_info,
+ vm_size_t arg_size);
+
+extern void load_context (thread_t new);
+
+extern void stack_attach (
+ thread_t thread,
+ vm_offset_t stack,
+ void (*continuation)(thread_t));
+
+extern vm_offset_t stack_detach (thread_t thread);
+
+extern void switch_ktss (pcb_t pcb);
+
+extern void update_ktss_iopb (unsigned char *new_iopb, io_port_t size);
+
+extern thread_t Load_context (thread_t new);
+
+extern thread_t Switch_context (thread_t old, continuation_t continuation, thread_t new);
+
+extern void switch_to_shutdown_context(thread_t thread,
+ void (*routine)(processor_t),
+ processor_t processor);
+
+extern void Thread_continue (void);
+
+extern void pcb_module_init (void);
+
+#endif /* _I386_PCB_H_ */
diff --git a/i386/i386/percpu.c b/i386/i386/percpu.c
new file mode 100644
index 0000000..c6b728b
--- /dev/null
+++ b/i386/i386/percpu.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2023 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <i386/smp.h>
+#include <i386/apic.h>
+#include <kern/cpu_number.h>
+#include <i386/percpu.h>
+
+struct percpu percpu_array[NCPUS] = {0};
+
+#ifndef MACH_XEN
+void init_percpu(int cpu)
+{
+	struct percpu *pcpu = &percpu_array[cpu];
+
+	/* Record this CPU's identity so the %gs-relative accessors
+	   (percpu_get/percpu_ptr) can locate and describe it.  */
+	pcpu->self = pcpu;
+	pcpu->apic_id = apic_get_current_cpu();
+	pcpu->cpu_id = cpu;
+}
+#endif
diff --git a/i386/i386/percpu.h b/i386/i386/percpu.h
new file mode 100644
index 0000000..637d2ca
--- /dev/null
+++ b/i386/i386/percpu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2023 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PERCPU_H_
+#define _PERCPU_H_
+
+struct percpu;
+
+#if NCPUS > 1
+
+/* SMP: the %gs segment base points at this CPU's struct percpu, so a
+   %gs-relative access at offsetof(struct percpu, stm) reaches the
+   current CPU's copy without knowing the CPU number.  */
+
+/* Store VAL into the current CPU's field STM.  */
+#define percpu_assign(stm, val)	    \
+	    asm("mov %[src], %%gs:%c[offs]" \
+		 : /* No outputs */ \
+		 : [src] "r" (val), [offs] "e" (__builtin_offsetof(struct percpu, stm)) \
+		 : );
+
+/* Load and yield the current CPU's field STM, typed TYP.  */
+#define percpu_get(typ, stm)	    \
+MACRO_BEGIN			    \
+	typ val_;		    \
+				    \
+	asm("mov %%gs:%c[offs], %[dst]"	\
+		 : [dst] "=r" (val_) \
+		 : [offs] "e" (__builtin_offsetof(struct percpu, stm)) \
+		 : );		    \
+				    \
+	val_;			    \
+MACRO_END
+
+/* Yield a pointer to the current CPU's field STM: the field offset plus
+   the struct's own address read from %gs:0 (the `self' member).  */
+#define percpu_ptr(typ, stm)	    \
+MACRO_BEGIN			    \
+	typ *ptr_ = (typ *)__builtin_offsetof(struct percpu, stm);	\
+				    \
+	asm("add %%gs:0, %[pointer]" \
+		 : [pointer] "+r" (ptr_) \
+		 : /* No inputs */ \
+		 : );		    \
+				    \
+	ptr_;			    \
+MACRO_END
+
+#else
+
+/* Uniprocessor: there is exactly one percpu slot; access it directly.  */
+#define percpu_assign(stm, val)	    \
+MACRO_BEGIN			    \
+	percpu_array[0].stm = val;  \
+MACRO_END
+#define percpu_get(typ, stm)	    \
+	(percpu_array[0].stm)
+#define percpu_ptr(typ, stm)	    \
+	(&percpu_array[0].stm)
+
+#endif
+
+#include <kern/processor.h>
+#include <mach/mach_types.h>
+
+struct percpu {
+	/* `self' must stay first: percpu_ptr() reads %gs:0, i.e. the
+	   word at offset 0 of this struct, to obtain its own address.  */
+	struct percpu	*self;
+	int		apic_id;	/* local APIC id of this CPU */
+	int		cpu_id;		/* index into percpu_array */
+	struct processor processor;
+	thread_t	active_thread;
+	vm_offset_t	active_stack;
+/* Candidates for future migration into the percpu area:
+	struct machine_slot machine_slot;
+	struct mp_desc_table mp_desc_table;
+	vm_offset_t int_stack_top;
+	vm_offset_t int_stack_base;
+	ast_t need_ast;
+	ipc_kmsg_t ipc_kmsg_cache;
+	pmap_update_list cpu_update_list;
+	spl_t saved_ipl;
+	spl_t curr_ipl;
+	timer_data_t kernel_timer;
+	timer_t current_timer;
+	unsigned long in_interrupt;
+*/
+};
+
+extern struct percpu percpu_array[NCPUS];
+
+void init_percpu(int cpu);
+
+#endif /* _PERCPU_H_ */
diff --git a/i386/i386/phys.c b/i386/i386/phys.c
new file mode 100644
index 0000000..e864489
--- /dev/null
+++ b/i386/i386/phys.c
@@ -0,0 +1,187 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/xen.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <vm/vm_map.h>
+#include "vm_param.h"
+#include <mach/vm_prot.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <i386/pmap.h>
+#include <i386/model_dep.h>
+#include <mach/machine/vm_param.h>
+
+#define INTEL_PTE_W(p) (INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF | INTEL_PTE_MOD | pa_to_pte(p))
+#define INTEL_PTE_R(p) (INTEL_PTE_VALID | INTEL_PTE_REF | pa_to_pte(p))
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ */
+void
+pmap_zero_page(phys_addr_t p)
+{
+	vm_offset_t v;
+	/* NULL until a mapping window is actually taken; consistent with
+	   pmap_copy_page and keeps -Wmaybe-uninitialized quiet.  */
+	pmap_mapwindow_t *map = NULL;
+	boolean_t mapped = p >= VM_PAGE_DIRECTMAP_LIMIT;
+
+	assert(p != vm_page_fictitious_addr);
+
+	if (mapped)
+	{
+		/* Page lies above the direct map: borrow a temporary
+		   mapping window to reach it.  */
+		map = pmap_get_mapwindow(INTEL_PTE_W(p));
+		v = map->vaddr;
+	}
+	else
+		v = phystokv(p);
+
+	memset((void*) v, 0, PAGE_SIZE);
+
+	if (mapped)
+		pmap_put_mapwindow(map);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent) pages.
+ */
+void
+pmap_copy_page(
+	phys_addr_t	src,
+	phys_addr_t	dst)
+{
+	vm_offset_t src_addr_v, dst_addr_v;
+	/* Both window pointers start NULL; each is set only when the
+	   corresponding page needs a mapping window.  (Previously only
+	   src_map was initialized — dst_map now matches.)  */
+	pmap_mapwindow_t *src_map = NULL;
+	pmap_mapwindow_t *dst_map = NULL;
+	boolean_t src_mapped = src >= VM_PAGE_DIRECTMAP_LIMIT;
+	boolean_t dst_mapped = dst >= VM_PAGE_DIRECTMAP_LIMIT;
+	assert(src != vm_page_fictitious_addr);
+	assert(dst != vm_page_fictitious_addr);
+
+	if (src_mapped)
+	{
+		src_map = pmap_get_mapwindow(INTEL_PTE_R(src));
+		src_addr_v = src_map->vaddr;
+	}
+	else
+		src_addr_v = phystokv(src);
+
+	if (dst_mapped)
+	{
+		dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst));
+		dst_addr_v = dst_map->vaddr;
+	}
+	else
+		dst_addr_v = phystokv(dst);
+
+	memcpy((void *) dst_addr_v, (void *) src_addr_v, PAGE_SIZE);
+
+	if (src_mapped)
+		pmap_put_mapwindow(src_map);
+	if (dst_mapped)
+		pmap_put_mapwindow(dst_map);
+}
+
+/*
+ * copy_to_phys(src_addr_v, dst_addr_p, count)
+ *
+ * Copy virtual memory to physical memory
+ */
+void
+copy_to_phys(
+	vm_offset_t 	src_addr_v,
+	phys_addr_t 	dst_addr_p,
+	int 		count)
+{
+	vm_offset_t dst_addr_v;
+	pmap_mapwindow_t *dst_map = NULL;	/* set only when a window is used */
+	boolean_t mapped = dst_addr_p >= VM_PAGE_DIRECTMAP_LIMIT;
+	assert(dst_addr_p != vm_page_fictitious_addr);
+	/* The destination range must not cross a page boundary.  */
+	assert(pa_to_pte(dst_addr_p + count-1) == pa_to_pte(dst_addr_p));
+
+	if (mapped)
+	{
+		dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst_addr_p));
+		dst_addr_v = dst_map->vaddr + (dst_addr_p & (INTEL_PGBYTES-1));
+	}
+	else
+		dst_addr_v = phystokv(dst_addr_p);
+
+	memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+	if (mapped)
+		pmap_put_mapwindow(dst_map);
+}
+
+/*
+ * copy_from_phys(src_addr_p, dst_addr_v, count)
+ *
+ * Copy physical memory to virtual memory. The virtual memory
+ * is assumed to be present (e.g. the buffer pool).
+ */
+void
+copy_from_phys(
+	phys_addr_t 	src_addr_p,
+	vm_offset_t 	dst_addr_v,
+	int 		count)
+{
+	vm_offset_t src_addr_v;
+	pmap_mapwindow_t *src_map = NULL;	/* set only when a window is used */
+	boolean_t mapped = src_addr_p >= VM_PAGE_DIRECTMAP_LIMIT;
+	assert(src_addr_p != vm_page_fictitious_addr);
+	/* The source range must not cross a page boundary.  */
+	assert(pa_to_pte(src_addr_p + count-1) == pa_to_pte(src_addr_p));
+
+	if (mapped)
+	{
+		src_map = pmap_get_mapwindow(INTEL_PTE_R(src_addr_p));
+		src_addr_v = src_map->vaddr + (src_addr_p & (INTEL_PGBYTES-1));
+	}
+	else
+		src_addr_v = phystokv(src_addr_p);
+
+	memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+	if (mapped)
+		pmap_put_mapwindow(src_map);
+}
+
+/*
+ * kvtophys(addr)
+ *
+ * Convert a kernel virtual address to a physical address
+ */
+phys_addr_t
+kvtophys(vm_offset_t addr)
+{
+	pt_entry_t *pte = pmap_pte(kernel_pmap, addr);
+
+	/* Unmapped kernel addresses translate to physical 0.  */
+	if (pte == PT_ENTRY_NULL)
+		return 0;
+
+	/* Page frame from the PTE, plus the offset within the page.  */
+	return pte_to_pa(*pte) | (addr & INTEL_OFFMASK);
+}
diff --git a/i386/i386/pic.c b/i386/i386/pic.c
new file mode 100644
index 0000000..66fbc04
--- /dev/null
+++ b/i386/i386/pic.c
@@ -0,0 +1,262 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <kern/printf.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+
+spl_t curr_ipl[NCPUS] = {0};
+int curr_pic_mask;
+int spl_init = 0;
+
+int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+unsigned short master_icw, master_ocw, slaves_icw, slaves_ocw;
+
+u_short PICM_ICW1, PICM_OCW1, PICS_ICW1, PICS_OCW1 ;
+u_short PICM_ICW2, PICM_OCW2, PICS_ICW2, PICS_OCW2 ;
+u_short PICM_ICW3, PICM_OCW3, PICS_ICW3, PICS_OCW3 ;
+u_short PICM_ICW4, PICS_ICW4 ;
+
+/*
+** picinit() - This routine
+** * Establishes a table of interrupt vectors
+** * Establishes location of PICs in the system
+** * Unmasks all interrupts in the PICs
+** * Initialises them
+**
+** At this stage the interrupt functionality of this system should be
+** complete.
+*/
+
+/*
+** Initialise the PICs , master first, then the slave.
+** All the register field definitions are described in pic.h also
+** the settings of these fields for the various registers are selected.
+*/
+
+void
+picinit(void)
+{
+
+	/* Interrupts stay disabled for the whole programming sequence.
+	   NOTE(review): they are not re-enabled here — presumably a
+	   later spl transition does that; confirm at the call site.  */
+	asm("cli");
+
+	/*
+	** 0. Initialise the current level to match cli()
+	*/
+	int i;
+
+	for (i = 0; i < NCPUS; i++)
+		curr_ipl[i] = SPLHI;
+	curr_pic_mask = 0;
+
+	/*
+	** 1. Generate addresses to each PIC port.
+	*/
+
+	master_icw = PIC_MASTER_ICW;
+	master_ocw = PIC_MASTER_OCW;
+	slaves_icw = PIC_SLAVE_ICW;
+	slaves_ocw = PIC_SLAVE_OCW;
+
+	/*
+	** 2. Select options for each ICW and each OCW for each PIC.
+	*/
+
+	PICM_ICW1 =
+	(ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);
+
+	PICS_ICW1 =
+	(ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);
+
+	PICM_ICW2 = PICM_VECTBASE;
+	PICS_ICW2 = PICS_VECTBASE;
+
+	/* ICW3 is only assigned for AT-class machines; on other
+	   configurations these globals retain their zero initializers.  */
+#ifdef AT386
+	PICM_ICW3 = ( SLAVE_ON_IR2 );
+	PICS_ICW3 = ( I_AM_SLAVE_2 );
+#endif /* AT386 */
+
+	PICM_ICW4 =
+	(SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);
+	PICS_ICW4 =
+	(SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);
+
+	PICM_OCW1 = (curr_pic_mask & 0x00FF);
+	PICS_OCW1 = ((curr_pic_mask & 0xFF00)>>8);
+
+	PICM_OCW2 = NON_SPEC_EOI;
+	PICS_OCW2 = NON_SPEC_EOI;
+
+	PICM_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+	PICS_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+
+	/*
+	** 3. Initialise master - send commands to master PIC
+	** (ICW1 goes to the command port; ICW2-4 follow on the data
+	** port, then all IRQs are masked until step 5.)
+	*/
+
+	outb ( master_icw, PICM_ICW1 );
+	outb ( master_ocw, PICM_ICW2 );
+	outb ( master_ocw, PICM_ICW3 );
+	outb ( master_ocw, PICM_ICW4 );
+
+	outb ( master_ocw, PICM_MASK );
+	outb ( master_icw, PICM_OCW3 );
+
+	/*
+	** 4. Initialise slave - send commands to slave PIC
+	*/
+
+	outb ( slaves_icw, PICS_ICW1 );
+	outb ( slaves_ocw, PICS_ICW2 );
+	outb ( slaves_ocw, PICS_ICW3 );
+	outb ( slaves_ocw, PICS_ICW4 );
+
+
+	outb ( slaves_ocw, PICS_OCW1 );
+	outb ( slaves_icw, PICS_OCW3 );
+
+	/*
+	** 5. Initialise interrupts
+	*/
+	outb ( master_ocw, PICM_OCW1 );
+
+}
+
+/*
+ * Default handler for unexpected interrupts: warn once per unit.
+ */
+void
+intnull(int unit_dev)
+{
+	static char warned[NINTR];
+
+	/* Bounds-check both directions before indexing warned[]: a
+	   negative unit would otherwise access out of bounds.  */
+	if (unit_dev < 0 || unit_dev >= NINTR)
+		printf("Unknown interrupt %d\n", unit_dev);
+	else if (!warned[unit_dev])
+	{
+		printf("intnull(%d)\n", unit_dev);
+		warned[unit_dev] = 1;
+	}
+
+}
+
+/*
+ * Mask a PIC IRQ.
+ */
+void
+mask_irq (unsigned int irq_nr)
+{
+	int updated = curr_pic_mask | (1 << irq_nr);
+
+	/* Already masked — nothing to write to the hardware.  */
+	if (updated == curr_pic_mask)
+		return;
+
+	curr_pic_mask = updated;
+	/* IRQs 0-7 live on the master PIC, 8-15 on the slave.  */
+	if (irq_nr < 8)
+		outb (PIC_MASTER_OCW, curr_pic_mask & 0xff);
+	else
+		outb (PIC_SLAVE_OCW, curr_pic_mask >> 8);
+}
+
+/*
+ * Unmask a PIC IRQ.
+ */
+void
+unmask_irq (unsigned int irq_nr)
+{
+	int clear = 1 << irq_nr;
+	int updated;
+
+	/* Unmasking a slave IRQ also opens the cascade line (IR2)
+	   on the master.  */
+	if (irq_nr >= 8)
+		clear |= 1 << 2;
+
+	updated = curr_pic_mask & ~clear;
+
+	/* Already unmasked — skip the hardware write.  */
+	if (updated == curr_pic_mask)
+		return;
+
+	curr_pic_mask = updated;
+	/* IRQs 0-7 live on the master PIC, 8-15 on the slave.  */
+	if (irq_nr < 8)
+		outb (PIC_MASTER_OCW, curr_pic_mask & 0xff);
+	else
+		outb (PIC_SLAVE_OCW, curr_pic_mask >> 8);
+}
+
diff --git a/i386/i386/pic.h b/i386/i386/pic.h
new file mode 100644
index 0000000..aec0ef6
--- /dev/null
+++ b/i386/i386/pic.h
@@ -0,0 +1,191 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_PIC_H_
+#define _I386_PIC_H_
+
+#ifndef APIC
+#define NINTR 0x10
+#endif
+#define NPICS 0x02
+
+/*
+** The following are definitions used to locate the PICs in the system
+*/
+
+#if defined(AT386) || defined(ATX86_64)
+#define ADDR_PIC_BASE 0x20
+#define OFF_ICW 0x00
+#define OFF_OCW 0x01
+#define SIZE_PIC 0x80
+#endif /* defined(AT386) || defined(ATX86_64) */
+
+#define PIC_MASTER_ICW (ADDR_PIC_BASE + OFF_ICW)
+#define PIC_MASTER_OCW (ADDR_PIC_BASE + OFF_OCW)
+#define PIC_SLAVE_ICW (PIC_MASTER_ICW + SIZE_PIC)
+#define PIC_SLAVE_OCW (PIC_MASTER_OCW + SIZE_PIC)
+
+/*
+** The following banks of definitions ICW1, ICW2, ICW3, and ICW4 are used
+** to define the fields of the various ICWs for initialisation of the PICs
+*/
+
+/*
+** ICW1
+*/
+
+#define ICW_TEMPLATE 0x10
+
+#define LEVL_TRIGGER 0x08
+#define EDGE_TRIGGER 0x00
+#define ADDR_INTRVL4 0x04
+#define ADDR_INTRVL8 0x00
+#define SINGLE__MODE 0x02
+#define CASCADE_MODE 0x00
+#define ICW4__NEEDED 0x01
+#define NO_ICW4_NEED 0x00
+
+/*
+** ICW2
+*/
+
+#if defined(AT386) || defined(ATX86_64)
+#define PICM_VECTBASE			0x20
+/* Parenthesized so the expansion stays atomic inside any expression.  */
+#define PICS_VECTBASE			(PICM_VECTBASE + 0x08)
+#endif /* defined(AT386) || defined(ATX86_64) */
+
+/*
+** ICW3
+*/
+
+#define SLAVE_ON_IR0 0x01
+#define SLAVE_ON_IR1 0x02
+#define SLAVE_ON_IR2 0x04
+#define SLAVE_ON_IR3 0x08
+#define SLAVE_ON_IR4 0x10
+#define SLAVE_ON_IR5 0x20
+#define SLAVE_ON_IR6 0x40
+#define SLAVE_ON_IR7 0x80
+
+#define I_AM_SLAVE_0 0x00
+#define I_AM_SLAVE_1 0x01
+#define I_AM_SLAVE_2 0x02
+#define I_AM_SLAVE_3 0x03
+#define I_AM_SLAVE_4 0x04
+#define I_AM_SLAVE_5 0x05
+#define I_AM_SLAVE_6 0x06
+#define I_AM_SLAVE_7 0x07
+
+/*
+** ICW4
+*/
+
+#define SNF_MODE_ENA 0x10
+#define SNF_MODE_DIS 0x00
+#define BUFFERD_MODE 0x08
+#define NONBUFD_MODE 0x00
+#define AUTO_EOI_MOD 0x02
+#define NRML_EOI_MOD 0x00
+#define I8086_EMM_MOD 0x01
+#define SET_MCS_MODE 0x00
+
+/*
+** OCW1
+*/
+#define PICM_MASK 0xFF
+#define PICS_MASK 0xFF
+/*
+** OCW2
+*/
+
+#define NON_SPEC_EOI 0x20
+#define SPECIFIC_EOI 0x60
+#define ROT_NON_SPEC 0xA0
+#define SET_ROT_AEOI 0x80
+#define RSET_ROTAEOI 0x00
+#define ROT_SPEC_EOI 0xE0
+#define SET_PRIORITY 0xC0
+#define NO_OPERATION 0x40
+
+#define SEND_EOI_IR0 0x00
+#define SEND_EOI_IR1 0x01
+#define SEND_EOI_IR2 0x02
+#define SEND_EOI_IR3 0x03
+#define SEND_EOI_IR4 0x04
+#define SEND_EOI_IR5 0x05
+#define SEND_EOI_IR6 0x06
+#define SEND_EOI_IR7 0x07
+
+/*
+** OCW3
+*/
+
+#define OCW_TEMPLATE 0x08
+#define SPECIAL_MASK 0x40
+#define MASK_MDE_SET 0x20
+#define MASK_MDE_RST 0x00
+#define POLL_COMMAND 0x04
+#define NO_POLL_CMND 0x00
+#define READ_NEXT_RD 0x02
+#define READ_IR_ONRD 0x00
+#define READ_IS_ONRD 0x01
+
+#define PIC_MASK_ZERO 0x00
+
+#if !defined(__ASSEMBLER__) && !defined(APIC)
+extern void picinit (void);
+extern int curr_pic_mask;
+extern void intnull(int unit);
+extern void mask_irq (unsigned int irq_nr);
+extern void unmask_irq (unsigned int irq_nr);
+#endif /* !defined(__ASSEMBLER__) && !defined(APIC) */
+
+#endif /* _I386_PIC_H_ */
diff --git a/i386/i386/pio.h b/i386/i386/pio.h
new file mode 100644
index 0000000..c488fbb
--- /dev/null
+++ b/i386/i386/pio.h
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_PIO_H_
+#define _I386_PIO_H_
+
+#ifndef __GNUC__
+#error You do not stand a chance. This file is gcc only.
+#endif /* __GNUC__ */
+
+#define inl(y) \
+({ unsigned int _tmp__; \
+ asm volatile("inl %1, %0" : "=a" (_tmp__) : "dN" ((unsigned short)(y))); \
+ _tmp__; })
+
+#define inw(y) \
+({ unsigned short _tmp__; \
+ asm volatile("inw %1, %0" : "=a" (_tmp__) : "dN" ((unsigned short)(y))); \
+ _tmp__; })
+
+#define inb(y) \
+({ unsigned char _tmp__; \
+ asm volatile("inb %1, %0" : "=a" (_tmp__) : "dN" ((unsigned short)(y))); \
+ _tmp__; })
+
+
+#define outl(x, y) \
+{ asm volatile("outl %0, %1" : : "a" ((unsigned int)(y)) , "dN" ((unsigned short)(x))); }
+
+
+#define outw(x, y) \
+{ asm volatile("outw %0, %1" : : "a" ((unsigned short)(y)) , "dN" ((unsigned short)(x))); }
+
+
+#define outb(x, y) \
+{ asm volatile("outb %0, %1" : : "a" ((unsigned char)(y)) , "dN" ((unsigned short)(x))); }
+
+#endif /* _I386_PIO_H_ */
diff --git a/i386/i386/pit.c b/i386/i386/pit.c
new file mode 100644
index 0000000..6c006a9
--- /dev/null
+++ b/i386/i386/pit.c
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <kern/mach_clock.h>
+#include <i386/ipl.h>
+#include <machine/irq.h>
+#include <i386/pit.h>
+#include <i386/pio.h>
+#include <kern/cpu_number.h>
+
+int pitctl_port = PITCTL_PORT; /* For 386/20 Board */
+int pitctr0_port = PITCTR0_PORT; /* For 386/20 Board */
+/* We want PIT 0 in square wave mode */
+
+int pit0_mode = PIT_C0|PIT_SQUAREMODE|PIT_READMODE ;
+
+
+unsigned int clknumb = CLKNUM; /* interrupt interval for timer 0 */
+
+void
+pit_prepare_sleep(int persec)
+{
+ /* Prepare to sleep for 1/persec seconds */
+ uint32_t val = 0;
+ uint8_t lsb, msb;
+
+ val = inb(PITAUX_PORT);
+ val &= ~PITAUX_OUT2;
+ val |= PITAUX_GATE2;
+ outb (PITAUX_PORT, val);
+ outb (PITCTL_PORT, PIT_C2 | PIT_LOADMODE | PIT_ONESHOTMODE);
+ val = CLKNUM / persec;
+ lsb = val & 0xff;
+ msb = val >> 8;
+ outb (PITCTR2_PORT, lsb);
+ val = inb(POST_PORT); /* ~1us i/o delay */
+ outb (PITCTR2_PORT, msb);
+}
+
+void
+pit_sleep(void)
+{
+ uint8_t val;
+
+ /* Start counting down */
+ val = inb(PITAUX_PORT);
+ val &= ~PITAUX_GATE2;
+ outb (PITAUX_PORT, val); /* Gate low */
+ val |= PITAUX_GATE2;
+ outb (PITAUX_PORT, val); /* Gate high */
+
+ /* Wait until counter reaches zero */
+ while ((inb(PITAUX_PORT) & PITAUX_VAL) == 0);
+}
+
+void
+pit_udelay(int usec)
+{
+ pit_prepare_sleep(1000000 / usec);
+ pit_sleep();
+}
+
+void
+pit_mdelay(int msec)
+{
+ pit_prepare_sleep(1000 / msec);
+ pit_sleep();
+}
+
+void
+clkstart(void)
+{
+ if (cpu_number() != 0)
+ /* Only one PIT initialization is needed */
+ return;
+ unsigned char byte;
+ unsigned long s;
+
+ s = sploff(); /* disable interrupts */
+
+ /* Since we use only timer 0, we program that.
+ * 8254 Manual specifically says you do not need to program
+ * timers you do not use
+ */
+ outb(pitctl_port, pit0_mode);
+ clknumb = (CLKNUM + hz / 2) / hz;
+ byte = clknumb;
+ outb(pitctr0_port, byte);
+ byte = clknumb>>8;
+ outb(pitctr0_port, byte);
+ splon(s); /* restore interrupt state */
+}
diff --git a/i386/i386/pit.h b/i386/i386/pit.h
new file mode 100644
index 0000000..49e1051
--- /dev/null
+++ b/i386/i386/pit.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_PIT_H_
+#define _I386_PIT_H_
+
+#if defined(AT386) || defined(ATX86_64)
+/* Definitions for 8254 Programmable Interrupt Timer ports on AT 386 */
+#define PITCTR0_PORT 0x40 /* counter 0 port */
+#define PITCTR1_PORT 0x41 /* counter 1 port */
+#define PITCTR2_PORT 0x42 /* counter 2 port */
+#define PITCTL_PORT 0x43 /* PIT control port */
+#define PITAUX_PORT 0x61 /* PIT auxiliary port */
+/* bits used in auxiliary control port for timer 2 */
+#define PITAUX_GATE2 0x01 /* aux port, PIT gate 2 input */
+#define PITAUX_OUT2 0x02 /* aux port, PIT clock out 2 enable */
+#define PITAUX_VAL 0x20 /* aux port, output */
+#endif /* defined(AT386) || defined(ATX86_64) */
+
+/* Following are used for Timer 0 */
+#define PIT_C0 0x00 /* select counter 0 */
+#define PIT_LOADMODE 0x30 /* load least significant byte followed
+ * by most significant byte */
+#define PIT_NDIVMODE 0x04 /*divide by N counter */
+
+/* Used for Timer 1. Used for delay calculations in countdown mode */
+#define PIT_C1 0x40 /* select counter 1 */
+#define PIT_READMODE 0x30 /* read or load least significant byte
+ * followed by most significant byte */
+
+#define PIT_SQUAREMODE 0x06 /* square-wave mode */
+#define PIT_RATEMODE 0x04 /* rate generator mode */
+#define PIT_ONESHOTMODE 0x02 /* one-shot mode */
+
+/* Used for Timer 2. */
+#define PIT_C2 0x80 /* select counter 2 */
+
+#define POST_PORT 0x80 /* used for tiny i/o delay */
+
+/*
+ * Clock speed for the timer in hz divided by the constant HZ
+ * (defined in param.h)
+ */
+#if defined(AT386) || defined(ATX86_64)
+#define CLKNUM 1193182
+#endif /* defined(AT386) || defined(ATX86_64) */
+
+extern void clkstart(void);
+extern void pit_prepare_sleep(int hz);
+extern void pit_sleep(void);
+extern void pit_udelay(int usec);
+extern void pit_mdelay(int msec);
+
+#endif /* _I386_PIT_H_ */
diff --git a/i386/i386/pmap.h b/i386/i386/pmap.h
new file mode 100644
index 0000000..a989923
--- /dev/null
+++ b/i386/i386/pmap.h
@@ -0,0 +1,27 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <intel/pmap.h>
diff --git a/i386/i386/proc_reg.h b/i386/i386/proc_reg.h
new file mode 100644
index 0000000..704676c
--- /dev/null
+++ b/i386/i386/proc_reg.h
@@ -0,0 +1,407 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Processor registers for i386 and i486.
+ */
+#ifndef _I386_PROC_REG_H_
+#define _I386_PROC_REG_H_
+
+/*
+ * CR0
+ */
+#define CR0_PG 0x80000000 /* enable paging */
+#define CR0_CD 0x40000000 /* i486: cache disable */
+#define CR0_NW 0x20000000 /* i486: no write-through */
+#define CR0_AM 0x00040000 /* i486: alignment check mask */
+#define CR0_WP 0x00010000 /* i486: write-protect kernel access */
+#define CR0_NE 0x00000020 /* i486: handle numeric exceptions */
+#define CR0_ET 0x00000010 /* extension type is 80387 */
+ /* (not official) */
+#define CR0_TS 0x00000008 /* task switch */
+#define CR0_EM 0x00000004 /* emulate coprocessor */
+#define CR0_MP 0x00000002 /* monitor coprocessor */
+#define CR0_PE 0x00000001 /* enable protected mode */
+
+/*
+ * CR3
+ */
+#define CR3_PCD 0x0010 /* Page-level Cache Disable */
+#define CR3_PWT 0x0008 /* Page-level Writes Transparent */
+
+/*
+ * CR4
+ */
+#define CR4_VME 0x0001 /* Virtual-8086 Mode Extensions */
+#define CR4_PVI 0x0002 /* Protected-Mode Virtual Interrupts */
+#define CR4_TSD 0x0004 /* Time Stamp Disable */
+#define CR4_DE 0x0008 /* Debugging Extensions */
+#define CR4_PSE 0x0010 /* Page Size Extensions */
+#define CR4_PAE 0x0020 /* Physical Address Extension */
+#define CR4_MCE 0x0040 /* Machine-Check Enable */
+#define CR4_PGE 0x0080 /* Page Global Enable */
+#define CR4_PCE 0x0100 /* Performance-Monitoring Counter
+ * Enable */
+#define CR4_OSFXSR 0x0200 /* Operating System Support for FXSAVE
+ * and FXRSTOR instructions */
+#define CR4_OSXMMEXCPT 0x0400 /* Operating System Support for Unmasked
+ * SIMD Floating-Point Exceptions */
+#define CR4_OSXSAVE 0x40000 /* Operating System Support for XSAVE
+ * and XRSTOR instructions */
+
+#ifndef __ASSEMBLER__
+#ifdef __GNUC__
+
+#ifndef MACH_HYP
+#include <i386/gdt.h>
+#include <i386/ldt.h>
+#endif /* MACH_HYP */
+
+static inline unsigned long
+get_eflags(void)
+{
+ unsigned long eflags;
+#ifdef __x86_64__
+ asm("pushfq; popq %0" : "=r" (eflags));
+#else
+ asm("pushfl; popl %0" : "=r" (eflags));
+#endif
+ return eflags;
+}
+
+static inline void
+set_eflags(unsigned long eflags)
+{
+#ifdef __x86_64__
+ asm volatile("pushq %0; popfq" : : "r" (eflags));
+#else
+ asm volatile("pushl %0; popfl" : : "r" (eflags));
+#endif
+}
+
+#define get_esp() \
+ ({ \
+ register unsigned long _temp__ asm("esp"); \
+ _temp__; \
+ })
+
+#ifdef __x86_64__
+#define get_eflags() \
+ ({ \
+ register unsigned long _temp__; \
+ asm("pushfq; popq %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#else
+#define get_eflags() \
+ ({ \
+ register unsigned long _temp__; \
+ asm("pushfl; popl %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#define get_cr0() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr0(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr0" : : "r" (_temp__)); \
+ })
+
+#define get_cr2() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#ifdef MACH_PV_PAGETABLES
+extern unsigned long cr3;
+#define get_cr3() (cr3)
+#define set_cr3(value) \
+ ({ \
+ cr3 = (value); \
+ if (!hyp_set_cr3(value)) \
+ panic("set_cr3"); \
+ })
+#else /* MACH_PV_PAGETABLES */
+#define get_cr3() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr3(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr3" : : "r" (_temp__) : "memory"); \
+ })
+#endif /* MACH_PV_PAGETABLES */
+
+#define flush_tlb() set_cr3(get_cr3())
+
+#ifndef MACH_PV_PAGETABLES
+#define invlpg(addr) \
+ ({ \
+ asm volatile("invlpg (%0)" : : "r" (addr)); \
+ })
+
+#define invlpg_linear(start) \
+ ({ \
+ asm volatile( \
+ "movw %w1,%%es\n" \
+ "\tinvlpg %%es:(%0)\n" \
+ "\tmovw %w2,%%es" \
+ :: "r" (start), "q" (LINEAR_DS), "q" (KERNEL_DS)); \
+ })
+
+#define invlpg_linear_range(start, end) \
+ ({ \
+ register unsigned long var = trunc_page(start); \
+ asm volatile( \
+ "movw %w2,%%es\n" \
+ "1:\tinvlpg %%es:(%0)\n" \
+ "\taddl %c4,%0\n" \
+ "\tcmpl %0,%1\n" \
+ "\tjb 1b\n" \
+ "\tmovw %w3,%%es" \
+ : "+r" (var) : "r" (end), \
+ "q" (LINEAR_DS), "q" (KERNEL_DS), "i" (PAGE_SIZE)); \
+ })
+#endif /* MACH_PV_PAGETABLES */
+
+#define get_cr4() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr4, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr4(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr4" : : "r" (_temp__)); \
+ })
+
+
+#ifdef MACH_RING1
+#define set_ts() \
+ hyp_fpu_taskswitch(1)
+#define clear_ts() \
+ hyp_fpu_taskswitch(0)
+#else /* MACH_RING1 */
+#define set_ts() \
+ set_cr0(get_cr0() | CR0_TS)
+
+#define clear_ts() \
+ asm volatile("clts")
+#endif /* MACH_RING1 */
+
+#define get_tr() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("str %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_tr(seg) \
+ asm volatile("ltr %0" : : "rm" ((unsigned short)(seg)) )
+
+#define get_ldt() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("sldt %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_ldt(seg) \
+ asm volatile("lldt %0" : : "rm" ((unsigned short)(seg)) )
+
+/* This doesn't set a processor register,
+ but it's often used immediately after setting one,
+ to flush the instruction queue. */
+#define flush_instr_queue() \
+ asm("jmp 0f\n" \
+ "0:\n")
+
+#ifdef MACH_RING1
+#define get_dr0() hyp_get_debugreg(0)
+#else
+#define get_dr0() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr0(value) hyp_set_debugreg(0, value)
+#else
+#define set_dr0(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr0" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr1() hyp_get_debugreg(1)
+#else
+#define get_dr1() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr1, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr1(value) hyp_set_debugreg(1, value)
+#else
+#define set_dr1(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr1" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr2() hyp_get_debugreg(2)
+#else
+#define get_dr2() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr2(value) hyp_set_debugreg(2, value)
+#else
+#define set_dr2(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr2" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr3() hyp_get_debugreg(3)
+#else
+#define get_dr3() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr3(value) hyp_set_debugreg(3, value)
+#else
+#define set_dr3(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr3" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr6() hyp_get_debugreg(6)
+#else
+#define get_dr6() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr6, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr6(value) hyp_set_debugreg(6, value)
+#else
+#define set_dr6(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr6" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr7() hyp_get_debugreg(7)
+#else
+#define get_dr7() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr7, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr7(value) hyp_set_debugreg(7, value)
+#else
+#define set_dr7(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr7" : : "r" (_temp__)); \
+ })
+#endif
+
+/* Note: gcc might want to use bx or the stack for %1 addressing, so we can't
+ * use them :/ */
+#ifdef __x86_64__
+#define cpuid(eax, ebx, ecx, edx) \
+{ \
+ uint64_t sav_rbx; \
+ asm( "mov %%rbx,%2\n\t" \
+ "cpuid\n\t" \
+ "xchg %2,%%rbx\n\t" \
+ "movl %k2,%1\n\t" \
+ : "+a" (eax), "=m" (ebx), "=&r" (sav_rbx), "+c" (ecx), "=&d" (edx)); \
+}
+#else
+#define cpuid(eax, ebx, ecx, edx) \
+{ \
+ asm ( "mov %%ebx,%1\n\t" \
+ "cpuid\n\t" \
+ "xchg %%ebx,%1\n\t" \
+ : "+a" (eax), "=&SD" (ebx), "+c" (ecx), "=&d" (edx)); \
+}
+#endif
+
+#endif /* __GNUC__ */
+#endif /* __ASSEMBLER__ */
+
+#endif /* _I386_PROC_REG_H_ */
diff --git a/i386/i386/sched_param.h b/i386/i386/sched_param.h
new file mode 100644
index 0000000..c93ed8a
--- /dev/null
+++ b/i386/i386/sched_param.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Scheduler parameters.
+ */
+
+#ifndef _I386_SCHED_PARAM_H_
+#define _I386_SCHED_PARAM_H_
+
+/*
+ * Sequent requires a right shift of 17 bits to convert
+ * microseconds to priorities.
+ */
+
+#define PRI_SHIFT 17
+
+#endif /* _I386_SCHED_PARAM_H_ */
diff --git a/i386/i386/seg.h b/i386/i386/seg.h
new file mode 100644
index 0000000..673d1d9
--- /dev/null
+++ b/i386/i386/seg.h
@@ -0,0 +1,264 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_SEG_H_
+#define _I386_SEG_H_
+
+#include <mach/inline.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * i386 segmentation.
+ */
+
+/* Note: the value of KERNEL_RING is handled by hand in locore.S */
+#ifdef MACH_RING1
+#define KERNEL_RING 1
+#else /* MACH_RING1 */
+#define KERNEL_RING 0
+#endif /* MACH_RING1 */
+
+#ifndef __ASSEMBLER__
+
+/*
+ * Real segment descriptor.
+ */
+struct real_descriptor {
+ unsigned int limit_low:16, /* limit 0..15 */
+ base_low:16, /* base 0..15 */
+ base_med:8, /* base 16..23 */
+ access:8, /* access byte */
+ limit_high:4, /* limit 16..19 */
+ granularity:4, /* granularity */
+ base_high:8; /* base 24..31 */
+};
+typedef struct real_descriptor real_descriptor_t;
+typedef real_descriptor_t *real_descriptor_list_t;
+typedef const real_descriptor_list_t const_real_descriptor_list_t;
+
+#ifdef __x86_64__
+struct real_descriptor64 {
+ unsigned int limit_low:16, /* limit 0..15 */
+ base_low:16, /* base 0..15 */
+ base_med:8, /* base 16..23 */
+ access:8, /* access byte */
+ limit_high:4, /* limit 16..19 */
+ granularity:4, /* granularity */
+ base_high:8, /* base 24..31 */
+ base_ext:32, /* base 32..63 */
+ reserved1:8,
+ zero:5,
+ reserved2:19;
+};
+#endif
+
+struct real_gate {
+ unsigned int offset_low:16, /* offset 0..15 */
+ selector:16,
+ word_count:8,
+ access:8,
+ offset_high:16; /* offset 16..31 */
+#ifdef __x86_64__
+ unsigned int offset_ext:32, /* offset 32..63 */
+ reserved:32;
+#endif
+};
+
+#endif /* !__ASSEMBLER__ */
+
+#define SZ_64 0x2 /* 64-bit segment */
+#define SZ_32 0x4 /* 32-bit segment */
+#define SZ_16 0x0 /* 16-bit segment */
+#define SZ_G 0x8 /* 4K limit field */
+
+#define ACC_A 0x01 /* accessed */
+#define ACC_TYPE 0x1e /* type field: */
+
+#define ACC_TYPE_SYSTEM 0x00 /* system descriptors: */
+
+#define ACC_LDT 0x02 /* LDT */
+#define ACC_CALL_GATE_16 0x04 /* 16-bit call gate */
+#define ACC_TASK_GATE 0x05 /* task gate */
+#define ACC_TSS 0x09 /* task segment */
+#define ACC_CALL_GATE 0x0c /* call gate */
+#define ACC_INTR_GATE 0x0e /* interrupt gate */
+#define ACC_TRAP_GATE 0x0f /* trap gate */
+
+#define ACC_TSS_BUSY 0x02 /* task busy */
+
+#define ACC_TYPE_USER 0x10 /* user descriptors */
+
+#define ACC_DATA 0x10 /* data */
+#define ACC_DATA_W 0x12 /* data, writable */
+#define ACC_DATA_E 0x14 /* data, expand-down */
+#define ACC_DATA_EW 0x16 /* data, expand-down,
+ writable */
+#define ACC_CODE 0x18 /* code */
+#define ACC_CODE_R 0x1a /* code, readable */
+#define ACC_CODE_C 0x1c /* code, conforming */
+#define ACC_CODE_CR 0x1e /* code, conforming,
+ readable */
+#define ACC_PL 0x60 /* access rights: */
+#define ACC_PL_K (KERNEL_RING << 5) /* kernel access only */
+#define ACC_PL_U 0x60 /* user access */
+#define ACC_P 0x80 /* segment present */
+
+/*
+ * Components of a selector
+ */
+#define SEL_LDT 0x04 /* local selector */
+#define SEL_PL 0x03 /* privilege level: */
+#define SEL_PL_K KERNEL_RING /* kernel selector */
+#define SEL_PL_U 0x03 /* user selector */
+
+/*
+ * Convert selector to descriptor table index.
+ */
+#define sel_idx(sel) ((sel)>>3)
+
+
+#ifndef __ASSEMBLER__
+
+#include <mach/inline.h>
+#include <mach/xen.h>
+
+
+/* Format of a "pseudo-descriptor", used for loading the IDT and GDT. */
+struct pseudo_descriptor
+{
+ unsigned short limit;
+ unsigned long linear_base;
+ short pad;
+} __attribute__((packed));
+
+
+/* Load the processor's IDT, GDT, or LDT pointers. */
+static inline void lgdt(struct pseudo_descriptor *pdesc)
+{
+ __asm volatile("lgdt %0" : : "m" (*pdesc));
+}
+static inline void lidt(struct pseudo_descriptor *pdesc)
+{
+ __asm volatile("lidt %0" : : "m" (*pdesc));
+}
+static inline void lldt(unsigned short ldt_selector)
+{
+ __asm volatile("lldt %w0" : : "r" (ldt_selector) : "memory");
+}
+
+#ifdef CODE16
+#define i16_lgdt lgdt
+#define i16_lidt lidt
+#define i16_lldt lldt
+#endif
+
+
+/* Fill a segment descriptor. */
+static inline void
+fill_descriptor(struct real_descriptor *_desc, vm_offset_t base, vm_offset_t limit,
+ unsigned char access, unsigned char sizebits)
+{
+ /* TODO: when !MACH_PV_DESCRIPTORS, setting desc and just memcpy isn't simpler actually */
+#ifdef MACH_PV_DESCRIPTORS
+ struct real_descriptor __desc, *desc = &__desc;
+#else /* MACH_PV_DESCRIPTORS */
+ struct real_descriptor *desc = _desc;
+#endif /* MACH_PV_DESCRIPTORS */
+ if (limit > 0xfffff)
+ {
+ limit >>= 12;
+ sizebits |= SZ_G;
+ }
+ desc->limit_low = limit & 0xffff;
+ desc->base_low = base & 0xffff;
+ desc->base_med = (base >> 16) & 0xff;
+ desc->access = access | ACC_P;
+ desc->limit_high = limit >> 16;
+ desc->granularity = sizebits;
+ desc->base_high = base >> 24;
+#ifdef MACH_PV_DESCRIPTORS
+ if (hyp_do_update_descriptor(kv_to_ma(_desc), *(uint64_t*)desc))
+ panic("couldn't update descriptor(%zu to %08lx%08lx)\n", (vm_offset_t) kv_to_ma(_desc), *(((unsigned long*)desc)+1), *(unsigned long *)desc);
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+#ifdef __x86_64__
+static inline void
+fill_descriptor64(struct real_descriptor64 *_desc, unsigned long base, unsigned limit,
+ unsigned char access, unsigned char sizebits)
+{
+ /* TODO: when !MACH_PV_DESCRIPTORS, setting desc and just memcpy isn't simpler actually */
+#ifdef MACH_PV_DESCRIPTORS
+ struct real_descriptor64 __desc, *desc = &__desc;
+#else /* MACH_PV_DESCRIPTORS */
+ struct real_descriptor64 *desc = _desc;
+#endif /* MACH_PV_DESCRIPTORS */
+ if (limit > 0xfffff)
+ {
+ limit >>= 12;
+ sizebits |= SZ_G;
+ }
+ desc->limit_low = limit & 0xffff;
+ desc->base_low = base & 0xffff;
+ desc->base_med = (base >> 16) & 0xff;
+ desc->access = access | ACC_P;
+ desc->limit_high = limit >> 16;
+ desc->granularity = sizebits;
+ desc->base_high = base >> 24;
+ desc->base_ext = base >> 32;
+ desc->reserved1 = 0;
+ desc->zero = 0;
+ desc->reserved2 = 0;
+#ifdef MACH_PV_DESCRIPTORS
+ if (hyp_do_update_descriptor(kv_to_ma(_desc), *(uint64_t*)desc))
+ panic("couldn't update descriptor(%lu to %08lx%08lx)\n", (vm_offset_t) kv_to_ma(_desc), *(((unsigned long*)desc)+1), *(unsigned long *)desc);
+#endif /* MACH_PV_DESCRIPTORS */
+}
+#endif
+
+/* Fill a gate with particular values. */
+static inline void
+fill_gate(struct real_gate *gate, unsigned long offset, unsigned short selector,
+ unsigned char access, unsigned char word_count)
+{
+ gate->offset_low = offset & 0xffff;
+ gate->selector = selector;
+ gate->word_count = word_count;
+ gate->access = access | ACC_P;
+ gate->offset_high = (offset >> 16) & 0xffff;
+#ifdef __x86_64__
+ gate->offset_ext = offset >> 32;
+ gate->reserved = 0;
+#endif
+}
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* _I386_SEG_H_ */
diff --git a/i386/i386/setjmp.h b/i386/i386/setjmp.h
new file mode 100644
index 0000000..eacc8e4
--- /dev/null
+++ b/i386/i386/setjmp.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Setjmp/longjmp buffer for i386.
+ */
+#ifndef _I386_SETJMP_H_
+#define _I386_SETJMP_H_
+
+/* Register-save area for the kernel-internal _setjmp/_longjmp.
+ * Only the callee-saved registers plus the stack and instruction
+ * pointers need to be preserved. */
+typedef struct jmp_buf {
+#ifdef __i386__
+ int jmp_buf[6]; /* ebx, esi, edi, ebp, esp, eip */
+#else
+ long jmp_buf[8]; /* rbx, rbp, r12, r13, r14, r15, rsp, rip */
+#endif
+} jmp_buf_t;
+
+/* Save the current register state; returns 0 on the direct call and
+ * the value given to _longjmp when resumed through it. */
+extern int _setjmp(jmp_buf_t*);
+
+/* Restore the state saved by _setjmp; never returns to the caller. */
+extern void _longjmp(jmp_buf_t*, int) __attribute__ ((noreturn));
+
+#endif /* _I386_SETJMP_H_ */
diff --git a/i386/i386/smp.c b/i386/i386/smp.c
new file mode 100644
index 0000000..05e9de6
--- /dev/null
+++ b/i386/i386/smp.c
@@ -0,0 +1,199 @@
+/* smp.c - i386 SMP controller for Mach
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <string.h>
+#include <i386/apic.h>
+#include <i386/smp.h>
+#include <i386/cpu.h>
+#include <i386/pio.h>
+#include <i386/vm_param.h>
+#include <i386at/idt.h>
+#include <i386at/cram.h>
+#include <i386at/acpi_parse_apic.h>
+#include <kern/printf.h>
+#include <mach/machine.h>
+
+#include <kern/smp.h>
+
+/*
+ * smp_data_init: initialize the SMP bookkeeping data.
+ * Must be called after smp_init(), once all APIC structures
+ * have been initialized.
+ */
+static void smp_data_init(void)
+{
+ uint8_t ncpus = apic_get_numcpus();
+ int cpu;
+
+ smp_set_numcpus(ncpus);
+
+ /* Mark every enumerated CPU slot as a usable processor. */
+ for (cpu = 0; cpu < ncpus; cpu++)
+ machine_slot[cpu].is_cpu = TRUE;
+}
+
+/* Send a fixed-vector IPI to the processor with local APIC id APIC_ID.
+ * The assert/deassert pair is issued with local interrupts disabled,
+ * waiting for the APIC to report delivery after each step. */
+static void smp_send_ipi(unsigned apic_id, unsigned vector)
+{
+ unsigned long flags;
+
+ cpu_intr_save(&flags);
+
+ apic_send_ipi(NO_SHORTHAND, FIXED, PHYSICAL, ASSERT, EDGE, vector, apic_id);
+
+ /* Spin until the local APIC has delivered the assert. */
+ do {
+ cpu_pause();
+ } while(lapic->icr_low.delivery_status == SEND_PENDING);
+
+ apic_send_ipi(NO_SHORTHAND, FIXED, PHYSICAL, DE_ASSERT, EDGE, vector, apic_id);
+
+ /* Spin until the deassert has been delivered as well. */
+ do {
+ cpu_pause();
+ } while(lapic->icr_low.delivery_status == SEND_PENDING);
+
+ cpu_intr_restore(flags);
+}
+
+/* Ask the CPU with local APIC id APIC_ID to run its AST check. */
+void smp_remote_ast(unsigned apic_id)
+{
+ smp_send_ipi(apic_id, CALL_AST_CHECK);
+}
+
+/* Ask the CPU with local APIC id APIC_ID to process a pmap update. */
+void smp_pmap_update(unsigned apic_id)
+{
+ smp_send_ipi(apic_id, CALL_PMAP_UPDATE);
+}
+
+static void
+wait_for_ipi(void)
+{
+ /* Spin until the local APIC clears its delivery-status bit.
+ * This could have a timeout, but if the IPI is never
+ * delivered, it's a disaster anyway. */
+ for (;;) {
+ if (lapic->icr_low.delivery_status != SEND_PENDING)
+ break;
+ cpu_pause();
+ }
+}
+
+/* Send the INIT assert/deassert sequence to the processor with local
+ * APIC id APIC_ID. APIC errors are logged but treated as non-fatal;
+ * always returns 0. */
+static int
+smp_send_ipi_init(int apic_id)
+{
+ int err;
+
+ /* Clear any stale bits in the APIC error-status register. */
+ lapic->error_status.r = 0;
+
+ /* Assert INIT IPI:
+ *
+ * This is EDGE triggered to match the deassert
+ */
+ apic_send_ipi(NO_SHORTHAND, INIT, PHYSICAL, ASSERT, EDGE, 0, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+ hpet_mdelay(10);
+
+ /* Deassert INIT IPI:
+ *
+ * NB: This must be an EDGE triggered deassert signal.
+ * A LEVEL triggered deassert is only supported on very old hardware
+ * that does not support STARTUP IPIs at all, and instead jump
+ * via a warm reset vector.
+ */
+ apic_send_ipi(NO_SHORTHAND, INIT, PHYSICAL, DE_ASSERT, EDGE, 0, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+
+ err = lapic->error_status.r;
+ if (err) {
+ printf("ESR error upon INIT 0x%x\n", err);
+ }
+ return 0;
+}
+
+/* Send a STARTUP (SIPI) IPI carrying VECTOR to the processor with
+ * local APIC id APIC_ID. APIC errors are logged but non-fatal;
+ * always returns 0. */
+static int
+smp_send_ipi_startup(int apic_id, int vector)
+{
+ int err;
+
+ /* Clear any stale bits in the APIC error-status register. */
+ lapic->error_status.r = 0;
+
+ /* StartUp IPI:
+ *
+ * Have not seen any documentation for trigger mode for this IPI
+ * but it seems to work with EDGE. (AMD BKDG FAM16h document specifies dont care)
+ */
+ apic_send_ipi(NO_SHORTHAND, STARTUP, PHYSICAL, ASSERT, EDGE, vector, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+
+ err = lapic->error_status.r;
+ if (err) {
+ printf("ESR error upon STARTUP 0x%x\n", err);
+ }
+ return 0;
+}
+
+/* See Intel IA32/64 Software Developer's Manual 3A Section 8.4.4.1 */
+/* Start the application processor with local APIC id APIC_ID executing
+ * at physical address START_EIP, using the INIT - SIPI - SIPI sequence.
+ * START_EIP is presumably 4KiB-aligned and below 1MiB so it fits the
+ * 8-bit SIPI vector -- TODO confirm at the call site.
+ * Always returns 0. */
+int smp_startup_cpu(unsigned apic_id, phys_addr_t start_eip)
+{
+#if 0
+ /* This block goes with a legacy method of INIT that only works with
+ * old hardware that does not support SIPIs.
+ * Must use INIT DEASSERT LEVEL triggered IPI to use this block.
+ * (At least one AMD FCH does not support this IPI mode,
+ * See AMD BKDG FAM16h document # 48751 page 461).
+ */
+
+ /* Tell CMOS to warm reset through 40:67 */
+ outb(CMOS_ADDR, CMOS_SHUTDOWN);
+ outb(CMOS_DATA, CM_JMP_467);
+
+ /* Set warm reset vector to point to AP startup code */
+ uint16_t dword[2];
+ dword[0] = 0;
+ dword[1] = start_eip >> 4;
+ memcpy((uint8_t *)phystokv(0x467), dword, 4);
+#endif
+
+ /* Local cache flush */
+ asm("wbinvd":::"memory");
+
+ printf("Sending IPIs to APIC ID %u...\n", apic_id);
+
+ smp_send_ipi_init(apic_id);
+ hpet_mdelay(10);
+ smp_send_ipi_startup(apic_id, start_eip >> STARTUP_VECTOR_SHIFT);
+ hpet_udelay(200);
+ /* The SIPI is sent twice, as recommended by the MP init protocol. */
+ smp_send_ipi_startup(apic_id, start_eip >> STARTUP_VECTOR_SHIFT);
+ hpet_udelay(200);
+
+ printf("done\n");
+ return 0;
+}
+
+/*
+ * smp_init: initialize the SMP support, starting the cpus searching
+ * and enumeration.
+ * Always returns 0.
+ */
+int smp_init(void)
+{
+ smp_data_init();
+
+ return 0;
+}
diff --git a/i386/i386/smp.h b/i386/i386/smp.h
new file mode 100644
index 0000000..73d273e
--- /dev/null
+++ b/i386/i386/smp.h
@@ -0,0 +1,34 @@
+/* smp.h - i386 SMP controller for Mach. Header file
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef _SMP_H_
+#define _SMP_H_
+
+#include <mach/machine/vm_types.h>
+
+/* Enumerate the available processors and set up SMP bookkeeping. */
+int smp_init(void);
+/* Send an AST-check IPI to the CPU with the given local APIC id. */
+void smp_remote_ast(unsigned apic_id);
+/* Send a pmap-update IPI to the CPU with the given local APIC id. */
+void smp_pmap_update(unsigned apic_id);
+/* Boot an application processor at physical address start_eip. */
+int smp_startup_cpu(unsigned apic_id, phys_addr_t start_eip);
+
+/* Spin-wait hint for busy loops; also a compiler barrier. */
+#define cpu_pause() asm volatile ("pause" : : : "memory")
+/* Converts a 4KiB-aligned startup address to a SIPI vector (>> 12). */
+#define STARTUP_VECTOR_SHIFT (20 - 8)
+
+#endif
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
new file mode 100644
index 0000000..2f2c8e3
--- /dev/null
+++ b/i386/i386/spl.S
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 1995 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * spl routines for the i386at.
+ */
+
+#include <mach/machine/asm.h>
+#include <i386/ipl.h>
+#include <i386/i386asm.h>
+#include <i386/xen.h>
+#include <i386/cpu_number.h>
+#include <i386/gdt.h>
+
+/* mb: full memory barrier (locked no-op RMW on the stack) on SMP;
+ * expands to nothing on uniprocessor builds. */
+#if NCPUS > 1
+#define mb lock; addl $0,(%esp)
+#else
+#define mb
+#endif
+
+/*
+ * Program XEN evt masks from %eax.
+ * Clobbers %eax. If an event that was masked is pending and becomes
+ * unmasked by the new mask, it is marked active so it gets handled.
+ */
+#define XEN_SETMASK() \
+ pushl %ebx; \
+ movl %eax,%ebx; \
+ xchgl %eax,hyp_shared_info+EVTMASK; \
+ notl %ebx; \
+ andl %eax,%ebx; /* Get unmasked events */ \
+ testl hyp_shared_info+PENDING, %ebx; \
+ popl %ebx; \
+ jz 9f; /* Check whether there was some pending */ \
+lock orl $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \
+ movb $1,hyp_shared_info+CPU_PENDING; \
+9:
+
+/*
+ * spl0: lower the interrupt priority level to SPL0 and return the
+ * previous level in %eax. Pending soft interrupts (linux bottom
+ * halves, softclock) are run before the level is dropped.
+ */
+ENTRY(spl0)
+ mb;
+ CPU_NUMBER(%edx)
+ movl CX(EXT(curr_ipl),%edx),%eax /* save current ipl */
+ pushl %eax
+ cli /* disable interrupts */
+#ifdef LINUX_DEV
+ movl EXT(bh_active),%eax
+ /* get pending mask */
+ andl EXT(bh_mask),%eax /* any pending unmasked interrupts? */
+ jz 1f /* no, skip */
+ call EXT(spl1) /* block further interrupts */
+ incl EXT(intr_count) /* set interrupt flag */
+ call EXT(linux_soft_intr) /* go handle interrupt */
+ decl EXT(intr_count) /* decrement interrupt flag */
+ cli /* disable interrupts */
+1:
+#endif
+ cmpl $0,softclkpending /* softclock pending? */
+ je 1f /* no, skip */
+ movl $0,softclkpending /* clear flag */
+ call EXT(spl1) /* block further interrupts */
+#ifdef LINUX_DEV
+ incl EXT(intr_count) /* set interrupt flag */
+#endif
+ call EXT(softclock) /* go handle interrupt */
+#ifdef LINUX_DEV
+ decl EXT(intr_count) /* decrement interrupt flag */
+#endif
+ cli /* disable interrupts */
+1:
+ CPU_NUMBER(%edx)
+ cmpl $(SPL0),CX(EXT(curr_ipl),%edx) /* are we at spl0? */
+ je 1f /* yes, all done */
+ movl $(SPL0),CX(EXT(curr_ipl),%edx) /* set ipl */
+#ifdef MACH_XEN
+ movl EXT(int_mask)+SPL0*4,%eax
+ /* get xen mask */
+ XEN_SETMASK() /* program xen evts */
+#endif
+1:
+ sti /* enable interrupts */
+ popl %eax /* return previous mask */
+ ret
+
+
+/*
+ * Historically, SETIPL(level) was called
+ * for spl levels 1-6, now we have combined
+ * all the intermediate levels into the highest level
+ * such that interrupts are either on or off,
+ * since modern hardware can handle it.
+ * This simplifies the interrupt handling
+ * especially for the linux drivers.
+ */
+/* All of the entry points below raise to SPL7 (interrupts disabled)
+ * and return the previous ipl in %eax. */
+Entry(splsoftclock)
+ENTRY(spl1)
+ENTRY(spl2)
+ENTRY(spl3)
+Entry(splnet)
+Entry(splhdw)
+ENTRY(spl4)
+Entry(splbio)
+Entry(spldcm)
+ENTRY(spl5)
+Entry(spltty)
+Entry(splimp)
+Entry(splvm)
+ENTRY(spl6)
+Entry(splclock)
+Entry(splsched)
+Entry(splhigh)
+Entry(splhi)
+ENTRY(spl7)
+ mb;
+ /* just clear IF */
+ cli
+ CPU_NUMBER(%edx)
+ movl $SPL7,%eax
+ xchgl CX(EXT(curr_ipl),%edx),%eax
+ ret
+
+/*
+ * splx(ipl): restore the interrupt priority level to IPL,
+ * returning the previous level in %eax.
+ */
+ENTRY(splx)
+ movl S_ARG0,%edx /* get ipl */
+ CPU_NUMBER(%eax)
+#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
+ /* First make sure that if we're exiting from ipl7, IF is still cleared */
+ cmpl $SPL7,CX(EXT(curr_ipl),%eax) /* from ipl7? */
+ jne 0f
+ pushfl
+ popl %eax
+ testl $0x200,%eax /* IF? */
+ jz 0f
+ int3 /* Oops, interrupts got enabled?! */
+
+0:
+#endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
+ testl %edx,%edx /* spl0? */
+ jz EXT(spl0) /* yes, handle specially */
+ CPU_NUMBER(%eax)
+ cmpl CX(EXT(curr_ipl),%eax),%edx /* same ipl as current? */
+ jne spl /* no */
+ cmpl $SPL7,%edx /* spl7? */
+ je 1f /* to ipl7, don't enable interrupts */
+ sti /* ensure interrupts are enabled */
+1:
+ movl %edx,%eax /* return previous ipl */
+ ret
+
+/*
+ * Like splx() but returns with interrupts disabled and does
+ * not return the previous ipl. This should only be called
+ * when returning from an interrupt.
+ * Pending soft interrupts are run when dropping to spl0.
+ */
+ .align TEXT_ALIGN
+ .globl splx_cli
+splx_cli:
+ movl S_ARG0,%edx /* get ipl */
+ cli /* disable interrupts */
+ testl %edx,%edx /* spl0? */
+ jnz 2f /* no, skip */
+#ifdef LINUX_DEV
+ movl EXT(bh_active),%eax
+ /* get pending mask */
+ andl EXT(bh_mask),%eax /* any pending unmasked interrupts? */
+ jz 1f /* no, skip */
+ call EXT(spl1) /* block further interrupts */
+ incl EXT(intr_count) /* set interrupt flag */
+ call EXT(linux_soft_intr) /* go handle interrupt */
+ decl EXT(intr_count) /* decrement interrupt flag */
+ cli /* disable interrupts */
+1:
+#endif
+ cmpl $0,softclkpending /* softclock pending? */
+ je 1f /* no, skip */
+ movl $0,softclkpending /* clear flag */
+ call EXT(spl1) /* block further interrupts */
+#ifdef LINUX_DEV
+ incl EXT(intr_count) /* set interrupt flag */
+#endif
+ call EXT(softclock) /* go handle interrupt */
+#ifdef LINUX_DEV
+ decl EXT(intr_count) /* decrement interrupt flag */
+#endif
+ cli /* disable interrupts */
+1:
+ xorl %edx,%edx /* edx = ipl 0 */
+2:
+ CPU_NUMBER(%eax)
+ cmpl CX(EXT(curr_ipl),%eax),%edx /* same ipl as current? */
+ je 1f /* yes, all done */
+ movl %edx,CX(EXT(curr_ipl),%eax) /* set ipl */
+#ifdef MACH_XEN
+ movl EXT(int_mask)(,%edx,4),%eax
+ /* get int mask */
+ XEN_SETMASK() /* program xen evts with new mask */
+#endif
+1:
+ ret
+
+/*
+ * NOTE: This routine must *not* use %ecx, otherwise
+ * the interrupt code will break.
+ */
+/*
+ * Internal entry point: %edx holds the new ipl; returns the
+ * previous ipl in %eax with interrupts enabled.
+ */
+ .align TEXT_ALIGN
+ .globl spl
+spl:
+ CPU_NUMBER(%eax)
+#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
+ /* First make sure that if we're exiting from ipl7, IF is still cleared */
+ cmpl $SPL7,CX(EXT(curr_ipl),%eax) /* from ipl7? */
+ jne 0f
+ pushfl
+ popl %eax
+ testl $0x200,%eax /* IF? */
+ jz 0f
+ int3 /* Oops, interrupts got enabled?! */
+
+0:
+#endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
+ cmpl $SPL7,%edx /* spl7? */
+ je EXT(spl7) /* yes, handle specially */
+#ifdef MACH_XEN
+ movl EXT(int_mask)(,%edx,4),%eax
+ /* get int mask */
+#endif
+ cli /* disable interrupts */
+ CPU_NUMBER(%eax)
+ xchgl CX(EXT(curr_ipl),%eax),%edx /* set ipl */
+#ifdef MACH_XEN
+ XEN_SETMASK() /* program PICs with new mask */
+#endif
+ sti /* enable interrupts */
+ movl %edx,%eax /* return previous ipl */
+ ret
+
+/* sploff: disable interrupts, returning the previous EFLAGS in %eax. */
+ENTRY(sploff)
+ pushfl
+ popl %eax
+ cli
+ ret
+
+/* splon(flags): restore EFLAGS as previously saved by sploff. */
+ENTRY(splon)
+ pushl 4(%esp)
+ popfl
+ ret
+
+ .data
+ .align DATA_ALIGN
+/* Pending-softclock flag, consumed and cleared by spl0/splx_cli. */
+softclkpending:
+ .long 0
+ .text
+
+/* setsoftclock: request that softclock() run when the ipl drops. */
+ENTRY(setsoftclock)
+ incl softclkpending
+ ret
diff --git a/i386/i386/spl.h b/i386/i386/spl.h
new file mode 100644
index 0000000..41ad225
--- /dev/null
+++ b/i386/i386/spl.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _MACHINE_SPL_H_
+#define _MACHINE_SPL_H_
+
+/*
+ * This file defines the interrupt priority levels used by
+ * machine-dependent code.
+ */
+
+/* An interrupt priority level value. */
+typedef int spl_t;
+
+/* Each spl*() call below sets the interrupt priority level and returns
+ * the previous one. The names are parenthesized so the declarations
+ * coexist with like-named macros. */
+extern spl_t (splhi)(void);
+
+extern spl_t (spl0)(void);
+
+extern spl_t (spl1)(void);
+extern spl_t (splsoftclock)(void);
+
+extern spl_t (spl2)(void);
+
+extern spl_t (spl3)(void);
+
+extern spl_t (spl4)(void);
+extern spl_t (splnet)(void);
+extern spl_t (splhdw)(void);
+
+extern spl_t (spl5)(void);
+extern spl_t (splbio)(void);
+extern spl_t (spldcm)(void);
+
+extern spl_t (spl6)(void);
+extern spl_t (spltty)(void);
+extern spl_t (splimp)(void);
+extern spl_t (splvm)(void);
+
+extern spl_t (spl7)(void);
+extern spl_t (splclock)(void);
+extern spl_t (splsched)(void);
+#define assert_splsched() assert(splsched() == SPL7)
+extern spl_t (splhigh)(void);
+
+/* Restore a previously saved level, returning the one it replaced. */
+extern spl_t (splx)(spl_t n);
+extern spl_t (splx_cli)(spl_t n);
+
+/* Raw EFLAGS-based interrupt enable/disable (see i386/i386/spl.S). */
+extern void splon (unsigned long n);
+
+extern unsigned long sploff (void);
+
+extern void setsoftclock (void);
+extern int spl_init;
+
+/* XXX Include each other... */
+#include <i386/ipl.h>
+
+#endif /* _MACHINE_SPL_H_ */
diff --git a/i386/i386/strings.c b/i386/i386/strings.c
new file mode 100644
index 0000000..f1752de
--- /dev/null
+++ b/i386/i386/strings.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#define ARCH_STRING_MEMCPY
+#define ARCH_STRING_MEMMOVE
+#define ARCH_STRING_MEMSET
+#define ARCH_STRING_MEMCMP
+
+#ifdef ARCH_STRING_MEMCPY
+/* Copy N bytes from SRC to DEST; the regions must not overlap.
+ * Returns DEST. A single "rep movsb" is used, which modern x86
+ * implementations optimize internally. */
+void *
+memcpy(void *dest, const void *src, size_t n)
+{
+ void *orig_dest;
+
+ orig_dest = dest;
+ asm volatile("rep movsb"
+ : "+D" (dest), "+S" (src), "+c" (n)
+ : : "memory");
+ return orig_dest;
+}
+#endif /* ARCH_STRING_MEMCPY */
+
+#ifdef ARCH_STRING_MEMMOVE
+/* Copy N bytes from SRC to DEST, handling overlapping regions.
+ * Returns DEST. A forward "rep movsb" is safe when DEST starts at or
+ * below SRC; otherwise copy backwards with the direction flag set. */
+void *
+memmove(void *dest, const void *src, size_t n)
+{
+ void *orig_dest;
+
+ orig_dest = dest;
+
+ /* Nothing to copy. Also avoids forming out-of-range pointers
+ * (dest + n - 1 underflows when n == 0) in the backward path. */
+ if (n == 0)
+ return orig_dest;
+
+ if (dest <= src)
+ asm volatile("rep movsb"
+ : "+D" (dest), "+S" (src), "+c" (n)
+ : : "memory");
+ else {
+ dest += n - 1;
+ src += n - 1;
+ /* std: copy backwards; cld restores the forward direction
+ * expected by the rest of the kernel before returning. */
+ asm volatile("std; rep movsb; cld"
+ : "+D" (dest), "+S" (src), "+c" (n)
+ : : "memory");
+ }
+
+ return orig_dest;
+}
+#endif /* ARCH_STRING_MEMMOVE */
+
+#ifdef ARCH_STRING_MEMSET
+/* Fill the first N bytes of S with the byte value C ("rep stosb"
+ * stores %al, i.e. the low byte of C). Returns S. */
+void *
+memset(void *s, int c, size_t n)
+{
+ void *orig_s;
+
+ orig_s = s;
+ asm volatile("rep stosb"
+ : "+D" (s), "+c" (n)
+ : "a" (c)
+ : "memory");
+ return orig_s;
+}
+#endif /* ARCH_STRING_MEMSET */
+
+#ifdef ARCH_STRING_MEMCMP
+/* Compare the first N bytes of S1 and S2; returns <0, 0 or >0.
+ * After "repe cmpsb" stops, the pointer operands have advanced one
+ * past the last byte compared, so the decremented pointers below
+ * recover the bytes that decided the comparison (equal when the
+ * whole range matched, yielding 0). */
+int
+memcmp(const void *s1, const void *s2, size_t n)
+{
+ unsigned char c1, c2;
+
+ if (n == 0)
+ return 0;
+
+ asm volatile("repe cmpsb"
+ : "+D" (s1), "+S" (s2), "+c" (n)
+ : : "memory");
+ c1 = *(((const unsigned char *)s1) - 1);
+ c2 = *(((const unsigned char *)s2) - 1);
+ return (int)c1 - (int)c2;
+}
+#endif /* ARCH_STRING_MEMCMP */
diff --git a/i386/i386/task.h b/i386/i386/task.h
new file mode 100644
index 0000000..0060ad4
--- /dev/null
+++ b/i386/i386/task.h
@@ -0,0 +1,61 @@
+/* Data types for machine specific parts of tasks on i386.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _I386_TASK_H_
+#define _I386_TASK_H_
+
+#include <kern/kern_types.h>
+#include <kern/slab.h>
+
+/* The machine specific data of a task. */
+struct machine_task
+{
+ /* A lock protecting iopb_size and iopb. */
+ decl_simple_lock_data (, iopb_lock);
+
+ /* The highest I/O port number enabled. */
+ int iopb_size;
+
+ /* The I/O permission bitmap. Presumably allocated from
+ machine_task_iopb_cache below -- verify in i386/i386/task.c. */
+ unsigned char *iopb;
+};
+
+
+extern struct kmem_cache machine_task_iopb_cache;
+
+/* Initialize the machine task module. The function is called once at
+ start up by task_init in kern/task.c. */
+void machine_task_module_init (void);
+
+/* Initialize the machine specific part of task TASK. */
+void machine_task_init (task_t);
+
+/* Destroy the machine specific part of task TASK and release all
+ associated resources. */
+void machine_task_terminate (task_t);
+
+/* Try to release as much memory from the machine specific data in
+ task TASK. */
+void machine_task_collect (task_t);
+
+#endif /* _I386_TASK_H_ */
diff --git a/i386/i386/thread.h b/i386/i386/thread.h
new file mode 100644
index 0000000..9c88d09
--- /dev/null
+++ b/i386/i386/thread.h
@@ -0,0 +1,276 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: machine/thread.h
+ *
+ * This file contains the structure definitions for the thread
+ * state as applied to I386 processors.
+ */
+
+#ifndef _I386_THREAD_H_
+#define _I386_THREAD_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/fp_reg.h>
+#include <mach/machine/thread_status.h>
+
+#include <kern/lock.h>
+
+#include "gdt.h"
+
+/*
+ * i386_saved_state:
+ *
+ * This structure corresponds to the state of user registers
+ * as saved upon kernel entry. It lives in the pcb.
+ * It is also pushed onto the stack for exceptions in the kernel.
+ */
+
+struct i386_saved_state {
+#if !defined(__x86_64__) || defined(USER32)
+ unsigned long gs;
+ unsigned long fs;
+ unsigned long es;
+ unsigned long ds;
+#endif
+#ifdef __x86_64__
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+#endif
+ /* NOTE(review): on x86_64 the legacy names below presumably hold
+ the corresponding 64-bit registers (rdi, rsi, ...); the fields
+ are unsigned long in either case. */
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long cr2; /* kernel esp stored by pusha -
+ we save cr2 here later */
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno; /* trap number */
+ unsigned long err; /* error code */
+ unsigned long eip;
+ unsigned long cs;
+ unsigned long efl;
+ unsigned long uesp;
+ unsigned long ss;
+#if !defined(__x86_64__) || defined(USER32)
+ struct v86_segs {
+ unsigned long v86_es; /* virtual 8086 segment registers */
+ unsigned long v86_ds;
+ unsigned long v86_fs;
+ unsigned long v86_gs;
+ } v86_segs;
+#endif
+};
+
+/*
+ * i386_exception_link:
+ *
+ * This structure lives at the high end of the kernel stack.
+ * It points to the current thread`s user registers.
+ */
+struct i386_exception_link {
+ /* user register state saved at kernel entry (see i386_saved_state) */
+ struct i386_saved_state *saved_state;
+};
+
+/*
+ * i386_kernel_state:
+ *
+ * This structure corresponds to the state of kernel registers
+ * as saved in a context-switch. It lives at the base of the stack.
+ */
+
+struct i386_kernel_state {
+ /* Only the callee-saved registers, the stack pointer and the
+ resume address need to survive a context switch. */
+ long k_ebx; /* kernel context */
+ long k_esp;
+ long k_ebp;
+#ifdef __i386__
+ long k_edi;
+ long k_esi;
+#endif
+ long k_eip;
+#ifdef __x86_64__
+ long k_r12;
+ long k_r13;
+ long k_r14;
+ long k_r15;
+#endif
+};
+
+/*
+ * Save area for user floating-point state.
+ * Allocated only when necessary.
+ */
+
+struct i386_fpsave_state {
+ boolean_t fp_valid; /* does the area below hold live state? */
+
+ /* Classic vs. extended (xfp) save layouts share the same storage;
+ presumably only one form is in use at a time -- verify in fpu.c. */
+ union {
+ struct {
+ struct i386_fp_save fp_save_state;
+ struct i386_fp_regs fp_regs;
+ };
+ struct i386_xfp_save xfp_save_state;
+ };
+};
+
+#if !defined(__x86_64__) || defined(USER32)
+/*
+ * v86_assist_state:
+ *
+ * This structure provides data to simulate 8086 mode
+ * interrupts. It lives in the pcb.
+ */
+
+struct v86_assist_state {
+ vm_offset_t int_table; /* presumably base of the 8086 interrupt table */
+ unsigned short int_count; /* presumably number of table entries -- verify */
+ unsigned short flags; /* 8086 flag bits */
+};
+#define V86_IF_PENDING 0x8000 /* unused bit */
+#endif
+
+#if defined(__x86_64__) && !defined(USER32)
+struct i386_segment_base_state {
+ unsigned long fsbase; /* %fs segment base address */
+ unsigned long gsbase; /* %gs segment base address */
+};
+#endif
+
+/*
+ * i386_interrupt_state:
+ *
+ * This structure describes the set of registers that must
+ * be pushed on the current ring-0 stack by an interrupt before
+ * we can switch to the interrupt stack.
+ */
+
+struct i386_interrupt_state {
+ /* NOTE(review): the field order presumably mirrors the interrupt
+ entry push sequence -- do not reorder without checking locore. */
+#if !defined(__x86_64__) || defined(USER32)
+ long gs;
+ long fs;
+ long es;
+ long ds;
+#endif
+#ifdef __x86_64__
+ long r11;
+ long r10;
+ long r9;
+ long r8;
+ long rdi;
+ long rsi;
+#endif
+ long edx;
+ long ecx;
+ long eax;
+ long eip;
+ long cs;
+ long efl;
+};
+
+/*
+ * i386_machine_state:
+ *
+ * This structure corresponds to special machine state.
+ * It lives in the pcb. It is not saved by default.
+ */
+
+struct i386_machine_state {
+ struct user_ldt * ldt; /* user LDT, if any */
+ struct i386_fpsave_state *ifps; /* FPU state, allocated on demand */
+#if !defined(__x86_64__) || defined(USER32)
+ struct v86_assist_state v86s; /* virtual-8086 assist data */
+#endif
+ struct real_descriptor user_gdt[USER_GDT_SLOTS];
+ struct i386_debug_state ids; /* debug register state */
+#if defined(__x86_64__) && !defined(USER32)
+ struct i386_segment_base_state sbs; /* user %fs/%gs bases */
+#endif
+};
+
+/* Per-thread machine-dependent control block. */
+typedef struct pcb {
+ /* START of the exception stack.
+ * NOTE: this area is used as exception stack when switching
+ * CPL, and it MUST be big enough to save the thread state and
+ * switch to a proper stack area, even considering recursive
+ * exceptions, otherwise it could corrupt nearby memory */
+ struct i386_interrupt_state iis[2]; /* interrupt and NMI */
+#ifdef __x86_64__
+ unsigned long pad; /* ensure exception stack is aligned to 16 */
+#endif
+ struct i386_saved_state iss; /* saved user register state */
+ /* END of exception stack*/
+ struct i386_machine_state ims; /* LDT/FPU/debug/segment state */
+ decl_simple_lock_data(, lock)
+ unsigned short init_control; /* Initial FPU control to set */
+#ifdef LINUX_DEV
+ void *data; /* presumably driver-private data (LINUX_DEV glue) */
+#endif /* LINUX_DEV */
+} *pcb_t;
+
+/*
+ * On the kernel stack is:
+ * stack: ...
+ * struct i386_exception_link
+ * struct i386_kernel_state
+ * stack+KERNEL_STACK_SIZE
+ */
+
+/* Address of the i386_kernel_state at the top of a kernel stack. */
+#define STACK_IKS(stack) \
+ ((struct i386_kernel_state *)((stack) + KERNEL_STACK_SIZE) - 1)
+/* Address of the i386_exception_link just below the kernel state. */
+#define STACK_IEL(stack) \
+ ((struct i386_exception_link *)STACK_IKS(stack) - 1)
+
+#ifdef __x86_64__
+#define KERNEL_STACK_ALIGN 16
+#else
+#define KERNEL_STACK_ALIGN 4
+#endif
+
+#if defined(__x86_64__) && !defined(USER32)
+/* Follow System V AMD64 ABI guidelines. */
+#define USER_STACK_ALIGN 16
+#else
+#define USER_STACK_ALIGN 4
+#endif
+
+/* User register save area of THREAD (lives in its pcb). */
+#define USER_REGS(thread) (&(thread)->pcb->iss)
+
+
+/* No machine-dependent work is needed when syscall emulation changes. */
+#define syscall_emulation_sync(task) /* do nothing */
+
+
+/* #include_next "thread.h" */
+
+
+#endif /* _I386_THREAD_H_ */
diff --git a/i386/i386/time_stamp.h b/i386/i386/time_stamp.h
new file mode 100644
index 0000000..43bb956
--- /dev/null
+++ b/i386/i386/time_stamp.h
@@ -0,0 +1,30 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * The i386 timestamp implementation uses the default, so we don't
+ * need to do anything here.
+ */
+
diff --git a/i386/i386/trap.c b/i386/i386/trap.c
new file mode 100644
index 0000000..db4c702
--- /dev/null
+++ b/i386/i386/trap.c
@@ -0,0 +1,675 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Hardware trap/fault handler.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <mach/machine/eflags.h>
+#include <i386/trap.h>
+#include <i386/fpu.h>
+#include <i386/locore.h>
+#include <i386/model_dep.h>
+#include <intel/read_fault.h>
+#include <machine/machspl.h> /* for spl_t */
+#include <machine/db_interface.h>
+
+#include <mach/exception.h>
+#include <mach/kern_return.h>
+#include "vm_param.h"
+#include <mach/machine/thread_status.h>
+
+#include <vm/vm_fault.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#include <kern/ast.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/exception.h>
+
+#if MACH_KDB
+#include <ddb/db_break.h>
+#include <ddb/db_run.h>
+#include <ddb/db_watch.h>
+#endif
+
+#include "debug.h"
+
+#if MACH_KDB
+boolean_t debug_all_traps_with_kdb = FALSE;
+extern struct db_watchpoint *db_watchpoint_list;
+extern boolean_t db_watchpoints_inserted;
+
+void
+thread_kdb_return(void)
+{
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+}
+#endif /* MACH_KDB */
+
+#if MACH_TTD
+extern boolean_t kttd_enabled;
+boolean_t debug_all_traps_with_kttd = TRUE;
+#endif /* MACH_TTD */
+
+static void
+user_page_fault_continue(kern_return_t kr)
+{
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kr == KERN_SUCCESS) {
+#if MACH_KDB
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (regs->err & T_PF_WRITE) &&
+ db_find_watchpoint(thread->task->map,
+ (vm_offset_t)regs->cr2,
+ regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+#endif /* MACH_KDB */
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+
+#if MACH_KDB
+ if (debug_all_traps_with_kdb &&
+ kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+#endif /* MACH_KDB */
+
+ i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
+ /*NOTREACHED*/
+}
+
+
+static char *trap_type[] = {
+ "Divide error",
+ "Debug trap",
+ "NMI",
+ "Breakpoint",
+ "Overflow",
+ "Bounds check",
+ "Invalid opcode",
+ "No coprocessor",
+ "Double fault",
+ "Coprocessor overrun",
+ "Invalid TSS",
+ "Segment not present",
+ "Stack bounds",
+ "General protection",
+ "Page fault",
+ "(reserved)",
+ "Coprocessor error"
+};
+#define TRAP_TYPES (sizeof(trap_type)/sizeof(trap_type[0]))
+
+char *trap_name(unsigned int trapnum)
+{
+ return trapnum < TRAP_TYPES ? trap_type[trapnum] : "(unknown)";
+}
+
+/*
+ * Trap from kernel mode. Only page-fault errors are recoverable,
+ * and then only in special circumstances. All other errors are
+ * fatal.
+ */
+void kernel_trap(struct i386_saved_state *regs)
+{
+ unsigned long code;
+ unsigned long subcode;
+ unsigned long type;
+ vm_map_t map;
+ kern_return_t result;
+ thread_t thread;
+ extern char _start[], etext[];
+
+ type = regs->trapno;
+ code = regs->err;
+ thread = current_thread();
+
+#if 0
+((short*)0xb8700)[0] = 0x0f00+'K';
+((short*)0xb8700)[1] = 0x0f30+(type / 10);
+((short*)0xb8700)[2] = 0x0f30+(type % 10);
+#endif
+#if 0
+printf("kernel trap %d error %d\n", (int) type, (int) code);
+dump_ss(regs);
+#endif
+
+ switch (type) {
+ case T_NO_FPU:
+ fpnoextflt();
+ return;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return;
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return;
+
+ case T_PAGE_FAULT:
+
+ /* Get faulting linear address */
+ subcode = regs->cr2;
+#if 0
+ printf("kernel page fault at linear address %08x\n", subcode);
+#endif
+
+ /* If it's in the kernel linear address region,
+ convert it to a kernel virtual address
+ and use the kernel map to process the fault. */
+ if (lintokv(subcode) == 0 ||
+ subcode >= LINEAR_MIN_KERNEL_ADDRESS) {
+#if 0
+ printf("%08x in kernel linear address range\n", subcode);
+#endif
+ map = kernel_map;
+ subcode = lintokv(subcode);
+#if 0
+ printf("now %08x\n", subcode);
+#endif
+ if (trunc_page(subcode) == 0
+ || (subcode >= (long)_start
+ && subcode < (long)etext)) {
+ printf("Kernel page fault at address 0x%lx, "
+ "eip = 0x%lx\n",
+ subcode, regs->eip);
+ goto badtrap;
+ }
+ } else {
+ if (thread)
+ map = thread->task->map;
+ if (!thread || map == kernel_map) {
+ printf("kernel page fault at %08lx:\n", subcode);
+ dump_ss(regs);
+ panic("kernel thread accessed user space!\n");
+ }
+ }
+
+ /*
+ * Since the 386 ignores write protection in
+ * kernel mode, always try for write permission
+ * first. If that fails and the fault was a
+ * read fault, retry with read permission.
+ */
+ result = vm_fault(map,
+ trunc_page((vm_offset_t)subcode),
+#if !(__i486__ || __i586__ || __i686__)
+ VM_PROT_READ|VM_PROT_WRITE,
+#else
+ (code & T_PF_WRITE)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : VM_PROT_READ,
+#endif
+ FALSE,
+ FALSE,
+ (void (*)()) 0);
+#if MACH_KDB
+ if (result == KERN_SUCCESS) {
+ /* Look for watchpoints */
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (code & T_PF_WRITE) &&
+ db_find_watchpoint(map,
+ (vm_offset_t)subcode, regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+ }
+ else
+#endif /* MACH_KDB */
+#if !(__i486__ || __i586__ || __i686__)
+ if ((code & T_PF_WRITE) == 0 &&
+ result == KERN_PROTECTION_FAILURE)
+ {
+ /*
+ * Must expand vm_fault by hand,
+ * so that we can ask for read-only access
+ * but enter a (kernel)writable mapping.
+ */
+ result = intel_read_fault(map,
+ trunc_page((vm_offset_t)subcode));
+ }
+#else
+ ;
+#endif
+
+ if (result == KERN_SUCCESS) {
+ /*
+ * Certain faults require that we back up
+ * the EIP.
+ */
+ struct recovery *rp;
+
+ /* Linear searching; but the list is small enough. */
+ for (rp = retry_table; rp < retry_table_end; rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ break;
+ }
+ }
+ return;
+ }
+
+ /*
+ * If there is a failure recovery address
+ * for this fault, go there.
+ */
+ {
+ struct recovery *rp;
+
+ /* Linear searching; but the list is small enough. */
+ for (rp = recover_table;
+ rp < recover_table_end;
+ rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ return;
+ }
+ }
+ }
+
+ /*
+ * Check thread recovery address also -
+ * v86 assist uses it.
+ */
+ if (thread->recover) {
+ regs->eip = thread->recover;
+ thread->recover = 0;
+ return;
+ }
+
+ /*
+ * Unanticipated page-fault errors in kernel
+ * should not happen.
+ */
+ /* fall through */
+
+ default:
+ badtrap:
+ printf("Kernel ");
+ if (type < TRAP_TYPES)
+ printf("%s trap", trap_type[type]);
+ else
+ printf("trap %ld", type);
+ printf(", eip 0x%lx, code %lx, cr2 %lx\n", regs->eip, code, regs->cr2);
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, code, regs))
+ return;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, code, regs))
+ return;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("kernel trap, type %ld, code = %lx\n",
+ type, code);
+ dump_ss(regs);
+ panic("trap");
+ return;
+ }
+}
+
+
+/*
+ * Trap from user mode.
+ * Return TRUE if from emulated system call.
+ */
+int user_trap(struct i386_saved_state *regs)
+{
+ int exc = 0; /* Suppress gcc warning */
+ unsigned long code;
+ unsigned long subcode;
+ unsigned long type;
+ thread_t thread = current_thread();
+
+#ifdef __x86_64__
+ assert(regs == &thread->pcb->iss);
+#endif
+
+ type = regs->trapno;
+ code = 0;
+ subcode = 0;
+
+#if 0
+ ((short*)0xb8700)[3] = 0x0f00+'U';
+ ((short*)0xb8700)[4] = 0x0f30+(type / 10);
+ ((short*)0xb8700)[5] = 0x0f30+(type % 10);
+#endif
+#if 0
+ printf("user trap %d error %d\n", type, code);
+ dump_ss(regs);
+#endif
+
+ switch (type) {
+
+ case T_DIVIDE_ERROR:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_DIV;
+ break;
+
+ case T_DEBUG:
+#if MACH_TTD
+ if (kttd_enabled && kttd_in_single_step()) {
+ if (kttd_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (db_in_single_step()) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif /* MACH_KDB */
+ /* Make the content of the debug status register (DR6)
+ available to user space. */
+ if (thread->pcb)
+ thread->pcb->ims.ids.dr[6] = get_dr6() & 0x600F;
+ set_dr6(0);
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_SGL;
+ break;
+
+ case T_INT3:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+ break;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ {
+ if (db_find_breakpoint_here(
+ (current_thread())? current_thread()->task: TASK_NULL,
+ regs->eip - 1)) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+ }
+#endif /* MACH_KDB */
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_BPT;
+ break;
+
+ case T_OVERFLOW:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_INTO;
+ break;
+
+ case T_OUT_OF_BOUNDS:
+ exc = EXC_SOFTWARE;
+ code = EXC_I386_BOUND;
+ break;
+
+ case T_INVALID_OPCODE:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVOP;
+ break;
+
+ case T_NO_FPU:
+ case 32: /* XXX */
+ fpnoextflt();
+ return 0;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return 0;
+
+ case 10: /* invalid TSS == iret with NT flag set */
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVTSSFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_SEGMENT_NOT_PRESENT:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_SEGNPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_STACK_FAULT:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_STKFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_GENERAL_PROTECTION:
+ /* Check for an emulated int80 system call.
+ NetBSD-current and Linux use trap instead of call gate. */
+ if (thread->task->eml_dispatch) {
+ unsigned char opcode, intno;
+
+ opcode = inst_fetch(regs->eip, regs->cs);
+ intno = inst_fetch(regs->eip+1, regs->cs);
+ if (opcode == 0xcd && intno == 0x80) {
+ regs->eip += 2;
+ return 1;
+ }
+ }
+#ifdef __x86_64__
+ {
+ unsigned char opcode, addr[4], seg[2];
+ int i;
+
+ opcode = inst_fetch(regs->eip, regs->cs);
+ for (i = 0; i < 4; i++)
+ addr[i] = inst_fetch(regs->eip+i+1, regs->cs);
+ (void) addr;
+ for (i = 0; i < 2; i++)
+ seg[i] = inst_fetch(regs->eip+i+5, regs->cs);
+ if (opcode == 0x9a && seg[0] == 0x7 && seg[1] == 0) {
+ regs->eip += 7;
+ return 1;
+ }
+ }
+#endif
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_GPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_PAGE_FAULT:
+ subcode = regs->cr2;
+#if 0
+ printf("user page fault at linear address %08x\n", subcode);
+ dump_ss (regs);
+
+#endif
+ if (subcode >= LINEAR_MIN_KERNEL_ADDRESS)
+ i386_exception(EXC_BAD_ACCESS, EXC_I386_PGFLT, subcode);
+ (void) vm_fault(thread->task->map,
+ trunc_page((vm_offset_t)subcode),
+ (regs->err & T_PF_WRITE)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : VM_PROT_READ,
+ FALSE,
+ FALSE,
+ user_page_fault_continue);
+ /*NOTREACHED*/
+ break;
+
+#ifdef MACH_PV_PAGETABLES
+ case 15:
+ {
+ static unsigned count = 0;
+ count++;
+ if (!(count % 10000))
+ printf("%d 4gb segments accesses\n", count);
+ if (count > 1000000) {
+ printf("A million 4gb segment accesses, stopping reporting them.");
+ if (hyp_vm_assist(VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify))
+ panic("couldn't disable 4gb segments vm assist notify");
+ }
+ return 0;
+ }
+#endif /* MACH_PV_PAGETABLES */
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return 0;
+
+ default:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("user trap, type %ld, code = %lx\n",
+ type, regs->err);
+ dump_ss(regs);
+ panic("trap");
+ return 0;
+ }
+
+#if MACH_TTD
+ if ((debug_all_traps_with_kttd || thread->task->essential) &&
+ kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if ((debug_all_traps_with_kdb || thread->task->essential) &&
+ kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+
+ i386_exception(exc, code, subcode);
+ /*NOTREACHED*/
+}
+
+#define V86_IRET_PENDING 0x4000
+
+/*
+ * Handle AST traps for i386.
+ * Check for delayed floating-point exception from
+ * AT-bus machines.
+ */
+void
+i386_astintr(void)
+{
+ (void) splsched(); /* block interrupts to check reasons */
+#ifndef MACH_RING1
+ int mycpu = cpu_number();
+
+ if (need_ast[mycpu] & AST_I386_FP) {
+ /*
+ * AST was for delayed floating-point exception -
+ * FP interrupt occurred while in kernel.
+ * Turn off this AST reason and handle the FPU error.
+ */
+ ast_off(mycpu, AST_I386_FP);
+ (void) spl0();
+
+ fpastintr();
+ }
+ else
+#endif /* MACH_RING1 */
+ {
+ /*
+ * Not an FPU trap. Handle the AST.
+ * Interrupts are still blocked.
+ */
+ ast_taken();
+ }
+}
+
+/*
+ * Handle exceptions for i386.
+ *
+ * If we are an AT bus machine, we must turn off the AST for a
+ * delayed floating-point exception.
+ *
+ * If we are providing floating-point emulation, we may have
+ * to retrieve the real register values from the floating point
+ * emulator.
+ */
+void
+i386_exception(
+ int exc,
+ int code,
+ long subcode)
+{
+ spl_t s;
+
+ /*
+ * Turn off delayed FPU error handling.
+ */
+ s = splsched();
+ ast_off(cpu_number(), AST_I386_FP);
+ splx(s);
+
+ exception(exc, code, subcode);
+ /*NOTREACHED*/
+}
+
+#if MACH_PCSAMPLE > 0
+/*
+ * return saved state for interrupted user thread
+ */
+unsigned
+interrupted_pc(const thread_t t)
+{
+ struct i386_saved_state *iss;
+
+ iss = USER_REGS(t);
+ return iss->eip;
+}
+#endif /* MACH_PCSAMPLE > 0 */
+
+#if MACH_KDB
+
+void
+db_debug_all_traps (boolean_t enable)
+{
+ debug_all_traps_with_kdb = enable;
+}
+
+#endif /* MACH_KDB */
+
+void handle_double_fault(struct i386_saved_state *regs)
+{
+ dump_ss(regs);
+ panic("DOUBLE FAULT! This is critical\n");
+}
diff --git a/i386/i386/trap.h b/i386/i386/trap.h
new file mode 100644
index 0000000..db22273
--- /dev/null
+++ b/i386/i386/trap.h
@@ -0,0 +1,71 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TRAP_H_
+#define _I386_TRAP_H_
+
+#include <mach/machine/trap.h>
+
+#ifndef __ASSEMBLER__
+#include <i386/thread.h>
+#include <mach/mach_types.h>
+
+char *trap_name(unsigned int trapnum);
+
+unsigned int interrupted_pc(thread_t);
+
+void
+i386_exception(
+ int exc,
+ int code,
+ long subcode) __attribute__ ((noreturn));
+
+extern void
+thread_kdb_return(void);
+
+/*
+ * Trap from kernel mode. Only page-fault errors are recoverable,
+ * and then only in special circumstances. All other errors are
+ * fatal.
+ */
+void kernel_trap(struct i386_saved_state *regs);
+
+/*
+ * Trap from user mode.
+ * Return TRUE if from emulated system call.
+ */
+int user_trap(struct i386_saved_state *regs);
+
+/*
+ * Handle AST traps for i386.
+ * Check for delayed floating-point exception from
+ * AT-bus machines.
+ */
+void i386_astintr(void);
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* _I386_TRAP_H_ */
diff --git a/i386/i386/tss.h b/i386/i386/tss.h
new file mode 100644
index 0000000..fd7e714
--- /dev/null
+++ b/i386/i386/tss.h
@@ -0,0 +1,109 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TSS_H_
+#define _I386_TSS_H_
+
+#include <sys/types.h>
+#include <mach/inline.h>
+
+#include <machine/io_perm.h>
+
+/*
+ * x86 Task State Segment
+ */
+#ifdef __x86_64__
+struct i386_tss {
+ uint32_t _reserved0;
+ uint64_t rsp0;
+ uint64_t rsp1;
+ uint64_t rsp2;
+ uint64_t _reserved1;
+ uint64_t ist1;
+ uint64_t ist2;
+ uint64_t ist3;
+ uint64_t ist4;
+ uint64_t ist5;
+ uint64_t ist6;
+ uint64_t ist7;
+ uint64_t _reserved2;
+ uint16_t _reserved3;
+ uint16_t io_bit_map_offset;
+} __attribute__((__packed__));
+#else /* ! __x86_64__ */
+struct i386_tss {
+ int back_link; /* segment number of previous task,
+ if nested */
+ int esp0; /* initial stack pointer ... */
+ int ss0; /* and segment for ring 0 */
+ int esp1; /* initial stack pointer ... */
+ int ss1; /* and segment for ring 1 */
+ int esp2; /* initial stack pointer ... */
+ int ss2; /* and segment for ring 2 */
+ int cr3; /* CR3 - page table directory
+ physical address */
+ int eip;
+ int eflags;
+ int eax;
+ int ecx;
+ int edx;
+ int ebx;
+ int esp; /* current stack pointer */
+ int ebp;
+ int esi;
+ int edi;
+ int es;
+ int cs;
+ int ss; /* current stack segment */
+ int ds;
+ int fs;
+ int gs;
+ int ldt; /* local descriptor table segment */
+ unsigned short trace_trap; /* trap on switch to this task */
+ unsigned short io_bit_map_offset;
+ /* offset to start of IO permission
+ bit map */
+};
+#endif /* __x86_64__ */
+
+/* The structure extends the above TSS structure by an I/O permission bitmap
+ and the barrier. */
+struct task_tss
+ {
+ struct i386_tss tss;
+ unsigned char iopb[IOPB_BYTES];
+ unsigned char barrier;
+};
+
+
+/* Load the current task register. */
+static inline void
+ltr(unsigned short segment)
+{
+ __asm volatile("ltr %0" : : "r" (segment) : "memory");
+}
+
+#endif /* _I386_TSS_H_ */
diff --git a/i386/i386/user_ldt.c b/i386/i386/user_ldt.c
new file mode 100644
index 0000000..4c89bd4
--- /dev/null
+++ b/i386/i386/user_ldt.c
@@ -0,0 +1,451 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1992,1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * User LDT management.
+ * Each thread in a task may have its own LDT.
+ */
+
+#include <string.h>
+
+#include <kern/kalloc.h>
+#include <kern/thread.h>
+
+#include <vm/vm_kern.h>
+
+#include <i386/pcb.h>
+#include <i386/seg.h>
+#include <i386/thread.h>
+#include <i386/user_ldt.h>
+#include <i386/i386/mach_i386.server.h>
+#include <stddef.h>
+#include "ldt.h"
+#include "vm_param.h"
+
+/*
+ * Add the descriptors to the LDT, starting with
+ * the descriptor for 'first_selector'.
+ */
+kern_return_t
+i386_set_ldt(
+ thread_t thread,
+ int first_selector,
+ const struct descriptor *descriptor_list,
+ unsigned int count,
+ boolean_t desc_list_inline)
+{
+ struct real_descriptor* desc_list = (struct real_descriptor *)descriptor_list;
+ user_ldt_t new_ldt, old_ldt, temp;
+ struct real_descriptor *dp;
+ unsigned i;
+ unsigned min_selector = 0;
+ pcb_t pcb;
+ vm_size_t ldt_size_needed;
+ unsigned first_desc = sel_idx(first_selector);
+ vm_map_copy_t old_copy_object = NULL; /* Suppress gcc warning */
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+ if (thread == current_thread())
+ min_selector = LDTSZ;
+ if (first_desc < min_selector || first_desc > 8191)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc + count >= 8192)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * If desc_list is not inline, it is in copyin form.
+ * We must copy it out to the kernel map, and wire
+ * it down (we touch it while the PCB is locked).
+ *
+ * We make a copy of the copyin object, and clear
+ * out the old one, so that returning KERN_INVALID_ARGUMENT
+ * will not try to deallocate the data twice.
+ */
+ if (!desc_list_inline) {
+ kern_return_t kr;
+ vm_offset_t dst_addr;
+
+ old_copy_object = (vm_map_copy_t) desc_list;
+
+ kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
+ vm_map_copy_copy(old_copy_object));
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ (void) vm_map_pageable(ipc_kernel_map,
+ dst_addr,
+ dst_addr + count * sizeof(struct real_descriptor),
+ VM_PROT_READ|VM_PROT_WRITE, TRUE, TRUE);
+ desc_list = (struct real_descriptor *)dst_addr;
+ }
+
+ for (i = 0, dp = desc_list;
+ i < count;
+ i++, dp++)
+ {
+ switch (dp->access & ~ACC_A) {
+ case 0:
+ case ACC_P:
+ /* valid empty descriptor */
+ break;
+ case ACC_P | ACC_CALL_GATE:
+ /* Mach kernel call */
+ *dp = *(struct real_descriptor *)
+ &ldt[sel_idx(USER_SCALL)];
+ break;
+ case ACC_P | ACC_PL_U | ACC_DATA:
+ case ACC_P | ACC_PL_U | ACC_DATA_W:
+ case ACC_P | ACC_PL_U | ACC_DATA_E:
+ case ACC_P | ACC_PL_U | ACC_DATA_EW:
+ case ACC_P | ACC_PL_U | ACC_CODE:
+ case ACC_P | ACC_PL_U | ACC_CODE_R:
+ case ACC_P | ACC_PL_U | ACC_CODE_C:
+ case ACC_P | ACC_PL_U | ACC_CODE_CR:
+ case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
+ case ACC_P | ACC_PL_U | ACC_CALL_GATE:
+ break;
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
+ ldt_size_needed = sizeof(struct real_descriptor)
+ * (first_desc + count);
+
+ pcb = thread->pcb;
+ new_ldt = 0;
+ Retry:
+ simple_lock(&pcb->lock);
+ old_ldt = pcb->ims.ldt;
+ if (old_ldt == 0 ||
+ old_ldt->desc.limit_low + 1 < ldt_size_needed)
+ {
+ /*
+ * No old LDT, or not big enough
+ */
+ if (new_ldt == 0) {
+ simple_unlock(&pcb->lock);
+
+#ifdef MACH_PV_DESCRIPTORS
+ /* LDT needs to be aligned on a page */
+ vm_offset_t alloc = kalloc(ldt_size_needed + PAGE_SIZE + offsetof(struct user_ldt, ldt));
+ new_ldt = (user_ldt_t) (round_page((alloc + offsetof(struct user_ldt, ldt))) - offsetof(struct user_ldt, ldt));
+ new_ldt->alloc = alloc;
+
+#else /* MACH_PV_DESCRIPTORS */
+ new_ldt = (user_ldt_t)
+ kalloc(ldt_size_needed
+ + sizeof(struct real_descriptor));
+#endif /* MACH_PV_DESCRIPTORS */
+ /*
+ * Build a descriptor that describes the
+ * LDT itself
+ */
+ {
+ vm_offset_t ldt_base;
+
+ ldt_base = kvtolin(&new_ldt->ldt[0]);
+
+ new_ldt->desc.limit_low = ldt_size_needed - 1;
+ new_ldt->desc.limit_high = 0;
+ new_ldt->desc.base_low = ldt_base & 0xffff;
+ new_ldt->desc.base_med = (ldt_base >> 16) & 0xff;
+ new_ldt->desc.base_high = ldt_base >> 24;
+ new_ldt->desc.access = ACC_P | ACC_LDT;
+ new_ldt->desc.granularity = 0;
+ }
+
+ goto Retry;
+ }
+
+ /*
+ * Have new LDT. If there was a an old ldt, copy descriptors
+ * from old to new. Otherwise copy the default ldt.
+ */
+ if (old_ldt) {
+ memcpy(&new_ldt->ldt[0],
+ &old_ldt->ldt[0],
+ old_ldt->desc.limit_low + 1);
+ }
+ else {
+ struct real_descriptor template = {0, 0, 0, ACC_P, 0, 0 ,0};
+
+ for (dp = &new_ldt->ldt[0], i = 0; i < first_desc; i++, dp++) {
+ if (i < LDTSZ)
+ *dp = *(struct real_descriptor *) &ldt[i];
+ else
+ *dp = template;
+ }
+ }
+
+ temp = old_ldt;
+ old_ldt = new_ldt; /* use new LDT from now on */
+ new_ldt = temp; /* discard old LDT */
+
+ pcb->ims.ldt = old_ldt; /* set LDT for thread */
+
+ /*
+ * If we are modifying the LDT for the current thread,
+ * make sure it is properly set.
+ */
+ if (thread == current_thread())
+ switch_ktss(pcb);
+ }
+
+ /*
+ * Install new descriptors.
+ */
+ memcpy(&old_ldt->ldt[first_desc],
+ desc_list,
+ count * sizeof(struct real_descriptor));
+
+ simple_unlock(&pcb->lock);
+
+ if (new_ldt)
+#ifdef MACH_PV_DESCRIPTORS
+ {
+#ifdef MACH_PV_PAGETABLES
+ for (i=0; i<(new_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
+ pmap_set_page_readwrite(&new_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES*/
+ kfree(new_ldt->alloc, new_ldt->desc.limit_low + 1
+ + PAGE_SIZE + offsetof(struct user_ldt, ldt));
+ }
+#else /* MACH_PV_DESCRIPTORS */
+ kfree((vm_offset_t)new_ldt,
+ new_ldt->desc.limit_low + 1
+ + sizeof(struct real_descriptor));
+#endif /* MACH_PV_DESCRIPTORS */
+
+ /*
+ * Free the descriptor list, if it was
+ * out-of-line. Also discard the original
+ * copy object for it.
+ */
+ if (!desc_list_inline) {
+ (void) kmem_free(ipc_kernel_map,
+ (vm_offset_t) desc_list,
+ count * sizeof(struct real_descriptor));
+ vm_map_copy_discard(old_copy_object);
+ }
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+i386_get_ldt(const thread_t thread,
+ int first_selector,
+ int selector_count, /* number wanted */
+ struct descriptor **descriptor_list, /* in/out */
+ unsigned int *count /* in/out */
+ )
+{
+ struct real_descriptor** desc_list = (struct real_descriptor **)descriptor_list;
+ struct user_ldt *user_ldt;
+ pcb_t pcb;
+ int first_desc = sel_idx(first_selector);
+ unsigned ldt_count;
+ vm_size_t ldt_size;
+ vm_size_t size, size_needed;
+ vm_offset_t addr;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc < 0 || first_desc > 8191)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc + selector_count >= 8192)
+ return KERN_INVALID_ARGUMENT;
+
+ pcb = thread->pcb;
+ addr = 0;
+ size = 0;
+
+ for (;;) {
+ simple_lock(&pcb->lock);
+ user_ldt = pcb->ims.ldt;
+ if (user_ldt == 0) {
+ simple_unlock(&pcb->lock);
+ if (addr)
+ kmem_free(ipc_kernel_map, addr, size);
+ *count = 0;
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Find how many descriptors we should return.
+ */
+ ldt_count = (user_ldt->desc.limit_low + 1) /
+ sizeof (struct real_descriptor);
+ ldt_count -= first_desc;
+ if (ldt_count > selector_count)
+ ldt_count = selector_count;
+
+ ldt_size = ldt_count * sizeof(struct real_descriptor);
+
+ /*
+ * Do we have the memory we need?
+ */
+ if (ldt_count <= *count)
+ break; /* fits in-line */
+
+ size_needed = round_page(ldt_size);
+ if (size_needed <= size)
+ break;
+
+ /*
+ * Unlock the pcb and allocate more memory
+ */
+ simple_unlock(&pcb->lock);
+
+ if (size != 0)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = size_needed;
+
+ if (kmem_alloc(ipc_kernel_map, &addr, size)
+ != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /*
+ * copy out the descriptors
+ */
+ memcpy(*desc_list,
+ &user_ldt->ldt[first_desc],
+ ldt_size);
+ *count = ldt_count;
+ simple_unlock(&pcb->lock);
+
+ if (addr) {
+ vm_size_t size_used, size_left;
+ vm_map_copy_t memory;
+
+ /*
+ * Free any unused memory beyond the end of the last page used
+ */
+ size_used = round_page(ldt_size);
+ if (size_used != size)
+ kmem_free(ipc_kernel_map,
+ addr + size_used, size - size_used);
+
+ /*
+ * Zero the remainder of the page being returned.
+ */
+ size_left = size_used - ldt_size;
+ if (size_left > 0)
+ memset((char *)addr + ldt_size, 0, size_left);
+
+ /*
+ * Make memory into copyin form - this unwires it.
+ */
+ (void) vm_map_copyin(ipc_kernel_map, addr, size_used,
+ TRUE, &memory);
+ *desc_list = (struct real_descriptor *)memory;
+ }
+
+ return KERN_SUCCESS;
+}
+
+void
+user_ldt_free(user_ldt_t user_ldt)
+{
+#ifdef MACH_PV_DESCRIPTORS
+ unsigned i;
+#ifdef MACH_PV_PAGETABLES
+ for (i=0; i<(user_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
+ pmap_set_page_readwrite(&user_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES */
+ kfree(user_ldt->alloc, user_ldt->desc.limit_low + 1
+ + PAGE_SIZE + offsetof(struct user_ldt, ldt));
+#else /* MACH_PV_DESCRIPTORS */
+ kfree((vm_offset_t)user_ldt,
+ user_ldt->desc.limit_low + 1
+ + sizeof(struct real_descriptor));
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+
+kern_return_t
+i386_set_gdt (thread_t thread, int *selector, struct descriptor descriptor)
+{
+ const struct real_descriptor *desc = (struct real_descriptor *)&descriptor;
+ int idx;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (*selector == -1)
+ {
+ for (idx = 0; idx < USER_GDT_SLOTS; ++idx)
+ if ((thread->pcb->ims.user_gdt[idx].access & ACC_P) == 0)
+ {
+ *selector = ((idx + sel_idx(USER_GDT)) << 3) | SEL_PL_U;
+ break;
+ }
+ if (idx == USER_GDT_SLOTS)
+ return KERN_NO_SPACE; /* ? */
+ }
+ else if ((*selector & (SEL_LDT|SEL_PL)) != SEL_PL_U
+ || sel_idx (*selector) < sel_idx(USER_GDT)
+ || sel_idx (*selector) >= sel_idx(USER_GDT) + USER_GDT_SLOTS)
+ return KERN_INVALID_ARGUMENT;
+ else
+ idx = sel_idx (*selector) - sel_idx(USER_GDT);
+
+ if ((desc->access & ACC_P) == 0)
+ memset (&thread->pcb->ims.user_gdt[idx], 0,
+ sizeof thread->pcb->ims.user_gdt[idx]);
+ else if ((desc->access & (ACC_TYPE_USER|ACC_PL)) != (ACC_TYPE_USER|ACC_PL_U) || (desc->granularity & SZ_64))
+
+ return KERN_INVALID_ARGUMENT;
+ else
+ memcpy (&thread->pcb->ims.user_gdt[idx], desc, sizeof (struct descriptor));
+
+ /*
+ * If we are modifying the GDT for the current thread,
+ * make sure it is properly set.
+ */
+ if (thread == current_thread())
+ switch_ktss(thread->pcb);
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+i386_get_gdt (const thread_t thread, int selector, struct descriptor *descriptor)
+{
+ struct real_descriptor *desc = (struct real_descriptor *)descriptor;
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if ((selector & (SEL_LDT|SEL_PL)) != SEL_PL_U
+ || sel_idx (selector) < sel_idx(USER_GDT)
+ || sel_idx (selector) >= sel_idx(USER_GDT) + USER_GDT_SLOTS)
+ return KERN_INVALID_ARGUMENT;
+
+ *desc = thread->pcb->ims.user_gdt[sel_idx (selector) - sel_idx(USER_GDT)];
+
+ return KERN_SUCCESS;
+}
diff --git a/i386/i386/user_ldt.h b/i386/i386/user_ldt.h
new file mode 100644
index 0000000..26caa27
--- /dev/null
+++ b/i386/i386/user_ldt.h
@@ -0,0 +1,50 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_USER_LDT_H_
+#define _I386_USER_LDT_H_
+
+/*
+ * User LDT management.
+ *
+ * Each thread in a task may have its own LDT.
+ */
+
+#include <i386/seg.h>
+
+struct user_ldt {
+#ifdef MACH_PV_DESCRIPTORS
+ vm_offset_t alloc; /* allocation before alignment */
+#endif /* MACH_PV_DESCRIPTORS */
+ struct real_descriptor desc; /* descriptor for self */
+ struct real_descriptor ldt[1]; /* descriptor table (variable) */
+};
+typedef struct user_ldt * user_ldt_t;
+
+extern void
+user_ldt_free(user_ldt_t user_ldt);
+
+#endif /* _I386_USER_LDT_H_ */
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
new file mode 100644
index 0000000..056aa52
--- /dev/null
+++ b/i386/i386/vm_param.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KERNEL_I386_VM_PARAM_
+#define _I386_KERNEL_I386_VM_PARAM_
+
+#include <kern/macros.h>
+
+/* XXX use xu/vm_param.h */
+#include <mach/vm_param.h>
+#ifdef MACH_PV_PAGETABLES
+#include <xen/public/xen.h>
+#endif
+
+/* To avoid ambiguity in kernel code, make the name explicit */
+#define VM_MIN_USER_ADDRESS VM_MIN_ADDRESS
+#define VM_MAX_USER_ADDRESS VM_MAX_ADDRESS
+
+/* The kernel address space is usually 1GB, usually starting at virtual address 0. */
+/* This can be changed freely to separate kernel addresses from user addresses
+ * for better trace support in kdb; the _START symbol has to be offset by the
+ * same amount. */
+#ifdef __x86_64__
+#define VM_MIN_KERNEL_ADDRESS KERNEL_MAP_BASE
+#else
+#define VM_MIN_KERNEL_ADDRESS 0xC0000000UL
+#endif
+
+#if defined(MACH_XEN) || defined (__x86_64__)
+/* PV kernels can be loaded directly to the target virtual address */
+#define INIT_VM_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
+#else /* MACH_XEN */
+/* This must remain 0 */
+#define INIT_VM_MIN_KERNEL_ADDRESS 0x00000000UL
+#endif /* MACH_XEN */
+
+#ifdef MACH_PV_PAGETABLES
+#ifdef __i386__
+#if PAE
+#define HYP_VIRT_START HYPERVISOR_VIRT_START_PAE
+#else /* PAE */
+#define HYP_VIRT_START HYPERVISOR_VIRT_START_NONPAE
+#endif /* PAE */
+#define VM_MAX_KERNEL_ADDRESS (HYP_VIRT_START - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#else
+#define HYP_VIRT_START HYPERVISOR_VIRT_START
+#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#endif
+#else /* MACH_PV_PAGETABLES */
+#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#endif /* MACH_PV_PAGETABLES */
+
+/*
+ * Reserve mapping room for the kernel map, which includes
+ * the device I/O map and the IPC map.
+ */
+#ifdef __x86_64__
+/*
+ * Vm structures are quite bigger on 64 bit.
+ * This should be well enough for 8G of physical memory; on the other hand,
+ * maybe not all of them need to be in directly-mapped memory, see the parts
+ * allocated with pmap_steal_memory().
+ */
+#define VM_KERNEL_MAP_SIZE (512 * 1024 * 1024)
+#else
+#define VM_KERNEL_MAP_SIZE (152 * 1024 * 1024)
+#endif
+
+/* This is the kernel address range in linear addresses. */
+#ifdef __x86_64__
+#define LINEAR_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
+#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffffffffffUL)
+#else
+/* On x86, the kernel virtual address space is actually located
+ at high linear addresses. */
+#define LINEAR_MIN_KERNEL_ADDRESS (VM_MAX_USER_ADDRESS)
+#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffUL)
+#endif
+
+#ifdef MACH_PV_PAGETABLES
+/* need room for mmu updates (2*8bytes) */
+#define KERNEL_STACK_SIZE (4*I386_PGBYTES)
+#define INTSTACK_SIZE (4*I386_PGBYTES)
+#else /* MACH_PV_PAGETABLES */
+#define KERNEL_STACK_SIZE (1*I386_PGBYTES)
+#define INTSTACK_SIZE (1*I386_PGBYTES)
+#endif /* MACH_PV_PAGETABLES */
+ /* interrupt stack size */
+
+/*
+ * Conversion between 80386 pages and VM pages
+ */
+
+#define trunc_i386_to_vm(p) (atop(trunc_page(i386_ptob(p))))
+#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p))))
+#define vm_to_i386(p) (i386_btop(ptoa(p)))
+
+/*
+ * Physical memory is direct-mapped to virtual memory
+ * starting at virtual address VM_MIN_KERNEL_ADDRESS.
+ */
+#define phystokv(a) ((vm_offset_t)(a) + VM_MIN_KERNEL_ADDRESS)
+/*
+ * This can not be used with virtual mappings, but can be used during bootstrap
+ */
+#define _kvtophys(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS)
+
+/*
+ * Kernel virtual memory is actually at 0xc0000000 in linear addresses.
+ */
+#define kvtolin(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS + LINEAR_MIN_KERNEL_ADDRESS)
+#define lintokv(a) ((vm_offset_t)(a) - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+
+/*
+ * Physical memory properties.
+ */
+#define VM_PAGE_DMA_LIMIT DECL_CONST(0x1000000, UL)
+
+#ifdef MACH_XEN
+/* TODO Completely check Xen physical/virtual layout */
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x400000000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#endif
+#else /* MACH_XEN */
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE + 1)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#else /* __LP64__ */
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE + 1)
+#ifdef PAE
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else /* PAE */
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0xfffff000, UL)
+#endif /* PAE */
+#endif /* __LP64__ */
+#endif /* MACH_XEN */
+
+/*
+ * Physical segment indexes.
+ */
+#define VM_PAGE_SEG_DMA 0
+
+#if defined(VM_PAGE_DMA32_LIMIT) && (VM_PAGE_DMA32_LIMIT != VM_PAGE_DIRECTMAP_LIMIT)
+
+#if VM_PAGE_DMA32_LIMIT < VM_PAGE_DIRECTMAP_LIMIT
+#define VM_PAGE_SEG_DMA32 (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA32+1)
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DIRECTMAP+1)
+#else /* VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT */
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DMA32 (VM_PAGE_SEG_DIRECTMAP+1)
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DMA32+1)
+#endif
+
+#else
+
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DMA32 VM_PAGE_SEG_DIRECTMAP /* Alias for the DIRECTMAP segment */
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DIRECTMAP+1)
+#endif
+
+#endif /* _I386_KERNEL_I386_VM_PARAM_ */
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
new file mode 100644
index 0000000..2cd81be
--- /dev/null
+++ b/i386/i386/xen.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2006-2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_HYPCALL_H
+#define XEN_HYPCALL_H
+
+#ifdef MACH_XEN
+#ifndef __ASSEMBLER__
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+#include <mach/inline.h>
+#include <mach/xen.h>
+#include <machine/vm_param.h>
+#include <intel/pmap.h>
+#include <kern/debug.h>
+#include <xen/public/xen.h>
+
+/* TODO: this should be moved in appropriate non-Xen place. */
+#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)":::"memory")
+#define rmb() mb()
+#define wmb() mb()
+/* Atomically exchange *ptr with x; returns the previous value of *ptr
+ (xchg on a memory operand is implicitly locked on x86). */
+static inline unsigned long xchgl(volatile unsigned long *ptr, unsigned long x)
+{
+ __asm__ __volatile__("xchg %0, %1"
+ : "=r" (x)
+ : "m" (*(ptr)), "0" (x): "memory");
+ return x;
+}
+#define _TOSTR(x) #x
+#define TOSTR(x) _TOSTR (x)
+
+#ifdef __i386__
+#define _hypcall_ret "=a"
+#define _hypcall_arg1 "ebx"
+#define _hypcall_arg2 "ecx"
+#define _hypcall_arg3 "edx"
+#define _hypcall_arg4 "esi"
+#define _hypcall_arg5 "edi"
+#endif
+#ifdef __x86_64__
+#define _hypcall_ret "=a"
+#define _hypcall_arg1 "rdi"
+#define _hypcall_arg2 "rsi"
+#define _hypcall_arg3 "rdx"
+#define _hypcall_arg4 "r10"
+#define _hypcall_arg5 "r8"
+#endif
+
+
+/* x86-specific hypercall interface. */
+#define _hypcall0(type, name) \
+static inline type hyp_##name(void) \
+{ \
+ unsigned long __ret; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall1(type, name, type1, arg1) \
+static inline type hyp_##name(type1 arg1) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall2(type, name, type1, arg1, type2, arg2) \
+static inline type hyp_##name(type1 arg1, type2 arg2) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ register unsigned long __arg3 asm(_hypcall_arg3) = (unsigned long) arg3; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2), \
+ "+r" (__arg3) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall4(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ register unsigned long __arg3 asm(_hypcall_arg3) = (unsigned long) arg3; \
+ register unsigned long __arg4 asm(_hypcall_arg4) = (unsigned long) arg4; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2), \
+ "+r" (__arg3), \
+ "+r" (__arg4) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall5(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ register unsigned long __arg3 asm(_hypcall_arg3) = (unsigned long) arg3; \
+ register unsigned long __arg4 asm(_hypcall_arg4) = (unsigned long) arg4; \
+ register unsigned long __arg5 asm(_hypcall_arg5) = (unsigned long) arg5; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2), \
+ "+r" (__arg3), \
+ "+r" (__arg4), \
+ "+r" (__arg5) \
+ : : "memory"); \
+ return __ret; \
+}
+
+/* x86 Hypercalls */
+
+/* Note: since Hypervisor uses flat memory model, remember to always use
+ * kvtolin when giving pointers as parameters for the hypercall to read data
+ * at. Use kv_to_la when they may be used before GDT got set up. */
+
+_hypcall1(long, set_trap_table, vm_offset_t /* struct trap_info * */, traps);
+
+#ifdef MACH_PV_PAGETABLES
+_hypcall4(int, mmu_update, vm_offset_t /* struct mmu_update * */, req, int, count, vm_offset_t /* int * */, success_count, domid_t, domid)
+/* Apply a single page-table-entry update (mmu_update .ptr = PTE,
+ .val = VAL) through the mmu_update hypercall; returns the hypervisor's
+ success count (1 on success). */
+static inline int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
+{
+ struct mmu_update update =
+ {
+ .ptr = pte,
+ .val = val,
+ };
+ int count;
+ hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&count), DOMID_SELF);
+ return count;
+}
+/* Note: make sure this fits in KERNEL_STACK_SIZE */
+#define HYP_BATCH_MMU_UPDATES 256
+
+#define hyp_mmu_update_la(la, val) hyp_mmu_update_pte( \
+ (kernel_page_dir[lin2pdenum_cont((vm_offset_t)(la))] & INTEL_PTE_PFN) \
+ + ptenum((vm_offset_t)(la)) * sizeof(pt_entry_t), val)
+#endif
+
+_hypcall2(long, set_gdt, vm_offset_t /* unsigned long * */, frame_list, unsigned int, entries)
+
+_hypcall2(long, stack_switch, unsigned long, ss, unsigned long, esp);
+
+#ifdef __i386__
+_hypcall4(long, set_callbacks, unsigned long, es, void *, ea,
+ unsigned long, fss, void *, fsa);
+#endif
+#ifdef __x86_64__
+_hypcall3(long, set_callbacks, void *, ea, void *, fsa, void *, sc);
+#endif
+_hypcall1(long, fpu_taskswitch, int, set);
+
+#ifdef PAE
+#define hyp_high(pte) ((pte) >> 32)
+#else
+#define hyp_high(pte) 0
+#endif
+#ifdef __i386__
+_hypcall4(long, update_descriptor, unsigned long, ma_lo, unsigned long, ma_hi, unsigned long, desc_lo, unsigned long, desc_hi);
+#define hyp_do_update_descriptor(ma, desc) ({ \
+ pt_entry_t __ma = (ma); \
+ uint64_t __desc = (desc); \
+ hyp_update_descriptor(__ma & 0xffffffffU, hyp_high(__ma), __desc & 0xffffffffU, __desc >> 32); \
+})
+#endif
+#ifdef __x86_64__
+_hypcall2(long, update_descriptor, unsigned long, ma, unsigned long, desc);
+#define hyp_do_update_descriptor(ma, desc) hyp_update_descriptor(ma, desc)
+#endif
+
+#ifdef __x86_64__
+_hypcall2(long, set_segment_base, int, reg, unsigned long, value);
+#endif
+
+#include <xen/public/memory.h>
+_hypcall2(long, memory_op, unsigned long, cmd, vm_offset_t /* void * */, arg);
+/* Hand machine frame MFN back to Xen (XENMEM_decrease_reservation);
+ panics unless the hypervisor releases exactly one extent. */
+static inline void hyp_free_mfn(unsigned long mfn)
+{
+ struct xen_memory_reservation reservation;
+ reservation.extent_start = (void*) kvtolin(&mfn);
+ reservation.nr_extents = 1;
+ reservation.extent_order = 0;
+ reservation.address_bits = 0;
+ reservation.domid = DOMID_SELF;
+ if (hyp_memory_op(XENMEM_decrease_reservation, kvtolin(&reservation)) != 1)
+ panic("couldn't free page %lu\n", mfn);
+}
+
+#ifdef __i386__
+_hypcall4(int, update_va_mapping, unsigned long, va, unsigned long, val_lo, unsigned long, val_hi, unsigned long, flags);
+#define hyp_do_update_va_mapping(va, val, flags) ({ \
+ pt_entry_t __val = (val); \
+ hyp_update_va_mapping(va, __val & 0xffffffffU, hyp_high(__val), flags); \
+})
+#endif
+#ifdef __x86_64__
+_hypcall3(int, update_va_mapping, unsigned long, va, unsigned long, val, unsigned long, flags);
+#define hyp_do_update_va_mapping(va, val, flags) hyp_update_va_mapping(va, val, flags)
+#endif
+
+/* Release the page at PFN (currently mapped at VA) back to Xen:
+ clear its linear mapping, drop its pseudo-physical translation,
+ then free the underlying machine frame. */
+static inline void hyp_free_page(unsigned long pfn, void *va)
+{
+ /* save mfn */
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+#ifdef MACH_PV_PAGETABLES
+ /* remove from mappings */
+ if (hyp_do_update_va_mapping(kvtolin(va), 0, UVMF_INVLPG|UVMF_ALL))
+ panic("couldn't clear page %lu at %p\n", pfn, va);
+
+#ifdef MACH_PSEUDO_PHYS
+ /* drop machine page */
+ mfn_list[pfn] = ~0;
+#endif /* MACH_PSEUDO_PHYS */
+#endif
+
+ /* and free from Xen */
+ hyp_free_mfn(mfn);
+}
+
+#ifdef MACH_PV_PAGETABLES
+_hypcall4(int, mmuext_op, vm_offset_t /* struct mmuext_op * */, op, int, count, vm_offset_t /* int * */, success_count, domid_t, domid);
+/* Issue one argument-less mmuext operation CMD; returns the hypervisor's
+ success count (1 on success). */
+static inline int hyp_mmuext_op_void(unsigned int cmd)
+{
+ struct mmuext_op op = {
+ .cmd = cmd,
+ };
+ int count;
+ hyp_mmuext_op(kv_to_la(&op), 1, kv_to_la(&count), DOMID_SELF);
+ return count;
+}
+/* Issue one mmuext operation CMD taking a machine-frame argument MFN;
+ returns the hypervisor's success count (1 on success). */
+static inline int hyp_mmuext_op_mfn(unsigned int cmd, unsigned long mfn)
+{
+ struct mmuext_op op = {
+ .cmd = cmd,
+ .arg1.mfn = mfn,
+ };
+ int count;
+ hyp_mmuext_op(kv_to_la(&op), 1, kv_to_la(&count), DOMID_SELF);
+ return count;
+}
+/* Point the vCPU's LDT at LDT (must be page-aligned) holding NBENTRIES
+ descriptors.  The backing pages are first made read-only, as required
+ for tables handed to the hypervisor.
+ NOTE(review): count is declared unsigned long but the hypercall's
+ success_count parameter is documented as int * -- confirm this is
+ benign on 64-bit builds. */
+static inline void hyp_set_ldt(void *ldt, unsigned long nbentries) {
+ struct mmuext_op op = {
+ .cmd = MMUEXT_SET_LDT,
+ .arg1.linear_addr = kvtolin(ldt),
+ .arg2.nr_ents = nbentries,
+ };
+ unsigned long count;
+ if (((unsigned long)ldt) & PAGE_MASK)
+ panic("ldt %p is not aligned on a page\n", ldt);
+ /* 8 bytes per descriptor: walk the table one page at a time. */
+ for (count=0; count<nbentries; count+= PAGE_SIZE/8)
+ pmap_set_page_readonly(ldt+count*8);
+ hyp_mmuext_op(kvtolin(&op), 1, kvtolin(&count), DOMID_SELF);
+ if (!count)
+ panic("couldn't set LDT\n");
+}
+#define hyp_set_cr3(value) hyp_mmuext_op_mfn(MMUEXT_NEW_BASEPTR, pa_to_mfn(value))
+#define hyp_set_user_cr3(value) hyp_mmuext_op_mfn(MMUEXT_NEW_USER_BASEPTR, pa_to_mfn(value))
+/* Invalidate the TLB entry for linear address LIN on all vCPUs
+ (MMUEXT_INVLPG_ALL); panics if the operation does not succeed. */
+static inline void hyp_invlpg(vm_offset_t lin) {
+ struct mmuext_op ops;
+ int n;
+ ops.cmd = MMUEXT_INVLPG_ALL;
+ ops.arg1.linear_addr = lin;
+ hyp_mmuext_op(kvtolin(&ops), 1, kvtolin(&n), DOMID_SELF);
+ if (n < 1)
+ panic("couldn't invlpg\n");
+}
+#endif
+
+#ifdef __i386__
+_hypcall2(long, set_timer_op, unsigned long, absolute_lo, unsigned long, absolute_hi);
+#define hyp_do_set_timer_op(absolute_nsec) ({ \
+ uint64_t __absolute = (absolute_nsec); \
+ hyp_set_timer_op(__absolute & 0xffffffffU, __absolute >> 32); \
+})
+#endif
+#ifdef __x86_64__
+_hypcall1(long, set_timer_op, unsigned long, absolute);
+#define hyp_do_set_timer_op(absolute_nsec) hyp_set_timer_op(absolute_nsec)
+#endif
+
+#include <xen/public/event_channel.h>
+_hypcall1(int, event_channel_op, vm_offset_t /* evtchn_op_t * */, op);
+/* Notify the remote end of event channel PORT (EVTCHNOP_send);
+ returns the hypercall's status. */
+static inline int hyp_event_channel_send(evtchn_port_t port) {
+ evtchn_op_t op = {
+ .cmd = EVTCHNOP_send,
+ .u.send.port = port,
+ };
+ return hyp_event_channel_op(kvtolin(&op));
+}
+/* Allocate an unbound event channel that domain DOMID may bind to;
+ returns the new local port, panics on failure. */
+static inline evtchn_port_t hyp_event_channel_alloc(domid_t domid) {
+ evtchn_op_t op = {
+ .cmd = EVTCHNOP_alloc_unbound,
+ .u.alloc_unbound.dom = DOMID_SELF,
+ .u.alloc_unbound.remote_dom = domid,
+ };
+ if (hyp_event_channel_op(kvtolin(&op)))
+ panic("couldn't allocate event channel");
+ return op.u.alloc_unbound.port;
+}
+/* Bind virtual IRQ VIRQ for VCPU to a fresh event channel; returns the
+ bound port, panics on failure. */
+static inline evtchn_port_t hyp_event_channel_bind_virq(uint32_t virq, uint32_t vcpu) {
+ evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq, .u.bind_virq = { .virq = virq, .vcpu = vcpu }};
+ if (hyp_event_channel_op(kvtolin(&op)))
+ panic("can't bind virq %d\n",virq);
+ return op.u.bind_virq.port;
+}
+
+_hypcall3(int, console_io, int, cmd, int, count, vm_offset_t /* const char * */, buffer);
+
+_hypcall3(long, grant_table_op, unsigned int, cmd, vm_offset_t /* void * */, uop, unsigned int, count);
+
+_hypcall2(long, vm_assist, unsigned int, cmd, unsigned int, type);
+
+_hypcall0(long, iret);
+
+#include <xen/public/sched.h>
+_hypcall2(long, sched_op, int, cmd, vm_offset_t /* void* */, arg)
+#define hyp_yield() hyp_sched_op(SCHEDOP_yield, 0)
+#define hyp_block() hyp_sched_op(SCHEDOP_block, 0)
+/* Ask Xen to crash this domain (SHUTDOWN_crash); should never return. */
+static inline void __attribute__((noreturn)) hyp_crash(void)
+{
+ unsigned int shut = SHUTDOWN_crash;
+ hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
+ /* really shouldn't return */
+ printf("uh, shutdown returned?!\n");
+ for(;;);
+}
+
+/* Power the domain off (SHUTDOWN_poweroff); should never return. */
+static inline void __attribute__((noreturn)) hyp_halt(void)
+{
+ unsigned int shut = SHUTDOWN_poweroff;
+ hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
+ /* really shouldn't return */
+ printf("uh, shutdown returned?!\n");
+ for(;;);
+}
+
+/* Reboot the domain (SHUTDOWN_reboot); should never return. */
+static inline void __attribute__((noreturn)) hyp_reboot(void)
+{
+ unsigned int shut = SHUTDOWN_reboot;
+ hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
+ /* really shouldn't return */
+ printf("uh, reboot returned?!\n");
+ for(;;);
+}
+
+_hypcall2(int, set_debugreg, int, reg, unsigned long, value);
+_hypcall1(unsigned long, get_debugreg, int, reg);
+
+/* x86-specific */
+/* Read the CPU timestamp counter (rdtsc) as a 64-bit cycle count. */
+static inline uint64_t hyp_cpu_clock(void) {
+ uint32_t hi, lo;
+ asm volatile("rdtsc" : "=d"(hi), "=a"(lo));
+ return (((uint64_t) hi) << 32) | lo;
+}
+
+#else /* __ASSEMBLER__ */
+/* TODO: SMP */
+#define cli movb $0xff,hyp_shared_info+CPU_CLI
+#define sti call hyp_sti
+#define iretq jmp hyp_iretq
+#endif /* ASSEMBLER */
+#endif /* MACH_XEN */
+
+#endif /* XEN_HYPCALL_H */
diff --git a/i386/i386/xpr.h b/i386/i386/xpr.h
new file mode 100644
index 0000000..19ef026
--- /dev/null
+++ b/i386/i386/xpr.h
@@ -0,0 +1,32 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: xpr.h
+ *
+ * Machine dependent module for the XPR tracing facility.
+ */
+
+#define XPR_TIMESTAMP (0)
diff --git a/i386/i386at/acpi_parse_apic.c b/i386/i386at/acpi_parse_apic.c
new file mode 100644
index 0000000..1cfc179
--- /dev/null
+++ b/i386/i386at/acpi_parse_apic.c
@@ -0,0 +1,650 @@
+/* acpi_parse_apic.h - ACPI-MADT table parser. Source file
+ Copyright (C) 2018 Juan Bosco Garcia
+ Copyright (C) 2019 2020 Almudena Garcia Jurado-Centurion
+ Written by Juan Bosco Garcia and Almudena Garcia Jurado-Centurion
+
+ This file is part of Min_SMP.
+
+ Min_SMP is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ Min_SMP is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <string.h> /* memcmp, memcpy... */
+
+#include <stdint.h> /* uint16_t, uint32_t... */
+
+#include <mach/machine.h> /* machine_slot */
+
+#include <kern/printf.h> /* printf */
+#include <kern/debug.h>
+#include <i386/vm_param.h> /* phystokv */
+#include <i386/apic.h> /* lapic, ioapic... */
+#include <i386at/acpi_parse_apic.h>
+#include <vm/vm_kern.h>
+
+static struct acpi_apic *apic_madt = NULL;
+unsigned lapic_addr;
+uint32_t *hpet_addr;
+
+/*
+ * acpi_print_info: prints the RSDP physical address, the RSDT/XSDT
+ * virtual address, and the number of entries stored in the RSDT/XSDT.
+ *
+ * Receives as input the RSDP physical address, the mapped RSDT/XSDT
+ * pointer, and the number of entries stored in the table.
+ */
+void
+acpi_print_info(phys_addr_t rsdp, void *rsdt, int acpi_rsdt_n)
+{
+
+ printf("ACPI:\n");
+ printf(" rsdp = 0x%lx\n", rsdp);
+ printf(" rsdt/xsdt = 0x%p (n = %d)\n", rsdt, acpi_rsdt_n);
+}
+
+/*
+ * acpi_checksum: sums all bytes of an ACPI table (modulo 256).
+ * Receives as input the virtual address of the table and its length.
+ *
+ * Returns 0 for a valid table, a non-zero value otherwise.
+ */
+static uint8_t
+acpi_checksum(void *addr, uint32_t length)
+{
+ uint8_t *bytes = addr;
+ uint8_t checksum = 0;
+ unsigned int i;
+
+ /* Sum all bytes of addr */
+ for (i = 0; i < length; i++)
+ checksum += bytes[i];
+
+ return checksum;
+}
+
+/*
+ * acpi_check_signature: check whether a table signature matches the
+ * expected one.
+ *
+ * Receives both signatures as parameters: the table's signature to be
+ * checked, and the genuine (expected) signature, plus its length.
+ *
+ * Returns 0 on match, non-zero otherwise (memcmp semantics).
+ */
+
+static int
+acpi_check_signature(const uint8_t table_signature[], const char *real_signature, uint8_t length)
+{
+ return memcmp(table_signature, real_signature, length);
+}
+
+
+/*
+ * acpi_check_rsdp:
+ * check if the RSDP "candidate" table is the real RSDP table.
+ *
+ * Compares the table signature with the ACPI RSDP signature and checks
+ * that the checksum is correct.
+ *
+ * Receives as input the candidate RSDP structure.
+ *
+ * Preconditions: RSDP pointer must not be NULL.
+ *
+ * Returns 1 if ACPI 1.0 and sets *sdt_base to the RSDT address.
+ * Returns 2 if ACPI >= 2.0 and sets *sdt_base to the XSDT address.
+ * Returns a negative ACPI_* error code on failure.
+ */
+static int8_t
+acpi_check_rsdp(struct acpi_rsdp2 *rsdp, phys_addr_t *sdt_base)
+{
+ int is_rsdp;
+ uint8_t cksum;
+
+ /* Check if rsdp signature match with the ACPI RSDP signature. */
+ is_rsdp = acpi_check_signature(rsdp->v1.signature, ACPI_RSDP_SIG, 8*sizeof(uint8_t));
+
+ if (is_rsdp != ACPI_SUCCESS)
+ return ACPI_BAD_SIGNATURE;
+
+ if (rsdp->v1.revision == 0) {
+ // ACPI 1.0
+ *sdt_base = rsdp->v1.rsdt_addr;
+ printf("ACPI v1.0\n");
+ cksum = acpi_checksum((void *)(&rsdp->v1), sizeof(struct acpi_rsdp));
+
+ if (cksum != 0)
+ return ACPI_BAD_CHECKSUM;
+
+ return 1;
+
+ } else if (rsdp->v1.revision == 2) {
+ // ACPI >= 2.0
+ *sdt_base = rsdp->xsdt_addr;
+ printf("ACPI >= v2.0\n");
+ cksum = acpi_checksum((void *)rsdp, sizeof(struct acpi_rsdp2));
+
+ if (cksum != 0)
+ return ACPI_BAD_CHECKSUM;
+
+ return 2;
+ }
+
+ return ACPI_NO_RSDP;
+}
+
+/*
+ * acpi_check_rsdp_align: check that the RSDP address is aligned on
+ * an ACPI_RSDP_ALIGN boundary.
+ * Preconditions: the address must not be NULL.
+ *
+ * Returns ACPI_SUCCESS (0) on success, ACPI_BAD_ALIGN on error.
+ */
+
+static int8_t
+acpi_check_rsdp_align(void *addr)
+{
+ /* check alignment. */
+ if ((uintptr_t)addr & (ACPI_RSDP_ALIGN-1))
+ return ACPI_BAD_ALIGN;
+
+ return ACPI_SUCCESS;
+}
+
+/*
+ * acpi_search_rsdp: search for the RSDP table in a memory range.
+ *
+ * Receives as input the initial virtual address and the length of
+ * the memory range; *is_64bit is set when an ACPI >= 2.0 RSDP is found.
+ *
+ * Preconditions: The start address (addr) must be aligned.
+ *
+ * Returns the physical address of the RSDT/XSDT on success, 0 on failure.
+ */
+static phys_addr_t
+acpi_search_rsdp(void *addr, uint32_t length, int *is_64bit)
+{
+ void *end;
+ int version = 0;
+ phys_addr_t sdt_base = 0;
+
+ /* Search RSDP in memory space between addr and addr+length. */
+ for (end = addr+length; addr < end; addr += ACPI_RSDP_ALIGN) {
+
+ /* Check if the current memory block stores the RSDP. */
+ if ((addr != NULL) && ((version = acpi_check_rsdp(addr, &sdt_base)) > 0)) {
+ /* If yes, return RSDT/XSDT address */
+ *is_64bit = (version == 2);
+ return sdt_base;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * acpi_get_rsdp: tries to find the RSDP table, searching the memory
+ * ranges listed in the ACPI specification (EBDA first, then the BIOS
+ * read-only area 0E0000h-0FFFFFh).
+ *
+ * Returns the physical address of the RSDT/XSDT on success, 0 on failure.
+ */
+static phys_addr_t
+acpi_get_rsdp(int *is_64bit)
+{
+ uint16_t *start = 0;
+ phys_addr_t base = 0;
+ phys_addr_t rsdp = 0;
+
+ /* EDBA start address. */
+ start = (uint16_t*) phystokv(0x040e);
+ base = phystokv((*start) << 4); /* address = paragraph number * 16 */
+
+ /* check alignment. */
+ if (acpi_check_rsdp_align((void *)base) == ACPI_BAD_ALIGN)
+ return 0;
+ rsdp = acpi_search_rsdp((void *)base, 1024, is_64bit);
+
+ if (rsdp == 0) {
+ /* If RSDP isn't in EDBA, search in the BIOS read-only memory space between 0E0000h and 0FFFFFh */
+ rsdp = acpi_search_rsdp((void *)phystokv(0xe0000), 0x100000 - 0x0e0000, is_64bit);
+ }
+
+ return rsdp;
+}
+
+/*
+ * acpi_get_rsdt: map and validate the RSDT table.
+ *
+ * Receives as input the physical address of the RSDT (as extracted from
+ * the RSDP) and a reference to store the number of RSDT entries.
+ *
+ * Returns the mapped RSDT table on success, NULL on error.
+ */
+static struct acpi_rsdt*
+acpi_get_rsdt(phys_addr_t rsdp_phys, int* acpi_rsdt_n)
+{
+ struct acpi_rsdt *rsdt = NULL;
+ int signature_check;
+
+ rsdt = (struct acpi_rsdt*) kmem_map_aligned_table(rsdp_phys, sizeof(struct acpi_rsdt), VM_PROT_READ);
+
+ /* Check if the RSDT mapping is fine. */
+ if (rsdt == NULL)
+ return NULL;
+
+ /* Check that the table signature equals the ACPI RSDT signature. */
+ signature_check = acpi_check_signature(rsdt->header.signature, ACPI_RSDT_SIG,
+ 4*sizeof(uint8_t));
+
+ if (signature_check != ACPI_SUCCESS)
+ return NULL;
+
+ /* Calculate the number of elements stored in the rsdt. */
+ *acpi_rsdt_n = (rsdt->header.length - sizeof(rsdt->header))
+ / sizeof(rsdt->entry[0]);
+
+ return rsdt;
+}
+
+/*
+ * acpi_get_xsdt: map and validate the XSDT table (ACPI >= 2.0).
+ *
+ * Receives as input the physical address of the XSDT (as extracted from
+ * the RSDPv2) and a reference to store the number of XSDT entries.
+ *
+ * Returns the mapped XSDT table on success, NULL on error.
+ */
+static struct acpi_xsdt*
+acpi_get_xsdt(phys_addr_t rsdp_phys, int* acpi_xsdt_n)
+{
+ struct acpi_xsdt *xsdt = NULL;
+ int signature_check;
+
+ xsdt = (struct acpi_xsdt*) kmem_map_aligned_table(rsdp_phys, sizeof(struct acpi_xsdt), VM_PROT_READ);
+
+ /* Check if the XSDT mapping is fine. */
+ if (xsdt == NULL)
+ return NULL;
+
+ /* Check that the table signature equals the ACPI XSDT signature. */
+ signature_check = acpi_check_signature(xsdt->header.signature, ACPI_XSDT_SIG,
+ 4*sizeof(uint8_t));
+
+ if (signature_check != ACPI_SUCCESS)
+ return NULL;
+
+ /* Calculate the number of elements stored in the xsdt. */
+ *acpi_xsdt_n = (xsdt->header.length - sizeof(xsdt->header))
+ / sizeof(xsdt->entry[0]);
+
+ return xsdt;
+}
+
+/*
+ * acpi_get_apic: get MADT/APIC table from RSDT entries.
+ *
+ * Receives as input the (mapped) RSDT initial address,
+ * and the number of entries of RSDT table.
+ *
+ * Returns a reference to APIC/MADT table if success, NULL if failure.
+ * Also sets hpet_addr to base address of HPET.
+ */
+static struct acpi_apic*
+acpi_get_apic(struct acpi_rsdt *rsdt, int acpi_rsdt_n)
+{
+ struct acpi_dhdr *descr_header;
+ struct acpi_apic *madt = NULL;
+ int check_signature;
+ uint64_t map_addr;
+
+ /* Walk the RSDT entries looking for the MADT and HPET tables. */
+ for (int i = 0; i < acpi_rsdt_n; i++) {
+ descr_header = (struct acpi_dhdr*) kmem_map_aligned_table(rsdt->entry[i], sizeof(struct acpi_dhdr),
+ VM_PROT_READ);
+
+ /* Check if the entry is a MADT. NOTE(review): descr_header is dereferenced without a NULL check — confirm kmem_map_aligned_table cannot fail here. */
+ check_signature = acpi_check_signature(descr_header->signature, ACPI_APIC_SIG, 4*sizeof(uint8_t));
+ if (check_signature == ACPI_SUCCESS)
+ madt = (struct acpi_apic*) descr_header;
+
+ /* Check if the entry is a HPET; if so, map its register block. */
+ check_signature = acpi_check_signature(descr_header->signature, ACPI_HPET_SIG, 4*sizeof(uint8_t));
+ if (check_signature == ACPI_SUCCESS) {
+ map_addr = ((struct acpi_hpet *)descr_header)->address.addr64;
+ assert (map_addr != 0);
+ hpet_addr = (uint32_t *)kmem_map_aligned_table(map_addr, 1024, VM_PROT_READ | VM_PROT_WRITE);
+ printf("HPET at physical address 0x%llx\n", map_addr);
+ }
+ }
+
+ return madt;
+}
+
+/*
+ * acpi_get_apic2: get MADT/APIC table from XSDT entries.
+ *
+ * Receives as input the (mapped) XSDT initial address,
+ * and the number of entries of XSDT table.
+ *
+ * Returns a reference to APIC/MADT table if success, NULL if failure.
+ * Also sets hpet_addr to base address of HPET.
+ */
+static struct acpi_apic*
+acpi_get_apic2(struct acpi_xsdt *xsdt, int acpi_xsdt_n)
+{
+ struct acpi_dhdr *descr_header;
+ struct acpi_apic *madt = NULL;
+ int check_signature;
+ uint64_t map_addr;
+
+ /* Walk the XSDT entries looking for the MADT and HPET tables. */
+ for (int i = 0; i < acpi_xsdt_n; i++) {
+ descr_header = (struct acpi_dhdr*) kmem_map_aligned_table(xsdt->entry[i], sizeof(struct acpi_dhdr),
+ VM_PROT_READ);
+
+ /* Check if the entry is an APIC/MADT. NOTE(review): descr_header is dereferenced without a NULL check — confirm kmem_map_aligned_table cannot fail here. */
+ check_signature = acpi_check_signature(descr_header->signature, ACPI_APIC_SIG, 4*sizeof(uint8_t));
+ if (check_signature == ACPI_SUCCESS)
+ madt = (struct acpi_apic *)descr_header;
+
+ /* Check if the entry is a HPET; if so, map its register block. */
+ check_signature = acpi_check_signature(descr_header->signature, ACPI_HPET_SIG, 4*sizeof(uint8_t));
+ if (check_signature == ACPI_SUCCESS) {
+ map_addr = ((struct acpi_hpet *)descr_header)->address.addr64;
+ assert (map_addr != 0);
+ hpet_addr = (uint32_t *)kmem_map_aligned_table(map_addr, 1024, VM_PROT_READ | VM_PROT_WRITE);
+ printf("HPET at physical address 0x%llx\n", map_addr);
+ }
+ }
+
+ return madt;
+}
+
+/*
+ * acpi_apic_add_lapic: add a new Local APIC to cpu_to_lapic array
+ * and increase the number of cpus.
+ *
+ * Receives as input the Local APIC entry in MADT/APIC table.
+ */
+static void
+acpi_apic_add_lapic(struct acpi_apic_lapic *lapic_entry)
+{
+ /* Only register the cpu if it is flagged as enabled (bit 0 of flags). */
+ if (lapic_entry->flags & 0x1) {
+ /* Add cpu to processors' list. */
+ apic_add_cpu(lapic_entry->apic_id);
+ }
+
+}
+
+/*
+ * acpi_apic_add_ioapic: add a new IOAPIC to IOAPICS array
+ * and increase the number of IOAPIC.
+ *
+ * Receives as input the IOAPIC entry in MADT/APIC table.
+ */
+
+static void
+acpi_apic_add_ioapic(struct acpi_apic_ioapic *ioapic_entry)
+{
+ IoApicData io_apic;
+
+ /* Fill IOAPIC structure with its main fields */
+ io_apic.apic_id = ioapic_entry->apic_id;
+ io_apic.addr = ioapic_entry->addr;
+ io_apic.gsi_base = ioapic_entry->gsi_base;
+ io_apic.ioapic = (ApicIoUnit *)kmem_map_aligned_table(ioapic_entry->addr,
+ sizeof(ApicIoUnit),
+ VM_PROT_READ | VM_PROT_WRITE);
+ io_apic.ioapic->select.r = APIC_IO_VERSION; /* select the IOAPIC version register */
+ io_apic.ngsis = ((io_apic.ioapic->window.r >> APIC_IO_ENTRIES_SHIFT) & 0xff) + 1; /* max redirection entry + 1 */
+
+ /* Insert IOAPIC in the list. */
+ apic_add_ioapic(io_apic);
+}
+
+
+/*
+ * acpi_apic_add_irq_override: add a new IRQ override to the overrides list
+ * and increase the number of IRQ overrides.
+ *
+ * Receives as input the IRQ override entry in MADT/APIC table.
+ */
+
+static void
+acpi_apic_add_irq_override(struct acpi_apic_irq_override* irq_override)
+{
+ IrqOverrideData irq_over;
+
+ /* Fills IRQ override structure with its fields */
+ irq_over.bus = irq_override->bus;
+ irq_over.irq = irq_override->irq;
+ irq_over.gsi = irq_override->gsi;
+ irq_over.flags = irq_override->flags;
+
+ /* Insert IRQ override in the list */
+ apic_add_irq_override(irq_over);
+}
+
+
+/*
+ * acpi_apic_parse_table: parse the MADT/APIC table.
+ *
+ * Read the APIC/MADT table entry by entry,
+ * registering the APIC structures (Local APIC, IOAPIC or IRQ override) entries in their lists.
+ */
+
+static int
+acpi_apic_parse_table(struct acpi_apic *apic)
+{
+ struct acpi_apic_dhdr *apic_entry = NULL;
+ vm_offset_t end = 0;
+ uint8_t numcpus = 1;
+
+ /* Get the address of first APIC entry */
+ apic_entry = (struct acpi_apic_dhdr*) apic->entry;
+
+ /* Get the end address of APIC table */
+ end = (vm_offset_t) apic + apic->header.length;
+
+ printf("APIC entry=0x%p end=0x%lx\n", apic_entry, (unsigned long) end);
+
+ /* Initialize number of cpus */
+ numcpus = apic_get_numcpus();
+
+ /* Search in APIC entry. */
+ while ((vm_offset_t)apic_entry < end) {
+ struct acpi_apic_lapic *lapic_entry;
+ struct acpi_apic_ioapic *ioapic_entry;
+ struct acpi_apic_irq_override *irq_override_entry;
+
+ printf("APIC entry=0x%p end=0x%lx\n", apic_entry, (unsigned long) end); /* debug trace for each entry */
+ /* Check entry type. */
+ switch(apic_entry->type) {
+
+ /* If APIC entry is a CPU's Local APIC. */
+ case ACPI_APIC_ENTRY_LAPIC:
+ if(numcpus < NCPUS) { /* ignore cpus beyond NCPUS */
+ /* Store Local APIC data. */
+ lapic_entry = (struct acpi_apic_lapic*) apic_entry;
+ acpi_apic_add_lapic(lapic_entry);
+ }
+ break;
+
+ /* If APIC entry is an IOAPIC. */
+ case ACPI_APIC_ENTRY_IOAPIC:
+
+ /* Store IOAPIC data. */
+ ioapic_entry = (struct acpi_apic_ioapic*) apic_entry;
+ acpi_apic_add_ioapic(ioapic_entry);
+
+ break;
+
+ /* If APIC entry is a IRQ Override. */
+ case ACPI_APIC_ENTRY_IRQ_OVERRIDE:
+
+ /* Store IRQ Override data. */
+ irq_override_entry = (struct acpi_apic_irq_override*) apic_entry;
+ acpi_apic_add_irq_override(irq_override_entry);
+ break;
+
+ /* FIXME: There is another unhandled case */
+ default:
+ printf("Unhandled APIC entry type 0x%x\n", apic_entry->type);
+ break;
+ }
+
+ /* Get next APIC entry (entries are variable-length). */
+ apic_entry = (struct acpi_apic_dhdr*)((vm_offset_t) apic_entry
+ + apic_entry->length);
+
+ /* Update number of cpus. */
+ numcpus = apic_get_numcpus();
+ }
+
+ return ACPI_SUCCESS;
+}
+
+
+/*
+ * acpi_apic_setup: parses the APIC/MADT table, to find the Local APIC and IOAPIC structures
+ * and the common address for Local APIC.
+ *
+ * Receives as input a reference for APIC/MADT table.
+ * Returns 0 if success.
+ *
+ * Fills the cpu_to_lapic and ioapics array, indexed by Kernel ID
+ * with a relationship between Kernel ID and APIC ID,
+ * and map the Local APIC common address, to fill the lapic reference.
+ *
+ * Precondition: The APIC pointer must not be NULL
+ */
+
+static int
+acpi_apic_setup(struct acpi_apic *apic)
+{
+ ApicLocalUnit* lapic_unit;
+ uint8_t ncpus, nioapics;
+
+ /* Map the common Local APIC register page so the kernel can access it. */
+ lapic_addr = apic->lapic_addr;
+ lapic_unit = kmem_map_aligned_table(apic->lapic_addr, sizeof(ApicLocalUnit),
+ VM_PROT_READ | VM_PROT_WRITE);
+
+ if (lapic_unit == NULL)
+ return ACPI_NO_LAPIC;
+
+ apic_lapic_init(lapic_unit);
+ acpi_apic_parse_table(apic);
+
+ ncpus = apic_get_numcpus();
+ nioapics = apic_get_num_ioapics();
+
+ /* At least one cpu and one IOAPIC must have been registered. */
+ if (ncpus == 0 || nioapics == 0 || ncpus > NCPUS)
+ return ACPI_APIC_FAILURE;
+
+ /* Refit the apic-cpu array. */
+ if(ncpus < NCPUS) {
+ int refit = apic_refit_cpulist();
+ if (refit != 0)
+ return ACPI_FIT_FAILURE;
+ }
+
+ apic_generate_cpu_id_lut();
+
+ return ACPI_SUCCESS;
+}
+
+/*
+ * acpi_apic_init: find the MADT/APIC table in ACPI tables
+ * and parses it to find Local APIC and IOAPIC structures.
+ * Each Local APIC stores the info and control structures for a cpu.
+ * The IOAPIC controls the communication of the processors with the I/O devices.
+ *
+ * Returns 0 if success, -1 if error.
+ */
+int
+acpi_apic_init(void)
+{
+ phys_addr_t rsdp = 0;
+ struct acpi_rsdt *rsdt = 0;
+ struct acpi_xsdt *xsdt = 0;
+ int acpi_rsdt_n;
+ int ret_acpi_setup;
+ int apic_init_success = 0;
+ int is_64bit = 0;
+ uint8_t checksum;
+
+ /* Try to get the RSDP physical address. */
+ rsdp = acpi_get_rsdp(&is_64bit);
+ if (rsdp == 0)
+ return ACPI_NO_RSDP;
+
+ if (!is_64bit) {
+ /* Try to get the RSDT pointer. */
+ rsdt = acpi_get_rsdt(rsdp, &acpi_rsdt_n);
+ if (rsdt == NULL)
+ return ACPI_NO_RSDT;
+
+ checksum = acpi_checksum((void *)rsdt, rsdt->header.length);
+ if (checksum != 0)
+ return ACPI_BAD_CHECKSUM;
+
+ /* Try to get the APIC table pointer. */
+ apic_madt = acpi_get_apic(rsdt, acpi_rsdt_n);
+ if (apic_madt == NULL)
+ return ACPI_NO_APIC;
+
+ checksum = acpi_checksum((void *)apic_madt, apic_madt->header.length);
+ if (checksum != 0)
+ return ACPI_BAD_CHECKSUM;
+
+ /* Print the ACPI tables addresses. */
+ acpi_print_info(rsdp, rsdt, acpi_rsdt_n);
+
+ } else {
+ /* Try to get the XSDT pointer. */
+ xsdt = acpi_get_xsdt(rsdp, &acpi_rsdt_n); /* acpi_rsdt_n receives the XSDT entry count here */
+ if (xsdt == NULL)
+ return ACPI_NO_RSDT;
+
+ checksum = acpi_checksum((void *)xsdt, xsdt->header.length);
+ if (checksum != 0)
+ return ACPI_BAD_CHECKSUM;
+
+ /* Try to get the APIC table pointer. */
+ apic_madt = acpi_get_apic2(xsdt, acpi_rsdt_n);
+ if (apic_madt == NULL)
+ return ACPI_NO_APIC;
+
+ checksum = acpi_checksum((void *)apic_madt, apic_madt->header.length);
+ if (checksum != 0)
+ return ACPI_BAD_CHECKSUM;
+
+ /* Print the ACPI tables addresses. */
+ acpi_print_info(rsdp, xsdt, acpi_rsdt_n);
+ }
+
+ apic_init_success = apic_data_init();
+ if (apic_init_success != ACPI_SUCCESS)
+ return ACPI_APIC_FAILURE;
+
+ /*
+ * Starts the parsing of APIC table, to find the APIC structures.
+ * and enumerate them. This function also finds the common Local APIC address.
+ */
+ ret_acpi_setup = acpi_apic_setup(apic_madt);
+ if (ret_acpi_setup != ACPI_SUCCESS)
+ return ret_acpi_setup;
+
+ /* Prints a table with the list of each cpu and each IOAPIC with its APIC ID. */
+ apic_print_info();
+
+ return ACPI_SUCCESS;
+}
diff --git a/i386/i386at/acpi_parse_apic.h b/i386/i386at/acpi_parse_apic.h
new file mode 100644
index 0000000..85e0117
--- /dev/null
+++ b/i386/i386at/acpi_parse_apic.h
@@ -0,0 +1,201 @@
+/* acpi_parse_apic.h - ACPI-MADT table parser. Header file
+ Copyright (C) 2018 Juan Bosco Garcia
+ Copyright (C) 2019 2020 Almudena Garcia Jurado-Centurion
+ Written by Juan Bosco Garcia and Almudena Garcia Jurado-Centurion
+
+ This file is part of Min_SMP.
+
+ Min_SMP is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ Min_SMP is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef __ACPI_H__
+#define __ACPI_H__
+
+#include <stdint.h>
+
+enum ACPI_RETURN {
+ ACPI_BAD_CHECKSUM = -1,
+ ACPI_BAD_ALIGN = -2,
+ ACPI_NO_RSDP = -3,
+ ACPI_NO_RSDT = -4,
+ ACPI_BAD_SIGNATURE = -5,
+ ACPI_NO_APIC = -6,
+ ACPI_NO_LAPIC = -7,
+ ACPI_APIC_FAILURE = -8,
+ ACPI_FIT_FAILURE = -9,
+ ACPI_SUCCESS = 0,
+};
+
+#define ACPI_RSDP_ALIGN 16
+#define ACPI_RSDP_SIG "RSD PTR "
+
+struct acpi_rsdp {
+ uint8_t signature[8];
+ uint8_t checksum;
+ uint8_t oem_id[6];
+ uint8_t revision;
+ uint32_t rsdt_addr;
+} __attribute__((__packed__));
+
+struct acpi_rsdp2 {
+ struct acpi_rsdp v1;
+ uint32_t length;
+ uint64_t xsdt_addr;
+ uint8_t checksum;
+ uint8_t reserved[3];
+} __attribute__((__packed__));
+
+/*
+ * RSDT Entry Header
+ *
+ * Header which stores the descriptors of tables pointed to from the RSDT's Entry field.
+ * Includes the signature of the table, to identify each table.
+ *
+ * In MADT, the signature is 'APIC'.
+ */
+struct acpi_dhdr {
+ uint8_t signature[4];
+ uint32_t length;
+ uint8_t revision;
+ uint8_t checksum;
+ uint8_t oem_id[6];
+ uint8_t oem_table_id[8];
+ uint32_t oem_revision;
+ uint8_t creator_id[4];
+ uint32_t creator_revision;
+} __attribute__((__packed__));
+
+
+#define ACPI_RSDT_SIG "RSDT"
+
+struct acpi_rsdt {
+ struct acpi_dhdr header;
+ uint32_t entry[0];
+} __attribute__((__packed__));
+
+#define ACPI_XSDT_SIG "XSDT"
+
+struct acpi_xsdt {
+ struct acpi_dhdr header;
+ uint64_t entry[0];
+} __attribute__((__packed__));
+
+struct acpi_address {
+ uint8_t is_io;
+ uint8_t reg_width;
+ uint8_t reg_offset;
+ uint8_t reserved;
+ uint64_t addr64;
+} __attribute__((__packed__));
+
+/* APIC table signature. */
+#define ACPI_APIC_SIG "APIC"
+
+/* Types value for MADT entries: Local APIC, IOAPIC and IRQ Override. */
+enum ACPI_APIC_ENTRY_TYPE {
+ ACPI_APIC_ENTRY_LAPIC = 0,
+ ACPI_APIC_ENTRY_IOAPIC = 1,
+ ACPI_APIC_ENTRY_IRQ_OVERRIDE = 2,
+ ACPI_APIC_ENTRY_NONMASK_IRQ = 4
+};
+
+/*
+ * APIC descriptor header
+ * Define the type of the structure (Local APIC, I/O APIC or others).
+ * Type: Local APIC (0), I/O APIC (1).
+ */
+struct acpi_apic_dhdr {
+ uint8_t type;
+ uint8_t length;
+} __attribute__((__packed__));
+
+
+/*
+ * Multiple APIC Description Table (MADT)
+ *
+ * Describes the APIC structures which exist in the machine.
+ * Includes the common address where Local APIC is mapped in main memory.
+ *
+ * Entry field stores the descriptors of APIC structures.
+ */
+struct acpi_apic {
+ struct acpi_dhdr header; /* Header, which stores the descriptor for RSDT's Entry field. */
+ uint32_t lapic_addr; /* Local Interrupt Controller Address. */
+ uint32_t flags;
+ struct acpi_apic_dhdr entry[0]; /* Interrupt Controller Structure */
+} __attribute__((__packed__));
+
+/*
+ * Processor Local APIC Structure
+ *
+ * Stores information about APIC ID, flags and ACPI Processor UID
+ */
+
+struct acpi_apic_lapic {
+ struct acpi_apic_dhdr header;
+ uint8_t processor_id; /* ACPI Processor UID */
+ uint8_t apic_id;
+ uint32_t flags;
+} __attribute__((__packed__));
+
+
+/*
+ * I/O APIC Structure
+ *
+ * Stores information about APIC ID, and I/O APIC tables
+ */
+
+struct acpi_apic_ioapic {
+ struct acpi_apic_dhdr header;
+ uint8_t apic_id;
+ uint8_t reserved;
+ uint32_t addr;
+ uint32_t gsi_base;
+} __attribute__((__packed__));
+
+/*
+ * IRQ Override structure
+ *
+ * Stores information about IRQ override, with busses and IRQ.
+ */
+
+struct acpi_apic_irq_override {
+ struct acpi_apic_dhdr header;
+ uint8_t bus;
+ uint8_t irq;
+ uint32_t gsi;
+ uint16_t flags;
+} __attribute__((__packed__));
+
+
+#define ACPI_HPET_SIG "HPET"
+
+/*
+ * HPET High Precision Event Timer structure
+ */
+struct acpi_hpet {
+ struct acpi_dhdr header;
+ uint32_t id;
+ struct acpi_address address;
+ uint8_t sequence;
+ uint16_t minimum_tick;
+ uint8_t flags;
+} __attribute__((__packed__));
+
+int acpi_apic_init(void);
+void acpi_print_info(phys_addr_t rsdp, void *rsdt, int acpi_rsdt_n);
+
+extern unsigned lapic_addr;
+
+#endif /* __ACPI_H__ */
diff --git a/i386/i386at/autoconf.c b/i386/i386at/autoconf.c
new file mode 100644
index 0000000..5c69988
--- /dev/null
+++ b/i386/i386at/autoconf.c
@@ -0,0 +1,149 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <kern/printf.h>
+#include <mach/std_types.h>
+#include <i386at/autoconf.h>
+#include <i386/irq.h>
+#include <i386/ipl.h>
+#ifdef APIC
+# include <i386/apic.h>
+#else
+# include <i386/pic.h>
+#endif
+#include <chips/busses.h>
+
+/* initialization typecasts */
+#define SPL_FIVE (vm_offset_t)SPL5
+#define SPL_SIX (vm_offset_t)SPL6
+#define SPL_TTY (vm_offset_t)SPLTTY
+
+
+#if NCOM > 0
+extern struct bus_driver comdriver;
+#include <i386at/com.h>
+#endif /* NCOM */
+
+#if NLPR > 0
+extern struct bus_driver lprdriver;
+#include <i386at/lpr.h>
+#endif /* NLPR */
+
+struct bus_ctlr bus_master_init[] = {
+
+/* driver name unit intr address len phys_address
+ adaptor alive flags spl pic */
+
+ {0}
+};
+
+
+struct bus_device bus_device_init[] = {
+
+/* driver name unit intr address am phys_address
+ adaptor alive ctlr slave flags *mi *next sysdep sysdep */
+
+#if NCOM > 0
+ {&comdriver, "com", 0, comintr, 0x3f8, 8, 0x3f8,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 4},
+ {&comdriver, "com", 1, comintr, 0x2f8, 8, 0x2f8,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 3},
+ {&comdriver, "com", 2, comintr, 0x3e8, 8, 0x3e8,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 5},
+#endif /* NCOM > 0 */
+
+#ifdef MACH_LPR
+#if NLPR > 0
+ {&lprdriver, "lpr", 0, lprintr, 0x378, 3, 0x378,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 7},
+ {&lprdriver, "lpr", 0, lprintr, 0x278, 3, 0x278,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 7},
+ {&lprdriver, "lpr", 0, lprintr, 0x3bc, 3, 0x3bc,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 7},
+#endif /* NLPR > 0 */
+#endif /* MACH_LPR */
+
+ {0}
+};
+
+/*
+ * probeio:
+ *
+ * Probe and subsequently attach devices out on the AT bus.
+ *
+ * Walks the statically-declared bus_master_init and bus_device_init
+ * tables above, configuring each entry that has a driver attached.
+ */
+void probeio(void)
+{
+ struct bus_device *device;
+ struct bus_ctlr *master;
+ int i = 0;
+
+ for (master = bus_master_init; master->driver; master++)
+ {
+ if (configure_bus_master(master->name, master->address,
+ master->phys_address, i, "atbus"))
+ i++;
+ }
+
+ for (device = bus_device_init; device->driver; device++)
+ {
+ /* ignore what we (should) have found already */
+ if (device->alive || device->ctlr >= 0)
+ continue;
+ if (configure_bus_device(device->name, device->address,
+ device->phys_address, i, "atbus"))
+ i++;
+ }
+
+#if MACH_TTD
+ /*
+ * Initialize Remote kernel debugger.
+ */
+ ttd_init();
+#endif /* MACH_TTD */
+}
+
+void take_dev_irq(
+ const struct bus_device *dev)
+{
+ int pic = (int)dev->sysdep1; /* IRQ line assigned to the device */
+
+ if (ivect[pic] == intnull) { /* IRQ vector still unclaimed? */
+ iunit[pic] = dev->unit;
+ ivect[pic] = dev->intr;
+ } else {
+ printf("The device below will clobber IRQ %d (%p).\n", pic, ivect[pic]);
+ printf("You have two devices at the same IRQ.\n");
+ printf("This won't work. Reconfigure your hardware and try again.\n");
+ printf("%s%d: port = %zx, spl = %zd, pic = %d.\n",
+ dev->name, dev->unit, dev->address,
+ dev->sysdep, dev->sysdep1);
+ while (1); /* deliberate hang: conflicting IRQ configuration is fatal */
+ }
+
+ unmask_irq(pic); /* enable delivery of this IRQ */
+}
diff --git a/i386/i386at/autoconf.h b/i386/i386at/autoconf.h
new file mode 100644
index 0000000..81fc5da
--- /dev/null
+++ b/i386/i386at/autoconf.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Device auto configuration.
+ *
+ */
+
+#ifndef _AUTOCONF_H_
+#define _AUTOCONF_H_
+
+#include <mach/std_types.h>
+#include <chips/busses.h>
+
+/*
+ * probeio:
+ *
+ * Probe and subsequently attach devices out on the AT bus.
+ *
+ *
+ */
+void probeio(void);
+
+void take_dev_irq(
+ const struct bus_device *dev);
+
+#endif /* _AUTOCONF_H_ */
diff --git a/i386/i386at/biosmem.c b/i386/i386at/biosmem.c
new file mode 100644
index 0000000..937c0e3
--- /dev/null
+++ b/i386/i386at/biosmem.c
@@ -0,0 +1,1070 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <i386/model_dep.h>
+#include <i386at/biosmem.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <mach/vm_param.h>
+#include <mach/xen.h>
+#include <mach/machine/multiboot.h>
+#include <sys/types.h>
+#include <vm/vm_page.h>
+
+#define DEBUG 0
+
+#define __boot
+#define __bootdata
+#define __init
+
+#define boot_memmove memmove
+#define boot_panic(s) panic("%s", s)
+#define boot_strlen strlen
+
+#define BOOT_CGAMEM phystokv(0xb8000)
+#define BOOT_CGACHARS (80 * 25)
+#define BOOT_CGACOLOR 0x7
+
+#define BIOSMEM_MAX_BOOT_DATA 64
+
+/*
+ * Boot data descriptor.
+ *
+ * The start and end addresses must not be page-aligned, since there
+ * could be more than one range inside a single page.
+ */
+struct biosmem_boot_data {
+ phys_addr_t start;
+ phys_addr_t end;
+ boolean_t temporary;
+};
+
+/*
+ * Sorted array of boot data descriptors.
+ */
+static struct biosmem_boot_data biosmem_boot_data_array[BIOSMEM_MAX_BOOT_DATA]
+ __bootdata;
+static unsigned int biosmem_nr_boot_data __bootdata;
+
+/*
+ * Maximum number of entries in the BIOS memory map.
+ *
+ * Because of adjustments of overlapping ranges, the memory map can grow
+ * to twice this size.
+ */
+#define BIOSMEM_MAX_MAP_SIZE 128
+
+/*
+ * Memory range types.
+ */
+#define BIOSMEM_TYPE_AVAILABLE 1
+#define BIOSMEM_TYPE_RESERVED 2
+#define BIOSMEM_TYPE_ACPI 3
+#define BIOSMEM_TYPE_NVS 4
+#define BIOSMEM_TYPE_UNUSABLE 5
+#define BIOSMEM_TYPE_DISABLED 6
+
+/*
+ * Bitmask corresponding to memory ranges that require narrowing
+ * to page boundaries.
+ */
+#define BIOSMEM_MASK_NARROW (((1u << BIOSMEM_TYPE_AVAILABLE) | \
+ (1u << BIOSMEM_TYPE_NVS) | \
+ (1u << BIOSMEM_TYPE_DISABLED)))
+
+/*
+ * Helper macro to test if range type needs narrowing.
+ */
+#define BIOSMEM_NEEDS_NARROW(t) ((1u << t) & BIOSMEM_MASK_NARROW)
+
+/*
+ * Memory map entry.
+ */
+struct biosmem_map_entry {
+ uint64_t base_addr;
+ uint64_t length;
+ unsigned int type;
+};
+
+/*
+ * Memory map built from the information passed by the boot loader.
+ *
+ * If the boot loader didn't pass a valid memory map, a simple map is built
+ * based on the mem_lower and mem_upper multiboot fields.
+ */
+static struct biosmem_map_entry biosmem_map[BIOSMEM_MAX_MAP_SIZE * 2]
+ __bootdata;
+static unsigned int biosmem_map_size __bootdata;
+
+/*
+ * Contiguous block of physical memory.
+ */
+struct biosmem_segment {
+ phys_addr_t start;
+ phys_addr_t end;
+};
+
+/*
+ * Physical segment boundaries.
+ */
+static struct biosmem_segment biosmem_segments[VM_PAGE_MAX_SEGS] __bootdata;
+
+/*
+ * Boundaries of the simple bootstrap heap.
+ *
+ * This heap is located above BIOS memory.
+ */
+static phys_addr_t biosmem_heap_start __bootdata;
+static phys_addr_t biosmem_heap_bottom __bootdata;
+static phys_addr_t biosmem_heap_top __bootdata;
+static phys_addr_t biosmem_heap_end __bootdata;
+
+/*
+ * Boot allocation policy.
+ *
+ * Top-down allocations are normally preferred to avoid unnecessarily
+ * filling the DMA segment.
+ */
+static boolean_t biosmem_heap_topdown __bootdata;
+
+static char biosmem_panic_inval_boot_data[] __bootdata
+ = "biosmem: invalid boot data";
+static char biosmem_panic_too_many_boot_data[] __bootdata
+ = "biosmem: too many boot data ranges";
+static char biosmem_panic_too_big_msg[] __bootdata
+ = "biosmem: too many memory map entries";
+#ifndef MACH_HYP
+static char biosmem_panic_setup_msg[] __bootdata
+ = "biosmem: unable to set up the early memory allocator";
+#endif /* MACH_HYP */
+static char biosmem_panic_noseg_msg[] __bootdata
+ = "biosmem: unable to find any memory segment";
+static char biosmem_panic_inval_msg[] __bootdata
+ = "biosmem: attempt to allocate 0 page";
+static char biosmem_panic_nomem_msg[] __bootdata
+ = "biosmem: unable to allocate memory";
+
+void __boot
+biosmem_register_boot_data(phys_addr_t start, phys_addr_t end,
+ boolean_t temporary)
+{
+ unsigned int i;
+
+ if (start >= end) {
+ boot_panic(biosmem_panic_inval_boot_data);
+ }
+
+ if (biosmem_nr_boot_data == ARRAY_SIZE(biosmem_boot_data_array)) {
+ boot_panic(biosmem_panic_too_many_boot_data);
+ }
+
+ for (i = 0; i < biosmem_nr_boot_data; i++) {
+ /* Check if the new range overlaps */
+ if ((end > biosmem_boot_data_array[i].start)
+ && (start < biosmem_boot_data_array[i].end)) {
+
+ /*
+ * If it does, check whether it's part of another range.
+ * For example, this applies to debugging symbols directly
+ * taken from the kernel image.
+ */
+ if ((start >= biosmem_boot_data_array[i].start)
+ && (end <= biosmem_boot_data_array[i].end)) {
+
+ /*
+ * If it's completely included, make sure that a permanent
+ * range remains permanent.
+ *
+ * XXX This means that if one big range is first registered
+ * as temporary, and a smaller range inside of it is
+ * registered as permanent, the bigger range becomes
+ * permanent. It's not easy nor useful in practice to do
+ * better than that.
+ */
+ if (biosmem_boot_data_array[i].temporary != temporary) {
+ biosmem_boot_data_array[i].temporary = FALSE;
+ }
+
+ return;
+ }
+
+ boot_panic(biosmem_panic_inval_boot_data);
+ }
+
+ if (end <= biosmem_boot_data_array[i].start) {
+ break; /* i is the insertion index; the array is sorted by start address */
+ }
+ }
+
+ boot_memmove(&biosmem_boot_data_array[i + 1],
+ &biosmem_boot_data_array[i],
+ (biosmem_nr_boot_data - i) * sizeof(*biosmem_boot_data_array)); /* shift the tail right to make room */
+
+ biosmem_boot_data_array[i].start = start;
+ biosmem_boot_data_array[i].end = end;
+ biosmem_boot_data_array[i].temporary = temporary;
+ biosmem_nr_boot_data++;
+}
+
+static void __init
+biosmem_unregister_boot_data(phys_addr_t start, phys_addr_t end)
+{
+ unsigned int i;
+
+ if (start >= end) {
+ panic("%s", biosmem_panic_inval_boot_data);
+ }
+
+ assert(biosmem_nr_boot_data != 0);
+
+ for (i = 0; i < biosmem_nr_boot_data; i++) { /* bound i: the old condition "biosmem_nr_boot_data" was always true and let i run past the array when no range matched */
+ if ((start == biosmem_boot_data_array[i].start)
+ && (end == biosmem_boot_data_array[i].end)) {
+ break;
+ }
+ }
+
+ if (i == biosmem_nr_boot_data) {
+ return; /* range not registered; nothing to do */
+ }
+
+#if DEBUG
+ printf("biosmem: unregister boot data: %llx:%llx\n",
+ (unsigned long long)biosmem_boot_data_array[i].start,
+ (unsigned long long)biosmem_boot_data_array[i].end);
+#endif /* DEBUG */
+
+ biosmem_nr_boot_data--;
+
+ boot_memmove(&biosmem_boot_data_array[i],
+ &biosmem_boot_data_array[i + 1],
+ (biosmem_nr_boot_data - i) * sizeof(*biosmem_boot_data_array));
+}
+
+#ifndef MACH_HYP
+
+static void __boot
+biosmem_map_adjust_alignment(struct biosmem_map_entry *e)
+{
+ uint64_t end = e->base_addr + e->length;
+
+ if (BIOSMEM_NEEDS_NARROW(e->type)) {
+ e->base_addr = vm_page_round (e->base_addr); /* round start up to a page boundary */
+ e->length = vm_page_trunc (end) - e->base_addr; /* truncate end down to a page boundary */
+ }
+}
+
+static void __boot
+biosmem_map_build(const struct multiboot_raw_info *mbi)
+{
+ struct multiboot_raw_mmap_entry *mb_entry, *mb_end;
+ struct biosmem_map_entry *start, *entry, *end;
+ unsigned long addr;
+
+ addr = phystokv(mbi->mmap_addr);
+ mb_entry = (struct multiboot_raw_mmap_entry *)addr;
+ mb_end = (struct multiboot_raw_mmap_entry *)(addr + mbi->mmap_length);
+ start = biosmem_map;
+ entry = start;
+ end = entry + BIOSMEM_MAX_MAP_SIZE; /* cap the number of imported entries */
+
+ while ((mb_entry < mb_end) && (entry < end)) {
+ entry->base_addr = mb_entry->base_addr;
+ entry->length = mb_entry->length;
+ entry->type = mb_entry->type;
+
+ mb_entry = (void *)mb_entry + sizeof(mb_entry->size) + mb_entry->size; /* multiboot entries are variable-sized; size excludes the size field itself */
+
+ biosmem_map_adjust_alignment(entry);
+ entry++;
+ }
+
+ biosmem_map_size = entry - start;
+}
+
+static void __boot
+biosmem_map_build_simple(const struct multiboot_raw_info *mbi)
+{
+ struct biosmem_map_entry *entry;
+
+ entry = biosmem_map;
+ entry->base_addr = 0;
+ entry->length = mbi->mem_lower << 10; /* mem_lower is in KiB */
+ entry->type = BIOSMEM_TYPE_AVAILABLE;
+ biosmem_map_adjust_alignment(entry);
+
+ entry++;
+ entry->base_addr = BIOSMEM_END; /* upper memory starts at the end of the BIOS area */
+ entry->length = mbi->mem_upper << 10; /* mem_upper is in KiB */
+ entry->type = BIOSMEM_TYPE_AVAILABLE;
+ biosmem_map_adjust_alignment(entry);
+
+ biosmem_map_size = 2;
+}
+
+#endif /* MACH_HYP */
+
+static int __boot
+biosmem_map_entry_is_invalid(const struct biosmem_map_entry *entry)
+{
+ return (entry->base_addr + entry->length) <= entry->base_addr; /* detects zero length and 64-bit wraparound */
+}
+
+static void __boot
+biosmem_map_filter(void)
+{
+ struct biosmem_map_entry *entry;
+ unsigned int i;
+
+ i = 0;
+
+ while (i < biosmem_map_size) {
+ entry = &biosmem_map[i];
+
+ if (biosmem_map_entry_is_invalid(entry)) {
+ biosmem_map_size--;
+ boot_memmove(entry, entry + 1,
+ (biosmem_map_size - i) * sizeof(*entry)); /* drop the invalid entry by shifting the tail left */
+ continue; /* re-examine index i, which now holds the next entry */
+ }
+
+ i++;
+ }
+}
+
+static void __boot
+biosmem_map_sort(void)
+{
+ struct biosmem_map_entry tmp;
+ unsigned int i, j;
+
+ /*
+ * Simple insertion sort, ordering entries by base address.
+ */
+ for (i = 1; i < biosmem_map_size; i++) {
+ tmp = biosmem_map[i];
+
+ for (j = i - 1; j < i; j--) { /* "j < i" ends the scan on unsigned wraparound when j would go below 0 */
+ if (biosmem_map[j].base_addr < tmp.base_addr)
+ break;
+
+ biosmem_map[j + 1] = biosmem_map[j];
+ }
+
+ biosmem_map[j + 1] = tmp;
+ }
+}
+
+static void __boot
+biosmem_map_adjust(void)
+{
+ struct biosmem_map_entry tmp, *a, *b, *first, *second;
+ uint64_t a_end, b_end, last_end;
+ unsigned int i, j, last_type;
+
+ biosmem_map_filter();
+
+ /*
+ * Resolve overlapping areas, giving priority to most restrictive
+ * (i.e. numerically higher) types.
+ */
+ for (i = 0; i < biosmem_map_size; i++) {
+ a = &biosmem_map[i];
+ a_end = a->base_addr + a->length;
+
+ j = i + 1;
+
+ while (j < biosmem_map_size) {
+ b = &biosmem_map[j];
+ b_end = b->base_addr + b->length;
+
+ if ((a->base_addr >= b_end) || (a_end <= b->base_addr)) { /* no overlap */
+ j++;
+ continue;
+ }
+
+ if (a->base_addr < b->base_addr) {
+ first = a;
+ second = b;
+ } else {
+ first = b;
+ second = a;
+ }
+
+ if (a_end > b_end) {
+ last_end = a_end;
+ last_type = a->type;
+ } else {
+ last_end = b_end;
+ last_type = b->type;
+ }
+
+ tmp.base_addr = second->base_addr;
+ tmp.length = MIN(a_end, b_end) - tmp.base_addr;
+ tmp.type = MAX(a->type, b->type); /* the most restrictive type wins in the overlap */
+ first->length = tmp.base_addr - first->base_addr;
+ second->base_addr += tmp.length;
+ second->length = last_end - second->base_addr;
+ second->type = last_type;
+
+ /*
+ * Filter out invalid entries.
+ */
+ if (biosmem_map_entry_is_invalid(a)
+ && biosmem_map_entry_is_invalid(b)) {
+ *a = tmp;
+ biosmem_map_size--;
+ memmove(b, b + 1, (biosmem_map_size - j) * sizeof(*b));
+ continue;
+ } else if (biosmem_map_entry_is_invalid(a)) {
+ *a = tmp;
+ j++;
+ continue;
+ } else if (biosmem_map_entry_is_invalid(b)) {
+ *b = tmp;
+ j++;
+ continue;
+ }
+
+ if (tmp.type == a->type)
+ first = a;
+ else if (tmp.type == b->type)
+ first = b;
+ else {
+
+ /*
+ * If the overlapping area can't be merged with one of its
+ * neighbors, it must be added as a new entry.
+ */
+
+ if (biosmem_map_size >= ARRAY_SIZE(biosmem_map))
+ boot_panic(biosmem_panic_too_big_msg);
+
+ biosmem_map[biosmem_map_size] = tmp;
+ biosmem_map_size++;
+ j++;
+ continue;
+ }
+
+ if (first->base_addr > tmp.base_addr)
+ first->base_addr = tmp.base_addr;
+
+ first->length += tmp.length;
+ j++;
+ }
+ }
+
+ biosmem_map_sort();
+}
+
+/*
+ * Find addresses of physical memory within a given range.
+ *
+ * This function considers the memory map with the [*phys_start, *phys_end]
+ * range on entry, and returns the lowest address of physical memory
+ * in *phys_start, and the highest address of unusable memory immediately
+ * following physical memory in *phys_end.
+ *
+ * These addresses are normally used to establish the range of a segment.
+ */
+static int __boot
+biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
+{
+ const struct biosmem_map_entry *entry, *map_end;
+ phys_addr_t seg_start, seg_end;
+ uint64_t start, end;
+
+ /* (phys_addr_t)-1 is used as a "not found yet" sentinel. */
+ seg_start = (phys_addr_t)-1;
+ seg_end = (phys_addr_t)-1;
+ map_end = biosmem_map + biosmem_map_size;
+
+ /* The map is sorted by base address (see biosmem_map_adjust()). */
+ for (entry = biosmem_map; entry < map_end; entry++) {
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ continue;
+
+ /* Round inward so only whole pages are considered. */
+ start = vm_page_round(entry->base_addr);
+
+ if (start >= *phys_end)
+ break;
+
+ end = vm_page_trunc(entry->base_addr + entry->length);
+
+ /* Record the first and last available ranges within the window. */
+ if ((start < end) && (start < *phys_end) && (end > *phys_start)) {
+ if (seg_start == (phys_addr_t)-1)
+ seg_start = start;
+
+ seg_end = end;
+ }
+ }
+
+ if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1))
+ return -1;
+
+ /* Shrink the caller's window to the discovered physical memory. */
+ if (seg_start > *phys_start)
+ *phys_start = seg_start;
+
+ if (seg_end < *phys_end)
+ *phys_end = seg_end;
+
+ return 0;
+}
+
+/*
+ * Record the physical boundaries of the given segment.
+ */
+static void __boot
+biosmem_set_segment(unsigned int seg_index, phys_addr_t start, phys_addr_t end)
+{
+ struct biosmem_segment *seg;
+
+ seg = &biosmem_segments[seg_index];
+ seg->start = start;
+ seg->end = end;
+}
+
+/*
+ * Return the end (exclusive) address of the given segment.
+ */
+static phys_addr_t __boot
+biosmem_segment_end(unsigned int seg_index)
+{
+ const struct biosmem_segment *seg = &biosmem_segments[seg_index];
+
+ return seg->end;
+}
+
+/*
+ * Return the size, in bytes, of the given segment.
+ */
+static phys_addr_t __boot
+biosmem_segment_size(unsigned int seg_index)
+{
+ const struct biosmem_segment *seg = &biosmem_segments[seg_index];
+
+ return seg->end - seg->start;
+}
+
+/*
+ * Clip the available range [*avail_start, *avail_end) so that it does not
+ * intersect the boot data range [data_start, data_end).
+ *
+ * Returns 0 on success, -1 if the boot data covers the whole end of the
+ * available range (nothing usable remains past it).
+ */
+static int __boot
+biosmem_find_avail_clip(phys_addr_t *avail_start, phys_addr_t *avail_end,
+ phys_addr_t data_start, phys_addr_t data_end)
+{
+ phys_addr_t orig_end;
+
+ assert(data_start < data_end);
+
+ /* Expand the boot data range outward to page boundaries. */
+ orig_end = data_end;
+ data_start = vm_page_trunc(data_start);
+ data_end = vm_page_round(data_end);
+
+ /* Rounding wrapped past the end of the address space. */
+ if (data_end < orig_end) {
+ boot_panic(biosmem_panic_inval_boot_data);
+ }
+
+ /* No intersection with the available range: nothing to clip. */
+ if ((data_end <= *avail_start) || (data_start >= *avail_end)) {
+ return 0;
+ }
+
+ if (data_start > *avail_start) {
+ /* Boot data in the middle or at the end: keep the front part. */
+ *avail_end = data_start;
+ } else {
+ if (data_end >= *avail_end) {
+ /* Boot data covers the entire available range. */
+ return -1;
+ }
+
+ /* Boot data at the front: keep the tail part. */
+ *avail_start = data_end;
+ }
+
+ return 0;
+}
+
+/*
+ * Find available memory in the given range.
+ *
+ * The search starts at the given start address, up to the given end address.
+ * If a range is found, it is stored through the avail_startp and avail_endp
+ * pointers.
+ *
+ * The range boundaries are page-aligned on return.
+ */
+static int __boot
+biosmem_find_avail(phys_addr_t start, phys_addr_t end,
+ phys_addr_t *avail_start, phys_addr_t *avail_end)
+{
+ phys_addr_t orig_start;
+ unsigned int i;
+ int error;
+
+ assert(start <= end);
+
+ /* Shrink the range inward to page boundaries. */
+ orig_start = start;
+ start = vm_page_round(start);
+ end = vm_page_trunc(end);
+
+ /* start < orig_start means vm_page_round() wrapped around. */
+ if ((start < orig_start) || (start >= end)) {
+ return -1;
+ }
+
+ *avail_start = start;
+ *avail_end = end;
+
+ /* Carve out every registered boot data range. */
+ for (i = 0; i < biosmem_nr_boot_data; i++) {
+ error = biosmem_find_avail_clip(avail_start, avail_end,
+ biosmem_boot_data_array[i].start,
+ biosmem_boot_data_array[i].end);
+
+ if (error) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#ifndef MACH_HYP
+
+/*
+ * Set up the early (bootstrap) page allocator heap.
+ */
+static void __boot
+biosmem_setup_allocator(const struct multiboot_raw_info *mbi)
+{
+ phys_addr_t heap_start, heap_end, max_heap_start, max_heap_end;
+ phys_addr_t start, end;
+ int error;
+
+ /*
+ * Find some memory for the heap. Look for the largest unused area in
+ * upper memory, carefully avoiding all boot data.
+ *
+ * NOTE(review): mem_upper is in KiB above 1 MiB; "(mem_upper + 1024)
+ * << 10" converts to bytes. If the shift is performed in 32-bit
+ * arithmetic this could overflow for very large memory sizes —
+ * TODO confirm the type of mem_upper and the promotion rules here.
+ */
+ end = vm_page_trunc((mbi->mem_upper + 1024) << 10);
+
+ if (end > VM_PAGE_DIRECTMAP_LIMIT)
+ end = VM_PAGE_DIRECTMAP_LIMIT;
+
+ max_heap_start = 0;
+ max_heap_end = 0;
+ start = BIOSMEM_END;
+
+ /* Scan successive available windows, keeping the largest one. */
+ for (;;) {
+ error = biosmem_find_avail(start, end, &heap_start, &heap_end);
+
+ if (error) {
+ break;
+ }
+
+ if ((heap_end - heap_start) > (max_heap_end - max_heap_start)) {
+ max_heap_start = heap_start;
+ max_heap_end = heap_end;
+ }
+
+ start = heap_end;
+ }
+
+ if (max_heap_start >= max_heap_end)
+ boot_panic(biosmem_panic_setup_msg);
+
+ biosmem_heap_start = max_heap_start;
+ biosmem_heap_end = max_heap_end;
+ biosmem_heap_bottom = biosmem_heap_start;
+ biosmem_heap_top = biosmem_heap_end;
+ biosmem_heap_topdown = TRUE;
+
+ /* Prevent biosmem_free_usable() from releasing the heap */
+ biosmem_register_boot_data(biosmem_heap_start, biosmem_heap_end, FALSE);
+}
+
+#endif /* MACH_HYP */
+
+/*
+ * Establish the physical segment boundaries (DMA, DMA32, DIRECTMAP,
+ * HIGHMEM) from the adjusted BIOS memory map.
+ *
+ * Higher segments are optional: if no physical memory is found for one,
+ * the function simply returns, leaving the remaining segments empty.
+ */
+static void __boot
+biosmem_bootstrap_common(void)
+{
+ phys_addr_t phys_start, phys_end;
+ int error;
+
+ biosmem_map_adjust();
+
+ phys_start = BIOSMEM_BASE;
+ phys_end = VM_PAGE_DMA_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ /* The DMA segment is mandatory. */
+ if (error)
+ boot_panic(biosmem_panic_noseg_msg);
+
+#if !defined(MACH_HYP) && NCPUS > 1
+ /*
+ * Grab an early page for AP boot code which needs to be below 1MB.
+ */
+ assert (phys_start < 0x100000);
+ apboot_addr = phys_start;
+ phys_start += PAGE_SIZE;
+#endif
+
+ biosmem_set_segment(VM_PAGE_SEG_DMA, phys_start, phys_end);
+
+ phys_start = VM_PAGE_DMA_LIMIT;
+
+/* DMA32 is placed before DIRECTMAP when its limit is lower. */
+#ifdef VM_PAGE_DMA32_LIMIT
+#if VM_PAGE_DMA32_LIMIT < VM_PAGE_DIRECTMAP_LIMIT
+ phys_end = VM_PAGE_DMA32_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ return;
+
+ biosmem_set_segment(VM_PAGE_SEG_DMA32, phys_start, phys_end);
+
+ phys_start = VM_PAGE_DMA32_LIMIT;
+#endif
+#endif /* VM_PAGE_DMA32_LIMIT */
+
+ phys_end = VM_PAGE_DIRECTMAP_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ return;
+
+ biosmem_set_segment(VM_PAGE_SEG_DIRECTMAP, phys_start, phys_end);
+
+ phys_start = VM_PAGE_DIRECTMAP_LIMIT;
+
+/* ... and after DIRECTMAP when its limit is higher. */
+#ifdef VM_PAGE_DMA32_LIMIT
+#if VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT
+ phys_end = VM_PAGE_DMA32_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ return;
+
+ biosmem_set_segment(VM_PAGE_SEG_DMA32, phys_start, phys_end);
+
+ phys_start = VM_PAGE_DMA32_LIMIT;
+#endif
+#endif /* VM_PAGE_DMA32_LIMIT */
+
+ phys_end = VM_PAGE_HIGHMEM_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ return;
+
+ biosmem_set_segment(VM_PAGE_SEG_HIGHMEM, phys_start, phys_end);
+}
+
+#ifdef MACH_HYP
+
+/*
+ * Bootstrap the biosmem module on Xen: build a one-entry memory map
+ * covering all pages granted by the hypervisor, then place the early
+ * heap right after the boot page tables.
+ */
+void
+biosmem_xen_bootstrap(void)
+{
+ struct biosmem_map_entry *entry;
+
+ entry = biosmem_map;
+ entry->base_addr = 0;
+ entry->length = boot_info.nr_pages << PAGE_SHIFT;
+ entry->type = BIOSMEM_TYPE_AVAILABLE;
+
+ biosmem_map_size = 1;
+
+ biosmem_bootstrap_common();
+
+ /*
+ * Heap starts after the page tables; the "+ 3" frames presumably
+ * account for extra bookkeeping frames after pt_base — TODO confirm.
+ */
+ biosmem_heap_start = _kvtophys(boot_info.pt_base)
+ + (boot_info.nr_pt_frames + 3) * 0x1000;
+ biosmem_heap_end = boot_info.nr_pages << PAGE_SHIFT;
+
+#ifndef __LP64__
+ if (biosmem_heap_end > VM_PAGE_DIRECTMAP_LIMIT)
+ biosmem_heap_end = VM_PAGE_DIRECTMAP_LIMIT;
+#endif /* __LP64__ */
+
+ biosmem_heap_bottom = biosmem_heap_start;
+ biosmem_heap_top = biosmem_heap_end;
+
+ /*
+ * XXX Allocations on Xen are initially bottom-up :
+ * At the "start of day", only 512k are available after the boot
+ * data. The pmap module then creates a 4g mapping so all physical
+ * memory is available, but it uses this allocator to do so.
+ * Therefore, it must return pages from this small 512k region
+ * first.
+ */
+ biosmem_heap_topdown = FALSE;
+
+ /*
+ * Prevent biosmem_free_usable() from releasing the Xen boot information
+ * and the heap.
+ */
+ biosmem_register_boot_data(0, biosmem_heap_end, FALSE);
+}
+
+#else /* MACH_HYP */
+
+/*
+ * Bootstrap the biosmem module on bare hardware: build the memory map
+ * from multiboot information, establish segments and set up the early
+ * page allocator.
+ */
+void __boot
+biosmem_bootstrap(const struct multiboot_raw_info *mbi)
+{
+ /* Prefer the full BIOS memory map; fall back to mem_lower/mem_upper. */
+ if (mbi->flags & MULTIBOOT_LOADER_MMAP)
+ biosmem_map_build(mbi);
+ else
+ biosmem_map_build_simple(mbi);
+
+ biosmem_bootstrap_common();
+ biosmem_setup_allocator(mbi);
+}
+
+#endif /* MACH_HYP */
+
+/*
+ * Allocate nr_pages contiguous physical pages from the bootstrap heap.
+ *
+ * Panics on a zero-sized request or when the heap is exhausted.
+ * Returns the physical address of the first page.
+ */
+unsigned long __boot
+biosmem_bootalloc(unsigned int nr_pages)
+{
+ unsigned long addr, size;
+
+ size = vm_page_ptoa(nr_pages);
+
+ if (size == 0)
+ boot_panic(biosmem_panic_inval_msg);
+
+ if (biosmem_heap_topdown) {
+ addr = biosmem_heap_top - size;
+
+ /* addr > biosmem_heap_top catches unsigned underflow. */
+ if ((addr < biosmem_heap_start) || (addr > biosmem_heap_top)) {
+ boot_panic(biosmem_panic_nomem_msg);
+ }
+
+ biosmem_heap_top = addr;
+ } else {
+ unsigned long end;
+
+ addr = biosmem_heap_bottom;
+ end = addr + size;
+
+ /* end < biosmem_heap_bottom catches unsigned overflow. */
+ if ((end > biosmem_heap_end) || (end < biosmem_heap_bottom)) {
+ boot_panic(biosmem_panic_nomem_msg);
+ }
+
+ biosmem_heap_bottom = end;
+ }
+
+ return addr;
+}
+
+/*
+ * Return the end of the highest non-empty directly mappable segment,
+ * falling back from DIRECTMAP to DMA32 (when below the directmap limit)
+ * and finally to DMA.
+ */
+phys_addr_t __boot
+biosmem_directmap_end(void)
+{
+ if (biosmem_segment_size(VM_PAGE_SEG_DIRECTMAP) != 0)
+ return biosmem_segment_end(VM_PAGE_SEG_DIRECTMAP);
+#if defined(VM_PAGE_DMA32_LIMIT) && (VM_PAGE_DMA32_LIMIT < VM_PAGE_DIRECTMAP_LIMIT)
+ if (biosmem_segment_size(VM_PAGE_SEG_DMA32) != 0)
+ return biosmem_segment_end(VM_PAGE_SEG_DMA32);
+#endif
+ return biosmem_segment_end(VM_PAGE_SEG_DMA);
+}
+
+/*
+ * Return a human-readable description of a BIOS memory map entry type.
+ */
+static const char * __init
+biosmem_type_desc(unsigned int type)
+{
+ if (type == BIOSMEM_TYPE_AVAILABLE)
+ return "available";
+
+ if (type == BIOSMEM_TYPE_RESERVED)
+ return "reserved";
+
+ if (type == BIOSMEM_TYPE_ACPI)
+ return "ACPI";
+
+ if (type == BIOSMEM_TYPE_NVS)
+ return "ACPI NVS";
+
+ if (type == BIOSMEM_TYPE_UNUSABLE)
+ return "unusable";
+
+ return "unknown (reserved)";
+}
+
+/*
+ * Print the adjusted BIOS memory map (and, in debug builds, the heap
+ * boundaries) to the kernel console.
+ */
+static void __init
+biosmem_map_show(void)
+{
+ const struct biosmem_map_entry *entry, *end;
+
+ printf("biosmem: physical memory map:\n");
+
+ for (entry = biosmem_map, end = entry + biosmem_map_size;
+ entry < end;
+ entry++)
+ printf("biosmem: %018"PRIx64":%018"PRIx64", %s\n", entry->base_addr,
+ entry->base_addr + entry->length,
+ biosmem_type_desc(entry->type));
+
+#if DEBUG
+ printf("biosmem: heap: %llx:%llx\n",
+ (unsigned long long)biosmem_heap_start,
+ (unsigned long long)biosmem_heap_end);
+#endif
+}
+
+/*
+ * Load one physical segment into the VM system, truncating it to
+ * max_phys_end, and hand over the part of the bootstrap heap that
+ * falls inside it.
+ */
+static void __init
+biosmem_load_segment(struct biosmem_segment *seg, uint64_t max_phys_end)
+{
+ phys_addr_t phys_start, phys_end, avail_start, avail_end;
+ unsigned int seg_index;
+
+ phys_start = seg->start;
+ phys_end = seg->end;
+ /* Segment index derived from position in the biosmem_segments array. */
+ seg_index = seg - biosmem_segments;
+
+ if (phys_end > max_phys_end) {
+ if (max_phys_end <= phys_start) {
+ printf("biosmem: warning: segment %s physically unreachable, "
+ "not loaded\n", vm_page_seg_name(seg_index));
+ return;
+ }
+
+ printf("biosmem: warning: segment %s truncated to %#"PRIx64"\n",
+ vm_page_seg_name(seg_index), max_phys_end);
+ phys_end = max_phys_end;
+ }
+
+ vm_page_load(seg_index, phys_start, phys_end);
+
+ /*
+ * Clip the remaining available heap to fit it into the loaded
+ * segment if possible.
+ */
+
+ if ((biosmem_heap_top > phys_start) && (biosmem_heap_bottom < phys_end)) {
+ if (biosmem_heap_bottom >= phys_start) {
+ avail_start = biosmem_heap_bottom;
+ } else {
+ avail_start = phys_start;
+ }
+
+ if (biosmem_heap_top <= phys_end) {
+ avail_end = biosmem_heap_top;
+ } else {
+ avail_end = phys_end;
+ }
+
+ vm_page_load_heap(seg_index, avail_start, avail_end);
+ }
+}
+
+/*
+ * Report the memory map and load every non-empty segment into the VM
+ * system. Segments are contiguous in the array, so the first empty one
+ * ends the scan.
+ */
+void __init
+biosmem_setup(void)
+{
+ struct biosmem_segment *seg;
+ unsigned int i;
+
+ biosmem_map_show();
+
+ for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
+ if (biosmem_segment_size(i) == 0)
+ break;
+
+ seg = &biosmem_segments[i];
+ biosmem_load_segment(seg, VM_PAGE_HIGHMEM_LIMIT);
+ }
+}
+
+/*
+ * Unregister all boot data ranges that were registered as temporary.
+ */
+static void __init
+biosmem_unregister_temporary_boot_data(void)
+{
+ struct biosmem_boot_data *data;
+ unsigned int i;
+
+ for (i = 0; i < biosmem_nr_boot_data; i++) {
+ data = &biosmem_boot_data_array[i];
+
+ if (!data->temporary) {
+ continue;
+ }
+
+ biosmem_unregister_boot_data(data->start, data->end);
+ /*
+ * Restart the scan from the beginning: setting i to
+ * (unsigned int)-1 makes the loop increment wrap it to 0.
+ * Presumably unregistering reshuffles the array, invalidating
+ * the current index — TODO confirm against
+ * biosmem_unregister_boot_data().
+ */
+ i = (unsigned int)-1;
+ }
+}
+
+/*
+ * Hand every page in [start, end) over to the VM system.
+ * Both boundaries are expected to be page-aligned.
+ */
+static void __init
+biosmem_free_usable_range(phys_addr_t start, phys_addr_t end)
+{
+ struct vm_page *page;
+
+#if DEBUG
+ printf("biosmem: release to vm_page: %llx:%llx (%lluk)\n",
+ (unsigned long long)start, (unsigned long long)end,
+ (unsigned long long)((end - start) >> 10));
+#endif
+
+ while (start < end) {
+ page = vm_page_lookup_pa(start);
+ assert(page != NULL);
+ vm_page_manage(page);
+ start += PAGE_SIZE;
+ }
+}
+
+/*
+ * Release the usable portions of one memory map range, skipping
+ * registered boot data, until no available sub-range remains.
+ */
+static void __init
+biosmem_free_usable_entry(phys_addr_t start, phys_addr_t end)
+{
+ phys_addr_t avail_start, avail_end;
+ int error;
+
+ for (;;) {
+ error = biosmem_find_avail(start, end, &avail_start, &avail_end);
+
+ if (error) {
+ break;
+ }
+
+ biosmem_free_usable_range(avail_start, avail_end);
+ /* Resume the search right after the range just released. */
+ start = avail_end;
+ }
+}
+
+/*
+ * Release to the VM system all available memory not occupied by
+ * (non-temporary) boot data, clamped to [BIOSMEM_BASE,
+ * VM_PAGE_HIGHMEM_LIMIT).
+ */
+void __init
+biosmem_free_usable(void)
+{
+ struct biosmem_map_entry *entry;
+ uint64_t start, end;
+ unsigned int i;
+
+ biosmem_unregister_temporary_boot_data();
+
+ for (i = 0; i < biosmem_map_size; i++) {
+ entry = &biosmem_map[i];
+
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ continue;
+
+ start = vm_page_round(entry->base_addr);
+
+ /* The map is sorted: everything further is also beyond the limit. */
+ if (start >= VM_PAGE_HIGHMEM_LIMIT)
+ break;
+
+ end = vm_page_trunc(entry->base_addr + entry->length);
+
+ if (end > VM_PAGE_HIGHMEM_LIMIT) {
+ end = VM_PAGE_HIGHMEM_LIMIT;
+ }
+
+ if (start < BIOSMEM_BASE)
+ start = BIOSMEM_BASE;
+
+ if (start >= end) {
+ continue;
+ }
+
+ biosmem_free_usable_entry(start, end);
+ }
+}
+
+/*
+ * Tell whether the given physical address falls inside a map entry of
+ * type BIOSMEM_TYPE_AVAILABLE. Addresses below BIOSMEM_BASE are never
+ * reported as available.
+ */
+boolean_t
+biosmem_addr_available(phys_addr_t addr)
+{
+ const struct biosmem_map_entry *entry, *end;
+
+ if (addr < BIOSMEM_BASE)
+ return FALSE;
+
+ for (entry = biosmem_map, end = entry + biosmem_map_size;
+ entry < end;
+ entry++) {
+ if ((addr >= entry->base_addr)
+ && (addr < (entry->base_addr + entry->length)))
+ return entry->type == BIOSMEM_TYPE_AVAILABLE;
+ }
+
+ return FALSE;
+}
diff --git a/i386/i386at/biosmem.h b/i386/i386at/biosmem.h
new file mode 100644
index 0000000..76ab23a
--- /dev/null
+++ b/i386/i386at/biosmem.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_BIOSMEM_H
+#define _X86_BIOSMEM_H
+
+#include <mach/machine/vm_types.h>
+#include <mach/machine/multiboot.h>
+
+/*
+ * Address where the address of the Extended BIOS Data Area segment can be
+ * found.
+ */
+#define BIOSMEM_EBDA_PTR 0x40e
+
+/*
+ * Significant low memory addresses.
+ *
+ * The first 64 KiB are reserved for various reasons (e.g. to preserve BIOS
+ * data and to work around data corruption on some hardware).
+ */
+#define BIOSMEM_BASE 0x010000
+#define BIOSMEM_BASE_END 0x0a0000
+#define BIOSMEM_EXT_ROM 0x0e0000
+#define BIOSMEM_ROM 0x0f0000
+#define BIOSMEM_END 0x100000
+
+/*
+ * Report reserved addresses to the biosmem module.
+ *
+ * Once all boot data have been registered, the user can set up the
+ * early page allocator.
+ *
+ * If the range is marked temporary, it will be unregistered when
+ * biosmem_free_usable() is called, so that pages that used to store
+ * these boot data may be released to the VM system.
+ */
+void biosmem_register_boot_data(phys_addr_t start, phys_addr_t end,
+ boolean_t temporary);
+
+/*
+ * Initialize the early page allocator.
+ *
+ * This function uses the memory map provided by the boot loader along
+ * with the registered boot data addresses to set up a heap of free pages
+ * of physical memory.
+ *
+ * Note that on Xen, this function registers all the Xen boot information
+ * as boot data itself.
+ */
+#ifdef MACH_HYP
+void biosmem_xen_bootstrap(void);
+#else /* MACH_HYP */
+void biosmem_bootstrap(const struct multiboot_raw_info *mbi);
+#endif /* MACH_HYP */
+
+/*
+ * Allocate contiguous physical pages during bootstrap.
+ *
+ * The pages returned are guaranteed to be part of the direct physical
+ * mapping when paging is enabled.
+ *
+ * This function should only be used to allocate initial page table pages.
+ * Those pages are later loaded into the VM system (as reserved pages)
+ * which means they can be freed like other regular pages. Users should
+ * fix up the type of those pages once the VM system is initialized.
+ */
+unsigned long biosmem_bootalloc(unsigned int nr_pages);
+
+/*
+ * Return the limit of physical memory that can be directly mapped.
+ */
+phys_addr_t biosmem_directmap_end(void);
+
+/*
+ * Set up physical memory based on the information obtained during bootstrap
+ * and load it in the VM system.
+ */
+void biosmem_setup(void);
+
+/*
+ * Free all usable memory.
+ *
+ * This function releases all pages that aren't used by boot data and have
+ * not already been loaded into the VM system.
+ */
+void biosmem_free_usable(void);
+
+/*
+ * Tell whether this address is marked as available in the biosmem and thus used
+ * for usable memory.
+ */
+boolean_t biosmem_addr_available(phys_addr_t addr);
+
+#endif /* _X86_BIOSMEM_H */
diff --git a/i386/i386at/boothdr.S b/i386/i386at/boothdr.S
new file mode 100644
index 0000000..daaf57d
--- /dev/null
+++ b/i386/i386at/boothdr.S
@@ -0,0 +1,179 @@
+
+#include <mach/machine/asm.h>
+#include <i386/apic.h>
+#include <i386/seg.h>
+#include <i386/i386asm.h>
+
+ /*
+ * This section will be put first into .text. See also i386/ldscript.
+ */
+ .section .text.start,"ax"
+
+ /* We should never be entered this way. */
+ .globl start,_start
+start:
+_start:
+ jmp boot_entry
+
+ /* MultiBoot header - see multiboot.h. */
+#define MULTIBOOT_MAGIC 0x1BADB002
+#ifdef __ELF__
+#define MULTIBOOT_FLAGS 0x00000003
+#else /* __ELF__ */
+#define MULTIBOOT_FLAGS 0x00010003
+#endif /* __ELF__ */
+ P2ALIGN(2)
+boot_hdr:
+ .long MULTIBOOT_MAGIC
+ .long MULTIBOOT_FLAGS
+ /*
+ * The next item here is the checksum.
+ * XX this works OK until we need at least the 30th bit.
+ */
+ .long - (MULTIBOOT_MAGIC+MULTIBOOT_FLAGS)
+#ifndef __ELF__ /* a.out kludge */
+ .long boot_hdr /* header_addr */
+ .long _start /* load_addr */
+ .long _edata /* load_end_addr */
+ .long _end /* bss_end_addr */
+ .long boot_entry /* entry */
+#endif /* __ELF__ */
+
+boot_entry:
+ /*
+ * Patch the base address of the boot GS descriptor (selector 0x68,
+ * see boot_gdt below) with the physical address of percpu_array.
+ * The base is split across the descriptor: low 16 bits, middle 8
+ * bits and high 8 bits.
+ */
+ movl $percpu_array - KERNELBASE, %eax
+ movw %ax, boot_percpu_low - KERNELBASE
+ shr $16, %eax
+ movb %al, boot_percpu_med - KERNELBASE
+ shr $8, %ax
+ movb %al, boot_percpu_high - KERNELBASE
+
+ /* use segmentation to offset ourself. */
+ lgdt boot_gdt_descr - KERNELBASE
+ ljmp $0x8,$0f
+0:
+ /* Clear stale selectors, then load boot DS (0x10) and GS (0x68). */
+ movw $0x0,%ax
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%fs
+ movw %ax,%gs
+ movw $0x10,%ax
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%ss
+ movw $0x68,%ax
+ movw %ax,%gs
+
+ /* Switch to our own interrupt stack. */
+ movl $solid_intstack+INTSTACK_SIZE-4, %esp
+ andl $0xfffffff0,%esp
+
+ /* Enable local apic in xAPIC mode */
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $APIC_MSR, %ecx
+ rdmsr
+ orl $APIC_MSR_ENABLE, %eax
+ orl $APIC_MSR_BSP, %eax
+ andl $(~APIC_MSR_X2APIC), %eax
+ movl $APIC_MSR, %ecx
+ wrmsr
+
+ /* Reset EFLAGS to a known state. */
+ pushl $0
+ popf
+
+ /* Clear uninitialized data. */
+ lea _edata,%edi
+ lea _end,%ecx
+ subl %edi,%ecx
+ xorl %eax,%eax
+ rep
+ stosb
+
+ /* Push the boot_info pointer to be the second argument. */
+ pushl %ebx
+
+ /*
+ * Fix ifunc entries: for each R_386_IRELATIVE relocation, call the
+ * resolver function and store its result back at the target offset.
+ */
+ movl $__rel_iplt_start,%esi
+ movl $__rel_iplt_end,%edi
+iplt_cont:
+ cmpl %edi,%esi
+ jae iplt_done
+ movl (%esi),%ebx /* r_offset */
+ movb 4(%esi),%al /* info */
+ cmpb $42,%al /* IRELATIVE */
+ jnz iplt_next
+ call *(%ebx) /* call ifunc */
+ movl %eax,(%ebx) /* fixed address */
+iplt_next:
+ addl $8,%esi
+ jmp iplt_cont
+iplt_done:
+
+ /* Jump into C code. */
+ call EXT(c_boot_entry)
+
+.align 16
+ .word 0
+boot_gdt_descr:
+ .word 14*8-1
+ .long boot_gdt - KERNELBASE
+.align 16
+boot_gdt:
+ /* 0 */
+ .quad 0
+
+ /* boot CS = 0x08 */
+ .word 0xffff
+ .word (-KERNELBASE) & 0xffff
+ .byte ((-KERNELBASE) >> 16) & 0xff
+ .byte ACC_PL_K | ACC_CODE_R | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+ .byte ((-KERNELBASE) >> 24) & 0xff
+
+ /* boot DS = 0x10 */
+ .word 0xffff
+ .word (-KERNELBASE) & 0xffff
+ .byte ((-KERNELBASE) >> 16) & 0xff
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+ .byte ((-KERNELBASE) >> 24) & 0xff
+
+ /* LDT = 0x18 */
+ .quad 0
+
+ /* TSS = 0x20 */
+ .quad 0
+
+ /* USER_LDT = 0x28 */
+ .quad 0
+
+ /* USER_TSS = 0x30 */
+ .quad 0
+
+ /* LINEAR = 0x38 */
+ .quad 0
+
+ /* FPREGS = 0x40 */
+ .quad 0
+
+ /* USER_GDT = 0x48 and 0x50 */
+ .quad 0
+ .quad 0
+
+ /* USER_TSS64 = 0x58 */
+ .quad 0
+
+ /* USER_TSS64 = 0x60 */
+ .quad 0
+
+ /* boot GS = 0x68 */
+ /* Base bytes are patched at boot_entry to point at percpu_array. */
+ .word 0xffff
+boot_percpu_low:
+ .word 0
+boot_percpu_med:
+ .byte 0
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+boot_percpu_high:
+ .byte 0
diff --git a/i386/i386at/com.c b/i386/i386at/com.c
new file mode 100644
index 0000000..bfe353c
--- /dev/null
+++ b/i386/i386at/com.c
@@ -0,0 +1,900 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if NCOM > 0
+
+#include <string.h>
+#include <util/atoi.h>
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <kern/printf.h>
+#include <kern/mach_clock.h>
+#include <device/conf.h>
+#include <device/device_types.h>
+#include <device/tty.h>
+#include <device/io_req.h>
+
+#include <i386/ipl.h>
+#include <i386/pio.h>
+#include <i386/machspl.h>
+#include <chips/busses.h>
+#include <i386at/autoconf.h>
+#include <i386at/com.h>
+#include <i386at/comreg.h>
+
+#include <device/cons.h>
+
+static void comparam(int);
+
+static vm_offset_t com_std[NCOM] = { 0 };
+struct bus_device *cominfo[NCOM];
+struct bus_driver comdriver = {
+ comprobe, 0, comattach, 0, com_std, "com", cominfo, 0, 0, 0};
+
+struct tty com_tty[NCOM];
+int commodem[NCOM];
+int comcarrier[NCOM] = {0, 0,};
+boolean_t comfifo[NCOM];
+boolean_t comtimer_active;
+int comtimer_state[NCOM];
+
+#define RCBAUD B115200
+static int rcline = -1;
+static struct bus_device *comcndev;
+
+/* XX */
+extern char *kernel_cmdline;
+
+#define ISPEED B115200
+#define IFLAGS (EVENP|ODDP|ECHO|CRMOD|XTABS|LITOUT)
+
+/*
+ * UART divisor latch values indexed by the B* speed code:
+ * divisor = 115200 / baud (1.8432 MHz clock / 16).
+ * (Comment labels corrected: 2000, 3600, 7200 and 56000 are not in this
+ * table, and divisor 2 corresponds to 57600 baud.)
+ */
+u_short divisorreg[] = {
+ 0, 2304, 1536, 1047, /* 0, 50, 75, 110*/
+ 857, 768, 576, 384, 192, /* 134.5, 150, 200, 300, 600*/
+ 96, 64, 48, /* 1200, 1800, 2400 */
+ 24, 12, /* 4800, 9600 */
+ 6, 3, 2, 1}; /* 19200, 38400, 57600, 115200 */
+
+
+/*
+ *
+ * Probes are called during kernel boot: return 1 to mean that
+ * the relevant device is present today.
+ *
+ */
+/*
+ * Probe for a UART at dev->address and identify the chip type.
+ * Returns 1 if a UART responds, 0 otherwise. When noisy is non-zero,
+ * the detected chip type is printed. Saved register values are
+ * restored on every failure path.
+ */
+static int
+comprobe_general(struct bus_device *dev, int noisy)
+{
+ u_short addr = dev->address;
+ int unit = dev->unit;
+ int oldctl, oldmsb;
+ char *type = "8250";
+ int i;
+
+ if ((unit < 0) || (unit >= NCOM)) {
+ printf("com %d out of range\n", unit);
+ return(0);
+ }
+ oldctl = inb(LINE_CTL(addr)); /* Save old value of LINE_CTL */
+ oldmsb = inb(BAUD_MSB(addr)); /* Save old value of BAUD_MSB */
+ outb(LINE_CTL(addr), 0); /* Select INTR_ENAB */
+ outb(BAUD_MSB(addr), 0);
+ if (inb(BAUD_MSB(addr)) != 0)
+ {
+ outb(LINE_CTL(addr), oldctl);
+ outb(BAUD_MSB(addr), oldmsb);
+ return 0;
+ }
+ outb(LINE_CTL(addr), iDLAB); /* Select BAUD_MSB */
+ outb(BAUD_MSB(addr), 255);
+ if (inb(BAUD_MSB(addr)) != 255)
+ {
+ outb(LINE_CTL(addr), oldctl);
+ outb(BAUD_MSB(addr), oldmsb);
+ return 0;
+ }
+ outb(LINE_CTL(addr), 0); /* Select INTR_ENAB */
+ if (inb(BAUD_MSB(addr)) != 0) /* Check that it has kept its value*/
+ {
+ outb(LINE_CTL(addr), oldctl);
+ outb(BAUD_MSB(addr), oldmsb);
+ return 0;
+ }
+
+ /* Com port found, now check what chip it has */
+
+ for(i = 0; i < 256; i++) /* Is there Scratch register */
+ {
+ outb(SCR(addr), i);
+ if (inb(SCR(addr)) != i)
+ break;
+ }
+ if (i == 256)
+ { /* Yes == 450 or 460 */
+ outb(SCR(addr), 0);
+ type = "82450 or 16450";
+ outb(FIFO_CTL(addr), iFIFOENA | iFIFO14CH); /* Enable fifo */
+ if ((inb(FIFO_CTL(addr)) & iFIFO14CH) != 0)
+ { /* Was it successful */
+ /* if both bits are not set then broken xx550 */
+ if ((inb(FIFO_CTL(addr)) & iFIFO14CH) == iFIFO14CH)
+ {
+ type = "82550 or 16550";
+ comfifo[unit] = TRUE;
+ }
+ else
+ {
+ type = "82550 or 16550 with non-working FIFO";
+ }
+ outb(INTR_ID(addr), 0x00); /* Disable fifos */
+ }
+ }
+ if (noisy)
+ printf("com%d: %s chip.\n", unit, type);
+ return 1;
+}
+
+/*
+ * Probe routine for use during kernel startup when it is probing
+ * all of bus_device_init
+ */
+int
+comprobe(vm_offset_t port, struct bus_ctlr *dev)
+{
+ /*
+ * The bus framework passes a struct bus_ctlr pointer, but com
+ * entries are bus_device structures, hence the cast; the port
+ * argument is unused here. Probe quietly (noisy == 0).
+ */
+ return comprobe_general((struct bus_device *)dev, /*noisy*/ 0);
+}
+
+/*
+ * Probe routine for use by the console
+ */
+/*
+ * Console probe: parse "console=comN" from the kernel command line
+ * (either mid-line, with a leading space, or at the very start) and
+ * look for a matching, probeable com device. On success, cn_pri is set
+ * to CN_REMOTE; otherwise CN_DEAD with unit -1.
+ */
+int
+comcnprobe(struct consdev *cp)
+{
+ struct bus_device *b;
+ int maj, unit, pri;
+
+#define CONSOLE_PARAMETER " console=com"
+ u_char *console = (u_char *) strstr(kernel_cmdline, CONSOLE_PARAMETER);
+
+ if (console)
+ mach_atoi(console + strlen(CONSOLE_PARAMETER), &rcline);
+
+ /* Also accept the parameter at the start of the command line
+ (CONSOLE_PARAMETER + 1 skips the leading space). */
+ if (strncmp(kernel_cmdline, CONSOLE_PARAMETER + 1,
+ strlen(CONSOLE_PARAMETER) - 1) == 0)
+ mach_atoi((u_char*)kernel_cmdline + strlen(CONSOLE_PARAMETER) - 1,
+ &rcline);
+
+ maj = 0;
+ unit = -1;
+ pri = CN_DEAD;
+
+ for (b = bus_device_init; b->driver; b++)
+ if (strcmp(b->name, "com") == 0
+ && b->unit == rcline
+ && comprobe_general(b, /*noisy*/ 0))
+ {
+ /* Found one */
+ comcndev = b;
+ unit = b->unit;
+ pri = CN_REMOTE;
+ break;
+ }
+
+ cp->cn_dev = makedev(maj, unit);
+ cp->cn_pri = pri;
+
+ return 0;
+}
+
+
+/*
+ *
+ * Device Attach's are called during kernel boot, but only if the matching
+ * device Probe returned a 1.
+ *
+ */
+void
+comattach(struct bus_device *dev)
+{
+ u_char unit = dev->unit;
+ u_short addr = dev->address;
+
+ if (unit >= NCOM) {
+ printf(", disabled by NCOM configuration\n");
+ return;
+ }
+
+ take_dev_irq(dev);
+ printf(", port = %zx, spl = %zu, pic = %d. (DOS COM%d)",
+ dev->address, dev->sysdep, dev->sysdep1, unit+1);
+
+/* comcarrier[unit] = addr->flags;*/
+ commodem[unit] = 0;
+
+ /* Disable interrupts and drop modem control lines. */
+ outb(INTR_ENAB(addr), 0);
+ outb(MODEM_CTL(addr), 0);
+ /* Drain any pending interrupt conditions before going live. */
+ while (!(inb(INTR_ID(addr))&1)) {
+ (void) inb(LINE_STAT (addr)); /* reset overrun error etc */
+ (void) inb(TXRX (addr)); /* reset data-ready */
+ (void) inb(MODEM_STAT(addr)); /* reset modem status reg */
+ }
+}
+
+/*
+ * Attach/init routine for console. This isn't called by
+ * configure_bus_device which sets the alive, adaptor, and minfo
+ * fields of the bus_device struct (comattach is), therefore we do
+ * that by hand.
+ */
+int
+comcninit(struct consdev *cp)
+{
+ u_char unit = comcndev->unit;
+ u_short addr = comcndev->address;
+
+ take_dev_irq(comcndev);
+
+ comcndev->alive = 1;
+ comcndev->adaptor = 0;
+ cominfo[minor(cp->cn_dev)] = comcndev;
+
+ /* Program the baud divisor (RCBAUD), then 8-bit frames, no
+ interrupts, and assert DTR/RTS/OUT2. */
+ outb(LINE_CTL(addr), iDLAB);
+ outb(BAUD_LSB(addr), divisorreg[RCBAUD] & 0xff);
+ outb(BAUD_MSB(addr), divisorreg[RCBAUD] >>8);
+ outb(LINE_CTL(addr), i8BITS);
+ outb(INTR_ENAB(addr), 0);
+ outb(MODEM_CTL(addr), iDTR|iRTS|iOUT2);
+
+ /* Write a banner directly into VGA text memory (0xb8000) so a user
+ watching the screen knows output went to the serial console. */
+ {
+ char msg[128];
+ volatile unsigned char *p = (volatile unsigned char *)phystokv(0xb8000);
+ int i;
+
+ sprintf(msg, " **** using COM port %d for console ****",
+ unit+1);
+ for (i = 0; msg[i]; i++) {
+ p[2*i] = msg[i];
+ p[2*i+1] = (0<<7) /* blink */
+ | (0x0<<4) /* bg */
+ | (1<<3) /* hi-intensity */
+ | 0x4; /* fg */
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Probe for COM<dev> after autoconfiguration.
+ * Used to handle PCMCIA modems, which may appear
+ * at any time.
+ */
+static boolean_t com_reprobe(
+ int unit)
+{
+ struct bus_device *device;
+
+ /*
+ * Look for COM device <unit> in the device
+ * initialization list. It must not be alive
+ * (otherwise we would have opened it already).
+ */
+ for (device = bus_device_init; device->driver; device++) {
+ if (device->driver == &comdriver && device->unit == unit &&
+ !device->alive && device->ctlr == (char)-1)
+ {
+ /*
+ * Found an entry for com port <unit>.
+ * Probe it.
+ */
+ if (configure_bus_device(device->name,
+ device->address,
+ device->phys_address,
+ 0,
+ "atbus"))
+ return TRUE;
+ }
+ }
+ /* No matching, probeable entry found. */
+ return FALSE;
+}
+
+/*
+ * Open a com port: reprobe if needed (PCMCIA modems may appear late),
+ * initialize the tty on first open, set carrier state, and drain any
+ * pending UART interrupt conditions.
+ */
+io_return_t comopen(
+ dev_t dev,
+ int flag,
+ io_req_t ior)
+{
+ int unit = minor(dev);
+ u_short addr;
+ struct bus_device *isai;
+ struct tty *tp;
+ spl_t s;
+ io_return_t result;
+
+ if (unit >= NCOM)
+ return D_NO_SUCH_DEVICE; /* no such device */
+ if ((isai = cominfo[unit]) == 0 || isai->alive == 0) {
+ /*
+ * Try to probe it again
+ */
+ if (!com_reprobe(unit))
+ return D_NO_SUCH_DEVICE;
+ if ((isai = cominfo[unit]) == 0 || isai->alive == 0)
+ return D_NO_SUCH_DEVICE;
+ }
+ tp = &com_tty[unit];
+
+ /* First open: wire up the tty callbacks and default modes. */
+ if ((tp->t_state & (TS_ISOPEN|TS_WOPEN)) == 0) {
+ ttychars(tp);
+ tp->t_addr = (char *)isai->address;
+ tp->t_dev = dev;
+ tp->t_oproc = comstart;
+ tp->t_stop = comstop;
+ tp->t_mctl = commctl;
+ tp->t_getstat = comgetstat;
+ tp->t_setstat = comsetstat;
+ if (tp->t_ispeed == 0) {
+ tp->t_ispeed = ISPEED;
+ tp->t_ospeed = ISPEED;
+ tp->t_flags = IFLAGS;
+ tp->t_state &= ~TS_BUSY;
+ }
+ }
+/*rvb tp->t_state |= TS_WOPEN; */
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ comparam(unit);
+ addr = (uintptr_t)tp->t_addr;
+
+ s = spltty();
+ if (!comcarrier[unit]) /* not originating */
+ tp->t_state |= TS_CARR_ON;
+ else {
+ /* Originating line: reflect the real carrier-detect state. */
+ int modem_stat = inb(MODEM_STAT(addr));
+ if (modem_stat & iRLSD)
+ tp->t_state |= TS_CARR_ON;
+ else
+ tp->t_state &= ~TS_CARR_ON;
+ fix_modem_state(unit, modem_stat);
+ }
+ splx(s);
+
+ result = char_open(dev, tp, flag, ior);
+
+ /* Start the watchdog timer on the first open of any com port. */
+ if (!comtimer_active) {
+ comtimer_active = TRUE;
+ comtimer(NULL);
+ }
+
+ s = spltty();
+ while(!(inb(INTR_ID(addr))&1)) { /* while pending interrupts */
+ (void) inb(LINE_STAT (addr)); /* reset overrun error */
+ (void) inb(TXRX (addr)); /* reset data-ready */
+ (void) inb(MODEM_STAT(addr)); /* reset modem status */
+ }
+ splx(s);
+ return result;
+}
+
+/*
+ * Close a com port. If hangup-on-close is set (or the port was never
+ * fully open), disable UART interrupts, drop the modem control lines
+ * and turn off the FIFOs.
+ */
+void comclose(dev_t dev, int flag)
+{
+ struct tty *tp = &com_tty[minor(dev)];
+ u_short addr = (uintptr_t)tp->t_addr;
+
+ ttyclose(tp);
+ if (tp->t_state&TS_HUPCLS || (tp->t_state&TS_ISOPEN)==0) {
+ outb(INTR_ENAB(addr), 0);
+ outb(MODEM_CTL(addr), 0);
+ tp->t_state &= ~TS_BUSY;
+ commodem[minor(dev)] = 0;
+ if (comfifo[minor(dev)] != 0)
+ outb(INTR_ID(addr), 0x00); /* Disable fifos */
+ }
+ return;
+}
+
+/* Read from a com port via the generic character-device layer. */
+io_return_t comread(dev_t dev, io_req_t ior)
+{
+ return char_read(&com_tty[minor(dev)], ior);
+}
+
+/* Write to a com port via the generic character-device layer. */
+io_return_t comwrite(dev_t dev, io_req_t ior)
+{
+ return char_write(&com_tty[minor(dev)], ior);
+}
+
+/* Handle the death of a port associated with this tty. */
+io_return_t comportdeath(dev_t dev, mach_port_t port)
+{
+ return (tty_portdeath(&com_tty[minor(dev)], (ipc_port_t)port));
+}
+
+/*
+ * Get device status. TTY_MODEM refreshes and returns the cached modem
+ * state; everything else is delegated to the generic tty layer.
+ */
+io_return_t
+comgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count /* out */
+ )
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+
+ switch (flavor) {
+ case TTY_MODEM:
+ fix_modem_state(unit, inb(MODEM_STAT(cominfo[unit]->address)));
+ *data = commodem[unit];
+ *count = 1;
+ break;
+ default:
+ result = tty_get_status(&com_tty[unit], flavor, data, count);
+ break;
+ }
+ return (result);
+}
+
+io_return_t
+comsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+ struct tty *tp = &com_tty[unit];
+
+ switch (flavor) {
+ case TTY_SET_BREAK:
+ commctl(tp, TM_BRK, DMBIS);
+ break;
+ case TTY_CLEAR_BREAK:
+ commctl(tp, TM_BRK, DMBIC);
+ break;
+ case TTY_MODEM:
+ commctl(tp, *data, DMSET);
+ break;
+ default:
+ result = tty_set_status(&com_tty[unit], flavor, data, count);
+ if (result == D_SUCCESS && flavor == TTY_STATUS)
+ comparam(unit);
+ return (result);
+ }
+ return (D_SUCCESS);
+}
+
+void
+comintr(int unit)
+{
+ struct tty *tp = &com_tty[unit];
+ u_short addr = cominfo[unit]->address;
+ static char comoverrun = 0;
+ char c, line, intr_id;
+ int line_stat;
+
+ while (! ((intr_id=(inb(INTR_ID(addr))&MASKi)) & 1))
+ switch (intr_id) {
+ case MODi:
+ /* modem change */
+ commodem_intr(unit, inb(MODEM_STAT(addr)));
+ break;
+
+ case TRAi:
+ comtimer_state[unit] = 0;
+ tp->t_state &= ~(TS_BUSY|TS_FLUSH);
+ tt_write_wakeup(tp);
+ (void) comstart(tp);
+ break;
+ case RECi:
+ case CTIi: /* Character timeout indication */
+ if (tp->t_state&TS_ISOPEN) {
+ int escape = 0;
+ while ((line = inb(LINE_STAT(addr))) & iDR) {
+ c = inb(TXRX(addr));
+
+ if (c == 0x1b) {
+ escape = 1;
+ continue;
+ }
+
+#if MACH_KDB
+ if (escape && c == 'D'-('A'-1))
+ /* ctrl-alt-d pressed,
+ invoke debugger */
+ kdb_kintr();
+ else
+#endif /* MACH_KDB */
+ if (escape) {
+ ttyinput(0x1b, tp);
+ ttyinput(c, tp);
+ }
+ else
+ ttyinput(c, tp);
+
+ escape = 0;
+ }
+
+ if (escape)
+ /* just escape */
+ ttyinput(0x1b, tp);
+ } else
+ tt_open_wakeup(tp);
+ break;
+ case LINi:
+ line_stat = inb(LINE_STAT(addr));
+
+ if ((line_stat & iPE) &&
+ ((tp->t_flags&(EVENP|ODDP)) == EVENP ||
+ (tp->t_flags&(EVENP|ODDP)) == ODDP)) {
+ /* parity error */;
+ } else if (line_stat&iOR && !comoverrun) {
+ printf("com%d: overrun\n", unit);
+ comoverrun = 1;
+ } else if (line_stat & (iFE | iBRKINTR)) {
+ /* framing error or break */
+ ttyinput(tp->t_breakc, tp);
+ }
+ break;
+ }
+}
+
+static void
+comparam(int unit)
+{
+ struct tty *tp = &com_tty[unit];
+ u_short addr = (uintptr_t)tp->t_addr;
+ spl_t s = spltty();
+ int mode;
+
+ if (tp->t_ispeed == B0) {
+ tp->t_state |= TS_HUPCLS;
+ outb(MODEM_CTL(addr), iOUT2);
+ commodem[unit] = 0;
+ splx(s);
+ return;
+ }
+
+ /* Do input buffering */
+ if (tp->t_ispeed >= B300)
+ tp->t_state |= TS_MIN;
+
+ outb(LINE_CTL(addr), iDLAB);
+ outb(BAUD_LSB(addr), divisorreg[tp->t_ispeed] & 0xff);
+ outb(BAUD_MSB(addr), divisorreg[tp->t_ispeed] >> 8);
+
+ if (tp->t_flags & (RAW|LITOUT|PASS8))
+ mode = i8BITS;
+ else
+ mode = i7BITS | iPEN;
+ if (tp->t_flags & EVENP)
+ mode |= iEPS;
+ if (tp->t_ispeed == B110)
+ /*
+ * 110 baud uses two stop bits -
+ * all other speeds use one
+ */
+ mode |= iSTB;
+
+ outb(LINE_CTL(addr), mode);
+
+ outb(INTR_ENAB(addr), iTX_ENAB|iRX_ENAB|iMODEM_ENAB|iERROR_ENAB);
+ if (comfifo[unit])
+ outb(FIFO_CTL(addr), iFIFOENA|iFIFO14CH);
+ outb(MODEM_CTL(addr), iDTR|iRTS|iOUT2);
+ commodem[unit] |= (TM_DTR|TM_RTS);
+ splx(s);
+}
+
+int comst_1, comst_2, comst_3, comst_4, comst_5 = 14;
+
+void
+comstart(struct tty *tp)
+{
+ int nch;
+#if 0
+ int i;
+#endif
+
+ if (tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) {
+comst_1++;
+ return;
+ }
+ if ((!queue_empty(&tp->t_delayed_write)) &&
+ (tp->t_outq.c_cc <= TTLOWAT(tp))) {
+comst_2++;
+ tt_write_wakeup(tp);
+ }
+ if (!tp->t_outq.c_cc) {
+comst_3++;
+ return;
+ }
+
+#if 0
+ i = (comfifo[minor(tp->t_dev)]) ? /*14*/comst_5 : 1;
+
+ tp->t_state |= TS_BUSY;
+ while (i-- > 0) {
+ nch = getc(&tp->t_outq);
+ if (nch == -1) break;
+ if ((nch & 0200) && ((tp->t_flags & LITOUT) == 0)) {
+ timeout(ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ tp->t_state |= TS_TIMEOUT;
+comst_4++;
+ return(0);
+ }
+ outb(TXRX((uintptr_t)tp->t_addr), nch);
+ }
+#else
+ nch = getc(&tp->t_outq);
+ if (nch == -1)
+ return;
+ if ((nch & 0200) && ((tp->t_flags & LITOUT) == 0)) {
+ timeout((timer_func_t *)ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ tp->t_state |= TS_TIMEOUT;
+comst_4++;
+ return;
+ }
+ outb(TXRX((uintptr_t)tp->t_addr), nch);
+ tp->t_state |= TS_BUSY;
+#endif
+}
+
+/* Check for stuck xmitters */
+int comtimer_interval = 5;
+
+void
+comtimer(void * param)
+{
+ spl_t s = spltty();
+ struct tty *tp = com_tty;
+ int i, nch;
+
+ for (i = 0; i < NCOM; i++, tp++) {
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ continue;
+ if (!tp->t_outq.c_cc)
+ continue;
+ if (++comtimer_state[i] < 2)
+ continue;
+ /* Its stuck */
+printf("Tty %p was stuck\n", tp);
+ nch = getc(&tp->t_outq);
+ outb(TXRX((uintptr_t)tp->t_addr), nch);
+ }
+
+ splx(s);
+ timeout(comtimer, 0, comtimer_interval*hz);
+}
+
+/*
+ * Set receive modem state from modem status register.
+ */
+void
+fix_modem_state(
+ int unit,
+ int modem_stat)
+{
+ int stat = 0;
+
+ if (modem_stat & iCTS)
+ stat |= TM_CTS; /* clear to send */
+ if (modem_stat & iDSR)
+ stat |= TM_DSR; /* data set ready */
+ if (modem_stat & iRI)
+ stat |= TM_RNG; /* ring indicator */
+ if (modem_stat & iRLSD)
+ stat |= TM_CAR; /* carrier? */
+
+ commodem[unit] = (commodem[unit] & ~(TM_CTS|TM_DSR|TM_RNG|TM_CAR))
+ | stat;
+}
+
+/*
+ * Modem change (input signals)
+ */
+void
+commodem_intr(
+ int unit,
+ int stat)
+{
+ int changed;
+
+ changed = commodem[unit];
+ fix_modem_state(unit, stat);
+ stat = commodem[unit];
+
+ /* Assumption: if the other party can handle
+ modem signals then it should handle all
+ the necessary ones. Else fix the cable. */
+
+ changed ^= stat; /* what changed ? */
+
+ if (changed & TM_CTS)
+ tty_cts( &com_tty[unit], stat & TM_CTS );
+
+#if 0
+ if (changed & TM_CAR)
+ ttymodem( &com_tty[unit], stat & TM_CAR );
+#endif
+
+}
+
+/*
+ * Set/get modem bits
+ */
+int
+commctl(
+ struct tty *tp,
+ int bits,
+ int how)
+{
+ spl_t s;
+ int unit;
+ vm_offset_t dev_addr;
+ int b = 0; /* Suppress gcc warning */
+
+ unit = minor(tp->t_dev);
+
+ if (bits == TM_HUP) { /* close line (internal) */
+ bits = TM_DTR | TM_RTS;
+ how = DMBIC;
+ }
+
+ if (how == DMGET) return commodem[unit];
+
+ dev_addr = cominfo[unit]->address;
+
+ s = spltty();
+
+ switch (how) {
+ case DMSET:
+ b = bits; break;
+ case DMBIS:
+ b = commodem[unit] | bits; break;
+ case DMBIC:
+ b = commodem[unit] & ~bits; break;
+ }
+ commodem[unit] = b;
+
+ if (bits & TM_BRK) {
+ if (b & TM_BRK) {
+ outb(LINE_CTL(dev_addr), inb(LINE_CTL(dev_addr)) | iSETBREAK);
+ } else {
+ outb(LINE_CTL(dev_addr), inb(LINE_CTL(dev_addr)) & ~iSETBREAK);
+ }
+ }
+
+#if 0
+ /* do I need to do something on this ? */
+ if (bits & TM_LE) { /* line enable */
+ }
+#endif
+#if 0
+ /* Unsupported */
+ if (bits & TM_ST) { /* secondary transmit */
+ }
+ if (bits & TM_SR) { /* secondary receive */
+ }
+#endif
+ if (bits & (TM_DTR|TM_RTS)) { /* data terminal ready, request to send */
+ how = iOUT2;
+ if (b & TM_DTR) how |= iDTR;
+ if (b & TM_RTS) how |= iRTS;
+ outb(MODEM_CTL(dev_addr), how);
+ }
+
+ splx(s);
+
+ /* the rest are inputs */
+ return commodem[unit];
+}
+
+void
+comstop(
+ struct tty *tp,
+ int flags)
+{
+ if ((tp->t_state & TS_BUSY) && (tp->t_state & TS_TTSTOP) == 0)
+ tp->t_state |= TS_FLUSH;
+}
+
+/*
+ *
+ * Code to be called from debugger.
+ *
+ */
+void compr_addr(vm_offset_t addr)
+{
+ /* The two line_stat prints may show different values, since
+ * touching some of the registers constitutes changing them.
+ */
+ printf("LINE_STAT(%zu) %x\n",
+ LINE_STAT(addr), inb(LINE_STAT(addr)));
+
+ printf("TXRX(%zu) %x, INTR_ENAB(%zu) %x, INTR_ID(%zu) %x, LINE_CTL(%zu) %x,\n\
+MODEM_CTL(%zu) %x, LINE_STAT(%zu) %x, MODEM_STAT(%zu) %x\n",
+ TXRX(addr), inb(TXRX(addr)),
+ INTR_ENAB(addr), inb(INTR_ENAB(addr)),
+ INTR_ID(addr), inb(INTR_ID(addr)),
+ LINE_CTL(addr), inb(LINE_CTL(addr)),
+ MODEM_CTL(addr), inb(MODEM_CTL(addr)),
+ LINE_STAT(addr), inb(LINE_STAT(addr)),
+ MODEM_STAT(addr),inb(MODEM_STAT(addr)));
+}
+
+int compr(int unit)
+{
+ compr_addr(cominfo[unit]->address);
+ return(0);
+}
+
+int
+comgetc(int unit)
+{
+ u_short addr = (u_short)(cominfo[unit]->address);
+ spl_t s = spltty();
+ int c;
+
+ while((inb(LINE_STAT(addr)) & iDR) == 0) ;
+
+ c = inb(TXRX(addr));
+ splx(s);
+ return c;
+}
+
+/*
+ * Routines for the console
+ */
+int
+comcnputc(dev_t dev, int c)
+{
+ u_short addr = (u_short)(cominfo[minor(dev)]->address);
+
+ /* Wait for transmitter to empty */
+ while((inb(LINE_STAT(addr)) & iTHRE) == 0)
+ continue;
+
+ /* send the char */
+ if (c == '\n')
+ comcnputc(dev, '\r');
+ outb(addr, c);
+
+ return 0;
+}
+
+int
+comcngetc(dev_t dev, int wait)
+{
+ u_short addr = (u_short)(cominfo[minor(dev)]->address);
+ int c;
+
+ while((inb(LINE_STAT(addr)) & iDR) == 0)
+ if (! wait)
+ return 0;
+
+ c = inb(TXRX(addr));
+ return c & 0x7f;
+}
+
+#endif /* NCOM */
diff --git a/i386/i386at/com.h b/i386/i386at/com.h
new file mode 100644
index 0000000..3be2930
--- /dev/null
+++ b/i386/i386at/com.h
@@ -0,0 +1,86 @@
+/*
+ * Communication functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Communication functions.
+ *
+ */
+
+#ifndef _COM_H_
+#define _COM_H_
+
+#include <mach/std_types.h>
+#include <device/cons.h>
+#include <device/tty.h>
+#include <chips/busses.h>
+
+/*
+ * Set receive modem state from modem status register.
+ */
+extern void fix_modem_state(int unit, int modem_stat);
+
+extern void comtimer(void * param);
+
+/*
+ * Modem change (input signals)
+ */
+extern void commodem_intr(int unit, int stat);
+
+extern int comgetc(int unit);
+
+extern int comcnprobe(struct consdev *cp);
+extern int comcninit(struct consdev *cp);
+extern int comcngetc(dev_t dev, int wait);
+extern int comcnputc(dev_t dev, int c);
+extern void comintr(int unit);
+
+int comprobe(vm_offset_t port, struct bus_ctlr *dev);
+int commctl(struct tty *tp, int bits, int how);
+void comstart(struct tty *tp);
+void comstop(struct tty *tp, int flags);
+void comattach(struct bus_device *dev);
+
+extern io_return_t
+comgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
+
+extern io_return_t
+comsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
+
+#if MACH_KDB
+extern void kdb_kintr(void);
+extern void compr_addr(vm_offset_t addr);
+extern int compr(int unit);
+#endif /* MACH_KDB */
+
+extern io_return_t comopen(dev_t dev, int flag, io_req_t ior);
+extern void comclose(dev_t dev, int flag);
+extern io_return_t comread(dev_t dev, io_req_t ior);
+extern io_return_t comwrite(dev_t dev, io_req_t ior);
+extern io_return_t comportdeath(dev_t dev, mach_port_t port);
+
+#endif /* _COM_H_ */
diff --git a/i386/i386at/comreg.h b/i386/i386at/comreg.h
new file mode 100644
index 0000000..7356574
--- /dev/null
+++ b/i386/i386at/comreg.h
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Olivetti serial port driver v1.0
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989
+ * All rights reserved.
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _COMREG_H_
+#define _COMREG_H_
+
+#define TXRX(addr) (addr + 0)
+#define BAUD_LSB(addr) (addr + 0)
+#define BAUD_MSB(addr) (addr + 1)
+#define INTR_ENAB(addr) (addr + 1)
+#define INTR_ID(addr) (addr + 2)
+#define FIFO_CTL(addr) (addr + 2)
+#define LINE_CTL(addr) (addr + 3)
+#define MODEM_CTL(addr) (addr + 4)
+#define LINE_STAT(addr) (addr + 5)
+#define MODEM_STAT(addr)(addr + 6)
+#define SCR(addr) (addr + 7)
+
+#define MODi 0
+#define TRAi 2
+#define RECi 4
+#define LINi 6
+#define CTIi 0xc
+#define MASKi 0xf
+
+/* line control register */
+#define iWLS0 0x01 /*word length select bit 0 */
+#define iWLS1 0x02 /*word length select bit 2 */
+#define iSTB 0x04 /* number of stop bits */
+#define iPEN 0x08 /* parity enable */
+#define iEPS 0x10 /* even parity select */
+#define iSP 0x20 /* stick parity */
+#define iSETBREAK 0x40 /* break key */
+#define iDLAB 0x80 /* divisor latch access bit */
+#define i5BITS 0x00 /* 5 bits per char */
+#define i6BITS 0x01 /* 6 bits per char */
+#define i7BITS 0x02 /* 7 bits per char */
+#define i8BITS 0x03 /* 8 bits per char */
+
+/* line status register */
+#define iDR 0x01 /* data ready */
+#define iOR 0x02 /* overrun error */
+#define iPE 0x04 /* parity error */
+#define iFE 0x08 /* framing error */
+#define iBRKINTR 0x10 /* a break has arrived */
+#define iTHRE 0x20 /* tx hold reg is now empty */
+#define iTSRE 0x40 /* tx shift reg is now empty */
+
+/* interrupt id regisger */
+#define iMODEM_INTR 0x01
+#define iTX_INTR 0x02
+#define iRX_INTR 0x04
+#define iERROR_INTR 0x08
+
+/* interrupt enable register */
+#define iRX_ENAB 0x01
+#define iTX_ENAB 0x02
+#define iERROR_ENAB 0x04
+#define iMODEM_ENAB 0x08
+
+/* modem control register */
+#define iDTR 0x01 /* data terminal ready */
+#define iRTS 0x02 /* request to send */
+#define iOUT1 0x04 /* COM aux line -not used */
+#define iOUT2 0x08 /* turns intr to 386 on/off */
+#define iLOOP 0x10 /* loopback for diagnostics */
+
+/* modem status register */
+#define iDCTS 0x01 /* delta clear to send */
+#define iDDSR 0x02 /* delta data set ready */
+#define iTERI 0x04 /* trail edge ring indicator */
+#define iDRLSD 0x08 /* delta rx line sig detect */
+#define iCTS 0x10 /* clear to send */
+#define iDSR 0x20 /* data set ready */
+#define iRI 0x40 /* ring indicator */
+#define iRLSD 0x80 /* rx line sig detect */
+
+/* fifo control register (only in 16550) */
+#define iFIFOENA 0x01 /* Enable fifos */
+#define iCLRRCVRFIFO 0x02 /* Clear receive fifo */
+#define iCLRXMITFIFO 0x04 /* Clear transmit fifo */
+#define iDMAMODE 0x08 /* DMA transfer enable */
+#define iFIFO1CH 0x00 /* Receive fifo trigger level 1 char */
+#define iFIFO4CH 0x40 /* Receive fifo trigger level 4 chars*/
+#define iFIFO8CH 0x80 /* Receive fifo trigger level 8 chars*/
+#define iFIFO14CH 0xc0 /* Receive fifo trigger level 14 chars*/
+
+#endif /* _COMREG_H_ */
diff --git a/i386/i386at/conf.c b/i386/i386at/conf.c
new file mode 100644
index 0000000..ecbf1e4
--- /dev/null
+++ b/i386/i386at/conf.c
@@ -0,0 +1,172 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Device switch for i386 AT bus.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <device/conf.h>
+#include <kern/mach_clock.h>
+#include <i386at/model_dep.h>
+
+#define timename "time"
+
+#ifndef MACH_HYP
+#include <i386at/kd.h>
+#define kdname "kd"
+
+#if NCOM > 0
+#include <i386at/com.h>
+#define comname "com"
+#endif /* NCOM > 0 */
+
+#if NLPR > 0
+#include <i386at/lpr.h>
+#define lprname "lpr"
+#endif /* NLPR > 0 */
+#endif /* MACH_HYP */
+
+#include <i386at/kd_event.h>
+#define kbdname "kbd"
+
+#ifndef MACH_HYP
+#include <i386at/kd_mouse.h>
+#define mousename "mouse"
+
+#include <i386at/mem.h>
+#define memname "mem"
+#endif /* MACH_HYP */
+
+#include <device/kmsg.h>
+#define kmsgname "kmsg"
+
+#ifdef MACH_HYP
+#include <xen/console.h>
+#define hypcnname "hyp"
+#endif /* MACH_HYP */
+
+#include <device/intr.h>
+#define irqname "irq"
+
+/*
+ * List of devices - console must be at slot 0
+ */
+struct dev_ops dev_name_list[] =
+{
+ /*name, open, close, read,
+ write, getstat, setstat, mmap,
+ async_in, reset, port_death, subdev,
+ dev_info */
+
+ /* We don't assign a console here, when we find one via
+ cninit() we stick something appropriate here through the
+ indirect list */
+ { "cn", nulldev_open, nulldev_close, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat, nomap,
+ nodev_async_in, nulldev_reset, nulldev_portdeath, 0,
+ nodev_info},
+
+#ifndef MACH_HYP
+#if ENABLE_IMMEDIATE_CONSOLE
+ { "immc", nulldev_open, nulldev_close, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat,
+ nomap, nodev_async_in, nulldev_reset, nulldev_portdeath, 0,
+ nodev_info },
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+ { kdname, kdopen, kdclose, kdread,
+ kdwrite, kdgetstat, kdsetstat, kdmmap,
+ nodev_async_in, nulldev_reset, kdportdeath, 0,
+ nodev_info },
+#endif /* MACH_HYP */
+
+ { timename, timeopen, timeclose, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat, timemmap,
+ nodev_async_in, nulldev_reset, nulldev_portdeath, 0,
+ nodev_info },
+
+#ifndef MACH_HYP
+#if NCOM > 0
+ { comname, comopen, comclose, comread,
+ comwrite, comgetstat, comsetstat, nomap,
+ nodev_async_in, nulldev_reset, comportdeath, 0,
+ nodev_info },
+#endif
+
+#ifdef MACH_LPR
+ { lprname, lpropen, lprclose, lprread,
+ lprwrite, lprgetstat, lprsetstat, nomap,
+ nodev_async_in, nulldev_reset, lprportdeath, 0,
+ nodev_info },
+#endif
+
+ { mousename, mouseopen, mouseclose, mouseread,
+ nulldev_write, mousegetstat, nulldev_setstat, nomap,
+ nodev_async_in, nulldev_reset, nulldev_portdeath, 0,
+ nodev_info },
+
+ { kbdname, kbdopen, kbdclose, kbdread,
+ nulldev_write, kbdgetstat, kbdsetstat, nomap,
+ nodev_async_in, nulldev_reset, nulldev_portdeath, 0,
+ nodev_info },
+
+ { memname, nulldev_open, nulldev_close, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat, memmmap,
+ nodev_async_in, nulldev_reset, nulldev_portdeath, 0,
+ nodev_info },
+#endif /* MACH_HYP */
+
+#ifdef MACH_KMSG
+ { kmsgname, kmsgopen, kmsgclose, kmsgread,
+ nulldev_write, kmsggetstat, nulldev_setstat, nomap,
+ nodev_async_in, nulldev_reset, nulldev_portdeath, 0,
+ nodev_info },
+#endif
+
+#ifdef MACH_HYP
+ { hypcnname, hypcnopen, hypcnclose, hypcnread,
+ hypcnwrite, hypcngetstat, hypcnsetstat, nomap,
+ nodev_async_in, nulldev_reset, hypcnportdeath, 0,
+ nodev_info },
+#endif /* MACH_HYP */
+
+ { irqname, nulldev_open, nulldev_close, nulldev_read,
+ nulldev_write,nulldev_getstat,nulldev_setstat, nomap,
+ nodev_async_in, nulldev_reset, nulldev_portdeath,0,
+ nodev_info },
+
+};
+int dev_name_count = sizeof(dev_name_list)/sizeof(dev_name_list[0]);
+
+/*
+ * Indirect list.
+ */
+struct dev_indirect dev_indirect_list[] = {
+
+ /* console */
+ { "console", &dev_name_list[0], 0 }
+};
+int dev_indirect_count = sizeof(dev_indirect_list)
+ / sizeof(dev_indirect_list[0]);
diff --git a/i386/i386at/cons_conf.c b/i386/i386at/cons_conf.c
new file mode 100644
index 0000000..1d7dd38
--- /dev/null
+++ b/i386/i386at/cons_conf.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 1988-1994, The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: cons_conf.c 1.7 94/12/14$
+ */
+
+/*
+ * This entire table could be autoconfig()ed but that would mean that
+ * the kernel's idea of the console would be out of sync with that of
+ * the standalone boot. I think it best that they both use the same
+ * known algorithm unless we see a pressing need otherwise.
+ */
+#include <sys/types.h>
+#include <device/cons.h>
+
+#ifdef MACH_HYP
+#include <xen/console.h>
+#else /* MACH_HYP */
+#include "kd.h"
+#if NCOM > 0
+#include "com.h"
+#endif
+#endif /* MACH_HYP */
+
+#if ENABLE_IMMEDIATE_CONSOLE
+#include "immc.h"
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+
+/*
+ * The rest of the consdev fields are filled in by the respective
+ * cnprobe routine.
+ */
+struct consdev constab[] = {
+#ifdef MACH_HYP
+ {"hyp", hypcnprobe, hypcninit, hypcngetc, hypcnputc},
+#else /* MACH_HYP */
+#if ENABLE_IMMEDIATE_CONSOLE
+ {"immc", immc_cnprobe, immc_cninit, immc_cngetc, immc_cnputc},
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+ {"kd", kdcnprobe, kdcninit, kdcngetc, kdcnputc},
+#if NCOM > 0
+ {"com", comcnprobe, comcninit, comcngetc, comcnputc},
+#endif
+#endif /* MACH_HYP */
+ {0}
+};
diff --git a/i386/i386at/cram.h b/i386/i386at/cram.h
new file mode 100644
index 0000000..ac40cf1
--- /dev/null
+++ b/i386/i386at/cram.h
@@ -0,0 +1,86 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * cram.h
+ */
+
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _CRAM_H_
+#define _CRAM_H_
+
+/* XXX: this conflicts with read/writing the RTC */
+
+/*
+ * outb(CMOS_ADDR, addr);
+ * result = inb(CMOS_DATA);
+ *
+ * where "addr" tells what value you want to read (some are listed
+ * below). Interrupts should be disabled while you do this.
+ */
+
+/* I/O ports */
+
+#define CMOS_ADDR 0x70 /* port for CMOS ram address */
+#define CMOS_DATA 0x71 /* port for CMOS ram data */
+
+
+/* Addresses, related masks, and potential results */
+
+#define CMOS_SHUTDOWN 0xf
+#define CM_NORM_RST 0x0
+#define CM_LOAD_SYS 0x4
+#define CM_JMP_467 0xa
+
+#define CMOS_EB 0x14 /* read Equipment Byte */
+#define CM_SCRMSK 0x30 /* mask for EB query to get screen */
+#define CM_EGA_VGA 0x00 /* "not CGA or MONO" */
+#define CM_CGA_40 0x10
+#define CM_CGA_80 0x20
+#define CM_MONO_80 0x30
+
+#endif /* _CRAM_H_ */
diff --git a/i386/i386at/disk.h b/i386/i386at/disk.h
new file mode 100644
index 0000000..c558375
--- /dev/null
+++ b/i386/i386at/disk.h
@@ -0,0 +1,89 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * disk.h
+ */
+
+#ifndef _DISK_H_
+#define _DISK_H_
+
+#define V_NUMPAR 16 /* maximum number of partitions */
+
+#define VTOC_SANE 0x600DDEEE /* Indicates a sane VTOC */
+#define PDLOCATION 29 /* location of VTOC */
+
+#define LBLLOC 1 /* label block for xxxbsd */
+
+struct localpartition {
+ u_int p_flag; /*permision flags*/
+ long p_start; /*physical start sector no of partition*/
+ long p_size; /*# of physical sectors in partition*/
+};
+typedef struct localpartition localpartition_t;
+
+struct evtoc {
+ u_int fill0[6];
+ u_int cyls; /*number of cylinders per drive*/
+ u_int tracks; /*number tracks per cylinder*/
+ u_int sectors; /*number sectors per track*/
+ u_int fill1[13];
+ u_int version; /*layout version*/
+ u_int alt_ptr; /*byte offset of alternates table*/
+ u_short alt_len; /*byte length of alternates table*/
+ u_int sanity; /*to verify vtoc sanity*/
+ u_int xcyls; /*number of cylinders per drive*/
+ u_int xtracks; /*number tracks per cylinder*/
+ u_int xsectors; /*number sectors per track*/
+ u_short nparts; /*number of partitions*/
+ u_short fill2; /*pad for 286 compiler*/
+ char label[40];
+ struct localpartition part[V_NUMPAR];/*partition headers*/
+ char fill[512-352];
+};
+
+#endif /* _DISK_H_ */
diff --git a/i386/i386at/elf.h b/i386/i386at/elf.h
new file mode 100644
index 0000000..26f4d87
--- /dev/null
+++ b/i386/i386at/elf.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_ELF_H
+#define _X86_ELF_H
+
+#define ELF_SHT_SYMTAB 2
+#define ELF_SHT_STRTAB 3
+
+struct elf_shdr {
+ unsigned int name;
+ unsigned int type;
+ unsigned int flags;
+ unsigned long addr;
+ unsigned long offset;
+ unsigned int size;
+ unsigned int link;
+ unsigned int info;
+ unsigned int addralign;
+ unsigned int entsize;
+};
+
+#ifdef __LP64__
+
+struct elf_sym {
+ unsigned int name;
+ unsigned char info;
+ unsigned char other;
+ unsigned short shndx;
+ unsigned long value;
+ unsigned long size;
+};
+
+#else /* __LP64__ */
+
+struct elf_sym {
+ unsigned int name;
+ unsigned long value;
+ unsigned long size;
+ unsigned char info;
+ unsigned char other;
+ unsigned short shndx;
+};
+
+#endif /* __LP64__ */
+
+#endif /* _X86_ELF_H */
diff --git a/i386/i386at/i8250.h b/i386/i386at/i8250.h
new file mode 100644
index 0000000..9b8a801
--- /dev/null
+++ b/i386/i386at/i8250.h
@@ -0,0 +1,134 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Header file for i8250 chip
+ */
+
+#ifndef _I8250_H_
+#define _I8250_H_
+
+/* port offsets from the base i/o address */
+
+#define RDAT 0
+#define RIE 1
+#define RID 2
+#define RFC 2
+#define RLC 3
+#define RMC 4
+#define RLS 5
+#define RMS 6
+#define RDLSB 0
+#define RDMSB 1
+
+/* interrupt control register */
+
+#define IERD 0x01 /* read int */
+#define IETX 0x02 /* xmit int */
+#define IELS 0x04 /* line status int */
+#define IEMS 0x08 /* modem int */
+
+/* interrupt status register */
+
+#define IDIP 0x01 /* not interrupt pending */
+#define IDMS 0x00 /* modem int */
+#define IDTX 0x02 /* xmit int */
+#define IDRD 0x04 /* read int */
+#define IDLS 0x06 /* line status int */
+#define IDMASK 0x0f /* interrupt ID mask */
+
+/* line control register */
+
+#define LC5 0x00 /* word length 5 */
+#define LC6 0x01 /* word length 6 */
+#define LC7 0x02 /* word length 7 */
+#define LC8 0x03 /* word length 8 */
+#define LCSTB 0x04 /* 2 stop */
+#define LCPEN 0x08 /* parity enable */
+#define LCEPS 0x10 /* even parity select */
+#define LCSP 0x20 /* stick parity */
+#define LCBRK 0x40 /* send break */
+#define LCDLAB 0x80 /* divisor latch access bit */
+#define LCPAR 0x38 /* parity mask */
+
+/* line status register */
+
+#define LSDR 0x01 /* data ready */
+#define LSOR 0x02 /* overrun error */
+#define LSPE 0x04 /* parity error */
+#define LSFE 0x08 /* framing error */
+#define LSBI 0x10 /* break interrupt */
+#define LSTHRE 0x20 /* xmit holding reg empty */
+#define LSTSRE 0x40 /* xmit shift reg empty */
+
+/* modem control register */
+
+#define MCDTR 0x01 /* DTR */
+#define MCRTS 0x02 /* RTS */
+#define MCOUT1 0x04 /* OUT1 */
+#define MCOUT2 0x08 /* OUT2 */
+#define MCLOOP 0x10 /* loopback */
+
+/* modem status register */
+
+#define MSDCTS 0x01 /* delta CTS */
+#define MSDDSR 0x02 /* delta DSR */
+#define MSTERI 0x04 /* delta RI */
+#define MSDRLSD 0x08 /* delta CD */
+#define MSCTS 0x10 /* CTS */
+#define MSDSR 0x20 /* DSR */
+#define MSRI 0x40 /* RI */
+#define MSRLSD 0x80 /* CD */
+
+/* divisor latch register settings for various baud rates */
+
+#define BCNT1200 0x60
+#define BCNT2400 0x30
+#define BCNT4800 0x18
+#define BCNT9600 0x0c
+
+#endif /* _I8250_H_ */
diff --git a/i386/i386at/idt.h b/i386/i386at/idt.h
new file mode 100644
index 0000000..19e0abe
--- /dev/null
+++ b/i386/i386at/idt.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifndef _I386AT_IDT_
+#define _I386AT_IDT_
+
+/* There are 256 interrupt vectors on x86,
+ * the first 32 are taken by cpu faults */
+#define IDTSZ (0x100)
+
+/* PIC sits at 0x20-0x2f */
+#define PIC_INT_BASE 0x20
+
+/* IOAPIC sits at 0x30-0x47 */
+#define IOAPIC_INT_BASE 0x30
+
+/* IOAPIC spurious interrupt vector set to 0xff */
+#define IOAPIC_SPURIOUS_BASE 0xff
+
+/* Remote -> local AST requests */
+#define CALL_AST_CHECK 0xfa
+
+/* Currently for TLB shootdowns */
+#define CALL_PMAP_UPDATE 0xfb
+
+#include <i386/idt-gen.h>
+
+#ifndef __ASSEMBLER__
+extern void idt_init (void);
+extern void ap_idt_init (int cpu);
+#endif /* __ASSEMBLER__ */
+
+#endif /* _I386AT_IDT_ */
diff --git a/i386/i386at/immc.c b/i386/i386at/immc.c
new file mode 100644
index 0000000..00fc973
--- /dev/null
+++ b/i386/i386at/immc.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#if ENABLE_IMMEDIATE_CONSOLE
+
+#include <device/cons.h>
+#include <mach/boolean.h>
+#include <i386/vm_param.h>
+#include <string.h>
+
+/* This is a special "feature" (read: kludge)
+ intended for use only for kernel debugging.
+ It enables an extremely simple console output mechanism
+ that sends text straight to CGA/EGA/VGA video memory.
+ It has the nice property of being functional right from the start,
+ so it can be used to debug things that happen very early
+ before any devices are initialized. */
+
+boolean_t immediate_console_enable = TRUE;
+
+/*
+ * XXX we assume that pcs *always* have a console
+ */
+int
+immc_cnprobe(struct consdev *cp)
+{
+ int maj, unit, pri;
+
+ maj = 0;
+ unit = 0;
+ pri = CN_INTERNAL;
+
+ cp->cn_dev = makedev(maj, unit);
+ cp->cn_pri = pri;
+ return 0;
+}
+
+int
+immc_cninit(struct consdev *cp)
+{
+ return 0;
+}
+
+int immc_cnmaygetc(void)
+{
+ return -1;
+}
+
+int
+immc_cngetc(dev_t dev, int wait)
+{
+ if (wait) {
+ int c;
+ while ((c = immc_cnmaygetc()) < 0)
+ continue;
+ return c;
+ }
+ else
+ return immc_cnmaygetc();
+}
+
+int
+immc_cnputc(dev_t dev, int c)
+{
+ static int ofs = -1;
+
+ if (!immediate_console_enable)
+ return -1;
+ if (ofs < 0 || ofs >= 80)
+ {
+ ofs = 0;
+ immc_cnputc(dev, '\n');
+ }
+
+ if (c == '\n')
+ {
+ memmove((void *) phystokv(0xb8000),
+ (void *) phystokv(0xb8000+80*2), 80*2*24);
+ memset((void *) phystokv((0xb8000+80*2*24)), 0, 80*2);
+ ofs = 0;
+ }
+ else if (c == '\r')
+ {
+ ofs = 0;
+ }
+ else if (c == '\t')
+ {
+ ofs = (ofs & ~7) + 8;
+ }
+ else
+ {
+ volatile unsigned char *p;
+
+ if (ofs >= 80)
+ {
+ immc_cnputc(dev, '\r');
+ immc_cnputc(dev, '\n');
+ }
+
+ p = (void *) phystokv(0xb8000 + 80*2*24 + ofs*2);
+ p[0] = c;
+ p[1] = 0x0f;
+ ofs++;
+ }
+ return 0;
+}
+
+void
+immc_romputc(char c)
+{
+ immc_cnputc (0, c);
+}
+
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
diff --git a/i386/i386at/immc.h b/i386/i386at/immc.h
new file mode 100644
index 0000000..dc802c8
--- /dev/null
+++ b/i386/i386at/immc.h
@@ -0,0 +1,31 @@
+/* Declarations for the immediate console.
+
+ Copyright (C) 2015 Free Software Foundation, Inc.
+
+ This file is part of the GNU Mach.
+
+ The GNU Mach is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the GNU Mach. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef _IMMC_H_
+#define _IMMC_H_
+
+#include <sys/types.h>
+
+int immc_cnprobe(struct consdev *cp);
+int immc_cninit(struct consdev *cp);
+int immc_cngetc(dev_t dev, int wait);
+int immc_cnputc(dev_t dev, int c);
+void immc_romputc(char c);
+
+#endif /* _IMMC_H_ */
diff --git a/i386/i386at/int_init.c b/i386/i386at/int_init.c
new file mode 100644
index 0000000..5c8fce6
--- /dev/null
+++ b/i386/i386at/int_init.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <i386at/idt.h>
+#include <i386at/int_init.h>
+#include <i386/gdt.h>
+#include <i386/mp_desc.h>
+#include <kern/printf.h>
+#ifdef APIC
+#include <i386/apic.h>
+#endif
+
+/* defined in locore.S */
+extern vm_offset_t int_entry_table[];
+
+static void
+int_fill(struct real_gate *myidt)
+{
+ int i;
+#ifndef APIC
+ int base = PIC_INT_BASE;
+ int nirq = 16;
+#else
+ int base = IOAPIC_INT_BASE;
+ int nirq = NINTR;
+#endif
+
+ for (i = 0; i < nirq; i++) {
+ fill_idt_gate(myidt, base + i,
+ int_entry_table[i], KERNEL_CS,
+ ACC_PL_K|ACC_INTR_GATE, 0);
+ }
+#if NCPUS > 1
+ fill_idt_gate(myidt, CALL_AST_CHECK,
+ int_entry_table[i], KERNEL_CS,
+ ACC_PL_K|ACC_INTR_GATE, 0);
+ i++;
+ fill_idt_gate(myidt, CALL_PMAP_UPDATE,
+ int_entry_table[i], KERNEL_CS,
+ ACC_PL_K|ACC_INTR_GATE, 0);
+ i++;
+#endif
+#ifdef APIC
+ fill_idt_gate(myidt, IOAPIC_SPURIOUS_BASE,
+ int_entry_table[i], KERNEL_CS,
+ ACC_PL_K|ACC_INTR_GATE, 0);
+ i++;
+#endif
+}
+
+void
+int_init(void)
+{
+ int_fill(idt);
+}
+
+#if NCPUS > 1
+void ap_int_init(int cpu)
+{
+ int_fill(mp_desc_table[cpu]->idt);
+}
+#endif
diff --git a/i386/i386at/int_init.h b/i386/i386at/int_init.h
new file mode 100644
index 0000000..3c11ebc
--- /dev/null
+++ b/i386/i386at/int_init.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Initialization functions.
+ *
+ */
+
+#ifndef _INT_INIT_H_
+#define _INT_INIT_H_
+
+#include <mach/std_types.h>
+
+#ifndef __ASSEMBLER__
+extern void int_init (void);
+extern void ap_int_init (int cpu);
+#endif /* __ASSEMBLER__ */
+
+#endif /* _INT_INIT_H_ */
diff --git a/i386/i386at/interrupt.S b/i386/i386at/interrupt.S
new file mode 100644
index 0000000..77424b4
--- /dev/null
+++ b/i386/i386at/interrupt.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 1995 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/ipl.h>
+#ifdef APIC
+# include <i386/apic.h>
+#else
+# include <i386/pic.h>
+#endif
+#include <i386/i386asm.h>
+
+#define READ_ISR (OCW_TEMPLATE|READ_NEXT_RD|READ_IS_ONRD)
+
+/*
+ * Generic interrupt handler.
+ *
+ * On entry, %eax contains the irq number.
+ *
+ * Note: kdb_kintr needs to know our stack usage
+ */
+
+#define S_REGS 28(%esp)
+#define S_RET 24(%esp)
+#define S_IRQ 20(%esp)
+#define S_IPL 16(%esp)
+
+ENTRY(interrupt)
+#ifdef APIC
+ cmpl $255,%eax /* was this a spurious intr? */
+ jne 1f
+ ret /* if so, just return */
+1:
+#endif
+ subl $24,%esp /* Two local variables + 4 parameters */
+ movl %eax,S_IRQ /* save irq number */
+
+ call spl7 /* set ipl */
+ movl %eax,S_IPL /* save previous ipl */
+
+ movl S_IRQ,%ecx /* restore irq number */
+
+#if NCPUS > 1
+ cmpl $CALL_PMAP_UPDATE,%ecx /* was this a SMP pmap_update request? */
+ je _call_single
+
+ cmpl $CALL_AST_CHECK,%ecx /* was this a SMP remote -> local ast request? */
+ je _call_local_ast
+#endif
+
+#ifndef APIC
+ movl $1,%eax
+ shll %cl,%eax /* get corresponding IRQ mask */
+ orl EXT(curr_pic_mask),%eax /* add current mask */
+
+ cmpl $8,%ecx /* do we need to ack slave? */
+ jl 1f /* no, only master */
+
+ /* EOI on slave */
+ movb %ah,%al
+ outb %al,$(PIC_SLAVE_OCW) /* mask slave out */
+
+ movb $(SPECIFIC_EOI),%al /* specific EOI for this irq */
+ andb $7,%cl /* irq number for the slave */
+ orb %cl,%al /* combine them */
+ outb %al,$(PIC_SLAVE_ICW) /* ack interrupt to slave */
+
+ movb $(SPECIFIC_EOI + I_AM_SLAVE_2),%al /* specific master EOI for cascaded slave */
+ outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+
+ movl EXT(curr_pic_mask),%eax /* restore original mask */
+ movb %ah,%al
+ outb %al,$(PIC_SLAVE_OCW) /* unmask slave */
+ jmp 2f
+
+1:
+ /* EOI on master */
+ outb %al,$(PIC_MASTER_OCW) /* mask master out */
+
+ movb $(SPECIFIC_EOI),%al /* specific EOI for this irq */
+ orb %cl,%al /* combine with irq number */
+ outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+
+ movl EXT(curr_pic_mask),%eax /* restore original mask */
+ outb %al,$(PIC_MASTER_OCW) /* unmask master */
+2:
+#else
+ movl %ecx,(%esp) /* load irq number as 1st arg */
+ call EXT(ioapic_irq_eoi) /* ioapic irq specific EOI */
+#endif
+
+ movl S_IPL,%eax
+ movl %eax,4(%esp) /* previous ipl as 2nd arg */
+
+ movl S_RET,%eax
+ movl %eax,8(%esp) /* return address as 3rd arg */
+
+ movl S_REGS,%eax
+ movl %eax,12(%esp) /* address of interrupted registers as 4th arg */
+
+ movl S_IRQ,%eax /* copy irq number */
+
+ shll $2,%eax /* irq * 4 */
+ movl EXT(iunit)(%eax),%edx /* get device unit number */
+ movl %edx,(%esp) /* unit number as 1st arg */
+
+ call *EXT(ivect)(%eax) /* call interrupt handler */
+
+_completed:
+ movl S_IPL,%eax /* restore previous ipl */
+ movl %eax,(%esp)
+ call splx_cli /* restore previous ipl */
+
+ addl $24,%esp /* pop local variables */
+ ret
+
+#if NCPUS > 1
+_call_single:
+ call EXT(lapic_eoi) /* lapic EOI before the handler to allow extra update */
+ call EXT(pmap_update_interrupt)
+ jmp _completed
+
+_call_local_ast:
+ call EXT(lapic_eoi) /* lapic EOI */
+ call EXT(ast_check) /* AST check on this cpu */
+ jmp _completed
+
+#endif
+END(interrupt)
diff --git a/i386/i386at/ioapic.c b/i386/i386at/ioapic.c
new file mode 100644
index 0000000..2553a2c
--- /dev/null
+++ b/i386/i386at/ioapic.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2019 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Mach.
+ *
+ * GNU Mach is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * GNU Mach is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ */
+
+#include <sys/types.h>
+#include <i386/ipl.h>
+#include <machine/irq.h>
+#include <i386/fpu.h>
+#include <i386/hardclock.h>
+#include <i386at/kd.h>
+#include <i386at/idt.h>
+#include <i386/pio.h>
+#include <i386/pit.h>
+#include <i386/pic.h> /* only for macros */
+#include <i386/smp.h>
+#include <mach/machine.h>
+#include <kern/printf.h>
+#include <kern/timer.h>
+#include <kern/lock.h>
+
+static int has_irq_specific_eoi = 0;
+int timer_pin;
+
+uint32_t lapic_timer_val = 0;
+uint32_t calibrated_ticks = 0;
+
+spl_t curr_ipl[NCPUS] = {0};
+int spl_init = 0;
+
+def_simple_lock_irq_data(static, ioapic_lock) /* Lock for non-atomic window accesses to ioapic */
+
+int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ /* 2nd IOAPIC */
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63 };
+
+interrupt_handler_fn ivect[NINTR] = {
+ /* 00 */ (interrupt_handler_fn)hardclock,
+ /* 01 */ kdintr, /* kdintr, ... */
+ /* 02 */ intnull,
+ /* 03 */ intnull, /* lnpoll, comintr, ... */
+
+ /* 04 */ intnull, /* comintr, ... */
+ /* 05 */ intnull, /* comintr, wtintr, ... */
+ /* 06 */ intnull, /* fdintr, ... */
+ /* 07 */ intnull, /* qdintr, ... */
+
+ /* 08 */ intnull,
+ /* 09 */ intnull, /* ether */
+ /* 10 */ intnull,
+ /* 11 */ intnull,
+
+ /* 12 */ intnull,
+ /* 13 */ fpintr, /* always */
+ /* 14 */ intnull, /* hdintr, ... */
+ /* 15 */ intnull, /* ??? */
+
+ /* 16 */ intnull, /* PIRQA */
+ /* 17 */ intnull, /* PIRQB */
+ /* 18 */ intnull, /* PIRQC */
+ /* 19 */ intnull, /* PIRQD */
+ /* 20 */ intnull, /* PIRQE */
+ /* 21 */ intnull, /* PIRQF */
+ /* 22 */ intnull, /* PIRQG */
+ /* 23 */ intnull, /* PIRQH */
+
+ /* 24 */ intnull,
+ /* 25 */ intnull,
+ /* 26 */ intnull,
+ /* 27 */ intnull,
+ /* 28 */ intnull,
+ /* 29 */ intnull,
+ /* 30 */ intnull,
+ /* 31 */ intnull,
+
+ /* 32 */ intnull,
+ /* 33 */ intnull,
+ /* 34 */ intnull,
+ /* 35 */ intnull,
+ /* 36 */ intnull,
+ /* 37 */ intnull,
+ /* 38 */ intnull,
+ /* 39 */ intnull,
+ /* 40 */ intnull,
+ /* 41 */ intnull,
+ /* 42 */ intnull,
+ /* 43 */ intnull,
+ /* 44 */ intnull,
+ /* 45 */ intnull,
+ /* 46 */ intnull,
+ /* 47 */ intnull,
+ /* 48 */ intnull,
+ /* 49 */ intnull,
+ /* 50 */ intnull,
+ /* 51 */ intnull,
+ /* 52 */ intnull,
+ /* 53 */ intnull,
+ /* 54 */ intnull,
+ /* 55 */ intnull,
+
+ /* 56 */ intnull,
+ /* 57 */ intnull,
+ /* 58 */ intnull,
+ /* 59 */ intnull,
+ /* 60 */ intnull,
+ /* 61 */ intnull,
+ /* 62 */ intnull,
+ /* 63 */ intnull,
+};
+
+void
+picdisable(void)
+{
+ int i;
+
+ asm("cli");
+ for (i = 0; i < NCPUS; i++)
+ curr_ipl[i] = SPLHI;
+
+ /*
+ ** Disable PIC
+ */
+ outb ( PIC_SLAVE_OCW, PICS_MASK );
+ outb ( PIC_MASTER_OCW, PICM_MASK );
+}
+
+void
+intnull(int unit_dev)
+{
+ printf("intnull(%d)\n", unit_dev);
+}
+
+static uint32_t
+ioapic_read(uint8_t id, uint8_t reg)
+{
+ volatile ApicIoUnit *ioapic = apic_get_ioapic(id)->ioapic;
+ ioapic->select.r = reg;
+ return ioapic->window.r;
+}
+
+static void
+ioapic_write(uint8_t id, uint8_t reg, uint32_t value)
+{
+ volatile ApicIoUnit *ioapic = apic_get_ioapic(id)->ioapic;
+ ioapic->select.r = reg;
+ ioapic->window.r = value;
+}
+
+static void
+ioapic_read_entry(int apic, int pin, struct ioapic_route_entry *e)
+{
+ union ioapic_route_entry_union entry;
+
+ entry.lo = ioapic_read(apic, APIC_IO_REDIR_LOW(pin));
+ entry.hi = ioapic_read(apic, APIC_IO_REDIR_HIGH(pin));
+
+ *e = entry.both;
+}
+
+/* Write the high word first because mask bit is in low word */
+static void
+ioapic_write_entry(int apic, int pin, struct ioapic_route_entry e)
+{
+ union ioapic_route_entry_union entry = {{0, 0}};
+
+ entry.both = e;
+ ioapic_write(apic, APIC_IO_REDIR_HIGH(pin), entry.hi);
+ ioapic_write(apic, APIC_IO_REDIR_LOW(pin), entry.lo);
+}
+
+/* When toggling the interrupt via mask, write low word only */
+static void
+ioapic_toggle_entry(int apic, int pin, int mask)
+{
+ union ioapic_route_entry_union entry;
+
+ spl_t s = simple_lock_irq(&ioapic_lock);
+ ioapic_read_entry(apic, pin, &entry.both);
+ entry.both.mask = mask & 0x1;
+ ioapic_write(apic, APIC_IO_REDIR_LOW(pin), entry.lo);
+ simple_unlock_irq(s, &ioapic_lock);
+}
+
+static int
+ioapic_version(int apic)
+{
+ return (ioapic_read(apic, APIC_IO_VERSION) >> APIC_IO_VERSION_SHIFT) & 0xff;
+}
+
+static int
+ioapic_gsis(int apic)
+{
+ return ((ioapic_read(apic, APIC_IO_VERSION) >> APIC_IO_ENTRIES_SHIFT) & 0xff) + 1;
+}
+
+static void timer_expiry_callback(void *arg)
+{
+ volatile int *done = arg;
+ *done = 1;
+}
+
+static uint32_t
+timer_measure_10x_apic_hz(void)
+{
+ volatile int done = 0;
+ uint32_t start = 0xffffffff;
+ timer_elt_data_t tmp_timer;
+ tmp_timer.fcn = timer_expiry_callback;
+ tmp_timer.param = (void *)&done;
+
+ printf("timer calibration...");
+
+ /* Set APIC timer */
+ lapic->init_count.r = start;
+
+ /* Delay for 10 ticks (10 * 1/hz seconds) */
+ set_timeout(&tmp_timer, 10);
+ do {
+ cpu_pause();
+ } while (!done);
+
+ /* Stop APIC timer */
+ lapic->lvt_timer.r |= LAPIC_DISABLE;
+
+ printf(" done\n");
+
+ return start - lapic->cur_count.r;
+}
+
+void
+calibrate_lapic_timer(void)
+{
+ spl_t s;
+
+ /* Set one-shot timer */
+ lapic->divider_config.r = LAPIC_TIMER_DIVIDE_2;
+ lapic->lvt_timer.r = IOAPIC_INT_BASE;
+
+ /* Measure number of APIC timer ticks in 10 mach ticks
+ * divide by 10 because we want to know how many in 1 tick */
+ if (!calibrated_ticks) {
+ s = splhigh();
+ spl0();
+ calibrated_ticks = timer_measure_10x_apic_hz() / 10;
+ splx(s);
+ }
+}
+
+void
+lapic_enable_timer(void)
+{
+ /* Set up counter */
+ lapic->init_count.r = calibrated_ticks;
+ lapic->divider_config.r = LAPIC_TIMER_DIVIDE_2;
+
+ /* Set the timer to interrupt periodically on remapped timer GSI */
+ lapic->lvt_timer.r = IOAPIC_INT_BASE | LAPIC_TIMER_PERIODIC;
+
+ /* Some buggy hardware requires this set again */
+ lapic->divider_config.r = LAPIC_TIMER_DIVIDE_2;
+
+ /* Enable interrupts for the first time */
+ printf("LAPIC timer configured on cpu%d\n", cpu_number());
+}
+
+void
+ioapic_toggle(int pin, int mask)
+{
+ int apic = 0;
+ ioapic_toggle_entry(apic, pin, mask);
+}
+
+void
+ioapic_irq_eoi(int pin)
+{
+ int apic = 0;
+ union ioapic_route_entry_union oldentry, entry;
+
+ if (pin == 0)
+ goto skip_specific_eoi;
+
+ spl_t s = simple_lock_irq(&ioapic_lock);
+
+ if (!has_irq_specific_eoi) {
+ /* Workaround for old IOAPICs with no specific EOI */
+
+ /* Mask the pin and change to edge triggered */
+ ioapic_read_entry(apic, pin, &entry.both);
+ oldentry = entry;
+ entry.both.mask = IOAPIC_MASK_DISABLED;
+ entry.both.trigger = IOAPIC_EDGE_TRIGGERED;
+ ioapic_write_entry(apic, pin, entry.both);
+
+ /* Restore level entry */
+ ioapic_write_entry(apic, pin, oldentry.both);
+ } else {
+ volatile ApicIoUnit *ioapic = apic_get_ioapic(apic)->ioapic;
+
+ ioapic_read_entry(apic, pin, &entry.both);
+ ioapic->eoi.r = entry.both.vector;
+ }
+
+ simple_unlock_irq(s, &ioapic_lock);
+
+skip_specific_eoi:
+ lapic_eoi ();
+}
+
+static unsigned int
+override_irq(IrqOverrideData *override, union ioapic_route_entry_union *entry)
+{
+ if (override->flags & APIC_IRQ_OVERRIDE_TRIGGER_MASK) {
+ entry->both.trigger = (override->flags & APIC_IRQ_OVERRIDE_LEVEL_TRIGGERED) ?
+ IOAPIC_LEVEL_TRIGGERED : IOAPIC_EDGE_TRIGGERED;
+ } else {
+ if (override->bus == 0) {
+ /* ISA is edge-triggered by default */
+ entry->both.trigger = IOAPIC_EDGE_TRIGGERED;
+ } else {
+ entry->both.trigger = IOAPIC_LEVEL_TRIGGERED;
+ }
+ }
+
+ if (override->flags & APIC_IRQ_OVERRIDE_POLARITY_MASK) {
+ entry->both.polarity = (override->flags & APIC_IRQ_OVERRIDE_ACTIVE_LOW) ?
+ IOAPIC_ACTIVE_LOW : IOAPIC_ACTIVE_HIGH;
+ } else {
+ if (override->bus == 0) {
+ /* EISA is active-low for level-triggered interrupts */
+ if (entry->both.trigger == IOAPIC_LEVEL_TRIGGERED) {
+ entry->both.polarity = IOAPIC_ACTIVE_LOW;
+ } else {
+ entry->both.polarity = IOAPIC_ACTIVE_HIGH;
+ }
+ }
+ }
+ printf("IRQ override: pin=%d gsi=%d trigger=%s polarity=%s\n",
+ override->irq, override->gsi,
+ entry->both.trigger == IOAPIC_LEVEL_TRIGGERED ? "LEVEL" : "EDGE",
+ entry->both.polarity == IOAPIC_ACTIVE_LOW ? "LOW" : "HIGH");
+
+ return override->gsi;
+}
+
+void
+ioapic_configure(void)
+{
+ /* Assume first IO APIC maps to GSI base 0 */
+ int gsi, apic = 0, bsp = 0, pin;
+ IrqOverrideData *irq_over;
+ int timer_gsi;
+ int version = ioapic_version(apic);
+ int ngsis = ioapic_gsis(apic);
+ int ngsis2 = 0;
+
+ if (version >= 0x20) {
+ has_irq_specific_eoi = 1;
+ }
+
+ printf("IOAPIC version 0x%x\n", version);
+
+ /* Disable IOAPIC interrupts and set spurious interrupt */
+ lapic->spurious_vector.r = IOAPIC_SPURIOUS_BASE;
+
+ union ioapic_route_entry_union entry = {{0, 0}};
+
+ entry.both.delvmode = IOAPIC_FIXED;
+ entry.both.destmode = IOAPIC_PHYSICAL;
+ entry.both.mask = IOAPIC_MASK_DISABLED;
+ entry.both.dest = apic_get_cpu_apic_id(bsp);
+
+ for (pin = 0; pin < 16; pin++) {
+ gsi = pin;
+
+ /* ISA legacy IRQs */
+ entry.both.trigger = IOAPIC_EDGE_TRIGGERED;
+ entry.both.polarity = IOAPIC_ACTIVE_HIGH;
+
+ if ((irq_over = acpi_get_irq_override(pin))) {
+ gsi = override_irq(irq_over, &entry);
+ }
+ entry.both.vector = IOAPIC_INT_BASE + gsi;
+ ioapic_write_entry(apic, pin, entry.both);
+
+ /* Timer workaround for x86 */
+ if (pin == 0) {
+ /* Save timer info */
+ timer_gsi = gsi;
+ } else {
+ /* Remap timer irq */
+ if (gsi == timer_gsi) {
+ timer_pin = pin;
+ /* Remap GSI base to timer pin so ivect[0] is the timer */
+ entry.both.vector = IOAPIC_INT_BASE;
+ ioapic_write_entry(apic, timer_pin, entry.both);
+ /* Mask the duplicate pin 0 as we will be using timer_pin */
+ mask_irq(0);
+ }
+ }
+ }
+
+ for (pin = 16; pin < ngsis; pin++) {
+ gsi = pin;
+
+ /* PCI IRQs PIRQ A-H */
+ entry.both.trigger = IOAPIC_LEVEL_TRIGGERED;
+ entry.both.polarity = IOAPIC_ACTIVE_LOW;
+
+ if ((irq_over = acpi_get_irq_override(pin))) {
+ gsi = override_irq(irq_over, &entry);
+ }
+ entry.both.vector = IOAPIC_INT_BASE + gsi;
+ ioapic_write_entry(apic, pin, entry.both);
+ }
+
+ printf("IOAPIC 0 configured with GSI 0-%d\n", ngsis - 1);
+
+ /* Second IOAPIC */
+ if (apic_get_num_ioapics() > 1) {
+ apic = 1;
+ ngsis2 = ioapic_gsis(apic);
+
+ for (pin = 0; pin < ngsis2; pin++) {
+ gsi = pin + ngsis;
+
+ /* Defaults */
+ entry.both.trigger = IOAPIC_LEVEL_TRIGGERED;
+ entry.both.polarity = IOAPIC_ACTIVE_LOW;
+
+ if ((irq_over = acpi_get_irq_override(pin + ngsis))) {
+ gsi = override_irq(irq_over, &entry);
+ }
+ entry.both.vector = IOAPIC_INT_BASE + gsi;
+ ioapic_write_entry(apic, pin, entry.both);
+ }
+
+ printf("IOAPIC 1 configured with GSI %d-%d\n", ngsis, ngsis + ngsis2 - 1);
+ }
+
+ /* Start the IO APIC receiving interrupts */
+ lapic_setup();
+ lapic_enable();
+}
diff --git a/i386/i386at/kd.c b/i386/i386at/kd.c
new file mode 100644
index 0000000..2bea3c8
--- /dev/null
+++ b/i386/i386at/kd.c
@@ -0,0 +1,3059 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Olivetti Mach Console driver v0.0
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989
+ * All rights reserved.
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* $ Header: $ */
+
+#include <sys/types.h>
+#include <kern/debug.h>
+#include <kern/mach_clock.h>
+#include <kern/printf.h>
+#include <device/conf.h>
+#include <device/tty.h>
+#include <device/io_req.h>
+#include <device/buf.h>
+#include <vm/vm_kern.h>
+#include <i386/db_interface.h>
+#include <i386/locore.h>
+#include <i386/loose_ends.h>
+#include <i386/vm_param.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/cram.h>
+#include <i386at/kd.h>
+#include <i386at/kd_event.h>
+#include <i386at/kd_mouse.h>
+#include <i386at/kdsoft.h>
+#include <device/cons.h>
+#include <util/atoi.h>
+
+#define DEBUG 1 /* export feep() */
+
+#if 0
+#define BROKEN_KEYBOARD_RESET
+#endif
+
+/* The one and only console tty; shared by all kd* entry points. */
+struct tty kd_tty;
+extern boolean_t rebootflag;
+
+/* Default (EGA text-mode) implementations of the display operations. */
+static void charput(csrpos_t pos, char ch, char chattr);
+static void charmvup(csrpos_t from, csrpos_t to, int count);
+static void charmvdown(csrpos_t from, csrpos_t to, int count);
+static void charclear(csrpos_t to, int count, char chattr);
+static void charsetcursor(csrpos_t newpos);
+static void kd_noopreset(void);
+
+/*
+ * These routines define the interface to the device-specific layer.
+ * See kdsoft.h for a more complete description of what each routine does.
+ * They default to the character-mode routines above; a bitmap display
+ * driver may repoint them during initialization.
+ */
+void (*kd_dput)(csrpos_t, char, char) = charput; /* put attributed char */
+void (*kd_dmvup)(csrpos_t, csrpos_t, int) = charmvup; /* block move up */
+void (*kd_dmvdown)(csrpos_t, csrpos_t, int) = charmvdown; /* block move down */
+void (*kd_dclear)(csrpos_t, int, char) = charclear; /* block clear */
+void (*kd_dsetcursor)(csrpos_t) = charsetcursor;
+ /* set cursor position on displayed page */
+void (*kd_dreset)(void) = kd_noopreset; /* prepare for reboot */
+
+/*
+ * Globals used for both character-based controllers and bitmap-based
+ * controllers. Default is EGA.
+ */
+
+vm_offset_t kd_bitmap_start = (vm_offset_t)0xa0000; /* XXX - put in kd.h */
+u_char *vid_start = (u_char *)EGA_START;
+ /* VM start of video RAM or frame buffer */
+csrpos_t kd_curpos = 0; /* set indirectly by kd_setpos--see kdsoft.h */
+short kd_lines = 25;
+short kd_cols = 80;
+char kd_attr = KA_NORMAL; /* current attribute */
+char kd_color = KA_NORMAL;
+char kd_attrflags = 0; /* Not reverse, underline, blink */
+
+/*
+ * kd_state shows the state of the modifier keys (ctrl, caps lock,
+ * etc.) It should normally be changed by calling set_kd_state(), so
+ * that the keyboard status LEDs are updated correctly.
+ */
+int kd_state = KS_NORMAL;
+int kb_mode = KB_ASCII; /* event/ascii */
+
+/*
+ * State for the keyboard "mouse" (moving the pointer from the
+ * keyboard); toggled at runtime by kdcheckmagic().
+ */
+int kd_kbd_mouse = 0;
+int kd_kbd_magic_scale = 6;
+int kd_kbd_magic_button = 0;
+
+/*
+ * Some keyboard commands work by sending a command, waiting for an
+ * ack (handled by kdintr), then sending data, which generates a
+ * second ack. If we are in the middle of such a sequence, kd_ack
+ * shows what the ack is for.
+ *
+ * When a byte is sent to the keyboard, it is kept around in last_sent
+ * in case it needs to be resent.
+ *
+ * The rest of the variables here hold the data required to complete
+ * the sequence.
+ *
+ * XXX - the System V driver keeps a command queue, I guess in case we
+ * want to start a command while another is in progress. Is this
+ * something we should worry about?
+ */
+enum why_ack {NOT_WAITING, SET_LEDS, DATA_ACK};
+enum why_ack kd_ack = NOT_WAITING;
+
+u_char last_sent = 0;
+
+/* Pending LED state, written on the next SET_LEDS ack. */
+u_char kd_nextled = 0;
+
+/*
+ * We don't provide any mutex protection for this flag because we know
+ * that this module will have been initialized by the time multiple
+ * threads are running.
+ */
+boolean_t kd_initialized = FALSE; /* driver initialized? */
+boolean_t kd_extended = FALSE;
+
+/* Array for processing escape sequences. */
+#define K_MAXESC 32
+u_char esc_seq[K_MAXESC];
+u_char *esc_spt = (u_char *)0; /* next free slot; == esc_seq when idle */
+
+/*
+ * This array maps scancodes to Ascii characters (or character
+ * sequences).
+ * Each row corresponds to one key. There are NUMOUTPUT bytes per key
+ * state. The states are ordered: Normal, SHIFT, CTRL, ALT,
+ * SHIFT/ALT.
+ */
+
+/* This new keymap from Tudor Hulubei (tudor@cs.unh.edu) makes the
+ following changes to the keyboard driver:
+
+ - Alt + key (m-key) returns `ESC key' instead of `ESC N key'.
+ - Backspace returns 0x7f instead of 0x08.
+ - Delete returns `ESC [ 9' instead of 0x7f.
+ - Alt + function keys return key sequences that are different
+ from the key sequences returned by the function keys alone.
+ This is done with the idea of allowing a terminal server to
+ implement multiple virtual consoles mapped on Alt+F1, Alt+F2,
+ etc, as in Linux.
+
+ -- Derek Upham 1997/06/25 */
+
+unsigned char key_map[NUMKEYS][WIDTH_KMAP] = {
+{NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC},
+{K_ESC,NC,NC, K_ESC,NC,NC, K_ESC,NC,NC, 0x1b,K_ESC,NC, K_ESC,NC,NC},
+{K_ONE,NC,NC, K_BANG,NC,NC, K_ONE,NC,NC, 0x1b,K_ONE,NC, 0x1b,0x4e,K_BANG},
+{K_TWO,NC,NC, K_ATSN,NC,NC, K_NUL,NC,NC, 0x1b,K_TWO,NC, 0x1b,0x4e,K_ATSN},
+{K_THREE,NC,NC, K_POUND,NC,NC, K_THREE,NC,NC, 0x1b,K_THREE,NC, 0x1b,0x4e,K_POUND},
+{K_FOUR,NC,NC, K_DOLLAR,NC,NC, K_FOUR,NC,NC, 0x1b,K_FOUR,NC, 0x1b,0x4e,K_DOLLAR},
+{K_FIVE,NC,NC, K_PERC,NC,NC, K_FIVE,NC,NC, 0x1b,K_FIVE,NC, 0x1b,0x4e,K_PERC},
+{K_SIX,NC,NC, K_CARET,NC,NC, K_RS,NC,NC, 0x1b,K_SIX,NC, 0x1b,0x4e,K_CARET},
+{K_SEVEN,NC,NC, K_AMPER,NC,NC, K_SEVEN,NC,NC, 0x1b,K_SEVEN,NC, 0x1b,0x4e,K_AMPER},
+{K_EIGHT,NC,NC, K_ASTER,NC,NC, K_EIGHT,NC,NC, 0x1b,K_EIGHT,NC, 0x1b,0x4e,K_ASTER},
+{K_NINE,NC,NC, K_LPAREN,NC,NC, K_NINE,NC,NC, 0x1b,K_NINE,NC, 0x1b,0x4e,K_LPAREN},
+{K_ZERO,NC,NC, K_RPAREN,NC,NC, K_ZERO,NC,NC, 0x1b,K_ZERO,NC, 0x1b,0x4e,K_RPAREN},
+{K_MINUS,NC,NC, K_UNDSC,NC,NC, K_US,NC,NC, 0x1b,K_MINUS,NC, 0x1b,0x4e,K_UNDSC},
+{K_EQL,NC,NC, K_PLUS,NC,NC, K_EQL,NC,NC, 0x1b,K_EQL,NC, 0x1b,0x4e,K_PLUS},
+{K_DEL,NC,NC, K_DEL,NC,NC, K_DEL,NC,NC, 0x1b,K_DEL,NC, K_DEL,NC,NC},
+{K_HT,NC,NC, K_GS,NC,NC, K_HT,NC,NC, 0x1b,K_HT,NC, K_GS,NC,NC},
+{K_q,NC,NC, K_Q,NC,NC, K_DC1,NC,NC, 0x1b,K_q,NC, 0x1b,0x4e,K_Q},
+{K_w,NC,NC, K_W,NC,NC, K_ETB,NC,NC, 0x1b,K_w,NC, 0x1b,0x4e,K_W},
+{K_e,NC,NC, K_E,NC,NC, K_ENQ,NC,NC, 0x1b,K_e,NC, 0x1b,0x4e,K_E},
+{K_r,NC,NC, K_R,NC,NC, K_DC2,NC,NC, 0x1b,K_r,NC, 0x1b,0x4e,K_R},
+{K_t,NC,NC, K_T,NC,NC, K_DC4,NC,NC, 0x1b,K_t,NC, 0x1b,0x4e,K_T},
+{K_y,NC,NC, K_Y,NC,NC, K_EM,NC,NC, 0x1b,K_y,NC, 0x1b,0x4e,K_Y},
+{K_u,NC,NC, K_U,NC,NC, K_NAK,NC,NC, 0x1b,K_u,NC, 0x1b,0x4e,K_U},
+{K_i,NC,NC, K_I,NC,NC, K_HT,NC,NC, 0x1b,K_i,NC, 0x1b,0x4e,K_I},
+{K_o,NC,NC, K_O,NC,NC, K_SI,NC,NC, 0x1b,K_o,NC, 0x1b,0x4e,K_O},
+{K_p,NC,NC, K_P,NC,NC, K_DLE,NC,NC, 0x1b,K_p,NC, 0x1b,0x4e,K_P},
+{K_LBRKT,NC,NC, K_LBRACE,NC,NC, K_ESC,NC,NC, 0x1b,K_LBRKT,NC, 0x1b,0x4e,K_LBRACE},
+{K_RBRKT,NC,NC, K_RBRACE,NC,NC, K_GS,NC,NC, 0x1b,K_RBRKT,NC, 0x1b,0x4e,K_RBRACE},
+{K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC, 0x1b,K_CR,NC, K_CR,NC,NC},
+{K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC},
+{K_a,NC,NC, K_A,NC,NC, K_SOH,NC,NC, 0x1b,K_a,NC, 0x1b,0x4e,K_A},
+{K_s,NC,NC, K_S,NC,NC, K_DC3,NC,NC, 0x1b,K_s,NC, 0x1b,0x4e,K_S},
+{K_d,NC,NC, K_D,NC,NC, K_EOT,NC,NC, 0x1b,K_d,NC, 0x1b,0x4e,K_D},
+{K_f,NC,NC, K_F,NC,NC, K_ACK,NC,NC, 0x1b,K_f,NC, 0x1b,0x4e,K_F},
+{K_g,NC,NC, K_G,NC,NC, K_BEL,NC,NC, 0x1b,K_g,NC, 0x1b,0x4e,K_G},
+{K_h,NC,NC, K_H,NC,NC, K_BS,NC,NC, 0x1b,K_h,NC, 0x1b,0x4e,K_H},
+{K_j,NC,NC, K_J,NC,NC, K_LF,NC,NC, 0x1b,K_j,NC, 0x1b,0x4e,K_J},
+{K_k,NC,NC, K_K,NC,NC, K_VT,NC,NC, 0x1b,K_k,NC, 0x1b,0x4e,K_K},
+{K_l,NC,NC, K_L,NC,NC, K_FF,NC,NC, 0x1b,K_l,NC, 0x1b,0x4e,K_L},
+{K_SEMI,NC,NC, K_COLON,NC,NC, K_SEMI,NC,NC, 0x1b,K_SEMI,NC, 0x1b,0x4e,K_COLON},
+{K_SQUOTE,NC,NC,K_DQUOTE,NC,NC, K_SQUOTE,NC,NC,0x1b,K_SQUOTE,NC, 0x1b,0x4e,K_DQUOTE},
+{K_GRAV,NC,NC, K_TILDE,NC,NC, K_RS,NC,NC, 0x1b,K_GRAV,NC, 0x1b,0x4e,K_TILDE},
+{K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC},
+{K_BSLSH,NC,NC, K_PIPE,NC,NC, K_FS,NC,NC, 0x1b,K_BSLSH,NC, 0x1b,0x4e,K_PIPE},
+{K_z,NC,NC, K_Z,NC,NC, K_SUB,NC,NC, 0x1b,K_z,NC, 0x1b,0x4e,K_Z},
+{K_x,NC,NC, K_X,NC,NC, K_CAN,NC,NC, 0x1b,K_x,NC, 0x1b,0x4e,K_X},
+{K_c,NC,NC, K_C,NC,NC, K_ETX,NC,NC, 0x1b,K_c,NC, 0x1b,0x4e,K_C},
+{K_v,NC,NC, K_V,NC,NC, K_SYN,NC,NC, 0x1b,K_v,NC, 0x1b,0x4e,K_V},
+{K_b,NC,NC, K_B,NC,NC, K_STX,NC,NC, 0x1b,K_b,NC, 0x1b,0x4e,K_B},
+{K_n,NC,NC, K_N,NC,NC, K_SO,NC,NC, 0x1b,K_n,NC, 0x1b,0x4e,K_N},
+{K_m,NC,NC, K_M,NC,NC, K_CR,NC,NC, 0x1b,K_m,NC, 0x1b,0x4e,K_M},
+{K_COMMA,NC,NC, K_LTHN,NC,NC, K_COMMA,NC,NC, 0x1b,K_COMMA,NC, 0x1b,0x4e,K_LTHN},
+{K_PERIOD,NC,NC,K_GTHN,NC,NC, K_PERIOD,NC,NC,0x1b,K_PERIOD,NC, 0x1b,0x4e,K_GTHN},
+{K_SLASH,NC,NC, K_QUES,NC,NC, K_SLASH,NC,NC, 0x1b,K_SLASH,NC, 0x1b,0x4e,K_QUES},
+{K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC},
+{K_ASTER,NC,NC, K_ASTER,NC,NC, K_ASTER,NC,NC, 0x1b,K_ASTER,NC, 0x1b,0x4e,K_ASTER},
+{K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC},
+{K_SPACE,NC,NC, K_SPACE,NC,NC, K_NUL,NC,NC, 0x1b,K_SPACE,NC, K_SPACE,NC,NC},
+{K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC},
+{K_F1, K_F1S, K_F1, K_F1A, K_F1S},
+{K_F2, K_F2S, K_F2, K_F2A, K_F2S},
+{K_F3, K_F3S, K_F3, K_F3A, K_F3S},
+{K_F4, K_F4S, K_F4, K_F4A, K_F4S},
+{K_F5, K_F5S, K_F5, K_F5A, K_F5S},
+{K_F6, K_F6S, K_F6, K_F6A, K_F6S},
+{K_F7, K_F7S, K_F7, K_F7A, K_F7S},
+{K_F8, K_F8S, K_F8, K_F8A, K_F8S},
+{K_F9, K_F9S, K_F9, K_F9A, K_F9S},
+{K_F10, K_F10S, K_F10, K_F10A, K_F10S},
+{K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC},
+{K_SCRL, K_NUL,NC,NC, K_SCRL, K_SCRL, K_NUL,NC,NC},
+{K_HOME, K_SEVEN,NC,NC, K_HOME, K_HOME, 0x1b,0x4e,K_SEVEN},
+{K_UA, K_EIGHT,NC,NC, K_UA, K_UA, 0x1b,0x4e,K_EIGHT},
+{K_PUP, K_NINE,NC,NC, K_PUP, K_PUP, 0x1b,0x4e,K_NINE},
+{0x1b,0x5b,0x53, K_MINUS,NC,NC, 0x1b,0x5b,0x53, 0x1b,0x5b,0x53, 0x1b,0x4e,0x2d},
+{K_LA, K_FOUR,NC,NC, K_LA, K_LA, 0x1b,0x4e,K_FOUR},
+{0x1b,0x5b,0x47, K_FIVE,NC,NC, 0x1b,0x5b,0x47, 0x1b,0x5b,0x47, 0x1b,0x4e,0x35},
+{K_RA, K_SIX,NC,NC, K_RA, K_RA, 0x1b,0x4e,K_SIX},
+{0x1b,0x5b,0x54, K_PLUS,NC,NC, 0x1b,0x5b,0x54, 0x1b,0x5b,0x54, 0x1b,0x4e,0x2b},
+{K_END, K_ONE,NC,NC, K_END, K_END, 0x1b,0x4e,K_ONE},
+{K_DA, K_TWO,NC,NC, K_DA, K_DA, 0x1b,0x4e,K_TWO},
+{K_PDN, K_THREE,NC,NC, K_PDN, K_PDN, 0x1b,0x4e,K_THREE},
+{K_INS, K_ZERO,NC,NC, K_INS, K_INS, 0x1b,0x4e,K_ZERO},
+{0x1b,0x5b,0x39, K_PERIOD,NC,NC, K_DEL,NC,NC, K_DEL,NC,NC, 0x1b,0x4e,K_PERIOD},
+{NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC},
+{NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC},
+{NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC, NC,NC,NC},
+{K_F11, K_F11S, K_F11, K_F11A, K_F11S},
+{K_F12, K_F12S, K_F12, K_F12A, K_F12S}
+};
+
+
+/*
+ * Globals used only for character-based controllers.
+ */
+
+short kd_index_reg = EGA_IDX_REG;
+short kd_io_reg = EGA_IO_REG;
+
+
+/*
+ * Globals used only for bitmap-based controllers. See kdsoft.h for
+ * an explanation of what some of these variables are used for.
+ * All are zeroed here and expected to be filled in by the
+ * board-specific initialization (kd_xga_init or similar).
+ */
+
+u_char *font_start = 0; /* starting addr of font */
+
+short fb_width = 0; /* bits in frame buffer scan line */
+short fb_height = 0; /* scan lines in frame buffer*/
+short char_width = 0; /* bit width of 1 char */
+short char_height = 0; /* bit height of 1 char */
+short chars_in_font = 0;
+short cursor_height = 0; /* bit height of cursor */
+
+/* These initial values are simply guesses. */
+u_char char_black = 0;
+u_char char_white = 0xff;
+
+short xstart = 0;
+short ystart = 0;
+
+short char_byte_width = 0; /* char_width/NBBY */
+short fb_byte_width = 0; /* fb_width/NBBY */
+short font_byte_width = 0; /* num bytes in 1 scan line of font */
+
+/*
+ * Switch for poll vs. interrupt: nonzero while the debugger is
+ * polling the keyboard, which makes kdintr() ignore interrupts.
+ */
+int kd_pollc = 0;
+
+#ifdef DEBUG
+/*
+ * pause:
+ *
+ * Crude busy-wait delay used by feep() to time the bell.
+ * The counter is volatile so the compiler cannot optimize the
+ * empty loop away (a plain `int' loop with no side effects may
+ * legally be deleted, which would make the delay vanish).
+ */
+static void
+pause(void)
+{
+ volatile int i;
+
+ for (i = 0; i < 50000; ++i)
+ ;
+}
+
+/*
+ * feep:
+ *
+ * Ring the bell for a short time.
+ * Warning: uses outb(). You may prefer to use kd_debug_put.
+ */
+void
+feep(void)
+{
+ kd_bellon();
+ pause(); /* busy-wait so the tone is audible */
+ kd_belloff(NULL);
+}
+
+/*
+ * Put a debugging character on the screen.
+ * LOC=0 means put it in the bottom right corner, LOC=1 means put it
+ * one column to the left, etc.
+ */
+void
+kd_debug_put(
+ int loc,
+ char c)
+{
+ /* Count ONE_SPACE cells leftward from the bottom-right corner. */
+ csrpos_t pos = ONE_PAGE - (loc+1) * ONE_SPACE;
+
+ (*kd_dput)(pos, c, KA_NORMAL);
+}
+#endif /* DEBUG */
+
+
+extern boolean_t mouse_in_use;
+int old_kb_mode; /* kb_mode saved across a debugger polling session */
+
+/*
+ * cnpollc:
+ *
+ * Enter (on != 0) or leave (on == 0) console polling mode for the
+ * kernel debugger, keeping a nesting count in kd_pollc. When the
+ * mouse (X server) owns the keyboard, also force ASCII mode and
+ * notify X via X_kdb_enter/X_kdb_exit, restoring the old mode on
+ * the way out.
+ */
+void
+cnpollc(boolean_t on)
+{
+ if (mouse_in_use) {
+ if (on) {
+ /* switch into X */
+ old_kb_mode = kb_mode;
+ kb_mode = KB_ASCII;
+ X_kdb_enter();
+
+ kd_pollc++;
+ } else {
+ --kd_pollc;
+
+ /* switch out of X */
+ X_kdb_exit();
+ kb_mode = old_kb_mode;
+ }
+ } else {
+ if (on) {
+ kd_pollc++;
+ } else {
+ --kd_pollc;
+ }
+ }
+}
+
+
+
+/*
+ * kdopen:
+ *
+ * This opens the console driver and sets up the tty and other
+ * rudimentary stuff including calling the line discipline for
+ * setting up the device independent stuff for a tty driver.
+ *
+ * input: device number 'dev', and flag
+ *
+ * output: device is opened and setup
+ *
+ */
+int
+kdopen(
+ dev_t dev,
+ int flag,
+ io_req_t ior)
+{
+ struct tty *tp;
+ spl_t o_pri;
+
+ tp = &kd_tty;
+ o_pri = simple_lock_irq(&tp->t_lock);
+ if (!(tp->t_state & (TS_ISOPEN|TS_WOPEN))) {
+ /* First open: set up line discipline defaults and init hw. */
+ /* XXX ttychars allocates memory */
+ simple_unlock_nocheck(&tp->t_lock.slock);
+ ttychars(tp);
+ /* NOTE(review): the lock is dropped around ttychars; a racing
+ open could observe a half-initialized tty -- confirm callers
+ serialize first opens. */
+ simple_lock_nocheck(&tp->t_lock.slock);
+ /*
+ * Special support for boot-time rc scripts, which don't
+ * stty the console.
+ */
+ tp->t_oproc = kdstart;
+ tp->t_stop = kdstop;
+ tp->t_ospeed = tp->t_ispeed = B115200;
+ tp->t_flags = ODDP|EVENP|ECHO|CRMOD|XTABS|LITOUT;
+ kdinit();
+ }
+ /* The console has no real carrier; assert it unconditionally. */
+ tp->t_state |= TS_CARR_ON;
+ simple_unlock_irq(o_pri, &tp->t_lock);
+ return (char_open(dev, tp, flag, ior));
+}
+
+
+/*
+ * kdclose:
+ *
+ * Close the console device: run the device-independent tty close
+ * path under the tty lock. The flag argument is unused.
+ *
+ * input: device number 'dev', and flag
+ *
+ * output: device is closed
+ *
+ */
+/*ARGSUSED*/
+void
+kdclose(dev_t dev, int flag)
+{
+ struct tty *tp = &kd_tty;
+ spl_t s;
+
+ s = simple_lock_irq(&tp->t_lock);
+ ttyclose(tp);
+ simple_unlock_irq(s, &tp->t_lock);
+}
+
+
+/*
+ * kdread:
+ *
+ * This function executes the device independent code to read from
+ * the tty.
+ *
+ * input: device number 'dev'
+ *
+ * output: characters are read from tty clists
+ *
+ */
+/*ARGSUSED*/
+int
+kdread(dev_t dev, io_req_t uio)
+{
+ struct tty *tp;
+
+ tp = &kd_tty;
+ /* Console always has "carrier"; reassert before the read. */
+ tp->t_state |= TS_CARR_ON;
+ return((*linesw[kd_tty.t_line].l_read)(tp, uio));
+}
+
+
+/*
+ * kdwrite:
+ *
+ * This function does the device independent write action for this
+ * console (tty) driver.
+ *
+ * input: device number 'dev'
+ *
+ * output: characters are written to tty clists
+ *
+ */
+/*ARGSUSED*/
+int
+kdwrite(dev_t dev, io_req_t uio)
+{
+ /* Delegate entirely to the line discipline's write routine. */
+ return((*linesw[kd_tty.t_line].l_write)(&kd_tty, uio));
+}
+
+/*
+ * Mmap.
+ */
+
+/*ARGSUSED*/
+vm_offset_t
+kdmmap(dev_t dev, vm_offset_t off, vm_prot_t prot)
+{
+ /* Only the 128KB video window may be mapped; -1 (as an
+ unsigned vm_offset_t) signals an invalid offset. */
+ if (off >= (128*1024))
+ return(-1);
+
+ /* Get page frame number for the page to be mapped. */
+ return(i386_btop(kd_bitmap_start+off));
+}
+
+/* Forward port-death notifications to the generic tty handler. */
+int
+kdportdeath(
+ dev_t dev,
+ mach_port_t port)
+{
+ return (tty_portdeath(&kd_tty, (ipc_port_t)port));
+}
+
+/*
+ * kdgetstat: get-status entry point. Handles the keyboard-specific
+ * flavors (modifier state, keymap entry) and defers everything else
+ * to the generic tty status code.
+ */
+/*ARGSUSED*/
+io_return_t kdgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count) /* OUT */
+{
+ io_return_t result;
+
+ switch (flavor) {
+ case KDGSTATE:
+ if (*count < 1)
+ return (D_INVALID_OPERATION);
+ *data = kd_state;
+ *count = 1;
+ result = D_SUCCESS;
+ break;
+
+ case KDGKBENT:
+ /* NOTE(review): unlike KDGSTATE, *count is not checked
+ before writing a struct kbentry into data -- verify the
+ caller always supplies a large enough buffer. */
+ result = kdgetkbent((struct kbentry *)data);
+ *count = sizeof(struct kbentry)/sizeof(int);
+ break;
+
+ default:
+ result = tty_get_status(&kd_tty, flavor, data, count);
+ break;
+ }
+ return (result);
+}
+
+/*
+ * kdsetstat: set-status entry point. Handles keymap updates and the
+ * bell; any other flavor falls through to the generic tty code.
+ */
+/*ARGSUSED*/
+io_return_t kdsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
+{
+ io_return_t result;
+
+ switch (flavor) {
+ case KDSKBENT:
+ if (count < sizeof(struct kbentry)/sizeof(int)) {
+ return (D_INVALID_OPERATION);
+ }
+ result = kdsetkbent((struct kbentry *)data, 0);
+ break;
+
+ case KDSETBELL:
+ if (count < 1)
+ return (D_INVALID_OPERATION);
+ result = kdsetbell(*data, 0);
+ break;
+
+ default:
+ result = tty_set_status(&kd_tty, flavor, data, count);
+ }
+ return (result);
+}
+
+
+
+/*
+ * kdsetbell:
+ *
+ * Turn the bell on or off. Returns an error code if given a bogus
+ * on/off value.
+ */
+int
+kdsetbell(
+ int val, /* on or off */
+ int flags) /* flags set for console */
+{
+ switch (val) {
+ case KD_BELLON:
+ kd_bellon();
+ return(0);
+ case KD_BELLOFF:
+ kd_belloff(NULL);
+ return(0);
+ default:
+ return(D_INVALID_OPERATION);
+ }
+}
+
+/*
+ * kdgetkbent:
+ *
+ * Get entry from key mapping table. Returns error code, if any.
+ */
+int
+kdgetkbent(struct kbentry *kbent)
+{
+ u_char *cp;
+ spl_t o_pri = SPLKD(); /* probably superfluous */
+
+ /* Copy the 3-byte output sequence for (key, modifier state). */
+ cp = &key_map[kbent->kb_index][CHARIDX(kbent->kb_state)];
+ kbent->kb_value[0] = *cp++;
+ kbent->kb_value[1] = *cp++;
+ kbent->kb_value[2] = *cp;
+ (void)splx(o_pri);
+ return(0);
+}
+
+
+/*
+ * kdsetkbent:
+ *
+ * Set entry in key mapping table. Return error code, if any.
+ */
+int
+kdsetkbent(
+ struct kbentry *kbent,
+ int flags) /* flags set for console */
+{
+ u_char *cp;
+ spl_t o_pri;
+
+ /* Block keyboard interrupts while the table entry is updated. */
+ o_pri = SPLKD();
+ cp = &key_map[kbent->kb_index][CHARIDX(kbent->kb_state)];
+ *cp++ = kbent->kb_value[0];
+ *cp++ = kbent->kb_value[1];
+ *cp = kbent->kb_value[2];
+ (void)splx(o_pri);
+ return(0);
+}
+
+/*
+ * kdintr:
+ *
+ * This function is the interrupt code for the driver. Since this is
+ * a special tty (console), interrupts are only for input, so we read in
+ * the character. If in ascii mode, we then do the mapping translation
+ * from the keyboard switch table and place the characters on the tty's
+ * input switch table. If in event mode, we create and queue a kd_event.
+ *
+ * input: interrupt vector 'vec'
+ *
+ * output: character or sequence is placed on appropriate queue
+ *
+ */
+/*ARGSUSED*/
+void
+kdintr(int vec)
+{
+ struct tty *tp;
+ unsigned char c;
+ unsigned char scancode;
+ unsigned int char_idx;
+ boolean_t up = FALSE; /* key-up event */
+
+ if (kd_pollc)
+ return; /* kdb polling kbd */
+
+ if (!kd_initialized)
+ return;
+
+ tp = &kd_tty;
+#ifdef old
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0)
+ ; /* this should never loop */
+#else /* old */
+ {
+ /*
+ * Allow for keyboards that raise interrupt before
+ * the character gets to the buffer. But don't wait
+ * forever if grabbing the character by polling leaves
+ * the interrupt on but buffer empty.
+ */
+ /*
+ * Micronics VLB motherboard with 486DX2 can report keyboard
+ * interrupt before K_STATUS register indicates that the
+ * output buffer is full. Moreover, the bus won't settle
+ * while we poll K_STATUS at speed. Temporary fix is to break
+ * out after safety runs out and pick up keyboard event. This
+ * should be fixed eventually by putting a 1us timeout between
+ * inb's to K_STATUS and fix the pic initialization order to
+ * avoid bootup keyboard wedging (ie make kd a real device)
+ */
+ int safety = 1000;
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0)
+ if (!safety--) break; /* XXX */
+ }
+#endif /* old */
+ /*
+ * We may have seen a mouse event.
+ * (0x20 here is presumably the aux/mouse output-buffer status
+ * bit of the 8042 -- TODO confirm against kd.h.)
+ */
+ if ((inb(K_STATUS) & 0x20) == 0x20) {
+ if (mouse_in_use) {
+ mouse_handle_byte((u_char)inb(K_RDWR));
+ return;
+ } else {
+ printf("M%xI", inb(K_RDWR));
+ return;
+ }
+ }
+
+ /* Dispatch protocol bytes before treating the byte as a key. */
+ scancode = inb(K_RDWR);
+ if (scancode == K_EXTEND && kb_mode != KB_EVENT) {
+ kd_extended = TRUE;
+ goto done;
+ } else if (scancode == K_RESEND) {
+ kd_resend();
+ goto done;
+ } else if (scancode == K_ACKSC) {
+ kd_handle_ack();
+ goto done;
+ } else if (kd_kbd_mouse && kd_kbd_magic(scancode)) {
+ goto done;
+ } else if (kdcheckmagic(scancode)) {
+ goto done;
+ } else if (kb_mode == KB_EVENT) {
+ kd_enqsc(scancode);
+ goto done;
+ } /* else... */
+
+ if (scancode & K_UP) {
+ up = TRUE;
+ scancode &= ~K_UP;
+ }
+ if (scancode < NUMKEYS) {
+ /* Lookup in map, then process. */
+ char_idx = kdstate2idx(kd_state, kd_extended);
+ c = key_map[scancode][char_idx];
+ if (c == K_SCAN) {
+ /* Modifier key: next map byte names which one. */
+ c = key_map[scancode][++char_idx];
+ set_kd_state(do_modifier(kd_state, c, up));
+ } else if (!up) {
+ /* regular key-down */
+ unsigned int max; /* max index for char sequence */
+
+ max = char_idx + NUMOUTPUT;
+ char_idx++;
+ if (!kd_extended) {
+ if (kd_state&KS_CLKED) {
+ if (kd_isupper(c)) {
+ c += ('a' - 'A');
+ max = char_idx;
+ }
+ else if (kd_islower(c)) {
+ c -= ('a' - 'A');
+ max = char_idx;
+ }
+ }
+ /*
+ * Notice that even if the keypad is remapped,
+ * NumLock only effects the keys that are
+ * physically part of the keypad. Is this
+ * The Right Thing?
+ */
+ if ((kd_state&KS_NLKED) &&
+ (((K_HOMESC) <= scancode) &&
+ (scancode <= (K_DELSC)))) {
+ char_idx = CHARIDX(SHIFT_STATE);
+ c = key_map[scancode][char_idx];
+ max = char_idx + NUMOUTPUT;
+ char_idx++;
+ }
+ }
+
+ /*
+ * here's where we actually put the char (or
+ * char sequence, for function keys) onto the
+ * input queue.
+ */
+ for ( ; (c != K_DONE) && (char_idx <= max);
+ c = key_map[scancode][char_idx++]) {
+ (*linesw[tp->t_line].l_rint)(c, tp);
+ }
+ kd_extended = FALSE;
+ }
+ }
+
+ done:
+ return;
+}
+
+/*
+ * kd_handle_ack:
+ *
+ * For pending commands, complete the command. For data bytes,
+ * drop the ack on the floor.
+ */
+void
+kd_handle_ack(void)
+{
+ switch (kd_ack) {
+ case SET_LEDS:
+ /* Command byte was acked; now send the LED data byte. */
+ kd_setleds2();
+ kd_ack = DATA_ACK;
+ break;
+ case DATA_ACK:
+ /* Data byte acked; the two-byte sequence is complete. */
+ kd_ack = NOT_WAITING;
+ break;
+ case NOT_WAITING:
+ printf("unexpected ACK from keyboard\n");
+ break;
+ default:
+ panic("bogus kd_ack\n");
+ break;
+ }
+}
+
+/*
+ * kd_resend:
+ *
+ * Resend a missed keyboard command or data byte.
+ */
+void
+kd_resend(void)
+{
+ /* Only resend if a command/data exchange is actually pending. */
+ if (kd_ack == NOT_WAITING)
+ printf("unexpected RESEND from keyboard\n");
+ else
+ kd_senddata(last_sent);
+}
+
+
+/*
+ * do_modifier:
+ *
+ * Change keyboard state according to which modifier key and
+ * whether it went down or up.
+ *
+ * input: the current state, the key, and the key's direction.
+ * The key can be any key, not just a modifier key.
+ *
+ * output: the new state
+ */
+int
+do_modifier(
+ int state,
+ Scancode c,
+ boolean_t up)
+{
+ switch (c) {
+ case (K_ALTSC):
+ if (up)
+ state &= ~KS_ALTED;
+ else
+ state |= KS_ALTED;
+ kd_extended = FALSE;
+ break;
+#ifndef ORC
+ /* Non-ORC keyboards: caps-lock position acts as control. */
+ case (K_CLCKSC):
+#endif /* ORC */
+ case (K_CTLSC):
+ if (up)
+ state &= ~KS_CTLED;
+ else
+ state |= KS_CTLED;
+ kd_extended = FALSE;
+ break;
+#ifdef ORC
+ case (K_CLCKSC):
+ /* Locking keys toggle on key-down only. */
+ if (!up)
+ state ^= KS_CLKED;
+ break;
+#endif /* ORC */
+ case (K_NLCKSC):
+ if (!up)
+ state ^= KS_NLKED;
+ break;
+ case (K_LSHSC):
+ case (K_RSHSC):
+ if (up)
+ state &= ~KS_SHIFTED;
+ else
+ state |= KS_SHIFTED;
+ kd_extended = FALSE;
+ break;
+ }
+
+ /* Non-modifier keys fall through with the state unchanged. */
+ return(state);
+}
+
+
+/*
+ * kdcheckmagic:
+ *
+ * Check for magic keystrokes for invoking the debugger or
+ * rebooting or ...
+ *
+ * input: an unprocessed scancode
+ *
+ * output: TRUE if a magic key combination was recognized and
+ * processed. FALSE otherwise.
+ *
+ * side effects:
+ * various actions possible, depending on which keys are
+ * pressed. If the debugger is called, steps are taken
+ * to ensure that the system doesn't think the magic keys
+ * are still held down.
+ */
+boolean_t
+kdcheckmagic(Scancode scancode)
+{
+ static int magic_state = KS_NORMAL; /* like kd_state */
+ boolean_t up = FALSE;
+
+ if (scancode == 0x46) /* scroll lock */
+/* if (scancode == 0x52) ** insert key */
+ {
+ /* Toggle keyboard-mouse emulation. */
+ kd_kbd_mouse = !kd_kbd_mouse;
+ kd_kbd_magic_button = 0;
+ return(TRUE);
+ }
+ if (scancode & K_UP) {
+ up = TRUE;
+ scancode &= ~K_UP;
+ }
+ /* Track ctrl/alt state privately so we see raw combinations. */
+ magic_state = do_modifier(magic_state, scancode, up);
+
+ if ((magic_state&(KS_CTLED|KS_ALTED)) == (KS_CTLED|KS_ALTED)) {
+ switch (scancode) {
+#if MACH_KDB
+ case K_dSC: /* ctl-alt-d */
+ kdb_kintr(); /* invoke debugger */
+ /* Returned from debugger, so reset kbd state. */
+ (void)SPLKD();
+ magic_state = KS_NORMAL;
+ if (kb_mode == KB_ASCII)
+ kd_state = KS_NORMAL;
+ /* setting leds kills kbd */
+ else {
+ /* Fake key-up events so clients don't see the
+ magic keys as stuck down. */
+ kd_enqsc(K_ALTSC | K_UP);
+ kd_enqsc(K_CTLSC | K_UP);
+ kd_enqsc(K_dSC | K_UP);
+ }
+ return(TRUE);
+ break;
+#endif /* MACH_KDB */
+ case K_DELSC: /* ctl-alt-del */
+ /* if rebootflag is on, reboot the system */
+ if (rebootflag)
+ kdreboot();
+ break;
+ }
+ }
+ return(FALSE);
+}
+
+
+/*
+ * kdstate2idx:
+ *
+ * Map a modifier-state bit vector to the second index into key_map.
+ * Extended scancodes always use the unshifted column. Priority when
+ * several modifiers are down: SHIFT+ALT, then CTRL, then SHIFT, then
+ * ALT. (CTRL outranks SHIFT so that CTRL-SHIFT-2 and CTRL-2 produce
+ * the same keycode. --Derek Upham 1997/06/25)
+ */
+unsigned int
+kdstate2idx(unsigned int state, /* bit vector, not a state index */
+ boolean_t extended)
+{
+ if (extended || state == KS_NORMAL)
+ return (CHARIDX(NORM_STATE));
+ if ((state&(KS_SHIFTED|KS_ALTED)) == (KS_SHIFTED|KS_ALTED))
+ return (CHARIDX(SHIFT_ALT));
+ if (state&KS_CTLED)
+ return (CHARIDX(CTRL_STATE));
+ if (state&KS_SHIFTED)
+ return (CHARIDX(SHIFT_STATE));
+ if (state&KS_ALTED)
+ return (CHARIDX(ALT_STATE));
+ return (CHARIDX(NORM_STATE));
+}
+
+/*
+ * kdstart:
+ *
+ * This function does the general processing of characters and other
+ * operations for the device driver. The device independent portion of
+ * the tty driver calls this routine (it's setup in kdinit) with a
+ * given command. That command is then processed, and control is passed
+ * back to the kernel.
+ *
+ * input: tty pointer 'tp', and command to execute 'cmd'
+ *
+ * output: command is executed
+ *
+ * Entered and left at spltty. Drops priority to spl0 to display character.
+ * ASSUMES that it is never called from interrupt-driven code.
+ */
+void
+kdstart(struct tty *tp)
+{
+ spl_t o_pri;
+ int ch;
+
+ if (tp->t_state & TS_TTSTOP)
+ return;
+ /* Drain the entire output queue in one call. */
+ for ( ; ; ) {
+ tp->t_state &= ~TS_BUSY;
+ if (tp->t_state & TS_TTSTOP)
+ break;
+ if ((tp->t_outq.c_cc <= 0) || (ch = getc(&tp->t_outq)) == -1)
+ break;
+ /*
+ * Drop priority for long screen updates. ttstart() calls us at
+ * spltty.
+ */
+ o_pri = splsoftclock(); /* block timeout */
+ kd_putc_esc(ch);
+ splx(o_pri);
+ }
+ /* Wake writers once the queue has drained below the low mark. */
+ if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
+ tt_write_wakeup(tp);
+ }
+}
+
+/*ARGSUSED*/
+void
+kdstop(
+ struct tty *tp,
+ int flags)
+{
+ /*
+ * do nothing - all characters are output by one call to
+ * kdstart.
+ */
+}
+
+/*
+ * kdinit:
+ *
+ * This code initializes the structures and sets up the port registers
+ * for the console driver.
+ *
+ * Each bitmap-based graphics card is likely to require a unique
+ * way to determine the card's presence. The driver runs through
+ * each "special" card that it knows about and uses the first one
+ * that it finds. If it doesn't find any, it assumes that an
+ * EGA-like card is installed.
+ *
+ * input : None. Interrupts are assumed to be disabled
+ * output : Driver is initialized
+ *
+ */
+void
+kdinit(void)
+{
+ unsigned char k_comm; /* keyboard command byte */
+
+ /* Idempotent: safe to call from both kdopen() and console code. */
+ if (kd_initialized)
+ return;
+
+ esc_spt = esc_seq;
+ kd_attr = KA_NORMAL;
+
+ kd_attrflags = 0;
+ kd_color = KA_NORMAL;
+ /*
+ * board specific initialization: set up globals and kd_dxxx
+ * pointers, and synch displayed cursor with logical cursor.
+ */
+ kd_xga_init();
+
+ /* get rid of any garbage in output buffer */
+ if (inb(K_STATUS) & K_OBUF_FUL)
+ (void)inb(K_RDWR);
+
+ kd_sendcmd(KC_CMD_READ); /* ask for the ctlr command byte */
+ k_comm = kd_getdata();
+ k_comm &= ~K_CB_DISBLE; /* clear keyboard disable bit */
+ k_comm |= K_CB_ENBLIRQ; /* enable interrupt */
+ kd_sendcmd(KC_CMD_WRITE); /* write new ctlr command byte */
+ kd_senddata(k_comm);
+ unmask_irq(KBD_IRQ);
+ kd_initialized = TRUE;
+
+#if ENABLE_IMMEDIATE_CONSOLE
+ /* Now that we're set up, we no longer need or want the
+ immediate console. */
+ {
+ extern boolean_t immediate_console_enable;
+ immediate_console_enable = FALSE;
+ }
+
+ /* The immediate console printed stuff at the bottom of the
+ screen rather than at the cursor position, so that's where
+ we should start. */
+ kd_setpos(ONE_PAGE - ONE_LINE); printf("\n");
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+
+ cnsetleds(kd_state = KS_NORMAL);
+ /* clear the LEDs AFTER we
+ enable the keyboard controller.
+ This keeps NUM-LOCK from being
+ set on the NEC Versa. */
+
+ /* Allocate the input buffer. */
+ ttychars(&kd_tty);
+}
+
+/*
+ * kd_belloff:
+ *
+ * This routine shuts the bell off, by sending the appropriate code
+ * to the speaker port.
+ *
+ * input : None
+ * output : bell is turned off
+ *
+ */
+/* TRUE while the bell is sounding; guards against re-arming the
+ timeout in kd_putc(). */
+static boolean_t kd_bellstate = FALSE;
+
+void
+kd_belloff(void * param)
+{
+ unsigned char status;
+
+ /* Disconnect timer 2 from the speaker and drop the data bit. */
+ status = (inb(K_PORTB) & ~(K_SPKRDATA | K_ENABLETMR2));
+ outb(K_PORTB, status);
+ kd_bellstate = FALSE;
+ return;
+}
+
+
+/*
+ * kd_bellon:
+ *
+ * This routine turns the bell on.
+ *
+ * input : None
+ * output : bell is turned on
+ *
+ */
+void
+kd_bellon(void)
+{
+ unsigned char status;
+
+ /* program timer 2 (square wave, 16-bit divisor 1500 sets the
+ bell pitch) */
+ outb(K_TMRCTL, K_SELTMR2 | K_RDLDTWORD | K_TSQRWAVE | K_TBINARY);
+ outb(K_TMR2, 1500 & 0xff); /* LSB */
+ outb(K_TMR2, (int)1500 >> 8); /* MSB */
+
+ /* start speaker - why must we turn on K_SPKRDATA? */
+ status = (inb(K_PORTB)| K_ENABLETMR2 | K_SPKRDATA);
+ outb(K_PORTB, status);
+ return;
+}
+
+/*
+ *
+ * Function kd_putc_esc():
+ *
+ * This function puts a character on the screen, handling escape
+ * sequences.
+ *
+ * input : character to be displayed (or part of an escape code)
+ * output : character is displayed, or some action is taken
+ *
+ */
+void
+kd_putc_esc(u_char c)
+{
+	if (c == (K_ESC)) {
+		if (esc_spt == esc_seq) {
+			/* start collecting a new escape sequence */
+			*(esc_spt++)=(K_ESC);
+			*(esc_spt) = '\0';
+		} else {
+			/* ESC inside a pending sequence: flush it literally
+			 * and reset the collection buffer */
+			kd_putc((K_ESC));
+			esc_spt = esc_seq;
+		}
+	} else {
+		if (esc_spt - esc_seq) {
+			/* a sequence is in progress; append and reparse */
+			if (esc_spt - esc_seq > K_MAXESC - 1)
+				esc_spt = esc_seq;	/* overflow: drop it */
+			else {
+				*(esc_spt++) = c;
+				*(esc_spt) = '\0';
+				kd_parseesc();
+			}
+		} else {
+			/* no sequence pending: plain character */
+			kd_putc(c);
+		}
+	}
+}
+
+/*
+ *
+ * Function kd_putc():
+ *
+ * This function simply puts a character on the screen. It does some
+ * special processing for linefeed, carriage return, backspace and
+ * the bell.
+ *
+ * input : character to be displayed
+ * output : character is displayed, or some action is taken
+ *
+ */
+int sit_for_0 = 1;	/* when nonzero, NUL characters are silently dropped */
+
+void
+kd_putc(u_char ch)
+{
+	if ((!ch) && sit_for_0)
+		return;
+
+	switch (ch) {
+	case ((K_LF)):
+		kd_down();
+		break;
+	case ((K_CR)):
+		kd_cr();
+		break;
+	case ((K_BS)):
+		kd_left();
+		break;
+	case ((K_HT)):
+		kd_tab();
+		break;
+	case ((K_BEL)):
+		/*
+		 * Similar problem to K_BS here (behavior might depend
+		 * on tty setting).  Also check LF and CR.
+		 */
+		if (!kd_bellstate)
+		  {
+		    /* ring for 1/8 second; kd_belloff runs from timeout */
+		    kd_bellon();
+		    timeout(kd_belloff, 0, hz/8 );
+		    kd_bellstate = TRUE;
+		  }
+		break;
+	default:
+		/* printable: write char+attribute via the board-specific
+		 * put routine, then advance the cursor */
+		(*kd_dput)(kd_curpos, ch, kd_attr);
+		kd_right();
+		break;
+	}
+	return;
+}
+
+
+/*
+ * kd_setpos:
+ *
+ * This function sets the software and hardware cursor position
+ * on the screen, using device-specific code to actually move and
+ * display the cursor.
+ *
+ * input : position on (or off) screen to move the cursor to
+ * output : cursor position is updated, screen has been scrolled
+ * if necessary to bring cursor position back onto
+ * screen.
+ *
+ */
+void
+kd_setpos(csrpos_t newpos)
+{
+	if (newpos > ONE_PAGE) {
+		kd_scrollup();
+		newpos = BOTTOM_LINE;
+	}
+	/* NOTE(review): if csrpos_t is an unsigned type this test is
+	 * always false and the kd_scrolldn() path is dead -- confirm
+	 * csrpos_t's definition before relying on it. */
+	if (newpos < 0) {
+		kd_scrolldn();
+		newpos = 0;
+	}
+
+	(*kd_dsetcursor)(newpos);
+}
+
+
+/*
+ * kd_scrollup:
+ *
+ * This function scrolls the screen up one line using a DMA memory
+ * copy.
+ *
+ * input : None
+ * output : lines on screen appear to be shifted up one line
+ *
+ */
+void
+kd_scrollup(void)
+{
+	csrpos_t to;
+	csrpos_t from;
+	int	count;		/* in ONE_SPACE (char+attr) units */
+
+	/* scroll up: move everything below the first line to the top */
+	to = 0;
+	from = ONE_LINE;
+	count = (ONE_PAGE - ONE_LINE)/ONE_SPACE;
+	(*kd_dmvup)(from, to, count);
+
+	/* clear bottom line */
+	to = BOTTOM_LINE;
+	count = ONE_LINE/ONE_SPACE;
+	(*kd_dclear)(to, count, kd_attr);
+	return;
+}
+
+
+/*
+ * kd_scrolldn:
+ *
+ * Scrolls the characters on the screen down one line.
+ *
+ * input	: None
+ * output	: Lines on screen appear to be moved down one line
+ *
+ */
+void
+kd_scrolldn(void)
+{
+	csrpos_t to;
+	csrpos_t from;
+	int	count;		/* in ONE_SPACE (char+attr) units */
+
+	/* move down, copying from the last cell backwards so the
+	 * overlapping regions are handled by the "mvdown" primitive */
+	to = ONE_PAGE - ONE_SPACE;
+	from = ONE_PAGE - ONE_LINE - ONE_SPACE;
+	count = (ONE_PAGE - ONE_LINE) / ONE_SPACE;
+	(*kd_dmvdown)(from, to, count);
+
+	/* clear top line */
+	to = 0;
+	count = ONE_LINE/ONE_SPACE;
+	(*kd_dclear)(to, count, kd_attr);
+	return;
+
+}
+
+
+/*
+ * kd_parseesc:
+ *
+ * This routine begins the parsing of an escape sequence. It uses the
+ * escape sequence array and the escape spot pointer to handle
+ * asynchronous parsing of escape sequences.
+ *
+ * input : String of characters prepended by an escape
+ * output : Appropriate actions are taken depending on the string as
+ * defined by the ansi terminal specification
+ *
+ */
+void
+kd_parseesc(void)
+{
+	u_char	*escp;
+
+	escp = esc_seq + 1;		/* point to char following ESC */
+	switch(*(escp)) {
+	case 'c':
+		/* ESC c : reset -- clear screen and home the cursor */
+		kd_cls();
+		kd_home();
+		esc_spt = esc_seq;	/* reset spot in ESC sequence */
+		break;
+	case '[':
+		/* ESC [ : CSI -- hand the rest to the ANSI parser */
+		escp++;
+		kd_parserest(escp);
+		break;
+	case '\0':
+		break;			/* not enough info yet	*/
+	default:
+		kd_putc(*escp);
+		esc_spt = esc_seq;	/* inv sequence char, reset */
+		break;
+	}
+	return;
+
+}
+
+
+/* kd_update_kd_attr:
+ *
+ * Updates kd_attr according to kd_attrflags and kd_color.
+ * This code has its origin from console.c and selection.h in
+ * linux 2.2 drivers/char/.
+ * Modified for GNU Mach by Marcus Brinkmann.
+ */
+
+/* Swap foreground/background nibbles while keeping the blink and
+ * intensity bits (mask 0x88) in place. */
+#define reverse_video_char(a) (((a) & 0x88) | ((((a) >> 4) | ((a) << 4)) & 0x77))
+static void
+kd_update_kd_attr(void)
+{
+	kd_attr = kd_color;
+	if (kd_attrflags & KAX_UNDERLINE)
+		kd_attr = (kd_attr & 0xf0) | KAX_COL_UNDERLINE;
+	else if (kd_attrflags & KAX_DIM)
+		kd_attr = (kd_attr & 0xf0) | KAX_COL_DIM;
+	if (kd_attrflags & KAX_REVERSE)
+		kd_attr = reverse_video_char(kd_attr);
+	if (kd_attrflags & KAX_BLINK)
+		kd_attr ^= 0x80;	/* blink bit */
+	if (kd_attrflags & KAX_BOLD)
+		kd_attr ^= 0x08;	/* intensity bit */
+}
+
+/* color_table added by Julio Merino to take proper color order.
+ * I get this code from Linux 2.2 source code in file:
+ * linux/drivers/char/console.c
+ *
+ * Maps ANSI color numbers (0-7, 8-15) to VGA attribute color indexes.
+ */
+unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7,
+				8,12,10,14, 9,13,11,15 };
+/*
+ * kd_parserest:
+ *
+ * This function will complete the parsing of an escape sequence and
+ * call the appropriate support routine if it matches a character. This
+ * function could be greatly improved by using a function jump table, and
+ * removing this bulky switch statement.
+ *
+ * input : An string
+ * output : Appropriate action based on whether the string matches a
+ * sequence acceptable to the ansi terminal specification
+ *
+ */
+void
+kd_parserest(u_char *cp)
+{
+	int	number[16], npar = 0, i;
+	csrpos_t newpos;
+
+	for(i=0;i<=15;i++)
+		number[i] = MACH_ATOI_DEFAULT;
+
+	/* Parse up to 16 semicolon-separated numeric parameters.  The
+	 * `cp++' in the condition consumes the ';' separator; mach_atoi
+	 * returns the number of digits consumed (0 for an empty field,
+	 * leaving MACH_ATOI_DEFAULT in place). */
+	do {
+		cp += mach_atoi(cp, &number[npar]);
+	} while (*cp == ';' && ++npar <= 15 && cp++);
+
+	/* Dispatch on the final character of the CSI sequence. */
+	switch(*cp) {
+	case 'm':	/* SGR: set graphic rendition */
+	    for (i=0;i<=npar;i++)
+		switch(number[i]) {
+		case MACH_ATOI_DEFAULT:
+		case 0:
+			kd_attrflags = 0;
+			kd_color = KA_NORMAL;
+			break;
+		case 1:
+			kd_attrflags |= KAX_BOLD;
+			kd_attrflags &= ~KAX_DIM;
+			break;
+		case 2:
+			kd_attrflags |= KAX_DIM;
+			kd_attrflags &= ~KAX_BOLD;
+			break;
+		case 4:
+			kd_attrflags |= KAX_UNDERLINE;
+			break;
+		case 5:
+			kd_attrflags |= KAX_BLINK;
+			break;
+		case 7:
+			kd_attrflags |= KAX_REVERSE;
+			break;
+		case 8:
+			kd_attrflags |= KAX_INVISIBLE;
+			break;
+		case 21:
+		case 22:
+			kd_attrflags &= ~(KAX_BOLD | KAX_DIM);
+			break;
+		case 24:
+			kd_attrflags &= ~KAX_UNDERLINE;
+			break;
+		case 25:
+			kd_attrflags &= ~KAX_BLINK;
+			break;
+		case 27:
+			kd_attrflags &= ~KAX_REVERSE;
+			break;
+		case 38:
+			kd_attrflags |= KAX_UNDERLINE;
+			kd_color = (kd_color & 0xf0) | (KA_NORMAL & 0x0f);
+			break;
+		case 39:
+			kd_attrflags &= ~KAX_UNDERLINE;
+			kd_color = (kd_color & 0xf0) | (KA_NORMAL & 0x0f);
+			break;
+		default:
+			if (number[i] >= 30 && number[i] <= 37) {
+				/* foreground color */
+				kd_color = (kd_color & 0xf0) | color_table[(number[i] - 30)];
+			} else if (number[i] >= 40 && number[i] <= 47) {
+				/* background color */
+				kd_color = (kd_color & 0x0f) | (color_table[(number[i] - 40)] << 4);
+			}
+			break;
+		}
+	    kd_update_kd_attr();
+	    esc_spt = esc_seq;
+	    break;
+	case '@':	/* ICH: insert blank characters */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_insch(1);
+		else
+			kd_insch(number[0]);
+		esc_spt = esc_seq;
+		break;
+	case 'A':	/* CUU: cursor up */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_up();
+		else
+			while (number[0]--)
+				kd_up();
+		esc_spt = esc_seq;
+		break;
+	case 'B':	/* CUD: cursor down */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_down();
+		else
+			while (number[0]--)
+				kd_down();
+		esc_spt = esc_seq;
+		break;
+	case 'C':	/* CUF: cursor forward */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_right();
+		else
+			while (number[0]--)
+				kd_right();
+		esc_spt = esc_seq;
+		break;
+	case 'D':	/* CUB: cursor back */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_left();
+		else
+			while (number[0]--)
+				kd_left();
+		esc_spt = esc_seq;
+		break;
+	case 'E':	/* CNL: cursor to start of line, n down */
+		kd_cr();
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_down();
+		else
+			while (number[0]--)
+				kd_down();
+		esc_spt = esc_seq;
+		break;
+	case 'F':	/* CPL: cursor to start of line, n up */
+		kd_cr();
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_up();
+		else
+			while (number[0]--)
+				kd_up();
+		esc_spt = esc_seq;
+		break;
+	case 'G':	/* CHA: cursor to absolute column */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			number[0] = 0;
+		else
+			if (number[0] > 0)
+				--number[0];	/* because number[0] is from 1 */
+		kd_setpos(BEG_OF_LINE(kd_curpos) + number[0] * ONE_SPACE);
+		esc_spt = esc_seq;
+		break;
+	case 'f':
+	case 'H':	/* CUP/HVP: cursor to row;column (1-based) */
+		if (number[0] == MACH_ATOI_DEFAULT && number[1] == MACH_ATOI_DEFAULT)
+		{
+			kd_home();
+			esc_spt = esc_seq;
+			break;
+		}
+		if (number[0] == MACH_ATOI_DEFAULT)
+			number[0] = 0;
+		else if (number[0] > 0)
+			--number[0];		/* numbered from 1 */
+		newpos = (number[0] * ONE_LINE);	/* setup row */
+		if (number[1] == MACH_ATOI_DEFAULT)
+			number[1] = 0;
+		else if (number[1] > 0)
+			number[1]--;
+		newpos += (number[1] * ONE_SPACE);	/* setup column */
+		if (newpos < 0)
+			newpos = 0;		/* upper left */
+		if (newpos > ONE_PAGE)
+			newpos = (ONE_PAGE - ONE_SPACE);	/* lower right */
+		kd_setpos(newpos);
+		esc_spt = esc_seq;
+		break;			/* done or not ready	*/
+	case 'J':	/* ED: erase in display */
+		switch(number[0]) {
+		case MACH_ATOI_DEFAULT:
+		case 0:
+			kd_cltobcur();	/* clears from current
+					   pos to bottom.
+					   */
+			break;
+		case 1:
+			kd_cltopcur();	/* clears from top to
+					   current pos.
+					   */
+			break;
+		case 2:
+			kd_cls();
+			break;
+		default:
+			break;
+		}
+		esc_spt = esc_seq;	/* reset it */
+		break;
+	case 'K':	/* EL: erase in line */
+		switch(number[0]) {
+		case MACH_ATOI_DEFAULT:
+		case 0:
+			kd_cltoecur();	/* clears from current
+					   pos to eoln.
+					   */
+			break;
+		case 1:
+			kd_clfrbcur();	/* clears from begin
+					   of line to current
+					   pos.
+					   */
+			break;
+		case 2:
+			kd_eraseln();	/* clear entire line */
+			break;
+		default:
+			break;
+		}
+		esc_spt = esc_seq;
+		break;
+	case 'L':	/* IL: insert lines */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_insln(1);
+		else
+			kd_insln(number[0]);
+		esc_spt = esc_seq;
+		break;
+	case 'M':	/* DL: delete lines */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_delln(1);
+		else
+			kd_delln(number[0]);
+		esc_spt = esc_seq;
+		break;
+	case 'P':	/* DCH: delete characters */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_delch(1);
+		else
+			kd_delch(number[0]);
+		esc_spt = esc_seq;
+		break;
+	case 'S':	/* SU: scroll up */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_scrollup();
+		else
+			while (number[0]--)
+				kd_scrollup();
+		esc_spt = esc_seq;
+		break;
+	case 'T':	/* SD: scroll down */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_scrolldn();
+		else
+			while (number[0]--)
+				kd_scrolldn();
+		esc_spt = esc_seq;
+		break;
+	case 'X':	/* ECH: erase characters */
+		if (number[0] == MACH_ATOI_DEFAULT)
+			kd_erase(1);
+		else
+			kd_erase(number[0]);
+		esc_spt = esc_seq;
+		break;
+	case '\0':
+		break;			/* not enough yet	*/
+	default:
+		kd_putc(*cp);		/* show inv character	*/
+		esc_spt = esc_seq;	/* inv entry, reset	*/
+		break;
+	}
+	return;
+}
+
+/* Advance to the next 8-column tab stop by emitting spaces (so the
+ * intervening cells take the current attribute, and the screen may
+ * scroll via kd_putc's cursor movement). */
+void
+kd_tab(void)
+{
+	int i;
+
+	for (i = 8 - (CURRENT_COLUMN(kd_curpos) % 8); i > 0; i--) {
+		kd_putc(' ');
+	}
+
+}
+
+
+/*
+ * kd_cls:
+ *
+ * This function clears the screen with spaces and the current attribute.
+ *
+ * input	: None
+ * output	: Screen is cleared
+ *
+ */
+void
+kd_cls(void)
+{
+	(*kd_dclear)(0, ONE_PAGE/ONE_SPACE, kd_attr);
+	return;
+}
+
+
+/*
+ * kd_home:
+ *
+ * This function will move the cursor to the home position on the screen,
+ * as well as set the internal cursor position (kd_curpos) to home.
+ *
+ * input : None
+ * output : Cursor position is moved
+ *
+ */
+void
+kd_home(void)
+{
+	kd_setpos(0);
+	return;
+}
+
+
+/*
+ * kd_up:
+ *
+ * This function moves the cursor up one line position.
+ *
+ * input	: None
+ * output	: Cursor moves up one line, or screen is scrolled
+ *
+ */
+void
+kd_up(void)
+{
+	if (kd_curpos < ONE_LINE)	/* already on top line */
+		kd_scrolldn();
+	else
+		kd_setpos(kd_curpos - ONE_LINE);
+	return;
+}
+
+
+/*
+ * kd_down:
+ *
+ * This function moves the cursor down one line position.
+ *
+ * input	: None
+ * output	: Cursor moves down one line or the screen is scrolled
+ *
+ */
+void
+kd_down(void)
+{
+	if (kd_curpos >= (ONE_PAGE - ONE_LINE))	/* already on bottom line */
+		kd_scrollup();
+	else
+		kd_setpos(kd_curpos + ONE_LINE);
+	return;
+}
+
+
+/*
+ * kd_right:
+ *
+ * This function moves the cursor one position to the right.
+ *
+ * input	: None
+ * output	: Cursor moves one position to the right
+ *
+ */
+void
+kd_right(void)
+{
+	if (kd_curpos < (ONE_PAGE - ONE_SPACE))
+		kd_setpos(kd_curpos + ONE_SPACE);
+	else {
+		/* at the last cell of the page: scroll and wrap to the
+		 * beginning of the (now shifted-up) bottom line */
+		kd_scrollup();
+		kd_setpos(BEG_OF_LINE(kd_curpos));
+	}
+	return;
+}
+
+
+/*
+ * kd_left:
+ *
+ * This function moves the cursor one position to the left.
+ * (No-op at the top-left corner of the screen.)
+ *
+ * input	: None
+ * output	: Cursor moves one position to the left
+ *
+ */
+void
+kd_left(void)
+{
+	if (0 < kd_curpos)
+		kd_setpos(kd_curpos - ONE_SPACE);
+	return;
+}
+
+
+/*
+ * kd_cr:
+ *
+ * This function moves the cursor to the beginning of the current
+ * line.
+ *
+ * input	: None
+ * output	: Cursor moves to the beginning of the current line
+ *
+ */
+void
+kd_cr(void)
+{
+	kd_setpos(BEG_OF_LINE(kd_curpos));
+	return;
+}
+
+
+/*
+ * kd_cltobcur:
+ *
+ * This function clears from the current cursor position to the bottom
+ * of the screen.
+ *
+ * input : None
+ * output : Screen is cleared from current cursor position to bottom
+ *
+ */
+void
+kd_cltobcur(void)
+{
+	csrpos_t start;
+	int	count;
+
+	start = kd_curpos;
+	count = (ONE_PAGE - kd_curpos)/ONE_SPACE;
+	(*kd_dclear)(start, count, kd_attr);
+	return;
+}
+
+
+/*
+ * kd_cltopcur:
+ *
+ * This function clears from the current cursor position to the top
+ * of the screen.
+ *
+ * input	: None
+ * output	: Screen is cleared from current cursor position to top
+ *
+ */
+void
+kd_cltopcur(void)
+{
+	int	count;
+
+	/* + ONE_SPACE so the cursor's own cell is included */
+	count = (kd_curpos + ONE_SPACE) / ONE_SPACE;
+	(*kd_dclear)(0, count, kd_attr);
+	return;
+}
+
+
+/*
+ * kd_cltoecur:
+ *
+ * This function clears from the current cursor position to eoln.
+ *
+ * input	: None
+ * output	: Line is cleared from current cursor position to eoln
+ *
+ */
+void
+kd_cltoecur(void)
+{
+	csrpos_t i;
+	csrpos_t hold;
+
+	hold = BEG_OF_LINE(kd_curpos) + ONE_LINE;	/* start of next line */
+	for (i = kd_curpos; i < hold; i += ONE_SPACE) {
+		(*kd_dput)(i, K_SPACE, kd_attr);
+	}
+}
+
+
+/*
+ * kd_clfrbcur:
+ *
+ * This function clears from the beginning of the line to the current
+ * cursor position (inclusive).
+ *
+ * input	: None
+ * output	: Line is cleared from beginning to current position
+ *
+ */
+void
+kd_clfrbcur(void)
+{
+	csrpos_t i;
+
+	for (i = BEG_OF_LINE(kd_curpos); i <= kd_curpos; i += ONE_SPACE) {
+		(*kd_dput)(i, K_SPACE, kd_attr);
+	}
+}
+
+
+/*
+ * kd_delln:
+ *
+ * This function deletes 'number' lines on the screen by effectively
+ * scrolling the lines up and replacing the old lines with spaces.
+ *
+ * input : number of lines to delete
+ * output : lines appear to be deleted
+ *
+ */
+void
+kd_delln(int number)
+{
+	csrpos_t to;
+	csrpos_t from;
+	int	delbytes;		/* num of bytes to delete */
+	int	count;			/* num of words to move or fill */
+
+	if (number <= 0)
+		return;
+
+	delbytes = number * ONE_LINE;
+	to = BEG_OF_LINE(kd_curpos);
+	/* clamp so we never move/clear past the end of the page */
+	if (to + delbytes >= ONE_PAGE)
+		delbytes = ONE_PAGE - to;
+	if (to + delbytes < ONE_PAGE) {
+		from = to + delbytes;
+		count = (ONE_PAGE - from) / ONE_SPACE;
+		(*kd_dmvup)(from, to, count);
+	}
+
+	/* blank the vacated lines at the bottom */
+	to = ONE_PAGE - delbytes;
+	count = delbytes / ONE_SPACE;
+	(*kd_dclear)(to, count, kd_attr);
+	return;
+}
+
+
+/*
+ * kd_insln:
+ *
+ * This function inserts a line above the current one by
+ * scrolling the current line and all the lines below it down.
+ *
+ * input	: number of lines to insert
+ * output	: New lines appear to be inserted
+ *
+ */
+void
+kd_insln(int number)
+{
+	csrpos_t to;
+	csrpos_t from;
+	int	count;
+	csrpos_t top;			/* top of block to be moved */
+	int	insbytes;		/* num of bytes inserted */
+
+	if (number <= 0)
+		return;
+
+	top = BEG_OF_LINE(kd_curpos);
+	insbytes = number * ONE_LINE;
+	/* clamp insertion to what fits below the current line */
+	if (top + insbytes > ONE_PAGE)
+		insbytes = ONE_PAGE - top;
+	to = ONE_PAGE - ONE_SPACE;
+	from = to - insbytes;
+	if (from > top) {
+		count = (from - top + ONE_SPACE) / ONE_SPACE;
+		(*kd_dmvdown)(from, to, count);
+	}
+
+	/* blank the newly opened lines */
+	count = insbytes / ONE_SPACE;
+	(*kd_dclear)(top, count, kd_attr);
+	return;
+}
+
+
+/*
+ * kd_delch:
+ *
+ * This function deletes a number of characters from the current
+ * position in the line.
+ *
+ * input : number of characters to delete
+ * output : characters appear to be deleted
+ *
+ */
+void
+kd_delch(int number)
+{
+	int	count;			/* num words moved/filled */
+	int	delbytes;		/* bytes to delete */
+	csrpos_t to;
+	csrpos_t from;
+	csrpos_t nextline;		/* start of next line */
+
+	if (number <= 0)
+		return;
+
+	nextline = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+	delbytes = number * ONE_SPACE;
+	/* clamp the deletion to the current line */
+	if (kd_curpos + delbytes > nextline)
+		delbytes = nextline - kd_curpos;
+	if (kd_curpos + delbytes < nextline) {
+		from = kd_curpos + delbytes;
+		to = kd_curpos;
+		count = (nextline - from) / ONE_SPACE;
+		(*kd_dmvup)(from, to, count);
+	}
+
+	/* blank the vacated cells at the end of the line */
+	to = nextline - delbytes;
+	count = delbytes / ONE_SPACE;
+	(*kd_dclear)(to, count, kd_attr);
+	return;
+
+}
+
+
+/*
+ * kd_erase:
+ *
+ * This function overwrites characters with a space starting with the
+ * current cursor position and ending in number spaces away.
+ * The cursor does not move.
+ *
+ * input	: number of characters to erase
+ * output	: characters appear to be blanked or erased
+ *
+ */
+void
+kd_erase(int number)
+{
+	csrpos_t i;
+	csrpos_t stop;
+
+	stop = kd_curpos + (ONE_SPACE * number);
+	/* never erase past the end of the current line */
+	if (stop > BEG_OF_LINE(kd_curpos) + ONE_LINE)
+		stop = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+	for (i = kd_curpos; i < stop; i += ONE_SPACE) {
+		(*kd_dput)(i, K_SPACE, kd_attr);
+	}
+	return;
+}
+
+
+/*
+ * kd_eraseln:
+ *
+ * This function erases the current line with spaces.
+ *
+ * input : None
+ * output : Current line is erased
+ *
+ */
+void
+kd_eraseln(void)
+{
+	csrpos_t i;
+	csrpos_t stop;
+
+	stop = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+	for (i = BEG_OF_LINE(kd_curpos); i < stop; i += ONE_SPACE) {
+		(*kd_dput)(i, K_SPACE, kd_attr);
+	}
+	return;
+}
+
+
+/*
+ * kd_insch:
+ *
+ * This function inserts a blank at the current cursor position
+ * and moves all other characters on the line over.
+ *
+ * input	: number of blanks to insert
+ * output	: Blanks are inserted at cursor position
+ *
+ */
+void
+kd_insch(int number)
+{
+	csrpos_t to;
+	csrpos_t from;
+	int	count;
+	csrpos_t nextline;		/* start of next line */
+	int	insbytes;		/* num of bytes inserted */
+
+	if (number <= 0)
+		return;
+
+	nextline = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+	insbytes = number * ONE_SPACE;
+	/* clamp insertion to the current line */
+	if (kd_curpos + insbytes > nextline)
+		insbytes = nextline - kd_curpos;
+
+	/* shift the tail of the line right; chars pushed past the end
+	 * of the line are lost */
+	to = nextline - ONE_SPACE;
+	from = to - insbytes;
+	if (from >= kd_curpos) {
+		count = (from - kd_curpos + ONE_SPACE) / ONE_SPACE;
+		(*kd_dmvdown)(from, to, count);
+	}
+
+	count = insbytes / ONE_SPACE;
+	(*kd_dclear)(kd_curpos, count, kd_attr);
+	return;
+}
+
+
+/*
+ * kd_isupper, kd_islower:
+ *
+ * Didn't want to include ctype.h because it brings in stdio.h, and
+ * only want to see if the darn character is uppercase or lowercase.
+ *
+ * input : Character 'c'
+ * output : isuuper gives TRUE if character is uppercase, islower
+ * returns TRUE if character is lowercase
+ *
+ */
+/* ASCII-only case tests (deliberately not <ctype.h>; see comment above). */
+boolean_t
+kd_isupper(u_char c)
+{
+	if (('A' <= c) && (c <= 'Z'))
+		return(TRUE);
+	return(FALSE);
+}
+
+boolean_t
+kd_islower(u_char c)
+{
+	if (('a' <= c) && (c <= 'z'))
+		return(TRUE);
+	return(FALSE);
+}
+
+/*
+ * kd_senddata:
+ *
+ * This function sends a byte to the keyboard RDWR port, but
+ * first waits until the input/output data buffer is clear before
+ * sending the data. Note that this byte can be either data or a
+ * keyboard command.
+ *
+ */
+void
+kd_senddata(unsigned char ch)
+{
+	/* busy-wait until the controller's input buffer is empty */
+	while (inb(K_STATUS) & K_IBUF_FUL)
+		;
+	outb(K_RDWR, ch);
+	last_sent = ch;		/* remembered, presumably so the interrupt
+				   handler can resend on NAK -- confirm */
+	return;
+}
+
+/*
+ * kd_sendcmd:
+ *
+ * This function sends a command byte to the keyboard command
+ * port, but first waits until the input/output data buffer is
+ * clear before sending the data.
+ *
+ */
+void
+kd_sendcmd(unsigned char ch)
+{
+	while (inb(K_STATUS) & K_IBUF_FUL)
+		;
+	outb(K_CMD, ch);
+	return;
+}
+
+
+/*
+ * kd_getdata:
+ *
+ * This function returns a data byte from the keyboard RDWR port,
+ * after waiting until the port is flagged as having something to
+ * read.  Note: busy-waits forever if nothing arrives.
+ */
+unsigned char
+kd_getdata(void)
+{
+	while ((inb(K_STATUS) & K_OBUF_FUL) == 0)
+		;
+	return(inb(K_RDWR));
+}
+
+/*
+ * kd_cmdreg_write:
+ *
+ * Write `val' to the keyboard controller's command byte: send the
+ * "write command byte" command, then the new value.  Only the low
+ * 8 bits of `val' reach the port (outb truncates).
+ */
+void
+kd_cmdreg_write(int val)
+{
+int ch=KC_CMD_WRITE;
+
+	while (inb(K_STATUS) & K_IBUF_FUL)
+		;
+	outb(K_CMD, ch);
+
+	while (inb(K_STATUS) & K_IBUF_FUL)
+		;
+	outb(K_RDWR, val);
+}
+
+/*
+ * kd_mouse_drain:
+ *
+ * Wait for the controller input buffer to empty, then read and log
+ * (discard) everything pending in the output buffer.
+ */
+void
+kd_mouse_drain(void)
+{
+	int i;
+	while(inb(K_STATUS) & K_IBUF_FUL)
+		;
+	while((i = inb(K_STATUS)) & K_OBUF_FUL)
+		printf("kbd: S = %x D = %x\n", i, inb(K_RDWR));
+}
+
+/*
+ * set_kd_state:
+ *
+ * Set kd_state and update the keyboard status LEDs.
+ */
+void
+set_kd_state(int newstate)
+{
+	kd_state = newstate;
+	kd_setleds1(state2leds(newstate));
+}
+
+/*
+ * state2leds:
+ *
+ * Return a byte containing LED settings for the keyboard, given
+ * a state vector.  (Scroll lock is not represented here.)
+ */
+u_char
+state2leds(int state)
+{
+	u_char result = 0;
+
+	if (state & KS_NLKED)
+		result |= K_LED_NUMLK;
+	if (state & KS_CLKED)
+		result |= K_LED_CAPSLK;
+	return(result);
+}
+
+/*
+ * kd_setleds[12]:
+ *
+ * Set the keyboard LEDs according to the given byte.  Interrupt
+ * driven: setleds1 sends the LED command and records the value in
+ * kd_nextled; setleds2 is presumably invoked once the keyboard ACKs,
+ * to send the payload byte (confirm against the interrupt handler).
+ */
+void
+kd_setleds1(u_char val)
+{
+	/* refuse to start a new exchange while one is outstanding */
+	if (kd_ack != NOT_WAITING) {
+#ifdef	MACH_KBD
+		printf("kd_setleds1: unexpected state (%d)\n", kd_ack);
+#endif
+		return;
+	}
+
+	kd_ack = SET_LEDS;
+	kd_nextled = val;
+	kd_senddata(K_CMD_LEDS);
+}
+
+void
+kd_setleds2(void)
+{
+	kd_senddata(kd_nextled);
+}
+
+
+/*
+ * cnsetleds:
+ *
+ * like kd_setleds[12], but not interrupt-based.
+ * Currently disabled because cngetc ignores caps lock and num
+ * lock anyway.
+ */
+void
+cnsetleds(u_char val)
+{
+	kd_senddata(K_CMD_LEDS);
+	(void)kd_getdata();		/* XXX - assume is ACK */
+	kd_senddata(val);
+	(void)kd_getdata();		/* XXX - assume is ACK */
+}
+
+/* Reboot the machine: reset the display, then ask the keyboard
+ * controller to pulse the CPU reset line; fall back to cpu_shutdown()
+ * if that had no effect. */
+void
+kdreboot(void)
+{
+	(*kd_dreset)();
+
+#ifndef	BROKEN_KEYBOARD_RESET
+	kd_sendcmd(0xFE);	/* XXX - magic #: pulse output port
+				   (reset line), presumably -- confirm */
+	delay(1000000);		/* wait to see if anything happens */
+#endif
+	/*
+	 * If that didn't work, then we'll just have to try and
+	 * do it the hard way.
+	 */
+	cpu_shutdown();
+}
+
+static int which_button[] = {0, MOUSE_LEFT, MOUSE_MIDDLE, MOUSE_RIGHT};
+static struct mouse_motion moved;
+
+/* Emulate a mouse with the keyboard: F1/F2/F3 toggle the three
+ * buttons, the arrow keys move one step, and Home/PgUp/End/PgDn move
+ * diagonally two steps.  Returns 1 if the scancode was consumed, 0 to
+ * let normal key processing continue. */
+int
+kd_kbd_magic(int scancode)
+{
+int new_button = 0;
+
+	if (kd_kbd_mouse == 2)
+		printf("sc = %x\n", scancode);
+
+	switch (scancode) {
+/* f1 f2 f3 */
+	case 0x3d:
+		new_button++;
+		/* FALLTHROUGH */
+	case 0x3c:
+		new_button++;
+		/* FALLTHROUGH */
+	case 0x3b:
+		new_button++;
+		/* new_button is now 1 (F1), 2 (F2) or 3 (F3) */
+		if (kd_kbd_magic_button && (new_button != kd_kbd_magic_button)) {
+			/* down w/o up */
+			mouse_button(which_button[kd_kbd_magic_button], 1);
+		}
+		/* normal */
+		if (kd_kbd_magic_button == new_button) {
+			mouse_button(which_button[new_button], 1);
+			kd_kbd_magic_button = 0;
+		} else {
+			mouse_button(which_button[new_button], 0);
+			kd_kbd_magic_button = new_button;
+		}
+		break;
+
+/* right left up down */
+	case 0x4d:
+		moved.mm_deltaX = kd_kbd_magic_scale;
+		moved.mm_deltaY = 0;
+		mouse_moved(moved);
+		break;
+	case 0x4b:
+		moved.mm_deltaX = -kd_kbd_magic_scale;
+		moved.mm_deltaY = 0;
+		mouse_moved(moved);
+		break;
+	case 0x48:
+		moved.mm_deltaX = 0;
+		moved.mm_deltaY = kd_kbd_magic_scale;
+		mouse_moved(moved);
+		break;
+	case 0x50:
+		moved.mm_deltaX = 0;
+		moved.mm_deltaY = -kd_kbd_magic_scale;
+		mouse_moved(moved);
+		break;
+/* home pageup end pagedown */
+	case 0x47:
+		moved.mm_deltaX = -2*kd_kbd_magic_scale;
+		moved.mm_deltaY = 2*kd_kbd_magic_scale;
+		mouse_moved(moved);
+		break;
+	case 0x49:
+		moved.mm_deltaX = 2*kd_kbd_magic_scale;
+		moved.mm_deltaY = 2*kd_kbd_magic_scale;
+		mouse_moved(moved);
+		break;
+	case 0x4f:
+		moved.mm_deltaX = -2*kd_kbd_magic_scale;
+		moved.mm_deltaY = -2*kd_kbd_magic_scale;
+		mouse_moved(moved);
+		break;
+	case 0x51:
+		moved.mm_deltaX = 2*kd_kbd_magic_scale;
+		moved.mm_deltaY = -2*kd_kbd_magic_scale;
+		mouse_moved(moved);
+		break;
+
+	default:
+		return 0;		/* not one of ours */
+	}
+	return 1;
+}
+
+
+
+/*
+ * Code specific to EGA/CGA/VGA boards. This code relies on the fact
+ * that the "slam" functions take a word count and ONE_SPACE takes up
+ * 1 word.
+ */
+#define SLAMBPW 2 /* bytes per word for "slam" fcns */
+
+/*
+ * xga_getpos:
+ *
+ * This function returns the current hardware cursor position on the
+ * screen, scaled for compatibility with kd_curpos.
+ *
+ * input : None
+ * output : returns the value of cursor position on screen
+ *
+ */
+static csrpos_t
+xga_getpos(void)
+
+{
+	unsigned char	low;
+	unsigned char	high;
+	short pos;
+
+	/* read the 16-bit cursor location from the two CRTC index
+	 * registers, high byte first */
+	outb(kd_index_reg, C_HIGH);
+	high = inb(kd_io_reg);
+	outb(kd_index_reg, C_LOW);
+	low = inb(kd_io_reg);
+	pos = (0xff&low) + ((unsigned short)high<<8);
+
+	/* scale from character cells to byte offsets (char+attr) */
+	return(ONE_SPACE * (csrpos_t)pos);
+}
+
+
+/*
+ * kd_xga_init:
+ *
+ * Initialization specific to character-based graphics adapters.
+ */
+void
+kd_xga_init(void)
+{
+	unsigned char	start, stop;
+
+#if 0
+	unsigned char	screen;
+
+	/* XXX: this conflicts with read/writing the RTC */
+
+	outb(CMOS_ADDR, CMOS_EB);
+	screen = inb(CMOS_DATA) & CM_SCRMSK;
+	switch(screen) {
+	default:
+	    printf("kd: unknown screen type, defaulting to EGA\n");
+	    /* FALLTHROUGH */
+	case CM_EGA_VGA:
+#endif
+		/*
+		 * Here we'll want to query to bios on the card
+		 * itself, because then we can figure out what
+		 * type we have exactly. At this point we only
+		 * know that the card is NOT CGA or MONO. For
+		 * now, however, we assume backwards compatibility
+		 * with 0xb8000 as the starting screen offset
+		 * memory location for these cards.
+		 *
+		 */
+
+		vid_start = (u_char *)phystokv(EGA_START);
+		kd_index_reg = EGA_IDX_REG;
+		kd_io_reg = EGA_IO_REG;
+		kd_lines = 25;
+		kd_cols = 80;
+		kd_bitmap_start = 0xa0000; /* XXX - magic numbers */
+		{ /* XXX - is there a cleaner way to do this? */
+			/* zero the start of the bitmap plane region */
+			char *addr = (char *)phystokv(kd_bitmap_start);
+			int i;
+			for (i = 0; i < 200; i++)
+				addr[i] = 0x00;
+		}
+#if 0
+	    break;
+	    /* XXX: some buggy BIOSes report these... */
+	case CM_CGA_40:
+		vid_start = (u_char *)phystokv(CGA_START);
+		kd_index_reg = CGA_IDX_REG;
+		kd_io_reg = CGA_IO_REG;
+		kd_lines = 25;
+		kd_cols = 40;
+		break;
+	case CM_CGA_80:
+		vid_start = (u_char *)phystokv(CGA_START);
+		kd_index_reg = CGA_IDX_REG;
+		kd_io_reg = CGA_IO_REG;
+		kd_lines = 25;
+		kd_cols = 80;
+		break;
+	case CM_MONO_80:
+		vid_start = (u_char *)phystokv(MONO_START);
+		kd_index_reg = MONO_IDX_REG;
+		kd_io_reg = MONO_IO_REG;
+		kd_lines = 25;
+		kd_cols = 80;
+		break;
+	}
+#endif
+
+	outb(kd_index_reg, C_START);
+	start = inb(kd_io_reg);
+	/* Make sure cursor is enabled (clear the disable bit, 0x20) */
+	start &= ~0x20;
+	outb(kd_io_reg, start);
+	outb(kd_index_reg, C_STOP);
+	stop = inb(kd_io_reg);
+
+	if (!start && !stop)
+	{
+		/* Some firmware seem not to be initializing the cursor size
+		 * any more...  Try using standard values (scanlines 14-15). */
+		outb(kd_index_reg, C_START);
+		outb(kd_io_reg, 14);
+		outb(kd_index_reg, C_STOP);
+		outb(kd_io_reg, 15);
+	}
+
+	/* sync the logical cursor with wherever the firmware left it */
+	kd_setpos(xga_getpos());
+}
+
+
+/*
+ * charput:
+ *
+ * Put attributed character for EGA/CGA/etc.
+ */
+static void
+charput(csrpos_t pos, char ch, char chattr)
+{
+	/* pos is a byte offset: even byte = character, odd = attribute */
+	*(vid_start + pos) = ch;
+	*(vid_start + pos + 1) = chattr;
+}
+
+
+/*
+ * charsetcursor:
+ *
+ * Set hardware cursor position for EGA/CGA/etc., and record the
+ * new logical position in kd_curpos.
+ */
+static void
+charsetcursor(csrpos_t newpos)
+{
+	short curpos;		/* position, not scaled for attribute byte */
+
+	curpos = newpos / ONE_SPACE;
+	outb(kd_index_reg, C_HIGH);
+	outb(kd_io_reg, (u_char)(curpos>>8));
+	outb(kd_index_reg, C_LOW);
+	outb(kd_io_reg, (u_char)(curpos&0xff));
+
+	kd_curpos = newpos;
+}
+
+
+/*
+ * charmvup:
+ *
+ * Block move up for EGA/CGA/etc.  count is in words (char+attr cells).
+ */
+static void
+charmvup(csrpos_t from, csrpos_t to, int count)
+{
+	kd_slmscu(vid_start+from, vid_start+to, count);
+}
+
+
+/*
+ * charmvdown:
+ *
+ * Block move down for EGA/CGA/etc.  count is in words (char+attr cells).
+ */
+static void
+charmvdown(csrpos_t from, csrpos_t to, int count)
+{
+	kd_slmscd(vid_start+from, vid_start+to, count);
+}
+
+
+/*
+ * charclear:
+ *
+ * Fast clear for CGA/EGA/etc.: fill count cells with space in the
+ * given attribute.
+ */
+static void
+charclear(csrpos_t to, int count, char chattr)
+{
+	kd_slmwd(vid_start+to, count, ((unsigned short)chattr<<8)+K_SPACE);
+}
+
+
+/*
+ * kd_noopreset:
+ *
+ * No-op reset routine for kd_dreset.
+ */
+static void
+kd_noopreset(void)
+{
+}
+
+
+/*
+ * bmpput: Copy a character from the font to the frame buffer.
+ */
+
+void
+bmpput(
+	csrpos_t pos,
+	char ch,
+	char chattr)
+{
+	short xbit, ybit;		/* u/l corner of char pos */
+	u_char *to, *from;
+	short i, j;
+	/* reverse video is done by XORing the glyph bits */
+	u_char mask = (chattr == KA_REVERSE ? 0xff : 0);
+
+	if ((u_char)ch >= chars_in_font)
+		ch = K_QUES;		/* substitute '?' for unknown glyphs */
+
+	bmpch2bit(pos, &xbit, &ybit);
+	to = bit2fbptr(xbit, ybit);
+	from = font_start + ch * char_byte_width;
+	for (i = 0; i < char_height; ++i) {
+		for (j = 0; j < char_byte_width; ++j)
+			*(to+j) = *(from+j) ^ mask;
+		to += fb_byte_width;
+		from += font_byte_width;
+	}
+}
+
+/*
+ * bmpcp1char: copy 1 char from one place in the frame buffer to
+ *	       another.
+ */
+static void
+bmpcp1char(
+	csrpos_t from,
+	csrpos_t to)
+{
+	short from_xbit, from_ybit;
+	short to_xbit, to_ybit;
+	u_char *tp, *fp;
+	short i, j;
+
+	bmpch2bit(from, &from_xbit, &from_ybit);
+	bmpch2bit(to, &to_xbit, &to_ybit);
+
+	tp = bit2fbptr(to_xbit, to_ybit);
+	fp = bit2fbptr(from_xbit, from_ybit);
+
+	for (i = 0; i < char_height; ++i) {
+		for (j = 0; j < char_byte_width; ++j)
+			*(tp+j) = *(fp+j);
+		tp += fb_byte_width;
+		fp += fb_byte_width;
+	}
+}
+
+/*
+ * bmpvmup: Copy a block of character positions upwards.
+ */
+void
+bmpmvup(
+	csrpos_t from,
+	csrpos_t to,
+	int count)
+{
+	short from_xbit, from_ybit;
+	short to_xbit, to_ybit;
+	short i;
+
+	bmpch2bit(from, &from_xbit, &from_ybit);
+	bmpch2bit(to, &to_xbit, &to_ybit);
+
+	if (from_xbit == xstart && to_xbit == xstart && count%kd_cols == 0) {
+		/* fast case - entire lines: one linear framebuffer copy */
+		from_xbit = to_xbit = 0;
+		bmppaintcsr(kd_curpos, char_black);	/* don't copy cursor */
+		count /= kd_cols;			/* num lines */
+		count *= fb_byte_width * (char_height+cursor_height);
+		kd_slmscu(bit2fbptr(from_xbit, from_ybit),
+			  bit2fbptr(to_xbit, to_ybit),
+			  count/SLAMBPW);
+		bmppaintcsr(kd_curpos, char_white);	/* repaint cursor */
+	} else {
+		/* slow case - everything else: cell by cell */
+		for (i=0; i < count; ++i) {
+			bmpcp1char(from, to);
+			from += ONE_SPACE;
+			to += ONE_SPACE;
+		}
+	}
+}
+
+/*
+ * bmpmvdown: copy a block of characters down.  `from'/`to' name the
+ * LAST cell of each block; the copy proceeds backwards.
+ */
+void
+bmpmvdown(
+	csrpos_t from,
+	csrpos_t to,
+	int count)
+{
+	short from_xbit, from_ybit;
+	short to_xbit, to_ybit;
+	short i;
+
+	bmpch2bit(from, &from_xbit, &from_ybit);
+	bmpch2bit(to, &to_xbit, &to_ybit);
+
+	if (from_xbit == xstart + (kd_cols - 1) * char_width
+	    && to_xbit == xstart + (kd_cols - 1) * char_width
+	    && count%kd_cols == 0) {
+		/* fast case - entire lines*/
+		from_xbit = to_xbit = 8 * (fb_byte_width - 1);
+						/* last byte on line */
+		bmppaintcsr(kd_curpos, char_black);	/* don't copy cursor */
+		count /= kd_cols;			/* num lines */
+		count *= fb_byte_width * (char_height+cursor_height);
+		kd_slmscd(bit2fbptr(from_xbit, from_ybit),
+			  bit2fbptr(to_xbit, to_ybit),
+			  count/SLAMBPW);
+		bmppaintcsr(kd_curpos, char_white);
+	} else {
+		/* slow case - everything else */
+		for (i=0; i < count; ++i) {
+			bmpcp1char(from, to);
+			from -= ONE_SPACE;
+			to -= ONE_SPACE;
+		}
+	}
+}
+
+/*
+ * bmpclear: clear one or more character positions.
+ */
+void
+bmpclear(
+ csrpos_t to, /* 1st char */
+ int count, /* num chars */
+ char chattr) /* reverse or normal */
+{
+ short i;
+ u_short clearval;
+ u_short clearbyte = (chattr == KA_REVERSE ? char_white : char_black);
+
+ clearval = (u_short)(clearbyte<<8) + clearbyte;
+ if (to == 0 && count >= kd_lines * kd_cols) {
+ /* fast case - entire page */
+ kd_slmwd(vid_start, (fb_byte_width * fb_height)/SLAMBPW,
+ clearval);
+ } else
+ /* slow case */
+ for (i = 0; i < count; ++i) {
+ bmpput(to, K_SPACE, chattr);
+ to += ONE_SPACE;
+ }
+}
+
+/*
+ * bmpsetcursor: update the display and set the logical cursor.
+ */
+void
+bmpsetcursor(csrpos_t pos)
+{
+ /* erase old cursor & paint new one */
+ bmppaintcsr(kd_curpos, char_black);
+ bmppaintcsr(pos, char_white);
+ kd_curpos = pos;
+}
+
+/*
+ * bmppaintcsr: paint cursor bits.
+ */
+void
+bmppaintcsr(
+ csrpos_t pos, /* character position of the cursor */
+ u_char val) /* fill byte: char_white paints, char_black erases */
+{
+ short xbit, ybit;
+ u_char *cp;
+ short line, byte;
+
+ bmpch2bit(pos, &xbit, &ybit);
+ ybit += char_height; /* position at bottom of line */
+ cp = bit2fbptr(xbit, ybit);
+ /* fill the cursor band: cursor_height scan lines, one character
+ cell (char_byte_width bytes) wide */
+ for (line = 0; line < cursor_height; ++line) {
+ for (byte = 0; byte < char_byte_width; ++byte)
+ *(cp+byte) = val;
+ cp += fb_byte_width; /* advance one scan line */
+ }
+}
+
+/*
+ * bmpch2bit: convert character position to x and y bit addresses.
+ * (0, 0) is the upper left corner.
+ */
+void
+bmpch2bit(
+ csrpos_t pos,
+ short *xb,
+ short *yb) /* x, y bit positions, u/l corner */
+{
+ short xch, ych;
+
+ /* character column and row (csrpos_t counts bytes, ONE_SPACE
+ bytes per character cell) */
+ xch = (pos / ONE_SPACE) % kd_cols;
+ ych = pos / (ONE_SPACE * kd_cols);
+ /* scale to bit coordinates; each row also carries the cursor band */
+ *xb = xstart + xch * char_width;
+ *yb = ystart + ych * (char_height + cursor_height);
+}
+
+/*
+ * bit2fbptr: return a pointer into the frame buffer corresponding to
+ * the bit address (x, y).
+ * Assumes that xb and yb don't point to the middle of a
+ * byte.
+ */
+u_char *
+bit2fbptr(
+ short xb,
+ short yb)
+{
+ /* yb selects the scan line, xb/8 the byte within it */
+ return(vid_start + yb * fb_byte_width + xb/8);
+}
+
+
+/*
+ * console stuff
+ */
+
+/*
+ * XXX we assume that pcs *always* have a console
+ */
+int
+kdcnprobe(struct consdev *cp)
+{
+ int maj, unit, pri;
+
+ /* the kd console is always present: report major 0, unit 0 */
+ maj = 0;
+ unit = 0;
+ pri = CN_INTERNAL;
+
+ cp->cn_dev = makedev(maj, unit);
+ cp->cn_pri = pri;
+ return 0;
+}
+
+int
+kdcninit(struct consdev *cp)
+{
+ /* console attach: just initialize the keyboard/display driver */
+ kdinit();
+ return 0;
+}
+
+int
+kdcngetc(dev_t dev, int wait)
+{
+ if (wait) {
+ /* busy-wait (polling) until a character arrives */
+ int c;
+ while ((c = kdcnmaygetc()) < 0)
+ continue;
+ return c;
+ }
+ else
+ /* non-blocking: returns -1 if no character is pending */
+ return kdcnmaygetc();
+}
+
+int
+kdcnputc(dev_t dev, int c)
+{
+ if (!kd_initialized)
+ return -1;
+
+ /* Note that tab is handled in kd_putc */
+ if (c == '\n')
+ kd_putc('\r'); /* expand NL to CR NL */
+ kd_putc_esc(c);
+
+ return 0;
+}
+
+/*
+ * kdcnmaygetc:
+ *
+ * Get one character using polling, rather than interrupts. Used
+ * by the kernel debugger. Note that Caps Lock is ignored.
+ * Normally this routine is called with interrupts already
+ * disabled, but there is code in place so that it will be more
+ * likely to work even if interrupts are turned on.
+ */
+int
+kdcnmaygetc(void)
+{
+ unsigned char c;
+ unsigned char scancode;
+ unsigned int char_idx;
+#ifdef notdef
+ spl_t o_pri;
+#endif
+ boolean_t up;
+
+ if (! kd_initialized)
+ return -1;
+
+ kd_extended = FALSE;
+#ifdef notdef
+ o_pri = splhi();
+#endif
+ /* poll until a printable character can be returned, or the
+ controller's output buffer runs dry */
+ for ( ; ; ) {
+ if (!(inb(K_STATUS) & K_OBUF_FUL))
+ return -1; /* nothing buffered */
+
+ up = FALSE;
+ /*
+ * We'd come here for mouse events in debugger, if
+ * the mouse were on.
+ */
+ if ((inb(K_STATUS) & 0x20) == 0x20) {
+ printf("M%xP", inb(K_RDWR));
+ continue; /* discard mouse data */
+ }
+ scancode = inb(K_RDWR);
+ /*
+ * Handle extend modifier and
+ * ack/resend, otherwise we may never receive
+ * a key.
+ */
+ if (scancode == K_EXTEND) {
+ kd_extended = TRUE;
+ continue;
+ } else if (scancode == K_RESEND) {
+ printf("cngetc: resend");
+ kd_resend();
+ continue;
+ } else if (scancode == K_ACKSC) {
+ printf("cngetc: handle_ack");
+ kd_handle_ack();
+ continue;
+ }
+ /* the high bit of the scancode marks a key release */
+ if (scancode & K_UP) {
+ up = TRUE;
+ scancode &= ~K_UP;
+ }
+ if (kd_kbd_mouse)
+ kd_kbd_magic(scancode);
+ if (scancode < NUMKEYS) {
+ /* Lookup in map, then process. */
+ char_idx = kdstate2idx(kd_state, kd_extended);
+ c = key_map[scancode][char_idx];
+ if (c == K_SCAN) {
+ /* modifier key: fold it into kd_state */
+ c = key_map[scancode][++char_idx];
+ kd_state = do_modifier(kd_state, c, up);
+#ifdef notdef
+ cnsetleds(state2leds(kd_state));
+#endif
+ } else if (! up
+ && c == K_ESC
+ && key_map[scancode][char_idx+1] == 0x5b) {
+ /* As a convenience for the nice
+ people using our debugger, remap
+ some keys to the readline-like
+ shortcuts supported by dde.
+
+ XXX This is a workaround for the
+ limited kernel getchar interface.
+ It is only used by the debugger. */
+ c = key_map[scancode][char_idx+2];
+ switch (c) {
+#define _MAP(A,B,C) (C)
+#define MAP(T) _MAP(T)
+#define CTRL(c) ((c) & 0x1f)
+ case MAP(K_HOME): c = CTRL('a'); break;
+ case MAP(K_UA): c = CTRL('p'); break;
+ case MAP(K_LA): c = CTRL('b'); break;
+ case MAP(K_RA): c = CTRL('f'); break;
+ case MAP(K_DA): c = CTRL('n'); break;
+ case MAP(K_END): c = CTRL('e'); break;
+ /* delete */
+ case 0x39: c = CTRL('d'); break;
+#undef CTRL
+#undef MAP
+#undef _MAP
+ default:
+ /* Retain the old behavior. */
+ c = K_ESC;
+ }
+
+ return(c);
+ } else if (!up) {
+ /* regular key-down */
+ if (c == K_CR)
+ c = K_LF; /* map return to line feed */
+#ifdef notdef
+ splx(o_pri);
+#endif
+ return(c & 0177); /* strip to 7-bit ascii */
+ }
+ }
+ }
+}
diff --git a/i386/i386at/kd.h b/i386/i386at/kd.h
new file mode 100644
index 0000000..5bfabce
--- /dev/null
+++ b/i386/i386at/kd.h
@@ -0,0 +1,744 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd.h
+ Description: definitions for AT keyboard/display driver
+ Authors: Eugene Kuerner, Adrienne Jardetzky, Mike Kupfer
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * This file contains defines and structures that implement hardware
+ * keyboard mapping into ansi defined output codes. Note that this
+ * is structured so that "re-mapping" of actual keys is allowed at
+ * anytime during execution of the console driver. And each scan code
+ * is potentially expanded into NUMKEYS characters. Which is programmable
+ * at runtime or whenever.
+ *
+ * 02 Nov 1988 orc!eugene
+ *
+ */
+
+#ifndef _KD_H_
+#define _KD_H_
+
+#include <device/input.h>
+#include <mach/boolean.h>
+#include <sys/types.h>
+#include <device/cons.h>
+#include <device/io_req.h>
+#include <device/buf.h>
+#include <device/input.h>
+#include <device/tty.h>
+#include <i386at/kdsoft.h>
+
+/*
+ * Where memory for various graphics adapters starts.
+ */
+#define EGA_START 0x0b8000
+#define CGA_START 0x0b8000
+#define MONO_START 0x0b0000
+
+/*
+ * Common I/O ports.
+ */
+#define K_TMR0 0x40 /* timer 0, 1, or 2 value (r/w) */
+#define K_TMR1 0x41
+#define K_TMR2 0x42
+#define K_TMRCTL 0x43 /* timer control (write-only) */
+#define K_RDWR 0x60 /* keyboard data & cmds (read/write) */
+#define K_PORTB 0x61 /* r/w. speaker & status lines */
+#define K_STATUS 0x64 /* keybd status (read-only) */
+#define K_CMD 0x64 /* keybd ctlr command (write-only) */
+
+/*
+ * I/O ports for various graphics adapters.
+ */
+#define EGA_IDX_REG 0x3d4
+#define EGA_IO_REG 0x3d5
+#define CGA_IDX_REG 0x3d4
+#define CGA_IO_REG 0x3d5
+#define MONO_IDX_REG 0x3b4
+#define MONO_IO_REG 0x3b5
+
+/*
+ * Commands sent to graphics adapter.
+ */
+#define C_START 0x0a /* return cursor line start */
+#define C_STOP 0x0b /* return cursor line stop */
+#define C_LOW 0x0f /* return low byte of cursor addr */
+#define C_HIGH 0x0e /* high byte */
+
+/*
+ * Bit definitions for K_STATUS port.
+ */
+#define K_OBUF_FUL 0x01 /* output (from keybd) buffer full */
+#define K_IBUF_FUL 0x02 /* input (to keybd) buffer full */
+#define K_SYSFLAG 0x04 /* "System Flag" */
+#define K_CMD_DATA 0x08 /* 1 = input buf has cmd, 0 = data */
+#define K_KBD_INHBT 0x10 /* 0 if keyboard inhibited */
+
+/*
+ * Keyboard controller commands (sent to K_CMD port).
+ */
+#define KC_CMD_READ 0x20 /* read controller command byte */
+#define KC_CMD_WRITE 0x60 /* write controller command byte */
+#define KC_CMD_TEST 0xab /* test interface */
+#define KC_CMD_DUMP 0xac /* diagnostic dump */
+#define KC_CMD_DISBLE 0xad /* disable keyboard */
+#define KC_CMD_ENBLE 0xae /* enable keyboard */
+#define KC_CMD_RDKBD 0xc4 /* read keyboard ID */
+#define KC_CMD_ECHO 0xee /* used for diagnostic testing */
+
+/*
+ * Keyboard commands (send to K_RDWR).
+ */
+#define K_CMD_LEDS 0xed /* set status LEDs (caps lock, etc.) */
+
+/*
+ * Bit definitions for controller command byte (sent following
+ * K_CMD_WRITE command).
+ */
+#define K_CB_ENBLIRQ 0x01 /* enable data-ready intrpt */
+#define K_CB_SETSYSF 0x04 /* Set System Flag */
+#define K_CB_INHBOVR 0x08 /* Inhibit Override */
+#define K_CB_DISBLE 0x10 /* disable keyboard */
+
+/*
+ * Bit definitions for "Indicator Status Byte" (sent after a
+ * K_CMD_LEDS command). If the bit is on, the LED is on. Undefined
+ * bit positions must be 0.
+ */
+#define K_LED_SCRLLK 0x1 /* scroll lock */
+#define K_LED_NUMLK 0x2 /* num lock */
+#define K_LED_CAPSLK 0x4 /* caps lock */
+
+/*
+ * Bit definitions for "Miscellaneous port B" (K_PORTB).
+ */
+/* read/write */
+#define K_ENABLETMR2 0x01 /* enable output from timer 2 */
+#define K_SPKRDATA 0x02 /* direct input to speaker */
+#define K_ENABLEPRTB 0x04 /* "enable" port B */
+#define K_EIOPRTB 0x08 /* enable NMI on parity error */
+/* read-only */
+#define K_REFRESHB 0x10 /* refresh flag from INLTCONT PAL */
+#define K_OUT2B 0x20 /* timer 2 output */
+#define K_ICKB 0x40 /* I/O channel check (parity error) */
+
+/*
+ * Bit definitions for timer control port (K_TMRCTL).
+ */
+/* select timer 0, 1, or 2. Don't mess with 0 or 1. */
+#define K_SELTMRMASK 0xc0
+#define K_SELTMR0 0x00
+#define K_SELTMR1 0x40
+#define K_SELTMR2 0x80
+
+/* read/load control */
+#define K_RDLDTMRMASK 0x30
+#define K_HOLDTMR 0x00 /* freeze timer until read */
+#define K_RDLDTLSB 0x10 /* read/load LSB */
+#define K_RDLDTMSB 0x20 /* read/load MSB */
+#define K_RDLDTWORD 0x30 /* read/load LSB then MSB */
+
+/* mode control */
+#define K_TMDCTLMASK 0x0e
+#define K_TCOUNTINTR 0x00 /* "Term Count Intr" */
+#define K_TONESHOT 0x02 /* "Progr One-Shot" */
+#define K_TRATEGEN 0x04 /* "Rate Gen (/n)" */
+#define K_TSQRWAVE 0x06 /* "Sqr Wave Gen" */
+#define K_TSOFTSTRB 0x08 /* "Softw Trig Strob" */
+#define K_THARDSTRB 0x0a /* "Hardw Trig Strob" */
+
+/* count mode */
+#define K_TCNTMDMASK 0x01
+#define K_TBINARY 0x00 /* 16-bit binary counter */
+#define K_TBCD 0x01 /* 4-decade BCD counter */
+
+
+
+/*
+ * Fun definitions for displayed characters and characters read from
+ * the keyboard.
+ */
+
+/*
+ * Attributes for character sent to display.
+ */
+#define KA_NORMAL 0x07
+#define KA_REVERSE 0x70
+
+#define KAX_REVERSE 0x01
+#define KAX_UNDERLINE 0x02
+#define KAX_BLINK 0x04
+#define KAX_BOLD 0x08
+#define KAX_DIM 0x10
+#define KAX_INVISIBLE 0x20
+
+#define KAX_COL_UNDERLINE 0x0f /* bright white */
+#define KAX_COL_DIM 0x08 /* gray */
+
+/*
+ * For an EGA-like display, each character takes two bytes, one for the
+ * actual character, followed by one for its attributes.
+ * Be very careful if you change ONE_SPACE, as these constants are also used
+ * to define the device-independent display implemented by kd.c.
+ * (See kdsoft.h for more details on the device-independent display.)
+ */
+#define ONE_SPACE 2 /* bytes in 1 char, EGA-like display */
+#define BOTTOM_LINE 3840 /* 1st byte in last line of display */
+#define ONE_PAGE 4000 /* number of bytes in page */
+#define ONE_LINE 160 /* number of bytes in line */
+
+#define BEG_OF_LINE(pos) ((pos) - (pos)%ONE_LINE)
+#define CURRENT_COLUMN(pos) (((pos) % ONE_LINE) / ONE_SPACE)
+
+#define NUMKEYS 89
+#define NUMSTATES 5 /* NORM_STATE, ... */
+#define NUMOUTPUT 3 /* max size of byte seq from key */
+#define WIDTH_KMAP (NUMSTATES * NUMOUTPUT)
+
+/*
+ * Keyboard states. Used for KDGKBENT, KDSKBENT ioctl's. If you
+ * change these values, you should also rearrange the entries in
+ * key_map.
+ */
+/* "state indices" (for computing key_map index) */
+#define NORM_STATE 0
+#define SHIFT_STATE 1
+#define CTRL_STATE 2
+#define ALT_STATE 3
+#define SHIFT_ALT 4
+/* macro to convert from state index to actual key_map index */
+#define CHARIDX(sidx) ((sidx) * NUMOUTPUT)
+ /* where sidx is in [NORM_STATE ... SHIFT_ALT] */
+
+/* "state bits" for kd_state vector */
+#define KS_NORMAL 0x00
+#define KS_SLKED 0x01
+#define KS_NLKED 0x02
+#define KS_CLKED 0x04
+#define KS_ALTED 0x08
+#define KS_SHIFTED 0x10
+#define KS_CTLED 0x20
+
+
+/* special codes */
+#define K_UP 0x80 /* OR'd in if key below is released */
+#define K_EXTEND 0xe0 /* marker for "extended" sequence */
+#define K_ACKSC 0xfa /* ack for keyboard command */
+#define K_RESEND 0xfe /* request to resend keybd cmd */
+
+/* modifier keys */
+#define K_CTLSC 0x1d /* control down */
+#define K_LSHSC 0x2a /* left shift down */
+#define K_RSHSC 0x36 /* right shift down */
+#define K_ALTSC 0x38 /* alt key down */
+#define K_CLCKSC 0x3a /* caps lock */
+#define K_NLCKSC 0x45 /* num lock down */
+
+/* "special keys" */
+#define K_BSSC 0x0e /* backspace */
+#define K_TABSC 0x0f /* tab */
+#define K_RETSC 0x1c /* return */
+#define K_SPSC 0x39 /* space */
+#define K_ESCSC 0x01 /* ESC */
+
+/* alphabetic keys */
+#define K_qSC 0x10
+#define K_wSC 0x11
+#define K_eSC 0x12
+#define K_rSC 0x13
+#define K_tSC 0x14
+#define K_ySC 0x15
+#define K_uSC 0x16
+#define K_iSC 0x17
+#define K_oSC 0x18
+#define K_pSC 0x19
+
+#define K_aSC 0x1e
+#define K_sSC 0x1f
+#define K_dSC 0x20
+#define K_fSC 0x21
+#define K_gSC 0x22
+#define K_hSC 0x23
+#define K_jSC 0x24
+#define K_kSC 0x25
+#define K_lSC 0x26
+
+#define K_zSC 0x2c
+#define K_xSC 0x2d
+#define K_cSC 0x2e
+#define K_vSC 0x2f
+#define K_bSC 0x30
+#define K_nSC 0x31
+#define K_mSC 0x32
+
+/* numbers and punctuation */
+#define K_ONESC 0x02 /* 1 */
+#define K_TWOSC 0x03 /* 2 */
+#define K_THREESC 0x04 /* 3 */
+#define K_FOURSC 0x05 /* 4 */
+#define K_FIVESC 0x06 /* 5 */
+#define K_SIXSC 0x07 /* 6 */
+#define K_SEVENSC 0x08 /* 7 */
+#define K_EIGHTSC 0x09 /* 8 */
+#define K_NINESC 0x0a /* 9 */
+#define K_ZEROSC 0x0b /* 0 */
+
+#define K_MINUSSC 0x0c /* - */
+#define K_EQLSC 0x0d /* = */
+#define K_LBRKTSC 0x1a /* [ */
+#define K_RBRKTSC 0x1b /* ] */
+#define K_SEMISC 0x27 /* ; */
+#define K_SQUOTESC 0x28 /* ' */
+#define K_GRAVSC 0x29 /* ` */
+#define K_BSLSHSC 0x2b /* \ */
+#define K_COMMASC 0x33 /* , */
+#define K_PERIODSC 0x34 /* . */
+#define K_SLASHSC 0x35 /* / */
+
+/* keypad keys */
+#define K_HOMESC 0x47 /* scancode for home */
+#define K_DELSC 0x53 /* scancode for del */
+
+/*
+ * Ascii values and flag characters for key map.
+ * A function key is represented by the 3-byte char sequence that it
+ * corresponds to.
+ * Other mappable non-Ascii keys (e.g., "ctrl") are represented by a
+ * two-byte sequence: K_SCAN, followed by the key's scan code.
+ */
+#define K_DONE 0xffu /* must be same as NC */
+#define NC 0xffu /* No character defined */
+
+#define K_SCAN 0xfeu /* followed by scan code */
+
+/* ascii char set */
+#define K_NUL 0x00 /* Null character */
+#define K_SOH 0x01
+#define K_STX 0x02
+#define K_ETX 0x03
+#define K_EOT 0x04
+#define K_ENQ 0x05
+#define K_ACK 0x06
+#define K_BEL 0x07 /* bell character */
+#define K_BS 0x08 /* back space */
+#define K_HT 0x09
+#define K_LF 0x0a /* line feed */
+#define K_VT 0x0b
+#define K_FF 0x0c
+#define K_CR 0x0d /* carriage return */
+#define K_SO 0x0e
+#define K_SI 0x0f
+#define K_DLE 0x10
+#define K_DC1 0x11
+#define K_DC2 0x12
+#define K_DC3 0x13
+#define K_DC4 0x14
+#define K_NAK 0x15
+#define K_SYN 0x16
+#define K_ETB 0x17
+#define K_CAN 0x18
+#define K_EM 0x19
+#define K_SUB 0x1a
+#define K_ESC 0x1b /* escape character */
+#define K_FS 0x1c
+#define K_GS 0x1d
+#define K_RS 0x1e
+#define K_US 0x1f
+#define K_SPACE 0x20 /* space character */
+#define K_BANG 0x21 /* ! */
+#define K_DQUOTE 0x22 /* " */
+#define K_POUND 0x23 /* # */
+#define K_DOLLAR 0x24 /* $ */
+#define K_PERC 0x25 /* % */
+#define K_AMPER 0x26 /* & */
+#define K_SQUOTE 0x27 /* ' */
+#define K_LPAREN 0x28 /* ( */
+#define K_RPAREN 0x29 /* ) */
+#define K_ASTER 0x2a /* * */
+#define K_PLUS 0x2b /* + */
+#define K_COMMA 0x2c /* , */
+#define K_MINUS 0x2d /* - */
+#define K_PERIOD 0x2e /* . */
+#define K_SLASH 0x2f /* / */
+#define K_ZERO 0x30 /* 0 */
+#define K_ONE 0x31 /* 1 */
+#define K_TWO 0x32 /* 2 */
+#define K_THREE 0x33 /* 3 */
+#define K_FOUR 0x34 /* 4 */
+#define K_FIVE 0x35 /* 5 */
+#define K_SIX 0x36 /* 6 */
+#define K_SEVEN 0x37 /* 7 */
+#define K_EIGHT 0x38 /* 8 */
+#define K_NINE 0x39 /* 9 */
+#define K_COLON 0x3a /* : */
+#define K_SEMI 0x3b /* ; */
+#define K_LTHN 0x3c /* < */
+#define K_EQL 0x3d /* = */
+#define K_GTHN 0x3e /* > */
+#define K_QUES 0x3f /* ? */
+#define K_ATSN 0x40 /* @ */
+#define K_A 0x41 /* A */
+#define K_B 0x42 /* B */
+#define K_C 0x43 /* C */
+#define K_D 0x44 /* D */
+#define K_E 0x45 /* E */
+#define K_F 0x46 /* F */
+#define K_G 0x47 /* G */
+#define K_H 0x48 /* H */
+#define K_I 0x49 /* I */
+#define K_J 0x4a /* J */
+#define K_K 0x4b /* K */
+#define K_L 0x4c /* L */
+#define K_M 0x4d /* M */
+#define K_N 0x4e /* N */
+#define K_O 0x4f /* O */
+#define K_P 0x50 /* P */
+#define K_Q 0x51 /* Q */
+#define K_R 0x52 /* R */
+#define K_S 0x53 /* S */
+#define K_T 0x54 /* T */
+#define K_U 0x55 /* U */
+#define K_V 0x56 /* V */
+#define K_W 0x57 /* W */
+#define K_X 0x58 /* X */
+#define K_Y 0x59 /* Y */
+#define K_Z 0x5a /* Z */
+#define K_LBRKT 0x5b /* [ */
+#define K_BSLSH 0x5c /* \ */
+#define K_RBRKT 0x5d /* ] */
+#define K_CARET 0x5e /* ^ */
+#define K_UNDSC 0x5f /* _ */
+#define K_GRAV 0x60 /* ` */
+#define K_a 0x61 /* a */
+#define K_b 0x62 /* b */
+#define K_c 0x63 /* c */
+#define K_d 0x64 /* d */
+#define K_e 0x65 /* e */
+#define K_f 0x66 /* f */
+#define K_g 0x67 /* g */
+#define K_h 0x68 /* h */
+#define K_i 0x69 /* i */
+#define K_j 0x6a /* j */
+#define K_k 0x6b /* k */
+#define K_l 0x6c /* l */
+#define K_m 0x6d /* m */
+#define K_n 0x6e /* n */
+#define K_o 0x6f /* o */
+#define K_p 0x70 /* p */
+#define K_q 0x71 /* q */
+#define K_r 0x72 /* r */
+#define K_s 0x73 /* s */
+#define K_t 0x74 /* t */
+#define K_u 0x75 /* u */
+#define K_v 0x76 /* v */
+#define K_w 0x77 /* w */
+#define K_x 0x78 /* x */
+#define K_y 0x79 /* y */
+#define K_z 0x7a /* z */
+#define K_LBRACE 0x7b /* { */
+#define K_PIPE 0x7c /* | */
+#define K_RBRACE 0x7d /* } */
+#define K_TILDE 0x7e /* ~ */
+#define K_DEL 0x7f /* delete */
+
+/* Ascii sequences to be generated by the named key */
+#define K_F1 0x1b,0x4f,0x50
+#define K_F1S 0x1b,0x4f,0x70
+#define K_F2 0x1b,0x4f,0x51
+#define K_F2S 0x1b,0x4f,0x71
+#define K_F3 0x1b,0x4f,0x52
+#define K_F3S 0x1b,0x4f,0x72
+#define K_F4 0x1b,0x4f,0x53
+#define K_F4S 0x1b,0x4f,0x73
+#define K_F5 0x1b,0x4f,0x54
+#define K_F5S 0x1b,0x4f,0x74
+#define K_F6 0x1b,0x4f,0x55
+#define K_F6S 0x1b,0x4f,0x75
+#define K_F7 0x1b,0x4f,0x56
+#define K_F7S 0x1b,0x4f,0x76
+#define K_F8 0x1b,0x4f,0x57
+#define K_F8S 0x1b,0x4f,0x77
+#define K_F9 0x1b,0x4f,0x58
+#define K_F9S 0x1b,0x4f,0x78
+#define K_F10 0x1b,0x4f,0x59
+#define K_F10S 0x1b,0x4f,0x79
+#define K_F11 0x1b,0x4f,0x5a
+#define K_F11S 0x1b,0x4f,0x7a
+#define K_F12 0x1b,0x4f,0x41
+#define K_F12S 0x1b,0x4f,0x61
+
+/* These are the Alt-FxxA #defines. They work with the new keymap
+ -- Derek Upham 1997/06/25 */
+#define K_F1A 0x1b,0x4f,0x30
+#define K_F2A 0x1b,0x4f,0x31
+#define K_F3A 0x1b,0x4f,0x32
+#define K_F4A 0x1b,0x4f,0x33
+#define K_F5A 0x1b,0x4f,0x34
+#define K_F6A 0x1b,0x4f,0x35
+#define K_F7A 0x1b,0x4f,0x36
+#define K_F8A 0x1b,0x4f,0x37
+#define K_F9A 0x1b,0x4f,0x38
+#define K_F10A 0x1b,0x4f,0x39
+#define K_F11A 0x1b,0x4f,0x3a
+#define K_F12A 0x1b,0x4f,0x3b
+
+#define K_SCRL 0x1b,0x5b,0x4d
+#define K_HOME 0x1b,0x5b,0x48
+#define K_UA 0x1b,0x5b,0x41
+#define K_PUP 0x1b,0x5b,0x56
+#define K_LA 0x1b,0x5b,0x44
+#define K_RA 0x1b,0x5b,0x43
+#define K_END 0x1b,0x5b,0x59
+#define K_DA 0x1b,0x5b,0x42
+#define K_PDN 0x1b,0x5b,0x55
+#define K_INS 0x1b,0x5b,0x40
+
+#define KBD_IRQ 1
+
+/*
+ * This array maps scancodes to Ascii characters (or character
+ * sequences).
+ * The first index is the scancode. The first NUMOUTPUT characters
+ * (accessed using the second index) correspond to the key's char
+ * sequence for the Normal state. The next NUMOUTPUT characters
+ * are for the Shift state, then Ctrl, then Alt, then Shift/Alt.
+ */
+#ifdef KERNEL
+extern u_char key_map[NUMKEYS][WIDTH_KMAP];
+#endif /* KERNEL */
+
+
+
+/*
+ * These routines are declared here so that all the modules making
+ * up the kd driver agree on how to do locking.
+ */
+
+#ifdef KERNEL
+#include <i386/machspl.h>
+#define SPLKD spltty
+#endif /* KERNEL */
+
+
+/*
+ * Ioctl's on /dev/console.
+ */
+
+/*
+ * KDGKBENT, KDSKBENT - Get and set keyboard table entry. Useful for
+ * remapping keys.
+ *
+ * KDGSTATE - Get the keyboard state variable, which flags the
+ * modifier keys (shift, ctrl, etc.) that are down. See
+ * KS_NORMAL et al above. Used for debugging.
+ *
+ * KDSETBELL - Turns the bell on or off.
+ */
+
+#define KDGKBENT _IOWR('k', 1, struct kbentry) /* get keybd entry */
+
+#define KDSKBENT _IOW('k', 2, struct kbentry) /* set keybd entry */
+
+#define KDGSTATE _IOR('k', 3, int) /* get keybd state */
+
+#define KDSETBELL _IOW('k', 4, int) /* turn bell on or off */
+# define KD_BELLON 1
+# define KD_BELLOFF 0
+
+/*
+ * This struct is used for getting and setting key definitions. The
+ * values for kb_index are obtainable from the man page for
+ * keyboard(7) (though they should really be defined here!).
+ */
+struct kbentry {
+ u_char kb_state; /* which state to use */
+ u_char kb_index; /* which keycode */
+ u_char kb_value[NUMOUTPUT]; /* value to get/set */
+};
+
+
+/*
+ * Ioctl's on /dev/kbd.
+ */
+
+#ifdef KERNEL
+extern int kb_mode;
+#endif
+
+/*
+ * Argument for the K_X_KDB_ENTER/K_X_KDB_EXIT ioctls below.
+ */
+struct X_kdb {
+ u_int *ptr; /* caller-supplied buffer */
+ u_int size; /* buffer size -- presumably bytes; verify at caller */
+};
+
+#define K_X_KDB_ENTER _IOW('K', 16, struct X_kdb)
+#define K_X_KDB_EXIT _IOW('K', 17, struct X_kdb)
+
+#define K_X_IN 0x01000000
+#define K_X_OUT 0x02000000
+#define K_X_BYTE 0x00010000
+#define K_X_WORD 0x00020000
+#define K_X_LONG 0x00040000
+#define K_X_TYPE 0x03070000
+#define K_X_PORT 0x0000ffff
+
+extern boolean_t kd_isupper (u_char);
+extern boolean_t kd_islower (u_char);
+extern void kd_senddata (unsigned char);
+extern void kd_sendcmd (unsigned char);
+extern void kd_cmdreg_write (int);
+extern void kd_mouse_drain (void);
+extern void set_kd_state (int);
+extern void kd_setleds1 (u_char);
+extern void kd_setleds2 (void);
+extern void cnsetleds (u_char);
+extern void kdreboot (void);
+extern void kd_putc_esc (u_char);
+extern void kd_putc (u_char);
+extern void kd_parseesc (void);
+extern void kd_down (void);
+extern void kd_up (void);
+extern void kd_cr (void);
+extern void kd_tab (void);
+extern void kd_left (void);
+extern void kd_right (void);
+extern void kd_scrollup (void);
+extern void kd_scrolldn (void);
+extern void kd_cls (void);
+extern void kd_home (void);
+extern void kd_insch (int number);
+extern void kd_cltobcur (void);
+extern void kd_cltopcur (void);
+extern void kd_cltoecur (void);
+extern void kd_clfrbcur (void);
+extern void kd_eraseln (void);
+extern void kd_insln (int);
+extern void kd_delln (int);
+extern void kd_delch (int);
+extern void kd_erase (int);
+extern void kd_bellon (void);
+extern void kd_belloff (void *param);
+extern void kdinit (void);
+extern int kdsetkbent (struct kbentry *, int);
+extern int kdgetkbent (struct kbentry *);
+extern int kdsetbell (int, int);
+extern void kd_resend (void);
+extern void kd_handle_ack (void);
+extern int kd_kbd_magic (int);
+extern unsigned int kdstate2idx (unsigned int, boolean_t);
+extern void kd_parserest (u_char *);
+extern int kdcnprobe(struct consdev *cp);
+extern int kdcninit(struct consdev *cp);
+extern int kdcngetc(dev_t dev, int wait);
+extern int kdcnmaygetc (void);
+extern int kdcnputc(dev_t dev, int c);
+extern void kd_setpos(csrpos_t newpos);
+
+extern void kd_slmwd (void *start, int count, int value);
+extern void kd_slmscu (void *from, void *to, int count);
+extern void kd_slmscd (void *from, void *to, int count);
+
+extern void kdintr(int vec);
+
+#if MACH_KDB
+#include <ddb/db_input.h>
+#endif /* MACH_KDB */
+
+extern int kdopen(dev_t dev, int flag, io_req_t ior);
+extern void kdclose(dev_t dev, int flag);
+extern int kdread(dev_t dev, io_req_t uio);
+extern int kdwrite(dev_t dev, io_req_t uio);
+
+extern io_return_t kdgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
+
+extern io_return_t kdsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
+
+extern int kdportdeath(dev_t dev, mach_port_t port);
+extern vm_offset_t kdmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+
+boolean_t kdcheckmagic(Scancode scancode);
+
+int do_modifier(int state, Scancode c, boolean_t up);
+
+/*
+ * Generic routines for bitmap devices (i.e., assume no hardware
+ * assist). Assumes a simple byte ordering (i.e., a byte at a lower
+ * address is to the left of the byte at the next higher address).
+ * For the 82786, this works anyway if the characters are 2 bytes
+ * wide. (more bubble gum and paper clips.)
+ *
+ * See the comments above (in i386at/kd.c) about SLAMBPW.
+ */
+void bmpch2bit(csrpos_t pos, short *xb, short *yb);
+void bmppaintcsr(csrpos_t pos, u_char val);
+u_char *bit2fbptr(short xb, short yb);
+
+unsigned char kd_getdata(void);
+unsigned char state2leds(int state);
+
+void kdstart(struct tty *tp);
+void kdstop(struct tty *tp, int flags);
+
+void kd_xga_init(void);
+
+#endif /* _KD_H_ */
diff --git a/i386/i386at/kd_event.c b/i386/i386at/kd_event.c
new file mode 100644
index 0000000..247d95b
--- /dev/null
+++ b/i386/i386at/kd_event.c
@@ -0,0 +1,392 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_event.c
+ Description: Driver for event interface to keyboard.
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989. All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <mach/boolean.h>
+#include <sys/types.h>
+#include <kern/printf.h>
+#include <string.h>
+
+#include <device/ds_routines.h>
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/kd.h>
+#include <i386at/kd_queue.h>
+#ifdef APIC
+# include <i386/apic.h>
+#else
+# include <i386/pic.h>
+#endif
+
+#include "kd_event.h"
+
+/*
+ * Code for /dev/kbd. The interrupt processing is done in kd.c,
+ * which calls into this module to enqueue scancode events when
+ * the keyboard is in Event mode.
+ */
+
+/*
+ * Note: These globals are protected by raising the interrupt level
+ * via SPLKD.
+ */
+
+kd_event_queue kbd_queue; /* queue of keyboard events */
+queue_head_t kbd_read_queue = { &kbd_read_queue, &kbd_read_queue };
+
+static boolean_t initialized = FALSE;
+
+
+/*
+ * kbdinit - set up event queue.
+ */
+
+static void
+kbdinit(void)
+{
+ spl_t s = SPLKD();
+
+ if (!initialized) {
+ kdq_reset(&kbd_queue);
+ initialized = TRUE;
+ }
+ splx(s);
+}
+
+
+/*
+ * kbdopen - Verify that open is read-only and remember process
+ * group leader.
+ */
+
+/*ARGSUSED*/
+int
+kbdopen(dev_t dev, int flags, io_req_t ior)
+{
+ spl_t o_pri = spltty();
+ kdinit();
+ splx(o_pri);
+ kbdinit();
+
+ return(0);
+}
+
+
+/*
+ * kbdclose - Make sure that the kd driver is in Ascii mode and
+ * reset various flags.
+ */
+
+/*ARGSUSED*/
+void
+kbdclose(
+ dev_t dev,
+ int flags)
+{
+ spl_t s = SPLKD();
+
+ kb_mode = KB_ASCII;
+ kdq_reset(&kbd_queue);
+ splx(s);
+}
+
+
+io_return_t kbdgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count) /* OUT */
+{
+ switch (flavor) {
+ case KDGKBDTYPE:
+ *data = KB_VANILLAKB;
+ *count = 1;
+ break;
+ case DEV_GET_SIZE:
+ data[DEV_GET_SIZE_DEVICE_SIZE] = 0;
+ data[DEV_GET_SIZE_RECORD_SIZE] = sizeof(kd_event);
+ *count = DEV_GET_SIZE_COUNT;
+ break;
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+io_return_t kbdsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
+{
+ switch (flavor) {
+ case KDSKBDMODE:
+ kb_mode = *data;
+ /* XXX - what to do about unread events? */
+		/* XXX - should check that 'data' contains an OK value */
+ break;
+ case KDSETLEDS:
+ if (count != 1)
+ return (D_INVALID_OPERATION);
+ kd_setleds1 (*data);
+ break;
+ case K_X_KDB_ENTER:
+ return X_kdb_enter_init((unsigned int *)data, count);
+ case K_X_KDB_EXIT:
+ return X_kdb_exit_init((unsigned int *)data, count);
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+
+
+/*
+ * kbdread - dequeue and return any queued events.
+ */
+int
+kbdread(
+ dev_t dev,
+ io_req_t ior)
+{
+ int err, count;
+ spl_t s;
+
+ /* Check if IO_COUNT is a multiple of the record size. */
+ if (ior->io_count % sizeof(kd_event) != 0)
+ return D_INVALID_SIZE;
+
+ err = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ if (err != KERN_SUCCESS)
+ return (err);
+
+ s = SPLKD();
+ if (kdq_empty(&kbd_queue)) {
+ if (ior->io_mode & D_NOWAIT) {
+ splx(s);
+ return (D_WOULD_BLOCK);
+ }
+ ior->io_done = kbd_read_done;
+ enqueue_tail(&kbd_read_queue, (queue_entry_t) ior);
+ splx(s);
+ return (D_IO_QUEUED);
+ }
+ count = 0;
+ while (!kdq_empty(&kbd_queue) && count < ior->io_count) {
+ kd_event *ev;
+
+ ev = kdq_get(&kbd_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+ ior->io_residual = ior->io_count - count;
+ return (D_SUCCESS);
+}
+
+boolean_t kbd_read_done(io_req_t ior)
+{
+ int count;
+ spl_t s;
+
+ s = SPLKD();
+ if (kdq_empty(&kbd_queue)) {
+ ior->io_done = kbd_read_done;
+ enqueue_tail(&kbd_read_queue, (queue_entry_t)ior);
+ splx(s);
+ return (FALSE);
+ }
+
+ count = 0;
+ while (!kdq_empty(&kbd_queue) && count < ior->io_count) {
+ kd_event *ev;
+
+ ev = kdq_get(&kbd_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+
+ ior->io_residual = ior->io_count - count;
+ ds_read_done(ior);
+
+ return (TRUE);
+}
+
+
+
+/*
+ * kd_enqsc - enqueue a scancode. Should be called at SPLKD.
+ */
+
+void
+kd_enqsc(Scancode sc)
+{
+ kd_event ev;
+
+ ev.type = KEYBD_EVENT;
+ /* Not used but we set it to avoid garbage */
+ ev.unused_time.seconds = 0;
+ ev.unused_time.microseconds = 0;
+ ev.value.sc = sc;
+ kbd_enqueue(&ev);
+}
+
+
+/*
+ * kbd_enqueue - enqueue an event and wake up selecting processes, if
+ * any. Should be called at SPLKD.
+ */
+
+void
+kbd_enqueue(kd_event *ev)
+{
+ if (kdq_full(&kbd_queue))
+ printf_once("kbd: queue full\n");
+ else
+ kdq_put(&kbd_queue, ev);
+
+ {
+ io_req_t ior;
+ while ((ior = (io_req_t)dequeue_head(&kbd_read_queue)) != 0)
+ iodone(ior);
+ }
+}
+
+u_int X_kdb_enter_str[512], X_kdb_exit_str[512];
+int X_kdb_enter_len = 0, X_kdb_exit_len = 0;
+
+static void
+kdb_in_out(const u_int *p)
+{
+ int t = p[0];
+
+ switch (t & K_X_TYPE) {
+ case K_X_IN|K_X_BYTE:
+ inb(t & K_X_PORT);
+ break;
+
+ case K_X_IN|K_X_WORD:
+ inw(t & K_X_PORT);
+ break;
+
+ case K_X_IN|K_X_LONG:
+ inl(t & K_X_PORT);
+ break;
+
+ case K_X_OUT|K_X_BYTE:
+ outb(t & K_X_PORT, p[1]);
+ break;
+
+ case K_X_OUT|K_X_WORD:
+ outw(t & K_X_PORT, p[1]);
+ break;
+
+ case K_X_OUT|K_X_LONG:
+ outl(t & K_X_PORT, p[1]);
+ break;
+ }
+}
+
+void
+X_kdb_enter(void)
+{
+ u_int *u_ip, *endp;
+
+ for (u_ip = X_kdb_enter_str, endp = &X_kdb_enter_str[X_kdb_enter_len];
+ u_ip < endp;
+ u_ip += 2)
+ kdb_in_out(u_ip);
+}
+
+void
+X_kdb_exit(void)
+{
+ u_int *u_ip, *endp;
+
+ for (u_ip = X_kdb_exit_str, endp = &X_kdb_exit_str[X_kdb_exit_len];
+ u_ip < endp;
+ u_ip += 2)
+ kdb_in_out(u_ip);
+}
+
+io_return_t
+X_kdb_enter_init(
+ u_int *data,
+ u_int count)
+{
+ if (count * sizeof X_kdb_enter_str[0] > sizeof X_kdb_enter_str)
+ return D_INVALID_OPERATION;
+
+ memcpy(X_kdb_enter_str, data, count * sizeof X_kdb_enter_str[0]);
+ X_kdb_enter_len = count;
+ return D_SUCCESS;
+}
+
+io_return_t
+X_kdb_exit_init(
+ u_int *data,
+ u_int count)
+{
+ if (count * sizeof X_kdb_exit_str[0] > sizeof X_kdb_exit_str)
+ return D_INVALID_OPERATION;
+
+ memcpy(X_kdb_exit_str, data, count * sizeof X_kdb_exit_str[0]);
+ X_kdb_exit_len = count;
+ return D_SUCCESS;
+}
diff --git a/i386/i386at/kd_event.h b/i386/i386at/kd_event.h
new file mode 100644
index 0000000..7e66f76
--- /dev/null
+++ b/i386/i386at/kd_event.h
@@ -0,0 +1,62 @@
+/*
+ * Keyboard event handlers
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Keyboard event handling functions.
+ *
+ */
+
+#ifndef _KD_EVENT_H_
+#define _KD_EVENT_H_
+
+#include <sys/types.h>
+#include <device/io_req.h>
+#include <i386at/kd.h>
+
+extern void X_kdb_enter (void);
+
+extern void X_kdb_exit (void);
+
+extern int kbdopen(dev_t dev, int flags, io_req_t ior);
+extern void kbdclose(dev_t dev, int flags);
+extern int kbdread(dev_t dev, io_req_t ior);
+
+extern io_return_t kbdgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
+
+extern io_return_t kbdsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
+
+extern void kd_enqsc(Scancode sc);
+
+void kbd_enqueue(kd_event *ev);
+
+io_return_t X_kdb_enter_init(u_int *data, u_int count);
+io_return_t X_kdb_exit_init(u_int *data, u_int count);
+
+boolean_t kbd_read_done(io_req_t ior);
+
+#endif /* _KD_EVENT_H_ */
diff --git a/i386/i386at/kd_mouse.c b/i386/i386at/kd_mouse.c
new file mode 100644
index 0000000..9bd001c
--- /dev/null
+++ b/i386/i386at/kd_mouse.c
@@ -0,0 +1,800 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_mouse.c
+ Description: mouse driver as part of keyboard/display driver
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Hacked up support for serial mouse connected to COM1, using Mouse
+ * Systems 5-byte protocol at 1200 baud. This should work for
+ * Mouse Systems, SummaMouse, and Logitek C7 mice.
+ *
+ * The interface provided by /dev/mouse is a series of events as
+ * described in i386at/kd.h.
+ */
+
+#include <mach/boolean.h>
+#include <sys/types.h>
+#include <kern/printf.h>
+#include <device/ds_routines.h>
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <device/subrs.h>
+#include <i386/ipl.h>
+#include <i386/irq.h>
+#include <i386/pio.h>
+#include <chips/busses.h>
+#include <i386at/com.h>
+#include <i386at/kd.h>
+#include <i386at/kd_queue.h>
+#include <i386at/i8250.h>
+
+#include "kd_mouse.h"
+
+static interrupt_handler_fn oldvect; /* old interrupt vector */
+static int oldunit;
+extern struct bus_device *cominfo[];
+
+kd_event_queue mouse_queue; /* queue of mouse events */
+boolean_t mouse_in_use = FALSE;
+queue_head_t mouse_read_queue = { &mouse_read_queue, &mouse_read_queue };
+
+
+/*
+ * The state of the 3 buttons is encoded in the low-order 3 bits (both
+ * here and in other variables in the driver).
+ */
+u_char lastbuttons; /* previous state of mouse buttons */
+#define MOUSE_UP 1
+#define MOUSE_DOWN 0
+#define MOUSE_ALL_UP 0x7
+
+int mouse_baud = BCNT1200;
+
+boolean_t mouse_char_cmd = FALSE; /* mouse response is to cmd */
+boolean_t mouse_char_wanted = FALSE; /* want mouse response */
+int mouse_char_index; /* mouse response */
+
+#define IBM_MOUSE_IRQ 12
+
+/*
+ * init_mouse_hw - initialize the serial port.
+ */
+static void
+init_mouse_hw(dev_t unit, int mode)
+{
+ unsigned short base_addr = cominfo[unit]->address;
+
+ outb(base_addr + RIE, 0);
+ outb(base_addr + RLC, LCDLAB);
+ outb(base_addr + RDLSB, mouse_baud & 0xff);
+ outb(base_addr + RDMSB, (mouse_baud >> 8) & 0xff);
+ outb(base_addr + RLC, mode);
+ outb(base_addr + RMC, MCDTR | MCRTS | MCOUT2);
+ outb(base_addr + RIE, IERD | IELS);
+}
+
+
+/*
+ * mouseopen - Verify that the request is read-only, initialize,
+ * and remember process group leader.
+ */
+/*
+ * Low 3 bits of minor are the com port #.
+ * The high 5 bits of minor are the mouse type
+ */
+#define MOUSE_SYSTEM_MOUSE 0
+#define MICROSOFT_MOUSE 1
+#define IBM_MOUSE 2
+#define NO_MOUSE 3
+#define LOGITECH_TRACKMAN 4
+#define MICROSOFT_MOUSE7 5
+static int mouse_type;
+static int mousebufsize;
+static int mousebufindex = 0;
+int track_man[10];
+
+/*ARGSUSED*/
+int
+mouseopen(dev_t dev, int flags, io_req_t ior)
+{
+ if (mouse_in_use)
+ return (D_ALREADY_OPEN);
+ mouse_in_use = TRUE; /* locking? */
+ kdq_reset(&mouse_queue);
+ lastbuttons = MOUSE_ALL_UP;
+
+ switch (mouse_type = ((minor(dev) & 0xf8) >> 3)) {
+ case MICROSOFT_MOUSE7:
+ mousebufsize = 3;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC7);
+ break;
+ case MICROSOFT_MOUSE:
+ mousebufsize = 3;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC8);
+ break;
+ case MOUSE_SYSTEM_MOUSE:
+ mousebufsize = 5;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC8);
+ break;
+ case LOGITECH_TRACKMAN:
+ mousebufsize = 3;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC7);
+ track_man[0] = comgetc(dev&7);
+ track_man[1] = comgetc(dev&7);
+ if (track_man[0] != 0x4d &&
+ track_man[1] != 0x33) {
+ printf("LOGITECH_TRACKMAN: NOT M3");
+ }
+ break;
+ case IBM_MOUSE:
+ mousebufsize = 3;
+ kd_mouse_open(dev, IBM_MOUSE_IRQ);
+ ibm_ps2_mouse_open(dev);
+ break;
+ case NO_MOUSE:
+ break;
+ }
+ mousebufindex = 0;
+ return(0);
+}
+
+void
+serial_mouse_open(dev_t dev)
+{
+ int unit = minor(dev) & 0x7;
+ int mouse_pic = cominfo[unit]->sysdep1;
+
+ spl_t s = splhi(); /* disable interrupts */
+
+ oldvect = ivect[mouse_pic];
+ ivect[mouse_pic] = mouseintr;
+
+ oldunit = iunit[mouse_pic];
+ iunit[mouse_pic] = unit;
+
+ /* XXX other arrays to init? */
+ splx(s); /* XXX - should come after init? */
+}
+
+int mouse_packets = 0;
+
+void
+kd_mouse_open(
+ dev_t dev,
+ int mouse_pic)
+{
+ spl_t s = splhi(); /* disable interrupts */
+
+ oldvect = ivect[mouse_pic];
+ ivect[mouse_pic] = kdintr;
+ unmask_irq(mouse_pic);
+ splx(s);
+}
+
+/*
+ * mouseclose - Disable interrupts on the serial port, reset driver flags,
+ * and restore the serial port interrupt vector.
+ */
+void
+mouseclose(
+ dev_t dev,
+ int flags)
+{
+ switch (mouse_type) {
+ case MICROSOFT_MOUSE:
+ case MICROSOFT_MOUSE7:
+ case MOUSE_SYSTEM_MOUSE:
+ case LOGITECH_TRACKMAN:
+ serial_mouse_close(dev, flags);
+ break;
+ case IBM_MOUSE:
+ ibm_ps2_mouse_close(dev);
+ kd_mouse_close(dev, IBM_MOUSE_IRQ);
+ {int i = 20000; for (;i--;); }
+ kd_mouse_drain();
+ break;
+ case NO_MOUSE:
+ break;
+ }
+
+ kdq_reset(&mouse_queue); /* paranoia */
+ mouse_in_use = FALSE;
+}
+
+/*ARGSUSED*/
+void
+serial_mouse_close(
+ dev_t dev,
+ int flags)
+{
+ spl_t o_pri = splhi(); /* mutex with open() */
+ int unit = minor(dev) & 0x7;
+ int mouse_pic = cominfo[unit]->sysdep1;
+ unsigned short base_addr = cominfo[unit]->address;
+
+ assert(ivect[mouse_pic] == mouseintr);
+ outb(base_addr + RIE, 0); /* disable serial port */
+ outb(base_addr + RMC, 0); /* no rts */
+ ivect[mouse_pic] = oldvect;
+ iunit[mouse_pic] = oldunit;
+
+ (void)splx(o_pri);
+}
+
+void
+kd_mouse_close(
+ dev_t dev,
+ int mouse_pic)
+{
+ spl_t s = splhi();
+
+ mask_irq(mouse_pic);
+ ivect[mouse_pic] = oldvect;
+ splx(s);
+}
+
+io_return_t mousegetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count) /* OUT */
+{
+ switch (flavor) {
+ case DEV_GET_SIZE:
+ data[DEV_GET_SIZE_DEVICE_SIZE] = 0;
+ data[DEV_GET_SIZE_RECORD_SIZE] = sizeof(kd_event);
+ *count = DEV_GET_SIZE_COUNT;
+ break;
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+
+/*
+ * mouseread - dequeue and return any queued events.
+ */
+int
+mouseread(
+ dev_t dev,
+ io_req_t ior)
+{
+ int err, count;
+ spl_t s;
+
+ /* Check if IO_COUNT is a multiple of the record size. */
+ if (ior->io_count % sizeof(kd_event) != 0)
+ return D_INVALID_SIZE;
+
+ err = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ if (err != KERN_SUCCESS)
+ return (err);
+
+ s = SPLKD();
+ if (kdq_empty(&mouse_queue)) {
+ if (ior->io_mode & D_NOWAIT) {
+ splx(s);
+ return (D_WOULD_BLOCK);
+ }
+ ior->io_done = mouse_read_done;
+ enqueue_tail(&mouse_read_queue, (queue_entry_t)ior);
+ splx(s);
+ return (D_IO_QUEUED);
+ }
+ count = 0;
+ while (!kdq_empty(&mouse_queue) && count < ior->io_count) {
+ kd_event *ev;
+
+ ev = kdq_get(&mouse_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+ ior->io_residual = ior->io_count - count;
+ return (D_SUCCESS);
+}
+
+boolean_t mouse_read_done(io_req_t ior)
+{
+ int count;
+ spl_t s;
+
+ s = SPLKD();
+ if (kdq_empty(&mouse_queue)) {
+ ior->io_done = mouse_read_done;
+ enqueue_tail(&mouse_read_queue, (queue_entry_t)ior);
+ splx(s);
+ return (FALSE);
+ }
+
+ count = 0;
+ while (!kdq_empty(&mouse_queue) && count < ior->io_count) {
+ kd_event *ev;
+
+ ev = kdq_get(&mouse_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+
+ ior->io_residual = ior->io_count - count;
+ ds_read_done(ior);
+
+ return (TRUE);
+}
+
+
+
+/*
+ * mouseintr - Get a byte and pass it up for handling. Called at SPLKD.
+ */
+void
+mouseintr(int unit)
+{
+ unsigned short base_addr = cominfo[unit]->address;
+ unsigned char id, ls;
+
+ /* get reason for interrupt and line status */
+ id = inb(base_addr + RID);
+ ls = inb(base_addr + RLS);
+
+ /* handle status changes */
+ if (id == IDLS) {
+ if (ls & LSDR) {
+ inb(base_addr + RDAT); /* flush bad character */
+ }
+ return; /* ignore status change */
+ }
+
+ if (id & IDRD) {
+ mouse_handle_byte((u_char)(inb(base_addr + RDAT) & 0xff));
+ }
+}
+
+
+/*
+ * handle_byte - Accumulate bytes until we have an entire packet.
+ * If the mouse has moved or any of the buttons have changed state (up
+ * or down), enqueue the corresponding events.
+ * Called at SPLKD.
+ * XXX - magic numbers.
+ */
+int show_mouse_byte = 0;
+/*
+ X down; middle down; middle up; X up 50 0 0; 50 0 0 22; 50 0 0 02; 40 0 0
+ X down; middle down; X up; middle up 50 0 0; 50 0 0 22; 40 0 0 22; 40 0 0 2
+ *
+ * The trick here is that all the while the middle button is down you get 4 byte
+ * packets with the last byte 0x22. When the middle button goes up you get a
+ * last packet with 0x02.
+ */
+int lastgitech = 0x40; /* figure whether the first 3 bytes imply */
+				/* it's time to expect a fourth */
+int fourthgitech = 0; /* look for the 4th byte; we must process it */
+int middlegitech = 0; /* what should the middle button be */
+
+static u_char mousebuf[MOUSEBUFSIZE]; /* 5-byte packet from mouse */
+
+void
+mouse_handle_byte(u_char ch)
+{
+ if (show_mouse_byte) {
+ printf("%x(%c) ", ch, ch);
+ }
+
+ if (mouse_char_cmd) {
+ /*
+ * Mouse character is response to command
+ */
+ if (mousebufindex < mousebufsize)
+ mousebuf[mousebufindex++] = ch;
+ if (mouse_char_wanted) {
+ mouse_char_wanted = FALSE;
+ wakeup((vm_offset_t)&mousebuf);
+ }
+ return;
+ }
+
+ if (mousebufindex == 0) {
+ switch (mouse_type) {
+ case MICROSOFT_MOUSE7:
+ if ((ch & 0x40) != 0x40)
+ return;
+ break;
+ case MICROSOFT_MOUSE:
+ if ((ch & 0xc0) != 0xc0)
+ return;
+ break;
+ case MOUSE_SYSTEM_MOUSE:
+ if ((ch & 0xf8) != 0x80)
+ return;
+ break;
+ case LOGITECH_TRACKMAN:
+ if (fourthgitech == 1) {
+ fourthgitech = 0;
+ if (ch & 0xf0)
+ middlegitech = 0x4;
+ else
+ middlegitech = 0x0;
+ mouse_packet_microsoft_mouse(mousebuf);
+ return;
+ } else if ((ch & 0xc0) != 0x40)
+ return;
+ break;
+ case IBM_MOUSE:
+ break;
+ }
+ }
+
+ mousebuf[mousebufindex++] = ch;
+ if (mousebufindex < mousebufsize)
+ return;
+
+ /* got a packet */
+ mousebufindex = 0;
+
+ switch (mouse_type) {
+ case MICROSOFT_MOUSE7:
+ case MICROSOFT_MOUSE:
+ mouse_packet_microsoft_mouse(mousebuf);
+ break;
+ case MOUSE_SYSTEM_MOUSE:
+ mouse_packet_mouse_system_mouse(mousebuf);
+ break;
+ case LOGITECH_TRACKMAN:
+ if ( mousebuf[1] || mousebuf[2] ||
+ mousebuf[0] != lastgitech) {
+ mouse_packet_microsoft_mouse(mousebuf);
+ lastgitech = mousebuf[0] & 0xf0;
+ } else {
+ fourthgitech = 1;
+ }
+ break;
+ case IBM_MOUSE:
+ mouse_packet_ibm_ps2_mouse(mousebuf);
+ break;
+ }
+}
+
+void
+mouse_packet_mouse_system_mouse(u_char mousebuf[MOUSEBUFSIZE])
+{
+ u_char buttons, buttonchanges;
+ struct mouse_motion moved;
+
+ buttons = mousebuf[0] & 0x7; /* get current state of buttons */
+ buttonchanges = buttons ^ lastbuttons;
+ moved.mm_deltaX = (char)mousebuf[1] + (char)mousebuf[3];
+ moved.mm_deltaY = (char)mousebuf[2] + (char)mousebuf[4];
+
+ if (moved.mm_deltaX != 0 || moved.mm_deltaY != 0)
+ mouse_moved(moved);
+
+ if (buttonchanges != 0) {
+ lastbuttons = buttons;
+ if (buttonchanges & 1)
+ mouse_button(MOUSE_RIGHT, buttons & 1);
+ if (buttonchanges & 2)
+ mouse_button(MOUSE_MIDDLE, (buttons & 2) >> 1);
+ if (buttonchanges & 4)
+ mouse_button(MOUSE_LEFT, (buttons & 4) >> 2);
+ }
+}
+
+/* same as above for microsoft mouse */
+/*
+ * 3 byte microsoft format used
+ *
+ * 7 6 5 4 3 2 1 0
+ * 1 1 L R Y7 Y6 X7 X6
+ *	1 0 X5 X4 X3 X2 X1 X0
+ * 1 0 Y5 Y4 Y3 Y2 Y1 Y0
+ *
+ */
+void
+mouse_packet_microsoft_mouse(u_char mousebuf[MOUSEBUFSIZE])
+{
+ u_char buttons, buttonchanges;
+ struct mouse_motion moved;
+
+ buttons = ((mousebuf[0] & 0x30) >> 4);
+ buttons |= middlegitech;
+ /* get current state of buttons */
+#ifdef gross_hack
+ if (buttons == 0x03) /* both buttons down */
+ buttons = 0x04;
+#endif /* gross_hack */
+ buttons = (~buttons) & 0x07; /* convert to not pressed */
+
+ buttonchanges = buttons ^ lastbuttons;
+ moved.mm_deltaX = ((mousebuf[0] & 0x03) << 6) | (mousebuf[1] & 0x3F);
+ moved.mm_deltaY = ((mousebuf[0] & 0x0c) << 4) | (mousebuf[2] & 0x3F);
+ if (moved.mm_deltaX & 0x80) /* negative, in fact */
+ moved.mm_deltaX = moved.mm_deltaX - 0x100;
+ if (moved.mm_deltaY & 0x80) /* negative, in fact */
+ moved.mm_deltaY = moved.mm_deltaY - 0x100;
+ /* and finally the Y orientation is different for the microsoft mouse */
+ moved.mm_deltaY = -moved.mm_deltaY;
+
+ if (moved.mm_deltaX != 0 || moved.mm_deltaY != 0)
+ mouse_moved(moved);
+
+ if (buttonchanges != 0) {
+ lastbuttons = buttons;
+ if (buttonchanges & 1)
+ mouse_button(MOUSE_RIGHT, (buttons & 1) ?
+ MOUSE_UP : MOUSE_DOWN);
+ if (buttonchanges & 2)
+ mouse_button(MOUSE_LEFT, (buttons & 2) ?
+ MOUSE_UP : MOUSE_DOWN);
+ if (buttonchanges & 4)
+ mouse_button(MOUSE_MIDDLE, (buttons & 4) ?
+ MOUSE_UP : MOUSE_DOWN);
+ }
+}
+
+/*
+ * AUX device (PS2) open/close
+ */
+
+/*
+ * Write character to mouse. Called at spltty.
+ */
+static void kd_mouse_write(
+ unsigned char ch)
+{
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ continue; /* wait for 'input' port empty */
+ outb(K_CMD, 0xd4); /* send next character to mouse */
+
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ continue; /* wait for 'input' port empty */
+ outb(K_RDWR, ch); /* send command to mouse */
+}
+
+/*
+ * Read next character from mouse, waiting for interrupt
+ * to deliver it. Called at spltty.
+ */
+static int kd_mouse_read(void)
+{
+ int ch;
+
+ if (mouse_char_index >= mousebufsize)
+ return -1;
+
+ while (mousebufindex <= mouse_char_index) {
+ mouse_char_wanted = TRUE;
+ assert_wait((event_t) &mousebuf, FALSE);
+ /* We are at tty SPL level, interrupts can not happen between
+ * assert_wait and thread_block. */
+ thread_block((void (*)()) 0);
+ }
+
+ ch = mousebuf[mouse_char_index++];
+
+ return ch;
+}
+
+/*
+ * Prepare buffer for receiving next packet from mouse.
+ */
+static void kd_mouse_read_reset(void)
+{
+ mousebufindex = 0;
+ mouse_char_index = 0;
+}
+
+void
+ibm_ps2_mouse_open(dev_t dev)
+{
+ spl_t s = spltty();
+
+ lastbuttons = 0;
+ mouse_char_cmd = TRUE; /* responses are to commands */
+
+ kd_sendcmd(0xa8); /* enable mouse in kbd */
+
+ kd_cmdreg_write(0x47); /* allow mouse interrupts */
+ /* magic number for ibm? */
+
+ kd_mouse_read_reset();
+ kd_mouse_write(0xff); /* reset mouse */
+ if (kd_mouse_read() != 0xfa) {
+ splx(s);
+ return; /* need ACK */
+ }
+
+ (void) kd_mouse_read(); /* discard 2-character mouse ID */
+ (void) kd_mouse_read();
+
+ kd_mouse_read_reset();
+ kd_mouse_write(0xea); /* set stream mode */
+ if (kd_mouse_read() != 0xfa) {
+ splx(s);
+ return; /* need ACK */
+ }
+
+ kd_mouse_read_reset();
+ kd_mouse_write(0xf4); /* enable */
+ if (kd_mouse_read() != 0xfa) {
+ splx(s);
+ return; /* need ACK */
+ }
+
+ kd_mouse_read_reset();
+ mouse_char_cmd = FALSE; /* now we get mouse packets */
+
+ splx(s);
+}
+
+void
+ibm_ps2_mouse_close(dev_t dev)
+{
+ spl_t s = spltty();
+
+ mouse_char_cmd = TRUE; /* responses are to commands */
+
+ kd_mouse_read_reset();
+ kd_mouse_write(0xff); /* reset mouse */
+ if (kd_mouse_read() == 0xfa) {
+ /* got ACK: discard 2-char mouse ID */
+ (void) kd_mouse_read();
+ (void) kd_mouse_read();
+ }
+
+ kd_sendcmd(0xa7); /* disable mouse in kbd */
+ kd_cmdreg_write(0x65); /* disallow mouse interrupts */
+ /* magic number for ibm? */
+
+ splx(s);
+}
+
+/*
+ * 3 byte ibm ps2 format used
+ *
+ * 7 6 5 4 3 2 1 0
+ * YO XO YS XS 1 M R L
+ *	X7 X6 X5 X4 X3 X2 X1 X0
+ * Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ *
+ */
+void
+mouse_packet_ibm_ps2_mouse(u_char mousebuf[MOUSEBUFSIZE])
+{
+ u_char buttons, buttonchanges;
+ struct mouse_motion moved;
+
+ buttons = mousebuf[0] & 0x7; /* get current state of buttons */
+ buttonchanges = buttons ^ lastbuttons;
+ moved.mm_deltaX = ((mousebuf[0]&0x10) ? 0xffffff00 : 0 ) | (u_char)mousebuf[1];
+ moved.mm_deltaY = ((mousebuf[0]&0x20) ? 0xffffff00 : 0 ) | (u_char)mousebuf[2];
+ if (mouse_packets) {
+ printf("(%x:%x:%x)", mousebuf[0], mousebuf[1], mousebuf[2]);
+ return;
+ }
+
+ if (moved.mm_deltaX != 0 || moved.mm_deltaY != 0)
+ mouse_moved(moved);
+
+ if (buttonchanges != 0) {
+ lastbuttons = buttons;
+ if (buttonchanges & 1)
+ mouse_button(MOUSE_LEFT, !(buttons & 1));
+ if (buttonchanges & 2)
+ mouse_button(MOUSE_RIGHT, !((buttons & 2) >> 1));
+ if (buttonchanges & 4)
+ mouse_button(MOUSE_MIDDLE, !((buttons & 4) >> 2));
+ }
+}
+
+/*
+ * Enqueue a mouse-motion event. Called at SPLKD.
+ */
+void
+mouse_moved(struct mouse_motion where)
+{
+ kd_event ev;
+
+ ev.type = MOUSE_MOTION;
+ /* Not used but we set it to avoid garbage */
+ ev.unused_time.seconds = 0;
+ ev.unused_time.microseconds = 0;
+ ev.value.mmotion = where;
+ mouse_enqueue(&ev);
+}
+
+/*
+ * Enqueue an event for mouse button press or release. Called at SPLKD.
+ */
+void
+mouse_button(
+ kev_type which,
+ u_char direction)
+{
+ kd_event ev;
+
+ ev.type = which;
+ ev.value.up = (direction == MOUSE_UP) ? TRUE : FALSE;
+ /* Not used but we set it to avoid garbage */
+ ev.unused_time.seconds = 0;
+ ev.unused_time.microseconds = 0;
+ mouse_enqueue(&ev);
+}
+
+/*
+ * mouse_enqueue - enqueue an event and wake up selecting processes, if
+ * any. Called at SPLKD.
+ */
+
+void
+mouse_enqueue(kd_event *ev)
+{
+ if (kdq_full(&mouse_queue))
+ printf_once("mouse: queue full\n");
+ else
+ kdq_put(&mouse_queue, ev);
+
+ {
+ io_req_t ior;
+ while ((ior = (io_req_t)dequeue_head(&mouse_read_queue)) != 0)
+ iodone(ior);
+ }
+}
diff --git a/i386/i386at/kd_mouse.h b/i386/i386at/kd_mouse.h
new file mode 100644
index 0000000..a9fb128
--- /dev/null
+++ b/i386/i386at/kd_mouse.h
@@ -0,0 +1,72 @@
+/*
+ * Mouse event handlers
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Mouse event handling functions.
+ *
+ */
+
+#ifndef _KD_MOUSE_H_
+#define _KD_MOUSE_H_
+
+#include <sys/types.h>
+
+#define MOUSEBUFSIZE 5 /* num bytes def'd by protocol */
+
+extern void mouse_button (kev_type which, u_char direction);
+
+extern void mouse_enqueue (kd_event *ev);
+
+extern void mouse_moved (struct mouse_motion where);
+
+extern void mouse_handle_byte (u_char ch);
+
+extern void serial_mouse_open (dev_t dev);
+
+extern void serial_mouse_close (dev_t dev, int flags);
+
+extern void kd_mouse_open (dev_t dev, int mouse_pic);
+
+extern void kd_mouse_close (dev_t dev, int mouse_pic);
+
+extern void ibm_ps2_mouse_open (dev_t dev);
+
+extern void ibm_ps2_mouse_close (dev_t dev);
+
+extern void mouse_packet_microsoft_mouse (u_char mousebuf[MOUSEBUFSIZE]);
+
+extern void mouse_packet_mouse_system_mouse (u_char mousebuf[MOUSEBUFSIZE]);
+
+extern void mouse_packet_ibm_ps2_mouse (u_char mousebuf[MOUSEBUFSIZE]);
+
+extern int mouseopen(dev_t dev, int flags, io_req_t ior);
+extern void mouseclose(dev_t dev, int flags);
+extern int mouseread(dev_t dev, io_req_t ior);
+
+extern io_return_t mousegetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
+
+void mouseintr(int unit);
+boolean_t mouse_read_done(io_req_t ior);
+
+#endif /* _KD_MOUSE_H_ */
diff --git a/i386/i386at/kd_queue.c b/i386/i386at/kd_queue.c
new file mode 100644
index 0000000..ab399cd
--- /dev/null
+++ b/i386/i386at/kd_queue.c
@@ -0,0 +1,109 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_queue.c
+ Description: Event queue code for keyboard/display (and mouse) driver.
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+
+#include <i386at/kd_queue.h>
+
+/*
+ * Notice that when adding an entry to the queue, the caller provides
+ * its own storage, which is copied into the queue. However, when
+ * removing an entry from the queue, the caller is given a pointer to a
+ * queue element. This means that the caller must either process the
+ * element or copy it into its own storage before unlocking the queue.
+ *
+ * These routines should be called only at a protected SPL.
+ */
+
+#define q_next(index) (((index)+1) % KDQSIZE)
+
+boolean_t
+kdq_empty(const kd_event_queue *q)
+{
+ return(q->firstfree == q->firstout);
+}
+
+boolean_t
+kdq_full(const kd_event_queue *q)
+{
+ return(q_next(q->firstfree) == q->firstout);
+}
+
+void
+kdq_put(kd_event_queue *q, kd_event *ev)
+{
+ kd_event *qp = q->events + q->firstfree;
+
+ qp->type = ev->type;
+ qp->unused_time = ev->unused_time;
+ qp->value = ev->value;
+ q->firstfree = q_next(q->firstfree);
+}
+
+kd_event *
+kdq_get(kd_event_queue *q)
+{
+ kd_event *result = q->events + q->firstout;
+
+ q->firstout = q_next(q->firstout);
+ return(result);
+}
+
+void
+kdq_reset(kd_event_queue *q)
+{
+ q->firstout = q->firstfree = 0;
+}
diff --git a/i386/i386at/kd_queue.h b/i386/i386at/kd_queue.h
new file mode 100644
index 0000000..702efe8
--- /dev/null
+++ b/i386/i386at/kd_queue.h
@@ -0,0 +1,86 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_queue.h
+ Description: definitions for keybd/display Event queue
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Definitions for keyboard/mouse events.
+ *
+ * The keyboard and mouse can be read as a stream of events. The event
+ * definition is the same in both cases, but only keyboard events will
+ * be generated by /dev/kbd, and only mouse events will be generated by
+ * /dev/mouse.
+ */
+
+#ifndef _KD_QUEUE_H_
+#define _KD_QUEUE_H_
+
+#include <mach/std_types.h>
+#include <i386at/kd.h>
+
+#define KDQSIZE 100 /* is this a good size? */
+
+typedef struct {
+ kd_event events[KDQSIZE];
+ int firstfree, firstout;
+} kd_event_queue;
+
+extern void kdq_put(kd_event_queue *, kd_event *);
+extern void kdq_reset(kd_event_queue *);
+extern boolean_t kdq_empty(const kd_event_queue *);
+extern boolean_t kdq_full(const kd_event_queue *);
+extern kd_event *kdq_get(kd_event_queue *);
+
+#endif /* _KD_QUEUE_H_ */
diff --git a/i386/i386at/kdasm.S b/i386/i386at/kdasm.S
new file mode 100644
index 0000000..fd0e1c8
--- /dev/null
+++ b/i386/i386at/kdasm.S
@@ -0,0 +1,145 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Some inline code to speed up major block copies to and from the
+ * screen buffer.
+ *
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ * All rights reserved.
+ *
+ * orc!eugene 28 Oct 1988
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* $ Header: $ */
+
+
+#include <mach/machine/asm.h>
+
+/*
+ * Function: kd_slmwd()
+ *
+ * This function "slams" a word (char/attr) into the screen memory using
+ * a block fill operation on the 386.
+ *
+ */
+
+#define start B_ARG0
+#define count B_ARG1
+#define value B_ARG2
+
+ENTRY(kd_slmwd)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %edi
+
+ movl start, %edi
+ movl count, %ecx
+ movw value, %ax
+ cld
+ rep
+ stosw
+
+ popl %edi
+ leave
+ ret
+#undef start
+#undef count
+#undef value
+
+/*
+ * "slam up"
+ */
+
+#define from B_ARG0
+#define to B_ARG1
+#define count B_ARG2
+ENTRY(kd_slmscu)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %esi
+ pushl %edi
+
+ movl from, %esi
+ movl to, %edi
+ movl count, %ecx
+ cmpl %edi, %esi
+ cld
+ rep
+ movsw
+
+ popl %edi
+ popl %esi
+ leave
+ ret
+
+/*
+ * "slam down"
+ */
+ENTRY(kd_slmscd)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %esi
+ pushl %edi
+
+ movl from, %esi
+ movl to, %edi
+ movl count, %ecx
+ cmpl %edi, %esi
+ std
+ rep
+ movsw
+ cld
+
+ popl %edi
+ popl %esi
+ leave
+ ret
+#undef from
+#undef to
+#undef count
diff --git a/i386/i386at/kdsoft.h b/i386/i386at/kdsoft.h
new file mode 100644
index 0000000..79bfdb0
--- /dev/null
+++ b/i386/i386at/kdsoft.h
@@ -0,0 +1,209 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kdsoft.h
+ Description: Software structures for keyboard/display driver, shared with
+ drivers for specific graphics cards.
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _KDSOFT_H_
+#define _KDSOFT_H_
+
+/*
+ * Globals used for both character-based controllers and bitmap-based
+ * controllers.
+ */
+typedef short csrpos_t; /* cursor position, ONE_SPACE bytes per char */
+
+extern u_char *vid_start; /* VM start of video RAM or frame buffer */
+extern csrpos_t kd_curpos; /* should be set only by kd_setpos */
+extern short kd_lines; /* num lines in tty display */
+extern short kd_cols;
+extern char kd_attr; /* current character attribute */
+
+
+/*
+ * Globals used only for bitmap-based controllers.
+ * XXX - probably needs reworking for color.
+ */
+
+/*
+ * This driver handles two types of graphics cards. The first type
+ * (e.g., EGA, CGA), treats the screen as a page of characters and
+ * has a hardware cursor. The second type (e.g., the Blit) treats the
+ * screen as a bitmap. A hardware cursor may be present, but it is
+ * ignored in favor of a software cursor.
+ *
+ *
+ * Most of the driver uses the following abstraction for the display:
+ *
+ * The cursor position is simply an index into a (logical) linear char
+ * array that wraps around at the end of each line. Each character
+ * takes up ONE_SPACE bytes. Values in [0..ONE_PAGE) are positions in
+ * the displayed page. Values < 0 and >= ONE_PAGE are off the page
+ * and require some scrolling to put the cursor back on the page.
+ *
+ * The kd_dxxx routines handle the conversion from this abstraction to
+ * what the hardware requires.
+ *
+ * (*kd_dput)(pos, ch, chattr)
+ * csrpos_t pos;
+ * char ch, chattr;
+ * Displays a character at "pos", where "ch" = the character to
+ * be displayed and "chattr" is its attribute byte.
+ *
+ * (*kd_dmvup)(from, to, count)
+ * csrpos_t from, to;
+ * int count;
+ * Does a (relatively) fast block transfer of characters upward.
+ * "count" is the number of character positions (not bytes) to move.
+ * "from" is the character position to start moving from (at the start
+ * of the block to be moved). "to" is the character position to start
+ * moving to.
+ *
+ * (*kd_dmvdown)(from, to, count)
+ * csrpos_t from, to;
+ * int count;
+ * "count" is the number of character positions (not bytes) to move.
+ * "from" is the character position to start moving from (at the end
+ * of the block to be moved). "to" is the character position to
+ * start moving to.
+ *
+ * (*kd_dclear)(to, count, chattr)
+ * csrpos_t, to;
+ * int count;
+ * char chattr;
+ * Erases "count" character positions, starting with "to".
+ *
+ * (*kd_dsetcursor)(pos)
+ * Sets kd_curpos and moves the displayed cursor to track it. "pos"
+ * should be in the range [0..ONE_PAGE).
+ *
+ * (*kd_dreset)()
+ * In some cases, the boot program expects the display to be in a
+ * particular state, and doing a soft reset (i.e.,
+ * software-controlled reboot) doesn't put it into that state. For
+ * these cases, the machine-specific driver should provide a "reset"
+ * procedure, which will be called just before the kd code causes the
+ * system to reboot.
+ */
+
+extern void bmpput(csrpos_t, char, char);
+extern void bmpmvup(csrpos_t, csrpos_t, int);
+extern void bmpmvdown(csrpos_t, csrpos_t, int);
+extern void bmpclear(csrpos_t, int, char);
+extern void bmpsetcursor(csrpos_t);
+
+extern void (*kd_dput)(csrpos_t, char, char); /* put attributed char */
+extern void (*kd_dmvup)(csrpos_t, csrpos_t, int); /* block move up */
+extern void (*kd_dmvdown)(csrpos_t, csrpos_t, int); /* block move down */
+extern void (*kd_dclear)(csrpos_t, int, char); /* block clear */
+extern void (*kd_dsetcursor)(csrpos_t); /* set cursor position on displayed page */
+extern void (*kd_dreset)(void); /* prepare for reboot */
+
+
+/*
+ * The following font layout is assumed:
+ *
+ * The top scan line of all the characters comes first. Then the
+ * second scan line, then the third, etc.
+ *
+ * ------ ... ---------|-----N--------|-------------- ... -----------
+ * ------ ... ---------|-----N--------|-------------- ... -----------
+ * .
+ * .
+ * .
+ * ------ ... ---------|-----N--------|-------------- ... -----------
+ *
+ * In the picture, each line is a scan line from the font. Each scan
+ * line is stored in memory immediately after the previous one. The
+ * bits between the vertical lines are the bits for a single character
+ * (e.g., the letter "N").
+ * There are "char_height" scan lines. Each character is "char_width"
+ * bits wide. We make the simplifying assumption that characters are
+ * on byte boundaries. (We also assume that a byte is 8 bits.)
+ */
+
+extern u_char *font_start; /* starting addr of font */
+
+extern short fb_width; /* bits in frame buffer scan line */
+extern short fb_height; /* scan lines in frame buffer*/
+extern short char_width; /* bit width of 1 char */
+extern short char_height; /* bit height of 1 char */
+extern short chars_in_font;
+extern short cursor_height; /* bit height of cursor */
+ /* char_height + cursor_height = line_height */
+
+extern u_char char_black; /* 8 black (off) bits */
+extern u_char char_white; /* 8 white (on) bits */
+
+
+/*
+ * The tty emulation does not usually require the entire frame buffer.
+ * (xstart, ystart) is the bit address for the upper left corner of the
+ * tty "screen".
+ */
+
+extern short xstart, ystart;
+
+
+/*
+ * Accelerators for bitmap displays.
+ */
+
+extern short char_byte_width; /* char_width/8 */
+extern short fb_byte_width; /* fb_width/8 */
+extern short font_byte_width; /* num bytes in 1 scan line of font */
+
+#endif /* _KDSOFT_H_ */
diff --git a/i386/i386at/lpr.c b/i386/i386at/lpr.c
new file mode 100644
index 0000000..f8d42f3
--- /dev/null
+++ b/i386/i386at/lpr.c
@@ -0,0 +1,285 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Parallel port printer driver v1.0
+ * All rights reserved.
+ */
+
+#if NLPR > 0
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <kern/printf.h>
+#include <kern/mach_clock.h>
+#include <device/conf.h>
+#include <device/device_types.h>
+#include <device/tty.h>
+#include <device/io_req.h>
+
+#include <i386/ipl.h>
+#include <i386/pio.h>
+#include <chips/busses.h>
+#include <i386at/autoconf.h>
+#include <i386at/lpr.h>
+
+/*
+ * Driver information for auto-configuration stuff.
+ */
+
+struct bus_device *lprinfo[NLPR]; /* ??? */
+
+static vm_offset_t lpr_std[NLPR] = { 0 };
+static struct bus_device *lpr_info[NLPR];
+struct bus_driver lprdriver = {
+ lprprobe, 0, lprattach, 0, lpr_std, "lpr", lpr_info, 0, 0, 0};
+
+struct tty lpr_tty[NLPR];
+
+int lpr_alive[NLPR];
+
+int
+lprprobe(vm_offset_t port, struct bus_ctlr *dev)
+{
+ u_short addr = (u_short) dev->address;
+ int unit = dev->unit;
+ int ret;
+
+ if ((unit < 0) || (unit >= NLPR)) {
+ printf("lpr %d out of range\n", unit);
+ return(0);
+ }
+
+ outb(INTR_ENAB(addr),0x07);
+ outb(DATA(addr),0xaa);
+ ret = inb(DATA(addr)) == 0xaa;
+ if (ret) {
+ if (lpr_alive[unit]) {
+ printf("lpr: Multiple alive entries for unit %d.\n", unit);
+ printf("lpr: Ignoring entry with address = %x .\n", addr);
+ ret = 0;
+ } else
+ lpr_alive[unit]++;
+ }
+ return(ret);
+}
+
+void lprattach(struct bus_device *dev)
+{
+ u_char unit = dev->unit;
+ u_short addr = (u_short) dev->address;
+
+ if (unit >= NLPR) {
+ printf(", disabled by NLPR configuration\n");
+ return;
+ }
+
+ take_dev_irq(dev);
+ printf(", port = %zx, spl = %zd, pic = %d.",
+ dev->address, dev->sysdep, dev->sysdep1);
+ lprinfo[unit] = dev;
+
+ outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) & 0x0f);
+
+ return;
+}
+
+int
+lpropen(dev_t dev, int flag, io_req_t ior)
+{
+ int unit = minor(dev);
+ struct bus_device *isai;
+ struct tty *tp;
+ u_short addr;
+
+ if (unit >= NLPR)
+ return D_NO_SUCH_DEVICE;
+
+ isai = lprinfo[unit];
+ if (isai == NULL || !isai->alive)
+ return D_NO_SUCH_DEVICE;
+
+ tp = &lpr_tty[unit];
+ addr = (u_short) isai->address;
+ tp->t_dev = dev;
+ tp->t_addr = (void*) (natural_t) addr;
+ tp->t_oproc = lprstart;
+ tp->t_state |= TS_WOPEN;
+ tp->t_stop = lprstop;
+ tp->t_getstat = lprgetstat;
+ tp->t_setstat = lprsetstat;
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ ttychars(tp);
+ outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) | 0x10);
+ tp->t_state |= TS_CARR_ON;
+ return (char_open(dev, tp, flag, ior));
+}
+
+void
+lprclose(dev_t dev, int flag)
+{
+int unit = minor(dev);
+struct tty *tp = &lpr_tty[unit];
+u_short addr = (u_short) lprinfo[unit]->address;
+
+ ttyclose(tp);
+ if (tp->t_state&TS_HUPCLS || (tp->t_state&TS_ISOPEN)==0) {
+ outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) & 0x0f);
+ tp->t_state &= ~TS_BUSY;
+ }
+}
+
+int
+lprread(dev_t dev, io_req_t ior)
+{
+ return char_read(&lpr_tty[minor(dev)], ior);
+}
+
+int
+lprwrite(dev_t dev, io_req_t ior)
+{
+ return char_write(&lpr_tty[minor(dev)], ior);
+}
+
+int
+lprportdeath(dev_t dev, mach_port_t port)
+{
+ return (tty_portdeath(&lpr_tty[minor(dev)], (ipc_port_t)port));
+}
+
+io_return_t
+lprgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data, /* pointer to OUT array */
+ mach_msg_type_number_t *count /* out */
+ )
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+
+ switch (flavor) {
+ default:
+ result = tty_get_status(&lpr_tty[unit], flavor, data, count);
+ break;
+ }
+ return (result);
+}
+
+io_return_t
+lprsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count)
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+
+ switch (flavor) {
+ default:
+ result = tty_set_status(&lpr_tty[unit], flavor, data, count);
+/* if (result == D_SUCCESS && flavor == TTY_STATUS)
+ lprparam(unit);
+*/ return (result);
+ }
+ return (D_SUCCESS);
+}
+
+void lprintr(int unit)
+{
+ struct tty *tp = &lpr_tty[unit];
+
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ return;
+
+ tp->t_state &= ~TS_BUSY;
+ if (tp->t_state&TS_FLUSH)
+ tp->t_state &=~TS_FLUSH;
+ tt_write_wakeup(tp);
+ lprstart(tp);
+}
+
+void lprstart(struct tty *tp)
+{
+ spl_t s = spltty();
+ u_short addr = (natural_t) tp->t_addr;
+ int status = inb(STATUS(addr));
+ int nch;
+
+ if (tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) {
+ splx(s);
+ return;
+ }
+
+ if (status & 0x20) {
+ printf("Printer out of paper!\n");
+ splx(s);
+ return;
+ }
+
+ if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
+ tt_write_wakeup(tp);
+ }
+ if (tp->t_outq.c_cc == 0) {
+ splx(s);
+ return;
+ }
+ nch = getc(&tp->t_outq);
+ if (nch == -1) {
+ splx(s);
+ return;
+ }
+ if ((tp->t_flags & LITOUT) == 0 && (nch & 0200)) {
+ timeout((timer_func_t *)ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ tp->t_state |= TS_TIMEOUT;
+ splx(s); return;
+ }
+ outb(DATA(addr), nch);
+ outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) | 0x01);
+ outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) & 0x1e);
+ tp->t_state |= TS_BUSY;
+ splx(s);
+ return;
+}
+
+void
+lprstop(
+ struct tty *tp,
+ int flags)
+{
+ if ((tp->t_state & TS_BUSY) && (tp->t_state & TS_TTSTOP) == 0)
+ tp->t_state |= TS_FLUSH;
+}
+
+void
+lprpr_addr(unsigned short addr)
+{
+ printf("DATA(%x) %x, STATUS(%x) %x, INTR_ENAB(%x) %x\n",
+ DATA(addr), inb(DATA(addr)),
+ STATUS(addr), inb(STATUS(addr)),
+ INTR_ENAB(addr), inb(INTR_ENAB(addr)));
+}
+#endif /* NLPR > 0 */
diff --git a/i386/i386at/lpr.h b/i386/i386at/lpr.h
new file mode 100644
index 0000000..cab3016
--- /dev/null
+++ b/i386/i386at/lpr.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Parallel port printer driver v1.0
+ * All rights reserved.
+ */
+
+#ifndef _LPRREG_H_
+#define _LPRREG_H_
+
+#define DATA(addr) (addr + 0)
+#define STATUS(addr) (addr + 1)
+#define INTR_ENAB(addr) (addr + 2)
+
+extern void lprintr(int unit);
+int lprprobe(vm_offset_t port, struct bus_ctlr *dev);
+void lprstop(struct tty *tp, int flags);
+void lprstart(struct tty *tp);
+void lprattach(struct bus_device *dev);
+
+extern io_return_t
+lprgetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t *count);
+
+extern io_return_t
+lprsetstat(
+ dev_t dev,
+ dev_flavor_t flavor,
+ dev_status_t data,
+ mach_msg_type_number_t count);
+
+void lprpr_addr(unsigned short addr);
+
+extern int lpropen(dev_t dev, int flag, io_req_t ior);
+extern void lprclose(dev_t dev, int flag);
+extern int lprread(dev_t dev, io_req_t ior);
+extern int lprwrite(dev_t dev, io_req_t ior);
+extern int lprportdeath(dev_t dev, mach_port_t port);
+
+#endif /* _LPRREG_H_ */
diff --git a/i386/i386at/mem.c b/i386/i386at/mem.c
new file mode 100644
index 0000000..f46fc03
--- /dev/null
+++ b/i386/i386at/mem.c
@@ -0,0 +1,42 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <device/io_req.h>
+#include <i386/model_dep.h>
+#include <i386at/biosmem.h>
+#include <i386at/mem.h>
+
+/* This provides access to any memory that is not main RAM */
+
+/*ARGSUSED*/
+vm_offset_t
+memmmap(dev_t dev, vm_offset_t off, vm_prot_t prot)
+{
+ if (biosmem_addr_available(off))
+ return -1;
+
+ return i386_btop(off);
+}
diff --git a/i386/i386at/mem.h b/i386/i386at/mem.h
new file mode 100644
index 0000000..a5b4aef
--- /dev/null
+++ b/i386/i386at/mem.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MEM_H_
+#define _MEM_H_
+
+extern vm_offset_t memmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+
+#endif /* _MEM_H_ */
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
new file mode 100644
index 0000000..edb5b48
--- /dev/null
+++ b/i386/i386at/model_dep.c
@@ -0,0 +1,674 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: model_dep.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Basic initialization for I386 - ISA bus machines.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <device/cons.h>
+
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <mach/machine.h>
+#include <mach/machine/multiboot.h>
+#include <mach/xen.h>
+
+#include <kern/assert.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/mach_clock.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <kern/startup.h>
+#include <kern/smp.h>
+#include <sys/types.h>
+#include <vm/vm_page.h>
+#include <i386/fpu.h>
+#include <i386/gdt.h>
+#include <i386/ktss.h>
+#include <i386/ldt.h>
+#include <i386/machspl.h>
+#include <i386/mp_desc.h>
+#include <i386/pit.h>
+#include <i386/pmap.h>
+#include <i386/proc_reg.h>
+#include <i386/vm_param.h>
+#include <i386/locore.h>
+#include <i386/model_dep.h>
+#include <i386/smp.h>
+#include <i386/seg.h>
+#include <i386at/acpi_parse_apic.h>
+#include <i386at/autoconf.h>
+#include <i386at/biosmem.h>
+#include <i386at/elf.h>
+#include <i386at/idt.h>
+#include <i386at/int_init.h>
+#include <i386at/kd.h>
+#include <i386at/rtc.h>
+#include <i386at/model_dep.h>
+#include <machine/irq.h>
+
+#ifdef MACH_XEN
+#include <xen/console.h>
+#include <xen/store.h>
+#include <xen/evt.h>
+#include <xen/xen.h>
+#endif /* MACH_XEN */
+
+#if ENABLE_IMMEDIATE_CONSOLE
+#include "immc.h"
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+
+/* Location of the kernel's symbol table.
+ Both of these are 0 if none is available. */
+#if MACH_KDB
+#include <ddb/db_sym.h>
+#include <i386/db_interface.h>
+
+/* ELF section header */
+static unsigned elf_shdr_num;
+static vm_size_t elf_shdr_size;
+static vm_offset_t elf_shdr_addr;
+static unsigned elf_shdr_shndx;
+
+#endif /* MACH_KDB */
+
+#define RESERVED_BIOS 0x10000
+
+/* A copy of the multiboot info structure passed by the boot loader. */
+#ifdef MACH_XEN
+struct start_info boot_info;
+#ifdef MACH_PSEUDO_PHYS
+unsigned long *mfn_list;
+#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+unsigned long *pfn_list = (void*) PFN_LIST;
+#endif
+#endif /* MACH_PSEUDO_PHYS */
+#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+unsigned long la_shift = VM_MIN_KERNEL_ADDRESS;
+#endif
+#else /* MACH_XEN */
+struct multiboot_raw_info boot_info;
+#endif /* MACH_XEN */
+
+/* Command line supplied to kernel. */
+char *kernel_cmdline = "";
+
+extern char version[];
+
+/* Realmode temporary GDT */
+extern struct pseudo_descriptor gdt_descr_tmp;
+
+/* Realmode relocated jmp */
+extern uint32_t apboot_jmp_offset;
+
+/* If set, reboot the system on ctrl-alt-delete. */
+boolean_t rebootflag = FALSE; /* exported to kdintr */
+
+#ifdef LINUX_DEV
+extern void linux_init(void);
+#endif
+
+/*
+ * Find devices. The system is alive.
+ */
+void machine_init(void)
+{
+ /*
+ * Make more free memory.
+ *
+ * This is particularly important for the Linux drivers which
+ * require available DMA memory.
+ */
+ biosmem_free_usable();
+
+ /*
+ * Set up to use floating point.
+ */
+ init_fpu();
+
+#ifdef MACH_HYP
+ hyp_init();
+#else /* MACH_HYP */
+#if defined(APIC)
+ int err;
+
+ err = acpi_apic_init();
+ if (err) {
+ printf("acpi_apic_init failed with %d\n", err);
+ for (;;);
+ }
+#endif
+#if (NCPUS > 1)
+ smp_init();
+#endif
+#if defined(APIC)
+ ioapic_configure();
+#endif
+ clkstart();
+
+ /*
+ * Initialize the console.
+ */
+ cninit();
+
+#ifdef LINUX_DEV
+ /*
+ * Initialize Linux drivers.
+ */
+ linux_init();
+#endif
+ /*
+ * Find the devices
+ */
+ probeio();
+#endif /* MACH_HYP */
+
+ /*
+ * Get the time
+ */
+ inittodr();
+
+#ifndef MACH_HYP
+ /*
+ * Tell the BIOS not to clear and test memory.
+ */
+ *(unsigned short *)phystokv(0x472) = 0x1234;
+#endif /* MACH_HYP */
+
+#if VM_MIN_KERNEL_ADDRESS == 0
+ /*
+ * Unmap page 0 to trap NULL references.
+ *
+ * Note that this breaks accessing some BIOS areas stored there.
+ */
+ pmap_unmap_page_zero();
+#endif
+
+#if NCPUS > 1
+ /*
+ * Patch the realmode gdt with the correct offset and the first jmp to
+ * protected mode with the correct target.
+ */
+ gdt_descr_tmp.linear_base += apboot_addr;
+ apboot_jmp_offset += apboot_addr;
+
+ /*
+ * Initialize the HPET
+ */
+ hpet_init();
+#endif
+}
+
+/* Conserve power on processor CPU. */
+void machine_idle (int cpu)
+{
+#ifdef MACH_HYP
+ hyp_idle();
+#else /* MACH_HYP */
+ assert (cpu == cpu_number ());
+ asm volatile ("hlt" : : : "memory");
+#endif /* MACH_HYP */
+}
+
+void machine_relax (void)
+{
+ asm volatile ("rep; nop" : : : "memory");
+}
+
+/*
+ * Halt a cpu.
+ */
+void halt_cpu(void)
+{
+#ifdef MACH_HYP
+ hyp_halt();
+#else /* MACH_HYP */
+ asm volatile("cli");
+ while (TRUE)
+ machine_idle (cpu_number ());
+#endif /* MACH_HYP */
+}
+
+/*
+ * Halt the system or reboot.
+ */
+void halt_all_cpus(boolean_t reboot)
+{
+ if (reboot) {
+#ifdef MACH_HYP
+ hyp_reboot();
+#endif /* MACH_HYP */
+ kdreboot();
+ }
+ else {
+ rebootflag = TRUE;
+#ifdef MACH_HYP
+ hyp_halt();
+#endif /* MACH_HYP */
+ printf("Shutdown completed successfully, now in tight loop.\n");
+ printf("You can safely power off the system or hit ctl-alt-del to reboot\n");
+ (void) spl0();
+ }
+ while (TRUE)
+ machine_idle (cpu_number ());
+}
+
+void db_halt_cpu(void)
+{
+ halt_all_cpus(0);
+}
+
+void db_reset_cpu(void)
+{
+ halt_all_cpus(1);
+}
+
+#ifndef MACH_HYP
+
+static void
+register_boot_data(const struct multiboot_raw_info *mbi)
+{
+ struct multiboot_raw_module *mod;
+ struct elf_shdr *shdr;
+ unsigned long tmp;
+ unsigned int i;
+
+ extern char _start[], _end[];
+
+ biosmem_register_boot_data(_kvtophys(&_start), _kvtophys(&_end), FALSE);
+
+ /* cmdline and modules are moved to a safe place by i386at_init. */
+
+ if ((mbi->flags & MULTIBOOT_LOADER_CMDLINE) && (mbi->cmdline != 0)) {
+ biosmem_register_boot_data(mbi->cmdline,
+ mbi->cmdline
+ + strlen((void *)phystokv(mbi->cmdline)) + 1, TRUE);
+ }
+
+ if (mbi->flags & MULTIBOOT_LOADER_MODULES && mbi->mods_count) {
+ i = mbi->mods_count * sizeof(struct multiboot_raw_module);
+ biosmem_register_boot_data(mbi->mods_addr, mbi->mods_addr + i, TRUE);
+
+ tmp = phystokv(mbi->mods_addr);
+
+ for (i = 0; i < mbi->mods_count; i++) {
+ mod = (struct multiboot_raw_module *)tmp + i;
+ if (mod->mod_end != mod->mod_start)
+ biosmem_register_boot_data(mod->mod_start, mod->mod_end, TRUE);
+
+ if (mod->string != 0) {
+ biosmem_register_boot_data(mod->string,
+ mod->string
+ + strlen((void *)phystokv(mod->string)) + 1,
+ TRUE);
+ }
+ }
+ }
+
+ if (mbi->flags & MULTIBOOT_LOADER_SHDR) {
+ tmp = mbi->shdr_num * mbi->shdr_size;
+ if (tmp != 0)
+ biosmem_register_boot_data(mbi->shdr_addr, mbi->shdr_addr + tmp, FALSE);
+
+ tmp = phystokv(mbi->shdr_addr);
+
+ for (i = 0; i < mbi->shdr_num; i++) {
+ shdr = (struct elf_shdr *)(tmp + (i * mbi->shdr_size));
+
+ if ((shdr->type != ELF_SHT_SYMTAB)
+ && (shdr->type != ELF_SHT_STRTAB))
+ continue;
+
+ if (shdr->size != 0)
+ biosmem_register_boot_data(shdr->addr, shdr->addr + shdr->size, FALSE);
+ }
+ }
+}
+
+#endif /* MACH_HYP */
+
+/*
+ * Basic PC VM initialization.
+ * Turns on paging and changes the kernel segments to use high linear addresses.
+ */
+static void
+i386at_init(void)
+{
+ /*
+ * Initialize the PIC prior to any possible call to an spl.
+ */
+#ifndef MACH_HYP
+# ifdef APIC
+ picdisable();
+# else
+ picinit();
+# endif
+#else /* MACH_HYP */
+ hyp_intrinit();
+#endif /* MACH_HYP */
+ spl_init = 1;
+
+ /*
+ * Read memory map and load it into the physical page allocator.
+ */
+#ifdef MACH_HYP
+ biosmem_xen_bootstrap();
+#else /* MACH_HYP */
+ register_boot_data((struct multiboot_raw_info *) &boot_info);
+ biosmem_bootstrap((struct multiboot_raw_info *) &boot_info);
+#endif /* MACH_HYP */
+
+#ifdef MACH_XEN
+ kernel_cmdline = (char*) boot_info.cmd_line;
+#else /* MACH_XEN */
+ vm_offset_t addr;
+
+ /* Copy content pointed by boot_info before losing access to it when it
+ * is too far in physical memory.
+ * Also avoids leaving them in precious areas such as DMA memory. */
+ if (boot_info.flags & MULTIBOOT_CMDLINE) {
+ int len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
+ if (! init_alloc_aligned(round_page(len), &addr))
+ panic("could not allocate memory for multiboot command line");
+ kernel_cmdline = (char*) phystokv(addr);
+ memcpy(kernel_cmdline, (void *)phystokv(boot_info.cmdline), len);
+ boot_info.cmdline = addr;
+ }
+
+ if (boot_info.flags & MULTIBOOT_MODS && boot_info.mods_count) {
+ struct multiboot_raw_module *m;
+ int i;
+
+ if (! init_alloc_aligned(
+ round_page(boot_info.mods_count * sizeof(*m)), &addr))
+ panic("could not allocate memory for multiboot modules");
+ m = (void*) phystokv(addr);
+ memcpy(m, (void*) phystokv(boot_info.mods_addr), boot_info.mods_count * sizeof(*m));
+ boot_info.mods_addr = addr;
+
+ for (i = 0; i < boot_info.mods_count; i++) {
+ vm_size_t size = m[i].mod_end - m[i].mod_start;
+ if (! init_alloc_aligned(round_page(size), &addr))
+ panic("could not allocate memory for multiboot "
+ "module %d", i);
+ memcpy((void*) phystokv(addr), (void*) phystokv(m[i].mod_start), size);
+ m[i].mod_start = addr;
+ m[i].mod_end = addr + size;
+
+ size = strlen((char*) phystokv(m[i].string)) + 1;
+ if (! init_alloc_aligned(round_page(size), &addr))
+ panic("could not allocate memory for multiboot "
+ "module command line %d", i);
+ memcpy((void*) phystokv(addr), (void*) phystokv(m[i].string), size);
+ m[i].string = addr;
+ }
+ }
+#endif /* MACH_XEN */
+
+ /*
+ * Initialize kernel physical map, mapping the
+ * region from loadpt to avail_start.
+ * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
+ * XXX make the BIOS page (page 0) read-only.
+ */
+ pmap_bootstrap();
+
+ /*
+ * Load physical segments into the VM system.
+ * The early allocation functions become unusable after
+ * this point.
+ */
+ biosmem_setup();
+
+ pmap_make_temporary_mapping();
+
+#ifndef MACH_HYP
+ /* Turn paging on.
+ * Also set the WP bit so that on 486 or better processors
+ * page-level write protection works in kernel mode.
+ */
+ set_cr0(get_cr0() | CR0_PG | CR0_WP);
+ set_cr0(get_cr0() & ~(CR0_CD | CR0_NW));
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ set_cr4(get_cr4() | CR4_PGE);
+#endif /* MACH_HYP */
+ flush_instr_queue();
+#ifdef MACH_PV_PAGETABLES
+ pmap_clear_bootstrap_pagetable((void *)boot_info.pt_base);
+#endif /* MACH_PV_PAGETABLES */
+
+ /*
+ * Initialize and activate the real i386 protected-mode structures.
+ */
+ gdt_init();
+ idt_init();
+#ifndef MACH_HYP
+ int_init();
+#endif /* MACH_HYP */
+ ldt_init();
+ ktss_init();
+
+#ifndef MACH_XEN
+ init_percpu(0);
+#endif
+#if NCPUS > 1
+ /* Initialize SMP structures in the master processor */
+ mp_desc_init(0);
+#endif // NCPUS
+
+ pmap_remove_temporary_mapping();
+
+#ifdef MACH_XEN
+ hyp_p2m_init();
+#endif /* MACH_XEN */
+
+ interrupt_stack_alloc();
+}
+
+/*
+ * C boot entrypoint - called by boot_entry in boothdr.S.
+ * Running in flat mode, but without paging yet.
+ */
+void c_boot_entry(vm_offset_t bi)
+{
+#if ENABLE_IMMEDIATE_CONSOLE
+ romputc = immc_romputc;
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+
+ /* Stash the boot_image_info pointer. */
+ boot_info = *(typeof(boot_info)*)phystokv(bi);
+ int cpu_type;
+
+ /* Before we do _anything_ else, print the hello message.
+ If there are no initialized console devices yet,
+ it will be stored and printed at the first opportunity. */
+ printf("%s", version);
+ printf("\n");
+
+#ifdef MACH_XEN
+ printf("Running on %s.\n", boot_info.magic);
+ if (boot_info.flags & SIF_PRIVILEGED)
+ panic("Mach can't run as dom0.");
+#ifdef MACH_PSEUDO_PHYS
+ mfn_list = (void*)boot_info.mfn_list;
+#endif
+#else /* MACH_XEN */
+
+#if MACH_KDB
+ /*
+ * Locate the kernel's symbol table, if the boot loader provided it.
+ * We need to do this before i386at_init()
+ * so that the symbol table's memory won't be stomped on.
+ */
+ if ((boot_info.flags & MULTIBOOT_ELF_SHDR)
+ && boot_info.shdr_num)
+ {
+ elf_shdr_num = boot_info.shdr_num;
+ elf_shdr_size = boot_info.shdr_size;
+ elf_shdr_addr = (vm_offset_t)phystokv(boot_info.shdr_addr);
+ elf_shdr_shndx = boot_info.shdr_strndx;
+
+ printf("ELF section header table at %08" PRIxPTR "\n", elf_shdr_addr);
+ }
+#endif /* MACH_KDB */
+#endif /* MACH_XEN */
+
+ cpu_type = discover_x86_cpu_type ();
+
+ /*
+ * Do basic VM initialization
+ */
+ i386at_init();
+
+#if MACH_KDB
+ /*
+ * Initialize the kernel debugger's kernel symbol table.
+ */
+ if (elf_shdr_num)
+ {
+ elf_db_sym_init(elf_shdr_num,elf_shdr_size,
+ elf_shdr_addr, elf_shdr_shndx,
+ "mach", NULL);
+ }
+#endif /* MACH_KDB */
+
+ machine_slot[0].is_cpu = TRUE;
+ machine_slot[0].cpu_subtype = CPU_SUBTYPE_AT386;
+
+#if defined(__x86_64__) && !defined(USER32)
+ machine_slot[0].cpu_type = CPU_TYPE_X86_64;
+#else
+ switch (cpu_type)
+ {
+ default:
+ printf("warning: unknown cpu type %d, assuming i386\n", cpu_type);
+ case 3:
+ machine_slot[0].cpu_type = CPU_TYPE_I386;
+ break;
+ case 4:
+ machine_slot[0].cpu_type = CPU_TYPE_I486;
+ break;
+ case 5:
+ machine_slot[0].cpu_type = CPU_TYPE_PENTIUM;
+ break;
+ case 6:
+ case 15:
+ machine_slot[0].cpu_type = CPU_TYPE_PENTIUMPRO;
+ break;
+ }
+#endif
+
+ /*
+ * Start the system.
+ */
+ setup_main();
+
+}
+
+#include <mach/vm_prot.h>
+#include <vm/pmap.h>
+#include <mach/time_value.h>
+
+vm_offset_t
+timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot)
+{
+ extern time_value_t *mtime;
+
+ if (prot & VM_PROT_WRITE) return (-1);
+
+ return (i386_btop(pmap_extract(pmap_kernel(), (vm_offset_t) mtime)));
+}
+
+void
+startrtclock(void)
+{
+#ifdef APIC
+ unmask_irq(timer_pin);
+ calibrate_lapic_timer();
+ if (cpu_number() != 0) {
+ lapic_enable_timer();
+ }
+#else
+ clkstart();
+#ifndef MACH_HYP
+ unmask_irq(0);
+#endif
+#endif
+}
+
+void
+inittodr(void)
+{
+ time_value64_t new_time;
+ uint64_t newsecs;
+
+ (void) readtodc(&newsecs);
+ new_time.seconds = newsecs;
+ new_time.nanoseconds = 0;
+
+ {
+ spl_t s = splhigh();
+ time = new_time;
+ splx(s);
+ }
+}
+
+void
+resettodr(void)
+{
+ writetodc();
+}
+
+boolean_t
+init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
+{
+ *addrp = biosmem_bootalloc(vm_page_atop(vm_page_round(size)));
+
+ if (*addrp == 0)
+ return FALSE;
+
+ return TRUE;
+}
+
+/* Grab a physical page:
+ the standard memory allocation mechanism
+ during system initialization. */
+vm_offset_t
+pmap_grab_page(void)
+{
+ vm_offset_t addr;
+ if (!init_alloc_aligned(PAGE_SIZE, &addr))
+ panic("Not enough memory to initialize Mach");
+ return addr;
+}
diff --git a/i386/i386at/model_dep.h b/i386/i386at/model_dep.h
new file mode 100644
index 0000000..3d5b664
--- /dev/null
+++ b/i386/i386at/model_dep.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MODEL_DEP_H_
+#define _MODEL_DEP_H_
+
+#include <i386/vm_param.h>
+#include <mach/vm_prot.h>
+
+/*
+ * Interrupt stack.
+ */
+extern vm_offset_t int_stack_top[NCPUS], int_stack_base[NCPUS];
+
+/* Check whether P points to the per-cpu interrupt stack. */
+#define ON_INT_STACK(P, CPU) (((P) & ~(INTSTACK_SIZE-1)) == int_stack_base[CPU])
+
+extern vm_offset_t timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+
+void inittodr(void);
+
+boolean_t init_alloc_aligned(vm_size_t size, vm_offset_t *addrp);
+
+#endif /* _MODEL_DEP_H_ */
diff --git a/i386/i386at/pic_isa.c b/i386/i386at/pic_isa.c
new file mode 100644
index 0000000..1e5ac10
--- /dev/null
+++ b/i386/i386at/pic_isa.c
@@ -0,0 +1,56 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <sys/types.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+#include <i386/fpu.h>
+#include <i386/hardclock.h>
+#include <i386at/kd.h>
+
+/* These interrupts are always present */
+
+interrupt_handler_fn ivect[NINTR] = {
+ /* 00 */ (interrupt_handler_fn)hardclock, /* always */
+ /* 01 */ kdintr, /* kdintr, ... */
+ /* 02 */ intnull,
+ /* 03 */ intnull, /* lnpoll, comintr, ... */
+
+ /* 04 */ intnull, /* comintr, ... */
+ /* 05 */ intnull, /* comintr, wtintr, ... */
+ /* 06 */ intnull, /* fdintr, ... */
+ /* 07 */ intnull, /* qdintr, ... */
+
+ /* 08 */ intnull,
+ /* 09 */ intnull, /* ether */
+ /* 10 */ intnull,
+ /* 11 */ intnull,
+
+ /* 12 */ intnull,
+ /* 13 */ fpintr, /* always */
+ /* 14 */ intnull, /* hdintr, ... */
+ /* 15 */ intnull, /* ??? */
+};
diff --git a/i386/i386at/rtc.c b/i386/i386at/rtc.c
new file mode 100644
index 0000000..1930beb
--- /dev/null
+++ b/i386/i386at/rtc.c
@@ -0,0 +1,242 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <sys/types.h>
+#include <kern/mach_clock.h>
+#include <kern/printf.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/rtc.h>
+
+/* time of day stored in RTC are currently between 1970 and 2070. Update that
+ * before 2070 please. */
+#define CENTURY_START 1970
+
+static boolean_t first_rtcopen_ever = TRUE;
+
+static void
+rtcinit(void)
+{
+ outb(RTC_ADDR, RTC_A);
+ outb(RTC_DATA, RTC_DIV2 | RTC_RATE6);
+ outb(RTC_ADDR, RTC_B);
+ outb(RTC_DATA, RTC_HM);
+}
+
+
+static int
+rtcget(struct rtc_st *st)
+{
+ unsigned char *regs = (unsigned char *)st;
+ if (first_rtcopen_ever) {
+ rtcinit();
+ first_rtcopen_ever = FALSE;
+ }
+ outb(RTC_ADDR, RTC_D);
+ if ((inb(RTC_DATA) & RTC_VRT) == 0) return(-1);
+ outb(RTC_ADDR, RTC_A);
+ while (inb(RTC_DATA) & RTC_UIP) /* busy wait */
+ outb(RTC_ADDR, RTC_A);
+ load_rtc(regs);
+ return(0);
+}
+
+static void
+rtcput(struct rtc_st *st)
+{
+ unsigned char *regs = (unsigned char *)st;
+ unsigned char x;
+
+ if (first_rtcopen_ever) {
+ rtcinit();
+ first_rtcopen_ever = FALSE;
+ }
+ outb(RTC_ADDR, RTC_B);
+ x = inb(RTC_DATA);
+ outb(RTC_ADDR, RTC_B);
+ outb(RTC_DATA, x | RTC_SET);
+ save_rtc(regs);
+ outb(RTC_ADDR, RTC_B);
+ outb(RTC_DATA, x & ~RTC_SET);
+}
+
+
+static int month[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+static int
+yeartoday(int year)
+{
+ if (year%4)
+ /* Not divisible by 4, not bissextile */
+ return 365;
+
+ /* Divisible by 4 */
+ if (year % 100)
+ /* Not divisible by 100, bissextile */
+ return 366;
+
+ /* Divisible by 100 */
+ if (year % 400)
+ /* Not divisible by 400, not bissextile */
+ return 365;
+
+ /* Divisible by 400 */
+ /* Rules for 2000 and further have not been officially decided yet.
+ * 2000 was made bissextile. */
+ return 366;
+}
+
+static int
+hexdectodec(char n)
+{
+ return(((n>>4)&0x0F)*10 + (n&0x0F));
+}
+
+static char
+dectohexdec(int n)
+{
+ return((char)(((n/10)<<4)&0xF0) | ((n%10)&0x0F));
+}
+
+int
+readtodc(uint64_t *tp)
+{
+ struct rtc_st rtclk;
+ time_t n;
+ int sec, min, hr, dom, mon, yr;
+ int i, days = 0;
+ spl_t ospl;
+
+ ospl = splclock();
+ if (rtcget(&rtclk)) {
+ splx(ospl);
+ return(-1);
+ }
+ splx (ospl);
+
+ sec = hexdectodec(rtclk.rtc_sec);
+ min = hexdectodec(rtclk.rtc_min);
+ hr = hexdectodec(rtclk.rtc_hr);
+ dom = hexdectodec(rtclk.rtc_dom);
+ mon = hexdectodec(rtclk.rtc_mon);
+ yr = hexdectodec(rtclk.rtc_yr);
+ yr = (yr < CENTURY_START%100) ?
+ yr+CENTURY_START-CENTURY_START%100+100 :
+ yr+CENTURY_START-CENTURY_START%100;
+
+ if (yr >= CENTURY_START+90) {
+ printf("FIXME: we are approaching %u, update CENTURY_START\n", CENTURY_START);
+ }
+
+ printf("RTC time is %04u-%02u-%02u %02u:%02u:%02u\n", yr, mon, dom, hr, min, sec);
+
+ n = sec + 60 * min + 3600 * hr;
+ n += (dom - 1) * 3600 * 24;
+
+ if (yeartoday(yr) == 366)
+ month[1] = 29;
+ for (i = mon - 2; i >= 0; i--)
+ days += month[i];
+ month[1] = 28;
+ /* Epoch shall be 1970 January 1st */
+ for (i = 1970; i < yr; i++)
+ days += yeartoday(i);
+ n += days * 3600 * 24;
+
+
+ *tp = n;
+
+ return(0);
+}
+
+int
+writetodc(void)
+{
+ struct rtc_st rtclk;
+ time_t n;
+ int diff, i, j;
+ spl_t ospl;
+
+ ospl = splclock();
+ if (rtcget(&rtclk)) {
+ splx(ospl);
+ return(-1);
+ }
+ splx(ospl);
+
+ diff = 0;
+ n = (time.seconds - diff) % (3600 * 24); /* hrs+mins+secs */
+ rtclk.rtc_sec = dectohexdec(n%60);
+ n /= 60;
+ rtclk.rtc_min = dectohexdec(n%60);
+ rtclk.rtc_hr = dectohexdec(n/60);
+
+ n = (time.seconds - diff) / (3600 * 24); /* days */
+ rtclk.rtc_dow = (n + 4) % 7; /* 1/1/70 is Thursday */
+
+ /* Epoch shall be 1970 January 1st */
+ for (j = 1970, i = yeartoday(j); n >= i; j++, i = yeartoday(j))
+ n -= i;
+
+ rtclk.rtc_yr = dectohexdec(j % 100);
+
+ if (i == 366)
+ month[1] = 29;
+ for (i = 0; n >= month[i]; i++)
+ n -= month[i];
+ month[1] = 28;
+ rtclk.rtc_mon = dectohexdec(++i);
+
+ rtclk.rtc_dom = dectohexdec(++n);
+
+ ospl = splclock();
+ rtcput(&rtclk);
+ splx(ospl);
+
+ return(0);
+}
diff --git a/i386/i386at/rtc.h b/i386/i386at/rtc.h
new file mode 100644
index 0000000..5379722
--- /dev/null
+++ b/i386/i386at/rtc.h
@@ -0,0 +1,143 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _RTC_H_
+#define _RTC_H_
+
+#define RTC_ADDR 0x70 /* I/O port address for register select */
+#define RTC_DATA 0x71 /* I/O port address for data read/write */
+
+/*
+ * Register A definitions
+ */
+#define RTC_A 0x0a /* register A address */
+#define RTC_UIP 0x80 /* Update in progress bit */
+#define RTC_DIV0 0x00 /* Time base of 4.194304 MHz */
+#define RTC_DIV1 0x10 /* Time base of 1.048576 MHz */
+#define RTC_DIV2 0x20 /* Time base of 32.768 KHz */
+#define RTC_RATE6 0x06 /* interrupt rate of 976.562 */
+
+/*
+ * Register B definitions
+ */
+#define RTC_B 0x0b /* register B address */
+#define RTC_SET 0x80 /* stop updates for time set */
+#define RTC_PIE 0x40 /* Periodic interrupt enable */
+#define RTC_AIE 0x20 /* Alarm interrupt enable */
+#define RTC_UIE 0x10 /* Update ended interrupt enable */
+#define RTC_SQWE 0x08 /* Square wave enable */
+#define RTC_DM 0x04 /* Date mode, 1 = binary, 0 = BCD */
+#define RTC_HM 0x02 /* hour mode, 1 = 24 hour, 0 = 12 hour */
+#define RTC_DSE 0x01 /* Daylight savings enable */
+
+/*
+ * Register C definitions
+ */
+#define RTC_C 0x0c /* register C address */
+#define RTC_IRQF 0x80 /* IRQ flag */
+#define RTC_PF 0x40 /* PF flag bit */
+#define RTC_AF 0x20 /* AF flag bit */
+#define RTC_UF 0x10 /* UF flag bit */
+
+/*
+ * Register D definitions
+ */
+#define RTC_D 0x0d /* register D address */
+#define RTC_VRT 0x80 /* Valid RAM and time bit */
+
+#define RTC_NREG 0x0e /* number of RTC registers */
+#define RTC_NREGP 0x0a /* number of RTC registers to set time */
+
+#define RTCRTIME _IOR('c', 0x01, struct rtc_st) /* Read time from RTC */
+#define RTCSTIME _IOW('c', 0x02, struct rtc_st) /* Set time into RTC */
+
+struct rtc_st {
+ char rtc_sec;
+ char rtc_asec;
+ char rtc_min;
+ char rtc_amin;
+ char rtc_hr;
+ char rtc_ahr;
+ char rtc_dow;
+ char rtc_dom;
+ char rtc_mon;
+ char rtc_yr;
+ char rtc_statusa;
+ char rtc_statusb;
+ char rtc_statusc;
+ char rtc_statusd;
+};
+
+/*
+ * this macro reads contents of real time clock to specified buffer
+ */
+#define load_rtc(regs) \
+{\
+ int i; \
+ \
+ for (i = 0; i < RTC_NREG; i++) { \
+ outb(RTC_ADDR, i); \
+ regs[i] = inb(RTC_DATA); \
+ } \
+}
+
+/*
+ * this macro writes contents of specified buffer to real time clock
+ */
+#define save_rtc(regs) \
+{ \
+ int i; \
+ for (i = 0; i < RTC_NREGP; i++) { \
+ outb(RTC_ADDR, i); \
+ outb(RTC_DATA, regs[i]);\
+ } \
+}
+
+extern int readtodc(uint64_t *tp);
+extern int writetodc(void);
+
+#endif /* _RTC_H_ */
diff --git a/i386/include/mach/i386/asm.h b/i386/include/mach/i386/asm.h
new file mode 100644
index 0000000..8ceae8c
--- /dev/null
+++ b/i386/include/mach/i386/asm.h
@@ -0,0 +1,146 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_ASM_H_
+#define _MACH_I386_ASM_H_
+
+#ifdef __i386__
+#define S_ARG0 4(%esp)
+#define S_ARG1 8(%esp)
+#define S_ARG2 12(%esp)
+#define S_ARG3 16(%esp)
+
+#define FRAME pushl %ebp; movl %esp, %ebp
+#define EMARF leave
+
+#define B_ARG0 8(%ebp)
+#define B_ARG1 12(%ebp)
+#define B_ARG2 16(%ebp)
+#define B_ARG3 20(%ebp)
+#endif
+
+#ifdef __x86_64__
+#define S_ARG0 %rdi
+#define S_ARG1 %rsi
+#define S_ARG2 %rdx
+#define S_ARG3 %rcx
+#define S_ARG4 %r8
+#define S_ARG5 %r9
+
+#define FRAME pushq %rbp; movq %rsp, %rbp
+#define EMARF leave
+
+#define B_ARG0 S_ARG0
+#define B_ARG1 S_ARG1
+#define B_ARG2 S_ARG2
+#define B_ARG3 S_ARG3
+
+#ifdef MACH_XEN
+#define INT_FIX \
+ popq %rcx ;\
+ popq %r11
+#else
+#define INT_FIX
+#endif
+#endif
+
+#ifdef i486
+#define TEXT_ALIGN 4
+#else
+#define TEXT_ALIGN 2
+#endif
+#define DATA_ALIGN 2
+#define ALIGN TEXT_ALIGN
+
+#define P2ALIGN(p2) .p2align p2 /* gas-specific */
+
+#define LCL(x) x
+
+#define LB(x,n) n
+#ifdef __STDC__
+#ifndef __ELF__
+#define EXT(x) _ ## x
+#define LEXT(x) _ ## x ## :
+#define SEXT(x) "_"#x
+#else
+#define EXT(x) x
+#define LEXT(x) x ## :
+#define SEXT(x) #x
+#endif
+#define LCLL(x) x ## :
+#define gLB(n) n ## :
+#define LBb(x,n) n ## b
+#define LBf(x,n) n ## f
+#else /* __STDC__ */
+#error XXX elf
+#define EXT(x) _/**/x
+#define LEXT(x) _/**/x/**/:
+#define LCLL(x) x/**/:
+#define gLB(n) n/**/:
+#define LBb(x,n) n/**/b
+#define LBf(x,n) n/**/f
+#endif /* __STDC__ */
+#define SVC .byte 0x9a; .long 0; .word 0x7
+
+#define String .ascii
+#define Value .word
+#define Times(a,b) (a*b)
+#define Divide(a,b) (a/b)
+
+#define INB inb %dx, %al
+#define OUTB outb %al, %dx
+#define INL inl %dx, %eax
+#define OUTL outl %eax, %dx
+
+#define data16 .byte 0x66
+#define addr16 .byte 0x67
+
+
+
+#ifdef GPROF
+
+#define MCOUNT .data; gLB(9) .long 0; .text; lea LBb(x, 9),%edx; call mcount
+#define ENTRY(x) .globl EXT(x); .type EXT(x), @function; .p2align TEXT_ALIGN; LEXT(x) ; \
+ pushl %ebp; movl %esp, %ebp; MCOUNT; popl %ebp;
+#define ENTRY2(x,y) .globl EXT(x); .type EXT(x), @function; .globl EXT(y); .type EXT(y), @function; \
+ .p2align TEXT_ALIGN; LEXT(x) LEXT(y)
+#define ASENTRY(x) .globl x; .type x, @function; .p2align TEXT_ALIGN; gLB(x) ; \
+ pushl %ebp; movl %esp, %ebp; MCOUNT; popl %ebp;
+#define END(x) .size x,.-x
+#else /* GPROF */
+
+#define MCOUNT
+#define ENTRY(x) .globl EXT(x); .type EXT(x), @function; .p2align TEXT_ALIGN; LEXT(x)
+#define ENTRY2(x,y) .globl EXT(x); .type EXT(x), @function; .globl EXT(y); .type EXT(y), @function; \
+ .p2align TEXT_ALIGN; LEXT(x) LEXT(y)
+#define ASENTRY(x) .globl x; .type x, @function; .p2align TEXT_ALIGN; gLB(x)
+#define END(x) .size x,.-x
+#endif /* GPROF */
+
+#define Entry(x) .globl EXT(x); .type EXT(x), @function; .p2align TEXT_ALIGN; LEXT(x)
+#define DATA(x) .globl EXT(x); .p2align DATA_ALIGN; LEXT(x)
+
+#endif /* _MACH_I386_ASM_H_ */
diff --git a/i386/include/mach/i386/boolean.h b/i386/include/mach/i386/boolean.h
new file mode 100644
index 0000000..a33d007
--- /dev/null
+++ b/i386/include/mach/i386/boolean.h
@@ -0,0 +1,37 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: boolean.h
+ *
+ * Boolean type, for I386.
+ */
+
+#ifndef _MACH_I386_BOOLEAN_H_
+#define _MACH_I386_BOOLEAN_H_
+
+typedef int boolean_t;
+
+#endif /* _MACH_I386_BOOLEAN_H_ */
diff --git a/i386/include/mach/i386/eflags.h b/i386/include/mach/i386/eflags.h
new file mode 100644
index 0000000..336a73a
--- /dev/null
+++ b/i386/include/mach/i386/eflags.h
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_EFLAGS_H_
+#define _MACH_I386_EFLAGS_H_
+
+/*
+ * i386 flags register
+ */
+#define EFL_CF 0x00000001 /* carry */
+#define EFL_PF 0x00000004 /* parity of low 8 bits */
+#define EFL_AF 0x00000010 /* carry out of bit 3 */
+#define EFL_ZF 0x00000040 /* zero */
+#define EFL_SF 0x00000080 /* sign */
+#define EFL_TF 0x00000100 /* trace trap */
+#define EFL_IF 0x00000200 /* interrupt enable */
+#define EFL_DF 0x00000400 /* direction */
+#define EFL_OF 0x00000800 /* overflow */
+#define EFL_IOPL 0x00003000 /* IO privilege level: */
+#define EFL_IOPL_KERNEL 0x00000000 /* kernel */
+#define EFL_IOPL_USER 0x00003000 /* user */
+#define EFL_NT 0x00004000 /* nested task */
+#define EFL_RF 0x00010000 /* resume without tracing */
+#define EFL_VM 0x00020000 /* virtual 8086 mode */
+#define EFL_AC 0x00040000 /* alignment check */
+#define EFL_VI 0x00080000 /* virtual interrupt */
+#define EFL_VIP 0x00100000 /* virtual interrupt pending */
+#define EFL_ID 0x00200000 /* cpuid available */
+
+#endif /* _MACH_I386_EFLAGS_H_ */
diff --git a/i386/include/mach/i386/exception.h b/i386/include/mach/i386/exception.h
new file mode 100644
index 0000000..1aaf6c7
--- /dev/null
+++ b/i386/include/mach/i386/exception.h
@@ -0,0 +1,85 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Codes and subcodes for 80386 exceptions.
+ */
+
+/*
+ * EXC_BAD_INSTRUCTION
+ */
+
+#ifndef _MACH_I386_EXCEPTION_H_
+#define _MACH_I386_EXCEPTION_H_
+
+#define EXC_I386_INVOP 1
+
+/*
+ * EXC_ARITHMETIC
+ */
+
+#define EXC_I386_DIV 1
+#define EXC_I386_INTO 2
+#define EXC_I386_NOEXT 3
+#define EXC_I386_EXTOVR 4
+#define EXC_I386_EXTERR 5
+#define EXC_I386_EMERR 6
+#define EXC_I386_BOUND 7
+
+/*
+ * EXC_SOFTWARE
+ */
+
+/*
+ * EXC_BAD_ACCESS
+ */
+
+/*
+ * EXC_BREAKPOINT
+ */
+
+#define EXC_I386_SGL 1
+#define EXC_I386_BPT 2
+
+#define EXC_I386_DIVERR		0	/* divide by 0 error */
+#define EXC_I386_SGLSTP 1 /* single step */
+#define EXC_I386_NMIFLT 2 /* NMI */
+#define EXC_I386_BPTFLT 3 /* breakpoint fault */
+#define EXC_I386_INTOFLT 4 /* INTO overflow fault */
+#define EXC_I386_BOUNDFLT 5 /* BOUND instruction fault */
+#define EXC_I386_INVOPFLT 6 /* invalid opcode fault */
+#define EXC_I386_NOEXTFLT	7	/* extension not available fault */
+#define EXC_I386_DBLFLT 8 /* double fault */
+#define EXC_I386_EXTOVRFLT 9 /* extension overrun fault */
+#define EXC_I386_INVTSSFLT 10 /* invalid TSS fault */
+#define EXC_I386_SEGNPFLT 11 /* segment not present fault */
+#define EXC_I386_STKFLT 12 /* stack fault */
+#define EXC_I386_GPFLT 13 /* general protection fault */
+#define EXC_I386_PGFLT 14 /* page fault */
+#define EXC_I386_EXTERRFLT 16 /* extension error fault */
+#define EXC_I386_ENDPERR 33 /* emulated extension error flt */
+#define EXC_I386_ENOEXTFLT 32 /* emulated ext not present */
+
+#endif /* _MACH_I386_EXCEPTION_H_ */
diff --git a/i386/include/mach/i386/exec/elf.h b/i386/include/mach/i386/exec/elf.h
new file mode 100644
index 0000000..60b1657
--- /dev/null
+++ b/i386/include/mach/i386/exec/elf.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_I386_EXEC_ELF_H_
+#define _MACH_I386_EXEC_ELF_H_
+
+typedef unsigned int Elf32_Addr;
+typedef unsigned short Elf32_Half;
+typedef unsigned int Elf32_Off;
+typedef signed int Elf32_Sword;
+typedef unsigned int Elf32_Word;
+
+typedef uint64_t Elf64_Addr;
+typedef uint64_t Elf64_Off;
+typedef int32_t Elf64_Shalf;
+typedef int32_t Elf64_Sword;
+typedef uint32_t Elf64_Word;
+typedef int64_t Elf64_Sxword;
+typedef uint64_t Elf64_Xword;
+typedef uint16_t Elf64_Half;
+
+
+/* Architecture identification parameters for x86. */
+#if defined(__x86_64__) && ! defined(USER32)
+#define MY_ELF_CLASS ELFCLASS64
+#define MY_EI_DATA ELFDATA2LSB
+#define MY_E_MACHINE EM_X86_64
+#else
+#define MY_ELF_CLASS ELFCLASS32
+#define MY_EI_DATA ELFDATA2LSB
+#define MY_E_MACHINE EM_386
+#endif
+
+#endif /* _MACH_I386_EXEC_ELF_H_ */
diff --git a/i386/include/mach/i386/fp_reg.h b/i386/include/mach/i386/fp_reg.h
new file mode 100644
index 0000000..7ad0ade
--- /dev/null
+++ b/i386/include/mach/i386/fp_reg.h
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_FP_REG_H_
+#define _MACH_I386_FP_REG_H_
+
+/*
+ * Floating point registers and status, as saved
+ * and restored by FP save/restore instructions.
+ */
+struct i386_fp_save {
+ unsigned short fp_control; /* control */
+ unsigned short fp_unused_1;
+ unsigned short fp_status; /* status */
+ unsigned short fp_unused_2;
+ unsigned short fp_tag; /* register tags */
+ unsigned short fp_unused_3;
+ unsigned int fp_eip; /* eip at failed instruction */
+ unsigned short fp_cs; /* cs at failed instruction */
+ unsigned short fp_opcode; /* opcode of failed instruction */
+ unsigned int fp_dp; /* data address */
+ unsigned short fp_ds; /* data segment */
+ unsigned short fp_unused_4;
+};
+
+struct i386_fp_regs {
+ unsigned short fp_reg_word[8][5];
+ /* space for 8 80-bit FP registers */
+};
+
+#define XSAVE_XCOMP_BV_COMPACT (((unsigned long long)1) << 63)
+struct i386_xfp_xstate_header {
+ unsigned long long xfp_features;
+ unsigned long long xcomp_bv;
+ unsigned long long reserved[6];
+} __attribute__((packed, aligned(64)));
+
+struct i386_xfp_save {
+ unsigned short fp_control; /* control */
+ unsigned short fp_status; /* status */
+ unsigned short fp_tag; /* register tags */
+ unsigned short fp_opcode; /* opcode of failed instruction */
+ unsigned int fp_eip; /* eip at failed instruction */
+ unsigned short fp_cs; /* cs at failed instruction / eip high */
+ unsigned short fp_eip3; /* eip higher */
+ unsigned int fp_dp; /* data address */
+ unsigned short fp_ds; /* data segment / dp high */
+ unsigned short fp_dp3; /* dp higher */
+ unsigned int fp_mxcsr; /* MXCSR */
+ unsigned int fp_mxcsr_mask; /* MXCSR_MASK */
+ unsigned char fp_reg_word[8][16];
+ /* space for 8 128-bit FP registers */
+ unsigned char fp_xreg_word[16][16];
+ /* space for 16 128-bit XMM registers */
+ unsigned int padding[24];
+ struct i386_xfp_xstate_header header;
+ unsigned char extended[0]; /* Extended region */
+} __attribute__((packed, aligned(64)));
+
+/*
+ * Control register
+ */
+#define FPC_IE 0x0001 /* enable invalid operation
+ exception */
+#define FPC_IM FPC_IE
+#define FPC_DE 0x0002 /* enable denormalized operation
+ exception */
+#define FPC_DM FPC_DE
+#define FPC_ZE 0x0004 /* enable zero-divide exception */
+#define FPC_ZM FPC_ZE
+#define FPC_OE 0x0008 /* enable overflow exception */
+#define FPC_OM FPC_OE
+#define FPC_UE 0x0010 /* enable underflow exception */
+#define FPC_PE 0x0020 /* enable precision exception */
+#define FPC_PC 0x0300 /* precision control: */
+#define FPC_PC_24 0x0000 /* 24 bits */
+#define FPC_PC_53 0x0200 /* 53 bits */
+#define FPC_PC_64 0x0300 /* 64 bits */
+#define FPC_RC 0x0c00 /* rounding control: */
+#define FPC_RC_RN 0x0000 /* round to nearest or even */
+#define FPC_RC_RD 0x0400 /* round down */
+#define FPC_RC_RU 0x0800 /* round up */
+#define FPC_RC_CHOP 0x0c00 /* chop */
+#define FPC_IC 0x1000 /* infinity control (obsolete) */
+#define FPC_IC_PROJ 0x0000 /* projective infinity */
+#define FPC_IC_AFF 0x1000 /* affine infinity (std) */
+
+/*
+ * Status register
+ */
+#define FPS_IE 0x0001 /* invalid operation */
+#define FPS_DE 0x0002 /* denormalized operand */
+#define FPS_ZE 0x0004 /* divide by zero */
+#define FPS_OE 0x0008 /* overflow */
+#define FPS_UE 0x0010 /* underflow */
+#define FPS_PE 0x0020 /* precision */
+#define FPS_SF 0x0040 /* stack flag */
+#define FPS_ES 0x0080 /* error summary */
+#define FPS_C0 0x0100 /* condition code bit 0 */
+#define FPS_C1 0x0200 /* condition code bit 1 */
+#define FPS_C2 0x0400 /* condition code bit 2 */
+#define FPS_TOS 0x3800 /* top-of-stack pointer */
+#define FPS_TOS_SHIFT 11
+#define FPS_C3 0x4000 /* condition code bit 3 */
+#define FPS_BUSY 0x8000 /* FPU busy */
+
+/*
+ * Kind of floating-point support provided by kernel.
+ */
+#define FP_NO 0 /* no floating point */
+#define FP_SOFT 1 /* software FP emulator */
+#define FP_287 2 /* 80287 */
+#define FP_387 3 /* 80387 or 80486 */
+#define FP_387FX 4 /* FXSAVE/RSTOR-capable */
+#define FP_387X 5 /* XSAVE/RSTOR-capable */
+
+#endif /* _MACH_I386_FP_REG_H_ */
diff --git a/i386/include/mach/i386/ioccom.h b/i386/include/mach/i386/ioccom.h
new file mode 100644
index 0000000..17566a3
--- /dev/null
+++ b/i386/include/mach/i386/ioccom.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved. The Berkeley software License Agreement
+ * specifies the terms and conditions for redistribution.
+ */
+
+#ifndef __sys_ioccom_h
+#define __sys_ioccom_h
+
+/*
+ * Ioctl's have the command encoded in the lower word,
+ * and the size of any in or out parameters in the upper
+ * word. The high 2 bits of the upper word are used
+ * to encode the in/out status of the parameter; for now
+ * we restrict parameters to at most 255 bytes.
+ */
+#define _IOCPARM_MASK 0xff /* parameters must be < 256 bytes */
+#define _IOC_VOID 0x20000000 /* no parameters */
+#define _IOC_OUT 0x40000000 /* copy out parameters */
+#define _IOC_IN 0x80000000 /* copy in parameters */
+#define _IOC_INOUT (_IOC_IN|_IOC_OUT)
+/* the 0x20000000 is so we can distinguish new ioctl's from old */
+#define _IO(x,y) (_IOC_VOID|('x'<<8)|y)
+#define _IOR(x,y,t) (_IOC_OUT|((sizeof(t)&_IOCPARM_MASK)<<16)|('x'<<8)|y)
+#define _IORN(x,y,t) (_IOC_OUT|(((t)&_IOCPARM_MASK)<<16)|('x'<<8)|y)
+#define _IOW(x,y,t) (_IOC_IN|((sizeof(t)&_IOCPARM_MASK)<<16)|('x'<<8)|y)
+#define _IOWN(x,y,t) (_IOC_IN|(((t)&_IOCPARM_MASK)<<16)|('x'<<8)|y)
+/* this should be _IORW, but stdio got there first */
+#define _IOWR(x,y,t) (_IOC_INOUT|((sizeof(t)&_IOCPARM_MASK)<<16)|('x'<<8)|y)
+#define _IOWRN(x,y,t) (_IOC_INOUT|(((t)&_IOCPARM_MASK)<<16)|('x'<<8)|y)
+
+#endif /* !__sys_ioccom_h */
diff --git a/i386/include/mach/i386/kern_return.h b/i386/include/mach/i386/kern_return.h
new file mode 100644
index 0000000..8df41ca
--- /dev/null
+++ b/i386/include/mach/i386/kern_return.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern_return.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine-dependent kernel return definitions.
+ */
+
+#ifndef _MACH_I386_KERN_RETURN_H_
+#define _MACH_I386_KERN_RETURN_H_
+
+#ifndef __ASSEMBLER__
+typedef int kern_return_t;
+#endif /* __ASSEMBLER__ */
+#endif /* _MACH_I386_KERN_RETURN_H_ */
diff --git a/i386/include/mach/i386/mach_i386.defs b/i386/include/mach/i386/mach_i386.defs
new file mode 100644
index 0000000..965d5c3
--- /dev/null
+++ b/i386/include/mach/i386/mach_i386.defs
@@ -0,0 +1,113 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Special functions for i386.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach_i386 3800;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#ifdef MACH_I386_IMPORTS
+MACH_I386_IMPORTS
+#endif
+
+type descriptor_t = struct[2] of uint32_t;
+type descriptor_list_t = array[*] of descriptor_t;
+
+import <mach/machine/mach_i386_types.h>;
+
+#if KERNEL_SERVER
+simport <machine/io_perm.h>;
+#endif
+
+type io_port_t = MACH_MSG_TYPE_INTEGER_16;
+type io_perm_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: io_perm_t convert_port_to_io_perm(mach_port_t)
+ outtran: mach_port_t convert_io_perm_to_port(io_perm_t)
+ destructor: io_perm_deallocate(io_perm_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+skip; /* i386_io_port_add */
+skip; /* i386_io_port_remove */
+skip; /* i386_io_port_list */
+
+routine i386_set_ldt(
+ target_thread : thread_t;
+ first_selector : int;
+ desc_list : descriptor_list_t, serverCopy);
+
+routine i386_get_ldt(
+ target_thread : thread_t;
+ first_selector : int;
+ selector_count : int;
+ out desc_list : descriptor_list_t);
+
+/* Request a new port IO_PERM that represents the capability to access
+ the I/O ports [FROM; TO] directly. MASTER_PORT is the master device port.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a task,
+ or FROM is greater than TO. */
+routine i386_io_perm_create(
+ master_port : mach_port_t;
+ from : io_port_t;
+ to : io_port_t;
+ out io_perm : io_perm_t);
+
+/* Modify the I/O permissions for TARGET_TASK. If ENABLE is TRUE, the
+ permission to access the I/O ports specified by IO_PERM is granted,
+ otherwise it is withdrawn.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a valid
+ task or IO_PERM not a valid I/O permission port. */
+routine i386_io_perm_modify(
+ target_task : task_t;
+ io_perm : io_perm_t;
+ enable : boolean_t);
+
+/* Modify one of a few available thread-specific segment descriptor slots.
+ The SELECTOR must be a value from a previous call (on any thread),
+ or -1 to allocate an available slot and return the segment selector for it.
+ These slots are copied into the CPU on each thread switch.
+ Returns KERN_NO_SPACE when there are no more slots available. */
+routine i386_set_gdt(
+ target_thread : thread_t;
+ inout selector : int;
+ desc : descriptor_t);
+
+/* Fetch a segment descriptor set with a prior i386_set_gdt call. */
+routine i386_get_gdt(
+ target_thread : thread_t;
+ selector : int;
+ out desc : descriptor_t);
diff --git a/i386/include/mach/i386/mach_i386_types.h b/i386/include/mach/i386/mach_i386_types.h
new file mode 100644
index 0000000..f5177fb
--- /dev/null
+++ b/i386/include/mach/i386/mach_i386_types.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Type definitions for i386 interface routines.
+ */
+
+#ifndef _MACH_MACH_I386_TYPES_H_
+#define _MACH_MACH_I386_TYPES_H_
+
+#ifndef __ASSEMBLER__
+/*
+ * i386 segment descriptor.
+ */
+struct descriptor {
+ unsigned int low_word;
+ unsigned int high_word;
+};
+
+typedef struct descriptor descriptor_t;
+typedef struct descriptor *descriptor_list_t;
+typedef const struct descriptor *const_descriptor_list_t;
+
+#endif /* !__ASSEMBLER__ */
+
+/*
+ * i386 I/O port
+ */
+
+#ifndef MACH_KERNEL
+typedef unsigned short io_port_t;
+typedef mach_port_t io_perm_t;
+#endif /* !MACH_KERNEL */
+
+#endif /* _MACH_MACH_I386_TYPES_H_ */
diff --git a/i386/include/mach/i386/machine_types.defs b/i386/include/mach/i386/machine_types.defs
new file mode 100755
index 0000000..76c7dcf
--- /dev/null
+++ b/i386/include/mach/i386/machine_types.defs
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/machine/machine_types.defs
+ * Author: Alessandro Forin
+ * Date: 7/92
+ *
+ * Header file for the basic, machine-dependent data types.
+ * Version for 32 bit architectures.
+ *
+ */
+
+#ifndef _MACHINE_MACHINE_TYPES_DEFS_
+#define _MACHINE_MACHINE_TYPES_DEFS_ 1
+
+/*
+ * A natural_t is the type for the native
+ * unsigned integer type, usually 32 bits. It is suitable for
+ * most counters with a small chance of overflow.
+ * While historically natural_t was meant to be the same
+ * as a pointer, that is not the case here.
+ */
+type natural_t = uint32_t;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+type integer_t = int32_t;
+
+/*
+ * long_natural_t and long_integer_t for kernel <-> userland interfaces as the
+ * size depends on the architecture of both kernel and userland.
+ */
+#if defined(KERNEL_SERVER) && defined(USER32)
+type rpc_long_natural_t = uint32_t;
+type rpc_long_integer_t = int32_t;
+#else /* KERNEL and USER32 */
+#if defined(__x86_64__)
+type rpc_long_natural_t = uint64_t;
+type rpc_long_integer_t = int64_t;
+#else
+type rpc_long_natural_t = uint32_t;
+type rpc_long_integer_t = int32_t;
+#endif /* __x86_64__ */
+#endif /* KERNEL_SERVER and USER32 */
+
+/*
+ * A long_natural_t is a possibly larger unsigned integer type than natural_t.
+ * Should be used instead of natural_t when we want the data to be less subject
+ * to overflows.
+ */
+type long_natural_t = rpc_long_natural_t
+#if defined(KERNEL_SERVER)
+ intran: long_natural_t convert_long_natural_from_user(rpc_long_natural_t)
+ outtran: rpc_long_natural_t convert_long_natural_to_user(long_natural_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_long_natural_t
+#endif
+ ;
+
+/*
+ * Larger version of integer_t. Only used when we want to hold possibly larger
+ * values than what is possible with integer_t.
+ */
+type long_integer_t = rpc_long_integer_t
+#if defined(KERNEL_SERVER)
+ intran: long_integer_t convert_long_integer_from_user(rpc_long_integer_t)
+ outtran: rpc_long_integer_t convert_long_integer_to_user(long_integer_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_long_integer_t
+#endif
+ ;
+
+/*
+ * Physical address size
+ */
+type rpc_phys_addr_t = uint64_t;
+type rpc_phys_addr_array_t = array[] of rpc_phys_addr_t;
+
+#endif /* _MACHINE_MACHINE_TYPES_DEFS_ */
diff --git a/i386/include/mach/i386/multiboot.h b/i386/include/mach/i386/multiboot.h
new file mode 100644
index 0000000..c3538c1
--- /dev/null
+++ b/i386/include/mach/i386/multiboot.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_I386_MULTIBOOT_H_
+#define _MACH_I386_MULTIBOOT_H_
+
+#include <mach/machine/vm_types.h>
+
+/* The entire multiboot_header must be contained
+ within the first MULTIBOOT_SEARCH bytes of the kernel image. */
+#define MULTIBOOT_SEARCH 8192
+
+/* Magic value identifying the multiboot_header. */
+#define MULTIBOOT_MAGIC 0x1badb002
+
+/* Features flags for 'flags'.
+ If a boot loader sees a flag in MULTIBOOT_MUSTKNOW set
+ and it doesn't understand it, it must fail. */
+#define MULTIBOOT_MUSTKNOW 0x0000ffff
+
+/* Align all boot modules on page (4KB) boundaries. */
+#define MULTIBOOT_PAGE_ALIGN 0x00000001
+
+/* Must be provided memory information in multiboot_raw_info structure */
+#define MULTIBOOT_MEMORY_INFO 0x00000002
+
+/* Use the load address fields above instead of the ones in the a.out header
+ to figure out what to load where, and what to do afterwards.
+ This should only be needed for a.out kernel images
+ (ELF and other formats can generally provide the needed information). */
+#define MULTIBOOT_AOUT_KLUDGE 0x00010000
+
+/* The boot loader passes this value in register EAX to signal the kernel
+ that the multiboot method is being used */
+#define MULTIBOOT_VALID 0x2badb002
+
+
+
+#define MULTIBOOT_MEMORY 0x00000001
+#define MULTIBOOT_BOOT_DEVICE 0x00000002
+#define MULTIBOOT_CMDLINE 0x00000004
+#define MULTIBOOT_MODS 0x00000008
+#define MULTIBOOT_AOUT_SYMS 0x00000010
+#define MULTIBOOT_ELF_SHDR 0x00000020
+#define MULTIBOOT_MEM_MAP 0x00000040
+
+
+/* The mods_addr field above contains the physical address of the first
+ of 'mods_count' multiboot_module structures. */
+struct multiboot_module
+{
+ /* Physical start and end addresses of the module data itself. */
+ vm_offset_t mod_start;
+ vm_offset_t mod_end;
+
+ /* Arbitrary ASCII string associated with the module. */
+ vm_offset_t string;
+
+ /* Boot loader must set to 0; OS must ignore. */
+ unsigned reserved;
+};
+
+#ifdef __x86_64__
+/* The mods_addr field above contains the physical address of the first
+ of 'mods_count' multiboot_module structures. */
+struct multiboot32_module
+{
+ /* Physical start and end addresses of the module data itself. */
+ unsigned mod_start;
+ unsigned mod_end;
+
+ /* Arbitrary ASCII string associated with the module. */
+ unsigned string;
+
+ /* Boot loader must set to 0; OS must ignore. */
+ unsigned reserved;
+};
+#endif
+
+/* usable memory "Type", all others are reserved. */
+#define MB_ARD_MEMORY 1
+
+/*
+ * Copyright (c) 2010, 2012 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Versions used by the biosmem module.
+ */
+
+#include <kern/macros.h>
+
+/*
+ * Magic number provided by the OS to the boot loader.
+ */
+#define MULTIBOOT_OS_MAGIC 0x1badb002
+
+/*
+ * Multiboot flags requesting services from the boot loader.
+ */
+#define MULTIBOOT_OS_MEMORY_INFO 0x2
+
+#define MULTIBOOT_OS_FLAGS MULTIBOOT_OS_MEMORY_INFO
+
+/*
+ * Magic number to identify a multiboot compliant boot loader.
+ */
+#define MULTIBOOT_LOADER_MAGIC 0x2badb002
+
+/*
+ * Multiboot flags set by the boot loader.
+ */
+#define MULTIBOOT_LOADER_MEMORY 0x01
+#define MULTIBOOT_LOADER_CMDLINE 0x04
+#define MULTIBOOT_LOADER_MODULES 0x08
+#define MULTIBOOT_LOADER_SHDR 0x20
+#define MULTIBOOT_LOADER_MMAP 0x40
+
+/*
+ * A multiboot module.
+ */
+struct multiboot_raw_module {
+ uint32_t mod_start;
+ uint32_t mod_end;
+ uint32_t string;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * Memory map entry.
+ */
+struct multiboot_raw_mmap_entry {
+ uint32_t size;
+ uint64_t base_addr;
+ uint64_t length;
+ uint32_t type;
+} __packed;
+
+/*
+ * Multiboot information structure as passed by the boot loader.
+ */
+struct multiboot_raw_info {
+ uint32_t flags;
+ uint32_t mem_lower;
+ uint32_t mem_upper;
+ uint32_t unused0;
+ uint32_t cmdline;
+ uint32_t mods_count;
+ uint32_t mods_addr;
+ uint32_t shdr_num;
+ uint32_t shdr_size;
+ uint32_t shdr_addr;
+ uint32_t shdr_strndx;
+ uint32_t mmap_length;
+ uint32_t mmap_addr;
+ uint32_t unused1[9];
+} __packed;
+
+/*
+ * Versions of the multiboot structures suitable for use with 64-bit pointers.
+ */
+
+struct multiboot_os_module {
+ void *mod_start;
+ void *mod_end;
+ char *string;
+};
+
+struct multiboot_os_info {
+ uint32_t flags;
+ char *cmdline;
+ struct multiboot_module *mods_addr;
+ uint32_t mods_count;
+};
+
+#endif /* _MACH_I386_MULTIBOOT_H_ */
diff --git a/i386/include/mach/i386/syscall_sw.h b/i386/include/mach/i386/syscall_sw.h
new file mode 100644
index 0000000..9eeb293
--- /dev/null
+++ b/i386/include/mach/i386/syscall_sw.h
@@ -0,0 +1,39 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_SYSCALL_SW_H_
+#define _MACH_I386_SYSCALL_SW_H_
+
+#include <mach/machine/asm.h>
+
+#define kernel_trap(trap_name,trap_number,number_args) \
+ENTRY(trap_name) \
+ movl $ trap_number,%eax; \
+ SVC; \
+ ret; \
+END(trap_name)
+
+#endif /* _MACH_I386_SYSCALL_SW_H_ */
diff --git a/i386/include/mach/i386/thread_status.h b/i386/include/mach/i386/thread_status.h
new file mode 100644
index 0000000..94596a7
--- /dev/null
+++ b/i386/include/mach/i386/thread_status.h
@@ -0,0 +1,190 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: thread_status.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * This file contains the structure definitions for the thread
+ * state as applied to I386 processors.
+ */
+
+#ifndef _MACH_I386_THREAD_STATUS_H_
+#define _MACH_I386_THREAD_STATUS_H_
+
+#include <mach/machine/fp_reg.h>
+/*
+ * i386_thread_state this is the structure that is exported
+ * to user threads for use in status/mutate
+ * calls. This structure should never
+ * change.
+ *
+ * i386_float_state exported to user threads for access to
+ * floating point registers. Try not to
+ * change this one, either.
+ *
+ * i386_isa_port_map_state exported to user threads to allow
+ * selective in/out operations
+ *
+ */
+
+#define i386_THREAD_STATE 1
+#define i386_FLOAT_STATE 2
+#define i386_ISA_PORT_MAP_STATE 3
+#define i386_V86_ASSIST_STATE 4
+#define i386_REGS_SEGS_STATE 5
+#define i386_DEBUG_STATE 6
+#define i386_FSGS_BASE_STATE 7
+
+/*
+ * This structure is used for both
+ * i386_THREAD_STATE and i386_REGS_SEGS_STATE.
+ */
+struct i386_thread_state {
+#if defined(__x86_64__) && !defined(USER32)
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rdi;
+ uint64_t rsi;
+ uint64_t rbp;
+ uint64_t rsp;
+ uint64_t rbx;
+ uint64_t rdx;
+ uint64_t rcx;
+ uint64_t rax;
+ uint64_t rip;
+#else
+ unsigned int gs;
+ unsigned int fs;
+ unsigned int es;
+ unsigned int ds;
+
+ unsigned int edi;
+ unsigned int esi;
+ unsigned int ebp;
+ unsigned int esp;
+ unsigned int ebx;
+ unsigned int edx;
+ unsigned int ecx;
+ unsigned int eax;
+ unsigned int eip;
+#endif /* __x86_64__ && !USER32 */
+
+ unsigned int cs;
+#if defined(__x86_64__) && !defined(USER32)
+ uint64_t rfl;
+ uint64_t ursp;
+#else
+ unsigned int efl;
+ unsigned int uesp;
+#endif /* __x86_64__ && !USER32 */
+
+ unsigned int ss;
+};
+#define i386_THREAD_STATE_COUNT (sizeof (struct i386_thread_state)/sizeof(unsigned int))
+
+/*
+ * Floating point state.
+ *
+ * fpkind tells in what way floating point operations are supported.
+ * See the values for fp_kind in <mach/i386/fp_reg.h>.
+ *
+ * If the kind is FP_NO, then calls to set the state will fail, and
+ * thread_getstatus will return garbage for the rest of the state.
+ * If "initialized" is false, then the rest of the state is garbage.
+ * Clients can set "initialized" to false to force the coprocessor to
+ * be reset.
+ * "exc_status" is non-zero if the thread has noticed (but not
+ * proceeded from) a coprocessor exception. It contains the status
+ * word with the exception bits set. The status word in "fp_status"
+ * will have the exception bits turned off. If an exception bit in
+ * "fp_status" is turned on, then "exc_status" should be zero. This
+ * happens when the coprocessor exception is noticed after the system
+ * has context switched to some other thread.
+ *
+ * If kind is FP_387, then "state" is a i387_state. Other kinds might
+ * also use i387_state, but somebody will have to verify it (XXX).
+ * Note that the registers are ordered from top-of-stack down, not
+ * according to physical register number.
+ */
+
+#define FP_STATE_BYTES \
+ (sizeof (struct i386_fp_save) + sizeof (struct i386_fp_regs))
+
+struct i386_float_state {
+ int fpkind; /* FP_NO..FP_387X (readonly) */
+ int initialized;
+ unsigned char hw_state[FP_STATE_BYTES]; /* actual "hardware" state */
+ int exc_status; /* exception status (readonly) */
+};
+#define i386_FLOAT_STATE_COUNT (sizeof(struct i386_float_state)/sizeof(unsigned int))
+
+
+#define PORT_MAP_BITS 0x400
+struct i386_isa_port_map_state {
+ unsigned char pm[PORT_MAP_BITS>>3];
+};
+
+#define i386_ISA_PORT_MAP_STATE_COUNT (sizeof(struct i386_isa_port_map_state)/sizeof(unsigned int))
+
+/*
+ * V8086 assist supplies a pointer to an interrupt
+ * descriptor table in task space.
+ */
+struct i386_v86_assist_state {
+ unsigned int int_table; /* interrupt table address */
+ int int_count; /* interrupt table size */
+};
+
+struct v86_interrupt_table {
+ unsigned int count; /* count of pending interrupts */
+ unsigned short mask; /* ignore this interrupt if true */
+ unsigned short vec; /* vector to take */
+};
+
+#define i386_V86_ASSIST_STATE_COUNT \
+ (sizeof(struct i386_v86_assist_state)/sizeof(unsigned int))
+
+struct i386_debug_state {
+ unsigned int dr[8];
+};
+#define i386_DEBUG_STATE_COUNT \
+ (sizeof(struct i386_debug_state)/sizeof(unsigned int))
+
+struct i386_fsgs_base_state {
+ unsigned long fs_base;
+ unsigned long gs_base;
+};
+#define i386_FSGS_BASE_STATE_COUNT \
+ (sizeof(struct i386_fsgs_base_state)/sizeof(unsigned int))
+
+#endif /* _MACH_I386_THREAD_STATUS_H_ */
diff --git a/i386/include/mach/i386/trap.h b/i386/include/mach/i386/trap.h
new file mode 100644
index 0000000..70b28fe
--- /dev/null
+++ b/i386/include/mach/i386/trap.h
@@ -0,0 +1,60 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_TRAP_H_
+#define _MACH_I386_TRAP_H_
+
+/*
+ * Hardware trap vectors for i386.
+ */
+#define T_DIVIDE_ERROR 0
+#define T_DEBUG 1
+#define T_NMI 2 /* non-maskable interrupt */
+#define T_INT3 3 /* int 3 instruction */
+#define T_OVERFLOW 4 /* overflow test */
+#define T_OUT_OF_BOUNDS 5 /* bounds check */
+#define T_INVALID_OPCODE 6 /* invalid op code */
+#define T_NO_FPU 7 /* no floating point */
+#define T_DOUBLE_FAULT 8 /* double fault */
+#define T_FPU_FAULT 9
+/* 10 */
+#define T_SEGMENT_NOT_PRESENT 11
+#define T_STACK_FAULT 12
+#define T_GENERAL_PROTECTION 13
+#define T_PAGE_FAULT 14
+/* 15 */
+#define T_FLOATING_POINT_ERROR 16
+#define T_WATCHPOINT 17
+
+/*
+ * Page-fault trap codes.
+ */
+#define T_PF_PROT 0x1 /* protection violation */
+#define T_PF_WRITE 0x2 /* write access */
+#define T_PF_USER 0x4 /* from user state */
+
+
+#endif /* _MACH_I386_TRAP_H_ */
diff --git a/i386/include/mach/i386/vm_param.h b/i386/include/mach/i386/vm_param.h
new file mode 100644
index 0000000..3e5c18c
--- /dev/null
+++ b/i386/include/mach/i386/vm_param.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_param.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * I386 machine dependent virtual memory parameters.
+ * Most of the declarations are preceded by I386_ (or i386_)
+ * which is OK because only I386 specific code will be using
+ * them.
+ */
+
+#ifndef _MACH_I386_VM_PARAM_H_
+#define _MACH_I386_VM_PARAM_H_
+
+#include <mach/machine/vm_types.h>
+
+#define BYTE_SIZE 8 /* byte size in bits */
+
+#define I386_PGBYTES 4096 /* bytes per 80386 page */
+#define I386_PGSHIFT 12 /* number of bits to shift for pages */
+
+/* Virtual page size is the same as real page size - 4K is big enough. */
+#define PAGE_SHIFT I386_PGSHIFT
+
+/*
+ * Convert bytes to pages and convert pages to bytes.
+ * No rounding is used.
+ */
+
+#define i386_btop(x) (((phys_addr_t)(x)) >> I386_PGSHIFT)
+#define i386_ptob(x) (((phys_addr_t)(x)) << I386_PGSHIFT)
+
+/*
+ * Round off or truncate to the nearest page. These will work
+ * for either addresses or counts. (i.e. 1 byte rounds to 1 page
+ * bytes.)
+ */
+
+#define i386_round_page(x) ((((phys_addr_t)(x)) + I386_PGBYTES - 1) & \
+ ~(I386_PGBYTES-1))
+#define i386_trunc_page(x) (((phys_addr_t)(x)) & ~(I386_PGBYTES-1))
+
+/* User address spaces are 3GB each on a 32-bit kernel, starting at
+ virtual and linear address 0.
+ On a 64-bit kernel we split the address space in half, with the
+ lower 128TB for the user address space and the upper 128TB for the
+ kernel address space.
+
+ On a 32-bit kernel VM_MAX_ADDRESS can be reduced to leave more
+ space for the kernel, but must not be increased to more than 3GB as
+ glibc and hurd servers would not cope with that.
+ */
+#define VM_MIN_ADDRESS (0ULL)
+
+#ifdef __x86_64__
+#if defined(KERNEL) && defined(USER32)
+#define VM_MAX_ADDRESS (0xc0000000UL)
+#else /* defined(KERNEL) && defined(USER32) */
+#define VM_MAX_ADDRESS (0x800000000000ULL)
+#endif /* defined(KERNEL) && defined(USER32) */
+#else /* __x86_64__ */
+#define VM_MAX_ADDRESS (0xc0000000UL)
+#endif /* __x86_64__ */
+
+#endif /* _MACH_I386_VM_PARAM_H_ */
diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h
new file mode 100644
index 0000000..8f528ae
--- /dev/null
+++ b/i386/include/mach/i386/vm_types.h
@@ -0,0 +1,173 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_types.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Header file for VM data types. I386 version.
+ */
+
+#ifndef _MACHINE_VM_TYPES_H_
+#define _MACHINE_VM_TYPES_H_ 1
+
+#ifdef __ASSEMBLER__
+#else /* __ASSEMBLER__ */
+
+#include <stdint.h>
+
+#ifdef MACH_KERNEL
+#include <kern/assert.h>
+#endif
+
+/*
+ * A natural_t is the type for the native
+ * unsigned integer type, usually 32 bits. It is suitable for
+ * most counters with a small chance of overflow.
+ * While historically natural_t was meant to be the same
+ * as a pointer, that is not the case here.
+ */
+typedef unsigned int natural_t;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+typedef int integer_t;
+
+/*
+ * A long_natural_t is a possibly larger unsigned integer type than natural_t.
+ * Should be used instead of natural_t when we want the data to be less subject
+ * to overflows.
+ */
+typedef unsigned long long_natural_t;
+
+/*
+ * Larger version of integer_t. Only used when we want to hold possibly larger
+ * values than what is possible with integer_t.
+ */
+typedef long long_integer_t;
+
+/*
+ * A vm_offset_t is a type-neutral pointer,
+ * e.g. an offset into a virtual memory space.
+ */
+typedef uintptr_t vm_offset_t;
+typedef vm_offset_t * vm_offset_array_t;
+
+/*
+ * A type for physical addresses.
+ */
+#ifdef MACH_KERNEL
+#ifdef PAE
+typedef unsigned long long phys_addr_t;
+#else /* PAE */
+typedef unsigned long phys_addr_t;
+#endif /* PAE */
+#else
+typedef unsigned long long phys_addr_t;
+#endif
+typedef unsigned long long rpc_phys_addr_t;
+typedef rpc_phys_addr_t *rpc_phys_addr_array_t;
+
+/*
+ * A vm_size_t is the proper type for e.g.
+ * expressing the difference between two
+ * vm_offset_t entities.
+ */
+typedef uintptr_t vm_size_t;
+typedef vm_size_t * vm_size_array_t;
+
+/*
+ * rpc_types are for user/kernel interfaces. On kernel side they may differ from
+ * the native types, while on user space they shall be the same.
+ * These three types are always of the same size, so we can reuse the conversion
+ * functions.
+ */
+#if defined(MACH_KERNEL) && defined(USER32)
+typedef uint32_t rpc_uintptr_t;
+typedef uint32_t rpc_vm_address_t;
+typedef uint32_t rpc_vm_offset_t;
+typedef uint32_t rpc_vm_size_t;
+
+static inline uint64_t convert_vm_from_user(uint32_t uaddr)
+{
+ return (uint64_t)uaddr;
+}
+static inline uint32_t convert_vm_to_user(uint64_t kaddr)
+{
+ assert(kaddr <= 0xFFFFFFFF);
+ return (uint32_t)kaddr;
+}
+
+typedef uint32_t rpc_long_natural_t;
+typedef int32_t rpc_long_integer_t;
+
+static inline int64_t convert_long_integer_from_user(int32_t i)
+{
+ return (int64_t)i;
+}
+static inline int32_t convert_long_integer_to_user(int64_t i)
+{
+ assert(i <= 0x7FFFFFFF);
+ return (int32_t)i;
+}
+typedef uint32_t rpc_long_natural_t;
+typedef int32_t rpc_long_integer_t;
+#else /* MACH_KERNEL */
+typedef uintptr_t rpc_uintptr_t;
+typedef vm_offset_t rpc_vm_address_t;
+typedef vm_offset_t rpc_vm_offset_t;
+typedef vm_size_t rpc_vm_size_t;
+
+#define convert_vm_to_user null_conversion
+#define convert_vm_from_user null_conversion
+
+typedef long_natural_t rpc_long_natural_t;
+typedef long_integer_t rpc_long_integer_t;
+
+#define convert_long_integer_to_user null_conversion
+#define convert_long_integer_from_user null_conversion
+#endif /* MACH_KERNEL */
+
+#define convert_long_natural_to_user convert_vm_to_user
+#define convert_long_natural_from_user convert_vm_from_user
+
+typedef rpc_vm_size_t * rpc_vm_size_array_t;
+typedef rpc_vm_offset_t * rpc_vm_offset_array_t;
+
+#endif /* __ASSEMBLER__ */
+
+/*
+ * If composing messages by hand (please dont)
+ */
+
+#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32
+
+#endif /* _MACHINE_VM_TYPES_H_ */
diff --git a/i386/include/mach/sa/stdarg.h b/i386/include/mach/sa/stdarg.h
new file mode 100644
index 0000000..550fec4
--- /dev/null
+++ b/i386/include/mach/sa/stdarg.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University.
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _MACH_SA_STDARG_H_
+#define _MACH_SA_STDARG_H_
+
+#if __GNUC__ >= 3
+
+typedef __builtin_va_list va_list;
+
+#define va_start(v,l) __builtin_va_start(v,l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v,l) __builtin_va_arg(v,l)
+
+#else
+
+#define __va_size(type) ((sizeof(type)+sizeof(unsigned long)-1) & ~(sizeof(unsigned long)-1))
+
+#ifndef _VA_LIST_
+#define _VA_LIST_
+typedef char *va_list;
+#endif
+
+#define va_start(pvar, lastarg) \
+ ((pvar) = (char*)(void*)&(lastarg) + __va_size(lastarg))
+#define va_end(pvar)
+#define va_arg(pvar,type) \
+ ((pvar) += __va_size(type), \
+ *((type *)((pvar) - __va_size(type))))
+
+#endif
+
+#endif /* _MACH_SA_STDARG_H_ */
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
new file mode 100644
index 0000000..e43b06c
--- /dev/null
+++ b/i386/intel/pmap.c
@@ -0,0 +1,3325 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pmap.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * (These guys wrote the Vax version)
+ *
+ * Physical Map management code for Intel i386, and i486.
+ *
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <string.h>
+
+#include <mach/machine/vm_types.h>
+
+#include <mach/boolean.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/slab.h>
+
+#include <kern/lock.h>
+
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <i386/vm_param.h>
+#include <mach/vm_prot.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_user.h>
+
+#include <mach/machine/vm_param.h>
+#include <mach/xen.h>
+#include <machine/thread.h>
+#include <i386/cpu_number.h>
+#include <i386/proc_reg.h>
+#include <i386/locore.h>
+#include <i386/model_dep.h>
+#include <i386/spl.h>
+#include <i386at/biosmem.h>
+#include <i386at/model_dep.h>
+
+#if NCPUS > 1
+#include <i386/mp_desc.h>
+#endif
+
+#include <ddb/db_output.h>
+#include <machine/db_machdep.h>
+
+#ifdef MACH_PSEUDO_PHYS
+#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = pte_entry?pa_to_ma(pte_entry):0;
+#else /* MACH_PSEUDO_PHYS */
+#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
+#endif /* MACH_PSEUDO_PHYS */
+
+/*
+ * Private data structures.
+ */
+
+/*
+ * For each vm_page_t, there is a list of all currently
+ * valid virtual mappings of that page. An entry is
+ * a pv_entry_t; the list is the pv_table.
+ */
+
+typedef struct pv_entry {
+ struct pv_entry *next; /* next pv_entry */
+ pmap_t pmap; /* pmap where mapping lies */
+ vm_offset_t va; /* virtual address for mapping */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+pv_entry_t pv_head_table; /* array of entries, one per page */
+
+/*
+ * pv_list entries are kept on a list that can only be accessed
+ * with the pmap system locked (at SPLVM, not in the cpus_active set).
+ * The list is refilled from the pv_list_cache if it becomes empty.
+ */
+pv_entry_t pv_free_list; /* free list at SPLVM */
+def_simple_lock_data(static, pv_free_list_lock)
+
+#define PV_ALLOC(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ if ((pv_e = pv_free_list) != 0) { \
+ pv_free_list = pv_e->next; \
+ } \
+ simple_unlock(&pv_free_list_lock); \
+}
+
+#define PV_FREE(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ pv_e->next = pv_free_list; \
+ pv_free_list = pv_e; \
+ simple_unlock(&pv_free_list_lock); \
+}
+
+struct kmem_cache pv_list_cache; /* cache of pv_entry structures */
+
+/*
+ * Each entry in the pv_head_table is locked by a bit in the
+ * pv_lock_table. The lock bits are accessed by the physical
+ * address of the page they lock.
+ */
+
+char *pv_lock_table; /* pointer to array of bits */
+#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
+
+/* Has pmap_init completed? */
+boolean_t pmap_initialized = FALSE;
+
+/*
+ * Range of kernel virtual addresses available for kernel memory mapping.
+ * Does not include the virtual addresses used to map physical memory 1-1.
+ * Initialized by pmap_bootstrap.
+ */
+vm_offset_t kernel_virtual_start;
+vm_offset_t kernel_virtual_end;
+
+/*
+ * Index into pv_head table, its lock bits, and the modify/reference
+ * bits.
+ */
+#define pa_index(pa) vm_page_table_index(pa)
+
+#define pai_to_pvh(pai) (&pv_head_table[pai])
+#define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table))
+#define unlock_pvh_pai(pai) (bit_unlock(pai, pv_lock_table))
+
+/*
+ * Array of physical page attributes for managed pages.
+ * One byte per physical page.
+ */
+char *pmap_phys_attributes;
+
+/*
+ * Physical page attributes. Copy bits from PTE definition.
+ */
+#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
+#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
+
+/*
+ * Amount of virtual memory mapped by one
+ * page-directory entry.
+ */
+#define PDE_MAPPED_SIZE (pdenum2lin(1))
+
+/*
+ * We allocate page table pages directly from the VM system
+ * through this object. It maps physical memory.
+ */
+vm_object_t pmap_object = VM_OBJECT_NULL;
+
+/*
+ * Locking and TLB invalidation
+ */
+
+/*
+ * Locking Protocols:
+ *
+ * There are two structures in the pmap module that need locking:
+ * the pmaps themselves, and the per-page pv_lists (which are locked
+ * by locking the pv_lock_table entry that corresponds to the pv_head
+ * for the list in question.) Most routines want to lock a pmap and
+ * then do operations in it that require pv_list locking -- however
+ * pmap_remove_all and pmap_copy_on_write operate on a physical page
+ * basis and want to do the locking in the reverse order, i.e. lock
+ * a pv_list and then go through all the pmaps referenced by that list.
+ * To protect against deadlock between these two cases, the pmap_lock
+ * is used. There are three different locking protocols as a result:
+ *
+ * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
+ * the pmap.
+ *
+ * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
+ * lock on the pmap_lock (shared read), then lock the pmap
+ * and finally the pv_lists as needed [i.e. pmap lock before
+ * pv_list lock.]
+ *
+ * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
+ * Get a write lock on the pmap_lock (exclusive write); this
+ * also guarantees exclusive access to the pv_lists. Lock the
+ * pmaps as needed.
+ *
+ * At no time may any routine hold more than one pmap lock or more than
+ * one pv_list lock. Because interrupt level routines can allocate
+ * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
+ * kernel_pmap can only be held at splvm.
+ */
+
+#if NCPUS > 1
+/*
+ * We raise the interrupt level to splvm, to block interprocessor
+ * interrupts during pmap operations. We must take the CPU out of
+ * the cpus_active set while interrupts are blocked.
+ */
+#define SPLVM(spl) { \
+ spl = splvm(); \
+ i_bit_clear(cpu_number(), &cpus_active); \
+}
+
+#define SPLX(spl) { \
+ i_bit_set(cpu_number(), &cpus_active); \
+ splx(spl); \
+}
+
+/*
+ * Lock on pmap system
+ */
+lock_data_t pmap_system_lock;
+
+#define PMAP_READ_LOCK(pmap, spl) { \
+ SPLVM(spl); \
+ lock_read(&pmap_system_lock); \
+ simple_lock(&(pmap)->lock); \
+}
+
+#define PMAP_WRITE_LOCK(spl) { \
+ SPLVM(spl); \
+ lock_write(&pmap_system_lock); \
+}
+
+#define PMAP_READ_UNLOCK(pmap, spl) { \
+ simple_unlock(&(pmap)->lock); \
+ lock_read_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_UNLOCK(spl) { \
+ lock_write_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
+ simple_lock(&(pmap)->lock); \
+ lock_write_to_read(&pmap_system_lock); \
+}
+
+#define LOCK_PVH(index) (lock_pvh_pai(index))
+
+#define UNLOCK_PVH(index) (unlock_pvh_pai(index))
+
+#define PMAP_UPDATE_TLBS(pmap, s, e) \
+{ \
+ cpu_set cpu_mask = 1 << cpu_number(); \
+ cpu_set users; \
+ \
+ /* Since the pmap is locked, other updates are locked */ \
+ /* out, and any pmap_activate has finished. */ \
+ \
+ /* find other cpus using the pmap */ \
+ users = (pmap)->cpus_using & ~cpu_mask; \
+ if (users) { \
+ /* signal them, and wait for them to finish */ \
+ /* using the pmap */ \
+ signal_cpus(users, (pmap), (s), (e)); \
+ while ((pmap)->cpus_using & cpus_active & ~cpu_mask) \
+ cpu_pause(); \
+ } \
+ \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using & cpu_mask) { \
+ INVALIDATE_TLB((pmap), (s), (e)); \
+ } \
+}
+
+#else /* NCPUS > 1 */
+
+#define SPLVM(spl) ((void)(spl))
+#define SPLX(spl) ((void)(spl))
+
+#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
+#define PMAP_WRITE_LOCK(spl) SPLVM(spl)
+#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
+#define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
+#define PMAP_WRITE_TO_READ_LOCK(pmap)
+
+#define LOCK_PVH(index)
+#define UNLOCK_PVH(index)
+
+#define PMAP_UPDATE_TLBS(pmap, s, e) { \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using) { \
+ INVALIDATE_TLB((pmap), (s), (e)); \
+ } \
+}
+
+#endif /* NCPUS > 1 */
+
+#ifdef MACH_PV_PAGETABLES
+#define INVALIDATE_TLB(pmap, s, e) do { \
+ if (__builtin_constant_p((e) - (s)) \
+ && (e) - (s) == PAGE_SIZE) \
+ hyp_invlpg((pmap) == kernel_pmap ? kvtolin(s) : (s)); \
+ else \
+ hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL); \
+} while(0)
+#else /* MACH_PV_PAGETABLES */
+/* It is hard to know when a TLB flush becomes less expensive than a bunch of
+ * invlpgs. But it surely is more expensive than just one invlpg. */
+#define INVALIDATE_TLB(pmap, s, e) do { \
+ if (__builtin_constant_p((e) - (s)) \
+ && (e) - (s) == PAGE_SIZE) \
+ invlpg_linear((pmap) == kernel_pmap ? kvtolin(s) : (s)); \
+ else \
+ flush_tlb(); \
+} while (0)
+#endif /* MACH_PV_PAGETABLES */
+
+
+#if NCPUS > 1
+/*
+ * Structures to keep track of pending TLB invalidations
+ */
+
+#define UPDATE_LIST_SIZE 4
+
+struct pmap_update_item {
+ pmap_t pmap; /* pmap to invalidate */
+ vm_offset_t start; /* start address to invalidate */
+ vm_offset_t end; /* end address to invalidate */
+} ;
+
+typedef struct pmap_update_item *pmap_update_item_t;
+
+/*
+ * List of pmap updates. If the list overflows,
+ * the last entry is changed to invalidate all.
+ */
+struct pmap_update_list {
+ decl_simple_lock_data(, lock)
+ int count;
+ struct pmap_update_item item[UPDATE_LIST_SIZE];
+} ;
+typedef struct pmap_update_list *pmap_update_list_t;
+
+struct pmap_update_list cpu_update_list[NCPUS];
+
+cpu_set cpus_active;
+cpu_set cpus_idle;
+volatile
+boolean_t cpu_update_needed[NCPUS];
+
+#endif /* NCPUS > 1 */
+
+/*
+ * Other useful macros.
+ */
+/* Pmap of the currently running thread's task. */
+#define current_pmap() (vm_map_pmap(current_thread()->task->map))
+/* TRUE if `cpu' currently has `pmap' marked in use (cpus_using bitmask). */
+#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
+
+/* The kernel pmap is statically allocated (see pmap_bootstrap). */
+struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+struct kmem_cache pmap_cache; /* cache of pmap structures */
+struct kmem_cache pt_cache; /* cache of page tables */
+struct kmem_cache pd_cache; /* cache of page directories */
+#if PAE
+struct kmem_cache pdpt_cache; /* cache of page directory pointer tables */
+#ifdef __x86_64__
+struct kmem_cache l4_cache; /* cache of L4 tables */
+#endif /* __x86_64__ */
+#endif /* PAE */
+
+boolean_t pmap_debug = FALSE; /* flag for debugging prints */
+
+#if 0
+int ptes_per_vm_page; /* number of hardware ptes needed
+ to map one VM page. */
+#else
+/* VM page size equals the hardware page size: one pte per VM page. */
+#define ptes_per_vm_page 1
+#endif
+
+unsigned int inuse_ptepages_count = 0; /* debugging */
+
+/*
+ * Pointer to the basic page directory for the kernel.
+ * Initialized by pmap_bootstrap().
+ */
+pt_entry_t *kernel_page_dir;
+
+/*
+ * Two slots for temporary physical page mapping, to allow for
+ * physical-to-physical transfers.
+ */
+/* PMAP_NMAPWINDOWS windows per CPU, carved from the top of kernel VM. */
+static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS * NCPUS];
+#define MAPWINDOW_SIZE (PMAP_NMAPWINDOWS * NCPUS * PAGE_SIZE)
+
+#ifdef __x86_64__
+/* Return the address of the L4 (top-level) entry covering `lin_addr'. */
+static inline pt_entry_t *
+pmap_l4base(const pmap_t pmap, vm_offset_t lin_addr)
+{
+ return &pmap->l4base[lin2l4num(lin_addr)];
+}
+#endif
+
+#ifdef PAE
+/*
+ * Return the address of the page-directory-pointer entry covering
+ * `lin_addr' in `pmap'.  On x86_64 this walks the L4 table first and
+ * returns PT_ENTRY_NULL if the covering L4 entry is absent or invalid;
+ * on 32-bit PAE the pdp table is always present (pmap->pdpbase).
+ */
+static inline pt_entry_t *
+pmap_ptp(const pmap_t pmap, vm_offset_t lin_addr)
+{
+ pt_entry_t *pdp_table;
+#ifdef __x86_64__
+ pt_entry_t *l4_table;
+ l4_table = pmap_l4base(pmap, lin_addr);
+ if (l4_table == PT_ENTRY_NULL)
+ return(PT_ENTRY_NULL);
+ pt_entry_t pdp = *l4_table;
+ if ((pdp & INTEL_PTE_VALID) == 0)
+ return PT_ENTRY_NULL;
+ pdp_table = (pt_entry_t *) ptetokv(pdp);
+#else /* __x86_64__ */
+ pdp_table = pmap->pdpbase;
+#endif /* __x86_64__ */
+ return &pdp_table[lin2pdpnum(lin_addr)];
+}
+#endif
+
+/*
+ * Return the address of the page-directory entry for `addr' in `pmap',
+ * or PT_ENTRY_NULL when an upper-level table is missing (PAE only).
+ * Kernel-pmap virtual addresses are converted to linear addresses first.
+ */
+static inline pt_entry_t *
+pmap_pde(const pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *page_dir;
+ if (pmap == kernel_pmap)
+ addr = kvtolin(addr);
+#if PAE
+ pt_entry_t *pdp_table;
+ pdp_table = pmap_ptp(pmap, addr);
+ if (pdp_table == PT_ENTRY_NULL)
+ return(PT_ENTRY_NULL);
+ pt_entry_t pde = *pdp_table;
+ if ((pde & INTEL_PTE_VALID) == 0)
+ return PT_ENTRY_NULL;
+ page_dir = (pt_entry_t *) ptetokv(pde);
+#else /* PAE */
+ page_dir = pmap->dirbase;
+#endif /* PAE */
+ return &page_dir[lin2pdenum(addr)];
+}
+
+/*
+ * Given an offset and a map, compute the address of the
+ * pte. If the address is invalid with respect to the map
+ * then PT_ENTRY_NULL is returned (and the map may need to grow).
+ *
+ * This is only used internally.
+ */
+pt_entry_t *
+pmap_pte(const pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *ptp;
+ pt_entry_t pte;
+
+ /* Bail out early if the pmap's top-level table was never set up. */
+#ifdef __x86_64__
+ if (pmap->l4base == 0)
+ return(PT_ENTRY_NULL);
+#elif PAE
+ if (pmap->pdpbase == 0)
+ return(PT_ENTRY_NULL);
+#else
+ if (pmap->dirbase == 0)
+ return(PT_ENTRY_NULL);
+#endif
+ /* Walk down to the page directory entry... */
+ ptp = pmap_pde(pmap, addr);
+ if (ptp == 0)
+ return(PT_ENTRY_NULL);
+ pte = *ptp;
+ if ((pte & INTEL_PTE_VALID) == 0)
+ return(PT_ENTRY_NULL);
+ /* ...then into the page table it points at. */
+ ptp = (pt_entry_t *)ptetokv(pte);
+ return(&ptp[ptenum(addr)]);
+}
+
+#define DEBUG_PTE_PAGE 0
+
+#if DEBUG_PTE_PAGE
+/*
+ * Debug-only consistency check: recount the in-use and wired ptes of a
+ * pte page and panic if they disagree with the cached counters.
+ *
+ * NOTE(review): compiled out by DEBUG_PTE_PAGE == 0 above; the
+ * pte->pfn / pte->wired field accesses look like a stale struct-style
+ * pte layout — verify before ever enabling this.
+ */
+void ptep_check(ptep_t ptep)
+{
+ pt_entry_t *pte, *epte;
+ int ctu, ctw;
+
+ /* check the use and wired counts */
+ if (ptep == PTE_PAGE_NULL)
+ return;
+ pte = pmap_pte(ptep->pmap, ptep->va);
+ epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
+ ctu = 0;
+ ctw = 0;
+ while (pte < epte) {
+ if (pte->pfn != 0) {
+ ctu++;
+ if (pte->wired)
+ ctw++;
+ }
+ pte += ptes_per_vm_page;
+ }
+
+ if (ctu != ptep->use_count || ctw != ptep->wired_count) {
+ printf("use %d wired %d - actual use %d wired %d\n",
+ ptep->use_count, ptep->wired_count, ctu, ctw);
+ panic("pte count");
+ }
+}
+#endif /* DEBUG_PTE_PAGE */
+
+/*
+ * Back-door routine for mapping kernel VM at initialization.
+ * Useful for mapping memory outside the range of direct mapped
+ * physical memory (i.e., devices).
+ *
+ * Maps [start, end) physical at `virt' with protection `prot',
+ * uncached/write-through, and returns the first virtual address
+ * past the mapped range.  Panics if a pte page is missing.
+ */
+vm_offset_t pmap_map_bd(
+ vm_offset_t virt,
+ phys_addr_t start,
+ phys_addr_t end,
+ vm_prot_t prot)
+{
+ pt_entry_t template;
+ pt_entry_t *pte;
+ int spl;
+#ifdef MACH_PV_PAGETABLES
+ /* Under Xen, pte writes are batched into one hypercall. */
+ int n, i = 0;
+ struct mmu_update update[HYP_BATCH_MMU_UPDATES];
+#endif /* MACH_PV_PAGETABLES */
+
+ template = pa_to_pte(start)
+ | INTEL_PTE_NCACHE|INTEL_PTE_WTHRU
+ | INTEL_PTE_VALID;
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ template |= INTEL_PTE_GLOBAL;
+ if (prot & VM_PROT_WRITE)
+ template |= INTEL_PTE_WRITE;
+
+ PMAP_READ_LOCK(kernel_pmap, spl);
+ while (start < end) {
+ pte = pmap_pte(kernel_pmap, virt);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_map_bd: Invalid kernel address\n");
+#ifdef MACH_PV_PAGETABLES
+ update[i].ptr = kv_to_ma(pte);
+ update[i].val = pa_to_ma(template);
+ i++;
+ /* Flush a full batch to the hypervisor. */
+ if (i == HYP_BATCH_MMU_UPDATES) {
+ hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+ if (n != i)
+ panic("couldn't pmap_map_bd\n");
+ i = 0;
+ }
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(pte, template)
+#endif /* MACH_PV_PAGETABLES */
+ pte_increment_pa(template);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+#ifdef MACH_PV_PAGETABLES
+ /* Flush the final partial batch (defensive overflow check; i is
+ * reset whenever it reaches HYP_BATCH_MMU_UPDATES above). */
+ if (i > HYP_BATCH_MMU_UPDATES)
+ panic("overflowed array in pmap_map_bd");
+ hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+ if (n != i)
+ panic("couldn't pmap_map_bd\n");
+#endif /* MACH_PV_PAGETABLES */
+ PMAP_READ_UNLOCK(kernel_pmap, spl);
+ return(virt);
+}
+
+#ifdef PAE
+/*
+ * PAE part of pmap_bootstrap(): allocate and wire together the kernel's
+ * upper-level paging structures — the L4 table (x86_64 only), the kernel
+ * page-directory-pointer table, and PDPNUM_KERNEL page directories —
+ * leaving kernel_page_dir pointing at the (virtually contiguous)
+ * directories.  Runs before paging is enabled.
+ */
+static void pmap_bootstrap_pae(void)
+{
+ vm_offset_t addr;
+ pt_entry_t *pdp_kernel;
+
+#ifdef __x86_64__
+#ifdef MACH_HYP
+ kernel_pmap->user_l4base = NULL;
+ kernel_pmap->user_pdpbase = NULL;
+#endif
+ kernel_pmap->l4base = (pt_entry_t*)phystokv(pmap_grab_page());
+ memset(kernel_pmap->l4base, 0, INTEL_PGBYTES);
+#else
+ const int PDPNUM_KERNEL = PDPNUM;
+#endif /* x86_64 */
+
+ /* Page directories must be physically contiguous. */
+ init_alloc_aligned(PDPNUM_KERNEL * INTEL_PGBYTES, &addr);
+ kernel_page_dir = (pt_entry_t*)phystokv(addr);
+ memset(kernel_page_dir, 0, PDPNUM_KERNEL * INTEL_PGBYTES);
+
+ pdp_kernel = (pt_entry_t*)phystokv(pmap_grab_page());
+ memset(pdp_kernel, 0, INTEL_PGBYTES);
+ /* Point the kernel pdp entries at the page directories. */
+ for (int i = 0; i < PDPNUM_KERNEL; i++) {
+ int pdp_index = i;
+#ifdef __x86_64__
+ pdp_index += lin2pdpnum(VM_MIN_KERNEL_ADDRESS);
+#endif
+ WRITE_PTE(&pdp_kernel[pdp_index],
+ pa_to_pte(_kvtophys((void *) kernel_page_dir
+ + i * INTEL_PGBYTES))
+ | INTEL_PTE_VALID
+#if (defined(__x86_64__) && !defined(MACH_HYP)) || defined(MACH_PV_PAGETABLES)
+ | INTEL_PTE_WRITE
+#endif
+ );
+ }
+
+#ifdef __x86_64__
+ /* only fill the kernel pdpte during bootstrap */
+ WRITE_PTE(&kernel_pmap->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
+ pa_to_pte(_kvtophys(pdp_kernel)) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readonly_init(kernel_pmap->l4base);
+#endif /* MACH_PV_PAGETABLES */
+#else /* x86_64 */
+ kernel_pmap->pdpbase = pdp_kernel;
+#endif /* x86_64 */
+}
+#endif /* PAE */
+
+#ifdef MACH_PV_PAGETABLES
+/* Number of supplementary L1 tables to map for bootstrap. */
+#ifdef PAE
+#define NSUP_L1 4
+#else
+#define NSUP_L1 1
+#endif
+/*
+ * Xen part of pmap_bootstrap(): extend the bootstrap page tables set up
+ * by the hypervisor with up to NSUP_L1 extra L1 tables, so enough linear
+ * memory is mapped to build the full kernel pagetable.  Fills l1_map[]
+ * with the kernel virtual addresses of the added (pinned, read-only)
+ * L1 tables.
+ */
+static void pmap_bootstrap_xen(pt_entry_t *l1_map[NSUP_L1])
+{
+ /* We don't actually deal with the CR3 register content at all */
+ hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
+ /*
+ * Xen may only provide as few as 512KB extra bootstrap linear memory,
+ * which is far from enough to map all available memory, so we need to
+ * map more bootstrap linear memory. We here map 1 (resp. 4 for PAE)
+ * other L1 table(s), thus 4MiB extra memory (resp. 8MiB), which is
+ * enough for a pagetable mapping 4GiB.
+ */
+ vm_offset_t la;
+ int n_l1map;
+ /* Note: the `la >= VM_MIN_KERNEL_ADDRESS' condition terminates the
+ * scan when `la' wraps around the top of the address space. */
+ for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS; la >= VM_MIN_KERNEL_ADDRESS; la += NPTES * PAGE_SIZE) {
+ pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
+#ifdef PAE
+#ifdef __x86_64__
+ base = (pt_entry_t*) ptetokv(base[0]);
+#endif /* x86_64 */
+ pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[lin2pdpnum(la)]);
+#else /* PAE */
+ pt_entry_t *l2_map = base;
+#endif /* PAE */
+ /* Like lin2pdenum, but works with non-contiguous boot L3 */
+ l2_map += (la >> PDESHIFT) & PDEMASK;
+ if (!(*l2_map & INTEL_PTE_VALID)) {
+ struct mmu_update update;
+ unsigned j, n;
+
+ /* Fill a fresh L1 table with a 1:1 pfn mapping, pin it
+ * read-only, then hook it into the boot L2. */
+ l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page());
+ for (j = 0; j < NPTES; j++)
+ l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ pmap_set_page_readonly_init(l1_map[n_l1map]);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
+ panic("couldn't pin page %p(%lx)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
+ update.ptr = kv_to_ma(l2_map);
+ update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
+ if (n != 1)
+ panic("couldn't complete bootstrap map");
+ /* added the last L1 table, can stop */
+ if (++n_l1map >= NSUP_L1)
+ break;
+ }
+ }
+}
+#endif /* MACH_PV_PAGETABLES */
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Allocate the kernel page directory and page tables,
+ * and direct-map all physical memory.
+ * Called with mapping off.
+ */
+void pmap_bootstrap(void)
+{
+ /*
+ * Mapping is turned off; we must reference only physical addresses.
+ * The load image of the system is to be mapped 1-1 physical = virtual.
+ */
+
+ /*
+ * Set ptes_per_vm_page for general use.
+ */
+#if 0
+ ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
+#endif
+
+ /*
+ * The kernel's pmap is statically allocated so we don't
+ * have to use pmap_create, which is unlikely to work
+ * correctly at this part of the boot sequence.
+ */
+
+ kernel_pmap = &kernel_pmap_store;
+
+#if NCPUS > 1
+ lock_init(&pmap_system_lock, FALSE); /* NOT a sleep lock */
+#endif /* NCPUS > 1 */
+
+ simple_lock_init(&kernel_pmap->lock);
+
+ kernel_pmap->ref_count = 1;
+
+ /*
+ * Determine the kernel virtual address range.
+ * It starts at the end of the physical memory
+ * mapped into the kernel address space,
+ * and extends to a stupid arbitrary limit beyond that.
+ */
+ kernel_virtual_start = phystokv(biosmem_directmap_end());
+ kernel_virtual_end = kernel_virtual_start + VM_KERNEL_MAP_SIZE;
+
+ /* Clamp against overflow and the architectural limit. */
+ if (kernel_virtual_end < kernel_virtual_start
+ || kernel_virtual_end > VM_MAX_KERNEL_ADDRESS - PAGE_SIZE)
+ kernel_virtual_end = VM_MAX_KERNEL_ADDRESS - PAGE_SIZE;
+
+ /*
+ * Allocate and clear a kernel page directory.
+ */
+ /* Note: initial Xen mapping holds at least 512kB free mapped page.
+ * We use that for directly building our linear mapping. */
+#if PAE
+ pmap_bootstrap_pae();
+#else /* PAE */
+ kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page());
+ {
+ unsigned i;
+ for (i = 0; i < NPDES; i++)
+ kernel_page_dir[i] = 0;
+ }
+#endif /* PAE */
+
+#ifdef MACH_PV_PAGETABLES
+ pt_entry_t *l1_map[NSUP_L1];
+ pmap_bootstrap_xen(l1_map);
+#endif /* MACH_PV_PAGETABLES */
+
+ /*
+ * Allocate and set up the kernel page tables.
+ */
+ {
+ vm_offset_t va;
+ pt_entry_t global = CPU_HAS_FEATURE(CPU_FEATURE_PGE) ? INTEL_PTE_GLOBAL : 0;
+
+ /*
+ * Map virtual memory for all directly mappable physical memory, 1-1,
+ * Make any mappings completely in the kernel's text segment read-only.
+ *
+ * Also allocate some additional all-null page tables afterwards
+ * for kernel virtual memory allocation,
+ * because this PMAP module is too stupid
+ * to allocate new kernel page tables later.
+ * XX fix this
+ */
+ /* The `va >= phystokv(0)' clause guards against wraparound. */
+ for (va = phystokv(0); va >= phystokv(0) && va < kernel_virtual_end; )
+ {
+ pt_entry_t *pde = kernel_page_dir + lin2pdenum_cont(kvtolin(va));
+ pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
+ pt_entry_t *pte;
+
+ /* Initialize the page directory entry. */
+ WRITE_PTE(pde, pa_to_pte((vm_offset_t)_kvtophys(ptable))
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+
+ /* Initialize the page table. */
+ for (pte = ptable; (va < phystokv(biosmem_directmap_end())) && (pte < ptable+NPTES); pte++)
+ {
+ if ((pte - ptable) < ptenum(va))
+ {
+ WRITE_PTE(pte, 0);
+ }
+ else
+#ifdef MACH_PV_PAGETABLES
+ if (va == (vm_offset_t) &hyp_shared_info)
+ {
+ *pte = boot_info.shared_info | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ va += INTEL_PGBYTES;
+ }
+ else
+#endif /* MACH_PV_PAGETABLES */
+ {
+ extern char _start[], etext[];
+
+ /* Kernel text (and, under Xen, the boot
+ * pagetable pages) are mapped read-only. */
+ if (((va >= (vm_offset_t) _start)
+ && (va + INTEL_PGBYTES <= (vm_offset_t)etext))
+#ifdef MACH_PV_PAGETABLES
+ || (va >= (vm_offset_t) boot_info.pt_base
+ && (va + INTEL_PGBYTES <=
+ (vm_offset_t) ptable + INTEL_PGBYTES))
+#endif /* MACH_PV_PAGETABLES */
+ )
+ {
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
+ | INTEL_PTE_VALID | global);
+ }
+ else
+ {
+#ifdef MACH_PV_PAGETABLES
+ /* Keep supplementary L1 pages read-only */
+ int i;
+ for (i = 0; i < NSUP_L1; i++)
+ if (va == (vm_offset_t) l1_map[i]) {
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
+ | INTEL_PTE_VALID | global);
+ break;
+ }
+ if (i == NSUP_L1)
+#endif /* MACH_PV_PAGETABLES */
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE | global)
+
+ }
+ va += INTEL_PGBYTES;
+ }
+ }
+ /* Zero-fill the remainder of the table; the ptes falling in
+ * the top MAPWINDOW_SIZE of kernel VM are recorded as the
+ * per-CPU temporary mapping windows. */
+ for (; pte < ptable+NPTES; pte++)
+ {
+ if (va >= kernel_virtual_end - MAPWINDOW_SIZE && va < kernel_virtual_end)
+ {
+ pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - MAPWINDOW_SIZE))];
+ win->entry = pte;
+ win->vaddr = va;
+ }
+ WRITE_PTE(pte, 0);
+ va += INTEL_PGBYTES;
+ }
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readonly_init(ptable);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
+ panic("couldn't pin page %p(%lx)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
+#endif /* MACH_PV_PAGETABLES */
+ }
+ }
+
+ /* Architecture-specific code will turn on paging
+ soon after we return from here. */
+}
+
+#ifdef MACH_PV_PAGETABLES
+/* These are only required because of Xen security policies */
+
+/* Set back a page read write */
+/* Both the given mapping and (if different) the canonical phystokv()
+ * mapping of the underlying frame are made writable again. */
+void pmap_set_page_readwrite(void *_vaddr) {
+ vm_offset_t vaddr = (vm_offset_t) _vaddr;
+ phys_addr_t paddr = kvtophys(vaddr);
+ vm_offset_t canon_vaddr = phystokv(paddr);
+ if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
+ panic("couldn't set hiMMU readwrite for addr %lx(%lx)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
+ if (canon_vaddr != vaddr)
+ if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
+ panic("couldn't set hiMMU readwrite for paddr %lx(%lx)\n", canon_vaddr, (vm_offset_t) pa_to_ma (paddr));
+}
+
+/* Set a page read only (so as to pin it for instance) */
+/* Only mappings whose page directory entry is present are touched;
+ * the canonical phystokv() alias is downgraded as well. */
+void pmap_set_page_readonly(void *_vaddr) {
+ vm_offset_t vaddr = (vm_offset_t) _vaddr;
+ phys_addr_t paddr = kvtophys(vaddr);
+ vm_offset_t canon_vaddr = phystokv(paddr);
+ if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
+ if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
+ panic("couldn't set hiMMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
+ }
+ if (canon_vaddr != vaddr &&
+ *pmap_pde(kernel_pmap, canon_vaddr) & INTEL_PTE_VALID) {
+ if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
+ panic("couldn't set hiMMU readonly for vaddr %lx canon_vaddr %lx paddr %lx (%lx)\n", vaddr, canon_vaddr, paddr, (vm_offset_t) pa_to_ma (paddr));
+ }
+}
+
+/* This needs to be called instead of pmap_set_page_readonly as long as RC3
+ * still points to the bootstrap dirbase, to also fix the bootstrap table. */
+void pmap_set_page_readonly_init(void *_vaddr) {
+ vm_offset_t vaddr = (vm_offset_t) _vaddr;
+#if PAE
+ pt_entry_t *pdpbase = (void*) boot_info.pt_base;
+#ifdef __x86_64__
+ pdpbase = (pt_entry_t *) ptetokv(pdpbase[lin2l4num(vaddr)]);
+#endif
+ /* The bootstrap table does not necessarily use contiguous pages for the pde tables */
+ pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)]);
+#else
+ pt_entry_t *dirbase = (void*) boot_info.pt_base;
+#endif
+ /* pde slot for `vaddr' within the bootstrap directory. */
+ pt_entry_t *pte = &dirbase[lin2pdenum(vaddr) & PTEMASK];
+ /* Modify our future kernel map (can't use update_va_mapping for this)... */
+ if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
+ if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID))
+ panic("couldn't set hiMMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ }
+ /* ... and the bootstrap map. */
+ if (*pte & INTEL_PTE_VALID) {
+ if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE))
+ panic("couldn't set MMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ }
+}
+
+/*
+ * Unpin and free the whole bootstrap pagetable tree rooted at `base',
+ * releasing every table page back to the hypervisor.  Only the region
+ * below HYP_VIRT_START (and, on x86_64, below the canonical hole) is
+ * walked.  Note: the loop braces deliberately span the #if arms so the
+ * same inner loop serves the 2-, 3- and 4-level layouts.
+ */
+void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
+ unsigned i;
+ pt_entry_t *dir;
+ vm_offset_t va = 0;
+#ifdef __x86_64__
+ int l4i, l3i;
+#else
+#if PAE
+ unsigned j;
+#endif /* PAE */
+#endif
+ if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(base)))
+ panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%lx)\n", base, (vm_offset_t) kv_to_ma(base));
+#ifdef __x86_64__
+ /* 4-level page table */
+ for (l4i = 0; l4i < NPTES && va < HYP_VIRT_START && va < 0x0000800000000000UL; l4i++) {
+ pt_entry_t l4e = base[l4i];
+ pt_entry_t *l3;
+ if (!(l4e & INTEL_PTE_VALID)) {
+ va += NPTES * NPTES * NPTES * INTEL_PGBYTES;
+ continue;
+ }
+ l3 = (pt_entry_t *) ptetokv(l4e);
+
+ for (l3i = 0; l3i < NPTES && va < HYP_VIRT_START; l3i++) {
+ pt_entry_t l3e = l3[l3i];
+ if (!(l3e & INTEL_PTE_VALID)) {
+ va += NPTES * NPTES * INTEL_PGBYTES;
+ continue;
+ }
+ dir = (pt_entry_t *) ptetokv(l3e);
+#else
+#if PAE
+ /* 3-level page table */
+ for (j = 0; j < PDPNUM && va < HYP_VIRT_START; j++)
+ {
+ pt_entry_t pdpe = base[j];
+ if (!(pdpe & INTEL_PTE_VALID)) {
+ va += NPTES * NPTES * INTEL_PGBYTES;
+ continue;
+ }
+ dir = (pt_entry_t *) ptetokv(pdpe);
+#else /* PAE */
+ /* 2-level page table */
+ dir = base;
+#endif /* PAE */
+#endif
+ /* Free every leaf page table reachable from this directory. */
+ for (i = 0; i < NPTES && va < HYP_VIRT_START; i++) {
+ pt_entry_t pde = dir[i];
+ unsigned long pfn = atop(pte_to_pa(pde));
+ void *pgt = (void*) phystokv(ptoa(pfn));
+ if (pde & INTEL_PTE_VALID)
+ hyp_free_page(pfn, pgt);
+ va += NPTES * INTEL_PGBYTES;
+ }
+#ifndef __x86_64__
+#if PAE
+ hyp_free_page(atop(_kvtophys(dir)), dir);
+ }
+#endif /* PAE */
+#else
+ hyp_free_page(atop(_kvtophys(dir)), dir);
+ }
+ hyp_free_page(atop(_kvtophys(l3)), l3);
+ }
+#endif
+ hyp_free_page(atop(_kvtophys(base)), base);
+}
+#endif /* MACH_PV_PAGETABLES */
+
+/*
+ * Create a temporary mapping for a given physical entry
+ *
+ * This can be used to access physical pages which are not mapped 1:1 by
+ * phystokv().
+ *
+ * Takes a complete pte value (`entry' must be non-zero), installs it in
+ * a free per-CPU window, invalidates the stale TLB entry, and returns
+ * the window.  Panics (via assert) if all windows on this CPU are busy;
+ * release with pmap_put_mapwindow().
+ */
+pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
+{
+ pmap_mapwindow_t *map;
+ int cpu = cpu_number();
+
+ assert(entry != 0);
+
+ /* Find an empty one. */
+ for (map = &mapwindows[cpu * PMAP_NMAPWINDOWS]; map < &mapwindows[(cpu+1) * PMAP_NMAPWINDOWS]; map++)
+ if (!(*map->entry))
+ break;
+ assert(map < &mapwindows[(cpu+1) * PMAP_NMAPWINDOWS]);
+
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(map->entry), pa_to_ma(entry)))
+ panic("pmap_get_mapwindow");
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(map->entry, entry);
+#endif /* MACH_PV_PAGETABLES */
+ INVALIDATE_TLB(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
+ return map;
+}
+
+/*
+ * Destroy a temporary mapping for a physical entry
+ *
+ * Clears the window's pte (marking it free for pmap_get_mapwindow)
+ * and invalidates the TLB entry for its virtual address.
+ */
+void pmap_put_mapwindow(pmap_mapwindow_t *map)
+{
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(map->entry), 0))
+ panic("pmap_put_mapwindow");
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(map->entry, 0);
+#endif /* MACH_PV_PAGETABLES */
+ INVALIDATE_TLB(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
+}
+
+/*
+ * Report the kernel virtual address range available to the VM system.
+ * The top MAPWINDOW_SIZE bytes are reserved for the per-CPU temporary
+ * mapping windows and are excluded from *endp.
+ */
+void pmap_virtual_space(
+ vm_offset_t *startp,
+ vm_offset_t *endp)
+{
+ *startp = kernel_virtual_start;
+ *endp = kernel_virtual_end - MAPWINDOW_SIZE;
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void pmap_init(void)
+{
+ unsigned long npages;
+ vm_offset_t addr;
+ vm_size_t s;
+#if NCPUS > 1
+ int i;
+#endif /* NCPUS > 1 */
+
+ /*
+ * Allocate memory for the pv_head_table and its lock bits,
+ * the modify bit array, and the pte_page table.
+ */
+
+ npages = vm_page_table_size();
+ /* One pv entry, one lock-bit share, and one attribute byte per page. */
+ s = (vm_size_t) (sizeof(struct pv_entry) * npages
+ + pv_lock_table_size(npages)
+ + npages);
+
+ s = round_page(s);
+ if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
+ panic("pmap_init");
+ memset((void *) addr, 0, s);
+
+ /*
+ * Allocate the structures first to preserve word-alignment.
+ */
+ pv_head_table = (pv_entry_t) addr;
+ addr = (vm_offset_t) (pv_head_table + npages);
+
+ pv_lock_table = (char *) addr;
+ addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
+
+ pmap_phys_attributes = (char *) addr;
+
+ /*
+ * Create the cache of physical maps,
+ * and of the physical-to-virtual entries.
+ */
+ s = (vm_size_t) sizeof(struct pmap);
+ kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, 0);
+ /* Table pages are page-sized, page-aligned, and must come from
+ * directly-mapped physical memory. */
+ kmem_cache_init(&pt_cache, "pmap_L1",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+ kmem_cache_init(&pd_cache, "pmap_L2",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+#if PAE
+ kmem_cache_init(&pdpt_cache, "pmap_L3",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+#ifdef __x86_64__
+ kmem_cache_init(&l4_cache, "pmap_L4",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+#endif /* __x86_64__ */
+#endif /* PAE */
+ s = (vm_size_t) sizeof(struct pv_entry);
+ kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, 0);
+
+#if NCPUS > 1
+ /*
+ * Set up the pmap request lists
+ */
+ for (i = 0; i < NCPUS; i++) {
+ pmap_update_list_t up = &cpu_update_list[i];
+
+ simple_lock_init(&up->lock);
+ up->count = 0;
+ }
+#endif /* NCPUS > 1 */
+
+ /*
+ * Indicate that the PMAP module is now fully initialized.
+ */
+ pmap_initialized = TRUE;
+}
+
+/*
+ * TRUE when `addr' belongs to a physical page managed by the VM
+ * system.  Always FALSE before pmap_init() has completed, since the
+ * page table is not trustworthy until then.
+ */
+static inline boolean_t
+valid_page(phys_addr_t addr)
+{
+	if (!pmap_initialized)
+		return FALSE;
+
+	return vm_page_lookup_pa(addr) != NULL;
+}
+
+/*
+ * Routine: pmap_page_table_page_alloc
+ *
+ * Allocates a new physical page to be used as a page-table page.
+ *
+ * Must be called with the pmap system and the pmap unlocked,
+ * since these must be unlocked to use vm_page_grab.
+ *
+ * Returns the physical address of a wired, zero-filled page that
+ * has been entered into pmap_object at its physical address.
+ */
+static vm_offset_t
+pmap_page_table_page_alloc(void)
+{
+ vm_page_t m;
+ phys_addr_t pa;
+
+ check_simple_locks();
+
+ /*
+ * We cannot allocate the pmap_object in pmap_init,
+ * because it is called before the cache package is up.
+ * Allocate it now if it is missing.
+ */
+ if (pmap_object == VM_OBJECT_NULL)
+ pmap_object = vm_object_allocate(vm_page_table_size() * PAGE_SIZE);
+
+ /*
+ * Allocate a VM page for the level 2 page table entries.
+ */
+ while ((m = vm_page_grab(VM_PAGE_DIRECTMAP)) == VM_PAGE_NULL)
+ VM_PAGE_WAIT((void (*)()) 0);
+
+ /*
+ * Map the page to its physical address so that it
+ * can be found later.
+ */
+ pa = m->phys_addr;
+ assert(pa == (vm_offset_t) pa);
+ vm_object_lock(pmap_object);
+ vm_page_insert(m, pmap_object, pa);
+ vm_page_lock_queues();
+ vm_page_wire(m);
+ inuse_ptepages_count++;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+
+ /*
+ * Zero the page.
+ */
+ memset((void *)phystokv(pa), 0, PAGE_SIZE);
+
+ return pa;
+}
+
+#ifdef MACH_XEN
+/*
+ * Map machine frame `mfn' writable at kernel address `addr',
+ * allocating (and, under PV pagetables, pinning) an intermediate page
+ * table if none covers `addr' yet.  Without PV pagetables, `mfn' is
+ * actually a pfn (see note below).
+ */
+void pmap_map_mfn(void *_addr, unsigned long mfn) {
+ vm_offset_t addr = (vm_offset_t) _addr;
+ pt_entry_t *pte, *pdp;
+ vm_offset_t ptp;
+ pt_entry_t ma = ((pt_entry_t) mfn) << PAGE_SHIFT;
+
+ /* Add a ptp if none exist yet for this pte */
+ if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL) {
+ ptp = phystokv(pmap_page_table_page_alloc());
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readonly((void*) ptp);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, pa_to_mfn(ptp)))
+ panic("couldn't pin page %lx(%lx)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+#endif /* MACH_PV_PAGETABLES */
+ pdp = pmap_pde(kernel_pmap, addr);
+
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pdp),
+ pa_to_pte(kv_to_ma(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE))
+ panic("%s:%d could not set pde %llx(%lx) to %lx(%lx)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp));
+#else /* MACH_PV_PAGETABLES */
+ *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+ /* The pte lookup must now succeed. */
+ pte = pmap_pte(kernel_pmap, addr);
+ }
+
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pte), ma | INTEL_PTE_VALID | INTEL_PTE_WRITE))
+ panic("%s:%d could not set pte %p(%lx) to %llx(%llx)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
+#else /* MACH_PV_PAGETABLES */
+ /* Note: in this case, mfn is actually a pfn. */
+ WRITE_PTE(pte, ma | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#endif /* MACH_PV_PAGETABLES */
+}
+#endif /* MACH_XEN */
+
+/*
+ * Deallocate a page-table page.
+ * The page-table page must have all mappings removed,
+ * and be removed from its page directory.
+ *
+ * `pa' is the physical address returned by
+ * pmap_page_table_page_alloc(); the backing vm_page is looked up in
+ * pmap_object, unpinned from the hypervisor if needed, and freed.
+ */
+static void
+pmap_page_table_page_dealloc(vm_offset_t pa)
+{
+ vm_page_t m;
+
+ vm_object_lock(pmap_object);
+ m = vm_page_lookup(pmap_object, pa);
+ vm_page_lock_queues();
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
+ panic("couldn't unpin page %llx(%lx)\n", pa, (vm_offset_t) kv_to_ma(pa));
+ pmap_set_page_readwrite((void*) phystokv(pa));
+#endif /* MACH_PV_PAGETABLES */
+ vm_page_free(m);
+ inuse_ptepages_count--;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * If the size specified for the map
+ * is zero, the map is an actual physical
+ * map, and may be referenced by the
+ * hardware.
+ *
+ * If the size specified is non-zero,
+ * the map will be used in software only, and
+ * is bounded by that size.
+ *
+ * Returns PMAP_NULL on allocation failure (all partially allocated
+ * resources are released) or when size != 0.  The new pmap starts as
+ * a copy of the kernel page directories.
+ */
+pmap_t pmap_create(vm_size_t size)
+{
+#ifdef __x86_64__
+ // needs to be reworked if we want to dynamically allocate PDPs for kernel
+ const int PDPNUM = PDPNUM_KERNEL;
+#endif
+ pt_entry_t *page_dir[PDPNUM];
+ int i;
+ pmap_t p;
+ pmap_statistics_t stats;
+
+ /*
+ * A software use-only map doesn't even need a map.
+ */
+
+ if (size != 0) {
+ return(PMAP_NULL);
+ }
+
+/*
+ * Allocate a pmap struct from the pmap_cache. Then allocate
+ * the page descriptor table.
+ */
+
+ p = (pmap_t) kmem_cache_alloc(&pmap_cache);
+ if (p == PMAP_NULL)
+ return PMAP_NULL;
+
+ /* Each page directory starts as a copy of the kernel's, so
+ * kernel mappings are shared; roll back on failure. */
+ for (i = 0; i < PDPNUM; i++) {
+ page_dir[i] = (pt_entry_t *) kmem_cache_alloc(&pd_cache);
+ if (page_dir[i] == NULL) {
+ i -= 1;
+ while (i >= 0) {
+ kmem_cache_free(&pd_cache,
+ (vm_address_t) page_dir[i]);
+ i -= 1;
+ }
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
+ memcpy(page_dir[i],
+ (void *) kernel_page_dir + i * INTEL_PGBYTES,
+ INTEL_PGBYTES);
+ }
+
+#ifdef LINUX_DEV
+#if VM_MIN_KERNEL_ADDRESS != 0
+ /* Do not map BIOS in user tasks */
+ page_dir
+#if PAE
+ [lin2pdpnum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)]
+#else
+ [0]
+#endif
+ [lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)]
+ = 0;
+#endif
+#endif /* LINUX_DEV */
+
+#ifdef MACH_PV_PAGETABLES
+ {
+ for (i = 0; i < PDPNUM; i++)
+ pmap_set_page_readonly((void *) page_dir[i]);
+ }
+#endif /* MACH_PV_PAGETABLES */
+
+#if PAE
+ /* Build the pdp table pointing at the new page directories. */
+ pt_entry_t *pdp_kernel = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
+ if (pdp_kernel == NULL) {
+ for (i = 0; i < PDPNUM; i++)
+ kmem_cache_free(&pd_cache, (vm_address_t) page_dir[i]);
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
+
+ memset(pdp_kernel, 0, INTEL_PGBYTES);
+ {
+ for (i = 0; i < PDPNUM; i++) {
+ int pdp_index = i;
+#ifdef __x86_64__
+ pdp_index += lin2pdpnum(VM_MIN_KERNEL_ADDRESS);
+#endif
+ WRITE_PTE(&pdp_kernel[pdp_index],
+ pa_to_pte(kvtophys((vm_offset_t) page_dir[i]))
+ | INTEL_PTE_VALID
+#if (defined(__x86_64__) && !defined(MACH_HYP)) || defined(MACH_PV_PAGETABLES)
+ | INTEL_PTE_WRITE
+#ifdef __x86_64__
+ | INTEL_PTE_USER
+#endif /* __x86_64__ */
+#endif
+ );
+ }
+ }
+#ifdef __x86_64__
+ p->l4base = (pt_entry_t *) kmem_cache_alloc(&l4_cache);
+ if (p->l4base == NULL)
+ panic("pmap_create");
+ memset(p->l4base, 0, INTEL_PGBYTES);
+ WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
+ pa_to_pte(kvtophys((vm_offset_t) pdp_kernel)) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#ifdef MACH_PV_PAGETABLES
+ /* Under Xen, a separate user-visible l4/pdp pair is kept. */
+ // FIXME: use kmem_cache_alloc instead
+ if (kmem_alloc_wired(kernel_map,
+ (vm_offset_t *)&p->user_pdpbase, INTEL_PGBYTES)
+ != KERN_SUCCESS)
+ panic("pmap_create");
+ memset(p->user_pdpbase, 0, INTEL_PGBYTES);
+ {
+ int i;
+ for (i = 0; i < lin2pdpnum(VM_MAX_USER_ADDRESS); i++)
+ WRITE_PTE(&p->user_pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) page_dir[i])) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+ }
+ // FIXME: use kmem_cache_alloc instead
+ if (kmem_alloc_wired(kernel_map,
+ (vm_offset_t *)&p->user_l4base, INTEL_PGBYTES)
+ != KERN_SUCCESS)
+ panic("pmap_create");
+ memset(p->user_l4base, 0, INTEL_PGBYTES);
+ WRITE_PTE(&p->user_l4base[0], pa_to_pte(kvtophys((vm_offset_t) p->user_pdpbase)) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#endif /* MACH_PV_PAGETABLES */
+#else /* _x86_64 */
+ p->pdpbase = pdp_kernel;
+#endif /* _x86_64 */
+#ifdef MACH_PV_PAGETABLES
+#ifdef __x86_64__
+ pmap_set_page_readonly(p->l4base);
+ pmap_set_page_readonly(p->user_l4base);
+ pmap_set_page_readonly(p->user_pdpbase);
+#else
+ pmap_set_page_readonly(p->pdpbase);
+#endif
+#endif /* MACH_PV_PAGETABLES */
+#else /* PAE */
+ p->dirbase = page_dir[0];
+#endif /* PAE */
+
+ p->ref_count = 1;
+
+ simple_lock_init(&p->lock);
+ p->cpus_using = 0;
+
+ /*
+ * Initialize statistics.
+ */
+
+ stats = &p->stats;
+ stats->resident_count = 0;
+ stats->wired_count = 0;
+
+ return(p);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ *
+ * Drops one reference; only when the count hits zero is the page
+ * table tree (user portion only — kernel-shared tables are skipped)
+ * freed back to the kmem caches.
+ */
+
+void pmap_destroy(pmap_t p)
+{
+ int c, s;
+
+ if (p == PMAP_NULL)
+ return;
+
+ SPLVM(s);
+ simple_lock(&p->lock);
+ c = --p->ref_count;
+ simple_unlock(&p->lock);
+ SPLX(s);
+
+ if (c != 0) {
+ return; /* still in use */
+ }
+
+ /*
+ * Free the page table tree.
+ *
+ * Note: the loop braces deliberately span the #if PAE /
+ * __x86_64__ arms so one inner loop serves every layout.
+ */
+#if PAE
+#ifdef __x86_64__
+ for (int l4i = 0; l4i < NPTES; l4i++) {
+ pt_entry_t pdp = (pt_entry_t) p->l4base[l4i];
+ if (!(pdp & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdpbase = (pt_entry_t*) ptetokv(pdp);
+#else /* __x86_64__ */
+ pt_entry_t *pdpbase = p->pdpbase;
+#endif /* __x86_64__ */
+ for (int l3i = 0; l3i < NPTES; l3i++) {
+ pt_entry_t pde = (pt_entry_t) pdpbase[l3i];
+ if (!(pde & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdebase = (pt_entry_t*) ptetokv(pde);
+ /* Only free leaf page tables in the user range; above
+ * VM_MAX_USER_ADDRESS they are shared with the kernel. */
+ if (
+#ifdef __x86_64__
+ l4i < lin2l4num(VM_MAX_USER_ADDRESS) ||
+ (l4i == lin2l4num(VM_MAX_USER_ADDRESS) && l3i < lin2pdpnum(VM_MAX_USER_ADDRESS))
+#else /* __x86_64__ */
+ l3i < lin2pdpnum(VM_MAX_USER_ADDRESS)
+#endif /* __x86_64__ */
+ )
+ for (int l2i = 0; l2i < NPTES; l2i++)
+#else /* PAE */
+ pt_entry_t *pdebase = p->dirbase;
+ for (int l2i = 0; l2i < lin2pdenum(VM_MAX_USER_ADDRESS); l2i++)
+#endif /* PAE */
+ {
+ pt_entry_t pte = (pt_entry_t) pdebase[l2i];
+ if (!(pte & INTEL_PTE_VALID))
+ continue;
+ kmem_cache_free(&pt_cache, (vm_offset_t)ptetokv(pte));
+ }
+ kmem_cache_free(&pd_cache, (vm_offset_t)pdebase);
+#if PAE
+ }
+ kmem_cache_free(&pdpt_cache, (vm_offset_t)pdpbase);
+#ifdef __x86_64__
+ }
+ kmem_cache_free(&l4_cache, (vm_offset_t) p->l4base);
+#endif /* __x86_64__ */
+#endif /* PAE */
+
+ /* Finally, free the pmap itself */
+ kmem_cache_free(&pmap_cache, (vm_offset_t) p);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+
+void pmap_reference(pmap_t p)
+{
+	int s;
+
+	if (p == PMAP_NULL)
+		return;
+
+	/* Bump the count under the pmap lock, at VM spl. */
+	SPLVM(s);
+	simple_lock(&p->lock);
+	p->ref_count++;
+	simple_unlock(&p->lock);
+	SPLX(s);
+}
+
+/*
+ * Remove a range of hardware page-table entries.
+ * The entries given are the first (inclusive)
+ * and last (exclusive) entries for the VM pages.
+ * The virtual address is the va for the first pte.
+ *
+ * The pmap must be locked.
+ * If the pmap is not the kernel pmap, the range must lie
+ * entirely within one pte-page. This is NOT checked.
+ * Assumes that the pte-page exists.
+ */
+
+static
+void pmap_remove_range(
+    pmap_t      pmap,
+    vm_offset_t va,
+    pt_entry_t  *spte,
+    pt_entry_t  *epte)
+{
+    pt_entry_t      *cpte;
+    unsigned long   num_removed, num_unwired;
+    unsigned long   pai;
+    phys_addr_t     pa;
+#ifdef MACH_PV_PAGETABLES
+    /* Under Xen, pte writes go through the hypervisor; batch them
+       into one hyp_mmu_update call per HYP_BATCH_MMU_UPDATES. */
+    int n, ii = 0;
+    struct mmu_update update[HYP_BATCH_MMU_UPDATES];
+#endif /* MACH_PV_PAGETABLES */
+
+    /* Kernel ptes below kernel_virtual_start map physical memory 1:1
+       and must never be torn down through this path. */
+    if (pmap == kernel_pmap && (va < kernel_virtual_start || va + (epte-spte)*PAGE_SIZE > kernel_virtual_end))
+        panic("pmap_remove_range(%lx-%lx) falls in physical memory area!\n", (unsigned long) va, (unsigned long) va + (epte-spte)*PAGE_SIZE);
+
+#if DEBUG_PTE_PAGE
+    if (pmap != kernel_pmap)
+        ptep_check(get_pte_page(spte));
+#endif /* DEBUG_PTE_PAGE */
+    num_removed = 0;
+    num_unwired = 0;
+
+    /* One iteration per VM page: a VM page may span several
+       hardware ptes (ptes_per_vm_page). */
+    for (cpte = spte; cpte < epte;
+         cpte += ptes_per_vm_page, va += PAGE_SIZE) {
+
+        if (*cpte == 0)
+            continue;
+
+        assert(*cpte & INTEL_PTE_VALID);
+
+        pa = pte_to_pa(*cpte);
+
+        num_removed++;
+        if (*cpte & INTEL_PTE_WIRED)
+            num_unwired++;
+
+        if (!valid_page(pa)) {
+
+            /*
+             * Outside range of managed physical memory.
+             * Just remove the mappings.
+             */
+            int i = ptes_per_vm_page;
+            pt_entry_t *lpte = cpte;
+            do {
+#ifdef MACH_PV_PAGETABLES
+                update[ii].ptr = kv_to_ma(lpte);
+                update[ii].val = 0;
+                ii++;
+                if (ii == HYP_BATCH_MMU_UPDATES) {
+                    /* Batch full: flush it to the hypervisor now. */
+                    hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
+                    if (n != ii)
+                        panic("couldn't pmap_remove_range\n");
+                    ii = 0;
+                }
+#else /* MACH_PV_PAGETABLES */
+                *lpte = 0;
+#endif /* MACH_PV_PAGETABLES */
+                lpte++;
+            } while (--i > 0);
+            continue;
+        }
+
+        pai = pa_index(pa);
+        LOCK_PVH(pai);
+
+        /*
+         * Get the modify and reference bits.
+         * Fold them into pmap_phys_attributes before the pte is
+         * cleared, so the information is not lost.
+         */
+        {
+            int         i;
+            pt_entry_t  *lpte;
+
+            i = ptes_per_vm_page;
+            lpte = cpte;
+            do {
+                pmap_phys_attributes[pai] |=
+                    *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
+#ifdef MACH_PV_PAGETABLES
+                update[ii].ptr = kv_to_ma(lpte);
+                update[ii].val = 0;
+                ii++;
+                if (ii == HYP_BATCH_MMU_UPDATES) {
+                    hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
+                    if (n != ii)
+                        panic("couldn't pmap_remove_range\n");
+                    ii = 0;
+                }
+#else /* MACH_PV_PAGETABLES */
+                *lpte = 0;
+#endif /* MACH_PV_PAGETABLES */
+                lpte++;
+            } while (--i > 0);
+        }
+
+        /*
+         * Remove the mapping from the pvlist for
+         * this physical page.
+         */
+        {
+            pv_entry_t  pv_h, prev, cur;
+
+            pv_h = pai_to_pvh(pai);
+            if (pv_h->pmap == PMAP_NULL) {
+                panic("pmap_remove: null pv_list for pai %lx at va %lx!", pai, (unsigned long) va);
+            }
+            if (pv_h->va == va && pv_h->pmap == pmap) {
+                /*
+                 * Header is the pv_entry.  Copy the next one
+                 * to header and free the next one (we cannot
+                 * free the header)
+                 */
+                cur = pv_h->next;
+                if (cur != PV_ENTRY_NULL) {
+                    *pv_h = *cur;
+                    PV_FREE(cur);
+                }
+                else {
+                    /* List is now empty. */
+                    pv_h->pmap = PMAP_NULL;
+                }
+            }
+            else {
+                /* Walk the chain; panics if the mapping is absent. */
+                cur = pv_h;
+                do {
+                    prev = cur;
+                    if ((cur = prev->next) == PV_ENTRY_NULL) {
+                        panic("pmap-remove: mapping not in pv_list!");
+                    }
+                } while (cur->va != va || cur->pmap != pmap);
+                prev->next = cur->next;
+                PV_FREE(cur);
+            }
+            UNLOCK_PVH(pai);
+        }
+    }
+
+#ifdef MACH_PV_PAGETABLES
+    /* Flush any remaining batched updates. */
+    if (ii > HYP_BATCH_MMU_UPDATES)
+        panic("overflowed array in pmap_remove_range");
+    hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
+    if (n != ii)
+        panic("couldn't pmap_remove_range\n");
+#endif /* MACH_PV_PAGETABLES */
+
+    /*
+     * Update the counts
+     */
+    pmap->stats.resident_count -= num_removed;
+    pmap->stats.wired_count -= num_unwired;
+}
+
+/*
+ * Remove the given range of addresses
+ * from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the hardware page size.
+ */
+
+void pmap_remove(
+    pmap_t      map,
+    vm_offset_t s,
+    vm_offset_t e)
+{
+    int         spl;
+    vm_offset_t va, next;
+
+    if (map == PMAP_NULL)
+        return;
+
+    PMAP_READ_LOCK(map, spl);
+
+    /* Walk the range one page-directory span at a time. */
+    for (va = s; va < e; va = next) {
+        pt_entry_t *pde = pmap_pde(map, va);
+
+        /* End of the span covered by this directory entry;
+           clamp to E and guard against address wraparound. */
+        next = (va + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
+        if (next > e || next < va)
+            next = e;
+
+        if (pde && (*pde & INTEL_PTE_VALID)) {
+            pt_entry_t *first, *last;
+
+            first = (pt_entry_t *)ptetokv(*pde);
+            first = &first[ptenum(va)];
+            last = &first[intel_btop(next-va)];
+            pmap_remove_range(map, va, first, last);
+        }
+    }
+    PMAP_UPDATE_TLBS(map, s, e);
+
+    PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_page_protect
+ *
+ * Function:
+ * Lower the permission for all mappings to a given
+ * page.
+ */
+void pmap_page_protect(
+    phys_addr_t phys,
+    vm_prot_t   prot)
+{
+    pv_entry_t      pv_h, prev;
+    pv_entry_t      pv_e;
+    pt_entry_t      *pte;
+    unsigned long   pai;
+    pmap_t          pmap;
+    int             spl;
+    boolean_t       remove;
+
+    assert(phys != vm_page_fictitious_addr);
+    if (!valid_page(phys)) {
+        /*
+         * Not a managed page.
+         */
+        return;
+    }
+
+    /*
+     * Determine the new protection: read-only means write-protect
+     * in place, anything more restrictive means remove the mapping
+     * entirely.
+     */
+    switch (prot) {
+        case VM_PROT_READ:
+        case VM_PROT_READ|VM_PROT_EXECUTE:
+            remove = FALSE;
+            break;
+        case VM_PROT_ALL:
+            return;     /* nothing to do */
+        default:
+            remove = TRUE;
+            break;
+    }
+
+    /*
+     * Lock the pmap system first, since we will be changing
+     * several pmaps.
+     */
+
+    PMAP_WRITE_LOCK(spl);
+
+    pai = pa_index(phys);
+    pv_h = pai_to_pvh(pai);
+
+    /*
+     * Walk down PV list, changing or removing all mappings.
+     * We do not have to lock the pv_list because we have
+     * the entire pmap system locked.
+     */
+    if (pv_h->pmap != PMAP_NULL) {
+
+        prev = pv_e = pv_h;
+        do {
+            vm_offset_t va;
+
+            pmap = pv_e->pmap;
+            /*
+             * Lock the pmap to block pmap_extract and similar routines.
+             */
+            simple_lock(&pmap->lock);
+
+            va = pv_e->va;
+            pte = pmap_pte(pmap, va);
+
+            /*
+             * Consistency checks.
+             */
+            assert(*pte & INTEL_PTE_VALID);
+            assert(pte_to_pa(*pte) == phys);
+
+            /*
+             * Remove the mapping if new protection is NONE
+             * or if write-protecting a kernel mapping
+             * (the i386 ignores the write bit in ring 0,
+             * so a kernel mapping cannot be made read-only).
+             */
+            if (remove || pmap == kernel_pmap) {
+                /*
+                 * Remove the mapping, collecting any modify bits.
+                 */
+
+                if (*pte & INTEL_PTE_WIRED) {
+                    pmap->stats.wired_count--;
+                }
+
+                {
+                    /* Clear every hardware pte of this VM page,
+                       folding modify/reference bits into
+                       pmap_phys_attributes first. */
+                    int i = ptes_per_vm_page;
+
+                    do {
+                        pmap_phys_attributes[pai] |=
+                            *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+#ifdef MACH_PV_PAGETABLES
+                        if (!hyp_mmu_update_pte(kv_to_ma(pte++), 0))
+                            panic("%s:%d could not clear pte %p\n",__FILE__,__LINE__,pte-1);
+#else /* MACH_PV_PAGETABLES */
+                        *pte++ = 0;
+#endif /* MACH_PV_PAGETABLES */
+                    } while (--i > 0);
+                }
+
+                pmap->stats.resident_count--;
+
+                /*
+                 * Remove the pv_entry.
+                 */
+                if (pv_e == pv_h) {
+                    /*
+                     * Fix up head later (after the walk), since we
+                     * cannot free the header entry itself.
+                     */
+                    pv_h->pmap = PMAP_NULL;
+                }
+                else {
+                    /*
+                     * Delete this entry.
+                     */
+                    prev->next = pv_e->next;
+                    PV_FREE(pv_e);
+                }
+            }
+            else {
+                /*
+                 * Write-protect.
+                 */
+                int i = ptes_per_vm_page;
+
+                do {
+#ifdef MACH_PV_PAGETABLES
+                    if (!hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WRITE))
+                        panic("%s:%d could not disable write on pte %p\n",__FILE__,__LINE__,pte);
+#else /* MACH_PV_PAGETABLES */
+                    *pte &= ~INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+                    pte++;
+                } while (--i > 0);
+
+                /*
+                 * Advance prev.
+                 */
+                prev = pv_e;
+            }
+            PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+
+            simple_unlock(&pmap->lock);
+
+        } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+
+        /*
+         * If pv_head mapping was removed, fix it up.
+         */
+        if (pv_h->pmap == PMAP_NULL) {
+            pv_e = pv_h->next;
+            if (pv_e != PV_ENTRY_NULL) {
+                *pv_h = *pv_e;
+                PV_FREE(pv_e);
+            }
+        }
+    }
+
+    PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ * Will not increase permissions.
+ */
+/*
+ * Set the physical protection on the specified range of this map
+ * as requested.  Will not increase permissions: only the transition
+ * to read-only is handled here; removing all access is delegated to
+ * pmap_remove.
+ */
+void pmap_protect(
+    pmap_t      map,
+    vm_offset_t s,
+    vm_offset_t e,
+    vm_prot_t   prot)
+{
+    pt_entry_t  *spte, *epte;
+    vm_offset_t l;
+    int         spl;
+    vm_offset_t _s = s;     /* remember range start for the TLB flush */
+
+    if (map == PMAP_NULL)
+        return;
+
+    /*
+     * Determine the new protection.
+     */
+    switch (prot) {
+        case VM_PROT_READ:
+        case VM_PROT_READ|VM_PROT_EXECUTE:
+            break;
+        case VM_PROT_READ|VM_PROT_WRITE:
+        case VM_PROT_ALL:
+            return;     /* nothing to do */
+        default:
+            /* Revoking all access: remove the mappings instead. */
+            pmap_remove(map, s, e);
+            return;
+    }
+
+#if !(__i486__ || __i586__ || __i686__)
+    /*
+     * If write-protecting in the kernel pmap,
+     * remove the mappings; the i386 ignores
+     * the write-permission bit in kernel mode.
+     */
+    if (map == kernel_pmap) {
+        pmap_remove(map, s, e);
+        return;
+    }
+#endif
+
+    SPLVM(spl);
+    simple_lock(&map->lock);
+
+    while (s < e) {
+        /* Fixed: was "pt_entry_t *pde = pde = pmap_pde(map, s);",
+           a typo'd double assignment in the initializer. */
+        pt_entry_t *pde = pmap_pde(map, s);
+
+        /* End of this directory entry's span, clamped to E
+           (also guards against address wraparound). */
+        l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
+        if (l > e || l < s)
+            l = e;
+        if (pde && (*pde & INTEL_PTE_VALID)) {
+            spte = (pt_entry_t *)ptetokv(*pde);
+            spte = &spte[ptenum(s)];
+            epte = &spte[intel_btop(l-s)];
+
+#ifdef MACH_PV_PAGETABLES
+            /* Batch the pte updates into one hypercall per
+               HYP_BATCH_MMU_UPDATES entries. */
+            int n, i = 0;
+            struct mmu_update update[HYP_BATCH_MMU_UPDATES];
+#endif /* MACH_PV_PAGETABLES */
+
+            while (spte < epte) {
+                if (*spte & INTEL_PTE_VALID) {
+#ifdef MACH_PV_PAGETABLES
+                    update[i].ptr = kv_to_ma(spte);
+                    update[i].val = *spte & ~INTEL_PTE_WRITE;
+                    i++;
+                    if (i == HYP_BATCH_MMU_UPDATES) {
+                        hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+                        if (n != i)
+                            panic("couldn't pmap_protect\n");
+                        i = 0;
+                    }
+#else /* MACH_PV_PAGETABLES */
+                    *spte &= ~INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+                }
+                spte++;
+            }
+#ifdef MACH_PV_PAGETABLES
+            /* Flush the remaining batched updates. */
+            if (i > HYP_BATCH_MMU_UPDATES)
+                panic("overflowed array in pmap_protect");
+            hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+            if (n != i)
+                panic("couldn't pmap_protect\n");
+#endif /* MACH_PV_PAGETABLES */
+        }
+        s = l;
+    }
+    PMAP_UPDATE_TLBS(map, _s, e);
+
+    simple_unlock(&map->lock);
+    SPLX(spl);
+}
+
+typedef pt_entry_t* (*pmap_level_getter_t)(const pmap_t pmap, vm_offset_t addr);
+/*
+* Expand one single level of the page table tree
+*/
+static inline pt_entry_t* pmap_expand_level(pmap_t pmap, vm_offset_t v, int spl,
+                                            pmap_level_getter_t pmap_level,
+                                            pmap_level_getter_t pmap_level_upper,
+                                            int n_per_vm_page,
+                                            struct kmem_cache *cache)
+{
+    pt_entry_t      *pte;
+
+    /*
+     * Expand pmap to include this pte.  Assume that
+     * pmap is always expanded to include enough hardware
+     * pages to map one VM page.
+     *
+     * pmap_level looks up the entry at this level; pmap_level_upper
+     * looks up the parent entry that must be made to point at the
+     * newly allocated table page.
+     */
+    while ((pte = pmap_level(pmap, v)) == PT_ENTRY_NULL) {
+        /*
+         * Need to allocate a new page-table page.
+         */
+        vm_offset_t ptp;
+        pt_entry_t  *pdp;
+        int         i;
+
+        if (pmap == kernel_pmap) {
+            /*
+             * Would have to enter the new page-table page in
+             * EVERY pmap.
+             */
+            panic("pmap_expand kernel pmap to %#zx", v);
+        }
+
+        /*
+         * Unlock the pmap and allocate a new page-table page.
+         * (Allocation may block, so it cannot be done under the lock.)
+         */
+        PMAP_READ_UNLOCK(pmap, spl);
+
+        while (!(ptp = kmem_cache_alloc(cache)))
+            VM_PAGE_WAIT((void (*)()) 0);
+        memset((void *)ptp, 0, PAGE_SIZE);
+
+        /*
+         * Re-lock the pmap and check that another thread has
+         * not already allocated the page-table page.  If it
+         * has, discard the new page-table page (and try
+         * again to make sure).
+         */
+        PMAP_READ_LOCK(pmap, spl);
+
+        if (pmap_level(pmap, v) != PT_ENTRY_NULL) {
+            /*
+             * Oops... lost the race; free our copy and retry.
+             */
+            PMAP_READ_UNLOCK(pmap, spl);
+            kmem_cache_free(cache, ptp);
+            PMAP_READ_LOCK(pmap, spl);
+            continue;
+        }
+
+        /*
+         * Enter the new page table page in the page directory.
+         */
+        i = n_per_vm_page;
+        pdp = pmap_level_upper(pmap, v);
+        do {
+#ifdef MACH_PV_PAGETABLES
+            /* Xen: page-table pages must be read-only and pinned
+               before being referenced from a directory. */
+            pmap_set_page_readonly((void *) ptp);
+            if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn(ptp)))
+                panic("couldn't pin page %lx(%lx)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+            if (!hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdp)),
+                                    pa_to_pte(pa_to_ma(kvtophys(ptp))) | INTEL_PTE_VALID
+                                    | INTEL_PTE_USER
+                                    | INTEL_PTE_WRITE))
+                panic("%s:%d could not set pde %p(%llx,%lx) to %lx(%llx,%lx) %lx\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
+#else /* MACH_PV_PAGETABLES */
+            *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+                | INTEL_PTE_USER
+                | INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+            pdp++;  /* Note: This is safe b/c we stay in one page. */
+            ptp += INTEL_PGBYTES;
+        } while (--i > 0);
+
+        /*
+         * Now, get the address of the page-table entry
+         * (re-run the lookup through the loop condition).
+         */
+        continue;
+    }
+    return pte;
+}
+
+/*
+ * Expand, if required, the PMAP to include the virtual address V.
+ * PMAP needs to be locked, and it will be still locked on return. It
+ * can temporarily unlock the PMAP, during allocation or deallocation
+ * of physical pages.
+ */
+static inline pt_entry_t* pmap_expand(pmap_t pmap, vm_offset_t v, int spl)
+{
+#ifdef PAE
+#ifdef __x86_64__
+    /* Top-down: ensure each level exists before descending.  The
+       intermediate return values are ignored; only the final pte
+       pointer is needed. */
+    pmap_expand_level(pmap, v, spl, pmap_ptp, pmap_l4base, 1, &pdpt_cache);
+#endif /* __x86_64__ */
+    pmap_expand_level(pmap, v, spl, pmap_pde, pmap_ptp, 1, &pd_cache);
+#endif /* PAE */
+    return pmap_expand_level(pmap, v, spl, pmap_pte, pmap_pde, ptes_per_vm_page, &pt_cache);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void pmap_enter(
+    pmap_t      pmap,
+    vm_offset_t v,
+    phys_addr_t pa,
+    vm_prot_t   prot,
+    boolean_t   wired)
+{
+    boolean_t       is_physmem;
+    pt_entry_t      *pte;
+    pv_entry_t      pv_h;
+    unsigned long   i, pai;
+    pv_entry_t      pv_e;
+    pt_entry_t      template;
+    int             spl;
+    phys_addr_t     old_pa;
+
+    assert(pa != vm_page_fictitious_addr);
+    if (pmap_debug) printf("pmap(%zx, %llx)\n", v, (unsigned long long) pa);
+    if (pmap == PMAP_NULL)
+        return;
+
+    /* Kernel mappings must stay inside the kernel virtual window;
+       anything below it is the physical-memory direct map. */
+    if (pmap == kernel_pmap && (v < kernel_virtual_start || v >= kernel_virtual_end))
+        panic("pmap_enter(%lx, %llx) falls in physical memory area!\n", (unsigned long) v, (unsigned long long) pa);
+#if !(__i486__ || __i586__ || __i686__)
+    if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
+        && !wired /* hack for io_wire */ ) {
+        /*
+         * Because the 386 ignores write protection in kernel mode,
+         * we cannot enter a read-only kernel mapping, and must
+         * remove an existing mapping if changing it.
+         */
+        PMAP_READ_LOCK(pmap, spl);
+
+        pte = pmap_pte(pmap, v);
+        if (pte != PT_ENTRY_NULL && *pte != 0) {
+            /*
+             * Invalidate the translation buffer,
+             * then remove the mapping.
+             */
+            pmap_remove_range(pmap, v, pte,
+                              pte + ptes_per_vm_page);
+            PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+        }
+        PMAP_READ_UNLOCK(pmap, spl);
+        return;
+    }
+#endif
+
+    /*
+     * Must allocate a new pvlist entry while we're unlocked;
+     * Allocating may cause pageout (which will lock the pmap system).
+     * If we determine we need a pvlist entry, we will unlock
+     * and allocate one.  Then we will retry, throwing away
+     * the allocated entry later (if we no longer need it).
+     */
+    pv_e = PV_ENTRY_NULL;
+Retry:
+    PMAP_READ_LOCK(pmap, spl);
+
+    /* Make sure the page-table page for V exists (may temporarily
+       drop the lock while allocating). */
+    pte = pmap_expand(pmap, v, spl);
+
+    /* Decide cacheability: pages outside real physical memory
+       (device memory) are mapped uncached below. */
+    if (vm_page_ready())
+        is_physmem = (vm_page_lookup_pa(pa) != NULL);
+    else
+        is_physmem = (pa < biosmem_directmap_end());
+
+    /*
+     * Special case if the physical page is already mapped
+     * at this address.
+     */
+    old_pa = pte_to_pa(*pte);
+    if (*pte && old_pa == pa) {
+        /*
+         * May be changing its wired attribute or protection
+         */
+
+        if (wired && !(*pte & INTEL_PTE_WIRED))
+            pmap->stats.wired_count++;
+        else if (!wired && (*pte & INTEL_PTE_WIRED))
+            pmap->stats.wired_count--;
+
+        template = pa_to_pte(pa) | INTEL_PTE_VALID;
+        if (pmap != kernel_pmap)
+            template |= INTEL_PTE_USER;
+        if (prot & VM_PROT_WRITE)
+            template |= INTEL_PTE_WRITE;
+        if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486
+            && !is_physmem)
+            template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU;
+        if (wired)
+            template |= INTEL_PTE_WIRED;
+        i = ptes_per_vm_page;
+        do {
+            /* Preserve the hardware modify bit across the rewrite. */
+            if (*pte & INTEL_PTE_MOD)
+                template |= INTEL_PTE_MOD;
+#ifdef MACH_PV_PAGETABLES
+            if (!hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template)))
+                panic("%s:%d could not set pte %p to %llx\n",__FILE__,__LINE__,pte,template);
+#else /* MACH_PV_PAGETABLES */
+            WRITE_PTE(pte, template)
+#endif /* MACH_PV_PAGETABLES */
+            pte++;
+            pte_increment_pa(template);
+        } while (--i > 0);
+        PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+    }
+    else {
+
+        /*
+         * Remove old mapping from the PV list if necessary.
+         */
+        if (*pte) {
+            /*
+             * Don't free the pte page if removing last
+             * mapping - we will immediately replace it.
+             */
+            pmap_remove_range(pmap, v, pte,
+                              pte + ptes_per_vm_page);
+            PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+        }
+
+        if (valid_page(pa)) {
+
+            /*
+             * Enter the mapping in the PV list for this
+             * physical page.
+             */
+
+            pai = pa_index(pa);
+            LOCK_PVH(pai);
+            pv_h = pai_to_pvh(pai);
+
+            if (pv_h->pmap == PMAP_NULL) {
+                /*
+                 * No mappings yet
+                 */
+                pv_h->va = v;
+                pv_h->pmap = pmap;
+                pv_h->next = PV_ENTRY_NULL;
+            }
+            else {
+#if DEBUG
+                {
+                    /* check that this mapping is not already there */
+                    pv_entry_t e = pv_h;
+                    while (e != PV_ENTRY_NULL) {
+                        if (e->pmap == pmap && e->va == v)
+                            panic("pmap_enter: already in pv_list");
+                        e = e->next;
+                    }
+                }
+#endif /* DEBUG */
+
+                /*
+                 * Add new pv_entry after header.
+                 */
+                if (pv_e == PV_ENTRY_NULL) {
+                    PV_ALLOC(pv_e);
+                    if (pv_e == PV_ENTRY_NULL) {
+                        UNLOCK_PVH(pai);
+                        PMAP_READ_UNLOCK(pmap, spl);
+
+                        /*
+                         * Refill from cache.
+                         */
+                        pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache);
+                        goto Retry;
+                    }
+                }
+                pv_e->va = v;
+                pv_e->pmap = pmap;
+                pv_e->next = pv_h->next;
+                pv_h->next = pv_e;
+                /*
+                 * Remember that we used the pvlist entry.
+                 */
+                pv_e = PV_ENTRY_NULL;
+            }
+            UNLOCK_PVH(pai);
+        }
+
+        /*
+         * And count the mapping.
+         */
+
+        pmap->stats.resident_count++;
+        if (wired)
+            pmap->stats.wired_count++;
+
+        /*
+         * Build a template to speed up entering -
+         * only the pfn changes.
+         */
+        template = pa_to_pte(pa) | INTEL_PTE_VALID;
+        if (pmap != kernel_pmap)
+            template |= INTEL_PTE_USER;
+        if (prot & VM_PROT_WRITE)
+            template |= INTEL_PTE_WRITE;
+        if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486
+            && !is_physmem)
+            template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU;
+        if (wired)
+            template |= INTEL_PTE_WIRED;
+        i = ptes_per_vm_page;
+        do {
+#ifdef MACH_PV_PAGETABLES
+            if (!(hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template))))
+                panic("%s:%d could not set pte %p to %llx\n",__FILE__,__LINE__,pte,template);
+#else /* MACH_PV_PAGETABLES */
+            WRITE_PTE(pte, template)
+#endif /* MACH_PV_PAGETABLES */
+            pte++;
+            pte_increment_pa(template);
+        } while (--i > 0);
+    }
+
+    /* Discard a speculatively allocated pv entry we did not use. */
+    if (pv_e != PV_ENTRY_NULL) {
+        PV_FREE(pv_e);
+    }
+
+    PMAP_READ_UNLOCK(pmap, spl);
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void pmap_change_wiring(
+    pmap_t      map,
+    vm_offset_t v,
+    boolean_t   wired)
+{
+    pt_entry_t  *pte;
+    int         i;
+    int         spl;
+
+    /*
+     * We must grab the pmap system lock because we may
+     * change a pte_page queue.
+     */
+    PMAP_READ_LOCK(map, spl);
+
+    if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+        panic("pmap_change_wiring: pte missing");
+
+    if (wired && !(*pte & INTEL_PTE_WIRED)) {
+        /*
+         * wiring down mapping
+         *
+         * NOTE(review): unlike the unwiring branch below, this
+         * writes the pte directly even under MACH_PV_PAGETABLES —
+         * confirm that setting the wired bit is permitted on Xen's
+         * read-only page tables without a hypercall.
+         */
+        map->stats.wired_count++;
+        i = ptes_per_vm_page;
+        do {
+            *pte++ |= INTEL_PTE_WIRED;
+        } while (--i > 0);
+    }
+    else if (!wired && (*pte & INTEL_PTE_WIRED)) {
+        /*
+         * unwiring mapping
+         */
+        map->stats.wired_count--;
+        i = ptes_per_vm_page;
+        do {
+#ifdef MACH_PV_PAGETABLES
+            /* NOTE(review): this branch unwires, yet the panic text
+               says "wire down" — message wording looks inverted. */
+            if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WIRED)))
+                panic("%s:%d could not wire down pte %p\n",__FILE__,__LINE__,pte);
+#else /* MACH_PV_PAGETABLES */
+            *pte &= ~INTEL_PTE_WIRED;
+#endif /* MACH_PV_PAGETABLES */
+            pte++;
+        } while (--i > 0);
+    }
+
+    PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+phys_addr_t pmap_extract(
+    pmap_t      pmap,
+    vm_offset_t va)
+{
+    pt_entry_t  *pte;
+    phys_addr_t pa = 0;     /* 0 when no valid mapping exists */
+    int         spl;
+
+    SPLVM(spl);
+    simple_lock(&pmap->lock);
+    pte = pmap_pte(pmap, va);
+    if (pte != PT_ENTRY_NULL && (*pte & INTEL_PTE_VALID))
+        pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
+    simple_unlock(&pmap->lock);
+    SPLX(spl);
+    return pa;
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+#if 0
+/* Compiled out: this pmap module takes no advantage of the advisory
+   copy hint, so the empty definition is disabled entirely. */
+void pmap_copy(
+    pmap_t      dst_pmap,
+    pmap_t      src_pmap,
+    vm_offset_t dst_addr,
+    vm_size_t   len,
+    vm_offset_t src_addr)
+{
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_collect
+ * Function:
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Usage:
+ * Called by the pageout daemon when pages are scarce.
+ */
+void pmap_collect(pmap_t p)
+{
+    pt_entry_t  *ptp;
+    pt_entry_t  *eptp;
+    phys_addr_t pa;
+    int         spl, wired;
+
+    if (p == PMAP_NULL)
+        return;
+
+    if (p == kernel_pmap)
+        return;
+
+    /*
+     * Free the page table tree.
+     *
+     * Note: the loop nesting below is built up by the preprocessor —
+     * three levels on x86_64+PAE, two on PAE, one otherwise — so the
+     * braces pair across the #if/#endif sections.
+     */
+    PMAP_READ_LOCK(p, spl);
+#if PAE
+#ifdef __x86_64__
+    for (int l4i = 0; l4i < lin2l4num(VM_MAX_USER_ADDRESS); l4i++) {
+        pt_entry_t pdp = (pt_entry_t) p->l4base[l4i];
+        if (!(pdp & INTEL_PTE_VALID))
+            continue;
+        pt_entry_t *pdpbase = (pt_entry_t*) ptetokv(pdp);
+        for (int l3i = 0; l3i < NPTES; l3i++)
+#else /* __x86_64__ */
+        pt_entry_t *pdpbase = p->pdpbase;
+        for (int l3i = 0; l3i < lin2pdpnum(VM_MAX_USER_ADDRESS); l3i++)
+#endif /* __x86_64__ */
+        {
+            pt_entry_t pde = (pt_entry_t ) pdpbase[l3i];
+            if (!(pde & INTEL_PTE_VALID))
+                continue;
+            pt_entry_t *pdebase = (pt_entry_t*) ptetokv(pde);
+            for (int l2i = 0; l2i < NPTES; l2i++)
+#else /* PAE */
+            pt_entry_t *pdebase = p->dirbase;
+            for (int l2i = 0; l2i < lin2pdenum(VM_MAX_USER_ADDRESS); l2i++)
+#endif /* PAE */
+            {
+                pt_entry_t pte = (pt_entry_t) pdebase[l2i];
+                if (!(pte & INTEL_PTE_VALID))
+                    continue;
+
+                pa = pte_to_pa(pte);
+                ptp = (pt_entry_t *)phystokv(pa);
+                eptp = ptp + NPTES*ptes_per_vm_page;
+
+                /*
+                 * If the pte page has any wired mappings, we cannot
+                 * free it.
+                 */
+                wired = 0;
+                {
+                    pt_entry_t *ptep;
+                    for (ptep = ptp; ptep < eptp; ptep++) {
+                        if (*ptep & INTEL_PTE_WIRED) {
+                            wired = 1;
+                            break;
+                        }
+                    }
+                }
+                if (!wired) {
+                    /*
+                     * Remove the virtual addresses mapped by this pte page.
+                     */
+                    { /*XXX big hack*/
+                        vm_offset_t va = pagenum2lin(l4i, l3i, l2i, 0);
+                        if (p == kernel_pmap)
+                            va = lintokv(va);
+                        pmap_remove_range(p, va, ptp, eptp);
+                    }
+
+                    /*
+                     * Invalidate the page directory pointer.
+                     */
+                    {
+                        int i = ptes_per_vm_page;
+                        pt_entry_t *pdep = &pdebase[l2i];
+                        do {
+#ifdef MACH_PV_PAGETABLES
+                            /* Xen: clear the pde via hypercall, then
+                               unpin the table and make it writable
+                               again before returning it to the cache. */
+                            unsigned long pte = *pdep;
+                            void *ptable = (void*) ptetokv(pte);
+                            if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++)), 0)))
+                                panic("%s:%d could not clear pde %p\n",__FILE__,__LINE__,pdep-1);
+                            if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(ptable)))
+                                panic("couldn't unpin page %p(%lx)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
+                            pmap_set_page_readwrite(ptable);
+#else /* MACH_PV_PAGETABLES */
+                            *pdep++ = 0;
+#endif /* MACH_PV_PAGETABLES */
+                        } while (--i > 0);
+                    }
+
+                    /* Drop the lock around the (possibly blocking) free. */
+                    PMAP_READ_UNLOCK(p, spl);
+
+                    /*
+                     * And free the pte page itself.
+                     */
+                    kmem_cache_free(&pt_cache, (vm_offset_t)ptetokv(pte));
+
+                    PMAP_READ_LOCK(p, spl);
+
+                }
+            }
+#if PAE
+            // TODO check l2
+        }
+#ifdef __x86_64__
+        // TODO check l3
+    }
+#endif /* __x86_64__ */
+#endif /* PAE */
+
+    PMAP_UPDATE_TLBS(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
+
+    PMAP_READ_UNLOCK(p, spl);
+    return;
+
+}
+
+#if MACH_KDB
+/*
+ * Routine: pmap_whatis
+ * Function:
+ * Check whether this address is within a pmap
+ * Usage:
+ * Called from debugger
+ */
+int pmap_whatis(pmap_t p, vm_offset_t a)
+{
+    pt_entry_t  *ptp;
+    phys_addr_t pa;
+    int         spl;
+    int         ret = 0;    /* 1 when A hits any paging structure of P */
+
+    if (p == PMAP_NULL)
+        return 0;
+
+    /*
+     * Walk every level of the page-table tree and report which
+     * structure (L4 / PDP / PDE / PTP) the address A falls into.
+     * The loop nesting is assembled by the preprocessor, as in
+     * pmap_collect.
+     */
+    PMAP_READ_LOCK(p, spl);
+#if PAE
+#ifdef __x86_64__
+    if (a >= (vm_offset_t) p->l4base && a < (vm_offset_t) (&p->l4base[NPTES])) {
+        db_printf("L4 for pmap %p\n", p);
+        ret = 1;
+    }
+    for (int l4i = 0; l4i < NPTES; l4i++) {
+        pt_entry_t pdp = (pt_entry_t) p->l4base[l4i];
+        if (!(pdp & INTEL_PTE_VALID))
+            continue;
+        pt_entry_t *pdpbase = (pt_entry_t*) ptetokv(pdp);
+#else /* __x86_64__ */
+        int l4i = 0;
+        pt_entry_t *pdpbase = p->pdpbase;
+#endif /* __x86_64__ */
+        if (a >= (vm_offset_t) pdpbase && a < (vm_offset_t) (&pdpbase[NPTES])) {
+            db_printf("PDP %d for pmap %p\n", l4i, p);
+            ret = 1;
+        }
+        for (int l3i = 0; l3i < NPTES; l3i++)
+        {
+            pt_entry_t pde = (pt_entry_t ) pdpbase[l3i];
+            if (!(pde & INTEL_PTE_VALID))
+                continue;
+            pt_entry_t *pdebase = (pt_entry_t*) ptetokv(pde);
+#else /* PAE */
+            int l4i = 0, l3i = 0;
+            pt_entry_t *pdebase = p->dirbase;
+#endif /* PAE */
+            if (a >= (vm_offset_t) pdebase && a < (vm_offset_t) (&pdebase[NPTES])) {
+                db_printf("PDE %d %d for pmap %p\n", l4i, l3i, p);
+                ret = 1;
+            }
+            for (int l2i = 0; l2i < NPTES; l2i++)
+            {
+                pt_entry_t pte = (pt_entry_t) pdebase[l2i];
+                if (!(pte & INTEL_PTE_VALID))
+                    continue;
+
+                pa = pte_to_pa(pte);
+                ptp = (pt_entry_t *)phystokv(pa);
+
+                if (a >= (vm_offset_t) ptp && a < (vm_offset_t) (&ptp[NPTES*ptes_per_vm_page])) {
+                    db_printf("PTP %d %d %d for pmap %p\n", l4i, l3i, l2i, p);
+                    ret = 1;
+                }
+            }
+#if PAE
+        }
+#ifdef __x86_64__
+    }
+#endif /* __x86_64__ */
+#endif /* PAE */
+    PMAP_READ_UNLOCK(p, spl);
+
+    /* For the kernel pmap, additionally dump every mapping of the
+       physical page backing A (via the pv list). */
+    if (p == kernel_pmap) {
+        phys_addr_t pa;
+        if (DB_VALID_KERN_ADDR(a))
+            pa = kvtophys(a);
+        else
+            pa = pmap_extract(current_task()->map->pmap, a);
+
+        if (valid_page(pa)) {
+            unsigned long pai;
+            pv_entry_t pv_h;
+
+            pai = pa_index(pa);
+            for (pv_h = pai_to_pvh(pai);
+                 pv_h && pv_h->pmap;
+                 pv_h = pv_h->next)
+                db_printf("pmap %p at %llx\n", pv_h->pmap, pv_h->va);
+        }
+    }
+
+    return ret;
+}
+#endif /* MACH_KDB */
+
+/*
+ * Routine: pmap_activate
+ * Function:
+ * Binds the given physical map to the given
+ * processor, and returns a hardware map description.
+ */
+#if 0
+/* Compiled out: callers use the PMAP_ACTIVATE macro directly. */
+void pmap_activate(pmap_t my_pmap, thread_t th, int my_cpu)
+{
+    PMAP_ACTIVATE(my_pmap, th, my_cpu);
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_deactivate
+ * Function:
+ * Indicates that the given physical map is no longer
+ * in use on the specified processor. (This is a macro
+ * in pmap.h)
+ */
+#if 0
+/* Compiled out: callers use the PMAP_DEACTIVATE macro directly. */
+void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu)
+{
+    PMAP_DEACTIVATE(pmap, th, which_cpu);
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_kernel
+ * Function:
+ * Returns the physical map handle for the kernel.
+ */
+#if 0
+/* Compiled out: provided as a macro returning kernel_pmap. */
+pmap_t pmap_kernel()
+{
+    return (kernel_pmap);
+}
+#endif /* 0 */
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 0
+/* Compiled out: the real implementation lives in machine/phys.c or
+   machine/phys.s.  (Pre-ANSI style: note the implicit int return.) */
+pmap_zero_page(vm_offset_t phys)
+{
+    int i;
+
+    assert(phys != vm_page_fictitious_addr);
+    /* A VM page may consist of several hardware pages. */
+    i = PAGE_SIZE / INTEL_PGBYTES;
+    phys = intel_pfn(phys);
+
+    while (i--)
+        zero_phys(phys++);
+}
+#endif /* 0 */
+
+/*
+ * pmap_copy_page copies the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 0
+/* Compiled out: the real implementation lives in machine/phys.c or
+   machine/phys.s.  (Pre-ANSI style: note the implicit int return.) */
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+{
+    int i;
+
+    assert(src != vm_page_fictitious_addr);
+    assert(dst != vm_page_fictitious_addr);
+    /* Copy each hardware page making up the VM page. */
+    i = PAGE_SIZE / INTEL_PGBYTES;
+
+    while (i--) {
+        copy_phys(intel_pfn(src), intel_pfn(dst));
+        src += INTEL_PGBYTES;
+        dst += INTEL_PGBYTES;
+    }
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ *
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
+ */
+void
+pmap_pageable(
+    pmap_t      pmap,
+    vm_offset_t start,
+    vm_offset_t end,
+    boolean_t   pageable)
+{
+    /* Intentionally empty: this routine is advisory only; wiring is
+       handled by pmap_enter / pmap_change_wiring. */
+}
+
+/*
+ * Clear specified attribute bits.
+ */
+/*
+ * Clear the given attribute bits (PHYS_MODIFIED / PHYS_REFERENCED)
+ * for the physical page PHYS: both the software copy in
+ * pmap_phys_attributes and the hardware bits in every pte that
+ * currently maps the page.  No-op for unmanaged pages.
+ */
+static void
+phys_attribute_clear(
+    phys_addr_t phys,
+    int         bits)
+{
+    pv_entry_t      pv_h;
+    pv_entry_t      pv_e;
+    pt_entry_t      *pte;
+    unsigned long   pai;
+    pmap_t          pmap;
+    int             spl;
+
+    assert(phys != vm_page_fictitious_addr);
+    if (!valid_page(phys)) {
+        /*
+         * Not a managed page.
+         */
+        return;
+    }
+
+    /*
+     * Lock the pmap system first, since we will be changing
+     * several pmaps.
+     */
+
+    PMAP_WRITE_LOCK(spl);
+
+    pai = pa_index(phys);
+    pv_h = pai_to_pvh(pai);
+
+    /*
+     * Walk down PV list, clearing all modify or reference bits.
+     * We do not have to lock the pv_list because we have
+     * the entire pmap system locked.
+     */
+    if (pv_h->pmap != PMAP_NULL) {
+        /*
+         * There are some mappings.
+         */
+        for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+            vm_offset_t va;
+
+            pmap = pv_e->pmap;
+            /*
+             * Lock the pmap to block pmap_extract and similar routines.
+             */
+            simple_lock(&pmap->lock);
+
+            va = pv_e->va;
+            pte = pmap_pte(pmap, va);
+
+            /*
+             * Consistency checks.
+             */
+            assert(*pte & INTEL_PTE_VALID);
+            assert(pte_to_pa(*pte) == phys);
+
+            /*
+             * Clear modify or reference bits.
+             */
+            {
+                int i = ptes_per_vm_page;
+                do {
+#ifdef MACH_PV_PAGETABLES
+                    if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~bits)))
+                        panic("%s:%d could not clear bits %x from pte %p\n",__FILE__,__LINE__,bits,pte);
+#else /* MACH_PV_PAGETABLES */
+                    *pte &= ~bits;
+#endif /* MACH_PV_PAGETABLES */
+                    /* Fixed: advance to the next hardware pte of this
+                       VM page; previously the same pte was cleared
+                       repeatedly when ptes_per_vm_page > 1 (compare
+                       the pte++ in pmap_page_protect's loops). */
+                    pte++;
+                } while (--i > 0);
+            }
+            PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+            simple_unlock(&pmap->lock);
+        }
+    }
+
+    pmap_phys_attributes[pai] &= ~bits;
+
+    PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Check specified attribute bits.
+ */
+/*
+ * Test whether any of the given attribute bits (PHYS_MODIFIED /
+ * PHYS_REFERENCED) are set for the physical page PHYS, either in the
+ * software copy in pmap_phys_attributes or in any hardware pte that
+ * currently maps the page.  Returns FALSE for unmanaged pages.
+ */
+static boolean_t
+phys_attribute_test(
+    phys_addr_t phys,
+    int         bits)
+{
+    pv_entry_t      pv_h;
+    pv_entry_t      pv_e;
+    pt_entry_t      *pte;
+    unsigned long   pai;
+    pmap_t          pmap;
+    int             spl;
+
+    assert(phys != vm_page_fictitious_addr);
+    if (!valid_page(phys)) {
+        /*
+         * Not a managed page.
+         */
+        return (FALSE);
+    }
+
+    /*
+     * Lock the pmap system first, since we will be checking
+     * several pmaps.
+     */
+
+    PMAP_WRITE_LOCK(spl);
+
+    pai = pa_index(phys);
+    pv_h = pai_to_pvh(pai);
+
+    /* Fast path: the bits were already collected in software. */
+    if (pmap_phys_attributes[pai] & bits) {
+        PMAP_WRITE_UNLOCK(spl);
+        return (TRUE);
+    }
+
+    /*
+     * Walk down PV list, checking all mappings.
+     * We do not have to lock the pv_list because we have
+     * the entire pmap system locked.
+     */
+    if (pv_h->pmap != PMAP_NULL) {
+        /*
+         * There are some mappings.
+         */
+        for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+
+            pmap = pv_e->pmap;
+            /*
+             * Lock the pmap to block pmap_extract and similar routines.
+             */
+            simple_lock(&pmap->lock);
+
+            {
+                vm_offset_t va;
+
+                va = pv_e->va;
+                pte = pmap_pte(pmap, va);
+
+                /*
+                 * Consistency checks.
+                 */
+                assert(*pte & INTEL_PTE_VALID);
+                assert(pte_to_pa(*pte) == phys);
+            }
+
+            /*
+             * Check modify or reference bits.
+             */
+            {
+                int i = ptes_per_vm_page;
+
+                do {
+                    if (*pte & bits) {
+                        simple_unlock(&pmap->lock);
+                        PMAP_WRITE_UNLOCK(spl);
+                        return (TRUE);
+                    }
+                    /* Fixed: advance to the next hardware pte of
+                       this VM page; previously the same pte was
+                       tested ptes_per_vm_page times. */
+                    pte++;
+                } while (--i > 0);
+            }
+            simple_unlock(&pmap->lock);
+        }
+    }
+    PMAP_WRITE_UNLOCK(spl);
+    return (FALSE);
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+
+void pmap_clear_modify(phys_addr_t phys)
+{
+    /* Thin wrapper over the generic attribute-clearing helper. */
+    phys_attribute_clear(phys, PHYS_MODIFIED);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_modified(phys_addr_t phys)
+{
+    /* Thin wrapper over the generic attribute-testing helper. */
+    return (phys_attribute_test(phys, PHYS_MODIFIED));
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+
+void pmap_clear_reference(phys_addr_t phys)
+{
+    /* Thin wrapper over the generic attribute-clearing helper. */
+    phys_attribute_clear(phys, PHYS_REFERENCED);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_referenced(phys_addr_t phys)
+{
+    /* Thin wrapper over the generic attribute-testing helper. */
+    return (phys_attribute_test(phys, PHYS_REFERENCED));
+}
+
+#if NCPUS > 1
+/*
+* TLB Coherence Code (TLB "shootdown" code)
+*
+* Threads that belong to the same task share the same address space and
+* hence share a pmap. However, they may run on distinct cpus and thus
+* have distinct TLBs that cache page table entries. In order to guarantee
+* the TLBs are consistent, whenever a pmap is changed, all threads that
+* are active in that pmap must have their TLB updated. To keep track of
+* this information, the set of cpus that are currently using a pmap is
+* maintained within each pmap structure (cpus_using). Pmap_activate() and
+* pmap_deactivate add and remove, respectively, a cpu from this set.
+* Since the TLBs are not addressable over the bus, each processor must
+* flush its own TLB; a processor that needs to invalidate another TLB
+* needs to interrupt the processor that owns that TLB to signal the
+* update.
+*
+* Whenever a pmap is updated, the lock on that pmap is locked, and all
+* cpus using the pmap are signaled to invalidate. All threads that need
+* to activate a pmap must wait for the lock to clear to await any updates
+* in progress before using the pmap. They must ACQUIRE the lock to add
+* their cpu to the cpus_using set. An implicit assumption made
+* throughout the TLB code is that all kernel code that runs at or higher
+* than splvm blocks out update interrupts, and that such code does not
+* touch pageable pages.
+*
+* A shootdown interrupt serves another function besides signaling a
+* processor to invalidate. The interrupt routine (pmap_update_interrupt)
+* waits for both the pmap lock (and the kernel pmap lock) to clear,
+* preventing user code from making implicit pmap updates while the
+* sending processor is performing its update. (This could happen via a
+* user data write reference that turns on the modify bit in the page
+* table). It must wait for any kernel updates that may have started
+* concurrently with a user pmap update because the IPC code
+* changes mappings.
+* Spinning on the VALUES of the locks is sufficient (rather than
+* having to acquire the locks) because any updates that occur subsequent
+* to finding the lock unlocked will be signaled via another interrupt.
+* (This assumes the interrupt is cleared before the low level interrupt code
+* calls pmap_update_interrupt()).
+*
+* The signaling processor must wait for any implicit updates in progress
+* to terminate before continuing with its update. Thus it must wait for an
+* acknowledgement of the interrupt from each processor for which such
+* references could be made. For maintaining this information, a set
+* cpus_active is used. A cpu is in this set if and only if it can
+* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
+* this set; when all such cpus are removed, it is safe to update.
+*
+* Before attempting to acquire the update lock on a pmap, a cpu (A) must
+* be at least at the priority of the interprocessor interrupt
+* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
+* kernel update; it would spin forever in pmap_update_interrupt() trying
+* to acquire the user pmap lock it had already acquired. Furthermore A
+* must remove itself from cpus_active. Otherwise, another cpu holding
+* the lock (B) could be in the process of sending an update signal to A,
+* and thus be waiting for A to remove itself from cpus_active. If A is
+* spinning on the lock at priority this will never happen and a deadlock
+* will result.
+*/
+
+/*
+ * Signal another CPU that it must flush its TLB
+ */
+/*
+ * Queue a TLB-invalidation request (pmap, [start, end)) on the update
+ * list of every cpu in `use_list', then interrupt each cpu that is not
+ * idle.  Idle cpus are only flagged via cpu_update_needed[]; they pick
+ * the request up when they go active (see MARK_CPU_ACTIVE in pmap.h).
+ */
+void signal_cpus(
+ cpu_set use_list,
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ int which_cpu, j;
+ pmap_update_list_t update_list_p;
+
+ /* Process each cpu in use_list, lowest set bit first. */
+ while ((which_cpu = __builtin_ffs(use_list)) != 0) {
+ which_cpu -= 1; /* convert to 0 origin */
+
+ update_list_p = &cpu_update_list[which_cpu];
+ simple_lock(&update_list_p->lock);
+
+ j = update_list_p->count;
+ if (j >= UPDATE_LIST_SIZE) {
+ /*
+ * list overflowed. Change last item to
+ * indicate overflow.
+ */
+ /* Widening the last entry to cover all of user+kernel space
+  * makes the receiving cpu flush everything, which is always
+  * a safe over-approximation of the lost requests. */
+ update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
+ update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_USER_ADDRESS;
+ update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS;
+ }
+ else {
+ update_list_p->item[j].pmap = pmap;
+ update_list_p->item[j].start = start;
+ update_list_p->item[j].end = end;
+ update_list_p->count = j+1;
+ }
+ cpu_update_needed[which_cpu] = TRUE;
+ simple_unlock(&update_list_p->lock);
+
+ /* Full barrier: the queued request and cpu_update_needed flag
+  * must be globally visible before we test cpus_idle and send
+  * the cross-cpu interrupt. */
+ __sync_synchronize();
+ if (((cpus_idle & (1 << which_cpu)) == 0))
+ interrupt_processor(which_cpu);
+ use_list &= ~(1 << which_cpu);
+ }
+}
+
+/*
+ * This is called at splvm
+ */
+/*
+ * Drain this cpu's pending TLB-invalidation requests, flushing only the
+ * entries that concern `my_pmap' or the kernel pmap; requests for other
+ * user pmaps are discarded (their cpus will be signaled separately).
+ * Caller runs at splvm (see comment above).
+ */
+void process_pmap_updates(pmap_t my_pmap)
+{
+ int my_cpu = cpu_number();
+ pmap_update_list_t update_list_p;
+ int j;
+ pmap_t pmap;
+
+ update_list_p = &cpu_update_list[my_cpu];
+ /* NOTE(review): _nocheck lock variants are used here — presumably to
+  * bypass lock-order/ownership debugging checks in this interrupt-time
+  * path; confirm against the simple_lock implementation. */
+ simple_lock_nocheck(&update_list_p->lock);
+
+ for (j = 0; j < update_list_p->count; j++) {
+ pmap = update_list_p->item[j].pmap;
+ if (pmap == my_pmap ||
+ pmap == kernel_pmap) {
+
+ INVALIDATE_TLB(pmap,
+ update_list_p->item[j].start,
+ update_list_p->item[j].end);
+ }
+ }
+ /* Empty the list and clear the pending flag atomically with respect
+  * to signal_cpus(), which takes the same lock before appending. */
+ update_list_p->count = 0;
+ cpu_update_needed[my_cpu] = FALSE;
+ simple_unlock_nocheck(&update_list_p->lock);
+}
+
+/*
+ * Interrupt routine for TBIA requested from other processor.
+ */
+/*
+ * Handle a TLB-shootdown interrupt from another processor: leave the
+ * active set, wait for in-progress updates on our pmap and the kernel
+ * pmap to finish, flush our queued invalidations, and rejoin.  See the
+ * TLB Coherence commentary above for the full protocol.
+ */
+void pmap_update_interrupt(void)
+{
+ int my_cpu;
+ pmap_t my_pmap;
+ int s;
+
+ my_cpu = cpu_number();
+
+ /*
+ * Exit now if we're idle. We'll pick up the update request
+ * when we go active, and we must not put ourselves back in
+ * the active set because we'll never process the interrupt
+ * while we're idle (thus hanging the system).
+ */
+ if (cpus_idle & (1 << my_cpu))
+ return;
+
+ /* Determine which user pmap (if any) this cpu is running in; fall
+  * back to the kernel pmap when there is no current thread or the
+  * current pmap is not marked in use on this cpu. */
+ if (current_thread() == THREAD_NULL)
+ my_pmap = kernel_pmap;
+ else {
+ my_pmap = current_pmap();
+ if (!pmap_in_use(my_pmap, my_cpu))
+ my_pmap = kernel_pmap;
+ }
+
+ /*
+ * Raise spl to splvm (above splip) to block out pmap_extract
+ * from IO code (which would put this cpu back in the active
+ * set).
+ */
+ s = splvm();
+
+ do {
+
+ /*
+ * Indicate that we're not using either user or kernel
+ * pmap.
+ */
+ i_bit_clear(my_cpu, &cpus_active);
+
+ /*
+ * Wait for any pmap updates in progress, on either user
+ * or kernel pmap.
+ */
+ /* Spin on the lock VALUES only, never acquire — any update that
+  * starts after we see the lock clear will signal us again (see
+  * the TLB Coherence commentary above). */
+ while (my_pmap->lock.lock_data ||
+ kernel_pmap->lock.lock_data)
+ cpu_pause();
+
+ process_pmap_updates(my_pmap);
+
+ i_bit_set(my_cpu, &cpus_active);
+
+ /* Repeat if another request was queued while we were flushing. */
+ } while (cpu_update_needed[my_cpu]);
+
+ splx(s);
+}
+#else /* NCPUS > 1 */
+/*
+ * Dummy routine to satisfy external reference.
+ */
+/* Uniprocessor stub: with NCPUS == 1 there is no other TLB to shoot
+   down, so this exists only to satisfy the external reference. */
+void pmap_update_interrupt(void)
+{
+ /* should never be called. */
+}
+#endif /* NCPUS > 1 */
+
+#if defined(__i386__) || defined (__x86_64__)
+/* Unmap page 0 to trap NULL references. */
+/*
+ * Remove the kernel mapping of virtual page 0 so NULL dereferences in
+ * the kernel fault instead of silently reading BIOS data.
+ */
+void
+pmap_unmap_page_zero (void)
+{
+ int *pte;
+
+ printf("Unmapping the zero page. Some BIOS functions may not be working any more.\n");
+ pte = (int *) pmap_pte (kernel_pmap, 0);
+ if (!pte)
+ return;
+ /* NOTE(review): this assert can never fire — the NULL case already
+  * returned above; it is redundant and could be dropped. */
+ assert (pte);
+#ifdef MACH_PV_PAGETABLES
+ /* Under Xen paravirtualized page tables the PTE must be cleared
+  * through a hypercall on its machine address. */
+ if (!hyp_mmu_update_pte(kv_to_ma(pte), 0))
+ printf("couldn't unmap page 0\n");
+#else /* MACH_PV_PAGETABLES */
+ *pte = 0;
+ INVALIDATE_TLB(kernel_pmap, 0, PAGE_SIZE);
+#endif /* MACH_PV_PAGETABLES */
+}
+#endif /* __i386__ || __x86_64__ */
+
+/*
+ * Install the temporary boot-time identity mapping between physical
+ * memory and low linear memory, keep BIOS memory reachable for the
+ * Linux driver glue, pin the page directories read-only under Xen,
+ * and finally load the page-directory base register.
+ * Undone later by pmap_remove_temporary_mapping().
+ */
+void
+pmap_make_temporary_mapping(void)
+{
+ int i;
+ /*
+ * We'll have to temporarily install a direct mapping
+ * between physical memory and low linear memory,
+ * until we start using our new kernel segment descriptors.
+ */
+#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+ /* delta = |INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS|,
+  * computed with unsigned wrap-around arithmetic. */
+ vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
+ if ((vm_offset_t)(-delta) < delta)
+ delta = (vm_offset_t)(-delta);
+ /* Number of page-directory entries spanned by that distance. */
+ int nb_direct = delta >> PDESHIFT;
+ for (i = 0; i < nb_direct; i++)
+ kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] =
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS) + i];
+#endif
+
+#ifdef LINUX_DEV
+ /* We need BIOS memory mapped at 0xc0000 & co for BIOS accesses */
+#if VM_MIN_KERNEL_ADDRESS != 0
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
+#endif
+#endif /* LINUX_DEV */
+
+#ifdef MACH_PV_PAGETABLES
+ /* Xen requires live page-table pages to be mapped read-only. */
+#ifndef __x86_64__
+ const int PDPNUM_KERNEL = PDPNUM;
+#endif
+ for (i = 0; i < PDPNUM_KERNEL; i++)
+ pmap_set_page_readonly_init((void*) kernel_page_dir + i * INTEL_PGBYTES);
+#if PAE
+#ifndef __x86_64__
+ pmap_set_page_readonly_init(kernel_pmap->pdpbase);
+#endif
+#endif /* PAE */
+#endif /* MACH_PV_PAGETABLES */
+
+ pmap_set_page_dir();
+}
+
+/*
+ * Load CR3 with the physical address of the kernel pmap's top-level
+ * paging structure (l4 table on x86_64, PDPT on 32-bit PAE, page
+ * directory otherwise), enabling CR4.PAE first when required.
+ */
+void
+pmap_set_page_dir(void)
+{
+#if PAE
+#ifdef __x86_64__
+ set_cr3((unsigned long)_kvtophys(kernel_pmap->l4base));
+#else
+ set_cr3((unsigned long)_kvtophys(kernel_pmap->pdpbase));
+#endif
+#ifndef MACH_HYP
+ /* Native boot only: a Xen guest has PAE handled by the hypervisor. */
+ if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
+ panic("CPU doesn't have support for PAE.");
+ set_cr4(get_cr4() | CR4_PAE);
+#endif /* MACH_HYP */
+#else
+ set_cr3((unsigned long)_kvtophys(kernel_page_dir));
+#endif /* PAE */
+}
+
+/*
+ * Tear down the boot-time direct mapping installed by
+ * pmap_make_temporary_mapping(), re-establish the BIOS window needed
+ * by the Linux driver glue, release page 0 to Xen, and flush the TLB.
+ */
+void
+pmap_remove_temporary_mapping(void)
+{
+#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+ int i;
+ /* Same |difference| computation as in pmap_make_temporary_mapping. */
+ vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
+ if ((vm_offset_t)(-delta) < delta)
+ delta = (vm_offset_t)(-delta);
+ int nb_direct = delta >> PDESHIFT;
+ /* Get rid of the temporary direct mapping and flush it out of the TLB. */
+ for (i = 0 ; i < nb_direct; i++) {
+#ifdef MACH_XEN
+#ifdef MACH_PSEUDO_PHYS
+ if (!hyp_mmu_update_pte(kv_to_ma(&kernel_page_dir[lin2pdenum_cont(VM_MIN_KERNEL_ADDRESS) + i]), 0))
+#else /* MACH_PSEUDO_PHYS */
+ if (hyp_do_update_va_mapping(VM_MIN_KERNEL_ADDRESS + i * INTEL_PGBYTES, 0, UVMF_INVLPG | UVMF_ALL))
+#endif /* MACH_PSEUDO_PHYS */
+ printf("couldn't unmap frame %d\n", i);
+#else /* MACH_XEN */
+ kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] = 0;
+#endif /* MACH_XEN */
+ }
+#endif
+
+#ifdef LINUX_DEV
+ /* Keep BIOS memory mapped */
+#if VM_MIN_KERNEL_ADDRESS != 0
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
+#endif
+#endif /* LINUX_DEV */
+
+ /* Not used after boot, better give it back. */
+#ifdef MACH_XEN
+ hyp_free_page(0, (void*) VM_MIN_KERNEL_ADDRESS);
+#endif /* MACH_XEN */
+
+ flush_tlb();
+}
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
new file mode 100644
index 0000000..8b0eba0
--- /dev/null
+++ b/i386/intel/pmap.h
@@ -0,0 +1,574 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pmap.h
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine-dependent structures for the physical map module.
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_ 1
+
+#ifndef __ASSEMBLER__
+
+#include <kern/lock.h>
+#include <mach/machine/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <mach/kern_return.h>
+#include <mach/vm_prot.h>
+#include <i386/proc_reg.h>
+
+/*
+ * Define the generic in terms of the specific
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define INTEL_PGBYTES I386_PGBYTES
+#define INTEL_PGSHIFT I386_PGSHIFT
+#define intel_btop(x) i386_btop(x)
+#define intel_ptob(x) i386_ptob(x)
+#define intel_round_page(x) i386_round_page(x)
+#define intel_trunc_page(x) i386_trunc_page(x)
+#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
+#define round_intel_to_vm(x) round_i386_to_vm(x)
+#define vm_to_intel(x) vm_to_i386(x)
+#endif /* __i386__ || __x86_64__ */
+
+/*
+ * i386/i486 Page Table Entry
+ */
+
+typedef phys_addr_t pt_entry_t;
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#endif /* __ASSEMBLER__ */
+
+#define INTEL_OFFMASK 0xfff /* offset within page */
+#if PAE
+#ifdef __x86_64__
+#define L4SHIFT 39 /* L4 shift */
+#define L4MASK 0x1ff /* mask for L4 index */
+#define PDPNUM_KERNEL (((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) >> PDPSHIFT) + 1)
+#define PDPMASK 0x1ff /* mask for page directory pointer index */
+#else /* __x86_64__ */
+#define PDPNUM 4 /* number of page directory pointers */
+#define PDPMASK 3 /* mask for page directory pointer index */
+#endif /* __x86_64__ */
+#define PDPSHIFT 30 /* page directory pointer */
+#define PDESHIFT 21 /* page descriptor shift */
+#define PDEMASK 0x1ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x1ff /* mask for page table index */
+#else /* PAE */
+#define PDPNUM 1 /* number of page directory pointers */
+#define PDESHIFT 22 /* page descriptor shift */
+#define PDEMASK 0x3ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x3ff /* mask for page table index */
+#endif /* PAE */
+
+/*
+ * Convert linear offset to L4 pointer index
+ */
+#ifdef __x86_64__
+#define lin2l4num(a) (((a) >> L4SHIFT) & L4MASK)
+#endif
+
+/*
+ * Convert linear offset to page descriptor index
+ */
+#define lin2pdenum(a) (((a) >> PDESHIFT) & PDEMASK)
+
+#if PAE
+/* Special version assuming contiguous page directories. Making it
+ include the page directory pointer table index too. */
+#ifdef __x86_64__
+#define lin2pdenum_cont(a) (((a) >> PDESHIFT) & 0x3ff)
+#else
+#define lin2pdenum_cont(a) (((a) >> PDESHIFT) & 0x7ff)
+#endif
+#else
+#define lin2pdenum_cont(a) lin2pdenum(a)
+#endif
+
+/*
+ * Convert linear offset to page directory pointer index
+ */
+#if PAE
+#define lin2pdpnum(a) (((a) >> PDPSHIFT) & PDPMASK)
+#endif
+
+/*
+ * Convert page descriptor index to linear address
+ */
+#define pdenum2lin(a) ((vm_offset_t)(a) << PDESHIFT)
+
+#if PAE
+#ifdef __x86_64__
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l4num) << L4SHIFT) + \
+ ((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#else /* __x86_64__ */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+#else /* PAE */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+
+
+/*
+ * Convert linear offset to page table index
+ */
+#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
+
+#define NPTES (intel_ptob(1)/sizeof(pt_entry_t))
+#define NPDES (PDPNUM * (intel_ptob(1)/sizeof(pt_entry_t)))
+
+/*
+ * Hardware pte bit definitions (to be used directly on the ptes
+ * without using the bit fields).
+ */
+
+#define INTEL_PTE_VALID 0x00000001
+#define INTEL_PTE_WRITE 0x00000002
+#define INTEL_PTE_USER 0x00000004
+#define INTEL_PTE_WTHRU 0x00000008
+#define INTEL_PTE_NCACHE 0x00000010
+#define INTEL_PTE_REF 0x00000020
+#define INTEL_PTE_MOD 0x00000040
+#define INTEL_PTE_PS 0x00000080
+#ifdef MACH_PV_PAGETABLES
+/* Not supported */
+#define INTEL_PTE_GLOBAL 0x00000000
+#else /* MACH_PV_PAGETABLES */
+#define INTEL_PTE_GLOBAL 0x00000100
+#endif /* MACH_PV_PAGETABLES */
+#define INTEL_PTE_WIRED 0x00000200
+#ifdef PAE
+#ifdef __x86_64__
+#define INTEL_PTE_PFN 0xfffffffffffff000ULL
+#else /* __x86_64__ */
+#define INTEL_PTE_PFN 0x00007ffffffff000ULL
+#endif/* __x86_64__ */
+#else
+#define INTEL_PTE_PFN 0xfffff000
+#endif
+
+#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
+#ifdef MACH_PSEUDO_PHYS
+#define pte_to_pa(p) ma_to_pa((p) & INTEL_PTE_PFN)
+#else /* MACH_PSEUDO_PHYS */
+#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
+#endif /* MACH_PSEUDO_PHYS */
+#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
+
+/*
+ * Convert page table entry to kernel virtual address
+ */
+#define ptetokv(a) (phystokv(pte_to_pa(a)))
+
+#ifndef __ASSEMBLER__
+typedef volatile long cpu_set; /* set of CPUs - must be <= 32 */
+ /* changed by other processors */
+
+/*
+ * Machine-dependent physical map: the root page-table pointer for the
+ * configured paging mode, plus bookkeeping shared with the machine-
+ * independent VM layer (reference count, lock, statistics, and the set
+ * of cpus currently running in this pmap).
+ */
+struct pmap {
+#if ! PAE
+ pt_entry_t *dirbase; /* page directory table */
+#else /* PAE */
+#ifdef __x86_64__
+ pt_entry_t *l4base; /* l4 table */
+#ifdef MACH_HYP
+ pt_entry_t *user_l4base; /* Userland l4 table */
+ pt_entry_t *user_pdpbase; /* Userland pdp table */
+#endif /* MACH_HYP */
+#else /* x86_64 */
+ pt_entry_t *pdpbase; /* page directory pointer table */
+#endif /* x86_64 */
+#endif /* PAE */
+ int ref_count; /* reference count */
+ decl_simple_lock_data(,lock)
+ /* lock on map */
+ struct pmap_statistics stats; /* map statistics */
+ cpu_set cpus_using; /* bitmap of cpus using pmap */
+};
+
+typedef struct pmap *pmap_t;
+
+#define PMAP_NULL ((pmap_t) 0)
+
+#ifdef MACH_PV_PAGETABLES
+extern void pmap_set_page_readwrite(void *addr);
+extern void pmap_set_page_readonly(void *addr);
+extern void pmap_set_page_readonly_init(void *addr);
+extern void pmap_map_mfn(void *addr, unsigned long mfn);
+extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
+#endif /* MACH_PV_PAGETABLES */
+
+#if PAE
+#ifdef __x86_64__
+/* TODO: support PCID */
+#ifdef MACH_HYP
+#define set_pmap(pmap) \
+ MACRO_BEGIN \
+ set_cr3(kvtophys((vm_offset_t)(pmap)->l4base)); \
+ if (pmap->user_l4base) \
+ if (!hyp_set_user_cr3(kvtophys((vm_offset_t)(pmap)->user_l4base))) \
+ panic("set_user_cr3"); \
+ MACRO_END
+#else /* MACH_HYP */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->l4base))
+#endif /* MACH_HYP */
+#else /* x86_64 */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->pdpbase))
+#endif /* x86_64 */
+#else /* PAE */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->dirbase))
+#endif /* PAE */
+
+typedef struct {
+ pt_entry_t *entry;
+ vm_offset_t vaddr;
+} pmap_mapwindow_t;
+
+extern pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry);
+extern void pmap_put_mapwindow(pmap_mapwindow_t *map);
+
+#define PMAP_NMAPWINDOWS 2 /* Per CPU */
+
+#if NCPUS > 1
+/*
+ * List of cpus that are actively using mapped memory. Any
+ * pmap update operation must wait for all cpus in this list.
+ * Update operations must still be queued to cpus not in this
+ * list.
+ */
+extern cpu_set cpus_active;
+
+/*
+ * List of cpus that are idle, but still operating, and will want
+ * to see any kernel pmap updates when they become active.
+ */
+extern cpu_set cpus_idle;
+
+/*
+ * Quick test for pmap update requests.
+ */
+extern volatile
+boolean_t cpu_update_needed[NCPUS];
+
+/*
+ * External declarations for PMAP_ACTIVATE.
+ */
+
+void process_pmap_updates(pmap_t);
+extern pmap_t kernel_pmap;
+
+#endif /* NCPUS > 1 */
+
+void pmap_update_interrupt(void);
+
+/*
+ * Machine dependent routines that are used only for i386/i486.
+ */
+
+pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
+
+/*
+ * Macros for speed.
+ */
+
+#if NCPUS > 1
+
+/*
+ * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
+ * fields to control TLB invalidation on other CPUS.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&kernel_pmap->lock); \
+ \
+ /* \
+ * Process invalidate requests for the kernel pmap. \
+ */ \
+ if (cpu_update_needed[(my_cpu)]) \
+ process_pmap_updates(kernel_pmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&kernel_pmap->lock); \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ \
+ if (tpmap == kernel_pmap) { \
+ /* \
+ * If this is the kernel pmap, switch to its page tables. \
+ */ \
+ set_pmap(tpmap); \
+ } \
+ else { \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&tpmap->lock); \
+ \
+ /* \
+ * No need to invalidate the TLB - the entire user pmap \
+ * will be invalidated by reloading dirbase. \
+ */ \
+ set_pmap(tpmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &tpmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&tpmap->lock); \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ \
+ /* \
+ * Do nothing if this is the kernel pmap. \
+ */ \
+ if (tpmap != kernel_pmap) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &(pmap)->cpus_using); \
+ } \
+}
+
+#define MARK_CPU_IDLE(my_cpu) { \
+ /* \
+ * Mark this cpu idle, and remove it from the active set, \
+ * since it is not actively using any pmap. Signal_cpus \
+ * will notice that it is idle, and avoid signaling it, \
+ * but will queue the update request for when the cpu \
+ * becomes active. \
+ */ \
+ int s = splvm(); \
+ i_bit_set((my_cpu), &cpus_idle); \
+ i_bit_clear((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#define MARK_CPU_ACTIVE(my_cpu) { \
+ \
+ int s = splvm(); \
+ /* \
+ * If a kernel_pmap update was requested while this cpu \
+ * was idle, process it as if we got the interrupt. \
+ * Before doing so, remove this cpu from the idle set. \
+ * Since we do not grab any pmap locks while we flush \
+ * our TLB, another cpu may start an update operation \
+ * before we finish. Removing this cpu from the idle \
+ * set assures that we will receive another update \
+ * interrupt if this happens. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_idle); \
+ __sync_synchronize(); \
+ \
+ if (cpu_update_needed[(my_cpu)]) \
+ pmap_update_interrupt(); \
+ \
+ /* \
+ * Mark that this cpu is now active. \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#else /* NCPUS > 1 */
+
+/*
+ * With only one CPU, we just have to indicate whether the pmap is
+ * in use.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ (void) (my_cpu); \
+ kernel_pmap->cpus_using = TRUE; \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ (void) (my_cpu); \
+ kernel_pmap->cpus_using = FALSE; \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ (void) (th); \
+ (void) (my_cpu); \
+ \
+ set_pmap(tpmap); \
+ if (tpmap != kernel_pmap) { \
+ tpmap->cpus_using = TRUE; \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+ (void) (thread); \
+ (void) (cpu); \
+ if ((pmap) != kernel_pmap) \
+ (pmap)->cpus_using = FALSE; \
+}
+
+#endif /* NCPUS > 1 */
+
+#define PMAP_CONTEXT(pmap, thread)
+
+#define pmap_kernel() (kernel_pmap)
+#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
+#define pmap_phys_address(frame) ((intel_ptob((phys_addr_t) frame)))
+#define pmap_phys_to_frame(phys) ((int) (intel_btop(phys)))
+#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
+#define pmap_attribute(pmap,addr,size,attr,value) \
+ (KERN_INVALID_ADDRESS)
+
+extern pt_entry_t *kernel_page_dir;
+
+extern vm_offset_t kernel_virtual_start;
+extern vm_offset_t kernel_virtual_end;
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Allocate the kernel page directory and page tables,
+ * and direct-map all physical memory.
+ * Called with mapping off.
+ */
+extern void pmap_bootstrap(void);
+
+extern void pmap_set_page_dir(void);
+extern void pmap_make_temporary_mapping(void);
+extern void pmap_remove_temporary_mapping(void);
+
+extern void pmap_unmap_page_zero (void);
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ */
+extern void pmap_zero_page (phys_addr_t);
+
+/*
+ * pmap_copy_page copies the specified (machine independent) pages.
+ */
+extern void pmap_copy_page (phys_addr_t, phys_addr_t);
+
+/*
+ * copy_to_phys(src_addr_v, dst_addr_p, count)
+ *
+ * Copy virtual memory to physical memory
+ */
+extern void
+copy_to_phys(
+ vm_offset_t src_addr_v,
+ phys_addr_t dst_addr_p,
+ int count);
+
+/*
+ * copy_from_phys(src_addr_p, dst_addr_v, count)
+ *
+ * Copy physical memory to virtual memory. The virtual memory
+ * is assumed to be present (e.g. the buffer pool).
+ */
+extern void
+copy_from_phys(
+ phys_addr_t src_addr_p,
+ vm_offset_t dst_addr_v,
+ int count);
+
+/*
+ * kvtophys(addr)
+ *
+ * Convert a kernel virtual address to a physical address
+ */
+extern phys_addr_t kvtophys (vm_offset_t);
+
+#if NCPUS > 1
+void signal_cpus(
+ cpu_set use_list,
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end);
+#endif /* NCPUS > 1 */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _PMAP_MACHINE_ */
diff --git a/i386/intel/read_fault.c b/i386/intel/read_fault.c
new file mode 100644
index 0000000..0b79e3d
--- /dev/null
+++ b/i386/intel/read_fault.c
@@ -0,0 +1,178 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <vm/vm_fault.h>
+#include <mach/kern_return.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+
+#include <kern/macros.h>
+
+#if !(__i486__ || __i586__ || __i686__)
+/*
+ * Expansion of vm_fault for read fault in kernel mode.
+ * Must enter the mapping as writable, since the i386
+ * ignores write protection in kernel mode.
+ */
+/*
+ * Resolve a kernel-mode read fault at `vaddr' in `map'.  Looks up the
+ * backing object, pages the data in via vm_fault_page(), revalidates
+ * the map version, and enters the page writable (see the comment above:
+ * the i386 ignores write protection in kernel mode).
+ * Returns KERN_SUCCESS, KERN_MEMORY_ERROR, or a vm_map_lookup error.
+ */
+kern_return_t
+intel_read_fault(
+ vm_map_t map,
+ vm_offset_t vaddr)
+{
+ vm_map_version_t version; /* Map version for
+ verification */
+ vm_object_t object; /* Top-level object */
+ vm_offset_t offset; /* Top-level offset */
+ vm_prot_t prot; /* Protection for mapping */
+ vm_page_t result_page; /* Result of vm_fault_page */
+ vm_page_t top_page; /* Placeholder page */
+ boolean_t wired; /* Is map region wired? */
+ kern_return_t result;
+ vm_page_t m;
+
+ RetryFault:
+
+ /*
+ * Find the backing store object and offset into it
+ * to begin search.
+ */
+ result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+ &object, &offset, &prot, &wired);
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ /*
+ * Make a reference to this object to prevent its
+ * disposal while we are playing with it.
+ */
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ vm_object_paging_begin(object);
+
+ result = vm_fault_page(object, offset, VM_PROT_READ, FALSE, TRUE,
+ &prot, &result_page, &top_page,
+ FALSE, (void (*)()) 0);
+
+ if (result != VM_FAULT_SUCCESS) {
+ vm_object_deallocate(object);
+
+ /* Transient failures retry from scratch; only a hard memory
+  * error is reported to the caller. */
+ switch (result) {
+ case VM_FAULT_RETRY:
+ goto RetryFault;
+ case VM_FAULT_INTERRUPTED:
+ return (KERN_SUCCESS);
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ goto RetryFault;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ goto RetryFault;
+ case VM_FAULT_MEMORY_ERROR:
+ return (KERN_MEMORY_ERROR);
+ }
+ }
+
+ m = result_page;
+
+ /*
+ * How to clean up the result of vm_fault_page. This
+ * happens whether the mapping is entered or not.
+ */
+
+#define UNLOCK_AND_DEALLOCATE \
+ MACRO_BEGIN \
+ vm_fault_cleanup(m->object, top_page); \
+ vm_object_deallocate(object); \
+ MACRO_END
+
+ /*
+ * What to do with the resulting page from vm_fault_page
+ * if it doesn't get entered into the physical map:
+ */
+
+#define RELEASE_PAGE(m) \
+ MACRO_BEGIN \
+ PAGE_WAKEUP_DONE(m); \
+ vm_page_lock_queues(); \
+ if (!m->active && !m->inactive) \
+ vm_page_activate(m); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+ /*
+ * We must verify that the maps have not changed.
+ */
+ vm_object_unlock(m->object);
+ while (!vm_map_verify(map, &version)) {
+ vm_object_t retry_object;
+ vm_offset_t retry_offset;
+ vm_prot_t retry_prot;
+
+ /* The map changed while we were paging: look the address up
+  * again and make sure it still resolves to the same object
+  * and offset; otherwise drop the page and start over. */
+ result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+ &retry_object, &retry_offset, &retry_prot,
+ &wired);
+ if (result != KERN_SUCCESS) {
+ vm_object_lock(m->object);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ return (result);
+ }
+
+ vm_object_unlock(retry_object);
+
+ if (retry_object != object || retry_offset != offset) {
+ vm_object_lock(m->object);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ goto RetryFault;
+ }
+ }
+
+ /*
+ * Put the page in the physical map.
+ */
+ PMAP_ENTER(map->pmap, vaddr, m, VM_PROT_READ|VM_PROT_WRITE, wired);
+
+ vm_object_lock(m->object);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ m->reference = TRUE;
+ vm_page_unlock_queues();
+
+ vm_map_verify_done(map, &version);
+ PAGE_WAKEUP_DONE(m);
+
+ UNLOCK_AND_DEALLOCATE;
+
+#undef UNLOCK_AND_DEALLOCATE
+#undef RELEASE_PAGE
+
+ return (KERN_SUCCESS);
+}
+#endif
diff --git a/i386/intel/read_fault.h b/i386/intel/read_fault.h
new file mode 100644
index 0000000..8aa3f03
--- /dev/null
+++ b/i386/intel/read_fault.h
@@ -0,0 +1,35 @@
+/*
+ * Kernel read_fault on i386 functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Kernel read_fault on i386 functions.
+ *
+ */
+
+#ifndef _READ_FAULT_H_
+#define _READ_FAULT_H_
+
+#include <mach/std_types.h>
+
+extern kern_return_t intel_read_fault(
+ vm_map_t map,
+ vm_offset_t vaddr);
+
+#endif /* _READ_FAULT_H_ */
diff --git a/i386/ldscript b/i386/ldscript
new file mode 100644
index 0000000..ddbbf91
--- /dev/null
+++ b/i386/ldscript
@@ -0,0 +1,201 @@
+/* Default linker script, for normal executables */
+OUTPUT_FORMAT("elf32-i386", "elf32-i386",
+ "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+ /*
+ * There are specific requirements about entry points, so we have it
+ * configurable via `_START': `.text' will begin there and `.text.start' will
+ * be first in there. See also `i386/i386at/boothdr.S' and
+ * `gnumach_LINKFLAGS' in `i386/Makefrag.am'.
+ */
+ . = _START;
+ .text :
+ AT (_START_MAP)
+ {
+ *(.text.start)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ *(.text.unlikely .text.*_unlikely)
+ KEEP (*(.text.*personality*))
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ } =0x90909090
+ .init :
+ {
+ KEEP (*(.init))
+ } =0x90909090
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0x90909090
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = .);
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rel.ifunc : { *(.rel.ifunc) }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .plt : { *(.plt) *(.iplt) }
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (12, .);
+ .got.plt : { *(.got.plt) *(.igot.plt) }
+ .data :
+ {
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ __bss_start = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections.
+ FIXME: Why do we need it? When there is no .bss section, we don't
+ pad the .data section. */
+ . = ALIGN(. != 0 ? 32 / 8 : 1);
+ }
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ _end = .; PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
+}
diff --git a/i386/linux/Makefrag.am b/i386/linux/Makefrag.am
new file mode 100644
index 0000000..87b1ae2
--- /dev/null
+++ b/i386/linux/Makefrag.am
@@ -0,0 +1,25 @@
+# Makefile fragment for i386-specific Linux code.
+
+# Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Files for device driver support.
+#
+
+liblinux_a_SOURCES += \
+ i386/linux/dev/include/linux/autoconf.h \
+ linux/src/arch/i386/lib/semaphore.S
diff --git a/i386/linux/dev/include/linux/autoconf.h b/i386/linux/dev/include/linux/autoconf.h
new file mode 100644
index 0000000..bd035d4
--- /dev/null
+++ b/i386/linux/dev/include/linux/autoconf.h
@@ -0,0 +1,284 @@
+/*
+ * Automatically generated C config: don't edit
+ */
+#define AUTOCONF_INCLUDED
+
+/*
+ * Code maturity level options
+ */
+#define CONFIG_EXPERIMENTAL 1
+
+/*
+ * Loadable module support
+ */
+#undef CONFIG_MODULES
+
+/*
+ * General setup
+ */
+#undef CONFIG_MATH_EMULATION
+#define CONFIG_NET 1
+#undef CONFIG_MAX_16M
+#define CONFIG_PCI 1
+#define CONFIG_PCI_OPTIMIZE 1
+#define CONFIG_SYSVIPC 1
+#undef CONFIG_BINFMT_AOUT
+#define CONFIG_BINFMT_ELF 1
+#undef CONFIG_BINFMT_JAVA
+#define CONFIG_KERNEL_ELF 1
+
+#if 0
+#undef CONFIG_M386
+#define CONFIG_M486 1
+#undef CONFIG_M586
+#undef CONFIG_M686
+#endif
+
+#if NCPUS > 1
+#define CONFIG_SMP 1
+#endif
+
+/*
+ * Floppy, IDE, and other block devices
+ */
+#if 0
+#define CONFIG_BLK_DEV_FD 1
+#define CONFIG_BLK_DEV_IDE 1
+#endif
+
+/*
+ * Please see Documentation/ide.txt for help/info on IDE drives
+ */
+#undef CONFIG_BLK_DEV_HD_IDE
+#define CONFIG_BLK_DEV_IDECD 1
+#undef CONFIG_BLK_DEV_IDETAPE
+#undef CONFIG_BLK_DEV_IDEFLOPPY
+#undef CONFIG_BLK_DEV_IDESCSI
+#undef CONFIG_BLK_DEV_IDE_PCMCIA
+#undef CONFIG_BLK_DEV_CMD640
+#undef CONFIG_BLK_DEV_CMD640_ENHANCED
+#define CONFIG_BLK_DEV_RZ1000 1
+#define CONFIG_BLK_DEV_TRITON 1
+#undef CONFIG_IDE_CHIPSETS
+
+/*
+ * Additional Block Devices
+ */
+#undef CONFIG_BLK_DEV_LOOP
+#undef CONFIG_BLK_DEV_MD
+#undef CONFIG_BLK_DEV_RAM
+#undef CONFIG_BLK_DEV_XD
+#undef CONFIG_BLK_DEV_HD
+
+/*
+ * Networking options
+ */
+#if 0
+#undef CONFIG_FIREWALL
+#undef CONFIG_NET_ALIAS
+#define CONFIG_INET 1
+#undef CONFIG_IP_FORWARD
+#undef CONFIG_IP_MULTICAST
+#undef CONFIG_SYN_COOKIES
+#undef CONFIG_RST_COOKIES
+#undef CONFIG_IP_ACCT
+#undef CONFIG_IP_ROUTER
+#undef CONFIG_NET_IPIP
+#endif
+
+/*
+ * (it is safe to leave these untouched)
+ */
+#undef CONFIG_INET_PCTCP
+#undef CONFIG_INET_RARP
+#undef CONFIG_NO_PATH_MTU_DISCOVERY
+#undef CONFIG_IP_NOSR
+#undef CONFIG_SKB_LARGE
+
+/*
+ *
+ */
+#undef CONFIG_IPX
+#undef CONFIG_ATALK
+#undef CONFIG_AX25
+#undef CONFIG_BRIDGE
+#undef CONFIG_NETLINK
+
+/*
+ * SCSI support
+ */
+#if 0
+#define CONFIG_SCSI 1
+#endif
+
+/*
+ * SCSI support type (disk, tape, CD-ROM)
+ */
+#define CONFIG_BLK_DEV_SD 1
+#undef CONFIG_CHR_DEV_ST
+#define CONFIG_BLK_DEV_SR 1
+#undef CONFIG_CHR_DEV_SG
+
+/*
+ * Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+ */
+#if 0
+#undef CONFIG_SCSI_MULTI_LUN
+#undef CONFIG_SCSI_CONSTANTS
+
+/*
+ * SCSI low-level drivers
+ */
+#undef CONFIG_SCSI_7000FASST
+#undef CONFIG_SCSI_AHA152X
+#undef CONFIG_SCSI_AHA1542
+#undef CONFIG_SCSI_AHA1740
+#undef CONFIG_SCSI_AIC7XXX
+#undef CONFIG_SCSI_ADVANSYS
+#undef CONFIG_SCSI_IN2000
+#undef CONFIG_SCSI_AM53C974
+#undef CONFIG_SCSI_BUSLOGIC
+#undef CONFIG_SCSI_DTC3280
+#undef CONFIG_SCSI_EATA_DMA
+#undef CONFIG_SCSI_EATA_PIO
+#undef CONFIG_SCSI_EATA
+#undef CONFIG_SCSI_FUTURE_DOMAIN
+#undef CONFIG_SCSI_GENERIC_NCR5380
+#undef CONFIG_SCSI_NCR53C406A
+#undef CONFIG_SCSI_NCR53C7xx
+#undef CONFIG_SCSI_NCR53C8XX
+#undef CONFIG_SCSI_DC390W
+#undef CONFIG_SCSI_PPA
+#undef CONFIG_SCSI_PAS16
+#undef CONFIG_SCSI_QLOGIC_FAS
+#undef CONFIG_SCSI_QLOGIC_ISP
+#undef CONFIG_SCSI_SEAGATE
+#undef CONFIG_SCSI_DC390T
+#undef CONFIG_SCSI_T128
+#undef CONFIG_SCSI_U14_34F
+#undef CONFIG_SCSI_ULTRASTOR
+#undef CONFIG_SCSI_GDTH
+#endif
+
+/*
+ * Network device support
+ */
+#define CONFIG_NETDEVICES 1
+#undef CONFIG_DUMMY
+#undef CONFIG_EQUALIZER
+#undef CONFIG_DLCI
+#undef CONFIG_PLIP
+#undef CONFIG_PPP
+#undef CONFIG_SLIP
+#undef CONFIG_NET_RADIO
+
+#if 0
+#define CONFIG_NET_ETHERNET 1
+#define CONFIG_NET_VENDOR_3COM 1
+#undef CONFIG_EL1
+#undef CONFIG_EL2
+#undef CONFIG_ELPLUS
+#undef CONFIG_EL16
+#undef CONFIG_EL3
+#undef CONFIG_VORTEX
+#undef CONFIG_LANCE
+#undef CONFIG_NET_VENDOR_SMC
+#define CONFIG_NET_ISA 1
+#undef CONFIG_AT1700
+#undef CONFIG_E2100
+#undef CONFIG_DEPCA
+#undef CONFIG_EWRK3
+#undef CONFIG_EEXPRESS
+#undef CONFIG_EEXPRESS_PRO
+#undef CONFIG_FMV18X
+#undef CONFIG_HPLAN_PLUS
+#undef CONFIG_HPLAN
+#undef CONFIG_HP100
+#undef CONFIG_ETH16I
+#undef CONFIG_NE2000
+#undef CONFIG_NI52
+#undef CONFIG_NI65
+#undef CONFIG_SEEQ8005
+#undef CONFIG_SK_G16
+#undef CONFIG_NET_EISA
+#undef CONFIG_NET_POCKET
+#undef CONFIG_TR
+#undef CONFIG_FDDI
+#undef CONFIG_ARCNET
+#endif
+
+/*
+ * ISDN subsystem
+ */
+#undef CONFIG_ISDN
+
+/*
+ * CD-ROM drivers (not for SCSI or IDE/ATAPI drives)
+ */
+#undef CONFIG_CD_NO_IDESCSI
+
+/*
+ * Filesystems
+ */
+#undef CONFIG_QUOTA
+#define CONFIG_MINIX_FS 1
+#undef CONFIG_EXT_FS
+#define CONFIG_EXT2_FS 1
+#undef CONFIG_XIA_FS
+#define CONFIG_FAT_FS 1
+#define CONFIG_MSDOS_FS 1
+#define CONFIG_VFAT_FS 1
+#define CONFIG_UMSDOS_FS 1
+#define CONFIG_PROC_FS 1
+#define CONFIG_NFS_FS 1
+#undef CONFIG_ROOT_NFS
+#undef CONFIG_SMB_FS
+#define CONFIG_ISO9660_FS 1
+#define CONFIG_HPFS_FS 1
+#define CONFIG_SYSV_FS 1
+#undef CONFIG_AUTOFS_FS
+#define CONFIG_AFFS_FS 1
+#undef CONFIG_AMIGA_PARTITION
+#define CONFIG_UFS_FS 1
+
+/* We want Linux's partitioning code to do only the DOS partition table,
+ since the Mach glue code does BSD disklabels for us. */
+#undef CONFIG_BSD_DISKLABEL
+#undef CONFIG_SMD_DISKLABEL
+
+#define CONFIG_GPT_DISKLABEL 1
+
+/*
+ * Character devices
+ */
+#if 0
+#define CONFIG_SERIAL 1
+#undef CONFIG_DIGI
+#undef CONFIG_CYCLADES
+#undef CONFIG_STALDRV
+#undef CONFIG_RISCOM8
+#define CONFIG_PRINTER 1
+#undef CONFIG_SPECIALIX
+#define CONFIG_MOUSE 1
+#undef CONFIG_ATIXL_BUSMOUSE
+#undef CONFIG_BUSMOUSE
+#undef CONFIG_MS_BUSMOUSE
+#define CONFIG_PSMOUSE 1
+#undef CONFIG_82C710_MOUSE
+#undef CONFIG_UMISC
+#undef CONFIG_QIC02_TAPE
+#undef CONFIG_FTAPE
+#undef CONFIG_APM
+#undef CONFIG_WATCHDOG
+#undef CONFIG_RTC
+#endif
+
+/*
+ * Sound
+ */
+#undef CONFIG_SOUND
+
+/*
+ * Kernel hacking
+ */
+#undef CONFIG_PROFILE
diff --git a/i386/xen/Makefrag.am b/i386/xen/Makefrag.am
new file mode 100644
index 0000000..ecb33ff
--- /dev/null
+++ b/i386/xen/Makefrag.am
@@ -0,0 +1,34 @@
+# Makefile fragment for the ix86 specific part of the Xen platform.
+
+# Copyright (C) 2007 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Xen support.
+#
+
+libkernel_a_SOURCES += \
+ i386/xen/xen.c \
+ i386/xen/xen_locore.S \
+ i386/xen/xen_boothdr.S
+
+
+if PLATFORM_xen
+gnumach_LINKFLAGS += \
+ --defsym _START=0xC0000000 \
+ --defsym _START_MAP=0xC0000000 \
+ -T '$(srcdir)'/i386/ldscript
+endif
diff --git a/i386/xen/xen.c b/i386/xen/xen.c
new file mode 100644
index 0000000..5309675
--- /dev/null
+++ b/i386/xen/xen.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <kern/printf.h>
+#include <kern/debug.h>
+#include <kern/mach_clock.h>
+
+#include <mach/machine/eflags.h>
+#include <machine/thread.h>
+#include <machine/ipl.h>
+#include <machine/model_dep.h>
+
+#include <xen/xen.h>
+
+unsigned long cr3;
+
+void hyp_failsafe_c_callback(struct failsafe_callback_regs *regs) {
+ printf("Fail-Safe callback!\n");
+ printf("IP: %08X CS: %4X DS: %4X ES: %4X FS: %4X GS: %4X FLAGS %08X MASK %04X\n", regs->ip, regs->cs_and_mask & 0xffff, regs->ds, regs->es, regs->fs, regs->gs, regs->flags, regs->cs_and_mask >> 16);
+ panic("failsafe");
+}
+
+extern char return_to_iret[];
+
+void hypclock_machine_intr(int old_ipl, void *ret_addr, struct i386_interrupt_state *regs, uint64_t delta) {
+ if (ret_addr == &return_to_iret) {
+ clock_interrupt(delta/1000, /* usec per tick */
+ (regs->efl & EFL_VM) || /* user mode */
+ ((regs->cs & 0x02) != 0), /* user mode */
+ old_ipl == SPL0, /* base priority */
+ regs->eip); /* interrupted eip */
+ } else
+ clock_interrupt(delta/1000, FALSE, FALSE, 0);
+}
+
+void hyp_p2m_init(void) {
+ unsigned long nb_pfns = vm_page_table_size();
+#ifdef MACH_PSEUDO_PHYS
+#define P2M_PAGE_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+ unsigned long *l3 = (unsigned long *)phystokv(pmap_grab_page()), *l2 = NULL;
+ unsigned long i;
+
+ for (i = 0; i < (nb_pfns + P2M_PAGE_ENTRIES) / P2M_PAGE_ENTRIES; i++) {
+ if (!(i % P2M_PAGE_ENTRIES)) {
+ l2 = (unsigned long *) phystokv(pmap_grab_page());
+ l3[i / P2M_PAGE_ENTRIES] = kv_to_mfn(l2);
+ }
+ l2[i % P2M_PAGE_ENTRIES] = kv_to_mfn(&mfn_list[i * P2M_PAGE_ENTRIES]);
+ }
+
+ hyp_shared_info.arch.pfn_to_mfn_frame_list_list = kv_to_mfn(l3);
+#endif
+ hyp_shared_info.arch.max_pfn = nb_pfns;
+}
diff --git a/i386/xen/xen_boothdr.S b/i386/xen/xen_boothdr.S
new file mode 100644
index 0000000..4704c66
--- /dev/null
+++ b/i386/xen/xen_boothdr.S
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2006-2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <xen/public/elfnote.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=GNU Mach"
+ .ascii ",GUEST_VERSION=1.3"
+ .ascii ",XEN_VER=xen-3.0"
+ .ascii ",VIRT_BASE=0xC0000000"
+ .ascii ",ELF_PADDR_OFFSET=0xC0000000"
+ .ascii ",HYPERCALL_PAGE=0x2"
+#if PAE
+ .ascii ",PAE=yes[extended-cr3]"
+#else
+ .ascii ",PAE=no"
+#endif
+ .ascii ",LOADER=generic"
+#ifdef MACH_PSEUDO_PHYS
+ .ascii ",FEATURES=pae_pgdir_above_4gb"
+#else /* MACH_PSEUDO_PHYS */
+ .ascii ",FEATURES=!auto_translated_physmap"
+#endif
+#ifndef MACH_PV_PAGETABLES
+ .ascii "|!writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
+#ifndef MACH_PV_DESCRIPTORS
+ .ascii "|!writable_descriptor_tables"
+#endif /* MACH_PV_DESCRIPTORS */
+#ifndef MACH_RING1
+ .ascii "|!supervisor_mode_kernel"
+#endif /* MACH_RING1 */
+ .byte 0
+
+/* Macro taken from linux/include/linux/elfnote.h */
+#define ELFNOTE(name, type, desctype, descdata) \
+.pushsection .note.name ; \
+ .align 4 ; \
+ .long 2f - 1f /* namesz */ ; \
+ .long 4f - 3f /* descsz */ ; \
+ .long type ; \
+1:.asciz "name" ; \
+2:.align 4 ; \
+3:desctype descdata ; \
+4:.align 4 ; \
+.popsection ;
+
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "GNU Mach")
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "1.3")
+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, _START)
+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, _START)
+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, start)
+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypcalls)
+#if PAE
+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes[extended-cr3]")
+#else
+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
+#endif
+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, ""
+#ifdef MACH_PSEUDO_PHYS
+ "pae_pgdir_above_4gb"
+#else /* MACH_PSEUDO_PHYS */
+ "!auto_translated_physmap"
+#endif
+#ifndef MACH_PV_PAGETABLES
+ "|!writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
+#ifndef MACH_PV_DESCRIPTORS
+ "|!writable_descriptor_tables"
+#endif /* MACH_PV_DESCRIPTORS */
+#ifndef MACH_RING1
+ "|!supervisor_mode_kernel"
+#endif /* MACH_RING1 */
+ )
+
+#include <mach/machine/asm.h>
+
+#include <i386/i386/i386asm.h>
+
+ .text
+ .globl gdt, ldt
+ .globl start, _start, gdt
+start:
+_start:
+
+ /* Switch to our own interrupt stack. */
+ movl $(_intstack+INTSTACK_SIZE),%eax
+ movl %eax,%esp
+
+ /* Reset EFLAGS to a known state. */
+ pushl $0
+ popf
+
+ /* Push the start_info pointer to be the second argument. */
+ subl $KERNELBASE,%esi
+ pushl %esi
+
+ /* Fix ifunc entries */
+ movl $__rel_iplt_start,%esi
+ movl $__rel_iplt_end,%edi
+iplt_cont:
+ cmpl %edi,%esi
+ jae iplt_done
+ movl (%esi),%ebx /* r_offset */
+ movb 4(%esi),%al /* info */
+ cmpb $42,%al /* IRELATIVE */
+ jnz iplt_next
+ call *(%ebx) /* call ifunc */
+ movl %eax,(%ebx) /* fixed address */
+iplt_next:
+ addl $8,%esi
+ jmp iplt_cont
+iplt_done:
+
+ /* Jump into C code. */
+ call EXT(c_boot_entry)
+
+/* Those need to be aligned on page boundaries. */
+.global hyp_shared_info, hypcalls
+
+ .org (start + 0x1000)
+hyp_shared_info:
+ .org hyp_shared_info + 0x1000
+
+/* Labels just for debuggers */
+#define hypcall(name, n) \
+ .org hypcalls + n*32 ; \
+.globl __hyp_##name ; \
+__hyp_##name:
+
+hypcalls:
+ hypcall(set_trap_table, 0)
+ hypcall(mmu_update, 1)
+ hypcall(set_gdt, 2)
+ hypcall(stack_switch, 3)
+ hypcall(set_callbacks, 4)
+ hypcall(fpu_taskswitch, 5)
+ hypcall(sched_op_compat, 6)
+ hypcall(platform_op, 7)
+ hypcall(set_debugreg, 8)
+ hypcall(get_debugreg, 9)
+ hypcall(update_descriptor, 10)
+ hypcall(memory_op, 12)
+ hypcall(multicall, 13)
+ hypcall(update_va_mapping, 14)
+ hypcall(set_timer_op, 15)
+ hypcall(event_channel_op_compat, 16)
+ hypcall(xen_version, 17)
+ hypcall(console_io, 18)
+ hypcall(physdev_op_compat, 19)
+ hypcall(grant_table_op, 20)
+ hypcall(vm_assist, 21)
+ hypcall(update_va_mapping_otherdomain, 22)
+ hypcall(iret, 23)
+ hypcall(vcpu_op, 24)
+ hypcall(set_segment_base, 25)
+ hypcall(mmuext_op, 26)
+ hypcall(acm_op, 27)
+ hypcall(nmi_op, 28)
+ hypcall(sched_op, 29)
+ hypcall(callback_op, 30)
+ hypcall(xenoprof_op, 31)
+ hypcall(event_channel_op, 32)
+ hypcall(physdev_op, 33)
+ hypcall(hvm_op, 34)
+ hypcall(sysctl, 35)
+ hypcall(domctl, 36)
+ hypcall(kexec_op, 37)
+
+ hypcall(arch_0, 48)
+ hypcall(arch_1, 49)
+ hypcall(arch_2, 50)
+ hypcall(arch_3, 51)
+ hypcall(arch_4, 52)
+ hypcall(arch_5, 53)
+ hypcall(arch_6, 54)
+ hypcall(arch_7, 55)
+
+ .org hypcalls + 0x1000
+
+gdt:
+ .org gdt + 0x1000
+
+ldt:
+ .org ldt + 0x1000
+
+stack:
+ .long _intstack+INTSTACK_SIZE,0xe021
+ .comm _intstack,INTSTACK_SIZE
+ .comm _eintstack,0
+
diff --git a/i386/xen/xen_locore.S b/i386/xen/xen_locore.S
new file mode 100644
index 0000000..1468ef8
--- /dev/null
+++ b/i386/xen/xen_locore.S
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/i386asm.h>
+#include <i386/cpu_number.h>
+#include <i386/xen.h>
+
+ .data 2
+int_active:
+ .long 0
+
+
+ .text
+ .globl hyp_callback, hyp_failsafe_callback
+ P2ALIGN(TEXT_ALIGN)
+hyp_callback:
+ pushl %eax
+ jmp EXT(all_intrs)
+
+ENTRY(interrupt)
+ incl int_active /* currently handling interrupts */
+ call EXT(hyp_c_callback) /* call generic interrupt routine */
+ decl int_active /* stopped handling interrupts */
+ sti
+ ret
+
+/* FIXME: if we're _very_ unlucky, we may be re-interrupted, filling stack
+ *
+ * Far from trivial, see mini-os. That said, maybe we could just, before popping
+ * everything (which is _not_ destructive), save sp into a known place and use
+ * it+jmp back?
+ *
+ * Mmm, there seems to be an iret hypcall that does exactly what we want:
+ * perform iret, and if IF is set, clear the interrupt mask.
+ */
+
+/* Pfff, we have to check pending interrupts ourselves. Some other DomUs just make a hypercall for retriggering the irq. Not sure it's really easier/faster */
+ENTRY(hyp_sti)
+ pushl %ebp
+ movl %esp, %ebp
+_hyp_sti:
+ movb $0,hyp_shared_info+CPU_CLI /* Enable interrupts */
+ cmpl $0,int_active /* Check whether we were already checking pending interrupts */
+ jz 0f
+ popl %ebp
+ ret /* Already active, just return */
+0:
+ /* Not active, check pending interrupts by hand */
+ /* no memory barrier needed on x86 */
+ cmpb $0,hyp_shared_info+CPU_PENDING
+ jne 0f
+ popl %ebp
+ ret
+0:
+ movb $0xff,hyp_shared_info+CPU_CLI
+1:
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ incl int_active /* currently handling interrupts */
+
+ pushl $0
+ pushl $0
+ call EXT(hyp_c_callback)
+ popl %edx
+ popl %edx
+
+ popl %edx
+ popl %ecx
+ popl %eax
+ decl int_active /* stopped handling interrupts */
+ cmpb $0,hyp_shared_info+CPU_PENDING
+ jne 1b
+ jmp _hyp_sti
+
+/* Hypervisor failed to reload segments. Dump them. */
+hyp_failsafe_callback:
+#if 1
+ /* load sane segments */
+ mov %ss, %ax
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
+ push %esp
+ call EXT(hyp_failsafe_c_callback)
+#else
+ popl %ds
+ popl %es
+ popl %fs
+ popl %gs
+ iret
+#endif
diff --git a/include/alloca.h b/include/alloca.h
new file mode 100644
index 0000000..29081ca
--- /dev/null
+++ b/include/alloca.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_ALLOCA_H_
+#define _MACH_ALLOCA_H_
+
+#define alloca(size) __builtin_alloca(size)
+
+#endif /* _MACH_ALLOCA_H_ */
diff --git a/include/cache.h b/include/cache.h
new file mode 100644
index 0000000..6260366
--- /dev/null
+++ b/include/cache.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MACH_CACHE_H_
+#define _MACH_CACHE_H_
+
+/* This macro can be used to align statically allocated objects so
+ that they start at a cache line. */
+#define __cacheline_aligned __attribute__((aligned(1 << CPU_L1_SHIFT)))
+
+#endif /* _MACH_CACHE_H_ */
diff --git a/include/device/audio_status.h b/include/device/audio_status.h
new file mode 100644
index 0000000..7effe99
--- /dev/null
+++ b/include/device/audio_status.h
@@ -0,0 +1,164 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1991, 1992 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEVICE_AUDIO_STATUS_H_
+#define _DEVICE_AUDIO_STATUS_H_
+
+/*
+ * Access to ADC devices, such as the AMD 79C30A/32A.
+ */
+
+/*
+ * Programmable gains, see tables in device drivers
+ * for detailed mapping to device specifics.
+ */
+#define AUDIO_MIN_GAIN (0)
+#define AUDIO_MAX_GAIN (255)
+
+/*
+ * Encoding of audio samples
+ */
+#define AUDIO_ENCODING_ULAW (1)
+#define AUDIO_ENCODING_ALAW (2)
+
+/*
+ * Selection of input/output jack
+ */
+#define AUDIO_MIKE 1
+
+#define AUDIO_SPEAKER 1
+#define AUDIO_HEADPHONE 2
+
+/*
+ * Programming information from/to user application.
+ * Only portions of this might be available on any given chip.
+ */
+struct audio_prinfo {
+ unsigned int sample_rate;
+ unsigned int channels;
+ unsigned int precision;
+ unsigned int encoding;
+ unsigned int gain;
+ unsigned int port; /* input/output jack */
+ unsigned int seek; /* BSD extension */
+ unsigned int ispare[3];
+ unsigned int samples;
+ unsigned int eof;
+
+ unsigned char pause;
+ unsigned char error;
+ unsigned char waiting;
+ unsigned char cspare[3];
+ unsigned char open;
+ unsigned char active;
+
+};
+
+struct audio_info {
+ struct audio_prinfo play;
+ struct audio_prinfo record;
+ unsigned int monitor_gain;
+ /* BSD extensions */
+ unsigned int blocksize; /* input blocking threshold */
+ unsigned int hiwat; /* output high water mark */
+ unsigned int lowat; /* output low water mark */
+ unsigned int backlog; /* samples of output backlog to gen. */
+};
+
+typedef struct audio_info audio_info_t;
+
+#define AUDIO_INITINFO(p)\
+ (void)memset((void *)(p), 0xff, sizeof(struct audio_info))
+
+#define AUDIO_GETINFO _IOR('A', 21, audio_info_t)
+#define AUDIO_SETINFO _IOWR('A', 22, audio_info_t)
+#define AUDIO_DRAIN _IO('A', 23)
+#define AUDIO_FLUSH _IO('A', 24)
+#define AUDIO_WSEEK _IOR('A', 25, unsigned int)
+#define AUDIO_RERROR _IOR('A', 26, int)
+#define AUDIO_WERROR _IOR('A', 27, int)
+
+/*
+ * Low level interface to the amd79c30.
+ * Internal registers of the MAP block,
+ * the Main Audio Processor.
+ */
+struct mapreg {
+ unsigned short mr_x[8];
+ unsigned short mr_r[8];
+ unsigned short mr_gx;
+ unsigned short mr_gr;
+ unsigned short mr_ger;
+ unsigned short mr_stgr;
+ unsigned short mr_ftgr;
+ unsigned short mr_atgr;
+ unsigned char mr_mmr1;
+ unsigned char mr_mmr2;
+};
+
+#define AUDIO_GETMAP _IOR('A', 27, struct mapreg)
+#define AUDIO_SETMAP _IOW('A', 28, struct mapreg)
+
+/*
+ * Compatibility with Sun interface
+ */
+struct audio_ioctl {
+ short control;
+ unsigned char data[46];
+};
+
+#define AUDIOGETREG _IOWR('i',1,struct audio_ioctl)
+#define AUDIOSETREG _IOW('i',2,struct audio_ioctl)
+
+#endif /* _DEVICE_AUDIO_STATUS_H_ */
diff --git a/include/device/bpf.h b/include/device/bpf.h
new file mode 100644
index 0000000..abc2d77
--- /dev/null
+++ b/include/device/bpf.h
@@ -0,0 +1,244 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Berkeley Packet Filter Definitions from Berkeley
+ */
+
+/*-
+ * Copyright (c) 1990-1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.h 7.1 (Berkeley) 5/7/91
+ *
+ */
+
+#ifndef _DEVICE_BPF_H_
+#define _DEVICE_BPF_H_
+
+/*
+ * Alignment macros. BPF_WORDALIGN rounds up to the next
+ * even multiple of BPF_ALIGNMENT.
+ */
+#define BPF_ALIGNMENT sizeof(int)
+#define BPF_WORDALIGN(x) (((x)+(BPF_ALIGNMENT-1))&~(BPF_ALIGNMENT-1))
+
+/*
+ * Struct returned by BIOCVERSION. This represents the version number of
+ * the filter language described by the instruction encodings below.
+ * bpf understands a program iff kernel_major == filter_major &&
+ * kernel_minor >= filter_minor, that is, if the value returned by the
+ * running kernel has the same major number and a minor number equal
+ * to or less than the filter being downloaded. Otherwise, the
+ * results are undefined, meaning an error may be returned or packets
+ * may be accepted haphazardly.
+ * It has nothing to do with the source code version.
+ */
+struct bpf_version {
+ unsigned short bv_major;
+ unsigned short bv_minor;
+};
+/* Current version number. */
+#define BPF_MAJOR_VERSION 1
+#define BPF_MINOR_VERSION 1
+
+/*
+ * Data-link level type codes.
+ * Currently, only DLT_EN10MB and DLT_SLIP are supported.
+ */
+#define DLT_NULL 0 /* no link-layer encapsulation */
+#define DLT_EN10MB 1 /* Ethernet (10Mb) */
+#define DLT_EN3MB 2 /* Experimental Ethernet (3Mb) */
+#define DLT_AX25 3 /* Amateur Radio AX.25 */
+#define DLT_PRONET 4 /* Proteon ProNET Token Ring */
+#define DLT_CHAOS 5 /* Chaos */
+#define DLT_IEEE802 6 /* IEEE 802 Networks */
+#define DLT_ARCNET 7 /* ARCNET */
+#define DLT_SLIP 8 /* Serial Line IP */
+#define DLT_PPP 9 /* Point-to-point Protocol */
+#define DLT_FDDI 10 /* FDDI */
+
+/*
+ * The instruction encodings.
+ */
+
+/* Magic number and flags for the first instruction */
+#define BPF_BEGIN NETF_BPF
+#define BPF_IN NETF_IN
+#define BPF_OUT NETF_OUT
+
+/* instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD 0x00
+#define BPF_LDX 0x01
+#define BPF_ST 0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+#define BPF_MISC 0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code) ((code) & 0x18)
+#define BPF_W 0x00
+#define BPF_H 0x08
+#define BPF_B 0x10
+#define BPF_MODE(code) ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+
+/* alu/jmp fields */
+#define BPF_OP(code) ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define BPF_OR 0x40
+#define BPF_AND 0x50
+#define BPF_LSH 0x60
+#define BPF_RSH 0x70
+#define BPF_NEG 0x80
+#define BPF_JA 0x00
+#define BPF_JEQ 0x10
+#define BPF_JGT 0x20
+#define BPF_JGE 0x30
+#define BPF_JSET 0x40
+#define BPF_CKMATCH_IMM 0x50
+#define BPF_SRC(code) ((code) & 0x08)
+#define BPF_K 0x00
+#define BPF_X 0x08
+
+/* ret - BPF_K and BPF_X also apply */
+#define BPF_RVAL(code) ((code) & 0x38)
+#define BPF_A 0x10
+#define BPF_MATCH_IMM 0x18
+#define BPF_MATCH_DATA 0x20
+
+/* misc */
+#define BPF_MISCOP(code) ((code) & 0xf8)
+#define BPF_TAX 0x00
+#define BPF_TXA 0x80
+#define BPF_KEY 0x10
+#define BPF_REG_DATA 0x18
+#define BPF_POSTPONE 0x20
+
+/*
+ * The instruction data structure.
+ */
+struct bpf_insn {
+ unsigned short code;
+ unsigned char jt;
+ unsigned char jf;
+ int k;
+};
+typedef struct bpf_insn *bpf_insn_t;
+
+/*
+ * largest bpf program size
+ */
+#define NET_MAX_BPF ((NET_MAX_FILTER*sizeof(filter_t))/sizeof(struct bpf_insn))
+
+/*
+ * Macros for insn array initializers.
+ */
+#define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k }
+#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
+#define BPF_RETMATCH(code, k, nkey) { (unsigned short)(code), nkey, 0, k }
+
+#define BPF_INSN_STMT(pc, c, n) {\
+ (pc)->code = (c); \
+ (pc)->jt = (pc)->jf = 0; \
+ (pc)->k = (n); \
+ (pc)++; \
+}
+
+#define BPF_INSN_JUMP(pc, c, n, jtrue, jfalse) {\
+ (pc)->code = (c); \
+ (pc)->jt = (jtrue); \
+ (pc)->jf = (jfalse); \
+ (pc)->k = (n); \
+ (pc)++; \
+}
+
+#define BPF_INSN_RETMATCH(pc, c, n, nkey) {\
+ (pc)->code = (c); \
+ (pc)->jt = (nkey); \
+ (pc)->jf = 0; \
+ (pc)->k = (n); \
+ (pc)++; \
+}
+
+/*
+ * Number of scratch memory words (for BPF_LD|BPF_MEM and BPF_ST).
+ */
+#define BPF_MEMWORDS 16
+
+/*
+ * Link level header can be accessed by adding BPF_DLBASE to an offset.
+ */
+#define BPF_DLBASE (1<<30)
+
+#define BPF_BYTES(n) ((n) * sizeof (struct bpf_insn))
+#define BPF_BYTES2LEN(n) ((n) / sizeof (struct bpf_insn))
+#define BPF_INSN_EQ(p,q) ((p)->code == (q)->code && \
+ (p)->jt == (q)->jt && \
+ (p)->jf == (q)->jf && \
+ (p)->k == (q)->k)
+
+#endif /* _DEVICE_BPF_H_ */
diff --git a/include/device/device.defs b/include/device/device.defs
new file mode 100644
index 0000000..7f31612
--- /dev/null
+++ b/include/device/device.defs
@@ -0,0 +1,183 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: device/device.defs
+ * Author: Douglas Orr
+ * Feb 10, 1988
+ * Abstract:
+ * Mach device support. Mach devices are accessed through
+ * block and character device interfaces to the kernel.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif
+ device 2800;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <device/device_types.defs>
+
+serverprefix ds_;
+
+type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE | polymorphic
+ ctype: mach_port_t
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
+
+/* Deprecated in favor of device_open_new. */
+routine device_open(
+ master_port : mach_port_t;
+ sreplyport reply_port : reply_port_t;
+ mode : dev_mode_t;
+ name : dev_name_t;
+ out device : device_t =
+ MACH_MSG_TYPE_PORT_SEND
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ outtran: mach_port_t convert_device_to_port(device_t)
+#else
+#ifdef DEVICE_OUTTRAN
+ outtran: DEVICE_OUTTRAN
+#endif
+#endif /* KERNEL_SERVER */
+ );
+
+routine device_close(
+ device : device_t
+ );
+
+routine device_write(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_t;
+ out bytes_written : int
+ );
+
+routine device_write_inband(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_inband_t;
+ out bytes_written : int
+ );
+
+routine device_read(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int;
+ out data : io_buf_ptr_t, dealloc
+ );
+
+routine device_read_inband(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int;
+ out data : io_buf_ptr_inband_t
+ );
+
+#if defined(KERNEL_SERVER) || defined(DEVICE_ENABLE_DEVICE_OPEN_NEW)
+routine device_open_new(
+ master_port : mach_port_t;
+ sreplyport reply_port : reply_port_t;
+ mode : dev_mode_t;
+ name : new_dev_name_t;
+ out device : device_t =
+ MACH_MSG_TYPE_PORT_SEND
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ outtran: mach_port_t convert_device_to_port(device_t)
+#else
+#ifdef DEVICE_OUTTRAN
+ outtran: DEVICE_OUTTRAN
+#endif
+#endif /* KERNEL_SERVER */
+ );
+#else
+skip; /* old xxx_device_set_status */
+#endif
+
+skip; /* old xxx_device_get_status */
+skip; /* old xxx_device_set_filter*/
+
+routine device_map(
+ device : device_t;
+ in prot : vm_prot_t;
+ in offset : vm_offset_t;
+ in size : vm_size_t;
+ out pager : memory_object_t;
+ in unmap : int
+ );
+
+routine device_set_status(
+ device : device_t;
+ in flavor : dev_flavor_t;
+ in status : dev_status_t
+ );
+
+routine device_get_status(
+ device : device_t;
+ in flavor : dev_flavor_t;
+ out status : dev_status_t, CountInOut
+ );
+
+routine device_set_filter(
+ device : device_t;
+ in receive_port : mach_port_send_t;
+ in priority : int;
+ in filter : filter_array_t
+ );
+
+routine device_intr_register(
+ device : device_t;
+ in id : int;
+ in flags : int;
+ in receive_port : mach_port_send_t
+ );
+
+/*
+ * Acknowledge the specified interrupt notification.
+ */
+/*
+ * When an IRQ happens and an intr notification is thus sent, the IRQ line
+ * is kept disabled until the notification is acknowledged with this RPC
+ */
+routine device_intr_ack(
+ device : device_t;
+ in receive_port : mach_port_send_t);
+
diff --git a/include/device/device_reply.defs b/include/device/device_reply.defs
new file mode 100644
index 0000000..5a32507
--- /dev/null
+++ b/include/device/device_reply.defs
@@ -0,0 +1,110 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Reply-only side of device interface.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif
+ device_reply 2900;
+ /* to match reply numbers for device.defs */
+
+/*
+ * Device_write_reply (only user of this data type) deallocates
+ * the data.
+ */
+
+
+#include <mach/std_types.defs>
+#include <device/device_types.defs>
+
+userprefix ds_;
+
+#if SEQNOS
+serverprefix seqnos_;
+serverdemux seqnos_device_reply_server;
+#endif /* SEQNOS */
+
+type reply_port_t = polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
+
+simpleroutine device_open_reply(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in device_port : mach_port_make_send_t
+ );
+
+skip; /* device_close */
+
+simpleroutine device_write_reply(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in bytes_written : int
+ );
+
+simpleroutine device_write_reply_inband(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in bytes_written : int
+ );
+
+simpleroutine device_read_reply(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in data : io_buf_ptr_t, dealloc
+ );
+
+simpleroutine device_read_reply_inband(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in data : io_buf_ptr_inband_t
+ );
diff --git a/include/device/device_request.defs b/include/device/device_request.defs
new file mode 100644
index 0000000..a8af3a8
--- /dev/null
+++ b/include/device/device_request.defs
@@ -0,0 +1,95 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Request-only side of device interface.
+ */
+
+subsystem device_request 2800; /* to match device.defs */
+
+#include <device/device_types.defs>
+
+serverprefix ds_;
+
+type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE
+ ctype: mach_port_t
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
+
+/* Deprecated in favor of device_open_new_request. */
+simpleroutine device_open_request(
+ device_server_port : mach_port_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in name : dev_name_t
+ );
+
+skip; /* device_close */
+
+simpleroutine device_write_request(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_t
+ );
+
+simpleroutine device_write_request_inband(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_inband_t
+ );
+
+simpleroutine device_read_request(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int
+ );
+
+simpleroutine device_read_request_inband(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int
+ );
+
+simpleroutine device_open_new_request(
+ device_server_port : mach_port_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in name : new_dev_name_t
+ );
diff --git a/include/device/device_types.defs b/include/device/device_types.defs
new file mode 100644
index 0000000..c74bff5
--- /dev/null
+++ b/include/device/device_types.defs
@@ -0,0 +1,92 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ *
+ * Common definitions for device interface types.
+ */
+
+#ifndef _DEVICE_DEVICE_TYPES_DEFS_
+#define _DEVICE_DEVICE_TYPES_DEFS_
+
+/*
+ * Basic types
+ */
+
+#include <mach/std_types.defs>
+
+#ifdef DEVICE_IMPORTS
+DEVICE_IMPORTS
+#endif
+
+type rpc_recnum_t = rpc_long_natural_t;
+type recnum_t = rpc_recnum_t
+#if defined(KERNEL_SERVER)
+ intran: recnum_t convert_long_natural_from_user(rpc_recnum_t)
+ outtran: rpc_recnum_t convert_long_natural_to_user(recnum_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_recnum_t
+#endif
+ ;
+
+type dev_mode_t = uint32_t;
+type dev_flavor_t = uint32_t;
+type dev_name_t = (MACH_MSG_TYPE_STRING_C, 8*128);
+type new_dev_name_t = c_string[128]
+ ctype: dev_name_t;
+type dev_status_t = array[*:1024] of int;
+type io_buf_ptr_t = ^array[] of MACH_MSG_TYPE_INTEGER_8;
+type io_buf_ptr_inband_t= array[*:128] of char;
+type filter_t = short;
+type filter_array_t = array[*:128] of filter_t;
+
+type device_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: device_t dev_port_lookup(mach_port_t)
+ outtran: mach_port_t convert_device_to_port(device_t)
+ destructor: device_deallocate(device_t)
+#else /* KERNEL_SERVER */
+#ifdef DEVICE_INTRAN
+ intran: DEVICE_INTRAN
+#endif
+#ifdef DEVICE_INTRAN_PAYLOAD
+ intranpayload: DEVICE_INTRAN_PAYLOAD
+#endif
+#ifdef DEVICE_OUTTRAN
+ outtran: DEVICE_OUTTRAN
+#endif
+#ifdef DEVICE_DESTRUCTOR
+ destructor: DEVICE_DESTRUCTOR
+#endif
+#endif /* KERNEL_SERVER */
+ ;
+
+import <device/device_types.h>;
+import <device/net_status.h>;
+
+#endif /* _DEVICE_DEVICE_TYPES_DEFS_ */
diff --git a/include/device/device_types.h b/include/device/device_types.h
new file mode 100644
index 0000000..583d9e0
--- /dev/null
+++ b/include/device/device_types.h
@@ -0,0 +1,148 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+#ifndef DEVICE_TYPES_H
+#define DEVICE_TYPES_H
+
+/*
+ * Types for device interface.
+ */
+#include <mach/std_types.h>
+
+#ifdef MACH_KERNEL
+/*
+ * Get kernel-only type definitions.
+ */
+#include <device/device_types_kernel.h>
+
+#else /* MACH_KERNEL */
+/*
+ * Device handle.
+ */
+typedef mach_port_t device_t;
+
+#endif /* MACH_KERNEL */
+
+/*
+ * Device name string
+ */
+typedef char dev_name_t[128]; /* must match device_types.defs */
+typedef const char *const_dev_name_t;
+
+/*
+ * Mode for open/read/write
+ */
+typedef unsigned int dev_mode_t;
+#define D_READ 0x1 /* read */
+#define D_WRITE 0x2 /* write */
+#define D_NODELAY 0x4 /* no delay on open */
+#define D_NOWAIT 0x8 /* do not wait if data not available */
+
+/*
+ * IO buffer - out-of-line array of characters.
+ */
+typedef char * io_buf_ptr_t;
+typedef const char * const_io_buf_ptr_t;
+
+/*
+ * IO buffer - in-line array of characters.
+ */
+#define IO_INBAND_MAX (128) /* must match device_types.defs */
+typedef char io_buf_ptr_inband_t[IO_INBAND_MAX];
+typedef const char *const_io_buf_ptr_inband_t;
+
+/*
+ * IO buffer vector - for scatter/gather IO.
+ */
+typedef struct {
+ vm_offset_t data;
+ vm_size_t count;
+} io_buf_vec_t;
+typedef struct {
+ rpc_vm_offset_t data;
+ rpc_vm_size_t count;
+} rpc_io_buf_vec_t;
+
+/*
+ * Record number for random-access devices
+ */
+typedef long_natural_t recnum_t;
+typedef rpc_long_natural_t rpc_recnum_t;
+
+/*
+ * Flavors of set/get statuses
+ */
+typedef unsigned int dev_flavor_t;
+
+/*
+ * Generic array for get/set status
+ */
+typedef int *dev_status_t; /* Variable-length array of integers */
+#define DEV_STATUS_MAX (1024) /* Maximum array size */
+
+typedef int dev_status_data_t[DEV_STATUS_MAX];
+
+/*
+ * Mandatory get/set status operations
+ */
+
+/* size a device: op code and indexes for returned values */
+#define DEV_GET_SIZE 0
+# define DEV_GET_SIZE_DEVICE_SIZE 0 /* 0 if unknown */
+# define DEV_GET_SIZE_RECORD_SIZE 1 /* 1 if sequential */
+#define DEV_GET_SIZE_COUNT 2
+/* size a device in record numbers, not bytes */
+#define DEV_GET_RECORDS 1
+# define DEV_GET_RECORDS_DEVICE_RECORDS 0 /* 0 if unknown */
+# define DEV_GET_RECORDS_RECORD_SIZE 1 /* 1 if sequential */
+#define DEV_GET_RECORDS_COUNT 2
+
+/*
+ * Device error codes
+ */
+typedef int io_return_t;
+
+#define D_IO_QUEUED (-1) /* IO queued - do not return result */
+#define D_SUCCESS 0
+
+#define D_IO_ERROR 2500 /* hardware IO error */
+#define D_WOULD_BLOCK 2501 /* would block, but D_NOWAIT set */
+#define D_NO_SUCH_DEVICE 2502 /* no such device */
+#define D_ALREADY_OPEN 2503 /* exclusive-use device already open */
+#define D_DEVICE_DOWN 2504 /* device has been shut down */
+#define D_INVALID_OPERATION 2505 /* bad operation for device */
+#define D_INVALID_RECNUM 2506 /* invalid record (block) number */
+#define D_INVALID_SIZE 2507 /* invalid IO size */
+#define D_NO_MEMORY 2508 /* memory allocation failure */
+#define D_READ_ONLY 2509 /* device cannot be written to */
+
+void device_deallocate(device_t);
+
+#endif /* DEVICE_TYPES_H */
diff --git a/include/device/disk_status.h b/include/device/disk_status.h
new file mode 100644
index 0000000..a6ed106
--- /dev/null
+++ b/include/device/disk_status.h
@@ -0,0 +1,318 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1987, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)disklabel.h 7.10 (Berkeley) 6/27/88
+ */
+
+#ifndef _DISK_STATUS_H_
+#define _DISK_STATUS_H_
+
+/*
+ * Each disk has a label which includes information about the hardware
+ * disk geometry, filesystem partitions, and drive specific information.
+ * The label is in block 0 or 1, possibly offset from the beginning
+ * to leave room for a bootstrap, etc.
+ */
+
+#define LABELSECTOR 0 /* sector containing label */
+#define LABELOFFSET 64 /* offset of label in sector */
+#define DISKMAGIC ((unsigned int) 0x82564557U) /* The disk magic number */
+#ifndef MAXPARTITIONS
+#define MAXPARTITIONS 8
+#endif
+
+
+#ifndef LOCORE
+struct disklabel {
+ unsigned int d_magic; /* the magic number */
+ short d_type; /* drive type */
+ short d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ /*
+ * d_packname contains the pack identifier and is returned when
+ * the disklabel is read off the disk or in-core copy.
+ * d_boot0 and d_boot1 are the (optional) names of the
+ * primary (block 0) and secondary (block 1-15) bootstraps
+ * as found in /usr/mdec. These are returned when using
+ * getdiskbyname(3) to retrieve the values from /etc/disktab.
+ */
+#if defined(MACH_KERNEL) || defined(STANDALONE)
+ char d_packname[16]; /* pack identifier */
+#else
+ union {
+ char un_d_packname[16]; /* pack identifier */
+ struct {
+ char *un_d_boot0; /* primary bootstrap name */
+ char *un_d_boot1; /* secondary bootstrap name */
+ } un_b;
+ } d_un;
+#define d_packname d_un.un_d_packname
+#define d_boot0 d_un.un_b.un_d_boot0
+#define d_boot1 d_un.un_b.un_d_boot1
+#endif /* ! MACH_KERNEL or STANDALONE */
+ /* disk geometry: */
+ unsigned int d_secsize; /* # of bytes per sector */
+ unsigned int d_nsectors; /* # of data sectors per track */
+ unsigned int d_ntracks; /* # of tracks per cylinder */
+ unsigned int d_ncylinders; /* # of data cylinders per unit */
+ unsigned int d_secpercyl; /* # of data sectors per cylinder */
+ unsigned int d_secperunit; /* # of data sectors per unit */
+ /*
+ * Spares (bad sector replacements) below
+ * are not counted in d_nsectors or d_secpercyl.
+ * Spare sectors are assumed to be physical sectors
+ * which occupy space at the end of each track and/or cylinder.
+ */
+ unsigned short d_sparespertrack; /* # of spare sectors per track */
+ unsigned short d_sparespercyl; /* # of spare sectors per cylinder */
+ /*
+ * Alternate cylinders include maintenance, replacement,
+ * configuration description areas, etc.
+ */
+ unsigned int d_acylinders; /* # of alt. cylinders per unit */
+
+ /* hardware characteristics: */
+ /*
+ * d_interleave, d_trackskew and d_cylskew describe perturbations
+ * in the media format used to compensate for a slow controller.
+ * Interleave is physical sector interleave, set up by the formatter
+ * or controller when formatting. When interleaving is in use,
+ * logically adjacent sectors are not physically contiguous,
+ * but instead are separated by some number of sectors.
+ * It is specified as the ratio of physical sectors traversed
+ * per logical sector. Thus an interleave of 1:1 implies contiguous
+ * layout, while 2:1 implies that logical sector 0 is separated
+ * by one sector from logical sector 1.
+ * d_trackskew is the offset of sector 0 on track N
+ * relative to sector 0 on track N-1 on the same cylinder.
+ * Finally, d_cylskew is the offset of sector 0 on cylinder N
+ * relative to sector 0 on cylinder N-1.
+ */
+ unsigned short d_rpm; /* rotational speed */
+ unsigned short d_interleave; /* hardware sector interleave */
+ unsigned short d_trackskew; /* sector 0 skew, per track */
+ unsigned short d_cylskew; /* sector 0 skew, per cylinder */
+ unsigned int d_headswitch; /* head switch time, usec */
+ unsigned int d_trkseek; /* track-to-track seek, usec */
+ unsigned int d_flags; /* generic flags */
+#define NDDATA 5
+ unsigned int d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ unsigned int d_spare[NSPARE]; /* reserved for future use */
+ unsigned int d_magic2; /* the magic number (again) */
+ unsigned short d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ unsigned short d_npartitions; /* number of partitions in following */
+ unsigned int d_bbsize; /* size of boot area at sn0, bytes */
+ unsigned int d_sbsize; /* max size of fs superblock, bytes */
+ struct partition { /* the partition table */
+ unsigned int p_size; /* number of sectors in partition */
+ unsigned int p_offset; /* starting sector */
+ unsigned int p_fsize; /* filesystem basic fragment size */
+ unsigned char p_fstype; /* filesystem type, see below */
+ unsigned char p_frag; /* filesystem fragments per block */
+ unsigned short p_cpg; /* filesystem cylinders per group */
+ } d_partitions[MAXPARTITIONS+1]; /* actually may be more */
+
+#if defined(alpha) && defined(MACH_KERNEL)
+ /*
+ * Disgusting hack. If this structure contains a pointer,
+ * as it does for non-kernel, then the compiler rounds
+ * the size to make it pointer-sized properly (arrays of..).
+ * But if I define the pointer for the kernel then instances
+ * of this structure better be aligned otherwise picking
+ * up a short might be done by too-smart compilers (GCC) with
+ * a load-long instruction expecting the short to be aligned.
+ * I bet the OSF folks stomped into this too, since they use
+ * the same disgusting hack below.. [whatelse can I do ??]
+ */
+ int bugfix;
+#endif
+};
+#else /* LOCORE */
+ /*
+ * offsets for asm boot files.
+ */
+ .set d_secsize,40
+ .set d_nsectors,44
+ .set d_ntracks,48
+ .set d_ncylinders,52
+ .set d_secpercyl,56
+ .set d_secperunit,60
+ .set d_end_,276 /* size of disk label */
+#endif /* LOCORE */
+
+/* d_type values: */
+#define DTYPE_SMD 1 /* SMD, XSMD; VAX hp/up */
+#define DTYPE_MSCP 2 /* MSCP */
+#define DTYPE_DEC 3 /* other DEC (rk, rl) */
+#define DTYPE_SCSI 4 /* SCSI */
+#define DTYPE_ESDI 5 /* ESDI interface */
+#define DTYPE_ST506 6 /* ST506 etc. */
+#define DTYPE_FLOPPY 10 /* floppy */
+
+#ifdef DKTYPENAMES
+static char *dktypenames[] = {
+ "unknown",
+ "SMD",
+ "MSCP",
+ "old DEC",
+ "SCSI",
+ "ESDI",
+ "type 6",
+ "type 7",
+ "type 8",
+ "type 9",
+ "floppy",
+ 0
+};
+#define DKMAXTYPES (sizeof(dktypenames) / sizeof(dktypenames[0]) - 1)
+#endif
+
+/*
+ * Filesystem type and version.
+ * Used to interpret other filesystem-specific
+ * per-partition information.
+ */
+#define FS_UNUSED 0 /* unused */
+#define FS_SWAP 1 /* swap */
+#define FS_V6 2 /* Sixth Edition */
+#define FS_V7 3 /* Seventh Edition */
+#define FS_SYSV 4 /* System V */
+#define FS_V71K 5 /* V7 with 1K blocks (4.1, 2.9) */
+#define FS_V8 6 /* Eighth Edition, 4K blocks */
+#define FS_BSDFFS 7 /* 4.2BSD fast file system */
+#define FS_LINUXFS 8 /* Linux file system */
+
+#ifdef DKTYPENAMES
+static char *fstypenames[] = {
+ "unused",
+ "swap",
+ "Version 6",
+ "Version 7",
+ "System V",
+ "4.1BSD",
+ "Eighth Edition",
+ "4.2BSD",
+ "Linux",
+ 0
+};
+#define FSMAXTYPES (sizeof(fstypenames) / sizeof(fstypenames[0]) - 1)
+#endif
+
+/*
+ * flags shared by various drives:
+ */
+#define D_REMOVABLE 0x01 /* removable media */
+#define D_ECC 0x02 /* supports ECC */
+#define D_BADSECT 0x04 /* supports bad sector forw. */
+#define D_RAMDISK 0x08 /* disk emulator */
+#define D_CHAIN 0x10 /* can do back-back transfers */
+
+/*
+ * Drive data for SMD.
+ */
+#define d_smdflags d_drivedata[0]
+#define D_SSE 0x1 /* supports skip sectoring */
+#define d_mindist d_drivedata[1]
+#define d_maxdist d_drivedata[2]
+#define d_sdist d_drivedata[3]
+
+/*
+ * Drive data for ST506.
+ */
+#define d_precompcyl d_drivedata[0]
+#define d_gap3 d_drivedata[1] /* used only when formatting */
+
+/*
+ * IBM controller info (d_precompcyl used, too)
+ */
+#define d_step d_drivedata[2]
+
+#ifndef LOCORE
+/*
+ * Structure used to perform a format
+ * or other raw operation, returning data
+ * and/or register values.
+ * Register identification and format
+ * are device- and driver-dependent.
+ */
+struct format_op {
+ char *df_buf;
+ int df_count; /* value-result */
+ recnum_t df_startblk;
+ int df_reg[8]; /* result */
+};
+
+/*
+ * Disk-specific ioctls.
+ */
+ /* get and set disklabel; DIOCGPART used internally */
+#define DIOCGDINFO _IOR('d', 101, struct disklabel)/* get */
+#define DIOCSDINFO _IOW('d', 102, struct disklabel)/* set */
+#define DIOCWDINFO _IOW('d', 103, struct disklabel)/* set, update disk */
+
+/* do format operation, read or write */
+#define DIOCRFORMAT _IOWR('d', 105, struct format_op)
+#define DIOCWFORMAT _IOWR('d', 106, struct format_op)
+
+#define DIOCSSTEP _IOW('d', 107, int) /* set step rate */
+#define DIOCSRETRIES _IOW('d', 108, int) /* set # of retries */
+#define DIOCWLABEL _IOW('d', 109, int) /* write en/disable label */
+
+#define DIOCSBAD _IOW('d', 110, struct dkbad) /* set kernel dkbad */
+
+#endif /* LOCORE */
+
+#endif /* _DISK_STATUS_H_ */
diff --git a/include/device/input.h b/include/device/input.h
new file mode 100644
index 0000000..9de73a3
--- /dev/null
+++ b/include/device/input.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2023 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Mach.
+ *
+ * GNU Mach is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _DEVICE_INPUT_H
+#define _DEVICE_INPUT_H
+
+#include <mach/boolean.h>
+#include <mach/time_value.h>
+
+/*
+ * Ioctl's have the command encoded in the lower word, and the size of
+ * any in or out parameters in the upper word. The high 3 bits of the
+ * upper word are used to encode the in/out status of the parameter.
+ */
+#define IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */
+#define IOC_VOID 0x20000000 /* no parameters */
+#define IOC_OUT 0x40000000 /* copy out parameters */
+#define IOC_IN 0x80000000U /* copy in parameters */
+#define IOC_INOUT (IOC_IN|IOC_OUT)
+
+#define _IOC(inout,group,num,len) \
+ (inout | ((len & IOCPARM_MASK) << 16) | ((group) << 8) | (num))
+#define _IO(g,n) _IOC(IOC_VOID, (g), (n), 0)
+#define _IOR(g,n,t) _IOC(IOC_OUT, (g), (n), sizeof(t))
+#define _IOW(g,n,t) _IOC(IOC_IN, (g), (n), sizeof(t))
+#define _IOWR(g,n,t) _IOC(IOC_INOUT, (g), (n), sizeof(t))
+
+typedef uint8_t Scancode;
+typedef uint16_t kev_type; /* kd event type */
+
+/* (used for event records) */
+struct mouse_motion {
+ short mm_deltaX; /* units? */
+ short mm_deltaY;
+};
+
+typedef struct {
+ kev_type type; /* see below */
+ /*
+ * This is not used anymore but is kept for backwards compatibility.
+ * Note the use of rpc_time_value to ensure compatibility for a 64 bit kernel and
+ * 32 bit user land.
+ */
+ struct rpc_time_value unused_time; /* timestamp */
+ union { /* value associated with event */
+ boolean_t up; /* MOUSE_LEFT .. MOUSE_RIGHT */
+ Scancode sc; /* KEYBD_EVENT */
+ struct mouse_motion mmotion; /* MOUSE_MOTION */
+ } value;
+} kd_event;
+#define m_deltaX mmotion.mm_deltaX
+#define m_deltaY mmotion.mm_deltaY
+
+/*
+ * kd_event ID's.
+ */
+#define MOUSE_LEFT 1 /* mouse left button up/down */
+#define MOUSE_MIDDLE 2
+#define MOUSE_RIGHT 3
+#define MOUSE_MOTION 4 /* mouse motion */
+#define KEYBD_EVENT 5 /* key up/down */
+
+/* Keyboard ioctls */
+
+/*
+ * KDSKBDMODE - When the console is in "ascii" mode, keyboard events are
+ * converted to Ascii characters that are readable from /dev/console.
+ * When the console is in "event" mode, keyboard events are
+ * timestamped and queued up on /dev/kbd as kd_events. When the last
+ * close is done on /dev/kbd, the console automatically reverts to ascii
+ * mode.
+ * When /dev/mouse is opened, mouse events are timestamped and queued
+ * on /dev/mouse, again as kd_events.
+ *
+ * KDGKBDTYPE - Returns the type of keyboard installed. Currently
+ * there is only one type, KB_VANILLAKB, which is your standard PC-AT
+ * keyboard.
+ */
+
+#define KDSKBDMODE _IOW('K', 1, int) /* set keyboard mode */
+#define KB_EVENT 1
+#define KB_ASCII 2
+
+#define KDGKBDTYPE _IOR('K', 2, int) /* get keyboard type */
+#define KB_VANILLAKB 0
+
+#define KDSETLEDS _IOW('K', 5, int) /* set the keyboard ledstate */
+
+#endif /* _DEVICE_INPUT_H */
diff --git a/include/device/net_status.h b/include/device/net_status.h
new file mode 100644
index 0000000..9ab95b9
--- /dev/null
+++ b/include/device/net_status.h
@@ -0,0 +1,201 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ *
+ * Status information for network interfaces.
+ */
+
+#ifndef _DEVICE_NET_STATUS_H_
+#define _DEVICE_NET_STATUS_H_
+
+#include <device/device_types.h>
+#include <mach/message.h>
+
+/*
+ * General interface status
+ */
+struct net_status {
+ int min_packet_size; /* minimum size, including header */
+ int max_packet_size; /* maximum size, including header */
+ int header_format; /* format of network header */
+ int header_size; /* size of network header */
+ int address_size; /* size of network address */
+ int flags; /* interface status */
+ int mapped_size; /* if mappable, virtual mem needed */
+};
+#define NET_STATUS_COUNT (sizeof(struct net_status)/sizeof(int))
+#define NET_STATUS (('n'<<16) + 1)
+
+/*
+ * Header formats, as given by RFC 826/1010 for ARP:
+ */
+#define HDR_ETHERNET 1 /* Ethernet hardware address */
+#define HDR_EXP_ETHERNET 2 /* 3Mb experimental Ethernet
+ hardware address */
+#define HDR_PRO_NET 4 /* Proteon ProNET Token Ring */
+#define HDR_CHAOS 5 /* Chaosnet */
+#define HDR_802 6 /* IEEE 802 networks */
+
+
+/*
+ * A network address is an array of bytes. In order to return
+ * this in an array of (long) integers, it is returned in net order.
+ * Use 'ntohl' on each element of the array to retrieve the original
+ * ordering.
+ */
+#define NET_ADDRESS (('n'<<16) + 2)
+
+#define NET_DSTADDR (('n'<<16) + 3)
+
+#define NET_FLAGS (('n'<<16) + 4)
+
+/*
+ * Input packet filter definition
+ */
+#define NET_MAX_FILTER 128 /* was 64, bpf programs are big */
+#define NET_FILTER_STACK_DEPTH 32
+
+/*
+ * We allow specification of up to NET_MAX_FILTER (short) words of a filter
+ * command list to be applied to incoming packets to determine if
+ * those packets should be given to a particular network input filter.
+ *
+ * Each network filter specifies the filter command list via net_add_filter.
+ * Each filter command list specifies a sequence of actions which leave a
+ * boolean value on the top of an internal stack. Each word of the
+ * command list specifies an action from the set {PUSHLIT, PUSHZERO,
+ * PUSHWORD+N} which respectively push the next word of the filter, zero,
+ * or word N of the incoming packet on the stack, and a binary operator
+ * from the set {EQ, LT, LE, GT, GE, AND, OR, XOR} which operates on the
+ * top two elements of the stack and replaces them with its result. The
+ * special action NOPUSH and the special operator NOP can be used to only
+ * perform the binary operation or to only push a value on the stack.
+ *
+ * If the final value of the filter operation is true, then the packet is
+ * accepted for the filter.
+ *
+ * The first filter_t object is a header which allows setting flags for the
+ * filter code. The main flags concern the direction of packets. This header
+ * is split in the same way NETF words are: the 6 MSB bits indicate the type
+ * of filter while the 10 LSB bits are the flags. For native NETF filters,
+ * clear the 6 MSB bits (which is why there is no dedicated macro).
+ */
+
+typedef unsigned short filter_t;
+typedef filter_t *filter_array_t;
+
+#define CSPF_BYTES(n) ((n) * sizeof (filter_t))
+
+/* these must sum to 16! */
+#define NETF_NBPA 10 /* # bits / argument */
+#define NETF_NBPO 6 /* # bits / operator */
+
+#define NETF_ARG(word) ((word) & 0x3ff)
+#define NETF_OP(word) (((word)>>NETF_NBPA)&0x3f)
+
+/* filter types */
+#define NETF_TYPE_MASK (((1 << NETF_NBPO) - 1) << NETF_NBPA)
+#define NETF_BPF (1 << NETF_NBPA)
+
+/* flags */
+#define NETF_IN 0x1
+#define NETF_OUT 0x2
+
+/* binary operators */
+#define NETF_NOP (0<<NETF_NBPA)
+#define NETF_EQ (1<<NETF_NBPA)
+#define NETF_LT (2<<NETF_NBPA)
+#define NETF_LE (3<<NETF_NBPA)
+#define NETF_GT (4<<NETF_NBPA)
+#define NETF_GE (5<<NETF_NBPA)
+#define NETF_AND (6<<NETF_NBPA)
+#define NETF_OR (7<<NETF_NBPA)
+#define NETF_XOR (8<<NETF_NBPA)
+#define NETF_COR (9<<NETF_NBPA)
+#define NETF_CAND (10<<NETF_NBPA)
+#define NETF_CNOR (11<<NETF_NBPA)
+#define NETF_CNAND (12<<NETF_NBPA)
+#define NETF_NEQ (13<<NETF_NBPA)
+#define NETF_LSH (14<<NETF_NBPA)
+#define NETF_RSH (15<<NETF_NBPA)
+#define NETF_ADD (16<<NETF_NBPA)
+#define NETF_SUB (17<<NETF_NBPA)
+
+
+/* stack arguments */
+#define NETF_NOPUSH 0 /* don`t push */
+#define NETF_PUSHLIT 1 /* next word in filter */
+#define NETF_PUSHZERO 2 /* 0 */
+#define NETF_PUSHIND 14 /* word indexed by stack top */
+#define NETF_PUSHHDRIND 15 /* header word indexed by stack top */
+#define NETF_PUSHWORD 16 /* word 0 .. 944 in packet */
+#define NETF_PUSHHDR 960 /* word 0 .. 31 in header */
+#define NETF_PUSHSTK 992 /* word 0 .. 31 in stack */
+
+/* priorities */
+#define NET_HI_PRI 100
+#define NET_PRI_MAX 255
+
+/*
+ * BPF support.
+ */
+#include <device/bpf.h>
+
+/*
+ * Net receive message format.
+ *
+ * The header and data are packaged separately, since some hardware
+ * supports variable-length headers. We prefix the packet with
+ * a packet_hdr structure so that the real data portion begins
+ * on a long-word boundary, and so that packet filters can address
+ * the type field and packet size uniformly.
+ */
+#define NET_RCV_MAX 4095
+#define NET_HDW_HDR_MAX 64
+
+#define NET_RCV_MSG_ID 2999 /* in device.defs reply range */
+
+struct packet_header {
+ unsigned short length;
+ unsigned short type; /* network order */
+};
+
+struct net_rcv_msg {
+ mach_msg_header_t msg_hdr;
+ mach_msg_type_t header_type;
+ char header[NET_HDW_HDR_MAX];
+ mach_msg_type_t packet_type;
+ char packet[NET_RCV_MAX];
+ boolean_t sent;
+};
+typedef struct net_rcv_msg *net_rcv_msg_t;
+#define net_rcv_msg_packet_count packet_type.msgt_number
+
+
+
+#endif /* _DEVICE_NET_STATUS_H_ */
diff --git a/include/device/notify.defs b/include/device/notify.defs
new file mode 100644
index 0000000..7919b33
--- /dev/null
+++ b/include/device/notify.defs
@@ -0,0 +1,36 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem notify 100;
+
+#include <mach/std_types.defs>
+
+serverprefix do_;
+serverdemux device_intr_notify_server;
+
+simpleroutine device_intr_notify(
+ notify : notify_port_t;
+ id : int);
diff --git a/include/device/notify.h b/include/device/notify.h
new file mode 100644
index 0000000..addf911
--- /dev/null
+++ b/include/device/notify.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2010 Free Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * Device notification definitions.
+ */
+
+#ifndef _MACH_DEVICE_NOTIFY_H_
+#define _MACH_DEVICE_NOTIFY_H_
+
+#include <mach/port.h>
+#include <mach/message.h>
+
+typedef struct
+{
+ mach_msg_header_t intr_header;
+ mach_msg_type_t intr_type;
+ int id;
+} device_intr_notification_t;
+
+#define DEVICE_INTR_NOTIFY 100
+
+#endif /* _MACH_DEVICE_NOTIFY_H_ */
diff --git a/include/device/tape_status.h b/include/device/tape_status.h
new file mode 100644
index 0000000..603d76c
--- /dev/null
+++ b/include/device/tape_status.h
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mtio.h 7.4 (Berkeley) 8/31/88
+ */
+
+#ifndef _TAPE_STATUS_H_
+#define _TAPE_STATUS_H_
+
+/*
+ * Tape status
+ */
+
+struct tape_status {
+ unsigned int mt_type;
+ unsigned int speed;
+ unsigned int density;
+ unsigned int flags;
+# define TAPE_FLG_REWIND 0x1
+# define TAPE_FLG_WP 0x2
+};
+#define TAPE_STATUS_COUNT (sizeof(struct tape_status)/sizeof(int))
+#define TAPE_STATUS (('m'<<16) + 1)
+
+/*
+ * Constants for mt_type. These are the same
+ * for controllers compatible with the types listed.
+ */
+#define MT_ISTS 0x01 /* TS-11 */
+#define MT_ISHT 0x02 /* TM03 Massbus: TE16, TU45, TU77 */
+#define MT_ISTM 0x03 /* TM11/TE10 Unibus */
+#define MT_ISMT 0x04 /* TM78/TU78 Massbus */
+#define MT_ISUT 0x05 /* SI TU-45 emulation on Unibus */
+#define MT_ISCPC 0x06 /* SUN */
+#define MT_ISAR 0x07 /* SUN */
+#define MT_ISTMSCP 0x08 /* DEC TMSCP protocol (TU81, TK50) */
+#define MT_ISCY 0x09 /* CCI Cipher */
+#define MT_ISSCSI 0x0a /* SCSI tape (all brands) */
+
+
+/*
+ * Set status parameters
+ */
+
+struct tape_params {
+ unsigned int mt_operation;
+ unsigned int mt_repeat_count;
+};
+
+/* operations */
+#define MTWEOF 0 /* write an end-of-file record */
+#define MTFSF 1 /* forward space file */
+#define MTBSF 2 /* backward space file */
+#define MTFSR 3 /* forward space record */
+#define MTBSR 4 /* backward space record */
+#define MTREW 5 /* rewind */
+#define MTOFFL 6 /* rewind and put the drive offline */
+#define MTNOP 7 /* no operation, sets status only */
+#define MTCACHE 8 /* enable controller cache */
+#define MTNOCACHE 9 /* disable controller cache */
+
+
+/*
+ * U*x compatibility
+ */
+
+/* structure for MTIOCGET - mag tape get status command */
+
+struct mtget {
+ short mt_type; /* type of magtape device */
+/* the following two registers are grossly device dependent */
+ short mt_dsreg; /* ``drive status'' register */
+ short mt_erreg; /* ``error'' register */
+/* end device-dependent registers */
+ short mt_resid; /* residual count */
+/* the following two are not yet implemented */
+ unsigned long mt_fileno; /* file number of current position */
+ unsigned long mt_blkno; /* block number of current position */
+/* end not yet implemented */
+};
+
+
+/* mag tape io control commands */
+#define MTIOCTOP _IOW('m', 1, struct tape_params)/* do a mag tape op */
+#define MTIOCGET _IOR('m', 2, struct mtget) /* get tape status */
+#define MTIOCIEOT _IO('m', 3) /* ignore EOT error */
+#define MTIOCEEOT _IO('m', 4) /* enable EOT error */
+
+
+#endif /* _TAPE_STATUS_H_ */
diff --git a/include/device/tty_status.h b/include/device/tty_status.h
new file mode 100644
index 0000000..2eed5d0
--- /dev/null
+++ b/include/device/tty_status.h
@@ -0,0 +1,134 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: ll/90
+ *
+ * Status information for tty.
+ */
+
+#ifndef _DEVICE_TTY_STATUS_H_
+#define _DEVICE_TTY_STATUS_H_
+
+struct tty_status {
+ int tt_ispeed; /* input speed */
+ int tt_ospeed; /* output speed */
+ int tt_breakc; /* character to deliver when break
+ detected on line */
+ int tt_flags; /* mode flags */
+};
+#define TTY_STATUS_COUNT (sizeof(struct tty_status)/sizeof(int))
+#define TTY_STATUS (dev_flavor_t)(('t'<<16) + 1)
+
+/*
+ * Speeds
+ */
+#define B0 0
+#define B50 1
+#define B75 2
+#define B110 3
+#define B134 4
+#define B150 5
+#define B200 6
+#define B300 7
+#define B600 8
+#define B1200 9
+#define B1800 10
+#define B2400 11
+#define B4800 12
+#define B9600 13
+#define EXTA 14 /* XX can we just get rid of EXTA and EXTB? */
+#define EXTB 15
+#define B19200 EXTA
+#define B38400 EXTB
+#define B57600 16
+#define B115200 17
+
+#define NSPEEDS 18
+
+/*
+ * Flags
+ */
+#define TF_TANDEM 0x00000001 /* send stop character when input
+ queue full */
+#define TF_ODDP 0x00000002 /* get/send odd parity */
+#define TF_EVENP 0x00000004 /* get/send even parity */
+#define TF_ANYP (TF_ODDP|TF_EVENP)
+ /* get any parity/send none */
+#define TF_LITOUT 0x00000008 /* output all 8 bits
+ otherwise, characters >= 0x80
+ are time delays XXX */
+#define TF_MDMBUF 0x00000010 /* start/stop output on carrier
+ interrupt
+ otherwise, dropping carrier
+ hangs up line */
+#define TF_NOHANG 0x00000020 /* no hangup signal on carrier drop */
+#define TF_HUPCLS 0x00000040 /* hang up (outgoing) on last close */
+
+/*
+ * Read-only flags - information about device
+ */
+#define TF_ECHO 0x00000080 /* device wants user to echo input */
+#define TF_CRMOD 0x00000100 /* device wants \r\n, not \n */
+#define TF_XTABS 0x00000200 /* device does not understand tabs */
+
+/*
+ * Modem control
+ */
+#define TTY_MODEM_COUNT (1) /* one integer */
+#define TTY_MODEM (dev_flavor_t)(('t'<<16) + 2)
+
+#define TM_LE 0x0001 /* line enable */
+#define TM_DTR 0x0002 /* data terminal ready */
+#define TM_RTS 0x0004 /* request to send */
+#define TM_ST 0x0008 /* secondary transmit */
+#define TM_SR 0x0010 /* secondary receive */
+#define TM_CTS 0x0020 /* clear to send */
+#define TM_CAR 0x0040 /* carrier detect */
+#define TM_RNG 0x0080 /* ring */
+#define TM_DSR 0x0100 /* data set ready */
+
+#define TM_BRK 0x0200 /* set line break (internal) */
+#define TM_HUP 0x0000 /* close line (internal) */
+
+/*
+ * Other controls
+ */
+#define TTY_FLUSH_COUNT (1) /* one integer - D_READ|D_WRITE */
+#define TTY_FLUSH (dev_flavor_t)(('t'<<16) + 3)
+ /* flush input or output */
+#define TTY_STOP (dev_flavor_t)(('t'<<16) + 4)
+ /* stop output */
+#define TTY_START (dev_flavor_t)(('t'<<16) + 5)
+ /* start output */
+#define TTY_SET_BREAK (dev_flavor_t)(('t'<<16) + 6)
+ /* set break condition */
+#define TTY_CLEAR_BREAK (dev_flavor_t)(('t'<<16) + 7)
+ /* clear break condition */
+#define TTY_SET_TRANSLATION (dev_flavor_t)(('t'<<16) + 8)
+ /* set translation table */
+
+#endif /* _DEVICE_TTY_STATUS_H_ */
diff --git a/include/inttypes.h b/include/inttypes.h
new file mode 100644
index 0000000..353984a
--- /dev/null
+++ b/include/inttypes.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2020 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Mach.
+ *
+ * GNU Mach is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _INTTYPES_H_
+#define _INTTYPES_H_
+
+#include <stdint.h>
+
+#ifdef __x86_64__
+#define __64PREFIX "l"
+#define __PTRPREFIX "l"
+#else
+#define __64PREFIX "ll"
+#define __PTRPREFIX
+#endif
+
+#define PRId8 "d"
+#define PRId16 "d"
+#define PRId32 "d"
+#define PRId64 __64PREFIX"d"
+#define PRIdPTR __PTRPREFIX"d"
+
+#define PRIi8 "i"
+#define PRIi16 "i"
+#define PRIi32 "i"
+#define PRIi64 __64PREFIX"i"
+#define PRIiPTR __PTRPREFIX"i"
+
+#define PRIu8 "u"
+#define PRIu16 "u"
+#define PRIu32 "u"
+#define PRIu64 __64PREFIX"u"
+#define PRIuPTR __PTRPREFIX"u"
+
+#define PRIx8 "x"
+#define PRIx16 "x"
+#define PRIx32 "x"
+#define PRIx64 __64PREFIX"x"
+#define PRIxPTR __PTRPREFIX"x"
+
+#define PRIo8 "o"
+#define PRIo16 "o"
+#define PRIo32 "o"
+#define PRIo64 __64PREFIX"o"
+#define PRIoPTR __PTRPREFIX"o"
+
+#endif /* _INTTYPES_H_ */
diff --git a/include/mach/alert.h b/include/mach/alert.h
new file mode 100644
index 0000000..e8eb371
--- /dev/null
+++ b/include/mach/alert.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: mach/alert.h
+ *
+ * Standard alert definitions
+ *
+ */
+
+#ifndef _MACH_ALERT_H_
+#define _MACH_ALERT_H_
+
+#define ALERT_BITS 32 /* Minimum; more may actually be available */
+
+#define ALERT_ABORT_STRONG 0x00000001 /* Request to abort _all_ operations */
+#define ALERT_ABORT_SAFE 0x00000002 /* Request to abort restartable operations */
+
+#define ALERT_USER 0xffff0000 /* User-defined alert bits */
+
+#endif /* _MACH_ALERT_H_ */
diff --git a/include/mach/boolean.h b/include/mach/boolean.h
new file mode 100644
index 0000000..f0f36a2
--- /dev/null
+++ b/include/mach/boolean.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/boolean.h
+ *
+ * Boolean data type.
+ *
+ */
+
+#ifndef _MACH_BOOLEAN_H_
+#define _MACH_BOOLEAN_H_
+
+/*
+ * Pick up "boolean_t" type definition
+ */
+
+#ifndef __ASSEMBLER__
+#include <mach/machine/boolean.h>
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MACH_BOOLEAN_H_ */
+
+/*
+ * Define TRUE and FALSE, only if they haven't been before,
+ * and not if they're explicitly refused. Note that we're
+ * outside the BOOLEAN_H_ conditional, to avoid ordering
+ * problems.
+ */
+
+#if !defined(NOBOOL)
+
+#ifndef TRUE
+#define TRUE ((boolean_t) 1)
+#endif /* TRUE */
+
+#ifndef FALSE
+#define FALSE ((boolean_t) 0)
+#endif /* FALSE */
+
+#endif /* !defined(NOBOOL) */
diff --git a/include/mach/boot.h b/include/mach/boot.h
new file mode 100644
index 0000000..7f14cc4
--- /dev/null
+++ b/include/mach/boot.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_BOOT_
+#define _MACH_BOOT_
+
+#include <mach/machine/boot.h>
+
+#ifndef __ASSEMBLER__
+
+#include <mach/machine/vm_types.h>
+
+struct boot_image_info
+{
+ /* First of the chain of boot modules in the boot image. */
+ struct boot_module *first_bmod;
+
+ /* List of rendezvous points:
+ starts out 0; and bmods can add nodes as needed. */
+ struct boot_rendezvous *first_rzv;
+
+ /* These register the total virtual address extent of the boot image. */
+ vm_offset_t start, end;
+
+ /* Machine-dependent boot information. */
+ struct machine_boot_image_info mboot;
+};
+
+struct boot_module
+{
+ int magic;
+ int (*init)(struct boot_image_info *bii);
+ vm_offset_t text;
+ vm_offset_t etext;
+ vm_offset_t data;
+ vm_offset_t edata;
+ vm_offset_t bss;
+ vm_offset_t ebss;
+};
+#define BMOD_VALID(bmod) ((bmod)->magic == BMOD_MAGIC)
+#define BMOD_NEXT(bmod) ((struct boot_module*)((bmod)->edata))
+
+struct boot_rendezvous
+{
+ struct boot_rendezvous *next;
+ int code;
+};
+
+#endif /* !__ASSEMBLER__ */
+
+
+/* This is the magic value that must appear in boot_module.magic. */
+#define BMOD_MAGIC 0x424d4f44 /* 'BMOD' */
+
+
+/* Following are the codes for boot_rendezvous.code. */
+
+/* This rendezvous is used for choosing a microkernel to start.
+ XX not used yet */
+#define BRZV_KERNEL 'K'
+
+/* Once the microkernel is fully initialized,
+ it starts one or more bootstrap services... */
+#define BRZV_BOOTSTRAP 'B'
+
+/* The bootstrap services might need other OS-dependent data,
+ such as initial programs to run, filesystem snapshots, etc.
+ These generic chunks of data are packaged up by the microkernel
+ and provided to the bootstrap services upon request.
+ XX When can they be deallocated? */
+#define BRZV_DATA 'D'
+
+
+#endif /* _MACH_BOOT_ */
diff --git a/include/mach/default_pager.defs b/include/mach/default_pager.defs
new file mode 100644
index 0000000..e2154e2
--- /dev/null
+++ b/include/mach/default_pager.defs
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem default_pager 2275;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach/default_pager_types.defs>
+
+routine default_pager_object_create(
+ default_pager : mach_port_t;
+ out memory_object : memory_object_t =
+ MACH_MSG_TYPE_MAKE_SEND;
+ object_size : vm_size_t);
+
+routine default_pager_info(
+ default_pager : mach_port_t;
+ out info : default_pager_info_t);
+
+routine default_pager_objects(
+ default_pager : mach_port_t;
+ out objects : default_pager_object_array_t,
+ CountInOut, Dealloc;
+ out ports : mach_port_array_t =
+ array[] of mach_port_move_send_t,
+ CountInOut, Dealloc);
+
+routine default_pager_object_pages(
+ default_pager : mach_port_t;
+ memory_object : memory_object_name_t;
+ out pages : default_pager_page_array_t,
+ CountInOut, Dealloc);
+
+routine default_pager_paging_file(
+ default_pager : mach_port_t;
+ master_device_port : mach_port_t;
+ filename : default_pager_filename_t;
+ add : boolean_t);
+
+routine default_pager_register_fileserver(
+ default_pager : mach_port_t;
+ fileserver_port : mach_port_t);
diff --git a/include/mach/default_pager_types.defs b/include/mach/default_pager_types.defs
new file mode 100644
index 0000000..398c62c
--- /dev/null
+++ b/include/mach/default_pager_types.defs
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEFAULT_PAGER_TYPES_DEFS_
+#define _MACH_DEFAULT_PAGER_TYPES_DEFS_
+
+#include <mach/std_types.defs>
+
+type default_pager_info_t = struct {
+ vm_size_t dpi_total_space;
+ vm_size_t dpi_free_space;
+ vm_size_t dpi_page_size;
+};
+
+type default_pager_object_t = struct {
+ vm_offset_t dpo_object;
+ vm_size_t dpo_size;
+};
+type default_pager_object_array_t = array[] of default_pager_object_t;
+
+type default_pager_page_t = struct {
+ vm_offset_t dpp_offset;
+};
+type default_pager_page_array_t = array[] of default_pager_page_t;
+
+type default_pager_filename_t = (MACH_MSG_TYPE_STRING_C, 8*256);
+
+import <mach/default_pager_types.h>;
+
+#endif /* _MACH_DEFAULT_PAGER_TYPES_DEFS_ */
diff --git a/include/mach/default_pager_types.h b/include/mach/default_pager_types.h
new file mode 100644
index 0000000..2cf7da2
--- /dev/null
+++ b/include/mach/default_pager_types.h
@@ -0,0 +1,59 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEFAULT_PAGER_TYPES_H_
+#define _MACH_DEFAULT_PAGER_TYPES_H_
+
+/*
+ * Remember to update the mig type definitions
+ * in default_pager_types.defs when adding/removing fields.
+ */
+
+typedef struct default_pager_info {
+ vm_size_t dpi_total_space; /* size of backing store */
+ vm_size_t dpi_free_space; /* how much of it is unused */
+ vm_size_t dpi_page_size; /* the pager's vm page size */
+} default_pager_info_t;
+
+
+typedef struct default_pager_object {
+ vm_offset_t dpo_object; /* object managed by the pager */
+ vm_size_t dpo_size; /* backing store used for the object */
+} default_pager_object_t;
+
+typedef default_pager_object_t *default_pager_object_array_t;
+
+
+typedef struct default_pager_page {
+ vm_offset_t dpp_offset; /* offset of the page in its object */
+} default_pager_page_t;
+
+typedef default_pager_page_t *default_pager_page_array_t;
+
+typedef char default_pager_filename_t[256];
+typedef const char *const_default_pager_filename_t;
+
+#endif /* _MACH_DEFAULT_PAGER_TYPES_H_ */
diff --git a/include/mach/error.h b/include/mach/error.h
new file mode 100644
index 0000000..035dcf8
--- /dev/null
+++ b/include/mach/error.h
@@ -0,0 +1,93 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/error.h
+ * Purpose:
+ * error module definitions
+ *
+ */
+
+#ifndef _MACH_ERROR_H_
+#define _MACH_ERROR_H_
+#include <mach/kern_return.h>
+
+/*
+ * error number layout as follows:
+ *
+ * hi lo
+ * | system(6) | subsystem(12) | code(14) |
+ */
+
+
+#define err_none (mach_error_t)0
+#define ERR_SUCCESS (mach_error_t)0
+
+
+#define err_system(x) (((x)&0x3f)<<26)
+#define err_sub(x) (((x)&0xfff)<<14)
+
+#define err_get_system(err) (((err)>>26)&0x3f)
+#define err_get_sub(err) (((err)>>14)&0xfff)
+#define err_get_code(err) ((err)&0x3fff)
+
+#define system_emask (err_system(0x3f))
+#define sub_emask (err_sub(0xfff))
+#define code_emask (0x3fff)
+
+
+/* Mach error systems */
+#define err_kern err_system(0x0) /* kernel */
+#define err_us err_system(0x1) /* user space library */
+#define err_server err_system(0x2) /* user space servers */
+#define err_ipc err_system(0x3) /* old ipc errors */
+#define err_mach_ipc err_system(0x4) /* mach-ipc errors */
+#define err_bootstrap err_system(0x5) /* bootstrap errors */
+#define err_hurd err_system(0x10) /* GNU Hurd server errors */
+#define err_local err_system(0x3e) /* user defined errors */
+#define err_ipc_compat err_system(0x3f) /* (compatibility) mach-ipc errors */
+
+#define err_max_system 0x3f
+
+
+/* special old "subsystems" that don't really follow the above rules */
+#define err_mig -300
+#define err_exec 6000
+
+/* unix errors get lumped into one subsystem */
+#define err_unix (err_kern|err_sub(3))
+#define unix_err(errno) (err_kern|err_sub(3)|errno)
+
+/* MS-DOS extended error codes */
+#define err_dos (err_kern|err_sub(0xd05))
+
+/* Flux OS error systems */
+#define err_fluke err_system(0x20) /* Fluke API */
+
+#ifndef __ASSEMBLER__
+typedef kern_return_t mach_error_t;
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MACH_ERROR_H_ */
diff --git a/include/mach/exc.defs b/include/mach/exc.defs
new file mode 100644
index 0000000..28638e2
--- /dev/null
+++ b/include/mach/exc.defs
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Abstract:
+ * MiG definitions file for Mach exception interface.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ exc 2400;
+
+#include <mach/std_types.defs>
+
+ServerPrefix catch_;
+
+routine exception_raise(
+ exception_port : mach_port_t;
+ thread : mach_port_t;
+ task : mach_port_t;
+ exception : integer_t;
+ code : integer_t;
+ subcode : rpc_long_integer_t);
diff --git a/include/mach/exception.h b/include/mach/exception.h
new file mode 100644
index 0000000..c44fd53
--- /dev/null
+++ b/include/mach/exception.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_EXCEPTION_H_
+#define _MACH_EXCEPTION_H_
+
+#include <mach/machine/exception.h>
+
+/*
+ * Machine-independent exception definitions.
+ */
+
+#define EXC_BAD_ACCESS 1 /* Could not access memory */
+ /* Code contains kern_return_t describing error. */
+ /* Subcode contains bad memory address. */
+
+#define EXC_BAD_INSTRUCTION 2 /* Instruction failed */
+ /* Illegal or undefined instruction or operand */
+
+#define EXC_ARITHMETIC 3 /* Arithmetic exception */
+ /* Exact nature of exception is in code field */
+
+#define EXC_EMULATION 4 /* Emulation instruction */
+ /* Emulation support instruction encountered */
+ /* Details in code and subcode fields */
+
+#define EXC_SOFTWARE 5 /* Software generated exception */
+ /* Exact exception is in code field. */
+ /* Codes 0 - 0xFFFF reserved to hardware */
+ /* Codes 0x10000 - 0x1FFFF reserved for OS emulation (Unix) */
+
+#define EXC_BREAKPOINT 6 /* Trace, breakpoint, etc. */
+ /* Details in code field. */
+
+#endif /* _MACH_EXCEPTION_H_ */
diff --git a/include/mach/exec/a.out.h b/include/mach/exec/a.out.h
new file mode 100644
index 0000000..c6dcaff
--- /dev/null
+++ b/include/mach/exec/a.out.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_A_OUT_
+#define _MACH_A_OUT_
+
+struct exec
+{
+ unsigned long a_magic; /* magic number */
+ unsigned long a_text; /* size of text segment */
+ unsigned long a_data; /* size of initialized data */
+ unsigned long a_bss; /* size of uninitialized data */
+ unsigned long a_syms; /* size of symbol table */
+ unsigned long a_entry; /* entry point */
+ unsigned long a_trsize; /* size of text relocation */
+ unsigned long a_drsize; /* size of data relocation */
+};
+
+struct nlist {
+ long n_strx;
+ unsigned char n_type;
+ char n_other;
+ short n_desc;
+ unsigned long n_value;
+};
+
+#define OMAGIC 0407
+#define NMAGIC 0410
+#define ZMAGIC 0413
+#define QMAGIC 0314
+
+#define N_GETMAGIC(ex) \
+ ( (ex).a_magic & 0xffff )
+#define N_GETMAGIC_NET(ex) \
+ (ntohl((ex).a_magic) & 0xffff)
+
+/* Valid magic number check. */
+#define N_BADMAG(ex) \
+ (N_GETMAGIC(ex) != OMAGIC && N_GETMAGIC(ex) != NMAGIC && \
+ N_GETMAGIC(ex) != ZMAGIC && N_GETMAGIC(ex) != QMAGIC && \
+ N_GETMAGIC_NET(ex) != OMAGIC && N_GETMAGIC_NET(ex) != NMAGIC && \
+ N_GETMAGIC_NET(ex) != ZMAGIC && N_GETMAGIC_NET(ex) != QMAGIC)
+
+/* We don't provide any N_???OFF macros here
+ because they vary too much between the different a.out variants;
+ it's practically impossible to create one set of macros
+ that works for UX, FreeBSD, NetBSD, Linux, etc. */
+
+#endif /* _MACH_A_OUT_ */
diff --git a/include/mach/exec/elf.h b/include/mach/exec/elf.h
new file mode 100644
index 0000000..409947c
--- /dev/null
+++ b/include/mach/exec/elf.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ *
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+#ifndef _MACH_EXEC_ELF_H_
+#define _MACH_EXEC_ELF_H_
+
+#include <mach/machine/exec/elf.h>
+
+/* ELF Header - figure 4-3, page 4-4 */
+
+#define EI_NIDENT 16
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry;
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
+} Elf32_Ehdr;
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT]; /* Id bytes */
+ Elf64_Half e_type; /* file type */
+ Elf64_Half e_machine; /* machine type */
+ Elf64_Word e_version; /* version number */
+ Elf64_Addr e_entry; /* entry point */
+ Elf64_Off e_phoff; /* Program hdr offset */
+ Elf64_Off e_shoff; /* Section hdr offset */
+ Elf64_Word e_flags; /* Processor flags */
+ Elf64_Half e_ehsize; /* sizeof ehdr */
+ Elf64_Half e_phentsize; /* Program header entry size */
+ Elf64_Half e_phnum; /* Number of program headers */
+ Elf64_Half e_shentsize; /* Section header entry size */
+ Elf64_Half e_shnum; /* Number of section headers */
+ Elf64_Half e_shstrndx; /* String table index */
+} Elf64_Ehdr;
+
+/* e_ident[] identification indexes - figure 4-4, page 4-7 */
+
+#define EI_MAG0 0
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_PAD 7
+
+/* magic number - pg 4-8 */
+
+#define ELFMAG0 0x7f
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+
+/* file class or capacity - page 4-8 */
+
+#define ELFCLASSNONE 0
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+
+/* data encoding - page 4-9 */
+
+#define ELFDATANONE 0
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
+
+/* object file types - page 4-5 */
+
+#define ET_NONE 0
+#define ET_REL 1
+#define ET_EXEC 2
+#define ET_DYN 3
+#define ET_CORE 4
+
+#define ET_LOPROC 0xff00
+#define ET_HIPROC 0xffff
+
+/* architecture - page 4-5 */
+
+#define EM_NONE 0
+#define EM_M32 1
+#define EM_SPARC 2
+#define EM_386 3
+#define EM_68K 4
+#define EM_88K 5
+#define EM_860 7
+#define EM_MIPS 8
+#define EM_MIPS_RS4_BE 10
+#define EM_SPARC64 11
+#define EM_PARISC 15
+#define EM_PPC 20
+#define EM_X86_64 62
+
+/* version - page 4-6 */
+
+#define EV_NONE 0
+#define EV_CURRENT 1
+
+/* special section indexes - page 4-11, figure 4-7 */
+
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+
+/* section header - page 4-13, figure 4-8 */
+
+typedef struct {
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
+} Elf32_Shdr;
+
+typedef struct elf64_shdr {
+ Elf64_Word sh_name;
+ Elf64_Word sh_type;
+ Elf64_Xword sh_flags;
+ Elf64_Addr sh_addr;
+ Elf64_Off sh_offset;
+ Elf64_Xword sh_size;
+ Elf64_Word sh_link;
+ Elf64_Word sh_info;
+ Elf64_Xword sh_addralign;
+ Elf64_Xword sh_entsize;
+} Elf64_Shdr;
+
+/* section types - page 4-15, figure 4-9 */
+
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+
+/* section attribute flags - page 4-18, figure 4-11 */
+
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_MASKPROC 0xf0000000
+
+/* symbol table - page 4-25, figure 4-15 */
+typedef struct
+{
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
+} Elf32_Sym;
+
+typedef struct elf64_sym {
+ Elf64_Word st_name;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf64_Half st_shndx;
+ Elf64_Addr st_value;
+ Elf64_Xword st_size;
+} Elf64_Sym;
+
+#ifdef __x86_64__
+#define Elf_Sym Elf64_Sym
+#define Elf_Shdr Elf64_Shdr
+#else
+#define Elf_Sym Elf32_Sym
+#define Elf_Shdr Elf32_Shdr
+#endif
+
+/* symbol type and binding attributes - page 4-26 */
+
+#define ELF_ST_BIND(i) ((i) >> 4)
+#define ELF_ST_TYPE(i) ((i) & 0xf)
+#define ELF_ST_INFO(b,t) (((b)<<4)+((t)&0xf))
+
+/* symbol binding - page 4-26, figure 4-16 */
+
+#define STB_LOCAL 0
+#define STB_GLOBAL 1
+#define STB_WEAK 2
+#define STB_LOPROC 13
+#define STB_HIPROC 15
+
+/* symbol types - page 4-28, figure 4-17 */
+
+#define STT_NOTYPE 0
+#define STT_OBJECT 1
+#define STT_FUNC 2
+#define STT_SECTION 3
+#define STT_FILE 4
+#define STT_LOPROC 13
+#define STT_HIPROC 15
+
+
+/* relocation entries - page 4-31, figure 4-19 */
+
+typedef struct
+{
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+} Elf32_Rel;
+
+typedef struct
+{
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
+} Elf32_Rela;
+
+/* Macros to split/combine relocation type and symbol page 4-32 */
+
+#define ELF32_R_SYM(__i) ((__i)>>8)
+#define ELF32_R_TYPE(__i) ((unsigned char) (__i))
+#define ELF32_R_INFO(__s, __t) (((__s)<<8) + (unsigned char) (__t))
+
+
+/* program header - page 5-2, figure 5-1 */
+
+typedef struct {
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
+} Elf32_Phdr;
+
+typedef struct {
+ Elf64_Word p_type; /* entry type */
+ Elf64_Word p_flags; /* flags */
+ Elf64_Off p_offset; /* offset */
+ Elf64_Addr p_vaddr; /* virtual address */
+ Elf64_Addr p_paddr; /* physical address */
+ Elf64_Xword p_filesz; /* file size */
+ Elf64_Xword p_memsz; /* memory size */
+ Elf64_Xword p_align; /* memory & file alignment */
+} Elf64_Phdr;
+
+/* segment types - page 5-3, figure 5-2 */
+
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
+
+/* segment permissions - page 5-6 */
+
+#define PF_X 0x1
+#define PF_W 0x2
+#define PF_R 0x4
+#define PF_MASKPROC 0xf0000000
+
+
+/* dynamic structure - page 5-15, figure 5-9 */
+
+typedef struct {
+ Elf32_Sword d_tag;
+ union {
+ Elf32_Word d_val;
+ Elf32_Addr d_ptr;
+ } d_un;
+} Elf32_Dyn;
+
+/* Dynamic array tags - page 5-16, figure 5-10. */
+
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+
+#if defined(__x86_64__) && ! defined(USER32)
+typedef Elf64_Ehdr Elf_Ehdr;
+typedef Elf64_Phdr Elf_Phdr;
+#else
+typedef Elf32_Ehdr Elf_Ehdr;
+typedef Elf32_Phdr Elf_Phdr;
+#endif
+
+/*
+ * Bootstrap doesn't need machine dependent extensions.
+ */
+
+#endif /* _MACH_EXEC_ELF_H_ */
diff --git a/include/mach/exec/exec.h b/include/mach/exec/exec.h
new file mode 100644
index 0000000..94b234b
--- /dev/null
+++ b/include/mach/exec/exec.h
@@ -0,0 +1,130 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _MACH_EXEC_H_
+#define _MACH_EXEC_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/vm_prot.h>
+
+/* XXX */
+typedef enum
+{
+ EXEC_ELF = 1,
+ EXEC_AOUT = 2,
+} exec_format_t;
+
+typedef struct exec_info
+{
+ /* Format of executable loaded - see above. */
+ exec_format_t format;
+
+ /* Program entrypoint. */
+ vm_offset_t entry;
+
+ /* Initial data pointer - only some architectures use this. */
+ vm_offset_t init_dp;
+
+ /* (ELF) Address of interpreter string for loading shared libraries, null if none. */
+ vm_offset_t interp;
+
+} exec_info_t;
+
+typedef int exec_sectype_t;
+#define EXEC_SECTYPE_READ VM_PROT_READ
+#define EXEC_SECTYPE_WRITE VM_PROT_WRITE
+#define EXEC_SECTYPE_EXECUTE VM_PROT_EXECUTE
+#define EXEC_SECTYPE_PROT_MASK VM_PROT_ALL
+#define EXEC_SECTYPE_ALLOC ((exec_sectype_t)0x000100)
+#define EXEC_SECTYPE_LOAD ((exec_sectype_t)0x000200)
+#define EXEC_SECTYPE_DEBUG ((exec_sectype_t)0x010000)
+#define EXEC_SECTYPE_AOUT_SYMTAB ((exec_sectype_t)0x020000)
+#define EXEC_SECTYPE_AOUT_STRTAB ((exec_sectype_t)0x040000)
+
+typedef int exec_read_func_t(void *handle, vm_offset_t file_ofs,
+ void *buf, vm_size_t size,
+ vm_size_t *out_actual);
+
+typedef int exec_read_exec_func_t(void *handle,
+ vm_offset_t file_ofs, vm_size_t file_size,
+ vm_offset_t mem_addr, vm_size_t mem_size,
+ exec_sectype_t section_type);
+
+/*
+ * Routines exported from libmach_exec.a
+ */
+
+/* Generic function to interpret an executable "file"
+ and "load" it into "memory".
+ Doesn't really know about files, loading, or memory;
+ all file I/O and destination memory accesses
+ go through provided functions.
+ Thus, this is a very generic loading mechanism.
+
+ The read() function is used to read metadata from the file
+ into the local address space.
+
+ The read_exec() function is used to load the actual sections.
+ It is used for all kinds of sections - code, data, bss, debugging data.
+ The 'section_type' parameter specifies what type of section is being loaded.
+
+ For code, data, and bss, the EXEC_SECTYPE_ALLOC flag will be set.
+ For code and data (i.e. stuff that's actually loaded from the file),
+ EXEC_SECTYPE_LOAD will also be set.
+ The EXEC_SECTYPE_PROT_MASK contains the intended access permissions
+ for the section.
+ 'file_size' may be less than 'mem_size';
+ the remaining data must be zero-filled.
+ 'mem_size' is always greater than zero, but 'file_size' may be zero
+ (e.g. in the case of a bss section).
+ No two read_exec() calls for one executable
+ will load data into the same virtual memory page,
+ although they may load from arbitrary (possibly overlapping) file positions.
+
+ For sections that aren't normally loaded into the process image
+ (e.g. debug sections), EXEC_SECTYPE_ALLOC isn't set,
+ but some other appropriate flag is set to indicate the type of section.
+
+ The 'handle' is an opaque pointer which is simply passed on
+ to the read() and read_exec() functions.
+
+ On return, the specified info structure is filled in
+ with information about the loaded executable.
+*/
+int exec_load(exec_read_func_t *read, exec_read_exec_func_t *read_exec,
+ void *handle, exec_info_t *out_info);
+
+/*
+ * Error codes
+ */
+
+#define EX_NOT_EXECUTABLE 6000 /* not a recognized executable format */
+#define EX_WRONG_ARCH 6001 /* valid executable, but wrong arch. */
+#define EX_CORRUPT 6002 /* recognized executable, but mangled */
+#define EX_BAD_LAYOUT 6003 /* something wrong with the memory or file image layout */
+
+
+#endif /* _MACH_EXEC_H_ */
diff --git a/include/mach/experimental.defs b/include/mach/experimental.defs
new file mode 100644
index 0000000..ddcbea5
--- /dev/null
+++ b/include/mach/experimental.defs
@@ -0,0 +1,15 @@
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ experimental 424242;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+serverprefix experimental_;
+
+/* This is free for experimenting with RPCs, with no backward compatibility guarantees. */
diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
new file mode 100644
index 0000000..7ecf74d
--- /dev/null
+++ b/include/mach/gnumach.defs
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2012 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ gnumach 4200;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach_debug/mach_debug_types.defs>
+
+#ifdef GNUMACH_IMPORTS
+GNUMACH_IMPORTS
+#endif
+
+type vm_cache_statistics_data_t = struct[11] of integer_t;
+
+type vm_wire_t = int;
+
+/*
+ * Return page cache statistics for the host on which the target task
+ * resides.
+ */
+routine vm_cache_statistics(
+ target_task : vm_task_t;
+ out vm_cache_stats : vm_cache_statistics_data_t);
+
+/*
+ * Terminate a thread and release rights and memory.
+ *
+ * Intended to be used by threading libraries to provide a clean way for
+ * threads to terminate themselves. The resources a thread wouldn't be able
+ * to release without this call when terminating itself are its
+ * last reference to its kernel port, its reply port, and its stack.
+ *
+ * This call is semantically equivalent to :
+ * - mach_port_deallocate(task, thread_name);
+ * - if (reply_port != MACH_PORT_NULL)
+ * mach_port_destroy(task, reply_port);
+ * - if ((address != 0) || (size != 0))
+ * vm_deallocate(task, address, size)
+ * - thread_terminate(thread)
+ *
+ * Implemented as a simple routine so a reply port isn't required.
+ */
+simpleroutine thread_terminate_release(
+ thread : thread_t;
+ task : task_t;
+ thread_name : mach_port_name_t;
+ reply_port : mach_port_name_t;
+ address : vm_address_t;
+ size : vm_size_t);
+
+/*
+ * Set the name of task TASK to NAME. This is a debugging aid.
+ * NAME will be used in error messages printed by the kernel.
+ */
+simpleroutine task_set_name(
+ task : task_t;
+ name : kernel_debug_name_t);
+
+/*
+ * Register a port to which a notification about newly created tasks
+ * are sent.
+ */
+routine register_new_task_notification(
+ host_priv : host_priv_t;
+ notification : mach_port_send_t);
+
+/* Test that the contents of ADDR are equal to the 32-bit integer VAL1.
+ * If they are not, return immediately, otherwise, block until a
+ * matching 'gsync_wake' is done on the same address. FLAGS is used
+ * to control how the thread waits, and may be composed of:
+ * - GSYNC_SHARED: The address may be shared among tasks. If this
+ *   bit is not set, the address is assumed to be task-local.
+ * - GSYNC_QUAD: Additionally check that the adjacent 32-bit word
+ *   following ADDR matches the value VAL2.
+ * - GSYNC_TIMED: The call only blocks for MSEC milliseconds. */
+routine gsync_wait(
+ task : task_t;
+ addr : vm_address_t;
+ val1 : unsigned;
+ val2 : unsigned;
+ msec : natural_t;
+ flags : int);
+
+/* Wake up threads waiting on the address ADDR. Much like with
+ * 'gsync_wait', the parameter FLAGS controls how it is done. In this
+ * case, it may be composed of the following:
+ * - GSYNC_SHARED: Same as with 'gsync_wait'.
+ * - GSYNC_BROADCAST: Wake up every thread waiting on the address. If
+ * this flag is not set, the call wakes (at most) 1 thread.
+ * - GSYNC_MUTATE: Before waking any potential waiting threads, set the
+ * contents of ADDR to VAL.
+ *
+ * This RPC is implemented as a simple routine for efficiency reasons,
+ * and because the return value rarely matters. */
+simpleroutine gsync_wake(
+ task : task_t;
+ addr : vm_address_t;
+ val : unsigned;
+ flags : int);
+
+/* Arrange for threads waiting on address SRC_ADDR to instead
+ * wait on address DST_ADDR. If WAKE_ONE is true, additionally
+ * wake one of the threads waiting on SRC_ADDR. For this function,
+ * the parameter flags may be a combination of:
+ * - GSYNC_SHARED: Just like with 'gsync_wait' and 'gsync_wake'.
+ * - GSYNC_BROADCAST: Move all the threads waiting on SRC_ADDR. If
+ *   this flag is not set, the call moves (at most) 1 thread.
+ *
+ * This RPC is also a simple routine, and for the same reasons as
+ * with 'gsync_wake'. */
+simpleroutine gsync_requeue(
+ task : task_t;
+ src_addr : vm_address_t;
+ dst_addr : vm_address_t;
+ wake_one : boolean_t;
+ flags : int);
+
+/*
+ * If the VM_WIRE_CURRENT flag is passed, specify that the entire
+ * virtual address space of the target task must not cause page faults.
+ *
+ * If the VM_WIRE_FUTURE flag is passed, automatically wire new
+ * mappings in the address space of the target task.
+ *
+ * If the flags are empty (VM_WIRE_NONE), unwire all mappings.
+ */
+routine vm_wire_all(
+ host : mach_port_t;
+ task : vm_task_t;
+ flags : vm_wire_t);
+
+routine vm_object_sync(
+ object : memory_object_name_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ should_flush : boolean_t;
+ should_return : boolean_t;
+ should_iosync : boolean_t);
+
+routine vm_msync(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ sync_flags : vm_sync_t);
+
+/*
+ * This routine is created for allocating DMA buffers.
+ * We are going to get a contiguous physical memory
+ * and its physical address in addition to the virtual address.
+ * We can specify physical memory range limits and alignment.
+ * NB:
+ * pmax is defined as the byte after the maximum address,
+ * eg 0x100000000 for 4GiB limit.
+ */
+/* XXX
+ * Future work: the RPC should return a special
+ * memory object (similar to device_map() ), which can then be mapped into
+ * the process address space with vm_map() like any other memory object.
+ */
+routine vm_allocate_contiguous(
+ host_priv : host_priv_t;
+ target_task : vm_task_t;
+ out vaddr : vm_address_t;
+ out paddr : rpc_phys_addr_t;
+ size : vm_size_t;
+ pmin : rpc_phys_addr_t;
+ pmax : rpc_phys_addr_t;
+ palign : rpc_phys_addr_t);
+
+/*
+ * Set whether TASK is an essential task, i.e. the whole system will crash
+ * if this task crashes.
+ */
+simpleroutine task_set_essential(
+ task : task_t;
+ essential : boolean_t);
+
+/*
+ * Returns physical addresses of a region of memory
+ */
+routine vm_pages_phys(
+ host_priv : host_priv_t;
+ target_task : vm_task_t;
+ vaddr : vm_address_t;
+ size : vm_size_t;
+ out pages : rpc_phys_addr_array_t);
+
+/*
+ * Set the name of thread THREAD to NAME. This is a debugging aid.
+ * NAME will be used in error messages printed by the kernel.
+ */
+simpleroutine thread_set_name(
+ thread : thread_t;
+ name : kernel_debug_name_t);
diff --git a/include/mach/host_info.h b/include/mach/host_info.h
new file mode 100644
index 0000000..b84376b
--- /dev/null
+++ b/include/mach/host_info.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/host_info.h
+ *
+ * Definitions for host_info call.
+ */
+
+#ifndef _MACH_HOST_INFO_H_
+#define _MACH_HOST_INFO_H_
+
+#include <mach/machine.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *host_info_t; /* varying array of integers */
+
+#define HOST_INFO_MAX (1024) /* max array size */
+typedef integer_t host_info_data_t[HOST_INFO_MAX];
+
+#define KERNEL_VERSION_MAX (512)
+typedef char kernel_version_t[KERNEL_VERSION_MAX];
+
+/*
+ * Currently defined information.
+ */
+#define HOST_BASIC_INFO 1 /* basic info */
+#define HOST_PROCESSOR_SLOTS 2 /* processor slot numbers */
+#define HOST_SCHED_INFO 3 /* scheduling info */
+#define HOST_LOAD_INFO 4 /* avenrun/mach_factor info */
+
+struct host_basic_info {
+ integer_t max_cpus; /* max number of cpus possible */
+ integer_t avail_cpus; /* number of cpus now available */
+ rpc_vm_size_t memory_size; /* size of memory in bytes */
+ cpu_type_t cpu_type; /* cpu type */
+ cpu_subtype_t cpu_subtype; /* cpu subtype */
+};
+
+typedef struct host_basic_info host_basic_info_data_t;
+typedef struct host_basic_info *host_basic_info_t;
+#define HOST_BASIC_INFO_COUNT \
+ (sizeof(host_basic_info_data_t)/sizeof(integer_t))
+
+struct host_sched_info {
+ integer_t min_timeout; /* minimum timeout in milliseconds */
+ integer_t min_quantum; /* minimum quantum in milliseconds */
+};
+
+typedef struct host_sched_info host_sched_info_data_t;
+typedef struct host_sched_info *host_sched_info_t;
+#define HOST_SCHED_INFO_COUNT \
+ (sizeof(host_sched_info_data_t)/sizeof(integer_t))
+
+struct host_load_info {
+ integer_t avenrun[3]; /* scaled by LOAD_SCALE */
+ integer_t mach_factor[3]; /* scaled by LOAD_SCALE */
+};
+
+typedef struct host_load_info host_load_info_data_t;
+typedef struct host_load_info *host_load_info_t;
+#define HOST_LOAD_INFO_COUNT \
+ (sizeof(host_load_info_data_t)/sizeof(integer_t))
+
+#endif /* _MACH_HOST_INFO_H_ */
diff --git a/include/mach/inline.h b/include/mach/inline.h
new file mode 100644
index 0000000..35f5c5d
--- /dev/null
+++ b/include/mach/inline.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+#ifndef _MACH_INLINE_H_
+#define _MACH_INLINE_H_
+
+#ifndef MACH_INLINE
+#define MACH_INLINE extern __inline
+#endif
+
+#endif /* _MACH_INLINE_H_ */
diff --git a/include/mach/kern_return.h b/include/mach/kern_return.h
new file mode 100644
index 0000000..15b836f
--- /dev/null
+++ b/include/mach/kern_return.h
@@ -0,0 +1,166 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: h/kern_return.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Kernel return codes.
+ *
+ */
+
+#ifndef _MACH_KERN_RETURN_H_
+#define _MACH_KERN_RETURN_H_
+
+#include <mach/machine/kern_return.h>
+
+#define KERN_SUCCESS 0
+
+#define KERN_INVALID_ADDRESS 1
+ /* Specified address is not currently valid.
+ */
+
+#define KERN_PROTECTION_FAILURE 2
+ /* Specified memory is valid, but does not permit the
+ * required forms of access.
+ */
+
+#define KERN_NO_SPACE 3
+ /* The address range specified is already in use, or
+ * no address range of the size specified could be
+ * found.
+ */
+
+#define KERN_INVALID_ARGUMENT 4
+ /* The function requested was not applicable to this
+ * type of argument, or an argument
+ */
+
+#define KERN_FAILURE 5
+ /* The function could not be performed. A catch-all.
+ */
+
+#define KERN_RESOURCE_SHORTAGE 6
+ /* A system resource could not be allocated to fulfill
+ * this request. This failure may not be permanent.
+ */
+
+#define KERN_NOT_RECEIVER 7
+ /* The task in question does not hold receive rights
+ * for the port argument.
+ */
+
+#define KERN_NO_ACCESS 8
+ /* Bogus access restriction.
+ */
+
+#define KERN_MEMORY_FAILURE 9
+ /* During a page fault, the target address refers to a
+ * memory object that has been destroyed. This
+ * failure is permanent.
+ */
+
+#define KERN_MEMORY_ERROR 10
+ /* During a page fault, the memory object indicated
+ * that the data could not be returned. This failure
+ * may be temporary; future attempts to access this
+ * same data may succeed, as defined by the memory
+ * object.
+ */
+
+/* KERN_ALREADY_IN_SET 11 obsolete */
+
+#define KERN_NOT_IN_SET 12
+ /* The receive right is not a member of a port set.
+ */
+
+#define KERN_NAME_EXISTS 13
+ /* The name already denotes a right in the task.
+ */
+
+#define KERN_ABORTED 14
+ /* The operation was aborted. Ipc code will
+ * catch this and reflect it as a message error.
+ */
+
+#define KERN_INVALID_NAME 15
+ /* The name doesn't denote a right in the task.
+ */
+
+#define KERN_INVALID_TASK 16
+ /* Target task isn't an active task.
+ */
+
+#define KERN_INVALID_RIGHT 17
+ /* The name denotes a right, but not an appropriate right.
+ */
+
+#define KERN_INVALID_VALUE 18
+ /* A blatant range error.
+ */
+
+#define KERN_UREFS_OVERFLOW 19
+ /* Operation would overflow limit on user-references.
+ */
+
+#define KERN_INVALID_CAPABILITY 20
+ /* The supplied (port) capability is improper.
+ */
+
+#define KERN_RIGHT_EXISTS 21
+ /* The task already has send or receive rights
+ * for the port under another name.
+ */
+
+#define KERN_INVALID_HOST 22
+ /* Target host isn't actually a host.
+ */
+
+#define KERN_MEMORY_PRESENT 23
+ /* An attempt was made to supply "precious" data
+ * for memory that is already present in a
+ * memory object.
+ */
+
+#define KERN_WRITE_PROTECTION_FAILURE 24
+ /*
+ * A page was marked as VM_PROT_NOTIFY and an attempt was
+ * made to write it
+ */
+#define KERN_TERMINATED 26
+ /* Object has been terminated and is no longer available.
+ */
+
+#define KERN_TIMEDOUT 27
+ /* Kernel operation timed out. */
+
+#define KERN_INTERRUPTED 28
+ /* Kernel operation was interrupted. */
+
+#endif /* _MACH_KERN_RETURN_H_ */
diff --git a/include/mach/mach.defs b/include/mach/mach.defs
new file mode 100644
index 0000000..c6ad077
--- /dev/null
+++ b/include/mach/mach.defs
@@ -0,0 +1,724 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Matchmaker definitions file for Mach kernel interface.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach 2000;
+
+#ifdef KERNEL_USER
+userprefix r_;
+#endif /* KERNEL_USER */
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#ifdef MACH_IMPORTS
+MACH_IMPORTS
+#endif
+
+skip; /* old port_allocate */
+skip; /* old port_deallocate */
+skip; /* old port_enable */
+skip; /* old port_disable */
+skip; /* old port_select */
+skip; /* old port_set_backlog */
+skip; /* old port_status */
+
+/* We use only a handful of RPCs as client. Skip the rest. */
+#if ! KERNEL_USER
+
+/*
+ * Create a new task with an empty set of IPC rights,
+ * and having an address space constructed from the
+ * target task (or empty, if inherit_memory is FALSE).
+ */
+routine task_create(
+ target_task : task_t;
+ inherit_memory : boolean_t;
+ out child_task : task_t);
+
+/*
+ * Destroy the target task, causing all of its threads
+ * to be destroyed, all of its IPC rights to be deallocated,
+ * and all of its address space to be deallocated.
+ */
+routine task_terminate(
+ target_task : task_t);
+
+/*
+ * Get user-level handler entry points for all
+ * emulated system calls.
+ */
+routine task_get_emulation_vector(
+ task : task_t;
+ out vector_start : int;
+ out emulation_vector: emulation_vector_t);
+
+/*
+ * Establish user-level handlers for the specified
+ * system calls. Non-emulated system calls are specified
+ * with emulation_vector[i] == EML_ROUTINE_NULL.
+ */
+routine task_set_emulation_vector(
+ task : task_t;
+ vector_start : int;
+ emulation_vector: emulation_vector_t);
+
+
+/*
+ * Returns the set of threads belonging to the target task.
+ */
+routine task_threads(
+ target_task : task_t;
+ out thread_list : thread_array_t);
+
+/*
+ * Returns information about the target task.
+ */
+routine task_info(
+ target_task : task_t;
+ flavor : int;
+ out task_info_out : task_info_t, CountInOut);
+
+
+skip; /* old task_status */
+skip; /* old task_set_notify */
+skip; /* old thread_create */
+
+/*
+ * Destroy the target thread.
+ */
+routine thread_terminate(
+ target_thread : thread_t);
+
+/*
+ * Return the selected state information for the target
+ * thread. If the thread is currently executing, the results
+ * may be stale. [Flavor THREAD_STATE_FLAVOR_LIST provides a
+ * list of valid flavors for the target thread.]
+ */
+routine thread_get_state(
+ target_thread : thread_t;
+ flavor : int;
+ out old_state : thread_state_t, CountInOut);
+
+/*
+ * Set the selected state information for the target thread.
+ * If the thread is currently executing, the state change
+ * may be ill-defined.
+ */
+routine thread_set_state(
+ target_thread : thread_t;
+ flavor : int;
+ new_state : thread_state_t);
+
+/*
+ * Returns information about the target thread.
+ */
+routine thread_info(
+ target_thread : thread_t;
+ flavor : int;
+ out thread_info_out : thread_info_t, CountInOut);
+
+skip; /* old thread_mutate */
+
+/*
+ * Allocate zero-filled memory in the address space
+ * of the target task, either at the specified address,
+ * or wherever space can be found (if anywhere is TRUE),
+ * of the specified size. The address at which the
+ * allocation actually took place is returned.
+ */
+#ifdef EMULATOR
+skip; /* the emulator redefines vm_allocate using vm_map */
+#else /* EMULATOR */
+routine vm_allocate(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ anywhere : boolean_t);
+#endif /* EMULATOR */
+
+skip; /* old vm_allocate_with_pager */
+
+/*
+ * Deallocate the specified range from the virtual
+ * address space of the target task.
+ */
+routine vm_deallocate(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t);
+
+/*
+ * Set the current or maximum protection attribute
+ * for the specified range of the virtual address
+ * space of the target task. The current protection
+ * limits the memory access rights of threads within
+ * the task; the maximum protection limits the accesses
+ * that may be given in the current protection.
+ * Protections are specified as a set of {read, write, execute}
+ * *permissions*.
+ */
+routine vm_protect(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ set_maximum : boolean_t;
+ new_protection : vm_prot_t);
+
+/*
+ * Set the inheritance attribute for the specified range
+ * of the virtual address space of the target task.
+ * The inheritance value is one of {none, copy, share}, and
+ * specifies how the child address space should acquire
+ * this memory at the time of a task_create call.
+ */
+routine vm_inherit(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ new_inheritance : vm_inherit_t);
+
+/*
+ * Returns the contents of the specified range of the
+ * virtual address space of the target task. [The
+ * range must be aligned on a virtual page boundary,
+ * and must be a multiple of pages in extent. The
+ * protection on the specified range must permit reading.]
+ */
+routine vm_read(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ out data : pointer_t);
+
+/*
+ * Writes the contents of the specified range of the
+ * virtual address space of the target task. [The
+ * range must be aligned on a virtual page boundary,
+ * and must be a multiple of pages in extent. The
+ * protection on the specified range must permit writing.]
+ */
+routine vm_write(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ data : pointer_t);
+
+/*
+ * Copy the contents of the source range of the virtual
+ * address space of the target task to the destination
+ * range in that same address space. [Both of the
+ * ranges must be aligned on a virtual page boundary,
+ * and must be multiples of pages in extent. The
+ * protection on the source range must permit reading,
+ * and the protection on the destination range must
+ * permit writing.]
+ */
+routine vm_copy(
+ target_task : vm_task_t;
+ source_address : vm_address_t;
+ size : vm_size_t;
+ dest_address : vm_address_t);
+
+/*
+ * Returns information about the contents of the virtual
+ * address space of the target task at the specified
+ * address. The returned protection, inheritance, sharing
+ * and memory object values apply to the entire range described
+ * by the address range returned; the memory object offset
+ * corresponds to the beginning of the address range.
+ * [If the specified address is not allocated, the next
+ * highest address range is described. If no addresses beyond
+ * the one specified are allocated, the call returns KERN_NO_SPACE.]
+ */
+routine vm_region(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ out size : vm_size_t;
+ out protection : vm_prot_t;
+ out max_protection : vm_prot_t;
+ out inheritance : vm_inherit_t;
+ out is_shared : boolean_t;
+ /* avoid out-translation of the argument */
+ out object_name : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+ out offset : vm_offset_t);
+
+/*
+ * Return virtual memory statistics for the host
+ * on which the target task resides. [Note that the
+ * statistics are not specific to the target task.]
+ */
+routine vm_statistics(
+ target_task : vm_task_t;
+ out vm_stats : vm_statistics_data_t);
+
+skip; /* old task_by_u*x_pid */
+skip; /* old vm_pageable */
+
+/*
+ * Stash a handful of ports for the target task; child
+ * tasks inherit this stash at task_create time.
+ */
+routine mach_ports_register(
+ target_task : task_t;
+ init_port_set : mach_port_array_t =
+ ^array[] of mach_port_t);
+
+/*
+ * Retrieve the stashed ports for the target task.
+ */
+routine mach_ports_lookup(
+ target_task : task_t;
+ out init_port_set : mach_port_array_t =
+ ^array[] of mach_port_t);
+
+skip; /* old u*x_pid */
+skip; /* old netipc_listen */
+skip; /* old netipc_ignore */
+
+#else /* ! KERNEL_USER */
+
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip;
+
+#endif /* ! KERNEL_USER */
+
+skip; /* was: memory_object_data_provided */
+
+/*
+ * Indicate that a range of the given temporary memory object does
+ * not exist, and that the backing memory object should be used
+ * instead (or zero-fill memory be used, if no backing object exists).
+ * [This call is intended for use only by the default memory manager.
+ * It should not be used to indicate a real error --
+ * memory_object_data_error should be used for that purpose.]
+ */
+simpleroutine memory_object_data_unavailable(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t);
+
+/*
+ * Retrieves the attributes currently associated with
+ * a memory object.
+ */
+routine memory_object_get_attributes(
+ memory_control : memory_object_control_t;
+ out object_ready : boolean_t;
+ out may_cache : boolean_t;
+ out copy_strategy : memory_object_copy_strategy_t);
+
+#if ! KERNEL_USER
+
+/*
+ * Sets the default memory manager, the port to which
+ * newly-created temporary memory objects are delivered.
+ * [See (memory_object_default)memory_object_create.]
+ * The old memory manager port is returned.
+ */
+routine vm_set_default_memory_manager(
+ host_priv : host_priv_t;
+ inout default_manager : mach_port_make_send_t);
+
+#else /* ! KERNEL_USER */
+
+skip;
+
+#endif /* ! KERNEL_USER */
+
+skip; /* old pager_flush_request */
+
+/*
+ * Control use of the data associated with the given
+ * memory object. For each page in the given range,
+ * perform the following operations, in order:
+ * 1) restrict access to the page (disallow
+ * forms specified by "prot");
+ * 2) write back modifications (if "should_return"
+ * is RETURN_DIRTY and the page is dirty, or
+ * "should_return" is RETURN_ALL and the page
+ * is either dirty or precious); and,
+ * 3) flush the cached copy (if "should_flush"
+ * is asserted).
+ * The set of pages is defined by a starting offset
+ * ("offset") and size ("size"). Only pages with the
+ * same page alignment as the starting offset are
+ * considered.
+ *
+ * A single acknowledgement is sent (to the "reply_to"
+ * port) when these actions are complete.
+ *
+ * There are two versions of this routine because IPC distinguishes
+ * between booleans and integers (a 2-valued integer is NOT a
+ * boolean). The new routine is backwards compatible at the C
+ * language interface.
+ */
+
+skip; /* old xxx_memory_object_lock_request */
+
+simpleroutine memory_object_lock_request(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ should_return : memory_object_return_t;
+ should_flush : boolean_t;
+ lock_value : vm_prot_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+skip; /* old xxx_task_get_emulation_vector */
+skip; /* old xxx_task_set_emulation_vector */
+skip; /* old xxx_host_info */
+skip; /* old xxx_slot_info */
+skip; /* old xxx_cpu_control */
+skip; /* old thread_statistics */
+skip; /* old task_statistics */
+skip; /* old netport_init */
+skip; /* old netport_enter */
+skip; /* old netport_remove */
+skip; /* old thread_set_priority */
+
+#if ! KERNEL_USER
+
+/*
+ * Increment the suspend count for the target task.
+ * No threads within a task may run when the suspend
+ * count for that task is non-zero.
+ */
+routine task_suspend(
+ target_task : task_t);
+
+/*
+ * Decrement the suspend count for the target task,
+ * if the count is currently non-zero. If the resulting
+ * suspend count is zero, then threads within the task
+ * that also have non-zero suspend counts may execute.
+ */
+routine task_resume(
+ target_task : task_t);
+
+/*
+ * Returns the current value of the selected special port
+ * associated with the target task.
+ */
+routine task_get_special_port(
+ task : task_t;
+ which_port : int;
+ out special_port : mach_port_t);
+
+/*
+ * Set one of the special ports associated with the
+ * target task.
+ */
+routine task_set_special_port(
+ task : task_t;
+ which_port : int;
+ special_port : mach_port_t);
+
+skip; /* old xxx_task_info */
+
+
+/*
+ * Create a new thread within the target task, returning
+ * the port representing that new thread. The
+ * initial execution state of the thread is undefined.
+ */
+routine thread_create(
+ parent_task : task_t;
+ out child_thread : thread_t);
+
+/*
+ * Increment the suspend count for the target thread.
+ * Once this call has completed, the thread will not
+ * execute any further user or meta- instructions.
+ * Once suspended, a thread may not execute again until
+ * its suspend count is zero, and the suspend count
+ * for its task is also zero.
+ */
+routine thread_suspend(
+ target_thread : thread_t);
+
+/*
+ * Decrement the suspend count for the target thread,
+ * if that count is not already zero.
+ */
+routine thread_resume(
+ target_thread : thread_t);
+
+/*
+ * Cause any user or meta- instructions currently being
+ * executed by the target thread to be aborted. [Meta-
+ * instructions consist of the basic traps for IPC
+ * (e.g., msg_send, msg_receive) and self-identification
+ * (e.g., task_self, thread_self, thread_reply). Calls
+ * described by MiG interfaces are not meta-instructions
+ * themselves.]
+ */
+routine thread_abort(
+ target_thread : thread_t);
+
+skip; /* old xxx_thread_get_state */
+skip; /* old xxx_thread_set_state */
+
+/*
+ * Returns the current value of the selected special port
+ * associated with the target thread.
+ */
+routine thread_get_special_port(
+ thread : thread_t;
+ which_port : int;
+ out special_port : mach_port_t);
+
+/*
+ * Set one of the special ports associated with the
+ * target thread.
+ */
+routine thread_set_special_port(
+ thread : thread_t;
+ which_port : int;
+ special_port : mach_port_t);
+
+skip; /* old xxx_thread_info */
+
+/*
+ * Establish a user-level handler for the specified
+ * system call.
+ */
+routine task_set_emulation(
+ target_port : task_t;
+ routine_entry_pt: vm_address_t;
+ routine_number : int);
+
+/*
+ * Establish restart pc for interrupted atomic sequences.
+ * This reuses the message number for the old task_get_io_port.
+ * See task_info.h for description of flavors.
+ *
+ */
+routine task_ras_control(
+ target_task : task_t;
+ basepc : vm_address_t;
+ boundspc : vm_address_t;
+ flavor : int);
+
+
+
+skip; /* old host_ipc_statistics */
+skip; /* old port_names */
+skip; /* old port_type */
+skip; /* old port_rename */
+skip; /* old port_allocate */
+skip; /* old port_deallocate */
+skip; /* old port_set_backlog */
+skip; /* old port_status */
+skip; /* old port_set_allocate */
+skip; /* old port_set_deallocate */
+skip; /* old port_set_add */
+skip; /* old port_set_remove */
+skip; /* old port_set_status */
+skip; /* old port_insert_send */
+skip; /* old port_extract_send */
+skip; /* old port_insert_receive */
+skip; /* old port_extract_receive */
+
+/*
+ * Map a user-defined memory object into the virtual address
+ * space of the target task. If desired (anywhere is TRUE),
+ * the kernel will find a suitable address range of the
+ * specified size; else, the specific address will be allocated.
+ *
+ * The beginning address of the range will be aligned on a virtual
+ * page boundary, be at or beyond the address specified, and
+ * meet the mask requirements (bits turned on in the mask must not
+ * be turned on in the result); the size of the range, in bytes,
+ * will be rounded up to an integral number of virtual pages.
+ *
+ * The memory in the resulting range will be associated with the
+ * specified memory object, with the beginning of the memory range
+ * referring to the specified offset into the memory object.
+ *
+ * The mapping will take the current and maximum protections and
+ * the inheritance attributes specified; see the vm_protect and
+ * vm_inherit calls for a description of these attributes.
+ *
+ * If desired (copy is TRUE), the memory range will be filled
+ * with a copy of the data from the memory object; this copy will
+ * be private to this mapping in this target task. Otherwise,
+ * the memory in this mapping will be shared with other mappings
+ * of the same memory object at the same offset (in this task or
+ * in other tasks). [The Mach kernel only enforces shared memory
+ * consistency among mappings on one host with similar page alignments.
+ * The user-defined memory manager for this object is responsible
+ * for further consistency.]
+ */
+#ifdef EMULATOR
+routine htg_vm_map(
+ target_task : vm_task_t;
+ ureplyport reply_port : mach_port_make_send_once_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ mask : vm_address_t;
+ anywhere : boolean_t;
+ memory_object : memory_object_t;
+ offset : vm_offset_t;
+ copy : boolean_t;
+ cur_protection : vm_prot_t;
+ max_protection : vm_prot_t;
+ inheritance : vm_inherit_t);
+#else /* EMULATOR */
+routine vm_map(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ mask : vm_address_t;
+ anywhere : boolean_t;
+ memory_object : memory_object_t;
+ offset : vm_offset_t;
+ copy : boolean_t;
+ cur_protection : vm_prot_t;
+ max_protection : vm_prot_t;
+ inheritance : vm_inherit_t);
+#endif /* EMULATOR */
+
+#else /* ! KERNEL_USER */
+
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip;
+
+#endif /* ! KERNEL_USER */
+
+/*
+ * Indicate that a range of the specified memory object cannot
+ * be provided at this time. [Threads waiting for memory pages
+ * specified by this call will experience a memory exception.
+ * Only threads waiting at the time of the call are affected.]
+ */
+simpleroutine memory_object_data_error(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ error_value : kern_return_t);
+
+skip; /* was: memory_object_set_attributes */
+
+/*
+ */
+simpleroutine memory_object_destroy(
+ memory_control : memory_object_control_t;
+ reason : kern_return_t);
+
+/*
+ * Provide the data contents of a range of the given memory
+ * object, with the access restriction specified, optional
+ * precious attribute, and reply message. [Only
+ * whole virtual pages of data can be accepted; partial pages
+ * will be discarded. Data should be provided on request, but
+ * may be provided in advance as desired. When data already
+ * held by this kernel is provided again, the new data is ignored.
+ * The access restriction is the subset of {read, write, execute}
+ * which are prohibited. The kernel may not provide any data (or
+ * protection) consistency among pages with different virtual page
+ * alignments within the same object. The precious value controls
+ * how the kernel treats the data. If it is FALSE, the kernel treats
+ * its copy as a temporary and may throw it away if it hasn't been
+ * changed. If the precious value is TRUE, the kernel treats its
+ * copy as a data repository and promises to return it to the manager;
+ * the manager may tell the kernel to throw it away instead by flushing
+ * and not cleaning the data -- see memory_object_lock_request. The
+ * reply_to port is for a completion message; it will be
+ * memory_object_supply_completed.]
+ */
+
+simpleroutine memory_object_data_supply(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ data : pointer_t, Dealloc[];
+ lock_value : vm_prot_t;
+ precious : boolean_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+simpleroutine memory_object_ready(
+ memory_control : memory_object_control_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t);
+
+simpleroutine memory_object_change_attributes(
+ memory_control : memory_object_control_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+#if ! KERNEL_USER
+
+skip; /* old host_callout_statistics_reset */
+skip; /* old port_set_select */
+skip; /* old port_set_backup */
+
+/*
+ * Set/Get special properties of memory associated
+ * to some virtual address range, such as cachability,
+ * migrability, replicability. Machine-dependent.
+ */
+routine vm_machine_attribute(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ attribute : vm_machine_attribute_t;
+ inout value : vm_machine_attribute_val_t);
+
+skip; /* old host_fpa_counters_reset */
+
+#endif /* ! KERNEL_USER */
+
+/*
+ * There is no more room in this interface for additional calls.
+ */
diff --git a/include/mach/mach4.defs b/include/mach/mach4.defs
new file mode 100644
index 0000000..d63d6f7
--- /dev/null
+++ b/include/mach/mach4.defs
@@ -0,0 +1,131 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Matchmaker definitions file for Mach4 kernel interface.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ mach4 4000;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+
+#ifdef MACH_PCSAMPLE
+type sampled_pc_flavor_t = unsigned;
+
+type sampled_pc_t = struct {
+ rpc_vm_offset_t id;
+ rpc_vm_offset_t pc;
+ sampled_pc_flavor_t sampletype;
+};
+
+type sampled_pc_array_t = array[*:512] of sampled_pc_t;
+type sampled_pc_seqno_t = unsigned;
+
+routine task_enable_pc_sampling(
+ host : task_t;
+ out tick : int; /* sample frequency in usecs */
+ flavor : sampled_pc_flavor_t );
+
+routine task_disable_pc_sampling(
+ host : task_t;
+ out samplecnt : int);
+
+routine task_get_sampled_pcs(
+ host : task_t;
+ inout seqno : sampled_pc_seqno_t;
+ out sampled_pcs : sampled_pc_array_t);
+
+routine thread_enable_pc_sampling(
+ host : thread_t;
+ out tick : int; /* sample frequency in usecs*/
+ flavor : sampled_pc_flavor_t );
+
+routine thread_disable_pc_sampling(
+ host : thread_t;
+ out samplecnt : int);
+
+routine thread_get_sampled_pcs(
+ host : thread_t;
+ inout seqno : sampled_pc_seqno_t;
+ out sampled_pcs : sampled_pc_array_t);
+
+
+skip /* pc_sampling reserved 1*/;
+skip /* pc_sampling reserved 2*/;
+skip /* pc_sampling reserved 3*/;
+skip /* pc_sampling reserved 4*/;
+
+#else
+
+skip; /* task_enable_pc_sampling */
+skip; /* task_disable_pc_sampling */
+skip; /* task_get_sampled_pcs */
+skip; /* thread_enable_pc_sampling */
+skip; /* thread_disable_pc_sampling */
+skip; /* thread_get_sampled_pcs */
+
+skip /* pc_sampling reserved 1*/;
+skip /* pc_sampling reserved 2*/;
+skip /* pc_sampling reserved 3*/;
+skip /* pc_sampling reserved 4*/;
+
+#endif
+
+
+/* Create a new proxy memory object from [START;START+LEN) in the
+ given memory object OBJECT at OFFSET in the new object with the maximum
+ protection MAX_PROTECTION and return it in *PORT. */
+type vm_offset_array_t = array[*:1024] of vm_offset_t;
+type vm_size_array_t = array[*:1024] of vm_size_t;
+type rpc_vm_size_array_t = array[*:1024] of rpc_vm_size_t;
+type rpc_vm_offset_array_t = array[*:1024] of rpc_vm_offset_t;
+routine memory_object_create_proxy(
+ task : ipc_space_t;
+ max_protection : vm_prot_t;
+ object : memory_object_array_t =
+ array[*:1024] of mach_port_send_t;
+ offset : rpc_vm_offset_array_t;
+ start : rpc_vm_offset_array_t;
+ len : rpc_vm_size_array_t;
+ out proxy : mach_port_t);
+
+/* Gets a proxy to the region that ADDRESS belongs to, starting at the region
+ start, with MAX_PROTECTION and LEN limited by the region ones, and returns
+ it in *PORT. */
+routine vm_region_create_proxy(
+ task : task_t;
+ address : vm_address_t;
+ max_protection : vm_prot_t;
+ len : vm_size_t;
+ out proxy : mach_port_t);
diff --git a/include/mach/mach_host.defs b/include/mach/mach_host.defs
new file mode 100644
index 0000000..a8c40af
--- /dev/null
+++ b/include/mach/mach_host.defs
@@ -0,0 +1,388 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_host.defs
+ *
+ * Abstract:
+ * Mach host operations support. Includes processor allocation and
+ * control.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif
+ mach_host 2600;
+
+/*
+ * Basic types
+ */
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#ifdef MACH_HOST_IMPORTS
+MACH_HOST_IMPORTS
+#endif
+
+/*
+ * Get list of processors on this host.
+ */
+
+routine host_processors(
+ host_priv : host_priv_t;
+ out processor_list : processor_array_t);
+
+skip; /* old yyy_host_info */
+skip; /* old yyy_processor_info */
+
+/*
+ * Start processor.
+ */
+
+routine processor_start(
+ processor : processor_t);
+
+/*
+ * Exit processor -- may not be restartable.
+ */
+
+routine processor_exit(
+ processor : processor_t);
+
+skip; /* old yyy_processor_control */
+
+/*
+ * Get default processor set for host.
+ */
+routine processor_set_default(
+ host : host_t;
+ out default_set : processor_set_name_t);
+
+skip; /* old xxx_processor_set_default_priv */
+
+/*
+ * Create new processor set. Returns real port for manipulations,
+ * and name port for obtaining information.
+ */
+routine processor_set_create(
+ host : host_t;
+ out new_set : processor_set_t;
+ out new_name : processor_set_name_t);
+
+/*
+ * Destroy processor set.
+ */
+routine processor_set_destroy(
+ set : processor_set_t);
+
+skip; /* old yyy_processor_set_info */
+
+/*
+ * Assign processor to processor set.
+ */
+routine processor_assign(
+ processor : processor_t;
+ new_set : processor_set_t;
+ wait : boolean_t);
+
+/*
+ * Get current assignment for processor.
+ */
+
+routine processor_get_assignment(
+ processor : processor_t;
+ out assigned_set : processor_set_name_t);
+
+/*
+ * Assign thread to processor set.
+ */
+routine thread_assign(
+ thread : thread_t;
+ new_set : processor_set_t);
+
+/*
+ * Assign thread to default set.
+ */
+routine thread_assign_default(
+ thread : thread_t);
+
+/*
+ * Get current assignment for thread.
+ */
+routine thread_get_assignment(
+ thread : thread_t;
+ out assigned_set : processor_set_name_t);
+
+/*
+ * Assign task to processor set.
+ */
+routine task_assign(
+ task : task_t;
+ new_set : processor_set_t;
+ assign_threads : boolean_t);
+/*
+ * Assign task to default set.
+ */
+routine task_assign_default(
+ task : task_t;
+ assign_threads : boolean_t);
+
+/*
+ * Get current assignment for task.
+ */
+routine task_get_assignment(
+ task : task_t;
+ out assigned_set : processor_set_name_t);
+
+#if defined(__x86_64__) && !defined(USER32)
+skip;
+#else
+/*
+ * Get string describing current kernel version.
+ * Deprecated, use host_get_kernel_version.
+ */
+routine host_kernel_version(
+ host : host_t;
+ out kernel_version : kernel_version_t);
+#endif
+
+/*
+ * Set priority for thread.
+ */
+routine thread_priority(
+ thread : thread_t;
+ priority : int;
+ set_max : boolean_t);
+
+/*
+ * Set max priority for thread.
+ */
+routine thread_max_priority(
+ thread : thread_t;
+ processor_set : processor_set_t;
+ max_priority : int);
+
+/*
+ * Set task priority.
+ */
+routine task_priority(
+ task : task_t;
+ priority : int;
+ change_threads : boolean_t);
+
+/*
+ * Set max priority for processor_set.
+ */
+routine processor_set_max_priority(
+ processor_set : processor_set_t;
+ max_priority : int;
+ change_threads : boolean_t);
+
+/*
+ * Set policy for thread
+ */
+routine thread_policy(
+ thread : thread_t;
+ policy : int;
+ data : int);
+
+/*
+ * Enable policy for processor set
+ */
+routine processor_set_policy_enable(
+ processor_set : processor_set_t;
+ policy : int);
+
+/*
+ * Disable policy for processor set
+ */
+routine processor_set_policy_disable(
+ processor_set : processor_set_t;
+ policy : int;
+ change_threads : boolean_t);
+/*
+ * List all tasks in processor set.
+ */
+routine processor_set_tasks(
+ processor_set : processor_set_t;
+ out task_list : task_array_t);
+
+/*
+ * List all threads in processor set.
+ */
+routine processor_set_threads(
+ processor_set : processor_set_t;
+ out thread_list : thread_array_t);
+
+/*
+ * List all processor sets on host.
+ */
+routine host_processor_sets(
+ host : host_t;
+ out processor_sets : processor_set_name_array_t);
+
+/*
+ * Get control port for a processor set.
+ */
+routine host_processor_set_priv(
+ host_priv : host_priv_t;
+ set_name : processor_set_name_t;
+ out set : processor_set_t);
+
+routine thread_depress_abort(
+ thread : thread_t);
+
+/*
+ * Set the time on this host.
+ * Only available to privileged users.
+ */
+routine host_set_time(
+ host_priv : host_priv_t;
+ new_time : time_value_t);
+
+/*
+ * Arrange for the time on this host to be gradually changed
+ * by an adjustment value, and return the old value.
+ * Only available to privileged users.
+ */
+routine host_adjust_time(
+ host_priv : host_priv_t;
+ in new_adjustment : time_value_t;
+ out old_adjustment : time_value_t);
+
+/*
+ * Get the time on this host.
+ * Available to all.
+ */
+routine host_get_time(
+ host : host_t;
+ out current_time : time_value_t);
+
+/*
+ * Reboot this host.
+ * Only available to privileged users.
+ */
+routine host_reboot(
+ host_priv : host_priv_t;
+ options : int);
+
+/*
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+routine vm_wire(
+ host : mach_port_t;
+ task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ access : vm_prot_t);
+
+/*
+ * Specify that the target thread must always be able
+ * to run and to allocate memory.
+ */
+routine thread_wire(
+ host_priv : host_priv_t;
+ thread : thread_t;
+ wired : boolean_t);
+
+/*
+ * Return information about this host.
+ */
+
+routine host_info(
+ host : host_t;
+ flavor : int;
+ out host_info_out : host_info_t, CountInOut);
+
+
+/*
+ * Return information about this processor.
+ */
+routine processor_info(
+ processor : processor_t;
+ flavor : int;
+ out host : host_t;
+ out processor_info_out: processor_info_t, CountInOut);
+
+/*
+ * Get information about processor set.
+ */
+routine processor_set_info(
+ set_name : processor_set_name_t;
+ flavor : int;
+ out host : host_t;
+ out info_out : processor_set_info_t, CountInOut);
+
+/*
+ * Do something machine-dependent to processor.
+ */
+routine processor_control(
+ processor : processor_t;
+ processor_cmd : processor_info_t);
+
+/* host_get_boot_info */
+skip;
+
+/*
+ * Get the time on this host.
+ * Available to all.
+ */
+routine host_get_time64(
+ host : host_t;
+ out current_time : time_value64_t);
+
+/*
+ * Set the time on this host.
+ * Only available to privileged users.
+ */
+routine host_set_time64(
+ host : host_t;
+ new_time : time_value64_t);
+
+/*
+ * Arrange for the time on this host to be gradually changed
+ * by an adjustment value, and return the old value.
+ * Only available to privileged users.
+ */
+routine host_adjust_time64(
+ host_priv : host_priv_t;
+ in new_adjustment : time_value64_t;
+ out old_adjustment : time_value64_t);
+
+/*
+ * Get string describing current kernel version.
+ */
+routine host_get_kernel_version(
+ host : host_t;
+ out kernel_version : new_kernel_version_t);
diff --git a/include/mach/mach_param.h b/include/mach/mach_param.h
new file mode 100644
index 0000000..b2aca08
--- /dev/null
+++ b/include/mach/mach_param.h
@@ -0,0 +1,39 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_param.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1986
+ *
+ * Mach system sizing parameters
+ */
+
+#ifndef _MACH_MACH_PARAM_H_
+#define _MACH_MACH_PARAM_H_
+
+#define TASK_PORT_REGISTER_MAX 4 /* Number of "registered" ports */
+
+#endif /* _MACH_MACH_PARAM_H_ */
diff --git a/include/mach/mach_port.defs b/include/mach/mach_port.defs
new file mode 100644
index 0000000..3823bb1
--- /dev/null
+++ b/include/mach/mach_port.defs
@@ -0,0 +1,360 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_port.defs
+ * Author: Rich Draves
+ *
+ * Copyright (c) 1989 Richard P. Draves, Jr.
+ *
+ * Exported kernel calls.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach_port 3200;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+/*
+ * Returns the set of port and port set names
+ * to which the target task has access, along with
+ * the type (set or port) for each name.
+ */
+
+routine mach_port_names(
+ task : ipc_space_t;
+ out names : mach_port_name_array_t =
+ ^array[] of mach_port_name_t;
+ out types : mach_port_type_array_t =
+ ^array[] of mach_port_type_t);
+
+/*
+ * Returns the type (set or port) for the port name
+ * within the target task. Also indicates whether
+ * there is a dead-name request for the name.
+ */
+
+routine mach_port_type(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out ptype : mach_port_type_t);
+
+/*
+ * Changes the name by which a port (or port set) is known to
+ * the target task. The new name can't be in use. The
+ * old name becomes available for recycling.
+ */
+
+routine mach_port_rename(
+ task : ipc_space_t;
+ old_name : mach_port_name_t;
+ new_name : mach_port_name_t);
+
+/*
+ * Allocates the specified kind of object, with the given name.
+ * The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * New port sets are empty. New ports don't have any
+ * send/send-once rights or queued messages. The make-send
+ * count is zero and their queue limit is MACH_PORT_QLIMIT_DEFAULT.
+ * New sets, ports, and dead names have one user reference.
+ */
+
+routine mach_port_allocate_name(
+ task : ipc_space_t;
+ right : mach_port_right_t;
+ name : mach_port_name_t);
+
+/*
+ * Allocates the specified kind of object.
+ * The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * Like mach_port_allocate_name, but the kernel picks a name.
+ * It can use any name not associated with a right.
+ */
+
+routine mach_port_allocate(
+ task : ipc_space_t;
+ right : mach_port_right_t;
+ out name : mach_port_name_t);
+
+/*
+ * Destroys all rights associated with the name and makes it
+ * available for recycling immediately. The name can be a
+ * port (possibly with multiple user refs), a port set, or
+ * a dead name (again, with multiple user refs).
+ */
+
+routine mach_port_destroy(
+ task : ipc_space_t;
+ name : mach_port_name_t);
+
+/*
+ * Releases one send/send-once/dead-name user ref.
+ * Just like mach_port_mod_refs -1, but deduces the
+ * correct type of right. This allows a user task
+ * to release a ref for a port without worrying
+ * about whether the port has died or not.
+ */
+
+routine mach_port_deallocate(
+ task : ipc_space_t;
+ name : mach_port_name_t);
+
+/*
+ * A port set always has one user ref.
+ * A send-once right always has one user ref.
+ * A dead name always has one or more user refs.
+ * A send right always has one or more user refs.
+ * A receive right always has one user ref.
+ * The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * MACH_PORT_RIGHT_SEND
+ * MACH_PORT_RIGHT_SEND_ONCE
+ */
+
+routine mach_port_get_refs(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ right : mach_port_right_t;
+ out refs : mach_port_urefs_t);
+
+/*
+ * The delta is a signed change to the task's
+ * user ref count for the right. Only dead names
+ * and send rights can have a positive delta.
+ * The resulting user ref count can't be negative.
+ * If it is zero, the right is deallocated.
+ * If the name isn't a composite right, it becomes
+ * available for recycling. The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * MACH_PORT_RIGHT_SEND
+ * MACH_PORT_RIGHT_SEND_ONCE
+ */
+
+routine mach_port_mod_refs(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ right : mach_port_right_t;
+ delta : mach_port_delta_t);
+
+skip; /* old old_mach_port_get_receive_status */
+
+/*
+ * Only valid for receive rights.
+ * Sets the queue-limit for the port.
+ * The limit must be
+ * 1 <= qlimit <= MACH_PORT_QLIMIT_MAX
+ */
+
+routine mach_port_set_qlimit(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ qlimit : mach_port_msgcount_t);
+
+/*
+ * Only valid for receive rights.
+ * Sets the make-send count for the port.
+ */
+
+routine mach_port_set_mscount(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ mscount : mach_port_mscount_t);
+
+/*
+ * Only valid for port sets. Returns a list of
+ * the members.
+ */
+
+routine mach_port_get_set_status(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out members : mach_port_name_array_t =
+ ^array[] of mach_port_name_t);
+
+/*
+ * Puts the member port (the task must have receive rights)
+ * into the after port set. (Or removes it from any port set
+ * if after is MACH_PORT_NULL.) If the port is already in
+ * a set, does an atomic move.
+ */
+
+routine mach_port_move_member(
+ task : ipc_space_t;
+ member : mach_port_name_t;
+ after : mach_port_name_t);
+
+/*
+ * Requests a notification from the kernel. The request
+ * must supply the send-once right which is used for
+ * the notification. If a send-once right was previously
+ * registered, it is returned. The msg_id must be one of
+ * MACH_NOTIFY_PORT_DESTROYED (receive rights)
+ * MACH_NOTIFY_DEAD_NAME (send/receive/send-once rights)
+ * MACH_NOTIFY_NO_SENDERS (receive rights)
+ *
+ * The sync value specifies whether a notification should
+ * get sent immediately, if appropriate. The exact meaning
+ * depends on the notification:
+ * MACH_NOTIFY_PORT_DESTROYED: must be zero.
+ * MACH_NOTIFY_DEAD_NAME: if non-zero, then name can be dead,
+ * and the notification gets sent immediately.
+ * If zero, then name can't be dead.
+ * MACH_NOTIFY_NO_SENDERS: the notification gets sent
+ * immediately if the current mscount is greater
+ * than or equal to the sync value and there are no
+ * extant send rights.
+ */
+
+routine mach_port_request_notification(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ id : mach_msg_id_t;
+ sync : mach_port_mscount_t;
+ notify : mach_port_send_once_t;
+ out previous : mach_port_send_once_t);
+
+/*
+ * Inserts the specified rights into the target task,
+ * using the specified name. If inserting send/receive
+ * rights and the task already has send/receive rights
+ * for the port, then the names must agree. In any case,
+ * the task gains a user ref for the port.
+ */
+
+routine mach_port_insert_right(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ poly : mach_port_poly_t);
+
+/*
+ * Returns the specified right for the named port
+ * in the target task, extracting that right from
+ * the target task. The target task loses a user
+ * ref and the name may be available for recycling.
+ * msgt_name must be one of
+ * MACH_MSG_TYPE_MOVE_RECEIVE
+ * MACH_MSG_TYPE_COPY_SEND
+ * MACH_MSG_TYPE_MAKE_SEND
+ * MACH_MSG_TYPE_MOVE_SEND
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE
+ * MACH_MSG_TYPE_MOVE_SEND_ONCE
+ */
+
+routine mach_port_extract_right(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ msgt_name : mach_msg_type_name_t;
+ out poly : mach_port_poly_t);
+
+/*
+ * The task must have receive rights for the named port.
+ * Returns a status structure (see mach/port.h).
+ */
+
+routine mach_port_get_receive_status(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out status : mach_port_status_t);
+
+/*
+ * Only valid for receive rights.
+ * Sets the sequence number for the port.
+ */
+
+routine mach_port_set_seqno(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ seqno : mach_port_seqno_t);
+
+#ifdef MIGRATING_THREADS
+/*
+ * Only valid for receive rights.
+ * Set the user-mode entry info for RPCs coming through this port.
+ * Do this BEFORE attaching an ActPool to this port,
+ * unless you can be sure no RPCs will be coming through it yet.
+ */
+
+routine mach_port_set_rpcinfo(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ rpc_info : thread_info_t); /* XXX */
+
+/*
+ * Only valid for receive rights.
+ * Create a new activation for migrating RPC, and attach it to the port's ActPool.
+ * Create an ActPool for the port if it doesn't already have one.
+ * Supply a stack and receive memory buffer.
+ */
+
+routine mach_port_create_act(
+ task : task_t;
+ name : mach_port_name_t;
+ user_stack : vm_offset_t;
+ user_rbuf : vm_offset_t;
+ user_rbuf_size : vm_size_t;
+ out new_act : thread_t);
+
+#else /* MIGRATING_THREADS */
+
+skip; /* mach_port_set_rpcinfo */
+skip; /* mach_port_create_act */
+
+#endif /* MIGRATING_THREADS */
+
+/*
+ * Only valid for receive rights.
+ * Set the protected payload for this right to the given value.
+ */
+
+routine mach_port_set_protected_payload(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ payload : rpc_uintptr_t);
+
+/*
+ * Only valid for receive rights.
+ * Clear the protected payload for this right.
+ */
+
+routine mach_port_clear_protected_payload(
+ task : ipc_space_t;
+ name : mach_port_name_t);
diff --git a/include/mach/mach_traps.h b/include/mach/mach_traps.h
new file mode 100644
index 0000000..2a87f62
--- /dev/null
+++ b/include/mach/mach_traps.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Definitions of general Mach system traps.
+ *
+ * IPC traps are defined in <mach/message.h>.
+ * Kernel RPC functions are defined in <mach/mach_interface.h>.
+ */
+
+#ifndef _MACH_MACH_TRAPS_H_
+#define _MACH_MACH_TRAPS_H_
+
+#include <mach/port.h>
+
+mach_port_name_t mach_reply_port (void);
+mach_port_name_t mach_thread_self (void);
+mach_port_name_t mach_task_self (void);
+mach_port_name_t mach_host_self (void);
+
+#endif /* _MACH_MACH_TRAPS_H_ */
diff --git a/include/mach/mach_types.defs b/include/mach/mach_types.defs
new file mode 100644
index 0000000..7419601
--- /dev/null
+++ b/include/mach/mach_types.defs
@@ -0,0 +1,299 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel interface type declarations
+ */
+
+#ifndef _MACH_MACH_TYPES_DEFS_
+#define _MACH_MACH_TYPES_DEFS_
+
+/*
+ * For KernelServer and KernelUser interfaces, Mig will
+ * automagically use ipc_port_t instead of mach_port_t
+ * on the kernel side of the interface. For example,
+ * convert_task_to_port really returns ipc_port_t.
+ * Doing this in Mig saves many explicit conditional
+ * cusertype/cservertype declarations.
+ *
+ * Mig doesn't translate the components of an array.
+ * For example, Mig won't use the thread_t translations
+ * to translate a thread_array_t argument.
+ */
+
+#include <mach/std_types.defs>
+#if KERNEL_SERVER
+#endif /* KERNEL_SERVER */
+
+#ifdef USERPREFIX
+userprefix USERPREFIX;
+#endif
+
+#ifdef SERVERPREFIX
+serverprefix SERVERPREFIX;
+#endif
+
+type mach_port_status_t = struct {
+ mach_port_name_t mps_pset; /* containing port set */
+ mach_port_seqno_t mps_seqno; /* sequence number */
+ mach_port_mscount_t mps_mscount; /* make-send count */
+ mach_port_msgcount_t mps_qlimit; /* queue limit */
+ mach_port_msgcount_t mps_msgcount; /* number in the queue */
+ mach_port_rights_t mps_sorights; /* how many send-once rights */
+ boolean_t mps_srights; /* do send rights exist? */
+ boolean_t mps_pdrequest; /* port-deleted requested? */
+ boolean_t mps_nsrequest; /* no-senders requested? */
+};
+
+type task_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: task_t convert_port_to_task(mach_port_t)
+ outtran: mach_port_t convert_task_to_port(task_t)
+ destructor: task_deallocate(task_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+#ifdef MIGRATING_THREADS
+#if KERNEL
+/* What the conventional external Mach interfaces see as a thread_t
+ is really an act_t within the kernel. */
+#define thread_t act_t
+#define convert_port_to_thread convert_port_to_act
+#define convert_thread_to_port convert_act_to_port
+#define thread_deallocate act_deallocate
+#endif /* KERNEL */
+#endif /* MIGRATING_THREADS */
+
+type thread_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: thread_t convert_port_to_thread(mach_port_t)
+ outtran: mach_port_t convert_thread_to_port(thread_t)
+ destructor: thread_deallocate(thread_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type thread_state_t = array[*:1024] of natural_t;
+
+type task_array_t = ^array[] of task_t;
+type thread_array_t = ^array[] of thread_t;
+
+type vm_task_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: vm_map_t convert_port_to_map(mach_port_t)
+ destructor: vm_map_deallocate(vm_map_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type ipc_space_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: ipc_space_t convert_port_to_space(mach_port_t)
+ destructor: space_deallocate(ipc_space_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+#if defined(KERNEL) && defined(USER32)
+type rpc_uintptr_t = uint32_t;
+type rpc_vm_size_t = uint32_t;
+#else /* KERNEL and USER32 */
+type rpc_uintptr_t = uintptr_t;
+type rpc_vm_size_t = uintptr_t;
+#endif /* KERNEL and USER32 */
+
+type rpc_vm_offset_t = rpc_vm_size_t;
+
+type vm_address_t = rpc_vm_size_t
+#if defined(KERNEL_SERVER)
+ intran: vm_address_t convert_vm_from_user(rpc_vm_address_t)
+ outtran: rpc_vm_address_t convert_vm_to_user(vm_address_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_vm_address_t
+#endif
+ ;
+type vm_offset_t = rpc_vm_offset_t
+#if defined(KERNEL_SERVER)
+ intran: vm_offset_t convert_vm_from_user(rpc_vm_offset_t)
+ outtran: rpc_vm_offset_t convert_vm_to_user(vm_offset_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_vm_offset_t
+#endif
+ ;
+type vm_size_t = rpc_vm_size_t
+#if defined(KERNEL_SERVER)
+ intran: vm_size_t convert_vm_from_user(rpc_vm_size_t)
+ outtran: rpc_vm_size_t convert_vm_to_user(vm_size_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_vm_size_t
+#endif
+;
+type vm_prot_t = int;
+type vm_inherit_t = int;
+type vm_statistics_data_t = struct[13] of integer_t;
+type vm_machine_attribute_t = int;
+type vm_machine_attribute_val_t = int;
+type vm_sync_t = int;
+
+type thread_info_t = array[*:1024] of integer_t;
+
+type task_info_t = array[*:1024] of integer_t;
+
+type memory_object_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: ipc_port_t null_conversion(mach_port_t)
+#else /* KERNEL_SERVER */
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_OUTTRAN
+ outtran: MEMORY_OBJECT_OUTTRAN
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+#endif /* KERNEL_SERVER */
+ ;
+
+type memory_object_control_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: vm_object_t vm_object_lookup(mach_port_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type memory_object_name_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: vm_object_t vm_object_lookup_name(mach_port_t)
+ destructor: vm_object_deallocate(vm_object_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type memory_object_copy_strategy_t = int;
+type memory_object_return_t = int;
+
+type host_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: host_t convert_port_to_host(mach_port_t)
+ outtran: mach_port_t convert_host_to_port(host_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type host_priv_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: host_t convert_port_to_host_priv(mach_port_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type host_info_t = array[*:1024] of integer_t;
+
+type processor_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: processor_t convert_port_to_processor(mach_port_t)
+ outtran: mach_port_t convert_processor_to_port(processor_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type processor_array_t = ^array[] of processor_t;
+type processor_info_t = array[*:1024] of integer_t;
+
+type processor_set_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: processor_set_t convert_port_to_pset(mach_port_t)
+ outtran: mach_port_t convert_pset_to_port(processor_set_t)
+ destructor: pset_deallocate(processor_set_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type processor_set_array_t = ^array[] of processor_set_t;
+
+type processor_set_name_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: processor_set_t convert_port_to_pset_name(mach_port_t)
+ outtran: mach_port_t convert_pset_name_to_port(processor_set_t)
+ destructor: pset_deallocate(processor_set_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type processor_set_name_array_t = ^array[] of processor_set_name_t;
+
+type processor_set_info_t = array[*:1024] of integer_t;
+
+type kernel_version_t = (MACH_MSG_TYPE_STRING, 512*8);
+type new_kernel_version_t = c_string[512]
+ ctype: kernel_version_t;
+
+type rpc_time_value_t = struct {
+ rpc_long_integer_t seconds;
+ integer_t microseconds;
+};
+type time_value_t = rpc_time_value_t
+#if defined(KERNEL_SERVER)
+ intran: time_value_t convert_time_value_from_user(rpc_time_value_t)
+ outtran: rpc_time_value_t convert_time_value_to_user(time_value_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_time_value_t
+#endif
+ ;
+
+type time_value64_t = struct {
+ int64_t seconds;
+ int64_t nanoseconds;
+};
+
+type emulation_vector_t = ^array[] of vm_offset_t;
+
+type rpc_signature_info_t = array[*:1024] of int;
+
+#if KERNEL_SERVER
+simport <kern/ipc_kobject.h>; /* for null conversion */
+simport <kern/ipc_tt.h>; /* for task/thread conversion */
+simport <kern/ipc_host.h>; /* for host/processor/pset conversions */
+simport <kern/task.h>; /* for task_t */
+simport <kern/thread.h>; /* for thread_t */
+simport <kern/host.h>; /* for host_t */
+simport <kern/processor.h>; /* for processor_t, processor_set_t */
+simport <vm/vm_object.h>; /* for vm_object_t */
+simport <vm/vm_map.h>; /* for vm_map_t */
+simport <ipc/ipc_space.h>; /* for ipc_space_t */
+#endif /* KERNEL_SERVER */
+
+import <mach/mach_types.h>;
+
+#endif /* _MACH_MACH_TYPES_DEFS_ */
diff --git a/include/mach/mach_types.h b/include/mach/mach_types.h
new file mode 100644
index 0000000..5ecd686
--- /dev/null
+++ b/include/mach/mach_types.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_types.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1986
+ *
+ * Mach external interface definitions.
+ *
+ */
+
+#ifndef _MACH_MACH_TYPES_H_
+#define _MACH_MACH_TYPES_H_
+
+#include <mach/host_info.h>
+#include <mach/machine.h>
+#include <mach/machine/vm_types.h>
+#include <mach/memory_object.h>
+#include <mach/pc_sample.h>
+#include <mach/port.h>
+#include <mach/processor_info.h>
+#include <mach/task_info.h>
+#include <mach/task_special_ports.h>
+#include <mach/thread_info.h>
+#include <mach/thread_special_ports.h>
+#include <mach/thread_status.h>
+#include <mach/time_value.h>
+#include <mach/vm_attributes.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_statistics.h>
+#include <mach/vm_cache_statistics.h>
+#include <mach/vm_wire.h>
+#include <mach/vm_sync.h>
+
+#ifdef MACH_KERNEL
+
+typedef struct task *task_t;
+typedef struct thread *thread_t;
+typedef struct processor *processor_t;
+typedef struct processor_set *processor_set_t;
+
+#else /* MACH_KERNEL */
+typedef mach_port_t task_t;
+typedef task_t *task_array_t;
+typedef task_t vm_task_t;
+typedef task_t ipc_space_t;
+typedef mach_port_t thread_t;
+typedef thread_t *thread_array_t;
+typedef mach_port_t host_t;
+typedef mach_port_t host_priv_t;
+typedef mach_port_t processor_t;
+typedef mach_port_t *processor_array_t;
+typedef mach_port_t processor_set_t;
+typedef mach_port_t processor_set_name_t;
+typedef mach_port_t *processor_set_array_t;
+typedef mach_port_t *processor_set_name_array_t;
+typedef vm_offset_t *emulation_vector_t;
+#endif /* MACH_KERNEL */
+
+/*
+ * Backwards compatibility, for those programs written
+ * before mach/{std,mach}_types.{defs,h} were set up.
+ */
+#include <mach/std_types.h>
+
+#endif /* _MACH_MACH_TYPES_H_ */
diff --git a/include/mach/machine.h b/include/mach/machine.h
new file mode 100644
index 0000000..9a176e8
--- /dev/null
+++ b/include/mach/machine.h
@@ -0,0 +1,268 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* File: machine.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Machine independent machine abstraction.
+ */
+
+#ifndef _MACH_MACHINE_H_
+#define _MACH_MACHINE_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/boolean.h>
+
+/*
+ * For each host, there is a maximum possible number of
+ * cpus that may be available in the system. This is the
+ * compile-time constant NCPUS, which is defined in cpus.h.
+ *
+ * In addition, there is a machine_slot specifier for each
+ * possible cpu in the system.
+ */
+
+struct machine_info {
+ integer_t major_version; /* kernel major version id */
+ integer_t minor_version; /* kernel minor version id */
+ integer_t max_cpus; /* max number of cpus compiled */
+ integer_t avail_cpus; /* number actually available */
+ vm_size_t memory_size; /* size of memory in bytes */
+};
+
+typedef struct machine_info *machine_info_t;
+typedef struct machine_info machine_info_data_t; /* bogus */
+
+typedef integer_t cpu_type_t;
+typedef integer_t cpu_subtype_t;
+
+#define CPU_STATE_MAX 3
+
+#define CPU_STATE_USER 0
+#define CPU_STATE_SYSTEM 1
+#define CPU_STATE_IDLE 2
+
+struct machine_slot {
+/*boolean_t*/integer_t is_cpu; /* is there a cpu in this slot? */
+ cpu_type_t cpu_type; /* type of cpu */
+ cpu_subtype_t cpu_subtype; /* subtype of cpu */
+/*boolean_t*/integer_t running; /* is cpu running */
+ integer_t cpu_ticks[CPU_STATE_MAX];
+ integer_t clock_freq; /* clock interrupt frequency */
+};
+
+typedef struct machine_slot *machine_slot_t;
+typedef struct machine_slot machine_slot_data_t; /* bogus */
+
+#ifdef MACH_KERNEL
+extern struct machine_info machine_info;
+extern struct machine_slot machine_slot[NCPUS];
+#endif /* MACH_KERNEL */
+
+/*
+ * Machine types known by all.
+ *
+ * When adding new types & subtypes, please also update slot_name.c
+ * in the libmach sources.
+ */
+
+#define CPU_TYPE_VAX ((cpu_type_t) 1)
+#define CPU_TYPE_ROMP ((cpu_type_t) 2)
+#define CPU_TYPE_MC68020 ((cpu_type_t) 3)
+#define CPU_TYPE_NS32032 ((cpu_type_t) 4)
+#define CPU_TYPE_NS32332 ((cpu_type_t) 5)
+#define CPU_TYPE_NS32532 ((cpu_type_t) 6)
+#define CPU_TYPE_I386 ((cpu_type_t) 7)
+#define CPU_TYPE_MIPS ((cpu_type_t) 8)
+#define CPU_TYPE_MC68030 ((cpu_type_t) 9)
+#define CPU_TYPE_MC68040 ((cpu_type_t) 10)
+#define CPU_TYPE_HPPA ((cpu_type_t) 11)
+#define CPU_TYPE_ARM ((cpu_type_t) 12)
+#define CPU_TYPE_MC88000 ((cpu_type_t) 13)
+#define CPU_TYPE_SPARC ((cpu_type_t) 14)
+#define CPU_TYPE_I860 ((cpu_type_t) 15)
+#define CPU_TYPE_ALPHA ((cpu_type_t) 16)
+#define CPU_TYPE_I486 ((cpu_type_t) 17)
+#define CPU_TYPE_PENTIUM ((cpu_type_t) 18)
+#define CPU_TYPE_PENTIUMPRO ((cpu_type_t) 19)
+#define CPU_TYPE_POWERPC ((cpu_type_t) 20)
+#define CPU_TYPE_X86_64 ((cpu_type_t) 21)
+
+/*
+ * Machine subtypes (these are defined here, instead of in a machine
+ * dependent directory, so that any program can get all definitions
+ * regardless of where is it compiled).
+ */
+
+/*
+ * VAX subtypes (these do *not* necessarily conform to the actual cpu
+ * ID assigned by DEC available via the SID register).
+ */
+
+#define CPU_SUBTYPE_VAX780 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_VAX785 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_VAX750 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_VAX730 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_UVAXI ((cpu_subtype_t) 5)
+#define CPU_SUBTYPE_UVAXII ((cpu_subtype_t) 6)
+#define CPU_SUBTYPE_VAX8200 ((cpu_subtype_t) 7)
+#define CPU_SUBTYPE_VAX8500 ((cpu_subtype_t) 8)
+#define CPU_SUBTYPE_VAX8600 ((cpu_subtype_t) 9)
+#define CPU_SUBTYPE_VAX8650 ((cpu_subtype_t) 10)
+#define CPU_SUBTYPE_VAX8800 ((cpu_subtype_t) 11)
+#define CPU_SUBTYPE_UVAXIII ((cpu_subtype_t) 12)
+
+/*
+ * ROMP subtypes.
+ */
+
+#define CPU_SUBTYPE_RT_PC ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_RT_APC ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_RT_135 ((cpu_subtype_t) 3)
+
+/*
+ * 68020 subtypes.
+ */
+
+#define CPU_SUBTYPE_SUN3_50 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_SUN3_160 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_SUN3_260 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_SUN3_110 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_SUN3_60 ((cpu_subtype_t) 5)
+
+#define CPU_SUBTYPE_HP_320 ((cpu_subtype_t) 6)
+ /* 16.67 Mhz HP 300 series, custom MMU [HP 320] */
+#define CPU_SUBTYPE_HP_330 ((cpu_subtype_t) 7)
+ /* 16.67 Mhz HP 300 series, MC68851 MMU [HP 318,319,330,349] */
+#define CPU_SUBTYPE_HP_350 ((cpu_subtype_t) 8)
+ /* 25.00 Mhz HP 300 series, custom MMU [HP 350] */
+
+/*
+ * 32032/32332/32532 subtypes.
+ */
+
+#define CPU_SUBTYPE_MMAX_DPC ((cpu_subtype_t) 1) /* 032 CPU */
+#define CPU_SUBTYPE_SQT ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_MMAX_APC_FPU ((cpu_subtype_t) 3) /* 32081 FPU */
+#define CPU_SUBTYPE_MMAX_APC_FPA ((cpu_subtype_t) 4) /* Weitek FPA */
+#define CPU_SUBTYPE_MMAX_XPC ((cpu_subtype_t) 5) /* 532 CPU */
+#define CPU_SUBTYPE_PC532 ((cpu_subtype_t) 6) /* pc532 board */
+
+/*
+ * 80386/80486 subtypes.
+ */
+
+#define CPU_SUBTYPE_AT386 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_EXL ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_iPSC386 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_SYMMETRY ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_PS2 ((cpu_subtype_t) 5) /* PS/2 w/ MCA */
+
+/*
+ * Mips subtypes.
+ */
+
+#define CPU_SUBTYPE_MIPS_R2300 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_MIPS_R2600 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_MIPS_R2800 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_MIPS_R2000a ((cpu_subtype_t) 4) /* pmax */
+#define CPU_SUBTYPE_MIPS_R2000 ((cpu_subtype_t) 5)
+#define CPU_SUBTYPE_MIPS_R3000a ((cpu_subtype_t) 6) /* 3max */
+#define CPU_SUBTYPE_MIPS_R3000 ((cpu_subtype_t) 7)
+
+/*
+ * MC68030 subtypes.
+ */
+
+#define CPU_SUBTYPE_NeXT ((cpu_subtype_t) 1)
+ /* NeXt thinks MC68030 is 6 rather than 9 */
+#define CPU_SUBTYPE_HP_340 ((cpu_subtype_t) 2)
+ /* 16.67 Mhz HP 300 series [HP 332,340] */
+#define CPU_SUBTYPE_HP_360 ((cpu_subtype_t) 3)
+ /* 25.00 Mhz HP 300 series [HP 360] */
+#define CPU_SUBTYPE_HP_370 ((cpu_subtype_t) 4)
+ /* 33.33 Mhz HP 300 series [HP 370] */
+
+/*
+ * HPPA subtypes.
+ */
+
+#define CPU_SUBTYPE_HPPA_825 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_HPPA_835 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_HPPA_840 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_HPPA_850 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_HPPA_855 ((cpu_subtype_t) 5)
+
+/*
+ * ARM subtypes.
+ */
+
+#define CPU_SUBTYPE_ARM_A500_ARCH ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_ARM_A500 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_ARM_A440 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_ARM_M4 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_ARM_A680 ((cpu_subtype_t) 5)
+
+/*
+ * MC88000 subtypes.
+ */
+
+#define CPU_SUBTYPE_MMAX_JPC ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_LUNA88K ((cpu_subtype_t) 2)
+
+/*
+ * Sparc subtypes.
+ */
+
+#define CPU_SUBTYPE_SUN4_260 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_SUN4_110 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_SUN4_330 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_SUN4C_60 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_SUN4C_65 ((cpu_subtype_t) 5)
+#define CPU_SUBTYPE_SUN4C_20 ((cpu_subtype_t) 6)
+#define CPU_SUBTYPE_SUN4C_30 ((cpu_subtype_t) 7)
+#define CPU_SUBTYPE_SUN4C_40 ((cpu_subtype_t) 8)
+#define CPU_SUBTYPE_SUN4C_50 ((cpu_subtype_t) 9)
+#define CPU_SUBTYPE_SUN4C_75 ((cpu_subtype_t) 10)
+
+/*
+ * i860 subtypes.
+ */
+
+#define CPU_SUBTYPE_iPSC860 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_OKI860 ((cpu_subtype_t) 2)
+
+/*
+ * Alpha subtypes.
+ */
+
+#define CPU_SUBTYPE_ALPHA_EV3 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_ALPHA_EV4 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_ALPHA_ISP ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_ALPHA_21064 ((cpu_subtype_t) 4)
+
+
+#endif /* _MACH_MACHINE_H_ */
diff --git a/include/mach/macro_help.h b/include/mach/macro_help.h
new file mode 100644
index 0000000..f041e40
--- /dev/null
+++ b/include/mach/macro_help.h
@@ -0,0 +1,18 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1988 Carnegie-Mellon University
+ * All rights reserved. The CMU software License Agreement specifies
+ * the terms and conditions for use and redistribution.
+ */
+
+#ifndef _MACRO_HELP_H_
+#define _MACRO_HELP_H_ 1
+
+#define MACRO_BEGIN do {
+#define MACRO_END } while (0)
+
+#define MACRO_RETURN if (1) return
+
+#endif /* _MACRO_HELP_H_ */
+
+
diff --git a/include/mach/memory_object.defs b/include/mach/memory_object.defs
new file mode 100644
index 0000000..4afd67b
--- /dev/null
+++ b/include/mach/memory_object.defs
@@ -0,0 +1,333 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/memory_object.defs
+ *
+ * Abstract:
+ * Basic Mach external memory management interface declaration.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ memory_object 2200;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#ifdef MEMORY_OBJECT_IMPORTS
+MEMORY_OBJECT_IMPORTS
+#endif
+
+#if SEQNOS
+serverprefix seqnos_;
+serverdemux seqnos_memory_object_server;
+#endif /* SEQNOS */
+
+/*
+ * Initialize the specified memory object, providing
+ * a request port on which control calls can be made, and
+ * a name port that identifies this object to callers of
+ * vm_regions.
+ * [To allow the mapping of this object to be used, the
+ * memory manager must call memory_object_ready or
+ * memory_object_change_attributes. To reject all mappings of
+ * this object, the memory manager may use
+ * memory_object_destroy.]
+ */
+simpleroutine memory_object_init(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ memory_object_name : memory_object_name_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ memory_object_page_size : vm_size_t);
+
+/*
+ * Indicates that the specified memory object is no longer mapped
+ * (or cached -- see memory_object_ready or
+ * memory_object_change_attributes), and that further mappings
+ * will cause another memory_object_init call to be made. No
+ * further calls will be made on the memory object by this
+ * kernel.
+ *
+ * [All rights to the control and name ports are included
+ * in this call. The memory manager should use port_deallocate
+ * to release them once they are no longer needed.]
+ */
+simpleroutine memory_object_terminate(
+ memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload:
+ MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif /* KERNEL_USER */
+ ;
+ memory_object_name : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif /* KERNEL_USER */
+ );
+
+/*
+ * Indicates that a copy has been made of the specified range of
+ * the given original memory object. The kernel will use the new
+ * memory object, control and name ports to refer to the new copy
+ * (once the memory manager has asserted its "ready" attribute).
+ *
+ * Cached pages from the original memory object at the time of
+ * the copy operation are handled as follows:
+ * Readable pages may be silently copied to the new
+ * memory object (with all access permissions).
+ * Pages not copied are locked to prevent write access.
+ *
+ * This call includes only the new memory object itself; a
+ * memory_object_init call will be made on the new memory
+ * object after the actions above are completed.
+ *
+ * The new memory object is *temporary*, meaning that the
+ * memory manager should not change its contents or allow
+ * the memory object to be mapped in another client. The
+ * memory manager may use the memory_object_data_unavailable
+ * call to indicate that the appropriate page of the original
+ * memory object may be used to fulfill a data request.
+ *
+ * [Reply should be memory_object_ready or
+ * memory_object_change_attributes on the new memory object control
+ * port to indicate readiness.]
+ */
+simpleroutine memory_object_copy(
+ old_memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ old_memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ new_memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif /* KERNEL_USER */
+ );
+
+/*
+ * Request data from this memory object. At least
+ * the specified data should be returned with at
+ * least the specified access permitted.
+ *
+ * [Reply should be memory_object_data_supply.]
+ */
+simpleroutine memory_object_data_request(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ desired_access : vm_prot_t);
+
+/*
+ * Request that the specified portion of this
+ * memory object be unlocked to allow the specified
+ * forms of access; the kernel already has the data.
+ *
+ * [Reply should be memory_object_lock_request.]
+ */
+simpleroutine memory_object_data_unlock(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ desired_access : vm_prot_t);
+
+skip; /* was: memory_object_data_write */
+
+/*
+ * Indicate that a previous memory_object_lock_request has been
+ * completed. Note that this call is made on whatever
+ * port is specified in the memory_object_lock_request; that port
+ * need not be the memory object port itself.
+ *
+ * [No reply expected.]
+ */
+simpleroutine memory_object_lock_completed(
+ memory_object : memory_object_t =
+ polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t);
+
+/*
+ * Indicate that a previous memory_object_data_supply has been
+ * completed. Note that this call is made on whatever
+ * port is specified in the memory_object_data_supply; that port
+ * need not be the memory object port itself.
+ *
+ * The result parameter indicates what happened during the supply.
+ * If it is not KERN_SUCCESS, then error_offset identifies the
+ * first offset at which a problem occurred. The pagein operation
+ * stopped at this point. Note that the only failure reported
+ * by this mechanism is KERN_MEMORY_PRESENT. All other failures
+ * (invalid argument, error on pagein of supplied data in manager's
+ * address space) cause the entire operation to fail.
+ *
+ * XXX Check what actually happens in latter case!
+ *
+ * [No reply expected.]
+ */
+simpleroutine memory_object_supply_completed(
+ memory_object : memory_object_t =
+ polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ result : kern_return_t;
+ error_offset : vm_offset_t);
+
+/*
+ * Return data to manager. This call indicates whether the
+ * returned data is dirty and whether the kernel kept a copy.
+ * Precious data remains precious if the kernel keeps a copy.
+ * The indication that the kernel kept a copy is only a hint if
+ * the data is not precious; the cleaned copy may be discarded
+ * without further notifying the manager.
+ *
+ * [Reply should be vm_deallocate to release the data.]
+ */
+simpleroutine memory_object_data_return(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ data : pointer_t;
+ dirty : boolean_t;
+ kernel_copy : boolean_t);
+
+/*
+ * XXX Warning: This routine does NOT contain a memory_object_control_t
+ * XXX because the memory_object_change_attributes call may cause
+ * XXX memory object termination (by uncaching the object). This would
+ * XXX yield an invalid port.
+ */
+
+simpleroutine memory_object_change_completed(
+ memory_object : memory_object_t =
+ polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t);
diff --git a/include/mach/memory_object.h b/include/mach/memory_object.h
new file mode 100644
index 0000000..7e0c374
--- /dev/null
+++ b/include/mach/memory_object.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: memory_object.h
+ * Author: Michael Wayne Young
+ *
+ * External memory management interface definition.
+ */
+
+#ifndef _MACH_MEMORY_OBJECT_H_
+#define _MACH_MEMORY_OBJECT_H_
+
+/*
+ * User-visible types used in the external memory
+ * management interface:
+ */
+
+#include <mach/port.h>
+
+#ifdef MACH_KERNEL
+#include <ipc/ipc_types.h>
+typedef ipc_port_t memory_object_t;
+#else
+typedef mach_port_t memory_object_t;
+#endif
+ /* Represents a memory object ... */
+ /* Used by user programs to specify */
+ /* the object to map; used by the */
+ /* kernel to retrieve or store data */
+
+typedef memory_object_t *memory_object_array_t;
+
+typedef mach_port_t memory_object_control_t;
+ /* Provided to a memory manager; ... */
+ /* used to control a memory object */
+
+typedef mach_port_t memory_object_name_t;
+ /* Used to describe the memory ... */
+ /* object in vm_regions() calls */
+
+typedef int memory_object_copy_strategy_t;
+ /* How memory manager handles copy: */
+#define MEMORY_OBJECT_COPY_NONE 0
+ /* ... No special support */
+#define MEMORY_OBJECT_COPY_CALL 1
+ /* ... Make call on memory manager */
+#define MEMORY_OBJECT_COPY_DELAY 2
+ /* ... Memory manager doesn't ... */
+ /* change data externally. */
+#define MEMORY_OBJECT_COPY_TEMPORARY 3
+ /* ... Memory manager doesn't ... */
+ /* change data externally, and */
+ /* doesn't need to see changes. */
+
+typedef int memory_object_return_t;
+ /* Which pages to return to manager
+ this time (lock_request) */
+#define MEMORY_OBJECT_RETURN_NONE 0
+ /* ... don't return any. */
+#define MEMORY_OBJECT_RETURN_DIRTY 1
+ /* ... only dirty pages. */
+#define MEMORY_OBJECT_RETURN_ALL 2
+ /* ... dirty and precious pages. */
+
+#define MEMORY_OBJECT_NULL MACH_PORT_NULL
+
+#endif /* _MACH_MEMORY_OBJECT_H_ */
diff --git a/include/mach/memory_object_default.defs b/include/mach/memory_object_default.defs
new file mode 100644
index 0000000..e62f14d
--- /dev/null
+++ b/include/mach/memory_object_default.defs
@@ -0,0 +1,118 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/memory_object_default.defs
+ *
+ * Abstract:
+ * Mach external memory management interface declaration; subset
+ * that is applicable to managers of kernel-created memory objects.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ memory_object_default 2250;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#ifdef MEMORY_OBJECT_IMPORTS
+MEMORY_OBJECT_IMPORTS
+#endif
+
+#if SEQNOS
+serverprefix seqnos_;
+serverdemux seqnos_memory_object_default_server;
+#endif /* SEQNOS */
+
+/*
+ * Pass on responsibility for the new kernel-created memory
+ * object. The port on which this request is made is the port
+ * (possibly a memory object itself) registered as the "default
+ * pager". Other arguments are as described for memory_object_init.
+ * [No reply required.]
+ */
+simpleroutine memory_object_create(
+ old_memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ new_memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif /* KERNEL_USER */
+ ;
+ new_object_size : vm_size_t;
+ new_control_port : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ new_name : memory_object_name_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ new_page_size : vm_size_t);
+
+/*
+ * Provide initial data contents for this region of
+ * the memory object. If data has already been written
+ * to the object, this value must be discarded; otherwise,
+ * this call acts identically to memory_object_data_return.
+ */
+simpleroutine memory_object_data_initialize(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control_port : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ data : pointer_t);
+
+#if 0
+/*
+ * Indicate that the specified range of data in this memory object
+ * will not be requested again until it is reinitialized with
+ * memory_object_data_return or memory_object_data_initialize.
+ */
+simpleroutine memory_object_data_terminate(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ memory_control_port : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ size : vm_size_t);
+#else /* 0 */
+skip; /* memory_object_data_terminate */
+#endif /* 0 */
diff --git a/include/mach/message.h b/include/mach/message.h
new file mode 100644
index 0000000..9790ef9
--- /dev/null
+++ b/include/mach/message.h
@@ -0,0 +1,540 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/message.h
+ *
+ * Mach IPC message and primitive function definitions.
+ */
+
+#ifndef _MACH_MESSAGE_H_
+#define _MACH_MESSAGE_H_
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+
+
+/*
+ * The timeout mechanism uses mach_msg_timeout_t values,
+ * passed by value. The timeout units are milliseconds.
+ * It is controlled with the MACH_SEND_TIMEOUT
+ * and MACH_RCV_TIMEOUT options.
+ */
+
+typedef natural_t mach_msg_timeout_t;
+
+/*
+ * The value to be used when there is no timeout.
+ * (No MACH_SEND_TIMEOUT/MACH_RCV_TIMEOUT option.)
+ */
+
+#define MACH_MSG_TIMEOUT_NONE ((mach_msg_timeout_t) 0)
+
+/*
+ * The kernel uses MACH_MSGH_BITS_COMPLEX as a hint. If it isn't on, it
+ * assumes the body of the message doesn't contain port rights or OOL
+ * data. The field is set in received messages. A user task must
+ * use caution in interpreting the body of a message if the bit isn't
+ * on, because the mach_msg_type's in the body might "lie" about the
+ * contents. If the bit isn't on, but the mach_msg_types
+ * in the body specify rights or OOL data, the behaviour is undefined.
+ * (I.e., an error may or may not be produced.)
+ *
+ * The value of MACH_MSGH_BITS_REMOTE determines the interpretation
+ * of the msgh_remote_port field. It is handled like a msgt_name.
+ *
+ * The value of MACH_MSGH_BITS_LOCAL determines the interpretation
+ * of the msgh_local_port field. It is handled like a msgt_name.
+ *
+ * MACH_MSGH_BITS() combines two MACH_MSG_TYPE_* values, for the remote
+ * and local fields, into a single value suitable for msgh_bits.
+ *
+ * MACH_MSGH_BITS_COMPLEX_PORTS, MACH_MSGH_BITS_COMPLEX_DATA, and
+ * MACH_MSGH_BITS_CIRCULAR should be zero; they are used internally.
+ *
+ * The unused bits should be zero.
+ */
+
+#define MACH_MSGH_BITS_ZERO 0x00000000
+#define MACH_MSGH_BITS_REMOTE_MASK 0x000000ff
+#define MACH_MSGH_BITS_LOCAL_MASK 0x0000ff00
+#define MACH_MSGH_BITS_COMPLEX 0x80000000U
+#define MACH_MSGH_BITS_CIRCULAR 0x40000000 /* internal use only */
+#define MACH_MSGH_BITS_COMPLEX_PORTS 0x20000000 /* internal use only */
+#define MACH_MSGH_BITS_COMPLEX_DATA 0x10000000 /* internal use only */
+#define MACH_MSGH_BITS_MIGRATED 0x08000000 /* internal use only */
+#define MACH_MSGH_BITS_UNUSED 0x07ff0000
+
+#define MACH_MSGH_BITS_PORTS_MASK \
+ (MACH_MSGH_BITS_REMOTE_MASK|MACH_MSGH_BITS_LOCAL_MASK)
+
+#define MACH_MSGH_BITS(remote, local) \
+ ((remote) | ((local) << 8))
+#define MACH_MSGH_BITS_REMOTE(bits) \
+ ((bits) & MACH_MSGH_BITS_REMOTE_MASK)
+#define MACH_MSGH_BITS_LOCAL(bits) \
+ (((bits) & MACH_MSGH_BITS_LOCAL_MASK) >> 8)
+#define MACH_MSGH_BITS_PORTS(bits) \
+ ((bits) & MACH_MSGH_BITS_PORTS_MASK)
+#define MACH_MSGH_BITS_OTHER(bits) \
+ ((bits) &~ MACH_MSGH_BITS_PORTS_MASK)
+
+/*
+ * Every message starts with a message header.
+ * Following the message header are zero or more pairs of
+ * type descriptors (mach_msg_type_t/mach_msg_type_long_t) and
+ * data values. The size of the message must be specified in bytes,
+ * and includes the message header, type descriptors, inline
+ * data, and inline pointer for out-of-line data.
+ *
+ * The msgh_remote_port field specifies the destination of the message.
+ * It must specify a valid send or send-once right for a port.
+ *
+ * The msgh_local_port field specifies a "reply port". Normally,
+ * This field carries a send-once right that the receiver will use
+ * to reply to the message. It may carry the values MACH_PORT_NULL,
+ * MACH_PORT_DEAD, a send-once right, or a send right.
+ *
+ * The msgh_seqno field carries a sequence number associated with the
+ * received-from port. A port's sequence number is incremented every
+ * time a message is received from it. In sent messages, the field's
+ * value is ignored.
+ *
+ * The msgh_id field is uninterpreted by the message primitives.
+ * It normally carries information specifying the format
+ * or meaning of the message.
+ */
+
+typedef unsigned int mach_msg_bits_t;
+typedef unsigned int mach_msg_size_t;
+typedef natural_t mach_msg_seqno_t;
+typedef integer_t mach_msg_id_t;
+
+/* full header structure, may have different size in user/kernel spaces */
+typedef struct mach_msg_header {
+ mach_msg_bits_t msgh_bits;
+ mach_msg_size_t msgh_size;
+ union {
+ mach_port_t msgh_remote_port;
+ /*
+ * Ensure msgh_remote_port is wide enough to hold a kernel pointer
+ * to avoid message resizing for the 64 bits case. This field should
+ * not be used since it is here just for padding purposes.
+ */
+ rpc_uintptr_t msgh_remote_port_do_not_use;
+ };
+ union {
+ mach_port_t msgh_local_port;
+ rpc_uintptr_t msgh_protected_payload;
+ };
+ mach_port_seqno_t msgh_seqno;
+ mach_msg_id_t msgh_id;
+} mach_msg_header_t;
+
+#ifdef KERNEL
+/* user-side header format, needed in the kernel */
+typedef struct {
+ mach_msg_bits_t msgh_bits;
+ mach_msg_size_t msgh_size;
+ union {
+ mach_port_name_t msgh_remote_port;
+ rpc_uintptr_t msgh_remote_port_do_not_use;
+ };
+ union {
+ mach_port_name_t msgh_local_port;
+ rpc_uintptr_t msgh_protected_payload;
+ };
+ mach_port_seqno_t msgh_seqno;
+ mach_msg_id_t msgh_id;
+} mach_msg_user_header_t;
+#else
+typedef mach_msg_header_t mach_msg_user_header_t;
+#endif
+
+/*
+ * There is no fixed upper bound to the size of Mach messages.
+ */
+
+#define MACH_MSG_SIZE_MAX ((mach_msg_size_t) ~0)
+
+/*
+ * Compatibility definitions, for code written
+ * when there was a msgh_kind instead of msgh_seqno.
+ */
+
+#define MACH_MSGH_KIND_NORMAL 0x00000000
+#if 0
+/* code using this is likely to break, so better not to have it defined */
+#define MACH_MSGH_KIND_NOTIFICATION 0x00000001
+#endif
+#define msgh_kind msgh_seqno
+#define mach_msg_kind_t mach_port_seqno_t
+
+/*
+ * The msgt_number field specifies the number of data elements.
+ * The msgt_size field specifies the size of each data element, in bits.
+ * The msgt_name field specifies the type of each data element.
+ * If msgt_inline is TRUE, the data follows the type descriptor
+ * in the body of the message. If msgt_inline is FALSE, then a pointer
+ * to the data should follow the type descriptor, and the data is
+ * sent out-of-line. In this case, if msgt_deallocate is TRUE,
+ * then the out-of-line data is moved (instead of copied) into the message.
+ * If msgt_longform is TRUE, then the type descriptor is actually
+ * a mach_msg_type_long_t.
+ *
+ * The actual amount of inline data following the descriptor must
+ * a multiple of the word size. For out-of-line data, this is a
+ * pointer. For inline data, the supplied data size (calculated
+ * from msgt_number/msgt_size) is rounded up. This guarantees
+ * that type descriptors always fall on word boundaries.
+ *
+ * For port rights, msgt_size must be 8*sizeof(mach_port_t).
+ * If the data is inline, msgt_deallocate should be FALSE.
+ * The msgt_unused bit should be zero.
+ * The msgt_name, msgt_size, msgt_number fields in
+ * a mach_msg_type_long_t should be zero.
+ */
+
+typedef unsigned int mach_msg_type_name_t;
+typedef unsigned int mach_msg_type_size_t;
+typedef natural_t mach_msg_type_number_t;
+
+/**
+ * Structure used for inlined port rights in messages.
+ *
+ * We use this to avoid having to perform message resizing in the kernel
+ * since userspace port rights might be smaller than kernel ports in 64 bit
+ * architectures.
+ */
+typedef struct {
+ union {
+ mach_port_name_t name;
+#ifdef KERNEL
+ mach_port_t kernel_port;
+#else
+ uintptr_t kernel_port_do_not_use;
+#endif /* KERNEL */
+ };
+} mach_port_name_inlined_t;
+
+typedef struct {
+#ifdef __x86_64__
+ /*
+ * For 64 bits, this struct is 8 bytes long so we
+ * can pack the same amount of information as mach_msg_type_long_t.
+ * Note that for 64 bit userland, msgt_size only needs to be 8 bits long
+ * but for kernel compatibility with 32 bit userland we allow it to be
+ * 16 bits long.
+ *
+ * Effectively, we don't need mach_msg_type_long_t but we are keeping it
+ * for a while to make the code similar between 32 and 64 bits.
+ *
+ * We also keep the msgt_longform bit around simply because it makes it
+ * very easy to convert messages from a 32 bit userland into a 64 bit
+ * kernel. Otherwise, we would have to replicate some of the MiG logic
+ * internally in the kernel.
+ */
+ unsigned int msgt_name : 8,
+ msgt_size : 16,
+ msgt_unused : 5,
+ msgt_inline : 1,
+ msgt_longform : 1,
+ msgt_deallocate : 1;
+ mach_msg_type_number_t msgt_number;
+#else
+ unsigned int msgt_name : 8,
+ msgt_size : 8,
+ msgt_number : 12,
+ msgt_inline : 1,
+ msgt_longform : 1,
+ msgt_deallocate : 1,
+ msgt_unused : 1;
+#endif
+} __attribute__ ((aligned (__alignof__ (uintptr_t)))) mach_msg_type_t;
+
+typedef struct {
+#ifdef __x86_64__
+ union {
+ /* On x86_64 this is equivalent to mach_msg_type_t so use
+ * union to overlay with the old field names. */
+ mach_msg_type_t msgtl_header;
+ struct {
+ unsigned int msgtl_name : 8,
+ msgtl_size : 16,
+ msgtl_unused : 5,
+ msgtl_inline : 1,
+ msgtl_longform : 1,
+ msgtl_deallocate : 1;
+ mach_msg_type_number_t msgtl_number;
+ };
+ };
+#else
+ mach_msg_type_t msgtl_header;
+ unsigned short msgtl_name;
+ unsigned short msgtl_size;
+ natural_t msgtl_number;
+#endif
+} __attribute__ ((aligned (__alignof__ (uintptr_t)))) mach_msg_type_long_t;
+
+#ifdef __x86_64__
+#ifdef __cplusplus
+#if __cplusplus >= 201103L
+static_assert (sizeof (mach_msg_type_t) == sizeof (mach_msg_type_long_t),
+ "mach_msg_type_t and mach_msg_type_long_t need to have the same size.");
+#endif
+#else
+_Static_assert (sizeof (mach_msg_type_t) == sizeof (mach_msg_type_long_t),
+ "mach_msg_type_t and mach_msg_type_long_t need to have the same size.");
+#endif
+#endif
+
+/*
+ * Known values for the msgt_name field.
+ *
+ * The only types known to the Mach kernel are
+ * the port types, and those types used in the
+ * kernel RPC interface.
+ */
+
+#define MACH_MSG_TYPE_UNSTRUCTURED 0
+#define MACH_MSG_TYPE_BIT 0
+#define MACH_MSG_TYPE_BOOLEAN 0
+#define MACH_MSG_TYPE_INTEGER_16 1
+#define MACH_MSG_TYPE_INTEGER_32 2
+#define MACH_MSG_TYPE_CHAR 8
+#define MACH_MSG_TYPE_BYTE 9
+#define MACH_MSG_TYPE_INTEGER_8 9
+#define MACH_MSG_TYPE_REAL 10
+#define MACH_MSG_TYPE_INTEGER_64 11
+#define MACH_MSG_TYPE_STRING 12
+#define MACH_MSG_TYPE_STRING_C 12
+
+/*
+ * Values used when sending a port right.
+ */
+
+#define MACH_MSG_TYPE_MOVE_RECEIVE 16 /* Must hold receive rights */
+#define MACH_MSG_TYPE_MOVE_SEND 17 /* Must hold send rights */
+#define MACH_MSG_TYPE_MOVE_SEND_ONCE 18 /* Must hold sendonce rights */
+#define MACH_MSG_TYPE_COPY_SEND 19 /* Must hold send rights */
+#define MACH_MSG_TYPE_MAKE_SEND 20 /* Must hold receive rights */
+#define MACH_MSG_TYPE_MAKE_SEND_ONCE 21 /* Must hold receive rights */
+
+/*
+ * Values received/carried in messages. Tells the receiver what
+ * sort of port right he now has.
+ *
+ * MACH_MSG_TYPE_PORT_NAME is used to transfer a port name
+ * which should remain uninterpreted by the kernel. (Port rights
+ * are not transferred, just the port name.)
+ */
+
+#define MACH_MSG_TYPE_PORT_NAME 15
+#define MACH_MSG_TYPE_PORT_RECEIVE MACH_MSG_TYPE_MOVE_RECEIVE
+#define MACH_MSG_TYPE_PORT_SEND MACH_MSG_TYPE_MOVE_SEND
+#define MACH_MSG_TYPE_PORT_SEND_ONCE MACH_MSG_TYPE_MOVE_SEND_ONCE
+
+#define MACH_MSG_TYPE_PROTECTED_PAYLOAD 23
+
+#define MACH_MSG_TYPE_LAST 23 /* Last assigned */
+
+/*
+ * A dummy value. Mostly used to indicate that the actual value
+ * will be filled in later, dynamically.
+ */
+
+#define MACH_MSG_TYPE_POLYMORPHIC ((mach_msg_type_name_t) -1)
+
+/*
+ * Is a given item a port type?
+ */
+
+#define MACH_MSG_TYPE_PORT_ANY(x) \
+ (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \
+ ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE))
+
+#define MACH_MSG_TYPE_PORT_ANY_SEND(x) \
+ (((x) >= MACH_MSG_TYPE_MOVE_SEND) && \
+ ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE))
+
+#define MACH_MSG_TYPE_PORT_ANY_RIGHT(x) \
+ (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \
+ ((x) <= MACH_MSG_TYPE_MOVE_SEND_ONCE))
+
+typedef integer_t mach_msg_option_t;
+
+#define MACH_MSG_OPTION_NONE 0x00000000
+
+#define MACH_SEND_MSG 0x00000001
+#define MACH_RCV_MSG 0x00000002
+
+#define MACH_SEND_TIMEOUT 0x00000010
+#define MACH_SEND_NOTIFY 0x00000020
+#define MACH_SEND_INTERRUPT 0x00000040 /* libmach implements */
+#define MACH_SEND_CANCEL 0x00000080
+#define MACH_RCV_TIMEOUT 0x00000100
+#define MACH_RCV_NOTIFY 0x00000200
+#define MACH_RCV_INTERRUPT 0x00000400 /* libmach implements */
+#define MACH_RCV_LARGE 0x00000800
+
+#define MACH_SEND_ALWAYS 0x00010000 /* internal use only */
+
+#ifdef __x86_64__
+#if defined(KERNEL) && defined(USER32)
+#define MACH_MSG_USER_ALIGNMENT 4
+#else
+#define MACH_MSG_USER_ALIGNMENT 8
+#endif
+#else
+#define MACH_MSG_USER_ALIGNMENT 4
+#endif
+
+#ifdef KERNEL
+/* This is the alignment of msg descriptors and the actual data
+ * for both in kernel messages and user land messages.
+ *
+ * We have two types of alignment because for specific configurations
+ * (in particular a 64 bit kernel with 32 bit userland) we transform
+ * 4-byte aligned user messages into 8-byte aligned messages (and vice-versa)
+ * so that kernel messages are correctly aligned.
+ */
+#define MACH_MSG_KERNEL_ALIGNMENT sizeof(uintptr_t)
+
+#define mach_msg_align(x, alignment) \
+ ( ( ((vm_offset_t)(x)) + ((alignment)-1) ) & ~((alignment)-1) )
+#define mach_msg_user_align(x) mach_msg_align(x, MACH_MSG_USER_ALIGNMENT)
+#define mach_msg_kernel_align(x) mach_msg_align(x, MACH_MSG_KERNEL_ALIGNMENT)
+#define mach_msg_user_is_misaligned(x) ((x) & ((MACH_MSG_USER_ALIGNMENT)-1))
+#define mach_msg_kernel_is_misaligned(x) ((x) & ((MACH_MSG_KERNEL_ALIGNMENT)-1))
+#endif /* KERNEL */
+
+/*
+ * Much code assumes that mach_msg_return_t == kern_return_t.
+ * This definition is useful for descriptive purposes.
+ *
+ * See <mach/error.h> for the format of error codes.
+ * IPC errors are system 4. Send errors are subsystem 0;
+ * receive errors are subsystem 1. The code field is always non-zero.
+ * The high bits of the code field communicate extra information
+ * for some error codes. MACH_MSG_MASK masks off these special bits.
+ */
+
+typedef kern_return_t mach_msg_return_t;
+
+#define MACH_MSG_SUCCESS 0x00000000
+
+#define MACH_MSG_MASK 0x00003c00
+ /* All special error code bits defined below. */
+#define MACH_MSG_IPC_SPACE 0x00002000
+ /* No room in IPC name space for another capability name. */
+#define MACH_MSG_VM_SPACE 0x00001000
+ /* No room in VM address space for out-of-line memory. */
+#define MACH_MSG_IPC_KERNEL 0x00000800
+ /* Kernel resource shortage handling an IPC capability. */
+#define MACH_MSG_VM_KERNEL 0x00000400
+ /* Kernel resource shortage handling out-of-line memory. */
+
+#define MACH_SEND_IN_PROGRESS 0x10000001
+ /* Thread is waiting to send. (Internal use only.) */
+#define MACH_SEND_INVALID_DATA 0x10000002
+ /* Bogus in-line data. */
+#define MACH_SEND_INVALID_DEST 0x10000003
+ /* Bogus destination port. */
+#define MACH_SEND_TIMED_OUT 0x10000004
+ /* Message not sent before timeout expired. */
+#define MACH_SEND_WILL_NOTIFY 0x10000005
+ /* Msg-accepted notification will be generated. */
+#define MACH_SEND_NOTIFY_IN_PROGRESS 0x10000006
+ /* Msg-accepted notification already pending. */
+#define MACH_SEND_INTERRUPTED 0x10000007
+ /* Software interrupt. */
+#define MACH_SEND_MSG_TOO_SMALL 0x10000008
+ /* Data doesn't contain a complete message. */
+#define MACH_SEND_INVALID_REPLY 0x10000009
+ /* Bogus reply port. */
+#define MACH_SEND_INVALID_RIGHT 0x1000000a
+ /* Bogus port rights in the message body. */
+#define MACH_SEND_INVALID_NOTIFY 0x1000000b
+ /* Bogus notify port argument. */
+#define MACH_SEND_INVALID_MEMORY 0x1000000c
+ /* Invalid out-of-line memory pointer. */
+#define MACH_SEND_NO_BUFFER 0x1000000d
+ /* No message buffer is available. */
+#define MACH_SEND_NO_NOTIFY 0x1000000e
+ /* Resource shortage; can't request msg-accepted notif. */
+#define MACH_SEND_INVALID_TYPE 0x1000000f
+ /* Invalid msg-type specification. */
+#define MACH_SEND_INVALID_HEADER 0x10000010
+ /* A field in the header had a bad value. */
+
+#define MACH_RCV_IN_PROGRESS 0x10004001
+ /* Thread is waiting for receive. (Internal use only.) */
+#define MACH_RCV_INVALID_NAME 0x10004002
+ /* Bogus name for receive port/port-set. */
+#define MACH_RCV_TIMED_OUT 0x10004003
+ /* Didn't get a message within the timeout value. */
+#define MACH_RCV_TOO_LARGE 0x10004004
+ /* Message buffer is not large enough for inline data. */
+#define MACH_RCV_INTERRUPTED 0x10004005
+ /* Software interrupt. */
+#define MACH_RCV_PORT_CHANGED 0x10004006
+ /* Port moved into a set during the receive. */
+#define MACH_RCV_INVALID_NOTIFY 0x10004007
+ /* Bogus notify port argument. */
+#define MACH_RCV_INVALID_DATA 0x10004008
+ /* Bogus message buffer for inline data. */
+#define MACH_RCV_PORT_DIED 0x10004009
+ /* Port/set was sent away/died during receive. */
+#define MACH_RCV_IN_SET 0x1000400a
+ /* Port is a member of a port set. */
+#define MACH_RCV_HEADER_ERROR 0x1000400b
+ /* Error receiving message header. See special bits. */
+#define MACH_RCV_BODY_ERROR 0x1000400c
+ /* Error receiving message body. See special bits. */
+
+extern mach_msg_return_t
+mach_msg_trap
+ (mach_msg_user_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_name_t rcv_name,
+ mach_msg_timeout_t timeout,
+ mach_port_name_t notify);
+
+extern mach_msg_return_t
+mach_msg
+ (mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_name_t rcv_name,
+ mach_msg_timeout_t timeout,
+ mach_port_name_t notify);
+
+extern __typeof (mach_msg) __mach_msg;
+extern __typeof (mach_msg_trap) __mach_msg_trap;
+
+#endif /* _MACH_MESSAGE_H_ */
diff --git a/include/mach/mig_errors.h b/include/mach/mig_errors.h
new file mode 100644
index 0000000..389ce77
--- /dev/null
+++ b/include/mach/mig_errors.h
@@ -0,0 +1,89 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach Interface Generator errors
+ *
+ */
+
+#ifndef _MACH_MIG_ERRORS_H_
+#define _MACH_MIG_ERRORS_H_
+
+#include <mach/kern_return.h>
+#include <mach/message.h>
+
+/*
+ * These error codes should be specified as system 4, subsytem 2.
+ * But alas backwards compatibility makes that impossible.
+ * The problem is old clients of new servers (eg, the kernel)
+ * which get strange large error codes when there is a Mig problem
+ * in the server. Unfortunately, the IPC system doesn't have
+ * the knowledge to convert the codes in this situation.
+ */
+
+#define MIG_TYPE_ERROR -300 /* client type check failure */
+#define MIG_REPLY_MISMATCH -301 /* wrong reply message ID */
+#define MIG_REMOTE_ERROR -302 /* server detected error */
+#define MIG_BAD_ID -303 /* bad request message ID */
+#define MIG_BAD_ARGUMENTS -304 /* server type check failure */
+#define MIG_NO_REPLY -305 /* no reply should be sent */
+#define MIG_EXCEPTION -306 /* server raised exception */
+#define MIG_ARRAY_TOO_LARGE -307 /* array not large enough */
+#define MIG_SERVER_DIED -308 /* server died */
+#define MIG_DESTROY_REQUEST -309 /* destroy request with no reply */
+
+typedef struct {
+ mach_msg_header_t Head;
+ mach_msg_type_t RetCodeType;
+ kern_return_t RetCode;
+} mig_reply_header_t;
+
+typedef struct mig_symtab {
+ char *ms_routine_name;
+ int ms_routine_number;
+#if defined(__STDC__) || defined(c_plus_plus) || defined(hc)
+ void
+#else
+ int
+#endif
+ (*ms_routine)(void);
+} mig_symtab_t;
+
+/*
+ * Definition for server stub routines. These routines
+ * unpack the request message, call the server procedure,
+ * and pack the reply message.
+ */
+#if defined(__STDC__) || defined(c_plus_plus)
+typedef void (*mig_routine_t)(mach_msg_header_t *, mach_msg_header_t *);
+#else
+#if defined(hc)
+typedef void (*mig_routine_t)();
+#else
+typedef int (*mig_routine_t)(); /* PCC cannot handle void (*)() */
+#endif
+#endif
+
+#endif /* _MACH_MIG_ERRORS_H_ */
diff --git a/include/mach/mig_support.h b/include/mach/mig_support.h
new file mode 100644
index 0000000..ed871c0
--- /dev/null
+++ b/include/mach/mig_support.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Abstract:
+ * Header file for support routines called by MiG generated interfaces.
+ *
+ */
+
+#ifndef _MACH_MIG_SUPPORT_H_
+#define _MACH_MIG_SUPPORT_H_
+
+#include <string.h>
+
+#include <mach/message.h>
+#include <mach/mach_types.h>
+
+extern void mig_init(void *_first);
+
+extern void mig_allocate(vm_address_t *_addr_p, vm_size_t _size);
+
+extern void mig_deallocate(vm_address_t _addr, vm_size_t _size);
+
+extern void mig_dealloc_reply_port(mach_port_t);
+
+extern void mig_put_reply_port(mach_port_t);
+
+extern mach_port_name_t mig_get_reply_port(void);
+
+extern void mig_reply_setup(const mach_msg_header_t *_request,
+ mach_msg_header_t *reply);
+
+extern vm_size_t mig_strncpy(char *_dest, const char *_src, vm_size_t _len);
+
+#endif /* not defined(_MACH_MIG_SUPPORT_H_) */
diff --git a/include/mach/notify.defs b/include/mach/notify.defs
new file mode 100644
index 0000000..6ba4cde
--- /dev/null
+++ b/include/mach/notify.defs
@@ -0,0 +1,112 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem notify 64;
+
+#include <mach/std_types.defs>
+
+#ifdef NOTIFY_IMPORTS
+NOTIFY_IMPORTS
+#endif
+
+#if SEQNOS
+serverprefix do_seqnos_;
+serverdemux seqnos_notify_server;
+#else
+serverprefix do_;
+serverdemux notify_server;
+#endif
+
+type notify_port_t = MACH_MSG_TYPE_MOVE_SEND_ONCE
+ ctype: mach_port_t
+#ifdef NOTIFY_INTRAN
+ intran: NOTIFY_INTRAN
+#endif
+#ifdef NOTIFY_INTRAN_PAYLOAD
+ intranpayload: NOTIFY_INTRAN_PAYLOAD
+#endif
+#ifdef NOTIFY_OUTTRAN
+ outtran: NOTIFY_OUTTRAN
+#endif
+#ifdef NOTIFY_DESTRUCTOR
+ destructor: NOTIFY_DESTRUCTOR
+#endif
+;
+
+/* MACH_NOTIFY_FIRST: 0100 */
+skip;
+
+/* MACH_NOTIFY_PORT_DELETED: 0101 */
+simpleroutine mach_notify_port_deleted(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif
+ name : mach_port_name_t);
+
+/* MACH_NOTIFY_MSG_ACCEPTED: 0102 */
+simpleroutine mach_notify_msg_accepted(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif
+ name : mach_port_name_t);
+
+skip; /* was NOTIFY_OWNERSHIP_RIGHTS: 0103 */
+
+skip; /* was NOTIFY_RECEIVE_RIGHTS: 0104 */
+
+/* MACH_NOTIFY_PORT_DESTROYED: 0105 */
+simpleroutine mach_notify_port_destroyed(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif
+ rights : mach_port_receive_t);
+
+/* MACH_NOTIFY_NO_SENDERS: 0106 */
+simpleroutine mach_notify_no_senders(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif
+ mscount : mach_port_mscount_t);
+
+/* MACH_NOTIFY_SEND_ONCE: 0107 */
+simpleroutine mach_notify_send_once(
+ notify : notify_port_t
+#if SEQNOS
+; msgseqno seqno : mach_port_seqno_t
+#endif
+ );
+
+/* MACH_NOTIFY_DEAD_NAME: 0110 */
+simpleroutine mach_notify_dead_name(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif
+ name : mach_port_name_t);
diff --git a/include/mach/notify.h b/include/mach/notify.h
new file mode 100644
index 0000000..14bcd6f
--- /dev/null
+++ b/include/mach/notify.h
@@ -0,0 +1,92 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/notify.h
+ *
+ * Kernel notification message definitions.
+ */
+
+#ifndef _MACH_NOTIFY_H_
+#define _MACH_NOTIFY_H_
+
+#include <mach/port.h>
+#include <mach/message.h>
+
+/*
+ * An alternative specification of the notification interface
+ * may be found in mach/notify.defs.
+ */
+
+#define MACH_NOTIFY_FIRST 0100
+#define MACH_NOTIFY_PORT_DELETED (MACH_NOTIFY_FIRST + 001 )
+ /* A send or send-once right was deleted. */
+#define MACH_NOTIFY_MSG_ACCEPTED (MACH_NOTIFY_FIRST + 002)
+ /* A MACH_SEND_NOTIFY msg was accepted */
+#define MACH_NOTIFY_PORT_DESTROYED (MACH_NOTIFY_FIRST + 005)
+ /* A receive right was (would have been) deallocated */
+#define MACH_NOTIFY_NO_SENDERS (MACH_NOTIFY_FIRST + 006)
+ /* Receive right has no extant send rights */
+#define MACH_NOTIFY_SEND_ONCE (MACH_NOTIFY_FIRST + 007)
+ /* An extant send-once right died */
+#define MACH_NOTIFY_DEAD_NAME (MACH_NOTIFY_FIRST + 010)
+ /* Send or send-once right died, leaving a dead-name */
+#define MACH_NOTIFY_LAST (MACH_NOTIFY_FIRST + 015)
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_NAME */
+ mach_port_name_t not_port;
+} mach_port_deleted_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_NAME */
+ mach_port_name_t not_port;
+} mach_msg_accepted_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_RECEIVE */
+ mach_port_t not_port;
+} mach_port_destroyed_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_INTEGER_32 */
+ unsigned int not_count;
+} mach_no_senders_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+} mach_send_once_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_NAME */
+ mach_port_name_t not_port;
+} mach_dead_name_notification_t;
+
+#endif /* _MACH_NOTIFY_H_ */
diff --git a/include/mach/pc_sample.h b/include/mach/pc_sample.h
new file mode 100644
index 0000000..2d56b34
--- /dev/null
+++ b/include/mach/pc_sample.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_PC_SAMPLE_H_
+#define _MACH_PC_SAMPLE_H_
+
+#include <mach/machine/vm_types.h>
+
+typedef unsigned int sampled_pc_flavor_t;
+
+
+#define SAMPLED_PC_PERIODIC 0x1 /* default */
+
+
+#define SAMPLED_PC_VM_ZFILL_FAULTS 0x10
+#define SAMPLED_PC_VM_REACTIVATION_FAULTS 0x20
+#define SAMPLED_PC_VM_PAGEIN_FAULTS 0x40
+#define SAMPLED_PC_VM_COW_FAULTS 0x80
+#define SAMPLED_PC_VM_FAULTS_ANY 0x100
+#define SAMPLED_PC_VM_FAULTS \
+ (SAMPLED_PC_VM_ZFILL_FAULTS | \
+ SAMPLED_PC_VM_REACTIVATION_FAULTS |\
+ SAMPLED_PC_VM_PAGEIN_FAULTS |\
+ SAMPLED_PC_VM_COW_FAULTS )
+
+
+
+
+/*
+ * Definitions for the PC sampling interface.
+ */
+
+typedef struct sampled_pc {
+ rpc_vm_offset_t id; /* task_t address */
+ rpc_vm_offset_t pc; /* program counter */
+ sampled_pc_flavor_t sampletype;
+} sampled_pc_t;
+
+typedef sampled_pc_t *sampled_pc_array_t;
+typedef unsigned int sampled_pc_seqno_t;
+
+
+#endif /* _MACH_PC_SAMPLE_H_ */
diff --git a/include/mach/policy.h b/include/mach/policy.h
new file mode 100644
index 0000000..da776c9
--- /dev/null
+++ b/include/mach/policy.h
@@ -0,0 +1,45 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_POLICY_H_
+#define _MACH_POLICY_H_
+
+/*
+ * mach/policy.h
+ *
+ * Definitions for scheduing policy.
+ */
+
+/*
+ * Policy definitions. Policies must be powers of 2.
+ */
+#define POLICY_TIMESHARE 1
+#define POLICY_FIXEDPRI 2
+#define POLICY_LAST 2
+
+#define invalid_policy(policy) (((policy) <= 0) || ((policy) > POLICY_LAST))
+
+#endif /* _MACH_POLICY_H_ */
diff --git a/include/mach/port.h b/include/mach/port.h
new file mode 100644
index 0000000..c9bbcf1
--- /dev/null
+++ b/include/mach/port.h
@@ -0,0 +1,159 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/port.h
+ *
+ * Definition of a port
+ *
+ * [The basic mach_port_t type should probably be machine-dependent,
+ * as it must be represented by a 32-bit integer.]
+ */
+
+#ifndef _MACH_PORT_H_
+#define _MACH_PORT_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * Port names are the type used by userspace, they are always 32-bit wide.
+ */
+typedef unsigned int mach_port_name_t;
+typedef mach_port_name_t *mach_port_name_array_t;
+typedef const mach_port_name_t *const_mach_port_name_array_t;
+
+/*
+ * A port is represented
+ * - by a port name in userspace
+ * - by a pointer in kernel space
+ * While in userspace mach_port_name_t and mach_port_name are interchangable,
+ * in kernelspace they need to be different and appropriately converted.
+ */
+#ifdef KERNEL
+typedef vm_offset_t mach_port_t;
+#else /* KERNEL */
+typedef mach_port_name_t mach_port_t;
+#endif
+typedef mach_port_t *mach_port_array_t;
+typedef const mach_port_t *const_mach_port_array_t;
+typedef int *rpc_signature_info_t;
+
+/*
+ * MACH_PORT_NULL is a legal value that can be carried in messages.
+ * It indicates the absence of any port or port rights. (A port
+ * argument keeps the message from being "simple", even if the
+ * value is MACH_PORT_NULL.) The value MACH_PORT_DEAD is also
+ * a legal value that can be carried in messages. It indicates
+ * that a port right was present, but it died.
+ */
+
+#define MACH_PORT_NULL 0 /* works with both user and kernel ports */
+#define MACH_PORT_DEAD ((mach_port_t) ~0)
+#define MACH_PORT_NAME_NULL ((mach_port_name_t) 0)
+#define MACH_PORT_NAME_DEAD ((mach_port_name_t) ~0)
+
+#define MACH_PORT_VALID(port) \
+ (((port) != MACH_PORT_NULL) && ((port) != MACH_PORT_DEAD))
+#define MACH_PORT_NAME_VALID(name) \
+ (((name) != MACH_PORT_NAME_NULL) && ((name) != MACH_PORT_NAME_DEAD))
+
+/*
+ * These are the different rights a task may have.
+ * The MACH_PORT_RIGHT_* definitions are used as arguments
+ * to mach_port_allocate, mach_port_get_refs, etc, to specify
+ * a particular right to act upon. The mach_port_names and
+ * mach_port_type calls return bitmasks using the MACH_PORT_TYPE_*
+ * definitions. This is because a single name may denote
+ * multiple rights.
+ */
+
+typedef natural_t mach_port_right_t;
+
+#define MACH_PORT_RIGHT_SEND ((mach_port_right_t) 0)
+#define MACH_PORT_RIGHT_RECEIVE ((mach_port_right_t) 1)
+#define MACH_PORT_RIGHT_SEND_ONCE ((mach_port_right_t) 2)
+#define MACH_PORT_RIGHT_PORT_SET ((mach_port_right_t) 3)
+#define MACH_PORT_RIGHT_DEAD_NAME ((mach_port_right_t) 4)
+#define MACH_PORT_RIGHT_NUMBER ((mach_port_right_t) 5)
+
+typedef natural_t mach_port_type_t;
+typedef mach_port_type_t *mach_port_type_array_t;
+
+#define MACH_PORT_TYPE(right) ((mach_port_type_t)(1 << ((right)+16)))
+#define MACH_PORT_TYPE_NONE ((mach_port_type_t) 0)
+#define MACH_PORT_TYPE_SEND MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND)
+#define MACH_PORT_TYPE_RECEIVE MACH_PORT_TYPE(MACH_PORT_RIGHT_RECEIVE)
+#define MACH_PORT_TYPE_SEND_ONCE MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND_ONCE)
+#define MACH_PORT_TYPE_PORT_SET MACH_PORT_TYPE(MACH_PORT_RIGHT_PORT_SET)
+#define MACH_PORT_TYPE_DEAD_NAME MACH_PORT_TYPE(MACH_PORT_RIGHT_DEAD_NAME)
+
+/* Convenient combinations. */
+
+#define MACH_PORT_TYPE_SEND_RECEIVE \
+ (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_RECEIVE)
+#define MACH_PORT_TYPE_SEND_RIGHTS \
+ (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE)
+#define MACH_PORT_TYPE_PORT_RIGHTS \
+ (MACH_PORT_TYPE_SEND_RIGHTS|MACH_PORT_TYPE_RECEIVE)
+#define MACH_PORT_TYPE_PORT_OR_DEAD \
+ (MACH_PORT_TYPE_PORT_RIGHTS|MACH_PORT_TYPE_DEAD_NAME)
+#define MACH_PORT_TYPE_ALL_RIGHTS \
+ (MACH_PORT_TYPE_PORT_OR_DEAD|MACH_PORT_TYPE_PORT_SET)
+
+/* Dummy type bits that mach_port_type/mach_port_names can return. */
+
+#define MACH_PORT_TYPE_DNREQUEST 0x80000000U
+#define MACH_PORT_TYPE_MAREQUEST 0x40000000
+#define MACH_PORT_TYPE_COMPAT 0x20000000
+
+/* User-references for capabilities. */
+
+typedef natural_t mach_port_urefs_t;
+typedef integer_t mach_port_delta_t; /* change in urefs */
+
+/* Attributes of ports. (See mach_port_get_receive_status.) */
+
+typedef natural_t mach_port_seqno_t; /* sequence number */
+typedef unsigned int mach_port_mscount_t; /* make-send count */
+typedef unsigned int mach_port_msgcount_t; /* number of msgs */
+typedef unsigned int mach_port_rights_t; /* number of rights */
+
+typedef struct mach_port_status {
+ mach_port_name_t mps_pset; /* containing port set */
+ mach_port_seqno_t mps_seqno; /* sequence number */
+ mach_port_mscount_t mps_mscount; /* make-send count */
+ mach_port_msgcount_t mps_qlimit; /* queue limit */
+ mach_port_msgcount_t mps_msgcount; /* number in the queue */
+ mach_port_rights_t mps_sorights; /* how many send-once rights */
+ boolean_t mps_srights; /* do send rights exist? */
+ boolean_t mps_pdrequest; /* port-deleted requested? */
+ boolean_t mps_nsrequest; /* no-senders requested? */
+} mach_port_status_t;
+
+#define MACH_PORT_QLIMIT_DEFAULT ((mach_port_msgcount_t) 5)
+#define MACH_PORT_QLIMIT_MAX ((mach_port_msgcount_t) 16)
+
+#endif /* _MACH_PORT_H_ */
diff --git a/include/mach/processor_info.h b/include/mach/processor_info.h
new file mode 100644
index 0000000..5f761ea
--- /dev/null
+++ b/include/mach/processor_info.h
@@ -0,0 +1,104 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/processor_info.h
+ * Author: David L. Black
+ * Date: 1988
+ *
+ * Data structure definitions for processor_info, processor_set_info
+ */
+
+#ifndef _MACH_PROCESSOR_INFO_H_
+#define _MACH_PROCESSOR_INFO_H_
+
+#include <mach/machine.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *processor_info_t; /* varying array of int. */
+
+#define PROCESSOR_INFO_MAX (1024) /* max array size */
+typedef integer_t processor_info_data_t[PROCESSOR_INFO_MAX];
+
+
+typedef integer_t *processor_set_info_t; /* varying array of int. */
+
+#define PROCESSOR_SET_INFO_MAX (1024) /* max array size */
+typedef integer_t processor_set_info_data_t[PROCESSOR_SET_INFO_MAX];
+
+/*
+ * Currently defined information.
+ */
+#define PROCESSOR_BASIC_INFO 1 /* basic information */
+
+struct processor_basic_info {
+ cpu_type_t cpu_type; /* type of cpu */
+ cpu_subtype_t cpu_subtype; /* subtype of cpu */
+/*boolean_t*/integer_t running; /* is processor running */
+ integer_t slot_num; /* slot number */
+/*boolean_t*/integer_t is_master; /* is this the master processor */
+};
+
+typedef struct processor_basic_info processor_basic_info_data_t;
+typedef struct processor_basic_info *processor_basic_info_t;
+#define PROCESSOR_BASIC_INFO_COUNT \
+ (sizeof(processor_basic_info_data_t)/sizeof(integer_t))
+
+
+#define PROCESSOR_SET_BASIC_INFO 1 /* basic information */
+
+struct processor_set_basic_info {
+ integer_t processor_count; /* How many processors */
+ integer_t task_count; /* How many tasks */
+ integer_t thread_count; /* How many threads */
+ integer_t load_average; /* Scaled */
+ integer_t mach_factor; /* Scaled */
+};
+
+/*
+ * Scaling factor for load_average, mach_factor.
+ */
+#define LOAD_SCALE 1000
+
+typedef struct processor_set_basic_info processor_set_basic_info_data_t;
+typedef struct processor_set_basic_info *processor_set_basic_info_t;
+#define PROCESSOR_SET_BASIC_INFO_COUNT \
+ (sizeof(processor_set_basic_info_data_t)/sizeof(integer_t))
+
+#define PROCESSOR_SET_SCHED_INFO 2 /* scheduling info */
+
+struct processor_set_sched_info {
+ integer_t policies; /* allowed policies */
+ integer_t max_priority; /* max priority for new threads */
+};
+
+typedef struct processor_set_sched_info processor_set_sched_info_data_t;
+typedef struct processor_set_sched_info *processor_set_sched_info_t;
+#define PROCESSOR_SET_SCHED_INFO_COUNT \
+ (sizeof(processor_set_sched_info_data_t)/sizeof(integer_t))
+
+#endif /* _MACH_PROCESSOR_INFO_H_ */
diff --git a/include/mach/profil.h b/include/mach/profil.h
new file mode 100644
index 0000000..866f267
--- /dev/null
+++ b/include/mach/profil.h
@@ -0,0 +1,212 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Copyright 1991 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef _MACH_PROFIL_H_
+#define _MACH_PROFIL_H_
+
+#include <mach/boolean.h>
+#include <ipc/ipc_object.h>
+#include <vm/vm_kern.h>
+
+
+#define NB_PROF_BUFFER 2 /* number of buffers servicing a
+ * profiled thread */
+#define SIZE_PROF_BUFFER 100 /* size of a profil buffer (in int)
+ * This value is also defined in
+ * the server (ugly), be careful! */
+
+
+struct prof_data {
+ ipc_object_t prof_port; /* where to send a full buffer */
+
+ struct buffer {
+ int *p_zone; /* points to the actual storage area */
+ int p_index;/* next slot to be filled */
+ boolean_t p_full; /* is the current buffer full ? */
+ } prof_area[NB_PROF_BUFFER];
+
+ int prof_index; /* index of the buffer structure
+ * currently in use */
+
+};
+typedef struct prof_data *prof_data_t;
+#define NULLPBUF ((prof_data_t) 0)
+typedef struct buffer *buffer_t;
+
+/* Macros */
+
+#define set_pbuf_nb(pbuf, nb) \
+ (((nb) >= 0 && (nb) < NB_PROF_BUFFER) \
+ ? (pbuf)->prof_index = (nb), 1 \
+ : 0)
+
+
+#define get_pbuf_nb(pbuf) \
+ (pbuf)->prof_index
+
+
+extern vm_map_t kernel_map;
+
+#define dealloc_pbuf_area(pbuf) \
+ { \
+ register int i; \
+ \
+ for(i=0; i < NB_PROF_BUFFER ; i++) \
+ kmem_free(kernel_map, \
+ (vm_offset_t) (pbuf)->prof_area[i].p_zone, \
+ SIZE_PROF_BUFFER*sizeof(int)); \
+ kmem_free(kernel_map, \
+ (vm_offset_t)(pbuf), \
+ sizeof(struct prof_data)); \
+ }
+
+
+#define alloc_pbuf_area(pbuf, vmpbuf) \
+ (vmpbuf) = (vm_offset_t) 0; \
+ if (kmem_alloc(kernel_map, &(vmpbuf) , sizeof(struct prof_data)) == \
+ KERN_SUCCESS) { \
+ register int i; \
+ register boolean_t end; \
+ \
+ (pbuf) = (prof_data_t) (vmpbuf); \
+ for(i=0, end=FALSE; i < NB_PROF_BUFFER && end == FALSE; i++) { \
+ (vmpbuf) = (vm_offset_t) 0; \
+ if (kmem_alloc(kernel_map,&(vmpbuf),SIZE_PROF_BUFFER*sizeof(int)) == KERN_SUCCESS) { \
+ (pbuf)->prof_area[i].p_zone = (int *) (vmpbuf); \
+ (pbuf)->prof_area[i].p_full = FALSE; \
+ } \
+ else { \
+ (pbuf) = NULLPBUF; \
+ end = TRUE; \
+ } \
+ } \
+ } \
+ else \
+ (pbuf) = NULLPBUF;
+
+
+
+/* MACRO set_pbuf_value
+**
+** enters the value 'val' in the buffer 'pbuf' and returns the following
+** indications: 0: means that a fatal error occurred: the buffer was full
+** (it hasn't been sent yet)
+** 1: means that a value has been inserted successfully
+** 2: means that we've just entered the last value causing
+** the current buffer to be full. (must switch to
+** another buffer and signal the sender to send it)
+*/
+
+#define set_pbuf_value(pbuf, val) \
+ { \
+ register buffer_t a = &((pbuf)->prof_area[(pbuf)->prof_index]); \
+ register int i = a->p_index++; \
+ register boolean_t f = a->p_full; \
+ \
+ if (f == TRUE ) \
+ *(val) = 0; \
+ else { \
+ a->p_zone[i] = *(val); \
+ if (i == SIZE_PROF_BUFFER-1) { \
+ a->p_full = TRUE; \
+ *(val) = 2; \
+ } \
+ else \
+ *(val) = 1; \
+ } \
+ }
+
+
+#define reset_pbuf_area(pbuf) \
+ { \
+ register int *i = &((pbuf)->prof_index); \
+ \
+ *i = (*i == NB_PROF_BUFFER-1) ? 0 : ++(*i); \
+ (pbuf)->prof_area[*i].p_index = 0; \
+ }
+
+
+/**************************************************************/
+/* Structure, elements used for queuing operations on buffers */
+/**************************************************************/
+
+#define thread_t int *
+/*
+** This must be done in order to avoid a circular inclusion
+** with file kern/thread.h .
+** When using this data structure, one must cast the actual
+** type, that is (int *) or (thread_t)
+*/
+
+struct buf_to_send {
+ queue_chain_t list;
+ thread_t thread;
+ int number; /* the number of the buffer to be sent */
+ char wakeme; /* do wakeup when buffer has been sent */
+ } ;
+
+#undef thread_t
+
+
+
+typedef struct buf_to_send *buf_to_send_t;
+
+#define NULLBTS ((buf_to_send_t) 0)
+
+/*
+** Global variable: the head of the queue of buffers to send
+** It is a queue with locks (uses macros from queue.h) and it
+** is shared by hardclock() and the sender_thread()
+*/
+
+mpqueue_head_t prof_queue;
+
+#endif /* _MACH_PROFIL_H_ */
diff --git a/include/mach/profilparam.h b/include/mach/profilparam.h
new file mode 100644
index 0000000..20a8aaf
--- /dev/null
+++ b/include/mach/profilparam.h
@@ -0,0 +1,62 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Copyright 1991 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACH_PROFILPARAM_H_
+#define _MACH_PROFILPARAM_H_
+
+/*
+ * These values are also used when compiling the server, be careful !
+ */
+
+#define NB_PROF_BUFFER 2 /* number of buffers servicing a
+ * profiled thread */
+#define SIZE_PROF_BUFFER 100 /* size of a profil buffer (in int) */
+
+#endif /* _MACH_PROFILPARAM_H_ */
diff --git a/include/mach/std_types.defs b/include/mach/std_types.defs
new file mode 100644
index 0000000..b461f06
--- /dev/null
+++ b/include/mach/std_types.defs
@@ -0,0 +1,101 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel standard interface type declarations
+ */
+
+#ifndef _MACH_STD_TYPES_DEFS_
+#define _MACH_STD_TYPES_DEFS_
+
+type int32_t = MACH_MSG_TYPE_INTEGER_32;
+type int64_t = MACH_MSG_TYPE_INTEGER_64;
+type boolean_t = MACH_MSG_TYPE_BOOLEAN;
+type unsigned = MACH_MSG_TYPE_INTEGER_32;
+type uint32_t = MACH_MSG_TYPE_INTEGER_32;
+type uint64_t = MACH_MSG_TYPE_INTEGER_64;
+
+/* Get the definitions for natural_t and integer_t */
+#include <mach/machine/machine_types.defs>
+
+type kern_return_t = int;
+
+type pointer_t = ^array[] of MACH_MSG_TYPE_BYTE
+ ctype: vm_offset_t;
+
+
+type mach_port_t = MACH_MSG_TYPE_COPY_SEND
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
+type mach_port_array_t = array[] of mach_port_t;
+
+type mach_port_name_t = MACH_MSG_TYPE_PORT_NAME;
+type mach_port_name_array_t = array[] of mach_port_name_t;
+
+type mach_port_right_t = natural_t;
+
+type mach_port_type_t = natural_t;
+type mach_port_type_array_t = array[] of mach_port_type_t;
+
+type mach_port_urefs_t = natural_t;
+type mach_port_delta_t = integer_t;
+type mach_port_seqno_t = natural_t;
+type mach_port_mscount_t = unsigned;
+type mach_port_msgcount_t = unsigned;
+type mach_port_rights_t = unsigned;
+type mach_msg_id_t = integer_t;
+type mach_msg_type_name_t = unsigned;
+type mach_msg_type_number_t = natural_t;
+
+type mach_port_move_receive_t = MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t;
+type mach_port_copy_send_t = MACH_MSG_TYPE_COPY_SEND
+ ctype: mach_port_t;
+type mach_port_make_send_t = MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+type mach_port_move_send_t = MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+type mach_port_make_send_once_t = MACH_MSG_TYPE_MAKE_SEND_ONCE
+ ctype: mach_port_t;
+type mach_port_move_send_once_t = MACH_MSG_TYPE_MOVE_SEND_ONCE
+ ctype: mach_port_t;
+
+type mach_port_receive_t = MACH_MSG_TYPE_PORT_RECEIVE
+ ctype: mach_port_t;
+type mach_port_send_t = MACH_MSG_TYPE_PORT_SEND
+ ctype: mach_port_t;
+type mach_port_send_once_t = MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t;
+
+type mach_port_poly_t = polymorphic
+ ctype: mach_port_t;
+
+import <mach/std_types.h>;
+
+#endif /* _MACH_STD_TYPES_DEFS_ */
diff --git a/include/mach/std_types.h b/include/mach/std_types.h
new file mode 100644
index 0000000..0d5db0a
--- /dev/null
+++ b/include/mach/std_types.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach standard external interface type definitions.
+ *
+ */
+
+#ifndef _MACH_STD_TYPES_H_
+#define _MACH_STD_TYPES_H_
+
+#define EXPORT_BOOLEAN
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/machine/vm_types.h>
+
+typedef vm_offset_t pointer_t;
+typedef vm_offset_t vm_address_t;
+
+#endif /* _MACH_STD_TYPES_H_ */
diff --git a/include/mach/syscall_sw.h b/include/mach/syscall_sw.h
new file mode 100644
index 0000000..89597e9
--- /dev/null
+++ b/include/mach/syscall_sw.h
@@ -0,0 +1,121 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_SYSCALL_SW_H_
+#define _MACH_SYSCALL_SW_H_
+
+/*
+ * The machine-dependent "syscall_sw.h" file should
+ * define a macro for
+ * kernel_trap(trap_name, trap_number, arg_count)
+ * which will expand into assembly code for the
+ * trap.
+ *
+ * N.B.: When adding calls, do not put spaces in the macros.
+ */
+
+#include <mach/machine/syscall_sw.h>
+
+/*
+ * These trap numbers should be taken from the
+ * table in <kern/syscall_sw.c>.
+ */
+
+kernel_trap(evc_wait,-17,1)
+kernel_trap(evc_wait_clear,-18,1)
+
+kernel_trap(mach_msg_trap,-25,7)
+kernel_trap(mach_reply_port,-26,0)
+kernel_trap(mach_thread_self,-27,0)
+kernel_trap(mach_task_self,-28,0)
+kernel_trap(mach_host_self,-29,0)
+kernel_trap(mach_print,-30,1)
+
+kernel_trap(swtch_pri,-59,1)
+kernel_trap(swtch,-60,0)
+kernel_trap(thread_switch,-61,3)
+kernel_trap(nw_update,-80,3)
+kernel_trap(nw_lookup,-81,2)
+kernel_trap(nw_endpoint_allocate,-82,4)
+kernel_trap(nw_endpoint_deallocate,-83,1)
+kernel_trap(nw_buffer_allocate,-84,2)
+kernel_trap(nw_buffer_deallocate,-85,2)
+kernel_trap(nw_connection_open,-86,4)
+kernel_trap(nw_connection_accept,-87,3)
+kernel_trap(nw_connection_close,-88,1)
+kernel_trap(nw_multicast_add,-89,4)
+kernel_trap(nw_multicast_drop,-90,4)
+kernel_trap(nw_endpoint_status,-91,3)
+kernel_trap(nw_send,-92,3)
+kernel_trap(nw_receive,-93,2)
+kernel_trap(nw_rpc,-94,4)
+kernel_trap(nw_select,-95,3)
+
+
+/*
+ * These are syscall versions of Mach kernel calls.
+ * They only work on local tasks.
+ */
+
+kernel_trap(syscall_vm_map,-64,11)
+kernel_trap(syscall_vm_allocate,-65,4)
+kernel_trap(syscall_vm_deallocate,-66,3)
+
+kernel_trap(syscall_task_create,-68,3)
+kernel_trap(syscall_task_terminate,-69,1)
+kernel_trap(syscall_task_suspend,-70,1)
+kernel_trap(syscall_task_set_special_port,-71,3)
+
+kernel_trap(syscall_mach_port_allocate,-72,3)
+kernel_trap(syscall_mach_port_deallocate,-73,2)
+kernel_trap(syscall_mach_port_insert_right,-74,4)
+kernel_trap(syscall_mach_port_allocate_name,-75,3)
+kernel_trap(syscall_thread_depress_abort,-76,1)
+
+/* These are screwing up glibc somehow. */
+/*kernel_trap(syscall_device_writev_request,-39,6)*/
+/*kernel_trap(syscall_device_write_request,-40,6)*/
+
+/*
+ * These "Mach" traps are not implemented by the kernel;
+ * the emulation library and Unix server implement them.
+ * But they are traditionally part of libmach, and use
+ * the Mach trap calling conventions and numbering.
+ */
+
+#if UNIXOID_TRAPS
+
+kernel_trap(task_by_pid,-33,1)
+kernel_trap(pid_by_task,-34,4)
+kernel_trap(init_process,-41,0)
+kernel_trap(map_fd,-43,5)
+kernel_trap(rfs_make_symlink,-44,3)
+kernel_trap(htg_syscall,-52,3)
+kernel_trap(set_ras_address,-53,2)
+
+#endif /* UNIXOID_TRAPS */
+
+#endif /* _MACH_SYSCALL_SW_H_ */
diff --git a/include/mach/task_info.h b/include/mach/task_info.h
new file mode 100644
index 0000000..0e048c5
--- /dev/null
+++ b/include/mach/task_info.h
@@ -0,0 +1,126 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-independent task information structures and definitions.
+ *
+ * The definitions in this file are exported to the user. The kernel
+ * will translate its internal data structures to these structures
+ * as appropriate.
+ *
+ */
+
+#ifndef _MACH_TASK_INFO_H_
+#define _MACH_TASK_INFO_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/time_value.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *task_info_t; /* varying array of int */
+
+#define TASK_INFO_MAX (1024) /* maximum array size */
+typedef integer_t task_info_data_t[TASK_INFO_MAX];
+
+/*
+ * Currently defined information structures.
+ */
+#define TASK_BASIC_INFO 1 /* basic information */
+
+struct task_basic_info {
+ integer_t suspend_count; /* suspend count for task */
+ integer_t base_priority; /* base scheduling priority */
+ rpc_vm_size_t virtual_size; /* number of virtual pages */
+ rpc_vm_size_t resident_size; /* number of resident pages */
+ /* Deprecated, please use user_time64 */
+ rpc_time_value_t user_time; /* total user run time for
+ terminated threads */
+ /* Deprecated, please use system_time64 */
+ rpc_time_value_t system_time; /* total system run time for
+ terminated threads */
+ /* Deprecated, please use creation_time64 */
+ rpc_time_value_t creation_time; /* creation time stamp */
+ time_value64_t user_time64; /* total user run time for
+ terminated threads */
+ time_value64_t system_time64; /* total system run time for
+ terminated threads */
+ time_value64_t creation_time64; /* creation time stamp */
+};
+
+typedef struct task_basic_info task_basic_info_data_t;
+typedef struct task_basic_info *task_basic_info_t;
+#define TASK_BASIC_INFO_COUNT \
+ (sizeof(task_basic_info_data_t) / sizeof(integer_t))
+
+
+#define TASK_EVENTS_INFO 2 /* various event counts */
+
+struct task_events_info {
+ rpc_long_natural_t faults; /* number of page faults */
+ rpc_long_natural_t zero_fills; /* number of zero fill pages */
+ rpc_long_natural_t reactivations; /* number of reactivated pages */
+ rpc_long_natural_t pageins; /* number of actual pageins */
+ rpc_long_natural_t cow_faults; /* number of copy-on-write faults */
+ rpc_long_natural_t messages_sent; /* number of messages sent */
+ rpc_long_natural_t messages_received; /* number of messages received */
+};
+typedef struct task_events_info task_events_info_data_t;
+typedef struct task_events_info *task_events_info_t;
+#define TASK_EVENTS_INFO_COUNT \
+ (sizeof(task_events_info_data_t) / sizeof(integer_t))
+
+#define TASK_THREAD_TIMES_INFO 3 /* total times for live threads -
+ only accurate if suspended */
+
+struct task_thread_times_info {
+ /* Deprecated, please use user_time64 */
+ rpc_time_value_t user_time; /* total user run time for
+ live threads */
+ /* Deprecated, please use system_time64 */
+ rpc_time_value_t system_time; /* total system run time for
+ live threads */
+ time_value64_t user_time64; /* total user run time for
+ live threads */
+ time_value64_t system_time64; /* total system run time for
+ live threads */
+};
+
+typedef struct task_thread_times_info task_thread_times_info_data_t;
+typedef struct task_thread_times_info *task_thread_times_info_t;
+#define TASK_THREAD_TIMES_INFO_COUNT \
+ (sizeof(task_thread_times_info_data_t) / sizeof(integer_t))
+
+/*
+ * Flavor definitions for task_ras_control
+ */
+#define TASK_RAS_CONTROL_PURGE_ALL 0
+#define TASK_RAS_CONTROL_PURGE_ONE 1
+#define TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE 2
+#define TASK_RAS_CONTROL_INSTALL_ONE 3
+
+#endif /* _MACH_TASK_INFO_H_ */
+
diff --git a/include/mach/task_notify.defs b/include/mach/task_notify.defs
new file mode 100644
index 0000000..a4aff67
--- /dev/null
+++ b/include/mach/task_notify.defs
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ task_notify 4400;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+type task_notify_port_t = mach_port_t
+ ctype: mach_port_t
+#ifdef TASK_NOTIFY_INTRAN
+ intran: TASK_NOTIFY_INTRAN
+#endif
+#ifdef TASK_NOTIFY_INTRAN_PAYLOAD
+ intranpayload: TASK_NOTIFY_INTRAN_PAYLOAD
+#endif
+#ifdef TASK_NOTIFY_OUTTRAN
+ outtran: TASK_NOTIFY_OUTTRAN
+#endif
+#ifdef TASK_NOTIFY_DESTRUCTOR
+ destructor: TASK_NOTIFY_DESTRUCTOR
+#endif
+;
+
+#ifdef TASK_NOTIFY_IMPORTS
+TASK_NOTIFY_IMPORTS
+#endif
+
+type task_move_t = MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+
+/* These notifications are sent to the port registered via
+ `register_new_task_notification' and provide a robust parental
+ relation between tasks. */
+simpleroutine mach_notify_new_task(
+ notify : task_notify_port_t;
+ task : task_move_t;
+ parent : task_move_t);
diff --git a/include/mach/task_special_ports.h b/include/mach/task_special_ports.h
new file mode 100644
index 0000000..42ecc15
--- /dev/null
+++ b/include/mach/task_special_ports.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/task_special_ports.h
+ *
+ * Defines codes for special-purpose task ports. These are NOT
+ * port identifiers - they are only used for the task_get_special_port
+ * and task_set_special_port routines.
+ *
+ */
+
+#ifndef _MACH_TASK_SPECIAL_PORTS_H_
+#define _MACH_TASK_SPECIAL_PORTS_H_
+
+#define TASK_KERNEL_PORT 1 /* Represents task to the outside
+ world.*/
+#define TASK_EXCEPTION_PORT 3 /* Exception messages for task are
+ sent to this port. */
+#define TASK_BOOTSTRAP_PORT 4 /* Bootstrap environment for task. */
+
+/*
+ * Definitions for ease of use
+ */
+
+#define task_get_kernel_port(task, port) \
+ (task_get_special_port((task), TASK_KERNEL_PORT, (port)))
+
+#define task_set_kernel_port(task, port) \
+ (task_set_special_port((task), TASK_KERNEL_PORT, (port)))
+
+#define task_get_exception_port(task, port) \
+ (task_get_special_port((task), TASK_EXCEPTION_PORT, (port)))
+
+#define task_set_exception_port(task, port) \
+ (task_set_special_port((task), TASK_EXCEPTION_PORT, (port)))
+
+#define task_get_bootstrap_port(task, port) \
+ (task_get_special_port((task), TASK_BOOTSTRAP_PORT, (port)))
+
+#define task_set_bootstrap_port(task, port) \
+ (task_set_special_port((task), TASK_BOOTSTRAP_PORT, (port)))
+
+#endif /* _MACH_TASK_SPECIAL_PORTS_H_ */
diff --git a/include/mach/thread_info.h b/include/mach/thread_info.h
new file mode 100644
index 0000000..4f322e0
--- /dev/null
+++ b/include/mach/thread_info.h
@@ -0,0 +1,124 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/thread_info
+ *
+ * Thread information structure and definitions.
+ *
+ * The definitions in this file are exported to the user. The kernel
+ * will translate its internal data structures to these structures
+ * as appropriate.
+ *
+ */
+
+#ifndef _MACH_THREAD_INFO_H_
+#define _MACH_THREAD_INFO_H_
+
+#include <mach/boolean.h>
+#include <mach/policy.h>
+#include <mach/time_value.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *thread_info_t; /* varying array of ints */
+
+#define THREAD_INFO_MAX (1024) /* maximum array size */
+typedef integer_t thread_info_data_t[THREAD_INFO_MAX];
+
+/*
+ * Currently defined information.
+ */
+#define THREAD_BASIC_INFO 1 /* basic information */
+
+struct thread_basic_info {
+ /* Deprecated, please use user_time64 */
+ rpc_time_value_t user_time; /* user run time */
+ /* Deprecated, please use system_time64 */
+ rpc_time_value_t system_time; /* system run time */
+ integer_t cpu_usage; /* scaled cpu usage percentage */
+ integer_t base_priority; /* base scheduling priority */
+ integer_t cur_priority; /* current scheduling priority */
+ integer_t run_state; /* run state (see below) */
+ integer_t flags; /* various flags (see below) */
+ integer_t suspend_count; /* suspend count for thread */
+ integer_t sleep_time; /* number of seconds that thread
+ has been sleeping */
+ /* Deprecated, please use creation_time64 */
+ rpc_time_value_t creation_time; /* time stamp of creation */
+ time_value64_t user_time64; /* user run time */
+ time_value64_t system_time64; /* system run time */
+ time_value64_t creation_time64; /* time stamp of creation */
+};
+
+typedef struct thread_basic_info thread_basic_info_data_t;
+typedef struct thread_basic_info *thread_basic_info_t;
+#define THREAD_BASIC_INFO_COUNT \
+ (sizeof(thread_basic_info_data_t) / sizeof(natural_t))
+
+/*
+ * Scale factor for usage field.
+ */
+
+#define TH_USAGE_SCALE 1000
+
+/*
+ * Thread run states (state field).
+ */
+
+#define TH_STATE_RUNNING 1 /* thread is running normally */
+#define TH_STATE_STOPPED 2 /* thread is stopped */
+#define TH_STATE_WAITING 3 /* thread is waiting normally */
+#define TH_STATE_UNINTERRUPTIBLE 4 /* thread is in an uninterruptible
+ wait */
+#define TH_STATE_HALTED 5 /* thread is halted at a
+ clean point */
+
+/*
+ * Thread flags (flags field).
+ */
+#define TH_FLAGS_SWAPPED 0x1 /* thread is swapped out */
+#define TH_FLAGS_IDLE 0x2 /* thread is an idle thread */
+
+#define THREAD_SCHED_INFO 2
+
+struct thread_sched_info {
+ integer_t policy; /* scheduling policy */
+ integer_t data; /* associated data */
+ integer_t base_priority; /* base priority */
+ integer_t max_priority; /* max priority */
+ integer_t cur_priority; /* current priority */
+/*boolean_t*/integer_t depressed; /* depressed ? */
+ integer_t depress_priority; /* priority depressed from */
+ integer_t last_processor; /* last processor used by the thread */
+};
+
+typedef struct thread_sched_info thread_sched_info_data_t;
+typedef struct thread_sched_info *thread_sched_info_t;
+#define THREAD_SCHED_INFO_COUNT \
+ (sizeof(thread_sched_info_data_t) / sizeof(natural_t))
+
+#endif /* _MACH_THREAD_INFO_H_ */
diff --git a/include/mach/thread_special_ports.h b/include/mach/thread_special_ports.h
new file mode 100644
index 0000000..33e3a1f
--- /dev/null
+++ b/include/mach/thread_special_ports.h
@@ -0,0 +1,59 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/thread_special_ports.h
+ *
+ *	Defines codes for special-purpose thread ports.  These are NOT
+ * port identifiers - they are only used for the thread_get_special_port
+ * and thread_set_special_port routines.
+ *
+ */
+
+#ifndef _MACH_THREAD_SPECIAL_PORTS_H_
+#define _MACH_THREAD_SPECIAL_PORTS_H_
+
+#define THREAD_KERNEL_PORT 1 /* Represents the thread to the outside
+ world.*/
+#define THREAD_EXCEPTION_PORT 3 /* Exception messages for the thread
+ are sent to this port. */
+
+/*
+ * Definitions for ease of use
+ */
+
+#define thread_get_kernel_port(thread, port) \
+ (thread_get_special_port((thread), THREAD_KERNEL_PORT, (port)))
+
+#define thread_set_kernel_port(thread, port) \
+ (thread_set_special_port((thread), THREAD_KERNEL_PORT, (port)))
+
+#define thread_get_exception_port(thread, port) \
+ (thread_get_special_port((thread), THREAD_EXCEPTION_PORT, (port)))
+
+#define thread_set_exception_port(thread, port) \
+ (thread_set_special_port((thread), THREAD_EXCEPTION_PORT, (port)))
+
+#endif /* _MACH_THREAD_SPECIAL_PORTS_H_ */
diff --git a/include/mach/thread_status.h b/include/mach/thread_status.h
new file mode 100644
index 0000000..b02f5b4
--- /dev/null
+++ b/include/mach/thread_status.h
@@ -0,0 +1,55 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * This file contains the structure definitions for the user-visible
+ * thread state. This thread state is examined with the thread_get_state
+ * kernel call and may be changed with the thread_set_state kernel call.
+ *
+ */
+
+#ifndef _MACH_THREAD_STATUS_H_
+#define _MACH_THREAD_STATUS_H_
+
+/*
+ * The actual structure that comprises the thread state is defined
+ * in the machine dependent module.
+ */
+#include <mach/machine/vm_types.h>
+#include <mach/machine/thread_status.h>
+
+/*
+ * Generic definition for machine-dependent thread status.
+ */
+
+typedef natural_t *thread_state_t; /* Variable-length array */
+
+#define THREAD_STATE_MAX (1024) /* Maximum array size */
+typedef natural_t thread_state_data_t[THREAD_STATE_MAX];
+
+#define THREAD_STATE_FLAVOR_LIST 0 /* List of valid flavors */
+
+#endif /* _MACH_THREAD_STATUS_H_ */
diff --git a/include/mach/thread_switch.h b/include/mach/thread_switch.h
new file mode 100644
index 0000000..5235b87
--- /dev/null
+++ b/include/mach/thread_switch.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_THREAD_SWITCH_H_
+#define _MACH_THREAD_SWITCH_H_
+
+/*
+ * Constant definitions for thread_switch trap.
+ */
+
+#define SWITCH_OPTION_NONE 0
+#define SWITCH_OPTION_DEPRESS 1
+#define SWITCH_OPTION_WAIT 2
+
+#define valid_switch_option(opt) ((0 <= (opt)) && ((opt) <= 2))
+
+#endif /* _MACH_THREAD_SWITCH_H_ */
diff --git a/include/mach/time_value.h b/include/mach/time_value.h
new file mode 100644
index 0000000..e08707b
--- /dev/null
+++ b/include/mach/time_value.h
@@ -0,0 +1,201 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_TIME_VALUE_H_
+#define _MACH_TIME_VALUE_H_
+
+#include <mach/machine/vm_types.h>
+
+/*
+ * Time value returned by kernel.
+ */
+
+struct rpc_time_value {
+ /* TODO: this should be 64 bits regardless of the arch to be Y2038 proof. */
+ rpc_long_integer_t seconds;
+ integer_t microseconds;
+};
+
+/*
+ * Time value used by kernel interfaces. Ideally they should be migrated
+ * to use time_value64 below.
+ */
+struct time_value {
+ long_integer_t seconds;
+ integer_t microseconds;
+};
+typedef struct time_value time_value_t;
+
+#ifdef KERNEL
+typedef struct rpc_time_value rpc_time_value_t;
+#else
+typedef struct time_value rpc_time_value_t;
+#endif
+
+/*
+ * Time value used internally by the kernel that uses 64 bits to track seconds
+ * and nanoseconds. Note that the current resolution is only microseconds.
+ */
+struct time_value64 {
+ int64_t seconds;
+ int64_t nanoseconds;
+};
+typedef struct time_value64 time_value64_t;
+
+/**
+ * Functions used by Mig to perform user to kernel conversion and vice-versa.
+ * We only do this because we may run a 64 bit kernel with a 32 bit user space.
+ */
+static __inline__ rpc_time_value_t convert_time_value_to_user(time_value_t tv)
+{
+ rpc_time_value_t user = {.seconds = tv.seconds, .microseconds = tv.microseconds};
+ return user;
+}
+static __inline__ time_value_t convert_time_value_from_user(rpc_time_value_t tv)
+{
+ time_value_t kernel = {.seconds = tv.seconds, .microseconds = tv.microseconds};
+ return kernel;
+}
+
+/*
+ * Macros to manipulate time values. Assume that time values
+ * are normalized (microseconds <= 999999).
+ */
+#define TIME_MICROS_MAX (1000000)
+#define TIME_NANOS_MAX (1000000000)
+
+#define time_value_assert(val) \
+ assert(0 <= (val)->microseconds && (val)->microseconds < TIME_MICROS_MAX);
+
+#define time_value64_assert(val) \
+ assert(0 <= (val)->nanoseconds && (val)->nanoseconds < TIME_NANOS_MAX);
+
+#define time_value_add_usec(val, micros) { \
+ time_value_assert(val); \
+ if (((val)->microseconds += (micros)) \
+ >= TIME_MICROS_MAX) { \
+ (val)->microseconds -= TIME_MICROS_MAX; \
+ (val)->seconds++; \
+ } \
+ time_value_assert(val); \
+}
+
+#define time_value64_add_nanos(val, nanos) { \
+ time_value64_assert(val); \
+ if (((val)->nanoseconds += (nanos)) \
+ >= TIME_NANOS_MAX) { \
+ (val)->nanoseconds -= TIME_NANOS_MAX; \
+ (val)->seconds++; \
+ } \
+ time_value64_assert(val); \
+}
+
+#define time_value64_sub_nanos(val, nanos) { \
+ time_value64_assert(val); \
+ if (((val)->nanoseconds -= (nanos)) < 0) { \
+ (val)->nanoseconds += TIME_NANOS_MAX; \
+ (val)->seconds--; \
+ } \
+ time_value64_assert(val); \
+}
+
+#define time_value_add(result, addend) { \
+ time_value_assert(addend); \
+ (result)->seconds += (addend)->seconds; \
+ time_value_add_usec(result, (addend)->microseconds); \
+ }
+
+#define time_value64_add(result, addend) { \
+ time_value64_assert(addend); \
+ (result)->seconds += (addend)->seconds; \
+ time_value64_add_nanos(result, (addend)->nanoseconds); \
+ }
+
+#define time_value64_sub(result, subtrahend) { \
+ time_value64_assert(subtrahend); \
+ (result)->seconds -= (subtrahend)->seconds; \
+ time_value64_sub_nanos(result, (subtrahend)->nanoseconds); \
+ }
+
+#define time_value64_init(tv) { \
+ (tv)->seconds = 0; \
+ (tv)->nanoseconds = 0; \
+ }
+
+#define TIME_VALUE64_TO_TIME_VALUE(tv64, tv) do { \
+ (tv)->seconds = (tv64)->seconds; \
+ (tv)->microseconds = (tv64)->nanoseconds / 1000; \
+} while(0)
+
+#define TIME_VALUE_TO_TIME_VALUE64(tv, tv64) do { \
+ (tv64)->seconds = (tv)->seconds; \
+ (tv64)->nanoseconds = (tv)->microseconds * 1000; \
+} while(0)
+
+/*
+ * Time value available through the mapped-time interface.
+ * Read this mapped value with
+ * do {
+ * secs = mtime->seconds;
+ * __sync_synchronize();
+ * usecs = mtime->microseconds;
+ * __sync_synchronize();
+ * } while (secs != mtime->check_seconds);
+ */
+
+typedef struct mapped_time_value {
+ integer_t seconds;
+ integer_t microseconds;
+ integer_t check_seconds;
+ struct time_value64 time_value;
+ int64_t check_seconds64;
+} mapped_time_value_t;
+
+/* Macros for converting between struct timespec and time_value_t. */
+
+#define TIME_VALUE_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->seconds; \
+ (ts)->tv_nsec = (tv)->microseconds * 1000; \
+} while(0)
+
+#define TIMESPEC_TO_TIME_VALUE(tv, ts) do { \
+ (tv)->seconds = (ts)->tv_sec; \
+ (tv)->microseconds = (ts)->tv_nsec / 1000; \
+} while(0)
+
+/* Macros for converting between struct timespec and time_value64_t. */
+
+#define TIME_VALUE64_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->seconds; \
+ (ts)->tv_nsec = (tv)->nanoseconds; \
+} while(0)
+
+#define TIMESPEC_TO_TIME_VALUE64(tv, ts) do { \
+ (tv)->seconds = (ts)->tv_sec; \
+ (tv)->nanoseconds = (ts)->tv_nsec; \
+} while(0)
+
+#endif /* _MACH_TIME_VALUE_H_ */
diff --git a/include/mach/version.h b/include/mach/version.h
new file mode 100644
index 0000000..3ef7859
--- /dev/null
+++ b/include/mach/version.h
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * Each kernel has a major and minor version number. Changes in
+ * the major number in general indicate a change in exported features.
+ * Changes in minor number usually correspond to internal-only
+ * changes that the user need not be aware of (in general). These
+ *	values are stored at boot time in the machine_info structure and
+ * can be obtained by user programs with the host_info kernel call.
+ * This mechanism is intended to be the formal way for Mach programs
+ * to provide for backward compatibility in future releases.
+ *
+ * [ This needs to be reconciled somehow with the major/minor version
+ * number stuffed into the version string - mja, 5/8/87 ]
+ *
+ * Following is an informal history of the numbers:
+ *
+ * 25-March-87 Avadis Tevanian, Jr.
+ * Created version numbering scheme. Started with major 1,
+ * minor 0.
+ */
+
+#ifndef _MACH_VERSION_H_
+#define _MACH_VERSION_H_
+
+#define KERNEL_MAJOR_VERSION 4
+#define KERNEL_MINOR_VERSION 0
+
+/*
+ * Version number of the kernel include files.
+ *
+ * This number must be changed whenever an incompatible change is made to one
+ * or more of our include files which are used by application programs that
+ * delve into kernel memory. The number should normally be simply incremented
+ * but may actually be changed in any manner so long as it differs from the
+ * numbers previously assigned to any other versions with which the current
+ * version is incompatible. It is used at boot time to determine which
+ * versions of the system programs to install.
+ *
+ * Note that the symbol _INCLUDE_VERSION must be set to this in the symbol
+ * table. On the VAX for example, this is done in locore.s.
+ */
+
+/*
+ * Current allocation strategy: bump either branch by 2, until non-MACH is
+ * excised from the CSD environment.
+ */
+#define INCLUDE_VERSION 0
+
+#endif /* _MACH_VERSION_H_ */
diff --git a/include/mach/vm_attributes.h b/include/mach/vm_attributes.h
new file mode 100644
index 0000000..9ca3ef5
--- /dev/null
+++ b/include/mach/vm_attributes.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_attributes.h
+ * Author: Alessandro Forin
+ *
+ * Virtual memory attributes definitions.
+ *
+ * These definitions are in addition to the machine-independent
+ * ones (e.g. protection), and are only selectively supported
+ * on specific machine architectures.
+ *
+ */
+
+#ifndef _MACH_VM_ATTRIBUTES_H_
+#define _MACH_VM_ATTRIBUTES_H_
+
+/*
+ * Types of machine-dependent attributes
+ */
+typedef unsigned int vm_machine_attribute_t;
+
+#define MATTR_CACHE 1 /* cachability */
+#define MATTR_MIGRATE 2 /* migrability */
+#define MATTR_REPLICATE 4 /* replicability */
+
+/*
+ * Values for the above, e.g. operations on attribute
+ */
+typedef int vm_machine_attribute_val_t;
+
+#define MATTR_VAL_OFF 0 /* (generic) turn attribute off */
+#define MATTR_VAL_ON 1 /* (generic) turn attribute on */
+#define MATTR_VAL_GET 2 /* (generic) return current value */
+
+#define MATTR_VAL_CACHE_FLUSH 6 /* flush from all caches */
+#define MATTR_VAL_DCACHE_FLUSH 7 /* flush from data caches */
+#define MATTR_VAL_ICACHE_FLUSH 8 /* flush from instruction caches */
+
+#endif /* _MACH_VM_ATTRIBUTES_H_ */
diff --git a/include/mach/vm_cache_statistics.h b/include/mach/vm_cache_statistics.h
new file mode 100644
index 0000000..072976a
--- /dev/null
+++ b/include/mach/vm_cache_statistics.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MACH_VM_CACHE_STATISTICS_H_
+#define _MACH_VM_CACHE_STATISTICS_H_
+
+#include <mach/machine/vm_types.h>
+
+struct vm_cache_statistics {
+ integer_t cache_object_count; /* # of cached objects */
+ integer_t cache_count; /* # of cached pages */
+ integer_t active_tmp_count; /* # of active temporary pages */
+ integer_t inactive_tmp_count; /* # of inactive temporary pages */
+ integer_t active_perm_count; /* # of active permanent pages */
+ integer_t inactive_perm_count; /* # of inactive permanent pages */
+ integer_t dirty_count; /* # of dirty pages */
+ integer_t laundry_count; /* # of pages being laundered */
+ integer_t writeback_count; /* # of pages being written back */
+ integer_t slab_count; /* # of slab allocator pages */
+ integer_t slab_reclaim_count; /* # of reclaimable slab pages */
+};
+
+typedef struct vm_cache_statistics *vm_cache_statistics_t;
+typedef struct vm_cache_statistics vm_cache_statistics_data_t;
+
+#endif /* _MACH_VM_CACHE_STATISTICS_H_ */
diff --git a/include/mach/vm_inherit.h b/include/mach/vm_inherit.h
new file mode 100644
index 0000000..2899290
--- /dev/null
+++ b/include/mach/vm_inherit.h
@@ -0,0 +1,55 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_inherit.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Virtual memory map inheritance definitions.
+ *
+ */
+
+#ifndef _MACH_VM_INHERIT_H_
+#define _MACH_VM_INHERIT_H_
+
+/*
+ * Types defined:
+ *
+ * vm_inherit_t inheritance codes.
+ */
+
+typedef int vm_inherit_t; /* might want to change this */
+
+/*
+ * Enumeration of valid values for vm_inherit_t.
+ */
+
+#define VM_INHERIT_SHARE ((vm_inherit_t) 0) /* share with child */
+#define VM_INHERIT_COPY ((vm_inherit_t) 1) /* copy into child */
+#define VM_INHERIT_NONE ((vm_inherit_t) 2) /* absent from child */
+
+#define VM_INHERIT_DEFAULT VM_INHERIT_COPY
+
+#endif /* _MACH_VM_INHERIT_H_ */
diff --git a/include/mach/vm_param.h b/include/mach/vm_param.h
new file mode 100644
index 0000000..4cbd0ec
--- /dev/null
+++ b/include/mach/vm_param.h
@@ -0,0 +1,102 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_param.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine independent virtual memory parameters.
+ *
+ */
+
+#ifndef _MACH_VM_PARAM_H_
+#define _MACH_VM_PARAM_H_
+
+#include <mach/machine/vm_param.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * The machine independent pages are referred to as PAGES. A page
+ * is some number of hardware pages, depending on the target machine.
+ *
+ * All references to the size of a page should be done
+ * with PAGE_SIZE, PAGE_SHIFT, or PAGE_MASK.
+ * They may be implemented as either constants or variables,
+ * depending on more-specific code.
+ * If they're variables, they had better be initialized
+ * by the time system-independent code starts getting called.
+ *
+ * Regardless whether it is implemented with a constant or a variable,
+ * the PAGE_SIZE is assumed to be a power of two throughout the
+ * virtual memory system implementation.
+ *
+ * More-specific code must at least provide PAGE_SHIFT;
+ * we can calculate the others if necessary.
+ * (However, if PAGE_SHIFT really refers to a variable,
+ * PAGE_SIZE and PAGE_MASK should also be variables
+ * so their values don't have to be constantly recomputed.)
+ */
+#ifndef PAGE_SHIFT
+#error mach/machine/vm_param.h needs to define PAGE_SHIFT.
+#endif
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (PAGE_SIZE-1)
+#endif
+
+/*
+ * Convert addresses to pages and vice versa.
+ * No rounding is used.
+ */
+
+#define atop(x) (((vm_size_t)(x)) >> PAGE_SHIFT)
+#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT))
+
+/*
+ * Round off or truncate to the nearest page. These will work
+ *	for either addresses or counts.  (i.e. 1 byte rounds to 1 page,
+ *	PAGE_SIZE bytes round to 1 page.)
+ */
+
+#define round_page(x) ((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
+#define trunc_page(x) ((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
+
+#define round_phys(x) ((phys_addr_t)((((phys_addr_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
+#define trunc_phys(x) ((phys_addr_t)(((phys_addr_t)(x)) & ~PAGE_MASK))
+
+/*
+ * Determine whether an address is page-aligned, or a count is
+ * an exact page multiple.
+ */
+
+#define page_aligned(x) ((((vm_offset_t) (x)) & PAGE_MASK) == 0)
+#define phys_aligned(x) ((((phys_addr_t) (x)) & PAGE_MASK) == 0)
+
+#endif /* _MACH_VM_PARAM_H_ */
diff --git a/include/mach/vm_prot.h b/include/mach/vm_prot.h
new file mode 100644
index 0000000..22a76a8
--- /dev/null
+++ b/include/mach/vm_prot.h
@@ -0,0 +1,79 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_prot.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Virtual memory protection definitions.
+ *
+ */
+
+#ifndef _MACH_VM_PROT_H_
+#define _MACH_VM_PROT_H_
+
+/*
+ * Types defined:
+ *
+ * vm_prot_t VM protection values.
+ */
+
+typedef int vm_prot_t;
+
+/*
+ * Protection values, defined as bits within the vm_prot_t type
+ */
+
+#define VM_PROT_NONE ((vm_prot_t) 0x00)
+
+#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */
+#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */
+#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */
+
+/*
+ * The default protection for newly-created virtual memory
+ */
+
+#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE)
+
+/*
+ * The maximum privileges possible, for parameter checking.
+ */
+
+#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
+
+/*
+ * An invalid protection value.
+ * Used only by memory_object_lock_request to indicate no change
+ * to page locks. Using -1 here is a bad idea because it
+ * looks like VM_PROT_ALL and then some.
+ */
+#define VM_PROT_NO_CHANGE ((vm_prot_t) 0x08)
+
+/*
+ * This protection value says whether special notification is to be used.
+ */
+#define VM_PROT_NOTIFY ((vm_prot_t) 0x10)
+#endif /* _MACH_VM_PROT_H_ */
diff --git a/include/mach/vm_statistics.h b/include/mach/vm_statistics.h
new file mode 100644
index 0000000..2039a82
--- /dev/null
+++ b/include/mach/vm_statistics.h
@@ -0,0 +1,75 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_statistics.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
+ *
+ * Virtual memory statistics structure.
+ *
+ */
+
+#ifndef _MACH_VM_STATISTICS_H_
+#define _MACH_VM_STATISTICS_H_
+
+#include <mach/machine/vm_types.h>
+
+struct vm_statistics {
+ integer_t pagesize; /* page size in bytes */
+ integer_t free_count; /* # of pages free */
+ integer_t active_count; /* # of pages active */
+ integer_t inactive_count; /* # of pages inactive */
+ integer_t wire_count; /* # of pages wired down */
+ integer_t zero_fill_count; /* # of zero fill pages */
+ integer_t reactivations; /* # of pages reactivated */
+ integer_t pageins; /* # of pageins */
+ integer_t pageouts; /* # of pageouts */
+ integer_t faults; /* # of faults */
+ integer_t cow_faults; /* # of copy-on-writes */
+ integer_t lookups; /* object cache lookups */
+ integer_t hits; /* object cache hits */
+};
+
+typedef struct vm_statistics *vm_statistics_t;
+typedef struct vm_statistics vm_statistics_data_t;
+
+#ifdef MACH_KERNEL
+extern vm_statistics_data_t vm_stat;
+#endif /* MACH_KERNEL */
+
+/*
+ * Each machine dependent implementation is expected to
+ *	keep certain statistics.  They may do this any way they
+ * so choose, but are expected to return the statistics
+ * in the following structure.
+ */
+
+struct pmap_statistics {
+ integer_t resident_count; /* # of pages mapped (total)*/
+ integer_t wired_count; /* # of pages wired */
+};
+
+typedef struct pmap_statistics *pmap_statistics_t;
+#endif /* _MACH_VM_STATISTICS_H_ */
diff --git a/include/mach/vm_sync.h b/include/mach/vm_sync.h
new file mode 100644
index 0000000..0c7451c
--- /dev/null
+++ b/include/mach/vm_sync.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Free Software Foundation
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * All Rights Reserved.
+ */
+
+#ifndef _MACH_VM_SYNC_H_
+#define _MACH_VM_SYNC_H_
+
+/*
+ * Types defined:
+ *
+ * vm_sync_t VM synchronization flags
+ */
+
+typedef int vm_sync_t;
+
+/*
+ * Synchronization values
+ */
+
+#define VM_SYNC_ASYNCHRONOUS ((vm_sync_t) 0x01)
+#define VM_SYNC_SYNCHRONOUS ((vm_sync_t) 0x02)
+#define VM_SYNC_INVALIDATE ((vm_sync_t) 0x04)
+#if 0
+/* Not supported yet. */
+#define VM_SYNC_KILLPAGES ((vm_sync_t) 0x08)
+#define VM_SYNC_DEACTIVATE ((vm_sync_t) 0x10)
+#define VM_SYNC_CONTIGUOUS ((vm_sync_t) 0x20)
+#define VM_SYNC_REUSABLEPAGES ((vm_sync_t) 0x40)
+#endif
+
+#endif /* _MACH_VM_SYNC_H_ */
diff --git a/include/mach/vm_wire.h b/include/mach/vm_wire.h
new file mode 100644
index 0000000..1552dfa
--- /dev/null
+++ b/include/mach/vm_wire.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MACH_VM_WIRE_H_
+#define _MACH_VM_WIRE_H_
+
+typedef int vm_wire_t;
+
+#define VM_WIRE_NONE 0
+#define VM_WIRE_CURRENT 1
+#define VM_WIRE_FUTURE 2
+
+#define VM_WIRE_ALL (VM_WIRE_CURRENT | VM_WIRE_FUTURE)
+
+#endif /* _MACH_VM_WIRE_H_ */
diff --git a/include/mach/xen.h b/include/mach/xen.h
new file mode 100644
index 0000000..4462082
--- /dev/null
+++ b/include/mach/xen.h
@@ -0,0 +1,95 @@
+
+/*
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _MACH_XEN_H
+#define _MACH_XEN_H
+#ifdef MACH_XEN
+#include <sys/types.h>
+#include <xen/public/xen.h>
+#include <i386/vm_param.h>
+
+extern struct start_info boot_info;
+
+extern volatile struct shared_info hyp_shared_info;
+
+#ifdef MACH_PV_PAGETABLES
+/* Memory translations */
+
+/* pa are physical addresses, from 0 to size of memory */
+/* ma are machine addresses, i.e. _real_ hardware addresses */
+/* la are linear addresses, i.e. without segmentation */
+
+/* This might also be useful out of Xen */
+#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+extern unsigned long la_shift;
+#else
+#define la_shift LINEAR_MIN_KERNEL_ADDRESS
+#endif
+#define la_to_pa(a) ((vm_offset_t)(((vm_offset_t)(a)) - la_shift))
+#define pa_to_la(a) ((vm_offset_t)(((vm_offset_t)(a)) + la_shift))
+
+#define kv_to_la(a) pa_to_la(_kvtophys(a))
+#define la_to_kv(a) phystokv(la_to_pa(a))
+
+#ifdef MACH_PSEUDO_PHYS
+#ifdef __i386__
+#if PAE
+#define PFN_LIST MACH2PHYS_VIRT_START_PAE
+#else
+#define PFN_LIST MACH2PHYS_VIRT_START_NONPAE
+#endif
+#else
+#define PFN_LIST MACH2PHYS_VIRT_START
+#endif
+#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+extern unsigned long *pfn_list;
+#else
+#define pfn_list ((unsigned long *) PFN_LIST)
+#endif
+#define mfn_to_pfn(n) (pfn_list[n])
+
+extern unsigned long *mfn_list;
+#define pfn_to_mfn(n) (mfn_list[n])
+#else
+#define mfn_to_pfn(n) (n)
+#define pfn_to_mfn(n) (n)
+#endif /* MACH_PSEUDO_PHYS */
+
+#define pa_to_mfn(a) (pfn_to_mfn(atop(a)))
+#ifdef PAE
+#define pa_to_ma(a) ({ vm_offset_t __a = (vm_offset_t) (a); (((pt_entry_t) pa_to_mfn(__a)) << PAGE_SHIFT) | (__a & PAGE_MASK); })
+#define ma_to_pa(a) ({ pt_entry_t __a = (pt_entry_t) (a); (mfn_to_pfn(__a >> PAGE_SHIFT) << PAGE_SHIFT) | (__a & PAGE_MASK); })
+#else
+#define pa_to_ma(a) ({ vm_offset_t __a = (vm_offset_t) (a); ptoa(pa_to_mfn(__a)) | (__a & PAGE_MASK); })
+#define ma_to_pa(a) ({ vm_offset_t __a = (vm_offset_t) (a); (mfn_to_pfn(atop((__a))) << PAGE_SHIFT) | (__a & PAGE_MASK); })
+#endif
+
+#define kv_to_mfn(a) pa_to_mfn(_kvtophys(a))
+#define kv_to_ma(a) pa_to_ma(_kvtophys(a))
+#else /* MACH_PV_PAGETABLES */
+#define mfn_to_pfn(n) (n)
+#define pfn_to_mfn(n) (n)
+#endif /* MACH_PV_PAGETABLES */
+
+#define mfn_to_kv(mfn) phystokv(ptoa(mfn_to_pfn(mfn)))
+
+#include <machine/xen.h>
+
+#endif /* MACH_XEN */
+#endif /* _MACH_XEN_H */
diff --git a/include/mach_debug/hash_info.h b/include/mach_debug/hash_info.h
new file mode 100644
index 0000000..8e6f19c
--- /dev/null
+++ b/include/mach_debug/hash_info.h
@@ -0,0 +1,41 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEBUG_HASH_INFO_H_
+#define _MACH_DEBUG_HASH_INFO_H_
+
+/*
+ * Remember to update the mig type definitions
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+typedef struct hash_info_bucket {
+ unsigned int hib_count; /* number of records in bucket */
+} hash_info_bucket_t;
+
+typedef hash_info_bucket_t *hash_info_bucket_array_t;
+
+#endif /* _MACH_DEBUG_HASH_INFO_H_ */
diff --git a/include/mach_debug/mach_debug.defs b/include/mach_debug/mach_debug.defs
new file mode 100644
index 0000000..2de7df5
--- /dev/null
+++ b/include/mach_debug/mach_debug.defs
@@ -0,0 +1,228 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Matchmaker definitions file for Mach kernel debugging interface.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach_debug 3000;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach_debug/mach_debug_types.defs>
+
+skip; /* host_ipc_statistics */
+skip; /* host_ipc_statistics_reset */
+skip; /* host_callout_info */
+skip; /* host_callout_statistics */
+skip; /* host_callout_statistics_reset */
+skip; /* host_zone_info */
+skip; /* host_ipc_bucket_info */
+
+#if !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG
+
+/*
+ * Returns the exact number of extant send rights
+ * for the given receive right.
+ */
+
+routine mach_port_get_srights(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out srights : mach_port_rights_t);
+
+skip; /* host_ipc_hash_info */
+
+/*
+ * Returns information about the marequest hash table.
+ */
+
+routine host_ipc_marequest_info(
+ host : host_t;
+ out max_requests : unsigned;
+ out info : hash_info_bucket_array_t,
+ CountInOut, Dealloc);
+
+skip; /* mach_port_space_info */
+
+/*
+ * Returns information about the dead-name requests
+ * registered with the named receive right.
+ */
+
+routine mach_port_dnrequest_info(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out total : unsigned; /* total size of table */
+ out used : unsigned); /* amount used */
+
+#else /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+skip; /* mach_port_get_srights */
+skip; /* host_ipc_hash_info */
+skip; /* host_ipc_marequest_info */
+skip; /* mach_port_space_info */
+skip; /* mach_port_dnrequest_info */
+#endif /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+
+skip; /* mach_vm_region_info */
+skip; /* vm_mapped_pages_info */
+
+/*
+ * Returns stack usage information:
+ * reserved Amount of stack space reserved for pcb.
+ * total Number of stacks.
+ * space Total VM space for stacks.
+ * resident Resident VM space for stacks.
+ * maxusage Maximum amount of stack used.
+ * maxstack Address in the kernel of the largest stack.
+ */
+
+routine host_stack_usage(
+ host : host_t;
+ out reserved : vm_size_t;
+ out total : unsigned;
+ out space : vm_size_t;
+ out resident : vm_size_t;
+ out maxusage : vm_size_t;
+ out maxstack : vm_offset_t);
+
+routine processor_set_stack_usage(
+ pset : processor_set_name_t;
+ out total : unsigned;
+ out space : vm_size_t;
+ out resident : vm_size_t;
+ out maxusage : vm_size_t;
+ out maxstack : vm_offset_t);
+
+#if !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG
+
+/*
+ * Returns information about the global VP table.
+ */
+
+routine host_virtual_physical_table_info(
+ host : host_t;
+ out info : hash_info_bucket_array_t,
+ CountInOut, Dealloc);
+
+#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+skip; /* host_virtual_physical_table_info */
+#endif /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+
+/* The old host_load_symbol_table with a different ABI for symtab_name_t */
+skip;
+
+#if !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG
+
+/*
+ * Return the type and address of the kernel object
+ * that the given send/receive right represents.
+ */
+
+routine mach_port_kernel_object(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out object_type : unsigned;
+ out object_addr : vm_offset_t);
+
+#else /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+skip; /* mach_port_kernel_object */
+#endif /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+
+#if !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG
+
+/*
+ * Returns information about a region of memory.
+ */
+
+routine mach_vm_region_info(
+ task : vm_task_t;
+ address : vm_address_t;
+ out region : vm_region_info_t;
+ /* avoid out-translation of the argument */
+ out object : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t);
+
+routine mach_vm_object_info(
+ object : memory_object_name_t;
+ out info : vm_object_info_t;
+ /* avoid out-translation of the argument */
+ out shadow : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+ /* avoid out-translation of the argument */
+ out copy : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t);
+
+routine mach_vm_object_pages(
+ object : memory_object_name_t;
+ out pages : vm_page_info_array_t,
+ CountInOut, Dealloc);
+
+#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+skip; /* mach_vm_region_info */
+skip; /* mach_vm_object_info */
+skip; /* mach_vm_object_pages */
+#endif /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+
+/*
+ * Returns information about the memory allocation caches.
+ */
+routine host_slab_info(
+ host : host_t;
+ out info : cache_info_array_t,
+ CountInOut, Dealloc);
+
+#if !defined(MACH_KDB) || MACH_KDB
+/*
+ * Loads a symbol table for an external file into the kernel debugger.
+ * The symbol table data is an array of characters. It is assumed that
+ * the caller and the kernel debugger agree on its format.
+ */
+
+routine host_load_symbol_table(
+ host : host_priv_t;
+ task : task_t;
+ name : symtab_name_t;
+ symtab : pointer_t);
+
+#else /* !defined(MACH_KDB) || MACH_KDB */
+skip; /* host_load_symbol_table */
+#endif /* !defined(MACH_KDB) || MACH_KDB */
+
+#if !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG
+routine mach_vm_object_pages_phys(
+ object : memory_object_name_t;
+ out pages : vm_page_phys_info_array_t,
+ CountInOut, Dealloc);
+#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+skip; /* mach_vm_object_pages_phys */
+#endif /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
diff --git a/include/mach_debug/mach_debug_types.defs b/include/mach_debug/mach_debug_types.defs
new file mode 100644
index 0000000..d897380
--- /dev/null
+++ b/include/mach_debug/mach_debug_types.defs
@@ -0,0 +1,121 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel debugging interface type declarations
+ */
+
+#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_
+#define _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_
+
+#include <mach/std_types.defs>
+
+#define CACHE_NAME_MAX_LEN 32
+type cache_name_t = struct[CACHE_NAME_MAX_LEN] of char;
+#undef CACHE_NAME_MAX_LEN
+type cache_info_t = struct {
+ integer_t flags;
+ rpc_vm_size_t cpu_pool_size;
+ rpc_vm_size_t obj_size;
+ rpc_vm_size_t align;
+ rpc_vm_size_t buf_size;
+ rpc_vm_size_t slab_size;
+ rpc_long_natural_t bufs_per_slab;
+ rpc_long_natural_t nr_objs;
+ rpc_long_natural_t nr_bufs;
+ rpc_long_natural_t nr_slabs;
+ rpc_long_natural_t nr_free_slabs;
+ cache_name_t name;
+};
+type cache_info_array_t = array[] of cache_info_t;
+
+type hash_info_bucket_t = struct {
+ unsigned hib_count;
+};
+type hash_info_bucket_array_t = array[] of hash_info_bucket_t;
+
+type vm_region_info_t = struct {
+ rpc_vm_offset_t vri_start;
+ rpc_vm_offset_t vri_end;
+ vm_prot_t vri_protection;
+ vm_prot_t vri_max_protection;
+ vm_inherit_t vri_inheritance;
+ unsigned vri_wired_count;
+ unsigned vri_user_wired_count;
+ rpc_vm_offset_t vri_object;
+ rpc_vm_offset_t vri_offset;
+ integer_t vri_needs_copy;
+ unsigned vri_sharing;
+};
+type vm_region_info_array_t = array[] of vm_region_info_t;
+
+type vm_object_info_state_t = uint32_t;
+type vm_object_info_t = struct {
+ rpc_vm_offset_t voi_object;
+ rpc_vm_size_t voi_pagesize;
+ rpc_vm_size_t voi_size;
+ unsigned voi_ref_count;
+ unsigned voi_resident_page_count;
+ unsigned voi_absent_count;
+ rpc_vm_offset_t voi_copy;
+ rpc_vm_offset_t voi_shadow;
+ rpc_vm_offset_t voi_shadow_offset;
+ rpc_vm_offset_t voi_paging_offset;
+ memory_object_copy_strategy_t voi_copy_strategy;
+ rpc_vm_offset_t voi_last_alloc;
+ unsigned voi_paging_in_progress;
+ vm_object_info_state_t voi_state;
+};
+type vm_object_info_array_t = array[] of vm_object_info_t;
+
+type vm_page_info_state_t = uint32_t;
+
+type vm_page_info_t = struct {
+ rpc_vm_offset_t vpi_offset;
+ rpc_vm_offset_t vpi_phys_addr;
+ unsigned vpi_wire_count;
+ vm_prot_t vpi_page_lock;
+ vm_prot_t vpi_unlock_request;
+ vm_page_info_state_t vpi_state;
+};
+type vm_page_info_array_t = array[] of vm_page_info_t;
+
+type vm_page_phys_info_t = struct {
+ rpc_vm_offset_t vpi_offset;
+ rpc_phys_addr_t vpi_phys_addr;
+ unsigned vpi_wire_count;
+ vm_prot_t vpi_page_lock;
+ vm_prot_t vpi_unlock_request;
+ vm_page_info_state_t vpi_state;
+};
+type vm_page_phys_info_array_t = array[] of vm_page_phys_info_t;
+
+type symtab_name_t = c_string[32];
+
+type kernel_debug_name_t = c_string[*: 64];
+
+import <mach_debug/mach_debug_types.h>;
+
+#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_ */
diff --git a/include/mach_debug/mach_debug_types.h b/include/mach_debug/mach_debug_types.h
new file mode 100644
index 0000000..98124ad
--- /dev/null
+++ b/include/mach_debug/mach_debug_types.h
@@ -0,0 +1,52 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel debugging interface type declarations
+ */
+
+#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_H_
+#define _MACH_DEBUG_MACH_DEBUG_TYPES_H_
+
+#include <mach_debug/vm_info.h>
+#include <mach_debug/slab_info.h>
+#include <mach_debug/hash_info.h>
+
+typedef char symtab_name_t[32];
+typedef const char *const_symtab_name_t;
+
+/*
+ * A fixed-length string data type intended for names given to
+ * kernel objects.
+ *
+ * Note that it is not guaranteed that the in-kernel data
+ * structure will hold KERNEL_DEBUG_NAME_MAX bytes. The given
+ * name will be truncated to fit into the target data structure.
+ */
+#define KERNEL_DEBUG_NAME_MAX (64)
+typedef char kernel_debug_name_t[KERNEL_DEBUG_NAME_MAX];
+typedef const char *const_kernel_debug_name_t;
+
+#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_H_ */
diff --git a/include/mach_debug/slab_info.h b/include/mach_debug/slab_info.h
new file mode 100644
index 0000000..0f6b5a2
--- /dev/null
+++ b/include/mach_debug/slab_info.h
@@ -0,0 +1,56 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEBUG_SLAB_INFO_H_
+#define _MACH_DEBUG_SLAB_INFO_H_
+
+#include <sys/types.h>
+
+/*
+ * Remember to update the mig type definitions
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+#define CACHE_NAME_MAX_LEN 32
+
+typedef struct cache_info {
+ int flags;
+ rpc_vm_size_t cpu_pool_size;
+ rpc_vm_size_t obj_size;
+ rpc_vm_size_t align;
+ rpc_vm_size_t buf_size;
+ rpc_vm_size_t slab_size;
+ rpc_long_natural_t bufs_per_slab;
+ rpc_long_natural_t nr_objs;
+ rpc_long_natural_t nr_bufs;
+ rpc_long_natural_t nr_slabs;
+ rpc_long_natural_t nr_free_slabs;
+ char name[CACHE_NAME_MAX_LEN];
+} cache_info_t;
+
+typedef cache_info_t *cache_info_array_t;
+
+#endif /* _MACH_DEBUG_SLAB_INFO_H_ */
diff --git a/include/mach_debug/vm_info.h b/include/mach_debug/vm_info.h
new file mode 100644
index 0000000..cf45a2c
--- /dev/null
+++ b/include/mach_debug/vm_info.h
@@ -0,0 +1,143 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach_debug/vm_info.h
+ * Author: Rich Draves
+ * Date: March, 1990
+ *
+ * Definitions for the VM debugging interface.
+ */
+
+#ifndef _MACH_DEBUG_VM_INFO_H_
+#define _MACH_DEBUG_VM_INFO_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_prot.h>
+#include <mach/memory_object.h>
+#include <stdint.h>
+
+/*
+ * Remember to update the mig type definitions
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+typedef struct vm_region_info {
+ rpc_vm_offset_t vri_start; /* start of region */
+ rpc_vm_offset_t vri_end; /* end of region */
+
+ vm_prot_t vri_protection; /* protection code */
+ vm_prot_t vri_max_protection; /* maximum protection */
+ vm_inherit_t vri_inheritance; /* inheritance */
+ unsigned int vri_wired_count; /* number of times wired */
+ unsigned int vri_user_wired_count; /* number of times user has wired */
+
+ rpc_vm_offset_t vri_object; /* the mapped object */
+ rpc_vm_offset_t vri_offset; /* offset into object */
+/*boolean_t*/integer_t vri_needs_copy; /* does object need to be copied? */
+ unsigned int vri_sharing; /* share map references */
+} vm_region_info_t;
+
+typedef vm_region_info_t *vm_region_info_array_t;
+
+
+typedef uint32_t vm_object_info_state_t;
+
+#define VOI_STATE_PAGER_CREATED 0x00000001
+#define VOI_STATE_PAGER_INITIALIZED 0x00000002
+#define VOI_STATE_PAGER_READY 0x00000004
+#define VOI_STATE_CAN_PERSIST 0x00000008
+#define VOI_STATE_INTERNAL 0x00000010
+#define VOI_STATE_TEMPORARY 0x00000020
+#define VOI_STATE_ALIVE 0x00000040
+#define VOI_STATE_LOCK_IN_PROGRESS 0x00000080
+#define VOI_STATE_LOCK_RESTART 0x00000100
+
+typedef struct vm_object_info {
+ rpc_vm_offset_t voi_object; /* this object */
+ rpc_vm_size_t voi_pagesize; /* object's page size */
+ rpc_vm_size_t voi_size; /* object size (valid if internal) */
+ unsigned int voi_ref_count; /* number of references */
+ unsigned int voi_resident_page_count; /* number of resident pages */
+ unsigned int voi_absent_count; /* number requested but not filled */
+ rpc_vm_offset_t voi_copy; /* copy object */
+ rpc_vm_offset_t voi_shadow; /* shadow object */
+ rpc_vm_offset_t voi_shadow_offset; /* offset into shadow object */
+ rpc_vm_offset_t voi_paging_offset; /* offset into memory object */
+ memory_object_copy_strategy_t voi_copy_strategy;
+ /* how to handle data copy */
+ rpc_vm_offset_t voi_last_alloc; /* offset of last allocation */
+ unsigned int voi_paging_in_progress; /* paging references */
+ vm_object_info_state_t voi_state; /* random state bits */
+} vm_object_info_t;
+
+typedef vm_object_info_t *vm_object_info_array_t;
+
+typedef uint32_t vm_page_info_state_t;
+
+#define VPI_STATE_BUSY 0x00000001
+#define VPI_STATE_WANTED 0x00000002
+#define VPI_STATE_TABLED 0x00000004
+#define VPI_STATE_FICTITIOUS 0x00000008
+#define VPI_STATE_PRIVATE 0x00000010
+#define VPI_STATE_ABSENT 0x00000020
+#define VPI_STATE_ERROR 0x00000040
+#define VPI_STATE_DIRTY 0x00000080
+#define VPI_STATE_PRECIOUS 0x00000100
+#define VPI_STATE_OVERWRITING 0x00000200
+#define VPI_STATE_INACTIVE 0x00000400
+#define VPI_STATE_ACTIVE 0x00000800
+#define VPI_STATE_LAUNDRY 0x00001000
+#define VPI_STATE_FREE 0x00002000
+#define VPI_STATE_REFERENCE 0x00004000
+
+#define VPI_STATE_PAGER 0x80000000 /* pager has the page */
+
+/* XXX: This structure holds a 32bit vpi_phys_addr. */
+typedef struct vm_page_info {
+ rpc_vm_offset_t vpi_offset; /* offset in object */
+ rpc_vm_offset_t vpi_phys_addr; /* physical address */
+ unsigned int vpi_wire_count; /* number of times wired */
+ vm_prot_t vpi_page_lock; /* XP access restrictions */
+ vm_prot_t vpi_unlock_request; /* outstanding unlock requests */
+ vm_page_info_state_t vpi_state; /* random state bits */
+} vm_page_info_t;
+
+typedef vm_page_info_t *vm_page_info_array_t;
+
+typedef struct vm_page_phys_info {
+ rpc_vm_offset_t vpi_offset; /* offset in object */
+ rpc_phys_addr_t vpi_phys_addr; /* physical address */
+ unsigned int vpi_wire_count; /* number of times wired */
+ vm_prot_t vpi_page_lock; /* XP access restrictions */
+ vm_prot_t vpi_unlock_request; /* outstanding unlock requests */
+ vm_page_info_state_t vpi_state; /* random state bits */
+} vm_page_phys_info_t;
+
+typedef vm_page_phys_info_t *vm_page_phys_info_array_t;
+
+#endif /* _MACH_DEBUG_VM_INFO_H_ */
diff --git a/include/string.h b/include/string.h
new file mode 100644
index 0000000..91c5fe4
--- /dev/null
+++ b/include/string.h
@@ -0,0 +1,55 @@
+/*
+ * String Handling Functions.
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * String handling functions.
+ *
+ */
+
+#ifndef _MACH_SA_SYS_STRING_H_
+#define _MACH_SA_SYS_STRING_H_
+
+#include <sys/types.h>
+
+extern void *memcpy (void *dest, const void *src, size_t n);
+
+extern void *memmove (void *dest, const void *src, size_t n);
+
+extern int memcmp (const void *s1, const void *s2, size_t n) __attribute__ ((pure));
+
+extern void *memset (void *s, int c, size_t n);
+
+extern char *strchr (const char *s, int c);
+
+extern char *strcpy (char *dest, const char *src);
+
+extern char *strncpy (char *dest, const char *src, size_t n);
+
+extern char *strsep (char **strp, const char *delim);
+
+extern int strcmp (const char *s1, const char *s2) __attribute__ ((pure));
+
+extern int strncmp (const char *s1, const char *s2, size_t n) __attribute__ ((pure));
+
+extern size_t strlen (const char *s) __attribute__ ((pure));
+
+extern char *strstr(const char *haystack, const char *needle);
+
+#endif /* _MACH_SA_SYS_STRING_H_ */
diff --git a/include/sys/reboot.h b/include/sys/reboot.h
new file mode 100644
index 0000000..21d421a
--- /dev/null
+++ b/include/sys/reboot.h
@@ -0,0 +1,135 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)reboot.h 7.5 (Berkeley) 6/27/88
+ */
+/*
+ * Warning: The contents of this file are deprecated;
+ * it should only ever be used for BSD and Mach 3 compatibility.
+ * As the above copyright notice suggests, this file originated in BSD;
+ * it is mostly the same, except the flags after RB_DFLTROOT
+ * have diverged from BSD.
+ */
+#ifndef _MACH_SYS_REBOOT_H_
+#define _MACH_SYS_REBOOT_H_
+
+/*
+ * Arguments to reboot system call.
+ * These are converted to switches, and passed to startup program,
+ * and on to init.
+ */
+#define RB_AUTOBOOT 0 /* flags for system auto-booting itself */
+
+#define RB_ASKNAME 0x01 /* -a: ask for file name to reboot from */
+#define RB_SINGLE 0x02 /* -s: reboot to single user only */
+#define RB_KDB 0x04 /* -d: kernel debugger symbols loaded */
+#define RB_HALT 0x08 /* -h: enter KDB at bootup */
+ /* for host_reboot(): don't reboot,
+ just halt */
+#define RB_INITNAME 0x10 /* -i: name given for /etc/init (unused) */
+#define RB_DFLTROOT 0x20 /* use compiled-in rootdev */
+#define RB_NOBOOTRC 0x20 /* -b: don't run /etc/rc.boot */
+#define RB_ALTBOOT 0x40 /* use /boot.old vs /boot */
+#define RB_UNIPROC 0x80 /* -u: start only one processor */
+
+#define RB_SHIFT 8 /* second byte is for ux */
+
+#define RB_DEBUGGER 0x1000 /* for host_reboot(): enter kernel
+ debugger from user level */
+
+/* Corresponding BSD definitions, where they disagree with the Mach flags. */
+#define BSD_RB_NOSYNC 0x04 /* dont sync before reboot */
+#define BSD_RB_KDB 0x40 /* give control to kernel debugger */
+#define BSD_RB_RDONLY 0x80 /* mount root fs read-only */
+#define BSD_RB_DUMP 0x100 /* dump kernel memory before reboot */
+#define BSD_RB_MINIROOT 0x200 /* mini-root present in memory at boot time */
+#define BSD_RB_CONFIG 0x400 /* invoke user configuration routing */
+
+
+/*
+ * Constants for converting boot-style device number to type,
+ * adaptor (uba, mba, etc), unit number and partition number.
+ * Type (== major device number) is in the low byte
+ * for backward compatibility. Except for that of the "magic
+ * number", each mask applies to the shifted value.
+ * Format:
+ * (4) (4) (4) (4) (8) (8)
+ * --------------------------------
+ * |MA | AD| CT| UN| PART | TYPE |
+ * --------------------------------
+ */
+#define B_ADAPTORSHIFT 24
+#define B_ADAPTORMASK 0x0f
+#define B_ADAPTOR(val) (((val) >> B_ADAPTORSHIFT) & B_ADAPTORMASK)
+#define B_CONTROLLERSHIFT 20
+#define B_CONTROLLERMASK 0xf
+#define B_CONTROLLER(val) (((val)>>B_CONTROLLERSHIFT) & B_CONTROLLERMASK)
+#define B_UNITSHIFT 16
+#define B_UNITMASK 0xf
+#define B_UNIT(val) (((val) >> B_UNITSHIFT) & B_UNITMASK)
+#define B_PARTITIONSHIFT 8
+#define B_PARTITIONMASK 0xff
+#define B_PARTITION(val) (((val) >> B_PARTITIONSHIFT) & B_PARTITIONMASK)
+#define B_TYPESHIFT 0
+#define B_TYPEMASK 0xff
+#define B_TYPE(val) (((val) >> B_TYPESHIFT) & B_TYPEMASK)
+
+#define B_MAGICMASK ((u_int)0xf0000000U)
+#define B_DEVMAGIC ((u_int)0xa0000000U)
+
+#define MAKEBOOTDEV(type, adaptor, controller, unit, partition) \
+ (((type) << B_TYPESHIFT) | ((adaptor) << B_ADAPTORSHIFT) | \
+ ((controller) << B_CONTROLLERSHIFT) | ((unit) << B_UNITSHIFT) | \
+ ((partition) << B_PARTITIONSHIFT) | B_DEVMAGIC)
+
+#endif /* _MACH_SYS_REBOOT_H_ */
diff --git a/include/sys/types.h b/include/sys/types.h
new file mode 100644
index 0000000..8d5af37
--- /dev/null
+++ b/include/sys/types.h
@@ -0,0 +1,88 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _MACH_SA_SYS_TYPES_H_
+#define _MACH_SA_SYS_TYPES_H_
+
+#include <mach/machine/vm_types.h>
+#include <stdint.h>
+
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef unsigned long size_t;
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef integer_t ssize_t;
+#endif
+
+typedef unsigned short dev_t; /* device id */
+typedef unsigned long gid_t; /* group id */
+typedef unsigned long ino_t; /* inode number */
+typedef unsigned short mode_t; /* permissions */
+typedef unsigned short nlink_t; /* link count */
+typedef natural_t off_t; /* file offset */
+typedef unsigned long uid_t; /* user id */
+
+
+/* Symbols allowed but not required by POSIX */
+
+typedef char * caddr_t; /* address of a (signed) char */
+
+#ifndef _TIME_T
+#define _TIME_T
+typedef unsigned long long time_t;
+#endif
+
+#define RAND_MAX 0x7fffffff
+
+/* Symbols not allowed by POSIX */
+#ifndef _POSIX_SOURCE
+
+/*
+ * Common type definitions that lots of old files seem to want.
+ */
+
+typedef unsigned char u_char; /* unsigned char */
+typedef unsigned short u_short; /* unsigned short */
+typedef unsigned int u_int; /* unsigned int */
+typedef unsigned long u_long; /* unsigned long */
+
+typedef unsigned int daddr_t; /* disk address */
+
+#define major(i) (((i) >> 8) & 0xFF)
+#define minor(i) ((i) & 0xFF)
+#define makedev(i,j) ((((i) & 0xFF) << 8) | ((j) & 0xFF))
+
+#define NBBY 8
+
+#ifndef NULL
+#define NULL ((void *) 0) /* the null pointer */
+#endif
+
+#endif /* _POSIX_SOURCE */
+
+#endif /* _MACH_SA_SYS_TYPES_H_ */
diff --git a/ipc/.gitignore b/ipc/.gitignore
new file mode 100644
index 0000000..b750932
--- /dev/null
+++ b/ipc/.gitignore
@@ -0,0 +1,2 @@
+notify.none.defs.c
+notify.none.msgids
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
new file mode 100644
index 0000000..f13c442
--- /dev/null
+++ b/ipc/ipc_entry.c
@@ -0,0 +1,214 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_entry.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Primitive functions to manipulate translation entries.
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/sched_prim.h>
+#include <kern/slab.h>
+#include <ipc/port.h>
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_object.h>
+
+struct kmem_cache ipc_entry_cache;
+
+/*
+ * Routine: ipc_entry_alloc
+ * Purpose:
+ * Allocate an entry out of the space.
+ * Conditions:
+ * The space must be write-locked. May allocate memory.
+ * Returns:
+ * KERN_SUCCESS An entry was allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
+ */
+
+kern_return_t
+ipc_entry_alloc(
+ ipc_space_t space,
+ mach_port_name_t *namep,
+ ipc_entry_t *entryp)
+{
+ kern_return_t kr;
+ ipc_entry_t entry;
+ rdxtree_key_t key;
+
+ if (!space->is_active) {
+ return KERN_INVALID_TASK;
+ }
+
+ kr = ipc_entry_get(space, namep, entryp);
+ if (kr == KERN_SUCCESS)
+ return kr;
+
+ entry = ie_alloc();
+ if (entry == IE_NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ kr = rdxtree_insert_alloc(&space->is_map, entry, &key);
+ if (kr) {
+ ie_free(entry);
+ return kr;
+ }
+ space->is_size += 1;
+
+ entry->ie_bits = 0;
+ entry->ie_object = IO_NULL;
+ entry->ie_request = 0;
+ entry->ie_name = (mach_port_name_t) key;
+
+ *entryp = entry;
+ *namep = (mach_port_name_t) key;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_entry_alloc_name
+ * Purpose:
+ * Allocates/finds an entry with a specific name.
+ * If an existing entry is returned, its type will be nonzero.
+ * Conditions:
+ * The space must be write-locked. May allocate memory.
+ * Returns:
+ * KERN_SUCCESS Found existing entry with same name.
+ * KERN_SUCCESS Allocated a new entry.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_entry_alloc_name(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_entry_t *entryp)
+{
+ kern_return_t kr;
+ ipc_entry_t entry, e, *prevp;
+ void **slot;
+ assert(MACH_PORT_NAME_VALID(name));
+
+ if (!space->is_active) {
+ return KERN_INVALID_TASK;
+ }
+
+ slot = rdxtree_lookup_slot(&space->is_map, (rdxtree_key_t) name);
+ if (slot != NULL)
+ entry = *(ipc_entry_t *) slot;
+
+ if (slot == NULL || entry == IE_NULL) {
+ entry = ie_alloc();
+ if (entry == IE_NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ entry->ie_bits = 0;
+ entry->ie_object = IO_NULL;
+ entry->ie_request = 0;
+ entry->ie_name = name;
+
+ if (slot != NULL)
+ rdxtree_replace_slot(slot, entry);
+ else {
+ kr = rdxtree_insert(&space->is_map,
+ (rdxtree_key_t) name, entry);
+ if (kr != KERN_SUCCESS) {
+ ie_free(entry);
+ return kr;
+ }
+ }
+ space->is_size += 1;
+
+ *entryp = entry;
+ return KERN_SUCCESS;
+ }
+
+ if (IE_BITS_TYPE(entry->ie_bits)) {
+ /* Used entry. */
+ *entryp = entry;
+ return KERN_SUCCESS;
+ }
+
+ /* Free entry. Rip the entry out of the free list. */
+ for (prevp = &space->is_free_list, e = space->is_free_list;
+ e != entry;
+ ({ prevp = &e->ie_next_free; e = e->ie_next_free; }))
+ continue;
+
+ *prevp = entry->ie_next_free;
+ space->is_free_list_size -= 1;
+
+ entry->ie_bits = 0;
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_name == name);
+ entry->ie_request = 0;
+
+ space->is_size += 1;
+ *entryp = entry;
+ return KERN_SUCCESS;
+}
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#include <kern/task.h>
+
+#define printf kdbprintf
+
+ipc_entry_t
+db_ipc_object_by_name(
+ const task_t task,
+ mach_port_name_t name)
+{
+ ipc_space_t space = task->itk_space;
+ ipc_entry_t entry;
+
+ entry = ipc_entry_lookup(space, name);
+ if(entry != IE_NULL) {
+ iprintf("(task 0x%x, name 0x%x) ==> object 0x%x",
+ task, name, entry->ie_object);
+ return (ipc_entry_t) entry->ie_object;
+ }
+ return entry;
+}
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
new file mode 100644
index 0000000..9f7b593
--- /dev/null
+++ b/ipc/ipc_entry.h
@@ -0,0 +1,110 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_entry.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for translation entries, which represent
+ * tasks' capabilities for ports and port sets.
+ */
+
+#ifndef _IPC_IPC_ENTRY_H_
+#define _IPC_IPC_ENTRY_H_
+
+#include <mach/mach_types.h>
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/slab.h>
+#include <ipc/port.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_types.h>
+
+/*
+ * Spaces hold capabilities for ipc_object_t's (ports and port sets).
+ * Each ipc_entry_t records a capability.
+ */
+
+typedef unsigned int ipc_entry_bits_t;
+typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */
+
+typedef struct ipc_entry {
+ mach_port_name_t ie_name;
+ ipc_entry_bits_t ie_bits;
+ struct ipc_object *ie_object;
+ union {
+ struct ipc_entry *next_free;
+ /*XXX ipc_port_request_index_t request;*/
+ unsigned int request;
+ } index;
+} *ipc_entry_t;
+
+#define IE_NULL ((ipc_entry_t) 0)
+
+#define ie_request index.request
+#define ie_next_free index.next_free
+
+#define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */
+#define IE_BITS_UREFS(bits) ((bits) & IE_BITS_UREFS_MASK)
+
+#define IE_BITS_TYPE_MASK 0x001f0000 /* 5 bits of capability type */
+#define IE_BITS_TYPE(bits) ((bits) & IE_BITS_TYPE_MASK)
+
+#define IE_BITS_MAREQUEST 0x00200000 /* 1 bit for msg-accepted */
+
+#define IE_BITS_RIGHT_MASK 0x003fffff /* relevant to the right */
+
+#if PORT_GENERATIONS
+#error "not supported"
+#define IE_BITS_GEN_MASK 0xff000000U /* 8 bits for generation */
+#define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK)
+#define IE_BITS_GEN_ONE 0x01000000 /* low bit of generation */
+#else
+#define IE_BITS_GEN_MASK 0
+#define IE_BITS_GEN(bits) 0
+#define IE_BITS_GEN_ONE 0
+#endif
+
+
+extern struct kmem_cache ipc_entry_cache;
+#define ie_alloc() ((ipc_entry_t) kmem_cache_alloc(&ipc_entry_cache))
+#define ie_free(e) kmem_cache_free(&ipc_entry_cache, (vm_offset_t) (e))
+
+extern kern_return_t
+ipc_entry_alloc(ipc_space_t space, mach_port_name_t *namep, ipc_entry_t *entryp);
+
+extern kern_return_t
+ipc_entry_alloc_name(ipc_space_t space, mach_port_name_t name, ipc_entry_t *entryp);
+
+ipc_entry_t
+db_ipc_object_by_name(
+ task_t task,
+ mach_port_name_t name);
+
+#endif /* _IPC_IPC_ENTRY_H_ */
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
new file mode 100644
index 0000000..8e628ad
--- /dev/null
+++ b/ipc/ipc_init.c
@@ -0,0 +1,117 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_init.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to initialize the IPC system.
+ */
+
+#include <mach/kern_return.h>
+#include <kern/ipc_host.h>
+#include <kern/slab.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_init.h>
+
+
+
+static struct vm_map ipc_kernel_map_store;
+vm_map_t ipc_kernel_map = &ipc_kernel_map_store;
+const vm_size_t ipc_kernel_map_size = 8 * 1024 * 1024;
+
+/*
+ * Routine: ipc_bootstrap
+ * Purpose:
+ * Initialization needed before the kernel task
+ * can be created.
+ */
+
+void
+ipc_bootstrap(void)
+{
+ kern_return_t kr;
+
+ ipc_port_multiple_lock_init();
+
+ ipc_port_timestamp_lock_init();
+ ipc_port_timestamp_data = 0;
+
+ kmem_cache_init(&ipc_space_cache, "ipc_space",
+ sizeof(struct ipc_space), 0, NULL, 0);
+
+ kmem_cache_init(&ipc_entry_cache, "ipc_entry",
+ sizeof(struct ipc_entry), 0, NULL, 0);
+
+ kmem_cache_init(&ipc_object_caches[IOT_PORT], "ipc_port",
+ sizeof(struct ipc_port), 0, NULL, 0);
+
+ kmem_cache_init(&ipc_object_caches[IOT_PORT_SET], "ipc_pset",
+ sizeof(struct ipc_pset), 0, NULL, 0);
+
+ /* create special spaces */
+
+ kr = ipc_space_create_special(&ipc_space_kernel);
+ assert(kr == KERN_SUCCESS);
+
+ kr = ipc_space_create_special(&ipc_space_reply);
+ assert(kr == KERN_SUCCESS);
+
+ /* initialize modules with hidden data structures */
+
+ ipc_table_init();
+ ipc_notify_init();
+ ipc_marequest_init();
+}
+
+/*
+ * Routine: ipc_init
+ * Purpose:
+ * Final initialization of the IPC system.
+ */
+
+void
+ipc_init(void)
+{
+ vm_offset_t min, max;
+
+ kmem_submap(ipc_kernel_map, kernel_map, &min, &max,
+ ipc_kernel_map_size);
+
+ ipc_host_init();
+}
diff --git a/ipc/ipc_init.h b/ipc/ipc_init.h
new file mode 100644
index 0000000..8dd64bb
--- /dev/null
+++ b/ipc/ipc_init.h
@@ -0,0 +1,50 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_init.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of functions to initialize the IPC system.
+ */
+
+#ifndef _IPC_IPC_INIT_H_
+#define _IPC_IPC_INIT_H_
+
+/*
+ * Exported interfaces
+ */
+
+/* IPC initialization needed before creation of kernel task */
+extern void ipc_bootstrap(void);
+
+/* Remaining IPC initialization */
+extern void ipc_init(void);
+
+#endif /* _IPC_IPC_INIT_H_ */
diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
new file mode 100644
index 0000000..bd84380
--- /dev/null
+++ b/ipc/ipc_kmsg.c
@@ -0,0 +1,2904 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_kmsg.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Operations on kernel messages.
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/port.h>
+#include <machine/locore.h>
+#include <machine/copy_user.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/kalloc.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+
+#include <ipc/ipc_machdep.h>
+
+#include <device/net_io.h>
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#include <ipc/ipc_print.h>
+#endif
+
+
+ipc_kmsg_t ipc_kmsg_cache[NCPUS];
+
+/*
+ * Routine: ipc_kmsg_enqueue
+ * Purpose:
+ * Enqueue a kmsg.
+ */
+
+void
+ipc_kmsg_enqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_enqueue_macro(queue, kmsg);
+}
+
+/*
+ * Routine: ipc_kmsg_dequeue
+ * Purpose:
+ * Dequeue and return a kmsg.
+ */
+
+ipc_kmsg_t
+ipc_kmsg_dequeue(
+ ipc_kmsg_queue_t queue)
+{
+ ipc_kmsg_t first;
+
+ first = ipc_kmsg_queue_first(queue);
+
+ if (first != IKM_NULL)
+ ipc_kmsg_rmqueue_first_macro(queue, first);
+
+ return first;
+}
+
+/*
+ * Routine: ipc_kmsg_rmqueue
+ * Purpose:
+ * Pull a kmsg out of a queue.
+ */
+
+void
+ipc_kmsg_rmqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_t next, prev;
+
+ assert(queue->ikmq_base != IKM_NULL);
+
+ next = kmsg->ikm_next;
+ prev = kmsg->ikm_prev;
+
+ if (next == kmsg) {
+ assert(prev == kmsg);
+ assert(queue->ikmq_base == kmsg);
+
+ queue->ikmq_base = IKM_NULL;
+ } else {
+ if (queue->ikmq_base == kmsg)
+ queue->ikmq_base = next;
+
+ next->ikm_prev = prev;
+ prev->ikm_next = next;
+ }
+ ikm_mark_bogus (kmsg);
+}
+
+/*
+ * Routine: ipc_kmsg_queue_next
+ * Purpose:
+ * Return the kmsg following the given kmsg.
+ * (Or IKM_NULL if it is the last one in the queue.)
+ */
+
+ipc_kmsg_t
+ipc_kmsg_queue_next(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_t next;
+
+ assert(queue->ikmq_base != IKM_NULL);
+
+ next = kmsg->ikm_next;
+ if (queue->ikmq_base == next)
+ next = IKM_NULL;
+
+ return next;
+}
+
+/*
+ * Routine: ipc_kmsg_destroy
+ * Purpose:
+ * Destroys a kernel message. Releases all rights,
+ * references, and memory held by the message.
+ * Frees the message.
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_destroy(
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_queue_t queue;
+ boolean_t empty;
+
+ /*
+ * ipc_kmsg_clean can cause more messages to be destroyed.
+ * Curtail recursion by queueing messages. If a message
+ * is already queued, then this is a recursive call.
+ */
+
+ queue = &current_thread()->ith_messages;
+ empty = ipc_kmsg_queue_empty(queue);
+ ipc_kmsg_enqueue(queue, kmsg);
+
+ if (empty) {
+ /* must leave kmsg in queue while cleaning it */
+
+ while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) {
+ ipc_kmsg_clean(kmsg);
+ ipc_kmsg_rmqueue(queue, kmsg);
+ ikm_free(kmsg);
+ }
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_clean_body
+ * Purpose:
+ * Cleans the body of a kernel message.
+ * Releases all rights, references, and memory.
+ *
+ * The last type/data pair might stretch past eaddr.
+ * (See the usage in ipc_kmsg_copyout.)
+ * Conditions:
+ * No locks held.
+ */
+
+static void
+ipc_kmsg_clean_body(
+ vm_offset_t saddr,
+ vm_offset_t eaddr)
+{
+ while (saddr < eaddr) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, is_port;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) saddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ if (((mach_msg_type_t*)type)->msgt_longform) {
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_long_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ }
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_port) {
+ ipc_object_t *objects;
+ mach_msg_type_number_t i;
+
+ if (is_inline) {
+ objects = (ipc_object_t *) saddr;
+ /* sanity check */
+ while (eaddr < (vm_offset_t)&objects[number]) number--;
+ } else {
+ objects = (ipc_object_t *)
+ * (vm_offset_t *) saddr;
+ }
+
+ /* destroy port rights carried in the message */
+
+ for (i = 0; i < number; i++) {
+ ipc_object_t object = objects[i];
+
+ if (!IO_VALID(object))
+ continue;
+
+ ipc_object_destroy(object, name);
+ }
+ }
+
+ if (is_inline) {
+ saddr += length;
+ } else {
+ vm_offset_t data = * (vm_offset_t *) saddr;
+
+ /* destroy memory carried in the message */
+
+ if (length == 0)
+ assert(data == 0);
+ else if (is_port)
+ kfree(data, length);
+ else
+ vm_map_copy_discard((vm_map_copy_t) data);
+
+ saddr += sizeof(vm_offset_t);
+ }
+ saddr = mach_msg_kernel_align(saddr);
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_clean
+ * Purpose:
+ * Cleans a kernel message. Releases all rights,
+ * references, and memory held by the message.
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_clean(ipc_kmsg_t kmsg)
+{
+ ipc_marequest_t marequest;
+ ipc_object_t object;
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+
+ marequest = kmsg->ikm_marequest;
+ if (marequest != IMAR_NULL)
+ ipc_marequest_destroy(marequest);
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ if (IO_VALID(object))
+ ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ if (IO_VALID(object))
+ ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
+
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ vm_offset_t saddr, eaddr;
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header +
+ kmsg->ikm_header.msgh_size;
+
+ ipc_kmsg_clean_body(saddr, eaddr);
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_clean_partial
+ * Purpose:
+ * Cleans a partially-acquired kernel message.
+ * eaddr is the address of the type specification
+ * in the body of the message that contained the error.
+ * If dolast, the memory and port rights in this last
+ * type spec are also cleaned. In that case, number
+ * specifies the number of port rights to clean.
+ * Conditions:
+ * Nothing locked.
+ */
+
+static void
+ipc_kmsg_clean_partial(
+ ipc_kmsg_t kmsg,
+ vm_offset_t eaddr,
+ boolean_t dolast,
+ mach_msg_type_number_t number)
+{
+ ipc_object_t object;
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ vm_offset_t saddr;
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ assert(IO_VALID(object));
+ ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ if (IO_VALID(object))
+ ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ ipc_kmsg_clean_body(saddr, eaddr);
+
+ if (dolast) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t rnumber;
+ boolean_t is_inline, is_port;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) eaddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ if (((mach_msg_type_t*)type)->msgt_longform) {
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ rnumber = type->msgtl_number;
+ eaddr += sizeof(mach_msg_type_long_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_long_t))) {
+ eaddr = mach_msg_kernel_align(eaddr);
+ }
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ rnumber = ((mach_msg_type_t*)type)->msgt_number;
+ eaddr += sizeof(mach_msg_type_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_t))) {
+ eaddr = mach_msg_kernel_align(eaddr);
+ }
+ }
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((rnumber * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_port) {
+ ipc_object_t *objects;
+ mach_msg_type_number_t i;
+
+ objects = (ipc_object_t *)
+ (is_inline ? eaddr : * (vm_offset_t *) eaddr);
+
+ /* destroy port rights carried in the message */
+
+ for (i = 0; i < number; i++) {
+ ipc_object_t obj = objects[i];
+
+ if (!IO_VALID(obj))
+ continue;
+
+ ipc_object_destroy(obj, name);
+ }
+ }
+
+ if (!is_inline) {
+ vm_offset_t data = * (vm_offset_t *) eaddr;
+
+ /* destroy memory carried in the message */
+
+ if (length == 0)
+ assert(data == 0);
+ else if (is_port)
+ kfree(data, length);
+ else
+ vm_map_copy_discard((vm_map_copy_t) data);
+ }
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_free
+ * Purpose:
+ * Free a kernel message buffer.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_free(ipc_kmsg_t kmsg)
+{
+ vm_size_t size = kmsg->ikm_size;
+
+ switch (size) {
+
+ case IKM_SIZE_NETWORK:
+ /* return it to the network code */
+ net_kmsg_put(kmsg);
+ break;
+
+ default:
+ kfree((vm_offset_t) kmsg, size);
+ break;
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_get
+ * Purpose:
+ * Allocates a kernel message buffer.
+ * Copies a user message to the message buffer.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Acquired a message buffer.
+ * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
+ * MACH_SEND_MSG_TOO_SMALL Message size not long-word multiple.
+ * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
+ * MACH_SEND_INVALID_DATA Couldn't copy message data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_get(
+ mach_msg_user_header_t *msg,
+ mach_msg_size_t size,
+ ipc_kmsg_t *kmsgp)
+{
+ ipc_kmsg_t kmsg;
+ mach_msg_size_t ksize = size * IKM_EXPAND_FACTOR;
+
+ if ((size < sizeof(mach_msg_user_header_t)) || mach_msg_user_is_misaligned(size))
+ return MACH_SEND_MSG_TOO_SMALL;
+
+ if (ksize <= IKM_SAVED_MSG_SIZE) {
+ kmsg = ikm_cache_alloc();
+ if (kmsg == IKM_NULL)
+ return MACH_SEND_NO_BUFFER;
+ } else {
+ kmsg = ikm_alloc(ksize);
+ if (kmsg == IKM_NULL)
+ return MACH_SEND_NO_BUFFER;
+ ikm_init(kmsg, ksize);
+ }
+
+ if (copyinmsg(msg, &kmsg->ikm_header, size, kmsg->ikm_size)) {
+ ikm_free(kmsg);
+ return MACH_SEND_INVALID_DATA;
+ }
+
+ *kmsgp = kmsg;
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_get_from_kernel
+ * Purpose:
+ * Allocates a kernel message buffer.
+ * Copies a kernel message to the message buffer.
+ * Only resource errors are allowed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Acquired a message buffer.
+ * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
+ */
+
+extern mach_msg_return_t
+ipc_kmsg_get_from_kernel(
+ mach_msg_header_t *msg,
+ mach_msg_size_t size,
+ ipc_kmsg_t *kmsgp)
+{
+ ipc_kmsg_t kmsg;
+
+ assert(size >= sizeof(mach_msg_header_t));
+ assert(!mach_msg_kernel_is_misaligned(size));
+
+ kmsg = ikm_alloc(size);
+ if (kmsg == IKM_NULL)
+ return MACH_SEND_NO_BUFFER;
+ ikm_init(kmsg, size);
+
+ memcpy(&kmsg->ikm_header, msg, size);
+
+ kmsg->ikm_header.msgh_size = size;
+ *kmsgp = kmsg;
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_put
+ * Purpose:
+ * Copies a message buffer to a user message.
+ * Copies only the specified number of bytes.
+ * Frees the message buffer.
+ * Conditions:
+ * Nothing locked. The message buffer must have clean
+ * header (ikm_marequest) fields.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied data out of message buffer.
+ * MACH_RCV_INVALID_DATA Couldn't copy to user message.
+ */
+
+mach_msg_return_t
+ipc_kmsg_put(
+ mach_msg_user_header_t *msg,
+ ipc_kmsg_t kmsg,
+ mach_msg_size_t size)
+{
+ mach_msg_return_t mr;
+
+ ikm_check_initialized(kmsg, kmsg->ikm_size);
+
+ if (copyoutmsg(&kmsg->ikm_header, msg, size))
+ mr = MACH_RCV_INVALID_DATA;
+ else
+ mr = MACH_MSG_SUCCESS;
+
+ ikm_cache_free(kmsg);
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_put_to_kernel
+ * Purpose:
+ * Copies a message buffer to a kernel message.
+ * Frees the message buffer.
+ * No errors allowed.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_put_to_kernel(
+ mach_msg_header_t *msg,
+ ipc_kmsg_t kmsg,
+ mach_msg_size_t size)
+{
+#if DIPC
+ assert(!KMSG_IN_DIPC(kmsg));
+#endif /* DIPC */
+
+ memcpy(msg, &kmsg->ikm_header, size);
+
+ ikm_free(kmsg);
+}
+
+/*
+ * Routine: ipc_kmsg_copyin_header
+ * Purpose:
+ * "Copy-in" port rights in the header of a message.
+ * Operates atomically; if it doesn't succeed the
+ * message header and the space are left untouched.
+ * If it does succeed the remote/local port fields
+ * contain object pointers instead of port names,
+ * and the bits field is updated. The destination port
+ * will be a valid port pointer.
+ *
+ * The notify argument implements the MACH_SEND_CANCEL option.
+ * If it is not MACH_PORT_NULL, it should name a receive right.
+ * If the processing of the destination port would generate
+ * a port-deleted notification (because the right for the
+ * destination port is destroyed and it had a request for
+ * a dead-name notification registered), and the port-deleted
+ * notification would be sent to the named receive right,
+ * then it isn't sent and the send-once right for the notify
+ * port is quietly destroyed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_DEST The space is dead.
+ * MACH_SEND_INVALID_NOTIFY
+ * Notify is non-null and doesn't name a receive right.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin_header(
+ mach_msg_header_t *msg,
+ ipc_space_t space,
+ mach_port_name_t notify)
+{
+ /* MACH_MSGH_BITS_CIRCULAR is kernel-internal; never trust it
+ from the sender, always start with it cleared. */
+ mach_msg_bits_t mbits = msg->msgh_bits &~ MACH_MSGH_BITS_CIRCULAR;
+ /*
+ * TODO: For 64 bits, msgh_remote_port as written by user space
+ * is 4 bytes long but here we assume it is the same size as a pointer.
+ * When copying the message to the kernel, we need to perform the
+ * conversion so that port names are parsed correctly.
+ *
+ * When copying the message out of the kernel to user space, we also need
+ * to be careful with the reverse translation.
+ */
+
+ mach_port_name_t dest_name = (mach_port_name_t)msg->msgh_remote_port;
+ mach_port_name_t reply_name = (mach_port_name_t)msg->msgh_local_port;
+ kern_return_t kr;
+
+#ifndef MIGRATING_THREADS
+ /* first check for common cases */
+
+ /* Fast paths: only taken when no MACH_SEND_CANCEL notify port is
+ supplied; each case falls through to the general path on any
+ complication (dead space, dead port, unexpected right type). */
+ if (notify == MACH_PORT_NULL) switch (MACH_MSGH_BITS_PORTS(mbits)) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0): {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port;
+
+ /* sending an asynchronous message */
+
+ if (reply_name != MACH_PORT_NULL)
+ break;
+
+ is_read_lock(space);
+ if (!space->is_active)
+ goto abort_async;
+
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, dest_name);
+ goto abort_async;
+ }
+ bits = entry->ie_bits;
+
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
+ goto abort_async;
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ /* can unlock space now without compromising atomicity */
+ is_read_unlock(space);
+
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ break;
+ }
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+ ip_unlock(dest_port);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_async:
+ is_read_unlock(space);
+ break;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port, reply_port;
+
+ /* sending a request message */
+
+ is_read_lock(space);
+ if (!space->is_active)
+ goto abort_request;
+
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, dest_name);
+ goto abort_request;
+ }
+ bits = entry->ie_bits;
+
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
+ goto abort_request;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ entry = ipc_entry_lookup (space, reply_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, reply_name);
+ goto abort_request;
+ }
+ bits = entry->ie_bits;
+
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_RECEIVE)
+ goto abort_request;
+
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+
+ /*
+ * To do an atomic copyin, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) || !ip_lock_try(reply_port)) {
+ ip_unlock(dest_port);
+ goto abort_request;
+ }
+ /* can unlock space now without compromising atomicity */
+ is_read_unlock(space);
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+ ip_unlock(dest_port);
+
+ assert(ip_active(reply_port));
+ assert(reply_port->ip_receiver_name == reply_name);
+ assert(reply_port->ip_receiver == space);
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+ ip_unlock(reply_port);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ msg->msgh_local_port = (mach_port_t) reply_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_request:
+ is_read_unlock(space);
+ break;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port;
+
+ /* sending a reply message */
+
+ if (reply_name != MACH_PORT_NULL)
+ break;
+
+ /* write lock: a successful move-sonce destroys the entry */
+ is_write_lock(space);
+ if (!space->is_active)
+ goto abort_reply;
+
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, dest_name);
+ goto abort_reply;
+ }
+ bits = entry->ie_bits;
+
+ /* check and type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND_ONCE)
+ goto abort_reply;
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ /* a registered notification request needs the slow path */
+ if (entry->ie_request != 0)
+ goto abort_reply;
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ goto abort_reply;
+ }
+
+ assert(dest_port->ip_sorights > 0);
+ ip_unlock(dest_port);
+
+ /* the entry's ref on the port moves into the message */
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc (space, dest_name, entry);
+ is_write_unlock(space);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ 0));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_reply:
+ is_write_unlock(space);
+ break;
+ }
+
+ default:
+ /* don't bother optimizing */
+ break;
+ }
+#endif /* MIGRATING_THREADS */
+
+ /* General (slow) path: fully atomic copyin of both header rights,
+ including all 25 dest_type/reply_type combinations and the
+ MACH_SEND_CANCEL check. */
+ {
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ ipc_object_t dest_port, reply_port;
+ ipc_port_t dest_soright, reply_soright;
+ ipc_port_t notify_port = 0; /* '=0' to quiet gcc warnings */
+
+ if (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type))
+ return MACH_SEND_INVALID_HEADER;
+
+ /* a null reply port requires a zero disposition; a non-zero
+ disposition must be some kind of send right */
+ if ((reply_type == 0) ?
+ (reply_name != MACH_PORT_NULL) :
+ !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))
+ return MACH_SEND_INVALID_HEADER;
+
+ is_write_lock(space);
+ if (!space->is_active)
+ goto invalid_dest;
+
+ if (notify != MACH_PORT_NULL) {
+ ipc_entry_t entry;
+
+ if (((entry = ipc_entry_lookup(space, notify)) == IE_NULL) ||
+ ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)) {
+ if (entry == IE_NULL)
+ ipc_entry_lookup_failed (msg, notify);
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ notify_port = (ipc_port_t) entry->ie_object;
+ }
+
+ if (dest_name == reply_name) {
+ ipc_entry_t entry;
+ mach_port_name_t name = dest_name;
+
+ /*
+ * Destination and reply ports are the same!
+ * This is a little tedious to make atomic, because
+ * there are 25 combinations of dest_type/reply_type.
+ * However, most are easy. If either is move-sonce,
+ * then there must be an error. If either are
+ * make-send or make-sonce, then we must be looking
+ * at a receive right so the port can't die.
+ * The hard cases are the combinations of
+ * copy-send and make-send.
+ */
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry == IE_NULL) {
+ ipc_entry_lookup_failed (msg, name);
+ goto invalid_dest;
+ }
+
+ assert(reply_type != 0); /* because name not null */
+
+ if (!ipc_right_copyin_check(space, name, entry, reply_type))
+ goto invalid_reply;
+
+ if ((dest_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) ||
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE)) {
+ /*
+ * Why must there be an error? To get a valid
+ * destination, this entry must name a live
+ * port (not a dead name or dead port). However
+ * a successful move-sonce will destroy a
+ * live entry. Therefore the other copyin,
+ * whatever it is, would fail. We've already
+ * checked for reply port errors above,
+ * so report a destination error.
+ */
+
+ goto invalid_dest;
+ } else if ((dest_type == MACH_MSG_TYPE_MAKE_SEND) ||
+ (dest_type == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
+ (reply_type == MACH_MSG_TYPE_MAKE_SEND) ||
+ (reply_type == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
+ kr = ipc_right_copyin(space, name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /*
+ * Either dest or reply needs a receive right.
+ * We know the receive right is there, because
+ * of the copyin_check and copyin calls. Hence
+ * the port is not in danger of dying. If dest
+ * used the receive right, then the right needed
+ * by reply (and verified by copyin_check) will
+ * still be there.
+ */
+
+ assert(IO_VALID(dest_port));
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(dest_soright == IP_NULL);
+
+ kr = ipc_right_copyin(space, name, entry,
+ reply_type, TRUE,
+ &reply_port, &reply_soright);
+
+ assert(kr == KERN_SUCCESS);
+ assert(reply_port == dest_port);
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(reply_soright == IP_NULL);
+ } else if ((dest_type == MACH_MSG_TYPE_COPY_SEND) &&
+ (reply_type == MACH_MSG_TYPE_COPY_SEND)) {
+ /*
+ * To make this atomic, just do one copy-send,
+ * and dup the send right we get out.
+ */
+
+ kr = ipc_right_copyin(space, name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
+ assert(dest_soright == IP_NULL);
+
+ /*
+ * It's OK if the port we got is dead now,
+ * so reply_port is IP_DEAD, because the msg
+ * won't go anywhere anyway.
+ */
+
+ reply_port = (ipc_object_t)
+ ipc_port_copy_send((ipc_port_t) dest_port);
+ reply_soright = IP_NULL;
+ } else if ((dest_type == MACH_MSG_TYPE_MOVE_SEND) &&
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND)) {
+ /*
+ * This is an easy case. Just use our
+ * handy-dandy special-purpose copyin call
+ * to get two send rights for the price of one.
+ */
+
+ kr = ipc_right_copyin_two(space, name, entry,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ reply_port = dest_port;
+ reply_soright = IP_NULL;
+ } else {
+ ipc_port_t soright;
+
+ assert(((dest_type == MACH_MSG_TYPE_COPY_SEND) &&
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND)) ||
+ ((dest_type == MACH_MSG_TYPE_MOVE_SEND) &&
+ (reply_type == MACH_MSG_TYPE_COPY_SEND)));
+
+ /*
+ * To make this atomic, just do a move-send,
+ * and dup the send right we get out.
+ */
+
+ kr = ipc_right_copyin(space, name, entry,
+ MACH_MSG_TYPE_MOVE_SEND, FALSE,
+ &dest_port, &soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ /*
+ * It's OK if the port we got is dead now,
+ * so reply_port is IP_DEAD, because the msg
+ * won't go anywhere anyway.
+ */
+
+ reply_port = (ipc_object_t)
+ ipc_port_copy_send((ipc_port_t) dest_port);
+
+ /* the port-deleted notification (soright)
+ belongs to whichever right was moved */
+ if (dest_type == MACH_MSG_TYPE_MOVE_SEND) {
+ dest_soright = soright;
+ reply_soright = IP_NULL;
+ } else {
+ dest_soright = IP_NULL;
+ reply_soright = soright;
+ }
+ }
+ } else if (!MACH_PORT_NAME_VALID(reply_name)) {
+ ipc_entry_t entry;
+
+ /*
+ * No reply port! This is an easy case
+ * to make atomic. Just copyin the destination.
+ */
+
+ entry = ipc_entry_lookup(space, dest_name);
+ if (entry == IE_NULL) {
+ ipc_entry_lookup_failed (msg, dest_name);
+ goto invalid_dest;
+ }
+
+ kr = ipc_right_copyin(space, dest_name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, dest_name, entry);
+
+ reply_port = (ipc_object_t) invalid_name_to_port(reply_name);
+ reply_soright = IP_NULL;
+ } else {
+ ipc_entry_t dest_entry, reply_entry;
+ ipc_port_t saved_reply;
+
+ /*
+ * This is the tough case to make atomic.
+ * The difficult problem is serializing with port death.
+ * At the time we copyin dest_port, it must be alive.
+ * If reply_port is alive when we copyin it, then
+ * we are OK, because we serialize before the death
+ * of both ports. Assume reply_port is dead at copyin.
+ * Then if dest_port dies/died after reply_port died,
+ * we are OK, because we serialize between the death
+ * of the two ports. So the bad case is when dest_port
+ * dies after its copyin, reply_port dies before its
+ * copyin, and dest_port dies before reply_port. Then
+ * the copyins operated as if dest_port was alive
+ * and reply_port was dead, which shouldn't have happened
+ * because they died in the other order.
+ *
+ * We handle the bad case by undoing the copyins
+ * (which is only possible because the ports are dead)
+ * and failing with MACH_SEND_INVALID_DEST, serializing
+ * after the death of the ports.
+ *
+ * Note that it is easy for a user task to tell if
+ * a copyin happened before or after a port died.
+ * For example, suppose both dest and reply are
+ * send-once rights (types are both move-sonce) and
+ * both rights have dead-name requests registered.
+ * If a port dies before copyin, a dead-name notification
+ * is generated and the dead name's urefs are incremented,
+ * and if the copyin happens first, a port-deleted
+ * notification is generated.
+ *
+ * Note that although the entries are different,
+ * dest_port and reply_port might still be the same.
+ */
+
+ dest_entry = ipc_entry_lookup(space, dest_name);
+ if (dest_entry == IE_NULL) {
+ ipc_entry_lookup_failed (msg, dest_name);
+ goto invalid_dest;
+ }
+
+ reply_entry = ipc_entry_lookup(space, reply_name);
+ if (reply_entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, reply_name);
+ goto invalid_reply;
+ }
+
+ assert(dest_entry != reply_entry); /* names are not equal */
+ assert(reply_type != 0); /* because reply_name not null */
+
+ if (!ipc_right_copyin_check(space, reply_name, reply_entry,
+ reply_type))
+ goto invalid_reply;
+
+ kr = ipc_right_copyin(space, dest_name, dest_entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ assert(IO_VALID(dest_port));
+
+ saved_reply = (ipc_port_t) reply_entry->ie_object;
+ /* might be IP_NULL, if this is a dead name */
+ if (saved_reply != IP_NULL)
+ ipc_port_reference(saved_reply);
+
+ kr = ipc_right_copyin(space, reply_name, reply_entry,
+ reply_type, TRUE,
+ &reply_port, &reply_soright);
+ assert(kr == KERN_SUCCESS);
+
+ if ((saved_reply != IP_NULL) && (reply_port == IO_DEAD)) {
+ ipc_port_t dest = (ipc_port_t) dest_port;
+ ipc_port_timestamp_t timestamp;
+ boolean_t must_undo;
+
+ /*
+ * The reply port died before copyin.
+ * Check if dest port died before reply.
+ */
+
+ ip_lock(saved_reply);
+ assert(!ip_active(saved_reply));
+ timestamp = saved_reply->ip_timestamp;
+ ip_unlock(saved_reply);
+
+ ip_lock(dest);
+ must_undo = (!ip_active(dest) &&
+ IP_TIMESTAMP_ORDER(dest->ip_timestamp,
+ timestamp));
+ ip_unlock(dest);
+
+ if (must_undo) {
+ /*
+ * Our worst nightmares are realized.
+ * Both destination and reply ports
+ * are dead, but in the wrong order,
+ * so we must undo the copyins and
+ * possibly generate a dead-name notif.
+ */
+
+ ipc_right_copyin_undo(
+ space, dest_name, dest_entry,
+ dest_type, dest_port,
+ dest_soright);
+ /* dest_entry may be deallocated now */
+
+ ipc_right_copyin_undo(
+ space, reply_name, reply_entry,
+ reply_type, reply_port,
+ reply_soright);
+ /* reply_entry may be deallocated now */
+
+ is_write_unlock(space);
+
+ if (dest_soright != IP_NULL)
+ ipc_notify_dead_name(dest_soright,
+ dest_name);
+ assert(reply_soright == IP_NULL);
+
+ ipc_port_release(saved_reply);
+ return MACH_SEND_INVALID_DEST;
+ }
+ }
+
+ /* the entries might need to be deallocated */
+
+ if (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, reply_name, reply_entry);
+
+ if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, dest_name, dest_entry);
+
+ if (saved_reply != IP_NULL)
+ ipc_port_release(saved_reply);
+ }
+
+ /*
+ * At this point, dest_port, reply_port,
+ * dest_soright, reply_soright are all initialized.
+ * Any defunct entries have been deallocated.
+ * The space is still write-locked, and we need to
+ * make the MACH_SEND_CANCEL check. The notify_port pointer
+ * is still usable, because the copyin code above won't ever
+ * deallocate a receive right, so its entry still exists
+ * and holds a ref. Note notify_port might even equal
+ * dest_port or reply_port.
+ */
+
+ if ((notify != MACH_PORT_NULL) &&
+ (dest_soright == notify_port)) {
+ ipc_port_release_sonce(dest_soright);
+ dest_soright = IP_NULL;
+ }
+
+ is_write_unlock(space);
+
+ if (dest_soright != IP_NULL)
+ ipc_notify_port_deleted(dest_soright, dest_name);
+
+ if (reply_soright != IP_NULL)
+ ipc_notify_port_deleted(reply_soright, reply_name);
+
+ /* translate the copyin dispositions into the resulting
+ port-right types carried in the kernel-internal header */
+ dest_type = ipc_object_copyin_type(dest_type);
+ reply_type = ipc_object_copyin_type(reply_type);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(dest_type, reply_type));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ msg->msgh_local_port = (mach_port_t) reply_port;
+ }
+
+ return MACH_MSG_SUCCESS;
+
+ invalid_dest:
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_DEST;
+
+ invalid_reply:
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_REPLY;
+}
+
+/*
+ * Routine: ipc_kmsg_copyin_body
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory in the
+ * body of a message. Walks the typed data items that
+ * follow the header, replacing port names with object
+ * pointers and user addresses with vm_map copy objects
+ * (or kernel-allocated port arrays).
+ * On any failure the already-processed portion is
+ * cleaned up via ipc_kmsg_clean_partial.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_MSG_TOO_SMALL Body too small for types/data.
+ * MACH_SEND_INVALID_TYPE Bad type specification.
+ * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
+ */
+static mach_msg_return_t
+ipc_kmsg_copyin_body(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map)
+{
+ ipc_object_t dest;
+ vm_offset_t saddr, eaddr;
+ boolean_t complex;
+ boolean_t use_page_lists, steal_pages;
+
+ dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ complex = FALSE;
+ /* whether the destination kernel object wants out-of-line data
+ as page lists / stolen pages instead of map copy objects */
+ use_page_lists = ipc_kobject_vm_page_list(ip_kotype((ipc_port_t)dest));
+ steal_pages = ipc_kobject_vm_page_steal(ip_kotype((ipc_port_t)dest));
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+ // We make assumptions about the alignment of the header.
+ _Static_assert(!mach_msg_kernel_is_misaligned(sizeof(mach_msg_header_t)),
+ "mach_msg_header_t needs to be MACH_MSG_KERNEL_ALIGNMENT aligned.");
+
+ while (saddr < eaddr) {
+ vm_offset_t taddr = saddr; /* start of this item, for cleanup */
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, dealloc, is_port;
+ vm_offset_t data;
+ uint64_t length;
+ kern_return_t kr;
+
+ type = (mach_msg_type_long_t *) saddr;
+
+ /* make sure the whole (possibly long-form) descriptor fits */
+ if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+ ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+ ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+ if (longform) {
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_long_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ }
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ /* validate the descriptor: port items must have the exact
+ port size; long-form headers must have zeroed short fields;
+ inline data cannot ask for deallocation */
+ if ((is_port && (size != PORT_T_SIZE_IN_BITS)) ||
+#ifndef __x86_64__
+ (longform && ((type->msgtl_header.msgt_name != 0) ||
+ (type->msgtl_header.msgt_size != 0) ||
+ (type->msgtl_header.msgt_number != 0))) ||
+#endif
+ (((mach_msg_type_t*)type)->msgt_unused != 0) ||
+ (dealloc && is_inline)) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_INVALID_TYPE;
+ }
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = (((uint64_t) number * size) + 7) >> 3;
+
+ if (is_inline) {
+ vm_size_t amount = length;
+
+ if ((eaddr - saddr) < amount) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ data = saddr;
+ saddr += amount;
+ } else {
+ vm_offset_t addr;
+
+ if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ /* grab the out-of-line data */
+
+ addr = * (vm_offset_t *) saddr;
+
+ if (length == 0)
+ data = 0;
+ else if (is_port) {
+ /* out-of-line port arrays are copied into a
+ kernel buffer, widening names to ports when
+ the user/kernel representations differ */
+ data = kalloc(length);
+ if (data == 0)
+ goto invalid_memory;
+
+ if (sizeof(mach_port_name_t) != sizeof(mach_port_t))
+ {
+ mach_port_name_t *src = (mach_port_name_t*)addr;
+ mach_port_t *dst = (mach_port_t*)data;
+ for (int i=0; i<number; i++) {
+ if (copyin_port(src + i, dst + i)) {
+ kfree(data, length);
+ goto invalid_memory;
+ }
+ }
+ } else if (copyinmap(map, (char *) addr,
+ (char *) data, length)) {
+ kfree(data, length);
+ goto invalid_memory;
+ }
+ if (dealloc &&
+ (vm_deallocate(map, addr, length) != KERN_SUCCESS)) {
+ kfree(data, length);
+ goto invalid_memory;
+ }
+
+ } else {
+ vm_map_copy_t copy;
+
+ if (use_page_lists) {
+ kr = vm_map_copyin_page_list(map,
+ addr, length, dealloc,
+ steal_pages, &copy, FALSE);
+ } else {
+ kr = vm_map_copyin(map, addr, length,
+ dealloc, &copy);
+ }
+ if (kr != KERN_SUCCESS) {
+ invalid_memory:
+ ipc_kmsg_clean_partial(kmsg, taddr,
+ FALSE, 0);
+ return MACH_SEND_INVALID_MEMORY;
+ }
+
+ data = (vm_offset_t) copy;
+ }
+
+ /* replace the user address with the kernel data */
+ * (vm_offset_t *) saddr = data;
+ saddr += sizeof(vm_offset_t);
+ complex = TRUE;
+ }
+
+ if (is_port) {
+ mach_msg_type_name_t newname =
+ ipc_object_copyin_type(name);
+ ipc_object_t *objects = (ipc_object_t *) data;
+ mach_msg_type_number_t i;
+
+ /* rewrite the descriptor with the post-copyin type */
+ if (longform)
+ type->msgtl_name = newname;
+ else
+ ((mach_msg_type_t*)type)->msgt_name = newname;
+
+ for (i = 0; i < number; i++) {
+ mach_port_name_t port = ((mach_port_t*)data)[i];
+ ipc_object_t object;
+
+ if (!MACH_PORT_NAME_VALID(port)) {
+ objects[i] = (ipc_object_t)invalid_name_to_port(port);
+ continue;
+ }
+
+ kr = ipc_object_copyin(space, port,
+ name, &object);
+ if (kr != KERN_SUCCESS) {
+ /* clean header, prior items, and the
+ first i ports of this item */
+ ipc_kmsg_clean_partial(kmsg, taddr,
+ TRUE, i);
+ return MACH_SEND_INVALID_RIGHT;
+ }
+
+ if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity(
+ (ipc_port_t) object,
+ (ipc_port_t) dest))
+ kmsg->ikm_header.msgh_bits |=
+ MACH_MSGH_BITS_CIRCULAR;
+
+ objects[i] = object;
+ }
+
+ complex = TRUE;
+ }
+ saddr = mach_msg_kernel_align(saddr);
+ }
+
+ /* no rights or memory were found in the body after all */
+ if (!complex)
+ kmsg->ikm_header.msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyin
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in the message.
+ *
+ * In all failure cases, the message is left holding
+ * no rights or memory. However, the message buffer
+ * is not deallocated. If successful, the message
+ * contains a valid destination port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_NOTIFY Bad notify port.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
+ * MACH_SEND_INVALID_TYPE Bad type specification.
+ * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map,
+ mach_port_name_t notify)
+{
+ mach_msg_return_t mr;
+
+ /* The header rights must be copied in first; only a complex
+ message then needs its body processed as well. */
+ mr = ipc_kmsg_copyin_header(&kmsg->ikm_header, space, notify);
+
+ if ((mr == MACH_MSG_SUCCESS) &&
+ (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX))
+ mr = ipc_kmsg_copyin_body(kmsg, space, map);
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyin_from_kernel
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in a message sent from the kernel.
+ *
+ * Because the message comes from the kernel,
+ * the implementation assumes there are no errors
+ * or peculiarities in the message.
+ *
+ * If queueing the message would result in a
+ * circularity, MACH_MSGH_BITS_CIRCULAR is set in
+ * the message header. (Despite the historical
+ * wording, the routine returns nothing.)
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_copyin_from_kernel(ipc_kmsg_t kmsg)
+{
+ mach_msg_bits_t bits = kmsg->ikm_header.msgh_bits;
+ mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
+ mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
+ ipc_object_t remote = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t local = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ vm_offset_t saddr, eaddr;
+
+ /* translate the destination and reply ports */
+
+ ipc_object_copyin_from_kernel(remote, rname);
+ if (IO_VALID(local))
+ ipc_object_copyin_from_kernel(local, lname);
+
+ /*
+ * The common case is a complex message with no reply port,
+ * because that is what the memory_object interface uses.
+ */
+
+ if (bits == (MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
+ bits = (MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
+
+ kmsg->ikm_header.msgh_bits = bits;
+ } else {
+ bits = (MACH_MSGH_BITS_OTHER(bits) |
+ MACH_MSGH_BITS(ipc_object_copyin_type(rname),
+ ipc_object_copyin_type(lname)));
+
+ kmsg->ikm_header.msgh_bits = bits;
+ /* simple messages carry no rights/memory in the body */
+ if ((bits & MACH_MSGH_BITS_COMPLEX) == 0)
+ return;
+ }
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+ /* walk the body's typed items; note no validation is done here,
+ unlike ipc_kmsg_copyin_body, since the kernel is trusted */
+ while (saddr < eaddr) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, is_port;
+ vm_offset_t data;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) saddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ longform = ((mach_msg_type_t*)type)->msgt_longform;
+ /* type->msgtl_header.msgt_deallocate not used */
+ if (longform) {
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_long_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ }
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_inline) {
+ data = saddr;
+ saddr += length;
+ } else {
+ /*
+ * The sender should supply ready-made memory
+ * for us, so we don't need to do anything.
+ */
+
+ data = * (vm_offset_t *) saddr;
+ saddr += sizeof(vm_offset_t);
+ }
+
+ if (is_port) {
+ mach_msg_type_name_t newname =
+ ipc_object_copyin_type(name);
+ ipc_object_t *objects = (ipc_object_t *) data;
+ mach_msg_type_number_t i;
+
+ /* rewrite the descriptor with the post-copyin type */
+ if (longform)
+ type->msgtl_name = newname;
+ else
+ ((mach_msg_type_t*)type)->msgt_name = newname;
+ for (i = 0; i < number; i++) {
+ ipc_object_t object = objects[i];
+
+ if (!IO_VALID(object))
+ continue;
+
+ ipc_object_copyin_from_kernel(object, name);
+
+ if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity(
+ (ipc_port_t) object,
+ (ipc_port_t) remote))
+ kmsg->ikm_header.msgh_bits |=
+ MACH_MSGH_BITS_CIRCULAR;
+ }
+ }
+ saddr = mach_msg_kernel_align(saddr);
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_header
+ * Purpose:
+ * "Copy-out" port rights in the header of a message.
+ * Operates atomically; if it doesn't succeed the
+ * message header and the space are left untouched.
+ * If it does succeed the remote/local port fields
+ * contain port names instead of object pointers,
+ * and the bits field is updated.
+ *
+ * The notify argument implements the MACH_RCV_NOTIFY option.
+ * If it is not MACH_PORT_NULL, it should name a receive right.
+ * If the process of receiving the reply port creates a
+ * new right in the receiving task, then the new right is
+ * automatically registered for a dead-name notification,
+ * with the notify port supplying the send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out port rights.
+ * MACH_RCV_INVALID_NOTIFY
+ * Notify is non-null and doesn't name a receive right.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
+ * The space is dead.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
+ * No room in space for another name.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
+ * Couldn't allocate memory for the reply port.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
+ * Couldn't allocate memory for the dead-name request.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_header(
+ mach_msg_header_t *msg,
+ ipc_space_t space,
+ mach_port_name_t notify)
+{
+ mach_msg_bits_t mbits = msg->msgh_bits;
+ ipc_port_t dest = (ipc_port_t) msg->msgh_remote_port;
+
+ assert(IP_VALID(dest));
+
+#ifndef MIGRATING_THREADS
+ /* first check for common cases */
+
+ if (notify == MACH_PORT_NULL) switch (MACH_MSGH_BITS_PORTS(mbits)) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0): {
+ mach_port_name_t dest_name;
+ ipc_port_t nsrequest;
+ rpc_uintptr_t payload;
+
+ /* receiving an asynchronous message */
+
+ ip_lock(dest);
+ if (!ip_active(dest)) {
+ ip_unlock(dest);
+ break;
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_srights > 0);
+ ip_release(dest);
+
+ if (dest->ip_receiver == space)
+ dest_name = dest->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+ payload = dest->ip_protected_payload;
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+ mach_port_mscount_t mscount;
+
+ dest->ip_nsrequest = IP_NULL;
+ mscount = dest->ip_mscount;
+ ip_unlock(dest);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest);
+
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(
+ 0, MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
+ msg->msgh_remote_port = MACH_PORT_NULL;
+ return MACH_MSG_SUCCESS;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE): {
+ ipc_entry_t entry;
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_name_t dest_name, reply_name;
+ ipc_port_t nsrequest;
+ rpc_uintptr_t payload;
+
+ /* receiving a request message */
+
+ if (!IP_VALID(reply))
+ break;
+
+ is_write_lock(space);
+ if (!space->is_active || space->is_free_list == NULL) {
+ is_write_unlock(space);
+ break;
+ }
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space. If
+ * dest == reply, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest);
+ if (!ip_active(dest) || !ip_lock_try(reply)) {
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
+
+ if (!ip_active(reply)) {
+ ip_unlock(reply);
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
+
+ assert(reply->ip_sorights > 0);
+ ip_unlock(reply);
+
+ kern_return_t kr;
+ kr = ipc_entry_get (space, &reply_name, &entry);
+ if (kr) {
+ ip_unlock(reply);
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
+
+ {
+ mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ assert(MACH_PORT_NAME_VALID(reply_name));
+ entry->ie_object = (ipc_object_t) reply;
+ is_write_unlock(space);
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_srights > 0);
+ ip_release(dest);
+
+ if (dest->ip_receiver == space)
+ dest_name = dest->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+ payload = dest->ip_protected_payload;
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+ mach_port_mscount_t mscount;
+
+ dest->ip_nsrequest = IP_NULL;
+ mscount = dest->ip_mscount;
+ ip_unlock(dest);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest);
+
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
+ msg->msgh_remote_port = reply_name;
+ return MACH_MSG_SUCCESS;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_name_t dest_name;
+ rpc_uintptr_t payload;
+
+ /* receiving a reply message */
+
+ ip_lock(dest);
+ if (!ip_active(dest)) {
+ ip_unlock(dest);
+ break;
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_sorights > 0);
+
+ payload = dest->ip_protected_payload;
+
+ if (dest->ip_receiver == space) {
+ ip_release(dest);
+ dest->ip_sorights--;
+ dest_name = dest->ip_receiver_name;
+ ip_unlock(dest);
+ } else {
+ ip_unlock(dest);
+
+ ipc_notify_send_once(dest);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
+ msg->msgh_remote_port = MACH_PORT_NULL;
+ return MACH_MSG_SUCCESS;
+ }
+
+ default:
+ /* don't bother optimizing */
+ break;
+ }
+#endif /* MIGRATING_THREADS */
+
+ {
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_name_t dest_name, reply_name;
+ rpc_uintptr_t payload;
+
+ if (IP_VALID(reply)) {
+ ipc_port_t notify_port;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ /*
+ * Handling notify (for MACH_RCV_NOTIFY) is tricky.
+ * The problem is atomically making a send-once right
+ * from the notify port and installing it for a
+ * dead-name request in the new entry, because this
+ * requires two port locks (on the notify port and
+ * the reply port). However, we can safely make
+ * and consume send-once rights for the notify port
+ * as long as we hold the space locked. This isn't
+ * an atomicity problem, because the only way
+ * to detect that a send-once right has been created
+ * and then consumed if it wasn't needed is by getting
+ * at the receive right to look at ip_sorights, and
+ * because the space is write-locked status calls can't
+ * lookup the notify port receive right. When we make
+ * the send-once right, we lock the notify port,
+ * so any status calls in progress will be done.
+ */
+
+ is_write_lock(space);
+
+ for (;;) {
+ ipc_port_request_index_t request;
+
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_SPACE);
+ }
+
+ if (notify != MACH_PORT_NULL) {
+ notify_port = ipc_port_lookup_notify(space,
+ notify);
+ if (notify_port == IP_NULL) {
+ is_write_unlock(space);
+ return MACH_RCV_INVALID_NOTIFY;
+ }
+ } else
+ notify_port = IP_NULL;
+
+ if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, (ipc_object_t) reply,
+ &reply_name, &entry)) {
+ /* reply port is locked and active */
+
+ /*
+ * We don't need the notify_port
+ * send-once right, but we can't release
+ * it here because reply port is locked.
+ * Wait until after the copyout to
+ * release the notify port right.
+ */
+
+ assert(entry->ie_bits &
+ MACH_PORT_TYPE_SEND_RECEIVE);
+ break;
+ }
+
+ ip_lock(reply);
+ if (!ip_active(reply)) {
+ ip_release(reply);
+ ip_check_unlock(reply);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ ip_lock(dest);
+ is_write_unlock(space);
+
+ reply = IP_DEAD;
+ reply_name = MACH_PORT_NAME_DEAD;
+ goto copyout_dest;
+ }
+
+ kr = ipc_entry_alloc(space, &reply_name, &entry);
+ if (kr != KERN_SUCCESS) {
+ ip_unlock(reply);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ is_write_unlock(space);
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_KERNEL);
+ else
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_SPACE);
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ if (notify_port == IP_NULL) {
+ /* not making a dead-name request */
+
+ entry->ie_object = (ipc_object_t) reply;
+ break;
+ }
+
+ kr = ipc_port_dnrequest(reply, reply_name,
+ notify_port, &request);
+ if (kr != KERN_SUCCESS) {
+ ip_unlock(reply);
+
+ ipc_port_release_sonce(notify_port);
+
+ ipc_entry_dealloc(space, reply_name, entry);
+ is_write_unlock(space);
+
+ ip_lock(reply);
+ if (!ip_active(reply)) {
+ /* will fail next time around loop */
+
+ ip_unlock(reply);
+ is_write_lock(space);
+ continue;
+ }
+
+ kr = ipc_port_dngrow(reply);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_KERNEL);
+
+ is_write_lock(space);
+ continue;
+ }
+
+ notify_port = IP_NULL; /* don't release right below */
+
+ entry->ie_object = (ipc_object_t) reply;
+ entry->ie_request = request;
+ break;
+ }
+
+ /* space and reply port are locked and active */
+
+ ip_reference(reply); /* hold onto the reply port */
+
+ kr = ipc_right_copyout(space, reply_name, entry,
+ reply_type, TRUE, (ipc_object_t) reply);
+ /* reply port is unlocked */
+ assert(kr == KERN_SUCCESS);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ ip_lock(dest);
+ is_write_unlock(space);
+ } else {
+ /*
+ * No reply port! This is an easy case.
+ * We only need to have the space locked
+ * when checking notify and when locking
+ * the destination (to ensure atomicity).
+ */
+
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ return MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE;
+ }
+
+ if (notify != MACH_PORT_NULL) {
+ ipc_entry_t entry;
+
+ /* must check notify even though it won't be used */
+
+ if (((entry = ipc_entry_lookup(space, notify))
+ == IE_NULL) ||
+ ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)) {
+ if (entry == IE_NULL)
+ ipc_entry_lookup_failed (msg, notify);
+ is_read_unlock(space);
+ return MACH_RCV_INVALID_NOTIFY;
+ }
+ }
+
+ ip_lock(dest);
+ is_read_unlock(space);
+
+ reply_name = invalid_port_to_name(msg->msgh_local_port);
+ }
+
+ /*
+ * At this point, the space is unlocked and the destination
+ * port is locked. (Lock taken while space was locked.)
+ * reply_name is taken care of; we still need dest_name.
+ * We still hold a ref for reply (if it is valid).
+ *
+ * If the space holds receive rights for the destination,
+ * we return its name for the right. Otherwise the task
+ * managed to destroy or give away the receive right between
+ * receiving the message and this copyout. If the destination
+ * is dead, return MACH_PORT_DEAD, and if the receive right
+ * exists somewhere else (another space, in transit)
+ * return MACH_PORT_NULL.
+ *
+ * Making this copyout operation atomic with the previous
+ * copyout of the reply port is a bit tricky. If there was
+ * no real reply port (it wasn't IP_VALID) then this isn't
+ * an issue. If the reply port was dead at copyout time,
+ * then we are OK, because if dest is dead we serialize
+ * after the death of both ports and if dest is alive
+ * we serialize after reply died but before dest's (later) death.
+ * So assume reply was alive when we copied it out. If dest
+ * is alive, then we are OK because we serialize before
+ * the ports' deaths. So assume dest is dead when we look at it.
+ * If reply dies/died after dest, then we are OK because
+ * we serialize after dest died but before reply dies.
+ * So the hard case is when reply is alive at copyout,
+ * dest is dead at copyout, and reply died before dest died.
+ * In this case pretend that dest is still alive, so
+ * we serialize while both ports are alive.
+ *
+ * Because the space lock is held across the copyout of reply
+ * and locking dest, the receive right for dest can't move
+ * in or out of the space while the copyouts happen, so
+ * that isn't an atomicity problem. In the last hard case
+ * above, this implies that when dest is dead that the
+ * space couldn't have had receive rights for dest at
+ * the time reply was copied-out, so when we pretend
+ * that dest is still alive, we can return MACH_PORT_NULL.
+ *
+ * If dest == reply, then we have to make it look like
+ * either both copyouts happened before the port died,
+ * or both happened after the port died. This special
+ * case works naturally if the timestamp comparison
+ * is done correctly.
+ */
+
+ copyout_dest:
+ payload = dest->ip_protected_payload;
+
+ if (ip_active(dest)) {
+ ipc_object_copyout_dest(space, (ipc_object_t) dest,
+ dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ ipc_port_timestamp_t timestamp;
+
+ timestamp = dest->ip_timestamp;
+ ip_release(dest);
+ ip_check_unlock(dest);
+
+ if (IP_VALID(reply)) {
+ ip_lock(reply);
+ if (ip_active(reply) ||
+ IP_TIMESTAMP_ORDER(timestamp,
+ reply->ip_timestamp))
+ dest_name = MACH_PORT_NAME_DEAD;
+ else
+ dest_name = MACH_PORT_NAME_NULL;
+ ip_unlock(reply);
+ } else
+ dest_name = MACH_PORT_NAME_DEAD;
+ }
+
+ if (IP_VALID(reply))
+ ipc_port_release(reply);
+
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
+
+ msg->msgh_remote_port = reply_name;
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_object
+ * Purpose:
+ * Copy-out a port right. Always returns a name,
+ * even for unsuccessful return codes. Always
+ * consumes the supplied object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS The space acquired the right
+ * (name is valid) or the object is dead (MACH_PORT_DEAD).
+ * MACH_MSG_IPC_SPACE No room in space for the right,
+ * or the space is dead. (Name is MACH_PORT_NULL.)
+ * MACH_MSG_IPC_KERNEL Kernel resource shortage.
+ * (Name is MACH_PORT_NULL.)
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_object(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ mach_port_name_t *namep)
+{
+ /* IO_DEAD / IO_NULL carry no right; translate directly to the
+    conventional MACH_PORT_DEAD / MACH_PORT_NULL name. */
+ if (!IO_VALID(object)) {
+ *namep = invalid_port_to_name((mach_port_t)object);
+ return MACH_MSG_SUCCESS;
+ }
+
+#ifndef MIGRATING_THREADS
+ /*
+ * Attempt quick copyout of send rights. We optimize for a
+ * live port for which the receiver holds send (and not
+ * receive) rights in his local table.
+ */
+
+ if (msgt_name != MACH_MSG_TYPE_PORT_SEND)
+ goto slow_copyout;
+
+ {
+ ipc_port_t port = (ipc_port_t) object;
+ ipc_entry_t entry;
+
+ is_write_lock(space);
+ if (!space->is_active) {
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
+
+ /* Fast path requires the port to be live and already named
+    in the receiver's space (reverse lookup by object). */
+ ip_lock(port);
+ if (!ip_active(port) ||
+ (entry = ipc_reverse_lookup(space,
+ (ipc_object_t) port)) == NULL) {
+ ip_unlock(port);
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
+ *namep = entry->ie_name;
+
+ /*
+ * Copyout the send right, incrementing urefs
+ * unless it would overflow, and consume the right.
+ */
+
+ /* > 1 because the entry's existing send right accounts for one. */
+ assert(port->ip_srights > 1);
+ port->ip_srights--;
+ ip_release(port);
+ ip_unlock(port);
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
+ assert(IE_BITS_UREFS(entry->ie_bits) > 0);
+ assert(IE_BITS_UREFS(entry->ie_bits) < MACH_PORT_UREFS_MAX);
+
+ {
+ ipc_entry_bits_t bits = entry->ie_bits + 1;
+
+ /* Saturate: leave urefs pinned just below MACH_PORT_UREFS_MAX
+    rather than overflowing the bit-field. */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits;
+ }
+
+ is_write_unlock(space);
+ return MACH_MSG_SUCCESS;
+ }
+
+ slow_copyout:
+#endif /* MIGRATING_THREADS */
+
+ /* General case: ipc_object_copyout handles all right types.
+    On failure the right must still be consumed, per contract. */
+ {
+ kern_return_t kr;
+
+ kr = ipc_object_copyout(space, object, msgt_name, TRUE, namep);
+ if (kr != KERN_SUCCESS) {
+ ipc_object_destroy(object, msgt_name);
+
+ if (kr == KERN_INVALID_CAPABILITY)
+ *namep = MACH_PORT_NAME_DEAD;
+ else {
+ *namep = MACH_PORT_NAME_NULL;
+
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ return MACH_MSG_IPC_KERNEL;
+ else
+ return MACH_MSG_IPC_SPACE;
+ }
+ }
+
+ return MACH_MSG_SUCCESS;
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_body
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the body of a message.
+ *
+ * The error codes are a combination of special bits.
+ * The copyout proceeds despite errors.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyout.
+ * MACH_MSG_IPC_SPACE No room for port right in name space.
+ * MACH_MSG_VM_SPACE No room for memory in address space.
+ * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
+ * MACH_MSG_VM_KERNEL Resource shortage handling memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_body(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map)
+{
+ mach_msg_return_t mr = MACH_MSG_SUCCESS;
+ kern_return_t kr;
+ vm_offset_t saddr, eaddr;
+
+ /* Body elements lie between the end of the header and msgh_size. */
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header +
+ kmsg->ikm_header.msgh_size;
+
+ /* Walk each (type descriptor, data) element in turn. Errors are
+    accumulated into mr as special bits; the walk never stops early. */
+ while (saddr < eaddr) {
+ vm_offset_t taddr = saddr;
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, is_port;
+ uint64_t length;
+ vm_offset_t addr;
+
+ /* Decode the short or long form of the type descriptor and
+    advance saddr past it (realigning when the descriptor size
+    is not a multiple of the kernel message alignment). */
+ type = (mach_msg_type_long_t *) saddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ longform = ((mach_msg_type_t*)type)->msgt_longform;
+ if (longform) {
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_long_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ }
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = (((uint64_t) number * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_port) {
+ ipc_object_t *objects;
+ mach_msg_type_number_t i;
+
+ if (!is_inline && (length != 0)) {
+ /* first allocate memory in the map */
+ uint64_t allocated = length;
+
+ /* Out-of-line port arrays shrink on copyout: the user
+    sees mach_port_name_t, which may be narrower than
+    the in-kernel mach_port_t pointers. */
+ _Static_assert(sizeof(mach_port_name_t) <= sizeof(mach_port_t),
+ "Size of mach_port_t should be equal or larger than mach_port_name_t.");
+ allocated -= (sizeof(mach_port_t) - sizeof(mach_port_name_t)) * number;
+
+ kr = vm_allocate(map, &addr, allocated, TRUE);
+ if (kr != KERN_SUCCESS) {
+ ipc_kmsg_clean_body(taddr, saddr);
+ goto vm_copyout_failure;
+ }
+ }
+
+ objects = (ipc_object_t *)
+ (is_inline ? saddr : * (vm_offset_t *) saddr);
+
+ /* copyout port rights carried in the message */
+
+ for (i = 0; i < number; i++) {
+ ipc_object_t object = objects[i];
+
+ /* Replaces each kernel object pointer with the
+    receiver-space name in place. */
+ mr |= ipc_kmsg_copyout_object_to_port(space, object,
+ name, (mach_port_t *)&objects[i]);
+ }
+ }
+
+ if (is_inline) {
+ ((mach_msg_type_t*)type)->msgt_deallocate = FALSE;
+ saddr += length;
+ } else {
+ vm_offset_t data;
+
+ data = * (vm_offset_t *) saddr;
+
+ /* copyout memory carried in the message */
+
+ if (length == 0) {
+ assert(data == 0);
+ addr = 0;
+ } else if (is_port) {
+ /* copyout to memory allocated above */
+
+ if (sizeof(mach_port_name_t) != sizeof(mach_port_t)) {
+ /* Names and kernel ports differ in width: copy
+    element-by-element, narrowing each entry. */
+ mach_port_t *src = (mach_port_t*)data;
+ mach_port_name_t *dst = (mach_port_name_t*)addr;
+ for (int i=0; i<number; i++) {
+ if (copyout_port(src + i, dst + i)) {
+ kr = KERN_FAILURE;
+ goto vm_copyout_failure;
+ }
+ }
+ } else {
+ (void) copyoutmap(map, (char *) data,
+ (char *) addr, length);
+ }
+ kfree(data, length);
+ } else {
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+
+ kr = vm_map_copyout(map, &addr, copy);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+
+ /* Shared failure target: zero the descriptor size so
+    the receiver sees an empty element, and record the
+    VM error bits in mr. */
+ vm_copyout_failure:
+
+ addr = 0;
+ if (longform)
+ type->msgtl_size = 0;
+ else
+ ((mach_msg_type_t*)type)->msgt_size = 0;
+
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ mr |= MACH_MSG_VM_KERNEL;
+ else
+ mr |= MACH_MSG_VM_SPACE;
+ }
+ }
+
+ ((mach_msg_type_t*)type)->msgt_deallocate = TRUE;
+ * (vm_offset_t *) saddr = addr;
+ saddr += sizeof(vm_offset_t);
+ }
+
+ /* Next element is always correctly aligned */
+ saddr = mach_msg_kernel_align(saddr);
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out all rights and memory.
+ * MACH_RCV_INVALID_NOTIFY Bad notify port.
+ * Rights and memory in the message are intact.
+ * MACH_RCV_HEADER_ERROR + special bits
+ * Rights and memory in the message are intact.
+ * MACH_RCV_BODY_ERROR + special bits
+ * The message header was successfully copied out.
+ * As much of the body was handled as possible.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map,
+ mach_port_name_t notify)
+{
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ mach_msg_return_t mr;
+
+ /* Translate the header ports first; a header failure leaves all
+    rights and memory in the message intact (see return codes above). */
+ mr = ipc_kmsg_copyout_header(&kmsg->ikm_header, space, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ /* Only complex messages carry rights/memory in the body. Once the
+    header is out, body errors are tagged with MACH_RCV_BODY_ERROR
+    instead of aborting, since the copyout is already under way. */
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ mr = ipc_kmsg_copyout_body(kmsg, space, map);
+ if (mr != MACH_MSG_SUCCESS)
+ mr |= MACH_RCV_BODY_ERROR;
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_pseudo
+ * Purpose:
+ * Does a pseudo-copyout of the message.
+ * This is like a regular copyout, except
+ * that the ports in the header are handled
+ * as if they are in the body. They aren't reversed.
+ *
+ * The error codes are a combination of special bits.
+ * The copyout proceeds despite errors.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyout.
+ * MACH_MSG_IPC_SPACE No room for port right in name space.
+ * MACH_MSG_VM_SPACE No room for memory in address space.
+ * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
+ * MACH_MSG_VM_KERNEL Resource shortage handling memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_pseudo(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map)
+{
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ mach_port_name_t dest_name, reply_name;
+ mach_msg_return_t mr;
+
+ assert(IO_VALID(dest));
+
+ /* Unlike a normal copyout, dest and reply keep their positions
+    (no swap) and are each copied out as ordinary rights. */
+ mr = (ipc_kmsg_copyout_object(space, dest, dest_type, &dest_name) |
+ ipc_kmsg_copyout_object(space, reply, reply_type, &reply_name));
+
+ /* Drop the circularity hint; it only applies while the message
+    is queued in the kernel. */
+ kmsg->ikm_header.msgh_bits = mbits &~ MACH_MSGH_BITS_CIRCULAR;
+ kmsg->ikm_header.msgh_remote_port = dest_name;
+ kmsg->ikm_header.msgh_local_port = reply_name;
+
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ mr |= ipc_kmsg_copyout_body(kmsg, space, map);
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_dest
+ * Purpose:
+ * Copies out the destination port in the message.
+ * Destroys all other rights and memory in the message.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_copyout_dest(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space)
+{
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ mach_port_name_t dest_name, reply_name;
+
+ assert(IO_VALID(dest));
+
+ /* Copy out only the destination; a dead destination becomes
+    MACH_PORT_DEAD. */
+ io_lock(dest);
+ if (io_active(dest)) {
+ ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ io_release(dest);
+ io_check_unlock(dest);
+ dest_name = MACH_PORT_NAME_DEAD;
+ }
+
+ /* The reply right is not copied out; it is destroyed and the
+    receiver sees a null name in its place. */
+ if (IO_VALID(reply)) {
+ ipc_object_destroy(reply, reply_type);
+ reply_name = MACH_PORT_NAME_NULL;
+ } else
+ reply_name = invalid_port_to_name((mach_port_t)reply);
+
+ /* Swap local/remote, as a receiver sees them. */
+ kmsg->ikm_header.msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ kmsg->ikm_header.msgh_remote_port = reply_name;
+
+ /* Destroy all rights and memory carried in the body. */
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ vm_offset_t saddr, eaddr;
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header +
+ kmsg->ikm_header.msgh_size;
+
+ ipc_kmsg_clean_body(saddr, eaddr);
+ }
+}
+
+#if MACH_KDB
+
+/*
+ * Map a mach_msg type code to a human-readable name for ddb output.
+ * For the MOVE_* port types, `received' selects the spelling seen by
+ * a receiver (the right actually delivered) rather than the sender's
+ * disposition. Returns NULL for unknown codes.
+ */
+static char *
+ipc_type_name(
+ int type_name,
+ boolean_t received)
+{
+ switch (type_name) {
+ case MACH_MSG_TYPE_BOOLEAN:
+ return "boolean";
+
+ case MACH_MSG_TYPE_INTEGER_16:
+ return "short";
+
+ case MACH_MSG_TYPE_INTEGER_32:
+ return "int32";
+
+ case MACH_MSG_TYPE_INTEGER_64:
+ return "int64";
+
+ case MACH_MSG_TYPE_CHAR:
+ return "char";
+
+ case MACH_MSG_TYPE_BYTE:
+ return "byte";
+
+ case MACH_MSG_TYPE_REAL:
+ return "real";
+
+ case MACH_MSG_TYPE_STRING:
+ return "string";
+
+ case MACH_MSG_TYPE_PORT_NAME:
+ return "port_name";
+
+ case MACH_MSG_TYPE_MOVE_RECEIVE:
+ if (received) {
+ return "port_receive";
+ } else {
+ return "move_receive";
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND:
+ if (received) {
+ return "port_send";
+ } else {
+ return "move_send";
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+ if (received) {
+ return "port_send_once";
+ } else {
+ return "move_send_once";
+ }
+
+ case MACH_MSG_TYPE_COPY_SEND:
+ return "copy_send";
+
+ case MACH_MSG_TYPE_MAKE_SEND:
+ return "make_send";
+
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+ return "make_send_once";
+
+ default:
+ return (char *) 0;
+ }
+}
+
+/*
+ * Print a mach_msg type code symbolically when known, otherwise as
+ * a raw "typeN" number. Debugger (ddb) helper.
+ */
+static void
+ipc_print_type_name(
+ int type_name)
+{
+ char *name = ipc_type_name(type_name, TRUE);
+ if (name) {
+ printf("%s", name);
+ } else {
+ printf("type%d", type_name);
+ }
+}
+
+/*
+ * ipc_kmsg_print [ debug ]
+ */
+void
+ipc_kmsg_print(ipc_kmsg_t kmsg)
+{
+ /* Dump the kmsg bookkeeping fields, then the message itself. */
+ db_printf("kmsg=0x%x\n", kmsg);
+ db_printf("ikm_next=0x%x,prev=0x%x,size=%d,marequest=0x%x",
+ kmsg->ikm_next,
+ kmsg->ikm_prev,
+ kmsg->ikm_size,
+ kmsg->ikm_marequest);
+ db_printf("\n");
+ ipc_msg_print(&kmsg->ikm_header);
+}
+
+/*
+ * ipc_msg_print [ debug ]
+ */
+void
+ipc_msg_print(mach_msg_header_t *msgh)
+{
+ vm_offset_t saddr, eaddr;
+
+ /* Decode and print the header bits symbolically. */
+ db_printf("msgh_bits=0x%x: ", msgh->msgh_bits);
+ if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
+ db_printf("complex,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
+ db_printf("circular,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX_PORTS) {
+ db_printf("complex_ports,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX_DATA) {
+ db_printf("complex_data,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_MIGRATED) {
+ db_printf("migrated,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_UNUSED) {
+ db_printf("unused=0x%x,",
+ msgh->msgh_bits & MACH_MSGH_BITS_UNUSED);
+ }
+ db_printf("l=0x%x,r=0x%x\n",
+ MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
+ MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+
+ db_printf("msgh_id=%d,size=%d,seqno=%d,",
+ msgh->msgh_id,
+ msgh->msgh_size,
+ msgh->msgh_seqno);
+
+ if (msgh->msgh_remote_port) {
+ db_printf("remote=0x%x(", msgh->msgh_remote_port);
+ ipc_print_type_name(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+ db_printf("),");
+ } else {
+ db_printf("remote=null,\n");
+ }
+
+ if (msgh->msgh_local_port) {
+ db_printf("local=0x%x(", msgh->msgh_local_port);
+ ipc_print_type_name(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
+ db_printf(")\n");
+ } else {
+ db_printf("local=null\n");
+ }
+
+ /* Walk the body elements, validating as we go; bail out with a
+    diagnostic on the first malformed descriptor. */
+ saddr = (vm_offset_t) (msgh + 1);
+ eaddr = (vm_offset_t) msgh + msgh->msgh_size;
+
+ while (saddr < eaddr) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, dealloc, is_port;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) saddr;
+
+ /* Make sure the (short or long) descriptor fits in what is
+    left of the message before dereferencing its fields. */
+ if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+ ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+ ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+ db_printf("*** msg too small\n");
+ return;
+ }
+
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+ if (longform) {
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_long_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ if (mach_msg_kernel_is_misaligned(sizeof(mach_msg_type_t))) {
+ saddr = mach_msg_kernel_align(saddr);
+ }
+ }
+
+ db_printf("-- type=");
+ ipc_print_type_name(name);
+ if (! is_inline) {
+ db_printf(",ool");
+ }
+ if (dealloc) {
+ db_printf(",dealloc");
+ }
+ if (longform) {
+ db_printf(",longform");
+ }
+ db_printf(",size=%d,number=%d,addr=0x%x\n",
+ size,
+ number,
+ saddr);
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ /* Sanity-check the descriptor: port elements must have the
+    canonical port size; longform short-fields must be zero;
+    inline data can never be marked deallocate. */
+ if ((is_port && (size != PORT_T_SIZE_IN_BITS)) ||
+#ifndef __x86_64__
+ (longform && ((type->msgtl_header.msgt_name != 0) ||
+ (type->msgtl_header.msgt_size != 0) ||
+ (type->msgtl_header.msgt_number != 0))) ||
+#endif
+ (((mach_msg_type_t*)type)->msgt_unused != 0) ||
+ (dealloc && is_inline)) {
+ db_printf("*** invalid type\n");
+ return;
+ }
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ if (is_inline) {
+ vm_size_t amount;
+ unsigned i, numwords;
+
+ /* round up to int boundaries for printing */
+ amount = (length + 3) &~ 3;
+ if ((eaddr - saddr) < amount) {
+ db_printf("*** too small\n");
+ return;
+ }
+ /* Cap inline dumps at eight words to keep output short. */
+ numwords = amount / sizeof(int);
+ if (numwords > 8) {
+ numwords = 8;
+ }
+ for (i = 0; i < numwords; i++) {
+ db_printf("0x%x\n", ((int *) saddr)[i]);
+ }
+ if (numwords < amount / sizeof(int)) {
+ db_printf("...\n");
+ }
+ saddr += amount;
+ } else {
+ if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+ db_printf("*** too small\n");
+ return;
+ }
+ /* Out-of-line data: print only the kernel address word. */
+ db_printf("0x%x\n", * (vm_offset_t *) saddr);
+ saddr += sizeof(vm_offset_t);
+ }
+ saddr = mach_msg_kernel_align(saddr);
+ }
+}
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_kmsg.h b/ipc/ipc_kmsg.h
new file mode 100644
index 0000000..9ee1aa4
--- /dev/null
+++ b/ipc/ipc_kmsg.h
@@ -0,0 +1,345 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_kmsg.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for kernel messages.
+ */
+
+#ifndef _IPC_IPC_KMSG_H_
+#define _IPC_IPC_KMSG_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/cpu_number.h>
+#include <kern/macros.h>
+#include <kern/kalloc.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_types.h>
+#include <vm/vm_map.h>
+
+/*
+ * This structure is only the header for a kmsg buffer;
+ * the actual buffer is normally larger. The rest of the buffer
+ * holds the body of the message.
+ *
+ * In a kmsg, the port fields hold pointers to ports instead
+ * of port names. These pointers hold references.
+ *
+ * The ikm_header.msgh_remote_port field is the destination
+ * of the message.
+ */
+
/*
 *	Header of a kmsg buffer.  The message itself begins at
 *	ikm_header; ikm_next/ikm_prev link the kmsg into a circular
 *	message queue (struct ipc_kmsg_queue below).
 */
typedef struct ipc_kmsg {
	struct ipc_kmsg *ikm_next, *ikm_prev;	/* circular queue links */
	vm_size_t ikm_size;		/* size of the whole buffer, header included */
	ipc_marequest_t ikm_marequest;	/* pending msg-accepted request, or IMAR_NULL */
	mach_msg_header_t ikm_header;	/* the message proper starts here */
} *ipc_kmsg_t;

#define IKM_NULL ((ipc_kmsg_t) 0)

/* Bytes of kmsg bookkeeping that precede the message in the buffer. */
#define IKM_OVERHEAD \
 (sizeof(struct ipc_kmsg) - sizeof(mach_msg_header_t))

/* Convert between message sizes and kmsg buffer sizes. */
#define ikm_plus_overhead(size) ((vm_size_t)((size) + IKM_OVERHEAD))
#define ikm_less_overhead(size) ((mach_msg_size_t)((size) - IKM_OVERHEAD))
+
#if MACH_IPC_TEST
/*
 * For debugging.
 */
#define IKM_BOGUS ((ipc_kmsg_t) 0xffffff10)

/*
 * Poison the queue links of a kmsg just removed from a queue so a
 * stale ikm_next/ikm_prev dereference faults loudly instead of
 * silently corrupting another queue.
 */
#define ikm_mark_bogus(kmsg) \
MACRO_BEGIN \
 (kmsg)->ikm_next = IKM_BOGUS; \
 (kmsg)->ikm_prev = IKM_BOGUS; \
MACRO_END

#else /* MACH_IPC_TEST */

/* No-op outside of IPC test builds. */
#define ikm_mark_bogus(kmsg) ;

#endif /* MACH_IPC_TEST */
+
+/*
+ * We keep a per-processor cache of kernel message buffers.
+ * The cache saves the overhead/locking of using kalloc/kfree.
+ * The per-processor cache seems to miss less than a per-thread cache,
+ * and it also uses less memory. Access to the cache doesn't
+ * require locking.
+ */
+
/* One cached kmsg buffer per CPU; see the comment above. */
extern ipc_kmsg_t ipc_kmsg_cache[NCPUS];

/* This CPU's cache slot.  No locking: only the local CPU touches it. */
#define ikm_cache() ipc_kmsg_cache[cpu_number()]

/*
 * Take the cached kmsg from this CPU's slot, yielding IKM_NULL if the
 * slot is empty.  NOTE(review): this and ikm_cache_free_try yield a
 * value, so MACRO_BEGIN/MACRO_END must expand to a GNU statement
 * expression ({ ... }) -- confirm in kern/macros.h.
 */
#define ikm_cache_alloc_try() \
MACRO_BEGIN \
 ipc_kmsg_t __kmsg = ikm_cache(); \
 if (__kmsg != IKM_NULL) { \
 ikm_cache() = IKM_NULL; \
 ikm_check_initialized(__kmsg, IKM_SAVED_KMSG_SIZE); \
 } \
 __kmsg; \
MACRO_END

/*
 * Get a standard-size kmsg: from the per-CPU cache if possible,
 * otherwise freshly allocated (and initialized) with ikm_alloc.
 * Yields IKM_NULL only if the allocation fails.
 */
#define ikm_cache_alloc() \
MACRO_BEGIN \
 ipc_kmsg_t __kmsg = ikm_cache_alloc_try(); \
 if (!__kmsg) { \
 __kmsg = ikm_alloc(IKM_SAVED_MSG_SIZE); \
 if (__kmsg != IKM_NULL) \
 ikm_init(__kmsg, IKM_SAVED_MSG_SIZE); \
 } \
 __kmsg; \
MACRO_END

/*
 * Stash kmsg in this CPU's cache slot if it is empty; yields 1 on
 * success, 0 if the slot was occupied.  NOTE(review): unlike
 * ikm_cache_free below, this does not check ikm_size -- callers are
 * presumably responsible for only offering IKM_SAVED_KMSG_SIZE
 * buffers; confirm at call sites.
 */
#define ikm_cache_free_try(kmsg) \
MACRO_BEGIN \
 int __success = 0; \
 if (ikm_cache() == IKM_NULL) { \
 ikm_cache() = (kmsg); \
 __success = 1; \
 } \
 __success; \
MACRO_END

/*
 * Release a kmsg: cache it if it is the standard size and the slot
 * is free, otherwise hand it to ikm_free.
 */
#define ikm_cache_free(kmsg) \
MACRO_BEGIN \
 if (((kmsg)->ikm_size == IKM_SAVED_KMSG_SIZE) && \
 (ikm_cache() == IKM_NULL)) \
 ikm_cache() = (kmsg); \
 else \
 ikm_free(kmsg); \
MACRO_END
+
+/*
+ * The size of the kernel message buffers that will be cached.
+ * IKM_SAVED_KMSG_SIZE includes overhead; IKM_SAVED_MSG_SIZE doesn't.
+ *
+ * We use the page size for IKM_SAVED_KMSG_SIZE to make sure the
+ * page is pinned to a single processor.
+ */
+
#define IKM_SAVED_KMSG_SIZE PAGE_SIZE	/* whole buffer, overhead included */
#define IKM_SAVED_MSG_SIZE ikm_less_overhead(IKM_SAVED_KMSG_SIZE)	/* message part only */

/* Allocate a kmsg buffer large enough for a message of `size' bytes. */
#define ikm_alloc(size) \
 ((ipc_kmsg_t) kalloc(ikm_plus_overhead(size)))
+
+/*
+ * The conversion between userland and kernel-land has to convert from port
+ * names to ports. This may increase the size that needs to be allocated
+ * on the kernel size. At worse the message is full of port names to be
+ * converted.
+ */
+#define IKM_EXPAND_FACTOR ((sizeof(mach_port_t) + sizeof(mach_port_name_t) - 1) / sizeof(mach_port_name_t))
+/* But make sure it's not the converse. */
+_Static_assert(sizeof(mach_port_t) >= sizeof(mach_port_name_t));
+
/* Initialize a kmsg whose message part is `size' bytes. */
#define ikm_init(kmsg, size) \
MACRO_BEGIN \
 ikm_init_special((kmsg), ikm_plus_overhead(size)); \
MACRO_END

/* Initialize a kmsg given the total buffer size (may be a special
   non-positive sentinel; see below). */
#define ikm_init_special(kmsg, size) \
MACRO_BEGIN \
 (kmsg)->ikm_size = (size); \
 (kmsg)->ikm_marequest = IMAR_NULL; \
MACRO_END

/* Debug check that a kmsg is in its freshly-initialized state. */
#define ikm_check_initialized(kmsg, size) \
MACRO_BEGIN \
 assert((kmsg)->ikm_size == (size)); \
 assert((kmsg)->ikm_marequest == IMAR_NULL); \
MACRO_END

/*
 * Non-positive message sizes are special. They indicate that
 * the message buffer doesn't come from ikm_alloc and
 * requires some special handling to free.
 *
 * ipc_kmsg_free is the non-macro form of ikm_free.
 * It frees kmsgs of all varieties.
 */

#define IKM_SIZE_NORMA 0
#define IKM_SIZE_NETWORK -1

/* Free a kmsg: directly via kfree when it came from ikm_alloc
   (positive size), otherwise through ipc_kmsg_free. */
#define ikm_free(kmsg) \
MACRO_BEGIN \
 vm_size_t _size = (kmsg)->ikm_size; \
 \
 if ((integer_t)_size > 0) \
 kfree((vm_offset_t) (kmsg), _size); \
 else \
 ipc_kmsg_free(kmsg); \
MACRO_END
+
+/*
+ * struct ipc_kmsg_queue is defined in ipc/ipc_kmsg_queue.h
+ */
+
+#include <ipc/ipc_kmsg_queue.h>
+
typedef struct ipc_kmsg_queue *ipc_kmsg_queue_t;

#define IKMQ_NULL ((ipc_kmsg_queue_t) 0)


/* Make a message queue empty. */
#define ipc_kmsg_queue_init(queue) \
MACRO_BEGIN \
 (queue)->ikmq_base = IKM_NULL; \
MACRO_END

/* A queue is empty iff it has no head kmsg. */
#define ipc_kmsg_queue_empty(queue) ((queue)->ikmq_base == IKM_NULL)
+
+/* Enqueue a kmsg */
+extern void ipc_kmsg_enqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+/* Dequeue and return a kmsg */
+extern ipc_kmsg_t ipc_kmsg_dequeue(
+ ipc_kmsg_queue_t queue);
+
+/* Pull a kmsg out of a queue */
+extern void ipc_kmsg_rmqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+#define ipc_kmsg_queue_first(queue) ((queue)->ikmq_base)
+
+/* Return the kmsg following the given kmsg */
+extern ipc_kmsg_t ipc_kmsg_queue_next(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
/*
 * Remove the kmsg at the head of the queue (caller asserts kmsg IS
 * the head).  A single-element queue becomes empty; otherwise the
 * head advances and kmsg is unlinked from the circular list.  The
 * removed kmsg's links are poisoned under MACH_IPC_TEST.
 */
#define ipc_kmsg_rmqueue_first_macro(queue, kmsg) \
MACRO_BEGIN \
 ipc_kmsg_t _next; \
 \
 assert((queue)->ikmq_base == (kmsg)); \
 \
 _next = (kmsg)->ikm_next; \
 if (_next == (kmsg)) { \
 /* only element: queue becomes empty */ \
 assert((kmsg)->ikm_prev == (kmsg)); \
 (queue)->ikmq_base = IKM_NULL; \
 } else { \
 ipc_kmsg_t _prev = (kmsg)->ikm_prev; \
 \
 (queue)->ikmq_base = _next; \
 _next->ikm_prev = _prev; \
 _prev->ikm_next = _next; \
 } \
 ikm_mark_bogus (kmsg); \
MACRO_END
+
/*
 * Append kmsg at the tail of the circular queue.  ikmq_base is the
 * head, so the tail is the head's ikm_prev; an empty queue gets kmsg
 * as its sole, self-linked element.
 */
#define ipc_kmsg_enqueue_macro(queue, kmsg) \
MACRO_BEGIN \
 ipc_kmsg_t _first = (queue)->ikmq_base; \
 \
 if (_first == IKM_NULL) { \
 (queue)->ikmq_base = (kmsg); \
 (kmsg)->ikm_next = (kmsg); \
 (kmsg)->ikm_prev = (kmsg); \
 } else { \
 ipc_kmsg_t _last = _first->ikm_prev; \
 \
 (kmsg)->ikm_next = _first; \
 (kmsg)->ikm_prev = _last; \
 _first->ikm_prev = (kmsg); \
 _last->ikm_next = (kmsg); \
 } \
MACRO_END
+
+extern void
+ipc_kmsg_destroy(ipc_kmsg_t);
+
+extern void
+ipc_kmsg_clean(ipc_kmsg_t);
+
+extern void
+ipc_kmsg_free(ipc_kmsg_t);
+
+extern mach_msg_return_t
+ipc_kmsg_get(mach_msg_user_header_t *, mach_msg_size_t, ipc_kmsg_t *);
+
+extern mach_msg_return_t
+ipc_kmsg_get_from_kernel(mach_msg_header_t *, mach_msg_size_t, ipc_kmsg_t *);
+
+extern mach_msg_return_t
+ipc_kmsg_put(mach_msg_user_header_t *, ipc_kmsg_t, mach_msg_size_t);
+
+extern void
+ipc_kmsg_put_to_kernel(mach_msg_header_t *, ipc_kmsg_t, mach_msg_size_t);
+
+extern mach_msg_return_t
+ipc_kmsg_copyin_header(mach_msg_header_t *, ipc_space_t, mach_port_name_t);
+
+extern mach_msg_return_t
+ipc_kmsg_copyin(ipc_kmsg_t, ipc_space_t, vm_map_t, mach_port_name_t);
+
+extern void
+ipc_kmsg_copyin_from_kernel(ipc_kmsg_t);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_header(mach_msg_header_t *, ipc_space_t, mach_port_name_t);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_object(ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_name_t *);
+
+static inline mach_msg_return_t
+ipc_kmsg_copyout_object_to_port(ipc_space_t space, ipc_object_t object,
+ mach_msg_type_name_t msgt_name, mach_port_t *portp)
+{
+ mach_port_name_t name;;
+ mach_msg_return_t mr;
+ mr = ipc_kmsg_copyout_object(space, object, msgt_name, &name);
+ *portp = (mach_port_t)name;
+ return mr;
+}
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_body(ipc_kmsg_t, ipc_space_t, vm_map_t);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout(ipc_kmsg_t, ipc_space_t, vm_map_t, mach_port_name_t);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_pseudo(ipc_kmsg_t, ipc_space_t, vm_map_t);
+
+extern void
+ipc_kmsg_copyout_dest(ipc_kmsg_t, ipc_space_t);
+
+#endif /* _IPC_IPC_KMSG_H_ */
diff --git a/ipc/ipc_kmsg_queue.h b/ipc/ipc_kmsg_queue.h
new file mode 100644
index 0000000..b4b3df1
--- /dev/null
+++ b/ipc/ipc_kmsg_queue.h
@@ -0,0 +1,31 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _IPC_KMSG_QUEUE_H_
+#define _IPC_KMSG_QUEUE_H_
/* A queue of kernel messages: just the head of a circular list of
   kmsgs (IKM_NULL when empty). */
struct ipc_kmsg_queue {
	struct ipc_kmsg *ikmq_base;
};
+#endif /* _IPC_KMSG_QUEUE_H_ */
+
diff --git a/ipc/ipc_machdep.h b/ipc/ipc_machdep.h
new file mode 100755
index 0000000..2871fc3
--- /dev/null
+++ b/ipc/ipc_machdep.h
@@ -0,0 +1,39 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _IPC_IPC_MACHDEP_H_
+#define _IPC_IPC_MACHDEP_H_
+
+#include <mach/message.h>
+
+/*
+ * At times, we need to know the size of a port in bits
+ */
+
#define PORT_T_SIZE_IN_BITS (sizeof(mach_port_t)*8)	/* kernel-side port representation */
#define PORT_NAME_T_SIZE_IN_BITS (sizeof(mach_port_name_t)*8)	/* userland port name */
+
+#endif /* _IPC_IPC_MACHDEP_H_ */
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
new file mode 100644
index 0000000..c096fe2
--- /dev/null
+++ b/ipc/ipc_marequest.c
@@ -0,0 +1,437 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_marequest.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to handle msg-accepted requests.
+ */
+
+#include <mach/message.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/kalloc.h>
+#include <kern/slab.h>
+#include <ipc/port.h>
+#include <ipc/ipc_init.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+
+#if MACH_IPC_DEBUG
+#include <mach/kern_return.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#endif
+
+
/* Slab cache backing struct ipc_marequest allocations. */
struct kmem_cache ipc_marequest_cache;

#define imar_alloc() ((ipc_marequest_t) kmem_cache_alloc(&ipc_marequest_cache))
#define imar_free(imar) kmem_cache_free(&ipc_marequest_cache, (vm_offset_t) (imar))

typedef unsigned int ipc_marequest_index_t;

/* Number of hash buckets (kept a power of two by ipc_marequest_init)
   and the matching index mask. */
ipc_marequest_index_t ipc_marequest_size;
ipc_marequest_index_t ipc_marequest_mask;

/*
 * Bucket index for a (space, name) pair: mixes the space pointer
 * (shifted to discard alignment zeroes) with the name's index and
 * generation, masked down to the table size.
 */
#define IMAR_HASH(space, name) \
 ((((ipc_marequest_index_t)((vm_offset_t)space) >> 4) + \
 MACH_PORT_INDEX(name) + MACH_PORT_NGEN(name)) & \
 ipc_marequest_mask)

/* A hash bucket: a simple-locked, singly-linked chain of requests. */
typedef struct ipc_marequest_bucket {
	decl_simple_lock_data(, imarb_lock_data)
	ipc_marequest_t imarb_head;	/* first request in the chain */
} *ipc_marequest_bucket_t;

#define IMARB_NULL ((ipc_marequest_bucket_t) 0)

#define imarb_lock_init(imarb) simple_lock_init(&(imarb)->imarb_lock_data)
#define imarb_lock(imarb) simple_lock(&(imarb)->imarb_lock_data)
#define imarb_unlock(imarb) simple_unlock(&(imarb)->imarb_lock_data)

/* The hash table itself, allocated in ipc_marequest_init. */
ipc_marequest_bucket_t ipc_marequest_table;
+
+
+
+/*
+ * Routine: ipc_marequest_init
+ * Purpose:
+ * Initialize the msg-accepted request module.
+ */
+
void
ipc_marequest_init(void)
{
	ipc_marequest_index_t i;

	/* initialize ipc_marequest_size */

	ipc_marequest_size = IPC_MAREQUEST_SIZE;

	/* make sure it is a power of two */

	ipc_marequest_mask = ipc_marequest_size - 1;
	if ((ipc_marequest_size & ipc_marequest_mask) != 0) {
		unsigned int bit;

		/* round up to closest power of two */

		for (bit = 1;; bit <<= 1) {
			/* grow the mask one bit at a time until
			   size = mask + 1 is a power of two */
			ipc_marequest_mask |= bit;
			ipc_marequest_size = ipc_marequest_mask + 1;

			if ((ipc_marequest_size & ipc_marequest_mask) == 0)
				break;
		}
	}

	/* allocate ipc_marequest_table */

	ipc_marequest_table = (ipc_marequest_bucket_t)
		kalloc((vm_size_t) (ipc_marequest_size *
				    sizeof(struct ipc_marequest_bucket)));
	/* NOTE(review): allocation is only assert-checked; a non-debug
	   build would dereference a null table below.  Presumably boot-time
	   allocation cannot fail -- confirm. */
	assert(ipc_marequest_table != IMARB_NULL);

	/* and initialize it */

	for (i = 0; i < ipc_marequest_size; i++) {
		ipc_marequest_bucket_t bucket;

		bucket = &ipc_marequest_table[i];
		imarb_lock_init(bucket);
		bucket->imarb_head = IMAR_NULL;
	}

	kmem_cache_init(&ipc_marequest_cache, "ipc_marequest",
		sizeof(struct ipc_marequest), 0, NULL, 0);
}
+
+/*
+ * Routine: ipc_marequest_create
+ * Purpose:
+ * Create a msg-accepted request, because
+ * a sender is forcing a message with MACH_SEND_NOTIFY.
+ *
+ * The "notify" argument should name a receive right
+ * that is used to create the send-once notify port.
+ * Conditions:
+ * Nothing locked; refs held for space and port.
+ * Returns:
+ * MACH_MSG_SUCCESS Msg-accepted request created.
+ * MACH_SEND_INVALID_NOTIFY The space is dead.
+ * MACH_SEND_INVALID_NOTIFY The notify port is bad.
+ * MACH_SEND_NOTIFY_IN_PROGRESS
+ * This space has already forced a message to this port.
+ * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
+ */
+
+mach_msg_return_t
+ipc_marequest_create(
+ ipc_space_t space,
+ ipc_port_t port,
+ mach_port_name_t notify,
+ ipc_marequest_t *marequestp)
+{
+ mach_port_name_t name;
+ ipc_entry_t entry;
+ ipc_port_t soright;
+ ipc_marequest_t marequest;
+ ipc_marequest_bucket_t bucket;
+
+ marequest = imar_alloc();
+ if (marequest == IMAR_NULL)
+ return MACH_SEND_NO_NOTIFY;
+
+ /*
+ * Delay creating the send-once right until
+ * we know there will be no errors. Otherwise,
+ * we would have to worry about disposing of it
+ * when it turned out it wasn't needed.
+ */
+
+ is_write_lock(space);
+ if (!space->is_active) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ if (ipc_right_reverse(space, (ipc_object_t) port, &name, &entry)) {
+ ipc_entry_bits_t bits;
+
+ /* port is locked and active */
+ ip_unlock(port);
+ bits = entry->ie_bits;
+
+ assert(port == (ipc_port_t) entry->ie_object);
+ assert(bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+ if (bits & IE_BITS_MAREQUEST) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_NOTIFY_IN_PROGRESS;
+ }
+
+ if ((soright = ipc_port_lookup_notify(space, notify))
+ == IP_NULL) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ entry->ie_bits = bits | IE_BITS_MAREQUEST;
+
+ is_reference(space);
+ marequest->imar_space = space;
+ marequest->imar_name = name;
+ marequest->imar_soright = soright;
+
+ bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+ imarb_lock(bucket);
+
+ marequest->imar_next = bucket->imarb_head;
+ bucket->imarb_head = marequest;
+
+ imarb_unlock(bucket);
+ } else {
+ if ((soright = ipc_port_lookup_notify(space, notify))
+ == IP_NULL) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ is_reference(space);
+ marequest->imar_space = space;
+ marequest->imar_name = MACH_PORT_NULL;
+ marequest->imar_soright = soright;
+ }
+
+ is_write_unlock(space);
+ *marequestp = marequest;
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_marequest_cancel
+ * Purpose:
+ * Cancel a msg-accepted request, because
+ * the space's entry is being destroyed.
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
void
ipc_marequest_cancel(
	ipc_space_t space,
	mach_port_name_t name)
{
	ipc_marequest_bucket_t bucket;
	ipc_marequest_t marequest, *last;

	assert(space->is_active);

	/* find the request in its hash bucket */
	bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
	imarb_lock(bucket);

	for (last = &bucket->imarb_head;
	     (marequest = *last) != IMAR_NULL;
	     last = &marequest->imar_next)
		if ((marequest->imar_space == space) &&
		    (marequest->imar_name == name))
			break;

	/* caller guarantees the request exists (IE_BITS_MAREQUEST set) */
	assert(marequest != IMAR_NULL);
	*last = marequest->imar_next;
	imarb_unlock(bucket);

	/* null the name so ipc_marequest_destroy skips the table and
	   entry cleanup and reports a null name in the notification */
	marequest->imar_name = MACH_PORT_NAME_NULL;
}
+
+/*
+ * Routine: ipc_marequest_rename
+ * Purpose:
+ * Rename a msg-accepted request, because the entry
+ * in the space is being renamed.
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
void
ipc_marequest_rename(
	ipc_space_t space,
	mach_port_name_t old,
	mach_port_name_t new)
{
	ipc_marequest_bucket_t bucket;
	ipc_marequest_t marequest, *last;

	assert(space->is_active);

	/* unlink the request from the bucket hashed on the old name */
	bucket = &ipc_marequest_table[IMAR_HASH(space, old)];
	imarb_lock(bucket);

	for (last = &bucket->imarb_head;
	     (marequest = *last) != IMAR_NULL;
	     last = &marequest->imar_next)
		if ((marequest->imar_space == space) &&
		    (marequest->imar_name == old))
			break;

	assert(marequest != IMAR_NULL);
	*last = marequest->imar_next;
	imarb_unlock(bucket);

	/* imar_name is changed while the request is in no bucket;
	   the space write lock keeps renames/cancels serialized */
	marequest->imar_name = new;

	/* relink into the bucket hashed on the new name */
	bucket = &ipc_marequest_table[IMAR_HASH(space, new)];
	imarb_lock(bucket);

	marequest->imar_next = bucket->imarb_head;
	bucket->imarb_head = marequest;

	imarb_unlock(bucket);
}
+
+/*
+ * Routine: ipc_marequest_destroy
+ * Purpose:
+ * Destroy a msg-accepted request, because
+ * the kernel message is being received/destroyed.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_marequest_destroy(ipc_marequest_t marequest)
+{
+ ipc_space_t space = marequest->imar_space;
+ mach_port_name_t name;
+ ipc_port_t soright;
+
+ is_write_lock(space);
+
+ name = marequest->imar_name;
+ soright = marequest->imar_soright;
+
+ if (name != MACH_PORT_NULL) {
+ ipc_marequest_bucket_t bucket;
+ ipc_marequest_t this, *last;
+
+ bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+ imarb_lock(bucket);
+
+ for (last = &bucket->imarb_head;
+ (this = *last) != IMAR_NULL;
+ last = &this->imar_next)
+ if ((this->imar_space == space) &&
+ (this->imar_name == name))
+ break;
+
+ assert(this == marequest);
+ *last = this->imar_next;
+ imarb_unlock(bucket);
+
+ if (space->is_active) {
+ ipc_entry_t entry;
+
+ entry = ipc_entry_lookup(space, name);
+ assert(entry != IE_NULL);
+ assert(entry->ie_bits & IE_BITS_MAREQUEST);
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+ entry->ie_bits &= ~IE_BITS_MAREQUEST;
+
+ } else
+ name = MACH_PORT_NAME_NULL;
+ }
+
+ is_write_unlock(space);
+ is_release(space);
+
+ imar_free(marequest);
+
+ assert(soright != IP_NULL);
+ ipc_notify_msg_accepted(soright, name);
+}
+
+#if MACH_IPC_DEBUG
+
+
+/*
+ * Routine: ipc_marequest_info
+ * Purpose:
+ * Return information about the marequest hash table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+ipc_marequest_info(
+ unsigned int *maxp,
+ hash_info_bucket_t *info,
+ unsigned int count)
+{
+ ipc_marequest_index_t i;
+
+ if (ipc_marequest_size < count)
+ count = ipc_marequest_size;
+
+ for (i = 0; i < count; i++) {
+ ipc_marequest_bucket_t bucket = &ipc_marequest_table[i];
+ unsigned int bucket_count = 0;
+ ipc_marequest_t marequest;
+
+ imarb_lock(bucket);
+ for (marequest = bucket->imarb_head;
+ marequest != IMAR_NULL;
+ marequest = marequest->imar_next)
+ bucket_count++;
+ imarb_unlock(bucket);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ *maxp = (unsigned int)-1;
+ return ipc_marequest_size;
+}
+
+#endif /* MACH_IPC_DEBUG */
diff --git a/ipc/ipc_marequest.h b/ipc/ipc_marequest.h
new file mode 100644
index 0000000..a55d4e2
--- /dev/null
+++ b/ipc/ipc_marequest.h
@@ -0,0 +1,99 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_marequest.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for msg-accepted requests.
+ */
+
+#ifndef _IPC_IPC_MAREQUEST_H_
+#define _IPC_IPC_MAREQUEST_H_
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach_debug/hash_info.h>
+#include <ipc/ipc_types.h>
+
+/*
+ * A msg-accepted request is made when MACH_SEND_NOTIFY is used
+ * to force a message to a send right. The IE_BITS_MAREQUEST bit
+ * in an entry indicates the entry is blocked because MACH_SEND_NOTIFY
+ * has already been used to force a message. The kmsg holds
+ * a pointer to the marequest; it is destroyed when the kmsg
+ * is received/destroyed. (If the send right is destroyed,
+ * this just changes imar_name. If the space is destroyed,
+ * the marequest is left unchanged.)
+ *
+ * Locking considerations: The imar_space field is read-only and
+ * points to the space which locks the imar_name field. imar_soright
+ * is read-only. Normally it is a non-null send-once right for
+ * the msg-accepted notification, but in compat mode it is null
+ * and the notification goes to the space's notify port. Normally
+ * imar_name is non-null, but if the send right is destroyed then
+ * it is changed to be null. imar_next is locked by a bucket lock;
+ * imar_name is read-only when the request is in a bucket. (So lookups
+ * in the bucket can safely check imar_space and imar_name.)
+ * imar_space and imar_soright both hold references.
+ */
+
typedef struct ipc_marequest {
	struct ipc_space *imar_space;		/* requesting space (read-only; holds a ref) */
	mach_port_name_t imar_name;		/* send-right name, or null once the right dies */
	struct ipc_port *imar_soright;		/* send-once right for the notification (holds a ref) */
	struct ipc_marequest *imar_next;	/* hash-bucket chain link */
} *ipc_marequest_t;

#define IMAR_NULL ((ipc_marequest_t) 0)

/* Requested hash-table size; ipc_marequest_init rounds it up to a
   power of two if necessary. */
#define IPC_MAREQUEST_SIZE 16
+
+extern void
+ipc_marequest_init(void);
+
+#if MACH_IPC_DEBUG
+
+extern unsigned int
+ipc_marequest_info(unsigned int *, hash_info_bucket_t *, unsigned int);
+
+#endif /* MACH_IPC_DEBUG */
+
+extern mach_msg_return_t
+ipc_marequest_create(ipc_space_t space, ipc_port_t port,
+ mach_port_name_t notify, ipc_marequest_t *marequestp);
+
+extern void
+ipc_marequest_cancel(ipc_space_t space, mach_port_name_t name);
+
+extern void
+ipc_marequest_rename(ipc_space_t space,
+ mach_port_name_t old, mach_port_name_t new);
+
+extern void
+ipc_marequest_destroy(ipc_marequest_t marequest);
+
+#endif /* _IPC_IPC_MAREQUEST_H_ */
diff --git a/ipc/ipc_mqueue.c b/ipc/ipc_mqueue.c
new file mode 100644
index 0000000..44e1eb9
--- /dev/null
+++ b/ipc/ipc_mqueue.c
@@ -0,0 +1,695 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_mqueue.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC message queues.
+ */
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include <machine/copy_user.h>
+#include <kern/assert.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/sched_prim.h>
+#include <kern/ipc_sched.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_marequest.h>
+
+
+
+/*
+ * Routine: ipc_mqueue_init
+ * Purpose:
+ * Initialize a newly-allocated message queue.
+ */
+
+void
+ipc_mqueue_init(
+	ipc_mqueue_t	mqueue)
+{
+	/* A fresh queue holds no messages and has no waiting receivers. */
+	imq_lock_init(mqueue);
+	ipc_kmsg_queue_init(&mqueue->imq_messages);
+	ipc_thread_queue_init(&mqueue->imq_threads);
+}
+
+/*
+ * Routine: ipc_mqueue_move
+ * Purpose:
+ * Move messages from one queue (source) to another (dest).
+ * Only moves messages sent to the specified port.
+ * Conditions:
+ * Both queues must be locked.
+ * (This is sufficient to manipulate port->ip_seqno.)
+ */
+
+void
+ipc_mqueue_move(
+	ipc_mqueue_t	dest,
+	ipc_mqueue_t	source,
+	const ipc_port_t port)
+{
+	ipc_kmsg_queue_t oldq, newq;
+	ipc_thread_queue_t blockedq;
+	ipc_kmsg_t kmsg, next;
+	ipc_thread_t th;
+
+	oldq = &source->imq_messages;
+	newq = &dest->imq_messages;
+	blockedq = &dest->imq_threads;
+
+	/* walk source's messages; "next" is fetched up front because
+	   kmsg may be unlinked from oldq inside the loop body */
+	for (kmsg = ipc_kmsg_queue_first(oldq);
+	     kmsg != IKM_NULL; kmsg = next) {
+		next = ipc_kmsg_queue_next(oldq, kmsg);
+
+		/* only move messages sent to port */
+
+		if (kmsg->ikm_header.msgh_remote_port != (mach_port_t) port)
+			continue;
+
+		ipc_kmsg_rmqueue(oldq, kmsg);
+
+		/* before adding kmsg to newq, check for a blocked receiver */
+
+		while ((th = ipc_thread_dequeue(blockedq)) != ITH_NULL) {
+			assert(ipc_kmsg_queue_empty(newq));
+
+			/* the receiver is made runnable either way; one whose
+			   buffer is too small resumes with MACH_RCV_TOO_LARGE */
+			thread_go(th);
+
+			/* check if the receiver can handle the message */
+
+			if (kmsg->ikm_header.msgh_size <= th->ith_msize) {
+				th->ith_state = MACH_MSG_SUCCESS;
+				th->ith_kmsg = kmsg;
+				th->ith_seqno = port->ip_seqno++;
+
+				goto next_kmsg;
+			}
+
+			th->ith_state = MACH_RCV_TOO_LARGE;
+			th->ith_msize = kmsg->ikm_header.msgh_size;
+		}
+
+		/* didn't find a receiver to handle the message */
+
+		ipc_kmsg_enqueue(newq, kmsg);
+	    next_kmsg:;
+	}
+}
+
+/*
+ * Routine: ipc_mqueue_changed
+ * Purpose:
+ * Wake up receivers waiting in a message queue.
+ * Conditions:
+ * The message queue is locked.
+ */
+
+void
+ipc_mqueue_changed(
+	ipc_mqueue_t	mqueue,
+	mach_msg_return_t mr)
+{
+	ipc_thread_t th;
+
+	/* Resume every blocked receiver with mr as its receive result
+	   (e.g. MACH_RCV_PORT_DIED or MACH_RCV_PORT_CHANGED). */
+	while ((th = ipc_thread_dequeue(&mqueue->imq_threads)) != ITH_NULL) {
+		th->ith_state = mr;
+		thread_go(th);
+	}
+}
+
+/*
+ * Routine: ipc_mqueue_send
+ * Purpose:
+ * Send a message to a port. The message holds a reference
+ * for the destination port in the msgh_remote_port field.
+ *
+ * If unsuccessful, the caller still has possession of
+ * the message and must do something with it. If successful,
+ * the message is queued, given to a receiver, destroyed,
+ * or handled directly by the kernel via mach_msg.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS The message was accepted.
+ * MACH_SEND_TIMED_OUT Caller still has message.
+ * MACH_SEND_INTERRUPTED Caller still has message.
+ */
+
+mach_msg_return_t
+ipc_mqueue_send(
+	ipc_kmsg_t	kmsg,
+	mach_msg_option_t option,
+	mach_msg_timeout_t time_out)
+{
+	ipc_port_t port;
+
+	port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+	assert(IP_VALID(port));
+
+	ip_lock(port);
+
+	if (port->ip_receiver == ipc_space_kernel) {
+		ipc_kmsg_t reply;
+
+		/*
+		 *	We can check ip_receiver == ipc_space_kernel
+		 *	before checking that the port is active because
+		 *	ipc_port_dealloc_kernel clears ip_receiver
+		 *	before destroying a kernel port.
+		 */
+
+		assert(ip_active(port));
+		ip_unlock(port);
+
+		/* kernel ports are serviced synchronously; any reply kmsg
+		   is delivered ignoring the queue limit */
+		reply = ipc_kobject_server(kmsg);
+		if (reply != IKM_NULL)
+			ipc_mqueue_send_always(reply);
+
+		return MACH_MSG_SUCCESS;
+	}
+
+	for (;;) {
+		ipc_thread_t self;
+
+		/*
+		 *	Can't deliver to a dead port.
+		 *	However, we can pretend it got sent
+		 *	and was then immediately destroyed.
+		 */
+
+		if (!ip_active(port)) {
+			/*
+			 *	We can't let ipc_kmsg_destroy deallocate
+			 *	the port right, because we might end up
+			 *	in an infinite loop trying to deliver
+			 *	a send-once notification.
+			 */
+
+			ip_release(port);
+			ip_check_unlock(port);
+			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+			ipc_kmsg_destroy(kmsg);
+			return MACH_MSG_SUCCESS;
+		}
+
+		/*
+		 *	Don't block if:
+		 *	1) We're under the queue limit.
+		 *	2) Caller used the MACH_SEND_ALWAYS internal option.
+		 *	3) Message is sent to a send-once right.
+		 */
+
+		if ((port->ip_msgcount < port->ip_qlimit) ||
+		    (option & MACH_SEND_ALWAYS) ||
+		    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+						MACH_MSG_TYPE_PORT_SEND_ONCE))
+			break;
+
+		/* must block waiting for queue to clear */
+
+		self = current_thread();
+
+		if (option & MACH_SEND_TIMEOUT) {
+			if (time_out == 0) {
+				ip_unlock(port);
+				return MACH_SEND_TIMED_OUT;
+			}
+
+			thread_will_wait_with_timeout(self, time_out);
+		} else
+			thread_will_wait(self);
+
+		ipc_thread_enqueue(&port->ip_blocked, self);
+		self->ith_state = MACH_SEND_IN_PROGRESS;
+
+		ip_unlock(port);
+		counter(c_ipc_mqueue_send_block++);
+		thread_block(thread_no_continuation);
+		ip_lock(port);
+
+		/* why did we wake up? */
+
+		/* MACH_MSG_SUCCESS means a receiver removed us from
+		   ip_blocked after making room; retry the limit check */
+		if (self->ith_state == MACH_MSG_SUCCESS)
+			continue;
+		assert(self->ith_state == MACH_SEND_IN_PROGRESS);
+
+		/* take ourselves off blocked queue */
+
+		ipc_thread_rmqueue(&port->ip_blocked, self);
+
+		/*
+		 *	Thread wakeup-reason field tells us why
+		 *	the wait was interrupted.
+		 */
+
+		switch (self->ith_wait_result) {
+		    case THREAD_INTERRUPTED:
+			/* send was interrupted - give up */
+
+			ip_unlock(port);
+			return MACH_SEND_INTERRUPTED;
+
+		    case THREAD_TIMED_OUT:
+			/* timeout expired; loop once more so the dead-port
+			   check runs, then fail via the time_out == 0 path */
+
+			assert(option & MACH_SEND_TIMEOUT);
+			time_out = 0;
+			break;
+
+		    case THREAD_RESTART:
+		    default:
+#if MACH_ASSERT
+			assert(!"ipc_mqueue_send");
+#else
+			panic("ipc_mqueue_send");
+#endif
+		}
+	}
+
+	if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
+		ip_unlock(port);
+
+		/* don't allow the creation of a circular loop */
+
+		ipc_kmsg_destroy(kmsg);
+		return MACH_MSG_SUCCESS;
+	}
+
+    {
+	ipc_mqueue_t mqueue;
+	ipc_pset_t pset;
+	ipc_thread_t receiver;
+	ipc_thread_queue_t receivers;
+
+	port->ip_msgcount++;
+	assert(port->ip_msgcount > 0);
+
+	/* deliver into the set's queue if the port is in a port set */
+	pset = port->ip_pset;
+	if (pset == IPS_NULL)
+		mqueue = &port->ip_messages;
+	else
+		mqueue = &pset->ips_messages;
+
+	imq_lock(mqueue);
+	receivers = &mqueue->imq_threads;
+
+	/*
+	 *	Can unlock the port now that the msg queue is locked
+	 *	and we know the port is active.  While the msg queue
+	 *	is locked, we have control of the kmsg, so the ref in
+	 *	it for the port is still good.  If the msg queue is in
+	 *	a set (dead or alive), then we're OK because the port
+	 *	is still a member of the set and the set won't go away
+	 *	until the port is taken out, which tries to lock the
+	 *	set's msg queue to remove the port's msgs.
+	 */
+
+	ip_unlock(port);
+
+	/* check for a receiver for the message */
+
+	for (;;) {
+		receiver = ipc_thread_queue_first(receivers);
+		if (receiver == ITH_NULL) {
+			/* no receivers; queue kmsg */
+
+			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
+			imq_unlock(mqueue);
+			break;
+		}
+
+		ipc_thread_rmqueue_first_macro(receivers, receiver);
+		assert(ipc_kmsg_queue_empty(&mqueue->imq_messages));
+
+		if (kmsg->ikm_header.msgh_size <= receiver->ith_msize) {
+			/* got a successful receiver */
+
+			receiver->ith_state = MACH_MSG_SUCCESS;
+			receiver->ith_kmsg = kmsg;
+			receiver->ith_seqno = port->ip_seqno++;
+			imq_unlock(mqueue);
+
+			thread_go(receiver);
+			break;
+		}
+
+		/* receiver's buffer is too small; wake it with the
+		   required size and try the next waiting receiver */
+		receiver->ith_state = MACH_RCV_TOO_LARGE;
+		receiver->ith_msize = kmsg->ikm_header.msgh_size;
+		thread_go(receiver);
+	}
+    }
+
+	current_task()->messages_sent++;
+
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_mqueue_copyin
+ * Purpose:
+ * Convert a name in a space to a message queue.
+ * Conditions:
+ * Nothing locked. If successful, the message queue
+ * is returned locked and caller gets a ref for the object.
+ * This ref ensures the continued existence of the queue.
+ * Returns:
+ * MACH_MSG_SUCCESS Found a message queue.
+ * MACH_RCV_INVALID_NAME The space is dead.
+ * MACH_RCV_INVALID_NAME The name doesn't denote a right.
+ * MACH_RCV_INVALID_NAME
+ * The denoted right is not receive or port set.
+ * MACH_RCV_IN_SET Receive right is a member of a set.
+ */
+
+mach_msg_return_t
+ipc_mqueue_copyin(
+	ipc_space_t	space,
+	mach_port_name_t name,
+	ipc_mqueue_t	*mqueuep,
+	ipc_object_t	*objectp)
+{
+	ipc_entry_t entry;
+	ipc_entry_bits_t bits;
+	ipc_object_t object;
+	ipc_mqueue_t mqueue;
+
+	is_read_lock(space);
+	if (!space->is_active) {
+		is_read_unlock(space);
+		return MACH_RCV_INVALID_NAME;
+	}
+
+	entry = ipc_entry_lookup(space, name);
+	if (entry == IE_NULL) {
+		is_read_unlock(space);
+		return MACH_RCV_INVALID_NAME;
+	}
+
+	bits = entry->ie_bits;
+	object = entry->ie_object;
+
+	if (bits & MACH_PORT_TYPE_RECEIVE) {
+		ipc_port_t port;
+		ipc_pset_t pset;
+
+		port = (ipc_port_t) object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+		is_read_unlock(space);
+
+		pset = port->ip_pset;
+		if (pset != IPS_NULL) {
+			ips_lock(pset);
+			if (ips_active(pset)) {
+				/* can't receive directly from a port
+				   that is a member of a live set */
+				ips_unlock(pset);
+				ip_unlock(port);
+				return MACH_RCV_IN_SET;
+			}
+
+			/* the set is dead: finish detaching the port
+			   from it as a side effect of the lookup */
+			ipc_pset_remove(pset, port);
+			ips_check_unlock(pset);
+			assert(port->ip_pset == IPS_NULL);
+		}
+
+		mqueue = &port->ip_messages;
+	} else if (bits & MACH_PORT_TYPE_PORT_SET) {
+		ipc_pset_t pset;
+
+		pset = (ipc_pset_t) object;
+		assert(pset != IPS_NULL);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+		assert(pset->ips_local_name == name);
+		is_read_unlock(space);
+
+		mqueue = &pset->ips_messages;
+	} else {
+		is_read_unlock(space);
+		return MACH_RCV_INVALID_NAME;
+	}
+
+	/*
+	 *	At this point, the object is locked and active,
+	 *	the space is unlocked, and mqueue is initialized.
+	 */
+
+	/* take the caller's ref while the object is still locked,
+	   then trade the object lock for the mqueue lock */
+	io_reference(object);
+	imq_lock(mqueue);
+	io_unlock(object);
+
+	*objectp = object;
+	*mqueuep = mqueue;
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_mqueue_receive
+ * Purpose:
+ * Receive a message from a message queue.
+ *
+ * If continuation is non-zero, then we might discard
+ * our kernel stack when we block. We will continue
+ * after unblocking by executing continuation.
+ *
+ * If resume is true, then we are resuming a receive
+ * operation after a blocked receive discarded our stack.
+ * Conditions:
+ * The message queue is locked; it will be returned unlocked.
+ *
+ * Our caller must hold a reference for the port or port set
+ * to which this queue belongs, to keep the queue
+ * from being deallocated. Furthermore, the port or set
+ * must have been active when the queue was locked.
+ *
+ * The kmsg is returned with clean header fields
+ * and with the circular bit turned off.
+ * Returns:
+ * MACH_MSG_SUCCESS Message returned in kmsgp.
+ * MACH_RCV_TOO_LARGE Message size returned in kmsgp.
+ * MACH_RCV_TIMED_OUT No message obtained.
+ * MACH_RCV_INTERRUPTED No message obtained.
+ * MACH_RCV_PORT_DIED Port/set died; no message.
+ * MACH_RCV_PORT_CHANGED Port moved into set; no msg.
+ *
+ */
+
+mach_msg_return_t
+ipc_mqueue_receive(
+	ipc_mqueue_t	mqueue,
+	mach_msg_option_t option,
+	mach_msg_size_t max_size,
+	mach_msg_timeout_t time_out,
+	boolean_t	resume,
+	continuation_t	continuation,
+	ipc_kmsg_t	*kmsgp,
+	mach_port_seqno_t *seqnop)
+{
+	ipc_port_t port;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+
+    {
+	ipc_kmsg_queue_t kmsgs = &mqueue->imq_messages;
+	ipc_thread_t self = current_thread();
+
+	if (resume)
+		/* continuing after a stack-discarding block: jump straight
+		   to wakeup processing (the queue is re-locked there) */
+		goto after_thread_block;
+
+	for (;;) {
+		kmsg = ipc_kmsg_queue_first(kmsgs);
+		if (kmsg != IKM_NULL) {
+			/* check space requirements */
+
+			if (msg_usize(&kmsg->ikm_header) > max_size) {
+				/* too big: hand back the needed size
+				   through kmsgp instead of a kmsg */
+				* (mach_msg_size_t *) kmsgp =
+					kmsg->ikm_header.msgh_size;
+				imq_unlock(mqueue);
+				return MACH_RCV_TOO_LARGE;
+			}
+
+			ipc_kmsg_rmqueue_first_macro(kmsgs, kmsg);
+			port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+			seqno = port->ip_seqno++;
+			break;
+		}
+
+		/* must block waiting for a message */
+
+		if (option & MACH_RCV_TIMEOUT) {
+			if (time_out == 0) {
+				imq_unlock(mqueue);
+				return MACH_RCV_TIMED_OUT;
+			}
+
+			thread_will_wait_with_timeout(self, time_out);
+		} else
+			thread_will_wait(self);
+
+		ipc_thread_enqueue_macro(&mqueue->imq_threads, self);
+		self->ith_state = MACH_RCV_IN_PROGRESS;
+		self->ith_msize = max_size;
+
+		imq_unlock(mqueue);
+		if (continuation != (void (*)(void)) 0) {
+			counter(c_ipc_mqueue_receive_block_user++);
+		} else {
+			counter(c_ipc_mqueue_receive_block_kernel++);
+		}
+		thread_block(continuation);
+	after_thread_block:
+		imq_lock(mqueue);
+
+		/* why did we wake up? */
+
+		if (self->ith_state == MACH_MSG_SUCCESS) {
+			/* pick up the message that was handed to us */
+
+			kmsg = self->ith_kmsg;
+			seqno = self->ith_seqno;
+			port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+			break;
+		}
+
+		switch (self->ith_state) {
+		    case MACH_RCV_TOO_LARGE:
+			/* pick up size of the too-large message */
+
+			* (mach_msg_size_t *) kmsgp = self->ith_msize;
+			/* fall-through */
+
+		    case MACH_RCV_PORT_DIED:
+		    case MACH_RCV_PORT_CHANGED:
+			/* something bad happened to the port/set */
+
+			imq_unlock(mqueue);
+			return self->ith_state;
+
+		    case MACH_RCV_IN_PROGRESS:
+			/*
+			 *	Awakened for other than IPC completion.
+			 *	Remove ourselves from the waiting queue,
+			 *	then check the wakeup cause.
+			 */
+
+			ipc_thread_rmqueue(&mqueue->imq_threads, self);
+
+			switch (self->ith_wait_result) {
+			    case THREAD_INTERRUPTED:
+				/* receive was interrupted - give up */
+
+				imq_unlock(mqueue);
+				return MACH_RCV_INTERRUPTED;
+
+			    case THREAD_TIMED_OUT:
+				/* timeout expired; loop once more and fail
+				   via the time_out == 0 path above */
+
+				assert(option & MACH_RCV_TIMEOUT);
+				time_out = 0;
+				break;
+
+			    case THREAD_RESTART:
+			    default:
+#if MACH_ASSERT
+				assert(!"ipc_mqueue_receive");
+#else
+				panic("ipc_mqueue_receive");
+#endif
+			}
+			break;
+
+		    default:
+#if MACH_ASSERT
+			assert(!"ipc_mqueue_receive: strange ith_state");
+#else
+			panic("ipc_mqueue_receive: strange ith_state");
+#endif
+		}
+	}
+
+	/* we have a kmsg; unlock the msg queue */
+
+	imq_unlock(mqueue);
+	assert(msg_usize(&kmsg->ikm_header) <= max_size);
+    }
+
+    {
+	ipc_marequest_t marequest;
+
+	/* the message has been received, so any pending msg-accepted
+	   request attached to the kmsg is resolved now */
+	marequest = kmsg->ikm_marequest;
+	if (marequest != IMAR_NULL) {
+		ipc_marequest_destroy(marequest);
+		kmsg->ikm_marequest = IMAR_NULL;
+	}
+	assert((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0);
+
+	assert(port == (ipc_port_t) kmsg->ikm_header.msgh_remote_port);
+	ip_lock(port);
+
+	if (ip_active(port)) {
+		ipc_thread_queue_t senders;
+		ipc_thread_t sender;
+
+		assert(port->ip_msgcount > 0);
+		port->ip_msgcount--;
+
+		/* we made room in the queue; unblock one waiting sender */
+		senders = &port->ip_blocked;
+		sender = ipc_thread_queue_first(senders);
+
+		if ((sender != ITH_NULL) &&
+		    (port->ip_msgcount < port->ip_qlimit)) {
+			ipc_thread_rmqueue(senders, sender);
+			sender->ith_state = MACH_MSG_SUCCESS;
+			thread_go(sender);
+		}
+	}
+
+	ip_unlock(port);
+    }
+
+	current_task()->messages_received++;
+
+	*kmsgp = kmsg;
+	*seqnop = seqno;
+	return MACH_MSG_SUCCESS;
+}
diff --git a/ipc/ipc_mqueue.h b/ipc/ipc_mqueue.h
new file mode 100644
index 0000000..dfac745
--- /dev/null
+++ b/ipc/ipc_mqueue.h
@@ -0,0 +1,112 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_mqueue.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for message queues.
+ */
+
+#ifndef _IPC_IPC_MQUEUE_H_
+#define _IPC_IPC_MQUEUE_H_
+
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/lock.h>
+#include <kern/macros.h>
+#include <ipc/ipc_kmsg_queue.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_thread.h>
+
+typedef struct ipc_mqueue {
+	decl_simple_lock_data(, imq_lock_data)
+	struct ipc_kmsg_queue imq_messages;	/* queued (undelivered) messages */
+	struct ipc_thread_queue imq_threads;	/* threads blocked waiting to receive */
+} *ipc_mqueue_t;
+
+#define	IMQ_NULL		((ipc_mqueue_t) 0)
+
+/* the imq lock protects both queues in the structure above */
+#define	imq_lock_init(mq)	simple_lock_init(&(mq)->imq_lock_data)
+#define	imq_lock(mq)		simple_lock(&(mq)->imq_lock_data)
+#define	imq_lock_try(mq)	simple_lock_try(&(mq)->imq_lock_data)
+#define	imq_unlock(mq)		simple_unlock(&(mq)->imq_lock_data)
+
+extern void
+ipc_mqueue_init(ipc_mqueue_t);
+
+extern void
+ipc_mqueue_move(ipc_mqueue_t, ipc_mqueue_t, ipc_port_t);
+
+extern void
+ipc_mqueue_changed(ipc_mqueue_t, mach_msg_return_t);
+
+extern mach_msg_return_t
+ipc_mqueue_send(ipc_kmsg_t, mach_msg_option_t, mach_msg_timeout_t);
+
+extern mach_msg_return_t
+ipc_mqueue_copyin(ipc_space_t, mach_port_name_t, ipc_mqueue_t *, ipc_object_t *);
+
+#define IMQ_NULL_CONTINUE ((void (*)()) 0)
+
+extern mach_msg_return_t
+ipc_mqueue_receive(ipc_mqueue_t, mach_msg_option_t,
+ mach_msg_size_t, mach_msg_timeout_t,
+ boolean_t, continuation_t,
+ ipc_kmsg_t *, mach_port_seqno_t *);
+
+/*
+ * extern void
+ * ipc_mqueue_send_always(ipc_kmsg_t);
+ *
+ * Unfortunately, to avoid warnings/lint about unused variables
+ * when assertions are turned off, we need two versions of this.
+ */
+
+#include <kern/assert.h>
+
+#if MACH_ASSERT
+
+/* assertion build: verify that the forced send cannot fail */
+#define ipc_mqueue_send_always(kmsg) \
+MACRO_BEGIN \
+	mach_msg_return_t mr; \
+	\
+	mr = ipc_mqueue_send((kmsg), MACH_SEND_ALWAYS, \
+			     MACH_MSG_TIMEOUT_NONE); \
+	assert(mr == MACH_MSG_SUCCESS); \
+MACRO_END
+
+#else /* MACH_ASSERT */
+
+/* non-assertion build: same send, result deliberately ignored */
+#define ipc_mqueue_send_always(kmsg) \
+MACRO_BEGIN \
+	(void) ipc_mqueue_send((kmsg), MACH_SEND_ALWAYS, \
+			       MACH_MSG_TIMEOUT_NONE); \
+MACRO_END
+
+#endif /* MACH_ASSERT */
+
+#endif /* _IPC_IPC_MQUEUE_H_ */
diff --git a/ipc/ipc_notify.c b/ipc/ipc_notify.c
new file mode 100644
index 0000000..d0b71cf
--- /dev/null
+++ b/ipc/ipc_notify.c
@@ -0,0 +1,449 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_notify.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Notification-sending functions.
+ */
+
+#include <kern/printf.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/notify.h>
+#include <kern/assert.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+
+#include <ipc/ipc_machdep.h>
+
+/* Prebuilt notification messages, filled in once by ipc_notify_init;
+   each ipc_notify_* sender copies a template and then sets only the
+   destination port and the payload field. */
+mach_port_deleted_notification_t ipc_notify_port_deleted_template;
+mach_msg_accepted_notification_t ipc_notify_msg_accepted_template;
+mach_port_destroyed_notification_t ipc_notify_port_destroyed_template;
+mach_no_senders_notification_t ipc_notify_no_senders_template;
+mach_send_once_notification_t ipc_notify_send_once_template;
+mach_dead_name_notification_t ipc_notify_dead_name_template;
+
+/* all notification templates carry message sequence number 0 */
+#define NOTIFY_MSGH_SEQNO 0
+
+/*
+ * Routine: ipc_notify_init_port_deleted
+ * Purpose:
+ * Initialize a template for port-deleted notifications.
+ */
+
+static void
+ipc_notify_init_port_deleted(mach_port_deleted_notification_t *n)
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	/* header: send-once destination, no reply port */
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_PORT_DELETED;
+
+	/* body: one inline port name, filled in by the sender */
+	t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+	t->msgt_size = PORT_NAME_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_msg_accepted
+ * Purpose:
+ * Initialize a template for msg-accepted notifications.
+ */
+
+static void
+ipc_notify_init_msg_accepted(mach_msg_accepted_notification_t *n)
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	/* header: send-once destination, no reply port */
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_MSG_ACCEPTED;
+
+	/* body: one inline port name, filled in by the sender */
+	t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+	t->msgt_size = PORT_NAME_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_port_destroyed
+ * Purpose:
+ * Initialize a template for port-destroyed notifications.
+ */
+
+static void
+ipc_notify_init_port_destroyed(mach_port_destroyed_notification_t *n)
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	/* header: complex message (it will carry a port right),
+	   send-once destination, no reply port */
+	m->msgh_bits = MACH_MSGH_BITS_COMPLEX |
+		MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_PORT_DESTROYED;
+
+	/* body: one inline receive right, filled in by the sender */
+	t->msgt_name = MACH_MSG_TYPE_PORT_RECEIVE;
+	t->msgt_size = PORT_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_no_senders
+ * Purpose:
+ * Initialize a template for no-senders notifications.
+ */
+
+static void
+ipc_notify_init_no_senders(
+	mach_no_senders_notification_t *n)
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	/* header: send-once destination, no reply port */
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_NO_SENDERS;
+
+	/* body: one inline 32-bit integer (the make-send count),
+	   filled in by the sender */
+	t->msgt_name = MACH_MSG_TYPE_INTEGER_32;
+	t->msgt_size = 32;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_count = 0;
+}
+
+/*
+ * Routine: ipc_notify_init_send_once
+ * Purpose:
+ * Initialize a template for send-once notifications.
+ */
+
+static void
+ipc_notify_init_send_once(
+	mach_send_once_notification_t *n)
+{
+	mach_msg_header_t *m = &n->not_header;
+
+	/* header only: send-once notifications carry no body */
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_SEND_ONCE;
+}
+
+/*
+ * Routine: ipc_notify_init_dead_name
+ * Purpose:
+ * Initialize a template for dead-name notifications.
+ */
+
+static void
+ipc_notify_init_dead_name(
+	mach_dead_name_notification_t *n)
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	/* header: send-once destination, no reply port */
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_DEAD_NAME;
+
+	/* body: one inline port name (the dead name), filled in by
+	   the sender */
+	t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+	t->msgt_size = PORT_NAME_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init
+ * Purpose:
+ * Initialize the notification subsystem.
+ */
+
+void
+ipc_notify_init(void)
+{
+	/* Build each notification template once, at IPC initialization;
+	   the ipc_notify_* senders only copy them afterwards. */
+	ipc_notify_init_port_deleted(&ipc_notify_port_deleted_template);
+	ipc_notify_init_msg_accepted(&ipc_notify_msg_accepted_template);
+	ipc_notify_init_port_destroyed(&ipc_notify_port_destroyed_template);
+	ipc_notify_init_no_senders(&ipc_notify_no_senders_template);
+	ipc_notify_init_send_once(&ipc_notify_send_once_template);
+	ipc_notify_init_dead_name(&ipc_notify_dead_name_template);
+}
+
+/*
+ * Routine: ipc_notify_port_deleted
+ * Purpose:
+ * Send a port-deleted notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_port_deleted(
+	ipc_port_t	port,
+	mach_port_name_t name)
+{
+	ipc_kmsg_t kmsg;
+	mach_port_deleted_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of memory: drop the notification, but still release
+		   the send-once right so it is not leaked */
+		printf("dropped port-deleted (0x%p, 0x%x)\n", port, name);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_port_deleted_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_port_deleted_template;
+
+	/* the destination consumes the caller's send-once right */
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_msg_accepted
+ * Purpose:
+ * Send a msg-accepted notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_msg_accepted(
+	ipc_port_t	port,
+	mach_port_name_t name)
+{
+	ipc_kmsg_t kmsg;
+	mach_msg_accepted_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of memory: drop the notification, but still release
+		   the send-once right so it is not leaked */
+		printf("dropped msg-accepted (0x%p, 0x%x)\n", port, name);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_msg_accepted_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_msg_accepted_template;
+
+	/* the destination consumes the caller's send-once right */
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_port_destroyed
+ * Purpose:
+ * Send a port-destroyed notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ * Consumes a ref for right, which should be a receive right
+ * prepped for placement into a message. (In-transit,
+ * or in-limbo if a circularity was detected.)
+ */
+
+void
+ipc_notify_port_destroyed(
+	ipc_port_t	port,
+	ipc_port_t	right)
+{
+	ipc_kmsg_t kmsg;
+	mach_port_destroyed_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of memory: drop the notification, but still release
+		   both the send-once right and the receive right */
+		printf("dropped port-destroyed (0x%p, 0x%p)\n",
+		       port, right);
+		ipc_port_release_sonce(port);
+		ipc_port_release_receive(right);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_port_destroyed_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_port_destroyed_template;
+
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	/* the receive right travels inside the message body */
+	n->not_port = (mach_port_t) right;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_no_senders
+ * Purpose:
+ * Send a no-senders notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_no_senders(
+	ipc_port_t	port,
+	mach_port_mscount_t mscount)
+{
+	ipc_kmsg_t kmsg;
+	mach_no_senders_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of memory: drop the notification, but still release
+		   the send-once right so it is not leaked */
+		printf("dropped no-senders (0x%p, %u)\n", port, mscount);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_no_senders_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_no_senders_template;
+
+	/* the destination consumes the caller's send-once right */
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_count = mscount;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_send_once
+ * Purpose:
+ * Send a send-once notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_send_once(ipc_port_t port)
+{
+	ipc_kmsg_t kmsg;
+	mach_send_once_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of memory: drop the notification, but still release
+		   the send-once right so it is not leaked */
+		printf("dropped send-once (0x%p)\n", port);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_send_once_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_send_once_template;
+
+	/* the destination consumes the caller's send-once right;
+	   this notification has no body to fill in */
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_dead_name
+ * Purpose:
+ * Send a dead-name notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_dead_name(
+	ipc_port_t	port,
+	mach_port_name_t name)
+{
+	ipc_kmsg_t kmsg;
+	mach_dead_name_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of memory: drop the notification, but still release
+		   the send-once right so it is not leaked */
+		printf("dropped dead-name (0x%p, 0x%x)\n", port, name);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_dead_name_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_dead_name_template;
+
+	/* the destination consumes the caller's send-once right */
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
diff --git a/ipc/ipc_notify.h b/ipc/ipc_notify.h
new file mode 100644
index 0000000..8940f38
--- /dev/null
+++ b/ipc/ipc_notify.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_notify.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of notification-sending functions.
+ */
+
+#ifndef _IPC_IPC_NOTIFY_H_
+#define _IPC_IPC_NOTIFY_H_
+
+/*
+ * Module initialization; presumably builds the notification message
+ * templates used by the senders below — confirm in ipc_notify.c.
+ */
+extern void
+ipc_notify_init(void);
+
+/*
+ * Each ipc_notify_* sender consumes a ref/soright for its port
+ * argument, even when the notification has to be dropped for lack
+ * of memory (see the corresponding routines in ipc_notify.c).
+ */
+
+extern void
+ipc_notify_port_deleted(ipc_port_t, mach_port_name_t);
+
+extern void
+ipc_notify_msg_accepted(ipc_port_t, mach_port_name_t);
+
+extern void
+ipc_notify_port_destroyed(ipc_port_t, ipc_port_t);
+
+extern void
+ipc_notify_no_senders(ipc_port_t, mach_port_mscount_t);
+
+extern void
+ipc_notify_send_once(ipc_port_t);
+
+extern void
+ipc_notify_dead_name(ipc_port_t, mach_port_name_t);
+
+#endif /* _IPC_IPC_NOTIFY_H_ */
diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
new file mode 100644
index 0000000..1074fb2
--- /dev/null
+++ b/ipc/ipc_object.c
@@ -0,0 +1,969 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_object.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC objects.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <ipc/port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_pset.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/slab.h>
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
+
+
+struct kmem_cache ipc_object_caches[IOT_NUMBER];
+
+
+
+/*
+ * Routine: ipc_object_reference
+ * Purpose:
+ * Take a reference to an object.
+ */
+
+void
+ipc_object_reference(
+ ipc_object_t object)
+{
+ /* The reference count is only modified under the object lock. */
+ io_lock(object);
+ assert(object->io_references > 0);
+ io_reference(object);
+ io_unlock(object);
+}
+
+/*
+ * Routine: ipc_object_release
+ * Purpose:
+ * Release a reference to an object.
+ */
+
+void
+ipc_object_release(
+ ipc_object_t object)
+{
+ io_lock(object);
+ assert(object->io_references > 0);
+ io_release(object);
+ /* Unlocks, and frees the object if this was the last reference. */
+ io_check_unlock(object);
+}
+
+/*
+ * Routine: ipc_object_translate
+ * Purpose:
+ * Look up an object in a space.
+ * Conditions:
+ * Nothing locked before. If successful, the object
+ * is returned locked. The caller doesn't get a ref.
+ * Returns:
+ * KERN_SUCCESS Objected returned locked.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote the correct right.
+ */
+
+kern_return_t
+ipc_object_translate(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_right_t right,
+ ipc_object_t *objectp)
+{
+ ipc_entry_t entry;
+ ipc_object_t object;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_read(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is read-locked and active */
+
+ /* The entry must carry the requested right type. */
+ if ((entry->ie_bits & MACH_PORT_TYPE(right)) == (mach_port_right_t) 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ object = entry->ie_object;
+ assert(object != IO_NULL);
+
+ /*
+ * Lock the object before releasing the space lock, so the
+ * entry's reference still pins the object while we take it.
+ */
+ io_lock(object);
+ is_read_unlock(space);
+
+ *objectp = object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_dead
+ * Purpose:
+ * Allocate a dead-name entry.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The dead name is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_dead(
+ ipc_space_t space,
+ mach_port_name_t *namep)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ is_write_lock(space);
+ kr = ipc_entry_alloc(space, namep, &entry);
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
+ return kr;
+ }
+
+ /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
+
+ assert(entry->ie_object == IO_NULL);
+ /* The low-order "| 1" is the single user reference. */
+ entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
+
+ is_write_unlock(space);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_dead_name
+ * Purpose:
+ * Allocate a dead-name entry, with a specific name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The dead name is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_dead_name(
+ ipc_space_t space,
+ mach_port_name_t name)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ is_write_lock(space);
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
+ return kr;
+ }
+
+ /* ipc_right_inuse unlocks the space when it returns TRUE
+ (cf. the "space is unlocked" comment in ipc_object_rename). */
+ if (ipc_right_inuse(space, name, entry))
+ return KERN_NAME_EXISTS;
+
+ /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
+
+ assert(entry->ie_object == IO_NULL);
+ /* The low-order "| 1" is the single user reference. */
+ entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
+
+ is_write_unlock(space);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc
+ * Purpose:
+ * Allocate an object.
+ * Conditions:
+ * Nothing locked. If successful, the object is returned locked.
+ * The caller doesn't get a reference for the object.
+ * Returns:
+ * KERN_SUCCESS The object is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc(
+ ipc_space_t space,
+ ipc_object_type_t otype,
+ mach_port_type_t type,
+ mach_port_urefs_t urefs,
+ mach_port_name_t *namep,
+ ipc_object_t *objectp)
+{
+ ipc_object_t object;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(otype < IOT_NUMBER);
+ assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
+ assert(type != MACH_PORT_TYPE_NONE);
+ assert(urefs <= MACH_PORT_UREFS_MAX);
+
+ /* Allocate before taking the space lock. */
+ object = io_alloc(otype);
+ if (object == IO_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /* Zero-fill the full concrete structure, not just the
+ ipc_object header, so port/pset fields start out clean. */
+ if (otype == IOT_PORT) {
+ ipc_port_t port = (ipc_port_t)object;
+
+ memset(port, 0, sizeof(*port));
+ } else if (otype == IOT_PORT_SET) {
+ ipc_pset_t pset = (ipc_pset_t)object;
+
+ memset(pset, 0, sizeof(*pset));
+ }
+ is_write_lock(space);
+ kr = ipc_entry_alloc(space, namep, &entry);
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
+ /* The object was never published; just free it. */
+ io_free(otype, object);
+ return kr;
+ }
+
+ entry->ie_bits |= type | urefs;
+ entry->ie_object = object;
+
+ /* Return the object locked, as promised in Conditions above. */
+ io_lock_init(object);
+ io_lock(object);
+ is_write_unlock(space);
+
+ object->io_references = 1; /* for entry, not caller */
+ object->io_bits = io_makebits(TRUE, otype, 0);
+
+ *objectp = object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_name
+ * Purpose:
+ * Allocate an object, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the object is returned locked.
+ * The caller doesn't get a reference for the object.
+ * Returns:
+ * KERN_SUCCESS The object is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_name(
+ ipc_space_t space,
+ ipc_object_type_t otype,
+ mach_port_type_t type,
+ mach_port_urefs_t urefs,
+ mach_port_name_t name,
+ ipc_object_t *objectp)
+{
+ ipc_object_t object;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(otype < IOT_NUMBER);
+ assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
+ assert(type != MACH_PORT_TYPE_NONE);
+ assert(urefs <= MACH_PORT_UREFS_MAX);
+
+ /* Allocate before taking the space lock. */
+ object = io_alloc(otype);
+ if (object == IO_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /* Zero-fill the full concrete structure, not just the
+ ipc_object header, so port/pset fields start out clean. */
+ if (otype == IOT_PORT) {
+ ipc_port_t port = (ipc_port_t)object;
+
+ memset(port, 0, sizeof(*port));
+ } else if (otype == IOT_PORT_SET) {
+ ipc_pset_t pset = (ipc_pset_t)object;
+
+ memset(pset, 0, sizeof(*pset));
+ }
+
+ is_write_lock(space);
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
+ io_free(otype, object);
+ return kr;
+ }
+
+ /* ipc_right_inuse unlocks the space when it returns TRUE;
+ the object was never published, so just free it. */
+ if (ipc_right_inuse(space, name, entry)) {
+ io_free(otype, object);
+ return KERN_NAME_EXISTS;
+ }
+
+ entry->ie_bits |= type | urefs;
+ entry->ie_object = object;
+
+ /* Return the object locked, as promised in Conditions above. */
+ io_lock_init(object);
+ io_lock(object);
+ is_write_unlock(space);
+
+ object->io_references = 1; /* for entry, not caller */
+ object->io_bits = io_makebits(TRUE, otype, 0);
+
+ *objectp = object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_copyin_type
+ * Purpose:
+ * Convert a send type name to a received type name.
+ */
+
+mach_msg_type_name_t
+ipc_object_copyin_type(
+ mach_msg_type_name_t msgt_name)
+{
+ switch (msgt_name) {
+ case 0:
+ return 0;
+
+ case MACH_MSG_TYPE_MOVE_RECEIVE:
+ return MACH_MSG_TYPE_PORT_RECEIVE;
+
+ /* Both forms of acquiring a send-once right map to the
+ same carried right. */
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+ return MACH_MSG_TYPE_PORT_SEND_ONCE;
+
+ /* Likewise, all three ways of acquiring a send right. */
+ case MACH_MSG_TYPE_MOVE_SEND:
+ case MACH_MSG_TYPE_MAKE_SEND:
+ case MACH_MSG_TYPE_COPY_SEND:
+ return MACH_MSG_TYPE_PORT_SEND;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_copyin_type: strange rights");
+#else
+ panic("ipc_object_copyin_type: strange rights");
+#endif
+ return 0; /* in case assert/panic returns */
+ }
+}
+
+/*
+ * Routine: ipc_object_copyin
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, unless it is IO_DEAD.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_object_copyin(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_msg_type_name_t msgt_name,
+ ipc_object_t *objectp)
+{
+ ipc_entry_t entry;
+ ipc_port_t soright;
+ kern_return_t kr;
+
+ /*
+ * Could first try a read lock when doing
+ * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
+ * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
+ */
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_copyin(space, name, entry,
+ msgt_name, TRUE,
+ objectp, &soright);
+ /* If the copyin stripped the entry of all rights, reclaim it
+ while the space is still write-locked. */
+ if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ /* Send the port-deleted notification only after dropping the
+ space lock. */
+ if ((kr == KERN_SUCCESS) && (soright != IP_NULL))
+ ipc_notify_port_deleted(soright, name);
+
+ return kr;
+}
+
+/*
+ * Routine: ipc_object_copyin_from_kernel
+ * Purpose:
+ * Copyin a naked capability from the kernel.
+ *
+ * MACH_MSG_TYPE_MOVE_RECEIVE
+ * The receiver must be ipc_space_kernel.
+ * Consumes the naked receive right.
+ * MACH_MSG_TYPE_COPY_SEND
+ * A naked send right must be supplied.
+ * The port gains a reference, and a send right
+ * if the port is still active.
+ * MACH_MSG_TYPE_MAKE_SEND
+ * The receiver must be ipc_space_kernel.
+ * The port gains a reference and a send right.
+ * MACH_MSG_TYPE_MOVE_SEND
+ * Consumes a naked send right.
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE
+ * The receiver must be ipc_space_kernel.
+ * The port gains a reference and a send-once right.
+ * MACH_MSG_TYPE_MOVE_SEND_ONCE
+ * Consumes a naked send-once right.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_object_copyin_from_kernel(
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name)
+{
+ assert(IO_VALID(object));
+
+ /* See the routine header above for the per-case contracts. */
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_MOVE_RECEIVE: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == ipc_space_kernel);
+
+ /* relevant part of ipc_port_clear_receiver */
+ ipc_port_set_mscount(port, 0);
+
+ /* The receive right leaves the kernel space; the port is
+ now in limbo/in transit. */
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ipc_port_flag_protected_payload_clear(port);
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_COPY_SEND: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ /* Gains a reference always; a send right only while the
+ port is still active. */
+ ip_lock(port);
+ if (ip_active(port)) {
+ assert(port->ip_srights > 0);
+ port->ip_srights++;
+ }
+ ip_reference(port);
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_MAKE_SEND: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == ipc_space_kernel);
+
+ /* New send right: bump make-send count too. */
+ ip_reference(port);
+ port->ip_mscount++;
+ port->ip_srights++;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND:
+ /* move naked send right into the message */
+ break;
+
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == ipc_space_kernel);
+
+ ip_reference(port);
+ port->ip_sorights++;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+ /* move naked send-once right into the message */
+ break;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_copyin_from_kernel: strange rights");
+#else
+ panic("ipc_object_copyin_from_kernel: strange rights");
+#endif
+ }
+}
+
+/*
+ * Routine: ipc_object_destroy
+ * Purpose:
+ * Destroys a naked capability.
+ * Consumes a ref for the object.
+ *
+ * A receive right should be in limbo or in transit.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_object_destroy(
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name)
+{
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_PORT_SEND:
+ ipc_port_release_send((ipc_port_t) object);
+ break;
+
+ /* A destroyed send-once right must still generate its
+ send-once notification; that also consumes the ref. */
+ case MACH_MSG_TYPE_PORT_SEND_ONCE:
+ ipc_notify_send_once((ipc_port_t) object);
+ break;
+
+ case MACH_MSG_TYPE_PORT_RECEIVE:
+ ipc_port_release_receive((ipc_port_t) object);
+ break;
+
+ default:
+ panic("ipc_object_destroy: strange rights");
+ }
+}
+
+/*
+ * Routine: ipc_object_copyout
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * If successful, consumes a ref for the object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_NO_SPACE No room in space for another right.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_UREFS_OVERFLOW Urefs limit exceeded
+ * and overflow wasn't specified.
+ */
+
+kern_return_t
+ipc_object_copyout(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ boolean_t overflow,
+ mach_port_name_t *namep)
+{
+ mach_port_name_t name;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+
+ is_write_lock(space);
+
+ /*
+ * Every arm of this loop either breaks (with the object
+ * locked and active and "entry" chosen) or returns with an
+ * error; as written it runs at most one iteration.
+ */
+ for (;;) {
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ /* For send/receive rights, reuse an existing entry for
+ this object rather than allocating a second name. */
+ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, object, &name, &entry)) {
+ /* object is locked and active */
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+ break;
+ }
+
+ kr = ipc_entry_alloc(space, &name, &entry);
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
+ return kr;
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ /* A dead object cannot be copied out; undo the entry. */
+ io_lock(object);
+ if (!io_active(object)) {
+ io_unlock(object);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ entry->ie_object = object;
+ break;
+ }
+
+ /* space is write-locked and active, object is locked and active */
+
+ kr = ipc_right_copyout(space, name, entry,
+ msgt_name, overflow, object);
+ /* object is unlocked */
+ is_write_unlock(space);
+
+ if (kr == KERN_SUCCESS)
+ *namep = name;
+ return kr;
+}
+
+/*
+ * Routine: ipc_object_copyout_name
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * The specified name is used for the capability.
+ * If successful, consumes a ref for the object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_UREFS_OVERFLOW Urefs limit exceeded
+ * and overflow wasn't specified.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_NAME_EXISTS Name is already used.
+ */
+
+kern_return_t
+ipc_object_copyout_name(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ boolean_t overflow,
+ mach_port_name_t name)
+{
+ mach_port_name_t oname;
+ ipc_entry_t oentry;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+
+ is_write_lock(space);
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
+ return kr;
+ }
+
+ /* For send/receive rights, the space may already know this
+ object under some other name; that name must match ours. */
+ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, object, &oname, &oentry)) {
+ /* object is locked and active */
+
+ if (name != oname) {
+ io_unlock(object);
+
+ /* Undo the speculative entry if it was fresh. */
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ is_write_unlock(space);
+ return KERN_RIGHT_EXISTS;
+ }
+
+ assert(entry == oentry);
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+ } else {
+ /* ipc_right_inuse unlocks the space on TRUE. */
+ if (ipc_right_inuse(space, name, entry))
+ return KERN_NAME_EXISTS;
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ /* A dead object cannot be copied out; undo the entry. */
+ io_lock(object);
+ if (!io_active(object)) {
+ io_unlock(object);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ entry->ie_object = object;
+ }
+
+ /* space is write-locked and active, object is locked and active */
+
+ kr = ipc_right_copyout(space, name, entry,
+ msgt_name, overflow, object);
+ /* object is unlocked */
+ is_write_unlock(space);
+ return kr;
+}
+
+/*
+ * Routine: ipc_object_copyout_dest
+ * Purpose:
+ * Translates/consumes the destination right of a message.
+ * This is unlike normal copyout because the right is consumed
+ * in a funny way instead of being given to the receiving space.
+ * The receiver gets his name for the port, if he has receive
+ * rights, otherwise MACH_PORT_NULL.
+ * Conditions:
+ * The object is locked and active. Nothing else locked.
+ * The object is unlocked and loses a reference.
+ */
+
+void
+ipc_object_copyout_dest(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ mach_port_name_t *namep)
+{
+ mach_port_name_t name;
+
+ assert(IO_VALID(object));
+ assert(io_active(object));
+
+ /* Drop the caller's ref now, under the lock we were given. */
+ io_release(object);
+
+ /*
+ * If the space is the receiver/owner of the object,
+ * then we quietly consume the right and return
+ * the space's name for the object. Otherwise
+ * we destroy the right and return MACH_PORT_NULL.
+ */
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_PORT_SEND: {
+ ipc_port_t port = (ipc_port_t) object;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ /* If this was the last send right, detach the no-senders
+ request so the notification can be sent below. */
+ assert(port->ip_srights > 0);
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ if (port->ip_receiver == space)
+ name = port->ip_receiver_name;
+ else
+ name = MACH_PORT_NAME_NULL;
+
+ ip_unlock(port);
+
+ /* Notify only after unlocking the port. */
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ break;
+ }
+
+ case MACH_MSG_TYPE_PORT_SEND_ONCE: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ assert(port->ip_sorights > 0);
+
+ if (port->ip_receiver == space) {
+ /* quietly consume the send-once right */
+
+ port->ip_sorights--;
+ name = port->ip_receiver_name;
+ ip_unlock(port);
+ } else {
+ /*
+ * A very bizarre case. The message
+ * was received, but before this copyout
+ * happened the space lost receive rights.
+ * We can't quietly consume the soright
+ * out from underneath some other task,
+ * so generate a send-once notification.
+ */
+
+ ip_reference(port); /* restore ref */
+ ip_unlock(port);
+
+ /* This consumes the ref and the soright. */
+ ipc_notify_send_once(port);
+ name = MACH_PORT_NAME_NULL;
+ }
+
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_copyout_dest: strange rights");
+#else
+ panic("ipc_object_copyout_dest: strange rights");
+#endif
+
+ }
+
+ *namep = name;
+}
+
+/*
+ * Routine: ipc_object_rename
+ * Purpose:
+ * Rename an entry in a space.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Renamed the entry.
+ * KERN_INVALID_TASK The space was dead.
+ * KERN_INVALID_NAME oname didn't denote an entry.
+ * KERN_NAME_EXISTS nname already denoted an entry.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate new entry.
+ */
+
+kern_return_t
+ipc_object_rename(
+ ipc_space_t space,
+ mach_port_name_t oname,
+ mach_port_name_t nname)
+{
+ ipc_entry_t oentry, nentry;
+ kern_return_t kr;
+
+ is_write_lock(space);
+ kr = ipc_entry_alloc_name(space, nname, &nentry);
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
+ return kr;
+ }
+
+ if (ipc_right_inuse(space, nname, nentry)) {
+ /* space is unlocked */
+ return KERN_NAME_EXISTS;
+ }
+
+ /* don't let ipc_entry_lookup see the uninitialized new entry */
+
+ /* oname == nname is rejected here too: the lookup would find
+ the fresh, still-uninitialized nentry. */
+ if ((oname == nname) ||
+ ((oentry = ipc_entry_lookup(space, oname)) == IE_NULL)) {
+ ipc_entry_dealloc(space, nname, nentry);
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+
+ kr = ipc_right_rename(space, oname, oentry, nname, nentry);
+ /* space is unlocked */
+ return kr;
+}
+
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: ipc_object_print
+ * Purpose:
+ * Pretty-print an object for kdb.
+ */
+
+/* Indexed by io_kotype(); entries past the known types fall back to
+ the trailing "(UNKNOWN)" slot via ipc_object_print below. */
+char *ikot_print_array[IKOT_MAX_TYPE] = {
+ "(NONE) ",
+ "(THREAD) ",
+ "(TASK) ",
+ "(HOST) ",
+ "(HOST_PRIV) ",
+ "(PROCESSOR) ",
+ "(PSET) ",
+ "(PSET_NAME) ",
+ "(PAGER) ",
+ "(PAGER_REQUEST) ",
+ "(DEVICE) ", /* 10 */
+ "(XMM_OBJECT) ",
+ "(XMM_PAGER) ",
+ "(XMM_KERNEL) ",
+ "(XMM_REPLY) ",
+ "(PAGER_TERMINATING)",
+ "(PAGING_NAME) ",
+ "(HOST_SECURITY) ",
+ "(LEDGER) ",
+ "(MASTER_DEVICE) ",
+ "(ACTIVATION) ", /* 20 */
+ "(SUBSYSTEM) ",
+ "(IO_DONE_QUEUE) ",
+ "(SEMAPHORE) ",
+ "(LOCK_SET) ",
+ "(CLOCK) ",
+ "(CLOCK_CTRL) ",
+ "(PAGER_PROXY) ", /* 27 */
+ /* << new entries here */
+ "(UNKNOWN) " /* magic catchall */
+}; /* Please keep in sync with kern/ipc_kobject.h */
+
+void
+ipc_object_print(
+ const ipc_object_t object)
+{
+ int kotype;
+
+ iprintf("%s", io_active(object) ? "active" : "dead");
+ printf(", refs=%d", object->io_references);
+ printf(", otype=%d", io_otype(object));
+ /* Guard the table lookup; out-of-range kotypes print the
+ catchall entry instead of reading past the array. */
+ kotype = io_kotype(object);
+ if (kotype >= 0 && kotype < IKOT_MAX_TYPE)
+ printf(", kotype=%d %s\n", io_kotype(object),
+ ikot_print_array[kotype]);
+ else
+ printf(", kotype=0x%x %s\n", io_kotype(object),
+ ikot_print_array[IKOT_UNKNOWN]);
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
new file mode 100644
index 0000000..209fae1
--- /dev/null
+++ b/ipc/ipc_object.h
@@ -0,0 +1,169 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_object.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for IPC objects, for which tasks have capabilities.
+ */
+
+#ifndef _IPC_IPC_OBJECT_H_
+#define _IPC_IPC_OBJECT_H_
+
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <ipc/ipc_types.h>
+#include <kern/lock.h>
+#include <kern/macros.h>
+#include <kern/slab.h>
+
+typedef unsigned int ipc_object_refs_t;
+typedef unsigned int ipc_object_bits_t;
+typedef unsigned int ipc_object_type_t;
+
+/*
+ * Common header embedded at the start of ports and port sets:
+ * a lock, a reference count, and packed type/state bits.
+ */
+typedef struct ipc_object {
+ decl_simple_lock_data(,io_lock_data)
+ ipc_object_refs_t io_references;
+ ipc_object_bits_t io_bits;
+} *ipc_object_t;
+
+#define IO_NULL ((ipc_object_t) 0)
+#define IO_DEAD ((ipc_object_t) -1)
+
+#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD))
+
+/* Layout of io_bits: low 16 bits kernel-object type, next 14 bits
+ allocation-cache (otype), then two flag bits. */
+#define IO_BITS_KOTYPE 0x0000ffff /* used by the object */
+#define IO_BITS_OTYPE 0x3fff0000 /* determines a cache */
+/* The following masks are used to store attributes of ipc ports. */
+#define IO_BITS_PROTECTED_PAYLOAD 0x40000000 /* pp set? */
+#define IO_BITS_ACTIVE 0x80000000U /* is object alive? */
+
+/* Tests the ACTIVE bit via the sign bit of a signed cast. */
+#define io_active(io) ((int)(io)->io_bits < 0) /* hack */
+
+#define io_otype(io) (((io)->io_bits & IO_BITS_OTYPE) >> 16)
+#define io_kotype(io) ((io)->io_bits & IO_BITS_KOTYPE)
+
+#define io_makebits(active, otype, kotype) \
+ (((active) ? IO_BITS_ACTIVE : 0) | ((otype) << 16) | (kotype))
+
+/*
+ * Object types: ports, port sets, kernel-loaded ports
+ */
+#define IOT_PORT 0
+#define IOT_PORT_SET 1
+#define IOT_NUMBER 2 /* number of types used */
+
+/* One slab cache per object type; see ipc_object.c. */
+extern struct kmem_cache ipc_object_caches[IOT_NUMBER];
+
+#define io_alloc(otype) \
+ ((ipc_object_t) kmem_cache_alloc(&ipc_object_caches[(otype)]))
+
+#define io_free(otype, io) \
+ kmem_cache_free(&ipc_object_caches[(otype)], (vm_offset_t) (io))
+
+#define io_lock_init(io) simple_lock_init(&(io)->io_lock_data)
+#define io_lock(io) simple_lock(&(io)->io_lock_data)
+#define io_lock_try(io) simple_lock_try(&(io)->io_lock_data)
+#define io_unlock(io) simple_unlock(&(io)->io_lock_data)
+
+/*
+ * Unlock, then free the object if the count already reached zero.
+ * Call with the object locked. NOTE(review): the macro evaluates
+ * "io" several times, and io_otype(io) is read after the unlock —
+ * safe only because a zero count means no one else holds the
+ * object; do not pass an expression with side effects.
+ */
+#define io_check_unlock(io) \
+MACRO_BEGIN \
+ ipc_object_refs_t _refs = (io)->io_references; \
+ \
+ io_unlock(io); \
+ if (_refs == 0) \
+ io_free(io_otype(io), io); \
+MACRO_END
+
+/* Raw count updates; callers hold the object lock (see
+ ipc_object_reference/ipc_object_release in ipc_object.c). */
+#define io_reference(io) \
+MACRO_BEGIN \
+ (io)->io_references++; \
+MACRO_END
+
+#define io_release(io) \
+MACRO_BEGIN \
+ (io)->io_references--; \
+MACRO_END
+
+extern void
+ipc_object_reference(ipc_object_t);
+
+extern void
+ipc_object_release(ipc_object_t);
+
+extern kern_return_t
+ipc_object_translate(ipc_space_t, mach_port_name_t,
+ mach_port_right_t, ipc_object_t *);
+
+extern kern_return_t
+ipc_object_alloc_dead(ipc_space_t, mach_port_name_t *);
+
+extern kern_return_t
+ipc_object_alloc_dead_name(ipc_space_t, mach_port_name_t);
+
+extern kern_return_t
+ipc_object_alloc(ipc_space_t, ipc_object_type_t,
+ mach_port_type_t, mach_port_urefs_t,
+ mach_port_name_t *, ipc_object_t *);
+
+extern kern_return_t
+ipc_object_alloc_name(ipc_space_t, ipc_object_type_t,
+ mach_port_type_t, mach_port_urefs_t,
+ mach_port_name_t, ipc_object_t *);
+
+extern mach_msg_type_name_t
+ipc_object_copyin_type(mach_msg_type_name_t);
+
+extern kern_return_t
+ipc_object_copyin(ipc_space_t, mach_port_name_t,
+ mach_msg_type_name_t, ipc_object_t *);
+
+extern void
+ipc_object_copyin_from_kernel(ipc_object_t, mach_msg_type_name_t);
+
+extern void
+ipc_object_destroy(ipc_object_t, mach_msg_type_name_t);
+
+extern kern_return_t
+ipc_object_copyout(ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, boolean_t, mach_port_name_t *);
+
+extern kern_return_t
+ipc_object_copyout_name(ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, boolean_t, mach_port_name_t);
+
+extern void
+ipc_object_copyout_dest(ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_name_t *);
+
+extern kern_return_t
+ipc_object_rename(ipc_space_t, mach_port_name_t, mach_port_name_t);
+
+extern void
+ipc_object_print(ipc_object_t);
+
+#endif /* _IPC_IPC_OBJECT_H_ */
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
new file mode 100644
index 0000000..e959f67
--- /dev/null
+++ b/ipc/ipc_port.c
@@ -0,0 +1,1290 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_port.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC ports.
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/lock.h>
+#include <kern/ipc_sched.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_notify.h>
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#include <ipc/ipc_print.h>
+#endif /* MACH_KDB */
+
+
+def_simple_lock_data(, ipc_port_multiple_lock_data)
+
+def_simple_lock_data(, ipc_port_timestamp_lock_data)
+ipc_port_timestamp_t ipc_port_timestamp_data;
+
+/*
+ * Routine: ipc_port_timestamp
+ * Purpose:
+ * Retrieve a timestamp value.
+ */
+
+ipc_port_timestamp_t
+ipc_port_timestamp(void)
+{
+ ipc_port_timestamp_t timestamp;
+
+ /* Post-increment under the lock: each caller observes a distinct,
+  * monotonically increasing value (modulo eventual wraparound). */
+ ipc_port_timestamp_lock();
+ timestamp = ipc_port_timestamp_data++;
+ ipc_port_timestamp_unlock();
+
+ return timestamp;
+}
+
+/*
+ * Routine: ipc_port_dnrequest
+ * Purpose:
+ * Try to allocate a dead-name request slot.
+ * If successful, returns the request index.
+ * Otherwise returns zero.
+ * Conditions:
+ * The port is locked and active.
+ * Returns:
+ * KERN_SUCCESS A request index was found.
+ * KERN_NO_SPACE No index allocated.
+ */
+
+kern_return_t
+ipc_port_dnrequest(
+ ipc_port_t port,
+ mach_port_name_t name,
+ ipc_port_t soright,
+ ipc_port_request_index_t *indexp)
+{
+ ipc_port_request_t ipr, table;
+ ipc_port_request_index_t index;
+
+ assert(ip_active(port));
+ assert(name != MACH_PORT_NULL);
+ assert(soright != IP_NULL);
+
+ table = port->ip_dnrequests;
+ if (table == IPR_NULL)
+ return KERN_NO_SPACE;
+
+ /* Slot 0 of the table is a header; ipr_next there heads the
+  * free list threaded through unused slots (0 == list empty). */
+ index = table->ipr_next;
+ if (index == 0)
+ return KERN_NO_SPACE;
+
+ ipr = &table[index];
+ assert(ipr->ipr_name == MACH_PORT_NULL);
+
+ /* Pop the slot off the free list and record the request. */
+ table->ipr_next = ipr->ipr_next;
+ ipr->ipr_name = name;
+ ipr->ipr_soright = soright;
+
+ *indexp = index;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_dngrow
+ * Purpose:
+ * Grow a port's table of dead-name requests.
+ * Conditions:
+ * The port must be locked and active.
+ * Nothing else locked; will allocate memory.
+ * Upon return the port is unlocked.
+ * Returns:
+ * KERN_SUCCESS Grew the table.
+ * KERN_SUCCESS Somebody else grew the table.
+ * KERN_SUCCESS The port died.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
+ */
+
+kern_return_t
+ipc_port_dngrow(ipc_port_t port)
+{
+ ipc_table_size_t its;
+ ipc_port_request_t otable, ntable;
+
+ assert(ip_active(port));
+
+ otable = port->ip_dnrequests;
+ if (otable == IPR_NULL)
+ its = &ipc_table_dnrequests[0];
+ else
+ /* Advance to the next (larger) entry of ipc_table_dnrequests[];
+  * the header slot's ipr_size points at the current entry. */
+ its = otable->ipr_size + 1;
+
+ /* Hold a ref so the port can't vanish while unlocked for allocation. */
+ ip_reference(port);
+ ip_unlock(port);
+
+ if ((its->its_size == 0) ||
+ ((ntable = it_dnrequests_alloc(its)) == IPR_NULL)) {
+ ipc_port_release(port);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ ip_lock(port);
+ ip_release(port);
+
+ /*
+ * Check that port is still active and that nobody else
+ * has slipped in and grown the table on us. Note that
+ * just checking port->ip_dnrequests == otable isn't
+ * sufficient; must check ipr_size.
+ */
+
+ if (ip_active(port) &&
+ (port->ip_dnrequests == otable) &&
+ ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
+ ipc_table_size_t oits = 0; /* '=0' to shut up lint */
+ ipc_table_elems_t osize, nsize;
+ ipc_port_request_index_t free, i;
+
+ /* copy old table to new table */
+
+ if (otable != IPR_NULL) {
+ oits = otable->ipr_size;
+ osize = oits->its_size;
+ free = otable->ipr_next;
+
+ memcpy((ntable + 1), (otable + 1),
+ (osize - 1) * sizeof(struct ipc_port_request));
+ } else {
+ osize = 1;
+ free = 0;
+ }
+
+ nsize = its->its_size;
+ assert(nsize > osize);
+
+ /* add new elements to the new table's free list */
+
+ for (i = osize; i < nsize; i++) {
+ ipc_port_request_t ipr = &ntable[i];
+
+ ipr->ipr_name = MACH_PORT_NULL;
+ ipr->ipr_next = free;
+ free = i;
+ }
+
+ ntable->ipr_next = free;
+ ntable->ipr_size = its;
+ port->ip_dnrequests = ntable;
+ ip_unlock(port);
+
+ if (otable != IPR_NULL)
+ it_dnrequests_free(oits, otable);
+ } else {
+ /* Lost the race (or port died): discard our new table. */
+ ip_check_unlock(port);
+ it_dnrequests_free(its, ntable);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_dncancel
+ * Purpose:
+ * Cancel a dead-name request and return the send-once right.
+ * Conditions:
+ * The port must be locked and active.
+ */
+
+ipc_port_t
+ipc_port_dncancel(
+ ipc_port_t port,
+ mach_port_name_t name,
+ ipc_port_request_index_t index)
+{
+ ipc_port_request_t ipr, table;
+ ipc_port_t dnrequest;
+
+ assert(ip_active(port));
+ assert(name != MACH_PORT_NULL);
+ assert(index != 0);
+
+ table = port->ip_dnrequests;
+ assert(table != IPR_NULL);
+
+ /* Caller gets the registered send-once right back with its ref. */
+ ipr = &table[index];
+ dnrequest = ipr->ipr_soright;
+ assert(ipr->ipr_name == name);
+
+ /* return ipr to the free list inside the table */
+
+ ipr->ipr_name = MACH_PORT_NULL;
+ ipr->ipr_next = table->ipr_next;
+ table->ipr_next = index;
+
+ return dnrequest;
+}
+
+/*
+ * Routine: ipc_port_pdrequest
+ * Purpose:
+ * Make a port-deleted request, returning the
+ * previously registered send-once right.
+ * Just cancels the previous request if notify is IP_NULL.
+ * Conditions:
+ * The port is locked and active. It is unlocked.
+ * Consumes a ref for notify (if non-null), and
+ * returns previous with a ref (if non-null).
+ */
+
+void
+ipc_port_pdrequest(
+ ipc_port_t port,
+ const ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+
+ assert(ip_active(port));
+
+ /* Swap in the new backup port; the old one (and its ref) is
+  * handed back to the caller via *previousp. */
+ previous = port->ip_pdrequest;
+ port->ip_pdrequest = notify;
+ ip_unlock(port);
+
+ *previousp = previous;
+}
+
+/*
+ * Routine: ipc_port_nsrequest
+ * Purpose:
+ * Make a no-senders request, returning the
+ * previously registered send-once right.
+ * Just cancels the previous request if notify is IP_NULL.
+ * Conditions:
+ * The port is locked and active. It is unlocked.
+ * Consumes a ref for notify (if non-null), and
+ * returns previous with a ref (if non-null).
+ */
+
+void
+ipc_port_nsrequest(
+ ipc_port_t port,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+ mach_port_mscount_t mscount;
+
+ assert(ip_active(port));
+
+ previous = port->ip_nsrequest;
+ mscount = port->ip_mscount;
+
+ /* If there are already no senders and the caller's sync value is
+  * satisfied, fire the notification immediately instead of arming it. */
+ if ((port->ip_srights == 0) &&
+ (sync <= mscount) &&
+ (notify != IP_NULL)) {
+ port->ip_nsrequest = IP_NULL;
+ ip_unlock(port);
+ ipc_notify_no_senders(notify, mscount);
+ } else {
+ port->ip_nsrequest = notify;
+ ip_unlock(port);
+ }
+
+ *previousp = previous;
+}
+
+/*
+ * Routine: ipc_port_set_qlimit
+ * Purpose:
+ * Changes a port's queue limit; the maximum number
+ * of messages which may be queued to the port.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_set_qlimit(
+ ipc_port_t port,
+ mach_port_msgcount_t qlimit)
+{
+ assert(ip_active(port));
+
+ /* wake up senders allowed by the new qlimit */
+
+ if (qlimit > port->ip_qlimit) {
+ mach_port_msgcount_t i, wakeup;
+
+ /* caution: wakeup, qlimit are unsigned */
+
+ wakeup = qlimit - port->ip_qlimit;
+
+ /* Release at most 'wakeup' blocked senders; stop early if the
+  * blocked queue drains first. */
+ for (i = 0; i < wakeup; i++) {
+ ipc_thread_t th;
+
+ th = ipc_thread_dequeue(&port->ip_blocked);
+ if (th == ITH_NULL)
+ break;
+
+ th->ith_state = MACH_MSG_SUCCESS;
+ thread_go(th);
+ }
+ }
+
+ port->ip_qlimit = qlimit;
+}
+
+/*
+ * Routine: ipc_port_lock_mqueue
+ * Purpose:
+ * Locks and returns the message queue that the port is using.
+ * The message queue may be in the port or in its port set.
+ * Conditions:
+ * The port is locked and active.
+ * Port set, message queue locks may be taken.
+ */
+
+ipc_mqueue_t
+ipc_port_lock_mqueue(ipc_port_t port)
+{
+ if (port->ip_pset != IPS_NULL) {
+ ipc_pset_t pset = port->ip_pset;
+
+ ips_lock(pset);
+ if (ips_active(pset)) {
+ imq_lock(&pset->ips_messages);
+ ips_unlock(pset);
+ return &pset->ips_messages;
+ }
+
+ /* The port set died: lazily remove our stale membership
+  * and fall back to the port's own queue. */
+ ipc_pset_remove(pset, port);
+ ips_check_unlock(pset);
+ }
+
+ imq_lock(&port->ip_messages);
+ return &port->ip_messages;
+}
+
+/*
+ * Routine: ipc_port_set_seqno
+ * Purpose:
+ * Changes a port's sequence number.
+ * Conditions:
+ * The port is locked and active.
+ * Port set, message queue locks may be taken.
+ */
+
+void
+ipc_port_set_seqno(
+ ipc_port_t port,
+ mach_port_seqno_t seqno)
+{
+ ipc_mqueue_t mqueue;
+
+ /* The mqueue lock guards ip_seqno updates. */
+ mqueue = ipc_port_lock_mqueue(port);
+ port->ip_seqno = seqno;
+ imq_unlock(mqueue);
+}
+
+/*
+ * Routine: ipc_port_set_protected_payload
+ * Purpose:
+ * Changes a port's protected payload.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_set_protected_payload(ipc_port_t port, rpc_uintptr_t payload)
+{
+ ipc_mqueue_t mqueue;
+
+ /* Store the payload and raise the flag under the mqueue lock. */
+ mqueue = ipc_port_lock_mqueue(port);
+ port->ip_protected_payload = payload;
+ ipc_port_flag_protected_payload_set(port);
+ imq_unlock(mqueue);
+}
+
+/*
+ * Routine: ipc_port_clear_protected_payload
+ * Purpose:
+ * Clear a port's protected payload.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_clear_protected_payload(ipc_port_t port)
+{
+ ipc_mqueue_t mqueue;
+
+ /* Only the flag is cleared; the stale payload value remains. */
+ mqueue = ipc_port_lock_mqueue(port);
+ ipc_port_flag_protected_payload_clear(port);
+ imq_unlock(mqueue);
+}
+
+
+/*
+ * Routine: ipc_port_clear_receiver
+ * Purpose:
+ * Prepares a receive right for transmission/destruction.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_clear_receiver(
+ ipc_port_t port)
+{
+ ipc_pset_t pset;
+
+ assert(ip_active(port));
+
+ pset = port->ip_pset;
+ if (pset != IPS_NULL) {
+ /* No threads receiving from port, but must remove from set. */
+
+ ips_lock(pset);
+ ipc_pset_remove(pset, port);
+ ips_check_unlock(pset);
+ } else {
+ /* Else, wake up all receivers, indicating why. */
+
+ imq_lock(&port->ip_messages);
+ ipc_mqueue_changed(&port->ip_messages, MACH_RCV_PORT_DIED);
+ imq_unlock(&port->ip_messages);
+ }
+
+ /* Reset make-send count and sequence number for the new holder. */
+ ipc_port_set_mscount(port, 0);
+ imq_lock(&port->ip_messages);
+ port->ip_seqno = 0;
+ imq_unlock(&port->ip_messages);
+}
+
+/*
+ * Routine: ipc_port_init
+ * Purpose:
+ * Initializes a newly-allocated port.
+ * Doesn't touch the ip_object fields.
+ */
+
+void
+ipc_port_init(
+ ipc_port_t port,
+ ipc_space_t space,
+ mach_port_name_t name)
+{
+ /* port->ip_kobject doesn't have to be initialized */
+
+ ipc_target_init(&port->ip_target, name);
+
+ port->ip_receiver = space;
+
+ /* No send or send-once rights exist yet. */
+ port->ip_mscount = 0;
+ port->ip_srights = 0;
+ port->ip_sorights = 0;
+
+ /* No notifications registered. */
+ port->ip_nsrequest = IP_NULL;
+ port->ip_pdrequest = IP_NULL;
+ port->ip_dnrequests = IPR_NULL;
+
+ port->ip_pset = IPS_NULL;
+ port->ip_cur_target = &port->ip_target;
+ port->ip_seqno = 0;
+ port->ip_msgcount = 0;
+ port->ip_qlimit = MACH_PORT_QLIMIT_DEFAULT;
+ ipc_port_flag_protected_payload_clear(port);
+ port->ip_protected_payload = 0;
+
+ ipc_mqueue_init(&port->ip_messages);
+ ipc_thread_queue_init(&port->ip_blocked);
+}
+
+/*
+ * Routine: ipc_port_alloc
+ * Purpose:
+ * Allocate a port.
+ * Conditions:
+ * Nothing locked. If successful, the port is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc(
+ ipc_space_t space,
+ mach_port_name_t *namep,
+ ipc_port_t *portp)
+{
+ ipc_port_t port;
+ mach_port_name_t name;
+ kern_return_t kr;
+
+ /* Allocate the object and a fresh receive-right name in one step. */
+ kr = ipc_object_alloc(space, IOT_PORT,
+ MACH_PORT_TYPE_RECEIVE, 0,
+ &name, (ipc_object_t *) &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ /* port is locked */
+
+ ipc_port_init(port, space, name);
+
+ *namep = name;
+ *portp = port;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_alloc_name
+ * Purpose:
+ * Allocate a port, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the port is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc_name(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_port_t *portp)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ /* Same as ipc_port_alloc, but the caller dictates the name. */
+ kr = ipc_object_alloc_name(space, IOT_PORT,
+ MACH_PORT_TYPE_RECEIVE, 0,
+ name, (ipc_object_t *) &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked */
+
+ ipc_port_init(port, space, name);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_destroy
+ * Purpose:
+ * Destroys a port. Cleans up queued messages.
+ *
+ * If the port has a backup, it doesn't get destroyed,
+ * but is sent in a port-destroyed notification to the backup.
+ * Conditions:
+ * The port is locked and alive; nothing else locked.
+ * The caller has a reference, which is consumed.
+ * Afterwards, the port is unlocked and dead.
+ */
+
+void
+ipc_port_destroy(
+ ipc_port_t port)
+{
+ ipc_port_t pdrequest, nsrequest;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_queue_t kmqueue;
+ ipc_kmsg_t kmsg;
+ ipc_thread_t sender;
+ ipc_port_request_t dnrequests;
+
+ assert(ip_active(port));
+ /* port->ip_receiver_name is garbage */
+ /* port->ip_receiver/port->ip_destination is garbage */
+ assert(port->ip_pset == IPS_NULL);
+ assert(port->ip_mscount == 0);
+ assert(port->ip_seqno == 0);
+
+ /* first check for a backup port */
+
+ pdrequest = port->ip_pdrequest;
+ if (pdrequest != IP_NULL) {
+ /* we assume the ref for pdrequest */
+ port->ip_pdrequest = IP_NULL;
+
+ /* make port be in limbo */
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ipc_port_flag_protected_payload_clear(port);
+ ip_unlock(port);
+
+ if (!ipc_port_check_circularity(port, pdrequest)) {
+ /* consumes our refs for port and pdrequest */
+ ipc_notify_port_destroyed(pdrequest, port);
+ return;
+ } else {
+ /* consume pdrequest and destroy port */
+ ipc_port_release_sonce(pdrequest);
+ }
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_pset == IPS_NULL);
+ assert(port->ip_mscount == 0);
+ assert(port->ip_seqno == 0);
+ assert(port->ip_pdrequest == IP_NULL);
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination == IP_NULL);
+
+ /* fall through and destroy the port */
+ }
+
+ /*
+ * rouse all blocked senders
+ *
+ * This must be done with the port locked, because
+ * ipc_mqueue_send can play with the ip_blocked queue
+ * of a dead port.
+ */
+
+ while ((sender = ipc_thread_dequeue(&port->ip_blocked)) != ITH_NULL) {
+ sender->ith_state = MACH_MSG_SUCCESS;
+ thread_go(sender);
+ }
+
+ /* once port is dead, we don't need to keep it locked */
+
+ port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
+ port->ip_timestamp = ipc_port_timestamp();
+ ip_unlock(port);
+
+ /* throw away no-senders request */
+
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL)
+ ipc_notify_send_once(nsrequest); /* consumes ref */
+
+ /* destroy any queued messages */
+
+ mqueue = &port->ip_messages;
+ imq_lock(mqueue);
+ assert(ipc_thread_queue_empty(&mqueue->imq_threads));
+ kmqueue = &mqueue->imq_messages;
+
+ while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
+ imq_unlock(mqueue);
+
+ assert(kmsg->ikm_header.msgh_remote_port ==
+ (mach_port_t) port);
+
+ /* Each queued message held a port ref via msgh_remote_port;
+  * drop it before destroying the message. */
+ ipc_port_release(port);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(kmsg);
+
+ imq_lock(mqueue);
+ }
+
+ imq_unlock(mqueue);
+
+ /* generate dead-name notifications */
+
+ dnrequests = port->ip_dnrequests;
+ if (dnrequests != IPR_NULL) {
+ ipc_table_size_t its = dnrequests->ipr_size;
+ ipc_table_elems_t size = its->its_size;
+ ipc_port_request_index_t index;
+
+ /* Slot 0 is the table header; live requests start at 1. */
+ for (index = 1; index < size; index++) {
+ ipc_port_request_t ipr = &dnrequests[index];
+ mach_port_name_t name = ipr->ipr_name;
+ ipc_port_t soright;
+
+ if (name == MACH_PORT_NULL)
+ continue;
+
+ soright = ipr->ipr_soright;
+ assert(soright != IP_NULL);
+
+ ipc_notify_dead_name(soright, name);
+ }
+
+ it_dnrequests_free(its, dnrequests);
+ }
+
+ if (ip_kotype(port) != IKOT_NONE)
+ ipc_kobject_destroy(port);
+
+ /* Common destruction for the IPC target. */
+ ipc_target_terminate(&port->ip_target);
+
+ ipc_port_release(port); /* consume caller's ref */
+}
+
+/*
+ * Routine: ipc_port_check_circularity
+ * Purpose:
+ * Check if queueing "port" in a message for "dest"
+ * would create a circular group of ports and messages.
+ *
+ * If no circularity (FALSE returned), then "port"
+ * is changed from "in limbo" to "in transit".
+ *
+ * That is, we want to set port->ip_destination == dest,
+ * but guaranteeing that this doesn't create a circle
+ * port->ip_destination->ip_destination->... == port
+ * Conditions:
+ * No ports locked. References held for "port" and "dest".
+ */
+
+boolean_t
+ipc_port_check_circularity(
+ ipc_port_t port,
+ ipc_port_t dest)
+{
+ ipc_port_t base;
+
+ assert(port != IP_NULL);
+ assert(dest != IP_NULL);
+
+ /* Trivial self-cycle. */
+ if (port == dest)
+ return TRUE;
+ base = dest;
+
+ /*
+ * First try a quick check that can run in parallel.
+ * No circularity if dest is not in transit.
+ */
+
+ ip_lock(port);
+ if (ip_lock_try(dest)) {
+ if (!ip_active(dest) ||
+ (dest->ip_receiver_name != MACH_PORT_NULL) ||
+ (dest->ip_destination == IP_NULL))
+ goto not_circular;
+
+ /* dest is in transit; further checking necessary */
+
+ ip_unlock(dest);
+ }
+ ip_unlock(port);
+
+ ipc_port_multiple_lock(); /* massive serialization */
+
+ /*
+ * Search for the end of the chain (a port not in transit),
+ * acquiring locks along the way.
+ */
+
+ for (;;) {
+ ip_lock(base);
+
+ if (!ip_active(base) ||
+ (base->ip_receiver_name != MACH_PORT_NULL) ||
+ (base->ip_destination == IP_NULL))
+ break;
+
+ base = base->ip_destination;
+ }
+
+ /* all ports in chain from dest to base, inclusive, are locked */
+
+ if (port == base) {
+ /* circularity detected! */
+
+ ipc_port_multiple_unlock();
+
+ /* port (== base) is in limbo */
+
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination == IP_NULL);
+
+ /* Walk back down the chain unlocking every port we locked. */
+ while (dest != IP_NULL) {
+ ipc_port_t next;
+
+ /* dest is in transit or in limbo */
+
+ assert(ip_active(dest));
+ assert(dest->ip_receiver_name == MACH_PORT_NULL);
+
+ next = dest->ip_destination;
+ ip_unlock(dest);
+ dest = next;
+ }
+
+ return TRUE;
+ }
+
+ /*
+ * The guarantee: lock port while the entire chain is locked.
+ * Once port is locked, we can take a reference to dest,
+ * add port to the chain, and unlock everything.
+ */
+
+ ip_lock(port);
+ ipc_port_multiple_unlock();
+
+ not_circular:
+
+ /* port is in limbo */
+
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination == IP_NULL);
+
+ /* Move port from limbo to in-transit: it now holds a ref on dest. */
+ ip_reference(dest);
+ port->ip_destination = dest;
+
+ /* now unlock chain */
+
+ while (port != base) {
+ ipc_port_t next;
+
+ /* port is in transit */
+
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination != IP_NULL);
+
+ next = port->ip_destination;
+ ip_unlock(port);
+ port = next;
+ }
+
+ /* base is not in transit */
+
+ assert(!ip_active(base) ||
+ (base->ip_receiver_name != MACH_PORT_NULL) ||
+ (base->ip_destination == IP_NULL));
+ ip_unlock(base);
+
+ return FALSE;
+}
+
+/*
+ * Routine: ipc_port_lookup_notify
+ * Purpose:
+ * Make a send-once notify port from a receive right.
+ * Returns IP_NULL if name doesn't denote a receive right.
+ * Conditions:
+ * The space must be locked (read or write) and active.
+ */
+
+ipc_port_t
+ipc_port_lookup_notify(
+ ipc_space_t space,
+ mach_port_name_t name)
+{
+ ipc_port_t port;
+ ipc_entry_t entry;
+
+ assert(space->is_active);
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry == IE_NULL)
+ return IP_NULL;
+
+ /* Only a receive right may mint a send-once notify right. */
+ if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ return IP_NULL;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ /* Returned right carries one port ref and one send-once right. */
+ ip_reference(port);
+ port->ip_sorights++;
+ ip_unlock(port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_make_send
+ * Purpose:
+ * Make a naked send right from a receive right.
+ * Conditions:
+ * The port is not locked but it is active.
+ */
+
+ipc_port_t
+ipc_port_make_send(
+ ipc_port_t port)
+{
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ assert(ip_active(port));
+ /* A made (not copied) send right also bumps the make-send count. */
+ port->ip_mscount++;
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_copy_send
+ * Purpose:
+ * Make a naked send right from another naked send right.
+ * IP_NULL -> IP_NULL
+ * IP_DEAD -> IP_DEAD
+ * dead port -> IP_DEAD
+ * live port -> port + ref
+ * Conditions:
+ * Nothing locked except possibly a space.
+ */
+
+ipc_port_t
+ipc_port_copy_send(
+ ipc_port_t port)
+{
+ ipc_port_t sright;
+
+ if (!IP_VALID(port))
+ return port;
+
+ ip_lock(port);
+ if (ip_active(port)) {
+ assert(port->ip_srights > 0);
+
+ /* Copying does NOT touch ip_mscount (contrast make_send). */
+ ip_reference(port);
+ port->ip_srights++;
+ sright = port;
+ } else
+ sright = IP_DEAD;
+ ip_unlock(port);
+
+ return sright;
+}
+
+/*
+ * Routine: ipc_port_copyout_send
+ * Purpose:
+ * Copyout a naked send right (possibly null/dead),
+ * or if that fails, destroy the right.
+ * Conditions:
+ * Nothing locked.
+ */
+
+mach_port_name_t
+ipc_port_copyout_send(
+ ipc_port_t sright,
+ ipc_space_t space)
+{
+ mach_port_name_t name;
+
+ if (IP_VALID(sright)) {
+ kern_return_t kr;
+
+ kr = ipc_object_copyout(space, (ipc_object_t) sright,
+ MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
+ if (kr != KERN_SUCCESS) {
+ /* Copyout failed: consume the right ourselves and
+  * report a dead or null name instead. */
+ ipc_port_release_send(sright);
+
+ if (kr == KERN_INVALID_CAPABILITY)
+ name = MACH_PORT_NAME_DEAD;
+ else
+ name = MACH_PORT_NAME_NULL;
+ }
+ } else
+ name = invalid_port_to_name((mach_port_t)sright);
+
+ return name;
+}
+
+/*
+ * Routine: ipc_port_release_send
+ * Purpose:
+ * Release a (valid) naked send right.
+ * Consumes a ref for the port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_port_release_send(
+ ipc_port_t port)
+{
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount;
+
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ ip_release(port);
+
+ if (!ip_active(port)) {
+ ip_check_unlock(port);
+ return;
+ }
+
+ assert(port->ip_srights > 0);
+
+ /* Last send right gone: claim any armed no-senders request
+  * while still holding the lock, then notify after unlocking. */
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ ip_unlock(port);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+}
+
+/*
+ * Routine: ipc_port_make_sonce
+ * Purpose:
+ * Make a naked send-once right from a receive right.
+ * Conditions:
+ * The port is not locked but it is active.
+ */
+
+ipc_port_t
+ipc_port_make_sonce(
+ ipc_port_t port)
+{
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ assert(ip_active(port));
+ /* One send-once right == one port reference. */
+ port->ip_sorights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_release_sonce
+ * Purpose:
+ * Release a naked send-once right.
+ * Consumes a ref for the port.
+ *
+ * In normal situations, this is never used.
+ * Send-once rights are only consumed when
+ * a message (possibly a send-once notification)
+ * is sent to them.
+ * Conditions:
+ * Nothing locked except possibly a space.
+ */
+
+void
+ipc_port_release_sonce(
+ ipc_port_t port)
+{
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+
+ assert(port->ip_sorights > 0);
+
+ port->ip_sorights--;
+
+ ip_release(port);
+
+ /* If the port is dead, this may be the ref that frees it. */
+ if (!ip_active(port)) {
+ ip_check_unlock(port);
+ return;
+ }
+
+ ip_unlock(port);
+}
+
+/*
+ * Routine: ipc_port_release_receive
+ * Purpose:
+ * Release a naked (in limbo or in transit) receive right.
+ * Consumes a ref for the port; destroys the port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_port_release_receive(
+ ipc_port_t port)
+{
+ ipc_port_t dest;
+
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ /* Remember the in-transit destination before the port dies. */
+ dest = port->ip_destination;
+
+ ipc_port_destroy(port); /* consumes ref, unlocks */
+
+ /* Drop the ref the in-transit port held on its destination. */
+ if (dest != IP_NULL)
+ ipc_port_release(dest);
+}
+
+/*
+ * Routine: ipc_port_alloc_special
+ * Purpose:
+ * Allocate a port in a special space.
+ * The new port is returned with one ref.
+ * If unsuccessful, IP_NULL is returned.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+ipc_port_alloc_special(ipc_space_t space)
+{
+ ipc_port_t port;
+
+ port = ip_alloc();
+ if (port == IP_NULL)
+ return IP_NULL;
+
+ /* Special-space ports bypass ipc_object_alloc, so the object
+  * header is set up by hand here. */
+ ip_lock_init(port);
+ port->ip_references = 1;
+ port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
+
+ /*
+ * The actual values of ip_receiver_name aren't important,
+ * as long as they are valid (not null/dead).
+ *
+ * Mach4: we set it to the internal port structure address
+ * so we can always just pass on ip_receiver_name during
+ * an rpc regardless of whether the destination is user or
+ * kernel (i.e. no special-casing code for the kernel along
+ * the fast rpc path).
+ */
+
+ ipc_port_init(port, space, (mach_port_name_t)port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_dealloc_special
+ * Purpose:
+ * Deallocate a port in a special space.
+ * Consumes one ref for the port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_port_dealloc_special(
+ ipc_port_t port,
+ ipc_space_t space)
+{
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == space);
+
+ /*
+ * We clear ip_receiver_name and ip_receiver to simplify
+ * the ipc_space_kernel check in ipc_mqueue_send.
+ */
+
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_receiver = IS_NULL;
+
+ /*
+ * For ipc_space_kernel, all ipc_port_clear_receiver does
+ * is clean things up for the assertions in ipc_port_destroy.
+ * For ipc_space_reply, there might be a waiting receiver.
+ */
+
+ ipc_port_clear_receiver(port);
+ ipc_port_destroy(port);
+}
+
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: ipc_port_print
+ * Purpose:
+ * Pretty-print a port for kdb.
+ */
+
+void
+ipc_port_print(const ipc_port_t port)
+{
+ printf("port 0x%x\n", port);
+
+ /* 'indent' is ddb output state; restore it before returning. */
+ indent += 2;
+
+ iprintf("flags ");
+ printf("has_protected_payload=%d",
+ ipc_port_flag_protected_payload(port));
+ printf("\n");
+
+ ipc_object_print(&port->ip_object);
+ iprintf("receiver=0x%x", port->ip_receiver);
+ printf(", receiver_name=0x%x\n", port->ip_receiver_name);
+
+ iprintf("mscount=%d", port->ip_mscount);
+ printf(", srights=%d", port->ip_srights);
+ printf(", sorights=%d\n", port->ip_sorights);
+
+ iprintf("nsrequest=0x%x", port->ip_nsrequest);
+ printf(", pdrequest=0x%x", port->ip_pdrequest);
+ printf(", dnrequests=0x%x\n", port->ip_dnrequests);
+
+ iprintf("pset=0x%x", port->ip_pset);
+ printf(", seqno=%d", port->ip_seqno);
+ printf(", msgcount=%d", port->ip_msgcount);
+ printf(", qlimit=%d\n", port->ip_qlimit);
+
+ iprintf("kmsgs=0x%x", port->ip_messages.imq_messages.ikmq_base);
+ printf(", rcvrs=0x%x", port->ip_messages.imq_threads.ithq_base);
+ printf(", sndrs=0x%x", port->ip_blocked.ithq_base);
+ printf(", kobj=0x%x\n", port->ip_kobject);
+
+ iprintf("protected_payload=%p\n", (void *) (vm_offset_t) port->ip_protected_payload);
+
+ indent -= 2;
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
new file mode 100644
index 0000000..192d880
--- /dev/null
+++ b/ipc/ipc_port.h
@@ -0,0 +1,354 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_port.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for ports.
+ */
+
+#ifndef _IPC_IPC_PORT_H_
+#define _IPC_IPC_PORT_H_
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/macros.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_object.h>
+#include "ipc_target.h"
+
+/*
+ * A receive right (port) can be in four states:
+ * 1) dead (not active, ip_timestamp has death time)
+ * 2) in a space (ip_receiver_name != 0, ip_receiver points
+ * to the space but doesn't hold a ref for it)
+ * 3) in transit (ip_receiver_name == 0, ip_destination points
+ * to the destination port and holds a ref for it)
+ * 4) in limbo (ip_receiver_name == 0, ip_destination == IP_NULL)
+ *
+ * If the port is active, and ip_receiver points to some space,
+ * then ip_receiver_name != 0, and that space holds receive rights.
+ * If the port is not active, then ip_timestamp contains a timestamp
+ * taken when the port was destroyed.
+ */
+
+typedef unsigned int ipc_port_timestamp_t;
+
+struct ipc_port {
+ struct ipc_target ip_target;
+
+ /* This points to the ip_target above if this port isn't on a port set;
+ otherwise it points to the port set's ips_target. */
+ struct ipc_target *ip_cur_target;
+
+ union {
+ struct ipc_space *receiver;
+ struct ipc_port *destination;
+ ipc_port_timestamp_t timestamp;
+ } data;
+
+ ipc_kobject_t ip_kobject;
+
+ mach_port_mscount_t ip_mscount;
+ mach_port_rights_t ip_srights;
+ mach_port_rights_t ip_sorights;
+
+ struct ipc_port *ip_nsrequest;
+ struct ipc_port *ip_pdrequest;
+ struct ipc_port_request *ip_dnrequests;
+
+ struct ipc_pset *ip_pset;
+ mach_port_seqno_t ip_seqno; /* locked by message queue */
+ mach_port_msgcount_t ip_msgcount;
+ mach_port_msgcount_t ip_qlimit;
+ struct ipc_thread_queue ip_blocked;
+ rpc_uintptr_t ip_protected_payload;
+};
+
+#define ip_object ip_target.ipt_object
+#define ip_receiver_name ip_target.ipt_name
+#define ip_messages ip_target.ipt_messages
+#define ip_references ip_object.io_references
+#define ip_bits ip_object.io_bits
+#define ip_receiver data.receiver
+#define ip_destination data.destination
+#define ip_timestamp data.timestamp
+
+#define IP_NULL ((ipc_port_t) IO_NULL)
+#define IP_DEAD ((ipc_port_t) IO_DEAD)
+
+#define IP_VALID(port) IO_VALID(&(port)->ip_object)
+
+#define ip_active(port) io_active(&(port)->ip_object)
+#define ip_lock_init(port) io_lock_init(&(port)->ip_object)
+#define ip_lock(port) io_lock(&(port)->ip_object)
+#define ip_lock_try(port) io_lock_try(&(port)->ip_object)
+#define ip_unlock(port) io_unlock(&(port)->ip_object)
+#define ip_check_unlock(port) io_check_unlock(&(port)->ip_object)
+#define ip_reference(port) io_reference(&(port)->ip_object)
+#define ip_release(port) io_release(&(port)->ip_object)
+
+#define ip_alloc() ((ipc_port_t) io_alloc(IOT_PORT))
+#define ip_free(port) io_free(IOT_PORT, &(port)->ip_object)
+
+#define ip_kotype(port) io_kotype(&(port)->ip_object)
+
+typedef ipc_table_index_t ipc_port_request_index_t;
+
+typedef struct ipc_port_request {
+ union {
+ struct ipc_port *port;
+ ipc_port_request_index_t index;
+ } notify;
+
+ union {
+ mach_port_name_t name;
+ struct ipc_table_size *size;
+ } name;
+} *ipc_port_request_t;
+
+#define ipr_next notify.index
+#define ipr_size name.size
+
+#define ipr_soright notify.port
+#define ipr_name name.name
+
+#define IPR_NULL ((ipc_port_request_t) 0)
+
+/*
+ * Taking the ipc_port_multiple lock grants the privilege
+ * to lock multiple ports at once. No ports must be locked
+ * when it is taken.
+ */
+
+decl_simple_lock_data(extern, ipc_port_multiple_lock_data)
+
+#define ipc_port_multiple_lock_init() \
+ simple_lock_init(&ipc_port_multiple_lock_data)
+
+#define ipc_port_multiple_lock() \
+ simple_lock(&ipc_port_multiple_lock_data)
+
+#define ipc_port_multiple_unlock() \
+ simple_unlock(&ipc_port_multiple_lock_data)
+
+/*
+ * The port timestamp facility provides timestamps
+ * for port destruction. It is used to serialize
+ * mach_port_names with port death.
+ */
+
+decl_simple_lock_data(extern, ipc_port_timestamp_lock_data)
+extern ipc_port_timestamp_t ipc_port_timestamp_data;
+
+#define ipc_port_timestamp_lock_init() \
+ simple_lock_init(&ipc_port_timestamp_lock_data)
+
+#define ipc_port_timestamp_lock() \
+ simple_lock(&ipc_port_timestamp_lock_data)
+
+#define ipc_port_timestamp_unlock() \
+ simple_unlock(&ipc_port_timestamp_lock_data)
+
+extern ipc_port_timestamp_t
+ipc_port_timestamp(void);
+
+/*
+ * Compares two timestamps, and returns TRUE if one
+ * happened before two. Note that this formulation
+ * works when the timestamp wraps around at 2^32,
+ * as long as one and two aren't too far apart.
+ */
+
+#define IP_TIMESTAMP_ORDER(one, two) ((int) ((one) - (two)) < 0)
+
+#define ipc_port_translate_receive(space, name, portp) \
+ ipc_object_translate((space), (name), \
+ MACH_PORT_RIGHT_RECEIVE, \
+ (ipc_object_t *) (portp))
+
+#define ipc_port_translate_send(space, name, portp) \
+ ipc_object_translate((space), (name), \
+ MACH_PORT_RIGHT_SEND, \
+ (ipc_object_t *) (portp))
+
+extern kern_return_t
+ipc_port_dnrequest(ipc_port_t, mach_port_name_t, ipc_port_t,
+ ipc_port_request_index_t *);
+
+extern kern_return_t
+ipc_port_dngrow(ipc_port_t);
+
+extern ipc_port_t
+ipc_port_dncancel(ipc_port_t, mach_port_name_t, ipc_port_request_index_t);
+
+#define ipc_port_dnrename(port, index, oname, nname) \
+MACRO_BEGIN \
+ ipc_port_request_t ipr, table; \
+ \
+ assert(ip_active(port)); \
+ \
+ table = port->ip_dnrequests; \
+ assert(table != IPR_NULL); \
+ \
+ ipr = &table[index]; \
+ assert(ipr->ipr_name == oname); \
+ \
+ ipr->ipr_name = nname; \
+MACRO_END
+
+/* Make a port-deleted request */
+extern void ipc_port_pdrequest(
+ ipc_port_t port,
+ ipc_port_t notify,
+ ipc_port_t *previousp);
+
+/* Make a no-senders request */
+extern void ipc_port_nsrequest(
+ ipc_port_t port,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp);
+
+/* Change a port's queue limit */
+extern void ipc_port_set_qlimit(
+ ipc_port_t port,
+ mach_port_msgcount_t qlimit);
+
+#define ipc_port_set_mscount(port, mscount) \
+MACRO_BEGIN \
+ assert(ip_active(port)); \
+ \
+ (port)->ip_mscount = (mscount); \
+MACRO_END
+
+extern struct ipc_mqueue *
+ipc_port_lock_mqueue(ipc_port_t);
+
+extern void
+ipc_port_set_seqno(ipc_port_t, mach_port_seqno_t);
+
+extern void
+ipc_port_set_protected_payload(ipc_port_t, rpc_uintptr_t);
+
+extern void
+ipc_port_clear_protected_payload(ipc_port_t);
+
+extern void
+ipc_port_clear_receiver(ipc_port_t);
+
+extern void
+ipc_port_init(ipc_port_t, ipc_space_t, mach_port_name_t);
+
+extern kern_return_t
+ipc_port_alloc(ipc_space_t, mach_port_name_t *, ipc_port_t *);
+
+extern kern_return_t
+ipc_port_alloc_name(ipc_space_t, mach_port_name_t, ipc_port_t *);
+
+extern void
+ipc_port_destroy(ipc_port_t);
+
+extern boolean_t
+ipc_port_check_circularity(ipc_port_t, ipc_port_t);
+
+extern ipc_port_t
+ipc_port_lookup_notify(ipc_space_t, mach_port_name_t);
+
+extern ipc_port_t
+ipc_port_make_send(ipc_port_t);
+
+extern ipc_port_t
+ipc_port_copy_send(ipc_port_t);
+
+extern mach_port_name_t
+ipc_port_copyout_send(ipc_port_t, ipc_space_t);
+
+extern void
+ipc_port_release_send(ipc_port_t);
+
+extern ipc_port_t
+ipc_port_make_sonce(ipc_port_t);
+
+extern void
+ipc_port_release_sonce(ipc_port_t);
+
+extern void
+ipc_port_release_receive(ipc_port_t);
+
+extern ipc_port_t
+ipc_port_alloc_special(ipc_space_t);
+
+extern void
+ipc_port_dealloc_special(ipc_port_t, ipc_space_t);
+
+#define ipc_port_alloc_kernel() \
+ ipc_port_alloc_special(ipc_space_kernel)
+#define ipc_port_dealloc_kernel(port) \
+ ipc_port_dealloc_special((port), ipc_space_kernel)
+
+#define ipc_port_alloc_reply() \
+ ipc_port_alloc_special(ipc_space_reply)
+#define ipc_port_dealloc_reply(port) \
+ ipc_port_dealloc_special((port), ipc_space_reply)
+
+#define ipc_port_reference(port) \
+ ipc_object_reference(&(port)->ip_object)
+
+#define ipc_port_release(port) \
+ ipc_object_release(&(port)->ip_object)
+
+static inline boolean_t
+ipc_port_flag_protected_payload(const struct ipc_port *port)
+{
+ return !! (port->ip_target.ipt_object.io_bits
+ & IO_BITS_PROTECTED_PAYLOAD);
+}
+
+static inline void
+ipc_port_flag_protected_payload_set(struct ipc_port *port)
+{
+ port->ip_target.ipt_object.io_bits |= IO_BITS_PROTECTED_PAYLOAD;
+}
+
+static inline void
+ipc_port_flag_protected_payload_clear(struct ipc_port *port)
+{
+ port->ip_target.ipt_object.io_bits &= ~IO_BITS_PROTECTED_PAYLOAD;
+}
+
+#endif /* _IPC_IPC_PORT_H_ */
diff --git a/ipc/ipc_print.h b/ipc/ipc_print.h
new file mode 100644
index 0000000..5e8e4f3
--- /dev/null
+++ b/ipc/ipc_print.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _IPC_PRINT_H_
+#define _IPC_PRINT_H_
+
+#if MACH_KDB
+
+#include <mach/mach_types.h>
+#include <mach/message.h>
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_pset.h>
+
+extern void ipc_port_print(const ipc_port_t);
+
+extern void ipc_pset_print(const ipc_pset_t);
+
+extern void ipc_kmsg_print(const ipc_kmsg_t);
+
+extern void ipc_msg_print(mach_msg_header_t*);
+
+#endif /* MACH_KDB */
+
+#endif /* _IPC_PRINT_H_ */
diff --git a/ipc/ipc_pset.c b/ipc/ipc_pset.c
new file mode 100644
index 0000000..30c12a2
--- /dev/null
+++ b/ipc/ipc_pset.c
@@ -0,0 +1,350 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_pset.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC port sets.
+ */
+
+#include <kern/printf.h>
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_space.h>
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#include <ipc/ipc_print.h>
+#endif /* MACH_KDB */
+
+
+/*
+ * Routine: ipc_pset_alloc
+ * Purpose:
+ * Allocate a port set.
+ * Conditions:
+ * Nothing locked. If successful, the port set is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port set is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_pset_alloc(
+ ipc_space_t space,
+ mach_port_name_t *namep,
+ ipc_pset_t *psetp)
+{
+ ipc_pset_t pset;
+ mach_port_name_t name;
+ kern_return_t kr;
+
+ kr = ipc_object_alloc(space, IOT_PORT_SET,
+ MACH_PORT_TYPE_PORT_SET, 0,
+ &name, (ipc_object_t *) &pset);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* pset is locked */
+
+ ipc_target_init(&pset->ips_target, name);
+
+ *namep = name;
+ *psetp = pset;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_pset_alloc_name
+ * Purpose:
+ * Allocate a port set, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the port set is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port set is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_pset_alloc_name(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_pset_t *psetp)
+{
+ ipc_pset_t pset;
+ kern_return_t kr;
+
+ kr = ipc_object_alloc_name(space, IOT_PORT_SET,
+ MACH_PORT_TYPE_PORT_SET, 0,
+ name, (ipc_object_t *) &pset);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* pset is locked */
+
+ ipc_target_init(&pset->ips_target, name);
+
+ *psetp = pset;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_pset_add
+ * Purpose:
+ * Puts a port into a port set.
+ * The port set gains a reference.
+ * Conditions:
+ * Both port and port set are locked and active.
+ * The port isn't already in a set.
+ * The owner of the port set is also receiver for the port.
+ */
+
+void
+ipc_pset_add(
+ ipc_pset_t pset,
+ ipc_port_t port)
+{
+ assert(ips_active(pset));
+ assert(ip_active(port));
+ assert(port->ip_pset == IPS_NULL);
+
+ port->ip_pset = pset;
+ port->ip_cur_target = &pset->ips_target;
+ ips_reference(pset);
+
+ imq_lock(&port->ip_messages);
+ imq_lock(&pset->ips_messages);
+
+ /* move messages from port's queue to the port set's queue */
+
+ ipc_mqueue_move(&pset->ips_messages, &port->ip_messages, port);
+ imq_unlock(&pset->ips_messages);
+ assert(ipc_kmsg_queue_empty(&port->ip_messages.imq_messages));
+
+ /* wake up threads waiting to receive from the port */
+
+ ipc_mqueue_changed(&port->ip_messages, MACH_RCV_PORT_CHANGED);
+ assert(ipc_thread_queue_empty(&port->ip_messages.imq_threads));
+ imq_unlock(&port->ip_messages);
+}
+
+/*
+ * Routine: ipc_pset_remove
+ * Purpose:
+ * Removes a port from a port set.
+ * The port set loses a reference.
+ * Conditions:
+ * Both port and port set are locked.
+ * The port must be active.
+ */
+
+void
+ipc_pset_remove(
+ ipc_pset_t pset,
+ ipc_port_t port)
+{
+ assert(ip_active(port));
+ assert(port->ip_pset == pset);
+
+ port->ip_pset = IPS_NULL;
+ port->ip_cur_target = &port->ip_target;
+ ips_release(pset);
+
+ imq_lock(&port->ip_messages);
+ imq_lock(&pset->ips_messages);
+
+ /* move messages from port set's queue to the port's queue */
+
+ ipc_mqueue_move(&port->ip_messages, &pset->ips_messages, port);
+
+ imq_unlock(&pset->ips_messages);
+ imq_unlock(&port->ip_messages);
+}
+
+/*
+ * Routine: ipc_pset_move
+ * Purpose:
+ * If nset is IPS_NULL, removes port
+ * from the port set it is in. Otherwise, adds
+ * port to nset, removing it from any set
+ * it might already be in.
+ * Conditions:
+ * The space is read-locked.
+ * Returns:
+ * KERN_SUCCESS Moved the port.
+ * KERN_NOT_IN_SET nset is null and port isn't in a set.
+ */
+
+kern_return_t
+ipc_pset_move(
+ ipc_space_t space,
+ ipc_port_t port,
+ ipc_pset_t nset)
+{
+ ipc_pset_t oset;
+
+ /*
+ * While we've got the space locked, it holds refs for
+ * the port and nset (because of the entries). Also,
+ * they must be alive. While we've got port locked, it
+ * holds a ref for oset, which might not be alive.
+ */
+
+ ip_lock(port);
+ assert(ip_active(port));
+
+ oset = port->ip_pset;
+
+ if (oset == nset) {
+ /* the port is already in the new set: a noop */
+
+ is_read_unlock(space);
+ } else if (oset == IPS_NULL) {
+ /* just add port to the new set */
+
+ ips_lock(nset);
+ assert(ips_active(nset));
+ is_read_unlock(space);
+
+ ipc_pset_add(nset, port);
+
+ ips_unlock(nset);
+ } else if (nset == IPS_NULL) {
+ /* just remove port from the old set */
+
+ is_read_unlock(space);
+ ips_lock(oset);
+
+ ipc_pset_remove(oset, port);
+
+ if (ips_active(oset))
+ ips_unlock(oset);
+ else {
+ ips_check_unlock(oset);
+ oset = IPS_NULL; /* trigger KERN_NOT_IN_SET */
+ }
+ } else {
+ /* atomically move port from oset to nset */
+
+ if (oset < nset) {
+ ips_lock(oset);
+ ips_lock(nset);
+ } else {
+ ips_lock(nset);
+ ips_lock(oset);
+ }
+
+ is_read_unlock(space);
+ assert(ips_active(nset));
+
+ ipc_pset_remove(oset, port);
+ ipc_pset_add(nset, port);
+
+ ips_unlock(nset);
+ ips_check_unlock(oset); /* KERN_NOT_IN_SET not a possibility */
+ }
+
+ ip_unlock(port);
+
+ return (((nset == IPS_NULL) && (oset == IPS_NULL)) ?
+ KERN_NOT_IN_SET : KERN_SUCCESS);
+}
+
+/*
+ * Routine: ipc_pset_destroy
+ * Purpose:
+ * Destroys a port_set.
+ *
+ * Doesn't remove members from the port set;
+ * that happens lazily. As members are removed,
+ * their messages are removed from the queue.
+ * Conditions:
+ * The port_set is locked and alive.
+ * The caller has a reference, which is consumed.
+ * Afterwards, the port_set is unlocked and dead.
+ */
+
+void
+ipc_pset_destroy(
+ ipc_pset_t pset)
+{
+ assert(ips_active(pset));
+
+ pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;
+
+ imq_lock(&pset->ips_messages);
+ ipc_mqueue_changed(&pset->ips_messages, MACH_RCV_PORT_DIED);
+ imq_unlock(&pset->ips_messages);
+
+ /* Common destruction for the IPC target. */
+ ipc_target_terminate(&pset->ips_target);
+
+ ips_release(pset); /* consume the ref our caller gave us */
+ ips_check_unlock(pset);
+}
+
+
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: ipc_pset_print
+ * Purpose:
+ * Pretty-print a port set for kdb.
+ */
+
+void
+ipc_pset_print(
+ const ipc_pset_t pset)
+{
+ printf("pset 0x%x\n", pset);
+
+ indent += 2;
+
+ ipc_object_print(&pset->ips_object);
+ iprintf("local_name = 0x%x\n", pset->ips_local_name);
+ iprintf("kmsgs = 0x%x", pset->ips_messages.imq_messages.ikmq_base);
+ printf(",rcvrs = 0x%x\n", pset->ips_messages.imq_threads.ithq_base);
+
+ indent -= 2;
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_pset.h b/ipc/ipc_pset.h
new file mode 100644
index 0000000..3f94be5
--- /dev/null
+++ b/ipc/ipc_pset.h
@@ -0,0 +1,92 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_pset.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for port sets.
+ */
+
+#ifndef _IPC_IPC_PSET_H_
+#define _IPC_IPC_PSET_H_
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_mqueue.h>
+#include "ipc_target.h"
+
+typedef struct ipc_pset {
+ struct ipc_target ips_target;
+
+} *ipc_pset_t;
+
+#define ips_object ips_target.ipt_object
+#define ips_local_name ips_target.ipt_name
+#define ips_messages ips_target.ipt_messages
+#define ips_references ips_object.io_references
+
+#define IPS_NULL ((ipc_pset_t) IO_NULL)
+
+#define ips_active(pset) io_active(&(pset)->ips_object)
+#define ips_lock(pset) io_lock(&(pset)->ips_object)
+#define ips_lock_try(pset) io_lock_try(&(pset)->ips_object)
+#define ips_unlock(pset) io_unlock(&(pset)->ips_object)
+#define ips_check_unlock(pset) io_check_unlock(&(pset)->ips_object)
+#define ips_reference(pset) io_reference(&(pset)->ips_object)
+#define ips_release(pset) io_release(&(pset)->ips_object)
+
+extern kern_return_t
+ipc_pset_alloc(ipc_space_t, mach_port_name_t *, ipc_pset_t *);
+
+extern kern_return_t
+ipc_pset_alloc_name(ipc_space_t, mach_port_name_t, ipc_pset_t *);
+
+extern void
+ipc_pset_add(ipc_pset_t, ipc_port_t);
+
+extern void
+ipc_pset_remove(ipc_pset_t, ipc_port_t);
+
+extern kern_return_t
+ipc_pset_move(ipc_space_t, ipc_port_t, ipc_pset_t);
+
+extern void
+ipc_pset_destroy(ipc_pset_t);
+
+#define ipc_pset_reference(pset) \
+ ipc_object_reference(&(pset)->ips_object)
+
+#define ipc_pset_release(pset) \
+ ipc_object_release(&(pset)->ips_object)
+
+#endif /* _IPC_IPC_PSET_H_ */
diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
new file mode 100644
index 0000000..79f70c3
--- /dev/null
+++ b/ipc/ipc_right.c
@@ -0,0 +1,2115 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_right.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC capabilities.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_notify.h>
+
+
+
+/*
+ * Routine: ipc_right_lookup_write
+ * Purpose:
+ * Finds an entry in a space, given the name.
+ * Conditions:
+ * Nothing locked. If successful, the space is write-locked.
+ * Returns:
+ * KERN_SUCCESS Found an entry.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ */
+
+kern_return_t
+ipc_right_lookup_write(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_entry_t *entryp)
+{
+ ipc_entry_t entry;
+
+ assert(space != IS_NULL);
+
+ is_write_lock(space);
+
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+
+ *entryp = entry;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_reverse
+ * Purpose:
+ * Translate (space, object) -> (name, entry).
+ * Only finds send/receive rights.
+ * Returns TRUE if an entry is found; if so,
+ * the object is locked and active.
+ * Conditions:
+ * The space must be locked (read or write) and active.
+ * Nothing else locked.
+ */
+
+boolean_t
+ipc_right_reverse(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_port_name_t *namep,
+ ipc_entry_t *entryp)
+{
+ ipc_port_t port;
+ mach_port_name_t name;
+ ipc_entry_t entry;
+
+ /* would switch on io_otype to handle multiple types of object */
+
+ assert(space->is_active);
+ assert(io_otype(object) == IOT_PORT);
+
+ port = (ipc_port_t) object;
+
+ ip_lock(port);
+ if (!ip_active(port)) {
+ ip_unlock(port);
+
+ return FALSE;
+ }
+
+ if (port->ip_receiver == space) {
+ name = port->ip_receiver_name;
+ assert(name != MACH_PORT_NULL);
+
+ entry = ipc_entry_lookup(space, name);
+
+ assert(entry != IE_NULL);
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(port == (ipc_port_t) entry->ie_object);
+
+ *namep = name;
+ *entryp = entry;
+ return TRUE;
+ }
+
+ if ((*entryp = ipc_reverse_lookup(space, (ipc_object_t) port))) {
+ *namep = (*entryp)->ie_name;
+ assert((entry = *entryp) != IE_NULL);
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
+ assert(port == (ipc_port_t) entry->ie_object);
+
+ return TRUE;
+ }
+
+ ip_unlock(port);
+ return FALSE;
+}
+
+/*
+ * Routine: ipc_right_dnrequest
+ * Purpose:
+ * Make a dead-name request, returning the previously
+ * registered send-once right. If notify is IP_NULL,
+ * just cancels the previously registered request.
+ *
+ * This interacts with the IE_BITS_COMPAT, because they
+ * both use ie_request. If this is a compat entry, then
+ * previous always gets IP_NULL. If notify is IP_NULL,
+ * then the entry remains a compat entry. Otherwise
+ * the real dead-name request is registered and the entry
+ * is no longer a compat entry.
+ * Conditions:
+ * Nothing locked. May allocate memory.
+ * Only consumes/returns refs if successful.
+ * Returns:
+ * KERN_SUCCESS Made/canceled dead-name request.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote port/dead rights.
+ * KERN_INVALID_ARGUMENT Name denotes dead name, but
+ * immediate is FALSE or notify is IP_NULL.
+ * KERN_UREFS_OVERFLOW Name denotes dead name, but
+ * generating immediate notif. would overflow urefs.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_right_dnrequest(
+ ipc_space_t space,
+ mach_port_name_t name,
+ boolean_t immediate,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+
+ for (;;) {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ bits = entry->ie_bits;
+ if (bits & MACH_PORT_TYPE_PORT_RIGHTS) {
+ ipc_port_t port;
+ ipc_port_request_index_t request;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (!ipc_right_check(space, port, name, entry)) {
+ /* port is locked and active */
+
+ if (notify == IP_NULL) {
+ previous = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+ ip_unlock(port);
+ is_write_unlock(space);
+ break;
+ }
+
+ /*
+ * If a registered soright exists,
+ * want to atomically switch with it.
+ * If ipc_port_dncancel finds us a
+ * soright, then the following
+ * ipc_port_dnrequest will reuse
+ * that slot, so we are guaranteed
+ * not to unlock and retry.
+ */
+
+ previous = ipc_right_dncancel_macro(space,
+ port, name, entry);
+
+ kr = ipc_port_dnrequest(port, name, notify,
+ &request);
+ if (kr != KERN_SUCCESS) {
+ assert(previous == IP_NULL);
+ is_write_unlock(space);
+
+ kr = ipc_port_dngrow(port);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ continue;
+ }
+
+ assert(request != 0);
+ ip_unlock(port);
+
+ entry->ie_request = request;
+ is_write_unlock(space);
+ break;
+ }
+
+ bits = entry->ie_bits;
+ assert(bits & MACH_PORT_TYPE_DEAD_NAME);
+ }
+
+ if ((bits & MACH_PORT_TYPE_DEAD_NAME) &&
+ immediate && (notify != IP_NULL)) {
+ mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(urefs > 0);
+
+ if (MACH_PORT_UREFS_OVERFLOW(urefs, 1)) {
+ is_write_unlock(space);
+ return KERN_UREFS_OVERFLOW;
+ }
+
+ entry->ie_bits = bits + 1; /* increment urefs */
+ is_write_unlock(space);
+
+ ipc_notify_dead_name(notify, name);
+ previous = IP_NULL;
+ break;
+ }
+
+ is_write_unlock(space);
+ if (bits & MACH_PORT_TYPE_PORT_OR_DEAD)
+ return KERN_INVALID_ARGUMENT;
+ else
+ return KERN_INVALID_RIGHT;
+ }
+
+ *previousp = previous;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_dncancel
+ * Purpose:
+ * Cancel a dead-name request and return the send-once right.
+ * Afterwards, entry->ie_request == 0.
+ * Conditions:
+ * The space must be write-locked; the port must be locked.
+ * The port must be active; the space doesn't have to be.
+ */
+
+ipc_port_t
+ipc_right_dncancel(
+ ipc_space_t space,
+ ipc_port_t port,
+ mach_port_name_t name,
+ ipc_entry_t entry)
+{
+ ipc_port_t dnrequest;
+
+ assert(ip_active(port));
+ assert(port == (ipc_port_t) entry->ie_object);
+
+ dnrequest = ipc_port_dncancel(port, name, entry->ie_request);
+ entry->ie_request = 0;
+
+ return dnrequest;
+}
+
+/*
+ * Routine: ipc_right_inuse
+ * Purpose:
+ * Check if an entry is being used.
+ * Returns TRUE if it is.
+ * Conditions:
+ * The space is write-locked and active.
+ * It is unlocked if the entry is inuse.
+ */
+
+boolean_t
+ipc_right_inuse(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_entry_t entry)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+ is_write_unlock(space);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * Routine: ipc_right_check
+ * Purpose:
+ * Check if the port has died. If it has,
+ * clean up the entry and return TRUE.
+ * Conditions:
+ * The space is write-locked; the port is not locked.
+ * If returns FALSE, the port is also locked and active.
+ * Otherwise, entry is converted to a dead name, freeing
+ * a reference to port.
+ */
+
+boolean_t
+ipc_right_check(
+	ipc_space_t space,
+	ipc_port_t port,
+	mach_port_name_t name,
+	ipc_entry_t entry)
+{
+	ipc_entry_bits_t bits;
+
+	assert(space->is_active);
+	assert(port == (ipc_port_t) entry->ie_object);
+
+	ip_lock(port);
+	if (ip_active(port))
+		return FALSE;	/* port stays locked for the caller */
+	ip_unlock(port);
+
+	/* this was either a pure send right or a send-once right */
+
+	bits = entry->ie_bits;
+	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
+	assert(IE_BITS_UREFS(bits) > 0);
+
+	if (bits & MACH_PORT_TYPE_SEND) {
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+
+		/* clean up msg-accepted request */
+
+		if (bits & IE_BITS_MAREQUEST) {
+			bits &= ~IE_BITS_MAREQUEST;
+
+			ipc_marequest_cancel(space, name);
+		}
+
+		/* the dead port can no longer be found via reverse lookup */
+		ipc_reverse_remove(space, (ipc_object_t) port);
+	} else {
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+	}
+
+	/* drop the entry's reference on the dead port */
+	ipc_port_release(port);
+
+	/* convert entry to dead name */
+
+	bits = (bits &~ IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;
+
+	if (entry->ie_request != 0) {
+		assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+
+		/* A pending dead-name request is converted into an extra
+		   dead-name user reference rather than a notification,
+		   since the entry itself records the death. */
+		entry->ie_request = 0;
+		bits++;		/* increment urefs */
+	}
+
+	entry->ie_bits = bits;
+	entry->ie_object = IO_NULL;
+
+	return TRUE;
+}
+
+/*
+ * Routine: ipc_right_clean
+ * Purpose:
+ * Cleans up an entry in a dead space.
+ * The entry isn't deallocated or removed
+ * from the reverse mappings.
+ * Conditions:
+ * The space is dead and unlocked.
+ */
+
+void
+ipc_right_clean(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_type_t type = IE_BITS_TYPE(bits);
+
+	assert(!space->is_active);
+
+	/*
+	 * We can't clean up IE_BITS_MAREQUEST when the space is dead.
+	 * This is because ipc_marequest_destroy can't turn off
+	 * the bit if the space is dead. Hence, it might be on
+	 * even though the marequest has been destroyed. It's OK
+	 * not to cancel the marequest, because ipc_marequest_destroy
+	 * cancels for us if the space is dead.
+	 *
+	 * IE_BITS_COMPAT/ipc_right_dncancel doesn't have this
+	 * problem, because we check that the port is active. If
+	 * we didn't cancel IE_BITS_COMPAT, ipc_port_destroy
+	 * would still work, but dead space refs would accumulate
+	 * in ip_dnrequests. They would use up slots in
+	 * ip_dnrequests and keep the spaces from being freed.
+	 */
+
+	switch (type) {
+	    case MACH_PORT_TYPE_DEAD_NAME:
+		/* dead names carry no object, request, or marequest */
+		assert(entry->ie_request == 0);
+		assert(entry->ie_object == IO_NULL);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		break;
+
+	    case MACH_PORT_TYPE_PORT_SET: {
+		ipc_pset_t pset = (ipc_pset_t) entry->ie_object;
+
+		assert(entry->ie_request == 0);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		assert(pset != IPS_NULL);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+
+		ipc_pset_destroy(pset); /* consumes ref, unlocks */
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND:
+	    case MACH_PORT_TYPE_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_ONCE: {
+		ipc_port_t port = (ipc_port_t) entry->ie_object;
+		ipc_port_t dnrequest;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		assert(port != IP_NULL);
+		ip_lock(port);
+
+		/* If the port is already dead, just drop our reference. */
+		if (!ip_active(port)) {
+			ip_release(port);
+			ip_check_unlock(port);
+			break;
+		}
+
+		/* cancel the dead-name request before tearing down rights */
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+
+		if (type & MACH_PORT_TYPE_SEND) {
+			assert(port->ip_srights > 0);
+			if (--port->ip_srights == 0) {
+				/* last send right: capture the no-senders
+				   request to notify after unlocking */
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+		}
+
+		if (type & MACH_PORT_TYPE_RECEIVE) {
+			assert(port->ip_receiver_name == name);
+			assert(port->ip_receiver == space);
+
+			ipc_port_clear_receiver(port);
+			ipc_port_destroy(port); /* consumes our ref, unlocks */
+		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
+			assert(port->ip_sorights > 0);
+			ip_unlock(port);
+
+			ipc_notify_send_once(port); /* consumes our ref */
+		} else {
+			assert(port->ip_receiver != space);
+
+			ip_release(port);
+			ip_unlock(port); /* port is active */
+		}
+
+		/* notifications are sent only after all locks are dropped */
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_clean: strange type");
+#else
+		panic("ipc_right_clean: strange type");
+#endif
+	}
+}
+
+/*
+ * Routine: ipc_right_destroy
+ * Purpose:
+ * Destroys an entry in a space.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS The entry was destroyed.
+ */
+
+kern_return_t
+ipc_right_destroy(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_type_t type = IE_BITS_TYPE(bits);
+
+	assert(space->is_active);
+
+	switch (type) {
+	    case MACH_PORT_TYPE_DEAD_NAME:
+		assert(entry->ie_request == 0);
+		assert(entry->ie_object == IO_NULL);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		ipc_entry_dealloc(space, name, entry);
+		is_write_unlock(space);
+		break;
+
+	    case MACH_PORT_TYPE_PORT_SET: {
+		ipc_pset_t pset = (ipc_pset_t) entry->ie_object;
+
+		assert(entry->ie_request == 0);
+		assert(pset != IPS_NULL);
+
+		/* free the entry first, so the space lock can be
+		   dropped before destroying the pset */
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+		is_write_unlock(space);
+
+		ipc_pset_destroy(pset); /* consumes ref, unlocks */
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND:
+	    case MACH_PORT_TYPE_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_ONCE: {
+		ipc_port_t port = (ipc_port_t) entry->ie_object;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+		ipc_port_t dnrequest;
+
+		assert(port != IP_NULL);
+
+		if (bits & IE_BITS_MAREQUEST) {
+			assert(type & MACH_PORT_TYPE_SEND_RECEIVE);
+
+			ipc_marequest_cancel(space, name);
+		}
+
+		/* pure send rights are in the reverse map; remove before
+		   the entry's object pointer is cleared */
+		if (type == MACH_PORT_TYPE_SEND)
+			ipc_reverse_remove(space, (ipc_object_t) port);
+
+		ip_lock(port);
+
+		if (!ip_active(port)) {
+			/* port died behind our back; just drop the ref
+			   and dispose of the entry */
+			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
+
+			ip_release(port);
+			ip_check_unlock(port);
+
+			entry->ie_request = 0;
+			entry->ie_object = IO_NULL;
+			ipc_entry_dealloc(space, name, entry);
+			is_write_unlock(space);
+
+			break;
+		}
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+
+		/* entry goes away before the port is torn down, so the
+		   space lock can be released early */
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+		is_write_unlock(space);
+
+		if (type & MACH_PORT_TYPE_SEND) {
+			assert(port->ip_srights > 0);
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+		}
+
+		if (type & MACH_PORT_TYPE_RECEIVE) {
+			assert(ip_active(port));
+			assert(port->ip_receiver == space);
+
+			ipc_port_clear_receiver(port);
+			ipc_port_destroy(port); /* consumes our ref, unlocks */
+		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
+			assert(port->ip_sorights > 0);
+			ip_unlock(port);
+
+			ipc_notify_send_once(port); /* consumes our ref */
+		} else {
+			assert(port->ip_receiver != space);
+
+			ip_release(port);
+			ip_unlock(port);
+		}
+
+		/* deliver notifications only after all locks are dropped */
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_destroy: strange type");
+#else
+		panic("ipc_right_destroy: strange type");
+#endif
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_dealloc
+ * Purpose:
+ * Releases a send/send-once/dead-name user ref.
+ * Like ipc_right_delta with a delta of -1,
+ * but looks at the entry to determine the right.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS A user ref was released.
+ * KERN_INVALID_RIGHT Entry has wrong type.
+ */
+
+kern_return_t
+ipc_right_dealloc(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_type_t type = IE_BITS_TYPE(bits);
+
+	assert(space->is_active);
+
+	switch (type) {
+	    case MACH_PORT_TYPE_DEAD_NAME: {
+	    dead_name:
+		/* Reached directly for dead names, or via goto from the
+		   SEND/SEND_ONCE cases after ipc_right_check converted
+		   the entry to a dead name. */
+
+		assert(IE_BITS_UREFS(bits) > 0);
+		assert(entry->ie_request == 0);
+		assert(entry->ie_object == IO_NULL);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		if (IE_BITS_UREFS(bits) == 1)
+			ipc_entry_dealloc(space, name, entry);
+		else
+			entry->ie_bits = bits-1; /* decrement urefs */
+
+		is_write_unlock(space);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND_ONCE: {
+		ipc_port_t port, dnrequest;
+
+		/* a send-once right always has exactly one uref */
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			bits = entry->ie_bits;
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+			goto dead_name;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_sorights > 0);
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+		ip_unlock(port);
+
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+		is_write_unlock(space);
+
+		/* deallocating a send-once right sends the send-once
+		   notification, consuming our port ref */
+		ipc_notify_send_once(port);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND: {
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			bits = entry->ie_bits;
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+			goto dead_name;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_srights > 0);
+
+		if (IE_BITS_UREFS(bits) == 1) {
+			/* last uref: the send right itself goes away */
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			ipc_reverse_remove(space, (ipc_object_t) port);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			ip_release(port);
+			entry->ie_object = IO_NULL;
+			ipc_entry_dealloc(space, name, entry);
+		} else
+			entry->ie_bits = bits-1; /* decrement urefs */
+
+		ip_unlock(port); /* even if dropped a ref, port is active */
+		is_write_unlock(space);
+
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND_RECEIVE: {
+		ipc_port_t port;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		/* we hold the receive right, so the port must be active */
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+		assert(port->ip_srights > 0);
+
+		if (IE_BITS_UREFS(bits) == 1) {
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+
+			/* strip the send right; the entry keeps the
+			   receive right (no ref is dropped here, the
+			   entry's ref stays with the receive right) */
+			entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|
+						  MACH_PORT_TYPE_SEND);
+		} else
+			entry->ie_bits = bits-1; /* decrement urefs */
+
+		ip_unlock(port);
+		is_write_unlock(space);
+
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+		break;
+	    }
+
+	    default:
+		is_write_unlock(space);
+		return KERN_INVALID_RIGHT;
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_delta
+ * Purpose:
+ * Modifies the user-reference count for a right.
+ * May deallocate the right, if the count goes to zero.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS Count was modified.
+ * KERN_INVALID_RIGHT Entry has wrong type.
+ * KERN_INVALID_VALUE Bad delta for the right.
+ * KERN_UREFS_OVERFLOW OK delta, except would overflow.
+ */
+
+kern_return_t
+ipc_right_delta(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry,
+	mach_port_right_t right,
+	mach_port_delta_t delta)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+	assert(right < MACH_PORT_RIGHT_NUMBER);
+
+	/* Rights-specific restrictions and operations. */
+
+	switch (right) {
+	    case MACH_PORT_RIGHT_PORT_SET: {
+		ipc_pset_t pset;
+
+		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
+		assert(IE_BITS_UREFS(bits) == 0);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		assert(entry->ie_request == 0);
+
+		/* port sets have no uref count: only 0 (no-op) and
+		   -1 (destroy) are meaningful deltas */
+		if (delta == 0)
+			goto success;
+
+		if (delta != -1)
+			goto invalid_value;
+
+		pset = (ipc_pset_t) entry->ie_object;
+		assert(pset != IPS_NULL);
+
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+		is_write_unlock(space);
+
+		ipc_pset_destroy(pset); /* consumes ref, unlocks */
+		break;
+	    }
+
+	    case MACH_PORT_RIGHT_RECEIVE: {
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			goto invalid_right;
+
+		/* like port sets, receive rights accept only 0 or -1 */
+		if (delta == 0)
+			goto success;
+
+		if (delta != -1)
+			goto invalid_value;
+
+		if (bits & IE_BITS_MAREQUEST) {
+			bits &= ~IE_BITS_MAREQUEST;
+
+			ipc_marequest_cancel(space, name);
+		}
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		/*
+		 * The port lock is needed for ipc_right_dncancel;
+		 * otherwise, we wouldn't have to take the lock
+		 * until just before dropping the space lock.
+		 */
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+
+		if (bits & MACH_PORT_TYPE_SEND) {
+			assert(IE_BITS_TYPE(bits) ==
+					MACH_PORT_TYPE_SEND_RECEIVE);
+			assert(IE_BITS_UREFS(bits) > 0);
+			assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+			assert(port->ip_srights > 0);
+
+			/*
+			 * The remaining send right turns into a
+			 * dead name. Notice we don't decrement
+			 * ip_srights, generate a no-senders notif,
+			 * or use ipc_right_dncancel, because the
+			 * port is destroyed "first".
+			 */
+
+			bits &= ~IE_BITS_TYPE_MASK;
+			bits |= MACH_PORT_TYPE_DEAD_NAME;
+
+			if (entry->ie_request != 0) {
+				/* pending dnrequest becomes an extra
+				   dead-name uref */
+				entry->ie_request = 0;
+				bits++;		/* increment urefs */
+			}
+
+			entry->ie_bits = bits;
+			entry->ie_object = IO_NULL;
+		} else {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+			assert(IE_BITS_UREFS(bits) == 0);
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			entry->ie_object = IO_NULL;
+			ipc_entry_dealloc(space, name, entry);
+		}
+		is_write_unlock(space);
+
+		ipc_port_clear_receiver(port);
+		ipc_port_destroy(port);	/* consumes ref, unlocks */
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    case MACH_PORT_RIGHT_SEND_ONCE: {
+		ipc_port_t port, dnrequest;
+
+		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		if ((delta > 0) || (delta < -1))
+			goto invalid_value;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			/* entry was converted to a dead name, so the
+			   caller no longer holds a send-once right */
+			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
+			goto invalid_right;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_sorights > 0);
+
+		if (delta == 0) {
+			ip_unlock(port);
+			goto success;
+		}
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+		ip_unlock(port);
+
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+		is_write_unlock(space);
+
+		/* dropping the send-once right triggers the notification */
+		ipc_notify_send_once(port);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    case MACH_PORT_RIGHT_DEAD_NAME: {
+		mach_port_urefs_t urefs;
+
+		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+			ipc_port_t port;
+
+			port = (ipc_port_t) entry->ie_object;
+			assert(port != IP_NULL);
+
+			/* a send/send-once right for a dead port counts
+			   as a dead name; check (and convert) here */
+			if (!ipc_right_check(space, port, name, entry)) {
+				/* port is locked and active */
+				ip_unlock(port);
+				goto invalid_right;
+			}
+
+			bits = entry->ie_bits;
+		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+		assert(IE_BITS_UREFS(bits) > 0);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		assert(entry->ie_object == IO_NULL);
+		assert(entry->ie_request == 0);
+
+		urefs = IE_BITS_UREFS(bits);
+		if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+			goto invalid_value;
+		if (MACH_PORT_UREFS_OVERFLOW(urefs, delta))
+			goto urefs_overflow;
+
+		if ((urefs + delta) == 0)
+			ipc_entry_dealloc(space, name, entry);
+		else
+			entry->ie_bits = bits + delta; /* urefs live in the
+							  low bits */
+
+		is_write_unlock(space);
+		break;
+	    }
+
+	    case MACH_PORT_RIGHT_SEND: {
+		mach_port_urefs_t urefs;
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		if ((bits & MACH_PORT_TYPE_SEND) == 0)
+			goto invalid_right;
+
+		/* maximum urefs for send is MACH_PORT_UREFS_MAX-1 */
+
+		urefs = IE_BITS_UREFS(bits);
+		if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+			goto invalid_value;
+		if (MACH_PORT_UREFS_OVERFLOW(urefs+1, delta))
+			goto urefs_overflow;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
+			goto invalid_right;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_srights > 0);
+
+		if ((urefs + delta) == 0) {
+			/* uref count hits zero: send right goes away */
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+
+			if (bits & MACH_PORT_TYPE_RECEIVE) {
+				assert(port->ip_receiver_name == name);
+				assert(port->ip_receiver == space);
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_SEND_RECEIVE);
+
+				/* entry keeps the receive right */
+				entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|
+						       MACH_PORT_TYPE_SEND);
+			} else {
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_SEND);
+
+				dnrequest = ipc_right_dncancel_macro(
+						space, port, name, entry);
+
+				ipc_reverse_remove(space, (ipc_object_t) port);
+
+				if (bits & IE_BITS_MAREQUEST)
+					ipc_marequest_cancel(space, name);
+
+				ip_release(port);
+				entry->ie_object = IO_NULL;
+				ipc_entry_dealloc(space, name, entry);
+			}
+		} else
+			entry->ie_bits = bits + delta; /* adjust urefs */
+
+		ip_unlock(port); /* even if dropped a ref, port is active */
+		is_write_unlock(space);
+
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_delta: strange right");
+#else
+		panic("ipc_right_delta: strange right");
+#endif
+	}
+
+	return KERN_SUCCESS;
+
+    success:
+	is_write_unlock(space);
+	return KERN_SUCCESS;
+
+    invalid_right:
+	is_write_unlock(space);
+	return KERN_INVALID_RIGHT;
+
+    invalid_value:
+	is_write_unlock(space);
+	return KERN_INVALID_VALUE;
+
+    urefs_overflow:
+	is_write_unlock(space);
+	return KERN_UREFS_OVERFLOW;
+}
+
+/*
+ * Routine: ipc_right_info
+ * Purpose:
+ * Retrieves information about the right.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return
+ * if the call is unsuccessful. The space must be active.
+ * Returns:
+ * KERN_SUCCESS Retrieved info; space still locked.
+ */
+
+kern_return_t
+ipc_right_info(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry,
+	mach_port_type_t *typep,
+	mach_port_urefs_t *urefsp)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	ipc_port_request_index_t request;
+	mach_port_type_t type;
+
+	if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+		ipc_port_t port = (ipc_port_t) entry->ie_object;
+
+		/* if the port has died, the entry is converted to a
+		   dead name and the info reflects that */
+		if (ipc_right_check(space, port, name, entry)) {
+			bits = entry->ie_bits;
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+		} else
+			ip_unlock(port);
+	}
+
+	type = IE_BITS_TYPE(bits);
+	request = entry->ie_request;
+
+	/* fold the pseudo-type bits for pending requests into the type */
+	if (request != 0)
+		type |= MACH_PORT_TYPE_DNREQUEST;
+	if (bits & IE_BITS_MAREQUEST)
+		type |= MACH_PORT_TYPE_MAREQUEST;
+
+	*typep = type;
+	*urefsp = IE_BITS_UREFS(bits);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_copyin_check
+ * Purpose:
+ * Check if a subsequent ipc_right_copyin would succeed.
+ * Conditions:
+ * The space is locked (read or write) and active.
+ */
+
+boolean_t
+ipc_right_copyin_check(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry,
+	mach_msg_type_name_t msgt_name)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+
+	switch (msgt_name) {
+	    case MACH_MSG_TYPE_MAKE_SEND:
+	    case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+	    case MACH_MSG_TYPE_MOVE_RECEIVE:
+		/* all three require holding the receive right */
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			return FALSE;
+
+		break;
+
+	    case MACH_MSG_TYPE_COPY_SEND:
+	    case MACH_MSG_TYPE_MOVE_SEND:
+	    case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
+		ipc_port_t port;
+		boolean_t active;
+
+		/* a dead name is acceptable for any send disposition */
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			break;
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			return FALSE;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		/* note: the entry is NOT converted here (space may only
+		   be read-locked); a dead port just passes the check */
+		ip_lock(port);
+		active = ip_active(port);
+		ip_unlock(port);
+
+		if (!active) {
+			break;
+		}
+
+		if (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
+			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0)
+				return FALSE;
+		} else {
+			if ((bits & MACH_PORT_TYPE_SEND) == 0)
+				return FALSE;
+		}
+
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_copyin_check: strange rights");
+#else
+		panic("ipc_right_copyin_check: strange rights");
+#endif
+	}
+
+	return TRUE;
+}
+
+/*
+ * Routine: ipc_right_copyin
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, unless it is IO_DEAD,
+ * and possibly a send-once right which should
+ * be used in a port-deleted notification.
+ *
+ * If deadok is not TRUE, the copyin operation
+ * will fail instead of producing IO_DEAD.
+ *
+ * The entry is never deallocated (except
+ * when KERN_INVALID_NAME), so the caller
+ * should deallocate the entry if its type
+ * is MACH_PORT_TYPE_NONE.
+ * Conditions:
+ * The space is write-locked and active.
+ * Returns:
+ * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_right_copyin(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry,
+	mach_msg_type_name_t msgt_name,
+	boolean_t deadok,
+	ipc_object_t *objectp,
+	ipc_port_t *sorightp)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+
+	switch (msgt_name) {
+	    case MACH_MSG_TYPE_MAKE_SEND: {
+		ipc_port_t port;
+
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			goto invalid_right;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		/* we hold the receive right, so the port is active */
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+
+		/* mint a new send right; mscount tracks makes */
+		port->ip_mscount++;
+		port->ip_srights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = IP_NULL;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
+		ipc_port_t port;
+
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			goto invalid_right;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+
+		/* mint a new send-once right */
+		port->ip_sorights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = IP_NULL;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MOVE_RECEIVE: {
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			goto invalid_right;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+
+		if (bits & MACH_PORT_TYPE_SEND) {
+			assert(IE_BITS_TYPE(bits) ==
+					MACH_PORT_TYPE_SEND_RECEIVE);
+			assert(IE_BITS_UREFS(bits) > 0);
+			assert(port->ip_srights > 0);
+
+			/* the entry becomes a pure send right, so it
+			   now needs a reverse-map entry and its own ref */
+			entry->ie_name = name;
+			ipc_reverse_insert(space, (ipc_object_t) port, entry);
+
+			ip_reference(port);
+		} else {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+			assert(IE_BITS_UREFS(bits) == 0);
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			entry->ie_object = IO_NULL;
+		}
+		entry->ie_bits = bits &~ MACH_PORT_TYPE_RECEIVE;
+
+		ipc_port_clear_receiver(port);
+
+		/* the port is now in transit: no receiver name/space */
+		port->ip_receiver_name = MACH_PORT_NULL;
+		port->ip_destination = IP_NULL;
+
+		/*
+		 * Clear the protected payload field to retain
+		 * the behavior of mach_msg.
+		 */
+		ipc_port_flag_protected_payload_clear(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = dnrequest;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_COPY_SEND: {
+		ipc_port_t port;
+
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			goto copy_dead;
+
+		/* allow for dead send-once rights */
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			bits = entry->ie_bits;
+			goto copy_dead;
+		}
+		/* port is locked and active */
+
+		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+			/* can't copy a send-once right */
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+			assert(port->ip_sorights > 0);
+
+			ip_unlock(port);
+			goto invalid_right;
+		}
+
+		assert(port->ip_srights > 0);
+
+		/* duplicate the send right; entry is untouched */
+		port->ip_srights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = IP_NULL;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MOVE_SEND: {
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			goto move_dead;
+
+		/* allow for dead send-once rights */
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			bits = entry->ie_bits;
+			goto move_dead;
+		}
+		/* port is locked and active */
+
+		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+			assert(port->ip_sorights > 0);
+
+			ip_unlock(port);
+			goto invalid_right;
+		}
+
+		assert(port->ip_srights > 0);
+
+		if (IE_BITS_UREFS(bits) == 1) {
+			/* last uref: the entry's own send right moves
+			   into the message instead of minting a new one */
+			if (bits & MACH_PORT_TYPE_RECEIVE) {
+				assert(port->ip_receiver_name == name);
+				assert(port->ip_receiver == space);
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_SEND_RECEIVE);
+
+				ip_reference(port);
+			} else {
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_SEND);
+
+				dnrequest = ipc_right_dncancel_macro(
+						space, port, name, entry);
+
+				ipc_reverse_remove(space, (ipc_object_t) port);
+
+				if (bits & IE_BITS_MAREQUEST)
+					ipc_marequest_cancel(space, name);
+
+				/* entry's ref travels with the right */
+				entry->ie_object = IO_NULL;
+			}
+			entry->ie_bits = bits &~
+				(IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND);
+		} else {
+			port->ip_srights++;
+			ip_reference(port);
+			entry->ie_bits = bits-1; /* decrement urefs */
+		}
+
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = dnrequest;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
+		ipc_port_t port;
+		ipc_port_t dnrequest;
+
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			goto move_dead;
+
+		/* allow for dead send rights */
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			bits = entry->ie_bits;
+			goto move_dead;
+		}
+		/* port is locked and active */
+
+		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
+			assert(bits & MACH_PORT_TYPE_SEND);
+			assert(port->ip_srights > 0);
+
+			ip_unlock(port);
+			goto invalid_right;
+		}
+
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		assert(port->ip_sorights > 0);
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+		ip_unlock(port);
+
+		/* the send-once right (and its ref) moves to the message;
+		   entry is left as MACH_PORT_TYPE_NONE for the caller */
+		entry->ie_object = IO_NULL;
+		entry->ie_bits = bits &~ MACH_PORT_TYPE_SEND_ONCE;
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = dnrequest;
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_copyin: strange rights");
+#else
+		panic("ipc_right_copyin: strange rights");
+#endif
+	}
+
+	return KERN_SUCCESS;
+
+    copy_dead:
+	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+	assert(IE_BITS_UREFS(bits) > 0);
+	assert((bits & IE_BITS_MAREQUEST) == 0);
+	assert(entry->ie_request == 0);
+	assert(entry->ie_object == 0);
+
+	if (!deadok)
+		goto invalid_right;
+
+	/* copying a dead name leaves the entry untouched */
+	*objectp = IO_DEAD;
+	*sorightp = IP_NULL;
+	return KERN_SUCCESS;
+
+    move_dead:
+	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+	assert(IE_BITS_UREFS(bits) > 0);
+	assert((bits & IE_BITS_MAREQUEST) == 0);
+	assert(entry->ie_request == 0);
+	assert(entry->ie_object == 0);
+
+	if (!deadok)
+		goto invalid_right;
+
+	/* moving a dead name consumes one uref */
+	if (IE_BITS_UREFS(bits) == 1)
+		entry->ie_bits = bits &~ MACH_PORT_TYPE_DEAD_NAME;
+	else
+		entry->ie_bits = bits-1; /* decrement urefs */
+
+	*objectp = IO_DEAD;
+	*sorightp = IP_NULL;
+	return KERN_SUCCESS;
+
+    invalid_right:
+	return KERN_INVALID_RIGHT;
+}
+
+/*
+ * Routine: ipc_right_copyin_undo
+ * Purpose:
+ * Undoes the effects of an ipc_right_copyin
+ * of a send/send-once right that is dead.
+ * (Object is either IO_DEAD or a dead port.)
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
+void
+ipc_right_copyin_undo(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry,
+	mach_msg_type_name_t msgt_name,
+	ipc_object_t object,
+	ipc_port_t soright)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+
+	assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+	       (msgt_name == MACH_MSG_TYPE_COPY_SEND) ||
+	       (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+
+	if (soright != IP_NULL) {
+		/* copyin also canceled a dead-name request: restore the
+		   entry as a dead name with 2 urefs (the right that was
+		   moved plus the canceled request). */
+		assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+		       (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+		assert(entry->ie_object == IO_NULL);
+		assert(object != IO_DEAD);
+
+		entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) |
+				  MACH_PORT_TYPE_DEAD_NAME | 2);
+	} else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE) {
+		/* the moved right emptied the entry: restore it as a
+		   dead name with a single uref. */
+		assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+		       (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+		assert(entry->ie_object == IO_NULL);
+
+		entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) |
+				  MACH_PORT_TYPE_DEAD_NAME | 1);
+	} else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME) {
+		assert(entry->ie_object == IO_NULL);
+		assert(object == IO_DEAD);
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		/* a move consumed a uref; give it back (copy didn't) */
+		if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
+			assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+
+			entry->ie_bits = bits+1; /* increment urefs */
+		}
+	} else {
+		/* entry still holds a send right on a now-dead port */
+		assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+		       (msgt_name == MACH_MSG_TYPE_COPY_SEND));
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+		assert(object != IO_DEAD);
+		assert(entry->ie_object == object);
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
+			assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX-1);
+
+			entry->ie_bits = bits+1; /* increment urefs */
+		}
+
+		/*
+		 * May as well convert the entry to a dead name.
+		 * (Or if it is a compat entry, destroy it.)
+		 */
+
+		(void) ipc_right_check(space, (ipc_port_t) object,
+				       name, entry);
+		/* object is dead so it is not locked */
+	}
+
+	/* release the reference acquired by copyin */
+
+	if (object != IO_DEAD)
+		ipc_object_release(object);
+}
+
+/*
+ * Routine: ipc_right_copyin_two
+ * Purpose:
+ * Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
+ * and deadok == FALSE, except that this moves two
+ * send rights at once.
+ * Conditions:
+ * The space is write-locked and active.
+ * The object is returned with two refs/send rights.
+ * Returns:
+ * KERN_SUCCESS Acquired an object.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_right_copyin_two(
+	ipc_space_t space,
+	mach_port_name_t name,
+	ipc_entry_t entry,
+	ipc_object_t *objectp,
+	ipc_port_t *sorightp)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_urefs_t urefs;
+	ipc_port_t port;
+	ipc_port_t dnrequest = IP_NULL;
+
+	assert(space->is_active);
+
+	if ((bits & MACH_PORT_TYPE_SEND) == 0)
+		goto invalid_right;
+
+	/* need at least two urefs, one for each moved right */
+	urefs = IE_BITS_UREFS(bits);
+	if (urefs < 2)
+		goto invalid_right;
+
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	/* deadok is effectively FALSE: a dead port is an error */
+	if (ipc_right_check(space, port, name, entry)) {
+		goto invalid_right;
+	}
+	/* port is locked and active */
+
+	assert(port->ip_srights > 0);
+
+	if (urefs == 2) {
+		/* both urefs consumed: the send right leaves the entry */
+		if (bits & MACH_PORT_TYPE_RECEIVE) {
+			assert(port->ip_receiver_name == name);
+			assert(port->ip_receiver == space);
+			assert(IE_BITS_TYPE(bits) ==
+					MACH_PORT_TYPE_SEND_RECEIVE);
+
+			/* entry keeps its ref with the receive right;
+			   mint one extra send right + two message refs */
+			port->ip_srights++;
+			ip_reference(port);
+			ip_reference(port);
+		} else {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			ipc_reverse_remove(space, (ipc_object_t) port);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			/* entry's own right/ref becomes one of the two;
+			   mint just one more */
+			port->ip_srights++;
+			ip_reference(port);
+			entry->ie_object = IO_NULL;
+		}
+		entry->ie_bits = bits &~
+			(IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND);
+	} else {
+		/* plenty of urefs left: mint two fresh send rights */
+		port->ip_srights += 2;
+		ip_reference(port);
+		ip_reference(port);
+		entry->ie_bits = bits-2; /* decrement urefs */
+	}
+	ip_unlock(port);
+
+	*objectp = (ipc_object_t) port;
+	*sorightp = dnrequest;
+	return KERN_SUCCESS;
+
+    invalid_right:
+	return KERN_INVALID_RIGHT;
+}
+
+/*
+ * Routine: ipc_right_copyout
+ * Purpose:
+ * Copyout a capability to a space.
+ * If successful, consumes a ref for the object.
+ *
+ * Always succeeds when given a newly-allocated entry,
+ * because user-reference overflow isn't a possibility.
+ *
+ * If copying out the object would cause the user-reference
+ * count in the entry to overflow, and overflow is TRUE,
+ * then instead the user-reference count is left pegged
+ * to its maximum value and the copyout succeeds anyway.
+ * Conditions:
+ * The space is write-locked and active.
+ * The object is locked and active.
+ * The object is unlocked; the space isn't.
+ * Returns:
+ * KERN_SUCCESS Copied out capability.
+ * KERN_UREFS_OVERFLOW User-refs would overflow;
+ * guaranteed not to happen with a fresh entry
+ * or if overflow=TRUE was specified.
+ */
+
+kern_return_t
+ipc_right_copyout(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_entry_t entry,
+ mach_msg_type_name_t msgt_name,
+ boolean_t overflow,
+ ipc_object_t object)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ ipc_port_t port;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+ assert(io_active(object));
+ assert(entry->ie_object == object);
+
+ port = (ipc_port_t) object;
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_PORT_SEND_ONCE:
+ /* The entry must be fresh; it becomes a send-once right. */
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(port->ip_sorights > 0);
+
+ /* transfer send-once right and ref to entry */
+ ip_unlock(port);
+
+ entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ break;
+
+ case MACH_MSG_TYPE_PORT_SEND:
+ assert(port->ip_srights > 0);
+
+ if (bits & MACH_PORT_TYPE_SEND) {
+ /* Entry already holds a send right; just bump urefs. */
+ mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
+
+ assert(port->ip_srights > 1);
+ assert(urefs > 0);
+ assert(urefs < MACH_PORT_UREFS_MAX);
+
+ if (urefs+1 == MACH_PORT_UREFS_MAX) {
+ if (overflow) {
+ /* leave urefs pegged to maximum */
+
+ port->ip_srights--;
+ ip_release(port);
+ ip_unlock(port);
+ return KERN_SUCCESS;
+ }
+
+ ip_unlock(port);
+ return KERN_UREFS_OVERFLOW;
+ }
+
+ /* Consume the carried send right and its ref. */
+ port->ip_srights--;
+ ip_release(port);
+ ip_unlock(port);
+ } else if (bits & MACH_PORT_TYPE_RECEIVE) {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* transfer send right to entry */
+ ip_release(port);
+ ip_unlock(port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* transfer send right and ref to entry */
+ ip_unlock(port);
+
+ /* entry is locked holding ref, so can use port */
+
+ /* Record the name->port pair for reverse lookups. */
+ entry->ie_name = name;
+ ipc_reverse_insert(space, (ipc_object_t) port, entry);
+ }
+
+ entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+ break;
+
+ case MACH_MSG_TYPE_PORT_RECEIVE: {
+ ipc_port_t dest;
+
+ assert(port->ip_mscount == 0);
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ dest = port->ip_destination;
+
+ /* This space becomes the port's receiver. */
+ port->ip_receiver_name = name;
+ port->ip_receiver = space;
+
+ /*
+ * Clear the protected payload field to retain
+ * the behavior of mach_msg.
+ */
+ ipc_port_flag_protected_payload_clear(port);
+
+ assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
+
+ if (bits & MACH_PORT_TYPE_SEND) {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(port->ip_srights > 0);
+
+ ip_release(port);
+ ip_unlock(port);
+
+ /* entry is locked holding ref, so can use port */
+
+ /* The entry now names a receive right, too. */
+ ipc_reverse_remove(space, (ipc_object_t) port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* transfer ref to entry */
+ ip_unlock(port);
+ }
+
+ entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
+
+ /* Drop the ref on the former destination, if any. */
+ if (dest != IP_NULL)
+ ipc_port_release(dest);
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_copyout: strange rights");
+#else
+ panic("ipc_right_copyout: strange rights");
+#endif
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_rename
+ * Purpose:
+ * Transfer an entry from one name to another.
+ * The old entry is deallocated.
+ * Conditions:
+ * The space is write-locked and active.
+ * The new entry is unused. Upon return,
+ * the space is unlocked.
+ * Returns:
+ * KERN_SUCCESS Moved entry to new name.
+ */
+
+kern_return_t
+ipc_right_rename(
+ ipc_space_t space,
+ mach_port_name_t oname,
+ ipc_entry_t oentry,
+ mach_port_name_t nname,
+ ipc_entry_t nentry)
+{
+ ipc_entry_bits_t bits = oentry->ie_bits;
+ ipc_port_request_index_t request = oentry->ie_request;
+ ipc_object_t object = oentry->ie_object;
+
+ assert(space->is_active);
+ assert(oname != nname);
+
+ /*
+ * If IE_BITS_COMPAT, we can't allow the entry to be renamed
+ * if the port is dead. (This would foil ipc_port_destroy.)
+ * Instead we should fail because oentry shouldn't exist.
+ * Note IE_BITS_COMPAT implies ie_request != 0.
+ */
+
+ if (request != 0) {
+ ipc_port_t port;
+
+ assert(bits & MACH_PORT_TYPE_PORT_RIGHTS);
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, oname, oentry)) {
+ /* Port died; the entry was converted to a dead name. */
+ bits = oentry->ie_bits;
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(oentry->ie_request == 0);
+ request = 0;
+ assert(oentry->ie_object == IO_NULL);
+ object = IO_NULL;
+ } else {
+ /* port is locked and active */
+
+ /* Re-register the dead-name request under nname. */
+ ipc_port_dnrename(port, request, oname, nname);
+ ip_unlock(port);
+ oentry->ie_request = 0;
+ }
+ }
+
+ if (bits & IE_BITS_MAREQUEST) {
+ assert(bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+ ipc_marequest_rename(space, oname, nname);
+ }
+
+ /* initialize nentry before letting ipc_reverse_insert see it */
+
+ assert((nentry->ie_bits & IE_BITS_RIGHT_MASK) == 0);
+ nentry->ie_bits |= bits & IE_BITS_RIGHT_MASK;
+ nentry->ie_request = request;
+ nentry->ie_object = object;
+
+ /* Update any per-right bookkeeping that records the name. */
+ switch (IE_BITS_TYPE(bits)) {
+ case MACH_PORT_TYPE_SEND: {
+ ipc_port_t port;
+
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+ /* Re-key the reverse map to point at the new entry. */
+ ipc_reverse_remove(space, (ipc_object_t) port);
+ nentry->ie_name = nname;
+ ipc_reverse_insert(space, (ipc_object_t) port, nentry);
+ break;
+ }
+
+ case MACH_PORT_TYPE_RECEIVE:
+ case MACH_PORT_TYPE_SEND_RECEIVE: {
+ ipc_port_t port;
+
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == oname);
+ assert(port->ip_receiver == space);
+
+ /* The port records its receiver's name; update it. */
+ port->ip_receiver_name = nname;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_PORT_TYPE_PORT_SET: {
+ ipc_pset_t pset;
+
+ pset = (ipc_pset_t) object;
+ assert(pset != IPS_NULL);
+
+ ips_lock(pset);
+ assert(ips_active(pset));
+ assert(pset->ips_local_name == oname);
+
+ /* Port sets record their local name; update it. */
+ pset->ips_local_name = nname;
+ ips_unlock(pset);
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND_ONCE:
+ case MACH_PORT_TYPE_DEAD_NAME:
+ /* Nothing outside the entry records the name. */
+ break;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_rename: strange rights");
+#else
+ panic("ipc_right_rename: strange rights");
+#endif
+ }
+
+ /* Free the old entry and unlock the space, per the contract. */
+ assert(oentry->ie_request == 0);
+ oentry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, oname, oentry);
+ is_write_unlock(space);
+
+ return KERN_SUCCESS;
+}
diff --git a/ipc/ipc_right.h b/ipc/ipc_right.h
new file mode 100644
index 0000000..6802abb
--- /dev/null
+++ b/ipc/ipc_right.h
@@ -0,0 +1,112 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_right.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of functions to manipulate IPC capabilities.
+ */
+
+#ifndef _IPC_IPC_RIGHT_H_
+#define _IPC_IPC_RIGHT_H_
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_port.h>
+
+/* Read-mode lookups share the write-mode implementation. */
+#define ipc_right_lookup_read ipc_right_lookup_write
+
+/* Look up the entry a name denotes. */
+extern kern_return_t
+ipc_right_lookup_write(ipc_space_t, mach_port_name_t, ipc_entry_t *);
+
+/* Find the name/entry holding a given object, if any. */
+extern boolean_t
+ipc_right_reverse(ipc_space_t, ipc_object_t,
+ mach_port_name_t *, ipc_entry_t *);
+
+/* Make a dead-name notification request for a name. */
+extern kern_return_t
+ipc_right_dnrequest(ipc_space_t, mach_port_name_t, boolean_t,
+ ipc_port_t, ipc_port_t *);
+
+/* Cancel an entry's dead-name request; returns the notify port. */
+extern ipc_port_t
+ipc_right_dncancel(ipc_space_t, ipc_port_t, mach_port_name_t, ipc_entry_t);
+
+/* Skip the call entirely when no request is outstanding. */
+#define ipc_right_dncancel_macro(space, port, name, entry) \
+ (((entry)->ie_request == 0) ? IP_NULL : \
+ ipc_right_dncancel((space), (port), (name), (entry)))
+
+/* Is the name already in use by a right? */
+extern boolean_t
+ipc_right_inuse(ipc_space_t, mach_port_name_t, ipc_entry_t);
+
+/* TRUE if the entry's port is dead; the entry becomes a dead name. */
+extern boolean_t
+ipc_right_check(ipc_space_t, ipc_port_t, mach_port_name_t, ipc_entry_t);
+
+/* Release the right held by an entry (space teardown path). */
+extern void
+ipc_right_clean(ipc_space_t, mach_port_name_t, ipc_entry_t);
+
+/* Destroy the right denoted by a name. */
+extern kern_return_t
+ipc_right_destroy(ipc_space_t, mach_port_name_t, ipc_entry_t);
+
+/* Deallocate the right denoted by a name. */
+extern kern_return_t
+ipc_right_dealloc(ipc_space_t, mach_port_name_t, ipc_entry_t);
+
+/* Apply a user-reference delta to a particular right. */
+extern kern_return_t
+ipc_right_delta(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ mach_port_right_t, mach_port_delta_t);
+
+/* Report the type and user-reference count of a right. */
+extern kern_return_t
+ipc_right_info(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ mach_port_type_t *, mach_port_urefs_t *);
+
+/* Would ipc_right_copyin succeed for this entry/disposition? */
+extern boolean_t
+ipc_right_copyin_check(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ mach_msg_type_name_t);
+
+/* Copy in a right for message transfer. */
+extern kern_return_t
+ipc_right_copyin(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ mach_msg_type_name_t, boolean_t,
+ ipc_object_t *, ipc_port_t *);
+
+/* Undo a previous ipc_right_copyin. */
+extern void
+ipc_right_copyin_undo(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ mach_msg_type_name_t, ipc_object_t, ipc_port_t);
+
+/* Move two send rights at once (see ipc_right.c). */
+extern kern_return_t
+ipc_right_copyin_two(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ ipc_object_t *, ipc_port_t *);
+
+/* Copy out a capability into a space's entry. */
+extern kern_return_t
+ipc_right_copyout(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ mach_msg_type_name_t, boolean_t, ipc_object_t);
+
+/* Move an entry from one name to another. */
+extern kern_return_t
+ipc_right_rename(ipc_space_t, mach_port_name_t, ipc_entry_t,
+ mach_port_name_t, ipc_entry_t);
+
+#endif /* _IPC_IPC_RIGHT_H_ */
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
new file mode 100644
index 0000000..77040d1
--- /dev/null
+++ b/ipc/ipc_space.c
@@ -0,0 +1,215 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_space.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC capability spaces.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/sched_prim.h>
+#include <kern/slab.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_right.h>
+
+
+
+struct kmem_cache ipc_space_cache; /* backs is_alloc()/is_free() */
+ipc_space_t ipc_space_kernel;
+ipc_space_t ipc_space_reply;
+
+/*
+ * Routine: ipc_space_reference
+ * Routine: ipc_space_release
+ * Purpose:
+ * Function versions of the IPC space macros.
+ * The "is_" cover macros can be defined to use the
+ * macros or the functions, as desired.
+ */
+
+/* Function form: take a reference on SPACE. */
+void
+ipc_space_reference(
+ ipc_space_t space)
+{
+ ipc_space_reference_macro(space);
+}
+
+/* Function form: drop a reference; the last one frees SPACE. */
+void
+ipc_space_release(
+ ipc_space_t space)
+{
+ ipc_space_release_macro(space);
+}
+
+/* A place-holder object for the reserved zeroth entry. Being static it
+ is zero-filled, so its type is presumably MACH_PORT_TYPE_NONE -- it
+ should never be treated as a live right. */
+struct ipc_entry zero_entry;
+
+/*
+ * Routine: ipc_space_create
+ * Purpose:
+ * Creates a new IPC space.
+ *
+ * The new space has two references, one for the caller
+ * and one because it is active.
+ * Conditions:
+ * Nothing locked. Allocates memory.
+ * Returns:
+ * KERN_SUCCESS Created a space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_space_create(
+ ipc_space_t *spacep)
+{
+ ipc_space_t space;
+
+ space = is_alloc();
+ if (space == IS_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /* One ref for the caller, one because the space is active. */
+ is_ref_lock_init(space);
+ space->is_references = 2;
+
+ is_lock_init(space);
+ space->is_active = TRUE;
+
+ /* Set up the name map and the object->entry reverse map. */
+ rdxtree_init(&space->is_map);
+ rdxtree_init(&space->is_reverse_map);
+ /* The zeroth entry is reserved. */
+ /*
+ * NOTE(review): the rdxtree_insert result is discarded here;
+ * presumably insertion into a freshly initialized tree cannot
+ * fail -- confirm against rdxtree's contract.
+ */
+ rdxtree_insert(&space->is_map, 0, &zero_entry);
+ space->is_size = 1;
+ space->is_free_list = NULL;
+ space->is_free_list_size = 0;
+
+ *spacep = space;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_space_create_special
+ * Purpose:
+ * Create a special space. A special space
+ * doesn't hold rights in the normal way.
+ * Instead it is place-holder for holding
+ * disembodied (naked) receive rights.
+ * See ipc_port_alloc_special/ipc_port_dealloc_special.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Created a space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_space_create_special(
+ ipc_space_t *spacep)
+{
+ ipc_space_t space;
+
+ space = is_alloc();
+ if (space == IS_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /* Only the caller's reference; no "active" reference. */
+ is_ref_lock_init(space);
+ space->is_references = 1;
+
+ /* Created inactive; the entry maps are never initialized. */
+ is_lock_init(space);
+ space->is_active = FALSE;
+
+ *spacep = space;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_space_destroy
+ * Purpose:
+ * Marks the space as dead and cleans up the entries.
+ * Does nothing if the space is already dead.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_space_destroy(
+ ipc_space_t space)
+{
+ boolean_t active;
+
+ assert(space != IS_NULL);
+
+ /* Atomically mark the space dead; only one caller proceeds. */
+ is_write_lock(space);
+ active = space->is_active;
+ space->is_active = FALSE;
+ is_write_unlock(space);
+
+ if (!active)
+ return;
+
+ /* Release every right the space still names. */
+ ipc_entry_t entry;
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&space->is_map, &iter, entry) {
+ /* Skip the reserved zeroth placeholder entry. */
+ if (entry->ie_name == MACH_PORT_NULL)
+ continue;
+
+ mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);
+
+ if (type != MACH_PORT_TYPE_NONE) {
+ /* Rebuild the full name (index + generation). */
+ mach_port_name_t name =
+ MACH_PORT_MAKEB(entry->ie_name, entry->ie_bits);
+
+ ipc_right_clean(space, name, entry);
+ }
+
+ ie_free(entry);
+ }
+ rdxtree_remove_all(&space->is_map);
+ rdxtree_remove_all(&space->is_reverse_map);
+
+ /*
+ * Because the space is now dead,
+ * we must release the "active" reference for it.
+ * Our caller still has his reference.
+ */
+
+ is_release(space);
+}
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
new file mode 100644
index 0000000..96d5894
--- /dev/null
+++ b/ipc/ipc_space.h
@@ -0,0 +1,324 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_space.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for IPC spaces of capabilities.
+ */
+
+#ifndef _IPC_IPC_SPACE_H_
+#define _IPC_IPC_SPACE_H_
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mach_types.h>
+#include <machine/vm_param.h>
+#include <kern/macros.h>
+#include <kern/lock.h>
+#include <kern/rdxtree.h>
+#include <kern/slab.h>
+#include <kern/printf.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_types.h>
+
+/*
+ * Every task has a space of IPC capabilities.
+ * IPC operations like send and receive use this space.
+ * IPC kernel calls manipulate the space of the target task.
+ */
+
+typedef unsigned int ipc_space_refs_t;
+
+struct ipc_space {
+ decl_simple_lock_data(,is_ref_lock_data) /* protects is_references */
+ ipc_space_refs_t is_references; /* reference count */
+
+ struct lock is_lock_data; /* read/write lock for the space */
+ boolean_t is_active; /* is the space alive? */
+ struct rdxtree is_map; /* a map of entries */
+ size_t is_size; /* number of entries */
+ struct rdxtree is_reverse_map; /* maps objects to entries */
+ ipc_entry_t is_free_list; /* a linked list of free entries */
+ size_t is_free_list_size; /* number of free entries */
+#define IS_FREE_LIST_SIZE_LIMIT 64 /* maximum number of entries
+ in the free list */
+};
+
+
+#define IS_NULL ((ipc_space_t) 0)
+
+extern struct kmem_cache ipc_space_cache;
+
+/* Allocate / free a space from the slab cache. */
+#define is_alloc() ((ipc_space_t) kmem_cache_alloc(&ipc_space_cache))
+#define is_free(is) kmem_cache_free(&ipc_space_cache, (vm_offset_t) (is))
+
+extern struct ipc_space *ipc_space_kernel;
+extern struct ipc_space *ipc_space_reply;
+
+#define is_ref_lock_init(is) simple_lock_init(&(is)->is_ref_lock_data)
+
+/* Take a reference; the count must already be positive. */
+#define ipc_space_reference_macro(is) \
+MACRO_BEGIN \
+ simple_lock(&(is)->is_ref_lock_data); \
+ assert((is)->is_references > 0); \
+ (is)->is_references++; \
+ simple_unlock(&(is)->is_ref_lock_data); \
+MACRO_END
+
+/* Drop a reference; the last one frees the space. */
+#define ipc_space_release_macro(is) \
+MACRO_BEGIN \
+ ipc_space_refs_t _refs; \
+ \
+ simple_lock(&(is)->is_ref_lock_data); \
+ assert((is)->is_references > 0); \
+ _refs = --(is)->is_references; \
+ simple_unlock(&(is)->is_ref_lock_data); \
+ \
+ if (_refs == 0) \
+ is_free(is); \
+MACRO_END
+
+#define is_lock_init(is) lock_init(&(is)->is_lock_data, TRUE)
+
+/* Reader/writer locking of the space proper. */
+#define is_read_lock(is) lock_read(&(is)->is_lock_data)
+#define is_read_unlock(is) lock_done(&(is)->is_lock_data)
+
+#define is_write_lock(is) lock_write(&(is)->is_lock_data)
+#define is_write_lock_try(is) lock_try_write(&(is)->is_lock_data)
+#define is_write_unlock(is) lock_done(&(is)->is_lock_data)
+
+#define is_write_to_read_lock(is) lock_write_to_read(&(is)->is_lock_data)
+
+extern void ipc_space_reference(struct ipc_space *space);
+extern void ipc_space_release(struct ipc_space *space);
+
+#define is_reference(is) ipc_space_reference_macro(is)
+#define is_release(is) ipc_space_release_macro(is)
+
+kern_return_t ipc_space_create(ipc_space_t *);
+kern_return_t ipc_space_create_special(struct ipc_space **);
+void ipc_space_destroy(struct ipc_space *);
+
+/* IPC entry lookups. */
+
+/*
+ * Routine: ipc_entry_lookup
+ * Purpose:
+ * Searches for an entry, given its name.
+ * Conditions:
+ * The space must be read or write locked throughout.
+ * The space must be active.
+ */
+
+static inline ipc_entry_t
+ipc_entry_lookup(
+ ipc_space_t space,
+ mach_port_name_t name)
+{
+ ipc_entry_t entry;
+
+ assert(space->is_active);
+ entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
+ /*
+ * Free-listed entries stay in the map with type NONE
+ * (see ipc_entry_dealloc); treat them as not found.
+ */
+ if (entry != IE_NULL
+ && IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ entry = NULL;
+ assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
+ return entry;
+}
+
+extern volatile boolean_t mach_port_deallocate_debug;
+
+/* Log a diagnostic when a task presents a bogus port name. */
+static inline void
+ipc_entry_lookup_failed(mach_msg_header_t *msg, mach_port_name_t name)
+{
+ /* NULL and DEAD names are legitimate; not worth reporting. */
+ if (name == MACH_PORT_NAME_NULL || name == MACH_PORT_NAME_DEAD)
+ return;
+ printf("task %.*s looked up a bogus port %lu for %d, most probably a bug.\n", (int) sizeof current_task()->name, current_task()->name, (unsigned long) name, msg->msgh_id);
+ /* Optionally drop into the debugger to catch the offender. */
+ if (mach_port_deallocate_debug)
+ SoftDebugger("ipc_entry_lookup");
+}
+
+/*
+ * Routine: ipc_entry_get
+ * Purpose:
+ * Tries to allocate an entry out of the space.
+ * Conditions:
+ * The space is write-locked and active throughout.
+ * An object may be locked. Will not allocate memory.
+ * Returns:
+ * KERN_SUCCESS A free entry was found.
+ * KERN_NO_SPACE No entry allocated.
+ */
+
+static inline kern_return_t
+ipc_entry_get(
+ ipc_space_t space,
+ mach_port_name_t *namep,
+ ipc_entry_t *entryp)
+{
+ mach_port_name_t new_name;
+ ipc_entry_t free_entry;
+
+ assert(space->is_active);
+
+ /* Get entry from the free list. */
+ free_entry = space->is_free_list;
+ if (free_entry == IE_NULL)
+ return KERN_NO_SPACE;
+
+ space->is_free_list = free_entry->ie_next_free;
+ space->is_free_list_size -= 1;
+
+ /*
+ * Initialize the new entry. We need only
+ * increment the generation number and clear ie_request.
+ */
+
+ {
+ mach_port_gen_t gen;
+
+ /* Only generation bits survive on free-listed entries. */
+ assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
+ free_entry->ie_bits = gen;
+ free_entry->ie_request = 0;
+ /* The name combines the slot's index with its generation. */
+ new_name = MACH_PORT_MAKE(free_entry->ie_name, gen);
+ }
+
+ /*
+ * The new name can't be MACH_PORT_NULL because index
+ * is non-zero. It can't be MACH_PORT_DEAD because
+ * the table isn't allowed to grow big enough.
+ * (See comment in ipc/ipc_table.h.)
+ */
+
+ assert(MACH_PORT_NAME_VALID(new_name));
+ assert(free_entry->ie_object == IO_NULL);
+
+ space->is_size += 1;
+ *namep = new_name;
+ *entryp = free_entry;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_entry_dealloc
+ * Purpose:
+ * Deallocates an entry from a space.
+ * Conditions:
+ * The space must be write-locked throughout.
+ * The space must be active.
+ */
+
+static inline void
+ipc_entry_dealloc(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_entry_t entry)
+{
+ assert(space->is_active);
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_request == 0);
+
+ if (space->is_free_list_size < IS_FREE_LIST_SIZE_LIMIT) {
+ /*
+ * Cache the entry on the free list, keeping only its
+ * generation bits; it stays in is_map with type NONE.
+ */
+ space->is_free_list_size += 1;
+ entry->ie_bits &= IE_BITS_GEN_MASK;
+ entry->ie_next_free = space->is_free_list;
+ space->is_free_list = entry;
+ } else {
+ /* Free list is full; really remove and free the entry. */
+ rdxtree_remove(&space->is_map, (rdxtree_key_t) name);
+ ie_free(entry);
+ }
+ space->is_size -= 1;
+}
+
+/* Reverse lookups. */
+
+/* Cast a pointer to a suitable key. Objects are at least 8-byte
+ aligned (asserted below), so the low three bits are discarded. */
+#define KEY(X) \
+ ({ \
+ assert((((unsigned long) (X)) & 0x07) == 0); \
+ ((unsigned long long) \
+ (((unsigned long) (X) - VM_MIN_KERNEL_ADDRESS) >> 3)); \
+ })
+
+/* Insert (OBJ, ENTRY) pair into the reverse mapping. SPACE must
+ be write-locked. */
+static inline kern_return_t
+ipc_reverse_insert(ipc_space_t space,
+ ipc_object_t obj,
+ ipc_entry_t entry)
+{
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+ return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
+ KEY(obj), entry);
+}
+
+/* Remove OBJ from the reverse mapping. SPACE must be
+ write-locked. */
+static inline ipc_entry_t
+ipc_reverse_remove(ipc_space_t space,
+ ipc_object_t obj)
+{
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+ return rdxtree_remove(&space->is_reverse_map, KEY(obj));
+}
+
+/* Remove all entries from the reverse mapping. SPACE must be
+ write-locked. */
+static inline void
+ipc_reverse_remove_all(ipc_space_t space)
+{
+ assert(space != IS_NULL);
+ rdxtree_remove_all(&space->is_reverse_map);
+ /* The tree must be completely empty afterwards. */
+ assert(space->is_reverse_map.height == 0);
+ assert(space->is_reverse_map.root == NULL);
+}
+
+/* Return ENTRY related to OBJ, or NULL if no such entry is found in
+ the reverse mapping. SPACE must be read-locked or
+ write-locked. */
+static inline ipc_entry_t
+ipc_reverse_lookup(ipc_space_t space,
+ ipc_object_t obj)
+{
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+ return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
+}
+
+#undef KEY
+
+#endif /* _IPC_IPC_SPACE_H_ */
diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
new file mode 100644
index 0000000..0f8592a
--- /dev/null
+++ b/ipc/ipc_table.c
@@ -0,0 +1,135 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_table.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate tables of IPC capabilities.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/vm_param.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_entry.h>
+#include <kern/kalloc.h>
+#include <kern/slab.h>
+#include <vm/vm_kern.h>
+
+ipc_table_size_t ipc_table_dnrequests; /* growing size sequence for dn-request tables */
+const unsigned int ipc_table_dnrequests_size = 64; /* entries, incl. zero terminator */
+
+/*
+ * Routine: ipc_table_fill
+ * Purpose:
+ * Fill ITS with NUM increasing table sizes (in elements),
+ * each of at least MIN elements of ELEMSIZE bytes.
+ */
+void
+ipc_table_fill(
+ ipc_table_size_t its, /* array to fill */
+ unsigned int num, /* size of array */
+ unsigned int min, /* at least this many elements */
+ vm_size_t elemsize) /* size of elements */
+{
+ unsigned int index;
+ vm_size_t minsize = min * elemsize;
+ vm_size_t size;
+ vm_size_t incrsize;
+
+ /* first use powers of two, up to the page size */
+
+ for (index = 0, size = 1;
+ (index < num) && (size < PAGE_SIZE);
+ size <<= 1) {
+ if (size >= minsize) {
+ its[index].its_size = size / elemsize;
+ index++;
+ }
+ }
+
+ /* then increments of a page, then two pages, etc. */
+
+ for (incrsize = PAGE_SIZE; index < num;) {
+ unsigned int period;
+
+ /* 15 steps at each increment before doubling it. */
+ for (period = 0;
+ (period < 15) && (index < num);
+ period++, size += incrsize) {
+ if (size >= minsize) {
+ its[index].its_size = size / elemsize;
+ index++;
+ }
+ }
+ /* Cap the increment at eight pages. */
+ if (incrsize < (PAGE_SIZE << 3))
+ incrsize <<= 1;
+ }
+}
+
+/* Allocate and fill the global dead-name-request size table. */
+void
+ipc_table_init(void)
+{
+ ipc_table_dnrequests = (ipc_table_size_t)
+ kalloc(sizeof(struct ipc_table_size) *
+ ipc_table_dnrequests_size);
+ /*
+ * NOTE(review): only an assert guards the kalloc result; in a
+ * non-MACH_ASSERT build a NULL would go undetected -- confirm
+ * that this boot-time allocation cannot fail.
+ */
+ assert(ipc_table_dnrequests != ITS_NULL);
+
+ ipc_table_fill(ipc_table_dnrequests, ipc_table_dnrequests_size - 1,
+ 2, sizeof(struct ipc_port_request));
+
+ /* the last element should have zero size */
+
+ ipc_table_dnrequests[ipc_table_dnrequests_size - 1].its_size = 0;
+}
+
+/*
+ * Routine: ipc_table_alloc
+ * Purpose:
+ * Allocate a table.
+ * Conditions:
+ * May block.
+ */
+
+vm_offset_t
+ipc_table_alloc(
+ vm_size_t size)
+{
+ /* Thin wrapper over the general kernel allocator. */
+ return kalloc(size);
+}
+
+/*
+ * Routine: ipc_table_free
+ * Purpose:
+ * Free a table allocated with ipc_table_alloc or
+ * ipc_table_realloc.
+ * Conditions:
+ * May block.
+ */
+
+void
+ipc_table_free(
+ vm_size_t size, /* must match the size passed at allocation */
+ vm_offset_t table)
+{
+ kfree(table, size);
+}
diff --git a/ipc/ipc_table.h b/ipc/ipc_table.h
new file mode 100644
index 0000000..7968e6b
--- /dev/null
+++ b/ipc/ipc_table.h
@@ -0,0 +1,101 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_table.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for tables, used for dead-name requests
+ * (ipc_port_request_t).
+ */
+
+#ifndef _IPC_IPC_TABLE_H_
+#define _IPC_IPC_TABLE_H_
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+
+/*
+ * Every its_size value must be a power of two.
+ *
+ * The ipr_size field of the first element in a table of
+ * dead-name requests (ipc_port_request_t) points to the
+ * ipc_table_size structure. The structures must be elements
+ * of ipc_table_dnrequests. ipc_table_dnrequests must end
+ * with an element with zero its_size, and except for this last
+ * element, the its_size values must be strictly increasing.
+ *
+ * The ipr_size field points to the currently used ipc_table_size.
+ */
+
+typedef unsigned int ipc_table_index_t; /* index into tables */
+typedef unsigned int ipc_table_elems_t; /* size of tables */
+
+/* One step of the table-size sequence; see block comment above. */
+typedef struct ipc_table_size {
+ ipc_table_elems_t its_size; /* number of elements in table */
+} *ipc_table_size_t;
+
+#define ITS_NULL ((ipc_table_size_t) 0)
+
+/* The global size sequence for dead-name request tables. */
+extern ipc_table_size_t ipc_table_dnrequests;
+
+extern void
+ipc_table_init(void);
+
+/*
+ * Note that ipc_table_alloc and ipc_table_free both potentially
+ * use the VM system. Hence simple locks can't be held across
+ * them.
+ */
+
+/* Allocate a table */
+extern vm_offset_t ipc_table_alloc(
+ vm_size_t size);
+
+/* Free a table */
+extern void ipc_table_free(
+ vm_size_t size,
+ vm_offset_t table);
+
+/* Fill ITS with NUM increasing sizes of at least MIN elements each. */
+void ipc_table_fill(
+ ipc_table_size_t its,
+ unsigned int num,
+ unsigned int min,
+ vm_size_t elemsize);
+
+/* Allocate / free a dead-name request table of (its)->its_size slots. */
+#define it_dnrequests_alloc(its) \
+ ((ipc_port_request_t) \
+ ipc_table_alloc((its)->its_size * \
+ sizeof(struct ipc_port_request)))
+
+#define it_dnrequests_free(its, table) \
+ ipc_table_free((its)->its_size * \
+ sizeof(struct ipc_port_request), \
+ (vm_offset_t)(table))
+
+#endif /* _IPC_IPC_TABLE_H_ */
diff --git a/ipc/ipc_target.c b/ipc/ipc_target.c
new file mode 100644
index 0000000..94c5d40
--- /dev/null
+++ b/ipc/ipc_target.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+/*
+ * File: ipc_target.c
+ *
+ * Implementation for common part of IPC ports and port sets
+ * representing a target of messages and migrating RPCs.
+ */
+
+#include <kern/sched_prim.h>
+#include "ipc_target.h"
+
+void
+ipc_target_init(struct ipc_target *ipt, mach_port_name_t name)
+{
+ ipt->ipt_name = name;
+ ipc_mqueue_init(&ipt->ipt_messages);
+
+#ifdef MIGRATING_THREADS
+ ipt->ipt_type = IPT_TYPE_MESSAGE_RPC;
+ ipt->ipt_acts = 0;
+
+ ipc_target_machine_init(ipt);
+#endif
+}
+
+void
+ipc_target_terminate(struct ipc_target *ipt)
+{
+}
+
+#ifdef MIGRATING_THREADS
+struct Act *
+ipc_target_block(struct ipc_target *ipt)
+{
+ struct Act *act;
+
+ ipt_lock(ipt);
+ while ((act = ipt->ipt_acts) == 0) {
+ /* XXX mp unsafe */
+ ipt->ipt_waiting = 1;
+ ipt_unlock(ipt);
+ thread_wait((int)&ipt->ipt_acts, FALSE);
+ ipt_lock(ipt);
+ }
+ ipt->ipt_acts = act->ipt_next;
+ ipt_unlock(ipt);
+
+ return act;
+}
+
+void
+ipc_target_wakeup(struct ipc_target *ipt)
+{
+ ipt_lock(ipt);
+ if (ipt->ipt_waiting) {
+ thread_wakeup((int)&ipt->ipt_acts);
+ ipt->ipt_waiting = 0;
+ }
+ ipt_unlock(ipt);
+}
+#endif /* MIGRATING_THREADS */
+
diff --git a/ipc/ipc_target.h b/ipc/ipc_target.h
new file mode 100644
index 0000000..c2cc924
--- /dev/null
+++ b/ipc/ipc_target.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+/*
+ * File: ipc_target.h
+ *
+ * Common part of IPC ports and port sets
+ * representing a target of messages and migrating RPCs.
+ */
+
+#ifndef _IPC_IPC_RECEIVER_H_
+#define _IPC_IPC_RECEIVER_H_
+
+#include "ipc_mqueue.h"
+#include "ipc_object.h"
+
+typedef struct ipc_target {
+
+ struct ipc_object ipt_object;
+
+ mach_port_name_t ipt_name;
+ struct ipc_mqueue ipt_messages;
+
+#ifdef MIGRATING_THREADS
+ /*** Migrating RPC stuff ***/
+
+ int ipt_type;
+
+ /* User entry info for migrating RPC */
+ rpc_info_t ipt_rpcinfo;
+
+ /* List of available activations, all active but not in use. */
+ struct Act *ipt_acts;
+
+ /* TRUE if someone is waiting for an activation from this pool. */
+ int ipt_waiting;
+#endif /* MIGRATING_THREADS */
+
+} *ipc_target_t;
+
+#define IPT_TYPE_MESSAGE_RPC 1
+#define IPT_TYPE_MIGRATE_RPC 2
+
+void ipc_target_init(struct ipc_target *ipt, mach_port_name_t name);
+void ipc_target_terminate(struct ipc_target *ipt);
+
+#define ipt_lock(ipt) io_lock(&(ipt)->ipt_object)
+#define ipt_unlock(ipt) io_unlock(&(ipt)->ipt_object)
+#define ipt_reference(ipt) io_reference(&(ipt)->ipt_object)
+#define ipt_release(ipt) io_release(&(ipt)->ipt_object)
+#define ipt_check_unlock(ipt) io_check_unlock(&(ipt)->ipt_object)
+
+#endif /* _IPC_IPC_RECEIVER_H_ */
diff --git a/ipc/ipc_thread.c b/ipc/ipc_thread.c
new file mode 100644
index 0000000..1e738a5
--- /dev/null
+++ b/ipc/ipc_thread.c
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_thread.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * IPC operations on threads.
+ */
+
+#include <kern/assert.h>
+#include <ipc/ipc_thread.h>
+
+/*
+ * Routine: ipc_thread_enqueue
+ * Purpose:
+ * Enqueue a thread.
+ */
+
+void
+ipc_thread_enqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread)
+{
+ ipc_thread_enqueue_macro(queue, thread);
+}
+
+/*
+ * Routine: ipc_thread_dequeue
+ * Purpose:
+ * Dequeue and return a thread.
+ */
+
+ipc_thread_t
+ipc_thread_dequeue(
+ ipc_thread_queue_t queue)
+{
+ ipc_thread_t first;
+
+ first = ipc_thread_queue_first(queue);
+
+ if (first != ITH_NULL)
+ ipc_thread_rmqueue_first_macro(queue, first);
+
+ return first;
+}
+
+/*
+ * Routine: ipc_thread_rmqueue
+ * Purpose:
+ * Pull a thread out of a queue.
+ */
+
+void
+ipc_thread_rmqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread)
+{
+ ipc_thread_t next, prev;
+
+ assert(queue->ithq_base != ITH_NULL);
+
+ next = thread->ith_next;
+ prev = thread->ith_prev;
+
+ if (next == thread) {
+ assert(prev == thread);
+ assert(queue->ithq_base == thread);
+
+ queue->ithq_base = ITH_NULL;
+ } else {
+ if (queue->ithq_base == thread)
+ queue->ithq_base = next;
+
+ next->ith_prev = prev;
+ prev->ith_next = next;
+ ipc_thread_links_init(thread);
+ }
+}
diff --git a/ipc/ipc_thread.h b/ipc/ipc_thread.h
new file mode 100644
index 0000000..008ab4a
--- /dev/null
+++ b/ipc/ipc_thread.h
@@ -0,0 +1,129 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_thread.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for the IPC component of threads.
+ */
+
+#ifndef _IPC_IPC_THREAD_H_
+#define _IPC_IPC_THREAD_H_
+
+#include <kern/thread.h>
+
+typedef thread_t ipc_thread_t;
+
+#define ITH_NULL THREAD_NULL
+
+#define ith_lock_init(thread) simple_lock_init(&(thread)->ith_lock_data)
+#define ith_lock(thread) simple_lock(&(thread)->ith_lock_data)
+#define ith_unlock(thread) simple_unlock(&(thread)->ith_lock_data)
+
+/*
+ * Note that this isn't a queue, but rather a stack. This causes
+ * threads that were recently running to be reused earlier, which
+ * helps improve locality of reference.
+ */
+typedef struct ipc_thread_queue {
+ ipc_thread_t ithq_base;
+} *ipc_thread_queue_t;
+
+#define ITHQ_NULL ((ipc_thread_queue_t) 0)
+
+
+#define ipc_thread_links_init(thread) \
+MACRO_BEGIN \
+ (thread)->ith_next = (thread); \
+ (thread)->ith_prev = (thread); \
+MACRO_END
+
+#define ipc_thread_queue_init(queue) \
+MACRO_BEGIN \
+ (queue)->ithq_base = ITH_NULL; \
+MACRO_END
+
+#define ipc_thread_queue_empty(queue) ((queue)->ithq_base == ITH_NULL)
+
+#define ipc_thread_queue_first(queue) ((queue)->ithq_base)
+
+#define ipc_thread_rmqueue_first_macro(queue, thread) \
+MACRO_BEGIN \
+ ipc_thread_t _next; \
+ \
+ assert((queue)->ithq_base == (thread)); \
+ \
+ _next = (thread)->ith_next; \
+ if (_next == (thread)) { \
+ assert((thread)->ith_prev == (thread)); \
+ (queue)->ithq_base = ITH_NULL; \
+ } else { \
+ ipc_thread_t _prev = (thread)->ith_prev; \
+ \
+ (queue)->ithq_base = _next; \
+ _next->ith_prev = _prev; \
+ _prev->ith_next = _next; \
+ ipc_thread_links_init(thread); \
+ } \
+MACRO_END
+
+#define ipc_thread_enqueue_macro(queue, thread) \
+MACRO_BEGIN \
+ ipc_thread_t _first = (queue)->ithq_base; \
+ \
+ if (_first == ITH_NULL) { \
+ (queue)->ithq_base = (thread); \
+ assert((thread)->ith_next == (thread)); \
+ assert((thread)->ith_prev == (thread)); \
+ } else { \
+ ipc_thread_t _last = _first->ith_prev; \
+ \
+ (thread)->ith_next = _first; \
+ (thread)->ith_prev = _last; \
+ _first->ith_prev = (thread); \
+ _last->ith_next = (thread); \
+ (queue)->ithq_base = (thread); \
+ } \
+MACRO_END
+
+/* Enqueue a thread on a message queue */
+extern void ipc_thread_enqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread);
+
+/* Dequeue a thread from a message queue */
+extern ipc_thread_t ipc_thread_dequeue(
+ ipc_thread_queue_t queue);
+
+/* Remove a thread from a message queue */
+extern void ipc_thread_rmqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread);
+
+#endif /* _IPC_IPC_THREAD_H_ */
diff --git a/ipc/ipc_types.h b/ipc/ipc_types.h
new file mode 100644
index 0000000..c8f0d0b
--- /dev/null
+++ b/ipc/ipc_types.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _IPC_TYPES_H_
+#define _IPC_TYPES_H_
+
+typedef struct ipc_space *ipc_space_t;
+typedef struct ipc_port *ipc_port_t;
+
+#endif /* _IPC_TYPES_H_ */
diff --git a/ipc/mach_debug.c b/ipc/mach_debug.c
new file mode 100644
index 0000000..7dca4b6
--- /dev/null
+++ b/ipc/mach_debug.c
@@ -0,0 +1,288 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_debug.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported kernel calls. See mach_debug/mach_debug.defs.
+ */
+
+#include <string.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+#include <mach_debug/hash_info.h>
+#include <kern/host.h>
+#include <kern/mach_debug.server.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_right.h>
+
+
+
+/*
+ * Routine: mach_port_get_srights [kernel call]
+ * Purpose:
+ * Retrieve the number of extant send rights
+ * that a receive right has.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved number of send rights.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_get_srights(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_rights_t *srightsp)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+ mach_port_rights_t srights;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ srights = port->ip_srights;
+ ip_unlock(port);
+
+ *srightsp = srights;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: host_ipc_marequest_info
+ * Purpose:
+ * Return information about the marequest hash table.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_HOST The host is null.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+host_ipc_marequest_info(
+ host_t host,
+ unsigned int *maxp,
+ hash_info_bucket_array_t *infop,
+ unsigned int *countp)
+{
+ vm_offset_t addr;
+ vm_size_t size = 0; /* '=0' to shut up lint */
+ hash_info_bucket_t *info;
+ unsigned int potential, actual;
+ kern_return_t kr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ /* start with in-line data */
+
+ info = *infop;
+ potential = *countp;
+
+ for (;;) {
+ actual = ipc_marequest_info(maxp, info, potential);
+ if (actual <= potential)
+ break;
+
+ /* allocate more memory */
+
+ if (info != *infop)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = round_page(actual * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+
+ info = (hash_info_bucket_t *) addr;
+ potential = size/sizeof *info;
+ }
+
+ if (info == *infop) {
+ /* data fit in-line; nothing to deallocate */
+
+ *countp = actual;
+ } else if (actual == 0) {
+ kmem_free(ipc_kernel_map, addr, size);
+
+ *countp = 0;
+ } else {
+ vm_map_copy_t copy;
+ vm_size_t used;
+
+ used = round_page(actual * sizeof *info);
+
+ if (used != size)
+ kmem_free(ipc_kernel_map, addr + used, size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, used,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *infop = (hash_info_bucket_t *) copy;
+ *countp = actual;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_dnrequest_info
+ * Purpose:
+ * Returns information about the dead-name requests
+ * registered with the named receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved information.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_dnrequest_info(
+ ipc_space_t space,
+ mach_port_name_t name,
+ unsigned int *totalp,
+ unsigned int *usedp)
+{
+ unsigned int total, used;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ if (port->ip_dnrequests == IPR_NULL) {
+ total = 0;
+ used = 0;
+ } else {
+ ipc_port_request_t dnrequests = port->ip_dnrequests;
+ ipc_port_request_index_t index;
+
+ total = dnrequests->ipr_size->its_size;
+
+ for (index = 1, used = 0;
+ index < total; index++) {
+ ipc_port_request_t ipr = &dnrequests[index];
+
+ if (ipr->ipr_name != MACH_PORT_NULL)
+ used++;
+ }
+ }
+ ip_unlock(port);
+
+ *totalp = total;
+ *usedp = used;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_kernel_object [kernel call]
+ * Purpose:
+ * Retrieve the type and address of the kernel object
+ * represented by a send or receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved kernel object info.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote
+ * send or receive rights.
+ */
+
+kern_return_t
+mach_port_kernel_object(
+ ipc_space_t space,
+ mach_port_name_t name,
+ unsigned int *typep,
+ vm_offset_t *addrp)
+{
+ ipc_entry_t entry;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_read(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is read-locked and active */
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ is_read_unlock(space);
+
+ if (!ip_active(port)) {
+ ip_unlock(port);
+ return KERN_INVALID_RIGHT;
+ }
+
+ *typep = ip_kotype(port);
+ *addrp = port->ip_kobject;
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
new file mode 100644
index 0000000..6194ef7
--- /dev/null
+++ b/ipc/mach_msg.c
@@ -0,0 +1,1709 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_msg.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported message traps. See mach/message.h.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <machine/copy_user.h>
+#include <kern/assert.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/lock.h>
+#include <kern/printf.h>
+#include <kern/sched_prim.h>
+#include <kern/ipc_sched.h>
+#include <kern/exception.h>
+#include <vm/vm_map.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/mach_msg.h>
+#include <machine/locore.h>
+#include <machine/pcb.h>
+
+/*
+ * Routine: mach_msg_send
+ * Purpose:
+ * Send a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Sent the message.
+ * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
+ * MACH_SEND_NO_BUFFER Couldn't allocate buffer.
+ * MACH_SEND_INVALID_DATA Couldn't copy message data.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_DEST The space is dead.
+ * MACH_SEND_INVALID_NOTIFY Bad notify port.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_TIMED_OUT Timeout expired without delivery.
+ * MACH_SEND_INTERRUPTED Delivery interrupted.
+ * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
+ * MACH_SEND_WILL_NOTIFY Msg-accepted notif. requested.
+ * MACH_SEND_NOTIFY_IN_PROGRESS
+ * This space has already forced a message to this port.
+ */
+
+mach_msg_return_t
+mach_msg_send(
+ mach_msg_user_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_timeout_t time_out,
+ mach_port_name_t notify)
+{
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+ mach_msg_return_t mr;
+
+ mr = ipc_kmsg_get(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ if (option & MACH_SEND_CANCEL) {
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_SEND_INVALID_NOTIFY;
+ else
+ mr = ipc_kmsg_copyin(kmsg, space, map, notify);
+ } else
+ mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return mr;
+ }
+
+ if (option & MACH_SEND_NOTIFY) {
+ mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
+ ((option & MACH_SEND_TIMEOUT) ?
+ time_out : MACH_MSG_TIMEOUT_NONE));
+ if (mr == MACH_SEND_TIMED_OUT) {
+ ipc_port_t dest = (ipc_port_t)
+ kmsg->ikm_header.msgh_remote_port;
+
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_SEND_INVALID_NOTIFY;
+ else
+ mr = ipc_marequest_create(space, dest,
+ notify, &kmsg->ikm_marequest);
+ if (mr == MACH_MSG_SUCCESS) {
+ ipc_mqueue_send_always(kmsg);
+ return MACH_SEND_WILL_NOTIFY;
+ }
+ }
+ } else
+ mr = ipc_mqueue_send(kmsg, option & MACH_SEND_TIMEOUT,
+ time_out);
+
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: mach_msg_receive
+ * Purpose:
+ * Receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Received a message.
+ * MACH_RCV_INVALID_NAME The name doesn't denote a right,
+ * or the denoted right is not receive or port set.
+ * MACH_RCV_IN_SET Receive right is a member of a set.
+ * MACH_RCV_TOO_LARGE Message wouldn't fit into buffer.
+ * MACH_RCV_TIMED_OUT Timeout expired without a message.
+ * MACH_RCV_INTERRUPTED Reception interrupted.
+ * MACH_RCV_PORT_DIED Port/set died while receiving.
+ * MACH_RCV_PORT_CHANGED Port moved into set while receiving.
+ * MACH_RCV_INVALID_DATA Couldn't copy to user buffer.
+ * MACH_RCV_INVALID_NOTIFY Bad notify port.
+ * MACH_RCV_HEADER_ERROR
+ */
+
+mach_msg_return_t
+mach_msg_receive(
+ mach_msg_user_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t rcv_size,
+ mach_port_name_t rcv_name,
+ mach_msg_timeout_t time_out,
+ mach_port_name_t notify)
+{
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* hold ref for object; mqueue is locked */
+
+ /*
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_receive_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_option = option;
+ self->ith_rcv_size = rcv_size;
+ self->ith_timeout = time_out;
+ self->ith_notify = notify;
+ self->ith_object = object;
+ self->ith_mqueue = mqueue;
+
+ if (option & MACH_RCV_LARGE) {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ rcv_size, time_out,
+ FALSE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ if (mr == MACH_RCV_TOO_LARGE) {
+ mach_msg_size_t real_size =
+ (mach_msg_size_t) (vm_offset_t) kmsg;
+
+ assert(real_size > rcv_size);
+
+ (void) copyout(&real_size,
+ &msg->msgh_size,
+ sizeof(mach_msg_size_t));
+ }
+
+ return mr;
+ }
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ assert(kmsg->ikm_header.msgh_size <= rcv_size);
+ } else {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ MACH_MSG_SIZE_MAX, time_out,
+ FALSE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (msg_usize(&kmsg->ikm_header) > rcv_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ return MACH_RCV_TOO_LARGE;
+ }
+ }
+
+ if (option & MACH_RCV_NOTIFY) {
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_RCV_INVALID_NOTIFY;
+ else
+ mr = ipc_kmsg_copyout(kmsg, space, map, notify);
+ } else
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ return mr;
+ }
+
+ return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+}
+
+/*
+ * Routine: mach_msg_receive_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+mach_msg_receive_continue(void)
+{
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ mach_msg_user_header_t *msg = self->ith_msg;
+ mach_msg_option_t option = self->ith_option;
+ mach_msg_size_t rcv_size = self->ith_rcv_size;
+ mach_msg_timeout_t time_out = self->ith_timeout;
+ mach_port_name_t notify = self->ith_notify;
+ ipc_object_t object = self->ith_object;
+ ipc_mqueue_t mqueue = self->ith_mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ if (option & MACH_RCV_LARGE) {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ rcv_size, time_out,
+ TRUE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ if (mr == MACH_RCV_TOO_LARGE) {
+ mach_msg_size_t real_size =
+ (mach_msg_size_t) (vm_offset_t) kmsg;
+
+ assert(real_size > rcv_size);
+
+ (void) copyout(&real_size,
+ &msg->msgh_size,
+ sizeof(mach_msg_size_t));
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ assert(msg_usize(&kmsg->ikm_header) <= rcv_size);
+ } else {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ MACH_MSG_SIZE_MAX, time_out,
+ TRUE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (msg_usize(&kmsg->ikm_header) > rcv_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+ }
+
+ if (option & MACH_RCV_NOTIFY) {
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_RCV_INVALID_NOTIFY;
+ else
+ mr = ipc_kmsg_copyout(kmsg, space, map, notify);
+ } else
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: mach_msg_trap [mach trap]
+ * Purpose:
+ * Possibly send a message; possibly receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * All of mach_msg_send and mach_msg_receive error codes.
+ */
+
+mach_msg_return_t
+mach_msg_trap(
+ mach_msg_user_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_name_t rcv_name,
+ mach_msg_timeout_t time_out,
+ mach_port_name_t notify)
+{
+ mach_msg_return_t mr;
+
+ /* first check for common cases */
+
+ if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = self->task->itk_space;
+ ipc_kmsg_t kmsg;
+ ipc_port_t dest_port;
+ ipc_object_t rcv_object;
+ ipc_mqueue_t rcv_mqueue;
+ mach_msg_size_t reply_size;
+
+ /*
+ * This case is divided into ten sections, each
+ * with a label. There are five optimized
+ * sections and six unoptimized sections, which
+ * do the same thing but handle all possible
+ * cases and are slower.
+ *
+ * The five sections for an RPC are
+ * 1) Get request message into a buffer.
+ * (fast_get or slow_get)
+ * 2) Copyin request message and rcv_name.
+ * (fast_copyin or slow_copyin)
+ * 3) Enqueue request and dequeue reply.
+ * (fast_send_receive or
+ * slow_send and slow_receive)
+ * 4) Copyout reply message.
+ * (fast_copyout or slow_copyout)
+ * 5) Put reply message to user's buffer.
+ * (fast_put or slow_put)
+ *
+ * Keep the locking hierarchy firmly in mind.
+ * (First spaces, then ports, then port sets,
+ * then message queues.) Only a non-blocking
+ * attempt can be made to acquire locks out of
+ * order, or acquire two locks on the same level.
+ * Acquiring two locks on the same level will
+ * fail if the objects are really the same,
+ * unless simple locking is disabled. This is OK,
+ * because then the extra unlock does nothing.
+ *
+ * There are two major reasons these RPCs can't use
+ * ipc_thread_switch, and use slow_send/slow_receive:
+ * 1) Kernel RPCs.
+ * 2) Servers fall behind clients, so
+ * client doesn't find a blocked server thread and
+ * server finds waiting messages and can't block.
+ */
+
+ /*
+ fast_get:
+ */
+ /*
+ * optimized ipc_kmsg_get
+ *
+ * No locks, references, or messages held.
+ * We must clear ikm_cache before copyinmsg.
+ */
+
+ if (((send_size * IKM_EXPAND_FACTOR) > IKM_SAVED_MSG_SIZE) ||
+ (send_size < sizeof(mach_msg_user_header_t)) ||
+ (send_size & 3))
+ goto slow_get;
+
+ kmsg = ikm_cache_alloc_try();
+ if (kmsg == IKM_NULL)
+ goto slow_get;
+
+ if (copyinmsg(msg, &kmsg->ikm_header,
+ send_size, kmsg->ikm_size)) {
+ ikm_free(kmsg);
+ goto slow_get;
+ }
+
+ fast_copyin:
+ /*
+ * optimized ipc_kmsg_copyin/ipc_mqueue_copyin
+ *
+ * We have the request message data in kmsg.
+ * Must still do copyin, send, receive, etc.
+ *
+ * If the message isn't simple, we can't combine
+ * ipc_kmsg_copyin_header and ipc_mqueue_copyin,
+ * because copyin of the message body might
+ * affect rcv_name.
+ */
+
+ switch (kmsg->ikm_header.msgh_bits) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+ ipc_port_t reply_port;
+ {
+ mach_port_name_t reply_name =
+ kmsg->ikm_header.msgh_local_port;
+
+ if (reply_name != rcv_name)
+ goto slow_copyin;
+
+ is_read_lock(space);
+ assert(space->is_active);
+
+ ipc_entry_t entry;
+ entry = ipc_entry_lookup (space, reply_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, reply_name);
+ goto abort_request_copyin;
+ }
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+ }
+
+ {
+ mach_port_name_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, dest_name);
+ goto abort_request_copyin;
+ }
+ bits = entry->ie_bits;
+
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
+ goto abort_request_copyin;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+ }
+
+ /*
+ * To do an atomic copyin, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port)) {
+ ip_unlock(dest_port);
+ goto abort_request_copyin;
+ }
+ is_read_unlock(space);
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+
+ assert(ip_active(reply_port));
+ assert(reply_port->ip_receiver_name ==
+ kmsg->ikm_header.msgh_local_port);
+ assert(reply_port->ip_receiver == space);
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_remote_port =
+ (mach_port_t) dest_port;
+ kmsg->ikm_header.msgh_local_port =
+ (mach_port_t) reply_port;
+
+ /* make sure we can queue to the destination */
+
+ if (dest_port->ip_receiver == ipc_space_kernel) {
+ /*
+ * The kernel server has a reference to
+ * the reply port, which it hands back
+ * to us in the reply message. We do
+ * not need to keep another reference to
+ * it.
+ */
+ ip_unlock(reply_port);
+
+ assert(ip_active(dest_port));
+ ip_unlock(dest_port);
+ goto kernel_send;
+ }
+
+ if (dest_port->ip_msgcount >= dest_port->ip_qlimit)
+ goto abort_request_send_receive;
+
+ /* optimized ipc_mqueue_copyin */
+
+ if (reply_port->ip_pset != IPS_NULL)
+ goto abort_request_send_receive;
+
+ rcv_object = (ipc_object_t) reply_port;
+ io_reference(rcv_object);
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+
+ abort_request_copyin:
+ is_read_unlock(space);
+ goto slow_copyin;
+
+ abort_request_send_receive:
+ ip_unlock(dest_port);
+ ip_unlock(reply_port);
+ goto slow_send;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+ /* sending a reply message */
+
+ {
+ mach_port_name_t reply_name =
+ kmsg->ikm_header.msgh_local_port;
+
+ if (reply_name != MACH_PORT_NULL)
+ goto slow_copyin;
+ }
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ {
+ ipc_entry_t entry;
+ mach_port_name_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, dest_name);
+ goto abort_reply_dest_copyin;
+ }
+
+ /* check type bits */
+ if (IE_BITS_TYPE (entry->ie_bits) !=
+ MACH_PORT_TYPE_SEND_ONCE)
+ goto abort_reply_dest_copyin;
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_TYPE(entry->ie_bits) ==
+ MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(entry->ie_bits) == 1);
+ assert((entry->ie_bits & IE_BITS_MAREQUEST) == 0);
+
+ if (entry->ie_request != 0)
+ goto abort_reply_dest_copyin;
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ goto abort_reply_dest_copyin;
+ }
+
+ assert(dest_port->ip_sorights > 0);
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc (space, dest_name, entry);
+ }
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ 0);
+ kmsg->ikm_header.msgh_remote_port =
+ (mach_port_t) dest_port;
+
+ /* make sure we can queue to the destination */
+
+ assert(dest_port->ip_receiver != ipc_space_kernel);
+
+ /* optimized ipc_mqueue_copyin */
+
+ {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ entry = ipc_entry_lookup (space, rcv_name);
+ if (entry == IE_NULL)
+ {
+ ipc_entry_lookup_failed (msg, rcv_name);
+ goto abort_reply_rcv_copyin;
+ }
+ bits = entry->ie_bits;
+
+ /* check type bits; looking for receive or set */
+
+ if (bits & MACH_PORT_TYPE_PORT_SET) {
+ ipc_pset_t rcv_pset;
+
+ rcv_pset = (ipc_pset_t) entry->ie_object;
+ assert(rcv_pset != IPS_NULL);
+
+ ips_lock(rcv_pset);
+ assert(ips_active(rcv_pset));
+
+ rcv_object = (ipc_object_t) rcv_pset;
+ rcv_mqueue = &rcv_pset->ips_messages;
+ } else if (bits & MACH_PORT_TYPE_RECEIVE) {
+ ipc_port_t rcv_port;
+
+ rcv_port = (ipc_port_t) entry->ie_object;
+ assert(rcv_port != IP_NULL);
+
+ if (!ip_lock_try(rcv_port))
+ goto abort_reply_rcv_copyin;
+ assert(ip_active(rcv_port));
+
+ if (rcv_port->ip_pset != IPS_NULL) {
+ ip_unlock(rcv_port);
+ goto abort_reply_rcv_copyin;
+ }
+
+ rcv_object = (ipc_object_t) rcv_port;
+ rcv_mqueue = &rcv_port->ip_messages;
+ } else
+ goto abort_reply_rcv_copyin;
+ }
+
+ is_write_unlock(space);
+ io_reference(rcv_object);
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+
+ abort_reply_dest_copyin:
+ is_write_unlock(space);
+ goto slow_copyin;
+
+ abort_reply_rcv_copyin:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+ goto slow_send;
+ }
+
+ default:
+ goto slow_copyin;
+ }
+ /*NOTREACHED*/
+
+ fast_send_receive:
+ /*
+ * optimized ipc_mqueue_send/ipc_mqueue_receive
+ *
+ * Finished get/copyin of kmsg and copyin of rcv_name.
+ * space is unlocked, dest_port is locked,
+ * we can queue kmsg to dest_port,
+ * rcv_mqueue is locked, rcv_object holds a ref,
+ * if rcv_object is a port it isn't in a port set
+ *
+ * Note that if simple locking is turned off,
+ * then we could have dest_mqueue == rcv_mqueue
+ * and not abort when we try to lock dest_mqueue.
+ */
+
+ assert(ip_active(dest_port));
+ assert(dest_port->ip_receiver != ipc_space_kernel);
+ assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
+ (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ assert((kmsg->ikm_header.msgh_bits &
+ MACH_MSGH_BITS_CIRCULAR) == 0);
+
+ {
+ ipc_mqueue_t dest_mqueue;
+ ipc_thread_t receiver;
+
+ {
+ ipc_pset_t dest_pset;
+
+ dest_pset = dest_port->ip_pset;
+ if (dest_pset == IPS_NULL)
+ dest_mqueue = &dest_port->ip_messages;
+ else
+ dest_mqueue = &dest_pset->ips_messages;
+ }
+
+ if (!imq_lock_try(dest_mqueue)) {
+ abort_send_receive:
+ ip_unlock(dest_port);
+ imq_unlock(rcv_mqueue);
+ ipc_object_release(rcv_object);
+ goto slow_send;
+ }
+
+ receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
+ if ((receiver == ITH_NULL) ||
+ (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
+ != IKM_NULL)) {
+ imq_unlock(dest_mqueue);
+ goto abort_send_receive;
+ }
+
+ /*
+ * There is a receiver thread waiting, and
+ * there is no reply message for us to pick up.
+ * We have hope of hand-off, so save state.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = rcv_object;
+ self->ith_mqueue = rcv_mqueue;
+
+ if ((receiver->swap_func == mach_msg_continue) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ assert(current_thread() == receiver);
+
+ /*
+ * We can use the optimized receive code,
+ * because the receiver is using no options.
+ */
+ } else if ((receiver->swap_func ==
+ exception_raise_continue) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ counter(c_mach_msg_trap_block_exc++);
+ assert(current_thread() == receiver);
+
+ /*
+ * We are a reply message coming back through
+ * the optimized exception-handling path.
+ * Finish with rcv_mqueue and dest_mqueue,
+ * and then jump to exception code with
+ * dest_port still locked. We don't bother
+ * with a sequence number in this case.
+ */
+
+ ipc_thread_enqueue_macro(
+ &rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ imq_unlock(dest_mqueue);
+
+ exception_raise_continue_fast(dest_port, kmsg);
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS;
+ } else if ((send_size <= receiver->ith_msize) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ assert(current_thread() == receiver);
+
+ if ((receiver->swap_func ==
+ mach_msg_receive_continue) &&
+ ((receiver->ith_option & MACH_RCV_NOTIFY) == 0)) {
+ /*
+ * We can still use the optimized code.
+ */
+ } else {
+ counter(c_mach_msg_trap_block_slow++);
+ /*
+ * We are running as the receiver,
+ * but we can't use the optimized code.
+ * Finish send/receive processing.
+ */
+
+ dest_port->ip_msgcount++;
+ ip_unlock(dest_port);
+
+ ipc_thread_enqueue_macro(
+ &rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ receiver->ith_state = MACH_MSG_SUCCESS;
+ receiver->ith_kmsg = kmsg;
+ receiver->ith_seqno = dest_port->ip_seqno++;
+ imq_unlock(dest_mqueue);
+
+ /*
+ * Call the receiver's continuation.
+ */
+
+ receiver->wait_result = THREAD_AWAKENED;
+ (*receiver->swap_func)();
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS;
+ }
+ } else {
+ /*
+ * The receiver can't accept the message,
+ * or we can't switch to the receiver.
+ */
+
+ imq_unlock(dest_mqueue);
+ goto abort_send_receive;
+ }
+ counter(c_mach_msg_trap_block_fast++);
+
+ /*
+ * Safe to unlock dest_port now that we are
+ * committed to this path, because we hold
+ * dest_mqueue locked. We never bother changing
+ * dest_port->ip_msgcount.
+ */
+
+ ip_unlock(dest_port);
+
+ /*
+ * We need to finish preparing self for its
+ * time asleep in rcv_mqueue.
+ */
+
+ ipc_thread_enqueue_macro(&rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ /*
+ * Finish extracting receiver from dest_mqueue.
+ */
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
+ imq_unlock(dest_mqueue);
+
+ /*
+ * We don't have to do any post-dequeue processing of
+ * the message. We never incremented ip_msgcount, we
+ * know it has no msg-accepted request, and blocked
+ * senders aren't a worry because we found the port
+ * with a receiver waiting.
+ */
+
+ self = receiver;
+ space = self->task->itk_space;
+
+ msg = self->ith_msg;
+ rcv_size = self->ith_rcv_size;
+ rcv_object = self->ith_object;
+
+ /* inline ipc_object_release */
+ io_lock(rcv_object);
+ io_release(rcv_object);
+ io_check_unlock(rcv_object);
+ }
+
+ fast_copyout:
+ /*
+ * Nothing locked and no references held, except
+ * we have kmsg with msgh_seqno filled in. Must
+ * still check against rcv_size and do
+ * ipc_kmsg_copyout/ipc_kmsg_put.
+ */
+
+ assert((ipc_port_t) kmsg->ikm_header.msgh_remote_port
+ == dest_port);
+
+ reply_size = kmsg->ikm_header.msgh_size;
+ if (rcv_size < msg_usize(&kmsg->ikm_header))
+ goto slow_copyout;
+
+ /* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */
+
+ switch (kmsg->ikm_header.msgh_bits) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE): {
+ ipc_port_t reply_port =
+ (ipc_port_t) kmsg->ikm_header.msgh_local_port;
+ mach_port_name_t dest_name, reply_name;
+ rpc_uintptr_t payload;
+
+ /* receiving a request message */
+
+ if (!IP_VALID(reply_port))
+ goto slow_copyout;
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port))
+ goto abort_request_copyout;
+
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ goto abort_request_copyout;
+ }
+
+ assert(reply_port->ip_sorights > 0);
+ ip_unlock(reply_port);
+
+ {
+ ipc_entry_t entry;
+ kern_return_t kr;
+ kr = ipc_entry_get (space, &reply_name, &entry);
+ if (kr)
+ goto abort_request_copyout;
+ assert (entry != NULL);
+
+ {
+ mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ assert(MACH_PORT_NAME_VALID(reply_name));
+ entry->ie_object = (ipc_object_t) reply_port;
+ is_write_unlock(space);
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_srights > 0);
+ ip_release(dest_port);
+
+ if (dest_port->ip_receiver == space)
+ dest_name = dest_port->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+ payload = dest_port->ip_protected_payload;
+
+ if ((--dest_port->ip_srights == 0) &&
+ (dest_port->ip_nsrequest != IP_NULL)) {
+ ipc_port_t nsrequest;
+ mach_port_mscount_t mscount;
+
+ /* a rather rare case */
+
+ nsrequest = dest_port->ip_nsrequest;
+ mscount = dest_port->ip_mscount;
+ dest_port->ip_nsrequest = IP_NULL;
+ ip_unlock(dest_port);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest_port);
+
+ if (! ipc_port_flag_protected_payload(dest_port)) {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND);
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ } else {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD);
+ kmsg->ikm_header.msgh_protected_payload =
+ payload;
+ }
+ kmsg->ikm_header.msgh_remote_port = reply_name;
+ goto fast_put;
+
+ abort_request_copyout:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_name_t dest_name;
+ rpc_uintptr_t payload;
+
+ /* receiving a reply message */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port))
+ goto slow_copyout;
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_sorights > 0);
+
+ payload = dest_port->ip_protected_payload;
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+ dest_port->ip_sorights--;
+ dest_name = dest_port->ip_receiver_name;
+ ip_unlock(dest_port);
+ } else {
+ ip_unlock(dest_port);
+
+ ipc_notify_send_once(dest_port);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ if (! ipc_port_flag_protected_payload(dest_port)) {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ } else {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD);
+ kmsg->ikm_header.msgh_protected_payload =
+ payload;
+ }
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ goto fast_put;
+ }
+
+ case MACH_MSGH_BITS_COMPLEX|
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_name_t dest_name;
+ rpc_uintptr_t payload;
+
+ /* receiving a complex reply message */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port))
+ goto slow_copyout;
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_sorights > 0);
+
+ payload = dest_port->ip_protected_payload;
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+ dest_port->ip_sorights--;
+ dest_name = dest_port->ip_receiver_name;
+ ip_unlock(dest_port);
+ } else {
+ ip_unlock(dest_port);
+
+ ipc_notify_send_once(dest_port);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ if (! ipc_port_flag_protected_payload(dest_port)) {
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS_COMPLEX
+ | MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ } else {
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS_COMPLEX
+ | MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD);
+ kmsg->ikm_header.msgh_protected_payload =
+ payload;
+ }
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+
+ mr = ipc_kmsg_copyout_body(
+ kmsg,
+ space,
+ current_map());
+
+ if (mr != MACH_MSG_SUCCESS) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ return mr | MACH_RCV_BODY_ERROR;
+ }
+ goto fast_put;
+ }
+
+ default:
+ goto slow_copyout;
+ }
+ /*NOTREACHED*/
+
+ fast_put:
+ /*
+ * We have the reply message data in kmsg,
+ * and the reply message size in reply_size.
+ * Just need to copy it out to the user and free kmsg.
+ * We must check ikm_cache after copyoutmsg.
+ */
+
+ ikm_check_initialized(kmsg, kmsg->ikm_size);
+
+ if ((kmsg->ikm_size != IKM_SAVED_KMSG_SIZE) ||
+ copyoutmsg(&kmsg->ikm_header, msg,
+ reply_size))
+ goto slow_put;
+
+ if (!ikm_cache_free_try(kmsg))
+ goto slow_put;
+
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS; /* help for the compiler */
+
+ /*
+ * The slow path has a few non-register temporary
+ * variables used only for call-by-reference.
+ */
+
+ {
+ ipc_kmsg_t temp_kmsg;
+ mach_port_seqno_t temp_seqno;
+ ipc_object_t temp_rcv_object;
+ ipc_mqueue_t temp_rcv_mqueue;
+
+ slow_get:
+ /*
+ * No locks, references, or messages held.
+ * Still have to get the request, send it,
+ * receive reply, etc.
+ */
+
+ mr = ipc_kmsg_get(msg, send_size, &temp_kmsg);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ kmsg = temp_kmsg;
+
+ /* try to get back on optimized path */
+ goto fast_copyin;
+
+ slow_copyin:
+ /*
+ * We have the message data in kmsg, but
+ * we still need to copyin, send it,
+ * receive a reply, and do copyout.
+ */
+
+ mr = ipc_kmsg_copyin(kmsg, space, current_map(),
+ MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ /* try to get back on optimized path */
+
+ if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR)
+ goto slow_send;
+
+ dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ assert(IP_VALID(dest_port));
+
+ ip_lock(dest_port);
+ if (dest_port->ip_receiver == ipc_space_kernel) {
+ assert(ip_active(dest_port));
+ ip_unlock(dest_port);
+ goto kernel_send;
+ }
+
+ if (ip_active(dest_port) &&
+ ((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
+ (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE)))
+ {
+ /*
+ * Try an optimized ipc_mqueue_copyin.
+ * It will work if this is a request message.
+ */
+
+ ipc_port_t reply_port;
+
+ reply_port = (ipc_port_t)
+ kmsg->ikm_header.msgh_local_port;
+ if (IP_VALID(reply_port)) {
+ if (ip_lock_try(reply_port)) {
+ if (ip_active(reply_port) &&
+ reply_port->ip_receiver == space &&
+ reply_port->ip_receiver_name == rcv_name &&
+ reply_port->ip_pset == IPS_NULL)
+ {
+ /* Grab a reference to the reply port. */
+ rcv_object = (ipc_object_t) reply_port;
+ io_reference(rcv_object);
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+ }
+ ip_unlock(reply_port);
+ }
+ }
+ }
+
+ ip_unlock(dest_port);
+ goto slow_send;
+
+ kernel_send:
+ /*
+ * Special case: send message to kernel services.
+ * The request message has been copied into the
+ * kmsg. Nothing is locked.
+ */
+
+ {
+ ipc_port_t reply_port;
+
+ /*
+ * Perform the kernel function.
+ */
+
+ kmsg = ipc_kobject_server(kmsg);
+ if (kmsg == IKM_NULL) {
+ /*
+ * No reply. Take the
+ * slow receive path.
+ */
+ goto slow_get_rcv_port;
+ }
+
+ /*
+ * Check that:
+ * the reply port is alive
+ * we hold the receive right
+ * the name has not changed.
+ * the port is not in a set
+ * If any of these are not true,
+ * we cannot directly receive the reply
+ * message.
+ */
+ reply_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ ip_lock(reply_port);
+
+ if ((!ip_active(reply_port)) ||
+ (reply_port->ip_receiver != space) ||
+ (reply_port->ip_receiver_name != rcv_name) ||
+ (reply_port->ip_pset != IPS_NULL))
+ {
+ ip_unlock(reply_port);
+ ipc_mqueue_send_always(kmsg);
+ goto slow_get_rcv_port;
+ }
+
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ /* keep port locked, and don`t change ref count yet */
+
+ /*
+ * If there are messages on the port
+ * or other threads waiting for a message,
+ * we cannot directly receive the reply.
+ */
+ if ((ipc_thread_queue_first(&rcv_mqueue->imq_threads)
+ != ITH_NULL) ||
+ (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
+ != IKM_NULL))
+ {
+ imq_unlock(rcv_mqueue);
+ ip_unlock(reply_port);
+ ipc_mqueue_send_always(kmsg);
+ goto slow_get_rcv_port;
+ }
+
+ /*
+ * We can directly receive this reply.
+ * Since the kernel reply never blocks,
+ * it holds no message_accepted request.
+ * Since there were no messages queued
+ * on the reply port, there should be
+ * no threads blocked waiting to send.
+ */
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ assert(ipc_thread_queue_first(&reply_port->ip_blocked)
+ == ITH_NULL);
+
+ dest_port = reply_port;
+ kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
+ imq_unlock(rcv_mqueue);
+
+ /*
+ * inline ipc_object_release.
+ * Port is still locked.
+ * Reference count was not incremented.
+ */
+ ip_check_unlock(reply_port);
+
+ /* copy out the kernel reply */
+ goto fast_copyout;
+ }
+
+ slow_send:
+ /*
+ * Nothing is locked. We have acquired kmsg, but
+ * we still need to send it and receive a reply.
+ */
+
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
+ current_map());
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ slow_get_rcv_port:
+ /*
+ * We have sent the message. Copy in the receive port.
+ */
+ mr = ipc_mqueue_copyin(space, rcv_name,
+ &temp_rcv_mqueue, &temp_rcv_object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ rcv_mqueue = temp_rcv_mqueue;
+ rcv_object = temp_rcv_object;
+ /* hold ref for rcv_object; rcv_mqueue is locked */
+
+ /*
+ slow_receive:
+ */
+ /*
+ * Now we have sent the request and copied in rcv_name,
+ * so rcv_mqueue is locked and hold ref for rcv_object.
+ * Just receive a reply and try to get back to fast path.
+ *
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = rcv_object;
+ self->ith_mqueue = rcv_mqueue;
+
+ mr = ipc_mqueue_receive(rcv_mqueue,
+ MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, mach_msg_continue,
+ &temp_kmsg, &temp_seqno);
+ /* rcv_mqueue is unlocked */
+ ipc_object_release(rcv_object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ (kmsg = temp_kmsg)->ikm_header.msgh_seqno = temp_seqno;
+ dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ goto fast_copyout;
+
+ slow_copyout:
+ /*
+ * Nothing locked and no references held, except
+ * we have kmsg with msgh_seqno filled in. Must
+ * still check against rcv_size and do
+ * ipc_kmsg_copyout/ipc_kmsg_put.
+ */
+
+ reply_size = kmsg->ikm_header.msgh_size;
+ if (rcv_size < msg_usize(&kmsg->ikm_header)) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, current_map(),
+ MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ /* try to get back on optimized path */
+
+ goto fast_put;
+
+ slow_put:
+ mr = ipc_kmsg_put(msg, kmsg, reply_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ } else if (option == MACH_SEND_MSG) {
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+
+ mr = ipc_kmsg_get(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return mr;
+ }
+
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ }
+
+ return mr;
+ } else if (option == MACH_RCV_MSG) {
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+
+ mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* hold ref for object; mqueue is locked */
+
+ /*
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = object;
+ self->ith_mqueue = mqueue;
+
+ mr = ipc_mqueue_receive(mqueue,
+ MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, mach_msg_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (rcv_size < msg_usize(&kmsg->ikm_header)) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ return MACH_RCV_TOO_LARGE;
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ return mr;
+ }
+
+ return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ } else if (option == MACH_MSG_OPTION_NONE) {
+ /*
+ * We can measure the "null mach_msg_trap"
+ * (syscall entry and thread_syscall_return exit)
+ * with this path.
+ */
+
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+ }
+
+ if (option & MACH_SEND_MSG) {
+ mr = mach_msg_send(msg, option, send_size,
+ time_out, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ }
+
+ if (option & MACH_RCV_MSG) {
+ mr = mach_msg_receive(msg, option, rcv_size, rcv_name,
+ time_out, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: mach_msg_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+mach_msg_continue(void)
+{
+	/*
+	 * Recover the receive parameters that mach_msg_trap (or the
+	 * slow receive path) stashed in the thread before it blocked:
+	 * user buffer, maximum receive size, the object we hold a
+	 * reference on, and the message queue we were waiting on.
+	 */
+	ipc_thread_t thread = current_thread();
+	task_t task = thread->task;
+	ipc_space_t space = task->itk_space;
+	vm_map_t map = task->map;
+	mach_msg_user_header_t *msg = thread->ith_msg;
+	mach_msg_size_t rcv_size = thread->ith_rcv_size;
+	ipc_object_t object = thread->ith_object;
+	ipc_mqueue_t mqueue = thread->ith_mqueue;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+	mach_msg_return_t mr;
+
+	/*
+	 * Resume the receive (the TRUE argument marks this as a
+	 * continuation/resume of an in-progress receive -- TODO(review):
+	 * confirm against ipc_mqueue_receive's parameter list; the
+	 * definition is not visible here).  If we block again, control
+	 * re-enters this same continuation on a fresh stack.
+	 */
+	mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
+				MACH_MSG_SIZE_MAX, MACH_MSG_TIMEOUT_NONE,
+				TRUE, mach_msg_continue, &kmsg, &seqno);
+	/* mqueue is unlocked */
+	ipc_object_release(object);	/* drop the ref taken before blocking */
+	if (mr != MACH_MSG_SUCCESS) {
+		thread_syscall_return(mr);
+		/*NOTREACHED*/
+	}
+
+	kmsg->ikm_header.msgh_seqno = seqno;
+	if (msg_usize(&kmsg->ikm_header) > rcv_size) {
+		/*
+		 * Message doesn't fit in the user's buffer: consume the
+		 * destination right, return only the (truncated) header,
+		 * and report MACH_RCV_TOO_LARGE.
+		 */
+		ipc_kmsg_copyout_dest(kmsg, space);
+		(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+		thread_syscall_return(MACH_RCV_TOO_LARGE);
+		/*NOTREACHED*/
+	}
+
+	mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+	if (mr != MACH_MSG_SUCCESS) {
+		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+			/* Header copied out OK; return the full message
+			   along with the body-error status.  */
+			(void) ipc_kmsg_put(msg, kmsg,
+					    kmsg->ikm_header.msgh_size);
+		} else {
+			/* Header copyout failed; salvage the destination
+			   right and return just the header.  */
+			ipc_kmsg_copyout_dest(kmsg, space);
+			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+		}
+
+		thread_syscall_return(mr);
+		/*NOTREACHED*/
+	}
+
+	/* Success: copy the whole message out to the user buffer.  */
+	mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+	thread_syscall_return(mr);
+	/*NOTREACHED*/
+}
+
+/*
+ * Routine: mach_msg_interrupt
+ * Purpose:
+ * Attempts to force a thread waiting at mach_msg_continue or
+ * mach_msg_receive_continue into a clean point. Returns TRUE
+ * if this was possible.
+ * Conditions:
+ * Nothing locked. The thread must NOT be runnable.
+ */
+
+boolean_t
+mach_msg_interrupt(thread_t thread)
+{
+	ipc_mqueue_t mqueue;
+
+	/* Only threads parked in one of the two receive continuations
+	   may be interrupted through this path.  */
+	assert((thread->swap_func == mach_msg_continue) ||
+	       (thread->swap_func == mach_msg_receive_continue));
+
+	mqueue = thread->ith_mqueue;
+	imq_lock(mqueue);
+	if (thread->ith_state != MACH_RCV_IN_PROGRESS) {
+		/*
+		 * The thread is no longer waiting for a message.
+		 * It may have a message sitting in ith_kmsg.
+		 * We can't clean this up.
+		 */
+
+		imq_unlock(mqueue);
+		return FALSE;
+	}
+	/* Still blocked: pull the thread off the queue of waiting
+	   receivers while we hold the mqueue lock.  */
+	ipc_thread_rmqueue(&mqueue->imq_threads, thread);
+	imq_unlock(mqueue);
+
+	/* Drop the reference the receive path took on the port/pset.  */
+	ipc_object_release(thread->ith_object);
+
+	/*
+	 * Arrange for the thread to resume in user mode with
+	 * MACH_RCV_INTERRUPTED instead of re-entering the receive
+	 * continuation.  NOTE(review): assumes the caller guarantees
+	 * the thread is not runnable (per the routine's contract), so
+	 * swap_func can be rewritten safely here.
+	 */
+	thread_set_syscall_return(thread, MACH_RCV_INTERRUPTED);
+	thread->swap_func = thread_exception_return;
+	return TRUE;
+}
diff --git a/ipc/mach_msg.h b/ipc/mach_msg.h
new file mode 100644
index 0000000..2951bce
--- /dev/null
+++ b/ipc/mach_msg.h
@@ -0,0 +1,60 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_msg.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of internal messaging primitives.
+ */
+
+#ifndef _IPC_MACH_MSG_H_
+#define _IPC_MACH_MSG_H_
+
+#include <mach/boolean.h>
+#include <mach/message.h>
+
+/* Send phase of mach_msg: copy in and enqueue a user message.  */
+extern mach_msg_return_t
+mach_msg_send(mach_msg_user_header_t *, mach_msg_option_t,
+	      mach_msg_size_t, mach_msg_timeout_t, mach_port_name_t);
+
+/* Receive phase of mach_msg: dequeue a message and copy it out.  */
+extern mach_msg_return_t
+mach_msg_receive(mach_msg_user_header_t *, mach_msg_option_t,
+		 mach_msg_size_t, mach_port_name_t,
+		 mach_msg_timeout_t, mach_port_name_t);
+
+/* Continuation resumed after mach_msg_receive blocks.  */
+extern void
+mach_msg_receive_continue(void);
+
+/* Continuation resumed after the mach_msg trap's receive blocks.  */
+extern void
+mach_msg_continue(void);
+
+/* Force a thread waiting in a receive continuation to a clean point;
+   returns TRUE on success.  Caller must ensure the thread is not
+   runnable.  */
+extern boolean_t
+mach_msg_interrupt(thread_t);
+
+#endif	/* _IPC_MACH_MSG_H_ */
diff --git a/ipc/mach_port.c b/ipc/mach_port.c
new file mode 100644
index 0000000..d8696e2
--- /dev/null
+++ b/ipc/mach_port.c
@@ -0,0 +1,1578 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_port.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported kernel calls. See mach/mach_port.defs.
+ */
+
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/notify.h>
+#include <mach/mach_param.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#ifdef MIGRATING_THREADS
+#include <kern/task.h>
+#include <kern/act.h>
+#endif /* MIGRATING_THREADS */
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+#include <ipc/mach_port.h>
+#include <ipc/mach_port.server.h>
+
+
+/*
+ * Routine: mach_port_names_helper
+ * Purpose:
+ * A helper function for mach_port_names.
+ */
+
+static void
+mach_port_names_helper(
+ ipc_port_timestamp_t timestamp,
+ ipc_entry_t entry,
+ mach_port_name_t name,
+ mach_port_name_t *names,
+ mach_port_type_t *types,
+ ipc_entry_num_t *actualp)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ ipc_port_request_index_t request = entry->ie_request;
+ mach_port_type_t type;
+ ipc_entry_num_t actual;
+
+ if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+ ipc_port_t port;
+ boolean_t died;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ /*
+ * The timestamp serializes mach_port_names
+ * with ipc_port_destroy. If the port died,
+ * but after mach_port_names started, pretend
+ * that it isn't dead.
+ */
+
+ ip_lock(port);
+ died = (!ip_active(port) &&
+ IP_TIMESTAMP_ORDER(port->ip_timestamp, timestamp));
+ ip_unlock(port);
+
+ if (died) {
+ /* pretend this is a dead-name entry */
+
+ bits &= ~(IE_BITS_TYPE_MASK|IE_BITS_MAREQUEST);
+ bits |= MACH_PORT_TYPE_DEAD_NAME;
+ if (request != 0)
+ bits++;
+ request = 0;
+ }
+ }
+
+ type = IE_BITS_TYPE(bits);
+ if (request != 0)
+ type |= MACH_PORT_TYPE_DNREQUEST;
+ if (bits & IE_BITS_MAREQUEST)
+ type |= MACH_PORT_TYPE_MAREQUEST;
+
+ actual = *actualp;
+ names[actual] = name;
+ types[actual] = type;
+ *actualp = actual+1;
+}
+
+/*
+ * Routine: mach_port_names [kernel call]
+ * Purpose:
+ * Retrieves a list of the rights present in the space,
+ * along with type information. (Same as returned
+ * by mach_port_type.) The names are returned in
+ * no particular order, but they (and the type info)
+ * are an accurate snapshot of the space.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Arrays of names and types returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_names(
+ ipc_space_t space,
+ mach_port_name_t **namesp,
+ mach_msg_type_number_t *namesCnt,
+ mach_port_type_t **typesp,
+ mach_msg_type_number_t *typesCnt)
+{
+ ipc_entry_num_t actual; /* this many names */
+ ipc_port_timestamp_t timestamp; /* logical time of this operation */
+ mach_port_name_t *names;
+ mach_port_type_t *types;
+ kern_return_t kr;
+
+ vm_size_t size; /* size of allocated memory */
+ vm_offset_t addr1; /* allocated memory, for names */
+ vm_offset_t addr2; /* allocated memory, for types */
+ vm_map_copy_t memory1; /* copied-in memory, for names */
+ vm_map_copy_t memory2; /* copied-in memory, for types */
+ ipc_entry_num_t bound;
+
+ /* safe simplifying assumption */
+ assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t));
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ size = 0;
+
+ for (;;) {
+ vm_size_t size_needed;
+
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ if (size != 0) {
+ kmem_free(ipc_kernel_map, addr1, size);
+ kmem_free(ipc_kernel_map, addr2, size);
+ }
+ return KERN_INVALID_TASK;
+ }
+
+ /* upper bound on number of names in the space */
+
+ bound = space->is_size;
+ size_needed = round_page(bound * sizeof(mach_port_name_t));
+
+ if (size_needed <= size)
+ break;
+
+ is_read_unlock(space);
+
+ if (size != 0) {
+ kmem_free(ipc_kernel_map, addr1, size);
+ kmem_free(ipc_kernel_map, addr2, size);
+ }
+ size = size_needed;
+
+ kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE);
+ if (kr != KERN_SUCCESS) {
+ printf_once("no more room in ipc_kernel_map\n");
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE);
+ if (kr != KERN_SUCCESS) {
+ printf_once("no more room in ipc_kernel_map\n");
+ kmem_free(ipc_kernel_map, addr1, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* can't fault while we hold locks */
+
+ kr = vm_map_pageable(ipc_kernel_map, addr1, addr1 + size,
+ VM_PROT_READ|VM_PROT_WRITE, TRUE, TRUE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_pageable(ipc_kernel_map, addr2, addr2 + size,
+ VM_PROT_READ|VM_PROT_WRITE, TRUE, TRUE);
+ assert(kr == KERN_SUCCESS);
+ }
+ /* space is read-locked and active */
+
+ names = (mach_port_name_t *) addr1;
+ types = (mach_port_type_t *) addr2;
+ actual = 0;
+
+ timestamp = ipc_port_timestamp();
+
+ ipc_entry_t entry;
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&space->is_map, &iter, entry) {
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+ mach_port_names_helper(timestamp, entry, entry->ie_name,
+ names, types, &actual);
+ }
+ }
+ /* every entry may hold a right, so actual can reach bound exactly */
+ assert(actual <= bound);
+ is_read_unlock(space);
+
+ if (actual == 0) {
+ memory1 = VM_MAP_COPY_NULL;
+ memory2 = VM_MAP_COPY_NULL;
+
+ if (size != 0) {
+ kmem_free(ipc_kernel_map, addr1, size);
+ kmem_free(ipc_kernel_map, addr2, size);
+ }
+ } else {
+ vm_size_t size_used;
+
+ size_used = round_page(actual * sizeof(mach_port_name_t));
+
+ /*
+ * Make used memory pageable and get it into
+ * copied-in form. Free any unused memory.
+ */
+
+ kr = vm_map_pageable(ipc_kernel_map,
+ addr1, addr1 + size_used,
+ VM_PROT_NONE, TRUE, TRUE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_pageable(ipc_kernel_map,
+ addr2, addr2 + size_used,
+ VM_PROT_NONE, TRUE, TRUE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr1, size_used,
+ TRUE, &memory1);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr2, size_used,
+ TRUE, &memory2);
+ assert(kr == KERN_SUCCESS);
+
+ if (size_used != size) {
+ kmem_free(ipc_kernel_map,
+ addr1 + size_used, size - size_used);
+ kmem_free(ipc_kernel_map,
+ addr2 + size_used, size - size_used);
+ }
+ }
+
+ *namesp = (mach_port_name_t *) memory1;
+ *namesCnt = actual;
+ *typesp = (mach_port_type_t *) memory2;
+ *typesCnt = actual;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_type [kernel call]
+ * Purpose:
+ * Retrieves the type of a right in the space.
+ * The type is a bitwise combination of one or more
+ * of the following type bits:
+ * MACH_PORT_TYPE_SEND
+ * MACH_PORT_TYPE_RECEIVE
+ * MACH_PORT_TYPE_SEND_ONCE
+ * MACH_PORT_TYPE_PORT_SET
+ * MACH_PORT_TYPE_DEAD_NAME
+ * In addition, the following pseudo-type bits may be present:
+ * MACH_PORT_TYPE_DNREQUEST
+ * A dead-name notification is requested.
+ * MACH_PORT_TYPE_MAREQUEST
+ * The send/receive right is blocked;
+ * a msg-accepted notification is outstanding.
+ * MACH_PORT_TYPE_COMPAT
+ * This is a compatibility-mode right;
+ * when the port dies, it will disappear
+ * instead of turning into a dead-name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Type is returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_type(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_type_t *typep)
+{
+ mach_port_urefs_t urefs;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_info(space, name, entry, typep, &urefs);
+ if (kr == KERN_SUCCESS)
+ is_write_unlock(space);
+ /* space is unlocked */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_rename [kernel call]
+ * Purpose:
+ * Changes the name denoting a right,
+ * from oname to nname.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is renamed.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The oname doesn't denote a right.
+ * KERN_INVALID_VALUE The nname isn't a legal name.
+ * KERN_NAME_EXISTS The nname already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_rename(
+ ipc_space_t space,
+ mach_port_name_t oname,
+ mach_port_name_t nname)
+{
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!MACH_PORT_NAME_VALID(nname))
+ return KERN_INVALID_VALUE;
+
+ return ipc_object_rename(space, oname, nname);
+}
+
+/*
+ * Routine: mach_port_allocate_name [kernel call]
+ * Purpose:
+ * Allocates a right in a space, using a specific name
+ * for the new right. Possible rights:
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ *
+ * A new port (allocated with MACH_PORT_RIGHT_RECEIVE)
+ * has no extant send or send-once rights and no queued
+ * messages. Its queue limit is MACH_PORT_QLIMIT_DEFAULT
+ * and its make-send count is 0. It is not a member of
+ * a port set. It has no registered no-senders or
+ * port-destroyed notification requests.
+ *
+ * A new port set has no members.
+ *
+ * A new dead name has one user reference.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is allocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE The name isn't a legal name.
+ * KERN_INVALID_VALUE "right" isn't a legal kind of right.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_allocate_name(
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_name_t name)
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!MACH_PORT_NAME_VALID(name))
+ return KERN_INVALID_VALUE;
+
+ switch (right) {
+ case MACH_PORT_RIGHT_RECEIVE: {
+ ipc_port_t port;
+
+ kr = ipc_port_alloc_name(space, name, &port);
+ if (kr == KERN_SUCCESS)
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_PORT_SET: {
+ ipc_pset_t pset;
+
+ kr = ipc_pset_alloc_name(space, name, &pset);
+ if (kr == KERN_SUCCESS)
+ ips_unlock(pset);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_DEAD_NAME:
+ kr = ipc_object_alloc_dead_name(space, name);
+ break;
+
+ default:
+ kr = KERN_INVALID_VALUE;
+ break;
+ }
+
+ return kr;
+}
+
+/*
+ * Routine: mach_port_allocate [kernel call]
+ * Purpose:
+ * Allocates a right in a space. Like mach_port_allocate_name,
+ * except that the implementation picks a name for the right.
+ * The name may be any legal name in the space that doesn't
+ * currently denote a right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is allocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal kind of right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ * KERN_NO_SPACE No room in space for another right.
+ */
+
+kern_return_t
+mach_port_allocate(
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_name_t *namep)
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ switch (right) {
+ case MACH_PORT_RIGHT_RECEIVE: {
+ ipc_port_t port;
+
+ kr = ipc_port_alloc(space, namep, &port);
+ if (kr == KERN_SUCCESS)
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_PORT_SET: {
+ ipc_pset_t pset;
+
+ kr = ipc_pset_alloc(space, namep, &pset);
+ if (kr == KERN_SUCCESS)
+ ips_unlock(pset);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_DEAD_NAME:
+ kr = ipc_object_alloc_dead(space, namep);
+ break;
+
+ default:
+ kr = KERN_INVALID_VALUE;
+ break;
+ }
+
+ return (kr);
+}
+
+/*
+ * Routine: mach_port_destroy [kernel call]
+ * Purpose:
+ * Cleans up and destroys all rights denoted by a name
+ * in a space. The destruction of a receive right
+ * destroys the port, unless a port-destroyed request
+ * has been made for it; the destruction of a port-set right
+ * destroys the port set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The name is destroyed.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+volatile boolean_t mach_port_deallocate_debug = FALSE;
+
+kern_return_t
+mach_port_destroy(
+ ipc_space_t space,
+ mach_port_name_t name)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ if (MACH_PORT_NAME_VALID (name) && space == current_space()) {
+ printf("task %.*s destroying a bogus port %lu, most probably a bug.\n", (int) sizeof current_task()->name, current_task()->name, (unsigned long) name);
+ if (mach_port_deallocate_debug)
+ SoftDebugger("mach_port_destroy");
+ }
+ return kr;
+ }
+ /* space is write-locked and active */
+
+ kr = ipc_right_destroy(space, name, entry); /* unlocks space */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_deallocate [kernel call]
+ * Purpose:
+ * Deallocates a user reference from a send right,
+ * send-once right, or a dead-name right. May
+ * deallocate the right, if this is the last uref,
+ * and destroy the name, if it doesn't denote
+ * other rights.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The uref is deallocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT The right isn't correct.
+ */
+
+kern_return_t
+mach_port_deallocate(
+ ipc_space_t space,
+ mach_port_name_t name)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ if (MACH_PORT_NAME_VALID (name) && space == current_space()) {
+ printf("task %.*s deallocating a bogus port %lu, most probably a bug.\n", (int) sizeof current_task()->name, current_task()->name, (unsigned long) name);
+ if (mach_port_deallocate_debug)
+ SoftDebugger("mach_port_deallocate");
+ }
+ return kr;
+ }
+ /* space is write-locked */
+
+ kr = ipc_right_dealloc(space, name, entry); /* unlocks space */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_get_refs [kernel call]
+ * Purpose:
+ * Retrieves the number of user references held by a right.
+ * Receive rights, port-set rights, and send-once rights
+ * always have one user reference. Returns zero if the
+ * name denotes a right, but not the queried right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Number of urefs returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal value.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_get_refs(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_right_t right,
+ mach_port_urefs_t *urefsp)
+{
+ mach_port_type_t type;
+ mach_port_urefs_t urefs;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (right >= MACH_PORT_RIGHT_NUMBER)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_info(space, name, entry, &type, &urefs); /* unlocks */
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+ is_write_unlock(space);
+
+ if (type & MACH_PORT_TYPE(right))
+ switch (right) {
+ case MACH_PORT_RIGHT_SEND_ONCE:
+ assert(urefs == 1);
+ /* fall-through */
+
+ case MACH_PORT_RIGHT_PORT_SET:
+ case MACH_PORT_RIGHT_RECEIVE:
+ *urefsp = 1;
+ break;
+
+ case MACH_PORT_RIGHT_DEAD_NAME:
+ case MACH_PORT_RIGHT_SEND:
+ assert(urefs > 0);
+ *urefsp = urefs;
+ break;
+
+ default:
+ panic("mach_port_get_refs: strange rights");
+ }
+ else
+ *urefsp = 0;
+
+ return kr;
+}
+
+/*
+ * Routine: mach_port_mod_refs
+ * Purpose:
+ * Modifies the number of user references held by a right.
+ * The resulting number of user references must be non-negative.
+ * If it is zero, the right is deallocated. If the name
+ * doesn't denote other rights, it is destroyed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Modified number of urefs.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal value.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote specified right.
+ * KERN_INVALID_VALUE Impossible modification to urefs.
+ * KERN_UREFS_OVERFLOW Urefs would overflow.
+ */
+
+kern_return_t
+mach_port_mod_refs(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_right_t right,
+ mach_port_delta_t delta)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (right >= MACH_PORT_RIGHT_NUMBER)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ if (MACH_PORT_NAME_VALID (name) && space == current_space()) {
+ printf("task %.*s %screasing a bogus port "
+ "%u by %d, most probably a bug.\n",
+ (int) (sizeof current_task()->name),
+ current_task()->name,
+ delta < 0 ? "de" : "in", name,
+ delta < 0 ? -delta : delta);
+ if (mach_port_deallocate_debug)
+ SoftDebugger("mach_port_mod_refs");
+ }
+ return kr;
+ }
+ /* space is write-locked and active */
+
+ kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_set_qlimit [kernel call]
+ * Purpose:
+ * Changes a receive right's queue limit.
+ * The new queue limit must be between 0 and
+ * MACH_PORT_QLIMIT_MAX, inclusive.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set queue limit.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ * KERN_INVALID_VALUE Illegal queue limit.
+ */
+
+kern_return_t
+mach_port_set_qlimit(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_msgcount_t qlimit)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (qlimit > MACH_PORT_QLIMIT_MAX)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_qlimit(port, qlimit);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_set_mscount [kernel call]
+ * Purpose:
+ * Changes a receive right's make-send count.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set make-send count.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_mscount(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_mscount_t mscount)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_mscount(port, mscount);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_set_seqno [kernel call]
+ * Purpose:
+ * Changes a receive right's sequence number.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set sequence number.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_seqno(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_seqno_t seqno)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_seqno(port, seqno);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_gst_helper
+ * Purpose:
+ * A helper function for mach_port_get_set_status.
+ */
+
+static void
+mach_port_gst_helper(
+ ipc_pset_t pset,
+ ipc_port_t port,
+ ipc_entry_num_t maxnames,
+ mach_port_name_t *names,
+ ipc_entry_num_t *actualp)
+{
+ ipc_pset_t ip_pset;
+ mach_port_name_t name;
+
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+
+ name = port->ip_receiver_name;
+ assert(name != MACH_PORT_NULL);
+ ip_pset = port->ip_pset;
+
+ ip_unlock(port);
+
+ if (pset == ip_pset) {
+ ipc_entry_num_t actual = *actualp;
+
+ if (actual < maxnames)
+ names[actual] = name;
+
+ *actualp = actual+1;
+ }
+}
+
+/*
+ * Routine: mach_port_get_set_status [kernel call]
+ * Purpose:
+ * Retrieves a list of members in a port set.
+ * Returns the space's name for each receive right member.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved list of members.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote a port set.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_get_set_status(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_name_t **members,
+ mach_msg_type_number_t *membersCnt)
+{
+ ipc_entry_num_t actual; /* this many members */
+ ipc_entry_num_t maxnames; /* space for this many members */
+ kern_return_t kr;
+
+ vm_size_t size; /* size of allocated memory */
+ vm_offset_t addr; /* allocated memory */
+ vm_map_copy_t memory; /* copied-in memory */
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ size = PAGE_SIZE; /* initial guess */
+
+ for (;;) {
+ ipc_entry_t entry;
+ mach_port_name_t *names;
+ ipc_pset_t pset;
+
+ kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
+ if (kr != KERN_SUCCESS) {
+ printf_once("no more room in ipc_kernel_map\n");
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* can't fault while we hold locks */
+
+ kr = vm_map_pageable(ipc_kernel_map, addr, addr + size,
+ VM_PROT_READ|VM_PROT_WRITE, TRUE, TRUE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = ipc_right_lookup_read(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ kmem_free(ipc_kernel_map, addr, size);
+ return kr;
+ }
+ /* space is read-locked and active */
+
+ if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) {
+ is_read_unlock(space);
+ kmem_free(ipc_kernel_map, addr, size);
+ return KERN_INVALID_RIGHT;
+ }
+
+ pset = (ipc_pset_t) entry->ie_object;
+ assert(pset != IPS_NULL);
+ /* the port set must be active */
+
+ names = (mach_port_name_t *) addr;
+ maxnames = size / sizeof(mach_port_name_t);
+ actual = 0;
+
+ ipc_entry_t ientry;
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&space->is_map, &iter, ientry) {
+ ipc_entry_bits_t bits = ientry->ie_bits;
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ ipc_port_t port =
+ (ipc_port_t) ientry->ie_object;
+
+ mach_port_gst_helper(pset, port, maxnames,
+ names, &actual);
+ }
+ }
+
+ is_read_unlock(space);
+
+ if (actual <= maxnames)
+ break;
+
+ /* didn't have enough memory; allocate more */
+
+ kmem_free(ipc_kernel_map, addr, size);
+ size = round_page(actual * sizeof(mach_port_name_t)) + PAGE_SIZE;
+ }
+
+ if (actual == 0) {
+ memory = VM_MAP_COPY_NULL;
+
+ kmem_free(ipc_kernel_map, addr, size);
+ } else {
+ vm_size_t size_used;
+
+ size_used = round_page(actual * sizeof(mach_port_name_t));
+
+ /*
+ * Make used memory pageable and get it into
+ * copied-in form. Free any unused memory.
+ */
+
+ kr = vm_map_pageable(ipc_kernel_map,
+ addr, addr + size_used,
+ VM_PROT_NONE, TRUE, TRUE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
+ TRUE, &memory);
+ assert(kr == KERN_SUCCESS);
+
+ if (size_used != size)
+ kmem_free(ipc_kernel_map,
+ addr + size_used, size - size_used);
+ }
+
+ *members = (mach_port_name_t *) memory;
+ *membersCnt = actual;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_move_member [kernel call]
+ * Purpose:
+ * If after is MACH_PORT_NULL, removes member
+ * from the port set it is in. Otherwise, adds
+ * member to after, removing it from any set
+ * it might already be in.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Moved the port.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Member didn't denote a right.
+ * KERN_INVALID_RIGHT Member didn't denote a receive right.
+ * KERN_INVALID_NAME After didn't denote a right.
+ * KERN_INVALID_RIGHT After didn't denote a port set right.
+ * KERN_NOT_IN_SET
+ * After is MACH_PORT_NULL and Member isn't in a port set.
+ */
+
+kern_return_t
+mach_port_move_member(
+ ipc_space_t space,
+ mach_port_name_t member,
+ mach_port_name_t after)
+{
+ ipc_entry_t entry;
+ ipc_port_t port;
+ ipc_pset_t nset;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_read(space, member, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is read-locked and active */
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (after == MACH_PORT_NULL)
+ nset = IPS_NULL;
+ else {
+ entry = ipc_entry_lookup(space, after);
+ if (entry == IE_NULL) {
+ is_read_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ nset = (ipc_pset_t) entry->ie_object;
+ assert(nset != IPS_NULL);
+ }
+
+ kr = ipc_pset_move(space, port, nset);
+ /* space is unlocked */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_request_notification [kernel call]
+ * Purpose:
+ * Requests a notification. The caller supplies
+ * a send-once right for the notification to use,
+ * and the call returns the previously registered
+ * send-once right, if any. Possible types:
+ *
+ * MACH_NOTIFY_PORT_DESTROYED
+ * Requests a port-destroyed notification
+ * for a receive right. Sync should be zero.
+ * MACH_NOTIFY_NO_SENDERS
+ * Requests a no-senders notification for a
+ * receive right. If there are currently no
+ * senders, sync is less than or equal to the
+ * current make-send count, and a send-once right
+ * is supplied, then an immediate no-senders
+ * notification is generated.
+ * MACH_NOTIFY_DEAD_NAME
+ * Requests a dead-name notification for a send
+ * or receive right. If the name is already a
+ * dead name, sync is non-zero, and a send-once
+ * right is supplied, then an immediate dead-name
+ * notification is generated.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Requested a notification.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE Bad id value.
+ * KERN_INVALID_NAME Name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote appropriate right.
+ * KERN_INVALID_CAPABILITY The notify port is dead.
+ * MACH_NOTIFY_PORT_DESTROYED:
+ * KERN_INVALID_VALUE Sync isn't zero.
+ * MACH_NOTIFY_DEAD_NAME:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ * KERN_INVALID_ARGUMENT Name denotes dead name, but
+ * sync is zero or notify is IP_NULL.
+ * KERN_UREFS_OVERFLOW Name denotes dead name, but
+ * generating immediate notif. would overflow urefs.
+ */
+
+kern_return_t
+mach_port_request_notification(
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_msg_id_t id,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (notify == IP_DEAD)
+ return KERN_INVALID_CAPABILITY;
+
+ switch (id) {
+ case MACH_NOTIFY_PORT_DESTROYED: {
+ ipc_port_t port, previous;
+
+ if (sync != 0)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_pdrequest(port, notify, &previous);
+ /* port is unlocked */
+
+ *previousp = previous;
+ break;
+ }
+
+ case MACH_NOTIFY_NO_SENDERS: {
+ ipc_port_t port;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_nsrequest(port, sync, notify, previousp);
+ /* port is unlocked */
+ break;
+ }
+
+ case MACH_NOTIFY_DEAD_NAME:
+ kr = ipc_right_dnrequest(space, name, sync != 0,
+ notify, previousp);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ break;
+
+ default:
+ return KERN_INVALID_VALUE;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_insert_right [kernel call]
+ * Purpose:
+ * Inserts a right into a space, as if the space
+ * voluntarily received the right in a message,
+ * except that the right gets the specified name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Inserted the right.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE The name isn't a legal name.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_INVALID_VALUE Message doesn't carry a port right.
+ * KERN_INVALID_CAPABILITY Port is null or dead.
+ * KERN_UREFS_OVERFLOW Urefs limit would be exceeded.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_insert_right(
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_port_t poly,
+ mach_msg_type_name_t polyPoly)
+{
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!MACH_PORT_NAME_VALID(name) ||
+ !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly))
+ return KERN_INVALID_VALUE;
+
+ if (!IO_VALID((ipc_object_t)poly))
+ return KERN_INVALID_CAPABILITY;
+
+ return ipc_object_copyout_name(space, (ipc_object_t)poly,
+ polyPoly, FALSE, name);
+}
+
+/*
+ * Routine: mach_port_extract_right [kernel call]
+ * Purpose:
+ * Extracts a right from a space, as if the space
+ * voluntarily sent the right to the caller.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted the right.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE Requested type isn't a port right.
+ * KERN_INVALID_NAME Name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote appropriate right.
+ */
+
+kern_return_t
+mach_port_extract_right(
+	ipc_space_t space,
+	mach_port_name_t name,
+	mach_msg_type_name_t msgt_name,
+	ipc_port_t *poly,
+	mach_msg_type_name_t *polyPoly)
+{
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* The requested disposition must name a port right type. */
+	if (!MACH_MSG_TYPE_PORT_ANY(msgt_name))
+		return KERN_INVALID_VALUE;
+
+	/* Move/copy the right out of the space, exactly as if the
+	   space had sent it in a message. */
+	kr = ipc_object_copyin(space, name, msgt_name, (ipc_object_t *) poly);
+
+	if (kr == KERN_SUCCESS)
+		/* Report the disposition the extracted right now carries. */
+		*polyPoly = ipc_object_copyin_type(msgt_name);
+	return kr;
+}
+
+/*
+ * Routine: mach_port_get_receive_status [kernel call]
+ * Purpose:
+ * Retrieves mucho info about a receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved status.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_get_receive_status(
+	ipc_space_t space,
+	mach_port_name_t name,
+	mach_port_status_t *statusp)
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	if (port->ip_pset != IPS_NULL) {
+		ipc_pset_t pset = port->ip_pset;
+
+		ips_lock(pset);
+		if (!ips_active(pset)) {
+			/* The pset died; lazily remove the port from it
+			   and report the port as not in a set. */
+			ipc_pset_remove(pset, port);
+			ips_check_unlock(pset);
+			goto no_port_set;
+		} else {
+			statusp->mps_pset = pset->ips_local_name;
+			/* While a member, the port's seqno is guarded by
+			   the pset's message-queue lock. */
+			imq_lock(&pset->ips_messages);
+			statusp->mps_seqno = port->ip_seqno;
+			imq_unlock(&pset->ips_messages);
+			ips_unlock(pset);
+			assert(MACH_PORT_NAME_VALID(statusp->mps_pset));
+		}
+	} else {
+	    no_port_set:
+		statusp->mps_pset = MACH_PORT_NULL;
+		imq_lock(&port->ip_messages);
+		statusp->mps_seqno = port->ip_seqno;
+		imq_unlock(&port->ip_messages);
+	}
+
+	/* Snapshot the remaining counters while the port is still locked. */
+	statusp->mps_mscount = port->ip_mscount;
+	statusp->mps_qlimit = port->ip_qlimit;
+	statusp->mps_msgcount = port->ip_msgcount;
+	statusp->mps_sorights = port->ip_sorights;
+	statusp->mps_srights = port->ip_srights > 0;
+	statusp->mps_pdrequest = port->ip_pdrequest != IP_NULL;
+	statusp->mps_nsrequest = port->ip_nsrequest != IP_NULL;
+	ip_unlock(port);
+
+	return KERN_SUCCESS;
+}
+
+#ifdef MIGRATING_THREADS
+kern_return_t
+mach_port_set_rpcinfo(
+	ipc_space_t space,
+	mach_port_name_t name,
+	void *rpc_info,
+	unsigned int rpc_info_count)
+{
+	ipc_target_t target;
+	ipc_object_t object;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* The name may denote either a port set or a receive right;
+	   try the port set first, then fall back to the port. */
+	kr = ipc_object_translate(space, name,
+				  MACH_PORT_RIGHT_PORT_SET, &object);
+	if (kr == KERN_SUCCESS)
+		target = &((ipc_pset_t)object)->ips_target;
+	else {
+		kr = ipc_object_translate(space, name,
+					  MACH_PORT_RIGHT_RECEIVE, &object);
+		if (kr != KERN_SUCCESS)
+			return kr;
+		target = &((ipc_port_t)object)->ip_target;
+	}
+
+	/* port/pset is locked and active */
+
+	/* Machine-dependent layer records the RPC info on the target. */
+	kr = port_machine_set_rpcinfo(target, rpc_info, rpc_info_count);
+
+	io_unlock(object);
+
+	return kr;
+}
+
+#if 1
+/* Debug counters: current and high-water number of server activations. */
+int sacts, maxsacts;
+#endif
+
+/* Print the server-activation debug counters (debugger aid). */
+void sact_count(void)
+{
+	printf("%d server activations in use, %d max\n", sacts, maxsacts);
+}
+
+/* Create a new activation in TASK and attach it to the ipc_target
+   (port or port set) that NAME denotes in the task's IPC space.
+   On success, passes a reference to the new activation to the caller
+   through OUT_ACT. */
+kern_return_t
+mach_port_create_act(
+	task_t task,
+	mach_port_name_t name,
+	vm_offset_t user_stack,
+	vm_offset_t user_rbuf,
+	vm_size_t user_rbuf_size,
+	Act **out_act)
+{
+	ipc_target_t target;
+	ipc_space_t space;
+	ipc_object_t object;
+	kern_return_t kr;
+	Act *act;
+
+	if (task == 0)
+		return KERN_INVALID_TASK;
+
+	/* First create the new activation. */
+	kr = act_create(task, user_stack, user_rbuf, user_rbuf_size, &act);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	space = task->itk_space;
+
+	/* NAME may denote a port set or a receive right; try both. */
+	kr = ipc_object_translate(space, name,
+				  MACH_PORT_RIGHT_PORT_SET, &object);
+	if (kr == KERN_SUCCESS)
+		target = &((ipc_pset_t)object)->ips_target;
+	else {
+		kr = ipc_object_translate(space, name,
+					  MACH_PORT_RIGHT_RECEIVE, &object);
+		if (kr != KERN_SUCCESS) {
+			/* Undo the act_create above before bailing out. */
+			act_terminate(act);
+			act_deallocate(act);
+			return kr;
+		}
+		target = &((ipc_port_t)object)->ip_target;
+	}
+
+	/* port/pset is locked and active */
+#if 0
+	printf("act port/pset %08x ipc_target %08x stack %08x act %08x\n",
+	       object, target, user_stack, act);
+#endif
+
+	/* Assign the activation to the port's actpool. */
+	kr = act_set_target(act, target);
+	if (kr != KERN_SUCCESS) {
+		io_unlock(object);
+		act_terminate(act);
+		act_deallocate(act);
+		return kr;
+	}
+#if 0
+	printf("  actpool %08x act %08x\n", target->ip_actpool, act);
+#endif
+
+	io_unlock(object);
+
+	/* Pass our reference to the activation back to the user. */
+	*out_act = act;
+
+#if 1
+	/* Debug accounting; mpsfu_high marker value is a debugging aid. */
+	sacts++;
+	if (sacts > maxsacts)
+		maxsacts = sacts;
+	act->mact.pcb->ss.mpsfu_high = 0x69;
+#endif
+	return KERN_SUCCESS;
+}
+
+#ifdef RPCKERNELSIG
+/* Record NAME's send right as TASK's syscall IPC entry.
+   Fixed: the original compared a task_t against IS_NULL, passed the
+   task (not its IPC space) to ipc_right_lookup_write, and referenced
+   an undeclared variable `space' — it could not have compiled. */
+kern_return_t
+mach_port_set_syscall_right(
+	task_t task,
+	mach_port_name_t name)
+{
+	ipc_space_t space;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	if (task == TASK_NULL)
+		return KERN_INVALID_TASK;
+
+	space = task->itk_space;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS) {
+		return kr;
+	}
+	/* space is write-locked and active */
+
+	if (!(entry->ie_bits & MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND))) {
+		is_write_unlock(space);
+		return KERN_INVALID_RIGHT;
+	}
+
+	task->syscall_ipc_entry = *entry;
+
+	is_write_unlock(space);
+
+	return KERN_SUCCESS;
+}
+#endif
+#endif /* MIGRATING_THREADS */
+
+/*
+ * Routine: mach_port_set_protected_payload [kernel call]
+ * Purpose:
+ * Changes a receive right's protected payload.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set protected payload.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_protected_payload(
+	ipc_space_t space,
+	mach_port_name_t name,
+	rpc_uintptr_t payload)
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	/* Delegate the actual state change to the ipc_port layer. */
+	ipc_port_set_protected_payload(port, payload);
+
+	ip_unlock(port);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_clear_protected_payload [kernel call]
+ * Purpose:
+ * Clears a receive right's protected payload.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Clear protected payload.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_clear_protected_payload(
+	ipc_space_t space,
+	mach_port_name_t name)
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	/* Delegate the actual state change to the ipc_port layer. */
+	ipc_port_clear_protected_payload(port);
+
+	ip_unlock(port);
+	return KERN_SUCCESS;
+}
+
+#if MACH_KDB
+
+/* Kernel-debugger hook: toggle tracing of port reference drops. */
+void
+db_debug_port_references (boolean_t enable)
+{
+	mach_port_deallocate_debug = enable;
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/mach_port.h b/ipc/mach_port.h
new file mode 100644
index 0000000..e91e495
--- /dev/null
+++ b/ipc/mach_port.h
@@ -0,0 +1,37 @@
+/*
+ * Mach Port Functions.
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Mach port functions.
+ *
+ */
+
+#ifndef _IPC_MACH_PORT_H_
+#define _IPC_MACH_PORT_H_
+
+#include <sys/types.h>
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_entry.h>
+
+#if MACH_KDB
+void db_debug_port_references (boolean_t enable);
+#endif /* MACH_KDB */
+
+#endif /* _IPC_MACH_PORT_H_ */
diff --git a/ipc/mach_port.srv b/ipc/mach_port.srv
new file mode 100644
index 0000000..c4f8536
--- /dev/null
+++ b/ipc/mach_port.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/mach_port.defs>
diff --git a/ipc/notify.defs b/ipc/notify.defs
new file mode 100644
index 0000000..db059b8
--- /dev/null
+++ b/ipc/notify.defs
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* We use custom functions to send notifications. These functions can
+ be found in `ipc_notify.c'. We use this file merely to produce the
+ list of message ids. */
+
+#include <mach/notify.defs>
diff --git a/ipc/port.h b/ipc/port.h
new file mode 100644
index 0000000..c85685d
--- /dev/null
+++ b/ipc/port.h
@@ -0,0 +1,106 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/port.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Implementation specific complement to mach/port.h.
+ */
+
+#ifndef _IPC_PORT_H_
+#define _IPC_PORT_H_
+
+#include <kern/debug.h>
+#include <mach/port.h>
+
+/*
+ * mach_port_name_t must be an unsigned type. Port values
+ * have two parts, a generation number and an index.
+ * These macros encapsulate all knowledge of how
+ * a mach_port_name_t is laid out.
+ *
+ * If the size of generation numbers changes,
+ * be sure to update IE_BITS_GEN_MASK and friends
+ * in ipc/ipc_entry.h.
+ */
+
+#if PORT_GENERATIONS
+#define MACH_PORT_INDEX(name) ((name) >> 8)
+#define MACH_PORT_GEN(name) (((name) & 0xff) << 24)
+#define MACH_PORT_MAKE(index, gen) (((index) << 8) | ((gen) >> 24))
+#else
+#define MACH_PORT_INDEX(name) (name)
+#define MACH_PORT_GEN(name) 0
+#define MACH_PORT_MAKE(index, gen) (index)
+#endif
+
+#define MACH_PORT_NGEN(name) MACH_PORT_MAKE(0, MACH_PORT_GEN(name))
+#define MACH_PORT_MAKEB(index, bits) MACH_PORT_MAKE(index, IE_BITS_GEN(bits))
+
+/*
+ * Typedefs for code cleanliness. These must all have
+ * the same (unsigned) type as mach_port_name_t.
+ */
+
+typedef mach_port_name_t mach_port_gen_t; /* generation numbers */
+
+
+#define MACH_PORT_UREFS_MAX ((mach_port_urefs_t) ((1 << 16) - 1))
+
+#define MACH_PORT_UREFS_OVERFLOW(urefs, delta) \
+ (((delta) > 0) && \
+ ((((urefs) + (delta)) <= (urefs)) || \
+ (((urefs) + (delta)) > MACH_PORT_UREFS_MAX)))
+
+#define MACH_PORT_UREFS_UNDERFLOW(urefs, delta) \
+ (((delta) < 0) && (-(delta) > (urefs)))
+
+
+/* Map an *invalid* port name to the corresponding port pointer:
+   NULL name -> null port, DEAD name -> dead port.
+   Calling this with a valid name is a kernel bug and panics. */
+static inline mach_port_t invalid_name_to_port(mach_port_name_t name)
+{
+	if (name == MACH_PORT_NAME_NULL)
+		return MACH_PORT_NULL;
+	if (name == MACH_PORT_NAME_DEAD)
+		return MACH_PORT_DEAD;
+	panic("invalid_name_to_port() called with a valid port");
+}
+
+/* Map an *invalid* port pointer to the corresponding port name:
+   null port -> NULL name, dead port -> DEAD name.
+   Calling this with a valid port is a kernel bug and panics. */
+static inline mach_port_name_t invalid_port_to_name(mach_port_t port)
+{
+	if (port == MACH_PORT_NULL)
+		return MACH_PORT_NAME_NULL;
+	if (port == MACH_PORT_DEAD)
+		return MACH_PORT_NAME_DEAD;
+	panic("invalid_port_to_name() called with a valid name");
+}
+
+#endif /* _IPC_PORT_H_ */
diff --git a/kern/.gitignore b/kern/.gitignore
new file mode 100644
index 0000000..72bccc6
--- /dev/null
+++ b/kern/.gitignore
@@ -0,0 +1,2 @@
+exc.none.defs.c
+exc.none.msgids
diff --git a/kern/act.c b/kern/act.c
new file mode 100644
index 0000000..3819ef3
--- /dev/null
+++ b/kern/act.c
@@ -0,0 +1,1118 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: act.c
+ *
+ * Activation management routines
+ *
+ */
+
+#ifdef MIGRATING_THREADS
+
+#include <string.h>
+
+#include <mach/kern_return.h>
+#include <mach/alert.h>
+#include <kern/slab.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/debug.h>
+#include <kern/act.h>
+#include <kern/current.h>
+#include "ipc_target.h"
+
+static void special_handler(ReturnHandler *rh, struct Act *act);
+
+#ifdef ACT_STATIC_KLUDGE
+#undef ACT_STATIC_KLUDGE
+#define ACT_STATIC_KLUDGE 300
+#endif
+
+#ifndef ACT_STATIC_KLUDGE
+static struct kmem_cache act_cache;
+#else
+static Act *act_freelist;
+static Act free_acts[ACT_STATIC_KLUDGE];
+#endif
+
+/* This is a rather special activation
+ which resides at the top and bottom of every thread.
+ When the last "real" activation on a thread is destroyed,
+ the null_act on the bottom gets invoked, destroying the thread.
+ At the top, the null_act acts as an "invalid" cached activation,
+ which will always fail the cached-activation test on RPC paths.
+
+ As you might expect, most of its members have no particular value.
+ alerts is zero. */
+Act null_act;
+
+/* One-time boot initialization of the activation subsystem:
+   sets up the act allocator (cache or static-kludge freelist)
+   and the machine-dependent activation state. */
+void
+global_act_init(void)
+{
+#ifndef ACT_STATIC_KLUDGE
+	kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
+			NULL, 0);
+#else
+	int i;
+
+/* Build a simple singly-linked freelist over the static array. */
+printf("activations: [%x-%x]\n", &free_acts[0], &free_acts[ACT_STATIC_KLUDGE]);
+	act_freelist = &free_acts[0];
+	free_acts[0].ipt_next = 0;
+	for (i = 1; i < ACT_STATIC_KLUDGE; i++) {
+		free_acts[i].ipt_next = act_freelist;
+		act_freelist = &free_acts[i];
+	}
+	/* XXX simple_lock_init(&act_freelist->lock); */
+#endif
+
+#if 0
+	simple_lock_init(&null_act.lock);
+	refcount_init(&null_act.ref_count, 1);
+#endif
+
+	act_machine_init();
+}
+
+/* Create a new activation in a specific task.
+ Locking: Task */
+kern_return_t act_create(task_t task, vm_offset_t user_stack,
+			 vm_offset_t user_rbuf, vm_size_t user_rbuf_size,
+			 struct Act **new_act)
+{
+	Act *act;
+
+#ifndef ACT_STATIC_KLUDGE
+	act = (Act*)kmem_cache_alloc(&act_cache);
+	if (act == 0)
+		return(KERN_RESOURCE_SHORTAGE);
+#else
+	/* Pop an act off the static freelist. */
+	/* XXX ipt_lock(act_freelist); */
+	act = act_freelist;
+	if (act == 0) panic("out of activations");
+	act_freelist = act->ipt_next;
+	/* XXX ipt_unlock(act_freelist); */
+	act->ipt_next = 0;
+#endif
+	memset(act, 0, sizeof(*act)); /*XXX shouldn't be needed */
+
+#ifdef DEBUG
+	act->lower = act->higher = 0;
+#endif
+
+	/* Start with one reference for being active, another for the caller */
+	simple_lock_init(&act->lock);
+	refcount_init(&act->ref_count, 2);
+
+	/* Latch onto the task. */
+	act->task = task;
+	task_reference(task);
+
+	/* Other simple setup */
+	act->ipt = 0;
+	act->thread = 0;
+	act->suspend_count = 0;
+	act->active = 1;
+	act->handlers = 0;
+
+	/* The special_handler will always be last on the returnhandlers list. */
+	act->special_handler.next = 0;
+	act->special_handler.handler = special_handler;
+
+	/* Set up the IPC and machine-dependent parts of the act. */
+	ipc_act_init(task, act);
+	act_machine_create(task, act, user_stack, user_rbuf, user_rbuf_size);
+
+	task_lock(task);
+
+	/* Chain the act onto the task's list */
+	act->task_links.next = task->acts.next;
+	act->task_links.prev = &task->acts;
+	task->acts.next->prev = &act->task_links;
+	task->acts.next = &act->task_links;
+	task->act_count++;
+
+	task_unlock(task);
+
+	*new_act = act;
+	return KERN_SUCCESS;
+}
+
+/* This is called when an act's ref_count drops to zero.
+ This can only happen when thread is zero (not in use),
+ ipt is zero (not attached to any ipt),
+ and active is false (terminated). */
+static void act_free(Act *inc)
+{
+	/* Tear down machine-dependent and IPC state first. */
+	act_machine_destroy(inc);
+	ipc_act_destroy(inc);
+
+	/* Drop the task reference. */
+	task_deallocate(inc->task);
+
+	/* Put the act back on the act cache */
+#ifndef ACT_STATIC_KLUDGE
+	kmem_cache_free(&act_cache, (vm_offset_t)inc);
+#else
+	/* Push the act back on the static freelist. */
+	/* XXX ipt_lock(act_freelist); */
+	inc->ipt_next = act_freelist;
+	act_freelist = inc;
+	/* XXX ipt_unlock(act_freelist); */
+#endif
+}
+
+/* Drop one reference on INC; act_free runs when the count hits zero.
+   NOTE(review): assumes refcount_drop evaluates its second argument
+   lazily, only on the final drop — confirm against its definition. */
+void act_deallocate(Act *inc)
+{
+	refcount_drop(&inc->ref_count, act_free(inc));
+}
+
+/* Attach an act to the top of a thread ("push the stack").
+ The thread must be either the current one or a brand-new one.
+ Assumes the act is active but not in use.
+ Assumes that if it is attached to an ipt (i.e. the ipt pointer is nonzero),
+ the act has already been taken off the ipt's list.
+
+ Already locked: cur_thread, act */
+void act_attach(Act *act, thread_t thread, unsigned init_alert_mask)
+{
+	Act *lower;
+
+	act->thread = thread;
+
+	/* The thread holds a reference to the activation while using it. */
+	refcount_take(&act->ref_count);
+
+	/* XXX detach any cached activations from above the target */
+
+	/* Chain the act onto the thread's act stack. */
+	lower = thread->top_act;
+	act->lower = lower;
+	lower->higher = act;
+	thread->top_act = act;
+
+	/* Inherit pending alerts from below, filtered by the new mask. */
+	act->alert_mask = init_alert_mask;
+	act->alerts = lower->alerts & init_alert_mask;
+}
+
+/* Remove the current act from the top of the current thread ("pop the stack").
+ Return it to the ipt it lives on, if any.
+ Locking: Thread > Act(not on ipt) > ipc_target */
+void act_detach(Act *cur_act)
+{
+	thread_t cur_thread = cur_act->thread;
+
+	thread_lock(cur_thread);
+	act_lock(cur_act);
+
+	/* Unlink the act from the thread's act stack */
+	cur_thread->top_act = cur_act->lower;
+	cur_act->thread = 0;
+#ifdef DEBUG
+	cur_act->lower = cur_act->higher = 0;
+#endif
+
+	thread_unlock(cur_thread);
+
+	/* Return it to the ipt's list */
+	if (cur_act->ipt)
+	{
+		/* Push the act back onto its ipt's available list. */
+		ipt_lock(cur_act->ipt);
+		cur_act->ipt_next = cur_act->ipt->ipt_acts;
+		cur_act->ipt->ipt_acts = cur_act;
+		ipt_unlock(cur_act->ipt);
+#if 0
+		printf("  return to ipt %x\n", cur_act->ipt);
+#endif
+	}
+
+	act_unlock(cur_act);
+
+	/* Drop the act reference taken for being in use. */
+	refcount_drop(&cur_act->ref_count, act_free(cur_act));
+}
+
+
+
+/*** Activation control support routines ***/
+
+/* This is called by system-dependent code
+ when it detects that act->handlers is non-null
+ while returning into user mode.
+ Activations linked onto an ipt always have null act->handlers,
+ so RPC entry paths need not check it.
+
+ Locking: Act */
+void act_execute_returnhandlers(void)
+{
+	Act *act = current_act();
+
+#if 0
+	printf("execute_returnhandlers\n");
+#endif
+	/* Drain the handler list one entry at a time, relocking between
+	   entries because a handler may add or remove handlers itself. */
+	while (1) {
+		ReturnHandler *rh;
+
+		/* Grab the next returnhandler */
+		act_lock(act);
+		rh = act->handlers;
+		if (!rh) {
+			act_unlock(act);
+			return;
+		}
+		act->handlers = rh->next;
+		act_unlock(act);
+
+		/* Execute it */
+		(*rh->handler)(rh, act);
+	}
+}
+
+/* Try to nudge an act into executing its returnhandler chain.
+ Ensures that the activation will execute its returnhandlers
+ before it next executes any of its user-level code.
+ Also ensures that it is safe to break the thread's activation chain
+ immediately above this activation,
+ by rolling out of any outstanding two-way-optimized RPC.
+
+ The target activation is not necessarily active
+ or even in use by a thread.
+ If it isn't, this routine does nothing.
+
+ Already locked: Act */
+static void act_nudge(struct Act *act)
+{
+	/* If it's suspended, wake it up. */
+	thread_wakeup(&act->suspend_count);
+
+	/* Do a machine-dependent low-level nudge.
+	   If we're on a multiprocessor,
+	   this may mean sending an interprocessor interrupt.
+	   In any case, it means rolling out of two-way-optimized RPC paths. */
+	act_machine_nudge(act);
+}
+
+/* Install the special returnhandler that handles suspension and termination,
+ if it hasn't been installed already.
+
+ Already locked: Act */
+static void install_special_handler(struct Act *act)
+{
+	ReturnHandler **rh;
+
+	/* The work handler must always be the last ReturnHandler on the list,
+	   because it can do tricky things like detach the act. */
+	for (rh = &act->handlers; *rh; rh = &(*rh)->next);
+	/* If the walk did not end at the special handler's own next
+	   pointer, the special handler is not yet on the list: append it. */
+	if (rh != &act->special_handler.next) {
+		*rh = &act->special_handler;
+	}
+
+	/* Nudge the target activation,
+	   to ensure that it will see the returnhandler we're adding. */
+	act_nudge(act);
+}
+
+/* Locking: Act */
+/* The always-last returnhandler: handles termination and suspension
+   of the current activation on its way back to user mode. */
+static void special_handler(ReturnHandler *rh, struct Act *cur_act)
+{
+	retry:
+
+	act_lock(cur_act);
+
+	/* If someone has killed this invocation,
+	   invoke the return path with a terminated exception. */
+	if (!cur_act->active) {
+		act_unlock(cur_act);
+		act_machine_return(KERN_TERMINATED);
+		/* XXX should just set the activation's reentry_routine
+		   and then return from special_handler().
+		   The magic reentry_routine should just pop its own activation
+		   and chain to the reentry_routine of the _lower_ activation.
+		   If that lower activation is the null_act,
+		   the thread will then be terminated. */
+	}
+
+	/* If we're suspended, go to sleep and wait for someone to wake us up. */
+	if (cur_act->suspend_count) {
+		act_unlock(cur_act);
+		/* XXX mp unsafe */
+		thread_wait((int)&cur_act->suspend_count, FALSE);
+
+		act_lock(cur_act);
+
+		/* If we're still (or again) suspended,
+		   go to sleep again after executing any new returnhandlers that may have appeared. */
+		if (cur_act->suspend_count)
+			install_special_handler(cur_act);
+	}
+
+	act_unlock(cur_act);
+}
+
+#if 0 /************************ OLD SEMI-OBSOLETE CODE *********************/
+static __dead void act_throughcall_return(Act *act)
+{
+ /* Done - destroy the act and return */
+ act_detach(act);
+ act_terminate(act);
+ act_deallocate(act);
+
+ /* XXX */
+ thread_terminate_self();
+}
+
+__dead void act_throughcall(task_t task, void (*infunc)())
+{
+ thread_t thread = current_thread();
+ Act *act;
+ ReturnHandler rh;
+ int rc;
+
+ rc = act_create(task, 0, 0, 0, &act);
+ if (rc) return rc;
+
+ act->return_routine = act_throughcall_return;
+
+ thread_lock(thread);
+ act_lock(act);
+
+ act_attach(thread, act, 0);
+
+ rh.handler = infunc;
+ rh.next = act->handlers;
+ act->handlers = &rh;
+
+ act_unlock(act);
+ thread_unlock(thread);
+
+ /* Call through the act into the returnhandler list */
+ act_machine_throughcall(act);
+}
+
+
+/* Grab an act from the specified pool, to pass to act_upcall.
+ Returns with the act locked, since it's in an inconsistent state
+ (not on its ipt but not on a thread either).
+ Returns null if no acts are available on the ipt.
+
+ Locking: ipc_target > Act(on ipt) */
+Act *act_grab(struct ipc_target *ipt)
+{
+ Act *act;
+
+ ipt_lock(ipt);
+
+ retry:
+
+ /* Pull an act off the ipt's list. */
+ act = ipt->acts;
+ if (!act)
+ goto none_avail;
+ ipt->acts = act->ipt_next;
+
+ act_lock(act);
+
+ /* If it's been terminated, drop it and get another one. */
+ if (!act->active) {
+#if 0
+ printf("dropping terminated act %08x\n", act);
+#endif
+ /* XXX ipt_deallocate(ipt); */
+ act->ipt = 0;
+ act_unlock(act);
+ act_deallocate(act);
+ goto retry;
+ }
+
+none_avail:
+ ipt_unlock(ipt);
+
+ return act;
+}
+
+/* Try to make an upcall with an act on the specified ipt.
+ If the ipt is empty, returns KERN_RESOURCE_SHORTAGE. XXX???
+
+ Locking: ipc_target > Act > Thread */
+kern_return_t act_upcall(struct Act *act, unsigned init_alert_mask,
+ vm_offset_t user_entrypoint, vm_offset_t user_data)
+{
+ thread_t cur_thread = current_thread();
+ int rc;
+
+ /* XXX locking */
+
+ act_attach(cur_thread, act, init_alert_mask);
+
+ /* Make the upcall into the destination task */
+ rc = act_machine_upcall(act, user_entrypoint, user_data);
+
+ /* Done - detach the act and return */
+ act_detach(act);
+
+ return rc;
+}
+#endif /************************ END OF OLD SEMI-OBSOLETE CODE *********************/
+
+
+
+
+/*** Act service routines ***/
+
+/* Lock this act and its current thread.
+ We can only find the thread from the act
+ and the thread must be locked before the act,
+ requiring a little icky juggling.
+
+ If the thread is not currently on any thread,
+ returns with only the act locked.
+
+ Note that this routine is not called on any performance-critical path.
+ It is only for explicit act operations
+ which don't happen often.
+
+ Locking: Thread > Act */
+static thread_t act_lock_thread(Act *act)
+{
+	thread_t thread;
+
+	retry:
+
+	/* Find the thread */
+	act_lock(act);
+	thread = act->thread;
+	if (thread == 0)
+	{
+		act_unlock(act);
+		return 0;
+	}
+	/* Hold a reference so the thread can't die while we juggle locks. */
+	thread_reference(thread);
+	act_unlock(act);
+
+	/* Lock the thread and re-lock the act,
+	   and make sure the thread didn't change. */
+	thread_lock(thread);
+	act_lock(act);
+	if (act->thread != thread)
+	{
+		/* Raced with a detach/attach: undo and start over. */
+		act_unlock(act);
+		thread_unlock(thread);
+		thread_deallocate(thread);
+		goto retry;
+	}
+
+	thread_deallocate(thread);
+
+	return thread;
+}
+
+/* Already locked: act->task
+ Locking: Task > Act */
+kern_return_t act_terminate_task_locked(struct Act *act)
+{
+	act_lock(act);
+
+	/* Termination is idempotent: only the first caller does the work. */
+	if (act->active)
+	{
+		/* Unlink the act from the task's act list,
+		   so it doesn't appear in calls to task_acts and such.
+		   The act still keeps its ref on the task, however,
+		   until it loses all its own references and is freed. */
+		act->task_links.next->prev = act->task_links.prev;
+		act->task_links.prev->next = act->task_links.next;
+		act->task->act_count--;
+
+		/* Remove it from any ipc_target. XXX is this right? */
+		act_set_target(act, 0);
+
+		/* This will allow no more control operations on this act. */
+		act->active = 0;
+
+		/* When the special_handler gets executed,
+		   it will see the terminated condition and exit immediately. */
+		install_special_handler(act);
+
+		/* Drop the act reference taken for being active.
+		   (There is still at least one reference left: the one we were passed.) */
+		act_deallocate(act);
+	}
+
+	act_unlock(act);
+
+	return KERN_SUCCESS;
+}
+
+/* Locking: Task > Act */
+/* Terminate ACT: lock its task and do the real work in
+   act_terminate_task_locked.
+   (Cleanup: the local `task' was previously declared but never used;
+   use it for the lock/unlock pair instead of re-reading act->task.) */
+kern_return_t act_terminate(struct Act *act)
+{
+	task_t task = act->task;
+	kern_return_t rc;
+
+	/* act->task never changes,
+	   so we can read it before locking the act. */
+	task_lock(task);
+
+	rc = act_terminate_task_locked(act);
+
+	task_unlock(task);
+
+	return rc;
+}
+
+/* If this Act is on a Thread and is not the topmost,
+ yank it and everything below it off of the thread's stack
+ and put it all on a new thread forked from the original one.
+ May fail due to resource shortage, but can always be retried.
+
+ Locking: Thread > Act */
+kern_return_t act_yank(Act *act)
+{
+	thread_t thread = act_lock_thread(act);
+
+#if 0
+	printf("act_yank inc %08x thread %08x\n", act, thread);
+#endif
+	if (thread)
+	{
+		if (thread->top_act != act)
+		{
+			printf("detaching act %08x from thread %08x\n", act, thread);
+
+			/* Nudge the activation into a clean point for detachment. */
+			act_nudge(act);
+
+			/* Now detach the activation
+			   and give the orphan its own flow of control. */
+			/*XXX*/
+		}
+
+		thread_unlock(thread);
+	}
+	act_unlock(act);
+
+	/* Ask the thread to return as quickly as possible,
+	   because its results are now useless. */
+	act_abort(act);
+
+	return KERN_SUCCESS;
+}
+
+/* Assign an activation to a specific ipc_target.
+   Fails if the activation is already assigned to another pool.
+   If ipt == 0, we remove the act from its current ipt.
+
+   Locking: Act(not on ipt) > ipc_target > Act(on ipt) */
+kern_return_t act_set_target(Act *act, struct ipc_target *ipt)
+{
+	act_lock(act);
+
+	if (ipt == 0)
+	{
+		Act **lact;
+
+		ipt = act->ipt;
+		if (ipt == 0)
+		{
+			/* Not attached to any ipt: nothing to remove.
+			   (Fixed: the original returned bare from a
+			   kern_return_t function with the act still locked.) */
+			act_unlock(act);
+			return KERN_SUCCESS;
+		}
+
+		/* XXX This is a violation of the locking order. */
+		ipt_lock(ipt);
+		/* Unlink the act from the ipt's available list, if present. */
+		for (lact = &ipt->ipt_acts; *lact; lact = &((*lact)->ipt_next))
+			if (act == *lact)
+			{
+				*lact = act->ipt_next;
+				break;
+			}
+		ipt_unlock(ipt);
+
+		act->ipt = 0;
+		/* XXX ipt_deallocate(ipt); */
+		/* Unlock before dropping the reference taken for the ipt
+		   (fixed: the original leaked the act lock and returned
+		   no value here; dropping the ref after unlocking also
+		   avoids touching a potentially-freed act). */
+		act_unlock(act);
+		act_deallocate(act);
+		return KERN_SUCCESS;
+	}
+	if (act->ipt != ipt)
+	{
+		if (act->ipt != 0)
+		{
+			/* Already assigned to a different ipt. */
+			act_unlock(act);
+			return KERN_FAILURE;	/*XXX*/
+		}
+		act->ipt = ipt;
+		ipt->ipt_type |= IPT_TYPE_MIGRATE_RPC;
+
+		/* They get references to each other. */
+		act_reference(act);
+		ipt_reference(ipt);
+
+		/* If it is available,
+		   add it to the ipt's available-activation list. */
+		if ((act->thread == 0) && (act->suspend_count == 0))
+		{
+			ipt_lock(ipt);
+			act->ipt_next = ipt->ipt_acts;
+			act->ipt->ipt_acts = act;
+			ipt_unlock(ipt);
+		}
+	}
+	act_unlock(act);
+
+	return KERN_SUCCESS;
+}
+
+/* Register an alert from this activation.
+ Each set bit is propagated upward from (but not including) this activation,
+ until the top of the chain is reached or the bit is masked.
+
+ Locking: Thread > Act */
+kern_return_t act_alert(struct Act *act, unsigned alerts)
+{
+	thread_t thread = act_lock_thread(act);
+
+#if 0
+	printf("act_alert %08x: %08x\n", act, alerts);
+#endif
+	if (thread)
+	{
+		struct Act *act_up = act;
+		/* Propagate each alert bit upward until masked out
+		   or the top of the activation stack is reached. */
+		while ((alerts) && (act_up != thread->top_act))
+		{
+			act_up = act_up->higher;
+			alerts &= act_up->alert_mask;
+			act_up->alerts |= alerts;
+		}
+
+		/* XXX If we reach the top, and it is blocked in glue code, do something. */
+
+		thread_unlock(thread);
+	}
+	act_unlock(act);
+
+	return KERN_SUCCESS;
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_abort(struct Act *act)
+{
+ return act_alert(act, ALERT_ABORT_STRONG);
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_abort_safely(struct Act *act)
+{
+ return act_alert(act, ALERT_ABORT_SAFE);
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_alert_mask(struct Act *act, unsigned alert_mask)
+{
+ panic("act_alert_mask\n");
+ return KERN_SUCCESS;
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_suspend(struct Act *act)
+{
+ thread_t thread = act_lock_thread(act);
+ kern_return_t rc = KERN_SUCCESS;
+
+#if 0
+ printf("act_suspend %08x\n", act);
+#endif
+ if (act->active)
+ {
+ if (act->suspend_count++ == 0)
+ {
+ /* XXX remove from ipt */
+ install_special_handler(act);
+ act_nudge(act);
+ }
+ }
+ else
+ rc = KERN_TERMINATED;
+
+ if (thread)
+ thread_unlock(thread);
+ act_unlock(act);
+
+ return rc;
+}
+
+/* Locking: Act */
+kern_return_t act_resume(struct Act *act)
+{
+#if 0
+ printf("act_resume %08x from %d\n", act, act->suspend_count);
+#endif
+
+ act_lock(act);
+ if (!act->active)
+ {
+ act_unlock(act);
+ return KERN_TERMINATED;
+ }
+
+ if (act->suspend_count > 0) {
+ if (--act->suspend_count == 0) {
+ thread_wakeup(&act->suspend_count);
+ /* XXX return to ipt */
+ }
+ }
+
+ act_unlock(act);
+
+ return KERN_SUCCESS;
+}
+
+typedef struct GetSetState {
+ struct ReturnHandler rh;
+ int flavor;
+ void *state;
+ int *pcount;
+ int result;
+} GetSetState;
+
+/* Locking: Thread */
+kern_return_t get_set_state(struct Act *act, int flavor, void *state, int *pcount,
+ void (*handler)(ReturnHandler *rh, struct Act *act))
+{
+ GetSetState gss;
+
+ /* Initialize a small parameter structure */
+ gss.rh.handler = handler;
+ gss.flavor = flavor;
+ gss.state = state;
+ gss.pcount = pcount;
+
+ /* Add it to the act's return handler list */
+ act_lock(act);
+ gss.rh.next = act->handlers;
+ act->handlers = &gss.rh;
+
+ act_nudge(act);
+
+ act_unlock(act);
+ /* XXX mp unsafe */
+ thread_wait((int)&gss, 0); /* XXX could be interruptible */
+
+ return gss.result;
+}
+
+static void get_state_handler(ReturnHandler *rh, struct Act *act)
+{
+ GetSetState *gss = (GetSetState*)rh;
+
+ gss->result = act_machine_get_state(act, gss->flavor, gss->state, gss->pcount);
+ thread_wakeup((int)gss);
+}
+
+/* Locking: Thread */
+kern_return_t act_get_state(struct Act *act, int flavor, natural_t *state, natural_t *pcount)
+{
+ return get_set_state(act, flavor, state, pcount, get_state_handler);
+}
+
+static void set_state_handler(ReturnHandler *rh, struct Act *act)
+{
+ GetSetState *gss = (GetSetState*)rh;
+
+ gss->result = act_machine_set_state(act, gss->flavor, gss->state, *gss->pcount);
+ thread_wakeup((int)gss);
+}
+
+/* Locking: Thread */
+kern_return_t act_set_state(struct Act *act, int flavor, natural_t *state, natural_t count)
+{
+ return get_set_state(act, flavor, state, &count, set_state_handler);
+}
+
+
+
+/*** backward compatibility hacks ***/
+
+#include <mach/thread_info.h>
+#include <mach/thread_special_ports.h>
+#include <ipc/ipc_port.h>
+
+kern_return_t act_thread_info(Act *act, int flavor,
+ thread_info_t thread_info_out, unsigned *thread_info_count)
+{
+ return thread_info(act->thread, flavor, thread_info_out, thread_info_count);
+}
+
+kern_return_t
+act_thread_assign(Act *act, processor_set_t new_pset)
+{
+ return thread_assign(act->thread, new_pset);
+}
+
+kern_return_t
+act_thread_assign_default(Act *act)
+{
+ return thread_assign_default(act->thread);
+}
+
+kern_return_t
+act_thread_get_assignment(Act *act, processor_set_t *pset)
+{
+ return thread_get_assignment(act->thread, pset);
+}
+
+kern_return_t
+act_thread_priority(Act *act, int priority, boolean_t set_max)
+{
+ return thread_priority(act->thread, priority, set_max);
+}
+
+kern_return_t
+act_thread_max_priority(Act *act, processor_set_t *pset, int max_priority)
+{
+ return thread_max_priority(act->thread, pset, max_priority);
+}
+
+kern_return_t
+act_thread_policy(Act *act, int policy, int data)
+{
+ return thread_policy(act->thread, policy, data);
+}
+
+kern_return_t
+act_thread_wire(struct host *host, Act *act, boolean_t wired)
+{
+ return thread_wire(host, act->thread, wired);
+}
+
+kern_return_t
+act_thread_depress_abort(Act *act)
+{
+ return thread_depress_abort(act->thread);
+}
+
+/*
+ * Routine: act_get_special_port [kernel call]
+ * Purpose:
+ * Clones a send right for one of the thread's
+ * special ports.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted a send right.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+act_get_special_port(Act *act, int which, ipc_port_t *portp)
+{
+ ipc_port_t *whichp;
+ ipc_port_t port;
+
+#if 0
+ printf("act_get_special_port\n");
+#endif
+ if (act == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+ case THREAD_KERNEL_PORT:
+ whichp = &act->self_port;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &act->exception_port;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_lock(act->thread);
+
+ if (act->self_port == IP_NULL) {
+ thread_unlock(act->thread);
+ return KERN_FAILURE;
+ }
+
+ port = ipc_port_copy_send(*whichp);
+ thread_unlock(act->thread);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: act_set_special_port [kernel call]
+ * Purpose:
+ * Changes one of the thread's special ports,
+ * setting it to the supplied send right.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied send right.
+ * Returns:
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+act_set_special_port(Act *act, int which, ipc_port_t port)
+{
+ ipc_port_t *whichp;
+ ipc_port_t old;
+
+#if 0
+ printf("act_set_special_port\n");
+#endif
+ if (act == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+ case THREAD_KERNEL_PORT:
+ whichp = &act->self_port;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &act->exception_port;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_lock(act->thread);
+ if (act->self_port == IP_NULL) {
+ thread_unlock(act->thread);
+ return KERN_FAILURE;
+ }
+
+ old = *whichp;
+ *whichp = port;
+ thread_unlock(act->thread);
+
+ if (IP_VALID(old))
+ ipc_port_release_send(old);
+ return KERN_SUCCESS;
+}
+
+/*
+ * XXX lame, non-blocking ways to get/set state.
+ * Return thread's machine-dependent state.
+ */
+kern_return_t
+act_get_state_immediate(
+ Act *act,
+ int flavor,
+ void *old_state, /* pointer to OUT array */
+ unsigned int *old_state_count) /*IN/OUT*/
+{
+ kern_return_t ret;
+
+ act_lock(act);
+ /* not the top activation, return current state */
+ if (act->thread && act->thread->top_act != act) {
+ ret = act_machine_get_state(act, flavor,
+ old_state, old_state_count);
+ act_unlock(act);
+ return ret;
+ }
+ act_unlock(act);
+
+ /* not sure this makes sense */
+ return act_get_state(act, flavor, old_state, old_state_count);
+}
+
+/*
+ * Change thread's machine-dependent state.
+ */
+kern_return_t
+act_set_state_immediate(
+ Act *act,
+ int flavor,
+ void *new_state,
+ unsigned int new_state_count)
+{
+ kern_return_t ret;
+
+ act_lock(act);
+ /* not the top activation, set it now */
+ if (act->thread && act->thread->top_act != act) {
+ ret = act_machine_set_state(act, flavor,
+ new_state, new_state_count);
+ act_unlock(act);
+ return ret;
+ }
+ act_unlock(act);
+
+ /* not sure this makes sense */
+ return act_set_state(act, flavor, new_state, new_state_count);
+}
+
+void act_count(void)
+{
+ int i;
+ Act *act;
+ static int amin = ACT_STATIC_KLUDGE;
+
+ i = 0;
+ for (act = act_freelist; act; act = act->ipt_next)
+ i++;
+ if (i < amin)
+ amin = i;
+ printf("%d of %d activations in use, %d max\n",
+ ACT_STATIC_KLUDGE-i, ACT_STATIC_KLUDGE, ACT_STATIC_KLUDGE-amin);
+}
+
+void dump_act(act)
+ Act *act;
+{
+ act_count();
+ kact_count();
+ while (act) {
+ printf("%08.8x: thread=%x, task=%x, hi=%x, lo=%x, ref=%x\n",
+ act, act->thread, act->task,
+ act->higher, act->lower, act->ref_count);
+ printf("\talerts=%x, mask=%x, susp=%x, active=%x\n",
+ act->alerts, act->alert_mask,
+ act->suspend_count, act->active);
+ machine_dump_act(&act->mact);
+ if (act == act->lower)
+ break;
+ act = act->lower;
+ }
+}
+
+#ifdef ACTWATCH
+Act *
+get_next_act(int sp)
+{
+ static int i;
+ Act *act;
+
+ while (1) {
+ if (i == ACT_STATIC_KLUDGE) {
+ i = 0;
+ return 0;
+ }
+ act = &free_acts[i];
+ i++;
+ if (act->mact.space == sp)
+ return act;
+ }
+}
+#endif /* ACTWATCH */
+
+#endif /* MIGRATING_THREADS */
diff --git a/kern/act.h b/kern/act.h
new file mode 100644
index 0000000..f46f53a
--- /dev/null
+++ b/kern/act.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: act.h
+ *
+ * This defines the Act structure,
+ * which is the kernel representation of a user-space activation.
+ *
+ */
+
+#ifndef _KERN_ACT_H_
+#define _KERN_ACT_H_
+
+#ifdef MIGRATING_THREADS
+
+#ifndef __dead /* XXX */
+#define __dead
+#endif
+
+#include <mach/vm_param.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/refcount.h>
+#include <kern/queue.h>
+
+struct task;
+struct thread;
+struct Act;
+
+
+struct ReturnHandler {
+ struct ReturnHandler *next;
+ void (*handler)(struct ReturnHandler *rh, struct Act *act);
+};
+typedef struct ReturnHandler ReturnHandler;
+
+
+
+struct Act {
+
+ /*** Task linkage ***/
+
+ /* Links for task's circular list of activations.
+ The activation is only on the task's activation list while active.
+ Must be first. */
+ queue_chain_t task_links;
+
+ /* Reference to the task this activation is in.
+ This is constant as long as the activation is allocated. */
+ struct task *task;
+
+
+
+ /*** Machine-dependent state ***/
+ /* XXX should be first to allow maximum flexibility to MD code */
+ MachineAct mact;
+
+
+
+ /*** Consistency ***/
+ RefCount ref_count;
+ decl_simple_lock_data(,lock)
+
+
+
+ /*** ipc_target-related stuff ***/
+
+ /* ActPool this activation normally lives on, zero if none.
+ The activation and actpool hold references to each other as long as this is nonzero
+ (even when the activation isn't actually on the actpool's list). */
+ struct ipc_target *ipt;
+
+ /* Link on the ipt's list of activations.
+ The activation is only actually on the ipt's list (and hence this is valid)
+ when we're not in use (thread == 0) and not suspended (suspend_count == 0). */
+ struct Act *ipt_next;
+
+
+
+ /*** Thread linkage ***/
+
+ /* Thread this activation is in, zero if not in use.
+ The thread holds a reference on the activation while this is nonzero. */
+ struct thread *thread;
+
+ /* The rest in this section is only valid when thread is nonzero. */
+
+ /* Next higher and next lower activation on the thread's activation stack.
+ For a topmost activation or the null_act, higher is undefined.
+ The bottommost activation is always the null_act. */
+ struct Act *higher, *lower;
+
+ /* Alert bits pending at this activation;
+ some of them may have propagated from lower activations. */
+ unsigned alerts;
+
+ /* Mask of alert bits to be allowed to pass through from lower levels. */
+ unsigned alert_mask;
+
+
+
+ /*** Control information ***/
+
+ /* Number of outstanding suspensions on this activation. */
+ int suspend_count;
+
+ /* This is normally true, but is set to false when the activation is terminated. */
+ int active;
+
+ /* Chain of return handlers to be called
+ before the thread is allowed to return to this invocation */
+ ReturnHandler *handlers;
+
+ /* A special ReturnHandler attached to the above chain to handle suspension and such */
+ ReturnHandler special_handler;
+
+
+
+ /* Special ports attached to this activation */
+ struct ipc_port *self; /* not a right, doesn't hold ref */
+ struct ipc_port *self_port; /* a send right */
+ struct ipc_port *exception_port; /* a send right */
+ struct ipc_port *syscall_port; /* a send right */
+};
+typedef struct Act Act;
+typedef struct Act *act_t;
+typedef mach_port_t *act_array_t;
+
+#define ACT_NULL ((Act*)0)
+
+
+/* Exported to world */
+kern_return_t act_create(struct task *task, vm_offset_t user_stack, vm_offset_t user_rbuf, vm_size_t user_rbuf_size, struct Act **new_act);
+kern_return_t act_alert_mask(struct Act *act, unsigned alert_mask);
+kern_return_t act_alert(struct Act *act, unsigned alerts);
+kern_return_t act_abort(struct Act *act);
+kern_return_t act_abort_safely(struct Act *act);
+kern_return_t act_terminate(struct Act *act);
+kern_return_t act_suspend(struct Act *act);
+kern_return_t act_resume(struct Act *act);
+kern_return_t act_get_state(struct Act *act, int flavor,
+ natural_t *state, natural_t *pcount);
+kern_return_t act_set_state(struct Act *act, int flavor,
+ natural_t *state, natural_t count);
+
+#define act_lock(act) simple_lock(&(act)->lock)
+#define act_unlock(act) simple_unlock(&(act)->lock)
+
+#define act_reference(act) refcount_take(&(act)->ref_count)
+void act_deallocate(struct Act *act);
+
+/* Exported to startup.c */
+void act_init(void);
+
+/* Exported to task.c */
+kern_return_t act_terminate_task_locked(struct Act *act);
+
+/* Exported to thread.c */
+extern Act null_act;
+
+/* Exported to machine-dependent activation code */
+void act_execute_returnhandlers(void);
+
+
+
+/* System-dependent functions */
+kern_return_t act_machine_create(struct task *task, Act *inc, vm_offset_t user_stack, vm_offset_t user_rbuf, vm_size_t user_rbuf_size);
+void act_machine_destroy(Act *inc);
+kern_return_t act_machine_set_state(Act *inc, int flavor, int *tstate, unsigned count);
+kern_return_t act_machine_get_state(Act *inc, int flavor, int *tstate, unsigned *count);
+
+
+
+#endif /* MIGRATING_THREADS */
+#endif /* _KERN_ACT_H_ */
diff --git a/kern/assert.h b/kern/assert.h
new file mode 100644
index 0000000..fed2a20
--- /dev/null
+++ b/kern/assert.h
@@ -0,0 +1,54 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_ASSERT_H_
+#define _KERN_ASSERT_H_
+
+/* assert.h 4.2 85/01/21 */
+
+#include <kern/macros.h>
+
+#ifndef NDEBUG
+#define MACH_ASSERT 1
+#endif
+
+#if MACH_ASSERT
+extern void Assert(const char *exp, const char *filename, int line,
+ const char *fun) __attribute__ ((noreturn));
+
+#define assert(ex) \
+ (likely(ex) \
+ ? (void) (0) \
+ : Assert (#ex, __FILE__, __LINE__, __FUNCTION__))
+
+#define assert_static(x) assert(x)
+
+#else /* MACH_ASSERT */
+#define assert(ex)
+#define assert_static(ex)
+#endif /* MACH_ASSERT */
+
+#endif /* _KERN_ASSERT_H_ */
diff --git a/kern/ast.c b/kern/ast.c
new file mode 100644
index 0000000..8c514b3
--- /dev/null
+++ b/kern/ast.c
@@ -0,0 +1,235 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * This file contains routines to check whether an ast is needed.
+ *
+ * ast_check() - check whether ast is needed for interrupt or context
+ * switch. Usually called by clock interrupt handler.
+ *
+ */
+
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include "cpu_number.h"
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+#include <device/net_io.h>
+
+#include <machine/machspl.h> /* for splsched */
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif /* MACH_FIXPRI */
+
+
+volatile ast_t need_ast[NCPUS];
+
+void
+ast_init(void)
+{
+#ifndef MACHINE_AST
+ int i;
+
+ for (i=0; i<NCPUS; i++)
+ need_ast[i] = 0;
+#endif /* MACHINE_AST */
+}
+
+void
+ast_taken(void)
+{
+ thread_t self = current_thread();
+ ast_t reasons;
+
+ /*
+ * Interrupts are still disabled.
+ * We must clear need_ast and then enable interrupts.
+ */
+
+ reasons = need_ast[cpu_number()];
+ need_ast[cpu_number()] = AST_ZILCH;
+ (void) spl0();
+
+ /*
+ * These actions must not block.
+ */
+
+ if (reasons & AST_NETWORK)
+ net_ast();
+
+ /*
+ * Make darn sure that we don't call thread_halt_self
+ * or thread_block from the idle thread.
+ */
+
+ if (self != current_processor()->idle_thread) {
+#ifndef MIGRATING_THREADS
+ while (thread_should_halt(self))
+ thread_halt_self(thread_exception_return);
+#endif
+
+ /*
+ * One of the previous actions might well have
+ * woken a high-priority thread, so we use
+ * csw_needed in addition to AST_BLOCK.
+ */
+
+ if ((reasons & AST_BLOCK) ||
+ csw_needed(self, current_processor())) {
+ counter(c_ast_taken_block++);
+ thread_block(thread_exception_return);
+ }
+ }
+}
+
+void
+ast_check(void)
+{
+ int mycpu = cpu_number();
+ processor_t myprocessor;
+ thread_t thread = current_thread();
+ run_queue_t rq;
+ spl_t s = splsched();
+
+ /*
+ * Check processor state for ast conditions.
+ */
+ myprocessor = cpu_to_processor(mycpu);
+ switch(myprocessor->state) {
+ case PROCESSOR_OFF_LINE:
+ case PROCESSOR_IDLE:
+ case PROCESSOR_DISPATCHING:
+ /*
+ * No ast.
+ */
+ break;
+
+#if NCPUS > 1
+ case PROCESSOR_ASSIGN:
+ case PROCESSOR_SHUTDOWN:
+ /*
+ * Need ast to force action thread onto processor.
+ *
+ * XXX Should check if action thread is already there.
+ */
+ ast_on(mycpu, AST_BLOCK);
+ break;
+#endif /* NCPUS > 1 */
+
+ case PROCESSOR_RUNNING:
+
+ /*
+ * Propagate thread ast to processor. If we already
+ * need an ast, don't look for more reasons.
+ */
+ ast_propagate(thread, mycpu);
+ if (ast_needed(mycpu))
+ break;
+
+ /*
+ * Context switch check. The csw_needed macro isn't
+ * used here because the rq->low hint may be wrong,
+ * and fixing it here avoids an extra ast.
+ * First check the easy cases.
+ */
+ if (thread->state & TH_SUSP || myprocessor->runq.count > 0) {
+ ast_on(mycpu, AST_BLOCK);
+ break;
+ }
+
+ /*
+ * Update lazy evaluated runq->low if only timesharing.
+ */
+#if MACH_FIXPRI
+ if (myprocessor->processor_set->policies & POLICY_FIXEDPRI) {
+ if (csw_needed(thread,myprocessor)) {
+ ast_on(mycpu, AST_BLOCK);
+ break;
+ }
+ else {
+ /*
+ * For fixed priority threads, set first_quantum
+ * so entire new quantum is used.
+ */
+ if (thread->policy == POLICY_FIXEDPRI)
+ myprocessor->first_quantum = TRUE;
+ }
+ }
+ else {
+#endif /* MACH_FIXPRI */
+ rq = &(myprocessor->processor_set->runq);
+ if (!(myprocessor->first_quantum) && (rq->count > 0)) {
+ queue_t q;
+ /*
+ * This is not the first quantum, and there may
+ * be something in the processor_set runq.
+ * Check whether low hint is accurate.
+ */
+ q = rq->runq + *(volatile int *)&rq->low;
+ if (queue_empty(q)) {
+ int i;
+
+ /*
+ * Need to recheck and possibly update hint.
+ */
+ runq_lock(rq);
+ q = rq->runq + rq->low;
+ if (rq->count > 0) {
+ for (i = rq->low; i < NRQS; i++) {
+ if(!(queue_empty(q)))
+ break;
+ q++;
+ }
+ rq->low = i;
+ }
+ runq_unlock(rq);
+ }
+
+ if (rq->low <= thread->sched_pri) {
+ ast_on(mycpu, AST_BLOCK);
+ break;
+ }
+ }
+#if MACH_FIXPRI
+ }
+#endif /* MACH_FIXPRI */
+ break;
+
+ default:
+ panic("ast_check: Bad processor state (cpu %d processor %p) state: %d",
+ mycpu, myprocessor, myprocessor->state);
+ }
+
+ (void) splx(s);
+}
diff --git a/kern/ast.h b/kern/ast.h
new file mode 100644
index 0000000..aded167
--- /dev/null
+++ b/kern/ast.h
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * kern/ast.h: Definitions for Asynchronous System Traps.
+ */
+
+#ifndef _KERN_AST_H_
+#define _KERN_AST_H_
+
+/*
+ * A CPU takes an AST when it is about to return to user code.
+ * Instead of going back to user code, it calls ast_taken.
+ * Machine-dependent code is responsible for maintaining
+ * a set of reasons for an AST, and passing this set to ast_taken.
+ */
+
+#include <kern/kern_types.h>
+#include <kern/macros.h>
+#include <machine/ast.h>
+
+/*
+ * Bits for reasons
+ */
+
+#define AST_ZILCH 0x0
+#define AST_HALT 0x1
+#define AST_TERMINATE 0x2
+#define AST_BLOCK 0x4
+#define AST_NETWORK 0x8
+#define AST_NETIPC 0x10
+
+#define AST_SCHEDULING (AST_HALT|AST_TERMINATE|AST_BLOCK)
+
+/*
+ * Per-thread ASTs are reset at context-switch time.
+ * machine/ast.h can define MACHINE_AST_PER_THREAD.
+ */
+
+#ifndef MACHINE_AST_PER_THREAD
+#define MACHINE_AST_PER_THREAD 0
+#endif
+
+#define AST_PER_THREAD (AST_HALT | AST_TERMINATE | MACHINE_AST_PER_THREAD)
+
+typedef unsigned long ast_t;
+
+extern volatile ast_t need_ast[NCPUS];
+
+#ifdef MACHINE_AST
+/*
+ * machine/ast.h is responsible for defining aston and astoff.
+ */
+#else /* MACHINE_AST */
+
+#define aston(mycpu)
+#define astoff(mycpu)
+
+#endif /* MACHINE_AST */
+
+extern void ast_taken(void);
+
+/*
+ * ast_needed, ast_on, ast_off, ast_context, and ast_propagate
+ * assume splsched. mycpu is always cpu_number(). It is an
+ * argument in case cpu_number() is expensive.
+ */
+
+#define ast_needed(mycpu) need_ast[mycpu]
+
+#define ast_on(mycpu, reasons) \
+MACRO_BEGIN \
+ if ((need_ast[mycpu] |= (reasons)) != AST_ZILCH) \
+ { aston(mycpu); } \
+MACRO_END
+
+#define ast_off(mycpu, reasons) \
+MACRO_BEGIN \
+ if ((need_ast[mycpu] &= ~(reasons)) == AST_ZILCH) \
+ { astoff(mycpu); } \
+MACRO_END
+
+#define ast_propagate(thread, mycpu) ast_on((mycpu), (thread)->ast)
+
+#define ast_context(thread, mycpu) \
+MACRO_BEGIN \
+ if ((need_ast[mycpu] = \
+ (need_ast[mycpu] &~ AST_PER_THREAD) | (thread)->ast) \
+ != AST_ZILCH) \
+ { aston(mycpu); } \
+ else \
+ { astoff(mycpu); } \
+MACRO_END
+
+
+#define thread_ast_set(thread, reason) (thread)->ast |= (reason)
+#define thread_ast_clear(thread, reason) (thread)->ast &= ~(reason)
+#define thread_ast_clear_all(thread) (thread)->ast = AST_ZILCH
+
+/*
+ * NOTE: if thread is the current thread, thread_ast_set should
+ * be followed by ast_propagate().
+ */
+
+extern void ast_init (void);
+
+extern void ast_check (void);
+
+#if NCPUS > 1
+extern void init_ast_check(const processor_t processor);
+extern void cause_ast_check(const processor_t processor);
+#endif
+
+#endif /* _KERN_AST_H_ */
diff --git a/kern/atomic.h b/kern/atomic.h
new file mode 100644
index 0000000..00da164
--- /dev/null
+++ b/kern/atomic.h
@@ -0,0 +1,54 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _KERN_ATOMIC_H_
+#define _KERN_ATOMIC_H_ 1
+
+/* Atomically compare *PTR with EXP and set it to NVAL if they're equal.
+ * Evaluates to a boolean, indicating whether the comparison was successful.*/
+#define __atomic_cas_helper(ptr, exp, nval, mo) \
+ ({ \
+ typeof(exp) __e = (exp); \
+ __atomic_compare_exchange_n ((ptr), &__e, (nval), 0, \
+ __ATOMIC_##mo, __ATOMIC_RELAXED); \
+ })
+
+#define atomic_cas_acq(ptr, exp, nval) \
+ __atomic_cas_helper (ptr, exp, nval, ACQUIRE)
+
+#define atomic_cas_rel(ptr, exp, nval) \
+ __atomic_cas_helper (ptr, exp, nval, RELEASE)
+
+#define atomic_cas_seq(ptr, exp, nval) \
+ __atomic_cas_helper (ptr, exp, nval, SEQ_CST)
+
+/* Atomically exchange the value of *PTR with VAL, evaluating to
+ * its previous value. */
+#define __atomic_swap_helper(ptr, val, mo) \
+ __atomic_exchange_n ((ptr), (val), __ATOMIC_##mo)
+
+#define atomic_swap_acq(ptr, val) \
+ __atomic_swap_helper (ptr, val, ACQUIRE)
+
+#define atomic_swap_rel(ptr, val) \
+ __atomic_swap_helper (ptr, val, RELEASE)
+
+#define atomic_swap_seq(ptr, val) \
+ __atomic_swap_helper (ptr, val, SEQ_CST)
+
+#endif
diff --git a/kern/boot_script.c b/kern/boot_script.c
new file mode 100644
index 0000000..07ce4b3
--- /dev/null
+++ b/kern/boot_script.c
@@ -0,0 +1,791 @@
+/* Boot script parser for Mach. */
+
+/* Written by Shantanu Goel (goel@cs.columbia.edu). */
+
+#include <mach/mach_types.h>
+#include <string.h>
+#include <kern/printf.h>
+#include "boot_script.h"
+#include "bootstrap.h"
+
+
+/* This structure describes a symbol. */
+struct sym
+{
+ /* Symbol name. */
+ const char *name;
+
+ /* Type of value returned by function. */
+ int type;
+
+ /* Symbol value. */
+ long val;
+
+ /* For function symbols; type of value returned by function. */
+ int ret_type;
+
+ /* For function symbols; if set, execute function at the time
+ of command execution, not during parsing. A function with
+ this field set must also have `no_arg' set. Also, the function's
+ `val' argument will always be NULL. */
+ int run_on_exec;
+};
+
+/* Additional values symbols can take.
+ These are only used internally. */
+#define VAL_SYM 10 /* symbol table entry */
+#define VAL_FUNC 11 /* function pointer */
+
+/* This structure describes an argument. */
+struct arg
+{
+ /* Argument text copied verbatim. 0 if none. */
+ char *text;
+
+ /* Type of value assigned. 0 if none. */
+ int type;
+
+ /* Argument value. */
+ long val;
+};
+
+/* List of commands. */
+static struct cmd **cmds = 0;
+
+/* Amount allocated for `cmds'. */
+static int cmds_alloc = 0;
+
+/* Next available slot in `cmds'. */
+static int cmds_index = 0;
+
+/* Symbol table. */
+static struct sym **symtab = 0;
+
+/* Amount allocated for `symtab'. */
+static int symtab_alloc = 0;
+
+/* Next available slot in `symtab'. */
+static int symtab_index = 0;
+
+/* Create a task and suspend it. */
+static int
+create_task (struct cmd *cmd, long *val)
+{
+ int err = boot_script_task_create (cmd);
+ *val = (long) cmd->task;
+ return err;
+}
+
+/* Resume a task. */
+static int
+resume_task (struct cmd *cmd, const long *val)
+{
+ return boot_script_task_resume (cmd);
+}
+
+/* Resume a task when the user hits return. */
+static int
+prompt_resume_task (struct cmd *cmd, const long *val)
+{
+ return boot_script_prompt_task_resume (cmd);
+}
+
+/* List of builtin symbols. */
+static struct sym builtin_symbols[] =
+{
+ { "task-create", VAL_FUNC, (long) create_task, VAL_TASK, 0 },
+ { "task-resume", VAL_FUNC, (long) resume_task, VAL_NONE, 1 },
+ { "prompt-task-resume", VAL_FUNC, (long) prompt_resume_task, VAL_NONE, 1 },
+};
+#define NUM_BUILTIN (sizeof (builtin_symbols) / sizeof (builtin_symbols[0]))
+
+/* Free CMD and all storage associated with it.
+ If ABORTING is set, terminate the task associated with CMD,
+ otherwise just deallocate the send right. */
+static void
+free_cmd (struct cmd *cmd, int aborting)
+{
+ if (cmd->task)
+ boot_script_free_task (cmd->task, aborting);
+ if (cmd->args)
+ {
+ int i;
+ for (i = 0; i < cmd->args_index; i++)
+ boot_script_free (cmd->args[i], sizeof *cmd->args[i]);
+ boot_script_free (cmd->args, sizeof cmd->args[0] * cmd->args_alloc);
+ }
+ if (cmd->exec_funcs)
+ boot_script_free (cmd->exec_funcs,
+ sizeof cmd->exec_funcs[0] * cmd->exec_funcs_alloc);
+ boot_script_free (cmd, sizeof *cmd);
+}
+
+/* Free all storage allocated by the parser.
+ If ABORTING is set, terminate all tasks. */
+static void
+cleanup (int aborting)
+{
+ int i;
+
+ for (i = 0; i < cmds_index; i++)
+ free_cmd (cmds[i], aborting);
+ boot_script_free (cmds, sizeof cmds[0] * cmds_alloc);
+ cmds = 0;
+ cmds_index = cmds_alloc = 0;
+
+ for (i = 0; i < symtab_index; i++)
+ boot_script_free (symtab[i], sizeof *symtab[i]);
+ boot_script_free (symtab, sizeof symtab[0] * symtab_alloc);
+ symtab = 0;
+ symtab_index = symtab_alloc = 0;
+}
+
+/* Add PTR to the list of pointers PTR_LIST, which
+ currently has ALLOC amount of space allocated to it, and
+ whose next available slot is INDEX. If more space
+ needs to be allocated, INCR is the amount by which
+ to increase it. Return 0 on success, non-zero otherwise. */
+static int
+add_list (void *ptr, void ***ptr_list, int *alloc, int *index, int incr)
+{
+ if (*index == *alloc)
+ {
+ void **p;
+
+ *alloc += incr;
+ p = boot_script_malloc (*alloc * sizeof (void *));
+ if (! p)
+ {
+ *alloc -= incr;
+ return 1;
+ }
+ if (*ptr_list)
+ {
+ memcpy (p, *ptr_list, *index * sizeof (void *));
+ boot_script_free (*ptr_list, (*alloc - incr) * sizeof (void *));
+ }
+ *ptr_list = p;
+ }
+ *(*ptr_list + *index) = ptr;
+ *index += 1;
+ return 0;
+}
+
+/* Create an argument with TEXT, value type TYPE, and value VAL.
+ Add the argument to the argument list of CMD. */
+static struct arg *
+add_arg (struct cmd *cmd, char *text, int type, long val)
+{
+ struct arg *arg;
+
+ arg = boot_script_malloc (sizeof (struct arg));
+ if (arg)
+ {
+ arg->text = text;
+ arg->type = type;
+ arg->val = val;
+ if (add_list (arg, (void ***) &cmd->args,
+ &cmd->args_alloc, &cmd->args_index, 5))
+ {
+ boot_script_free (arg, sizeof *arg);
+ return 0;
+ }
+ }
+ return arg;
+}
+
+/* Search for the symbol NAME in the symbol table. */
+static struct sym *
+sym_lookup (const char *name)
+{
+ int i;
+
+ for (i = 0; i < symtab_index; i++)
+ if (! strcmp (name, symtab[i]->name))
+ return symtab[i];
+ return 0;
+}
+
+/* Create an entry for symbol NAME in the symbol table. */
+static struct sym *
+sym_enter (const char *name)
+{
+ struct sym *sym;
+
+ sym = boot_script_malloc (sizeof (struct sym));
+ if (sym)
+ {
+ memset (sym, 0, sizeof (struct sym));
+ sym->name = name;
+ if (add_list (sym, (void ***) &symtab, &symtab_alloc, &symtab_index, 20))
+ {
+ boot_script_free (sym, sizeof *sym);
+ return 0;
+ }
+ }
+ return sym;
+}
+
+/* Parse the command line CMDLINE. */
+int
+boot_script_parse_line (void *hook, char *cmdline)
+{
+ char *p, *q;
+ int error;
+ struct cmd *cmd;
+ struct arg *arg;
+
+ /* Extract command name. Ignore line if it lacks a command. */
+ for (p = cmdline; *p == ' ' || *p == '\t'; p++)
+ ;
+ if (*p == '#')
+ /* Ignore comment line. */
+ return 0;
+
+#if 0
+ if (*p && *p != ' ' && *p != '\t' && *p != '\n')
+ {
+ printf ("(bootstrap): %s\n", cmdline);
+ }
+#endif
+
+ for (q = p; *q && *q != ' ' && *q != '\t' && *q != '\n'; q++)
+ ;
+ if (p == q)
+ return 0;
+
+ *q = '\0';
+
+ /* Allocate a command structure. */
+ cmd = boot_script_malloc (sizeof (struct cmd));
+ if (! cmd)
+ return BOOT_SCRIPT_NOMEM;
+ memset (cmd, 0, sizeof (struct cmd));
+ cmd->hook = hook;
+ cmd->path = p;
+ p = q + 1;
+
+ for (arg = 0;;)
+ {
+ if (! arg)
+ {
+ /* Skip whitespace. */
+ while (*p == ' ' || *p == '\t')
+ p++;
+
+ /* End of command line. */
+ if (! *p || *p == '\n')
+ {
+ /* Add command to list. */
+ if (add_list (cmd, (void ***) &cmds,
+ &cmds_alloc, &cmds_index, 10))
+ {
+ error = BOOT_SCRIPT_NOMEM;
+ goto bad;
+ }
+ return 0;
+ }
+ }
+
+ /* Look for a symbol. */
+ if (arg || (*p == '$' && (*(p + 1) == '{' || *(p + 1) == '(')))
+ {
+ char end_char = (*(p + 1) == '{') ? '}' : ')';
+ struct sym *sym = 0;
+
+ for (p += 2;;)
+ {
+ char c;
+ unsigned i;
+ int type;
+ long val;
+ struct sym *s;
+
+ /* Parse symbol name. */
+ for (q = p; *q && *q != '\n' && *q != end_char && *q != '='; q++)
+ ;
+ if (p == q || ! *q || *q == '\n'
+ || (end_char == '}' && *q != '}'))
+ {
+ error = BOOT_SCRIPT_SYNTAX_ERROR;
+ goto bad;
+ }
+ c = *q;
+ *q = '\0';
+
+ /* See if this is a builtin symbol. */
+ for (i = 0; i < NUM_BUILTIN; i++)
+ if (! strcmp (p, builtin_symbols[i].name))
+ break;
+
+ if (i < NUM_BUILTIN)
+ s = &builtin_symbols[i];
+ else
+ {
+ /* Look up symbol in symbol table.
+ If no entry exists, create one. */
+ s = sym_lookup (p);
+ if (! s)
+ {
+ s = sym_enter (p);
+ if (! s)
+ {
+ error = BOOT_SCRIPT_NOMEM;
+ goto bad;
+ }
+ }
+ }
+
+ /* Only values are allowed in ${...} constructs. */
+ if (end_char == '}' && s->type == VAL_FUNC)
+ return BOOT_SCRIPT_INVALID_SYM;
+
+ /* Check that assignment is valid. */
+ if (c == '=' && s->type == VAL_FUNC)
+ {
+ error = BOOT_SCRIPT_INVALID_ASG;
+ goto bad;
+ }
+
+ /* For function symbols, execute the function. */
+ if (s->type == VAL_FUNC)
+ {
+ if (! s->run_on_exec)
+ {
+ (error
+ = ((*((int (*) (struct cmd *, long *)) s->val))
+ (cmd, &val)));
+ if (error)
+ goto bad;
+ type = s->ret_type;
+ }
+ else
+ {
+ if (add_list (s, (void ***) &cmd->exec_funcs,
+ &cmd->exec_funcs_alloc,
+ &cmd->exec_funcs_index, 5))
+ {
+ error = BOOT_SCRIPT_NOMEM;
+ goto bad;
+ }
+ type = VAL_NONE;
+ goto out;
+ }
+ }
+ else if (s->type == VAL_NONE)
+ {
+ type = VAL_SYM;
+ val = (long) s;
+ }
+ else
+ {
+ type = s->type;
+ val = s->val;
+ }
+
+ if (sym)
+ {
+ sym->type = type;
+ sym->val = val;
+ }
+ else if (arg)
+ {
+ arg->type = type;
+ arg->val = val;
+ }
+
+ out:
+ p = q + 1;
+ if (c == end_char)
+ {
+ /* Create an argument if necessary.
+ We create an argument if the symbol appears
+ in the expression by itself.
+
+ NOTE: This is temporary till the boot filesystem
+ servers support arguments. When that happens,
+ symbol values will only be printed if they're
+ associated with an argument. */
+ if (! arg && end_char == '}')
+ {
+ if (! add_arg (cmd, 0, type, val))
+ {
+ error = BOOT_SCRIPT_NOMEM;
+ goto bad;
+ }
+ }
+ arg = 0;
+ break;
+ }
+ if (s->type != VAL_FUNC)
+ sym = s;
+ }
+ }
+ else
+ {
+ char c;
+
+ /* Command argument; just copy the text. */
+ for (q = p;; q++)
+ {
+ if (! *q || *q == ' ' || *q == '\t' || *q == '\n')
+ break;
+ if (*q == '$' && *(q + 1) == '{')
+ break;
+ }
+ c = *q;
+ *q = '\0';
+
+ /* Add argument to list. */
+ arg = add_arg (cmd, p, VAL_NONE, 0);
+ if (! arg)
+ {
+ error = BOOT_SCRIPT_NOMEM;
+ goto bad;
+ }
+ if (c == '$')
+ p = q;
+ else
+ {
+ if (c)
+ p = q + 1;
+ else
+ p = q;
+ arg = 0;
+ }
+ }
+ }
+
+
+ bad:
+ free_cmd (cmd, 1);
+ cleanup (1);
+ return error;
+}
+
+/* Ensure that the command line buffer can accommodate LEN bytes of space. */
+#define CHECK_CMDLINE_LEN(len) \
+{ \
+ if (cmdline_alloc - cmdline_index < len) \
+ { \
+ char *ptr; \
+ int alloc, i; \
+ alloc = cmdline_alloc + len - (cmdline_alloc - cmdline_index) + 100; \
+ ptr = boot_script_malloc (alloc); \
+ if (! ptr) \
+ { \
+ error = BOOT_SCRIPT_NOMEM; \
+ goto done; \
+ } \
+ memcpy (ptr, cmdline, cmdline_index); \
+ for (i = 0; i < argc; ++i) \
+ argv[i] = ptr + (argv[i] - cmdline); \
+ boot_script_free (cmdline, cmdline_alloc); \
+ cmdline = ptr; \
+ cmdline_alloc = alloc; \
+ } \
+}
+
+/* Execute commands previously parsed. */
+int
+boot_script_exec (void)
+{
+ int cmd_index;
+
+ for (cmd_index = 0; cmd_index < cmds_index; cmd_index++)
+ {
+ char **argv, *cmdline;
+ int i, argc, cmdline_alloc;
+ int cmdline_index, error, arg_index;
+ struct cmd *cmd = cmds[cmd_index];
+
+ /* Skip command if it doesn't have an associated task. */
+ if (cmd->task == 0)
+ continue;
+
+ /* Allocate a command line and copy command name. */
+ cmdline_index = strlen (cmd->path) + 1;
+ cmdline_alloc = cmdline_index + 100;
+ cmdline = boot_script_malloc (cmdline_alloc);
+ if (! cmdline)
+ {
+ cleanup (1);
+ return BOOT_SCRIPT_NOMEM;
+ }
+ memcpy (cmdline, cmd->path, cmdline_index);
+
+ /* Allocate argument vector. */
+ argv = boot_script_malloc (sizeof (char *) * (cmd->args_index + 2));
+ if (! argv)
+ {
+ boot_script_free (cmdline, cmdline_alloc);
+ cleanup (1);
+ return BOOT_SCRIPT_NOMEM;
+ }
+ argv[0] = cmdline;
+ argc = 1;
+
+ /* Build arguments. */
+ for (arg_index = 0; arg_index < cmd->args_index; arg_index++)
+ {
+ struct arg *arg = cmd->args[arg_index];
+
+ /* Copy argument text. */
+ if (arg->text)
+ {
+ int len = strlen (arg->text);
+
+ if (arg->type == VAL_NONE)
+ len++;
+ CHECK_CMDLINE_LEN (len);
+ memcpy (cmdline + cmdline_index, arg->text, len);
+ argv[argc++] = &cmdline[cmdline_index];
+ cmdline_index += len;
+ }
+
+ /* Add value of any symbol associated with this argument. */
+ if (arg->type != VAL_NONE)
+ {
+ char *p, buf[50];
+ int len;
+ mach_port_name_t name;
+
+ if (arg->type == VAL_SYM)
+ {
+ struct sym *sym = (struct sym *) arg->val;
+
+ /* Resolve symbol value. */
+ while (sym->type == VAL_SYM)
+ sym = (struct sym *) sym->val;
+ if (sym->type == VAL_NONE)
+ {
+ error = BOOT_SCRIPT_UNDEF_SYM;
+ printf("bootstrap script missing symbol '%s'\n", sym->name);
+ goto done;
+ }
+ arg->type = sym->type;
+ arg->val = sym->val;
+ }
+
+ /* Print argument value. */
+ switch (arg->type)
+ {
+ case VAL_STR:
+ p = (char *) arg->val;
+ len = strlen (p);
+ break;
+
+ case VAL_TASK:
+ case VAL_PORT:
+ if (arg->type == VAL_TASK)
+ /* Insert send right to task port. */
+ error = boot_script_insert_task_port
+ (cmd, (task_t) arg->val, &name);
+ else
+ /* Insert send right. */
+ error = boot_script_insert_right (cmd,
+ (mach_port_t) arg->val,
+ &name);
+ if (error)
+ goto done;
+
+ i = name;
+ p = buf + sizeof (buf);
+ len = 0;
+ do
+ {
+ *--p = i % 10 + '0';
+ len++;
+ }
+ while (i /= 10);
+ break;
+
+ default:
+ error = BOOT_SCRIPT_BAD_TYPE;
+ goto done;
+ }
+ len++;
+ CHECK_CMDLINE_LEN (len);
+ memcpy (cmdline + cmdline_index, p, len - 1);
+ *(cmdline + cmdline_index + len - 1) = '\0';
+ if (! arg->text)
+ argv[argc++] = &cmdline[cmdline_index];
+ cmdline_index += len;
+ }
+ }
+
+ /* Terminate argument vector. */
+ argv[argc] = 0;
+
+ /* Execute the command. */
+ if (boot_script_exec_cmd (cmd->hook, cmd->task, cmd->path,
+ argc, argv, cmdline, cmdline_index))
+ {
+ error = BOOT_SCRIPT_EXEC_ERROR;
+ goto done;
+ }
+
+ error = 0;
+
+ done:
+ boot_script_free (cmdline, cmdline_alloc);
+ boot_script_free (argv, sizeof (char *) * (cmd->args_index + 2));
+ if (error)
+ {
+ cleanup (1);
+ return error;
+ }
+ }
+
+ for (cmd_index = 0; cmd_index < cmds_index; cmd_index++)
+ {
+ int i;
+ struct cmd *cmd = cmds[cmd_index];
+
+ /* Execute functions that want to be run on exec. */
+ for (i = 0; i < cmd->exec_funcs_index; i++)
+ {
+ struct sym *sym = cmd->exec_funcs[i];
+ int error = ((*((int (*) (struct cmd *, int *)) sym->val))
+ (cmd, 0));
+ if (error)
+ {
+ cleanup (1);
+ return error;
+ }
+ }
+ }
+
+ cleanup (0);
+ return 0;
+}
+
+/* Create an entry for the variable NAME with TYPE and value VAL,
+ in the symbol table. */
+int
+boot_script_set_variable (const char *name, int type, long val)
+{
+ struct sym *sym = sym_enter (name);
+
+ if (sym)
+ {
+ sym->type = type;
+ sym->val = val;
+ }
+ return sym ? 0 : 1;
+}
+
+
+/* Define the function NAME, which will return type RET_TYPE. */
+int
+boot_script_define_function (const char *name, int ret_type,
+ int (*func) (const struct cmd *cmd, int *val))
+{
+ struct sym *sym = sym_enter (name);
+
+ if (sym)
+ {
+ sym->type = VAL_FUNC;
+ sym->val = (long) func;
+ sym->ret_type = ret_type;
+ sym->run_on_exec = ret_type == VAL_NONE;
+ }
+ return sym ? 0 : 1;
+}
+
+
+/* Return a string describing ERR. */
+char *
+boot_script_error_string (int err)
+{
+ switch (err)
+ {
+ case BOOT_SCRIPT_NOMEM:
+ return "no memory";
+
+ case BOOT_SCRIPT_SYNTAX_ERROR:
+ return "syntax error";
+
+ case BOOT_SCRIPT_INVALID_ASG:
+ return "invalid variable in assignment";
+
+ case BOOT_SCRIPT_MACH_ERROR:
+ return "mach error";
+
+ case BOOT_SCRIPT_UNDEF_SYM:
+ return "undefined symbol";
+
+ case BOOT_SCRIPT_EXEC_ERROR:
+ return "exec error";
+
+ case BOOT_SCRIPT_INVALID_SYM:
+ return "invalid variable in expression";
+
+ case BOOT_SCRIPT_BAD_TYPE:
+ return "invalid value type";
+ }
+ return 0;
+}
+
+#ifdef BOOT_SCRIPT_TEST
+#include <stdio.h>
+
+int
+boot_script_exec_cmd (void *hook,
+ mach_port_t task, char *path, int argc,
+ char **argv, char *strings, int stringlen)
+{
+ int i;
+
+ printf ("port = %d: ", (int) task);
+ for (i = 0; i < argc; i++)
+ printf ("%s ", argv[i]);
+ printf ("\n");
+ return 0;
+}
+
+void
+main (int argc, char **argv)
+{
+ char buf[500], *p;
+ int len;
+ FILE *fp;
+ mach_port_name_t host_port, device_port;
+
+ if (argc < 2)
+ {
+ fprintf (stderr, "Usage: %s <script>\n", argv[0]);
+ exit (1);
+ }
+ fp = fopen (argv[1], "r");
+ if (! fp)
+ {
+ fprintf (stderr, "Can't open %s\n", argv[1]);
+ exit (1);
+ }
+ host_port = 1;
+ device_port = 2;
+ boot_script_set_variable ("host-port", VAL_PORT, (int) host_port);
+ boot_script_set_variable ("device-port", VAL_PORT, (int) device_port);
+ boot_script_set_variable ("root-device", VAL_STR, (int) "hd0a");
+ boot_script_set_variable ("boot-args", VAL_STR, (int) "-ad");
+ p = buf;
+ len = sizeof (buf);
+ while (fgets (p, len, fp))
+ {
+ int i, err;
+
+ i = strlen (p) + 1;
+ err = boot_script_parse_line (0, p);
+ if (err)
+ {
+ fprintf (stderr, "error %s\n", boot_script_error_string (err));
+ exit (1);
+ }
+ p += i;
+ len -= i;
+ }
+ boot_script_exec ();
+ exit (0);
+}
+#endif /* BOOT_SCRIPT_TEST */
diff --git a/kern/boot_script.h b/kern/boot_script.h
new file mode 100644
index 0000000..d1f968d
--- /dev/null
+++ b/kern/boot_script.h
@@ -0,0 +1,111 @@
+/* Definitions for boot script parser for Mach. */
+
+#ifndef _boot_script_h
+#define _boot_script_h
+
+/* Written by Shantanu Goel (goel@cs.columbia.edu). */
+
+/* Error codes returned by boot_script_parse_line()
+ and boot_script_exec_cmd(). */
+#define BOOT_SCRIPT_NOMEM 1
+#define BOOT_SCRIPT_SYNTAX_ERROR 2
+#define BOOT_SCRIPT_INVALID_ASG 3
+#define BOOT_SCRIPT_MACH_ERROR 4
+#define BOOT_SCRIPT_UNDEF_SYM 5
+#define BOOT_SCRIPT_EXEC_ERROR 6
+#define BOOT_SCRIPT_INVALID_SYM 7
+#define BOOT_SCRIPT_BAD_TYPE 8
+
+/* Legal values for argument `type' to function
+ boot_script_set_variable and boot_script_define_function. */
+#define VAL_NONE 0 /* none -- function runs at exec time */
+#define VAL_STR 1 /* string */
+#define VAL_PORT 2 /* port */
+#define VAL_TASK 3 /* task port */
+
+/* This structure describes a command. */
+struct cmd
+{
+ /* Cookie passed in to boot_script_parse_line. */
+ void *hook;
+
+ /* Path of executable. */
+ char *path;
+
+ /* Task port. */
+ task_t task;
+
+ /* Argument list. */
+ struct arg **args;
+
+ /* Amount allocated for `args'. */
+ int args_alloc;
+
+ /* Next available slot in `args'. */
+ int args_index;
+
+ /* List of functions that want to be run on command execution. */
+ struct sym **exec_funcs;
+
+ /* Amount allocated for `exec_funcs'. */
+ int exec_funcs_alloc;
+
+ /* Next available slot in `exec_funcs'. */
+ int exec_funcs_index;
+};
+
+
+/* The user must define these functions, we work like malloc and free. */
+void *boot_script_malloc (unsigned int);
+void boot_script_free (void *, unsigned int);
+
+/* The user must define this function. Load the image of the
+ executable specified by PATH in TASK. Create a thread
+ in TASK and point it at the executable's entry point. Initialize
+ TASK's stack with argument vector ARGV of length ARGC whose
+ strings are STRINGS. STRINGS has length STRINGLEN.
+ Return 0 for success, non-zero otherwise. */
+int boot_script_exec_cmd (void *hook,
+ task_t task, char *path, int argc,
+ char **argv, char *strings, int stringlen);
+
+/* The user must define these functions to perform the corresponding
+ Mach task manipulations. */
+int boot_script_task_create (struct cmd *); /* task_create + task_suspend */
+int boot_script_task_resume (struct cmd *);
+int boot_script_prompt_task_resume (struct cmd *);
+int boot_script_insert_right (struct cmd *, mach_port_t, mach_port_name_t *namep);
+int boot_script_insert_task_port (struct cmd *, task_t, mach_port_name_t *namep);
+
+/* The user must define this function to clean up the `task_t'
+ returned by boot_script_task_create. */
+void boot_script_free_task (task_t task, int aborting);
+
+
+/* Parse the command line LINE. This causes the command line to be
+ converted into an internal format. Returns 0 for success, non-zero
+ otherwise.
+
+ NOTE: The parser writes into the line so it must not be a string constant.
+ It is also the responsibility of the caller not to deallocate the line
+ across calls to the parser. */
+int boot_script_parse_line (void *hook, char *cmdline);
+
+/* Execute the command lines previously parsed.
+ Returns 0 for success, non-zero otherwise. */
+int boot_script_exec (void);
+
+/* Create an entry in the symbol table for variable NAME,
+ whose type is TYPE and value is VAL. Returns 0 on success,
+ non-zero otherwise. */
+int boot_script_set_variable (const char *name, int type, long val);
+
+/* Define the function NAME, which will return type RET_TYPE. */
+int boot_script_define_function (const char *name, int ret_type,
+ int (*func) (const struct cmd *cmd, int *val));
+
+/* Returns a string describing the error ERR. */
+char *boot_script_error_string (int err);
+
+
+#endif /* _boot_script_h */
diff --git a/kern/bootstrap.c b/kern/bootstrap.c
new file mode 100644
index 0000000..49358ac
--- /dev/null
+++ b/kern/bootstrap.c
@@ -0,0 +1,918 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1989 Carnegie Mellon University.
+ * Copyright (c) 1995-1993 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Bootstrap the various built-in servers.
+ */
+
+#include <alloca.h>
+#include <string.h>
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include <machine/locore.h>
+#include <machine/vm_param.h>
+#include <machine/pcb.h>
+#include <ipc/ipc_port.h>
+#include <ipc/mach_port.server.h>
+#include <kern/bootstrap.h>
+#include <kern/debug.h>
+#include <kern/host.h>
+#include <kern/printf.h>
+#include <kern/kalloc.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <vm/pmap.h>
+#include <device/device_port.h>
+
+#if MACH_KDB
+#include <machine/db_machdep.h>
+#include <ddb/db_sym.h>
+#endif
+
+#if OSKIT_MACH
+#include <stddef.h>
+#include <oskit/machine/base_multiboot.h>
+#include <oskit/exec/exec.h>
+#include <oskit/c/stdio.h>
+#define safe_gets(s, n) fgets((s),(n),stdin)
+#else
+#include <mach/machine/multiboot.h>
+#include <mach/exec/exec.h>
+#ifdef MACH_XEN
+#include <mach/xen.h>
+extern struct start_info boot_info; /* XXX put this in a header! */
+#else /* MACH_XEN */
+extern struct multiboot_raw_info boot_info; /* XXX put this in a header! */
+#endif /* MACH_XEN */
+#endif
+
+#include "boot_script.h"
+
+
+static mach_port_name_t boot_device_port; /* local name */
+static mach_port_name_t boot_host_port; /* local name */
+
+extern char *kernel_cmdline;
+
+static void user_bootstrap(void); /* forward */
+static void user_bootstrap_compat(void); /* forward */
+static void bootstrap_exec_compat(void *exec_data); /* forward */
+static void get_compat_strings(char *flags_str, char *root_str); /* forward */
+
+static mach_port_name_t
+task_insert_send_right(
+ task_t task,
+ ipc_port_t port)
+{
+ mach_port_name_t name;
+
+ for (name = 1;; name++) {
+ kern_return_t kr;
+
+ kr = mach_port_insert_right(task->itk_space, name,
+ port, MACH_MSG_TYPE_PORT_SEND);
+ if (kr == KERN_SUCCESS)
+ break;
+ assert(kr == KERN_NAME_EXISTS);
+ }
+
+ return name;
+}
+
+static void
+free_bootstrap_pages(phys_addr_t start, phys_addr_t end)
+{
+ struct vm_page *page;
+
+ while (start < end)
+ {
+ page = vm_page_lookup_pa(start);
+ assert(page != NULL);
+ vm_page_manage(page);
+ start += PAGE_SIZE;
+ }
+}
+
+void bootstrap_create(void)
+{
+ int compat;
+ unsigned n = 0;
+#ifdef MACH_XEN
+#ifdef __x86_64__ // 32_ON_64 actually
+ struct multiboot32_module *bmods32 = (struct multiboot32_module *)
+ boot_info.mod_start;
+ struct multiboot_module *bmods;
+ if (bmods32) {
+ int i;
+ for (n = 0; bmods32[n].mod_start; n++)
+ ;
+ bmods = alloca(n * sizeof(*bmods));
+ for (i = 0; i < n ; i++)
+ {
+ bmods[i].mod_start = kvtophys(bmods32[i].mod_start + (vm_offset_t) bmods32);
+ bmods[i].mod_end = kvtophys(bmods32[i].mod_end + (vm_offset_t) bmods32);
+ bmods[i].string = kvtophys(bmods32[i].string + (vm_offset_t) bmods32);
+ }
+ }
+#else
+ struct multiboot_module *bmods = (struct multiboot_module *)
+ boot_info.mod_start;
+ if (bmods)
+ for (n = 0; bmods[n].mod_start; n++) {
+ bmods[n].mod_start = kvtophys(bmods[n].mod_start + (vm_offset_t) bmods);
+ bmods[n].mod_end = kvtophys(bmods[n].mod_end + (vm_offset_t) bmods);
+ bmods[n].string = kvtophys(bmods[n].string + (vm_offset_t) bmods);
+ }
+#endif
+ boot_info.mods_count = n;
+ boot_info.flags |= MULTIBOOT_MODS;
+#else /* MACH_XEN */
+#ifdef __x86_64__
+ struct multiboot_raw_module *bmods32 = ((struct multiboot_raw_module *)
+ phystokv(boot_info.mods_addr));
+ struct multiboot_module *bmods=NULL;
+ if (bmods32)
+ {
+ int i;
+ bmods = alloca(boot_info.mods_count * sizeof(*bmods));
+ for (i=0; i<boot_info.mods_count; i++)
+ {
+ bmods[i].mod_start = bmods32[i].mod_start;
+ bmods[i].mod_end = bmods32[i].mod_end;
+ bmods[i].string = bmods32[i].string;
+ }
+ }
+#else
+ struct multiboot_module *bmods = ((struct multiboot_module *)
+ phystokv(boot_info.mods_addr));
+#endif
+#endif /* MACH_XEN */
+ if (!(boot_info.flags & MULTIBOOT_MODS)
+ || (boot_info.mods_count == 0))
+ panic ("No bootstrap code loaded with the kernel!");
+
+ compat = boot_info.mods_count == 1;
+ if (compat)
+ {
+ char *p = strchr((char*)phystokv(bmods[0].string), ' ');
+ if (p != 0)
+ do
+ ++p;
+ while (*p == ' ' || *p == '\n');
+ compat = p == 0 || *p == '\0';
+ }
+
+ if (compat)
+ {
+ printf("Loading single multiboot module in compat mode: %s\n",
+ (char*)phystokv(bmods[0].string));
+ bootstrap_exec_compat(&bmods[0]);
+ }
+ else
+ {
+ unsigned i;
+ int losers;
+
+ /* Initialize boot script variables. We leak these send rights. */
+ losers = boot_script_set_variable
+ ("host-port", VAL_PORT,
+ (long) realhost.host_priv_self);
+ if (losers)
+ panic ("cannot set boot-script variable host-port: %s",
+ boot_script_error_string (losers));
+ losers = boot_script_set_variable
+ ("device-port", VAL_PORT,
+ (long) master_device_port);
+ if (losers)
+ panic ("cannot set boot-script variable device-port: %s",
+ boot_script_error_string (losers));
+ losers = boot_script_set_variable
+ ("kernel-task", VAL_PORT,
+ (long) kernel_task->itk_self);
+ if (losers)
+ panic ("cannot set boot-script variable kernel-task: %s",
+ boot_script_error_string (losers));
+
+ losers = boot_script_set_variable ("kernel-command-line", VAL_STR,
+ (long) kernel_cmdline);
+ if (losers)
+ panic ("cannot set boot-script variable %s: %s",
+ "kernel-command-line", boot_script_error_string (losers));
+
+ {
+ /* Set the same boot script variables that the old Hurd's
+ serverboot did, so an old Hurd and boot script previously
+ used with serverboot can be used directly with this kernel. */
+
+ char *flag_string = alloca(1024);
+ char *root_string = alloca(1024);
+
+ /*
+ * Get the (compatibility) boot flags and root name strings.
+ */
+ get_compat_strings(flag_string, root_string);
+
+ losers = boot_script_set_variable ("boot-args", VAL_STR,
+ (long) flag_string);
+ if (losers)
+ panic ("cannot set boot-script variable %s: %s",
+ "boot-args", boot_script_error_string (losers));
+ losers = boot_script_set_variable ("root-device", VAL_STR,
+ (long) root_string);
+ if (losers)
+ panic ("cannot set boot-script variable %s: %s",
+ "root-device", boot_script_error_string (losers));
+ }
+
+#if OSKIT_MACH
+ {
+ /* The oskit's "environ" array contains all the words from
+ the multiboot command line that looked like VAR=VAL.
+ We set each of these as boot-script variables, which
+ can be used for things like ${root}. */
+
+ extern char **environ;
+ char **ep;
+ for (ep = environ; *ep != 0; ++ep)
+ {
+ size_t len = strlen (*ep) + 1;
+ char *var = memcpy (alloca (len), *ep, len);
+ char *val = strchr (var, '=');
+ *val++ = '\0';
+ losers = boot_script_set_variable (var, VAL_STR, (long) val);
+ if (losers)
+ panic ("cannot set boot-script variable %s: %s",
+ var, boot_script_error_string (losers));
+ }
+ }
+#else /* GNUmach, not oskit-mach */
+ {
+ /* Turn each `FOO=BAR' word in the command line into a boot script
+ variable ${FOO} with value BAR. This matches what we get from
+ oskit's environ in the oskit-mach case (above). */
+
+ int len = strlen (kernel_cmdline) + 1;
+ char *s = memcpy (alloca (len), kernel_cmdline, len);
+ char *word;
+ while ((word = strsep (&s, " \t")) != 0)
+ {
+ char *eq = strchr (word, '=');
+ if (eq == 0)
+ continue;
+ *eq++ = '\0';
+ losers = boot_script_set_variable (word, VAL_STR, (long) eq);
+ if (losers)
+ panic ("cannot set boot-script variable %s: %s",
+ word, boot_script_error_string (losers));
+ }
+ }
+#endif
+
+ for (i = 0; i < boot_info.mods_count; ++i)
+ {
+ int err;
+ char *line = (char*)phystokv(bmods[i].string);
+ printf ("module %d: %s\n", i, line);
+ err = boot_script_parse_line (&bmods[i], line);
+ if (err)
+ {
+ printf ("\n\tERROR: %s", boot_script_error_string (err));
+ ++losers;
+ }
+ }
+ printf ("%d multiboot modules\n", i);
+ if (losers)
+ panic ("%d of %d boot script commands could not be parsed",
+ losers, boot_info.mods_count);
+ losers = boot_script_exec ();
+ if (losers)
+ panic ("ERROR in executing boot script: %s",
+ boot_script_error_string (losers));
+ }
+ /* XXX we could free the memory used
+ by the boot loader's descriptors and such. */
+ for (n = 0; n < boot_info.mods_count; n++)
+ free_bootstrap_pages(bmods[n].mod_start, bmods[n].mod_end);
+}
+
+static void
+bootstrap_exec_compat(void *e)
+{
+ task_t bootstrap_task;
+ thread_t bootstrap_thread;
+
+ /*
+ * Create the bootstrap task.
+ */
+
+ (void) task_create(TASK_NULL, FALSE, &bootstrap_task);
+ (void) thread_create(bootstrap_task, &bootstrap_thread);
+
+ /*
+ * Insert send rights to the master host and device ports.
+ */
+
+ boot_host_port =
+ task_insert_send_right(bootstrap_task,
+ ipc_port_make_send(realhost.host_priv_self));
+
+ boot_device_port =
+ task_insert_send_right(bootstrap_task,
+ ipc_port_make_send(master_device_port));
+
+ /*
+ * Start the bootstrap thread.
+ */
+ bootstrap_thread->saved.other = e;
+ thread_start(bootstrap_thread, user_bootstrap_compat);
+ (void) thread_resume(bootstrap_thread);
+}
+
+/*
+ * The following code runs as the kernel mode portion of the
+ * first user thread.
+ */
+
+/*
+ * Convert an unsigned integer to its decimal representation.
+ */
+static void
+itoa(
+ char *str,
+ vm_size_t num)
+{
+ char buf[sizeof(vm_size_t)*2+3];
+ char *np;
+
+ np = buf + sizeof(buf);
+ *--np = 0;
+
+ do {
+ *--np = '0' + num % 10;
+ num /= 10;
+ } while (num != 0);
+
+ strcpy(str, np);
+}
+
+/*
+ * Collect the boot flags into a single argument string,
+ * for compatibility with existing bootstrap and startup code.
+ * Format as a standard flag argument: '-qsdn...'
+ */
+static void get_compat_strings(char *flags_str, char *root_str)
+{
+ char *ip, *cp;
+
+ strcpy (root_str, "UNKNOWN");
+
+ cp = flags_str;
+ *cp++ = '-';
+
+ for (ip = kernel_cmdline; *ip; )
+ {
+ if (*ip == ' ')
+ {
+ ip++;
+ }
+ else if (*ip == '-')
+ {
+ ip++;
+ while (*ip > ' ')
+ *cp++ = *ip++;
+ }
+ else if (strncmp(ip, "root=", 5) == 0)
+ {
+ char *rp = root_str;
+
+ ip += 5;
+ if (strncmp(ip, "/dev/", 5) == 0)
+ ip += 5;
+ while (*ip > ' ')
+ *rp++ = *ip++;
+ *rp = '\0';
+ }
+ else
+ {
+ while (*ip > ' ')
+ ip++;
+ }
+ }
+
+ if (cp == &flags_str[1]) /* no flags */
+ *cp++ = 'x';
+ *cp = '\0';
+}
+
+#if 0
+/*
+ * Copy boot_data (executable) to the user portion of this task.
+ */
+static boolean_t load_protect_text = TRUE;
+#if MACH_KDB
+ /* if set, fault in the text segment */
+static boolean_t load_fault_in_text = TRUE;
+#endif
+
+/*
+ * Dead code (inside the surrounding "#if 0"): translate an offset within
+ * the boot image to its physical address via the kernel pmap.
+ * "data" carries the image's starting kernel virtual offset.
+ */
+static vm_offset_t
+boot_map(
+ void * data, /* private data */
+ vm_offset_t offset) /* offset to map */
+{
+ vm_offset_t start_offset = (vm_offset_t) data;
+
+ return pmap_extract(kernel_pmap, start_offset + offset);
+}
+
+
+#if BOOTSTRAP_SYMBOLS
+static boolean_t load_bootstrap_symbols = TRUE;
+#else
+static boolean_t load_bootstrap_symbols = FALSE;
+#endif
+#endif
+
+
+
+/*
+ * exec_load() read callback: copy "size" bytes at "file_ofs" of the
+ * multiboot module image into "buf".  Returns 0 and sets *out_actual
+ * to the full size on success, -1 if the request runs past the module.
+ * NOTE(review): the bounds test adds before comparing; confirm
+ * file_ofs/size from the ELF headers cannot overflow vm_offset_t.
+ */
+static int
+boot_read(void *handle, vm_offset_t file_ofs, void *buf, vm_size_t size,
+ vm_size_t *out_actual)
+{
+ struct multiboot_module *mod = handle;
+
+ if (mod->mod_start + file_ofs + size > mod->mod_end)
+ return -1;
+
+ memcpy(buf, (const char*) phystokv (mod->mod_start) + file_ofs, size);
+ *out_actual = size;
+ return 0;
+}
+
+/*
+ * exec_load() section-load callback: allocate the section's pages in the
+ * current task's map, copy "file_size" bytes from the multiboot module at
+ * "file_ofs" to user address "mem_addr", and apply the section protection.
+ * Returns 0 on success, -1 if the file range runs past the module.
+ */
+static int
+read_exec(void *handle, vm_offset_t file_ofs, vm_size_t file_size,
+ vm_offset_t mem_addr, vm_size_t mem_size,
+ exec_sectype_t sec_type)
+{
+ struct multiboot_module *mod = handle;
+
+ vm_map_t user_map = current_task()->map;
+ vm_offset_t start_page, end_page;
+ vm_prot_t mem_prot = sec_type & EXEC_SECTYPE_PROT_MASK;
+ int err;
+
+ if (mod->mod_start + file_ofs + file_size > mod->mod_end)
+ return -1;
+
+ /* Non-allocated sections (e.g. debug info) need no VM work. */
+ if (!(sec_type & EXEC_SECTYPE_ALLOC))
+ return 0;
+
+ assert(mem_size > 0);
+ assert(mem_size >= file_size);
+
+ start_page = trunc_page(mem_addr);
+ end_page = round_page(mem_addr + mem_size);
+
+#if 0
+ printf("reading bootstrap section %08x-%08x-%08x prot %d pages %08x-%08x\n",
+ mem_addr, mem_addr+file_size, mem_addr+mem_size, mem_prot, start_page, end_page);
+#endif
+
+ /* anywhere=FALSE: the section must land at its linked address. */
+ err = vm_allocate(user_map, &start_page, end_page - start_page, FALSE);
+ assert(err == 0);
+ assert(start_page == trunc_page(mem_addr));
+
+ if (file_size > 0)
+ {
+ err = copyout((char *)phystokv (mod->mod_start) + file_ofs,
+ (void *)mem_addr, file_size);
+ assert(err == 0);
+ }
+
+ /* Drop write/execute as the section's flags demand. */
+ if (mem_prot != VM_PROT_ALL)
+ {
+ err = vm_protect(user_map, start_page, end_page - start_page, FALSE, mem_prot);
+ assert(err == 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Load the bootstrap executable "e" (a multiboot module handle) into the
+ * current task using the boot_read/read_exec callbacks, filling in
+ * boot_exec_info (entry point etc.).  Panics on any load failure, since
+ * there is no system without a bootstrap task.
+ */
+static void copy_bootstrap(void *e, exec_info_t *boot_exec_info)
+{
+ /* vm_map_t user_map = current_task()->map; */
+ int err;
+
+ if ((err = exec_load(boot_read, read_exec, e, boot_exec_info)))
+ panic("Cannot load user-bootstrap image: error code %d", err);
+
+#if MACH_KDB
+ /*
+ * Enter the bootstrap symbol table.
+ */
+
+#if 0 /*XXX*/
+ if (load_bootstrap_symbols)
+ (void) X_db_sym_init(
+ (char*) boot_start+lp->sym_offset,
+ (char*) boot_start+lp->sym_offset+lp->sym_size,
+ "bootstrap",
+ (char *) user_map);
+#endif
+
+#if 0 /*XXX*/
+ if (load_fault_in_text)
+ {
+ vm_offset_t lenp = round_page(lp->text_start+lp->text_size) -
+ trunc_page(lp->text_start);
+ vm_offset_t i = 0;
+
+ while (i < lenp)
+ {
+ vm_fault(user_map, text_page_start +i,
+ load_protect_text ?
+ VM_PROT_READ|VM_PROT_EXECUTE :
+ VM_PROT_READ|VM_PROT_EXECUTE | VM_PROT_WRITE,
+ 0,0,0);
+ i = round_page (i+1);
+ }
+ }
+#endif
+#endif /* MACH_KDB */
+}
+
+/*
+ * Allocate the stack, and build the argument list.
+ */
+/*
+ * Allocate a user stack for the new task and lay out an argument block
+ * on it:
+ *     [ argc | argv[0..n] | NULL | envp[0..m] | NULL | strings... ]
+ * All pointers are stored as rpc_vm_offset_t (user-visible width).
+ * argv must be NULL-terminated; envp may be NULL for "no environment".
+ */
+static void
+build_args_and_stack(struct exec_info *boot_exec_info,
+ char **argv, char **envp)
+{
+ vm_offset_t stack_base;
+ vm_size_t stack_size;
+ char * arg_ptr;
+ long arg_count, envc;
+ int arg_len;
+ char * arg_pos;
+ int arg_item_len;
+ char * string_pos;
+ rpc_vm_offset_t zero = 0;
+ int i;
+
+#define STACK_SIZE (2*64*1024)
+
+ /*
+ * Calculate the size of the argument list.
+ */
+ arg_len = 0;
+ arg_count = 0;
+ while (argv[arg_count] != 0) {
+ arg_ptr = argv[arg_count++];
+ arg_len += strlen(arg_ptr) + 1;
+ }
+ envc = 0;
+ if (envp != 0)
+ while (envp[envc] != 0)
+ arg_len += strlen (envp[envc++]) + 1;
+
+ /*
+ * Add space for:
+ * arg count
+ * pointers to arguments
+ * trailing 0 pointer
+ * pointers to environment variables
+ * trailing 0 pointer
+ */
+ arg_len += (sizeof(rpc_vm_offset_t)
+ + (arg_count + 1 + envc + 1) * sizeof(rpc_vm_offset_t));
+
+ /*
+ * Allocate the stack.
+ */
+ stack_size = round_page(STACK_SIZE);
+ stack_base = user_stack_low(stack_size);
+
+ /* anywhere=FALSE: the stack must sit at the machine's canonical
+  * user stack location; allocation failure is ignored here. */
+ (void) vm_allocate(current_task()->map,
+ &stack_base,
+ stack_size,
+ FALSE);
+
+ /* set_user_regs() reserves arg_len bytes on the stack, points the
+  * thread's SP/PC appropriately, and returns where the block goes. */
+ arg_pos = (char *)
+ set_user_regs(stack_base, stack_size, boot_exec_info, arg_len);
+
+ /*
+ * Start the strings after the arg-count and pointers
+ */
+ string_pos = (arg_pos
+ + sizeof(rpc_vm_offset_t)
+ + (arg_count + 1 + envc + 1) * sizeof(rpc_vm_offset_t));
+
+ /*
+ * first the argument count
+ */
+ (void) copyout(&arg_count,
+ arg_pos,
+ sizeof(rpc_vm_offset_t));
+ arg_pos += sizeof(rpc_vm_offset_t);
+
+ /*
+ * Then the strings and string pointers for each argument
+ */
+ for (i = 0; i < arg_count; ++i) {
+ rpc_vm_offset_t pos = convert_vm_to_user((vm_offset_t) string_pos);
+ arg_ptr = argv[i];
+ arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */
+
+ /* set string pointer */
+ (void) copyout(&pos, arg_pos, sizeof (rpc_vm_offset_t));
+ arg_pos += sizeof(rpc_vm_offset_t);
+
+ /* copy string */
+ (void) copyout(arg_ptr, string_pos, arg_item_len);
+ string_pos += arg_item_len;
+ }
+
+ /*
+ * Null terminator for argv.
+ */
+ (void) copyout(&zero, arg_pos, sizeof(rpc_vm_offset_t));
+ arg_pos += sizeof(rpc_vm_offset_t);
+
+ /*
+ * Then the strings and string pointers for each environment variable
+ */
+ for (i = 0; i < envc; ++i) {
+ rpc_vm_offset_t pos = convert_vm_to_user((vm_offset_t) string_pos);
+ arg_ptr = envp[i];
+ arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */
+
+ /* set string pointer */
+ (void) copyout(&pos, arg_pos, sizeof (rpc_vm_offset_t));
+ arg_pos += sizeof(rpc_vm_offset_t);
+
+ /* copy string */
+ (void) copyout(arg_ptr, string_pos, arg_item_len);
+ string_pos += arg_item_len;
+ }
+
+ /*
+ * Null terminator for envp.
+ */
+ (void) copyout(&zero, arg_pos, sizeof(rpc_vm_offset_t));
+}
+
+
+/*
+ * Kernel-mode half of the first user thread (old-style bootstrap):
+ * load the bootstrap image, convert the pre-inserted host/device port
+ * names to decimal strings, build the argv/envp block, and drop to
+ * user mode.  Never returns.
+ */
+static void
+user_bootstrap_compat(void)
+{
+ exec_info_t boot_exec_info;
+
+ /* NOTE(review): 12 bytes holds a 10-digit (32-bit) port name plus
+  * NUL; confirm port names cannot need more digits here. */
+ char host_string[12];
+ char device_string[12];
+ char flag_string[1024];
+ char root_string[1024];
+
+ /*
+ * Copy the bootstrap code from boot_exec into the user task.
+ */
+ copy_bootstrap(current_thread()->saved.other, &boot_exec_info);
+
+ /*
+ * Convert the host and device ports to strings,
+ * to put in the argument list.
+ */
+ itoa(host_string, boot_host_port);
+ itoa(device_string, boot_device_port);
+
+ /*
+ * Get the (compatibility) boot flags and root name strings.
+ */
+ get_compat_strings(flag_string, root_string);
+
+ /*
+ * Build the argument list and insert in the user task.
+ * Argument list is
+ * "bootstrap -<boothowto> <host_port> <device_port> <root_name>"
+
+$0 ${boot-args} ${host-port} ${device-port} ${root-device} $(task-create) $(task-resume)
+
+ */
+ {
+ char *argv[] = { "bootstrap",
+ flag_string,
+ host_string,
+ device_string,
+ root_string,
+ 0 };
+ char *envp[] = { 0, 0 };
+ /* Pass the raw multiboot command line as MULTIBOOT_CMDLINE=... */
+ if (kernel_cmdline[0] != '\0')
+ {
+ static const char cmdline_var[] = "MULTIBOOT_CMDLINE=";
+ envp[0] = alloca (sizeof cmdline_var + strlen (kernel_cmdline));
+ memcpy (envp[0], cmdline_var, sizeof cmdline_var - 1);
+ strcpy (envp[0] + sizeof cmdline_var - 1, kernel_cmdline);
+ }
+ build_args_and_stack(&boot_exec_info, argv, envp);
+ }
+
+ /*
+ * Exit to user thread.
+ */
+ thread_bootstrap_return();
+ /*NOTREACHED*/
+}
+
+
+/*
+ * Handoff record between boot_script_exec_cmd() and the new task's
+ * first thread (user_bootstrap); lives on the creator's stack.
+ */
+struct user_bootstrap_info
+{
+ struct multiboot_module *mod; /* module image to exec */
+ char **argv; /* NULL-terminated argument vector */
+ int done; /* set by user_bootstrap when finished with this */
+ decl_simple_lock_data(,lock) /* guards "done"; sleep/wakeup channel */
+};
+
+/*
+ * Boot-script callback: start "path" (the multiboot module "hook")
+ * running in "task".  Creates the task's first thread, points it at
+ * user_bootstrap via saved.other, and blocks until that thread has
+ * finished reading the stack-allocated info record.  Always returns 0.
+ */
+int
+boot_script_exec_cmd (void *hook, task_t task, char *path, int argc,
+ char **argv, char *strings, int stringlen)
+{
+ struct multiboot_module *mod = hook;
+
+ int err;
+
+ if (task != MACH_PORT_NULL)
+ {
+ thread_t thread;
+ struct user_bootstrap_info info = { mod, argv, 0, };
+ simple_lock_init (&info.lock);
+
+ err = thread_create ((task_t)task, &thread);
+ assert(err == 0);
+ /* Hold the lock before starting the thread so the wait loop
+  * below cannot miss its wakeup. */
+ simple_lock (&info.lock);
+ thread->saved.other = &info;
+ thread_start (thread, user_bootstrap);
+ err = thread_resume (thread);
+ assert(err == 0);
+
+ /* We need to synchronize with the new thread and block this
+ main thread until it has finished referring to our local state. */
+ while (! info.done)
+ {
+ thread_sleep ((event_t) &info, simple_lock_addr(info.lock), FALSE);
+ simple_lock (&info.lock);
+ }
+ simple_unlock (&info.lock);
+ thread_deallocate (thread);
+ printf ("\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Kernel-mode half of a boot-script task's first thread: load the
+ * module's executable, build its stack/arguments, suspend the task
+ * (the script resumes it later via task-resume), signal the creator
+ * that the shared info record is no longer needed, and drop to user
+ * mode.  Never returns.
+ */
+static void user_bootstrap(void)
+{
+ struct user_bootstrap_info *info = current_thread()->saved.other;
+ exec_info_t boot_exec_info;
+ int err;
+ char **av;
+
+ /* Load this task up from the executable file in the module. */
+ err = exec_load(boot_read, read_exec, info->mod, &boot_exec_info);
+ if (err)
+ panic ("Cannot load user executable module (error code %d): %s",
+ err, info->argv[0]);
+
+ printf ("task loaded:");
+
+ /* Set up the stack with arguments. */
+ build_args_and_stack(&boot_exec_info, info->argv, 0);
+
+ for (av = info->argv; *av != 0; ++av)
+ printf (" %s", *av);
+
+ task_suspend (current_task());
+
+ /* Tell the bootstrap thread running boot_script_exec_cmd
+ that we are done looking at INFO. */
+ simple_lock (&info->lock);
+ assert (!info->done);
+ info->done = 1;
+ simple_unlock (&info->lock);
+ thread_wakeup ((event_t) info);
+
+ /*
+ * Exit to user thread.
+ */
+ thread_bootstrap_return();
+ /*NOTREACHED*/
+}
+
+
+
+/* Boot-script allocator hook: back script allocations with kalloc(). */
+void *
+boot_script_malloc (unsigned int size)
+{
+ return (void *) kalloc (size);
+}
+
+/* Boot-script allocator hook: release a boot_script_malloc() block.
+ * kfree() needs the original size, which the script interface supplies. */
+void
+boot_script_free (void *ptr, unsigned int size)
+{
+ kfree ((vm_offset_t)ptr, size);
+}
+
+/* Boot-script "$(task-create)": make an empty, suspended task for CMD
+ * and name it after the command's path.  Returns 0 or
+ * BOOT_SCRIPT_MACH_ERROR on task_create_kernel failure. */
+int
+boot_script_task_create (struct cmd *cmd)
+{
+ kern_return_t rc = task_create_kernel(TASK_NULL, FALSE, &cmd->task);
+ if (rc)
+ {
+ printf("boot_script_task_create failed with %x\n", rc);
+ return BOOT_SCRIPT_MACH_ERROR;
+ }
+ task_set_name(cmd->task, cmd->path);
+ return 0;
+}
+
+/* Boot-script "$(task-resume)": let CMD's task start running.
+ * Returns 0 or BOOT_SCRIPT_MACH_ERROR on task_resume failure. */
+int
+boot_script_task_resume (struct cmd *cmd)
+{
+ kern_return_t rc = task_resume (cmd->task);
+ if (rc)
+ {
+ printf("boot_script_task_resume failed with %x\n", rc);
+ return BOOT_SCRIPT_MACH_ERROR;
+ }
+ printf ("\nstart %s: ", cmd->path);
+ return 0;
+}
+
+/* Boot-script "$(prompt-task-resume)": pause until the operator
+ * acknowledges (console input, or the kernel debugger when built with
+ * MACH_KDB), then resume CMD's task. */
+int
+boot_script_prompt_task_resume (struct cmd *cmd)
+{
+#if ! MACH_KDB
+ char xx[5];
+#endif
+
+ printf ("Pausing for %s...\n", cmd->path);
+
+#if ! MACH_KDB
+ printf ("Hit <return> to resume bootstrap.");
+ safe_gets (xx, sizeof xx);
+#else
+ SoftDebugger("Hit `c<return>' to resume bootstrap.");
+#endif
+
+ return boot_script_task_resume (cmd);
+}
+
+/* Boot-script cleanup hook: drop the script's reference on TASK,
+ * terminating it first if the script is aborting. */
+void
+boot_script_free_task (task_t task, int aborting)
+{
+ if (aborting)
+ task_terminate (task);
+ task_deallocate (task);
+}
+
+/* Boot-script hook: insert a send right for PORT into CMD's task and
+ * report the resulting name in *NAME.  Always returns 0. */
+int
+boot_script_insert_right (struct cmd *cmd, mach_port_t port, mach_port_name_t *name)
+{
+ *name = task_insert_send_right (cmd->task,
+ ipc_port_make_send((ipc_port_t) port));
+ return 0;
+}
+
+/* Boot-script hook: give CMD's task a send right to TASK's own task
+ * port (itk_sself) and report the resulting name in *NAME. */
+int
+boot_script_insert_task_port (struct cmd *cmd, task_t task, mach_port_name_t *name)
+{
+ *name = task_insert_send_right (cmd->task,
+ ipc_port_make_send(task->itk_sself));
+ return 0;
+}
diff --git a/kern/bootstrap.h b/kern/bootstrap.h
new file mode 100644
index 0000000..309a63f
--- /dev/null
+++ b/kern/bootstrap.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_BOOTSTRAP_H_
+#define _KERN_BOOTSTRAP_H_
+
+#include <kern/boot_script.h>
+
+void bootstrap_create(void);
+
+#endif /* _KERN_BOOTSTRAP_H_ */
diff --git a/kern/counters.c b/kern/counters.c
new file mode 100644
index 0000000..0a0665b
--- /dev/null
+++ b/kern/counters.c
@@ -0,0 +1,82 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <kern/counters.h>
+
+/*
+ * We explicitly initialize the counters to make
+ * them contiguous in the kernel's data space.
+ * This makes them easier to examine with ddb.
+ */
+
+#if MACH_COUNTERS
+mach_counter_t c_thread_invoke_hits = 0;
+mach_counter_t c_thread_invoke_misses = 0;
+mach_counter_t c_thread_invoke_csw = 0;
+mach_counter_t c_thread_handoff_hits = 0;
+mach_counter_t c_thread_handoff_misses = 0;
+mach_counter_t c_threads_current = 0;
+mach_counter_t c_threads_max = 0;
+mach_counter_t c_threads_min = 0;
+mach_counter_t c_threads_total = 0;
+mach_counter_t c_stacks_current = 0;
+mach_counter_t c_stacks_max = 0;
+mach_counter_t c_stacks_min = 0;
+mach_counter_t c_stacks_total = 0;
+mach_counter_t c_stack_alloc_hits = 0;
+mach_counter_t c_stack_alloc_misses = 0;
+mach_counter_t c_stack_alloc_max = 0;
+mach_counter_t c_clock_ticks = 0;
+mach_counter_t c_ipc_mqueue_send_block = 0;
+mach_counter_t c_ipc_mqueue_receive_block_user = 0;
+mach_counter_t c_ipc_mqueue_receive_block_kernel = 0;
+mach_counter_t c_mach_msg_trap_block_fast = 0;
+mach_counter_t c_mach_msg_trap_block_slow = 0;
+mach_counter_t c_mach_msg_trap_block_exc = 0;
+mach_counter_t c_exception_raise_block = 0;
+mach_counter_t c_swtch_block = 0;
+mach_counter_t c_swtch_pri_block = 0;
+mach_counter_t c_thread_switch_block = 0;
+mach_counter_t c_thread_switch_handoff = 0;
+mach_counter_t c_ast_taken_block = 0;
+mach_counter_t c_thread_halt_self_block = 0;
+mach_counter_t c_vm_fault_page_block_busy_user = 0;
+mach_counter_t c_vm_fault_page_block_busy_kernel = 0;
+mach_counter_t c_vm_fault_page_block_backoff_user = 0;
+mach_counter_t c_vm_fault_page_block_backoff_kernel = 0;
+mach_counter_t c_vm_page_wait_block_user = 0;
+mach_counter_t c_vm_page_wait_block_kernel = 0;
+mach_counter_t c_vm_pageout_block = 0;
+mach_counter_t c_vm_pageout_scan_block = 0;
+mach_counter_t c_idle_thread_block = 0;
+mach_counter_t c_idle_thread_handoff = 0;
+mach_counter_t c_sched_thread_block = 0;
+mach_counter_t c_io_done_thread_block = 0;
+mach_counter_t c_net_thread_block = 0;
+mach_counter_t c_reaper_thread_block = 0;
+mach_counter_t c_swapin_thread_block = 0;
+mach_counter_t c_action_thread_block = 0;
+#endif /* MACH_COUNTERS */
diff --git a/kern/counters.h b/kern/counters.h
new file mode 100644
index 0000000..aa1e739
--- /dev/null
+++ b/kern/counters.h
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_COUNTERS_
+#define _KERN_COUNTERS_
+
+/*
+ * We can count various interesting events and paths.
+ *
+ * Use counter() to change the counters, eg:
+ * counter(c_idle_thread_block++);
+ * Use counter_always() for non-conditional counters.
+ */
+
+#define counter_always(code) code
+
+#if MACH_COUNTERS
+
+#define counter(code) counter_always(code)
+
+#else /* MACH_COUNTERS */
+
+#define counter(code)
+
+#endif /* MACH_COUNTERS */
+
+/*
+ * We define the counters with individual integers,
+ * instead of a big structure, so that ddb
+ * will know the addresses of the counters.
+ */
+
+typedef unsigned int mach_counter_t;
+
+#if MACH_COUNTERS
+extern mach_counter_t c_thread_invoke_hits;
+extern mach_counter_t c_thread_invoke_misses;
+extern mach_counter_t c_thread_invoke_csw;
+extern mach_counter_t c_thread_handoff_hits;
+extern mach_counter_t c_thread_handoff_misses;
+extern mach_counter_t c_threads_current;
+extern mach_counter_t c_threads_max;
+extern mach_counter_t c_threads_min;
+extern mach_counter_t c_threads_total;
+extern mach_counter_t c_stacks_current;
+extern mach_counter_t c_stacks_max;
+extern mach_counter_t c_stacks_min;
+extern mach_counter_t c_stacks_total;
+extern mach_counter_t c_stack_alloc_hits;
+extern mach_counter_t c_stack_alloc_misses;
+extern mach_counter_t c_stack_alloc_max;
+extern mach_counter_t c_clock_ticks;
+extern mach_counter_t c_ipc_mqueue_send_block;
+extern mach_counter_t c_ipc_mqueue_receive_block_user;
+extern mach_counter_t c_ipc_mqueue_receive_block_kernel;
+extern mach_counter_t c_mach_msg_trap_block_fast;
+extern mach_counter_t c_mach_msg_trap_block_slow;
+extern mach_counter_t c_mach_msg_trap_block_exc;
+extern mach_counter_t c_exception_raise_block;
+extern mach_counter_t c_swtch_block;
+extern mach_counter_t c_swtch_pri_block;
+extern mach_counter_t c_thread_switch_block;
+extern mach_counter_t c_thread_switch_handoff;
+extern mach_counter_t c_ast_taken_block;
+extern mach_counter_t c_thread_halt_self_block;
+extern mach_counter_t c_vm_fault_page_block_busy_user;
+extern mach_counter_t c_vm_fault_page_block_busy_kernel;
+extern mach_counter_t c_vm_fault_page_block_backoff_user;
+extern mach_counter_t c_vm_fault_page_block_backoff_kernel;
+extern mach_counter_t c_vm_page_wait_block_user;
+extern mach_counter_t c_vm_page_wait_block_kernel;
+extern mach_counter_t c_vm_pageout_block;
+extern mach_counter_t c_vm_pageout_scan_block;
+extern mach_counter_t c_idle_thread_block;
+extern mach_counter_t c_idle_thread_handoff;
+extern mach_counter_t c_sched_thread_block;
+extern mach_counter_t c_io_done_thread_block;
+extern mach_counter_t c_net_thread_block;
+extern mach_counter_t c_reaper_thread_block;
+extern mach_counter_t c_swapin_thread_block;
+extern mach_counter_t c_action_thread_block;
+#endif /* MACH_COUNTERS */
+
+#endif /* _KERN_COUNTERS_ */
diff --git a/kern/cpu_number.h b/kern/cpu_number.h
new file mode 100644
index 0000000..1abe3db
--- /dev/null
+++ b/kern/cpu_number.h
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_CPU_NUMBER_H_
+#define _KERN_CPU_NUMBER_H_
+
+#include <machine/cpu_number.h>
+
+/*
+ * Definitions for cpu identification in multi-processors.
+ */
+
+extern int master_cpu; /* 'master' processor - keeps time */
+
+#if (NCPUS == 1)
+ /* cpu number is always 0 on a single processor system */
+#define cpu_number() (0)
+#define cpu_number_slow() (0)
+
+#endif /* NCPUS == 1 */
+
+#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
+
+#endif /* _KERN_CPU_NUMBER_H_ */
diff --git a/kern/debug.c b/kern/debug.c
new file mode 100644
index 0000000..eec2f14
--- /dev/null
+++ b/kern/debug.c
@@ -0,0 +1,207 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/xen.h>
+
+#include <kern/printf.h>
+#include <stdarg.h>
+
+#include "cpu_number.h"
+#include <kern/lock.h>
+#include <kern/thread.h>
+
+#include <kern/debug.h>
+
+#include <machine/loose_ends.h>
+#include <machine/model_dep.h>
+
+#include <device/cons.h>
+
+#if NCPUS>1
+simple_lock_irq_data_t Assert_print_lock; /* uninited, we take our chances */
+#endif
+
+/* Character sink for _doprnt(): write one character to the console.
+ * "offset" is unused; it exists to match the _doprnt putc signature. */
+static void
+do_cnputc(char c, vm_offset_t offset)
+{
+ cnputc(c);
+}
+
+/*
+ * Report a failed assertion (exp, at file:line in fun) and enter the
+ * debugger.  On SMP the message is serialized with Assert_print_lock
+ * and tagged with the failing CPU.
+ */
+void
+Assert(const char *exp, const char *file, int line, const char *fun)
+{
+#if NCPUS > 1
+ spl_t s = simple_lock_irq(&Assert_print_lock);
+ printf("{cpu%d} %s:%d: %s: Assertion `%s' failed.",
+ cpu_number(), file, line, fun, exp);
+ simple_unlock_irq(s, &Assert_print_lock);
+#else
+ printf("%s:%d: %s: Assertion `%s' failed.",
+ file, line, fun, exp);
+#endif
+
+ Debugger("assertion failure");
+}
+
+/*
+ * Enter the kernel debugger if one is compiled in, using the
+ * architecture's breakpoint mechanism; otherwise print the message and
+ * return so the caller can continue.
+ */
+void SoftDebugger(const char *message)
+{
+ printf("Debugger invoked: %s\n", message);
+
+#if !MACH_KDB
+ printf("But no debugger, continuing.\n");
+ return;
+#endif
+
+ /* Per-architecture breakpoint traps follow; only one block applies. */
+#if defined(vax) || defined(PC532)
+ asm("bpt");
+#endif /* vax */
+
+#ifdef sun3
+ current_thread()->pcb->flag |= TRACE_KDB;
+ asm("orw #0x00008000,sr");
+#endif /* sun3 */
+#ifdef sun4
+ current_thread()->pcb->pcb_flag |= TRACE_KDB;
+ asm("ta 0x81");
+#endif /* sun4 */
+
+#if defined(mips ) || defined(i860) || defined(alpha)
+ gimmeabreak();
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+ asm("int3");
+#endif
+}
+
+/*
+ * Enter the kernel debugger and never return: without MACH_KDB we
+ * panic immediately; with it, returning from the debugger is itself
+ * a panic (declared noreturn in debug.h).
+ */
+void Debugger(const char *message)
+{
+#if !MACH_KDB
+ panic("Debugger invoked, but there isn't one!");
+#endif
+
+ SoftDebugger(message);
+
+ panic("Debugger returned!");
+}
+
+/* Be prepared to panic anytime,
+ even before panic_init() gets called from the "normal" place in kern/startup.c.
+ (panic_init() still needs to be called from there
+ to make sure we get initialized before starting multiple processors.) */
+def_simple_lock_irq_data(static, panic_lock)
+
+const char *panicstr;
+int paniccpu;
+
+/*
+ * Historically initialized panic state; now empty because panic_lock
+ * is statically initialized (def_simple_lock_irq_data above), but kept
+ * so the call from kern/startup.c still links.
+ */
+void
+panic_init(void)
+{
+}
+
+#if ! MACH_KBD
+extern boolean_t reboot_on_panic;
+#endif
+
+/*VARARGS1*/
+/*
+ * Kernel panic: print "panic file:line: fun: <formatted message>" and
+ * stop the system.  The first CPU to panic records panicstr/paniccpu
+ * under panic_lock; any other CPU that panics afterwards halts itself
+ * silently.  Afterwards: enter the debugger (MACH_KDB), crash the
+ * hypervisor domain (MACH_HYP), or delay ~1000s so the operator can
+ * read the message, then halt/reboot per reboot_on_panic.
+ */
+void
+Panic(const char *file, int line, const char *fun, const char *s, ...)
+{
+ va_list listp;
+ spl_t spl;
+
+ panic_init();
+
+ spl = simple_lock_irq(&panic_lock);
+ if (panicstr) {
+ /* Nested/concurrent panic: only the first CPU reports. */
+ if (cpu_number() != paniccpu) {
+ simple_unlock_irq(spl, &panic_lock);
+ halt_cpu();
+ /* NOTREACHED */
+ }
+ }
+ else {
+ panicstr = s;
+ paniccpu = cpu_number();
+ }
+ simple_unlock_irq(spl, &panic_lock);
+ printf("panic ");
+#if NCPUS > 1
+ printf("{cpu%d} ", paniccpu);
+#endif
+ printf("%s:%d: %s: ",file, line, fun);
+ va_start(listp, s);
+ _doprnt(s, listp, do_cnputc, 16, 0);
+ va_end(listp);
+ printf("\n");
+
+#if MACH_KDB
+ Debugger("panic");
+#else
+# ifdef MACH_HYP
+ hyp_crash();
+# else
+ /* Give the user time to see the message */
+ {
+ int i = 1000; /* seconds */
+ while (i--)
+ delay (1000000); /* microseconds */
+ }
+
+ halt_all_cpus (reboot_on_panic);
+# endif /* MACH_HYP */
+#endif
+}
+
+/*
+ * We'd like to use BSD's log routines here...
+ */
+/*VARARGS2*/
+/*
+ * Minimal kernel log: format straight to the console via _doprnt.
+ * "level" is currently ignored (see the BSD-log note above).
+ */
+void
+log(int level, const char *fmt, ...)
+{
+ va_list listp;
+
+ va_start(listp, fmt);
+ _doprnt(fmt, listp, do_cnputc, 16, 0);
+ va_end(listp);
+}
+
+/* GCC references this for stack protection. */
+/* The guard's last bytes are NUL (implicit zero init), CR, LF and 0xff:
+ * terminator characters that make the guard hard to overwrite or leak
+ * via string operations. */
+unsigned char __stack_chk_guard [ sizeof (vm_offset_t) ] =
+{
+ [ sizeof (vm_offset_t) - 3 ] = '\r',
+ [ sizeof (vm_offset_t) - 2 ] = '\n',
+ [ sizeof (vm_offset_t) - 1 ] = 0xff,
+};
+
+void __stack_chk_fail (void);
+
+/* Called by compiler-emitted epilogue code when the guard was clobbered. */
+void
+__stack_chk_fail (void)
+{
+ panic("stack smashing detected");
+}
diff --git a/kern/debug.h b/kern/debug.h
new file mode 100644
index 0000000..1a5cd07
--- /dev/null
+++ b/kern/debug.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: debug.h
+ * Author: Bryan Ford
+ *
+ * This file contains definitions for kernel debugging,
+ * which are compiled in on the DEBUG symbol.
+ *
+ */
+#ifndef _mach_debug__debug_
+#define _mach_debug__debug_
+
+#include <kern/assert.h> /*XXX*/
+
+#ifndef NDEBUG
+
+#define here() printf("@ %s:%d\n", __FILE__, __LINE__)
+#define message(args) ({ printf("@ %s:%d: ", __FILE__, __LINE__); printf args; printf("\n"); })
+
+#define otsan() panic("%s:%d: off the straight and narrow!", __FILE__, __LINE__)
+
+#define struct_id_decl unsigned struct_id;
+#define struct_id_init(p,id) ((p)->struct_id = (id))
+#define struct_id_denit(p) ((p)->struct_id = 0)
+#define struct_id_verify(p,id) \
+ ({ if ((p)->struct_id != (id)) \
+ panic("%s:%d: "#p" (%08x) struct_id should be "#id" (%08x), is %08x\n", \
+ __FILE__, __LINE__, (p), (id), (p->struct_id)); \
+ })
+
+#else /* NDEBUG */
+
+#define otsan()
+
+#define struct_id_decl
+#define struct_id_init(p,id)
+#define struct_id_denit(p)
+#define struct_id_verify(p,id)
+
+#endif /* NDEBUG */
+
+extern void log (int level, const char *fmt, ...);
+
+extern void panic_init(void);
+extern void Panic (const char *file, int line, const char *fun,
+ const char *s, ...)
+ __attribute__ ((noreturn, format (printf, 4, 5)));
+#define panic(s, ...) \
+ Panic (__FILE__, __LINE__, __FUNCTION__, s, ##__VA_ARGS__)
+
+extern void SoftDebugger (const char *message);
+extern void Debugger (const char *message) __attribute__ ((noreturn));
+
+#endif /* _mach_debug__debug_ */
diff --git a/kern/elf-load.c b/kern/elf-load.c
new file mode 100644
index 0000000..ce86327
--- /dev/null
+++ b/kern/elf-load.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#include <alloca.h>
+#include <mach/machine/vm_types.h>
+#include <mach/exec/elf.h>
+#include <mach/exec/exec.h>
+
+/*
+ * Generic ELF loader: read the ELF header via "read", validate magic,
+ * class/endianness/machine, then hand every PT_LOAD program header to
+ * "read_exec" with its EXEC_SECTYPE_* flags.  Fills out_info->entry
+ * (biased by 128 MiB for ET_DYN/ET_REL images, leaving room for mmaps).
+ * Returns 0 or an EX_* / callback error code.
+ */
+int exec_load(exec_read_func_t *read, exec_read_exec_func_t *read_exec,
+ void *handle, exec_info_t *out_info)
+{
+ vm_size_t actual;
+ Elf_Ehdr x;
+ Elf_Phdr *phdr, *ph;
+ vm_size_t phsize;
+ int i;
+ int result;
+ vm_offset_t loadbase = 0;
+
+ /* Read the ELF header. */
+ if ((result = (*read)(handle, 0, &x, sizeof(x), &actual)) != 0)
+ return result;
+ if (actual < sizeof(x))
+ return EX_NOT_EXECUTABLE;
+
+ if ((x.e_ident[EI_MAG0] != ELFMAG0) ||
+ (x.e_ident[EI_MAG1] != ELFMAG1) ||
+ (x.e_ident[EI_MAG2] != ELFMAG2) ||
+ (x.e_ident[EI_MAG3] != ELFMAG3))
+ return EX_NOT_EXECUTABLE;
+
+ /* Make sure the file is of the right architecture. */
+ if ((x.e_ident[EI_CLASS] != MY_ELF_CLASS) ||
+ (x.e_ident[EI_DATA] != MY_EI_DATA) ||
+ (x.e_machine != MY_E_MACHINE))
+ return EX_WRONG_ARCH;
+
+ /* Leave room for mmaps etc. before PIE binaries.
+ * Could add address randomization here. */
+ if (x.e_type == ET_DYN || x.e_type == ET_REL)
+ loadbase = 128 << 20;
+
+ /* XXX others */
+ out_info->entry = (vm_offset_t) x.e_entry + loadbase;
+
+ /* NOTE(review): phsize comes from the (untrusted) header and is
+  * alloca'd on the kernel stack — confirm e_phnum/e_phentsize are
+  * bounded for the images this can be handed. */
+ phsize = x.e_phnum * x.e_phentsize;
+ phdr = (Elf_Phdr *)alloca(phsize);
+
+ result = (*read)(handle, x.e_phoff, phdr, phsize, &actual);
+ if (result)
+ return result;
+ if (actual < phsize)
+ return EX_CORRUPT;
+
+ /* Walk the program headers by e_phentsize, loading each PT_LOAD. */
+ for (i = 0; i < x.e_phnum; i++)
+ {
+ ph = (Elf_Phdr *)((vm_offset_t)phdr + i * x.e_phentsize);
+ if (ph->p_type == PT_LOAD)
+ {
+ exec_sectype_t type = EXEC_SECTYPE_ALLOC |
+ EXEC_SECTYPE_LOAD;
+ if (ph->p_flags & PF_R) type |= EXEC_SECTYPE_READ;
+ if (ph->p_flags & PF_W) type |= EXEC_SECTYPE_WRITE;
+ if (ph->p_flags & PF_X) type |= EXEC_SECTYPE_EXECUTE;
+
+ result = (*read_exec)(handle,
+ ph->p_offset, ph->p_filesz,
+ ph->p_vaddr + loadbase, ph->p_memsz, type);
+ if (result)
+ return result;
+ }
+ }
+
+ return 0;
+}
+
diff --git a/kern/eventcount.c b/kern/eventcount.c
new file mode 100644
index 0000000..1cbc15a
--- /dev/null
+++ b/kern/eventcount.c
@@ -0,0 +1,361 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: eventcount.c
+ * Author: Alessandro Forin
+ * Date: 10/91
+ *
+ * Eventcounters, for user-level drivers synchronization
+ *
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/machine.h>
+#include <kern/ast.h>
+#include <kern/debug.h>
+#include "cpu_number.h"
+#include <kern/lock.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+
+#include <machine/machspl.h> /* For def'n of splsched() */
+
+#include <kern/eventcount.h>
+
+#define MAX_EVCS 10 /* xxx for now */
+evc_t all_eventcounters[MAX_EVCS];
+
+/*
+ * Initialization
+ */
+void
+evc_init(evc_t ev)
+{
+ int i;
+
+ memset(ev, 0, sizeof(*ev));
+
+ /* keep track of who is who */
+ for (i = 0; i < MAX_EVCS; i++)
+ if (all_eventcounters[i] == 0) break;
+ if (i == MAX_EVCS) {
+ printf("Too many eventcounters\n");
+ return;
+ }
+
+ all_eventcounters[i] = ev;
+ ev->ev_id = i;
+ ev->sanity = ev;
+ ev->waiting_thread = THREAD_NULL;
+ simple_lock_init(&ev->lock);
+}
+
+/*
+ * Finalization
+ */
+void
+evc_destroy(evc_t ev)
+{
+ evc_signal(ev);
+ ev->sanity = 0;
+ if (all_eventcounters[ev->ev_id] == ev)
+ all_eventcounters[ev->ev_id] = 0;
+ ev->ev_id = -1;
+}
+
+/*
+ * Thread termination.
+ * HORRIBLE. This stuff needs to be fixed.
+ */
+void evc_notify_abort(const thread_t thread)
+{
+ int i;
+ evc_t ev;
+ int s = splsched();
+ for (i = 0; i < MAX_EVCS; i++) {
+ ev = all_eventcounters[i];
+ if (ev) {
+ simple_lock(&ev->lock);
+ if (ev->waiting_thread == thread)
+ {
+ ev->waiting_thread = 0;
+ /* Removal of a waiting thread has to bump the count by one */
+ ev->count++;
+ }
+ simple_unlock(&ev->lock);
+ }
+ }
+ splx(s);
+}
+
+/*
+ * Just so that we return success, and give
+ * up the stack while blocked
+ */
+static void __attribute__((noreturn))
+evc_continue(void)
+{
+ thread_syscall_return(KERN_SUCCESS);
+ /* NOTREACHED */
+}
+
+/*
+ * User-trappable
+ */
+kern_return_t evc_wait(natural_t ev_id)
+{
+ spl_t s;
+ kern_return_t ret;
+ evc_t ev;
+
+ if ((ev_id >= MAX_EVCS) ||
+ ((ev = all_eventcounters[ev_id]) == 0) ||
+ (ev->ev_id != ev_id) || (ev->sanity != ev))
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ simple_lock(&ev->lock);
+ /*
+ * The values assumed by the "count" field are
+ * as follows:
+ * 0 At initialization time, and with no
+ * waiting thread means no events pending;
+ * with waiting thread means the event
+ * was signalled and the thread not yet resumed
+ * -1 no events, there must be a waiting thread
+ * N>0 no waiting thread means N pending,
+ * with waiting thread N-1 pending.
+ *
+ */
+ if (ev->count > 0) {
+ ev->count--;
+ ret = KERN_SUCCESS;
+ } else {
+ if (ev->waiting_thread == THREAD_NULL) {
+ ev->count--;
+ ev->waiting_thread = current_thread();
+ assert_wait((event_t) 0, TRUE); /* ifnot race */
+ simple_unlock(&ev->lock);
+ thread_block(evc_continue);
+ return KERN_SUCCESS;
+ }
+ ret = KERN_NO_SPACE; /* XX */
+ }
+ simple_unlock(&ev->lock);
+ splx(s);
+ return ret;
+}
+
+/*
+ * User-trappable
+ */
+kern_return_t evc_wait_clear(natural_t ev_id)
+{
+ spl_t s;
+ evc_t ev;
+
+ if ((ev_id >= MAX_EVCS) ||
+ ((ev = all_eventcounters[ev_id]) == 0) ||
+ (ev->ev_id != ev_id) || (ev->sanity != ev))
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ simple_lock(&ev->lock);
+
+ /*
+ * The values assumed by the "count" field are
+ * as follows:
+ * 0 At initialization time, and with no
+ * waiting thread means no events pending;
+ * with waiting thread means the event
+ * was signalled and the thread not yet resumed
+ * -1 no events, there must be a waiting thread
+ * N>0 no waiting thread means N pending,
+ * with waiting thread N-1 pending.
+ *
+ */
+ /*
+ * Note that we always clear count before blocking.
+ */
+ if (ev->waiting_thread == THREAD_NULL) {
+ ev->count = -1;
+ ev->waiting_thread = current_thread();
+ assert_wait((event_t) 0, TRUE); /* ifnot race */
+ simple_unlock(&ev->lock);
+ thread_block(evc_continue);
+ /* NOTREACHED */
+ }
+
+ simple_unlock(&ev->lock);
+ splx(s);
+ return KERN_NO_SPACE; /* XX */
+}
+
+/*
+ * Called exclusively from interrupt context
+ */
+void
+evc_signal(evc_t ev)
+{
+ volatile thread_t thread;
+ int state;
+ spl_t s;
+ if (ev->sanity != ev)
+ return;
+
+ s = splsched();
+ simple_lock(&ev->lock);
+ ev->count++;
+ if (thread = ev->waiting_thread, thread != THREAD_NULL)
+ {
+ ev->waiting_thread = 0;
+
+#if (NCPUS > 1)
+ retry:
+ while((thread->state & TH_RUN) || thread->lock.lock_data)
+ cpu_pause();
+#endif
+ thread_lock(thread);
+
+ /* make thread runnable on this processor */
+ /* taken from clear_wait */
+ switch ((state = thread->state) & TH_SCHED_STATE)
+ {
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+#if NCPUS > 1
+ thread_setrun(thread, TRUE);
+#else
+ simpler_thread_setrun(thread, TRUE);
+#endif
+ thread_unlock(thread);
+ break;
+
+ case TH_RUN | TH_WAIT:
+#if (NCPUS > 1)
+ /*
+ * Legal on MP: between assert_wait()
+ * and thread_block(), in evc_wait() above.
+ *
+ * Mmm. Maybe don't need now that the while(..) check is
+ * done before the thread lock is grabbed.....
+ */
+ thread_unlock(thread);
+ goto retry;
+#else
+ /*FALLTHROUGH*/
+#endif
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+
+ /*
+ * Either already running, or suspended.
+ * Just clear the wait.
+ */
+ thread->state = state &~ TH_WAIT;
+ thread_unlock(thread);
+ break;
+
+ default:
+ /*
+ * Not waiting.
+ */
+ panic("evc_signal.3");
+ thread_unlock(thread);
+ break;
+ }
+ }
+
+ simple_unlock(&ev->lock);
+ splx(s);
+}
+
+#if NCPUS <= 1
+/*
+ * The scheduler is too messy for my old little brain
+ */
+void
+simpler_thread_setrun(
+ thread_t th,
+ boolean_t may_preempt)
+{
+ struct run_queue *rq;
+ int whichq;
+
+ /*
+ * XXX should replace queue with a boolean in this case.
+ */
+ if (default_pset.idle_count > 0) {
+ processor_t processor;
+
+ processor = (processor_t) queue_first(&default_pset.idle_queue);
+ queue_remove(&default_pset.idle_queue, processor,
+ processor_t, processor_queue);
+ default_pset.idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ return;
+ }
+ rq = &(master_processor->runq);
+ ast_on(cpu_number(), AST_BLOCK);
+
+ whichq = (th)->sched_pri;
+ runq_lock(rq); /* lock the run queue */
+ enqueue_head(&(rq)->runq[whichq], &((th)->links));
+
+ if (whichq < (rq)->low || (rq)->count == 0)
+ (rq)->low = whichq; /* minimize */
+ (rq)->count++;
+#ifdef MIGRATING_THREADS
+ (th)->shuttle.runq = (rq);
+#else
+ (th)->runq = (rq);
+#endif
+ runq_unlock(rq);
+
+ /*
+ * Turn off first_quantum to allow context switch.
+ */
+ current_processor()->first_quantum = FALSE;
+}
+#endif /* NCPUS <= 1 */
+
diff --git a/kern/eventcount.h b/kern/eventcount.h
new file mode 100644
index 0000000..598d7e0
--- /dev/null
+++ b/kern/eventcount.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File:	eventcount.h
+ * Author: Alessandro Forin
+ * Date: 10/91
+ *
+ * Eventcounters, for user-level drivers synchronization
+ *
+ */
+
+#ifndef _KERN_EVENTCOUNT_H_
+#define _KERN_EVENTCOUNT_H_ 1
+
+#include <kern/lock.h>
+
+/* kernel visible only */
+
+typedef struct evc {
+ int count;
+ thread_t waiting_thread;
+ natural_t ev_id;
+ struct evc *sanity;
+ decl_simple_lock_data(, lock)
+} *evc_t;
+
+extern void evc_init(evc_t ev),
+ evc_destroy(evc_t ev),
+ evc_signal(evc_t ev),
+ evc_notify_abort(thread_t thread);
+
+/* kernel and user visible */
+
+extern kern_return_t evc_wait(natural_t ev_id);
+extern kern_return_t evc_wait_clear(natural_t ev_id);
+
+#if NCPUS <= 1
+void simpler_thread_setrun(
+ thread_t th,
+ boolean_t may_preempt);
+#endif
+
+#endif /* _KERN_EVENTCOUNT_H_ */
diff --git a/kern/exc.defs b/kern/exc.defs
new file mode 100644
index 0000000..e614fff
--- /dev/null
+++ b/kern/exc.defs
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* We use custom functions to send exceptions. These functions can
+ be found in `exception.c'. We use this file merely to produce the
+ list of message ids. */
+
+#include <mach/exc.defs>
diff --git a/kern/exception.c b/kern/exception.c
new file mode 100644
index 0000000..15f2970
--- /dev/null
+++ b/kern/exception.c
@@ -0,0 +1,1023 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/port.h>
+#include <mach/mig_errors.h>
+#include <machine/locore.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/mach_msg.h>
+#include <ipc/ipc_machdep.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/ipc_tt.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+#include <kern/printf.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/exception.h>
+#include <kern/macros.h>
+#include <mach/machine/vm_types.h>
+
+#if MACH_KDB
+#include <machine/trap.h>
+#include <ddb/db_output.h>
+
+boolean_t debug_user_with_kdb = FALSE;
+#endif /* MACH_KDB */
+
+#ifdef KEEP_STACKS
+/*
+ * Some obsolete architectures don't support kernel stack discarding
+ * or the thread_exception_return, thread_syscall_return continuations.
+ * For these architectures, the NOTREACHED comments below are incorrect.
+ * The exception function is expected to return.
+ * So the return statements along the slow paths are important.
+ */
+#endif /* KEEP_STACKS */
+
+/*
+ * Routine: exception
+ * Purpose:
+ * The current thread caught an exception.
+ * We make an up-call to the thread's exception server.
+ * Conditions:
+ * Nothing locked and no resources held.
+ * Called from an exception context, so
+ * thread_exception_return and thread_kdb_return
+ * are possible.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception(
+ integer_t _exception,
+ integer_t code,
+ long_integer_t subcode)
+{
+ ipc_thread_t self = current_thread();
+ ipc_port_t exc_port;
+
+ if (_exception == KERN_SUCCESS)
+ panic("exception");
+
+ /*
+ * Optimized version of retrieve_thread_exception.
+ */
+
+ ith_lock(self);
+ assert(self->ith_self != IP_NULL);
+ exc_port = self->ith_exception;
+ if (!IP_VALID(exc_port)) {
+ ith_unlock(self);
+ exception_try_task(_exception, code, subcode);
+ /*NOTREACHED*/
+ }
+
+ ip_lock(exc_port);
+ ith_unlock(self);
+ if (!ip_active(exc_port)) {
+ ip_unlock(exc_port);
+ exception_try_task(_exception, code, subcode);
+ /*NOTREACHED*/
+ }
+
+ /*
+ * Make a naked send right for the exception port.
+ */
+
+ ip_reference(exc_port);
+ exc_port->ip_srights++;
+ ip_unlock(exc_port);
+
+ /*
+ * If this exception port doesn't work,
+ * we will want to try the task's exception port.
+ * Indicate this by saving the exception state.
+ */
+
+ self->ith_exc = _exception;
+ self->ith_exc_code = code;
+ self->ith_exc_subcode = subcode;
+
+ exception_raise(exc_port,
+ retrieve_thread_self_fast(self),
+ retrieve_task_self_fast(self->task),
+ _exception, code, subcode);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_try_task
+ * Purpose:
+ * The current thread caught an exception.
+ * We make an up-call to the task's exception server.
+ * Conditions:
+ * Nothing locked and no resources held.
+ * Called from an exception context, so
+ * thread_exception_return and thread_kdb_return
+ * are possible.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_try_task(
+ integer_t _exception,
+ integer_t code,
+ long_integer_t subcode)
+{
+ ipc_thread_t self = current_thread();
+ task_t task = self->task;
+ ipc_port_t exc_port;
+
+ /*
+ * Optimized version of retrieve_task_exception.
+ */
+
+ itk_lock(task);
+ assert(task->itk_self != IP_NULL);
+ exc_port = task->itk_exception;
+ if (!IP_VALID(exc_port)) {
+ itk_unlock(task);
+ exception_no_server();
+ /*NOTREACHED*/
+ }
+
+ ip_lock(exc_port);
+ itk_unlock(task);
+ if (!ip_active(exc_port)) {
+ ip_unlock(exc_port);
+ exception_no_server();
+ /*NOTREACHED*/
+ }
+
+ /*
+ * Make a naked send right for the exception port.
+ */
+
+ ip_reference(exc_port);
+ exc_port->ip_srights++;
+ ip_unlock(exc_port);
+
+ /*
+ * This is the thread's last chance.
+ * Clear the saved exception state.
+ */
+
+ self->ith_exc = KERN_SUCCESS;
+
+ exception_raise(exc_port,
+ retrieve_thread_self_fast(self),
+ retrieve_task_self_fast(task),
+ _exception, code, subcode);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_no_server
+ * Purpose:
+ * The current thread took an exception,
+ * and no exception server took responsibility
+ * for the exception. So good bye, charlie.
+ * Conditions:
+ * Nothing locked and no resources held.
+ * Called from an exception context, so
+ * thread_kdb_return is possible.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_no_server(void)
+{
+ ipc_thread_t self = current_thread();
+
+ /*
+ * If this thread is being terminated, cooperate.
+ */
+
+ while (thread_should_halt(self))
+ thread_halt_self(thread_exception_return);
+
+
+#if 0
+ if (thread_suspend (self) == KERN_SUCCESS)
+ thread_exception_return ();
+#endif
+
+#if MACH_KDB
+ if (debug_user_with_kdb) {
+ /*
+ * Debug the exception with kdb.
+ * If kdb handles the exception,
+ * then thread_kdb_return won't return.
+ */
+
+ db_printf("No exception server, calling kdb...\n");
+ thread_kdb_return();
+ }
+#endif /* MACH_KDB */
+
+ /*
+ * All else failed; terminate task.
+ */
+
+ (void) task_terminate(self->task);
+ thread_halt_self(thread_exception_return);
+ panic("terminating the task didn't kill us");
+ /*NOTREACHED*/
+}
+
+#define MACH_EXCEPTION_ID 2400 /* from mach/exc.defs */
+#define MACH_EXCEPTION_REPLY_ID (MACH_EXCEPTION_ID + 100)
+
+struct mach_exception {
+ mach_msg_header_t Head;
+ mach_msg_type_t threadType;
+ mach_port_t thread;
+ mach_msg_type_t taskType;
+ mach_port_t task;
+ mach_msg_type_t exceptionType;
+ integer_t exception;
+ mach_msg_type_t codeType;
+ integer_t code;
+ mach_msg_type_t subcodeType;
+ rpc_long_integer_t subcode;
+};
+
+#define INTEGER_T_SIZE_IN_BITS (8 * sizeof(integer_t))
+#define INTEGER_T_TYPE MACH_MSG_TYPE_INTEGER_T
+#define RPC_LONG_INTEGER_T_SIZE_IN_BITS (8 * sizeof(rpc_long_integer_t))
+#if defined(__x86_64__) && !defined(USER32)
+#define RPC_LONG_INTEGER_T_TYPE MACH_MSG_TYPE_INTEGER_64
+#else
+#define RPC_LONG_INTEGER_T_TYPE MACH_MSG_TYPE_INTEGER_32
+#endif
+ /* in mach/machine/vm_types.h */
+
+mach_msg_type_t exc_port_proto = {
+ .msgt_name = MACH_MSG_TYPE_PORT_SEND,
+ .msgt_size = PORT_T_SIZE_IN_BITS,
+ .msgt_number = 1,
+ .msgt_inline = TRUE,
+ .msgt_longform = FALSE,
+ .msgt_deallocate = FALSE,
+ .msgt_unused = 0
+};
+
+mach_msg_type_t exc_code_proto = {
+ .msgt_name = INTEGER_T_TYPE,
+ .msgt_size = INTEGER_T_SIZE_IN_BITS,
+ .msgt_number = 1,
+ .msgt_inline = TRUE,
+ .msgt_longform = FALSE,
+ .msgt_deallocate = FALSE,
+ .msgt_unused = 0
+};
+
+mach_msg_type_t exc_subcode_proto = {
+ .msgt_name = RPC_LONG_INTEGER_T_TYPE,
+ .msgt_size = RPC_LONG_INTEGER_T_SIZE_IN_BITS,
+ .msgt_number = 1,
+ .msgt_inline = TRUE,
+ .msgt_longform = FALSE,
+ .msgt_deallocate = FALSE,
+ .msgt_unused = 0
+};
+
+/*
+ * Routine: exception_raise
+ * Purpose:
+ * Make an exception_raise up-call to an exception server.
+ *
+ * dest_port must be a valid naked send right.
+ * thread_port and task_port are naked send rights.
+ * All three are always consumed.
+ *
+ * self->ith_exc, self->ith_exc_code, self->ith_exc_subcode
+ * must be appropriately initialized.
+ * Conditions:
+ * Nothing locked. We are being called in an exception context,
+ * so thread_exception_return may be called.
+ * Returns:
+ * Doesn't return.
+ */
+
+int exception_raise_misses = 0;
+
+void
+exception_raise(
+ ipc_port_t dest_port,
+ ipc_port_t thread_port,
+ ipc_port_t task_port,
+ integer_t _exception,
+ integer_t code,
+ long_integer_t subcode)
+{
+ ipc_thread_t self = current_thread();
+ ipc_thread_t receiver;
+ ipc_port_t reply_port;
+ ipc_mqueue_t dest_mqueue;
+ ipc_mqueue_t reply_mqueue;
+ ipc_kmsg_t kmsg;
+ mach_msg_return_t mr;
+
+ assert(IP_VALID(dest_port));
+
+ /*
+ * We will eventually need a message buffer.
+ * Grab the buffer now, while nothing is locked.
+ * This buffer will get handed to the exception server,
+ * and it will give the buffer back with its reply.
+ */
+
+ kmsg = ikm_cache_alloc();
+ if (kmsg == IKM_NULL)
+ panic("exception_raise");
+
+ /*
+ * We need a reply port for the RPC.
+ * Check first for a cached port.
+ */
+
+ ith_lock(self);
+ assert(self->ith_self != IP_NULL);
+
+ reply_port = self->ith_rpc_reply;
+ if (reply_port == IP_NULL) {
+ ith_unlock(self);
+ reply_port = ipc_port_alloc_reply();
+ ith_lock(self);
+ if ((reply_port == IP_NULL) ||
+ (self->ith_rpc_reply != IP_NULL))
+ panic("exception_raise");
+ self->ith_rpc_reply = reply_port;
+ }
+
+ ip_lock(reply_port);
+ assert(ip_active(reply_port));
+ ith_unlock(self);
+
+ /*
+ * Make a naked send-once right for the reply port,
+ * to hand to the exception server.
+ * Make an extra reference for the reply port,
+ * to receive on. This protects us against
+ * mach_msg_abort_rpc.
+ */
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+
+ ip_reference(reply_port);
+ self->ith_port = reply_port;
+
+ reply_mqueue = &reply_port->ip_messages;
+ imq_lock(reply_mqueue);
+ assert(ipc_kmsg_queue_empty(&reply_mqueue->imq_messages));
+ ip_unlock(reply_port);
+
+ /*
+ * Make sure we can queue to the destination port.
+ */
+
+ if (!ip_lock_try(dest_port)) {
+ imq_unlock(reply_mqueue);
+ goto slow_exception_raise;
+ }
+
+ if (!ip_active(dest_port) ||
+ (dest_port->ip_receiver == ipc_space_kernel)) {
+ imq_unlock(reply_mqueue);
+ ip_unlock(dest_port);
+ goto slow_exception_raise;
+ }
+
+ /*
+ * Find the destination message queue.
+ */
+
+ {
+ ipc_pset_t dest_pset;
+
+ dest_pset = dest_port->ip_pset;
+ if (dest_pset == IPS_NULL)
+ dest_mqueue = &dest_port->ip_messages;
+ else
+ dest_mqueue = &dest_pset->ips_messages;
+ }
+
+ if (!imq_lock_try(dest_mqueue)) {
+ imq_unlock(reply_mqueue);
+ ip_unlock(dest_port);
+ goto slow_exception_raise;
+ }
+
+ /*
+ * Safe to unlock dest_port, because we hold
+ * dest_mqueue locked. We never bother changing
+ * dest_port->ip_msgcount.
+ */
+
+ ip_unlock(dest_port);
+
+ receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
+ if ((receiver == ITH_NULL) ||
+ !((receiver->swap_func == mach_msg_continue) ||
+ ((receiver->swap_func == mach_msg_receive_continue) &&
+ (sizeof(struct mach_exception) <= receiver->ith_msize) &&
+ ((receiver->ith_option & MACH_RCV_NOTIFY) == 0))) ||
+ !thread_handoff(self, exception_raise_continue, receiver)) {
+ imq_unlock(reply_mqueue);
+ imq_unlock(dest_mqueue);
+ goto slow_exception_raise;
+ }
+ counter(c_exception_raise_block++);
+
+ assert(current_thread() == receiver);
+
+ /*
+ * We need to finish preparing self for its
+ * time asleep in reply_mqueue. self is left
+ * holding the extra ref for reply_port.
+ */
+
+ ipc_thread_enqueue_macro(&reply_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(reply_mqueue);
+
+ /*
+ * Finish extracting receiver from dest_mqueue.
+ */
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ imq_unlock(dest_mqueue);
+
+ /*
+ * Release the receiver's reference for his object.
+ */
+ {
+ ipc_object_t object = receiver->ith_object;
+
+ io_lock(object);
+ io_release(object);
+ io_check_unlock(object);
+ }
+
+ {
+ struct mach_exception *exc =
+ (struct mach_exception *) &kmsg->ikm_header;
+ ipc_space_t space = receiver->task->itk_space;
+
+ /*
+ * We are running as the receiver now. We hold
+ * the following resources, which must be consumed:
+ * kmsg, send-once right for reply_port
+ * send rights for dest_port, thread_port, task_port
+ * Synthesize a kmsg for copyout to the receiver.
+ */
+
+ exc->Head.msgh_bits = (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_size = sizeof *exc;
+ /* exc->Head.msgh_remote_port later */
+ /* exc->Head.msgh_local_port later */
+ exc->Head.msgh_seqno = 0;
+ exc->Head.msgh_id = MACH_EXCEPTION_ID;
+ exc->threadType = exc_port_proto;
+ /* exc->thread later */
+ exc->taskType = exc_port_proto;
+ /* exc->task later */
+ exc->exceptionType = exc_code_proto;
+ exc->exception = _exception;
+ exc->codeType = exc_code_proto;
+ exc->code = code;
+ exc->subcodeType = exc_subcode_proto;
+ exc->subcode = subcode;
+
+ /*
+ * Check that the receiver can handle the message.
+ */
+
+ if (receiver->ith_rcv_size < sizeof(struct mach_exception)) {
+ /*
+ * ipc_kmsg_destroy is a handy way to consume
+ * the resources we hold, but it requires setup.
+ */
+
+ exc->Head.msgh_bits =
+ (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_remote_port = (mach_port_t) dest_port;
+ exc->Head.msgh_local_port = (mach_port_t) reply_port;
+ exc->thread = (mach_port_t) thread_port;
+ exc->task = (mach_port_t) task_port;
+
+ ipc_kmsg_destroy(kmsg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port)) {
+ abort_copyout:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+
+ /*
+ * Oh well, we have to do the header the slow way.
+ * First make it look like it's in-transit.
+ */
+
+ exc->Head.msgh_bits =
+ (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_remote_port = (mach_port_t) dest_port;
+ exc->Head.msgh_local_port = (mach_port_t) reply_port;
+
+ mr = ipc_kmsg_copyout_header(&exc->Head, space,
+ MACH_PORT_NULL);
+ if (mr == MACH_MSG_SUCCESS)
+ goto copyout_body;
+
+ /*
+ * Ack! Prepare for ipc_kmsg_copyout_dest.
+ * It will consume thread_port and task_port.
+ */
+
+ exc->thread = (mach_port_t) thread_port;
+ exc->task = (mach_port_t) task_port;
+
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(receiver->ith_msg, kmsg,
+ sizeof(mach_msg_header_t));
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ goto abort_copyout;
+ }
+
+ assert(reply_port->ip_sorights > 0);
+ ip_unlock(reply_port);
+
+ {
+ kern_return_t kr;
+ ipc_entry_t entry;
+ mach_port_name_t port_name;
+
+ kr = ipc_entry_get (space, &port_name, &entry);
+ if (kr)
+ goto abort_copyout;
+ exc->Head.msgh_remote_port = (mach_port_t) port_name;
+ {
+ mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ entry->ie_object = (ipc_object_t) reply_port;
+ is_write_unlock(space);
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_srights > 0);
+ ip_release(dest_port);
+
+ exc->Head.msgh_local_port =
+ ((dest_port->ip_receiver == space) ?
+ dest_port->ip_receiver_name : MACH_PORT_NULL);
+
+ if ((--dest_port->ip_srights == 0) &&
+ (dest_port->ip_nsrequest != IP_NULL)) {
+ ipc_port_t nsrequest;
+ mach_port_mscount_t mscount;
+
+ /* a rather rare case */
+
+ nsrequest = dest_port->ip_nsrequest;
+ mscount = dest_port->ip_mscount;
+ dest_port->ip_nsrequest = IP_NULL;
+ ip_unlock(dest_port);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest_port);
+
+ copyout_body:
+ /*
+ * Optimized version of ipc_kmsg_copyout_body,
+ * to handle the two ports in the body.
+ */
+
+ mr = (ipc_kmsg_copyout_object_to_port(space, (ipc_object_t) thread_port,
+ MACH_MSG_TYPE_PORT_SEND, &exc->thread) |
+ ipc_kmsg_copyout_object_to_port(space, (ipc_object_t) task_port,
+ MACH_MSG_TYPE_PORT_SEND, &exc->task));
+ if (mr != MACH_MSG_SUCCESS) {
+ (void) ipc_kmsg_put(receiver->ith_msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr | MACH_RCV_BODY_ERROR);
+ /*NOTREACHED*/
+ }
+ }
+
+ /*
+ * Optimized version of ipc_kmsg_put.
+ * We must check ikm_cache after copyoutmsg.
+ */
+
+ ikm_check_initialized(kmsg, kmsg->ikm_size);
+ assert(kmsg->ikm_size == IKM_SAVED_KMSG_SIZE);
+
+ if (copyoutmsg(&kmsg->ikm_header, receiver->ith_msg,
+ sizeof(struct mach_exception))) {
+ mr = ipc_kmsg_put(receiver->ith_msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ if (!ikm_cache_free_try(kmsg)) {
+ mr = ipc_kmsg_put(receiver->ith_msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+#ifndef __GNUC__
+ return; /* help for the compiler */
+#endif
+
+ slow_exception_raise: {
+ struct mach_exception *exc =
+ (struct mach_exception *) &kmsg->ikm_header;
+ ipc_kmsg_t reply_kmsg;
+ mach_port_seqno_t reply_seqno;
+
+ exception_raise_misses++;
+
+ /*
+ * We hold the following resources, which must be consumed:
+ * kmsg, send-once right and ref for reply_port
+ * send rights for dest_port, thread_port, task_port
+ * Synthesize a kmsg to send.
+ */
+
+ exc->Head.msgh_bits = (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_size = sizeof *exc;
+ exc->Head.msgh_remote_port = (mach_port_t) dest_port;
+ exc->Head.msgh_local_port = (mach_port_t) reply_port;
+ exc->Head.msgh_seqno = 0;
+ exc->Head.msgh_id = MACH_EXCEPTION_ID;
+ exc->threadType = exc_port_proto;
+ exc->thread = (mach_port_t) thread_port;
+ exc->taskType = exc_port_proto;
+ exc->task = (mach_port_t) task_port;
+ exc->exceptionType = exc_code_proto;
+ exc->exception = _exception;
+ exc->codeType = exc_code_proto;
+ exc->code = code;
+ exc->subcodeType = exc_subcode_proto;
+ exc->subcode = subcode;
+
+ ipc_mqueue_send_always(kmsg);
+
+ /*
+ * We are left with a ref for reply_port,
+ * which we use to receive the reply message.
+ */
+
+ ip_lock(reply_port);
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ exception_raise_continue_slow(MACH_RCV_PORT_DIED, IKM_NULL, /*dummy*/0);
+ /*NOTREACHED*/
+ }
+
+ imq_lock(reply_mqueue);
+ ip_unlock(reply_port);
+
+ mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, exception_raise_continue,
+ &reply_kmsg, &reply_seqno);
+ /* reply_mqueue is unlocked */
+
+ exception_raise_continue_slow(mr, reply_kmsg, reply_seqno);
+ /*NOTREACHED*/
+ }
+}
+
+/* Macro used by MIG to cleanly check the type. */
+#define BAD_TYPECHECK(type, check) unlikely (({\
+ union { mach_msg_type_t t; uint32_t w; } _t, _c;\
+ _t.t = *(type); _c.t = *(check);_t.w != _c.w; }))
+
+/* Type descriptor for the return code. */
+mach_msg_type_t exc_RetCode_proto = {
+ .msgt_name = MACH_MSG_TYPE_INTEGER_32,
+ .msgt_size = 32,
+ .msgt_number = 1,
+ .msgt_inline = TRUE,
+ .msgt_longform = FALSE,
+ .msgt_deallocate = FALSE,
+ .msgt_unused = 0
+};
+
+/*
+ * Routine: exception_parse_reply
+ * Purpose:
+ * Parse and consume an exception reply message.
+ * Conditions:
+ * The destination port right has already been consumed.
+ * The message buffer and anything else in it is consumed.
+ * Returns:
+ * The reply return code.
+ */
+
+kern_return_t
+exception_parse_reply(ipc_kmsg_t kmsg)
+{
+ mig_reply_header_t *msg =
+ (mig_reply_header_t *) &kmsg->ikm_header;
+ kern_return_t kr;
+
+ if ((msg->Head.msgh_bits !=
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0)) ||
+ (msg->Head.msgh_size != sizeof *msg) ||
+ (msg->Head.msgh_id != MACH_EXCEPTION_REPLY_ID) ||
+ (BAD_TYPECHECK(&msg->RetCodeType, &exc_RetCode_proto))) {
+ /*
+ * Bozo user sent us a misformatted reply.
+ */
+
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(kmsg);
+ return MIG_REPLY_MISMATCH;
+ }
+
+ kr = msg->RetCode;
+
+ ikm_cache_free(kmsg);
+
+ return kr;
+}
+
+/*
+ * Routine: exception_raise_continue
+ * Purpose:
+ * Continue after blocking for an exception.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the exception state saved in the thread. From here
+ * control goes back to user space.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_raise_continue(void)
+{
+ ipc_thread_t self = current_thread();
+ ipc_port_t reply_port = self->ith_port;
+ ipc_mqueue_t reply_mqueue = &reply_port->ip_messages;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ TRUE, exception_raise_continue,
+ &kmsg, &seqno);
+ /* reply_mqueue is unlocked */
+
+ exception_raise_continue_slow(mr, kmsg, seqno);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: thread_release_and_exception_return
+ * Purpose:
+ * Continue after thread was halted.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack and
+ * control goes back to thread_exception_return.
+ * Returns:
+ * Doesn't return.
+ */
+static void
+thread_release_and_exception_return(void)
+{
+ ipc_thread_t self = current_thread();
+ /* reply port must be released */
+ ipc_port_release(self->ith_port);
+ thread_exception_return();
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_raise_continue_slow
+ * Purpose:
+ * Continue after finishing an ipc_mqueue_receive
+ * for an exception reply message.
+ * Conditions:
+ * Nothing locked. We hold a ref for reply_port.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_raise_continue_slow(
+ mach_msg_return_t mr,
+ ipc_kmsg_t kmsg,
+ mach_port_seqno_t seqno)
+{
+ ipc_thread_t self = current_thread();
+ ipc_port_t reply_port = self->ith_port;
+ ipc_mqueue_t reply_mqueue = &reply_port->ip_messages;
+
+ while (mr == MACH_RCV_INTERRUPTED) {
+ /*
+ * Somebody is trying to force this thread
+ * to a clean point. We must cooperate
+ * and then resume the receive.
+ */
+
+ while (thread_should_halt(self)) {
+ /* if thread is about to terminate, release the port */
+ if (self->ast & AST_TERMINATE)
+ ipc_port_release(reply_port);
+ /*
+ * Use the continuation to release the port in
+ * case the thread is about to halt.
+ */
+ thread_halt_self(thread_release_and_exception_return);
+ }
+
+ ip_lock(reply_port);
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ mr = MACH_RCV_PORT_DIED;
+ break;
+ }
+
+ imq_lock(reply_mqueue);
+ ip_unlock(reply_port);
+
+ mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, exception_raise_continue,
+ &kmsg, &seqno);
+ /* reply_mqueue is unlocked */
+ }
+ ipc_port_release(reply_port);
+
+ assert((mr == MACH_MSG_SUCCESS) ||
+ (mr == MACH_RCV_PORT_DIED));
+
+ if (mr == MACH_MSG_SUCCESS) {
+ /*
+ * Consume the reply message.
+ */
+
+ ipc_port_release_sonce(reply_port);
+ mr = exception_parse_reply(kmsg);
+ }
+
+ if ((mr == KERN_SUCCESS) ||
+ (mr == MACH_RCV_PORT_DIED)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+
+ if (self->ith_exc != KERN_SUCCESS) {
+ exception_try_task(self->ith_exc,
+ self->ith_exc_code,
+ self->ith_exc_subcode);
+ /*NOTREACHED*/
+ }
+
+ exception_no_server();
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_raise_continue_fast
+ * Purpose:
+ * Special-purpose fast continuation for exceptions.
+ * Conditions:
+ * reply_port is locked and alive.
+ * kmsg is our reply message.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_raise_continue_fast(
+ ipc_port_t reply_port,
+ ipc_kmsg_t kmsg)
+{
+ ipc_thread_t self = current_thread();
+ kern_return_t kr;
+
+ assert(ip_active(reply_port));
+ assert(reply_port == self->ith_port);
+ assert(reply_port == (ipc_port_t) kmsg->ikm_header.msgh_remote_port);
+ assert(MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+
+ /*
+ * Release the send-once right (from the message header)
+ * and the saved reference (from self->ith_port).
+ */
+
+ reply_port->ip_sorights--;
+ ip_release(reply_port);
+ ip_release(reply_port);
+ ip_unlock(reply_port);
+
+ /*
+ * Consume the reply message.
+ */
+
+ kr = exception_parse_reply(kmsg);
+ if (kr == KERN_SUCCESS) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+
+ if (self->ith_exc != KERN_SUCCESS) {
+ exception_try_task(self->ith_exc,
+ self->ith_exc_code,
+ self->ith_exc_subcode);
+ /*NOTREACHED*/
+ }
+
+ exception_no_server();
+ /*NOTREACHED*/
+}
diff --git a/kern/exception.h b/kern/exception.h
new file mode 100644
index 0000000..36138da
--- /dev/null
+++ b/kern/exception.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_EXCEPTION_H_
+#define _KERN_EXCEPTION_H_
+
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_kmsg.h>
+
+extern void
+exception(
+ integer_t _exception,
+ integer_t code,
+ long_integer_t subcode) __attribute__ ((noreturn));
+
+extern void
+exception_try_task(
+ integer_t _exception,
+ integer_t code,
+ long_integer_t subcode) __attribute__ ((noreturn));
+
+extern void
+exception_no_server(void) __attribute__ ((noreturn));
+
+extern void
+exception_raise(
+ ipc_port_t dest_port,
+ ipc_port_t thread_port,
+ ipc_port_t task_port,
+ integer_t _exception,
+ integer_t code,
+ long_integer_t subcode) __attribute__ ((noreturn));
+
+extern kern_return_t
+exception_parse_reply(ipc_kmsg_t kmsg);
+
+extern void
+exception_raise_continue(void) __attribute__ ((noreturn));
+
+extern void
+exception_raise_continue_slow(
+ mach_msg_return_t mr,
+ ipc_kmsg_t kmsg,
+ mach_port_seqno_t seqno) __attribute__ ((noreturn));
+
+extern void
+exception_raise_continue_fast(
+ ipc_port_t reply_port,
+ ipc_kmsg_t kmsg) __attribute__ ((noreturn));
+
+#endif /* _KERN_EXCEPTION_H_ */
diff --git a/kern/experimental.srv b/kern/experimental.srv
new file mode 100644
index 0000000..2ccfd78
--- /dev/null
+++ b/kern/experimental.srv
@@ -0,0 +1,3 @@
+#define KERNEL_SERVER 1
+
+#include <mach/experimental.defs>
diff --git a/kern/gnumach.srv b/kern/gnumach.srv
new file mode 100644
index 0000000..38bc909
--- /dev/null
+++ b/kern/gnumach.srv
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/gnumach.defs>
diff --git a/kern/gsync.c b/kern/gsync.c
new file mode 100644
index 0000000..e73a6cf
--- /dev/null
+++ b/kern/gsync.c
@@ -0,0 +1,517 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2016.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <kern/gsync.h>
+#include <kern/kmutex.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/list.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+
+/* An entry in the global hash table. */
+struct gsync_hbucket
+{
+ struct list entries;
+ struct kmutex lock;
+};
+
+/* A key used to uniquely identify an address that a thread is
+ * waiting on. Its members' values depend on whether said
+ * address is shared or task-local. Note that different types of keys
+ * should never compare equal, since a task map should never have
+ * the same address as a VM object. */
+union gsync_key
+{
+ struct
+ {
+ vm_map_t map;
+ vm_offset_t addr;
+ } local;
+
+ struct
+ {
+ vm_object_t obj;
+ vm_offset_t off;
+ } shared;
+
+ struct
+ {
+ unsigned long u;
+ unsigned long v;
+ } any;
+};
+
+/* A thread that is blocked on an address with 'gsync_wait'. */
+struct gsync_waiter
+{
+ struct list link;
+ union gsync_key key;
+ thread_t waiter;
+};
+
+/* Needed data for temporary mappings. */
+struct vm_args
+{
+ vm_object_t obj;
+ vm_offset_t off;
+};
+
+#define GSYNC_NBUCKETS 512
+static struct gsync_hbucket gsync_buckets[GSYNC_NBUCKETS];
+
+void gsync_setup (void)
+{
+ int i;
+ for (i = 0; i < GSYNC_NBUCKETS; ++i)
+ {
+ list_init (&gsync_buckets[i].entries);
+ kmutex_init (&gsync_buckets[i].lock);
+ }
+}
+
+/* Convenience comparison functions for gsync_key's. */
+
+static inline int
+gsync_key_eq (const union gsync_key *lp,
+ const union gsync_key *rp)
+{
+ return (lp->any.u == rp->any.u && lp->any.v == rp->any.v);
+}
+
+static inline int
+gsync_key_lt (const union gsync_key *lp,
+ const union gsync_key *rp)
+{
+ return (lp->any.u < rp->any.u ||
+ (lp->any.u == rp->any.u && lp->any.v < rp->any.v));
+}
+
+#define MIX2_LL(x, y) ((((x) << 5) | ((x) >> 27)) ^ (y))
+
+static inline unsigned int
+gsync_key_hash (const union gsync_key *keyp)
+{
+ unsigned int ret = sizeof (void *);
+#ifndef __LP64__
+ ret = MIX2_LL (ret, keyp->any.u);
+ ret = MIX2_LL (ret, keyp->any.v);
+#else
+ ret = MIX2_LL (ret, keyp->any.u & ~0U);
+ ret = MIX2_LL (ret, keyp->any.u >> 32);
+ ret = MIX2_LL (ret, keyp->any.v & ~0U);
+ ret = MIX2_LL (ret, keyp->any.v >> 32);
+#endif
+ return (ret);
+}
+
+/* Perform a VM lookup for the address in the map. The FLAGS
+ * parameter is used to specify some attributes for the address,
+ * such as protection. Place the corresponding VM object/offset pair
+ * in VAP. Returns 0 if successful, -1 otherwise. */
+static int
+probe_address (vm_map_t map, vm_offset_t addr,
+ int flags, struct vm_args *vap)
+{
+ vm_prot_t prot = VM_PROT_READ |
+ ((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
+ vm_map_version_t ver;
+ vm_prot_t rprot;
+ boolean_t wired_p;
+
+ if (vm_map_lookup (&map, addr, prot, &ver,
+ &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
+ return (-1);
+ else if ((rprot & prot) != prot)
+ {
+ vm_object_unlock (vap->obj);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/* Initialize the key with its needed members, depending on whether the
+ * address is local or shared. Also stores the VM object and offset inside
+ * the argument VAP for future use. */
+static int
+gsync_prepare_key (task_t task, vm_offset_t addr, int flags,
+ union gsync_key *keyp, struct vm_args *vap)
+{
+ if (probe_address (task->map, addr, flags, vap) < 0)
+ return (-1);
+ else if (flags & GSYNC_SHARED)
+ {
+ /* For a shared address, we need the VM object
+ * and offset as the keys. */
+ keyp->shared.obj = vap->obj;
+ keyp->shared.off = vap->off;
+ }
+ else
+ {
+ /* Task-local address. The keys are the task's map and
+ * the virtual address itself. */
+ keyp->local.map = task->map;
+ keyp->local.addr = addr;
+ }
+
+ return ((int)(gsync_key_hash (keyp) % GSYNC_NBUCKETS));
+}
+
+static inline struct gsync_waiter*
+node_to_waiter (struct list *nodep)
+{
+ return (list_entry (nodep, struct gsync_waiter, link));
+}
+
+static inline struct list*
+gsync_find_key (const struct list *entries,
+ const union gsync_key *keyp, int *exactp)
+{
+ /* Look for a key that matches. We take advantage of the fact
+ * that the entries are sorted to break out of the loop as
+ * early as possible. */
+ struct list *runp;
+ list_for_each (entries, runp)
+ {
+ struct gsync_waiter *p = node_to_waiter (runp);
+ if (gsync_key_lt (keyp, &p->key))
+ break;
+ else if (gsync_key_eq (keyp, &p->key))
+ {
+ if (exactp != 0)
+ *exactp = 1;
+ break;
+ }
+ }
+
+ return (runp);
+}
+
+/* Create a temporary mapping in the kernel. */
+static inline vm_offset_t
+temp_mapping (struct vm_args *vap, vm_offset_t addr, vm_prot_t prot)
+{
+ vm_offset_t paddr = VM_MIN_KERNEL_ADDRESS;
+ /* Adjust the offset for addresses that aren't page-aligned. */
+ vm_offset_t off = vap->off - (addr - trunc_page (addr));
+
+ if (vm_map_enter (kernel_map, &paddr, PAGE_SIZE,
+ 0, TRUE, vap->obj, off, FALSE, prot, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT) != KERN_SUCCESS)
+ paddr = 0;
+
+ return (paddr);
+}
+
+kern_return_t gsync_wait (task_t task, vm_offset_t addr,
+ unsigned int lo, unsigned int hi, natural_t msec, int flags)
+{
+ if (task == 0)
+ return (KERN_INVALID_TASK);
+ else if (addr % sizeof (int) != 0)
+ return (KERN_INVALID_ADDRESS);
+
+ vm_map_lock_read (task->map);
+
+ struct gsync_waiter w;
+ struct vm_args va;
+ boolean_t remote = task != current_task ();
+ int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);
+
+ if (bucket < 0)
+ {
+ vm_map_unlock_read (task->map);
+ return (KERN_INVALID_ADDRESS);
+ }
+ else if (remote)
+ /* The VM object is returned locked. However, we are about to acquire
+ * a sleeping lock for a bucket, so we must not hold any simple
+ * locks. To prevent this object from going away, we add a reference
+ * to it when requested. */
+ vm_object_reference_locked (va.obj);
+
+ /* We no longer need the lock on the VM object. */
+ vm_object_unlock (va.obj);
+
+ struct gsync_hbucket *hbp = gsync_buckets + bucket;
+ kmutex_lock (&hbp->lock, FALSE);
+
+ /* Before doing any work, check that the expected value(s)
+ * match the contents of the address. Otherwise, the waiting
+ * thread could potentially miss a wakeup. */
+
+ boolean_t equal;
+ if (! remote)
+ equal = ((unsigned int *)addr)[0] == lo &&
+ ((flags & GSYNC_QUAD) == 0 ||
+ ((unsigned int *)addr)[1] == hi);
+ else
+ {
+ vm_offset_t paddr = temp_mapping (&va, addr, VM_PROT_READ);
+ if (unlikely (paddr == 0))
+ {
+ kmutex_unlock (&hbp->lock);
+ vm_map_unlock_read (task->map);
+ /* Make sure to remove the reference we added. */
+ vm_object_deallocate (va.obj);
+ return (KERN_MEMORY_FAILURE);
+ }
+
+ vm_offset_t off = addr & (PAGE_SIZE - 1);
+ paddr += off;
+
+ equal = ((unsigned int *)paddr)[0] == lo &&
+ ((flags & GSYNC_QUAD) == 0 ||
+ ((unsigned int *)paddr)[1] == hi);
+
+ paddr -= off;
+
+ /* Note that the call to 'vm_map_remove' will unreference
+ * the VM object, so we don't have to do it ourselves. */
+ vm_map_remove (kernel_map, paddr, paddr + PAGE_SIZE);
+ }
+
+ /* Done with the task's map. */
+ vm_map_unlock_read (task->map);
+
+ if (! equal)
+ {
+ kmutex_unlock (&hbp->lock);
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ /* Look for the first entry in the hash bucket that
+ * compares strictly greater than this waiter. */
+ struct list *runp;
+ list_for_each (&hbp->entries, runp)
+ if (gsync_key_lt (&w.key, &node_to_waiter(runp)->key))
+ break;
+
+ /* Finally, add ourselves to the list and go to sleep. */
+ list_add (runp->prev, runp, &w.link);
+ w.waiter = current_thread ();
+
+ if (flags & GSYNC_TIMED)
+ thread_will_wait_with_timeout (w.waiter, msec);
+ else
+ thread_will_wait (w.waiter);
+
+ kmutex_unlock (&hbp->lock);
+ thread_block (thread_no_continuation);
+
+ /* We're back. */
+ kern_return_t ret = KERN_SUCCESS;
+ if (current_thread()->wait_result != THREAD_AWAKENED)
+ {
+ /* We were interrupted or timed out. */
+ kmutex_lock (&hbp->lock, FALSE);
+ if (!list_node_unlinked (&w.link))
+ list_remove (&w.link);
+ kmutex_unlock (&hbp->lock);
+
+ /* Map the error code. */
+ ret = current_thread()->wait_result == THREAD_INTERRUPTED ?
+ KERN_INTERRUPTED : KERN_TIMEDOUT;
+ }
+
+ return (ret);
+}
+
+/* Remove a waiter from the queue, wake it up, and
+ * return the next node. */
+static inline struct list*
+dequeue_waiter (struct list *nodep)
+{
+ struct list *nextp = list_next (nodep);
+ list_remove (nodep);
+ list_node_init (nodep);
+ clear_wait (node_to_waiter(nodep)->waiter,
+ THREAD_AWAKENED, FALSE);
+ return (nextp);
+}
+
+kern_return_t gsync_wake (task_t task,
+ vm_offset_t addr, unsigned int val, int flags)
+{
+ if (task == 0)
+ return (KERN_INVALID_TASK);
+ else if (addr % sizeof (int) != 0)
+ return (KERN_INVALID_ADDRESS);
+
+ vm_map_lock_read (task->map);
+
+ union gsync_key key;
+ struct vm_args va;
+ int bucket = gsync_prepare_key (task, addr, flags, &key, &va);
+
+ if (bucket < 0)
+ {
+ vm_map_unlock_read (task->map);
+ return (KERN_INVALID_ADDRESS);
+ }
+ else if (current_task () != task && (flags & GSYNC_MUTATE) != 0)
+ /* See above on why we do this. */
+ vm_object_reference_locked (va.obj);
+
+ /* Done with the VM object lock. */
+ vm_object_unlock (va.obj);
+
+ kern_return_t ret = KERN_INVALID_ARGUMENT;
+ struct gsync_hbucket *hbp = gsync_buckets + bucket;
+
+ kmutex_lock (&hbp->lock, FALSE);
+
+ if (flags & GSYNC_MUTATE)
+ {
+ /* Set the contents of the address to the specified value,
+ * even if we don't end up waking any threads. Note that
+ * the buckets' simple locks give us atomicity. */
+
+ if (task != current_task ())
+ {
+ vm_offset_t paddr = temp_mapping (&va, addr,
+ VM_PROT_READ | VM_PROT_WRITE);
+
+ if (paddr == 0)
+ {
+ kmutex_unlock (&hbp->lock);
+ vm_map_unlock_read (task->map);
+ vm_object_deallocate (va.obj);
+ return (KERN_MEMORY_FAILURE);
+ }
+
+ addr = paddr + (addr & (PAGE_SIZE - 1));
+ }
+
+ *(unsigned int *)addr = val;
+ if (task != current_task ())
+ vm_map_remove (kernel_map, addr, addr + sizeof (int));
+ }
+
+ vm_map_unlock_read (task->map);
+
+ int found = 0;
+ struct list *runp = gsync_find_key (&hbp->entries, &key, &found);
+ if (found)
+ {
+ do
+ runp = dequeue_waiter (runp);
+ while ((flags & GSYNC_BROADCAST) &&
+ !list_end (&hbp->entries, runp) &&
+ gsync_key_eq (&node_to_waiter(runp)->key, &key));
+
+ ret = KERN_SUCCESS;
+ }
+
+ kmutex_unlock (&hbp->lock);
+ return (ret);
+}
+
+kern_return_t gsync_requeue (task_t task, vm_offset_t src,
+ vm_offset_t dst, boolean_t wake_one, int flags)
+{
+ if (task == 0)
+ return (KERN_INVALID_TASK);
+ else if (src % sizeof (int) != 0 || dst % sizeof (int) != 0)
+ return (KERN_INVALID_ADDRESS);
+
+ union gsync_key src_k, dst_k;
+ struct vm_args va;
+
+ int src_bkt = gsync_prepare_key (task, src, flags, &src_k, &va);
+ if (src_bkt < 0)
+ return (KERN_INVALID_ADDRESS);
+
+ /* Unlock the VM object before the second lookup. */
+ vm_object_unlock (va.obj);
+
+ int dst_bkt = gsync_prepare_key (task, dst, flags, &dst_k, &va);
+ if (dst_bkt < 0)
+ return (KERN_INVALID_ADDRESS);
+
+ /* We never create any temporary mappings in 'requeue', so we
+ * can unlock the VM object right now. */
+ vm_object_unlock (va.obj);
+
+ /* If we're asked to unconditionally wake up a waiter, then
+ * we need to remove a maximum of two threads from the queue. */
+ unsigned int nw = 1 + wake_one;
+ struct gsync_hbucket *bp1 = gsync_buckets + src_bkt;
+ struct gsync_hbucket *bp2 = gsync_buckets + dst_bkt;
+
+ /* Acquire the locks in order, to prevent any potential deadlock. */
+ if (bp1 == bp2)
+ kmutex_lock (&bp1->lock, FALSE);
+ else if ((unsigned long)bp1 < (unsigned long)bp2)
+ {
+ kmutex_lock (&bp1->lock, FALSE);
+ kmutex_lock (&bp2->lock, FALSE);
+ }
+ else
+ {
+ kmutex_lock (&bp2->lock, FALSE);
+ kmutex_lock (&bp1->lock, FALSE);
+ }
+
+ kern_return_t ret = KERN_SUCCESS;
+ int exact;
+ struct list *inp = gsync_find_key (&bp1->entries, &src_k, &exact);
+
+ if (! exact)
+ /* There are no waiters in the source queue. */
+ ret = KERN_INVALID_ARGUMENT;
+ else
+ {
+ struct list *outp = gsync_find_key (&bp2->entries, &dst_k, 0);
+
+ /* We're going to need a node that points one past the
+ * end of the waiters in the source queue. */
+ struct list *endp = inp;
+
+ do
+ {
+ /* Modify the keys while iterating. */
+ node_to_waiter(endp)->key = dst_k;
+ endp = list_next (endp);
+ }
+ while (((flags & GSYNC_BROADCAST) || --nw != 0) &&
+ !list_end (&bp1->entries, endp) &&
+ gsync_key_eq (&node_to_waiter(endp)->key, &src_k));
+
+ /* Splice the list by removing waiters from the source queue
+ * and inserting them into the destination queue. */
+ inp->prev->next = endp;
+ endp->prev->next = outp->next;
+ endp->prev = inp->prev;
+
+ outp->next = inp;
+ inp->prev = outp;
+
+ if (wake_one)
+ (void)dequeue_waiter (inp);
+ }
+
+  /* Release the locks and we're done. */
+ kmutex_unlock (&bp1->lock);
+ if (bp1 != bp2)
+ kmutex_unlock (&bp2->lock);
+
+ return (ret);
+}
+
diff --git a/kern/gsync.h b/kern/gsync.h
new file mode 100644
index 0000000..8f69be3
--- /dev/null
+++ b/kern/gsync.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2016.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _KERN_GSYNC_H_
+#define _KERN_GSYNC_H_ 1
+
+#define GSYNC_SHARED 0x01
+#define GSYNC_QUAD 0x02
+#define GSYNC_TIMED 0x04
+#define GSYNC_BROADCAST 0x08
+#define GSYNC_MUTATE 0x10
+
+#include <mach/mach_types.h>
+
+void gsync_setup (void);
+
+kern_return_t gsync_wait (task_t task, vm_offset_t addr,
+ unsigned int lo, unsigned int hi, natural_t msec, int flags);
+
+kern_return_t gsync_wake (task_t task,
+ vm_offset_t addr, unsigned int val, int flags);
+
+kern_return_t gsync_requeue (task_t task, vm_offset_t src_addr,
+ vm_offset_t dst_addr, boolean_t wake_one, int flags);
+
+#endif
diff --git a/kern/host.c b/kern/host.c
new file mode 100644
index 0000000..6939437
--- /dev/null
+++ b/kern/host.c
@@ -0,0 +1,389 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * host.c
+ *
+ * Non-ipc host functions.
+ */
+
+#include <string.h>
+
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/kalloc.h>
+#include <kern/host.h>
+#include <mach/host_info.h>
+#include <mach/kern_return.h>
+#include <mach/machine.h>
+#include <mach/port.h>
+#include <kern/processor.h>
+#include <kern/ipc_host.h>
+#include <kern/mach_clock.h>
+#include <kern/mach_host.server.h>
+#include <mach/vm_param.h>
+
+host_data_t realhost;
+
+kern_return_t host_processors(
+ const host_t host,
+ processor_array_t *processor_list,
+ natural_t *countp)
+{
+ unsigned i;
+ processor_t *tp;
+ vm_offset_t addr;
+ unsigned int count;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Determine how many processors we have.
+ * (This number shouldn't change.)
+ */
+
+ count = 0;
+ for (i = 0; i < NCPUS; i++)
+ if (machine_slot[i].is_cpu)
+ count++;
+
+ if (count == 0)
+ panic("host_processors");
+
+ addr = kalloc((vm_size_t) (count * sizeof(mach_port_t)));
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+
+ tp = (processor_t *) addr;
+ for (i = 0; i < NCPUS; i++)
+ if (machine_slot[i].is_cpu)
+ *tp++ = cpu_to_processor(i);
+
+ *countp = count;
+ *processor_list = (mach_port_t *) addr;
+
+ /* do the conversion that Mig should handle */
+
+ tp = (processor_t *) addr;
+ for (i = 0; i < count; i++)
+ ((mach_port_t *) tp)[i] =
+ (mach_port_t)convert_processor_to_port(tp[i]);
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t host_info(
+ const host_t host,
+ int flavor,
+ host_info_t info,
+ natural_t *count)
+{
+ integer_t i, *slot_ptr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch(flavor) {
+
+ case HOST_BASIC_INFO:
+ {
+ host_basic_info_t basic_info;
+
+ /*
+ * Basic information about this host.
+ */
+ if (*count < HOST_BASIC_INFO_COUNT)
+ return KERN_FAILURE;
+
+ basic_info = (host_basic_info_t) info;
+
+ basic_info->max_cpus = machine_info.max_cpus;
+ basic_info->avail_cpus = machine_info.avail_cpus;
+ basic_info->memory_size = machine_info.memory_size;
+ basic_info->cpu_type =
+ machine_slot[master_processor->slot_num].cpu_type;
+ basic_info->cpu_subtype =
+ machine_slot[master_processor->slot_num].cpu_subtype;
+
+ *count = HOST_BASIC_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ case HOST_PROCESSOR_SLOTS:
+ /*
+ * Return numbers of slots with active processors
+ * in them.
+ */
+ if (*count < NCPUS)
+ return KERN_INVALID_ARGUMENT;
+
+ slot_ptr = (integer_t *)info;
+ *count = 0;
+ for (i = 0; i < NCPUS; i++) {
+ if (machine_slot[i].is_cpu &&
+ machine_slot[i].running) {
+ *slot_ptr++ = i;
+ (*count)++;
+ }
+ }
+ return KERN_SUCCESS;
+
+ case HOST_SCHED_INFO:
+ {
+ host_sched_info_t sched_info;
+ extern int min_quantum;
+ /* minimum quantum, in ticks */
+
+ /*
+ * Return scheduler information.
+ */
+ if (*count < HOST_SCHED_INFO_COUNT)
+ return(KERN_FAILURE);
+
+ sched_info = (host_sched_info_t) info;
+
+ sched_info->min_timeout = tick / 1000;
+ /* convert microseconds to milliseconds */
+ sched_info->min_quantum = min_quantum * tick / 1000;
+ /* convert ticks to milliseconds */
+
+ *count = HOST_SCHED_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ case HOST_LOAD_INFO:
+ {
+ host_load_info_t load_info;
+ extern long avenrun[3], mach_factor[3];
+
+ if (*count < HOST_LOAD_INFO_COUNT)
+ return KERN_FAILURE;
+
+ load_info = (host_load_info_t) info;
+
+ memcpy(load_info->avenrun,
+ avenrun,
+ sizeof avenrun);
+ memcpy(load_info->mach_factor,
+ mach_factor,
+ sizeof mach_factor);
+
+ *count = HOST_LOAD_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+}
+
+/*
+ * Return kernel version string (more than you ever
+ * wanted to know about what version of the kernel this is).
+ */
+
+kern_return_t host_get_kernel_version(
+ const host_t host,
+ kernel_version_t out_version)
+{
+ extern char version[];
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ (void) strncpy(out_version, version, sizeof(kernel_version_t));
+
+ return KERN_SUCCESS;
+}
+
+#if !defined(__x86_64__) || defined(USER32)
+/* Same as above, but does not exist for x86_64. */
+kern_return_t host_kernel_version(
+ const host_t host,
+ kernel_version_t out_version)
+{
+ return host_get_kernel_version(host, out_version);
+}
+#endif
+
+/*
+ * host_processor_sets:
+ *
+ * List all processor sets on the host.
+ */
+#if MACH_HOST
+kern_return_t
+host_processor_sets(
+ const host_t host,
+ processor_set_name_array_t *pset_list,
+ natural_t *count)
+{
+ unsigned int actual; /* this many psets */
+ processor_set_t pset;
+ processor_set_t *psets;
+ int i;
+
+ vm_size_t size;
+ vm_size_t size_needed;
+ vm_offset_t addr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = 0; addr = 0;
+
+ for (;;) {
+ simple_lock(&all_psets_lock);
+ actual = all_psets_count;
+
+ /* do we have the memory we need? */
+
+ size_needed = actual * sizeof(mach_port_t);
+ if (size_needed <= size)
+ break;
+
+ /* unlock and allocate more memory */
+ simple_unlock(&all_psets_lock);
+
+ if (size != 0)
+ kfree(addr, size);
+
+ assert(size_needed > 0);
+ size = size_needed;
+
+ addr = kalloc(size);
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* OK, have memory and the all_psets_lock */
+
+ psets = (processor_set_t *) addr;
+
+ for (i = 0, pset = (processor_set_t) queue_first(&all_psets);
+ i < actual;
+ i++, pset = (processor_set_t) queue_next(&pset->all_psets)) {
+ /* take ref for convert_pset_name_to_port */
+ pset_reference(pset);
+ psets[i] = pset;
+ }
+ assert(queue_end(&all_psets, (queue_entry_t) pset));
+
+ /* can unlock now that we've got the pset refs */
+ simple_unlock(&all_psets_lock);
+
+ /*
+ * Always have default port.
+ */
+
+ assert(actual > 0);
+
+ /* if we allocated too much, must copy */
+
+ if (size_needed < size) {
+ vm_offset_t newaddr;
+
+ newaddr = kalloc(size_needed);
+ if (newaddr == 0) {
+ for (i = 0; i < actual; i++)
+ pset_deallocate(psets[i]);
+ kfree(addr, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ memcpy((void *) newaddr, (void *) addr, size_needed);
+ kfree(addr, size);
+ psets = (processor_set_t *) newaddr;
+ }
+
+ *pset_list = (mach_port_t *) psets;
+ *count = actual;
+
+ /* do the conversion that Mig should handle */
+
+ for (i = 0; i < actual; i++)
+ ((mach_port_t *) psets)[i] =
+ (mach_port_t)convert_pset_name_to_port(psets[i]);
+
+ return KERN_SUCCESS;
+}
+#else /* MACH_HOST */
+/*
+ * Only one processor set, the default processor set, in this case.
+ */
+kern_return_t
+host_processor_sets(
+ const host_t host,
+ processor_set_name_array_t *pset_list,
+ natural_t *count)
+{
+ vm_offset_t addr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Allocate memory. Can be pageable because it won't be
+ * touched while holding a lock.
+ */
+
+ addr = kalloc((vm_size_t) sizeof(mach_port_t));
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+
+	/* take ref for convert_pset_name_to_port */
+ pset_reference(&default_pset);
+ /* do the conversion that Mig should handle */
+ *((mach_port_t *) addr) =
+ (mach_port_t) convert_pset_name_to_port(&default_pset);
+
+ *pset_list = (mach_port_t *) addr;
+ *count = 1;
+
+ return KERN_SUCCESS;
+}
+#endif /* MACH_HOST */
+
+/*
+ * host_processor_set_priv:
+ *
+ * Return control port for given processor set.
+ */
+kern_return_t
+host_processor_set_priv(
+ const host_t host,
+ processor_set_t pset_name,
+ processor_set_t *pset)
+{
+ if ((host == HOST_NULL) || (pset_name == PROCESSOR_SET_NULL)) {
+ *pset = PROCESSOR_SET_NULL;
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *pset = pset_name;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
diff --git a/kern/host.h b/kern/host.h
new file mode 100644
index 0000000..5771da1
--- /dev/null
+++ b/kern/host.h
@@ -0,0 +1,48 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * kern/host.h
+ *
+ * Definitions for host data structures.
+ *
+ */
+
+#ifndef _KERN_HOST_H_
+#define _KERN_HOST_H_
+
+struct host {
+ struct ipc_port *host_self;
+ struct ipc_port *host_priv_self;
+};
+
+typedef struct host *host_t;
+typedef struct host host_data_t;
+
+#define HOST_NULL ((host_t)0)
+
+extern host_data_t realhost;
+
+#endif /* _KERN_HOST_H_ */
diff --git a/kern/ipc_host.c b/kern/ipc_host.c
new file mode 100644
index 0000000..6b81862
--- /dev/null
+++ b/kern/ipc_host.c
@@ -0,0 +1,451 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * kern/ipc_host.c
+ *
+ * Routines to implement host ports.
+ */
+
+#include <mach/message.h>
+#include <kern/debug.h>
+#include <kern/host.h>
+#include <kern/mach_host.server.h>
+#include <kern/processor.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/ipc_host.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <mach/mach_traps.h>
+
+#include <machine/machspl.h> /* for spl */
+
+
+
+/*
+ * ipc_host_init: set up various things.
+ */
+
+void ipc_host_init(void)
+{
+ ipc_port_t port;
+ /*
+ * Allocate and set up the two host ports.
+ */
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_host_init");
+
+ ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST);
+ realhost.host_self = port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_host_init");
+
+ ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV);
+ realhost.host_priv_self = port;
+
+ /*
+ * Set up ipc for default processor set.
+ */
+ ipc_pset_init(&default_pset);
+ ipc_pset_enable(&default_pset);
+
+ /*
+ * And for master processor
+ */
+ ipc_processor_init(master_processor);
+}
+
+/*
+ * Routine: mach_host_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own host port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_name_t
+mach_host_self(void)
+{
+ ipc_port_t sright;
+
+ sright = ipc_port_make_send(realhost.host_self);
+ return ipc_port_copyout_send(sright, current_space());
+}
+
+/*
+ * ipc_processor_init:
+ *
+ * Initialize ipc access to processor by allocating port.
+ * Enable ipc control of processor by setting port object.
+ */
+
+void
+ipc_processor_init(
+ processor_t processor)
+{
+ ipc_port_t port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_processor_init");
+ processor->processor_self = port;
+ ipc_kobject_set(port, (ipc_kobject_t) processor, IKOT_PROCESSOR);
+}
+
+
+/*
+ * ipc_pset_init:
+ *
+ * Initialize ipc control of a processor set by allocating its ports.
+ */
+
+void
+ipc_pset_init(
+ processor_set_t pset)
+{
+ ipc_port_t port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_pset_init");
+ pset->pset_self = port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_pset_init");
+ pset->pset_name_self = port;
+}
+
+/*
+ * ipc_pset_enable:
+ *
+ * Enable ipc access to a processor set.
+ */
+void
+ipc_pset_enable(
+ processor_set_t pset)
+{
+ pset_lock(pset);
+ if (pset->active) {
+ ipc_kobject_set(pset->pset_self,
+ (ipc_kobject_t) pset, IKOT_PSET);
+ ipc_kobject_set(pset->pset_name_self,
+ (ipc_kobject_t) pset, IKOT_PSET_NAME);
+ pset_ref_lock(pset);
+ pset->ref_count += 2;
+ pset_ref_unlock(pset);
+ }
+ pset_unlock(pset);
+}
+
+/*
+ * ipc_pset_disable:
+ *
+ * Disable ipc access to a processor set by clearing the port objects.
+ * Caller must hold pset lock and a reference to the pset. Ok to
+ * just decrement pset reference count as a result.
+ */
+void
+ipc_pset_disable(
+ processor_set_t pset)
+{
+ ipc_kobject_set(pset->pset_self, IKO_NULL, IKOT_NONE);
+ ipc_kobject_set(pset->pset_name_self, IKO_NULL, IKOT_NONE);
+ pset->ref_count -= 2;
+}
+
+/*
+ * ipc_pset_terminate:
+ *
+ * Processor set is dead. Deallocate the ipc control structures.
+ */
+void
+ipc_pset_terminate(
+ processor_set_t pset)
+{
+ ipc_port_dealloc_kernel(pset->pset_self);
+ ipc_port_dealloc_kernel(pset->pset_name_self);
+}
+
+/*
+ * processor_set_default:
+ *
+ * Return ports for manipulating default_processor set.
+ */
+kern_return_t
+processor_set_default(
+ const host_t host,
+ processor_set_t *pset)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ *pset = &default_pset;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: convert_port_to_host
+ * Purpose:
+ * Convert from a port to a host.
+ * Doesn't consume the port ref; the host produced may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+host_t
+convert_port_to_host(
+ ipc_port_t port)
+{
+ host_t host = HOST_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ ((ip_kotype(port) == IKOT_HOST) ||
+ (ip_kotype(port) == IKOT_HOST_PRIV)))
+ host = (host_t) port->ip_kobject;
+ ip_unlock(port);
+ }
+
+ return host;
+}
+
+/*
+ * Routine: convert_port_to_host_priv
+ * Purpose:
+ * Convert from a port to a host.
+ * Doesn't consume the port ref; the host produced may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+host_t
+convert_port_to_host_priv(
+ ipc_port_t port)
+{
+ host_t host = HOST_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_HOST_PRIV))
+ host = (host_t) port->ip_kobject;
+ ip_unlock(port);
+ }
+
+ return host;
+}
+
+/*
+ * Routine: convert_port_to_processor
+ * Purpose:
+ * Convert from a port to a processor.
+ * Doesn't consume the port ref;
+ * the processor produced may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+processor_t
+convert_port_to_processor(
+ ipc_port_t port)
+{
+ processor_t processor = PROCESSOR_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_PROCESSOR))
+ processor = (processor_t) port->ip_kobject;
+ ip_unlock(port);
+ }
+
+ return processor;
+}
+
+/*
+ * Routine: convert_port_to_pset
+ * Purpose:
+ * Convert from a port to a pset.
+ * Doesn't consume the port ref; produces a pset ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+processor_set_t
+convert_port_to_pset(
+ ipc_port_t port)
+{
+ processor_set_t pset = PROCESSOR_SET_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_PSET)) {
+ pset = (processor_set_t) port->ip_kobject;
+ pset_reference(pset);
+ }
+ ip_unlock(port);
+ }
+
+ return pset;
+}
+
+/*
+ * Routine: convert_port_to_pset_name
+ * Purpose:
+ * Convert from a port to a pset.
+ * Doesn't consume the port ref; produces a pset ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+processor_set_t
+convert_port_to_pset_name(
+ ipc_port_t port)
+{
+ processor_set_t pset = PROCESSOR_SET_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ ((ip_kotype(port) == IKOT_PSET) ||
+ (ip_kotype(port) == IKOT_PSET_NAME))) {
+ pset = (processor_set_t) port->ip_kobject;
+ pset_reference(pset);
+ }
+ ip_unlock(port);
+ }
+
+ return pset;
+}
+
+/*
+ * Routine: convert_host_to_port
+ * Purpose:
+ * Convert from a host to a port.
+ * Produces a naked send right which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_host_to_port(
+ host_t host)
+{
+ ipc_port_t port;
+
+ port = ipc_port_make_send(host->host_self);
+
+ return port;
+}
+
+/*
+ * Routine: convert_processor_to_port
+ * Purpose:
+ * Convert from a processor to a port.
+ * Produces a naked send right which is always valid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_processor_to_port(processor_t processor)
+{
+ ipc_port_t port;
+
+ port = ipc_port_make_send(processor->processor_self);
+
+ return port;
+}
+
+/*
+ * Routine: convert_pset_to_port
+ * Purpose:
+ * Convert from a pset to a port.
+ * Consumes a pset ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_pset_to_port(
+ processor_set_t pset)
+{
+ ipc_port_t port;
+
+ pset_lock(pset);
+ if (pset->active)
+ port = ipc_port_make_send(pset->pset_self);
+ else
+ port = IP_NULL;
+ pset_unlock(pset);
+
+ pset_deallocate(pset);
+ return port;
+}
+
+/*
+ * Routine: convert_pset_name_to_port
+ * Purpose:
+ * Convert from a pset to a port.
+ * Consumes a pset ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_pset_name_to_port(
+ processor_set_t pset)
+{
+ ipc_port_t port;
+
+ pset_lock(pset);
+ if (pset->active)
+ port = ipc_port_make_send(pset->pset_name_self);
+ else
+ port = IP_NULL;
+ pset_unlock(pset);
+
+ pset_deallocate(pset);
+ return port;
+}
diff --git a/kern/ipc_host.h b/kern/ipc_host.h
new file mode 100644
index 0000000..cd2ffaa
--- /dev/null
+++ b/kern/ipc_host.h
@@ -0,0 +1,72 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_IPC_HOST_H_
+#define _KERN_IPC_HOST_H_
+
+#include <mach/port.h>
+#include <kern/processor.h>
+
+extern void ipc_host_init(void);
+
+extern void ipc_processor_init(processor_t);
+
+extern void ipc_pset_init(processor_set_t);
+extern void ipc_pset_enable(processor_set_t);
+extern void ipc_pset_disable(processor_set_t);
+extern void ipc_pset_terminate(processor_set_t);
+
+extern struct host *
+convert_port_to_host(struct ipc_port *);
+
+extern struct ipc_port *
+convert_host_to_port(struct host *);
+
+extern struct host *
+convert_port_to_host_priv(struct ipc_port *);
+
+extern processor_t
+convert_port_to_processor(struct ipc_port *);
+
+extern struct ipc_port *
+convert_processor_to_port(processor_t);
+
+extern processor_set_t
+convert_port_to_pset(struct ipc_port *);
+
+extern struct ipc_port *
+convert_pset_to_port(processor_set_t);
+
+extern processor_set_t
+convert_port_to_pset_name(struct ipc_port *);
+
+extern struct ipc_port *
+convert_pset_name_to_port(processor_set_t);
+
+#endif /* _KERN_IPC_HOST_H_ */
diff --git a/kern/ipc_kobject.c b/kern/ipc_kobject.c
new file mode 100644
index 0000000..0a81595
--- /dev/null
+++ b/kern/ipc_kobject.c
@@ -0,0 +1,365 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: kern/ipc_kobject.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions for letting a port represent a kernel object.
+ */
+
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/mig_errors.h>
+#include <mach/notify.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_thread.h>
+#include <vm/vm_object.h>
+#include <vm/memory_object_proxy.h>
+#include <device/ds_routines.h>
+
+#include <kern/mach.server.h>
+#include <ipc/mach_port.server.h>
+#include <kern/mach_host.server.h>
+#include <device/device.server.h>
+#include <device/device_pager.server.h>
+#include <kern/mach4.server.h>
+#include <kern/gnumach.server.h>
+#include <kern/experimental.server.h>
+
+#if MACH_DEBUG
+#include <kern/mach_debug.server.h>
+#endif
+
+#if MACH_MACHINE_ROUTINES
+#include <machine/machine_routines.h>
+#include MACHINE_SERVER_HEADER
+#endif
+
+
+/*
+ * Routine: ipc_kobject_server
+ * Purpose:
+ * Handle a message sent to the kernel.
+ * Generates a reply message.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_kmsg_t
+ipc_kobject_server(ipc_kmsg_t request)
+{
+ mach_msg_size_t reply_size = ikm_less_overhead(8192);
+ ipc_kmsg_t reply;
+ kern_return_t kr;
+ mig_routine_t routine;
+ ipc_port_t *destp;
+
+ reply = ikm_alloc(reply_size);
+ if (reply == IKM_NULL) {
+ printf("ipc_kobject_server: dropping request\n");
+ ipc_kmsg_destroy(request);
+ return IKM_NULL;
+ }
+ ikm_init(reply, reply_size);
+
+ /*
+ * Initialize reply message.
+ */
+ {
+#define InP ((mach_msg_header_t *) &request->ikm_header)
+#define OutP ((mig_reply_header_t *) &reply->ikm_header)
+
+ static const mach_msg_type_t RetCodeType = {
+ .msgt_name = MACH_MSG_TYPE_INTEGER_32,
+ .msgt_size = 32,
+ .msgt_number = 1,
+ .msgt_inline = TRUE,
+ .msgt_longform = FALSE,
+ .msgt_deallocate = FALSE,
+ .msgt_unused = 0
+ };
+ OutP->Head.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0);
+ OutP->Head.msgh_size = sizeof(mig_reply_header_t);
+ OutP->Head.msgh_remote_port = InP->msgh_local_port;
+ OutP->Head.msgh_local_port = MACH_PORT_NULL;
+ OutP->Head.msgh_seqno = 0;
+ OutP->Head.msgh_id = InP->msgh_id + 100;
+#if 0
+ if (InP->msgh_id) {
+ static long _calls;
+ static struct { long id, count; } _counts[512];
+ int i, id;
+
+ id = InP->msgh_id;
+ for (i = 0; i < 511; i++) {
+ if (_counts[i].id == 0) {
+ _counts[i].id = id;
+ _counts[i].count++;
+ break;
+ }
+ if (_counts[i].id == id) {
+ _counts[i].count++;
+ break;
+ }
+ }
+ if (i == 511) {
+ _counts[i].id = id;
+ _counts[i].count++;
+ }
+ if ((++_calls & 0x7fff) == 0)
+ for (i = 0; i < 512; i++) {
+ if (_counts[i].id == 0)
+ break;
+ printf("%d: %d\n",
+ _counts[i].id, _counts[i].count);
+ }
+ }
+#endif
+
+ OutP->RetCodeType = RetCodeType;
+
+#undef InP
+#undef OutP
+ }
+
+ /*
+ * Find the server routine to call, and call it
+ * to perform the kernel function
+ */
+ {
+ check_simple_locks();
+ if ((routine = mach_server_routine(&request->ikm_header)) != 0
+ || (routine = mach_port_server_routine(&request->ikm_header)) != 0
+ || (routine = mach_host_server_routine(&request->ikm_header)) != 0
+ || (routine = device_server_routine(&request->ikm_header)) != 0
+ || (routine = device_pager_server_routine(&request->ikm_header)) != 0
+#if MACH_DEBUG
+ || (routine = mach_debug_server_routine(&request->ikm_header)) != 0
+#endif /* MACH_DEBUG */
+ || (routine = mach4_server_routine(&request->ikm_header)) != 0
+ || (routine = gnumach_server_routine(&request->ikm_header)) != 0
+ || (routine = experimental_server_routine(&request->ikm_header)) != 0
+#if MACH_MACHINE_ROUTINES
+ || (routine = MACHINE_SERVER_ROUTINE(&request->ikm_header)) != 0
+#endif /* MACH_MACHINE_ROUTINES */
+ ) {
+ (*routine)(&request->ikm_header, &reply->ikm_header);
+ kernel_task->messages_received++;
+ } else {
+ if (!ipc_kobject_notify(&request->ikm_header,
+ &reply->ikm_header)) {
+ ((mig_reply_header_t *) &reply->ikm_header)->RetCode
+ = MIG_BAD_ID;
+#if MACH_IPC_TEST
+ printf("ipc_kobject_server: bogus kernel message, id=%d\n",
+ request->ikm_header.msgh_id);
+#endif /* MACH_IPC_TEST */
+ } else {
+ kernel_task->messages_received++;
+ }
+ }
+ kernel_task->messages_sent++;
+ }
+ check_simple_locks();
+
+ /*
+ * Destroy destination. The following code differs from
+ * ipc_object_destroy in that we release the send-once
+ * right instead of generating a send-once notification
+ * (which would bring us here again, creating a loop).
+ * It also differs in that we only expect send or
+ * send-once rights, never receive rights.
+ *
+ * We set msgh_remote_port to IP_NULL so that the kmsg
+ * destroy routines don't try to destroy the port twice.
+ */
+ destp = (ipc_port_t *) &request->ikm_header.msgh_remote_port;
+ switch (MACH_MSGH_BITS_REMOTE(request->ikm_header.msgh_bits)) {
+ case MACH_MSG_TYPE_PORT_SEND:
+ ipc_port_release_send(*destp);
+ break;
+
+ case MACH_MSG_TYPE_PORT_SEND_ONCE:
+ ipc_port_release_sonce(*destp);
+ break;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_destroy: strange destination rights");
+#else
+ panic("ipc_object_destroy: strange destination rights");
+#endif
+ }
+ *destp = IP_NULL;
+
+ kr = ((mig_reply_header_t *) &reply->ikm_header)->RetCode;
+ if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) {
+ /*
+ * The server function is responsible for the contents
+ * of the message. The reply port right is moved
+ * to the reply message, and we have deallocated
+ * the destination port right, so we just need
+ * to free the kmsg.
+ */
+
+ /* like ipc_kmsg_put, but without the copyout */
+
+ ikm_check_initialized(request, request->ikm_size);
+ ikm_cache_free(request);
+ } else {
+ /*
+ * The message contents of the request are intact.
+ * Destroy everything except the reply port right,
+ * which is needed in the reply message.
+ */
+
+ request->ikm_header.msgh_local_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(request);
+ }
+
+ if (kr == MIG_NO_REPLY) {
+ /*
+ * The server function will send a reply message
+ * using the reply port right, which it has saved.
+ */
+
+ ikm_free(reply);
+ return IKM_NULL;
+ } else if (!IP_VALID((ipc_port_t)reply->ikm_header.msgh_remote_port)) {
+ /*
+ * Can't queue the reply message if the destination
+ * (the reply port) isn't valid.
+ */
+
+ ipc_kmsg_destroy(reply);
+ return IKM_NULL;
+ }
+
+ return reply;
+}
+
+/*
+ * Routine: ipc_kobject_set
+ * Purpose:
+ * Make a port represent a kernel object of the given type.
+ * The caller is responsible for handling refs for the
+ * kernel object, if necessary.
+ * Conditions:
+ * Nothing locked. The port must be active.
+ */
+
+void
+ipc_kobject_set(ipc_port_t port, ipc_kobject_t kobject, ipc_kobject_type_t type)
+{
+ ip_lock(port);
+ assert(ip_active(port));
+ port->ip_bits = (port->ip_bits &~ IO_BITS_KOTYPE) | type;
+ port->ip_kobject = kobject;
+ ip_unlock(port);
+}
+
+/*
+ * Routine: ipc_kobject_destroy
+ * Purpose:
+ * Release any kernel object resources associated
+ * with the port, which is being destroyed.
+ *
+ * This should only be needed when resources are
+ * associated with a user's port. In the normal case,
+ * when the kernel is the receiver, the code calling
+ * ipc_port_dealloc_kernel should clean up the resources.
+ * Conditions:
+ * The port is not locked, but it is dead.
+ */
+
+void
+ipc_kobject_destroy(
+ ipc_port_t port)
+{
+ switch (ip_kotype(port)) {
+ case IKOT_PAGER:
+ vm_object_destroy(port);
+ break;
+
+ case IKOT_PAGER_TERMINATING:
+ vm_object_pager_wakeup(port);
+ break;
+
+ default:
+#if MACH_ASSERT
+ printf("ipc_kobject_destroy: port 0x%p, kobj 0x%zd, type %d\n",
+ port, port->ip_kobject, ip_kotype(port));
+#endif /* MACH_ASSERT */
+ break;
+ }
+}
+
+/*
+ * Routine: ipc_kobject_notify
+ * Purpose:
+ * Deliver notifications to kobjects that care about them.
+ */
+
+boolean_t
+ipc_kobject_notify(mach_msg_header_t *request_header,
+ mach_msg_header_t *reply_header)
+{
+ ipc_port_t port = (ipc_port_t) request_header->msgh_remote_port;
+
+ ((mig_reply_header_t *) reply_header)->RetCode = MIG_NO_REPLY;
+ switch (request_header->msgh_id) {
+ case MACH_NOTIFY_PORT_DELETED:
+ case MACH_NOTIFY_MSG_ACCEPTED:
+ case MACH_NOTIFY_PORT_DESTROYED:
+ case MACH_NOTIFY_NO_SENDERS:
+ case MACH_NOTIFY_SEND_ONCE:
+ case MACH_NOTIFY_DEAD_NAME:
+ break;
+
+ default:
+ return FALSE;
+ }
+ switch (ip_kotype(port)) {
+ case IKOT_DEVICE:
+ return ds_notify(request_header);
+
+ case IKOT_PAGER_PROXY:
+ return memory_object_proxy_notify(request_header);
+
+ default:
+ return FALSE;
+ }
+}
diff --git a/kern/ipc_kobject.h b/kern/ipc_kobject.h
new file mode 100644
index 0000000..606a66a
--- /dev/null
+++ b/kern/ipc_kobject.h
@@ -0,0 +1,123 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: kern/ipc_kobject.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations for letting a port represent a kernel object.
+ */
+
+#ifndef _KERN_IPC_KOBJECT_H_
+#define _KERN_IPC_KOBJECT_H_
+
+#include <mach/machine/vm_types.h>
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_kmsg.h>
+
+typedef vm_offset_t ipc_kobject_t;
+
+#define IKO_NULL ((ipc_kobject_t) 0)
+
+typedef unsigned int ipc_kobject_type_t;
+
+#define IKOT_NONE 0
+#define IKOT_THREAD 1
+#define IKOT_TASK 2
+#define IKOT_HOST 3
+#define IKOT_HOST_PRIV 4
+#define IKOT_PROCESSOR 5
+#define IKOT_PSET 6
+#define IKOT_PSET_NAME 7
+#define IKOT_PAGER 8
+#define IKOT_PAGING_REQUEST 9
+#define IKOT_DEVICE 10
+#define IKOT_XMM_OBJECT 11
+#define IKOT_XMM_PAGER 12
+#define IKOT_XMM_KERNEL 13
+#define IKOT_XMM_REPLY 14
+#define IKOT_PAGER_TERMINATING 15
+#define IKOT_PAGING_NAME 16
+#define IKOT_HOST_SECURITY 17
+#define IKOT_LEDGER 18
+#define IKOT_MASTER_DEVICE 19
+#define IKOT_ACT 20
+#define IKOT_SUBSYSTEM 21
+#define IKOT_IO_DONE_QUEUE 22
+#define IKOT_SEMAPHORE 23
+#define IKOT_LOCK_SET 24
+#define IKOT_CLOCK 25
+#define IKOT_CLOCK_CTRL 26
+#define IKOT_PAGER_PROXY 27
+ /* << new entries here */
+#define IKOT_UNKNOWN 28 /* magic catchall */
+#define IKOT_MAX_TYPE 29 /* # of IKOT_ types */
+ /* Please keep ipc/ipc_object.c:ikot_print_array up to date */
+
+#define is_ipc_kobject(ikot) (ikot != IKOT_NONE)
+
+/*
+ * Define types of kernel objects that use page lists instead
+ * of entry lists for copyin of out of line memory.
+ */
+
+#define ipc_kobject_vm_page_list(ikot) \
+ ((ikot == IKOT_PAGING_REQUEST) || (ikot == IKOT_DEVICE))
+
+#define ipc_kobject_vm_page_steal(ikot) (ikot == IKOT_PAGING_REQUEST)
+
+/* Initialize kernel server dispatch table */
+/* XXX
+extern void mig_init(void);
+*/
+
+/* Dispatch a kernel server function */
+extern ipc_kmsg_t ipc_kobject_server(
+ ipc_kmsg_t request);
+
+/* Make a port represent a kernel object of the given type */
+extern void ipc_kobject_set(
+ ipc_port_t port,
+ ipc_kobject_t kobject,
+ ipc_kobject_type_t type);
+
+/* Release any kernel object resources associated with a port */
+extern void ipc_kobject_destroy(
+ ipc_port_t port);
+
+/* Deliver notifications to kobjects that care about them */
+extern boolean_t ipc_kobject_notify (
+ mach_msg_header_t *request_header,
+ mach_msg_header_t *reply_header);
+
+#define null_conversion(port) (port)
+
+#endif /* _KERN_IPC_KOBJECT_H_ */
diff --git a/kern/ipc_mig.c b/kern/ipc_mig.c
new file mode 100644
index 0000000..d26d2c6
--- /dev/null
+++ b/kern/ipc_mig.c
@@ -0,0 +1,984 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/mig_support.h>
+#include <mach/thread_status.h>
+#include <machine/locore.h>
+#include <machine/copy_user.h>
+#include <kern/ast.h>
+#include <kern/debug.h>
+#include <kern/ipc_tt.h>
+#include <kern/syscall_subr.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/ipc_kobject.h>
+#include <kern/ipc_tt.h>
+#include <kern/ipc_mig.h>
+#include <vm/vm_map.h>
+#include <vm/vm_user.h>
+#include <ipc/port.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/mach_port.server.h>
+#include <device/dev_hdr.h>
+#include <device/device_types.h>
+#include <device/ds_routines.h>
+
+/*
+ * Routine: mach_msg_send_from_kernel
+ * Purpose:
+ * Send a message from the kernel.
+ *
+ * This is used by the client side of KernelUser interfaces
+ * to implement SimpleRoutines. Currently, this includes
+ * device_reply and memory_object messages.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Sent the message.
+ *	MACH_SEND_INVALID_DEST	Bad destination port.
+ */
+
+mach_msg_return_t
+mach_msg_send_from_kernel(
+ mach_msg_header_t *msg,
+ mach_msg_size_t send_size)
+{
+ ipc_kmsg_t kmsg;
+ mach_msg_return_t mr;
+
+ if (!MACH_PORT_VALID(msg->msgh_remote_port))
+ return MACH_SEND_INVALID_DEST;
+
+ mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ panic("mach_msg_send_from_kernel");
+
+ ipc_kmsg_copyin_from_kernel(kmsg);
+ ipc_mqueue_send_always(kmsg);
+
+ return MACH_MSG_SUCCESS;
+}
+
+mach_msg_return_t
+mach_msg_rpc_from_kernel(const mach_msg_header_t *msg,
+ mach_msg_size_t send_size,
+ mach_msg_size_t reply_size)
+{
+ panic("mach_msg_rpc_from_kernel"); /*XXX*/
+}
+
+/*
+ * Routine: mach_msg_abort_rpc
+ * Purpose:
+ * Destroy the thread's ith_rpc_reply port.
+ * This will interrupt a mach_msg_rpc_from_kernel
+ * with a MACH_RCV_PORT_DIED return code.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+mach_msg_abort_rpc(ipc_thread_t thread)
+{
+ ipc_port_t reply = IP_NULL;
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL) {
+ reply = thread->ith_rpc_reply;
+ thread->ith_rpc_reply = IP_NULL;
+ }
+ ith_unlock(thread);
+
+ if (reply != IP_NULL)
+ ipc_port_dealloc_reply(reply);
+}
+
+/*
+ * Routine: mach_msg
+ * Purpose:
+ * Like mach_msg_trap except that message buffers
+ * live in kernel space. Doesn't handle any options.
+ *
+ * This is used by in-kernel server threads to make
+ * kernel calls, to receive request messages, and
+ * to send reply messages.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+mach_msg_return_t
+mach_msg(
+ mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_name_t rcv_name,
+ mach_msg_timeout_t time_out,
+ mach_port_name_t notify)
+{
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ if (option & MACH_SEND_MSG) {
+ mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ panic("mach_msg");
+
+ mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return mr;
+ }
+
+ do
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ while (mr == MACH_SEND_INTERRUPTED);
+ assert(mr == MACH_MSG_SUCCESS);
+ }
+
+ if (option & MACH_RCV_MSG) {
+ do {
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+
+ mr = ipc_mqueue_copyin(space, rcv_name,
+ &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* hold ref for object; mqueue is locked */
+
+ mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, IMQ_NULL_CONTINUE,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ } while (mr == MACH_RCV_INTERRUPTED);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+
+ if (rcv_size < msg_usize(&kmsg->ikm_header)) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ ipc_kmsg_put_to_kernel(msg, kmsg, sizeof *msg);
+ return MACH_RCV_TOO_LARGE;
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ ipc_kmsg_put_to_kernel(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ ipc_kmsg_put_to_kernel(msg, kmsg, sizeof *msg);
+ }
+
+ return mr;
+ }
+
+ ipc_kmsg_put_to_kernel(msg, kmsg, kmsg->ikm_header.msgh_size);
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: mig_get_reply_port
+ * Purpose:
+ * Called by client side interfaces living in the kernel
+ * to get a reply port. This port is used for
+ * mach_msg() calls which are kernel calls.
+ */
+
+mach_port_name_t
+mig_get_reply_port(void)
+{
+ ipc_thread_t self = current_thread();
+
+ if (self->ith_mig_reply == MACH_PORT_NULL)
+ self->ith_mig_reply = mach_reply_port();
+
+ return self->ith_mig_reply;
+}
+
+/*
+ * Routine: mig_dealloc_reply_port
+ * Purpose:
+ * Called by client side interfaces to get rid of a reply port.
+ * Shouldn't ever be called inside the kernel, because
+ * kernel calls shouldn't prompt Mig to call it.
+ */
+
+void
+mig_dealloc_reply_port(
+ mach_port_t reply_port)
+{
+ panic("mig_dealloc_reply_port");
+}
+
+/*
+ * Routine: mig_put_reply_port
+ * Purpose:
+ * Called by client side interfaces after each RPC to
+ * let the client recycle the reply port if it wishes.
+ */
+void
+mig_put_reply_port(
+ mach_port_t reply_port)
+{
+}
+
+/*
+ * mig_strncpy.c - by Joshua Block
+ *
+ * mig_strncpy -- Bounded string copy. Does what the library routine
+ * strncpy does: Copies the (null terminated) string in src into dest,
+ * a buffer of length len. Returns the length of the destination
+ * string excluding the terminating null.
+ *
+ * Parameters:
+ *
+ * dest - Pointer to destination buffer.
+ *
+ * src - Pointer to source string.
+ *
+ * len - Length of destination buffer.
+ */
+vm_size_t
+mig_strncpy(char *dest, const char *src, vm_size_t len)
+{
+ char *dest_ = dest;
+ int i;
+
+ if (len <= 0)
+ return 0;
+
+ for (i = 0; i < len; i++) {
+ if (! (*dest = *src))
+ break;
+ dest++;
+ src++;
+ }
+
+ return dest - dest_;
+}
+
+/* Called by MiG to deallocate memory, which in this case happens
+ * to be kernel memory. */
+void
+mig_deallocate(vm_address_t addr, vm_size_t size)
+{
+ (void) size;
+ /* We do the same thing as in ipc_kmsg_clean_body. */
+ vm_map_copy_discard((vm_map_copy_t) addr);
+}
+
+#define fast_send_right_lookup(name, port, abort) \
+MACRO_BEGIN \
+ ipc_space_t space = current_space(); \
+ ipc_entry_t entry; \
+ \
+ is_read_lock(space); \
+ assert(space->is_active); \
+ \
+ entry = ipc_entry_lookup (space, name); \
+ if (entry == IE_NULL) { \
+ is_read_unlock (space); \
+ abort; \
+ } \
+ \
+ if (IE_BITS_TYPE (entry->ie_bits) != MACH_PORT_TYPE_SEND) { \
+ is_read_unlock (space); \
+ abort; \
+ } \
+ \
+ port = (ipc_port_t) entry->ie_object; \
+ assert(port != IP_NULL); \
+ \
+ ip_lock(port); \
+ /* can safely unlock space now that port is locked */ \
+ is_read_unlock(space); \
+MACRO_END
+
+static device_t
+port_name_to_device(mach_port_name_t name)
+{
+ ipc_port_t port;
+ device_t device;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ /*
+ * Now map the port object to a device object.
+ * This is an inline version of dev_port_lookup().
+ */
+ if (ip_active(port) && (ip_kotype(port) == IKOT_DEVICE)) {
+ device = (device_t) port->ip_kobject;
+ device_reference(device);
+ ip_unlock(port);
+ return device;
+ }
+
+ ip_unlock(port);
+ return DEVICE_NULL;
+
+ /*
+ * The slow case. The port wasn't easily accessible.
+ */
+ abort: {
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return DEVICE_NULL;
+
+ device = dev_port_lookup(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+ return device;
+ }
+}
+
+static thread_t
+port_name_to_thread(mach_port_name_t name)
+{
+ ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_THREAD)) {
+ thread_t thread;
+
+ thread = (thread_t) port->ip_kobject;
+ assert(thread != THREAD_NULL);
+
+ /* thread referencing is a bit complicated,
+ so don't bother to expand inline */
+ thread_reference(thread);
+ ip_unlock(port);
+
+ return thread;
+ }
+
+ ip_unlock(port);
+ return THREAD_NULL;
+
+ abort: {
+ thread_t thread;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return THREAD_NULL;
+
+ thread = convert_port_to_thread(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return thread;
+ }
+}
+
+static task_t
+port_name_to_task(mach_port_name_t name)
+{
+ ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ task_t task;
+
+ task = (task_t) port->ip_kobject;
+ assert(task != TASK_NULL);
+
+ task_lock(task);
+ /* can safely unlock port now that task is locked */
+ ip_unlock(port);
+
+ task->ref_count++;
+ task_unlock(task);
+
+ return task;
+ }
+
+ ip_unlock(port);
+ return TASK_NULL;
+
+ abort: {
+ task_t task;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return TASK_NULL;
+
+ task = convert_port_to_task(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return task;
+ }
+}
+
+static vm_map_t
+port_name_to_map(
+ mach_port_name_t name)
+{
+ ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ vm_map_t map;
+
+ map = ((task_t) port->ip_kobject)->map;
+ assert(map != VM_MAP_NULL);
+
+ simple_lock(&map->ref_lock);
+ /* can safely unlock port now that map is locked */
+ ip_unlock(port);
+
+ map->ref_count++;
+ simple_unlock(&map->ref_lock);
+
+ return map;
+ }
+
+ ip_unlock(port);
+ return VM_MAP_NULL;
+
+ abort: {
+ vm_map_t map;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return VM_MAP_NULL;
+
+ map = convert_port_to_map(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return map;
+ }
+}
+
+static ipc_space_t
+port_name_to_space(mach_port_name_t name)
+{
+ ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ ipc_space_t space;
+
+ space = ((task_t) port->ip_kobject)->itk_space;
+ assert(space != IS_NULL);
+
+ simple_lock(&space->is_ref_lock_data);
+ /* can safely unlock port now that space is locked */
+ ip_unlock(port);
+
+ space->is_references++;
+ simple_unlock(&space->is_ref_lock_data);
+
+ return space;
+ }
+
+ ip_unlock(port);
+ return IS_NULL;
+
+ abort: {
+ ipc_space_t space;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return IS_NULL;
+
+ space = convert_port_to_space(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return space;
+ }
+}
+
+/*
+ * Things to keep in mind:
+ *
+ * The idea here is to duplicate the semantics of the true kernel RPC.
+ * The destination port/object should be checked first, before anything
+ * that the user might notice (like ipc_object_copyin). Return
+ * MACH_SEND_INTERRUPTED if it isn't correct, so that the user stub
+ * knows to fall back on an RPC. For other return values, it won't
+ * retry with an RPC. The retry might get a different (incorrect) rc.
+ * Return values are only set (and should only be set, with copyout)
+ * on successful calls.
+ */
+
+kern_return_t
+syscall_vm_map(
+ mach_port_name_t target_map,
+ rpc_vm_offset_t *address,
+ rpc_vm_size_t size,
+ rpc_vm_offset_t mask,
+ boolean_t anywhere,
+ mach_port_name_t memory_object,
+ rpc_vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_map_t map;
+ ipc_port_t port;
+ vm_offset_t addr;
+ kern_return_t result;
+
+ map = port_name_to_map(target_map);
+ if (map == VM_MAP_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ if (MACH_PORT_NAME_VALID(memory_object)) {
+ result = ipc_object_copyin(current_space(), memory_object,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &port);
+ if (result != KERN_SUCCESS) {
+ vm_map_deallocate(map);
+ return result;
+ }
+ } else
+ port = (ipc_port_t)invalid_name_to_port(memory_object);
+
+ copyin_address(address, &addr);
+ result = vm_map(map, &addr, size, mask, anywhere,
+ port, offset, copy,
+ cur_protection, max_protection, inheritance);
+ if (result == KERN_SUCCESS)
+ copyout_address(&addr, address);
+ if (IP_VALID(port))
+ ipc_port_release_send(port);
+ vm_map_deallocate(map);
+
+ return result;
+}
+
+kern_return_t syscall_vm_allocate(
+ mach_port_name_t target_map,
+ rpc_vm_offset_t *address,
+ rpc_vm_size_t size,
+ boolean_t anywhere)
+{
+ vm_map_t map;
+ vm_offset_t addr;
+ kern_return_t result;
+
+ map = port_name_to_map(target_map);
+ if (map == VM_MAP_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ copyin_address(address, &addr);
+ result = vm_allocate(map, &addr, size, anywhere);
+ if (result == KERN_SUCCESS)
+ copyout_address(&addr, address);
+ vm_map_deallocate(map);
+
+ return result;
+}
+
+kern_return_t syscall_vm_deallocate(
+ mach_port_name_t target_map,
+ rpc_vm_offset_t start,
+ rpc_vm_size_t size)
+{
+ vm_map_t map;
+ kern_return_t result;
+
+ map = port_name_to_map(target_map);
+ if (map == VM_MAP_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = vm_deallocate(map, start, size);
+ vm_map_deallocate(map);
+
+ return result;
+}
+
+kern_return_t syscall_task_create(
+ mach_port_name_t parent_task,
+ boolean_t inherit_memory,
+ mach_port_name_t *child_task) /* OUT */
+{
+ task_t t, c;
+ ipc_port_t port;
+ mach_port_name_t name;
+ kern_return_t result;
+
+ t = port_name_to_task(parent_task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = task_create(t, inherit_memory, &c);
+ if (result == KERN_SUCCESS) {
+ port = (ipc_port_t) convert_task_to_port(c);
+ /* always returns a name, even for non-success return codes */
+ (void) ipc_kmsg_copyout_object(current_space(),
+ (ipc_object_t) port,
+ MACH_MSG_TYPE_PORT_SEND, &name);
+ copyout(&name, child_task, sizeof(mach_port_name_t));
+ }
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t syscall_task_terminate(mach_port_name_t task)
+{
+ task_t t;
+ kern_return_t result;
+
+ t = port_name_to_task(task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = task_terminate(t);
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t syscall_task_suspend(mach_port_name_t task)
+{
+ task_t t;
+ kern_return_t result;
+
+ t = port_name_to_task(task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = task_suspend(t);
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t syscall_task_set_special_port(
+ mach_port_name_t task,
+ int which_port,
+ mach_port_name_t port_name)
+{
+ task_t t;
+ ipc_port_t port;
+ kern_return_t result;
+
+ t = port_name_to_task(task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ if (MACH_PORT_NAME_VALID(port_name)) {
+ result = ipc_object_copyin(current_space(), port_name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &port);
+ if (result != KERN_SUCCESS) {
+ task_deallocate(t);
+ return result;
+ }
+ } else
+ port = (ipc_port_t)invalid_name_to_port(port_name);
+
+ result = task_set_special_port(t, which_port, port);
+ if ((result != KERN_SUCCESS) && IP_VALID(port))
+ ipc_port_release_send(port);
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t
+syscall_mach_port_allocate(
+ mach_port_name_t task,
+ mach_port_right_t right,
+ mach_port_name_t *namep)
+{
+ ipc_space_t space;
+ mach_port_name_t name;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ kr = mach_port_allocate(space, right, &name);
+ if (kr == KERN_SUCCESS)
+ {
+ copyout(&name, namep, sizeof(mach_port_name_t));
+ }
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t
+syscall_mach_port_allocate_name(
+ mach_port_name_t task,
+ mach_port_right_t right,
+ mach_port_name_t name)
+{
+ ipc_space_t space;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ kr = mach_port_allocate_name(space, right, name);
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t
+syscall_mach_port_deallocate(
+ mach_port_name_t task,
+ mach_port_name_t name)
+{
+ ipc_space_t space;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ kr = mach_port_deallocate(space, name);
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t
+syscall_mach_port_insert_right(
+ mach_port_name_t task,
+ mach_port_name_t name,
+ mach_port_name_t right,
+ mach_msg_type_name_t rightType)
+{
+ ipc_space_t space;
+ ipc_object_t object;
+ mach_msg_type_name_t newtype;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ if (!MACH_MSG_TYPE_PORT_ANY(rightType)) {
+ is_release(space);
+ return KERN_INVALID_VALUE;
+ }
+
+ if (MACH_PORT_NAME_VALID(right)) {
+ kr = ipc_object_copyin(current_space(), right, rightType,
+ &object);
+ if (kr != KERN_SUCCESS) {
+ is_release(space);
+ return kr;
+ }
+ } else
+ object = (ipc_object_t)invalid_name_to_port(right);
+ newtype = ipc_object_copyin_type(rightType);
+
+ kr = mach_port_insert_right(space, name, (ipc_port_t) object, newtype);
+ if ((kr != KERN_SUCCESS) && IO_VALID(object))
+ ipc_object_destroy(object, newtype);
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t syscall_thread_depress_abort(mach_port_name_t thread)
+{
+ thread_t t;
+ kern_return_t result;
+
+ t = port_name_to_thread(thread);
+ if (t == THREAD_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = thread_depress_abort(t);
+ thread_deallocate(t);
+
+ return result;
+}
+
+/*
+ * Device traps -- these are way experimental.
+ */
+io_return_t
+syscall_device_write_request(mach_port_name_t device_name,
+ mach_port_name_t reply_name,
+ dev_mode_t mode,
+ rpc_recnum_t recnum,
+ rpc_vm_offset_t data,
+ rpc_vm_size_t data_count)
+{
+ device_t dev;
+ /*ipc_port_t reply_port;*/
+ io_return_t res;
+
+ /*
+ * First try to translate the device name.
+ *
+ * If this fails, return KERN_INVALID_CAPABILITY.
+ * Caller knows that this most likely means that
+ * device is not local to node and IPC should be used.
+ *
+ * If kernel doesn't do device traps, kern_invalid()
+ * will be called instead of this function which will
+ * return KERN_INVALID_ARGUMENT.
+ */
+ dev = port_name_to_device(device_name);
+ if (dev == DEVICE_NULL)
+ return KERN_INVALID_CAPABILITY;
+
+ /*
+ * Translate reply port.
+ */
+ /*if (reply_name == MACH_PORT_NULL)
+ reply_port = IP_NULL;
+ */
+ if (reply_name != MACH_PORT_NULL) {
+ /* Homey don't play that. */
+ device_deallocate(dev);
+ return KERN_INVALID_RIGHT;
+ }
+
+ /* note: doesn't take reply_port arg yet. */
+ res = ds_device_write_trap(dev, /*reply_port,*/
+ mode, recnum,
+ data, data_count);
+
+ /*
+ * Give up reference from port_name_to_device.
+ */
+ device_deallocate(dev);
+ return res;
+}
+
+io_return_t
+syscall_device_writev_request(mach_port_name_t device_name,
+ mach_port_name_t reply_name,
+ dev_mode_t mode,
+ rpc_recnum_t recnum,
+ rpc_io_buf_vec_t *iovec,
+ rpc_vm_size_t iocount)
+{
+ device_t dev;
+ /*ipc_port_t reply_port;*/
+ io_return_t res;
+
+ /*
+ * First try to translate the device name.
+ *
+ * If this fails, return KERN_INVALID_CAPABILITY.
+ * Caller knows that this most likely means that
+ * device is not local to node and IPC should be used.
+ *
+ * If kernel doesn't do device traps, kern_invalid()
+ * will be called instead of this function which will
+ * return KERN_INVALID_ARGUMENT.
+ */
+ dev = port_name_to_device(device_name);
+ if (dev == DEVICE_NULL)
+ return KERN_INVALID_CAPABILITY;
+
+ /*
+ * Translate reply port.
+ */
+ /*if (reply_name == MACH_PORT_NULL)
+ reply_port = IP_NULL;
+ */
+ if (reply_name != MACH_PORT_NULL) {
+ /* Homey don't play that. */
+ device_deallocate(dev);
+ return KERN_INVALID_RIGHT;
+ }
+
+ /* note: doesn't take reply_port arg yet. */
+ res = ds_device_writev_trap(dev, /*reply_port,*/
+ mode, recnum,
+ iovec, iocount);
+
+ /*
+ * Give up reference from port_name_to_device.
+ */
+ device_deallocate(dev);
+ return res;
+}
diff --git a/kern/ipc_mig.h b/kern/ipc_mig.h
new file mode 100644
index 0000000..422e8d8
--- /dev/null
+++ b/kern/ipc_mig.h
@@ -0,0 +1,143 @@
+/*
+ * MIG IPC functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * MIG IPC functions.
+ *
+ */
+
+#ifndef _IPC_MIG_H_
+#define _IPC_MIG_H_
+
+#include <mach/std_types.h>
+#include <device/device_types.h>
+#include <ipc/ipc_thread.h>
+
+/*
+ * Routine: mach_msg_send_from_kernel
+ * Purpose:
+ * Send a message from the kernel.
+ *
+ * This is used by the client side of KernelUser interfaces
+ * to implement SimpleRoutines. Currently, this includes
+ * device_reply and memory_object messages.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Sent the message.
+ *	MACH_SEND_INVALID_DEST	Bad destination port.
+ */
+extern mach_msg_return_t mach_msg_send_from_kernel(
+ mach_msg_header_t *msg,
+ mach_msg_size_t send_size);
+
+/*
+ * Routine: mach_msg_abort_rpc
+ * Purpose:
+ * Destroy the thread's ith_rpc_reply port.
+ * This will interrupt a mach_msg_rpc_from_kernel
+ * with a MACH_RCV_PORT_DIED return code.
+ * Conditions:
+ * Nothing locked.
+ */
+extern void mach_msg_abort_rpc (ipc_thread_t);
+
+extern mach_msg_return_t mach_msg_rpc_from_kernel(
+ const mach_msg_header_t *msg,
+ mach_msg_size_t send_size,
+ mach_msg_size_t reply_size);
+
+extern kern_return_t syscall_vm_map(
+ mach_port_name_t target_map,
+ rpc_vm_offset_t *address,
+ rpc_vm_size_t size,
+ rpc_vm_offset_t mask,
+ boolean_t anywhere,
+ mach_port_name_t memory_object,
+ rpc_vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
+extern kern_return_t syscall_vm_allocate(
+ mach_port_name_t target_map,
+ rpc_vm_offset_t *address,
+ rpc_vm_size_t size,
+ boolean_t anywhere);
+
+extern kern_return_t syscall_vm_deallocate(
+ mach_port_name_t target_map,
+ rpc_vm_offset_t start,
+ rpc_vm_size_t size);
+
+extern kern_return_t syscall_task_create(
+ mach_port_name_t parent_task,
+ boolean_t inherit_memory,
+ mach_port_name_t *child_task);
+
+extern kern_return_t syscall_task_terminate(mach_port_name_t task);
+
+extern kern_return_t syscall_task_suspend(mach_port_name_t task);
+
+extern kern_return_t syscall_task_set_special_port(
+ mach_port_name_t task,
+ int which_port,
+ mach_port_name_t port_name);
+
+extern kern_return_t syscall_mach_port_allocate(
+ mach_port_name_t task,
+ mach_port_right_t right,
+ mach_port_name_t *namep);
+
+extern kern_return_t syscall_mach_port_deallocate(
+ mach_port_name_t task,
+ mach_port_name_t name);
+
+extern kern_return_t syscall_mach_port_insert_right(
+ mach_port_name_t task,
+ mach_port_name_t name,
+ mach_port_name_t right,
+ mach_msg_type_name_t rightType);
+
+extern kern_return_t syscall_mach_port_allocate_name(
+ mach_port_name_t task,
+ mach_port_right_t right,
+ mach_port_name_t name);
+
+extern kern_return_t syscall_thread_depress_abort(mach_port_name_t thread);
+
+extern io_return_t syscall_device_write_request(
+ mach_port_name_t device_name,
+ mach_port_name_t reply_name,
+ dev_mode_t mode,
+ rpc_recnum_t recnum,
+ rpc_vm_offset_t data,
+ rpc_vm_size_t data_count);
+
+io_return_t syscall_device_writev_request(
+ mach_port_name_t device_name,
+ mach_port_name_t reply_name,
+ dev_mode_t mode,
+ rpc_recnum_t recnum,
+ rpc_io_buf_vec_t *iovec,
+ rpc_vm_size_t iocount);
+
+#endif /* _IPC_MIG_H_ */
diff --git a/kern/ipc_sched.c b/kern/ipc_sched.c
new file mode 100644
index 0000000..4519c65
--- /dev/null
+++ b/kern/ipc_sched.c
@@ -0,0 +1,283 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993, 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/message.h>
+#include <kern/counters.h>
+#include "cpu_number.h"
+#include <kern/debug.h>
+#include <kern/lock.h>
+#include <kern/mach_clock.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <kern/processor.h>
+#include <kern/thread_swap.h>
+#include <kern/ipc_sched.h>
+#include <machine/machspl.h> /* for splsched/splx */
+#include <machine/pmap.h>
+
+
+
+/*
+ * These functions really belong in kern/sched_prim.c.
+ */
+
+/*
+ * Routine: thread_go
+ * Purpose:
+ * Start a thread running.
+ * Conditions:
+ * IPC locks may be held.
+ */
+
+void
+thread_go(
+ thread_t thread)
+{
+ int state;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ reset_timeout_check(&thread->timer);
+
+ state = thread->state;
+ switch (state & TH_SCHED_STATE) {
+
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread->wait_result = THREAD_AWAKENED;
+ thread_setrun(thread, TRUE);
+ break;
+
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Either already running, or suspended.
+ */
+ thread->state = state & ~TH_WAIT;
+ thread->wait_result = THREAD_AWAKENED;
+ break;
+
+ default:
+ /*
+ * Not waiting.
+ */
+ break;
+ }
+
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * Routine: thread_will_wait
+ * Purpose:
+ * Assert that the thread intends to block.
+ */
+
+void
+thread_will_wait(
+ thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ assert(thread->wait_result = -1); /* for later assertions */
+ thread->state |= TH_WAIT;
+
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * Routine: thread_will_wait_with_timeout
+ * Purpose:
+ * Assert that the thread intends to block,
+ * with a timeout.
+ */
+
+void
+thread_will_wait_with_timeout(
+ thread_t thread,
+ mach_msg_timeout_t msecs)
+{
+ natural_t ticks = convert_ipc_timeout_to_ticks(msecs);
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ assert(thread->wait_result = -1); /* for later assertions */
+ thread->state |= TH_WAIT;
+
+ set_timeout(&thread->timer, ticks);
+
+ thread_unlock(thread);
+ splx(s);
+}
+
+#if MACH_HOST
+#define check_processor_set(thread) \
+ (current_processor()->processor_set == (thread)->processor_set)
+#else /* MACH_HOST */
+#define check_processor_set(thread) TRUE
+#endif /* MACH_HOST */
+
+#if NCPUS > 1
+#define check_bound_processor(thread) \
+ ((thread)->bound_processor == PROCESSOR_NULL || \
+ (thread)->bound_processor == current_processor())
+#else /* NCPUS > 1 */
+#define check_bound_processor(thread) TRUE
+#endif /* NCPUS > 1 */
+
+/*
+ * Routine: thread_handoff
+ * Purpose:
+ * Switch to a new thread (new), leaving the current
+ * thread (old) blocked. If successful, moves the
+ * kernel stack from old to new and returns as the
+ * new thread. An explicit continuation for the old thread
+ * must be supplied.
+ *
+ * NOTE: Although we wakeup new, we don't set new->wait_result.
+ * Returns:
+ * TRUE if the handoff happened.
+ */
+
+boolean_t
+thread_handoff(
+ thread_t old,
+ continuation_t continuation,
+ thread_t new)
+{
+ spl_t s;
+
+ assert(current_thread() == old);
+
+ /*
+ * XXX Dubious things here:
+ * I don't check the idle_count on the processor set.
+ * No scheduling priority or policy checks.
+ * I assume the new thread is interruptible.
+ */
+
+ s = splsched();
+ thread_lock(new);
+
+ /*
+ * The first thing we must do is check the state
+ * of the threads, to ensure we can handoff.
+ * This check uses current_processor()->processor_set,
+ * which we can read without locking.
+ */
+
+ if ((old->stack_privilege == current_stack()) ||
+ (new->state != (TH_WAIT|TH_SWAPPED)) ||
+ !check_processor_set(new) ||
+ !check_bound_processor(new)) {
+ thread_unlock(new);
+ (void) splx(s);
+
+ counter(c_thread_handoff_misses++);
+ return FALSE;
+ }
+
+ reset_timeout_check(&new->timer);
+
+ new->state = TH_RUN;
+ thread_unlock(new);
+
+#if NCPUS > 1
+ new->last_processor = current_processor();
+#endif /* NCPUS > 1 */
+
+ ast_context(new, cpu_number());
+ timer_switch(&new->system_timer);
+
+ /*
+ * stack_handoff is machine-dependent. It does the
+ * machine-dependent components of a context-switch, like
+ * changing address spaces. It updates active_thread.
+ */
+
+ stack_handoff(old, new);
+
+ /*
+ * Now we must dispose of the old thread.
+ * This is like thread_continue, except
+ * that the old thread isn't waiting yet.
+ */
+
+ thread_lock(old);
+ old->swap_func = continuation;
+ assert(old->wait_result = -1); /* for later assertions */
+
+ if (old->state == TH_RUN) {
+ /*
+ * This is our fast path.
+ */
+
+ old->state = TH_WAIT|TH_SWAPPED;
+ }
+ else if (old->state == (TH_RUN|TH_SUSP)) {
+ /*
+ * Somebody is trying to suspend the thread.
+ */
+
+ old->state = TH_WAIT|TH_SUSP|TH_SWAPPED;
+ if (old->wake_active) {
+ /*
+ * Someone wants to know when the thread
+ * really stops.
+ */
+ old->wake_active = FALSE;
+ thread_unlock(old);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(old));
+ goto after_old_thread;
+ }
+ } else
+ panic("thread_handoff");
+
+ thread_unlock(old);
+ after_old_thread:
+ (void) splx(s);
+
+ counter(c_thread_handoff_hits++);
+ return TRUE;
+}
diff --git a/kern/ipc_sched.h b/kern/ipc_sched.h
new file mode 100644
index 0000000..bdee832
--- /dev/null
+++ b/kern/ipc_sched.h
@@ -0,0 +1,32 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_IPC_SCHED_H_
+#define _KERN_IPC_SCHED_H_
+
+#include <kern/sched_prim.h>
+
+#endif /* _KERN_IPC_SCHED_H_ */
diff --git a/kern/ipc_tt.c b/kern/ipc_tt.c
new file mode 100644
index 0000000..7c9a0b8
--- /dev/null
+++ b/kern/ipc_tt.c
@@ -0,0 +1,1113 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc_tt.c
+ * Purpose:
+ * Task and thread related IPC functions.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mach_param.h>
+#include <mach/mach_traps.h>
+#include <mach/task_special_ports.h>
+#include <mach/thread_special_ports.h>
+#include <vm/vm_kern.h>
+#include <kern/debug.h>
+#include <kern/kalloc.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/ipc_kobject.h>
+#include <kern/ipc_tt.h>
+#include <kern/mach.server.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+
+
+
+/*
+ * Routine: ipc_task_init
+ * Purpose:
+ * Initialize a task's IPC state.
+ *
+ * If non-null, some state will be inherited from the parent.
+ * The parent must be appropriately initialized.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_task_init(
+ task_t task,
+ task_t parent)
+{
+ ipc_space_t space;
+ ipc_port_t kport;
+ kern_return_t kr;
+ int i;
+
+
+ kr = ipc_space_create(&space);
+ if (kr != KERN_SUCCESS)
+ panic("ipc_task_init");
+
+
+ kport = ipc_port_alloc_kernel();
+ if (kport == IP_NULL)
+ panic("ipc_task_init");
+
+ itk_lock_init(task);
+ task->itk_self = kport;
+ task->itk_sself = ipc_port_make_send(kport);
+ task->itk_space = space;
+
+ if (parent == TASK_NULL) {
+ task->itk_exception = IP_NULL;
+ task->itk_bootstrap = IP_NULL;
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ task->itk_registered[i] = IP_NULL;
+ } else {
+ itk_lock(parent);
+ assert(parent->itk_self != IP_NULL);
+
+ /* inherit registered ports */
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ task->itk_registered[i] =
+ ipc_port_copy_send(parent->itk_registered[i]);
+
+ /* inherit exception and bootstrap ports */
+
+ task->itk_exception =
+ ipc_port_copy_send(parent->itk_exception);
+ task->itk_bootstrap =
+ ipc_port_copy_send(parent->itk_bootstrap);
+
+ itk_unlock(parent);
+ }
+}
+
+/*
+ * Routine: ipc_task_enable
+ * Purpose:
+ * Enable a task for IPC access.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_task_enable(
+ task_t task)
+{
+ ipc_port_t kport;
+
+ itk_lock(task);
+ kport = task->itk_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
+ itk_unlock(task);
+}
+
+/*
+ * Routine: ipc_task_disable
+ * Purpose:
+ * Disable IPC access to a task.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_task_disable(
+ task_t task)
+{
+ ipc_port_t kport;
+
+ itk_lock(task);
+ kport = task->itk_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+ itk_unlock(task);
+}
+
+/*
+ * Routine: ipc_task_terminate
+ * Purpose:
+ * Clean up and destroy a task's IPC state.
+ * Conditions:
+ * Nothing locked. The task must be suspended.
+ * (Or the current thread must be in the task.)
+ */
+
+void
+ipc_task_terminate(
+ task_t task)
+{
+ ipc_port_t kport;
+ int i;
+
+ itk_lock(task);
+ kport = task->itk_self;
+
+ if (kport == IP_NULL) {
+ /* the task is already terminated (can this happen?) */
+ itk_unlock(task);
+ return;
+ }
+
+ task->itk_self = IP_NULL;
+ itk_unlock(task);
+
+ /* release the naked send rights */
+
+ if (IP_VALID(task->itk_sself))
+ ipc_port_release_send(task->itk_sself);
+ if (IP_VALID(task->itk_exception))
+ ipc_port_release_send(task->itk_exception);
+ if (IP_VALID(task->itk_bootstrap))
+ ipc_port_release_send(task->itk_bootstrap);
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ if (IP_VALID(task->itk_registered[i]))
+ ipc_port_release_send(task->itk_registered[i]);
+
+ /* destroy the space, leaving just a reference for it */
+
+ ipc_space_destroy(task->itk_space);
+
+ /* destroy the kernel port */
+
+ ipc_port_dealloc_kernel(kport);
+}
+
+/*
+ * Routine: ipc_thread_init
+ * Purpose:
+ * Initialize a thread's IPC state.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_thread_init(thread_t thread)
+{
+ ipc_port_t kport;
+
+ kport = ipc_port_alloc_kernel();
+ if (kport == IP_NULL)
+ panic("ipc_thread_init");
+
+ ipc_thread_links_init(thread);
+ ipc_kmsg_queue_init(&thread->ith_messages);
+
+ ith_lock_init(thread);
+ thread->ith_self = kport;
+ thread->ith_sself = ipc_port_make_send(kport);
+ thread->ith_exception = IP_NULL;
+
+ thread->ith_mig_reply = MACH_PORT_NULL;
+ thread->ith_rpc_reply = IP_NULL;
+}
+
+/*
+ * Routine: ipc_thread_enable
+ * Purpose:
+ * Enable a thread for IPC access.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_thread_enable(thread_t thread)
+{
+ ipc_port_t kport;
+
+ ith_lock(thread);
+ kport = thread->ith_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, (ipc_kobject_t) thread, IKOT_THREAD);
+ ith_unlock(thread);
+}
+
+/*
+ * Routine: ipc_thread_disable
+ * Purpose:
+ * Disable IPC access to a thread.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_thread_disable(thread_t thread)
+{
+ ipc_port_t kport;
+
+ ith_lock(thread);
+ kport = thread->ith_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+ ith_unlock(thread);
+}
+
+/*
+ * Routine: ipc_thread_terminate
+ * Purpose:
+ * Clean up and destroy a thread's IPC state.
+ * Conditions:
+ * Nothing locked. The thread must be suspended.
+ * (Or be the current thread.)
+ */
+
+void
+ipc_thread_terminate(thread_t thread)
+{
+ ipc_port_t kport;
+
+ ith_lock(thread);
+ kport = thread->ith_self;
+
+ if (kport == IP_NULL) {
+ /* the thread is already terminated (can this happen?) */
+ ith_unlock(thread);
+ return;
+ }
+
+ thread->ith_self = IP_NULL;
+ ith_unlock(thread);
+
+ assert(ipc_kmsg_queue_empty(&thread->ith_messages));
+
+ /* release the naked send rights */
+
+ if (IP_VALID(thread->ith_sself))
+ ipc_port_release_send(thread->ith_sself);
+ if (IP_VALID(thread->ith_exception))
+ ipc_port_release_send(thread->ith_exception);
+
+ /* destroy the kernel port */
+
+ ipc_port_dealloc_kernel(kport);
+}
+
+#if 0
+/*
+ * Routine: retrieve_task_self
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the task's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_self(task)
+ task_t task;
+{
+ ipc_port_t port;
+
+ assert(task != TASK_NULL);
+
+ itk_lock(task);
+ if (task->itk_self != IP_NULL)
+ port = ipc_port_copy_send(task->itk_sself);
+ else
+ port = IP_NULL;
+ itk_unlock(task);
+
+ return port;
+}
+
+/*
+ * Routine: retrieve_thread_self
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the thread's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_self(thread)
+ thread_t thread;
+{
+ ipc_port_t port;
+
+ assert(thread != ITH_NULL);
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL)
+ port = ipc_port_copy_send(thread->ith_sself);
+ else
+ port = IP_NULL;
+ ith_unlock(thread);
+
+ return port;
+}
+#endif /* 0 */
+
+/*
+ * Routine: retrieve_task_self_fast
+ * Purpose:
+ * Optimized version of retrieve_task_self,
+ * that only works for the current task.
+ *
+ * Return a send right (possibly null/dead)
+ * for the task's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_self_fast(
+ task_t task)
+{
+ ipc_port_t port;
+
+ assert(task == current_task());
+
+ itk_lock(task);
+ assert(task->itk_self != IP_NULL);
+
+ if ((port = task->itk_sself) == task->itk_self) {
+ /* no interposing */
+
+ ip_lock(port);
+ assert(ip_active(port));
+ ip_reference(port);
+ port->ip_srights++;
+ ip_unlock(port);
+ } else
+ port = ipc_port_copy_send(port);
+ itk_unlock(task);
+
+ return port;
+}
+
+/*
+ * Routine: retrieve_thread_self_fast
+ * Purpose:
+ * Optimized version of retrieve_thread_self,
+ * that only works for the current thread.
+ *
+ * Return a send right (possibly null/dead)
+ * for the thread's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_self_fast(thread_t thread)
+{
+ ipc_port_t port;
+
+ assert(thread == current_thread());
+
+ ith_lock(thread);
+ assert(thread->ith_self != IP_NULL);
+
+ if ((port = thread->ith_sself) == thread->ith_self) {
+ /* no interposing */
+
+ ip_lock(port);
+ assert(ip_active(port));
+ ip_reference(port);
+ port->ip_srights++;
+ ip_unlock(port);
+ } else
+ port = ipc_port_copy_send(port);
+ ith_unlock(thread);
+
+ return port;
+}
+
+#if 0
+/*
+ * Routine: retrieve_task_exception
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the task's exception port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_exception(task)
+ task_t task;
+{
+ ipc_port_t port;
+
+ assert(task != TASK_NULL);
+
+ itk_lock(task);
+ if (task->itk_self != IP_NULL)
+ port = ipc_port_copy_send(task->itk_exception);
+ else
+ port = IP_NULL;
+ itk_unlock(task);
+
+ return port;
+}
+
+/*
+ * Routine: retrieve_thread_exception
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the thread's exception port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_exception(thread)
+ thread_t thread;
+{
+ ipc_port_t port;
+
+ assert(thread != ITH_NULL);
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL)
+ port = ipc_port_copy_send(thread->ith_exception);
+ else
+ port = IP_NULL;
+ ith_unlock(thread);
+
+ return port;
+}
+#endif /* 0 */
+
+/*
+ * Routine: mach_task_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own task port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_name_t
+mach_task_self(void)
+{
+ task_t task = current_task();
+ ipc_port_t sright;
+
+ sright = retrieve_task_self_fast(task);
+ return ipc_port_copyout_send(sright, task->itk_space);
+}
+
+/*
+ * Routine: mach_thread_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own thread port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_name_t
+mach_thread_self(void)
+{
+ thread_t thread = current_thread();
+ task_t task = thread->task;
+ ipc_port_t sright;
+
+ sright = retrieve_thread_self_fast(thread);
+ return ipc_port_copyout_send(sright, task->itk_space);
+}
+
+/*
+ * Routine: mach_reply_port [mach trap]
+ * Purpose:
+ * Allocate a port for the caller.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_name_t
+mach_reply_port(void)
+{
+ ipc_port_t port;
+ mach_port_name_t name;
+ kern_return_t kr;
+
+ kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
+ if (kr == KERN_SUCCESS)
+ ip_unlock(port);
+ else
+ name = MACH_PORT_NULL;
+
+ return name;
+}
+
+/*
+ * Routine: task_get_special_port [kernel call]
+ * Purpose:
+ * Clones a send right for one of the task's
+ * special ports.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted a send right.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_FAILURE The task/space is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+task_get_special_port(
+ task_t task,
+ int which,
+ ipc_port_t *portp)
+{
+ ipc_port_t *whichp;
+ ipc_port_t port;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+ case TASK_KERNEL_PORT:
+ whichp = &task->itk_sself;
+ break;
+
+ case TASK_EXCEPTION_PORT:
+ whichp = &task->itk_exception;
+ break;
+
+ case TASK_BOOTSTRAP_PORT:
+ whichp = &task->itk_bootstrap;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ port = ipc_port_copy_send(*whichp);
+ itk_unlock(task);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: task_set_special_port [kernel call]
+ * Purpose:
+ * Changes one of the task's special ports,
+ * setting it to the supplied send right.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied send right.
+ * Returns:
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_FAILURE The task/space is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+task_set_special_port(
+ task_t task,
+ int which,
+ const ipc_port_t port)
+{
+ ipc_port_t *whichp;
+ ipc_port_t old;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+ case TASK_KERNEL_PORT:
+ whichp = &task->itk_sself;
+ break;
+
+ case TASK_EXCEPTION_PORT:
+ whichp = &task->itk_exception;
+ break;
+
+ case TASK_BOOTSTRAP_PORT:
+ whichp = &task->itk_bootstrap;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ old = *whichp;
+ *whichp = port;
+ itk_unlock(task);
+
+ if (IP_VALID(old))
+ ipc_port_release_send(old);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: thread_get_special_port [kernel call]
+ * Purpose:
+ * Clones a send right for one of the thread's
+ * special ports.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted a send right.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+thread_get_special_port(
+ thread_t thread,
+ int which,
+ ipc_port_t *portp)
+{
+ ipc_port_t *whichp;
+ ipc_port_t port;
+
+ if (thread == ITH_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+ case THREAD_KERNEL_PORT:
+ whichp = &thread->ith_sself;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &thread->ith_exception;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ ith_lock(thread);
+ if (thread->ith_self == IP_NULL) {
+ ith_unlock(thread);
+ return KERN_FAILURE;
+ }
+
+ port = ipc_port_copy_send(*whichp);
+ ith_unlock(thread);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: thread_set_special_port [kernel call]
+ * Purpose:
+ * Changes one of the thread's special ports,
+ * setting it to the supplied send right.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied send right.
+ * Returns:
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+thread_set_special_port(
+ thread_t thread,
+ int which,
+ ipc_port_t port)
+{
+ ipc_port_t *whichp;
+ ipc_port_t old;
+
+ if (thread == ITH_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+ case THREAD_KERNEL_PORT:
+ whichp = &thread->ith_sself;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &thread->ith_exception;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ ith_lock(thread);
+ if (thread->ith_self == IP_NULL) {
+ ith_unlock(thread);
+ return KERN_FAILURE;
+ }
+
+ old = *whichp;
+ *whichp = port;
+ ith_unlock(thread);
+
+ if (IP_VALID(old))
+ ipc_port_release_send(old);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_ports_register [kernel call]
+ * Purpose:
+ * Stash a handful of port send rights in the task.
+ * Child tasks will inherit these rights, but they
+ * must use mach_ports_lookup to acquire them.
+ *
+ * The rights are supplied in a (wired) kalloc'd segment.
+ * Rights which aren't supplied are assumed to be null.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied rights and memory.
+ * Returns:
+ * KERN_SUCCESS Stashed the port rights.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_INVALID_ARGUMENT The task is dead.
+ * KERN_INVALID_ARGUMENT Too many port rights supplied.
+ */
+
+kern_return_t
+mach_ports_register(
+ task_t task,
+ mach_port_array_t memory,
+ mach_msg_type_number_t portsCnt)
+{
+ ipc_port_t ports[TASK_PORT_REGISTER_MAX];
+ unsigned i;
+
+ if ((task == TASK_NULL) ||
+ (portsCnt > TASK_PORT_REGISTER_MAX))
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Pad the port rights with nulls.
+ */
+
+ for (i = 0; i < portsCnt; i++)
+ ports[i] = (ipc_port_t)memory[i];
+ for (; i < TASK_PORT_REGISTER_MAX; i++)
+ ports[i] = IP_NULL;
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Replace the old send rights with the new.
+ * Release the old rights after unlocking.
+ */
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
+ ipc_port_t old;
+
+ old = task->itk_registered[i];
+ task->itk_registered[i] = ports[i];
+ ports[i] = old;
+ }
+
+ itk_unlock(task);
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ if (IP_VALID(ports[i]))
+ ipc_port_release_send(ports[i]);
+
+ /*
+ * Now that the operation is known to be successful,
+ * we can free the memory.
+ */
+
+ if (portsCnt != 0)
+ kfree((vm_offset_t) memory,
+ (vm_size_t) (portsCnt * sizeof(mach_port_t)));
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_ports_lookup [kernel call]
+ * Purpose:
+ * Retrieves (clones) the stashed port send rights.
+ * Conditions:
+ * Nothing locked. If successful, the caller gets
+ * rights and memory.
+ * Returns:
+ * KERN_SUCCESS Retrieved the send rights.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_INVALID_ARGUMENT The task is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_ports_lookup(
+ task_t task,
+ mach_port_t **portsp,
+ mach_msg_type_number_t *portsCnt)
+{
+ vm_offset_t memory;
+ vm_size_t size;
+ ipc_port_t *ports;
+ int i;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
+
+ memory = kalloc(size);
+ if (memory == 0)
+ return KERN_RESOURCE_SHORTAGE;
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+
+ kfree(memory, size);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ ports = (ipc_port_t *) memory;
+
+ /*
+ * Clone port rights. Because kalloc'd memory
+ * is wired, we won't fault while holding the task lock.
+ */
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ ports[i] = ipc_port_copy_send(task->itk_registered[i]);
+
+ itk_unlock(task);
+
+ *portsp = (mach_port_t *)ports;
+ *portsCnt = TASK_PORT_REGISTER_MAX;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: convert_port_to_task
+ * Purpose:
+ * Convert from a port to a task.
+ * Doesn't consume the port ref; produces a task ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+task_t
+convert_port_to_task(
+ ipc_port_t port)
+{
+ task_t task = TASK_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ task = (task_t) port->ip_kobject;
+ task_reference(task);
+ }
+ ip_unlock(port);
+ }
+
+ return task;
+}
+
+/*
+ * Routine: convert_port_to_space
+ * Purpose:
+ * Convert from a port to a space.
+ * Doesn't consume the port ref; produces a space ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_space_t
+convert_port_to_space(
+ ipc_port_t port)
+{
+ ipc_space_t space = IS_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ space = ((task_t) port->ip_kobject)->itk_space;
+ is_reference(space);
+ }
+ ip_unlock(port);
+ }
+
+ return space;
+}
+
+/*
+ * Routine: convert_port_to_map
+ * Purpose:
+ * Convert from a port to a map.
+ * Doesn't consume the port ref; produces a map ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+vm_map_t
+convert_port_to_map(ipc_port_t port)
+{
+ vm_map_t map = VM_MAP_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ map = ((task_t) port->ip_kobject)->map;
+ vm_map_reference(map);
+ }
+ ip_unlock(port);
+ }
+
+ return map;
+}
+
+/*
+ * Routine: convert_port_to_thread
+ * Purpose:
+ * Convert from a port to a thread.
+ * Doesn't consume the port ref; produces a thread ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+thread_t
+convert_port_to_thread(ipc_port_t port)
+{
+ thread_t thread = THREAD_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_THREAD)) {
+ thread = (thread_t) port->ip_kobject;
+ thread_reference(thread);
+ }
+ ip_unlock(port);
+ }
+
+ return thread;
+}
+
+/*
+ * Routine: convert_task_to_port
+ * Purpose:
+ * Convert from a task to a port.
+ * Consumes a task ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_task_to_port(task_t task)
+{
+ ipc_port_t port;
+
+ itk_lock(task);
+ if (task->itk_self != IP_NULL)
+ port = ipc_port_make_send(task->itk_self);
+ else
+ port = IP_NULL;
+ itk_unlock(task);
+
+ task_deallocate(task);
+ return port;
+}
+
+/*
+ * Routine: convert_thread_to_port
+ * Purpose:
+ * Convert from a thread to a port.
+ * Consumes a thread ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_thread_to_port(thread_t thread)
+{
+ ipc_port_t port;
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL)
+ port = ipc_port_make_send(thread->ith_self);
+ else
+ port = IP_NULL;
+ ith_unlock(thread);
+
+ thread_deallocate(thread);
+ return port;
+}
+
+/*
+ * Routine: space_deallocate
+ * Purpose:
+ * Deallocate a space ref produced by convert_port_to_space.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+space_deallocate(ipc_space_t space)
+{
+ if (space != IS_NULL)
+ is_release(space);
+}
diff --git a/kern/ipc_tt.h b/kern/ipc_tt.h
new file mode 100644
index 0000000..5c66738
--- /dev/null
+++ b/kern/ipc_tt.h
@@ -0,0 +1,92 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_IPC_TT_H_
+#define _KERN_IPC_TT_H_
+
+#include <mach/boolean.h>
+#include <mach/mach_types.h>
+#include <mach/port.h>
+
+extern void ipc_task_init(task_t, task_t);
+extern void ipc_task_enable(task_t);
+extern void ipc_task_disable(task_t);
+extern void ipc_task_terminate(task_t);
+
+extern void ipc_thread_init(thread_t);
+extern void ipc_thread_enable(thread_t);
+extern void ipc_thread_disable(thread_t);
+extern void ipc_thread_terminate(thread_t);
+
+extern struct ipc_port *
+retrieve_task_self(task_t);
+
+extern struct ipc_port *
+retrieve_task_self_fast(task_t);
+
+extern struct ipc_port *
+retrieve_thread_self(thread_t);
+
+extern struct ipc_port *
+retrieve_thread_self_fast(thread_t);
+
+extern struct ipc_port *
+retrieve_task_exception(task_t);
+
+extern struct ipc_port *
+retrieve_thread_exception(thread_t);
+
+extern struct task *
+convert_port_to_task(struct ipc_port *);
+
+extern struct ipc_port *
+convert_task_to_port(task_t);
+
+extern void
+task_deallocate(task_t);
+
+extern struct thread *
+convert_port_to_thread(struct ipc_port *);
+
+extern struct ipc_port *
+convert_thread_to_port(thread_t);
+
+extern void
+thread_deallocate(thread_t);
+
+extern struct vm_map *
+convert_port_to_map(struct ipc_port *);
+
+extern struct ipc_space *
+convert_port_to_space(struct ipc_port *);
+
+extern void
+space_deallocate(ipc_space_t);
+
+mach_port_name_t
+mach_reply_port (void);
+
+#endif /* _KERN_IPC_TT_H_ */
diff --git a/kern/kalloc.h b/kern/kalloc.h
new file mode 100644
index 0000000..004e3a6
--- /dev/null
+++ b/kern/kalloc.h
@@ -0,0 +1,38 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_KALLOC_H_
+#define _KERN_KALLOC_H_
+
+#include <mach/machine/vm_types.h>
+#include <vm/vm_types.h>
+
+extern vm_offset_t kalloc (vm_size_t size);
+extern void kfree (vm_offset_t data, vm_size_t size);
+
+extern void kalloc_init (void);
+
+#endif /* _KERN_KALLOC_H_ */
diff --git a/kern/kern_types.h b/kern/kern_types.h
new file mode 100644
index 0000000..f715cb1
--- /dev/null
+++ b/kern/kern_types.h
@@ -0,0 +1,70 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_KERN_TYPES_H_
+#define _KERN_KERN_TYPES_H_
+
+#include <mach/port.h> /* for mach_port_t */
+
+/*
+ * Common kernel type declarations.
+ * These are handles to opaque data structures defined elsewhere.
+ *
+ * These types are recursively included in each other's definitions.
+ * This file exists to export the common declarations to each
+ * of the definitions, and to other files that need only the
+ * type declarations.
+ */
+
+/*
+ * Task structure, from kern/task.h
+ */
+typedef struct task * task_t;
+#define TASK_NULL ((task_t) 0)
+
+typedef mach_port_t * task_array_t; /* should be task_t * */
+
+/*
+ * Thread structure, from kern/thread.h
+ */
+typedef struct thread * thread_t;
+#define THREAD_NULL ((thread_t) 0)
+
+typedef mach_port_t * thread_array_t; /* should be thread_t * */
+
+/*
+ * Processor structure, from kern/processor.h
+ */
+typedef struct processor * processor_t;
+#define PROCESSOR_NULL ((processor_t) 0)
+
+/*
+ * Processor set structure, from kern/processor.h
+ */
+typedef struct processor_set * processor_set_t;
+#define PROCESSOR_SET_NULL ((processor_set_t) 0)
+
+#endif /* _KERN_KERN_TYPES_H_ */
diff --git a/kern/kmutex.c b/kern/kmutex.c
new file mode 100644
index 0000000..5926d1d
--- /dev/null
+++ b/kern/kmutex.c
@@ -0,0 +1,76 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <kern/kmutex.h>
+#include <kern/atomic.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+
+void kmutex_init (struct kmutex *mtxp)
+{
+ mtxp->state = KMUTEX_AVAIL;
+ simple_lock_init (&mtxp->lock);
+}
+
+kern_return_t kmutex_lock (struct kmutex *mtxp, boolean_t interruptible)
+{
+ check_simple_locks ();
+
+ if (atomic_cas_acq (&mtxp->state, KMUTEX_AVAIL, KMUTEX_LOCKED))
+ /* Unowned mutex - We're done. */
+ return (KERN_SUCCESS);
+
+ /* The mutex is locked. We may have to sleep. */
+ simple_lock (&mtxp->lock);
+ if (atomic_swap_acq (&mtxp->state, KMUTEX_CONTENDED) == KMUTEX_AVAIL)
+ {
+ /* The mutex was released in-between. */
+ simple_unlock (&mtxp->lock);
+ return (KERN_SUCCESS);
+ }
+
+ /* Sleep and check the result value of the waiting, in order to
+ * inform our caller if we were interrupted or not. Note that
+ * we don't need to set again the mutex state. The owner will
+ * handle that in every case. */
+ thread_sleep ((event_t)mtxp, (simple_lock_t)&mtxp->lock, interruptible);
+ return (current_thread()->wait_result == THREAD_AWAKENED ?
+ KERN_SUCCESS : KERN_INTERRUPTED);
+}
+
+kern_return_t kmutex_trylock (struct kmutex *mtxp)
+{
+ return (atomic_cas_acq (&mtxp->state, KMUTEX_AVAIL, KMUTEX_LOCKED) ?
+ KERN_SUCCESS : KERN_FAILURE);
+}
+
+void kmutex_unlock (struct kmutex *mtxp)
+{
+ if (atomic_cas_rel (&mtxp->state, KMUTEX_LOCKED, KMUTEX_AVAIL))
+ /* No waiters - We're done. */
+ return;
+
+ simple_lock (&mtxp->lock);
+
+ if (!thread_wakeup_one ((event_t)mtxp))
+ /* Any threads that were waiting on this mutex were
+ * interrupted and left - Reset the mutex state. */
+ mtxp->state = KMUTEX_AVAIL;
+
+ simple_unlock (&mtxp->lock);
+}
diff --git a/kern/kmutex.h b/kern/kmutex.h
new file mode 100644
index 0000000..2981515
--- /dev/null
+++ b/kern/kmutex.h
@@ -0,0 +1,52 @@
+/* Copyright (C) 2017 Free Software Foundation, Inc.
+ Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either
+ version 2 of the license, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _KERN_KMUTEX_H_
+#define _KERN_KMUTEX_H_ 1
+
+#include <kern/lock.h>
+#include <mach/kern_return.h>
+
+struct kmutex
+{
+ /* One of the KMUTEX_* values below, updated atomically. */
+ unsigned int state;
+ /* Interlock protecting the sleep/wakeup handoff. */
+ decl_simple_lock_data (, lock)
+};
+
+/* Possible values for the mutex state. */
+#define KMUTEX_AVAIL 0
+#define KMUTEX_LOCKED 1
+#define KMUTEX_CONTENDED 2
+
+/* Initialize mutex in *MTXP. */
+extern void kmutex_init (struct kmutex *mtxp);
+
+/* Acquire lock MTXP. If INTERRUPTIBLE is true, the sleep may be
+ * prematurely terminated, in which case the function returns
+ * KERN_INTERRUPTED. Otherwise, KERN_SUCCESS is returned. */
+extern kern_return_t kmutex_lock (struct kmutex *mtxp,
+ boolean_t interruptible);
+
+/* Try to acquire the lock MTXP without sleeping.
+ * Returns KERN_SUCCESS if successful, KERN_FAILURE otherwise. */
+extern kern_return_t kmutex_trylock (struct kmutex *mtxp);
+
+/* Unlock the mutex MTXP. */
+extern void kmutex_unlock (struct kmutex *mtxp);
+
+#endif
diff --git a/kern/list.h b/kern/list.h
new file mode 100644
index 0000000..be92762
--- /dev/null
+++ b/kern/list.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2009, 2010 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Simple doubly-linked list.
+ */
+
+#ifndef _KERN_LIST_H
+#define _KERN_LIST_H
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <kern/macros.h>
+
+/*
+ * Structure used as both head and node.
+ *
+ * This implementation relies on using the same type for both heads and nodes.
+ *
+ * It is recommended to encode the use of struct list variables in their names,
+ * e.g. struct list free_list or struct list free_objects is a good hint for a
+ * list of free objects. A declaration like struct list free_node clearly
+ * indicates it is used as part of a node in the free list.
+ */
+struct list {
+ struct list *prev;
+ struct list *next;
+};
+
+/*
+ * Static list initializer.
+ */
+#define LIST_INITIALIZER(list) { &(list), &(list) }
+
+/*
+ * Initialize a list.
+ *
+ * An empty list is a head whose prev and next links point back to itself.
+ */
+static inline void list_init(struct list *list)
+{
+ list->prev = list;
+ list->next = list;
+}
+
+/*
+ * Initialize a list node.
+ *
+ * An entry is in no list when its node members point to NULL.
+ */
+static inline void list_node_init(struct list *node)
+{
+ node->prev = NULL;
+ node->next = NULL;
+}
+
+/*
+ * Return true if node is in no list.
+ *
+ * Only meaningful for nodes set up with list_node_init(); linked-in
+ * nodes never have NULL links.
+ */
+static inline int list_node_unlinked(const struct list *node)
+{
+ return node->prev == NULL;
+}
+
+/*
+ * Macro that evaluates to the address of the structure containing the
+ * given node based on the given type and member.
+ */
+#define list_entry(node, type, member) structof(node, type, member)
+
+/*
+ * Return the first node of a list.
+ *
+ * On an empty list this is the head itself (see list_end()).
+ */
+static inline struct list * list_first(const struct list *list)
+{
+ return list->next;
+}
+
+/*
+ * Return the last node of a list.
+ *
+ * On an empty list this is the head itself (see list_end()).
+ */
+static inline struct list * list_last(const struct list *list)
+{
+ return list->prev;
+}
+
+/*
+ * Return the node next to the given node.
+ */
+static inline struct list * list_next(const struct list *node)
+{
+ return node->next;
+}
+
+/*
+ * Return the node previous to the given node.
+ */
+static inline struct list * list_prev(const struct list *node)
+{
+ return node->prev;
+}
+
+/*
+ * Get the first entry of a list.
+ */
+#define list_first_entry(list, type, member) \
+ list_entry(list_first(list), type, member)
+
+/*
+ * Get the last entry of a list.
+ */
+#define list_last_entry(list, type, member) \
+ list_entry(list_last(list), type, member)
+
+/*
+ * Return true if node is after the last or before the first node of the list,
+ * i.e. node is the list head itself, which terminates an iteration.
+ */
+static inline int list_end(const struct list *list, const struct list *node)
+{
+ return list == node;
+}
+
+/*
+ * Return true if list is empty.
+ */
+static inline int list_empty(const struct list *list)
+{
+ return list == list->next;
+}
+
+/*
+ * Return true if list contains exactly one node.
+ */
+static inline int list_singular(const struct list *list)
+{
+ return (list != list->next) && (list->next == list->prev);
+}
+
+/*
+ * Split list2 by moving its nodes up to (but not including) the given
+ * node into list1 (which can be in a stale state).
+ *
+ * If list2 is empty, or node is list2 or list2->next, nothing is done.
+ */
+static inline void list_split(struct list *list1, struct list *list2,
+ struct list *node)
+{
+ if (list_empty(list2) || (list2->next == node) || list_end(list2, node))
+ return;
+
+ /* Attach the [list2->next .. node->prev] segment to list1. */
+ list1->next = list2->next;
+ list1->next->prev = list1;
+
+ list1->prev = node->prev;
+ node->prev->next = list1;
+
+ /* node becomes the new first node of list2. */
+ list2->next = node;
+ node->prev = list2;
+}
+
+/*
+ * Append the nodes of list2 at the end of list1.
+ *
+ * After completion, list2 is stale.
+ */
+static inline void list_concat(struct list *list1, const struct list *list2)
+{
+ struct list *last1, *first2, *last2;
+
+ if (list_empty(list2))
+ return;
+
+ last1 = list1->prev;
+ first2 = list2->next;
+ last2 = list2->prev;
+
+ /* Link list1's tail to list2's first node ... */
+ last1->next = first2;
+ first2->prev = last1;
+
+ /* ... and close the ring back to list1's head. */
+ last2->next = list1;
+ list1->prev = last2;
+}
+
+/*
+ * Set the new head of a list.
+ *
+ * This function is an optimized version of :
+ * list_init(&new_list);
+ * list_concat(&new_list, &old_list);
+ *
+ * After completion, old_head is stale.
+ */
+static inline void list_set_head(struct list *new_head,
+ const struct list *old_head)
+{
+ if (list_empty(old_head)) {
+ list_init(new_head);
+ return;
+ }
+
+ /* Copy the links, then point the boundary nodes at the new head. */
+ *new_head = *old_head;
+ new_head->next->prev = new_head;
+ new_head->prev->next = new_head;
+}
+
+/*
+ * Add a node between two adjacent nodes; the primitive behind the
+ * list_insert_*() functions below.
+ */
+static inline void list_add(struct list *prev, struct list *next,
+ struct list *node)
+{
+ next->prev = node;
+ node->next = next;
+
+ prev->next = node;
+ node->prev = prev;
+}
+
+/*
+ * Insert a node at the head of a list.
+ */
+static inline void list_insert_head(struct list *list, struct list *node)
+{
+ list_add(list, list->next, node);
+}
+
+/*
+ * Insert a node at the tail of a list.
+ */
+static inline void list_insert_tail(struct list *list, struct list *node)
+{
+ list_add(list->prev, list, node);
+}
+
+/*
+ * Insert a node before another node.
+ */
+static inline void list_insert_before(struct list *next, struct list *node)
+{
+ list_add(next->prev, next, node);
+}
+
+/*
+ * Insert a node after another node.
+ */
+static inline void list_insert_after(struct list *prev, struct list *node)
+{
+ list_add(prev, prev->next, node);
+}
+
+/*
+ * Remove a node from a list.
+ *
+ * After completion, the node is stale: its own links are left untouched
+ * and still point into the list, so reinitialize it (e.g. with
+ * list_node_init()) before relying on list_node_unlinked().
+ */
+static inline void list_remove(struct list *node)
+{
+ node->prev->next = node->next;
+ node->next->prev = node->prev;
+}
+
+/*
+ * Forge a loop to process all nodes of a list.
+ *
+ * The node must not be altered during the loop.
+ */
+#define list_for_each(list, node) \
+for (node = list_first(list); \
+ !list_end(list, node); \
+ node = list_next(node))
+
+/*
+ * Forge a loop to process all nodes of a list.
+ *
+ * The current node may be removed: tmp fetches the next node ahead of time.
+ */
+#define list_for_each_safe(list, node, tmp) \
+for (node = list_first(list), tmp = list_next(node); \
+ !list_end(list, node); \
+ node = tmp, tmp = list_next(node))
+
+/*
+ * Version of list_for_each() that processes nodes backward.
+ */
+#define list_for_each_reverse(list, node) \
+for (node = list_last(list); \
+ !list_end(list, node); \
+ node = list_prev(node))
+
+/*
+ * Version of list_for_each_safe() that processes nodes backward.
+ */
+#define list_for_each_reverse_safe(list, node, tmp) \
+for (node = list_last(list), tmp = list_prev(node); \
+ !list_end(list, node); \
+ node = tmp, tmp = list_prev(node))
+
+/*
+ * Forge a loop to process all entries of a list.
+ *
+ * The entry node must not be altered during the loop.
+ */
+#define list_for_each_entry(list, entry, member) \
+for (entry = list_entry(list_first(list), typeof(*entry), member); \
+ !list_end(list, &entry->member); \
+ entry = list_entry(list_next(&entry->member), typeof(*entry), \
+ member))
+
+/*
+ * Forge a loop to process all entries of a list.
+ *
+ * The current entry may be removed: tmp fetches the next entry ahead of time.
+ */
+#define list_for_each_entry_safe(list, entry, tmp, member) \
+for (entry = list_entry(list_first(list), typeof(*entry), member), \
+ tmp = list_entry(list_next(&entry->member), typeof(*entry), \
+ member); \
+ !list_end(list, &entry->member); \
+ entry = tmp, tmp = list_entry(list_next(&entry->member), \
+ typeof(*entry), member))
+
+/*
+ * Version of list_for_each_entry() that processes entries backward.
+ */
+#define list_for_each_entry_reverse(list, entry, member) \
+for (entry = list_entry(list_last(list), typeof(*entry), member); \
+ !list_end(list, &entry->member); \
+ entry = list_entry(list_prev(&entry->member), typeof(*entry), \
+ member))
+
+/*
+ * Version of list_for_each_entry_safe() that processes entries backward.
+ */
+#define list_for_each_entry_reverse_safe(list, entry, tmp, member) \
+for (entry = list_entry(list_last(list), typeof(*entry), member), \
+ tmp = list_entry(list_prev(&entry->member), typeof(*entry), \
+ member); \
+ !list_end(list, &entry->member); \
+ entry = tmp, tmp = list_entry(list_prev(&entry->member), \
+ typeof(*entry), member))
+
+#endif /* _KERN_LIST_H */
diff --git a/kern/lock.c b/kern/lock.c
new file mode 100644
index 0000000..36b6d20
--- /dev/null
+++ b/kern/lock.c
@@ -0,0 +1,689 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/lock.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Locking primitives implementation
+ */
+
+#include <string.h>
+
+#include <machine/smp.h>
+
+#include <kern/debug.h>
+#include <kern/lock.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#if MACH_KDB
+#include <machine/db_machdep.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#endif
+
+
+#if NCPUS > 1
+
+/*
+ * Module: lock
+ * Function:
+ * Provide reader/writer sychronization.
+ * Implementation:
+ * Simple interlock on a bit. Readers first interlock,
+ * increment the reader count, then let go. Writers hold
+ * the interlock (thus preventing further readers), and
+ * wait for already-accepted readers to go away.
+ */
+
+/*
+ * The simple-lock routines are the primitives out of which
+ * the lock package is built. The implementation is left
+ * to the machine-dependent code.
+ */
+
+#ifdef notdef
+/*
+ * A sample implementation of simple locks.
+ * assumes:
+ * boolean_t test_and_set(boolean_t *)
+ * indivisibly sets the boolean to TRUE
+ * and returns its old value
+ * and that setting a boolean to FALSE is indivisible.
+ */
+/*
+ * simple_lock_init initializes a simple lock. A simple lock
+ * may only be used for exclusive locks.
+ */
+
+void simple_lock_init(simple_lock_t l)
+{
+ *(boolean_t *)l = FALSE;
+}
+
+void simple_lock(simple_lock_t l)
+{
+ /* Spin until the previous holder clears the flag. */
+ while (test_and_set((boolean_t *)l))
+ cpu_pause();
+}
+
+void simple_unlock(simple_lock_t l)
+{
+ *(boolean_t *)l = FALSE;
+}
+
+boolean_t simple_lock_try(simple_lock_t l)
+{
+ return (!test_and_set((boolean_t *)l));
+}
+#endif /* notdef */
+#endif /* NCPUS > 1 */
+
+/* Iterations to spin on a busy lock before sleeping
+ * (see the wait loops in lock_write() and friends). */
+#if NCPUS > 1
+static int lock_wait_time = 100;
+#else /* NCPUS > 1 */
+
+ /*
+ * It is silly to spin on a uni-processor as if we
+ * thought something magical would happen to the
+ * want_write bit while we are executing.
+ */
+static int lock_wait_time = 0;
+#endif /* NCPUS > 1 */
+
+#if MACH_SLOCKS && NCPUS == 1
+/*
+ * This code does not protect simple_locks_taken and simple_locks_info.
+ * It works despite the fact that interrupt code does use simple locks.
+ * This is because interrupts use locks in a stack-like manner.
+ * Each interrupt releases all the locks it acquires, so the data
+ * structures end up in the same state after the interrupt as before.
+ * The only precaution necessary is that simple_locks_taken be
+ * incremented first and decremented last, so that interrupt handlers
+ * don't over-write active slots in simple_locks_info.
+ */
+
+/* Number of simple locks currently held; also the number of live
+ * entries in simple_locks_info below. */
+unsigned int simple_locks_taken = 0;
+
+#define NSLINFO 1000 /* maximum number of locks held */
+
+/* Bookkeeping for each held lock: the lock itself plus the expression
+ * and source location of the call that acquired it. */
+struct simple_locks_info {
+ simple_lock_t l;
+ const char *expr;
+ const char *loc;
+} simple_locks_info[NSLINFO];
+
+/* Nonzero when the sanity check in check_simple_locks() is enforced. */
+int do_check_simple_locks = 1;
+
+/* Assert that no simple locks are held; called before operations
+ * that may sleep. */
+void check_simple_locks(void)
+{
+ assert(! do_check_simple_locks || simple_locks_taken == 0);
+}
+
+void check_simple_locks_enable(void)
+{
+ do_check_simple_locks = 1;
+}
+
+void check_simple_locks_disable(void)
+{
+ do_check_simple_locks = 0;
+}
+
+/* Need simple lock sanity checking code if simple locks are being
+ compiled in, and we are compiling for a uniprocessor. */
+
+void simple_lock_init(
+ simple_lock_t l)
+{
+ l->lock_data = 0;
+}
+
+void _simple_lock(
+ simple_lock_t l,
+ const char *expression,
+ const char *location)
+{
+ struct simple_locks_info *info;
+
+ /* On a uniprocessor the lock must always be free here. */
+ assert(l->lock_data == 0);
+
+ l->lock_data = 1;
+
+ /* Record who took the lock for db_show_all_slocks(). The count is
+ * bumped before the slot is filled so interrupt handlers don't
+ * reuse it (see the comment above simple_locks_taken). */
+ info = &simple_locks_info[simple_locks_taken++];
+ barrier();
+ info->l = l;
+ info->expr = expression;
+ info->loc = location;
+}
+
+boolean_t _simple_lock_try(
+ simple_lock_t l,
+ const char *expression,
+ const char *location)
+{
+ struct simple_locks_info *info;
+
+ if (l->lock_data != 0)
+ return FALSE;
+
+ l->lock_data = 1;
+
+ /* Record who took the lock, as in _simple_lock(). */
+ info = &simple_locks_info[simple_locks_taken++];
+ barrier();
+ info->l = l;
+ info->expr = expression;
+ info->loc = location;
+
+ return TRUE;
+}
+
+void _simple_unlock(
+ simple_lock_t l)
+{
+ assert(l->lock_data != 0);
+
+ l->lock_data = 0;
+
+ if (simple_locks_info[simple_locks_taken-1].l != l) {
+ unsigned int i = simple_locks_taken;
+
+ /* out-of-order unlocking */
+
+ /* Find this lock's slot; panic if it was never recorded.
+ * Fill the hole with the most recent entry so the array
+ * stays dense. */
+ do
+ if (i == 0)
+ panic("simple_unlock");
+ while (simple_locks_info[--i].l != l);
+
+ simple_locks_info[i] = simple_locks_info[simple_locks_taken-1];
+ }
+ barrier();
+ simple_locks_taken--;
+ simple_locks_info[simple_locks_taken] = (struct simple_locks_info) {0};
+}
+
+#endif /* MACH_SLOCKS && NCPUS == 1 */
+
+/*
+ * Routine: lock_init
+ * Function:
+ * Initialize a lock; required before use.
+ * Note that clients declare the "struct lock"
+ * variables and then initialize them, rather
+ * than getting a new one from this module.
+ */
+void lock_init(
+ lock_t l,
+ boolean_t can_sleep)
+{
+ memset(l, 0, sizeof(lock_data_t));
+ simple_lock_init(&l->interlock);
+ l->want_write = FALSE;
+ l->want_upgrade = FALSE;
+ l->read_count = 0;
+ l->can_sleep = can_sleep;
+ /* -1 marks "no recursive owner"; set by lock_set_recursive(). */
+ l->thread = (struct thread *)-1; /* XXX */
+ l->recursion_depth = 0;
+}
+
+/* Change whether blocked attempts on L sleep or just spin. */
+void lock_sleepable(
+ lock_t l,
+ boolean_t can_sleep)
+{
+ simple_lock(&l->interlock);
+ l->can_sleep = can_sleep;
+ simple_unlock(&l->interlock);
+}
+
+
+/*
+ * Sleep locks. These use the same data structure and algorithm
+ * as the spin locks, but the process sleeps while it is waiting
+ * for the lock. These work on uniprocessor systems.
+ */
+
+/* Acquire L for exclusive (write) access: first win the want_write
+ * bit (spinning up to lock_wait_time iterations before each sleep),
+ * then wait for accepted readers and upgraders to drain. */
+void lock_write(
+ lock_t l)
+{
+ int i;
+
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock.
+ */
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return;
+ }
+
+ /*
+ * Try to acquire the want_write bit.
+ */
+ while (l->want_write) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && l->want_write)
+ cpu_pause();
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && l->want_write) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+ l->want_write = TRUE;
+
+ /* Wait for readers (and upgrades) to finish */
+
+ while ((l->read_count != 0) || l->want_upgrade) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && (l->read_count != 0 ||
+ l->want_upgrade))
+ cpu_pause();
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
+ simple_unlock(&l->interlock);
+}
+
+/* Release whichever access the caller holds on L (read, recursion,
+ * upgrade, or write), waking sleepers once no readers remain. */
+void lock_done(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+
+ if (l->read_count != 0)
+ l->read_count--;
+ else
+ if (l->recursion_depth != 0)
+ l->recursion_depth--;
+ else
+ if (l->want_upgrade) {
+ l->want_upgrade = FALSE;
+#if MACH_LDEBUG
+ assert(l->writer == current_thread());
+ l->writer = THREAD_NULL;
+#endif /* MACH_LDEBUG */
+ } else {
+ l->want_write = FALSE;
+#if MACH_LDEBUG
+ assert(l->writer == current_thread());
+ l->writer = THREAD_NULL;
+#endif /* MACH_LDEBUG */
+ }
+
+ /*
+ * There is no reason to wakeup a waiting thread
+ * if the read-count is non-zero. Consider:
+ * we must be dropping a read lock
+ * threads are waiting only if one wants a write lock
+ * if there are still readers, they can't proceed
+ */
+
+ if (l->waiting && (l->read_count == 0)) {
+ l->waiting = FALSE;
+ thread_wakeup(l);
+ }
+
+ simple_unlock(&l->interlock);
+}
+
+/* Acquire L for shared (read) access, waiting while a writer or
+ * upgrader holds or wants the lock. */
+void lock_read(
+ lock_t l)
+{
+ int i;
+
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock.
+ */
+ l->read_count++;
+ simple_unlock(&l->interlock);
+ return;
+ }
+
+ while (l->want_write || l->want_upgrade) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && (l->want_write || l->want_upgrade))
+ cpu_pause();
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && (l->want_write || l->want_upgrade)) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+
+ l->read_count++;
+ simple_unlock(&l->interlock);
+}
+
+/*
+ * Routine: lock_read_to_write
+ * Function:
+ * Improves a read-only lock to one with
+ * write permission. If another reader has
+ * already requested an upgrade to a write lock,
+ * no lock is held upon return.
+ *
+ * Returns TRUE if the upgrade *failed*.
+ */
+boolean_t lock_read_to_write(
+ lock_t l)
+{
+ int i;
+
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ /* We no longer count among the readers while upgrading. */
+ l->read_count--;
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock.
+ */
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return(FALSE);
+ }
+
+ if (l->want_upgrade) {
+ /*
+ * Someone else has requested upgrade.
+ * Since we've released a read lock, wake
+ * him up.
+ */
+ if (l->waiting && (l->read_count == 0)) {
+ l->waiting = FALSE;
+ thread_wakeup(l);
+ }
+
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ l->want_upgrade = TRUE;
+
+ while (l->read_count != 0) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && l->read_count != 0)
+ cpu_pause();
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && l->read_count != 0) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
+ simple_unlock(&l->interlock);
+ return FALSE;
+}
+
+/* Convert a held write (or upgrade) lock on L into a read lock,
+ * waking any sleepers; the lock is never released in-between. */
+void lock_write_to_read(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+#if MACH_LDEBUG
+ assert(l->writer == current_thread());
+#endif /* MACH_LDEBUG */
+
+ l->read_count++;
+ if (l->recursion_depth != 0)
+ l->recursion_depth--;
+ else
+ if (l->want_upgrade)
+ l->want_upgrade = FALSE;
+ else
+ l->want_write = FALSE;
+
+ if (l->waiting) {
+ l->waiting = FALSE;
+ thread_wakeup(l);
+ }
+
+#if MACH_LDEBUG
+ assert(l->writer == current_thread());
+ l->writer = THREAD_NULL;
+#endif /* MACH_LDEBUG */
+ simple_unlock(&l->interlock);
+}
+
+
+/*
+ * Routine: lock_try_write
+ * Function:
+ * Tries to get a write lock.
+ *
+ * Returns FALSE if the lock is not held on return.
+ */
+
+boolean_t lock_try_write(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock - this thread already owns it for write.
+ */
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ if (l->want_write || l->want_upgrade || l->read_count) {
+ /*
+ * Can't get lock.
+ */
+ simple_unlock(&l->interlock);
+ return FALSE;
+ }
+
+ /*
+ * Have lock.
+ */
+
+ l->want_write = TRUE;
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
+ simple_unlock(&l->interlock);
+ return TRUE;
+}
+
+/*
+ * Routine: lock_try_read
+ * Function:
+ * Tries to get a read lock.
+ *
+ * Returns FALSE if the lock is not held on return.
+ */
+
+boolean_t lock_try_read(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock - owner may always add a read hold.
+ */
+ l->read_count++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ /* Fail rather than wait if a writer holds or wants the lock. */
+ if (l->want_write || l->want_upgrade) {
+ simple_unlock(&l->interlock);
+ return FALSE;
+ }
+
+ l->read_count++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+}
+
+/*
+ * Routine: lock_try_read_to_write
+ * Function:
+ * Improves a read-only lock to one with
+ * write permission. If another reader has
+ * already requested an upgrade to a write lock,
+ * the read lock is still held upon return.
+ *
+ * Returns FALSE if the upgrade *failed*.
+ */
+boolean_t lock_try_read_to_write(
+ lock_t l)
+{
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock
+ */
+ l->read_count--;
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ /* Unlike lock_read_to_write(), failure here leaves the caller's
+ * read lock held. */
+ if (l->want_upgrade) {
+ simple_unlock(&l->interlock);
+ return FALSE;
+ }
+ l->want_upgrade = TRUE;
+ l->read_count--;
+
+ /* Wait (sleeping - no spin phase here) for readers to drain. */
+ while (l->read_count != 0) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
+ simple_unlock(&l->interlock);
+ return TRUE;
+}
+
+/*
+ * Allow a process that has a lock for write to acquire it
+ * recursively (for read, write, or update).
+ */
+void lock_set_recursive(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+#if MACH_LDEBUG
+ assert(l->writer == current_thread());
+#endif /* MACH_LDEBUG */
+
+ if (!l->want_write) {
+ panic("lock_set_recursive: don't have write lock");
+ }
+ /* Record the owner so subsequent lock_* calls detect recursion. */
+ l->thread = current_thread();
+ simple_unlock(&l->interlock);
+}
+
+/*
+ * Prevent a lock from being re-acquired.
+ */
+void lock_clear_recursive(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+ if (l->thread != current_thread()) {
+ panic("lock_clear_recursive: wrong thread");
+ }
+ if (l->recursion_depth == 0)
+ /* -1 marks "no recursive owner" (see lock_init). */
+ l->thread = (struct thread *)-1; /* XXX */
+ simple_unlock(&l->interlock);
+}
+
+#if MACH_KDB
+#if MACH_SLOCKS && NCPUS == 1
+/* ddb command: list every simple lock currently held, with the
+ * expression and source location that acquired it. */
+void db_show_all_slocks(void)
+{
+ int i;
+ struct simple_locks_info *info;
+ simple_lock_t l; /* NOTE(review): appears unused - confirm */
+
+ for (i = 0; i < simple_locks_taken; i++) {
+ info = &simple_locks_info[i];
+ db_printf("%d: %s (", i, info->expr);
+ db_printsym(info->l, DB_STGY_ANY);
+ db_printf(") locked by %s\n", info->loc);
+ }
+}
+#else /* MACH_SLOCKS && NCPUS == 1 */
+void db_show_all_slocks(void)
+{
+#if MACH_LOCK_MON
+ lip();
+#else
+ db_printf("simple lock info not available\n");
+#endif
+}
+#endif /* MACH_SLOCKS && NCPUS == 1 */
+#endif /* MACH_KDB */
diff --git a/kern/lock.h b/kern/lock.h
new file mode 100644
index 0000000..9d081d3
--- /dev/null
+++ b/kern/lock.h
@@ -0,0 +1,316 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/lock.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Locking primitives definitions
+ */
+
+#ifndef _KERN_LOCK_H_
+#define _KERN_LOCK_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <machine/spl.h>
+
+/*
+ * Note: we cannot blindly use simple locks in interrupt handlers, otherwise one
+ * may try to acquire a lock while already having the lock, thus a deadlock.
+ *
+ * When locks are needed in interrupt handlers, the _irq versions of the calls
+ * should be used, which disable interrupts (by calling splhigh) before acquiring
+ * the lock, thus preventing the deadlock. They need to be used this way:
+ *
+ * spl_t s = simple_lock_irq(&mylock);
+ * [... critical section]
+ * simple_unlock_irq(s, &mylock);
+ *
+ * To catch faulty code, when MACH_LDEBUG is set we check that non-_irq versions
+ * are not called while handling an interrupt.
+ *
+ * In the following, the _nocheck versions don't check anything, the _irq
+ * versions disable interrupts, and the pristine versions add a check when
+ * MACH_LDEBUG is set.
+ */
+
+#if NCPUS > 1
+#include <machine/lock.h>/*XXX*/
+/* Map the _nocheck names onto the underlying entry points; which set
+ * is used depends on MACH_LOCK_MON. */
+#if MACH_LOCK_MON == 0
+#define simple_lock_nocheck _simple_lock
+#define simple_lock_try_nocheck _simple_lock_try
+#define simple_unlock_nocheck _simple_unlock
+#else
+#define simple_lock_nocheck simple_lock
+#define simple_lock_try_nocheck simple_lock_try
+#define simple_unlock_nocheck simple_unlock
+#endif
+#endif
+
+/* Simple locks only get real storage and code on multiprocessors
+ * (see the #else branch of MACH_SLOCKS below). */
+#define MACH_SLOCKS NCPUS > 1
+
+/*
+ * A simple spin lock.
+ */
+
+struct slock {
+ volatile natural_t lock_data; /* in general 1 bit is sufficient */
+ struct {} is_a_simple_lock; /* empty marker, see simple_lock_assert */
+};
+
+/*
+ * Used by macros to assert that the given argument is a simple
+ * lock.
+ */
+#define simple_lock_assert(l) (void) &(l)->is_a_simple_lock
+
+typedef struct slock simple_lock_data_t;
+typedef struct slock *simple_lock_t;
+
+#if MACH_SLOCKS
+/*
+ * Use the locks: real storage is allocated and the operations
+ * expand to actual code.
+ */
+
+#define decl_simple_lock_data(class,name) \
+class simple_lock_data_t name;
+#define def_simple_lock_data(class,name) \
+class simple_lock_data_t name = SIMPLE_LOCK_INITIALIZER(&name);
+#define def_simple_lock_irq_data(class,name) \
+class simple_lock_irq_data_t name = { SIMPLE_LOCK_INITIALIZER(&name.lock) };
+
+#define simple_lock_addr(lock) (simple_lock_assert(&(lock)), \
+ &(lock))
+#define simple_lock_irq_addr(l) (simple_lock_irq_assert(&(l)), \
+ &(l)->lock)
+
+#if (NCPUS > 1)
+
+/*
+ * The single-CPU debugging routines are not valid
+ * on a multiprocessor.
+ */
+#define simple_lock_taken(lock) (simple_lock_assert(lock), \
+ 1) /* always succeeds */
+#define check_simple_locks()
+#define check_simple_locks_enable()
+#define check_simple_locks_disable()
+
+#else /* NCPUS > 1 */
+/*
+ * Use our single-CPU locking test routines (kern/lock.c).
+ */
+
+extern void simple_lock_init(simple_lock_t);
+extern void _simple_lock(simple_lock_t,
+ const char *, const char *);
+extern void _simple_unlock(simple_lock_t);
+extern boolean_t _simple_lock_try(simple_lock_t,
+ const char *, const char *);
+
+/* We provide simple_lock and simple_lock_try so that we can save the
+ location. */
+#define XSTR(x) #x
+#define STR(x) XSTR(x)
+#define LOCATION __FILE__ ":" STR(__LINE__)
+
+#define simple_lock_nocheck(lock) _simple_lock((lock), #lock, LOCATION)
+#define simple_lock_try_nocheck(lock) _simple_lock_try((lock), #lock, LOCATION)
+#define simple_unlock_nocheck(lock) _simple_unlock((lock))
+
+#define simple_lock_pause()
+#define simple_lock_taken(lock) (simple_lock_assert(lock), \
+ (lock)->lock_data)
+
+extern void check_simple_locks(void);
+extern void check_simple_locks_enable(void);
+extern void check_simple_locks_disable(void);
+
+#endif /* NCPUS > 1 */
+
+#else /* MACH_SLOCKS */
+/*
+ * Do not allocate storage for locks if not needed: everything reduces
+ * to type-checked no-ops.
+ */
+struct simple_lock_data_empty { struct {} is_a_simple_lock; };
+struct simple_lock_irq_data_empty { struct simple_lock_data_empty slock; };
+#define decl_simple_lock_data(class,name) \
+class struct simple_lock_data_empty name;
+#define def_simple_lock_data(class,name) \
+class struct simple_lock_data_empty name;
+#define def_simple_lock_irq_data(class,name) \
+class struct simple_lock_irq_data_empty name;
+#define simple_lock_addr(lock) (simple_lock_assert(&(lock)), \
+ (simple_lock_t)0)
+#define simple_lock_irq_addr(lock) (simple_lock_irq_assert(&(lock)), \
+ (simple_lock_t)0)
+
+/*
+ * No multiprocessor locking is necessary.
+ */
+#define simple_lock_init(l) simple_lock_assert(l)
+#define simple_lock_nocheck(l) simple_lock_assert(l)
+#define simple_unlock_nocheck(l) simple_lock_assert(l)
+#define simple_lock_try_nocheck(l) (simple_lock_assert(l), \
+ TRUE) /* always succeeds */
+#define simple_lock_taken(l) (simple_lock_assert(l), \
+ 1) /* always succeeds */
+#define check_simple_locks()
+#define check_simple_locks_enable()
+#define check_simple_locks_disable()
+#define simple_lock_pause()
+
+#endif /* MACH_SLOCKS */
+
+
+/* Mutexes are currently just aliases for simple locks. */
+#define decl_mutex_data(class,name) decl_simple_lock_data(class,name)
+#define def_mutex_data(class,name) def_simple_lock_data(class,name)
+#define mutex_try(l) simple_lock_try(l)
+#define mutex_lock(l) simple_lock(l)
+#define mutex_unlock(l) simple_unlock(l)
+#define mutex_init(l) simple_lock_init(l)
+
+
+/*
+ * The general lock structure. Provides for multiple readers,
+ * upgrading from read to write, and sleeping until the lock
+ * can be gained.
+ *
+ * On some architectures, assembly language code in the 'inline'
+ * program fiddles the lock structures. It must be changed in
+ * concert with the structure layout.
+ *
+ * Only the "interlock" field is used for hardware exclusion;
+ * other fields are modified with normal instructions after
+ * acquiring the interlock bit.
+ */
+struct lock {
+ struct thread *thread; /* Thread that has lock, if
+ recursive locking allowed */
+ unsigned int read_count:16, /* Number of accepted readers */
+ /* boolean_t */ want_upgrade:1, /* Read-to-write upgrade waiting */
+ /* boolean_t */ want_write:1, /* Writer is waiting, or
+ locked for write */
+ /* boolean_t */ waiting:1, /* Someone is sleeping on lock */
+ /* boolean_t */ can_sleep:1, /* Can attempts to lock go to sleep? */
+ recursion_depth:12, /* Depth of recursion */
+ :0;
+#if MACH_LDEBUG
+ struct thread *writer;
+#endif /* MACH_LDEBUG */
+ decl_simple_lock_data(,interlock)
+ /* Hardware interlock field.
+ Last in the structure so that
+ field offsets are the same whether
+ or not it is present. */
+};
+
+typedef struct lock lock_data_t;
+typedef struct lock *lock_t;
+
+/* Sleep locks must work even if no multiprocessing */
+
+extern void lock_init(lock_t, boolean_t);
+extern void lock_sleepable(lock_t, boolean_t);
+extern void lock_write(lock_t);
+extern void lock_read(lock_t);
+extern void lock_done(lock_t);
+extern boolean_t lock_read_to_write(lock_t);
+extern void lock_write_to_read(lock_t);
+extern boolean_t lock_try_write(lock_t);
+extern boolean_t lock_try_read(lock_t);
+extern boolean_t lock_try_read_to_write(lock_t);
+
+#define lock_read_done(l) lock_done(l)
+#define lock_write_done(l) lock_done(l)
+
+extern void lock_set_recursive(lock_t);
+extern void lock_clear_recursive(lock_t);
+
+/* Lock debugging support. */
+#if ! MACH_LDEBUG
+#define have_read_lock(l) 1
+#define have_write_lock(l) 1
+#define lock_check_no_interrupts()
+#else /* MACH_LDEBUG */
+/* XXX: We don't keep track of readers, so this is an approximation. */
+#define have_read_lock(l) ((l)->read_count > 0)
+#define have_write_lock(l) ((l)->writer == current_thread())
+extern unsigned long in_interrupt[NCPUS];
+#define lock_check_no_interrupts() assert(!in_interrupt[cpu_number()])
+#endif /* MACH_LDEBUG */
+#define have_lock(l) (have_read_lock(l) || have_write_lock(l))
+
+/* These are defined elsewhere with lock monitoring */
+#if MACH_LOCK_MON == 0
+#define simple_lock(l) do { \
+ lock_check_no_interrupts(); \
+ simple_lock_nocheck(l); \
+} while (0)
+#define simple_lock_try(l) ({ \
+ lock_check_no_interrupts(); \
+ simple_lock_try_nocheck(l); \
+})
+#define simple_unlock(l) do { \
+ lock_check_no_interrupts(); \
+ simple_unlock_nocheck(l); \
+} while (0)
+#endif
+
+/* _irq variants */
+
+struct slock_irq {
+ struct slock slock;
+};
+
+#define simple_lock_irq_assert(l) simple_lock_assert(&(l)->slock)
+
+typedef struct slock_irq simple_lock_irq_data_t;
+typedef struct slock_irq *simple_lock_irq_t;
+
+#define decl_simple_lock_irq_data(class,name) \
+class simple_lock_irq_data_t name;
+
+#define simple_lock_init_irq(l) simple_lock_init(&(l)->slock)
+
+#define simple_lock_irq(l) ({ \
+ spl_t __s = splhigh(); \
+ simple_lock_nocheck(&(l)->slock); \
+ __s; \
+})
+#define simple_unlock_irq(s, l) do { \
+ simple_unlock_nocheck(&(l)->slock); \
+ splx(s); \
+} while (0)
+
+#if MACH_KDB
+extern void db_show_all_slocks(void);
+#endif /* MACH_KDB */
+
+extern void lip(void);
+
+#endif /* _KERN_LOCK_H_ */
diff --git a/kern/lock_mon.c b/kern/lock_mon.c
new file mode 100644
index 0000000..3ca4592
--- /dev/null
+++ b/kern/lock_mon.c
@@ -0,0 +1,364 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1990 Carnegie-Mellon University
+ * Copyright (c) 1989 Carnegie-Mellon University
+ * All rights reserved. The CMU software License Agreement specifies
+ * the terms and conditions for use and redistribution.
+ */
+/*
+ * Copyright 1990 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Support For MP Debugging
+ * if MACH_MP_DEBUG is on, we use alternate locking
+ * routines to detect deadlocks
+ * Support for MP lock monitoring (MACH_LOCK_MON).
+ * Registers use of locks, contention.
+ * Depending on hardware also records time spent with locks held
+ */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <mach/machine/vm_types.h>
+#include <mach/boolean.h>
+#include <kern/thread.h>
+#include <kern/lock.h>
+#include <kern/printf.h>
+#include <kern/mach_clock.h>
+#include <machine/ipl.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+
+static void lis(int arg, int abs, int count);
+
+def_simple_lock_data(, kdb_lock)
+def_simple_lock_data(, printf_lock)
+
+#if NCPUS > 1 && MACH_LOCK_MON
+#define TIME_STAMP 1
+typedef unsigned int time_stamp_t;
+/* in milliseconds */
+#define time_stamp (elapsed_ticks * 1000 / hz)
+
+#define LOCK_INFO_MAX (1024*32)
+#define LOCK_INFO_HASH_COUNT 1024
+#define LOCK_INFO_PER_BUCKET (LOCK_INFO_MAX/LOCK_INFO_HASH_COUNT)
+
+#define HASH_LOCK(lock) ((long)lock>>5 & (LOCK_INFO_HASH_COUNT-1))
+
+struct lock_info {
+ unsigned int success;
+ unsigned int fail;
+ unsigned int masked;
+ unsigned int stack;
+ time_stamp_t time;
+ decl_simple_lock_data(, *lock)
+ vm_offset_t caller;
+};
+
+struct lock_info_bucket {
+ struct lock_info info[LOCK_INFO_PER_BUCKET];
+};
+
+static void print_lock_info(struct lock_info *li);
+
+struct lock_info_bucket lock_info[LOCK_INFO_HASH_COUNT];
+struct lock_info default_lock_info;
+unsigned default_lock_stack = 0;
+
+extern spl_t curr_ipl[];
+
+
+
+/*
+ * Find, or allocate, the lock_info record tracking statistics for the
+ * given simple lock.
+ *
+ * The lock address is hashed to a bucket, which is scanned linearly;
+ * an empty slot (li->lock == 0) ends the used part of a bucket and is
+ * claimed for this lock on first use.  When a bucket fills up, a
+ * shared default record is returned so callers never get NULL.
+ */
+struct lock_info *
+locate_lock_info(lock)
+decl_simple_lock_data(, **lock)
+{
+ struct lock_info *li = &(lock_info[HASH_LOCK(*lock)].info[0]);
+ int i;
+
+ for (i=0; i < LOCK_INFO_PER_BUCKET; i++, li++)
+ if (li->lock) {
+ if (li->lock == *lock)
+ return(li);
+ } else {
+ li->lock = *lock;
+ /* NOTE(review): recovers the caller's return address from
+ the stack word just below the argument; this assumes a
+ frame layout where the return address precedes the first
+ argument -- confirm per architecture. */
+ li->caller = *((vm_offset_t *)lock - 1);
+ return(li);
+ }
+ db_printf("out of lock_info slots\n");
+ li = &default_lock_info;
+ return(li);
+}
+
+
+/*
+ * Monitored simple_lock: record per-lock statistics (uncontended vs.
+ * blocked acquisitions, acquisitions with interrupts masked, per-thread
+ * lock-stack depth) before taking the lock with the raw primitives.
+ */
+void simple_lock(lock)
+decl_simple_lock_data(, *lock)
+{
+ struct lock_info *li = locate_lock_info(&lock);
+ int my_cpu = cpu_number();
+
+ if (current_thread())
+ li->stack = current_thread()->lock_stack++;
+ if (curr_ipl[my_cpu])
+ li->masked++;
+ if (_simple_lock_try(lock))
+ li->success++;
+ else {
+ _simple_lock(lock);
+ li->fail++;
+ }
+ /* Stamp acquisition time; simple_unlock folds in the hold time. */
+ li->time = time_stamp - li->time;
+}
+
+/*
+ * Monitored simple_lock_try: like simple_lock above, but never blocks.
+ * Returns 1 when the lock was obtained, 0 otherwise.  Statistics are
+ * only stamped/charged on a successful acquisition.
+ */
+int simple_lock_try(lock)
+decl_simple_lock_data(, *lock)
+{
+ struct lock_info *li = locate_lock_info(&lock);
+ int my_cpu = cpu_number();
+
+ if (curr_ipl[my_cpu])
+ li->masked++;
+ if (_simple_lock_try(lock)) {
+ li->success++;
+ li->time = time_stamp - li->time;
+ if (current_thread())
+ li->stack = current_thread()->lock_stack++;
+ return(1);
+ } else {
+ li->fail++;
+ return(0);
+ }
+}
+
+/*
+ * Monitored simple_unlock: release the lock, fold the hold time into
+ * the lock's statistics record, and pop the owning thread's lock-stack
+ * depth counter (guarded so it never underflows).
+ */
+void simple_unlock(lock)
+decl_simple_lock_data(, *lock)
+{
+ time_stamp_t stamp = time_stamp;
+ time_stamp_t *time = &locate_lock_info(&lock)->time;
+ unsigned *lock_stack;
+
+ /* time held the acquisition stamp (set in simple_lock); this turns
+ it into the elapsed hold time. */
+ *time = stamp - *time;
+ _simple_unlock(lock);
+ if (current_thread()) {
+ lock_stack = &current_thread()->lock_stack;
+ if (*lock_stack)
+ (*lock_stack)--;
+ }
+}
+
+/* "Lock Info Print": debugger entry point; dumps the records with the
+ highest absolute time statistic (arg 4, abs mode, default count). */
+void lip(void) {
+ lis(4, 1, 0);
+}
+
+#define lock_info_sort lis
+
+/*
+ * Print the top lock_info records ordered by one statistic.
+ *
+ * arg:   index of the statistic within struct lock_info (0 success,
+ *        1 fail, 2 masked, 3 stack, 4 time)
+ * abs:   nonzero prints absolute counters; zero normalizes fail/masked
+ *        to percentages and stack/time to per-acquisition averages
+ * count: number of records to print (default 8)
+ *
+ * Works as repeated selection passes: each pass scans the whole table,
+ * prints the records whose value equals the previous pass's maximum
+ * (old_val starts at UINT_MAX so the first pass prints nothing and
+ * only finds the maximum), then lowers old_val to that maximum.
+ * Aggregate totals are accumulated into 'mean' and printed last.
+ */
+static void lock_info_sort(int arg, int abs, int count)
+{
+ struct lock_info *li, mean;
+ int bucket = 0;
+ int i;
+ unsigned max_val;
+ unsigned old_val = (unsigned)-1;
+ struct lock_info *target_li = &lock_info[0].info[0];
+ unsigned sum;
+ unsigned empty, total;
+ unsigned curval;
+
+ printf("\nSUCCESS FAIL MASKED STACK TIME LOCK/CALLER\n");
+ if (!count)
+ count = 8 ;
+ while (count && target_li) {
+ empty = LOCK_INFO_HASH_COUNT;
+ target_li = 0;
+ total = 0;
+ max_val = 0;
+ mean.success = 0;
+ mean.fail = 0;
+ mean.masked = 0;
+ mean.stack = 0;
+ mean.time = 0;
+ mean.lock = (simple_lock_data_t *) &lock_info;
+ mean.caller = (vm_offset_t) &lock_info;
+ for (bucket = 0; bucket < LOCK_INFO_HASH_COUNT; bucket++) {
+ li = &lock_info[bucket].info[0];
+ if (li->lock)
+ empty--;
+ for (i= 0; i< LOCK_INFO_PER_BUCKET && li->lock; i++, li++) {
+ /* The debugger's own locks would dominate; skip them. */
+ if (li->lock == &kdb_lock || li->lock == &printf_lock)
+ continue;
+ total++;
+ /* Treat the record as an int array to pick stat 'arg'. */
+ curval = *((int *)li + arg);
+ sum = li->success + li->fail;
+ if(!sum && !abs)
+ continue;
+ if (!abs) switch(arg) {
+ case 0:
+ break;
+ case 1:
+ case 2:
+ curval = (curval*100) / sum;
+ break;
+ case 3:
+ case 4:
+ curval = curval / sum;
+ break;
+ }
+ if (curval > max_val && curval < old_val) {
+ max_val = curval;
+ target_li = li;
+ }
+ if (curval == old_val && count != 0) {
+ print_lock_info(li);
+ count--;
+ }
+ mean.success += li->success;
+ mean.fail += li->fail;
+ mean.masked += li->masked;
+ mean.stack += li->stack;
+ mean.time += li->time;
+ }
+ }
+ if (target_li)
+ old_val = max_val;
+ }
+ db_printf("\n%d total locks, %d empty buckets", total, empty );
+ if (default_lock_info.success)
+ db_printf(", default: %d", default_lock_info.success + default_lock_info.fail);
+ db_printf("\n");
+ print_lock_info(&mean);
+}
+
+#define lock_info_clear lic
+
+/*
+ * Reset all lock monitoring statistics.
+ *
+ * The per-bucket info records are stored contiguously inside the
+ * lock_info array, so the whole hash table can be cleared with one
+ * memset instead of clearing each struct lock_info individually.
+ */
+void lock_info_clear(void)
+{
+ memset(lock_info, 0, sizeof(lock_info));
+ memset(&default_lock_info, 0, sizeof(default_lock_info));
+}
+
+/*
+ * Print one lock_info record: the raw success count, then each of
+ * fail/masked/stack/time both absolute and relative to the total
+ * number of acquisitions, then the lock's symbol (or, when the lock
+ * has no nearby symbol, the caller's symbol and the raw lock address).
+ */
+static void print_lock_info(struct lock_info *li)
+{
+ db_addr_t off;
+ int sum = li->success + li->fail;
+
+ /* Guard the divisions below: a record with no recorded
+ acquisitions (e.g. the aggregate "mean" record when all
+ buckets are empty) would otherwise divide by zero. */
+ if (sum == 0)
+ sum = 1;
+ db_printf("%d %d/%d %d/%d %d/%d %d/%d ", li->success,
+ li->fail, (li->fail*100)/sum,
+ li->masked, (li->masked*100)/sum,
+ li->stack, li->stack/sum,
+ li->time, li->time/sum);
+ db_free_symbol(db_search_symbol((db_addr_t) li->lock, 0, &off));
+ if (off < 1024)
+ db_printsym((db_addr_t) li->lock, 0);
+ else {
+ db_printsym(li->caller, 0);
+ db_printf("(%X)", li->lock);
+ }
+ db_printf("\n");
+}
+
+#endif /* NCPUS > 1 && MACH_LOCK_MON */
+
+#if TIME_STAMP
+
+/*
+ * Measure lock/unlock operations
+ */
+
+/*
+ * Micro-benchmark: measure the average cost, in time stamps, of a
+ * lock/unlock pair on a private lock, for both the monitored
+ * (simple_lock) and raw (_simple_lock) entry points.  Intended to be
+ * invoked from the debugger; loops defaults to 1000.
+ */
+void time_lock(int loops)
+{
+ decl_simple_lock_data(, lock)
+ time_stamp_t stamp;
+ int i;
+
+
+ if (!loops)
+ loops = 1000;
+ simple_lock_init(&lock);
+ stamp = time_stamp;
+ for (i = 0; i < loops; i++) {
+ simple_lock(&lock);
+ simple_unlock(&lock);
+ }
+ stamp = time_stamp - stamp;
+ db_printf("%d stamps for simple_locks\n", stamp/loops);
+#if MACH_LOCK_MON
+ stamp = time_stamp;
+ for (i = 0; i < loops; i++) {
+ _simple_lock(&lock);
+ _simple_unlock(&lock);
+ }
+ stamp = time_stamp - stamp;
+ db_printf("%d stamps for _simple_locks\n", stamp/loops);
+#endif /* MACH_LOCK_MON */
+}
+#endif /* TIME_STAMP */
+
+#if MACH_MP_DEBUG
+
+/*
+ * Arrange in the lock routines to call the following
+ * routines. This way, when locks are free there is no performance
+ * penalty
+ */
+
+/*
+ * Debug replacement for spinning on a simple lock: spin with
+ * simple_lock_try and drop into the debugger if the lock cannot be
+ * obtained after ~1M attempts (likely deadlock).  The kdb and printf
+ * locks are exempted so the debugger itself stays usable.
+ */
+void
+retry_simple_lock(lock)
+decl_simple_lock_data(, *lock)
+{
+ int count = 0; /* was used without a declaration: compile error */
+
+ while(!simple_lock_try(lock))
+ if (count++ > 1000000 && lock != &kdb_lock) {
+ if (lock == &printf_lock)
+ return;
+ db_printf("cpu %d looping on simple_lock(%x) called by %x\n",
+ cpu_number(), lock, *(((int *)&lock) -1));
+ SoftDebugger("simple_lock timeout");
+ count = 0;
+ }
+}
+
+/*
+ * Debug replacement for spinning on a bit lock; as retry_simple_lock
+ * above, but for bit_lock_try.  Parameters keep their historical
+ * (pre-ANSI, implicit int) declarations.
+ */
+void
+retry_bit_lock(index, addr)
+{
+ int count = 0; /* was used without a declaration: compile error */
+
+ while(!bit_lock_try(index, addr))
+ if (count++ > 1000000) {
+ db_printf("cpu %d looping on bit_lock(%x, %x) called by %x\n",
+ cpu_number(), index, addr, *(((int *)&index) -1));
+ SoftDebugger("bit_lock timeout");
+ count = 0;
+ }
+}
+#endif /* MACH_MP_DEBUG */
diff --git a/kern/log2.h b/kern/log2.h
new file mode 100644
index 0000000..0e67701
--- /dev/null
+++ b/kern/log2.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Integer base 2 logarithm operations.
+ */
+
+#ifndef _KERN_LOG2_H
+#define _KERN_LOG2_H
+
+#include <kern/assert.h>
+
+#ifdef __LP64__
+#define LONG_BIT 64
+#else /* __LP64__ */
+#define LONG_BIT 32
+#endif /* __LP64__ */
+
+/*
+ * Return the base-2 logarithm of x, rounded down -- i.e. the bit
+ * index of the most significant set bit.  x must be nonzero
+ * (__builtin_clzl is undefined for zero).
+ */
+static inline unsigned int
+ilog2(unsigned long x)
+{
+ assert(x != 0);
+ /* Highest bit index = (word width - 1) - leading zero count. */
+ return (LONG_BIT - 1) - __builtin_clzl(x);
+}
+
+/*
+ * Return the base-2 logarithm of size, rounded up -- the order of the
+ * smallest power of two that is >= size.  size must be nonzero.
+ */
+static inline unsigned int
+iorder2(unsigned long size)
+{
+ assert(size != 0);
+
+ /* One is order zero; anything larger rounds up via ilog2 of
+ size - 1 (exact powers of two map to their own order). */
+ return (size == 1) ? 0 : ilog2(size - 1) + 1;
+}
+
+#endif /* _KERN_LOG2_H */
diff --git a/kern/mach.srv b/kern/mach.srv
new file mode 100644
index 0000000..b1cec60
--- /dev/null
+++ b/kern/mach.srv
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#ifdef MIGRATING_THREADS
+#define task_threads task_acts
+#define thread_terminate act_terminate
+#define thread_set_state act_set_state_immediate
+#define thread_get_state act_get_state_immediate
+#define thread_info act_thread_info
+#define thread_suspend act_suspend
+#define thread_resume act_resume
+#define thread_abort act_abort
+#define thread_set_special_port act_set_special_port
+#define thread_get_special_port act_get_special_port
+#endif /* MIGRATING_THREADS */
+
+#include <mach/mach.defs>
diff --git a/kern/mach4.srv b/kern/mach4.srv
new file mode 100644
index 0000000..ead5484
--- /dev/null
+++ b/kern/mach4.srv
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#ifdef MIGRATING_THREADS
+#define thread_enable_pc_sampling act_enable_pc_sampling
+#define thread_disable_pc_sampling act_disable_pc_sampling
+#define thread_get_sampled_pcs act_get_sampled_pcs
+#endif /* MIGRATING_THREADS */
+
+#include <mach/mach4.defs>
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
new file mode 100644
index 0000000..864704c
--- /dev/null
+++ b/kern/mach_clock.c
@@ -0,0 +1,657 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach_clock.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Clock primitives.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/machine.h>
+#include <mach/time_value.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <kern/counters.h>
+#include "cpu_number.h"
+#include <kern/debug.h>
+#include <kern/host.h>
+#include <kern/lock.h>
+#include <kern/mach_clock.h>
+#include <kern/mach_host.server.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/timer.h>
+#include <kern/priority.h>
+#include <vm/vm_kern.h>
+#include <machine/mach_param.h> /* HZ */
+#include <machine/machspl.h>
+#include <machine/model_dep.h>
+
+#if MACH_PCSAMPLE
+#include <kern/pc_sample.h>
+#endif
+
+#define MICROSECONDS_IN_ONE_SECOND 1000000
+
+int hz = HZ; /* number of ticks per second */
+int tick = (MICROSECONDS_IN_ONE_SECOND / HZ); /* number of usec per tick */
+time_value64_t time = { 0, 0 }; /* time since bootup (uncorrected) */
+unsigned long elapsed_ticks = 0; /* ticks elapsed since bootup */
+
+int timedelta = 0;
+int tickdelta = 0;
+
+#if HZ > 500
+unsigned tickadj = 1; /* can adjust HZ usecs per second */
+#else
+unsigned tickadj = 500 / HZ; /* can adjust 100 usecs per second */
+#endif
+unsigned bigadj = 1000000; /* adjust 10*tickadj if adjustment
+ > bigadj */
+
+/*
+ * This update protocol, with a check value, allows
+ * do {
+ * secs = mtime->seconds;
+ * __sync_synchronize();
+ * usecs = mtime->microseconds;
+ * __sync_synchronize();
+ * } while (secs != mtime->check_seconds);
+ * to read the time correctly.
+ */
+
+volatile mapped_time_value_t *mtime = 0;
+
+#define update_mapped_time(time) \
+MACRO_BEGIN \
+ if (mtime != 0) { \
+ mtime->check_seconds = (time)->seconds; \
+ mtime->check_seconds64 = (time)->seconds; \
+ __sync_synchronize(); \
+ mtime->microseconds = (time)->nanoseconds / 1000; \
+ mtime->time_value.nanoseconds = (time)->nanoseconds; \
+ __sync_synchronize(); \
+ mtime->seconds = (time)->seconds; \
+ mtime->time_value.seconds = (time)->seconds; \
+ } \
+MACRO_END
+
+#define read_mapped_time(time) \
+MACRO_BEGIN \
+ do { \
+ (time)->seconds = mtime->time_value.seconds; \
+ __sync_synchronize(); \
+ (time)->nanoseconds = mtime->time_value.nanoseconds; \
+ __sync_synchronize(); \
+ } while ((time)->seconds != mtime->check_seconds64); \
+MACRO_END
+
+def_simple_lock_irq_data(static, timer_lock) /* lock for ... */
+timer_elt_data_t timer_head; /* ordered list of timeouts */
+ /* (doubles as end-of-list) */
+
+/*
+ * Handle clock interrupts.
+ *
+ * The clock interrupt is assumed to be called at a (more or less)
+ * constant rate. The rate must be identical on all CPUS (XXX - fix).
+ *
+ * Usec is the number of microseconds that have elapsed since the
+ * last clock tick. It may be constant or computed, depending on
+ * the accuracy of the hardware clock.
+ *
+ */
+/*
+ * Per-tick clock interrupt handler.  Charges CPU time to the current
+ * thread and CPU-state counters, optionally takes a PC sample, and --
+ * on the master CPU only -- advances the tick count, applies any
+ * pending gradual time adjustment, publishes the mapped time page,
+ * and schedules the softclock timeout pass when a timer has expired.
+ */
+void clock_interrupt(
+ int usec, /* microseconds per tick */
+ boolean_t usermode, /* executing user code */
+ boolean_t basepri, /* at base priority */
+ vm_offset_t pc) /* address of interrupted instruction */
+{
+ int my_cpu = cpu_number();
+ thread_t thread = current_thread();
+
+ counter(c_clock_ticks++);
+ counter(c_threads_total += c_threads_current);
+ counter(c_stacks_total += c_stacks_current);
+
+#if STAT_TIME
+ /*
+ * Increment the thread time, if using
+ * statistical timing.
+ */
+ if (usermode) {
+ /* NOTE(review): unlike the system-time branch below, this
+ path does not check thread for 0 -- presumably user mode
+ implies a current thread exists; confirm. */
+ timer_bump(&thread->user_timer, usec);
+ }
+ else {
+ /* Only bump timer if threads are initialized */
+ if (thread)
+ timer_bump(&thread->system_timer, usec);
+ }
+#endif /* STAT_TIME */
+
+ /*
+ * Increment the CPU time statistics.
+ */
+ {
+ int state;
+
+ if (usermode)
+ state = CPU_STATE_USER;
+ else if (!cpu_idle(my_cpu))
+ state = CPU_STATE_SYSTEM;
+ else
+ state = CPU_STATE_IDLE;
+
+ machine_slot[my_cpu].cpu_ticks[state]++;
+
+ /*
+ * Adjust the thread's priority and check for
+ * quantum expiration.
+ */
+
+ thread_quantum_update(my_cpu, thread, 1, state);
+ }
+
+#if MACH_PCSAMPLE
+ /*
+ * Take a sample of pc for the user if required.
+ * This had better be MP safe. It might be interesting
+ * to keep track of cpu in the sample.
+ */
+#ifndef MACH_KERNSAMPLE
+ if (usermode)
+#endif
+ {
+ if (thread)
+ take_pc_sample_macro(thread, SAMPLED_PC_PERIODIC, usermode, pc);
+ }
+#endif /* MACH_PCSAMPLE */
+
+ /*
+ * Time-of-day and time-out list are updated only
+ * on the master CPU.
+ */
+ if (my_cpu == master_cpu) {
+
+ spl_t s;
+ timer_elt_t telt;
+ boolean_t needsoft = FALSE;
+
+
+ /*
+ * Update the tick count since bootup, and handle
+ * timeouts.
+ */
+
+ s = simple_lock_irq(&timer_lock);
+
+ elapsed_ticks++;
+
+ /* The queue is kept sorted by expiry, so checking the head
+ suffices to know whether any timeout is due. */
+ telt = (timer_elt_t)queue_first(&timer_head.chain);
+ if (telt->ticks <= elapsed_ticks)
+ needsoft = TRUE;
+ simple_unlock_irq(s, &timer_lock);
+
+ /*
+ * Increment the time-of-day clock.
+ */
+ if (timedelta == 0) {
+ time_value64_add_nanos(&time, usec * 1000);
+ }
+ else {
+ /* A gradual adjustment (host_adjust_time) is pending:
+ stretch or shrink this tick by tickdelta microseconds
+ until timedelta is consumed. */
+ int delta;
+
+ if (timedelta < 0) {
+ if (usec > tickdelta) {
+ delta = usec - tickdelta;
+ timedelta += tickdelta;
+ } else {
+ /* Not enough time has passed, defer overflowing
+ * correction for later, keep only one microsecond
+ * delta */
+ delta = 1;
+ timedelta += usec - 1;
+ }
+ }
+ else {
+ delta = usec + tickdelta;
+ timedelta -= tickdelta;
+ }
+ time_value64_add_nanos(&time, delta * 1000);
+ }
+ update_mapped_time(&time);
+
+ /*
+ * Schedule soft-interrupt for timeout if needed
+ */
+ if (needsoft) {
+ if (basepri) {
+ /* Already at base priority: run softclock directly. */
+ (void) splsoftclock();
+ softclock();
+ }
+ else {
+ setsoftclock();
+ }
+ }
+ }
+}
+
+/*
+ * There is a nasty race between softclock and reset_timeout.
+ * For example, scheduling code looks at timer_set and calls
+ * reset_timeout, thinking the timer is set. However, softclock
+ * has already removed the timer but hasn't called thread_timeout
+ * yet.
+ *
+ * Interim solution: We initialize timers after pulling
+ * them out of the queue, so a race with reset_timeout won't
+ * hurt. The timeout functions (eg, thread_timeout,
+ * thread_depress_timeout) check timer_set/depress_priority
+ * to see if the timer has been cancelled and if so do nothing.
+ *
+ * This still isn't correct. For example, softclock pulls a
+ * timer off the queue, then thread_go resets timer_set (but
+ * reset_timeout does nothing), then thread_set_timeout puts the
+ * timer back on the queue and sets timer_set, then
+ * thread_timeout finally runs and clears timer_set, then
+ * thread_set_timeout tries to put the timer on the queue again
+ * and corrupts it.
+ */
+
+/*
+ * Soft-interrupt pass: fire every timeout whose expiry tick has been
+ * reached.  Each element is removed and marked unset while the timer
+ * lock is held, but its callout runs with the lock dropped, so the
+ * callout may freely call set_timeout/reset_timeout (see the race
+ * discussion above).
+ */
+void softclock(void)
+{
+ /*
+ * Handle timeouts.
+ */
+ spl_t s;
+ timer_elt_t telt;
+ void (*fcn)( void * param );
+ void *param;
+
+ while (TRUE) {
+ s = simple_lock_irq(&timer_lock);
+ /* Queue is sorted by expiry; the head bounds all others. */
+ telt = (timer_elt_t) queue_first(&timer_head.chain);
+ if (telt->ticks > elapsed_ticks) {
+ simple_unlock_irq(s, &timer_lock);
+ break;
+ }
+ fcn = telt->fcn;
+ param = telt->param;
+
+ remqueue(&timer_head.chain, (queue_entry_t)telt);
+ telt->set = TELT_UNSET;
+ simple_unlock_irq(s, &timer_lock);
+
+ assert(fcn != 0);
+ (*fcn)(param);
+ }
+}
+
+/*
+ * Set timeout.
+ *
+ * Parameters:
+ * telt timer element. Function and param are already set.
+ * interval time-out interval, in hz.
+ */
+/*
+ * Set timeout.
+ *
+ * Parameters:
+ * telt timer element. Function and param are already set.
+ * interval time-out interval, in hz.
+ *
+ * Converts the relative interval into an absolute expiry tick and
+ * inserts the element into the expiry-sorted timeout queue.  The scan
+ * needs no end test: timer_head itself carries ticks == ~0 as a
+ * sentinel (see init_timeout), so 'next' always terminates there.
+ */
+void set_timeout(
+ timer_elt_t telt, /* already loaded */
+ unsigned int interval)
+{
+ spl_t s;
+ timer_elt_t next;
+
+ s = simple_lock_irq(&timer_lock);
+
+ interval += elapsed_ticks;
+
+ for (next = (timer_elt_t)queue_first(&timer_head.chain);
+ ;
+ next = (timer_elt_t)queue_next((queue_entry_t)next)) {
+
+ if (next->ticks > interval)
+ break;
+ }
+ telt->ticks = interval;
+ /*
+ * Insert new timer element before 'next'
+ * (after 'next'->prev)
+ */
+ insque((queue_entry_t) telt, ((queue_entry_t)next)->prev);
+ telt->set = TELT_SET;
+ simple_unlock_irq(s, &timer_lock);
+}
+
+/*
+ * Cancel a pending timeout.  Returns TRUE when the element was set
+ * and has been removed from the timeout queue, FALSE when it was not
+ * set (already fired or never armed).
+ */
+boolean_t reset_timeout(timer_elt_t telt)
+{
+ boolean_t removed;
+ spl_t s;
+
+ s = simple_lock_irq(&timer_lock);
+ removed = telt->set ? TRUE : FALSE;
+ if (removed) {
+ remqueue(&timer_head.chain, (queue_entry_t)telt);
+ telt->set = TELT_UNSET;
+ }
+ simple_unlock_irq(s, &timer_lock);
+ return removed;
+}
+
+/*
+ * Initialize the timeout machinery: the timer lock, the empty timeout
+ * queue, and the tick counter.  timer_head doubles as the queue's
+ * end-of-list sentinel, so its expiry is set to the maximum value.
+ */
+void init_timeout(void)
+{
+ simple_lock_init_irq(&timer_lock);
+ queue_init(&timer_head.chain);
+ timer_head.ticks = ~0; /* MAXUINT - sentinel */
+
+ elapsed_ticks = 0;
+}
+
+/*
+ * We record timestamps using the boot-time clock. We keep track of
+ * the boot-time clock by storing the difference to the real-time
+ * clock.
+ */
+struct time_value64 clock_boottime_offset;
+
+/*
+ * Update the offset of the boot-time clock from the real-time clock.
+ * This function must be called when the real-time clock is updated.
+ * This function must be called at SPLHIGH.
+ */
+static void
+clock_boottime_update(const struct time_value64 *new_time)
+{
+ /* Accumulate (current time - new time) into the offset, so that
+ boot-time stamps (real time + offset) stay continuous across
+ the step in the real-time clock. */
+ struct time_value64 delta = time;
+ time_value64_sub(&delta, new_time);
+ time_value64_add(&clock_boottime_offset, &delta);
+}
+
+/*
+ * Record a timestamp in STAMP. Records values in the boot-time clock
+ * frame.
+ */
+void
+record_time_stamp(time_value64_t *stamp)
+{
+ /* Read the mapped real-time clock and shift it into the
+ boot-time frame.  NOTE(review): read_mapped_time dereferences
+ mtime, so this must not run before mapable_time_init. */
+ read_mapped_time(stamp);
+ time_value64_add(stamp, &clock_boottime_offset);
+}
+
+/*
+ * Read a timestamp in STAMP into RESULT. Returns values in the
+ * real-time clock frame.
+ */
+void
+read_time_stamp (const time_value64_t *stamp, time_value64_t *result)
+{
+ /* Inverse of record_time_stamp: subtract the boot-time offset to
+ convert a boot-time stamp back into the real-time frame. */
+ *result = *stamp;
+ time_value64_sub(result, &clock_boottime_offset);
+}
+
+
+/*
+ * Read the time (deprecated version).
+ */
+/*
+ * Return the current real-time clock value through the deprecated
+ * 32-bit time_value_t interface (see host_get_time64 for the
+ * preferred variant).  Fails with KERN_INVALID_HOST for a null host.
+ */
+kern_return_t
+host_get_time(const host_t host, time_value_t *current_time)
+{
+ time_value64_t now;
+
+ if (host == HOST_NULL)
+ return (KERN_INVALID_HOST);
+
+ read_mapped_time(&now);
+ TIME_VALUE64_TO_TIME_VALUE(&now, current_time);
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Read the time.
+ */
+/*
+ * Return the current real-time clock value (64-bit interface).
+ * Fails with KERN_INVALID_HOST for a null host.
+ */
+kern_return_t
+host_get_time64(const host_t host, time_value64_t *current_time)
+{
+ if (host == HOST_NULL)
+ return(KERN_INVALID_HOST);
+
+ read_mapped_time(current_time);
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Set the time. Only available to privileged users.
+ */
+/*
+ * Set the time (deprecated 32-bit interface): widen the value and
+ * delegate to host_set_time64, which performs all the real work.
+ */
+kern_return_t
+host_set_time(const host_t host, time_value_t new_time)
+{
+ time_value64_t new_time64;
+ TIME_VALUE_TO_TIME_VALUE64(&new_time, &new_time64);
+ return host_set_time64(host, new_time64);
+}
+
+/*
+ * Set the real-time clock.  Runs on the master CPU at splhigh: updates
+ * the boot-time offset (so boot-time stamps stay continuous), the
+ * kernel time, the mapped time page, and the hardware time-of-day
+ * clock (resettodr).
+ */
+kern_return_t
+host_set_time64(const host_t host, time_value64_t new_time)
+{
+ spl_t s;
+
+ if (host == HOST_NULL)
+ return(KERN_INVALID_HOST);
+
+#if NCPUS > 1
+ /*
+ * Switch to the master CPU to synchronize correctly.
+ */
+ thread_bind(current_thread(), master_processor);
+ if (current_processor() != master_processor)
+ thread_block(thread_no_continuation);
+#endif /* NCPUS > 1 */
+
+ s = splhigh();
+ clock_boottime_update(&new_time);
+ time = new_time;
+ update_mapped_time(&time);
+ resettodr();
+ splx(s);
+
+#if NCPUS > 1
+ /*
+ * Switch off the master CPU.
+ */
+ thread_bind(current_thread(), PROCESSOR_NULL);
+#endif /* NCPUS > 1 */
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Adjust the time gradually.
+ */
+/*
+ * Adjust the time gradually (deprecated 32-bit interface): widen the
+ * adjustment, delegate to host_adjust_time64, and narrow the returned
+ * previous adjustment on success.
+ */
+kern_return_t
+host_adjust_time(
+ const host_t host,
+ time_value_t new_adjustment,
+ time_value_t *old_adjustment /* OUT */)
+{
+ time_value64_t old_adjustment64;
+ time_value64_t new_adjustment64;
+ kern_return_t ret;
+
+ TIME_VALUE_TO_TIME_VALUE64(&new_adjustment, &new_adjustment64);
+ ret = host_adjust_time64(host, new_adjustment64, &old_adjustment64);
+ if (ret == KERN_SUCCESS) {
+ TIME_VALUE64_TO_TIME_VALUE(&old_adjustment64, old_adjustment);
+ }
+ return ret;
+}
+
+/*
+ * Adjust the time gradually.
+ */
+/*
+ * Adjust the time gradually: install a pending correction (timedelta,
+ * in microseconds) that clock_interrupt consumes tickdelta microseconds
+ * per tick.  Returns the previously pending adjustment.  Runs on the
+ * master CPU at splclock.
+ */
+kern_return_t
+host_adjust_time64(
+ const host_t host,
+ time_value64_t new_adjustment,
+ time_value64_t *old_adjustment /* OUT */)
+{
+ time_value64_t oadj;
+ uint64_t ndelta_microseconds;
+ spl_t s;
+
+ if (host == HOST_NULL)
+ return (KERN_INVALID_HOST);
+
+ /* Note we only adjust up to microsecond precision */
+ /* NOTE(review): ndelta_microseconds is unsigned; a negative
+ adjustment wraps to a huge value here, so the bigadj
+ comparison below always selects the fast rate and the sign is
+ only restored by the truncating assignment to the (signed,
+ int) timedelta -- confirm this is the intended behavior. */
+ ndelta_microseconds = new_adjustment.seconds * MICROSECONDS_IN_ONE_SECOND
+ + new_adjustment.nanoseconds / 1000;
+
+#if NCPUS > 1
+ thread_bind(current_thread(), master_processor);
+ if (current_processor() != master_processor)
+ thread_block(thread_no_continuation);
+#endif /* NCPUS > 1 */
+
+ s = splclock();
+
+ /* Report the adjustment that was still pending, if any. */
+ oadj.seconds = timedelta / MICROSECONDS_IN_ONE_SECOND;
+ oadj.nanoseconds = (timedelta % MICROSECONDS_IN_ONE_SECOND) * 1000;
+
+ if (timedelta == 0) {
+ /* No adjustment in progress: pick the per-tick rate --
+ 10x faster for large corrections. */
+ if (ndelta_microseconds > bigadj)
+ tickdelta = 10 * tickadj;
+ else
+ tickdelta = tickadj;
+ }
+ /* Make ndelta_microseconds a multiple of tickdelta */
+ if (ndelta_microseconds % tickdelta)
+ ndelta_microseconds = ndelta_microseconds / tickdelta * tickdelta;
+
+ timedelta = ndelta_microseconds;
+
+ splx(s);
+#if NCPUS > 1
+ thread_bind(current_thread(), PROCESSOR_NULL);
+#endif /* NCPUS > 1 */
+
+ *old_adjustment = oadj;
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Allocate and zero the wired page that exports the time to user space
+ * (the mapped-time page read via the update/check protocol above), and
+ * publish the current time into it.  Panics on allocation failure.
+ */
+void mapable_time_init(void)
+{
+ if (kmem_alloc_wired(kernel_map, (vm_offset_t *) &mtime, PAGE_SIZE)
+ != KERN_SUCCESS)
+ panic("mapable_time_init");
+ memset((void *) mtime, 0, PAGE_SIZE);
+ update_mapped_time(&time);
+}
+
+/* Device open hook for the time device: nothing to do, always
+ succeeds (the mapped time page itself is set up at boot). */
+int timeopen(dev_t dev, int flag, io_req_t ior)
+{
+ return(0);
+}
+/* Device close hook for the time device: nothing to release. */
+void timeclose(dev_t dev, int flag)
+{
+ return;
+}
+
+/*
+ * Compatibility for device drivers.
+ * New code should use set_timeout/reset_timeout and private timers.
+ * This code can't use a cache to allocate timers, because
+ * it can be called from interrupt handlers.
+ */
+
+#define NTIMERS 20
+
+timer_elt_data_t timeout_timers[NTIMERS];
+
+/*
+ * Set timeout.
+ *
+ * fcn: function to call
+ * param: parameter to pass to function
+ * interval: timeout interval, in hz.
+ */
+/*
+ * Set timeout.
+ *
+ * fcn: function to call
+ * param: parameter to pass to function
+ * interval: timeout interval, in hz.
+ *
+ * Grabs a free element from the static timeout_timers pool (no
+ * allocator: this may run from interrupt context) and arms it via
+ * set_timeout.  Panics when all NTIMERS slots are in use.
+ */
+void timeout(
+ void (*fcn)(void *param),
+ void * param,
+ int interval)
+{
+ spl_t s;
+ timer_elt_t elt;
+
+ s = simple_lock_irq(&timer_lock);
+ for (elt = &timeout_timers[0]; elt < &timeout_timers[NTIMERS]; elt++)
+ if (elt->set == TELT_UNSET)
+ break;
+ if (elt == &timeout_timers[NTIMERS])
+ panic("timeout");
+ elt->fcn = fcn;
+ elt->param = param;
+ /* TELT_ALLOC reserves the slot until set_timeout arms it. */
+ elt->set = TELT_ALLOC;
+ simple_unlock_irq(s, &timer_lock);
+
+ set_timeout(elt, (unsigned int)interval);
+}
+
+/*
+ * Returns a boolean indicating whether the timeout element was found
+ * and removed.
+ */
+/*
+ * Cancel a timeout set via timeout(), identified by its (function,
+ * parameter) pair.  Returns a boolean indicating whether the timeout
+ * element was found on the queue and removed.
+ */
+boolean_t untimeout(void (*fcn)( void * param ), const void *param)
+{
+ spl_t s;
+ timer_elt_t elt;
+
+ s = simple_lock_irq(&timer_lock);
+ queue_iterate(&timer_head.chain, elt, timer_elt_t, chain) {
+
+ if ((fcn == elt->fcn) && (param == elt->param)) {
+ /*
+ * Found it.
+ */
+ remqueue(&timer_head.chain, (queue_entry_t)elt);
+ elt->set = TELT_UNSET;
+
+ simple_unlock_irq(s, &timer_lock);
+ return (TRUE);
+ }
+ }
+ simple_unlock_irq(s, &timer_lock);
+ return (FALSE);
+}
diff --git a/kern/mach_clock.h b/kern/mach_clock.h
new file mode 100644
index 0000000..66903b8
--- /dev/null
+++ b/kern/mach_clock.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese and others.
+ */
+
+#ifndef _KERN_MACH_CLOCK_H_
+#define _KERN_MACH_CLOCK_H_
+
+/*
+ * Mach time-out and time-of-day facility.
+ */
+
+#include <mach/machine/kern_return.h>
+#include <mach/time_value.h>
+#include <kern/host.h>
+#include <kern/queue.h>
+#include <sys/types.h>
+
+struct io_req;
+typedef struct io_req *io_req_t;
+
+
+/* Timers in kernel. */
+extern unsigned long elapsed_ticks; /* number of ticks elapsed since bootup */
+extern int hz; /* number of ticks per second */
+extern int tick; /* number of usec per tick */
+
+extern time_value64_t time; /* time since bootup (uncorrected) */
+
+typedef void timer_func_t(void *);
+
+/* Time-out element. */
+struct timer_elt {
+ queue_chain_t chain; /* chain in order of expiration */
+ timer_func_t *fcn; /* function to call */
+ void * param; /* with this parameter */
+ unsigned long ticks; /* expiration time, in ticks */
+ int set; /* unset | set | allocated */
+};
+#define TELT_UNSET 0 /* timer not set */
+#define TELT_SET 1 /* timer set */
+#define TELT_ALLOC 2 /* timer allocated from pool */
+
+typedef struct timer_elt timer_elt_data_t;
+typedef struct timer_elt *timer_elt_t;
+
+
+extern void clock_interrupt(
+ int usec,
+ boolean_t usermode,
+ boolean_t basepri,
+ vm_offset_t pc);
+
+extern void softclock (void);
+
+/* For `private' timer elements. */
+extern void set_timeout(
+ timer_elt_t telt,
+ unsigned int interval);
+extern boolean_t reset_timeout(timer_elt_t telt);
+
+/*
+ * NOTE(review): `struct timer_elt' above declares no `private' member,
+ * so this macro cannot compile if it is ever expanded -- it appears to
+ * be unused legacy.  Confirm before calling it from new code.
+ */
+#define set_timeout_setup(telt,fcn,param,interval) \
+ ((telt)->fcn = (fcn), \
+ (telt)->param = (param), \
+ (telt)->private = TRUE, \
+ set_timeout((telt), (interval)))
+
+/* Cancel TELT only if it is currently set ((t)->set != TELT_UNSET). */
+#define reset_timeout_check(t) \
+ MACRO_BEGIN \
+ if ((t)->set) \
+ reset_timeout((t)); \
+ MACRO_END
+
+extern void init_timeout (void);
+
+/*
+ * Record a timestamp in STAMP. Records values in the boot-time clock
+ * frame.
+ */
+extern void record_time_stamp (time_value64_t *stamp);
+
+/*
+ * Read a timestamp in STAMP into RESULT. Returns values in the
+ * real-time clock frame.
+ */
+extern void read_time_stamp (const time_value64_t *stamp, time_value64_t *result);
+
+extern void mapable_time_init (void);
+
+/* For public timer elements. */
+extern void timeout(timer_func_t *fcn, void *param, int interval);
+extern boolean_t untimeout(timer_func_t *fcn, const void *param);
+
+extern int timeopen(dev_t dev, int flag, io_req_t ior);
+extern void timeclose(dev_t dev, int flag);
+
+#endif /* _KERN_MACH_CLOCK_H_ */
diff --git a/kern/mach_debug.srv b/kern/mach_debug.srv
new file mode 100644
index 0000000..c78b9a4
--- /dev/null
+++ b/kern/mach_debug.srv
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach_debug/mach_debug.defs>
diff --git a/kern/mach_factor.c b/kern/mach_factor.c
new file mode 100644
index 0000000..debce0b
--- /dev/null
+++ b/kern/mach_factor.c
@@ -0,0 +1,150 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/mach_factor.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Compute the Mach Factor.
+ */
+
+#include <mach/machine.h>
+#include <mach/processor_info.h>
+#include <kern/mach_clock.h>
+#include <kern/sched.h>
+#include <kern/processor.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+
+#include "mach_factor.h"
+
+long avenrun[3] = {0, 0, 0};
+long mach_factor[3] = {0, 0, 0};
+
+/*
+ * Values are scaled by LOAD_SCALE, defined in processor_info.h
+ */
+static long fract[3] = {
+ 800, /* (4.0/5.0) 5 second average */
+ 966, /* (29.0/30.0) 30 second average */
+ 983, /* (59.0/60.) 1 minute average */
+};
+
+/*
+ * Walk every processor set (under all_psets_lock) and recompute its
+ * smoothed load_average, mach_factor and sched_load from the current
+ * run-queue lengths.  For the default pset, also fold the results into
+ * the global avenrun[] / mach_factor[] arrays using the fract[] decay
+ * constants above (5s / 30s / 1min averages).
+ */
+void compute_mach_factor(void)
+{
+ processor_set_t pset;
+ processor_t processor;
+ int ncpus;
+ int nthreads;
+ long factor_now;
+ long average_now;
+ long load_now;
+
+ simple_lock(&all_psets_lock);
+ pset = (processor_set_t) queue_first(&all_psets);
+ while (!queue_end(&all_psets, (queue_entry_t)pset)) {
+
+ /*
+ * If no processors, this pset is in suspended animation.
+ * No load calculations are performed.
+ */
+ pset_lock(pset);
+ if((ncpus = pset->processor_count) > 0) {
+
+ /*
+ * Count number of threads.
+ */
+ nthreads = pset->runq.count;
+ processor = (processor_t) queue_first(&pset->processors);
+ while (!queue_end(&pset->processors,
+ (queue_entry_t)processor)) {
+ nthreads += processor->runq.count;
+ processor =
+ (processor_t) queue_next(&processor->processors);
+ }
+
+ /*
+ * account for threads on cpus.
+ */
+ nthreads += ncpus - pset->idle_count;
+
+ /*
+ * The current thread (running this calculation)
+ * doesn't count; it's always in the default pset.
+ */
+ if (pset == &default_pset)
+ nthreads -= 1;
+
+ /*
+ * Oversubscribed: mach factor shrinks below
+ * LOAD_SCALE; otherwise it counts free cpus.
+ */
+ if (nthreads > ncpus) {
+ factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1);
+ load_now = (nthreads << SCHED_SHIFT) / ncpus;
+ }
+ else {
+ factor_now = (ncpus - nthreads) * LOAD_SCALE;
+ load_now = SCHED_SCALE;
+ }
+
+ /*
+ * Load average and mach factor calculations for
+ * those that ask about these things.
+ * (4/5 old + 1/5 new exponential smoothing.)
+ */
+
+ average_now = nthreads * LOAD_SCALE;
+
+ pset->mach_factor =
+ ((pset->mach_factor << 2) + factor_now)/5;
+ pset->load_average =
+ ((pset->load_average << 2) + average_now)/5;
+
+ /*
+ * And some ugly stuff to keep w happy.
+ */
+ if (pset == &default_pset) {
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ mach_factor[i] = ( (mach_factor[i]*fract[i])
+ + (factor_now*(LOAD_SCALE-fract[i])) )
+ / LOAD_SCALE;
+ avenrun[i] = ( (avenrun[i]*fract[i])
+ + (average_now*(LOAD_SCALE-fract[i])) )
+ / LOAD_SCALE;
+ }
+ }
+
+ /*
+ * sched_load is the only thing used by scheduler.
+ * It is always at least 1 (i.e. SCHED_SCALE).
+ */
+ pset->sched_load = (pset->sched_load + load_now) >> 1;
+ }
+
+ pset_unlock(pset);
+ pset = (processor_set_t) queue_next(&pset->all_psets);
+ }
+
+ simple_unlock(&all_psets_lock);
+}
diff --git a/kern/mach_factor.h b/kern/mach_factor.h
new file mode 100644
index 0000000..0ec64be
--- /dev/null
+++ b/kern/mach_factor.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ *
+ */
+
+#ifndef _KERN_MACH_FACTOR_H_
+#define _KERN_MACH_FACTOR_H_
+
+#include <sys/types.h>
+
+extern void compute_mach_factor(void);
+
+#endif /* _KERN_MACH_FACTOR_H_ */
diff --git a/kern/mach_host.srv b/kern/mach_host.srv
new file mode 100644
index 0000000..a18ab1c
--- /dev/null
+++ b/kern/mach_host.srv
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#ifdef MIGRATING_THREADS
+#define thread_assign act_thread_assign
+#define thread_assign_default act_thread_assign_default
+#define thread_get_assignment act_thread_get_assignment
+#define thread_priority act_thread_priority
+#define thread_max_priority act_thread_max_priority
+#define thread_policy act_thread_policy
+#define thread_depress_abort act_thread_depress_abort
+#define thread_wire act_thread_wire
+#endif /* MIGRATING_THREADS */
+
+#include <mach/mach_host.defs>
diff --git a/kern/machine.c b/kern/machine.c
new file mode 100644
index 0000000..f757d14
--- /dev/null
+++ b/kern/machine.c
@@ -0,0 +1,672 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/machine.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1987
+ *
+ * Support for machine independent machine abstraction.
+ */
+
+#include <string.h>
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mach_types.h>
+#include <mach/machine.h>
+#include <mach/host_info.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/ipc_host.h>
+#include <kern/host.h>
+#include <kern/machine.h>
+#include <kern/mach_host.server.h>
+#include <kern/lock.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/printf.h>
+#include <machine/machspl.h> /* for splsched */
+#include <machine/model_dep.h>
+#include <machine/pcb.h>
+#include <sys/reboot.h>
+
+
+
+/*
+ * Exported variables:
+ */
+
+struct machine_info machine_info;
+struct machine_slot machine_slot[NCPUS];
+
+queue_head_t action_queue; /* assign/shutdown queue */
+def_simple_lock_data(,action_lock);
+
+/*
+ * cpu_up:
+ *
+ * Flag specified cpu as up and running. Called when a processor comes
+ * online.
+ */
+void cpu_up(int cpu)
+{
+ struct machine_slot *ms;
+ processor_t processor;
+ spl_t s;
+
+ processor = cpu_to_processor(cpu);
+ /* Lock order: pset lock(s) first, then the processor lock at
+ splsched. */
+ pset_lock(&default_pset);
+#if MACH_HOST
+ pset_lock(slave_pset);
+#endif
+ s = splsched();
+ processor_lock(processor);
+#if NCPUS > 1
+ init_ast_check(processor);
+#endif /* NCPUS > 1 */
+ ms = &machine_slot[cpu];
+ ms->running = TRUE;
+ machine_info.avail_cpus++;
+#if MACH_HOST
+ /* With MACH_HOST, secondary cpus come up in slave_pset; only
+ cpu 0 joins the default pset. */
+ if (cpu != 0)
+ pset_add_processor(slave_pset, processor);
+ else
+#endif
+ pset_add_processor(&default_pset, processor);
+ processor->state = PROCESSOR_RUNNING;
+ processor_unlock(processor);
+ splx(s);
+#if MACH_HOST
+ pset_unlock(slave_pset);
+#endif
+ pset_unlock(&default_pset);
+}
+
+/*
+ * host_reboot RPC: enter the kernel debugger if RB_DEBUGGER is set,
+ * otherwise halt or reboot all cpus.  halt_all_cpus() is passed TRUE
+ * unless RB_HALT is set (presumably a `reboot' flag -- confirm against
+ * machine/model_dep.h).  Rejects a null host port.
+ */
+kern_return_t
+host_reboot(const host_t host, int options)
+{
+ if (host == HOST_NULL)
+ return (KERN_INVALID_HOST);
+
+ if (options & RB_DEBUGGER) {
+ Debugger("Debugger");
+ } else {
+#ifdef parisc
+/* XXX this could be made common */
+ halt_all_cpus(options);
+#else
+ halt_all_cpus(!(options & RB_HALT));
+#endif
+ }
+ return (KERN_SUCCESS);
+}
+
+#if NCPUS > 1
+
+/*
+ * cpu_down:
+ *
+ * Flag specified cpu as down. Called when a processor is about to
+ * go offline.
+ */
+/*
+ * Mark the given cpu as no longer running and take its processor
+ * structure off line.  The processor has already been removed from
+ * its processor set by the caller.
+ */
+static void cpu_down(int cpu)
+{
+ processor_t p;
+ spl_t s;
+
+ s = splsched();
+ p = cpu_to_processor(cpu);
+ processor_lock(p);
+ machine_slot[cpu].running = FALSE;
+ machine_info.avail_cpus--;
+ p->processor_set_next = PROCESSOR_SET_NULL;
+ p->state = PROCESSOR_OFF_LINE;
+ processor_unlock(p);
+ splx(s);
+}
+
+/*
+ * processor_request_action - common internals of processor_assign
+ * and processor_shutdown. If new_pset is null, this is
+ * a shutdown, else it's an assign and caller must donate
+ * a reference.
+ */
+static void
+processor_request_action(
+ processor_t processor,
+ processor_set_t new_pset)
+{
+ processor_set_t pset;
+
+ /*
+ * Processor must be in a processor set. Must lock its idle lock to
+ * get at processor state.
+ */
+ pset = processor->processor_set;
+ simple_lock(&pset->idle_lock);
+
+ /*
+ * If the processor is dispatching, let it finish - it will set its
+ * state to running very soon.
+ */
+ while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING)
+ cpu_pause();
+
+ /*
+ * Now lock the action queue and do the dirty work.
+ */
+ simple_lock(&action_lock);
+
+ /* Note: the first two cases deliberately fall through so that an
+ idle processor is first dequeued, then queued for action. */
+ switch (processor->state) {
+ case PROCESSOR_IDLE:
+ /*
+ * Remove from idle queue.
+ */
+ queue_remove(&pset->idle_queue, processor, processor_t,
+ processor_queue);
+ pset->idle_count--;
+
+ /* fall through ... */
+ case PROCESSOR_RUNNING:
+ /*
+ * Put it on the action queue.
+ */
+ queue_enter(&action_queue, processor, processor_t,
+ processor_queue);
+
+ /* fall through ... */
+ case PROCESSOR_ASSIGN:
+ /*
+ * And ask the action_thread to do the work.
+ */
+
+ if (new_pset == PROCESSOR_SET_NULL) {
+ processor->state = PROCESSOR_SHUTDOWN;
+ }
+ else {
+ assert(processor->state != PROCESSOR_ASSIGN);
+ processor->state = PROCESSOR_ASSIGN;
+ processor->processor_set_next = new_pset;
+ }
+ break;
+
+ default:
+ printf("state: %d\n", processor->state);
+ panic("processor_request_action: bad state");
+ }
+ simple_unlock(&action_lock);
+ simple_unlock(&pset->idle_lock);
+
+ /* Wake action_thread() to perform the queued shutdown/assignment. */
+ thread_wakeup((event_t)&action_queue);
+}
+
+#if MACH_HOST
+/*
+ * processor_assign() changes the processor set that a processor is
+ * assigned to. Any previous assignment in progress is overridden.
+ * Synchronizes with assignment completion if wait is TRUE.
+ */
+/*
+ * Reassign PROCESSOR to NEW_PSET.  The caller keeps its own pset
+ * reference; one extra reference is taken here and donated to
+ * processor_request_action().  If WAIT, block until the assignment
+ * (or an overriding shutdown) completes.  Fails for the master
+ * processor and for processors that are off line or shutting down.
+ */
+kern_return_t
+processor_assign(
+ processor_t processor,
+ processor_set_t new_pset,
+ boolean_t wait)
+{
+ spl_t s;
+
+ /*
+ * Check for null arguments.
+ * XXX Can't assign master processor.
+ */
+ if (processor == PROCESSOR_NULL || new_pset == PROCESSOR_SET_NULL ||
+ processor == master_processor) {
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /*
+ * Get pset reference to donate to processor_request_action.
+ */
+ pset_reference(new_pset);
+
+ /*
+ * Check processor status.
+ * If shutdown or being shutdown, can`t reassign.
+ * If being assigned, wait for assignment to finish.
+ */
+Retry:
+ s = splsched();
+ processor_lock(processor);
+ if(processor->state == PROCESSOR_OFF_LINE ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ /*
+ * Already shutdown or being shutdown -- Can't reassign.
+ */
+ processor_unlock(processor);
+ (void) splx(s);
+ pset_deallocate(new_pset);
+ return(KERN_FAILURE);
+ }
+
+ /* A previous assignment is still in flight: sleep until the
+ action thread completes it, then re-examine the state. */
+ if (processor->state == PROCESSOR_ASSIGN) {
+ assert_wait((event_t) processor, TRUE);
+ processor_unlock(processor);
+ splx(s);
+ thread_block(thread_no_continuation);
+ goto Retry;
+ }
+
+ /*
+ * Avoid work if processor is already in this processor set.
+ */
+ if (processor->processor_set == new_pset) {
+ processor_unlock(processor);
+ (void) splx(s);
+ /* clean up dangling ref */
+ pset_deallocate(new_pset);
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * OK to start processor assignment.
+ */
+ processor_request_action(processor, new_pset);
+
+ /*
+ * Synchronization with completion.
+ */
+ if (wait) {
+ while (processor->state == PROCESSOR_ASSIGN ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ assert_wait((event_t)processor, TRUE);
+ processor_unlock(processor);
+ splx(s);
+ thread_block(thread_no_continuation);
+ s = splsched();
+ processor_lock(processor);
+ }
+ }
+ processor_unlock(processor);
+ splx(s);
+
+ return(KERN_SUCCESS);
+}
+
+#else /* MACH_HOST */
+
+/* Without MACH_HOST there are no user-visible processor sets, so a
+ processor can never be reassigned. */
+kern_return_t
+processor_assign(
+ processor_t processor,
+ processor_set_t new_pset,
+ boolean_t wait)
+{
+ return KERN_FAILURE;
+}
+
+#endif /* MACH_HOST */
+
+/*
+ * processor_shutdown() queues a processor up for shutdown.
+ * Any assignment in progress is overriden. It does not synchronize
+ * with the shutdown (can be called from interrupt level).
+ */
+kern_return_t
+processor_shutdown(processor_t processor)
+{
+ spl_t s;
+
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ processor_lock(processor);
+ if(processor->state == PROCESSOR_OFF_LINE ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ /*
+ * Already shutdown or being shutdown -- nothing to do.
+ * (Still reported as success to the caller.)
+ */
+ processor_unlock(processor);
+ splx(s);
+ return(KERN_SUCCESS);
+ }
+
+ /* Queue the processor for the action thread; a null pset means
+ "shut down" rather than "assign". */
+ processor_request_action(processor, PROCESSOR_SET_NULL);
+ processor_unlock(processor);
+ splx(s);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * processor_doaction actually does the shutdown. The trick here
+ * is to schedule ourselves onto a cpu and then save our
+ * context back into the runqs before taking out the cpu.
+ */
+static void processor_doaction(processor_t processor)
+{
+ thread_t this_thread;
+ spl_t s;
+ processor_set_t pset;
+#if MACH_HOST
+ processor_set_t new_pset;
+ thread_t thread;
+ thread_t prev_thread = THREAD_NULL;
+ boolean_t have_pset_ref = FALSE;
+#endif /* MACH_HOST */
+
+ /*
+ * Get onto the processor to shutdown
+ */
+ this_thread = current_thread();
+ thread_bind(this_thread, processor);
+ thread_block(thread_no_continuation);
+
+ pset = processor->processor_set;
+#if MACH_HOST
+ /*
+ * If this is the last processor in the processor_set,
+ * stop all the threads first.
+ */
+ pset_lock(pset);
+ if (pset->processor_count == 1) {
+ /*
+ * First suspend all of them.
+ */
+ queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
+ thread_hold(thread);
+ }
+ pset->empty = TRUE;
+ /*
+ * Now actually stop them. Need a pset reference.
+ */
+ pset->ref_count++;
+ have_pset_ref = TRUE;
+
+ /* The pset lock is dropped around each thread_dowait(); if a
+ thread migrates out of the pset meanwhile, rescan from the
+ head of the list. */
+Restart_thread:
+ prev_thread = THREAD_NULL;
+ queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
+ thread_reference(thread);
+ pset_unlock(pset);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+
+ /*
+ * Only wait for threads still in the pset.
+ */
+ thread_freeze(thread);
+ if (thread->processor_set != pset) {
+ /*
+ * It got away - start over.
+ */
+ thread_unfreeze(thread);
+ thread_deallocate(thread);
+ pset_lock(pset);
+ goto Restart_thread;
+ }
+
+ (void) thread_dowait(thread, TRUE);
+ prev_thread = thread;
+ pset_lock(pset);
+ thread_unfreeze(prev_thread);
+ }
+ }
+ pset_unlock(pset);
+
+ /*
+ * At this point, it is ok to remove the processor from the pset.
+ * We can use processor->processor_set_next without locking the
+ * processor, since it cannot change while processor->state is
+ * PROCESSOR_ASSIGN or PROCESSOR_SHUTDOWN.
+ */
+
+ new_pset = processor->processor_set_next;
+
+Restart_pset:
+ if (new_pset) {
+ /*
+ * Reassigning processor.
+ */
+
+ /* Take the two pset locks in address order to avoid
+ deadlock with a concurrent assignment the other way. */
+ if ((integer_t) pset < (integer_t) new_pset) {
+ pset_lock(pset);
+ pset_lock(new_pset);
+ }
+ else {
+ pset_lock(new_pset);
+ pset_lock(pset);
+ }
+ /* Target pset has died: retarget to the default pset. */
+ if (!(new_pset->active)) {
+ pset_unlock(new_pset);
+ pset_unlock(pset);
+ pset_deallocate(new_pset);
+ new_pset = &default_pset;
+ pset_reference(new_pset);
+ goto Restart_pset;
+ }
+
+ /*
+ * Handle remove last / assign first race.
+ * Only happens if there is more than one action thread.
+ */
+ while (new_pset->empty && new_pset->processor_count > 0) {
+ pset_unlock(new_pset);
+ pset_unlock(pset);
+ while (*(volatile boolean_t *)&new_pset->empty &&
+ *(volatile int *)&new_pset->processor_count > 0)
+ /* spin */;
+ goto Restart_pset;
+ }
+
+ /*
+ * Lock the processor. new_pset should not have changed.
+ */
+ s = splsched();
+ processor_lock(processor);
+ assert(processor->processor_set_next == new_pset);
+
+ /*
+ * Shutdown may have been requested while this assignment
+ * was in progress.
+ */
+ if (processor->state == PROCESSOR_SHUTDOWN) {
+ processor->processor_set_next = PROCESSOR_SET_NULL;
+ pset_unlock(new_pset);
+ goto shutdown; /* releases pset reference */
+ }
+
+ /*
+ * Do assignment, then wakeup anyone waiting for it.
+ */
+ pset_remove_processor(pset, processor);
+ pset_unlock(pset);
+
+ pset_add_processor(new_pset, processor);
+ if (new_pset->empty) {
+ /*
+ * Set all the threads loose.
+ *
+ * NOTE: this appears to violate the locking
+ * order, since the processor lock should
+ * be taken AFTER a thread lock. However,
+ * thread_setrun (called by thread_release)
+ * only takes the processor lock if the
+ * processor is idle. The processor is
+ * not idle here.
+ */
+ queue_iterate(&new_pset->threads, thread, thread_t,
+ pset_threads) {
+ thread_release(thread);
+ }
+ new_pset->empty = FALSE;
+ }
+ processor->processor_set_next = PROCESSOR_SET_NULL;
+ processor->state = PROCESSOR_RUNNING;
+ thread_wakeup((event_t)processor);
+ processor_unlock(processor);
+ splx(s);
+ pset_unlock(new_pset);
+
+ /*
+ * Clean up dangling references, and release our binding.
+ */
+ pset_deallocate(new_pset);
+ if (have_pset_ref)
+ pset_deallocate(pset);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+ thread_bind(this_thread, PROCESSOR_NULL);
+
+ thread_block(thread_no_continuation);
+ return;
+ }
+
+#endif /* MACH_HOST */
+
+ /*
+ * Do shutdown, make sure we live when processor dies.
+ */
+ if (processor->state != PROCESSOR_SHUTDOWN) {
+ printf("state: %d\n", processor->state);
+ panic("action_thread -- bad processor state");
+ }
+
+ s = splsched();
+ processor_lock(processor);
+
+#if MACH_HOST
+ shutdown:
+#endif /* MACH_HOST */
+ pset_remove_processor(pset, processor);
+ processor_unlock(processor);
+ pset_unlock(pset);
+ splx(s);
+
+ /*
+ * Clean up dangling references, and release our binding.
+ */
+#if MACH_HOST
+ if (new_pset != PROCESSOR_SET_NULL)
+ pset_deallocate(new_pset);
+ if (have_pset_ref)
+ pset_deallocate(pset);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+#endif /* MACH_HOST */
+
+ thread_bind(this_thread, PROCESSOR_NULL);
+ switch_to_shutdown_context(this_thread,
+ processor_doshutdown,
+ processor);
+
+}
+
+/*
+ * action_thread() shuts down processors or changes their assignment.
+ */
+/*
+ * Body of the action thread: drain action_queue, calling
+ * processor_doaction() on each queued processor, then sleep on the
+ * queue (resuming here via the thread_block continuation).
+ */
+void __attribute__((noreturn)) action_thread_continue(void)
+{
+ processor_t processor;
+ spl_t s;
+
+ while (TRUE) {
+ s = splsched();
+ simple_lock(&action_lock);
+ while ( !queue_empty(&action_queue)) {
+ processor = (processor_t) queue_first(&action_queue);
+ queue_remove(&action_queue, processor, processor_t,
+ processor_queue);
+ /* Drop the lock and spl around the (blocking) action. */
+ simple_unlock(&action_lock);
+ (void) splx(s);
+
+ processor_doaction(processor);
+
+ s = splsched();
+ simple_lock(&action_lock);
+ }
+
+ assert_wait((event_t) &action_queue, FALSE);
+ simple_unlock(&action_lock);
+ (void) splx(s);
+ counter(c_action_thread_block++);
+ thread_block(action_thread_continue);
+ }
+}
+
+/* Kernel-thread entry point: just runs the action loop forever. */
+void __attribute__((noreturn)) action_thread(void)
+{
+ action_thread_continue();
+ /*NOTREACHED*/
+}
+
+/*
+ * Actually do the processor shutdown. This is called at splsched,
+ * running on the processor's shutdown stack.
+ */
+
+void processor_doshutdown(processor_t processor)
+{
+ int cpu = processor->slot_num;
+
+ /* Switch time accounting to this cpu's kernel timer
+ (NOTE(review): presumed semantics of timer_switch -- confirm). */
+ timer_switch(&kernel_timer[cpu]);
+
+ /*
+ * Ok, now exit this cpu.
+ */
+ PMAP_DEACTIVATE_KERNEL(cpu);
+#ifndef MIGRATING_THREADS
+ percpu_array[cpu].active_thread = THREAD_NULL;
+#endif
+ cpu_down(cpu);
+ /* Wake anyone in processor_assign()/doaction waiting on us. */
+ thread_wakeup((event_t)processor);
+ halt_cpu();
+ /*
+ * The action thread returns to life after the call to
+ * switch_to_shutdown_context above, on some other cpu.
+ */
+
+ /*NOTREACHED*/
+}
+#else /* NCPUS > 1 */
+
+/* Uniprocessor build: the single processor can never be reassigned. */
+kern_return_t
+processor_assign(
+ processor_t processor,
+ processor_set_t new_pset,
+ boolean_t wait)
+{
+ return(KERN_FAILURE);
+}
+
+#endif /* NCPUS > 1 */
diff --git a/kern/machine.h b/kern/machine.h
new file mode 100644
index 0000000..5c55d2c
--- /dev/null
+++ b/kern/machine.h
@@ -0,0 +1,59 @@
+/*
+ * Machine abstraction functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Machine abstraction functions.
+ *
+ */
+
+#ifndef _MACHINE_H_
+#define _MACHINE_H_
+
+#include <mach/std_types.h>
+
+/*
+ * cpu_up:
+ *
+ * Flag specified cpu as up and running. Called when a processor comes
+ * online.
+ */
+extern void cpu_up (int);
+
+/*
+ * processor_assign() changes the processor set that a processor is
+ * assigned to. Any previous assignment in progress is overridden.
+ * Synchronizes with assignment completion if wait is TRUE.
+ */
+extern kern_return_t processor_assign (processor_t, processor_set_t, boolean_t);
+
+/*
+ * processor_shutdown() queues a processor up for shutdown.
+ * Any assignment in progress is overriden. It does not synchronize
+ * with the shutdown (can be called from interrupt level).
+ */
+extern kern_return_t processor_shutdown (processor_t);
+
+/*
+ * action_thread() shuts down processors or changes their assignment.
+ */
+extern void action_thread_continue (void) __attribute__((noreturn));
+extern void action_thread(void) __attribute__((noreturn));
+
+#endif /* _MACHINE_H_ */
diff --git a/kern/macros.h b/kern/macros.h
new file mode 100644
index 0000000..01deab6
--- /dev/null
+++ b/kern/macros.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2009-2015 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Helper macros.
+ *
+ * Upstream site with license notes :
+ * http://git.sceen.net/rbraun/librbraun.git/
+ */
+
+#ifndef _KERN_MACROS_H
+#define _KERN_MACROS_H
+
+#define MACRO_BEGIN ({
+#define MACRO_END })
+#define MACRO_RETURN if (1) return
+
+#define __QUOTE(x) #x
+#define QUOTE(x) __QUOTE(x)
+
+#ifdef __ASSEMBLER__
+#define DECL_CONST(x, s) x
+#else /* __ASSEMBLER__ */
+#define __DECL_CONST(x, s) x##s
+#define DECL_CONST(x, s) __DECL_CONST(x, s)
+#endif /* __ASSEMBLER__ */
+
+/* Compile-time length of a string literal / char array (excludes the
+ terminating NUL).  Do not use on a char pointer. */
+#define STRLEN(x) (sizeof(x) - 1)
+/* Element count of a true array; yields garbage on a pointer. */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+/* NOTE(review): both arguments are evaluated twice -- no side effects. */
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define DIV_CEIL(n, d) (((n) + (d) - 1) / (d))
+
+/* Power-of-two alignment helpers; `a' must be a power of two. */
+#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
+/* NOTE(review): ISP2(0) evaluates true, although 0 is not a power
+ of two -- callers must exclude zero themselves. */
+#define ISP2(x) P2ALIGNED(x, x)
+#define P2ALIGN(x, a) ((x) & -(a)) /* round down */
+#define P2ROUND(x, a) (-(-(x) & -(a))) /* round up */
+#define P2END(x, a) (-(~(x) & -(a))) /* next boundary strictly above x */
+
+#define structof(ptr, type, member) \
+ ((type *)((char *)(ptr) - offsetof(type, member)))
+
+#define access_once(x) (*(volatile typeof(x) *)&(x))
+
+#define alignof(x) __alignof__(x)
+
+#ifndef likely
+#define likely(expr) __builtin_expect(!!(expr), 1)
+#endif /* likely */
+#ifndef unlikely
+#define unlikely(expr) __builtin_expect(!!(expr), 0)
+#endif /* unlikely */
+
+#ifndef barrier
+#define barrier() asm volatile("" : : : "memory")
+#endif /* barrier */
+
+#define __noreturn __attribute__((noreturn))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __always_inline inline __attribute__((always_inline))
+#ifndef __section
+#define __section(x) __attribute__((section(x)))
+#endif /* __section */
+#define __packed __attribute__((packed))
+#define __alias(x) __attribute__((alias(x)))
+
+#define __format_printf(fmt, args) \
+ __attribute__((format(printf, fmt, args)))
+
+#endif /* _KERN_MACROS_H */
diff --git a/kern/pc_sample.c b/kern/pc_sample.c
new file mode 100644
index 0000000..497bd89
--- /dev/null
+++ b/kern/pc_sample.c
@@ -0,0 +1,306 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/mach_types.h> /* vm_address_t */
+#include <mach/std_types.h> /* pointer_t */
+#include <mach/pc_sample.h>
+#include <machine/trap.h>
+#include <kern/kalloc.h>
+#include <kern/host.h>
+#include <kern/thread.h>
+#include <kern/pc_sample.h>
+#include <kern/mach4.server.h>
+#include <kern/mach_clock.h>
+
+#if MACH_PCSAMPLE
+
+#define MAX_PC_SAMPLES 512
+
+typedef sampled_pc_t sampled_pcs[MAX_PC_SAMPLES];
+
+/*
+ * Record one PC sample for thread T into the ring buffer of sample
+ * control CP.  For user-mode interrupts the PC comes from
+ * interrupted_pc(t); otherwise the caller-supplied kernel PC is used.
+ *
+ * cp->seqno increases monotonically, and the slot for sequence number
+ * s is s % MAX_PC_SAMPLES, so the buffer always holds the most recent
+ * MAX_PC_SAMPLES samples.
+ */
+void take_pc_sample(
+    const thread_t t,
+    sample_control_t *cp,
+    sampled_pc_flavor_t flavor,
+    boolean_t usermode,
+    vm_offset_t kern_pc)
+{
+    vm_offset_t pc;
+    struct sampled_pc *sample;
+
+    if (usermode)
+	pc = interrupted_pc(t);
+    else
+	pc = kern_pc;
+
+    cp->seqno++;
+    sample = &((sampled_pc_t *)cp->buffer)[cp->seqno % MAX_PC_SAMPLES];
+    /* The thread's address doubles as the sample's identifier. */
+    sample->id = (rpc_vm_offset_t)(vm_offset_t)t;
+    sample->pc = (rpc_vm_offset_t)pc;
+    sample->sampletype = flavor;
+}
+
+/*
+ * Enable PC sampling of THREAD for the given sample flavors.
+ * The per-thread ring buffer is allocated on first use; the current
+ * clock tick is reported through *tickp.
+ */
+kern_return_t
+thread_enable_pc_sampling(
+    thread_t thread,
+    int *tickp,
+    sampled_pc_flavor_t flavors)
+{
+    if (thread == THREAD_NULL)
+	return KERN_INVALID_ARGUMENT;
+
+    if (thread->pc_sample.buffer == 0) {
+	vm_offset_t sample_buf = (vm_offset_t) kalloc(sizeof (sampled_pcs));
+
+	if (sample_buf == 0) {
+	    printf("thread_enable_pc_sampling: kalloc failed\n");
+	    return KERN_INVALID_ARGUMENT;
+	}
+	thread->pc_sample.buffer = sample_buf;
+	thread->pc_sample.seqno = 0;
+    }
+
+    *tickp = tick;
+    thread->pc_sample.sampletypes = flavors;
+    return KERN_SUCCESS;
+}
+
+/*
+ * Enable PC sampling of TASK for the given sample flavors.
+ * The per-task ring buffer is allocated on first use; the current
+ * clock tick is reported through *tickp.
+ */
+kern_return_t
+task_enable_pc_sampling(
+    task_t task,
+    int *tickp,
+    sampled_pc_flavor_t flavors)
+{
+    if (task == TASK_NULL)
+	return KERN_INVALID_ARGUMENT;
+
+    if (task->pc_sample.buffer == 0) {
+	vm_offset_t sample_buf = (vm_offset_t) kalloc(sizeof (sampled_pcs));
+
+	if (sample_buf == 0) {
+	    printf("task_enable_pc_sampling: kalloc failed\n");
+	    return KERN_INVALID_ARGUMENT;
+	}
+	task->pc_sample.buffer = sample_buf;
+	task->pc_sample.seqno = 0;
+    }
+
+    *tickp = tick;
+    task->pc_sample.sampletypes = flavors;
+    return KERN_SUCCESS;
+}
+
+/*
+ * Turn off PC sampling for THREAD and release its sample buffer.
+ *
+ * NOTE(review): *samplecntp is never written here, so the caller sees
+ * an unmodified/uninitialized count -- confirm whether the MIG
+ * interface expects a sample count to be returned.
+ */
+kern_return_t
+thread_disable_pc_sampling(
+    thread_t thread,
+    int *samplecntp)
+{
+    vm_offset_t buf;
+
+    if (thread == THREAD_NULL) {
+	return KERN_INVALID_ARGUMENT;
+    }
+    /* Free the ring buffer, if one was ever allocated. */
+    if ((buf = thread->pc_sample.buffer) != 0)
+	kfree(buf, sizeof (sampled_pcs));
+    thread->pc_sample.buffer = (vm_offset_t) 0;
+    thread->pc_sample.seqno = 0;
+    thread->pc_sample.sampletypes = 0;	/* shut off sampling */
+
+    return KERN_SUCCESS;
+}
+
+/*
+ * Turn off PC sampling for TASK and release its sample buffer.
+ *
+ * NOTE(review): *samplecntp is never written here, so the caller sees
+ * an unmodified/uninitialized count -- confirm whether the MIG
+ * interface expects a sample count to be returned.
+ */
+kern_return_t
+task_disable_pc_sampling(
+    task_t task,
+    int *samplecntp)
+{
+    vm_offset_t buf;
+
+    if (task == TASK_NULL) {
+	return KERN_INVALID_ARGUMENT;
+    }
+    /* Free the ring buffer, if one was ever allocated. */
+    if ((buf = task->pc_sample.buffer) != 0)
+	kfree(buf, sizeof (sampled_pcs));
+    task->pc_sample.buffer = (vm_offset_t) 0;
+    task->pc_sample.seqno = 0;
+    task->pc_sample.sampletypes = 0;	/* shut off sampling */
+
+    return KERN_SUCCESS;
+}
+
+/*
+ * Copy the samples recorded in CP since sequence number *seqnop into
+ * sampled_pcs_out, oldest first.  On return, *sampled_pcs_cntp holds
+ * the number of samples copied and *seqnop is advanced to the current
+ * sequence number.
+ *
+ * The buffer is a ring: the slot for sequence number s is
+ * s % MAX_PC_SAMPLES, so when more than MAX_PC_SAMPLES samples have
+ * arrived since *seqnop, only the newest MAX_PC_SAMPLES survive.
+ */
+static kern_return_t
+get_sampled_pcs(
+	sample_control_t *cp,
+	sampled_pc_seqno_t *seqnop,
+	sampled_pc_array_t sampled_pcs_out,
+	mach_msg_type_number_t *sampled_pcs_cntp)
+{
+	int nsamples;
+	sampled_pc_seqno_t seqidx1, seqidx2;
+
+	nsamples = cp->seqno - *seqnop;
+	seqidx1 = *seqnop % MAX_PC_SAMPLES;	/* index of *seqnop */
+	seqidx2 = cp->seqno % MAX_PC_SAMPLES;	/* index of cp->seqno */
+
+	/* Ring has lapped the caller's seqno: the oldest surviving
+	   sample lives in the slot just after seqidx2. */
+	if (nsamples > MAX_PC_SAMPLES) {
+		nsamples = MAX_PC_SAMPLES;
+		seqidx1 = (seqidx2 + 1) % MAX_PC_SAMPLES;
+	}
+
+	if (nsamples > 0) {
+		/*
+		 * Carefully copy sampled_pcs into sampled_pcs_msgbuf IN ORDER.
+		 */
+		if (seqidx1 < seqidx2) {
+			/*
+			 * Simple case: no wraparound.
+			 * Copy from seqidx1 to seqidx2.
+			 */
+			memcpy(sampled_pcs_out,
+			       (sampled_pc_array_t)cp->buffer + seqidx1 + 1,
+			       nsamples * sizeof(sampled_pc_t));
+		} else {
+			/* seqidx1 > seqidx2 -- Handle wraparound. */
+
+			memcpy(sampled_pcs_out,
+			       (sampled_pc_array_t)cp->buffer + seqidx1 + 1,
+			       (MAX_PC_SAMPLES - seqidx1 - 1) * sizeof(sampled_pc_t));
+
+			memcpy(sampled_pcs_out + (MAX_PC_SAMPLES - seqidx1 - 1),
+			       (sampled_pc_array_t)cp->buffer,
+			       (seqidx2 + 1) * sizeof(sampled_pc_t));
+		}
+	} else if (nsamples < 0) {
+		/* Bogus SEQNO supplied. */
+		nsamples = 0;
+	} else {
+		/* could either be zero because of overflow, or because
+		 * we are being lied to. In either case, return nothing.
+		 * If overflow, only once in a blue moon. If being lied to,
+		 * then we have no obligation to return anything useful anyway.
+		 */
+		;
+	}
+
+	*sampled_pcs_cntp = nsamples;
+	*seqnop = cp->seqno;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Copy out the PC samples accumulated for THREAD since *seqnop.
+ * Fails with KERN_FAILURE if sampling was never enabled.
+ */
+kern_return_t
+thread_get_sampled_pcs(
+    thread_t thread,
+    sampled_pc_seqno_t *seqnop,
+    sampled_pc_array_t sampled_pcs_out,
+    mach_msg_type_number_t *sampled_pcs_cntp)
+{
+    sample_control_t *cp;
+
+    if (thread == THREAD_NULL)
+	return KERN_INVALID_ARGUMENT;
+
+    cp = &thread->pc_sample;
+    if (cp->buffer == 0)
+	return KERN_FAILURE;
+
+    return get_sampled_pcs(cp, seqnop, sampled_pcs_out, sampled_pcs_cntp);
+}
+
+/*
+ * Copy out the PC samples accumulated for TASK since *seqnop.
+ * Fails with KERN_FAILURE if sampling was never enabled.
+ */
+kern_return_t
+task_get_sampled_pcs(
+    task_t task,
+    sampled_pc_seqno_t *seqnop,
+    sampled_pc_array_t sampled_pcs_out,
+    mach_msg_type_number_t *sampled_pcs_cntp)
+{
+    sample_control_t *cp;
+
+    if (task == TASK_NULL)
+	return KERN_INVALID_ARGUMENT;
+
+    cp = &task->pc_sample;
+    if (cp->buffer == 0)
+	return KERN_FAILURE;
+
+    return get_sampled_pcs(cp, seqnop, sampled_pcs_out, sampled_pcs_cntp);
+}
+
+#else /* MACH_PCSAMPLE */
+
+/*
+ * PC sampling is configured out of this kernel: every entry point
+ * simply fails with KERN_FAILURE so callers can detect the missing
+ * facility.
+ */
+
+kern_return_t
+thread_enable_pc_sampling(
+    thread_t thread,
+    int *tickp,
+    sampled_pc_flavor_t flavors)
+{
+    return KERN_FAILURE;	/* not implemented */
+}
+
+kern_return_t
+task_enable_pc_sampling(
+    task_t task,
+    int *tickp,
+    sampled_pc_flavor_t flavors)
+{
+    return KERN_FAILURE;	/* not implemented */
+}
+
+kern_return_t
+thread_disable_pc_sampling(
+    thread_t thread,
+    int *samplecntp)
+{
+    return KERN_FAILURE;	/* not implemented */
+}
+
+kern_return_t
+task_disable_pc_sampling(
+    task_t task,
+    int *samplecntp)
+{
+    return KERN_FAILURE;	/* not implemented */
+}
+
+kern_return_t
+thread_get_sampled_pcs(
+    thread_t thread,
+    sampled_pc_seqno_t *seqnop,
+    sampled_pc_array_t sampled_pcs_out,
+    mach_msg_type_number_t *sampled_pcs_cntp)
+{
+    return KERN_FAILURE;	/* not implemented */
+}
+
+kern_return_t
+task_get_sampled_pcs(
+    task_t task,
+    sampled_pc_seqno_t *seqnop,
+    sampled_pc_array_t sampled_pcs_out,
+    mach_msg_type_number_t *sampled_pcs_cntp)
+{
+    return KERN_FAILURE;	/* not implemented */
+}
+
+#endif /* MACH_PCSAMPLE */
diff --git a/kern/pc_sample.h b/kern/pc_sample.h
new file mode 100644
index 0000000..04ca667
--- /dev/null
+++ b/kern/pc_sample.h
@@ -0,0 +1,94 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * Revision 1.1.1.1 1997/02/25 21:28:25 thomas
+ * Initial source
+ *
+ * Revision 1.1.1.1 1996/10/30 01:38:13 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1994/11/02 02:24:15 law
+ * Initial revision
+ *
+ * Revision 2.2 93/11/17 19:06:01 dbg
+ * Moved kernel internal definitions here from mach/pc_sample.h.
+ * [93/09/24 dbg]
+ *
+ */
+
+/*
+ * Kernel definitions for PC sampling.
+ */
+#ifndef _KERN_PC_SAMPLE_H_
+#define _KERN_PC_SAMPLE_H_
+
+#include <mach/pc_sample.h>
+#include <mach/machine/vm_types.h>
+#include <kern/kern_types.h>
+#include <kern/macros.h>
+
+/*
+ * Control structure for sampling, included in
+ * threads and tasks. If sampletypes is 0, no
+ * sampling is done.
+ */
+
+struct sample_control {
+	vm_offset_t buffer;		/* sample ring buffer (see kern/pc_sample.c) */
+	unsigned int seqno;		/* sequence number of most recent sample */
+	sampled_pc_flavor_t sampletypes;	/* enabled flavors; 0 = sampling off */
+};
+
+typedef struct sample_control sample_control_t;
+
+/*
+ * Routines to take PC samples.
+ */
+extern void take_pc_sample(
+ thread_t thread,
+ sample_control_t *cp,
+ sampled_pc_flavor_t flavor,
+ boolean_t usermode,
+ vm_offset_t pc);
+
+/*
+ * Macro to do quick flavor check for sampling,
+ * on both threads and tasks.
+ *
+ * A sample is recorded against the thread's own control structure and
+ * against its task's, whenever either has the flavor enabled.
+ * NOTE: thread and flavor are evaluated more than once; pass
+ * side-effect-free expressions.
+ */
+#define take_pc_sample_macro(thread, flavor, usermode, pc) \
+    MACRO_BEGIN \
+	task_t task; \
+ \
+	if ((thread)->pc_sample.sampletypes & (flavor)) \
+	    take_pc_sample((thread), &(thread)->pc_sample, (flavor), usermode, pc); \
+ \
+	task = (thread)->task; \
+	if (task->pc_sample.sampletypes & (flavor)) \
+	    take_pc_sample((thread), &task->pc_sample, (flavor), usermode, pc); \
+    MACRO_END
+
+#endif /* _KERN_PC_SAMPLE_H_ */
diff --git a/kern/printf.c b/kern/printf.c
new file mode 100644
index 0000000..cbc27ae
--- /dev/null
+++ b/kern/printf.c
@@ -0,0 +1,656 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Common code for printf et al.
+ *
+ * The calling routine typically takes a variable number of arguments,
+ * and passes the address of the first one. This implementation
+ * assumes a straightforward, stack implementation, aligned to the
+ * machine's wordsize. Increasing addresses are assumed to point to
+ * successive arguments (left-to-right), as is the case for a machine
+ * with a downward-growing stack with arguments pushed right-to-left.
+ *
+ * To write, for example, fprintf() using this routine, the code
+ *
+ * fprintf(fd, format, args)
+ * FILE *fd;
+ * char *format;
+ * {
+ * va_list listp;
+ *	va_start(listp, format);
+ *	_doprnt(format, listp, fd);
+ * va_end(listp);
+ * }
+ *
+ * would suffice. (This example does not handle the fprintf's "return
+ * value" correctly, but who looks at the return value of fprintf
+ * anyway?)
+ *
+ * This version implements the following printf features:
+ *
+ * %d decimal conversion
+ * %u unsigned conversion
+ * %p pointer address
+ * %x hexadecimal conversion
+ * %X hexadecimal conversion with capital letters
+ * %o octal conversion
+ * %c character
+ * %s string
+ * %m.n field width, precision
+ * %-m.n left adjustment
+ * %0m.n zero-padding
+ * %*.* width and precision taken from arguments
+ *
+ * This version does not implement %f, %e, or %g. It accepts, but
+ * ignores, an `l' as in %ld, %lo, %lx, and %lu, and therefore will not
+ * work correctly on machines for which sizeof(long) != sizeof(int).
+ * It does not even parse %D, %O, or %U; you should be using %ld, %o and
+ * %lu if you mean long conversion.
+ *
+ * As mentioned, this version does not return any reasonable value.
+ *
+ * Permission is granted to use, modify, or propagate this code as
+ * long as this notice is incorporated.
+ *
+ * Steve Summit 3/25/87
+ */
+
+/*
+ * Added formats for decoding device registers:
+ *
+ * printf("reg = %b", regval, "<base><arg>*")
+ *
+ * where <base> is the output base expressed as a control character:
+ * i.e. '\10' gives octal, '\20' gives hex. Each <arg> is a sequence of
+ * characters, the first of which gives the bit number to be inspected
+ * (origin 1), and the rest (up to a control character (<= 32)) give the
+ * name of the register. Thus
+ * printf("reg = %b\n", 3, "\10\2BITTWO\1BITONE")
+ * would produce
+ * reg = 3<BITTWO,BITONE>
+ *
+ * If the second character in <arg> is also a control character, it
+ * indicates the last bit of a bit field. In this case, printf will extract
+ * bits <1> to <2> and print it. Characters following the second control
+ * character are printed before the bit field.
+ * printf("reg = %b\n", 0xb, "\10\4\3FIELD1=\2BITTWO\1BITONE")
+ * would produce
+ * reg = b<FIELD1=2,BITONE>
+ */
+/*
+ * Added for general use:
+ * # prefix for alternate format:
+ * 0x (0X) for hex
+ * leading 0 for octal
+ * + print '+' if positive
+ * blank print ' ' if positive
+ *
+ * z signed hexadecimal
+ * r signed, 'radix'
+ * n unsigned, 'radix'
+ *
+ * D,U,O,Z same as corresponding lower-case versions
+ * (compatibility)
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <device/cons.h>
+#include <kern/printf.h>
+#include <mach/boolean.h>
+
+
+#define isdigit(d) ((d) >= '0' && (d) <= '9')
+#define Ctod(c) ((c) - '0')
+
+#define MAXBUF (sizeof(long long int) * 8) /* enough for binary */
+
+
+/*
+ * Print the unsigned value U in the given BASE (2..16), emitting each
+ * character through (*putc)(ch, putc_arg).
+ */
+void printnum(
+	unsigned long long u,
+	int base,
+	void (*putc)( char, vm_offset_t ),
+	vm_offset_t putc_arg)
+{
+	static const char hexdigits[] = "0123456789abcdef";
+	char digits[MAXBUF];	/* digits, least significant first */
+	int ndigits = 0;
+
+	/* Peel off digits from the low end. */
+	do {
+		digits[ndigits++] = hexdigits[u % base];
+		u /= base;
+	} while (u != 0);
+
+	/* Emit them most significant first. */
+	while (ndigits > 0)
+		(*putc)(digits[--ndigits], putc_arg);
+}
+
+/* When TRUE, the lower-case integer conversions truncate their
+   operand to int width (see the `truncate' handling below). */
+boolean_t _doprnt_truncates = FALSE;
+
+/*
+ * _doprnt: core formatted-output engine shared by printf, sprintf,
+ * vsnprintf, iprintf, etc.  Parses FMT, pulling operands from ARGP,
+ * and emits every output character through (*putc)(ch, putc_arg).
+ * RADIX is the default base used by the %r/%R/%n/%N conversions.
+ * The supported conversions are documented at the top of this file.
+ */
+void _doprnt(
+	const char *fmt,
+	va_list argp,
+	/* character output routine */
+	void (*putc)( char, vm_offset_t),
+	int radix, /* default radix - for '%r' */
+	vm_offset_t putc_arg)
+{
+	int length;		/* minimum field width */
+	int prec;		/* precision, -1 if unspecified */
+	boolean_t ladjust;	/* left-justify within the field? */
+	char padc;		/* pad character: ' ' or '0' */
+	long long n;		/* signed operand / scratch counter */
+	unsigned long long u;	/* magnitude being converted */
+	int have_long_long;	/* saw the "ll" length modifier */
+	int plus_sign;		/* '+' or ' ' to show before positives */
+	int sign_char;		/* sign character actually emitted */
+	boolean_t altfmt, truncate;	/* '#' flag; truncate to int width */
+	int base;		/* numeric base for this conversion */
+	char c;
+
+	while ((c = *fmt) != '\0') {
+	    /* Ordinary characters are copied through unchanged. */
+	    if (c != '%') {
+		(*putc)(c, putc_arg);
+		fmt++;
+		continue;
+	    }
+
+	    fmt++;
+
+	    /* Reset per-conversion state. */
+	    length = 0;
+	    prec = -1;
+	    ladjust = FALSE;
+	    padc = ' ';
+	    plus_sign = 0;
+	    sign_char = 0;
+	    altfmt = FALSE;
+	    have_long_long = FALSE;
+
+	    /* Parse the flag characters: '#', '-', '+', ' '. */
+	    while (TRUE) {
+		c = *fmt;
+		if (c == '#') {
+		    altfmt = TRUE;
+		}
+		else if (c == '-') {
+		    ladjust = TRUE;
+		}
+		else if (c == '+') {
+		    plus_sign = '+';
+		}
+		else if (c == ' ') {
+		    /* '+' takes precedence over ' '. */
+		    if (plus_sign == 0)
+			plus_sign = ' ';
+		}
+		else
+		    break;
+		fmt++;
+	    }
+
+	    /* A leading '0' in the width selects zero padding. */
+	    if (c == '0') {
+		padc = '0';
+		c = *++fmt;
+	    }
+
+	    /* Field width: digits, or '*' taken from the arguments
+	       (a negative '*' width flips the adjustment). */
+	    if (isdigit(c)) {
+		while(isdigit(c)) {
+		    length = 10 * length + Ctod(c);
+		    c = *++fmt;
+		}
+	    }
+	    else if (c == '*') {
+		length = va_arg(argp, int);
+		c = *++fmt;
+		if (length < 0) {
+		    ladjust = !ladjust;
+		    length = -length;
+		}
+	    }
+
+	    /* Precision: ".digits" or ".*". */
+	    if (c == '.') {
+		c = *++fmt;
+		if (isdigit(c)) {
+		    prec = 0;
+		    while(isdigit(c)) {
+			prec = 10 * prec + Ctod(c);
+			c = *++fmt;
+		    }
+		}
+		else if (c == '*') {
+		    prec = va_arg(argp, int);
+		    c = *++fmt;
+		}
+	    }
+
+	    if (c == 'l')
+		c = *++fmt;	/* need it if sizeof(int) < sizeof(long) */
+	    if (c == 'l') {
+		c = *++fmt;	/* handle `long long' */
+		have_long_long = TRUE;
+	    }
+
+	    truncate = FALSE;
+
+	    switch(c) {
+		case 'b':
+		case 'B':
+		{
+		    char *p;
+		    boolean_t any;
+		    int i;
+
+		    if (! have_long_long)
+			u = va_arg(argp, unsigned long);
+		    else
+			u = va_arg(argp, unsigned long long);
+		    /* The extra argument describes the bits; its first
+		       byte is the output base (e.g. '\10' or '\20'). */
+		    p = va_arg(argp, char *);
+		    base = *p++;
+		    printnum(u, base, putc, putc_arg);
+
+		    if (u == 0)
+			break;
+
+		    any = FALSE;
+		    while ((i = *p++)) {
+			/* NOTE: The '32' here is because ascii space */
+			if (*p <= 32) {
+			    /*
+			     * Bit field
+			     */
+			    int j;
+			    if (any)
+				(*putc)(',', putc_arg);
+			    else {
+				(*putc)('<', putc_arg);
+				any = TRUE;
+			    }
+			    j = *p++;
+			    for (; (c = *p) > 32; p++)
+				(*putc)(c, putc_arg);
+			    printnum((unsigned)( (u>>(j-1)) & ((2<<(i-j))-1)),
+					base, putc, putc_arg);
+			}
+			else if (u & (1<<(i-1))) {
+			    /* Single bit, and it is set: name it. */
+			    if (any)
+				(*putc)(',', putc_arg);
+			    else {
+				(*putc)('<', putc_arg);
+				any = TRUE;
+			    }
+			    for (; (c = *p) > 32; p++)
+				(*putc)(c, putc_arg);
+			}
+			else {
+			    /* Bit clear: skip over its name. */
+			    for (; *p > 32; p++)
+				continue;
+			}
+		    }
+		    if (any)
+			(*putc)('>', putc_arg);
+		    break;
+		}
+
+		case 'c':
+		    c = va_arg(argp, int);
+		    (*putc)(c, putc_arg);
+		    break;
+
+		case 's':
+		{
+		    char *p;
+		    char *p2;
+
+		    if (prec == -1)
+			prec = 0x7fffffff;	/* MAXINT */
+
+		    p = va_arg(argp, char *);
+
+		    if (p == (char *)0)
+			p = "";
+
+		    /* Right-justified: measure first, then pad. */
+		    if (length > 0 && !ladjust) {
+			n = 0;
+			p2 = p;
+
+			for (; *p != '\0' && n < prec; p++)
+			    n++;
+
+			p = p2;
+
+			while (n < length) {
+			    (*putc)(' ', putc_arg);
+			    n++;
+			}
+		    }
+
+		    n = 0;
+
+		    while (*p != '\0') {
+			if (++n > prec)
+			    break;
+
+			(*putc)(*p++, putc_arg);
+		    }
+
+		    /* Left-justified: pad after the string. */
+		    if (n < length && ladjust) {
+			while (n < length) {
+			    (*putc)(' ', putc_arg);
+			    n++;
+			}
+		    }
+
+		    break;
+		}
+
+		case 'o':
+		    truncate = _doprnt_truncates;
+		    /* fall through */
+		case 'O':
+		    base = 8;
+		    goto print_unsigned;
+
+		case 'd':
+		    truncate = _doprnt_truncates;
+		    /* fall through */
+		case 'D':
+		    base = 10;
+		    goto print_signed;
+
+		case 'u':
+		    truncate = _doprnt_truncates;
+		    /* fall through */
+		case 'U':
+		    base = 10;
+		    goto print_unsigned;
+
+		case 'p':
+		case 'x':
+		    truncate = _doprnt_truncates;
+		    /* fall through */
+		case 'X':
+		    base = 16;
+		    goto print_unsigned;
+
+		case 'z':
+		    truncate = _doprnt_truncates;
+		    /* fall through */
+		case 'Z':
+		    base = 16;
+		    goto print_signed;
+
+		case 'r':
+		    truncate = _doprnt_truncates;
+		    /* fall through */
+		case 'R':
+		    base = radix;
+		    goto print_signed;
+
+		case 'n':
+		    truncate = _doprnt_truncates;
+		    /* fall through */
+		case 'N':
+		    base = radix;
+		    goto print_unsigned;
+
+		print_signed:
+		    /* Split a signed operand into magnitude and sign. */
+		    if (! have_long_long)
+			n = va_arg(argp, long);
+		    else
+			n = va_arg(argp, long long);
+		    if (n >= 0) {
+			u = n;
+			sign_char = plus_sign;
+		    }
+		    else {
+			u = -n;
+			sign_char = '-';
+		    }
+		    goto print_num;
+
+		print_unsigned:
+		    if (! have_long_long)
+			u = va_arg(argp, unsigned long);
+		    else
+			u = va_arg(argp, unsigned long long);
+		    goto print_num;
+
+		print_num:
+		{
+		    char buf[MAXBUF];	/* build number here */
+		    char * p = &buf[MAXBUF-1];
+		    static char digits[] = "0123456789abcdef";
+		    char *prefix = 0;
+
+		    if (truncate) u = (long)((int)(u));
+
+		    if (u != 0 && altfmt) {
+			if (base == 8)
+			    prefix = "0";
+			else if (base == 16)
+			    prefix = "0x";
+		    }
+
+		    /* Generate the digits backwards, low order first. */
+		    do {
+			*p-- = digits[u % base];
+			u /= base;
+		    } while (u != 0);
+
+		    /* Padding still owed = field width minus digits,
+		       sign character and prefix. */
+		    length -= (&buf[MAXBUF-1] - p);
+		    if (sign_char)
+			length--;
+		    if (prefix)
+			length -= strlen(prefix);
+
+		    if (padc == ' ' && !ladjust) {
+			/* blank padding goes before prefix */
+			while (--length >= 0)
+			    (*putc)(' ', putc_arg);
+		    }
+		    if (sign_char)
+			(*putc)(sign_char, putc_arg);
+		    if (prefix)
+			while (*prefix)
+			    (*putc)(*prefix++, putc_arg);
+		    if (padc == '0') {
+			/* zero padding goes after sign and prefix */
+			while (--length >= 0)
+			    (*putc)('0', putc_arg);
+		    }
+		    while (++p != &buf[MAXBUF])
+			(*putc)(*p, putc_arg);
+
+		    if (ladjust) {
+			while (--length >= 0)
+			    (*putc)(' ', putc_arg);
+		    }
+		    break;
+		}
+
+		case '\0':
+		    /* Format string ended with a bare '%'. */
+		    fmt--;
+		    break;
+
+		default:
+		    (*putc)(c, putc_arg);
+	    }
+	    fmt++;
+	}
+}
+
+/*
+ * Printing (to console)
+ */
+
+/* Format to the system console; default radix 16 for %r/%n. */
+int vprintf(const char *fmt, va_list listp)
+{
+	void (*out)( char, vm_offset_t ) =
+		(void (*)( char, vm_offset_t)) cnputc;
+
+	_doprnt(fmt, listp, out, 16, 0);
+	return 0;
+}
+
+/*VARARGS1*/
+/* Kernel printf: format to the system console.  Always returns 0. */
+int printf(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vprintf(fmt, args);
+	va_end(args);
+	return 0;
+}
+
+int indent = 0;
+
+/*
+ * Printing (to console) with indentation.
+ */
+/*VARARGS1*/
+/*
+ * printf preceded by `indent' columns of whitespace: tabs for each
+ * full 8 columns, then single spaces for the remainder.
+ */
+void iprintf(const char *fmt, ...)
+{
+	va_list args;
+	int remaining = indent;
+
+	while (remaining >= 8) {
+		printf("\t");
+		remaining -= 8;
+	}
+	while (remaining > 0) {
+		printf(" ");
+		remaining--;
+	}
+
+	va_start(args, fmt);
+	_doprnt(fmt, args, (void (*)( char, vm_offset_t)) cnputc, 16, 0);
+	va_end(args);
+}
+
+/*
+ * Printing to generic buffer
+ * Returns #bytes printed.
+ * Strings are zero-terminated.
+ */
+/* putc routine for sprintf: append C at *(char **)arg and advance. */
+static void
+sputc(
+	char c,
+	vm_offset_t arg)
+{
+	char **bufp = (char **) arg;
+
+	*(*bufp)++ = c;
+}
+
+/*
+ * Format into BUF (unbounded -- caller must size it), NUL-terminate,
+ * and return the number of characters written (excluding the NUL).
+ */
+int
+sprintf(char *buf, const char *fmt, ...)
+{
+	va_list args;
+	char *begin = buf;
+
+	va_start(args, fmt);
+	_doprnt(fmt, args, sputc, 16, (vm_offset_t)&buf);
+	va_end(args);
+	*buf = 0;
+
+	return (buf - begin);
+}
+
+/* State threaded through snputc for the bounded vsnprintf. */
+struct vsnprintf_cookie
+{
+	char *buf;	/* destination buffer */
+	int index;	/* characters stored so far */
+	int max_len;	/* capacity limit for stored characters */
+};
+
+/* putc routine for vsnprintf: store C unless the limit is reached. */
+static void
+snputc(char c, vm_offset_t arg)
+{
+	struct vsnprintf_cookie *s = (struct vsnprintf_cookie *) arg;
+
+	if (s->index >= s->max_len)
+		return;
+
+	s->buf[s->index] = c;
+	s->index++;
+}
+
+/*
+ * Format into BUF, storing at most SIZE bytes including the trailing
+ * NUL, and return the number of characters stored (excluding the NUL).
+ *
+ * Fix: reserve one byte for the terminator.  Previously max_len was
+ * SIZE, so a full buffer made the terminating store write buf[SIZE],
+ * one byte past the caller's buffer; SIZE == 0 wrote buf[0].
+ *
+ * NOTE(review): unlike C99 vsnprintf, the return value is the
+ * truncated length, not the would-be length -- kept for existing
+ * callers.
+ */
+int
+vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	struct vsnprintf_cookie cookie
+	  = { .buf = buf, .index = 0,
+	      .max_len = (size > 0) ? (int) size - 1 : 0 };
+
+	_doprnt (fmt, args, snputc, 16, (vm_offset_t)&cookie);
+	if (size > 0)
+		cookie.buf[cookie.index] = '\0';
+
+	return cookie.index;
+}
+
+/* Bounded formatting into BUF; see vsnprintf for the semantics. */
+int
+snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int len;
+
+	va_start(args, fmt);
+	len = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	return len;
+}
+
+/*
+ * Read one line from the console into STR (at most MAXLEN bytes
+ * including the trailing NUL), echoing as it goes.  Simple line
+ * editing: backspace, '#' or DEL erase one character; '@' or
+ * control-U kill the whole line.  Printable characters beyond the
+ * buffer limit are dropped with a beep.
+ */
+void safe_gets(
+	char *str,
+	int maxlen)
+{
+	char *lp;
+	int c;
+	char *strmax = str + maxlen - 1; /* allow space for trailing 0 */
+
+	lp = str;
+	for (;;) {
+	    c = cngetc();
+	    switch (c) {
+		case '\n':
+		case '\r':
+		    printf("\n");
+		    *lp++ = 0;
+		    return;
+
+		case '\b':
+		case '#':
+		case '\177':
+		    /* Erase one character, if there is one. */
+		    if (lp > str) {
+			printf("\b \b");
+			lp--;
+		    }
+		    continue;
+
+		case '@':
+		case 'u'&037:		/* control-U: kill the line */
+		    lp = str;
+		    printf("\n\r");
+		    continue;
+
+		default:
+		    /* Accept only printable ASCII. */
+		    if (c >= ' ' && c < '\177') {
+			if (lp < strmax) {
+			    *lp++ = c;
+			    printf("%c", c);
+			}
+			else {
+			    printf("%c", '\007'); /* beep */
+			}
+		    }
+	    }
+	}
+}
diff --git a/kern/printf.h b/kern/printf.h
new file mode 100644
index 0000000..b72640a
--- /dev/null
+++ b/kern/printf.h
@@ -0,0 +1,68 @@
+/*
+ * Header file for printf type functions.
+ * Copyright (C) 2006, 2007 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/*
+ * String handling functions.
+ *
+ */
+
+#ifndef _MACH_SA_SYS_PRINTF_H_
+#define _MACH_SA_SYS_PRINTF_H_
+
+#include <sys/types.h>
+#include <stdarg.h>
+
+extern void _doprnt (const char *fmt,
+ va_list argp,
+ void (*putc)(char, vm_offset_t),
+ int radix,
+ vm_offset_t putc_arg);
+
+extern void printnum (unsigned long long u, int base,
+ void (*putc)(char, vm_offset_t),
+ vm_offset_t putc_arg);
+
+extern int sprintf (char *buf, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int snprintf (char *buf, size_t size, const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vsnprintf (char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+
+
+extern int printf (const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+/*
+ * Print the given message at most once per call site; subsequent
+ * executions are silent.
+ * NOTE(review): expands to MACRO_BEGIN/MACRO_END from <kern/macros.h>,
+ * which this header does not include itself -- confirm all users get
+ * it transitively.
+ */
+#define printf_once(fmt, ...)			\
+	MACRO_BEGIN				\
+	static int __once = 0;			\
+	if (!__once) {				\
+		printf(fmt, ##__VA_ARGS__);	\
+		__once = 1;			\
+	}					\
+	MACRO_END
+
+extern int indent;
+extern void iprintf (const char *fmt, ...);
+
+extern int vprintf(const char *fmt, va_list listp);
+
+extern void safe_gets (char *str, int maxlen);
+
+#endif /* _MACH_SA_SYS_PRINTF_H_ */
+
diff --git a/kern/priority.c b/kern/priority.c
new file mode 100644
index 0000000..fe11d0d
--- /dev/null
+++ b/kern/priority.c
@@ -0,0 +1,223 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: priority.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Clock primitives.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/machine.h>
+#include <kern/host.h>
+#include <kern/mach_clock.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/priority.h>
+#include <kern/processor.h>
+#include <kern/timer.h>
+#include <machine/machspl.h>
+
+
+
+/*
+ * USAGE_THRESHOLD is the amount by which usage must change to
+ * cause a priority shift that moves a thread between run queues.
+ */
+
+#ifdef PRI_SHIFT_2
+#if PRI_SHIFT_2 > 0
+#define USAGE_THRESHOLD (((1 << PRI_SHIFT) + (1 << PRI_SHIFT_2)) << (2 + SCHED_SHIFT))
+#else /* PRI_SHIFT_2 > 0 */
+#define USAGE_THRESHOLD (((1 << PRI_SHIFT) - (1 << -(PRI_SHIFT_2))) << (2 + SCHED_SHIFT))
+#endif /* PRI_SHIFT_2 > 0 */
+#else /* PRI_SHIFT_2 */
+#define USAGE_THRESHOLD (1 << (PRI_SHIFT + 2 + SCHED_SHIFT))
+#endif /* PRI_SHIFT_2 */
+
+/*
+ * thread_quantum_update:
+ *
+ * Recalculate the quantum and priority for a thread.
+ * The number of ticks that has elapsed since we were last called
+ * is passed as "nticks."
+ *
+ * Called only from clock_interrupt().
+ */
+
+/*
+ * Charge NTICKS clock ticks against THREAD on processor MYCPU and, if
+ * its quantum has expired or enough CPU usage has accumulated,
+ * recompute its scheduling priority.  STATE is the CPU state at the
+ * interrupt (idle CPUs are not charged).  Runs at clock-interrupt
+ * level; see the comment above for details.
+ */
+void thread_quantum_update(
+	int mycpu,
+	thread_t thread,
+	int nticks,
+	int state)
+{
+	int quantum;
+	processor_t myprocessor;
+#if NCPUS > 1
+	processor_set_t pset;
+#endif
+	spl_t s;
+
+	myprocessor = cpu_to_processor(mycpu);
+#if NCPUS > 1
+	pset = myprocessor->processor_set;
+	if (pset == 0) {
+	    /*
+	     * Processor is being reassigned.
+	     * Should rewrite processor assignment code to
+	     * block clock interrupts.
+	     */
+	    return;
+	}
+#endif /* NCPUS > 1 */
+
+	/*
+	 * Account for thread's utilization of these ticks.
+	 * This assumes that there is *always* a current thread.
+	 * When the processor is idle, it should be the idle thread.
+	 */
+
+	/*
+	 * Update set_quantum and calculate the current quantum.
+	 */
+#if NCPUS > 1
+	/* Index the pset's quantum table by the number of runnable
+	   threads, capped at the set's processor count. */
+	pset->set_quantum = pset->machine_quantum[
+		((pset->runq.count > pset->processor_count) ?
+		  pset->processor_count : pset->runq.count)];
+
+	/* A locally-queued thread forces the minimum quantum. */
+	if (myprocessor->runq.count != 0)
+	    quantum = min_quantum;
+	else
+	    quantum = pset->set_quantum;
+#else /* NCPUS > 1 */
+	quantum = min_quantum;
+	default_pset.set_quantum = quantum;
+#endif /* NCPUS > 1 */
+
+	/*
+	 * Now recompute the priority of the thread if appropriate.
+	 */
+
+	if (state != CPU_STATE_IDLE) {
+	    myprocessor->quantum -= nticks;
+#if NCPUS > 1
+	    /*
+	     * Runtime quantum adjustment. Use quantum_adj_index
+	     * to avoid synchronizing quantum expirations.
+	     */
+	    if ((quantum != myprocessor->last_quantum) &&
+		(pset->processor_count > 1)) {
+		myprocessor->last_quantum = quantum;
+		simple_lock(&pset->quantum_adj_lock);
+		quantum = min_quantum + (pset->quantum_adj_index *
+					 (quantum - min_quantum)) /
+					    (pset->processor_count - 1);
+		if (++(pset->quantum_adj_index) >=
+		    pset->processor_count)
+		    pset->quantum_adj_index = 0;
+		simple_unlock(&pset->quantum_adj_lock);
+	    }
+#endif /* NCPUS > 1 */
+	    if (myprocessor->quantum <= 0) {
+		/* Quantum expired: bring the priority up to date... */
+		s = splsched();
+		thread_lock(thread);
+		if (thread->sched_stamp != sched_tick) {
+		    update_priority(thread);
+		}
+		else {
+		    /* Stamp is current: fold in the CPU time consumed
+		       since the last recomputation.  (With MACH_FIXPRI,
+		       only timesharing threads age this way.) */
+		    if (
+#if MACH_FIXPRI
+			(thread->policy == POLICY_TIMESHARE) &&
+#endif /* MACH_FIXPRI */
+			(thread->depress_priority < 0)) {
+			thread_timer_delta(thread);
+			thread->sched_usage +=
+			    thread->sched_delta;
+			thread->sched_delta = 0;
+			compute_my_priority(thread);
+		    }
+		}
+		thread_unlock(thread);
+		(void) splx(s);
+		/*
+		 * This quantum is up, give this thread another.
+		 */
+		myprocessor->first_quantum = FALSE;
+		/* Note: the braces of this branch pair up only because
+		   both arms are under the same MACH_FIXPRI conditional. */
+#if MACH_FIXPRI
+		if (thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+		    myprocessor->quantum += quantum;
+#if MACH_FIXPRI
+		}
+		else {
+		    /*
+		     * Fixed priority has per-thread quantum.
+		     *
+		     */
+		    myprocessor->quantum += thread->sched_data;
+		}
+#endif /* MACH_FIXPRI */
+	    }
+	    /*
+	     * Recompute priority if appropriate.
+	     */
+	    else {
+		s = splsched();
+		thread_lock(thread);
+		if (thread->sched_stamp != sched_tick) {
+		    update_priority(thread);
+		}
+		else {
+		    /* Only recompute once enough usage has accumulated
+		       to make a visible priority difference. */
+		    if (
+#if MACH_FIXPRI
+			(thread->policy == POLICY_TIMESHARE) &&
+#endif /* MACH_FIXPRI */
+			(thread->depress_priority < 0)) {
+			thread_timer_delta(thread);
+			if (thread->sched_delta >= USAGE_THRESHOLD) {
+			    thread->sched_usage +=
+				thread->sched_delta;
+			    thread->sched_delta = 0;
+			    compute_my_priority(thread);
+			}
+		    }
+		}
+		thread_unlock(thread);
+		(void) splx(s);
+	    }
+	    /*
+	     * Check for and schedule ast if needed.
+	     */
+	    ast_check();
+	}
+}
+
diff --git a/kern/priority.h b/kern/priority.h
new file mode 100644
index 0000000..2da93eb
--- /dev/null
+++ b/kern/priority.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_PRIORITY_H_
+#define _KERN_PRIORITY_H_
+
+extern void thread_quantum_update(
+ int mycpu,
+ thread_t thread,
+ int nticks,
+ int state);
+
+#endif /* _KERN_PRIORITY_H_ */
diff --git a/kern/processor.c b/kern/processor.c
new file mode 100644
index 0000000..71bbb75
--- /dev/null
+++ b/kern/processor.c
@@ -0,0 +1,1034 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * processor.c: processor and processor_set manipulation routines.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/policy.h>
+#include <mach/processor_info.h>
+#include <mach/vm_param.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/kalloc.h>
+#include <kern/lock.h>
+#include <kern/host.h>
+#include <kern/ipc_tt.h>
+#include <kern/machine.h>
+#include <kern/processor.h>
+#include <kern/sched.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/ipc_host.h>
+#include <ipc/ipc_port.h>
+#include <machine/mp_desc.h>
+
+#if MACH_HOST
+#include <kern/slab.h>
+struct kmem_cache pset_cache;
+struct processor_set *slave_pset;
+#endif /* MACH_HOST */
+
+
+/*
+ * Exported variables.
+ */
+int master_cpu;
+
+struct processor_set default_pset;
+
+queue_head_t all_psets;
+int all_psets_count;
+def_simple_lock_data(, all_psets_lock);
+
+processor_t master_processor;
+
+/*
+ * Bootstrap the processor/pset system so the scheduler can run.
+ */
+void pset_sys_bootstrap(void)
+{
+ int i;
+
+ pset_init(&default_pset);
+ default_pset.empty = FALSE;
+ for (i = 0; i < NCPUS; i++) {
+ /*
+ * Initialize processor data structures.
+ * Note that cpu_to_processor is processor_ptr.
+ */
+ processor_init(processor_ptr(i), i);
+ }
+ master_processor = cpu_to_processor(master_cpu);
+ queue_init(&all_psets);
+ simple_lock_init(&all_psets_lock);
+ queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
+ all_psets_count = 1;
+ default_pset.active = TRUE;
+ default_pset.empty = FALSE;
+
+ /*
+ * Note: the default_pset has a max_priority of BASEPRI_USER.
+ * Internal kernel threads override this in kernel_thread.
+ */
+}
+
+#if MACH_HOST
+/*
+ * Rest of pset system initializations.
+ */
+void pset_sys_init(void)
+{
+ int i;
+ processor_t processor;
+
+ /*
+ * Allocate the cache for processor sets.
+ */
+ kmem_cache_init(&pset_cache, "processor_set",
+ sizeof(struct processor_set), 0, NULL, 0);
+
+ /*
+ * Give each processor a control port.
+ * The master processor already has one.
+ */
+ for (i = 0; i < NCPUS; i++) {
+ processor = cpu_to_processor(i);
+ if (processor != master_processor &&
+ machine_slot[i].is_cpu)
+ {
+ ipc_processor_init(processor);
+ }
+ }
+
+ processor_set_create(&realhost, &slave_pset, &slave_pset);
+}
+#endif /* MACH_HOST */
+
+/*
+ * Initialize the given processor_set structure.
+ */
+
+void pset_init(
+ processor_set_t pset)
+{
+ int i;
+
+ simple_lock_init(&pset->runq.lock);
+ pset->runq.low = 0;
+ pset->runq.count = 0;
+ for (i = 0; i < NRQS; i++) {
+ queue_init(&(pset->runq.runq[i]));
+ }
+ queue_init(&pset->idle_queue);
+ pset->idle_count = 0;
+ simple_lock_init(&pset->idle_lock);
+ queue_init(&pset->processors);
+ pset->processor_count = 0;
+ pset->empty = TRUE;
+ queue_init(&pset->tasks);
+ pset->task_count = 0;
+ queue_init(&pset->threads);
+ pset->thread_count = 0;
+ pset->ref_count = 1;
+ simple_lock_init(&pset->ref_lock);
+ queue_init(&pset->all_psets);
+ pset->active = FALSE;
+ simple_lock_init(&pset->lock);
+ pset->pset_self = IP_NULL;
+ pset->pset_name_self = IP_NULL;
+ pset->max_priority = BASEPRI_USER;
+#if MACH_FIXPRI
+ pset->policies = POLICY_TIMESHARE;
+#endif /* MACH_FIXPRI */
+ pset->set_quantum = min_quantum;
+#if NCPUS > 1
+ pset->quantum_adj_index = 0;
+ simple_lock_init(&pset->quantum_adj_lock);
+
+ for (i = 0; i <= NCPUS; i++) {
+ pset->machine_quantum[i] = min_quantum;
+ }
+#endif /* NCPUS > 1 */
+ pset->mach_factor = 0;
+ pset->load_average = 0;
+ pset->sched_load = SCHED_SCALE; /* i.e. 1 */
+}
+
+/*
+ * Initialize the given processor structure for the processor in
+ * the slot specified by slot_num.
+ */
+
+void processor_init(
+ processor_t pr,
+ int slot_num)
+{
+ int i;
+
+ simple_lock_init(&pr->runq.lock);
+ pr->runq.low = 0;
+ pr->runq.count = 0;
+ for (i = 0; i < NRQS; i++) {
+ queue_init(&(pr->runq.runq[i]));
+ }
+ queue_init(&pr->processor_queue);
+ pr->state = PROCESSOR_OFF_LINE;
+ pr->next_thread = THREAD_NULL;
+ pr->idle_thread = THREAD_NULL;
+ pr->quantum = 0;
+ pr->first_quantum = FALSE;
+ pr->last_quantum = 0;
+ pr->processor_set = PROCESSOR_SET_NULL;
+ pr->processor_set_next = PROCESSOR_SET_NULL;
+ queue_init(&pr->processors);
+ simple_lock_init(&pr->lock);
+ pr->processor_self = IP_NULL;
+ pr->slot_num = slot_num;
+}
+
+/*
+ * pset_remove_processor() removes a processor from a processor_set.
+ * It can only be called on the current processor. Caller must
+ * hold lock on current processor and processor set.
+ */
+
+void pset_remove_processor(
+ processor_set_t pset,
+ processor_t processor)
+{
+ if (pset != processor->processor_set)
+ panic("pset_remove_processor: wrong pset");
+
+ queue_remove(&pset->processors, processor, processor_t, processors);
+ processor->processor_set = PROCESSOR_SET_NULL;
+ pset->processor_count--;
+ quantum_set(pset);
+}
+
+/*
+ * pset_add_processor() adds a processor to a processor_set.
+ * It can only be called on the current processor. Caller must
+ * hold lock on current processor and on pset. No reference counting on
+ * processors. Processor reference to pset is implicit.
+ */
+
+void pset_add_processor(
+ processor_set_t pset,
+ processor_t processor)
+{
+ queue_enter(&pset->processors, processor, processor_t, processors);
+ processor->processor_set = pset;
+ pset->processor_count++;
+ pset->empty = FALSE;
+ quantum_set(pset);
+}
+
+/*
+ * pset_remove_task() removes a task from a processor_set.
+ * Caller must hold locks on pset and task. Pset reference count
+ * is not decremented; caller must explicitly pset_deallocate.
+ */
+
+void pset_remove_task(
+ processor_set_t pset,
+ task_t task)
+{
+ if (pset != task->processor_set)
+ return;
+
+ queue_remove(&pset->tasks, task, task_t, pset_tasks);
+ task->processor_set = PROCESSOR_SET_NULL;
+ pset->task_count--;
+}
+
+/*
+ * pset_add_task() adds a task to a processor_set.
+ * Caller must hold locks on pset and task. Pset references to
+ * tasks are implicit.
+ */
+
+void pset_add_task(
+ processor_set_t pset,
+ task_t task)
+{
+ queue_enter(&pset->tasks, task, task_t, pset_tasks);
+ task->processor_set = pset;
+ pset->task_count++;
+}
+
+/*
+ * pset_remove_thread() removes a thread from a processor_set.
+ * Caller must hold locks on pset and thread. Pset reference count
+ * is not decremented; caller must explicitly pset_deallocate.
+ */
+
+void pset_remove_thread(
+ processor_set_t pset,
+ thread_t thread)
+{
+ queue_remove(&pset->threads, thread, thread_t, pset_threads);
+ thread->processor_set = PROCESSOR_SET_NULL;
+ pset->thread_count--;
+}
+
+/*
+ * pset_add_thread() adds a thread to a processor_set.
+ * Caller must hold locks on pset and thread. Pset references to
+ * threads are implicit.
+ */
+
+void pset_add_thread(
+ processor_set_t pset,
+ thread_t thread)
+{
+ queue_enter(&pset->threads, thread, thread_t, pset_threads);
+ thread->processor_set = pset;
+ pset->thread_count++;
+}
+
+/*
+ * thread_change_psets() changes the pset of a thread. Caller must
+ * hold locks on both psets and thread. The old pset must be
+ * explicitly pset_deallocate()'d by caller.
+ */
+
+void thread_change_psets(
+ thread_t thread,
+ processor_set_t old_pset,
+ processor_set_t new_pset)
+{
+ queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
+ old_pset->thread_count--;
+ queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
+ thread->processor_set = new_pset;
+ new_pset->thread_count++;
+}
+
+/*
+ * pset_deallocate:
+ *
+ * Remove one reference to the processor set. Destroy processor_set
+ * if this was the last reference.
+ */
+void pset_deallocate(
+ processor_set_t pset)
+{
+ if (pset == PROCESSOR_SET_NULL)
+ return;
+
+ pset_ref_lock(pset);
+ if (--pset->ref_count > 0) {
+ pset_ref_unlock(pset);
+ return;
+ }
+#if !MACH_HOST
+ panic("pset_deallocate: default_pset destroyed");
+#endif /* !MACH_HOST */
+
+#if MACH_HOST
+ /*
+ * Reference count is zero, however the all_psets list
+ * holds an implicit reference and may make new ones.
+ * Its lock also dominates the pset lock. To check for this,
+ * temporarily restore one reference, and then lock the
+ * other structures in the right order.
+ */
+ pset->ref_count = 1;
+ pset_ref_unlock(pset);
+
+ simple_lock(&all_psets_lock);
+ pset_ref_lock(pset);
+ if (--pset->ref_count > 0) {
+ /*
+ * Made an extra reference.
+ */
+ pset_ref_unlock(pset);
+ simple_unlock(&all_psets_lock);
+ return;
+ }
+
+ /*
+ * Ok to destroy pset. Make a few paranoia checks.
+ */
+
+ if ((pset == &default_pset) || (pset->thread_count > 0) ||
+ (pset->task_count > 0) || pset->processor_count > 0) {
+ panic("pset_deallocate: destroy default or active pset");
+ }
+ /*
+ * Remove from all_psets queue.
+ */
+ queue_remove(&all_psets, pset, processor_set_t, all_psets);
+ all_psets_count--;
+
+ pset_ref_unlock(pset);
+ simple_unlock(&all_psets_lock);
+
+ /*
+ * That's it, free data structure.
+ */
+ kmem_cache_free(&pset_cache, (vm_offset_t)pset);
+#endif /* MACH_HOST */
+}
+
+/*
+ * pset_reference:
+ *
+ * Add one reference to the processor set.
+ */
+void pset_reference(
+ processor_set_t pset)
+{
+ pset_ref_lock(pset);
+ pset->ref_count++;
+ pset_ref_unlock(pset);
+}
+
+kern_return_t
+processor_info(
+ processor_t processor,
+ int flavor,
+ host_t *host,
+ processor_info_t info,
+ natural_t *count)
+{
+ int slot_num, state;
+ processor_basic_info_t basic_info;
+
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (flavor != PROCESSOR_BASIC_INFO ||
+ *count < PROCESSOR_BASIC_INFO_COUNT)
+ return KERN_FAILURE;
+
+ basic_info = (processor_basic_info_t) info;
+
+ slot_num = processor->slot_num;
+ basic_info->cpu_type = machine_slot[slot_num].cpu_type;
+ basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
+ state = processor->state;
+ if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
+ basic_info->running = FALSE;
+ else
+ basic_info->running = TRUE;
+ basic_info->slot_num = slot_num;
+ if (processor == master_processor)
+ basic_info->is_master = TRUE;
+ else
+ basic_info->is_master = FALSE;
+
+ *count = PROCESSOR_BASIC_INFO_COUNT;
+ *host = &realhost;
+ return KERN_SUCCESS;
+}
+
+kern_return_t processor_start(
+ processor_t processor)
+{
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+#if NCPUS > 1
+ return cpu_start(processor->slot_num);
+#else /* NCPUS > 1 */
+ return KERN_FAILURE;
+#endif /* NCPUS > 1 */
+}
+
+kern_return_t processor_exit(
+ processor_t processor)
+{
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+#if NCPUS > 1
+ return processor_shutdown(processor);
+#else /* NCPUS > 1 */
+ return KERN_FAILURE;
+#endif /* NCPUS > 1 */
+}
+
+kern_return_t
+processor_control(
+ processor_t processor,
+ processor_info_t info,
+ natural_t count)
+{
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+#if NCPUS > 1
+ return cpu_control(processor->slot_num, (int *)info, count);
+#else /* NCPUS > 1 */
+ return KERN_FAILURE;
+#endif /* NCPUS > 1 */
+}
+
+/*
+ * Precalculate the appropriate system quanta based on load. The
+ * index into machine_quantum is the number of threads on the
+ * processor set queue. It is limited to the number of processors in
+ * the set.
+ */
+
+void quantum_set(
+ processor_set_t pset)
+{
+#if NCPUS > 1
+ int i, ncpus;
+
+ ncpus = pset->processor_count;
+
+ for ( i=1 ; i <= ncpus ; i++) {
+ pset->machine_quantum[i] =
+ ((min_quantum * ncpus) + (i/2)) / i ;
+ }
+ pset->machine_quantum[0] = 2 * pset->machine_quantum[1];
+
+ i = ((pset->runq.count > pset->processor_count) ?
+ pset->processor_count : pset->runq.count);
+ pset->set_quantum = pset->machine_quantum[i];
+#else /* NCPUS > 1 */
+ default_pset.set_quantum = min_quantum;
+#endif /* NCPUS > 1 */
+}
+
+#if MACH_HOST
+/*
+ * processor_set_create:
+ *
+ * Create and return a new processor set.
+ */
+
+kern_return_t
+processor_set_create(
+ host_t host,
+ processor_set_t *new_set,
+ processor_set_t *new_name)
+{
+ processor_set_t pset;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
+ pset_init(pset);
+ pset_reference(pset); /* for new_set out argument */
+ pset_reference(pset); /* for new_name out argument */
+ ipc_pset_init(pset);
+ pset->active = TRUE;
+
+ simple_lock(&all_psets_lock);
+ queue_enter(&all_psets, pset, processor_set_t, all_psets);
+ all_psets_count++;
+ simple_unlock(&all_psets_lock);
+
+ ipc_pset_enable(pset);
+
+ *new_set = pset;
+ *new_name = pset;
+ return KERN_SUCCESS;
+}
+
+/*
+ * processor_set_destroy:
+ *
+ * destroy a processor set. Any tasks, threads or processors
+ * currently assigned to it are reassigned to the default pset.
+ */
+kern_return_t processor_set_destroy(
+ processor_set_t pset)
+{
+ queue_entry_t elem;
+ queue_head_t *list;
+
+ if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Handle multiple termination race. First one through sets
+ * active to FALSE and disables ipc access.
+ */
+ pset_lock(pset);
+ if (!(pset->active)) {
+ pset_unlock(pset);
+ return KERN_FAILURE;
+ }
+
+ pset->active = FALSE;
+ ipc_pset_disable(pset);
+
+
+ /*
+ * Now reassign everything in this set to the default set.
+ */
+
+ if (pset->task_count > 0) {
+ list = &pset->tasks;
+ while (!queue_empty(list)) {
+ elem = queue_first(list);
+ task_reference((task_t) elem);
+ pset_unlock(pset);
+ task_assign((task_t) elem, &default_pset, FALSE);
+ task_deallocate((task_t) elem);
+ pset_lock(pset);
+ }
+ }
+
+ if (pset->thread_count > 0) {
+ list = &pset->threads;
+ while (!queue_empty(list)) {
+ elem = queue_first(list);
+ thread_reference((thread_t) elem);
+ pset_unlock(pset);
+ thread_assign((thread_t) elem, &default_pset);
+ thread_deallocate((thread_t) elem);
+ pset_lock(pset);
+ }
+ }
+
+ if (pset->processor_count > 0) {
+ list = &pset->processors;
+ while(!queue_empty(list)) {
+ elem = queue_first(list);
+ pset_unlock(pset);
+ processor_assign((processor_t) elem, &default_pset, TRUE);
+ pset_lock(pset);
+ }
+ }
+
+ pset_unlock(pset);
+
+ /*
+ * Destroy ipc state.
+ */
+ ipc_pset_terminate(pset);
+
+ /*
+ * Deallocate pset's reference to itself.
+ */
+ pset_deallocate(pset);
+ return KERN_SUCCESS;
+}
+
+#else /* MACH_HOST */
+
+kern_return_t
+processor_set_create(
+ host_t host,
+ processor_set_t *new_set,
+ processor_set_t *new_name)
+{
+ return KERN_FAILURE;
+}
+
+kern_return_t processor_set_destroy(
+ processor_set_t pset)
+{
+ return KERN_FAILURE;
+}
+
+#endif /* MACH_HOST */
+
+kern_return_t
+processor_get_assignment(
+ processor_t processor,
+ processor_set_t *pset)
+{
+ int state;
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ state = processor->state;
+ if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
+ return KERN_FAILURE;
+
+ *pset = processor->processor_set;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+processor_set_info(
+ processor_set_t pset,
+ int flavor,
+ host_t *host,
+ processor_set_info_t info,
+ natural_t *count)
+{
+ if (pset == PROCESSOR_SET_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (flavor == PROCESSOR_SET_BASIC_INFO) {
+ processor_set_basic_info_t basic_info;
+
+ if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
+ return KERN_FAILURE;
+
+ basic_info = (processor_set_basic_info_t) info;
+
+ pset_lock(pset);
+ basic_info->processor_count = pset->processor_count;
+ basic_info->task_count = pset->task_count;
+ basic_info->thread_count = pset->thread_count;
+ basic_info->mach_factor = pset->mach_factor;
+ basic_info->load_average = pset->load_average;
+ pset_unlock(pset);
+
+ *count = PROCESSOR_SET_BASIC_INFO_COUNT;
+ *host = &realhost;
+ return KERN_SUCCESS;
+ }
+ else if (flavor == PROCESSOR_SET_SCHED_INFO) {
+ processor_set_sched_info_t sched_info;
+
+ if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
+ return KERN_FAILURE;
+
+ sched_info = (processor_set_sched_info_t) info;
+
+ pset_lock(pset);
+#if MACH_FIXPRI
+ sched_info->policies = pset->policies;
+#else /* MACH_FIXPRI */
+ sched_info->policies = POLICY_TIMESHARE;
+#endif /* MACH_FIXPRI */
+ sched_info->max_priority = pset->max_priority;
+ pset_unlock(pset);
+
+ *count = PROCESSOR_SET_SCHED_INFO_COUNT;
+ *host = &realhost;
+ return KERN_SUCCESS;
+ }
+
+ *host = HOST_NULL;
+ return KERN_INVALID_ARGUMENT;
+}
+
+/*
+ * processor_set_max_priority:
+ *
+ * Specify max priority permitted on processor set. This affects
+ * newly created and assigned threads. Optionally change existing
+ * ones.
+ */
+kern_return_t
+processor_set_max_priority(
+ processor_set_t pset,
+ int max_priority,
+ boolean_t change_threads)
+{
+ if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
+ return KERN_INVALID_ARGUMENT;
+
+ pset_lock(pset);
+ pset->max_priority = max_priority;
+
+ if (change_threads) {
+ queue_head_t *list;
+ thread_t thread;
+
+ list = &pset->threads;
+ queue_iterate(list, thread, thread_t, pset_threads) {
+ if (thread->max_priority < max_priority)
+ thread_max_priority(thread, pset, max_priority);
+ }
+ }
+
+ pset_unlock(pset);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * processor_set_policy_enable:
+ *
+ * Allow indicated policy on processor set.
+ */
+
+kern_return_t
+processor_set_policy_enable(
+ processor_set_t pset,
+ int policy)
+{
+ if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
+ return KERN_INVALID_ARGUMENT;
+
+#if MACH_FIXPRI
+ pset_lock(pset);
+ pset->policies |= policy;
+ pset_unlock(pset);
+
+ return KERN_SUCCESS;
+#else /* MACH_FIXPRI */
+ if (policy == POLICY_TIMESHARE)
+ return KERN_SUCCESS;
+ else
+ return KERN_FAILURE;
+#endif /* MACH_FIXPRI */
+}
+
+/*
+ * processor_set_policy_disable:
+ *
+ * Forbid indicated policy on processor set. Time sharing cannot
+ * be forbidden.
+ */
+
+kern_return_t
+processor_set_policy_disable(
+ processor_set_t pset,
+ int policy,
+ boolean_t change_threads)
+{
+ if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
+ invalid_policy(policy))
+ return KERN_INVALID_ARGUMENT;
+
+#if MACH_FIXPRI
+ pset_lock(pset);
+
+ /*
+ * Check if policy enabled. Disable if so, then handle
+ * change_threads.
+ */
+ if (pset->policies & policy) {
+ pset->policies &= ~policy;
+
+ if (change_threads) {
+ queue_head_t *list;
+ thread_t thread;
+
+ list = &pset->threads;
+ queue_iterate(list, thread, thread_t, pset_threads) {
+ if (thread->policy == policy)
+ thread_policy(thread, POLICY_TIMESHARE, 0);
+ }
+ }
+ }
+ pset_unlock(pset);
+#endif /* MACH_FIXPRI */
+
+ return KERN_SUCCESS;
+}
+
+#define THING_TASK 0
+#define THING_THREAD 1
+
+/*
+ * processor_set_things:
+ *
+ * Common internals for processor_set_{threads,tasks}
+ */
+static kern_return_t
+processor_set_things(
+ processor_set_t pset,
+ mach_port_t **thing_list,
+ natural_t *count,
+ int type)
+{
+ unsigned int actual; /* this many things */
+ unsigned i;
+
+ vm_size_t size, size_needed;
+ vm_offset_t addr;
+
+ if (pset == PROCESSOR_SET_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = 0; addr = 0;
+
+ for (;;) {
+ pset_lock(pset);
+ if (!pset->active) {
+ pset_unlock(pset);
+ return KERN_FAILURE;
+ }
+
+ if (type == THING_TASK)
+ actual = pset->task_count;
+ else
+ actual = pset->thread_count;
+
+ /* do we have the memory we need? */
+
+ size_needed = actual * sizeof(mach_port_t);
+ if (size_needed <= size)
+ break;
+
+ /* unlock the pset and allocate more memory */
+ pset_unlock(pset);
+
+ if (size != 0)
+ kfree(addr, size);
+
+ assert(size_needed > 0);
+ size = size_needed;
+
+ addr = kalloc(size);
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* OK, have memory and the processor_set is locked & active */
+
+ switch (type) {
+ case THING_TASK: {
+ task_t *tasks = (task_t *) addr;
+ task_t task;
+
+ for (i = 0, task = (task_t) queue_first(&pset->tasks);
+ i < actual;
+ i++, task = (task_t) queue_next(&task->pset_tasks)) {
+ /* take ref for convert_task_to_port */
+ task_reference(task);
+ tasks[i] = task;
+ }
+ assert(queue_end(&pset->tasks, (queue_entry_t) task));
+ break;
+ }
+
+ case THING_THREAD: {
+ thread_t *threads = (thread_t *) addr;
+ thread_t thread;
+
+ for (i = 0, thread = (thread_t) queue_first(&pset->threads);
+ i < actual;
+ i++,
+ thread = (thread_t) queue_next(&thread->pset_threads)) {
+ /* take ref for convert_thread_to_port */
+ thread_reference(thread);
+ threads[i] = thread;
+ }
+ assert(queue_end(&pset->threads, (queue_entry_t) thread));
+ break;
+ }
+ }
+
+ /* can unlock processor set now that we have the task/thread refs */
+ pset_unlock(pset);
+
+ if (actual == 0) {
+ /* no things, so return null pointer and deallocate memory */
+ *thing_list = 0;
+ *count = 0;
+
+ if (size != 0)
+ kfree(addr, size);
+ } else {
+ /* if we allocated too much, must copy */
+
+ if (size_needed < size) {
+ vm_offset_t newaddr;
+
+ newaddr = kalloc(size_needed);
+ if (newaddr == 0) {
+ switch (type) {
+ case THING_TASK: {
+ task_t *tasks = (task_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ task_deallocate(tasks[i]);
+ break;
+ }
+
+ case THING_THREAD: {
+ thread_t *threads = (thread_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ thread_deallocate(threads[i]);
+ break;
+ }
+ }
+ kfree(addr, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ memcpy((void *) newaddr, (void *) addr, size_needed);
+ kfree(addr, size);
+ addr = newaddr;
+ }
+
+ *thing_list = (mach_port_t *) addr;
+ *count = actual;
+
+ /* do the conversion that Mig should handle */
+
+ switch (type) {
+ case THING_TASK: {
+ task_t *tasks = (task_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ ((mach_port_t *) tasks)[i] =
+ (mach_port_t)convert_task_to_port(tasks[i]);
+ break;
+ }
+
+ case THING_THREAD: {
+ thread_t *threads = (thread_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ ((mach_port_t *) threads)[i] =
+ (mach_port_t)convert_thread_to_port(threads[i]);
+ break;
+ }
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * processor_set_tasks:
+ *
+ * List all tasks in the processor set.
+ */
+kern_return_t
+processor_set_tasks(
+ processor_set_t pset,
+ task_array_t *task_list,
+ natural_t *count)
+{
+ return processor_set_things(pset, task_list, count, THING_TASK);
+}
+
+/*
+ * processor_set_threads:
+ *
+ * List all threads in the processor set.
+ */
+kern_return_t
+processor_set_threads(
+ processor_set_t pset,
+ thread_array_t *thread_list,
+ natural_t *count)
+{
+ return processor_set_things(pset, thread_list, count, THING_THREAD);
+}
diff --git a/kern/processor.h b/kern/processor.h
new file mode 100644
index 0000000..747badf
--- /dev/null
+++ b/kern/processor.h
@@ -0,0 +1,326 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * processor.h: Processor and processor-set definitions.
+ */
+
+#ifndef _KERN_PROCESSOR_H_
+#define _KERN_PROCESSOR_H_
+
+/*
+ * Data structures for managing processors and sets of processors.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/processor_info.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/kern_types.h>
+#include <kern/host.h>
+
+#if NCPUS > 1
+#include <machine/ast_types.h>
+#endif /* NCPUS > 1 */
+
+struct processor_set {
+ struct run_queue runq; /* runq for this set */
+ queue_head_t idle_queue; /* idle processors */
+ int idle_count; /* how many ? */
+ decl_simple_lock_data(, idle_lock) /* lock for above, shall be taken at splsched only */
+ queue_head_t processors; /* all processors here */
+ int processor_count; /* how many ? */
+ boolean_t empty; /* true if no processors */
+ queue_head_t tasks; /* tasks assigned */
+ int task_count; /* how many */
+ queue_head_t threads; /* threads in this set */
+ int thread_count; /* how many */
+ int ref_count; /* structure ref count */
+ decl_simple_lock_data(, ref_lock) /* lock for ref count */
+ queue_chain_t all_psets; /* link for all_psets */
+ boolean_t active; /* is pset in use */
+ decl_simple_lock_data(, lock) /* lock for everything else */
+ struct ipc_port * pset_self; /* port for operations */
+ struct ipc_port * pset_name_self; /* port for information */
+ int max_priority; /* maximum priority */
+#if MACH_FIXPRI
+ int policies; /* bit vector for policies */
+#endif /* MACH_FIXPRI */
+ int set_quantum; /* current default quantum */
+#if NCPUS > 1
+ int quantum_adj_index; /* runtime quantum adj. */
+ decl_simple_lock_data(, quantum_adj_lock) /* lock for above */
+ int machine_quantum[NCPUS+1]; /* ditto */
+#endif /* NCPUS > 1 */
+ long mach_factor; /* mach_factor */
+ long load_average; /* load_average */
+ long sched_load; /* load avg for scheduler */
+};
+extern struct processor_set default_pset;
+#if MACH_HOST
+extern struct processor_set *slave_pset;
+#endif
+
+struct processor {
+ struct run_queue runq; /* local runq for this processor */
+ /* XXX want to do this round robin eventually */
+ queue_chain_t processor_queue; /* idle/assign/shutdown queue link */
+ int state; /* See below */
+ struct thread *next_thread; /* next thread to run if dispatched */
+ struct thread *idle_thread; /* this processor's idle thread. */
+ int quantum; /* quantum for current thread */
+ boolean_t first_quantum; /* first quantum in succession */
+ int last_quantum; /* last quantum assigned */
+
+ processor_set_t processor_set; /* processor set I belong to */
+ processor_set_t processor_set_next; /* set I will belong to */
+ queue_chain_t processors; /* all processors in set */
+ decl_simple_lock_data(, lock)
+ struct ipc_port *processor_self; /* port for operations */
+ int slot_num; /* machine-indep slot number */
+#if NCPUS > 1
+ ast_check_t ast_check_data; /* for remote ast_check invocation */
+#endif /* NCPUS > 1 */
+ /* punt id data temporarily */
+};
+typedef struct processor Processor;
+extern struct processor processor_array[NCPUS];
+
+#include <kern/cpu_number.h>
+#include <machine/percpu.h>
+
+/*
+ * Chain of all processor sets.
+ */
+extern queue_head_t all_psets;
+extern int all_psets_count;
+decl_simple_lock_data(extern, all_psets_lock);
+
+/*
+ * The lock ordering is:
+ *
+ * all_psets_lock
+ * |
+ * |
+ * V
+ * pset_lock
+ * |
+ * +-----------+---------------+-------------------+
+ * | | | |
+ * | | | |
+ * | | V V
+ * | | task_lock pset_self->ip_lock
+ * | | | |
+ * | | +-----------+---------------+ |
+ * | | | | |
+ * | V V V V
+ * | thread_lock* pset_ref_lock
+ * | |
+ * | +-------+
+ * | | |
+ * | | V
+ * | | runq_lock*
+ * | |
+ * V V
+ * processor_lock*
+ * |
+ * |
+ * V
+ * pset_idle_lock*
+ * |
+ * |
+ * V
+ * action_lock*
+ *
+ * Locks marked with "*" are taken at splsched.
+ */
+
+/*
+ * XXX need a pointer to the master processor structure
+ */
+
+extern processor_t master_processor;
+
+/*
+ * NOTE: The processor->processor_set link is needed in one of the
+ * scheduler's critical paths. [Figure out where to look for another
+ * thread to run on this processor.] It is accessed without locking.
+ * The following access protocol controls this field.
+ *
+ * Read from own processor - just read.
+ * Read from another processor - lock processor structure during read.
+ * Write from own processor - lock processor structure during write.
+ * Write from another processor - NOT PERMITTED.
+ *
+ */
+
+/*
+ * Processor state locking:
+ *
+ * Values for the processor state are defined below. If the processor
+ * is off-line or being shutdown, then it is only necessary to lock
+ * the processor to change its state. Otherwise it is only necessary
+ * to lock its processor set's idle_lock. Scheduler code will
+ * typically lock only the idle_lock, but processor manipulation code
+ * will often lock both.
+ */
+
+#define PROCESSOR_OFF_LINE 0 /* Not in system */
+#define PROCESSOR_RUNNING 1 /* Running normally */
+#define PROCESSOR_IDLE 2 /* idle */
+#define PROCESSOR_DISPATCHING 3 /* dispatching (idle -> running) */
+#define PROCESSOR_ASSIGN 4 /* Assignment is changing */
+#define PROCESSOR_SHUTDOWN 5 /* Being shutdown */
+
+#define processor_ptr(i) (&percpu_array[i].processor)
+#define cpu_to_processor processor_ptr
+
+#define current_processor() (percpu_ptr(struct processor, processor))
+#define current_processor_set() (current_processor()->processor_set)
+
+/* Compatibility -- will go away */
+
+#define cpu_state(slot_num) (processor_ptr(slot_num)->state)
+#define cpu_idle(slot_num) (cpu_state(slot_num) == PROCESSOR_IDLE)
+
+/* Useful lock macros */
+
+#define pset_lock(pset) simple_lock(&(pset)->lock)
+#define pset_unlock(pset) simple_unlock(&(pset)->lock)
+#define pset_ref_lock(pset) simple_lock(&(pset)->ref_lock)
+#define pset_ref_unlock(pset) simple_unlock(&(pset)->ref_lock)
+
+/* Shall be taken at splsched only */
+#define processor_lock(pr) simple_lock(&(pr)->lock)
+#define processor_unlock(pr) simple_unlock(&(pr)->lock)
+
+typedef mach_port_t *processor_array_t;
+typedef mach_port_t *processor_set_array_t;
+typedef mach_port_t *processor_set_name_array_t;
+
+
+/*
+ * Exported functions
+ */
+
+/* Initialization */
+
+#ifdef KERNEL
+#if MACH_HOST
+extern void pset_sys_init(void);
+#endif /* MACH_HOST */
+
+/* Pset internal functions */
+
+extern void pset_sys_bootstrap(void);
+extern void pset_reference(processor_set_t);
+extern void pset_deallocate(processor_set_t);
+extern void pset_remove_processor(processor_set_t, processor_t);
+extern void pset_add_processor(processor_set_t, processor_t);
+extern void pset_remove_task(processor_set_t, struct task *);
+extern void pset_add_task(processor_set_t, struct task *);
+extern void pset_remove_thread(processor_set_t, struct thread *);
+extern void pset_add_thread(processor_set_t, struct thread *);
+extern void thread_change_psets(struct thread *,
+ processor_set_t, processor_set_t);
+
+/* Processor interface */
+
+extern kern_return_t processor_get_assignment(
+ processor_t processor,
+ processor_set_t *processor_set);
+
+extern kern_return_t processor_info(
+ processor_t processor,
+ int flavor,
+ host_t * host,
+ processor_info_t info,
+ natural_t * count);
+
+extern kern_return_t processor_start(
+ processor_t processor);
+
+extern kern_return_t processor_exit(
+ processor_t processor);
+
+extern kern_return_t processor_control(
+ processor_t processor,
+ processor_info_t info,
+ natural_t count);
+
+/* Pset interface */
+
+extern kern_return_t processor_set_create(
+ host_t host,
+ processor_set_t *new_set,
+ processor_set_t *new_name);
+
+extern kern_return_t processor_set_destroy(
+ processor_set_t pset);
+
+extern kern_return_t processor_set_info(
+ processor_set_t pset,
+ int flavor,
+ host_t *host,
+ processor_set_info_t info,
+ natural_t *count);
+
+extern kern_return_t processor_set_max_priority(
+ processor_set_t pset,
+ int max_priority,
+ boolean_t change_threads);
+
+extern kern_return_t processor_set_policy_enable(
+ processor_set_t pset,
+ int policy);
+
+extern kern_return_t processor_set_policy_disable(
+ processor_set_t pset,
+ int policy,
+ boolean_t change_threads);
+
+extern kern_return_t processor_set_tasks(
+ processor_set_t pset,
+ task_array_t *task_list,
+ natural_t *count);
+
+extern kern_return_t processor_set_threads(
+ processor_set_t pset,
+ thread_array_t *thread_list,
+ natural_t *count);
+#endif
+
+void processor_doshutdown(processor_t processor);
+void quantum_set(processor_set_t pset);
+void pset_init(processor_set_t pset);
+void processor_init(processor_t pr, int slot_num);
+
+#endif /* _KERN_PROCESSOR_H_ */
diff --git a/kern/profile.c b/kern/profile.c
new file mode 100644
index 0000000..4fcd541
--- /dev/null
+++ b/kern/profile.c
@@ -0,0 +1,408 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright 1991 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if 0
+
+#include <kern/thread.h>
+#include <kern/queue.h>
+#include <mach/profil.h>
+#include <kern/sched_prim.h>
+#include <ipc/ipc_space.h>
+
+extern vm_map_t kernel_map; /* can be discarded, defined in <vm/vm_kern.h> */
+
+thread_t profile_thread_id = THREAD_NULL;
+
+
+void profile_thread()
+{
+ struct message {
+ mach_msg_header_t head;
+ mach_msg_type_t type;
+ int arg[SIZE_PROF_BUFFER+1];
+ } msg;
+
+ spl_t s;
+ buf_to_send_t buf_entry;
+ queue_entry_t prof_queue_entry;
+ prof_data_t pbuf;
+ simple_lock_t lock;
+ msg_return_t mr;
+ int j;
+
+ /* Initialise the queue header for the prof_queue */
+ mpqueue_init(&prof_queue);
+
+ /* Template initialisation of header and type structures */
+ msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ msg.head.msgh_size = sizeof(msg);
+ msg.head.msgh_local_port = MACH_PORT_NULL;
+ msg.head.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ msg.head.msgh_id = 666666;
+
+ msg.type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
+ msg.type.msgt_size = 32;
+ msg.type.msgt_number = SIZE_PROF_BUFFER+1;
+ msg.type.msgt_inline = TRUE;
+ msg.type.msgt_longform = FALSE;
+ msg.type.msgt_deallocate = FALSE;
+ msg.type.msgt_unused = 0;
+
+ while (TRUE) {
+
+ /* Dequeue the first buffer. */
+ s = splsched();
+ mpdequeue_head(&prof_queue, &prof_queue_entry);
+ splx(s);
+
+ if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS)
+ {
+ thread_sleep((event_t) profile_thread, lock, TRUE);
+ if (current_thread()->wait_result != THREAD_AWAKENED)
+ break;
+ }
+ else {
+ task_t curr_task;
+ thread_t curr_th;
+ int *sample;
+ int curr_buf;
+ int imax;
+
+ curr_th = (thread_t) buf_entry->thread;
+ curr_buf = (int) buf_entry->number;
+ pbuf = curr_th->profil_buffer;
+
+ /* Set the remote port */
+ msg.head.msgh_remote_port = (mach_port_t) pbuf->prof_port;
+
+
+ sample = pbuf->prof_area[curr_buf].p_zone;
+ imax = pbuf->prof_area[curr_buf].p_index;
+ for(j=0 ;j<imax; j++,sample++)
+ msg.arg[j] = *sample;
+
+ /* Let hardclock() know you've finished the dirty job */
+ pbuf->prof_area[curr_buf].p_full = FALSE;
+
+ /*
+ * Store the number of samples actually sent
+ * as the last element of the array.
+ */
+ msg.arg[SIZE_PROF_BUFFER] = imax;
+
+ mr = mach_msg(&(msg.head), MACH_SEND_MSG,
+ sizeof(struct message), 0,
+ MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+
+ if (mr != MACH_MSG_SUCCESS) {
+printf("profile_thread: mach_msg failed returned %x\n",(int)mr);
+ }
+
+ if (buf_entry->wakeme)
+ thread_wakeup((event_t) &buf_entry->wakeme);
+ kmem_free(kernel_map, (buf_to_send_t) buf_entry,
+ sizeof(struct buf_to_send));
+
+ }
+
+ }
+ /* The profile thread has been signalled to exit. There may still
+ be sample data queued for us, which we must now throw away.
+ Once we set profile_thread_id to null, hardclock() will stop
+ queueing any additional samples, so we do not need to alter
+ the interrupt level. */
+ profile_thread_id = THREAD_NULL;
+ while (1) {
+ mpdequeue_head(&prof_queue, &prof_queue_entry);
+ if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS)
+ break;
+ if (buf_entry->wakeme)
+ thread_wakeup((event_t) &buf_entry->wakeme);
+ kmem_free(kernel_map, (buf_to_send_t) buf_entry,
+ sizeof(struct buf_to_send));
+ }
+
+ thread_halt_self(thread_exception_return);
+}
+
+
+
+#include <mach/message.h>
+
+void
+send_last_sample_buf(thread_t th)
+{
+ spl_t s;
+ buf_to_send_t buf_entry;
+ vm_offset_t vm_buf_entry;
+
+ if (th->profil_buffer == NULLPBUF)
+ return;
+
+ /* Ask for the sending of the last PC buffer.
+ * Make a request to the profile_thread by inserting
+ * the buffer in the send queue, and wake it up.
+ * The last buffer must be inserted at the head of the
+ * send queue, so the profile_thread handles it immediately.
+ */
+ if (kmem_alloc( kernel_map, &vm_buf_entry,
+ sizeof(struct buf_to_send)) != KERN_SUCCESS)
+ return;
+ buf_entry = (buf_to_send_t) vm_buf_entry;
+ buf_entry->thread = (int *) th;
+ buf_entry->number = th->profil_buffer->prof_index;
+
+ /* Watch out in case profile thread exits while we are about to
+ queue data for it. */
+ s = splsched();
+ if (profile_thread_id != THREAD_NULL) {
+ simple_lock_t lock;
+ buf_entry->wakeme = 1;
+ mpenqueue_tail( &prof_queue, &(buf_entry->list));
+ thread_wakeup((event_t) profile_thread);
+ assert_wait((event_t) &buf_entry->wakeme, TRUE);
+ splx(s);
+ thread_block(thread_no_continuation);
+ } else {
+ splx(s);
+ kmem_free(kernel_map, vm_buf_entry, sizeof(struct buf_to_send));
+ }
+}
+
+/*
+ * Profile current thread
+ */
+
+profile(pc) {
+
+ /* Find out which thread has been interrupted. */
+ thread_t it_thread = current_thread();
+ int inout_val = pc;
+ buf_to_send_t buf_entry;
+ vm_offset_t vm_buf_entry;
+ int *val;
+ /*
+ * Test if the current thread is to be sampled
+ */
+ if (it_thread->thread_profiled) {
+ /* Inserts the PC value in the buffer of the thread */
+ set_pbuf_value(it_thread->profil_buffer, &inout_val);
+ switch(inout_val) {
+ case 0:
+ if (profile_thread_id == THREAD_NULL) {
+ reset_pbuf_area(it_thread->profil_buffer);
+ } else printf("ERROR : hardclock : full buffer unsent\n");
+ break;
+ case 1:
+ /* Normal case, value successfully inserted */
+ break;
+ case 2 :
+ /*
+ * The value we have just inserted caused the
+ * buffer to be full, and ready to be sent.
+ * If profile_thread_id is null, the profile
+ * thread has been killed. Since this generally
+ * happens only when the O/S server task of which
+ * it is a part is killed, it is not a great loss
+ * to throw away the data.
+ */
+ if (profile_thread_id == THREAD_NULL ||
+ kmem_alloc(kernel_map,
+ &vm_buf_entry ,
+ sizeof(struct buf_to_send)) !=
+ KERN_SUCCESS) {
+ reset_pbuf_area(it_thread->profil_buffer);
+ break;
+ }
+ buf_entry = (buf_to_send_t) vm_buf_entry;
+ buf_entry->thread = (int *)it_thread;
+ buf_entry->number =
+ (it_thread->profil_buffer)->prof_index;
+ mpenqueue_tail(&prof_queue, &(buf_entry->list));
+
+ /* Switch to another buffer */
+ reset_pbuf_area(it_thread->profil_buffer);
+
+ /* Wake up the profile thread */
+ if (profile_thread_id != THREAD_NULL)
+ thread_wakeup((event_t) profile_thread);
+ break;
+
+ default:
+ printf("ERROR: profile : unexpected case\n");
+ }
+ }
+}
+
+
+/* The task parameter in this and the subsequent routine is needed for
+ MiG, even though it is not used in the function itself. */
+
+kern_return_t
+mach_sample_thread (ipc_space_t task,
+ ipc_object_t reply,
+ thread_t cur_thread)
+{
+/*
+ * This routine is called every time that a new thread has made
+ * a request for the sampling service. We must keep track of the
+ * correspondance between it's identity (cur_thread) and the port
+ * we are going to use as a reply port to send out the samples resulting
+ * from its execution.
+ */
+ prof_data_t pbuf;
+ vm_offset_t vmpbuf;
+
+ if (reply != MACH_PORT_NULL) {
+ if (cur_thread->thread_profiled && cur_thread->thread_profiled_own) {
+ if (reply == cur_thread->profil_buffer->prof_port)
+ return KERN_SUCCESS;
+ mach_sample_thread(MACH_PORT_NULL, cur_thread);
+ }
+ /* Start profiling this thread , do the initialization. */
+ alloc_pbuf_area(pbuf, vmpbuf);
+ if ((cur_thread->profil_buffer = pbuf) == NULLPBUF) {
+printf("ERROR:mach_sample_thread:cannot allocate pbuf\n");
+ return KERN_RESOURCE_SHORTAGE;
+ } else {
+ if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
+printf("ERROR:mach_sample_thread:cannot set pbuf_nb\n");
+ return KERN_FAILURE;
+ }
+ reset_pbuf_area(pbuf);
+ }
+
+ pbuf->prof_port = reply;
+ cur_thread->thread_profiled = TRUE;
+ cur_thread->thread_profiled_own = TRUE;
+ if (profile_thread_id == THREAD_NULL)
+ profile_thread_id = kernel_thread(current_task(), profile_thread);
+ } else {
+ if (!cur_thread->thread_profiled_own)
+ cur_thread->thread_profiled = FALSE;
+ if (!cur_thread->thread_profiled)
+ return KERN_SUCCESS;
+
+ send_last_sample_buf(cur_thread);
+
+ /* Stop profiling this thread, do the cleanup. */
+
+ cur_thread->thread_profiled_own = FALSE;
+ cur_thread->thread_profiled = FALSE;
+ dealloc_pbuf_area(cur_thread->profil_buffer);
+ cur_thread->profil_buffer = NULLPBUF;
+ }
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+mach_sample_task (ipc_space_t task, ipc_object_t reply, task_t cur_task)
+{
+ prof_data_t pbuf=cur_task->profil_buffer;
+ vm_offset_t vmpbuf;
+ int turnon = (reply != MACH_PORT_NULL);
+
+ if (turnon) {
+ if (cur_task->task_profiled) {
+ if (cur_task->profil_buffer->prof_port == reply)
+ return KERN_SUCCESS;
+ (void) mach_sample_task(task, MACH_PORT_NULL, cur_task);
+ }
+ if (pbuf == NULLPBUF) {
+ alloc_pbuf_area(pbuf, vmpbuf);
+ if (pbuf == NULLPBUF) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ cur_task->profil_buffer = pbuf;
+ }
+ if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
+ return KERN_FAILURE;
+ }
+ reset_pbuf_area(pbuf);
+ pbuf->prof_port = reply;
+ }
+
+ if (turnon != cur_task->task_profiled) {
+ int actual,i,sentone;
+ thread_t thread;
+
+ if (turnon && profile_thread_id == THREAD_NULL)
+ profile_thread_id =
+ kernel_thread(current_task(), profile_thread);
+ cur_task->task_profiled = turnon;
+ actual = cur_task->thread_count;
+ sentone = 0;
+ for (i=0, thread=(thread_t) queue_first(&cur_task->thread_list);
+ i < actual;
+ i++, thread=(thread_t) queue_next(&thread->thread_list)) {
+ if (!thread->thread_profiled_own) {
+ thread->thread_profiled = turnon;
+ if (turnon)
+ thread->profil_buffer = cur_task->profil_buffer;
+ else if (!sentone) {
+ send_last_sample_buf(thread);
+ sentone = 1;
+ }
+ }
+ }
+ if (!turnon) {
+ dealloc_pbuf_area(pbuf);
+ cur_task->profil_buffer = NULLPBUF;
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
+#endif /* 0 */
diff --git a/kern/queue.c b/kern/queue.c
new file mode 100644
index 0000000..f532620
--- /dev/null
+++ b/kern/queue.c
@@ -0,0 +1,121 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Routines to implement queue package.
+ */
+
+#include <kern/queue.h>
+
+
+
+/*
+ * Insert element at head of queue.
+ */
+void enqueue_head(
+ queue_t que,
+ queue_entry_t elt)
+{
+ elt->next = que->next;
+ elt->prev = que;
+ elt->next->prev = elt;
+ que->next = elt;
+}
+
+/*
+ * Insert element at tail of queue.
+ */
+void enqueue_tail(
+ queue_t que,
+ queue_entry_t elt)
+{
+ elt->next = que;
+ elt->prev = que->prev;
+ elt->prev->next = elt;
+ que->prev = elt;
+}
+
+/*
+ * Remove and return element at head of queue.
+ */
+queue_entry_t dequeue_head(
+ queue_t que)
+{
+ queue_entry_t elt;
+
+ if (que->next == que)
+ return((queue_entry_t)0);
+
+ elt = que->next;
+ elt->next->prev = que;
+ que->next = elt->next;
+ return(elt);
+}
+
+/*
+ * Remove and return element at tail of queue.
+ */
+queue_entry_t dequeue_tail(
+ queue_t que)
+{
+ queue_entry_t elt;
+
+ if (que->prev == que)
+ return((queue_entry_t)0);
+
+ elt = que->prev;
+ elt->prev->next = que;
+ que->prev = elt->prev;
+ return(elt);
+}
+
+/*
+ * Remove arbitrary element from queue.
+ * Does not check whether element is on queue - the world
+ * will go haywire if it isn't.
+ */
+
+/*ARGSUSED*/
+void remqueue(
+ queue_t que,
+ queue_entry_t elt)
+{
+ elt->next->prev = elt->prev;
+ elt->prev->next = elt->next;
+}
+
+/*
+ * Routines to directly imitate the VAX hardware queue
+ * package.
+ */
+void insque(
+ struct queue_entry *entry,
+ struct queue_entry *pred)
+{
+ entry->next = pred->next;
+ entry->prev = pred;
+ (pred->next)->prev = entry;
+ pred->next = entry;
+}
diff --git a/kern/queue.h b/kern/queue.h
new file mode 100644
index 0000000..f0b4002
--- /dev/null
+++ b/kern/queue.h
@@ -0,0 +1,391 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * File: queue.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Type definitions for generic queues.
+ *
+ */
+
+#ifndef _KERN_QUEUE_H_
+#define _KERN_QUEUE_H_
+
+#include <kern/lock.h>
+
+/*
+ * Queue of abstract objects. Queue is maintained
+ * within that object.
+ *
+ * Supports fast removal from within the queue.
+ *
+ * How to declare a queue of elements of type "foo_t":
+ * In the "*foo_t" type, you must have a field of
+ * type "queue_chain_t" to hold together this queue.
+ * There may be more than one chain through a
+ * "foo_t", for use by different queues.
+ *
+ * Declare the queue as a "queue_t" type.
+ *
+ * Elements of the queue (of type "foo_t", that is)
+ * are referred to by reference, and cast to type
+ * "queue_entry_t" within this module.
+ */
+
+/*
+ * A generic doubly-linked list (queue).
+ */
+
+struct queue_entry {
+ struct queue_entry *next; /* next element */
+ struct queue_entry *prev; /* previous element */
+};
+
+typedef struct queue_entry *queue_t;
+typedef struct queue_entry queue_head_t;
+typedef struct queue_entry queue_chain_t;
+typedef struct queue_entry *queue_entry_t;
+
+/*
+ * enqueue puts "elt" on the "queue".
+ * dequeue returns the first element in the "queue".
+ * remqueue removes the specified "elt" from the specified "queue".
+ */
+
+#define enqueue(queue,elt) enqueue_tail(queue, elt)
+#define dequeue(queue) dequeue_head(queue)
+
+void enqueue_head(queue_t, queue_entry_t);
+void enqueue_tail(queue_t, queue_entry_t);
+queue_entry_t dequeue_head(queue_t);
+queue_entry_t dequeue_tail(queue_t);
+void remqueue(queue_t, queue_entry_t);
+void insque(queue_entry_t, queue_entry_t);
+
+/*
+ * Macro: queue_assert
+ * Function:
+ * Used by macros to assert that the given argument is a
+ * queue.
+ */
+#define queue_assert(q) (void) ((void) (q)->next, (q)->prev)
+
+/*
+ * Macro: queue_init
+ * Function:
+ * Initialize the given queue.
+ * Header:
+ * void queue_init(q)
+ * queue_t q; *MODIFIED*
+ */
+#define queue_init(q) ((q)->next = (q)->prev = q)
+
+/*
+ * Macro: queue_first
+ * Function:
+ * Returns the first entry in the queue,
+ * Header:
+ * queue_entry_t queue_first(q)
+ * queue_t q; *IN*
+ */
+#define queue_first(q) (queue_assert(q), (q)->next)
+
+/*
+ * Macro: queue_next
+ * Function:
+ * Returns the entry after an item in the queue.
+ * Header:
+ * queue_entry_t queue_next(qc)
+ * queue_t qc;
+ */
+#define queue_next(qc) (queue_assert(qc), (qc)->next)
+
+/*
+ * Macro: queue_last
+ * Function:
+ * Returns the last entry in the queue.
+ * Header:
+ * queue_entry_t queue_last(q)
+ * queue_t q; *IN*
+ */
+#define queue_last(q) (queue_assert(q), (q)->prev)
+
+/*
+ * Macro: queue_prev
+ * Function:
+ * Returns the entry before an item in the queue.
+ * Header:
+ * queue_entry_t queue_prev(qc)
+ * queue_t qc;
+ */
+#define queue_prev(qc) (queue_assert(qc), (qc)->prev)
+
+/*
+ * Macro: queue_end
+ * Function:
+ * Tests whether a new entry is really the end of
+ * the queue.
+ * Header:
+ * boolean_t queue_end(q, qe)
+ * queue_t q;
+ * queue_entry_t qe;
+ */
+#define queue_end(q, qe) (queue_assert(q), queue_assert(qe), \
+ (q) == (qe))
+
+/*
+ * Macro: queue_empty
+ * Function:
+ * Tests whether a queue is empty.
+ * Header:
+ * boolean_t queue_empty(q)
+ * queue_t q;
+ */
+#define queue_empty(q) queue_end((q), queue_first(q))
+
+
+/*----------------------------------------------------------------*/
+/*
+ * Macros that operate on generic structures. The queue
+ * chain may be at any location within the structure, and there
+ * may be more than one chain.
+ */
+
+/*
+ * Macro: queue_enter
+ * Function:
+ * Insert a new element at the tail of the queue.
+ * Header:
+ * void queue_enter(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_enter(head, elt, type, field) \
+{ \
+ queue_assert(head); \
+ queue_assert(&(elt)->field); \
+ queue_entry_t prev; \
+ \
+ prev = (head)->prev; \
+ if ((head) == prev) { \
+ (head)->next = (queue_entry_t) (elt); \
+ } \
+ else { \
+ ((type)prev)->field.next = (queue_entry_t)(elt);\
+ } \
+ (elt)->field.prev = prev; \
+ (elt)->field.next = head; \
+ (head)->prev = (queue_entry_t) elt; \
+}
+
+/*
+ * Macro: queue_enter_first
+ * Function:
+ * Insert a new element at the head of the queue.
+ * Header:
+ * void queue_enter_first(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_enter_first(head, elt, type, field) \
+{ \
+ queue_assert(head); \
+ queue_assert(&(elt)->field); \
+ queue_entry_t next; \
+ \
+ next = (head)->next; \
+ if ((head) == next) { \
+ (head)->prev = (queue_entry_t) (elt); \
+ } \
+ else { \
+ ((type)next)->field.prev = (queue_entry_t)(elt);\
+ } \
+ (elt)->field.next = next; \
+ (elt)->field.prev = head; \
+ (head)->next = (queue_entry_t) elt; \
+}
+
+/*
+ * Macro: queue_field [internal use only]
+ * Function:
+ * Find the queue_chain_t (or queue_t) for the
+ * given element (thing) in the given queue (head)
+ */
+#define queue_field(head, thing, type, field) \
+ (((head) == (thing)) ? (head) : &((type)(thing))->field)
+
+/*
+ * Macro: queue_remove
+ * Function:
+ * Remove an arbitrary item from the queue.
+ * Header:
+ * void queue_remove(q, qe, type, field)
+ * arguments as in queue_enter
+ */
+#define queue_remove(head, elt, type, field) \
+{ \
+ queue_assert(head); \
+ queue_assert(&(elt)->field); \
+ queue_entry_t next, prev; \
+ \
+ next = (elt)->field.next; \
+ prev = (elt)->field.prev; \
+ \
+ if ((head) == next) \
+ (head)->prev = prev; \
+ else \
+ ((type)next)->field.prev = prev; \
+ \
+ if ((head) == prev) \
+ (head)->next = next; \
+ else \
+ ((type)prev)->field.next = next; \
+}
+
+/*
+ * Macro: queue_remove_first
+ * Function:
+ * Remove and return the entry at the head of
+ * the queue.
+ * Header:
+ * queue_remove_first(head, entry, type, field)
+ * entry is returned by reference
+ */
+#define queue_remove_first(head, entry, type, field) \
+{ \
+ queue_assert(head); \
+ queue_assert(&(entry)->field); \
+ queue_entry_t next; \
+ \
+ (entry) = (type) ((head)->next); \
+ next = (entry)->field.next; \
+ \
+ if ((head) == next) \
+ (head)->prev = (head); \
+ else \
+ ((type)(next))->field.prev = (head); \
+ (head)->next = next; \
+}
+
+/*
+ * Macro: queue_remove_last
+ * Function:
+ * Remove and return the entry at the tail of
+ * the queue.
+ * Header:
+ * queue_remove_last(head, entry, type, field)
+ * entry is returned by reference
+ */
+#define queue_remove_last(head, entry, type, field) \
+{ \
+ queue_assert(head); \
+ queue_assert(&(entry)->field); \
+ queue_entry_t prev; \
+ \
+ (entry) = (type) ((head)->prev); \
+ prev = (entry)->field.prev; \
+ \
+ if ((head) == prev) \
+ (head)->next = (head); \
+ else \
+ ((type)(prev))->field.next = (head); \
+ (head)->prev = prev; \
+}
+
+/*
+ * Macro: queue_assign
+ */
+#define queue_assign(to, from, type, field) \
+{ \
+ queue_assert(&(to)->field); \
+ queue_assert(&(from)->field); \
+ ((type)((from)->prev))->field.next = (to); \
+ ((type)((from)->next))->field.prev = (to); \
+ *to = *from; \
+}
+
+/*
+ * Macro: queue_iterate
+ * Function:
+ * iterate over each item in the queue.
+ * Generates a 'for' loop, setting elt to
+ * each item in turn (by reference).
+ * Header:
+ * queue_iterate(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_iterate(head, elt, type, field) \
+ for ((elt) = (type) queue_first(head); \
+ !queue_end((head), (queue_entry_t)(elt)); \
+ (elt) = (type) queue_next(&(elt)->field))
+
+
+
+/*----------------------------------------------------------------*/
+/*
+ * Define macros for queues with locks.
+ */
+struct mpqueue_head {
+ struct queue_entry head; /* header for queue */
+ struct slock lock; /* lock for queue */
+};
+
+typedef struct mpqueue_head mpqueue_head_t;
+
+#define round_mpq(size) (size)
+
+#define mpqueue_init(q) \
+ { \
+ queue_init(&(q)->head); \
+ simple_lock_init(&(q)->lock); \
+ }
+
+#define mpenqueue_tail(q, elt) \
+ simple_lock(&(q)->lock); \
+ enqueue_tail(&(q)->head, elt); \
+ simple_unlock(&(q)->lock);
+
+#define mpdequeue_head(q, elt) \
+ simple_lock(&(q)->lock); \
+ if (queue_empty(&(q)->head)) \
+ *(elt) = 0; \
+ else \
+ *(elt) = dequeue_head(&(q)->head); \
+ simple_unlock(&(q)->lock);
+
+/*
+ * Old queue stuff, will go away soon.
+ */
+
+#endif /* _KERN_QUEUE_H_ */
diff --git a/kern/rbtree.c b/kern/rbtree.c
new file mode 100644
index 0000000..0f5eb9a
--- /dev/null
+++ b/kern/rbtree.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2010, 2012 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kern/assert.h>
+#include <kern/rbtree.h>
+#include <kern/rbtree_i.h>
+#include <sys/types.h>
+
+#define unlikely(expr) __builtin_expect(!!(expr), 0)
+
+/*
+ * Return the index of a node in the children array of its parent.
+ *
+ * The parent parameter must not be null, and must be the parent of the
+ * given node.
+ */
+static inline int rbtree_index(const struct rbtree_node *node,
+ const struct rbtree_node *parent)
+{
+ assert(parent != NULL);
+ assert((node == NULL) || (rbtree_parent(node) == parent));
+
+ if (parent->children[RBTREE_LEFT] == node)
+ return RBTREE_LEFT;
+
+ assert(parent->children[RBTREE_RIGHT] == node);
+
+ return RBTREE_RIGHT;
+}
+
+/*
+ * Return the color of a node.
+ */
+static inline int rbtree_color(const struct rbtree_node *node)
+{
+ return node->parent & RBTREE_COLOR_MASK;
+}
+
+/*
+ * Return true if the node is red.
+ */
+static inline int rbtree_is_red(const struct rbtree_node *node)
+{
+ return rbtree_color(node) == RBTREE_COLOR_RED;
+}
+
+/*
+ * Return true if the node is black.
+ */
+static inline int rbtree_is_black(const struct rbtree_node *node)
+{
+ return rbtree_color(node) == RBTREE_COLOR_BLACK;
+}
+
+/*
+ * Set the parent of a node, retaining its current color.
+ */
+static inline void rbtree_set_parent(struct rbtree_node *node,
+ struct rbtree_node *parent)
+{
+ assert(rbtree_check_alignment(node));
+ assert(rbtree_check_alignment(parent));
+
+ node->parent = (unsigned long)parent | (node->parent & RBTREE_COLOR_MASK);
+}
+
+/*
+ * Set the color of a node, retaining its current parent.
+ */
+static inline void rbtree_set_color(struct rbtree_node *node, int color)
+{
+ assert((color & ~RBTREE_COLOR_MASK) == 0);
+ node->parent = (node->parent & RBTREE_PARENT_MASK) | color;
+}
+
+/*
+ * Set the color of a node to red, retaining its current parent.
+ */
+static inline void rbtree_set_red(struct rbtree_node *node)
+{
+ rbtree_set_color(node, RBTREE_COLOR_RED);
+}
+
+/*
+ * Set the color of a node to black, retaining its current parent.
+ */
+static inline void rbtree_set_black(struct rbtree_node *node)
+{
+ rbtree_set_color(node, RBTREE_COLOR_BLACK);
+}
+
+/*
+ * Perform a tree rotation, rooted at the given node.
+ *
+ * The direction parameter defines the rotation direction and is either
+ * RBTREE_LEFT or RBTREE_RIGHT.
+ */
+static void rbtree_rotate(struct rbtree *tree, struct rbtree_node *node,
+ int direction)
+{
+ struct rbtree_node *parent, *rnode;
+ int left, right;
+
+ left = direction;
+ right = 1 - left;
+ parent = rbtree_parent(node);
+ rnode = node->children[right];
+
+ node->children[right] = rnode->children[left];
+
+ if (rnode->children[left] != NULL)
+ rbtree_set_parent(rnode->children[left], node);
+
+ rnode->children[left] = node;
+ rbtree_set_parent(rnode, parent);
+
+ if (unlikely(parent == NULL))
+ tree->root = rnode;
+ else
+ parent->children[rbtree_index(node, parent)] = rnode;
+
+ rbtree_set_parent(node, rnode);
+}
+
+void rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
+ int index, struct rbtree_node *node)
+{
+ struct rbtree_node *grand_parent, *uncle, *tmp;
+ int left, right;
+
+ assert(rbtree_check_alignment(parent));
+ assert(rbtree_check_alignment(node));
+
+ node->parent = (unsigned long)parent | RBTREE_COLOR_RED;
+ node->children[RBTREE_LEFT] = NULL;
+ node->children[RBTREE_RIGHT] = NULL;
+
+ if (unlikely(parent == NULL))
+ tree->root = node;
+ else
+ parent->children[index] = node;
+
+ for (;;) {
+ if (parent == NULL) {
+ rbtree_set_black(node);
+ break;
+ }
+
+ if (rbtree_is_black(parent))
+ break;
+
+ grand_parent = rbtree_parent(parent);
+ assert(grand_parent != NULL);
+
+ left = rbtree_index(parent, grand_parent);
+ right = 1 - left;
+
+ uncle = grand_parent->children[right];
+
+ /*
+ * Uncle is red. Flip colors and repeat at grand parent.
+ */
+ if ((uncle != NULL) && rbtree_is_red(uncle)) {
+ rbtree_set_black(uncle);
+ rbtree_set_black(parent);
+ rbtree_set_red(grand_parent);
+ node = grand_parent;
+ parent = rbtree_parent(node);
+ continue;
+ }
+
+ /*
+ * Node is the right child of its parent. Rotate left at parent.
+ */
+ if (parent->children[right] == node) {
+ rbtree_rotate(tree, parent, left);
+ tmp = node;
+ node = parent;
+ parent = tmp;
+ }
+
+ /*
+ * Node is the left child of its parent. Handle colors, rotate right
+ * at grand parent, and leave.
+ */
+ rbtree_set_black(parent);
+ rbtree_set_red(grand_parent);
+ rbtree_rotate(tree, grand_parent, right);
+ break;
+ }
+
+ assert(rbtree_is_black(tree->root));
+}
+
/*
 * Remove a node from a tree.
 *
 * Standard bottom-up red-black removal: the node is first unlinked
 * (replaced by its in-order successor when it has two children), then,
 * if the removed link was black, the rebalancing loop restores the
 * red-black invariants.  After completion, the node is stale.
 */
void rbtree_remove(struct rbtree *tree, struct rbtree_node *node)
{
    struct rbtree_node *child, *parent, *brother;
    int color, left, right;

    /* At most one child: that child (possibly NULL) replaces the node. */
    if (node->children[RBTREE_LEFT] == NULL)
        child = node->children[RBTREE_RIGHT];
    else if (node->children[RBTREE_RIGHT] == NULL)
        child = node->children[RBTREE_LEFT];
    else {
        struct rbtree_node *successor;

        /*
         * Two-children case: replace the node with its successor
         * (the left-most node of its right subtree).
         */

        successor = node->children[RBTREE_RIGHT];

        while (successor->children[RBTREE_LEFT] != NULL)
            successor = successor->children[RBTREE_LEFT];

        /* Rebalancing will start from the successor's old location,
         * with the successor's original color. */
        color = rbtree_color(successor);
        child = successor->children[RBTREE_RIGHT];
        parent = rbtree_parent(node);

        if (unlikely(parent == NULL))
            tree->root = successor;
        else
            parent->children[rbtree_index(node, parent)] = successor;

        parent = rbtree_parent(successor);

        /*
         * Set parent directly to keep the original color.
         */
        successor->parent = node->parent;
        successor->children[RBTREE_LEFT] = node->children[RBTREE_LEFT];
        rbtree_set_parent(successor->children[RBTREE_LEFT], successor);

        /* When the successor was the removed node's direct right child,
         * the successor itself becomes the rebalancing parent. */
        if (node == parent)
            parent = successor;
        else {
            successor->children[RBTREE_RIGHT] = node->children[RBTREE_RIGHT];
            rbtree_set_parent(successor->children[RBTREE_RIGHT], successor);
            parent->children[RBTREE_LEFT] = child;

            if (child != NULL)
                rbtree_set_parent(child, parent);
        }

        goto update_color;
    }

    /*
     * Node has at most one child.
     */

    color = rbtree_color(node);
    parent = rbtree_parent(node);

    if (child != NULL)
        rbtree_set_parent(child, parent);

    if (unlikely(parent == NULL))
        tree->root = child;
    else
        parent->children[rbtree_index(node, parent)] = child;

    /*
     * The node has been removed, update the colors. The child pointer can
     * be null, in which case it is considered a black leaf.
     */
update_color:
    if (color == RBTREE_COLOR_RED)
        return;

    for (;;) {
        /* A red child is simply repainted black to make up for the
         * removed black node. */
        if ((child != NULL) && rbtree_is_red(child)) {
            rbtree_set_black(child);
            break;
        }

        if (parent == NULL)
            break;

        /* left/right are logical directions relative to the unbalanced
         * child, not literal sides. */
        left = rbtree_index(child, parent);
        right = 1 - left;

        brother = parent->children[right];

        /*
         * Brother is red. Recolor and rotate left at parent so that brother
         * becomes black.
         */
        if (rbtree_is_red(brother)) {
            rbtree_set_black(brother);
            rbtree_set_red(parent);
            rbtree_rotate(tree, parent, left);
            brother = parent->children[right];
        }

        /*
         * Brother has no red child. Recolor and repeat at parent.
         */
        if (((brother->children[RBTREE_LEFT] == NULL)
             || rbtree_is_black(brother->children[RBTREE_LEFT]))
            && ((brother->children[RBTREE_RIGHT] == NULL)
                || rbtree_is_black(brother->children[RBTREE_RIGHT]))) {
            rbtree_set_red(brother);
            child = parent;
            parent = rbtree_parent(child);
            continue;
        }

        /*
         * Brother's right child is black. Recolor and rotate right at brother.
         */
        if ((brother->children[right] == NULL)
            || rbtree_is_black(brother->children[right])) {
            rbtree_set_black(brother->children[left]);
            rbtree_set_red(brother);
            rbtree_rotate(tree, brother, right);
            brother = parent->children[right];
        }

        /*
         * Brother's left child is black. Exchange parent and brother colors
         * (we already know brother is black), set brother's right child black,
         * rotate left at parent and leave.
         */
        rbtree_set_color(brother, rbtree_color(parent));
        rbtree_set_black(parent);
        rbtree_set_black(brother->children[right]);
        rbtree_rotate(tree, parent, left);
        break;
    }

    assert((tree->root == NULL) || rbtree_is_black(tree->root));
}
+
/*
 * Return the previous or next node relative to an (empty) insertion point.
 *
 * With a null parent, the location is the tree root and there is no
 * neighbor.  When the insertion point already lies on the requested side
 * of the parent, the neighbor is obtained by walking from the parent.
 */
struct rbtree_node * rbtree_nearest(struct rbtree_node *parent, int index,
                                    int direction)
{
    assert(rbtree_check_index(direction));

    if (parent == NULL)
        return NULL;

    assert(rbtree_check_index(index));

    return (index == direction) ? rbtree_walk(parent, direction) : parent;
}
+
+struct rbtree_node * rbtree_firstlast(const struct rbtree *tree, int direction)
+{
+ struct rbtree_node *prev, *cur;
+
+ assert(rbtree_check_index(direction));
+
+ prev = NULL;
+
+ for (cur = tree->root; cur != NULL; cur = cur->children[direction])
+ prev = cur;
+
+ return prev;
+}
+
+struct rbtree_node * rbtree_walk(struct rbtree_node *node, int direction)
+{
+ int left, right;
+
+ assert(rbtree_check_index(direction));
+
+ left = direction;
+ right = 1 - left;
+
+ if (node == NULL)
+ return NULL;
+
+ if (node->children[left] != NULL) {
+ node = node->children[left];
+
+ while (node->children[right] != NULL)
+ node = node->children[right];
+ } else {
+ struct rbtree_node *parent;
+ int index;
+
+ for (;;) {
+ parent = rbtree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = rbtree_index(node, parent);
+ node = parent;
+
+ if (index == right)
+ break;
+ }
+ }
+
+ return node;
+}
+
+/*
+ * Return the left-most deepest child node of the given node.
+ */
+static struct rbtree_node * rbtree_find_deepest(struct rbtree_node *node)
+{
+ struct rbtree_node *parent;
+
+ assert(node != NULL);
+
+ for (;;) {
+ parent = node;
+ node = node->children[RBTREE_LEFT];
+
+ if (node == NULL) {
+ node = parent->children[RBTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+ }
+ }
+}
+
+struct rbtree_node * rbtree_postwalk_deepest(const struct rbtree *tree)
+{
+ struct rbtree_node *node;
+
+ node = tree->root;
+
+ if (node == NULL)
+ return NULL;
+
+ return rbtree_find_deepest(node);
+}
+
+struct rbtree_node * rbtree_postwalk_unlink(struct rbtree_node *node)
+{
+ struct rbtree_node *parent;
+ int index;
+
+ if (node == NULL)
+ return NULL;
+
+ assert(node->children[RBTREE_LEFT] == NULL);
+ assert(node->children[RBTREE_RIGHT] == NULL);
+
+ parent = rbtree_parent(node);
+
+ if (parent == NULL)
+ return NULL;
+
+ index = rbtree_index(node, parent);
+ parent->children[index] = NULL;
+ node = parent->children[RBTREE_RIGHT];
+
+ if (node == NULL)
+ return parent;
+
+ return rbtree_find_deepest(node);
+}
diff --git a/kern/rbtree.h b/kern/rbtree.h
new file mode 100644
index 0000000..f885fe7
--- /dev/null
+++ b/kern/rbtree.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Red-black tree.
+ */
+
+#ifndef _KERN_RBTREE_H
+#define _KERN_RBTREE_H
+
+#include <stddef.h>
+#include <kern/assert.h>
+#include <kern/macros.h>
+#include <sys/types.h>
+
+/*
+ * Indexes of the left and right nodes in the children array of a node.
+ */
+#define RBTREE_LEFT 0
+#define RBTREE_RIGHT 1
+
+/*
+ * Red-black node.
+ */
+struct rbtree_node;
+
+/*
+ * Red-black tree.
+ */
+struct rbtree;
+
+/*
+ * Static tree initializer.
+ */
+#define RBTREE_INITIALIZER { NULL }
+
+#include "rbtree_i.h"
+
/*
 * Initialize a tree.
 *
 * An initialized tree is empty (null root).
 */
static inline void rbtree_init(struct rbtree *tree)
{
    tree->root = NULL;
}
+
/*
 * Initialize a node.
 *
 * A node is in no tree when its parent points to itself.
 */
static inline void rbtree_node_init(struct rbtree_node *node)
{
    assert(rbtree_check_alignment(node));

    /* Self-referencing parent pointer, with the color bit set to red. */
    node->parent = (unsigned long)node | RBTREE_COLOR_RED;
    node->children[RBTREE_LEFT] = NULL;
    node->children[RBTREE_RIGHT] = NULL;
}
+
/*
 * Return true if node is in no tree.
 *
 * Relies on rbtree_node_init() making the parent pointer self-referential;
 * linking the node into a tree overwrites it.
 */
static inline int rbtree_node_unlinked(const struct rbtree_node *node)
{
    return rbtree_parent(node) == node;
}
+
/*
 * Macro that evaluates to the address of the structure containing the
 * given node based on the given type and member (container-of idiom,
 * implemented by structof()).
 */
#define rbtree_entry(node, type, member) structof(node, type, member)

/*
 * Return true if tree is empty (constant time).
 */
static inline int rbtree_empty(const struct rbtree *tree)
{
    return tree->root == NULL;
}
+
/*
 * Look up a node in a tree.
 *
 * Note that implementing the lookup algorithm as a macro gives two benefits:
 * First, it avoids the overhead of a callback function. Next, the type of the
 * cmp_fn parameter isn't rigid. The only guarantee offered by this
 * implementation is that the key parameter is the first parameter given to
 * cmp_fn. This way, users can pass only the value they need for comparison
 * instead of e.g. allocating a full structure on the stack.
 *
 * The macro evaluates to the matching node, or NULL if none was found.
 *
 * See rbtree_insert().
 */
#define rbtree_lookup(tree, key, cmp_fn)                \
MACRO_BEGIN                                             \
    struct rbtree_node *___cur;                         \
    int ___diff;                                        \
                                                        \
    ___cur = (tree)->root;                              \
                                                        \
    while (___cur != NULL) {                            \
        ___diff = cmp_fn(key, ___cur);                  \
                                                        \
        if (___diff == 0)                               \
            break;                                      \
                                                        \
        ___cur = ___cur->children[rbtree_d2i(___diff)]; \
    }                                                   \
                                                        \
    ___cur;                                             \
MACRO_END
+
/*
 * Look up a node or one of its nearest nodes in a tree.
 *
 * This macro essentially acts as rbtree_lookup() but if no entry matched
 * the key, an additional step is performed to obtain the next or previous
 * node, depending on the direction (left or right).
 *
 * The macro evaluates to the matching or nearest node, or NULL if there
 * is no node in the requested direction.
 *
 * The constraints that apply to the key parameter are the same as for
 * rbtree_lookup().
 */
#define rbtree_lookup_nearest(tree, key, cmp_fn, dir)    \
MACRO_BEGIN                                              \
    struct rbtree_node *___cur, *___prev;                \
    int ___diff, ___index;                               \
                                                         \
    ___prev = NULL;                                      \
    ___index = -1;                                       \
    ___cur = (tree)->root;                               \
                                                         \
    while (___cur != NULL) {                             \
        ___diff = cmp_fn(key, ___cur);                   \
                                                         \
        if (___diff == 0)                                \
            break;                                       \
                                                         \
        ___prev = ___cur;                                \
        ___index = rbtree_d2i(___diff);                  \
        ___cur = ___cur->children[___index];             \
    }                                                    \
                                                         \
    if (___cur == NULL)                                  \
        ___cur = rbtree_nearest(___prev, ___index, dir); \
                                                         \
    ___cur;                                              \
MACRO_END
+
/*
 * Insert a node in a tree.
 *
 * This macro performs a standard lookup to obtain the insertion point of
 * the given node in the tree (it is assumed that the inserted node never
 * compares equal to any other entry in the tree) and links the node. It
 * then checks red-black rules violations, and rebalances the tree if
 * necessary.
 *
 * Unlike rbtree_lookup(), the cmp_fn parameter must compare two complete
 * entries, so it is suggested to use two different comparison inline
 * functions, such as myobj_cmp_lookup() and myobj_cmp_insert(). There is no
 * guarantee about the order of the nodes given to the comparison function.
 *
 * The macro doesn't evaluate to a useful value.
 *
 * See rbtree_lookup().
 */
#define rbtree_insert(tree, node, cmp_fn)                \
MACRO_BEGIN                                              \
    struct rbtree_node *___cur, *___prev;                \
    int ___diff, ___index;                               \
                                                         \
    ___prev = NULL;                                      \
    ___index = -1;                                       \
    ___cur = (tree)->root;                               \
                                                         \
    while (___cur != NULL) {                             \
        ___diff = cmp_fn(node, ___cur);                  \
        assert(___diff != 0);                            \
        ___prev = ___cur;                                \
        ___index = rbtree_d2i(___diff);                  \
        ___cur = ___cur->children[___index];             \
    }                                                    \
                                                         \
    rbtree_insert_rebalance(tree, ___prev, ___index, node); \
MACRO_END
+
/*
 * Look up a node/slot pair in a tree.
 *
 * This macro essentially acts as rbtree_lookup() but in addition to a node,
 * it also returns a slot, which identifies an insertion point in the tree.
 * If the returned node is null, the slot can be used by rbtree_insert_slot()
 * to insert without the overhead of an additional lookup. The slot is a
 * simple unsigned long integer.
 *
 * Note that ___index starts at 0 (not -1 as in the other lookup macros) so
 * that an empty tree still yields a valid slot (null parent, index 0) —
 * rbtree_slot() asserts that the index is 0 or 1.
 *
 * The constraints that apply to the key parameter are the same as for
 * rbtree_lookup().
 */
#define rbtree_lookup_slot(tree, key, cmp_fn, slot)      \
MACRO_BEGIN                                              \
    struct rbtree_node *___cur, *___prev;                \
    int ___diff, ___index;                               \
                                                         \
    ___prev = NULL;                                      \
    ___index = 0;                                        \
    ___cur = (tree)->root;                               \
                                                         \
    while (___cur != NULL) {                             \
        ___diff = cmp_fn(key, ___cur);                   \
                                                         \
        if (___diff == 0)                                \
            break;                                       \
                                                         \
        ___prev = ___cur;                                \
        ___index = rbtree_d2i(___diff);                  \
        ___cur = ___cur->children[___index];             \
    }                                                    \
                                                         \
    (slot) = rbtree_slot(___prev, ___index);             \
    ___cur;                                              \
MACRO_END
+
/*
 * Insert a node at an insertion point in a tree.
 *
 * This function essentially acts as rbtree_insert() except that it doesn't
 * obtain the insertion point with a standard lookup. The insertion point
 * is obtained by calling rbtree_lookup_slot(). In addition, the new node
 * must not compare equal to an existing node in the tree (i.e. the slot
 * must denote a null node).
 */
static inline void
rbtree_insert_slot(struct rbtree *tree, unsigned long slot,
                   struct rbtree_node *node)
{
    /* Unpack the slot and hand the insertion point to the rebalancer. */
    rbtree_insert_rebalance(tree, rbtree_slot_parent(slot),
                            rbtree_slot_index(slot), node);
}
+
/*
 * Remove a node from a tree.
 *
 * After completion, the node is stale.
 */
void rbtree_remove(struct rbtree *tree, struct rbtree_node *node);

/*
 * Return the first node of a tree (in sort order), or NULL if empty.
 */
#define rbtree_first(tree) rbtree_firstlast(tree, RBTREE_LEFT)

/*
 * Return the last node of a tree (in sort order), or NULL if empty.
 */
#define rbtree_last(tree) rbtree_firstlast(tree, RBTREE_RIGHT)

/*
 * Return the node previous to the given node, or NULL if none.
 */
#define rbtree_prev(node) rbtree_walk(node, RBTREE_LEFT)

/*
 * Return the node next to the given node, or NULL if none.
 */
#define rbtree_next(node) rbtree_walk(node, RBTREE_RIGHT)

/*
 * Forge a loop to process all nodes of a tree, removing them when visited.
 *
 * This macro can only be used to destroy a tree, so that the resources used
 * by the entries can be released by the user. It basically removes all nodes
 * without doing any color checking.
 *
 * The traversal is postorder, so a node is only visited once both of its
 * subtrees have already been released.
 *
 * After completion, all nodes and the tree root member are stale.
 */
#define rbtree_for_each_remove(tree, node, tmp)          \
for (node = rbtree_postwalk_deepest(tree),               \
     tmp = rbtree_postwalk_unlink(node);                 \
     node != NULL;                                       \
     node = tmp, tmp = rbtree_postwalk_unlink(node))
+
+#endif /* _KERN_RBTREE_H */
diff --git a/kern/rbtree_i.h b/kern/rbtree_i.h
new file mode 100644
index 0000000..69dfb9d
--- /dev/null
+++ b/kern/rbtree_i.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _KERN_RBTREE_I_H
+#define _KERN_RBTREE_I_H
+
+#include <kern/assert.h>
+
/*
 * Red-black node structure.
 *
 * To reduce the number of branches and the instruction cache footprint,
 * the left and right child pointers are stored in an array, and the symmetry
 * of most tree operations is exploited by using left/right variables when
 * referring to children.
 *
 * In addition, this implementation assumes that all nodes are 4-byte aligned,
 * so that the least significant bit of the parent member can be used to store
 * the color of the node. This is true for all modern 32 and 64 bits
 * architectures, as long as the nodes aren't embedded in structures with
 * special alignment constraints such as member packing.
 */
struct rbtree_node {
    unsigned long parent;               /* parent address, color in low bit */
    struct rbtree_node *children[2];    /* indexed by RBTREE_LEFT/RBTREE_RIGHT */
};

/*
 * Red-black tree structure.
 */
struct rbtree {
    struct rbtree_node *root;   /* NULL when the tree is empty */
};

/*
 * Masks applied on the parent member of a node to obtain either the
 * color or the parent address.
 */
#define RBTREE_COLOR_MASK 0x1UL
#define RBTREE_PARENT_MASK (~0x3UL)

/*
 * Node colors.
 */
#define RBTREE_COLOR_RED 0
#define RBTREE_COLOR_BLACK 1

/*
 * Masks applied on slots to obtain either the child index or the parent
 * address (see rbtree_slot()).
 */
#define RBTREE_SLOT_INDEX_MASK 0x1UL
#define RBTREE_SLOT_PARENT_MASK (~RBTREE_SLOT_INDEX_MASK)
+
/*
 * Return true if the given pointer is suitably aligned, i.e. its low bits
 * are zero and therefore free to carry the color and slot index tags.
 */
static inline int rbtree_check_alignment(const struct rbtree_node *node)
{
    return ((unsigned long)node & (~RBTREE_PARENT_MASK)) == 0;
}

/*
 * Return true if the given index is a valid child index (0 or 1).
 */
static inline int rbtree_check_index(int index)
{
    return index == (index & 1);
}
+
/*
 * Convert the result of a comparison into an index in the children array:
 * a strictly positive difference selects the right child (1), zero or a
 * negative difference selects the left child (0).
 *
 * This function is mostly used when looking up a node.
 */
static inline int rbtree_d2i(int diff)
{
    return (diff > 0) ? 1 : 0;
}
+
/*
 * Return the parent of a node, masking the color bit out of the parent
 * member.
 */
static inline struct rbtree_node * rbtree_parent(const struct rbtree_node *node)
{
    return (struct rbtree_node *)(node->parent & RBTREE_PARENT_MASK);
}

/*
 * Translate an insertion point into a slot.
 *
 * A slot packs the parent pointer and the child index into one unsigned
 * long, which is why the parent must be suitably aligned.
 */
static inline unsigned long rbtree_slot(struct rbtree_node *parent, int index)
{
    assert(rbtree_check_alignment(parent));
    assert(rbtree_check_index(index));
    return (unsigned long)parent | index;
}

/*
 * Extract the parent address from a slot.
 */
static inline struct rbtree_node * rbtree_slot_parent(unsigned long slot)
{
    return (struct rbtree_node *)(slot & RBTREE_SLOT_PARENT_MASK);
}

/*
 * Extract the child index from a slot.
 */
static inline int rbtree_slot_index(unsigned long slot)
{
    return slot & RBTREE_SLOT_INDEX_MASK;
}
+
+/*
+ * Insert a node in a tree, rebalancing it if necessary.
+ *
+ * The index parameter is the index in the children array of the parent where
+ * the new node is to be inserted. It is ignored if the parent is null.
+ *
+ * This function is intended to be used by the rbtree_insert() macro only.
+ */
+void rbtree_insert_rebalance(struct rbtree *tree, struct rbtree_node *parent,
+ int index, struct rbtree_node *node);
+
+/*
+ * Return the previous or next node relative to a location in a tree.
+ *
+ * The parent and index parameters define the location, which can be empty.
+ * The direction parameter is either RBTREE_LEFT (to obtain the previous
+ * node) or RBTREE_RIGHT (to obtain the next one).
+ */
+struct rbtree_node * rbtree_nearest(struct rbtree_node *parent, int index,
+ int direction);
+
+/*
+ * Return the first or last node of a tree.
+ *
+ * The direction parameter is either RBTREE_LEFT (to obtain the first node)
+ * or RBTREE_RIGHT (to obtain the last one).
+ */
+struct rbtree_node * rbtree_firstlast(const struct rbtree *tree, int direction);
+
+/*
+ * Return the node next to, or previous to the given node.
+ *
+ * The direction parameter is either RBTREE_LEFT (to obtain the previous node)
+ * or RBTREE_RIGHT (to obtain the next one).
+ */
+struct rbtree_node * rbtree_walk(struct rbtree_node *node, int direction);
+
+/*
+ * Return the left-most deepest node of a tree, which is the starting point of
+ * the postorder traversal performed by rbtree_for_each_remove().
+ */
+struct rbtree_node * rbtree_postwalk_deepest(const struct rbtree *tree);
+
+/*
+ * Unlink a node from its tree and return the next (right) node in postorder.
+ */
+struct rbtree_node * rbtree_postwalk_unlink(struct rbtree_node *node);
+
+#endif /* _KERN_RBTREE_I_H */
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
new file mode 100644
index 0000000..a23d6e7
--- /dev/null
+++ b/kern/rdxtree.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (c) 2011-2015 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Upstream site with license notes :
+ * http://git.sceen.net/rbraun/librbraun.git/
+ */
+
+#include <kern/assert.h>
+#include <kern/slab.h>
+#include <mach/kern_return.h>
+#include <stddef.h>
+#include <string.h>
+
+#include "macros.h"
+#include "rdxtree.h"
+#include "rdxtree_i.h"
+
+/* XXX */
+#define CHAR_BIT 8U
+#define ERR_SUCCESS KERN_SUCCESS
+#define ERR_BUSY KERN_INVALID_ARGUMENT
+#define ERR_NOMEM KERN_RESOURCE_SHORTAGE
+
+/*
+ * Mask applied on an entry to obtain its address.
+ */
+#define RDXTREE_ENTRY_ADDR_MASK (~0x3UL)
+
+/*
+ * Global properties used to shape radix trees.
+ */
+#define RDXTREE_RADIX 6
+#define RDXTREE_RADIX_SIZE (1UL << RDXTREE_RADIX)
+#define RDXTREE_RADIX_MASK (RDXTREE_RADIX_SIZE - 1)
+
+#if RDXTREE_RADIX < 6
+typedef unsigned long rdxtree_bm_t;
+#define rdxtree_ffs(x) __builtin_ffsl(x)
+#elif RDXTREE_RADIX == 6 /* RDXTREE_RADIX < 6 */
+typedef unsigned long long rdxtree_bm_t;
+#define rdxtree_ffs(x) __builtin_ffsll(x)
+#else /* RDXTREE_RADIX < 6 */
+#error "radix too high"
+#endif /* RDXTREE_RADIX < 6 */
+
+/*
+ * Allocation bitmap size in bits.
+ */
+#define RDXTREE_BM_SIZE (sizeof(rdxtree_bm_t) * CHAR_BIT)
+
+/*
+ * Empty/full allocation bitmap words.
+ */
+#define RDXTREE_BM_EMPTY ((rdxtree_bm_t)0)
+#define RDXTREE_BM_FULL \
+ ((~(rdxtree_bm_t)0) >> (RDXTREE_BM_SIZE - RDXTREE_RADIX_SIZE))
+
+/*
+ * These macros can be replaced by actual functions in an environment
+ * that provides lockless synchronization such as RCU.
+ */
+#define llsync_assign_ptr(ptr, value) ((ptr) = (value))
+#define llsync_read_ptr(ptr) (ptr)
+
+/*
+ * Radix tree node.
+ *
+ * The height of a tree is the number of nodes to traverse until stored
+ * pointers are reached. A height of 0 means the entries of a node (or the
+ * tree root) directly point to stored pointers.
+ *
+ * The index is valid if and only if the parent isn't NULL.
+ *
+ * Concerning the allocation bitmap, a bit is set when the node it denotes,
+ * or one of its children, can be used to allocate an entry. Conversely, a bit
+ * is clear when the matching node and all of its children have no free entry.
+ *
+ * In order to support safe lockless lookups, in particular during a resize,
+ * each node includes the height of its subtree, which is invariant during
+ * the entire node lifetime. Since the tree height does vary, it can't be
+ * used to determine whether the tree root is a node or a stored pointer.
+ * This implementation assumes that all nodes and stored pointers are at least
+ * 4-byte aligned, and uses the least significant bit of entries to indicate
+ * the pointer type. This bit is set for internal nodes, and clear for stored
+ * pointers so that they can be accessed from slots without conversion.
+ */
+struct rdxtree_node {
+ struct rdxtree_node *parent;
+ unsigned int index;
+ unsigned int height;
+ unsigned int nr_entries;
+ rdxtree_bm_t alloc_bm;
+ void *entries[RDXTREE_RADIX_SIZE];
+};
+
+/*
+ * We allocate nodes using the slab allocator.
+ */
+static struct kmem_cache rdxtree_node_cache;
+
/*
 * Initialize the slab cache used to allocate radix tree nodes.
 */
void
rdxtree_cache_init(void)
{
    kmem_cache_init(&rdxtree_node_cache, "rdxtree_node",
                    sizeof(struct rdxtree_node), 0, NULL, 0);
}
+
+#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
+unsigned int rdxtree_fail_node_creation_threshold;
+unsigned int rdxtree_nr_node_creations;
+#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */
+
/*
 * Return true if the given pointer is aligned enough to leave its two low
 * bits available for entry tagging.
 */
static inline int
rdxtree_check_alignment(const void *ptr)
{
    return ((unsigned long)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0;
}

/*
 * Strip the type tag from an entry, returning the actual address.
 */
static inline void *
rdxtree_entry_addr(void *entry)
{
    return (void *)((unsigned long)entry & RDXTREE_ENTRY_ADDR_MASK);
}
+
/*
 * Return true if the given entry refers to an internal node, which is
 * identified by its least significant bit being set.
 */
static inline int
rdxtree_entry_is_node(const void *entry)
{
    return (((unsigned long)entry & 1) == 1);
}
+
/*
 * Tag a node pointer so it can be stored as an entry: the least significant
 * bit marks it as an internal node (see rdxtree_entry_is_node()).
 */
static inline void *
rdxtree_node_to_entry(struct rdxtree_node *node)
{
    return (void *)((unsigned long)node | 1);
}
+
/*
 * Allocate and initialize a node of the given height.
 *
 * On success, store the new node in *nodep and return 0.  Return ERR_NOMEM
 * when allocation fails (or when fault injection triggers).
 *
 * The index member is deliberately left unset: it only becomes meaningful
 * once the node is linked to a parent (see rdxtree_node_link()).
 */
static int
rdxtree_node_create(struct rdxtree_node **nodep, unsigned int height)
{
    struct rdxtree_node *node;

#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
    /* Fault injection for testing: fail the Nth node creation. */
    if (rdxtree_fail_node_creation_threshold != 0) {
        rdxtree_nr_node_creations++;

        if (rdxtree_nr_node_creations == rdxtree_fail_node_creation_threshold)
            return ERR_NOMEM;
    }
#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */

    node = (struct rdxtree_node *) kmem_cache_alloc(&rdxtree_node_cache);

    if (node == NULL)
        return ERR_NOMEM;

    assert(rdxtree_check_alignment(node));
    node->parent = NULL;
    node->height = height;
    node->nr_entries = 0;
    node->alloc_bm = RDXTREE_BM_FULL;   /* all slots initially free */
    memset(node->entries, 0, sizeof(node->entries));
    *nodep = node;
    return 0;
}
+
/*
 * Release a node back to its cache.
 */
static void
rdxtree_node_schedule_destruction(struct rdxtree_node *node)
{
    /*
     * This function is intended to use the appropriate interface to defer
     * destruction until all read-side references are dropped in an
     * environment that provides lockless synchronization.
     *
     * Otherwise, it simply "schedules" destruction immediately.
     */
    kmem_cache_free(&rdxtree_node_cache, (vm_offset_t) node);
}
+
/*
 * Link a node to its parent at the given slot index.
 */
static inline void
rdxtree_node_link(struct rdxtree_node *node, struct rdxtree_node *parent,
                  unsigned int index)
{
    node->parent = parent;
    node->index = index;
}

/*
 * Detach a node from its parent.
 */
static inline void
rdxtree_node_unlink(struct rdxtree_node *node)
{
    assert(node->parent != NULL);
    node->parent = NULL;
}

/*
 * Return true if every slot of the node is occupied.
 */
static inline int
rdxtree_node_full(struct rdxtree_node *node)
{
    return (node->nr_entries == ARRAY_SIZE(node->entries));
}

/*
 * Return true if the node stores no entry at all.
 */
static inline int
rdxtree_node_empty(struct rdxtree_node *node)
{
    return (node->nr_entries == 0);
}

/*
 * Store an entry at the given (previously empty) slot.
 */
static inline void
rdxtree_node_insert(struct rdxtree_node *node, unsigned int index,
                    void *entry)
{
    assert(index < ARRAY_SIZE(node->entries));
    assert(node->entries[index] == NULL);

    node->nr_entries++;
    llsync_assign_ptr(node->entries[index], entry);
}

/*
 * Store a child node, tagged as internal, at the given slot.
 */
static inline void
rdxtree_node_insert_node(struct rdxtree_node *node, unsigned int index,
                         struct rdxtree_node *child)
{
    rdxtree_node_insert(node, index, rdxtree_node_to_entry(child));
}

/*
 * Clear the given (occupied) slot.
 */
static inline void
rdxtree_node_remove(struct rdxtree_node *node, unsigned int index)
{
    assert(index < ARRAY_SIZE(node->entries));
    assert(node->entries[index] != NULL);

    node->nr_entries--;
    llsync_assign_ptr(node->entries[index], NULL);
}

/*
 * Find the first non-null entry at or after *indexp.  On success, update
 * *indexp to the matching slot and return the untagged entry address;
 * return NULL if the rest of the node is empty.
 */
static inline void *
rdxtree_node_find(struct rdxtree_node *node, unsigned int *indexp)
{
    unsigned int index;
    void *ptr;

    index = *indexp;

    while (index < ARRAY_SIZE(node->entries)) {
        ptr = rdxtree_entry_addr(llsync_read_ptr(node->entries[index]));

        if (ptr != NULL) {
            *indexp = index;
            return ptr;
        }

        index++;
    }

    return NULL;
}

/*
 * Allocation bitmap primitives.  A set bit means the node (or subtree)
 * the slot denotes still has at least one free entry.
 */
static inline void
rdxtree_node_bm_set(struct rdxtree_node *node, unsigned int index)
{
    node->alloc_bm |= (rdxtree_bm_t)1 << index;
}

static inline void
rdxtree_node_bm_clear(struct rdxtree_node *node, unsigned int index)
{
    node->alloc_bm &= ~((rdxtree_bm_t)1 << index);
}

static inline int
rdxtree_node_bm_is_set(struct rdxtree_node *node, unsigned int index)
{
    return (node->alloc_bm & ((rdxtree_bm_t)1 << index));
}

static inline int
rdxtree_node_bm_empty(struct rdxtree_node *node)
{
    return (node->alloc_bm == RDXTREE_BM_EMPTY);
}

/*
 * Return the index of the first set bit (first free slot), or
 * (unsigned int)-1 when the bitmap is empty.
 */
static inline unsigned int
rdxtree_node_bm_first(struct rdxtree_node *node)
{
    return rdxtree_ffs(node->alloc_bm) - 1;
}
+
+static inline rdxtree_key_t
+rdxtree_max_key(unsigned int height)
+{
+ size_t shift;
+
+ shift = RDXTREE_RADIX * height;
+
+ if (likely(shift < (sizeof(rdxtree_key_t) * CHAR_BIT)))
+ return ((rdxtree_key_t)1 << shift) - 1;
+ else
+ return ~((rdxtree_key_t)0);
+}
+
/*
 * Reduce the height of a tree while its root node holds a single entry
 * located at slot 0, making that entry the new root.
 */
static void
rdxtree_shrink(struct rdxtree *tree)
{
    struct rdxtree_node *node;
    void *entry;

    while (tree->height > 0) {
        node = rdxtree_entry_addr(tree->root);

        if (node->nr_entries != 1)
            break;

        entry = node->entries[0];

        /* The single entry isn't at slot 0: the root can't be replaced. */
        if (entry == NULL)
            break;

        tree->height--;

        /* The entry is itself a node unless the new height is 0. */
        if (tree->height > 0)
            rdxtree_node_unlink(rdxtree_entry_addr(entry));

        llsync_assign_ptr(tree->root, entry);
        rdxtree_node_schedule_destruction(node);
    }
}
+
/*
 * Increase the height of a tree until the given key fits, stacking new
 * root nodes above the current root.
 *
 * Returns ERR_SUCCESS, or the error from rdxtree_node_create() on failure
 * (after undoing any partial growth with rdxtree_shrink()).
 */
static int
rdxtree_grow(struct rdxtree *tree, rdxtree_key_t key)
{
    struct rdxtree_node *root, *node;
    unsigned int new_height;
    int error;

    new_height = tree->height + 1;

    while (key > rdxtree_max_key(new_height))
        new_height++;

    /* An empty tree only needs its height updated: the nodes are created
     * lazily by the next insertion. */
    if (tree->root == NULL) {
        tree->height = new_height;
        return ERR_SUCCESS;
    }

    root = rdxtree_entry_addr(tree->root);

    do {
        error = rdxtree_node_create(&node, tree->height);

        if (error) {
            rdxtree_shrink(tree);
            return error;
        }

        if (tree->height == 0)
            rdxtree_node_bm_clear(node, 0); /* old root is a stored pointer */
        else {
            rdxtree_node_link(root, node, 0);

            /* Propagate the fullness of the old root into the new one. */
            if (rdxtree_node_bm_empty(root))
                rdxtree_node_bm_clear(node, 0);
        }

        /* The old root becomes slot 0 of the new root. */
        rdxtree_node_insert(node, 0, tree->root);
        tree->height++;
        llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node));
        root = node;
    } while (new_height > tree->height);

    return ERR_SUCCESS;
}
+
/*
 * Release a chain of empty nodes, walking from the given node towards the
 * root.  On reaching a non-empty root, try to shrink the tree instead.
 */
static void
rdxtree_cleanup(struct rdxtree *tree, struct rdxtree_node *node)
{
    struct rdxtree_node *prev;

    for (;;) {
        if (likely(!rdxtree_node_empty(node))) {
            /* Non-empty root: the tree may still be shrinkable (single
             * entry at slot 0 — see rdxtree_shrink()). */
            if (unlikely(node->parent == NULL))
                rdxtree_shrink(tree);

            break;
        }

        if (node->parent == NULL) {
            /* Empty root: the tree becomes completely empty. */
            tree->height = 0;
            llsync_assign_ptr(tree->root, NULL);
            rdxtree_node_schedule_destruction(node);
            break;
        }

        prev = node;
        node = node->parent;
        rdxtree_node_unlink(prev);
        rdxtree_node_remove(node, prev->index);
        rdxtree_node_schedule_destruction(prev);
    }
}
+
+static void
+rdxtree_insert_bm_clear(struct rdxtree_node *node, unsigned int index)
+{
+ for (;;) {
+ rdxtree_node_bm_clear(node, index);
+
+ if (!rdxtree_node_full(node) || (node->parent == NULL))
+ break;
+
+ index = node->index;
+ node = node->parent;
+ }
+}
+
+int
+rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
+ void *ptr, void ***slotp)
+{
+ struct rdxtree_node *node, *prev;
+ unsigned int height, shift, index = index;
+ int error;
+
+ assert(ptr != NULL);
+ assert(rdxtree_check_alignment(ptr));
+
+ if (unlikely(key > rdxtree_max_key(tree->height))) {
+ error = rdxtree_grow(tree, key);
+
+ if (error)
+ return error;
+ }
+
+ height = tree->height;
+
+ if (unlikely(height == 0)) {
+ if (tree->root != NULL)
+ return ERR_BUSY;
+
+ llsync_assign_ptr(tree->root, ptr);
+
+ if (slotp != NULL)
+ *slotp = &tree->root;
+
+ return ERR_SUCCESS;
+ }
+
+ node = rdxtree_entry_addr(tree->root);
+ shift = (height - 1) * RDXTREE_RADIX;
+ prev = NULL;
+
+ do {
+ if (node == NULL) {
+ error = rdxtree_node_create(&node, height - 1);
+
+ if (error) {
+ if (prev == NULL)
+ tree->height = 0;
+ else
+ rdxtree_cleanup(tree, prev);
+
+ return error;
+ }
+
+ if (prev == NULL)
+ llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node));
+ else {
+ rdxtree_node_link(node, prev, index);
+ rdxtree_node_insert_node(prev, index, node);
+ }
+ }
+
+ prev = node;
+ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
+ node = rdxtree_entry_addr(prev->entries[index]);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (unlikely(node != NULL))
+ return ERR_BUSY;
+
+ rdxtree_node_insert(prev, index, ptr);
+ rdxtree_insert_bm_clear(prev, index);
+
+ if (slotp != NULL)
+ *slotp = &prev->entries[index];
+
+ return ERR_SUCCESS;
+}
+
+/*
+ * Insert ptr, allocating the lowest free key for it.
+ *
+ * The allocated key is stored at *keyp on success; if slotp is not
+ * NULL, the slot of the inserted pointer is stored there as well.
+ * Falls back to growing the tree (the "grow" label) when the current
+ * levels have no free slot.
+ */
+int
+rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
+ rdxtree_key_t *keyp, void ***slotp)
+{
+ struct rdxtree_node *node, *prev;
+ /*
+ * index is always written (via rdxtree_node_bm_first() in the descent
+ * loop) before it is first read. Initialize it to 0 rather than with
+ * the original "index = index" self-assignment, which reads an
+ * indeterminate value and is undefined behavior.
+ */
+ unsigned int height, shift, index = 0;
+ rdxtree_key_t key;
+ int error;
+
+ assert(ptr != NULL);
+ assert(rdxtree_check_alignment(ptr));
+
+ height = tree->height;
+
+ /* Height 0: key 0 is free iff the root slot is empty. */
+ if (unlikely(height == 0)) {
+ if (tree->root == NULL) {
+ llsync_assign_ptr(tree->root, ptr);
+ *keyp = 0;
+
+ if (slotp != NULL)
+ *slotp = &tree->root;
+
+ return ERR_SUCCESS;
+ }
+
+ goto grow;
+ }
+
+ node = rdxtree_entry_addr(tree->root);
+ key = 0;
+ shift = (height - 1) * RDXTREE_RADIX;
+ prev = NULL;
+
+ /* Follow the first free slot at each level, building the key. */
+ do {
+ if (node == NULL) {
+ error = rdxtree_node_create(&node, height - 1);
+
+ if (error) {
+ rdxtree_cleanup(tree, prev);
+ return error;
+ }
+
+ rdxtree_node_link(node, prev, index);
+ rdxtree_node_insert_node(prev, index, node);
+ }
+
+ prev = node;
+ index = rdxtree_node_bm_first(node);
+
+ /* No free slot in this subtree: the tree must grow. */
+ if (index == (unsigned int)-1)
+ goto grow;
+
+ key |= (rdxtree_key_t)index << shift;
+ node = rdxtree_entry_addr(node->entries[index]);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ rdxtree_node_insert(prev, index, ptr);
+ rdxtree_insert_bm_clear(prev, index);
+
+ if (slotp != NULL)
+ *slotp = &prev->entries[index];
+
+ goto out;
+
+grow:
+ /* First key of the next, taller tree; insert_common grows as needed. */
+ key = rdxtree_max_key(height) + 1;
+ error = rdxtree_insert_common(tree, key, ptr, slotp);
+
+ if (error)
+ return error;
+
+out:
+ *keyp = key;
+ return ERR_SUCCESS;
+}
+
+static void
+rdxtree_remove_bm_set(struct rdxtree_node *node, unsigned int index)
+{
+ /*
+ * After a removal, mark the slot at index free in node's allocation
+ * bitmap, then propagate upward so each ancestor records that this
+ * child subtree has free slots again. Stop at the root, or as soon as
+ * an ancestor already has its bit set (its own ancestors then do too).
+ */
+ do {
+ rdxtree_node_bm_set(node, index);
+
+ if (node->parent == NULL)
+ break;
+
+ index = node->index;
+ node = node->parent;
+ } while (!rdxtree_node_bm_is_set(node, index));
+}
+
+/*
+ * Remove the pointer stored at key.
+ *
+ * Returns the removed pointer, or NULL if the key is not present.
+ */
+void *
+rdxtree_remove(struct rdxtree *tree, rdxtree_key_t key)
+{
+ struct rdxtree_node *node, *prev;
+ unsigned int height, shift, index;
+
+ height = tree->height;
+
+ /* Keys beyond the current height cannot be present. */
+ if (unlikely(key > rdxtree_max_key(height)))
+ return NULL;
+
+ node = rdxtree_entry_addr(tree->root);
+
+ /* Height 0: the root slot itself stores the pointer. */
+ if (unlikely(height == 0)) {
+ llsync_assign_ptr(tree->root, NULL);
+ return node;
+ }
+
+ shift = (height - 1) * RDXTREE_RADIX;
+
+ /* Descend to the leaf level, remembering the parent node (prev). */
+ do {
+ if (node == NULL)
+ return NULL;
+
+ prev = node;
+ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
+ node = rdxtree_entry_addr(node->entries[index]);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (node == NULL)
+ return NULL;
+
+ /*
+ * Unlink the pointer, mark its slot free for key allocation, and let
+ * rdxtree_cleanup() shrink/free nodes that became empty on the path.
+ */
+ rdxtree_node_remove(prev, index);
+ rdxtree_remove_bm_set(prev, index);
+ rdxtree_cleanup(tree, prev);
+ return node;
+}
+
+/*
+ * Common lookup routine: return the pointer stored at key or, if
+ * get_slot is nonzero, the address of its slot. Returns NULL if the
+ * key is absent. All reads go through llsync_read_ptr(), which
+ * presumably permits lockless concurrent readers -- confirm against
+ * the llsync synchronization rules.
+ */
+void *
+rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
+ int get_slot)
+{
+ struct rdxtree_node *node, *prev;
+ unsigned int height, shift, index;
+ void *entry;
+
+ entry = llsync_read_ptr(tree->root);
+
+ /*
+ * Derive the height from the root entry itself: a non-node entry
+ * means the root slot directly stores a pointer (height 0).
+ */
+ if (entry == NULL) {
+ node = NULL;
+ height = 0;
+ } else {
+ node = rdxtree_entry_addr(entry);
+ height = rdxtree_entry_is_node(entry) ? node->height + 1 : 0;
+ }
+
+ if (key > rdxtree_max_key(height))
+ return NULL;
+
+ if (height == 0) {
+ if (node == NULL)
+ return NULL;
+
+ return get_slot ? (void *)&tree->root : node;
+ }
+
+ shift = (height - 1) * RDXTREE_RADIX;
+
+ /* Descend, keeping prev so the leaf slot address can be returned. */
+ do {
+ if (node == NULL)
+ return NULL;
+
+ prev = node;
+ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
+ entry = llsync_read_ptr(node->entries[index]);
+ node = rdxtree_entry_addr(entry);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (node == NULL)
+ return NULL;
+
+ return get_slot ? (void *)&prev->entries[index] : node;
+}
+
+/*
+ * Replace the pointer stored in slot with ptr, returning the previous
+ * pointer. Both the old and new pointers must be valid, properly
+ * aligned entries.
+ */
+void *
+rdxtree_replace_slot(void **slot, void *ptr)
+{
+ void *previous;
+
+ assert(ptr != NULL);
+ assert(rdxtree_check_alignment(ptr));
+
+ previous = *slot;
+ assert(previous != NULL);
+ assert(rdxtree_check_alignment(previous));
+ llsync_assign_ptr(*slot, ptr);
+ return previous;
+}
+
+/*
+ * Find the pointer with the smallest key strictly greater than
+ * iter->key, updating the iterator to reference it. Returns the
+ * pointer, or NULL when the tree is exhausted.
+ */
+static void *
+rdxtree_walk_next(struct rdxtree *tree, struct rdxtree_iter *iter)
+{
+ struct rdxtree_node *root, *node, *prev;
+ unsigned int height, shift, index, orig_index;
+ rdxtree_key_t key;
+ void *entry;
+
+ entry = llsync_read_ptr(tree->root);
+
+ if (entry == NULL)
+ return NULL;
+
+ /*
+ * A non-node root entry is a directly stored pointer at key 0; it is
+ * returned only on the very first walk (iter->key still -1).
+ */
+ if (!rdxtree_entry_is_node(entry)) {
+ if (iter->key != (rdxtree_key_t)-1)
+ return NULL;
+ else {
+ iter->key = 0;
+ return rdxtree_entry_addr(entry);
+ }
+ }
+
+ key = iter->key + 1;
+
+ /* key wrapped to 0 after a previous walk: iteration is complete. */
+ if ((key == 0) && (iter->node != NULL))
+ return NULL;
+
+ root = rdxtree_entry_addr(entry);
+
+restart:
+ node = root;
+ height = root->height + 1;
+
+ if (key > rdxtree_max_key(height))
+ return NULL;
+
+ shift = (height - 1) * RDXTREE_RADIX;
+
+ /* Descend toward key, skipping forward over empty slots. */
+ do {
+ prev = node;
+ index = (key >> shift) & RDXTREE_RADIX_MASK;
+ orig_index = index;
+ node = rdxtree_node_find(node, &index);
+
+ if (node == NULL) {
+ /*
+ * Subtree exhausted: advance the key at the parent level
+ * and retry from the root (NULL on key overflow).
+ */
+ shift += RDXTREE_RADIX;
+ key = ((key >> shift) + 1) << shift;
+
+ if (key == 0)
+ return NULL;
+
+ goto restart;
+ }
+
+ /* find() skipped ahead: fold the skipped slots into the key. */
+ if (orig_index != index)
+ key = ((key >> shift) + (index - orig_index)) << shift;
+
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ iter->node = prev;
+ iter->key = key;
+ return node;
+}
+
+/*
+ * Walk over the pointers of a tree in ascending key order.
+ *
+ * Fast path: if the iterator already references a leaf node and the
+ * next key stays within it, search that node directly; otherwise fall
+ * back to the full descent in rdxtree_walk_next().
+ */
+void *
+rdxtree_walk(struct rdxtree *tree, struct rdxtree_iter *iter)
+{
+ unsigned int index, orig_index;
+ void *ptr;
+
+ if (iter->node == NULL)
+ return rdxtree_walk_next(tree, iter);
+
+ index = (iter->key + 1) & RDXTREE_RADIX_MASK;
+
+ /* index 0 means the next key leaves the current leaf node. */
+ if (index != 0) {
+ orig_index = index;
+ ptr = rdxtree_node_find(iter->node, &index);
+
+ if (ptr != NULL) {
+ /* Account for any empty slots skipped by find(). */
+ iter->key += (index - orig_index) + 1;
+ return ptr;
+ }
+ }
+
+ return rdxtree_walk_next(tree, iter);
+}
+
+/*
+ * Detach every pointer from the tree and schedule destruction of all
+ * nodes. The stored pointers themselves are not freed; callers are
+ * expected to have released them beforehand (see the header comment).
+ */
+void
+rdxtree_remove_all(struct rdxtree *tree)
+{
+ struct rdxtree_node *node, *parent;
+ struct rdxtree_iter iter;
+
+ /* Height 0: clearing the root slot empties the tree. */
+ if (tree->height == 0) {
+ if (tree->root != NULL)
+ llsync_assign_ptr(tree->root, NULL);
+
+ return;
+ }
+
+ /*
+ * Repeatedly locate the first populated leaf node (walk_next leaves
+ * it in iter.node), detach that whole node from its parent, and
+ * schedule it for destruction, until the tree is empty.
+ */
+ for (;;) {
+ rdxtree_iter_init(&iter);
+ rdxtree_walk_next(tree, &iter);
+
+ if (iter.node == NULL)
+ break;
+
+ node = iter.node;
+ parent = node->parent;
+
+ if (parent == NULL)
+ rdxtree_init(tree);
+ else {
+ rdxtree_node_remove(parent, node->index);
+ rdxtree_remove_bm_set(parent, node->index);
+ rdxtree_cleanup(tree, parent);
+ node->parent = NULL;
+ }
+
+ rdxtree_node_schedule_destruction(node);
+ }
+}
diff --git a/kern/rdxtree.h b/kern/rdxtree.h
new file mode 100644
index 0000000..9892d56
--- /dev/null
+++ b/kern/rdxtree.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2011-2015 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Radix tree.
+ *
+ * In addition to the standard insertion operation, this implementation
+ * can allocate keys for the caller at insertion time.
+ *
+ * Upstream site with license notes :
+ * http://git.sceen.net/rbraun/librbraun.git/
+ */
+
+#ifndef _RDXTREE_H
+#define _RDXTREE_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * Initialize the node cache.
+ */
+void rdxtree_cache_init(void);
+
+/*
+ * This macro selects between 32 or 64-bits (the default) keys.
+ */
+#if 0
+#define RDXTREE_KEY_32
+#endif
+
+#ifdef RDXTREE_KEY_32
+typedef uint32_t rdxtree_key_t;
+#else /* RDXTREE_KEY_32 */
+typedef uint64_t rdxtree_key_t;
+#endif /* RDXTREE_KEY_32 */
+
+/*
+ * Radix tree.
+ */
+struct rdxtree;
+
+/*
+ * Radix tree iterator.
+ */
+struct rdxtree_iter;
+
+/*
+ * Static tree initializer.
+ */
+#define RDXTREE_INITIALIZER { 0, NULL }
+
+#include "rdxtree_i.h"
+
+/*
+ * Initialize a tree.
+ */
+static inline void
+rdxtree_init(struct rdxtree *tree)
+{
+ /* An empty tree has no root entry and a height of zero. */
+ tree->root = NULL;
+ tree->height = 0;
+}
+
+/*
+ * Insert a pointer in a tree.
+ *
+ * The ptr parameter must not be NULL.
+ */
+static inline int
+rdxtree_insert(struct rdxtree *tree, rdxtree_key_t key, void *ptr)
+{
+ int error;
+
+ /* Plain insertion: the caller doesn't need the slot back. */
+ error = rdxtree_insert_common(tree, key, ptr, NULL);
+ return error;
+}
+
+/*
+ * Insert a pointer in a tree and obtain its slot.
+ *
+ * The ptr and slotp parameters must not be NULL. If successful, the slot of
+ * the newly inserted pointer is stored at the address pointed to by the slotp
+ * parameter.
+ */
+static inline int
+rdxtree_insert_slot(struct rdxtree *tree, rdxtree_key_t key,
+ void *ptr, void ***slotp)
+{
+ int error;
+
+ /* Insertion that also hands the new slot back through slotp. */
+ error = rdxtree_insert_common(tree, key, ptr, slotp);
+ return error;
+}
+
+/*
+ * Insert a pointer in a tree, for which a new key is allocated.
+ *
+ * The ptr and keyp parameters must not be NULL. The newly allocated key is
+ * stored at the address pointed to by the keyp parameter.
+ */
+static inline int
+rdxtree_insert_alloc(struct rdxtree *tree, void *ptr, rdxtree_key_t *keyp)
+{
+ int error;
+
+ /* Key-allocating insertion; the caller doesn't need the slot. */
+ error = rdxtree_insert_alloc_common(tree, ptr, keyp, NULL);
+ return error;
+}
+
+/*
+ * Insert a pointer in a tree, for which a new key is allocated, and obtain
+ * its slot.
+ *
+ * The ptr, keyp and slotp parameters must not be NULL. The newly allocated
+ * key is stored at the address pointed to by the keyp parameter while the
+ * slot of the inserted pointer is stored at the address pointed to by the
+ * slotp parameter.
+ */
+static inline int
+rdxtree_insert_alloc_slot(struct rdxtree *tree, void *ptr,
+ rdxtree_key_t *keyp, void ***slotp)
+{
+ int error;
+
+ /* Key-allocating insertion that also returns the new slot. */
+ error = rdxtree_insert_alloc_common(tree, ptr, keyp, slotp);
+ return error;
+}
+
+/*
+ * Remove a pointer from a tree.
+ *
+ * The matching pointer is returned if successful, NULL otherwise.
+ */
+void * rdxtree_remove(struct rdxtree *tree, rdxtree_key_t key);
+
+/*
+ * Look up a pointer in a tree.
+ *
+ * The matching pointer is returned if successful, NULL otherwise.
+ */
+static inline void *
+rdxtree_lookup(const struct rdxtree *tree, rdxtree_key_t key)
+{
+ void *ptr;
+
+ /* get_slot == 0: return the stored pointer, not its slot. */
+ ptr = rdxtree_lookup_common(tree, key, 0);
+ return ptr;
+}
+
+/*
+ * Look up a slot in a tree.
+ *
+ * A slot is a pointer to a stored pointer in a tree. It can be used as
+ * a placeholder for fast replacements to avoid multiple lookups on the same
+ * key.
+ *
+ * A slot for the matching pointer is returned if successful, NULL otherwise.
+ *
+ * See rdxtree_replace_slot().
+ */
+static inline void **
+rdxtree_lookup_slot(const struct rdxtree *tree, rdxtree_key_t key)
+{
+ void **slot;
+
+ /* get_slot == 1: return the address of the slot holding the entry. */
+ slot = rdxtree_lookup_common(tree, key, 1);
+ return slot;
+}
+
+/*
+ * Replace a pointer in a tree.
+ *
+ * The ptr parameter must not be NULL. The previous pointer is returned.
+ *
+ * See rdxtree_lookup_slot().
+ */
+void * rdxtree_replace_slot(void **slot, void *ptr);
+
+/*
+ * Forge a loop to process all pointers of a tree.
+ */
+#define rdxtree_for_each(tree, iter, ptr) \
+for (rdxtree_iter_init(iter), ptr = rdxtree_walk(tree, iter); \
+ ptr != NULL; \
+ ptr = rdxtree_walk(tree, iter))
+
+/*
+ * Return the key of the current pointer from an iterator.
+ */
+static inline rdxtree_key_t
+rdxtree_iter_key(const struct rdxtree_iter *iter)
+{
+ /* Only meaningful once rdxtree_walk() has returned a pointer. */
+ return iter->key;
+}
+
+/*
+ * Remove all pointers from a tree.
+ *
+ * The common way to destroy a tree and its pointers is to loop over all
+ * the pointers using rdxtree_for_each(), freeing them, then call this
+ * function.
+ */
+void rdxtree_remove_all(struct rdxtree *tree);
+
+#endif /* _RDXTREE_H */
diff --git a/kern/rdxtree_i.h b/kern/rdxtree_i.h
new file mode 100644
index 0000000..d9a59bf
--- /dev/null
+++ b/kern/rdxtree_i.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011-2015 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Upstream site with license notes :
+ * http://git.sceen.net/rbraun/librbraun.git/
+ */
+
+#ifndef _RDXTREE_I_H
+#define _RDXTREE_I_H
+
+/*
+ * Radix tree.
+ */
+struct rdxtree {
+ unsigned int height; /* number of node levels; 0 means root stores
+ the single entry directly */
+ void *root; /* direct pointer (height 0) or node entry */
+};
+
+/*
+ * Radix tree iterator.
+ *
+ * The node member refers to the node containing the current pointer, if any.
+ * The key member refers to the current pointer, and is valid if and only if
+ * rdxtree_walk() has been called at least once on the iterator.
+ */
+struct rdxtree_iter {
+ void *node;
+ rdxtree_key_t key;
+};
+
+/*
+ * Initialize an iterator.
+ */
+static inline void
+rdxtree_iter_init(struct rdxtree_iter *iter)
+{
+ /* A key of -1 makes the first walk start searching from key 0. */
+ iter->key = (rdxtree_key_t)-1;
+ iter->node = NULL;
+}
+
+int rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
+ void *ptr, void ***slotp);
+
+int rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
+ rdxtree_key_t *keyp, void ***slotp);
+
+void * rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
+ int get_slot);
+
+void * rdxtree_walk(struct rdxtree *tree, struct rdxtree_iter *iter);
+
+#endif /* _RDXTREE_I_H */
diff --git a/kern/refcount.h b/kern/refcount.h
new file mode 100644
index 0000000..f32feb8
--- /dev/null
+++ b/kern/refcount.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: refcount.h
+ *
+ * This defines the system-independent part of the atomic reference count data type.
+ *
+ */
+
+#ifndef _KERN_REFCOUNT_H_
+#define _KERN_REFCOUNT_H_
+
+#include <kern/macros.h>
+
+/* Unless the above include file specified otherwise,
+ use the system-independent (unoptimized) atomic reference counter. */
+#ifndef MACHINE_REFCOUNT
+
+#include <kern/lock.h>
+
+struct RefCount {
+ decl_simple_lock_data(,lock) /* lock for reference count */
+ int ref_count; /* number of references */
+};
+typedef struct RefCount RefCount;
+
+/* Initialize the counter's lock and set it to refs references. */
+#define refcount_init(refcount, refs) \
+ MACRO_BEGIN \
+ simple_lock_init(&(refcount)->lock); \
+ ((refcount)->ref_count = (refs)); \
+ MACRO_END
+
+/* Take one additional reference, under the counter's lock. */
+#define refcount_take(refcount) \
+ MACRO_BEGIN \
+ simple_lock(&(refcount)->lock); \
+ (refcount)->ref_count++; \
+ simple_unlock(&(refcount)->lock); \
+ MACRO_END
+
+/*
+ * Drop one reference and, if the count reaches zero, run func.
+ * The new count is sampled under the lock, so exactly one caller
+ * observes zero; note that func itself runs after the lock is
+ * released.
+ */
+#define refcount_drop(refcount, func) \
+ MACRO_BEGIN \
+ int new_value; \
+ simple_lock(&(refcount)->lock); \
+ new_value = --(refcount)->ref_count; \
+ simple_unlock(&(refcount)->lock); \
+ if (new_value == 0) { func; } \
+ MACRO_END
+
+#endif /* MACHINE_REFCOUNT */
+
+#endif /* _KERN_REFCOUNT_H_ */
diff --git a/kern/sched.h b/kern/sched.h
new file mode 100644
index 0000000..d7e74d3
--- /dev/null
+++ b/kern/sched.h
@@ -0,0 +1,186 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sched.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Header file for scheduler.
+ *
+ */
+
+#ifndef _KERN_SCHED_H_
+#define _KERN_SCHED_H_
+
+#include <kern/queue.h>
+#include <kern/lock.h>
+#include <kern/kern_types.h>
+#include <kern/macros.h>
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif /* MACH_FIXPRI */
+
+#if STAT_TIME
+
+/*
+ * Statistical timing uses microseconds as timer units. 17 bit shift
+ * yields priorities. PRI_SHIFT_2 isn't needed.
+ */
+#define PRI_SHIFT 17
+
+#else /* STAT_TIME */
+
+/*
+ * Otherwise machine provides shift(s) based on time units it uses.
+ */
+#include <machine/sched_param.h>
+
+#endif /* STAT_TIME */
+#define NRQS 64 /* 64 run queues per cpu */
+
+struct run_queue {
+ queue_head_t runq[NRQS]; /* one for each priority */
+ decl_simple_lock_data(, lock) /* one lock for all queues,
+ shall be taken at splsched
+ only */
+ int low; /* low queue value */
+ int count; /* count of threads runable */
+};
+
+typedef struct run_queue *run_queue_t;
+#define RUN_QUEUE_NULL ((run_queue_t) 0)
+
+/* Shall be taken at splsched only */
+#ifdef MACH_LDEBUG
+#define runq_lock(rq) do { \
+ assert_splsched(); \
+ simple_lock_nocheck(&(rq)->lock); \
+} while (0)
+#define runq_unlock(rq) do { \
+ assert_splsched(); \
+ simple_unlock_nocheck(&(rq)->lock); \
+} while (0)
+#else
+#define runq_lock(rq) simple_lock_nocheck(&(rq)->lock)
+#define runq_unlock(rq) simple_unlock_nocheck(&(rq)->lock)
+#endif
+
+#if MACH_FIXPRI
+/*
+ * NOTE: For fixed priority threads, first_quantum indicates
+ * whether context switch at same priority is ok. For timeshareing
+ * it indicates whether preempt is ok.
+ */
+
+#define csw_needed(thread, processor) ((thread)->state & TH_SUSP || \
+ ((processor)->runq.count > 0) || \
+ ((thread)->policy == POLICY_TIMESHARE && \
+ (processor)->first_quantum == FALSE && \
+ (processor)->processor_set->runq.count > 0 && \
+ (processor)->processor_set->runq.low <= \
+ (thread)->sched_pri) || \
+ ((thread)->policy == POLICY_FIXEDPRI && \
+ (processor)->processor_set->runq.count > 0 && \
+ ((((processor)->first_quantum == FALSE) && \
+ ((processor)->processor_set->runq.low <= \
+ (thread)->sched_pri)) || \
+ ((processor)->processor_set->runq.low < \
+ (thread)->sched_pri))))
+
+#else /* MACH_FIXPRI */
+#define csw_needed(thread, processor) ((thread)->state & TH_SUSP || \
+ ((processor)->runq.count > 0) || \
+ ((processor)->first_quantum == FALSE && \
+ ((processor)->processor_set->runq.count > 0 && \
+ (processor)->processor_set->runq.low <= \
+ ((thread)->sched_pri))))
+#endif /* MACH_FIXPRI */
+
+/*
+ * Scheduler routines.
+ */
+
+extern struct run_queue *rem_runq(thread_t);
+extern struct thread *choose_thread(processor_t);
+extern queue_head_t action_queue; /* assign/shutdown queue */
+decl_simple_lock_data(extern,action_lock);
+
+extern int min_quantum; /* defines max context switch rate */
+#define MIN_QUANTUM (hz / 33) /* context switch 33 times/second */
+
+/*
+ * Default base priorities for threads.
+ */
+#define BASEPRI_SYSTEM 6
+#define BASEPRI_USER 25
+
+/*
+ * Macro to check for invalid priorities.
+ */
+
+#define invalid_pri(pri) (((pri) < 0) || ((pri) >= NRQS))
+
+/*
+ * Shift structures for holding update shifts. Actual computation
+ * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
+ * +/- is determined by the sign of shift 2.
+ */
+struct shift {
+ int shift1;
+ int shift2;
+};
+
+typedef struct shift *shift_t, shift_data_t;
+
+/*
+ * sched_tick increments once a second. Used to age priorities.
+ */
+
+extern unsigned sched_tick;
+
+#define SCHED_SCALE 128
+#define SCHED_SHIFT 7
+
+/*
+ * thread_timer_delta macro takes care of both thread timers.
+ */
+
+#define thread_timer_delta(thread) \
+MACRO_BEGIN \
+ unsigned delta; \
+ \
+ delta = 0; \
+ TIMER_DELTA((thread)->system_timer, \
+ (thread)->system_timer_save, delta); \
+ TIMER_DELTA((thread)->user_timer, \
+ (thread)->user_timer_save, delta); \
+ (thread)->cpu_delta += delta; \
+ (thread)->sched_delta += delta * \
+ (thread)->processor_set->sched_load; \
+MACRO_END
+
+#endif /* _KERN_SCHED_H_ */
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
new file mode 100644
index 0000000..24f342f
--- /dev/null
+++ b/kern/sched_prim.c
@@ -0,0 +1,2059 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sched_prim.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Scheduling primitives
+ *
+ */
+
+#include <kern/printf.h>
+#include <mach/machine.h>
+#include <machine/locore.h>
+#include <machine/machspl.h> /* For def'n of splsched() */
+#include <machine/model_dep.h>
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/lock.h>
+#include <kern/mach_clock.h>
+#include <kern/mach_factor.h>
+#include <kern/macros.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/smp.h>
+#include <kern/syscall_subr.h>
+#include <kern/thread.h>
+#include <kern/thread_swap.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif /* MACH_FIXPRI */
+
+int min_quantum; /* defines max context switch rate */
+
+unsigned sched_tick;
+
+thread_t sched_thread_id;
+
+timer_elt_data_t recompute_priorities_timer;
+
+/*
+ * State machine
+ *
+ * states are combinations of:
+ * R running
+ * W waiting (or on wait queue)
+ * S suspended (or will suspend)
+ * N non-interruptible
+ *
+ * init action
+ * assert_wait thread_block clear_wait suspend resume
+ *
+ * R RW, RWN R; setrun - RS -
+ * RS RWS, RWNS S; wake_active - - R
+ * RN RWN RN; setrun - RNS -
+ * RNS RWNS RNS; setrun - - RN
+ *
+ * RW W R RWS -
+ * RWN WN RN RWNS -
+ * RWS WS; wake_active RS - RW
+ * RWNS WNS RNS - RWN
+ *
+ * W R; setrun WS -
+ * WN RN; setrun WNS -
+ * WNS RNS; setrun - WN
+ *
+ * S - - R
+ * WS S - W
+ *
+ */
+
+/*
+ * Waiting protocols and implementation:
+ *
+ * Each thread may be waiting for exactly one event; this event
+ * is set using assert_wait(). That thread may be awakened either
+ * by performing a thread_wakeup_prim() on its event,
+ * or by directly waking that thread up with clear_wait().
+ *
+ * The implementation of wait events uses a hash table. Each
+ * bucket is queue of threads having the same hash function
+ * value; the chain for the queue (linked list) is the run queue
+ * field. [It is not possible to be waiting and runnable at the
+ * same time.]
+ *
+ * Locks on both the thread and on the hash buckets govern the
+ * wait event field and the queue chain field. Because wakeup
+ * operations only have the event as an argument, the event hash
+ * bucket must be locked before any thread.
+ *
+ * Scheduling operations may also occur at interrupt level; therefore,
+ * interrupts below splsched() must be prevented when holding
+ * thread or hash bucket locks.
+ *
+ * The wait event hash table declarations are as follows:
+ */
+
+#define NUMQUEUES 1031
+
+/* Shall be taken at splsched only */
+decl_simple_lock_data(static, wait_lock[NUMQUEUES]) /* Lock for... */
+queue_head_t wait_queue[NUMQUEUES];
+
+#ifdef MACH_LDEBUG
+#define waitq_lock(wl) do { \
+ assert_splsched(); \
+ simple_lock_nocheck(wl); \
+} while (0)
+#define waitq_unlock(wl) do { \
+ assert_splsched(); \
+ simple_unlock_nocheck(wl); \
+} while (0)
+#else
+#define waitq_lock(wl) simple_lock_nocheck(wl)
+#define waitq_unlock(wl) simple_unlock_nocheck(wl)
+#endif
+
+
+/* NOTE: we want a small positive integer out of this */
+#define wait_hash(event) \
+ ((((long)(event) < 0) ? ~(long)(event) : (long)(event)) % NUMQUEUES)
+
+/* Prepare every wait hash bucket: an empty queue and its bucket lock. */
+static void wait_queue_init(void)
+{
+ int bucket;
+
+ for (bucket = 0; bucket < NUMQUEUES; bucket++) {
+ simple_lock_init(&wait_lock[bucket]);
+ queue_init(&wait_queue[bucket]);
+ }
+}
+
+void sched_init(void)
+{
+ /* Timer used to periodically recompute thread priorities. */
+ recompute_priorities_timer.fcn = recompute_priorities;
+ recompute_priorities_timer.param = NULL;
+
+ min_quantum = MIN_QUANTUM; /* cap on the context switch rate */
+ wait_queue_init();
+ pset_sys_bootstrap(); /* initialize processor mgmt. */
+ queue_init(&action_queue);
+ simple_lock_init(&action_lock);
+ sched_tick = 0;
+ ast_init();
+}
+
+/*
+ * Thread timeout routine, called when timer expires.
+ * Called at splsoftclock.
+ */
+static void thread_timeout(
+ void *_thread)
+{
+ thread_t thread = _thread;
+ /* The timer just fired, so it must no longer be armed. */
+ assert(thread->timer.set == TELT_UNSET);
+
+ /* FALSE: wake the thread even if it is marked uninterruptible. */
+ clear_wait(thread, THREAD_TIMED_OUT, FALSE);
+}
+
+/*
+ * thread_set_timeout:
+ *
+ * Set a timer for the current thread, if the thread
+ * is ready to wait. Must be called between assert_wait()
+ * and thread_block().
+ */
+
+void thread_set_timeout(
+ int t) /* timeout interval in ticks */
+{
+ thread_t thread = current_thread();
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ /*
+ * Only arm the timer while the thread is still marked waiting;
+ * otherwise the wait has already been cleared and a timeout would
+ * be spurious.
+ */
+ if ((thread->state & TH_WAIT) != 0) {
+ set_timeout(&thread->timer, t);
+ }
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * Set up thread timeout element when thread is created.
+ */
+void thread_timeout_setup(
+ thread_t thread)
+{
+ /* Wire the wait timer to thread_timeout() ... */
+ thread->timer.fcn = thread_timeout;
+ thread->timer.param = thread;
+ /* ... and the priority-depression timer to thread_depress_timeout(). */
+ thread->depress_timer.fcn = (void (*)(void*))thread_depress_timeout;
+ thread->depress_timer.param = thread;
+}
+
+/*
+ * assert_wait:
+ *
+ * Assert that the current thread is about to go to
+ * sleep until the specified event occurs.
+ */
+void assert_wait(
+ event_t event,
+ boolean_t interruptible)
+{
+ queue_t q;
+ int index;
+ thread_t thread;
+ decl_simple_lock_data( , *lock);
+ spl_t s;
+
+ thread = current_thread();
+ /* A thread may wait on at most one event at a time. */
+ if (thread->wait_event != 0) {
+ panic("assert_wait: already asserted event %p\n",
+ thread->wait_event);
+ }
+ s = splsched();
+ if (event != 0) {
+ /*
+ * Queue the thread on the event's hash bucket. Lock ordering:
+ * bucket lock before thread lock (see the locking comment at
+ * the top of this file).
+ */
+ index = wait_hash(event);
+ q = &wait_queue[index];
+ lock = &wait_lock[index];
+ waitq_lock(lock);
+ thread_lock(thread);
+ enqueue_tail(q, &(thread->links));
+ thread->wait_event = event;
+ if (interruptible)
+ thread->state |= TH_WAIT;
+ else
+ thread->state |= TH_WAIT | TH_UNINT;
+ thread_unlock(thread);
+ waitq_unlock(lock);
+ }
+ else {
+ /*
+ * No event: the thread waits without being queued, and can only
+ * be woken via clear_wait() on the thread itself.
+ */
+ thread_lock(thread);
+ if (interruptible)
+ thread->state |= TH_WAIT;
+ else
+ thread->state |= TH_WAIT | TH_UNINT;
+ thread_unlock(thread);
+ }
+ splx(s);
+}
+
+/*
+ * clear_wait:
+ *
+ * Clear the wait condition for the specified thread. Start the thread
+ * executing if that is appropriate.
+ *
+ * parameters:
+ * thread thread to awaken
+ * result Wakeup result the thread should see
+ * interrupt_only Don't wake up the thread if it isn't
+ * interruptible.
+ */
+void clear_wait(
+ thread_t thread,
+ int result,
+ boolean_t interrupt_only)
+{
+ int index;
+ queue_t q;
+ decl_simple_lock_data( , *lock);
+ event_t event;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ if (interrupt_only && (thread->state & TH_UNINT)) {
+ /*
+ * can`t interrupt thread
+ */
+ thread_unlock(thread);
+ splx(s);
+ return;
+ }
+
+ event = thread->wait_event;
+ if (event != 0) {
+ /*
+ * Drop the thread lock to acquire the bucket lock first
+ * (bucket before thread is the required lock order), then
+ * re-check under both locks.
+ */
+ thread_unlock(thread);
+ index = wait_hash(event);
+ q = &wait_queue[index];
+ lock = &wait_lock[index];
+ waitq_lock(lock);
+ /*
+ * If the thread is still waiting on that event,
+ * then remove it from the list. If it is waiting
+ * on a different event, or no event at all, then
+ * someone else did our job for us.
+ */
+ thread_lock(thread);
+ if (thread->wait_event == event) {
+ remqueue(q, (queue_entry_t)thread);
+ thread->wait_event = 0;
+ event = 0; /* cause to run below */
+ }
+ waitq_unlock(lock);
+ }
+ if (event == 0) {
+ int state = thread->state;
+
+ /* The wait is over; cancel any pending timeout. */
+ reset_timeout_check(&thread->timer);
+
+ switch (state & TH_SCHED_STATE) {
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread->wait_result = result;
+ thread_setrun(thread, TRUE);
+ break;
+
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Either already running, or suspended.
+ */
+ thread->state = state &~ TH_WAIT;
+ thread->wait_result = result;
+ break;
+
+ default:
+ /*
+ * Not waiting.
+ */
+ break;
+ }
+ }
+ thread_unlock(thread);
+ splx(s);
+}
+
+#define state_panic(thread) \
+ panic ("thread %p has unexpected state %x (%s%s%s%s%s%s%s%s)", \
+ thread, thread->state, \
+ thread->state & TH_WAIT ? "TH_WAIT|" : "", \
+ thread->state & TH_SUSP ? "TH_SUSP|" : "", \
+ thread->state & TH_RUN ? "TH_RUN|" : "", \
+ thread->state & TH_UNINT ? "TH_UNINT|" : "", \
+ thread->state & TH_HALTED ? "TH_HALTED|" : "", \
+ thread->state & TH_IDLE ? "TH_IDLE|" : "", \
+ thread->state & TH_SWAPPED ? "TH_SWAPPED|" : "", \
+ thread->state & TH_SW_COMING_IN ? "TH_SW_COMING_IN|" : "")
+
+/*
+ * thread_wakeup_prim:
+ *
+ * Common routine for thread_wakeup, thread_wakeup_with_result,
+ * and thread_wakeup_one.
+ *
+ */
+boolean_t thread_wakeup_prim(
+ event_t event,
+ boolean_t one_thread,
+ int result)
+{
+ queue_t q;
+ int index;
+ boolean_t woke = FALSE;
+ thread_t thread, next_th;
+ decl_simple_lock_data( , *lock);
+ spl_t s;
+ int state;
+
+ /* Scan the event's hash bucket for threads waiting on this event. */
+ index = wait_hash(event);
+ q = &wait_queue[index];
+ s = splsched();
+ lock = &wait_lock[index];
+ waitq_lock(lock);
+ thread = (thread_t) queue_first(q);
+ while (!queue_end(q, (queue_entry_t)thread)) {
+ /* Grab the successor first: thread may be dequeued below. */
+ next_th = (thread_t) queue_next((queue_t) thread);
+
+ if (thread->wait_event == event) {
+ thread_lock(thread);
+ remqueue(q, (queue_entry_t) thread);
+ thread->wait_event = 0;
+ reset_timeout_check(&thread->timer);
+
+ state = thread->state;
+ switch (state & TH_SCHED_STATE) {
+
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread->wait_result = result;
+ thread_setrun(thread, TRUE);
+ break;
+
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Either already running, or suspended.
+ */
+ thread->state = state &~ TH_WAIT;
+ thread->wait_result = result;
+ break;
+
+ default:
+ state_panic(thread);
+ break;
+ }
+ thread_unlock(thread);
+ woke = TRUE;
+ /* thread_wakeup_one(): stop after the first waiter. */
+ if (one_thread)
+ break;
+ }
+ thread = next_th;
+ }
+ waitq_unlock(lock);
+ splx(s);
+ return (woke);
+}
+
+/*
+ * thread_sleep:
+ *
+ * Cause the current thread to wait until the specified event
+ * occurs. The specified lock is unlocked before releasing
+ * the cpu. (This is a convenient way to sleep without manually
+ * calling assert_wait).
+ *
+ * event:         event to wait for.
+ * lock:          simple lock released after the wait is asserted.
+ * interruptible: passed through to assert_wait.
+ *
+ * Note: if the event may be woken from an interrupt handler, this must be
+ * called at an spl level that prevents such interrupts.
+ */
+void thread_sleep(
+ event_t event,
+ simple_lock_t lock,
+ boolean_t interruptible)
+{
+ assert_wait(event, interruptible); /* assert event */
+ simple_unlock(lock); /* release the lock */
+ thread_block(thread_no_continuation); /* block ourselves */
+}
+
+/*
+ * thread_bind:
+ *
+ * Force a thread to execute on the specified processor.
+ * If the thread is currently executing, it may wait until its
+ * time slice is up before switching onto the specified processor.
+ *
+ * A processor of PROCESSOR_NULL causes the thread to be unbound.
+ * xxx - DO NOT export this to users.
+ */
+void thread_bind(
+ thread_t thread,
+ processor_t processor)
+{
+ spl_t s;
+
+ /* Only the binding field changes; the thread lock protects it. */
+ s = splsched();
+ thread_lock(thread);
+ thread->bound_processor = processor;
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * Select a thread for this processor (the current processor) to run.
+ * May select the current thread.
+ * Assumes splsched.
+ *
+ * Side effects: resets myprocessor->first_quantum and sets
+ * myprocessor->quantum for the chosen thread.
+ */
+
+static thread_t thread_select(
+ processor_t myprocessor)
+{
+ thread_t thread;
+
+ myprocessor->first_quantum = TRUE;
+ /*
+ * Check for obvious simple case; local runq is
+ * empty and global runq has entry at hint.
+ */
+ if (myprocessor->runq.count > 0) {
+ thread = choose_thread(myprocessor);
+ myprocessor->quantum = min_quantum;
+ }
+ else {
+ processor_set_t pset;
+
+#if MACH_HOST
+ pset = myprocessor->processor_set;
+#else /* MACH_HOST */
+ pset = &default_pset;
+#endif /* MACH_HOST */
+ simple_lock(&pset->runq.lock);
+#if DEBUG
+ checkrq(&pset->runq, "thread_select");
+#endif /* DEBUG */
+ if (pset->runq.count == 0) {
+ /*
+ * Nothing else runnable. Return if this
+ * thread is still runnable on this processor.
+ * Check for priority update if required.
+ */
+ thread = current_thread();
+ if ((thread->state == TH_RUN) &&
+#if MACH_HOST
+ (thread->processor_set == pset) &&
+#endif /* MACH_HOST */
+ ((thread->bound_processor == PROCESSOR_NULL) ||
+ (thread->bound_processor == myprocessor))) {
+
+ /* Drop the runq lock before taking the thread lock. */
+ simple_unlock(&pset->runq.lock);
+ thread_lock(thread);
+ if (thread->sched_stamp != sched_tick)
+ update_priority(thread);
+ thread_unlock(thread);
+ }
+ else {
+ /* choose_pset_thread releases pset->runq.lock. */
+ thread = choose_pset_thread(myprocessor, pset);
+ }
+ }
+ else {
+ queue_t q;
+
+ /*
+ * If there is a thread at hint, grab it,
+ * else call choose_pset_thread.
+ */
+ q = pset->runq.runq + pset->runq.low;
+
+ if (queue_empty(q)) {
+ /* Stale hint: advance it; choose_pset_thread unlocks. */
+ pset->runq.low++;
+ thread = choose_pset_thread(myprocessor, pset);
+ }
+ else {
+ thread = (thread_t) dequeue_head(q);
+ thread->runq = RUN_QUEUE_NULL;
+ pset->runq.count--;
+#if MACH_FIXPRI
+ /*
+ * Cannot lazy evaluate pset->runq.low for
+ * fixed priority policy
+ */
+ if ((pset->runq.count > 0) &&
+ (pset->policies & POLICY_FIXEDPRI)) {
+ while (queue_empty(q)) {
+ pset->runq.low++;
+ q++;
+ }
+ }
+#endif /* MACH_FIXPRI */
+#if DEBUG
+ checkrq(&pset->runq, "thread_select: after");
+#endif /* DEBUG */
+ simple_unlock(&pset->runq.lock);
+ }
+ }
+
+#if MACH_FIXPRI
+ if (thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+ myprocessor->quantum = pset->set_quantum;
+#if MACH_FIXPRI
+ }
+ else {
+ /*
+ * POLICY_FIXEDPRI
+ */
+ myprocessor->quantum = thread->sched_data;
+ }
+#endif /* MACH_FIXPRI */
+ }
+
+ return thread;
+}
+
+/*
+ * Stop running the current thread and start running the new thread.
+ * If continuation is non-zero, and the current thread is blocked,
+ * then it will resume by executing continuation on a new stack.
+ * Returns TRUE if the hand-off succeeds.
+ * Assumes splsched.
+ *
+ * Returns FALSE when new_thread cannot be run yet (it is waiting
+ * for a stack); the caller is expected to select another thread.
+ */
+
+boolean_t thread_invoke(
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread)
+{
+ /*
+ * Check for invoking the same thread.
+ */
+ if (old_thread == new_thread) {
+ /*
+ * Mark thread interruptible.
+ * Run continuation if there is one.
+ */
+ thread_lock(new_thread);
+ new_thread->state &= ~TH_UNINT;
+ thread_unlock(new_thread);
+ thread_wakeup(TH_EV_STATE(new_thread));
+
+ if (continuation != thread_no_continuation) {
+ (void) spl0();
+ call_continuation(continuation);
+ /*NOTREACHED*/
+ }
+ return TRUE;
+ }
+
+ /*
+ * Check for stack-handoff: possible only when the old thread's
+ * current stack is not privileged and a continuation was given,
+ * so the old thread can give its stack to new_thread.
+ */
+ thread_lock(new_thread);
+ if ((old_thread->stack_privilege != current_stack()) &&
+ (continuation != thread_no_continuation))
+ {
+ switch (new_thread->state & TH_SWAP_STATE) {
+ case TH_SWAPPED:
+
+ new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
+ thread_unlock(new_thread);
+ thread_wakeup(TH_EV_STATE(new_thread));
+
+#if NCPUS > 1
+ new_thread->last_processor = current_processor();
+#endif /* NCPUS > 1 */
+
+ /*
+ * Set up ast context of new thread and
+ * switch to its timer.
+ */
+ ast_context(new_thread, cpu_number());
+ timer_switch(&new_thread->system_timer);
+
+ stack_handoff(old_thread, new_thread);
+
+ /*
+ * We can dispatch the old thread now.
+ * This is like thread_dispatch, except
+ * that the old thread is left swapped
+ * *without* freeing its stack.
+ * This path is also much more frequent
+ * than actual calls to thread_dispatch.
+ */
+
+ thread_lock(old_thread);
+ old_thread->swap_func = continuation;
+
+ switch (old_thread->state) {
+ case TH_RUN | TH_SUSP:
+ case TH_RUN | TH_SUSP | TH_HALTED:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ /*
+ * Suspend the thread
+ */
+ old_thread->state = (old_thread->state & ~TH_RUN)
+ | TH_SWAPPED;
+ if (old_thread->wake_active) {
+ old_thread->wake_active = FALSE;
+ thread_unlock(old_thread);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(old_thread));
+
+ /* Already unlocked: skip the unlock below. */
+ goto after_old_thread;
+ }
+ break;
+
+ case TH_RUN | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_UNINT:
+ case TH_RUN:
+ /*
+ * We can`t suspend the thread yet,
+ * or it`s still running.
+ * Put back on a run queue.
+ */
+ old_thread->state |= TH_SWAPPED;
+ thread_setrun(old_thread, FALSE);
+ break;
+
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT:
+ /*
+ * Waiting, and not suspendable.
+ */
+ old_thread->state = (old_thread->state & ~TH_RUN)
+ | TH_SWAPPED;
+ break;
+
+ case TH_RUN | TH_IDLE:
+ /*
+ * Drop idle thread -- it is already in
+ * idle_thread_array.
+ */
+ old_thread->state = TH_RUN | TH_IDLE | TH_SWAPPED;
+ break;
+
+ default:
+ state_panic(old_thread);
+ }
+ thread_unlock(old_thread);
+ after_old_thread:
+
+ /*
+ * call_continuation calls the continuation
+ * after resetting the current stack pointer
+ * to recover stack space. If we called
+ * the continuation directly, we would risk
+ * running out of stack.
+ */
+
+ counter(c_thread_invoke_hits++);
+ (void) spl0();
+ call_continuation(new_thread->swap_func);
+ /*NOTREACHED*/
+ return TRUE; /* help for the compiler */
+
+ case TH_SW_COMING_IN:
+ /*
+ * Waiting for a stack
+ */
+ thread_swapin(new_thread);
+ thread_unlock(new_thread);
+ counter(c_thread_invoke_misses++);
+ return FALSE;
+
+ case 0:
+ /*
+ * Already has a stack - can`t handoff.
+ * Fall through to the full context switch
+ * below with new_thread still locked.
+ */
+ break;
+ }
+ }
+
+ else {
+ /*
+ * Check that the thread is swapped-in.
+ */
+ if (new_thread->state & TH_SWAPPED) {
+ if ((new_thread->state & TH_SW_COMING_IN) ||
+ !stack_alloc_try(new_thread, thread_continue))
+ {
+ thread_swapin(new_thread);
+ thread_unlock(new_thread);
+ counter(c_thread_invoke_misses++);
+ return FALSE;
+ }
+ }
+ }
+
+ new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
+ thread_unlock(new_thread);
+ thread_wakeup(TH_EV_STATE(new_thread));
+
+ /*
+ * Thread is now interruptible.
+ */
+#if NCPUS > 1
+ new_thread->last_processor = current_processor();
+#endif /* NCPUS > 1 */
+
+ /*
+ * Set up ast context of new thread and switch to its timer.
+ */
+ ast_context(new_thread, cpu_number());
+ timer_switch(&new_thread->system_timer);
+
+ /*
+ * switch_context is machine-dependent. It does the
+ * machine-dependent components of a context-switch, like
+ * changing address spaces. It updates active_thread.
+ * It returns only if a continuation is not supplied.
+ */
+ counter(c_thread_invoke_csw++);
+ old_thread = switch_context(old_thread, continuation, new_thread);
+
+ /*
+ * We're back. Now old_thread is the thread that resumed
+ * us, and we have to dispatch it.
+ */
+ thread_dispatch(old_thread);
+
+ return TRUE;
+}
+
+/*
+ * thread_continue:
+ *
+ * Called when the current thread is given a new stack.
+ * Called at splsched.
+ *
+ * old_thread: the thread we switched away from, to be dispatched
+ *             (THREAD_NULL for the first thread on a processor).
+ * Never returns: ends by invoking the saved continuation.
+ */
+void thread_continue(
+ thread_t old_thread)
+{
+ continuation_t continuation = current_thread()->swap_func;
+
+ /*
+ * We must dispatch the old thread and then
+ * call the current thread's continuation.
+ * There might not be an old thread, if we are
+ * the first thread to run on this processor.
+ */
+
+ if (old_thread != THREAD_NULL)
+ thread_dispatch(old_thread);
+ (void) spl0();
+ (*continuation)();
+ /*NOTREACHED*/
+}
+
+
+/*
+ * thread_block:
+ *
+ * Block the current thread. If the thread is runnable
+ * then someone must have woken it up between its request
+ * to sleep and now. In this case, it goes back on a
+ * run queue.
+ *
+ * If a continuation is specified, then thread_block will
+ * attempt to discard the thread's kernel stack. When the
+ * thread resumes, it will execute the continuation function
+ * on a new kernel stack.
+ */
+
+void thread_block(
+ continuation_t continuation)
+{
+ thread_t thread = current_thread();
+ processor_t myprocessor = cpu_to_processor(cpu_number());
+ thread_t new_thread;
+ spl_t s;
+
+ check_simple_locks();
+
+ s = splsched();
+
+#if FAST_TAS
+ {
+ extern void recover_ras();
+
+ if (csw_needed(thread, myprocessor))
+ recover_ras(thread);
+ }
+#endif /* FAST_TAS */
+
+ ast_off(cpu_number(), AST_BLOCK);
+
+ do
+ new_thread = thread_select(myprocessor);
+ while (!thread_invoke(thread, continuation, new_thread));
+
+ splx(s);
+}
+
+/*
+ * thread_run:
+ *
+ * Switch directly from the current thread to a specified
+ * thread. Both the current and new threads must be
+ * runnable.
+ *
+ * If a continuation is specified, then thread_block will
+ * attempt to discard the current thread's kernel stack. When the
+ * thread resumes, it will execute the continuation function
+ * on a new kernel stack.
+ */
+void thread_run(
+ continuation_t continuation,
+ thread_t new_thread)
+{
+ thread_t thread = current_thread();
+ processor_t myprocessor = cpu_to_processor(cpu_number());
+ spl_t s;
+
+ check_simple_locks();
+
+ s = splsched();
+
+ /* If the target can't be invoked, fall back to normal selection. */
+ while (!thread_invoke(thread, continuation, new_thread))
+ new_thread = thread_select(myprocessor);
+
+ splx(s);
+}
+
+/*
+ * Dispatches a running thread that is not on a runq.
+ * Called at splsched.
+ *
+ * Settles the thread's state after a context switch away from it:
+ * frees its stack if a continuation was saved, then either
+ * suspends it, re-enqueues it, or leaves it waiting.
+ */
+
+void thread_dispatch(
+ thread_t thread)
+{
+ /*
+ * If we are discarding the thread's stack, we must do it
+ * before the thread has a chance to run.
+ */
+
+ thread_lock(thread);
+
+ if (thread->swap_func != thread_no_continuation) {
+ assert((thread->state & TH_SWAP_STATE) == 0);
+ thread->state |= TH_SWAPPED;
+ stack_free(thread);
+ }
+
+ switch (thread->state &~ TH_SWAP_STATE) {
+ case TH_RUN | TH_SUSP:
+ case TH_RUN | TH_SUSP | TH_HALTED:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ /*
+ * Suspend the thread
+ */
+ thread->state &= ~TH_RUN;
+ if (thread->wake_active) {
+ /* Notify anyone waiting in thread_wait. */
+ thread->wake_active = FALSE;
+ thread_unlock(thread);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(thread));
+ return;
+ }
+ break;
+
+ case TH_RUN | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_UNINT:
+ case TH_RUN:
+ /*
+ * No reason to stop. Put back on a run queue.
+ */
+ thread_setrun(thread, FALSE);
+ break;
+
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT:
+ /*
+ * Waiting, and not suspended.
+ */
+ thread->state &= ~TH_RUN;
+ break;
+
+ case TH_RUN | TH_IDLE:
+ /*
+ * Drop idle thread -- it is already in
+ * idle_thread_array.
+ */
+ break;
+
+ default:
+ state_panic(thread);
+ }
+ thread_unlock(thread);
+}
+
+
+/*
+ * Define shifts for simulating (5/8)**n
+ */
+
+shift_data_t wait_shift[32] = {
+ {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
+ {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
+ {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
+ {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
+
+/*
+ * do_priority_computation:
+ *
+ * Calculate new priority for thread based on its base priority plus
+ * accumulated usage. PRI_SHIFT and PRI_SHIFT_2 convert from
+ * usage to priorities. SCHED_SHIFT converts for the scaling
+ * of the sched_usage field by SCHED_SCALE. This scaling comes
+ * from the multiplication by sched_load (thread_timer_delta)
+ * in sched.h. sched_load is calculated as a scaled overload
+ * factor in compute_mach_factor (mach_factor.c).
+ *
+ * The result is clamped to NRQS - 1 (lowest priority). Three
+ * variants exist depending on whether PRI_SHIFT_2 is defined
+ * and on its sign (negative values subtract a correction term).
+ */
+
+#ifdef PRI_SHIFT_2
+#if PRI_SHIFT_2 > 0
+#define do_priority_computation(th, pri) \
+ MACRO_BEGIN \
+ (pri) = (th)->priority /* start with base priority */ \
+ + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \
+ + ((th)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT)); \
+ if ((pri) > NRQS - 1) (pri) = NRQS - 1; \
+ MACRO_END
+#else /* PRI_SHIFT_2 */
+#define do_priority_computation(th, pri) \
+ MACRO_BEGIN \
+ (pri) = (th)->priority /* start with base priority */ \
+ + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \
+ - ((th)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2)); \
+ if ((pri) > NRQS - 1) (pri) = NRQS - 1; \
+ MACRO_END
+#endif /* PRI_SHIFT_2 */
+#else /* defined(PRI_SHIFT_2) */
+#define do_priority_computation(th, pri) \
+ MACRO_BEGIN \
+ (pri) = (th)->priority /* start with base priority */ \
+ + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)); \
+ if ((pri) > NRQS - 1) (pri) = NRQS - 1; \
+ MACRO_END
+#endif /* defined(PRI_SHIFT_2) */
+
+/*
+ * compute_priority:
+ *
+ * Compute the effective priority of the specified thread.
+ * The effective priority computation is as follows:
+ *
+ * Take the base priority for this thread and add
+ * to it an increment derived from its cpu_usage.
+ *
+ * resched: passed through to set_pri to request rescheduling.
+ *
+ * The thread *must* be locked by the caller.
+ */
+
+void compute_priority(
+ thread_t thread,
+ boolean_t resched)
+{
+ int pri;
+
+#if MACH_FIXPRI
+ if (thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+ do_priority_computation(thread, pri);
+ /* While depressed, stash the computed value for later restore. */
+ if (thread->depress_priority < 0)
+ set_pri(thread, pri, resched);
+ else
+ thread->depress_priority = pri;
+#if MACH_FIXPRI
+ }
+ else {
+ /* Fixed priority: effective priority is the base priority. */
+ set_pri(thread, thread->priority, resched);
+ }
+#endif /* MACH_FIXPRI */
+}
+
+/*
+ * compute_my_priority:
+ *
+ * Version of compute priority for current thread or thread
+ * being manipulated by scheduler (going on or off a runq).
+ * Only used for priority updates. Policy or priority changes
+ * must call compute_priority above. Caller must have thread
+ * locked and know it is timesharing and not depressed.
+ */
+
+void compute_my_priority(
+ thread_t thread)
+{
+ int temp_pri;
+
+ /* Directly update sched_pri; no runq move, unlike set_pri. */
+ do_priority_computation(thread,temp_pri);
+ thread->sched_pri = temp_pri;
+}
+
+/*
+ * recompute_priorities:
+ *
+ * Update the priorities of all threads periodically.
+ * Timeout callback: advances sched_tick, re-arms itself for one
+ * second (hz), and kicks the scheduler thread to do the actual
+ * per-thread work outside interrupt level.
+ *
+ * param: unused (timeout-callback signature).
+ */
+void recompute_priorities(void *param)
+{
+ sched_tick++; /* age usage one more time */
+ set_timeout(&recompute_priorities_timer, hz);
+ /*
+ * Wakeup scheduler thread.
+ */
+ if (sched_thread_id != THREAD_NULL) {
+ clear_wait(sched_thread_id, THREAD_AWAKENED, FALSE);
+ }
+}
+
+/*
+ * update_priority
+ *
+ * Cause the priority computation of a thread that has been
+ * sleeping or suspended to "catch up" with the system. Thread
+ * *MUST* be locked by caller. If thread is running, then this
+ * can only be called by the thread on itself.
+ *
+ * Caller must ensure sched_stamp != sched_tick (ticks nonzero).
+ */
+void update_priority(
+ thread_t thread)
+{
+ unsigned int ticks;
+ shift_t shiftp;
+ int temp_pri;
+
+ ticks = sched_tick - thread->sched_stamp;
+
+ assert(ticks != 0);
+
+ /*
+ * If asleep for more than 30 seconds forget all
+ * cpu_usage, else catch up on missed aging.
+ * 5/8 ** n is approximated by the two shifts
+ * in the wait_shift array.
+ */
+ thread->sched_stamp += ticks;
+ thread_timer_delta(thread);
+ if (ticks > 30) {
+ thread->cpu_usage = 0;
+ thread->sched_usage = 0;
+ }
+ else {
+ thread->cpu_usage += thread->cpu_delta;
+ thread->sched_usage += thread->sched_delta;
+ /* ticks is in [1,30] here, well inside wait_shift[32]. */
+ shiftp = &wait_shift[ticks];
+ if (shiftp->shift2 > 0) {
+ thread->cpu_usage =
+ (thread->cpu_usage >> shiftp->shift1) +
+ (thread->cpu_usage >> shiftp->shift2);
+ thread->sched_usage =
+ (thread->sched_usage >> shiftp->shift1) +
+ (thread->sched_usage >> shiftp->shift2);
+ }
+ else {
+ /* Negative shift2 encodes a subtractive correction. */
+ thread->cpu_usage =
+ (thread->cpu_usage >> shiftp->shift1) -
+ (thread->cpu_usage >> -(shiftp->shift2));
+ thread->sched_usage =
+ (thread->sched_usage >> shiftp->shift1) -
+ (thread->sched_usage >> -(shiftp->shift2));
+ }
+ }
+ thread->cpu_delta = 0;
+ thread->sched_delta = 0;
+ /*
+ * Recompute priority if appropriate.
+ */
+ if (
+#if MACH_FIXPRI
+ (thread->policy == POLICY_TIMESHARE) &&
+#endif /* MACH_FIXPRI */
+ (thread->depress_priority < 0)) {
+ do_priority_computation(thread, temp_pri);
+ thread->sched_pri = temp_pri;
+ }
+}
+
+/*
+ * run_queue_enqueue macro for thread_setrun().
+ *
+ * Appends (th) to the tail of rq's queue for its sched_pri,
+ * updating the low-priority hint and count under the runq lock.
+ * The DEBUG variant additionally runs runq consistency checks.
+ */
+#if DEBUG
+#define run_queue_enqueue(rq, th) \
+ MACRO_BEGIN \
+ unsigned int whichq; \
+ \
+ whichq = (th)->sched_pri; \
+ if (whichq >= NRQS) { \
+ printf("thread_setrun: pri too high (%d)\n", (th)->sched_pri); \
+ whichq = NRQS - 1; \
+ } \
+ \
+ runq_lock(rq); /* lock the run queue */ \
+ checkrq((rq), "thread_setrun: before adding thread"); \
+ enqueue_tail(&(rq)->runq[whichq], &((th)->links)); \
+ \
+ if (whichq < (rq)->low || (rq)->count == 0) \
+ (rq)->low = whichq; /* minimize */ \
+ \
+ (rq)->count++; \
+ (th)->runq = (rq); \
+ thread_check((th), (rq)); \
+ checkrq((rq), "thread_setrun: after adding thread"); \
+ runq_unlock(rq); \
+ MACRO_END
+#else /* DEBUG */
+#define run_queue_enqueue(rq, th) \
+ MACRO_BEGIN \
+ unsigned int whichq; \
+ \
+ whichq = (th)->sched_pri; \
+ if (whichq >= NRQS) { \
+ printf("thread_setrun: pri too high (%d)\n", (th)->sched_pri); \
+ whichq = NRQS - 1; \
+ } \
+ \
+ runq_lock(rq); /* lock the run queue */ \
+ enqueue_tail(&(rq)->runq[whichq], &((th)->links)); \
+ \
+ if (whichq < (rq)->low || (rq)->count == 0) \
+ (rq)->low = whichq; /* minimize */ \
+ \
+ (rq)->count++; \
+ (th)->runq = (rq); \
+ runq_unlock(rq); \
+ MACRO_END
+#endif /* DEBUG */
+/*
+ * thread_setrun:
+ *
+ * Make thread runnable; dispatch directly onto an idle processor
+ * if possible. Else put on appropriate run queue (processor
+ * if bound, else processor set. Caller must have lock on thread.
+ * This is always called at splsched.
+ *
+ * may_preempt: if TRUE and th outranks the current thread,
+ * clear first_quantum and request an AST to force a switch.
+ */
+
+void thread_setrun(
+ thread_t th,
+ boolean_t may_preempt)
+{
+ processor_t processor;
+ run_queue_t rq;
+#if NCPUS > 1
+ processor_set_t pset;
+#endif /* NCPUS > 1 */
+
+ /*
+ * Update priority if needed.
+ */
+ if (th->sched_stamp != sched_tick) {
+ update_priority(th);
+ }
+
+ assert(th->runq == RUN_QUEUE_NULL);
+
+#if NCPUS > 1
+ /*
+ * Try to dispatch the thread directly onto an idle processor.
+ */
+ if ((processor = th->bound_processor) == PROCESSOR_NULL) {
+ /*
+ * Not bound, any processor in the processor set is ok.
+ */
+ pset = th->processor_set;
+#if HW_FOOTPRINT
+ /*
+ * But first check the last processor it ran on.
+ */
+ processor = th->last_processor;
+ if (processor->state == PROCESSOR_IDLE) {
+ processor_lock(processor);
+ simple_lock(&pset->idle_lock);
+ /* Re-check state now that we hold the locks. */
+ if ((processor->state == PROCESSOR_IDLE)
+#if MACH_HOST
+ && (processor->processor_set == pset)
+#endif /* MACH_HOST */
+ ) {
+ queue_remove(&pset->idle_queue, processor,
+ processor_t, processor_queue);
+ pset->idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ simple_unlock(&pset->idle_lock);
+ processor_unlock(processor);
+ if (processor != current_processor())
+ cause_ast_check(processor);
+ return;
+ }
+ simple_unlock(&pset->idle_lock);
+ processor_unlock(processor);
+ }
+#endif /* HW_FOOTPRINT */
+
+ /* Unlocked peek first; confirm under the idle lock. */
+ if (pset->idle_count > 0) {
+ simple_lock(&pset->idle_lock);
+ if (pset->idle_count > 0) {
+ processor = (processor_t) queue_first(&pset->idle_queue);
+ queue_remove(&(pset->idle_queue), processor, processor_t,
+ processor_queue);
+ pset->idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ simple_unlock(&pset->idle_lock);
+ if (processor != current_processor())
+ cause_ast_check(processor);
+ return;
+ }
+ simple_unlock(&pset->idle_lock);
+ }
+ rq = &(pset->runq);
+ run_queue_enqueue(rq,th);
+ /*
+ * Preempt check
+ */
+ if (may_preempt &&
+#if MACH_HOST
+ (pset == current_processor()->processor_set) &&
+#endif /* MACH_HOST */
+ (current_thread()->sched_pri > th->sched_pri)) {
+ /*
+ * Turn off first_quantum to allow csw.
+ */
+ current_processor()->first_quantum = FALSE;
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+ }
+ else {
+ /*
+ * Bound, can only run on bound processor. Have to lock
+ * processor here because it may not be the current one.
+ */
+ if (processor->state == PROCESSOR_IDLE) {
+ processor_lock(processor);
+ pset = processor->processor_set;
+ simple_lock(&pset->idle_lock);
+ if (processor->state == PROCESSOR_IDLE) {
+ queue_remove(&pset->idle_queue, processor,
+ processor_t, processor_queue);
+ pset->idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ simple_unlock(&pset->idle_lock);
+ processor_unlock(processor);
+ if (processor != current_processor())
+ cause_ast_check(processor);
+ return;
+ }
+ simple_unlock(&pset->idle_lock);
+ processor_unlock(processor);
+ }
+ rq = &(processor->runq);
+ run_queue_enqueue(rq,th);
+
+ /*
+ * Cause ast on processor if processor is on line.
+ */
+ if (processor == current_processor()) {
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+ else if ((processor->state != PROCESSOR_OFF_LINE)) {
+ cause_ast_check(processor);
+ }
+ }
+#else /* NCPUS > 1 */
+ /*
+ * XXX should replace queue with a boolean in this case.
+ */
+ if (default_pset.idle_count > 0) {
+ processor = (processor_t) queue_first(&default_pset.idle_queue);
+ queue_remove(&default_pset.idle_queue, processor,
+ processor_t, processor_queue);
+ default_pset.idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ return;
+ }
+ if (th->bound_processor == PROCESSOR_NULL) {
+ rq = &(default_pset.runq);
+ }
+ else {
+ rq = &(master_processor->runq);
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+ run_queue_enqueue(rq,th);
+
+ /*
+ * Preempt check
+ */
+ if (may_preempt && (current_thread()->sched_pri > th->sched_pri)) {
+ /*
+ * Turn off first_quantum to allow context switch.
+ */
+ current_processor()->first_quantum = FALSE;
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+#endif /* NCPUS > 1 */
+}
+
+/*
+ * set_pri:
+ *
+ * Set the priority of the specified thread to the specified
+ * priority. This may cause the thread to change queues.
+ *
+ * resched: if TRUE and the thread was queued, re-place it via
+ * thread_setrun (allowing preemption); else re-enqueue in place.
+ *
+ * The thread *must* be locked by the caller.
+ */
+
+void set_pri(
+ thread_t th,
+ int pri,
+ boolean_t resched)
+{
+ struct run_queue *rq;
+
+ /* Pull the thread off its runq (if any) before changing sched_pri. */
+ rq = rem_runq(th);
+ th->sched_pri = pri;
+ if (rq != RUN_QUEUE_NULL) {
+ if (resched)
+ thread_setrun(th, TRUE);
+ else
+ run_queue_enqueue(rq, th);
+ }
+}
+
+/*
+ * rem_runq:
+ *
+ * Remove a thread from its run queue.
+ * The run queue that the process was on is returned
+ * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
+ * before calling this routine. Unusual locking protocol on runq
+ * field in thread structure makes this code interesting; see thread.h.
+ */
+
+struct run_queue *rem_runq(
+ thread_t th)
+{
+ struct run_queue *rq;
+
+ /* Unlocked read; validated below after taking the runq lock. */
+ rq = th->runq;
+ /*
+ * If rq is RUN_QUEUE_NULL, the thread will stay out of the
+ * run_queues because the caller locked the thread. Otherwise
+ * the thread is on a runq, but could leave.
+ */
+ if (rq != RUN_QUEUE_NULL) {
+ runq_lock(rq);
+#if DEBUG
+ checkrq(rq, "rem_runq: at entry");
+#endif /* DEBUG */
+ if (rq == th->runq) {
+ /*
+ * Thread is in a runq and we have a lock on
+ * that runq.
+ */
+#if DEBUG
+ checkrq(rq, "rem_runq: before removing thread");
+ thread_check(th, rq);
+#endif /* DEBUG */
+ remqueue(&rq->runq[0], (queue_entry_t) th);
+ rq->count--;
+#if DEBUG
+ checkrq(rq, "rem_runq: after removing thread");
+#endif /* DEBUG */
+ th->runq = RUN_QUEUE_NULL;
+ runq_unlock(rq);
+ }
+ else {
+ /*
+ * The thread left the runq before we could
+ * lock the runq. It is not on a runq now, and
+ * can't move again because this routine's
+ * caller locked the thread.
+ */
+ runq_unlock(rq);
+ rq = RUN_QUEUE_NULL;
+ }
+ }
+
+ return rq;
+}
+
+
+/*
+ * choose_thread:
+ *
+ * Choose a thread to execute. The thread chosen is removed
+ * from its run queue. Note that this requires only that the runq
+ * lock be held.
+ *
+ * Strategy:
+ * Check processor runq first; if anything found, run it.
+ * Else check pset runq; if nothing found, return idle thread.
+ *
+ * Second line of strategy is implemented by choose_pset_thread.
+ * This is only called on processor startup and when thread_block
+ * thinks there's something in the processor runq.
+ */
+
+thread_t choose_thread(
+ processor_t myprocessor)
+{
+ thread_t th;
+ queue_t q;
+ run_queue_t runq;
+ int i;
+ processor_set_t pset;
+
+ runq = &myprocessor->runq;
+
+ simple_lock(&runq->lock);
+ if (runq->count > 0) {
+ /* Scan upward from the low-priority hint. */
+ q = runq->runq + runq->low;
+ for (i = runq->low; i < NRQS ; i++, q++) {
+ if (!queue_empty(q)) {
+ th = (thread_t) dequeue_head(q);
+ th->runq = RUN_QUEUE_NULL;
+ runq->count--;
+ runq->low = i;
+ simple_unlock(&runq->lock);
+ return th;
+ }
+ }
+ /* count > 0 but all queues empty: runq is corrupt. */
+ panic("choose_thread");
+ /*NOTREACHED*/
+ }
+ simple_unlock(&runq->lock);
+
+ pset = myprocessor->processor_set;
+
+ /* choose_pset_thread expects and releases the pset runq lock. */
+ simple_lock(&pset->runq.lock);
+ return choose_pset_thread(myprocessor,pset);
+}
+
+/*
+ * choose_pset_thread: choose a thread from processor_set runq or
+ * set processor idle and choose its idle thread.
+ *
+ * Caller must be at splsched and have a lock on the runq. This
+ * lock is released by this routine. myprocessor is always the current
+ * processor, and pset must be its processor set.
+ * This routine chooses and removes a thread from the runq if there
+ * is one (and returns it), else it sets the processor idle and
+ * returns its idle thread.
+ */
+
+thread_t choose_pset_thread(
+ processor_t myprocessor,
+ processor_set_t pset)
+{
+ run_queue_t runq;
+ thread_t th;
+ queue_t q;
+ int i;
+
+ runq = &pset->runq;
+
+ if (runq->count > 0) {
+ /* Scan upward from the low-priority hint. */
+ q = runq->runq + runq->low;
+ for (i = runq->low; i < NRQS ; i++, q++) {
+ if (!queue_empty(q)) {
+ th = (thread_t) dequeue_head(q);
+ th->runq = RUN_QUEUE_NULL;
+ runq->count--;
+ /*
+ * For POLICY_FIXEDPRI, runq->low must be
+ * accurate!
+ */
+#if MACH_FIXPRI
+ if ((runq->count > 0) &&
+ (pset->policies & POLICY_FIXEDPRI)) {
+ while (queue_empty(q)) {
+ q++;
+ i++;
+ }
+ }
+#endif /* MACH_FIXPRI */
+ runq->low = i;
+#if DEBUG
+ checkrq(runq, "choose_pset_thread");
+#endif /* DEBUG */
+ simple_unlock(&runq->lock);
+ return th;
+ }
+ }
+ /* count > 0 but all queues empty: runq is corrupt. */
+ panic("choose_pset_thread");
+ /*NOTREACHED*/
+ }
+ simple_unlock(&runq->lock);
+
+ /*
+ * Nothing is runnable, so set this processor idle if it
+ * was running. If it was in an assignment or shutdown,
+ * leave it alone. Return its idle thread.
+ */
+ simple_lock(&pset->idle_lock);
+ if (myprocessor->state == PROCESSOR_RUNNING) {
+ myprocessor->state = PROCESSOR_IDLE;
+ /*
+ * XXX Until it goes away, put master on end of queue, others
+ * XXX on front so master gets used last.
+ */
+ if (myprocessor == master_processor) {
+ queue_enter(&(pset->idle_queue), myprocessor,
+ processor_t, processor_queue);
+ }
+ else {
+ queue_enter_first(&(pset->idle_queue), myprocessor,
+ processor_t, processor_queue);
+ }
+
+ pset->idle_count++;
+ }
+ simple_unlock(&pset->idle_lock);
+
+ return myprocessor->idle_thread;
+}
+
+/*
+ * no_dispatch_count counts number of times processors go non-idle
+ * without being dispatched. This should be very rare.
+ * Diagnostic counter only; incremented in idle_thread_continue.
+ */
+int no_dispatch_count = 0;
+
+/*
+ * This is the idle thread, which just looks for other threads
+ * to execute.
+ *
+ * Loop body: spin (or machine_idle) until either next_thread is
+ * posted by thread_setrun or a runq becomes non-empty, then act
+ * on the processor's state. Never returns.
+ */
+
+static void __attribute__((noreturn)) idle_thread_continue(void)
+{
+ processor_t myprocessor;
+ volatile thread_t *threadp;
+ volatile int *gcount;
+ volatile int *lcount;
+ thread_t new_thread;
+ int state;
+ int mycpu;
+ spl_t s;
+
+ mycpu = cpu_number();
+ myprocessor = current_processor();
+ /* volatile: these cells are written by other processors. */
+ threadp = (volatile thread_t *) &myprocessor->next_thread;
+ lcount = (volatile int *) &myprocessor->runq.count;
+
+ while (TRUE) {
+#ifdef MARK_CPU_IDLE
+ MARK_CPU_IDLE(mycpu);
+#endif /* MARK_CPU_IDLE */
+
+#if MACH_HOST
+ gcount = (volatile int *)
+ &myprocessor->processor_set->runq.count;
+#else /* MACH_HOST */
+ gcount = (volatile int *) &default_pset.runq.count;
+#endif /* MACH_HOST */
+
+/*
+ * This cpu will be dispatched (by thread_setrun) by setting next_thread
+ * to the value of the thread to run next. Also check runq counts.
+ */
+ while ((*threadp == (volatile thread_t)THREAD_NULL) &&
+ (*gcount == 0) && (*lcount == 0)) {
+
+ /* check for ASTs while we wait */
+
+ if (need_ast[mycpu] &~ AST_SCHEDULING) {
+ (void) splsched();
+ /* don't allow scheduling ASTs */
+ need_ast[mycpu] &= ~AST_SCHEDULING;
+ ast_taken();
+ /* back at spl0 */
+ }
+
+ /*
+ * machine_idle is a machine dependent function,
+ * to conserve power.
+ */
+#if POWER_SAVE
+ machine_idle(mycpu);
+#endif /* POWER_SAVE */
+ }
+
+#ifdef MARK_CPU_ACTIVE
+ MARK_CPU_ACTIVE(mycpu);
+#endif /* MARK_CPU_ACTIVE */
+
+ s = splsched();
+
+ /*
+ * This is not a switch statement to avoid the
+ * bounds checking code in the common case.
+ */
+retry:
+ state = myprocessor->state;
+ if (state == PROCESSOR_DISPATCHING) {
+ /*
+ * Commmon case -- cpu dispatched.
+ * Claim the posted thread and clear the mailbox.
+ */
+ new_thread = (thread_t) *threadp;
+ *threadp = (volatile thread_t) THREAD_NULL;
+ myprocessor->state = PROCESSOR_RUNNING;
+ /*
+ * set up quantum for new thread.
+ */
+#if MACH_FIXPRI
+ if (new_thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+ /*
+ * Just use set quantum. No point in
+ * checking for shorter local runq quantum;
+ * csw_needed will handle correctly.
+ */
+#if MACH_HOST
+ myprocessor->quantum = new_thread->
+ processor_set->set_quantum;
+#else /* MACH_HOST */
+ myprocessor->quantum =
+ default_pset.set_quantum;
+#endif /* MACH_HOST */
+
+#if MACH_FIXPRI
+ }
+ else {
+ /*
+ * POLICY_FIXEDPRI
+ */
+ myprocessor->quantum = new_thread->sched_data;
+ }
+#endif /* MACH_FIXPRI */
+ myprocessor->first_quantum = TRUE;
+ counter(c_idle_thread_handoff++);
+ thread_run(idle_thread_continue, new_thread);
+ }
+ else if (state == PROCESSOR_IDLE) {
+ processor_set_t pset;
+
+ pset = myprocessor->processor_set;
+ simple_lock(&pset->idle_lock);
+ if (myprocessor->state != PROCESSOR_IDLE) {
+ /*
+ * Something happened, try again.
+ */
+ simple_unlock(&pset->idle_lock);
+ goto retry;
+ }
+ /*
+ * Processor was not dispatched (Rare).
+ * Set it running again.
+ */
+ no_dispatch_count++;
+ pset->idle_count--;
+ queue_remove(&pset->idle_queue, myprocessor,
+ processor_t, processor_queue);
+ myprocessor->state = PROCESSOR_RUNNING;
+ simple_unlock(&pset->idle_lock);
+ counter(c_idle_thread_block++);
+ thread_block(idle_thread_continue);
+ }
+ else if ((state == PROCESSOR_ASSIGN) ||
+ (state == PROCESSOR_SHUTDOWN)) {
+ /*
+ * Changing processor sets, or going off-line.
+ * Release next_thread if there is one. Actual
+ * thread to run is on a runq.
+ */
+ if ((new_thread = (thread_t)*threadp)!= THREAD_NULL) {
+ *threadp = (volatile thread_t) THREAD_NULL;
+ thread_lock(new_thread);
+ thread_setrun(new_thread, FALSE);
+ thread_unlock(new_thread);
+ }
+
+ counter(c_idle_thread_block++);
+ thread_block(idle_thread_continue);
+ }
+ else {
+ printf(" Bad processor state %d (Cpu %d)\n",
+ cpu_state(mycpu), mycpu);
+ panic("idle_thread");
+ }
+
+ (void) splx(s);
+ }
+}
+
+void idle_thread(void)
+{
+ thread_t self = current_thread();
+ spl_t s;
+
+ stack_privilege(self);
+
+ s = splsched();
+ self->priority = NRQS-1;
+ self->sched_pri = NRQS-1;
+
+ /*
+ * Set the idle flag to indicate that this is an idle thread,
+ * enter ourselves in the idle array, and thread_block() to get
+ * out of the run queues (and set the processor idle when we
+ * run next time).
+ */
+ thread_lock(self);
+ self->state |= TH_IDLE;
+ thread_unlock(self);
+ current_processor()->idle_thread = self;
+ (void) splx(s);
+
+ counter(c_idle_thread_block++);
+ thread_block(idle_thread_continue);
+ idle_thread_continue();
+ /*NOTREACHED*/
+}
+
+/*
+ * sched_thread: scheduler thread.
+ *
+ * This thread handles periodic calculations in the scheduler that
+ * we don't want to do at interrupt level. This allows us to
+ * avoid blocking.
+ */
+static void sched_thread_continue(void)
+{
+ while (TRUE) {
+ (void) compute_mach_factor();
+
+ /*
+ * Check for stuck threads. This can't be done off of
+ * the callout queue because it requires operations that
+ * can't be used from interrupt level.
+ */
+ if (sched_tick & 1)
+ do_thread_scan();
+
+ assert_wait((event_t) 0, FALSE);
+ counter(c_sched_thread_block++);
+ thread_block(sched_thread_continue);
+ }
+}
+
+void sched_thread(void)
+{
+ sched_thread_id = current_thread();
+
+ /*
+ * Sleep on event 0, recompute_priorities() will awaken
+ * us by calling clear_wait().
+ */
+ assert_wait((event_t) 0, FALSE);
+ counter(c_sched_thread_block++);
+ thread_block(sched_thread_continue);
+ sched_thread_continue();
+ /*NOTREACHED*/
+}
+
+#define MAX_STUCK_THREADS 16
+
+/*
+ * do_thread_scan: scan for stuck threads. A thread is stuck if
+ * it is runnable but its priority is so low that it has not
+ * run for several seconds. Its priority should be higher, but
+ * won't be until it runs and calls update_priority. The scanner
+ * finds these threads and does the updates.
+ *
+ * Scanner runs in two passes. Pass one squirrels likely
+ * thread ids away in an array, and removes them from the run queue.
+ * Pass two does the priority updates. This is necessary because
+ * the run queue lock is required for the candidate scan, but
+ * cannot be held during updates [set_pri will deadlock].
+ *
+ * Array length should be enough so that restart isn't necessary,
+ * but restart logic is included. Does not scan processor runqs.
+ *
+ */
+
+boolean_t do_thread_scan_debug = FALSE;
+
+thread_t stuck_threads[MAX_STUCK_THREADS];
+int stuck_count = 0;
+
+/*
+ * do_runq_scan is the guts of pass 1. It scans a runq for
+ * stuck threads. A boolean is returned indicating whether
+ * it ran out of space.
+ */
+
+static boolean_t
+do_runq_scan(
+ run_queue_t runq)
+{
+ spl_t s;
+ queue_t q;
+ thread_t thread;
+ int count;
+
+ s = splsched();
+ simple_lock(&runq->lock);
+ if((count = runq->count) > 0) {
+ q = runq->runq + runq->low;
+ while (count > 0) {
+ thread = (thread_t) queue_first(q);
+ while (!queue_end(q, (queue_entry_t) thread)) {
+ /*
+ * Get the next thread now, since we may
+ * remove this thread from the run queue.
+ */
+ thread_t next = (thread_t) queue_next(&thread->links);
+
+ if ((thread->state & TH_SCHED_STATE) == TH_RUN &&
+ sched_tick - thread->sched_stamp > 1) {
+ /*
+ * Stuck, save its id for later.
+ */
+ if (stuck_count == MAX_STUCK_THREADS) {
+ /*
+ * !@#$% No more room.
+ */
+ simple_unlock(&runq->lock);
+ splx(s);
+ return TRUE;
+ }
+ /*
+ * We can`t take the thread_lock here,
+ * since we already have the runq lock.
+ * So we can`t grab a reference to the
+ * thread. However, a thread that is
+ * in RUN state cannot be deallocated
+ * until it stops running. If it isn`t
+ * on the runq, then thread_halt cannot
+ * see it. So we remove the thread
+ * from the runq to make it safe.
+ */
+ remqueue(q, (queue_entry_t) thread);
+ runq->count--;
+ thread->runq = RUN_QUEUE_NULL;
+
+ stuck_threads[stuck_count++] = thread;
+if (do_thread_scan_debug)
+ printf("do_runq_scan: adding thread %p\n", thread);
+ }
+ count--;
+ thread = next;
+ }
+ q++;
+ }
+ }
+ simple_unlock(&runq->lock);
+ splx(s);
+
+ return FALSE;
+}
+
+void do_thread_scan(void)
+{
+ spl_t s;
+ boolean_t restart_needed = 0;
+ thread_t thread;
+ int i;
+#if MACH_HOST
+ processor_set_t pset;
+#endif /* MACH_HOST */
+
+ do {
+#if MACH_HOST
+ simple_lock(&all_psets_lock);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if ((restart_needed = do_runq_scan(&pset->runq)))
+ break;
+ }
+ simple_unlock(&all_psets_lock);
+#else /* MACH_HOST */
+ restart_needed = do_runq_scan(&default_pset.runq);
+#endif /* MACH_HOST */
+ if (!restart_needed) {
+ for (i = 0; i < smp_get_numcpus(); i++) {
+ if ((restart_needed = do_runq_scan(&cpu_to_processor(i)->runq)))
+ break;
+ }
+ }
+
+ /*
+ * Ok, we now have a collection of candidates -- fix them.
+ */
+
+ while (stuck_count > 0) {
+ thread = stuck_threads[--stuck_count];
+ stuck_threads[stuck_count] = THREAD_NULL;
+ s = splsched();
+ thread_lock(thread);
+ if ((thread->state & TH_SCHED_STATE) == TH_RUN) {
+ /*
+ * Do the priority update. Call
+ * thread_setrun because thread is
+ * off the run queues.
+ */
+ update_priority(thread);
+ thread_setrun(thread, TRUE);
+ }
+ thread_unlock(thread);
+ splx(s);
+ }
+ } while (restart_needed);
+}
+
+#if DEBUG
+void checkrq(
+ run_queue_t rq,
+ const char *msg)
+{
+ queue_t q1;
+ int i, j;
+ queue_entry_t e;
+ int low;
+
+ low = -1;
+ j = 0;
+ q1 = rq->runq;
+ for (i = 0; i < NRQS; i++) {
+ if (q1->next == q1) {
+ if (q1->prev != q1)
+ panic("checkrq: empty at %s", msg);
+ }
+ else {
+ if (low == -1)
+ low = i;
+
+ for (e = q1->next; e != q1; e = e->next) {
+ j++;
+ if (e->next->prev != e)
+ panic("checkrq-2 at %s", msg);
+ if (e->prev->next != e)
+ panic("checkrq-3 at %s", msg);
+ }
+ }
+ q1++;
+ }
+ if (j != rq->count)
+ panic("checkrq: count wrong at %s", msg);
+ if (rq->count != 0 && low < rq->low)
+ panic("checkrq: low wrong at %s", msg);
+}
+
+void thread_check(
+ thread_t th,
+ run_queue_t rq)
+{
+ unsigned int whichq;
+
+ whichq = th->sched_pri;
+ if (whichq >= NRQS) {
+ printf("thread_check: priority too high\n");
+ whichq = NRQS-1;
+ }
+ if ((th->links.next == &rq->runq[whichq]) &&
+ (rq->runq[whichq].prev != (queue_entry_t)th))
+ panic("thread_check");
+}
+#endif /* DEBUG */
diff --git a/kern/sched_prim.h b/kern/sched_prim.h
new file mode 100644
index 0000000..c250b22
--- /dev/null
+++ b/kern/sched_prim.h
@@ -0,0 +1,189 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sched_prim.h
+ * Author: David Golub
+ *
+ * Scheduling primitive definitions file
+ *
+ */
+
+#ifndef _KERN_SCHED_PRIM_H_
+#define _KERN_SCHED_PRIM_H_
+
+#include <mach/boolean.h>
+#include <mach/message.h> /* for mach_msg_timeout_t */
+#include <kern/lock.h>
+#include <kern/kern_types.h> /* for thread_t */
+
+/*
+ * Possible results of assert_wait - returned in
+ * current_thread()->wait_result.
+ */
+#define THREAD_AWAKENED 0 /* normal wakeup */
+#define THREAD_TIMED_OUT 1 /* timeout expired */
+#define THREAD_INTERRUPTED 2 /* interrupted by clear_wait */
+#define THREAD_RESTART 3 /* restart operation entirely */
+
+typedef void *event_t; /* wait event */
+
+typedef void (*continuation_t)(void); /* continuation */
+
+#define thread_no_continuation ((continuation_t) 0) /* no continuation */
+
+/*
+ * Exported interface to sched_prim.c.
+ */
+
+extern void sched_init(void);
+
+extern void assert_wait(
+ event_t event,
+ boolean_t interruptible);
+extern void clear_wait(
+ thread_t thread,
+ int result,
+ boolean_t interrupt_only);
+extern void thread_sleep(
+ event_t event,
+ simple_lock_t lock,
+ boolean_t interruptible);
+extern void thread_wakeup(void); /* for function pointers */
+extern boolean_t thread_wakeup_prim(
+ event_t event,
+ boolean_t one_thread,
+ int result);
+extern boolean_t thread_invoke(
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread);
+extern void thread_block(
+ continuation_t continuation);
+extern void thread_run(
+ continuation_t continuation,
+ thread_t new_thread);
+extern void thread_set_timeout(
+ int t);
+extern void thread_setrun(
+ thread_t thread,
+ boolean_t may_preempt);
+extern void thread_dispatch(
+ thread_t thread);
+extern void thread_continue(
+ thread_t old_thread);
+extern void thread_go(
+ thread_t thread);
+extern void thread_will_wait(
+ thread_t thread);
+extern void thread_will_wait_with_timeout(
+ thread_t thread,
+ mach_msg_timeout_t msecs);
+extern boolean_t thread_handoff(
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread);
+extern void recompute_priorities(void *param);
+extern void update_priority(
+ thread_t thread);
+extern void compute_my_priority(
+ thread_t thread);
+extern void thread_bind(
+ thread_t thread,
+ processor_t processor);
+extern void compute_priority(
+ thread_t thread,
+ boolean_t resched);
+extern void thread_timeout_setup(
+ thread_t thread);
+
+/*
+ * Routines defined as macros
+ */
+
+#define thread_wakeup(x) \
+ thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
+#define thread_wakeup_with_result(x, z) \
+ thread_wakeup_prim((x), FALSE, (z))
+#define thread_wakeup_one(x) \
+ thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
+
+/*
+ * Machine-dependent code must define these functions.
+ */
+
+extern void thread_bootstrap_return(void) __attribute__((noreturn));
+extern void thread_exception_return(void) __attribute__((noreturn));
+extern void __attribute__((__noreturn__)) thread_syscall_return(kern_return_t);
+
+extern thread_t switch_context(
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread);
+extern void stack_handoff(
+ thread_t old_thread,
+ thread_t new_thread);
+
+/*
+ * These functions are either defined in kern/thread.c
+ * via machine-dependent stack_attach and stack_detach functions,
+ * or are defined directly by machine-dependent code.
+ */
+
+extern kern_return_t stack_alloc(
+ thread_t thread,
+ void (*resume)(thread_t));
+extern boolean_t stack_alloc_try(
+ thread_t thread,
+ void (*resume)(thread_t));
+extern void stack_free(
+ thread_t thread);
+
+/*
+ * Convert a timeout in milliseconds (mach_msg_timeout_t)
+ * to a timeout in ticks (for use by set_timeout).
+ * This conversion rounds UP so that small timeouts
+ * at least wait for one tick instead of not waiting at all.
+ */
+
+#define convert_ipc_timeout_to_ticks(millis) \
+ (((millis) * hz + 999) / 1000)
+
+void set_pri(thread_t th, int pri, boolean_t resched);
+void do_thread_scan(void);
+thread_t choose_pset_thread(processor_t myprocessor, processor_set_t pset);
+
+#if DEBUG
+#include <kern/sched.h> /* for run_queue_t */
+
+void checkrq(run_queue_t rq, const char *msg);
+void thread_check(thread_t th, run_queue_t rq);
+#endif /* DEBUG */
+
+extern void idle_thread(void) __attribute__((noreturn));
+extern void sched_thread(void);
+extern int stuck_count;
+
+#endif /* _KERN_SCHED_PRIM_H_ */
diff --git a/kern/shuttle.h b/kern/shuttle.h
new file mode 100644
index 0000000..0b1c2c5
--- /dev/null
+++ b/kern/shuttle.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: shuttle.h
+ * Author: Bryan Ford
+ *
+ * This file contains definitions for shuttles,
+ * which handle microscheduling for individual threads.
+ *
+ */
+
+#ifndef _KERN_SHUTTLE_H_
+#define _KERN_SHUTTLE_H_
+
+#include <kern/lock.h>
+
+
+
+struct Shuttle {
+ /* XXX must be first in thread */
+/*
+ * NOTE: The runq field in the thread structure has an unusual
+ * locking protocol. If its value is RUN_QUEUE_NULL, then it is
+ * locked by the thread_lock, but if its value is something else
+ * (i.e. a run_queue) then it is locked by that run_queue's lock.
+ */
+ queue_chain_t links; /* current run queue links */
+ run_queue_t runq; /* run queue p is on SEE BELOW */
+
+ /* Next pointer when on a queue */
+ struct Shuttle *next;
+
+ /* Micropriority level */
+ int priority;
+
+ /* General-purpose pointer field whose use depends on what the
+ thread happens to be doing */
+ void *message;
+
+ int foobar[1];
+};
+typedef struct Shuttle Shuttle;
+
+
+
+/* Exported functions */
+
+
+
+/* Machine-dependent code must define the following functions */
+
+
+
+#endif /* _KERN_SHUTTLE_H_ */
diff --git a/kern/slab.c b/kern/slab.c
new file mode 100644
index 0000000..dc44e42
--- /dev/null
+++ b/kern/slab.c
@@ -0,0 +1,1686 @@
+/*
+ * Copyright (c) 2011 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Object caching and general purpose memory allocator.
+ *
+ * This allocator is based on the paper "The Slab Allocator: An Object-Caching
+ * Kernel Memory Allocator" by Jeff Bonwick.
+ *
+ * It allows the allocation of objects (i.e. fixed-size typed buffers) from
+ * caches and is efficient in both space and time. This implementation follows
+ * many of the indications from the paper mentioned. The most notable
+ * differences are outlined below.
+ *
+ * The per-cache self-scaling hash table for buffer-to-bufctl conversion,
+ * described in 3.2.3 "Slab Layout for Large Objects", has been replaced by
+ * a red-black tree storing slabs, sorted by address. The use of a
+ * self-balancing tree for buffer-to-slab conversions provides a few advantages
+ * over a hash table. Unlike a hash table, a BST provides a "lookup nearest"
+ * operation, so obtaining the slab data (whether it is embedded in the slab or
+ * off slab) from a buffer address simply consists of a "lookup nearest towards
+ * 0" tree search. Finally, a self-balancing tree is a true self-scaling data
+ * structure, whereas a hash table requires periodic maintenance and complete
+ * resizing, which is expensive. The only drawback is that releasing a buffer
+ * to the slab layer takes logarithmic time instead of constant time.
+ *
+ * This implementation uses per-cpu pools of objects, which service most
+ * allocation requests. These pools act as caches (but are named differently
+ * to avoid confusion with CPU caches) that reduce contention on multiprocessor
+ * systems. When a pool is empty and cannot provide an object, it is filled by
+ * transferring multiple objects from the slab layer. The symmetric case is
+ * handled likewise.
+ */
+
+#include <string.h>
+#include <kern/assert.h>
+#include <kern/mach_clock.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <kern/slab.h>
+#include <kern/kalloc.h>
+#include <kern/cpu_number.h>
+#include <kern/mach_debug.server.h>
+#include <mach/vm_param.h>
+#include <mach/machine/vm_types.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_types.h>
+#include <sys/types.h>
+
+#ifdef MACH_DEBUG
+#include <mach_debug/slab_info.h>
+#endif
+
+/*
+ * Utility macros.
+ */
+#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
+#define ISP2(x) P2ALIGNED(x, x)
+#define P2ALIGN(x, a) ((x) & -(a))
+#define P2ROUND(x, a) (-(-(x) & -(a)))
+#define P2END(x, a) (-(~(x) & -(a)))
+#define likely(expr) __builtin_expect(!!(expr), 1)
+#define unlikely(expr) __builtin_expect(!!(expr), 0)
+
+/*
+ * Minimum required alignment.
+ */
+#define KMEM_ALIGN_MIN 8
+
+/*
+ * Special buffer size under which slab data is unconditionally allocated
+ * from its associated slab.
+ */
+#define KMEM_BUF_SIZE_THRESHOLD (PAGE_SIZE / 8)
+
+/*
+ * Time (in ticks) between two garbage collection operations.
+ */
+#define KMEM_GC_INTERVAL (5 * hz)
+
+/*
+ * The transfer size of a CPU pool is computed by dividing the pool size by
+ * this value.
+ */
+#define KMEM_CPU_POOL_TRANSFER_RATIO 2
+
+/*
+ * Redzone guard word.
+ */
+#ifdef __LP64__
+#if _HOST_BIG_ENDIAN
+#define KMEM_REDZONE_WORD 0xfeedfacefeedfaceUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_REDZONE_WORD 0xcefaedfecefaedfeUL
+#endif /* _HOST_BIG_ENDIAN */
+#else /* __LP64__ */
+#if _HOST_BIG_ENDIAN
+#define KMEM_REDZONE_WORD 0xfeedfaceUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_REDZONE_WORD 0xcefaedfeUL
+#endif /* _HOST_BIG_ENDIAN */
+#endif /* __LP64__ */
+
+/*
+ * Redzone byte for padding.
+ */
+#define KMEM_REDZONE_BYTE 0xbb
+
+/*
+ * Shift for the first kalloc cache size.
+ */
+#define KALLOC_FIRST_SHIFT 5
+
+/*
+ * Number of caches backing general purpose allocations.
+ */
+#define KALLOC_NR_CACHES 13
+
+/*
+ * Values the buftag state member can take.
+ */
+#ifdef __LP64__
+#if _HOST_BIG_ENDIAN
+#define KMEM_BUFTAG_ALLOC 0xa110c8eda110c8edUL
+#define KMEM_BUFTAG_FREE 0xf4eeb10cf4eeb10cUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_BUFTAG_ALLOC 0xedc810a1edc810a1UL
+#define KMEM_BUFTAG_FREE 0x0cb1eef40cb1eef4UL
+#endif /* _HOST_BIG_ENDIAN */
+#else /* __LP64__ */
+#if _HOST_BIG_ENDIAN
+#define KMEM_BUFTAG_ALLOC 0xa110c8edUL
+#define KMEM_BUFTAG_FREE 0xf4eeb10cUL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_BUFTAG_ALLOC 0xedc810a1UL
+#define KMEM_BUFTAG_FREE 0x0cb1eef4UL
+#endif /* _HOST_BIG_ENDIAN */
+#endif /* __LP64__ */
+
+/*
+ * Free and uninitialized patterns.
+ *
+ * These values are unconditionally 64-bit wide since buffers are at least
+ * 8-byte aligned.
+ */
+#if _HOST_BIG_ENDIAN
+#define KMEM_FREE_PATTERN 0xdeadbeefdeadbeefULL
+#define KMEM_UNINIT_PATTERN 0xbaddcafebaddcafeULL
+#else /* _HOST_BIG_ENDIAN */
+#define KMEM_FREE_PATTERN 0xefbeaddeefbeaddeULL
+#define KMEM_UNINIT_PATTERN 0xfecaddbafecaddbaULL
+#endif /* _HOST_BIG_ENDIAN */
+
+/*
+ * Cache flags.
+ *
+ * The flags don't change once set and can be tested without locking.
+ */
+#define KMEM_CF_SLAB_EXTERNAL 0x01 /* Slab data is off slab */
+#define KMEM_CF_PHYSMEM 0x02 /* Allocate from physical memory */
+#define KMEM_CF_DIRECT 0x04 /* Direct buf-to-slab translation
+ (implies !KMEM_CF_SLAB_EXTERNAL) */
+#define KMEM_CF_USE_TREE 0x08 /* Use red-black tree to track slab
+ data */
+#define KMEM_CF_USE_PAGE 0x10 /* Use page private data to track slab
+ data (implies KMEM_CF_SLAB_EXTERNAL
+ and KMEM_CF_PHYSMEM) */
+#define KMEM_CF_VERIFY 0x20 /* Debugging facilities enabled
+ (implies KMEM_CF_USE_TREE) */
+
+/*
+ * Options for kmem_cache_alloc_verify().
+ */
+#define KMEM_AV_NOCONSTRUCT 0
+#define KMEM_AV_CONSTRUCT 1
+
+/*
+ * Error codes for kmem_cache_error().
+ */
+#define KMEM_ERR_INVALID 0 /* Invalid address being freed */
+#define KMEM_ERR_DOUBLEFREE 1 /* Freeing already free address */
+#define KMEM_ERR_BUFTAG 2 /* Invalid buftag content */
+#define KMEM_ERR_MODIFIED 3 /* Buffer modified while free */
+#define KMEM_ERR_REDZONE 4 /* Redzone violation */
+
+#if SLAB_USE_CPU_POOLS
+/*
+ * Available CPU pool types.
+ *
+ * For each entry, the CPU pool size applies from the entry buf_size
+ * (excluded) up to (and including) the buf_size of the preceding entry.
+ *
+ * See struct kmem_cpu_pool_type for a description of the values.
+ */
+static struct kmem_cpu_pool_type kmem_cpu_pool_types[] = {
+ { 32768, 1, 0, NULL },
+ { 4096, 8, CPU_L1_SIZE, NULL },
+ { 256, 64, CPU_L1_SIZE, NULL },
+ { 0, 128, CPU_L1_SIZE, NULL }
+};
+
+/*
+ * Caches where CPU pool arrays are allocated from.
+ */
+static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)];
+#endif /* SLAB_USE_CPU_POOLS */
+
+/*
+ * Cache for off slab data.
+ */
+static struct kmem_cache kmem_slab_cache;
+
+/*
+ * General purpose caches array.
+ */
+static struct kmem_cache kalloc_caches[KALLOC_NR_CACHES];
+
+/*
+ * List of all caches managed by the allocator.
+ */
+static struct list kmem_cache_list;
+static unsigned int kmem_nr_caches;
+static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock;
+
+/*
+ * Time of the last memory reclaim, in clock ticks.
+ */
+static unsigned long kmem_gc_last_tick;
+
+#define kmem_error(format, ...) \
+ panic("mem: error: %s(): " format "\n", __func__, \
+ ## __VA_ARGS__)
+
+#define kmem_warn(format, ...) \
+ printf("mem: warning: %s(): " format "\n", __func__, \
+ ## __VA_ARGS__)
+
+#define kmem_print(format, ...) \
+ printf(format "\n", ## __VA_ARGS__)
+
+static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
+ void *arg);
+static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache);
+static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf);
+
+static void * kmem_buf_verify_bytes(void *buf, void *pattern, size_t size)
+{
+ char *ptr, *pattern_ptr, *end;
+
+ end = buf + size;
+
+ for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++)
+ if (*ptr != *pattern_ptr)
+ return ptr;
+
+ return NULL;
+}
+
+static void * kmem_buf_verify(void *buf, uint64_t pattern, vm_size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++)
+ if (*ptr != pattern)
+ return kmem_buf_verify_bytes(ptr, &pattern, sizeof(pattern));
+
+ return NULL;
+}
+
+static void kmem_buf_fill(void *buf, uint64_t pattern, size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++)
+ *ptr = pattern;
+}
+
+static void * kmem_buf_verify_fill(void *buf, uint64_t old, uint64_t new,
+ size_t size)
+{
+ uint64_t *ptr, *end;
+
+ assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
+ assert(P2ALIGNED(size, sizeof(uint64_t)));
+
+ end = buf + size;
+
+ for (ptr = buf; ptr < end; ptr++) {
+ if (*ptr != old)
+ return kmem_buf_verify_bytes(ptr, &old, sizeof(old));
+
+ *ptr = new;
+ }
+
+ return NULL;
+}
+
+static inline union kmem_bufctl *
+kmem_buf_to_bufctl(void *buf, struct kmem_cache *cache)
+{
+ return (union kmem_bufctl *)(buf + cache->bufctl_dist);
+}
+
+static inline struct kmem_buftag *
+kmem_buf_to_buftag(void *buf, struct kmem_cache *cache)
+{
+ return (struct kmem_buftag *)(buf + cache->buftag_dist);
+}
+
+static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
+ struct kmem_cache *cache)
+{
+ return (void *)bufctl - cache->bufctl_dist;
+}
+
+static vm_offset_t
+kmem_pagealloc_physmem(vm_size_t size)
+{
+ struct vm_page *page;
+
+ assert(size == PAGE_SIZE);
+
+ for (;;) {
+ page = vm_page_grab(VM_PAGE_DIRECTMAP);
+
+ if (page != NULL)
+ break;
+
+ VM_PAGE_WAIT(NULL);
+ }
+
+ return phystokv(vm_page_to_pa(page));
+}
+
+static void
+kmem_pagefree_physmem(vm_offset_t addr, vm_size_t size)
+{
+ struct vm_page *page;
+
+ assert(size == PAGE_SIZE);
+ page = vm_page_lookup_pa(kvtophys(addr));
+ assert(page != NULL);
+ vm_page_release(page, FALSE, FALSE);
+}
+
+static vm_offset_t
+kmem_pagealloc_virtual(vm_size_t size, vm_size_t align)
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ assert(size > PAGE_SIZE);
+ size = vm_page_round(size);
+
+ if (align <= PAGE_SIZE)
+ kr = kmem_alloc_wired(kernel_map, &addr, size);
+ else
+ kr = kmem_alloc_aligned(kernel_map, &addr, size);
+
+ if (kr != KERN_SUCCESS)
+ return 0;
+
+ return addr;
+}
+
+static void
+kmem_pagefree_virtual(vm_offset_t addr, vm_size_t size)
+{
+ if (addr < kernel_virtual_start || addr + size > kernel_virtual_end)
+ panic("kmem_pagefree_virtual(%lx-%lx) falls in physical memory area!\n",
+ (unsigned long) addr, (unsigned long) addr + size);
+ assert(size > PAGE_SIZE);
+ size = vm_page_round(size);
+ kmem_free(kernel_map, addr, size);
+}
+
+static vm_offset_t
+kmem_pagealloc(vm_size_t size, vm_size_t align, int flags)
+{
+ assert(align <= size);
+ return (flags & KMEM_CF_PHYSMEM)
+ ? kmem_pagealloc_physmem(size)
+ : kmem_pagealloc_virtual(size, align);
+}
+
+static void
+kmem_pagefree(vm_offset_t addr, vm_size_t size, int flags)
+{
+ return (flags & KMEM_CF_PHYSMEM)
+ ? kmem_pagefree_physmem(addr, size)
+ : kmem_pagefree_virtual(addr, size);
+}
+
+static void kmem_slab_create_verify(struct kmem_slab *slab,
+ struct kmem_cache *cache)
+{
+ struct kmem_buftag *buftag;
+ size_t buf_size;
+ unsigned long buffers;
+ void *buf;
+
+ buf_size = cache->buf_size;
+ buf = slab->addr;
+ buftag = kmem_buf_to_buftag(buf, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
+ buftag->state = KMEM_BUFTAG_FREE;
+ buf += buf_size;
+ buftag = kmem_buf_to_buftag(buf, cache);
+ }
+}
+
+/*
+ * Create an empty slab for a cache.
+ *
+ * The caller must drop all locks before calling this function.
+ */
+static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
+ size_t color)
+{
+ struct kmem_slab *slab;
+ union kmem_bufctl *bufctl;
+ size_t buf_size;
+ unsigned long buffers;
+ vm_offset_t slab_buf;
+
+ slab_buf = kmem_pagealloc(cache->slab_size, cache->align, cache->flags);
+
+ if (slab_buf == 0)
+ return NULL;
+
+ if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+ slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
+
+ if (slab == NULL) {
+ kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
+ return NULL;
+ }
+
+ if (cache->flags & KMEM_CF_USE_PAGE) {
+ struct vm_page *page;
+
+ page = vm_page_lookup_pa(kvtophys(slab_buf));
+ assert(page != NULL);
+ vm_page_set_priv(page, slab);
+ }
+ } else {
+ slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
+ }
+
+ slab->cache = cache;
+ list_node_init(&slab->list_node);
+ rbtree_node_init(&slab->tree_node);
+ slab->nr_refs = 0;
+ slab->first_free = NULL;
+ slab->addr = (void *)(slab_buf + color);
+
+ buf_size = cache->buf_size;
+ bufctl = kmem_buf_to_bufctl(slab->addr, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ bufctl->next = slab->first_free;
+ slab->first_free = bufctl;
+ bufctl = (union kmem_bufctl *)((void *)bufctl + buf_size);
+ }
+
+ if (cache->flags & KMEM_CF_VERIFY)
+ kmem_slab_create_verify(slab, cache);
+
+ return slab;
+}
+
+static void kmem_slab_destroy_verify(struct kmem_slab *slab,
+ struct kmem_cache *cache)
+{
+ struct kmem_buftag *buftag;
+ size_t buf_size;
+ unsigned long buffers;
+ void *buf, *addr;
+
+ buf_size = cache->buf_size;
+ buf = slab->addr;
+ buftag = kmem_buf_to_buftag(buf, cache);
+
+ for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
+ if (buftag->state != KMEM_BUFTAG_FREE)
+ kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
+
+ addr = kmem_buf_verify(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
+
+ if (addr != NULL)
+ kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);
+
+ buf += buf_size;
+ buftag = kmem_buf_to_buftag(buf, cache);
+ }
+}
+
+/*
+ * Destroy a slab.
+ *
+ * The caller must drop all locks before calling this function.
+ */
+static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
+{
+ vm_offset_t slab_buf;
+
+ assert(slab->nr_refs == 0);
+ assert(slab->first_free != NULL);
+
+ if (cache->flags & KMEM_CF_VERIFY)
+ kmem_slab_destroy_verify(slab, cache);
+
+ slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
+
+ if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+ if (cache->flags & KMEM_CF_USE_PAGE) {
+ struct vm_page *page;
+
+ /* Not strictly needed, but let's increase safety */
+ page = vm_page_lookup_pa(kvtophys(slab_buf));
+ assert(page != NULL);
+ vm_page_set_priv(page, NULL);
+ }
+
+ kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
+ }
+
+ kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
+}
+
+static inline int kmem_slab_cmp_lookup(const void *addr,
+ const struct rbtree_node *node)
+{
+ struct kmem_slab *slab;
+
+ slab = rbtree_entry(node, struct kmem_slab, tree_node);
+
+ if (addr == slab->addr)
+ return 0;
+ else if (addr < slab->addr)
+ return -1;
+ else
+ return 1;
+}
+
+/*
+ * Comparison callback for slab insertion in the active slabs tree.
+ *
+ * Delegates to kmem_slab_cmp_lookup(), using the address of the slab
+ * being inserted as the lookup key.
+ */
+static inline int kmem_slab_cmp_insert(const struct rbtree_node *a,
+                                       const struct rbtree_node *b)
+{
+    struct kmem_slab *slab;
+
+    slab = rbtree_entry(a, struct kmem_slab, tree_node);
+    return kmem_slab_cmp_lookup(slab->addr, b);
+}
+
+#if SLAB_USE_CPU_POOLS
+/*
+ * Initialize a CPU pool in its empty, unbuilt state.
+ *
+ * The object array is allocated lazily, on the first free through this
+ * pool (see kmem_cache_free()).
+ */
+static void kmem_cpu_pool_init(struct kmem_cpu_pool *cpu_pool,
+                               struct kmem_cache *cache)
+{
+    simple_lock_init(&cpu_pool->lock);
+    cpu_pool->flags = cache->flags;
+    cpu_pool->size = 0;
+    cpu_pool->transfer_size = 0;
+    cpu_pool->nr_objs = 0;
+    cpu_pool->array = NULL;
+}
+
+/*
+ * Return a CPU pool.
+ *
+ * This function will generally return the pool matching the CPU running the
+ * calling thread. Because of context switches and thread migration, the
+ * caller might be running on another processor after this function returns.
+ * Although not optimal, this should rarely happen, and it doesn't affect the
+ * allocator operations in any other way, as CPU pools are always valid, and
+ * their access is serialized by a lock.
+ */
+static inline struct kmem_cpu_pool * kmem_cpu_pool_get(struct kmem_cache *cache)
+{
+    return &cache->cpu_pools[cpu_number()];
+}
+
+/*
+ * Attach an object array to a CPU pool, making it operational.
+ *
+ * The transfer size (number of objects moved to or from the slab layer
+ * at once) is the pool size divided by KMEM_CPU_POOL_TRANSFER_RATIO,
+ * rounded up.
+ */
+static inline void kmem_cpu_pool_build(struct kmem_cpu_pool *cpu_pool,
+                                       struct kmem_cache *cache, void **array)
+{
+    cpu_pool->size = cache->cpu_pool_type->array_size;
+    cpu_pool->transfer_size = (cpu_pool->size
+                               + KMEM_CPU_POOL_TRANSFER_RATIO - 1)
+                              / KMEM_CPU_POOL_TRANSFER_RATIO;
+    cpu_pool->array = array;
+}
+
+/* Pop an object from a non-empty CPU pool.  Caller holds the pool lock. */
+static inline void * kmem_cpu_pool_pop(struct kmem_cpu_pool *cpu_pool)
+{
+    cpu_pool->nr_objs--;
+    return cpu_pool->array[cpu_pool->nr_objs];
+}
+
+/* Push an object on a non-full CPU pool.  Caller holds the pool lock. */
+static inline void kmem_cpu_pool_push(struct kmem_cpu_pool *cpu_pool, void *obj)
+{
+    cpu_pool->array[cpu_pool->nr_objs] = obj;
+    cpu_pool->nr_objs++;
+}
+
+/*
+ * Refill a CPU pool from the slab layer of its cache.
+ *
+ * Returns the number of objects transferred, which may be less than the
+ * transfer size (including 0) when the cache is exhausted.  Caller
+ * holds the pool lock; the cache lock is taken here.
+ */
+static int kmem_cpu_pool_fill(struct kmem_cpu_pool *cpu_pool,
+                              struct kmem_cache *cache)
+{
+    kmem_cache_ctor_t ctor;
+    void *buf;
+    int i;
+
+    /* Verified caches keep cached buffers unconstructed so the free
+       pattern can be checked; construction happens at allocation time. */
+    ctor = (cpu_pool->flags & KMEM_CF_VERIFY) ? NULL : cache->ctor;
+
+    simple_lock(&cache->lock);
+
+    for (i = 0; i < cpu_pool->transfer_size; i++) {
+        buf = kmem_cache_alloc_from_slab(cache);
+
+        if (buf == NULL)
+            break;
+
+        if (ctor != NULL)
+            ctor(buf);
+
+        kmem_cpu_pool_push(cpu_pool, buf);
+    }
+
+    simple_unlock(&cache->lock);
+
+    return i;
+}
+
+/*
+ * Return transfer_size objects from a CPU pool to the slab layer.
+ *
+ * Caller holds the pool lock; the cache lock is taken here.
+ */
+static void kmem_cpu_pool_drain(struct kmem_cpu_pool *cpu_pool,
+                                struct kmem_cache *cache)
+{
+    void *obj;
+    int i;
+
+    simple_lock(&cache->lock);
+
+    for (i = cpu_pool->transfer_size; i > 0; i--) {
+        obj = kmem_cpu_pool_pop(cpu_pool);
+        kmem_cache_free_to_slab(cache, obj);
+    }
+
+    simple_unlock(&cache->lock);
+}
+#endif /* SLAB_USE_CPU_POOLS */
+
+/*
+ * Report a fatal cache consistency error.
+ *
+ * error is one of the KMEM_ERR_* codes.  arg carries the offending
+ * buftag for KMEM_ERR_BUFTAG, and the faulting address for
+ * KMEM_ERR_MODIFIED and KMEM_ERR_REDZONE.  kmem_error() does not
+ * return, hence the end of this function is never reached (see the
+ * trailing comment).
+ */
+static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
+                             void *arg)
+{
+    struct kmem_buftag *buftag;
+
+    kmem_warn("cache: %s, buffer: %p", cache->name, (void *)buf);
+
+    switch(error) {
+    case KMEM_ERR_INVALID:
+        kmem_error("freeing invalid address");
+        break;
+    case KMEM_ERR_DOUBLEFREE:
+        kmem_error("attempting to free the same address twice");
+        break;
+    case KMEM_ERR_BUFTAG:
+        buftag = arg;
+        kmem_error("invalid buftag content, buftag state: %p",
+                   (void *)buftag->state);
+        break;
+    case KMEM_ERR_MODIFIED:
+        kmem_error("free buffer modified, fault address: %p, "
+                   "offset in buffer: %td", arg, arg - buf);
+        break;
+    case KMEM_ERR_REDZONE:
+        kmem_error("write beyond end of buffer, fault address: %p, "
+                   "offset in buffer: %td", arg, arg - buf);
+        break;
+    default:
+        kmem_error("unknown error");
+    }
+
+    /*
+     * Never reached.
+     */
+}
+
+/*
+ * Compute properties such as slab size for the given cache.
+ *
+ * Once the slab size is known, this function sets the related properties
+ * (buffers per slab and maximum color). It can also set some KMEM_CF_xxx
+ * flags depending on the resulting layout.
+ */
+static void kmem_cache_compute_properties(struct kmem_cache *cache, int flags)
+{
+    size_t size, waste;
+    int embed;
+
+    /* Small buffers always embed the slab header: the wasted trailing
+       space is bounded while an external header costs an allocation. */
+    if (cache->buf_size < KMEM_BUF_SIZE_THRESHOLD)
+        flags |= KMEM_CACHE_NOOFFSLAB;
+
+    cache->slab_size = PAGE_SIZE;
+
+    /* Grow the slab size page by page until at least one buffer fits,
+       accounting for an embedded header when applicable. */
+    for (;;) {
+        if (flags & KMEM_CACHE_NOOFFSLAB)
+            embed = 1;
+        else {
+            /* Embed the header only if it fits in the space that would
+               otherwise be wasted anyway. */
+            waste = cache->slab_size % cache->buf_size;
+            embed = (sizeof(struct kmem_slab) <= waste);
+        }
+
+        size = cache->slab_size;
+
+        if (embed)
+            size -= sizeof(struct kmem_slab);
+
+        if (size >= cache->buf_size)
+            break;
+
+        cache->slab_size += PAGE_SIZE;
+    }
+
+    /* Leftover space drives slab coloring; colors beyond a page are
+       pointless since they no longer shift cache-line placement. */
+    cache->bufs_per_slab = size / cache->buf_size;
+    cache->color_max = size % cache->buf_size;
+
+    if (cache->color_max >= PAGE_SIZE)
+        cache->color_max = 0;
+
+    if (!embed)
+        cache->flags |= KMEM_CF_SLAB_EXTERNAL;
+
+    if ((flags & KMEM_CACHE_PHYSMEM) || (cache->slab_size == PAGE_SIZE)) {
+        cache->flags |= KMEM_CF_PHYSMEM;
+
+        /*
+         * Avoid using larger-than-page slabs backed by the direct physical
+         * mapping to completely prevent physical memory fragmentation from
+         * making slab allocations fail.
+         */
+        if (cache->slab_size != PAGE_SIZE)
+            panic("slab: invalid cache parameters");
+    }
+
+    /* Verified caches always locate slabs through the address tree. */
+    if (cache->flags & KMEM_CF_VERIFY)
+        cache->flags |= KMEM_CF_USE_TREE;
+
+    /* Select how a buffer address is mapped back to its slab on free:
+       page private data, address tree, or direct computation. */
+    if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+        if (cache->flags & KMEM_CF_PHYSMEM)
+            cache->flags |= KMEM_CF_USE_PAGE;
+        else
+            cache->flags |= KMEM_CF_USE_TREE;
+    } else {
+        if (cache->slab_size == PAGE_SIZE)
+            cache->flags |= KMEM_CF_DIRECT;
+        else
+            cache->flags |= KMEM_CF_USE_TREE;
+    }
+}
+
+/*
+ * Initialize a cache.
+ *
+ * name is copied (truncated to KMEM_CACHE_NAME_SIZE - 1 characters),
+ * obj_size is the user-visible object size, align the minimum buffer
+ * alignment (raised to KMEM_ALIGN_MIN; must be a power of 2), ctor an
+ * optional constructor run on each new buffer, and flags a combination
+ * of KMEM_CACHE_* values.  The cache is registered on the global list.
+ */
+void kmem_cache_init(struct kmem_cache *cache, const char *name,
+                     size_t obj_size, size_t align,
+                     kmem_cache_ctor_t ctor, int flags)
+{
+#if SLAB_USE_CPU_POOLS
+    struct kmem_cpu_pool_type *cpu_pool_type;
+    size_t i;
+#endif /* SLAB_USE_CPU_POOLS */
+    size_t buf_size;
+
+    cache->flags = 0;
+#if SLAB_VERIFY
+    /* Verify every cache whose buffer, with debugging data appended,
+       still fits in a single page. */
+    if (obj_size < PAGE_SIZE - sizeof(union kmem_bufctl) + sizeof(struct kmem_buftag))
+        cache->flags |= KMEM_CF_VERIFY;
+#endif /* SLAB_VERIFY */
+
+    if (flags & KMEM_CACHE_VERIFY)
+        cache->flags |= KMEM_CF_VERIFY;
+
+    if (align < KMEM_ALIGN_MIN)
+        align = KMEM_ALIGN_MIN;
+
+    assert(obj_size > 0);
+    assert(ISP2(align));
+
+    buf_size = P2ROUND(obj_size, align);
+
+    simple_lock_init(&cache->lock);
+    list_node_init(&cache->node);
+    list_init(&cache->partial_slabs);
+    list_init(&cache->free_slabs);
+    rbtree_init(&cache->active_slabs);
+    cache->obj_size = obj_size;
+    cache->align = align;
+    cache->buf_size = buf_size;
+    /* By default the bufctl lives inside the buffer, at its end. */
+    cache->bufctl_dist = buf_size - sizeof(union kmem_bufctl);
+    cache->color = 0;
+    cache->nr_objs = 0;
+    cache->nr_bufs = 0;
+    cache->nr_slabs = 0;
+    cache->nr_free_slabs = 0;
+    cache->ctor = ctor;
+    strncpy(cache->name, name, sizeof(cache->name));
+    cache->name[sizeof(cache->name) - 1] = '\0';
+    cache->buftag_dist = 0;
+    cache->redzone_pad = 0;
+
+    if (cache->flags & KMEM_CF_VERIFY) {
+        /* Move the bufctl past the object and append a buftag, so the
+           whole object can be redzoned and pattern-checked. */
+        cache->bufctl_dist = buf_size;
+        cache->buftag_dist = cache->bufctl_dist + sizeof(union kmem_bufctl);
+        cache->redzone_pad = cache->bufctl_dist - cache->obj_size;
+        buf_size += sizeof(union kmem_bufctl) + sizeof(struct kmem_buftag);
+        buf_size = P2ROUND(buf_size, align);
+        cache->buf_size = buf_size;
+    }
+
+    kmem_cache_compute_properties(cache, flags);
+
+#if SLAB_USE_CPU_POOLS
+    /* Select the CPU pool type matching the buffer size. */
+    for (cpu_pool_type = kmem_cpu_pool_types;
+         buf_size <= cpu_pool_type->buf_size;
+         cpu_pool_type++);
+
+    cache->cpu_pool_type = cpu_pool_type;
+
+    for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++)
+        kmem_cpu_pool_init(&cache->cpu_pools[i], cache);
+#endif /* SLAB_USE_CPU_POOLS */
+
+    simple_lock(&kmem_cache_list_lock);
+    list_insert_tail(&kmem_cache_list, &cache->node);
+    kmem_nr_caches++;
+    simple_unlock(&kmem_cache_list_lock);
+}
+
+/* True when every buffer of the cache is currently allocated, i.e. the
+   cache has no free buffer left to hand out. */
+static inline int kmem_cache_empty(struct kmem_cache *cache)
+{
+    return (cache->nr_objs == cache->nr_bufs) ? 1 : 0;
+}
+
+/*
+ * Add one slab to a cache that has run out of free buffers.
+ *
+ * Returns nonzero when the cache can satisfy at least one allocation
+ * afterwards, 0 when slab creation failed and no other thread managed
+ * to grow the cache in the meantime.
+ */
+static int kmem_cache_grow(struct kmem_cache *cache)
+{
+    struct kmem_slab *slab;
+    size_t color;
+    int empty;
+
+    simple_lock(&cache->lock);
+
+    /* Another thread may already have grown the cache. */
+    if (!kmem_cache_empty(cache)) {
+        simple_unlock(&cache->lock);
+        return 1;
+    }
+
+    /* Advance the slab color, wrapping past color_max. */
+    color = cache->color;
+    cache->color += cache->align;
+
+    if (cache->color > cache->color_max)
+        cache->color = 0;
+
+    /* Drop the lock: slab creation allocates pages and must not be done
+       with the cache lock held. */
+    simple_unlock(&cache->lock);
+
+    slab = kmem_slab_create(cache, color);
+
+    simple_lock(&cache->lock);
+
+    if (slab != NULL) {
+        list_insert_head(&cache->free_slabs, &slab->list_node);
+        cache->nr_bufs += cache->bufs_per_slab;
+        cache->nr_slabs++;
+        cache->nr_free_slabs++;
+    }
+
+    /*
+     * Even if our slab creation failed, another thread might have succeeded
+     * in growing the cache.
+     */
+    empty = kmem_cache_empty(cache);
+
+    simple_unlock(&cache->lock);
+
+    return !empty;
+}
+
+/*
+ * Detach all free slabs of a cache and append them to dead_slabs.
+ *
+ * The caller destroys the collected slabs later, with no lock held, as
+ * required by kmem_slab_destroy().
+ */
+static void kmem_cache_reap(struct kmem_cache *cache, struct list *dead_slabs)
+{
+    simple_lock(&cache->lock);
+
+    list_concat(dead_slabs, &cache->free_slabs);
+    list_init(&cache->free_slabs);
+    cache->nr_bufs -= cache->bufs_per_slab * cache->nr_free_slabs;
+    cache->nr_slabs -= cache->nr_free_slabs;
+    cache->nr_free_slabs = 0;
+
+    simple_unlock(&cache->lock);
+}
+
+/*
+ * Allocate a raw (unconstructed) buffer from the slab layer of a cache.
+ *
+ * The cache must be locked before calling this function.
+ */
+static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
+{
+    struct kmem_slab *slab;
+    union kmem_bufctl *bufctl;
+
+    /* Prefer partial slabs so free slabs can eventually be reclaimed. */
+    if (!list_empty(&cache->partial_slabs))
+        slab = list_first_entry(&cache->partial_slabs, struct kmem_slab,
+                                list_node);
+    else if (!list_empty(&cache->free_slabs))
+        slab = list_first_entry(&cache->free_slabs, struct kmem_slab,
+                                list_node);
+    else
+        return NULL;
+
+    /* Unlink the first buffer from the slab free list. */
+    bufctl = slab->first_free;
+    assert(bufctl != NULL);
+    slab->first_free = bufctl->next;
+    slab->nr_refs++;
+    cache->nr_objs++;
+
+    if (slab->nr_refs == cache->bufs_per_slab) {
+        /* The slab has become complete */
+        list_remove(&slab->list_node);
+
+        /* A one-buffer slab goes from free to complete in one step. */
+        if (slab->nr_refs == 1)
+            cache->nr_free_slabs--;
+    } else if (slab->nr_refs == 1) {
+        /*
+         * The slab has become partial. Insert the new slab at the end of
+         * the list to reduce fragmentation.
+         */
+        list_remove(&slab->list_node);
+        list_insert_tail(&cache->partial_slabs, &slab->list_node);
+        cache->nr_free_slabs--;
+    }
+
+    /* A slab entering use becomes visible in the address tree. */
+    if ((slab->nr_refs == 1) && (cache->flags & KMEM_CF_USE_TREE))
+        rbtree_insert(&cache->active_slabs, &slab->tree_node,
+                      kmem_slab_cmp_insert);
+
+    return kmem_bufctl_to_buf(bufctl, cache);
+}
+
+/*
+ * Release a buffer to the slab layer of a cache.
+ *
+ * The cache must be locked before calling this function.
+ */
+static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
+{
+    struct kmem_slab *slab;
+    union kmem_bufctl *bufctl;
+
+    /* Map the buffer back to its slab, using the strategy selected by
+       kmem_cache_compute_properties(). */
+    if (cache->flags & KMEM_CF_DIRECT) {
+        /* Embedded header at the end of the single page. */
+        assert(cache->slab_size == PAGE_SIZE);
+        slab = (struct kmem_slab *)P2END((unsigned long)buf, cache->slab_size)
+               - 1;
+    } else if (cache->flags & KMEM_CF_USE_PAGE) {
+        struct vm_page *page;
+
+        /* External header recorded in the page private data. */
+        page = vm_page_lookup_pa(kvtophys((vm_offset_t)buf));
+        assert(page != NULL);
+        slab = vm_page_get_priv(page);
+    } else {
+        struct rbtree_node *node;
+
+        /* Nearest-below lookup in the active slabs tree. */
+        assert(cache->flags & KMEM_CF_USE_TREE);
+        node = rbtree_lookup_nearest(&cache->active_slabs, buf,
+                                     kmem_slab_cmp_lookup, RBTREE_LEFT);
+        assert(node != NULL);
+        slab = rbtree_entry(node, struct kmem_slab, tree_node);
+    }
+
+    assert((unsigned long)buf >= (unsigned long)slab->addr);
+    assert(((unsigned long)buf + cache->buf_size)
+           <= vm_page_trunc((unsigned long)slab->addr + cache->slab_size));
+
+    assert(slab->nr_refs >= 1);
+    assert(slab->nr_refs <= cache->bufs_per_slab);
+
+    /* Put the buffer back on the slab free list. */
+    bufctl = kmem_buf_to_bufctl(buf, cache);
+    bufctl->next = slab->first_free;
+    slab->first_free = bufctl;
+    slab->nr_refs--;
+    cache->nr_objs--;
+
+    if (slab->nr_refs == 0) {
+        /* The slab has become free */
+
+        if (cache->flags & KMEM_CF_USE_TREE)
+            rbtree_remove(&cache->active_slabs, &slab->tree_node);
+
+        /* A one-buffer slab never sat on the partial list. */
+        if (cache->bufs_per_slab > 1)
+            list_remove(&slab->list_node);
+
+        list_insert_head(&cache->free_slabs, &slab->list_node);
+        cache->nr_free_slabs++;
+    } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
+        /* The slab has become partial */
+        list_insert_head(&cache->partial_slabs, &slab->list_node);
+    }
+}
+
+/*
+ * Debugging checks performed when a buffer leaves a verified cache.
+ *
+ * Validates the buftag and the free pattern, overwrites the buffer with
+ * the uninitialized pattern, arms the redzones, and runs the (deferred)
+ * constructor when construct is set.
+ */
+static void kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf,
+                                    int construct)
+{
+    struct kmem_buftag *buftag;
+    union kmem_bufctl *bufctl;
+    void *addr;
+
+    buftag = kmem_buf_to_buftag(buf, cache);
+
+    if (buftag->state != KMEM_BUFTAG_FREE)
+        kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
+
+    /* Replace the free pattern with the uninitialized pattern, checking
+       it was intact along the way. */
+    addr = kmem_buf_verify_fill(buf, KMEM_FREE_PATTERN, KMEM_UNINIT_PATTERN,
+                                cache->bufctl_dist);
+
+    if (addr != NULL)
+        kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);
+
+    /* Redzone the padding between the object and the bufctl... */
+    addr = buf + cache->obj_size;
+    memset(addr, KMEM_REDZONE_BYTE, cache->redzone_pad);
+
+    /* ...and the bufctl itself, unused while the buffer is allocated. */
+    bufctl = kmem_buf_to_bufctl(buf, cache);
+    bufctl->redzone = KMEM_REDZONE_WORD;
+    buftag->state = KMEM_BUFTAG_ALLOC;
+
+    if (construct && (cache->ctor != NULL))
+        cache->ctor(buf);
+}
+
+/*
+ * Allocate an object from a cache, returning 0 when memory is exhausted.
+ *
+ * With CPU pools enabled, objects are popped from the local pool when
+ * possible, refilling the pool from the slab layer when it is empty.
+ * Without CPU pools (or before the pool array has been built),
+ * allocation goes directly to the slab layer, growing the cache as
+ * needed.
+ */
+vm_offset_t kmem_cache_alloc(struct kmem_cache *cache)
+{
+    int filled;
+    void *buf;
+
+#if SLAB_USE_CPU_POOLS
+    struct kmem_cpu_pool *cpu_pool;
+
+    cpu_pool = kmem_cpu_pool_get(cache);
+
+    if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL)
+        goto slab_alloc;
+
+    simple_lock(&cpu_pool->lock);
+
+fast_alloc:
+    if (likely(cpu_pool->nr_objs > 0)) {
+        buf = kmem_cpu_pool_pop(cpu_pool);
+        simple_unlock(&cpu_pool->lock);
+
+        /* Verified buffers are constructed at allocation time. */
+        if (cpu_pool->flags & KMEM_CF_VERIFY)
+            kmem_cache_alloc_verify(cache, buf, KMEM_AV_CONSTRUCT);
+
+        return (vm_offset_t)buf;
+    }
+
+    if (cpu_pool->array != NULL) {
+        filled = kmem_cpu_pool_fill(cpu_pool, cache);
+
+        if (!filled) {
+            /* Slab layer exhausted: drop the pool lock and grow. */
+            simple_unlock(&cpu_pool->lock);
+
+            filled = kmem_cache_grow(cache);
+
+            if (!filled)
+                return 0;
+
+            simple_lock(&cpu_pool->lock);
+        }
+
+        goto fast_alloc;
+    }
+
+    simple_unlock(&cpu_pool->lock);
+#endif /* SLAB_USE_CPU_POOLS */
+
+slab_alloc:
+    simple_lock(&cache->lock);
+    buf = kmem_cache_alloc_from_slab(cache);
+    simple_unlock(&cache->lock);
+
+    if (buf == NULL) {
+        filled = kmem_cache_grow(cache);
+
+        if (!filled)
+            return 0;
+
+        goto slab_alloc;
+    }
+
+    /* Verification fills the buffer itself, so construction is done
+       separately, after the checks. */
+    if (cache->flags & KMEM_CF_VERIFY)
+        kmem_cache_alloc_verify(cache, buf, KMEM_AV_NOCONSTRUCT);
+
+    if (cache->ctor != NULL)
+        cache->ctor(buf);
+
+    return (vm_offset_t)buf;
+}
+
+/*
+ * Debugging checks performed when an object returns to a verified cache.
+ *
+ * Validates that the address belongs to a slab of the cache and is
+ * buffer-aligned, that the buffer is currently allocated (catching
+ * double frees and stray pointers), and that the redzones are intact.
+ * The buffer is then refilled with the free pattern so later operations
+ * can detect use-after-free.
+ */
+static void kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
+{
+    struct rbtree_node *node;
+    struct kmem_buftag *buftag;
+    struct kmem_slab *slab;
+    union kmem_bufctl *bufctl;
+    unsigned char *redzone_byte;
+    unsigned long slabend;
+
+    assert(cache->flags & KMEM_CF_USE_TREE);
+
+    simple_lock(&cache->lock);
+    node = rbtree_lookup_nearest(&cache->active_slabs, buf,
+                                 kmem_slab_cmp_lookup, RBTREE_LEFT);
+    simple_unlock(&cache->lock);
+
+    if (node == NULL)
+        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
+
+    slab = rbtree_entry(node, struct kmem_slab, tree_node);
+    slabend = P2ALIGN((unsigned long)slab->addr + cache->slab_size, PAGE_SIZE);
+
+    if ((unsigned long)buf >= slabend)
+        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
+
+    /* The address must sit exactly on a buffer boundary. */
+    if ((((unsigned long)buf - (unsigned long)slab->addr) % cache->buf_size)
+        != 0)
+        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
+
+    /*
+     * As the buffer address is valid, accessing its buftag is safe.
+     */
+    buftag = kmem_buf_to_buftag(buf, cache);
+
+    if (buftag->state != KMEM_BUFTAG_ALLOC) {
+        if (buftag->state == KMEM_BUFTAG_FREE)
+            kmem_cache_error(cache, buf, KMEM_ERR_DOUBLEFREE, NULL);
+        else
+            kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
+    }
+
+    /* Check the byte redzone between the object and the bufctl. */
+    redzone_byte = buf + cache->obj_size;
+    bufctl = kmem_buf_to_bufctl(buf, cache);
+
+    while (redzone_byte < (unsigned char *)bufctl) {
+        if (*redzone_byte != KMEM_REDZONE_BYTE)
+            kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
+
+        redzone_byte++;
+    }
+
+    /* Check the word redzone stored in the bufctl itself. */
+    if (bufctl->redzone != KMEM_REDZONE_WORD) {
+        unsigned long word;
+
+        /* Locate the first mismatching byte for the error report. */
+        word = KMEM_REDZONE_WORD;
+        redzone_byte = kmem_buf_verify_bytes(&bufctl->redzone, &word,
+                                             sizeof(bufctl->redzone));
+        kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
+    }
+
+    kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
+    buftag->state = KMEM_BUFTAG_FREE;
+}
+
+/*
+ * Release an object to its cache.
+ *
+ * With CPU pools enabled, the object is pushed on the local pool,
+ * draining part of it to the slab layer when full; the pool object
+ * array itself is allocated here on first use.  Note that the opening
+ * if statement spans the #if/#else pair below: exactly one of the two
+ * conditions is compiled in.
+ */
+void kmem_cache_free(struct kmem_cache *cache, vm_offset_t obj)
+{
+#if SLAB_USE_CPU_POOLS
+    struct kmem_cpu_pool *cpu_pool;
+    void **array;
+
+    cpu_pool = kmem_cpu_pool_get(cache);
+
+    if (cpu_pool->flags & KMEM_CF_VERIFY) {
+#else /* SLAB_USE_CPU_POOLS */
+    if (cache->flags & KMEM_CF_VERIFY) {
+#endif /* SLAB_USE_CPU_POOLS */
+        kmem_cache_free_verify(cache, (void *)obj);
+    }
+
+#if SLAB_USE_CPU_POOLS
+    if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL)
+        goto slab_free;
+
+    simple_lock(&cpu_pool->lock);
+
+fast_free:
+    if (likely(cpu_pool->nr_objs < cpu_pool->size)) {
+        kmem_cpu_pool_push(cpu_pool, (void *)obj);
+        simple_unlock(&cpu_pool->lock);
+        return;
+    }
+
+    if (cpu_pool->array != NULL) {
+        kmem_cpu_pool_drain(cpu_pool, cache);
+        goto fast_free;
+    }
+
+    /* No array yet: allocate one from the matching array cache, with
+       the pool lock dropped. */
+    simple_unlock(&cpu_pool->lock);
+
+    array = (void *)kmem_cache_alloc(cache->cpu_pool_type->array_cache);
+
+    if (array != NULL) {
+        simple_lock(&cpu_pool->lock);
+
+        /*
+         * Another thread may have built the CPU pool while the lock was
+         * dropped.
+         */
+        if (cpu_pool->array != NULL) {
+            simple_unlock(&cpu_pool->lock);
+            kmem_cache_free(cache->cpu_pool_type->array_cache,
+                            (vm_offset_t)array);
+            simple_lock(&cpu_pool->lock);
+            goto fast_free;
+        }
+
+        kmem_cpu_pool_build(cpu_pool, cache, array);
+        goto fast_free;
+    }
+
+slab_free:
+#endif /* SLAB_USE_CPU_POOLS */
+
+    simple_lock(&cache->lock);
+    kmem_cache_free_to_slab(cache, (void *)obj);
+    simple_unlock(&cache->lock);
+}
+
+/*
+ * Release free slabs from all caches back to the VM system.
+ *
+ * Rate-limited to one pass every KMEM_GC_INTERVAL ticks.  Free slabs
+ * are first moved to a private list under the cache locks, then
+ * destroyed with no lock held, as kmem_slab_destroy() requires.
+ */
+void slab_collect(void)
+{
+    struct kmem_cache *cache;
+    struct kmem_slab *slab;
+    struct list dead_slabs;
+
+    if (elapsed_ticks <= (kmem_gc_last_tick + KMEM_GC_INTERVAL))
+        return;
+
+    kmem_gc_last_tick = elapsed_ticks;
+
+    list_init(&dead_slabs);
+
+    simple_lock(&kmem_cache_list_lock);
+
+    list_for_each_entry(&kmem_cache_list, cache, node)
+        kmem_cache_reap(cache, &dead_slabs);
+
+    simple_unlock(&kmem_cache_list_lock);
+
+    while (!list_empty(&dead_slabs)) {
+        slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node);
+        list_remove(&slab->list_node);
+        kmem_slab_destroy(slab, slab->cache);
+    }
+}
+
+/*
+ * Early initialization: set up the global cache list before any cache
+ * can be created.
+ */
+void slab_bootstrap(void)
+{
+    /* Make sure a bufctl can always be stored in a buffer */
+    assert(sizeof(union kmem_bufctl) <= KMEM_ALIGN_MIN);
+
+    list_init(&kmem_cache_list);
+    simple_lock_init(&kmem_cache_list_lock);
+}
+
+/*
+ * Create the allocator's internal caches: the CPU pool array caches
+ * (when enabled) and the cache of external slab headers.
+ */
+void slab_init(void)
+{
+#if SLAB_USE_CPU_POOLS
+    struct kmem_cpu_pool_type *cpu_pool_type;
+    char name[KMEM_CACHE_NAME_SIZE];
+    size_t i, size;
+#endif /* SLAB_USE_CPU_POOLS */
+
+#if SLAB_USE_CPU_POOLS
+    for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
+        cpu_pool_type = &kmem_cpu_pool_types[i];
+        cpu_pool_type->array_cache = &kmem_cpu_array_caches[i];
+        sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
+        size = sizeof(void *) * cpu_pool_type->array_size;
+        kmem_cache_init(cpu_pool_type->array_cache, name, size,
+                        cpu_pool_type->array_align, NULL, 0);
+    }
+#endif /* SLAB_USE_CPU_POOLS */
+
+    /*
+     * Prevent off slab data for the slab cache to avoid infinite recursion.
+     */
+    kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
+                    0, NULL, KMEM_CACHE_NOOFFSLAB);
+}
+
+/*
+ * Create the power-of-two size-class caches used by kalloc(), starting
+ * at 1 << KALLOC_FIRST_SHIFT and doubling for each class.
+ */
+void kalloc_init(void)
+{
+    char name[KMEM_CACHE_NAME_SIZE];
+    size_t i, size;
+
+    size = 1 << KALLOC_FIRST_SHIFT;
+
+    for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
+        /* NOTE(review): %lu assumes size_t is unsigned long on all
+           supported configurations - confirm. */
+        sprintf(name, "kalloc_%lu", size);
+        kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL, 0);
+        size <<= 1;
+    }
+}
+
+/*
+ * Return the kalloc cache index matching the given allocation size,
+ * which must be strictly greater than 0.
+ *
+ * Index 0 covers sizes up to 1 << KALLOC_FIRST_SHIFT; each subsequent
+ * index doubles the size class.
+ */
+static inline size_t kalloc_get_index(unsigned long size)
+{
+    unsigned long scaled;
+
+    assert(size != 0);
+
+    scaled = (size - 1) >> KALLOC_FIRST_SHIFT;
+
+    return (scaled == 0)
+           ? 0
+           : (sizeof(long) * 8) - __builtin_clzl(scaled);
+}
+
+/*
+ * Fill the unused tail of a kalloc buffer with the redzone byte so that
+ * kfree_verify() can later detect writes past the requested size.
+ */
+static void kalloc_verify(struct kmem_cache *cache, void *buf, size_t size)
+{
+    size_t redzone_size;
+    void *redzone;
+
+    assert(size <= cache->obj_size);
+
+    redzone = buf + size;
+    redzone_size = cache->obj_size - size;
+    memset(redzone, KMEM_REDZONE_BYTE, redzone_size);
+}
+
+/*
+ * General purpose kernel allocator.
+ *
+ * Small sizes are served by the power-of-two caches; sizes beyond the
+ * largest cache but within a page come straight from physical memory;
+ * anything larger uses pageable virtual memory.  Returns 0 for a zero
+ * size or when memory is exhausted.
+ */
+vm_offset_t kalloc(vm_size_t size)
+{
+    size_t index;
+    void *buf;
+
+    if (size == 0)
+        return 0;
+
+    index = kalloc_get_index(size);
+
+    if (index < ARRAY_SIZE(kalloc_caches)) {
+        struct kmem_cache *cache;
+
+        cache = &kalloc_caches[index];
+        buf = (void *)kmem_cache_alloc(cache);
+
+        /* Redzone the tail so kfree() can check for overruns. */
+        if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
+            kalloc_verify(cache, buf, size);
+    } else if (size <= PAGE_SIZE) {
+        buf = (void *)kmem_pagealloc_physmem(PAGE_SIZE);
+    } else {
+        buf = (void *)kmem_pagealloc_virtual(size, 0);
+    }
+
+    return (vm_offset_t)buf;
+}
+
+/*
+ * Check that the redzone written by kalloc_verify() past the requested
+ * size is intact; any modified byte is reported as a fatal error.
+ */
+static void kfree_verify(struct kmem_cache *cache, void *buf, size_t size)
+{
+    unsigned char *redzone_byte, *redzone_end;
+
+    assert(size <= cache->obj_size);
+
+    redzone_byte = buf + size;
+    redzone_end = buf + cache->obj_size;
+
+    while (redzone_byte < redzone_end) {
+        if (*redzone_byte != KMEM_REDZONE_BYTE)
+            kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
+
+        redzone_byte++;
+    }
+}
+
+/*
+ * Release memory obtained from kalloc().
+ *
+ * size must select the same backing strategy as the original kalloc()
+ * call, i.e. be the original value (or at least map to the same size
+ * class).  Both a null address and a zero size are ignored.
+ */
+void kfree(vm_offset_t data, vm_size_t size)
+{
+    size_t index;
+
+    if ((data == 0) || (size == 0))
+        return;
+
+    index = kalloc_get_index(size);
+
+    if (index < ARRAY_SIZE(kalloc_caches)) {
+        struct kmem_cache *cache;
+
+        cache = &kalloc_caches[index];
+
+        if (cache->flags & KMEM_CF_VERIFY)
+            kfree_verify(cache, (void *)data, size);
+
+        kmem_cache_free(cache, data);
+    } else if (size <= PAGE_SIZE) {
+        kmem_pagefree_physmem(data, PAGE_SIZE);
+    } else {
+        kmem_pagefree_virtual(data, size);
+    }
+}
+
+/*
+ * Print a summary of all kernel caches through the given printf-like
+ * function (console printf or ddb's db_printf).
+ *
+ * NOTE(review): the formats mix %lu/%u with long_natural_t and
+ * vm_size_t values - verify the specifiers match these typedefs on all
+ * supported configurations.
+ */
+static void _slab_info(int (printx)(const char *fmt, ...))
+{
+    struct kmem_cache *cache;
+    vm_size_t mem_usage, mem_reclaimable, mem_total, mem_total_reclaimable;
+
+    mem_total = 0;
+    mem_total_reclaimable = 0;
+
+    printx("cache obj slab bufs objs bufs"
+           " total reclaimable\n"
+           "name flags size size /slab usage count"
+           " memory memory\n");
+
+    simple_lock(&kmem_cache_list_lock);
+
+    list_for_each_entry(&kmem_cache_list, cache, node) {
+        simple_lock(&cache->lock);
+
+        /* Sizes are reported in KiB. */
+        mem_usage = (cache->nr_slabs * cache->slab_size) >> 10;
+        mem_reclaimable = (cache->nr_free_slabs * cache->slab_size) >> 10;
+
+        printx("%-20s %04x %7lu %3luk %4lu %6lu %6lu %7uk %10uk\n",
+               cache->name, cache->flags, cache->obj_size,
+               cache->slab_size >> 10,
+               cache->bufs_per_slab, cache->nr_objs, cache->nr_bufs,
+               mem_usage, mem_reclaimable);
+
+        simple_unlock(&cache->lock);
+
+        mem_total += mem_usage;
+        mem_total_reclaimable += mem_reclaimable;
+    }
+
+    simple_unlock(&kmem_cache_list_lock);
+
+    printx("total: %uk, reclaimable: %uk\n",
+           mem_total, mem_total_reclaimable);
+}
+
+/* Print a summary of all kernel caches on the console. */
+void slab_info(void)
+{
+    _slab_info(printf);
+}
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+
+/* Print a summary of all kernel caches on the debugger console. */
+void db_show_slab_info(void)
+{
+    _slab_info(db_printf);
+}
+
+/*
+ * ddb helper: report what the given address belongs to, checking cache
+ * descriptors themselves, tree-tracked active slabs, and the partial
+ * and free slab lists (including their per-slab free buffer lists).
+ */
+void db_whatis_slab(vm_offset_t a)
+{
+    struct kmem_cache *cache;
+    int done = 0;
+
+    /*
+     * SLAB_VERIFY is a 0/1 configuration value (it is used as
+     * "#if SLAB_VERIFY" elsewhere in this file), so test its value,
+     * not whether it is defined: with SLAB_VERIFY defined as 0, the
+     * previous "#ifndef SLAB_VERIFY" never printed this advisory.
+     */
+#if !SLAB_VERIFY
+    db_printf("enabling SLAB_VERIFY is recommended\n");
+#endif
+
+    simple_lock(&kmem_cache_list_lock);
+
+    list_for_each_entry(&kmem_cache_list, cache, node) {
+        /* The address may point into the cache descriptor itself. */
+        if (a >= (vm_offset_t) cache
+            && a < (vm_offset_t) cache + sizeof(*cache))
+            db_printf("Cache %s\n", cache->name);
+
+        simple_lock(&cache->lock);
+
+        /* Allocated buffers can only be found through the tree. */
+        if (cache->flags & KMEM_CF_USE_TREE) {
+            struct rbtree_node *node;
+
+            node = rbtree_lookup_nearest(&cache->active_slabs, (void*) a,
+                                         kmem_slab_cmp_lookup, RBTREE_LEFT);
+            if (node) {
+                struct kmem_slab *slab;
+                slab = rbtree_entry(node, struct kmem_slab, tree_node);
+                if (a >= (vm_offset_t) slab->addr
+                    && a < (vm_offset_t) slab->addr + cache->slab_size) {
+                    db_printf("Allocated from cache %s\n", cache->name);
+                    done = 1;
+                    goto out_cache;
+                }
+            }
+        }
+
+        union kmem_bufctl *free;
+        struct kmem_slab *slab;
+
+        list_for_each_entry(&cache->partial_slabs, slab, list_node) {
+            if (a >= (vm_offset_t) slab->addr
+                && a < (vm_offset_t) slab->addr + cache->slab_size) {
+                db_printf("In cache %s\n", cache->name);
+
+                /* Report whether the buffer is on the free list. */
+                for (free = slab->first_free; free; free = free->next) {
+                    void *buf = kmem_bufctl_to_buf(free, cache);
+
+                    if (a >= (vm_offset_t) buf
+                        && a < (vm_offset_t) buf + cache->buf_size) {
+                        db_printf(" In free list\n");
+                        break;
+                    }
+                }
+
+                done = 1;
+                goto out_cache;
+            }
+        }
+
+        list_for_each_entry(&cache->free_slabs, slab, list_node) {
+            if (a >= (vm_offset_t) slab->addr
+                && a < (vm_offset_t) slab->addr + cache->slab_size) {
+                db_printf("In cache %s\n", cache->name);
+
+                for (free = slab->first_free; free; free = free->next) {
+                    void *buf = kmem_bufctl_to_buf(free, cache);
+
+                    if (a >= (vm_offset_t) buf
+                        && a < (vm_offset_t) buf + cache->buf_size) {
+                        db_printf(" In free list\n");
+                        break;
+                    }
+                }
+
+                done = 1;
+                goto out_cache;
+            }
+        }
+
+out_cache:
+        simple_unlock(&cache->lock);
+        if (done)
+            goto out;
+    }
+
+out:
+    simple_unlock(&kmem_cache_list_lock);
+}
+
+#endif /* MACH_KDB */
+
+#if MACH_DEBUG
+/*
+ * Return statistics for every kernel cache to a user task.
+ *
+ * Per-cache info is gathered under the cache list lock into a temporary
+ * kalloc buffer, then copied out: inline when the caller-supplied
+ * buffer is large enough, through a VM copy object otherwise.
+ */
+kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
+                             unsigned int *infoCntp)
+{
+    struct kmem_cache *cache;
+    cache_info_t *info;
+    unsigned int i, nr_caches;
+    vm_size_t info_size;
+    kern_return_t kr;
+
+    if (host == HOST_NULL)
+        return KERN_INVALID_HOST;
+
+    /* Assume the cache list is mostly unaltered once the kernel is ready */
+
+retry:
+    /* Harmless unsynchronized access, real value checked later */
+    nr_caches = kmem_nr_caches;
+    info_size = nr_caches * sizeof(*info);
+    info = (cache_info_t *)kalloc(info_size);
+
+    if (info == NULL)
+        return KERN_RESOURCE_SHORTAGE;
+
+    i = 0;
+
+    simple_lock(&kmem_cache_list_lock);
+
+    /* A cache was added in the meantime: resize the buffer and retry. */
+    if (nr_caches != kmem_nr_caches) {
+        simple_unlock(&kmem_cache_list_lock);
+        kfree((vm_offset_t)info, info_size);
+        goto retry;
+    }
+
+    list_for_each_entry(&kmem_cache_list, cache, node) {
+        simple_lock(&cache->lock);
+        info[i].flags = cache->flags;
+#if SLAB_USE_CPU_POOLS
+        info[i].cpu_pool_size = cache->cpu_pool_type->array_size;
+#else /* SLAB_USE_CPU_POOLS */
+        info[i].cpu_pool_size = 0;
+#endif /* SLAB_USE_CPU_POOLS */
+        info[i].obj_size = cache->obj_size;
+        info[i].align = cache->align;
+        info[i].buf_size = cache->buf_size;
+        info[i].slab_size = cache->slab_size;
+        info[i].bufs_per_slab = cache->bufs_per_slab;
+        info[i].nr_objs = cache->nr_objs;
+        info[i].nr_bufs = cache->nr_bufs;
+        info[i].nr_slabs = cache->nr_slabs;
+        info[i].nr_free_slabs = cache->nr_free_slabs;
+        strncpy(info[i].name, cache->name, sizeof(info[i].name));
+        info[i].name[sizeof(info[i].name) - 1] = '\0';
+        simple_unlock(&cache->lock);
+
+        i++;
+    }
+
+    simple_unlock(&kmem_cache_list_lock);
+
+    if (nr_caches <= *infoCntp) {
+        /* Caller-supplied buffer is large enough: copy inline. */
+        memcpy(*infop, info, info_size);
+    } else {
+        vm_offset_t info_addr;
+        vm_size_t total_size;
+        vm_map_copy_t copy;
+
+        kr = kmem_alloc_pageable(ipc_kernel_map, &info_addr, info_size);
+
+        if (kr != KERN_SUCCESS)
+            goto out;
+
+        memcpy((char *)info_addr, info, info_size);
+        total_size = round_page(info_size);
+
+        /* Zero the tail of the last page so no kernel data leaks out. */
+        if (info_size < total_size)
+            memset((char *)(info_addr + info_size),
+                   0, total_size - info_size);
+
+        kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size, TRUE, &copy);
+        assert(kr == KERN_SUCCESS);
+        *infop = (cache_info_t *)copy;
+    }
+
+    *infoCntp = nr_caches;
+    kr = KERN_SUCCESS;
+
+out:
+    kfree((vm_offset_t)info, info_size);
+
+    return kr;
+}
diff --git a/kern/slab.h b/kern/slab.h
new file mode 100644
index 0000000..4d51755
--- /dev/null
+++ b/kern/slab.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2011 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Copyright (c) 2010, 2011 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Object caching memory allocator.
+ */
+
+#ifndef _KERN_SLAB_H
+#define _KERN_SLAB_H
+
+#include <cache.h>
+#include <kern/cpu_number.h>
+#include <kern/lock.h>
+#include <kern/list.h>
+#include <kern/rbtree.h>
+#include <mach/machine/vm_types.h>
+#include <sys/types.h>
+#include <vm/vm_types.h>
+
+struct kmem_cache;
+
+#if SLAB_USE_CPU_POOLS
+
+/*
+ * Per-processor cache of pre-constructed objects.
+ *
+ * The flags member is a read-only CPU-local copy of the parent cache flags.
+ */
+struct kmem_cpu_pool {
+ simple_lock_data_t lock;
+ int flags;
+ int size;
+ int transfer_size;
+ int nr_objs;
+ void **array;
+} __attribute__((aligned(CPU_L1_SIZE)));
+
+/*
+ * When a cache is created, its CPU pool type is determined from the buffer
+ * size. For small buffer sizes, many objects can be cached in a CPU pool.
+ * Conversely, for large buffer sizes, this would incur much overhead, so only
+ * a few objects are stored in a CPU pool.
+ */
+struct kmem_cpu_pool_type {
+ size_t buf_size;
+ int array_size;
+ size_t array_align;
+ struct kmem_cache *array_cache;
+};
+#endif /* SLAB_USE_CPU_POOLS */
+
+/*
+ * Buffer descriptor.
+ *
+ * For normal caches (i.e. without SLAB_CF_VERIFY), bufctls are located at the
+ * end of (but inside) each buffer. If SLAB_CF_VERIFY is set, bufctls are
+ * located after each buffer.
+ *
+ * When an object is allocated to a client, its bufctl isn't used. This memory
+ * is instead used for redzoning if cache debugging is in effect.
+ */
+union kmem_bufctl {
+ union kmem_bufctl *next;
+ unsigned long redzone;
+};
+
+/*
+ * Buffer tag.
+ *
+ * This structure is only used for SLAB_CF_VERIFY caches. It is located after
+ * the bufctl and includes information about the state of the buffer it
+ * describes (allocated or not). It should be thought of as a debugging
+ * extension of the bufctl.
+ */
+struct kmem_buftag {
+ unsigned long state;
+};
+
+/*
+ * Page-aligned collection of unconstructed buffers.
+ */
+struct kmem_slab {
+    struct kmem_cache *cache;       /* Owning cache, for deferred destruction */
+    struct list list_node;          /* Linkage in the free/partial slab lists */
+    struct rbtree_node tree_node;   /* Linkage in the active slabs tree */
+    unsigned long nr_refs;          /* Number of currently allocated buffers */
+    union kmem_bufctl *first_free;  /* Head of the free buffer list */
+    void *addr;                     /* Address of the slab's buffer area */
+};
+
+/*
+ * Type for constructor functions.
+ *
+ * The pre-constructed state of an object is supposed to include only
+ * elements such as e.g. linked lists, locks, reference counters. Therefore
+ * constructors are expected to 1) never fail and 2) not need any
+ * user-provided data. The first constraint implies that object construction
+ * never performs dynamic resource allocation, which also means there is no
+ * need for destructors.
+ */
+typedef void (*kmem_cache_ctor_t)(void *obj);
+
+/*
+ * Cache name buffer size. The size is chosen so that struct
+ * kmem_cache fits into two cache lines. The size of a cache line on
+ * a typical CPU is 64 bytes.
+ */
+#define KMEM_CACHE_NAME_SIZE 24
+
+/*
+ * Cache of objects.
+ *
+ * Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
+ *
+ * Currently, SLAB_USE_CPU_POOLS is not defined. KMEM_CACHE_NAME_SIZE
+ * is chosen so that the struct fits into two cache lines. The first
+ * cache line contains all hot fields.
+ */
+struct kmem_cache {
+#if SLAB_USE_CPU_POOLS
+    /* CPU pool layer */
+    struct kmem_cpu_pool cpu_pools[NCPUS];
+    struct kmem_cpu_pool_type *cpu_pool_type;
+#endif /* SLAB_USE_CPU_POOLS */
+
+    /* Slab layer */
+    simple_lock_data_t lock;
+    struct list node;                /* Cache list linkage */
+    struct list partial_slabs;       /* Slabs with free and used buffers */
+    struct list free_slabs;          /* Slabs with only free buffers */
+    struct rbtree active_slabs;      /* In-use slabs, ordered by address */
+    int flags;                       /* KMEM_CF_* values */
+    size_t bufctl_dist;              /* Distance from buffer to bufctl */
+    size_t slab_size;                /* Size of a slab, in bytes */
+    long_natural_t bufs_per_slab;    /* Buffers per slab */
+    long_natural_t nr_objs;          /* Number of allocated objects */
+    long_natural_t nr_free_slabs;    /* Number of completely free slabs */
+    kmem_cache_ctor_t ctor;          /* Optional object constructor */
+    /* All fields below are cold */
+    size_t obj_size;                 /* User-provided size */
+    /* Assuming ! SLAB_USE_CPU_POOLS, here is the cacheline boundary */
+    size_t align;                    /* Buffer alignment */
+    size_t buf_size;                 /* Aligned object size */
+    size_t color;                    /* Color of the next slab created */
+    size_t color_max;                /* Maximum slab color */
+    long_natural_t nr_bufs;          /* Total number of buffers */
+    long_natural_t nr_slabs;         /* Total number of slabs */
+    char name[KMEM_CACHE_NAME_SIZE]; /* Cache name, NUL-terminated */
+    size_t buftag_dist;              /* Distance from buffer to buftag */
+    size_t redzone_pad;              /* Bytes from end of object to redzone word */
+} __cacheline_aligned;
+
+/*
+ * Mach-style declarations for struct kmem_cache.
+ */
+typedef struct kmem_cache *kmem_cache_t;
+#define KMEM_CACHE_NULL ((kmem_cache_t) 0)
+
+/*
+ * Cache initialization flags.
+ */
+#define KMEM_CACHE_NOOFFSLAB 0x1 /* Don't allocate external slab data */
+#define KMEM_CACHE_PHYSMEM 0x2 /* Allocate from physical memory */
+#define KMEM_CACHE_VERIFY 0x4 /* Use debugging facilities */
+
+/*
+ * Initialize a cache.
+ */
+void kmem_cache_init(struct kmem_cache *cache, const char *name,
+ size_t obj_size, size_t align,
+ kmem_cache_ctor_t ctor, int flags);
+
+/*
+ * Allocate an object from a cache.
+ */
+vm_offset_t kmem_cache_alloc(struct kmem_cache *cache);
+
+/*
+ * Release an object to its cache.
+ */
+void kmem_cache_free(struct kmem_cache *cache, vm_offset_t obj);
+
+/*
+ * Initialize the memory allocator module.
+ */
+void slab_bootstrap(void);
+void slab_init(void);
+
+/*
+ * Release free slabs to the VM system.
+ */
+void slab_collect(void);
+
+/*
+ * Display a summary of all kernel caches.
+ */
+void slab_info(void);
+
+#if MACH_KDB
+void db_show_slab_info(void);
+void db_whatis_slab(vm_offset_t addr);
+#endif /* MACH_KDB */
+
+#endif /* _KERN_SLAB_H */
diff --git a/kern/smp.c b/kern/smp.c
new file mode 100644
index 0000000..295f703
--- /dev/null
+++ b/kern/smp.c
@@ -0,0 +1,49 @@
+/* smp.c - Template for generic SMP controller for Mach.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <kern/smp.h>
+#include <machine/smp.h>
+#include <stdint.h>
+
/*
 * Per-machine SMP bookkeeping.  Only the CPU count is recorded here;
 * it is filled in by the machine-dependent probe via smp_set_numcpus().
 */
struct smp_data {
	uint8_t num_cpus;	/* number of cpus detected; 0 until probed */
} smp_info;

/*
 * smp_set_numcpus: record the number of cpus in the smp_info structure.
 */
void smp_set_numcpus(uint8_t numcpus)
{
	smp_info.num_cpus = numcpus;
}

/*
 * smp_get_numcpus: return the number of cpus existing in the machine.
 *
 * If probing never ran (or found nothing), report 1: whatever happens,
 * the cpu we are running on certainly exists.
 */
uint8_t smp_get_numcpus(void)
{
	uint8_t count = smp_info.num_cpus;

	return (count == 0) ? 1 : count;
}
diff --git a/kern/smp.h b/kern/smp.h
new file mode 100644
index 0000000..44e96f3
--- /dev/null
+++ b/kern/smp.h
@@ -0,0 +1,24 @@
+/* smp.h - Template for generic SMP controller for Mach. Header file
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <stdint.h>
+
+void smp_set_numcpus(uint8_t numcpus);
+uint8_t smp_get_numcpus(void);
diff --git a/kern/startup.c b/kern/startup.c
new file mode 100644
index 0000000..e72cf6f
--- /dev/null
+++ b/kern/startup.c
@@ -0,0 +1,316 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel startup.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/machine.h>
+#include <mach/task_special_ports.h>
+#include <mach/vm_param.h>
+#include <ipc/ipc_init.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/gsync.h>
+#include <kern/machine.h>
+#include <kern/mach_factor.h>
+#include <kern/mach_clock.h>
+#include <kern/processor.h>
+#include <kern/rdxtree.h>
+#include <kern/sched_prim.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/thread_swap.h>
+#include <kern/timer.h>
+#include <kern/xpr.h>
+#include <kern/bootstrap.h>
+#include <kern/startup.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_init.h>
+#include <vm/vm_pageout.h>
+#include <machine/machspl.h>
+#include <machine/pcb.h>
+#include <machine/pmap.h>
+#include <machine/model_dep.h>
+#include <mach/version.h>
+#include <device/device_init.h>
+#include <device/intr.h>
+
+#if MACH_KDB
+#include <device/cons.h>
+#endif /* MACH_KDB */
+
+#if ! MACH_KBD
+boolean_t reboot_on_panic = TRUE;
+#endif
+
+#if NCPUS > 1
+#include <machine/mp_desc.h>
+#include <kern/smp.h>
+#include <kern/machine.h>
+#endif /* NCPUS > 1 */
+
+/* XX */
+extern char *kernel_cmdline;
+
/*
 * Running in virtual memory, on the interrupt stack.
 * Does not return.  Dispatches initial thread.
 *
 * Assumes that master_cpu is set.
 */
void setup_main(void)
{
	thread_t	startup_thread;
	phys_addr_t	memsize;

#if MACH_KDB
	/*
	 * Cause a breakpoint trap to the debugger before proceeding
	 * any further if the proper option flag was specified
	 * on the kernel's command line.
	 * XXX check for surrounding spaces.
	 */
	if (strstr(kernel_cmdline, "-d ")) {
		cninit();		/* need console for debugger */
		SoftDebugger("init");
	}
#else	/* MACH_KDB */
	if (strstr (kernel_cmdline, "-H ")) {
		reboot_on_panic = FALSE;
	}
#endif	/* MACH_KDB */

	panic_init();

	/*
	 * Bring up the scheduler, VM and IPC subsystems.  The order is
	 * significant: IPC bootstrap needs VM bootstrap, and the final
	 * init passes need both bootstraps done first.
	 */
	sched_init();
	vm_mem_bootstrap();
	rdxtree_cache_init();
	ipc_bootstrap();
	vm_mem_init();
	ipc_init();

	/*
	 * As soon as the virtual memory system is up, we record
	 * that this CPU is using the kernel pmap.
	 */
	PMAP_ACTIVATE_KERNEL(master_cpu);

	init_timers();
	init_timeout();

#if XPR_DEBUG
	xprbootstrap();
#endif	/* XPR_DEBUG */

	machine_init();

	mapable_time_init();

	/*
	 * Publish static machine facts.  machine_info.memory_size may be
	 * narrower than phys_addr_t, hence the overflow clamp below.
	 */
	machine_info.max_cpus = NCPUS;
	memsize = vm_page_mem_size();
	machine_info.memory_size = memsize;
	if (machine_info.memory_size < memsize)
		/* Overflow, report at least 4GB */
		machine_info.memory_size = ~0;
	machine_info.avail_cpus = 0;
	machine_info.major_version = KERNEL_MAJOR_VERSION;
	machine_info.minor_version = KERNEL_MINOR_VERSION;

	/*
	 * Initialize the IPC, task, and thread subsystems.
	 */
	task_init();
	thread_init();
	swapper_init();
#if MACH_HOST
	pset_sys_init();
#endif	/* MACH_HOST */

	/*
	 * Kick off the time-out driven routines by calling
	 * them the first time.
	 */
	recompute_priorities(NULL);
	compute_mach_factor();

	gsync_setup ();

	/*
	 * Create a kernel thread to start the other kernel
	 * threads.  Thread_resume (from kernel_thread) calls
	 * thread_setrun, which may look at current thread;
	 * we must avoid this, since there is no current thread.
	 */

	/*
	 * Create the thread, and point it at the routine.
	 */
	(void) thread_create(kernel_task, &startup_thread);
	thread_start(startup_thread, start_kernel_threads);

	/*
	 * Give it a kernel stack.
	 */
	thread_doswapin(startup_thread);

	/*
	 * Pretend it is already running, and resume it.
	 * Since it looks as if it is running, thread_resume
	 * will not try to put it on the run queues.
	 *
	 * We can do all of this without locking, because nothing
	 * else is running yet.
	 */
	startup_thread->state |= TH_RUN;
	(void) thread_resume(startup_thread);

	/*
	 * Start the thread.
	 */
	cpu_launch_first_thread(startup_thread);
	/*NOTREACHED*/
}
+
/*
 * Now running in a thread.  Create the rest of the kernel threads
 * and the bootstrap task.  Finally becomes the pageout daemon;
 * does not return.
 */
void start_kernel_threads(void)
{
	int	i;

	/*
	 * Create the idle threads and the other
	 * service threads.  One idle thread is bound to each
	 * configured processor.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (machine_slot[i].is_cpu) {
		thread_t	th;

		(void) thread_create(kernel_task, &th);
		thread_bind(th, cpu_to_processor(i));
		thread_start(th, idle_thread);
		thread_doswapin(th);
		(void) thread_resume(th);
	    }
	}

	/* Reaper, swap-in and scheduler service threads. */
	(void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
	(void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
	(void) kernel_thread(kernel_task, sched_thread, (char *) 0);
#ifndef	MACH_XEN
	/* Interrupt service thread (not used on Xen builds). */
	(void) kernel_thread(kernel_task, intr_thread, (char *)0);
#endif	/* MACH_XEN */

#if	NCPUS > 1
	/*
	 * Create the shutdown thread.
	 */
	(void) kernel_thread(kernel_task, action_thread, (char *) 0);

	/*
	 * Allow other CPUs to run.
	 */
	start_other_cpus();
#endif	/* NCPUS > 1 */

	/*
	 * Create the device service.
	 */
	device_service_create();

	/*
	 * Initialize kernel task's creation time.
	 * When we created the kernel task in task_init, the mapped
	 * time was not yet available.  Now, last thing before starting
	 * the user bootstrap, record the current time as the kernel
	 * task's creation time.
	 */
	record_time_stamp (&kernel_task->creation_time);

	/*
	 * Start the user bootstrap.
	 */
	bootstrap_create();

#if	XPR_DEBUG
	xprinit();		/* XXX */
#endif	/* XPR_DEBUG */

	/*
	 * Become the pageout daemon.
	 */
	(void) spl0();
	vm_pageout();
	/*NOTREACHED*/
}
+
/*
 * Start up the first thread on a CPU.
 * First thread is specified for the master CPU.
 *
 * th: thread to launch, or THREAD_NULL to pick one from the
 *     processor's run queue.  Does not return; ends by loading
 *     the thread's context.
 */
void cpu_launch_first_thread(thread_t th)
{
	int	mycpu;

	mycpu = cpu_number();

	cpu_up(mycpu);

	start_timer(&kernel_timer[mycpu]);

	/*
	 * Block all interrupts for choose_thread.
	 */
	(void) splhigh();

	if (th == THREAD_NULL)
	    th = choose_thread(cpu_to_processor(mycpu));
	if (th == THREAD_NULL)
	    panic("cpu_launch_first_thread");

	PMAP_ACTIVATE_KERNEL(mycpu);

	/* Make the chosen thread this cpu's current thread and stack. */
	percpu_assign(active_thread, th);
	percpu_assign(active_stack, th->kernel_stack);
	thread_lock(th);
	th->state &= ~TH_UNINT;		/* thread is now interruptible */
	thread_unlock(th);
	timer_switch(&th->system_timer);

	PMAP_ACTIVATE_USER(vm_map_pmap(th->task->map), th, mycpu);

	startrtclock();		/* needs an active thread */

	load_context(th);
	/*NOTREACHED*/
}
diff --git a/kern/startup.h b/kern/startup.h
new file mode 100644
index 0000000..d924d15
--- /dev/null
+++ b/kern/startup.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_STARTUP_H_
+#define _KERN_STARTUP_H_
+
+#include <kern/thread.h>
+
+extern void setup_main(void);
+void cpu_launch_first_thread(thread_t th);
+void start_kernel_threads(void);
+
+#endif /* _KERN_STARTUP_H_ */
diff --git a/kern/strings.c b/kern/strings.c
new file mode 100644
index 0000000..7e7fda0
--- /dev/null
+++ b/kern/strings.c
@@ -0,0 +1,275 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: strings.c
+ * Author: Robert V. Baron, Carnegie Mellon University
+ * Date: ??/92
+ *
+ * String functions.
+ */
+
+#include <string.h>
+
+#ifdef strcpy
+#undef strcmp
+#undef strncmp
+#undef strcpy
+#undef strncpy
+#undef strlen
+#endif
+
/*
 * Abstract:
 *	strcmp (s1, s2) compares the strings "s1" and "s2".
 *	It returns 0 if the strings are identical. It returns
 *	> 0 if the first character that differs in the two strings
 *	is larger in s1 than in s2 or if s1 is longer than s2 and
 *	the contents are identical up to the length of s2.
 *	It returns < 0 if the first differing character is smaller
 *	in s1 than in s2 or if s1 is shorter than s2 and the
 *	contents are identical up to the length of s1.
 *
 * NOTE(review): *s1 is read through plain char, so on signed-char
 * targets bytes above 0x7F sign-extend before the unsigned widening;
 * ISO C specifies comparison as unsigned char — confirm this
 * difference is acceptable for kernel callers.
 */

int __attribute__ ((pure))
strcmp(
	const char *s1,
	const char *s2)
{
	unsigned int a, b;

	do {
		a = *s1++;
		b = *s2++;
		if (a != b)
			return a-b;	/* includes case when
					   'a' is zero and 'b' is not zero
					   or vice versa */
	} while (a != '\0');

	return 0;	/* both are zero */
}
+
+
/*
 * Abstract:
 *	strncmp (s1, s2, n) compares the strings "s1" and "s2"
 *	in exactly the same way as strcmp does.  Except the
 *	comparison runs for at most "n" characters.
 *	Comparison stops early if a terminating '\0' is reached
 *	in both strings.
 */

int __attribute__ ((pure))
strncmp(
	const char *s1,
	const char *s2,
	size_t n)
{
	unsigned int a, b;

	while (n != 0) {
		a = *s1++;
		b = *s2++;
		if (a != b)
			return a-b;	/* includes case when
					   'a' is zero and 'b' is not zero
					   or vice versa */
		if (a == '\0')
			return 0;	/* both are zero */
		n--;
	}

	return 0;	/* first n characters match */
}
+
+
/*
 * Abstract:
 *	strcpy copies the string "from", including its terminating
 *	null character, into the buffer "to".  The caller must ensure
 *	"to" is large enough.  A pointer to "to" is returned.
 */

char *
strcpy(
	char *to,
	const char *from)
{
	char *dst = to;
	char c;

	do {
		c = *from++;
		*dst++ = c;
	} while (c != '\0');

	return to;
}
+
/*
 * Abstract:
 *	strncpy copies "count" characters from the "from" string to
 *	the "to" string. If "from" contains less than "count" characters
 *	"to" will be padded with null characters until exactly "count"
 *	characters have been written. The return value is a pointer
 *	to the "to" string.
 *
 *	Beware: if strlen(from) >= count, the result is NOT
 *	null-terminated (standard strncpy semantics).
 */

char *
strncpy(
	char *to,
	const char *from,
	size_t count)
{
	char *ret = to;

	/* Copy up to count characters, stopping after a '\0'. */
	while (count != 0) {
		count--;
		if ((*to++ = *from++) == '\0')
			break;
	}

	/* Zero-pad the remainder if the source ended early. */
	while (count != 0) {
		*to++ = '\0';
		count--;
	}

	return ret;
}
+
/*
 * Abstract:
 *	strlen returns the number of characters in "string" preceding
 *	the terminating null character.
 */

size_t __attribute__ ((pure))
strlen(
	const char *string)
{
	const char *p = string;

	while (*p != '\0')
		p++;

	return (size_t)(p - string);
}
+
/*
 * Abstract:
 *	strchr returns a pointer to the first occurrence of the character
 *	"c" in the string "s".  If "c" is not found, return NULL.
 *
 *	Per ISO C, "c" is converted to char before the search, so
 *	values outside char range (and '\0' itself) behave as the
 *	standard requires: strchr(s, '\0') returns a pointer to the
 *	terminator.
 */
char *
strchr(
	const char *s,
	int c)
{
	/* ISO C 7.24.5.2: compare as char, not as the raw int value.
	   Without this, high-bit characters are never matched on
	   signed-char targets. */
	const char ch = (char) c;

	while (*s != ch) {
		if (*s == '\0') {
			return NULL;
		}

		s++;
	}

	return (char *)s;
}
+
/*
 * Abstract:
 *	strsep extracts tokens from strings. If "*sp" is NULL, return NULL
 *	and do nothing. Otherwise, find the first token in string "*sp".
 *	Tokens are delimited by characters in the string "delim". If no
 *	delimiter is found, the token is the entire string "*sp", and "*sp"
 *	is made NULL. Otherwise, overwrite the delimiter with a null byte,
 *	and make "*sp" point past it.
 *
 *	Note: the input string is modified in place.
 */
char *
strsep(
	char **sp,
	const char *delim)
{
	const char *d;
	char *s, *t;

	/* t remembers the start of the token; s scans forward. */
	s = t = *sp;

	if (s == NULL) {
		return NULL;
	}

	for (;;) {
		/* End of string: the whole remainder is the last token. */
		if (*s == '\0') {
			*sp = NULL;
			return t;
		}

		d = delim;

		/* Check *s against every delimiter character. */
		for (;;) {
			if (*d == '\0') {
				break;
			}

			if (*d == *s) {
				/* Split here: terminate the token and
				   advance *sp past the delimiter. */
				*s = '\0';
				*sp = s + 1;
				return t;
			}

			d++;
		}

		s++;
	}
}
+
/*
 * Abstract:
 *	strstr returns a pointer to the first occurrence of the substring
 *	"find" in the string "s". If no substring was found, return NULL.
 *	An empty "find" matches at the start of "s", per convention.
 */
char *
strstr(
	const char *s,
	const char *find)
{
	size_t len;

	/* Hoist the needle length out of the scan loop. */
	len = strlen(find);

	if (len == 0) {
		return (char *)s;
	}

	/* Naive O(|s| * len) scan: try a prefix match at each position. */
	for (;;) {
		if (*s == '\0') {
			return NULL;
		}

		if (strncmp(s, find, len) == 0) {
			return (char *)s;
		}

		s++;
	}
}
diff --git a/kern/syscall_emulation.c b/kern/syscall_emulation.c
new file mode 100644
index 0000000..620c235
--- /dev/null
+++ b/kern/syscall_emulation.c
@@ -0,0 +1,453 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <string.h>
+
+#include <mach/error.h>
+#include <mach/vm_param.h>
+#include <kern/syscall_emulation.h>
+#include <kern/task.h>
+#include <kern/kalloc.h>
+#include <kern/mach.server.h>
+#include <vm/vm_kern.h>
+
+/* XXX */
+#define syscall_emulation_sync(task)
+
+
+
+/*
+ * WARNING:
+ * This code knows that kalloc() allocates memory most efficiently
+ * in sizes that are powers of 2, and asks for those sizes.
+ */
+
+/*
+ * Go from number of entries to size of struct eml_dispatch and back.
+ */
+#define base_size (sizeof(struct eml_dispatch) - sizeof(eml_routine_t))
+#define count_to_size(count) \
+ (base_size + sizeof(vm_offset_t) * (count))
+
+#define size_to_count(size) \
+ ( ((size) - base_size) / sizeof(vm_offset_t) )
+
/*
 * eml_init: initialize user space emulation code.
 * Nothing to set up at present; the function is kept as an
 * initialization hook.
 */
void eml_init(void)
{
}
+
/*
 * eml_task_reference() [Exported]
 *
 * Bumps the reference count on the common emulation
 * vector: the new "task" shares its "parent"'s dispatch
 * vector (if any).  With no parent, the task starts with
 * no emulation vector.
 */

void eml_task_reference(
	task_t 	task,
	task_t	parent)
{
	eml_dispatch_t	eml;

	if (parent == TASK_NULL)
	    eml = EML_DISPATCH_NULL;
	else
	    eml = parent->eml_dispatch;

	if (eml != EML_DISPATCH_NULL) {
	    /* The vector's lock protects only its reference count. */
	    simple_lock(&eml->lock);
	    eml->ref_count++;
	    simple_unlock(&eml->lock);
	}
	task->eml_dispatch = eml;
}
+
+
/*
 * eml_task_deallocate() [Exported]
 *
 * Cleans up after the emulation code when a process exits:
 * drops the task's reference on its dispatch vector and
 * frees the vector once the last reference is gone.
 */

void eml_task_deallocate(const task_t task)
{
	eml_dispatch_t	eml;

	eml = task->eml_dispatch;
	if (eml != EML_DISPATCH_NULL) {
	    int count;

	    simple_lock(&eml->lock);
	    count = --eml->ref_count;
	    simple_unlock(&eml->lock);

	    /* Last reference: safe to read disp_count unlocked and free. */
	    if (count == 0)
		kfree((vm_offset_t)eml, count_to_size(eml->disp_count));
	}
}
+
/*
 * task_set_emulation_vector: [Server Entry]
 *	set a list of emulated system calls for this task.
 *
 *	Installs entries [vector_start, vector_start + count) into the
 *	task's dispatch vector, reusing or reallocating the vector as
 *	needed.  Returns EML_BAD_TASK for a null task, else KERN_SUCCESS.
 */
static kern_return_t
task_set_emulation_vector_internal(
	task_t			task,
	int			vector_start,
	emulation_vector_t	emulation_vector,
	unsigned int		emulation_vector_count)
{
	eml_dispatch_t	cur_eml, new_eml, old_eml;
	vm_size_t	new_size;
	int		cur_start, cur_end;
	int		new_start = 0, new_end = 0;
	int		vector_end;

	if (task == TASK_NULL)
		return EML_BAD_TASK;

	vector_end = vector_start + emulation_vector_count;

	/*
	 * We try to re-use the existing emulation vector
	 * if possible.  We can reuse the vector if it
	 * is not shared with another task and if it is
	 * large enough to contain the entries we are
	 * supplying.
	 *
	 * We must grab the lock on the task to check whether
	 * there is an emulation vector.
	 * If the vector is shared or not large enough, we
	 * need to drop the lock and allocate a new emulation
	 * vector.
	 *
	 * While the lock is dropped, the emulation vector
	 * may be released by all other tasks (giving us
	 * exclusive use), or may be enlarged by another
	 * task_set_emulation_vector call.  Therefore,
	 * after allocating the new emulation vector, we
	 * must grab the lock again to check whether we
	 * really need the new vector we just allocated.
	 *
	 * Since an emulation vector cannot be altered
	 * if it is in use by more than one task, the
	 * task lock is sufficient to protect the vector`s
	 * start, count, and contents.  The lock in the
	 * vector protects only the reference count.
	 */

	old_eml = EML_DISPATCH_NULL;	/* vector to discard */
	new_eml = EML_DISPATCH_NULL;	/* new vector */

	for (;;) {
	    /*
	     * Find the current emulation vector.
	     * See whether we can overwrite it.
	     */
	    task_lock(task);
	    cur_eml = task->eml_dispatch;
	    if (cur_eml != EML_DISPATCH_NULL) {
		cur_start = cur_eml->disp_min;
		cur_end = cur_eml->disp_count + cur_start;

		simple_lock(&cur_eml->lock);
		if (cur_eml->ref_count == 1 &&
		    cur_start <= vector_start &&
		    cur_end >= vector_end)
		{
		    /*
		     * Can use the existing emulation vector.
		     * Discard any new one we allocated.
		     */
		    simple_unlock(&cur_eml->lock);
		    old_eml = new_eml;
		    break;
		}

		if (new_eml != EML_DISPATCH_NULL &&
		    new_start <= cur_start &&
		    new_end >= cur_end)
		{
		    /*
		     * A new vector was allocated, and it is large enough
		     * to hold all the entries from the current vector.
		     * Copy the entries to the new emulation vector,
		     * deallocate the current one, and use the new one.
		     */
		    memcpy(&new_eml->disp_vector[cur_start-new_start],
			   &cur_eml->disp_vector[0],
			   cur_eml->disp_count * sizeof(vm_offset_t));

		    if (--cur_eml->ref_count == 0)
			old_eml = cur_eml;	/* discard old vector */
		    simple_unlock(&cur_eml->lock);

		    task->eml_dispatch = new_eml;
		    syscall_emulation_sync(task);
		    cur_eml = new_eml;
		    break;
		}
		simple_unlock(&cur_eml->lock);

		/*
		 * Need a new emulation vector.
		 * Ensure it will hold all the entries from
		 * both the old and new emulation vectors.
		 */
		new_start = vector_start;
		if (new_start > cur_start)
		    new_start = cur_start;
		new_end = vector_end;
		if (new_end < cur_end)
		    new_end = cur_end;
	    }
	    else {
		/*
		 * There is no current emulation vector.
		 * If a new one was allocated, use it.
		 */
		if (new_eml != EML_DISPATCH_NULL) {
		    task->eml_dispatch = new_eml;
		    cur_eml = new_eml;
		    break;
		}

		/*
		 * Compute the size needed for the new vector.
		 */
		new_start = vector_start;
		new_end = vector_end;
	    }

	    /*
	     * Have no vector (or one that is no longer large enough).
	     * Drop all the locks and allocate a new vector.
	     * Repeat the loop to check whether the old vector was
	     * changed while we didn`t hold the locks.
	     */

	    task_unlock(task);

	    if (new_eml != EML_DISPATCH_NULL)
		kfree((vm_offset_t)new_eml, count_to_size(new_eml->disp_count));

	    new_size = count_to_size(new_end - new_start);
	    /* NOTE(review): kalloc's return is not checked; a failed
	       allocation would fault in memset below — confirm kalloc
	       cannot return 0 here (or panics on exhaustion). */
	    new_eml = (eml_dispatch_t) kalloc(new_size);

	    memset(new_eml, 0, new_size);
	    simple_lock_init(&new_eml->lock);
	    new_eml->ref_count = 1;
	    new_eml->disp_min = new_start;
	    new_eml->disp_count = new_end - new_start;

	    continue;
	}

	/*
	 * We have the emulation vector.
	 * Install the new emulation entries.
	 */
	memcpy(&cur_eml->disp_vector[vector_start - cur_eml->disp_min],
	       &emulation_vector[0],
	       emulation_vector_count * sizeof(vm_offset_t));

	task_unlock(task);

	/*
	 * Discard any old emulation vector we don`t need.
	 */
	if (old_eml)
	    kfree((vm_offset_t) old_eml, count_to_size(old_eml->disp_count));

	return KERN_SUCCESS;
}
+
/*
 * task_set_emulation_vector: [Server Entry]
 *
 * Set the list of emulated system calls for this task.
 * The list is out-of-line: "emulation_vector" arrives as a
 * vm_map_copy_t which is copied into the kernel map, consumed
 * by the internal routine, then freed.
 */
kern_return_t
task_set_emulation_vector(
	task_t			task,
	int			vector_start,
	emulation_vector_t	emulation_vector,
	unsigned int		emulation_vector_count)
{
	kern_return_t		kr;
	vm_offset_t		emul_vector_addr;

	if (task == TASK_NULL)
		return EML_BAD_TASK;	/* XXX sb KERN_INVALID_ARGUMENT */

	/*
	 * The emulation vector is really a vm_map_copy_t.
	 */
	kr = vm_map_copyout(ipc_kernel_map, &emul_vector_addr,
			    (vm_map_copy_t) emulation_vector);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Do the work.
	 */
	kr = task_set_emulation_vector_internal(
			task,
			vector_start,
			(emulation_vector_t) emul_vector_addr,
			emulation_vector_count);

	/*
	 * Discard the memory.
	 * NOTE(review): the size is computed with sizeof(eml_dispatch_t)
	 * (a pointer) where the entries are vm_offset_t; the two are the
	 * same width on supported targets, but sizeof(vm_offset_t) looks
	 * intended — confirm.
	 */
	(void) kmem_free(ipc_kernel_map,
			 emul_vector_addr,
			 emulation_vector_count * sizeof(eml_dispatch_t));

	return kr;
}
+
/*
 * task_get_emulation_vector: [Server Entry]
 *
 * Get the list of emulated system calls for this task.
 * List is returned out-of-line as a vm_map_copy_t in
 * *emulation_vector; *vector_start and *emulation_vector_count
 * describe the range covered.  A task with no vector returns
 * success with zeroed out-parameters.
 */
kern_return_t
task_get_emulation_vector(
	task_t		task,
	int		*vector_start,			/* out */
	emulation_vector_t	*emulation_vector,	/* out */
	unsigned int	*emulation_vector_count)	/* out */
{
	eml_dispatch_t	eml;
	vm_size_t	vector_size, size;
	vm_offset_t	addr;

	if (task == TASK_NULL)
		return EML_BAD_TASK;

	addr = 0;
	size = 0;

	/*
	 * Allocate-then-recheck loop: the vector may grow while the
	 * task is unlocked for allocation, so loop until our buffer
	 * is large enough while the task lock is held.
	 */
	for(;;) {
	    vm_size_t	size_needed;

	    task_lock(task);
	    eml = task->eml_dispatch;
	    if (eml == EML_DISPATCH_NULL) {
		task_unlock(task);
		if (addr)
		    (void) kmem_free(ipc_kernel_map, addr, size);
		*vector_start = 0;
		*emulation_vector = 0;
		*emulation_vector_count = 0;
		return KERN_SUCCESS;
	    }

	    /*
	     * Do we have the memory we need?
	     */
	    vector_size = eml->disp_count * sizeof(vm_offset_t);

	    size_needed = round_page(vector_size);
	    if (size_needed <= size)
		break;

	    /*
	     * If not, unlock the task and allocate more memory.
	     */
	    task_unlock(task);

	    if (size != 0)
		kmem_free(ipc_kernel_map, addr, size);

	    size = size_needed;
	    if (kmem_alloc(ipc_kernel_map, &addr, size) != KERN_SUCCESS)
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * Copy out the dispatch addresses (task still locked, so the
	 * vector cannot change underneath us).
	 */
	*vector_start = eml->disp_min;
	*emulation_vector_count = eml->disp_count;
	memcpy((void *)addr,
	       eml->disp_vector,
	       vector_size);

	/*
	 * Unlock the task and free any memory we did not need
	 */
	task_unlock(task);

	{
	    vm_size_t size_used, size_left;
	    vm_map_copy_t memory;

	    /*
	     * Free any unused memory beyond the end of the last page used
	     */
	    size_used = round_page(vector_size);
	    if (size_used != size)
		(void) kmem_free(ipc_kernel_map,
				 addr + size_used,
				 size - size_used);

	    /*
	     * Zero the remainder of the page being returned.
	     */
	    size_left = size_used - vector_size;
	    if (size_left > 0)
		memset((char *)addr + vector_size, 0, size_left);

	    /*
	     * Make memory into copyin form - this unwires it.
	     */
	    (void) vm_map_copyin(ipc_kernel_map, addr, vector_size, TRUE, &memory);

	    *emulation_vector = (emulation_vector_t) memory;
	}

	return KERN_SUCCESS;
}
+
/*
 * task_set_emulation: [Server Entry]
 *	set up for user space emulation of syscalls within this task.
 *	Convenience wrapper installing a single entry
 *	(routine_number -> routine_entry_pt) into the task's vector.
 */
kern_return_t task_set_emulation(
	task_t		task,
	vm_offset_t 	routine_entry_pt,
	int		routine_number)
{
	return task_set_emulation_vector_internal(task, routine_number,
						  &routine_entry_pt, 1);
}
diff --git a/kern/syscall_emulation.h b/kern/syscall_emulation.h
new file mode 100644
index 0000000..bf20e44
--- /dev/null
+++ b/kern/syscall_emulation.h
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_SYSCALL_EMULATION_H_
+#define _KERN_SYSCALL_EMULATION_H_
+
+#ifndef __ASSEMBLER__
+#include <mach/machine/vm_types.h>
+#include <kern/lock.h>
+#include <kern/task.h>
+
/* An emulated-syscall entry point in user space. */
typedef vm_offset_t	eml_routine_t;

/*
 * Per-task (possibly shared between parent and child tasks)
 * syscall-emulation dispatch table.  The structure is allocated
 * with extra space so disp_vector really holds disp_count entries;
 * the [1] is the old-style flexible array idiom.
 */
typedef struct eml_dispatch {
	decl_simple_lock_data(, lock)	/* lock for reference count */
	int	ref_count;	/* reference count */
	int 	disp_count; 	/* count of entries in vector */
	int	disp_min;	/* index of lowest entry in vector */
	eml_routine_t	disp_vector[1];	/* first entry in array of dispatch */
					/* routines (array has disp_count */
					/* elements) */
} *eml_dispatch_t;

typedef vm_offset_t	*emulation_vector_t;	/* Variable-length array */
+
+#define EML_ROUTINE_NULL (eml_routine_t)0
+#define EML_DISPATCH_NULL (eml_dispatch_t)0
+
+#define EML_SUCCESS (0)
+
+#define EML_MOD (err_kern|err_sub(2))
+#define EML_BAD_TASK (EML_MOD|0x0001)
+#define EML_BAD_CNT (EML_MOD|0x0002)
+
+extern void eml_init(void);
+extern void eml_task_reference(task_t task, task_t parent);
+extern void eml_task_deallocate(task_t task);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _KERN_SYSCALL_EMULATION_H_ */
diff --git a/kern/syscall_subr.c b/kern/syscall_subr.c
new file mode 100644
index 0000000..0030e02
--- /dev/null
+++ b/kern/syscall_subr.c
@@ -0,0 +1,386 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/boolean.h>
+#include <mach/thread_switch.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <kern/counters.h>
+#include <kern/ipc_kobject.h>
+#include <kern/mach_clock.h>
+#include <kern/printf.h>
+#include <kern/processor.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/syscall_subr.h>
+#include <kern/ipc_sched.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <machine/machspl.h> /* for splsched */
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif /* MACH_FIXPRI */
+
+/*
+ * swtch and swtch_pri both attempt to context switch (logic in
+ * thread_block no-ops the context switch if nothing would happen).
+ * A boolean is returned that indicates whether there is anything
+ * else runnable.
+ *
+ * This boolean can be used by a thread waiting on a
+ * lock or condition: If FALSE is returned, the thread is justified
+ * in becoming a resource hog by continuing to spin because there's
+ * nothing else useful that the processor could do. If TRUE is
+ * returned, the thread should make one more check on the
+ * lock and then be a good citizen and really suspend.
+ */
+static void swtch_continue(void)
+{
+ processor_t myprocessor;
+
+ myprocessor = current_processor();
+ thread_syscall_return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+ /*NOTREACHED*/
+}
+
+boolean_t swtch(void)
+{
+ processor_t myprocessor;
+
+#if NCPUS > 1
+ myprocessor = current_processor();
+ if (myprocessor->runq.count == 0 &&
+ myprocessor->processor_set->runq.count == 0)
+ return(FALSE);
+#endif /* NCPUS > 1 */
+
+ counter(c_swtch_block++);
+ thread_block(swtch_continue);
+ myprocessor = current_processor();
+ return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+}
+
+static void swtch_pri_continue(void)
+{
+ thread_t thread = current_thread();
+ processor_t myprocessor;
+
+ if (thread->depress_priority >= 0)
+ (void) thread_depress_abort(thread);
+ myprocessor = current_processor();
+ thread_syscall_return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+ /*NOTREACHED*/
+}
+
+boolean_t swtch_pri(int pri)
+{
+ thread_t thread = current_thread();
+ processor_t myprocessor;
+
+#if NCPUS > 1
+ myprocessor = current_processor();
+ if (myprocessor->runq.count == 0 &&
+ myprocessor->processor_set->runq.count == 0)
+ return(FALSE);
+#endif /* NCPUS > 1 */
+
+ /*
+ * XXX need to think about depression duration.
+ * XXX currently using min quantum.
+ */
+ thread_depress_priority(thread, min_quantum);
+
+ counter(c_swtch_pri_block++);
+ thread_block(swtch_pri_continue);
+
+ if (thread->depress_priority >= 0)
+ (void) thread_depress_abort(thread);
+ myprocessor = current_processor();
+ return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+}
+
+static void thread_switch_continue(void)
+{
+ thread_t cur_thread = current_thread();
+
+ /*
+ * Restore depressed priority
+ */
+ if (cur_thread->depress_priority >= 0)
+ (void) thread_depress_abort(cur_thread);
+ thread_syscall_return(KERN_SUCCESS);
+ /*NOTREACHED*/
+}
+
/*
 * thread_switch:
 *
 * Context switch, with an optional hand-off hint (thread_name) and an
 * optional side effect chosen by 'option':
 *   SWITCH_OPTION_NONE    - no side effect;
 *   SWITCH_OPTION_DEPRESS - depress caller's priority for option_time ms;
 *   SWITCH_OPTION_WAIT    - arrange a timed wait for option_time ms.
 * Returns KERN_SUCCESS, or KERN_INVALID_ARGUMENT for a bad option.
 *
 * Fixed priority threads that call this get what they asked for
 * even if that violates priority order.
 */
kern_return_t thread_switch(
    mach_port_name_t    thread_name,
    int                 option,
    mach_msg_timeout_t  option_time)
{
    thread_t    cur_thread = current_thread();
    processor_t myprocessor;
    ipc_port_t  port;

    /*
     * Process option.
     */
    switch (option) {
        case SWITCH_OPTION_NONE:
            /*
             * Nothing to do.
             */
            break;

        case SWITCH_OPTION_DEPRESS:
            /*
             * Depress priority for given time.
             */
            thread_depress_priority(cur_thread, option_time);
            break;

        case SWITCH_OPTION_WAIT:
            thread_will_wait_with_timeout(cur_thread, option_time);
            break;

        default:
            return(KERN_INVALID_ARGUMENT);
    }

#ifndef MIGRATING_THREADS /* XXX thread_run defunct */
    /*
     * Check and act on thread hint if appropriate: translate the name
     * to a port in the caller's space, and hand off directly if the
     * named thread is eligible.
     */
    if ((thread_name != 0) &&
        (ipc_port_translate_send(cur_thread->task->itk_space,
                                 thread_name, &port) == KERN_SUCCESS)) {
        /* port is locked, but it might not be active */

        /*
         * Get corresponding thread.
         */
        if (ip_active(port) && (ip_kotype(port) == IKOT_THREAD)) {
            thread_t thread;
            spl_t s;

            thread = (thread_t) port->ip_kobject;
            /*
             * Check if the thread is in the right pset. Then
             * pull it off its run queue. If it
             * doesn't come, then it's not eligible.
             */
            s = splsched();
            thread_lock(thread);
            if ((thread->processor_set == cur_thread->processor_set)
                && (rem_runq(thread) != RUN_QUEUE_NULL)) {
                /*
                 * Hah, got it!!
                 */
                thread_unlock(thread);
                (void) splx(s);
                ip_unlock(port);
                /* XXX thread might disappear on us now? */
                /* NOTE(review): after ip_unlock the only thing keeping
                 * 'thread' alive is the port's kobject reference; the
                 * accesses below are unprotected — pre-existing XXX,
                 * left as-is. */
#if MACH_FIXPRI
                if (thread->policy == POLICY_FIXEDPRI) {
                    /* Hand the target its own fixed-priority quantum. */
                    myprocessor = current_processor();
                    myprocessor->quantum = thread->sched_data;
                    myprocessor->first_quantum = TRUE;
                }
#endif  /* MACH_FIXPRI */
                counter(c_thread_switch_handoff++);
                thread_run(thread_switch_continue, thread);
                /*
                 * Restore depressed priority
                 * (reached only if thread_run returned on our stack).
                 */
                if (cur_thread->depress_priority >= 0)
                    (void) thread_depress_abort(cur_thread);

                return(KERN_SUCCESS);
            }
            thread_unlock(thread);
            (void) splx(s);
        }
        ip_unlock(port);
    }
#endif /* not MIGRATING_THREADS */

    /*
     * No handoff hint supplied, or hint was wrong. Call thread_block() in
     * hopes of running something else. If nothing else is runnable,
     * thread_block will detect this. WARNING: thread_switch with no
     * option will not do anything useful if the thread calling it is the
     * highest priority thread (can easily happen with a collection
     * of timesharing threads).
     */
#if NCPUS > 1
    myprocessor = current_processor();
    if (myprocessor->processor_set->runq.count > 0 ||
        myprocessor->runq.count > 0)
#endif  /* NCPUS > 1 */
    {
        counter(c_thread_switch_block++);
        thread_block(thread_switch_continue);
    }

    /*
     * Restore depressed priority
     */
    if (cur_thread->depress_priority >= 0)
        (void) thread_depress_abort(cur_thread);
    return(KERN_SUCCESS);
}
+
/*
 * thread_depress_priority
 *
 * Depress thread's priority to lowest possible for specified period.
 * Intended for use when thread wants a lock but doesn't know which
 * other thread is holding it. As with thread_switch, fixed
 * priority threads get exactly what they asked for. Users access
 * this by the SWITCH_OPTION_DEPRESS option to thread_switch. A time
 * of zero will result in no timeout being scheduled.
 *
 * thread:       the thread to depress.
 * depress_time: depression duration in milliseconds; 0 = indefinite
 *               (no timer; must be undone by thread_depress_abort).
 */
void
thread_depress_priority(
    thread_t thread,
    mach_msg_timeout_t depress_time)
{
    unsigned int ticks;
    spl_t s;

    /* convert from milliseconds to ticks */
    ticks = convert_ipc_timeout_to_ticks(depress_time);

    s = splsched();
    thread_lock(thread);

    /*
     * If thread is already depressed, override previous depression:
     * cancel the pending timer before installing the new state.
     */
    reset_timeout_check(&thread->depress_timer);

    /*
     * Save current priority, then set priority and
     * sched_pri to their lowest possible values (NRQS-1 is the
     * lowest run-queue level).
     */
    thread->depress_priority = thread->priority;
    thread->priority = NRQS-1;
    thread->sched_pri = NRQS-1;
    if (ticks != 0)
        set_timeout(&thread->depress_timer, ticks);

    thread_unlock(thread);
    (void) splx(s);
}
+
+/*
+ * thread_depress_timeout:
+ *
+ * Timeout routine for priority depression.
+ */
+void
+thread_depress_timeout(thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * If we lose a race with thread_depress_abort,
+ * then depress_priority might be -1.
+ */
+
+ if (thread->depress_priority >= 0) {
+ thread->priority = thread->depress_priority;
+ thread->depress_priority = -1;
+ compute_priority(thread, FALSE);
+ }
+
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_depress_abort:
+ *
+ * Prematurely abort priority depression if there is one.
+ */
+kern_return_t
+thread_depress_abort(thread_t thread)
+{
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * Only restore priority if thread is depressed.
+ */
+ if (thread->depress_priority >= 0) {
+ reset_timeout_check(&thread->depress_timer);
+ thread->priority = thread->depress_priority;
+ thread->depress_priority = -1;
+ compute_priority(thread, FALSE);
+ }
+
+ thread_unlock(thread);
+ (void) splx(s);
+ return(KERN_SUCCESS);
+}
+
/*
 * mach_print
 *
 * Display a null-terminated character string on the Mach console.
 * This system call is meant as a debugging tool useful to circumvent
 * messaging altogether.  Installed as trap 30 (see mach_trap_table),
 * and only compiled in when the kernel debugger is configured.
 */
#ifdef MACH_KDB
void
mach_print(const char *s)
{
    printf("%s", s);
}
#endif /* MACH_KDB */
diff --git a/kern/syscall_subr.h b/kern/syscall_subr.h
new file mode 100644
index 0000000..c9a2777
--- /dev/null
+++ b/kern/syscall_subr.h
@@ -0,0 +1,42 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <sys/types.h>
+#include <mach/mach_types.h>
+#include <kern/kern_types.h>
+
+#ifndef _KERN_SYSCALL_SUBR_H_
+#define _KERN_SYSCALL_SUBR_H_
+
+extern int swtch(void);
+extern int swtch_pri(int);
+extern int thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
+extern void thread_depress_timeout(thread_t);
+extern kern_return_t thread_depress_abort(thread_t);
+extern void mach_print(const char *);
+extern void thread_depress_priority(thread_t thread, mach_msg_timeout_t depress_time);
+
+#endif /* _KERN_SYSCALL_SUBR_H_ */
diff --git a/kern/syscall_sw.c b/kern/syscall_sw.c
new file mode 100644
index 0000000..4249b71
--- /dev/null
+++ b/kern/syscall_sw.c
@@ -0,0 +1,224 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/debug.h>
+#include <kern/syscall_sw.h>
+
+/* Include declarations of the trap functions. */
+#include <mach/mach_traps.h>
+#include <mach/message.h>
+#include <kern/syscall_subr.h>
+#include <kern/ipc_mig.h>
+#include <kern/eventcount.h>
+#include <ipc/mach_port.server.h>
+
+
+/*
+ * To add a new entry:
+ * Add an "MACH_TRAP(routine, arg count)" to the table below.
+ *
+ * Add trap definition to mach/syscall_sw.h and
+ * recompile user library.
+ *
+ * WARNING: If you add a trap which requires more than 7
+ * parameters, mach/ca/syscall_sw.h and ca/trap.c both need
+ * to be modified for it to work successfully on an
+ * RT. Similarly, mach/mips/syscall_sw.h and mips/locore.s
+ * need to be modified before it will work on Pmaxen.
+ *
+ * WARNING: Don't use numbers 0 through -9. They (along with
+ * the positive numbers) are reserved for Unix.
+ */
+
+boolean_t kern_invalid_debug = FALSE;
+
+static mach_port_name_t null_port(void)
+{
+ if (kern_invalid_debug) SoftDebugger("null_port mach trap");
+ return(MACH_PORT_NULL);
+}
+
+static kern_return_t kern_invalid(void)
+{
+ if (kern_invalid_debug) SoftDebugger("kern_invalid mach trap");
+ return(KERN_INVALID_ARGUMENT);
+}
+
/*
 * The Mach trap table.  Slot numbers must stay in sync with the user
 * library's mach/syscall_sw.h and with locore (see the warning above);
 * numbers 0-9 are reserved for Unix.  MACH_TRAP_STACK entries may
 * discard their kernel stack (see kern/syscall_sw.h).  Unassigned
 * slots point at kern_invalid (or null_port for slots that once
 * returned a port); "emul:" comments record calls now handled by the
 * emulation library.
 */
mach_trap_t mach_trap_table[] = {
    MACH_TRAP(kern_invalid, 0),             /* 0 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 1 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 2 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 3 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 4 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 5 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 6 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 7 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 8 */      /* Unix */
    MACH_TRAP(kern_invalid, 0),             /* 9 */      /* Unix */
    MACH_TRAP(null_port, 0),                /* 10 */
    MACH_TRAP(null_port, 0),                /* 11 */
    MACH_TRAP(null_port, 0),                /* 12 */
    MACH_TRAP(null_port, 0),                /* 13 */
    MACH_TRAP(kern_invalid, 0),             /* 14 */
    MACH_TRAP(kern_invalid, 0),             /* 15 */
    MACH_TRAP(kern_invalid, 0),             /* 16 */
    MACH_TRAP_STACK(evc_wait, 1),           /* 17 */
    MACH_TRAP_STACK(evc_wait_clear, 1),     /* 18 */
    MACH_TRAP(kern_invalid, 0),             /* 19 */
    MACH_TRAP(kern_invalid, 0),             /* 20 */
    MACH_TRAP(kern_invalid, 0),             /* 21 */
    MACH_TRAP(kern_invalid, 0),             /* 22 */
    MACH_TRAP(kern_invalid, 0),             /* 23 */
    MACH_TRAP(kern_invalid, 0),             /* 24 */
    MACH_TRAP_STACK(mach_msg_trap, 7),      /* 25 */
    MACH_TRAP(mach_reply_port, 0),          /* 26 */
    MACH_TRAP(mach_thread_self, 0),         /* 27 */
    MACH_TRAP(mach_task_self, 0),           /* 28 */
    MACH_TRAP(mach_host_self, 0),           /* 29 */
#ifdef MACH_KDB
    MACH_TRAP_STACK(mach_print, 1),         /* 30 */
#else /* MACH_KDB */
    MACH_TRAP_STACK(kern_invalid, 0),       /* 30 */
#endif /* MACH_KDB */

    MACH_TRAP(kern_invalid, 0),             /* 31 */
    MACH_TRAP(kern_invalid, 0),             /* 32 */
    MACH_TRAP(kern_invalid, 0),             /* 33 emul: task_by_pid */
    MACH_TRAP(kern_invalid, 0),             /* 34 emul: pid_by_task */
    MACH_TRAP(kern_invalid, 0),             /* 35 */
    MACH_TRAP(kern_invalid, 0),             /* 36 */
    MACH_TRAP(kern_invalid, 0),             /* 37 */
    MACH_TRAP(kern_invalid, 0),             /* 38 */

    MACH_TRAP(syscall_device_writev_request, 6),  /* 39 */
    MACH_TRAP(syscall_device_write_request, 6),   /* 40 */

    MACH_TRAP(kern_invalid, 0),             /* 41 emul: init_process */
    MACH_TRAP(kern_invalid, 0),             /* 42 */
    MACH_TRAP(kern_invalid, 0),             /* 43 emul: map_fd */
    MACH_TRAP(kern_invalid, 0),             /* 44 emul: rfs_make_symlink */
    MACH_TRAP(kern_invalid, 0),             /* 45 */
    MACH_TRAP(kern_invalid, 0),             /* 46 */
    MACH_TRAP(kern_invalid, 0),             /* 47 */
    MACH_TRAP(kern_invalid, 0),             /* 48 */
    MACH_TRAP(kern_invalid, 0),             /* 49 */

    MACH_TRAP(kern_invalid, 0),             /* 50 */
    MACH_TRAP(kern_invalid, 0),             /* 51 */
    MACH_TRAP(kern_invalid, 0),             /* 52 emul: htg_syscall */
    MACH_TRAP(kern_invalid, 0),             /* 53 emul: set_ras_address */
    MACH_TRAP(kern_invalid, 0),             /* 54 */
    MACH_TRAP(null_port, 0),                /* 55 */
    MACH_TRAP(null_port, 0),                /* 56 */
    MACH_TRAP(kern_invalid, 0),             /* 57 */
    MACH_TRAP(kern_invalid, 0),             /* 58 */
    MACH_TRAP_STACK(swtch_pri, 1),          /* 59 */

    MACH_TRAP_STACK(swtch, 0),              /* 60 */
    MACH_TRAP_STACK(thread_switch, 3),      /* 61 */
    MACH_TRAP(kern_invalid, 0),             /* 62 */
    MACH_TRAP(kern_invalid, 0),             /* 63 */
    MACH_TRAP(syscall_vm_map, 11),          /* 64 */
    MACH_TRAP(syscall_vm_allocate, 4),      /* 65 */
    MACH_TRAP(syscall_vm_deallocate, 3),    /* 66 */
    MACH_TRAP(kern_invalid, 0),             /* 67 */
    MACH_TRAP(syscall_task_create, 3),      /* 68 */
    MACH_TRAP(syscall_task_terminate, 1),   /* 69 */

    MACH_TRAP(syscall_task_suspend, 1),     /* 70 */
    MACH_TRAP(syscall_task_set_special_port, 3),  /* 71 */
    MACH_TRAP(syscall_mach_port_allocate, 3),     /* 72 */
    MACH_TRAP(syscall_mach_port_deallocate, 2),   /* 73 */
    MACH_TRAP(syscall_mach_port_insert_right, 4), /* 74 */
    MACH_TRAP(syscall_mach_port_allocate_name, 3),  /* 75 */
    MACH_TRAP(syscall_thread_depress_abort, 1),   /* 76 */
    MACH_TRAP(kern_invalid, 0),             /* 77 */
    MACH_TRAP(kern_invalid, 0),             /* 78 */
    MACH_TRAP(kern_invalid, 0),             /* 79 */

    MACH_TRAP(kern_invalid, 0),             /* 80 */
    MACH_TRAP(kern_invalid, 0),             /* 81 */
    MACH_TRAP(kern_invalid, 0),             /* 82 */
    MACH_TRAP(kern_invalid, 0),             /* 83 */
    MACH_TRAP(kern_invalid, 0),             /* 84 */
    MACH_TRAP(kern_invalid, 0),             /* 85 */
    MACH_TRAP(kern_invalid, 0),             /* 86 */
    MACH_TRAP(kern_invalid, 0),             /* 87 */
    MACH_TRAP(kern_invalid, 0),             /* 88 */
    MACH_TRAP(kern_invalid, 0),             /* 89 */
    MACH_TRAP(kern_invalid, 0),             /* 90 */
    MACH_TRAP(kern_invalid, 0),             /* 91 */
    MACH_TRAP(kern_invalid, 0),             /* 92 */
    MACH_TRAP(kern_invalid, 0),             /* 93 */
    MACH_TRAP(kern_invalid, 0),             /* 94 */
    MACH_TRAP(kern_invalid, 0),             /* 95 */

    MACH_TRAP(kern_invalid, 0),             /* 96 */
    MACH_TRAP(kern_invalid, 0),             /* 97 */

    MACH_TRAP(kern_invalid, 0),             /* 98 */
    MACH_TRAP(kern_invalid, 0),             /* 99 */

    MACH_TRAP(kern_invalid, 0),             /* 100 */
    MACH_TRAP(kern_invalid, 0),             /* 101 */
    MACH_TRAP(kern_invalid, 0),             /* 102 */
    MACH_TRAP(kern_invalid, 0),             /* 103 */
    MACH_TRAP(kern_invalid, 0),             /* 104 */
    MACH_TRAP(kern_invalid, 0),             /* 105 */
    MACH_TRAP(kern_invalid, 0),             /* 106 */
    MACH_TRAP(kern_invalid, 0),             /* 107 */
    MACH_TRAP(kern_invalid, 0),             /* 108 */
    MACH_TRAP(kern_invalid, 0),             /* 109 */

    MACH_TRAP(kern_invalid, 0),             /* 110 */
    MACH_TRAP(kern_invalid, 0),             /* 111 */
    MACH_TRAP(kern_invalid, 0),             /* 112 */
    MACH_TRAP(kern_invalid, 0),             /* 113 */
    MACH_TRAP(kern_invalid, 0),             /* 114 */
    MACH_TRAP(kern_invalid, 0),             /* 115 */
    MACH_TRAP(kern_invalid, 0),             /* 116 */
    MACH_TRAP(kern_invalid, 0),             /* 117 */
    MACH_TRAP(kern_invalid, 0),             /* 118 */
    MACH_TRAP(kern_invalid, 0),             /* 119 */

    MACH_TRAP(kern_invalid, 0),             /* 120 */
    MACH_TRAP(kern_invalid, 0),             /* 121 */
    MACH_TRAP(kern_invalid, 0),             /* 122 */
    MACH_TRAP(kern_invalid, 0),             /* 123 */
    MACH_TRAP(kern_invalid, 0),             /* 124 */
    MACH_TRAP(kern_invalid, 0),             /* 125 */
    MACH_TRAP(kern_invalid, 0),             /* 126 */
    MACH_TRAP(kern_invalid, 0),             /* 127 */
    MACH_TRAP(kern_invalid, 0),             /* 128 */
    MACH_TRAP(kern_invalid, 0),             /* 129 */
};

/* Number of entries in mach_trap_table; used for bounds checking. */
int mach_trap_count = (sizeof(mach_trap_table) / sizeof(mach_trap_table[0]));
diff --git a/kern/syscall_sw.h b/kern/syscall_sw.h
new file mode 100644
index 0000000..9e76fc6
--- /dev/null
+++ b/kern/syscall_sw.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_SYSCALL_SW_H_
+#define _KERN_SYSCALL_SW_H_
+
+#include <mach/boolean.h>
+
+/*
+ * mach_trap_stack indicates the trap may discard
+ * its kernel stack. Some architectures may need
+ * to save more state in the pcb for these traps.
+ *
+ * Note: this is indexed manually by locore.S!
+ */
+
/* Common signature every trap handler is cast to for table storage. */
typedef void (*generic_trap_function)(void);

/* One entry of the Mach trap table. */
typedef struct {
    int                   mach_trap_arg_count;  /* number of user arguments */
    generic_trap_function mach_trap_function;   /* kernel handler */
    boolean_t             mach_trap_stack;      /* may discard kernel stack
                                                   (see comment above) */
    const char            *mach_trap_name;      /* handler name (stringized),
                                                   for debugging */
} mach_trap_t;

extern mach_trap_t mach_trap_table[];   /* defined in kern/syscall_sw.c */
extern int mach_trap_count;             /* number of table entries */

/* Table-entry constructors; the _STACK variant marks traps that may
 * discard their kernel stack. */
#define MACH_TRAP(name, arg_count) \
    { (arg_count), (generic_trap_function) (name), FALSE, #name }
#define MACH_TRAP_STACK(name, arg_count) \
    { (arg_count), (generic_trap_function) (name), TRUE, #name }
+
+#endif /* _KERN_SYSCALL_SW_H_ */
diff --git a/kern/task.c b/kern/task.c
new file mode 100644
index 0000000..60ab4d7
--- /dev/null
+++ b/kern/task.c
@@ -0,0 +1,1351 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/task.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
+ * David Black
+ *
+ * Task management primitives implementation.
+ */
+
+#include <string.h>
+
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+#include <mach/task_info.h>
+#include <mach/task_special_ports.h>
+#include <mach_debug/mach_debug_types.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_types.h>
+#include <kern/debug.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/slab.h>
+#include <kern/gnumach.server.h>
+#include <kern/kalloc.h>
+#include <kern/mach.server.h>
+#include <kern/mach_host.server.h>
+#include <kern/processor.h>
+#include <kern/printf.h>
+#include <kern/sched_prim.h> /* for thread_wakeup */
+#include <kern/ipc_tt.h>
+#include <kern/syscall_emulation.h>
+#include <kern/task_notify.user.h>
+#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
+#include <machine/machspl.h> /* for splsched */
+
task_t kernel_task = TASK_NULL;     /* the first task; set as a side
                                       effect of task_init's call to
                                       task_create_kernel */
struct kmem_cache task_cache;       /* allocation cache for struct task */

/* Where to send notifications about newly created tasks.
 * NULL means no notifications are sent. */
ipc_port_t new_task_notification = NULL;
+
/*
 * task_init: module initialization.  Sets up the task allocation
 * cache, the syscall-emulation and machine-dependent task modules,
 * then creates and names the kernel task itself.
 */
void task_init(void)
{
    kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
                    NULL, 0);

    eml_init();
    machine_task_module_init ();

    /*
     * Create the kernel task as the first task.
     * Task_create must assign to kernel_task as a side effect,
     * for other initialization. (:-()
     */
    (void) task_create_kernel(TASK_NULL, FALSE, &kernel_task);
    (void) task_set_name(kernel_task, "gnumach");
    vm_map_set_name(kernel_map, kernel_task->name);
}
+
+kern_return_t task_create(
+ task_t parent_task,
+ boolean_t inherit_memory,
+ task_t *child_task) /* OUT */
+{
+ if (parent_task == TASK_NULL)
+ return KERN_INVALID_TASK;
+
+ return task_create_kernel (parent_task, inherit_memory,
+ child_task);
+}
+
/*
 * task_create_kernel: create a new task, optionally inheriting the
 * parent's address space.  parent_task may be TASK_NULL (kernel-internal
 * use only).  On success stores the new task (with one reference for
 * the caller) through child_task and returns KERN_SUCCESS; returns
 * KERN_RESOURCE_SHORTAGE if the task or its map cannot be allocated.
 */
kern_return_t
task_create_kernel(
    task_t    parent_task,
    boolean_t inherit_memory,
    task_t    *child_task)  /* OUT */
{
    task_t          new_task;
    processor_set_t pset;
#if FAST_TAS
    int i;
#endif

    new_task = (task_t) kmem_cache_alloc(&task_cache);
    if (new_task == TASK_NULL)
        return KERN_RESOURCE_SHORTAGE;

    /* one ref for just being alive; one for our caller */
    new_task->ref_count = 2;

    /*
     * Address space: the kernel task shares kernel_map (detected by
     * the caller passing &kernel_task itself); otherwise fork the
     * parent's map or build a fresh one on a new pmap.
     */
    if (child_task == &kernel_task) {
        new_task->map = kernel_map;
    } else if (inherit_memory) {
        new_task->map = vm_map_fork(parent_task->map);
    } else {
        pmap_t new_pmap = pmap_create((vm_size_t) 0);
        if (new_pmap == PMAP_NULL)
            new_task->map = VM_MAP_NULL;
        else {
            new_task->map = vm_map_create(new_pmap,
                        round_page(VM_MIN_USER_ADDRESS),
                        trunc_page(VM_MAX_USER_ADDRESS));
            if (new_task->map == VM_MAP_NULL)
                pmap_destroy(new_pmap);
        }
    }
    if (new_task->map == VM_MAP_NULL) {
        kmem_cache_free(&task_cache, (vm_address_t) new_task);
        return KERN_RESOURCE_SHORTAGE;
    }
    /* NOTE(review): new_task->name is not filled in until the snprintf
     * near the end of this function, so the map name set here may be
     * stale/uninitialized bytes — verify intended ordering. */
    if (child_task != &kernel_task)
        vm_map_set_name(new_task->map, new_task->name);

    simple_lock_init(&new_task->lock);
    queue_init(&new_task->thread_list);
    new_task->suspend_count = 0;
    new_task->active = TRUE;
    new_task->user_stop_count = 0;
    new_task->thread_count = 0;
    new_task->faults = 0;
    new_task->zero_fills = 0;
    new_task->reactivations = 0;
    new_task->pageins = 0;
    new_task->cow_faults = 0;
    new_task->messages_sent = 0;
    new_task->messages_received = 0;

    eml_task_reference(new_task, parent_task);

    ipc_task_init(new_task, parent_task);
    machine_task_init (new_task);

    time_value64_init(&new_task->total_user_time);
    time_value64_init(&new_task->total_system_time);

    record_time_stamp (&new_task->creation_time);

    /*
     * Inherit the parent's processor set (falling back to the default
     * pset if the parent's is no longer active) and priority.
     */
    if (parent_task != TASK_NULL) {
        task_lock(parent_task);
        pset = parent_task->processor_set;
        if (!pset->active)
            pset = &default_pset;
        pset_reference(pset);
        new_task->priority = parent_task->priority;
        task_unlock(parent_task);
    }
    else {
        pset = &default_pset;
        pset_reference(pset);
        new_task->priority = BASEPRI_USER;
    }
    pset_lock(pset);
    pset_add_task(pset, new_task);
    pset_unlock(pset);

    new_task->may_assign = TRUE;
    new_task->assign_active = FALSE;
    new_task->essential = FALSE;

#if MACH_PCSAMPLE
    new_task->pc_sample.buffer = 0;
    new_task->pc_sample.seqno = 0;
    new_task->pc_sample.sampletypes = 0;
#endif /* MACH_PCSAMPLE */

#if FAST_TAS
    /* Inherit (or clear) the restartable-atomic-sequence regions. */
    for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
        if (inherit_memory) {
            new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
            new_task->fast_tas_end[i] = parent_task->fast_tas_end[i];
        } else {
            new_task->fast_tas_base[i] = (vm_offset_t)0;
            new_task->fast_tas_end[i] = (vm_offset_t)0;
        }
    }
#endif /* FAST_TAS */

    /* Name the task after its own address, or "(parent-name)". */
    if (parent_task == TASK_NULL)
        snprintf (new_task->name, sizeof new_task->name, "%p",
                  new_task);
    else
        snprintf (new_task->name, sizeof new_task->name, "(%.*s)",
                  (int) (sizeof new_task->name - 3), parent_task->name);

    /*
     * Tell the registered listener, if any.  The extra references are
     * consumed by convert_task_to_port; task_reference tolerates
     * TASK_NULL for a missing parent.
     */
    if (new_task_notification != NULL) {
        task_reference (new_task);
        task_reference (parent_task);
        mach_notify_new_task (new_task_notification,
                              convert_task_to_port (new_task),
                              parent_task
                              ? convert_task_to_port (parent_task)
                              : IP_NULL);
    }

    ipc_task_enable(new_task);

    *child_task = new_task;
    return KERN_SUCCESS;
}
+
/*
 * task_deallocate:
 *
 * Give up a reference to the specified task and destroy it if there
 * are no other references left. It is assumed that the current thread
 * is never in this task.  TASK_NULL is tolerated as a no-op.
 */
void task_deallocate(
    task_t task)
{
    int c;
    processor_set_t pset;

    if (task == TASK_NULL)
        return;

    task_lock(task);
    c = --(task->ref_count);
    task_unlock(task);
    if (c != 0)
        return;

    /* Last reference is gone: tear down machine state, emulation
     * table, pset membership, address space, and IPC space, then
     * free the task structure itself. */
    machine_task_terminate (task);

    eml_task_deallocate(task);

    pset = task->processor_set;
    pset_lock(pset);
    pset_remove_task(pset,task);
    pset_unlock(pset);
    pset_deallocate(pset);
    vm_map_deallocate(task->map);
    is_release(task->itk_space);
    kmem_cache_free(&task_cache, (vm_offset_t) task);
}
+
+void task_reference(
+ task_t task)
+{
+ if (task == TASK_NULL)
+ return;
+
+ task_lock(task);
+ task->ref_count++;
+ task_unlock(task);
+}
+
/*
 * task_terminate:
 *
 * Terminate the specified task. See comments on thread_terminate
 * (kern/thread.c) about problems with terminating the "current task."
 * Returns KERN_INVALID_ARGUMENT for TASK_NULL, KERN_FAILURE if the
 * task (or the calling task/thread) is already being terminated,
 * KERN_SUCCESS otherwise.
 */
kern_return_t task_terminate(
    task_t task)
{
    thread_t thread, cur_thread;
    queue_head_t *list;
    task_t cur_task;
    spl_t s;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    list = &task->thread_list;
    cur_task = current_task();
    cur_thread = current_thread();

    /*
     * Deactivate task so that it can't be terminated again,
     * and so lengthy operations in progress will abort.
     *
     * If the current thread is in this task, remove it from
     * the task's thread list to keep the thread-termination
     * loop simple.
     */
    if (task == cur_task) {
        task_lock(task);
        if (!task->active) {
            /*
             * Task is already being terminated.
             */
            task_unlock(task);
            return KERN_FAILURE;
        }
        /*
         * Make sure current thread is not being terminated.
         */
        s = splsched();
        thread_lock(cur_thread);
        if (!cur_thread->active) {
            thread_unlock(cur_thread);
            (void) splx(s);
            task_unlock(task);
            thread_terminate(cur_thread);
            return KERN_FAILURE;
        }
        task_hold_locked(task);
        task->active = FALSE;
        queue_remove(list, cur_thread, thread_t, thread_list);
        thread_unlock(cur_thread);
        (void) splx(s);
        task_unlock(task);

        /*
         * Shut down this thread's ipc now because it must
         * be left alone to terminate the task.
         */
        ipc_thread_disable(cur_thread);
        ipc_thread_terminate(cur_thread);
    }
    else {
        /*
         * Lock both current and victim task to check for
         * potential deadlock.  Locks are always taken in
         * ascending address order.
         */
        if ((vm_offset_t)task < (vm_offset_t)cur_task) {
            task_lock(task);
            task_lock(cur_task);
        }
        else {
            task_lock(cur_task);
            task_lock(task);
        }
        /*
         * Check if current thread or task is being terminated.
         */
        s = splsched();
        thread_lock(cur_thread);
        if ((!cur_task->active) || (!cur_thread->active)) {
            /*
             * Current task or thread is being terminated.
             */
            thread_unlock(cur_thread);
            (void) splx(s);
            task_unlock(task);
            task_unlock(cur_task);
            thread_terminate(cur_thread);
            return KERN_FAILURE;
        }
        thread_unlock(cur_thread);
        (void) splx(s);
        task_unlock(cur_task);

        if (!task->active) {
            /*
             * Task is already being terminated.
             */
            task_unlock(task);
            return KERN_FAILURE;
        }
        task_hold_locked(task);
        task->active = FALSE;
        task_unlock(task);
    }

    /*
     * Prevent further execution of the task. ipc_task_disable
     * prevents further task operations via the task port.
     * If this is the current task, the current thread will
     * be left running.
     */
    (void) task_dowait(task,TRUE);  /* may block */
    ipc_task_disable(task);

    /*
     * Terminate each thread in the task.
     *
     * The task_port is closed down, so no more thread_create
     * operations can be done. Thread_force_terminate closes the
     * thread port for each thread; when that is done, the
     * thread will eventually disappear. Thus the loop will
     * terminate. Call thread_force_terminate instead of
     * thread_terminate to avoid deadlock checks. Need
     * to call thread_block() inside loop because some other
     * thread (e.g., the reaper) may have to run to get rid
     * of all references to the thread; it won't vanish from
     * the task's thread list until the last one is gone.
     */
    task_lock(task);
    while (!queue_empty(list)) {
        thread = (thread_t) queue_first(list);
        thread_reference(thread);
        task_unlock(task);
        thread_force_terminate(thread);
        thread_deallocate(thread);
        thread_block(thread_no_continuation);
        task_lock(task);
    }
    task_unlock(task);

    /*
     * Shut down IPC.
     */
    ipc_task_terminate(task);


    /*
     * Deallocate the task's reference to itself.
     */
    task_deallocate(task);

    /*
     * If the current thread is in this task, it has not yet
     * been terminated (since it was removed from the task's
     * thread-list). Put it back in the thread list (for
     * completeness), and terminate it. Since it holds the
     * last reference to the task, terminating it will deallocate
     * the task.
     */
    if (cur_thread->task == task) {
        task_lock(task);
        s = splsched();
        queue_enter(list, cur_thread, thread_t, thread_list);
        (void) splx(s);
        task_unlock(task);
        (void) thread_terminate(cur_thread);
    }

    return KERN_SUCCESS;
}
+
+/*
+ *	task_hold_locked:
+ *
+ *	Suspend execution of the specified task.
+ *	This is a recursive-style suspension of the task, a count of
+ *	suspends is maintained.
+ *
+ *	CONDITIONS: the task is locked and active.
+ */
+void task_hold_locked(
+	task_t	task)
+{
+	queue_head_t	*list;
+	thread_t	thread, cur_thread;
+
+	/* Caller must guarantee the task has not been terminated. */
+	assert(task->active);
+
+	cur_thread = current_thread();
+
+	/* Recursive-style suspension: just bump the count. */
+	task->suspend_count++;
+
+	/*
+	 *	Iterate through all the threads and hold them.
+	 *	Do not hold the current thread if it is within the
+	 *	task.
+	 */
+	list = &task->thread_list;
+	queue_iterate(list, thread, thread_t, thread_list) {
+		if (thread != cur_thread)
+			thread_hold(thread);
+	}
+}
+
+/*
+ * task_hold:
+ *
+ * Suspend execution of the specified task.
+ * This is a recursive-style suspension of the task, a count of
+ * suspends is maintained.
+ */
+kern_return_t task_hold(
+	task_t	task)
+{
+	kern_return_t	kr = KERN_SUCCESS;
+
+	/*
+	 *	Take the task lock, then hold the task only if it is
+	 *	still active; a terminated task cannot be held.
+	 */
+	task_lock(task);
+	if (task->active)
+		task_hold_locked(task);
+	else
+		kr = KERN_FAILURE;
+	task_unlock(task);
+
+	return kr;
+}
+
+/*
+ * task_dowait:
+ *
+ * Wait until the task has really been suspended (all of the threads
+ * are stopped). Skip the current thread if it is within the task.
+ *
+ * If task is deactivated while waiting, return a failure code unless
+ * must_wait is true.
+ */
+kern_return_t task_dowait(
+	task_t		task,
+	boolean_t	must_wait)
+{
+	queue_head_t	*list;
+	thread_t	thread, cur_thread, prev_thread;
+	kern_return_t	ret = KERN_SUCCESS;
+
+	/*
+	 *	Iterate through all the threads.
+	 *	While waiting for each thread, we gain a reference to it
+	 *	to prevent it from going away on us.  This guarantees
+	 *	that the "next" thread in the list will be a valid thread.
+	 *
+	 *	We depend on the fact that if threads are created while
+	 *	we are looping through the threads, they will be held
+	 *	automatically.  We don't care about threads that get
+	 *	deallocated along the way (the reference prevents it
+	 *	from happening to the thread we are working with).
+	 *
+	 *	If the current thread is in the affected task, it is skipped.
+	 *
+	 *	If the task is deactivated before we're done, and we don't
+	 *	have to wait for it (must_wait is FALSE), just bail out.
+	 */
+	cur_thread = current_thread();
+
+	list = &task->thread_list;
+	prev_thread = THREAD_NULL;
+	task_lock(task);
+	queue_iterate(list, thread, thread_t, thread_list) {
+		if (!(task->active) && !(must_wait)) {
+			ret = KERN_FAILURE;
+			break;
+		}
+		if (thread != cur_thread) {
+			thread_reference(thread);
+			task_unlock(task);
+			/*
+			 *	Drop the previous iteration's reference
+			 *	only while the task lock is not held,
+			 *	since the deallocation may block.
+			 */
+			if (prev_thread != THREAD_NULL)
+				thread_deallocate(prev_thread);
+			(void) thread_dowait(thread, TRUE);	/* may block */
+			prev_thread = thread;
+			task_lock(task);
+		}
+	}
+	task_unlock(task);
+	if (prev_thread != THREAD_NULL)
+		thread_deallocate(prev_thread);	/* may block */
+	return ret;
+}
+
+/*
+ *	task_release:
+ *
+ *	Undo one level of the recursive-style suspension and
+ *	release every thread in the task.
+ */
+kern_return_t task_release(
+	task_t	task)
+{
+	queue_head_t	*list;
+	thread_t	thread, next;
+
+	task_lock(task);
+	if (!task->active) {
+		task_unlock(task);
+		return KERN_FAILURE;
+	}
+
+	task->suspend_count--;
+
+	/*
+	 *	Iterate through all the threads and release them.
+	 *	The successor is fetched before thread_release runs,
+	 *	so the walk does not consult the thread afterwards.
+	 */
+	list = &task->thread_list;
+	thread = (thread_t) queue_first(list);
+	while (!queue_end(list, (queue_entry_t) thread)) {
+		next = (thread_t) queue_next(&thread->thread_list);
+		thread_release(thread);
+		thread = next;
+	}
+	task_unlock(task);
+	return KERN_SUCCESS;
+}
+
+/*
+ *	task_threads:
+ *
+ *	Return the task's threads, converted to ports, in a
+ *	kalloc'd array at *thread_list; *count is the number
+ *	of entries filled in.
+ */
+kern_return_t task_threads(
+	task_t		task,
+	thread_array_t	*thread_list,
+	natural_t	*count)
+{
+	unsigned int	actual;	/* this many threads */
+	thread_t	thread;
+	thread_t	*threads;
+	unsigned	i;
+
+	vm_size_t	size, size_needed;
+	vm_offset_t	addr;
+
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	size = 0; addr = 0;
+
+	/*
+	 *	Retry loop: the thread count may change while the task
+	 *	is unlocked for allocation, so keep reallocating until
+	 *	the buffer is big enough while the task is locked.
+	 */
+	for (;;) {
+		task_lock(task);
+		if (!task->active) {
+			task_unlock(task);
+			return KERN_FAILURE;
+		}
+
+		actual = task->thread_count;
+
+		/* do we have the memory we need? */
+
+		size_needed = actual * sizeof(mach_port_t);
+		if (size_needed <= size)
+			break;
+
+		/* unlock the task and allocate more memory */
+		task_unlock(task);
+
+		if (size != 0)
+			kfree(addr, size);
+
+		assert(size_needed > 0);
+		size = size_needed;
+
+		addr = kalloc(size);
+		if (addr == 0)
+			return KERN_RESOURCE_SHORTAGE;
+	}
+
+	/* OK, have memory and the task is locked & active */
+
+	threads = (thread_t *) addr;
+
+	for (i = 0, thread = (thread_t) queue_first(&task->thread_list);
+	     i < actual;
+	     i++, thread = (thread_t) queue_next(&thread->thread_list)) {
+		/* take ref for convert_thread_to_port */
+		thread_reference(thread);
+		threads[i] = thread;
+	}
+	assert(queue_end(&task->thread_list, (queue_entry_t) thread));
+
+	/* can unlock task now that we've got the thread refs */
+	task_unlock(task);
+
+	if (actual == 0) {
+		/* no threads, so return null pointer and deallocate memory */
+
+		*thread_list = 0;
+		*count = 0;
+
+		if (size != 0)
+			kfree(addr, size);
+	} else {
+		/* if we allocated too much, must copy */
+
+		if (size_needed < size) {
+			vm_offset_t newaddr;
+
+			newaddr = kalloc(size_needed);
+			if (newaddr == 0) {
+				/* drop the references taken above before failing */
+				for (i = 0; i < actual; i++)
+					thread_deallocate(threads[i]);
+				kfree(addr, size);
+				return KERN_RESOURCE_SHORTAGE;
+			}
+
+			memcpy((void *) newaddr, (void *) addr, size_needed);
+			kfree(addr, size);
+			threads = (thread_t *) newaddr;
+		}
+
+		*thread_list = (mach_port_t *) threads;
+		*count = actual;
+
+		/* do the conversion that Mig should handle */
+
+		for (i = 0; i < actual; i++)
+			((ipc_port_t *) threads)[i] =
+				convert_thread_to_port(threads[i]);
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	task_suspend:
+ *
+ *	Increment the task's user suspend count.  On the 0 -> 1
+ *	transition, hold all of the task's threads and wait for
+ *	them to stop.
+ */
+kern_return_t task_suspend(
+	task_t	task)
+{
+	boolean_t	hold;
+
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	hold = FALSE;
+	task_lock(task);
+	if ((task->user_stop_count)++ == 0)
+		hold = TRUE;
+	task_unlock(task);
+
+	/*
+	 *	If the stop count was positive, the task is
+	 *	already stopped and we can exit.
+	 */
+	if (!hold) {
+		return KERN_SUCCESS;
+	}
+
+	/*
+	 *	Hold all of the threads in the task, and wait for
+	 *	them to stop.  If the current thread is within
+	 *	this task, hold it separately so that all of the
+	 *	other threads can stop first.
+	 *
+	 *	NOTE(review): on failure below, the user_stop_count
+	 *	increment above is not rolled back — confirm this is
+	 *	intended (failure implies the task is terminating).
+	 */
+
+	if (task_hold(task) != KERN_SUCCESS)
+		return KERN_FAILURE;
+
+	if (task_dowait(task, FALSE) != KERN_SUCCESS)
+		return KERN_FAILURE;
+
+	if (current_task() == task) {
+		spl_t s;
+
+		/* Hold ourselves last, after everyone else stopped. */
+		thread_hold(current_thread());
+		/*
+		 *	We want to call thread_block on our way out,
+		 *	to stop running.
+		 */
+		s = splsched();
+		ast_on(cpu_number(), AST_BLOCK);
+		(void) splx(s);
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	task_resume:
+ *
+ *	Decrement the task's user suspend count.  On the 1 -> 0
+ *	transition, release all of the task's threads.
+ */
+kern_return_t task_resume(
+	task_t	task)
+{
+	boolean_t	release;
+
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	release = FALSE;
+	task_lock(task);
+	if (task->user_stop_count > 0) {
+		if (--(task->user_stop_count) == 0)
+			release = TRUE;
+	}
+	else {
+		/* Resume without a matching suspend. */
+		task_unlock(task);
+		return KERN_FAILURE;
+	}
+	task_unlock(task);
+
+	/*
+	 *	Release the task if necessary.
+	 */
+	if (release)
+		return task_release(task);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	task_info:
+ *
+ *	Return information about the task according to flavor
+ *	(TASK_BASIC_INFO, TASK_EVENTS_INFO or TASK_THREAD_TIMES_INFO).
+ *	*task_info_count is the caller's buffer size (in integer_t
+ *	units) on entry, and the amount actually filled on return.
+ */
+kern_return_t task_info(
+	task_t		task,
+	int		flavor,
+	task_info_t	task_info_out,	/* pointer to OUT array */
+	natural_t	*task_info_count)	/* IN/OUT */
+{
+	vm_map_t map;
+
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	switch (flavor) {
+	    case TASK_BASIC_INFO:
+	    {
+		task_basic_info_t	basic_info;
+
+		/* Allow *task_info_count to be smaller than the provided amount
+		 * that does not contain the new time_value64_t fields as some
+		 * callers might not know about them yet. */
+
+		if (*task_info_count <
+		    TASK_BASIC_INFO_COUNT - 3 * sizeof(time_value64_t)/sizeof(integer_t))
+			return KERN_INVALID_ARGUMENT;
+
+		basic_info = (task_basic_info_t) task_info_out;
+
+		map = (task == kernel_task) ? kernel_map : task->map;
+
+		basic_info->virtual_size = map->size;
+		basic_info->resident_size = pmap_resident_count(map->pmap)
+						* PAGE_SIZE;
+
+		task_lock(task);
+		basic_info->base_priority = task->priority;
+		basic_info->suspend_count = task->user_stop_count;
+		TIME_VALUE64_TO_TIME_VALUE(&task->total_user_time,
+					   &basic_info->user_time);
+		TIME_VALUE64_TO_TIME_VALUE(&task->total_system_time,
+					   &basic_info->system_time);
+		time_value64_t creation_time64;
+		read_time_stamp(&task->creation_time, &creation_time64);
+		TIME_VALUE64_TO_TIME_VALUE(&creation_time64, &basic_info->creation_time);
+		if (*task_info_count == TASK_BASIC_INFO_COUNT) {
+			/* Copy new time_value64_t fields */
+			basic_info->user_time64 = task->total_user_time;
+			basic_info->system_time64 = task->total_system_time;
+			basic_info->creation_time64 = creation_time64;
+		}
+		task_unlock(task);
+
+		if (*task_info_count > TASK_BASIC_INFO_COUNT)
+			*task_info_count = TASK_BASIC_INFO_COUNT;
+		break;
+	    }
+
+	    case TASK_EVENTS_INFO:
+	    {
+		task_events_info_t	event_info;
+
+		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
+			return KERN_INVALID_ARGUMENT;
+		}
+
+		event_info = (task_events_info_t) task_info_out;
+
+		/* Snapshot the event counters under the task lock. */
+		task_lock(task);
+		event_info->faults = task->faults;
+		event_info->zero_fills = task->zero_fills;
+		event_info->reactivations = task->reactivations;
+		event_info->pageins = task->pageins;
+		event_info->cow_faults = task->cow_faults;
+		event_info->messages_sent = task->messages_sent;
+		event_info->messages_received = task->messages_received;
+		task_unlock(task);
+
+		*task_info_count = TASK_EVENTS_INFO_COUNT;
+		break;
+	    }
+
+	    case TASK_THREAD_TIMES_INFO:
+	    {
+		task_thread_times_info_t times_info;
+		thread_t	thread;
+
+		/* Callers might not know about time_value64_t fields yet. */
+		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT - (2 * sizeof(time_value64_t)) / sizeof(integer_t)) {
+			return KERN_INVALID_ARGUMENT;
+		}
+
+		times_info = (task_thread_times_info_t) task_info_out;
+
+		time_value64_t acc_user_time, acc_system_time;
+		time_value64_init(&acc_user_time);
+		time_value64_init(&acc_system_time);
+
+		/* Sum the live threads' times; each thread is locked
+		 * at splsched while its times are read. */
+		task_lock(task);
+		queue_iterate(&task->thread_list, thread,
+			      thread_t, thread_list)
+		{
+			time_value64_t user_time, system_time;
+			spl_t	       s;
+
+			s = splsched();
+			thread_lock(thread);
+
+			thread_read_times(thread, &user_time, &system_time);
+
+			thread_unlock(thread);
+			splx(s);
+
+			time_value64_add(&acc_user_time, &user_time);
+			time_value64_add(&acc_system_time, &system_time);
+		}
+		task_unlock(task);
+		TIME_VALUE64_TO_TIME_VALUE(&acc_user_time, &times_info->user_time);
+		TIME_VALUE64_TO_TIME_VALUE(&acc_system_time, &times_info->system_time);
+		if (*task_info_count >= TASK_THREAD_TIMES_INFO_COUNT) {
+			/* Copy new time_value64_t fields */
+			times_info->user_time64 = acc_user_time;
+			times_info->system_time64 = acc_system_time;
+		}
+
+		if (*task_info_count > TASK_THREAD_TIMES_INFO_COUNT)
+			*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
+		break;
+	    }
+
+	    default:
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	return KERN_SUCCESS;
+}
+
+#if MACH_HOST
+/*
+ * task_assign:
+ *
+ * Change the assigned processor set for the task
+ */
+kern_return_t
+task_assign(
+	task_t		task,
+	processor_set_t	new_pset,
+	boolean_t	assign_threads)
+{
+	kern_return_t	ret = KERN_SUCCESS;
+	thread_t	thread, prev_thread;
+	queue_head_t	*list;
+	processor_set_t	pset;
+
+	if (task == TASK_NULL || new_pset == PROCESSOR_SET_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/*
+	 *	Freeze task`s assignment.  Prelude to assigning
+	 *	task.  Only one freeze may be held per task.
+	 *	Wait (possibly repeatedly) until we are the holder.
+	 */
+	task_lock(task);
+	while (task->may_assign == FALSE) {
+		task->assign_active = TRUE;
+		assert_wait((event_t)&task->assign_active, TRUE);
+		task_unlock(task);
+		thread_block(thread_no_continuation);
+		task_lock(task);
+	}
+
+	/*
+	 *	Avoid work if task already in this processor set.
+	 */
+	if (task->processor_set == new_pset) {
+		/*
+		 *	No need for task->assign_active wakeup:
+		 *	task->may_assign is still TRUE.
+		 */
+		task_unlock(task);
+		return KERN_SUCCESS;
+	}
+
+	task->may_assign = FALSE;
+	task_unlock(task);
+
+	/*
+	 *	Safe to get the task`s pset: it cannot change while
+	 *	task is frozen.
+	 */
+	pset = task->processor_set;
+
+	/*
+	 *	Lock both psets now.  Use address ordering to avoid
+	 *	deadlock with a concurrent assignment the other way.
+	 */
+    Restart:
+	if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
+		pset_lock(pset);
+		pset_lock(new_pset);
+	}
+	else {
+		pset_lock(new_pset);
+		pset_lock(pset);
+	}
+
+	/*
+	 *	Check if new_pset is ok to assign to.  If not,
+	 *	reassign to default_pset.
+	 */
+	if (!new_pset->active) {
+		pset_unlock(pset);
+		pset_unlock(new_pset);
+		new_pset = &default_pset;
+		goto Restart;
+	}
+
+	/* Reference for the task's membership in new_pset. */
+	pset_reference(new_pset);
+
+	/*
+	 *	Now grab the task lock and move the task.
+	 */
+	task_lock(task);
+	pset_remove_task(pset, task);
+	pset_add_task(new_pset, task);
+
+	pset_unlock(pset);
+	pset_unlock(new_pset);
+
+	if (assign_threads == FALSE) {
+		/*
+		 *	We leave existing threads at their
+		 *	old assignments.  Unfreeze task`s
+		 *	assignment.
+		 */
+		task->may_assign = TRUE;
+		if (task->assign_active) {
+			task->assign_active = FALSE;
+			thread_wakeup((event_t) &task->assign_active);
+		}
+		task_unlock(task);
+		pset_deallocate(pset);
+		return KERN_SUCCESS;
+	}
+
+	/*
+	 *	If current thread is in task, freeze its assignment.
+	 */
+	if (current_thread()->task == task) {
+		task_unlock(task);
+		thread_freeze(current_thread());
+		task_lock(task);
+	}
+
+	/*
+	 *	Iterate down the thread list reassigning all the threads.
+	 *	New threads pick up task's new processor set automatically.
+	 *	Do current thread last because new pset may be empty.
+	 */
+	list = &task->thread_list;
+	prev_thread = THREAD_NULL;
+	queue_iterate(list, thread, thread_t, thread_list) {
+		if (!(task->active)) {
+			/* Task terminated under us: give up. */
+			ret = KERN_FAILURE;
+			break;
+		}
+		if (thread != current_thread()) {
+			thread_reference(thread);
+			task_unlock(task);
+			if (prev_thread != THREAD_NULL)
+				thread_deallocate(prev_thread);	/* may block */
+			thread_assign(thread,new_pset);	/* may block */
+			prev_thread = thread;
+			task_lock(task);
+		}
+	}
+
+	/*
+	 *	Done, wakeup anyone waiting for us.
+	 */
+	task->may_assign = TRUE;
+	if (task->assign_active) {
+		task->assign_active = FALSE;
+		thread_wakeup((event_t)&task->assign_active);
+	}
+	task_unlock(task);
+	if (prev_thread != THREAD_NULL)
+		thread_deallocate(prev_thread);	/* may block */
+
+	/*
+	 *	Finish assignment of current thread.
+	 */
+	if (current_thread()->task == task)
+		thread_doassign(current_thread(), new_pset, TRUE);
+
+	pset_deallocate(pset);
+
+	return ret;
+}
+#else /* MACH_HOST */
+/*
+ * task_assign:
+ *
+ * Change the assigned processor set for the task
+ */
+kern_return_t
+task_assign(
+	task_t		task,
+	processor_set_t	new_pset,
+	boolean_t	assign_threads)
+{
+	/*
+	 *	Stub: without MACH_HOST there is only one processor
+	 *	set, so reassignment always fails.
+	 */
+	return KERN_FAILURE;
+}
+#endif /* MACH_HOST */
+
+
+/*
+ * task_assign_default:
+ *
+ * Version of task_assign to assign to default processor set.
+ */
+kern_return_t
+task_assign_default(
+	task_t		task,
+	boolean_t	assign_threads)
+{
+	/* Thin wrapper: assign to the system default processor set. */
+	return task_assign(task, &default_pset, assign_threads);
+}
+
+/*
+ * task_get_assignment
+ *
+ * Return name of processor set that task is assigned to.
+ */
+kern_return_t task_get_assignment(
+	task_t		task,
+	processor_set_t	*pset)
+{
+	if (task == TASK_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/*
+	 *	NOTE(review): active and processor_set are read without
+	 *	the task lock here — presumably benign since the caller
+	 *	holds a task reference; confirm against task_assign.
+	 */
+	if (!task->active)
+		return KERN_FAILURE;
+
+	*pset = task->processor_set;
+	pset_reference(*pset);	/* caller receives a pset reference */
+	return KERN_SUCCESS;
+}
+
+/*
+ * task_priority
+ *
+ * Set priority of task; used only for newly created threads.
+ * Optionally change priorities of threads.
+ */
+kern_return_t
+task_priority(
+	task_t		task,
+	int		priority,
+	boolean_t	change_threads)
+{
+	kern_return_t	ret = KERN_SUCCESS;
+
+	if (task == TASK_NULL || invalid_pri(priority))
+		return KERN_INVALID_ARGUMENT;
+
+	task_lock(task);
+	task->priority = priority;
+
+	if (change_threads) {
+		thread_t	thread;
+		queue_head_t	*list;
+
+		/*
+		 *	Keep going even if one thread fails, but
+		 *	report the failure to the caller.
+		 */
+		list = &task->thread_list;
+		queue_iterate(list, thread, thread_t, thread_list) {
+			if (thread_priority(thread, priority, FALSE)
+				!= KERN_SUCCESS)
+				ret = KERN_FAILURE;
+		}
+	}
+
+	task_unlock(task);
+	return ret;
+}
+
+/*
+ * task_set_name
+ *
+ * Set the name of task TASK to NAME. This is a debugging aid.
+ * NAME will be used in error messages printed by the kernel.
+ */
+kern_return_t
+task_set_name(
+	task_t				task,
+	const_kernel_debug_name_t	name)
+{
+	/* Silently truncate to the buffer size; always NUL-terminate. */
+	strncpy(task->name, name, sizeof task->name - 1);
+	task->name[sizeof task->name - 1] = '\0';
+	return KERN_SUCCESS;
+}
+
+/*
+ * task_set_essential
+ *
+ * Set whether TASK is an essential task, i.e. the whole system will crash
+ * if this task crashes.
+ */
+kern_return_t
+task_set_essential(
+	task_t		task,
+	boolean_t	essential)
+{
+	/* Normalize to 0/1 before storing into the one-bit field. */
+	task->essential = (essential != FALSE) ? 1 : 0;
+	return KERN_SUCCESS;
+}
+
+/*
+ * task_collect_scan:
+ *
+ * Attempt to free resources owned by tasks.
+ */
+
+static void task_collect_scan(void)
+{
+	task_t			task, prev_task;
+	processor_set_t		pset, prev_pset;
+
+	prev_task = TASK_NULL;
+	prev_pset = PROCESSOR_SET_NULL;
+
+	simple_lock(&all_psets_lock);
+	queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+		pset_lock(pset);
+		queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+			/*
+			 *	Take references so the current task and
+			 *	pset survive while all locks are dropped
+			 *	for the collection calls.
+			 */
+			task_reference(task);
+			pset_reference(pset);
+			pset_unlock(pset);
+			simple_unlock(&all_psets_lock);
+
+			machine_task_collect (task);
+			pmap_collect(task->map->pmap);
+
+			/*
+			 *	Drop the previous iteration's references
+			 *	only now, while no locks are held.
+			 */
+			if (prev_task != TASK_NULL)
+				task_deallocate(prev_task);
+			prev_task = task;
+
+			if (prev_pset != PROCESSOR_SET_NULL)
+				pset_deallocate(prev_pset);
+			prev_pset = pset;
+
+			simple_lock(&all_psets_lock);
+			pset_lock(pset);
+		}
+		pset_unlock(pset);
+	}
+	simple_unlock(&all_psets_lock);
+
+	/* Drop the references from the final iteration. */
+	if (prev_task != TASK_NULL)
+		task_deallocate(prev_task);
+	if (prev_pset != PROCESSOR_SET_NULL)
+		pset_deallocate(prev_pset);
+}
+
+boolean_t task_collect_allowed = TRUE;	/* master switch for task_collect_scan */
+unsigned task_collect_last_tick = 0;	/* sched_tick at last collection */
+unsigned task_collect_max_rate = 0;	/* in ticks; 0 = use default (hz) */
+
+/*
+ * consider_task_collect:
+ *
+ * Called by the pageout daemon when the system needs more free pages.
+ */
+
+void consider_task_collect(void)
+{
+	/*
+	 *	By default, don't attempt task collection more frequently
+	 *	than once a second.
+	 */
+	if (task_collect_max_rate == 0)
+		task_collect_max_rate = hz;
+
+	/*
+	 *	Rate-limit: task_collect_max_rate is in clock ticks;
+	 *	dividing by hz converts it into sched_tick units
+	 *	(presumably one sched_tick per second).  The original
+	 *	wrote the divisor as the confusing no-op "hz / 1".
+	 */
+	if (task_collect_allowed &&
+	    (sched_tick > (task_collect_last_tick +
+			   task_collect_max_rate / hz))) {
+		task_collect_last_tick = sched_tick;
+		task_collect_scan();
+	}
+}
+
+/*
+ *	task_ras_control:
+ *
+ *	Manage the task's table of restartable atomic sequences
+ *	(pc..endpc ranges).  Only functional when FAST_TAS is
+ *	configured; otherwise always returns KERN_FAILURE.
+ */
+kern_return_t
+task_ras_control(
+	task_t		task,
+	vm_offset_t	pc,
+	vm_offset_t	endpc,
+	int		flavor)
+{
+	kern_return_t ret = KERN_FAILURE;
+
+#if FAST_TAS
+	int i;
+
+	ret = KERN_SUCCESS;
+	task_lock(task);
+	switch (flavor) {
+	case TASK_RAS_CONTROL_PURGE_ALL:	/* remove all RAS */
+		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+			task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
+		}
+		break;
+	case TASK_RAS_CONTROL_PURGE_ONE:	/* remove this RAS, collapse remaining */
+		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+			if ( (task->fast_tas_base[i] == pc)
+			  && (task->fast_tas_end[i] == endpc)) {
+				/* Shift the rest down to keep the table dense. */
+				while (i < TASK_FAST_TAS_NRAS-1) {
+					task->fast_tas_base[i] = task->fast_tas_base[i+1];
+					task->fast_tas_end[i] = task->fast_tas_end[i+1];
+					i++;
+				}
+				task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
+				task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0;
+				break;
+			}
+		}
+		if (i == TASK_FAST_TAS_NRAS) {
+			/* No such RAS was registered. */
+			ret = KERN_INVALID_ADDRESS;
+		}
+		break;
+	case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE:
+		/* remove all RAS and install this RAS */
+		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+			task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
+		}
+		/* FALL THROUGH */
+	case TASK_RAS_CONTROL_INSTALL_ONE:	/* install this RAS */
+		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+			if ( (task->fast_tas_base[i] == pc)
+			  && (task->fast_tas_end[i] == endpc)) {
+				/* already installed */
+				break;
+			}
+			if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)){
+				/* Free slot: install here. */
+				task->fast_tas_base[i] = pc;
+				task->fast_tas_end[i] = endpc;
+				break;
+			}
+		}
+		if (i == TASK_FAST_TAS_NRAS) {
+			/* Table is full. */
+			ret = KERN_RESOURCE_SHORTAGE;
+		}
+		break;
+	default: ret = KERN_INVALID_VALUE;
+		break;
+	}
+	task_unlock(task);
+#endif	/* FAST_TAS */
+	return ret;
+}
+
+/*
+ * register_new_task_notification
+ *
+ * Register a port to which a notification about newly created
+ * tasks are sent.
+ */
+kern_return_t
+register_new_task_notification(
+	const host_t	host,
+	ipc_port_t	notification)
+{
+	if (host == HOST_NULL)
+		return KERN_INVALID_HOST;
+
+	/* Only one notification port may ever be registered. */
+	if (new_task_notification != NULL)
+		return KERN_NO_ACCESS;
+
+	/*
+	 *	NOTE(review): this check-then-set of
+	 *	new_task_notification is not serialized by any visible
+	 *	lock — confirm callers cannot race here.
+	 */
+	new_task_notification = notification;
+	return KERN_SUCCESS;
+}
diff --git a/kern/task.h b/kern/task.h
new file mode 100644
index 0000000..9521e95
--- /dev/null
+++ b/kern/task.h
@@ -0,0 +1,197 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: task.h
+ * Author: Avadis Tevanian, Jr.
+ *
+ * This file contains the structure definitions for tasks.
+ *
+ */
+
+#ifndef _KERN_TASK_H_
+#define _KERN_TASK_H_
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/time_value.h>
+#include <mach/mach_param.h>
+#include <mach/task_info.h>
+#include <mach_debug/mach_debug_types.h>
+#include <kern/kern_types.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/pc_sample.h>
+#include <kern/processor.h>
+#include <kern/syscall_emulation.h>
+#include <vm/vm_types.h>
+#include <machine/task.h>
+
+/*
+ * Task name buffer size. The size is chosen so that struct task fits
+ * into three cache lines. The size of a cache line on a typical CPU
+ * is 64 bytes.
+ */
+#define TASK_NAME_SIZE 32
+
+struct task {
+	/* Synchronization/destruction information */
+	decl_simple_lock_data(,lock)	/* Task's lock */
+	int		ref_count;	/* Number of references to me */
+
+	/* Flags */
+	unsigned char	assign_active;	/* waiting for may_assign */
+	unsigned char	active:1,	/* Task has not been terminated */
+	/* boolean_t */	may_assign:1,	/* can assigned pset be changed? */
+			essential:1;	/* Is this task essential for the system? */
+
+	/* Miscellaneous */
+	vm_map_t	map;		/* Address space description */
+	queue_chain_t	pset_tasks;	/* list of tasks assigned to pset */
+	int		suspend_count;	/* Internal scheduling only */
+
+	/* Thread information */
+	queue_head_t	thread_list;	/* list of threads */
+	int		thread_count;	/* number of threads */
+	processor_set_t	processor_set;	/* processor set for new threads */
+
+	/* User-visible scheduling information */
+	int		user_stop_count;	/* outstanding stops */
+	int		priority;	/* for new threads */
+
+	/* Statistics */
+	time_value64_t	total_user_time;
+				/* total user time for dead threads */
+	time_value64_t	total_system_time;
+				/* total system time for dead threads */
+
+	time_value64_t	creation_time;	/* time stamp at creation */
+
+	/* IPC structures */
+	decl_simple_lock_data(, itk_lock_data)	/* protects itk_* ports */
+	struct ipc_port *itk_self;	/* not a right, doesn't hold ref */
+	struct ipc_port *itk_sself;	/* a send right */
+	struct ipc_port *itk_exception;	/* a send right */
+	struct ipc_port *itk_bootstrap;	/* a send right */
+	struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX];
+				/* all send rights */
+
+	struct ipc_space *itk_space;	/* the task's IPC name space */
+
+	/* User space system call emulation support */
+	struct 	eml_dispatch	*eml_dispatch;
+
+	sample_control_t pc_sample;	/* PC sampling state */
+
+#if	FAST_TAS
+#define TASK_FAST_TAS_NRAS	8
+	vm_offset_t	fast_tas_base[TASK_FAST_TAS_NRAS];
+	vm_offset_t	fast_tas_end[TASK_FAST_TAS_NRAS];
+#endif	/* FAST_TAS */
+
+	/* Hardware specific data.  */
+	machine_task_t	machine;
+
+	/* Statistics */
+	long_natural_t	faults;		/* page faults counter */
+	long_natural_t	zero_fills;	/* zero fill pages counter */
+	long_natural_t	reactivations;	/* reactivated pages counter */
+	long_natural_t	pageins;	/* actual pageins couter */
+	long_natural_t	cow_faults;	/* copy-on-write faults counter */
+	long_natural_t	messages_sent;	/* messages sent counter */
+	long_natural_t	messages_received; /* messages received counter */
+
+	char	name[TASK_NAME_SIZE];	/* debugging name, NUL-terminated
+					   (see task_set_name) */
+};
+
+/* Lock/unlock a task's master lock. */
+#define task_lock(task)		simple_lock(&(task)->lock)
+#define task_unlock(task)	simple_unlock(&(task)->lock)
+
+/* Lock protecting the task's itk_* IPC port fields. */
+#define	itk_lock_init(task)	simple_lock_init(&(task)->itk_lock_data)
+#define	itk_lock(task)		simple_lock(&(task)->itk_lock_data)
+#define	itk_unlock(task)	simple_unlock(&(task)->itk_lock_data)
+
+/*
+ * Exported routines/macros
+ */
+
+extern kern_return_t task_create(
+ task_t parent_task,
+ boolean_t inherit_memory,
+ task_t *child_task);
+extern kern_return_t task_create_kernel(
+ task_t parent_task,
+ boolean_t inherit_memory,
+ task_t *child_task);
+extern kern_return_t task_terminate(
+ task_t task);
+extern kern_return_t task_suspend(
+ task_t task);
+extern kern_return_t task_resume(
+ task_t task);
+extern kern_return_t task_threads(
+ task_t task,
+ thread_array_t *thread_list,
+ natural_t *count);
+extern kern_return_t task_info(
+ task_t task,
+ int flavor,
+ task_info_t task_info_out,
+ natural_t *task_info_count);
+extern kern_return_t task_get_special_port(
+ task_t task,
+ int which,
+ struct ipc_port **portp);
+extern kern_return_t task_set_special_port(
+ task_t task,
+ int which,
+ struct ipc_port *port);
+extern kern_return_t task_assign(
+ task_t task,
+ processor_set_t new_pset,
+ boolean_t assign_threads);
+extern kern_return_t task_assign_default(
+ task_t task,
+ boolean_t assign_threads);
+extern kern_return_t task_set_name(
+ task_t task,
+ const_kernel_debug_name_t name);
+extern void consider_task_collect(void);
+
+/*
+ * Internal only routines
+ */
+
+extern void task_init(void);
+extern void task_reference(task_t);
+extern void task_deallocate(task_t);
+extern void task_hold_locked(task_t);
+extern kern_return_t task_hold(task_t);
+extern kern_return_t task_dowait(task_t, boolean_t);
+extern kern_return_t task_release(task_t);
+
+extern task_t kernel_task;
+
+#endif /* _KERN_TASK_H_ */
diff --git a/kern/task_notify.cli b/kern/task_notify.cli
new file mode 100644
index 0000000..c6c85d9
--- /dev/null
+++ b/kern/task_notify.cli
@@ -0,0 +1,7 @@
+/* XXX */
+
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+
+#include <mach/task_notify.defs>
diff --git a/kern/thread.c b/kern/thread.c
new file mode 100644
index 0000000..2eab1ca
--- /dev/null
+++ b/kern/thread.c
@@ -0,0 +1,2646 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/thread.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
+ * Date: 1986
+ *
+ * Thread management primitives implementation.
+ */
+
+#include <kern/printf.h>
+#include <mach/message.h>
+#include <mach/std_types.h>
+#include <mach/policy.h>
+#include <mach/thread_info.h>
+#include <mach/thread_special_ports.h>
+#include <mach/thread_status.h>
+#include <mach/time_value.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_inherit.h>
+#include <machine/vm_param.h>
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/eventcount.h>
+#include <kern/gnumach.server.h>
+#include <kern/ipc_mig.h>
+#include <kern/ipc_tt.h>
+#include <kern/mach_debug.server.h>
+#include <kern/mach_host.server.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/syscall_subr.h>
+#include <kern/thread.h>
+#include <kern/thread_swap.h>
+#include <kern/host.h>
+#include <kern/kalloc.h>
+#include <kern/slab.h>
+#include <kern/smp.h>
+#include <kern/mach_clock.h>
+#include <string.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_port.h>
+#include <ipc/mach_msg.h>
+#include <ipc/mach_port.server.h>
+#include <machine/machspl.h> /* for splsched */
+#include <machine/pcb.h>
+#include <machine/thread.h> /* for MACHINE_STACK */
+
+struct kmem_cache thread_cache;
+struct kmem_cache thread_stack_cache;
+
+queue_head_t reaper_queue;
+def_simple_lock_data(static, reaper_lock)
+
+/* private */
+struct thread thread_template;
+
+#if MACH_DEBUG
+#define STACK_MARKER 0xdeadbeefU
+boolean_t stack_check_usage = FALSE;
+def_simple_lock_data(static, stack_usage_lock)
+vm_size_t stack_max_usage = 0;
+#endif /* MACH_DEBUG */
+
+/*
+ * Machine-dependent code must define:
+ * pcb_init
+ * pcb_terminate
+ * pcb_collect
+ *
+ * The thread->pcb field is reserved for machine-dependent code.
+ */
+
+#ifdef MACHINE_STACK
+/*
+ * Machine-dependent code must define:
+ * stack_alloc_try
+ * stack_alloc
+ * stack_free
+ * stack_handoff
+ * stack_collect
+ * and if MACH_DEBUG:
+ * stack_statistics
+ */
+#else /* MACHINE_STACK */
+/*
+ * We allocate stacks from generic kernel VM.
+ * Machine-dependent code must define:
+ * stack_attach
+ * stack_detach
+ * stack_handoff
+ *
+ * The stack_free_list can only be accessed at splsched,
+ * because stack_alloc_try/thread_invoke operate at splsched.
+ */
+
+def_simple_lock_data(static, stack_lock_data)/* splsched only */
+#define stack_lock() simple_lock(&stack_lock_data)
+#define stack_unlock() simple_unlock(&stack_lock_data)
+
+vm_offset_t stack_free_list; /* splsched only */
+unsigned int stack_free_count = 0; /* splsched only */
+unsigned int stack_free_limit = 1; /* patchable */
+
+/*
+ * The next field is at the base of the stack,
+ * so the low end is left unsullied.
+ */
+
+#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
+
+/*
+ * stack_alloc_try:
+ *
+ * Non-blocking attempt to allocate a kernel stack.
+ * Called at splsched with the thread locked.
+ */
+
+boolean_t stack_alloc_try(
+ thread_t thread,
+ void (*resume)(thread_t))
+{
+ vm_offset_t stack;
+
+ stack_lock();
+ stack = stack_free_list;
+ if (stack != 0) {
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ } else {
+ stack = thread->stack_privilege;
+ }
+ stack_unlock();
+
+ if (stack != 0) {
+ stack_attach(thread, stack, resume);
+ counter(c_stack_alloc_hits++);
+ return TRUE;
+ } else {
+ counter(c_stack_alloc_misses++);
+ return FALSE;
+ }
+}
+
+/*
+ * stack_alloc:
+ *
+ * Allocate a kernel stack for a thread.
+ * May block.
+ */
+
+kern_return_t stack_alloc(
+ thread_t thread,
+ void (*resume)(thread_t))
+{
+ vm_offset_t stack;
+ spl_t s;
+
+ /*
+ * We first try the free list. It is probably empty,
+ * or stack_alloc_try would have succeeded, but possibly
+ * a stack was freed before the swapin thread got to us.
+ */
+
+ s = splsched();
+ stack_lock();
+ stack = stack_free_list;
+ if (stack != 0) {
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ }
+ stack_unlock();
+ (void) splx(s);
+
+ if (stack == 0) {
+ stack = kmem_cache_alloc(&thread_stack_cache);
+ assert(stack != 0);
+#if MACH_DEBUG
+ stack_init(stack);
+#endif /* MACH_DEBUG */
+ }
+
+ stack_attach(thread, stack, resume);
+ return KERN_SUCCESS;
+}
+
+/*
+ * stack_free:
+ *
+ * Free a thread's kernel stack.
+ * Called at splsched with the thread locked.
+ */
+
+void stack_free(
+ thread_t thread)
+{
+ vm_offset_t stack;
+
+ stack = stack_detach(thread);
+
+ if (stack != thread->stack_privilege) {
+ stack_lock();
+ stack_next(stack) = stack_free_list;
+ stack_free_list = stack;
+ stack_free_count += 1;
+#if MACH_COUNTERS
+ if (stack_free_count > c_stack_alloc_max)
+ c_stack_alloc_max = stack_free_count;
+#endif /* MACH_COUNTERS */
+ stack_unlock();
+ }
+}
+
+/*
+ * stack_collect:
+ *
+ * Free excess kernel stacks.
+ * May block.
+ */
+
+void stack_collect(void)
+{
+ vm_offset_t stack;
+ spl_t s;
+
+ s = splsched();
+ stack_lock();
+ while (stack_free_count > stack_free_limit) {
+ stack = stack_free_list;
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ stack_unlock();
+ (void) splx(s);
+
+#if MACH_DEBUG
+ stack_finalize(stack);
+#endif /* MACH_DEBUG */
+ kmem_cache_free(&thread_stack_cache, stack);
+
+ s = splsched();
+ stack_lock();
+ }
+ stack_unlock();
+ (void) splx(s);
+}
+#endif /* MACHINE_STACK */
+
+/*
+ * stack_privilege:
+ *
+ * stack_alloc_try on this thread must always succeed.
+ */
+
+void stack_privilege(
+ thread_t thread)
+{
+ /*
+ * This implementation only works for the current thread.
+ */
+
+ if (thread != current_thread())
+ panic("stack_privilege");
+
+ if (thread->stack_privilege == 0)
+ thread->stack_privilege = current_stack();
+}
+
+void thread_init(void)
+{
+ kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
+ NULL, 0);
+ /*
+ * Kernel stacks should be naturally aligned,
+ * so that it is easy to find the starting/ending
+ * addresses of a stack given an address in the middle.
+ */
+ kmem_cache_init(&thread_stack_cache, "thread_stack",
+ KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
+ NULL, 0);
+
+ /*
+ * Fill in a template thread for fast initialization.
+ * [Fields that must be (or are typically) reset at
+ * time of creation are so noted.]
+ */
+
+ /* thread_template.links (none) */
+ thread_template.runq = RUN_QUEUE_NULL;
+
+ /* thread_template.task (later) */
+ /* thread_template.thread_list (later) */
+ /* thread_template.pset_threads (later) */
+
+ /* thread_template.lock (later) */
+ /* one ref for being alive; one for the guy who creates the thread */
+ thread_template.ref_count = 2;
+
+ thread_template.pcb = (pcb_t) 0; /* (reset) */
+ thread_template.kernel_stack = (vm_offset_t) 0;
+ thread_template.stack_privilege = (vm_offset_t) 0;
+
+ thread_template.wait_event = 0;
+ /* thread_template.suspend_count (later) */
+ thread_template.wait_result = KERN_SUCCESS;
+ thread_template.wake_active = FALSE;
+ thread_template.state = TH_SUSP | TH_SWAPPED;
+ thread_template.swap_func = thread_bootstrap_return;
+
+/* thread_template.priority (later) */
+ thread_template.max_priority = BASEPRI_USER;
+/* thread_template.sched_pri (later - compute_priority) */
+#if MACH_FIXPRI
+ thread_template.sched_data = 0;
+ thread_template.policy = POLICY_TIMESHARE;
+#endif /* MACH_FIXPRI */
+ thread_template.depress_priority = -1;
+ thread_template.cpu_usage = 0;
+ thread_template.sched_usage = 0;
+ /* thread_template.sched_stamp (later) */
+
+ thread_template.recover = (vm_offset_t) 0;
+ thread_template.vm_privilege = 0;
+
+ thread_template.user_stop_count = 1;
+
+ /* thread_template.<IPC structures> (later) */
+
+ timer_init(&(thread_template.user_timer));
+ timer_init(&(thread_template.system_timer));
+ thread_template.user_timer_save.low = 0;
+ thread_template.user_timer_save.high = 0;
+ thread_template.system_timer_save.low = 0;
+ thread_template.system_timer_save.high = 0;
+ thread_template.cpu_delta = 0;
+ thread_template.sched_delta = 0;
+
+ thread_template.active = FALSE; /* reset */
+ thread_template.ast = AST_ZILCH;
+
+ /* thread_template.processor_set (later) */
+ thread_template.bound_processor = PROCESSOR_NULL;
+#if MACH_HOST
+ thread_template.may_assign = TRUE;
+ thread_template.assign_active = FALSE;
+#endif /* MACH_HOST */
+
+#if NCPUS > 1
+ /* thread_template.last_processor (later) */
+#endif /* NCPUS > 1 */
+
+ /*
+ * Initialize other data structures used in
+ * this module.
+ */
+
+ queue_init(&reaper_queue);
+ simple_lock_init(&reaper_lock);
+
+#ifndef MACHINE_STACK
+ simple_lock_init(&stack_lock_data);
+#endif /* MACHINE_STACK */
+
+#if MACH_DEBUG
+ simple_lock_init(&stack_usage_lock);
+#endif /* MACH_DEBUG */
+
+ /*
+ * Initialize any machine-dependent
+ * per-thread structures necessary.
+ */
+
+ pcb_module_init();
+}
+
+kern_return_t thread_create(
+ task_t parent_task,
+ thread_t *child_thread) /* OUT */
+{
+ thread_t new_thread;
+ processor_set_t pset;
+
+ if (parent_task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Allocate a thread and initialize static fields
+ */
+
+ new_thread = (thread_t) kmem_cache_alloc(&thread_cache);
+
+ if (new_thread == THREAD_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ *new_thread = thread_template;
+
+ record_time_stamp (&new_thread->creation_time);
+
+ /*
+ * Initialize runtime-dependent fields
+ */
+
+ new_thread->task = parent_task;
+ simple_lock_init(&new_thread->lock);
+ new_thread->sched_stamp = sched_tick;
+ thread_timeout_setup(new_thread);
+
+ /*
+ * Create a pcb. The kernel stack is created later,
+ * when the thread is swapped-in.
+ */
+ pcb_init(parent_task, new_thread);
+
+ ipc_thread_init(new_thread);
+
+ /*
+ * Find the processor set for the parent task.
+ */
+ task_lock(parent_task);
+ pset = parent_task->processor_set;
+ pset_reference(pset);
+ task_unlock(parent_task);
+
+ /*
+	 * This thread will most probably start working, assume it
+ * will take its share of CPU, to avoid having to find it out
+ * slowly. Decaying will however fix that quickly if it actually
+ * does not work
+ */
+ new_thread->cpu_usage = TIMER_RATE * SCHED_SCALE /
+ (pset->load_average >= SCHED_SCALE ?
+ pset->load_average : SCHED_SCALE);
+ new_thread->sched_usage = TIMER_RATE * SCHED_SCALE;
+
+ /*
+ * Lock both the processor set and the task,
+ * so that the thread can be added to both
+ * simultaneously. Processor set must be
+ * locked first.
+ */
+
+ Restart:
+ pset_lock(pset);
+ task_lock(parent_task);
+
+ /*
+ * If the task has changed processor sets,
+ * catch up (involves lots of lock juggling).
+ */
+ {
+ processor_set_t cur_pset;
+
+ cur_pset = parent_task->processor_set;
+ if (!cur_pset->active)
+ cur_pset = &default_pset;
+
+ if (cur_pset != pset) {
+ pset_reference(cur_pset);
+ task_unlock(parent_task);
+ pset_unlock(pset);
+ pset_deallocate(pset);
+ pset = cur_pset;
+ goto Restart;
+ }
+ }
+
+ /*
+ * Set the thread`s priority from the pset and task.
+ */
+
+ new_thread->priority = parent_task->priority;
+ if (pset->max_priority > new_thread->max_priority)
+ new_thread->max_priority = pset->max_priority;
+ if (new_thread->max_priority > new_thread->priority)
+ new_thread->priority = new_thread->max_priority;
+ /*
+ * Don't need to lock thread here because it can't
+ * possibly execute and no one else knows about it.
+ */
+ compute_priority(new_thread, TRUE);
+
+ /*
+ * Thread is suspended if the task is. Add 1 to
+ * suspend count since thread is created in suspended
+ * state.
+ */
+ new_thread->suspend_count = parent_task->suspend_count + 1;
+
+ /*
+ * Add the thread to the processor set.
+ * If the pset is empty, suspend the thread again.
+ */
+
+ pset_add_thread(pset, new_thread);
+ if (pset->empty)
+ new_thread->suspend_count++;
+
+#if HW_FOOTPRINT
+ /*
+ * Need to set last_processor, idle processor would be best, but
+ * that requires extra locking nonsense. Go for tail of
+ * processors queue to avoid master.
+ */
+ if (!pset->empty) {
+ new_thread->last_processor =
+ (processor_t)queue_first(&pset->processors);
+ }
+ else {
+ /*
+ * Thread created in empty processor set. Pick
+ * master processor as an acceptable legal value.
+ */
+ new_thread->last_processor = master_processor;
+ }
+#else /* HW_FOOTPRINT */
+ /*
+ * Don't need to initialize because the context switch
+ * code will set it before it can be used.
+ */
+#endif /* HW_FOOTPRINT */
+
+#if MACH_PCSAMPLE
+ new_thread->pc_sample.seqno = 0;
+ new_thread->pc_sample.sampletypes = 0;
+#endif /* MACH_PCSAMPLE */
+
+ new_thread->pc_sample.buffer = 0;
+
+ /* Inherit the task name as the thread name. */
+ memcpy (new_thread->name, parent_task->name, THREAD_NAME_SIZE);
+
+ /*
+ * Add the thread to the task`s list of threads.
+ * The new thread holds another reference to the task.
+ */
+
+ parent_task->ref_count++;
+
+ parent_task->thread_count++;
+ queue_enter(&parent_task->thread_list, new_thread, thread_t,
+ thread_list);
+
+ /*
+ * Finally, mark the thread active.
+ */
+
+ new_thread->active = TRUE;
+
+ if (!parent_task->active) {
+ task_unlock(parent_task);
+ pset_unlock(pset);
+ (void) thread_terminate(new_thread);
+ /* release ref we would have given our caller */
+ thread_deallocate(new_thread);
+ return KERN_FAILURE;
+ }
+ task_unlock(parent_task);
+ pset_unlock(pset);
+
+ ipc_thread_enable(new_thread);
+
+ *child_thread = new_thread;
+ return KERN_SUCCESS;
+}
+
+unsigned int thread_deallocate_stack = 0;
+
+void thread_deallocate(
+ thread_t thread)
+{
+ spl_t s;
+ task_t task;
+ processor_set_t pset;
+
+ time_value64_t user_time, system_time;
+
+ if (thread == THREAD_NULL)
+ return;
+
+ /*
+ * First, check for new count > 0 (the common case).
+ * Only the thread needs to be locked.
+ */
+ s = splsched();
+ thread_lock(thread);
+ if (--thread->ref_count > 0) {
+ thread_unlock(thread);
+ (void) splx(s);
+ return;
+ }
+
+ /*
+ * Count is zero. However, the task's and processor set's
+ * thread lists have implicit references to
+ * the thread, and may make new ones. Their locks also
+ * dominate the thread lock. To check for this, we
+ * temporarily restore the one thread reference, unlock
+ * the thread, and then lock the other structures in
+ * the proper order.
+ */
+ thread->ref_count = 1;
+ thread_unlock(thread);
+ (void) splx(s);
+
+ pset = thread->processor_set;
+ pset_lock(pset);
+
+#if MACH_HOST
+ /*
+ * The thread might have moved.
+ */
+ while (pset != thread->processor_set) {
+ pset_unlock(pset);
+ pset = thread->processor_set;
+ pset_lock(pset);
+ }
+#endif /* MACH_HOST */
+
+ task = thread->task;
+ task_lock(task);
+
+ s = splsched();
+ thread_lock(thread);
+
+ if (--thread->ref_count > 0) {
+ /*
+ * Task or processor_set made extra reference.
+ */
+ thread_unlock(thread);
+ (void) splx(s);
+ task_unlock(task);
+ pset_unlock(pset);
+ return;
+ }
+
+ /*
+ * Thread has no references - we can remove it.
+ */
+
+ /*
+ * Remove pending timeouts.
+ */
+ reset_timeout_check(&thread->timer);
+
+ reset_timeout_check(&thread->depress_timer);
+ thread->depress_priority = -1;
+
+ /*
+ * Accumulate times for dead threads in task.
+ */
+ thread_read_times(thread, &user_time, &system_time);
+ time_value64_add(&task->total_user_time, &user_time);
+ time_value64_add(&task->total_system_time, &system_time);
+
+ /*
+ * Remove thread from task list and processor_set threads list.
+ */
+ task->thread_count--;
+ queue_remove(&task->thread_list, thread, thread_t, thread_list);
+
+ pset_remove_thread(pset, thread);
+
+ thread_unlock(thread); /* no more references - safe */
+ (void) splx(s);
+ task_unlock(task);
+ pset_unlock(pset);
+ pset_deallocate(pset);
+
+ /*
+ * A couple of quick sanity checks
+ */
+
+ if (thread == current_thread()) {
+ panic("thread deallocating itself");
+ }
+ if ((thread->state & ~(TH_RUN | TH_HALTED | TH_SWAPPED)) != TH_SUSP)
+ panic("unstopped thread destroyed!");
+
+ /*
+ * Deallocate the task reference, since we know the thread
+ * is not running.
+ */
+ task_deallocate(thread->task); /* may block */
+
+ /*
+ * Clean up any machine-dependent resources.
+ */
+ if ((thread->state & TH_SWAPPED) == 0) {
+ splsched();
+ stack_free(thread);
+ (void) splx(s);
+ thread_deallocate_stack++;
+ }
+ /*
+ * Rattle the event count machinery (gag)
+ */
+ evc_notify_abort(thread);
+
+ pcb_terminate(thread);
+ kmem_cache_free(&thread_cache, (vm_offset_t) thread);
+}
+
+void thread_reference(
+ thread_t thread)
+{
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return;
+
+ s = splsched();
+ thread_lock(thread);
+ thread->ref_count++;
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_terminate:
+ *
+ * Permanently stop execution of the specified thread.
+ *
+ * A thread to be terminated must be allowed to clean up any state
+ * that it has before it exits. The thread is broken out of any
+ * wait condition that it is in, and signalled to exit. It then
+ * cleans up its state and calls thread_halt_self on its way out of
+ * the kernel. The caller waits for the thread to halt, terminates
+ * its IPC state, and then deallocates it.
+ *
+ * If the caller is the current thread, it must still exit the kernel
+ * to clean up any state (thread and port references, messages, etc).
+ * When it exits the kernel, it then terminates its IPC state and
+ * queues itself for the reaper thread, which will wait for the thread
+ * to stop and then deallocate it. (A thread cannot deallocate itself,
+ * since it needs a kernel stack to execute.)
+ */
+kern_return_t thread_terminate(
+ thread_t thread)
+{
+ thread_t cur_thread = current_thread();
+ task_t cur_task;
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Break IPC control over the thread.
+ */
+ ipc_thread_disable(thread);
+
+ if (thread == cur_thread) {
+
+ /*
+ * Current thread will queue itself for reaper when
+ * exiting kernel.
+ */
+ s = splsched();
+ thread_lock(thread);
+ if (thread->active) {
+ thread->active = FALSE;
+ thread_ast_set(thread, AST_TERMINATE);
+ }
+ thread_unlock(thread);
+ ast_on(cpu_number(), AST_TERMINATE);
+ splx(s);
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Lock both threads and the current task
+ * to check termination races and prevent deadlocks.
+ */
+ cur_task = current_task();
+ task_lock(cur_task);
+ s = splsched();
+ if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
+ thread_lock(thread);
+ thread_lock(cur_thread);
+ }
+ else {
+ thread_lock(cur_thread);
+ thread_lock(thread);
+ }
+
+ /*
+ * If the current thread is being terminated, help out.
+ */
+ if ((!cur_task->active) || (!cur_thread->active)) {
+ thread_unlock(cur_thread);
+ thread_unlock(thread);
+ (void) splx(s);
+ task_unlock(cur_task);
+ thread_terminate(cur_thread);
+ return KERN_FAILURE;
+ }
+
+ thread_unlock(cur_thread);
+ task_unlock(cur_task);
+
+ /*
+ * Terminate victim thread.
+ */
+ if (!thread->active) {
+ /*
+ * Someone else got there first.
+ */
+ thread_unlock(thread);
+ (void) splx(s);
+ return KERN_FAILURE;
+ }
+
+ thread->active = FALSE;
+
+ thread_unlock(thread);
+ (void) splx(s);
+
+#if MACH_HOST
+ /*
+ * Reassign thread to default pset if needed.
+ */
+ thread_freeze(thread);
+ if (thread->processor_set != &default_pset)
+ thread_doassign(thread, &default_pset, FALSE);
+#endif /* MACH_HOST */
+
+ /*
+ * Halt the victim at the clean point.
+ */
+ (void) thread_halt(thread, TRUE);
+#if MACH_HOST
+ thread_unfreeze(thread);
+#endif /* MACH_HOST */
+ /*
+	 * Shut down the victim's IPC and deallocate its
+ * reference to itself.
+ */
+ ipc_thread_terminate(thread);
+ thread_deallocate(thread);
+ return KERN_SUCCESS;
+}
+
+kern_return_t thread_terminate_release(
+ thread_t thread,
+ task_t task,
+ mach_port_name_t thread_name,
+ mach_port_name_t reply_port,
+ vm_offset_t address,
+ vm_size_t size)
+{
+ if (task == NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (thread == NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ mach_port_deallocate(task->itk_space, thread_name);
+
+ if (reply_port != MACH_PORT_NULL)
+ mach_port_destroy(task->itk_space, reply_port);
+
+ if ((address != 0) || (size != 0))
+ vm_deallocate(task->map, address, size);
+
+ return thread_terminate(thread);
+}
+
+/*
+ * thread_force_terminate:
+ *
+ * Version of thread_terminate called by task_terminate. thread is
+ * not the current thread. task_terminate is the dominant operation,
+ * so we can force this thread to stop.
+ */
+void
+thread_force_terminate(
+ thread_t thread)
+{
+ boolean_t deallocate_here;
+ spl_t s;
+
+ ipc_thread_disable(thread);
+
+#if MACH_HOST
+ /*
+ * Reassign thread to default pset if needed.
+ */
+ thread_freeze(thread);
+ if (thread->processor_set != &default_pset)
+ thread_doassign(thread, &default_pset, FALSE);
+#endif /* MACH_HOST */
+
+ s = splsched();
+ thread_lock(thread);
+ deallocate_here = thread->active;
+ thread->active = FALSE;
+ thread_unlock(thread);
+ (void) splx(s);
+
+ (void) thread_halt(thread, TRUE);
+ ipc_thread_terminate(thread);
+
+#if MACH_HOST
+ thread_unfreeze(thread);
+#endif /* MACH_HOST */
+
+ if (deallocate_here)
+ thread_deallocate(thread);
+}
+
+
+/*
+ * Halt a thread at a clean point, leaving it suspended.
+ *
+ * must_halt indicates whether thread must halt.
+ *
+ */
+kern_return_t thread_halt(
+ thread_t thread,
+ boolean_t must_halt)
+{
+ thread_t cur_thread = current_thread();
+ kern_return_t ret;
+ spl_t s;
+
+ if (thread == cur_thread)
+ panic("thread_halt: trying to halt current thread.");
+ /*
+ * If must_halt is FALSE, then a check must be made for
+ * a cycle of halt operations.
+ */
+ if (!must_halt) {
+ /*
+ * Grab both thread locks.
+ */
+ s = splsched();
+ if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
+ thread_lock(thread);
+ thread_lock(cur_thread);
+ }
+ else {
+ thread_lock(cur_thread);
+ thread_lock(thread);
+ }
+
+ /*
+ * If target thread is already halted, grab a hold
+ * on it and return.
+ */
+ if (thread->state & TH_HALTED) {
+ thread->suspend_count++;
+ thread_unlock(cur_thread);
+ thread_unlock(thread);
+ (void) splx(s);
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * If someone is trying to halt us, we have a potential
+ * halt cycle. Break the cycle by interrupting anyone
+ * who is trying to halt us, and causing this operation
+ * to fail; retry logic will only retry operations
+ * that cannot deadlock. (If must_halt is TRUE, this
+ * operation can never cause a deadlock.)
+ */
+ if (cur_thread->ast & AST_HALT) {
+ thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(cur_thread),
+ THREAD_INTERRUPTED);
+ thread_unlock(thread);
+ thread_unlock(cur_thread);
+ (void) splx(s);
+ return KERN_FAILURE;
+ }
+
+ thread_unlock(cur_thread);
+
+ }
+ else {
+ /*
+ * Lock thread and check whether it is already halted.
+ */
+ s = splsched();
+ thread_lock(thread);
+ if (thread->state & TH_HALTED) {
+ thread->suspend_count++;
+ thread_unlock(thread);
+ (void) splx(s);
+ return KERN_SUCCESS;
+ }
+ }
+
+ /*
+ * Suspend thread - inline version of thread_hold() because
+ * thread is already locked.
+ */
+ thread->suspend_count++;
+ thread->state |= TH_SUSP;
+
+ /*
+ * If someone else is halting it, wait for that to complete.
+ * Fail if wait interrupted and must_halt is false.
+ */
+ while ((thread->ast & AST_HALT) && (!(thread->state & TH_HALTED))) {
+ thread->wake_active = TRUE;
+ thread_sleep(TH_EV_WAKE_ACTIVE(thread),
+ simple_lock_addr(thread->lock), TRUE);
+
+ if (thread->state & TH_HALTED) {
+ (void) splx(s);
+ return KERN_SUCCESS;
+ }
+ if ((current_thread()->wait_result != THREAD_AWAKENED)
+ && !(must_halt)) {
+ (void) splx(s);
+ thread_release(thread);
+ return KERN_FAILURE;
+ }
+ thread_lock(thread);
+ }
+
+ /*
+ * Otherwise, have to do it ourselves.
+ */
+
+ thread_ast_set(thread, AST_HALT);
+
+ while (TRUE) {
+ /*
+ * Wait for thread to stop.
+ */
+ thread_unlock(thread);
+ (void) splx(s);
+
+ ret = thread_dowait(thread, must_halt);
+
+ /*
+ * If the dowait failed, so do we. Drop AST_HALT, and
+ * wake up anyone else who might be waiting for it.
+ */
+ if (ret != KERN_SUCCESS) {
+ s = splsched();
+ thread_lock(thread);
+ thread_ast_clear(thread, AST_HALT);
+ thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(thread),
+ THREAD_INTERRUPTED);
+ thread_unlock(thread);
+ (void) splx(s);
+
+ thread_release(thread);
+ return ret;
+ }
+
+ /*
+ * Clear any interruptible wait.
+ */
+ clear_wait(thread, THREAD_INTERRUPTED, TRUE);
+
+ /*
+ * If the thread's at a clean point, we're done.
+ * Don't need a lock because it really is stopped.
+ */
+ if (thread->state & TH_HALTED)
+ return KERN_SUCCESS;
+
+ /*
+ * If the thread is at a nice continuation,
+ * or a continuation with a cleanup routine,
+ * call the cleanup routine.
+ */
+ if ((((thread->swap_func == mach_msg_continue) ||
+ (thread->swap_func == mach_msg_receive_continue)) &&
+ mach_msg_interrupt(thread)) ||
+ (thread->swap_func == thread_exception_return) ||
+ (thread->swap_func == thread_bootstrap_return)) {
+ s = splsched();
+ thread_lock(thread);
+ thread->state |= TH_HALTED;
+ thread_ast_clear(thread, AST_HALT);
+ thread_unlock(thread);
+ splx(s);
+
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Force the thread to stop at a clean
+ * point, and arrange to wait for it.
+ *
+ * Set it running, so it can notice. Override
+ * the suspend count. We know that the thread
+ * is suspended and not waiting.
+ *
+ * Since the thread may hit an interruptible wait
+ * before it reaches a clean point, we must force it
+ * to wake us up when it does so. This involves some
+ * trickery:
+ * We mark the thread SUSPENDED so that thread_block
+ * will suspend it and wake us up.
+ * We mark the thread RUNNING so that it will run.
+ * We mark the thread UN-INTERRUPTIBLE (!) so that
+ * some other thread trying to halt or suspend it won't
+ * take it off the run queue before it runs. Since
+ * dispatching a thread (the tail of thread_invoke) marks
+ * the thread interruptible, it will stop at the next
+ * context switch or interruptible wait.
+ */
+
+ s = splsched();
+ thread_lock(thread);
+ if ((thread->state & TH_SCHED_STATE) != TH_SUSP)
+ panic("thread_halt");
+ thread->state |= TH_RUN | TH_UNINT;
+ thread_setrun(thread, FALSE);
+
+ /*
+ * Continue loop and wait for thread to stop.
+ */
+ }
+}
+
+static void __attribute__((noreturn)) walking_zombie(void)
+{
+ panic("the zombie walks!");
+}
+
+/*
+ * Thread calls this routine on exit from the kernel when it
+ * notices a halt request.
+ */
+void thread_halt_self(continuation_t continuation)
+{
+ thread_t thread = current_thread();
+ spl_t s;
+
+ if (thread->ast & AST_TERMINATE) {
+ /*
+ * Thread is terminating itself. Shut
+ * down IPC, then queue it up for the
+ * reaper thread.
+ */
+ ipc_thread_terminate(thread);
+
+ thread_hold(thread);
+
+ s = splsched();
+ simple_lock(&reaper_lock);
+ enqueue_tail(&reaper_queue, &(thread->links));
+ simple_unlock(&reaper_lock);
+
+ thread_lock(thread);
+ thread->state |= TH_HALTED;
+ thread_unlock(thread);
+ (void) splx(s);
+
+ thread_wakeup((event_t)&reaper_queue);
+ counter(c_thread_halt_self_block++);
+ thread_block(walking_zombie);
+ /*NOTREACHED*/
+ } else {
+ /*
+ * Thread was asked to halt - show that it
+ * has done so.
+ */
+ s = splsched();
+ thread_lock(thread);
+ thread->state |= TH_HALTED;
+ thread_ast_clear(thread, AST_HALT);
+ thread_unlock(thread);
+ splx(s);
+ counter(c_thread_halt_self_block++);
+ thread_block(continuation);
+ /*
+ * thread_release resets TH_HALTED.
+ */
+ }
+}
+
+/*
+ * thread_hold:
+ *
+ * Suspend execution of the specified thread.
+ * This is a recursive-style suspension of the thread, a count of
+ * suspends is maintained.
+ */
+void thread_hold(
+ thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ thread->suspend_count++;
+ thread->state |= TH_SUSP;
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_dowait:
+ *
+ * Wait for a thread to actually enter stopped state.
+ *
+ * must_halt argument indicates if this may fail on interruption.
+ * This is FALSE only if called from thread_abort via thread_halt.
+ */
+kern_return_t
+thread_dowait(
+ thread_t thread,
+ boolean_t must_halt)
+{
+ boolean_t need_wakeup;
+ kern_return_t ret = KERN_SUCCESS;
+ spl_t s;
+
+ if (thread == current_thread())
+ panic("thread_dowait");
+
+ /*
+ * If a thread is not interruptible, it may not be suspended
+ * until it becomes interruptible. In this case, we wait for
+ * the thread to stop itself, and indicate that we are waiting
+ * for it to stop so that it can wake us up when it does stop.
+ *
+ * If the thread is interruptible, we may be able to suspend
+ * it immediately. There are several cases:
+ *
+ * 1) The thread is already stopped (trivial)
+ * 2) The thread is runnable (marked RUN and on a run queue).
+ * We pull it off the run queue and mark it stopped.
+ * 3) The thread is running. We wait for it to stop.
+ */
+
+ need_wakeup = FALSE;
+ s = splsched();
+ thread_lock(thread);
+
+ for (;;) {
+ switch (thread->state & TH_SCHED_STATE) {
+ case TH_SUSP:
+ case TH_WAIT | TH_SUSP:
+ /*
+ * Thread is already suspended, or sleeping in an
+ * interruptible wait. We win!
+ */
+ break;
+
+ case TH_RUN | TH_SUSP:
+ /*
+ * The thread is interruptible. If we can pull
+ * it off a runq, stop it here.
+ */
+ if (rem_runq(thread) != RUN_QUEUE_NULL) {
+ thread->state &= ~TH_RUN;
+ need_wakeup = thread->wake_active;
+ thread->wake_active = FALSE;
+ break;
+ }
+#if NCPUS > 1
+ /*
+ * The thread must be running, so make its
+ * processor execute ast_check(). This
+ * should cause the thread to take an ast and
+ * context switch to suspend for us.
+ */
+ cause_ast_check(thread->last_processor);
+#endif /* NCPUS > 1 */
+
+ /*
+ * Fall through to wait for thread to stop.
+ */
+
+ case TH_RUN | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Wait for the thread to stop, or sleep interruptibly
+ * (thread_block will stop it in the latter case).
+ * Check for failure if interrupted.
+ */
+ thread->wake_active = TRUE;
+ thread_sleep(TH_EV_WAKE_ACTIVE(thread),
+ simple_lock_addr(thread->lock), TRUE);
+ thread_lock(thread);
+ if ((current_thread()->wait_result != THREAD_AWAKENED) &&
+ !must_halt) {
+ ret = KERN_FAILURE;
+ break;
+ }
+
+ /*
+ * Repeat loop to check thread`s state.
+ */
+ continue;
+ }
+ /*
+ * Thread is stopped at this point.
+ */
+ break;
+ }
+
+ thread_unlock(thread);
+ (void) splx(s);
+
+ if (need_wakeup)
+ thread_wakeup(TH_EV_WAKE_ACTIVE(thread));
+
+ return ret;
+}
+
+void thread_release(
+ thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ if (--thread->suspend_count == 0) {
+ thread->state &= ~(TH_SUSP | TH_HALTED);
+ if ((thread->state & (TH_WAIT | TH_RUN)) == 0) {
+ /* was only suspended */
+ thread->state |= TH_RUN;
+ thread_setrun(thread, TRUE);
+ }
+ }
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ *	thread_suspend:
+ *
+ *	User-visible suspend.  The first user stop also takes one
+ *	internal suspend count and sets TH_SUSP; nested calls only
+ *	bump the user stop count.  Blocks until the target is out of
+ *	any uninterruptible (TH_UNINT) section before suspending it.
+ */
+kern_return_t thread_suspend(
+	thread_t	thread)
+{
+	boolean_t		hold;
+	spl_t			spl;
+
+	if (thread == THREAD_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	hold = FALSE;
+	spl = splsched();
+	thread_lock(thread);
+	/* Wait for thread to get interruptible */
+	while (thread->state & TH_UNINT) {
+		assert_wait(TH_EV_STATE(thread), TRUE);
+		thread_unlock(thread);
+		thread_block(thread_no_continuation);
+		thread_lock(thread);
+	}
+	if (thread->user_stop_count++ == 0) {
+		/* first stop: take the internal count too */
+		hold = TRUE;
+		thread->suspend_count++;
+		thread->state |= TH_SUSP;
+	}
+	thread_unlock(thread);
+	(void) splx(spl);
+
+	/*
+	 * Now wait for the thread if necessary.
+	 */
+	if (hold) {
+		if (thread == current_thread()) {
+			/*
+			 * We want to call thread_block on our way out,
+			 * to stop running.
+			 */
+			spl = splsched();
+			ast_on(cpu_number(), AST_BLOCK);
+			(void) splx(spl);
+		} else
+			(void) thread_dowait(thread, TRUE);
+	}
+	return KERN_SUCCESS;
+}
+
+
+/*
+ *	thread_resume:
+ *
+ *	Undo one thread_suspend.  The thread becomes runnable again
+ *	only when both the user stop count and the internal suspend
+ *	count drain to zero.  Fails if the thread was not suspended.
+ */
+kern_return_t thread_resume(
+	thread_t	thread)
+{
+	kern_return_t	ret;
+	spl_t		s;
+
+	if (thread == THREAD_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	ret = KERN_SUCCESS;
+
+	s = splsched();
+	thread_lock(thread);
+	if (thread->user_stop_count > 0) {
+		if (--thread->user_stop_count == 0) {
+			if (--thread->suspend_count == 0) {
+				thread->state &= ~(TH_SUSP | TH_HALTED);
+				if ((thread->state & (TH_WAIT | TH_RUN)) == 0) {
+					/* was only suspended */
+					thread->state |= TH_RUN;
+					thread_setrun(thread, TRUE);
+				}
+			}
+		}
+	}
+	else {
+		/* never user-suspended (or already fully resumed) */
+		ret = KERN_FAILURE;
+	}
+
+	thread_unlock(thread);
+	(void) splx(s);
+
+	return ret;
+}
+
+/*
+ * Return thread's machine-dependent state.
+ * Except for the i386 debug-state fast path, the target may not be
+ * the calling thread: it is held and stopped (thread_dowait) before
+ * its register state is read, then released.
+ */
+kern_return_t thread_get_state(
+	thread_t	thread,
+	int		flavor,
+	thread_state_t	old_state,	/* pointer to OUT array */
+	natural_t	*old_state_count)	/*IN/OUT*/
+{
+	kern_return_t		ret;
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (flavor == i386_DEBUG_STATE && thread == current_thread())
+		/* This state can be obtained directly for the current thread. */
+		return thread_getstatus(thread, flavor, old_state, old_state_count);
+#endif
+
+	if (thread == THREAD_NULL || thread == current_thread())
+		return KERN_INVALID_ARGUMENT;
+
+	thread_hold(thread);
+	(void) thread_dowait(thread, TRUE);
+
+	ret = thread_getstatus(thread, flavor, old_state, old_state_count);
+
+	thread_release(thread);
+	return ret;
+}
+
+/*
+ * Change thread's machine-dependent state.
+ * Except for the i386 fast paths, the target may not be the calling
+ * thread: it is held and stopped (thread_dowait) before its register
+ * state is written, then released.
+ */
+kern_return_t thread_set_state(
+	thread_t	thread,
+	int		flavor,
+	thread_state_t	new_state,
+	natural_t	new_state_count)
+{
+	kern_return_t		ret;
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (flavor == i386_DEBUG_STATE && thread == current_thread())
+		/* This state can be set directly for the current thread. */
+		return thread_setstatus(thread, flavor, new_state, new_state_count);
+	if (flavor == i386_FSGS_BASE_STATE && thread == current_thread())
+		/* This state can be set directly for the current thread. */
+		return thread_setstatus(thread, flavor, new_state, new_state_count);
+#endif
+
+	if (thread == THREAD_NULL || thread == current_thread())
+		return KERN_INVALID_ARGUMENT;
+
+	thread_hold(thread);
+	(void) thread_dowait(thread, TRUE);
+
+	ret = thread_setstatus(thread, flavor, new_state, new_state_count);
+
+	thread_release(thread);
+	return ret;
+}
+
+/*
+ *	thread_info:
+ *
+ *	Return information about the thread.  Supported flavors:
+ *	THREAD_BASIC_INFO (times, priorities, run state, cpu usage)
+ *	and THREAD_SCHED_INFO (policy and scheduling priorities).
+ *	*thread_info_count is IN/OUT: smaller legacy sizes are
+ *	accepted for callers unaware of newly-added trailing fields.
+ *
+ *	Fix: system_time64 was mistakenly filled from user_time
+ *	(copy-paste error); it now carries system_time.
+ */
+kern_return_t thread_info(
+	thread_t		thread,
+	int			flavor,
+	thread_info_t		thread_info_out,    /* pointer to OUT array */
+	natural_t		*thread_info_count) /*IN/OUT*/
+{
+	int			state, flags;
+	spl_t			s;
+
+	if (thread == THREAD_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if (flavor == THREAD_BASIC_INFO) {
+	    thread_basic_info_t	basic_info;
+
+	    /* Allow *thread_info_count to be smaller than the provided amount
+	     * that does not contain the new time_value64_t fields as some
+	     * callers might not know about them yet. */
+
+	    if (*thread_info_count <
+			THREAD_BASIC_INFO_COUNT - 3 * sizeof(time_value64_t)/sizeof(natural_t))
+		return KERN_INVALID_ARGUMENT;
+
+	    basic_info = (thread_basic_info_t) thread_info_out;
+
+	    s = splsched();
+	    thread_lock(thread);
+
+	    /*
+	     *	Update lazy-evaluated scheduler info because someone wants it.
+	     */
+	    if ((thread->state & TH_RUN) == 0 &&
+		thread->sched_stamp != sched_tick)
+		update_priority(thread);
+
+	    /* fill in info */
+
+	    time_value64_t user_time, system_time;
+	    thread_read_times(thread, &user_time, &system_time);
+	    TIME_VALUE64_TO_TIME_VALUE(&user_time, &basic_info->user_time);
+	    TIME_VALUE64_TO_TIME_VALUE(&system_time, &basic_info->system_time);
+
+	    basic_info->base_priority	= thread->priority;
+	    basic_info->cur_priority	= thread->sched_pri;
+	    time_value64_t creation_time;
+	    read_time_stamp(&thread->creation_time, &creation_time);
+	    TIME_VALUE64_TO_TIME_VALUE(&creation_time, &basic_info->creation_time);
+
+	    if (*thread_info_count == THREAD_BASIC_INFO_COUNT) {
+		/* Copy new time_value64_t fields */
+		basic_info->user_time64 = user_time;
+		/* was: = user_time (copy-paste bug) */
+		basic_info->system_time64 = system_time;
+		basic_info->creation_time64 = creation_time;
+	    }
+
+	    /*
+	     *	To calculate cpu_usage, first correct for timer rate,
+	     *	then for 5/8 ageing.  The correction factor [3/5] is
+	     *	(1/(5/8) - 1).
+	     */
+	    basic_info->cpu_usage = thread->cpu_usage /
+					(TIMER_RATE/TH_USAGE_SCALE);
+	    basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
+
+	    flags = 0;
+	    if (thread->state & TH_SWAPPED)
+		flags |= TH_FLAGS_SWAPPED;
+	    if (thread->state & TH_IDLE)
+		flags |= TH_FLAGS_IDLE;
+
+	    /* Map the state bits to a single run_state code, in
+	     * decreasing order of precedence. */
+	    if (thread->state & TH_HALTED)
+		state = TH_STATE_HALTED;
+	    else
+	    if (thread->state & TH_RUN)
+		state = TH_STATE_RUNNING;
+	    else
+	    if (thread->state & TH_UNINT)
+		state = TH_STATE_UNINTERRUPTIBLE;
+	    else
+	    if (thread->state & TH_SUSP)
+		state = TH_STATE_STOPPED;
+	    else
+	    if (thread->state & TH_WAIT)
+		state = TH_STATE_WAITING;
+	    else
+		state = 0;		/* ? */
+
+	    basic_info->run_state = state;
+	    basic_info->flags = flags;
+	    basic_info->suspend_count = thread->user_stop_count;
+	    if (state == TH_STATE_RUNNING)
+		basic_info->sleep_time = 0;
+	    else
+		basic_info->sleep_time = sched_tick - thread->sched_stamp;
+
+	    thread_unlock(thread);
+	    splx(s);
+
+	    if (*thread_info_count > THREAD_BASIC_INFO_COUNT)
+	      *thread_info_count = THREAD_BASIC_INFO_COUNT;
+	    return KERN_SUCCESS;
+	}
+	else if (flavor == THREAD_SCHED_INFO) {
+	    thread_sched_info_t	sched_info;
+
+	    /* Allow *thread_info_count to be one smaller than the
+	       usual amount, because last_processor is a
+	       new member that some callers might not know about. */
+	    if (*thread_info_count < THREAD_SCHED_INFO_COUNT -1)
+		return KERN_INVALID_ARGUMENT;
+
+	    sched_info = (thread_sched_info_t) thread_info_out;
+
+	    s = splsched();
+	    thread_lock(thread);
+
+#if	MACH_FIXPRI
+	    sched_info->policy = thread->policy;
+	    if (thread->policy == POLICY_FIXEDPRI)
+		sched_info->data = (thread->sched_data * tick)/1000;
+	    else
+		sched_info->data = 0;
+
+#else	/* MACH_FIXPRI */
+	    sched_info->policy = POLICY_TIMESHARE;
+	    sched_info->data = 0;
+#endif	/* MACH_FIXPRI */
+
+	    sched_info->base_priority = thread->priority;
+	    sched_info->max_priority = thread->max_priority;
+	    sched_info->cur_priority = thread->sched_pri;
+
+	    sched_info->depressed = (thread->depress_priority >= 0);
+	    sched_info->depress_priority = thread->depress_priority;
+
+#if	NCPUS > 1
+	    if (thread->last_processor)
+	        sched_info->last_processor = thread->last_processor->slot_num;
+	    else
+#endif
+	        sched_info->last_processor = 0;
+
+	    thread_unlock(thread);
+	    splx(s);
+
+	    *thread_info_count = THREAD_SCHED_INFO_COUNT;
+	    return KERN_SUCCESS;
+	}
+
+	return KERN_INVALID_ARGUMENT;
+}
+
+/*
+ *	thread_abort:
+ *
+ *	Interrupt the target thread: abort any event-count wait and
+ *	any RPC/exception it is blocked in, then set it going again.
+ *	May not be applied to the calling thread.
+ */
+kern_return_t thread_abort(
+	thread_t	thread)
+{
+	if (thread == THREAD_NULL || thread == current_thread()) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/*
+	 *	Clear it of an event wait.
+	 */
+
+	evc_notify_abort(thread);
+
+	/*
+	 *	Try to force the thread to a clean point
+	 *	If the halt operation fails return KERN_ABORTED.
+	 *	ipc code will convert this to an ipc interrupted error code.
+	 */
+	if (thread_halt(thread, FALSE) != KERN_SUCCESS)
+		return KERN_ABORTED;
+
+	/*
+	 *	If the thread was in an exception, abort that too.
+	 */
+	mach_msg_abort_rpc(thread);
+
+	/*
+	 *	Then set it going again.
+	 */
+	thread_release(thread);
+
+	/*
+	 *	Also abort any depression.
+	 */
+	if (thread->depress_priority != -1)
+		thread_depress_abort(thread);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	thread_start:
+ *
+ *	Start a thread at the specified routine.
+ *	The thread must be in a swapped state.  Only records the
+ *	continuation; it takes effect when the thread is swapped in.
+ */
+
+void
+thread_start(
+	thread_t	thread,
+	continuation_t	start)
+{
+	thread->swap_func = start;
+}
+
+/*
+ *	kernel_thread:
+ *
+ *	Start up a kernel thread in the specified task, running START
+ *	with ITH_OTHER set to ARG, at BASEPRI_SYSTEM priority.
+ *	Returns the new thread, or THREAD_NULL on creation failure.
+ */
+
+thread_t kernel_thread(
+	task_t		task,
+	continuation_t	start,
+	void *		arg)
+{
+	kern_return_t	kr;
+	thread_t	thread;
+
+	kr = thread_create(task, &thread);
+	if (kr != KERN_SUCCESS)
+		return THREAD_NULL;
+
+	/* release "extra" ref that thread_create gave us */
+	thread_deallocate(thread);
+	thread_start(thread, start);
+	thread->ith_other = arg;
+
+	/*
+	 *	We ensure that the kernel thread starts with a stack.
+	 *	The swapin mechanism might not be operational yet.
+	 */
+	thread_doswapin(thread);
+	thread->max_priority = BASEPRI_SYSTEM;
+	thread->priority = BASEPRI_SYSTEM;
+	thread->sched_pri = BASEPRI_SYSTEM;
+	(void) thread_resume(thread);
+	return thread;
+}
+
+/*
+ *	reaper_thread:
+ *
+ *	This kernel thread runs forever looking for threads to destroy
+ *	(when they request that they be destroyed, of course).
+ */
+static void __attribute__((noreturn)) reaper_thread_continue(void)
+{
+	for (;;) {
+		thread_t thread;
+		spl_t s;
+
+		s = splsched();
+		simple_lock(&reaper_lock);
+
+		/* Drain the queue, dropping the lock around the
+		 * operations that may block. */
+		while ((thread = (thread_t) dequeue_head(&reaper_queue))
+							!= THREAD_NULL) {
+			simple_unlock(&reaper_lock);
+			(void) splx(s);
+
+			(void) thread_dowait(thread, TRUE);	/* may block */
+			thread_deallocate(thread);		/* may block */
+
+			s = splsched();
+			simple_lock(&reaper_lock);
+		}
+
+		/* Queue empty: sleep until more threads are enqueued. */
+		assert_wait((event_t) &reaper_queue, FALSE);
+		simple_unlock(&reaper_lock);
+		(void) splx(s);
+		counter(c_reaper_thread_block++);
+		thread_block(reaper_thread_continue);
+	}
+}
+
+/* Entry point for the reaper kernel thread; never returns. */
+void reaper_thread(void)
+{
+	reaper_thread_continue();
+	/*NOTREACHED*/
+}
+
+#if	MACH_HOST
+/*
+ *	thread_assign:
+ *
+ *	Change processor set assignment.
+ *	Caller must hold an extra reference to the thread (if this is
+ *	called directly from the ipc interface, this is an operation
+ *	in progress reference).  Caller must hold no locks -- this may block.
+ */
+
+kern_return_t
+thread_assign(thread_t	thread,
+	      processor_set_t	new_pset)
+{
+	if (thread == THREAD_NULL || new_pset == PROCESSOR_SET_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* Freeze first so no competing assignment can interleave;
+	 * thread_doassign(..., TRUE) releases the freeze. */
+	thread_freeze(thread);
+	thread_doassign(thread, new_pset, TRUE);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	thread_freeze:
+ *
+ *	Freeze thread's assignment.  Prelude to assigning thread.
+ *	Only one freeze may be held per thread; a second caller
+ *	sleeps until the first releases it (thread_unfreeze).
+ */
+void
+thread_freeze(thread_t	thread)
+{
+	spl_t	s;
+	/*
+	 *	Freeze the assignment, deferring to a prior freeze.
+	 */
+	s = splsched();
+	thread_lock(thread);
+	while (thread->may_assign == FALSE) {
+		thread->assign_active = TRUE;
+		thread_sleep((event_t) &thread->assign_active,
+			simple_lock_addr(thread->lock), FALSE);
+		thread_lock(thread);
+	}
+	thread->may_assign = FALSE;
+	thread_unlock(thread);
+	(void) splx(s);
+}
+
+/*
+ *	thread_unfreeze: release freeze on thread's assignment,
+ *	waking any waiter blocked in thread_freeze.
+ */
+void
+thread_unfreeze(
+	thread_t	thread)
+{
+	spl_t	s;
+
+	s = splsched();
+	thread_lock(thread);
+	thread->may_assign = TRUE;
+	if (thread->assign_active) {
+		thread->assign_active = FALSE;
+		thread_wakeup((event_t)&thread->assign_active);
+	}
+	thread_unlock(thread);
+	splx(s);
+}
+
+/*
+ *	thread_doassign:
+ *
+ *	Actually do thread assignment.  thread_freeze must have been
+ *	called on the thread.  release_freeze argument indicates whether
+ *	to release freeze on thread.
+ */
+
+void
+thread_doassign(
+	thread_t		thread,
+	processor_set_t		new_pset,
+	boolean_t		release_freeze)
+{
+	processor_set_t	pset;
+	boolean_t	old_empty, new_empty;
+	boolean_t	recompute_pri = FALSE;
+	spl_t		s;
+
+	/*
+	 *	Check for silly no-op.
+	 */
+	pset = thread->processor_set;
+	if (pset == new_pset) {
+		if (release_freeze)
+			thread_unfreeze(thread);
+		return;
+	}
+	/*
+	 *	Suspend the thread and stop it if it's not the current thread.
+	 */
+	thread_hold(thread);
+	if (thread != current_thread())
+		(void) thread_dowait(thread, TRUE);
+
+	/*
+	 *	Lock both psets now, use ordering to avoid deadlocks.
+	 */
+Restart:
+	/* Order by address so two concurrent assignments can't
+	 * take the pair of locks in opposite order. */
+	if ((vm_offset_t)pset < (vm_offset_t)new_pset) {
+	    pset_lock(pset);
+	    pset_lock(new_pset);
+	}
+	else {
+	    pset_lock(new_pset);
+	    pset_lock(pset);
+	}
+
+	/*
+	 *	Check if new_pset is ok to assign to.  If not, reassign
+	 *	to default_pset.
+	 */
+	if (!new_pset->active) {
+	    pset_unlock(pset);
+	    pset_unlock(new_pset);
+	    new_pset = &default_pset;
+	    goto Restart;
+	}
+
+	pset_reference(new_pset);
+
+	/*
+	 *	Grab the thread lock and move the thread.
+	 *	Then drop the lock on the old pset and the thread's
+	 *	reference to it.
+	 */
+	s = splsched();
+	thread_lock(thread);
+
+	thread_change_psets(thread, pset, new_pset);
+
+	old_empty = pset->empty;
+	new_empty = new_pset->empty;
+
+	pset_unlock(pset);
+
+	/*
+	 *	Reset policy and priorities if needed.
+	 */
+#if	MACH_FIXPRI
+	/* Fall back to timesharing if the new pset does not allow
+	 * the thread's current policy. */
+	if ((thread->policy & new_pset->policies) == 0) {
+	    thread->policy = POLICY_TIMESHARE;
+	    recompute_pri = TRUE;
+	}
+#endif	/* MACH_FIXPRI */
+
+	if (thread->max_priority < new_pset->max_priority) {
+	    thread->max_priority = new_pset->max_priority;
+	    if (thread->priority < thread->max_priority) {
+		thread->priority = thread->max_priority;
+		recompute_pri = TRUE;
+	    }
+	    else {
+		if ((thread->depress_priority >= 0) &&
+		    (thread->depress_priority < thread->max_priority)) {
+			thread->depress_priority = thread->max_priority;
+		}
+	    }
+	}
+
+	pset_unlock(new_pset);
+
+	if (recompute_pri)
+		compute_priority(thread, TRUE);
+
+	if (release_freeze) {
+		/* Inline equivalent of thread_unfreeze; the thread
+		 * lock is already held here. */
+		thread->may_assign = TRUE;
+		if (thread->assign_active) {
+			thread->assign_active = FALSE;
+			thread_wakeup((event_t)&thread->assign_active);
+		}
+	}
+
+	thread_unlock(thread);
+	splx(s);
+
+	pset_deallocate(pset);
+
+	/*
+	 *	Figure out hold status of thread.  Threads assigned to empty
+	 *	psets must be held.  Therefore:
+	 *		If old pset was empty release its hold.
+	 *		Release our hold from above unless new pset is empty.
+	 */
+
+	if (old_empty)
+		thread_release(thread);
+	if (!new_empty)
+		thread_release(thread);
+
+	/*
+	 *	If current_thread is assigned, context switch to force
+	 *	assignment to happen.  This also causes hold to take
+	 *	effect if the new pset is empty.
+	 */
+	if (thread == current_thread()) {
+		s = splsched();
+		ast_on(cpu_number(), AST_BLOCK);
+		(void) splx(s);
+	}
+}
+#else	/* MACH_HOST */
+/* Without MACH_HOST there is only the default processor set, so
+ * reassignment is never possible. */
+kern_return_t
+thread_assign(
+	thread_t	thread,
+	processor_set_t	new_pset)
+{
+	return KERN_FAILURE;
+}
+#endif	/* MACH_HOST */
+
+/*
+ *	thread_assign_default:
+ *
+ *	Special version of thread_assign for assigning threads to default
+ *	processor set.
+ */
+kern_return_t
+thread_assign_default(
+	thread_t	thread)
+{
+	return thread_assign(thread, &default_pset);
+}
+
+/*
+ *	thread_get_assignment
+ *
+ *	Return current assignment for this thread.
+ *	Takes a reference on the returned pset for the caller.
+ */
+kern_return_t thread_get_assignment(
+	thread_t	thread,
+	processor_set_t	*pset)
+{
+	if (thread == THREAD_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	*pset = thread->processor_set;
+	pset_reference(*pset);
+	return KERN_SUCCESS;
+}
+
+/*
+ *	thread_priority:
+ *
+ *	Set priority (and possibly max priority) for thread.
+ *	Fails if the requested priority would exceed the thread's
+ *	max priority.  (Mach priorities: lower number = stronger.)
+ */
+kern_return_t
+thread_priority(
+	thread_t	thread,
+	int		priority,
+	boolean_t	set_max)
+{
+	spl_t		s;
+	kern_return_t	ret = KERN_SUCCESS;
+
+	if ((thread == THREAD_NULL) || invalid_pri(priority))
+		return KERN_INVALID_ARGUMENT;
+
+	s = splsched();
+	thread_lock(thread);
+
+	/*
+	 *	Check for violation of max priority
+	 */
+	if (priority < thread->max_priority)
+		ret = KERN_FAILURE;
+	else {
+		/*
+		 *	Set priorities.  If a depression is in progress,
+		 *	change the priority to restore.
+		 */
+		if (thread->depress_priority >= 0)
+			thread->depress_priority = priority;
+
+		else {
+			thread->priority = priority;
+			compute_priority(thread, TRUE);
+		}
+
+		if (set_max)
+			thread->max_priority = priority;
+	}
+	thread_unlock(thread);
+	(void) splx(s);
+
+	return ret;
+}
+
+/*
+ *	thread_set_own_priority:
+ *
+ *	Internal use only; sets the priority of the calling thread.
+ *	Will adjust max_priority if necessary.
+ */
+void
+thread_set_own_priority(
+	int	priority)
+{
+	spl_t	s;
+	thread_t thread = current_thread();
+
+	s = splsched();
+	thread_lock(thread);
+
+	/* Raise max_priority if needed so the new priority is legal. */
+	if (priority < thread->max_priority)
+		thread->max_priority = priority;
+	thread->priority = priority;
+	compute_priority(thread, TRUE);
+
+	thread_unlock(thread);
+	(void) splx(s);
+}
+
+/*
+ *	thread_max_priority:
+ *
+ *	Reset the max priority for a thread.  The pset argument must
+ *	match the thread's current processor set (privilege check).
+ */
+kern_return_t
+thread_max_priority(
+	thread_t	thread,
+	processor_set_t	pset,
+	int		max_priority)
+{
+	spl_t		s;
+	kern_return_t	ret = KERN_SUCCESS;
+
+	if ((thread == THREAD_NULL) || (pset == PROCESSOR_SET_NULL) ||
+		invalid_pri(max_priority))
+		return KERN_INVALID_ARGUMENT;
+
+	s = splsched();
+	thread_lock(thread);
+
+#if	MACH_HOST
+	/*
+	 *	Check for wrong processor set.
+	 */
+	if (pset != thread->processor_set)
+		ret = KERN_FAILURE;
+
+	else {
+#endif	/* MACH_HOST */
+		thread->max_priority = max_priority;
+
+		/*
+		 *	Reset priority if it violates new max priority
+		 */
+		if (max_priority > thread->priority) {
+			thread->priority = max_priority;
+
+			compute_priority(thread, TRUE);
+		}
+		else {
+			if (thread->depress_priority >= 0 &&
+			    max_priority > thread->depress_priority)
+				thread->depress_priority = max_priority;
+		}
+#if	MACH_HOST
+	}
+#endif	/* MACH_HOST */
+
+	thread_unlock(thread);
+	(void) splx(s);
+
+	return ret;
+}
+
+/*
+ *	thread_policy:
+ *
+ *	Set scheduling policy for thread.  For POLICY_FIXEDPRI the
+ *	data argument is the quantum (apparently in milliseconds,
+ *	converted to ticks rounding up -- TODO confirm units).
+ */
+kern_return_t
+thread_policy(
+	thread_t	thread,
+	int		policy,
+	int		data)
+{
+#if	MACH_FIXPRI
+	kern_return_t	ret = KERN_SUCCESS;
+	int		temp;
+	spl_t		s;
+#endif	/* MACH_FIXPRI */
+
+	if ((thread == THREAD_NULL) || invalid_policy(policy))
+		return KERN_INVALID_ARGUMENT;
+
+#if	MACH_FIXPRI
+	s = splsched();
+	thread_lock(thread);
+
+	/*
+	 *	Check if changing policy.
+	 */
+	if (policy == thread->policy) {
+	    /*
+	     *	Just changing data.  This is meaningless for
+	     *	timesharing, quantum for fixed priority (but
+	     *	has no effect until current quantum runs out).
+	     */
+	    if (policy == POLICY_FIXEDPRI) {
+		/* Round the quantum up to a whole number of ticks. */
+		temp = data * 1000;
+		if (temp % tick)
+			temp += tick;
+		thread->sched_data = temp/tick;
+	    }
+	}
+	else {
+	    /*
+	     *	Changing policy.  Check if new policy is allowed.
+	     */
+	    if ((thread->processor_set->policies & policy) == 0)
+		    ret = KERN_FAILURE;
+	    else {
+		/*
+		 *	Changing policy.  Save data and calculate new
+		 *	priority.
+		 */
+		thread->policy = policy;
+		if (policy == POLICY_FIXEDPRI) {
+			temp = data * 1000;
+			if (temp % tick)
+				temp += tick;
+			thread->sched_data = temp/tick;
+		}
+		compute_priority(thread, TRUE);
+	    }
+	}
+	thread_unlock(thread);
+	(void) splx(s);
+
+	return ret;
+#else	/* MACH_FIXPRI */
+	/* Only timesharing exists without MACH_FIXPRI. */
+	if (policy == POLICY_TIMESHARE)
+		return KERN_SUCCESS;
+	else
+		return KERN_FAILURE;
+#endif	/* MACH_FIXPRI */
+}
+
+/*
+ *	thread_wire:
+ *
+ *	Specify that the target thread must always be able
+ *	to run and to allocate memory.
+ */
+kern_return_t
+thread_wire(
+	host_t		host,
+	thread_t	thread,
+	boolean_t	wired)
+{
+	spl_t		s;
+
+	if (host == HOST_NULL)
+	    return KERN_INVALID_ARGUMENT;
+
+	if (thread == THREAD_NULL)
+	    return KERN_INVALID_ARGUMENT;
+
+	/*
+	 *	This implementation only works for the current thread.
+	 *	See stack_privilege.
+	 */
+	if (thread != current_thread())
+	    return KERN_INVALID_ARGUMENT;
+
+	s = splsched();
+	thread_lock(thread);
+
+	if (wired) {
+	    thread->vm_privilege = 1;
+	    stack_privilege(thread);
+	}
+	else {
+	    thread->vm_privilege = 0;
+/*XXX	    stack_unprivilege(thread); */
+	    thread->stack_privilege = 0;
+	}
+
+	thread_unlock(thread);
+	splx(s);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	thread_collect_scan:
+ *
+ *	Attempt to free resources owned by threads.
+ *	pcb_collect doesn't do anything yet.
+ */
+
+static void thread_collect_scan(void)
+{
+	thread_t		thread, prev_thread;
+	processor_set_t		pset, prev_pset;
+
+	/* prev_thread/prev_pset hold references that keep our queue
+	 * position valid while the locks are dropped around
+	 * pcb_collect; they are released one iteration late. */
+	prev_thread = THREAD_NULL;
+	prev_pset = PROCESSOR_SET_NULL;
+
+	simple_lock(&all_psets_lock);
+	queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+		pset_lock(pset);
+		queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
+			spl_t	s = splsched();
+			thread_lock(thread);
+
+			/*
+			 *	Only collect threads which are
+			 *	not runnable and are swapped.
+			 */
+
+			if ((thread->state & (TH_RUN|TH_SWAPPED))
+							== TH_SWAPPED) {
+				thread->ref_count++;
+				thread_unlock(thread);
+				(void) splx(s);
+				pset->ref_count++;
+				pset_unlock(pset);
+				simple_unlock(&all_psets_lock);
+
+				pcb_collect(thread);
+
+				if (prev_thread != THREAD_NULL)
+					thread_deallocate(prev_thread);
+				prev_thread = thread;
+
+				if (prev_pset != PROCESSOR_SET_NULL)
+					pset_deallocate(prev_pset);
+				prev_pset = pset;
+
+				simple_lock(&all_psets_lock);
+				pset_lock(pset);
+			} else {
+				thread_unlock(thread);
+				(void) splx(s);
+			}
+		}
+		pset_unlock(pset);
+	}
+	simple_unlock(&all_psets_lock);
+
+	/* Drop the references held over from the last collection. */
+	if (prev_thread != THREAD_NULL)
+		thread_deallocate(prev_thread);
+	if (prev_pset != PROCESSOR_SET_NULL)
+		pset_deallocate(prev_pset);
+}
+
+boolean_t thread_collect_allowed = TRUE;	/* master switch for collection */
+unsigned thread_collect_last_tick = 0;		/* sched_tick of the last scan */
+unsigned thread_collect_max_rate = 0;		/* in ticks */
+
+/*
+ *	consider_thread_collect:
+ *
+ *	Called by the pageout daemon when the system needs more free pages.
+ *	Rate-limits thread_collect_scan to at most once per
+ *	thread_collect_max_rate ticks.
+ */
+
+void consider_thread_collect(void)
+{
+	/*
+	 *	By default, don't attempt thread collection more frequently
+	 *	than once a second.
+	 */
+
+	if (thread_collect_max_rate == 0)
+		thread_collect_max_rate = hz;
+
+	if (thread_collect_allowed &&
+	    (sched_tick >
+	     (thread_collect_last_tick +
+	      thread_collect_max_rate / (hz / 1)))) {
+		thread_collect_last_tick = sched_tick;
+		thread_collect_scan();
+	}
+}
+
+#if	MACH_DEBUG
+
+/*
+ *	stack_usage:
+ *
+ *	Estimate how much of a kernel stack was actually used by
+ *	scanning for the first overwritten STACK_MARKER word.
+ *	Assumes stacks grow downward from the top of the region.
+ */
+static vm_size_t stack_usage(vm_offset_t stack)
+{
+	unsigned i;
+
+	for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
+	    if (((unsigned int *)stack)[i] != STACK_MARKER)
+		break;
+
+	return	KERNEL_STACK_SIZE - i * sizeof(unsigned int);
+}
+
+/*
+ *	Machine-dependent code should call stack_init
+ *	before doing its own initialization of the stack.
+ *	Fills the stack with STACK_MARKER so stack_usage can
+ *	later measure the high-water mark.
+ */
+
+void stack_init(
+	vm_offset_t stack)
+{
+	if (stack_check_usage) {
+	    unsigned i;
+
+	    for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
+		((unsigned int *)stack)[i] = STACK_MARKER;
+	}
+}
+
+/*
+ *	Machine-dependent code should call stack_finalize
+ *	before releasing the stack memory.
+ *	Records the stack's usage in the global maximum.
+ */
+
+void stack_finalize(
+	vm_offset_t stack)
+{
+	if (stack_check_usage) {
+	    vm_size_t used = stack_usage(stack);
+
+	    simple_lock(&stack_usage_lock);
+	    if (used > stack_max_usage)
+		stack_max_usage = used;
+	    simple_unlock(&stack_usage_lock);
+	}
+}
+
+#ifndef	MACHINE_STACK
+/*
+ *	stack_statistics:
+ *
+ *	Return statistics on cached kernel stacks.
+ *	*maxusagep must be initialized by the caller;
+ *	it is only ever raised, never reset.
+ */
+
+static void stack_statistics(
+	natural_t *totalp,
+	vm_size_t *maxusagep)
+{
+	spl_t	s;
+
+	s = splsched();
+	stack_lock();
+	if (stack_check_usage) {
+		vm_offset_t stack;
+
+		/*
+		 *	This is pretty expensive to do at splsched,
+		 *	but it only happens when someone makes
+		 *	a debugging call, so it should be OK.
+		 */
+
+		for (stack = stack_free_list; stack != 0;
+		     stack = stack_next(stack)) {
+			vm_size_t usage = stack_usage(stack);
+
+			if (usage > *maxusagep)
+				*maxusagep = usage;
+		}
+	}
+
+	*totalp = stack_free_count;
+	stack_unlock();
+	(void) splx(s);
+}
+#endif	/* MACHINE_STACK */
+
+/*
+ *	host_stack_usage:
+ *
+ *	Return kernel-stack usage statistics for the host.
+ *	reservedp and maxstackp are always reported as zero here.
+ */
+kern_return_t host_stack_usage(
+	host_t		host,
+	vm_size_t	*reservedp,
+	unsigned int	*totalp,
+	vm_size_t	*spacep,
+	vm_size_t	*residentp,
+	vm_size_t	*maxusagep,
+	vm_offset_t	*maxstackp)
+{
+	natural_t	total;
+	vm_size_t	maxusage;
+
+	if (host == HOST_NULL)
+		return KERN_INVALID_HOST;
+
+	simple_lock(&stack_usage_lock);
+	maxusage = stack_max_usage;
+	simple_unlock(&stack_usage_lock);
+
+	stack_statistics(&total, &maxusage);
+
+	*reservedp = 0;
+	*totalp = total;
+	*spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
+	*maxusagep = maxusage;
+	*maxstackp = 0;
+	return KERN_SUCCESS;
+}
+
+/*
+ *	processor_set_stack_usage:
+ *
+ *	Return kernel-stack usage statistics for all threads in the
+ *	processor set.  Snapshots the thread list (taking references)
+ *	so the pset lock need not be held while scanning stacks.
+ */
+kern_return_t processor_set_stack_usage(
+	processor_set_t	pset,
+	unsigned int	*totalp,
+	vm_size_t	*spacep,
+	vm_size_t	*residentp,
+	vm_size_t	*maxusagep,
+	vm_offset_t	*maxstackp)
+{
+	unsigned int total;
+	vm_size_t maxusage;
+	vm_offset_t maxstack;
+
+	thread_t *threads;
+	thread_t tmp_thread;
+
+	unsigned int actual;	/* this many things */
+	unsigned int i;
+
+	vm_size_t size, size_needed;
+	vm_offset_t addr;
+
+	if (pset == PROCESSOR_SET_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	size = 0; addr = 0;
+
+	/* Retry loop: the thread count can change while the pset is
+	 * unlocked for allocation, so re-check until the buffer fits. */
+	for (;;) {
+		pset_lock(pset);
+		if (!pset->active) {
+			pset_unlock(pset);
+			return KERN_INVALID_ARGUMENT;
+		}
+
+		actual = pset->thread_count;
+
+		/* do we have the memory we need? */
+
+		size_needed = actual * sizeof(thread_t);
+		if (size_needed <= size)
+			break;
+
+		/* unlock the pset and allocate more memory */
+		pset_unlock(pset);
+
+		if (size != 0)
+			kfree(addr, size);
+
+		assert(size_needed > 0);
+		size = size_needed;
+
+		addr = kalloc(size);
+		if (addr == 0)
+			return KERN_RESOURCE_SHORTAGE;
+	}
+
+	/* OK, have memory and the processor_set is locked & active */
+
+	threads = (thread_t *) addr;
+	for (i = 0, tmp_thread = (thread_t) queue_first(&pset->threads);
+	     i < actual;
+	     i++,
+	     tmp_thread = (thread_t) queue_next(&tmp_thread->pset_threads)) {
+		thread_reference(tmp_thread);
+		threads[i] = tmp_thread;
+	}
+	assert(queue_end(&pset->threads, (queue_entry_t) tmp_thread));
+
+	/* can unlock processor set now that we have the thread refs */
+	pset_unlock(pset);
+
+	/* calculate maxusage and free thread references */
+
+	total = 0;
+	maxusage = 0;
+	maxstack = 0;
+	for (i = 0; i < actual; i++) {
+		thread_t thread = threads[i];
+		vm_offset_t stack = 0;
+
+		/*
+		 *	thread->kernel_stack is only accurate if the
+		 *	thread isn't swapped and is not executing.
+		 *
+		 *	Of course, we don't have the appropriate locks
+		 *	for these shenanigans.
+		 */
+
+		if ((thread->state & TH_SWAPPED) == 0) {
+			int cpu;
+
+			stack = thread->kernel_stack;
+
+			/* An executing thread uses the per-cpu active
+			 * stack rather than thread->kernel_stack. */
+			for (cpu = 0; cpu < smp_get_numcpus(); cpu++)
+				if (percpu_array[cpu].active_thread == thread) {
+					stack = percpu_array[cpu].active_stack;
+					break;
+				}
+		}
+
+		if (stack != 0) {
+			total++;
+
+			if (stack_check_usage) {
+				vm_size_t usage = stack_usage(stack);
+
+				if (usage > maxusage) {
+					maxusage = usage;
+					maxstack = (vm_offset_t) thread;
+				}
+			}
+		}
+
+		thread_deallocate(thread);
+	}
+
+	if (size != 0)
+		kfree(addr, size);
+
+	*totalp = total;
+	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
+	*maxusagep = maxusage;
+	*maxstackp = maxstack;
+	return KERN_SUCCESS;
+}
+
+/*
+ *	Useful in the debugger:
+ *	Print thread counts for the default processor set.
+ *	No locking -- intended for interactive kernel debugging only.
+ */
+void
+thread_stats(void)
+{
+	thread_t thread;
+	int total = 0, rpcreply = 0;
+
+	queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) {
+		total++;
+		if (thread->ith_rpc_reply != IP_NULL)
+			rpcreply++;
+	}
+
+	printf("%d total threads.\n", total);
+	printf("%d using rpc_reply.\n", rpcreply);
+}
+#endif	/* MACH_DEBUG */
+
+/*
+ *	thread_set_name
+ *
+ *	Set the name of thread THREAD to NAME.
+ *	Truncates to the buffer size and always NUL-terminates.
+ *	NOTE(review): no thread lock is taken around the copy --
+ *	presumably concurrent readers tolerate a torn name; confirm.
+ */
+kern_return_t
+thread_set_name(
+	thread_t			thread,
+	const_kernel_debug_name_t	name)
+{
+	strncpy(thread->name, name, sizeof thread->name - 1);
+	thread->name[sizeof thread->name - 1] = '\0';
+	return KERN_SUCCESS;
+}
diff --git a/kern/thread.h b/kern/thread.h
new file mode 100644
index 0000000..81d3292
--- /dev/null
+++ b/kern/thread.h
@@ -0,0 +1,437 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: thread.h
+ * Author: Avadis Tevanian, Jr.
+ *
+ * This file contains the structure definitions for threads.
+ *
+ */
+
+#ifndef _KERN_THREAD_H_
+#define _KERN_THREAD_H_
+
+#include <mach/boolean.h>
+#include <mach/thread_info.h>
+#include <mach/thread_status.h>
+#include <mach/machine/vm_types.h>
+#include <mach/message.h>
+#include <mach/port.h>
+#include <mach/vm_prot.h>
+#include <kern/ast.h>
+#include <kern/mach_clock.h>
+#include <kern/queue.h>
+#include <kern/pc_sample.h>
+#include <kern/processor.h>
+#include <kern/sched_prim.h> /* event_t, continuation_t */
+#include <kern/timer.h>
+#include <kern/lock.h>
+#include <kern/sched.h>
+#include <kern/task.h> /* for current_space(), current_map() */
+#include <machine/thread.h>
+#include <ipc/ipc_kmsg_queue.h>
+
+/*
+ * Thread name buffer size. Use the same size as the task so
+ * the thread can inherit the task's name.
+ */
+#define THREAD_NAME_SIZE TASK_NAME_SIZE
+
+struct thread {
+ /* Run queues */
+ queue_chain_t links; /* current run queue links */
+ run_queue_t runq; /* run queue p is on SEE BELOW */
+/*
+ * NOTE: The runq field in the thread structure has an unusual
+ * locking protocol. If its value is RUN_QUEUE_NULL, then it is
+ * locked by the thread_lock, but if its value is something else
+ * (i.e. a run_queue) then it is locked by that run_queue's lock.
+ */
+
+ /* Task information */
+ task_t task; /* Task to which I belong */
+ queue_chain_t thread_list; /* list of threads in task */
+
+ /* Flags */
+ /* The flags are grouped here, but documented at the original
+ position. */
+ union {
+ struct {
+ unsigned state:16;
+ unsigned wake_active:1;
+ unsigned active:1;
+ };
+ event_t event_key;
+/* These keys can be used with thread_wakeup and friends. */
+#define TH_EV_WAKE_ACTIVE(t) ((event_t) (&(t)->event_key + 0))
+#define TH_EV_STATE(t) ((event_t) (&(t)->event_key + 1))
+ };
+
+ /* Thread bookkeeping */
+ queue_chain_t pset_threads; /* list of all threads in proc set*/
+
+ /* Self-preservation */
+ decl_simple_lock_data(,lock)
+ int ref_count; /* number of references to me */
+
+ /* Hardware state */
+ pcb_t pcb; /* hardware pcb & machine state */
+ vm_offset_t kernel_stack; /* accurate only if the thread is
+ not swapped and not executing */
+ vm_offset_t stack_privilege;/* reserved kernel stack */
+
+ /* Swapping information */
+ continuation_t swap_func; /* start here after swapin */
+
+ /* Blocking information */
+ event_t wait_event; /* event we are waiting on */
+ int suspend_count; /* internal use only */
+ kern_return_t wait_result; /* outcome of wait -
+ may be examined by this thread
+ WITHOUT locking */
+ /* Defined above */
+ /* boolean_t wake_active; someone is waiting for this
+ thread to become suspended */
+ /* int state; Thread state: */
+/*
+ * Thread states [bits or'ed]
+ */
+#define TH_WAIT 0x01 /* thread is queued for waiting */
+#define TH_SUSP 0x02 /* thread has been asked to stop */
+#define TH_RUN 0x04 /* thread is running or on runq */
+#define TH_UNINT	0x08	/* thread is waiting uninterruptibly */
+#define TH_HALTED 0x10 /* thread is halted at clean point ? */
+
+#define TH_IDLE 0x80 /* thread is an idle thread */
+
+#define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
+
+#define TH_SWAPPED 0x0100 /* thread has no kernel stack */
+#define TH_SW_COMING_IN 0x0200 /* thread is waiting for kernel stack */
+
+#define TH_SWAP_STATE (TH_SWAPPED | TH_SW_COMING_IN)
+
+ /* Scheduling information */
+ int priority; /* thread's priority */
+ int max_priority; /* maximum priority */
+ int sched_pri; /* scheduled (computed) priority */
+#if MACH_FIXPRI
+ int sched_data; /* for use by policy */
+ int policy; /* scheduling policy */
+#endif /* MACH_FIXPRI */
+ int depress_priority; /* depressed from this priority */
+ unsigned int cpu_usage; /* exp. decaying cpu usage [%cpu] */
+ unsigned int sched_usage; /* load-weighted cpu usage [sched] */
+ unsigned int sched_stamp; /* last time priority was updated */
+
+ /* VM global variables */
+
+ vm_offset_t recover; /* page fault recovery (copyin/out) */
+ unsigned int vm_privilege; /* Can use reserved memory?
+ Implemented as a counter */
+
+ /* User-visible scheduling state */
+ int user_stop_count; /* outstanding stops */
+
+ /* IPC data structures */
+ struct thread *ith_next, *ith_prev;
+ mach_msg_return_t ith_state;
+ union {
+ mach_msg_size_t msize; /* max size for recvd msg */
+ struct ipc_kmsg *kmsg; /* received message */
+ } data;
+ mach_port_seqno_t ith_seqno; /* seqno of recvd message */
+
+ /* This queue is used only when destroying messages:
+ it prevents nasty recursion problems when destroying one message
+ causes other messages to be destroyed.
+ This queue should always be empty under normal circumstances.
+ See ipc_kmsg_destroy() for more details. */
+ struct ipc_kmsg_queue ith_messages;
+
+ decl_simple_lock_data(, ith_lock_data)
+ struct ipc_port *ith_self; /* not a right, doesn't hold ref */
+ struct ipc_port *ith_sself; /* a send right */
+ struct ipc_port *ith_exception; /* a send right */
+
+ mach_port_name_t ith_mig_reply; /* reply port for mig */
+ struct ipc_port *ith_rpc_reply; /* reply port for kernel RPCs */
+
+ /* State saved when thread's stack is discarded */
+ union {
+ struct {
+ mach_msg_user_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t rcv_size;
+ mach_msg_timeout_t timeout;
+ mach_port_name_t notify;
+ struct ipc_object *object;
+ struct ipc_mqueue *mqueue;
+ } receive;
+ struct {
+ struct ipc_port *port;
+ int exc;
+ int code;
+ long subcode;
+ } exception;
+ void *other; /* catch-all for other state */
+ } saved;
+
+ /* Timing data structures */
+ timer_data_t user_timer; /* user mode timer */
+ timer_data_t system_timer; /* system mode timer */
+ timer_save_data_t user_timer_save; /* saved user timer value */
+ timer_save_data_t system_timer_save; /* saved sys timer val. */
+ unsigned int cpu_delta; /* cpu usage since last update */
+ unsigned int sched_delta; /* weighted cpu usage since update */
+
+ /* Creation time stamp */
+ time_value64_t creation_time;
+
+ /* Time-outs */
+ timer_elt_data_t timer; /* timer for thread */
+ timer_elt_data_t depress_timer; /* timer for priority depression */
+
+ /* Ast/Halt data structures */
+ /* Defined above */
+ /* boolean_t active; how alive is the thread */
+ int ast; /* ast's needed. See ast.h */
+
+ /* Processor data structures */
+ processor_set_t processor_set; /* assigned processor set */
+ processor_t bound_processor; /* bound to processor ?*/
+
+ sample_control_t pc_sample;
+
+#if MACH_HOST
+ boolean_t may_assign; /* may assignment change? */
+ boolean_t assign_active; /* someone waiting for may_assign */
+#endif /* MACH_HOST */
+
+#if NCPUS > 1
+ processor_t last_processor; /* processor this last ran on */
+#endif /* NCPUS > 1 */
+
+#if MACH_LOCK_MON
+ unsigned lock_stack;
+#endif
+
+ char name[THREAD_NAME_SIZE];
+};
+
+#include <kern/cpu_number.h>
+
+/* typedef of thread_t is in kern/kern_types.h */
+typedef struct thread_shuttle *thread_shuttle_t;
+#define THREAD_NULL ((thread_t) 0)
+#define THREAD_SHUTTLE_NULL ((thread_shuttle_t)0)
+
+#define ith_msize data.msize
+#define ith_kmsg data.kmsg
+#define ith_wait_result wait_result
+
+#define ith_msg saved.receive.msg
+#define ith_option saved.receive.option
+#define ith_rcv_size saved.receive.rcv_size
+#define ith_timeout saved.receive.timeout
+#define ith_notify saved.receive.notify
+#define ith_object saved.receive.object
+#define ith_mqueue saved.receive.mqueue
+
+#define ith_port saved.exception.port
+#define ith_exc saved.exception.exc
+#define ith_exc_code saved.exception.code
+#define ith_exc_subcode saved.exception.subcode
+
+#define ith_other saved.other
+
+#ifndef _KERN_KERN_TYPES_H_
+typedef struct thread *thread_t;
+
+#define THREAD_NULL ((thread_t) 0)
+
+typedef mach_port_t *thread_array_t;
+#endif /* _KERN_KERN_TYPES_H_ */
+
+#ifdef KERNEL
+/*
+ * User routines
+ */
+
+extern kern_return_t thread_create(
+ task_t parent_task,
+ thread_t *child_thread);
+extern kern_return_t thread_terminate(
+ thread_t thread);
+extern kern_return_t thread_terminate_release(
+ thread_t thread,
+ task_t task,
+ mach_port_name_t thread_name,
+ mach_port_name_t reply_port,
+ vm_offset_t address,
+ vm_size_t size);
+extern kern_return_t thread_suspend(
+ thread_t thread);
+extern kern_return_t thread_resume(
+ thread_t thread);
+extern kern_return_t thread_abort(
+ thread_t thread);
+extern void thread_start(
+ thread_t thread,
+ continuation_t start);
+extern thread_t kernel_thread(
+ task_t task,
+ continuation_t start,
+ void *arg);
+extern kern_return_t thread_priority(
+ thread_t thread,
+ int priority,
+ boolean_t set_max);
+extern void thread_set_own_priority(
+ int priority);
+extern kern_return_t thread_max_priority(
+ thread_t thread,
+ processor_set_t pset,
+ int max_priority);
+extern kern_return_t thread_policy(
+ thread_t thread,
+ int policy,
+ int data);
+extern void consider_thread_collect(
+ void);
+extern void stack_privilege(
+ thread_t thread);
+extern kern_return_t thread_get_state(
+ thread_t thread,
+ int flavor,
+ thread_state_t old_state,
+ natural_t *old_state_count);
+extern kern_return_t thread_set_state(
+ thread_t thread,
+ int flavor,
+ thread_state_t new_state,
+ natural_t new_state_count);
+extern kern_return_t thread_get_special_port(
+ thread_t thread,
+ int which,
+ struct ipc_port **portp);
+extern kern_return_t thread_set_special_port(
+ thread_t thread,
+ int which,
+ struct ipc_port *port);
+extern kern_return_t thread_info(
+ thread_t thread,
+ int flavor,
+ thread_info_t thread_info_out,
+ natural_t *thread_info_count);
+extern kern_return_t thread_assign(
+ thread_t thread,
+ processor_set_t new_pset);
+extern kern_return_t thread_assign_default(
+ thread_t thread);
+extern void stack_collect(void);
+#endif
+
+/*
+ * Kernel-only routines
+ */
+
+extern void thread_init(void);
+extern void thread_reference(thread_t);
+extern void thread_deallocate(thread_t);
+extern void thread_hold(thread_t);
+extern kern_return_t thread_dowait(
+ thread_t thread,
+ boolean_t must_halt);
+extern void thread_release(thread_t);
+extern kern_return_t thread_halt(
+ thread_t thread,
+ boolean_t must_halt);
+extern void thread_halt_self(continuation_t);
+extern void thread_force_terminate(thread_t);
+extern thread_t kernel_thread(
+ task_t task,
+ void (*start)(void),
+ void * arg);
+
+extern void reaper_thread(void) __attribute__((noreturn));
+
+#if MACH_HOST
+extern void thread_freeze(
+ thread_t thread);
+extern void thread_doassign(
+ thread_t thread,
+ processor_set_t new_pset,
+ boolean_t release_freeze);
+extern void thread_unfreeze(
+ thread_t thread);
+#endif /* MACH_HOST */
+
+/*
+ * Macro-defined routines
+ */
+
+#define thread_pcb(th) ((th)->pcb)
+
+/* Shall be taken at splsched only */
+#ifdef MACH_LDEBUG
+#define thread_lock(th) do { \
+ assert_splsched(); \
+ simple_lock_nocheck(&(th)->lock); \
+} while (0)
+#define thread_unlock(th) do { \
+ assert_splsched(); \
+ simple_unlock_nocheck(&(th)->lock); \
+} while (0)
+#else
+#define thread_lock(th) simple_lock_nocheck(&(th)->lock)
+#define thread_unlock(th) simple_unlock_nocheck(&(th)->lock)
+#endif
+
+#define thread_should_halt(thread) \
+ ((thread)->ast & (AST_HALT|AST_TERMINATE))
+
+/*
+ * Machine specific implementations of the current thread macro
+ * designate this by defining CURRENT_THREAD.
+ */
+#ifndef CURRENT_THREAD
+#define current_thread() (percpu_get(thread_t, active_thread))
+#endif /* CURRENT_THREAD */
+
+#define current_stack() (percpu_get(vm_offset_t, active_stack))
+
+#define current_task() (current_thread()->task)
+#define current_space() (current_task()->itk_space)
+#define current_map() (current_task()->map)
+
+#if MACH_DEBUG
+void stack_init(vm_offset_t stack);
+void stack_finalize(vm_offset_t stack);
+void thread_stats(void);
+#endif /* MACH_DEBUG */
+
+#endif /* _KERN_THREAD_H_ */
diff --git a/kern/thread_swap.c b/kern/thread_swap.c
new file mode 100644
index 0000000..a5fc052
--- /dev/null
+++ b/kern/thread_swap.c
@@ -0,0 +1,200 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * File: kern/thread_swap.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1987
+ *
+ * Mach thread swapper:
+ * Find idle threads to swap, freeing up kernel stack resources
+ * at the expense of allowing them to execute.
+ *
+ * Swap in threads that need to be run. This is done here
+ * by the swapper thread since it cannot be done (in general)
+ * when the kernel tries to place a thread on a run queue.
+ *
+ * Note: The act of swapping a thread in Mach does not mean that
+ * its memory gets forcibly swapped to secondary storage. The memory
+ * for the task corresponding to a swapped thread is paged out
+ * through the normal paging mechanism.
+ *
+ */
+
+#include <ipc/ipc_kmsg.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/thread.h>
+#include <kern/lock.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <mach/vm_param.h>
+#include <kern/sched_prim.h>
+#include <kern/processor.h>
+#include <kern/thread_swap.h>
+#include <machine/machspl.h> /* for splsched */
+
+
+
+queue_head_t swapin_queue;
+def_simple_lock_data(static, swapper_lock_data)
+
+#define swapper_lock() simple_lock(&swapper_lock_data)
+#define swapper_unlock() simple_unlock(&swapper_lock_data)
+
+/*
+ * swapper_init: [exported]
+ *
+ * Initialize the swapper module.
+ */
+void swapper_init(void)
+{
+ queue_init(&swapin_queue);
+ simple_lock_init(&swapper_lock_data);
+}
+
+/*
+ * thread_swapin: [exported]
+ *
+ * Place the specified thread in the list of threads to swapin. It
+ * is assumed that the thread is locked, therefore we are at splsched.
+ *
+ * We don't bother with stack_alloc_try to optimize swapin;
+ * our callers have already tried that route.
+ */
+
+void thread_swapin(thread_t thread)
+{
+ switch (thread->state & TH_SWAP_STATE) {
+ case TH_SWAPPED:
+ /*
+ * Swapped out - queue for swapin thread.
+ */
+ thread->state = (thread->state & ~TH_SWAP_STATE)
+ | TH_SW_COMING_IN;
+ swapper_lock();
+ enqueue_tail(&swapin_queue, &(thread->links));
+ swapper_unlock();
+ thread_wakeup((event_t) &swapin_queue);
+ break;
+
+ case TH_SW_COMING_IN:
+ /*
+ * Already queued for swapin thread, or being
+ * swapped in.
+ */
+ break;
+
+ default:
+ /*
+ * Already swapped in.
+ */
+ panic("thread_swapin");
+ }
+}
+
+/*
+ * thread_doswapin:
+ *
+ * Swapin the specified thread, if it should be runnable, then put
+ * it on a run queue. No locks should be held on entry, as it is
+ * likely that this routine will sleep (waiting for stack allocation).
+ */
+kern_return_t thread_doswapin(thread_t thread)
+{
+ kern_return_t kr;
+ spl_t s;
+
+ /*
+ * Allocate the kernel stack.
+ */
+
+ kr = stack_alloc(thread, thread_continue);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ /*
+ * Place on run queue.
+ */
+
+ s = splsched();
+ thread_lock(thread);
+ thread->state &= ~(TH_SWAPPED | TH_SW_COMING_IN);
+ if (thread->state & TH_RUN)
+ thread_setrun(thread, TRUE);
+ thread_unlock(thread);
+ (void) splx(s);
+ return KERN_SUCCESS;
+}
+
+/*
+ * swapin_thread: [exported]
+ *
+ * This procedure executes as a kernel thread. Threads that need to
+ * be swapped in are swapped in by this thread.
+ */
+static void __attribute__((noreturn)) swapin_thread_continue(void)
+{
+ for (;;) {
+ thread_t thread;
+ spl_t s;
+
+ s = splsched();
+ swapper_lock();
+
+ while ((thread = (thread_t) dequeue_head(&swapin_queue))
+ != THREAD_NULL) {
+ kern_return_t kr;
+ swapper_unlock();
+ (void) splx(s);
+
+ kr = thread_doswapin(thread); /* may block */
+
+ s = splsched();
+ swapper_lock();
+
+ if (kr != KERN_SUCCESS) {
+ enqueue_head(&swapin_queue,
+ (queue_entry_t) thread);
+ break;
+ }
+ }
+
+ assert_wait((event_t) &swapin_queue, FALSE);
+ swapper_unlock();
+ (void) splx(s);
+ counter(c_swapin_thread_block++);
+ thread_block(swapin_thread_continue);
+ }
+}
+
+void swapin_thread(void)
+{
+ stack_privilege(current_thread());
+
+ swapin_thread_continue();
+ /*NOTREACHED*/
+}
diff --git a/kern/thread_swap.h b/kern/thread_swap.h
new file mode 100644
index 0000000..d032acc
--- /dev/null
+++ b/kern/thread_swap.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/thread_swap.h
+ *
+ * Declarations of thread swapping routines.
+ */
+
+#ifndef _KERN_THREAD_SWAP_H_
+#define _KERN_THREAD_SWAP_H_
+
+/*
+ * exported routines
+ */
+extern void swapper_init(void);
+extern void thread_swapin(thread_t thread);
+extern kern_return_t thread_doswapin(thread_t thread);
+extern void swapin_thread(void) __attribute__((noreturn));
+
+#endif /* _KERN_THREAD_SWAP_H_ */
diff --git a/kern/timer.c b/kern/timer.c
new file mode 100644
index 0000000..13dfc20
--- /dev/null
+++ b/kern/timer.c
@@ -0,0 +1,501 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/queue.h>
+#include <kern/thread.h>
+#include <mach/time_value.h>
+#include <kern/timer.h>
+#include <kern/cpu_number.h>
+
+#include <kern/assert.h>
+#include <kern/macros.h>
+
+
+
+timer_t current_timer[NCPUS];
+timer_data_t kernel_timer[NCPUS];
+
+/*
+ * init_timers initializes all non-thread timers and puts the
+ * service routine on the callout queue. All timers must be
+ * serviced by the callout routine once an hour.
+ */
+void init_timers(void)
+{
+ int i;
+ timer_t this_timer;
+
+ /*
+ * Initialize all the kernel timers and start the one
+ * for this cpu (master) slaves start theirs later.
+ */
+ this_timer = &kernel_timer[0];
+ for ( i=0 ; i<NCPUS ; i++, this_timer++) {
+ timer_init(this_timer);
+ current_timer[i] = (timer_t) 0;
+ }
+
+ start_timer(&kernel_timer[cpu_number()]);
+}
+
+/*
+ * timer_init initializes a single timer.
+ */
+void timer_init(timer_t this_timer)
+{
+ this_timer->low_bits = 0;
+ this_timer->high_bits = 0;
+ this_timer->tstamp = 0;
+ this_timer->high_bits_check = 0;
+}
+
+#if STAT_TIME
+#else /* STAT_TIME */
+
+#ifdef MACHINE_TIMER_ROUTINES
+
+/*
+ * Machine-dependent code implements the timer routines.
+ */
+
+#else /* MACHINE_TIMER_ROUTINES */
+
+/*
+ * start_timer starts the given timer for this cpu. It is called
+ * exactly once for each cpu during the boot sequence.
+ */
+void
+start_timer(timer_t timer)
+{
+ timer->tstamp = get_timestamp();
+ current_timer[cpu_number()] = timer;
+}
+
+/*
+ * time_trap_uentry does trap entry timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. Must only be called if trap was
+ * from user mode.
+ */
+void
+time_trap_uentry(unsigned ts)
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer);
+ }
+
+ /*
+ * Record new timer.
+ */
+ mytimer = &(current_thread()->system_timer);
+ current_timer[mycpu] = mytimer;
+ mytimer->tstamp = ts;
+}
+
+/*
+ * time_trap_uexit does trap exit timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. Must only be called if returning to
+ * user mode.
+ */
+void
+time_trap_uexit(int ts)
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer); /* SYSTEMMODE */
+ }
+
+ mytimer = &(current_thread()->user_timer);
+
+ /*
+ * Record new timer.
+ */
+ current_timer[mycpu] = mytimer;
+ mytimer->tstamp = ts;
+}
+
+/*
+ * time_int_entry does interrupt entry timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. new_timer is the new timer to
+ * switch to. This routine returns the currently running timer,
+ * which MUST be pushed onto the stack by the caller, or otherwise
+ * saved for time_int_exit.
+ */
+timer_t
+time_int_entry(
+ unsigned ts,
+ timer_t new_timer)
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ /*
+ * Switch to new timer, and save old one on stack.
+ */
+ new_timer->tstamp = ts;
+ current_timer[mycpu] = new_timer;
+ return(mytimer);
+}
+
+/*
+ * time_int_exit does interrupt exit timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. old_timer is the timer value pushed
+ * onto the stack or otherwise saved after time_int_entry returned
+ * it.
+ */
+void
+time_int_exit(
+ unsigned ts,
+ timer_t old_timer)
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ /*
+ * If normalization requested, do it.
+ */
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer);
+ }
+ if (old_timer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(old_timer);
+ }
+
+ /*
+ * Start timer that was running before interrupt.
+ */
+ old_timer->tstamp = ts;
+ current_timer[mycpu] = old_timer;
+}
+
+/*
+ * timer_switch switches to a new timer. The machine
+ * dependent routine/macro get_timestamp must return a timestamp.
+ * Caller must lock out interrupts.
+ */
+void
+timer_switch(timer_t new_timer)
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+ unsigned ts;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ ts = get_timestamp();
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ /*
+ * Normalization check
+ */
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer);
+ }
+
+ /*
+ * Record new timer.
+ */
+ current_timer[mycpu] = new_timer;
+ new_timer->tstamp = ts;
+}
+
+#endif /* MACHINE_TIMER_ROUTINES */
+#endif /* STAT_TIME */
+
+/*
+ * timer_normalize normalizes the value of a timer. It is
+ * called only rarely, to make sure low_bits never overflows.
+ */
+void timer_normalize(timer_t timer)
+{
+ unsigned int high_increment;
+
+ /*
+ * Calculate high_increment, then write high check field first
+ * followed by low and high. timer_grab() reads these fields in
+ * reverse order so if high and high check match, we know
+ * that the values read are ok.
+ */
+
+ high_increment = timer->low_bits/TIMER_HIGH_UNIT;
+ timer->high_bits_check += high_increment;
+ __sync_synchronize();
+ timer->low_bits %= TIMER_HIGH_UNIT;
+ __sync_synchronize();
+ timer->high_bits += high_increment;
+}
+
+/*
+ * timer_grab() retrieves the value of a timer.
+ *
+ * Critical scheduling code uses TIMER_DELTA macro in timer.h
+ * (called from thread_timer_delta in sched.h).
+ *
+ * Keep coherent with db_time_grab below.
+ */
+
+static void timer_grab(
+ timer_t timer,
+ timer_save_t save)
+{
+#if MACH_ASSERT
+ unsigned int passes=0;
+#endif
+ do {
+ (save)->high = (timer)->high_bits;
+ __sync_synchronize ();
+ (save)->low = (timer)->low_bits;
+ __sync_synchronize ();
+ /*
+ * If the timer was normalized while we were doing this,
+ * the high_bits value read above and the high_bits check
+ * value will not match because high_bits_check is the first
+ * field touched by the normalization procedure, and
+ * high_bits is the last.
+ *
+ * Additions to timer only touch low bits and
+ * are therefore atomic with respect to this.
+ */
+#if MACH_ASSERT
+ passes++;
+ assert((passes < 10000) ? (1) : ((timer->high_bits_check = save->high), 0));
+#endif
+ } while ( (save)->high != (timer)->high_bits_check);
+}
+
+#define TIMER_TO_TIME_VALUE64(tv, timer) do { \
+	(tv)->seconds = (timer)->high + (timer)->low / 1000000; \
+	(tv)->nanoseconds = (timer)->low % 1000000 * 1000; \
+} while (0) /* no trailing ';' — callers supply it, keeping the macro if/else-safe */
+
+/*
+ * timer_read reads the value of a timer into a time_value64_t. If the
+ * timer was modified during the read, retry. The value returned
+ * is accurate to the last update; time accumulated by a running
+ * timer since its last timestamp is not included.
+ */
+
+void
+timer_read(
+ timer_t timer,
+ time_value64_t *tv)
+{
+ timer_save_data_t temp;
+
+ timer_grab(timer,&temp);
+ /*
+ * Normalize the result
+ */
+#ifdef TIMER_ADJUST
+ TIMER_ADJUST(&temp);
+#endif /* TIMER_ADJUST */
+ TIMER_TO_TIME_VALUE64(tv, &temp);
+}
+
+/*
+ * thread_read_times reads the user and system times from a thread.
+ * Time accumulated since last timestamp is not included. Should
+ * be called at splsched() to avoid having user and system times
+ * be out of step. Doesn't care if caller locked thread.
+ *
+ * Needs to be kept coherent with db_thread_read_times below.
+ */
+void thread_read_times(
+ thread_t thread,
+ time_value64_t *user_time_p,
+ time_value64_t *system_time_p)
+{
+ timer_read(&thread->user_timer, user_time_p);
+ timer_read(&thread->system_timer, system_time_p);
+}
+
+#if MACH_DEBUG
+
+/*
+ *
+ * Db_timer_grab(): used by db_thread_read_times. A nonblocking
+ * version of db_thread_get_times. Keep coherent with timer_grab
+ * above.
+ *
+ */
+static void db_timer_grab(
+ timer_t timer,
+ timer_save_t save)
+{
+ /* Don't worry about coherency */
+
+ (save)->high = (timer)->high_bits;
+ (save)->low = (timer)->low_bits;
+}
+
+static void
+nonblocking_timer_read(
+ timer_t timer,
+ time_value64_t *tv)
+{
+ timer_save_data_t temp;
+
+ db_timer_grab(timer, &temp);
+ /*
+ * Normalize the result
+ */
+#ifdef TIMER_ADJUST
+ TIMER_ADJUST(&temp);
+#endif /* TIMER_ADJUST */
+ TIMER_TO_TIME_VALUE64(tv, &temp);
+}
+
+/*
+ * Db_thread_read_times: A version of thread_read_times that
+ * can be called by the debugger. This version does not call
+ * timer_grab, which can block. Please keep it up to date with
+ * thread_read_times above.
+ *
+ */
+void db_thread_read_times(
+ thread_t thread,
+ time_value64_t *user_time_p,
+ time_value64_t *system_time_p)
+{
+ nonblocking_timer_read(&thread->user_timer, user_time_p);
+ nonblocking_timer_read(&thread->system_timer, system_time_p);
+}
+#endif /* MACH_DEBUG */
+
+/*
+ * timer_delta takes the difference of a saved timer value
+ * and the current one, and updates the saved value to current.
+ * The difference is returned as a function value. See
+ * TIMER_DELTA macro (timer.h) for optimization to this.
+ */
+
+unsigned
+timer_delta(
+ timer_t timer,
+ timer_save_t save)
+{
+ timer_save_data_t new_save;
+ unsigned result;
+
+ timer_grab(timer,&new_save);
+ result = (new_save.high - save->high) * TIMER_HIGH_UNIT +
+ new_save.low - save->low;
+ save->high = new_save.high;
+ save->low = new_save.low;
+ return(result);
+}
diff --git a/kern/timer.h b/kern/timer.h
new file mode 100644
index 0000000..92259a2
--- /dev/null
+++ b/kern/timer.h
@@ -0,0 +1,195 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_TIMER_H_
+#define _KERN_TIMER_H_
+
+#include <kern/macros.h>
+
+#if STAT_TIME
+/*
+ * Statistical timer definitions - use microseconds in timer, seconds
+ * in high unit field. No adjustment needed to convert to time_value64_t
+ * as a result. Service timers once an hour.
+ */
+
+/*
+ * TIMER_MAX is needed if a 32-bit rollover timer needs to be adjusted for
+ * maximum value.
+ */
+#undef TIMER_MAX
+
+/*
+ * TIMER_RATE is the rate of the timer in ticks per second. It is used to
+ * calculate percent cpu usage.
+ */
+#define TIMER_RATE 1000000
+
+/*
+ * TIMER_HIGH_UNIT is the unit for high_bits in terms of low_bits.
+ * Setting it to TIMER_RATE makes the high unit seconds.
+ */
+#define TIMER_HIGH_UNIT TIMER_RATE
+
+/*
+ * TIMER_ADJUST is used to adjust the value of a timer after it has been
+ * copied into a time_value64_t. No adjustment is needed if high_bits is in
+ * seconds.
+ */
+#undef TIMER_ADJUST
+
+/*
+ * MACHINE_TIMER_ROUTINES should defined if the timer routines are
+ * implemented in machine-dependent code (e.g. assembly language).
+ */
+#undef MACHINE_TIMER_ROUTINES
+
+#else /* STAT_TIME */
+/*
+ * Machine dependent definitions based on hardware support.
+ */
+
+#include <machine/timer.h>
+
+#endif /* STAT_TIME */
+
+/*
+ * Definitions for accurate timers. high_bits_check is a copy of
+ * high_bits that allows reader to verify that values read are ok.
+ */
+
+struct timer {
+ unsigned low_bits;
+ unsigned high_bits;
+ unsigned high_bits_check;
+ unsigned tstamp;
+};
+
+typedef struct timer timer_data_t;
+typedef struct timer *timer_t;
+
+/*
+ * Mask to check if low_bits is in danger of overflowing
+ */
+
+#define TIMER_LOW_FULL 0x80000000U
+
+/*
+ * Kernel timers and current timer array. [Exported]
+ */
+
+extern timer_t current_timer[NCPUS];
+extern timer_data_t kernel_timer[NCPUS];
+
+/*
+ * save structure for timer readings. This is used to save timer
+ * readings for elapsed time computations.
+ */
+
+struct timer_save {
+ unsigned low;
+ unsigned high;
+};
+
+typedef struct timer_save timer_save_data_t, *timer_save_t;
+
+/*
+ * Exported kernel interface to timers
+ */
+
+#if STAT_TIME
+#define start_timer(timer)
+#define timer_switch(timer)
+#else /* STAT_TIME */
+extern void start_timer(timer_t);
+extern void timer_switch(timer_t);
+#endif /* STAT_TIME */
+
+extern void timer_read(timer_t, time_value64_t *);
+extern void thread_read_times(thread_t, time_value64_t *, time_value64_t *);
+extern unsigned timer_delta(timer_t, timer_save_t);
+extern void timer_normalize(timer_t);
+extern void timer_init(timer_t);
+
+#if STAT_TIME
+/*
+ * Macro to bump timer values.
+ */
+#define timer_bump(timer, usec) \
+MACRO_BEGIN \
+ (timer)->low_bits += usec; \
+ if ((timer)->low_bits & TIMER_LOW_FULL) { \
+ timer_normalize(timer); \
+ } \
+MACRO_END
+
+#else /* STAT_TIME */
+/*
+ * Exported hardware interface to timers
+ */
+extern void time_trap_uentry(unsigned);
+extern void time_trap_uexit(int);
+extern timer_t time_int_entry(unsigned, timer_t);
+extern void time_int_exit(unsigned, timer_t);
+#endif /* STAT_TIME */
+
+/*
+ * TIMER_DELTA finds the difference between a timer and a saved value,
+ * and updates the saved value. Look at high_bits check field after
+ * reading low because that's the first written by a normalize
+ * operation; this isn't necessary for current usage because
+ * this macro is only used when the timer can't be normalized:
+ * thread is not running, or running thread calls it on itself at
+ * splsched().
+ */
+
+#define TIMER_DELTA(timer, save, result) \
+MACRO_BEGIN \
+ unsigned temp; \
+ \
+ temp = (timer).low_bits; \
+ if ((save).high != (timer).high_bits_check) { \
+ result += timer_delta(&(timer), &(save)); \
+ } \
+ else { \
+ result += temp - (save).low; \
+ (save).low = temp; \
+ } \
+MACRO_END
+
+extern void init_timers(void);
+
+void timer_init(timer_t this_timer);
+
+#if MACH_DEBUG
+void db_thread_read_times(
+ thread_t thread,
+ time_value64_t *user_time_p,
+ time_value64_t *system_time_p);
+#endif
+
+
+#endif /* _KERN_TIMER_H_ */
diff --git a/kern/xpr.c b/kern/xpr.c
new file mode 100644
index 0000000..1b551eb
--- /dev/null
+++ b/kern/xpr.c
@@ -0,0 +1,197 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * xpr silent tracing circular buffer.
+ */
+#include <string.h>
+
+#include <kern/debug.h>
+#include <kern/xpr.h>
+#include <kern/lock.h>
+#include "cpu_number.h"
+#include <machine/machspl.h>
+#include <vm/vm_kern.h>
+
+
+/*
+ * After a spontaneous reboot, it is desirable to look
+ * at the old xpr buffer. Assuming xprbootstrap allocates
+ * the buffer in the same place in physical memory and
+ * the reboot doesn't clear memory, this should work.
+ * xprptr will be reset, but the saved value should be OK.
+ * Just set xprenable false so the buffer isn't overwritten.
+ */
+
+def_simple_lock_data(static, xprlock)
+
+boolean_t xprenable = TRUE; /* Enable xpr tracing */
+int nxprbufs = 0; /* Number of contiguous xprbufs allocated */
+int xprflags = 0; /* Bit mask of xpr flags enabled */
+struct xprbuf *xprbase; /* Pointer to circular buffer nxprbufs*sizeof(xprbuf)*/
+struct xprbuf *xprptr; /* Currently allocated xprbuf */
+struct xprbuf *xprlast; /* Pointer to end of circular buffer */
+
+/*VARARGS1*/
+void xpr(
+ char *msg,
+ int arg1,
+ int arg2,
+ int arg3,
+ int arg4,
+ int arg5)
+{
+ spl_t s;
+ struct xprbuf *x;
+
+ /* If we aren't initialized, ignore trace request */
+ if (!xprenable || (xprptr == 0))
+ return;
+ /* Guard against all interrupts and allocate next buffer. */
+ s = splhigh();
+ simple_lock(&xprlock);
+ x = xprptr++;
+ if (xprptr >= xprlast) {
+ /* wrap around */
+ xprptr = xprbase;
+ }
+ /* Save xprptr in allocated memory. */
+ *(struct xprbuf **)xprlast = xprptr;
+ simple_unlock(&xprlock);
+ splx(s);
+ x->msg = msg;
+ x->arg1 = arg1;
+ x->arg2 = arg2;
+ x->arg3 = arg3;
+ x->arg4 = arg4;
+ x->arg5 = arg5;
+ x->timestamp = XPR_TIMESTAMP;
+ x->cpuinfo = cpu_number();
+}
+
+void xprbootstrap(void)
+{
+ vm_offset_t addr;
+ vm_size_t size;
+ kern_return_t kr;
+
+ simple_lock_init(&xprlock);
+ if (nxprbufs == 0)
+ return; /* assume XPR support not desired */
+
+ /* leave room at the end for a saved copy of xprptr */
+ size = nxprbufs * sizeof(struct xprbuf) + sizeof xprptr;
+
+ kr = kmem_alloc_wired(kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ panic("xprbootstrap");
+
+ if (xprenable) {
+ /*
+ * If xprenable is set (the default) then we zero
+ * the buffer so xpr_dump doesn't encounter bad pointers.
+ * If xprenable isn't set, then we preserve
+ * the original contents of the buffer. This is useful
+ * if memory survives reboots, so xpr_dump can show
+ * the previous buffer contents.
+ */
+
+ memset((void *) addr, 0, size);
+ }
+
+ xprbase = (struct xprbuf *) addr;
+ xprlast = &xprbase[nxprbufs];
+ xprptr = xprbase; /* setting xprptr enables tracing */
+}
+
+int xprinitial = 0;
+
+void xprinit(void)
+{
+ xprflags |= xprinitial;
+}
+
+#if MACH_KDB
+#include <machine/setjmp.h>
+#include <ddb/db_output.h>
+
+extern jmp_buf_t *db_recover;
+
+/*
+ * Print current content of xpr buffers (KDB's sake)
+ * Use stack order to make it understandable.
+ *
+ * Called as "!xpr_dump" this dumps the kernel's xpr buffer.
+ * Called with arguments, it can dump xpr buffers in user tasks,
+ * assuming they use the same format as the kernel.
+ */
+void xpr_dump(
+ struct xprbuf *base,
+ int nbufs)
+{
+ jmp_buf_t db_jmpbuf;
+ jmp_buf_t *prev;
+ struct xprbuf *last, *ptr;
+ struct xprbuf *x;
+ int i;
+ spl_t s = s;
+
+ if (base == 0) {
+ base = xprbase;
+ nbufs = nxprbufs;
+ }
+
+ if (nbufs == 0)
+ return;
+
+ if (base == xprbase) {
+ s = splhigh();
+ simple_lock(&xprlock);
+ }
+
+ last = base + nbufs;
+ ptr = * (struct xprbuf **) last;
+
+ prev = db_recover;
+ if (_setjmp(db_recover = &db_jmpbuf) == 0)
+ for (x = ptr, i = 0; i < nbufs; i++) {
+ if (--x < base)
+ x = last - 1;
+
+ if (x->msg == 0)
+ break;
+
+ db_printf("<%d:%x:%x> ", x - base, x->cpuinfo, x->timestamp);
+ db_printf(x->msg, x->arg1,x->arg2,x->arg3,x->arg4,x->arg5);
+ }
+ db_recover = prev;
+
+ if (base == xprbase) {
+ simple_unlock(&xprlock);
+ (void) splx(s);
+ }
+}
+#endif /* MACH_KDB */
diff --git a/kern/xpr.h b/kern/xpr.h
new file mode 100644
index 0000000..72f6817
--- /dev/null
+++ b/kern/xpr.h
@@ -0,0 +1,97 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Include file for xpr circular buffer silent tracing.
+ *
+ */
+/*
+ * If the kernel flag XPRDEBUG is set, the XPR macro is enabled. The
+ * macro should be invoked something like the following:
+ * XPR(XPR_SYSCALLS, ("syscall: %d, 0x%x\n", syscallno, arg1);
+ * which will expand into the following code:
+ * if (xprflags & XPR_SYSCALLS)
+ * xpr("syscall: %d, 0x%x\n", syscallno, arg1);
+ * Xpr will log the pointer to the printf string and up to 6 arguments,
+ * along with a timestamp and cpuinfo (for multi-processor systems), into
+ * a circular buffer. The actual printf processing is delayed until after
+ * the buffer has been collected. It is assumed that the text/data segments
+ * of the kernel can easily be reconstructed in a post-processor which
+ * performs the printf processing.
+ *
+ * If the XPRDEBUG compilation switch is not set, the XPR macro expands
+ * to nothing.
+ */
+
+#ifndef _KERN_XPR_H_
+#define _KERN_XPR_H_
+
+#ifndef KERNEL
+#include <sys/features.h>
+#endif /* KERNEL */
+
+#include <machine/xpr.h>
+
+#if XPR_DEBUG
+
+#define XPR(flags,xprargs) if(xprflags&flags) xpr xprargs
+
+extern int xprflags;
+/*
+ * flags for message types.
+ */
+#define XPR_SYSCALLS 0x00000001
+#define XPR_TRAPS 0x00000002
+#define XPR_SCHED 0x00000004
+#define XPR_NPTCP 0x00000008
+#define XPR_NP 0x00000010
+#define XPR_TCP 0x00000020
+
+#define XPR_VM_OBJECT (1 << 8)
+#define XPR_VM_OBJECT_CACHE (1 << 9)
+#define XPR_VM_PAGE (1 << 10)
+#define XPR_VM_PAGEOUT (1 << 11)
+#define XPR_MEMORY_OBJECT (1 << 12)
+#define XPR_VM_FAULT (1 << 13)
+#define XPR_INODE_PAGER (1 << 14)
+#define XPR_INODE_PAGER_DATA (1 << 15)
+
+#else /* XPR_DEBUG */
+#define XPR(flags,xprargs)
+#endif /* XPR_DEBUG */
+
+struct xprbuf {
+ char *msg;
+ int arg1,arg2,arg3,arg4,arg5;
+ int timestamp;
+ int cpuinfo;
+};
+
+extern void xpr(char *, int, int, int, int, int);
+extern void xpr_dump(struct xprbuf *, int);
+extern void xprinit(void);
+extern void xprbootstrap(void);
+
+#endif /* _KERN_XPR_H_ */
diff --git a/linux/Makefrag.am b/linux/Makefrag.am
new file mode 100644
index 0000000..2338452
--- /dev/null
+++ b/linux/Makefrag.am
@@ -0,0 +1,788 @@
+# Makefile fragment for Linux device drivers and the glue code.
+
+# Copyright (C) 2006, 2007, 2011 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Files for device driver support.
+#
+
+if CODE_linux
+noinst_LIBRARIES += \
+ liblinux.a
+gnumach_o_LDADD += \
+ liblinux.a
+endif
+
+liblinux_a_CPPFLAGS = $(AM_CPPFLAGS) \
+ -I$(srcdir)/$(systype)/linux/dev/include \
+ -I$(top_builddir)/linux/dev/include \
+ -I$(srcdir)/linux/dev/include \
+ -I$(top_builddir)/linux/src/include \
+ -I$(srcdir)/linux/src/include
+# Because of the use of `extern inline' in some Linux header files without
+# corresponding text segment definitions, we must always optimize.
+liblinux_a_CFLAGS = -O2 $(AM_CFLAGS)
+
+# Disable warnings that are applied to the core Mach code.
+liblinux_a_CFLAGS += -Wno-missing-prototypes -Wno-strict-prototypes \
+ -Wno-old-style-definition
+
+# See <http://lists.gnu.org/archive/html/bug-hurd/2006-01/msg00148.html>.
+liblinux_a_CFLAGS += \
+ -fno-strict-aliasing
+
+# TODO. Do we really need `-traditional'?
+liblinux_a_CCASFLAGS = $(AM_CCASFLAGS) \
+ -traditional \
+ $(liblinux_a_CPPFLAGS)
+
+liblinux_a_SOURCES = \
+ linux/dev/init/version.c \
+ linux/dev/kernel/softirq.c \
+ linux/src/arch/i386/lib/delay.c \
+ linux/dev/kernel/dma.c \
+ linux/dev/kernel/resource.c \
+ linux/dev/kernel/printk.c \
+ linux/src/arch/i386/kernel/bios32.c \
+ linux/dev/arch/i386/kernel/irq.c \
+ linux/src/lib/ctype.c \
+ linux/dev/lib/vsprintf.c \
+ linux/dev/init/main.c \
+ linux/dev/glue/misc.c \
+ linux/dev/kernel/sched.c \
+ linux/dev/glue/kmem.c \
+ linux/dev/glue/block.c \
+ linux/dev/glue/glue.h \
+ linux/dev/arch/i386/kernel/setup.c
+
+liblinux_a_SOURCES += \
+ linux/src/drivers/pci/pci.c \
+ linux/dev/drivers/block/genhd.c
+
+#
+# Linux device drivers.
+#
+if device_driver_floppy
+liblinux_a_SOURCES += \
+ linux/dev/drivers/block/floppy.c
+endif
+
+if device_driver_ide
+liblinux_a_SOURCES += \
+ linux/src/drivers/block/cmd640.c \
+ linux/src/drivers/block/ide-cd.c \
+ linux/src/drivers/block/ide.c \
+ linux/src/drivers/block/ide.h \
+ linux/dev/drivers/block/ahci.c \
+ linux/dev/include/ahci.h \
+ linux/src/drivers/block/ide_modes.h \
+ linux/src/drivers/block/rz1000.c \
+ linux/src/drivers/block/triton.c
+endif
+
+if device_driver_group_scsi
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/constants.c \
+ linux/src/drivers/scsi/constants.h \
+ linux/src/drivers/scsi/hosts.c \
+ linux/src/drivers/scsi/hosts.h \
+ linux/src/drivers/scsi/scsi.c \
+ linux/src/drivers/scsi/scsi.h \
+ linux/src/drivers/scsi/scsi_ioctl.c \
+ linux/src/drivers/scsi/scsi_proc.c \
+ linux/src/drivers/scsi/scsicam.c \
+ linux/src/drivers/scsi/sd.c \
+ linux/src/drivers/scsi/sd.h \
+ linux/src/drivers/scsi/sd_ioctl.c \
+ linux/src/drivers/scsi/sr.c \
+ linux/src/drivers/scsi/sr.h \
+ linux/src/drivers/scsi/sr_ioctl.c
+endif
+
+if device_driver_53c78xx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/53c7,8xx.h \
+ linux/src/drivers/scsi/53c78xx.c \
+ linux/src/drivers/scsi/53c8xx_d.h \
+ linux/src/drivers/scsi/53c8xx_u.h
+endif
+
+if device_driver_AM53C974
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/AM53C974.c \
+ linux/src/drivers/scsi/AM53C974.h
+endif
+
+if device_driver_BusLogic
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/BusLogic.c \
+ linux/src/drivers/scsi/BusLogic.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/FlashPoint.c
+endif
+
+if device_driver_NCR53c406a
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/NCR53c406a.c \
+ linux/src/drivers/scsi/NCR53c406a.h
+endif
+
+if device_driver_advansys
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/advansys.c \
+ linux/src/drivers/scsi/advansys.h
+endif
+
+if device_driver_aha152x
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aha152x.c \
+ linux/src/drivers/scsi/aha152x.h
+endif
+
+if device_driver_aha1542
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aha1542.c \
+ linux/src/drivers/scsi/aha1542.h
+endif
+
+if device_driver_aha1740
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aha1740.c \
+ linux/src/drivers/scsi/aha1740.h
+endif
+
+if device_driver_aic7xxx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aic7xxx.c \
+ linux/src/drivers/scsi/aic7xxx.h \
+ linux/src/drivers/scsi/aic7xxx/scsi_message.h \
+ linux/src/drivers/scsi/aic7xxx/sequencer.h \
+ linux/src/drivers/scsi/aic7xxx_reg.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/aic7xxx_proc.c \
+ linux/src/drivers/scsi/aic7xxx_seq.c
+endif
+
+if device_driver_dtc
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/dtc.c \
+ linux/src/drivers/scsi/dtc.h
+endif
+
+if device_driver_eata
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/eata.c \
+ linux/src/drivers/scsi/eata.h \
+ linux/src/drivers/scsi/eata_generic.h
+endif
+
+if device_driver_eata_dma
+liblinux_a_SOURCES += \
+ linux/dev/drivers/scsi/eata_dma.c \
+ linux/src/drivers/scsi/eata_dma.h \
+ linux/src/drivers/scsi/eata_dma_proc.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/eata_dma_proc.c
+endif
+
+if device_driver_eata_pio
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/eata_pio.c \
+ linux/src/drivers/scsi/eata_pio.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/eata_pio_proc.c
+endif
+
+if device_driver_fdomain
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/fdomain.c \
+ linux/src/drivers/scsi/fdomain.h
+endif
+
+if device_driver_g_NCR5380
+liblinux_a_SOURCES += \
+ linux/dev/drivers/scsi/g_NCR5380.c \
+ linux/src/drivers/scsi/g_NCR5380.h
+endif
+
+if device_driver_gdth
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/gdth.c \
+ linux/src/drivers/scsi/gdth.h \
+ linux/src/drivers/scsi/gdth_ioctl.h \
+ linux/src/drivers/scsi/gdth_proc.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/gdth_proc.c
+endif
+
+if device_driver_in2000
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/in2000.c \
+ linux/src/drivers/scsi/in2000.h
+endif
+
+if device_driver_ncr53c8xx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/ncr53c8xx.c \
+ linux/src/drivers/scsi/ncr53c8xx.h
+endif
+
+if device_driver_pas16
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/pas16.c \
+ linux/src/drivers/scsi/pas16.h
+endif
+
+if device_driver_ppa
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/ppa.c \
+ linux/src/drivers/scsi/ppa.h
+endif
+
+if device_driver_qlogicfas
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/qlogicfas.c \
+ linux/src/drivers/scsi/qlogicfas.h
+endif
+
+if device_driver_qlogicisp
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/qlogicisp.c \
+ linux/src/drivers/scsi/qlogicisp.h
+endif
+
+if device_driver_seagate
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/seagate.c \
+ linux/src/drivers/scsi/seagate.h
+endif
+
+if device_driver_sym53c8xx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/sym53c8xx.c \
+ linux/src/drivers/scsi/sym53c8xx_comm.h \
+ linux/src/drivers/scsi/sym53c8xx.h \
+ linux/src/drivers/scsi/sym53c8xx_defs.h
+endif
+
+if device_driver_t128
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/t128.c \
+ linux/src/drivers/scsi/t128.h
+endif
+
+if device_driver_tmscsim
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/dc390.h \
+ linux/src/drivers/scsi/tmscsim.c \
+ linux/src/drivers/scsi/tmscsim.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/scsiiom.c
+endif
+
+if device_driver_u14_34f
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/u14-34f.c \
+ linux/src/drivers/scsi/u14-34f.h
+endif
+
+if device_driver_ultrastor
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/ultrastor.c \
+ linux/src/drivers/scsi/ultrastor.h
+endif
+
+if device_driver_wd7000
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/wd7000.c \
+ linux/src/drivers/scsi/wd7000.h
+endif
+
+EXTRA_DIST += \
+ linux/src/drivers/scsi/NCR5380.c \
+ linux/src/drivers/scsi/NCR5380.h
+
+if device_driver_group_net
+liblinux_a_SOURCES += \
+ linux/dev/drivers/net/auto_irq.c \
+ linux/dev/glue/net.c \
+ linux/dev/drivers/net/Space.c \
+ linux/dev/net/core/dev.c \
+ linux/dev/drivers/net/net_init.c \
+ linux/src/drivers/net/pci-scan.c \
+ linux/src/drivers/net/pci-scan.h
+endif
+
+if device_driver_3c501
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c501.c
+endif
+
+if device_driver_3c503
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c503.c \
+ linux/src/drivers/net/3c503.h \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_3c505
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c505.c \
+ linux/src/drivers/net/3c505.h
+endif
+
+if device_driver_3c507
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c507.c
+endif
+
+if device_driver_3c509
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c509.c
+endif
+
+if device_driver_3c59x
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c59x.c
+endif
+
+if device_driver_3c515
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c515.c
+endif
+
+if device_driver_ac3200
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ac3200.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_apricot
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/apricot.c
+endif
+
+if device_driver_at1700
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/at1700.c
+endif
+
+if device_driver_atp
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/atp.c \
+ linux/src/drivers/net/atp.h
+endif
+
+#if device_driver_cb_shim
+#liblinux_a_SOURCES += \
+# linux/src/drivers/net/cb_shim.c
+#endif
+
+if device_driver_de4x5
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/de4x5.c \
+ linux/src/drivers/net/de4x5.h
+endif
+
+if device_driver_de600
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/de600.c
+endif
+
+if device_driver_de620
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/de620.c \
+ linux/src/drivers/net/de620.h
+endif
+
+if device_driver_depca
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/depca.c \
+ linux/src/drivers/net/depca.h
+endif
+
+if device_driver_e2100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/e2100.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_eepro
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eepro.c
+endif
+
+if device_driver_eepro100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eepro100.c
+endif
+
+if device_driver_eexpress
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eexpress.c \
+ linux/src/drivers/net/eth82586.h
+endif
+
+if device_driver_epic100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/epic100.c
+endif
+
+if device_driver_eth16i
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eth16i.c
+endif
+
+if device_driver_ewrk3
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ewrk3.c \
+ linux/src/drivers/net/ewrk3.h
+endif
+
+if device_driver_fmv18x
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/fmv18x.c
+endif
+
+if device_driver_hamachi
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hamachi.c
+endif
+
+if device_driver_hp_plus
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hp-plus.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_hp
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hp.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_hp100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hp100.c \
+ linux/src/drivers/net/hp100.h
+endif
+
+if device_driver_intel_gige
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/intel-gige.c
+endif
+
+if device_driver_lance
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/lance.c
+endif
+
+if device_driver_myson803
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/myson803.c
+endif
+
+if device_driver_natsemi
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/natsemi.c
+endif
+
+if device_driver_ne
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ne.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_ne2k_pci
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ne2k-pci.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_ni52
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ni52.c \
+ linux/src/drivers/net/ni52.h
+endif
+
+if device_driver_ni65
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ni65.c \
+ linux/src/drivers/net/ni65.h
+endif
+
+if device_driver_ns820
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ns820.c
+endif
+
+if device_driver_pcnet32
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/pcnet32.c
+endif
+
+if device_driver_rtl8139
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/rtl8139.c
+endif
+
+if device_driver_seeq8005
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/seeq8005.c \
+ linux/src/drivers/net/seeq8005.h
+endif
+
+if device_driver_sis900
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/sis900.c \
+ linux/src/drivers/net/sis900.h
+endif
+
+if device_driver_sk_g16
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/sk_g16.c \
+ linux/src/drivers/net/sk_g16.h
+endif
+
+if device_driver_smc_ultra
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/smc-ultra.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_smc_ultra32
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/smc-ultra32.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_starfire
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/starfire.c
+endif
+
+if device_driver_sundance
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/sundance.c
+endif
+
+if device_driver_tlan
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/tlan.c \
+ linux/src/drivers/net/tlan.h
+endif
+
+if device_driver_tulip
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/tulip.c
+endif
+
+if device_driver_via_rhine
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/via-rhine.c
+endif
+
+if device_driver_wavelan
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/i82586.h \
+ linux/src/drivers/net/wavelan.c \
+ linux/src/drivers/net/wavelan.h \
+ linux/dev/drivers/net/wavelan.p.h
+endif
+
+if device_driver_wd
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/wd.c
+endif
+
+if device_driver_winbond_840
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/winbond-840.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_yellowfin
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/yellowfin.c
+endif
+
+if device_driver_znet
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/znet.c
+endif
+
+EXTRA_DIST += \
+ linux/src/drivers/net/8390.h \
+ linux/src/drivers/net/kern_compat.h
+
+# pcmcia-cs.
+
+liblinux_pcmcia_cs_modules_a_CPPFLAGS = $(liblinux_a_CPPFLAGS) \
+ -I$(srcdir)/linux/pcmcia-cs/include
+liblinux_pcmcia_cs_modules_a_CFLAGS = $(liblinux_a_CFLAGS) \
+ -include $(srcdir)/linux/pcmcia-cs/glue/pcmcia_glue.h
+liblinux_pcmcia_cs_modules_a_SOURCES =
+if device_driver_group_pcmcia
+noinst_LIBRARIES += \
+ liblinux_pcmcia_cs_modules.a
+gnumach_o_LDADD += \
+ liblinux_pcmcia_cs_modules.a
+endif
+
+liblinux_pcmcia_cs_modules_a_SOURCES += \
+ linux/pcmcia-cs/glue/pcmcia.c \
+ linux/pcmcia-cs/glue/pcmcia_glue.h \
+ linux/pcmcia-cs/modules/cs.c \
+ linux/pcmcia-cs/modules/cs_internal.h \
+ linux/pcmcia-cs/modules/ds.c \
+ linux/pcmcia-cs/modules/rsrc_mgr.c \
+ linux/pcmcia-cs/modules/bulkmem.c \
+ linux/pcmcia-cs/modules/cistpl.c \
+ linux/pcmcia-cs/modules/pci_fixup.c
+EXTRA_DIST += \
+ linux/pcmcia-cs/glue/ds.c
+
+if device_driver_i82365
+liblinux_pcmcia_cs_modules_a_SOURCES += \
+ linux/pcmcia-cs/modules/cirrus.h \
+ linux/pcmcia-cs/modules/ene.h \
+ linux/pcmcia-cs/modules/i82365.c \
+ linux/pcmcia-cs/modules/i82365.h \
+ linux/pcmcia-cs/modules/o2micro.h \
+ linux/pcmcia-cs/modules/ricoh.h \
+ linux/pcmcia-cs/modules/smc34c90.h \
+ linux/pcmcia-cs/modules/ti113x.h \
+ linux/pcmcia-cs/modules/topic.h \
+ linux/pcmcia-cs/modules/vg468.h \
+ linux/pcmcia-cs/modules/yenta.h
+endif
+
+liblinux_pcmcia_cs_clients_a_CPPFLAGS = $(liblinux_a_CPPFLAGS) \
+ -DPCMCIA_CLIENT -I$(srcdir)/linux/pcmcia-cs/include
+liblinux_pcmcia_cs_clients_a_CFLAGS = $(liblinux_a_CFLAGS) \
+ -include $(srcdir)/linux/pcmcia-cs/glue/pcmcia_glue.h
+liblinux_pcmcia_cs_clients_a_SOURCES =
+if device_driver_group_pcmcia
+noinst_LIBRARIES += \
+ liblinux_pcmcia_cs_clients.a
+gnumach_o_LDADD += \
+ liblinux_pcmcia_cs_clients.a
+endif
+
+if device_driver_3c574_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/3c574_cs.c
+endif
+
+if device_driver_3c589_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/3c589_cs.c
+endif
+
+if device_driver_axnet_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/ax8390.h \
+ linux/pcmcia-cs/clients/axnet_cs.c
+endif
+
+if device_driver_fmvj18x_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/fmvj18x_cs.c
+endif
+
+if device_driver_nmclan_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/nmclan_cs.c
+endif
+
+if device_driver_pcnet_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/pcnet_cs.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_smc91c92_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/ositech.h \
+ linux/pcmcia-cs/clients/smc91c92_cs.c
+endif
+
+if device_driver_xirc2ps_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/xirc2ps_cs.c
+endif
+
+liblinux_pcmcia_cs_wireless_a_CPPFLAGS = $(liblinux_a_CPPFLAGS) \
+ -I$(srcdir)/linux/pcmcia-cs/include
+liblinux_pcmcia_cs_wireless_a_CFLAGS = $(liblinux_a_CFLAGS) \
+ -include $(srcdir)/linux/pcmcia-cs/glue/wireless_glue.h
+liblinux_pcmcia_cs_wireless_a_SOURCES =
+if device_driver_group_pcmcia
+noinst_LIBRARIES += \
+ liblinux_pcmcia_cs_wireless.a
+gnumach_o_LDADD += \
+ liblinux_pcmcia_cs_wireless.a
+endif
+
+if device_driver_orinoco_cs
+liblinux_pcmcia_cs_wireless_a_SOURCES += \
+ linux/pcmcia-cs/glue/wireless_glue.h \
+ linux/pcmcia-cs/wireless/hermes.c \
+ linux/pcmcia-cs/wireless/hermes.h \
+ linux/pcmcia-cs/wireless/hermes_rid.h \
+ linux/pcmcia-cs/wireless/ieee802_11.h \
+ linux/pcmcia-cs/wireless/orinoco.c \
+ linux/pcmcia-cs/wireless/orinoco.h \
+ linux/pcmcia-cs/wireless/orinoco_cs.c
+endif
+
+#
+# Building a distribution.
+#
+
+EXTRA_DIST += \
+ linux/dev/README \
+ linux/src/COPYING
+
+# Those get #included...
+EXTRA_DIST += \
+ linux/src/drivers/scsi/FlashPoint.c \
+ linux/src/drivers/scsi/eata_pio_proc.c \
+ linux/src/drivers/scsi/scsiiom.c
+
+# Instead of listing each file individually...
+EXTRA_DIST += \
+ linux/dev/include \
+ linux/src/include
+EXTRA_DIST += \
+ linux/pcmcia-cs/include
+dist-hook: dist-hook-linux
+.PHONY: dist-hook-linux
+dist-hook-linux:
+# These symbolic links are copied from the build directory due to including
+# `linux/dev/include linux/src/include' to `EXTRA_DIST' above.
+ rm -f \
+ $(distdir)/linux/dev/include/asm \
+ $(distdir)/linux/src/include/asm
+
+#
+# Architecture specific parts.
+#
+
+if HOST_ix86
+include i386/linux/Makefrag.am
+endif
diff --git a/linux/configfrag.ac b/linux/configfrag.ac
new file mode 100644
index 0000000..c851e56
--- /dev/null
+++ b/linux/configfrag.ac
@@ -0,0 +1,664 @@
+dnl Configure fragment for Linux code snarfed into GNU Mach.
+
+dnl Copyright (C) 1997, 1999, 2004, 2006, 2007 Free Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+#
+# Internals.
+#
+
+[have_linux_code=no]
+
+#
+# Helper functions.
+#
+
+#
+# Calling `device_driver_group group' makes sure that the infrastructure needed
+# for the group `group' will be set-up.
+#
+
+[device_driver_group() {
+ case $1 in
+ '')
+ # No group.
+ :;;
+ block)
+ device_driver_group_block=selected;;
+ net)
+ device_driver_group_net=selected;;
+ pcmcia)
+ # Pull in group `net'.
+ device_driver_group net
+ device_driver_group_pcmcia=selected;;
+ scsi)
+ device_driver_group_scsi=selected;;
+ wireless)
+ # Pull in group `pcmcia'.
+ device_driver_group pcmcia
+ device_driver_group_wireless=selected;;
+ *)]
+ AC_MSG_ERROR([invalid device driver group `$1'])[;;
+ esac
+}]
+
+AC_ARG_ENABLE([linux-groups],
+ AS_HELP_STRING([--disable-linux-groups], [Linux drivers]))
+
+AC_DEFUN([AC_OPTION_Linux_group], [
+AC_ARG_ENABLE([$1-group],
+ AS_HELP_STRING([--enable-$1-group], [$2]),
+ enable_$1_group=$enableval, enable_$1_group=$enable_linux_groups)
+])
+
+#
+# AC_OPTION_Linux_ix86_at(name,description,option[,group]). Process
+# configuration option --enable-`name' (with description `description'). If
+# it's set, then `option' is defined with AC_DEFINE. The option optionally
+# pulls in the device driver group `group'; see the comments on
+# device_driver_group for more information. For ix86-at, the value from $enable_default_device_drivers is
+# considered when deciding whether to activate the option by default or not.
+#
+AC_DEFUN([AC_OPTION_Linux_ix86_at], [
+[unset enableval]
+AC_ARG_ENABLE([$1],
+ AS_HELP_STRING([--enable-$1], [$2]))
+[if test x$enable_$4_group = xno;
+then
+ enableval=${enableval-no}
+fi
+#TODO. Could use some M4 magic to avoid a lot of shell code.
+case $host_platform:$host_cpu in
+ at:i?86)
+ case $enable_device_drivers:'$2' in
+ default:*by\ default* | qemu:*for\ qemu*)
+ enableval=${enableval-yes};;
+ *)
+ enableval=${enableval-no};;
+ esac;;
+ *)
+ if [ x"$enableval" = xyes ]; then
+ # TODO. That might not always be true.]
+ AC_MSG_ERROR([cannot enable `$1' in this configuration.])
+ [fi;;
+esac]
+AM_CONDITIONAL([device_driver_]m4_bpatsubst([$1], [-], [_]),
+ [[[ x"$enableval" = xyes ]]])
+[if [ x"$enableval" = xyes ]; then
+ have_linux_code=yes]
+ AC_DEFINE([$3], [1], [option $1: $2])
+ [device_driver_group $4
+fi]])
+
+#
+# AC_OPTION_Linux_ix86_at_nodef() is like AC_OPTION_Linux_ix86_at(), but
+# doesn't consider $enable_default_device_drivers.
+#
+AC_DEFUN([AC_OPTION_Linux_ix86_at_nodef], [
+[unset enableval]
+AC_ARG_ENABLE([$1],
+ AS_HELP_STRING([--enable-$1], [$2]))
+[#TODO.
+case $host_platform:$host_cpu in
+ at:i?86)
+ :;;
+ *)
+ if [ x"$enableval" = xyes ]; then
+ # TODO. That might not always be true.]
+ AC_MSG_ERROR([cannot enable `$1' in this configuration.])
+ [fi;;
+esac]
+AM_CONDITIONAL([device_driver_]m4_bpatsubst([$1], [-], [_]),
+ [[[ x"$enableval" = xyes ]]])
+[if [ x"$enableval" = xyes ]; then
+ have_linux_code=yes]
+ AC_DEFINE([$3], [1], [option $1: $2])
+ [device_driver_group $4
+fi]])
+
+#
+# AC_Linux_DRIVER(machname, description, definition, [group]). Convenience.
+# TODO. The naming of those is nearly everything but reasonable.
+#
+
+AC_DEFUN([AC_Linux_DRIVER], [
+ AC_OPTION_Linux_ix86_at([$1], [Linux device driver for $2; on ix86-at enabled]
+ [by default], [$3], [$4])
+])
+AC_DEFUN([AC_Linux_DRIVER_qemu], [
+ AC_OPTION_Linux_ix86_at([$1], [Linux device driver for $2; on ix86-at enabled]
+ [by default and for qemu], [$3], [$4])
+])
+AC_DEFUN([AC_Linux_DRIVER_nodef], [
+ AC_OPTION_Linux_ix86_at_nodef([$1], [Linux device driver for $2], [$3], [$4])
+])
+
+#
+# Configuration options.
+#
+
+dnl Block drivers.
+AC_OPTION_Linux_group([block], [Block drivers])
+
+AC_Linux_DRIVER_qemu([floppy],
+ [PC floppy],
+ [CONFIG_BLK_DEV_FD],
+ [block])
+AC_Linux_DRIVER_qemu([ide],
+ [IDE disk controllers],
+ [CONFIG_BLK_DEV_IDE],
+ [block])
+
+AC_ARG_ENABLE([ide-forcedma],
+ AS_HELP_STRING([--enable-ide-forcedma], [enable forced use of DMA on IDE]),
+ [test x"$enableval" = xno ||
+ AC_DEFINE([CONFIG_BLK_DEV_FORCE_DMA], [1], [Force DMA on IDE block devices])])
+
+dnl SCSI controllers.
+AC_OPTION_Linux_group([scsi], [SCSI drivers])
+
+# Disabled by default.
+AC_Linux_DRIVER_nodef([53c78xx],
+ [SCSI controller NCR 53C7,8xx],
+ [CONFIG_SCSI_NCR53C7xx],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([AM53C974],
+ [SCSI controller AM53/79C974 (am53c974, am79c974)],
+ [CONFIG_SCSI_AM53C974],
+ [scsi])
+AC_Linux_DRIVER([BusLogic],
+ [SCSI controller BusLogic],
+ [CONFIG_SCSI_BUSLOGIC],
+ [scsi])
+# TODO. What's that? And what about FlashPoint.c?
+dnl Dirty implementation...
+AC_ARG_ENABLE([flashpoint],
+ AS_HELP_STRING([--enable-flashpoint], [SCSI flashpoint]),
+ [test x"$enableval" = xno &&
+ AC_DEFINE([CONFIG_SCSI_OMIT_FLASHPOINT], [], [scsi omit flashpoint])])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([NCR53c406a],
+ [SCSI controller NCR53c406a chip],
+ [CONFIG_SCSI_NCR53C406A],
+ [scsi])
+AC_Linux_DRIVER([advansys],
+ [SCSI controller AdvanSys],
+ [CONFIG_SCSI_ADVANSYS],
+ [scsi])
+AC_Linux_DRIVER([aha152x],
+ [SCSI controller Adaptec AHA-152x/2825 (aha152x, aha2825)],
+ [CONFIG_SCSI_AHA152X],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([aha1542],
+ [SCSI controller Adaptec AHA-1542],
+ [CONFIG_SCSI_AHA1542],
+ [scsi])
+AC_Linux_DRIVER([aha1740],
+ [SCSI controller Adaptec AHA-1740],
+ [CONFIG_SCSI_AHA1740],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([aic7xxx],
+ [SCSI controller Adaptec AIC7xxx],
+ [CONFIG_SCSI_AIC7XXX],
+ [scsi])
+AC_Linux_DRIVER([dtc],
+ [SCSI controller DTC3180/3280 (dtc3180, dtc3280)],
+ [CONFIG_SCSI_DTC3280],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata],
+ [SCSI controller EATA ISA/EISA/PCI
+ (DPT and generic EATA/DMA-compliant boards)],
+ [CONFIG_SCSI_EATA],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata_dma],
+ [SCSI controller EATA-DMA (DPT, NEC, AT&T, SNI, AST, Olivetti, Alphatronix)],
+ [CONFIG_SCSI_EATA_DMA],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata_pio],
+ [SCSI controller EATA-PIO (old DPT PM2001, PM2012A)],
+ [CONFIG_SCSI_EATA_PIO],
+ [scsi])
+AC_Linux_DRIVER([fdomain],
+ [SCSI controller Future Domain 16xx],
+ [CONFIG_SCSI_FUTURE_DOMAIN],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([g_NCR5380],
+ [SCSI controller Generic NCR5380/53c400 (ncr5380, ncr53c400)],
+ [CONFIG_SCSI_GENERIC_NCR5380],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([gdth],
+ [GDT SCSI Disk Array Controller],
+ [CONFIG_SCSI_GDTH],
+ [scsi])
+AC_Linux_DRIVER([in2000],
+ [SCSI controller Always IN 2000],
+ [CONFIG_SCSI_IN2000],
+ [scsi])
+AC_Linux_DRIVER([ncr53c8xx],
+ [SCSI controller NCR53C8XX (ncr53c8xx, dc390f, dc390u, dc390w)],
+ [CONFIG_SCSI_NCR53C8XX],
+ [scsi])
+AC_Linux_DRIVER([pas16],
+ [SCSI controller PAS16],
+ [CONFIG_SCSI_PASS16],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([ppa],
+ [IOMEGA Parallel Port ZIP drive],
+ [CONFIG_SCSI_PPA],
+ [scsi])
+AC_Linux_DRIVER([qlogicfas],
+ [SCSI controller Qlogic FAS],
+ [CONFIG_SCSI_QLOGIC_FAS],
+ [scsi])
+AC_Linux_DRIVER([qlogicisp],
+ [SCSI controller Qlogic ISP],
+ [CONFIG_SCSI_QLOGIC_ISP],
+ [scsi])
+AC_Linux_DRIVER([seagate],
+ [SCSI controller Seagate ST02, Future Domain TMC-8xx],
+ [CONFIG_SCSI_SEAGATE],
+ [scsi])
+AC_Linux_DRIVER([sym53c8xx],
+ [SCSI controller Symbios 53C8XX],
+ [CONFIG_SCSI_SYM53C8XX],
+ [scsi])
+AC_Linux_DRIVER([t128],
+ [SCSI controller Trantor T128/T128F/T228 (t128, t128f, t228)],
+ [CONFIG_SCSI_T128],
+ [scsi])
+AC_Linux_DRIVER([tmscsim],
+ [SCSI controller Tekram DC-390(T) (dc390, dc390t)],
+ [CONFIG_SCSI_DC390T],
+ [scsi])
+AC_Linux_DRIVER([u14-34f],
+ [SCSI controller UltraStor 14F/34F],
+ [CONFIG_SCSI_U14_34F],
+ [scsi])
+AC_Linux_DRIVER([ultrastor],
+ [SCSI controller UltraStor],
+ [CONFIG_SCSI_ULTRASTOR],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([wd7000],
+ [SCSI controller WD 7000],
+ [CONFIG_SCSI_7000FASST],
+ [scsi])
+
+dnl Ethernet controllers.
+AC_OPTION_Linux_group([net], [Network drivers])
+
+AC_Linux_DRIVER([3c501],
+ [Ethernet controller 3COM 501 (3c501) / Etherlink I],
+ [CONFIG_EL1],
+ [net])
+AC_Linux_DRIVER([3c503],
+ [Ethernet controller 3Com 503 (3c503) / Etherlink II],
+ [CONFIG_EL2],
+ [net])
+AC_Linux_DRIVER([3c505],
+ [Ethernet controller 3Com 505 (3c505, elplus)],
+ [CONFIG_ELPLUS],
+ [net])
+AC_Linux_DRIVER([3c507],
+ [Ethernet controller 3Com 507 (3c507, el16)],
+ [CONFIG_EL16],
+ [net])
+AC_Linux_DRIVER([3c509],
+ [Ethernet controller 3Com 509/579 (3c509, 3c579) / Etherlink III],
+ [CONFIG_EL3],
+ [net])
+AC_Linux_DRIVER([3c59x],
+ [Ethernet controller 3Com 59x/90x
+ (3c59x, 3c590, 3c592, 3c595, 3c597, 3c90x, 3c900, 3c905)
+ "Vortex/Boomerang"],
+ [CONFIG_VORTEX],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([3c515],
+ [Ethernet controller 3Com 515 ISA Fast EtherLink],
+ [CONFIG_3C515],
+ [net])
+AC_Linux_DRIVER([ac3200],
+ [Ethernet controller Ansel Communications EISA 3200],
+ [CONFIG_AC3200],
+ [net])
+AC_Linux_DRIVER([apricot],
+ [Ethernet controller Apricot XEN-II on board ethernet],
+ [CONFIG_APRICOT],
+ [net])
+AC_Linux_DRIVER([at1700],
+ [Ethernet controller AT1700 (Fujitsu 86965)],
+ [CONFIG_AT1700],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([atp],
+ [Ethernet controller AT-LAN-TEC/RealTek pocket adaptor],
+ [CONFIG_ATP],
+ [net])
+dnl FIXME: Can't be enabled since it is a pcmcia driver, and we don't
+dnl have that kind of fluff.
+dnl linux_DRIVER([cb_shim], [CB_SHIM], [cb_shim], [net])
+AC_Linux_DRIVER([de4x5],
+ [Ethernet controller DE4x5 (de4x5, de425, de434, de435, de450, de500)],
+ [CONFIG_DE4X5],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([de600],
+ [Ethernet controller D-Link DE-600],
+ [CONFIG_DE600],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([de620],
+ [Ethernet controller D-Link DE-620],
+ [CONFIG_DE620],
+ [net])
+AC_Linux_DRIVER([depca],
+ [Ethernet controller DEPCA
+ (de100, de101, de200, de201, de202, de210, de422)],
+ [CONFIG_DEPCA],
+ [net])
+AC_Linux_DRIVER([e2100],
+ [Ethernet controller Cabletron E21xx],
+ [CONFIG_E2100],
+ [net])
+AC_Linux_DRIVER([eepro],
+ [Ethernet controller EtherExpressPro],
+ [CONFIG_EEXPRESS_PRO],
+ [net])
+AC_Linux_DRIVER([eepro100],
+ [Ethernet controller Intel EtherExpressPro PCI 10+/100B/100+],
+ [CONFIG_EEXPRESS_PRO100B],
+ [net])
+AC_Linux_DRIVER([eexpress],
+ [Ethernet controller EtherExpress 16],
+ [CONFIG_EEXPRESS],
+ [net])
+AC_Linux_DRIVER([epic100],
+ [Ethernet controller SMC 83c170/175 EPIC/100 (epic, epic100) / EtherPower II],
+ [CONFIG_EPIC],
+ [net])
+AC_Linux_DRIVER([eth16i],
+ [Ethernet controller ICL EtherTeam 16i/32 (eth16i, eth32)],
+ [CONFIG_ETH16I],
+ [net])
+AC_Linux_DRIVER([ewrk3],
+ [Ethernet controller EtherWORKS 3 (ewrk3, de203, de204, de205)],
+ [CONFIG_EWRK3],
+ [net])
+AC_Linux_DRIVER([fmv18x],
+ [Ethernet controller FMV-181/182/183/184],
+ [CONFIG_FMV18X],
+ [net])
+AC_Linux_DRIVER([hamachi],
+ [Ethernet controller Packet Engines "Hamachi" GNIC-2 Gigabit Ethernet],
+ [CONFIG_HAMACHI],
+ [net])
+AC_Linux_DRIVER([hp-plus],
+ [Ethernet controller HP PCLAN+ (27247B and 27252A)],
+ [CONFIG_HPLAN_PLUS],
+ [net])
+AC_Linux_DRIVER([hp],
+ [Ethernet controller HP PCLAN (27245 and other 27xxx series)],
+ [CONFIG_HPLAN],
+ [net])
+AC_Linux_DRIVER([hp100],
+ [Ethernet controller HP 10/100VG PCLAN (ISA, EISA, PCI)
+ (hp100, hpj2577, hpj2573, hpj2585, hp27248b)],
+ [CONFIG_HP100],
+ [net])
+AC_Linux_DRIVER([intel-gige],
+ [Ethernet controller Intel PCI Gigabit Ethernet],
+ [CONFIG_INTEL_GIGE],
+ [net])
+AC_Linux_DRIVER([lance],
+ [Ethernet controller AMD LANCE and PCnet (at1500, ne2100)],
+ [CONFIG_LANCE],
+ [net])
+AC_Linux_DRIVER([myson803],
+ [Ethernet controller Myson MTD803 Ethernet adapter series],
+ [CONFIG_MYSON803],
+ [net])
+AC_Linux_DRIVER([natsemi],
+ [Ethernet controller National Semiconductor DP8381x series PCI Ethernet],
+ [CONFIG_NATSEMI],
+ [net])
+AC_Linux_DRIVER_qemu([ne],
+ [Ethernet controller NE2000/NE1000 ISA (ne, ne1000, ne2000)],
+ [CONFIG_NE2000],
+ [net])
+AC_Linux_DRIVER([ne2k-pci],
+ [Ethernet controller PCI NE2000],
+ [CONFIG_NE2K_PCI],
+ [net])
+AC_Linux_DRIVER([ni52],
+ [Ethernet controller NI5210],
+ [CONFIG_NI52],
+ [net])
+AC_Linux_DRIVER([ni65],
+ [Ethernet controller NI6510],
+ [CONFIG_NI65],
+ [net])
+AC_Linux_DRIVER([ns820],
+ [Ethernet controller National Semiconductor DP8382x series PCI Ethernet],
+ [CONFIG_NS820],
+ [net])
+AC_Linux_DRIVER([pcnet32],
+ [Ethernet controller AMD PCI PCnet32 (PCI bus NE2100 cards)],
+ [CONFIG_PCNET32],
+ [net])
+AC_Linux_DRIVER([rtl8139],
+ [Ethernet controller RealTek 8129/8139 (rtl8129, rtl8139) (not 8019/8029!)],
+ [CONFIG_RTL8139],
+ [net])
+AC_Linux_DRIVER([seeq8005],
+ [Ethernet controller Seeq8005],
+ [CONFIG_SEEQ8005],
+ [net])
+AC_Linux_DRIVER([sis900],
+ [Ethernet controller SiS 900],
+ [CONFIG_SIS900],
+ [net])
+AC_Linux_DRIVER([sk_g16],
+ [Ethernet controller Schneider & Koch G16],
+ [CONFIG_SK_G16],
+ [net])
+AC_Linux_DRIVER([smc-ultra],
+ [Ethernet controller SMC Ultra],
+ [CONFIG_ULTRA],
+ [net])
+AC_Linux_DRIVER([smc-ultra32],
+ [Ethernet controller SMC Ultra32],
+ [CONFIG_ULTRA32],
+ [net])
+AC_Linux_DRIVER([starfire],
+ [Ethernet controller Adaptec Starfire network adapter],
+ [CONFIG_STARFIRE],
+ [net])
+AC_Linux_DRIVER([sundance],
+ [Ethernet controller Sundance ST201 "Alta" PCI Ethernet],
+ [CONFIG_SUNDANCE],
+ [net])
+AC_Linux_DRIVER([tlan],
+ [Ethernet controller TI ThunderLAN],
+ [CONFIG_TLAN],
+ [net])
+AC_Linux_DRIVER([tulip],
+ [Ethernet controller DECchip Tulip (dc21x4x) PCI (elcp, tulip)],
+ [CONFIG_DEC_ELCP],
+ [net])
+AC_Linux_DRIVER([via-rhine],
+ [Ethernet controller VIA Rhine],
+ [CONFIG_VIA_RHINE],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([wavelan],
+ [Ethernet controller AT&T WaveLAN & DEC RoamAbout DS],
+ [CONFIG_WAVELAN],
+ [net])
+AC_Linux_DRIVER([wd],
+ [Ethernet controller WD80x3],
+ [CONFIG_WD80x3],
+ [net])
+AC_Linux_DRIVER([winbond-840],
+ [Ethernet controller Winbond W89c840 PCI Ethernet],
+ [CONFIG_WINBOND840],
+ [net])
+AC_Linux_DRIVER([yellowfin],
+ [Ethernet controller Packet Engines Yellowfin Gigabit-NIC],
+ [CONFIG_YELLOWFIN],
+ [net])
+AC_Linux_DRIVER([znet],
+ [Ethernet controller Zenith Z-Note (znet, znote)],
+ [CONFIG_ZNET],
+ [net])
+
+dnl PCMCIA device support.
+AC_OPTION_Linux_group([pcmcia], [PCMCIA drivers])
+
+AC_Linux_DRIVER([i82365],
+ [Intel 82365 PC Card controller],
+ [CONFIG_I82365],
+ [pcmcia])
+
+AC_OPTION_Linux_ix86_at([pcmcia-isa],
+ [isa bus support in the pcmcia core; on ix86-at enabled by default],
+ [CONFIG_ISA],
+ [pcmcia])
+
+dnl PCMCIA device drivers.
+
+AC_Linux_DRIVER([3c574_cs],
+ [3Com 3c574 ``RoadRunner'' PCMCIA Ethernet],
+ [CONFIG_3C574_CS],
+ [pcmcia])
+AC_Linux_DRIVER([3c589_cs],
+ [3Com 3c589 PCMCIA Ethernet card],
+ [CONFIG_3C589_CS],
+ [pcmcia])
+AC_Linux_DRIVER([axnet_cs],
+ [Asix AX88190-based PCMCIA Ethernet adapters],
+ [CONFIG_AXNET_CS],
+ [pcmcia])
+AC_Linux_DRIVER([fmvj18x_cs],
+ [fmvj18x chipset based PCMCIA Ethernet cards],
+ [CONFIG_FMVJ18X_CS],
+ [pcmcia])
+AC_Linux_DRIVER([nmclan_cs],
+ [New Media Ethernet LAN PCMCIA cards],
+ [CONFIG_NMCLAN_CS],
+ [pcmcia])
+AC_Linux_DRIVER([pcnet_cs],
+ [NS8390-based PCMCIA cards],
+ [CONFIG_PCNET_CS],
+ [pcmcia])
+AC_Linux_DRIVER([smc91c92_cs],
+ [SMC91c92-based PCMCIA cards],
+ [CONFIG_SMC91C92_CS],
+ [pcmcia])
+AC_Linux_DRIVER([xirc2ps_cs],
+ [Xircom CreditCard and Realport PCMCIA ethernet],
+ [CONFIG_XIRC2PS_CS],
+ [pcmcia])
+
+dnl Wireless device drivers.
+AC_OPTION_Linux_group([wireless], [Wireless drivers])
+
+AC_Linux_DRIVER([orinoco_cs],
+ [Hermes or Prism 2 PCMCIA Wireless adapters (Orinoco)],
+ [CONFIG_ORINOCO_CS],
+ [wireless])
+
+#
+# Process device driver groups: kinds of drivers that have gobs of source files
+# that get brought in, need special symbols defined, etc.
+#
+
+[if [ x"$device_driver_group_block" = xselected ]; then]
+ AC_DEFINE([CONFIG_BLOCK], [1], [CONFIG_BLOCK])
+ AM_CONDITIONAL([device_driver_group_block], [true])
+[else] AM_CONDITIONAL([device_driver_group_block], [false])
+[fi
+
+if [ x"$device_driver_group_net" = xselected ]; then]
+ AC_DEFINE([CONFIG_INET], [1], [CONFIG_INET])
+ AM_CONDITIONAL([device_driver_group_net], [true])
+[else] AM_CONDITIONAL([device_driver_group_net], [false])
+[fi
+
+if [ x"$device_driver_group_pcmcia" = xselected ]; then]
+ AC_DEFINE([CONFIG_PCMCIA], [1], [CONFIG_PCMCIA])
+ AM_CONDITIONAL([device_driver_group_pcmcia], [true])
+[else] AM_CONDITIONAL([device_driver_group_pcmcia], [false])
+[fi
+
+if [ x"$device_driver_group_scsi" = xselected ]; then]
+ AC_DEFINE([CONFIG_SCSI], [1], [CONFIG_SCSI])
+ AM_CONDITIONAL([device_driver_group_scsi], [true])
+[else] AM_CONDITIONAL([device_driver_group_scsi], [false])
+[fi
+
+if [ x"$device_driver_group_wireless" = xselected ]; then]
+ AC_DEFINE([CONFIG_WIRELESS], [1], [CONFIG_WIRELESS])
+[fi]
+
+#
+# Internals.
+#
+
+AC_DEFUN([hurd_host_CPU], [
+ AC_DEFINE([CONFIG_M$1], [1], [$1])
+ AC_DEFINE([CPU], [$1], [CPU])])
+
+[if [ "$have_linux_code" = yes ]; then]
+ AM_CONDITIONAL([CODE_linux], [true])
+
+ [case $host_cpu in
+ i386)]
+ hurd_host_CPU([386])[;;
+ i486)]
+ hurd_host_CPU([486])[;;
+ i586)]
+ hurd_host_CPU([586])[;;
+ i686)]
+ hurd_host_CPU([686])[;;
+ *)
+ # TODO. Warn here?]
+ hurd_host_CPU([486])[;;
+ esac]
+
+ # The glue code dependent code checks for this.
+ AC_DEFINE([LINUX_DEV], [1], [Linux device drivers.])
+ # Instead of Mach's KERNEL, Linux uses __KERNEL__. Whee.
+ AC_DEFINE([__KERNEL__], [1], [__KERNEL__])
+ [if [ $mach_ncpus -gt 1 ]; then]
+ AC_DEFINE([__SMP__], [1], [__SMP__])
+ [fi]
+
+ # Set up `asm-SYSTYPE' links.
+ AC_CONFIG_LINKS([linux/src/include/asm:linux/src/include/asm-$systype
+ linux/dev/include/asm:linux/dev/include/asm-$systype])
+[else] AM_CONDITIONAL([CODE_linux], [false])
+[fi]
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/linux/dev/README b/linux/dev/README
new file mode 100644
index 0000000..c3ceca1
--- /dev/null
+++ b/linux/dev/README
@@ -0,0 +1,8 @@
+This hierarchy used to contain modified files, based on files from the
+Linux kernel, as opposed to `../src/' containing only files that have not
+been modified (or have only been modified marginally). This policy is
+NO longer adhered to, so please don't change (or even add) files
+below here, but instead merge the files in here back into `../src/'
+(which should really be called `../linux-2.0' or similar) or even better
+--- when adding large chunks --- create a more suitable hierarchy like
+we've done with `../pcmcia-cs/'.
diff --git a/linux/dev/arch/i386/kernel/irq.c b/linux/dev/arch/i386/kernel/irq.c
new file mode 100644
index 0000000..3b349cc
--- /dev/null
+++ b/linux/dev/arch/i386/kernel/irq.c
@@ -0,0 +1,775 @@
+/*
+ * Linux IRQ management.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+#include <kern/assert.h>
+#include <kern/cpu_number.h>
+
+#include <i386/spl.h>
+#include <i386/irq.h>
+#include <i386/pit.h>
+
+#define MACH_INCLUDE
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+
+#include <linux/dev/glue/glue.h>
+#include <machine/machspl.h>
+
+#include <device/intr.h>
+
+#if 0
+/* XXX: This is the way it's done in linux 2.2. GNU Mach currently uses intr_count. It should be made using local_{bh/irq}_count instead (through hardirq_enter/exit) for SMP support. */
+unsigned int local_bh_count[NR_CPUS];
+unsigned int local_irq_count[NR_CPUS];
+#else
+#define local_bh_count (&intr_count)
+#define local_irq_count (&intr_count)
+#endif
+
+/*
+ * XXX Move this into more suitable place...
+ * Set if the machine has an EISA bus.
+ */
+int EISA_bus = 0;
+
+/*
+ * Flag indicating an interrupt is being handled.
+ */
+unsigned int intr_count = 0;
+
+/*
+ * List of Linux interrupt handlers.
+ */
+struct linux_action
+{
+ void (*handler) (int, void *, struct pt_regs *);
+ void *dev_id;
+ struct linux_action *next;
+ unsigned long flags;
+ user_intr_t *user_intr;
+};
+
+static struct linux_action *irq_action[NINTR] = {0};
+
+/*
+ * Generic interrupt handler for Linux devices.
+ * Set up a fake `struct pt_regs' then call the real handler.
+ */
+static void
+linux_intr (int irq)
+{
+ struct pt_regs regs;
+ struct linux_action *action = *(irq_action + irq);
+ struct linux_action **prev = &irq_action[irq];
+ unsigned long flags;
+
+ kstat.interrupts[irq]++;
+ intr_count++;
+
+ save_flags (flags);
+ if (action && (action->flags & SA_INTERRUPT))
+ cli ();
+
+ while (action)
+ {
+ // TODO I might need to check whether the interrupt belongs to
+ // the current device. But I don't do it for now.
+ if (action->user_intr)
+ {
+ if (!deliver_user_intr(&irqtab, irq, action->user_intr))
+ {
+ *prev = action->next;
+ linux_kfree(action);
+ action = *prev;
+ continue;
+ }
+ }
+ else if (action->handler)
+ action->handler (irq, action->dev_id, &regs);
+ prev = &action->next;
+ action = action->next;
+ }
+
+ if (!irq_action[irq])
+ {
+ /* No handler any more, disable interrupt */
+ mask_irq (irq);
+ ivect[irq] = intnull;
+ iunit[irq] = irq;
+ }
+
+ restore_flags (flags);
+
+ intr_count--;
+}
+
+/* IRQ mask according to Linux drivers */
+static unsigned linux_pic_mask;
+
+/* These only record that Linux requested to mask IRQs */
+void
+disable_irq (unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned mask = 1U << irq_nr;
+
+ save_flags (flags);
+ cli ();
+ if (!(linux_pic_mask & mask))
+ {
+ linux_pic_mask |= mask;
+ __disable_irq(irq_nr);
+ }
+ restore_flags (flags);
+}
+
+void
+enable_irq (unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned mask = 1U << irq_nr;
+
+ save_flags (flags);
+ cli ();
+ if (linux_pic_mask & mask)
+ {
+ linux_pic_mask &= ~mask;
+ __enable_irq(irq_nr);
+ }
+ restore_flags (flags);
+}
+
+static int
+setup_x86_irq (int irq, struct linux_action *new)
+{
+ int shared = 0;
+ struct linux_action *old, **p;
+ unsigned long flags;
+
+ p = irq_action + irq;
+ if ((old = *p) != NULL)
+ {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ))
+ return (-EBUSY);
+
+ /* Can't share interrupts unless both are same type */
+ if ((old->flags ^ new->flags) & SA_INTERRUPT)
+ return (-EBUSY);
+
+ /* add new interrupt at end of irq queue */
+ do
+ {
+ p = &old->next;
+ old = *p;
+ }
+ while (old);
+ shared = 1;
+ }
+
+ save_flags (flags);
+ cli ();
+ *p = new;
+
+ if (!shared)
+ {
+ ivect[irq] = linux_intr;
+ iunit[irq] = irq;
+ unmask_irq (irq);
+ }
+ restore_flags (flags);
+ return 0;
+}
+
+int
+install_user_intr_handler (struct irqdev *dev, int id, unsigned long flags,
+ user_intr_t *user_intr)
+{
+ struct linux_action *action;
+ struct linux_action *old;
+ int retval;
+
+ unsigned int irq = dev->irq[id];
+
+ assert (irq < NINTR);
+
+ /* Test whether the irq handler has been set */
+ // TODO I need to protect the array when iterating it.
+ old = irq_action[irq];
+ while (old)
+ {
+ if (old->user_intr && old->user_intr->dst_port == user_intr->dst_port)
+ {
+ printk ("The interrupt handler has already been installed on line %d", irq);
+ return linux_to_mach_error (-EAGAIN);
+ }
+ old = old->next;
+ }
+
+ /*
+ * Hmm... Should I use `kalloc()' ?
+ * By OKUJI Yoshinori.
+ */
+ action = (struct linux_action *)
+ linux_kmalloc (sizeof (struct linux_action), GFP_KERNEL);
+ if (action == NULL)
+ return linux_to_mach_error (-ENOMEM);
+
+ action->handler = NULL;
+ action->next = NULL;
+ action->dev_id = NULL;
+ action->flags = SA_SHIRQ;
+ action->user_intr = user_intr;
+
+ retval = setup_x86_irq (irq, action);
+ if (retval)
+ linux_kfree (action);
+
+ return linux_to_mach_error (retval);
+}
+
+/*
+ * Attach a handler to an IRQ.
+ */
+int
+request_irq (unsigned int irq, void (*handler) (int, void *, struct pt_regs *),
+ unsigned long flags, const char *device, void *dev_id)
+{
+ struct linux_action *action;
+ int retval;
+
+ assert (irq < NINTR);
+
+ if (!handler)
+ return -EINVAL;
+
+ /*
+ * Hmm... Should I use `kalloc()' ?
+ * By OKUJI Yoshinori.
+ */
+ action = (struct linux_action *)
+ linux_kmalloc (sizeof (struct linux_action), GFP_KERNEL);
+ if (action == NULL)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->next = NULL;
+ action->dev_id = dev_id;
+ action->flags = flags;
+ action->user_intr = NULL;
+
+ retval = setup_x86_irq (irq, action);
+ if (retval)
+ linux_kfree (action);
+
+ return retval;
+}
+
+/*
+ * Deallocate an irq.
+ */
+void
+free_irq (unsigned int irq, void *dev_id)
+{
+ struct linux_action *action, **p;
+ unsigned long flags;
+
+ if (irq >= NINTR)
+ panic ("free_irq: bad irq number");
+
+ for (p = irq_action + irq; (action = *p) != NULL; p = &action->next)
+ {
+ if (action->dev_id != dev_id)
+ continue;
+
+ save_flags (flags);
+ cli ();
+ *p = action->next;
+ if (!irq_action[irq])
+ {
+ mask_irq (irq);
+ ivect[irq] = intnull;
+ iunit[irq] = irq;
+ }
+ restore_flags (flags);
+ linux_kfree (action);
+ return;
+ }
+
+ panic ("free_irq: bad irq number");
+}
+
+/*
+ * Set for an irq probe.
+ */
+unsigned long
+probe_irq_on (void)
+{
+ unsigned i, irqs = 0;
+ unsigned long delay;
+
+ assert (curr_ipl[cpu_number()] == 0);
+
+ /*
+ * Allocate all available IRQs.
+ */
+ for (i = NINTR - 1; i > 0; i--)
+ {
+ if (!irq_action[i] && ivect[i] == intnull)
+ {
+ enable_irq (i);
+ irqs |= 1 << i;
+ }
+ }
+
+ /*
+ * Wait for spurious interrupts to mask themselves out.
+ */
+ for (delay = jiffies + HZ / 10; delay > jiffies;)
+ ;
+
+ return (irqs & ~linux_pic_mask);
+}
+
+/*
+ * Return the result of an irq probe.
+ */
+int
+probe_irq_off (unsigned long irqs)
+{
+ unsigned int i;
+
+ assert (curr_ipl[cpu_number()] == 0);
+
+ irqs &= linux_pic_mask;
+
+ /*
+ * Disable unnecessary IRQs.
+ */
+ for (i = NINTR - 1; i > 0; i--)
+ {
+ if (!irq_action[i] && ivect[i] == intnull)
+ {
+ disable_irq (i);
+ }
+ }
+
+ /*
+ * Return IRQ number.
+ */
+ if (!irqs)
+ return 0;
+ i = ffz (~irqs);
+ if (irqs != (irqs & (1 << i)))
+ i = -i;
+ return i;
+}
+
+/*
+ * Reserve IRQs used by Mach drivers.
+ * Must be called before Linux IRQ detection, after Mach IRQ detection.
+ */
+
+static void reserved_mach_handler (int line, void *cookie, struct pt_regs *regs)
+{
+ /* These interrupts are actually handled in Mach. */
+ assert (! "reached");
+}
+
+static const struct linux_action reserved_mach =
+ {
+ reserved_mach_handler, NULL, NULL, 0
+ };
+
+static void
+reserve_mach_irqs (void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NINTR; i++)
+ {
+ if (ivect[i] != intnull)
+ /* This dummy action does not specify SA_SHIRQ, so
+ setup_x86_irq will not try to add a handler to this
+ slot. Therefore, the cast is safe. */
+ irq_action[i] = (struct linux_action *) &reserved_mach;
+ }
+}
+
+#ifdef __SMP__
+unsigned char global_irq_holder = NO_PROC_ID;
+unsigned volatile int global_irq_lock;
+atomic_t global_irq_count;
+
+atomic_t global_bh_count;
+atomic_t global_bh_lock;
+
+/*
+ * "global_cli()" is a special case, in that it can hold the
+ * interrupts disabled for a longish time, and also because
+ * we may be doing TLB invalidates when holding the global
+ * IRQ lock for historical reasons. Thus we may need to check
+ * SMP invalidate events specially by hand here (but not in
+ * any normal spinlocks)
+ */
+#if 0
+/* XXX: check how Mach handles this */
+static inline void check_smp_invalidate(int cpu)
+{
+ if (test_bit(cpu, &smp_invalidate_needed)) {
+ clear_bit(cpu, &smp_invalidate_needed);
+ local_flush_tlb();
+ }
+}
+#endif
+
+static void show(char * str)
+{
+ int i;
+ unsigned long *stack;
+ int cpu = smp_processor_id();
+
+ printk("\n%s, CPU %d:\n", str, cpu);
+ printk("irq: %d [%d %d]\n",
+ atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
+ printk("bh: %d [%d %d]\n",
+ atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
+ stack = (unsigned long *) &stack;
+ for (i = 40; i ; i--) {
+ unsigned long x = *++stack;
+ //if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) {
+ printk("<[%08lx]> ", x);
+ //}
+ }
+}
+
+#define MAXCOUNT 100000000
+
+static inline void wait_on_bh(void)
+{
+ int count = MAXCOUNT;
+ do {
+ if (!--count) {
+ show("wait_on_bh");
+ count = ~0;
+ }
+ /* nothing .. wait for the other bh's to go away */
+ } while (atomic_read(&global_bh_count) != 0);
+}
+
+/*
+ * I had a lockup scenario where a tight loop doing
+ * spin_unlock()/spin_lock() on CPU#1 was racing with
+ * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
+ * apparently the spin_unlock() information did not make it
+ * through to CPU#0 ... nasty, is this by design, do we have to limit
+ * 'memory update oscillation frequency' artificially like here?
+ *
+ * Such 'high frequency update' races can be avoided by careful design, but
+ * some of our major constructs like spinlocks use similar techniques,
+ * it would be nice to clarify this issue. Set this define to 0 if you
+ * want to check whether your system freezes. I suspect the delay done
+ * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
+ * i thought that such things are guaranteed by design, since we use
+ * the 'LOCK' prefix.
+ */
+#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1
+
+#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
+# define SYNC_OTHER_CORES(x) udelay(x+1)
+#else
+/*
+ * We have to allow irqs to arrive between __sti and __cli
+ */
+# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
+#endif
+
+static inline void wait_on_irq(int cpu)
+{
+ int count = MAXCOUNT;
+
+ for (;;) {
+
+ /*
+ * Wait until all interrupts are gone. Wait
+ * for bottom half handlers unless we're
+ * already executing in one..
+ */
+ if (!atomic_read(&global_irq_count)) {
+ if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
+ break;
+ }
+
+ /* Duh, we have to loop. Release the lock to avoid deadlocks */
+ clear_bit(0,&global_irq_lock);
+
+ for (;;) {
+ if (!--count) {
+ show("wait_on_irq");
+ count = ~0;
+ }
+ __sti();
+ SYNC_OTHER_CORES(cpu);
+ __cli();
+ //check_smp_invalidate(cpu);
+ if (atomic_read(&global_irq_count))
+ continue;
+ if (global_irq_lock)
+ continue;
+ if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
+ continue;
+ if (!test_and_set_bit(0,&global_irq_lock))
+ break;
+ }
+ }
+}
+
+/*
+ * This is called when we want to synchronize with
+ * bottom half handlers. We need to wait until
+ * no other CPU is executing any bottom half handler.
+ *
+ * Don't wait if we're already running in an interrupt
+ * context or are inside a bh handler.
+ */
+void synchronize_bh(void)
+{
+ if (atomic_read(&global_bh_count) && !in_interrupt())
+ wait_on_bh();
+}
+
+/*
+ * This is called when we want to synchronize with
+ * interrupts. We may for example tell a device to
+ * stop sending interrupts: but to make sure there
+ * are no interrupts that are executing on another
+ * CPU we need to call this function.
+ */
+void synchronize_irq(void)
+{
+ if (atomic_read(&global_irq_count)) {
+ /* Stupid approach */
+ cli();
+ sti();
+ }
+}
+
+static inline void get_irqlock(int cpu)
+{
+ if (test_and_set_bit(0,&global_irq_lock)) {
+ /* do we already hold the lock? */
+ if ((unsigned char) cpu == global_irq_holder)
+ return;
+ /* Uhhuh.. Somebody else got it. Wait.. */
+ do {
+ do {
+ //check_smp_invalidate(cpu);
+ } while (test_bit(0,&global_irq_lock));
+ } while (test_and_set_bit(0,&global_irq_lock));
+ }
+ /*
+ * We also need to make sure that nobody else is running
+ * in an interrupt context.
+ */
+ wait_on_irq(cpu);
+
+ /*
+ * Ok, finally..
+ */
+ global_irq_holder = cpu;
+}
+
+#define EFLAGS_IF_SHIFT 9
+
+/*
+ * A global "cli()" while in an interrupt context
+ * turns into just a local cli(). Interrupts
+ * should use spinlocks for the (very unlikely)
+ * case that they ever want to protect against
+ * each other.
+ *
+ * If we already have local interrupts disabled,
+ * this will not turn a local disable into a
+ * global one (problems with spinlocks: this makes
+ * save_flags+cli+sti usable inside a spinlock).
+ */
+void __global_cli(void)
+{
+ unsigned int flags;
+
+ __save_flags(flags);
+ if (flags & (1 << EFLAGS_IF_SHIFT)) {
+ int cpu = smp_processor_id();
+ __cli();
+ if (!local_irq_count[cpu])
+ get_irqlock(cpu);
+ }
+}
+
+void __global_sti(void)
+{
+ int cpu = smp_processor_id();
+
+ if (!local_irq_count[cpu])
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
+ */
+unsigned long __global_save_flags(void)
+{
+ int retval;
+ int local_enabled;
+ unsigned long flags;
+
+ __save_flags(flags);
+ local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
+ /* default to local */
+ retval = 2 + local_enabled;
+
+ /* check for global flags if we're not in an interrupt */
+ if (!local_irq_count[smp_processor_id()]) {
+ if (local_enabled)
+ retval = 1;
+ if (global_irq_holder == (unsigned char) smp_processor_id())
+ retval = 0;
+ }
+ return retval;
+}
+
+void __global_restore_flags(unsigned long flags)
+{
+ switch (flags) {
+ case 0:
+ __global_cli();
+ break;
+ case 1:
+ __global_sti();
+ break;
+ case 2:
+ __cli();
+ break;
+ case 3:
+ __sti();
+ break;
+ default:
+ printk("global_restore_flags: %08lx (%08lx)\n",
+ flags, (&flags)[-1]);
+ }
+}
+
+#endif
+
+static void (*old_clock_handler) ();
+
+void
+init_IRQ (void)
+{
+ char *p;
+ int latch = (CLKNUM + hz / 2) / hz;
+
+ /*
+ * Ensure interrupts are disabled.
+ */
+ (void) splhigh ();
+
+#ifndef APIC
+ /*
+ * Program counter 0 of 8253 to interrupt hz times per second.
+ */
+ outb_p (PIT_C0 | PIT_SQUAREMODE | PIT_READMODE, PITCTL_PORT);
+ outb_p (latch & 0xff, PITCTR0_PORT);
+ outb (latch >> 8, PITCTR0_PORT);
+
+ /*
+ * Install our clock interrupt handler.
+ */
+ old_clock_handler = ivect[0];
+ ivect[0] = linux_timer_intr;
+#endif
+
+ reserve_mach_irqs ();
+
+ /*
+ * Enable interrupts.
+ */
+ (void) spl0 ();
+
+ /*
+ * Check if the machine has an EISA bus.
+ */
+ p = (char *) phystokv(0x0FFFD9);
+ if (*p++ == 'E' && *p++ == 'I' && *p++ == 'S' && *p == 'A')
+ EISA_bus = 1;
+
+ /*
+ * Permanently allocate standard device ports.
+ */
+ request_region (0x00, 0x20, "dma1");
+ request_region (0x20, 0x20, "pic1");
+ request_region (0x40, 0x20, "timer");
+ request_region (0x70, 0x10, "rtc");
+ request_region (0x80, 0x20, "dma page reg");
+ request_region (0xa0, 0x20, "pic2");
+ request_region (0xc0, 0x20, "dma2");
+ request_region (0xf0, 0x10, "npu");
+}
+
+void
+restore_IRQ (void)
+{
+ /*
+ * Disable interrupts.
+ */
+ (void) splhigh ();
+
+#ifndef APIC
+ /*
+ * Restore clock interrupt handler.
+ */
+ ivect[0] = old_clock_handler;
+#endif
+}
+
diff --git a/linux/dev/arch/i386/kernel/setup.c b/linux/dev/arch/i386/kernel/setup.c
new file mode 100644
index 0000000..92b782a
--- /dev/null
+++ b/linux/dev/arch/i386/kernel/setup.c
@@ -0,0 +1,13 @@
+char x86 =
+#if defined(CONFIG_M386)
+3;
+#elif defined(CONFIG_M486)
+4;
+#elif defined(CONFIG_M586)
+5;
+#elif defined(CONFIG_M686)
+6;
+#else
+#error "CPU type is undefined!"
+#endif
+
diff --git a/linux/dev/drivers/block/ahci.c b/linux/dev/drivers/block/ahci.c
new file mode 100644
index 0000000..751c7ca
--- /dev/null
+++ b/linux/dev/drivers/block/ahci.c
@@ -0,0 +1,1038 @@
+/*
+ * Copyright (C) 2013 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <ahci.h>
+#include <kern/assert.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/bios32.h>
+#include <linux/major.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <asm/io.h>
+
+#define MAJOR_NR SCSI_DISK_MAJOR
+#include <linux/blk.h>
+
+/* Standard AHCI BAR for mmio */
+#define AHCI_PCI_BAR 5
+
+/* minor: 2 bits for device number, 6 bits for partition number. */
+
+#define MAX_PORTS 8
+#define PARTN_BITS 5
+#define PARTN_MASK ((1<<PARTN_BITS)-1)
+
+/* We need to use one DMA scatter element per physical page.
+ * ll_rw_block creates at most 8 buffer heads */
+/* See MAX_BUF */
+#define PRDTL_SIZE 8
+
+#define WAIT_MAX (1*HZ) /* Wait at most 1s for requests completion */
+
+/* AHCI standard structures */
+
+struct ahci_prdt {
+ u32 dba; /* Data base address */
+ u32 dbau; /* upper 32bit */
+ u32 rsv0; /* Reserved */
+
+ u32 dbc; /* Byte count bits 0-21,
+ * bit31 interrupt on completion. */
+};
+
+struct ahci_cmd_tbl {
+ u8 cfis[64];
+ u8 acmd[16];
+ u8 rsv[48];
+
+ struct ahci_prdt prdtl[PRDTL_SIZE];
+};
+
+struct ahci_command {
+ u32 opts; /* Command options */
+
+ u32 prdbc; /* Physical Region Descriptor byte count */
+
+ u32 ctba; /* Command Table Descriptor Base Address */
+ u32 ctbau; /* upper 32bit */
+
+ u32 rsv1[4]; /* Reserved */
+};
+
+struct ahci_fis_dma {
+ u8 fis_type;
+ u8 flags;
+ u8 rsved[2];
+ u64 id;
+ u32 rsvd;
+ u32 offset;
+ u32 count;
+ u32 resvd;
+};
+
+struct ahci_fis_pio {
+ u8 fis_type;
+ u8 flags;
+ u8 status;
+ u8 error;
+
+ u8 lba0;
+ u8 lba1;
+ u8 lba2;
+ u8 device;
+
+ u8 lba3;
+ u8 lba4;
+ u8 lba5;
+ u8 rsv2;
+
+ u8 countl;
+ u8 counth;
+ u8 rsv3;
+ u8 e_status;
+
+ u16 tc; /* Transfer Count */
+ u8 rsv4[2];
+};
+
+struct ahci_fis_d2h {
+ u8 fis_type;
+ u8 flags;
+ u8 status;
+ u8 error;
+
+ u8 lba0;
+ u8 lba1;
+ u8 lba2;
+ u8 device;
+
+ u8 lba3;
+ u8 lba4;
+ u8 lba5;
+ u8 rsv2;
+
+ u8 countl;
+ u8 counth;
+ u8 rsv3[2];
+
+ u8 rsv4[4];
+};
+
+struct ahci_fis_dev {
+ u8 rsvd[8];
+};
+
+struct ahci_fis_h2d {
+ u8 fis_type;
+ u8 flags;
+ u8 command;
+ u8 featurel;
+
+ u8 lba0;
+ u8 lba1;
+ u8 lba2;
+ u8 device;
+
+ u8 lba3;
+ u8 lba4;
+ u8 lba5;
+ u8 featureh;
+
+ u8 countl;
+ u8 counth;
+ u8 icc;
+ u8 control;
+
+ u8 rsv1[4];
+};
+
+struct ahci_fis_data {
+ u8 fis_type;
+ u8 flags;
+ u8 rsv1[2];
+ u32 data1[];
+};
+
+struct ahci_fis {
+ struct ahci_fis_dma dma_fis;
+ u8 pad0[4];
+
+ struct ahci_fis_pio pio_fis;
+ u8 pad1[12];
+
+ struct ahci_fis_d2h d2h_fis;
+ u8 pad2[4];
+
+ struct ahci_fis_dev dev_fis;
+
+ u8 ufis[64];
+
+ u8 rsv[0x100 - 0xa0];
+};
+
+struct ahci_port {
+ u32 clb; /* Command List Base address */
+ u32 clbu; /* upper 32bit */
+ u32 fb; /* FIS Base */
+ u32 fbu; /* upper 32bit */
+ u32 is; /* Interrupt Status */
+ u32 ie; /* Interrupt Enable */
+ u32 cmd; /* Command and Status */
+ u32 rsv0; /* Reserved */
+ u32 tfd; /* Task File Data */
+ u32 sig; /* Signature */
+ u32 ssts; /* SATA Status */
+ u32 sctl; /* SATA Control */
+ u32 serr; /* SATA Error */
+ u32 sact; /* SATA Active */
+ u32 ci; /* Command Issue */
+ u32 sntf; /* SATA Notification */
+ u32 fbs; /* FIS-based switch control */
+ u8 rsv1[0x70 - 0x44]; /* Reserved */
+ u8 vendor[0x80 - 0x70]; /* Vendor-specific */
+};
+
+struct ahci_host {
+ u32 cap; /* Host capabilities */
+ u32 ghc; /* Global Host Control */
+ u32 is; /* Interrupt Status */
+ u32 pi; /* Port Implemented */
+ u32 v; /* Version */
+ u32 ccc_ctl; /* Command Completion Coalescing control */
+ u32 ccc_pts; /* Command Completion Coalescing ports */
+ u32 em_loc; /* Enclosure Management location */
+ u32 em_ctrl; /* Enclosure Management control */
+ u32 cap2; /* Host capabilities extended */
+ u32 bohc; /* BIOS/OS Handoff Control and status */
+ u8 rsv[0xa0 - 0x2c]; /* Reserved */
+ u8 vendor[0x100 - 0xa0]; /* Vendor-specific */
+ struct ahci_port ports[]; /* Up to 32 ports */
+};
+
+/* Our own data */
+
+static struct port {
+ /* memory-mapped regions */
+ const volatile struct ahci_host *ahci_host;
+ const volatile struct ahci_port *ahci_port;
+
+ /* host-memory buffers */
+ struct ahci_command *command;
+ struct ahci_fis *fis;
+ struct ahci_cmd_tbl *prdtl;
+
+ struct hd_driveid id;
+ unsigned is_cd;
+ unsigned long long capacity; /* Nr of sectors */
+ u32 status; /* interrupt status */
+ unsigned cls; /* Command list maximum size.
+ We currently only use 1. */
+ struct wait_queue *q; /* IRQ wait queue */
+ struct hd_struct *part; /* drive partition table */
+ unsigned lba48; /* Whether LBA48 is supported */
+ unsigned identify; /* Whether we are just identifying
+ at boot */
+ struct gendisk *gd;
+} ports[MAX_PORTS];
+
+
+/* do_request() gets called by the block layer to push a request to the disk.
+ We just push one, and when an interrupt tells it's over, we call do_request()
+ ourselves again to push the next request, etc. */
+
+/* Request completed, either successfully or with an error */
+static void ahci_end_request(int uptodate)
+{
+ struct request *rq = CURRENT;
+ struct buffer_head *bh;
+
+ rq->errors = 0;
+ if (!uptodate) {
+ if (!rq->quiet)
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(rq->rq_dev), rq->sector);
+ }
+
+ for (bh = rq->bh; bh; )
+ {
+ struct buffer_head *next = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate (bh, uptodate);
+ unlock_buffer (bh);
+ bh = next;
+ }
+
+ CURRENT = rq->next;
+ if (rq->sem != NULL)
+ up(rq->sem);
+ rq->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+
+/* Push the request to the controller port */
+static int ahci_do_port_request(struct port *port, unsigned long long sector, struct request *rq)
+{
+ struct ahci_command *command = port->command;
+ struct ahci_cmd_tbl *prdtl = port->prdtl;
+ struct ahci_fis_h2d *fis_h2d;
+ unsigned slot = 0;
+ struct buffer_head *bh;
+ unsigned i;
+
+ rq->rq_status = RQ_SCSI_BUSY;
+
+ /* Shouldn't ever happen: the block glue is limited at 8 blocks */
+ assert(rq->nr_sectors < 0x10000);
+
+ fis_h2d = (void*) &prdtl[slot].cfis;
+ fis_h2d->fis_type = FIS_TYPE_REG_H2D;
+ fis_h2d->flags = 128;
+ if (port->lba48) {
+ if (sector >= 1ULL << 48) {
+ printk("sector %llu beyond LBA48\n", sector);
+ return -EOVERFLOW;
+ }
+ if (rq->cmd == READ)
+ fis_h2d->command = WIN_READDMA_EXT;
+ else
+ fis_h2d->command = WIN_WRITEDMA_EXT;
+ } else {
+ if (sector >= 1ULL << 28) {
+ printk("sector %llu beyond LBA28\n", sector);
+ return -EOVERFLOW;
+ }
+ if (rq->cmd == READ)
+ fis_h2d->command = WIN_READDMA;
+ else
+ fis_h2d->command = WIN_WRITEDMA;
+ }
+
+ fis_h2d->device = 1<<6; /* LBA */
+
+ fis_h2d->lba0 = sector;
+ fis_h2d->lba1 = sector >> 8;
+ fis_h2d->lba2 = sector >> 16;
+
+ fis_h2d->lba3 = sector >> 24;
+ fis_h2d->lba4 = sector >> 32;
+ fis_h2d->lba5 = sector >> 40;
+
+ fis_h2d->countl = rq->nr_sectors;
+ fis_h2d->counth = rq->nr_sectors >> 8;
+
+ command[slot].opts = sizeof(*fis_h2d) / sizeof(u32);
+
+ if (rq->cmd == WRITE)
+ command[slot].opts |= AHCI_CMD_WRITE;
+
+ for (i = 0, bh = rq->bh; bh; i++, bh = bh->b_reqnext)
+ {
+ assert(i < PRDTL_SIZE);
+ assert((((unsigned long) bh->b_data) & ~PAGE_MASK) ==
+ (((unsigned long) bh->b_data + bh->b_size - 1) & ~PAGE_MASK));
+ prdtl[slot].prdtl[i].dbau = 0;
+ prdtl[slot].prdtl[i].dba = vmtophys(bh->b_data);
+ prdtl[slot].prdtl[i].dbc = bh->b_size - 1;
+ }
+
+ command[slot].opts |= i << 16;
+
+ /* Make sure main memory buffers are up to date */
+ mb();
+
+ /* Issue command */
+ writel(1 << slot, &port->ahci_port->ci);
+
+ /* TODO: IRQ timeout handler */
+ return 0;
+}
+
+/* Called by block core to push a request */
+/* TODO: ideally, would have one request queue per port */
+/* TODO: ideally, would use tags to process several requests at a time */
+static void ahci_do_request() /* invoked with cli() */
+{
+ struct request *rq;
+ unsigned minor, unit;
+ unsigned long long block, blockend;
+ struct port *port;
+
+ rq = CURRENT;
+ if (!rq)
+ return;
+
+ if (rq->rq_status != RQ_ACTIVE)
+ /* Current one is already ongoing, let the interrupt handler
+ * push the new one when the current one is finished. */
+ return;
+
+ if (MAJOR(rq->rq_dev) != MAJOR_NR) {
+ printk("bad ahci major %u\n", MAJOR(rq->rq_dev));
+ goto kill_rq;
+ }
+
+ minor = MINOR(rq->rq_dev);
+ unit = minor >> PARTN_BITS;
+ if (unit >= MAX_PORTS) {
+ printk("bad ahci unit %u\n", unit);
+ goto kill_rq;
+ }
+
+ port = &ports[unit];
+
+ /* Compute start sector */
+ block = rq->sector;
+ block += port->part[minor & PARTN_MASK].start_sect;
+
+ /* And check end */
+ blockend = block + rq->nr_sectors;
+ if (blockend < block) {
+ if (!rq->quiet)
+ printk("bad blockend %lu vs %lu\n", (unsigned long) blockend, (unsigned long) block);
+ goto kill_rq;
+ }
+ if (blockend > port->capacity) {
+ if (!rq->quiet)
+ {
+ printk("offset for %u was %lu\n", minor, port->part[minor & PARTN_MASK].start_sect);
+ printk("bad access: block %lu, count= %lu\n", (unsigned long) blockend, (unsigned long) port->capacity);
+ }
+ goto kill_rq;
+ }
+
+ /* Push this to the port */
+ if (ahci_do_port_request(port, block, rq))
+ goto kill_rq;
+ return;
+
+kill_rq:
+ ahci_end_request(0);
+}
+
+/* The given port got an interrupt, terminate the current request if any */
+static void ahci_port_interrupt(struct port *port, u32 status)
+{
+ unsigned slot = 0;
+
+ if (readl(&port->ahci_port->ci) & (1 << slot)) {
+ /* Command still pending */
+ return;
+ }
+
+ if (port->identify) {
+ port->status = status;
+ wake_up(&port->q);
+ return;
+ }
+
+ if (!CURRENT || CURRENT->rq_status != RQ_SCSI_BUSY) {
+ /* No request currently running */
+ return;
+ }
+
+ if (status & (PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_IF_NONFATAL)) {
+ printk("ahci error %x %x\n", status, readl(&port->ahci_port->tfd));
+ ahci_end_request(0);
+ return;
+ }
+
+ ahci_end_request(1);
+}
+
+/* Start of IRQ handler. Iterate over all ports for this host */
+static void ahci_interrupt (int irq, void *host, struct pt_regs *regs)
+{
+ struct port *port;
+ struct ahci_host *ahci_host = host;
+ u32 irq_mask;
+ u32 status;
+
+ irq_mask = readl(&ahci_host->is);
+
+ if (!irq_mask)
+ return;
+
+ for (port = &ports[0]; port < &ports[MAX_PORTS]; port++) {
+ if (port->ahci_host == ahci_host && (irq_mask & (1 << (port->ahci_port - ahci_host->ports)))) {
+ status = readl(&port->ahci_port->is);
+ /* Clear interrupt before possibly triggering others */
+ writel(status, &port->ahci_port->is);
+ ahci_port_interrupt (port, status);
+ }
+ }
+
+ if (CURRENT)
+ /* Still some requests, queue another one */
+ ahci_do_request();
+
+ /* Clear host after clearing ports */
+ writel(irq_mask, &ahci_host->is);
+
+ /* unlock */
+}
+
+static int ahci_ioctl (struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int major, unit;
+
+ if (!inode || !inode->i_rdev)
+ return -EINVAL;
+
+ major = MAJOR(inode->i_rdev);
+ if (major != MAJOR_NR)
+ return -ENOTTY;
+
+ unit = DEVICE_NR(inode->i_rdev);
+ if (unit >= MAX_PORTS)
+ return -EINVAL;
+
+ switch (cmd) {
+ case BLKRRPART:
+ if (!suser()) return -EACCES;
+ if (!ports[unit].gd)
+ return -EINVAL;
+ resetup_one_dev(ports[unit].gd, unit);
+ return 0;
+ default:
+ return -EPERM;
+ }
+}
+
+static int ahci_open (struct inode *inode, struct file *file)
+{
+ int target;
+
+ if (MAJOR(inode->i_rdev) != MAJOR_NR)
+ return -ENXIO;
+
+ target = MINOR(inode->i_rdev) >> PARTN_BITS;
+ if (target >= MAX_PORTS)
+ return -ENXIO;
+
+ if (!ports[target].ahci_port)
+ return -ENXIO;
+
+ return 0;
+}
+
+static void ahci_release (struct inode *inode, struct file *file)
+{
+}
+
+static int ahci_fsync (struct inode *inode, struct file *file)
+{
+ printk("fsync\n");
+ return -ENOSYS;
+}
+
+static struct file_operations ahci_fops = {
+ .lseek = NULL,
+ .read = block_read,
+ .write = block_write,
+ .readdir = NULL,
+ .select = NULL,
+ .ioctl = ahci_ioctl,
+ .mmap = NULL,
+ .open = ahci_open,
+ .release = ahci_release,
+ .fsync = ahci_fsync,
+ .fasync = NULL,
+ .check_media_change = NULL,
+ .revalidate = NULL,
+};
+
+/* Disk timed out while processing identify, interrupt ahci_probe_port */
+static void identify_timeout(unsigned long data)
+{
+ struct port *port = (void*) data;
+
+ wake_up(&port->q);
+}
+
+static struct timer_list identify_timer = { .function = identify_timeout };
+
+static int ahci_identify(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port, struct port *port, unsigned cmd)
+{
+ struct hd_driveid id;
+ struct ahci_fis_h2d *fis_h2d;
+ struct ahci_command *command = port->command;
+ struct ahci_cmd_tbl *prdtl = port->prdtl;
+ unsigned long flags;
+ unsigned slot;
+ unsigned long first_part;
+ unsigned long long timeout;
+ int ret = 0;
+
+ /* Identify device */
+ /* TODO: make this a request */
+ slot = 0;
+
+ fis_h2d = (void*) &prdtl[slot].cfis;
+ fis_h2d->fis_type = FIS_TYPE_REG_H2D;
+ fis_h2d->flags = 128;
+ fis_h2d->command = cmd;
+ fis_h2d->device = 0;
+
+ /* Fetch the 512 identify data */
+ memset(&id, 0, sizeof(id));
+
+ command[slot].opts = sizeof(*fis_h2d) / sizeof(u32);
+
+ first_part = PAGE_ALIGN((unsigned long) &id) - (unsigned long) &id;
+
+ if (first_part && first_part < sizeof(id)) {
+ /* split over two pages */
+
+ command[slot].opts |= (2 << 16);
+
+ prdtl[slot].prdtl[0].dbau = 0;
+ prdtl[slot].prdtl[0].dba = vmtophys((void*) &id);
+ prdtl[slot].prdtl[0].dbc = first_part - 1;
+ prdtl[slot].prdtl[1].dbau = 0;
+ prdtl[slot].prdtl[1].dba = vmtophys((void*) &id + first_part);
+ prdtl[slot].prdtl[1].dbc = sizeof(id) - first_part - 1;
+ }
+ else
+ {
+ command[slot].opts |= (1 << 16);
+
+ prdtl[slot].prdtl[0].dbau = 0;
+ prdtl[slot].prdtl[0].dba = vmtophys((void*) &id);
+ prdtl[slot].prdtl[0].dbc = sizeof(id) - 1;
+ }
+
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->tfd) & (BUSY_STAT | DRQ_STAT))
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for ready\n", port-ports);
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return 3;
+ }
+
+ save_flags(flags);
+ cli();
+
+ port->identify = 1;
+ port->status = 0;
+
+ /* Issue command */
+ mb();
+ writel(1 << slot, &ahci_port->ci);
+
+ timeout = jiffies + WAIT_MAX;
+ identify_timer.expires = timeout;
+ identify_timer.data = (unsigned long) port;
+ add_timer(&identify_timer);
+ while (!port->status) {
+ if (jiffies >= timeout) {
+ printk("sd%u: timeout waiting for identify\n", port-ports);
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ del_timer(&identify_timer);
+ restore_flags(flags);
+ return 3;
+ }
+ sleep_on(&port->q);
+ }
+ del_timer(&identify_timer);
+ restore_flags(flags);
+
+ if ((port->status & PORT_IRQ_TF_ERR) || readl(&ahci_port->is) & PORT_IRQ_TF_ERR)
+ {
+ /* Identify error */
+ port->capacity = 0;
+ port->lba48 = 0;
+ ret = 2;
+ } else {
+ memcpy(&port->id, &id, sizeof(id));
+ port->is_cd = 0;
+
+ ide_fixstring(id.model, sizeof(id.model), 1);
+ ide_fixstring(id.fw_rev, sizeof(id.fw_rev), 1);
+ ide_fixstring(id.serial_no, sizeof(id.serial_no), 1);
+ if (cmd == WIN_PIDENTIFY)
+ {
+ unsigned char type = (id.config >> 8) & 0x1f;
+
+ printk("sd%u: %s, ATAPI ", port - ports, id.model);
+ if (type == 5)
+ {
+ printk("unsupported CDROM drive\n");
+ port->is_cd = 1;
+ port->lba48 = 0;
+ port->capacity = 0;
+ }
+ else
+ {
+ printk("unsupported type %d\n", type);
+ port->lba48 = 0;
+ port->capacity = 0;
+ return 2;
+ }
+ return 0;
+ }
+
+ if (id.command_set_2 & (1U<<10))
+ {
+ port->lba48 = 1;
+ port->capacity = id.lba_capacity_2;
+ if (port->capacity >= (1ULL << 32))
+ {
+ port->capacity = (1ULL << 32) - 1;
+ printk("Warning: truncating disk size to 2TiB\n");
+ }
+ }
+ else
+ {
+ port->lba48 = 0;
+ port->capacity = id.lba_capacity;
+ if (port->capacity > (1ULL << 24))
+ {
+ port->capacity = (1ULL << 24);
+ printk("Warning: truncating disk size to 128GiB\n");
+ }
+ }
+ if (port->capacity/2048 >= 10240)
+ printk("sd%u: %s, %uGB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/(2048*1024)), id.buf_size/2);
+ else
+ printk("sd%u: %s, %uMB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/2048), id.buf_size/2);
+ }
+ port->identify = 0;
+
+ return ret;
+}
+
+/* Probe one AHCI port */
+static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port)
+{
+ struct port *port;
+ void *mem;
+ unsigned cls = ((readl(&ahci_host->cap) >> 8) & 0x1f) + 1;
+ struct ahci_command *command;
+ struct ahci_fis *fis;
+ struct ahci_cmd_tbl *prdtl;
+ vm_size_t size =
+ cls * sizeof(*command)
+ + sizeof(*fis)
+ + cls * sizeof(*prdtl);
+ unsigned i;
+ unsigned long long timeout;
+
+ for (i = 0; i < MAX_PORTS; i++) {
+ if (!ports[i].ahci_port)
+ break;
+ }
+ if (i == MAX_PORTS)
+ return;
+ port = &ports[i];
+
+ /* Has to be 1K-aligned */
+ mem = vmalloc (size);
+ if (!mem)
+ return;
+ assert (!(((unsigned long) mem) & (1024-1)));
+ memset (mem, 0, size);
+
+ port->ahci_host = ahci_host;
+ port->ahci_port = ahci_port;
+ port->cls = cls;
+
+ port->command = command = mem;
+ port->fis = fis = (void*) command + cls * sizeof(*command);
+ port->prdtl = prdtl = (void*) fis + sizeof(*fis);
+
+ /* Stop commands */
+ writel(readl(&ahci_port->cmd) & ~PORT_CMD_START, &ahci_port->cmd);
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports));
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ writel(readl(&ahci_port->cmd) & ~PORT_CMD_FIS_RX, &ahci_port->cmd);
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_FIS_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for FIS completion\n", (unsigned) (port-ports));
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ /* We don't support 64bit */
+ /* Point controller to our buffers */
+ writel(0, &ahci_port->clbu);
+ writel(vmtophys((void*) command), &ahci_port->clb);
+ writel(0, &ahci_port->fbu);
+ writel(vmtophys((void*) fis), &ahci_port->fb);
+
+ /* Clear any previous interrupts */
+ writel(readl(&ahci_port->is), &ahci_port->is);
+ writel(1 << (ahci_port - ahci_host->ports), &ahci_host->is);
+
+ /* And activate them */
+ writel(DEF_PORT_IRQ, &ahci_port->ie);
+ writel(readl(&ahci_host->ghc) | HOST_IRQ_EN, &ahci_host->ghc);
+
+ for (i = 0; i < cls; i++)
+ {
+ command[i].ctbau = 0;
+ command[i].ctba = vmtophys((void*) &prdtl[i]);
+ }
+
+ /* Start commands */
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports));
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ writel(readl(&ahci_port->cmd) | PORT_CMD_FIS_RX | PORT_CMD_START, &ahci_port->cmd);
+
+ /* if PxCMD.ATAPI is set, try ATAPI identify; otherwise try AHCI, then ATAPI */
+ if (readl(&ahci_port->cmd) & PORT_CMD_ATAPI ||
+ ahci_identify(ahci_host, ahci_port, port, WIN_IDENTIFY) >= 2)
+ ahci_identify(ahci_host, ahci_port, port, WIN_PIDENTIFY);
+}
+
+/* Probe one AHCI PCI device */
+static void ahci_probe_dev(unsigned char bus, unsigned char device)
+{
+ unsigned char hdrtype;
+ unsigned char dev, fun;
+ const volatile struct ahci_host *ahci_host;
+ const volatile struct ahci_port *ahci_port;
+ unsigned nports, n, i;
+ unsigned port_map;
+ unsigned bar;
+ unsigned char irq;
+
+ dev = PCI_SLOT(device);
+ fun = PCI_FUNC(device);
+
+ /* Get configuration */
+ if (pcibios_read_config_byte(bus, device, PCI_HEADER_TYPE, &hdrtype) != PCIBIOS_SUCCESSFUL) {
+ printk("ahci: %02x:%02x.%x: Can not read configuration", bus, dev, fun);
+ return;
+ }
+ /* Ignore multifunction bit */
+ hdrtype &= ~0x80;
+
+ if (hdrtype != 0) {
+ printk("ahci: %02x:%02x.%x: Unknown hdrtype %d\n", bus, dev, fun, hdrtype);
+ return;
+ }
+
+ if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_5, &bar) != PCIBIOS_SUCCESSFUL) {
+ printk("ahci: %02x:%02x.%x: Can not read BAR 5", bus, dev, fun);
+ return;
+ }
+ if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
+ printk("ahci: %02x:%02x.%x: BAR 5 is I/O?!", bus, dev, fun);
+ return;
+ }
+ bar &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ if (pcibios_read_config_byte(bus, device, PCI_INTERRUPT_LINE, &irq) != PCIBIOS_SUCCESSFUL) {
+ printk("ahci: %02x:%02x.%x: Can not read IRQ", bus, dev, fun);
+ return;
+ }
+
+ printk("AHCI SATA %02x:%02x.%x BAR 0x%x IRQ %u\n", bus, dev, fun, bar, irq);
+
+ /* Map mmio */
+ ahci_host = vremap(bar, 0x2000);
+
+ /* Request IRQ */
+ if (request_irq(irq, &ahci_interrupt, SA_SHIRQ, "ahci", (void*) ahci_host)) {
+ printk("ahci: %02x:%02x.%x: Can not get irq %u\n", bus, dev, fun, irq);
+ return;
+ }
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /* OK, we will handle it. Disable probing on legacy IDE ports it may have. */
+ for (i = 0; i < 6; i++)
+ {
+ unsigned mybar;
+ if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_0 + i*4, &mybar) == PCIBIOS_SUCCESSFUL) {
+ if (!(bar & PCI_BASE_ADDRESS_SPACE_IO))
+ /* Memory, don't care */
+ continue;
+ /* printk("ahci: %02x:%02x.%x: BAR %d is %x\n", bus, dev, fun, i, mybar); */
+ ide_disable_base(bar & PCI_BASE_ADDRESS_IO_MASK);
+ }
+ }
+#endif
+
+ nports = (readl(&ahci_host->cap) & 0x1f) + 1;
+ port_map = readl(&ahci_host->pi);
+
+ for (n = 0, i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1U << i))
+ n++;
+
+ if (nports != n) {
+ printk("ahci: %02x:%02x.%x: Odd number of ports %u, assuming %u is correct\n", bus, dev, fun, n, nports);
+ port_map = 0;
+ }
+ if (!port_map) {
+ port_map = (1U << nports) - 1;
+ }
+
+ for (i = 0; i < AHCI_MAX_PORTS; i++) {
+ u32 ssts;
+ u8 det, ipm;
+
+ if (!(port_map & (1U << i)))
+ continue;
+
+ ahci_port = &ahci_host->ports[i];
+
+ ssts = readl(&ahci_port->ssts);
+ det = ssts & 0xf;
+ switch (det)
+ {
+ case 0x0:
+ /* Device not present */
+ continue;
+ case 0x1:
+ printk("ahci: %02x:%02x.%x: Port %u communication not established. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ case 0x3:
+ /* Present and communication established */
+ break;
+ case 0x4:
+ printk("ahci: %02x:%02x.%x: Port %u phy offline?!\n", bus, dev, fun, i);
+ continue;
+ default:
+ printk("ahci: %02x:%02x.%x: Unknown port %u DET %x\n", bus, dev, fun, i, det);
+ continue;
+ }
+
+ ipm = (ssts >> 8) & 0xf;
+ switch (ipm)
+ {
+ case 0x0:
+ /* Device not present */
+ continue;
+ case 0x1:
+ /* Active */
+ break;
+ case 0x2:
+ printk("ahci: %02x:%02x.%x: Port %u in Partial power management. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ case 0x6:
+ printk("ahci: %02x:%02x.%x: Port %u in Slumber power management. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ default:
+ printk("ahci: %02x:%02x.%x: Unknown port %u IPM %x\n", bus, dev, fun, i, ipm);
+ continue;
+ }
+
+ /* OK! Probe this port */
+ ahci_probe_port(ahci_host, ahci_port);
+ }
+}
+
+/* genhd callback to set size of disks */
+static void ahci_geninit(struct gendisk *gd)
+{
+ unsigned unit;
+ struct port *port;
+
+ for (unit = 0; unit < gd->nr_real; unit++) {
+ port = &ports[unit];
+ port->part[0].nr_sects = port->capacity;
+ if (!port->part[0].nr_sects)
+ port->part[0].nr_sects = -1;
+ }
+}
+
+/* Probe all AHCI PCI devices */
+void ahci_probe_pci(void)
+{
+ unsigned char bus, device;
+ unsigned short index;
+ int ret;
+ unsigned nports, unit, nminors;
+ struct port *port;
+ struct gendisk *gd, **gdp;
+ int *bs;
+
+ for (index = 0;
+ (ret = pcibios_find_class(PCI_CLASS_STORAGE_SATA_AHCI, index, &bus, &device)) == PCIBIOS_SUCCESSFUL;
+ index++)
+ {
+ /* Note: this prevents us from also having a SCSI controller.
+ * It shouldn't harm too much until we have proper hardware
+ * enumeration.
+ */
+ if (register_blkdev(MAJOR_NR, "sd", &ahci_fops) < 0)
+ printk("could not register ahci\n");
+ ahci_probe_dev(bus, device);
+ }
+
+ for (nports = 0, port = &ports[0]; port < &ports[MAX_PORTS]; port++)
+ if (port->ahci_port)
+ nports++;
+
+ nminors = nports * (1<<PARTN_BITS);
+
+ gd = kmalloc(sizeof(*gd), GFP_KERNEL);
+ gd->sizes = kmalloc(nminors * sizeof(*gd->sizes), GFP_KERNEL);
+ gd->part = kmalloc(nminors * sizeof(*gd->part), GFP_KERNEL);
+ bs = kmalloc(nminors * sizeof(*bs), GFP_KERNEL);
+
+ blksize_size[MAJOR_NR] = bs;
+ for (unit = 0; unit < nminors; unit++)
+ /* We prefer to transfer whole pages */
+ *bs++ = PAGE_SIZE;
+
+ memset(gd->part, 0, nminors * sizeof(*gd->part));
+
+ for (unit = 0; unit < nports; unit++) {
+ ports[unit].gd = gd;
+ ports[unit].part = &gd->part[unit << PARTN_BITS];
+ }
+
+ gd->major = MAJOR_NR;
+ gd->major_name = "sd";
+ gd->minor_shift = PARTN_BITS;
+ gd->max_p = 1<<PARTN_BITS;
+ gd->max_nr = nports;
+ gd->nr_real = nports;
+ gd->init = ahci_geninit;
+ gd->next = NULL;
+
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
+ ;
+ *gdp = gd;
+
+ blk_dev[MAJOR_NR].request_fn = ahci_do_request;
+}
diff --git a/linux/dev/drivers/block/floppy.c b/linux/dev/drivers/block/floppy.c
new file mode 100644
index 0000000..83d66f0
--- /dev/null
+++ b/linux/dev/drivers/block/floppy.c
@@ -0,0 +1,4288 @@
+/*
+ * linux/kernel/floppy.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1993, 1994 Alain Knaff
+ */
+/*
+ * 02.12.91 - Changed to static variables to indicate need for reset
+ * and recalibrate. This makes some things easier (output_byte reset
+ * checking etc), and means less interrupt jumping in case of errors,
+ * so the code is hopefully easier to understand.
+ */
+
+/*
+ * This file is certainly a mess. I've tried my best to get it working,
+ * but I don't like programming floppies, and I have only one anyway.
+ * Urgel. I should check for more errors, and do more graceful error
+ * recovery. Seems there are problems with several drives. I've tried to
+ * correct them. No promises.
+ */
+
+/*
+ * As with hd.c, all routines within this file can (and will) be called
+ * by interrupts, so extreme caution is needed. A hardware interrupt
+ * handler may not sleep, or a kernel panic will happen. Thus I cannot
+ * call "floppy-on" directly, but have to set a special timer interrupt
+ * etc.
+ */
+
+/*
+ * 28.02.92 - made track-buffering routines, based on the routines written
+ * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
+ */
+
+/*
+ * Automatic floppy-detection and formatting written by Werner Almesberger
+ * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
+ * the floppy-change signal detection.
+ */
+
+/*
+ * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
+ * FDC data overrun bug, added some preliminary stuff for vertical
+ * recording support.
+ *
+ * 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
+ *
+ * TODO: Errors are still not counted properly.
+ */
+
+/* 1992/9/20
+ * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
+ * modeled after the freeware MS-DOS program fdformat/88 V1.8 by
+ * Christoph H. Hochst\"atter.
+ * I have fixed the shift values to the ones I always use. Maybe a new
+ * ioctl() should be created to be able to modify them.
+ * There is a bug in the driver that makes it impossible to format a
+ * floppy as the first thing after bootup.
+ */
+
+/*
+ * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
+ * this helped the floppy driver as well. Much cleaner, and still seems to
+ * work.
+ */
+
+/* 1994/6/24 --bbroad-- added the floppy table entries and made
+ * minor modifications to allow 2.88 floppies to be run.
+ */
+
+/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
+ * disk types.
+ */
+
+/*
+ * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
+ * format bug fixes, but unfortunately some new bugs too...
+ */
+
+/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
+ * errors to allow safe writing by specialized programs.
+ */
+
+/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
+ * by defining bit 1 of the "stretch" parameter to mean put sectors on the
+ * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
+ * drives are "upside-down").
+ */
+
+/*
+ * 1995/8/26 -- Andreas Busse -- added Mips support.
+ */
+
+/*
+ * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
+ * features to asm/floppy.h.
+ */
+
+
+#define FLOPPY_SANITY_CHECK
+#undef FLOPPY_SILENT_DCL_CLEAR
+
+#define REALLY_SLOW_IO
+
+#define DEBUGT 2
+#define DCL_DEBUG /* debug disk change line */
+
+/* do print messages for unexpected interrupts */
+static int print_unex=1;
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+/* the following is the mask of allowed drives. By default units 2 and
+ * 3 of both floppy controllers are disabled, because switching on the
+ * motor of these drives causes system hangs on some PCI computers. drive
+ * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
+ * a drive is allowed. */
+static int FLOPPY_IRQ=6;
+static int FLOPPY_DMA=2;
+static int allowed_drive_mask = 0x33;
+
+static int irqdma_allocated = 0;
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/tqueue.h>
+#define FDPATCHES
+#include <linux/fdreg.h>
+
+
+#include <linux/fd.h>
+
+
+#define OLDFDRAWCMD 0x020d /* send a raw command to the FDC */
+
+struct old_floppy_raw_cmd {
+ void *data;
+ long length;
+
+ unsigned char rate;
+ unsigned char flags;
+ unsigned char cmd_count;
+ unsigned char cmd[9];
+ unsigned char reply_count;
+ unsigned char reply[7];
+ int track;
+};
+
+#include <linux/errno.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+
+static int use_virtual_dma=0; /* virtual DMA for Intel */
+static unsigned short virtual_dma_port=0x3f0;
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static int set_dor(int fdc, char mask, char data);
+static inline int __get_order(unsigned long size);
+#include <asm/floppy.h>
+
+
+#define MAJOR_NR FLOPPY_MAJOR
+
+#include <linux/blk.h>
+#include <linux/cdrom.h> /* for the compatibility eject ioctl */
+
+#include <linux/dev/glue/glue.h>
+
+
+#ifndef FLOPPY_MOTOR_MASK
+#define FLOPPY_MOTOR_MASK 0xf0
+#endif
+
+#ifndef fd_get_dma_residue
+#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
+#endif
+
+/* Dma Memory related stuff */
+
+/* Pure 2^n version of get_order */
+static inline int __get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#ifndef fd_dma_mem_free
+#define fd_dma_mem_free(addr, size) free_pages(addr, __get_order(size))
+#endif
+
+#ifndef fd_dma_mem_alloc
+#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,__get_order(size))
+#endif
+
+/* End dma memory related stuff */
+
+static unsigned int fake_change = 0;
+static int initialising=1;
+
+static inline int TYPE(kdev_t x) {
+ return (MINOR(x)>>2) & 0x1f;
+}
+static inline int DRIVE(kdev_t x) {
+ return (MINOR(x)&0x03) | ((MINOR(x)&0x80) >> 5);
+}
+#define ITYPE(x) (((x)>>2) & 0x1f)
+#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
+#define UNIT(x) ((x) & 0x03) /* drive on fdc */
+#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
+#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
+ /* reverse mapping from unit and fdc to drive */
+#define DP (&drive_params[current_drive])
+#define DRS (&drive_state[current_drive])
+#define DRWE (&write_errors[current_drive])
+#define FDCS (&fdc_state[fdc])
+#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags))
+#define SETF(x) (set_bit(x##_BIT, &DRS->flags))
+#define TESTF(x) (test_bit(x##_BIT, &DRS->flags))
+
+#define UDP (&drive_params[drive])
+#define UDRS (&drive_state[drive])
+#define UDRWE (&write_errors[drive])
+#define UFDCS (&fdc_state[FDC(drive)])
+#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags))
+#define USETF(x) (set_bit(x##_BIT, &UDRS->flags))
+#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags))
+
+#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args)
+
+#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2)
+#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
+
+#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x)))
+
+#define INT_OFF save_flags(flags); cli()
+#define INT_ON restore_flags(flags)
+
+/* read/write */
+#define COMMAND raw_cmd->cmd[0]
+#define DR_SELECT raw_cmd->cmd[1]
+#define TRACK raw_cmd->cmd[2]
+#define HEAD raw_cmd->cmd[3]
+#define SECTOR raw_cmd->cmd[4]
+#define SIZECODE raw_cmd->cmd[5]
+#define SECT_PER_TRACK raw_cmd->cmd[6]
+#define GAP raw_cmd->cmd[7]
+#define SIZECODE2 raw_cmd->cmd[8]
+#define NR_RW 9
+
+/* format */
+#define F_SIZECODE raw_cmd->cmd[2]
+#define F_SECT_PER_TRACK raw_cmd->cmd[3]
+#define F_GAP raw_cmd->cmd[4]
+#define F_FILL raw_cmd->cmd[5]
+#define NR_F 6
+
+/*
+ * Maximum disk size (in kilobytes). This default is used whenever the
+ * current disk size is unknown.
+ * [Now it is rather a minimum]
+ */
+#define MAX_DISK_SIZE 4 /* 3984*/
+
+#define K_64 0x10000 /* 64KB */
+
+/*
+ * globals used by 'result()'
+ */
+#define MAX_REPLIES 16
+static unsigned char reply_buffer[MAX_REPLIES];
+static int inr; /* size of reply buffer, when called from interrupt */
+#define ST0 (reply_buffer[0])
+#define ST1 (reply_buffer[1])
+#define ST2 (reply_buffer[2])
+#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
+#define R_TRACK (reply_buffer[3])
+#define R_HEAD (reply_buffer[4])
+#define R_SECTOR (reply_buffer[5])
+#define R_SIZECODE (reply_buffer[6])
+
+#define SEL_DLY (2*HZ/100)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+/*
+ * this struct defines the different floppy drive types.
+ */
+static struct {
+ struct floppy_drive_params params;
+ const char *name; /* name printed while booting */
+} default_drive_params[]= {
+/* NOTE: the time values in jiffies should be in msec!
+ CMOS drive type
+ | Maximum data rate supported by drive type
+ | | Head load time, msec
+ | | | Head unload time, msec (not used)
+ | | | | Step rate interval, usec
+ | | | | | Time needed for spinup time (jiffies)
+ | | | | | | Timeout for spinning down (jiffies)
+ | | | | | | | Spindown offset (where disk stops)
+ | | | | | | | | Select delay
+ | | | | | | | | | RPS
+ | | | | | | | | | | Max number of tracks
+ | | | | | | | | | | | Interrupt timeout
+ | | | | | | | | | | | | Max nonintlv. sectors
+ | | | | | | | | | | | | | -Max Errors- flags */
+{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
+
+{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
+
+{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
+
+{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
+
+{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
+
+{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
+
+{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
+/* | --autodetected formats--- | | |
+ * read_track | | Name printed when booting
+ * | Native format
+ * Frequency of disk change checks */
+};
+
+static struct floppy_drive_params drive_params[N_DRIVE];
+static struct floppy_drive_struct drive_state[N_DRIVE];
+static struct floppy_write_errors write_errors[N_DRIVE];
+static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
+
+/*
+ * This struct defines the different floppy types.
+ *
+ * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
+ * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
+ * tells if the disk is in Commodore 1581 format, which means side 0 sectors
+ * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
+ * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
+ * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
+ * side 0 is on physical side 0 (but with the misnamed sector IDs).
+ * 'stretch' should probably be renamed to something more general, like
+ * 'options'. Other parameters should be self-explanatory (see also
+ * setfdprm(8)).
+ */
+static struct floppy_struct floppy_type[32] = {
+ { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
+ { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
+ { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
+ { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
+ { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
+ { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
+ { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
+ { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
+ { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
+ { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120"}, /* 9 3.12MB 3.5" */
+
+ { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
+ { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
+ { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
+ { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
+ { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
+ { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
+ { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
+ { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
+ { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
+ { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
+
+ { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
+ { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
+ { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
+ { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
+ { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
+ { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
+ { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
+ { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
+
+ { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
+ { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
+};
+
+#define NUMBER(x) (sizeof(x) / sizeof(*(x)))
+#define SECTSIZE (_FD_SECTSIZE(*floppy))
+
+/* Auto-detection: Disk type used until the next media change occurs. */
+static struct floppy_struct *current_type[N_DRIVE] = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+/*
+ * User-provided type information. current_type points to
+ * the respective entry of this array.
+ */
+static struct floppy_struct user_params[N_DRIVE];
+
+static int floppy_sizes[256];
+static int floppy_blocksizes[256] = { 0, };
+
+/*
+ * The driver is trying to determine the correct media format
+ * while probing is set. rw_interrupt() clears it after a
+ * successful access.
+ */
+static int probing = 0;
+
+/* Synchronization of FDC access. */
+#define FD_COMMAND_NONE -1
+#define FD_COMMAND_ERROR 2
+#define FD_COMMAND_OKAY 3
+
+static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0;
+static struct wait_queue *fdc_wait = NULL, *command_done = NULL;
+#ifdef MACH
+#define NO_SIGNAL (! issig () || ! interruptible)
+#else
+#define NO_SIGNAL (!(current->signal & ~current->blocked) || !interruptible)
+#endif
+#define CALL(x) if ((x) == -EINTR) return -EINTR
+#define ECALL(x) if ((ret = (x))) return ret;
+#define _WAIT(x,i) CALL(ret=wait_til_done((x),i))
+#define WAIT(x) _WAIT((x),interruptible)
+#define IWAIT(x) _WAIT((x),1)
+
+/* Errors during formatting are counted here. */
+static int format_errors;
+
+/* Format request descriptor. */
+static struct format_descr format_req;
+
+/*
+ * Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps
+ * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
+ * H is head unload time (1=16ms, 2=32ms, etc)
+ */
+
+/*
+ * Track buffer
+ * Because these are written to by the DMA controller, they must
+ * not contain a 64k byte boundary crossing, or data will be
+ * corrupted/lost.
+ */
+static char *floppy_track_buffer=0;
+static int max_buffer_sectors=0;
+
+static int *errors;
+typedef void (*done_f)(int);
+static struct cont_t {
+ void (*interrupt)(void); /* this is called after the interrupt of the
+ * main command */
+ void (*redo)(void); /* this is called to retry the operation */
+ void (*error)(void); /* this is called to tally an error */
+ done_f done; /* this is called to say if the operation has
+ * succeeded/failed */
+} *cont=NULL;
+
+static void floppy_ready(void);
+static void floppy_start(void);
+static void process_fd_request(void);
+static void recalibrate_floppy(void);
+static void floppy_shutdown(void);
+
+static int floppy_grab_irq_and_dma(void);
+static void floppy_release_irq_and_dma(void);
+
+/*
+ * The "reset" variable should be tested whenever an interrupt is scheduled,
+ * after the commands have been sent. This is to ensure that the driver doesn't
+ * get wedged when the interrupt doesn't come because of a failed command.
+ * reset doesn't need to be tested before sending commands, because
+ * output_byte is automatically disabled when reset is set.
+ */
+#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } }
+static void reset_fdc(void);
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+#define NO_TRACK -1
+#define NEED_1_RECAL -2
+#define NEED_2_RECAL -3
+
+/* */
+static int usage_count = 0;
+
+
+/* buffer related variables */
+static int buffer_track = -1;
+static int buffer_drive = -1;
+static int buffer_min = -1;
+static int buffer_max = -1;
+
+/* fdc related variables, should end up in a struct */
+static struct floppy_fdc_state fdc_state[N_FDC];
+static int fdc; /* current fdc */
+
+static struct floppy_struct *_floppy = floppy_type;
+static unsigned char current_drive = 0;
+static long current_count_sectors = 0;
+static unsigned char sector_t; /* sector in track */
+
+#ifndef fd_eject
+#define fd_eject(x) -EINVAL
+#endif
+
+
+#ifdef DEBUGT
+static long unsigned debugtimer;
+#endif
+
+/*
+ * Debugging
+ * =========
+ */
+static inline void set_debugt(void)
+{
+#ifdef DEBUGT
+ debugtimer = jiffies;
+#endif
+}
+
+static inline void debugt(const char *message)
+{
+#ifdef DEBUGT
+ if (DP->flags & DEBUGT)
+ printk("%s dtime=%lu\n", message, jiffies-debugtimer);
+#endif
+}
+
+typedef void (*timeout_fn)(unsigned long);
+static struct timer_list fd_timeout ={ NULL, NULL, 0, 0,
+ (timeout_fn) floppy_shutdown };
+
+static const char *timeout_message;
+
+#ifdef FLOPPY_SANITY_CHECK
+static void is_alive(const char *message)
+{
+ /* this routine checks whether the floppy driver is "alive" */
+ if (fdc_busy && command_status < 2 && !fd_timeout.prev){
+ DPRINT("timeout handler died: %s\n",message);
+ }
+}
+#endif
+
+#ifdef FLOPPY_SANITY_CHECK
+
+#define OLOGSIZE 20
+
+static void (*lasthandler)(void) = NULL;
+static int interruptjiffies=0;
+static int resultjiffies=0;
+static int resultsize=0;
+static int lastredo=0;
+
+static struct output_log {
+ unsigned char data;
+ unsigned char status;
+ unsigned long jiffies;
+} output_log[OLOGSIZE];
+
+static int output_log_pos=0;
+#endif
+
+#define CURRENTD -1
+#define MAXTIMEOUT -2
+
+static void reschedule_timeout(int drive, const char *message, int marg)
+{
+ if (drive == CURRENTD)
+ drive = current_drive;
+ del_timer(&fd_timeout);
+ if (drive < 0 || drive > N_DRIVE) {
+ fd_timeout.expires = jiffies + 20*HZ;
+ drive=0;
+ } else
+ fd_timeout.expires = jiffies + UDP->timeout;
+ add_timer(&fd_timeout);
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("reschedule timeout ");
+ printk(message, marg);
+ printk("\n");
+ }
+ timeout_message = message;
+}
+
+static int maximum(int a, int b)
+{
+ if(a > b)
+ return a;
+ else
+ return b;
+}
+#define INFBOUND(a,b) (a)=maximum((a),(b));
+
+static int minimum(int a, int b)
+{
+ if(a < b)
+ return a;
+ else
+ return b;
+}
+#define SUPBOUND(a,b) (a)=minimum((a),(b));
+
+
+/*
+ * Bottom half floppy driver.
+ * ==========================
+ *
+ * This part of the file contains the code talking directly to the hardware,
+ * and also the main service loop (seek-configure-spinup-command)
+ */
+
+/*
+ * disk change.
+ * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
+ * and the last_checked date.
+ *
+ * last_checked is the date of the last check which showed 'no disk change'
+ * FD_DISK_CHANGE is set under two conditions:
+ * 1. The floppy has been changed after some i/o to that floppy already
+ * took place.
+ * 2. No floppy disk is in the drive. This is done in order to ensure that
+ * requests are quickly flushed in case there is no disk in the drive. It
+ * follows that FD_DISK_CHANGE can only be cleared if there is a disk in
+ * the drive.
+ *
+ * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
+ * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
+ * each seek. If a disk is present, the disk change line should also be
+ * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
+ * change line is set, this means either that no disk is in the drive, or
+ * that it has been removed since the last seek.
+ *
+ * This means that we really have a third possibility too:
+ * The floppy has been changed after the last seek.
+ */
+
+static int disk_change(int drive)
+{
+ int fdc=FDC(drive);
+#ifdef FLOPPY_SANITY_CHECK
+ if (jiffies - UDRS->select_date < UDP->select_delay)
+ DPRINT("WARNING disk change called early\n");
+ if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
+ (FDCS->dor & 3) != UNIT(drive) ||
+ fdc != FDC(drive)){
+ DPRINT("probing disk change on unselected drive\n");
+ DPRINT("drive=%d fdc=%d dor=%x\n",drive, FDC(drive),
+ FDCS->dor);
+ }
+#endif
+
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("checking disk change line for drive %d\n",drive);
+ DPRINT("jiffies=%ld\n", jiffies);
+ DPRINT("disk change line=%x\n",fd_inb(FD_DIR)&0x80);
+ DPRINT("flags=%x\n",UDRS->flags);
+ }
+#endif
+ if (UDP->flags & FD_BROKEN_DCL)
+ return UTESTF(FD_DISK_CHANGED);
+ if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80){
+ USETF(FD_VERIFY); /* verify write protection */
+ if (UDRS->maxblock){
+ /* mark it changed */
+ USETF(FD_DISK_CHANGED);
+ }
+
+ /* invalidate its geometry */
+ if (UDRS->keep_data >= 0) {
+ if ((UDP->flags & FTD_MSG) &&
+ current_type[drive] != NULL)
+ DPRINT("Disk type is undefined after "
+ "disk change\n");
+ current_type[drive] = NULL;
+ floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE;
+ }
+
+ /*USETF(FD_DISK_NEWCHANGE);*/
+ return 1;
+ } else {
+ UDRS->last_checked=jiffies;
+ UCLEARF(FD_DISK_NEWCHANGE);
+ }
+ return 0;
+}
+
+static inline int is_selected(int dor, int unit)
+{
+ return ((dor & (0x10 << unit)) && (dor &3) == unit);
+}
+
+static int set_dor(int fdc, char mask, char data)
+{
+ register unsigned char drive, unit, newdor,olddor;
+
+ if (FDCS->address == -1)
+ return -1;
+
+ olddor = FDCS->dor;
+ newdor = (olddor & mask) | data;
+ if (newdor != olddor){
+ unit = olddor & 0x3;
+ if (is_selected(olddor, unit) && !is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("calling disk change from set_dor\n");
+ }
+#endif
+ disk_change(drive);
+ }
+ FDCS->dor = newdor;
+ fd_outb(newdor, FD_DOR);
+
+ unit = newdor & 0x3;
+ if (!is_selected(olddor, unit) && is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+ UDRS->select_date = jiffies;
+ }
+ }
+
+ /* FIXME: we should be more graceful here */
+
+ if (newdor & FLOPPY_MOTOR_MASK)
+ floppy_grab_irq_and_dma();
+ if (olddor & FLOPPY_MOTOR_MASK)
+ floppy_release_irq_and_dma();
+ return olddor;
+}
+
+static void twaddle(void)
+{
+ if (DP->select_delay)
+ return;
+ fd_outb(FDCS->dor & ~(0x10<<UNIT(current_drive)),FD_DOR);
+ fd_outb(FDCS->dor, FD_DOR);
+ DRS->select_date = jiffies;
+}
+
+/* reset all driver information about the current fdc. This is needed after
+ * a reset, and after a raw command. */
+static void reset_fdc_info(int mode)
+{
+ int drive;
+
+ FDCS->spec1 = FDCS->spec2 = -1;
+ FDCS->need_configure = 1;
+ FDCS->perp_mode = 1;
+ FDCS->rawcmd = 0;
+ for (drive = 0; drive < N_DRIVE; drive++)
+ if (FDC(drive) == fdc &&
+ (mode || UDRS->track != NEED_1_RECAL))
+ UDRS->track = NEED_2_RECAL;
+}
+
+/* selects the fdc and drive, and enables the fdc's input/dma. */
+static void set_fdc(int drive)
+{
+ if (drive >= 0 && drive < N_DRIVE){
+ fdc = FDC(drive);
+ current_drive = drive;
+ }
+ if (fdc != 1 && fdc != 0) {
+ printk("bad fdc value\n");
+ return;
+ }
+ set_dor(fdc,~0,8);
+#if N_FDC > 1
+ set_dor(1-fdc, ~8, 0);
+#endif
+ if (FDCS->rawcmd == 2)
+ reset_fdc_info(1);
+ if (fd_inb(FD_STATUS) != STATUS_READY)
+ FDCS->reset = 1;
+}
+
+/* locks the driver */
+static int lock_fdc(int drive, int interruptible)
+{
+ unsigned long flags;
+
+ if (!usage_count){
+ printk(KERN_ERR "trying to lock fdc while usage count=0\n");
+ return -1;
+ }
+ if(floppy_grab_irq_and_dma()==-1)
+ return -EBUSY;
+ INT_OFF;
+ while (fdc_busy && NO_SIGNAL)
+ interruptible_sleep_on(&fdc_wait);
+ if (fdc_busy){
+ INT_ON;
+ return -EINTR;
+ }
+ fdc_busy = 1;
+ INT_ON;
+ command_status = FD_COMMAND_NONE;
+ reschedule_timeout(drive, "lock fdc", 0);
+ set_fdc(drive);
+ return 0;
+}
+
+#define LOCK_FDC(drive,interruptible) \
+if (lock_fdc(drive,interruptible)) return -EINTR;
+
+
+/* unlocks the driver */
+static inline void unlock_fdc(void)
+{
+ raw_cmd = 0;
+ if (!fdc_busy)
+ DPRINT("FDC access conflict!\n");
+
+ if (DEVICE_INTR)
+ DPRINT("device interrupt still active at FDC release: %p!\n",
+ DEVICE_INTR);
+ command_status = FD_COMMAND_NONE;
+ del_timer(&fd_timeout);
+ cont = NULL;
+ fdc_busy = 0;
+ floppy_release_irq_and_dma();
+ wake_up(&fdc_wait);
+}
+
+/* switches the motor off after a given timeout */
+static void motor_off_callback(unsigned long nr)
+{
+ unsigned char mask = ~(0x10 << UNIT(nr));
+
+ set_dor(FDC(nr), mask, 0);
+}
+
+static struct timer_list motor_off_timer[N_DRIVE] = {
+ { NULL, NULL, 0, 0, motor_off_callback },
+ { NULL, NULL, 0, 1, motor_off_callback },
+ { NULL, NULL, 0, 2, motor_off_callback },
+ { NULL, NULL, 0, 3, motor_off_callback },
+ { NULL, NULL, 0, 4, motor_off_callback },
+ { NULL, NULL, 0, 5, motor_off_callback },
+ { NULL, NULL, 0, 6, motor_off_callback },
+ { NULL, NULL, 0, 7, motor_off_callback }
+};
+
+/* schedules motor off */
+static void floppy_off(unsigned int drive)
+{
+ unsigned long volatile delta;
+ register int fdc=FDC(drive);
+
+ if (!(FDCS->dor & (0x10 << UNIT(drive))))
+ return;
+
+ del_timer(motor_off_timer+drive);
+
+ /* make spindle stop in a position which minimizes spinup time
+ * next time */
+ if (UDP->rps){
+ delta = jiffies - UDRS->first_read_date + HZ -
+ UDP->spindown_offset;
+ delta = ((delta * UDP->rps) % HZ) / UDP->rps;
+ motor_off_timer[drive].expires = jiffies + UDP->spindown - delta;
+ }
+ add_timer(motor_off_timer+drive);
+}
+
+/*
+ * cycle through all N_DRIVE floppy drives, for disk change testing.
+ * stopping at current drive. This is done before any long operation, to
+ * be sure to have up to date disk change information.
+ */
+static void scandrives(void)
+{
+ int i, drive, saved_drive;
+
+ if (DP->select_delay)
+ return;
+
+ saved_drive = current_drive;
+ for (i=0; i < N_DRIVE; i++){
+ drive = (saved_drive + i + 1) % N_DRIVE;
+ if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
+ continue; /* skip closed drives */
+ set_fdc(drive);
+ if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
+ (0x10 << UNIT(drive))))
+ /* switch the motor off again, if it was off to
+ * begin with */
+ set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
+ }
+ set_fdc(saved_drive);
+}
+
+static void empty(void)
+{
+}
+
+static struct tq_struct floppy_tq =
+{ 0, 0, 0, 0 };
+
+static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 };
+
+static void cancel_activity(void)
+{
+ CLEAR_INTR;
+ floppy_tq.routine = (void *)(void *) empty;
+ del_timer(&fd_timer);
+}
+
+/* this function makes sure that the disk stays in the drive during the
+ * transfer */
+static void fd_watchdog(void)
+{
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from watchdog\n");
+ }
+#endif
+
+ if (disk_change(current_drive)){
+ DPRINT("disk removed during i/o\n");
+ cancel_activity();
+ cont->done(0);
+ reset_fdc();
+ } else {
+ del_timer(&fd_timer);
+ fd_timer.function = (timeout_fn) fd_watchdog;
+ fd_timer.expires = jiffies + HZ / 10;
+ add_timer(&fd_timer);
+ }
+}
+
+static void main_command_interrupt(void)
+{
+ del_timer(&fd_timer);
+ cont->interrupt();
+}
+
+/* waits for a delay (spinup or select) to pass */
+static int wait_for_completion(int delay, timeout_fn function)
+{
+ if (FDCS->reset){
+ reset_fdc(); /* do the reset during sleep to win time
+ * if we don't need to sleep, it's a good
+ * occasion anyways */
+ return 1;
+ }
+
+ if ((signed) (jiffies - delay) < 0){
+ del_timer(&fd_timer);
+ fd_timer.function = function;
+ fd_timer.expires = delay;
+ add_timer(&fd_timer);
+ return 1;
+ }
+ return 0;
+}
+
+static int hlt_disabled=0;
+static void floppy_disable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (!hlt_disabled){
+ hlt_disabled=1;
+#ifdef HAVE_DISABLE_HLT
+ disable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+static void floppy_enable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (hlt_disabled){
+ hlt_disabled=0;
+#ifdef HAVE_DISABLE_HLT
+ enable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+
+static void setup_DMA(void)
+{
+ unsigned long flags;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (raw_cmd->length == 0){
+ int i;
+
+ printk("zero dma transfer size:");
+ for (i=0; i < raw_cmd->cmd_count; i++)
+ printk("%x,", raw_cmd->cmd[i]);
+ printk("\n");
+ cont->done(0);
+ FDCS->reset = 1;
+ return;
+ }
+ if ((long) raw_cmd->kernel_data % 512){
+ printk("non aligned address: %p\n", raw_cmd->kernel_data);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+ if (CROSS_64KB(raw_cmd->kernel_data, raw_cmd->length)) {
+ printk("DMA crossing 64-K boundary %p-%p\n",
+ raw_cmd->kernel_data,
+ raw_cmd->kernel_data + raw_cmd->length);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+#endif
+ INT_OFF;
+ fd_disable_dma();
+ fd_clear_dma_ff();
+ fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
+ fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ)?
+ DMA_MODE_READ : DMA_MODE_WRITE);
+ fd_set_dma_addr(virt_to_bus(raw_cmd->kernel_data));
+ fd_set_dma_count(raw_cmd->length);
+ virtual_dma_port = FDCS->address;
+ fd_enable_dma();
+ INT_ON;
+ floppy_disable_hlt();
+}
+
+void show_floppy(void);
+
+/* waits until the fdc becomes ready */
+static int wait_til_ready(void)
+{
+ int counter, status;
+ if(FDCS->reset)
+ return -1;
+ for (counter = 0; counter < 10000; counter++) {
+ status = fd_inb(FD_STATUS);
+ if (status & STATUS_READY)
+ return status;
+ }
+ if (!initialising) {
+ DPRINT("Getstatus times out (%x) on fdc %d\n",
+ status, fdc);
+ show_floppy();
+ }
+ FDCS->reset = 1;
+ return -1;
+}
+
+/* sends a command byte to the fdc */
+static int output_byte(char byte)
+{
+ int status;
+
+ if ((status = wait_til_ready()) < 0)
+ return -1;
+ if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY){
+ fd_outb(byte,FD_DATA);
+#ifdef FLOPPY_SANITY_CHECK
+ output_log[output_log_pos].data = byte;
+ output_log[output_log_pos].status = status;
+ output_log[output_log_pos].jiffies = jiffies;
+ output_log_pos = (output_log_pos + 1) % OLOGSIZE;
+#endif
+ return 0;
+ }
+ FDCS->reset = 1;
+ if (!initialising) {
+ DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
+ byte, fdc, status);
+ show_floppy();
+ }
+ return -1;
+}
+#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;}
+
+/* gets the response from the fdc */
+/* Reads up to MAX_REPLIES result bytes into reply_buffer[].  Returns
+ * the count of bytes read, or -1 on protocol error (FDCS->reset is
+ * then set so the controller gets reset). */
+static int result(void)
+{
+	int i, status;
+
+	for(i=0; i < MAX_REPLIES; i++) {
+		if ((status = wait_til_ready()) < 0)
+			break;
+		status &= STATUS_DIR|STATUS_READY|STATUS_BUSY|STATUS_DMA;
+		if ((status & ~STATUS_BUSY) == STATUS_READY){
+			/* ready, output direction, no more bytes: done */
+#ifdef FLOPPY_SANITY_CHECK
+			resultjiffies = jiffies;
+			resultsize = i;
+#endif
+			return i;
+		}
+		if (status == (STATUS_DIR|STATUS_READY|STATUS_BUSY))
+			reply_buffer[i] = fd_inb(FD_DATA);
+		else
+			break;
+	}
+	if(!initialising) {
+		DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
+		       fdc, status, i);
+		show_floppy();
+	}
+	FDCS->reset = 1;
+	return -1;
+}
+
+#define MORE_OUTPUT -2
+/* does the fdc need more output? */
+/* Returns MORE_OUTPUT if the FDC is waiting for further command
+ * bytes, otherwise the value of result() (reply length or -1). */
+static int need_more_output(void)
+{
+	int status;
+	if( (status = wait_til_ready()) < 0)
+		return -1;
+	if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY)
+		return MORE_OUTPUT;
+	return result();
+}
+
+/* Set perpendicular mode as required, based on data rate, if supported.
+ * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
+ */
+/* Derives the perpendicular-mode setting from raw_cmd->rate and sends
+ * an FD_PERPENDICULAR command when it differs from the cached value.
+ * Only 500 kbps (rate 0 -> mode 2) and 1 Mbps (rate 3 -> mode 3) are
+ * valid perpendicular rates; anything else aborts the command. */
+static inline void perpendicular_mode(void)
+{
+	unsigned char perp_mode;
+
+	if (raw_cmd->rate & 0x40){
+		switch(raw_cmd->rate & 3){
+			case 0:
+				perp_mode=2;
+				break;
+			case 3:
+				perp_mode=3;
+				break;
+			default:
+				DPRINT("Invalid data rate for perpendicular mode!\n");
+				cont->done(0);
+				FDCS->reset = 1; /* convenient way to return to
+						  * redo without to much hassle (deep
+						  * stack et al. */
+				return;
+		}
+	} else
+		perp_mode = 0;
+
+	/* avoid redundant controller commands when mode is unchanged */
+	if (FDCS->perp_mode == perp_mode)
+		return;
+	if (FDCS->version >= FDC_82077_ORIG) {
+		output_byte(FD_PERPENDICULAR);
+		output_byte(perp_mode);
+		FDCS->perp_mode = perp_mode;
+	} else if (perp_mode) {
+		DPRINT("perpendicular mode not supported by this FDC.\n");
+	}
+} /* perpendicular_mode */
+
+/* FIFO threshold (0..0xf) and FIFO-disable flag for FD_CONFIGURE.
+ * NOTE(review): no_fifo is masked with 0x20, so presumably it holds
+ * the raw "disable FIFO" configure bit (0x20) — confirm at the
+ * point where it is set. */
+static int fifo_depth = 0xa;
+static int no_fifo = 0;
+
+/* Issue an FD_CONFIGURE command to enable the FIFO.  Returns 1 when
+ * the controller accepted the command, 0 when it did not ask for
+ * parameter bytes (e.g. command unsupported). */
+static int fdc_configure(void)
+{
+	/* Turn on FIFO */
+	output_byte(FD_CONFIGURE);
+	if(need_more_output() != MORE_OUTPUT)
+		return 0;
+	output_byte(0);
+	output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
+	output_byte(0);	/* pre-compensation from track
+			   0 upwards */
+	return 1;
+}
+
+#define NOMINAL_DTR 500
+
+/* Issue a "SPECIFY" command to set the step rate time, head unload time,
+ * head load time, and DMA disable flag to values needed by floppy.
+ *
+ * The value "dtr" is the data transfer rate in Kbps.  It is needed
+ * to account for the data rate-based scaling done by the 82072 and 82077
+ * FDC types.  This parameter is ignored for other types of FDCs (i.e.
+ * 8272a).
+ *
+ * Note that changing the data transfer rate has a (probably deleterious)
+ * effect on the parameters subject to scaling for 82072/82077 FDCs, so
+ * fdc_specify is called again after each data transfer rate
+ * change.
+ *
+ * srt: 1000 to 16000 in microseconds
+ * hut: 16 to 240 milliseconds
+ * hlt: 2 to 254 milliseconds
+ *
+ * These values are rounded up to the next highest available delay time.
+ */
+static void fdc_specify(void)
+{
+	unsigned char spec1, spec2;
+	int srt, hlt, hut;
+	unsigned long dtr = NOMINAL_DTR;
+	unsigned long scale_dtr = NOMINAL_DTR;
+	int hlt_max_code = 0x7f;
+	int hut_max_code = 0xf;
+
+	/* one-time FIFO setup on capable controllers */
+	if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
+		fdc_configure();
+		FDCS->need_configure = 0;
+		/*DPRINT("FIFO enabled\n");*/
+	}
+
+	/* map the 2-bit rate select to a data transfer rate in Kbps */
+	switch (raw_cmd->rate & 0x03) {
+		case 3:
+			dtr = 1000;
+			break;
+		case 1:
+			dtr = 300;
+			if (FDCS->version >= FDC_82078) {
+				/* chose the default rate table, not the one
+				 * where 1 = 2 Mbps */
+				output_byte(FD_DRIVESPEC);
+				if(need_more_output() == MORE_OUTPUT) {
+					output_byte(UNIT(current_drive));
+					output_byte(0xc0);
+				}
+			}
+			break;
+		case 2:
+			dtr = 250;
+			break;
+	}
+
+	if (FDCS->version >= FDC_82072) {
+		scale_dtr = dtr;
+		hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
+		hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
+	}
+
+	/* Convert step rate from microseconds to milliseconds and 4 bits */
+	srt = 16 - (DP->srt*scale_dtr/1000 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+	SUPBOUND(srt, 0xf);
+	INFBOUND(srt, 0);
+
+	hlt = (DP->hlt*scale_dtr/2 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+	if (hlt < 0x01)
+		hlt = 0x01;
+	else if (hlt > 0x7f)
+		hlt = hlt_max_code;
+
+	hut = (DP->hut*scale_dtr/16 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+	if (hut < 0x1)
+		hut = 0x1;
+	else if (hut > 0xf)
+		hut = hut_max_code;
+
+	/* pack per the SPECIFY command layout: srt/hut in byte 1,
+	 * hlt plus the non-DMA flag in byte 2 */
+	spec1 = (srt << 4) | hut;
+	spec2 = (hlt << 1) | (use_virtual_dma & 1);
+
+	/* If these parameters did not change, just return with success */
+	if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
+		/* Go ahead and set spec1 and spec2 */
+		output_byte(FD_SPECIFY);
+		output_byte(FDCS->spec1 = spec1);
+		output_byte(FDCS->spec2 = spec2);
+	}
+} /* fdc_specify */
+
+/* Set the FDC's data transfer rate on behalf of the specified drive.
+ * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
+ * of the specify command (i.e. using the fdc_specify function).
+ */
+/* Returns 0 if the rate was already correct; otherwise programs the
+ * DCR and returns the value of wait_for_completion(), which waits
+ * ~2 jiffies before re-entering floppy_ready. */
+static int fdc_dtr(void)
+{
+	/* If data rate not already set to desired value, set it. */
+	if ((raw_cmd->rate & 3) == FDCS->dtr)
+		return 0;
+
+	/* Set dtr */
+	fd_outb(raw_cmd->rate & 3, FD_DCR);
+
+	/* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
+	 * need a stabilization period of several milliseconds to be
+	 * enforced after data rate changes before R/W operations.
+	 * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
+	 */
+	FDCS->dtr = raw_cmd->rate & 3;
+	return(wait_for_completion(jiffies+2*HZ/100,
+				   (timeout_fn) floppy_ready));
+} /* fdc_dtr */
+
+/* Append the current CHS position to an in-progress error printk.
+ * NOTE(review): R_TRACK/R_HEAD/R_SECTOR/R_SIZECODE presumably decode
+ * the FDC reply buffer — confirm against the macro definitions. */
+static void tell_sector(void)
+{
+	printk(": track %d, head %d, sector %d, size %d",
+	       R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
+} /* tell_sector */
+
+
+/*
+ * OK, this error interpreting routine is called after a
+ * DMA read/write has succeeded
+ * or failed, so we check the results, and copy any buffers.
+ * hhb: Added better error reporting.
+ * ak: Made this into a separate routine.
+ */
+/* Return values: 0 = success, 1 = retryable error (value of `bad'),
+ * 2 = fatal for this request.  Side effects: may set FDCS->reset,
+ * flag a recalibrate, or complete the request via cont->done(). */
+static int interpret_errors(void)
+{
+	char bad;
+
+	/* a complete r/w result is 7 bytes; anything else is a
+	 * controller-level failure */
+	if (inr!=7) {
+		DPRINT("-- FDC reply error");
+		FDCS->reset = 1;
+		return 1;
+	}
+
+	/* check IC to find cause of interrupt */
+	switch (ST0 & ST0_INTR) {
+		case 0x40: /* error occurred during command execution */
+			if (ST1 & ST1_EOC)
+				return 0; /* occurs with pseudo-DMA */
+			bad = 1;
+			if (ST1 & ST1_WP) {
+				DPRINT("Drive is write protected\n");
+				CLEARF(FD_DISK_WRITABLE);
+				cont->done(0);
+				bad = 2;
+			} else if (ST1 & ST1_ND) {
+				SETF(FD_NEED_TWADDLE);
+			} else if (ST1 & ST1_OR) {
+				if (DP->flags & FTD_MSG)
+					DPRINT("Over/Underrun - retrying\n");
+				bad = 0;
+			}else if (*errors >= DP->max_errors.reporting){
+				/* too many retries: print a detailed report */
+				DPRINT("");
+				if (ST0 & ST0_ECE) {
+					printk("Recalibrate failed!");
+				} else if (ST2 & ST2_CRC) {
+					printk("data CRC error");
+					tell_sector();
+				} else if (ST1 & ST1_CRC) {
+					printk("CRC error");
+					tell_sector();
+				} else if ((ST1 & (ST1_MAM|ST1_ND)) || (ST2 & ST2_MAM)) {
+					if (!probing) {
+						printk("sector not found");
+						tell_sector();
+					} else
+						printk("probe failed...");
+				} else if (ST2 & ST2_WC) { /* seek error */
+					printk("wrong cylinder");
+				} else if (ST2 & ST2_BC) { /* cylinder marked as bad */
+					printk("bad cylinder");
+				} else {
+					printk("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x", ST0, ST1, ST2);
+					tell_sector();
+				}
+				printk("\n");
+
+			}
+			if (ST2 & ST2_WC || ST2 & ST2_BC)
+				/* wrong cylinder => recal */
+				DRS->track = NEED_2_RECAL;
+			return bad;
+		case 0x80: /* invalid command given */
+			DPRINT("Invalid FDC command given!\n");
+			cont->done(0);
+			return 2;
+		case 0xc0:
+			DPRINT("Abnormal termination caused by polling\n");
+			cont->error();
+			return 2;
+		default: /* (0) Normal command termination */
+			return 0;
+	}
+}
+
+/*
+ * This routine is called when everything should be correctly set up
+ * for the transfer (i.e. floppy motor is on, the correct floppy is
+ * selected, and the head is sitting on the right track).
+ */
+/* Waits for spinup if requested, programs the DMA controller, arms
+ * the interrupt handler and streams the command bytes to the FDC.
+ * On any output failure it aborts via cont->error() + reset_fdc(). */
+static void setup_rw_floppy(void)
+{
+	int i, ready_date, r, flags;
+	timeout_fn function;
+
+	flags = raw_cmd->flags;
+	/* data transfers always complete with an interrupt */
+	if (flags & (FD_RAW_READ | FD_RAW_WRITE))
+		flags |= FD_RAW_INTR;
+
+	if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)){
+		ready_date = DRS->spinup_date + DP->spinup;
+		/* If spinup will take a long time, rerun scandrives
+		 * again just before spinup completion. Beware that
+		 * after scandrives, we must again wait for selection.
+		 */
+		if ((signed) (ready_date - jiffies) > DP->select_delay){
+			ready_date -= DP->select_delay;
+			function = (timeout_fn) floppy_start;
+		} else
+			function = (timeout_fn) setup_rw_floppy;
+
+		/* wait until the floppy is spinning fast enough */
+		if (wait_for_completion(ready_date,function))
+			return;
+	}
+
+	if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
+		setup_DMA();
+
+	if (flags & FD_RAW_INTR)
+		SET_INTR(main_command_interrupt);
+
+	/* OR the return codes: any failed byte makes r negative/non-zero */
+	r=0;
+	for (i=0; i< raw_cmd->cmd_count; i++)
+		r|=output_byte(raw_cmd->cmd[i]);
+
+#ifdef DEBUGT
+	debugt("rw_command: ");
+#endif
+	if (r){
+		cont->error();
+		reset_fdc();
+		return;
+	}
+
+	if (!(flags & FD_RAW_INTR)){
+		/* no interrupt expected: collect the result synchronously */
+		inr = result();
+		cont->interrupt();
+	} else if (flags & FD_RAW_NEED_DISK)
+		fd_watchdog();
+}
+
+/* set while seeking with the drive deselected (DCL-clearing trick);
+ * suppresses the NEWCHANGE clearing below */
+static int blind_seek;
+
+/*
+ * This is the routine called after every seek (or recalibrate) interrupt
+ * from the floppy controller.
+ */
+static void seek_interrupt(void)
+{
+#ifdef DEBUGT
+	debugt("seek interrupt:");
+#endif
+	/* expect a 2-byte SENSEI result with "seek end" status */
+	if (inr != 2 || (ST0 & 0xF8) != 0x20) {
+		DPRINT("seek failed\n");
+		DRS->track = NEED_2_RECAL;
+		cont->error();
+		cont->redo();
+		return;
+	}
+	if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek){
+#ifdef DCL_DEBUG
+		if (DP->flags & FD_DEBUG){
+			DPRINT("clearing NEWCHANGE flag because of effective seek\n");
+			DPRINT("jiffies=%ld\n", jiffies);
+		}
+#endif
+		CLEARF(FD_DISK_NEWCHANGE); /* effective seek */
+		DRS->select_date = jiffies;
+	}
+	DRS->track = ST1;
+	floppy_ready();
+}
+
+/* If verification is pending, query drive status (FD_GETSTATUS) and
+ * update the FD_DISK_WRITABLE flag from the write-protect bit
+ * (ST3 bit 6).  On a malformed reply the controller is marked for
+ * reset. */
+static void check_wp(void)
+{
+	if (TESTF(FD_VERIFY)) {
+		/* check write protection */
+		output_byte(FD_GETSTATUS);
+		output_byte(UNIT(current_drive));
+		if (result() != 1){
+			FDCS->reset = 1;
+			return;
+		}
+		CLEARF(FD_VERIFY);
+		CLEARF(FD_NEED_TWADDLE);
+#ifdef DCL_DEBUG
+		if (DP->flags & FD_DEBUG){
+			DPRINT("checking whether disk is write protected\n");
+			DPRINT("wp=%x\n",ST3 & 0x40);
+		}
+#endif
+		if (!(ST3 & 0x40))
+			SETF(FD_DISK_WRITABLE);
+		else
+			CLEARF(FD_DISK_WRITABLE);
+	}
+}
+
+/* Decide whether the drive needs a recalibrate, a real seek, a
+ * DCL-clearing dummy seek, or can go straight to the transfer, then
+ * issue the appropriate FDC command.  Completion continues in
+ * seek_interrupt() (or setup_rw_floppy() when no seek is needed). */
+static void seek_floppy(void)
+{
+	int track;
+
+	blind_seek=0;
+
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG){
+		DPRINT("calling disk change from seek\n");
+	}
+#endif
+
+	if (!TESTF(FD_DISK_NEWCHANGE) &&
+	    disk_change(current_drive) &&
+	    (raw_cmd->flags & FD_RAW_NEED_DISK)){
+		/* the media changed flag should be cleared after the seek.
+		 * If it isn't, this means that there is really no disk in
+		 * the drive.
+		 */
+		SETF(FD_DISK_CHANGED);
+		cont->done(0);
+		cont->redo();
+		return;
+	}
+	if (DRS->track <= NEED_1_RECAL){
+		recalibrate_floppy();
+		return;
+	} else if (TESTF(FD_DISK_NEWCHANGE) &&
+		   (raw_cmd->flags & FD_RAW_NEED_DISK) &&
+		   (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
+		/* we seek to clear the media-changed condition. Does anybody
+		 * know a more elegant way, which works on all drives? */
+		if (raw_cmd->track)
+			track = raw_cmd->track - 1;
+		else {
+			if (DP->flags & FD_SILENT_DCL_CLEAR){
+				/* deselect the drive so the dummy seek is
+				 * inaudible, and remember we did so */
+				set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
+				blind_seek = 1;
+				raw_cmd->flags |= FD_RAW_NEED_SEEK;
+			}
+			track = 1;
+		}
+	} else {
+		check_wp();
+		if (raw_cmd->track != DRS->track &&
+		    (raw_cmd->flags & FD_RAW_NEED_SEEK))
+			track = raw_cmd->track;
+		else {
+			setup_rw_floppy();
+			return;
+		}
+	}
+
+	SET_INTR(seek_interrupt);
+	output_byte(FD_SEEK);
+	output_byte(UNIT(current_drive));
+	LAST_OUT(track);
+#ifdef DEBUGT
+	debugt("seek command:");
+#endif
+}
+
+/* Interrupt handler for FD_RECALIBRATE completion.  Interprets the
+ * equipment-check bit (ST0_ECE) to decide between giving up, retrying
+ * a recalibrate, or accepting the new head position, then re-enters
+ * floppy_ready(). */
+static void recal_interrupt(void)
+{
+#ifdef DEBUGT
+	debugt("recal interrupt:");
+#endif
+	if (inr !=2)
+		FDCS->reset = 1;
+	else if (ST0 & ST0_ECE) {
+	       	switch(DRS->track){
+			case NEED_1_RECAL:
+#ifdef DEBUGT
+				debugt("recal interrupt need 1 recal:");
+#endif
+				/* after a second recalibrate, we still haven't
+				 * reached track 0. Probably no drive. Raise an
+				 * error, as failing immediately might upset
+				 * computers possessed by the Devil :-) */
+				cont->error();
+				cont->redo();
+				return;
+			case NEED_2_RECAL:
+#ifdef DEBUGT
+				debugt("recal interrupt need 2 recal:");
+#endif
+				/* If we already did a recalibrate,
+				 * and we are not at track 0, this
+				 * means we have moved. (The only way
+				 * not to move at recalibration is to
+				 * be already at track 0.) Clear the
+				 * new change flag */
+#ifdef DCL_DEBUG
+				if (DP->flags & FD_DEBUG){
+					DPRINT("clearing NEWCHANGE flag because of second recalibrate\n");
+				}
+#endif
+
+				CLEARF(FD_DISK_NEWCHANGE);
+				DRS->select_date = jiffies;
+				/* fall through */
+			default:
+#ifdef DEBUGT
+				debugt("recal interrupt default:");
+#endif
+				/* Recalibrate moves the head by at
+				 * most 80 steps. If after one
+				 * recalibrate we don't have reached
+				 * track 0, this might mean that we
+				 * started beyond track 80. Try
+				 * again. */
+				DRS->track = NEED_1_RECAL;
+				break;
+		}
+	} else
+		DRS->track = ST1;
+	floppy_ready();
+}
+
+/* Print <message> followed by the first inr FDC reply bytes (nothing
+ * extra when inr is negative or zero), then a newline. */
+static void print_result(char *message, int inr)
+{
+	int idx;
+
+	DPRINT("%s ", message);
+	for (idx = 0; idx < inr; idx++)
+		printk("repl[%d]=%x ", idx, reply_buffer[idx]);
+	printk("\n");
+}
+
+/* interrupt handler */
+/* Top-level floppy IRQ handler: collects the controller result,
+ * drains pending SENSEI interrupts (drive polling), then dispatches
+ * to whatever handler SET_INTR installed — either directly or via
+ * the immediate task queue when the interrupt nests. */
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+	void (*handler)(void) = DEVICE_INTR;
+	int do_print;
+
+	lasthandler = handler;
+	interruptjiffies = jiffies;
+
+	fd_disable_dma();
+	floppy_enable_hlt();
+	CLEAR_INTR;
+	if (fdc >= N_FDC || FDCS->address == -1){
+		/* we don't even know which FDC is the culprit */
+		printk("DOR0=%x\n", fdc_state[0].dor);
+		printk("floppy interrupt on bizarre fdc %d\n",fdc);
+		printk("handler=%p\n", handler);
+		is_alive("bizarre fdc");
+		return;
+	}
+
+	FDCS->reset = 0;
+	/* We have to clear the reset flag here, because apparently on boxes
+	 * with level triggered interrupts (PS/2, Sparc, ...), it is needed to
+	 * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
+	 * emission of the SENSEI's.
+	 * It is OK to emit floppy commands because we are in an interrupt
+	 * handler here, and thus we have to fear no interference of other
+	 * activity.
+	 */
+
+	do_print = !handler && print_unex && !initialising;
+
+	inr = result();
+	if(do_print)
+		print_result("unexpected interrupt", inr);
+	if (inr == 0){
+		/* zero-length result: issue SENSEI's until this drive's
+		 * interrupt is acknowledged (bounded to 4 iterations) */
+		int max_sensei = 4;
+		do {
+			output_byte(FD_SENSEI);
+			inr = result();
+			if(do_print)
+				print_result("sensei", inr);
+			max_sensei--;
+		} while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei);
+	}
+	if (handler) {
+		if(intr_count >= 2) {
+			/* expected interrupt */
+			floppy_tq.routine = (void *)(void *) handler;
+			queue_task_irq(&floppy_tq, &tq_immediate);
+			mark_bh(IMMEDIATE_BH);
+		} else
+			handler();
+	} else
+		FDCS->reset = 1;
+	is_alive("normal interrupt end");
+}
+
+/* Issue FD_RECALIBRATE for the current drive; completion is handled
+ * by recal_interrupt().  LAST_OUT returns early (after scheduling a
+ * reset) if the final byte cannot be sent. */
+static void recalibrate_floppy(void)
+{
+#ifdef DEBUGT
+	debugt("recalibrate floppy:");
+#endif
+	SET_INTR(recal_interrupt);
+	output_byte(FD_RECALIBRATE);
+	LAST_OUT(UNIT(current_drive));
+}
+
+/*
+ * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
+ */
+/* Interrupt handler armed by reset_fdc(): drains the post-reset
+ * status and re-runs the interrupted operation via cont->redo(). */
+static void reset_interrupt(void)
+{
+#ifdef DEBUGT
+	debugt("reset interrupt:");
+#endif
+	result();		/* get the status ready for set_fdc */
+	if (FDCS->reset) {
+		printk("reset set in interrupt, calling %p\n", cont->error);
+		cont->error(); /* a reset just after a reset. BAD! */
+	}
+	cont->redo();
+}
+
+/*
+ * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
+ * or by setting the self clearing bit 7 of STATUS (newer FDCs)
+ */
+static void reset_fdc(void)
+{
+	SET_INTR(reset_interrupt);
+	FDCS->reset = 0;
+	reset_fdc_info(0);
+
+	/* Pseudo-DMA may intercept 'reset finished' interrupt.  */
+	/* Irrelevant for systems with true DMA (i386).           */
+	fd_disable_dma();
+
+	if (FDCS->version >= FDC_82072A)
+		/* newer FDCs: self-clearing SW-reset bit in DSR, keeping
+		 * the current data rate */
+		fd_outb(0x80 | (FDCS->dtr &3), FD_STATUS);
+	else {
+		/* older FDCs: pulse DOR bit 2 low */
+		fd_outb(FDCS->dor & ~0x04, FD_DOR);
+		udelay(FD_RESET_DELAY);
+		fd_outb(FDCS->dor, FD_DOR);
+	}
+}
+
+/* Dump the driver's internal state to the console for debugging
+ * (timeouts, unexpected status, etc.).  Read-only; no side effects
+ * beyond the FD_STATUS port read below. */
+void show_floppy(void)
+{
+	int i;
+
+	printk("\n");
+	printk("floppy driver state\n");
+	printk("-------------------\n");
+	/* NOTE(review): interruptjiffies is printed with %d while jiffies
+	 * uses %ld — confirm its declared type matches */
+	printk("now=%ld last interrupt=%d last called handler=%p\n",
+	       jiffies, interruptjiffies, lasthandler);
+
+
+#ifdef FLOPPY_SANITY_CHECK
+	printk("timeout_message=%s\n", timeout_message);
+	printk("last output bytes:\n");
+	/* replay the command-byte ring log, oldest first */
+	for (i=0; i < OLOGSIZE; i++)
+		printk("%2x %2x %ld\n",
+		       output_log[(i+output_log_pos) % OLOGSIZE].data,
+		       output_log[(i+output_log_pos) % OLOGSIZE].status,
+		       output_log[(i+output_log_pos) % OLOGSIZE].jiffies);
+	printk("last result at %d\n", resultjiffies);
+	printk("last redo_fd_request at %d\n", lastredo);
+	for (i=0; i<resultsize; i++){
+		printk("%2x ", reply_buffer[i]);
+	}
+	printk("\n");
+#endif
+
+	printk("status=%x\n", fd_inb(FD_STATUS));
+	printk("fdc_busy=%d\n", fdc_busy);
+	if (DEVICE_INTR)
+		printk("DEVICE_INTR=%p\n", DEVICE_INTR);
+	if (floppy_tq.sync)
+		printk("floppy_tq.routine=%p\n", floppy_tq.routine);
+	if (fd_timer.prev)
+		printk("fd_timer.function=%p\n", fd_timer.function);
+	if (fd_timeout.prev){
+		printk("timer_table=%p\n",fd_timeout.function);
+		printk("expires=%ld\n",fd_timeout.expires-jiffies);
+		printk("now=%ld\n",jiffies);
+	}
+	printk("cont=%p\n", cont);
+	printk("CURRENT=%p\n", CURRENT);
+	printk("command_status=%d\n", command_status);
+	printk("\n");
+}
+
+/* Timeout handler: called when an operation takes too long.  Dumps
+ * state, cancels outstanding activity, forces a controller reset and
+ * finishes the current request through the continuation (or releases
+ * the FDC directly if no continuation is registered). */
+static void floppy_shutdown(void)
+{
+	if (!initialising)
+		show_floppy();
+	cancel_activity();
+	sti();
+
+	floppy_enable_hlt();
+	fd_disable_dma();
+	/* avoid dma going to a random drive after shutdown */
+
+	if (!initialising)
+		DPRINT("floppy timeout called\n");
+	FDCS->reset = 1;
+	if (cont){
+		cont->done(0);
+		cont->redo(); /* this will recall reset when needed */
+	} else {
+		printk("no cont in shutdown!\n");
+		process_fd_request();
+	}
+	is_alive("floppy shutdown");
+}
+/*typedef void (*timeout_fn)(unsigned long);*/
+
+/* start motor, check media-changed condition and write protection */
+/* Turns the current drive's motor on (unless FD_RAW_NO_MOTOR) and
+ * selects the drive via the DOR.  Returns the value of
+ * wait_for_completion(), i.e. nonzero if we must wait for the
+ * selection delay before `function' is re-invoked. */
+static int start_motor(void (*function)(void) )
+{
+	int mask, data;
+
+	/* mask clears the drive-select bits; data re-selects this unit */
+	mask = 0xfc;
+	data = UNIT(current_drive);
+	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)){
+		if (!(FDCS->dor & (0x10 << UNIT(current_drive)))){
+			set_debugt();
+			/* no read since this drive is running */
+			DRS->first_read_date = 0;
+			/* note motor start time if motor is not yet running */
+			DRS->spinup_date = jiffies;
+			data |= (0x10 << UNIT(current_drive));
+		}
+	} else
+		if (FDCS->dor & (0x10 << UNIT(current_drive)))
+			mask &= ~(0x10 << UNIT(current_drive));
+
+	/* starts motor and selects floppy */
+	del_timer(motor_off_timer + current_drive);
+	set_dor(fdc, mask, data);
+
+	/* wait_for_completion also schedules reset if needed. */
+	return(wait_for_completion(DRS->select_date+DP->select_delay,
+				   (timeout_fn) function));
+}
+
+/* Central "drive is ready" state machine step: spins up the motor,
+ * sets the data rate, clears the disk-change line if possible, then
+ * proceeds to seek or directly to the transfer.  May reschedule
+ * itself via start_motor()/fdc_dtr() and return early. */
+static void floppy_ready(void)
+{
+	CHECK_RESET;
+	if (start_motor(floppy_ready)) return;
+	if (fdc_dtr()) return;
+
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG){
+		DPRINT("calling disk change from floppy_ready\n");
+	}
+#endif
+
+	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
+	    disk_change(current_drive) &&
+	    !DP->select_delay)
+		twaddle(); /* this clears the dcl on certain drive/controller
+			    * combinations */
+
+	if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)){
+		perpendicular_mode();
+		fdc_specify(); /* must be done here because of hut, hlt ... */
+		seek_floppy();
+	} else
+		setup_rw_floppy();
+}
+
+/* Entry point for starting an operation on the current drive:
+ * re-arms the watchdog timeout, rescans drive state and enters the
+ * floppy_ready() state machine. */
+static void floppy_start(void)
+{
+	reschedule_timeout(CURRENTD, "floppy start", 0);
+
+	scandrives();
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG){
+		DPRINT("setting NEWCHANGE in floppy_start\n");
+	}
+#endif
+	SETF(FD_DISK_NEWCHANGE);
+	floppy_ready();
+}
+
+/*
+ * ========================================================================
+ * here ends the bottom half. Exported routines are:
+ * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
+ * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
+ * Initialization also uses output_byte, result, set_dor, floppy_interrupt
+ * and set_dor.
+ * ========================================================================
+ */
+/*
+ * General purpose continuations.
+ * ==============================
+ */
+
+/* Final continuation step: drop the continuation, advance
+ * command_status past 2 (releasing wait_til_done's loop) and wake
+ * the sleeper on command_done. */
+static void do_wakeup(void)
+{
+	reschedule_timeout(MAXTIMEOUT, "do wakeup", 0);
+	cont = 0;
+	command_status += 2;
+	wake_up(&command_done);
+}
+
+/* Continuation tables.  NOTE(review): judging from format_cont below,
+ * the field order of struct cont_t appears to be
+ * { interrupt, redo, error, done } — confirm against its declaration. */
+static struct cont_t wakeup_cont={
+	empty,
+	do_wakeup,
+	empty,
+	(done_f)empty
+};
+
+
+/* Continuation used when an operation is interrupted: just release
+ * the FDC on redo. */
+static struct cont_t intr_cont={
+	empty,
+	process_fd_request,
+	empty,
+	(done_f) empty
+};
+
+/* Queue `handler' on the immediate task queue and sleep until the
+ * continuation machinery signals completion (command_status >= 2) or
+ * a signal arrives.  Returns 0 on success, -EIO on device error,
+ * -EINTR when interrupted by a signal. */
+static int wait_til_done(void (*handler)(void), int interruptible)
+{
+	int ret;
+	unsigned long flags;
+
+	floppy_tq.routine = (void *)(void *) handler;
+	queue_task(&floppy_tq, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+	INT_OFF;
+	while(command_status < 2 && NO_SIGNAL){
+		is_alive("wait_til_done");
+		if (interruptible)
+			interruptible_sleep_on(&command_done);
+		else
+			sleep_on(&command_done);
+	}
+	if (command_status < 2){
+		/* woken by a signal, not by completion: abort the
+		 * operation and reset the controller */
+		cancel_activity();
+		cont = &intr_cont;
+		reset_fdc();
+		INT_ON;
+		return -EINTR;
+	}
+	INT_ON;
+
+	if (FDCS->reset)
+		command_status = FD_COMMAND_ERROR;
+	if (command_status == FD_COMMAND_OKAY)
+		ret=0;
+	else
+		ret=-EIO;
+	command_status = FD_COMMAND_NONE;
+	return ret;
+}
+
+/* Record the operation's outcome and switch to the wakeup
+ * continuation so the sleeping caller gets released. */
+static void generic_done(int result)
+{
+	command_status = result;
+	cont = &wakeup_cont;
+}
+
+/* Trivial continuation helpers: report success/failure through the
+ * current continuation's done() callback. */
+static void generic_success(void)
+{
+	cont->done(1);
+}
+
+static void generic_failure(void)
+{
+	cont->done(0);
+}
+
+/* Report success, then let the continuation decide what to do next. */
+static void success_and_wakeup(void)
+{
+	generic_success();
+	cont->redo();
+}
+
+
+/*
+ * formatting and rw support.
+ * ==========================
+ */
+
+/* Advance DRS->probed_format to the next autodetect entry that names
+ * a real format (nonzero sector count).  Returns 0 when one is found,
+ * 1 when the autodetect list (max 8 entries) is exhausted (the probe
+ * index is then reset to 0). */
+static int next_valid_format(void)
+{
+	int probed_format;
+
+	probed_format = DRS->probed_format;
+	while(1){
+		if (probed_format >= 8 ||
+		    !DP->autodetect[probed_format]){
+			DRS->probed_format = 0;
+			return 1;
+		}
+		if (floppy_type[DP->autodetect[probed_format]].sect){
+			DRS->probed_format = probed_format;
+			return 0;
+		}
+		probed_format++;
+	}
+}
+
+/* Error-path continuation: during probing, step to the next candidate
+ * format; otherwise bump the error counter and escalate (abort the
+ * request, reset the FDC, or force a recalibrate) according to the
+ * per-drive max_errors thresholds. */
+static void bad_flp_intr(void)
+{
+	if (probing){
+		DRS->probed_format++;
+		if (!next_valid_format())
+			return;
+	}
+	(*errors)++;
+	INFBOUND(DRWE->badness, *errors);
+	if (*errors > DP->max_errors.abort)
+		cont->done(0);
+	if (*errors > DP->max_errors.reset)
+		FDCS->reset = 1;
+	else if (*errors > DP->max_errors.recal)
+		DRS->track = NEED_2_RECAL;
+}
+
+/* Point _floppy at the geometry for `device': a fixed entry of
+ * floppy_type[] when the minor encodes a type, else the drive's
+ * currently autodetected/assigned type (which may be NULL). */
+static void set_floppy(kdev_t device)
+{
+	if (TYPE(device))
+		_floppy = TYPE(device) + floppy_type;
+	else
+		_floppy = current_type[ DRIVE(device) ];
+}
+
+/*
+ * formatting support.
+ * ===================
+ */
+/*
+ * formatting support.
+ * ===================
+ */
+/* Completion handler for a format command: classify the result and
+ * hand control back to the continuation. */
+static void format_interrupt(void)
+{
+	switch (interpret_errors()){
+		case 1:
+			cont->error();
+			/* fall through */
+		case 2:
+			break;
+		case 0:
+			cont->done(1);
+	}
+	cont->redo();
+}
+
+/* ssize = sector size in 512-byte units, derived from the size code */
+#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2)
+/* strip the MFM bit from command y when the format is FM (rate bit 7) */
+#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1))
+#define CT(x) ((x) | 0x40)
+/* Build the raw FD_FORMAT command and fill floppy_track_buffer with
+ * the per-sector ID fields (track, head, sector, sizecode) for one
+ * track, laying the logical sector numbers out with the computed
+ * interleave and skew. */
+static void setup_format_params(int track)
+{
+	struct fparm {
+		unsigned char track,head,sect,size;
+	} *here = (struct fparm *)floppy_track_buffer;
+	int il,n;
+	int count,head_shift,track_shift;
+
+	raw_cmd = &default_raw_cmd;
+	raw_cmd->track = track;
+
+	raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
+		FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+	raw_cmd->rate = _floppy->rate & 0x43;
+	raw_cmd->cmd_count = NR_F;
+	COMMAND = FM_MODE(_floppy,FD_FORMAT);
+	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,format_req.head);
+	F_SIZECODE = FD_SIZECODE(_floppy);
+	F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
+	F_GAP = _floppy->fmt_gap;
+	F_FILL = FD_FILL_BYTE;
+
+	raw_cmd->kernel_data = floppy_track_buffer;
+	raw_cmd->length = 4 * F_SECT_PER_TRACK;
+
+	/* allow for about 30ms for data transport per track */
+	head_shift  = (F_SECT_PER_TRACK + 5) / 6;
+
+	/* a ``cylinder'' is two tracks plus a little stepping time */
+	track_shift = 2 * head_shift + 3;
+
+	/* position of logical sector 1 on this track */
+	n = (track_shift * format_req.track + head_shift * format_req.head)
+		% F_SECT_PER_TRACK;
+
+	/* determine interleave */
+	il = 1;
+	if (_floppy->fmt_gap < 0x22)
+		il++;
+
+	/* initialize field */
+	for (count = 0; count < F_SECT_PER_TRACK; ++count) {
+		here[count].track = format_req.track;
+		here[count].head = format_req.head;
+		here[count].sect = 0;
+		here[count].size = F_SIZECODE;
+	}
+	/* place logical sectors */
+	for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
+		here[n].sect = count;
+		n = (n+il) % F_SECT_PER_TRACK;
+		if (here[n].sect) { /* sector busy, find next free sector */
+			++n;
+			if (n>= F_SECT_PER_TRACK) {
+				n-=F_SECT_PER_TRACK;
+				while (here[n].sect) ++n;
+			}
+		}
+	}
+}
+
+/* (Re)start the format of the requested track: invalidate the track
+ * buffer, build the format parameters and kick the state machine. */
+static void redo_format(void)
+{
+	buffer_track = -1;
+	setup_format_params(format_req.track << STRETCH(_floppy));
+	floppy_start();
+#ifdef DEBUGT
+	debugt("queue format request");
+#endif
+}
+
+/* Continuation for formatting: { interrupt, redo, error, done }. */
+static struct cont_t format_cont={
+	format_interrupt,
+	redo_format,
+	bad_flp_intr,
+	generic_done };
+
+/* Format one track described by *tmp_format_req on `device'.
+ * Validates the request against the drive/format geometry, then runs
+ * redo_format() under the format continuation and waits for it.
+ * Returns 0 on success, -EINVAL for a bad request, or the error from
+ * the wait. */
+static int do_format(kdev_t device, struct format_descr *tmp_format_req)
+{
+	int ret;
+	int drive=DRIVE(device);
+
+	LOCK_FDC(drive,1);
+	set_floppy(device);
+	if (!_floppy ||
+	    _floppy->track > DP->tracks ||
+	    tmp_format_req->track >= _floppy->track ||
+	    tmp_format_req->head >= _floppy->head ||
+	    (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
+	    !_floppy->fmt_gap) {
+		process_fd_request();
+		return -EINVAL;
+	}
+	format_req = *tmp_format_req;
+	format_errors = 0;
+	cont = &format_cont;
+	errors = &format_errors;
+	IWAIT(redo_format);
+	process_fd_request();
+	return ret;
+}
+
+/*
+ * Buffer read/write and support
+ * =============================
+ */
+
+/* new request_done. Can handle physical sectors which are smaller than a
+ * logical buffer */
+/* Complete (part of) the current block request.  On success, retires
+ * as many chained buffers as current_count_sectors covers and adjusts
+ * the request for any partially transferred buffer; on failure,
+ * records write-error bookkeeping and fails the request. */
+static void request_done(int uptodate)
+{
+	int block;
+
+	probing = 0;
+	reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
+
+	if (!CURRENT){
+		DPRINT("request list destroyed in floppy request done\n");
+		return;
+	}
+
+	if (uptodate){
+		/* maintain values for invalidation on geometry
+		 * change */
+		block = current_count_sectors + CURRENT->sector;
+		INFBOUND(DRS->maxblock, block);
+		if (block > _floppy->sect)
+			DRS->maxtrack = 1;
+
+		/* unlock chained buffers */
+		while (current_count_sectors && CURRENT &&
+		       current_count_sectors >= CURRENT->current_nr_sectors){
+			current_count_sectors -= CURRENT->current_nr_sectors;
+			CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+			CURRENT->sector += CURRENT->current_nr_sectors;
+			end_request(1);
+		}
+		if (current_count_sectors && CURRENT){
+			/* "unlock" last subsector */
+			CURRENT->buffer += current_count_sectors <<9;
+			CURRENT->current_nr_sectors -= current_count_sectors;
+			CURRENT->nr_sectors -= current_count_sectors;
+			CURRENT->sector += current_count_sectors;
+			return;
+		}
+
+		if (current_count_sectors && !CURRENT)
+			DPRINT("request list destroyed in floppy request done\n");
+
+	} else {
+		if (CURRENT->cmd == WRITE) {
+			/* record write error information */
+			DRWE->write_errors++;
+			if (DRWE->write_errors == 1) {
+				DRWE->first_error_sector = CURRENT->sector;
+				DRWE->first_error_generation = DRS->generation;
+			}
+			DRWE->last_error_sector = CURRENT->sector;
+			DRWE->last_error_generation = DRS->generation;
+		}
+		end_request(0);
+	}
+}
+
+/* Interrupt handler evaluating the result of the r/w operation */
+/* Computes how many 512-byte sectors were actually transferred from
+ * the FDC's final CHS position, clamps current_count_sectors to it,
+ * interprets errors, caches the track-buffer state on a successful
+ * read, and continues via the continuation. */
+static void rw_interrupt(void)
+{
+	int nr_sectors, ssize, eoc;
+
+	if (!DRS->first_read_date)
+		DRS->first_read_date = jiffies;
+
+	nr_sectors = 0;
+	CODE2SIZE;
+
+	/* end-of-cylinder means the reported sector is one past the
+	 * last one transferred */
+	if(ST1 & ST1_EOC)
+		eoc = 1;
+	else
+		eoc = 0;
+	nr_sectors = ((R_TRACK-TRACK)*_floppy->head+R_HEAD-HEAD) *
+		_floppy->sect + ((R_SECTOR-SECTOR+eoc) << SIZECODE >> 2) -
+		(sector_t % _floppy->sect) % ssize;
+
+#ifdef FLOPPY_SANITY_CHECK
+	if (nr_sectors > current_count_sectors + ssize -
+	    (current_count_sectors + sector_t) % ssize +
+	    sector_t % ssize){
+		DPRINT("long rw: %x instead of %lx\n",
+			nr_sectors, current_count_sectors);
+		printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
+		printk("rh=%d h=%d\n", R_HEAD, HEAD);
+		printk("rt=%d t=%d\n", R_TRACK, TRACK);
+		printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
+		       sector_t, ssize);
+	}
+#endif
+	INFBOUND(nr_sectors,0);
+	SUPBOUND(current_count_sectors, nr_sectors);
+
+	switch (interpret_errors()){
+		case 2:
+			cont->redo();
+			return;
+		case 1:
+			if (!current_count_sectors){
+				cont->error();
+				cont->redo();
+				return;
+			}
+			break;
+		case 0:
+			if (!current_count_sectors){
+				cont->redo();
+				return;
+			}
+			current_type[current_drive] = _floppy;
+			floppy_sizes[TOMINOR(current_drive) ]= _floppy->size>>1;
+			break;
+	}
+
+	if (probing) {
+		/* a successful transfer confirms the probed format */
+		if (DP->flags & FTD_MSG)
+			DPRINT("Auto-detected floppy type %s in fd%d\n",
+				_floppy->name,current_drive);
+		current_type[current_drive] = _floppy;
+		floppy_sizes[TOMINOR(current_drive)] = _floppy->size >> 1;
+		probing = 0;
+	}
+
+	if (CT(COMMAND) != FD_READ ||
+	     raw_cmd->kernel_data == CURRENT->buffer){
+		/* transfer directly from buffer */
+		cont->done(1);
+	} else if (CT(COMMAND) == FD_READ){
+		/* data landed in the track buffer: remember what it holds */
+		buffer_track = raw_cmd->track;
+		buffer_drive = current_drive;
+		INFBOUND(buffer_max, nr_sectors + sector_t);
+	}
+	cont->redo();
+}
+
+/* Compute maximal contiguous buffer size. */
+/* Walks the request's buffer_head chain as long as the data areas are
+ * physically contiguous and returns the total size in 512-byte
+ * sectors. */
+static int buffer_chain_size(void)
+{
+	struct buffer_head *bh;
+	int size;
+	char *base;
+
+	base = CURRENT->buffer;
+	size = CURRENT->current_nr_sectors << 9;
+	bh = CURRENT->bh;
+
+	if (bh){
+		bh = bh->b_reqnext;
+		while (bh && bh->b_data == base + size){
+			size += bh->b_size;
+			bh = bh->b_reqnext;
+		}
+	}
+	return size >> 9;
+}
+
+/* Compute the maximal transfer size */
+/* Clamps max_sector to the request size, aligns it down to a physical
+ * sector (ssize) boundary within the track, sets the global
+ * current_count_sectors and returns the resulting end sector. */
+static int transfer_size(int ssize, int max_sector, int max_size)
+{
+	SUPBOUND(max_sector, sector_t + max_size);
+
+	/* alignment */
+	max_sector -= (max_sector % _floppy->sect) % ssize;
+
+	/* transfer size, beginning not aligned */
+	current_count_sectors = max_sector - sector_t ;
+
+	return max_sector;
+}
+
+/*
+ * Move data from/to the track buffer to/from the buffer cache.
+ */
+/* Determines how much of the request overlaps the track buffer
+ * (via transfer_size), then copies current_count_sectors worth of
+ * data between the buffer_head chain and the DMA track buffer —
+ * track buffer -> cache for reads, cache -> track buffer for writes. */
+static void copy_buffer(int ssize, int max_sector, int max_sector_2)
+{
+	int remaining; /* number of transferred 512-byte sectors */
+	struct buffer_head *bh;
+	char *buffer, *dma_buffer;
+	int size;
+
+	max_sector = transfer_size(ssize,
+				   minimum(max_sector, max_sector_2),
+				   CURRENT->nr_sectors);
+
+	if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
+	    buffer_max > sector_t + CURRENT->nr_sectors)
+		current_count_sectors = minimum(buffer_max - sector_t,
+						CURRENT->nr_sectors);
+
+	remaining = current_count_sectors << 9;
+#ifdef FLOPPY_SANITY_CHECK
+	if ((remaining >> 9) > CURRENT->nr_sectors &&
+	    CT(COMMAND) == FD_WRITE){
+		DPRINT("in copy buffer\n");
+		printk("current_count_sectors=%ld\n", current_count_sectors);
+		printk("remaining=%d\n", remaining >> 9);
+		printk("CURRENT->nr_sectors=%ld\n",CURRENT->nr_sectors);
+		printk("CURRENT->current_nr_sectors=%ld\n",
+		       CURRENT->current_nr_sectors);
+		printk("max_sector=%d\n", max_sector);
+		printk("ssize=%d\n", ssize);
+	}
+#endif
+
+	buffer_max = maximum(max_sector, buffer_max);
+
+	dma_buffer = floppy_track_buffer + ((sector_t - buffer_min) << 9);
+
+	bh = CURRENT->bh;
+	size = CURRENT->current_nr_sectors << 9;
+	buffer = CURRENT->buffer;
+
+	while (remaining > 0){
+		SUPBOUND(size, remaining);
+#ifdef FLOPPY_SANITY_CHECK
+		if (dma_buffer + size >
+		    floppy_track_buffer + (max_buffer_sectors << 10) ||
+		    dma_buffer < floppy_track_buffer){
+			DPRINT("buffer overrun in copy buffer %d\n",
+				(int) ((floppy_track_buffer - dma_buffer) >>9));
+			printk("sector_t=%d buffer_min=%d\n",
+			       sector_t, buffer_min);
+			printk("current_count_sectors=%ld\n",
+			       current_count_sectors);
+			/* BUGFIX: the second test used to repeat FD_READ, so
+			 * the write case could never be reported */
+			if (CT(COMMAND) == FD_READ)
+				printk("read\n");
+			if (CT(COMMAND) == FD_WRITE)
+				printk("write\n");
+			break;
+		}
+		if (((unsigned long)buffer) % 512)
+			DPRINT("%p buffer not aligned\n", buffer);
+#endif
+		if (CT(COMMAND) == FD_READ)
+			memcpy(buffer, dma_buffer, size);
+		else
+			memcpy(dma_buffer, buffer, size);
+		remaining -= size;
+		if (!remaining)
+			break;
+
+		dma_buffer += size;
+		bh = bh->b_reqnext;
+#ifdef FLOPPY_SANITY_CHECK
+		if (!bh){
+			DPRINT("bh=null in copy buffer after copy\n");
+			break;
+		}
+#endif
+		size = bh->b_size;
+		buffer = bh->b_data;
+	}
+#ifdef FLOPPY_SANITY_CHECK
+	if (remaining){
+		if (remaining > 0)
+			max_sector -= remaining >> 9;
+		DPRINT("weirdness: remaining %d\n", remaining>>9);
+	}
+#endif
+}
+
+/*
+ * Formulate a read/write request.
+ * this routine decides where to load the data (directly to buffer, or to
+ * tmp floppy area), how much data to load (the size of the buffer, the whole
+ * track, or a single sector)
+ * All floppy_track_buffer handling goes in here. If we ever add track buffer
+ * allocation on the fly, it should be done here. No other part should need
+ * modification.
+ */
+
+/*
+ * Build raw_cmd (the low-level FDC command) for the current request.
+ * Return values: 0 = error (caller fails the request), 1 = request can
+ * be satisfied without issuing a command (track buffer hit, or phantom
+ * sectors of a 2M disk), 2 = raw_cmd is ready to go to the controller.
+ */
+static int make_raw_rw_request(void)
+{
+	int aligned_sector_t;
+	int max_sector, max_size, tracksize, ssize;
+
+	set_fdc(DRIVE(CURRENT->rq_dev));
+
+	raw_cmd = &default_raw_cmd;
+	/* fix: FD_RAW_NEED_DISK was or'ed in twice; listing it once
+	 * leaves the resulting flag set unchanged */
+	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+	raw_cmd->cmd_count = NR_RW;
+	if (CURRENT->cmd == READ){
+		raw_cmd->flags |= FD_RAW_READ;
+		COMMAND = FM_MODE(_floppy,FD_READ);
+	} else if (CURRENT->cmd == WRITE){
+		raw_cmd->flags |= FD_RAW_WRITE;
+		COMMAND = FM_MODE(_floppy,FD_WRITE);
+	} else {
+		DPRINT("make_raw_rw_request: unknown command\n");
+		return 0;
+	}
+
+	max_sector = _floppy->sect * _floppy->head;
+
+	TRACK = CURRENT->sector / max_sector;
+	sector_t = CURRENT->sector % max_sector;
+	if (_floppy->track && TRACK >= _floppy->track)
+		return 0;
+	HEAD = sector_t / _floppy->sect;
+
+	if (((_floppy->stretch & FD_SWAPSIDES) || TESTF(FD_NEED_TWADDLE)) &&
+	    sector_t < _floppy->sect)
+		max_sector = _floppy->sect;
+
+	/* 2M disks have phantom sectors on the first track */
+	if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)){
+		max_sector = 2 * _floppy->sect / 3;
+		if (sector_t >= max_sector){
+			current_count_sectors = minimum(_floppy->sect - sector_t,
+							CURRENT->nr_sectors);
+			return 1;
+		}
+		SIZECODE = 2;
+	} else
+		SIZECODE = FD_SIZECODE(_floppy);
+	raw_cmd->rate = _floppy->rate & 0x43;
+	if ((_floppy->rate & FD_2M) &&
+	    (TRACK || HEAD) &&
+	    raw_cmd->rate == 2)
+		raw_cmd->rate = 1;
+
+	if (SIZECODE)
+		SIZECODE2 = 0xff;
+	else
+		SIZECODE2 = 0x80;
+	raw_cmd->track = TRACK << STRETCH(_floppy);
+	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,HEAD);
+	GAP = _floppy->gap;
+	CODE2SIZE;	/* sets ssize from SIZECODE */
+	SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
+	SECTOR = ((sector_t % _floppy->sect) << 2 >> SIZECODE) + 1;
+	/* handle tracks whose sector count is not a multiple of ssize */
+	tracksize = _floppy->sect - _floppy->sect % ssize;
+	if (tracksize < _floppy->sect){
+		SECT_PER_TRACK ++;
+		if (tracksize <= sector_t % _floppy->sect)
+			SECTOR--;
+		while (tracksize <= sector_t % _floppy->sect){
+			while(tracksize + ssize > _floppy->sect){
+				SIZECODE--;
+				ssize >>= 1;
+			}
+			SECTOR++; SECT_PER_TRACK ++;
+			tracksize += ssize;
+		}
+		max_sector = HEAD * _floppy->sect + tracksize;
+	} else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing)
+		max_sector = _floppy->sect;
+
+	aligned_sector_t = sector_t - (sector_t % _floppy->sect) % ssize;
+	max_size = CURRENT->nr_sectors;
+	if ((raw_cmd->track == buffer_track) &&
+	    (current_drive == buffer_drive) &&
+	    (sector_t >= buffer_min) && (sector_t < buffer_max)) {
+		/* data already in track buffer */
+		if (CT(COMMAND) == FD_READ) {
+			copy_buffer(1, max_sector, buffer_max);
+			return 1;
+		}
+	} else if (aligned_sector_t != sector_t || CURRENT->nr_sectors < ssize){
+		/* unaligned or short write: must read-modify-write via
+		 * the track buffer, so turn this pass into a read */
+		if (CT(COMMAND) == FD_WRITE){
+			if (sector_t + CURRENT->nr_sectors > ssize &&
+			    sector_t + CURRENT->nr_sectors < ssize + ssize)
+				max_size = ssize + ssize;
+			else
+				max_size = ssize;
+		}
+		raw_cmd->flags &= ~FD_RAW_WRITE;
+		raw_cmd->flags |= FD_RAW_READ;
+		COMMAND = FM_MODE(_floppy,FD_READ);
+	} else if ((unsigned long)CURRENT->buffer < MAX_DMA_ADDRESS) {
+		unsigned long dma_limit;
+		int direct, indirect;
+
+		indirect= transfer_size(ssize,max_sector,max_buffer_sectors*2) -
+			sector_t;
+
+		/*
+		 * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
+		 * on a 64 bit machine!
+		 */
+		max_size = buffer_chain_size();
+		dma_limit = (MAX_DMA_ADDRESS - ((unsigned long) CURRENT->buffer)) >> 9;
+		if ((unsigned long) max_size > dma_limit) {
+			max_size = dma_limit;
+		}
+		/* 64 kb boundaries */
+		if (CROSS_64KB(CURRENT->buffer, max_size << 9))
+			max_size = (K_64 - ((long) CURRENT->buffer) % K_64)>>9;
+		direct = transfer_size(ssize,max_sector,max_size) - sector_t;
+		/*
+		 * We try to read tracks, but if we get too many errors, we
+		 * go back to reading just one sector at a time.
+		 *
+		 * This means we should be able to read a sector even if there
+		 * are other bad sectors on this track.
+		 */
+		if (!direct ||
+		    (indirect * 2 > direct * 3 &&
+		     *errors < DP->max_errors.read_track &&
+		     /*!TESTF(FD_NEED_TWADDLE) &&*/
+		     ((!probing || (DP->read_track&(1<<DRS->probed_format)))))){
+			max_size = CURRENT->nr_sectors;
+		} else {
+			/* direct user-buffer DMA */
+			raw_cmd->kernel_data = CURRENT->buffer;
+			raw_cmd->length = current_count_sectors << 9;
+			if (raw_cmd->length == 0){
+				DPRINT("zero dma transfer attempted from make_raw_request\n");
+				DPRINT("indirect=%d direct=%d sector_t=%d",
+				       indirect, direct, sector_t);
+				return 0;
+			}
+			return 2;
+		}
+	}
+
+	if (CT(COMMAND) == FD_READ)
+		max_size = max_sector; /* unbounded */
+
+	/* claim buffer track if needed */
+	if (buffer_track != raw_cmd->track || /* bad track */
+	    buffer_drive !=current_drive || /* bad drive */
+	    sector_t > buffer_max ||
+	    sector_t < buffer_min ||
+	    ((CT(COMMAND) == FD_READ ||
+	      (aligned_sector_t == sector_t && CURRENT->nr_sectors >= ssize))&&
+	     max_sector > 2 * max_buffer_sectors + buffer_min &&
+	     max_size + sector_t > 2 * max_buffer_sectors + buffer_min)
+	    /* not enough space */){
+		buffer_track = -1;
+		buffer_drive = current_drive;
+		buffer_max = buffer_min = aligned_sector_t;
+	}
+	raw_cmd->kernel_data = floppy_track_buffer +
+		((aligned_sector_t-buffer_min)<<9);
+
+	if (CT(COMMAND) == FD_WRITE){
+		/* copy write buffer to track buffer.
+		 * if we get here, we know that the write
+		 * is either aligned or the data already in the buffer
+		 * (buffer will be overwritten) */
+#ifdef FLOPPY_SANITY_CHECK
+		if (sector_t != aligned_sector_t && buffer_track == -1)
+			DPRINT("internal error offset !=0 on write\n");
+#endif
+		buffer_track = raw_cmd->track;
+		buffer_drive = current_drive;
+		copy_buffer(ssize, max_sector, 2*max_buffer_sectors+buffer_min);
+	} else
+		transfer_size(ssize, max_sector,
+			      2*max_buffer_sectors+buffer_min-aligned_sector_t);
+
+	/* round up current_count_sectors to get dma xfer size */
+	raw_cmd->length = sector_t+current_count_sectors-aligned_sector_t;
+	raw_cmd->length = ((raw_cmd->length -1)|(ssize-1))+1;
+	raw_cmd->length <<= 9;
+#ifdef FLOPPY_SANITY_CHECK
+	if ((raw_cmd->length < current_count_sectors << 9) ||
+	    (raw_cmd->kernel_data != CURRENT->buffer &&
+	     CT(COMMAND) == FD_WRITE &&
+	     (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
+	      aligned_sector_t < buffer_min)) ||
+	    raw_cmd->length % (128 << SIZECODE) ||
+	    raw_cmd->length <= 0 || current_count_sectors <= 0){
+		DPRINT("fractionary current count b=%lx s=%lx\n",
+		       raw_cmd->length, current_count_sectors);
+		if (raw_cmd->kernel_data != CURRENT->buffer)
+			printk("addr=%d, length=%ld\n",
+			       (int) ((raw_cmd->kernel_data -
+				       floppy_track_buffer) >> 9),
+			       current_count_sectors);
+		printk("st=%d ast=%d mse=%d msi=%d\n",
+		       sector_t, aligned_sector_t, max_sector, max_size);
+		printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
+		printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
+		       COMMAND, SECTOR, HEAD, TRACK);
+		printk("buffer drive=%d\n", buffer_drive);
+		printk("buffer track=%d\n", buffer_track);
+		printk("buffer_min=%d\n", buffer_min);
+		printk("buffer_max=%d\n", buffer_max);
+		return 0;
+	}
+
+	if (raw_cmd->kernel_data != CURRENT->buffer){
+		if (raw_cmd->kernel_data < floppy_track_buffer ||
+		    current_count_sectors < 0 ||
+		    raw_cmd->length < 0 ||
+		    raw_cmd->kernel_data + raw_cmd->length >
+		    floppy_track_buffer + (max_buffer_sectors << 10)){
+			DPRINT("buffer overrun in schedule dma\n");
+			printk("sector_t=%d buffer_min=%d current_count=%ld\n",
+			       sector_t, buffer_min,
+			       raw_cmd->length >> 9);
+			printk("current_count_sectors=%ld\n",
+			       current_count_sectors);
+			if (CT(COMMAND) == FD_READ)
+				printk("read\n");
+			/* fix: this used to test FD_READ a second time, so
+			 * "write" could never be printed */
+			if (CT(COMMAND) == FD_WRITE)
+				printk("write\n");
+			return 0;
+		}
+	} else if (raw_cmd->length > CURRENT->nr_sectors << 9 ||
+		   current_count_sectors > CURRENT->nr_sectors){
+		DPRINT("buffer overrun in direct transfer\n");
+		return 0;
+	} else if (raw_cmd->length < current_count_sectors << 9){
+		DPRINT("more sectors than bytes\n");
+		printk("bytes=%ld\n", raw_cmd->length >> 9);
+		printk("sectors=%ld\n", current_count_sectors);
+	}
+	if (raw_cmd->length == 0){
+		DPRINT("zero dma transfer attempted from make_raw_request\n");
+		return 0;
+	}
+#endif
+	return 2;
+}
+
+/*
+ * Main request loop: pick requests off the queue, set up drive and
+ * raw_cmd for each one, and hand the transfer to floppy_start via the
+ * immediate task queue.  Returns (with the fdc still locked) once a
+ * transfer is queued; unlocks the fdc when the queue is empty.
+ */
+static void redo_fd_request(void)
+{
+/* fail the current request and go on with the next one */
+#define REPEAT {request_done(0); continue; }
+	kdev_t device;
+	int tmp;
+
+	lastredo = jiffies;
+	if (current_drive < N_DRIVE)
+		floppy_off(current_drive);
+
+	if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+		CLEAR_INTR;
+		unlock_fdc();
+		return;
+	}
+
+	while(1){
+		if (!CURRENT) {
+			CLEAR_INTR;
+			unlock_fdc();
+			return;
+		}
+		if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
+			panic(DEVICE_NAME ": request list destroyed");
+		if (CURRENT->bh && !buffer_locked(CURRENT->bh))
+			panic(DEVICE_NAME ": block not locked");
+
+		device = CURRENT->rq_dev;
+		set_fdc(DRIVE(device));
+		reschedule_timeout(CURRENTD, "redo fd request", 0);
+
+		set_floppy(device);
+		raw_cmd = & default_raw_cmd;
+		raw_cmd->flags = 0;
+		/* non-zero means a motor-on callback was scheduled; we
+		 * will be re-entered once the motor has spun up */
+		if (start_motor(redo_fd_request)) return;
+		disk_change(current_drive);
+		if (test_bit(current_drive, &fake_change) ||
+		    TESTF(FD_DISK_CHANGED)){
+			DPRINT("disk absent or changed during operation\n");
+			REPEAT;
+		}
+		if (!_floppy) { /* Autodetection */
+			if (!probing){
+				DRS->probed_format = 0;
+				if (next_valid_format()){
+					DPRINT("no autodetectable formats\n");
+					_floppy = NULL;
+					REPEAT;
+				}
+			}
+			probing = 1;
+			_floppy = floppy_type+DP->autodetect[DRS->probed_format];
+		} else
+			probing = 0;
+		errors = & (CURRENT->errors);
+		tmp = make_raw_rw_request();
+		/* 0: error, 1: already satisfied from the track buffer */
+		if (tmp < 2){
+			request_done(tmp);
+			continue;
+		}
+
+		if (TESTF(FD_NEED_TWADDLE))
+			twaddle();
+		floppy_tq.routine = (void *)(void *) floppy_start;
+		queue_task(&floppy_tq, &tq_immediate);
+		mark_bh(IMMEDIATE_BH);
+#ifdef DEBUGT
+		debugt("queue fd request");
+#endif
+		return;
+	}
+#undef REPEAT
+}
+
+/* interrupt/redo/error/done callbacks for ordinary read/write requests */
+static struct cont_t rw_cont={
+	rw_interrupt,
+	redo_fd_request,
+	bad_flp_intr,
+	request_done };
+
+/* task-queue entry used to kick off redo_fd_request from a bottom half */
+static struct tq_struct request_tq =
+{ 0, 0, (void *) (void *) redo_fd_request, 0 };
+
+/* Schedule request processing to run from the immediate bottom half.
+ * cont must be set before the task can run. */
+static void process_fd_request(void)
+{
+	cont = &rw_cont;
+	queue_task(&request_tq, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+/*
+ * Block-layer entry point.  If the fdc is already busy, the new
+ * request will be picked up when the current one completes; otherwise
+ * lock the fdc and start processing the queue.
+ */
+static void do_fd_request(void)
+{
+	sti();
+	if (fdc_busy){
+		/* fdc busy, this new request will be treated when the
+		   current one is done */
+		is_alive("do fd request, old request running");
+		return;
+	}
+	lock_fdc(MAXTIMEOUT,0);
+	process_fd_request();
+	is_alive("do fd request");
+}
+
+/* callbacks for poll_drive: just wake up the waiter when done */
+static struct cont_t poll_cont={
+	success_and_wakeup,
+	floppy_ready,
+	generic_failure,
+	generic_done };
+
+/*
+ * Issue an empty command sequence, merely to sample the drive's
+ * disk-change line.  "flag" selects raw-command flags such as
+ * FD_RAW_NEED_DISK.  NOTE(review): "interruptible" and "ret" are
+ * presumably consumed/set by the WAIT macro -- confirm against its
+ * definition.
+ */
+static int poll_drive(int interruptible, int flag)
+{
+	int ret;
+	/* no auto-sense, just clear dcl */
+	raw_cmd = &default_raw_cmd;
+	raw_cmd->flags= flag;
+	raw_cmd->track=0;
+	raw_cmd->cmd_count=0;
+	cont = &poll_cont;
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG){
+		DPRINT("setting NEWCHANGE in poll_drive\n");
+	}
+#endif
+	SETF(FD_DISK_NEWCHANGE);
+	WAIT(floppy_ready);
+	return ret;
+}
+
+/*
+ * User triggered reset
+ * ====================
+ */
+
+/* We never expect a completion interrupt while the FDC is resetting. */
+static void reset_intr(void)
+{
+	printk("weird, reset interrupt called\n");
+}
+
+/* callbacks for a user-triggered controller reset */
+static struct cont_t reset_cont={
+	reset_intr,
+	success_and_wakeup,
+	generic_failure,
+	generic_done };
+
+/*
+ * Reset the controller on behalf of the FDRESET ioctl.
+ * FD_RESET_ALWAYS forces a reset even when none is pending.
+ * NOTE(review): ret is presumably filled in by the WAIT macro.
+ */
+static int user_reset_fdc(int drive, int arg, int interruptible)
+{
+	int ret;
+
+	ret=0;
+	LOCK_FDC(drive,interruptible);
+	if (arg == FD_RESET_ALWAYS)
+		FDCS->reset=1;
+	if (FDCS->reset){
+		cont = &reset_cont;
+		WAIT(reset_fdc);
+	}
+	process_fd_request();
+	return ret;
+}
+
+/*
+ * Misc Ioctl's and support
+ * ========================
+ */
+/* Copy "size" bytes to user space after verifying the destination.
+ * Returns 0, or the error propagated by the ECALL macro. */
+static int fd_copyout(void *param, const void *address, int size)
+{
+	int ret;
+
+	ECALL(verify_area(VERIFY_WRITE,param,size));
+	memcpy_tofs(param,(void *) address, size);
+	return 0;
+}
+
+/* Copy "size" bytes from user space after verifying the source. */
+static int fd_copyin(void *param, void *address, int size)
+{
+	int ret;
+
+	ECALL(verify_area(VERIFY_READ,param,size));
+	memcpy_fromfs((void *) address, param, size);
+	return 0;
+}
+
+/* copy a whole object to/from the ioctl "param" pointer */
+#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x)))
+#define COPYIN(x) ECALL(fd_copyin((void *)param, &(x), sizeof(x)))
+
+/*
+ * Return a human-readable name for the format selected by "type", or
+ * for the drive's native format when type is 0.  Never returns NULL;
+ * unknown or unnamed formats yield "(null)".
+ */
+static inline const char *drive_name(int type, int drive)
+{
+	struct floppy_struct *floppy = NULL;
+
+	if (type)
+		floppy = floppy_type + type;
+	else if (UDP->native_format)
+		floppy = floppy_type + UDP->native_format;
+
+	if (floppy && floppy->name)
+		return floppy->name;
+	return "(null)";
+}
+
+
+/* raw commands */
+/*
+ * Completion callback for user raw commands (FDRAWCMD).  Collects the
+ * FDC reply bytes, the residual DMA length and the disk-change state
+ * into raw_cmd, then either advances to the next chained command or
+ * signals completion via generic_done.
+ */
+static void raw_cmd_done(int flag)
+{
+	int i;
+
+	if (!flag) {
+		raw_cmd->flags |= FD_RAW_FAILURE;
+		raw_cmd->flags |= FD_RAW_HARDFAILURE;
+	} else {
+		raw_cmd->reply_count = inr;
+		for (i=0; i< raw_cmd->reply_count; i++)
+			raw_cmd->reply[i] = reply_buffer[i];
+
+		/* for data transfers, report how much was NOT transferred */
+		if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE))
+			raw_cmd->length = fd_get_dma_residue();
+
+		/* reply[0] & 0xc0 is presumably the ST0 completion code --
+		 * non-zero means abnormal termination; confirm */
+		if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
+		    (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
+			raw_cmd->flags |= FD_RAW_FAILURE;
+
+		if (disk_change(current_drive))
+			raw_cmd->flags |= FD_RAW_DISK_CHANGE;
+		else
+			raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
+		if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
+			motor_off_callback(current_drive);
+
+		/* chain to the next command unless a stop-on-failure /
+		 * stop-on-success flag says otherwise */
+		if (raw_cmd->next &&
+		    (!(raw_cmd->flags & FD_RAW_FAILURE) ||
+		     !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
+		    ((raw_cmd->flags & FD_RAW_FAILURE) ||
+		     !(raw_cmd->flags &FD_RAW_STOP_IF_SUCCESS))) {
+			raw_cmd = raw_cmd->next;
+			return;
+		}
+	}
+	generic_done(flag);
+}
+
+
+/* callbacks used while executing a user raw command chain */
+static struct cont_t raw_cmd_cont={
+	success_and_wakeup,
+	floppy_start,
+	generic_failure,
+	raw_cmd_done
+};
+
+/*
+ * Copy the results of a (possibly chained) raw command back to user
+ * space, in the old or new floppy_raw_cmd layout, followed by any
+ * data that was read from the disk.
+ */
+static inline int raw_cmd_copyout(int cmd, char *param,
+				  struct floppy_raw_cmd *ptr)
+{
+	struct old_floppy_raw_cmd old_raw_cmd;
+	int ret;
+
+	while(ptr) {
+		if (cmd == OLDFDRAWCMD) {
+			/* translate to the pre-fdutils structure layout */
+			old_raw_cmd.flags = ptr->flags;
+			old_raw_cmd.data = ptr->data;
+			old_raw_cmd.length = ptr->length;
+			old_raw_cmd.rate = ptr->rate;
+			old_raw_cmd.reply_count = ptr->reply_count;
+			memcpy(old_raw_cmd.reply, ptr->reply, 7);
+			COPYOUT(old_raw_cmd);
+			param += sizeof(old_raw_cmd);
+		} else {
+			COPYOUT(*ptr);
+			param += sizeof(struct floppy_raw_cmd);
+		}
+
+		/* ptr->length now holds the DMA residue, so the amount
+		 * actually read is buffer_length - length */
+		if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length){
+			if (ptr->length>=0 && ptr->length<=ptr->buffer_length)
+				ECALL(fd_copyout(ptr->data,
+						 ptr->kernel_data,
+						 ptr->buffer_length -
+						 ptr->length));
+		}
+		ptr = ptr->next;
+	}
+	return 0;
+}
+
+
+/*
+ * Free a chain of raw commands together with any DMA bounce buffers
+ * allocated for them, and clear the caller's pointer.
+ */
+static void raw_cmd_free(struct floppy_raw_cmd **ptr)
+{
+	struct floppy_raw_cmd *cur = *ptr;
+
+	*ptr = 0;
+	while (cur) {
+		struct floppy_raw_cmd *next = cur->next;
+
+		if (cur->buffer_length) {
+			fd_dma_mem_free((unsigned long)cur->kernel_data,
+					cur->buffer_length);
+			cur->buffer_length = 0;
+		}
+		kfree(cur);
+		cur = next;
+	}
+}
+
+
+/*
+ * Copy a (possibly chained) raw command from user space, allocating a
+ * kernel command structure and, for data transfers, a DMA buffer for
+ * each element.  On error the partial chain is left in *rcmd in a
+ * state that raw_cmd_free() can safely walk; the caller frees it.
+ */
+static inline int raw_cmd_copyin(int cmd, char *param,
+				 struct floppy_raw_cmd **rcmd)
+{
+	struct floppy_raw_cmd *ptr;
+	struct old_floppy_raw_cmd old_raw_cmd;
+	int ret;
+	int i;
+
+	*rcmd = 0;
+	while(1) {
+		ptr = (struct floppy_raw_cmd *)
+			kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+		if (!ptr)
+			return -ENOMEM;
+		/* fix: clear the fields raw_cmd_free() looks at BEFORE
+		 * anything below can fail, so an early error return never
+		 * leaves uninitialized next/buffer pointers in the chain */
+		ptr->next = 0;
+		ptr->buffer_length = 0;
+		ptr->kernel_data = 0;
+		*rcmd = ptr;
+		if (cmd == OLDFDRAWCMD){
+			COPYIN(old_raw_cmd);
+			ptr->flags = old_raw_cmd.flags;
+			ptr->data = old_raw_cmd.data;
+			ptr->length = old_raw_cmd.length;
+			ptr->rate = old_raw_cmd.rate;
+			ptr->cmd_count = old_raw_cmd.cmd_count;
+			ptr->track = old_raw_cmd.track;
+			ptr->phys_length = 0;
+			memcpy(ptr->cmd, old_raw_cmd.cmd, 9);
+			param += sizeof(struct old_floppy_raw_cmd);
+			if (ptr->cmd_count > 9)
+				return -EINVAL;
+		} else {
+			COPYIN(*ptr);
+			/* COPYIN overwrote the whole structure; restore the
+			 * kernel-private fields */
+			ptr->next = 0;
+			ptr->buffer_length = 0;
+			ptr->kernel_data = 0;
+			param += sizeof(struct floppy_raw_cmd);
+			if (ptr->cmd_count > 33)
+				/* the command may now also take up the space
+				 * initially intended for the reply & the
+				 * reply count. Needed for long 82078 commands
+				 * such as RESTORE, which takes ... 17 command
+				 * bytes. Murphy's law #137: When you reserve
+				 * 16 bytes for a structure, you'll one day
+				 * discover that you really need 17...
+				 */
+				return -EINVAL;
+		}
+
+		for (i=0; i< 16; i++)
+			ptr->reply[i] = 0;
+		ptr->resultcode = 0;
+
+		if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+			if (ptr->length <= 0)
+				return -EINVAL;
+			ptr->kernel_data =(char*)fd_dma_mem_alloc(ptr->length);
+			if (!ptr->kernel_data)
+				return -ENOMEM;
+			ptr->buffer_length = ptr->length;
+		}
+		if ( ptr->flags & FD_RAW_READ )
+			ECALL( verify_area( VERIFY_WRITE, ptr->data,
+					    ptr->length ));
+		if (ptr->flags & FD_RAW_WRITE)
+			ECALL(fd_copyin(ptr->data, ptr->kernel_data,
+					ptr->length));
+		rcmd = & (ptr->next);
+		if (!(ptr->flags & FD_RAW_MORE))
+			return 0;
+		ptr->rate &= 0x43;
+	}
+}
+
+
+/*
+ * Execute a user raw command chain (FDRAWCMD/OLDFDRAWCMD).  FDCS->rawcmd
+ * records whether any other opener shares this controller, so later
+ * code knows the drive state may have been perturbed.
+ */
+static int raw_cmd_ioctl(int cmd, void *param)
+{
+	int drive, ret, ret2;
+	struct floppy_raw_cmd *my_raw_cmd;
+
+	if (FDCS->rawcmd <= 1)
+		FDCS->rawcmd = 1;
+	for (drive= 0; drive < N_DRIVE; drive++){
+		if (FDC(drive) != fdc)
+			continue;
+		if (drive == current_drive){
+			if (UDRS->fd_ref > 1){
+				FDCS->rawcmd = 2;
+				break;
+			}
+		} else if (UDRS->fd_ref){
+			FDCS->rawcmd = 2;
+			break;
+		}
+	}
+
+	if (FDCS->reset)
+		return -EIO;
+
+	ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
+	if (ret) {
+		raw_cmd_free(&my_raw_cmd);
+		return ret;
+	}
+
+	raw_cmd = my_raw_cmd;
+	cont = &raw_cmd_cont;
+	ret=wait_til_done(floppy_start,1);
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG){
+		DPRINT("calling disk change from raw_cmd ioctl\n");
+	}
+#endif
+
+	if (ret != -EINTR && FDCS->reset)
+		ret = -EIO;
+
+	/* the raw command may have moved the head behind our back;
+	 * NO_TRACK presumably forces a reseek -- confirm */
+	DRS->track = NO_TRACK;
+
+	ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
+	if (!ret)
+		ret = ret2;
+	raw_cmd_free(&my_raw_cmd);
+	return ret;
+}
+
+/* Force a reread of the drive by faking a disk change. */
+static int invalidate_drive(kdev_t rdev)
+{
+	/* invalidate the buffer track to force a reread */
+	set_bit(DRIVE(rdev), &fake_change);
+	process_fd_request();
+	check_disk_change(rdev);
+	return 0;
+}
+
+
+/* Reset the write-error statistics of a drive (FDWERRORCLR helper). */
+static inline void clear_write_error(int drive)
+{
+	CLEARSTRUCT(UDRWE);
+}
+
+/*
+ * FDSETPRM/FDDEFPRM: install a new disk geometry, either for a whole
+ * format type (type != 0: root only, affects every drive using that
+ * type) or for a single drive's current format.
+ */
+static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+			       int drive, int type, kdev_t device)
+{
+	int cnt;
+
+	/* sanity checking for parameters.*/
+	if (g->sect <= 0 ||
+	    g->head <= 0 ||
+	    g->track <= 0 ||
+	    g->track > UDP->tracks>>STRETCH(g) ||
+	    /* check if reserved bits are set */
+	    (g->stretch&~(FD_STRETCH|FD_SWAPSIDES)) != 0)
+		return -EINVAL;
+	if (type){
+		if (!suser())
+			return -EPERM;
+		LOCK_FDC(drive,1);
+		for (cnt = 0; cnt < N_DRIVE; cnt++){
+			/* NOTE(review): sets the bit for "drive", not "cnt",
+			 * even though the loop scans all drives -- looks
+			 * suspicious, confirm intent */
+			if (ITYPE(drive_state[cnt].fd_device) == type &&
+			    drive_state[cnt].fd_ref)
+				set_bit(drive, &fake_change);
+		}
+		floppy_type[type] = *g;
+		floppy_type[type].name="user format";
+		for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
+			floppy_sizes[cnt]= floppy_sizes[cnt+0x80]=
+				floppy_type[type].size>>1;
+		process_fd_request();
+		for (cnt = 0; cnt < N_DRIVE; cnt++){
+			if (ITYPE(drive_state[cnt].fd_device) == type &&
+			    drive_state[cnt].fd_ref)
+				check_disk_change(
+					MKDEV(FLOPPY_MAJOR,
+					      drive_state[cnt].fd_device));
+		}
+	} else {
+		LOCK_FDC(drive,1);
+		if (cmd != FDDEFPRM)
+			/* notice a disk change immediately, else
+			 * we lose our settings immediately*/
+			CALL(poll_drive(1, FD_RAW_NEED_DISK));
+		user_params[drive] = *g;
+		if (buffer_drive == drive)
+			SUPBOUND(buffer_max, user_params[drive].sect);
+		current_type[drive] = &user_params[drive];
+		floppy_sizes[drive] = user_params[drive].size >> 1;
+		/* keep_data = -1: settings survive disk changes */
+		if (cmd == FDDEFPRM)
+			DRS->keep_data = -1;
+		else
+			DRS->keep_data = 1;
+		/* invalidation. Invalidate only when needed, i.e.
+		 * when there are already sectors in the buffer cache
+		 * whose number will change. This is useful, because
+		 * mtools often changes the geometry of the disk after
+		 * looking at the boot block */
+		if (DRS->maxblock > user_params[drive].sect || DRS->maxtrack)
+			invalidate_drive(device);
+		else
+			process_fd_request();
+	}
+	return 0;
+}
+
+/* handle obsolete ioctl's */
+/*
+ * Mapping between current ioctl numbers and the numeric 0x00xx-style
+ * commands of kernels <= 1.3.33, plus the size of the old argument
+ * structure (hard-coded on purpose: the old layouts must not track
+ * sizeof of the current structures).
+ */
+static struct translation_entry {
+	int newcmd;
+	int oldcmd;
+	int oldsize; /* size of 0x00xx-style ioctl. Reflects old structures, thus
+		      * use numeric values. NO SIZEOFS */
+} translation_table[]= {
+	{FDCLRPRM, 0, 0},
+	{FDSETPRM, 1, 28},
+	{FDDEFPRM, 2, 28},
+	{FDGETPRM, 3, 28},
+	{FDMSGON, 4, 0},
+	{FDMSGOFF, 5, 0},
+	{FDFMTBEG, 6, 0},
+	{FDFMTTRK, 7, 12},
+	{FDFMTEND, 8, 0},
+	{FDSETEMSGTRESH, 10, 0},
+	{FDFLUSH, 11, 0},
+	{FDSETMAXERRS, 12, 20},
+	{OLDFDRAWCMD, 30, 0},
+	{FDGETMAXERRS, 14, 20},
+	{FDGETDRVTYP, 16, 16},
+	{FDSETDRVPRM, 20, 88},
+	{FDGETDRVPRM, 21, 88},
+	{FDGETDRVSTAT, 22, 52},
+	{FDPOLLDRVSTAT, 23, 52},
+	{FDRESET, 24, 0},
+	{FDGETFDCSTAT, 25, 40},
+	{FDWERRORCLR, 27, 0},
+	{FDWERRORGET, 28, 24},
+	{FDRAWCMD, 0, 0},
+	{FDEJECT, 0, 0},
+	{FDTWADDLE, 40, 0} };
+
+/*
+ * Validate a new-style (0x02xx) ioctl number against the translation
+ * table and extract its encoded argument size.  Rejects requests whose
+ * user-encoded size exceeds what the kernel structure can hold.
+ */
+static inline int normalize_0x02xx_ioctl(int *cmd, int *size)
+{
+	struct translation_entry *entry;
+
+	for (entry = translation_table;
+	     entry < translation_table + ARRAY_SIZE(translation_table);
+	     entry++) {
+		if ((*cmd & 0xffff) != (entry->newcmd & 0xffff))
+			continue;
+		/* size as encoded in the user's number, checked against
+		 * the canonical command's size */
+		*size = _IOC_SIZE(*cmd);
+		*cmd = entry->newcmd;
+		if (*size > _IOC_SIZE(*cmd)) {
+			printk("ioctl not yet supported\n");
+			return -EFAULT;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Translate a 0x00xx-style ioctl (kernels <= 1.3.33) into the current
+ * number and its old argument size.  Depending on the running kernel
+ * version this warns about, or outright refuses, the obsolete numbers.
+ */
+static inline int xlate_0x00xx_ioctl(int *cmd, int *size)
+{
+	int i;
+	/* old ioctls' for kernels <= 1.3.33 */
+	/* When the next even release will come around, we'll start
+	 * warning against these.
+	 * When the next odd release will come around, we'll fail with
+	 * -EINVAL */
+	if(strcmp(system_utsname.version, "1.4.0") >= 0)
+		printk("obsolete floppy ioctl %x\n", *cmd);
+	if((system_utsname.version[0] == '1' &&
+	    strcmp(system_utsname.version, "1.5.0") >= 0) ||
+	   (system_utsname.version[0] >= '2' &&
+	    strcmp(system_utsname.version, "2.1.0") >= 0))
+		return -EINVAL;
+	for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+		if (*cmd == translation_table[i].oldcmd) {
+			*size = translation_table[i].oldsize;
+			*cmd = translation_table[i].newcmd;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/*
+ * ioctl() entry point.  Normalizes obsolete ioctl numbers, performs
+ * permission checks and argument copyin/copyout centrally, and then
+ * dispatches on the (normalized) command.
+ */
+static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+		    unsigned long param)
+{
+/* f_mode bit: opener may issue restricted ioctls */
+#define IOCTL_MODE_BIT 8
+/* f_mode bit: device was writable at open time */
+#define OPEN_WRITE_BIT 16
+#define IOCTL_ALLOWED (filp && (filp->f_mode & IOCTL_MODE_BIT))
+/* dispatch helpers: OUT replies with a structure, IN consumes one */
+#define OUT(c,x) case c: outparam = (const char *) (x); break
+#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
+
+	int i,drive,type;
+	kdev_t device;
+	int ret;
+	int size;
+	union inparam {
+		struct floppy_struct g; /* geometry */
+		struct format_descr f;
+		struct floppy_max_errors max_errors;
+		struct floppy_drive_params dp;
+	} inparam; /* parameters coming from user space */
+	const char *outparam; /* parameters passed back to user space */
+
+	device = inode->i_rdev;
+	switch (cmd) {
+		RO_IOCTLS(device,param);
+	}
+	type = TYPE(device);
+	drive = DRIVE(device);
+
+	/* convert compatibility eject ioctls into floppy eject ioctl.
+	 * We do this in order to provide a means to eject floppy disks before
+	 * installing the new fdutils package */
+	if(cmd == CDROMEJECT || /* CD-ROM eject */
+	   cmd == 0x6470 /* SunOS floppy eject */) {
+		DPRINT("obsolete eject ioctl\n");
+		DPRINT("please use floppycontrol --eject\n");
+		cmd = FDEJECT;
+	}
+
+	/* convert the old style command into a new style command */
+	if ((cmd & 0xff00) == 0x0200) {
+		ECALL(normalize_0x02xx_ioctl(&cmd, &size));
+	} else if ((cmd & 0xff00) == 0x0000) {
+		ECALL(xlate_0x00xx_ioctl(&cmd, &size));
+	} else
+		return -EINVAL;
+
+	/* permission checks */
+	if (((cmd & 0x80) && !suser()) ||
+	    ((cmd & 0x40) && !IOCTL_ALLOWED))
+		return -EPERM;
+
+	/* verify writability of result, and fail early */
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		ECALL(verify_area(VERIFY_WRITE,(void *) param, size));
+
+	/* copyin */
+	CLEARSTRUCT(&inparam);
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		ECALL(fd_copyin((void *)param, &inparam, size))
+
+	switch (cmd) {
+	case FDEJECT:
+		if(UDRS->fd_ref != 1)
+			/* somebody else has this drive open */
+			return -EBUSY;
+		LOCK_FDC(drive,1);
+
+		/* do the actual eject. Fails on
+		 * non-Sparc architectures */
+		ret=fd_eject(UNIT(drive));
+
+		USETF(FD_DISK_CHANGED);
+		USETF(FD_VERIFY);
+		process_fd_request();
+		return ret;
+	case FDCLRPRM:
+		LOCK_FDC(drive,1);
+		current_type[drive] = NULL;
+		floppy_sizes[drive] = MAX_DISK_SIZE;
+		UDRS->keep_data = 0;
+		return invalidate_drive(device);
+	case FDSETPRM:
+	case FDDEFPRM:
+		return set_geometry(cmd, & inparam.g,
+				    drive, type, device);
+	case FDGETPRM:
+		LOCK_FDC(drive,1);
+		CALL(poll_drive(1,0));
+		process_fd_request();
+		if (type)
+			outparam = (char *) &floppy_type[type];
+		else
+			outparam = (char *) current_type[drive];
+		if(!outparam)
+			return -ENODEV;
+		break;
+
+	case FDMSGON:
+		UDP->flags |= FTD_MSG;
+		return 0;
+	case FDMSGOFF:
+		UDP->flags &= ~FTD_MSG;
+		return 0;
+
+	case FDFMTBEG:
+		/* sample the drive state so format can be refused early */
+		LOCK_FDC(drive,1);
+		CALL(poll_drive(1, FD_RAW_NEED_DISK));
+		ret = UDRS->flags;
+		process_fd_request();
+		if(ret & FD_VERIFY)
+			return -ENODEV;
+		if(!(ret & FD_DISK_WRITABLE))
+			return -EROFS;
+		return 0;
+	case FDFMTTRK:
+		if (UDRS->fd_ref != 1)
+			return -EBUSY;
+		return do_format(device, &inparam.f);
+	case FDFMTEND:
+	case FDFLUSH:
+		LOCK_FDC(drive,1);
+		return invalidate_drive(device);
+
+	case FDSETEMSGTRESH:
+		UDP->max_errors.reporting =
+			(unsigned short) (param & 0x0f);
+		return 0;
+	OUT(FDGETMAXERRS, &UDP->max_errors);
+	IN(FDSETMAXERRS, &UDP->max_errors, max_errors);
+
+	case FDGETDRVTYP:
+		outparam = drive_name(type,drive);
+		SUPBOUND(size,strlen(outparam)+1);
+		break;
+
+	IN(FDSETDRVPRM, UDP, dp);
+	OUT(FDGETDRVPRM, UDP);
+
+	case FDPOLLDRVSTAT:
+		/* refresh the drive state, then report it like
+		 * FDGETDRVSTAT does */
+		LOCK_FDC(drive,1);
+		CALL(poll_drive(1, FD_RAW_NEED_DISK));
+		process_fd_request();
+		/* fall through */
+	OUT(FDGETDRVSTAT, UDRS);
+
+	case FDRESET:
+		return user_reset_fdc(drive, (int)param, 1);
+
+	OUT(FDGETFDCSTAT,UFDCS);
+
+	case FDWERRORCLR:
+		CLEARSTRUCT(UDRWE);
+		return 0;
+	OUT(FDWERRORGET,UDRWE);
+
+	case OLDFDRAWCMD:
+	case FDRAWCMD:
+		if (type)
+			return -EINVAL;
+		LOCK_FDC(drive,1);
+		set_floppy(device);
+		CALL(i = raw_cmd_ioctl(cmd,(void *) param));
+		process_fd_request();
+		return i;
+
+	case FDTWADDLE:
+		LOCK_FDC(drive,1);
+		twaddle();
+		process_fd_request();
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* commands that "break" out of the switch reply via outparam */
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		return fd_copyout((void *)param, outparam, size);
+	else
+		return 0;
+#undef IOCTL_ALLOWED
+#undef OUT
+#undef IN
+}
+
+/*
+ * Read the drive types out of CMOS at init time, install the matching
+ * default drive parameters, and print the detected drives.  Drives
+ * with no/unknown CMOS type are removed from allowed_drive_mask.
+ * (UDP selects drive_params[drive], so the "drive" assignments below
+ * are not dead stores.)
+ */
+static void config_types(void)
+{
+	int first=1;
+	int drive;
+
+	/* read drive info out of physical CMOS */
+	drive=0;
+	if (!UDP->cmos)
+		UDP->cmos= FLOPPY0_TYPE;
+	drive=1;
+	if (!UDP->cmos && FLOPPY1_TYPE)
+		UDP->cmos = FLOPPY1_TYPE;
+
+	/* XXX */
+	/* additional physical CMOS drive detection should go here */
+
+	for (drive=0; drive < N_DRIVE; drive++){
+		if (UDP->cmos >= 16)
+			UDP->cmos = 0;
+		/* fix: the bound used to be "<=", which allowed reading one
+		 * entry past the end of default_drive_params */
+		if (UDP->cmos >= 0 && UDP->cmos < NUMBER(default_drive_params))
+			memcpy((char *) UDP,
+			       (char *) (&default_drive_params[(int)UDP->cmos].params),
+			       sizeof(struct floppy_drive_params));
+		if (UDP->cmos){
+			if (first)
+				printk(KERN_INFO "Floppy drive(s): ");
+			else
+				printk(", ");
+			first=0;
+			if (UDP->cmos > 0){
+				allowed_drive_mask |= 1 << drive;
+				printk("fd%d is %s", drive,
+				       default_drive_params[(int)UDP->cmos].name);
+			} else
+				printk("fd%d is unknown type %d",drive,
+				       UDP->cmos);
+		}
+		else
+			allowed_drive_mask &= ~(1 << drive);
+	}
+	if (!first)
+		printk("\n");
+}
+
+/* read() entry point: refuse if the medium changed, else delegate to
+ * the generic block-device read.  NOTE(review): "drive" looks unused
+ * but is presumably referenced by the UTESTF macro -- confirm. */
+static int floppy_read(struct inode * inode, struct file * filp,
+		       char * buf, int count)
+{
+	int drive = DRIVE(inode->i_rdev);
+
+	check_disk_change(inode->i_rdev);
+	if (UTESTF(FD_DISK_CHANGED))
+		return -ENXIO;
+	return block_read(inode, filp, buf, count);
+}
+
+/* write() entry point: track the highest block written (used by the
+ * geometry-invalidation heuristics in set_geometry) and delegate to
+ * the generic block-device write. */
+static int floppy_write(struct inode * inode, struct file * filp,
+			const char * buf, int count)
+{
+	int block;
+	int ret;
+	int drive = DRIVE(inode->i_rdev);
+
+	if (!UDRS->maxblock)
+		UDRS->maxblock=1;/* make change detectable */
+	check_disk_change(inode->i_rdev);
+	if (UTESTF(FD_DISK_CHANGED))
+		return -ENXIO;
+	if (!UTESTF(FD_DISK_WRITABLE))
+		return -EROFS;
+	block = (filp->f_pos + count) >> 9;
+	INFBOUND(UDRS->maxblock, block);
+	ret= block_write(inode, filp, buf, count);
+	return ret;
+}
+
+/*
+ * release() entry point: sync dirty buffers when the file was open
+ * for writing, drop the reference count (fd_ref < 0 marks an
+ * exclusive open) and release irq/dma.
+ */
+static void floppy_release(struct inode * inode, struct file * filp)
+{
+	int drive;
+
+	drive = DRIVE(inode->i_rdev);
+
+	if (!filp || (filp->f_mode & (2 | OPEN_WRITE_BIT)))
+		/* if the file is mounted OR (writable now AND writable at
+		 * open time) Linus: Does this cover all cases? */
+		block_fsync(inode,filp);
+
+	if (UDRS->fd_ref < 0)
+		UDRS->fd_ref=0;
+	else if (!UDRS->fd_ref--) {
+		/* count was already 0: complain and clamp back to 0 */
+		DPRINT("floppy_release with fd_ref == 0");
+		UDRS->fd_ref = 0;
+	}
+	floppy_release_irq_and_dma();
+}
+
+/*
+ * floppy_open check for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc), and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+/* bail out of floppy_open: undo the reference we took, return -x */
+#define RETERR(x) do{floppy_release(inode,filp); return -(x);}while(0)
+
+static int floppy_open(struct inode * inode, struct file * filp)
+{
+	int drive;
+	int old_dev;
+	int try;
+	char *tmp;
+
+	if (!filp) {
+		DPRINT("Weird, open called with filp=0\n");
+		return -EIO;
+	}
+
+	drive = DRIVE(inode->i_rdev);
+	if (drive >= N_DRIVE ||
+	    !(allowed_drive_mask & (1 << drive)) ||
+	    fdc_state[FDC(drive)].version == FDC_NONE)
+		return -ENXIO;
+
+	if (TYPE(inode->i_rdev) >= NUMBER(floppy_type))
+		return -ENXIO;
+	old_dev = UDRS->fd_device;
+	/* disallow simultaneous access through different minors */
+	if (UDRS->fd_ref && old_dev != MINOR(inode->i_rdev))
+		return -EBUSY;
+
+	if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)){
+		USETF(FD_DISK_CHANGED);
+		USETF(FD_VERIFY);
+	}
+
+	/* fd_ref == -1 marks an existing exclusive (O_EXCL) open */
+	if (UDRS->fd_ref == -1 ||
+	    (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
+		return -EBUSY;
+
+	if (floppy_grab_irq_and_dma())
+		return -EBUSY;
+
+	if (filp->f_flags & O_EXCL)
+		UDRS->fd_ref = -1;
+	else
+		UDRS->fd_ref++;
+
+	if (!floppy_track_buffer){
+		/* if opening an ED drive, reserve a big buffer,
+		 * else reserve a small one */
+		if ((UDP->cmos == 6) || (UDP->cmos == 5))
+			try = 64; /* Only 48 actually useful */
+		else
+			try = 32; /* Only 24 actually useful */
+
+		tmp=(char *)fd_dma_mem_alloc(1024 * try);
+		if (!tmp) {
+			try >>= 1; /* buffer only one side */
+			INFBOUND(try, 16);
+			tmp= (char *)fd_dma_mem_alloc(1024*try);
+		}
+		if (!tmp) {
+			DPRINT("Unable to allocate DMA memory\n");
+			RETERR(ENXIO);
+		}
+		/* re-check: another opener may have installed a buffer
+		 * while we were allocating */
+		if (floppy_track_buffer)
+			fd_dma_mem_free((unsigned long)tmp,try*1024);
+		else {
+			buffer_min = buffer_max = -1;
+			floppy_track_buffer = tmp;
+			max_buffer_sectors = try;
+		}
+	}
+
+	UDRS->fd_device = MINOR(inode->i_rdev);
+	if (old_dev != -1 && old_dev != MINOR(inode->i_rdev)) {
+		if (buffer_drive == drive)
+			buffer_track = -1;
+		invalidate_buffers(MKDEV(FLOPPY_MAJOR,old_dev));
+	}
+
+	/* Allow ioctls if we have write-permissions even if read-only open */
+	if ((filp->f_mode & 2) || (permission(inode,2) == 0))
+		filp->f_mode |= IOCTL_MODE_BIT;
+	if (filp->f_mode & 2)
+		filp->f_mode |= OPEN_WRITE_BIT;
+
+	if (UFDCS->rawcmd == 1)
+		UFDCS->rawcmd = 2;
+
+	/* O_NDELAY: open succeeds without requiring a readable disk */
+	if (filp->f_flags & O_NDELAY)
+		return 0;
+	if (filp->f_mode & 3) {
+		UDRS->last_checked = 0;
+		check_disk_change(inode->i_rdev);
+		if (UTESTF(FD_DISK_CHANGED))
+			RETERR(ENXIO);
+	}
+	if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
+		RETERR(EROFS);
+	return 0;
+#undef RETERR
+}
+
+/*
+ * Check if the disk has been changed or if a change has been faked.
+ */
+static int check_floppy_change(kdev_t dev)
+{
+	int drive = DRIVE(dev);
+
+	if (MAJOR(dev) != MAJOR_NR) {
+		DPRINT("check_floppy_change: not a floppy\n");
+		return 0;
+	}
+
+	if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
+		return 1;
+
+	/* poll the drive if we have not sampled the disk-change line
+	 * recently enough */
+	if (UDP->checkfreq < jiffies - UDRS->last_checked){
+		lock_fdc(drive,0);
+		poll_drive(0,0);
+		process_fd_request();
+	}
+
+	/* also report "changed" when no geometry is known at all */
+	if (UTESTF(FD_DISK_CHANGED) ||
+	    UTESTF(FD_VERIFY) ||
+	    test_bit(drive, &fake_change) ||
+	    (!TYPE(dev) && !current_type[drive]))
+		return 1;
+	return 0;
+}
+
+/* revalidate the floppy disk, i.e. trigger format autodetection by reading
+ * the bootblock (block 0). "Autodetection" is also needed to check whether
+ * there is a disk in the drive at all... Thus we also do it for fixed
+ * geometry formats */
+static int floppy_revalidate(kdev_t dev)
+{
+/* true when neither a fixed type nor a user geometry is set */
+#define NO_GEOM (!current_type[drive] && !TYPE(dev))
+	struct buffer_head * bh;
+	int drive=DRIVE(dev);
+	int cf;
+
+	if (UTESTF(FD_DISK_CHANGED) ||
+	    UTESTF(FD_VERIFY) ||
+	    test_bit(drive, &fake_change) ||
+	    NO_GEOM){
+		lock_fdc(drive,0);
+		/* re-test under the lock */
+		cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY);
+		if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)){
+			process_fd_request(); /*already done by another thread*/
+			return 0;
+		}
+		UDRS->maxblock = 0;
+		UDRS->maxtrack = 0;
+		if (buffer_drive == drive)
+			buffer_track = -1;
+		clear_bit(drive, &fake_change);
+		UCLEARF(FD_DISK_CHANGED);
+		if (cf)
+			UDRS->generation++;
+		if (NO_GEOM){
+			/* auto-sensing */
+			int size = floppy_blocksizes[MINOR(dev)];
+			if (!size)
+				size = 1024;
+			if (!(bh = getblk(dev,0,size))){
+				process_fd_request();
+				return 1;
+			}
+			/* reading block 0 drives the format autodetection */
+			if (bh && !buffer_uptodate(bh))
+				ll_rw_block(READ, 1, &bh, 1);
+			process_fd_request();
+			wait_on_buffer(bh);
+			brelse(bh);
+			return 0;
+		}
+		if (cf)
+			poll_drive(0, FD_RAW_NEED_DISK);
+		process_fd_request();
+	}
+	return 0;
+}
+
+static struct file_operations floppy_fops = {
+ NULL, /* lseek - default */
+ floppy_read, /* read - general block-dev read */
+ floppy_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ fd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ floppy_open, /* open */
+ floppy_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_floppy_change, /* media_change */
+ floppy_revalidate, /* revalidate */
+};
+
+/*
+ * Floppy Driver initialization
+ * =============================
+ */
+
+/* Determine the floppy disk controller type */
+/* This routine was written by David C. Niemi */
+static char get_fdc_version(void)
+{
+ int r;
+
+ output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
+ if (FDCS->reset)
+ return FDC_NONE;
+ if ((r = result()) <= 0x00)
+ return FDC_NONE; /* No FDC present ??? */
+ if ((r==1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is an 8272A\n",fdc);
+ return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
+ }
+ if (r != 10) {
+ printk("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+
+ if(!fdc_configure()) {
+ printk(KERN_INFO "FDC %d is an 82072\n",fdc);
+ return FDC_82072; /* 82072 doesn't know CONFIGURE */
+ }
+
+ output_byte(FD_PERPENDICULAR);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(0);
+ } else {
+ printk(KERN_INFO "FDC %d is an 82072A\n", fdc);
+ return FDC_82072A; /* 82072A as found on Sparcs. */
+ }
+
+ output_byte(FD_UNLOCK);
+ r = result();
+ if ((r == 1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc);
+ return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
+ * LOCK/UNLOCK */
+ }
+ if ((r != 1) || (reply_buffer[0] != 0x00)) {
+ printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_PARTID);
+ r = result();
+ if (r != 1) {
+ printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ if (reply_buffer[0] == 0x80) {
+ printk(KERN_INFO "FDC %d is a post-1991 82077\n",fdc);
+ return FDC_82077; /* Revised 82077AA passes all the tests */
+ }
+ switch (reply_buffer[0] >> 5) {
+ case 0x0:
+ /* Either a 82078-1 or a 82078SL running at 5Volt */
+ printk(KERN_INFO "FDC %d is an 82078.\n",fdc);
+ return FDC_82078;
+ case 0x1:
+ printk(KERN_INFO "FDC %d is a 44pin 82078\n",fdc);
+ return FDC_82078;
+ case 0x2:
+ printk(KERN_INFO "FDC %d is a S82078B\n", fdc);
+ return FDC_S82078B;
+ case 0x3:
+ printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n", fdc);
+ return FDC_87306;
+ default:
+ printk(KERN_INFO "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
+ fdc, reply_buffer[0] >> 5);
+ return FDC_82078_UNKN;
+ }
+} /* get_fdc_version */
+
+/* lilo configuration */
+
+/* we make the invert_dcl function global. One day, somebody might
+ * want to centralize all thinkpad related options into one lilo option,
+ * there are just so many thinkpad related quirks! */
+void floppy_invert_dcl(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param)
+ default_drive_params[i].params.flags |= 0x80;
+ else
+ default_drive_params[i].params.flags &= ~0x80;
+ }
+ DPRINT("Configuring drives for inverted dcl\n");
+}
+
+static void daring(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param){
+ default_drive_params[i].params.select_delay = 0;
+ default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR;
+ } else {
+ default_drive_params[i].params.select_delay = 2*HZ/100;
+ default_drive_params[i].params.flags &= ~FD_SILENT_DCL_CLEAR;
+ }
+ }
+ DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
+}
+
+static void set_cmos(int *ints, int dummy)
+{
+ int current_drive=0;
+
+ if (ints[0] != 2){
+ DPRINT("wrong number of parameter for cmos\n");
+ return;
+ }
+ current_drive = ints[1];
+ if (current_drive < 0 || current_drive >= 8){
+ DPRINT("bad drive for set_cmos\n");
+ return;
+ }
+ if (current_drive >= 4 && !FDC2)
+ FDC2 = 0x370;
+ if (ints[2] <= 0 ||
+ (ints[2] >= NUMBER(default_drive_params) && ints[2] != 16)){
+ DPRINT("bad cmos code %d\n", ints[2]);
+ return;
+ }
+ DP->cmos = ints[2];
+ DPRINT("setting cmos code to %d\n", ints[2]);
+}
+
+static struct param_table {
+ const char *name;
+ void (*fn)(int *ints, int param);
+ int *var;
+ int def_param;
+} config_params[]={
+ { "allowed_drive_mask", 0, &allowed_drive_mask, 0xff },
+ { "all_drives", 0, &allowed_drive_mask, 0xff },
+ { "asus_pci", 0, &allowed_drive_mask, 0x33 },
+
+ { "daring", daring, 0, 1},
+
+ { "two_fdc", 0, &FDC2, 0x370 },
+ { "one_fdc", 0, &FDC2, 0 },
+
+ { "thinkpad", floppy_invert_dcl, 0, 1 },
+
+ { "nodma", 0, &use_virtual_dma, 1 },
+ { "omnibook", 0, &use_virtual_dma, 1 },
+ { "dma", 0, &use_virtual_dma, 0 },
+
+ { "fifo_depth", 0, &fifo_depth, 0xa },
+ { "nofifo", 0, &no_fifo, 0x20 },
+ { "usefifo", 0, &no_fifo, 0 },
+
+ { "cmos", set_cmos, 0, 0 },
+
+ { "unexpected_interrupts", 0, &print_unex, 1 },
+ { "no_unexpected_interrupts", 0, &print_unex, 0 },
+ { "L40SX", 0, &print_unex, 0 } };
+
+#define FLOPPY_SETUP
+void floppy_setup(char *str, int *ints)
+{
+ int i;
+ int param;
+ if (str)
+ for (i=0; i< ARRAY_SIZE(config_params); i++){
+ if (strcmp(str,config_params[i].name) == 0){
+ if (ints[0])
+ param = ints[1];
+ else
+ param = config_params[i].def_param;
+ if(config_params[i].fn)
+ config_params[i].fn(ints,param);
+ if(config_params[i].var) {
+ DPRINT("%s=%d\n", str, param);
+ *config_params[i].var = param;
+ }
+ return;
+ }
+ }
+ if (str) {
+ DPRINT("unknown floppy option [%s]\n", str);
+
+ DPRINT("allowed options are:");
+ for (i=0; i< ARRAY_SIZE(config_params); i++)
+ printk(" %s",config_params[i].name);
+ printk("\n");
+ } else
+ DPRINT("botched floppy option\n");
+ DPRINT("Read linux/drivers/block/README.fd\n");
+}
+
+int floppy_init(void)
+{
+ int i,unit,drive;
+ int have_no_fdc= -EIO;
+
+ raw_cmd = 0;
+
+ if (register_blkdev(MAJOR_NR,"fd",&floppy_fops)) {
+ printk("Unable to get major %d for floppy\n",MAJOR_NR);
+ return -EBUSY;
+ }
+
+ for (i=0; i<256; i++)
+ if (ITYPE(i))
+ floppy_sizes[i] = floppy_type[ITYPE(i)].size >> 1;
+ else
+ floppy_sizes[i] = MAX_DISK_SIZE;
+
+ blk_size[MAJOR_NR] = floppy_sizes;
+ blksize_size[MAJOR_NR] = floppy_blocksizes;
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
+ config_types();
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ CLEARSTRUCT(FDCS);
+ FDCS->dtr = -1;
+ FDCS->dor = 0x4;
+#ifdef __sparc__
+ /*sparcs don't have a DOR reset which we can fall back on to*/
+ FDCS->version = FDC_82072A;
+#endif
+ }
+
+ fdc_state[0].address = FDC1;
+#if N_FDC > 1
+ fdc_state[1].address = FDC2;
+#endif
+
+ if (floppy_grab_irq_and_dma()){
+ del_timer(&fd_timeout);
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ return -EBUSY;
+ }
+
+ /* initialise drive state */
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ CLEARSTRUCT(UDRS);
+ CLEARSTRUCT(UDRWE);
+ UDRS->flags = FD_VERIFY | FD_DISK_NEWCHANGE | FD_DISK_CHANGED;
+ UDRS->fd_device = -1;
+ floppy_track_buffer = NULL;
+ max_buffer_sectors = 0;
+ }
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ FDCS->driver_version = FD_DRIVER_VERSION;
+ for (unit=0; unit<4; unit++)
+ FDCS->track[unit] = 0;
+ if (FDCS->address == -1)
+ continue;
+ FDCS->rawcmd = 2;
+ if (user_reset_fdc(-1,FD_RESET_ALWAYS,0)){
+ FDCS->address = -1;
+ FDCS->version = FDC_NONE;
+ continue;
+ }
+ /* Try to determine the floppy controller type */
+ FDCS->version = get_fdc_version();
+ if (FDCS->version == FDC_NONE){
+ FDCS->address = -1;
+ continue;
+ }
+
+ request_region(FDCS->address, 6, "floppy");
+ request_region(FDCS->address+7, 1, "floppy DIR");
+ /* address + 6 is reserved, and may be taken by IDE.
+ * Unfortunately, Adaptec doesn't know this :-(, */
+
+ have_no_fdc = 0;
+ /* Not all FDCs seem to be able to handle the version command
+ * properly, so force a reset for the standard FDC clones,
+ * to avoid interrupt garbage.
+ */
+ user_reset_fdc(-1,FD_RESET_ALWAYS,0);
+ }
+ fdc=0;
+ del_timer(&fd_timeout);
+ current_drive = 0;
+ floppy_release_irq_and_dma();
+ initialising=0;
+ if (have_no_fdc) {
+ DPRINT("no floppy controllers found\n");
+ request_tq.routine = (void *)(void *) empty;
+ /*
+ * When we return we may be unloaded. This little
+ * trick forces the immediate_bh handler to have run
+ * before we unload it, lest we cause bad things.
+ */
+ mark_bh(IMMEDIATE_BH);
+ schedule();
+ if (usage_count)
+ floppy_release_irq_and_dma();
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ }
+ return have_no_fdc;
+}
+
+static int floppy_grab_irq_and_dma(void)
+{
+ int i;
+ unsigned long flags;
+
+ INT_OFF;
+ if (usage_count++){
+ INT_ON;
+ return 0;
+ }
+ INT_ON;
+ MOD_INC_USE_COUNT;
+ for (i=0; i< N_FDC; i++){
+ if (fdc_state[i].address != -1){
+ fdc = i;
+ reset_fdc_info(1);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+ }
+ fdc = 0;
+ set_dor(0, ~0, 8); /* avoid immediate interrupt */
+
+ if (fd_request_irq()) {
+ DPRINT("Unable to grab IRQ%d for the floppy driver\n",
+ FLOPPY_IRQ);
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ if (fd_request_dma()) {
+ DPRINT("Unable to grab DMA%d for the floppy driver\n",
+ FLOPPY_DMA);
+ fd_free_irq();
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ fd_outb(FDCS->dor, FD_DOR);
+ fdc = 0;
+ fd_enable_irq();
+ irqdma_allocated=1;
+ return 0;
+}
+
+static void floppy_release_irq_and_dma(void)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ int drive;
+#endif
+ long tmpsize;
+ unsigned long tmpaddr;
+ unsigned long flags;
+
+ INT_OFF;
+ if (--usage_count){
+ INT_ON;
+ return;
+ }
+ INT_ON;
+ if(irqdma_allocated)
+ {
+ fd_disable_dma();
+ fd_free_dma();
+ fd_disable_irq();
+ fd_free_irq();
+ irqdma_allocated=0;
+ }
+
+ set_dor(0, ~0, 8);
+#if N_FDC > 1
+ set_dor(1, ~8, 0);
+#endif
+ floppy_enable_hlt();
+
+ if (floppy_track_buffer && max_buffer_sectors) {
+ tmpsize = max_buffer_sectors*1024;
+ tmpaddr = (unsigned long)floppy_track_buffer;
+ floppy_track_buffer = 0;
+ max_buffer_sectors = 0;
+ buffer_min = buffer_max = -1;
+ fd_dma_mem_free(tmpaddr, tmpsize);
+ }
+
+#ifdef FLOPPY_SANITY_CHECK
+#ifndef __sparc__
+ for (drive=0; drive < N_FDC * 4; drive++)
+ if (motor_off_timer[drive].next)
+ printk("motor off timer %d still active\n", drive);
+#endif
+
+ if (fd_timeout.next)
+ printk("floppy timer still active:%s\n", timeout_message);
+ if (fd_timer.next)
+ printk("auxiliary floppy timer still active\n");
+ if (floppy_tq.sync)
+ printk("task queue still active\n");
+#endif
+ MOD_DEC_USE_COUNT;
+}
+
+
+#ifdef MODULE
+
+char *floppy=NULL;
+
+static void parse_floppy_cfg_string(char *cfg)
+{
+ char *ptr;
+ int ints[11];
+
+ while(*cfg) {
+ for(ptr = cfg;*cfg && *cfg != ' ' && *cfg != '\t'; cfg++);
+ if(*cfg) {
+ *cfg = '\0';
+ cfg++;
+ }
+ if(*ptr)
+ floppy_setup(get_options(ptr,ints),ints);
+ }
+}
+
+static void mod_setup(char *pattern, void (*setup)(char *, int *))
+{
+ unsigned long i;
+ char c;
+ int j;
+ int match;
+ char buffer[100];
+ int ints[11];
+ int length = strlen(pattern)+1;
+
+ match=0;
+ j=1;
+
+ for (i=current->mm->env_start; i< current->mm->env_end; i ++){
+ c= get_fs_byte(i);
+ if (match){
+ if (j==99)
+ c='\0';
+ buffer[j] = c;
+ if (!c || c == ' ' || c == '\t'){
+ if (j){
+ buffer[j] = '\0';
+ setup(get_options(buffer,ints),ints);
+ }
+ j=0;
+ } else
+ j++;
+ if (!c)
+ break;
+ continue;
+ }
+ if ((!j && !c) || (j && c == pattern[j-1]))
+ j++;
+ else
+ j=0;
+ if (j==length){
+ match=1;
+ j=0;
+ }
+ }
+}
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+int init_module(void)
+{
+ printk(KERN_INFO "inserting floppy driver for %s\n", kernel_version);
+
+ if(floppy)
+ parse_floppy_cfg_string(floppy);
+ else
+ mod_setup("floppy=", floppy_setup);
+
+ return floppy_init();
+}
+
+void cleanup_module(void)
+{
+ int fdc, dummy;
+
+ for (fdc=0; fdc<2; fdc++)
+ if (FDCS->address != -1){
+ release_region(FDCS->address, 6);
+ release_region(FDCS->address+7, 1);
+ }
+
+ unregister_blkdev(MAJOR_NR, "fd");
+
+ blk_dev[MAJOR_NR].request_fn = 0;
+ /* eject disk, if any */
+ dummy = fd_eject(0);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#else
+/* eject the boot floppy (if we need the drive for a different root floppy) */
+/* This should only be called at boot time when we're sure that there's no
+ * resource contention. */
+void floppy_eject(void)
+{
+ if(floppy_grab_irq_and_dma()==0)
+ {
+ lock_fdc(MAXTIMEOUT,0);
+ fd_eject(0);
+ process_fd_request();
+ floppy_release_irq_and_dma();
+ }
+}
+#endif
diff --git a/linux/dev/drivers/block/genhd.c b/linux/dev/drivers/block/genhd.c
new file mode 100644
index 0000000..903135c
--- /dev/null
+++ b/linux/dev/drivers/block/genhd.c
@@ -0,0 +1,1080 @@
+/*
+ * Code extracted from
+ * linux/kernel/hd.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *
+ * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * in the early extended-partition checks and added DM partitions
+ *
+ * Support for DiskManager v6.0x added by Mark Lord,
+ * with information provided by OnTrack. This now works for linux fdisk
+ * and LILO, as well as loadlin and bootln. Note that disks other than
+ * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ * More flexible handling of extended partitions - aeb, 950831
+ *
+ * Check partition table on IDE disks for common CHS translations
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+#include <linux/hdreg.h>
+#include <alloca.h>
+#ifdef CONFIG_GPT_DISKLABEL
+#include <linux/blkdev.h>
+#include <kern/kalloc.h>
+#include <stddef.h>
+#endif
+
+#include <asm/system.h>
+
+/*
+ * Many architectures don't like unaligned accesses, which is
+ * frequently the case with the nr_sects and start_sect partition
+ * table entries.
+ */
+#include <asm/unaligned.h>
+
+#ifdef MACH
+#include <machine/spl.h>
+#include <linux/dev/glue/glue.h>
+#endif
+
+#define SYS_IND(p) get_unaligned(&p->sys_ind)
+#define NR_SECTS(p) get_unaligned(&p->nr_sects)
+#define START_SECT(p) get_unaligned(&p->start_sect)
+
+
+struct gendisk *gendisk_head = NULL;
+
+static int current_minor = 0;
+extern int *blk_size[];
+extern void rd_load(void);
+extern void initrd_load(void);
+
+extern int chr_dev_init(void);
+extern int blk_dev_init(void);
+extern int scsi_dev_init(void);
+extern int net_dev_init(void);
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf)
+{
+ unsigned int part;
+ const char *maj = hd->major_name;
+#ifdef MACH
+ char unit = (minor >> hd->minor_shift) + '0';
+#else
+ char unit = (minor >> hd->minor_shift) + 'a';
+#endif
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /*
+ * IDE devices use multiple major numbers, but the drives
+ * are named as: {hda,hdb}, {hdc,hdd}, {hde,hdf}, {hdg,hdh}..
+ * This requires special handling here.
+ */
+ switch (hd->major) {
+ case IDE3_MAJOR:
+ unit += 2;
+ case IDE2_MAJOR:
+ unit += 2;
+ case IDE1_MAJOR:
+ unit += 2;
+ case IDE0_MAJOR:
+ maj = "hd";
+ }
+#endif
+ part = minor & ((1 << hd->minor_shift) - 1);
+ if (part)
+#ifdef MACH
+ sprintf(buf, "%s%cs%d", maj, unit, part);
+#else
+ sprintf(buf, "%s%c%d", maj, unit, part);
+#endif
+ else
+ sprintf(buf, "%s%c", maj, unit);
+ return buf;
+}
+
+static void add_partition (struct gendisk *hd, int minor, int start, int size)
+{
+ char buf[8];
+ hd->part[minor].start_sect = start;
+ hd->part[minor].nr_sects = size;
+ printk(" %s", disk_name(hd, minor, buf));
+}
+
+#if defined (MACH) && defined (CONFIG_BSD_DISKLABEL)
+static int mach_minor;
+static void
+add_bsd_partition (struct gendisk *hd, int minor, int slice,
+ int start, int size)
+{
+ char buf[16];
+ hd->part[minor].start_sect = start;
+ hd->part[minor].nr_sects = size;
+ printk (" %s%c", disk_name (hd, mach_minor, buf), slice);
+}
+#endif
+
+static inline int is_extended_partition(struct partition *p)
+{
+ return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+ SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+ SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+}
+
+#ifdef CONFIG_MSDOS_PARTITION
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries. The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start). The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+static void extended_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned long first_sector, first_size, this_sector, this_size;
+ int mask = (1 << hd->minor_shift) - 1;
+ int i;
+
+ first_sector = hd->part[MINOR(dev)].start_sect;
+ first_size = hd->part[MINOR(dev)].nr_sects;
+ this_sector = first_sector;
+
+ while (1) {
+ if ((current_minor & mask) == 0)
+ return;
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ /*
+ * This block is from a device that we're about to stomp on.
+ * So make sure nobody thinks this block is usable.
+ */
+ bh->b_state = 0;
+
+ if (*(unsigned short *) (bh->b_data+510) != 0xAA55)
+ goto done;
+
+ p = (struct partition *) (0x1BE + bh->b_data);
+
+ this_size = hd->part[MINOR(dev)].nr_sects;
+
+ /*
+ * Usually, the first entry is the real data partition,
+ * the 2nd entry is the next extended partition, or empty,
+ * and the 3rd and 4th entries are unused.
+ * However, DRDOS sometimes has the extended partition as
+ * the first entry (when the data partition is empty),
+ * and OS/2 seems to use all four entries.
+ */
+
+ /*
+ * First process the data partition(s)
+ */
+ for (i=0; i<4; i++, p++) {
+ if (!NR_SECTS(p) || is_extended_partition(p))
+ continue;
+
+ /* Check the 3rd and 4th entries -
+ these sometimes contain random garbage */
+ if (i >= 2
+ && START_SECT(p) + NR_SECTS(p) > this_size
+ && (this_sector + START_SECT(p) < first_sector ||
+ this_sector + START_SECT(p) + NR_SECTS(p) >
+ first_sector + first_size))
+ continue;
+
+ add_partition(hd, current_minor, this_sector+START_SECT(p), NR_SECTS(p));
+ current_minor++;
+ if ((current_minor & mask) == 0)
+ goto done;
+ }
+ /*
+ * Next, process the (first) extended partition, if present.
+ * (So far, there seems to be no reason to make
+ * extended_partition() recursive and allow a tree
+ * of extended partitions.)
+ * It should be a link to the next logical partition.
+ * Create a minor for this just long enough to get the next
+ * partition table. The minor will be reused for the next
+ * data partition.
+ */
+ p -= 4;
+ for (i=0; i<4; i++, p++)
+ if(NR_SECTS(p) && is_extended_partition(p))
+ break;
+ if (i == 4)
+ goto done; /* nothing left to do */
+
+ hd->part[current_minor].nr_sects = NR_SECTS(p);
+ hd->part[current_minor].start_sect = first_sector + START_SECT(p);
+ this_sector = first_sector + START_SECT(p);
+ dev = MKDEV(hd->major, current_minor);
+ brelse(bh);
+ }
+done:
+ brelse(bh);
+}
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See extended_partition() for more information.
+ */
+static void bsd_disklabel_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct bsd_disklabel *l;
+ struct bsd_partition *p;
+ int mask = (1 << hd->minor_shift) - 1;
+
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ bh->b_state = 0;
+ l = (struct bsd_disklabel *) (bh->b_data+512);
+ if (l->d_magic != BSD_DISKMAGIC) {
+ brelse(bh);
+ return;
+ }
+
+ p = &l->d_partitions[0];
+ while (p - &l->d_partitions[0] <= BSD_MAXPARTITIONS) {
+ if ((current_minor & mask) >= (4 + hd->max_p))
+ break;
+
+ if (p->p_fstype != BSD_FS_UNUSED) {
+#ifdef MACH
+ add_bsd_partition (hd, current_minor,
+ p - &l->d_partitions[0] + 'a',
+ p->p_offset, p->p_size);
+#else
+ add_partition(hd, current_minor, p->p_offset, p->p_size);
+#endif
+ current_minor++;
+ }
+ p++;
+ }
+ brelse(bh);
+
+}
+#endif
+
+#ifdef CONFIG_GPT_DISKLABEL
+/*
+ * Compute a CRC32 but treat some range as if it were zeros.
+ *
+ * Straight copy of ether_crc_le() from linux/pcmcia-cs/include/linux/crc32.h, except for the first if/else
+ */
+static inline unsigned ether_crc_le_hole(int length, unsigned char *data, unsigned int skip_offset, unsigned int skip_length)
+{
+ static unsigned const ethernet_polynomial_le = 0xedb88320U;
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ if(skip_offset == 0 && skip_length-- != 0)
+ current_octet = 0;
+ else
+ --skip_offset;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+/*
+ * Read in a full GPT array into a contiguous chunk, allocates *PP_S bytes into *PP.
+ *
+ * An attempt to do as few round-trips as possible is made by reading a PAGE_SIZE at a time,
+ * since that's the bread() maximum.
+ */
+static int gpt_read_part_table(void **pp, vm_size_t *pp_s, kdev_t dev, int bsize, __u64 first_sector, struct gpt_disklabel_header *h)
+{
+ __u64 lba = first_sector + h->h_part_table_lba;
+ __u32 bytes_left = *pp_s = h->h_part_table_len * h->h_part_table_entry_size;
+ struct buffer_head *bh;
+ void *cur = *pp = (void *)kalloc(*pp_s);
+ if (!cur) {
+ printk(" unable to allocate GPT partition table buffer");
+ return -2;
+ }
+
+ while (bytes_left) {
+ unsigned bytes_to_read = MIN(bytes_left, PAGE_SIZE);
+ if(!(bh = bread(dev, lba, bytes_to_read))) {
+ printk(" unable to read partition table array");
+ return -3;
+ }
+
+ memcpy(cur, bh->b_data, bytes_to_read);
+ cur += bytes_to_read;
+ bytes_left -= bytes_to_read;
+ lba += PAGE_SIZE / bsize;
+
+ brelse(bh);
+ }
+
+ return 0;
+}
+
+/*
+ * Sequence from section 5.3.2 of spec 2.8A:
+ * signature, CRC, lba_current matches, partition table CRC, primary: check backup for validity
+ */
+static int gpt_verify_header(void **pp, vm_size_t *pp_s, kdev_t dev, int bsize, __u64 first_sector, __u64 lba, struct gpt_disklabel_header *h)
+{
+ int res;
+ __u32 crc;
+
+ if (memcmp(h->h_signature, GPT_SIGNATURE, strlen(GPT_SIGNATURE)) != 0) {
+ printk(" bad GPT signature \"%c%c%c%c%c%c%c%c\";",
+ h->h_signature[0], h->h_signature[1], h->h_signature[2], h->h_signature[3],
+ h->h_signature[4], h->h_signature[5], h->h_signature[6], h->h_signature[7]);
+ return 1;
+ }
+
+ crc = ether_crc_le_hole(h->h_header_size, (void *)h,
+ offsetof(struct gpt_disklabel_header, h_header_crc), sizeof(h->h_header_crc)) ^ ~0;
+ if (crc != h->h_header_crc) {
+ printk(" bad header CRC: %x != %x;", crc, h->h_header_crc);
+ return 2;
+ }
+
+ if (h->h_lba_current != lba) {
+ printk(" current LBA mismatch: %lld != %lld;", h->h_lba_current, lba);
+ return 3;
+ }
+
+ if (*pp) {
+ kfree((vm_offset_t)*pp, *pp_s);
+ *pp = NULL;
+ }
+ if ((res = gpt_read_part_table(pp, pp_s, dev, bsize, first_sector, h)))
+ return res;
+
+ crc = ether_crc_le_hole(*pp_s, *pp, 0, 0) ^ ~0;
+ if (crc != h->h_part_table_crc) {
+ printk(" bad partition table CRC: %x != %x;", crc, h->h_part_table_crc);
+ return 4;
+ }
+
+ for (int i = h->h_header_size; i < bsize; ++i)
+ res |= ((char*)h)[i];
+ if (res) {
+ printk(" rest of GPT block dirty;");
+ return 5;
+ }
+
+ return 0;
+}
+
+static void gpt_print_part_name(struct gpt_disklabel_part *p)
+{
+ for(int n = 0; n < sizeof(p->p_name) / sizeof(*p->p_name) && p->p_name[n]; ++n)
+ if(p->p_name[n] & ~0xFF)
+ printk("?"); /* Can't support all of Unicode, but don't print garbage at least... */
+ else
+ printk("%c", p->p_name[n]);
+}
+
+#ifdef DEBUG
+static void gpt_print_guid(struct gpt_guid *guid)
+{
+ printk("%08X-%04X-%04X-%02X%02X-", guid->g_time_low, guid->g_time_mid, guid->g_time_high_version, guid->g_clock_sec_high, guid->g_clock_sec_low);
+ for (int i = 0; i < sizeof(guid->g_node_id); ++i)
+ printk("%02X", guid->g_node_id[i]);
+}
+
+static void gpt_dump_header(struct gpt_disklabel_header *h)
+{
+ printk(" [h_signature: \"%c%c%c%c%c%c%c%c\"; ",
+ h->h_signature[0], h->h_signature[1], h->h_signature[2], h->h_signature[3],
+ h->h_signature[4], h->h_signature[5], h->h_signature[6], h->h_signature[7]);
+ printk("h_revision: %x; ", h->h_revision);
+ printk("h_header_size: %u; ", h->h_header_size);
+ printk("h_header_crc: %x; ", h->h_header_crc);
+ printk("h_reserved: %u; ", h->h_reserved);
+ printk("h_lba_current: %llu; ", h->h_lba_current);
+ printk("h_lba_backup: %llu; ", h->h_lba_backup);
+ printk("h_lba_usable_first: %llu; ", h->h_lba_usable_first);
+ printk("h_lba_usable_last: %llu; ", h->h_lba_usable_last);
+ printk("h_guid: "); gpt_print_guid(&h->h_guid); printk("; ");
+ printk("h_part_table_lba: %llu; ", h->h_part_table_lba);
+ printk("h_part_table_len: %u; ", h->h_part_table_len);
+ printk("h_part_table_crc: %x]", h->h_part_table_crc);
+}
+
+static void gpt_dump_part(struct gpt_disklabel_part *p, int i)
+{
+ printk(" part#%d:[", i);
+ printk("p_type: "); gpt_print_guid(&p->p_type);
+ printk("; p_guid:"); gpt_print_guid(&p->p_guid);
+ printk("; p_lba_first: %llu", p->p_lba_first);
+ printk("; p_lba_last: %llu", p->p_lba_last);
+ printk("; p_attrs: %llx", p->p_attrs);
+ printk("; p_name: \""); gpt_print_part_name(p); printk("\"]");
+}
+#else
+static void gpt_dump_header(struct gpt_disklabel_header *h) {}
+static void gpt_dump_part(struct gpt_disklabel_part *p, int i) {}
+#endif
+
+static int gpt_partition(struct gendisk *hd, kdev_t dev, __u64 first_sector, int minor)
+{
+ struct buffer_head *bh;
+ struct gpt_disklabel_header *h;
+ void *pp = NULL; vm_size_t pp_s = 0;
+ int res, bsize = 512;
+ /* Note: this must be set by the driver; SCSI does --
+ * only, in practice, it always sets this to 512, see sd_init() in sd.c */
+ if (hardsect_size[MAJOR(dev)] && hardsect_size[MAJOR(dev)][MINOR(dev)])
+ bsize = hardsect_size[MAJOR(dev)][MINOR(dev)];
+ set_blocksize(dev,bsize); /* Must override read block size since GPT has pointers, stolen from amiga_partition(). */
+ if (!(bh = bread(dev, first_sector + 1, bsize))) {
+ printk("unable to read GPT");
+ res = -1;
+ goto done;
+ }
+
+ h = (struct gpt_disklabel_header *)bh->b_data;
+ gpt_dump_header(h);
+
+ res = gpt_verify_header(&pp, &pp_s, dev, bsize, first_sector, 1, h);
+ if (res < 0)
+ goto done;
+ else if (res > 0) {
+ printk(" main GPT dirty, trying backup at %llu;", h->h_lba_backup);
+ __u64 lba = h->h_lba_backup;
+ brelse(bh);
+
+ if (!(bh = bread(dev, first_sector + lba, bsize))) {
+ printk("unable to read backup GPT");
+ res = -4;
+ goto done;
+ }
+
+ h = (struct gpt_disklabel_header *)bh->b_data;
+ gpt_dump_header(h);
+
+ res = gpt_verify_header(&pp, &pp_s, dev, bsize, first_sector, lba, h);
+ if (res < 0)
+ goto done;
+ else if (res > 0) {
+ printk(" backup GPT dirty as well; cowardly refusing to continue");
+ res = -5;
+ goto done;
+ }
+ }
+
+ /* At least one good GPT+array */
+
+ for(int i = 0; i < h->h_part_table_len; ++i, ++minor) {
+ struct gpt_disklabel_part *p =
+ (struct gpt_disklabel_part *) (pp + i * h->h_part_table_entry_size);
+ if(memcmp(&p->p_type, &GPT_GUID_TYPE_UNUSED, sizeof(struct gpt_guid)) == 0)
+ continue;
+ gpt_dump_part(p, i);
+
+ if (minor > hd->max_nr * hd->max_p) {
+ printk(" [ignoring GPT partition %d \"", i); gpt_print_part_name(p); printk("\": too many partitions (max %d)]", hd->max_p);
+ } else {
+ add_partition(hd, minor, first_sector + p->p_lba_first, p->p_lba_last - p->p_lba_first + 1);
+ if(p->p_name[0]) {
+ printk(" ("); gpt_print_part_name(p); printk(")");
+ }
+ }
+ }
+
+done:
+ brelse(bh);
+ set_blocksize(dev,BLOCK_SIZE);
+ kfree((vm_offset_t)pp, pp_s);
+ printk("\n");
+ return !res;
+}
+#endif
+
+static int msdos_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, minor = current_minor;
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned char *data;
+ int mask = (1 << hd->minor_shift) - 1;
+#ifdef CONFIG_BLK_DEV_IDE
+ int tested_for_xlate = 0;
+
+read_mbr:
+#endif
+ if (!(bh = bread(dev,0,1024))) {
+ printk(" unable to read partition table\n");
+ return -1;
+ }
+ data = (unsigned char *)bh->b_data;
+ /* In some cases we modify the geometry */
+ /* of the drive (below), so ensure that */
+ /* nobody else tries to re-use this data. */
+ bh->b_state = 0;
+#ifdef CONFIG_BLK_DEV_IDE
+check_table:
+#endif
+ if (*(unsigned short *) (0x1fe + data) != 0xAA55) {
+ brelse(bh);
+ return 0;
+ }
+ p = (struct partition *) (0x1be + data);
+
+#ifdef CONFIG_BLK_DEV_IDE
+ if (!tested_for_xlate++) { /* Do this only once per disk */
+ /*
+ * Look for various forms of IDE disk geometry translation
+ */
+ extern int ide_xlate_1024(kdev_t, int, const char *);
+ unsigned int sig = *(unsigned short *)(data + 2);
+ if (SYS_IND(p) == EZD_PARTITION) {
+ /*
+ * The remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, -1, " [EZD]")) {
+ data += 512;
+ goto check_table;
+ }
+ } else if (SYS_IND(p) == DM6_PARTITION) {
+
+ /*
+ * Everything on the disk is offset by 63 sectors,
+ * including a "new" MBR with its own partition table,
+ * and the remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, 1, " [DM6:DDO]")) {
+ brelse(bh);
+ goto read_mbr; /* start over with new MBR */
+ }
+ } else if (sig <= 0x1ae && *(unsigned short *)(data + sig) == 0x55AA
+ && (1 & *(unsigned char *)(data + sig + 2)) )
+ {
+ /*
+ * DM6 signature in MBR, courtesy of OnTrack
+ */
+ (void) ide_xlate_1024 (dev, 0, " [DM6:MBR]");
+ } else if (SYS_IND(p) == DM6_AUX1PARTITION || SYS_IND(p) == DM6_AUX3PARTITION) {
+ /*
+ * DM6 on other than the first (boot) drive
+ */
+ (void) ide_xlate_1024(dev, 0, " [DM6:AUX]");
+ } else {
+ /*
+ * Examine the partition table for common translations.
+ * This is necessary for drives for situations where
+ * the translated geometry is unavailable from the BIOS.
+ */
+ for (i = 0; i < 4 ; i++) {
+ struct partition *q = &p[i];
+ if (NR_SECTS(q)
+ && (q->sector & 63) == 1
+ && (q->end_sector & 63) == 63) {
+ unsigned int heads = q->end_head + 1;
+ if (heads == 32 || heads == 64 || heads == 128 || heads == 255) {
+
+ (void) ide_xlate_1024(dev, heads, " [PTBL]");
+ break;
+ }
+ }
+ }
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDE */
+
+ current_minor += 4; /* first "extra" minor (for extended partitions) */
+ for (i=1 ; i<=4 ; minor++,i++,p++) {
+ if (!NR_SECTS(p))
+ continue;
+#ifdef CONFIG_GPT_DISKLABEL
+ if (SYS_IND(p) == GPT_PARTITION) {
+ brelse(bh);
+ return gpt_partition(hd, dev, first_sector, minor);
+ } else
+#endif
+ add_partition(hd, minor, first_sector+START_SECT(p), NR_SECTS(p));
+ if (is_extended_partition(p)) {
+ printk(" <");
+ /*
+ * If we are rereading the partition table, we need
+ * to set the size of the partition so that we will
+ * be able to bread the block containing the extended
+ * partition info.
+ */
+ hd->sizes[minor] = hd->part[minor].nr_sects
+ >> (BLOCK_SIZE_BITS - 9);
+ extended_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ /* prevent someone doing mkfs or mkswap on an
+ extended partition, but leave room for LILO */
+ if (hd->part[minor].nr_sects > 2)
+ hd->part[minor].nr_sects = 2;
+ }
+#ifdef CONFIG_BSD_DISKLABEL
+ if (SYS_IND(p) == BSD_PARTITION) {
+ printk(" <");
+#ifdef MACH
+ mach_minor = minor;
+#endif
+ bsd_disklabel_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ }
+#endif
+ }
+ /*
+ * Check for old-style Disk Manager partition table
+ */
+ if (*(unsigned short *) (data+0xfc) == 0x55AA) {
+ p = (struct partition *) (0x1be + data);
+ for (i = 4 ; i < 16 ; i++, current_minor++) {
+ p--;
+ if ((current_minor & mask) == 0)
+ break;
+ if (!(START_SECT(p) && NR_SECTS(p)))
+ continue;
+ add_partition(hd, current_minor, START_SECT(p), NR_SECTS(p));
+ }
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_MSDOS_PARTITION */
+
+#ifdef CONFIG_OSF_PARTITION
+
+static int osf_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ int i;
+ int mask = (1 << hd->minor_shift) - 1;
+ struct buffer_head *bh;
+ struct disklabel {
+ u32 d_magic;
+ u16 d_type,d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ u32 d_secsize;
+ u32 d_nsectors;
+ u32 d_ntracks;
+ u32 d_ncylinders;
+ u32 d_secpercyl;
+ u32 d_secprtunit;
+ u16 d_sparespertrack;
+ u16 d_sparespercyl;
+ u32 d_acylinders;
+ u16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ u32 d_headswitch, d_trkseek, d_flags;
+ u32 d_drivedata[5];
+ u32 d_spare[5];
+ u32 d_magic2;
+ u16 d_checksum;
+ u16 d_npartitions;
+ u32 d_bbsize, d_sbsize;
+ struct d_partition {
+ u32 p_size;
+ u32 p_offset;
+ u32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ u16 p_cpg;
+ } d_partitions[8];
+ } * label;
+ struct d_partition * partition;
+#define DISKLABELMAGIC (0x82564557UL)
+
+ if (!(bh = bread(dev,0,1024))) {
+ printk("unable to read partition table\n");
+ return -1;
+ }
+ label = (struct disklabel *) (bh->b_data+64);
+ partition = label->d_partitions;
+ if (label->d_magic != DISKLABELMAGIC) {
+ printk("magic: %08x\n", label->d_magic);
+ brelse(bh);
+ return 0;
+ }
+ if (label->d_magic2 != DISKLABELMAGIC) {
+ printk("magic2: %08x\n", label->d_magic2);
+ brelse(bh);
+ return 0;
+ }
+ for (i = 0 ; i < label->d_npartitions; i++, partition++) {
+ if ((current_minor & mask) == 0)
+ break;
+ if (partition->p_size)
+ add_partition(hd, current_minor,
+ first_sector+partition->p_offset,
+ partition->p_size);
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_OSF_PARTITION */
+
+#ifdef CONFIG_SUN_PARTITION
+
+static int sun_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, csum;
+ unsigned short *ush;
+ struct buffer_head *bh;
+ struct sun_disklabel {
+ unsigned char info[128]; /* Informative text string */
+ unsigned char spare[292]; /* Boot information etc. */
+ unsigned short rspeed; /* Disk rotational speed */
+ unsigned short pcylcount; /* Physical cylinder count */
+ unsigned short sparecyl; /* extra sects per cylinder */
+ unsigned char spare2[4]; /* More magic... */
+ unsigned short ilfact; /* Interleave factor */
+ unsigned short ncyl; /* Data cylinder count */
+ unsigned short nacyl; /* Alt. cylinder count */
+ unsigned short ntrks; /* Tracks per cylinder */
+ unsigned short nsect; /* Sectors per track */
+ unsigned char spare3[4]; /* Even more magic... */
+ struct sun_partition {
+ __u32 start_cylinder;
+ __u32 num_sectors;
+ } partitions[8];
+ unsigned short magic; /* Magic number */
+ unsigned short csum; /* Label xor'd checksum */
+ } * label;
+ struct sun_partition *p;
+ int other_endian;
+ unsigned long spc;
+#define SUN_LABEL_MAGIC 0xDABE
+#define SUN_LABEL_MAGIC_SWAPPED 0xBEDA
+/* No need to optimize these macros since they are called only when reading
+ * the partition table. This occurs only at each disk change. */
+#define SWAP16(x) (other_endian ? (((__u16)(x) & 0xFF) << 8) \
+ | (((__u16)(x) & 0xFF00) >> 8) \
+ : (__u16)(x))
+#define SWAP32(x) (other_endian ? (((__u32)(x) & 0xFF) << 24) \
+ | (((__u32)(x) & 0xFF00) << 8) \
+ | (((__u32)(x) & 0xFF0000) >> 8) \
+ | (((__u32)(x) & 0xFF000000) >> 24) \
+ : (__u32)(x))
+
+ if(!(bh = bread(dev, 0, 1024))) {
+ printk("Dev %s: unable to read partition table\n",
+ kdevname(dev));
+ return -1;
+ }
+ label = (struct sun_disklabel *) bh->b_data;
+ p = label->partitions;
+ if (label->magic != SUN_LABEL_MAGIC && label->magic != SUN_LABEL_MAGIC_SWAPPED) {
+ printk("Dev %s Sun disklabel: bad magic %04x\n",
+ kdevname(dev), label->magic);
+ brelse(bh);
+ return 0;
+ }
+ other_endian = (label->magic == SUN_LABEL_MAGIC_SWAPPED);
+ /* Look at the checksum */
+ ush = ((unsigned short *) (label+1)) - 1;
+ for(csum = 0; ush >= ((unsigned short *) label);)
+ csum ^= *ush--;
+ if(csum) {
+ printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
+ kdevname(dev));
+ brelse(bh);
+ return 0;
+ }
+ /* All Sun disks have 8 partition entries */
+ spc = SWAP16(label->ntrks) * SWAP16(label->nsect);
+ for(i=0; i < 8; i++, p++) {
+ unsigned long st_sector;
+
+ /* We register all partitions, even if zero size, so that
+ * the minor numbers end up ok as per SunOS interpretation.
+ */
+ st_sector = first_sector + SWAP32(p->start_cylinder) * spc;
+ add_partition(hd, current_minor, st_sector, SWAP32(p->num_sectors));
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+#undef SWAP16
+#undef SWAP32
+}
+
+#endif /* CONFIG_SUN_PARTITION */
+
+#ifdef CONFIG_AMIGA_PARTITION
+#include <asm/byteorder.h>
+#include <linux/affs_hardblocks.h>
+
+static __inline__ __u32
+checksum_block(__u32 *m, int size)
+{
+ __u32 sum = 0;
+
+ while (size--)
+ sum += htonl(*m++);
+ return sum;
+}
+
+static int
+amiga_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ struct buffer_head *bh;
+ struct RigidDiskBlock *rdb;
+ struct PartitionBlock *pb;
+ int start_sect;
+ int nr_sects;
+ int blk;
+ int part, res;
+
+ set_blocksize(dev,512);
+ res = 0;
+
+ for (blk = 0; blk < RDB_ALLOCATION_LIMIT; blk++) {
+ if(!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read RDB block %d\n",dev,blk);
+ goto rdb_done;
+ }
+ if (*(__u32 *)bh->b_data == htonl(IDNAME_RIGIDDISK)) {
+ rdb = (struct RigidDiskBlock *)bh->b_data;
+ if (checksum_block((__u32 *)bh->b_data,htonl(rdb->rdb_SummedLongs) & 0x7F)) {
+ printk("Dev %d: RDB in block %d has bad checksum\n",dev,blk);
+ brelse(bh);
+ continue;
+ }
+ printk(" RDSK");
+ blk = htonl(rdb->rdb_PartitionList);
+ brelse(bh);
+ for (part = 1; blk > 0 && part <= 16; part++) {
+ if (!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read partition block %d\n",
+ dev,blk);
+ goto rdb_done;
+ }
+ pb = (struct PartitionBlock *)bh->b_data;
+ blk = htonl(pb->pb_Next);
+ if (pb->pb_ID == htonl(IDNAME_PARTITION) && checksum_block(
+ (__u32 *)pb,htonl(pb->pb_SummedLongs) & 0x7F) == 0 ) {
+
+ /* Tell Kernel about it */
+
+ if (!(nr_sects = (htonl(pb->pb_Environment[10]) + 1 -
+ htonl(pb->pb_Environment[9])) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]))) {
+ continue;
+ }
+ start_sect = htonl(pb->pb_Environment[9]) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]);
+ add_partition(hd,current_minor,start_sect,nr_sects);
+ current_minor++;
+ res = 1;
+ }
+ brelse(bh);
+ }
+ printk("\n");
+ break;
+ }
+ }
+
+rdb_done:
+ set_blocksize(dev,BLOCK_SIZE);
+ return res;
+}
+#endif /* CONFIG_AMIGA_PARTITION */
+
+static void check_partition(struct gendisk *hd, kdev_t dev)
+{
+ static int first_time = 1;
+ unsigned long first_sector;
+ char buf[8];
+
+ if (first_time)
+ printk("Partition check (DOS partitions):\n");
+ first_time = 0;
+ first_sector = hd->part[MINOR(dev)].start_sect;
+
+ /*
+ * This is a kludge to allow the partition check to be
+ * skipped for specific drives (e.g. IDE cd-rom drives)
+ */
+ if ((int)first_sector == -1) {
+ hd->part[MINOR(dev)].start_sect = 0;
+ return;
+ }
+
+ printk(" %s:", disk_name(hd, MINOR(dev), buf));
+#ifdef CONFIG_MSDOS_PARTITION
+ if (msdos_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ if (osf_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ if(sun_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+ if(amiga_partition(hd, dev, first_sector))
+ return;
+#endif
+ printk(" unknown partition table\n");
+}
+
+/* This function is used to re-read partition tables for removable disks.
+ Much of the cleanup from the old partition tables should have already been
+ done */
+
+/* This function will re-read the partition tables for a given device,
+and set things back up again. There are some important caveats,
+however. You must ensure that no one is using the device, and no one
+can start using the device while this function is being executed. */
+
+void resetup_one_dev(struct gendisk *dev, int drive)
+{
+ int i;
+ int first_minor = drive << dev->minor_shift;
+ int end_minor = first_minor + dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+
+ /*
+ * We need to set the sizes array before we will be able to access
+ * any of the partitions on this device.
+ */
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = first_minor; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+static void setup_dev(struct gendisk *dev)
+{
+ int i, drive;
+ int end_minor = dev->max_nr * dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ for (i = 0 ; i < end_minor; i++) {
+ dev->part[i].start_sect = 0;
+ dev->part[i].nr_sects = 0;
+ }
+ dev->init(dev);
+ for (drive = 0 ; drive < dev->nr_real ; drive++) {
+ int first_minor = drive << dev->minor_shift;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+ }
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = 0; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+void device_setup(void)
+{
+ extern void console_map_init(void);
+ struct gendisk *p;
+ int nr=0;
+
+#ifdef CONFIG_BLK_DEV_IDE
+ extern char *kernel_cmdline;
+ char *c, *param, *white;
+
+ for (c = kernel_cmdline; c; )
+ {
+ param = strstr(c, " ide");
+ if (!param)
+ param = strstr(c, " hd");
+ if (!param)
+ break;
+ if (param) {
+ param++;
+ white = strchr(param, ' ');
+ if (!white) {
+ ide_setup(param);
+ c = NULL;
+ } else {
+ char *word = alloca(white - param + 1);
+ strncpy(word, param, white - param);
+ word[white-param] = '\0';
+ ide_setup(word);
+ c = white + 1;
+ }
+ }
+ }
+#endif
+#ifndef MACH
+ chr_dev_init();
+#endif
+ blk_dev_init();
+ sti();
+#ifdef CONFIG_SCSI
+ scsi_dev_init();
+#endif
+#ifdef CONFIG_INET
+ net_dev_init();
+#endif
+#ifndef MACH
+ console_map_init();
+#endif
+
+ for (p = gendisk_head ; p ; p=p->next) {
+ setup_dev(p);
+ nr += p->nr_real;
+ }
+#ifdef CONFIG_BLK_DEV_RAM
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start && mount_initrd) initrd_load();
+ else
+#endif
+ rd_load();
+#endif
+}
diff --git a/linux/dev/drivers/net/Space.c b/linux/dev/drivers/net/Space.c
new file mode 100644
index 0000000..213fa9b
--- /dev/null
+++ b/linux/dev/drivers/net/Space.c
@@ -0,0 +1,582 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Holds initial configuration information for devices.
+ *
+ * NOTE: This file is a nice idea, but its current format does not work
+ * well for drivers that support multiple units, like the SLIP
+ * driver. We should actually have only one pointer to a driver
+ * here, with the driver knowing how many units it supports.
+ * Currently, the SLIP driver abuses the "base_addr" integer
+ * field of the 'device' structure to store the unit number...
+ * -FvK
+ *
+ * Version: @(#)Space.c 1.0.8 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald J. Becker, <becker@super.org>
+ *
+ * FIXME:
+ * Sort the device chain fastest first.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+
+#define NEXT_DEV NULL
+
+
+/* A unified ethernet device probe. This is the easiest way to have every
+ ethernet adaptor have the name "eth[0123...]".
+ */
+
+extern int tulip_probe(struct device *dev);
+extern int hp100_probe(struct device *dev);
+extern int ultra_probe(struct device *dev);
+extern int ultra32_probe(struct device *dev);
+extern int wd_probe(struct device *dev);
+extern int el2_probe(struct device *dev);
+extern int ne_probe(struct device *dev);
+extern int ne2k_pci_probe(struct device *dev);
+extern int hp_probe(struct device *dev);
+extern int hp_plus_probe(struct device *dev);
+extern int znet_probe(struct device *);
+extern int express_probe(struct device *);
+extern int eepro_probe(struct device *);
+extern int el3_probe(struct device *);
+extern int at1500_probe(struct device *);
+extern int at1700_probe(struct device *);
+extern int fmv18x_probe(struct device *);
+extern int eth16i_probe(struct device *);
+extern int depca_probe(struct device *);
+extern int apricot_probe(struct device *);
+extern int ewrk3_probe(struct device *);
+extern int de4x5_probe(struct device *);
+extern int el1_probe(struct device *);
+extern int via_rhine_probe(struct device *);
+extern int natsemi_probe(struct device *);
+extern int ns820_probe(struct device *);
+extern int winbond840_probe(struct device *);
+extern int hamachi_probe(struct device *);
+extern int sundance_probe(struct device *);
+extern int starfire_probe(struct device *);
+extern int myson803_probe(struct device *);
+extern int igige_probe(struct device *);
+#if defined(CONFIG_WAVELAN)
+extern int wavelan_probe(struct device *);
+#endif /* defined(CONFIG_WAVELAN) */
+extern int el16_probe(struct device *);
+extern int elplus_probe(struct device *);
+extern int ac3200_probe(struct device *);
+extern int e2100_probe(struct device *);
+extern int ni52_probe(struct device *);
+extern int ni65_probe(struct device *);
+extern int SK_init(struct device *);
+extern int seeq8005_probe(struct device *);
+extern int tc59x_probe(struct device *);
+extern int dgrs_probe(struct device *);
+extern int smc_init( struct device * );
+extern int sparc_lance_probe(struct device *);
+extern int atarilance_probe(struct device *);
+extern int a2065_probe(struct device *);
+extern int ariadne_probe(struct device *);
+extern int hydra_probe(struct device *);
+extern int yellowfin_probe(struct device *);
+extern int eepro100_probe(struct device *);
+extern int epic100_probe(struct device *);
+extern int rtl8139_probe(struct device *);
+extern int sis900_probe(struct device *);
+extern int tlan_probe(struct device *);
+extern int isa515_probe(struct device *);
+extern int pcnet32_probe(struct device *);
+extern int lance_probe(struct device *);
+/* Detachable devices ("pocket adaptors") */
+extern int atp_init(struct device *);
+extern int de600_probe(struct device *);
+extern int de620_probe(struct device *);
+extern int tc515_probe(struct device *);
+
+static int
+ethif_probe(struct device *dev)
+{
+ u_long base_addr = dev->base_addr;
+
+ if ((base_addr == 0xffe0) || (base_addr == 1))
+ return 1; /* ENXIO */
+
+ if (1
+ /* All PCI probes are safe, and thus should be first. */
+#ifdef CONFIG_DE4X5 /* DEC DE425, DE434, DE435 adapters */
+ && de4x5_probe(dev)
+#endif
+#ifdef CONFIG_DGRS
+ && dgrs_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO100B /* Intel EtherExpress Pro100B */
+ && eepro100_probe(dev)
+#endif
+#ifdef CONFIG_EPIC
+ && epic100_probe(dev)
+#endif
+#if defined(CONFIG_HP100)
+ && hp100_probe(dev)
+#endif
+#if defined(CONFIG_NE2K_PCI)
+ && ne2k_pci_probe(dev)
+#endif
+#ifdef CONFIG_PCNET32
+ && pcnet32_probe(dev)
+#endif
+#ifdef CONFIG_RTL8139
+ && rtl8139_probe(dev)
+#endif
+#ifdef CONFIG_SIS900
+ && sis900_probe(dev)
+#endif
+#ifdef CONFIG_VIA_RHINE
+ && via_rhine_probe(dev)
+#endif
+#ifdef CONFIG_NATSEMI
+ && natsemi_probe(dev)
+#endif
+#ifdef CONFIG_NS820
+ && ns820_probe(dev)
+#endif
+#ifdef CONFIG_WINBOND840
+ && winbond840_probe(dev)
+#endif
+#ifdef CONFIG_HAMACHI
+ && hamachi_probe(dev)
+#endif
+#ifdef CONFIG_SUNDANCE
+ && sundance_probe(dev)
+#endif
+#ifdef CONFIG_STARFIRE
+ && starfire_probe(dev)
+#endif
+#ifdef CONFIG_MYSON803
+ && myson803_probe(dev)
+#endif
+#ifdef CONFIG_INTEL_GIGE
+ && igige_probe(dev)
+#endif
+#if defined(CONFIG_DEC_ELCP)
+ && tulip_probe(dev)
+#endif
+#ifdef CONFIG_YELLOWFIN
+ && yellowfin_probe(dev)
+#endif
+ /* Next mostly-safe EISA-only drivers. */
+#ifdef CONFIG_AC3200 /* Ansel Communications EISA 3200. */
+ && ac3200_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA32)
+ && ultra32_probe(dev)
+#endif
+ /* Third, sensitive ISA boards. */
+#ifdef CONFIG_AT1700
+ && at1700_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA)
+ && ultra_probe(dev)
+#endif
+#if defined(CONFIG_SMC9194)
+ && smc_init(dev)
+#endif
+#if defined(CONFIG_WD80x3)
+ && wd_probe(dev)
+#endif
+#if defined(CONFIG_EL2) /* 3c503 */
+ && el2_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN)
+ && hp_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN_PLUS)
+ && hp_plus_probe(dev)
+#endif
+#if defined(CONFIG_SEEQ8005)
+ && seeq8005_probe(dev)
+#endif
+#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
+ && e2100_probe(dev)
+#endif
+#if defined(CONFIG_NE2000)
+ && ne_probe(dev)
+#endif
+#ifdef CONFIG_AT1500
+ && at1500_probe(dev)
+#endif
+#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */
+ && fmv18x_probe(dev)
+#endif
+#ifdef CONFIG_ETH16I
+ && eth16i_probe(dev) /* ICL EtherTeam 16i/32 */
+#endif
+#ifdef CONFIG_EL3 /* 3c509 */
+ && el3_probe(dev)
+#endif
+#if defined(CONFIG_VORTEX)
+ && tc59x_probe(dev)
+#endif
+#ifdef CONFIG_3C515 /* 3c515 */
+ && tc515_probe(dev)
+#endif
+#ifdef CONFIG_ZNET /* Zenith Z-Note and some IBM Thinkpads. */
+ && znet_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
+ && express_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
+ && eepro_probe(dev)
+#endif
+#ifdef CONFIG_DEPCA /* DEC DEPCA */
+ && depca_probe(dev)
+#endif
+#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
+ && ewrk3_probe(dev)
+#endif
+#ifdef CONFIG_APRICOT /* Apricot I82596 */
+ && apricot_probe(dev)
+#endif
+#ifdef CONFIG_EL1 /* 3c501 */
+ && el1_probe(dev)
+#endif
+#if defined(CONFIG_WAVELAN) /* WaveLAN */
+ && wavelan_probe(dev)
+#endif /* defined(CONFIG_WAVELAN) */
+#ifdef CONFIG_EL16 /* 3c507 */
+ && el16_probe(dev)
+#endif
+#ifdef CONFIG_ELPLUS /* 3c505 */
+ && elplus_probe(dev)
+#endif
+#ifdef CONFIG_DE600 /* D-Link DE-600 adapter */
+ && de600_probe(dev)
+#endif
+#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
+ && de620_probe(dev)
+#endif
+#if defined(CONFIG_SK_G16)
+ && SK_init(dev)
+#endif
+#ifdef CONFIG_NI52
+ && ni52_probe(dev)
+#endif
+#ifdef CONFIG_NI65
+ && ni65_probe(dev)
+#endif
+#ifdef CONFIG_LANCE /* ISA LANCE boards */
+ && lance_probe(dev)
+#endif
+#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
+ && atarilance_probe(dev)
+#endif
+#ifdef CONFIG_A2065 /* Commodore/Ameristar A2065 Ethernet Board */
+ && a2065_probe(dev)
+#endif
+#ifdef CONFIG_ARIADNE /* Village Tronic Ariadne Ethernet Board */
+ && ariadne_probe(dev)
+#endif
+#ifdef CONFIG_HYDRA /* Hydra Systems Amiganet Ethernet board */
+ && hydra_probe(dev)
+#endif
+#ifdef CONFIG_SUNLANCE
+ && sparc_lance_probe(dev)
+#endif
+#ifdef CONFIG_TLAN
+ && tlan_probe(dev)
+#endif
+#ifdef CONFIG_LANCE
+ && lance_probe(dev)
+#endif
+ && 1 ) {
+ return 1; /* -ENODEV or -EAGAIN would be more accurate. */
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SDLA
+ extern int sdla_init(struct device *);
+ static struct device sdla0_dev = { "sdla0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, sdla_init, };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&sdla0_dev)
+#endif
+
+#ifdef CONFIG_NETROM
+ extern int nr_init(struct device *);
+
+ static struct device nr3_dev = { "nr3", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, nr_init, };
+ static struct device nr2_dev = { "nr2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr3_dev, nr_init, };
+ static struct device nr1_dev = { "nr1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr2_dev, nr_init, };
+ static struct device nr0_dev = { "nr0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr1_dev, nr_init, };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&nr0_dev)
+#endif
+
+/* Run-time ATtachable (Pocket) devices have a different (not "eth#") name. */
+#ifdef CONFIG_ATP /* AT-LAN-TEC (RealTek) pocket adaptor. */
+static struct device atp_dev = {
+ "atp0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, atp_init, /* ... */ };
+# undef NEXT_DEV
+# define NEXT_DEV (&atp_dev)
+#endif
+
+#ifdef CONFIG_ARCNET
+ extern int arcnet_probe(struct device *dev);
+ static struct device arcnet_dev = {
+ "arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, };
+# undef NEXT_DEV
+# define NEXT_DEV (&arcnet_dev)
+#endif
+
+/* In Mach, by default allow at least 2 interfaces. */
+#ifdef MACH
+#ifndef ETH1_ADDR
+# define ETH1_ADDR 0
+#endif
+#ifndef ETH1_IRQ
+# define ETH1_IRQ 0
+#endif
+#endif
+
+/* The first device defaults to I/O base '0', which means autoprobe. */
+#ifndef ETH0_ADDR
+# define ETH0_ADDR 0
+#endif
+#ifndef ETH0_IRQ
+# define ETH0_IRQ 0
+#endif
+/* "eth0" defaults to autoprobe (== 0), other use a base of 0xffe0 (== -0x20),
+ which means "don't probe". These entries exist to only to provide empty
+ slots which may be enabled at boot-time. */
+
+static struct device eth7_dev = {
+ "eth7", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, NEXT_DEV, ethif_probe };
+static struct device eth6_dev = {
+ "eth6", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth7_dev, ethif_probe };
+static struct device eth5_dev = {
+ "eth5", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth6_dev, ethif_probe };
+static struct device eth4_dev = {
+ "eth4", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth5_dev, ethif_probe };
+static struct device eth3_dev = {
+ "eth3", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth4_dev, ethif_probe };
+static struct device eth2_dev = {
+ "eth2", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth3_dev, ethif_probe };
+
+#ifdef MACH
+static struct device eth1_dev = {
+ "eth1", 0, 0, 0, 0, ETH1_ADDR, ETH1_IRQ, 0, 0, 0, &eth2_dev, ethif_probe };
+#else
+static struct device eth1_dev = {
+ "eth1", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth2_dev, ethif_probe };
+#endif
+
+static struct device eth0_dev = {
+ "eth0", 0, 0, 0, 0, ETH0_ADDR, ETH0_IRQ, 0, 0, 0, &eth1_dev, ethif_probe };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&eth0_dev)
+
+#if defined(PLIP) || defined(CONFIG_PLIP)
+ extern int plip_init(struct device *);
+ static struct device plip2_dev = {
+ "plip2", 0, 0, 0, 0, 0x278, 2, 0, 0, 0, NEXT_DEV, plip_init, };
+ static struct device plip1_dev = {
+ "plip1", 0, 0, 0, 0, 0x378, 7, 0, 0, 0, &plip2_dev, plip_init, };
+ static struct device plip0_dev = {
+ "plip0", 0, 0, 0, 0, 0x3BC, 5, 0, 0, 0, &plip1_dev, plip_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&plip0_dev)
+#endif /* PLIP */
+
+#if defined(SLIP) || defined(CONFIG_SLIP)
+ /* To be exact, this node just hooks the initialization
+ routines to the device structures. */
+extern int slip_init_ctrl_dev(struct device *);
+static struct device slip_bootstrap = {
+ "slip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, slip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&slip_bootstrap)
+#endif /* SLIP */
+
+#if defined(CONFIG_STRIP)
+extern int strip_init_ctrl_dev(struct device *);
+static struct device strip_bootstrap = {
+ "strip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, strip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&strip_bootstrap)
+#endif /* STRIP */
+
+#if defined(CONFIG_PPP)
+extern int ppp_init(struct device *);
+static struct device ppp_bootstrap = {
+ "ppp_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, ppp_init, };
+#undef NEXT_DEV
+#define NEXT_DEV (&ppp_bootstrap)
+#endif /* PPP */
+
+#ifdef CONFIG_DUMMY
+ extern int dummy_init(struct device *dev);
+ static struct device dummy_dev = {
+ "dummy", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, dummy_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&dummy_dev)
+#endif
+
+#ifdef CONFIG_EQUALIZER
+extern int eql_init(struct device *dev);
+struct device eql_dev = {
+ "eql", /* Master device for IP traffic load
+ balancing */
+ 0x0, 0x0, 0x0, 0x0, /* recv end/start; mem end/start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ eql_init /* set up the rest */
+};
+# undef NEXT_DEV
+# define NEXT_DEV (&eql_dev)
+#endif
+
+#ifdef CONFIG_IBMTR
+
+ extern int tok_probe(struct device *dev);
+ static struct device ibmtr_dev1 = {
+ "tr1", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa24, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev1)
+
+
+ static struct device ibmtr_dev0 = {
+ "tr0", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa20, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev0)
+
+#endif
+
+#ifdef CONFIG_DEFXX
+ extern int dfx_probe(struct device *dev);
+ static struct device fddi7_dev =
+ {"fddi7", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, dfx_probe};
+ static struct device fddi6_dev =
+ {"fddi6", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi7_dev, dfx_probe};
+ static struct device fddi5_dev =
+ {"fddi5", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi6_dev, dfx_probe};
+ static struct device fddi4_dev =
+ {"fddi4", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi5_dev, dfx_probe};
+ static struct device fddi3_dev =
+ {"fddi3", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi4_dev, dfx_probe};
+ static struct device fddi2_dev =
+ {"fddi2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi3_dev, dfx_probe};
+ static struct device fddi1_dev =
+ {"fddi1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi2_dev, dfx_probe};
+ static struct device fddi0_dev =
+ {"fddi0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi1_dev, dfx_probe};
+
+#undef NEXT_DEV
+#define NEXT_DEV (&fddi0_dev)
+#endif
+
+#ifdef CONFIG_NET_IPIP
+ extern int tunnel_init(struct device *);
+
+ static struct device tunnel_dev1 =
+ {
+ "tunl1", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+
+ static struct device tunnel_dev0 =
+ {
+ "tunl0", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ &tunnel_dev1, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&tunnel_dev0)
+
+#endif
+
+#ifdef CONFIG_APFDDI
+ extern int apfddi_init(struct device *dev);
+ static struct device fddi_dev = {
+ "fddi", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, apfddi_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&fddi_dev)
+#endif
+
+#ifdef CONFIG_APBIF
+ extern int bif_init(struct device *dev);
+ static struct device bif_dev = {
+ "bif", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, bif_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&bif_dev)
+#endif
+
+#ifdef MACH
+struct device *dev_base = &eth0_dev;
+#else
+extern int loopback_init(struct device *dev);
+struct device loopback_dev = {
+ "lo", /* Software Loopback interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ loopback_init /* loopback_init should set up the rest */
+};
+
+struct device *dev_base = &loopback_dev;
+#endif
diff --git a/linux/dev/drivers/net/auto_irq.c b/linux/dev/drivers/net/auto_irq.c
new file mode 100644
index 0000000..73cfe34
--- /dev/null
+++ b/linux/dev/drivers/net/auto_irq.c
@@ -0,0 +1,123 @@
+/* auto_irq.c: Auto-configure IRQ lines for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This code is a general-purpose IRQ line detector for devices with
+ jumpered IRQ lines. If you can make the device raise an IRQ (and
+ that IRQ line isn't already being used), these routines will tell
+ you what IRQ line it's using -- perfect for those oh-so-cool boot-time
+ device probes!
+
+ To use this, first call autoirq_setup(timeout). TIMEOUT is how many
+ 'jiffies' (1/100 sec.) to detect other devices that have active IRQ lines,
+ and can usually be zero at boot. 'autoirq_setup()' returns the bit
+ vector of nominally-available IRQ lines (lines may be physically in-use,
+ but not yet registered to a device).
+ Next, set up your device to trigger an interrupt.
+ Finally call autoirq_report(TIMEOUT) to find out which IRQ line was
+ most recently active. The TIMEOUT should usually be zero, but may
+ be set to the number of jiffies to wait for a slow device to raise an IRQ.
+
+ The idea of using the setup timeout to filter out bogus IRQs came from
+ the serial driver.
+ */
+
+
+#ifdef version
+static const char *version =
+"auto_irq.c:v1.11 Donald Becker (becker@cesdis.gsfc.nasa.gov)";
+#endif
+
+#include <sys/types.h>
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/netdevice.h>
+
+void *irq2dev_map[NR_IRQS] = {0, 0, /* ... zeroed */ };
+
+unsigned long irqs_busy = 0x2147; /* The set of fixed IRQs (keyboard, timer, etc) */
+unsigned long irqs_used = 0x0001; /* The set of fixed IRQs sometimes enabled. */
+unsigned long irqs_reserved = 0x0000; /* An advisory "reserved" table. */
+unsigned long irqs_shared = 0x0000; /* IRQ lines "shared" among conforming cards. */
+
+static volatile unsigned long irq_bitmap; /* The irqs we actually found. */
+static unsigned long irq_handled; /* The irq lines we have a handler on. */
+static volatile int irq_number; /* The latest irq number we actually found. */
+
+static void
+autoirq_probe (int irq, void *dev_id, struct pt_regs *regs)
+{
+ irq_number = irq;
+ set_bit (irq, (void *) &irq_bitmap); /* irq_bitmap |= 1 << irq; */
+ /* This code used to disable the irq. However, the interrupt stub
+ * would then re-enable the interrupt with (potentially) disastrous
+ * consequences
+ */
+ free_irq (irq, dev_id);
+ return;
+}
+
+int
+autoirq_setup (int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies + waittime;
+ unsigned long boguscount = (waittime * loops_per_sec) / 100;
+
+ irq_handled = 0;
+ irq_bitmap = 0;
+
+ for (i = 0; i < 16; i++)
+ {
+ if (test_bit (i, &irqs_busy) == 0
+ && request_irq (i, autoirq_probe, SA_INTERRUPT, "irq probe", NULL) == 0)
+ set_bit (i, (void *) &irq_handled); /* irq_handled |= 1 << i; */
+ }
+ /* Update our USED lists. */
+ irqs_used |= ~irq_handled;
+
+ /* Hang out at least <waittime> jiffies waiting for bogus IRQ hits. */
+ while (timeout > jiffies && --boguscount > 0)
+ ;
+
+ irq_handled &= ~irq_bitmap;
+
+ irq_number = 0; /* We are interested in new interrupts from now on */
+
+ return irq_handled;
+}
+
+int
+autoirq_report (int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies + waittime;
+ unsigned long boguscount = (waittime * loops_per_sec) / 100;
+
+ /* Hang out at least <waittime> jiffies waiting for the IRQ. */
+
+ while (timeout > jiffies && --boguscount > 0)
+ if (irq_number)
+ break;
+
+ irq_handled &= ~irq_bitmap; /* This eliminates the already reset handlers */
+
+ /* Retract the irq handlers that we installed. */
+ for (i = 0; i < 16; i++)
+ {
+ if (test_bit (i, (void *) &irq_handled))
+ free_irq (i, NULL);
+ }
+ return irq_number;
+}
diff --git a/linux/dev/drivers/net/net_init.c b/linux/dev/drivers/net/net_init.c
new file mode 100644
index 0000000..46dbb17
--- /dev/null
+++ b/linux/dev/drivers/net/net_init.c
@@ -0,0 +1,446 @@
+/* netdrv_init.c: Initialization for network devices. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This file contains the initialization for the "pl14+" style ethernet
+ drivers. It should eventually replace most of drivers/net/Space.c.
+ Its primary advantage is that it's able to allocate low-memory buffers.
+ A secondary advantage is that the dangerous NE*000 netcards can reserve
+ their I/O port region before the SCSI probes start.
+
+ Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
+ ethdev_index[MAX_ETH_CARDS]
+ register_netdev() / unregister_netdev()
+
+ Modifications by Wolfgang Walter
+ Use dev_close cleanly so we always shut things down tidily.
+
+ Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
+
+ 14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function.
+
+ August 12, 1996 - Lawrence V. Stefani: Added fddi_change_mtu() and
+ fddi_setup() functions.
+ Sept. 10, 1996 - Lawrence V. Stefani: Increased hard_header_len to
+ include 3 pad bytes.
+*/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/malloc.h>
+#include <linux/if_ether.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/trdevice.h>
+#include <linux/if_arp.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+
+/* The network devices currently exist only in the socket namespace, so these
+ entries are unused. The only ones that make sense are
+ open start the ethercard
+ close stop the ethercard
+ ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
+ One can also imagine getting raw packets using
+ read & write
+ but this is probably better handled by a raw packet socket.
+
+ Given that almost all of these functions are handled in the current
+ socket-based scheme, putting ethercard devices in /dev/ seems pointless.
+
+ [Removed all support for /dev network devices. When someone adds
+ streams then by magic we get them, but otherwise they are un-needed
+ and a space waste]
+*/
+
+/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */
+#define MAX_ETH_CARDS 16 /* same as the number of irqs in irq2dev[] */
+static struct device *ethdev_index[MAX_ETH_CARDS];
+
+
+/* Fill in the fields of the device structure with ethernet-generic values.
+
+ If no device structure is passed, a new one is constructed, complete with
+ a SIZEOF_PRIVATE private data area.
+
+ If an empty string area is passed as dev->name, or a new structure is made,
+ a new name string is constructed. The passed string area should be 8 bytes
+ long.
+ */
+
+struct device *
+init_etherdev(struct device *dev, int sizeof_priv)
+{
+ int new_device = 0;
+ int i;
+
+ /* Use an existing correctly named device in Space.c:dev_base. */
+ if (dev == NULL) {
+ int alloc_size = sizeof(struct device) + sizeof("eth%d ")
+ + sizeof_priv + 3;
+ struct device *cur_dev;
+ char pname[8]; /* Putative name for the device. */
+
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(pname, "eth%d", i);
+ for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next)
+ if (strcmp(pname, cur_dev->name) == 0) {
+ dev = cur_dev;
+ dev->init = NULL;
+ sizeof_priv = (sizeof_priv + 3) & ~3;
+ dev->priv = sizeof_priv
+ ? kmalloc(sizeof_priv, GFP_KERNEL)
+ : NULL;
+ if (dev->priv) memset(dev->priv, 0, sizeof_priv);
+ goto found;
+ }
+ }
+
+ alloc_size &= ~3; /* Round to dword boundary. */
+
+ dev = (struct device *)kmalloc(alloc_size, GFP_KERNEL);
+ memset(dev, 0, alloc_size);
+ if (sizeof_priv)
+ dev->priv = (void *) (dev + 1);
+ dev->name = sizeof_priv + (char *)(dev + 1);
+ new_device = 1;
+ }
+
+ found: /* From the double loop above. */
+
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ ether_setup(dev); /* Hmmm, should this be called here? */
+
+ if (new_device) {
+ /* Append the device to the device queue. */
+ struct device **old_devp = &dev_base;
+ while ((*old_devp)->next)
+ old_devp = & (*old_devp)->next;
+ (*old_devp)->next = dev;
+ dev->next = 0;
+ }
+ return dev;
+}
+
+
+static int eth_mac_addr(struct device *dev, void *p)
+{
+ struct sockaddr *addr=p;
+ if(dev->start)
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ return 0;
+}
+
+static int eth_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef CONFIG_FDDI
+
+static int fddi_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
+ return(-EINVAL);
+ dev->mtu = new_mtu;
+ return(0);
+}
+
+#endif
+
+void ether_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ /* register boot-defined "eth" devices */
+ if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) {
+ i = simple_strtoul(dev->name + 3, NULL, 0);
+ if (ethdev_index[i] == NULL) {
+ ethdev_index[i] = dev;
+ }
+ else if (dev != ethdev_index[i]) {
+ /* Really shouldn't happen! */
+#ifdef MACH
+ panic("ether_setup: Ouch! Someone else took %s\n",
+ dev->name);
+#else
+ printk("ether_setup: Ouch! Someone else took %s\n",
+ dev->name);
+#endif
+ }
+ }
+
+#ifndef MACH
+ dev->change_mtu = eth_change_mtu;
+ dev->hard_header = eth_header;
+ dev->rebuild_header = eth_rebuild_header;
+ dev->set_mac_address = eth_mac_addr;
+ dev->header_cache_bind = eth_header_cache_bind;
+ dev->header_cache_update= eth_header_cache_update;
+#endif
+
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = 1500; /* eth_mtu */
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 100; /* Ethernet wants good queues */
+
+ memset(dev->broadcast,0xFF, ETH_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#ifdef CONFIG_TR
+
+void tr_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->hard_header = tr_header;
+ dev->rebuild_header = tr_rebuild_header;
+
+ dev->type = ARPHRD_IEEE802;
+ dev->hard_header_len = TR_HLEN;
+ dev->mtu = 2000; /* bug in fragmenter...*/
+ dev->addr_len = TR_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on tr */
+
+ memset(dev->broadcast,0xFF, TR_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#endif
+
+#ifdef CONFIG_FDDI
+
+void fddi_setup(struct device *dev)
+ {
+ int i;
+
+ /*
+ * Fill in the fields of the device structure with FDDI-generic values.
+ * This should be in a common file instead of per-driver.
+ */
+ for (i=0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->change_mtu = fddi_change_mtu;
+ dev->hard_header = fddi_header;
+ dev->rebuild_header = fddi_rebuild_header;
+
+ dev->type = ARPHRD_FDDI;
+ dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
+ dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
+ dev->addr_len = FDDI_K_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on FDDI */
+
+ memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
+
+ /* New-style flags */
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+ return;
+ }
+
+#endif
+
+int ether_config(struct device *dev, struct ifmap *map)
+{
+ if (map->mem_start != (u_long)(-1))
+ dev->mem_start = map->mem_start;
+ if (map->mem_end != (u_long)(-1))
+ dev->mem_end = map->mem_end;
+ if (map->base_addr != (u_short)(-1))
+ dev->base_addr = map->base_addr;
+ if (map->irq != (u_char)(-1))
+ dev->irq = map->irq;
+ if (map->dma != (u_char)(-1))
+ dev->dma = map->dma;
+ if (map->port != (u_char)(-1))
+ dev->if_port = map->port;
+ return 0;
+}
+
+int register_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i=MAX_ETH_CARDS;
+
+ save_flags(flags);
+ cli();
+
+ if (dev && dev->init) {
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ printk("loading device '%s'...\n", dev->name);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ sti(); /* device probes assume interrupts enabled */
+ if (dev->init(dev) != 0) {
+ if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL;
+ restore_flags(flags);
+ return -EIO;
+ }
+ cli();
+
+ /* Add device to end of chain */
+ if (dev_base) {
+ while (d->next)
+ d = d->next;
+ d->next = dev;
+ }
+ else
+ dev_base = dev;
+ dev->next = NULL;
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+void unregister_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i;
+
+ save_flags(flags);
+ cli();
+
+ if (dev == NULL)
+ {
+ printk("was NULL\n");
+ restore_flags(flags);
+ return;
+ }
+ /* else */
+ if (dev->start)
+ printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name);
+
+ /*
+ * must jump over main_device+aliases
+ * avoid alias devices unregistration so that only
+ * net_alias module manages them
+ */
+#ifdef CONFIG_NET_ALIAS
+ if (dev_base == dev)
+ dev_base = net_alias_nextdev(dev);
+ else
+ {
+ while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */
+ d = net_alias_nextdev(d);
+
+ if (d && (net_alias_nextdev(d) == dev))
+ {
+ /*
+ * Critical: Bypass by consider devices as blocks (maindev+aliases)
+ */
+ net_alias_nextdev_set(d, net_alias_nextdev(dev));
+ }
+#else
+ if (dev_base == dev)
+ dev_base = dev->next;
+ else
+ {
+ while (d && (d->next != dev))
+ d = d->next;
+
+ if (d && (d->next == dev))
+ {
+ d->next = dev->next;
+ }
+#endif
+ else
+ {
+ printk("unregister_netdev: '%s' not found\n", dev->name);
+ restore_flags(flags);
+ return;
+ }
+ }
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ {
+ if (ethdev_index[i] == dev)
+ {
+ ethdev_index[i] = NULL;
+ break;
+ }
+ }
+
+ restore_flags(flags);
+
+ /*
+ * You can e.g. use an interface in a route even though it is not up.
+ * We call close_dev (which is changed: it will down a device even if
+ * dev->flags==0 (but it will not call dev->stop if IFF_UP
+ * is not set).
+ * This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev),
+ * dev_mc_discard(dev), ....
+ */
+
+ dev_close(dev);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/dev/drivers/net/wavelan.p.h b/linux/dev/drivers/net/wavelan.p.h
new file mode 100644
index 0000000..0549844
--- /dev/null
+++ b/linux/dev/drivers/net/wavelan.p.h
@@ -0,0 +1,639 @@
+/*
+ * Wavelan ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ *
+ * This file contains all definitions and declarations necessary for the
+ * wavelan isa driver. This file is a private header, so it should
+ * be included only from wavelan.c !!!
+ */
+
+#ifndef WAVELAN_P_H
+#define WAVELAN_P_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * This driver provides a Linux interface to the Wavelan ISA hardware
+ * The Wavelan is a product of Lucent ("http://wavelan.netland.nl/").
+ * This division was formerly part of NCR and then AT&T.
+ * Wavelan are also distributed by DEC (RoamAbout), Digital Ocean and
+ * Aironet (Arlan). If you have one of those product, you will need to
+ * make some changes below...
+ *
+ * This driver is still beta software. A lot of bugs have been corrected,
+ * a lot of functionalities are implemented, the whole appears pretty stable,
+ * but there are still some areas of improvement (encryption, performance...).
+ *
+ * To know how to use this driver, read the NET3 HOWTO.
+ * If you want to exploit the many other functionalities, look at the
+ * comments in the code...
+ *
+ * This driver is the result of the effort of many people (see below).
+ */
+
+/* ------------------------ SPECIFIC NOTES ------------------------ */
+/*
+ * wavelan.o is darn too big
+ * -------------------------
+ * That's true ! There is a very simple way to reduce the driver
+ * object by 33% (yes !). Comment out the following line :
+ * #include <linux/wireless.h>
+ *
+ * MAC address and hardware detection :
+ * ----------------------------------
+ * The detection code of the wavelan checks that the first 3
+ * octets of the MAC address fit the company code. This type of
+ * detection works well for AT&T cards (because the AT&T code is
+ * hardcoded in wavelan.h), but of course will fail for other
+ * manufacturers.
+ *
+ * If you are sure that your card is derived from the wavelan,
+ * here is the way to configure it :
+ * 1) Get your MAC address
+ * a) With your card utilities (wfreqsel, instconf, ...)
+ * b) With the driver :
+ * o compile the kernel with DEBUG_CONFIG_INFO enabled
+ * o Boot and look the card messages
+ * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan.h)
+ * 3) Compile & verify
+ * 4) Send me the MAC code - I will include it in the next version...
+ *
+ * "CU Inactive" message at boot up :
+ * -----------------------------------
+ * It seems that there are some weird timing problems with the
+ * Intel microcontroller. In fact, this message is triggered by a
+ * bad reading of the on board ram the first time we read the
+ * control block. If you ignore this message, all is ok (but in
+ * fact, currently, it resets the wavelan hardware).
+ *
+ * To get rid of that problem, there are two solutions. The first
+ * is to add a dummy read of the scb at the end of
+ * wv_82586_config. The second is to add the timers
+ * wv_synchronous_cmd and wv_ack (the udelay just after the
+ * waiting loops - it seems that the controller is not totally ready
+ * when it says it is !).
+ *
+ * In the current code, I use the second solution (to be
+ * consistent with the original solution of Bruce Janson).
+ */
+
+/* --------------------- WIRELESS EXTENSIONS --------------------- */
+/*
+ * This driver is the first one to support "wireless extensions".
+ * This set of extensions provides you some way to control the wireless
+ * characteristics of the hardware in a standard way and support for
+ * applications for taking advantage of it (like Mobile IP).
+ *
+ * You will need to enable the CONFIG_NET_RADIO define in the kernel
+ * configuration to enable the wireless extensions (this is the one
+ * giving access to the radio network device choice).
+ *
+ * It might also be a good idea as well to fetch the wireless tools to
+ * configure the device and play a bit.
+ */
+
+/* ---------------------------- FILES ---------------------------- */
+/*
+ * wavelan.c : The actual code for the driver - C functions
+ *
+ * wavelan.p.h : Private header : local types / vars for the driver
+ *
+ * wavelan.h : Description of the hardware interface & structs
+ *
+ * i82586.h : Description of the Ethernet controller
+ */
+
+/* --------------------------- HISTORY --------------------------- */
+/*
+ * (Made with information in drivers' headers. It may not be accurate,
+ * and I guarantee nothing except my best effort...)
+ *
+ * The history of the Wavelan drivers is as complicated as history of
+ * the Wavelan itself (NCR -> AT&T -> Lucent).
+ *
+ * All started with Anders Klemets <klemets@paul.rutgers.edu>,
+ * writing a Wavelan ISA driver for the MACH microkernel. Girish
+ * Welling <welling@paul.rutgers.edu> had also worked on it.
+ * Keith Moore modified this for the Pcmcia hardware.
+ *
+ * Robert Morris <rtm@das.harvard.edu> port these two drivers to BSDI
+ * and add specific Pcmcia support (there is currently no equivalent
+ * of the PCMCIA package under BSD...).
+ *
+ * Jim Binkley <jrb@cs.pdx.edu> port both BSDI drivers to freeBSD.
+ *
+ * Bruce Janson <bruce@cs.usyd.edu.au> port the BSDI ISA driver to Linux.
+ *
+ * Anthony D. Joseph <adj@lcs.mit.edu> started modify Bruce driver
+ * (with help of the BSDI PCMCIA driver) for PCMCIA.
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished is work.
+ * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to start
+ * correctly 2.00 cards (2.4 GHz with frequency selection).
+ * David Hinds <dhinds@hyper.stanford.edu> integrated the whole in his
+ * Pcmcia package (+ bug corrections).
+ *
+ * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
+ * patchs to the Pcmcia driver. After, I added code in the ISA driver
+ * for Wireless Extensions and full support of frequency selection
+ * cards. Then, I've done the same to the Pcmcia driver + some
+ * reorganisation. Finally, I came back to the ISA driver to
+ * upgrade it at the same level as the Pcmcia one and reorganise
+ * the code
+ * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
+ * much needed informations on the Wavelan hardware.
+ */
+
+/* The original copyrights and literature mention other names and
+ * credits. I don't know what their part in this development was...
+ */
+
+/* By the way : for the copyright & legal stuff :
+ * Almost everybody wrote code under GNU or BSD license (or alike),
+ * and want that their original copyright remain somewhere in the
+ * code (for myself, I go with the GPL).
+ * Nobody want to take responsibility for anything, except the fame...
+ */
+
+/* --------------------------- CREDITS --------------------------- */
+/*
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@cesdis.gsfc.nasa.gov),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Brent Elphick <belphick@uwaterloo.ca>,
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Jean Tourrilhes (jt@hplb.hpl.hp.com),
+ * Girish Welling (welling@paul.rutgers.edu),
+ * Clark Woodworth <clark@hiway1.exit109.com>
+ * Yongguang Zhang <ygz@isl.hrl.hac.com>...
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (iialan@iiit.swan.ac.uk),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Stanislav Sinyagin <stas@isf.ru>
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Additional Credits:
+ *
+ * My development has been done under Linux 2.0.x (Debian 1.1) with
+ * an HP Vectra XP/60.
+ *
+ */
+
+/* ------------------------- IMPROVEMENTS ------------------------- */
+/*
+ * I proudly present :
+ *
+ * Changes mades in first pre-release :
+ * ----------------------------------
+ * - Reorganisation of the code, function name change
+ * - Creation of private header (wavelan.p.h)
+ * - Reorganised debug messages
+ * - More comments, history, ...
+ * - mmc_init : configure the PSA if not done
+ * - mmc_init : correct default value of level threshold for pcmcia
+ * - mmc_init : 2.00 detection better code for 2.00 init
+ * - better info at startup
+ * - irq setting (note : this setting is permanent...)
+ * - Watchdog : change strategy (+ solve module removal problems)
+ * - add wireless extensions (ioctl & get_wireless_stats)
+ * get/set nwid/frequency on fly, info for /proc/net/wireless
+ * - More wireless extension : SETSPY and GETSPY
+ * - Make wireless extensions optional
+ * - Private ioctl to set/get quality & level threshold, histogram
+ * - Remove /proc/net/wavelan
+ * - Supress useless stuff from lp (net_local)
+ * - kernel 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
+ * - Add message level (debug stuff in /var/adm/debug & errors not
+ * displayed at console and still in /var/adm/messages)
+ * - multi device support
+ * - Start fixing the probe (init code)
+ * - More inlines
+ * - man page
+ * - Lot of others minor details & cleanups
+ *
+ * Changes made in second pre-release :
+ * ----------------------------------
+ * - Cleanup init code (probe & module init)
+ * - Better multi device support (module)
+ * - name assignement (module)
+ *
+ * Changes made in third pre-release :
+ * ---------------------------------
+ * - Be more conservative on timers
+ * - Preliminary support for multicast (I still lack some details...)
+ *
+ * Changes made in fourth pre-release :
+ * ----------------------------------
+ * - multicast (revisited and finished)
+ * - Avoid reset in set_multicast_list (a really big hack)
+ * if somebody could apply this code for other i82586 based driver...
+ * - Share on board memory 75% RU / 25% CU (instead of 50/50)
+ *
+ * Changes made for release in 2.1.15 :
+ * ----------------------------------
+ * - Change the detection code for multi manufacturer code support
+ *
+ * Changes made for release in 2.1.17 :
+ * ----------------------------------
+ * - Update to wireless extensions changes
+ * - Silly bug in card initial configuration (psa_conf_status)
+ *
+ * Changes made for release in 2.1.27 & 2.0.30 :
+ * -------------------------------------------
+ * - Small bug in debug code (probably not the last one...)
+ * - Remove extern kerword for wavelan_probe()
+ * - Level threshold is now a standard wireless extension (version 4 !)
+ *
+ * Changes made for release in 2.1.36 :
+ * ----------------------------------
+ * - Encryption setting from Brent Elphick (thanks a lot !)
+ * - 'ioaddr' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
+ *
+ * Wishes & dreams :
+ * ---------------
+ * - Roaming
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+#include <linux/timer.h>
+
+#include <linux/wireless.h> /* Wireless extensions */
+
+/* Wavelan declarations */
+#ifdef MACH
+#include <linuxdev/drivers/net/i82586.h>
+#else
+#include "i82586.h"
+#endif
+#include "wavelan.h"
+
+/****************************** DEBUG ******************************/
+
+#undef DEBUG_MODULE_TRACE /* Module insertion/removal */
+#undef DEBUG_CALLBACK_TRACE /* Calls made by Linux */
+#undef DEBUG_INTERRUPT_TRACE /* Calls to handler */
+#undef DEBUG_INTERRUPT_INFO /* type of interrupt & so on */
+#define DEBUG_INTERRUPT_ERROR /* problems */
+#undef DEBUG_CONFIG_TRACE /* Trace the config functions */
+#undef DEBUG_CONFIG_INFO /* What's going on... */
+#define DEBUG_CONFIG_ERRORS /* Errors on configuration */
+#undef DEBUG_TX_TRACE /* Transmission calls */
+#undef DEBUG_TX_INFO /* Header of the transmited packet */
+#define DEBUG_TX_ERROR /* unexpected conditions */
+#undef DEBUG_RX_TRACE /* Transmission calls */
+#undef DEBUG_RX_INFO /* Header of the transmited packet */
+#define DEBUG_RX_ERROR /* unexpected conditions */
+#undef DEBUG_PACKET_DUMP 16 /* Dump packet on the screen */
+#undef DEBUG_IOCTL_TRACE /* Misc call by Linux */
+#undef DEBUG_IOCTL_INFO /* Various debug info */
+#define DEBUG_IOCTL_ERROR /* What's going wrong */
+#define DEBUG_BASIC_SHOW /* Show basic startup info */
+#undef DEBUG_VERSION_SHOW /* Print version info */
+#undef DEBUG_PSA_SHOW /* Dump psa to screen */
+#undef DEBUG_MMC_SHOW /* Dump mmc to screen */
+#undef DEBUG_SHOW_UNUSED /* Show also unused fields */
+#undef DEBUG_I82586_SHOW /* Show i82586 status */
+#undef DEBUG_DEVICE_SHOW /* Show device parameters */
+
+/* Options : */
+#define USE_PSA_CONFIG /* Use info from the PSA */
+#define IGNORE_NORMAL_XMIT_ERRS /* Don't bother with normal conditions */
+#undef STRUCT_CHECK /* Verify padding of structures */
+#undef PSA_CRC /* Check CRC in PSA */
+#undef OLDIES /* Old code (to redo) */
+#undef RECORD_SNR /* To redo */
+#undef EEPROM_IS_PROTECTED /* Doesn't seem to be necessary */
+#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+/* Warning : these stuff will slow down the driver... */
+#define WIRELESS_SPY /* Enable spying addresses */
+#undef HISTOGRAM /* Enable histogram of sig level... */
+#endif
+
+/************************ CONSTANTS & MACROS ************************/
+
+#ifdef DEBUG_VERSION_SHOW
+static const char *version = "wavelan.c : v16 (wireless extensions) 17/4/97\n";
+#endif
+
+/* Watchdog temporisation */
+#define WATCHDOG_JIFFIES 32 /* TODO: express in HZ. */
+
+/* Macro to get the number of elements in an array */
+#define NELS(a) (sizeof(a) / sizeof(a[0]))
+
+/* ------------------------ PRIVATE IOCTL ------------------------ */
+
+#define SIOCSIPQTHR SIOCDEVPRIVATE /* Set quality threshold */
+#define SIOCGIPQTHR SIOCDEVPRIVATE + 1 /* Get quality threshold */
+#define SIOCSIPLTHR SIOCDEVPRIVATE + 2 /* Set level threshold */
+#define SIOCGIPLTHR SIOCDEVPRIVATE + 3 /* Get level threshold */
+
+#define SIOCSIPHISTO SIOCDEVPRIVATE + 6 /* Set histogram ranges */
+#define SIOCGIPHISTO SIOCDEVPRIVATE + 7 /* Get histogram values */
+
+/* ----------------------- VERSION SUPPORT ----------------------- */
+
+/* This ugly patch is needed to cope with old version of the kernel */
+#ifndef copy_from_user
+#define copy_from_user memcpy_fromfs
+#define copy_to_user memcpy_tofs
+#endif
+
+/****************************** TYPES ******************************/
+
+/* Shortcuts */
+typedef struct device device;
+typedef struct enet_statistics en_stats;
+typedef struct iw_statistics iw_stats;
+typedef struct iw_quality iw_qual;
+typedef struct iw_freq iw_freq;
+typedef struct net_local net_local;
+typedef struct timer_list timer_list;
+
+/* Basic types */
+typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
+
+/*
+ * Static specific data for the interface.
+ *
+ * For each network interface, Linux keep data in two structure. "device"
+ * keep the generic data (same format for everybody) and "net_local" keep
+ * the additional specific data.
+ * Note that some of this specific data is in fact generic (en_stats, for
+ * example).
+ */
+struct net_local
+{
+ net_local * next; /* Linked list of the devices */
+ device * dev; /* Reverse link... */
+ en_stats stats; /* Ethernet interface statistics */
+ int nresets; /* Number of hw resets */
+ u_char reconfig_82586; /* Need to reconfigure the controler */
+ u_char promiscuous; /* Promiscuous mode */
+ int mc_count; /* Number of multicast addresses */
+ timer_list watchdog; /* To avoid blocking state */
+ u_short hacr; /* Current host interface state */
+
+ int tx_n_in_use;
+ u_short rx_head;
+ u_short rx_last;
+ u_short tx_first_free;
+ u_short tx_first_in_use;
+
+#ifdef WIRELESS_EXT
+ iw_stats wstats; /* Wireless specific stats */
+#endif
+
+#ifdef WIRELESS_SPY
+ int spy_number; /* Number of addresses to spy */
+ mac_addr spy_address[IW_MAX_SPY]; /* The addresses to spy */
+ iw_qual spy_stat[IW_MAX_SPY]; /* Statistics gathered */
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ int his_number; /* Number of intervals */
+ u_char his_range[16]; /* Boundaries of interval ]n-1; n] */
+ u_long his_sum[16]; /* Sum in interval */
+#endif /* HISTOGRAM */
+};
+
+/**************************** PROTOTYPES ****************************/
+
+/* ----------------------- MISC SUBROUTINES ------------------------ */
+static inline unsigned long /* flags */
+ wv_splhi(void); /* Disable interrupts */
+static inline void
+ wv_splx(unsigned long); /* ReEnable interrupts : flags */
+static u_char
+ wv_irq_to_psa(int);
+static int
+ wv_psa_to_irq(u_char);
+/* ------------------- HOST ADAPTER SUBROUTINES ------------------- */
+static inline u_short /* data */
+ hasr_read(u_long); /* Read the host interface : base address */
+static inline void
+ hacr_write(u_long, /* Write to host interface : base address */
+ u_short), /* data */
+ hacr_write_slow(u_long,
+ u_short),
+ set_chan_attn(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_hacr_reset(u_long), /* ioaddr */
+ wv_16_off(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_16_on(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_ints_off(device *),
+ wv_ints_on(device *);
+/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
+static void
+ psa_read(u_long, /* Read the Parameter Storage Area */
+ u_short, /* hacr */
+ int, /* offset in PSA */
+ u_char *, /* buffer to fill */
+ int), /* size to read */
+ psa_write(u_long, /* Write to the PSA */
+ u_short, /* hacr */
+ int, /* Offset in psa */
+ u_char *, /* Buffer in memory */
+ int); /* Length of buffer */
+static inline void
+ mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */
+ u_short,
+ u_char),
+ mmc_write(u_long, /* Write n bytes to the MMC */
+ u_char,
+ u_char *,
+ int);
+static inline u_char /* Read 1 byte from the MMC */
+ mmc_in(u_long,
+ u_short);
+static inline void
+ mmc_read(u_long, /* Read n bytes from the MMC */
+ u_char,
+ u_char *,
+ int),
+ fee_wait(u_long, /* Wait for frequency EEprom : base address */
+ int, /* Base delay to wait for */
+ int); /* Number of time to wait */
+static void
+ fee_read(u_long, /* Read the frequency EEprom : base address */
+ u_short, /* destination offset */
+ u_short *, /* data buffer */
+ int); /* number of registers */
+/* ---------------------- I82586 SUBROUTINES ----------------------- */
+static /*inline*/ void
+ obram_read(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static inline void
+ obram_write(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static void
+ wv_ack(device *);
+static inline int
+ wv_synchronous_cmd(device *,
+ const char *),
+ wv_config_complete(device *,
+ u_long,
+ net_local *);
+static int
+ wv_complete(device *,
+ u_long,
+ net_local *);
+static inline void
+ wv_82586_reconfig(device *);
+/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
+#ifdef DEBUG_I82586_SHOW
+static void
+ wv_scb_show(unsigned short);
+#endif
+static inline void
+ wv_init_info(device *); /* display startup info */
+/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
+static en_stats *
+ wavelan_get_stats(device *); /* Give stats /proc/net/dev */
+static void
+ wavelan_set_multicast_list(device *);
+/* ----------------------- PACKET RECEPTION ----------------------- */
+static inline void
+ wv_packet_read(device *, /* Read a packet from a frame */
+ u_short,
+ int),
+ wv_receive(device *); /* Read all packets waiting */
+/* --------------------- PACKET TRANSMISSION --------------------- */
+static inline void
+ wv_packet_write(device *, /* Write a packet to the Tx buffer */
+ void *,
+ short);
+static int
+ wavelan_packet_xmit(struct sk_buff *, /* Send a packet */
+ device *);
+/* -------------------- HARDWARE CONFIGURATION -------------------- */
+static inline int
+ wv_mmc_init(device *), /* Initialize the modem */
+ wv_ru_start(device *), /* Start the i82586 receiver unit */
+ wv_cu_start(device *), /* Start the i82586 command unit */
+ wv_82586_start(device *); /* Start the i82586 */
+static void
+ wv_82586_config(device *); /* Configure the i82586 */
+static inline void
+ wv_82586_stop(device *);
+static int
+ wv_hw_reset(device *), /* Reset the wavelan hardware */
+ wv_check_ioaddr(u_long, /* ioaddr */
+ u_char *); /* mac address (read) */
+/* ---------------------- INTERRUPT HANDLING ---------------------- */
+static void
+ wavelan_interrupt(int, /* Interrupt handler */
+ void *,
+ struct pt_regs *);
+static void
+ wavelan_watchdog(u_long); /* Transmission watchdog */
+/* ------------------- CONFIGURATION CALLBACKS ------------------- */
+static int
+ wavelan_open(device *), /* Open the device */
+ wavelan_close(device *), /* Close the device */
+ wavelan_config(device *); /* Configure one device */
+extern int
+ wavelan_probe(device *); /* See Space.c */
+
+/**************************** VARIABLES ****************************/
+
+/*
+ * This is the root of the linked list of wavelan drivers
+ * It is use to verify that we don't reuse the same base address
+ * for two differents drivers and to make the cleanup when
+ * removing the module.
+ */
+static net_local * wavelan_list = (net_local *) NULL;
+
+/*
+ * This table is used to translate the psa value to irq number
+ * and vice versa...
+ */
+static u_char irqvals[] =
+{
+ 0, 0, 0, 0x01,
+ 0x02, 0x04, 0, 0x08,
+ 0, 0, 0x10, 0x20,
+ 0x40, 0, 0, 0x80,
+};
+
+/*
+ * Table of the available i/o address (base address) for wavelan
+ */
+static unsigned short iobase[] =
+{
+#if 0
+ /* Leave out 0x3C0 for now -- seems to clash with some video
+ * controllers.
+ * Leave out the others too -- we will always use 0x390 and leave
+ * 0x300 for the Ethernet device.
+ * Jean II : 0x3E0 is really fine as well...
+ */
+ 0x300, 0x390, 0x3E0, 0x3C0
+#endif /* 0 */
+ 0x390, 0x3E0
+};
+
+#ifdef MODULE
+/* Name of the devices (memory allocation) */
+static char devname[4][IFNAMSIZ] = { "", "", "", "" };
+
+/* Parameters set by insmod */
+static int io[4] = { 0, 0, 0, 0 };
+static int irq[4] = { 0, 0, 0, 0 };
+static char * name[4] = { devname[0], devname[1], devname[2], devname[3] };
+#endif /* MODULE */
+
+#endif /* WAVELAN_P_H */
diff --git a/linux/dev/drivers/scsi/eata_dma.c b/linux/dev/drivers/scsi/eata_dma.c
new file mode 100644
index 0000000..e902ea1
--- /dev/null
+++ b/linux/dev/drivers/scsi/eata_dma.c
@@ -0,0 +1,1607 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all ISA based EATA-DMA boards *
+ * like PM2011, PM2021, PM2041, PM3021 *
+ * -supports all EISA based EATA-DMA boards *
+ * like PM2012B, PM2022, PM2122, PM2322, PM2042, *
+ * PM3122, PM3222, PM3332 *
+ * -supports all PCI based EATA-DMA boards *
+ * like PM2024, PM2124, PM2044, PM2144, PM3224, *
+ * PM3334 *
+ * -supports the Wide, Ultra Wide and Differential *
+ * versions of the boards *
+ * -supports multiple HBAs with & without IRQ sharing *
+ * -supports all SCSI channels on multi channel boards *
+ * -supports ix86 and MIPS, untested on ALPHA *
+ * -needs identical IDs on all channels of a HBA *
+ * -can be loaded as module *
+ * -displays statistical and hardware information *
+ * in /proc/scsi/eata_dma *
+ * -provides rudimentary latency measurement *
+ * possibilities via /proc/scsi/eata_dma/<hostnum> *
+ * *
+ * (c)1993-96 Michael Neuffer *
+ * mike@i-Connect.Net *
+ * neuffer@mail.uni-mainz.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ * I have to thank DPT for their excellent support. I took *
+ * me almost a year and a stopover at their HQ, on my first *
+ * trip to the USA, to get it, but since then they've been *
+ * very helpful and tried to give me all the infos and *
+ * support I need. *
+ * *
+ * Thanks also to Simon Shapiro, Greg Hosler and Mike *
+ * Jagdis who did a lot of testing and found quite a number *
+ * of bugs during the development. *
+ ************************************************************
+ * last change: 96/10/21 OS: Linux 2.0.23 *
+ ************************************************************/
+
+/* Look in eata_dma.h for configuration and revision information */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#ifdef MACH
+#define flush_cache_all()
+#else
+#include <asm/pgtable.h>
+#endif
+#ifdef __mips__
+#include <asm/cachectl.h>
+#endif
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include "eata_dma.h"
+#include "eata_dma_proc.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_eata_dma = {
+ PROC_SCSI_EATA, 8, "eata_dma",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static u32 ISAbases[] =
+{0x1F0, 0x170, 0x330, 0x230};
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static struct eata_sp *status = 0; /* Statuspacket array */
+static void *dma_scratch = 0;
+
+static struct eata_register *fake_int_base;
+static int fake_int_result;
+static int fake_int_happened;
+
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
+void eata_scsi_done (Scsi_Cmnd * scmd)
+{
+ scmd->request.rq_status = RQ_SCSI_DONE;
+
+ if (scmd->request.sem != NULL)
+ up(scmd->request.sem);
+
+ return;
+}
+
+void eata_fake_int_handler(s32 irq, void *dev_id, struct pt_regs * regs)
+{
+ fake_int_result = inb((ulong)fake_int_base + HA_RSTATUS);
+ fake_int_happened = TRUE;
+ DBG(DBG_INTR3, printk("eata_fake_int_handler called irq%d base %p"
+ " res %#x\n", irq, fake_int_base, fake_int_result));
+ return;
+}
+
+#include "eata_dma_proc.c"
+
+#ifdef MODULE
+int eata_release(struct Scsi_Host *sh)
+{
+ uint i;
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL);
+ else reg_IRQ[sh->irq]--;
+
+ scsi_init_free((void *)status, 512);
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+ for (i = 0; i < sh->can_queue; i++){ /* Free all SG arrays */
+ if(SD(sh)->ccb[i].sg_list != NULL)
+ scsi_init_free((void *) SD(sh)->ccb[i].sg_list,
+ sh->sg_tablesize * sizeof(struct eata_sg_list));
+ }
+
+ if (SD(sh)->channel == 0) {
+ if (sh->dma_channel != BUSMASTER) free_dma(sh->dma_channel);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
+
+inline void eata_latency_in(struct eata_ccb *cp, hostdata *hd)
+{
+ uint time;
+ time = jiffies - cp->timestamp;
+ if(hd->all_lat[1] > time)
+ hd->all_lat[1] = time;
+ if(hd->all_lat[2] < time)
+ hd->all_lat[2] = time;
+ hd->all_lat[3] += time;
+ hd->all_lat[0]++;
+ if((cp->rw_latency) == WRITE) { /* was WRITE */
+ if(hd->writes_lat[cp->sizeindex][1] > time)
+ hd->writes_lat[cp->sizeindex][1] = time;
+ if(hd->writes_lat[cp->sizeindex][2] < time)
+ hd->writes_lat[cp->sizeindex][2] = time;
+ hd->writes_lat[cp->sizeindex][3] += time;
+ hd->writes_lat[cp->sizeindex][0]++;
+ } else if((cp->rw_latency) == READ) {
+ if(hd->reads_lat[cp->sizeindex][1] > time)
+ hd->reads_lat[cp->sizeindex][1] = time;
+ if(hd->reads_lat[cp->sizeindex][2] < time)
+ hd->reads_lat[cp->sizeindex][2] = time;
+ hd->reads_lat[cp->sizeindex][3] += time;
+ hd->reads_lat[cp->sizeindex][0]++;
+ }
+}
+
+inline void eata_latency_out(struct eata_ccb *cp, Scsi_Cmnd *cmd)
+{
+ int x, z;
+ short *sho;
+ long *lon;
+ x = 0; /* just to keep GCC quiet */
+ cp->timestamp = jiffies; /* For latency measurements */
+ switch(cmd->cmnd[0]) {
+ case WRITE_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = READ;
+ break;
+ default:
+ cp->rw_latency = OTHER;
+ break;
+ }
+ if (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 ||
+ cmd->cmnd[0] == WRITE_12 || cmd->cmnd[0] == READ_6 ||
+ cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_12) {
+ for(z = 0; (x > (1 << z)) && (z <= 11); z++)
+ /* nothing */;
+ cp->sizeindex = z;
+ }
+}
+
+
+void eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ uint i, result = 0;
+ uint hba_stat, scsi_stat, eata_stat;
+ Scsi_Cmnd *cmd;
+ struct eata_ccb *ccb;
+ struct eata_sp *sp;
+ uint base;
+ uint x;
+ struct Scsi_Host *sh;
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if (sh->irq != irq)
+ continue;
+
+ while(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+
+ int_counter++;
+
+ sp = &SD(sh)->sp;
+#ifdef __mips__
+ sys_cacheflush(sp, sizeof(struct eata_sp), 2);
+#endif
+ ccb = sp->ccb;
+
+ if(ccb == NULL) {
+ eata_stat = inb((uint)sh->base + HA_RSTATUS);
+ printk("eata_dma: int_handler, Spurious IRQ %d "
+ "received. CCB pointer not set.\n", irq);
+ break;
+ }
+
+ cmd = ccb->cmd;
+ base = (uint) cmd->host->base;
+ hba_stat = sp->hba_stat;
+
+ scsi_stat = (sp->scsi_stat >> 1) & 0x1f;
+
+ if (sp->EOC == FALSE) {
+ eata_stat = inb(base + HA_RSTATUS);
+ printk(KERN_WARNING "eata_dma: int_handler, board: %x cmd %lx "
+ "returned unfinished.\n"
+ "EATA: %x HBA: %x SCSI: %x spadr %lx spadrirq %lx, "
+ "irq%d\n", base, (long)ccb, eata_stat, hba_stat,
+ scsi_stat,(long)&status, (long)&status[irq], irq);
+ cmd->result = DID_ERROR << 16;
+ ccb->status = FREE;
+ cmd->scsi_done(cmd);
+ break;
+ }
+
+ sp->EOC = FALSE; /* Clean out this flag */
+
+ if (ccb->status == LOCKED || ccb->status == RESET) {
+ printk("eata_dma: int_handler, reseted command pid %ld returned"
+ "\n", cmd->pid);
+ DBG(DBG_INTR && DBG_DELAY, DELAY(1));
+ }
+
+ eata_stat = inb(base + HA_RSTATUS);
+ DBG(DBG_INTR, printk("IRQ %d received, base %#.4x, pid %ld, "
+ "target: %x, lun: %x, ea_s: %#.2x, hba_s: "
+ "%#.2x \n", irq, base, cmd->pid, cmd->target,
+ cmd->lun, eata_stat, hba_stat));
+
+ switch (hba_stat) {
+ case HA_NO_ERROR: /* NO Error */
+ if(HD(cmd)->do_latency == TRUE && ccb->timestamp)
+ eata_latency_in(ccb, HD(cmd));
+ result = DID_OK << 16;
+ break;
+ case HA_ERR_SEL_TO: /* Selection Timeout */
+ case HA_ERR_CMD_TO: /* Command Timeout */
+ result = DID_TIME_OUT << 16;
+ break;
+ case HA_BUS_RESET: /* SCSI Bus Reset Received */
+ result = DID_RESET << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: BUS RESET "
+ "received on cmd %ld\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_INIT_POWERUP: /* Initial Controller Power-up */
+ if (cmd->device->type != TYPE_TAPE)
+ result = DID_BUS_BUSY << 16;
+ else
+ result = DID_ERROR << 16;
+
+ for (i = 0; i < MAXTARGET; i++)
+ DBG(DBG_STATUS, printk(KERN_DEBUG "scsi%d: cmd pid %ld "
+ "returned with INIT_POWERUP\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_CP_ABORT_NA:
+ case HA_CP_ABORTED:
+ result = DID_ABORT << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: aborted cmd "
+ "returned\n", HD(cmd)->HBA_number));
+ break;
+ case HA_CP_RESET_NA:
+ case HA_CP_RESET:
+ HD(cmd)->resetlevel[cmd->channel] = 0;
+ result = DID_RESET << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: reseted cmd "
+ "pid %ldreturned\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ case HA_SCSI_HUNG: /* SCSI Hung */
+ printk(KERN_ERR "scsi%d: SCSI hung\n", HD(cmd)->HBA_number);
+ result = DID_ERROR << 16;
+ break;
+ case HA_RSENSE_FAIL: /* Auto Request-Sense Failed */
+ DBG(DBG_STATUS, printk(KERN_ERR "scsi%d: Auto Request Sense "
+ "Failed\n", HD(cmd)->HBA_number));
+ result = DID_ERROR << 16;
+ break;
+ case HA_UNX_BUSPHASE: /* Unexpected Bus Phase */
+ case HA_UNX_BUS_FREE: /* Unexpected Bus Free */
+ case HA_BUS_PARITY: /* Bus Parity Error */
+ case HA_UNX_MSGRJCT: /* Unexpected Message Reject */
+ case HA_RESET_STUCK: /* SCSI Bus Reset Stuck */
+ case HA_PARITY_ERR: /* Controller Ram Parity */
+ default:
+ result = DID_ERROR << 16;
+ break;
+ }
+ cmd->result = result | (scsi_stat << 1);
+
+#if DBG_INTR2
+ if (scsi_stat || result || hba_stat || eata_stat != 0x50
+ || cmd->scsi_done == NULL || cmd->device->id == 7)
+ printk("HBA: %d, channel %d, id: %d, lun %d, pid %ld:\n"
+ "eata_stat %#x, hba_stat %#.2x, scsi_stat %#.2x, "
+ "sense_key: %#x, result: %#.8x\n", x,
+ cmd->device->channel, cmd->device->id, cmd->device->lun,
+ cmd->pid, eata_stat, hba_stat, scsi_stat,
+ cmd->sense_buffer[2] & 0xf, cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+#endif
+
+ ccb->status = FREE; /* now we can release the slot */
+ cmd->scsi_done(cmd);
+ }
+ }
+
+ return;
+}
+
+inline int eata_send_command(u32 addr, u32 base, u8 command)
+{
+ long loop = R_LIMIT;
+
+ while (inb(base + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0)
+ return(FALSE);
+
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ /* And now the address in nice little byte chunks */
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ outb(command, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+inline int eata_send_immediate(u32 base, u32 addr, u8 ifc, u8 code, u8 code2)
+{
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ outb(0x0, base + HA_WDMAADDR - 1);
+ if(addr){
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ } else {
+ outb(0x0, base + HA_WDMAADDR);
+ outb(0x0, base + HA_WDMAADDR + 1);
+ outb(code2, base + HA_WCODE2);
+ outb(code, base + HA_WCODE);
+ }
+
+ outb(ifc, base + HA_WIFC);
+ outb(EATA_CMD_IMMEDIATE, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+int eata_queue(Scsi_Cmnd * cmd, void (* done) (Scsi_Cmnd *))
+{
+ unsigned int i, x, y;
+ ulong flags;
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *ccb;
+ struct scatterlist *sl;
+
+
+ save_flags(flags);
+ cli();
+
+#if 0
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_queue.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+#endif
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->sense_buffer[0] != 0) {
+ DBG(DBG_REQSENSE, printk(KERN_DEBUG "Tried to REQUEST SENSE\n"));
+ cmd->result = DID_OK << 16;
+ done(cmd);
+
+ return(0);
+ }
+
+ /* check for free slot */
+ for (y = hd->last_ccb + 1, x = 0; x < sh->can_queue; x++, y++) {
+ if (y >= sh->can_queue)
+ y = 0;
+ if (hd->ccb[y].status == FREE)
+ break;
+ }
+
+ hd->last_ccb = y;
+
+ if (x >= sh->can_queue) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk(KERN_CRIT "eata_queue pid %ld, HBA QUEUE FULL..., "
+ "returning DID_BUS_BUSY\n", cmd->pid));
+ done(cmd);
+ restore_flags(flags);
+ return(0);
+ }
+ ccb = &hd->ccb[y];
+
+ memset(ccb, 0, sizeof(struct eata_ccb) - sizeof(struct eata_sg_list *));
+
+ ccb->status = USED; /* claim free slot */
+
+ restore_flags(flags);
+
+ DBG(DBG_QUEUE, printk("eata_queue pid %ld, target: %x, lun: %x, y %d\n",
+ cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ if(hd->do_latency == TRUE)
+ eata_latency_out(ccb, cmd);
+
+ cmd->scsi_done = (void *)done;
+
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ ccb->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ ccb->DataIn = TRUE; /* Input mode */
+ }
+
+ /* FIXME: This will have to be changed once the midlevel driver
+ * allows different HBA IDs on every channel.
+ */
+ if (cmd->target == sh->this_id)
+ ccb->Interpret = TRUE; /* Interpret command */
+
+ if (cmd->use_sg) {
+ ccb->scatter = TRUE; /* SG mode */
+ if (ccb->sg_list == NULL) {
+ ccb->sg_list = kmalloc(sh->sg_tablesize * sizeof(struct eata_sg_list),
+ GFP_ATOMIC | GFP_DMA);
+ }
+ if (ccb->sg_list == NULL)
+ panic("eata_dma: Run out of DMA memory for SG lists !\n");
+ ccb->cp_dataDMA = htonl(virt_to_bus(ccb->sg_list));
+
+ ccb->cp_datalen = htonl(cmd->use_sg * sizeof(struct eata_sg_list));
+ sl=(struct scatterlist *)cmd->request_buffer;
+ for(i = 0; i < cmd->use_sg; i++, sl++){
+ ccb->sg_list[i].data = htonl(virt_to_bus(sl->address));
+ ccb->sg_list[i].len = htonl((u32) sl->length);
+ }
+ } else {
+ ccb->scatter = FALSE;
+ ccb->cp_datalen = htonl(cmd->request_bufflen);
+ ccb->cp_dataDMA = htonl(virt_to_bus(cmd->request_buffer));
+ }
+
+ ccb->Auto_Req_Sen = TRUE;
+ ccb->cp_reqDMA = htonl(virt_to_bus(cmd->sense_buffer));
+ ccb->reqlen = sizeof(cmd->sense_buffer);
+
+ ccb->cp_id = cmd->target;
+ ccb->cp_channel = cmd->channel;
+ ccb->cp_lun = cmd->lun;
+ ccb->cp_dispri = TRUE;
+ ccb->cp_identify = TRUE;
+ memcpy(ccb->cp_cdb, cmd->cmnd, cmd->cmd_len);
+
+ ccb->cp_statDMA = htonl(virt_to_bus(&(hd->sp)));
+
+ ccb->cp_viraddr = ccb; /* This will be passed thru, so we don't need to
+ * convert it */
+ ccb->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ if(eata_send_command((u32) ccb, (u32) sh->base, EATA_CMD_DMA_SEND_CP) == FALSE) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk("eata_queue target %d, pid %ld, HBA busy, "
+ "returning DID_BUS_BUSY\n",cmd->target, cmd->pid));
+ ccb->status = FREE;
+ done(cmd);
+ return(0);
+ }
+ DBG(DBG_QUEUE, printk("Queued base %#.4x pid: %ld target: %x lun: %x "
+ "slot %d irq %d\n", (s32)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ return(0);
+}
+
+
+int eata_abort(Scsi_Cmnd * cmd)
+{
+ ulong loop = HZ / 2;
+ ulong flags;
+ int x;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_abort called pid: %ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ /* Some interrupt controllers seem to loose interrupts */
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_abort.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY) {
+ if (--loop == 0) {
+ printk("eata_dma: abort, timeout error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ }
+ if (CD(cmd)->status == RESET) {
+ printk("eata_dma: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ DBG(DBG_ABNORM, printk("eata_dma: abort, queue slot locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_dma: abort: invalid slot status\n");
+}
+
+int eata_reset(Scsi_Cmnd * cmd, unsigned int resetflags)
+{
+ uint x;
+ ulong loop = loops_per_sec / 3;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_reset called pid:%ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_reset.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ if (HD(cmd)->state == RESET) {
+ printk("eata_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk("eata_reset: exit, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ if (HD(cmd)->ccb[x].status == LOCKED) {
+ HD(cmd)->ccb[x].status = FREE;
+ printk("eata_reset: locked slot %d forced free.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ continue;
+ }
+
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+
+ if (sp == NULL)
+ panic("eata_reset: slot %d, sp==NULL.\n", x);
+
+ printk("eata_reset: slot %d in reset, pid %ld.\n", x, sp->pid);
+
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ if (sp == cmd)
+ success = TRUE;
+ }
+
+ /* hard reset the HBA */
+ inb((u32) (cmd->host->base) + HA_RSTATUS); /* This might cause trouble */
+ eata_send_command(0, (u32) cmd->host->base, EATA_CMD_RESET);
+
+ HD(cmd)->state = RESET;
+
+ DBG(DBG_ABNORM, printk("eata_reset: board reset done, enabling "
+ "interrupts.\n"));
+
+ DELAY(2); /* In theorie we should get interrupts and set free all
+ * used queueslots */
+
+ DBG(DBG_ABNORM, printk("eata_reset: interrupts disabled again.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt and those that
+ * are still LOCKED from the last reset */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(cmd)->ccb[x].status = LOCKED;
+
+ printk("eata_reset: slot %d locked, DID_RESET, pid %ld done.\n",
+ x, sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ sp->scsi_done(sp);
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, pending.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PENDING);
+ } else {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
+/* Here we try to determine the optimum queue depth for
+ * each attached device.
+ *
+ * At the moment the algorithm is rather simple
+ */
+static void eata_select_queue_depths(struct Scsi_Host *host,
+ Scsi_Device *devicelist)
+{
+ Scsi_Device *device;
+ int devcount = 0;
+ int factor = 0;
+
+#if CRIPPLE_QUEUE
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host)
+ device->queue_depth = 2;
+ }
+#else
+ /* First we do a sample run go find out what we have */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if (device->host == host) {
+ devcount++;
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ factor += TYPE_DISK_QUEUE;
+ break;
+ case TYPE_TAPE:
+ factor += TYPE_TAPE_QUEUE;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ factor += TYPE_ROM_QUEUE;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ factor += TYPE_OTHER_QUEUE;
+ break;
+ }
+ }
+ }
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: needed queueslots %d\n",
+ host->host_no, factor));
+
+ if(factor == 0) /* We don't want to get a DIV BY ZERO error */
+ factor = 1;
+
+ factor = (SD(host)->queuesize * 10) / factor;
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: using factor %dE-1\n",
+ host->host_no, factor));
+
+ /* Now that have the factor we can set the individual queuesizes */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host) {
+ if(SD(device->host)->bustype != IS_ISA){
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ device->queue_depth = (TYPE_DISK_QUEUE * factor) / 10;
+ break;
+ case TYPE_TAPE:
+ device->queue_depth = (TYPE_TAPE_QUEUE * factor) / 10;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ device->queue_depth = (TYPE_ROM_QUEUE * factor) / 10;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ device->queue_depth = (TYPE_OTHER_QUEUE * factor) / 10;
+ break;
+ }
+ } else /* ISA forces us to limit the queue depth because of the
+ * bounce buffer memory overhead. I know this is cruel */
+ device->queue_depth = 2;
+
+ /*
+ * It showed that we need to set an upper limit of commands
+ * we can allow to queue for a single device on the bus.
+ * If we get above that limit, the broken midlevel SCSI code
+ * will produce bogus timeouts and aborts en masse. :-(
+ */
+ if(device->queue_depth > UPPER_DEVICE_QUEUE_LIMIT)
+ device->queue_depth = UPPER_DEVICE_QUEUE_LIMIT;
+ if(device->queue_depth == 0)
+ device->queue_depth = 1;
+
+ printk(KERN_INFO "scsi%d: queue depth for target %d on channel %d "
+ "set to %d\n", host->host_no, device->id, device->channel,
+ device->queue_depth);
+ }
+ }
+#endif
+}
+
+#if CHECK_BLINK
+int check_blink_state(long base)
+{
+ ushort loops = 10;
+ u32 blinkindicator;
+ u32 state = 0x12345678;
+ u32 oldstate = 0;
+
+ blinkindicator = htonl(0x54504442);
+ while ((loops--) && (state != oldstate)) {
+ oldstate = state;
+ state = inl((uint) base + 1);
+ }
+
+ DBG(DBG_BLINK, printk("Did Blink check. Status: %d\n",
+ (state == oldstate) && (state == blinkindicator)));
+
+ if ((state == oldstate) && (state == blinkindicator))
+ return(TRUE);
+ else
+ return (FALSE);
+}
+#endif
+
+char * get_board_data(u32 base, u32 irq, u32 id)
+{
+ struct eata_ccb *cp;
+ struct eata_sp *sp;
+ static char *buff;
+ ulong i;
+
+ cp = (struct eata_ccb *) scsi_init_malloc(sizeof(struct eata_ccb),
+ GFP_ATOMIC | GFP_DMA);
+ sp = (struct eata_sp *) scsi_init_malloc(sizeof(struct eata_sp),
+ GFP_ATOMIC | GFP_DMA);
+
+ buff = dma_scratch;
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(sp, 0, sizeof(struct eata_sp));
+ memset(buff, 0, 256);
+
+ cp->DataIn = TRUE;
+ cp->Interpret = TRUE; /* Interpret command */
+ cp->cp_dispri = TRUE;
+ cp->cp_identify = TRUE;
+
+ cp->cp_datalen = htonl(56);
+ cp->cp_dataDMA = htonl(virt_to_bus(buff));
+ cp->cp_statDMA = htonl(virt_to_bus(sp));
+ cp->cp_viraddr = cp;
+
+ cp->cp_id = id;
+ cp->cp_lun = 0;
+
+ cp->cp_cdb[0] = INQUIRY;
+ cp->cp_cdb[1] = 0;
+ cp->cp_cdb[2] = 0;
+ cp->cp_cdb[3] = 0;
+ cp->cp_cdb[4] = 56;
+ cp->cp_cdb[5] = 0;
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ eata_send_command((u32) cp, (u32) base, EATA_CMD_DMA_SEND_CP);
+
+ i = jiffies + (3 * HZ);
+ while (fake_int_happened == FALSE && jiffies <= i)
+ barrier();
+
+ DBG(DBG_INTR3, printk(KERN_DEBUG "fake_int_result: %#x hbastat %#x "
+ "scsistat %#x, buff %p sp %p\n",
+ fake_int_result, (u32) (sp->hba_stat /*& 0x7f*/),
+ (u32) sp->scsi_stat, buff, sp));
+
+ scsi_init_free((void *)cp, sizeof(struct eata_ccb));
+ scsi_init_free((void *)sp, sizeof(struct eata_sp));
+
+ if ((fake_int_result & HA_SERROR) || jiffies > i){
+ printk(KERN_WARNING "eata_dma: trying to reset HBA at %x to clear "
+ "possible blink state\n", base);
+ /* hard reset the HBA */
+ inb((u32) (base) + HA_RSTATUS);
+ eata_send_command(0, base, EATA_CMD_RESET);
+ DELAY(1);
+ return (NULL);
+ } else
+ return (buff);
+}
+
+
+int get_conf_PIO(u32 base, struct get_conf *buf)
+{
+ ulong loop = R_LIMIT;
+ u16 *p;
+
+ if(check_region(base, 9))
+ return (FALSE);
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return (FALSE);
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ DBG(DBG_PIO && DBG_PROBE,
+ printk("Issuing PIO READ CONFIG to HBA at %#x\n", base));
+ eata_send_command(0, base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = R_LIMIT;
+ for (p = (u16 *) buf;
+ (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ return (FALSE);
+
+ loop = R_LIMIT;
+ *p = inw(base + HA_RDATA);
+ }
+
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? */
+ if (htonl(EATA_SIGNATURE) == buf->signature) {
+ DBG(DBG_PIO&&DBG_PROBE, printk("EATA Controller found at %x "
+ "EATA Level: %x\n", (uint) base,
+ (uint) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ return (TRUE);
+ }
+ } else {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+ "for HBA at %lx\n", (long)base));
+ }
+ return (FALSE);
+}
+
+
+void print_config(struct get_conf *gc)
+{
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d DMAS:%d\n",
+ (u32) ntohl(gc->len), gc->version,
+ gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support,
+ gc->DMA_support);
+ printk("DMAV:%d HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n",
+ gc->DMA_valid, gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+ gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d DMAC:%d FORCADR:%d SG_64K:%d SG_UAE:%d MID:%d "
+ "MCH:%d MLUN:%d\n",
+ gc->IRQ, gc->IRQ_TR, (8 - gc->DMA_channel) & 7, gc->FORCADR,
+ gc->SG_64K, gc->SG_UAE, gc->MAX_ID, gc->MAX_CHAN, gc->MAX_LUN);
+ printk("RIDQ:%d PCI:%d EISA:%d\n",
+ gc->ID_qest, gc->is_PCI, gc->is_EISA);
+ DBG(DPT_DEBUG, DELAY(14));
+}
+
+short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt,
+ u8 bustype)
+{
+ ulong size = 0;
+ unchar dma_channel = 0;
+ char *buff = 0;
+ unchar bugs = 0;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+ int x;
+
+
+ DBG(DBG_REGISTER, print_config(gc));
+
+ if (gc->DMA_support == FALSE) {
+ printk("The EATA HBA at %#.4x does not support DMA.\n"
+ "Please use the EATA-PIO driver.\n", base);
+ return (FALSE);
+ }
+ if(gc->HAA_valid == FALSE || ntohl(gc->len) < 0x22)
+ gc->MAX_CHAN = 0;
+
+ if (reg_IRQ[gc->IRQ] == FALSE) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, (void *) eata_fake_int_handler, SA_INTERRUPT,
+ "eata_dma", NULL)){
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.", gc->IRQ);
+ return (FALSE);
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ] == TRUE) {
+ printk("Can't support more than one HBA on this IRQ,\n"
+ " if the IRQ is edge triggered. Sorry.\n");
+ return (FALSE);
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+
+ /* If DMA is supported but DMA_valid isn't set to indicate that
+ * the channel number is given we must have pre 2.0 firmware (1.7?)
+ * which leaves us to guess since the "newer ones" also don't set the
+ * DMA_valid bit.
+ */
+ if (gc->DMA_support && !gc->DMA_valid && gc->DMA_channel) {
+ printk(KERN_WARNING "eata_dma: If you are using a pre 2.0 firmware "
+ "please update it !\n"
+ " You can get new firmware releases from ftp.dpt.com\n");
+ gc->DMA_channel = (base == 0x1f0 ? 3 /* DMA=5 */ : 2 /* DMA=6 */);
+ gc->DMA_valid = TRUE;
+ }
+
+ /* if gc->DMA_valid it must be an ISA HBA and we have to register it */
+ dma_channel = BUSMASTER;
+ if (gc->DMA_valid) {
+ if (request_dma(dma_channel = (8 - gc->DMA_channel) & 7, "eata_dma")) {
+ printk(KERN_WARNING "Unable to allocate DMA channel %d for ISA HBA"
+ " at %#.4x.\n", dma_channel, base);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (dma_channel != BUSMASTER) {
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ if (bustype != IS_EISA && bustype != IS_ISA)
+ buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]);
+
+ if (buff == NULL) {
+ if (bustype == IS_EISA || bustype == IS_ISA) {
+ bugs = bugs || BROKEN_INQUIRY;
+ } else {
+ if (gc->DMA_support == FALSE)
+ printk(KERN_WARNING "HBA at %#.4x doesn't support DMA. "
+ "Sorry\n", base);
+ else
+ printk(KERN_WARNING "HBA at %#.4x does not react on INQUIRY. "
+ "Sorry.\n", base);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (gc->DMA_support == FALSE && buff != NULL)
+ printk(KERN_WARNING "HBA %.12sat %#.4x doesn't set the DMA_support "
+ "flag correctly.\n", &buff[16], base);
+
+ request_region(base, 9, "eata_dma"); /* We already checked the
+ * availability, so this
+ * should not fail.
+ */
+
+ if(ntohs(gc->queuesiz) == 0) {
+ gc->queuesiz = ntohs(64);
+ printk(KERN_WARNING "Warning: Queue size has to be corrected. Assuming"
+ " 64 queueslots\n"
+ " This might be a PM2012B with a defective Firmware\n"
+ " Contact DPT support@dpt.com for an upgrade\n");
+ }
+
+ size = sizeof(hostdata) + ((sizeof(struct eata_ccb) + sizeof(long))
+ * ntohs(gc->queuesiz));
+
+ DBG(DBG_REGISTER, printk("scsi_register size: %ld\n", size));
+
+ sh = scsi_register(tpnt, size);
+
+ if(sh != NULL) {
+
+ hd = SD(sh);
+
+ memset(hd->reads, 0, sizeof(u32) * 26);
+
+ sh->select_queue_depths = eata_select_queue_depths;
+
+ hd->bustype = bustype;
+
+ /*
+	 * If we are using an ISA board, we can't use extended SG,
+ * because we would need excessive amounts of memory for
+ * bounce buffers.
+ */
+ if (gc->SG_64K==TRUE && ntohs(gc->SGsiz)==64 && hd->bustype!=IS_ISA){
+ sh->sg_tablesize = SG_SIZE_BIG;
+ } else {
+ sh->sg_tablesize = ntohs(gc->SGsiz);
+ if (sh->sg_tablesize > SG_SIZE || sh->sg_tablesize == 0) {
+ if (sh->sg_tablesize == 0)
+ printk(KERN_WARNING "Warning: SG size had to be fixed.\n"
+ "This might be a PM2012 with a defective Firmware"
+ "\nContact DPT support@dpt.com for an upgrade\n");
+ sh->sg_tablesize = SG_SIZE;
+ }
+ }
+ hd->sgsize = sh->sg_tablesize;
+ }
+
+ if(sh != NULL) {
+ sh->can_queue = hd->queuesize = ntohs(gc->queuesiz);
+ sh->cmd_per_lun = 0;
+ }
+
+ if(sh == NULL) {
+ DBG(DBG_REGISTER, printk(KERN_NOTICE "eata_dma: couldn't register HBA"
+ " at%x \n", base));
+ scsi_unregister(sh);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+
+
+ hd->broken_INQUIRY = (bugs & BROKEN_INQUIRY);
+
+ if(hd->broken_INQUIRY == TRUE) {
+ strcpy(hd->vendor, "DPT");
+ strcpy(hd->name, "??????????");
+ strcpy(hd->revision, "???.?");
+ hd->firmware_revision = 0;
+ } else {
+ strncpy(hd->vendor, &buff[8], 8);
+ hd->vendor[8] = 0;
+ strncpy(hd->name, &buff[16], 17);
+ hd->name[17] = 0;
+ hd->revision[0] = buff[32];
+ hd->revision[1] = buff[33];
+ hd->revision[2] = buff[34];
+ hd->revision[3] = '.';
+ hd->revision[4] = buff[35];
+ hd->revision[5] = 0;
+ hd->firmware_revision = (buff[32] << 24) + (buff[33] << 16)
+ + (buff[34] << 8) + buff[35];
+ }
+
+ if (hd->firmware_revision >= (('0'<<24) + ('7'<<16) + ('G'<< 8) + '0'))
+ hd->immediate_support = 1;
+ else
+ hd->immediate_support = 0;
+
+ switch (ntohl(gc->len)) {
+ case 0x1c:
+ hd->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ hd->EATA_revision = 'b';
+ break;
+ case 0x22:
+ hd->EATA_revision = 'c';
+ break;
+ case 0x24:
+ hd->EATA_revision = 'z';
+ default:
+ hd->EATA_revision = '?';
+ }
+
+
+ if(ntohl(gc->len) >= 0x22) {
+ sh->max_id = gc->MAX_ID + 1;
+ sh->max_lun = gc->MAX_LUN + 1;
+ } else {
+ sh->max_id = 8;
+ sh->max_lun = 8;
+ }
+
+ hd->HBA_number = sh->host_no;
+ hd->channel = gc->MAX_CHAN;
+ sh->max_channel = gc->MAX_CHAN;
+ sh->unique_id = base;
+ sh->base = (char *) base;
+ sh->io_port = base;
+ sh->n_io_port = 9;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = dma_channel;
+
+ /* FIXME:
+ * SCSI midlevel code should support different HBA ids on every channel
+ */
+ sh->this_id = gc->scsi_id[3];
+
+ if (gc->SECOND)
+ hd->primary = FALSE;
+ else
+ hd->primary = TRUE;
+
+ sh->wish_block = FALSE;
+
+ if (hd->bustype != IS_ISA) {
+ sh->unchecked_isa_dma = FALSE;
+ } else {
+ sh->unchecked_isa_dma = TRUE; /* We're doing ISA DMA */
+ }
+
+ for(x = 0; x <= 11; x++){ /* Initialize min. latency */
+ hd->writes_lat[x][1] = 0xffffffff;
+ hd->reads_lat[x][1] = 0xffffffff;
+ }
+ hd->all_lat[1] = 0xffffffff;
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if(hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+
+ return (TRUE);
+}
+
+
+
+void find_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ u32 base;
+ int i;
+
+#if CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+ pal1 = inb((u16)base - 8);
+ pal2 = inb((u16)base - 7);
+ pal3 = inb((u16)base - 6);
+
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) && (pal3 == NEC_ID3))||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) && (pal3 == ATT_ID3))){
+ DBG(DBG_PROBE, printk("EISA EATA id tags found: %x %x %x \n",
+ (int)pal1, (int)pal2, (int)pal3));
+#endif
+ if (get_conf_PIO(base, buf) == TRUE) {
+ if (buf->IRQ) {
+ DBG(DBG_EISA, printk("Registering EISA HBA\n"));
+ register_HBA(base, buf, tpnt, IS_EISA);
+ } else
+ printk("eata_dma: No valid IRQ. HBA removed from list\n");
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(base))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ /* Nothing found here so we take it from the list */
+ EISAbases[i] = 0;
+#if CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
+
+void find_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (ISAbases[i]) {
+ if (get_conf_PIO(ISAbases[i],buf) == TRUE){
+ DBG(DBG_ISA, printk("Registering ISA HBA\n"));
+ register_HBA(ISAbases[i], buf, tpnt, IS_ISA);
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(ISAbases[i]))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ ISAbases[i] = 0;
+ }
+ }
+ return;
+}
+
+void find_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+ printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
+#else
+
+ u8 pci_bus, pci_device_fn;
+ static s16 pci_index = 0; /* Device index to PCI BIOS calls */
+ u32 base = 0;
+ u16 com_adr;
+ u16 rev_device;
+ u32 error, i, x;
+ u8 pal1, pal2, pal3;
+
+ if (pcibios_present()) {
+ for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+ if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+ pci_index, &pci_bus, &pci_device_fn))
+ break;
+ DBG(DBG_PROBE && DBG_PCI,
+ printk("eata_dma: find_PCI, HBA at bus %d, device %d,"
+ " function %d, index %d\n", (s32)pci_bus,
+ (s32)((pci_device_fn & 0xf8) >> 3),
+ (s32)(pci_device_fn & 7), pci_index));
+
+ if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_CLASS_DEVICE, &rev_device))) {
+ if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+ if (!(error = pcibios_read_config_word(pci_bus,
+ pci_device_fn, PCI_COMMAND,
+ (u16 *) & com_adr))) {
+ if (!((com_adr & PCI_COMMAND_IO) &&
+ (com_adr & PCI_COMMAND_MASTER))) {
+ printk("eata_dma: find_PCI, HBA has IO or"
+ " BUSMASTER mode disabled\n");
+ continue;
+ }
+ } else
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_COMMAND\n", error);
+ } else
+ printk("eata_dma: find_PCI, DEVICECLASSID %x didn't match\n",
+ rev_device);
+ } else {
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_CLASS_BASE\n",
+ error);
+ continue;
+ }
+
+ if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, (int *) &base))){
+
+ /* Check if the address is valid */
+ if (base & 0x01) {
+ base &= 0xfffffffe;
+ /* EISA tag there ? */
+ pal1 = inb(base);
+ pal2 = inb(base + 1);
+ pal3 = inb(base + 2);
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) &&
+ (pal3 == NEC_ID3)) ||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) &&
+ (pal3 == ATT_ID3)))
+ base += 0x08;
+ else
+ base += 0x10; /* Now, THIS is the real address */
+
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_conf_PIO(base, buf) == TRUE) {
+
+ /* OK. We made it till here, so we can go now
+ * and register it. We only have to check and
+ * eventually remove it from the EISA and ISA list
+ */
+ DBG(DBG_PCI, printk("Registering PCI HBA\n"));
+ register_HBA(base, buf, tpnt, IS_PCI);
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88)
+ EISAbases[(base >> 12) & 0x0f] = 0;
+ continue; /* break; */
+ }
+#if CHECK_BLINK
+ else if (check_blink_state(base) == TRUE) {
+ printk("eata_dma: HBA is in BLINK state.\n"
+ "Consult your HBAs manual to correct this.\n");
+ }
+#endif
+ }
+ }
+ } else {
+ printk("eata_dma: error %x while reading "
+ "PCI_BASE_ADDRESS_0\n", error);
+ }
+ }
+ } else {
+ printk("eata_dma: No BIOS32 extensions present. This driver release "
+ "still depends on it.\n"
+ " Skipping scan for PCI HBAs. \n");
+ }
+#endif /* #ifndef CONFIG_PCI */
+ return;
+}
+
+int eata_detect(Scsi_Host_Template * tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+ printk("Using lots of delays to let you read the debugging output\n"));
+
+ tpnt->proc_dir = &proc_scsi_eata_dma;
+
+ status = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ dma_scratch = scsi_init_malloc(1024, GFP_ATOMIC | GFP_DMA);
+
+ if(status == NULL || dma_scratch == NULL) {
+ printk("eata_dma: can't allocate enough memory to probe for hosts !\n");
+ return(0);
+ }
+
+ dma_scratch += 4;
+
+ find_PCI(&gc, tpnt);
+
+ find_EISA(&gc, tpnt);
+
+ find_ISA(&gc, tpnt);
+
+ for (i = 0; i < MAXIRQ; i++) { /* Now that we know what we have, we */
+ if (reg_IRQ[i] >= 1){ /* exchange the interrupt handler which */
+ free_irq(i, NULL); /* we used for probing with the real one */
+ request_irq(i, (void *)(eata_int_handler), SA_INTERRUPT|SA_SHIRQ,
+ "eata_dma", NULL);
+ }
+ }
+
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) driver version: %d.%d%s"
+ "\ndeveloped in co-operation with DPT\n"
+ "(c) 1993-96 Michael Neuffer, mike@i-Connect.Net\n",
+ VER_MAJOR, VER_MINOR, VER_SUB);
+ printk("Registered HBAs:");
+ printk("\nHBA no. Boardtype Revis EATA Bus BaseIO IRQ"
+ " DMA Ch ID Pr QS S/G IS\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.12s v%s 2.0%c %s %#.4x %2d",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+ (u32) HBA_ptr->base, HBA_ptr->irq);
+ if(HBA_ptr->dma_channel != BUSMASTER)
+ printk(" %2x ", HBA_ptr->dma_channel);
+ else
+ printk(" %s", "BMST");
+ printk(" %d %d %c %3d %3d %c\n",
+ SD(HBA_ptr)->channel+1, HBA_ptr->this_id,
+ (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+ HBA_ptr->can_queue, HBA_ptr->sg_tablesize,
+ (SD(HBA_ptr)->immediate_support == TRUE)?'Y':'N');
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ } else {
+ scsi_init_free((void *)status, 512);
+ }
+
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+
+ DBG(DPT_DEBUG, DELAY(12));
+
+ return(registered_HBAs);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = EATA_DMA;
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/dev/drivers/scsi/g_NCR5380.c b/linux/dev/drivers/scsi/g_NCR5380.c
new file mode 100644
index 0000000..687dd36
--- /dev/null
+++ b/linux/dev/drivers/scsi/g_NCR5380.c
@@ -0,0 +1,735 @@
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * TODO : flesh out DMA support, find some one actually using this (I have
+ * a memory mapped Trantor board that works fine)
+ */
+
+/*
+ * Options :
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. With command line overrides - NCR5380=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is
+ * specified as an array of address, irq, dma, board tuples. Ie, for
+ * one board at 0x350, IRQ5, no dma, I could say
+ * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}
+ *
+ * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ * 3. When included as a module, with arguments passed on the command line:
+ * ncr_irq=xx the interrupt
+ * ncr_addr=xx the port or base address (for port or memory
+ * mapped, resp.)
+ * ncr_dma=xx the DMA
+ * ncr_5380=1 to set up for a NCR5380 board
+ * ncr_53c400=1 to set up for a NCR53C400 board
+ * e.g.
+ * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+ * for a port mapped NCR5380 board or
+ * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+ * for a memory mapped NCR53C400 board with interrupts disabled.
+ *
+ * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ */
+
+#ifdef MACH
+#define GENERIC_NCR5380_OVERRIDE {{(NCR5380_map_type)0x350,5,0,BOARD_NCR53C400}};
+#define CONFIG_SCSI_GENERIC_NCR53C400
+#define CONFIG_SCSI_G_NCR5380_MEM
+#endif
+
+#define AUTOPROBE_IRQ
+#define AUTOSENSE
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SCSI_GENERIC_NCR53C400
+#define NCR53C400_PSEUDO_DMA 1
+#define PSEUDO_DMA
+#define NCR53C400
+#define NCR5380_STATS
+#undef NCR5380_STAT_LIMIT
+#endif
+#if defined(CONFIG_SCSI_G_NCR5380_PORT) && defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You can not configure the Generic NCR 5380 SCSI Driver for memory mapped I/O and port mapped I/O at the same time (yet)
+#endif
+#if !defined(CONFIG_SCSI_G_NCR5380_PORT) && !defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You must configure the Generic NCR 5380 SCSI Driver for one of memory mapped I/O and port mapped I/O.
+#endif
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "g_NCR5380.h"
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_g_ncr5380 = {
+ PROC_SCSI_GENERIC_NCR5380, 9, "g_NCR5380",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define NCR_NOT_SET 0
+static int ncr_irq=NCR_NOT_SET;
+static int ncr_dma=NCR_NOT_SET;
+static int ncr_addr=NCR_NOT_SET;
+static int ncr_5380=NCR_NOT_SET;
+static int ncr_53c400=NCR_NOT_SET;
+
+static struct override {
+ NCR5380_implementation_fields;
+ int irq;
+ int dma;
+ int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
+} overrides
+#ifdef GENERIC_NCR5380_OVERRIDE
+ [] = GENERIC_NCR5380_OVERRIDE
+#else
+ [1] = {{0,},};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+/*
+ * Function : static internal_setup(int board, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : board - either BOARD_NCR5380 for a normal NCR5380 board,
+ * or BOARD_NCR53C400 for a NCR53C400 board. str - unused, ints -
+ * array of integer parameters with ints[0] equal to the number of ints.
+ *
+ */
+
+static void internal_setup(int board, char *str, int *ints) {
+ static int commandline_current = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ if (ints[0] != 2 && ints[0] != 3) {
+ printk("generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
+ return;
+ }
+ case BOARD_NCR53C400:
+ if (ints[0] != 2) {
+ printk("generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
+ return;
+ }
+ }
+
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type)ints[1];
+ overrides[commandline_current].irq = ints[2];
+ if (ints[0] == 3)
+ overrides[commandline_current].dma = ints[3];
+ else
+ overrides[commandline_current].dma = DMA_NONE;
+ overrides[commandline_current].board = board;
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : generic_NCR5380_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR5380_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR5380, str, ints);
+}
+
+/*
+ * Function : generic_NCR53C400_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR53C400_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR53C400, str, ints);
+}
+
+/*
+ * Function : int generic_NCR5380_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : initializes generic NCR5380 driver based on the
+ * command line / compile time port and irq definitions.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int generic_NCR5380_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0;
+ int count;
+ int flags = 0;
+ struct Scsi_Host *instance;
+
+ if (ncr_irq != NCR_NOT_SET)
+ overrides[0].irq=ncr_irq;
+ if (ncr_dma != NCR_NOT_SET)
+ overrides[0].dma=ncr_dma;
+ if (ncr_addr != NCR_NOT_SET)
+ overrides[0].NCR5380_map_name=(NCR5380_map_type)ncr_addr;
+ if (ncr_5380 != NCR_NOT_SET)
+ overrides[0].board=BOARD_NCR5380;
+ else if (ncr_53c400 != NCR_NOT_SET)
+ overrides[0].board=BOARD_NCR53C400;
+
+ tpnt->proc_dir = &proc_scsi_g_ncr5380;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ if (!(overrides[current_override].NCR5380_map_name))
+ continue;
+
+ switch (overrides[current_override].board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA;
+ break;
+ case BOARD_NCR53C400:
+ flags = FLAG_NCR53C400;
+ break;
+ }
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name;
+
+ NCR5380_init(instance, flags);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, generic_NCR5380_intr, SA_INTERRUPT, "NCR5380", NULL)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+ printk("scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int)instance->NCR5380_instance_name);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+const char * generic_NCR5380_info (struct Scsi_Host* host) {
+ static const char string[]="Generic NCR5380/53C400 Driver";
+ return string;
+}
+
+int generic_NCR5380_release_resources(struct Scsi_Host * instance)
+{
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+ if (instance->irq != IRQ_NONE)
+ free_irq(instance->irq, NULL);
+
+ return 0;
+}
+
+#ifdef BIOSPARAM
+/*
+ * Function : int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Someone
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+#endif
+
+#if NCR53C400_PSEUDO_DMA
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: About to read %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: %d blocks left\n", blocks);
+#endif
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ if (blocks)
+ printk("53C400r: blocks still == %d\n", blocks);
+ else
+ printk("53C400r: Exiting loop\n");
+#endif
+ break;
+ }
+
+#if 1
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Waiting for buffer, bl=%d\n", bl);
+#endif
+
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring 128 bytes\n");
+#endif
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: EXTRA: Waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+ if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ printk("53C400r: no 53C80 gated irq after transfer");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: Got 53C80 interrupt and tried to clear it\n");
+#endif
+
+/* DON'T DO THIS - THEY NEVER ARRIVE!
+ printk("53C400r: Waiting for 53C80 registers\n");
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+*/
+
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
+ printk("53C400r: no end dma signal\n");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: end dma as expected\n");
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ return 0;
+}
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: About to write %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ if (blocks)
+ printk("53C400w: exiting loop, blocks still == %d\n", blocks);
+ else
+ printk("53C400w: exiting loop\n");
+#endif
+ break;
+ }
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: %d blocks left\n", blocks);
+
+ printk("53C400w: waiting for buffer, bl=%d\n", bl);
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: EXTRA waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+#if 0
+ printk("53C400w: waiting for registers to be available\n");
+ THEY NEVER DO!
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+ printk("53C400w: Got em\n");
+#endif
+
+ /* Let's wait for this instead - could be ugly */
+ /* All documentation says to check for this. Maybe my hardware is too
+ * fast. Waiting for it seems to work fine! KLL
+ */
+ while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ ;
+
+ /*
+ * I know. i is certainly != 0 here but the loop is new. See previous
+ * comment.
+ */
+ if (i) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got 53C80 gated irq (last block)\n");
+#endif
+ if (!((i=NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER))
+ printk("53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n",i);
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: Got END OF DMA\n");
+#endif
+ }
+ else
+ printk("53C400w: no 53C80 gated irq after transfer (last block)\n");
+
+#if 0
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
+ printk("53C400w: no end dma signal\n");
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: waiting for last byte...\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got last byte.\n");
+ printk("53C400w: pwrite exiting with status 0, whoopee!\n");
+#endif
+ return 0;
+}
+#endif /* PSEUDO_DMA */
+
+#include "NCR5380.c"
+
+#define PRINTP(x) len += sprintf(buffer+len, x)
+#define ANDP ,
+
+static int sprint_opcode(char* buffer, int len, int opcode) {
+ int start = len;
+ PRINTP("0x%02x " ANDP opcode);
+ return len-start;
+}
+
+static int sprint_command (char* buffer, int len, unsigned char *command) {
+ int i,s,start=len;
+ len += sprint_opcode(buffer, len, command[0]);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ PRINTP("%02x " ANDP command[i]);
+ PRINTP("\n");
+ return len-start;
+}
+
+static int sprint_Scsi_Cmnd (char* buffer, int len, Scsi_Cmnd *cmd) {
+ int start = len;
+ PRINTP("host number %d destination target %d, lun %d\n" ANDP
+ cmd->host->host_no ANDP
+ cmd->target ANDP
+ cmd->lun);
+ PRINTP(" command = ");
+ len += sprint_command (buffer, len, cmd->cmnd);
+ return len-start;
+}
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout)
+{
+ int len = 0;
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ struct Scsi_Host *scsi_ptr;
+ Scsi_Cmnd *ptr;
+ Scsi_Device *dev;
+ struct NCR5380_hostdata *hostdata;
+
+ cli();
+
+ for (scsi_ptr = first_instance; scsi_ptr; scsi_ptr=scsi_ptr->next)
+ if (scsi_ptr->host_no == hostno)
+ break;
+ NCR5380_setup(scsi_ptr);
+ hostdata = (struct NCR5380_hostdata *)scsi_ptr->hostdata;
+
+ PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name);
+ PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE);
+ PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE);
+#ifdef NCR53C400
+ PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE);
+ PRINTP("NCR53C400 card%s detected\n" ANDP (((struct NCR5380_hostdata *)scsi_ptr->hostdata)->flags & FLAG_NCR53C400)?"":" not");
+# if NCR53C400_PSEUDO_DMA
+ PRINTP("NCR53C400 pseudo DMA used\n");
+# endif
+#else
+ PRINTP("NO NCR53C400 driver extensions\n");
+#endif
+ PRINTP("Using %s mapping at %s 0x%x, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name);
+ if (scsi_ptr->irq == IRQ_NONE)
+ PRINTP("no interrupt\n");
+ else
+ PRINTP("on interrupt %d\n" ANDP scsi_ptr->irq);
+
+#ifdef NCR5380_STATS
+ if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue)
+ PRINTP("There are commands pending, transfer rates may be crud\n");
+ if (hostdata->pendingr)
+ PRINTP(" %d pending reads" ANDP hostdata->pendingr);
+ if (hostdata->pendingw)
+ PRINTP(" %d pending writes" ANDP hostdata->pendingw);
+ if (hostdata->pendingr || hostdata->pendingw)
+ PRINTP("\n");
+ for (dev = scsi_devices; dev; dev=dev->next) {
+ if (dev->host == scsi_ptr) {
+ unsigned long br = hostdata->bytes_read[dev->id];
+ unsigned long bw = hostdata->bytes_write[dev->id];
+ long tr = hostdata->time_read[dev->id] / HZ;
+ long tw = hostdata->time_write[dev->id] / HZ;
+
+ PRINTP(" T:%d %s " ANDP dev->id ANDP (dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int)dev->type] : "Unknown");
+ for (i=0; i<8; i++)
+ if (dev->vendor[i] >= 0x20)
+ *(buffer+(len++)) = dev->vendor[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<16; i++)
+ if (dev->model[i] >= 0x20)
+ *(buffer+(len++)) = dev->model[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<4; i++)
+ if (dev->rev[i] >= 0x20)
+ *(buffer+(len++)) = dev->rev[i];
+ *(buffer+(len++)) = ' ';
+
+ PRINTP("\n%10ld kb read in %5ld secs" ANDP br/1024 ANDP tr);
+ if (tr)
+ PRINTP(" @ %5ld bps" ANDP br / tr);
+
+ PRINTP("\n%10ld kb written in %5ld secs" ANDP bw/1024 ANDP tw);
+ if (tw)
+ PRINTP(" @ %5ld bps" ANDP bw / tw);
+ PRINTP("\n");
+ }
+ }
+#endif
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ PRINTP("REQ not asserted, phase unknown.\n");
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i)
+ ;
+ PRINTP("Phase %s\n" ANDP phases[i].name);
+ }
+
+ if (!hostdata->connected) {
+ PRINTP("No currently connected command\n");
+ } else {
+ len += sprint_Scsi_Cmnd (buffer, len, (Scsi_Cmnd *) hostdata->connected);
+ }
+
+ PRINTP("issue_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ PRINTP("disconnected_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ sti();
+ return len;
+}
+
+#undef PRINTP
+#undef ANDP
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = GENERIC_NCR5380;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
new file mode 100644
index 0000000..a830781
--- /dev/null
+++ b/linux/dev/glue/block.c
@@ -0,0 +1,1770 @@
+/*
+ * Linux block driver support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/drivers/block/ll_rw_blk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+ */
+
+/*
+ * linux/fs/block_dev.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * linux/fs/buffer.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <machine/spl.h>
+#include <mach/mach_types.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+
+#include <kern/kalloc.h>
+#include <kern/list.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/disk_status.h>
+#include <device/device_reply.user.h>
+#include <device/device_emul.h>
+#include <device/ds_routines.h>
+
+/* TODO. This should be fixed to not be i386 specific. */
+#include <i386at/disk.h>
+
+#define MACH_INCLUDE
+#include <linux/fs.h>
+#include <linux/blk.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/major.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/malloc.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+
+#include <linux/dev/glue/glue.h>
+
+#ifdef PAE
+#define VM_PAGE_LINUX VM_PAGE_DMA32
+#else
+#define VM_PAGE_LINUX VM_PAGE_HIGHMEM
+#endif
+
+/* This task queue is not used in Mach: just for fixing undefined symbols. */
+DECLARE_TASK_QUEUE (tq_disk);
+
+/* Location of the VTOC, in units of sectors (512 bytes). */
+#define PDLOCATION 29
+
+/* Linux kernel variables. */
+
+/* Temporary data allocated on the stack.
+   One of these lives on the stack of each thread performing block I/O;
+   a pointer to it is published through current_thread ()->pcb->data so
+   that helpers such as alloc_buffer/free_buffer and ll_rw_block can
+   reach the per-operation state. */
+struct temp_data
+{
+  struct inode inode;		/* dummy inode handed to Linux fops */
+  struct file file;		/* dummy file handed to Linux fops */
+  struct request req;		/* request used for this I/O operation */
+  struct list pages;		/* pages grabbed as bounce buffers */
+};
+
+/* One of these exists for each
+   driver associated with a major number. */
+struct device_struct
+{
+  const char *name;		/* device name */
+  struct file_operations *fops;	/* operations vector */
+  int busy:1;			/* driver is being opened/closed */
+  int want:1;			/* someone wants to open/close driver */
+  struct gendisk *gd;		/* DOS partition information */
+  int default_slice;		/* what slice to use when none is given */
+  struct disklabel **labels;	/* disklabels for each DOS partition */
+};
+
+/* An entry in the Mach name to Linux major number conversion table. */
+struct name_map
+{
+  const char *name;		/* Mach name for device */
+  unsigned major;		/* Linux major number */
+  unsigned unit;		/* Linux unit number */
+  int read_only;		/* 1 if device is read only */
+};
+
+/* Driver operation table.  Indexed by Linux major number; entries are
+   filled in by register_blkdev. */
+static struct device_struct blkdevs[MAX_BLKDEV];
+
+/* Driver request function table.  Indexed by Linux major number;
+   drivers install their request_fn at initialization time.  The
+   comments record the traditional Linux major number assignments. */
+struct blk_dev_struct blk_dev[MAX_BLKDEV] =
+{
+  { NULL, NULL },		/* 0 no_dev */
+  { NULL, NULL },		/* 1 dev mem */
+  { NULL, NULL },		/* 2 dev fd */
+  { NULL, NULL },		/* 3 dev ide0 or hd */
+  { NULL, NULL },		/* 4 dev ttyx */
+  { NULL, NULL },		/* 5 dev tty */
+  { NULL, NULL },		/* 6 dev lp */
+  { NULL, NULL },		/* 7 dev pipes */
+  { NULL, NULL },		/* 8 dev sd */
+  { NULL, NULL },		/* 9 dev st */
+  { NULL, NULL },		/* 10 */
+  { NULL, NULL },		/* 11 */
+  { NULL, NULL },		/* 12 */
+  { NULL, NULL },		/* 13 */
+  { NULL, NULL },		/* 14 */
+  { NULL, NULL },		/* 15 */
+  { NULL, NULL },		/* 16 */
+  { NULL, NULL },		/* 17 */
+  { NULL, NULL },		/* 18 */
+  { NULL, NULL },		/* 19 */
+  { NULL, NULL },		/* 20 */
+  { NULL, NULL },		/* 21 */
+  { NULL, NULL }		/* 22 dev ide1 */
+};
+
+/*
+ * blk_size contains the size of all block-devices in units of 1024 byte
+ * sectors:
+ *
+ * blk_size[MAJOR][MINOR]
+ *
+ * if (!blk_size[MAJOR]) then no minor size checking is done.
+ */
+int *blk_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/*
+ * blksize_size contains the size of all block-devices:
+ *
+ * blksize_size[MAJOR][MINOR]
+ *
+ * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
+ */
+int *blksize_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/*
+ * hardsect_size contains the size of the hardware sector of a device.
+ *
+ * hardsect_size[MAJOR][MINOR]
+ *
+ * if (!hardsect_size[MAJOR])
+ *		then 512 bytes is assumed.
+ * else
+ *		sector_size is hardsect_size[MAJOR][MINOR]
+ * This is currently set by some SCSI devices and read by the msdos fs
+ * driver.  It might find other uses later.
+ */
+int *hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/* This specifies how many sectors to read ahead on the disk.
+   This is unused in Mach. It is here to make drivers compile. */
+int read_ahead[MAX_BLKDEV] = {0, };
+
+/* Use to wait on when there are no free requests.
+   This is unused in Mach. It is here to make drivers compile. */
+struct wait_queue *wait_for_request = NULL;
+
+/* Initialize block drivers.
+   Probes IDE unless "noide" was given on the kernel command line
+   (either at the very start or after a space), then the floppy driver.
+   With no floppy driver configured, the floppy controller is quiesced
+   instead.  Always returns 0. */
+int
+blk_dev_init ()
+{
+#ifdef CONFIG_BLK_DEV_IDE
+  extern char *kernel_cmdline;
+  if (strncmp(kernel_cmdline, "noide", 5) &&
+      !strstr(kernel_cmdline, " noide"))
+    ide_init ();
+#endif
+#ifdef CONFIG_BLK_DEV_FD
+  floppy_init ();
+#else
+  /* No floppy driver: quiesce the floppy controller (0x3f2 is the FDC
+     digital output register) so it does not raise stray interrupts. */
+  outb_p (0xc, 0x3f2);
+#endif
+  return 0;
+}
+
+/* Return 1 if major number MAJOR corresponds to a disk device
+   (one of the four IDE interfaces or SCSI disk); used to decide when
+   partition/disklabel handling applies. */
+static inline int
+disk_major (int major)
+{
+  return (major == IDE0_MAJOR
+	  || major == IDE1_MAJOR
+	  || major == IDE2_MAJOR
+	  || major == IDE3_MAJOR
+	  || major == SCSI_DISK_MAJOR);
+}
+
+/* Linux kernel block support routines. */
+
+/* Register a driver for major number MAJOR,
+   with name NAME, and operations vector FOPS.
+   MAJOR == 0 requests dynamic assignment: the table is searched from
+   the top down for a free slot.  Returns 0 on success, -EBUSY if no
+   free major (or the major is taken by a different fops vector),
+   -EINVAL if MAJOR is out of range.  On success the whole entry is
+   reset (gd, labels, default_slice cleared). */
+int
+register_blkdev (unsigned major, const char *name,
+		 struct file_operations *fops)
+{
+  if (major == 0)
+    {
+      /* Dynamic major: scan downward for an unused slot. */
+      for (major = MAX_BLKDEV - 1; major > 0; major--)
+	if (blkdevs[major].fops == NULL)
+	  goto out;
+      return -EBUSY;
+    }
+  if (major >= MAX_BLKDEV)
+    return -EINVAL;
+  /* Re-registering with the same fops is treated as a no-op reset. */
+  if (blkdevs[major].fops && blkdevs[major].fops != fops)
+    return -EBUSY;
+
+out:
+  blkdevs[major].name = name;
+  blkdevs[major].fops = fops;
+  blkdevs[major].busy = 0;
+  blkdevs[major].want = 0;
+  blkdevs[major].gd = NULL;
+  blkdevs[major].default_slice = 0;
+  blkdevs[major].labels = NULL;
+  return 0;
+}
+
+/* Unregister the driver associated with
+   major number MAJOR and having the name NAME.
+   Returns 0 on success, -EINVAL if MAJOR is out of range or no driver
+   with that name is registered.  Frees the cached BSD disklabels read
+   by init_partition along with the pointer array that holds them.
+   (The original code freed only the array, leaking every label, and
+   left a dangling labels pointer behind.) */
+int
+unregister_blkdev (unsigned major, const char *name)
+{
+  if (major >= MAX_BLKDEV)
+    return -EINVAL;
+  if (! blkdevs[major].fops || strcmp (blkdevs[major].name, name))
+    return -EINVAL;
+  blkdevs[major].fops = NULL;
+  if (blkdevs[major].labels)
+    {
+      int i;
+
+      assert (blkdevs[major].gd);
+      /* Release each disklabel allocated by init_partition before
+	 releasing the array itself. */
+      for (i = 0;
+	   i < blkdevs[major].gd->max_p * blkdevs[major].gd->max_nr;
+	   i++)
+	if (blkdevs[major].labels[i])
+	  kfree ((vm_offset_t) blkdevs[major].labels[i],
+		 sizeof (struct disklabel));
+      kfree ((vm_offset_t) blkdevs[major].labels,
+	     (sizeof (struct disklabel *)
+	      * blkdevs[major].gd->max_p * blkdevs[major].gd->max_nr));
+      blkdevs[major].labels = NULL;
+    }
+  return 0;
+}
+
+/* Set the soft block size of device DEV to SIZE bytes.
+   Silently ignored when the major has no blksize_size table; panics on
+   a block size other than 512/1024/2048/4096. */
+void
+set_blocksize (kdev_t dev, int size)
+{
+  if (! blksize_size[MAJOR (dev)])
+    return;
+
+  switch (size)
+    {
+    case 512:
+    case 1024:
+    case 2048:
+    case 4096:
+      break;
+    default:
+      panic ("Invalid blocksize passed to set_blocksize");
+      break;
+    }
+  blksize_size[MAJOR (dev)][MINOR (dev)] = size;
+}
+
+/* Allocate a buffer SIZE bytes long (at most one page).
+   Outside autoconfiguration a physical page is grabbed and tracked on
+   the current thread's temp_data page list, so free_buffer must be
+   used to release it; during autoconfiguration the Linux page
+   allocator is used instead.
+   NOTE(review): grabs from VM_PAGE_DMA32 directly rather than the
+   VM_PAGE_LINUX selector defined above -- confirm that is intended. */
+static void *
+alloc_buffer (int size)
+{
+  vm_page_t m;
+  struct temp_data *d;
+
+  assert (size <= PAGE_SIZE);
+
+  if (! linux_auto_config)
+    {
+      /* Wait (blocking) until a page is available. */
+      while ((m = vm_page_grab (VM_PAGE_DMA32)) == 0)
+	VM_PAGE_WAIT (0);
+      d = current_thread ()->pcb->data;
+      assert (d);
+      list_insert_tail (&d->pages, &m->node);
+      return (void *) phystokv(m->phys_addr);
+    }
+  return (void *) __get_free_pages (GFP_KERNEL, 0, ~0UL);
+}
+
+/* Free buffer P which is SIZE bytes long.
+   Counterpart of alloc_buffer: outside autoconfiguration the page is
+   looked up on the current thread's temp_data page list and returned
+   to the VM system; panics if P is not found there.  During
+   autoconfiguration the Linux page allocator is used. */
+static void
+free_buffer (void *p, int size)
+{
+  struct temp_data *d;
+  vm_page_t m, tmp;
+
+  assert (size <= PAGE_SIZE);
+
+  if (! linux_auto_config)
+    {
+      d = current_thread ()->pcb->data;
+      assert (d);
+      list_for_each_entry_safe (&d->pages, m, tmp, node)
+	{
+	  if (phystokv(m->phys_addr) == (vm_offset_t) p)
+	    {
+	      list_remove (&m->node);
+	      VM_PAGE_FREE (m);
+	      return;
+	    }
+	}
+      /* P was not allocated by alloc_buffer on this thread. */
+      panic ("free_buffer");
+    }
+  free_pages ((unsigned long) p, 0);
+}
+
+/* Allocate a buffer of SIZE bytes and
+   associate it with block number BLOCK of device DEV.
+   Unlike Linux proper there is no buffer cache here: a fresh, locked
+   buffer_head is always allocated.  Returns NULL on allocation
+   failure.  Release with __brelse. */
+struct buffer_head *
+getblk (kdev_t dev, int block, int size)
+{
+  struct buffer_head *bh;
+
+  assert (size <= PAGE_SIZE);
+
+  bh = (struct buffer_head *) kalloc (sizeof (struct buffer_head));
+  if (bh)
+    {
+      memset (bh, 0, sizeof (struct buffer_head));
+      bh->b_data = alloc_buffer (size);
+      if (! bh->b_data)
+	{
+	  kfree ((vm_offset_t) bh, sizeof (struct buffer_head));
+	  return NULL;
+	}
+      bh->b_dev = dev;
+      bh->b_size = size;
+      bh->b_state = 1 << BH_Lock;	/* born locked, not uptodate */
+      bh->b_blocknr = block;
+    }
+  return bh;
+}
+
+/* Release buffer BH previously allocated by getblk:
+   frees both the data buffer and the buffer_head itself. */
+void
+__brelse (struct buffer_head *bh)
+{
+  free_buffer (bh->b_data, bh->b_size);
+  kfree ((vm_offset_t) bh, sizeof (*bh));
+}
+
+/* Allocate a buffer of SIZE bytes and fill it with data
+   from device DEV starting at block number BLOCK.
+   Performs a synchronous read; on I/O failure the buffer is released
+   and NULL is returned.  The caller owns the returned buffer and must
+   release it with __brelse. */
+struct buffer_head *
+bread (kdev_t dev, int block, int size)
+{
+  struct buffer_head *bh;
+
+  bh = getblk (dev, block, size);
+  if (bh)
+    {
+      ll_rw_block (READ, 1, &bh, 0);
+      wait_on_buffer (bh);
+      if (! buffer_uptodate (bh))
+	{
+	  __brelse (bh);
+	  return NULL;
+	}
+    }
+  return bh;
+}
+
+/* Return the block size for device DEV in *BSIZE and
+   log2(block size) in *BSHIFT.
+   Defaults to BLOCK_SIZE when no per-minor size is recorded.  The
+   shift loop assumes the block size is a power of two (guaranteed by
+   set_blocksize). */
+static void
+get_block_size (kdev_t dev, int *bsize, int *bshift)
+{
+  int i;
+
+  *bsize = BLOCK_SIZE;
+  if (blksize_size[MAJOR (dev)]
+      && blksize_size[MAJOR (dev)][MINOR (dev)])
+    *bsize = blksize_size[MAJOR (dev)][MINOR (dev)];
+  for (i = *bsize, *bshift = 0; i != 1; i >>= 1, (*bshift)++)
+    ;
+}
+
+/* Enqueue request REQ on a driver's queue.
+   With interrupts disabled: if the queue is empty the request is
+   installed and the driver's request_fn started immediately; otherwise
+   the request is insertion-sorted into the queue using the IN_ORDER
+   elevator ordering.  SCSI majors get their request_fn kicked on every
+   insert; other drivers pick the request up from their interrupt
+   handler. */
+static inline void
+enqueue_request (struct request *req)
+{
+  struct request *tmp;
+  struct blk_dev_struct *dev;
+
+  dev = blk_dev + MAJOR (req->rq_dev);
+  cli ();
+  tmp = dev->current_request;
+  if (! tmp)
+    {
+      dev->current_request = req;
+      (*dev->request_fn) ();
+      sti ();
+      return;
+    }
+  /* Elevator insertion: find the first position that keeps the queue
+     IN_ORDER-sorted (or follows an existing inversion). */
+  while (tmp->next)
+    {
+      if ((IN_ORDER (tmp, req) || ! IN_ORDER (tmp, tmp->next))
+	  && IN_ORDER (req, tmp->next))
+	break;
+      tmp = tmp->next;
+    }
+  req->next = tmp->next;
+  tmp->next = req;
+  if (scsi_blk_major (MAJOR (req->rq_dev)))
+    (*dev->request_fn) ();
+  sti ();
+}
+
+/* Verify that the NR buffers in BH describe sector numbers that fit in
+   an unsigned long (what the drivers use), starting from the first
+   buffer's block number.  Returns 0 if safe, -EOVERFLOW if any
+   buffer's end sector would be truncated. */
+int
+check_rw_block (int nr, struct buffer_head **bh)
+{
+  int i, bshift, bsize;
+  get_block_size (bh[0]->b_dev, &bsize, &bshift);
+  loff_t sectorl = bh[0]->b_blocknr << (bshift - 9);
+
+  for (i = 0; i < nr; i++)
+    {
+      sectorl += bh[i]->b_size >> 9;
+      /* Round-trip through unsigned long to detect truncation. */
+      unsigned long sector = sectorl;
+      if (sector != sectorl)
+	return -EOVERFLOW;
+    }
+
+  return 0;
+}
+
+/* Perform the I/O operation RW on the buffer list BH
+   containing NR buffers.
+   The buffers are chained through b_reqnext, described by a single
+   struct request and handed to the driver via enqueue_request.  QUIET
+   is passed through to suppress driver error reporting.  Outside
+   autoconfiguration the request comes from the calling thread's
+   temp_data; during autoconfiguration a single static request is used,
+   so that path is not re-entrant (NOTE(review): presumably probing is
+   single-threaded -- confirm). */
+void
+ll_rw_block (int rw, int nr, struct buffer_head **bh, int quiet)
+{
+  int i, bshift, bsize;
+  unsigned major;
+  struct request *r;
+  static struct request req;
+
+  major = MAJOR (bh[0]->b_dev);
+  assert (major < MAX_BLKDEV);
+
+  get_block_size (bh[0]->b_dev, &bsize, &bshift);
+
+  if (! linux_auto_config)
+    {
+      assert (current_thread ()->pcb->data);
+      r = &((struct temp_data *) current_thread ()->pcb->data)->req;
+    }
+  else
+    r = &req;
+
+  /* Chain the buffers and total up the sector count. */
+  for (i = 0, r->nr_sectors = 0; i < nr - 1; i++)
+    {
+      r->nr_sectors += bh[i]->b_size >> 9;
+      bh[i]->b_reqnext = bh[i + 1];
+    }
+  r->nr_sectors += bh[i]->b_size >> 9;
+  bh[i]->b_reqnext = NULL;
+
+  r->rq_status = RQ_ACTIVE;
+  r->rq_dev = bh[0]->b_dev;
+  r->cmd = rw;
+  r->errors = 0;
+  r->quiet = quiet;
+  r->sector = bh[0]->b_blocknr << (bshift - 9);
+  r->current_nr_sectors = bh[0]->b_size >> 9;
+  r->buffer = bh[0]->b_data;
+  r->bh = bh[0];
+  r->bhtail = bh[nr - 1];
+  r->sem = NULL;
+  r->next = NULL;
+
+  enqueue_request (r);
+}
+
+#define BSIZE (1 << bshift)
+#define BMASK (BSIZE - 1)
+
+/* Perform read/write operation RW on device DEV
+   starting at *off to/from buffer *BUF of size *RESID.
+   The device block size is given by BSHIFT.  *OFF and
+   *RESID may be non-multiples of the block size.
+   *OFF, *BUF and *RESID are updated if the operation
+   completed successfully.
+   Handles the unaligned head/tail of a transfer with a
+   read-modify-write cycle through a bounce buffer: the containing
+   block is read, the relevant bytes are copied in or out, and for a
+   write the whole block is written back.  The block size is trimmed at
+   the end of the device when the partition has a non-multiple sector
+   count.  Returns 0 or a negative Linux error code. */
+static int
+rdwr_partial (int rw, kdev_t dev, loff_t *off,
+	      char **buf, int *resid, int bshift)
+{
+  int c, err = 0, o;
+  long sect, nsect;
+  struct buffer_head bhead, *bh = &bhead;
+  struct gendisk *gd;
+  loff_t blkl;
+
+  memset (bh, 0, sizeof (struct buffer_head));
+  bh->b_state = 1 << BH_Lock;
+  bh->b_dev = dev;
+  blkl = *off >> bshift;
+  bh->b_blocknr = blkl;
+  if (bh->b_blocknr != blkl)
+    return -EOVERFLOW;
+  bh->b_size = BSIZE;
+
+  /* Check if this device has non even number of blocks. */
+  for (gd = gendisk_head, nsect = -1; gd; gd = gd->next)
+    if (gd->major == MAJOR (dev))
+      {
+	nsect = gd->part[MINOR (dev)].nr_sects;
+	break;
+      }
+  if (nsect > 0)
+    {
+      loff_t sectl;
+      sectl = bh->b_blocknr << (bshift - 9);
+      sect = sectl;
+      assert ((nsect - sect) > 0);
+      /* Trim the last (short) block of the device. */
+      if (nsect - sect < (BSIZE >> 9))
+	bh->b_size = (nsect - sect) << 9;
+    }
+  bh->b_data = alloc_buffer (bh->b_size);
+  if (! bh->b_data)
+    return -ENOMEM;
+  err = check_rw_block (1, &bh);
+  if (err)
+    goto out;
+  /* Always read the containing block first (read-modify-write). */
+  ll_rw_block (READ, 1, &bh, 0);
+  wait_on_buffer (bh);
+  if (buffer_uptodate (bh))
+    {
+      o = *off & BMASK;
+      c = bh->b_size - o;
+      if (c > *resid)
+	c = *resid;
+      if (rw == READ)
+	memcpy (*buf, bh->b_data + o, c);
+      else
+	{
+	  memcpy (bh->b_data + o, *buf, c);
+	  bh->b_state = (1 << BH_Dirty) | (1 << BH_Lock);
+	  err = check_rw_block (1, &bh);
+	  if (err)
+	    goto out;
+	  ll_rw_block (WRITE, 1, &bh, 0);
+	  wait_on_buffer (bh);
+	  if (! buffer_uptodate (bh))
+	    {
+	      err = -EIO;
+	      goto out;
+	    }
+	}
+      *buf += c;
+      *resid -= c;
+      *off += c;
+    }
+  else
+    err = -EIO;
+out:
+  free_buffer (bh->b_data, bh->b_size);
+  return err;
+}
+
+#define BH_Bounce 16
+#define MAX_BUF 8
+
+/* Perform read/write operation RW on device DEV
+   starting at *off to/from buffer *BUF of size *RESID.
+   The device block size is given by BSHIFT.  *OFF and
+   *RESID must be multiples of the block size.
+   *OFF, *BUF and *RESID are updated if the operation
+   completed successfully.
+   Builds up to MAX_BUF buffer_heads per call.  User memory that is
+   suitably aligned and physically direct-mappable is used in place;
+   otherwise a page-sized bounce buffer is allocated (marked with the
+   local BH_Bounce state bit) and data is copied through it.  Returns 0
+   or a negative Linux error code; processes at most one batch per
+   call, so callers loop. */
+static int
+rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
+{
+  int cc, err = 0, i, j, nb, nbuf;
+  loff_t blkl;
+  long blk, newblk;
+  struct buffer_head bhead[MAX_BUF], *bh, *bhp[MAX_BUF];
+  phys_addr_t pa;
+
+  assert ((*off & BMASK) == 0);
+
+  nbuf = *resid >> bshift;
+  blkl = *off >> bshift;
+  blk = blkl;
+  if (blk != blkl)
+    return -EOVERFLOW;
+  for (i = nb = 0, bh = bhead; nb < nbuf; bh++)
+    {
+      memset (bh, 0, sizeof (*bh));
+      bh->b_dev = dev;
+      bh->b_blocknr = blk;
+      set_bit (BH_Lock, &bh->b_state);
+      if (rw == WRITE)
+	set_bit (BH_Dirty, &bh->b_state);
+      /* Bytes remaining in the user buffer's current page. */
+      cc = PAGE_SIZE - (((int) *buf + (nb << bshift)) & PAGE_MASK);
+      pa = pmap_extract (vm_map_pmap (device_io_map),
+			 (((vm_offset_t) *buf) + (nb << bshift)));
+      /* Use the user memory directly only if it is 512-byte aligned,
+	 covers at least one block, and is physically direct-mappable;
+	 otherwise bounce through a freshly allocated page. */
+      if (cc >= BSIZE && (((int) *buf + (nb << bshift)) & 511) == 0
+	  && pa + cc <= VM_PAGE_DIRECTMAP_LIMIT)
+	cc &= ~BMASK;
+      else
+	{
+	  cc = PAGE_SIZE;
+	  set_bit (BH_Bounce, &bh->b_state);
+	}
+      if (cc > ((nbuf - nb) << bshift))
+	cc = (nbuf - nb) << bshift;
+      if (! test_bit (BH_Bounce, &bh->b_state))
+	bh->b_data = (char *) phystokv(pa);
+      else
+	{
+	  bh->b_data = alloc_buffer (cc);
+	  if (! bh->b_data)
+	    {
+	      err = -ENOMEM;
+	      break;
+	    }
+	  if (rw == WRITE)
+	    memcpy (bh->b_data, *buf + (nb << bshift), cc);
+	}
+      bh->b_size = cc;
+      bhp[i] = bh;
+      nb += cc >> bshift;
+      newblk = blk + (cc >> bshift);
+      if (newblk < blk)
+	{
+	  err = -EOVERFLOW;
+	  break;
+	}
+      blk = newblk;
+      if (++i == MAX_BUF)
+	break;
+    }
+  if (! err)
+    err = check_rw_block (i, bhp);
+  if (! err)
+    {
+      assert (i > 0);
+      ll_rw_block (rw, i, bhp, 0);
+      /* Waiting on the last buffer suffices: completion is in order. */
+      wait_on_buffer (bhp[i - 1]);
+    }
+  /* Copy bounce buffers back (for reads), collect errors, free. */
+  for (bh = bhead, cc = 0, j = 0; j < i; cc += bh->b_size, bh++, j++)
+    {
+      if (! err && buffer_uptodate (bh)
+	  && rw == READ && test_bit (BH_Bounce, &bh->b_state))
+	memcpy (*buf + cc, bh->b_data, bh->b_size);
+      else if (! err && ! buffer_uptodate (bh))
+	err = -EIO;
+      if (test_bit (BH_Bounce, &bh->b_state))
+	free_buffer (bh->b_data, bh->b_size);
+    }
+  if (! err)
+    {
+      *buf += cc;
+      *resid -= cc;
+      *off += cc;
+    }
+  return err;
+}
+
+/* Perform read/write operation RW on device DEV
+   starting at *off to/from buffer BUF of size COUNT.
+   *OFF is updated if the operation completed successfully.
+   Splits the transfer into an unaligned head (rdwr_partial), as many
+   full-block batches as needed (rdwr_full), and an unaligned tail
+   (rdwr_partial).  Returns the number of bytes transferred, or a
+   negative Linux error code (in which case the partial count is
+   lost). */
+static int
+do_rdwr (int rw, kdev_t dev, loff_t *off, char *buf, int count)
+{
+  int bsize, bshift, err = 0, resid = count;
+
+  get_block_size (dev, &bsize, &bshift);
+  if (*off & BMASK)
+    err = rdwr_partial (rw, dev, off, &buf, &resid, bshift);
+  while (resid >= bsize && ! err)
+    err = rdwr_full (rw, dev, off, &buf, &resid, bshift);
+  if (! err && resid)
+    err = rdwr_partial (rw, dev, off, &buf, &resid, bshift);
+  return err ? err : count - resid;
+}
+
+/* Linux-style file op: write COUNT bytes from BUF to the block device
+   INODE refers to, at FILP->f_pos (advanced on success).  Returns
+   bytes written or a negative Linux error code. */
+int
+block_write (struct inode *inode, struct file *filp,
+	     const char *buf, int count)
+{
+  return do_rdwr (WRITE, inode->i_rdev, &filp->f_pos, (char *) buf, count);
+}
+
+/* Linux-style file op: read COUNT bytes into BUF from the block device
+   INODE refers to, at FILP->f_pos (advanced on success).  Returns
+   bytes read or a negative Linux error code. */
+int
+block_read (struct inode *inode, struct file *filp, char *buf, int count)
+{
+  return do_rdwr (READ, inode->i_rdev, &filp->f_pos, buf, count);
+}
+
+/*
+ * This routine checks whether a removable media has been changed,
+ * and invalidates all buffer-cache-entries in that case. This
+ * is a relatively slow routine, so we have to try to minimize using
+ * it. Thus it is called only upon a 'mount' or 'open'. This
+ * is the best way of combining speed and utility, I think.
+ * People changing diskettes in the middle of an operation deserve
+ * to lose :-)
+ */
+/* Ask the driver for DEV whether removable media was changed; if so,
+   give it a chance to revalidate.  Returns 1 when a change was
+   detected, 0 otherwise (including when the major is invalid or the
+   driver provides no check_media_change hook). */
+int
+check_disk_change (kdev_t dev)
+{
+  unsigned i;
+  struct file_operations * fops;
+
+  i = MAJOR(dev);
+  if (i >= MAX_BLKDEV || (fops = blkdevs[i].fops) == NULL)
+    return 0;
+  if (fops->check_media_change == NULL)
+    return 0;
+  if (! (*fops->check_media_change) (dev))
+    return 0;
+
+  /* printf ("Disk change detected on device %s\n", kdevname(dev));*/
+
+  if (fops->revalidate)
+    (*fops->revalidate) (dev);
+
+  return 1;
+}
+
+/* Mach device interface routines. */
+
+/* Mach name to Linux major/minor number mapping table.
+   Searched linearly by find_name; entries hold the base device name
+   (without slice/partition suffix), the Linux major, the unit within
+   that major, and a read-only flag. */
+static struct name_map name_to_major[] =
+{
+  /* IDE disks */
+  { "hd0", IDE0_MAJOR, 0, 0 },
+  { "hd1", IDE0_MAJOR, 1, 0 },
+  { "hd2", IDE1_MAJOR, 0, 0 },
+  { "hd3", IDE1_MAJOR, 1, 0 },
+  { "hd4", IDE2_MAJOR, 0, 0 },
+  { "hd5", IDE2_MAJOR, 1, 0 },
+  { "hd6", IDE3_MAJOR, 0, 0 },
+  { "hd7", IDE3_MAJOR, 1, 0 },
+
+  /* IDE CDROMs */
+  { "wcd0", IDE0_MAJOR, 0, 1 },
+  { "wcd1", IDE0_MAJOR, 1, 1 },
+  { "wcd2", IDE1_MAJOR, 0, 1 },
+  { "wcd3", IDE1_MAJOR, 1, 1 },
+  { "wcd4", IDE2_MAJOR, 0, 1 },
+  { "wcd5", IDE2_MAJOR, 1, 1 },
+  { "wcd6", IDE3_MAJOR, 0, 1 },
+  { "wcd7", IDE3_MAJOR, 1, 1 },
+
+  /* SCSI disks */
+  { "sd0", SCSI_DISK_MAJOR, 0, 0 },
+  { "sd1", SCSI_DISK_MAJOR, 1, 0 },
+  { "sd2", SCSI_DISK_MAJOR, 2, 0 },
+  { "sd3", SCSI_DISK_MAJOR, 3, 0 },
+  { "sd4", SCSI_DISK_MAJOR, 4, 0 },
+  { "sd5", SCSI_DISK_MAJOR, 5, 0 },
+  { "sd6", SCSI_DISK_MAJOR, 6, 0 },
+  { "sd7", SCSI_DISK_MAJOR, 7, 0 },
+
+  /* SCSI CDROMs */
+  { "cd0", SCSI_CDROM_MAJOR, 0, 1 },
+  { "cd1", SCSI_CDROM_MAJOR, 1, 1 },
+
+  /* Floppy disks */
+  { "fd0", FLOPPY_MAJOR, 0, 0 },
+  { "fd1", FLOPPY_MAJOR, 1, 0 },
+};
+
+#define NUM_NAMES (sizeof (name_to_major) / sizeof (name_to_major[0]))
+
+/* One of these is associated with each open instance of a device.
+   Instances are chained on open_list and looked up by (dev, part,
+   mode, flags) so identical opens share an instance. */
+struct block_data
+{
+  const char *name;		/* Mach name for device */
+  int want:1;			/* someone is waiting for I/O to complete */
+  int open_count;		/* number of opens */
+  int iocount;			/* number of pending I/O operations */
+  int part;			/* BSD partition number (-1 if none) */
+  int flags;			/* Linux file flags */
+  int mode;			/* Linux file mode */
+  kdev_t dev;			/* Linux device number */
+  ipc_port_t port;		/* port representing device */
+  struct device_struct *ds;	/* driver operation table entry */
+  struct device device;		/* generic device header */
+  struct name_map *np;		/* name to inode map */
+  struct block_data *next;	/* forward link */
+};
+
+/* List of open devices. */
+static struct block_data *open_list;
+
+/* Forward declarations. */
+
+extern struct device_emulation_ops linux_block_emulation_ops;
+
+static io_return_t device_close (void *);
+static io_return_t device_close_forced (void *, int);
+
+/* Return a send right for block device BD
+   (a new send right made from the instance's kernel port), or IP_NULL
+   when BD is NULL. */
+static ipc_port_t
+dev_to_port (void *bd)
+{
+  return (bd
+	  ? ipc_port_make_send (((struct block_data *) bd)->port)
+	  : IP_NULL);
+}
+
+/* Return 1 if C is an ASCII letter of the alphabet.
+   Local replacement for <ctype.h>, which is unavailable here. */
+static inline int
+isalpha (int c)
+{
+  return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+/* Return 1 if C is an ASCII decimal digit.
+   Local replacement for <ctype.h>, which is unavailable here. */
+static inline int
+isdigit (int c)
+{
+  return c >= '0' && c <= '9';
+}
+
+/* Find the name map entry for device NAME.
+   Set *SLICE to be the DOS partition and
+   *PART the BSD/Mach partition, if any.
+   NAME has the form <letters><unit>[s<slice>][<part-letter>], e.g.
+   "hd0s1a": base name and unit select the name_to_major entry, the
+   optional "sN" suffix sets *SLICE (default 0 = whole disk or default
+   slice), and a trailing letter sets *PART ('a' == 0; default -1 =
+   none).  Returns the matching entry or NULL on a malformed name or
+   unknown base name. */
+static struct name_map *
+find_name (char *name, int *slice, int *part)
+{
+  char *p, *q;
+  int i, len;
+  struct name_map *np;
+
+  /* Parse name into name, unit, DOS partition (slice) and partition. */
+  for (*slice = 0, *part = -1, p = name; isalpha (*p); p++)
+    ;
+  if (p == name || ! isdigit (*p))
+    return NULL;
+  do
+    p++;
+  while (isdigit (*p));
+  if (*p)
+    {
+      q = p;
+      if (*q == 's' && isdigit (*(q + 1)))
+	{
+	  q++;
+	  do
+	    *slice = *slice * 10 + *q++ - '0';
+	  while (isdigit (*q));
+	  if (! *q)
+	    goto find_major;
+	}
+      /* At most one trailing partition letter is allowed. */
+      if (! isalpha (*q) || *(q + 1))
+	return NULL;
+      *part = *q - 'a';
+    }
+
+find_major:
+  /* Convert name to major number. */
+  for (i = 0, np = name_to_major; i < NUM_NAMES; i++, np++)
+    {
+      len = strlen (np->name);
+      /* Match base name + unit digits exactly (no suffix included). */
+      if (len == (p - name) && ! strncmp (np->name, name, len))
+	return np;
+    }
+  return NULL;
+}
+
+/* Attempt to read a BSD disklabel from device DEV.
+   Reads the block containing sector LBLLOC and checks both magic
+   numbers.  On success returns a kalloc'd copy of the label, owned by
+   the caller; returns NULL if the read fails or no valid label is
+   present. */
+static struct disklabel *
+read_bsd_label (kdev_t dev)
+{
+  int bsize, bshift;
+  struct buffer_head *bh;
+  struct disklabel *dlp, *lp = NULL;
+
+  get_block_size (dev, &bsize, &bshift);
+  bh = bread (dev, LBLLOC >> (bshift - 9), bsize);
+  if (bh)
+    {
+      /* Locate the label's byte offset within the block just read. */
+      dlp = (struct disklabel *) (bh->b_data + ((LBLLOC << 9) & (bsize - 1)));
+      if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC)
+	{
+	  lp = (struct disklabel *) kalloc (sizeof (*lp));
+	  assert (lp);
+	  memcpy (lp, dlp, sizeof (*lp));
+	}
+      __brelse (bh);
+    }
+  return lp;
+}
+
+/* Attempt to read a VTOC from device DEV.
+   Reads the block containing sector PDLOCATION, checks the evtoc
+   sanity word, and converts the VTOC partitions into a kalloc'd BSD
+   disklabel (all partitions marked FS_BSDFFS), capped at
+   MAXPARTITIONS.  The caller owns the returned label; NULL is returned
+   if the read fails or no valid VTOC is present. */
+static struct disklabel *
+read_vtoc (kdev_t dev)
+{
+  int bshift, bsize, i;
+  struct buffer_head *bh;
+  struct evtoc *evp;
+  struct disklabel *lp = NULL;
+
+  get_block_size (dev, &bsize, &bshift);
+  bh = bread (dev, PDLOCATION >> (bshift - 9), bsize);
+  if (bh)
+    {
+      /* Locate the VTOC's byte offset within the block just read. */
+      evp = (struct evtoc *) (bh->b_data + ((PDLOCATION << 9) & (bsize - 1)));
+      if (evp->sanity == VTOC_SANE)
+	{
+	  lp = (struct disklabel *) kalloc (sizeof (*lp));
+	  assert (lp);
+	  lp->d_npartitions = evp->nparts;
+	  if (lp->d_npartitions > MAXPARTITIONS)
+	    lp->d_npartitions = MAXPARTITIONS;
+	  for (i = 0; i < lp->d_npartitions; i++)
+	    {
+	      lp->d_partitions[i].p_size = evp->part[i].p_size;
+	      lp->d_partitions[i].p_offset = evp->part[i].p_start;
+	      lp->d_partitions[i].p_fstype = FS_BSDFFS;
+	    }
+	}
+      __brelse (bh);
+    }
+  return lp;
+}
+
+/* Initialize BSD/Mach partition table for device
+   specified by NP, DS and *DEV. Check SLICE and *PART for validity.
+   On the first call for a driver this scans every DOS slice, opening
+   it read-only and caching any BSD disklabel (or VTOC fallback) found
+   in DS->labels; the first slice with a label becomes the driver's
+   default slice.  Then *DEV is rewritten to include SLICE and the
+   requested slice/partition are bounds-checked against the gendisk and
+   cached label.  Returns 0 on success or a D_* error code.  Devices
+   without a gendisk (e.g. floppies) have no partitions: *PART is
+   forced to -1 and 0 is returned. */
+static kern_return_t
+init_partition (struct name_map *np, kdev_t *dev,
+		struct device_struct *ds, int slice, int *part)
+{
+  int i, j;
+  struct disklabel *lp;
+  struct gendisk *gd = ds->gd;
+  struct partition *p;
+  struct temp_data *d = current_thread ()->pcb->data;
+
+  if (! gd)
+    {
+      *part = -1;
+      return 0;
+    }
+  if (ds->labels)
+    goto check;
+  /* First open of this driver: build the per-minor disklabel cache. */
+  ds->labels = (struct disklabel **) kalloc (sizeof (struct disklabel *)
+					     * gd->max_nr * gd->max_p);
+  if (! ds->labels)
+    return D_NO_MEMORY;
+  memset ((void *) ds->labels, 0,
+	  sizeof (struct disklabel *) * gd->max_nr * gd->max_p);
+  for (i = 1; i < gd->max_p; i++)
+    {
+      d->inode.i_rdev = *dev | i;
+      if (gd->part[MINOR (d->inode.i_rdev)].nr_sects <= 0
+	  || gd->part[MINOR (d->inode.i_rdev)].start_sect < 0)
+	continue;
+      d->file.f_flags = 0;
+      d->file.f_mode = O_RDONLY;
+      if (ds->fops->open && (*ds->fops->open) (&d->inode, &d->file))
+	continue;
+      lp = read_bsd_label (d->inode.i_rdev);
+      if (! lp && gd->part[MINOR (d->inode.i_rdev)].nr_sects > PDLOCATION)
+	lp = read_vtoc (d->inode.i_rdev);
+      if (ds->fops->release)
+	(*ds->fops->release) (&d->inode, &d->file);
+      if (lp)
+	{
+	  if (ds->default_slice == 0)
+	    ds->default_slice = i;
+	  for (j = 0, p = lp->d_partitions; j < lp->d_npartitions; j++, p++)
+	    {
+	      if (p->p_offset < 0 || p->p_size <= 0)
+		continue;
+
+	      /* Sanity check. */
+	      if (p->p_size > gd->part[MINOR (d->inode.i_rdev)].nr_sects)
+		p->p_size = gd->part[MINOR (d->inode.i_rdev)].nr_sects;
+	    }
+	}
+      ds->labels[MINOR (d->inode.i_rdev)] = lp;
+    }
+
+check:
+  /* A BSD partition request without an explicit slice uses the
+     driver's default slice, which must exist. */
+  if (*part >= 0 && slice == 0)
+    slice = ds->default_slice;
+  if (*part >= 0 && slice == 0)
+    return D_NO_SUCH_DEVICE;
+  *dev = MKDEV (MAJOR (*dev), MINOR (*dev) | slice);
+  if (slice >= gd->max_p
+      || gd->part[MINOR (*dev)].start_sect < 0
+      || gd->part[MINOR (*dev)].nr_sects <= 0)
+    return D_NO_SUCH_DEVICE;
+  if (*part >= 0)
+    {
+      lp = ds->labels[MINOR (*dev)];
+      if (! lp
+	  || *part >= lp->d_npartitions
+	  || lp->d_partitions[*part].p_offset < 0
+	  || lp->d_partitions[*part].p_size <= 0)
+	return D_NO_SUCH_DEVICE;
+    }
+  return 0;
+}
+
+/* Convenience macros: DECL_DATA declares a struct temp_data on the
+   stack; INIT_DATA initializes it from the open instance BD (which
+   must be in scope) and publishes it through
+   current_thread ()->pcb->data for the helper routines. */
+#define DECL_DATA	struct temp_data td
+#define INIT_DATA()			\
+{					\
+  list_init (&td.pages);		\
+  td.inode.i_rdev = bd->dev;		\
+  td.file.f_mode = bd->mode;		\
+  td.file.f_flags = bd->flags;		\
+  current_thread ()->pcb->data = &td;	\
+}
+
+/* Mach device_open entry point for Linux block devices.
+   Parses NAME into driver/slice/partition, serializes against other
+   open/close calls on the same driver, initializes the partition
+   cache, and either reuses an existing open instance with identical
+   (dev, part, mode, flags) or opens the driver and creates a new
+   block_data instance with its own kernel port.  On success *DEVP
+   receives the generic device header.  If the caller supplied no valid
+   reply port the instance is closed again before returning. */
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+	     dev_mode_t mode, char *name, device_t *devp)
+{
+  int part, slice, err;
+  unsigned major, minor;
+  kdev_t dev;
+  ipc_port_t notify;
+  struct block_data *bd = NULL, *bdp;
+  struct device_struct *ds;
+  struct gendisk *gd;
+  struct name_map *np;
+  DECL_DATA;
+
+  np = find_name (name, &slice, &part);
+  if (! np)
+    return D_NO_SUCH_DEVICE;
+  major = np->major;
+  ds = &blkdevs[major];
+
+  /* Check that driver exists. */
+  if (! ds->fops)
+    return D_NO_SUCH_DEVICE;
+
+  /* Wait for any other open/close calls to finish. */
+  ds = &blkdevs[major];
+  while (ds->busy)
+    {
+      ds->want = 1;
+      assert_wait ((event_t) ds, FALSE);
+      schedule ();
+    }
+  ds->busy = 1;
+
+  /* Compute minor number. */
+  if (! ds->gd)
+    {
+      /* Lazily locate the driver's gendisk by major number. */
+      for (gd = gendisk_head; gd && gd->major != major; gd = gd->next)
+	;
+      ds->gd = gd;
+    }
+  minor = np->unit;
+  gd = ds->gd;
+  if (gd)
+    minor <<= gd->minor_shift;
+  dev = MKDEV (major, minor);
+
+  /* Publish the stack temp_data for the helpers called below. */
+  list_init (&td.pages);
+  current_thread ()->pcb->data = &td;
+
+  /* Check partition. */
+  err = init_partition (np, &dev, ds, slice, &part);
+  if (err)
+    goto out;
+
+  /* Initialize file structure. */
+  switch (mode & (D_READ|D_WRITE))
+    {
+    case D_WRITE:
+      td.file.f_mode = O_WRONLY;
+      break;
+
+    case D_READ|D_WRITE:
+      td.file.f_mode = O_RDWR;
+      break;
+
+    default:
+      td.file.f_mode = O_RDONLY;
+      break;
+    }
+  td.file.f_flags = (mode & D_NODELAY) ? O_NDELAY : 0;
+
+  /* Check if the device is currently open. */
+  for (bdp = open_list; bdp; bdp = bdp->next)
+    if (bdp->dev == dev
+	&& bdp->part == part
+	&& bdp->mode == td.file.f_mode
+	&& bdp->flags == td.file.f_flags)
+      {
+	/* Reuse the existing instance (open_count bumped below). */
+	bd = bdp;
+	goto out;
+      }
+
+  /* Open the device. */
+  if (ds->fops->open)
+    {
+      td.inode.i_rdev = dev;
+      err = (*ds->fops->open) (&td.inode, &td.file);
+      if (err)
+	{
+	  err = linux_to_mach_error (err);
+	  goto out;
+	}
+    }
+
+  /* Allocate and initialize device data. */
+  bd = (struct block_data *) kalloc (sizeof (struct block_data));
+  if (! bd)
+    {
+      err = D_NO_MEMORY;
+      goto bad;
+    }
+  bd->want = 0;
+  bd->open_count = 0;
+  bd->iocount = 0;
+  bd->part = part;
+  bd->ds = ds;
+  bd->device.emul_data = bd;
+  bd->device.emul_ops = &linux_block_emulation_ops;
+  bd->dev = dev;
+  bd->mode = td.file.f_mode;
+  bd->flags = td.file.f_flags;
+  bd->port = ipc_port_alloc_kernel ();
+  if (bd->port == IP_NULL)
+    {
+      err = KERN_RESOURCE_SHORTAGE;
+      goto bad;
+    }
+  ipc_kobject_set (bd->port, (ipc_kobject_t) &bd->device, IKOT_DEVICE);
+  /* Request a no-senders notification so the device can be torn down
+     when all send rights are gone. */
+  notify = ipc_port_make_sonce (bd->port);
+  ip_lock (bd->port);
+  ipc_port_nsrequest (bd->port, 1, notify, &notify);
+  assert (notify == IP_NULL);
+  goto out;
+
+bad:
+  if (ds->fops->release)
+    (*ds->fops->release) (&td.inode, &td.file);
+
+out:
+  ds->busy = 0;
+  if (ds->want)
+    {
+      ds->want = 0;
+      thread_wakeup ((event_t) ds);
+    }
+
+  /* Reused instance: just bump the reference on success. */
+  if (bd && bd->open_count > 0)
+    {
+      if (err)
+	*devp = NULL;
+      else
+	{
+	  *devp = &bd->device;
+	  bd->open_count++;
+	}
+      return err;
+    }
+
+  if (err)
+    {
+      if (bd)
+	{
+	  if (bd->port != IP_NULL)
+	    {
+	      ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
+	      ipc_port_dealloc_kernel (bd->port);
+	      *devp = (device_t) IP_NULL;
+	    }
+	  kfree ((vm_offset_t) bd, sizeof (struct block_data));
+	  bd = NULL;
+	}
+    }
+  else
+    {
+      /* New instance: link it onto the open list. */
+      bd->open_count = 1;
+      bd->next = open_list;
+      open_list = bd;
+      *devp = &bd -> device;
+    }
+
+  /* No way to reply to the caller: undo the open. */
+  if (!IP_VALID (reply_port) && ! err)
+    device_close (bd);
+  return err;
+}
+
+/* Close the open instance D.  When FORCE is nonzero the instance is
+   torn down regardless of its open count; otherwise teardown happens
+   only when the last open goes away.  Teardown waits for pending I/O
+   to drain, unlinks the instance from open_list, calls the driver's
+   release hook and destroys the instance's port.  Serialized against
+   other open/close calls via the driver's busy/want flags.  Always
+   returns D_SUCCESS. */
+static io_return_t
+device_close_forced (void *d, int force)
+{
+  struct block_data *bd = d, *bdp, **prev;
+  struct device_struct *ds = bd->ds;
+  DECL_DATA;
+
+  INIT_DATA ();
+
+  /* Wait for any other open/close to complete. */
+  while (ds->busy)
+    {
+      ds->want = 1;
+      assert_wait ((event_t) ds, FALSE);
+      schedule ();
+    }
+  ds->busy = 1;
+
+  if (force || --bd->open_count == 0)
+    {
+      /* Wait for pending I/O to complete. */
+      while (bd->iocount > 0)
+	{
+	  bd->want = 1;
+	  assert_wait ((event_t) bd, FALSE);
+	  schedule ();
+	}
+
+      /* Remove device from open list. */
+      prev = &open_list;
+      bdp = open_list;
+      while (bdp)
+	{
+	  if (bdp == bd)
+	    {
+	      *prev = bdp->next;
+	      break;
+	    }
+	  prev = &bdp->next;
+	  bdp = bdp->next;
+	}
+
+      assert (bdp == bd);
+
+      if (ds->fops->release)
+	(*ds->fops->release) (&td.inode, &td.file);
+
+      ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
+      ipc_port_dealloc_kernel (bd->port);
+      kfree ((vm_offset_t) bd, sizeof (struct block_data));
+    }
+
+  ds->busy = 0;
+  if (ds->want)
+    {
+      ds->want = 0;
+      thread_wakeup ((event_t) ds);
+    }
+  return D_SUCCESS;
+}
+
+/* Mach device_close entry point: drop one open reference on D,
+   tearing the instance down when the last reference goes away. */
+static io_return_t
+device_close (void *d)
+{
+  return device_close_forced (d, 0);
+}
+
+
+#define MAX_COPY (VM_MAP_COPY_PAGE_LIST_MAX << PAGE_SHIFT)
+
+/* Check block BN and size COUNT for I/O validity
+   to from device BD. Set *OFF to the byte offset
+   where I/O is to begin and return the size of transfer.
+   The limit comes from the BSD partition, the DOS slice, or the
+   driver's blk_size table, in that order of availability.  COUNT is
+   clamped when the transfer would run past the end.  For BSD
+   partitions BN is rebased from partition-relative to slice-relative
+   sectors.  Returns <= 0 when there is nothing to transfer; on a
+   positive return bd->iocount has been incremented (NOTE(review):
+   presumably decremented by the I/O completion path -- confirm). */
+static int
+check_limit (struct block_data *bd, loff_t *off, long bn, int count)
+{
+  int major, minor;
+  long maxsz, sz;
+  struct disklabel *lp = NULL;
+
+  if (count <= 0)
+    return count;
+
+  major = MAJOR (bd->dev);
+  minor = MINOR (bd->dev);
+
+  if (bd->ds->gd)
+    {
+      if (bd->part >= 0)
+	{
+	  assert (bd->ds->labels);
+	  assert (bd->ds->labels[minor]);
+	  lp = bd->ds->labels[minor];
+	  maxsz = lp->d_partitions[bd->part].p_size;
+	}
+      else
+	maxsz = bd->ds->gd->part[minor].nr_sects;
+    }
+  else
+    {
+      assert (blk_size[major]);
+      maxsz = blk_size[major][minor] << (BLOCK_SIZE_BITS - 9);
+    }
+  assert (maxsz > 0);
+  sz = maxsz - bn;
+  if (sz <= 0)
+    return sz;
+  /* Clamp COUNT (rounded up to sectors) to what remains. */
+  if (sz < ((count + 511) >> 9))
+    count = sz << 9;
+  if (lp)
+    bn += (lp->d_partitions[bd->part].p_offset
+	   - bd->ds->gd->part[minor].start_sect);
+  *off = (loff_t) bn << 9;
+  bd->iocount++;
+  return count;
+}
+
+static io_return_t
+device_write (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int orig_count,
+ int *bytes_written)
+{
+ int resid, amt, i;
+ int count = (int) orig_count;
+ io_return_t err = 0;
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ vm_offset_t addr, uaddr;
+ vm_size_t len, size;
+ struct block_data *bd = d;
+ DECL_DATA;
+
+ INIT_DATA ();
+
+ *bytes_written = 0;
+
+ if (bd->mode == O_RDONLY)
+ return D_INVALID_OPERATION;
+ if (! bd->ds->fops->write)
+ return D_READ_ONLY;
+ count = check_limit (bd, &td.file.f_pos, bn, count);
+ if (count < 0)
+ return D_INVALID_SIZE;
+ if (count == 0)
+ {
+ vm_map_copy_discard (copy);
+ return 0;
+ }
+
+ resid = count;
+ uaddr = copy->offset;
+
+ /* Allocate a kernel buffer. */
+ size = round_page (uaddr + count) - trunc_page (uaddr);
+ if (size > MAX_COPY)
+ size = MAX_COPY;
+ addr = vm_map_min (device_io_map);
+ err = vm_map_enter (device_io_map, &addr, size, 0, TRUE,
+ NULL, 0, FALSE, VM_PROT_READ|VM_PROT_WRITE,
+ VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE);
+ if (err)
+ {
+ vm_map_copy_discard (copy);
+ goto out;
+ }
+
+ /* Determine size of I/O this time around. */
+ len = size - (uaddr & PAGE_MASK);
+ if (len > resid)
+ len = resid;
+
+ while (1)
+ {
+ /* Map user pages. */
+ for (i = 0; i < copy->cpy_npages; i++)
+ pmap_enter (vm_map_pmap (device_io_map),
+ addr + (i << PAGE_SHIFT),
+ copy->cpy_page_list[i]->phys_addr,
+ VM_PROT_READ|VM_PROT_WRITE, TRUE);
+
+ /* Do the write. */
+ amt = (*bd->ds->fops->write) (&td.inode, &td.file,
+ (char *) addr + (uaddr & PAGE_MASK), len);
+
+ /* Unmap pages and deallocate copy. */
+ pmap_remove (vm_map_pmap (device_io_map),
+ addr, addr + (copy->cpy_npages << PAGE_SHIFT));
+ vm_map_copy_discard (copy);
+
+ /* Check result of write. */
+ if (amt > 0)
+ {
+ resid -= amt;
+ if (resid == 0)
+ break;
+ uaddr += amt;
+ }
+ else
+ {
+ if (amt < 0)
+ err = linux_to_mach_error (amt);
+ break;
+ }
+
+ /* Determine size of I/O this time around and copy in pages. */
+ len = round_page (uaddr + resid) - trunc_page (uaddr);
+ if (len > MAX_COPY)
+ len = MAX_COPY;
+ len -= uaddr & PAGE_MASK;
+ if (len > resid)
+ len = resid;
+ err = vm_map_copyin_page_list (current_map (), uaddr, len,
+ FALSE, FALSE, &copy, FALSE);
+ if (err)
+ break;
+ }
+
+ /* Delete kernel buffer. */
+ vm_map_remove (device_io_map, addr, addr + size);
+
+out:
+ if (--bd->iocount == 0 && bd->want)
+ {
+ bd->want = 0;
+ thread_wakeup ((event_t) bd);
+ }
+ if (IP_VALID (reply_port))
+ ds_device_write_reply (reply_port, reply_port_type, err, count - resid);
+ return MIG_NO_REPLY;
+}
+
+static io_return_t
+device_read (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, int count, io_buf_ptr_t *data,
+ unsigned *bytes_read)
+{
+ boolean_t dirty;
+ int resid, amt;
+ io_return_t err = 0;
+ struct list pages;
+ vm_map_copy_t copy;
+ vm_offset_t addr, offset, alloc_offset, o;
+ vm_object_t object;
+ vm_page_t m;
+ vm_size_t len, size;
+ struct block_data *bd = d;
+ DECL_DATA;
+
+ INIT_DATA ();
+
+ *data = 0;
+ *bytes_read = 0;
+
+ if (! bd->ds->fops->read)
+ return D_INVALID_OPERATION;
+ count = check_limit (bd, &td.file.f_pos, bn, count);
+ if (count < 0)
+ return D_INVALID_SIZE;
+ if (count == 0)
+ return 0;
+
+ /* Allocate an object to hold the data. */
+ size = round_page (count);
+ object = vm_object_allocate (size);
+ if (! object)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+ alloc_offset = offset = 0;
+ resid = count;
+
+ /* Allocate a kernel buffer. */
+ addr = vm_map_min (device_io_map);
+ if (size > MAX_COPY)
+ size = MAX_COPY;
+ err = vm_map_enter (device_io_map, &addr, size, 0, TRUE, NULL,
+ 0, FALSE, VM_PROT_READ|VM_PROT_WRITE,
+ VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE);
+ if (err)
+ goto out;
+
+ list_init (&pages);
+
+ while (resid)
+ {
+ /* Determine size of I/O this time around. */
+ len = round_page (offset + resid) - trunc_page (offset);
+ if (len > MAX_COPY)
+ len = MAX_COPY;
+
+ /* Map any pages left from previous operation. */
+ o = trunc_page (offset);
+ list_for_each_entry (&pages, m, node)
+ {
+ pmap_enter (vm_map_pmap (device_io_map),
+ addr + o - trunc_page (offset),
+ m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ o += PAGE_SIZE;
+ }
+ assert (o == alloc_offset);
+
+ /* Allocate and map pages. */
+ while (alloc_offset < trunc_page (offset) + len)
+ {
+ while ((m = vm_page_grab (VM_PAGE_LINUX)) == 0)
+ VM_PAGE_WAIT (0);
+ assert (! m->active && ! m->inactive);
+ m->busy = TRUE;
+ list_insert_tail (&pages, &m->node);
+ pmap_enter (vm_map_pmap (device_io_map),
+ addr + alloc_offset - trunc_page (offset),
+ m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ alloc_offset += PAGE_SIZE;
+ }
+
+ /* Do the read. */
+ amt = len - (offset & PAGE_MASK);
+ if (amt > resid)
+ amt = resid;
+ amt = (*bd->ds->fops->read) (&td.inode, &td.file,
+ (char *) addr + (offset & PAGE_MASK), amt);
+
+ /* Compute number of pages to insert in object. */
+ o = trunc_page (offset);
+ if (amt > 0)
+ {
+ dirty = TRUE;
+ resid -= amt;
+ if (resid == 0)
+ {
+ /* Zero any unused space. */
+ if (offset + amt < o + len)
+ memset ((void *) (addr + offset - o + amt),
+ 0, o + len - offset - amt);
+ offset = o + len;
+ }
+ else
+ offset += amt;
+ }
+ else
+ {
+ dirty = FALSE;
+ offset = o + len;
+ }
+
+ /* Unmap pages and add them to the object. */
+ pmap_remove (vm_map_pmap (device_io_map), addr, addr + len);
+ vm_object_lock (object);
+ while (o < trunc_page (offset))
+ {
+ m = list_first_entry (&pages, struct vm_page, node);
+ assert (! list_end (&pages, &m->node));
+ list_remove (&m->node);
+ assert (m->busy);
+ vm_page_lock_queues ();
+ if (dirty)
+ {
+ PAGE_WAKEUP_DONE (m);
+ m->dirty = TRUE;
+ vm_page_insert (m, object, o);
+ }
+ else
+ vm_page_free (m);
+ vm_page_unlock_queues ();
+ o += PAGE_SIZE;
+ }
+ vm_object_unlock (object);
+ if (amt <= 0)
+ {
+ if (amt < 0)
+ err = linux_to_mach_error (amt);
+ break;
+ }
+ }
+
+ /* Delete kernel buffer. */
+ vm_map_remove (device_io_map, addr, addr + size);
+
+ assert (list_empty (&pages));
+
+out:
+ if (! err)
+ err = vm_map_copyin_object (object, 0, round_page (count), &copy);
+ if (! err)
+ {
+ *data = (io_buf_ptr_t) copy;
+ *bytes_read = count - resid;
+ }
+ else
+ vm_object_deallocate (object);
+ if (--bd->iocount == 0 && bd->want)
+ {
+ bd->want = 0;
+ thread_wakeup ((event_t) bd);
+ }
+ return err;
+}
+
+static io_return_t
+device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ struct block_data *bd = d;
+
+ switch (flavor)
+ {
+ case DEV_GET_SIZE:
+ if (disk_major (MAJOR (bd->dev)))
+ {
+ assert (bd->ds->gd);
+
+ if (bd->part >= 0)
+ {
+ struct disklabel *lp;
+
+ assert (bd->ds->labels);
+ lp = bd->ds->labels[MINOR (bd->dev)];
+ assert (lp);
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = lp->d_partitions[bd->part].p_size << 9);
+ }
+ else
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = bd->ds->gd->part[MINOR (bd->dev)].nr_sects << 9);
+ }
+ else
+ {
+ assert (blk_size[MAJOR (bd->dev)]);
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)]
+ << BLOCK_SIZE_BITS));
+ }
+ /* It would be nice to return the block size as reported by
+ the driver, but a lot of user level code assumes the sector
+ size to be 512. */
+ status[DEV_GET_SIZE_RECORD_SIZE] = 512;
+ /* Always return DEV_GET_SIZE_COUNT. This is what all native
+ Mach drivers do, and makes it possible to detect the absence
+ of the call by setting it to a different value on input. MiG
+ makes sure that we will never return more integers than the
+ user asked for. */
+ *status_count = DEV_GET_SIZE_COUNT;
+ break;
+
+ case DEV_GET_RECORDS:
+ if (disk_major (MAJOR (bd->dev)))
+ {
+ assert (bd->ds->gd);
+
+ if (bd->part >= 0)
+ {
+ struct disklabel *lp;
+
+ assert (bd->ds->labels);
+ lp = bd->ds->labels[MINOR (bd->dev)];
+ assert (lp);
+ (status[DEV_GET_RECORDS_DEVICE_RECORDS]
+ = lp->d_partitions[bd->part].p_size);
+ }
+ else
+ (status[DEV_GET_RECORDS_DEVICE_RECORDS]
+ = bd->ds->gd->part[MINOR (bd->dev)].nr_sects);
+ }
+ else
+ {
+ assert (blk_size[MAJOR (bd->dev)]);
+ status[DEV_GET_RECORDS_DEVICE_RECORDS]
+ = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)]
+ << (BLOCK_SIZE_BITS - 9));
+ }
+ /* It would be nice to return the block size as reported by
+ the driver, but a lot of user level code assumes the sector
+ size to be 512. */
+ status[DEV_GET_RECORDS_RECORD_SIZE] = 512;
+ /* Always return DEV_GET_RECORDS_COUNT. This is what all native
+ Mach drivers do, and makes it possible to detect the absence
+ of the call by setting it to a different value on input. MiG
+ makes sure that we will never return more integers than the
+ user asked for. */
+ *status_count = DEV_GET_RECORDS_COUNT;
+ break;
+
+ default:
+ return D_INVALID_OPERATION;
+ }
+
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_set_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t status_count)
+{
+ struct block_data *bd = d;
+
+ switch (flavor)
+ {
+ case BLKRRPART:
+ {
+ DECL_DATA;
+ INIT_DATA();
+ return (*bd->ds->fops->ioctl) (&td.inode, &td.file, flavor, 0);
+ }
+ }
+
+ return D_INVALID_OPERATION;
+}
+
+
+static void
+device_no_senders (mach_no_senders_notification_t *ns)
+{
+ device_t dev;
+
+ dev = dev_port_lookup((ipc_port_t) ns->not_header.msgh_remote_port);
+ assert(dev);
+ device_close_forced (dev->emul_data, 1);
+}
+
+struct device_emulation_ops linux_block_emulation_ops =
+{
+ NULL,
+ NULL,
+ dev_to_port,
+ device_open,
+ device_close,
+ device_write,
+ NULL,
+ device_read,
+ NULL,
+ device_set_status,
+ device_get_status,
+ NULL,
+ NULL,
+ device_no_senders,
+ NULL,
+ NULL
+};
diff --git a/linux/dev/glue/glue.h b/linux/dev/glue/glue.h
new file mode 100644
index 0000000..e94ff55
--- /dev/null
+++ b/linux/dev/glue/glue.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_DEV_GLUE_GLUE_H
+#define LINUX_DEV_GLUE_GLUE_H
+
+#include <vm/vm_types.h>
+#include <mach/machine/vm_types.h>
+
+extern int linux_auto_config;
+
+extern unsigned long alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
+extern void free_contig_mem (vm_page_t, unsigned);
+extern void init_IRQ (void);
+extern void restore_IRQ (void);
+extern void linux_kmem_init (void);
+extern void linux_net_emulation_init (void);
+extern void device_setup (void);
+extern void linux_timer_intr (void);
+extern void linux_sched_init (void);
+extern void pcmcia_init (void);
+extern void linux_soft_intr (void);
+extern int issig (void);
+extern int linux_to_mach_error (int);
+extern char *get_options(char *str, int *ints);
+
+#endif /* LINUX_DEV_GLUE_GLUE_H */
diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c
new file mode 100644
index 0000000..509229d
--- /dev/null
+++ b/linux/dev/glue/kmem.c
@@ -0,0 +1,589 @@
+/*
+ * Linux memory allocation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ *
+ */
+
+#include <sys/types.h>
+
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+
+#include <kern/assert.h>
+#include <kern/kalloc.h>
+#include <kern/printf.h>
+
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+
+#include <linux/dev/glue/glue.h>
+
+/* Amount of memory to reserve for Linux memory allocator.
+ We reserve 64K chunks to stay within DMA limits.
+ Increase MEM_CHUNKS if the kernel is running out of memory. */
+#define MEM_CHUNK_SIZE (64 * 1024)
+#define MEM_CHUNKS 32
+#define MEM_DMA_LIMIT (16 * 1024 * 1024)
+
+ /* Minimum amount that linux_kmalloc will allocate. */
+#define MIN_ALLOC 12
+
+#ifndef NBPW
+#define NBPW 32
+#endif
+
+/* Memory block header. */
+struct blkhdr
+{
+ unsigned short free; /* 1 if block is free */
+ unsigned short size; /* size of block */
+};
+
+/* This structure heads a page allocated by linux_kmalloc. */
+struct pagehdr
+{
+ unsigned size; /* size (multiple of PAGE_SIZE) */
+ struct pagehdr *next; /* next header in list */
+};
+
+/* This structure describes a memory chunk. */
+struct chunkhdr
+{
+ unsigned long start; /* start address */
+ unsigned long end; /* end address */
+ unsigned long bitmap; /* busy/free bitmap of pages */
+};
+
+/* Chunks from which pages are allocated. */
+static struct chunkhdr pages_free[MEM_CHUNKS];
+
+/* Memory list maintained by linux_kmalloc. */
+static struct pagehdr *memlist;
+
+/* Some statistics. */
+int num_block_coalesce = 0;
+int num_page_collect = 0;
+int linux_mem_avail;
+
+/* Initialize the Linux memory allocator. */
+void
+linux_kmem_init ()
+{
+ int i, j;
+ vm_page_t p, pages;
+
+ for (i = 0; i < MEM_CHUNKS; i++)
+ {
+ /* Allocate memory. */
+ pages_free[i].start = (unsigned long) alloc_contig_mem (MEM_CHUNK_SIZE,
+ MEM_DMA_LIMIT,
+ 0xffff, &pages);
+
+ assert (pages_free[i].start);
+ assert ((pages_free[i].start & 0xffff) == 0);
+
+ /* Sanity check: ensure pages are contiguous and within DMA limits. */
+ for (p = pages, j = 0; j < MEM_CHUNK_SIZE - PAGE_SIZE; j += PAGE_SIZE)
+ {
+ assert (p->phys_addr < MEM_DMA_LIMIT);
+ assert (p->phys_addr + PAGE_SIZE == (p + 1)->phys_addr);
+ p++;
+ }
+
+ pages_free[i].end = pages_free[i].start + MEM_CHUNK_SIZE;
+
+ /* Initialize free page bitmap. */
+ pages_free[i].bitmap = 0;
+ j = MEM_CHUNK_SIZE >> PAGE_SHIFT;
+ while (--j >= 0)
+ pages_free[i].bitmap |= 1 << j;
+ }
+
+ linux_mem_avail = (MEM_CHUNKS * MEM_CHUNK_SIZE) >> PAGE_SHIFT;
+}
+
+/* Return the number by which the page size should be
+ shifted such that the resulting value is >= SIZE. */
+static unsigned long
+get_page_order (int size)
+{
+ unsigned long order;
+
+ for (order = 0; (PAGE_SIZE << order) < size; order++)
+ ;
+ return order;
+}
+
+#ifdef LINUX_DEV_DEBUG
+static void
+check_page_list (int line)
+{
+ unsigned size;
+ struct pagehdr *ph;
+ struct blkhdr *bh;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ if ((int) ph & PAGE_MASK)
+ panic ("%s:%d: page header not aligned", __FILE__, line);
+
+ size = 0;
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ size += bh->size + sizeof (struct blkhdr);
+ bh = (void *) (bh + 1) + bh->size;
+ }
+
+ if (size + sizeof (struct pagehdr) != ph->size)
+ panic ("%s:%d: memory list destroyed", __FILE__, line);
+ }
+}
+#else
+#define check_page_list(line)
+#endif
+
+/* Merge adjacent free blocks in the memory list. */
+static void
+coalesce_blocks ()
+{
+ struct pagehdr *ph;
+ struct blkhdr *bh, *bhp, *ebh;
+
+ num_block_coalesce++;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ ebh = (struct blkhdr *) ((void *) ph + ph->size);
+ while (1)
+ {
+ /* Skip busy blocks. */
+ while (bh < ebh && !bh->free)
+ bh = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bh == ebh)
+ break;
+
+ /* Merge adjacent free blocks. */
+ while (1)
+ {
+ bhp = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bhp == ebh)
+ {
+ bh = bhp;
+ break;
+ }
+ if (!bhp->free)
+ {
+ bh = (struct blkhdr *) ((void *) (bhp + 1) + bhp->size);
+ break;
+ }
+ bh->size += bhp->size + sizeof (struct blkhdr);
+ }
+ }
+ }
+}
+
+/* Allocate SIZE bytes of memory.
+ The PRIORITY parameter specifies various flags
+ such as DMA, atomicity, etc. It is not used by Mach. */
+void *
+linux_kmalloc (unsigned int size, int priority)
+{
+ int order, coalesced = 0;
+ unsigned long flags;
+ struct pagehdr *ph;
+ struct blkhdr *bh, *new_bh;
+
+ if (size < MIN_ALLOC)
+ size = MIN_ALLOC;
+ else
+ size = (size + sizeof (int) - 1) & ~(sizeof (int) - 1);
+
+ assert (size <= (MEM_CHUNK_SIZE
+ - sizeof (struct pagehdr)
+ - sizeof (struct blkhdr)));
+
+ save_flags (flags);
+ cli ();
+
+again:
+ check_page_list (__LINE__);
+
+ /* Walk the page list and find the first free block with size
+ greater than or equal to the one required. */
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ if (bh->free && bh->size >= size)
+ {
+ bh->free = 0;
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ /* Split the current block and create a new free block. */
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+ return bh + 1;
+ }
+ bh = (void *) (bh + 1) + bh->size;
+ }
+ }
+
+ check_page_list (__LINE__);
+
+ /* Allocation failed; coalesce free blocks and try again. */
+ if (!coalesced)
+ {
+ coalesce_blocks ();
+ coalesced = 1;
+ goto again;
+ }
+
+ /* Allocate more pages. */
+ order = get_page_order (size
+ + sizeof (struct pagehdr)
+ + sizeof (struct blkhdr));
+ ph = (struct pagehdr *) __get_free_pages (GFP_KERNEL, order, ~0UL);
+ if (!ph)
+ {
+ restore_flags (flags);
+ return NULL;
+ }
+
+ ph->size = PAGE_SIZE << order;
+ ph->next = memlist;
+ memlist = ph;
+ bh = (struct blkhdr *) (ph + 1);
+ bh->free = 0;
+ bh->size = ph->size - sizeof (struct pagehdr) - sizeof (struct blkhdr);
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+ return bh + 1;
+}
+
+/* Free memory P previously allocated by linux_kmalloc. */
+void
+linux_kfree (void *p)
+{
+ unsigned long flags;
+ struct blkhdr *bh;
+ struct pagehdr *ph;
+
+ assert (((int) p & (sizeof (int) - 1)) == 0);
+
+ save_flags (flags);
+ cli ();
+
+ check_page_list (__LINE__);
+
+ for (ph = memlist; ph; ph = ph->next)
+ if (p >= (void *) ph && p < (void *) ph + ph->size)
+ break;
+
+ assert (ph);
+
+ bh = (struct blkhdr *) p - 1;
+
+ assert (!bh->free);
+ assert (bh->size >= MIN_ALLOC);
+ assert ((bh->size & (sizeof (int) - 1)) == 0);
+
+ bh->free = 1;
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+}
+
+/* Free any pages that are not in use.
+ Called by __get_free_pages when pages are running low. */
+static void
+collect_kmalloc_pages ()
+{
+ struct blkhdr *bh;
+ struct pagehdr *ph, **prev_ph;
+
+ check_page_list (__LINE__);
+
+ coalesce_blocks ();
+
+ check_page_list (__LINE__);
+
+ ph = memlist;
+ prev_ph = &memlist;
+ while (ph)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ if (bh->free && (void *) (bh + 1) + bh->size == (void *) ph + ph->size)
+ {
+ *prev_ph = ph->next;
+ free_pages ((unsigned long) ph, get_page_order (ph->size));
+ ph = *prev_ph;
+ }
+ else
+ {
+ prev_ph = &ph->next;
+ ph = ph->next;
+ }
+ }
+
+ check_page_list (__LINE__);
+}
+
+ /* Allocate 2^ORDER physically contiguous pages.
+ PRIORITY and DMA are not used in Mach.
+
+ XXX: This needs to be dynamic. To do that we need to make
+ the Mach page manipulation routines interrupt safe and they
+ must provide machine dependent hooks. */
+unsigned long
+__get_free_pages (int priority, unsigned long order, int dma)
+{
+ int i, pages_collected = 0;
+ unsigned bits, off, j, len;
+ unsigned long flags;
+
+ assert ((PAGE_SIZE << order) <= MEM_CHUNK_SIZE);
+
+ /* Construct bitmap of contiguous pages. */
+ bits = 0;
+ j = 0;
+ len = 0;
+ while (len < (PAGE_SIZE << order))
+ {
+ bits |= 1 << j++;
+ len += PAGE_SIZE;
+ }
+
+ save_flags (flags);
+ cli ();
+again:
+
+ /* Search each chunk for the required number of contiguous pages. */
+ for (i = 0; i < MEM_CHUNKS; i++)
+ {
+ off = 0;
+ j = bits;
+ while (MEM_CHUNK_SIZE - off >= (PAGE_SIZE << order))
+ {
+ if ((pages_free[i].bitmap & j) == j)
+ {
+ pages_free[i].bitmap &= ~j;
+ linux_mem_avail -= order + 1;
+ restore_flags (flags);
+ return pages_free[i].start + off;
+ }
+ j <<= 1;
+ off += PAGE_SIZE;
+ }
+ }
+
+ /* Allocation failed; collect kmalloc and buffer pages
+ and try again. */
+ if (!pages_collected)
+ {
+ num_page_collect++;
+ collect_kmalloc_pages ();
+ pages_collected = 1;
+ goto again;
+ }
+
+ printf ("%s:%d: __get_free_pages: ran out of pages\n", __FILE__, __LINE__);
+
+ restore_flags (flags);
+ return 0;
+}
+
+ /* Free 2^ORDER physically contiguous
+ pages starting at address ADDR. */
+void
+free_pages (unsigned long addr, unsigned long order)
+{
+ int i;
+ unsigned bits, len, j;
+ unsigned long flags;
+
+ assert ((addr & PAGE_MASK) == 0);
+
+ for (i = 0; i < MEM_CHUNKS; i++)
+ if (addr >= pages_free[i].start && addr < pages_free[i].end)
+ break;
+
+ assert (i < MEM_CHUNKS);
+
+ /* Construct bitmap of contiguous pages. */
+ len = 0;
+ j = 0;
+ bits = 0;
+ while (len < (PAGE_SIZE << order))
+ {
+ bits |= 1 << j++;
+ len += PAGE_SIZE;
+ }
+ bits <<= (addr - pages_free[i].start) >> PAGE_SHIFT;
+
+ save_flags (flags);
+ cli ();
+
+ assert ((pages_free[i].bitmap & bits) == 0);
+
+ pages_free[i].bitmap |= bits;
+ linux_mem_avail += order + 1;
+ restore_flags (flags);
+}
+
+
+/* vmalloc management routines. */
+struct vmalloc_struct
+{
+ struct vmalloc_struct *prev;
+ struct vmalloc_struct *next;
+ vm_offset_t start;
+ vm_size_t size;
+};
+
+static struct vmalloc_struct
+vmalloc_list = { &vmalloc_list, &vmalloc_list, 0, 0 };
+
+static inline void
+vmalloc_list_insert (vm_offset_t start, vm_size_t size)
+{
+ struct vmalloc_struct *p;
+
+ p = (struct vmalloc_struct *) kalloc (sizeof (struct vmalloc_struct));
+ if (p == NULL)
+ panic ("kernel memory is exhausted");
+
+ p->prev = vmalloc_list.prev;
+ p->next = &vmalloc_list;
+ vmalloc_list.prev->next = p;
+ vmalloc_list.prev = p;
+
+ p->start = start;
+ p->size = size;
+}
+
+static struct vmalloc_struct *
+vmalloc_list_lookup (vm_offset_t start)
+{
+ struct vmalloc_struct *p;
+
+ for (p = vmalloc_list.next; p != &vmalloc_list; p = p->next)
+ {
+ if (p->start == start)
+ return p;
+ }
+
+ return NULL;
+}
+
+static inline void
+vmalloc_list_remove (struct vmalloc_struct *p)
+{
+ p->next->prev = p->prev;
+ p->prev->next = p->next;
+
+ kfree ((vm_offset_t) p, sizeof (struct vmalloc_struct));
+}
+
+/* Allocate SIZE bytes of memory. The pages need not be contiguous. */
+void *
+vmalloc (unsigned long size)
+{
+ kern_return_t ret;
+ vm_offset_t addr;
+
+ ret = kmem_alloc_wired (kernel_map, &addr, round_page (size));
+ if (ret != KERN_SUCCESS)
+ return NULL;
+
+ vmalloc_list_insert (addr, round_page (size));
+ return (void *) addr;
+}
+
+/* Free vmalloc'ed and vremap'ed virtual address space. */
+void
+vfree (void *addr)
+{
+ struct vmalloc_struct *p;
+
+ p = vmalloc_list_lookup ((vm_offset_t) addr);
+ if (!p)
+ panic ("vmalloc_list_lookup failure");
+
+ kmem_free (kernel_map, (vm_offset_t) addr, p->size);
+ vmalloc_list_remove (p);
+}
+
+unsigned long
+vmtophys (void *addr)
+{
+ return kvtophys((vm_offset_t) addr);
+}
+
+/* XXX: Quick hacking. */
+/* Remap physical address into virtual address. */
+
+#include <vm/pmap.h>
+
+void *
+vremap (unsigned long offset, unsigned long size)
+{
+ vm_offset_t addr;
+ kern_return_t ret;
+
+ assert(page_aligned(offset));
+
+ ret = kmem_valloc (kernel_map, &addr, round_page (size));
+ if (ret != KERN_SUCCESS)
+ return NULL;
+
+ (void) pmap_map_bd (addr, offset, offset + round_page (size),
+ VM_PROT_READ | VM_PROT_WRITE);
+
+ vmalloc_list_insert (addr, round_page (size));
+ return (void *) addr;
+}
diff --git a/linux/dev/glue/misc.c b/linux/dev/glue/misc.c
new file mode 100644
index 0000000..5646e5e
--- /dev/null
+++ b/linux/dev/glue/misc.c
@@ -0,0 +1,248 @@
+/*
+ * Miscellaneous routines and data for Linux emulation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/fs/proc/scsi.c
+ * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ *
+ * The original version was derived from linux/fs/proc/net.c,
+ * which is Copyright (C) 1991, 1992 Linus Torvalds.
+ * Much has been rewritten, but some of the code still remains.
+ *
+ * /proc/scsi directory handling functions
+ *
+ * last change: 95/07/04
+ *
+ * Initial version: March '95
+ * 95/05/15 Added subdirectories for each driver and show every
+ * registered HBA as a single file.
+ * 95/05/30 Added rudimentary write support for parameter passing
+ * 95/07/04 Fixed bugs in directory handling
+ * 95/09/13 Update to support the new proc-dir tree
+ *
+ * TODO: Improve support to write to the driver files
+ * Add some more comments
+ */
+
+/*
+ * linux/fs/buffer.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <mach/vm_param.h>
+#include <kern/thread.h>
+#include <kern/printf.h>
+#include <kern/mach_host.server.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <device/device_types.h>
+
+#define MACH_INCLUDE
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/blk.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel_stat.h>
+#include <linux/dev/glue/glue.h>
+
+int (*dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout) = 0;
+
+struct kernel_stat kstat;
+
+int
+linux_to_mach_error (int err)
+{
+ switch (err)
+ {
+ case 0:
+ return D_SUCCESS;
+
+ case -EPERM:
+ return D_INVALID_OPERATION;
+
+ case -EIO:
+ return D_IO_ERROR;
+
+ case -ENXIO:
+ return D_NO_SUCH_DEVICE;
+
+ case -EACCES:
+ return D_INVALID_OPERATION;
+
+ case -EFAULT:
+ return D_INVALID_SIZE;
+
+ case -EBUSY:
+ return D_ALREADY_OPEN;
+
+ case -EINVAL:
+ return D_INVALID_SIZE;
+
+ case -EROFS:
+ return D_READ_ONLY;
+
+ case -EWOULDBLOCK:
+ return D_WOULD_BLOCK;
+
+ case -ENOMEM:
+ return D_NO_MEMORY;
+
+ default:
+ printf ("linux_to_mach_error: unknown code %d\n", err);
+ return D_IO_ERROR;
+ }
+}
+
+int
+issig ()
+{
+ if (!current_thread())
+ return 0;
+ return current_thread ()->wait_result != THREAD_AWAKENED;
+}
+
+int
+block_fsync (struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+int
+verify_area (int rw, const void *p, unsigned long size)
+{
+ vm_prot_t prot = (rw == VERIFY_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
+ vm_offset_t addr = trunc_page ((vm_offset_t) p);
+ vm_size_t len = round_page ((vm_size_t) size);
+ vm_map_entry_t entry;
+
+ vm_map_lock_read (current_map ());
+
+ while (1)
+ {
+ if (!vm_map_lookup_entry (current_map (), addr, &entry)
+ || (entry->protection & prot) != prot)
+ {
+ vm_map_unlock_read (current_map ());
+ return -EFAULT;
+ }
+ if (entry->vme_end - entry->vme_start >= len)
+ break;
+ len -= entry->vme_end - entry->vme_start;
+ addr += entry->vme_end - entry->vme_start;
+ }
+
+ vm_map_unlock_read (current_map ());
+ return 0;
+}
+
+/*
+ * Print device name (in decimal, hexadecimal or symbolic) -
+ * at present hexadecimal only.
+ * Note: returns pointer to static data!
+ */
+char *
+kdevname (kdev_t dev)
+{
+ static char buffer[32];
+ linux_sprintf (buffer, "%02x:%02x", MAJOR (dev), MINOR (dev));
+ return buffer;
+}
+
+/* RO fail safe mechanism */
+
+static long ro_bits[MAX_BLKDEV][8];
+
+int
+is_read_only (kdev_t dev)
+{
+ int minor, major;
+
+ major = MAJOR (dev);
+ minor = MINOR (dev);
+ if (major < 0 || major >= MAX_BLKDEV)
+ return 0;
+ return ro_bits[major][minor >> 5] & (1 << (minor & 31));
+}
+
+void
+set_device_ro (kdev_t dev, int flag)
+{
+ int minor, major;
+
+ major = MAJOR (dev);
+ minor = MINOR (dev);
+ if (major < 0 || major >= MAX_BLKDEV)
+ return;
+ if (flag)
+ ro_bits[major][minor >> 5] |= 1 << (minor & 31);
+ else
+ ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
+}
+
+struct proc_dir_entry proc_scsi;
+struct inode_operations proc_scsi_inode_operations;
+struct proc_dir_entry proc_net;
+struct inode_operations proc_net_inode_operations;
+
+int
+proc_register (struct proc_dir_entry *xxx1, struct proc_dir_entry *xxx2)
+{
+ return 0;
+}
+
+int
+proc_unregister (struct proc_dir_entry *xxx1, int xxx2)
+{
+ return 0;
+}
+
+void
+add_blkdev_randomness (int major)
+{
+}
+
+void
+do_gettimeofday (struct timeval *tv)
+{
+ /*
+ * XXX: The first argument should be mach_host_self (), but that's too
+ * expensive, and the host argument is not used by host_get_time (),
+ * only checked not to be HOST_NULL.
+ */
+ time_value64_t tv64;
+ host_get_time64 ((host_t) 1, &tv64);
+ tv->tv_sec = tv64.seconds;
+ tv->tv_usec = tv64.nanoseconds / 1000;
+}
+
+int
+dev_get_info (char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ return 0;
+}
diff --git a/linux/dev/glue/net.c b/linux/dev/glue/net.c
new file mode 100644
index 0000000..dd80622
--- /dev/null
+++ b/linux/dev/glue/net.c
@@ -0,0 +1,670 @@
+/*
+ * Linux network driver support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Ethernet-type device handling.
+ *
+ * Version: @(#)eth.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Mr Linux : Arp problems
+ * Alan Cox : Generic queue tidyup (very tiny here)
+ * Alan Cox : eth_header ntohs should be htons
+ * Alan Cox : eth_rebuild_header missing an htons and
+ * minor other things.
+ * Tegge : Arp bug fixes.
+ * Florian : Removed many unnecessary functions, code cleanup
+ * and changes for new arp and skbuff.
+ * Alan Cox : Redid header building to reflect new format.
+ * Alan Cox : ARP only when compiled with CONFIG_INET
+ * Greg Page : 802.2 and SNAP stuff.
+ * Alan Cox : MAC layer pointers/new format.
+ * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
+ * Alan Cox : Protect against forwarding explosions with
+ * older network drivers and IFF_ALLMULTI
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <sys/types.h>
+#include <machine/spl.h>
+#include <machine/vm_param.h>
+
+#include <mach/mach_types.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+
+#include <kern/kalloc.h>
+#include <kern/printf.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/if_hdr.h>
+#include <device/net_io.h>
+#include <device/device_reply.user.h>
+#include <device/device_emul.h>
+#include <device/ds_routines.h>
+
+#define MACH_INCLUDE
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <linux/dev/glue/glue.h>
+
+/* One of these is associated with each instance of a device. */
+struct net_data
+{
+ ipc_port_t port; /* device port */
+ struct ifnet ifnet; /* Mach ifnet structure (needed for filters) */
+ struct device device; /* generic device structure */
+ struct linux_device *dev; /* Linux network device structure */
+};
+
+/* List of sk_buffs waiting to be freed. */
+static struct sk_buff_head skb_done_list;
+
+/* Forward declarations. */
+
+extern struct device_emulation_ops linux_net_emulation_ops;
+
+static int print_packet_size = 0;
+
+/* Linux kernel network support routines. */
+
+/* Requeue packet SKB for transmission after the interface DEV
+ has timed out. The priority of the packet is PRI.
+ In Mach, we simply drop the packet like the native drivers. */
+void
+dev_queue_xmit (struct sk_buff *skb, struct linux_device *dev, int pri)
+{
+ dev_kfree_skb (skb, FREE_WRITE);
+}
+
+/* Close the device DEV. */
+int
+dev_close (struct linux_device *dev)
+{
+ return 0;
+}
+
+/* Network software interrupt handler. */
+void
+net_bh (void)
+{
+ int len;
+ struct sk_buff *skb;
+ struct linux_device *dev;
+
+ /* Start transmission on interfaces. */
+ for (dev = dev_base; dev; dev = dev->next)
+ {
+ if (dev->base_addr && dev->base_addr != 0xffe0)
+ while (1)
+ {
+ skb = skb_dequeue (&dev->buffs[0]);
+ if (skb)
+ {
+ len = skb->len;
+ if ((*dev->hard_start_xmit) (skb, dev))
+ {
+ skb_queue_head (&dev->buffs[0], skb);
+ mark_bh (NET_BH);
+ break;
+ }
+ else if (print_packet_size)
+ printf ("net_bh: length %d\n", len);
+ }
+ else
+ break;
+ }
+ }
+}
+
+/* Free all sk_buffs on the done list.
+ This routine is called by the iodone thread in ds_routines.c. */
+void
+free_skbuffs ()
+{
+ struct sk_buff *skb;
+
+ while (1)
+ {
+ skb = skb_dequeue (&skb_done_list);
+ if (skb)
+ {
+ if (skb->copy)
+ {
+ vm_map_copy_discard (skb->copy);
+ skb->copy = NULL;
+ }
+ if (IP_VALID (skb->reply))
+ {
+ ds_device_write_reply (skb->reply, skb->reply_type, 0, skb->len);
+ skb->reply = IP_NULL;
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+ }
+ else
+ break;
+ }
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space. */
+struct sk_buff *
+alloc_skb (unsigned int size, int priority)
+{
+ return dev_alloc_skb (size);
+}
+
+/* Free SKB. */
+void
+kfree_skb (struct sk_buff *skb, int priority)
+{
+ dev_kfree_skb (skb, priority);
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space. */
+struct sk_buff *
+dev_alloc_skb (unsigned int size)
+{
+ struct sk_buff *skb;
+ unsigned char *bptr;
+ int len = size;
+
+ size = (size + 15) & ~15;
+ size += sizeof (struct sk_buff);
+
+ bptr = linux_kmalloc (size, GFP_KERNEL);
+ if (bptr == NULL)
+ return NULL;
+
+ /* XXX: In Mach, a sk_buff is located at the head,
+ while it's located at the tail in Linux. */
+ skb = bptr;
+ skb->dev = NULL;
+ skb->reply = IP_NULL;
+ skb->copy = NULL;
+ skb->len = 0;
+ skb->prev = skb->next = NULL;
+ skb->list = NULL;
+ skb->data = bptr + sizeof (struct sk_buff);
+ skb->tail = skb->data;
+ skb->head = skb->data;
+ skb->end = skb->data + len;
+
+ return skb;
+}
+
+/* Free the sk_buff SKB. */
+void
+dev_kfree_skb (struct sk_buff *skb, int mode)
+{
+ unsigned flags;
+
+ /* Queue sk_buff on done list if there is a
+ page list attached or we need to send a reply.
+ Wakeup the iodone thread to process the list. */
+ if (skb->copy || IP_VALID (skb->reply))
+ {
+ skb_queue_tail (&skb_done_list, skb);
+ save_flags (flags);
+ thread_wakeup ((event_t) & io_done_list);
+ restore_flags (flags);
+ return;
+ }
+ linux_kfree (skb);
+}
+
+/* Accept packet SKB received on an interface. */
+void
+netif_rx (struct sk_buff *skb)
+{
+ ipc_kmsg_t kmsg;
+ struct ether_header *eh;
+ struct packet_header *ph;
+ struct linux_device *dev = skb->dev;
+
+ assert (skb != NULL);
+
+ if (print_packet_size)
+ printf ("netif_rx: length %ld\n", skb->len);
+
+ /* Allocate a kernel message buffer. */
+ kmsg = net_kmsg_get ();
+ if (!kmsg)
+ {
+ dev_kfree_skb (skb, FREE_READ);
+ return;
+ }
+
+ /* Copy packet into message buffer. */
+ eh = (struct ether_header *) (net_kmsg (kmsg)->header);
+ ph = (struct packet_header *) (net_kmsg (kmsg)->packet);
+ memcpy (eh, skb->data, sizeof (struct ether_header));
+
+ /* packet is prefixed with a struct packet_header,
+ see include/device/net_status.h. */
+ memcpy (ph + 1, skb->data + sizeof (struct ether_header),
+ skb->len - sizeof (struct ether_header));
+ ph->type = eh->ether_type;
+ ph->length = (skb->len - sizeof (struct ether_header)
+ + sizeof (struct packet_header));
+
+ dev_kfree_skb (skb, FREE_READ);
+
+ net_kmsg(kmsg)->sent = FALSE; /* Mark packet as received. */
+
+ /* Pass packet up to the microkernel. */
+ net_packet (&dev->net_data->ifnet, kmsg,
+ ph->length, ethernet_priority (kmsg));
+}
+
+/* Mach device interface routines. */
+
+/* Return a send right associated with network device ND. */
+static ipc_port_t
+dev_to_port (void *nd)
+{
+ return (nd
+ ? ipc_port_make_send (((struct net_data *) nd)->port)
+ : IP_NULL);
+}
+
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, char *name, device_t *devp)
+{
+ io_return_t err = D_SUCCESS;
+ ipc_port_t notify;
+ struct ifnet *ifp;
+ struct linux_device *dev;
+ struct net_data *nd;
+
+ /* Search for the device. */
+ for (dev = dev_base; dev; dev = dev->next)
+ if (dev->base_addr
+ && dev->base_addr != 0xffe0
+ && !strcmp (name, dev->name))
+ break;
+ if (!dev)
+ return D_NO_SUCH_DEVICE;
+
+ /* Allocate and initialize device data if this is the first open. */
+ nd = dev->net_data;
+ if (!nd)
+ {
+ dev->net_data = nd = ((struct net_data *)
+ kalloc (sizeof (struct net_data)));
+ if (!nd)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+ nd->dev = dev;
+ nd->device.emul_data = nd;
+ nd->device.emul_ops = &linux_net_emulation_ops;
+ nd->port = ipc_port_alloc_kernel ();
+ if (nd->port == IP_NULL)
+ {
+ err = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ ipc_kobject_set (nd->port, (ipc_kobject_t) & nd->device, IKOT_DEVICE);
+ notify = ipc_port_make_sonce (nd->port);
+ ip_lock (nd->port);
+ ipc_port_nsrequest (nd->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+
+ ifp = &nd->ifnet;
+ ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
+ ifp->if_flags = IFF_UP | IFF_RUNNING;
+ ifp->if_mtu = dev->mtu;
+ ifp->if_header_size = dev->hard_header_len;
+ ifp->if_header_format = dev->type;
+ ifp->if_address_size = dev->addr_len;
+ ifp->if_address = dev->dev_addr;
+ if_init_queues (ifp);
+
+ if (dev->open)
+ {
+ if ((*dev->open) (dev))
+ err = D_NO_SUCH_DEVICE;
+ }
+
+ out:
+ if (err)
+ {
+ if (nd)
+ {
+ if (nd->port != IP_NULL)
+ {
+ ipc_kobject_set (nd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (nd->port);
+ }
+ kfree ((vm_offset_t) nd, sizeof (struct net_data));
+ nd = NULL;
+ dev->net_data = NULL;
+ }
+ }
+ else
+ {
+ /* IPv6 heavily relies on multicasting (especially router and
+ neighbor solicits and advertisements), so enable reception of
+ those multicast packets by setting `LINUX_IFF_ALLMULTI'. */
+ dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI;
+ skb_queue_head_init (&dev->buffs[0]);
+
+ if (dev->set_multicast_list)
+ dev->set_multicast_list (dev);
+ }
+ if (IP_VALID (reply_port))
+ ds_device_open_reply (reply_port, reply_port_type,
+ err, dev_to_port (nd));
+ return MIG_NO_REPLY;
+ }
+
+ *devp = &nd->device;
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_write (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ int s;
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ char *map_data;
+ vm_offset_t map_addr;
+ vm_size_t map_size;
+ struct net_data *nd = d;
+ struct linux_device *dev = nd->dev;
+ struct sk_buff *skb;
+ kern_return_t kr;
+
+ if (count == 0 || count > dev->mtu + dev->hard_header_len)
+ return D_INVALID_SIZE;
+
+ /* Allocate a sk_buff. */
+ skb = dev_alloc_skb (count);
+ if (!skb)
+ return D_NO_MEMORY;
+
+ /* Map user data. */
+ kr = kmem_io_map_copyout(device_io_map, (vm_offset_t *)&map_data,
+ &map_addr, &map_size, copy, count);
+
+ if (kr) {
+ dev_kfree_skb (skb, FREE_WRITE);
+ return D_NO_MEMORY;
+ }
+
+ /* XXX The underlying physical pages of the mapping could be highmem,
+ for which drivers require the use of a bounce buffer. */
+ memcpy (skb->data, map_data, count);
+ kmem_io_map_deallocate (device_io_map, map_addr, map_size);
+ vm_map_copy_discard (copy);
+
+ skb->len = count;
+ skb->head = skb->data;
+ skb->tail = skb->data + skb->len;
+ skb->end = skb->tail;
+ skb->dev = dev;
+ skb->reply = reply_port;
+ skb->reply_type = reply_port_type;
+
+ /* Queue packet for transmission and schedule a software interrupt. */
+ s = splimp ();
+ if (dev->buffs[0].next != (struct sk_buff *) &dev->buffs[0]
+ || (*dev->hard_start_xmit) (skb, dev))
+ {
+ __skb_queue_tail (&dev->buffs[0], skb);
+ mark_bh (NET_BH);
+ }
+ splx (s);
+
+ /* Send packet to filters. */
+ {
+ struct packet_header *packet;
+ struct ether_header *header;
+ ipc_kmsg_t kmsg;
+
+ kmsg = net_kmsg_get ();
+
+ if (kmsg != IKM_NULL)
+ {
+ /* Suitable for Ethernet only. */
+ header = (struct ether_header *) (net_kmsg (kmsg)->header);
+ packet = (struct packet_header *) (net_kmsg (kmsg)->packet);
+ memcpy (header, skb->data, sizeof (struct ether_header));
+
+ /* packet is prefixed with a struct packet_header,
+ see include/device/net_status.h. */
+ memcpy (packet + 1, skb->data + sizeof (struct ether_header),
+ skb->len - sizeof (struct ether_header));
+ packet->length = skb->len - sizeof (struct ether_header)
+ + sizeof (struct packet_header);
+ packet->type = header->ether_type;
+ net_kmsg (kmsg)->sent = TRUE; /* Mark packet as sent. */
+ s = splimp ();
+ net_packet (&dev->net_data->ifnet, kmsg, packet->length,
+ ethernet_priority (kmsg));
+ splx (s);
+ }
+ }
+
+ return MIG_NO_REPLY;
+}
+
+
+static io_return_t
+device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *count)
+{
+ if (flavor == NET_FLAGS)
+ {
+ struct net_data *net = (struct net_data *) d;
+
+ if (*count != 1)
+ return D_INVALID_SIZE;
+
+ status[0] = net->dev->flags;
+ return D_SUCCESS;
+ }
+
+ if(flavor >= SIOCIWFIRST && flavor <= SIOCIWLAST)
+ {
+ /* handle wireless ioctl */
+ if(! IW_IS_GET(flavor))
+ return D_INVALID_OPERATION;
+
+ if(*count * sizeof(int) < sizeof(struct ifreq))
+ return D_INVALID_OPERATION;
+
+ struct net_data *nd = d;
+ struct linux_device *dev = nd->dev;
+
+ if(! dev->do_ioctl)
+ return D_INVALID_OPERATION;
+
+ int result;
+
+ if (flavor == SIOCGIWRANGE || flavor == SIOCGIWENCODE
+ || flavor == SIOCGIWESSID || flavor == SIOCGIWNICKN
+ || flavor == SIOCGIWSPY)
+ {
+ /*
+ * These ioctls require an `iw_point' as their argument (i.e.
+ * they want to return some data to userspace.
+ * Therefore supply some sane values and carry the data back
+ * to userspace right behind the `struct iwreq'.
+ */
+ struct iw_point *iwp = &((struct iwreq *) status)->u.data;
+ iwp->length = *count * sizeof (dev_status_t) - sizeof (struct ifreq);
+ iwp->pointer = (void *) status + sizeof (struct ifreq);
+
+ result = dev->do_ioctl (dev, (struct ifreq *) status, flavor);
+
+ *count = ((sizeof (struct ifreq) + iwp->length)
+ / sizeof (dev_status_t));
+ if (iwp->length % sizeof (dev_status_t))
+ (*count) ++;
+ }
+ else
+ {
+ *count = sizeof(struct ifreq) / sizeof(int);
+ result = dev->do_ioctl(dev, (struct ifreq *) status, flavor);
+ }
+
+ return result ? D_IO_ERROR : D_SUCCESS;
+ }
+ else
+ {
+ /* common get_status request */
+ return net_getstat (&((struct net_data *) d)->ifnet, flavor,
+ status, count);
+ }
+}
+
+
+static io_return_t
+device_set_status(void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t count)
+{
+ if (flavor == NET_FLAGS)
+ {
+ if (count != 1)
+ return D_INVALID_SIZE;
+
+ short flags = status[0];
+ struct net_data *net = (struct net_data *) d;
+
+ dev_change_flags (net->dev, flags);
+
+ /* Change the flags of the Mach device, too. */
+ net->ifnet.if_flags = net->dev->flags;
+ return D_SUCCESS;
+ }
+
+ if(flavor < SIOCIWFIRST || flavor > SIOCIWLAST)
+ return D_INVALID_OPERATION;
+
+ if(! IW_IS_SET(flavor))
+ return D_INVALID_OPERATION;
+
+ if(count * sizeof(int) < sizeof(struct ifreq))
+ return D_INVALID_OPERATION;
+
+ struct net_data *nd = d;
+ struct linux_device *dev = nd->dev;
+
+ if(! dev->do_ioctl)
+ return D_INVALID_OPERATION;
+
+ if((flavor == SIOCSIWENCODE || flavor == SIOCSIWESSID
+ || flavor == SIOCSIWNICKN || flavor == SIOCSIWSPY)
+ && ((struct iwreq *) status)->u.data.pointer)
+ {
+ struct iw_point *iwp = &((struct iwreq *) status)->u.data;
+
+ /* safety check whether the status array is long enough ... */
+ if(count * sizeof(int) < sizeof(struct ifreq) + iwp->length)
+ return D_INVALID_OPERATION;
+
+ /* make sure, iwp->pointer points to the correct address */
+ if(iwp->pointer) iwp->pointer = (void *) status + sizeof(struct ifreq);
+ }
+
+ int result = dev->do_ioctl(dev, (struct ifreq *) status, flavor);
+ return result ? D_IO_ERROR : D_SUCCESS;
+}
+
+
+static io_return_t
+device_set_filter (void *d, ipc_port_t port, int priority,
+ filter_t * filter, unsigned filter_count)
+{
+ return net_set_filter (&((struct net_data *) d)->ifnet,
+ port, priority, filter, filter_count);
+}
+
+struct device_emulation_ops linux_net_emulation_ops =
+{
+ NULL,
+ NULL,
+ dev_to_port,
+ device_open,
+ NULL,
+ device_write,
+ NULL,
+ NULL,
+ NULL,
+ device_set_status,
+ device_get_status,
+ device_set_filter,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+/* Do any initialization required for network devices. */
+void
+linux_net_emulation_init ()
+{
+ skb_queue_head_init (&skb_done_list);
+}
diff --git a/linux/dev/include/ahci.h b/linux/dev/include/ahci.h
new file mode 100644
index 0000000..31977b6
--- /dev/null
+++ b/linux/dev/include/ahci.h
@@ -0,0 +1,268 @@
+#ifndef _GNUMACH_AHCI_H
+#define _GNUMACH_AHCI_H
+extern void ahci_probe_pci(void);
+
+/* From linux 3.9's drivers/ata/ahci.h */
+
+/*
+ * ahci.h - Common AHCI SATA definitions and declarations
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+enum {
+ AHCI_MAX_PORTS = 32,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
+ AHCI_RX_FIS_SZ = 256,
+ AHCI_CMD_TBL_CDB = 0x40,
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+ AHCI_RX_FIS_SZ,
+ AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
+ AHCI_CMD_TBL_AR_SZ +
+ (AHCI_RX_FIS_SZ * 16),
+ AHCI_IRQ_ON_SG = (1 << 31),
+ AHCI_CMD_ATAPI = (1 << 5),
+ AHCI_CMD_WRITE = (1 << 6),
+ AHCI_CMD_PREFETCH = (1 << 7),
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
+ RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+ HOST_CTL = 0x04, /* global host control */
+ HOST_IRQ_STAT = 0x08, /* interrupt status */
+ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
+ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
+ HOST_EM_LOC = 0x1c, /* Enclosure Management location */
+ HOST_EM_CTL = 0x20, /* Enclosure Management Control */
+ HOST_CAP2 = 0x24, /* host capabilities, extended */
+
+ /* HOST_CTL bits */
+ HOST_RESET = (1 << 0), /* reset controller; self-clear */
+ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+
+ /* HOST_CAP bits */
+ HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
+ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
+ HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
+ HOST_CAP_PART = (1 << 13), /* Partial state capable */
+ HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
+ HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
+ HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
+ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
+ HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
+ HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_LED = (1 << 25), /* Supports activity LED */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+ /* HOST_CAP2 bits */
+ HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
+ HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
+ HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
+ HOST_CAP2_SDS = (1 << 3), /* Support device sleep */
+ HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */
+ HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */
+
+ /* registers for each SATA port */
+ PORT_LST_ADDR = 0x00, /* command list DMA addr */
+ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
+ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
+ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
+ PORT_IRQ_STAT = 0x10, /* interrupt status */
+ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
+ PORT_CMD = 0x18, /* port command */
+ PORT_TFDATA = 0x20, /* taskfile data */
+ PORT_SIG = 0x24, /* device TF signature */
+ PORT_CMD_ISSUE = 0x38, /* command issue */
+ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
+ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
+ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
+ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+ PORT_FBS = 0x40, /* FIS-based Switching */
+ PORT_DEVSLP = 0x44, /* device sleep */
+
+ /* PORT_IRQ_{STAT,MASK} bits */
+ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
+ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
+ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
+ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_PHYRDY |
+ PORT_IRQ_UNK_FIS |
+ PORT_IRQ_BAD_PMP,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+ /* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
+ PORT_CMD_PMP = (1 << 17), /* PMP attached */
+ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = (1 << 3), /* Command list override */
+ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
+ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
+ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+
+ /* PORT_FBS bits */
+ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
+ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
+ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
+ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
+ PORT_FBS_SDE = (1 << 2), /* FBS single device error */
+ PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
+ PORT_FBS_EN = (1 << 0), /* Enable FBS */
+
+ /* PORT_DEVSLP bits */
+ PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */
+ PORT_DEVSLP_DM_MASK = (0xf << 25), /* DITO multiplier mask */
+ PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */
+ PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */
+ PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */
+ PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */
+ PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */
+
+ /* hpriv->flags bits */
+
+#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
+
+ AHCI_HFLAG_NO_NCQ = (1 << 0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
+ link offline */
+ AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
+ AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+ AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
+ AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
+ port start (wait until
+ error-handling stage) */
+ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
+
+ /* ap->flags bits */
+
+ /*
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
+ */
+
+ ICH_MAP = 0x90, /* ICH MAP register */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
+ /* em_ctl bits */
+ EM_CTL_RST = (1 << 9), /* Reset */
+ EM_CTL_TM = (1 << 8), /* Transmit Message */
+ EM_CTL_MR = (1 << 0), /* Message Received */
+ EM_CTL_ALHD = (1 << 26), /* Activity LED */
+ EM_CTL_XMT = (1 << 25), /* Transmit Only */
+ EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
+ EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */
+ EM_CTL_SES = (1 << 18), /* SES-2 messages supported */
+ EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */
+ EM_CTL_LED = (1 << 16), /* LED messages supported */
+
+ /* em message type */
+ EM_MSG_TYPE_LED = (1 << 0), /* LED */
+ EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
+ EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
+ EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
+
+ FIS_TYPE_REG_H2D = 0x27,
+ FIS_TYPE_REG_D2H = 0x34,
+ FIS_TYPE_DMA_ACT = 0x39,
+ FIS_TYPE_DMA_SETUP = 0x41,
+ FIS_TYPE_DATA = 0x46,
+ FIS_TYPE_BIST = 0x58,
+ FIS_TYPE_PIO_SETUP = 0x5F,
+ FIS_TYPE_DEV_BITS = 0xA1,
+};
+
+/* End from linux 3.9 */
+
+#endif /* _GNUMACH_AHCI_H */
diff --git a/linux/dev/include/asm-i386/page.h b/linux/dev/include/asm-i386/page.h
new file mode 100644
index 0000000..be81848
--- /dev/null
+++ b/linux/dev/include/asm-i386/page.h
@@ -0,0 +1,59 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+#include <mach/vm_param.h>
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map.. */
+#define PAGE_OFFSET 0
+#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
diff --git a/linux/dev/include/asm-i386/smp.h b/linux/dev/include/asm-i386/smp.h
new file mode 100644
index 0000000..fabe01d
--- /dev/null
+++ b/linux/dev/include/asm-i386/smp.h
@@ -0,0 +1,8 @@
+#ifndef _I386_SMP_H
+#define _I386_SMP_H
+
+#include <machine/cpu_number.h>
+
+#define smp_processor_id() cpu_number()
+
+#endif /* _I386_SMP_H */
diff --git a/linux/dev/include/asm-i386/string.h b/linux/dev/include/asm-i386/string.h
new file mode 100644
index 0000000..f41ca5c
--- /dev/null
+++ b/linux/dev/include/asm-i386/string.h
@@ -0,0 +1,487 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ *
+ * Also, the byte strings actually work correctly. Forget
+ * the i486 routines for now as they may be broken..
+ */
+#if FIXED_486_STRING && (CPU == 486 || CPU == 586)
+#include <asm/string-486.h>
+#else
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, this should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strtok,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used through-out, making for "slightly" unclear code :-)
+ *
+ * NO Copyright (C) 1991, 1992 Linus Torvalds,
+ * consider these trivial functions to be PD.
+ */
+
+#define __HAVE_ARCH_STRCPY
+static inline char * strcpy(char * dest,const char *src)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2)
+ :"0" (src),"1" (dest) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char * strncpy(char * dest,const char *src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %2\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "rep\n\t"
+ "stosb\n"
+ "2:"
+ : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
+ :"0" (src),"1" (dest),"2" (count) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+static inline char * strcat(char * dest,const char * src)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+static inline char * strncat(char * dest,const char * src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n\t"
+ "movl %8,%3\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %2,%2\n\t"
+ "stosb"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
+ : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char * cs,const char * ct)
+{
+int d0, d1;
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "scasb\n\t"
+ "jne 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "jmp 3f\n"
+ "2:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "3:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1)
+ :"1" (cs),"2" (ct));
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "scasb\n\t"
+ "jne 3f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %%eax,%%eax\n\t"
+ "jmp 4f\n"
+ "3:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "4:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+ :"1" (cs),"2" (ct),"3" (count));
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+static inline char * strchr(const char * s, int c)
+{
+int d0;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "je 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "movl $1,%1\n"
+ "2:\tmovl %1,%0\n\t"
+ "decl %0"
+ :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+static inline char * strrchr(const char * s, int c)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "jne 2f\n\t"
+ "leal -1(%%esi),%0\n"
+ "2:\ttestb %%al,%%al\n\t"
+ "jne 1b"
+ :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+static inline size_t strlen(const char * s)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %0\n\t"
+ "decl %0"
+ :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
+return __res;
+}
+
+static inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+ : "memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+ switch (n) {
+ case 0:
+ return to;
+ case 1:
+ *(unsigned char *)to = *(const unsigned char *)from;
+ return to;
+ case 2:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ return to;
+ case 3:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ *(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
+ return to;
+ case 4:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ return to;
+ case 6: /* for Ethernet addresses */
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
+ return to;
+ case 8:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ return to;
+ case 12:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ return to;
+ case 16:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ return to;
+ case 20:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ *(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
+ return to;
+ }
+#define COMMON(x) \
+__asm__ __volatile__( \
+ "cld\n\t" \
+ "rep ; movsl" \
+ x \
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2) \
+ : "0" (n/4),"1" ((long) to),"2" ((long) from) \
+ : "memory");
+{
+ int d0, d1, d2;
+ switch (n % 4) {
+ case 0: COMMON(""); return to;
+ case 1: COMMON("\n\tmovsb"); return to;
+ case 2: COMMON("\n\tmovsw"); return to;
+ default: COMMON("\n\tmovsw\n\tmovsb"); return to;
+ }
+}
+
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#define __HAVE_ARCH_MEMMOVE
+static inline void * memmove(void * dest,const void * src, size_t n)
+{
+int d0, d1, d2;
+if (dest<src)
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "movsb"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),"1" (src),"2" (dest)
+ : "memory");
+else
+__asm__ __volatile__(
+ "std\n\t"
+ "rep\n\t"
+ "movsb\n\t"
+ "cld"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),
+ "1" (n-1+(const char *)src),
+ "2" (n-1+(char *)dest)
+ :"memory");
+return dest;
+}
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+static inline void * memchr(const void * cs,int c,size_t count)
+{
+int d0;
+register void * __res;
+if (!count)
+ return NULL;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1f\n\t"
+ "movl $1,%0\n"
+ "1:\tdecl %0"
+ :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
+return __res;
+}
+
+static inline void * __memset_generic(void * s, char c,size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "stosb"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c),"1" (s),"0" (count)
+ :"memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; stosl\n\t"
+ "testb $2,%b3\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b3\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+ :"memory");
+return (s);
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+static inline size_t strnlen(const char * s, size_t count)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "movl %2,%0\n\t"
+ "jmp 2f\n"
+ "1:\tcmpb $0,(%0)\n\t"
+ "je 3f\n\t"
+ "incl %0\n"
+ "2:\tdecl %1\n\t"
+ "cmpl $-1,%1\n\t"
+ "jne 1b\n"
+ "3:\tsubl %2,%0"
+ :"=a" (__res), "=&d" (d0)
+ :"c" (s),"1" (count));
+return __res;
+}
+/* end of additional stuff */
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count is constant..
+ */
+static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern;
+ *(2+(unsigned char *)s) = pattern;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+#define COMMON(x) \
+__asm__ __volatile__("cld\n\t" \
+ "rep ; stosl" \
+ x \
+ : "=&c" (d0), "=&D" (d1) \
+ : "a" (pattern),"0" (count/4),"1" ((long) s) \
+ : "memory")
+{
+ int d0, d1;
+ switch (count % 4) {
+ case 0: COMMON(""); return s;
+ case 1: COMMON("\n\tstosb"); return s;
+ case 2: COMMON("\n\tstosw"); return s;
+ default: COMMON("\n\tstosw\n\tstosb"); return s;
+ }
+}
+
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+static inline void * memscan(void * addr, int c, size_t size)
+{
+ if (!size)
+ return addr;
+ __asm__("cld\n"
+ "repnz; scasb\n"
+ "jnz 1f\n"
+ "dec %%edi\n"
+ "1:\n"
+ : "=D" (addr), "=c" (size)
+ : "0" (addr), "1" (size), "a" (c));
+ return addr;
+}
+
+#endif
+#endif
diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h
new file mode 100644
index 0000000..5187c5e
--- /dev/null
+++ b/linux/dev/include/asm-i386/system.h
@@ -0,0 +1,356 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <i386/ipl.h> /* curr_ipl[], splx */
+#include <kern/cpu_number.h>
+
+#include <asm/segment.h>
+
+/*
+ * Entry into gdt where to find first TSS. GDT layout:
+ * 0 - null
+ * 1 - not used
+ * 2 - kernel code segment
+ * 3 - kernel data segment
+ * 4 - user code segment
+ * 5 - user data segment
+ * ...
+ * 8 - TSS #0
+ * 9 - LDT #0
+ * 10 - TSS #1
+ * 11 - LDT #1
+ */
+#define FIRST_TSS_ENTRY 8
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
+#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
+#define store_TR(n) \
+__asm__("str %%ax\n\t" \
+ "subl %2,%%eax\n\t" \
+ "shrl $4,%%eax" \
+ :"=a" (n) \
+ :"0" (0),"i" (FIRST_TSS_ENTRY<<3))
+
+/* This special macro can be used to load a debugging register */
+
+#define loaddebug(tsk,register) \
+ __asm__("movl %0,%%edx\n\t" \
+ "movl %%edx,%%db" #register "\n\t" \
+ : /* no output */ \
+ :"m" (tsk->debugreg[register]) \
+ :"dx");
+
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * This also clears the TS-flag if the task we switched to has used
+ * the math co-processor latest.
+ *
+ * It also reloads the debug regs if necessary..
+ */
+
+
+#ifdef __SMP__
+ /*
+ * Keep the lock depth straight. If we switch on an interrupt from
+ * kernel->user task we need to lose a depth, and if we switch the
+ * other way we need to gain a depth. Same layer switches come out
+ * the same.
+ *
+ * We spot a switch in user mode because the kernel counter is the
+ * same as the interrupt counter depth. (We never switch during the
+ * message/invalidate IPI).
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process.
+ */
+
+#define switch_to(prev,next) do { \
+ cli();\
+ if(prev->flags&PF_USEDFPU) \
+ { \
+ __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
+ __asm__ __volatile__("fwait"); \
+ prev->flags&=~PF_USEDFPU; \
+ } \
+ prev->lock_depth=syscall_count; \
+ kernel_counter+=next->lock_depth-prev->lock_depth; \
+ syscall_count=next->lock_depth; \
+__asm__("pushl %%edx\n\t" \
+ "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
+ "movl 0x20(%%edx), %%edx\n\t" \
+ "shrl $22,%%edx\n\t" \
+ "and $0x3C,%%edx\n\t" \
+ "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
+ "popl %%edx\n\t" \
+ "ljmp %0\n\t" \
+ "sti\n\t" \
+ : /* no output */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "c" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+
+#else
+#define switch_to(prev,next) do { \
+__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \
+ "ljmp %0\n\t" \
+ "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
+ "jne 1f\n\t" \
+ "clts\n" \
+ "1:" \
+ : /* no outputs */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "r" (prev), "r" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+#endif
+
+#define _set_base(addr,base) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%1\n\t" \
+ "movb %%dh,%2" \
+ : /* no output */ \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "d" (base) \
+ :"dx")
+
+#define _set_limit(addr,limit) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %1,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%1" \
+ : /* no output */ \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "d" (limit) \
+ :"dx")
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define stts() \
+__asm__ __volatile__ ( \
+ "movl %%cr0,%%eax\n\t" \
+ "orl $8,%%eax\n\t" \
+ "movl %%eax,%%cr0" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ :"ax")
+
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+#define mb() __asm__ __volatile__ ("" : : :"memory")
+#define __sti() __asm__ __volatile__ ("sti": : :"memory")
+#define __cli() __asm__ __volatile__ ("cli": : :"memory")
+#define __save_flags(x) (x = ((curr_ipl[cpu_number()] > 0) ? 0 : (1 << 9)))
+#define __restore_flags(x) splx((x & (1 << 9)) ? 0 : 7)
+
+#ifdef __SMP__
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+
+#endif
+
+
+#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+ "movw %2,%%dx\n\t" \
+ "movl %%eax,%0\n\t" \
+ "movl %%edx,%1" \
+ :"=m" (*((long *) (gate_addr))), \
+ "=m" (*(1+(long *) (gate_addr))) \
+ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+ "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
+ :"ax","dx")
+
+#define set_intr_gate(n,addr) \
+ _set_gate(&idt[n],14,0,addr)
+
+#define set_trap_gate(n,addr) \
+ _set_gate(&idt[n],15,0,addr)
+
+#define set_system_gate(n,addr) \
+ _set_gate(&idt[n],15,3,addr)
+
+#define set_call_gate(a,addr) \
+ _set_gate(a,12,3,addr)
+
+#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
+ *((gate_addr)+1) = ((base) & 0xff000000) | \
+ (((base) & 0x00ff0000)>>16) | \
+ ((limit) & 0xf0000) | \
+ ((dpl)<<13) | \
+ (0x00408000) | \
+ ((type)<<8); \
+ *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
+ ((limit) & 0x0ffff); }
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
+ "movw %%ax,%2\n\t" \
+ "rorl $16,%%eax\n\t" \
+ "movb %%al,%3\n\t" \
+ "movb $" type ",%4\n\t" \
+ "movb $0x00,%5\n\t" \
+ "movb %%ah,%6\n\t" \
+ "rorl $16,%%eax" \
+ : /* no output */ \
+ :"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
+ "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
+ )
+
+#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
+#define set_ldt_desc(n,addr,size) \
+ _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt;
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#ifndef MACH
+#define HAVE_DISABLE_HLT
+#endif
+void disable_hlt(void);
+void enable_hlt(void);
+
+static __inline__ unsigned long long rdmsr(unsigned int msr)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdmsr"
+ : "=A" (ret)
+ : "c" (msr));
+ return ret;
+}
+
+static __inline__ void wrmsr(unsigned int msr,unsigned long long val)
+{
+ __asm__ __volatile__("wrmsr"
+ : /* no Outputs */
+ : "c" (msr), "A" (val));
+}
+
+
+static __inline__ unsigned long long rdtsc(void)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdtsc"
+ : "=A" (ret)
+ : /* no inputs */);
+ return ret;
+}
+
+static __inline__ unsigned long long rdpmc(unsigned int counter)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdpmc"
+ : "=A" (ret)
+ : "c" (counter));
+ return ret;
+}
+
+#endif
diff --git a/linux/dev/include/asm-i386/uaccess.h b/linux/dev/include/asm-i386/uaccess.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/asm-i386/uaccess.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/blk.h b/linux/dev/include/linux/blk.h
new file mode 100644
index 0000000..b924a14
--- /dev/null
+++ b/linux/dev/include/linux/blk.h
@@ -0,0 +1,471 @@
+/* Is this okay? by OKUJI Yoshinori */
+#ifndef _BLK_H
+#define _BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/locks.h>
+#include <linux/malloc.h>
+#include <linux/config.h>
+#include <linux/md.h>
+
+/*
+ * NR_REQUEST is the number of entries in the request-queue.
+ * NOTE that writes may use only the low 2/3 of these: reads
+ * take precedence.
+ */
+#define NR_REQUEST 64
+
+/*
+ * This is used in the elevator algorithm. We don't prioritise reads
+ * over writes any more --- although reads are more time-critical than
+ * writes, by treating them equally we increase filesystem throughput.
+ * This turns out to give better overall performance. -- sct
+ */
+#define IN_ORDER(s1,s2) \
+((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \
+(s1)->sector < (s2)->sector)))
+
+/*
+ * These will have to be changed to be aware of different buffer
+ * sizes etc.. It actually needs a major cleanup.
+ */
+#if defined(IDE_DRIVER) || defined(MD_DRIVER)
+#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
+#else
+#define SECTOR_MASK (blksize_size[MAJOR_NR] && \
+ blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] ? \
+ ((blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] >> 9) - 1) : \
+ ((BLOCK_SIZE >> 9) - 1))
+#endif /* IDE_DRIVER */
+
+#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0)
+
+#ifdef CONFIG_CDU31A
+extern int cdu31a_init(void);
+#endif /* CONFIG_CDU31A */
+#ifdef CONFIG_MCD
+extern int mcd_init(void);
+#endif /* CONFIG_MCD */
+#ifdef CONFIG_MCDX
+extern int mcdx_init(void);
+#endif /* CONFIG_MCDX */
+#ifdef CONFIG_SBPCD
+extern int sbpcd_init(void);
+#endif /* CONFIG_SBPCD */
+#ifdef CONFIG_AZTCD
+extern int aztcd_init(void);
+#endif /* CONFIG_AZTCD */
+#ifdef CONFIG_CDU535
+extern int sony535_init(void);
+#endif /* CONFIG_CDU535 */
+#ifdef CONFIG_GSCD
+extern int gscd_init(void);
+#endif /* CONFIG_GSCD */
+#ifdef CONFIG_CM206
+extern int cm206_init(void);
+#endif /* CONFIG_CM206 */
+#ifdef CONFIG_OPTCD
+extern int optcd_init(void);
+#endif /* CONFIG_OPTCD */
+#ifdef CONFIG_SJCD
+extern int sjcd_init(void);
+#endif /* CONFIG_SJCD */
+#ifdef CONFIG_CDI_INIT
+extern int cdi_init(void);
+#endif /* CONFIG_CDI_INIT */
+#ifdef CONFIG_BLK_DEV_HD
+extern int hd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_IDE
+extern int ide_init(void);
+extern void ide_disable_base(unsigned base);
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+extern int xd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_LOOP
+extern int loop_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_MD
+extern int md_init(void);
+#endif /* CONFIG_BLK_DEV_MD */
+
+extern void set_device_ro(kdev_t dev,int flag);
+void add_blkdev_randomness(int major);
+
+extern int floppy_init(void);
+extern void rd_load(void);
+extern int rd_init(void);
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+extern unsigned long initrd_start,initrd_end;
+extern int mount_initrd; /* zero if initrd should not be mounted */
+void initrd_init(void);
+
+#endif
+
+#define RO_IOCTLS(dev,where) \
+ case BLKROSET: { int __err; if (!suser()) return -EACCES; \
+ __err = verify_area(VERIFY_READ, (void *) (where), sizeof(long)); \
+ if (!__err) set_device_ro((dev),get_fs_long((long *) (where))); return __err; } \
+ case BLKROGET: { int __err = verify_area(VERIFY_WRITE, (void *) (where), sizeof(long)); \
+ if (!__err) put_fs_long(0!=is_read_only(dev),(long *) (where)); return __err; }
+
+#if defined(MAJOR_NR) || defined(IDE_DRIVER)
+
+/*
+ * Add entries as needed.
+ */
+
+#ifdef IDE_DRIVER
+
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+#define DEVICE_ON(device) /* nothing */
+#define DEVICE_OFF(device) /* nothing */
+
+#elif (MAJOR_NR == RAMDISK_MAJOR)
+
+/* ram disk */
+#define DEVICE_NAME "ramdisk"
+#define DEVICE_REQUEST rd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+#define DEVICE_NO_RANDOM
+
+#elif (MAJOR_NR == FLOPPY_MAJOR)
+
+static void floppy_off(unsigned int nr);
+
+#define DEVICE_NAME "floppy"
+#define DEVICE_INTR do_floppy
+#define DEVICE_REQUEST do_fd_request
+#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
+
+#elif (MAJOR_NR == HD_MAJOR)
+
+/* harddisk: timeout is 6 seconds.. */
+#define DEVICE_NAME "harddisk"
+#define DEVICE_INTR do_hd
+#define DEVICE_TIMEOUT HD_TIMER
+#define TIMEOUT_VALUE (6*HZ)
+#define DEVICE_REQUEST do_hd_request
+#define DEVICE_NR(device) (MINOR(device)>>6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_DISK_MAJOR)
+
+#define DEVICE_NAME "scsidisk"
+#define DEVICE_INTR do_sd
+#define TIMEOUT_VALUE (2*HZ)
+#define DEVICE_REQUEST do_sd_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+/* Kludge to use the same number for both char and block major numbers */
+#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
+
+#define DEVICE_NAME "scsitape"
+#define DEVICE_INTR do_st
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
+
+#define DEVICE_NAME "CD-ROM"
+#define DEVICE_INTR do_sr
+#define DEVICE_REQUEST do_sr_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == XT_DISK_MAJOR)
+
+#define DEVICE_NAME "xt disk"
+#define DEVICE_REQUEST do_xd_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
+
+#define DEVICE_NAME "CDU31A"
+#define DEVICE_REQUEST do_cdu31a_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcd */
+#define DEVICE_REQUEST do_mcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcdx */
+#define DEVICE_REQUEST do_mcdx_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #1"
+#define DEVICE_REQUEST do_sbpcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #2"
+#define DEVICE_REQUEST do_sbpcd2_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #3"
+#define DEVICE_REQUEST do_sbpcd3_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #4"
+#define DEVICE_REQUEST do_sbpcd4_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
+
+#define DEVICE_NAME "Aztech CD-ROM"
+#define DEVICE_REQUEST do_aztcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
+
+#define DEVICE_NAME "SONY-CDU535"
+#define DEVICE_INTR do_cdu535
+#define DEVICE_REQUEST do_cdu535_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
+
+#define DEVICE_NAME "Goldstar R420"
+#define DEVICE_REQUEST do_gscd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CM206_CDROM_MAJOR)
+#define DEVICE_NAME "Philips/LMS cd-rom cm206"
+#define DEVICE_REQUEST do_cm206_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
+
+#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
+#define DEVICE_REQUEST do_optcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
+
+#define DEVICE_NAME "Sanyo H94A CD-ROM"
+#define DEVICE_REQUEST do_sjcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif /* MAJOR_NR == whatever */
+
+#if (MAJOR_NR != SCSI_TAPE_MAJOR)
+#if !defined(IDE_DRIVER)
+
+#ifndef CURRENT
+#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#endif
+
+#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
+
+#ifdef DEVICE_INTR
+static void (*DEVICE_INTR)(void) = NULL;
+#endif
+#ifdef DEVICE_TIMEOUT
+
+#define SET_TIMER \
+((timer_table[DEVICE_TIMEOUT].expires = jiffies + TIMEOUT_VALUE), \
+(timer_active |= 1<<DEVICE_TIMEOUT))
+
+#define CLEAR_TIMER \
+timer_active &= ~(1<<DEVICE_TIMEOUT)
+
+#define SET_INTR(x) \
+if ((DEVICE_INTR = (x)) != NULL) \
+ SET_TIMER; \
+else \
+ CLEAR_TIMER;
+
+#else
+
+#define SET_INTR(x) (DEVICE_INTR = (x))
+
+#endif /* DEVICE_TIMEOUT */
+
+static void (DEVICE_REQUEST)(void);
+
+#ifdef DEVICE_INTR
+#define CLEAR_INTR SET_INTR(NULL)
+#else
+#define CLEAR_INTR
+#endif
+
+#define INIT_REQUEST \
+ if (!CURRENT) {\
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+
+#endif /* !defined(IDE_DRIVER) */
+
+/* end_request() - SCSI devices have their own version */
+/* - IDE drivers have their own copy too */
+
+#if ! SCSI_BLK_MAJOR(MAJOR_NR)
+
+#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
+#else
+
+#ifdef IDE_DRIVER
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
+ struct request *req = hwgroup->rq;
+#else
+static void end_request(int uptodate) {
+ struct request *req = CURRENT;
+#endif /* IDE_DRIVER */
+ struct buffer_head * bh;
+#ifndef MACH
+ int nsect;
+#endif
+
+ req->errors = 0;
+ if (!uptodate) {
+ if (!req->quiet)
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+#ifdef MACH
+ for (bh = req->bh; bh; )
+ {
+ struct buffer_head *next = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate (bh, 0);
+ unlock_buffer (bh);
+ bh = next;
+ }
+ req->bh = NULL;
+#else
+ if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
+ req->nr_sectors--;
+ req->nr_sectors &= ~(nsect - 1);
+ req->sector += nsect;
+ req->sector &= ~(nsect - 1);
+ }
+#endif
+ }
+
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+
+ /*
+ * This is our 'MD IO has finished' event handler.
+ * note that b_state should be cached in a register
+ * anyways, so the overhead if this checking is almost
+ * zero. But anyways .. we never get OO for free :)
+ */
+ if (test_bit(BH_MD, &bh->b_state)) {
+ struct md_personality * pers=(struct md_personality *)bh->personality;
+ pers->end_request(bh,uptodate);
+ }
+ /*
+ * the normal (nonmirrored and no RAID5) case:
+ */
+ else {
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ }
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ return;
+ }
+ }
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+#endif
+#ifdef IDE_DRIVER
+ blk_dev[MAJOR(req->rq_dev)].current_request = req->next;
+ hwgroup->rq = NULL;
+#else
+ DEVICE_OFF(req->rq_dev);
+ CURRENT = req->next;
+#endif /* IDE_DRIVER */
+ if (req->sem != NULL)
+ up(req->sem);
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
+#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
+#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */
+
+#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
+
+#endif /* _BLK_H */
diff --git a/linux/dev/include/linux/blkdev.h b/linux/dev/include/linux/blkdev.h
new file mode 100644
index 0000000..5bf0a28
--- /dev/null
+++ b/linux/dev/include/linux/blkdev.h
@@ -0,0 +1,73 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+
+/*
+ * Ok, this is an expanded form so that we can use the same
+ * request for paging requests when that is implemented. In
+ * paging, 'bh' is NULL, and the semaphore is used to wait
+ * for read/write completion.
+ */
+struct request {
+ volatile int rq_status; /* should split this into a few status bits */
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+ kdev_t rq_dev;
+ int cmd; /* READ or WRITE */
+ int errors;
+ int quiet;
+ unsigned long sector;
+ unsigned long nr_sectors;
+ unsigned long current_nr_sectors;
+ char * buffer;
+ struct semaphore * sem;
+ struct buffer_head * bh;
+ struct buffer_head * bhtail;
+ struct request * next;
+};
+
+struct blk_dev_struct {
+ void (*request_fn)(void);
+ struct request * current_request;
+ struct request plug;
+ struct tq_struct plug_tq;
+};
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern struct wait_queue * wait_for_request;
+extern void resetup_one_dev(struct gendisk *dev, int drive);
+
+#ifdef MACH
+extern inline void unplug_device(void *data) { }
+#else
+extern void unplug_device(void * data);
+#endif
+
+extern void make_request(int major,int rw, struct buffer_head * bh);
+
+/* md needs this function to remap requests */
+extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
+extern int md_make_request (int major, int rw, struct buffer_head * bh);
+extern int md_error (kdev_t mddev, kdev_t rdev);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * hardsect_size[MAX_BLKDEV];
+
+#endif
diff --git a/linux/dev/include/linux/compile.h b/linux/dev/include/linux/compile.h
new file mode 100644
index 0000000..7d43a20
--- /dev/null
+++ b/linux/dev/include/linux/compile.h
@@ -0,0 +1,6 @@
+#define UTS_VERSION "#11 Fri Apr 24 23:03:10 JST 1998"
+#define LINUX_COMPILE_TIME "23:03:10"
+#define LINUX_COMPILE_BY "somebody"
+#define LINUX_COMPILE_HOST "unknown"
+#define LINUX_COMPILE_DOMAIN "somewhere.org"
+#define LINUX_COMPILER "gcc version 2.7.2.3"
diff --git a/linux/dev/include/linux/etherdevice.h b/linux/dev/include/linux/etherdevice.h
new file mode 100644
index 0000000..eb262b2
--- /dev/null
+++ b/linux/dev/include/linux/etherdevice.h
@@ -0,0 +1,62 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_ETHERDEVICE_H
+#define _LINUX_ETHERDEVICE_H
+
+
+#include <linux/if_ether.h>
+
+#ifdef __KERNEL__
+extern int eth_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int eth_rebuild_header(void *buff, struct device *dev,
+ unsigned long dst, struct sk_buff *skb);
+
+/* This causes skb->protocol = 0. I'm not sure if this is really OK.
+ * Last modified: 19980402 by OKUJI Yoshinori <okuji@kmc.kyoto-u.ac.jp>
+ */
+#ifdef MACH
+#define eth_type_trans(skb, dev) ((unsigned short)0)
+#else
+extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev);
+#endif
+
+extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev,
+ unsigned short htype, __u32 daddr);
+extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+
+#ifdef MACH
+#define eth_copy_and_sum(dest, src, length, base) \
+ memcpy((dest)->data, src, length)
+#else
+extern void eth_copy_and_sum(struct sk_buff *dest,
+ unsigned char *src, int length, int base);
+#endif
+
+extern struct device * init_etherdev(struct device *, int);
+
+#endif
+
+#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/linux/dev/include/linux/fs.h b/linux/dev/include/linux/fs.h
new file mode 100644
index 0000000..def2bc9
--- /dev/null
+++ b/linux/dev/include/linux/fs.h
@@ -0,0 +1,803 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table
+ * structures etc.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/limits.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/vfs.h>
+#include <linux/net.h>
+#include <linux/kdev_t.h>
+#include <linux/ioctl.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but I'll fix
+ * that later. Anyway, now the file code is no longer dependent
+ * on bitmaps in unsigned longs, but uses the new fd_set structure..
+ *
+ * Some programs (notably those using select()) may have to be
+ * recompiled to take full advantage of the new limits..
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define NR_OPEN 256
+
+#define NR_SUPER 64
+#define BLOCK_SIZE 1024
+#define BLOCK_SIZE_BITS 10
+
+/* And dynamically-tunable limits and defaults: */
+extern int max_inodes, nr_inodes;
+extern int max_files, nr_files;
+#define NR_INODE 3072 /* this should be bigger than NR_FILE */
+#define NR_FILE 1024 /* this can well be larger on a larger system */
+
+#define MAY_EXEC 1
+#define MAY_WRITE 2
+#define MAY_READ 4
+
+#define FMODE_READ 1
+#define FMODE_WRITE 2
+
+#define READ 0
+#define WRITE 1
+#define READA 2 /* read-ahead - don't block if no resources */
+#define WRITEA 3 /* write-ahead - don't block if no resources */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define NIL_FILP ((struct file *)0)
+#define SEL_IN 1
+#define SEL_OUT 2
+#define SEL_EX 4
+
+/*
+ * These are the fs-independent mount-flags: up to 16 flags are supported
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define S_WRITE 128 /* Write on file/directory/symlink */
+#define S_APPEND 256 /* Append-only file */
+#define S_IMMUTABLE 512 /* Immutable file */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define S_BAD_INODE 2048 /* Marker for unreadable inodes */
+#define S_ZERO_WR 4096 /* Device accepts 0 length writes */
+/*
+ * Flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME)
+
+/*
+ * Magic mount flag number. Has to be or-ed to the flag values.
+ */
+#define MS_MGC_VAL 0xC0ED0000 /* magic flag number to indicate "new" flags */
+#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */
+
+/*
+ * Note that read-only etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ */
+#define IS_RDONLY(inode) (((inode)->i_sb) && ((inode)->i_sb->s_flags & MS_RDONLY))
+#define IS_NOSUID(inode) ((inode)->i_flags & MS_NOSUID)
+#define IS_NODEV(inode) ((inode)->i_flags & MS_NODEV)
+#define IS_NOEXEC(inode) ((inode)->i_flags & MS_NOEXEC)
+#define IS_SYNC(inode) ((inode)->i_flags & MS_SYNCHRONOUS)
+#define IS_MANDLOCK(inode) ((inode)->i_flags & MS_MANDLOCK)
+
+#define IS_WRITABLE(inode) ((inode)->i_flags & S_WRITE)
+#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+#define IS_NOATIME(inode) ((inode)->i_flags & MS_NOATIME)
+#define IS_ZERO_WR(inode) ((inode)->i_flags & S_ZERO_WR)
+
+#define UPDATE_ATIME(inode) \
+ if (!IS_NOATIME(inode) && !IS_RDONLY(inode)) { \
+ inode->i_atime = CURRENT_TIME; \
+ inode->i_dirt = 1; \
+ }
+
+/* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
+#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96) /* return device size */
+#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */
+#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+
+#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+#define FIBMAP _IO(0x00,1) /* bmap access */
+#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+
+#ifdef __KERNEL__
+
+#include <asm/semaphore.h>
+#include <asm/bitops.h>
+
+extern void buffer_init(void);
+extern unsigned long inode_init(unsigned long start, unsigned long end);
+extern unsigned long file_table_init(unsigned long start, unsigned long end);
+extern unsigned long name_cache_init(unsigned long start, unsigned long end);
+
+typedef char buffer_block[BLOCK_SIZE];
+
+/* bh state bits */
+#define BH_Uptodate 0 /* 1 if the buffer contains valid data */
+#define BH_Dirty 1 /* 1 if the buffer is dirty */
+#define BH_Lock 2 /* 1 if the buffer is locked */
+#define BH_Req 3 /* 0 if the buffer has been invalidated */
+#define BH_Touched 4 /* 1 if the buffer has been touched (aging) */
+#define BH_Has_aged 5 /* 1 if the buffer has been aged (aging) */
+#define BH_Protected 6 /* 1 if the buffer is protected */
+#define BH_FreeOnIO 7 /* 1 to discard the buffer_head after IO */
+#define BH_MD 8 /* 1 if the buffer is an MD request */
+
+/*
+ * Try to keep the most commonly used fields in single cache lines (16
+ * bytes) to improve performance. This ordering should be
+ * particularly beneficial on 32-bit processors.
+ *
+ * We use the first 16 bytes for the data which is used in searches
+ * over the block hash lists (ie. getblk(), find_buffer() and
+ * friends).
+ *
+ * The second 16 bytes we use for lru buffer scans, as used by
+ * sync_buffers() and refill_freelist(). -- sct
+ */
+struct buffer_head {
+ /* First cache line: */
+ unsigned long b_blocknr; /* block number */
+ kdev_t b_dev; /* device (B_FREE = free) */
+ kdev_t b_rdev; /* Real device */
+ unsigned long b_rsector; /* Real buffer location on disk */
+ struct buffer_head * b_next; /* Hash queue list */
+ struct buffer_head * b_this_page; /* circular list of buffers in one page */
+
+ /* Second cache line: */
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head * b_next_free;
+ unsigned int b_count; /* users using this block */
+ unsigned long b_size; /* block size */
+
+ /* Non-performance-critical data follows. */
+ char * b_data; /* pointer to data block (1024 bytes) */
+ unsigned int b_list; /* List that this buffer appears */
+ unsigned long b_flushtime; /* Time when this (dirty) buffer
+ * should be written */
+ unsigned long b_lru_time; /* Time when this buffer was
+ * last used. */
+ struct wait_queue * b_wait;
+ struct buffer_head * b_prev; /* doubly linked list of hash-queue */
+ struct buffer_head * b_prev_free; /* doubly linked list of buffers */
+ struct buffer_head * b_reqnext; /* request queue */
+
+/*
+ * Some MD stuff like RAID5 needs special event handlers and
+ * special private buffer_head fields:
+ */
+ void * personality;
+ void * private_bh;
+};
+
+static inline int buffer_uptodate(struct buffer_head * bh)
+{
+ return test_bit(BH_Uptodate, &bh->b_state);
+}
+
+static inline int buffer_dirty(struct buffer_head * bh)
+{
+ return test_bit(BH_Dirty, &bh->b_state);
+}
+
+static inline int buffer_locked(struct buffer_head * bh)
+{
+ return test_bit(BH_Lock, &bh->b_state);
+}
+
+static inline int buffer_req(struct buffer_head * bh)
+{
+ return test_bit(BH_Req, &bh->b_state);
+}
+
+static inline int buffer_touched(struct buffer_head * bh)
+{
+ return test_bit(BH_Touched, &bh->b_state);
+}
+
+static inline int buffer_has_aged(struct buffer_head * bh)
+{
+ return test_bit(BH_Has_aged, &bh->b_state);
+}
+
+static inline int buffer_protected(struct buffer_head * bh)
+{
+ return test_bit(BH_Protected, &bh->b_state);
+}
+
+#ifndef MACH
+#include <linux/pipe_fs_i.h>
+#include <linux/minix_fs_i.h>
+#include <linux/ext_fs_i.h>
+#include <linux/ext2_fs_i.h>
+#include <linux/hpfs_fs_i.h>
+#include <linux/msdos_fs_i.h>
+#include <linux/umsdos_fs_i.h>
+#include <linux/iso_fs_i.h>
+#include <linux/nfs_fs_i.h>
+#include <linux/xia_fs_i.h>
+#include <linux/sysv_fs_i.h>
+#include <linux/affs_fs_i.h>
+#include <linux/ufs_fs_i.h>
+#endif
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+#define ATTR_ATIME_SET 128
+#define ATTR_MTIME_SET 256
+#define ATTR_FORCE 512 /* Not a change, but a change it */
+
+/*
+ * This is the Inode Attributes structure, used for notify_change(). It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about. Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ off_t ia_size;
+ time_t ia_atime;
+ time_t ia_mtime;
+ time_t ia_ctime;
+};
+
+#include <linux/quota.h>
+
+#ifdef MACH
+/* Suppress GCC's warnings. by OKUJI Yoshinori. */
+struct vm_area_struct;
+struct page;
+
+struct inode
+{
+ umode_t i_mode;
+ kdev_t i_rdev;
+};
+
+struct file
+{
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ int f_resid;
+ void *f_object;
+ void *f_np;
+};
+
+#else /* !MACH */
+
+struct inode {
+ kdev_t i_dev;
+ unsigned long i_ino;
+ umode_t i_mode;
+ nlink_t i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ kdev_t i_rdev;
+ off_t i_size;
+ time_t i_atime;
+ time_t i_mtime;
+ time_t i_ctime;
+ unsigned long i_blksize;
+ unsigned long i_blocks;
+ unsigned long i_version;
+ unsigned long i_nrpages;
+ struct semaphore i_sem;
+ struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct wait_queue *i_wait;
+ struct file_lock *i_flock;
+ struct vm_area_struct *i_mmap;
+ struct page *i_pages;
+ struct dquot *i_dquot[MAXQUOTAS];
+ struct inode *i_next, *i_prev;
+ struct inode *i_hash_next, *i_hash_prev;
+ struct inode *i_bound_to, *i_bound_by;
+ struct inode *i_mount;
+ unsigned long i_count;
+ unsigned short i_flags;
+ unsigned short i_writecount;
+ unsigned char i_lock;
+ unsigned char i_dirt;
+ unsigned char i_pipe;
+ unsigned char i_sock;
+ unsigned char i_seek;
+ unsigned char i_update;
+ unsigned char i_condemned;
+ union {
+ struct pipe_inode_info pipe_i;
+ struct minix_inode_info minix_i;
+ struct ext_inode_info ext_i;
+ struct ext2_inode_info ext2_i;
+ struct hpfs_inode_info hpfs_i;
+ struct msdos_inode_info msdos_i;
+ struct umsdos_inode_info umsdos_i;
+ struct iso_inode_info isofs_i;
+ struct nfs_inode_info nfs_i;
+ struct xiafs_inode_info xiafs_i;
+ struct sysv_inode_info sysv_i;
+ struct affs_inode_info affs_i;
+ struct ufs_inode_info ufs_i;
+ struct socket socket_i;
+ void * generic_ip;
+ } u;
+};
+
+struct fown_struct {
+ int pid; /* pid or -pgrp where SIGIO should be sent */
+ uid_t uid, euid; /* uid/euid of process setting the owner */
+};
+
+struct file {
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ unsigned short f_count;
+ unsigned long f_reada, f_ramax, f_raend, f_ralen, f_rawin;
+ struct file *f_next, *f_prev;
+ struct fown_struct f_owner;
+ struct inode * f_inode;
+ struct file_operations * f_op;
+ unsigned long f_version;
+ void *private_data; /* needed for tty driver, and maybe others */
+};
+#endif /* !MACH */
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_BROKEN 4 /* broken flock() emulation */
+#define FL_ACCESS 8 /* for processes suspended by mandatory locking */
+
+struct file_lock {
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_nextlink; /* doubly linked list of all locks */
+ struct file_lock *fl_prevlink; /* used to simplify lock removal */
+ struct file_lock *fl_nextblock; /* circular list of blocked processes */
+ struct file_lock *fl_prevblock;
+ struct task_struct *fl_owner;
+ struct wait_queue *fl_wait;
+ struct file *fl_file;
+ unsigned char fl_flags;
+ unsigned char fl_type;
+ off_t fl_start;
+ off_t fl_end;
+};
+
+#include <linux/fcntl.h>
+
+extern int fcntl_getlk(unsigned int fd, struct flock *l);
+extern int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l);
+extern void locks_remove_locks(struct task_struct *task, struct file *filp);
+
+#include <linux/stat.h>
+
+#define FLOCK_VERIFY_READ 1
+#define FLOCK_VERIFY_WRITE 2
+
+extern int locks_mandatory_locked(struct inode *inode);
+extern int locks_mandatory_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count);
+
+#ifndef MACH
+extern inline int locks_verify_locked(struct inode *inode)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_locked(inode));
+ return (0);
+}
+extern inline int locks_verify_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_area(read_write, inode, filp, offset,
+ count));
+ return (0);
+}
+#endif
+
+struct fasync_struct {
+ int magic;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+extern int fasync_helper(struct inode *, struct file *, int, struct fasync_struct **);
+
+#ifndef MACH
+#include <linux/minix_fs_sb.h>
+#include <linux/ext_fs_sb.h>
+#include <linux/ext2_fs_sb.h>
+#include <linux/hpfs_fs_sb.h>
+#include <linux/msdos_fs_sb.h>
+#include <linux/iso_fs_sb.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/xia_fs_sb.h>
+#include <linux/sysv_fs_sb.h>
+#include <linux/affs_fs_sb.h>
+#include <linux/ufs_fs_sb.h>
+
+struct super_block {
+ kdev_t s_dev;
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_lock;
+ unsigned char s_rd_only;
+ unsigned char s_dirt;
+ struct file_system_type *s_type;
+ struct super_operations *s_op;
+ struct dquot_operations *dq_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ unsigned long s_time;
+ struct inode * s_covered;
+ struct inode * s_mounted;
+ struct wait_queue * s_wait;
+ union {
+ struct minix_sb_info minix_sb;
+ struct ext_sb_info ext_sb;
+ struct ext2_sb_info ext2_sb;
+ struct hpfs_sb_info hpfs_sb;
+ struct msdos_sb_info msdos_sb;
+ struct isofs_sb_info isofs_sb;
+ struct nfs_sb_info nfs_sb;
+ struct xiafs_sb_info xiafs_sb;
+ struct sysv_sb_info sysv_sb;
+ struct affs_sb_info affs_sb;
+ struct ufs_sb_info ufs_sb;
+ void *generic_sbp;
+ } u;
+};
+#endif /* !MACH */
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+typedef int (*filldir_t)(void *, const char *, int, off_t, ino_t);
+
+struct file_operations {
+ int (*lseek) (struct inode *, struct file *, off_t, int);
+ int (*read) (struct inode *, struct file *, char *, int);
+ int (*write) (struct inode *, struct file *, const char *, int);
+ int (*readdir) (struct inode *, struct file *, void *, filldir_t);
+ int (*select) (struct inode *, struct file *, int, select_table *);
+ int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct inode *, struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ void (*release) (struct inode *, struct file *);
+ int (*fsync) (struct inode *, struct file *);
+ int (*fasync) (struct inode *, struct file *, int);
+ int (*check_media_change) (kdev_t dev);
+ int (*revalidate) (kdev_t dev);
+};
+
+struct inode_operations {
+ struct file_operations * default_file_ops;
+ int (*create) (struct inode *,const char *,int,int,struct inode **);
+ int (*lookup) (struct inode *,const char *,int,struct inode **);
+ int (*link) (struct inode *,struct inode *,const char *,int);
+ int (*unlink) (struct inode *,const char *,int);
+ int (*symlink) (struct inode *,const char *,int,const char *);
+ int (*mkdir) (struct inode *,const char *,int,int);
+ int (*rmdir) (struct inode *,const char *,int);
+ int (*mknod) (struct inode *,const char *,int,int,int);
+ int (*rename) (struct inode *,const char *,int,struct inode *,const char *,int, int);
+ int (*readlink) (struct inode *,char *,int);
+ int (*follow_link) (struct inode *,struct inode *,int,int,struct inode **);
+ int (*readpage) (struct inode *, struct page *);
+ int (*writepage) (struct inode *, struct page *);
+ int (*bmap) (struct inode *,int);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
+ int (*smap) (struct inode *,int);
+};
+
+struct super_operations {
+ void (*read_inode) (struct inode *);
+ int (*notify_change) (struct inode *, struct iattr *);
+ void (*write_inode) (struct inode *);
+ void (*put_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
+ void (*statfs) (struct super_block *, struct statfs *, int);
+ int (*remount_fs) (struct super_block *, int *, char *);
+};
+
+struct dquot_operations {
+ void (*initialize) (struct inode *, short);
+ void (*drop) (struct inode *);
+ int (*alloc_block) (const struct inode *, unsigned long);
+ int (*alloc_inode) (const struct inode *, unsigned long);
+ void (*free_block) (const struct inode *, unsigned long);
+ void (*free_inode) (const struct inode *, unsigned long);
+ int (*transfer) (struct inode *, struct iattr *, char);
+};
+
+struct file_system_type {
+ struct super_block *(*read_super) (struct super_block *, void *, int);
+ const char *name;
+ int requires_dev;
+ struct file_system_type * next;
+};
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+
+asmlinkage int sys_open(const char *, int, int);
+asmlinkage int sys_close(unsigned int); /* yes, it's really unsigned */
+asmlinkage int sys_read(unsigned int, char *, int);
+
+extern void kill_fasync(struct fasync_struct *fa, int sig);
+
+extern int getname(const char * filename, char **result);
+extern void putname(char * name);
+extern int do_truncate(struct inode *, unsigned long);
+extern int register_blkdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_blkdev(unsigned int major, const char * name);
+extern int blkdev_open(struct inode * inode, struct file * filp);
+extern void blkdev_release (struct inode * inode);
+extern struct file_operations def_blk_fops;
+extern struct inode_operations blkdev_inode_operations;
+
+extern int register_chrdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_chrdev(unsigned int major, const char * name);
+extern int chrdev_open(struct inode * inode, struct file * filp);
+extern struct file_operations def_chr_fops;
+extern struct inode_operations chrdev_inode_operations;
+
+extern void init_fifo(struct inode * inode);
+extern struct inode_operations fifo_inode_operations;
+
+extern struct file_operations connecting_fifo_fops;
+extern struct file_operations read_fifo_fops;
+extern struct file_operations write_fifo_fops;
+extern struct file_operations rdwr_fifo_fops;
+extern struct file_operations read_pipe_fops;
+extern struct file_operations write_pipe_fops;
+extern struct file_operations rdwr_pipe_fops;
+
+extern struct file_system_type *get_fs_type(const char *name);
+
+extern int fs_may_mount(kdev_t dev);
+extern int fs_may_umount(kdev_t dev, struct inode * mount_root);
+extern int fs_may_remount_ro(kdev_t dev);
+
+extern struct file *first_file;
+extern struct super_block *super_blocks;
+
+extern void refile_buffer(struct buffer_head * buf);
+extern void set_writetime(struct buffer_head * buf, int flag);
+extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**, int);
+
+extern int nr_buffers;
+extern int buffermem;
+extern int nr_buffer_heads;
+
+#define BUF_CLEAN 0
+#define BUF_LOCKED 1 /* Buffers scheduled for write */
+#define BUF_LOCKED1 2 /* Supers, inodes */
+#define BUF_DIRTY 3 /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST 4
+
+#ifdef MACH
+static inline void
+mark_buffer_uptodate (struct buffer_head *bh, int on)
+{
+ if (on)
+ set_bit (BH_Uptodate, &bh->b_state);
+ else
+ clear_bit (BH_Uptodate, &bh->b_state);
+}
+#else
+void mark_buffer_uptodate(struct buffer_head * bh, int on);
+#endif
+
+static inline void mark_buffer_clean(struct buffer_head * bh)
+{
+#ifdef MACH
+ clear_bit (BH_Dirty, &bh->b_state);
+#else
+ if (clear_bit(BH_Dirty, &bh->b_state)) {
+ if (bh->b_list == BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+static inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
+{
+#ifdef MACH
+ set_bit (BH_Dirty, &bh->b_state);
+#else
+ if (!set_bit(BH_Dirty, &bh->b_state)) {
+ set_writetime(bh, flag);
+ if (bh->b_list != BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+extern int check_disk_change(kdev_t dev);
+
+#ifdef MACH
+#define invalidate_inodes(dev)
+#else
+extern void invalidate_inodes(kdev_t dev);
+#endif
+
+extern void invalidate_inode_pages(struct inode *);
+
+#ifdef MACH
+#define invalidate_buffers(dev)
+#else
+extern void invalidate_buffers(kdev_t dev);
+#endif
+
+extern int floppy_is_wp(int minor);
+extern void sync_inodes(kdev_t dev);
+
+#ifdef MACH
+#define sync_dev(dev)
+#define fsync_dev(dev)
+#else
+extern void sync_dev(kdev_t dev);
+extern int fsync_dev(kdev_t dev);
+#endif
+
+extern void sync_supers(kdev_t dev);
+extern int bmap(struct inode * inode,int block);
+extern int notify_change(struct inode *, struct iattr *);
+extern int namei(const char * pathname, struct inode ** res_inode);
+extern int lnamei(const char * pathname, struct inode ** res_inode);
+
+#ifdef MACH
+#define permission(inode, mask) 0
+#else
+extern int permission(struct inode * inode,int mask);
+#endif
+
+extern int get_write_access(struct inode *inode);
+extern void put_write_access(struct inode *inode);
+extern int open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base);
+extern int do_mknod(const char * filename, int mode, dev_t dev);
+extern int do_pipe(int *);
+extern void iput(struct inode * inode);
+extern struct inode * __iget(struct super_block * sb,int nr,int crsmnt);
+extern struct inode * get_empty_inode(void);
+extern void insert_inode_hash(struct inode *);
+extern void clear_inode(struct inode *);
+extern struct inode * get_pipe_inode(void);
+extern void make_bad_inode(struct inode *);
+extern int get_unused_fd(void);
+extern void put_unused_fd(int);
+extern struct file * get_empty_filp(void);
+extern int close_fp(struct file *filp);
+extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size);
+extern struct buffer_head * getblk(kdev_t dev, int block, int size);
+extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[], int quiet);
+extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer);
+extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer);
+extern int is_read_only(kdev_t dev);
+extern void __brelse(struct buffer_head *buf);
+extern inline void brelse(struct buffer_head *buf)
+{
+ if (buf)
+ __brelse(buf);
+}
+extern void __bforget(struct buffer_head *buf);
+extern inline void bforget(struct buffer_head *buf)
+{
+ if (buf)
+ __bforget(buf);
+}
+extern void set_blocksize(kdev_t dev, int size);
+extern struct buffer_head * bread(kdev_t dev, int block, int size);
+extern struct buffer_head * breada(kdev_t dev,int block, int size,
+ unsigned int pos, unsigned int filesize);
+
+extern int generic_readpage(struct inode *, struct page *);
+extern int generic_file_read(struct inode *, struct file *, char *, int);
+extern int generic_file_mmap(struct inode *, struct file *, struct vm_area_struct *);
+extern int brw_page(int, struct page *, kdev_t, int [], int, int);
+
+extern void put_super(kdev_t dev);
+unsigned long generate_cluster(kdev_t dev, int b[], int size);
+extern kdev_t ROOT_DEV;
+
+extern void show_buffers(void);
+extern void mount_root(void);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern kdev_t real_root_dev;
+extern int change_root(kdev_t new_root_dev,const char *put_old);
+#endif
+
+extern int char_read(struct inode *, struct file *, char *, int);
+extern int block_read(struct inode *, struct file *, char *, int);
+extern int read_ahead[];
+
+extern int char_write(struct inode *, struct file *, const char *, int);
+extern int block_write(struct inode *, struct file *, const char *, int);
+
+extern int block_fsync(struct inode *, struct file *);
+extern int file_fsync(struct inode *, struct file *);
+
+extern void dcache_add(struct inode *, const char *, int, unsigned long);
+extern int dcache_lookup(struct inode *, const char *, int, unsigned long *);
+
+extern int inode_change_ok(struct inode *, struct iattr *);
+extern void inode_setattr(struct inode *, struct iattr *);
+
+extern inline struct inode * iget(struct super_block * sb,int nr)
+{
+ return __iget(sb, nr, 1);
+}
+
+/* kludge to get SCSI modules working */
+#ifndef MACH
+#include <linux/minix_fs.h>
+#include <linux/minix_fs_sb.h>
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/dev/include/linux/genhd.h b/linux/dev/include/linux/genhd.h
new file mode 100644
index 0000000..f19015d
--- /dev/null
+++ b/linux/dev/include/linux/genhd.h
@@ -0,0 +1,208 @@
+#ifndef _LINUX_GENHD_H
+#define _LINUX_GENHD_H
+
+/*
+ * genhd.h Copyright (C) 1992 Drew Eckhardt
+ * Generic hard disk header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#include <linux/config.h>
+
+#define CONFIG_MSDOS_PARTITION 1
+
+#ifdef __alpha__
+#define CONFIG_OSF_PARTITION 1
+#endif
+
+#if defined(__sparc__) || defined(CONFIG_SMD_DISKLABEL)
+#define CONFIG_SUN_PARTITION 1
+#endif
+
+/* These three have identical behaviour; use the second one if DOS fdisk gets
+ confused about extended/logical partitions starting past cylinder 1023. */
+#define DOS_EXTENDED_PARTITION 5
+#define LINUX_EXTENDED_PARTITION 0x85
+#define WIN98_EXTENDED_PARTITION 0x0f
+
+#define DM6_PARTITION 0x54 /* has DDO: use xlated geom & offset */
+#define EZD_PARTITION 0x55 /* EZ-DRIVE: same as DM6 (we think) */
+#define DM6_AUX1PARTITION 0x51 /* no DDO: use xlated geom */
+#define DM6_AUX3PARTITION 0x53 /* no DDO: use xlated geom */
+
+#ifdef MACH_INCLUDE
+struct linux_partition
+{
+#else
+struct partition {
+#endif
+ unsigned char boot_ind; /* 0x80 - active */
+ unsigned char head; /* starting head */
+ unsigned char sector; /* starting sector */
+ unsigned char cyl; /* starting cylinder */
+ unsigned char sys_ind; /* What partition type */
+ unsigned char end_head; /* end head */
+ unsigned char end_sector; /* end sector */
+ unsigned char end_cyl; /* end cylinder */
+ unsigned int start_sect; /* starting sector counting from 0 */
+ unsigned int nr_sects; /* nr of sectors in partition */
+} __attribute((packed)); /* Give a polite hint to egcs/alpha to generate
+ unaligned operations */
+
+struct hd_struct {
+ long start_sect;
+ long nr_sects;
+};
+
+struct gendisk {
+ int major; /* major number of driver */
+ const char *major_name; /* name of major driver */
+ int minor_shift; /* number of times minor is shifted to
+ get real minor */
+ int max_p; /* maximum partitions per device */
+ int max_nr; /* maximum number of real devices */
+
+ void (*init)(struct gendisk *); /* Initialization called before we do our thing */
+ struct hd_struct *part; /* partition table */
+ int *sizes; /* device size in blocks, copied to blk_size[] */
+ int nr_real; /* number of real devices */
+
+ void *real_devices; /* internal use */
+ struct gendisk *next;
+};
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
+ */
+
+#define BSD_PARTITION 0xa5 /* Partition ID */
+
+#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
+#define BSD_MAXPARTITIONS 8
+#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
+struct bsd_disklabel {
+ __u32 d_magic; /* the magic number */
+ __s16 d_type; /* drive type */
+ __s16 d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ char d_packname[16]; /* pack identifier */
+ __u32 d_secsize; /* # of bytes per sector */
+ __u32 d_nsectors; /* # of data sectors per track */
+ __u32 d_ntracks; /* # of tracks per cylinder */
+ __u32 d_ncylinders; /* # of data cylinders per unit */
+ __u32 d_secpercyl; /* # of data sectors per cylinder */
+ __u32 d_secperunit; /* # of data sectors per unit */
+ __u16 d_sparespertrack; /* # of spare sectors per track */
+ __u16 d_sparespercyl; /* # of spare sectors per cylinder */
+ __u32 d_acylinders; /* # of alt. cylinders per unit */
+ __u16 d_rpm; /* rotational speed */
+ __u16 d_interleave; /* hardware sector interleave */
+ __u16 d_trackskew; /* sector 0 skew, per track */
+ __u16 d_cylskew; /* sector 0 skew, per cylinder */
+ __u32 d_headswitch; /* head switch time, usec */
+ __u32 d_trkseek; /* track-to-track seek, usec */
+ __u32 d_flags; /* generic flags */
+#define NDDATA 5
+ __u32 d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ __u32 d_spare[NSPARE]; /* reserved for future use */
+ __u32 d_magic2; /* the magic number (again) */
+ __u16 d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ __u16 d_npartitions; /* number of partitions in following */
+ __u32 d_bbsize; /* size of boot area at sn0, bytes */
+ __u32 d_sbsize; /* max size of fs superblock, bytes */
+ struct bsd_partition { /* the partition table */
+ __u32 p_size; /* number of sectors in partition */
+ __u32 p_offset; /* starting sector */
+ __u32 p_fsize; /* filesystem basic fragment size */
+ __u8 p_fstype; /* filesystem type, see below */
+ __u8 p_frag; /* filesystem fragments per block */
+ __u16 p_cpg; /* filesystem cylinders per group */
+ } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
+};
+
+#endif /* CONFIG_BSD_DISKLABEL */
+
+#ifdef CONFIG_GPT_DISKLABEL
+/*
+ * GPT disklabel support by наб <nabijaczleweli@gmail.com>
+ *
+ * Based on UEFI specification 2.8A (current as of May 2020):
+ * https://uefi.org/specifications
+ * https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_A_Feb14.pdf
+ *
+ * CRC32 behaviour (final ^ ~0) courtesy of util-linux documentation:
+ * https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git/tree/libblkid/src/partitions/gpt.c?id=042f62dfc514da177c148c257e4dcb32e5f8379d#n104
+ */
+
+#define GPT_PARTITION 0xee /* Partition ID in MBR */
+
+#define GPT_GUID_SIZE 16
+struct gpt_guid {
+ __u32 g_time_low; /* Low field of timestamp */
+ __u16 g_time_mid; /* Medium field of timestamp */
+ __u16 g_time_high_version; /* High field of timestamp and version */
+ __u8 g_clock_sec_high; /* High field of clock sequence and variant */
+ __u8 g_clock_sec_low; /* Low field of clock sequence */
+ __u8 g_node_id[6]; /* Spatially unique node identifier (MAC address or urandom) */
+} __attribute((packed));
+typedef char __gpt_guid_right_size[(sizeof(struct gpt_guid) == GPT_GUID_SIZE) ? 1 : -1];
+
+static const struct gpt_guid GPT_GUID_TYPE_UNUSED = {0,0,0,0,0,{0,0,0,0,0,0}};
+
+#define GPT_SIGNATURE "EFI PART" /* The header signauture */
+#define GPT_REVISION (0x00010000UL) /* Little-endian on disk */
+#define GPT_HEADER_SIZE 92
+#define GPT_MAXPARTITIONS 128
+struct gpt_disklabel_header {
+ char h_signature[8]; /* Must match GPT_SIGNATURE */
+ __u32 h_revision; /* Disklabel revision, must match GPT_REVISION */
+ __u32 h_header_size; /* Must match GPT_HEADER_SIZE */
+ __u32 h_header_crc; /* CRC32 of header, zero for calculation */
+ __u32 h_reserved; /* Must be zero */
+ __u64 h_lba_current; /* LBA of this copy of the header */
+ __u64 h_lba_backup; /* LBA of the second (backup) copy of the header */
+ __u64 h_lba_usable_first; /* First usable LBA for partitions (last LBA of primary table + 1) */
+ __u64 h_lba_usable_last; /* Last usable LBA for partitions (first LBA of secondary table - 1) */
+ struct gpt_guid h_guid; /* ID of the disk */
+ __u64 h_part_table_lba; /* First LBA of the partition table (usually 2 in primary header) */
+ __u32 h_part_table_len; /* Amount of entries in the partition table */
+ __u32 h_part_table_entry_size; /* Size of each partition entry (usually 128) */
+ __u32 h_part_table_crc; /* CRC32 of entire partition table, starts at h_part_table_lba, is h_part_table_len*h_part_table_entry_size long */
+ /* Rest of block must be zero */
+} __attribute((packed));
+typedef char __gpt_header_right_size[(sizeof(struct gpt_disklabel_header) == GPT_HEADER_SIZE) ? 1 : -1];
+
+/* 3-47: reserved; 48-63: defined for individual partition types. */
+#define GPT_PARTITION_ATTR_PLATFORM_REQUIRED (1ULL << 0) /* Required by the platform to function */
+#define GPT_PARTITION_ATTR_EFI_IGNORE (1ULL << 1) /* To be ignored by the EFI firmware */
+#define GPT_PARTITION_ATTR_BIOS_BOOTABLE (1ULL << 2) /* Equivalent to MBR active flag */
+
+#define GPT_PARTITION_ENTRY_SIZE 128 /* Minimum size, implementations must respect bigger vendor-specific entries */
+struct gpt_disklabel_part {
+ struct gpt_guid p_type; /* Partition type GUID */
+ struct gpt_guid p_guid; /* ID of the partition */
+ __u64 p_lba_first; /* First LBA of the partition */
+ __u64 p_lba_last; /* Last LBA of the partition */
+ __u64 p_attrs; /* Partition attribute bitfield, see above */
+ __u16 p_name[36]; /* Display name of partition, UTF-16 */
+} __attribute((packed));
+typedef char __gpt_part_entry_right_size[(sizeof(struct gpt_disklabel_part) == GPT_PARTITION_ENTRY_SIZE) ? 1 : -1];
+#endif /* CONFIG_GPT_DISKLABEL */
+
+extern struct gendisk *gendisk_head; /* linked list of disks */
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf);
+
+#endif
diff --git a/linux/dev/include/linux/if.h b/linux/dev/include/linux/if.h
new file mode 100644
index 0000000..50dd138
--- /dev/null
+++ b/linux/dev/include/linux/if.h
@@ -0,0 +1,184 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the INET interface module.
+ *
+ * Version: @(#)if.h 1.0.2 04/18/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+
+/* Standard interface flags. */
+
+#ifdef MACH_INCLUDE
+
+#define LINUX_IFF_UP 0x1 /* interface is up */
+#define LINUX_IFF_BROADCAST 0x2 /* broadcast address valid */
+#define LINUX_IFF_DEBUG 0x4 /* turn on debugging */
+#define LINUX_IFF_LOOPBACK 0x8 /* is a loopback net */
+#define LINUX_IFF_POINTOPOINT 0x10 /* interface is has p-p link */
+#define LINUX_IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define LINUX_IFF_RUNNING 0x40 /* resources allocated */
+#define LINUX_IFF_NOARP 0x80 /* no ARP protocol */
+#define LINUX_IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define LINUX_IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define LINUX_IFF_MASTER 0x400 /* master of a load balancer */
+#define LINUX_IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define LINUX_IFF_MULTICAST 0x1000 /* Supports multicast */
+#define LINUX_IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+
+#else /* !MACH_INCLUDE */
+
+#define IFF_UP 0x1 /* interface is up */
+#define IFF_BROADCAST 0x2 /* broadcast address valid */
+#define IFF_DEBUG 0x4 /* turn on debugging */
+#define IFF_LOOPBACK 0x8 /* is a loopback net */
+#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
+#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_NOARP 0x80 /* no ARP protocol */
+#define IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define IFF_MASTER 0x400 /* master of a load balancer */
+#define IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define IFF_MULTICAST 0x1000 /* Supports multicast */
+#define IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+#endif /* !MACH_INCLUDE */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address
+ * families, are allocated and attached when an address is set,
+ * and are linked together so all addresses for an interface can
+ * be located.
+ */
+
+struct ifaddr
+{
+ struct sockaddr ifa_addr; /* address of interface */
+ union {
+ struct sockaddr ifu_broadaddr;
+ struct sockaddr ifu_dstaddr;
+ } ifa_ifu;
+ struct iface *ifa_ifp; /* back-pointer to interface */
+ struct ifaddr *ifa_next; /* next address for interface */
+};
+
+#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */
+#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */
+
+/*
+ * Device mapping structure. I'd just gone off and designed a
+ * beautiful scheme using only loadable modules with arguments
+ * for driver options and along come the PCMCIA people 8)
+ *
+ * Ah well. The get() side of this is good for WDSETUP, and it'll
+ * be handy for debugging things. The set side is fine for now and
+ * being very small might be worth keeping for clean configuration.
+ */
+
+struct ifmap
+{
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+ /* 3 bytes spare */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+
+struct ifreq
+{
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_map ifr_ifru.ifru_map /* device map */
+#define ifr_slave ifr_ifru.ifru_slave /* slave device */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for machine (useful for programs which
+ * must know all networks accessible).
+ */
+
+struct ifconf
+{
+ int ifc_len; /* size of buffer */
+ union
+ {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
+
+#endif /* _LINUX_IF_H */
diff --git a/linux/dev/include/linux/kernel.h b/linux/dev/include/linux/kernel.h
new file mode 100644
index 0000000..9c60b41
--- /dev/null
+++ b/linux/dev/include/linux/kernel.h
@@ -0,0 +1,107 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often-used function prototypes etc
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+
+#define INT_MAX ((int)(~0U>>1))
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define ULONG_MAX (~0UL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+
+# define NORET_TYPE /**/
+# define ATTRIB_NORET __attribute__((noreturn))
+# define NORET_AND noreturn,
+
+extern void math_error(void);
+
+/* Use Mach's panic. */
+#include <kern/debug.h>
+
+NORET_TYPE void do_exit(long error_code)
+ ATTRIB_NORET;
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+
+extern int linux_sprintf(char *buf, const char *fmt, ...);
+extern int linux_vsprintf(char *buf, const char *fmt, va_list args);
+
+#ifndef MACH_INCLUDE
+#define sprintf linux_sprintf
+#define vsprintf linux_vsprintf
+#endif
+
+extern int session_of_pgrp(int pgrp);
+
+extern int kill_proc(int pid, int sig, int priv);
+extern int kill_pg(int pgrp, int sig, int priv);
+extern int kill_sl(int sess, int sig, int priv);
+
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+#if DEBUG
+#define pr_debug(fmt,arg...) \
+ printk(KERN_DEBUG fmt,##arg)
+#else
+#define pr_debug(fmt,arg...) \
+ do { } while (0)
+#endif
+
+#define pr_info(fmt,arg...) \
+ printk(KERN_INFO fmt,##arg)
+
+/*
+ * "suser()" checks against the effective user id, while "fsuser()"
+ * is used for file permission checking and checks against the fsuid..
+ */
+#ifdef MACH
+#define fsuser() 1
+#else
+#define fsuser() (current->fsuid == 0)
+#endif
+
+/*
+ * Display an IP address in readable format.
+ */
+
+#define NIPQUAD(addr) \
+ (((addr) >> 0) & 0xff), \
+ (((addr) >> 8) & 0xff), \
+ (((addr) >> 16) & 0xff), \
+ (((addr) >> 24) & 0xff)
+
+#endif /* __KERNEL__ */
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ long uptime; /* Seconds since boot */
+ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
+ unsigned long totalram; /* Total usable main memory size */
+ unsigned long freeram; /* Available memory size */
+ unsigned long sharedram; /* Amount of shared memory */
+ unsigned long bufferram; /* Memory used by buffers */
+ unsigned long totalswap; /* Total swap space size */
+ unsigned long freeswap; /* swap space still available */
+ unsigned short procs; /* Number of current processes */
+ char _f[22]; /* Pads structure to 64 bytes */
+};
+
+#endif
diff --git a/linux/dev/include/linux/locks.h b/linux/dev/include/linux/locks.h
new file mode 100644
index 0000000..ae063fb
--- /dev/null
+++ b/linux/dev/include/linux/locks.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_LOCKS_H
+#define _LINUX_LOCKS_H
+
+#ifndef _LINUX_MM_H
+#include <linux/mm.h>
+#endif
+#ifndef _LINUX_PAGEMAP_H
+#include <linux/pagemap.h>
+#endif
+
+/*
+ * Unlocked, temporary IO buffer_heads gets moved to the reuse_list
+ * once their page becomes unlocked.
+ */
+extern struct buffer_head *reuse_list;
+
+/*
+ * Buffer cache locking - note that interrupts may only unlock, not
+ * lock buffers.
+ */
+extern void __wait_on_buffer(struct buffer_head *);
+
+static inline void wait_on_buffer(struct buffer_head * bh)
+{
+ if (test_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+static inline void lock_buffer(struct buffer_head * bh)
+{
+ while (set_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+void unlock_buffer(struct buffer_head *);
+
+#ifndef MACH
+/*
+ * super-block locking. Again, interrupts may only unlock
+ * a super-block (although even this isn't done right now.
+ * nfs may need it).
+ */
+extern void __wait_on_super(struct super_block *);
+
+static inline void wait_on_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+}
+
+static inline void lock_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+ sb->s_lock = 1;
+}
+
+static inline void unlock_super(struct super_block * sb)
+{
+ sb->s_lock = 0;
+ wake_up(&sb->s_wait);
+}
+#endif /* !MACH */
+
+#endif /* _LINUX_LOCKS_H */
+
diff --git a/linux/dev/include/linux/malloc.h b/linux/dev/include/linux/malloc.h
new file mode 100644
index 0000000..50d8114
--- /dev/null
+++ b/linux/dev/include/linux/malloc.h
@@ -0,0 +1,18 @@
+#ifndef _LINUX_MALLOC_H
+#define _LINUX_MALLOC_H
+
+#include <linux/mm.h>
+#include <asm/cache.h>
+
+#ifndef MACH_INCLUDE
+#define kmalloc linux_kmalloc
+#define kfree linux_kfree
+#define kfree_s linux_kfree_s
+#endif
+
+extern void *linux_kmalloc (unsigned int size, int priority);
+extern void linux_kfree (void *obj);
+
+#define linux_kfree_s(a,b) linux_kfree(a)
+
+#endif /* _LINUX_MALLOC_H */
diff --git a/linux/dev/include/linux/mm.h b/linux/dev/include/linux/mm.h
new file mode 100644
index 0000000..b0c3ab0
--- /dev/null
+++ b/linux/dev/include/linux/mm.h
@@ -0,0 +1,378 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+extern unsigned long high_memory;
+
+#include <asm/page.h>
+#include <asm/atomic.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+extern int verify_area(int, const void *, unsigned long);
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a memory VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* VM area parameters */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ pgprot_t vm_page_prot;
+ unsigned short vm_flags;
+/* AVL tree of VM areas per task, sorted by address */
+ short vm_avl_height;
+ struct vm_area_struct * vm_avl_left;
+ struct vm_area_struct * vm_avl_right;
+/* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct * vm_next;
+/* for areas with inode, the circular list inode->i_mmap */
+/* for shm areas, the circular list of attaches */
+/* otherwise unused */
+ struct vm_area_struct * vm_next_share;
+ struct vm_area_struct * vm_prev_share;
+/* more */
+ struct vm_operations_struct * vm_ops;
+ unsigned long vm_offset;
+ struct inode * vm_inode;
+ unsigned long vm_pte; /* shared mem */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x0001 /* currently active flags */
+#define VM_WRITE 0x0002
+#define VM_EXEC 0x0004
+#define VM_SHARED 0x0008
+
+#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x0020
+#define VM_MAYEXEC 0x0040
+#define VM_MAYSHARE 0x0080
+
+#define VM_GROWSDOWN 0x0100 /* general info on the segment */
+#define VM_GROWSUP 0x0200
+#define VM_SHM 0x0400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x1000
+#define VM_LOCKED 0x2000
+
+#define VM_STACK_FLAGS 0x0177
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
+ void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
+ int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
+ void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
+ unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
+ unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
+ unsigned long page);
+ int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
+ pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
+};
+
+/*
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in page cache lookup, the second line
+ * is used for linear searches (eg. clock algorithm scans).
+ */
+typedef struct page {
+ /* these must be first (free area handling) */
+ struct page *next;
+ struct page *prev;
+ struct inode *inode;
+ unsigned long offset;
+ struct page *next_hash;
+ atomic_t count;
+ unsigned flags; /* atomic flags, some possibly updated asynchronously */
+ unsigned dirty:16,
+ age:8;
+ struct wait_queue *wait;
+ struct page *prev_hash;
+ struct buffer_head * buffers;
+ unsigned long swap_unlock_entry;
+ unsigned long map_nr; /* page->map_nr == page - mem_map */
+} mem_map_t;
+
+/* Page flag bit values */
+#define PG_locked 0
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+#define PG_free_after 4
+#define PG_decr_after 5
+#define PG_swap_unlock_after 6
+#define PG_DMA 7
+#define PG_reserved 31
+
+/* Make it prettier to test the above... */
+#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
+#define PageError(page) (test_bit(PG_error, &(page)->flags))
+#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
+#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
+#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
+#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
+#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
+#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
+#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
+#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
+
+/*
+ * page->reserved denotes a page which must never be accessed (which
+ * may not even be present).
+ *
+ * page->dma is set for those pages which lie in the range of
+ * physical addresses capable of carrying DMA transfers.
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used for kmalloc() or anyone else who does a
+ * get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->inode is the inode, and page->offset is the file offset
+ * of the page (not necessarily a multiple of PAGE_SIZE).
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page.
+ *
+ * All pages belonging to an inode make up a doubly linked list
+ * inode->i_pages, using the fields page->next and page->prev. (These
+ * fields are also used for freelist management when page->count==0.)
+ * There is also a hash table mapping (inode,offset) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->prev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, page->locked is true. This bit is set before I/O
+ * and reset when I/O completes. page->wait is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * page->uptodate tells whether the page's contents is valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ * When a write completes, and page->free_after is true, the page is
+ * freed without any further delay.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * page->referenced bit, which is set any time the system accesses
+ * that page through the (inode,offset) hash table.
+ * There is also the page->age counter, which implements a linear
+ * decay (why not an exponential decay?), see swapctl.h.
+ */
+
+extern mem_map_t * mem_map;
+
+/*
+ * This is timing-critical - most of the time in getting a new page
+ * goes to clearing the page. If you want a page without the clearing
+ * overhead, just use __get_free_page() directly..
+ */
+#define __get_free_page(priority) __get_free_pages((priority),0,0)
+#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
+extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
+
+static inline unsigned long get_free_page(int priority)
+{
+ unsigned long page;
+
+ page = __get_free_page(priority);
+ if (page)
+ memset((void *) page, 0, PAGE_SIZE);
+ return page;
+}
+
+/* memory.c & swap.c*/
+
+#define free_page(addr) free_pages((addr),0)
+extern void free_pages(unsigned long addr, unsigned long order);
+extern void __free_page(struct page *);
+
+extern void show_free_areas(void);
+extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
+ unsigned long address);
+
+extern void free_page_tables(struct mm_struct * mm);
+extern void clear_page_tables(struct task_struct * tsk);
+extern int new_page_tables(struct task_struct * tsk);
+extern int copy_page_tables(struct task_struct * to);
+
+extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern void vmtruncate(struct inode * inode, unsigned long offset);
+extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
+extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+
+extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
+extern void mem_init(unsigned long start_mem, unsigned long end_mem);
+extern void show_mem(void);
+extern void oom(struct task_struct * tsk);
+extern void si_meminfo(struct sysinfo * val);
+
+/* vmalloc.c */
+
+extern void * vmalloc(unsigned long size);
+extern void * vremap(unsigned long offset, unsigned long size);
+extern void vfree(void * addr);
+extern int vread(char *buf, char *addr, int count);
+extern unsigned long vmtophys (void *);
+
+/* mmap.c */
+extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long off);
+extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
+extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void remove_shared_vm_struct(struct vm_area_struct *);
+extern void build_mmap_avl(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+extern int do_munmap(unsigned long, size_t);
+extern unsigned long get_unmapped_area(unsigned long, unsigned long);
+
+/* filemap.c */
+extern unsigned long page_unuse(unsigned long);
+extern int shrink_mmap(int, int, int);
+extern void truncate_inode_pages(struct inode *, unsigned long);
+
+#define GFP_BUFFER 0x00
+#define GFP_ATOMIC 0x01
+#define GFP_USER 0x02
+#define GFP_KERNEL 0x03
+#define GFP_NOBUFFER 0x04
+#define GFP_NFS 0x05
+#define GFP_IO 0x06
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA 0x80
+
+#define GFP_LEVEL_MASK 0xf
+
+#ifndef MACH
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ address &= PAGE_MASK;
+ grow = vma->vm_start - address;
+ if (vma->vm_end - address
+ > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
+ (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
+ > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+ vma->vm_start = address;
+ vma->vm_offset -= grow;
+ vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
+ return 0;
+}
+
+#define avl_empty (struct vm_area_struct *) NULL
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct * result = NULL;
+
+ if (mm) {
+ struct vm_area_struct * tree = mm->mmap_avl;
+ for (;;) {
+ if (tree == avl_empty)
+ break;
+ if (tree->vm_end > addr) {
+ result = tree;
+ if (tree->vm_start <= addr)
+ break;
+ tree = tree->vm_avl_left;
+ } else
+ tree = tree->vm_avl_right;
+ }
+ }
+ return result;
+}
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma;
+
+ vma = find_vma(mm,start_addr);
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
+#endif /* !MACH */
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/dev/include/linux/modversions.h b/linux/dev/include/linux/modversions.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/linux/modversions.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/netdevice.h b/linux/dev/include/linux/netdevice.h
new file mode 100644
index 0000000..e1a9a34
--- /dev/null
+++ b/linux/dev/include/linux/netdevice.h
@@ -0,0 +1,339 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.11 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@super.org>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ * Added extern for fddi_setup()
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <linux/config.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+
+/* for future expansion when we will have different priorities. */
+#define DEV_NUMBUFFS 3
+#define MAX_ADDR_LEN 7
+#ifndef CONFIG_AX25
+#ifndef CONFIG_TR
+#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE)
+#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
+#else
+#define MAX_HEADER 80 /* We need to allow for having tunnel headers */
+#endif /* IPIP */
+#else
+#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
+#endif /* TR */
+#else
+#define MAX_HEADER 96 /* AX.25 + NetROM */
+#endif /* AX25 */
+
+#define IS_MYADDR 1 /* address is (one of) our own */
+#define IS_LOOPBACK 2 /* address is for LOOPBACK */
+#define IS_BROADCAST 3 /* address is a valid broadcast */
+#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
+#define IS_MULTICAST 5 /* Multicast IP address */
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+
+/*
+ * We tag multicasts with these structures.
+ */
+
+/* One entry in a device's chain of subscribed multicast hardware
+ * addresses (maintained by dev_mc_add/dev_mc_delete below). */
+struct dev_mc_list
+{
+ struct dev_mc_list *next; /* next entry on this device's chain */
+ char dmi_addr[MAX_ADDR_LEN]; /* hardware multicast address */
+ unsigned short dmi_addrlen; /* significant bytes in dmi_addr */
+ unsigned short dmi_users; /* usage count for this address */
+};
+
+struct hh_cache
+{
+ struct hh_cache *hh_next;
+ void *hh_arp; /* Opaque pointer, used by
+ * any address resolution module,
+ * not only ARP.
+ */
+ int hh_refcnt; /* number of users */
+ unsigned short hh_type; /* protocol identifier, f.e ETH_P_IP */
+ char hh_uptodate; /* hh_data is valid */
+ char hh_data[16]; /* cached hardware header */
+};
+
+/*
+ * The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ */
+
+#ifdef MACH
+
+/* Under GNU Mach this structure is named linux_device so that it does
+ * not clash with Mach's own struct device; unmodified Linux driver
+ * code still refers to it as "device" via the #define below. */
+#ifndef MACH_INCLUDE
+#define device linux_device
+#endif
+
+struct linux_device
+
+#else
+
+struct device
+
+#endif
+{
+
+ /*
+ * This is the first field of the "visible" part of this structure
+ * (i.e. as seen by users in the "Space.c" file). It is the name
+ * of the interface.
+ */
+ char *name;
+
+ /* I/O specific fields - FIXME: Merge these and struct ifmap into one */
+ unsigned long rmem_end; /* shmem "recv" end */
+ unsigned long rmem_start; /* shmem "recv" start */
+ unsigned long mem_end; /* shared mem end */
+ unsigned long mem_start; /* shared mem start */
+ unsigned long base_addr; /* device I/O address */
+ unsigned char irq; /* device IRQ number */
+
+ /* Low-level status flags. */
+ volatile unsigned char start, /* start an operation */
+ interrupt; /* interrupt arrived */
+ unsigned long tbusy; /* transmitter busy must be long for bitops */
+
+ struct linux_device *next; /* next device on the dev_base list */
+
+ /* The device initialization function. Called only once. */
+ int (*init)(struct linux_device *dev);
+
+ /* Some hardware also needs these fields, but they are not part of the
+ usual set specified in Space.c. */
+ unsigned char if_port; /* Selectable AUI, TP,..*/
+ unsigned char dma; /* DMA channel */
+
+ struct enet_statistics* (*get_stats)(struct linux_device *dev);
+
+ /*
+ * This marks the end of the "visible" part of the structure. All
+ * fields hereafter are internal to the system, and may change at
+ * will (read: may be cleaned up at will).
+ */
+
+ /* These may be needed for future network-power-down code. */
+ unsigned long trans_start; /* Time (in jiffies) of last Tx */
+ unsigned long last_rx; /* Time of last Rx */
+
+ unsigned short flags; /* interface flags (a la BSD) */
+ unsigned short family; /* address family ID (AF_INET) */
+ unsigned short metric; /* routing metric (not used) */
+ unsigned short mtu; /* interface MTU value */
+ unsigned short type; /* interface hardware type */
+ unsigned short hard_header_len; /* hardware hdr length */
+ void *priv; /* pointer to private data */
+
+ /* Interface address info. */
+ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+ unsigned char pad; /* make dev_addr aligned to 8 bytes */
+ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
+ unsigned char addr_len; /* hardware address length */
+ unsigned long pa_addr; /* protocol address */
+ unsigned long pa_brdaddr; /* protocol broadcast addr */
+ unsigned long pa_dstaddr; /* protocol P-P other side addr */
+ unsigned long pa_mask; /* protocol netmask */
+ unsigned short pa_alen; /* protocol address length */
+
+ struct dev_mc_list *mc_list; /* Multicast mac addresses */
+ int mc_count; /* Number of installed mcasts */
+
+ struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */
+ __u32 tx_queue_len; /* Max frames per queue allowed */
+
+ /* For load balancing driver pair support */
+
+ unsigned long pkt_queue; /* Packets queued */
+ struct linux_device *slave; /* Slave device */
+ struct net_alias_info *alias_info; /* main dev alias info */
+ struct net_alias *my_alias; /* alias devs */
+
+ /* Pointer to the interface buffers. */
+ struct sk_buff_head buffs[DEV_NUMBUFFS];
+
+ /* Pointers to interface service routines. */
+ int (*open)(struct linux_device *dev);
+ int (*stop)(struct linux_device *dev);
+ int (*hard_start_xmit) (struct sk_buff *skb,
+ struct linux_device *dev);
+ int (*hard_header) (struct sk_buff *skb,
+ struct linux_device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+ int (*rebuild_header)(void *eth, struct linux_device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+/* The HAVE_* macros advertise the optional entry points below to
+ * protocol and driver code compiled against this header. */
+#define HAVE_MULTICAST
+ void (*set_multicast_list)(struct linux_device *dev);
+#define HAVE_SET_MAC_ADDR
+ int (*set_mac_address)(struct linux_device *dev, void *addr);
+#define HAVE_PRIVATE_IOCTL
+ int (*do_ioctl)(struct linux_device *dev, struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+ int (*set_config)(struct linux_device *dev, struct ifmap *map);
+#define HAVE_HEADER_CACHE
+ void (*header_cache_bind)(struct hh_cache **hhp, struct linux_device *dev, unsigned short htype, __u32 daddr);
+ void (*header_cache_update)(struct hh_cache *hh, struct linux_device *dev, unsigned char * haddr);
+#define HAVE_CHANGE_MTU
+ int (*change_mtu)(struct linux_device *dev, int new_mtu);
+
+ struct iw_statistics* (*get_wireless_stats)(struct linux_device *dev);
+
+#ifdef MACH
+
+/* Glue back to the Mach device layer: per-device state used by the
+ * GNU Mach network-device emulation. */
+#ifdef MACH_INCLUDE
+ struct net_data *net_data;
+#else
+ void *net_data;
+#endif
+
+#endif
+};
+
+
+/* A registered protocol receive handler, chained through ptype_base
+ * (see dev_add_pack/dev_remove_pack below). */
+struct packet_type {
+ unsigned short type; /* This is really htons(ether_type). */
+ struct linux_device * dev; /* device to match - NOTE(review):
+ * presumably NULL means any device;
+ * confirm against dev_add_pack */
+ int (*func) (struct sk_buff *, struct linux_device *,
+      struct packet_type *); /* receive callback */
+ void *data; /* opaque cookie for the handler */
+ struct packet_type *next; /* next handler in the chain */
+};
+
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+
+/* Used by dev_rint */
+#define IN_SKBUFF 1
+
+extern volatile unsigned long in_bh;
+
+extern struct linux_device loopback_dev;
+extern struct linux_device *dev_base;
+extern struct packet_type *ptype_base[16];
+
+
+extern int ip_addr_match(unsigned long addr1, unsigned long addr2);
+extern int ip_chk_addr(unsigned long addr);
+extern struct linux_device *ip_dev_bynet(unsigned long daddr, unsigned long mask);
+extern unsigned long ip_my_addr(void);
+extern unsigned long ip_get_mask(unsigned long addr);
+extern struct linux_device *ip_dev_find(unsigned long addr);
+extern struct linux_device *dev_getbytype(unsigned short type);
+
+extern void dev_add_pack(struct packet_type *pt);
+extern void dev_remove_pack(struct packet_type *pt);
+extern struct linux_device *dev_get(const char *name);
+extern int dev_open(struct linux_device *dev);
+extern int dev_close(struct linux_device *dev);
+extern void dev_queue_xmit(struct sk_buff *skb, struct linux_device *dev,
+ int pri);
+
+#define HAVE_NETIF_RX 1
+extern void netif_rx(struct sk_buff *skb);
+extern void net_bh(void);
+
+#ifdef MACH
+#define dev_tint(dev)
+#else
+extern void dev_tint(struct linux_device *dev);
+#endif
+
+extern int dev_change_flags(struct linux_device *dev, short flags);
+extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int dev_ioctl(unsigned int cmd, void *);
+
+extern void dev_init(void);
+
+/* Locking protection for page faults during outputs to devices unloaded during the fault */
+
+extern int dev_lockct;
+
+/*
+ * These two don't currently need to be interrupt-safe
+ * but they may do soon. Do it properly anyway.
+ */
+
+/* Take a reference on the device list: increment dev_lockct with
+ * local interrupts disabled so the update cannot be torn by an
+ * interrupt handler on this CPU. */
+extern __inline__ void dev_lock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct++;
+ restore_flags(flags);
+}
+
+/* Drop a reference on the device list: decrement dev_lockct under
+ * the same interrupt-disabled protection as dev_lock_list(). */
+extern __inline__ void dev_unlock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct--;
+ restore_flags(flags);
+}
+
+/*
+ * This almost never occurs, isn't in performance critical paths
+ * and we can thus be relaxed about it
+ */
+
+/* Wait for all device-list references to be dropped, yielding the
+ * CPU via schedule() while dev_lockct is still non-zero. */
+extern __inline__ void dev_lock_wait(void)
+{
+ while(dev_lockct)
+ schedule();
+}
+
+
+/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+
+extern void ether_setup(struct linux_device *dev);
+extern void tr_setup(struct linux_device *dev);
+extern void fddi_setup(struct linux_device *dev);
+extern int ether_config(struct linux_device *dev, struct ifmap *map);
+/* Support for loadable net-drivers */
+extern int register_netdev(struct linux_device *dev);
+extern void unregister_netdev(struct linux_device *dev);
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+/* Functions used for multicast support */
+extern void dev_mc_upload(struct linux_device *dev);
+extern void dev_mc_delete(struct linux_device *dev, void *addr, int alen, int all);
+extern void dev_mc_add(struct linux_device *dev, void *addr, int alen, int newonly);
+extern void dev_mc_discard(struct linux_device *dev);
+/* This is the wrong place but it'll do for the moment */
+extern void ip_mc_allhost(struct linux_device *dev);
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DEV_H */
diff --git a/linux/dev/include/linux/notifier.h b/linux/dev/include/linux/notifier.h
new file mode 100644
index 0000000..b3c9ccf
--- /dev/null
+++ b/linux/dev/include/linux/notifier.h
@@ -0,0 +1,96 @@
+/*
+ * Routines to manage notifier chains for passing status changes to any
+ * interested routines. We need this instead of hard coded call lists so
+ * that modules can poke their nose into the innards. The network devices
+ * needed them so here they are for the rest of you.
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+
+struct notifier_block
+{
+ int (*notifier_call)(struct notifier_block *this, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+};
+
+
+#ifdef __KERNEL__
+
+#define NOTIFY_DONE 0x0000 /* Don't care */
+#define NOTIFY_OK 0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+
+/* Insert notifier N into *LIST, which is kept sorted by descending
+ * priority; entries of equal priority keep registration order.
+ * Always returns 0. */
+extern __inline__ int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+{
+ while(*list)
+ {
+ /* Stop at the first entry with a lower priority than N. */
+ if(n->priority > (*list)->priority)
+ break;
+ list= &((*list)->next);
+ }
+ n->next = *list;
+ *list=n;
+ return 0;
+}
+
+/*
+ * Warning to any non GPL module writers out there.. these functions are
+ * GPL'd
+ */
+
+/* Remove notifier N from chain *NL.  Returns 0 on success, or
+ * -ENOENT if N is not on the chain. */
+extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+{
+ while((*nl)!=NULL)
+ {
+ if((*nl)==n)
+ {
+ /* Unlink N by redirecting the predecessor's link. */
+ *nl=n->next;
+ return 0;
+ }
+ nl=&((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+/*
+ * This is one of these things that is generally shorter inline
+ */
+
+/* Invoke every callback on chain *N with (VAL, V), in priority order.
+ * Stops early if a callback sets NOTIFY_STOP_MASK in its result.
+ * Returns the last callback's result, or NOTIFY_DONE for an empty
+ * chain. */
+extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+{
+ int ret=NOTIFY_DONE;
+ struct notifier_block *nb = *n;
+ while(nb)
+ {
+ ret=nb->notifier_call(nb,val,v);
+ if(ret&NOTIFY_STOP_MASK)
+ return ret; /* a callback vetoed further delivery */
+ nb=nb->next;
+ }
+ return ret;
+}
+
+
+/*
+ * Declared notifiers so far. I can imagine quite a few more chains
+ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+
+/* netdevice notifier chain */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+ detected a hardware crash and restarted
+ - we can use this eg to kick tcp sessions
+ once done */
+#endif
+#endif
diff --git a/linux/dev/include/linux/pagemap.h b/linux/dev/include/linux/pagemap.h
new file mode 100644
index 0000000..6e21f3d
--- /dev/null
+++ b/linux/dev/include/linux/pagemap.h
@@ -0,0 +1,150 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+#include <asm/system.h>
+
+/*
+ * Page-mapping primitive inline functions
+ *
+ * Copyright 1995 Linus Torvalds
+ */
+
+#ifndef MACH
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/swapctl.h>
+
+/* Kernel virtual address of PAGE: physical pages are mapped linearly
+ * starting at PAGE_OFFSET, indexed by the page's map_nr. */
+static inline unsigned long page_address(struct page * page)
+{
+ return PAGE_OFFSET + PAGE_SIZE * page->map_nr;
+}
+
+#define PAGE_HASH_BITS 11
+#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
+
+#define PAGE_AGE_VALUE ((PAGE_INITIAL_AGE)+(PAGE_ADVANCE))
+
+extern unsigned long page_cache_size; /* # of pages currently in the hash table */
+extern struct page * page_hash_table[PAGE_HASH_SIZE];
+
+/*
+ * We use a power-of-two hash table to avoid a modulus,
+ * and get a reasonable hash by knowing roughly how the
+ * inode pointer and offsets are distributed (ie, we
+ * roughly know which bits are "significant")
+ */
+static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
+{
+/* i: the inode pointer scaled down by the largest power of two that
+ * divides sizeof(struct inode) - those low pointer bits carry no
+ * information when inodes are spaced sizeof(struct inode) apart. */
+#define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
+/* o: page index within the file. */
+#define o (offset >> PAGE_SHIFT)
+/* s: fold the bits above the hash width back into the low bits. */
+#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
+ return s(i+o) & (PAGE_HASH_SIZE-1);
+#undef i
+#undef o
+#undef s
+}
+
+#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset))
+
+/* Scan a hash chain for the page caching (INODE, OFFSET).  PAGE is
+ * the head of the chain.  Returns the page with its reference count
+ * incremented and PG_referenced set, or NULL if not cached. */
+static inline struct page * __find_page(struct inode * inode, unsigned long offset, struct page *page)
+{
+ /* Jump into the middle of the loop so the chain head itself is
+ * tested before any next_hash step. */
+ goto inside;
+ for (;;) {
+ page = page->next_hash;
+inside:
+ if (!page)
+ goto not_found; /* end of chain: return NULL */
+ if (page->inode != inode)
+ continue;
+ if (page->offset == offset)
+ break;
+ }
+ /* Found the page.  Take a reference and mark it recently used. */
+ atomic_inc(&page->count);
+ set_bit(PG_referenced, &page->flags);
+not_found:
+ return page;
+}
+
+/* Look up the cached page for (INODE, OFFSET); NULL if absent.  A
+ * found page is returned with its reference count already taken. */
+static inline struct page *find_page(struct inode * inode, unsigned long offset)
+{
+ return __find_page(inode, offset, *page_hash(inode, offset));
+}
+
+/* Unlink PAGE from its page-cache hash chain and update the global
+ * page_cache_size accounting. */
+static inline void remove_page_from_hash_queue(struct page * page)
+{
+ struct page **p;
+ struct page *next_hash, *prev_hash;
+
+ /* Detach from the doubly linked chain. */
+ next_hash = page->next_hash;
+ prev_hash = page->prev_hash;
+ page->next_hash = NULL;
+ page->prev_hash = NULL;
+ if (next_hash)
+ next_hash->prev_hash = prev_hash;
+ if (prev_hash)
+ prev_hash->next_hash = next_hash;
+ /* If PAGE was the bucket head, advance the head. */
+ p = page_hash(page->inode,page->offset);
+ if (*p == page)
+ *p = next_hash;
+ page_cache_size--;
+}
+
+/* Insert PAGE at the head of hash bucket *P, resetting its age and
+ * marking it recently referenced. */
+static inline void __add_page_to_hash_queue(struct page * page, struct page **p)
+{
+ page_cache_size++;
+ set_bit(PG_referenced, &page->flags);
+ page->age = PAGE_AGE_VALUE;
+ page->prev_hash = NULL;
+ if ((page->next_hash = *p) != NULL)
+ page->next_hash->prev_hash = page;
+ *p = page;
+}
+
+/* Hash (INODE, OFFSET) to its bucket and insert PAGE there. */
+static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
+{
+ __add_page_to_hash_queue(page, page_hash(inode,offset));
+}
+
+
+/* Unlink PAGE from its inode's list of cached pages and clear the
+ * back-pointer; updates the inode's i_nrpages count. */
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+ struct inode * inode = page->inode;
+
+ page->inode = NULL;
+ inode->i_nrpages--;
+ /* Advance the list head if PAGE was first. */
+ if (inode->i_pages == page)
+ inode->i_pages = page->next;
+ if (page->next)
+ page->next->prev = page->prev;
+ if (page->prev)
+ page->prev->next = page->next;
+ page->next = NULL;
+ page->prev = NULL;
+}
+
+/* Insert PAGE at the head of INODE's list of cached pages and set the
+ * page's back-pointer; updates the inode's i_nrpages count. */
+static inline void add_page_to_inode_queue(struct inode * inode, struct page * page)
+{
+ struct page **p = &inode->i_pages;
+
+ inode->i_nrpages++;
+ page->inode = inode;
+ page->prev = NULL;
+ if ((page->next = *p) != NULL)
+ page->next->prev = page;
+ *p = page;
+}
+
+extern void __wait_on_page(struct page *);
+/* Block until PAGE is unlocked: cheap inline test first, with the
+ * actual sleeping done out of line in __wait_on_page(). */
+static inline void wait_on_page(struct page * page)
+{
+ if (PageLocked(page))
+ __wait_on_page(page);
+}
+
+extern void update_vm_cache(struct inode *, unsigned long, const char *, int);
+
+#endif /* !MACH */
+
+#endif
diff --git a/linux/dev/include/linux/pm.h b/linux/dev/include/linux/pm.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/linux/pm.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/proc_fs.h b/linux/dev/include/linux/proc_fs.h
new file mode 100644
index 0000000..8ce0bb2
--- /dev/null
+++ b/linux/dev/include/linux/proc_fs.h
@@ -0,0 +1,292 @@
+#ifndef _LINUX_PROC_FS_H
+#define _LINUX_PROC_FS_H
+
+#include <linux/fs.h>
+#include <linux/malloc.h>
+
+/*
+ * The proc filesystem constants/structures
+ */
+
+/*
+ * We always define these enumerators
+ */
+
+enum root_directory_inos {
+ PROC_ROOT_INO = 1,
+ PROC_LOADAVG,
+ PROC_UPTIME,
+ PROC_MEMINFO,
+ PROC_KMSG,
+ PROC_VERSION,
+ PROC_CPUINFO,
+ PROC_PCI,
+ PROC_SELF, /* will change inode # */
+ PROC_NET,
+ PROC_SCSI,
+ PROC_MALLOC,
+ PROC_KCORE,
+ PROC_MODULES,
+ PROC_STAT,
+ PROC_DEVICES,
+ PROC_INTERRUPTS,
+ PROC_FILESYSTEMS,
+ PROC_KSYMS,
+ PROC_DMA,
+ PROC_IOPORTS,
+#ifdef __SMP_PROF__
+ PROC_SMP_PROF,
+#endif
+ PROC_PROFILE, /* whether enabled or not */
+ PROC_CMDLINE,
+ PROC_SYS,
+ PROC_MTAB,
+ PROC_MD,
+ PROC_RTC,
+ PROC_LOCKS
+};
+
+enum pid_directory_inos {
+ PROC_PID_INO = 2,
+ PROC_PID_STATUS,
+ PROC_PID_MEM,
+ PROC_PID_CWD,
+ PROC_PID_ROOT,
+ PROC_PID_EXE,
+ PROC_PID_FD,
+ PROC_PID_ENVIRON,
+ PROC_PID_CMDLINE,
+ PROC_PID_STAT,
+ PROC_PID_STATM,
+ PROC_PID_MAPS
+};
+
+enum pid_subdirectory_inos {
+ PROC_PID_FD_DIR = 1
+};
+
+enum net_directory_inos {
+ PROC_NET_UNIX = 128,
+ PROC_NET_ARP,
+ PROC_NET_ROUTE,
+ PROC_NET_DEV,
+ PROC_NET_RAW,
+ PROC_NET_TCP,
+ PROC_NET_UDP,
+ PROC_NET_SNMP,
+ PROC_NET_RARP,
+ PROC_NET_IGMP,
+ PROC_NET_IPMR_VIF,
+ PROC_NET_IPMR_MFC,
+ PROC_NET_IPFWFWD,
+ PROC_NET_IPFWIN,
+ PROC_NET_IPFWOUT,
+ PROC_NET_IPACCT,
+ PROC_NET_IPMSQHST,
+ PROC_NET_WIRELESS,
+ PROC_NET_IPX_INTERFACE,
+ PROC_NET_IPX_ROUTE,
+ PROC_NET_IPX,
+ PROC_NET_ATALK,
+ PROC_NET_AT_ROUTE,
+ PROC_NET_ATIF,
+ PROC_NET_AX25_ROUTE,
+ PROC_NET_AX25,
+ PROC_NET_AX25_CALLS,
+ PROC_NET_NR_NODES,
+ PROC_NET_NR_NEIGH,
+ PROC_NET_NR,
+ PROC_NET_SOCKSTAT,
+ PROC_NET_RTCACHE,
+ PROC_NET_AX25_BPQETHER,
+ PROC_NET_ALIAS_TYPES,
+ PROC_NET_ALIASES,
+ PROC_NET_IP_MASQ_APP,
+ PROC_NET_STRIP_STATUS,
+ PROC_NET_STRIP_TRACE,
+ PROC_NET_IPAUTOFW,
+ PROC_NET_RS_NODES,
+ PROC_NET_RS_NEIGH,
+ PROC_NET_RS_ROUTES,
+ PROC_NET_RS,
+ PROC_NET_Z8530,
+ PROC_NET_LAST
+};
+
+enum scsi_directory_inos {
+ PROC_SCSI_SCSI = 256,
+ PROC_SCSI_ADVANSYS,
+ PROC_SCSI_EATA,
+ PROC_SCSI_EATA_PIO,
+ PROC_SCSI_AHA152X,
+ PROC_SCSI_AHA1542,
+ PROC_SCSI_AHA1740,
+ PROC_SCSI_AIC7XXX,
+ PROC_SCSI_BUSLOGIC,
+ PROC_SCSI_U14_34F,
+ PROC_SCSI_FDOMAIN,
+ PROC_SCSI_GENERIC_NCR5380,
+ PROC_SCSI_IN2000,
+ PROC_SCSI_PAS16,
+ PROC_SCSI_QLOGICFAS,
+ PROC_SCSI_QLOGICISP,
+ PROC_SCSI_SEAGATE,
+ PROC_SCSI_T128,
+ PROC_SCSI_DC390WUF,
+ PROC_SCSI_DC390T,
+ PROC_SCSI_NCR53C7xx,
+ PROC_SCSI_NCR53C8XX,
+ PROC_SCSI_ULTRASTOR,
+ PROC_SCSI_7000FASST,
+ PROC_SCSI_EATA2X,
+ PROC_SCSI_AM53C974,
+ PROC_SCSI_SSC,
+ PROC_SCSI_NCR53C406A,
+ PROC_SCSI_PPA,
+ PROC_SCSI_ESP,
+ PROC_SCSI_A3000,
+ PROC_SCSI_A2091,
+ PROC_SCSI_GVP11,
+ PROC_SCSI_ATARI,
+ PROC_SCSI_GDTH,
+ PROC_SCSI_IDESCSI,
+ PROC_SCSI_SCSI_DEBUG,
+ PROC_SCSI_NOT_PRESENT,
+ PROC_SCSI_FILE, /* I'm assuming here that we */
+ PROC_SCSI_LAST = (PROC_SCSI_FILE + 16) /* won't ever see more than */
+}; /* 16 HBAs in one machine */
+
+/* Finally, the dynamically allocatable proc entries are reserved: */
+
+#define PROC_DYNAMIC_FIRST 4096
+#define PROC_NDYNAMIC 4096
+
+#define PROC_SUPER_MAGIC 0x9fa0
+
+/*
+ * This is not completely implemented yet. The idea is to
+ * create a in-memory tree (like the actual /proc filesystem
+ * tree) of these proc_dir_entries, so that we can dynamically
+ * add new files to /proc.
+ *
+ * The "next" pointer creates a linked list of one /proc directory,
+ * while parent/subdir create the directory structure (every
+ * /proc file has a parent, but "subdir" is NULL for all
+ * non-directory entries).
+ *
+ * "get_info" is called at "read", while "fill_inode" is used to
+ * fill in file type/protection/owner information specific to the
+ * particular /proc file.
+ */
+struct proc_dir_entry {
+ unsigned short low_ino;
+ unsigned short namelen;
+ const char *name;
+ mode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ unsigned long size;
+ struct inode_operations * ops;
+ int (*get_info)(char *, char **, off_t, int, int);
+ void (*fill_inode)(struct inode *);
+ struct proc_dir_entry *next, *parent, *subdir;
+ void *data;
+};
+
+extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+extern struct proc_dir_entry proc_root;
+extern struct proc_dir_entry proc_net;
+extern struct proc_dir_entry proc_scsi;
+extern struct proc_dir_entry proc_sys;
+extern struct proc_dir_entry proc_pid;
+extern struct proc_dir_entry proc_pid_fd;
+
+extern struct inode_operations proc_scsi_inode_operations;
+
+extern void proc_root_init(void);
+extern void proc_base_init(void);
+extern void proc_net_init(void);
+
+extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *);
+extern int proc_register_dynamic(struct proc_dir_entry *,
+ struct proc_dir_entry *);
+extern int proc_unregister(struct proc_dir_entry *, int);
+
+/* Register entry X under the /proc/net directory. */
+static inline int proc_net_register(struct proc_dir_entry * x)
+{
+ return proc_register(&proc_net, x);
+}
+
+/* Remove the entry with inode number X from /proc/net. */
+static inline int proc_net_unregister(int x)
+{
+ return proc_unregister(&proc_net, x);
+}
+
+/* Register a SCSI /proc entry.  Well-known entries (inode numbers
+ * below PROC_SCSI_FILE) live directly under /proc/scsi; per-host
+ * files go under the DRIVER's own directory. */
+static inline int proc_scsi_register(struct proc_dir_entry *driver,
+     struct proc_dir_entry *x)
+{
+ /* All SCSI /proc files share the SCSI inode operations. */
+ x->ops = &proc_scsi_inode_operations;
+ if(x->low_ino < PROC_SCSI_FILE){
+ return(proc_register(&proc_scsi, x));
+ }else{
+ return(proc_register(driver, x));
+ }
+}
+
+/* Undo proc_scsi_register(): remove the entry with inode number X,
+ * freeing the dynamically allocated entry for per-driver files. */
+static inline int proc_scsi_unregister(struct proc_dir_entry *driver, int x)
+{
+ extern void scsi_init_free(char *ptr, unsigned int size);
+
+ /* NOTE(review): the register side tests `low_ino < PROC_SCSI_FILE'
+ * but this path uses `<=' - confirm whether PROC_SCSI_FILE itself
+ * belongs under /proc/scsi or the driver directory. */
+ if(x <= PROC_SCSI_FILE)
+ return(proc_unregister(&proc_scsi, x));
+ else {
+ struct proc_dir_entry **p = &driver->subdir, *dp;
+ int ret;
+
+ /* Find the child entry with inode number X so its storage can
+ * be released after unregistering. */
+ while ((dp = *p) != NULL) {
+ if (dp->low_ino == x)
+ break;
+ p = &dp->next;
+ }
+ ret = proc_unregister(driver, x);
+ /* NOTE(review): if no child matched, dp is NULL here and is
+ * still passed to scsi_init_free - verify callers never pass
+ * an unknown inode number. */
+ scsi_init_free((char *) dp, sizeof(struct proc_dir_entry) + 4);
+ return(ret);
+ }
+}
+
+extern struct super_block *proc_read_super(struct super_block *,void *,int);
+extern int init_proc_fs(void);
+extern struct inode * proc_get_inode(struct super_block *, int, struct proc_dir_entry *);
+extern void proc_statfs(struct super_block *, struct statfs *, int);
+extern void proc_read_inode(struct inode *);
+extern void proc_write_inode(struct inode *);
+extern int proc_match(int, const char *, struct proc_dir_entry *);
+
+/*
+ * These are generic /proc routines that use the internal
+ * "struct proc_dir_entry" tree to traverse the filesystem.
+ *
+ * The /proc root directory has extended versions to take care
+ * of the /proc/<pid> subdirectories.
+ */
+extern int proc_readdir(struct inode *, struct file *, void *, filldir_t);
+extern int proc_lookup(struct inode *, const char *, int, struct inode **);
+
+extern struct inode_operations proc_dir_inode_operations;
+extern struct inode_operations proc_net_inode_operations;
+extern struct inode_operations proc_netdir_inode_operations;
+extern struct inode_operations proc_scsi_inode_operations;
+extern struct inode_operations proc_mem_inode_operations;
+extern struct inode_operations proc_sys_inode_operations;
+extern struct inode_operations proc_array_inode_operations;
+extern struct inode_operations proc_arraylong_inode_operations;
+extern struct inode_operations proc_kcore_inode_operations;
+extern struct inode_operations proc_profile_inode_operations;
+extern struct inode_operations proc_kmsg_inode_operations;
+extern struct inode_operations proc_link_inode_operations;
+extern struct inode_operations proc_fd_inode_operations;
+
+#endif
diff --git a/linux/dev/include/linux/sched.h b/linux/dev/include/linux/sched.h
new file mode 100644
index 0000000..3e7bcd4
--- /dev/null
+++ b/linux/dev/include/linux/sched.h
@@ -0,0 +1,521 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+/*
+ * define DEBUG if you want the wait-queues to have some extra
+ * debugging code. It's not normally used, but might catch some
+ * wait-queue coding errors.
+ *
+ * #define DEBUG
+ */
+
+#include <asm/param.h> /* for HZ */
+
+extern unsigned long event;
+
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/tasks.h>
+#include <linux/kernel.h>
+
+#include <asm/system.h>
+#include <asm/semaphore.h>
+#include <asm/page.h>
+
+#include <linux/smp.h>
+#include <linux/tty.h>
+#include <linux/sem.h>
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+#define CLONE_VM 0x00000100 /* set if VM shared between processes */
+#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
+#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
+#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */
+#define CLONE_PID 0x00001000 /* set if pid shared */
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
+
+#define CT_TO_SECS(x) ((x) / HZ)
+#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
+
+extern int nr_running, nr_tasks;
+extern int last_pid;
+
+#define FIRST_TASK task[0]
+#define LAST_TASK task[NR_TASKS-1]
+
+#include <linux/head.h>
+#include <linux/fs.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+
+#include <asm/processor.h>
+
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define TASK_ZOMBIE 3
+#define TASK_STOPPED 4
+#define TASK_SWAPPING 5
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+
+struct sched_param {
+ int sched_priority;
+};
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __KERNEL__
+
+extern void sched_init(void);
+extern void show_state(void);
+extern void trap_init(void);
+
+asmlinkage void schedule(void);
+
+/* Open file table structure */
+struct files_struct {
+ int count;
+ fd_set close_on_exec;
+ fd_set open_fds;
+ struct file * fd[NR_OPEN];
+};
+
+#define INIT_FILES { \
+ 1, \
+ { { 0, } }, \
+ { { 0, } }, \
+ { NULL, } \
+}
+
+struct fs_struct {
+ int count;
+ unsigned short umask;
+ struct inode * root, * pwd;
+};
+
+#define INIT_FS { \
+ 1, \
+ 0022, \
+ NULL, NULL \
+}
+
+struct mm_struct {
+ int count;
+ pgd_t * pgd;
+ unsigned long context;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack, start_mmap;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+ struct vm_area_struct * mmap;
+ struct vm_area_struct * mmap_avl;
+ struct semaphore mmap_sem;
+};
+
+#define INIT_MM { \
+ 1, \
+ swapper_pg_dir, \
+ 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, \
+ 0, \
+ &init_mmap, &init_mmap, MUTEX }
+
+struct signal_struct {
+ int count;
+ struct sigaction action[32];
+};
+
+#define INIT_SIGNALS { \
+ 1, \
+ { {0,}, } }
+
+struct task_struct {
+/* these are hardcoded - don't touch */
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ long counter;
+ long priority;
+ unsigned long signal;
+ unsigned long blocked; /* bitmap of masked signals */
+ unsigned long flags; /* per process flags, defined below */
+ int errno;
+ long debugreg[8]; /* Hardware debugging registers */
+ struct exec_domain *exec_domain;
+/* various fields */
+ struct linux_binfmt *binfmt;
+ struct task_struct *next_task, *prev_task;
+ struct task_struct *next_run, *prev_run;
+ unsigned long saved_kernel_stack;
+ unsigned long kernel_stack_page;
+ int exit_code, exit_signal;
+ /* ??? */
+ unsigned long personality;
+ int dumpable:1;
+ int did_exec:1;
+ /* shouldn't this be pid_t? */
+ int pid;
+ int pgrp;
+ int tty_old_pgrp;
+ int session;
+ /* boolean value for session group leader */
+ int leader;
+ int groups[NGROUPS];
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->p_pptr->pid)
+ */
+ struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
+ struct wait_queue *wait_chldexit; /* for wait4() */
+ unsigned short uid,euid,suid,fsuid;
+ unsigned short gid,egid,sgid,fsgid;
+ unsigned long timeout, policy, rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+ struct timer_list real_timer;
+ long utime, stime, cutime, cstime, start_time;
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
+ int swappable:1;
+ unsigned long swap_address;
+ unsigned long old_maj_flt; /* old value of maj_flt */
+ unsigned long dec_flt; /* page fault count of the last time */
+ unsigned long swap_cnt; /* number of pages to swap on next pass */
+/* limits */
+ struct rlimit rlim[RLIM_NLIMITS];
+ unsigned short used_math;
+ char comm[16];
+/* file system info */
+ int link_count;
+ struct tty_struct *tty; /* NULL if no tty */
+/* ipc stuff */
+ struct sem_undo *semundo;
+ struct sem_queue *semsleeping;
+/* ldt for this task - used by Wine. If NULL, default_ldt is used */
+ struct desc_struct *ldt;
+/* tss for this task */
+ struct thread_struct tss;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* memory management info */
+ struct mm_struct *mm;
+/* signal handlers */
+ struct signal_struct *sig;
+#ifdef __SMP__
+ int processor;
+ int last_processor;
+ int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+#endif
+};
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486 */
+#define PF_PTRACED 0x00000010 /* set if ptrace (0) has been called. */
+#define PF_TRACESYS 0x00000020 /* tracing system calls */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+
+#define PF_STARTING 0x00000002 /* being created */
+#define PF_EXITING 0x00000004 /* getting shut down */
+
+#define PF_USEDFPU 0x00100000 /* Process used the FPU this quantum (SMP only) */
+#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k) */
+
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed. 8MB seems reasonable.
+ */
+#define _STK_LIM (8*1024*1024)
+
+#define DEF_PRIORITY (20*HZ/100) /* 200 ms time slices */
+
+/*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk! Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK \
+/* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \
+/* debugregs */ { 0, }, \
+/* exec domain */&default_exec_domain, \
+/* binfmt */ NULL, \
+/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* stack */ 0,(unsigned long) &init_kernel_stack, \
+/* ec,brk... */ 0,0,0,0,0, \
+/* pid etc.. */ 0,0,0,0,0, \
+/* suppl grps*/ {NOGROUP,}, \
+/* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \
+/* uid etc */ 0,0,0,0,0,0,0,0, \
+/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \
+/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
+/* utime */ 0,0,0,0,0, \
+/* flt */ 0,0,0,0,0,0, \
+/* swp */ 0,0,0,0,0, \
+/* rlimits */ INIT_RLIMITS, \
+/* math */ 0, \
+/* comm */ "swapper", \
+/* fs info */ 0,NULL, \
+/* ipc */ NULL, NULL, \
+/* ldt */ NULL, \
+/* tss */ INIT_TSS, \
+/* fs */ &init_fs, \
+/* files */ &init_files, \
+/* mm */ &init_mm, \
+/* signals */ &init_signals, \
+}
+
+extern struct mm_struct init_mm;
+extern struct task_struct init_task;
+extern struct task_struct *task[NR_TASKS];
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *current_set[NR_CPUS];
+/*
+ * On a single processor system this comes out as current_set[0] when cpp
+ * has finished with it, which gcc will optimise away.
+ */
+#define current (0+current_set[smp_processor_id()]) /* Current on this processor */
+extern unsigned long volatile jiffies;
+extern unsigned long itimer_ticks;
+extern unsigned long itimer_next;
+extern struct timeval xtime;
+extern int need_resched;
+extern void do_timer(struct pt_regs *);
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+extern int securelevel; /* system security level */
+
+#define CURRENT_TIME (xtime.tv_sec)
+
+extern void sleep_on(struct wait_queue ** p);
+extern void interruptible_sleep_on(struct wait_queue ** p);
+extern void wake_up(struct wait_queue ** p);
+extern void wake_up_interruptible(struct wait_queue ** p);
+extern void wake_up_process(struct task_struct * tsk);
+
+extern void notify_parent(struct task_struct * tsk, int signal);
+extern void force_sig(unsigned long sig,struct task_struct * p);
+extern int send_sig(unsigned long sig,struct task_struct * p,int priv);
+extern int in_group_p(gid_t grp);
+
+extern int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id);
+extern void free_irq(unsigned int irq, void *dev_id);
+
+/*
+ * This has now become a routine instead of a macro, it sets a flag if
+ * it returns true (to do BSD-style accounting where the process is flagged
+ * if it uses root privs). The implication of this is that you should do
+ * normal permissions checks first, and check suser() last.
+ */
+#ifdef MACH
+
+extern inline int
+suser(void)
+{
+ return 1;
+}
+
+#else
+
+extern inline int suser(void)
+{
+ if (current->euid == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_mm(struct task_struct *);
+extern void exit_fs(struct task_struct *);
+extern void exit_files(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void release_thread(struct task_struct *);
+
+extern int do_execve(char *, char **, char **, struct pt_regs *);
+extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
+
+extern void add_wait_queue(struct wait_queue **p, struct wait_queue *wait);
+extern void remove_wait_queue(struct wait_queue **p, struct wait_queue *wait);
+
+/* See if we have a valid user level fd.
+ * If it makes sense, return the file structure it references.
+ * Otherwise return NULL.
+ */
+
+#ifdef MACH
+
+extern void __add_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void add_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void __remove_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void remove_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+
+#else /* !MACH */
+
+extern inline struct file *file_from_fd(const unsigned int fd)
+{
+
+ if (fd >= NR_OPEN)
+ return NULL;
+ /* either valid or null */
+ return current->files->fd[fd];
+}
+
+/*
+ * The wait-queues are circular lists, and you have to be *very* sure
+ * to keep them correct. Use only these two functions to add/remove
+ * entries in the queues.
+ */
+extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue *head = *p;
+ struct wait_queue *next = WAIT_QUEUE_HEAD(p);
+
+ if (head)
+ next = head;
+ *p = wait;
+ wait->next = next;
+}
+
+extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __add_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue * next = wait->next;
+ struct wait_queue * head = next;
+
+ for (;;) {
+ struct wait_queue * nextlist = head->next;
+ if (nextlist == wait)
+ break;
+ head = nextlist;
+ }
+ head->next = next;
+}
+
+extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __remove_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+extern inline void select_wait(struct wait_queue ** wait_address, select_table * p)
+{
+ struct select_table_entry * entry;
+
+ if (!p || !wait_address)
+ return;
+ if (p->nr >= __MAX_SELECT_TABLE_ENTRIES)
+ return;
+ entry = p->entry + p->nr;
+ entry->wait_address = wait_address;
+ entry->wait.task = current;
+ entry->wait.next = NULL;
+ add_wait_queue(wait_address,&entry->wait);
+ p->nr++;
+}
+
+#endif /* !MACH */
+
+#define REMOVE_LINKS(p) do { unsigned long flags; \
+ save_flags(flags) ; cli(); \
+ (p)->next_task->prev_task = (p)->prev_task; \
+ (p)->prev_task->next_task = (p)->next_task; \
+ restore_flags(flags); \
+ if ((p)->p_osptr) \
+ (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
+ if ((p)->p_ysptr) \
+ (p)->p_ysptr->p_osptr = (p)->p_osptr; \
+ else \
+ (p)->p_pptr->p_cptr = (p)->p_osptr; \
+ } while (0)
+
+#define SET_LINKS(p) do { unsigned long flags; \
+ save_flags(flags); cli(); \
+ (p)->next_task = &init_task; \
+ (p)->prev_task = init_task.prev_task; \
+ init_task.prev_task->next_task = (p); \
+ init_task.prev_task = (p); \
+ restore_flags(flags); \
+ (p)->p_ysptr = NULL; \
+ if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
+ (p)->p_osptr->p_ysptr = p; \
+ (p)->p_pptr->p_cptr = p; \
+ } while (0)
+
+#define for_each_task(p) \
+ for (p = &init_task ; (p = p->next_task) != &init_task ; )
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/dev/include/linux/skbuff.h b/linux/dev/include/linux/skbuff.h
new file mode 100644
index 0000000..c55e529
--- /dev/null
+++ b/linux/dev/include/linux/skbuff.h
@@ -0,0 +1,466 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/time.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+
+#define CONFIG_SKB_CHECK 0
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+
+
+#define FREE_READ 1
+#define FREE_WRITE 0
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+struct sk_buff_head
+{
+ struct sk_buff * next;
+ struct sk_buff * prev;
+ __u32 qlen; /* Must be same length as a pointer
+ so it can be inspected when debugging */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+};
+
+
+struct sk_buff
+{
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+ struct sk_buff_head * list; /* List we are on */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+ struct sk_buff *link3; /* Link for IP protocol level buffer chains */
+ struct sock *sk; /* Socket we are owned by */
+ unsigned long when; /* used to compute rtt's */
+ struct timeval stamp; /* Time we arrived */
+ struct linux_device *dev; /* Device we arrived on/are leaving by */
+ union
+ {
+ struct tcphdr *th;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ unsigned char *raw;
+ /* for passing file handles in a unix domain socket */
+ void *filp;
+ } h;
+
+ union
+ {
+ /* As yet incomplete physical layer views */
+ unsigned char *raw;
+ struct ethhdr *ethernet;
+ } mac;
+
+ struct iphdr *ip_hdr; /* For IPPROTO_RAW */
+ unsigned long len; /* Length of actual data */
+ unsigned long csum; /* Checksum */
+ __u32 saddr; /* IP source address */
+ __u32 daddr; /* IP target address */
+ __u32 raddr; /* IP next hop address */
+ __u32 seq; /* TCP sequence number */
+ __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
+ __u32 ack_seq; /* TCP ack sequence number */
+ unsigned char proto_priv[16]; /* Protocol private data */
+ volatile char acked, /* Are we acked ? */
+ used, /* Are we in use ? */
+ free, /* How to free this buffer */
+ arp; /* Has IP/ARP resolution finished */
+ unsigned char tries, /* Times tried */
+ lock, /* Are we locked ? */
+ localroute, /* Local routing asserted for this frame */
+ pkt_type, /* Packet class */
+ pkt_bridged, /* Tracker for bridging */
+ ip_summed; /* Driver fed us an IP checksum */
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+ unsigned short users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned int truesize; /* Buffer size */
+
+ atomic_t count; /* reference count */
+ struct sk_buff *data_skb; /* Link to the actual data skb */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+ __u16 redirport; /* Redirect port */
+#ifdef MACH
+#ifdef MACH_INCLUDE
+ ipc_port_t reply;
+ mach_msg_type_name_t reply_type;
+ vm_map_copy_t copy;
+#else
+ void *reply;
+ unsigned reply_type;
+ void *copy;
+#endif
+#endif
+};
+
+#ifdef CONFIG_SKB_LARGE
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+#else
+#define SK_WMEM_MAX 32767
+#define SK_RMEM_MAX 32767
+#endif
+
+#if CONFIG_SKB_CHECK
+#define SK_FREED_SKB 0x0DE2C0DE
+#define SK_GOOD_SKB 0xDEC0DED1
+#define SK_HEAD_SKB 0x12231298
+#endif
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/malloc.h>
+
+#include <asm/system.h>
+
+#if 0
+extern void print_skb(struct sk_buff *);
+#endif
+extern void kfree_skb(struct sk_buff *skb, int rw);
+extern void skb_queue_head_init(struct sk_buff_head *list);
+extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
+extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
+extern struct sk_buff * skb_dequeue(struct sk_buff_head *list);
+extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_append(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_unlink(struct sk_buff *buf);
+extern __u32 skb_queue_len(struct sk_buff_head *list);
+extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * dev_alloc_skb(unsigned int size);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
+extern void skb_device_lock(struct sk_buff *skb);
+extern void skb_device_unlock(struct sk_buff *skb);
+extern void dev_kfree_skb(struct sk_buff *skb, int mode);
+extern int skb_device_locked(struct sk_buff *skb);
+extern unsigned char * skb_put(struct sk_buff *skb, int len);
+extern unsigned char * skb_push(struct sk_buff *skb, int len);
+extern unsigned char * skb_pull(struct sk_buff *skb, int len);
+extern int skb_headroom(struct sk_buff *skb);
+extern int skb_tailroom(struct sk_buff *skb);
+extern void skb_reserve(struct sk_buff *skb, int len);
+extern void skb_trim(struct sk_buff *skb, int len);
+
+extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/*
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. For an interrupt
+ * type system: cli(), peek the buffer, copy the data, then sti();
+ */
+extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/*
+ * Return the length of an sk_buff queue
+ */
+
+extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+#if CONFIG_SKB_CHECK
+extern int skb_check(struct sk_buff *skb,int,int, char *);
+#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__)
+#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__)
+#else
+#define IS_SKB(skb)
+#define IS_SKB_HEAD(skb)
+
+extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_head(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+
+extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_tail(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list.
+ */
+
+extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ save_flags(flags);
+ cli();
+ result = __skb_dequeue(list);
+ restore_flags(flags);
+ return result;
+}
+
+/*
+ * Insert a packet on a list.
+ */
+
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/*
+ * Place a packet before a given packet in a list
+ */
+extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old->prev, old, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old, old->next, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list. _Must_ be called atomically, and with
+ * the list known.
+ */
+extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_.
+ */
+
+extern __inline__ void skb_unlink(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if(skb->list)
+ __skb_unlink(skb, skb->list);
+ restore_flags(flags);
+}
+
+/*
+ * Add data to an sk_buff
+ */
+extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end)
+ {
+ panic("skput:over: %d", len);
+ }
+ return tmp;
+}
+
+extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head)
+ {
+ panic("skpush:under: %d", len);
+ }
+ return skb->data;
+}
+
+extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len)
+{
+ if(len > skb->len)
+ return NULL;
+ skb->data+=len;
+ skb->len-=len;
+ return skb->data;
+}
+
+extern __inline__ int skb_headroom(struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+extern __inline__ int skb_tailroom(struct sk_buff *skb)
+{
+ return skb->end-skb->tail;
+}
+
+extern __inline__ void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+
+extern __inline__ void skb_trim(struct sk_buff *skb, int len)
+{
+ if(skb->len>len)
+ {
+ skb->len=len;
+ skb->tail=skb->data+len;
+ }
+}
+
+#endif
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern int datagram_select(struct sock *sk, int sel_type, select_table *wait);
+extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
+extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux/dev/include/linux/threads.h b/linux/dev/include/linux/threads.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/linux/threads.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/types.h b/linux/dev/include/linux/types.h
new file mode 100644
index 0000000..eb086c2
--- /dev/null
+++ b/linux/dev/include/linux/types.h
@@ -0,0 +1,117 @@
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+#include <linux/posix_types.h>
+#include <asm/types.h>
+
+#ifndef __KERNEL_STRICT_NAMES
+
+typedef __kernel_fd_set fd_set;
+
+#ifndef MACH_INCLUDE
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+#endif
+
+#ifdef MACH_INCLUDE
+#define off_t long
+#else
+typedef __kernel_off_t off_t;
+#endif
+
+typedef __kernel_pid_t pid_t;
+
+#ifdef MACH_INCLUDE
+#define uid_t unsigned short
+#define gid_t unsigned short
+#define daddr_t int
+#else
+typedef __kernel_uid_t uid_t;
+typedef __kernel_gid_t gid_t;
+typedef __kernel_daddr_t daddr_t;
+#endif
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __kernel_loff_t loff_t;
+#endif
+
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+#ifndef MACH_INCLUDE
+typedef __kernel_size_t size_t;
+#endif
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+#ifndef MACH_INCLUDE
+typedef __kernel_ssize_t ssize_t;
+#endif
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+#ifdef MACH_INCLUDE
+#define time_t long
+#else
+typedef __kernel_time_t time_t;
+#endif
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+
+#ifndef _CADDR_T
+#define _CADDR_T
+#ifndef MACH_INCLUDE
+typedef __kernel_caddr_t caddr_t;
+#endif
+#endif
+
+#ifndef MACH_INCLUDE
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+#endif
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#endif /* __KERNEL_STRICT_NAMES */
+
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+/* Yes, this is ugly. But that's why it is called glue code. */
+
+#define _MACH_SA_SYS_TYPES_H_
+
+
+#endif /* _LINUX_TYPES_H */
diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c
new file mode 100644
index 0000000..6d85395
--- /dev/null
+++ b/linux/dev/init/main.c
@@ -0,0 +1,261 @@
+/*
+ * Linux initialization.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/init/main.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <mach/machine.h>
+
+#include <vm/vm_page.h>
+#include <kern/kalloc.h>
+
+#include <machine/spl.h>
+#include <machine/pmap.h>
+#include <machine/vm_param.h>
+#include <machine/model_dep.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dev/glue/glue.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+/*
+ * Timing loop count.
+ */
+unsigned long loops_per_sec = 1;
+
+#if defined(__SMP__) && defined(__i386__)
+unsigned long smp_loops_per_tick = 1000000;
+#endif
+
+/*
+ * End of physical memory.
+ */
+unsigned long high_memory;
+
+/*
+ * Flag to indicate auto-configuration is in progress.
+ */
+int linux_auto_config = 1;
+
+/*
+ * Hard drive parameters obtained from the BIOS.
+ */
+struct drive_info_struct
+{
+ char dummy[32];
+} drive_info;
+
+/*
+ * Forward declarations.
+ */
+static void calibrate_delay (void);
+
+/*
+ * Amount of contiguous memory to allocate for initialization.
+ */
+#define CONTIG_ALLOC (512 * 1024)
+
+/*
+ * Initialize Linux drivers.
+ */
+void
+linux_init (void)
+{
+ int addr;
+ unsigned long memory_start, memory_end;
+ vm_page_t pages;
+
+ /*
+ * Initialize memory size.
+ */
+ high_memory = vm_page_seg_end(VM_PAGE_SEL_DIRECTMAP);
+ init_IRQ ();
+ linux_sched_init ();
+
+ /*
+ * Set loop count.
+ */
+ calibrate_delay ();
+
+ /*
+ * Initialize drive info.
+ */
+ addr = *((unsigned *) phystokv (0x104));
+ memcpy (&drive_info,
+ (void *) ((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16);
+ addr = *((unsigned *) phystokv (0x118));
+ memcpy ((char *) &drive_info + 16,
+ (void *) ((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16);
+
+ /*
+ * Initialize Linux memory allocator.
+ */
+ linux_kmem_init ();
+
+ /*
+ * Allocate contiguous memory below 16 MB.
+ */
+ memory_start = alloc_contig_mem (CONTIG_ALLOC, 16 * 1024 * 1024, 0, &pages);
+ if (memory_start == 0)
+ panic ("linux_init: alloc_contig_mem failed");
+ memory_end = memory_start + CONTIG_ALLOC;
+
+ /*
+ * Initialize PCI bus.
+ */
+ memory_start = pci_init (memory_start, memory_end);
+
+ if (memory_start > memory_end)
+ panic ("linux_init: ran out memory");
+
+ /*
+ * Initialize devices.
+ */
+#ifdef CONFIG_INET
+ linux_net_emulation_init ();
+#endif
+
+ device_setup ();
+
+#ifdef CONFIG_PCMCIA
+ /*
+ * Initialize pcmcia.
+ */
+ pcmcia_init ();
+#endif
+
+ restore_IRQ ();
+
+ linux_auto_config = 0;
+}
+
+#ifndef NBPW
+#define NBPW 32
+#endif
+
+/*
+ * Allocate contiguous memory with the given constraints.
+ */
+unsigned long
+alloc_contig_mem (unsigned size, unsigned limit,
+ unsigned mask, vm_page_t * pages)
+{
+ vm_page_t p;
+
+ p = vm_page_grab_contig(size, VM_PAGE_SEL_DMA);
+
+ if (p == NULL)
+ return 0;
+
+ if (pages)
+ *pages = p;
+
+ return phystokv(vm_page_to_pa(p));
+}
+
+/*
+ * Free memory allocated by alloc_contig_mem.
+ */
+void
+free_contig_mem (vm_page_t pages, unsigned size)
+{
+ vm_page_free_contig(pages, size);
+}
+
+/* This is the number of bits of precision for the loops_per_second. Each
+ * bit takes on average 1.5/HZ seconds. This (like the original) is a little
+ * better than 1%.
+ */
+#define LPS_PREC 8
+
+static void
+calibrate_delay (void)
+{
+ int ticks;
+ int loopbit;
+ int lps_precision = LPS_PREC;
+
+ loops_per_sec = (1 << 12);
+
+#ifndef MACH
+ printk ("Calibrating delay loop.. ");
+#endif
+ while (loops_per_sec <<= 1)
+ {
+ /* wait for "start of" clock tick */
+ ticks = jiffies;
+ while (ticks == jiffies)
+ /* nothing */ ;
+ /* Go .. */
+ ticks = jiffies;
+ __delay (loops_per_sec);
+ ticks = jiffies - ticks;
+ if (ticks)
+ break;
+ }
+
+ /* Do a binary approximation to get loops_per_second set to equal one clock
+ * (up to lps_precision bits)
+ */
+ loops_per_sec >>= 1;
+ loopbit = loops_per_sec;
+ while (lps_precision-- && (loopbit >>= 1))
+ {
+ loops_per_sec |= loopbit;
+ ticks = jiffies;
+ while (ticks == jiffies);
+ ticks = jiffies;
+ __delay (loops_per_sec);
+ if (jiffies != ticks) /* longer than 1 tick */
+ loops_per_sec &= ~loopbit;
+ }
+
+ /* finally, adjust loops per second in terms of seconds instead of clocks */
+ loops_per_sec *= HZ;
+ /* Round the value and print it */
+#ifndef MACH
+ printk ("ok - %lu.%02lu BogoMIPS\n",
+ (loops_per_sec + 2500) / 500000,
+ ((loops_per_sec + 2500) / 5000) % 100);
+#endif
+
+#if defined(__SMP__) && defined(__i386__)
+ smp_loops_per_tick = loops_per_sec / 400;
+#endif
+}
diff --git a/linux/dev/init/version.c b/linux/dev/init/version.c
new file mode 100644
index 0000000..1989483
--- /dev/null
+++ b/linux/dev/init/version.c
@@ -0,0 +1,32 @@
+/*
+ * linux/version.c
+ *
+ * Copyright (C) 1992 Theodore Ts'o
+ *
+ * May be freely distributed as part of Linux.
+ */
+
+#define MACH_INCLUDE
+#include <linux/config.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/compile.h>
+
+/* make the "checkconfig" script happy: we really need to include config.h */
+#ifdef CONFIG_BOGUS
+#endif
+
+#define version(a) Version_ ## a
+#define version_string(a) version(a)
+
+int version_string (LINUX_VERSION_CODE) = 0;
+
+struct new_utsname system_utsname =
+{
+ UTS_SYSNAME, UTS_NODENAME, UTS_RELEASE, UTS_VERSION,
+ UTS_MACHINE, UTS_DOMAINNAME
+};
+
+const char *linux_banner =
+"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
diff --git a/linux/dev/kernel/dma.c b/linux/dev/kernel/dma.c
new file mode 100644
index 0000000..bbda4bb
--- /dev/null
+++ b/linux/dev/kernel/dma.c
@@ -0,0 +1,109 @@
+/* $Id: dma.c,v 1.1 1999/04/26 05:49:35 tb Exp $
+ * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
+ *
+ * Written by Hennus Bergman, 1992.
+ *
+ * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
+ * In the previous version the reported device could end up being wrong,
+ * if a device requested a DMA channel that was already in use.
+ * [It also happened to remove the sizeof(char *) == sizeof(int)
+ * assumption introduced because of those /proc/dma patches. -- Hennus]
+ */
+
+#define MACH_INCLUDE
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+
+
+/* A note on resource allocation:
+ *
+ * All drivers needing DMA channels, should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
+ * When releasing them, first release the DMA, then release the IRQ.
+ * If you don't, you may cause allocation requests to fail unnecessarily.
+ * This doesn't really matter now, but it will once we get real semaphores
+ * in the kernel.
+ */
+
+
+
+/* Channel n is busy iff dma_chan_busy[n].lock != 0.
+ * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
+ * DMA4 is reserved for cascading.
+ */
+
+struct dma_chan
+{
+ int lock;
+ const char *device_id;
+};
+
+static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] =
+{
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 1, "cascade" },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 }
+};
+
+#ifndef MACH
+int
+get_dma_list (char *buf)
+{
+ int i, len = 0;
+
+ for (i = 0 ; i < MAX_DMA_CHANNELS ; i++)
+ {
+ if (dma_chan_busy[i].lock)
+ {
+ len += linux_sprintf (buf+len, "%2d: %s\n",
+ i,
+ dma_chan_busy[i].device_id);
+ }
+ }
+ return len;
+} /* get_dma_list */
+#endif
+
+int
+request_dma (unsigned int dmanr, const char *device_id)
+{
+ if (dmanr >= MAX_DMA_CHANNELS)
+ return -EINVAL;
+
+ if (xchg (&dma_chan_busy[dmanr].lock, 1) != 0)
+ return -EBUSY;
+
+ dma_chan_busy[dmanr].device_id = device_id;
+
+ /* old flag was 0, now contains 1 to indicate busy */
+ return 0;
+} /* request_dma */
+
+
+void
+free_dma (unsigned int dmanr)
+{
+ if (dmanr >= MAX_DMA_CHANNELS)
+ {
+ printk ("Trying to free DMA%d\n", dmanr);
+ return;
+ }
+
+ if (xchg (&dma_chan_busy[dmanr].lock, 0) == 0)
+ {
+ printk ("Trying to free free DMA%d\n", dmanr);
+ return;
+ }
+} /* free_dma */
diff --git a/linux/dev/kernel/printk.c b/linux/dev/kernel/printk.c
new file mode 100644
index 0000000..7c65d30
--- /dev/null
+++ b/linux/dev/kernel/printk.c
@@ -0,0 +1,83 @@
+/*
+ * Linux kernel print routine.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/kernel/printk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#define MACH_INCLUDE
+#include <stdarg.h>
+#include <asm/system.h>
+#include <kern/assert.h>
+#include <kern/printf.h>
+#include <device/cons.h>
+
/* Staging buffer for one printk() call.  The first 3 bytes are kept in
   reserve so a "<N>" log-level prefix can be slid in front of an
   unprefixed message without copying it (see printk below).  */
static char buf[2048];

/* Level stamped onto messages that arrive without a "<0>".."<7>" prefix.  */
#define DEFAULT_MESSAGE_LOGLEVEL 4
/* Messages with a level strictly below console_loglevel hit the console.  */
#define DEFAULT_CONSOLE_LOGLEVEL 7

int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+
/* Kernel message printer.
   Formats FMT/... into the static buffer and emits it to the Mach
   console via cnputc(), one line at a time.  A line may begin with a
   "<N>" (N in '0'..'7') log-level prefix; lines without one get
   DEFAULT_MESSAGE_LOGLEVEL.  Only lines whose level is below
   console_loglevel are printed.  Interrupts are disabled for the whole
   call, which also serializes use of the static buffer.
   Returns the number of characters formatted.  */
int
printk (char *fmt, ...)
{
  va_list args;
  int n;
  unsigned long flags;
  char *p, *msg, *buf_end;
  /* Level of the line currently in progress; -1 means the next
     character starts a fresh message (possibly from a later call).  */
  static int msg_level = -1;

  save_flags (flags);
  cli ();
  va_start (args, fmt);
  /* Format at buf + 3 so a synthesized "<N>" prefix can go in front
     of the text without moving it.  */
  n = vsnprintf (buf + 3, sizeof (buf) - 3, fmt, args);
  assert (n <= sizeof (buf) - 3);
  buf_end = buf + 3 + n;
  va_end (args);
  for (p = buf + 3; p < buf_end; p++)
    {
      msg = p;
      if (msg_level < 0)
	{
	  if (p[0] != '<' || p[1] < '0' || p[1] > '7' || p[2] != '>')
	    {
	      /* No prefix supplied: back into the 3 reserved bytes and
		 synthesize a default one.  */
	      p -= 3;
	      p[0] = '<';
	      p[1] = DEFAULT_MESSAGE_LOGLEVEL + '0';
	      p[2] = '>';
	    }
	  else
	    /* Keep an explicit prefix out of the printed text.  */
	    msg += 3;
	  msg_level = p[1] - '0';
	}
      /* Advance p to the end of the current line (or of the text).  */
      for (; p < buf_end; p++)
	if (*p == '\n')
	  break;
      if (msg_level < console_loglevel)
	while (msg <= p)
	  cnputc (*msg++);
      /* NOTE(review): when the text does not end in '\n', p == buf_end
	 here and the tests below read one byte past the formatted
	 data (still inside buf unless it was completely full) —
	 inherited from the original Linux code; confirm acceptable.  */
      if (*p == '\n')
	msg_level = -1;
    }
  restore_flags (flags);
  return n;
}
diff --git a/linux/dev/kernel/resource.c b/linux/dev/kernel/resource.c
new file mode 100644
index 0000000..ba107e8
--- /dev/null
+++ b/linux/dev/kernel/resource.c
@@ -0,0 +1,145 @@
+/*
+ * linux/kernel/resource.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * David Hinds
+ *
+ * Kernel io-region resource management
+ */
+
+#include <sys/types.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+
#define IOTABLE_SIZE 128

/* One reserved I/O port region: NUM ports starting at FROM, owned by
   NAME.  Live entries are chained off `iolist' in ascending port
   order.  */
typedef struct resource_entry_t
{
  u_long from, num;
  const char *name;
  struct resource_entry_t *next;
} resource_entry_t;

/* Dummy list head; real entries hang off its `next' pointer.  */
static resource_entry_t iolist = { 0, 0, "", NULL };

/* Static pool of list nodes; a slot with num == 0 is free.  */
static resource_entry_t iotable[IOTABLE_SIZE];
+
/*
 * This generates the report for /proc/ioports
 */
/* Writes at most ~4000 bytes of "from-to : name" lines into BUF and
   returns the length.  Compiled out under Mach (no procfs).  */
#ifndef MACH
int
get_ioport_list (char *buf)
{
  resource_entry_t *p;
  int len = 0;

  for (p = iolist.next; (p) && (len < 4000); p = p->next)
    len += linux_sprintf (buf+len, "%04lx-%04lx : %s\n",
			  p->from, p->from+p->num-1, p->name);
  if (p)
    len += linux_sprintf (buf+len, "4K limit reached!\n");
  return len;
}
#endif
+
/*
 * The workhorse function: find where to put a new entry
 */
/* Locate the node after which [FROM, FROM+NUM-1] can be inserted while
   keeping the list sorted and non-overlapping.  Returns NULL if the
   range wraps around the top of the port space or collides with an
   existing entry.  The walk runs with interrupts off so the list
   cannot change underfoot.  */
static resource_entry_t *
find_gap (resource_entry_t *root, u_long from, u_long num)
{
  unsigned long flags;
  resource_entry_t *p;

  /* Reject a range whose end wraps past ULONG_MAX.  */
  if (from > from+num-1)
    return NULL;
  save_flags (flags);
  cli ();
  for (p = root; ; p = p->next)
    {
      /* The entry we stand on already reaches into our range: clash.  */
      if ((p != root) && (p->from+p->num-1 >= from))
	{
	  p = NULL;
	  break;
	}
      /* No successor, or successor starts past our end: insert here.  */
      if ((p->next == NULL) || (p->next->from > from+num-1))
	break;
    }
  restore_flags (flags);
  return p;
}
+
+/*
+ * Call this from the device driver to register the ioport region.
+ */
+void
+request_region (unsigned int from, unsigned int num, const char *name)
+{
+ resource_entry_t *p;
+ int i;
+
+ for (i = 0; i < IOTABLE_SIZE; i++)
+ if (iotable[i].num == 0)
+ break;
+ if (i == IOTABLE_SIZE)
+ printk ("warning: ioport table is full\n");
+ else
+ {
+ p = find_gap (&iolist, from, num);
+ if (p == NULL)
+ return;
+ iotable[i].name = name;
+ iotable[i].from = from;
+ iotable[i].num = num;
+ iotable[i].next = p->next;
+ p->next = &iotable[i];
+ return;
+ }
+}
+
+/*
+ * Call this when the device driver is unloaded
+ */
+void
+release_region (unsigned int from, unsigned int num)
+{
+ resource_entry_t *p, *q;
+
+ for (p = &iolist; ; p = q)
+ {
+ q = p->next;
+ if (q == NULL)
+ break;
+ if ((q->from == from) && (q->num == num))
+ {
+ q->num = 0;
+ p->next = q->next;
+ return;
+ }
+ }
+}
+
/*
 * Call this to check the ioport region before probing
 */
/* Returns 0 if [FROM, FROM+NUM-1] is free, -EBUSY if it overlaps a
   registered region (or wraps).  Nothing is reserved.  */
int
check_region (unsigned int from, unsigned int num)
{
  return (find_gap (&iolist, from, num) == NULL) ? -EBUSY : 0;
}
+
/* Called from init/main.c to reserve IO ports. */
/* INTS is the parsed "reserve=" boot argument: ints[0] holds the count
   of values that follow, interpreted as base,extent pairs.  STR is the
   unparsed remainder and is unused here.  */
void
reserve_setup(char *str, int *ints)
{
  int i;

  for (i = 1; i < ints[0]; i += 2)
    request_region (ints[i], ints[i+1], "reserved");
}
diff --git a/linux/dev/kernel/sched.c b/linux/dev/kernel/sched.c
new file mode 100644
index 0000000..f87482e
--- /dev/null
+++ b/linux/dev/kernel/sched.c
@@ -0,0 +1,630 @@
+/*
+ * Linux scheduling support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/kernel/sched.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <machine/spl.h>
+
+#include <mach/boolean.h>
+
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <kern/printf.h>
+
+#include <machine/machspl.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/dev/glue/glue.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+
int securelevel = 0;

static void timer_bh (void);

/* Task queues drained by the bottom-half handlers below.  */
DECLARE_TASK_QUEUE (tq_timer);
DECLARE_TASK_QUEUE (tq_immediate);
DECLARE_TASK_QUEUE (tq_scheduler);

/* During autoconfiguration (linux_auto_config) there is a single
   thread, so at most one wait queue is "slept on" at a time; this
   records which one.  */
static struct wait_queue **auto_config_queue;
+
/* Run any pending, enabled bottom halves.  intr_count is raised around
   the call so code invoked from a handler can tell it is running in
   (software) interrupt context.  */
static inline void
handle_soft_intr (void)
{
  if (bh_active & bh_mask)
    {
      intr_count = 1;
      linux_soft_intr ();
      intr_count = 0;
    }
}
+
/* TQUEUE_BH handler: drain the timer task queue.  */
static void
tqueue_bh (void)
{
  run_task_queue(&tq_timer);
}

/* IMMEDIATE_BH handler: drain the immediate task queue.  */
static void
immediate_bh (void)
{
  run_task_queue (&tq_immediate);
}
+
/* Linux add_wait_queue mapped onto Mach primitives.
   Normal operation: register the current thread as a waiter on the
   Mach event Q via assert_wait() (WAIT itself is unused).  During
   autoconfiguration: just remember Q so wake_up() can match it.  */
void
add_wait_queue (struct wait_queue **q, struct wait_queue *wait)
{
  unsigned long flags;

  if (! linux_auto_config)
    {
      save_flags (flags);
      cli ();
      assert_wait ((event_t) q, FALSE);
      restore_flags (flags);
      return;
    }

  /* Only one queue can be outstanding during autoconfiguration.  */
  if (auto_config_queue)
    printf ("add_wait_queue: queue not empty\n");
  auto_config_queue = q;
}
+
/* Undo add_wait_queue: wake the Mach event in normal operation, or
   forget the recorded queue during autoconfiguration.  WAIT is unused.  */
void
remove_wait_queue (struct wait_queue **q, struct wait_queue *wait)
{
  unsigned long flags;

  if (! linux_auto_config)
    {
      save_flags (flags);
      thread_wakeup ((event_t) q);
      restore_flags (flags);
      return;
    }

  auto_config_queue = NULL;
}
+
/* Try to consume one pending wakeup from SEM.
   Returns 1 (and decrements sem->waking) if a wakeup credit was
   available, else 0.  The buzz lock plus cli() make the
   test-and-decrement atomic against other CPUs and interrupts.  */
static inline int
waking_non_zero (struct semaphore *sem)
{
  int ret;
  unsigned long flags;

  get_buzz_lock (&sem->lock);
  save_flags (flags);
  cli ();

  if ((ret = (sem->waking > 0)))
    sem->waking--;

  restore_flags (flags);
  give_buzz_lock (&sem->lock);
  return ret;
}
+
/* Slow path of up(): grant one wakeup credit, then rouse a sleeper.  */
void
__up (struct semaphore *sem)
{
  atomic_inc (&sem->waking);
  wake_up (&sem->wait);
}
+
/* Slow path shared by down() and down_interruptible().
   Sleeps until a wakeup credit can be consumed (waking_non_zero), or —
   when TASK_STATE is TASK_INTERRUPTIBLE — until a signal arrives, in
   which case the caller's count decrement is undone and -EINTR
   returned.  Normal operation blocks on a Mach event at splhigh;
   autoconfiguration simply polls, calling schedule() between tries.
   Returns 0 on success, -EINTR if interrupted.  */
int
__do_down (struct semaphore *sem, int task_state)
{
  unsigned long flags;
  int ret = 0;
  int s;

  if (!linux_auto_config)
    {
      save_flags (flags);
      s = splhigh ();
      for (;;)
	{
	  if (waking_non_zero (sem))
	    break;

	  if (task_state == TASK_INTERRUPTIBLE && issig ())
	    {
	      ret = -EINTR;
	      /* Undo the optimistic decrement done by down().  */
	      atomic_inc (&sem->count);
	      break;
	    }

	  assert_wait ((event_t) &sem->wait,
		       task_state == TASK_INTERRUPTIBLE ? TRUE : FALSE);
	  splx (s);
	  schedule ();
	  s = splhigh ();
	}
      splx (s);
      restore_flags (flags);
      return ret;
    }

  /* Autoconfiguration: single-threaded busy wait.  */
  while (!waking_non_zero (sem))
    {
      if (task_state == TASK_INTERRUPTIBLE && issig ())
	{
	  ret = -EINTR;
	  atomic_inc (&sem->count);
	  break;
	}
      schedule ();
    }

  return ret;
}
+
/* down() slow path: uninterruptible acquire.  */
void
__down (struct semaphore *sem)
{
  __do_down(sem, TASK_UNINTERRUPTIBLE);
}

/* down_interruptible() slow path: 0 on success, -EINTR on signal.  */
int
__down_interruptible (struct semaphore *sem)
{
  return __do_down (sem, TASK_INTERRUPTIBLE);
}
+
/* Common body of sleep_on()/interruptible_sleep_on(): block on wait
   queue Q until woken.  STATE selects whether the Mach wait is
   interruptible.  During autoconfiguration we poll instead: register Q
   and spin in schedule() until wake_up() clears it.  A NULL Q is a
   no-op.  */
void
__sleep_on (struct wait_queue **q, int state)
{
  unsigned long flags;

  if (!q)
    return;
  save_flags (flags);
  if (!linux_auto_config)
    {
      assert_wait ((event_t) q, state == TASK_INTERRUPTIBLE ? TRUE : FALSE);
      sti ();
      schedule ();
      restore_flags (flags);
      return;
    }

  add_wait_queue (q, NULL);
  sti ();
  while (auto_config_queue)
    schedule ();
  restore_flags (flags);
}
+
/* Sleep uninterruptibly on Q.  */
void
sleep_on (struct wait_queue **q)
{
  __sleep_on (q, TASK_UNINTERRUPTIBLE);
}

/* Sleep on Q, allowing signals to interrupt the wait.  */
void
interruptible_sleep_on (struct wait_queue **q)
{
  __sleep_on (q, TASK_INTERRUPTIBLE);
}
+
/* Wake all threads sleeping on wait queue Q.
   Normal operation: a Mach thread_wakeup on the event, except for
   wait_for_request (see the inherited "???" note).  During
   autoconfiguration: clear the recorded queue so __sleep_on's polling
   loop exits.  */
void
wake_up (struct wait_queue **q)
{
  unsigned long flags;

  if (! linux_auto_config)
    {
      if (q != &wait_for_request)	/* ??? by OKUJI Yoshinori. */
	{
	  save_flags (flags);
	  thread_wakeup ((event_t) q);
	  restore_flags (flags);
	}
      return;
    }

  if (auto_config_queue == q)
    auto_config_queue = NULL;
}
+
/* Block until buffer BH is unlocked.
   Keeps kicking the disk task queue so queued I/O actually makes
   progress while we wait.  Normal operation sleeps on the buffer as a
   Mach event (b_wait set to 1 flags that a waiter exists for
   unlock_buffer); autoconfiguration busy-polls instead.  */
void
__wait_on_buffer (struct buffer_head *bh)
{
  unsigned long flags;

  save_flags (flags);
  if (! linux_auto_config)
    {
      while (1)
	{
	  cli ();
	  run_task_queue (&tq_disk);
	  if (! buffer_locked (bh))
	    break;
	  /* Mark that someone is waiting; checked by unlock_buffer.  */
	  bh->b_wait = (struct wait_queue *) 1;
	  assert_wait ((event_t) bh, FALSE);
	  sti ();
	  schedule ();
	}
      restore_flags (flags);
      return;
    }

  sti ();
  while (buffer_locked (bh))
    {
      run_task_queue (&tq_disk);
      schedule ();
    }
  restore_flags (flags);
}
+
/* Clear the lock bit on BH and wake any thread recorded as waiting in
   __wait_on_buffer.  Done with interrupts off so the test of b_wait
   and the wakeup cannot race the sleeper.  */
void
unlock_buffer (struct buffer_head *bh)
{
  unsigned long flags;

  save_flags (flags);
  cli ();
  clear_bit (BH_Lock, &bh->b_state);
  if (bh->b_wait && ! linux_auto_config)
    {
      bh->b_wait = NULL;
      thread_wakeup ((event_t) bh);
    }
  restore_flags (flags);
}
+
/* Linux schedule() shim: run pending bottom halves and the scheduler
   task queue, then yield the CPU through Mach's thread_block().
   During autoconfiguration (single thread) the block is skipped and
   the call degrades to "process pending work".  Complains if invoked
   from interrupt context.  */
void
schedule (void)
{
  if (intr_count)
    printk ("Aiee: scheduling in interrupt %p\n",
	    __builtin_return_address (0));

  handle_soft_intr ();
  run_task_queue (&tq_scheduler);

  if (!linux_auto_config)
    thread_block (0);
}
+
/* One-time initialization: register the standard bottom-half
   handlers.  */
void
linux_sched_init (void)
{
  /*
   * Install software interrupt handlers.
   */
  init_bh (TIMER_BH, timer_bh);
  init_bh (TQUEUE_BH, tqueue_bh);
  init_bh (IMMEDIATE_BH, immediate_bh);
}
+
+/*
+ * Linux timers.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
/* Clock ticks since boot; advanced by linux_timer_intr().  */
unsigned long volatile jiffies = 0;

/*
 * Mask of active timers.
 */
unsigned long timer_active = 0;

/*
 * List of timeout routines.
 */
struct timer_struct timer_table[32];

/* Geometry of the hashed timer wheel: one root vector of TVR_SIZE
   slots for near-future timers plus four overflow vectors of TVN_SIZE
   slots for progressively more distant ones.  */
#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

#define SLOW_BUT_DEBUGGING_TIMERS 0

struct timer_vec
  {
    int index;			/* next slot to cascade/expire */
    struct timer_list *vec[TVN_SIZE];
  };

struct timer_vec_root
  {
    int index;			/* next slot to expire */
    struct timer_list *vec[TVR_SIZE];
  };

static struct timer_vec tv5 =
{0};
static struct timer_vec tv4 =
{0};
static struct timer_vec tv3 =
{0};
static struct timer_vec tv2 =
{0};
static struct timer_vec_root tv1 =
{0};

/* All five wheels, finest first; tv1 is cast because its vec array is
   larger, and only `index' plus the vec pointers are touched through
   this table.  */
static struct timer_vec *const tvecs[] =
{
  (struct timer_vec *) &tv1, &tv2, &tv3, &tv4, &tv5
};

#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))

/* Tick up to which the wheel has already been processed.  */
static unsigned long timer_jiffies = 0;
+
+static inline void
+insert_timer (struct timer_list *timer, struct timer_list **vec, int idx)
+{
+ if ((timer->next = vec[idx]))
+ vec[idx]->prev = timer;
+ vec[idx] = timer;
+ timer->prev = (struct timer_list *) &vec[idx];
+}
+
/* Hash TIMER into the wheel vector that matches how far in the future
   it expires.  Caller must hold interrupts off (see comment below).  */
static inline void
internal_add_timer (struct timer_list *timer)
{
  /*
   * must be cli-ed when calling this
   */
  unsigned long expires = timer->expires;
  unsigned long idx = expires - timer_jiffies;

  if (idx < TVR_SIZE)
    {
      int i = expires & TVR_MASK;
      insert_timer (timer, tv1.vec, i);
    }
  else if (idx < 1 << (TVR_BITS + TVN_BITS))
    {
      int i = (expires >> TVR_BITS) & TVN_MASK;
      insert_timer (timer, tv2.vec, i);
    }
  else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS))
    {
      int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
      insert_timer (timer, tv3.vec, i);
    }
  else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS))
    {
      int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
      insert_timer (timer, tv4.vec, i);
    }
  else if (expires < timer_jiffies)
    {
      /* can happen if you add a timer with expires == jiffies,
       * or you set a timer to go off in the past
       */
      insert_timer (timer, tv1.vec, tv1.index);
    }
  else if (idx < 0xffffffffUL)
    {
      int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
      insert_timer (timer, tv5.vec, i);
    }
  else
    {
      /* Can only get here on architectures with 64-bit jiffies */
      timer->next = timer->prev = timer;
    }
}
+
/* Public add_timer(): queue TIMER on the wheel with interrupts off.
   With SLOW_BUT_DEBUGGING_TIMERS enabled, also reject a timer that is
   already linked.  */
void
add_timer (struct timer_list *timer)
{
  unsigned long flags;

  save_flags (flags);
  cli ();
#if SLOW_BUT_DEBUGGING_TIMERS
  if (timer->next || timer->prev)
    {
      printk ("add_timer() called with non-zero list from %p\n",
	      __builtin_return_address (0));
      goto out;
    }
#endif
  internal_add_timer (timer);
#if SLOW_BUT_DEBUGGING_TIMERS
out:
#endif
  restore_flags (flags);
}
+
+static inline int
+detach_timer (struct timer_list *timer)
+{
+ int ret = 0;
+ struct timer_list *next, *prev;
+
+ next = timer->next;
+ prev = timer->prev;
+ if (next)
+ {
+ next->prev = prev;
+ }
+ if (prev)
+ {
+ ret = 1;
+ prev->next = next;
+ }
+ return ret;
+}
+
/* Public del_timer(): remove TIMER from the wheel with interrupts off
   and clear its link fields.  Returns 1 if it was pending, else 0.  */
int
del_timer (struct timer_list *timer)
{
  int ret;
  unsigned long flags;

  save_flags (flags);
  cli ();
  ret = detach_timer (timer);
  timer->next = timer->prev = 0;
  restore_flags (flags);
  return ret;
}
+
/* Run the old-style static timer_table[] entries.
   Walks active bits from LSB upward; stops early once `mask' exceeds
   every remaining active bit.  Each expired entry is deactivated
   before its handler runs, and interrupts are re-enabled after each
   call.  */
static inline void
run_old_timers (void)
{
  struct timer_struct *tp;
  unsigned long mask;

  for (mask = 1, tp = timer_table + 0; mask; tp++, mask += mask)
    {
      if (mask > timer_active)
	break;
      if (!(mask & timer_active))
	continue;
      if (tp->expires > jiffies)
	continue;
      timer_active &= ~mask;
      tp->fn ();
      sti ();
    }
}
+
/* Re-hash every timer in TV's current slot one wheel level down, then
   advance TV's index.  Called when tv1 wraps.  */
static inline void
cascade_timers (struct timer_vec *tv)
{
  /* cascade all the timers from tv up one level */
  struct timer_list *timer;

  timer = tv->vec[tv->index];
  /*
   * We are removing _all_ timers from the list, so we don't have to
   * detach them individually, just clear the list afterwards.
   */
  while (timer)
    {
      struct timer_list *tmp = timer;
      timer = timer->next;
      internal_add_timer (tmp);
    }
  tv->vec[tv->index] = NULL;
  tv->index = (tv->index + 1) & TVN_MASK;
}
+
/* Advance the wheel from timer_jiffies up to the current jiffies,
   firing every expired timer.  Handlers run with interrupts enabled;
   list surgery happens with them disabled.  */
static inline void
run_timer_list (void)
{
  cli ();
  while ((long) (jiffies - timer_jiffies) >= 0)
    {
      struct timer_list *timer;

      /* Root index wrapped to 0: refill tv1 by cascading the coarser
	 vectors (each cascade leaves index == 1 when it wrapped, which
	 propagates the refill upward).  */
      if (!tv1.index)
	{
	  int n = 1;

	  do
	    {
	      cascade_timers (tvecs[n]);
	    }
	  while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
	}
      while ((timer = tv1.vec[tv1.index]))
	{
	  void (*fn) (unsigned long) = timer->function;
	  unsigned long data = timer->data;

	  detach_timer (timer);
	  timer->next = timer->prev = NULL;
	  sti ();
	  fn (data);
	  cli ();
	}
      ++timer_jiffies;
      tv1.index = (tv1.index + 1) & TVR_MASK;
    }
  sti ();
}
+
/*
 * Timer software interrupt handler.
 */
/* Runs both timer mechanisms: the legacy static table and the hashed
   wheel.  */
static void
timer_bh (void)
{
  run_old_timers ();
  run_timer_list ();
}
+
#if 0
int linux_timer_print = 0;
#endif

/*
 * Timer interrupt handler.
 */
/* Hooked into the Mach clock interrupt.  Only the master CPU advances
   jiffies and marks the timer bottom halves for later execution.  */
void
linux_timer_intr (void)
{
  if (cpu_number() != master_cpu)
    return;

  /* Increment through a non-volatile alias, as the original did.  */
  (*(unsigned long *) &jiffies)++;
  mark_bh (TIMER_BH);
  if (tq_timer)
    mark_bh (TQUEUE_BH);
#if 0
  if (linux_timer_print)
    printf ("linux_timer_intr: hello\n");
#endif
}
diff --git a/linux/dev/kernel/softirq.c b/linux/dev/kernel/softirq.c
new file mode 100644
index 0000000..ac95a7d
--- /dev/null
+++ b/linux/dev/kernel/softirq.c
@@ -0,0 +1,48 @@
+/*
+ * linux/kernel/softirq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ *
+ * do_bottom_half() runs at normal kernel priority: all interrupts
+ * enabled. do_bottom_half() is atomic with respect to itself: a
+ * bottom_half handler need not be re-entrant.
+ */
+
+#define MACH_INCLUDE
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+
+#include <linux/dev/glue/glue.h>
+
/* Bottom-half state: per-handler disable counts, pending mask, enabled
   mask, and the 32-entry handler table indexed by BH number.  */
int bh_mask_count[32];
unsigned int bh_active = 0;
unsigned int bh_mask = 0;
void (*bh_base[32]) (void);
+
/* Software interrupt dispatcher: run each pending, enabled bottom half
   once, in ascending BH number.  Runs with interrupts enabled; callers
   serialize bottom halves, so handlers need not be re-entrant.  */
void
linux_soft_intr (void)
{
  unsigned int active;
  unsigned int mask, left;
  void (**bh) (void);

  sti ();
  bh = bh_base;
  active = bh_active & bh_mask;
  /* `left' starts all-ones and loses one low bit per iteration
     (left += left == left << 1), so the loop ends as soon as no
     pending bit at or above `mask' remains.  */
  for (mask = 1, left = ~0; left & active; bh++, mask += mask, left += left)
    {
      if (mask & active)
	{
	  void (*fn) (void);
	  bh_active &= ~mask;
	  fn = *bh;
	  if (!fn)
	    goto bad_bh;
	  fn ();
	}
    }
  return;
bad_bh:
  printk ("linux_soft_intr:bad interrupt handler entry %08x\n", mask);
}
diff --git a/linux/dev/lib/vsprintf.c b/linux/dev/lib/vsprintf.c
new file mode 100644
index 0000000..541ec65
--- /dev/null
+++ b/linux/dev/lib/vsprintf.c
@@ -0,0 +1,354 @@
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+#include <sys/types.h>
+
+#define MACH_INCLUDE
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
/* Parse an unsigned integer from CP in BASE.  BASE == 0 means detect
   the base from the prefix: "0x"/"0X"-style hex, leading-0 octal,
   otherwise decimal.  When ENDP is non-NULL it receives a pointer to
   the first unconsumed character.  Overflow is not detected, matching
   the original Linux helper.  */
unsigned long
simple_strtoul (const char *cp, char **endp, unsigned int base)
{
  unsigned long result = 0;

  if (base == 0)
    {
      base = 10;
      if (*cp == '0')
	{
	  base = 8;
	  cp++;
	  if (*cp == 'x' && isxdigit (cp[1]))
	    {
	      cp++;
	      base = 16;
	    }
	}
    }

  for (;;)
    {
      unsigned char ch = *cp;
      unsigned long digit;

      if (!isxdigit (ch))
	break;
      digit = isdigit (ch) ? (unsigned long) (ch - '0')
			   : (unsigned long) (toupper (ch) - 'A' + 10);
      if (digit >= base)
	break;
      result = result * base + digit;
      cp++;
    }

  if (endp)
    *endp = (char *) cp;
  return result;
}
+
/* we use this so that we can do without the ctype library */
#define is_digit(c) ((c) >= '0' && (c) <= '9')

/* Consume a run of decimal digits at **S, advancing *S past them, and
   return their numeric value (0 when *S does not start with a digit).  */
static int
skip_atoi (const char **s)
{
  int value = 0;
  const char *p = *s;

  while (is_digit (*p))
    value = value * 10 + (*p++ - '0');
  *s = p;
  return value;
}
+
/* Conversion flag bits used by number() and linux_vsprintf().  */
#define ZEROPAD 1		/* pad with zero */
#define SIGN 2			/* unsigned/signed long */
#define PLUS 4			/* show plus */
#define SPACE 8			/* space if plus */
#define LEFT 16			/* left justified */
#define SPECIAL 32		/* 0x */
#define LARGE 64		/* use 'ABCDEF' instead of 'abcdef' */

/* Divide N by BASE in place and yield the remainder; used by number()
   to peel off digits least-significant first.  */
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
+
/* Render NUM into STR with printf-style controls: BASE in 2..36, SIZE
   the total field width, PRECISION the minimum digit count, TYPE a
   bitmask of the flags above.  Returns the advanced output pointer (no
   NUL is written), or NULL (0) for an unsupported base.  */
static char *
number (char *str, long num, int base, int size, int precision, int type)
{
  char c, sign, tmp[66];
  const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz";
  int i;

  if (type & LARGE)
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
  if (type & LEFT)
    type &= ~ZEROPAD;
  if (base < 2 || base > 36)
    return 0;
  /* Pad character: zeros only when right-justified with ZEROPAD.  */
  c = (type & ZEROPAD) ? '0' : ' ';
  sign = 0;
  if (type & SIGN)
    {
      if (num < 0)
	{
	  sign = '-';
	  num = -num;
	  size--;
	}
      else if (type & PLUS)
	{
	  sign = '+';
	  size--;
	}
      else if (type & SPACE)
	{
	  sign = ' ';
	  size--;
	}
    }
  /* The "0"/"0x" prefix eats into the field width.  */
  if (type & SPECIAL)
    {
      if (base == 16)
	size -= 2;
      else if (base == 8)
	size--;
    }
  /* Peel digits into tmp[], least significant first.  */
  i = 0;
  if (num == 0)
    tmp[i++] = '0';
  else
    while (num != 0)
      tmp[i++] = digits[do_div (num, base)];
  if (i > precision)
    precision = i;
  size -= precision;
  /* Space-pad on the left unless zero-padding or left-justifying.  */
  if (!(type & (ZEROPAD + LEFT)))
    while (size-- > 0)
      *str++ = ' ';
  if (sign)
    *str++ = sign;
  if (type & SPECIAL)
    {
      if (base == 8)
	{
	  *str++ = '0';
	}
      else if (base == 16)
	{
	  *str++ = '0';
	  /* digits[33] is 'x' or 'X' depending on the case table.  */
	  *str++ = digits[33];
	}
    }
  if (!(type & LEFT))
    while (size-- > 0)
      *str++ = c;
  /* Zero-fill up to the requested precision, then emit the digits.  */
  while (i < precision--)
    *str++ = '0';
  while (i-- > 0)
    *str++ = tmp[i];
  /* Trailing spaces for left-justified output.  */
  while (size-- > 0)
    *str++ = ' ';
  return str;
}
+
/* Minimal vsprintf used by the Linux glue.
   Supports flags (-+ #0), '*' and numeric width/precision, h/l/L
   qualifiers, and conversions c s p n o x X d i u %.  Writes into BUF
   (unbounded — the caller must size BUF generously) and returns the
   length excluding the terminating NUL.  */
int
linux_vsprintf (char *buf, const char *fmt, va_list args)
{
  int len;
  unsigned long num;
  int i, base;
  char *str;
  const char *s;

  int flags;			/* flags to number() */

  int field_width;		/* width of output field */
  int precision;		/* min. # of digits for integers; max
				 * number of chars for from string
				 */
  int qualifier;		/* 'h', 'l', or 'L' for integer fields */

  for (str = buf; *fmt; ++fmt)
    {
      if (*fmt != '%')
	{
	  *str++ = *fmt;
	  continue;
	}

      /* process flags */
      flags = 0;
    repeat:
      ++fmt;			/* this also skips first '%' */
      switch (*fmt)
	{
	case '-':
	  flags |= LEFT;
	  goto repeat;
	case '+':
	  flags |= PLUS;
	  goto repeat;
	case ' ':
	  flags |= SPACE;
	  goto repeat;
	case '#':
	  flags |= SPECIAL;
	  goto repeat;
	case '0':
	  flags |= ZEROPAD;
	  goto repeat;
	}

      /* get field width */
      field_width = -1;
      if (is_digit (*fmt))
	field_width = skip_atoi (&fmt);
      else if (*fmt == '*')
	{
	  ++fmt;
	  /* it's the next argument */
	  field_width = va_arg (args, int);
	  if (field_width < 0)
	    {
	      /* Negative '*' width means left-justify.  */
	      field_width = -field_width;
	      flags |= LEFT;
	    }
	}

      /* get the precision */
      precision = -1;
      if (*fmt == '.')
	{
	  ++fmt;
	  if (is_digit (*fmt))
	    precision = skip_atoi (&fmt);
	  else if (*fmt == '*')
	    {
	      ++fmt;
	      /* it's the next argument */
	      precision = va_arg (args, int);
	    }
	  if (precision < 0)
	    precision = 0;
	}

      /* get the conversion qualifier */
      qualifier = -1;
      if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L')
	{
	  qualifier = *fmt;
	  ++fmt;
	}

      /* default base */
      base = 10;

      switch (*fmt)
	{
	case 'c':
	  if (!(flags & LEFT))
	    while (--field_width > 0)
	      *str++ = ' ';
	  *str++ = (unsigned char) va_arg (args, int);
	  while (--field_width > 0)
	    *str++ = ' ';
	  continue;

	case 's':
	  s = va_arg (args, char *);
	  if (!s)
	    s = "<NULL>";

	  /* Precision caps how much of the string is copied.  */
	  len = strnlen (s, precision);

	  if (!(flags & LEFT))
	    while (len < field_width--)
	      *str++ = ' ';
	  for (i = 0; i < len; ++i)
	    *str++ = *s++;
	  while (len < field_width--)
	    *str++ = ' ';
	  continue;

	case 'p':
	  /* Pointers default to zero-padded full-width hex.  */
	  if (field_width == -1)
	    {
	      field_width = 2 * sizeof (void *);
	      flags |= ZEROPAD;
	    }
	  str = number (str,
			(unsigned long) va_arg (args, void *), 16,
			field_width, precision, flags);
	  continue;


	case 'n':
	  /* Store the count of characters written so far.  */
	  if (qualifier == 'l')
	    {
	      long *ip = va_arg (args, long *);
	      *ip = (str - buf);
	    }
	  else
	    {
	      int *ip = va_arg (args, int *);
	      *ip = (str - buf);
	    }
	  continue;

	  /* integer number formats - set up the flags and "break" */
	case 'o':
	  base = 8;
	  break;

	case 'X':
	  flags |= LARGE;
	  /* fall through */
	case 'x':
	  base = 16;
	  break;

	case 'd':
	case 'i':
	  flags |= SIGN;
	  /* fall through */
	case 'u':
	  break;

	default:
	  /* Unknown conversion: emit it literally ("%%" prints '%').  */
	  if (*fmt != '%')
	    *str++ = '%';
	  if (*fmt)
	    *str++ = *fmt;
	  else
	    --fmt;
	  continue;
	}
      /* Fetch the integer argument at the promoted/qualified width.  */
      if (qualifier == 'l')
	num = va_arg (args, unsigned long);
      else if (qualifier == 'h')
	if (flags & SIGN)
	  num = (short) va_arg (args, int);
	else
	  num = (unsigned short) va_arg (args, unsigned int);
      else if (flags & SIGN)
	num = va_arg (args, int);
      else
	num = va_arg (args, unsigned int);
      str = number (str, num, base, field_width, precision, flags);
    }
  *str = '\0';
  return str - buf;
}
+
/* sprintf clone for the Linux glue: format FMT/... into BUF.
   Returns the number of characters written, excluding the NUL.  */
int
linux_sprintf (char *buf, const char *fmt,...)
{
  va_list ap;
  int len;

  va_start (ap, fmt);
  len = linux_vsprintf (buf, fmt, ap);
  va_end (ap);
  return len;
}
diff --git a/linux/dev/net/core/dev.c b/linux/dev/net/core/dev.c
new file mode 100644
index 0000000..cbdf8cc
--- /dev/null
+++ b/linux/dev/net/core/dev.c
@@ -0,0 +1,1648 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dhinds@allegro.stanford.edu>
+ *
+ * Changes:
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant stunts to
+ * keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
+ * a function call a packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close changes.
+ * Rudi Cilibrasi : Pass the right thing to set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to make it work out
+ * on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
+ * is no device open function.
+ * Lawrence V. Stefani : Changed set MTU ioctl to not assume
+ * min MTU of 68 bytes for devices
+ * that have change MTU functions.
+ *
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/slhc.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <net/br.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h>
+#endif /* CONFIG_NET_RADIO */
+
+#ifndef MACH
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ */
+
/* Hash table of type-specific packet handlers, keyed by
   ntohs(type) & 15; ETH_P_ALL taps live on ptype_all instead.  */
struct packet_type *ptype_base[16];
struct packet_type *ptype_all = NULL;		/* Taps */

/*
 * Device list lock
 */

int dev_lockct=0;

/*
 * Our notifier list
 */

struct notifier_block *netdev_chain=NULL;

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the bottom half handler.
 */

static struct sk_buff_head backlog;

/*
 * We don't overdo the queue or we will thrash memory badly.
 */

static int backlog_size = 0;
+
/*
 * Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
+
+
+/******************************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************************/
+
/*
 * For efficiency
 */

/* Count of registered ETH_P_ALL taps, so hot paths can skip the tap
   walk entirely when it is zero.  */
static int dev_nit=0;
+
/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 */

/* Register packet handler PT: ETH_P_ALL taps go on their own list (and
   bump dev_nit); everything else is pushed onto its hash bucket.  */
void dev_add_pack(struct packet_type *pt)
{
	int hash;
	if(pt->type==htons(ETH_P_ALL))
	{
		dev_nit++;
		pt->next=ptype_all;
		ptype_all=pt;
	}
	else
	{
		hash=ntohs(pt->type)&15;
		pt->next = ptype_base[hash];
		ptype_base[hash] = pt;
	}
}
+
+
/*
 * Remove a protocol ID from the list.
 */

/* Unregister PT from the tap list or its hash bucket; warns if it was
   never registered.  */
void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type **pt1;
	if(pt->type==htons(ETH_P_ALL))
	{
		dev_nit--;
		pt1=&ptype_all;
	}
	else
		pt1=&ptype_base[ntohs(pt->type)&15];
	/* Walk link pointers so the match can be spliced out in place. */
	for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
	{
		if(pt==(*pt1))
		{
			*pt1=pt->next;
			return;
		}
	}
	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
}
+
+/*****************************************************************************************
+
+ Device Interface Subroutines
+
+******************************************************************************************/
+
+/*
+ * Find an interface by name.
+ */
+
+struct device *dev_get(const char *name)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (strcmp(dev->name, name) == 0)
+ return(dev);
+ }
+ return NULL;
+}
+
/*
 * Find and possibly load an interface.
 */

#ifdef CONFIG_KERNELD

/* If NAME is not yet present and the caller is privileged, ask kerneld
   to load its module — but not for alias names ("ethX:Y"), which have
   no module of their own.  */
extern __inline__ void dev_load(const char *name)
{
	if(!dev_get(name) && suser()) {
#ifdef CONFIG_NET_ALIAS
		const char *sptr;

		/* Skip the request when NAME contains "X:Y" aliasing. */
		for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break;
		if (!(*sptr && *(sptr+1)))
#endif
		request_module(name);
	}
}

#endif
+
/*
 * Prepare an interface for use.
 */

/* Bring DEV up: run its private open method, and on success mark it
   UP|RUNNING, refresh its multicast filter, and notify listeners.
   Returns 0 on success or a negative errno (-ENODEV when the driver
   provides no open method).  */
int dev_open(struct device *dev)
{
	int ret = -ENODEV;

	/*
	 * Call device private open method
	 */
	if (dev->open)
		ret = dev->open(dev);

	/*
	 * If it went open OK then set the flags
	 */

	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);
		/*
		 * Initialise multicasting status
		 */
		dev_mc_upload(dev);
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}
+
+
/*
 * Completely shutdown an interface.
 */

/* Bring DEV down: stop the driver (if it was up), clear UP|RUNNING,
   notify listeners, discard its multicast list, and free every
   transmit packet still queued on the device.  Always returns 0.  */
int dev_close(struct device *dev)
{
	int ct=0;

	/*
	 * Call the device specific close. This cannot fail.
	 * Only if device is UP
	 */

	if ((dev->flags & IFF_UP) && dev->stop)
		dev->stop(dev);

	/*
	 * Device is now down.
	 */

	dev->flags&=~(IFF_UP|IFF_RUNNING);

	/*
	 * Tell people we are going down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
	/*
	 * Flush the multicast chain
	 */
	dev_mc_discard(dev);

	/*
	 * Purge any queued packets when we down the link
	 */
	while(ct<DEV_NUMBUFFS)
	{
		struct sk_buff *skb;
		while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
			if(skb->free)
				kfree_skb(skb,FREE_WRITE);
		ct++;
	}
	return(0);
}
+
+
/*
 * Device change register/unregister. These are not inline or static
 * as we export them to the world.
 */

/* Subscribe NB to device up/down events on netdev_chain.  */
int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}

/* Remove NB from netdev_chain.  */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain,nb);
}
+
+/*
+ * Send (or queue for sending) a packet.
+ *
+ * IMPORTANT: When this is called to resend frames. The caller MUST
+ * already have locked the sk_buff. Apart from that we do the
+ * rest of the magic.
+ */
+
+static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ unsigned long flags;
+ struct sk_buff_head *list;
+ int retransmission = 0; /* used to say if the packet should go */
+ /* at the front or the back of the */
+ /* queue - front is a retransmit try */
+
+ if(pri>=0 && !skb_device_locked(skb))
+ skb_device_lock(skb); /* Shove a lock on the frame */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb->dev = dev;
+
+ /*
+ * Negative priority is used to flag a frame that is being pulled from the
+ * queue front as a retransmit attempt. It therefore goes back on the queue
+ * start on a failure.
+ */
+
+ if (pri < 0)
+ {
+ pri = -pri-1;
+ retransmission = 1;
+ }
+
+#ifdef CONFIG_NET_DEBUG
+ if (pri >= DEV_NUMBUFFS)
+ {
+ printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
+ pri = 1;
+ }
+#endif
+
+ /*
+ * If the address has not been resolved. Call the device header rebuilder.
+ * This can cover all protocols and technically not just ARP either.
+ */
+
+ if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
+ return;
+ }
+
+ /*
+ *
+ * If dev is an alias, switch to its main device.
+ * "arp" resolution has been made with alias device, so
+ * arp entries refer to alias, not main.
+ *
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ skb->dev = dev = net_alias_dev_tx(dev);
+#endif
+
+ /*
+ * If we are bridging and this is directly generated output
+ * pass the frame via the bridge.
+ */
+
+#ifdef CONFIG_BRIDGE
+ if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
+ {
+ if(br_tx_frame(skb))
+ return;
+ }
+#endif
+
+ list = dev->buffs + pri;
+
+ save_flags(flags);
+ /* if this isn't a retransmission, use the first packet instead... */
+ if (!retransmission) {
+ if (skb_queue_len(list)) {
+ /* avoid overrunning the device queue.. */
+ if (skb_queue_len(list) > dev->tx_queue_len) {
+ dev_kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+ }
+
+ /* copy outgoing packets to any sniffer packet handlers */
+ if (dev_nit) {
+ struct packet_type *ptype;
+ skb->stamp=xtime;
+ for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
+ {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ ((struct sock *)ptype->data != skb->sk))
+ {
+ struct sk_buff *skb2;
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+ break;
+ /* FIXME?: Wrong when the hard_header_len
+ * is an upper bound. Is this even
+ * used anywhere?
+ */
+ skb2->h.raw = skb2->data + dev->hard_header_len;
+ /* On soft header devices we
+ * yank the header before mac.raw
+ * back off. This is set by
+ * dev->hard_header().
+ */
+ if (dev->flags&IFF_SOFTHEADERS)
+ skb_pull(skb2,skb2->mac.raw-skb2->data);
+ skb2->mac.raw = skb2->data;
+ ptype->func(skb2, skb->dev, ptype);
+ }
+ }
+ }
+
+ if (skb_queue_len(list)) {
+ cli();
+ skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
+ __skb_queue_tail(list, skb);
+ skb = __skb_dequeue(list);
+ skb_device_lock(skb); /* New buffer needs locking down */
+ restore_flags(flags);
+ }
+ }
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ /*
+ * Packet is now solely the responsibility of the driver
+ */
+ return;
+ }
+
+ /*
+ * Transmission failed, put skb back into a list. Once on the list it's safe and
+ * no longer device locked (it can be freed safely from the device queue)
+ */
+ cli();
+ skb_device_unlock(skb);
+ __skb_queue_head(list,skb);
+ restore_flags(flags);
+}
+
+void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ start_bh_atomic();
+ do_dev_queue_xmit(skb, dev, pri);
+ end_bh_atomic();
+}
+
+/*
+ * Receive a packet from a device driver and queue it for the upper
+ * (protocol) levels. It always succeeds. This is the recommended
+ * interface to use.
+ */
+
+void netif_rx(struct sk_buff *skb)
+{
+ static int dropping = 0;
+
+ /*
+ * Any received buffers are un-owned and should be discarded
+ * when freed. These will be updated later as the frames get
+ * owners.
+ */
+
+ skb->sk = NULL;
+ skb->free = 1;
+ if(skb->stamp.tv_sec==0)
+ skb->stamp = xtime;
+
+ /*
+ * Check that we aren't overdoing things.
+ */
+
+ if (!backlog_size)
+ dropping = 0;
+ else if (backlog_size > 300)
+ dropping = 1;
+
+ if (dropping)
+ {
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Add it to the "backlog" queue.
+ */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb_queue_tail(&backlog,skb);
+ backlog_size++;
+
+ /*
+ * If any packet arrived, mark it for processing after the
+ * hardware interrupt returns.
+ */
+
+ mark_bh(NET_BH);
+ return;
+}
+
+/*
+ * This routine causes all interfaces to try to send some data.
+ */
+
+static void dev_transmit(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags != 0 && !dev->tbusy) {
+ /*
+ * Kick the device
+ */
+ dev_tint(dev);
+ }
+ }
+}
+
+
+/**********************************************************************************
+
+ Receive Queue Processor
+
+***********************************************************************************/
+
+/*
+ * When we are called the queue is ready to grab, the interrupts are
+ * on and hardware can interrupt and queue to the receive queue as we
+ * run with no problems.
+ * This is run as a bottom half after an interrupt handler that does
+ * mark_bh(NET_BH);
+ */
+
+void net_bh(void)
+{
+ struct packet_type *ptype;
+ struct packet_type *pt_prev;
+ unsigned short type;
+
+ /*
+ * Can we send anything now? We want to clear the
+ * decks for any more sends that get done as we
+ * process the input. This also minimises the
+ * latency on a transmit interrupt bh.
+ */
+
+ dev_transmit();
+
+ /*
+ * Any data left to process. This may occur because a
+ * mark_bh() is done after we empty the queue including
+ * that from the device which does a mark_bh() just after
+ */
+
+ /*
+ * While the queue is not empty..
+ *
+ * Note that the queue never shrinks due to
+ * an interrupt, so we can do this test without
+ * disabling interrupts.
+ */
+
+ while (!skb_queue_empty(&backlog)) {
+ struct sk_buff * skb = backlog.next;
+
+ /*
+ * We have a packet. Therefore the queue has shrunk
+ */
+ cli();
+ __skb_unlink(skb, &backlog);
+ backlog_size--;
+ sti();
+
+
+#ifdef CONFIG_BRIDGE
+
+ /*
+ * If we are bridging then pass the frame up to the
+ * bridging code. If it is bridged then move on
+ */
+
+ if (br_stats.flags & BR_UP)
+ {
+ /*
+ * We pass the bridge a complete frame. This means
+ * recovering the MAC header first.
+ */
+
+ int offset=skb->data-skb->mac.raw;
+ cli();
+ skb_push(skb,offset); /* Put header back on for bridge */
+ if(br_receive_frame(skb))
+ {
+ sti();
+ continue;
+ }
+ /*
+ * Pull the MAC header off for the copy going to
+ * the upper layers.
+ */
+ skb_pull(skb,offset);
+ sti();
+ }
+#endif
+
+ /*
+ * Bump the pointer to the next structure.
+ *
+ * On entry to the protocol layer. skb->data and
+ * skb->h.raw point to the MAC and encapsulated data
+ */
+
+ skb->h.raw = skb->data;
+
+ /*
+ * Fetch the packet protocol ID.
+ */
+
+ type = skb->protocol;
+
+ /*
+ * We got a packet ID. Now loop over the "known protocols"
+ * list. There are two lists. The ptype_all list of taps (normally empty)
+ * and the main protocol list which is hashed perfectly for normal protocols.
+ */
+
+ pt_prev = NULL;
+ for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
+ {
+ if(!ptype->dev || ptype->dev == skb->dev) {
+ if(pt_prev) {
+ struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
+ if(skb2)
+ pt_prev->func(skb2,skb->dev, pt_prev);
+ }
+ pt_prev=ptype;
+ }
+ }
+
+ for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
+ {
+ /*
+ * We already have a match queued. Deliver
+ * to it and then remember the new match
+ */
+ if(pt_prev)
+ {
+ struct sk_buff *skb2;
+
+ skb2=skb_clone(skb, GFP_ATOMIC);
+
+ /*
+ * Kick the protocol handler. This should be fast
+ * and efficient code.
+ */
+
+ if(skb2)
+ pt_prev->func(skb2, skb->dev, pt_prev);
+ }
+ /* Remember the current last to do */
+ pt_prev=ptype;
+ }
+ } /* End of protocol list loop */
+
+ /*
+ * Is there a last item to send to ?
+ */
+
+ if(pt_prev)
+ pt_prev->func(skb, skb->dev, pt_prev);
+ /*
+ * Has an unknown packet has been received ?
+ */
+
+ else
+ kfree_skb(skb, FREE_WRITE);
+ /*
+ * Again, see if we can transmit anything now.
+ * [Ought to take this out judging by tests it slows
+ * us down not speeds us up]
+ */
+#ifdef XMIT_EVERY
+ dev_transmit();
+#endif
+ } /* End of queue loop */
+
+ /*
+ * We have emptied the queue
+ */
+
+ /*
+ * One last output flush.
+ */
+
+#ifdef XMIT_AFTER
+ dev_transmit();
+#endif
+}
+
+
+/*
+ * This routine is called when an device driver (i.e. an
+ * interface) is ready to transmit a packet.
+ */
+
+void dev_tint(struct device *dev)
+{
+ int i;
+ unsigned long flags;
+ struct sk_buff_head * head;
+
+ /*
+ * aliases do not transmit (for now :) )
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev)) return;
+#endif
+ head = dev->buffs;
+ save_flags(flags);
+ cli();
+
+ /*
+ * Work the queues in priority order
+ */
+ for(i = 0;i < DEV_NUMBUFFS; i++,head++)
+ {
+
+ while (!skb_queue_empty(head)) {
+ struct sk_buff *skb;
+
+ skb = head->next;
+ __skb_unlink(skb, head);
+ /*
+ * Stop anyone freeing the buffer while we retransmit it
+ */
+ skb_device_lock(skb);
+ restore_flags(flags);
+ /*
+ * Feed them to the output stage and if it fails
+ * indicate they re-queue at the front.
+ */
+ do_dev_queue_xmit(skb,dev,-i - 1);
+ /*
+ * If we can take no more then stop here.
+ */
+ if (dev->tbusy)
+ return;
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size shortly, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char *arg)
+{
+ struct ifconf ifc;
+ struct ifreq ifr;
+ struct device *dev;
+ char *pos;
+ int len;
+ int err;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
+ if(err)
+ return err;
+ memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
+ len = ifc.ifc_len;
+ pos = ifc.ifc_buf;
+
+ /*
+ * We now walk the device list filling each active device
+ * into the array.
+ */
+
+ err=verify_area(VERIFY_WRITE,pos,len);
+ if(err)
+ return err;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
+ continue;
+ /*
+ * Have we run out of space here ?
+ */
+
+ if (len < sizeof(struct ifreq))
+ break;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ strcpy(ifr.ifr_name, dev->name);
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+
+
+ /*
+ * Write this block to the caller's space.
+ */
+
+ memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
+ pos += sizeof(struct ifreq);
+ len -= sizeof(struct ifreq);
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+
+ ifc.ifc_len = (pos - ifc.ifc_buf);
+ ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
+ memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
+
+ /*
+ * Report how much was filled in
+ */
+
+ return(pos - arg);
+}
+
+
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+
+#ifdef CONFIG_PROC_FS
+static int sprintf_stats(char *buffer, struct device *dev)
+{
+ struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
+ int size;
+
+ if (stats)
+ size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
+ dev->name,
+ stats->rx_packets, stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors
+ + stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors + stats->tx_aborted_errors
+ + stats->tx_window_errors + stats->tx_heartbeat_errors);
+ else
+ size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ return size;
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
+ * to create /proc/net/dev
+ */
+
+int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+
+ struct device *dev;
+
+
+ size = sprintf(buffer, "Inter-| Receive | Transmit\n"
+ " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Print one entry of /proc/net/wireless
+ * This is a clone of /proc/net/dev (just above)
+ */
+static int
+sprintf_wireless_stats(char * buffer,
+ struct device * dev)
+{
+ /* Get stats from the driver */
+ struct iw_statistics *stats = (dev->get_wireless_stats ?
+ dev->get_wireless_stats(dev) :
+ (struct iw_statistics *) NULL);
+ int size;
+
+ if(stats != (struct iw_statistics *) NULL)
+ size = sprintf(buffer,
+ "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n",
+ dev->name,
+ stats->status,
+ stats->qual.qual,
+ stats->qual.updated & 1 ? '.' : ' ',
+ stats->qual.level,
+ stats->qual.updated & 2 ? '.' : ' ',
+ stats->qual.noise,
+ stats->qual.updated & 3 ? '.' : ' ',
+ stats->discard.nwid,
+ stats->discard.code,
+ stats->discard.misc);
+ else
+ size = 0;
+
+ return size;
+}
+
+/*
+ * Print info for /proc/net/wireless (print all entries)
+ * This is a clone of /proc/net/dev (just above)
+ */
+int
+dev_get_wireless_info(char * buffer,
+ char ** start,
+ off_t offset,
+ int length,
+ int dummy)
+{
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ int size;
+
+ struct device * dev;
+
+ size = sprintf(buffer,
+ "Inter-|sta| Quality | Discarded packets\n"
+ " face |tus|link level noise| nwid crypt misc\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for(dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_wireless_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos < offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos > offset + length)
+ break;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if(len > length)
+ len = length; /* Ending slop */
+
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+
+/*
+ * This checks bitmasks for the ioctl calls for devices.
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
+
+/*
+ * Perform the SIOCxIFxxx calls.
+ *
+ * The socket layer has seen an ioctl the address family thinks is
+ * for the device. At this point we get invoked to make a decision
+ */
+
+static int dev_ifsioc(void *arg, unsigned int getset)
+{
+ struct ifreq ifr;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+
+ int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
+ if(err)
+ return err;
+
+ memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ /*
+ *
+ * net_alias_dev_get(): dev_get() with added alias naming magic.
+ * only allow alias creation/deletion if (getset==SIOCSIFADDR)
+ *
+ */
+
+#ifdef CONFIG_KERNELD
+ dev_load(ifr.ifr_name);
+#endif
+
+#ifdef CONFIG_NET_ALIAS
+ if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
+ return(err);
+#else
+ if ((dev = dev_get(ifr.ifr_name)) == NULL)
+ return(-ENODEV);
+#endif
+ switch(getset)
+ {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr.ifr_flags = (dev->flags & ~IFF_SOFTHEADERS);
+ goto rarok;
+
+ case SIOCSIFFLAGS: /* Set interface flags */
+ {
+ int old_flags = dev->flags;
+
+ if(securelevel>0)
+ ifr.ifr_flags&=~IFF_PROMISC;
+ /*
+ * We are not allowed to potentially close/unload
+ * a device until we get this lock.
+ */
+
+ dev_lock_wait();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (ifr.ifr_flags & (
+ IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
+ | IFF_MULTICAST)) | (dev->flags & (IFF_SOFTHEADERS|IFF_UP));
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+
+ /*
+ * Have we downed the interface. We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ if ((old_flags^ifr.ifr_flags)&IFF_UP) /* Bit is different ? */
+ {
+ if(old_flags&IFF_UP) /* Gone down */
+ ret=dev_close(dev);
+ else /* Come up */
+ {
+ ret=dev_open(dev);
+ if(ret<0)
+ dev->flags&=~IFF_UP; /* Open failed */
+ }
+ }
+ else
+ ret=0;
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+ }
+ break;
+
+ case SIOCGIFADDR: /* Get interface address (and family) */
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+ }
+ else
+ {
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_port = 0;
+ }
+ goto rarok;
+
+ case SIOCSIFADDR: /* Set interface address (and family) */
+
+ /*
+ * BSDism. SIOCSIFADDR family=AF_UNSPEC sets the
+ * physical address. We can cope with this now.
+ */
+
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel>0)
+ return -EPERM;
+ ret=dev->set_mac_address(dev,&ifr.ifr_addr);
+ }
+ else
+ {
+ u32 new_pa_addr = (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr;
+ u16 new_family = ifr.ifr_addr.sa_family;
+
+ if (new_family == dev->family &&
+ new_pa_addr == dev->pa_addr) {
+ ret =0;
+ break;
+ }
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+
+ /*
+ * if dev is an alias, must rehash to update
+ * address change
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ net_alias_dev_rehash(dev ,&ifr.ifr_addr);
+#endif
+ dev->pa_addr = new_pa_addr;
+ dev->family = new_family;
+
+#ifdef CONFIG_INET
+ /* This is naughty. When net-032e comes out It wants moving into the net032
+ code not the kernel. Till then it can sit here (SIGH) */
+ if (!dev->pa_mask)
+ dev->pa_mask = ip_get_mask(dev->pa_addr);
+#endif
+ if (!dev->pa_brdaddr)
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFBRDADDR: /* Get the broadcast address */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+ dev->pa_brdaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
+ dev->pa_dstaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ {
+ unsigned long mask = (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr;
+ ret = -EINVAL;
+ /*
+ * The mask we set must be legal.
+ */
+ if (bad_mask(mask,0))
+ break;
+ dev->pa_mask = mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
+
+ ifr.ifr_metric = dev->metric;
+ goto rarok;
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
+ dev->metric = ifr.ifr_metric;
+ ret=0;
+ break;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr.ifr_mtu = dev->mtu;
+ goto rarok;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+
+ if (dev->change_mtu)
+ ret = dev->change_mtu(dev, ifr.ifr_mtu);
+ else
+ {
+ /*
+ * MTU must be positive.
+ */
+
+ if(ifr.ifr_mtu<68)
+ return -EINVAL;
+
+ dev->mtu = ifr.ifr_mtu;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
+ do not support it */
+ ret = -EINVAL;
+ break;
+
+ case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
+ ret = -EINVAL;
+ break;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+
+ case SIOCSIFHWADDR:
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel > 0)
+ return -EPERM;
+ if(ifr.ifr_hwaddr.sa_family!=dev->type)
+ return -EINVAL;
+ ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
+ break;
+
+ case SIOCGIFMAP:
+ ifr.ifr_map.mem_start=dev->mem_start;
+ ifr.ifr_map.mem_end=dev->mem_end;
+ ifr.ifr_map.base_addr=dev->base_addr;
+ ifr.ifr_map.irq=dev->irq;
+ ifr.ifr_map.dma=dev->dma;
+ ifr.ifr_map.port=dev->if_port;
+ goto rarok;
+
+ case SIOCSIFMAP:
+ if(dev->set_config==NULL)
+ return -EOPNOTSUPP;
+ return dev->set_config(dev,&ifr.ifr_map);
+
+ case SIOCADDMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
+ return 0;
+
+ case SIOCDELMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
+ return 0;
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if((getset >= SIOCDEVPRIVATE) &&
+ (getset <= (SIOCDEVPRIVATE + 15))) {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ break;
+ }
+
+#ifdef CONFIG_NET_RADIO
+ if((getset >= SIOCIWFIRST) &&
+ (getset <= SIOCIWLAST))
+ {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ /* Perform the ioctl */
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ /* If return args... */
+ if(IW_IS_GET(getset))
+ memcpy_tofs(arg, &ifr,
+ sizeof(struct ifreq));
+ break;
+ }
+#endif /* CONFIG_NET_RADIO */
+
+ ret = -EINVAL;
+ }
+ return(ret);
+/*
+ * The load of calls that return an ifreq and ok (saves memory).
+ */
+rarok:
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ return 0;
+}
+
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+int dev_ioctl(unsigned int cmd, void *arg)
+{
+ switch(cmd)
+ {
+ case SIOCGIFCONF:
+ (void) dev_ifconf((char *) arg);
+ return 0;
+
+ /*
+ * Ioctl calls that can be done by all.
+ */
+
+ case SIOCGIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ return dev_ifsioc(arg, cmd);
+
+ /*
+ * Ioctl calls requiring the power of a superuser
+ */
+
+ case SIOCSIFFLAGS:
+ case SIOCSIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMEM:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMAP:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+
+ default:
+ if((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15))) {
+ return dev_ifsioc(arg, cmd);
+ }
+#ifdef CONFIG_NET_RADIO
+ if((cmd >= SIOCIWFIRST) &&
+ (cmd <= SIOCIWLAST))
+ {
+ if((IW_IS_SET(cmd)) && (!suser()))
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+ }
+#endif /* CONFIG_NET_RADIO */
+ return -EINVAL;
+ }
+}
+#endif /* !MACH */
+
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+extern int lance_init(void);
+extern int pi_init(void);
+extern void sdla_setup(void);
+extern int dlci_setup(void);
+
+int net_dev_init(void)
+{
+ struct device *dev, **dp;
+
+ /*
+ * Initialise the packet receive queue.
+ */
+
+#ifndef MACH
+ skb_queue_head_init(&backlog);
+#endif
+
+ /*
+ * The bridge has to be up before the devices
+ */
+
+#ifdef CONFIG_BRIDGE
+ br_init();
+#endif
+
+ /*
+ * This is Very Ugly(tm).
+ *
+ * Some devices want to be initialized early..
+ */
+#if defined(CONFIG_PI)
+ pi_init();
+#endif
+#if defined(CONFIG_PT)
+ pt_init();
+#endif
+#if defined(CONFIG_DLCI)
+ dlci_setup();
+#endif
+#if defined(CONFIG_SDLA)
+ sdla_setup();
+#endif
+ /*
+ * SLHC if present needs attaching so other people see it
+ * even if not opened.
+ */
+#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
+ || defined(CONFIG_PPP) \
+ || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
+ slhc_install();
+#endif
+
+ /*
+ * Add the devices.
+ * If the call to dev->init fails, the dev is removed
+ * from the chain disconnecting the device until the
+ * next reboot.
+ */
+
+ dp = &dev_base;
+ while ((dev = *dp) != NULL)
+ {
+ int i;
+ for (i = 0; i < DEV_NUMBUFFS; i++) {
+ skb_queue_head_init(dev->buffs + i);
+ }
+
+ if (dev->init && dev->init(dev))
+ {
+ /*
+ * It failed to come up. Unhook it.
+ */
+ *dp = dev->next;
+ }
+ else
+ {
+ dp = &dev->next;
+ }
+ }
+
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_DEV, 3, "dev",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_info
+ });
+#endif
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_WIRELESS, 8, "wireless",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_wireless_info
+ });
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+ /*
+ * Initialise net_alias engine
+ *
+ * - register net_alias device notifier
+ * - register proc entries: /proc/net/alias_types
+ * /proc/net/aliases
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ net_alias_init();
+#endif
+
+ init_bh(NET_BH, net_bh);
+ return 0;
+}
+
+/*
+ * Change the flags of device DEV to FLAGS.
+ */
+int dev_change_flags (struct device *dev, short flags)
+{
+ if (securelevel > 0)
+ flags &= ~IFF_PROMISC;
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (flags &
+ (IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE
+ | IFF_MASTER | IFF_MULTICAST))
+ | (dev->flags & (IFF_SOFTHEADERS|IFF_UP));
+
+ /* The flags are taken into account (multicast, promiscuous, ...)
+ in the set_multicast_list handler. */
+ if ((dev->flags & IFF_UP) && dev->set_multicast_list != NULL)
+ dev->set_multicast_list (dev);
+
+ return 0;
+}
+
diff --git a/linux/pcmcia-cs/clients/3c574_cs.c b/linux/pcmcia-cs/clients/3c574_cs.c
new file mode 100644
index 0000000..9dc045a
--- /dev/null
+++ b/linux/pcmcia-cs/clients/3c574_cs.c
@@ -0,0 +1,1349 @@
+/* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner".
+
+ Written 1993-1998 by
+ Donald Becker, becker@scyld.com, (driver core) and
+ David Hinds, dahinds@users.sourceforge.net (from his PC card code).
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver derives from Donald Becker's 3c509 core, which has the
+ following copyright:
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+*/
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c574 PC card Fast Ethernet
+Adapter.
+
+II. Board-specific settings
+
+None -- PC cards are autoconfigured.
+
+III. Driver operation
+
+The 3c574 uses a Boomerang-style interface, without the bus-master capability.
+See the Boomerang driver and documentation for most details.
+
+IV. Notes and chip documentation.
+
+Two added registers are used to enhance PIO performance, RunnerRdCtrl and
+RunnerWrCtrl. These are 11 bit down-counters that are preloaded with the
+count of word (16 bits) reads or writes the driver is about to do to the Rx
+or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card
+translation latency by buffering the I/O operations with an 8 word FIFO.
+Note: No other chip accesses are permitted when this buffer is used.
+
+A second enhancement is that both attribute and common memory space
+0x0800-0x0fff can be translated to the PIO FIFO. Thus memory operations (faster
+with *some* PCcard bridges) may be used instead of I/O operations.
+This is enabled by setting the 0x10 bit in the PCMCIA LAN COR.
+
+Some slow PC card bridges work better if they never see a WAIT signal.
+This is configured by setting the 0x20 bit in the PCMCIA LAN COR.
+Only do this after testing that it is reliable and improves performance.
+
+The upper five bits of RunnerRdCtrl are used to window into PCcard
+configuration space registers. Window 0 is the regular Boomerang/Odie
+register set, 1-5 are various PC card control registers, and 16-31 are
+the (reversed!) CIS table.
+
+A final note: writing the InternalConfig register in window 3 with an
+invalid ramWidth is Very Bad.
+
+V. References
+
+http://www.scyld.com/expert/NWay.html
+http://www.national.com/pf/DP/DP83840.html
+
+Thanks to Terry Murphy of 3Com for providing development information for
+earlier 3Com products.
+
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/mem_op.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Now-standard PC card module parameters. */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+INT_MODULE_PARM(max_interrupt_work, 32);
+
+/* Force full duplex modes? */
+INT_MODULE_PARM(full_duplex, 0);
+
+/* Autodetect link polarity reversal? */
+INT_MODULE_PARM(auto_polarity, 1);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"3c574_cs.c 1.70 2003/08/25 15:57:40 Donald Becker/David Hinds, becker@scyld.com.\n";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((800*HZ)/1000)
+
+/* To minimize the size of the driver source and make the driver more
+ readable not all constants are symbolically defined.
+ You'll need the manual if you want to understand driver details anyway. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum el3_cmds {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,
+};
+
+enum elxl_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+enum Window0 {
+ Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+/* These assume the larger EEPROM. */
+enum Win0_EEPROM_cmds {
+ EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the "Odie" this window is always mapped at offsets 0x10-0x1f.
+ Except for TxFree, which is overlapped by RunnerWrCtrl. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */
+ RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c,
+};
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+union wn3_config {
+ int i;
+ struct w3_config_fields {
+ unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+ int pad8:8;
+ unsigned int ram_split:2, pad18:2, xcvr:3, pad21:1, autoselect:1;
+ int pad24:7;
+ } u;
+};
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+
+
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+
+struct el3_private {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats stats;
+ u16 advertising, partner; /* NWay media advertisement */
+ unsigned char phys; /* MII device address */
+ unsigned int
+ autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
+ /* for transceiver monitoring */
+ struct timer_list media;
+ u_short media_status;
+ u_short fast_poll;
+ u_long last_irq;
+};
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This is only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Index of functions. */
+
+static void tc574_config(dev_link_t *link);
+static void tc574_release(u_long arg);
+static int tc574_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static void mdio_sync(ioaddr_t ioaddr, int bits);
+static int mdio_read(ioaddr_t ioaddr, int phy_id, int location);
+static void mdio_write(ioaddr_t ioaddr, int phy_id, int location, int value);
+static u_short read_eeprom(ioaddr_t ioaddr, int index);
+static void tc574_wait_for_completion(struct net_device *dev, int cmd);
+
+static void tc574_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static int el3_open(struct net_device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev, int worklimit);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+
+static dev_info_t dev_info = "3c574_cs";
+
+static dev_link_t *tc574_attach(void);
+static void tc574_detach(dev_link_t *);
+
+static dev_link_t *dev_list = NULL;
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ tc574_detach(link);
+ }
+}
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+#if CS_RELEASE_CODE < 0x2911
+ CardServices(ReportError, dev_info, (void *)func, (void *)ret);
+#else
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+#endif
+}
+
+/*
+ tc574_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+*/
+
+static dev_link_t *tc574_attach(void)
+{
+ struct el3_private *lp;
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ int i, ret;
+
+ DEBUG(0, "3c574_attach()\n");
+ flush_stale_links();
+
+ /* Create the PC card device object. */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ init_timer(&link->release);
+ link->release.function = &tc574_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &el3_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ /* The EL3-specific entries in the device structure. */
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->get_stats = &el3_get_stats;
+ dev->do_ioctl = &el3_ioctl;
+ dev->set_multicast_list = &set_rx_mode;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &el3_open;
+ dev->stop = &el3_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = el3_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &tc574_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ tc574_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* tc574_attach */
+
+/*
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+*/
+
+static void tc574_detach(dev_link_t *link)
+{
+ struct el3_private *lp = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "3c574_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ tc574_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&lp->dev);
+ kfree(lp);
+
+} /* tc574_detach */
+
+/*
+ tc574_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+static void tc574_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32];
+ int last_fn, last_ret, i, j;
+ ioaddr_t ioaddr;
+ u16 *phys_addr;
+ char *cardname;
+
+ phys_addr = (u16 *)dev->dev_addr;
+
+ DEBUG(0, "3c574_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->io.IOAddrLines = 16;
+ for (i = j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ ioaddr = dev->base_addr;
+ copy_dev_name(lp->node, dev);
+ link->dev = &lp->node;
+
+ /* The 3c574 normally uses an EEPROM for configuration info, including
+ the hardware address. The future products may include a modem chip
+ and put the address in the CIS. */
+ tuple.DesiredTuple = 0x88;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) {
+ CardServices(GetTupleData, handle, &tuple);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(buf[i]);
+ } else {
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (phys_addr[0] == 0x6060) {
+ printk(KERN_NOTICE "3c574_cs: IO port conflict at 0x%03lx"
+ "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS &&
+ CardServices(GetTupleData, handle, &tuple) == CS_SUCCESS &&
+ CardServices(ParseTuple, handle, &tuple, &parse) == CS_SUCCESS) {
+ cardname = parse.version_1.str + parse.version_1.ofs[1];
+ } else
+ cardname = "3Com 3c574";
+
+ printk(KERN_INFO "%s: %s at io %#3lx, irq %d, hw_addr ",
+ dev->name, cardname, dev->base_addr, dev->irq);
+
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : ".\n"));
+
+ {
+ u_char mcr, *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ union wn3_config config;
+ outw(2<<11, ioaddr + RunnerRdCtrl);
+ mcr = inb(ioaddr + 2);
+ outw(0<<11, ioaddr + RunnerRdCtrl);
+ printk(KERN_INFO " ASIC rev %d,", mcr>>3);
+ EL3WINDOW(3);
+ config.i = inl(ioaddr + Wn3_Config);
+ printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n",
+ 8 << config.u.ram_size, ram_split[config.u.ram_split],
+ config.u.autoselect ? "autoselect " : "");
+ lp->default_media = config.u.xcvr;
+ lp->autoselect = config.u.autoselect;
+ }
+
+ {
+ int phy;
+
+ /* Roadrunner only: Turn on the MII transceiver */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ EL3WINDOW(4);
+ for (phy = 1; phy <= 32; phy++) {
+ int mii_status;
+ mdio_sync(ioaddr, 32);
+ mii_status = mdio_read(ioaddr, phy & 0x1f, 1);
+ if (mii_status != 0xffff) {
+ lp->phys = phy & 0x1f;
+ DEBUG(0, " MII transceiver at index %d, status %x.\n",
+ phy, mii_status);
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required = 1;
+ break;
+ }
+ }
+ if (phy > 32) {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ goto failed;
+ }
+ i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ lp->advertising = mdio_read(ioaddr, lp->phys, 4);
+ if (full_duplex) {
+ /* Only advertise the FD media types. */
+ lp->advertising &= ~0x02a0;
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ }
+ }
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ tc574_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+} /* tc574_config */
+
+/*
+ After a card is removed, tc574_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+*/
+
+static void tc574_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "3c574_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "3c574_cs: release postponed, '%s' still open\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* tc574_release */
+
+/*
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+*/
+
+static int tc574_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(1, "3c574_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ tc574_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ tc574_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* tc574_event */
+
+static void dump_status(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
+ "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
+ inw(ioaddr+TxFree));
+ EL3WINDOW(4);
+ printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
+ " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc574_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 1500;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n",
+ dev->name, cmd);
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static u_short read_eeprom(ioaddr_t ioaddr, int index)
+{
+ int timer;
+ outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 usec for the read to take place. */
+ for (timer = 1620; timer >= 0; timer--) {
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ break;
+ }
+ return inw(ioaddr + Wn0EepromData);
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+ The maximum data clock rate is 2.5 MHz. The timing is easily met by the
+ slow PC card interface. */
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(ioaddr_t ioaddr, int bits)
+{
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ }
+}
+
+static int mdio_read(ioaddr_t ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(ioaddr_t ioaddr, int phy_id, int location, int value)
+{
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ /* Leave the interface idle. */
+ for (i = 1; i >= 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+
+ return;
+}
+
+/* Reset and restore all of the 3c574 registers. */
+static void tc574_reset(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ int i, ioaddr = dev->base_addr;
+
+ tc574_wait_for_completion(dev, TotalReset|0x10);
+
+ /* Clear any transactions in progress. */
+ outw(0, ioaddr + RunnerWrCtrl);
+ outw(0, ioaddr + RunnerRdCtrl);
+
+ /* Set the station address and mask. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+
+ /* Reset config options */
+ EL3WINDOW(3);
+ outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
+ ioaddr + Wn3_Config);
+
+ /* Roadrunner only: Turn on the MII transceiver. */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ inb(ioaddr + 13);
+
+ /* .. enable any extra statistics bits.. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+ /* .. re-sync MII and re-fill what NWay is advertising. */
+ mdio_sync(ioaddr, 32);
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ if (!auto_polarity) {
+ /* works for TDK 78Q2120 series MII's */
+ int i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ }
+
+ /* Switch to register set 1 for normal use, just for TxFree. */
+ EL3WINDOW(1);
+
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure | RxEarly, ioaddr + EL3_CMD);
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+
+ tc574_reset(dev);
+ lp->media.function = &media_check;
+ lp->media.data = (u_long)lp;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ DEBUG(2, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+ dump_status(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TxStatus);
+ if (!(tx_status & 0x84)) break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc574_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ DEBUG(1, "%s: transmit error: status 0x%02x\n",
+ dev->name, tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+}
+
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ tx_timeout_check(dev, el3_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ DEBUG(3, "%s: el3_start_xmit(length = %ld) called, "
+ "status %4.4x.\n", dev->name, (long)skb->len,
+ inw(ioaddr + EL3_STATUS));
+
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0, ioaddr + TX_FIFO);
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
+
+ dev->trans_start = jiffies;
+
+ /* TxFree appears only in Window 1, not offset 0x1c. */
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_start_queue(dev);
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet.
+ The threshold is in units of dwords. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+
+ DEV_KFREE_SKB (skb);
+ pop_tx_status(dev);
+
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct el3_private *lp = dev_id;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr, status;
+ int work_budget = max_interrupt_work;
+
+ if (!netif_device_present(dev))
+ return;
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | RxEarly | StatsFull)) {
+ if (!netif_device_present(dev) ||
+ ((status & 0xe000) != 0x2000)) {
+ DEBUG(1, "%s: Interrupt from dead card\n", dev->name);
+ break;
+ }
+
+ if (status & RxComplete)
+ work_budget = el3_rx(dev, work_budget);
+
+ if (status & TxAvailable) {
+ DEBUG(3, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+
+ if (status & TxComplete)
+ pop_tx_status(dev);
+
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull)
+ update_stats(dev);
+ if (status & RxEarly) {
+ work_budget = el3_rx(dev, work_budget);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ EL3WINDOW(1);
+ printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
+ " register %04x.\n", dev->name, fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc574_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--work_budget < 0) {
+ DEBUG(0, "%s: Too much work in interrupt, "
+ "status %4.4x.\n", dev->name, status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+
+ DEBUG(3, "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+ return;
+}
+
+/*
+ This timer serves two purposes: to check for missed interrupts
+ (and as a last resort, poll the NIC for events), and to monitor
+ the MII, reporting changes in cable status.
+*/
+static void media_check(u_long arg)
+{
+ struct el3_private *lp = (struct el3_private *)arg;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_long flags;
+ u_short /* cable, */ media, partner;
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ (inb(ioaddr + Timer) == 0xff)) {
+ if (!lp->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ el3_interrupt(dev->irq, lp, NULL);
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + 2;
+ add_timer(&lp->media);
+ return;
+ }
+
+ save_flags(flags);
+ cli();
+#if 0
+ outw(2<<11, ioaddr + RunnerRdCtrl);
+ cable = inb(ioaddr);
+ outb(0x20, ioaddr);
+ outw(0, ioaddr + RunnerRdCtrl);
+#endif
+ EL3WINDOW(4);
+ media = mdio_read(ioaddr, lp->phys, 1);
+ partner = mdio_read(ioaddr, lp->phys, 5);
+ EL3WINDOW(1);
+ restore_flags(flags);
+
+#if 0
+ if (cable & 0x20)
+ printk(KERN_INFO "%s: cable %s\n", dev->name,
+ ((cable & 0x08) ? "fixed" : "problem"));
+#endif
+ if (media != lp->media_status) {
+ if ((media ^ lp->media_status) & 0x0004)
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (lp->media_status & 0x0004) ? "lost" : "found");
+ if ((media ^ lp->media_status) & 0x0020) {
+ lp->partner = 0;
+ if (lp->media_status & 0x0020) {
+ printk(KERN_INFO "%s: autonegotiation restarted\n",
+ dev->name);
+ } else if (partner) {
+ partner &= lp->advertising;
+ lp->partner = partner;
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((partner & 0x0180) ? "100" : "10"),
+ ((partner & 0x0140) ? 'F' : 'H'));
+ } else {
+ printk(KERN_INFO "%s: link partner did not autonegotiate\n",
+ dev->name);
+ }
+
+ EL3WINDOW(3);
+ outb((partner & 0x0140 ? 0x20 : 0) |
+ (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ EL3WINDOW(1);
+
+ }
+ if (media & 0x0010)
+ printk(KERN_INFO "%s: remote fault detected\n",
+ dev->name);
+ if (media & 0x0002)
+ printk(KERN_INFO "%s: jabber detected\n", dev->name);
+ lp->media_status = media;
+ }
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+
+ if (netif_device_present(dev))
+ update_stats(dev);
+ return &lp->stats;
+}
+
+/* Update statistics.
+ Surprisingly this need not be run single-threaded, but it effectively is.
+ The counters clear when read, so the adds must merely be atomic.
+ */
+static void update_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u8 rx, tx, up;
+
+ DEBUG(2, "%s: updating the statistics.\n", dev->name);
+
+ if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */
+ return;
+
+ /* Unlike the 3c509 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ up = inb(ioaddr + 9);
+ lp->stats.tx_packets += (up&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ rx = inw(ioaddr + 10);
+ tx = inw(ioaddr + 12);
+
+ EL3WINDOW(4);
+ /* BadSSD */ inb(ioaddr + 12);
+ up = inb(ioaddr + 13);
+
+ add_tx_bytes(&lp->stats, tx + ((up & 0xf0) << 12));
+
+ EL3WINDOW(1);
+}
+
+static int el3_rx(struct net_device *dev, int worklimit)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ short rx_status;
+
+ DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
+ (--worklimit >= 0)) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+
+ DEBUG(3, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ ((pkt_len+3)>>2));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ add_rx_bytes(&lp->stats, pkt_len);
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of"
+ " size %d.\n", dev->name, pkt_len);
+ lp->stats.rx_dropped++;
+ }
+ }
+ tc574_wait_for_completion(dev, RxDiscard);
+ }
+
+ return worklimit;
+}
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ int phy = lp->phys & 0x1f;
+
+ DEBUG(2, "%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n",
+ dev->name, rq->ifr_ifrn.ifrn_name, cmd,
+ data[0], data[1], data[2], data[3]);
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+ data[0] = phy;
+ case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+ {
+ int saved_window;
+ long flags;
+
+ save_flags(flags);
+ cli();
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ EL3WINDOW(saved_window);
+ restore_flags(flags);
+ return 0;
+ }
+ case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+ {
+ int saved_window;
+ long flags;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ save_flags(flags);
+ cli();
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ EL3WINDOW(saved_window);
+ restore_flags(flags);
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* The Odie chip has a 64 bin multicast filter, but the bit layout is not
+ documented. Until it is we revert to receiving all multicast frames when
+ any multicast reception is desired.
+ Note: My other drivers emit a log message whenever promiscuous mode is
+ entered to help detect password sniffers. This is less desirable on
+ typical PC card machines, so we omit the message.
+ */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ if (dev->flags & IFF_PROMISC)
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+static int el3_close(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ struct el3_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
+
+ if (DEV_OK(link)) {
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ /* Note: Switching to window 0 may disable the IRQ. */
+ EL3WINDOW(0);
+
+ update_stats(dev);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&lp->media);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int __init init_3c574_cs(void)
+{
+ servinfo_t serv;
+
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "3c574_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &tc574_attach, &tc574_detach);
+ return 0;
+}
+
+static void __exit exit_3c574_cs(void)
+{
+ DEBUG(0, "3c574_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ tc574_detach(dev_list);
+}
+
+module_init(init_3c574_cs);
+module_exit(exit_3c574_cs);
+
+/*
+ * Local variables:
+ * compile-command: "make 3c574_cs.o"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/pcmcia-cs/clients/3c589_cs.c b/linux/pcmcia-cs/clients/3c589_cs.c
new file mode 100644
index 0000000..9794b82
--- /dev/null
+++ b/linux/pcmcia-cs/clients/3c589_cs.c
@@ -0,0 +1,1107 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for the 3com 3c589 card.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ 3c589_cs.c 1.167 2003/08/25 15:57:40
+
+ The network driver code is based on Donald Becker's 3c589 code:
+
+ Written 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_TIMER 0x0a
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EEPROM_READ 0x0080
+#define EEPROM_BUSY 0x8000
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,
+};
+
+enum c509status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transceiver/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+#define MEDIA_LED 0x0001 /* Enable link light on 3C589E cards. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+struct el3_private {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats stats;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u_short media_status;
+ u_short fast_poll;
+ u_long last_irq;
+};
+
+static char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Special hook for setting if_port when module is loaded */
+INT_MODULE_PARM(if_port, 0);
+
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"3c589_cs.c 1.167 2003/08/25 15:57:40 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+static void tc589_config(dev_link_t *link);
+static void tc589_release(u_long arg);
+static int tc589_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static u_short read_eeprom(ioaddr_t ioaddr, int index);
+static void tc589_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static int el3_config(struct net_device *dev, struct ifmap *map);
+static int el3_open(struct net_device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+static dev_info_t dev_info = "3c589_cs";
+
+static dev_link_t *tc589_attach(void);
+static void tc589_detach(dev_link_t *);
+
+static dev_link_t *dev_list = NULL;
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ tc589_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ tc589_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *tc589_attach(void)
+{
+ struct el3_private *lp;
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ int i, ret;
+
+ DEBUG(0, "3c589_attach()\n");
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ init_timer(&link->release);
+ link->release.function = &tc589_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &el3_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ /* The EL3-specific entries in the device structure. */
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->set_config = &el3_config;
+ dev->get_stats = &el3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &el3_open;
+ dev->stop = &el3_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = el3_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &tc589_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ tc589_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* tc589_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void tc589_detach(dev_link_t *link)
+{
+ struct el3_private *lp = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "3c589_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ tc589_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&lp->dev);
+ kfree(lp);
+
+} /* tc589_detach */
+
+/*======================================================================
+
+ tc589_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+static void tc589_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32], *phys_addr;
+ int last_fn, last_ret, i, j, multi = 0;
+ ioaddr_t ioaddr;
+ char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+
+ DEBUG(0, "3c589_config(0x%p)\n", link);
+
+ phys_addr = (u_short *)dev->dev_addr;
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Is this a 3c562? */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if ((CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) &&
+ (CardServices(GetTupleData, handle, &tuple) == CS_SUCCESS)) {
+ if (le16_to_cpu(buf[0]) != MANFID_3COM)
+ printk(KERN_INFO "3c589_cs: hmmm, is this really a "
+ "3Com card??\n");
+ multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562);
+ }
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ link->io.IOAddrLines = 16;
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80)) continue;
+ link->io.BasePort1 = j ^ 0x300;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "3c589_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ the hardware address. The 3c562 puts the address in the CIS. */
+ tuple.DesiredTuple = 0x88;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) {
+ CardServices(GetTupleData, handle, &tuple);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(buf[i]);
+ } else {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == 0x6060) {
+ printk(KERN_NOTICE "3c589_cs: IO port conflict at 0x%03lx"
+ "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ copy_dev_name(lp->node, dev);
+ link->dev = &lp->node;
+
+ /* The address and resource configuration register aren't loaded from
+ the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
+ outw(0x3f00, ioaddr + 8);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "3c589_cs: invalid if_port requested\n");
+
+ printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, hw_addr ",
+ dev->name, (multi ? "562" : "589"), dev->base_addr,
+ dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ i = inl(ioaddr);
+ printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (i & 7) ? 32 : 8, ram_split[(i >> 16) & 3],
+ if_names[dev->if_port]);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ tc589_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+} /* tc589_config */
+
+/*======================================================================
+
+ After a card is removed, tc589_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void tc589_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "3c589_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "3c589_cs: release postponed, '%s' still open\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* tc589_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int tc589_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(1, "3c589_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ tc589_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ tc589_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* tc589_event */
+
+/*====================================================================*/
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc589_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n",
+ dev->name, cmd);
+}
+
+/*
+ Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+*/
+static u_short read_eeprom(ioaddr_t ioaddr, int index)
+{
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
+}
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void tc589_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0: case 1: outw(0, ioaddr + 6); break;
+ case 2: outw(3<<14, ioaddr + 6); break;
+ case 3: outw(1<<14, ioaddr + 6); break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+}
+
+static void dump_status(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
+ "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS),
+ inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
+ " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/* Reset and restore all of the 3c589 registers. */
+static void tc589_reset(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ /* Accept b-cast and phys addr only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure, ioaddr + EL3_CMD);
+}
+
+static int el3_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+
+ tc589_reset(dev);
+ lp->media.function = &media_check;
+ lp->media.data = (u_long)lp;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ DEBUG(1, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+ dump_status(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84)) break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ DEBUG(1, "%s: transmit error: status 0x%02x\n",
+ dev->name, tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+}
+
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ tx_timeout_check(dev, el3_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ DEBUG(3, "%s: el3_start_xmit(length = %ld) called, "
+ "status %4.4x.\n", dev->name, (long)skb->len,
+ inw(ioaddr + EL3_STATUS));
+
+ add_tx_bytes(&((struct el3_private *)dev->priv)->stats, skb->len);
+
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+
+ dev->trans_start = jiffies;
+ if (inw(ioaddr + TX_FREE) > 1536) {
+ netif_start_queue(dev);
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+
+ DEV_KFREE_SKB(skb);
+ pop_tx_status(dev);
+
+ return 0;
+}
+
+/* The EL3 interrupt handler.  dev_id is the el3_private registered with
+ the IRQ; the handler loops on the status register, servicing Rx, Tx and
+ statistics events, and bails out after ~10 passes to escape a wedged
+ interrupt line.  Also called directly from media_check() when a dropped
+ interrupt is detected. */
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct el3_private *lp = dev_id;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr, status;
+ int i = 0;
+
+ if (!netif_device_present(dev))
+ return;
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | StatsFull)) {
+ /* 0xe000 are the window bits; anything but window 1 (0x2000)
+ here means the card was removed or reset under us. */
+ if (!netif_device_present(dev) ||
+ ((status & 0xe000) != 0x2000)) {
+ DEBUG(1, "%s: interrupt from dead card\n", dev->name);
+ break;
+ }
+
+ if (status & RxComplete)
+ el3_rx(dev);
+
+ if (status & TxAvailable) {
+ DEBUG(3, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+
+ if (status & TxComplete)
+ pop_tx_status(dev);
+
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) { /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
+ " register %04x.\n", dev->name, fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun: reset the receiver and re-apply the
+ Rx filter before re-enabling it. */
+ tc589_wait_for_completion(dev, RxReset);
+ set_multicast_list(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (++i > 10) {
+ printk(KERN_NOTICE "%s: infinite loop in interrupt, "
+ "status %4.4x.\n", dev->name, status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+
+ /* Remember when we last ran: media_check() uses this to decide
+ whether interrupts are being delivered at all. */
+ lp->last_irq = jiffies;
+ DEBUG(3, "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+ return;
+}
+
+/* Periodic (1 Hz) timer callback: watches the transceiver for lost/found
+ link beat and coax problems, auto-flips the media on auto-select cards,
+ and limps along by polling el3_interrupt() directly when the IRQ appears
+ to be blocked.  arg is the el3_private pointer stored in the timer. */
+static void media_check(u_long arg)
+{
+ struct el3_private *lp = (struct el3_private *)(arg);
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short media, errs;
+ u_long flags;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ EL3WINDOW(1);
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ (inb(ioaddr + EL3_TIMER) == 0xff)) {
+ if (!lp->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ el3_interrupt(dev->irq, lp, NULL);
+ /* Poll every jiffy for the next HZ ticks. */
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + 1;
+ add_timer(&lp->media);
+ return;
+ }
+
+ save_flags(flags);
+ cli();
+ EL3WINDOW(4);
+ /* 0xc810 masks the media-status bits we care about; 0x8000/0x0800
+ relate to 10baseT link beat, 0x4000/0x0010 to the coax port. */
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (jiffies - lp->last_irq < HZ) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
+ }
+
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
+ ((lp->media_status ^ media) & 0x0010))
+ printk(KERN_INFO "%s: coax cable %s\n", dev->name,
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ /* if_port 0 means auto-select: flip between 10baseT (xcvr 2
+ here appears to mean the other port) and 10base2 as the
+ observed media changes. */
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ printk(KERN_INFO "%s: flipped to 10baseT\n",
+ dev->name);
+ else
+ tc589_set_xcvr(dev, 2);
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ printk(KERN_INFO "%s: flipped to 10base2\n",
+ dev->name);
+ }
+ }
+ lp->media_status = media;
+ }
+
+ EL3WINDOW(1);
+ restore_flags(flags);
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+/* dev->get_stats hook: refresh the hardware counters (only if the card
+ is present and configured) and return the cached statistics table.
+ Interrupts are disabled around update_stats() because it changes the
+ register window. */
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ unsigned long flags;
+ dev_link_t *link = &lp->link;
+
+ if (DEV_OK(link)) {
+ save_flags(flags);
+ cli();
+ update_stats(dev);
+ restore_flags(flags);
+ }
+ return &lp->stats;
+}
+
+/*
+ Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+
+ The hardware counters are clear-on-read, so each register must be read
+ even when we discard the value (the bare inb/inw calls below).
+*/
+static void update_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(2, "%s: updating the statistics.\n", dev->name);
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Rx octets */ inw(ioaddr + 10);
+ /* Tx octets */ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+}
+
+/* Drain the Rx FIFO: for each completed packet either record the error
+ or copy it into a fresh sk_buff and hand it to the network stack, then
+ pop it off the FIFO.  Bounded to 32 packets per call so the interrupt
+ handler cannot livelock. */
+static int el3_rx(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
+
+ DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+ /* Bit 15 of RX_STATUS set means "FIFO empty / incomplete". */
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ (--worklimit >= 0)) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ /* +5: 2 bytes of reserve for IP alignment plus slack for
+ the doubleword-rounded insl() copy below. */
+ skb = dev_alloc_skb(pkt_len+5);
+
+ DEBUG(3, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len+3)>>2);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ add_rx_bytes(&lp->stats, pkt_len);
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of"
+ " size %d.\n", dev->name, pkt_len);
+ lp->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
+ }
+ /* NOTE(review): when the budget is exhausted the loop above exits
+ with worklimit == -1 (post-decrement), so this message never fires
+ for that case; conversely it fires spuriously if the FIFO happens
+ to empty exactly when worklimit reaches 0.  Verify intent — the
+ check probably wants to be (worklimit < 0). */
+ if (worklimit == 0)
+ printk(KERN_NOTICE "%s: too much work in el3_rx!\n", dev->name);
+ return 0;
+}
+
+/* dev->set_multicast_list hook: program the Rx filter from dev->flags.
+ Always accepts station + broadcast; adds multicast for ALLMULTI or a
+ non-empty mc list, and full promiscuous mode for IFF_PROMISC.  No-op
+ while the card is absent/unconfigured. */
+static void set_multicast_list(struct net_device *dev)
+{
+ struct el3_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (!(DEV_OK(link))) return;
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
+}
+
+/* dev->stop hook: quiesce the hardware (if still present), stop the
+ queue and media timer, drop the PCMCIA open count, and schedule a
+ deferred release if card removal was postponed while we were open. */
+static int el3_close(struct net_device *dev)
+{
+ struct el3_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
+
+ if (DEV_OK(link)) {
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&lp->media);
+ /* Card was pulled while open: release the config shortly. */
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*====================================================================*/
+
+/* Module entry point: verify the running Card Services matches the
+ release we were built against, then register the attach/detach
+ callbacks so sockets can bind cards to this driver. */
+static int __init init_3c589_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "3c589_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &tc589_attach, &tc589_detach);
+ return 0;
+}
+
+/* Module exit: deregister from Card Services and tear down any device
+ instances still on the list. */
+static void __exit exit_3c589_cs(void)
+{
+ DEBUG(0, "3c589_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ tc589_detach(dev_list);
+}
+
+module_init(init_3c589_cs);
+module_exit(exit_3c589_cs);
diff --git a/linux/pcmcia-cs/clients/ax8390.h b/linux/pcmcia-cs/clients/ax8390.h
new file mode 100644
index 0000000..8be1197
--- /dev/null
+++ b/linux/pcmcia-cs/clients/ax8390.h
@@ -0,0 +1,165 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/config.h>
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+
+#define TX_2X_PAGES 12
+#define TX_1X_PAGES 6
+
+#define TX_PAGES TX_2X_PAGES
+
+#define ETHER_ADDR_LEN 6
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
+
+#ifdef notdef
+extern int ei_debug;
+#else
+#define ei_debug 1
+#endif
+
+#ifndef HAVE_AUTOIRQ
+/* From auto_irq.c */
+extern void autoirq_setup(int waittime);
+extern unsigned long autoirq_report(int waittime);
+#endif
+
+/* Most of these entries should be in 'struct net_device' (or most of the
+ things in there should be here!) */
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct net_device *);
+ void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct net_device *, int, const unsigned char *, int);
+ void (*block_input)(struct net_device *, int, struct sk_buff *, int);
+ unsigned char mcfilter[8];
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
+ unsigned char txqueue; /* Tx Packet buffer queue length. */
+ short tx1, tx2; /* Packet lengths for ping-pong tx. */
+ short lasttx; /* Alpha version consistency check. */
+ unsigned char saved_irq; /* Original dev->irq value. */
+ struct net_device_stats stat; /* The new statistics table. */
+ spinlock_t page_lock; /* Page register locks */
+ unsigned long priv; /* Private field to store bus IDs etc. */
+};
+
+/* The maximum number of 8390 interrupt service routines called per IRQ. */
+#define MAX_SERVICE 12
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
+#define TX_TIMEOUT (20*HZ/100)
+
+#define ei_status (*(struct ei_device *)(dev->priv))
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
+
+#define E8390_CMD 0x00 /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */
+#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */
+#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */
+#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */
+#define EN0_TSR 0x04 /* Transmit status reg RD */
+#define EN0_TPSR 0x04 /* Transmit starting page WR */
+#define EN0_NCR 0x05 /* Number of collision reg RD */
+#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */
+#define EN0_FIFO 0x06 /* FIFO RD */
+#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */
+#define EN0_ISR 0x07 /* Interrupt status reg RD WR */
+#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */
+#define EN0_RSARLO 0x08 /* Remote start address reg 0 */
+#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */
+#define EN0_RSARHI 0x09 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */
+#define EN0_RSR 0x0c /* rx status reg RD */
+#define EN0_RXCR 0x0c /* RX configuration reg WR */
+#define EN0_TXCR 0x0d /* TX configuration reg WR */
+#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */
+#define EN0_DCFG 0x0e /* Data configuration reg WR */
+#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */
+#define EN0_IMR 0x0f /* Interrupt mask reg WR */
+#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS 0x01 /* This board's physical enet addr RD WR */
+#define EN1_PHYS_SHIFT(i) (i+1) /* Get and set mac address */
+#define EN1_CURPAG 0x07 /* Current memory page RD WR */
+#define EN1_MULT 0x08 /* Multicast filter mask array (8 bytes) RD WR */
+#define EN1_MULT_SHIFT(i) (8+i) /* Get and set multicast filter */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY 0x20 /* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+#endif /* _8390_h */
diff --git a/linux/pcmcia-cs/clients/axnet_cs.c b/linux/pcmcia-cs/clients/axnet_cs.c
new file mode 100644
index 0000000..2e7d9ed
--- /dev/null
+++ b/linux/pcmcia-cs/clients/axnet_cs.c
@@ -0,0 +1,1936 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for Asix AX88190-based cards
+
+ The Asix AX88190 is a NS8390-derived chipset with a few nasty
+ idiosyncrasies that make it very inconvenient to support with a
+ standard 8390 driver. This driver is based on pcnet_cs, with the
+ tweaked 8390 code grafted on the end. Much of what I did was to
+ clean up and update a similar driver supplied by Asix, which was
+ adapted by William Lee, william@asix.com.tw.
+
+ Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net
+
+ axnet_cs.c 1.31 2003/08/25 15:57:40
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include "ax8390.h"
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#define AXNET_CMD 0x00
+#define AXNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define AXNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define AXNET_MII_EEP 0x14 /* Offset of MII access port */
+#define AXNET_TEST 0x15 /* Offset of TEST Register port */
+#define AXNET_GPIO 0x17 /* Offset of General Purpose Register Port */
+
+#define AXNET_START_PG 0x40 /* First page of TX buffer */
+#define AXNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define AXNET_RDC_TIMEOUT 0x02 /* Max wait in jiffies for Tx RDC */
+
+#define IS_AX88190 0x0001
+#define IS_AX88790 0x0002
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"axnet_cs.c 1.31 2003/08/25 15:57:40 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+static void axnet_config(dev_link_t *link);
+static void axnet_release(u_long arg);
+static int axnet_event(event_t event, int priority,
+ event_callback_args_t *args);
+static int axnet_open(struct net_device *dev);
+static int axnet_close(struct net_device *dev);
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
+static void ei_watchdog(u_long arg);
+static void axnet_reset_8390(struct net_device *dev);
+
+static int mdio_read(ioaddr_t addr, int phy_id, int loc);
+static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value);
+
+static void get_8390_hdr(struct net_device *,
+ struct e8390_pkt_hdr *, int);
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page);
+
+static dev_link_t *axnet_attach(void);
+static void axnet_detach(dev_link_t *);
+
+static dev_info_t dev_info = "axnet_cs";
+static dev_link_t *dev_list;
+
+static int axdev_init(struct net_device *dev);
+static void AX88190_init(struct net_device *dev, int startp);
+static int ax_open(struct net_device *dev);
+static int ax_close(struct net_device *dev);
+static void ax_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*====================================================================*/
+
+typedef struct axnet_dev_t {
+ struct net_device dev; /* so &dev == &axnet_dev_t */
+ dev_link_t link;
+ dev_node_t node;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+} axnet_dev_t;
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+/* Walk the device list and finish detaching instances whose removal was
+ deferred (DEV_STALE_LINK).  'next' is cached before the call because
+ axnet_detach() unlinks and frees the current entry. */
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ axnet_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+/* Report a Card Services failure (function code + return code) through
+ the Card Services error-logging facility. */
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ We never need to do anything when a axnet device is "initialized"
+ by the net software, because we only register already-found cards.
+
+======================================================================*/
+
+/* dev->init stub: nothing to do, since devices are registered only after
+ the card has already been found and configured. */
+static int axnet_init(struct net_device *dev)
+{
+ return 0;
+}
+
+/*======================================================================
+
+ axnet_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+/* Allocate and zero one driver instance (axnet_dev_t holds net_device,
+ dev_link_t and node together), fill in the IRQ/config defaults and the
+ net_device callbacks, put it on dev_list, and register it with Card
+ Services.  Returns the link, or NULL on allocation/registration
+ failure (the instance is detached again on the latter). */
+static dev_link_t *axnet_attach(void)
+{
+ axnet_dev_t *info;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "axnet_attach()\n");
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) return NULL;
+ memset(info, 0, sizeof(*info));
+ link = &info->link; dev = &info->dev;
+ link->priv = info;
+
+ init_timer(&link->release);
+ link->release.function = &axnet_release;
+ link->release.data = (u_long)link;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ /* NOTE(review): all 4 entries are read even if the user
+ supplied fewer; unset entries are 0 and set bit 0 of the
+ mask — harmless here, but verify against irq_list docs. */
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ axdev_init(dev);
+ init_dev_name(dev, info->node);
+ dev->init = &axnet_init;
+ dev->open = &axnet_open;
+ dev->stop = &axnet_close;
+ dev->do_ioctl = &axnet_ioctl;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &axnet_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(link->handle, RegisterClient, ret);
+ axnet_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* axnet_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+/* Tear down one driver instance: cancel the pending release timer,
+ release the PCMCIA configuration if still held, deregister from Card
+ Services, unlink from dev_list and free the memory.  If the device is
+ still open (DEV_STALE_CONFIG), only mark it DEV_STALE_LINK and return;
+ flush_stale_links() will finish the job later. */
+static void axnet_detach(dev_link_t *link)
+{
+ axnet_dev_t *info = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "axnet_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ axnet_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&info->dev);
+ kfree(info);
+
+} /* axnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+
+======================================================================*/
+
+/* Read the station address from the card's PROM into dev->dev_addr by
+ programming an 8390-style remote-DMA read at offset 0x0400.  Returns 1
+ on success, 0 if the CIS ConfigBase doesn't look like an AX88190 (used
+ by axnet_config() to reject non-Asix cards). */
+static int get_prom(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i, j;
+
+ /* This is based on drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x01, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF|0x40, EN0_RXCR}, /* 0x60 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {0x10, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0400. */
+ {0x04, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->conf.ConfigBase != 0x03c0)
+ return 0;
+
+ axnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ /* Address bytes arrive little-endian, two per 16-bit data-port read. */
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + AXNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ return 1;
+} /* get_prom */
+
+/*======================================================================
+
+ axnet_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+#define CFG_CHECK(fn, args...) \
+if (CardServices(fn, args) != 0) goto next_entry
+
+/* Try to claim the I/O window(s) in link->io.  A single 32-port window
+ gets auto data-path width (with shared-IRQ setup for multifunction
+ cards); otherwise the range is treated as two 16-port windows.  If no
+ base was suggested, probe 0x300-first candidates across 0x000-0x3ff in
+ 0x20 steps.  Returns a Card Services status code. */
+static int try_io_port(dev_link_t *link)
+{
+ int j, ret;
+ if (link->io.NumPorts1 == 32) {
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (link->io.NumPorts2 > 0) {
+ /* for master/slave multifunction cards */
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ }
+ } else {
+ /* This should be two 16-port windows */
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ }
+ if (link->io.BasePort1 == 0) {
+ link->io.IOAddrLines = 16;
+ /* j ^ 0x300 makes the scan start at the traditional 0x300. */
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ link->io.BasePort2 = (j ^ 0x300) + 0x10;
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+ } else {
+ return CardServices(RequestIO, link->handle, &link->io);
+ }
+}
+
+/* Configure the socket after CARD_INSERTION: walk the CIS config-table
+ entries looking for a usable I/O window, request IRQ + configuration,
+ register the net device, read the MAC from the PROM (rejecting
+ non-AX88190 cards), distinguish AX88190 vs AX88790, and scan the MII
+ bus for a transceiver (retrying once after clearing the PHY power-down
+ bit in CCSR).  On any failure the configuration is released again. */
+static void axnet_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ axnet_dev_t *info = link->priv;
+ struct net_device *dev = &info->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ int i, j, last_ret, last_fn;
+ u_short buf[64];
+ config_info_t conf;
+
+ DEBUG(0, "axnet_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ /* don't trust the CIS on this; Linksys got it wrong */
+ link->conf.Present = 0x63;
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up current Vcc */
+ CS_CHECK(GetConfigurationInfo, handle, &conf);
+ link->conf.Vcc = conf.Vcc;
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Attributes = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ /* Iterate config-table entries until one yields an I/O window. */
+ while (last_ret == CS_SUCCESS) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_io_t *io = &(parse.cftable_entry.io);
+
+ CFG_CHECK(GetTupleData, handle, &tuple);
+ CFG_CHECK(ParseTuple, handle, &tuple, &parse);
+ if ((cfg->index == 0) || (cfg->io.nwin == 0))
+ goto next_entry;
+
+ link->conf.ConfigIndex = 0x05;
+ /* For multifunction cards, by convention, we configure the
+ network function with window 0, and serial with window 1 */
+ if (io->nwin > 1) {
+ i = (io->win[1].len > io->win[0].len);
+ link->io.BasePort2 = io->win[1-i].base;
+ link->io.NumPorts2 = io->win[1-i].len;
+ } else {
+ i = link->io.NumPorts2 = 0;
+ }
+ link->io.BasePort1 = io->win[i].base;
+ link->io.NumPorts1 = io->win[i].len;
+ link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ if (link->io.NumPorts1 + link->io.NumPorts2 >= 32) {
+ last_ret = try_io_port(link);
+ if (last_ret == CS_SUCCESS) break;
+ }
+ next_entry:
+ last_ret = CardServices(GetNextTuple, handle, &tuple);
+ }
+ if (last_ret != CS_SUCCESS) {
+ cs_error(handle, RequestIO, last_ret);
+ goto failed;
+ }
+
+ CS_CHECK(RequestIRQ, handle, &link->irq);
+
+ /* An 8-port second window marks a modem function: enable the
+ speaker for audible dialing. */
+ if (link->io.NumPorts2 == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ CS_CHECK(RequestConfiguration, handle, &link->conf);
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ if (!get_prom(link)) {
+ printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n");
+ printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n");
+ unregister_netdev(dev);
+ goto failed;
+ }
+
+ ei_status.name = "AX88190";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = AXNET_START_PG;
+ ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
+ ei_status.stop_page = AXNET_STOP_PG;
+ ei_status.reset_8390 = &axnet_reset_8390;
+ ei_status.get_8390_hdr = &get_8390_hdr;
+ ei_status.block_input = &block_input;
+ ei_status.block_output = &block_output;
+
+ copy_dev_name(info->node, dev);
+ link->dev = &info->node;
+
+ if (inb(dev->base_addr + AXNET_TEST) != 0)
+ info->flags |= IS_AX88790;
+ else
+ info->flags |= IS_AX88190;
+
+ printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, hw_addr ",
+ dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
+ dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ if (info->flags & IS_AX88790)
+ outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+
+ /* Scan all 32 MII addresses for a responding PHY. */
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+
+
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
+ if (i == 32) {
+ conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+ }
+
+ info->phy_id = (i < 32) ? i : -1;
+ if (i < 32) {
+ DEBUG(0, " MII transceiver at index %d, status %x.\n", i, j);
+ } else {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ }
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ axnet_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+} /* axnet_config */
+
+/*======================================================================
+
+ After a card is removed, axnet_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+/* Timer callback (arg is really a dev_link_t *): tear down the PCMCIA
+ configuration, or defer if the net device is still open. */
+static void axnet_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "axnet_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "axnet_cs: release postponed, '%s' still open\n",
+ ((axnet_dev_t *)(link->priv))->node.dev_name);
+ /* axnet_close() re-arms link->release when it sees this flag. */
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ /* Release in the reverse order of the Request* calls in axnet_config(). */
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* axnet_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+/* Card Services event callback registered in axnet_attach().
+ @event: CS_EVENT_* code; @priority: unused here; @args: carries our
+ dev_link_t in client_data. Always returns 0 (event accepted). */
+static int axnet_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ axnet_dev_t *info = link->priv;
+
+ DEBUG(2, "axnet_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ /* Stop the stack from touching the hardware, then schedule
+ axnet_release() ~50ms later from timer context. */
+ netif_device_detach(&info->dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ axnet_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ /* Quiesce and drop the configuration; restored on resume/reset. */
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(&info->dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ /* Re-acquire the configuration and fully re-init the chip. */
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ axnet_reset_8390(&info->dev);
+ AX88190_init(&info->dev, 1);
+ netif_device_attach(&info->dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* axnet_event */
+
+/*======================================================================
+
+ MII interface support
+
+======================================================================*/
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DATA_WRITE0 0x00
+#define MDIO_DATA_WRITE1 0x08
+#define MDIO_DATA_READ 0x04
+#define MDIO_MASK 0x0f
+#define MDIO_ENB_IN 0x02
+
+/* Clock out 32 '1' bits on the bit-banged MII management interface to
+ resynchronize the PHY before a read/write frame (802.3 preamble). */
+static void mdio_sync(ioaddr_t addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ /* Present the data bit, then raise the management clock. */
+ outb_p(MDIO_DATA_WRITE1, addr);
+ outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/* Read MII management register @loc from PHY @phy_id via the bit-banged
+ port at @addr. Returns the 16-bit register value. */
+static int mdio_read(ioaddr_t addr, int phy_id, int loc)
+{
+ /* Read-frame header: start/opcode bits, PHY address, register address.
+ Bits are shifted out MSB-first below. */
+ u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ for (i = 14; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ /* Turnaround + 16 data bits, sampled on each clock edge. */
+ for (i = 19; i > 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+ /* Drop the trailing turnaround bit; keep the 16 data bits. */
+ return (retval>>1) & 0xffff;
+}
+
+/* Write @value to MII management register @loc of PHY @phy_id via the
+ bit-banged port at @addr. */
+static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value)
+{
+ /* Full 32-bit write frame: start/opcode, PHY addr, reg addr,
+ turnaround, 16 data bits; shifted out MSB-first. */
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ /* Two idle clocks to release the bus. */
+ for (i = 1; i >= 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*====================================================================*/
+
+/* net_device open hook: acquire the IRQ, start the link watchdog timer,
+ and bring up the 8390 core via ax_open(). Returns 0 or -errno. */
+static int axnet_open(struct net_device *dev)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+ int ret;
+
+ DEBUG(2, "axnet_open('%s')\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ /* Grab the interrupt line before committing any open state; the
+ previous code ignored a request_irq() failure, which left the
+ device "open" with no interrupt handler installed. */
+ ret = request_irq(dev->irq, ei_irq_wrapper, SA_SHIRQ, dev_info, dev);
+ if (ret != 0)
+ return ret;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ /* Arm the 1Hz watchdog that polls link beat and lost interrupts. */
+ info->link_status = 0x00;
+ info->watchdog.function = &ei_watchdog;
+ info->watchdog.data = (u_long)info;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ax_open(dev);
+} /* axnet_open */
+
+/*====================================================================*/
+
+/* net_device stop hook: shut the 8390 core down, free the IRQ, stop the
+ watchdog, and trigger any release deferred by axnet_release(). */
+static int axnet_close(struct net_device *dev)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "axnet_close('%s')\n", dev->name);
+
+ ax_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&info->watchdog);
+ /* The card was yanked while open: run the postponed release soon. */
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* axnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+ a 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+/* Hard-reset the AX88190 and wait (up to ~10ms) for the chip to signal
+ reset completion in the ISR. */
+static void axnet_reset_8390(struct net_device *dev)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+ /* Reading then writing back the reset port pulses the reset line. */
+ outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET);
+
+ /* Poll for ENISR_RESET, 100us per iteration, 100 iterations max. */
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n",
+ dev->name);
+
+} /* axnet_reset_8390 */
+
+/*====================================================================*/
+
+/* IRQ trampoline: clear the watchdog's "stale interrupt" counter and
+ forward to the 8390 core handler. dev_id is the net_device pointer
+ registered in axnet_open(); the axnet_dev_t cast works because the
+ net_device appears to be embedded at the start of axnet_dev_t —
+ NOTE(review): confirm against the struct layout, which is outside
+ this chunk. */
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
+{
+ axnet_dev_t *info = dev_id;
+ info->stale = 0;
+ ax_interrupt(irq, dev_id, regs);
+}
+
+/* 1Hz timer: recovers from lost interrupts by polling the ISR, and
+ tracks MII link beat, logging duplex/speed on link-up. */
+static void ei_watchdog(u_long arg)
+{
+ axnet_dev_t *info = (axnet_dev_t *)(arg);
+ struct net_device *dev = &info->dev;
+ ioaddr_t nic_base = dev->base_addr;
+ ioaddr_t mii_addr = nic_base + AXNET_MII_EEP;
+ u_short link;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ ei_irq_wrapper(dev->irq, dev, NULL);
+ /* Poll every jiffy for the next HZ ticks (see below). */
+ info->fast_poll = HZ;
+ }
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ /* phy_id < 0 means no MII transceiver was found at config time. */
+ if (info->phy_id < 0)
+ goto reschedule;
+ /* MII register 1: basic status. All-zero/all-one reads mean the
+ transceiver vanished. */
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ info->phy_id = -1;
+ goto reschedule;
+ }
+
+ /* Isolate the link-status bit and report transitions only. */
+ link &= 0x0004;
+ if (link != info->link_status) {
+ /* MII register 5: autonegotiation link partner ability. */
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ if (link) {
+ info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
+ if (p)
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
+ else
+ printk(KERN_INFO "%s: link partner did not autonegotiate\n",
+ dev->name);
+ /* Re-init so the chip picks up the new duplex setting. */
+ AX88190_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+/*====================================================================*/
+
+/* Private ioctls: read PHY id (SIOCDEVPRIVATE), read an MII register
+ (+1), write an MII register (+2, CAP_NET_ADMIN only). data[] maps
+ onto ifr_data as {phy_id, reg, value_in, value_out}. */
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ u16 *data = (u16 *)&rq->ifr_data;
+ ioaddr_t mii_addr = dev->base_addr + AXNET_MII_EEP;
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ data[0] = info->phy_id;
+ /* fall through: also return the register read below */
+ case SIOCDEVPRIVATE+1:
+ data[3] = mdio_read(mii_addr, data[0], data[1] & 0x1f);
+ return 0;
+ case SIOCDEVPRIVATE+2:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(mii_addr, data[0], data[1] & 0x1f, data[2]);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
+/* ei_status.get_8390_hdr hook: read the 4-byte packet header at the
+ start of ring page @ring_page into *hdr via the PIO data port. */
+static void get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+
+ /* Set the remote-DMA start address to the page boundary, then
+ issue a remote read. */
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ insw(nic_base + AXNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+}
+
+/*====================================================================*/
+
+/* ei_status.block_input hook: PIO-copy @count bytes of packet data from
+ ring offset @ring_offset into skb->data. */
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+#ifdef PCMCIA_DEBUG
+ if ((ei_debug > 4) && (count != 4))
+ printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4);
+#endif
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ /* Word transfers; pick up the trailing odd byte separately. */
+ insw(nic_base + AXNET_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+
+}
+
+/*====================================================================*/
+
+/* ei_status.block_output hook: PIO-copy @count bytes from @buf into the
+ Tx buffer starting at ring page @start_page. */
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4)
+ printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD);
+ outsw(nic_base + AXNET_DATAPORT, buf, count>>1);
+}
+
+/*====================================================================*/
+
+/* Module init: verify the Card Services release matches what we were
+ built against, then register the driver's attach/detach callbacks. */
+static int __init init_axnet_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "axnet_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &axnet_attach, &axnet_detach);
+ return 0;
+}
+
+/* Module exit: unregister from Card Services and detach any device
+ instances still on the driver's list. */
+static void __exit exit_axnet_cs(void)
+{
+ DEBUG(0, "axnet_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ axnet_detach(dev_list);
+}
+
+module_init(init_axnet_cs);
+module_exit(exit_axnet_cs);
+
+/*====================================================================*/
+
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Seeing how at least eight drivers use this code, (not counting the
+ PCMCIA ones either) it is easy to break some card by what seems like
+ a simple innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+ Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
+ Paul Gortmaker : tweak ANK's above multicast changes a bit.
+ Paul Gortmaker : update packet statistics for v2.1.x
+ Alan Cox : support arbitary stupid port mappings on the
+ 68K Macintosh. Support >16bit I/O spaces
+ Paul Gortmaker : add kmod support for auto-loading of the 8390
+ module by all drivers that require it.
+ Alan Cox : Spinlocking work, added 'BUG_83C690'
+ Paul Gortmaker : Separate out Tx timeout code from Tx path.
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+static const char *version_8390 =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n";
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#include <linux/etherdevice.h>
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct net_device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_tx_timeout(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct net_device *dev);
+static void do_set_multicast_list(struct net_device *dev);
+
+/*
+ * SMP and the 8390 setup.
+ *
+ * The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
+ * a page register that controls bank and packet buffer access. We guard
+ * this with ei_local->page_lock. Nobody should assume or set the page other
+ * than zero when the lock is not held. Lock holders must restore page 0
+ * before unlocking. Even pure readers must take the lock to protect in
+ * page 0.
+ *
+ * To make life difficult the chip can also be very slow. We therefore can't
+ * just use spinlocks. For the longer lockups we disable the irq the device
+ * sits on and hold the lock. We must hold the lock because there is a dual
+ * processor case other than interrupts (get stats/set multicast list in
+ * parallel with each other and transmit).
+ *
+ * Note: in theory we can just disable the irq on the card _but_ there is
+ * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
+ * enter lock, take the queued irq. So we waddle instead of flying.
+ *
+ * Finally by special arrangement for the purpose of being generally
+ * annoying the transmit function is called bh atomic. That places
+ * restrictions on the user context callers as disable_irq won't save
+ * them.
+ */
+
+/**
+ * ax_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+/**
+ * ax_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ *
+ * Returns 0 on success, -ENXIO if dev->priv was never set up.
+ */
+static int ax_open(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* This can't happen unless somebody forgot to call axdev_init(). */
+ if (ei_local == NULL)
+ {
+ printk(KERN_EMERG "%s: ax_open passed a non-existent device!\n", dev->name);
+ return -ENXIO;
+ }
+
+#ifdef HAVE_TX_TIMEOUT
+ /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
+ wrapper that does e.g. media check & then calls ei_tx_timeout. */
+ if (dev->tx_timeout == NULL)
+ dev->tx_timeout = ei_tx_timeout;
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /*
+ * Grab the page lock so we own the register set, then call
+ * the init function.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ AX88190_init(dev, 1);
+ /* Set the flag before we drop the lock, That way the IRQ arrives
+ after its set and we get no silly warnings */
+ netif_mark_up(dev);
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ /* Allow the interrupt handler in; see the irqlock discipline in
+ ax_interrupt()/ei_start_xmit(). */
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+#define dev_lock(dev) (((struct ei_device *)(dev)->priv)->page_lock)
+
+/**
+ * ax_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
+ */
+/**
+ * ax_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
+ * Always returns 0.
+ */
+int ax_close(struct net_device *dev)
+{
+ unsigned long flags;
+
+ /*
+ * Hold the page lock during close
+ */
+
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ /* startp == 0 puts the chip into its stopped state. */
+ AX88190_init(dev, 0);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ * Diagnoses the hang from TSR/ISR, then resets and re-inits the chip.
+ */
+
+void ei_tx_timeout(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ unsigned long flags;
+
+ ei_local->stat.tx_errors++;
+
+ /* Snapshot the status registers under the page lock for diagnosis. */
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ txsr = inb(e8390_base+EN0_TSR);
+ isr = inb(e8390_base+EN0_ISR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets)
+ {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Ugly but a reset can be slow, yet must be protected */
+
+ disable_irq_nosync(dev->irq);
+ spin_lock(&ei_local->page_lock);
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ AX88190_init(dev, 1);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device. Returns 0 on success, 1 if
+ * both Tx buffers were unexpectedly busy (packet not consumed). The
+ * lock ordering here (mask chip IRQs, then disable_irq + page_lock)
+ * implements the SMP protocol described in the block comment above.
+ */
+
+static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int length, send_length, output_page;
+ unsigned long flags;
+
+ tx_timeout_check(dev, ei_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ length = skb->len;
+
+ /* Mask interrupts from the ethercard.
+ SMP: We have to grab the lock here otherwise the IRQ handler
+ on another CPU can flip window and race the IRQ mask set. We end
+ up trashing the mcast filter not disabling irqs if we dont lock */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ outb_p(0x00, e8390_base + EN0_IMR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ /*
+ * Slow phase with lock held.
+ */
+
+ disable_irq_nosync(dev->irq);
+
+ spin_lock(&ei_local->page_lock);
+
+ ei_local->irqlock = 1;
+
+ /* Pad short frames up to the ethernet minimum. */
+ send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0)
+ {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ }
+ else if (ei_local->tx2 == 0)
+ {
+ output_page = ei_local->tx_start_page + TX_1X_PAGES;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ }
+ else
+ { /* We should never get here. */
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ netif_stop_queue(dev);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ ei_local->stat.tx_errors++;
+ return 1;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ ei_block_output(dev, length, skb->data, output_page);
+ if (! ei_local->txing)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ /* tx1/tx2 == -1 marks "in flight"; lasttx records which slot. */
+ if (output_page == ei_local->tx_start_page)
+ {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ }
+ else
+ {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ }
+ else ei_local->txqueue++;
+
+ /* Stop the queue only when both slots are occupied. */
+ if (ei_local->tx1 && ei_local->tx2)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+
+ DEV_KFREE_SKB (skb);
+ add_tx_bytes(&ei_local->stat, send_length);
+
+ return 0;
+}
+
+/**
+ * ax_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ * @regs: unused
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * neccessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+/**
+ * ax_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ * @regs: unused
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * neccessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+static void ax_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ long e8390_base;
+ int interrupts, nr_serviced = 0, i;
+ struct ei_device *ei_local;
+
+ if (dev == NULL)
+ {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ e8390_base = dev->base_addr;
+ ei_local = (struct ei_device *) dev->priv;
+
+ /*
+ * Protect the irq test too.
+ */
+
+ spin_lock(&ei_local->page_lock);
+
+ /* irqlock set means ei_start_xmit masked chip IRQs; this interrupt
+ must be a shared-line neighbour's, so bail out. */
+ if (ei_local->irqlock)
+ {
+#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+#endif
+ spin_unlock(&ei_local->page_lock);
+ return;
+ }
+
+ if (ei_debug > 3)
+ printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
+ inb_p(e8390_base + EN0_ISR));
+
+ outb_p(0x00, e8390_base + EN0_ISR);
+ ei_local->irqlock = 1;
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE)
+ {
+ /* 0xff from a dead/removed card; stop servicing immediately. */
+ if (!netif_running(dev) || (interrupts == 0xff)) {
+ if (ei_debug > 1)
+ printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ interrupts = 0;
+ break;
+ }
+ /* AX88190 bug fix. */
+ /* The chip can need the ISR ack repeated before the bits
+ actually clear; retry a bounded number of times. */
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ for (i = 0; i < 10; i++) {
+ if (!(inb(e8390_base + EN0_ISR) & interrupts))
+ break;
+ outb_p(0, e8390_base + EN0_ISR);
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ }
+ /* Overrun takes priority over normal receive handling. */
+ if (interrupts & ENISR_OVER)
+ ei_rx_overrun(dev);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
+ {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX)
+ ei_tx_intr(dev);
+ else if (interrupts & ENISR_TX_ERR)
+ ei_tx_err(dev);
+
+ if (interrupts & ENISR_COUNTERS)
+ {
+ ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
+ }
+ }
+
+ if (interrupts && ei_debug)
+ {
+ if (nr_serviced >= MAX_SERVICE)
+ {
+ /* 0xFF is valid for a card removal */
+ if(interrupts!=0xFF)
+ printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ return;
+}
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned char txsr = inb_p(e8390_base+EN0_TSR);
+ /* Abort or FIFO-underrun means the slot is free again. */
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+ printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ if (txsr & ENTSR_ABT)
+ printk("excess-collisions ");
+ if (txsr & ENTSR_ND)
+ printk("non-deferral ");
+ if (txsr & ENTSR_CRS)
+ printk("lost-carrier ");
+ if (txsr & ENTSR_FU)
+ printk("FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ printk("lost-heartbeat ");
+ printk("\n");
+#endif
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int status = inb(e8390_base + EN0_TSR);
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ *
+ * Slot encoding (set by ei_start_xmit): 0 = free, >0 = queued
+ * length, -1 = in flight; lasttx records which slot last started.
+ */
+ ei_local->txqueue--;
+
+ if (ei_local->tx1 < 0)
+ {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ if (ei_local->tx2 > 0)
+ {
+ ei_local->txing = 1;
+ /* NOTE(review): hard-coded "+ 6" here where ei_start_xmit
+ uses tx_start_page + TX_1X_PAGES for slot 2 — confirm
+ TX_1X_PAGES == 6 in the headers, else these disagree. */
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1,
+ ei_local->lasttx = 2;
+ }
+ else ei_local->lasttx = 20, ei_local->txing = 0;
+ }
+ else if (ei_local->tx2 < 0)
+ {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ if (ei_local->tx1 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ }
+ else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ }
+// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
+// dev->name, ei_local->lasttx);
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT)
+ {
+ ei_local->stat.tx_aborted_errors++;
+ /* Excess collisions: the chip gave up after 16 attempts. */
+ ei_local->stat.collisions += 16;
+ }
+ if (status & ENTSR_CRS)
+ ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU)
+ ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH)
+ ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC)
+ ei_local->stat.tx_window_errors++;
+ }
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held. Processes at most 9 frames per call so one
+ * busy receiver cannot monopolize the interrupt handler.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+
+ while (++rx_pkt_count < 10)
+ {
+ int pkt_len, pkt_stat;
+
+ /* Get the rx page (incoming packet pointer). */
+ /* NOTE(review): the "-1" offset from EN1_CURPAG looks like an
+ AX88190-specific register-map quirk — confirm against the
+ datasheet before touching. */
+ rxing_page = inb_p(e8390_base + EN1_CURPAG -1);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone claimed to have a problem.)
+
+ Keep quiet if it looks like a card removal. One problem here
+ is that some clones crash in roughly the same way.
+ */
+ if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
+ printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ /* Ring pages are 256 bytes, so offset = page << 8. */
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+ pkt_stat = rx_frame.status;
+
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ /* Sanity-check against ethernet min/max frame sizes. */
+ if (pkt_len < 60 || pkt_len > 1518)
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ ei_local->stat.rx_length_errors++;
+ }
+ else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ /* Out of memory: leave the frame in the ring and retry
+ on the next interrupt. */
+ break;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ ei_local->stat.rx_packets++;
+ add_rx_bytes(&ei_local->stat, pkt_len);
+ if (pkt_stat & ENRSR_PHY)
+ ei_local->stat.multicast++;
+ }
+ }
+ else
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ ei_local->stat.rx_errors++;
+ /* NB: The NIC counts CRC, frame and missed errors. */
+ if (pkt_stat & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ /* Trust the chip's next-frame pointer over our computed one. */
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here for avoiding bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ /* Boundary trails the current page by one (see above). */
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ return;
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+ /* NOTE(review): cast assumes axnet_dev_t embeds the net_device at
+ offset 0 -- declared elsewhere in this file; confirm if changed. */
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ long e8390_base = dev->base_addr;
+ unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+
+ mdelay(10);
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ outb_p(0x00, e8390_base+EN0_RCNTLO);
+ outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+
+ if (was_txing)
+ {
+ unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed)
+ must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR);
+ if (must_resend)
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ * Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned long flags;
+
+ /* If the card is stopped, just return the present stats. */
+ if (!netif_running(dev))
+ return &ei_local->stat;
+
+ /* Take the page lock so the hot paths cannot flip register pages
+ underneath the counter reads. */
+ spin_lock_irqsave(&ei_local->page_lock,flags);
+ /* Read the counter registers, assuming we are in page 0. */
+ /* NOTE(review): accumulated with +=, so these NIC counters are
+ presumably clear-on-read -- verify against the 8390 datasheet. */
+ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return &ei_local->stat;
+}
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ * Set or clear the multicast filter for this adaptor. May be called
+ * from a BH in 2.1.x. Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+
+ /* Program EN0_RXCR: 0x58 = promiscuous, 0x48 = all-multicast,
+ 0x40 = broadcast only (bit meanings per the 8390 RCR -- confirm
+ against the datasheet). No per-address hash filtering here. */
+ if(dev->flags&IFF_PROMISC)
+ outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
+ else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
+ else
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+}
+
+/*
+ * Called without lock held. This is invoked from user context and may
+ * be parallel to just about everything else. Its also fairly quick and
+ * not called too often. Must protect against both bh and irq users
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+
+ /* Thin locking wrapper: take the device lock with IRQs disabled,
+ then delegate to the lock-held worker. */
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ do_set_multicast_list(dev);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+}
+
+/**
+ * axdev_init - init rest of 8390 device struct
+ * @dev: network device structure to init
+ *
+ * Initialize the rest of the 8390 device structure. Do NOT __init
+ * this, as it is used by 8390 based modular drivers too.
+ *
+ * Returns 0 on success or -ENOMEM if the private area cannot be
+ * allocated.
+ */
+
+static int axdev_init(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s", version_8390);
+
+ /* Allocate and zero the 8390 private state only if the caller has
+ not already provided one. */
+ if (dev->priv == NULL)
+ {
+ struct ei_device *ei_local;
+
+ dev->priv = kmalloc(sizeof(struct ei_device), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct ei_device));
+ ei_local = (struct ei_device *)dev->priv;
+ /* The page lock serializes all register-page accesses. */
+ spin_lock_init(&ei_local->page_lock);
+ }
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the generic Ethernet fields (type, MTU, etc.). */
+ ether_setup(dev);
+
+ return 0;
+}
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * AX88190_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean. non-zero value to initiate chip processing
+ *
+ * Must be called with lock held.
+ */
+
+static void AX88190_init(struct net_device *dev, int startp)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int i;
+ /* ENDCFG_WTS selects 16-bit (word) transfers when the bus is 16 bits
+ wide; 0x48 is the NS-recommended base DCR value. */
+ int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+
+ /* The ring-buffer header format on the wire is exactly 4 bytes;
+ a mispacked struct would corrupt every received frame. */
+ if(sizeof(struct e8390_pkt_hdr)!=4)
+ panic("8390.c: header struct mispacked\n");
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital!. */
+ outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers. */
+
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+ for(i = 0; i < 6; i++)
+ {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+ /* Read each byte back to catch flaky clones early. */
+ if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
+ printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
+ }
+ /*
+ * Initialize the multicast list to accept-all. If we enable multicast
+ * the higher levels can do the filtering.
+ */
+ for (i = 0; i < 8; i++)
+ outb_p(0xff, e8390_base + EN1_MULT + i);
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ netif_start_queue(dev);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+
+ if (startp)
+ {
+ /* Ack anything pending, unmask all, and start the chip. */
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+ outb_p(E8390_TXCONFIG | info->duplex_flag,
+ e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on, */
+ do_set_multicast_list(dev); /* (re)load the mcast table */
+ }
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+ Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) dev->priv;
+
+ /* Refuse to trigger while a transmit is already in flight; the CMD
+ register's TRANS bit tells us the transmitter is busy. */
+ if (inb_p(e8390_base) & E8390_TRANS)
+ {
+ printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ /* Program byte count (low/high), the buffer page, then fire. */
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
diff --git a/linux/pcmcia-cs/clients/fmvj18x_cs.c b/linux/pcmcia-cs/clients/fmvj18x_cs.c
new file mode 100644
index 0000000..bd492e8
--- /dev/null
+++ b/linux/pcmcia-cs/clients/fmvj18x_cs.c
@@ -0,0 +1,1322 @@
+/*======================================================================
+ fmvj18x_cs.c 2.8 2002/03/23
+
+ A fmvj18x (and its compatibles) PCMCIA client driver
+
+ Contributed by Shingo Fujimoto, shingo@flab.fujitsu.co.jp
+
+ TDK LAK-CD021 and CONTEC C-NET(PC)C support added by
+ Nobuhiro Katayama, kata-n@po.iijnet.or.jp
+
+ The PCMCIA client code is based on code written by David Hinds.
+ Network code is based on the "FMV-18x driver" by Yutaka TAMIYA
+ but is actually largely Donald Becker's AT1700 driver, which
+ carries the following attribution:
+
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Bit map of interrupts to choose from */
+/* This means pick from 15, 14, 12, 11, 10, 9, 7, 5, 4, and 3 */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+/* SRAM configuration */
+/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */
+INT_MODULE_PARM(sram_config, 0);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version = "fmvj18x_cs.c 2.8 2002/03/23";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+/*
+ PCMCIA event handlers
+ */
+static void fmvj18x_config(dev_link_t *link);
+static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id);
+static int fmvj18x_setup_mfc(dev_link_t *link);
+static void fmvj18x_release(u_long arg);
+static int fmvj18x_event(event_t event, int priority,
+ event_callback_args_t *args);
+static dev_link_t *fmvj18x_attach(void);
+static void fmvj18x_detach(dev_link_t *);
+
+/*
+ LAN controller(MBH86960A) specific routines
+ */
+static int fjn_config(struct net_device *dev, struct ifmap *map);
+static int fjn_open(struct net_device *dev);
+static int fjn_close(struct net_device *dev);
+static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void fjn_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void fjn_rx(struct net_device *dev);
+static void fjn_reset(struct net_device *dev);
+static struct net_device_stats *fjn_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void fjn_tx_timeout(struct net_device *dev);
+
+static dev_info_t dev_info = "fmvj18x_cs";
+static dev_link_t *dev_list;
+
+/*
+ card type
+ XXX10304 denotes an MBH10304 whose CIS is buggy and whose MAC must be
+ dug out of attribute memory (see fmvj18x_get_hwinfo()).
+ */
+typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
+ XXX10304
+} cardtype_t;
+
+/*
+ driver specific data structure
+*/
+typedef struct local_info_t {
+ dev_link_t link; /* PCMCIA Card Services linkage */
+ struct net_device dev; /* embedded network device */
+ dev_node_t node; /* device node handed to Card Services */
+ struct net_device_stats stats; /* interface statistics */
+ long open_time; /* presumably jiffies at open -- set outside this view */
+ uint tx_started:1; /* transmit-in-progress flag (used by xmit path) */
+ uint tx_queue; /* packets queued in the card's TX SRAM -- confirm */
+ u_short tx_queue_len; /* bytes queued for the pending transmit -- confirm */
+ cardtype_t cardtype; /* detected hardware variant (see fmvj18x_config) */
+ u_short sent; /* count of packets handed to the DMA -- confirm */
+ u_char mc_filter[8]; /* 64-bit multicast hash filter shadow */
+} local_info_t;
+
+/* Above this many multicast addresses, fall back to accept-all. */
+#define MC_FILTERBREAK 64
+
+/*====================================================================*/
+/*
+ ioport offset from the base address
+ */
+#define TX_STATUS 0 /* transmit status register */
+#define RX_STATUS 1 /* receive status register */
+#define TX_INTR 2 /* transmit interrupt mask register */
+#define RX_INTR 3 /* receive interrupt mask register */
+#define TX_MODE 4 /* transmit mode register */
+#define RX_MODE 5 /* receive mode register */
+#define CONFIG_0 6 /* configuration register 0 */
+#define CONFIG_1 7 /* configuration register 1 */
+
+/* Registers 8+ are banked; CONFIG_1 selects which bank is visible. */
+#define NODE_ID 8 /* node ID register (bank 0) */
+#define MAR_ADR 8 /* multicast address registers (bank 1) */
+
+#define DATAPORT 8 /* buffer mem port registers (bank 2) */
+#define TX_START 10 /* transmit start register */
+#define COL_CTRL 11 /* 16 collision control register */
+#define BMPR12 12 /* reserved */
+#define BMPR13 13 /* reserved */
+#define RX_SKIP 14 /* skip received packet register */
+
+#define LAN_CTRL 16 /* LAN card control register */
+
+#define MAC_ID 0x1a /* hardware address */
+#define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */
+
+/*
+ control bits
+ */
+#define ENA_TMT_OK 0x80
+#define ENA_TMT_REC 0x20
+#define ENA_COL 0x04
+#define ENA_16_COL 0x02
+#define ENA_TBUS_ERR 0x01
+
+#define ENA_PKT_RDY 0x80
+#define ENA_BUS_ERR 0x40
+#define ENA_LEN_ERR 0x08
+#define ENA_ALG_ERR 0x04
+#define ENA_CRC_ERR 0x02
+#define ENA_OVR_FLO 0x01
+
+/* flags */
+#define F_TMT_RDY 0x80 /* can accept new packet */
+#define F_NET_BSY 0x40 /* carrier is detected */
+#define F_TMT_OK 0x20 /* send packet successfully */
+#define F_SRT_PKT 0x10 /* short packet error */
+#define F_COL_ERR 0x04 /* collision error */
+#define F_16_COL 0x02 /* 16 collision error */
+#define F_TBUS_ERR 0x01 /* bus read error */
+
+#define F_PKT_RDY 0x80 /* packet(s) in buffer */
+#define F_BUS_ERR 0x40 /* bus read error */
+#define F_LEN_ERR 0x08 /* short packet */
+#define F_ALG_ERR 0x04 /* frame error */
+#define F_CRC_ERR 0x02 /* CRC error */
+#define F_OVR_FLO 0x01 /* overflow error */
+
+#define F_BUF_EMP 0x40 /* receive buffer is empty */
+
+#define F_SKP_PKT 0x05 /* drop packet in buffer */
+
+/* default bitmaps */
+#define D_TX_INTR ( ENA_TMT_OK )
+#define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \
+ | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO )
+#define TX_STAT_M ( F_TMT_RDY )
+#define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \
+ | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO )
+
+/* commands */
+#define D_TX_MODE 0x06 /* no tests, detect carrier */
+#define ID_MATCHED 0x02 /* (RX_MODE) */
+#define RECV_ALL 0x03 /* (RX_MODE) */
+#define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */
+#define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */
+#define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */
+#define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */
+#define BANK_0 0xa0 /* bank 0 (CONFIG_1) */
+#define BANK_1 0xa4 /* bank 1 (CONFIG_1) */
+#define BANK_2 0xa8 /* bank 2 (CONFIG_1) */
+#define CHIP_OFF 0x80 /* control chip power off (CONFIG_1) */
+#define DO_TX 0x80 /* do transmit packet */
+#define SEND_PKT 0x81 /* send a packet */
+#define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */
+#define MANU_MODE 0x03 /* Stop and skip packet on 16 col */
+#define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */
+#define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */
+#define INTR_OFF 0x0d /* LAN controller ignores interrupts */
+#define INTR_ON 0x1d /* LAN controller will catch interrupts */
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* Ungermann-Bass cards use a different CONFIG_1 bank encoding. */
+#define BANK_0U 0x20 /* bank 0 (CONFIG_1) */
+#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */
+#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ /* Detach any links that were deferred with DEV_STALE_LINK; grab
+ `next` first because fmvj18x_detach() unlinks and frees `link`. */
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ fmvj18x_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+/* Report a Card Services error for @func with result code @ret. */
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*====================================================================*/
+
+/*
+ * Create a new device instance: allocate the combined local_info_t
+ * (Card Services link + net_device + driver state), fill in the I/O,
+ * IRQ and configuration requests, and register with Card Services.
+ * Returns the new link, or NULL on allocation/registration failure.
+ * The card is actually configured later, from fmvj18x_config().
+ */
+static dev_link_t *fmvj18x_attach(void)
+{
+ local_info_t *lp;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "fmvj18x_attach()\n");
+ flush_stale_links();
+
+ /* Make up a FMVJ18x specific data structure */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ /* Delayed-release timer: fmvj18x_release() runs off this timer. */
+ init_timer(&link->release);
+ link->release.function = &fmvj18x_release;
+ link->release.data = (u_long)link;
+
+ /* The io structure describes IO port mapping */
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 5;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ /* Either take the module-parameter bitmask directly, or build one
+ from the explicit irq_list. */
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &fjn_interrupt;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* The FMVJ18x specific entries in the device structure. */
+ dev->hard_start_xmit = &fjn_start_xmit;
+ dev->set_config = &fjn_config;
+ dev->get_stats = &fjn_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &fjn_open;
+ dev->stop = &fjn_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = fjn_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &fmvj18x_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ /* detach() unlinks us from dev_list and frees lp. */
+ fmvj18x_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* fmvj18x_attach */
+
+/*====================================================================*/
+
+/*
+ * Tear down a device instance: release its configuration, deregister
+ * from Card Services, unlink it from dev_list and free it. If the
+ * device is still configured and in use, teardown is deferred by
+ * marking the link DEV_STALE_LINK (flush_stale_links() finishes it).
+ */
+static void fmvj18x_detach(dev_link_t *link)
+{
+ local_info_t *lp = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "fmvj18x_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ fmvj18x_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ /* Still open: defer the final detach. */
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&lp->dev);
+ kfree(lp);
+
+} /* fmvj18x_detach */
+
+/*====================================================================*/
+
+/* Call a Card Services function and jump to cs_failed on any error,
+ recording which call failed (last_fn) and why (last_ret). */
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+/*
+ * For multifunction cards: try to claim one of the standard serial
+ * port bases for the modem function (BasePort2) together with the LAN
+ * I/O window. Returns CS_SUCCESS on the first base that works, or the
+ * last RequestIO error. The final 0x0 entry means "no serial window";
+ * the warning is printed but RequestIO is still attempted.
+ */
+static int mfc_try_io_port(dev_link_t *link)
+{
+ int i, ret;
+ static ioaddr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+
+ for (i = 0; i < 5; i++) {
+ link->io.BasePort2 = serial_base[i];
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ if (link->io.BasePort2 == 0) {
+ link->io.NumPorts2 = 0;
+ printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
+ }
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+}
+
+/*
+ * Probe the fixed set of I/O bases the Ungermann-Bass Access/CARD
+ * supports, and derive the card's ConfigIndex from whichever base is
+ * granted. Returns CS_SUCCESS or the last RequestIO error.
+ */
+static int ungermann_try_io_port(dev_link_t *link)
+{
+ int ret;
+ ioaddr_t ioaddr;
+ /*
+ Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360
+ 0x380,0x3c0 only for ioport.
+ */
+ for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
+ link->io.BasePort1 = ioaddr;
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) {
+ /* calculate ConfigIndex value */
+ /* Bits 4..7 of the base select the index; 0x22 is the
+ card's fixed part -- per the vendor docs, presumably. */
+ link->conf.ConfigIndex =
+ ((link->io.BasePort1 & 0x0f0) >> 3) | 0x22;
+ return ret;
+ }
+ }
+ return ret; /* RequestIO failed */
+}
+
+/*
+ * fmvj18x_config() is called after CARD_INSERTION: it parses the CIS
+ * to identify the exact card variant, claims I/O, IRQ and the socket
+ * configuration, registers the net device, resets the controller and
+ * reads the MAC address (from the CIS, from card registers, or from
+ * attribute memory depending on cardtype). On any Card Services error
+ * it falls through to cs_failed/failed and releases everything.
+ */
+static void fmvj18x_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ local_info_t *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32]; /* 64 bytes of tuple data, also read as le16 words */
+ int i, last_fn, last_ret, ret;
+ ioaddr_t ioaddr;
+ cardtype_t cardtype;
+ char *card_name = "unknown";
+ u_char *node_id;
+
+ DEBUG(0, "fmvj18x_config(0x%p)\n", link);
+
+ /*
+ This reads the card's CONFIG tuple to find its configuration
+ registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleData = (u_char *)buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* The presence of a FUNCE tuple distinguishes newer cards (MAC in
+ CIS) from old ones (MAC in registers or attribute memory). */
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ tuple.TupleOffset = 0;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) {
+ /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigIndex = parse.cftable_entry.index;
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, handle, &tuple);
+ else
+ buf[0] = 0xffff;
+ /* buf[0]/buf[1] are the little-endian manfid/prodid words. */
+ switch (le16_to_cpu(buf[0])) {
+ case MANFID_TDK:
+ cardtype = TDK;
+ if (le16_to_cpu(buf[1]) == PRODID_TDK_CF010) {
+ cs_status_t status;
+ CardServices(GetStatus, handle, &status);
+ if (status.CardState & CS_EVENT_3VCARD)
+ link->conf.Vcc = 33; /* inserted in 3.3V slot */
+ } else if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410) {
+ /* MultiFunction Card */
+ link->conf.ConfigBase = 0x800;
+ link->conf.ConfigIndex = 0x47;
+ link->io.NumPorts2 = 8;
+ }
+ break;
+ case MANFID_CONTEC:
+ cardtype = CONTEC;
+ break;
+ case MANFID_FUJITSU:
+ if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10302)
+ /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
+ but these are MBH10304 based card. */
+ cardtype = MBH10304;
+ else if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304)
+ cardtype = MBH10304;
+ else
+ cardtype = LA501;
+ break;
+ default:
+ cardtype = MBH10304;
+ }
+ } else {
+ /* old type card */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, handle, &tuple);
+ else
+ buf[0] = 0xffff;
+ switch (le16_to_cpu(buf[0])) {
+ case MANFID_FUJITSU:
+ if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304) {
+ cardtype = XXX10304; /* MBH10304 with buggy CIS */
+ link->conf.ConfigIndex = 0x20;
+ } else {
+ cardtype = MBH10302; /* NextCom NC5310, etc. */
+ link->conf.ConfigIndex = 1;
+ }
+ break;
+ case MANFID_UNGERMANN:
+ cardtype = UNGERMANN;
+ break;
+ default:
+ cardtype = MBH10302;
+ link->conf.ConfigIndex = 1;
+ }
+ }
+
+ /* Claim I/O: multifunction cards need a shared IRQ and a serial
+ window; Ungermann cards have their own base-address rules. */
+ if (link->io.NumPorts2 != 0) {
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ ret = mfc_try_io_port(link);
+ if (ret != CS_SUCCESS) goto cs_failed;
+ } else if (cardtype == UNGERMANN) {
+ ret = ungermann_try_io_port(link);
+ if (ret != CS_SUCCESS) goto cs_failed;
+ } else {
+ CS_CHECK(RequestIO, link->handle, &link->io);
+ }
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ if (link->io.BasePort2 != 0)
+ fmvj18x_setup_mfc(link);
+
+ ioaddr = dev->base_addr;
+
+ /* Reset controller */
+ if (sram_config == 0)
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set hardware address */
+ switch (cardtype) {
+ case MBH10304:
+ case TDK:
+ case LA501:
+ case CONTEC:
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ if (cardtype == MBH10304) {
+ /* MBH10304's CIS_FUNCE is corrupted */
+ node_id = &(tuple.TupleData[5]);
+ card_name = "FMV-J182";
+ } else {
+ /* Walk FUNCE tuples until the LAN_NODE_ID one. */
+ while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) {
+ CS_CHECK(GetNextTuple, handle, &tuple) ;
+ CS_CHECK(GetTupleData, handle, &tuple) ;
+ }
+ node_id = &(tuple.TupleData[2]);
+ if( cardtype == TDK ) {
+ card_name = "TDK LAK-CD021";
+ } else if( cardtype == LA501 ) {
+ card_name = "LA501";
+ } else {
+ card_name = "C-NET(PC)C";
+ }
+ }
+ /* Read MACID from CIS */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = node_id[i];
+ break;
+ case UNGERMANN:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ card_name = "Access/CARD";
+ break;
+ case XXX10304:
+ /* Read MACID from Buggy CIS */
+ if (fmvj18x_get_hwinfo(link, tuple.TupleData) == -1) {
+ printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.");
+ unregister_netdev(dev);
+ goto failed;
+ }
+ for (i = 0 ; i < 6; i++) {
+ dev->dev_addr[i] = tuple.TupleData[i];
+ }
+ card_name = "FMV-J182";
+ break;
+ case MBH10302:
+ default:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ card_name = "FMV-J181";
+ break;
+ }
+
+ copy_dev_name(lp->node, dev);
+ link->dev = &lp->node;
+
+ lp->cardtype = cardtype;
+ /* print current configuration */
+ printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, hw_addr ",
+ dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
+ dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ /* All Card Services errors end up here */
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ fmvj18x_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+
+} /* fmvj18x_config */
+/*====================================================================*/
+
+/*
+ * Recover the MAC address of an MBH10304 with a corrupted CIS by
+ * scanning the card's attribute memory directly for the LAN_NODE_ID
+ * tuple pattern. The six MAC bytes are copied into @node_id.
+ * Returns 0 on success, -1 if the pattern is not found or the memory
+ * window cannot be mapped.
+ */
+static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
+{
+ win_req_t req;
+ memreq_t mem;
+ u_char *base;
+ int i, j;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return -1;
+ }
+
+ base = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ mem.CardOffset = 0;
+ CardServices(MapMemPage, link->win, &mem);
+
+ /*
+ * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
+ * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff
+ * 'xx' is garbage.
+ * 'yy' is MAC address.
+ * Attribute memory holds one byte per 16-bit word, hence the *2
+ * stride on every readb().
+ */
+ for (i = 0; i < 0x200; i++) {
+ if (readb(base+i*2) == 0x22) {
+ /* NOTE(review): when i==0 this reads base-2, just before
+ the mapped window -- presumably benign in practice but
+ worth confirming. */
+ if (readb(base+(i-1)*2) == 0xff
+ && readb(base+(i+5)*2) == 0x04
+ && readb(base+(i+6)*2) == 0x06
+ && readb(base+(i+13)*2) == 0xff)
+ break;
+ }
+ }
+
+ /* i == 0x200 means the pattern was never found. */
+ if (i != 0x200) {
+ for (j = 0 ; j < 6; j++,i++) {
+ node_id[j] = readb(base+(i+7)*2);
+ }
+ }
+
+ iounmap(base);
+ j = CardServices(ReleaseWindow, link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return (i != 0x200) ? 0 : -1;
+
+} /* fmvj18x_get_hwinfo */
+/*====================================================================*/
+
+/*
+ * Program the LAN and modem function configuration registers of a
+ * multifunction card through a temporary attribute-memory window:
+ * option/status registers plus the LAN function's I/O base.
+ * Returns 0 on success, -1 if the window cannot be allocated.
+ */
+static int fmvj18x_setup_mfc(dev_link_t *link)
+{
+ win_req_t req;
+ memreq_t mem;
+ u_char *base;
+ int i, j;
+ local_info_t *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return -1;
+ }
+
+ base = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ mem.CardOffset = 0;
+ CardServices(MapMemPage, link->win, &mem);
+
+ /* Register offsets (0x800/0x820) match the TDK GN3410 ConfigBase
+ set up in fmvj18x_config(). */
+ ioaddr = dev->base_addr;
+ writeb(0x47, base+0x800); /* Config Option Register of LAN */
+ writeb(0x0, base+0x802); /* Config and Status Register */
+
+ writeb(ioaddr & 0xff, base+0x80a); /* I/O Base(Low) of LAN */
+ writeb((ioaddr >> 8) & 0xff, base+0x80c); /* I/O Base(High) of LAN */
+
+ writeb(0x45, base+0x820); /* Config Option Register of Modem */
+ writeb(0x8, base+0x822); /* Config and Status Register */
+
+ iounmap(base);
+ j = CardServices(ReleaseWindow, link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return 0;
+
+}
+/*====================================================================*/
+
+/*
+ * fmvj18x_release -- free all Card Services resources held by the socket.
+ *
+ * Used both directly and as the link->release timer callback (arg is the
+ * dev_link_t cast to u_long).  If the net device is still open, release
+ * is deferred by setting DEV_STALE_CONFIG; fjn_close() re-arms the timer.
+ */
+static void fmvj18x_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "fmvj18x_release(0x%p)\n", link);
+
+ /*
+ If the device is currently in use, we won't release until it
+ is actually closed.
+ */
+ if (link->open) {
+ DEBUG(1, "fmvj18x_cs: release postponed, '%s' "
+ "still open\n", link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ /* Don't bother checking to see if these succeed or not */
+ CardServices(ReleaseWindow, link->win);
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* fmvj18x_release */
+
+/*====================================================================*/
+
+/*
+ * fmvj18x_event -- Card Services event callback.
+ *
+ * Handles card insertion/removal, suspend/resume and physical reset.
+ * Removal schedules a deferred release via the link->release timer;
+ * insertion triggers configuration; suspend releases the configuration
+ * and resume restores it, resetting the chip if the interface is open.
+ * Always returns 0 (success).
+ */
+static int fmvj18x_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ local_info_t *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(1, "fmvj18x_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ /* defer teardown ~50ms so in-flight work can drain */
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ fmvj18x_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ fjn_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* fmvj18x_event */
+
+/*====================================================================*/
+
+/*
+ * init_fmvj18x_cs -- module init: verify the Card Services release
+ * matches what we were built against, then register the driver's
+ * attach/detach callbacks.  Returns -EINVAL on a version mismatch.
+ */
+static int __init init_fmvj18x_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "fmvj18x: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &fmvj18x_attach, &fmvj18x_detach);
+ return 0;
+}
+
+/*
+ * exit_fmvj18x_cs -- module unload: deregister from Card Services and
+ * detach every remaining device instance on the driver's dev_list.
+ */
+static void __exit exit_fmvj18x_cs(void)
+{
+ DEBUG(0, "fmvj18x_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ fmvj18x_detach(dev_list);
+}
+
+module_init(init_fmvj18x_cs);
+module_exit(exit_fmvj18x_cs);
+
+/*====================================================================*/
+
+/*
+ * fjn_interrupt -- IRQ handler for the FMV-18x chip.
+ *
+ * Masks the chip's TX interrupt, reads and acknowledges the TX/RX status
+ * registers, drains received frames via fjn_rx(), kicks the next queued
+ * transmit burst when the transmitter reports ready, and finally
+ * re-enables both interrupt sources.
+ *
+ * NOTE(review): dev is computed from lp (&lp->dev) *before* the lp == NULL
+ * check below; the address-of does not dereference lp, but the check would
+ * read more naturally placed first -- confirm intent.
+ */
+static void fjn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ local_info_t *lp = dev_id;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr;
+ unsigned short tx_stat, rx_stat;
+
+ if (lp == NULL) {
+ printk(KERN_NOTICE "fjn_interrupt(): irq %d for "
+ "unknown device.\n", irq);
+ return;
+ }
+ ioaddr = dev->base_addr;
+
+ /* avoid multiple interrupts */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ /* get status */
+ tx_stat = inb(ioaddr + TX_STATUS);
+ rx_stat = inb(ioaddr + RX_STATUS);
+
+ /* clear status */
+ outb(tx_stat, ioaddr + TX_STATUS);
+ outb(rx_stat, ioaddr + RX_STATUS);
+
+ DEBUG(4, "%s: interrupt, rx_status %02x.\n", dev->name, rx_stat);
+ DEBUG(4, " tx_status %02x.\n", tx_stat);
+
+ if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ /* there is packet(s) in rx buffer */
+ fjn_rx(dev);
+ }
+ if (tx_stat & F_TMT_RDY) {
+ /* last burst completed: account it, then start any queued burst */
+ lp->stats.tx_packets += lp->sent ;
+ lp->sent = 0 ;
+ if (lp->tx_queue) {
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ } else {
+ lp->tx_started = 0;
+ }
+ netif_wake_queue(dev);
+ }
+ DEBUG(4, "%s: exiting interrupt,\n", dev->name);
+ DEBUG(4, " tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat);
+
+ /* re-enable both interrupt sources */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+
+} /* fjn_interrupt */
+
+/*====================================================================*/
+
+/*
+ * fjn_tx_timeout -- watchdog handler for a stuck transmitter.
+ *
+ * Logs the chip's status registers (byte-swapped via htons for readable
+ * hex dumps), resets the chip and the driver's software TX state, and
+ * restarts the queue.  Uses the legacy cli()/sti() global IRQ lock
+ * around the reset (pre-2.5 API).
+ */
+static void fjn_tx_timeout(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
+ dev->name, htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & F_TMT_RDY
+ ? "IRQ conflict" : "network cable problem");
+ printk(KERN_NOTICE "%s: timeout registers: %04x %04x %04x "
+ "%04x %04x %04x %04x %04x.\n",
+ dev->name, htons(inw(ioaddr + 0)),
+ htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
+ htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
+ htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
+ htons(inw(ioaddr +14)));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ cli();
+
+ fjn_reset(dev);
+
+ /* clear all software transmit bookkeeping */
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->sent = 0;
+ lp->open_time = jiffies;
+ sti();
+ netif_wake_queue(dev);
+}
+
+/*
+ * fjn_start_xmit -- queue one sk_buff for transmission.
+ *
+ * Copies the frame (padded to ETH_ZLEN) into the chip's SRAM through the
+ * data port with interrupts masked.  If the transmitter is idle the burst
+ * is started immediately; otherwise the frame is queued and the netif
+ * queue is only restarted while SRAM space remains (4KB or 8KB depending
+ * on sram_config).  Returns 0 on success, 1 if the frame is oversized
+ * (in which case the skb is NOT freed and will be retried).
+ */
+static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ tx_timeout_check(dev, fjn_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ if (length > ETH_FRAME_LEN) {
+ printk(KERN_NOTICE "%s: Attempting to send a large packet"
+ " (%d bytes).\n", dev->name, length);
+ return 1;
+ }
+
+ DEBUG(4, "%s: Transmitting a packet of length %lu.\n",
+ dev->name, (unsigned long)skb->len);
+ add_tx_bytes(&lp->stats, skb->len);
+
+ /* Disable both interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ /* length word, then frame data, word by word */
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += ((length+3) & ~1);
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ netif_start_queue(dev);
+ } else {
+ if( sram_config == 0 ) {
+ if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ } else {
+ if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) &&
+ lp->tx_queue < 127 )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ }
+ }
+
+ /* Re-enable interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+ }
+ DEV_KFREE_SKB (skb);
+
+ return 0;
+} /* fjn_start_xmit */
+
+/*====================================================================*/
+
+/*
+ * fjn_reset -- full hardware (re)initialization of the FMV-18x chip.
+ *
+ * Resets the controller, programs the station address and an empty
+ * multicast table across the three register banks, selects collision
+ * handling per card type, re-enables TX/RX and interrupt delivery.
+ * Register layout choices depend on sram_config and lp->cardtype.
+ */
+static void fjn_reset(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ DEBUG(4, "fjn_reset(%s) called.\n",dev->name);
+
+ /* Reset controller */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set Tx modes */
+ outb(D_TX_MODE, ioaddr + TX_MODE);
+ /* set Rx modes */
+ outb(ID_MATCHED, ioaddr + RX_MODE);
+
+ /* Set hardware address */
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + NODE_ID + i);
+
+ /* Switch to bank 1 */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_1, ioaddr + CONFIG_1);
+ else
+ outb(BANK_1U, ioaddr + CONFIG_1);
+
+ /* set the multicast table to accept none. */
+ for (i = 0; i < 6; i++)
+ outb(0x00, ioaddr + MAR_ADR + i);
+
+ /* Switch to bank 2 (runtime mode) */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_2, ioaddr + CONFIG_1);
+ else
+ outb(BANK_2U, ioaddr + CONFIG_1);
+
+ /* set 16col ctrl bits */
+ if( lp->cardtype == TDK || lp->cardtype == CONTEC)
+ outb(TDK_AUTO_MODE, ioaddr + COL_CTRL);
+ else
+ outb(AUTO_MODE, ioaddr + COL_CTRL);
+
+ /* clear Reserved Regs */
+ outb(0x00, ioaddr + BMPR12);
+ outb(0x00, ioaddr + BMPR13);
+
+ /* reset Skip packet reg. */
+ outb(0x01, ioaddr + RX_SKIP);
+
+ /* Enable Tx and Rx */
+ if( sram_config == 0 )
+ outb(CONFIG0_DFL, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_DFL_1, ioaddr + CONFIG_0);
+
+ /* Init receive pointer ? */
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ /* Clear all status */
+ outb(0xff, ioaddr + TX_STATUS);
+ outb(0xff, ioaddr + RX_STATUS);
+
+ /* keep board-level interrupts off while unmasking the chip */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ /* Turn on Rx interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+
+ /* Turn on interrupts from LAN card controller */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_ON, ioaddr + LAN_CTRL);
+} /* fjn_reset */
+
+/*====================================================================*/
+
+/*
+ * fjn_rx -- drain received frames from the chip's on-board buffer.
+ *
+ * Called from the interrupt handler.  For each frame: reads the status
+ * word and length from the data port, drops frames with error status or
+ * implausible length (> 1550), otherwise copies the payload into a fresh
+ * sk_buff and hands it to the stack via netif_rx().  At most 'boguscount'
+ * (10) frames are processed per call to bound time spent in the ISR.
+ */
+static void fjn_rx(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int boguscount = 10; /* 5 -> 10: by agy 19940922 */
+
+ DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n",
+ dev->name, inb(ioaddr + RX_STATUS));
+
+ /* loop while the chip reports its receive buffer non-empty */
+ while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ u_short status = inw(ioaddr + DATAPORT);
+
+ DEBUG(4, "%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ break;
+ }
+#endif
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & F_LEN_ERR) lp->stats.rx_length_errors++;
+ if (status & F_ALG_ERR) lp->stats.rx_frame_errors++;
+ if (status & F_CRC_ERR) lp->stats.rx_crc_errors++;
+ if (status & F_OVR_FLO) lp->stats.rx_over_errors++;
+ } else {
+ u_short pkt_len = inw(ioaddr + DATAPORT);
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk(KERN_NOTICE "%s: The FMV-18x claimed a very "
+ "large packet, size %d.\n", dev->name, pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping "
+ "packet (len %d).\n", dev->name, pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ /* 2-byte reserve aligns the IP header on a 4-byte boundary */
+ skb_reserve(skb, 2);
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 5) {
+ int i;
+ printk(KERN_DEBUG "%s: Rxed packet of length %d: ",
+ dev->name, pkt_len);
+ for (i = 0; i < 14; i++)
+ printk(" %02x", skb->data[i]);
+ printk(".\n");
+ }
+#endif
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ add_rx_bytes(&lp->stats, pkt_len);
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a netif_wake_queue() for us and will work on them
+ when we get to the bottom-half routine. */
+/*
+ if (lp->cardtype != TDK) {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP)
+ break;
+ (void)inw(ioaddr + DATAPORT); /+ dummy status read +/
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ }
+
+ if (i > 0)
+ DEBUG(5, "%s: Exint Rx packet with mode %02x after "
+ "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
+ }
+*/
+
+ return;
+} /* fjn_rx */
+
+/*====================================================================*/
+
+/*
+ * fjn_config -- dev->set_config hook; runtime reconfiguration is not
+ * supported, so every request is accepted as a no-op (returns 0).
+ */
+static int fjn_config(struct net_device *dev, struct ifmap *map){
+ return 0;
+}
+
+/*
+ * fjn_open -- dev->open hook: bring the interface up.
+ *
+ * Fails with -ENODEV if the PCMCIA link is not configured and present.
+ * Otherwise marks the link open, resets the chip, clears the software
+ * TX state, starts the queue and pins the module reference count.
+ */
+static int fjn_open(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(4, "fjn_open('%s').\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+
+ fjn_reset(dev);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->open_time = jiffies;
+ netif_mark_up(dev);
+ netif_start_queue(dev);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+} /* fjn_open */
+
+/*====================================================================*/
+
+/*
+ * fjn_close -- dev->stop hook: take the interface down.
+ *
+ * Stops the queue, disables TX/RX, powers the chip down and masks its
+ * IRQ, then drops the link open count.  If a release was deferred while
+ * we were open (DEV_STALE_CONFIG), re-arm the release timer so the
+ * Card Services resources are finally freed.
+ */
+static int fjn_close(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ dev_link_t *link = &lp->link;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(4, "fjn_close('%s').\n", dev->name);
+
+ lp->open_time = 0;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST ,ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(CHIP_OFF ,ioaddr + CONFIG_1);
+
+ /* Set the ethernet adaptor disable IRQ */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ link->open--;
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* fjn_close */
+
+/*====================================================================*/
+
+/*
+ * fjn_get_stats -- dev->get_stats hook: return the driver-maintained
+ * statistics block (no hardware counters are read here).
+ */
+static struct net_device_stats *fjn_get_stats(struct net_device *dev)
+{
+ local_info_t *lp = (local_info_t *)dev->priv;
+ return &lp->stats;
+} /* fjn_get_stats */
+
+/*====================================================================*/
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+/*
+ * set_rx_mode -- program the receive filter from dev->flags/mc_list.
+ *
+ * Chooses promiscuous (RX_MODE=3), accept-all-multicast (2), or
+ * unicast-only (1), and for a small multicast list builds a 64-bit
+ * CRC hash filter which is written to bank-1 registers (only when it
+ * differs from the cached copy in lp->mc_filter), with interrupts off.
+ *
+ * NOTE(review): the perfect-filter branch (0 < mc_count <= MC_FILTERBREAK)
+ * builds the hash but never writes RX_MODE -- confirm the previously
+ * programmed mode is the intended one in that case.
+ */
+static void set_rx_mode(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ u_char mc_filter[8]; /* Multicast hash filter */
+ u_long flags;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (dev->mc_count > MC_FILTERBREAK
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (dev->mc_count == 0) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct dev_mc_list *mclist;
+ int i; /* note: shadows the outer 'i' used below */
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ }
+
+ save_flags(flags);
+ cli();
+ /* only touch the hardware filter when it actually changed */
+ if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
+ int saved_bank = inb(ioaddr + CONFIG_1);
+ /* Switch to bank 1 and set the multicast table. */
+ outb(0xe4, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + 8 + i);
+ memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
+ outb(saved_bank, ioaddr + CONFIG_1);
+ }
+ restore_flags(flags);
+}
diff --git a/linux/pcmcia-cs/clients/nmclan_cs.c b/linux/pcmcia-cs/clients/nmclan_cs.c
new file mode 100644
index 0000000..2f6fb08
--- /dev/null
+++ b/linux/pcmcia-cs/clients/nmclan_cs.c
@@ -0,0 +1,1744 @@
+/* ----------------------------------------------------------------------------
+Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN.
+ nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao
+
+ The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media
+ Access Controller for Ethernet (MACE). It is essentially the Am2150
+ PCMCIA Ethernet card contained in the Am2150 Demo Kit.
+
+Written by Roger C. Pao <rpao@paonet.org>
+ Copyright 1995 Roger C. Pao
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+Ported to Linux 1.3.* network driver environment by
+ Matti Aarnio <mea@utu.fi>
+
+References
+
+ Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993
+ Am79C940 (MACE) Data Sheet, 1994
+ Am79C90 (C-LANCE) Data Sheet, 1994
+ Linux PCMCIA Programmer's Guide v1.17
+ /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8
+
+ Eric Mears, New Media Corporation
+ Tom Pollard, New Media Corporation
+ Dean Siasoyco, New Media Corporation
+ Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com>
+ Donald Becker <becker@scyld.com>
+ David Hinds <dahinds@users.sourceforge.net>
+
+ The Linux client driver is based on the 3c589_cs.c client driver by
+ David Hinds.
+
+ The Linux network driver outline is based on the 3c589_cs.c driver,
+ the 8390.c driver, and the example skeleton.c kernel code, which are
+ by Donald Becker.
+
+ The Am2150 network driver hardware interface code is based on the
+ OS/9000 driver for the New Media Ethernet LAN by Eric Mears.
+
+ Special thanks for testing and help in debugging this driver goes
+ to Ken Lesniak.
+
+-------------------------------------------------------------------------------
+Driver Notes and Issues
+-------------------------------------------------------------------------------
+
+1. Developed on a Dell 320SLi
+ PCMCIA Card Services 2.6.2
+ Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386
+
+2. rc.pcmcia may require loading pcmcia_core with io_speed=300:
+ 'insmod pcmcia_core.o io_speed=300'.
+ This will avoid problems with fast systems which causes rx_framecnt
+ to return random values.
+
+3. If hot extraction does not work for you, use 'ifconfig eth0 down'
+ before extraction.
+
+4. There is a bad slow-down problem in this driver.
+
+5. Future: Multicast processing. In the meantime, do _not_ compile your
+ kernel with multicast ip enabled.
+
+-------------------------------------------------------------------------------
+History
+-------------------------------------------------------------------------------
+Log: nmclan_cs.c,v
+ * Revision 0.16 1995/07/01 06:42:17 rpao
+ * Bug fix: nmclan_reset() called CardServices incorrectly.
+ *
+ * Revision 0.15 1995/05/24 08:09:47 rpao
+ * Re-implement MULTI_TX dev->tbusy handling.
+ *
+ * Revision 0.14 1995/05/23 03:19:30 rpao
+ * Added, in nmclan_config(), "tuple.Attributes = 0;".
+ * Modified MACE ID check to ignore chip revision level.
+ * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
+ *
+ * Revision 0.13 1995/05/18 05:56:34 rpao
+ * Statistics changes.
+ * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
+ * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
+ *
+ * Revision 0.12 1995/05/14 00:12:23 rpao
+ * Statistics overhaul.
+ *
+
+95/05/13 rpao V0.10a
+ Bug fix: MACE statistics counters used wrong I/O ports.
+ Bug fix: mace_interrupt() needed to allow statistics to be
+ processed without RX or TX interrupts pending.
+95/05/11 rpao V0.10
+ Multiple transmit request processing.
+ Modified statistics to use MACE counters where possible.
+95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
+ *Released
+95/05/10 rpao V0.08
+ Bug fix: Make all non-exported functions private by using
+ static keyword.
+ Bug fix: Test IntrCnt _before_ reading MACE_IR.
+95/05/10 rpao V0.07 Statistics.
+95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
+
+---------------------------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------------
+Conditional Compilation Options
+---------------------------------------------------------------------------- */
+
+#define MULTI_TX 0
+#define RESET_ON_TIMEOUT 1
+#define TX_INTERRUPTABLE 1
+#define RESET_XILINX 0
+
+/* ----------------------------------------------------------------------------
+Include Files
+---------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+/* ----------------------------------------------------------------------------
+Defines
+---------------------------------------------------------------------------- */
+
+#define ETHER_ADDR_LEN ETH_ALEN
+ /* 6 bytes in an Ethernet Address */
+#define MACE_LADRF_LEN 8
+ /* 8 bytes in Logical Address Filter */
+
+/* Loop Control Defines */
+#define MACE_MAX_IR_ITERATIONS 10
+#define MACE_MAX_RX_ITERATIONS 12
+ /*
+ TBD: Dean brought this up, and I assumed the hardware would
+ handle it:
+
+ If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
+ non-zero when the isr exits. We may not get another interrupt
+ to process the remaining packets for some time.
+ */
+
+/*
+The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
+which manages the interface between the MACE and the PCMCIA bus. It
+also includes buffer management for the 32K x 8 SRAM to control up to
+four transmit and 12 receive frames at a time.
+*/
+#define AM2150_MAX_TX_FRAMES 4
+#define AM2150_MAX_RX_FRAMES 12
+
+/* Am2150 Ethernet Card I/O Mapping */
+#define AM2150_RCV 0x00
+#define AM2150_XMT 0x04
+#define AM2150_XMT_SKIP 0x09
+#define AM2150_RCV_NEXT 0x0A
+#define AM2150_RCV_FRAME_COUNT 0x0B
+#define AM2150_MACE_BANK 0x0C
+#define AM2150_MACE_BASE 0x10
+
+/* MACE Registers */
+#define MACE_RCVFIFO 0
+#define MACE_XMTFIFO 1
+#define MACE_XMTFC 2
+#define MACE_XMTFS 3
+#define MACE_XMTRC 4
+#define MACE_RCVFC 5
+#define MACE_RCVFS 6
+#define MACE_FIFOFC 7
+#define MACE_IR 8
+#define MACE_IMR 9
+#define MACE_PR 10
+#define MACE_BIUCC 11
+#define MACE_FIFOCC 12
+#define MACE_MACCC 13
+#define MACE_PLSCC 14
+#define MACE_PHYCC 15
+#define MACE_CHIPIDL 16
+#define MACE_CHIPIDH 17
+#define MACE_IAC 18
+/* Reserved */
+#define MACE_LADRF 20
+#define MACE_PADR 21
+/* Reserved */
+/* Reserved */
+#define MACE_MPC 24
+/* Reserved */
+#define MACE_RNTPC 26
+#define MACE_RCVCC 27
+/* Reserved */
+#define MACE_UTR 29
+#define MACE_RTR1 30
+#define MACE_RTR2 31
+
+/* MACE Bit Masks */
+#define MACE_XMTRC_EXDEF 0x80
+#define MACE_XMTRC_XMTRC 0x0F
+
+#define MACE_XMTFS_XMTSV 0x80
+#define MACE_XMTFS_UFLO 0x40
+#define MACE_XMTFS_LCOL 0x20
+#define MACE_XMTFS_MORE 0x10
+#define MACE_XMTFS_ONE 0x08
+#define MACE_XMTFS_DEFER 0x04
+#define MACE_XMTFS_LCAR 0x02
+#define MACE_XMTFS_RTRY 0x01
+
+#define MACE_RCVFS_RCVSTS 0xF000
+#define MACE_RCVFS_OFLO 0x8000
+#define MACE_RCVFS_CLSN 0x4000
+#define MACE_RCVFS_FRAM 0x2000
+#define MACE_RCVFS_FCS 0x1000
+
+#define MACE_FIFOFC_RCVFC 0xF0
+#define MACE_FIFOFC_XMTFC 0x0F
+
+#define MACE_IR_JAB 0x80
+#define MACE_IR_BABL 0x40
+#define MACE_IR_CERR 0x20
+#define MACE_IR_RCVCCO 0x10
+#define MACE_IR_RNTPCO 0x08
+#define MACE_IR_MPCO 0x04
+#define MACE_IR_RCVINT 0x02
+#define MACE_IR_XMTINT 0x01
+
+#define MACE_MACCC_PROM 0x80
+#define MACE_MACCC_DXMT2PD 0x40
+#define MACE_MACCC_EMBA 0x20
+#define MACE_MACCC_RESERVED 0x10
+#define MACE_MACCC_DRCVPA 0x08
+#define MACE_MACCC_DRCVBC 0x04
+#define MACE_MACCC_ENXMT 0x02
+#define MACE_MACCC_ENRCV 0x01
+
+#define MACE_PHYCC_LNKFL 0x80
+#define MACE_PHYCC_DLNKTST 0x40
+#define MACE_PHYCC_REVPOL 0x20
+#define MACE_PHYCC_DAPC 0x10
+#define MACE_PHYCC_LRT 0x08
+#define MACE_PHYCC_ASEL 0x04
+#define MACE_PHYCC_RWAKE 0x02
+#define MACE_PHYCC_AWAKE 0x01
+
+#define MACE_IAC_ADDRCHG 0x80
+#define MACE_IAC_PHYADDR 0x04
+#define MACE_IAC_LOGADDR 0x02
+
+#define MACE_UTR_RTRE 0x80
+#define MACE_UTR_RTRD 0x40
+#define MACE_UTR_RPA 0x20
+#define MACE_UTR_FCOLL 0x10
+#define MACE_UTR_RCVFCSE 0x08
+#define MACE_UTR_LOOP_INCL_MENDEC 0x06
+#define MACE_UTR_LOOP_NO_MENDEC 0x04
+#define MACE_UTR_LOOP_EXTERNAL 0x02
+#define MACE_UTR_LOOP_NONE 0x00
+#define MACE_UTR_RESERVED 0x01
+
+/* Switch MACE register bank (only 0 and 1 are valid) */
+#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK)
+
+#define MACE_IMR_DEFAULT \
+ (0xFF - \
+ ( \
+ MACE_IR_CERR | \
+ MACE_IR_RCVCCO | \
+ MACE_IR_RNTPCO | \
+ MACE_IR_MPCO | \
+ MACE_IR_RCVINT | \
+ MACE_IR_XMTINT \
+ ) \
+ )
+#undef MACE_IMR_DEFAULT
+#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* ----------------------------------------------------------------------------
+Type Definitions
+---------------------------------------------------------------------------- */
+
+/*
+ * Software copies of the MACE chip's statistics, one int per status bit
+ * or hardware counter (register names refer to the defines above).
+ */
+typedef struct _mace_statistics {
+ /* MACE_XMTFS */
+ int xmtsv;
+ int uflo;
+ int lcol;
+ int more;
+ int one;
+ int defer;
+ int lcar;
+ int rtry;
+
+ /* MACE_XMTRC */
+ int exdef;
+ int xmtrc;
+
+ /* RFS1--Receive Status (RCVSTS) */
+ int oflo;
+ int clsn;
+ int fram;
+ int fcs;
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ int rfs_rntpc;
+
+ /* RFS3--Receive Collision Count (RCVCC) */
+ int rfs_rcvcc;
+
+ /* MACE_IR */
+ int jab;
+ int babl;
+ int cerr;
+ int rcvcco;
+ int rntpco;
+ int mpco;
+
+ /* MACE_MPC */
+ int mpc;
+
+ /* MACE_RNTPC */
+ int rntpc;
+
+ /* MACE_RCVCC */
+ int rcvcc;
+} mace_statistics;
+
+/*
+ * Per-device driver state.  One allocation carries the PCMCIA link,
+ * the net_device, and all driver bookkeeping (see nmclan_attach()).
+ */
+typedef struct _mace_private {
+ dev_link_t link; /* PCMCIA socket/link state */
+ struct net_device dev; /* embedded network device */
+ dev_node_t node;
+ struct net_device_stats linux_stats; /* Linux statistics counters */
+ mace_statistics mace_stats; /* MACE chip statistics counters */
+
+ /* restore_multicast_list() state variables */
+ int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */
+ int multicast_num_addrs;
+
+ char tx_free_frames; /* Number of free transmit frame buffers */
+ char tx_irq_disabled; /* MACE TX interrupt disabled */
+} mace_private;
+
+/* ----------------------------------------------------------------------------
+Private Global Variables
+---------------------------------------------------------------------------- */
+
+#ifdef PCMCIA_DEBUG
+static char rcsid[] =
+"nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao";
+static char *version =
+"nmclan_cs 0.16 (Roger C. Pao)";
+#endif
+
+static dev_info_t dev_info="nmclan_cs";
+static dev_link_t *dev_list=NULL;
+
+static char *if_names[]={
+ "Auto", "10baseT", "BNC",
+};
+
+/* ----------------------------------------------------------------------------
+Parameters
+ These are the parameters that can be set during loading with
+ 'insmod'.
+---------------------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("New Media PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */
+INT_MODULE_PARM(if_port, 0);
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+#else
+#define DEBUG(n, args...)
+#endif
+
+/* ----------------------------------------------------------------------------
+Function Prototypes
+---------------------------------------------------------------------------- */
+
+static void nmclan_config(dev_link_t *link);
+static void nmclan_release(u_long arg);
+static int nmclan_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static void nmclan_reset(struct net_device *dev);
+static int mace_config(struct net_device *dev, struct ifmap *map);
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev);
+static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct net_device_stats *mace_get_stats(struct net_device *dev);
+static int mace_rx(struct net_device *dev, unsigned char RxCnt);
+static void restore_multicast_list(struct net_device *dev);
+
+static void set_multicast_list(struct net_device *dev);
+
+static dev_link_t *nmclan_attach(void);
+static void nmclan_detach(dev_link_t *);
+
+/* ----------------------------------------------------------------------------
+flush_stale_links
+ Clean up stale device structures
+---------------------------------------------------------------------------- */
+
+/*
+ * flush_stale_links -- detach any device instances marked DEV_STALE_LINK
+ * (left over from a deferred release).  Safe against list mutation:
+ * 'next' is captured before nmclan_detach() unlinks the node.
+ */
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ nmclan_detach(link);
+ }
+}
+
+/* ----------------------------------------------------------------------------
+cs_error
+ Report a Card Services related error.
+---------------------------------------------------------------------------- */
+
+/*
+ * cs_error -- report a Card Services failure (function code + return
+ * status) through the Card Services ReportError facility.
+ */
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/* ----------------------------------------------------------------------------
+nmclan_attach
+ Creates an "instance" of the driver, allocating local data
+ structures for one device. The device is registered with Card
+ Services.
+---------------------------------------------------------------------------- */
+
+/*
+ * nmclan_attach -- create one driver instance.
+ *
+ * Allocates a zeroed mace_private (which embeds both the dev_link_t and
+ * the net_device), fills in the I/O, IRQ and configuration requests,
+ * wires up the net_device method pointers, links the instance onto
+ * dev_list, and registers with Card Services.  Returns the new link,
+ * or NULL on allocation/registration failure (registration failure
+ * detaches and frees the instance first).
+ */
+static dev_link_t *nmclan_attach(void)
+{
+ mace_private *lp;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "nmclan_attach()\n");
+ DEBUG(1, "%s\n", rcsid);
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ /* release timer: deferred resource teardown */
+ init_timer(&link->release);
+ link->release.function = &nmclan_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 5;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ /* NOTE(review): reads all 4 irq_list slots even if fewer were set */
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &mace_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ dev->hard_start_xmit = &mace_start_xmit;
+ dev->set_config = &mace_config;
+ dev->get_stats = &mace_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &mace_open;
+ dev->stop = &mace_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = mace_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &nmclan_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ nmclan_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* nmclan_attach */
+
+/* ----------------------------------------------------------------------------
+nmclan_detach
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+---------------------------------------------------------------------------- */
+
/* Delete a driver "instance": unlink it from dev_list, deregister the
   Card Services client, and free the mace_private.  If the device is
   still configured and open, teardown is deferred via DEV_STALE_LINK
   and completed when the device is finally released. */
static void nmclan_detach(dev_link_t *link)
{
  mace_private *lp = link->priv;
  dev_link_t **linkp;

  DEBUG(0, "nmclan_detach(0x%p)\n", link);

  /* Locate device structure */
  for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
    if (*linkp == link) break;
  if (*linkp == NULL)
    return;  /* not one of ours -- ignore */

  del_timer(&link->release);
  if (link->state & DEV_CONFIG) {
    nmclan_release((u_long)link);
    if (link->state & DEV_STALE_CONFIG) {
      /* Device still open: free the structures later, on release. */
      link->state |= DEV_STALE_LINK;
      return;
    }
  }

  if (link->handle)
    CardServices(DeregisterClient, link->handle);

  /* Unlink device structure, free bits */
  *linkp = link->next;
  if (link->dev)
    unregister_netdev(&lp->dev);
  kfree(lp);

} /* nmclan_detach */
+
+/* ----------------------------------------------------------------------------
+mace_read
+ Reads a MACE register. This is bank independent; however, the
+ caller must ensure that this call is not interruptable. We are
+ assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
/* Read a MACE register (0-31).  Registers 16-31 live in bank 1, so
   that access is bracketed by MACEBANK(1)/MACEBANK(0) with interrupts
   disabled; normal operation assumes the chip is left in bank 0.
   Returns the 8-bit register value (0xFF if reg is out of range). */
static int mace_read(ioaddr_t ioaddr, int reg)
{
  int data = 0xFF;  /* fallback if reg selects no bank below */
  unsigned long flags;

  switch (reg >> 4) {
    case 0: /* register 0-15 */
      data = inb(ioaddr + AM2150_MACE_BASE + reg);
      break;
    case 1: /* register 16-31 */
      /* The bank switch / read / switch-back sequence must be atomic
         with respect to the interrupt handler. */
      save_flags(flags);
      cli();
      MACEBANK(1);
      data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
      MACEBANK(0);
      restore_flags(flags);
      break;
  }
  return (data & 0xFF);
} /* mace_read */
+
+/* ----------------------------------------------------------------------------
+mace_write
+ Writes to a MACE register. This is bank independent; however,
+ the caller must ensure that this call is not interruptable. We
+ are assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
/* Write a MACE register (0-31).  Mirrors mace_read(): bank-1
   registers (16-31) are written with interrupts disabled around the
   MACEBANK(1)/MACEBANK(0) switch so the chip is always left in
   bank 0.  Writes outside 0-31 are silently ignored. */
static void mace_write(ioaddr_t ioaddr, int reg, int data)
{
  unsigned long flags;

  switch (reg >> 4) {
    case 0: /* register 0-15 */
      outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
      break;
    case 1: /* register 16-31 */
      /* Bank switch / write / switch-back must be atomic w.r.t. the
         interrupt handler. */
      save_flags(flags);
      cli();
      MACEBANK(1);
      outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
      MACEBANK(0);
      restore_flags(flags);
      break;
  }
} /* mace_write */
+
+/* ----------------------------------------------------------------------------
+mace_init
+ Resets the MACE chip.
+---------------------------------------------------------------------------- */
/* Reset and program the MACE chip: software reset, burst-mode FIFOs,
   all interrupts masked, transceiver port selection, and the station
   (physical) address.  MACCC is deliberately left at 0 (TX/RX
   disabled); set_multicast_list enables them later. */
static void mace_init(ioaddr_t ioaddr, char *enet_addr)
{
  int i;

  /* MACE Software reset */
  mace_write(ioaddr, MACE_BIUCC, 1);
  /* NOTE(review): unbounded busy-wait; a dead or removed card would
     hang here.  Datasheet says the bit self-clears within 200ns. */
  while (mace_read(ioaddr, MACE_BIUCC) & 0x01) {
    /* Wait for reset bit to be cleared automatically after <= 200ns */;
  }
  mace_write(ioaddr, MACE_BIUCC, 0);

  /* The Am2150 requires that the MACE FIFOs operate in burst mode. */
  mace_write(ioaddr, MACE_FIFOCC, 0x0F);

  mace_write(ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */
  mace_write(ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */

  /*
   * Bit 2-1 PORTSEL[1-0] Port Select.
   * 00 AUI/10Base-2
   * 01 10Base-T
   * 10 DAI Port (reserved in Am2150)
   * 11 GPSI
   * For this card, only the first two are valid.
   * So, PLSCC should be set to
   * 0x00 for 10Base-2
   * 0x02 for 10Base-T
   * Or just set ASEL in PHYCC below!
   */
  switch (if_port) {
    case 1:
      mace_write(ioaddr, MACE_PLSCC, 0x02);
      break;
    case 2:
      mace_write(ioaddr, MACE_PLSCC, 0x00);
      break;
    default:
      mace_write(ioaddr, MACE_PHYCC, /* ASEL */ 4);
      /* ASEL Auto Select.  When set, the PORTSEL[1-0] bits are overridden,
	 and the MACE device will automatically select the operating media
	 interface port. */
      break;
  }

  /* Station address loads go through the Internal Address Config
     register: request the change, wait for ADDRCHG to clear, then
     stream the 6 address bytes into PADR. */
  mace_write(ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR);
  /* Poll ADDRCHG bit */
  while (mace_read(ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
    ;
  /* Set PADR register */
  for (i = 0; i < ETHER_ADDR_LEN; i++)
    mace_write(ioaddr, MACE_PADR, enet_addr[i]);

  /* MAC Configuration Control Register should be written last */
  /* Let set_multicast_list set this. */
  /* mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */
  mace_write(ioaddr, MACE_MACCC, 0x00);
} /* mace_init */
+
+/* ----------------------------------------------------------------------------
+nmclan_config
+ This routine is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+---------------------------------------------------------------------------- */
+
/* Any failing Card Services call records the failing function and
   return code and jumps to cs_failed below. */
#define CS_CHECK(fn, args...) \
while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed

/* Configure the PCMCIA socket after a CARD_INSERTION event: parse the
   CIS, request I/O + IRQ + configuration, register the net device,
   read the ethernet address from the CIS, and sanity-check the MACE
   chip ID before bringing the device up. */
static void nmclan_config(dev_link_t *link)
{
  client_handle_t handle = link->handle;
  mace_private *lp = link->priv;
  struct net_device *dev = &lp->dev;
  tuple_t tuple;
  cisparse_t parse;
  u_char buf[64];
  int i, last_ret, last_fn;
  ioaddr_t ioaddr;

  DEBUG(0, "nmclan_config(0x%p)\n", link);

  /* Locate the CISTPL_CONFIG tuple to find the configuration base. */
  tuple.Attributes = 0;
  tuple.TupleData = buf;
  tuple.TupleDataMax = 64;
  tuple.TupleOffset = 0;
  tuple.DesiredTuple = CISTPL_CONFIG;
  CS_CHECK(GetFirstTuple, handle, &tuple);
  CS_CHECK(GetTupleData, handle, &tuple);
  CS_CHECK(ParseTuple, handle, &tuple, &parse);
  link->conf.ConfigBase = parse.config.base;

  /* Configure card */
  link->state |= DEV_CONFIG;

  CS_CHECK(RequestIO, handle, &link->io);
  CS_CHECK(RequestIRQ, handle, &link->irq);
  CS_CHECK(RequestConfiguration, handle, &link->conf);
  dev->irq = link->irq.AssignedIRQ;
  dev->base_addr = link->io.BasePort1;
  i = register_netdev(dev);
  if (i != 0) {
    printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
    goto failed;
  }

  ioaddr = dev->base_addr;

  /* Read the ethernet address from the CIS. */
  tuple.DesiredTuple = 0x80 /* CISTPL_CFTABLE_ENTRY_MISC */;
  tuple.TupleData = buf;
  tuple.TupleDataMax = 64;
  tuple.TupleOffset = 0;
  CS_CHECK(GetFirstTuple, handle, &tuple);
  CS_CHECK(GetTupleData, handle, &tuple);
  memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);

  /* Verify configuration by reading the MACE ID. */
  {
    char sig[2];

    sig[0] = mace_read(ioaddr, MACE_CHIPIDL);
    sig[1] = mace_read(ioaddr, MACE_CHIPIDH);
    if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
      DEBUG(0, "nmclan_cs configured: mace id=%x %x\n",
	    sig[0], sig[1]);
    } else {
      printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
	     " be 0x40 0x?9\n", sig[0], sig[1]);
      /* NOTE(review): returns without releasing the I/O/IRQ/config
	 just requested and leaves the netdev registered -- confirm
	 this is intended (compare the cs_failed path below). */
      link->state &= ~DEV_CONFIG_PENDING;
      return;
    }
  }

  mace_init(ioaddr, dev->dev_addr);

  /* The if_port symbol can be set when the module is loaded */
  if (if_port <= 2)
    dev->if_port = if_port;
  else
    printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");

#if 0
  /* Determine which port we are using if auto is selected */
  if (if_port==0) {
    mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
    DEBUG(2, "%s: mace_phycc 0x%X.\n", dev->name,
	  mace_read(ioaddr, MACE_PHYCC));
    if (mace_read(ioaddr, MACE_PHYCC) & MACE_PHYCC_LNKFL)
      /* 10base-T receiver is in link fail, MACE is using AUI port. */
      dev->if_port = 2;
    else
      dev->if_port = 1;
    mace_write(ioaddr, MACE_MACCC, 0x00);
  }
  /* Unfortunately, this doesn't seem to work. LNKFL is always set.
     LNKFL is supposed to be opposite the green LED on the edge of the card.
     It doesn't work if it is checked and printed in _open() either.
     It does work if check in _start_xmit(), but that's not a good place
     to printk. */
#endif

  copy_dev_name(lp->node, dev);
  link->dev = &lp->node;
  link->state &= ~DEV_CONFIG_PENDING;

  printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port, hw_addr ",
	 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]);
  for (i = 0; i < 6; i++)
    printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
  return;

cs_failed:
  cs_error(link->handle, last_fn, last_ret);
failed:
  nmclan_release((u_long)link);
  link->state &= ~DEV_CONFIG_PENDING;
  return;

} /* nmclan_config */
+
+/* ----------------------------------------------------------------------------
+nmclan_release
+ After a card is removed, nmclan_release() will unregister the
+ net device, and release the PCMCIA configuration. If the device
+ is still open, this will be postponed until it is closed.
+---------------------------------------------------------------------------- */
/* Timer callback: release the PCMCIA configuration (config, I/O,
   IRQ).  If the net device is still open, defer by setting
   DEV_STALE_CONFIG; mace_close() re-arms the release timer. */
static void nmclan_release(u_long arg)
{
  dev_link_t *link = (dev_link_t *)arg;

  DEBUG(0, "nmclan_release(0x%p)\n", link);

  if (link->open) {
    DEBUG(1, "nmclan_cs: release postponed, '%s' "
	  "still open\n", link->dev->dev_name);
    link->state |= DEV_STALE_CONFIG;
    return;
  }

  CardServices(ReleaseConfiguration, link->handle);
  CardServices(ReleaseIO, link->handle, &link->io);
  CardServices(ReleaseIRQ, link->handle, &link->irq);

  link->state &= ~DEV_CONFIG;

} /* nmclan_release */
+
+/* ----------------------------------------------------------------------------
+nmclan_event
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+---------------------------------------------------------------------------- */
/* Card Services event handler.  Mostly schedules other work: insertion
   triggers nmclan_config(), removal detaches the netdev and arms the
   deferred release timer, and suspend/resume release and re-request
   the socket configuration around the power transition. */
static int nmclan_event(event_t event, int priority,
			event_callback_args_t *args)
{
  dev_link_t *link = args->client_data;
  mace_private *lp = link->priv;
  struct net_device *dev = &lp->dev;

  DEBUG(1, "nmclan_event(0x%06x)\n", event);

  switch (event) {
    case CS_EVENT_CARD_REMOVAL:
      link->state &= ~DEV_PRESENT;
      if (link->state & DEV_CONFIG) {
	netif_device_detach(dev);
	/* Release in ~50ms, from timer context. */
	mod_timer(&link->release, jiffies + HZ/20);
      }
      break;
    case CS_EVENT_CARD_INSERTION:
      link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
      nmclan_config(link);
      break;
    case CS_EVENT_PM_SUSPEND:
      link->state |= DEV_SUSPEND;
      /* Fall through... */
    case CS_EVENT_RESET_PHYSICAL:
      if (link->state & DEV_CONFIG) {
	if (link->open)
	  netif_device_detach(dev);
	CardServices(ReleaseConfiguration, link->handle);
      }
      break;
    case CS_EVENT_PM_RESUME:
      link->state &= ~DEV_SUSPEND;
      /* Fall through... */
    case CS_EVENT_CARD_RESET:
      if (link->state & DEV_CONFIG) {
	CardServices(RequestConfiguration, link->handle, &link->conf);
	if (link->open) {
	  nmclan_reset(dev);
	  netif_device_attach(dev);
	}
      }
      break;
    case CS_EVENT_RESET_REQUEST:
      return 1;
      /* NOTE(review): this break is unreachable after the return. */
      break;
  }
  return 0;
} /* nmclan_event */
+
+/* ----------------------------------------------------------------------------
+nmclan_reset
+ Reset and restore all of the Xilinx and MACE registers.
+---------------------------------------------------------------------------- */
+static void nmclan_reset(struct net_device *dev)
+{
+ mace_private *lp = dev->priv;
+
+#if RESET_XILINX
+ dev_link_t *link = &lp->link;
+ conf_reg_t reg;
+ u_long OrigCorValue;
+
+ /* Save original COR value */
+ reg.Function = 0;
+ reg.Action = CS_READ;
+ reg.Offset = CISREG_COR;
+ reg.Value = 0;
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ OrigCorValue = reg.Value;
+
+ /* Reset Xilinx */
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_COR;
+ DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
+ OrigCorValue);
+ reg.Value = COR_SOFT_RESET;
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ /* Need to wait for 20 ms for PCMCIA to finish reset. */
+
+ /* Restore original COR configuration index */
+ reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK);
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+#endif /* #if RESET_XILINX */
+
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ /* Reinitialize the MACE chip for operation. */
+ mace_init(dev->base_addr, dev->dev_addr);
+ mace_write(dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT);
+
+ /* Restore the multicast list and enable TX and RX. */
+ restore_multicast_list(dev);
+} /* nmclan_reset */
+
+/* ----------------------------------------------------------------------------
+mace_config
+ [Someone tell me what this is supposed to do? Is if_port a defined
+ standard? If so, there should be defines to indicate 1=10Base-T,
+ 2=10Base-2, etc. including limited automatic detection.]
+---------------------------------------------------------------------------- */
+static int mace_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 2) {
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n", dev->name,
+ if_names[dev->if_port]);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+} /* mace_config */
+
+/* ----------------------------------------------------------------------------
+mace_open
+ Open device driver.
+---------------------------------------------------------------------------- */
/* net_device open: bump the link/module use counts, force MACE bank 0,
   start the transmit queue, and (re)initialize the hardware.
   Fails only if the card is not present and configured. */
static int mace_open(struct net_device *dev)
{
  ioaddr_t ioaddr = dev->base_addr;
  mace_private *lp = dev->priv;
  dev_link_t *link = &lp->link;

  if (!DEV_OK(link))
    return -ENODEV;

  link->open++;
  MOD_INC_USE_COUNT;

  MACEBANK(0);

  netif_start_queue(dev);
  netif_mark_up(dev);
  nmclan_reset(dev);

  return 0; /* Always succeed */
} /* mace_open */
+
+/* ----------------------------------------------------------------------------
+mace_close
+ Closes device driver.
+---------------------------------------------------------------------------- */
/* net_device stop: mask all MACE interrupts, stop the queue, and --
   if the card was already pulled (DEV_STALE_CONFIG) -- re-arm the
   deferred nmclan_release() timer. */
static int mace_close(struct net_device *dev)
{
  ioaddr_t ioaddr = dev->base_addr;
  mace_private *lp = dev->priv;
  dev_link_t *link = &lp->link;

  DEBUG(2, "%s: shutting down ethercard.\n", dev->name);

  /* Mask off all interrupts from the MACE chip. */
  outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);

  link->open--;
  netif_stop_queue(dev);
  netif_mark_down(dev);
  if (link->state & DEV_STALE_CONFIG)
    mod_timer(&link->release, jiffies + HZ/20);

  MOD_DEC_USE_COUNT;

  return 0;
} /* mace_close */
+
+/* ----------------------------------------------------------------------------
+mace_start_xmit
+ This routine begins the packet transmit function. When completed,
+ it will generate a transmit interrupt.
+
+ According to /usr/src/linux/net/inet/dev.c, if _start_xmit
+ returns 0, the "packet is now solely the responsibility of the
+ driver." If _start_xmit returns non-zero, the "transmission
+ failed, put skb back into a list."
+---------------------------------------------------------------------------- */
+
/* Watchdog callback: the transmitter hung.  Optionally reset the
   whole card (RESET_ON_TIMEOUT), refresh trans_start, and restart
   the queue so the stack retries. */
static void mace_tx_timeout(struct net_device *dev)
{
  mace_private *lp = (mace_private *)dev->priv;
  dev_link_t *link = &lp->link;

  printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
#if RESET_ON_TIMEOUT
  printk("resetting card\n");
  CardServices(ResetCard, link->handle);
#else /* #if RESET_ON_TIMEOUT */
  printk("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
  dev->trans_start = jiffies;
  netif_wake_queue(dev);
}
+
/* Queue one frame to the Am2150 transmit FIFO: a length word
   (outw, mandatory) followed by the packet data, then free the skb.
   Unless TX_INTERRUPTABLE, MACE TX interrupts are masked for the
   duration of the copy and tx_irq_disabled flags the critical
   section to the interrupt handler.  Always returns 0 (packet
   accepted). */
static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
  mace_private *lp = (mace_private *)dev->priv;
  ioaddr_t ioaddr = dev->base_addr;

  tx_timeout_check(dev, mace_tx_timeout);
  skb_tx_check(dev, skb);

  DEBUG(3, "%s: mace_start_xmit(length = %ld) called.\n",
	dev->name, (long)skb->len);

#if (!TX_INTERRUPTABLE)
  /* Disable MACE TX interrupts. */
  outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
    ioaddr + AM2150_MACE_BASE + MACE_IMR);
  lp->tx_irq_disabled=1;
#endif /* #if (!TX_INTERRUPTABLE) */

  {
    /* This block must not be interrupted by another transmit request!
       mace_tx_timeout will take care of timer-based retransmissions from
       the upper layers.  The interrupt handler is guaranteed never to
       service a transmit interrupt while we are in here.
    */

    add_tx_bytes(&lp->linux_stats, skb->len);
    lp->tx_free_frames--;

    /* WARNING: Write the _exact_ number of bytes written in the header! */
    /* Put out the word header [must be an outw()] . . . */
    outw(skb->len, ioaddr + AM2150_XMT);
    /* . . . and the packet [may be any combination of outw() and outb()] */
    outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
    if (skb->len & 1) {
      /* Odd byte transfer */
      outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
    }

    dev->trans_start = jiffies;

#if MULTI_TX
    /* With multiple TX frames allowed, keep the queue running while
       the card still has free frame slots. */
    if (lp->tx_free_frames > 0)
      netif_start_queue(dev);
#endif /* #if MULTI_TX */
  }

#if (!TX_INTERRUPTABLE)
  /* Re-enable MACE TX interrupts. */
  lp->tx_irq_disabled=0;
  outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
#endif /* #if (!TX_INTERRUPTABLE) */

  DEV_KFREE_SKB(skb);

  return 0;
} /* mace_start_xmit */
+
+/* ----------------------------------------------------------------------------
+mace_interrupt
+ The interrupt handler.
+---------------------------------------------------------------------------- */
/* Interrupt handler: service receive and transmit completions and
   fold the MACE's error indications into mace_stats.  Loops while
   interrupt causes remain pending, bounded by
   MACE_MAX_IR_ITERATIONS.  Note that MACE_IR is read-to-clear. */
static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
  mace_private *lp = (mace_private *)dev_id;
  struct net_device *dev = &lp->dev;
  ioaddr_t ioaddr = dev->base_addr;
  int status;
  int IntrCnt = MACE_MAX_IR_ITERATIONS;

  /* NOTE(review): dev == &lp->dev can never be NULL here, so this
     guard appears dead; retained as-is. */
  if (dev == NULL) {
    DEBUG(2, "mace_interrupt(): irq 0x%X for unknown device.\n",
	  irq);
    return;
  }

  if (lp->tx_irq_disabled) {
    /* NOTE(review): the ternary re-tests the condition the enclosing
       if just established, so only the first message can print. */
    printk(
      (lp->tx_irq_disabled?
       KERN_NOTICE "%s: Interrupt with tx_irq_disabled "
       "[isr=%02X, imr=%02X]\n":
       KERN_NOTICE "%s: Re-entering the interrupt handler "
       "[isr=%02X, imr=%02X]\n"),
      dev->name,
      inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
      inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)
    );
    /* WARNING: MACE_IR has been read! */
    return;
  }

  if (!netif_device_present(dev)) {
    DEBUG(2, "%s: interrupt from dead card\n", dev->name);
    goto exception;
  }

  do {
    /* WARNING: MACE_IR is a READ/CLEAR port! */
    status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);

    DEBUG(3, "mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);

    if (status & MACE_IR_RCVINT) {
      mace_rx(dev, MACE_MAX_RX_ITERATIONS);
    }

    if (status & MACE_IR_XMTINT) {
      unsigned char fifofc;
      unsigned char xmtrc;
      unsigned char xmtfs;

      /* If no TX frame is reported complete, count an error and tell
	 the card to skip the frame. */
      fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
      if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
	lp->linux_stats.tx_errors++;
	outb(0xFF, ioaddr + AM2150_XMT_SKIP);
      }

      /* Transmit Retry Count (XMTRC, reg 4) */
      xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
      if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
      lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);

      if (
	(xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) &
	MACE_XMTFS_XMTSV /* Transmit Status Valid */
      ) {
	lp->mace_stats.xmtsv++;

	if (xmtfs & ~MACE_XMTFS_XMTSV) {
	  if (xmtfs & MACE_XMTFS_UFLO) {
	    /* Underflow.  Indicates that the Transmit FIFO emptied before
	       the end of frame was reached. */
	    lp->mace_stats.uflo++;
	  }
	  if (xmtfs & MACE_XMTFS_LCOL) {
	    /* Late Collision */
	    lp->mace_stats.lcol++;
	  }
	  if (xmtfs & MACE_XMTFS_MORE) {
	    /* MORE than one retry was needed */
	    lp->mace_stats.more++;
	  }
	  if (xmtfs & MACE_XMTFS_ONE) {
	    /* Exactly ONE retry occurred */
	    lp->mace_stats.one++;
	  }
	  if (xmtfs & MACE_XMTFS_DEFER) {
	    /* Transmission was defered */
	    lp->mace_stats.defer++;
	  }
	  if (xmtfs & MACE_XMTFS_LCAR) {
	    /* Loss of carrier */
	    lp->mace_stats.lcar++;
	  }
	  if (xmtfs & MACE_XMTFS_RTRY) {
	    /* Retry error: transmit aborted after 16 attempts */
	    lp->mace_stats.rtry++;
	  }
	} /* if (xmtfs & ~MACE_XMTFS_XMTSV) */

      } /* if (xmtfs & MACE_XMTFS_XMTSV) */

      lp->linux_stats.tx_packets++;
      lp->tx_free_frames++;
      netif_wake_queue(dev);
    } /* if (status & MACE_IR_XMTINT) */

    /* Remaining (error) interrupt causes. */
    if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
      if (status & MACE_IR_JAB) {
	/* Jabber Error.  Excessive transmit duration (20-150ms). */
	lp->mace_stats.jab++;
      }
      if (status & MACE_IR_BABL) {
	/* Babble Error.  >1518 bytes transmitted. */
	lp->mace_stats.babl++;
      }
      if (status & MACE_IR_CERR) {
	/* Collision Error.  CERR indicates the absence of the
	   Signal Quality Error Test message after a packet
	   transmission. */
	lp->mace_stats.cerr++;
      }
      if (status & MACE_IR_RCVCCO) {
	/* Receive Collision Count Overflow; */
	lp->mace_stats.rcvcco++;
      }
      if (status & MACE_IR_RNTPCO) {
	/* Runt Packet Count Overflow */
	lp->mace_stats.rntpco++;
      }
      if (status & MACE_IR_MPCO) {
	/* Missed Packet Count Overflow */
	lp->mace_stats.mpco++;
      }
    } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */

  } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));

exception:
  return;
} /* mace_interrupt */
+
+/* ----------------------------------------------------------------------------
+mace_rx
+ Receives packets.
+---------------------------------------------------------------------------- */
+static int mace_rx(struct net_device *dev, unsigned char RxCnt)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ unsigned char rx_framecnt;
+ unsigned short rx_status;
+
+ while (
+ ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
+ (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
+ (RxCnt--)
+ ) {
+ rx_status = inw(ioaddr + AM2150_RCV);
+
+ DEBUG(3, "%s: in mace_rx(), framecnt 0x%X, rx_status"
+ " 0x%X.\n", dev->name, rx_framecnt, rx_status);
+
+ if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
+ lp->linux_stats.rx_errors++;
+ if (rx_status & MACE_RCVFS_OFLO) {
+ lp->mace_stats.oflo++;
+ }
+ if (rx_status & MACE_RCVFS_CLSN) {
+ lp->mace_stats.clsn++;
+ }
+ if (rx_status & MACE_RCVFS_FRAM) {
+ lp->mace_stats.fram++;
+ }
+ if (rx_status & MACE_RCVFS_FCS) {
+ lp->mace_stats.fcs++;
+ }
+ } else {
+ short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
+ /* Auto Strip is off, always subtract 4 */
+ struct sk_buff *skb;
+
+ lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
+ /* runt packet count */
+ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
+ /* rcv collision count */
+
+ DEBUG(3, " receiving packet size 0x%X rx_status"
+ " 0x%X.\n", pkt_len, rx_status);
+
+ skb = dev_alloc_skb(pkt_len+2);
+
+ if (skb != NULL) {
+ skb->dev = dev;
+
+ skb_reserve(skb, 2);
+ insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
+ if (pkt_len & 1)
+ *(skb->tail-1) = inb(ioaddr + AM2150_RCV);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
+
+ dev->last_rx = jiffies;
+ lp->linux_stats.rx_packets++;
+ add_rx_bytes(&lp->linux_stats, skb->len);
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ continue;
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of size"
+ " %d.\n", dev->name, pkt_len);
+ lp->linux_stats.rx_dropped++;
+ }
+ }
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ } /* while */
+
+ return 0;
+} /* mace_rx */
+
+/* ----------------------------------------------------------------------------
+pr_linux_stats
+---------------------------------------------------------------------------- */
/* Dump every field of the generic Linux net_device_stats at debug
   level 2.  Pure diagnostic output; no state is modified. */
static void pr_linux_stats(struct net_device_stats *pstats)
{
  DEBUG(2, "pr_linux_stats\n");
  DEBUG(2, " rx_packets=%-7ld tx_packets=%ld\n",
	(long)pstats->rx_packets, (long)pstats->tx_packets);
  DEBUG(2, " rx_errors=%-7ld tx_errors=%ld\n",
	(long)pstats->rx_errors, (long)pstats->tx_errors);
  DEBUG(2, " rx_dropped=%-7ld tx_dropped=%ld\n",
	(long)pstats->rx_dropped, (long)pstats->tx_dropped);
  DEBUG(2, " multicast=%-7ld collisions=%ld\n",
	(long)pstats->multicast, (long)pstats->collisions);

  DEBUG(2, " rx_length_errors=%-7ld rx_over_errors=%ld\n",
	(long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
  DEBUG(2, " rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
	(long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
  DEBUG(2, " rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
	(long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);

  DEBUG(2, " tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
	(long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
  DEBUG(2, " tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
	(long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
  DEBUG(2, " tx_window_errors=%ld\n",
	(long)pstats->tx_window_errors);
} /* pr_linux_stats */
+
+/* ----------------------------------------------------------------------------
+pr_mace_stats
+---------------------------------------------------------------------------- */
/* Dump the driver's MACE-specific counters at debug level 2, grouped
   by the hardware register they are derived from.  Diagnostic only. */
static void pr_mace_stats(mace_statistics *pstats)
{
  DEBUG(2, "pr_mace_stats\n");

  /* MACE_XMTFS -- transmit frame status */
  DEBUG(2, " xmtsv=%-7d uflo=%d\n",
	pstats->xmtsv, pstats->uflo);
  DEBUG(2, " lcol=%-7d more=%d\n",
	pstats->lcol, pstats->more);
  DEBUG(2, " one=%-7d defer=%d\n",
	pstats->one, pstats->defer);
  DEBUG(2, " lcar=%-7d rtry=%d\n",
	pstats->lcar, pstats->rtry);

  /* MACE_XMTRC */
  DEBUG(2, " exdef=%-7d xmtrc=%d\n",
	pstats->exdef, pstats->xmtrc);

  /* RFS1--Receive Status (RCVSTS) */
  DEBUG(2, " oflo=%-7d clsn=%d\n",
	pstats->oflo, pstats->clsn);
  DEBUG(2, " fram=%-7d fcs=%d\n",
	pstats->fram, pstats->fcs);

  /* RFS2--Runt Packet Count (RNTPC) */
  /* RFS3--Receive Collision Count (RCVCC) */
  DEBUG(2, " rfs_rntpc=%-7d rfs_rcvcc=%d\n",
	pstats->rfs_rntpc, pstats->rfs_rcvcc);

  /* MACE_IR */
  DEBUG(2, " jab=%-7d babl=%d\n",
	pstats->jab, pstats->babl);
  DEBUG(2, " cerr=%-7d rcvcco=%d\n",
	pstats->cerr, pstats->rcvcco);
  DEBUG(2, " rntpco=%-7d mpco=%d\n",
	pstats->rntpco, pstats->mpco);

  /* MACE_MPC */
  DEBUG(2, " mpc=%d\n", pstats->mpc);

  /* MACE_RNTPC */
  DEBUG(2, " rntpc=%d\n", pstats->rntpc);

  /* MACE_RCVCC */
  DEBUG(2, " rcvcc=%d\n", pstats->rcvcc);

} /* pr_mace_stats */
+
+/* ----------------------------------------------------------------------------
+update_stats
+ Update statistics. We change to register window 1, so this
+ should be run single-threaded if the device is active. This is
+ expected to be a rare operation, and it's simpler for the rest
+ of the driver to assume that window 0 is always valid rather
+ than use a special window-state variable.
+
+ oflo & uflo should _never_ occur since it would mean the Xilinx
+ was not able to transfer data between the MACE FIFO and the
+ card's SRAM fast enough. If this happens, something is
+ seriously wrong with the hardware.
+---------------------------------------------------------------------------- */
/* Fold the MACE's hardware counters into mace_stats, then derive the
   linux_stats error fields from the accumulated values.  The RCVCC,
   RNTPC and MPC registers are accumulated with += (presumably they
   are clear-on-read -- confirm against the Am79C940 datasheet). */
static void update_stats(ioaddr_t ioaddr, struct net_device *dev)
{
  mace_private *lp = (mace_private *)dev->priv;

  lp->mace_stats.rcvcc += mace_read(ioaddr, MACE_RCVCC);
  lp->mace_stats.rntpc += mace_read(ioaddr, MACE_RNTPC);
  lp->mace_stats.mpc += mace_read(ioaddr, MACE_MPC);
  /* At this point, mace_stats is fully updated for this call.
     We may now update the linux_stats. */

  /* The MACE has no equivalent for linux_stats field which are commented
     out. */

#if 0
  /* These must be tracked in the main body of the driver. */
  lp->linux_stats.rx_packets;
  lp->linux_stats.tx_packets;
  lp->linux_stats.rx_errors;
  lp->linux_stats.tx_errors;
  lp->linux_stats.rx_dropped;
  lp->linux_stats.tx_dropped;
#endif
  /* lp->linux_stats.multicast; */
  /* The *co counters are overflow (carry) counts for the 8-bit
     hardware registers, hence the *256 scaling below. */
  lp->linux_stats.collisions =
    lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
  /* Collision: The MACE may retry sending a packet 15 times
     before giving up.  The retry count is in XMTRC.
     Does each retry constitute a collision?
     If so, why doesn't the RCVCC record these collisions? */

  /* detailed rx_errors: */
  lp->linux_stats.rx_length_errors =
    lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
  /* lp->linux_stats.rx_over_errors */
  lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
  lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
  lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
  lp->linux_stats.rx_missed_errors =
    lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;

  /* detailed tx_errors */
  lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
  lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
  /* LCAR usually results from bad cabling. */
  lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
  lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
  /* lp->linux_stats.tx_window_errors; */

  return;
} /* update_stats */
+
+/* ----------------------------------------------------------------------------
+mace_get_stats
+ Gathers ethernet statistics from the MACE chip.
+---------------------------------------------------------------------------- */
/* get_stats method: sync the hardware counters into linux_stats,
   optionally dump both stat sets at debug levels, and return the
   linux_stats structure to the caller. */
static struct net_device_stats *mace_get_stats(struct net_device *dev)
{
  mace_private *lp = (mace_private *)dev->priv;

  /* Pull the MACE hardware counters into lp before reporting. */
  update_stats(dev->base_addr, dev);

  DEBUG(1, "%s: updating the statistics.\n", dev->name);
  pr_linux_stats(&lp->linux_stats);
  pr_mace_stats(&lp->mace_stats);

  return &lp->linux_stats;
} /* mace_get_stats */
+
+/* ----------------------------------------------------------------------------
+updateCRC
+ Modified from Am79C90 data sheet.
+---------------------------------------------------------------------------- */
+
+#if BROKEN_MULTICAST
+
/* Advance the 33-element software CRC register by one input bit.
   CRC[0..31] hold the register contents, one int per bit (CRC[n] is
   the x**n coefficient); CRC[32] is the control bit shifted out on
   each step.  Adapted from the Am79C90 data sheet. */
static void updateCRC(int *CRC, int bit)
{
  /* Ethernet CRC generator polynomial, one coefficient per entry:
     poly[n] = coefficient of the x**n term. */
  static const int poly[32] = {
    1,1,1,0, 1,1,0,1,
    1,0,1,1, 1,0,0,0,
    1,0,0,0, 0,0,1,1,
    0,0,1,0, 0,0,0,0
  };
  int j;

  /* Shift the whole register up one position (CRC[32] receives the
     bit shifted out of CRC[31]); a zero enters at CRC[0]. */
  for (j = 31; j >= 0; j--)
    CRC[j + 1] = CRC[j];
  CRC[0] = 0;

  /* Feedback: when the input bit differs from the shifted-out
     control bit, fold the generator polynomial into the register. */
  if (bit != CRC[32]) {
    for (j = 0; j < 32; j++)
      CRC[j] ^= poly[j];
  }
} /* updateCRC */
+
+/* ----------------------------------------------------------------------------
+BuildLAF
+ Build logical address filter.
+ Modified from Am79C90 data sheet.
+
+Input
+ ladrf: logical address filter (contents initialized to 0)
+ adr: ethernet address
+---------------------------------------------------------------------------- */
/* Build the 64-bit logical address filter (ladrf, 8 bytes, caller
   zero-initialized) for one ethernet multicast address (adr, 6
   bytes): run all 48 address bits LSB-first through the CRC register,
   take the 6 low-order CRC bits as the hash code, and set that bit in
   the filter.  Adapted from the Am79C90 data sheet. */
static void BuildLAF(int *ladrf, int *adr)
{
  int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */
  int octet, bitpos, i;
  int hashcode;    /* the output object */

  CRC[32]=0;

  /* Feed every address bit, least-significant first within each
     byte, through the CRC register. */
  for (octet = 0; octet < 6; octet++)
    for (bitpos = 0; bitpos < 8; bitpos++)
      updateCRC(CRC, (adr[octet] >> bitpos) & 1);

  /* Collect the 6 low-order CRC coefficients as the hash code. */
  hashcode = 0;
  for (i = 0; i < 6; i++)
    hashcode = (hashcode << 1) | CRC[i];

  /* Set bit `hashcode' (0-63) in the filter. */
  ladrf[hashcode >> 3] |= (1 << (hashcode & 7));

#ifdef PCMCIA_DEBUG
  if (pc_debug > 2) {
    printk(KERN_DEBUG " adr =");
    for (i = 0; i < 6; i++)
      printk(" %02X", adr[i]);
    printk("\n" KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63]"
	   " =", hashcode);
    for (i = 0; i < 8; i++)
      printk(" %02X", ladrf[i]);
    printk("\n");
  }
#endif
} /* BuildLAF */
+
+/* ----------------------------------------------------------------------------
+restore_multicast_list
+ Restores the multicast filter for MACE chip to the last
+ set_multicast_list() call.
+
+Input
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+static void restore_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ int num_addrs = lp->multicast_num_addrs;
+ int *ladrf = lp->multicast_ladrf;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ DEBUG(2, "%s: restoring Rx mode to %d addresses.\n",
+ dev->name, num_addrs);
+
+ if (num_addrs > 0) {
+
+ DEBUG(1, "Attempt to restore multicast list detected.\n");
+
+ mace_write(ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
+ /* Poll ADDRCHG bit */
+ while (mace_read(ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ ;
+ /* Set LADRF register */
+ for (i = 0; i < MACE_LADRF_LEN; i++)
+ mace_write(ioaddr, MACE_LADRF, ladrf[i]);
+
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ } else if (num_addrs < 0) {
+
+ /* Promiscuous mode: receive all packets */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+
+ } else {
+
+ /* Normal mode */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ }
+} /* restore_multicast_list */
+
+/* ----------------------------------------------------------------------------
+set_multicast_list
+ Set or clear the multicast filter for this adaptor.
+
+Input
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+Output
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ int i;
+ struct dev_mc_list *dmi = dev->mc_list;
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 1) {
+ static int old = 0;
+ if (dev->mc_count != old) {
+ old = dev->mc_count;
+ DEBUG(0, "%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ /* Set multicast_num_addrs. */
+ lp->multicast_num_addrs = dev->mc_count;
+
+ /* Set multicast_ladrf. */
+ if (num_addrs > 0) {
+ /* Calculate multicast logical address filter */
+ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
+ for (i = 0; i < dev->mc_count; i++) {
+ memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
+ dmi = dmi->next;
+ BuildLAF(lp->multicast_ladrf, adr);
+ }
+ }
+
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+#endif /* BROKEN_MULTICAST */
+
+static void restore_multicast_list(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name,
+ ((mace_private *)(dev->priv))->multicast_num_addrs);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode: receive all packets */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+ } else {
+ /* Normal mode */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+ }
+} /* restore_multicast_list */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 1) {
+ static int old = 0;
+ if (dev->mc_count != old) {
+ old = dev->mc_count;
+ DEBUG(0, "%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ lp->multicast_num_addrs = dev->mc_count;
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+/* ----------------------------------------------------------------------------
+init_nmclan_cs
+---------------------------------------------------------------------------- */
+
+static int __init init_nmclan_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "nmclan_cs: Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &nmclan_attach, &nmclan_detach);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------
+exit_nmclan_cs
+---------------------------------------------------------------------------- */
+
+static void __exit exit_nmclan_cs(void)
+{
+ DEBUG(0, "nmclan_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ nmclan_detach(dev_list);
+}
+
+module_init(init_nmclan_cs);
+module_exit(exit_nmclan_cs);
diff --git a/linux/pcmcia-cs/clients/ositech.h b/linux/pcmcia-cs/clients/ositech.h
new file mode 100644
index 0000000..4126efc
--- /dev/null
+++ b/linux/pcmcia-cs/clients/ositech.h
@@ -0,0 +1,358 @@
+/*
+ This file contains the firmware of Seven of Diamonds from OSITECH.
+ (Special thanks to Kevin MacPherson of OSITECH)
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+*/
+
+ static const u_char __Xilinx7OD[] = {
+ 0xFF, 0x04, 0xA0, 0x36, 0xF3, 0xEC, 0xFF, 0xFF, 0xFF, 0xDF, 0xFB, 0xFF,
+ 0xF3, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0x3F, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x7F, 0xFE, 0xFF,
+ 0xCE, 0xFE, 0xFE, 0xFE,
+ 0xFE, 0xDE, 0xBD, 0xDD, 0xFD, 0xFF, 0xFD, 0xCF, 0xF7, 0xBF, 0x7F, 0xFF,
+ 0x7F, 0x3F, 0xFE, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xBC, 0xFF, 0xFF, 0xBD, 0xB5, 0x7F, 0x7F, 0xBF, 0xBF,
+ 0x7F, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDE,
+ 0xFE, 0xFE, 0xFA, 0xDE,
+ 0xBD, 0xFD, 0xED, 0xFD, 0xFD, 0xCF, 0xEF, 0xEF, 0xEF, 0xEF, 0xC7, 0xDF,
+ 0xDF, 0xDF, 0xDF, 0xDF,
+ 0xFF, 0x7E, 0xFE, 0xFD, 0x7D, 0x6D, 0xEE, 0xFE, 0x7C, 0xFB, 0xF4, 0xFB,
+ 0xCF, 0xDB, 0xDF, 0xFF,
+ 0xFF, 0xBB, 0x7F, 0xFF, 0x7F, 0xFF, 0xF7, 0xFF, 0x9E, 0xBF, 0x3B, 0xBF,
+ 0xBF, 0x7F, 0x7F, 0x7F,
+ 0x7E, 0x6F, 0xDF, 0xEF, 0xF5, 0xF6, 0xFD, 0xF6, 0xF5, 0xED, 0xEB, 0xFF,
+ 0xEF, 0xEF, 0xEF, 0x7E,
+ 0x7F, 0x7F, 0x6F, 0x7F, 0xFF, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xEF, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0x1F, 0x1F, 0xEE, 0xFF, 0xBC,
+ 0xB7, 0xFF, 0xDF, 0xFF,
+ 0xDF, 0xEF, 0x3B, 0xE3, 0xD3, 0xFF, 0xFB, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF,
+ 0xFF, 0xBA, 0xBF, 0x2D,
+ 0xDB, 0xBD, 0xFD, 0xDB, 0xDF, 0xFA, 0xFB, 0xFF, 0xEF, 0xFB, 0xDB, 0xF3,
+ 0xFF, 0xDF, 0xFD, 0x7F,
+ 0xEF, 0xFB, 0xFF, 0xFF, 0xBE, 0xBF, 0x27, 0xBA, 0xFE, 0xFB, 0xDF, 0xFF,
+ 0xF6, 0xFF, 0xFF, 0xEF,
+ 0xFB, 0xDB, 0xF3, 0xD9, 0x9A, 0x3F, 0xFF, 0xAF, 0xBF, 0xFF, 0xFF, 0xBE,
+ 0x3F, 0x37, 0xBD, 0x96,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE, 0xFB, 0xF3, 0xF3, 0xEB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xFA, 0xBC, 0xAE, 0xFE, 0xBE, 0xFE, 0xBB, 0x7F, 0xFD, 0xFF,
+ 0x7F, 0xEF, 0xF7, 0xFB,
+ 0xBB, 0xD7, 0xF7, 0x7F, 0xFF, 0xF7, 0xFF, 0xFF, 0xF7, 0xBC, 0xED, 0xFD,
+ 0xBD, 0x9D, 0x7D, 0x7B,
+ 0xFB, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFE, 0xFD, 0xFD, 0xFE, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xAA, 0xB9, 0xBF, 0x8F, 0xBF, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0x7F, 0xCF,
+ 0xFB, 0xEB, 0xCB, 0xEB,
+ 0xEE, 0xFF, 0xFF, 0xD7, 0xFF, 0xFF, 0xFF, 0x3E, 0x33, 0x3F, 0x1C, 0x7C,
+ 0xFC, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xCF, 0xD3, 0xF3, 0xE3, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEB, 0xFE, 0x35,
+ 0x3F, 0x3D, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xEF, 0x6F, 0xE3,
+ 0xE3, 0xE3, 0xEF, 0xFF,
+ 0xFF, 0xDF, 0xFF, 0xFF, 0xF7, 0xFE, 0x3E, 0x5E, 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFD, 0xFF, 0xFF,
+ 0xAF, 0xCF, 0xF2, 0xCB, 0xCF, 0x8E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD,
+ 0xFC, 0x3E, 0x1F, 0x9E,
+ 0xAD, 0xFD, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xEF, 0xFF, 0xB3, 0xF7, 0xE7,
+ 0xF7, 0xFA, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEE, 0xEB, 0xAB, 0xAF, 0x9F, 0xE3, 0x7F, 0xFF, 0xDE,
+ 0xFF, 0x7F, 0xEE, 0xFF,
+ 0xFF, 0xFB, 0x3A, 0xFA, 0xFF, 0xF2, 0x77, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF,
+ 0xFE, 0xBD, 0xAE, 0xDE,
+ 0x7D, 0x7D, 0xFD, 0xFF, 0xBF, 0xEE, 0xFF, 0xFD, 0xFF, 0xDB, 0xFB, 0xFF,
+ 0xF7, 0xEF, 0xFB, 0xFF,
+ 0xFF, 0xFE, 0xFF, 0x2D, 0xAF, 0xB9, 0xFD, 0x79, 0xFB, 0xFA, 0xFF, 0xBF,
+ 0xEF, 0xFF, 0xFF, 0x91,
+ 0xFA, 0xFB, 0xDF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFC, 0xCF, 0x37, 0xBF,
+ 0xBF, 0xFF, 0x7F, 0x7F,
+ 0xFF, 0xFF, 0xFF, 0xAF, 0xFF, 0xFF, 0xF3, 0xFB, 0xFB, 0xFF, 0xF5, 0xEF,
+ 0xFF, 0xFF, 0xF7, 0xFA,
+ 0xFF, 0xFF, 0xEE, 0xFA, 0xFE, 0xFB, 0x55, 0xDD, 0xFF, 0x7F, 0xAF, 0xFE,
+ 0xFF, 0xFB, 0xFB, 0xF5,
+ 0xFF, 0xF7, 0xEF, 0xFF, 0xFF, 0xFF, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D,
+ 0x7B, 0x7B, 0x7B, 0x7B,
+ 0xFB, 0xAE, 0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xDA, 0xB7, 0x61,
+ 0xFF, 0xB9, 0x59, 0xF3, 0x73, 0xF3, 0xDF, 0x7F, 0x6F, 0xDF, 0xEF, 0xF7,
+ 0xEB, 0xEB, 0xD7, 0xFF,
+ 0xD7, 0xFF, 0xFF, 0xF7, 0xFE, 0x7F, 0xFB, 0x3E, 0x38, 0x73, 0xF6, 0x7F,
+ 0xFC, 0xFF, 0xFF, 0xCF,
+ 0xFF, 0xB7, 0xFB, 0xB3, 0xB3, 0x67, 0xFF, 0xE7, 0xFD, 0xFF, 0xEF, 0xF6,
+ 0x7F, 0xB7, 0xBC, 0xF5,
+ 0x7B, 0xF6, 0xF7, 0xF5, 0xFF, 0xFF, 0xEF, 0xFF, 0xF7, 0xFF, 0xF7, 0xCE,
+ 0xE7, 0xFF, 0x9F, 0xFF,
+ 0xFF, 0xF5, 0xFE, 0x7D, 0xFF, 0x5F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xFF, 0xF6,
+ 0xCB, 0xDB, 0xEE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFE, 0x7F, 0xBE,
+ 0x1E, 0x3E, 0xFE, 0xFF,
+ 0x7D, 0xFE, 0xFF, 0xFF, 0xEF, 0xBF, 0xE7, 0xFF, 0xE3, 0xE3, 0xFF, 0xDF,
+ 0xE7, 0xFF, 0xFF, 0xFF,
+ 0xB8, 0xEF, 0xB7, 0x2F, 0xEE, 0xFF, 0xDF, 0xFF, 0xBF, 0xFF, 0x7F, 0xEF,
+ 0xEB, 0xBF, 0xA3, 0xD3,
+ 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xFD, 0x3F, 0xCF, 0xFD,
+ 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xFB, 0xBF, 0xBB, 0xBF, 0xDB, 0xFD, 0xFB, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x3E, 0xFE,
+ 0x3F, 0xBA, 0xBA, 0xFE, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xEF, 0xC3, 0x7F,
+ 0xB2, 0x9B, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0x3C, 0xFF, 0x3F, 0x3C, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xAF, 0xF3, 0xFE, 0xF3, 0xE3, 0xEB, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xF7,
+ 0x9A, 0xFE, 0xAF, 0x9E,
+ 0xBE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0x7B, 0xEF, 0xF7, 0xBF, 0xFB, 0xFB,
+ 0xFB, 0xFF, 0xFF, 0x7F,
+ 0xFF, 0xFF, 0xFF, 0xBC, 0xBD, 0xFD, 0xBD, 0xDD, 0x7D, 0x7B, 0x7B, 0x7B,
+ 0x7B, 0xFB, 0xAE, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xF7, 0x9A, 0xFF,
+ 0x9F, 0xFF, 0xAF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xCF, 0xF3, 0xFF, 0xEB, 0xFF, 0xEB, 0xFF,
+ 0xFF, 0xBF, 0xFF, 0xFF,
+ 0xEF, 0xFE, 0xFF, 0x37, 0xFC, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xCF, 0xEF, 0xFD, 0xF3,
+ 0xFF, 0xEE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0xFD, 0x2F, 0xFD,
+ 0xFF, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF, 0xCF, 0xFF, 0xF3, 0xBF, 0x69, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE,
+ 0xFB, 0x9F, 0xFF, 0xBF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x87,
+ 0xFE, 0xDA, 0xEF, 0xCF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xBF, 0xEF, 0xEF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xFD, 0xFF, 0x7B, 0xFF, 0xEB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEB, 0xF8, 0xFF, 0xEF,
+ 0xAF, 0xFF, 0xFF, 0xBD, 0xFF, 0xFF, 0xFF, 0x7F, 0xEE, 0x7F, 0xEF, 0xFF,
+ 0xBB, 0xFF, 0xBF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xF7, 0xF6, 0xFB, 0xBD, 0xFD, 0xDD, 0xF5, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xAF,
+ 0xFF, 0x5F, 0xF5, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF6,
+ 0xF3, 0xFF, 0xDE, 0xFE,
+ 0xEF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xDE, 0xDF, 0x5F, 0xDF,
+ 0xFD, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xAF, 0xFF, 0xFF,
+ 0xEF, 0xED, 0xFF, 0xDF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xDA, 0xBD, 0xBE,
+ 0xAE, 0xFE, 0x7F, 0xFD,
+ 0xDF, 0xFF, 0xFF, 0x7F, 0xEF, 0xFF, 0xFB, 0xFB, 0xFB, 0x7F, 0xF7, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xBC, 0xFD, 0xBD, 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE,
+ 0xFF, 0xFF, 0xFD, 0xFF,
+ 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x9F, 0xBF, 0xBF, 0xCF,
+ 0x7F, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xFF, 0xEB, 0xEB, 0xEB, 0xFF, 0xD7, 0xFE, 0xFF, 0xFF,
+ 0xBF, 0xE7, 0xFE, 0xBF,
+ 0x7F, 0xFC, 0xFF, 0xFF, 0xED, 0xFF, 0xFF, 0xFF, 0xFF, 0x4F, 0xFF, 0xFB,
+ 0xFB, 0xFF, 0xFF, 0xDD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBD, 0xDF, 0x9D, 0xFD, 0xDF, 0xB9,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFB, 0xEF, 0xEB, 0xFF, 0xDE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF6, 0x9F, 0xFF, 0xFC,
+ 0xFE, 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xDF, 0xFA, 0xCD, 0xCF,
+ 0xBF, 0x9F, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xFE, 0xBF, 0xFF, 0xDF, 0xEF, 0x5F, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0x6F, 0xFF,
+ 0xBB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7E, 0xFF,
+ 0x5F, 0xFF, 0xBF, 0xBF,
+ 0xF9, 0xFF, 0xFF, 0xFF, 0x7F, 0x6E, 0x7B, 0xFF, 0xEF, 0xFD, 0xEB, 0xDF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xB6, 0x3E, 0xFC, 0xFD, 0xBF, 0x7E, 0xFB, 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xEF, 0xF7, 0xF3, 0xF7,
+ 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0x35, 0x79, 0xFF,
+ 0xBF, 0xFC, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF, 0xFB, 0x53, 0xDF, 0xFF, 0xEB, 0xBF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xBC,
+ 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xF5,
+ 0xFF, 0xF7, 0xFF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xAA, 0xEE, 0xFE, 0x3F, 0x7D,
+ 0xFD, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xAF, 0x77, 0xFB, 0xFB, 0xFF, 0xFB, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xBE, 0xBD, 0xBD,
+ 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFC,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x9A, 0xD9, 0xB8, 0xFF, 0xFF, 0x79, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xCF,
+ 0xFB, 0xFF, 0xEB, 0xFF, 0xEB, 0xD7, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xDE,
+ 0xF8, 0xFB, 0xFE, 0x3F,
+ 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xAD, 0xBF, 0xFA, 0xFF, 0x73,
+ 0xDF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x3A, 0xF5, 0xB7, 0xFC, 0x3F, 0xF9, 0xFD, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xEF, 0xF3, 0xFF,
+ 0xBF, 0xFE, 0xF3, 0x9F, 0xFE, 0xFF, 0xFF, 0xFF, 0xF7, 0x3E, 0xFF, 0xFF,
+ 0xFF, 0xBF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xD3, 0xFE, 0xDB, 0xFF, 0xDB, 0xDF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x3E, 0xFF, 0xBF, 0xFF, 0x7F, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x8F,
+ 0xF3, 0xFF, 0xED, 0xFF,
+ 0xF7, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF6, 0x3C, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x9F, 0xEF, 0xEF, 0xD1, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x7E, 0xBF,
+ 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBB, 0xEF, 0xDF, 0xF1,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE, 0x3E, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xBF,
+ 0xEF, 0xFD, 0xC3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFC, 0x3E, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x2E, 0xEF, 0xF3, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xAF, 0xFB,
+ 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xF2, 0xD6, 0xED,
+ 0xBD, 0xBD, 0xBD, 0x7D,
+ 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x92, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xAF, 0xEB, 0xEB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFE, 0x2E, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x4F, 0xEF, 0xF3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE,
+ 0x3C, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xCE,
+ 0xC3, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x5D, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xCF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xEE, 0x3E, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xDF, 0xE2, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF6, 0xBE, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x7F, 0xEE,
+ 0x5F, 0xE6, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3E,
+ 0x7D, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xF7, 0x36, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xD3, 0xF6,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x7F, 0xEE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xEF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE,
+ 0xFB, 0xFA, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xD6, 0xFD, 0xBD, 0xBD, 0xBD,
+ 0x7D, 0x7B, 0x7B, 0x7B,
+ 0x7B, 0xFB, 0xAE, 0xFF, 0x7E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xBA, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xEB, 0x6B,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x4F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0x3E, 0x6E, 0xFC, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xC3, 0xC9, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x3E, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFB,
+ 0xD5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFE,
+ 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6F, 0xEF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF6, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE,
+ 0xEF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0xFE, 0xFF, 0xF7, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xFA, 0xEF, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xFF, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE, 0xEF, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xA7, 0xFF, 0xFC, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xFE, 0xAE, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7,
+ 0xF7, 0xFA, 0xFF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B,
+ 0x7B, 0x7B, 0xFB, 0xAF,
+ 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCA,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x6F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xCF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xF2, 0xFC,
+ 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAE, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7E, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFE, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xEF, 0xDD, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFA, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF6, 0x9C, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB,
+ 0xAE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7A, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xDF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x6F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xFE,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xEB,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x9E, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xCB, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xBE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xEF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFB, 0xAF, 0x7F, 0xFF,
+ 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xBC, 0xBD,
+ 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xAF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFE, 0xFF, 0x9F, 0x9F,
+ 0x9F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0xFF, 0xEF, 0xDF, 0xDF, 0xDF, 0xDF, 0xCF, 0xB7, 0xBF, 0xBF,
+ 0xBF, 0xBF, 0xFF, 0xBC,
+ 0xB9, 0x9D, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xEF, 0xD7,
+ 0xF5, 0xF3, 0xF1, 0xD1,
+ 0x65, 0xE3, 0xE3, 0xE3, 0xA3, 0xFF, 0xFE, 0x7F, 0xFE, 0xDE, 0xDE, 0xFF,
+ 0xBD, 0xBD, 0xBD, 0xBD,
+ 0xDF, 0xEF, 0xFB, 0xF7, 0xF3, 0xF3, 0xF3, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
+ 0xFB, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+
+ };
diff --git a/linux/pcmcia-cs/clients/pcnet_cs.c b/linux/pcmcia-cs/clients/pcnet_cs.c
new file mode 100644
index 0000000..8b3c1ec
--- /dev/null
+++ b/linux/pcmcia-cs/clients/pcnet_cs.c
@@ -0,0 +1,1702 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for NS8390-based cards
+
+ This driver supports the D-Link DE-650 and Linksys EthernetCard
+ cards, the newer D-Link and Linksys combo cards, Accton EN2212
+ cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory
+ mode, and the IBM Credit Card Adapter, the NE4100, the Thomas
+ Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory
+ mode. It will also handle the Socket EA card in either mode.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ pcnet_cs.c 1.153 2003/11/09 18:53:09
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+ Based also on Keith Moore's changes to Don Becker's code, for IBM
+ CCAE support. Drivers merged back together, and shared-memory
+ Socket EA support added, by Ken Raeburn, September 1995.
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include <../drivers/net/8390.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#define PCNET_CMD 0x00
+#define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define PCNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define PCNET_MISC 0x18 /* For IBM CCAE and Socket EA cards */
+
+#define PCNET_START_PG 0x40 /* First page of TX buffer */
+#define PCNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* Socket EA cards have a larger packet buffer */
+#define SOCKET_START_PG 0x01
+#define SOCKET_STOP_PG 0xff
+
+#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
+
+static char *if_names[] = { "auto", "10baseT", "10base2"};
+
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+MODULE_PARM(pc_debug, "i");
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"pcnet_cs.c 1.153 2003/11/09 18:53:09 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+INT_MODULE_PARM(if_port, 1); /* Transceiver type */
+INT_MODULE_PARM(use_big_buf, 1); /* use 64K packet buffer? */
+INT_MODULE_PARM(mem_speed, 0); /* shared mem speed, in ns */
+INT_MODULE_PARM(delay_output, 0); /* pause after xmit? */
+INT_MODULE_PARM(delay_time, 4); /* in usec */
+INT_MODULE_PARM(use_shmem, -1); /* use shared memory? */
+INT_MODULE_PARM(full_duplex, 0); /* full duplex? */
+
+/* Ugh! Let the user hardwire the hardware address for queer cards */
+static int hw_addr[6] = { 0, /* ... */ };
+MODULE_PARM(hw_addr, "6i");
+
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev);
+static void pcnet_config(dev_link_t *link);
+static void pcnet_release(u_long arg);
+static int pcnet_event(event_t event, int priority,
+ event_callback_args_t *args);
+static int pcnet_open(struct net_device *dev);
+static int pcnet_close(struct net_device *dev);
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
+static void ei_watchdog(u_long arg);
+static void pcnet_reset_8390(struct net_device *dev);
+static int set_config(struct net_device *dev, struct ifmap *map);
+static int setup_shmem_window(dev_link_t *link, int start_pg,
+ int stop_pg, int cm_offset);
+static int setup_dma_config(dev_link_t *link, int start_pg,
+ int stop_pg);
+
+static dev_link_t *pcnet_attach(void);
+static void pcnet_detach(dev_link_t *);
+
+static dev_info_t dev_info = "pcnet_cs";
+static dev_link_t *dev_list;
+
+/*====================================================================*/
+
+typedef struct hw_info_t {
+ u_int offset;
+ u_char a0, a1, a2;
+ u_int flags;
+} hw_info_t;
+
+#define DELAY_OUTPUT 0x01
+#define HAS_MISC_REG 0x02
+#define USE_BIG_BUF 0x04
+#define HAS_IBM_MISC 0x08
+#define IS_DL10019 0x10
+#define IS_DL10022 0x20
+#define HAS_MII 0x40
+#define USE_SHMEM 0x80 /* autodetected */
+
+#define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */
+#define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */
+#define MII_PHYID_REV_MASK 0xfffffff0
+#define MII_PHYID_REG1 0x02
+#define MII_PHYID_REG2 0x03
+
+static hw_info_t hw_info[] = {
+ { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
+ { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
+ { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
+ { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
+ DELAY_OUTPUT | HAS_IBM_MISC },
+ { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 },
+ { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 },
+ { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 },
+ { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 },
+ { /* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 },
+ { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 },
+ { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* ELECOM Laneed LD-CDWA */ 0xb8, 0x08, 0x00, 0x42, 0 },
+ { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 },
+ { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 },
+ { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 },
+ { /* Maxtech PCN2000 */ 0x5000, 0x00, 0x00, 0xe8, 0 },
+ { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 },
+ { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
+ { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
+ { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
+ { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 },
+ { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b,
+ DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF },
+ { /* Socket LP-E CF+ */ 0x01c0, 0x00, 0xc0, 0x1b, 0 },
+ { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 },
+ { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 },
+ { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 },
+ { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 }
+};
+
+#define NR_INFO (sizeof(hw_info)/sizeof(hw_info_t))
+
+static hw_info_t default_info = { 0, 0, 0, 0, 0 };
+static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII };
+static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
+
+typedef struct pcnet_dev_t {
+ struct net_device dev; /* so &dev == &pcnet_dev_t */
+ dev_link_t link;
+ dev_node_t node;
+ u_int flags;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_char phy_id;
+ u_char eth_phy, pna_phy;
+ u_short link_status;
+ u_long mii_reset;
+} pcnet_dev_t;
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ /* pcnet_detach() unlinks the current entry, so save next first */
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ pcnet_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+/* Report a Card Services failure ('ret' from CS function 'func'). */
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ We never need to do anything when a pcnet device is "initialized"
+ by the net software, because we only register already-found cards.
+
+======================================================================*/
+
+static int pcnet_init(struct net_device *dev)
+{
+ return 0; /* intentionally a no-op; see the comment block above */
+}
+
+/*======================================================================
+
+ pcnet_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *pcnet_attach(void)
+{
+ pcnet_dev_t *info;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "pcnet_attach()\n");
+ /* Reap any instances left behind by a deferred detach */
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) return NULL;
+ memset(info, 0, sizeof(*info));
+ link = &info->link; dev = &info->dev;
+ link->priv = info;
+
+ /* Deferred-release timer: runs pcnet_release() when it fires */
+ init_timer(&link->release);
+ link->release.function = &pcnet_release;
+ link->release.data = (u_long)link;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ /* NOTE(review): assumes irq_list[] holds at least 4 entries —
+    confirm against the module parameter declaration */
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ ethdev_init(dev);
+ init_dev_name(dev, info->node);
+ dev->init = &pcnet_init;
+ dev->open = &pcnet_open;
+ dev->stop = &pcnet_close;
+ dev->set_config = &set_config;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &pcnet_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(link->handle, RegisterClient, ret);
+ pcnet_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* pcnet_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void pcnet_detach(dev_link_t *link)
+{
+ pcnet_dev_t *info = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "pcnet_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ pcnet_release((u_long)link);
+ /* Device still open: mark stale and let flush_stale_links()
+    finish the teardown later */
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&info->dev);
+ kfree(info);
+
+} /* pcnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address, for card types that
+ encode this information in their CIS.
+
+======================================================================*/
+
+static hw_info_t *get_hwinfo(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ win_req_t req;
+ memreq_t mem;
+ u_char *base, *virt;
+ int i, j;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return NULL;
+ }
+
+ virt = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ /* Map each known CIS offset and test the three signature bytes;
+    address bytes sit at even offsets within the window */
+ for (i = 0; i < NR_INFO; i++) {
+ mem.CardOffset = hw_info[i].offset & ~(req.Size-1);
+ CardServices(MapMemPage, link->win, &mem);
+ base = &virt[hw_info[i].offset & (req.Size-1)];
+ if ((readb(base+0) == hw_info[i].a0) &&
+ (readb(base+2) == hw_info[i].a1) &&
+ (readb(base+4) == hw_info[i].a2))
+ break;
+ }
+ if (i < NR_INFO) {
+ /* Matched: copy the six address bytes (even offsets only) */
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = readb(base + (j<<1));
+ }
+
+ iounmap(virt);
+ j = CardServices(ReleaseWindow, link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return (i < NR_INFO) ? hw_info+i : NULL;
+} /* get_hwinfo */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+ It checks the address against a list of known types, then falls
+ back to a simple NE2000 clone signature check.
+
+======================================================================*/
+
+static hw_info_t *get_prom(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_char prom[32];
+ int i, j;
+
+ /* This is lifted straight from drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ pcnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ /* Pull 32 PROM bytes through the remote-DMA data port */
+ for (i = 0; i < 32; i++)
+ prom[i] = inb(ioaddr + PCNET_DATAPORT);
+ /* Match the first address bytes against the known-card table */
+ for (i = 0; i < NR_INFO; i++) {
+ if ((prom[0] == hw_info[i].a0) &&
+ (prom[2] == hw_info[i].a1) &&
+ (prom[4] == hw_info[i].a2))
+ break;
+ }
+ /* Also accept unknown cards carrying the 0x57,0x57 NE2000 clone
+    marker at PROM offsets 28 and 30 */
+ if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = prom[j<<1];
+ return (i < NR_INFO) ? hw_info+i : &default_info;
+ }
+ return NULL;
+} /* get_prom */
+
+/*======================================================================
+
+ For DL10019 based cards, like the Linksys EtherFast
+
+======================================================================*/
+
+static hw_info_t *get_dl10019(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+ u_char sum;
+
+ /* The eight bytes at 0x14-0x1b must sum to 0xff on these cards */
+ for (sum = 0, i = 0x14; i < 0x1c; i++)
+ sum += inb_p(dev->base_addr + i);
+ if (sum != 0xff)
+ return NULL;
+ /* MAC address is the first six of those bytes */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ /* Register 0x1f distinguishes the DL10022 (0x91/0x99) from the
+    DL10019 */
+ i = inb(dev->base_addr + 0x1f);
+ return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
+}
+
+/*======================================================================
+
+ For Asix AX88190 based cards
+
+======================================================================*/
+
+static hw_info_t *get_ax88190(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i, j;
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->conf.ConfigBase != 0x03c0)
+ return NULL;
+
+ outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */
+ outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */
+ outb_p(0x04, ioaddr + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD);
+
+ /* Read the MAC as three little-endian 16-bit words */
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + PCNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ /* AX88190 cards are handled by a different driver; always reject */
+ printk(KERN_NOTICE "pcnet_cs: this is an AX88190 card!\n");
+ printk(KERN_NOTICE "pcnet_cs: use axnet_cs instead.\n");
+ return NULL;
+}
+
+/*======================================================================
+
+ This should be totally unnecessary... but when we can't figure
+ out the hardware address any other way, we'll let the user hard
+ wire it when the module is initialized.
+
+======================================================================*/
+
+static hw_info_t *get_hwired(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+
+ /* All-zero hw_addr[] means the user supplied no address */
+ for (i = 0; i < 6; i++)
+ if (hw_addr[i] != 0) break;
+ if (i == 6)
+ return NULL;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = hw_addr[i];
+
+ return &default_info;
+} /* get_hwired */
+
+/*======================================================================
+
+ pcnet_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+/* Invoke a Card Services function; on failure record it in last_fn /
+   last_ret and jump to the enclosing function's cs_failed label. */
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+/* Like CS_CHECK, but just skip to the next CIS entry on failure. */
+#define CFG_CHECK(fn, args...) \
+if (CardServices(fn, args) != 0) goto next_entry
+
+/* Request the I/O window(s) described in link->io, choosing data-path
+   widths by window layout; returns a Card Services status code. */
+static int try_io_port(dev_link_t *link)
+{
+ int j, ret;
+ if (link->io.NumPorts1 == 32) {
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (link->io.NumPorts2 > 0) {
+ /* for master/slave multifunction cards */
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ }
+ } else {
+ /* This should be two 16-port windows */
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ }
+ if (link->io.BasePort1 == 0) {
+ link->io.IOAddrLines = 16;
+ /* No base requested: scan 0x300, 0x320, ... wrapping through
+    the 0x000-0x3ff range until a window is granted */
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ link->io.BasePort2 = (j ^ 0x300) + 0x10;
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+ } else {
+ return CardServices(RequestIO, link->handle, &link->io);
+ }
+}
+
+static void pcnet_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ pcnet_dev_t *info = link->priv;
+ struct net_device *dev = &info->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
+ int manfid = 0, prodid = 0, has_shmem = 0;
+ u_short buf[64];
+ config_info_t conf;
+ hw_info_t *hw_info;
+
+ DEBUG(0, "pcnet_config(0x%p)\n", link);
+
+ /* Fetch and parse the CONFIG tuple to locate the config registers */
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up current Vcc */
+ CS_CHECK(GetConfigurationInfo, handle, &conf);
+ link->conf.Vcc = conf.Vcc;
+
+ /* Manufacturer/product IDs drive several quirk checks below */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if ((CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) &&
+ (CardServices(GetTupleData, handle, &tuple) == CS_SUCCESS)) {
+ manfid = le16_to_cpu(buf[0]);
+ prodid = le16_to_cpu(buf[1]);
+ }
+
+ /* Walk the config table until an I/O window can be allocated */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Attributes = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ while (last_ret == CS_SUCCESS) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_io_t *io = &(parse.cftable_entry.io);
+
+ CFG_CHECK(GetTupleData, handle, &tuple);
+ CFG_CHECK(ParseTuple, handle, &tuple, &parse);
+ if ((cfg->index == 0) || (cfg->io.nwin == 0))
+ goto next_entry;
+
+ link->conf.ConfigIndex = cfg->index;
+ /* For multifunction cards, by convention, we configure the
+ network function with window 0, and serial with window 1 */
+ if (io->nwin > 1) {
+ i = (io->win[1].len > io->win[0].len);
+ link->io.BasePort2 = io->win[1-i].base;
+ link->io.NumPorts2 = io->win[1-i].len;
+ } else {
+ i = link->io.NumPorts2 = 0;
+ }
+ has_shmem = ((cfg->mem.nwin == 1) &&
+ (cfg->mem.win[0].len >= 0x4000));
+ link->io.BasePort1 = io->win[i].base;
+ link->io.NumPorts1 = io->win[i].len;
+ link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ if (link->io.NumPorts1 + link->io.NumPorts2 >= 32) {
+ last_ret = try_io_port(link);
+ if (last_ret == CS_SUCCESS) break;
+ }
+ next_entry:
+ last_ret = CardServices(GetNextTuple, handle, &tuple);
+ }
+ if (last_ret != CS_SUCCESS) {
+ cs_error(handle, RequestIO, last_ret);
+ goto failed;
+ }
+
+ CS_CHECK(RequestIRQ, handle, &link->irq);
+
+ /* Cards with an 8-port second window get the speaker enabled */
+ if (link->io.NumPorts2 == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+ if ((manfid == MANFID_IBM) &&
+ (prodid == PRODID_IBM_HOME_AND_AWAY))
+ link->conf.ConfigIndex |= 0x10;
+
+ CS_CHECK(RequestConfiguration, handle, &link->conf);
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ /* NOTE(review): info->flags is still zero here — it is assigned
+    from hw_info only after the probes below — so this HAS_MISC_REG
+    branch cannot be taken at this point; confirm intended ordering */
+ if (info->flags & HAS_MISC_REG) {
+ if ((if_port == 1) || (if_port == 2))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
+ } else {
+ dev->if_port = 0;
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ /* Try each hardware-address probe in turn */
+ hw_info = get_hwinfo(link);
+ if (hw_info == NULL)
+ hw_info = get_prom(link);
+ if (hw_info == NULL)
+ hw_info = get_dl10019(link);
+ if (hw_info == NULL)
+ hw_info = get_ax88190(link);
+ if (hw_info == NULL)
+ hw_info = get_hwired(link);
+
+ if (hw_info == NULL) {
+ printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
+ " address for io base %#3lx\n", dev->base_addr);
+ unregister_netdev(dev);
+ goto failed;
+ }
+
+ info->flags = hw_info->flags;
+ /* Check for user overrides */
+ info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
+ if ((manfid == MANFID_SOCKET) &&
+ ((prodid == PRODID_SOCKET_LPE) ||
+ (prodid == PRODID_SOCKET_LPE_CF) ||
+ (prodid == PRODID_SOCKET_EIO)))
+ info->flags &= ~USE_BIG_BUF;
+ if (!use_big_buf)
+ info->flags &= ~USE_BIG_BUF;
+
+ /* Select ring-buffer page range and common-memory offset */
+ if (info->flags & USE_BIG_BUF) {
+ start_pg = SOCKET_START_PG;
+ stop_pg = SOCKET_STOP_PG;
+ cm_offset = 0x10000;
+ } else {
+ start_pg = PCNET_START_PG;
+ stop_pg = PCNET_STOP_PG;
+ cm_offset = 0;
+ }
+
+ /* has_shmem is ignored if use_shmem != -1 */
+ if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) ||
+ (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0))
+ setup_dma_config(link, start_pg, stop_pg);
+
+ ei_status.name = "NE2000";
+ ei_status.word16 = 1;
+ ei_status.reset_8390 = &pcnet_reset_8390;
+
+ copy_dev_name(info->node, dev);
+ link->dev = &info->node;
+
+ /* D-Link MII chips: probe PHYs and report chip revision */
+ if (info->flags & (IS_DL10019|IS_DL10022)) {
+ u_char id = inb(dev->base_addr + 0x1a);
+ dev->do_ioctl = &ei_ioctl;
+ mii_phy_probe(dev);
+ if ((id == 0x30) && !info->pna_phy && (info->eth_phy == 4))
+ info->eth_phy = 0;
+ printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
+ dev->name, ((info->flags & IS_DL10022) ? 22 : 19), id);
+ if (info->pna_phy)
+ printk("PNA, ");
+ } else
+ printk(KERN_INFO "%s: NE2000 Compatible: ", dev->name);
+ printk("io %#3lx, irq %d,", dev->base_addr, dev->irq);
+ if (info->flags & USE_SHMEM)
+ printk (" mem %#5lx,", dev->mem_start);
+ if (info->flags & HAS_MISC_REG)
+ printk(" %s xcvr,", if_names[dev->if_port]);
+ printk(" hw_addr ");
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ pcnet_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+} /* pcnet_config */
+
+/*======================================================================
+
+ After a card is removed, pcnet_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void pcnet_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+ pcnet_dev_t *info = link->priv;
+
+ DEBUG(0, "pcnet_release(0x%p)\n", link);
+
+ /* Device still open: defer; pcnet_close() re-arms the release */
+ if (link->open) {
+ DEBUG(1, "pcnet_cs: release postponed, '%s' still open\n",
+ info->node.dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ /* Release shared-memory window, configuration, I/O and IRQ */
+ if (info->flags & USE_SHMEM) {
+ iounmap(info->base);
+ CardServices(ReleaseWindow, link->win);
+ }
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* pcnet_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int pcnet_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ pcnet_dev_t *info = link->priv;
+
+ DEBUG(2, "pcnet_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(&info->dev);
+ /* schedule deferred pcnet_release() via the release timer */
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ pcnet_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(&info->dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ /* restore 8390 state lost across the reset */
+ pcnet_reset_8390(&info->dev);
+ NS8390_init(&info->dev, 1);
+ netif_device_attach(&info->dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* pcnet_event */
+
+/*======================================================================
+
+ MII interface support for DL10019 and DL10022 based cards
+
+ On the DL10019, the MII IO direction bit is 0x10; on the DL10022
+ it is 0x20. Setting both bits seems to work on both card types.
+
+======================================================================*/
+
+#define DLINK_GPIO 0x1c /* GPIO port used for MDIO bit-banging */
+#define DLINK_DIAG 0x1d /* diagnostic register (duplex control) */
+#define DLINK_EEPROM 0x1e /* serial EEPROM access port */
+
+#define MDIO_SHIFT_CLK 0x80
+#define MDIO_DATA_OUT 0x40
+/* Both direction bits set: 0x10 (DL10019) and 0x20 (DL10022); see
+   the comment block above */
+#define MDIO_DIR_WRITE 0x30
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x10
+#define MDIO_MASK 0x0f
+
+/* Clock out the 32-bit all-ones preamble preceding each MII frame;
+   the low GPIO bits are preserved via 'mask'. */
+static void mdio_sync(ioaddr_t addr)
+{
+ int bits, mask = inb(addr) & MDIO_MASK;
+ for (bits = 0; bits < 32; bits++) {
+ outb(mask | MDIO_DATA_WRITE1, addr);
+ outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/* Bit-bang an MII management read of register 'loc' on PHY 'phy_id';
+   returns the 16-bit register value. */
+static int mdio_read(ioaddr_t addr, int phy_id, int loc)
+{
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc; /* start + read op + addresses */
+ int i, retval = 0, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ /* Shift out the 14 command bits, MSB first */
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ /* Clock in 19 bits (turnaround + data); the final shift below
+    drops the extra trailing bit */
+ for (i = 19; i > 0; i--) {
+ outb(mask, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+/* Bit-bang an MII management write of 'value' to register 'loc' on
+   PHY 'phy_id'. */
+static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ /* Shift out the full 32-bit frame, MSB first */
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ /* Two idle clocks to finish the frame */
+ for (i = 1; i >= 0; i--) {
+ outb(mask, addr);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/* Toggle GPIO bits 0x08/0x04 twice, ending at 0 — presumably a PHY
+   hardware reset sequence (TODO confirm); 'phy_id' is unused. */
+static void mdio_reset(ioaddr_t addr, int phy_id)
+{
+ outb_p(0x08, addr);
+ outb_p(0x0c, addr);
+ outb_p(0x08, addr);
+ outb_p(0x0c, addr);
+ outb_p(0x00, addr);
+}
+
+/*======================================================================
+
+ EEPROM access routines for DL10019 and DL10022 based cards
+
+======================================================================*/
+
+#define EE_EEP 0x40 /* select the EEPROM */
+#define EE_ASIC 0x10 /* select the internal ASIC */
+#define EE_CS 0x08 /* chip select */
+#define EE_CK 0x04 /* serial clock */
+#define EE_DO 0x02 /* data out (host to device) */
+#define EE_DI 0x01 /* data in (device to host) */
+#define EE_ADOT 0x01 /* DataOut for ASIC */
+#define EE_READ_CMD 0x06
+
+#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
+
+/* Bit-bang one 16-bit word at 'location' from the serial EEPROM. */
+static int read_eeprom(ioaddr_t ioaddr, int location)
+{
+ int i, retval = 0;
+ ioaddr_t ee_addr = ioaddr + DLINK_EEPROM;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ outb(0, ee_addr);
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_EEP|EE_CS|dataval, ee_addr);
+ outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
+ }
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ /* Clock in the 16 data bits, MSB first */
+ for (i = 16; i > 0; i--) {
+ outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
+ outb_p(EE_EEP|EE_CS, ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(0, ee_addr);
+ return retval;
+}
+
+/*
+ The internal ASIC registers can be changed by EEPROM READ access
+ with EE_ASIC bit set.
+ In ASIC mode, EE_ADOT is used to output the data to the ASIC.
+*/
+
+/* Write 'asic_data' (ORed with the current EEPROM word at 'location')
+   into the internal ASIC register via an EEPROM-read-style access;
+   see the comment block above. */
+static void write_asic(ioaddr_t ioaddr, int location, short asic_data)
+{
+ int i;
+ ioaddr_t ee_addr = ioaddr + DLINK_EEPROM;
+ short dataval;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ asic_data |= read_eeprom(ioaddr, location);
+
+ outb(0, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_DI, ee_addr);
+
+ read_cmd = read_cmd >> 1;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ }
+ /* Clock one sync cycle between command and data phases */
+ outb(EE_ASIC|EE_CS, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
+ outb(EE_ASIC|EE_CS, ee_addr);
+
+ /* Shift out the 16 data bits on EE_ADOT, MSB first */
+ for (i = 15; i >= 0; i--) {
+ dataval = (asic_data & (1 << i)) ? EE_ADOT : 0;
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ }
+
+ /* Terminate the ASIC access. */
+ outb(EE_ASIC|EE_DI, ee_addr);
+ outb(EE_ASIC|EE_DI| EE_CK, ee_addr);
+ outb(EE_ASIC|EE_DI, ee_addr);
+
+ outb(0, ee_addr);
+}
+
+/*====================================================================*/
+
+/* Program transceiver/buffer bits in the misc register and, on
+   DL10022 cards, restart MII autonegotiation or set duplex. */
+static void set_misc_reg(struct net_device *dev)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ u_char tmp;
+
+ if (info->flags & HAS_MISC_REG) {
+ tmp = inb_p(nic_base + PCNET_MISC) & ~3;
+ if (dev->if_port == 2)
+ tmp |= 1; /* if_port 2 selects the alternate transceiver */
+ if (info->flags & USE_BIG_BUF)
+ tmp |= 2;
+ if (info->flags & HAS_IBM_MISC)
+ tmp |= 8;
+ outb_p(tmp, nic_base + PCNET_MISC);
+ }
+ if (info->flags & IS_DL10022) {
+ if (info->flags & HAS_MII) {
+ mdio_reset(nic_base + DLINK_GPIO, info->eth_phy);
+ /* Restart MII autonegotiation */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
+ info->mii_reset = jiffies;
+ } else {
+ outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
+ }
+ }
+}
+
+/*====================================================================*/
+
+/* Scan all 32 MII addresses: record a HomePNA PHY in pna_phy and any
+   other responding PHY in eth_phy (the AM79C9XX Ethernet PHY id is
+   not recorded — TODO confirm why it is excluded). */
+static void mii_phy_probe(struct net_device *dev)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ ioaddr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ int i;
+ u_int tmp, phyid;
+
+ for (i = 31; i >= 0; i--) {
+ tmp = mdio_read(mii_addr, i, 1);
+ /* 0x0000 / 0xffff means no PHY responded at this address */
+ if ((tmp == 0) || (tmp == 0xffff))
+ continue;
+ tmp = mdio_read(mii_addr, i, MII_PHYID_REG1);
+ phyid = tmp << 16;
+ phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
+ phyid &= MII_PHYID_REV_MASK;
+ DEBUG(0, "%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
+ if (phyid == AM79C9XX_HOME_PHY) {
+ info->pna_phy = i;
+ } else if (phyid != AM79C9XX_ETH_PHY) {
+ info->eth_phy = i;
+ }
+ }
+}
+
+static int pcnet_open(struct net_device *dev)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "pcnet_open('%s')\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ set_misc_reg(dev);
+ /* NOTE(review): request_irq() return value is ignored — confirm */
+ request_irq(dev->irq, ei_irq_wrapper, SA_SHIRQ, dev_info, dev);
+
+ /* Start the one-second watchdog poll (see ei_watchdog) */
+ info->phy_id = info->eth_phy;
+ info->link_status = 0x00;
+ info->watchdog.function = &ei_watchdog;
+ info->watchdog.data = (u_long)info;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ei_open(dev);
+} /* pcnet_open */
+
+/*====================================================================*/
+
+static int pcnet_close(struct net_device *dev)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "pcnet_close('%s')\n", dev->name);
+
+ ei_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&info->watchdog);
+ /* A release postponed while the device was open can run now */
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* pcnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+ a 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void pcnet_reset_8390(struct net_device *dev)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ /* Stop the chip before resetting it */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+ /* Reading then writing the reset port triggers the reset */
+ outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET);
+
+ /* Wait up to 10 ms for the RESET bit to appear in the ISR */
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
+ dev->name);
+ set_misc_reg(dev);
+
+} /* pcnet_reset_8390 */
+
+/*====================================================================*/
+
+/* ifmap handler: switch between transceiver ports 1 and 2 on cards
+   that have a misc register; port -1 means "no change". */
+static int set_config(struct net_device *dev, struct ifmap *map)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (!(info->flags & HAS_MISC_REG))
+ return -EOPNOTSUPP;
+ else if ((map->port < 1) || (map->port > 2))
+ return -EINVAL;
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ NS8390_init(dev, 1);
+ }
+ return 0;
+}
+
+/*====================================================================*/
+
+/* IRQ wrapper: clear the 'stale' counter used by the watchdog's
+   lost-interrupt heuristic, then run the 8390 handler. */
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
+{
+ pcnet_dev_t *info = dev_id;
+ info->stale = 0;
+ ei_interrupt(irq, dev_id, regs);
+}
+
+/* One-second poll: recovers from dropped interrupts and, on MII
+   cards, monitors link beat and flips between Ethernet/PNA PHYs. */
+static void ei_watchdog(u_long arg)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)(arg);
+ struct net_device *dev = &info->dev;
+ ioaddr_t nic_base = dev->base_addr;
+ ioaddr_t mii_addr = nic_base + DLINK_GPIO;
+ u_short link;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ outb_p(E8390_NODMA+E8390_PAGE0, nic_base + E8390_CMD);
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ ei_irq_wrapper(dev->irq, dev, NULL);
+ info->fast_poll = HZ;
+ }
+ /* Poll every jiffy for a while after a dropped interrupt */
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ if (!(info->flags & HAS_MII))
+ goto reschedule;
+
+ /* Read twice — presumably the link-status bit is latched; confirm */
+ mdio_read(mii_addr, info->phy_id, 1);
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ /* PHY not responding: retry address 0 once, then give up */
+ if (info->eth_phy) {
+ info->phy_id = info->eth_phy = 0;
+ } else {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ info->flags &= ~HAS_MII;
+ }
+ goto reschedule;
+ }
+
+ link &= 0x0004; /* isolate the link-status bit */
+ if (link != info->link_status) {
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ if (link && (info->flags & IS_DL10022)) {
+ /* Disable collision detection on full duplex links */
+ outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
+ } else if (link && (info->flags & IS_DL10019)) {
+ /* Disable collision detection on full duplex links */
+ write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0);
+ }
+ if (link) {
+ if (info->phy_id == info->eth_phy) {
+ if (p)
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
+ else
+ printk(KERN_INFO "%s: link partner did not "
+ "autonegotiate\n", dev->name);
+ }
+ NS8390_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+ /* With a HomePNA PHY present, periodically consider flipping
+ between the Ethernet and PNA transceivers */
+ if (info->pna_phy && (jiffies - info->mii_reset > 6*HZ)) {
+ link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004;
+ if (((info->phy_id == info->pna_phy) && link) ||
+ ((info->phy_id != info->pna_phy) && !link)) {
+ /* isolate this MII and try flipping to the other one */
+ mdio_write(mii_addr, info->phy_id, 0, 0x0400);
+ info->phy_id ^= info->pna_phy ^ info->eth_phy;
+ printk(KERN_INFO "%s: switched to %s transceiver\n", dev->name,
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ mdio_write(mii_addr, info->phy_id, 0,
+ (info->phy_id == info->eth_phy) ? 0x1000 : 0);
+ info->link_status = 0;
+ info->mii_reset = jiffies;
+ }
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+/*====================================================================*/
+
+/* Private MII ioctls: get PHY id, read register, write register. */
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ u16 *data = (u16 *)&rq->ifr_data;
+ ioaddr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ data[0] = info->phy_id;
+ /* fall through — read with the PHY id just stored */
+ case SIOCDEVPRIVATE+1:
+ data[3] = mdio_read(mii_addr, data[0], data[1] & 0x1f);
+ return 0;
+ case SIOCDEVPRIVATE+2:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(mii_addr, data[0], data[1] & 0x1f, data[2]);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
+/* Fetch the 8390 packet header at 'ring_page' via remote DMA. */
+static void dma_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ /* Program a remote read of sizeof(hdr) bytes at the page start */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+/* Copy 'count' bytes of a received packet from the card's ring buffer
+   into skb->data using programmed remote-DMA I/O.  Word transfers are
+   used; a trailing odd byte is fetched separately. */
+static void dma_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+#ifdef PCMCIA_DEBUG
+ if ((ei_debug > 4) && (count != 4))
+ printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4);
+#endif
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT,buf,count>>1);
+ /* Odd byte count: read the final byte singly and bump xfer_count
+ (comma operator keeps both actions under the one 'if'). */
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+
+ /* This was for the ALPHA version only, but enough people have
+ encountered problems that it is still here. */
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff))
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk(KERN_NOTICE "%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+} /* dma_block_input */
+
+/*====================================================================*/
+
+/* Copy a packet to the card's transmit buffer at 'start_page' using
+   programmed remote-DMA I/O, then wait (bounded by PCNET_RDC_TIMEOUT)
+   for the remote-DMA-complete interrupt bit before returning. */
+static void dma_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+#ifdef PCMCIA_DEBUG
+ int retries = 0;
+#endif
+ u_long dma_start;
+
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4)
+ printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD);
+
+#ifdef PCMCIA_DEBUG
+ retry:
+#endif
+
+ /* Clear any stale remote-DMA-complete indication first. */
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD);
+ outsw(nic_base + PCNET_DATAPORT, buf, count>>1);
+
+ dma_start = jiffies;
+
+#ifdef PCMCIA_DEBUG
+ /* This was for the ALPHA version only, but enough people have
+ encountered problems that it is still here. */
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk(KERN_NOTICE "%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ /* One retry of the whole transfer in debug builds. */
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ /* Poll for RDC; on timeout, reset and reinitialize the 8390. */
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > PCNET_RDC_TIMEOUT) {
+ printk(KERN_NOTICE "%s: timeout waiting for Tx RDC.\n",
+ dev->name);
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ if (info->flags & DELAY_OUTPUT)
+ udelay((long)delay_time);
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+/* Configure the 8390 layer for programmed-I/O (remote DMA) transfers:
+   set the tx/rx page layout and install the dma_* block I/O hooks.
+   Always returns 0. */
+static int setup_dma_config(dev_link_t *link, int start_pg,
+ int stop_pg)
+{
+ struct net_device *dev = link->priv;
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = stop_pg;
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = &dma_get_8390_hdr;
+ ei_status.block_input = &dma_block_input;
+ ei_status.block_output = &dma_block_output;
+
+ return 0;
+}
+
+/*====================================================================*/
+
+/* Copy 'c' bytes from the card's shared-memory window ('src') to host
+   memory ('dest') using 16-bit reads (readw_ns).  An odd trailing byte
+   is obtained by reading a full word and keeping the low byte —
+   assumes little-endian byte order within the word. */
+static void copyin(u_char *dest, u_char *src, int c)
+{
+ u_short *d = (u_short *)dest, *s = (u_short *)src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { *d++ = readw_ns(s++); } while (--c);
+ }
+ /* get last byte by fetching a word and masking */
+ if (odd)
+ *((u_char *)d) = readw(s) & 0xff;
+}
+
+/* Copy 'c' bytes from host memory ('src') into the card's shared-memory
+   window ('dest') using 16-bit writes (writew_ns).  An odd trailing
+   byte is merged via read-modify-write so the neighbouring byte in the
+   window is preserved. */
+static void copyout(u_char *dest, const u_char *src, int c)
+{
+ u_short *d = (u_short *)dest, *s = (u_short *)src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { writew_ns(*s++, d++); } while (--c);
+ }
+ /* copy last byte doing a read-modify-write */
+ if (odd)
+ writew((readw(d) & 0xff00) | *(u_char *)s, d);
+}
+
+/*====================================================================*/
+
+/* Shared-memory variant of get_8390_hdr: compute where 'ring_page'
+   lands in the mapped rx window and copy the 4-byte header out. */
+static void shmem_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ void *xfer_start = (void *)(dev->rmem_start + (ring_page << 8)
+ - (ei_status.rx_start_page << 8));
+
+ copyin((void *)hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+}
+
+/*====================================================================*/
+
+/* Shared-memory variant of block_input: copy a received packet out of
+   the mapped rx ring, handling the case where the packet wraps past
+   the end of the ring (two-part copy). */
+static void shmem_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ void *xfer_start = (void *)(dev->rmem_start + ring_offset
+ - (ei_status.rx_start_page << 8));
+ char *buf = skb->data;
+
+ if (xfer_start + count > (void *)dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = (void*)dev->rmem_end - xfer_start;
+ copyin(buf, xfer_start, semi_count);
+ buf += semi_count;
+ ring_offset = ei_status.rx_start_page << 8;
+ xfer_start = (void *)dev->rmem_start;
+ count -= semi_count;
+ }
+ copyin(buf, xfer_start, count);
+}
+
+/*====================================================================*/
+
+/* Shared-memory variant of block_output: copy the outgoing packet
+   directly into the mapped tx area at 'start_page'. */
+static void shmem_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ void *shmem = (void *)dev->mem_start + (start_page << 8);
+ shmem -= ei_status.tx_start_page << 8;
+ copyout(shmem, buf, count);
+}
+
+/*====================================================================*/
+
+/* Map the card's packet buffer into host memory via a Card Services
+   memory window and switch the 8390 layer to the shmem_* block I/O
+   hooks.  Returns 0 on success, 1 on failure (after releasing the
+   window).  CS_CHECK jumps to cs_failed on Card Services errors. */
+static int setup_shmem_window(dev_link_t *link, int start_pg,
+ int stop_pg, int cm_offset)
+{
+ struct net_device *dev = link->priv;
+ pcnet_dev_t *info = link->priv;
+ win_req_t req;
+ memreq_t mem;
+ int i, window_size, offset, last_ret, last_fn;
+
+ window_size = (stop_pg - start_pg) << 8;
+ if (window_size > 32 * 1024)
+ window_size = 32 * 1024;
+
+ /* Make sure it's a power of two. */
+ /* Repeatedly add the lowest set bit until only one bit remains
+ (rounds up to the next power of two). */
+ while ((window_size & (window_size - 1)) != 0)
+ window_size += window_size & ~(window_size - 1);
+
+ /* Allocate a memory window */
+ req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
+ req.Attributes |= WIN_USE_WAIT;
+ req.Base = 0; req.Size = window_size;
+ req.AccessSpeed = mem_speed;
+ link->win = (window_handle_t)link->handle;
+ CS_CHECK(RequestWindow, &link->win, &req);
+
+ /* Align the card offset down to a window_size boundary; 'offset'
+ is the residual offset of the buffer inside the window. */
+ mem.CardOffset = (start_pg << 8) + cm_offset;
+ offset = mem.CardOffset % window_size;
+ mem.CardOffset -= offset;
+ mem.Page = 0;
+ CS_CHECK(MapMemPage, link->win, &mem);
+
+ /* Try scribbling on the buffer */
+ /* Write a test pattern over the tx area and read it back to verify
+ the window actually reaches the card's RAM. */
+ info->base = ioremap(req.Base, window_size);
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ writew_ns((i>>1), info->base+offset+i);
+ udelay(100);
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ if (readw_ns(info->base+offset+i) != (i>>1)) break;
+ pcnet_reset_8390(dev);
+ if (i != (TX_PAGES<<8)) {
+ iounmap(info->base);
+ CardServices(ReleaseWindow, link->win);
+ info->base = NULL; link->win = NULL;
+ goto failed;
+ }
+
+ dev->mem_start = (u_long)info->base + offset;
+ dev->rmem_start = dev->mem_start + (TX_PAGES<<8);
+ dev->mem_end = dev->rmem_end = (u_long)info->base + req.Size;
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = start_pg + ((req.Size - offset) >> 8);
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = &shmem_get_8390_hdr;
+ ei_status.block_input = &shmem_block_input;
+ ei_status.block_output = &shmem_block_output;
+
+ info->flags |= USE_SHMEM;
+ return 0;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ return 1;
+}
+
+/*====================================================================*/
+
+/* Module init: verify the Card Services release matches what we were
+   built against, then register the attach/detach callbacks. */
+static int __init init_pcnet_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "pcnet_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &pcnet_attach, &pcnet_detach);
+ return 0;
+}
+
+/* Module exit: unregister the driver and detach any remaining
+   device instances. */
+static void __exit exit_pcnet_cs(void)
+{
+ DEBUG(0, "pcnet_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ pcnet_detach(dev_list);
+}
+
+/* Hook the init/exit routines into the module loader. */
+module_init(init_pcnet_cs);
+module_exit(exit_pcnet_cs);
diff --git a/linux/pcmcia-cs/clients/smc91c92_cs.c b/linux/pcmcia-cs/clients/smc91c92_cs.c
new file mode 100644
index 0000000..6921515
--- /dev/null
+++ b/linux/pcmcia-cs/clients/smc91c92_cs.c
@@ -0,0 +1,2135 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for SMC91c92-based cards.
+
+ This driver supports Megahertz PCMCIA ethernet cards; and
+ Megahertz, Motorola, Ositech, and Psion Dacom ethernet/modem
+ multifunction cards.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ smc91c92_cs.c 1.123 2003/08/25 15:57:41
+
+ This driver contains code written by Donald Becker
+ (becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au),
+ David Hinds (dahinds@users.sourceforge.net), and Erik Stahlman
+ (erik@vt.edu). Donald wrote the SMC 91c92 code using parts of
+ Erik's SMC 91c94 driver. Rowan wrote a similar driver, and I've
+ incorporated some parts of his driver here. I (Dave) wrote most
+ of the PCMCIA glue code, and the Ositech support code. Kelly
+ Stephens (kstephen@holli.com) added support for the Motorola
+ Mariner, with help from Allen Brost.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+/* Ositech Seven of Diamonds firmware */
+#include "ositech.h"
+
+/*====================================================================*/
+
+static char *if_names[] = { "auto", "10baseT", "10base2"};
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/*
+ Transceiver/media type.
+ 0 = auto
+ 1 = 10baseT (and autoselect if #define AUTOSELECT),
+ 2 = AUI/10base2,
+*/
+INT_MODULE_PARM(if_port, 0);
+
+/* Bit map of interrupts to choose from. */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+static const char *version =
+"smc91c92_cs.c 0.09 1996/8/4 Donald Becker, becker@scyld.com.\n";
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Operational parameter that usually are not changed. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+#define INTR_WORK 4
+
+/* Times to check the chip before concluding that it doesn't
+ currently have room for another Tx packet. */
+#define MEMORY_WAIT_TIME 8
+
+static dev_info_t dev_info = "smc91c92_cs";
+
+static dev_link_t *dev_list;
+
+/* Per-device private state; the embedded dev_link_t and net_device let
+   one allocation serve both the PCMCIA and network layers (dev->priv,
+   link->priv and link->irq.Instance all point at this struct). */
+struct smc_private {
+ dev_link_t link; /* PCMCIA link state */
+ struct net_device dev; /* embedded network device */
+ u_short manfid; /* CIS manufacturer id (e.g. MANFID_MOTOROLA) */
+ u_short cardid; /* CIS product id (e.g. PRODID_MEGAHERTZ_EM3288) */
+ struct net_device_stats stats;
+ dev_node_t node;
+ struct sk_buff *saved_skb; /* presumably a deferred tx skb — not used in this chunk */
+ int packets_waiting;
+ caddr_t base; /* ioremap'd attribute-memory window (mhz_mfc_config) */
+ u_short cfg;
+ struct timer_list media; /* media_check() poll timer */
+ int watchdog, tx_err;
+ u_short media_status;
+ u_short fast_poll;
+ u_short link_status;
+ int phy_id; /* MII PHY address (91C100 with CFG_MII_SELECT) */
+ int duplex;
+ int rx_ovrn;
+};
+
+/* Special definitions for Megahertz multifunction cards */
+#define MEGAHERTZ_ISR 0x0380
+
+/* Special function registers for Motorola Mariner */
+#define MOT_LAN 0x0000
+#define MOT_UART 0x0020
+#define MOT_EEPROM 0x20
+
+#define MOT_NORMAL \
+(COR_LEVEL_REQ | COR_FUNC_ENA | COR_ADDR_DECODE | COR_IREQ_ENA)
+
+/* Special function registers for Ositech cards */
+#define OSITECH_AUI_CTL 0x0c
+#define OSITECH_PWRDOWN 0x0d
+#define OSITECH_RESET 0x0e
+#define OSITECH_ISR 0x0f
+#define OSITECH_AUI_PWR 0x0c
+#define OSITECH_RESET_ISR 0x0e
+
+#define OSI_AUI_PWR 0x40
+#define OSI_LAN_PWRDOWN 0x02
+#define OSI_MODEM_PWRDOWN 0x01
+#define OSI_LAN_RESET 0x02
+#define OSI_MODEM_RESET 0x01
+
+/* Symbolic constants for the SMC91c9* series chips, from Erik Stahlman. */
+#define BANK_SELECT 14 /* Window select register. */
+#define SMC_SELECT_BANK(x) { outw(x, ioaddr + BANK_SELECT); }
+
+/* Bank 0 registers. */
+#define TCR 0 /* transmit control register */
+#define TCR_CLEAR 0 /* do NOTHING */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_PAD_EN 0x0080 /* pads short packets to 64 bytes */
+#define TCR_MONCSN 0x0400 /* Monitor Carrier. */
+#define TCR_FDUPLX 0x0800 /* Full duplex mode. */
+#define TCR_NORMAL TCR_ENABLE | TCR_PAD_EN
+
+#define EPH 2 /* Ethernet Protocol Handler report. */
+#define EPH_TX_SUC 0x0001
+#define EPH_SNGLCOL 0x0002
+#define EPH_MULCOL 0x0004
+#define EPH_LTX_MULT 0x0008
+#define EPH_16COL 0x0010
+#define EPH_SQET 0x0020
+#define EPH_LTX_BRD 0x0040
+#define EPH_TX_DEFR 0x0080
+#define EPH_LAT_COL 0x0200
+#define EPH_LOST_CAR 0x0400
+#define EPH_EXC_DEF 0x0800
+#define EPH_CTR_ROL 0x1000
+#define EPH_RX_OVRN 0x2000
+#define EPH_LINK_OK 0x4000
+#define EPH_TX_UNRN 0x8000
+#define MEMINFO 8 /* Memory Information Register */
+#define MEMCFG 10 /* Memory Configuration Register */
+
+/* Bank 1 registers. */
+#define CONFIG 0
+#define CFG_MII_SELECT 0x8000 /* 91C100 only */
+#define CFG_NO_WAIT 0x1000
+#define CFG_FULL_STEP 0x0400
+#define CFG_SET_SQLCH 0x0200
+#define CFG_AUI_SELECT 0x0100
+#define CFG_16BIT 0x0080
+#define CFG_DIS_LINK 0x0040
+#define CFG_STATIC 0x0030
+#define CFG_IRQ_SEL_1 0x0004
+#define CFG_IRQ_SEL_0 0x0002
+#define BASE_ADDR 2
+#define ADDR0 4
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_STORE 0x0001
+#define CTL_RELOAD 0x0002
+#define CTL_EE_SELECT 0x0004
+#define CTL_TE_ENABLE 0x0020
+#define CTL_CR_ENABLE 0x0040
+#define CTL_LE_ENABLE 0x0080
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_POWERDOWN 0x2000
+
+/* Bank 2 registers. */
+#define MMU_CMD 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+#define FP_RXEMPTY 0x8000
+#define POINTER 6
+#define PTR_AUTO_INC 0x0040
+#define PTR_READ 0x2000
+#define PTR_AUTOINC 0x4000
+#define PTR_RCV 0x8000
+#define DATA_1 8
+#define INTERRUPT 12
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+
+#define RCR 4
+enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002,
+ RxEnable = 0x0100, RxStripCRC = 0x0200};
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+#define COUNTER 6
+
+/* BANK 3 -- not the same values as in smc9194! */
+#define MULTICAST0 0
+#define MULTICAST2 2
+#define MULTICAST4 4
+#define MULTICAST6 6
+#define MGMT 8
+#define REVISION 0x0a
+
+/* Transmit status bits. */
+#define TS_SUCCESS 0x0001
+#define TS_16COL 0x0010
+#define TS_LATCOL 0x0200
+#define TS_LOSTCAR 0x0400
+
+/* Receive status bits. */
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+#define set_bits(v, p) outw(inw(p)|(v), (p))
+#define mask_bits(v, p) outw(inw(p)&(v), (p))
+
+/*====================================================================*/
+
+static dev_link_t *smc91c92_attach(void);
+static void smc91c92_detach(dev_link_t *);
+static void smc91c92_config(dev_link_t *link);
+static void smc91c92_release(u_long arg);
+static int smc91c92_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static int smc_open(struct net_device *dev);
+static int smc_close(struct net_device *dev);
+static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void smc_tx_timeout(struct net_device *dev);
+static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void smc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void smc_rx(struct net_device *dev);
+static struct net_device_stats *smc_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static int s9k_config(struct net_device *dev, struct ifmap *map);
+static void smc_set_xcvr(struct net_device *dev, int if_port);
+static void smc_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static void mdio_sync(ioaddr_t addr);
+static int mdio_read(struct net_device *dev, int phy_id, int loc);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value);
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+/* Walk the device list and finish detaching any instance that was
+   deferred with DEV_STALE_LINK (see the comment block above). */
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next; /* save: detach frees 'link' */
+ if (link->state & DEV_STALE_LINK)
+ smc91c92_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+/* Report a Card Services failure (function code + return code)
+   through the CS error-logging facility. */
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ smc91c92_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *smc91c92_attach(void)
+{
+ client_reg_t client_reg;
+ struct smc_private *smc;
+ dev_link_t *link;
+ struct net_device *dev;
+ int i, ret;
+
+ DEBUG(0, "smc91c92_attach()\n");
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ /* One zeroed allocation holds link, net_device and driver state. */
+ smc = kmalloc(sizeof(struct smc_private), GFP_KERNEL);
+ if (!smc) return NULL;
+ memset(smc, 0, sizeof(struct smc_private));
+ link = &smc->link; dev = &smc->dev;
+
+ init_timer(&link->release);
+ link->release.function = &smc91c92_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 4;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ /* Build the candidate-IRQ bitmask from the module parameters:
+ either the raw irq_mask, or bits for each listed IRQ. */
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &smc_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50; /* 5.0 V, in tenths of a volt */
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* The SMC91c92-specific entries in the device structure. */
+ dev->hard_start_xmit = &smc_start_xmit;
+ dev->get_stats = &smc_get_stats;
+ dev->set_config = &s9k_config;
+ dev->set_multicast_list = &set_rx_mode;
+ ether_setup(dev);
+ init_dev_name(dev, smc->node);
+ dev->open = &smc_open;
+ dev->stop = &smc_close;
+ dev->do_ioctl = &smc_ioctl;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = smc_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+ dev->priv = link->priv = link->irq.Instance = smc;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask = CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &smc91c92_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ /* Registration failed: report and tear the instance down. */
+ cs_error(link->handle, RegisterClient, ret);
+ smc91c92_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* smc91c92_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void smc91c92_detach(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "smc91c92_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return; /* not in our list — nothing to do */
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ smc91c92_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ /* Release couldn't complete now; defer the free to
+ flush_stale_links() on the next attach. */
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&smc->dev);
+ kfree(smc);
+
+} /* smc91c92_detach */
+
+/*====================================================================*/
+
+/* Parse a 12-character hex string into dev->dev_addr (6 bytes).
+   Returns 0 on success, -1 if the string is not exactly 12 chars.
+   Note: no validation of the characters themselves — non-hex input
+   produces garbage bytes rather than an error. */
+static int cvt_ascii_address(struct net_device *dev, char *s)
+{
+ int i, j, da, c;
+
+ if (strlen(s) != 12)
+ return -1;
+ for (i = 0; i < 6; i++) {
+ da = 0;
+ for (j = 0; j < 2; j++) {
+ c = *s++;
+ da <<= 4;
+ /* '0'-'9' map directly; for letters, (c & 0x0f) + 9
+ maps both 'A'-'F' and 'a'-'f' to 10-15. */
+ da += ((c >= '0') && (c <= '9')) ?
+ (c - '0') : ((c & 0x0f) + 9);
+ }
+ dev->dev_addr[i] = da;
+ }
+ return 0;
+}
+
+/*====================================================================*/
+
+/* Fetch (via 'fn': GetFirstTuple or GetNextTuple), read and parse one
+   CIS tuple.  Returns CS_SUCCESS or the first failing step's code. */
+static int get_tuple(int fn, client_handle_t handle, tuple_t *tuple,
+ cisparse_t *parse)
+{
+ int i;
+ i = CardServices(fn, handle, tuple);
+ if (i != CS_SUCCESS) return i;
+ i = CardServices(GetTupleData, handle, tuple);
+ if (i != CS_SUCCESS) return i;
+ return CardServices(ParseTuple, handle, tuple, parse);
+}
+
+#define first_tuple(a, b, c) get_tuple(GetFirstTuple, a, b, c)
+#define next_tuple(a, b, c) get_tuple(GetNextTuple, a, b, c)
+
+/*======================================================================
+
+ Configuration stuff for Megahertz cards
+
+ mhz_3288_power() is used to power up a 3288's ethernet chip.
+ mhz_mfc_config() handles socket setup for multifunction (1144
+ and 3288) cards. mhz_setup() gets a card's hardware ethernet
+ address.
+
+======================================================================*/
+
+/* Power up the ethernet half of a Megahertz EM3288: the ISR reads,
+   delay, and COR read-back/rewrite sequence below is the card's
+   documented wake-up ritual (performed through the mapped attribute
+   memory at smc->base).  Always returns 0. */
+static int mhz_3288_power(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ u_char tmp;
+
+ /* Read the ISR twice... */
+ readb(smc->base+MEGAHERTZ_ISR);
+ udelay(5);
+ readb(smc->base+MEGAHERTZ_ISR);
+
+ /* Pause 200ms... */
+ mdelay(200);
+
+ /* Now read and write the COR... */
+ tmp = readb(smc->base + link->conf.ConfigBase + CISREG_COR);
+ udelay(5);
+ writeb(tmp, smc->base + link->conf.ConfigBase + CISREG_COR);
+
+ return 0;
+}
+
+/* Socket setup for Megahertz multifunction (ethernet+modem) cards:
+   find an I/O window, map an attribute-memory window for the ISR, and
+   power up the EM3288's LAN section if present.  Returns a Card
+   Services status code. */
+static int mhz_mfc_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255];
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+ win_req_t req;
+ memreq_t mem;
+ int i, k;
+
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ link->io.IOAddrLines = 16;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts2 = 8;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ i = first_tuple(link->handle, &tuple, &parse);
+ /* The Megahertz combo cards have modem-like CIS entries, so
+ we have to explicitly try a bunch of port combinations. */
+ while (i == CS_SUCCESS) {
+ link->conf.ConfigIndex = cf->index;
+ link->io.BasePort2 = cf->io.win[0].base;
+ /* Try bases 0x300, 0x310, ... skipping ones with bit 7 set. */
+ for (k = 0; k < 0x400; k += 0x10) {
+ if (k & 0x80) continue;
+ link->io.BasePort1 = k ^ 0x300;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i == CS_SUCCESS) break;
+ i = next_tuple(link->handle, &tuple, &parse);
+ }
+ if (i != CS_SUCCESS)
+ return i;
+ dev->base_addr = link->io.BasePort1;
+
+ /* Allocate a memory window, for accessing the ISR */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS)
+ return i;
+ smc->base = ioremap(req.Base, req.Size);
+ mem.CardOffset = mem.Page = 0;
+ if (smc->manfid == MANFID_MOTOROLA)
+ mem.CardOffset = link->conf.ConfigBase;
+ i = CardServices(MapMemPage, link->win, &mem);
+
+ /* EM3288 needs an explicit power-up of its LAN section. */
+ if ((i == CS_SUCCESS)
+ && (smc->manfid == MANFID_MEGAHERTZ)
+ && (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+
+ return i;
+}
+
+/* Obtain the hardware ethernet address of a Megahertz card: first from
+   the fourth string of the VERS_1 tuple, then (EM3288) from vendor
+   tuple 0x81.  Returns 0 on success, -1 if no address was found. */
+static int mhz_setup(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255], *station_addr;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ /* Read the station address from the CIS. It is stored as the last
+ (fourth) string in the Version 1 Version/ID tuple. */
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (first_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ return -1;
+ /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
+ /* Prefer the second VERS_1 if present; otherwise re-read the first
+ (next_tuple above has clobbered the parse buffer). */
+ if (next_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ first_tuple(handle, &tuple, &parse);
+ if (parse.version_1.ns > 3) {
+ station_addr = parse.version_1.str + parse.version_1.ofs[3];
+ if (cvt_ascii_address(dev, station_addr) == 0)
+ return 0;
+ }
+
+ /* Another possibility: for the EM3288, in a special tuple */
+ tuple.DesiredTuple = 0x81;
+ if (CardServices(GetFirstTuple, handle, &tuple) != CS_SUCCESS)
+ return -1;
+ if (CardServices(GetTupleData, handle, &tuple) != CS_SUCCESS)
+ return -1;
+ buf[12] = '\0'; /* terminate the 12 hex digits for cvt_ascii_address */
+ if (cvt_ascii_address(dev, buf) == 0)
+ return 0;
+
+ return -1;
+}
+
+/*======================================================================
+
+ Configuration stuff for the Motorola Mariner
+
+ mot_config() writes directly to the Mariner configuration
+ registers because the CIS is just bogus.
+
+======================================================================*/
+
+static void mot_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ ioaddr_t iouart = link->io.BasePort2;
+
+ /* Set UART base address and force map with COR bit 1 */
+ writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
+ writeb((iouart >> 8) & 0xff, smc->base + MOT_UART + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_UART + CISREG_COR);
+
+ /* Set SMC base address and force map with COR bit 1 */
+ writeb(ioaddr & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_0);
+ writeb((ioaddr >> 8) & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_LAN + CISREG_COR);
+
+ /* Wait for things to settle down */
+ mdelay(100);
+}
+
+/* Read the Mariner's ethernet address (three 16-bit words) from its
+   serial EEPROM via the 91c92 CONTROL/GENERAL registers.  Returns 0
+   on success, -1 if an EEPROM reload fails to complete. */
+static int mot_setup(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i, wait, loop;
+ u_int addr;
+
+ /* Read Ethernet address from Serial EEPROM */
+
+ for (i = 0; i < 3; i++) {
+ SMC_SELECT_BANK(2);
+ outw(MOT_EEPROM + i, ioaddr + POINTER);
+ SMC_SELECT_BANK(1);
+ outw((CTL_RELOAD | CTL_EE_SELECT), ioaddr + CONTROL);
+
+ /* Poll up to 200 x 10us for the RELOAD/STORE bits to clear. */
+ for (loop = wait = 0; loop < 200; loop++) {
+ udelay(10);
+ wait = ((CTL_RELOAD | CTL_STORE) & inw(ioaddr + CONTROL));
+ if (wait == 0) break;
+ }
+
+ if (wait)
+ return -1; /* EEPROM operation timed out */
+
+ addr = inw(ioaddr + GENERAL);
+ dev->dev_addr[2*i] = addr & 0xff;
+ dev->dev_addr[2*i+1] = (addr >> 8) & 0xff;
+ }
+
+ return 0;
+}
+
+/*====================================================================*/
+
+/* Generic I/O setup for plain (non-multifunction) SMC cards: walk the
+   CIS config table and take the first entry whose I/O window we can
+   actually request.  Returns a Card Services status code. */
+static int smc_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255];
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+ int i;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ link->io.NumPorts1 = 16;
+ i = first_tuple(link->handle, &tuple, &parse);
+ while (i != CS_NO_MORE_ITEMS) {
+ /* Skip entries that failed to parse; try the next one. */
+ if (i == CS_SUCCESS) {
+ link->conf.ConfigIndex = cf->index;
+ link->io.BasePort1 = cf->io.win[0].base;
+ link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ i = next_tuple(link->handle, &tuple, &parse);
+ }
+ if (i == CS_SUCCESS)
+ dev->base_addr = link->io.BasePort1;
+ return i;
+}
+
+/* Obtain the hardware address of a plain SMC card: first from a
+   CISTPL_FUNCE LAN-node-id tuple, then from the third string of the
+   VERS_1 tuple.  Returns 0 on success, -1 otherwise. */
+static int smc_setup(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ cistpl_lan_node_id_t *node_id;
+ u_char buf[255], *station_addr;
+ int i;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ /* Check for a LAN function extension tuple */
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ i = first_tuple(handle, &tuple, &parse);
+ while (i == CS_SUCCESS) {
+ if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID)
+ break;
+ i = next_tuple(handle, &tuple, &parse);
+ }
+ if (i == CS_SUCCESS) {
+ node_id = (cistpl_lan_node_id_t *)parse.funce.data;
+ if (node_id->nb == 6) {
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = node_id->id[i];
+ return 0;
+ }
+ }
+
+ /* Try the third string in the Version 1 Version/ID tuple. */
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (first_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ return -1;
+ station_addr = parse.version_1.str + parse.version_1.ofs[2];
+ if (cvt_ascii_address(dev, station_addr) == 0)
+ return 0;
+
+ return -1;
+}
+
+/*====================================================================*/
+
+/* osi_config(): I/O setup for Ositech multifunction (LAN + modem)
+   cards. Tries the four standard COM-port bases for the modem window;
+   if none can be allocated, falls back to a config index without hard
+   decode and no second window. Returns the last RequestIO status. */
+static int osi_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ static ioaddr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
+ int i, j;
+
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ link->io.NumPorts1 = 64;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts2 = 8;
+ link->io.IOAddrLines = 16;
+
+ /* Enable Hard Decode, LAN, Modem */
+ link->conf.ConfigIndex = 0x23;
+
+ for (i = j = 0; j < 4; j++) {
+ link->io.BasePort2 = com[j];
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ /* Fallback: turn off hard decode */
+ link->conf.ConfigIndex = 0x03;
+ link->io.NumPorts2 = 0;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ }
+ /* The 91cXX registers sit 0x10 into the Ositech I/O window. */
+ dev->base_addr = link->io.BasePort1 + 0x10;
+ return i;
+}
+
+/* osi_setup(): Ositech/Psion card initialization. Reads the station
+   address from vendor tuple 0x90 (subtuple 0x04), then either
+   downloads the Xilinx "Seven of Diamonds" firmware or powers up and
+   enables interrupts for both card functions, depending on the
+   manufacturer/card IDs. Returns 0 on success, -1 if the address
+   tuple is missing. */
+static int osi_setup(dev_link_t *link, u_short manfid, u_short cardid)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ u_char buf[255];
+ int i;
+
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+
+ /* Read the station address from tuple 0x90, subtuple 0x04 */
+ tuple.DesiredTuple = 0x90;
+ i = CardServices(GetFirstTuple, handle, &tuple);
+ while (i == CS_SUCCESS) {
+ i = CardServices(GetTupleData, handle, &tuple);
+ if ((i != CS_SUCCESS) || (buf[0] == 0x04))
+ break;
+ i = CardServices(GetNextTuple, handle, &tuple);
+ }
+ if (i != CS_SUCCESS)
+ return -1;
+ /* MAC bytes follow the 2-byte subtuple header. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = buf[i+2];
+
+ if (((manfid == MANFID_OSITECH) &&
+ (cardid == PRODID_OSITECH_SEVEN)) ||
+ ((manfid == MANFID_PSION) &&
+ (cardid == PRODID_PSION_NET100))) {
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < sizeof(__Xilinx7OD); i++) {
+ outb(__Xilinx7OD[i], link->io.BasePort1+2);
+ udelay(50);
+ }
+ } else if (manfid == MANFID_OSITECH) {
+ /* Make sure both functions are powered up */
+ set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
+ /* Now, turn on the interrupt for both card functions */
+ set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR);
+ DEBUG(2, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
+ inw(link->io.BasePort1 + OSITECH_AUI_PWR),
+ inw(link->io.BasePort1 + OSITECH_RESET_ISR));
+ }
+
+ return 0;
+}
+
+/*======================================================================
+
+ This verifies that the chip is some SMC91cXX variant, and returns
+ the revision code if successful. Otherwise, it returns -ENODEV.
+
+======================================================================*/
+
+static int check_sig(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ int width;
+ u_short s;
+
+ /* On all 91cXX chips the high byte of BANK_SELECT reads 0x33. */
+ SMC_SELECT_BANK(1);
+ if (inw(ioaddr + BANK_SELECT) >> 8 != 0x33) {
+ /* Try powering up the chip */
+ outw(0, ioaddr + CONTROL);
+ mdelay(55);
+ }
+
+ /* Try setting bus width */
+ width = (link->io.Attributes1 == IO_DATA_PATH_WIDTH_AUTO);
+ s = inb(ioaddr + CONFIG);
+ if (width)
+ s |= CFG_16BIT;
+ else
+ s &= ~CFG_16BIT;
+ outb(s, ioaddr + CONFIG);
+
+ /* Check Base Address Register to make sure bus width is OK */
+ /* The two bytes of BASE_ADDR differ on a healthy 16-bit read;
+    equal bytes suggest the high byte was not actually transferred. */
+ s = inw(ioaddr + BASE_ADDR);
+ if ((inw(ioaddr + BANK_SELECT) >> 8 == 0x33) &&
+ ((s >> 8) != (s & 0xff))) {
+ SMC_SELECT_BANK(3);
+ s = inw(ioaddr + REVISION);
+ return (s & 0xff);
+ }
+
+ if (width) {
+ event_callback_args_t args;
+ printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
+ /* Re-request the window 8 bits wide, reset the card through the
+    event handler, then retry the signature check recursively. */
+ args.client_data = link;
+ smc91c92_event(CS_EVENT_RESET_PHYSICAL, 0, &args);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ CardServices(RequestIO, link->handle, &link->io);
+ smc91c92_event(CS_EVENT_CARD_RESET, 0, &args);
+ return check_sig(link);
+ }
+ return -ENODEV;
+}
+
+/*======================================================================
+
+ smc91c92_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+/* On a Card Services failure: report it via cs_error() and jump to the
+   given cleanup label. Relies on a 'link' variable in the caller. */
+#define CS_EXIT_TEST(ret, svc, label) \
+if (ret != CS_SUCCESS) { cs_error(link->handle, svc, ret); goto label; }
+
+static void smc91c92_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32];
+ char *name;
+ int i, j, rev;
+ ioaddr_t ioaddr;
+
+ DEBUG(0, "smc91c92_config(0x%p)\n", link);
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ /* Locate the card's configuration registers from the CIS. */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ i = first_tuple(handle, &tuple, &parse);
+ CS_EXIT_TEST(i, ParseTuple, config_failed);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Manufacturer/card IDs select the vendor-specific setup path. */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if (first_tuple(handle, &tuple, &parse) == CS_SUCCESS) {
+ smc->manfid = parse.manfid.manf;
+ smc->cardid = parse.manfid.card;
+ }
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Vendor-specific I/O window setup. */
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ i = osi_config(link);
+ } else if ((smc->manfid == MANFID_MOTOROLA) ||
+ ((smc->manfid == MANFID_MEGAHERTZ) &&
+ ((smc->cardid == PRODID_MEGAHERTZ_VARIOUS) ||
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288)))) {
+ i = mhz_mfc_config(link);
+ } else {
+ i = smc_config(link);
+ }
+ CS_EXIT_TEST(i, RequestIO, config_failed);
+
+ i = CardServices(RequestIRQ, link->handle, &link->irq);
+ CS_EXIT_TEST(i, RequestIRQ, config_failed);
+ i = CardServices(RequestConfiguration, link->handle, &link->conf);
+ CS_EXIT_TEST(i, RequestConfiguration, config_failed);
+
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+
+ dev->irq = link->irq.AssignedIRQ;
+
+ /* if_port is a module parameter: 0..2 selects the transceiver. */
+ if ((if_port >= 0) && (if_port <= 2))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "smc91c92_cs: invalid if_port requested\n");
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
+ goto config_undo;
+ }
+
+ /* Vendor-specific station-address discovery. */
+ switch (smc->manfid) {
+ case MANFID_OSITECH:
+ case MANFID_PSION:
+ i = osi_setup(link, smc->manfid, smc->cardid); break;
+ case MANFID_SMC:
+ case MANFID_NEW_MEDIA:
+ i = smc_setup(link); break;
+ case 0x128: /* For broken Megahertz cards */
+ case MANFID_MEGAHERTZ:
+ i = mhz_setup(link); break;
+ case MANFID_MOTOROLA:
+ default: /* get the hw address from EEPROM */
+ i = mot_setup(link); break;
+ }
+
+ if (i != 0) {
+ printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
+ goto config_undo;
+ }
+
+ copy_dev_name(smc->node, dev);
+ link->dev = &smc->node;
+ smc->duplex = 0;
+ smc->rx_ovrn = 0;
+
+ /* Identify the exact 91cXX variant from its revision register. */
+ rev = check_sig(link);
+ name = "???";
+ if (rev > 0)
+ switch (rev >> 4) {
+ case 3: name = "92"; break;
+ case 4: name = ((rev & 15) >= 6) ? "96" : "94"; break;
+ case 5: name = "95"; break;
+ case 7: name = "100"; break;
+ case 8: name = "100-FD"; break;
+ case 9: name = "110"; break;
+ }
+ printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
+ "hw_addr ", dev->name, name, (rev & 0x0f), dev->base_addr,
+ dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ ioaddr = dev->base_addr;
+ if (rev > 0) {
+ u_long mir, mcr;
+ /* Report on-chip packet memory size (MIR scaled by MCR). */
+ SMC_SELECT_BANK(0);
+ mir = inw(ioaddr + MEMINFO) & 0xff;
+ if (mir == 0xff) mir++;
+ /* Get scale factor for memory size */
+ mcr = ((rev >> 4) > 3) ? inw(ioaddr + MEMCFG) : 0x0200;
+ mir *= 128 * (1<<((mcr >> 9) & 7));
+ if (mir & 0x3ff)
+ printk(KERN_INFO " %lu byte", mir);
+ else
+ printk(KERN_INFO " %lu kb", mir>>10);
+ SMC_SELECT_BANK(1);
+ /* Cache the CONFIG register value used by later resets. */
+ smc->cfg = inw(ioaddr + CONFIG) & ~CFG_AUI_SELECT;
+ smc->cfg |= CFG_NO_WAIT | CFG_16BIT | CFG_STATIC;
+ if (smc->manfid == MANFID_OSITECH)
+ smc->cfg |= CFG_IRQ_SEL_1 | CFG_IRQ_SEL_0;
+ if ((rev >> 4) >= 7)
+ smc->cfg |= CFG_MII_SELECT;
+ printk(" buffer, %s xcvr\n", (smc->cfg & CFG_MII_SELECT) ?
+ "MII" : if_names[dev->if_port]);
+ }
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ /* Probe all 32 PHY addresses for a responding transceiver. */
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+ smc->phy_id = (i < 32) ? i : -1;
+ if (i < 32) {
+ DEBUG(0, " MII transceiver at index %d, status %x.\n", i, j);
+ } else {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ }
+
+ SMC_SELECT_BANK(0);
+ }
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+config_undo:
+ unregister_netdev(dev);
+config_failed: /* CS_EXIT_TEST() calls jump to here... */
+ smc91c92_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+
+} /* smc91c92_config */
+
+/*======================================================================
+
+ After a card is removed, smc91c92_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void smc91c92_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+ struct smc_private *smc = link->priv;
+
+ DEBUG(0, "smc91c92_release(0x%p)\n", link);
+
+ /* If the interface is still up, mark the config stale and let
+    smc_close() reschedule the release later. */
+ if (link->open) {
+ DEBUG(1, "smc91c92_cs: release postponed, '%s' still open\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ /* Hand all socket resources back to Card Services. */
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+ if (link->win) {
+ iounmap(smc->base);
+ CardServices(ReleaseWindow, link->win);
+ }
+
+ link->state &= ~DEV_CONFIG;
+
+} /* smc91c92_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int smc91c92_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ int i;
+
+ DEBUG(1, "smc91c92_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ /* Defer the release ~50ms so it runs outside this callback. */
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ smc91c92_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ /* Redo any vendor-specific power/config steps after a reset. */
+ if ((smc->manfid == MANFID_MEGAHERTZ) &&
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Power up the card and enable interrupts */
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR);
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR);
+ }
+ if (((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid == PRODID_OSITECH_SEVEN)) ||
+ ((smc->manfid == MANFID_PSION) &&
+ (smc->cardid == PRODID_PSION_NET100))) {
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < sizeof(__Xilinx7OD); i++) {
+ outb(__Xilinx7OD[i], link->io.BasePort1+2);
+ udelay(50);
+ }
+ }
+ if (link->open) {
+ smc_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* smc91c92_event */
+
+/*======================================================================
+
+ MII interface support for SMC91cXX based cards
+======================================================================*/
+
+/* Bit positions in the MGMT register used to bit-bang the MII
+   management (MDIO) interface. */
+#define MDIO_SHIFT_CLK 0x04
+#define MDIO_DATA_OUT 0x01
+#define MDIO_DIR_WRITE 0x08
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x02
+
+/* mdio_sync(): clock out 32 '1' bits to resynchronize the PHY's
+   management interface before each frame. */
+static void mdio_sync(ioaddr_t addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ outb(MDIO_DATA_WRITE1, addr);
+ outb(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/* mdio_read(): bit-bang a MII management read of register 'loc' on
+   PHY 'phy_id'; returns the 16-bit register value. */
+static int mdio_read(struct net_device *dev, int phy_id, int loc)
+{
+ ioaddr_t addr = dev->base_addr + MGMT;
+ /* 0x06 = start (01) + read opcode (10), then PHY and register. */
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ /* Shift out the 14 command bits, MSB first. */
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ /* Clock in turnaround plus data; final >>1 drops the extra bit. */
+ for (i = 19; i > 0; i--) {
+ outb(0, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+/* mdio_write(): bit-bang a MII management write of 'value' to register
+   'loc' on PHY 'phy_id'. */
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+{
+ ioaddr_t addr = dev->base_addr + MGMT;
+ /* 0x05 = start (01) + write opcode (01); (1<<17) is the turnaround. */
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ /* Shift out the full 32-bit frame, MSB first. */
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ /* Two trailing idle clocks to finish the frame. */
+ for (i = 1; i >= 0; i--) {
+ outb(0, addr);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*======================================================================
+
+ The driver core code, most of which should be common with a
+ non-PCMCIA implementation.
+
+======================================================================*/
+
+#ifdef PCMCIA_DEBUG
+/* smc_dump(): debug helper — print the first 14 bytes of all four
+   register banks, restoring the originally selected bank afterwards. */
+static void smc_dump(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short i, w, save;
+ save = inw(ioaddr + BANK_SELECT);
+ for (w = 0; w < 4; w++) {
+ SMC_SELECT_BANK(w);
+ printk(KERN_DEBUG "bank %d: ", w);
+ for (i = 0; i < 14; i += 2)
+ printk(" %04x", inw(ioaddr + i));
+ printk("\n");
+ }
+ outw(save, ioaddr + BANK_SELECT);
+}
+#endif
+
+/* smc_open(): bring the interface up. Verifies the card is present
+   and alive, resets the chip, and starts the media-check timer. */
+static int smc_open(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ dev_link_t *link = &smc->link;
+
+#ifdef PCMCIA_DEBUG
+ DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n",
+ dev->name, dev, inw(dev->base_addr + BANK_SELECT));
+ if (pc_debug > 1) smc_dump(dev);
+#endif
+
+ /* Check that the PCMCIA card is still here. */
+ if (!DEV_OK(link))
+ return -ENODEV;
+ /* Physical device present signature. */
+ if (check_sig(link) < 0) {
+ printk("smc91c92_cs: Yikes! Bad chip signature!\n");
+ return -ENODEV;
+ }
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+ smc->saved_skb = 0;
+ smc->packets_waiting = 0;
+
+ smc_reset(dev);
+ /* Poll link/media status once a second. */
+ smc->media.function = &media_check;
+ smc->media.data = (u_long)smc;
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+
+ return 0;
+} /* smc_open */
+
+/*====================================================================*/
+
+/* smc_close(): bring the interface down — mask interrupts, disable
+   Rx/Tx, power down the chip, and run a deferred release if the card
+   configuration went stale while the interface was open. */
+static int smc_close(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ dev_link_t *link = &smc->link;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(0, "%s: smc_close(), status %4.4x.\n",
+ dev->name, inw(ioaddr + BANK_SELECT));
+
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ /* Shut off all interrupts, and turn off the Tx and Rx sections.
+ Don't bother to check for chip present. */
+ SMC_SELECT_BANK(2); /* Nominally paranoia, but do no assume... */
+ outw(0, ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ mask_bits(0xff00, ioaddr + RCR);
+ mask_bits(0xff00, ioaddr + TCR);
+
+ /* Put the chip into power-down mode. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_POWERDOWN, ioaddr + CONTROL );
+
+ link->open--;
+ del_timer(&smc->media);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* smc_close */
+
+/* smc_ioctl(): private MII ioctls — get PHY id, read a PHY register,
+   write a PHY register (the latter requires CAP_NET_ADMIN). Only
+   supported when the chip is using the MII transceiver. The register
+   bank is saved and restored around the MDIO access. */
+static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct smc_private *smc = dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ ushort saved_bank;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ if (!(smc->cfg & CFG_MII_SELECT))
+ return -EOPNOTSUPP;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(3);
+
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ data[0] = smc->phy_id;
+ /* fall through: after returning the PHY id, also do the read */
+ case SIOCDEVPRIVATE+1:
+ data[3] = mdio_read(dev, data[0], data[1] & 0x1f);
+ SMC_SELECT_BANK(saved_bank);
+ return 0;
+ case SIOCDEVPRIVATE+2:
+ if (!capable(CAP_NET_ADMIN)) {
+ SMC_SELECT_BANK(saved_bank);
+ return -EPERM;
+ }
+ mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
+ SMC_SELECT_BANK(saved_bank);
+ return 0;
+ }
+ SMC_SELECT_BANK(saved_bank);
+ return -EOPNOTSUPP;
+}
+/*======================================================================
+
+ Transfer a packet to the hardware and trigger the packet send.
+ This may be called at either from either the Tx queue code
+ or the interrupt handler.
+
+======================================================================*/
+
+static void smc_hardware_send_packet(struct net_device * dev)
+{
+ struct smc_private *smc = dev->priv;
+ struct sk_buff *skb = smc->saved_skb;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_char packet_no;
+
+ if (!skb) {
+ printk(KERN_ERR "%s: In XMIT with no packet to send.\n", dev->name);
+ return;
+ }
+
+ /* There should be a packet slot waiting. */
+ packet_no = inw(ioaddr + PNR_ARR) >> 8;
+ if (packet_no & 0x80) {
+ /* If not, there is a hardware problem! Likely an ejected card. */
+ printk(KERN_WARNING "%s: 91c92 hardware Tx buffer allocation"
+ " failed, status %#2.2x.\n", dev->name, packet_no);
+ dev_kfree_skb_irq(skb);
+ smc->saved_skb = NULL;
+ netif_start_queue(dev);
+ return;
+ }
+
+ add_tx_bytes(&smc->stats, skb->len);
+ /* The card should use the just-allocated buffer. */
+ outw(packet_no, ioaddr + PNR_ARR);
+ /* point to the beginning of the packet */
+ outw(PTR_AUTOINC , ioaddr + POINTER);
+
+ /* Send the packet length (+6 for status, length and ctl byte)
+ and the status word (set to zeros). */
+ {
+ u_char *buf = skb->data;
+ u_int length = skb->len; /* The chip will pad to ethernet min. */
+
+ DEBUG(2, "%s: Trying to xmit packet of length %d.\n",
+ dev->name, length);
+
+ /* send the packet length: +6 for status word, length, and ctl */
+ outw(0, ioaddr + DATA_1);
+ outw(length + 6, ioaddr + DATA_1);
+ /* Copy the payload as 16-bit words via the auto-increment port. */
+ outsw(ioaddr + DATA_1, buf, length >> 1);
+
+ /* The odd last byte, if there is one, goes in the control word. */
+ outw((length & 1) ? 0x2000 | buf[length-1] : 0, ioaddr + DATA_1);
+ }
+
+ /* Enable the Tx interrupts, both Tx (TxErr) and TxEmpty. */
+ outw(((IM_TX_INT|IM_TX_EMPTY_INT)<<8) |
+ (inw(ioaddr + INTERRUPT) & 0xff00),
+ ioaddr + INTERRUPT);
+
+ /* The chip does the rest of the work. */
+ outw(MC_ENQUEUE , ioaddr + MMU_CMD);
+
+ smc->saved_skb = NULL;
+ dev_kfree_skb_irq(skb);
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+ return;
+}
+
+/*====================================================================*/
+
+/* smc_tx_timeout(): watchdog handler — log the chip's Tx status,
+   reset the chip, drop the stuck packet, and restart the queue. */
+static void smc_tx_timeout(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, "
+ "Tx_status %2.2x status %4.4x.\n",
+ dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
+ smc->stats.tx_errors++;
+ smc_reset(dev);
+ dev->trans_start = jiffies;
+ smc->saved_skb = NULL;
+ netif_wake_queue(dev);
+}
+
+/* smc_start_xmit(): queue one packet for transmission. Asks the chip
+   MMU for buffer pages; if the allocation completes within
+   MEMORY_WAIT_TIME polls the packet is sent immediately, otherwise the
+   send is deferred to the IM_ALLOC_INT interrupt. Returns 0 on
+   accept/drop, 1 if called while a packet is already pending. */
+static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short num_pages;
+ short time_out, ir;
+
+ tx_timeout_check(dev, smc_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ DEBUG(2, "%s: smc_start_xmit(length = %ld) called,"
+ " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2));
+
+ if (smc->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ smc->stats.tx_aborted_errors++;
+ printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n",
+ dev->name);
+ return 1;
+ }
+ smc->saved_skb = skb;
+
+ /* The chip allocates Tx memory in 256-byte pages. */
+ num_pages = skb->len >> 8;
+
+ if (num_pages > 7) {
+ printk(KERN_ERR "%s: Far too big packet error.\n", dev->name);
+ DEV_KFREE_SKB (skb);
+ smc->saved_skb = NULL;
+ smc->stats.tx_dropped++;
+ return 0; /* Do not re-queue this packet. */
+ }
+ /* A packet is now waiting. */
+ smc->packets_waiting++;
+
+ SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+
+ /* Allocate the memory; send the packet now if we win. */
+ outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD);
+ for (time_out = MEMORY_WAIT_TIME; time_out >= 0; time_out--) {
+ ir = inw(ioaddr+INTERRUPT);
+ if (ir & IM_ALLOC_INT) {
+ /* Acknowledge the interrupt, send the packet. */
+ outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT);
+ smc_hardware_send_packet(dev); /* Send the packet now.. */
+ return 0;
+ }
+ }
+
+ /* Otherwise defer until the Tx-space-allocated interrupt. */
+ DEBUG(2, "%s: memory allocation deferred.\n", dev->name);
+ outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
+
+ return 0;
+}
+
+/*======================================================================
+
+ Handle a Tx anomolous event. Entered while in Window 2.
+
+======================================================================*/
+
+static void smc_tx_err(struct net_device * dev)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ /* Remember the current packet number so it can be restored. */
+ int saved_packet = inw(ioaddr + PNR_ARR) & 0xff;
+ /* The failed packet is at the head of the Tx completion FIFO. */
+ int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f;
+ int tx_status;
+
+ /* select this as the packet to read from */
+ outw(packet_no, ioaddr + PNR_ARR);
+
+ /* read the first word from this packet */
+ outw(PTR_AUTOINC | PTR_READ | 0, ioaddr + POINTER);
+
+ tx_status = inw(ioaddr + DATA_1);
+
+ /* Classify the failure into the appropriate error counters. */
+ smc->stats.tx_errors++;
+ if (tx_status & TS_LOSTCAR) smc->stats.tx_carrier_errors++;
+ if (tx_status & TS_LATCOL) smc->stats.tx_window_errors++;
+ if (tx_status & TS_16COL) {
+ smc->stats.tx_aborted_errors++;
+ smc->tx_err++;
+ }
+
+ if (tx_status & TS_SUCCESS) {
+ printk(KERN_NOTICE "%s: Successful packet caused error "
+ "interrupt?\n", dev->name);
+ }
+ /* re-enable transmit */
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+ SMC_SELECT_BANK(2);
+
+ outw(MC_FREEPKT, ioaddr + MMU_CMD); /* Free the packet memory. */
+
+ /* one less packet waiting for me */
+ smc->packets_waiting--;
+
+ outw(saved_packet, ioaddr + PNR_ARR);
+ return;
+}
+
+/*====================================================================*/
+
+/* smc_eph_irq(): handle an Ethernet Protocol Handler interrupt —
+   harvest the collision counters, re-enable the transmitter, and
+   clear any link-error condition. Entered in bank 2; returns with
+   bank 2 selected. */
+static void smc_eph_irq(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short card_stats, ephs;
+
+ SMC_SELECT_BANK(0);
+ ephs = inw(ioaddr + EPH);
+ DEBUG(2, "%s: Ethernet protocol handler interrupt, status"
+ " %4.4x.\n", dev->name, ephs);
+ /* Could be a counter roll-over warning: update stats. */
+ card_stats = inw(ioaddr + COUNTER);
+ /* single collisions */
+ smc->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ smc->stats.collisions += card_stats & 0xF;
+#if 0 /* These are for when linux supports these statistics */
+ card_stats >>= 4; /* deferred */
+ card_stats >>= 4; /* excess deferred */
+#endif
+ /* If we had a transmit error we must re-enable the transmitter. */
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+
+ /* Clear a link error interrupt. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_AUTO_RELEASE | 0x0000, ioaddr + CONTROL);
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ SMC_SELECT_BANK(2);
+}
+
+/*====================================================================*/
+
+/* smc_interrupt(): top-level IRQ handler. Saves the chip's bank and
+   pointer registers, services Rx/Tx/alloc/overrun/EPH events for up
+   to INTR_WORK iterations, restores the saved state, and finally
+   performs the vendor-specific interrupt re-trigger/acknowledge
+   quirks (Ositech, Motorola). */
+static void smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct smc_private *smc = dev_id;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr;
+ u_short saved_bank, saved_pointer, mask, status;
+ char bogus_cnt = INTR_WORK; /* Work we are willing to do. */
+
+ if (!netif_device_present(dev))
+ return;
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
+ irq, ioaddr);
+
+ smc->watchdog = 0;
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ if ((saved_bank & 0xff00) != 0x3300) {
+ /* The device does not exist -- the card could be off-line, or
+ maybe it has been ejected. */
+ DEBUG(1, "%s: SMC91c92 interrupt %d for non-existent"
+ "/ejected device.\n", dev->name, irq);
+ goto irq_done;
+ }
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw(ioaddr + POINTER);
+ /* High byte of INTERRUPT is the enable mask; low byte the status. */
+ mask = inw(ioaddr + INTERRUPT) >> 8;
+ /* clear all interrupts */
+ outw(0, ioaddr + INTERRUPT);
+
+ do { /* read the status flag, and mask it */
+ status = inw(ioaddr + INTERRUPT) & 0xff;
+ DEBUG(3, "%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
+ status, mask);
+ if ((status & mask) == 0)
+ break;
+
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ smc_rx(dev);
+ }
+ if (status & IM_TX_INT) {
+ smc_tx_err(dev);
+ outw(IM_TX_INT, ioaddr + INTERRUPT);
+ }
+ status &= mask;
+ if (status & IM_TX_EMPTY_INT) {
+ outw(IM_TX_EMPTY_INT, ioaddr + INTERRUPT);
+ mask &= ~IM_TX_EMPTY_INT;
+ /* All queued packets have completed successfully. */
+ smc->stats.tx_packets += smc->packets_waiting;
+ smc->packets_waiting = 0;
+ }
+ if (status & IM_ALLOC_INT) {
+ /* Clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ /* A deferred allocation completed: send the pending skb. */
+ smc_hardware_send_packet(dev);
+
+ /* enable xmit interrupts based on this */
+ mask |= (IM_TX_EMPTY_INT | IM_TX_INT);
+
+ /* and let the card send more packets to me */
+ netif_wake_queue(dev);
+ }
+ if (status & IM_RX_OVRN_INT) {
+ smc->stats.rx_errors++;
+ smc->stats.rx_fifo_errors++;
+ if (smc->duplex)
+ smc->rx_ovrn = 1; /* need MC_RESET outside smc_interrupt */
+ outw(IM_RX_OVRN_INT, ioaddr + INTERRUPT);
+ }
+ if (status & IM_EPH_INT)
+ smc_eph_irq(dev);
+ } while (--bogus_cnt);
+
+ DEBUG(3, " Restoring saved registers mask %2.2x bank %4.4x"
+ " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+
+ /* restore state register */
+ outw((mask<<8), ioaddr + INTERRUPT);
+ outw(saved_pointer, ioaddr + POINTER);
+ SMC_SELECT_BANK(saved_bank);
+
+ DEBUG(3, "%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+
+irq_done:
+
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Retrigger interrupt if needed */
+ mask_bits(0x00ff, ioaddr-0x10+OSITECH_RESET_ISR);
+ set_bits(0x0300, ioaddr-0x10+OSITECH_RESET_ISR);
+ }
+ if (smc->manfid == MANFID_MOTOROLA) {
+ /* Pulse the COR interrupt-enable bits to re-arm the IRQ line. */
+ u_char cor;
+ cor = readb(smc->base + MOT_UART + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_UART + CISREG_COR);
+ writeb(cor, smc->base + MOT_UART + CISREG_COR);
+ cor = readb(smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor, smc->base + MOT_LAN + CISREG_COR);
+ }
+#ifdef DOES_NOT_WORK
+ if (smc->base != NULL) { /* Megahertz MFC's */
+ readb(smc->base+MEGAHERTZ_ISR);
+ readb(smc->base+MEGAHERTZ_ISR);
+ }
+#endif
+}
+
+/*====================================================================*/
+
+/* smc_rx(): pull one received packet out of the chip's Rx FIFO, copy
+   it into a fresh skb and hand it to the network stack, then release
+   the on-chip packet memory. Must be called with bank 2 selected. */
+static void smc_rx(struct net_device *dev)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int rx_status;
+ int packet_length; /* Caution: not frame length, rather words
+ to transfer from the chip. */
+
+ /* Assertion: we are in Window 2. */
+
+ if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
+ printk(KERN_ERR "%s: smc_rx() with nothing on Rx FIFO.\n",
+ dev->name);
+ return;
+ }
+
+ /* Reset the read pointer, and read the status and packet length. */
+ outw(PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER);
+ rx_status = inw(ioaddr + DATA_1);
+ packet_length = inw(ioaddr + DATA_1) & 0x07ff;
+
+ DEBUG(2, "%s: Receive status %4.4x length %d.\n",
+ dev->name, rx_status, packet_length);
+
+ if (!(rx_status & RS_ERRORS)) {
+ /* do stuff to make a new packet */
+ struct sk_buff *skb;
+
+ /* Note: packet_length adds 5 or 6 extra bytes here! */
+ skb = dev_alloc_skb(packet_length+2);
+
+ if (skb == NULL) {
+ DEBUG(1, "%s: Low memory, packet dropped.\n", dev->name);
+ smc->stats.rx_dropped++;
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+ return;
+ }
+
+ /* Strip the chip's header/control overhead from the length. */
+ packet_length -= (rx_status & RS_ODDFRAME ? 5 : 6);
+ /* 2-byte reserve aligns the IP header on a 4-byte boundary. */
+ skb_reserve(skb, 2);
+ insw(ioaddr+DATA_1, skb_put(skb, packet_length),
+ (packet_length+1)>>1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ skb->dev = dev;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ smc->stats.rx_packets++;
+ add_rx_bytes(&smc->stats, packet_length);
+ if (rx_status & RS_MULTICAST)
+ smc->stats.multicast++;
+ } else {
+ /* error ... */
+ smc->stats.rx_errors++;
+
+ if (rx_status & RS_ALGNERR) smc->stats.rx_frame_errors++;
+ if (rx_status & (RS_TOOSHORT | RS_TOOLONG))
+ smc->stats.rx_length_errors++;
+ if (rx_status & RS_BADCRC) smc->stats.rx_crc_errors++;
+ }
+ /* Let the MMU free the memory of this packet. */
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+
+ return;
+}
+
+/*====================================================================*/
+
+/* smc_get_stats(): return the driver-maintained statistics block.
+   All counters are updated as events happen, so no hardware access
+   is needed here. */
+static struct net_device_stats *smc_get_stats(struct net_device *dev)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ /* Nothing to update - the 91c92 is a pretty primative chip. */
+ return &smc->stats;
+}
+
+/*======================================================================
+
+ Calculate values for the hardware multicast filter hash table.
+
+======================================================================*/
+
+static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
+ u_char *multicast_table)
+{
+ struct dev_mc_list *mc_addr;
+
+ for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) {
+ u_int position = ether_crc(6, mc_addr->dmi_addr);
+#ifndef final_version /* Verify multicast address. */
+ if ((mc_addr->dmi_addr[0] & 1) == 0)
+ continue;
+#endif
+ /* Top 6 CRC bits select one of 64 filter bits: bits 31..29 pick
+    the table byte, bits 28..26 the bit within it. */
+ multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
+ }
+}
+
+/*======================================================================
+
+ Set the receive mode.
+
+ This routine is used by both the protocol level to notify us of
+ promiscuous/multicast mode changes, and by the open/reset code to
+ initialize the Rx registers. We always set the multicast list and
+ leave the receiver running.
+
+======================================================================*/
+
+static void set_rx_mode(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ /* 64-bit multicast hash filter, written as two 32-bit words. */
+ u_int multicast_table[ 2 ] = { 0, };
+ unsigned long flags;
+ u_short rx_cfg_setting;
+
+ if (dev->flags & IFF_PROMISC) {
+ printk(KERN_NOTICE "%s: setting Rx mode to promiscuous.\n", dev->name);
+ rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
+ } else if (dev->flags & IFF_ALLMULTI)
+ rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
+ else {
+ if (dev->mc_count) {
+ fill_multicast_tbl(dev->mc_count, dev->mc_list,
+ (u_char *)multicast_table);
+ }
+ rx_cfg_setting = RxStripCRC | RxEnable;
+ }
+
+ /* Load MC table and Rx setting into the chip without interrupts. */
+ save_flags(flags);
+ cli();
+ SMC_SELECT_BANK(3);
+ outl(multicast_table[0], ioaddr + MULTICAST0);
+ outl(multicast_table[1], ioaddr + MULTICAST4);
+ SMC_SELECT_BANK(0);
+ outw(rx_cfg_setting, ioaddr + RCR);
+ /* Leave the chip in bank 2, the driver's working window. */
+ SMC_SELECT_BANK(2);
+ restore_flags(flags);
+
+ return;
+}
+
+/*======================================================================
+
+ Senses when a card's config changes. Here, it's coax or TP.
+
+======================================================================*/
+
+static int s9k_config(struct net_device *dev, struct ifmap *map)
+{
+ struct smc_private *smc = dev->priv;
+ /* map->port == 0xff means "no change requested". */
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ /* Manual port switching is not supported on MII cards;
+ only ports 0..2 (auto/10baseT/10base2) are valid here. */
+ if (smc->cfg & CFG_MII_SELECT)
+ return -EOPNOTSUPP;
+ else if (map->port > 2)
+ return -EINVAL;
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ smc_reset(dev); /* reprogram the chip for the new port */
+ }
+ return 0;
+}
+
+/*======================================================================
+
+ Reset the chip, reloading every register that might be corrupted.
+
+======================================================================*/
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void smc_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short saved_bank;
+
+ /* Preserve the caller's bank selection across the register update. */
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(1);
+ if (if_port == 2) {
+ /* AUI (10base2) selected: also power the AUI on Ositech cards. */
+ outw(smc->cfg | CFG_AUI_SELECT, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ set_bits(OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ /* NOTE(review): this tests dev->if_port, not the if_port
+ parameter -- confirm that is intentional. */
+ smc->media_status = ((dev->if_port == 0) ? 0x0001 : 0x0002);
+ } else {
+ outw(smc->cfg, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ mask_bits(~OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ smc->media_status = ((dev->if_port == 0) ? 0x0012 : 0x4001);
+ }
+ SMC_SELECT_BANK(saved_bank);
+}
+
+/* Soft-reset the chip and reprogram every register the driver relies on:
+ control/config, station address, MMU, transmit config, Rx mode, MII
+ autonegotiation (if selected) and finally the interrupt mask. */
+static void smc_reset(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ struct smc_private *smc = dev->priv;
+ int i;
+
+ DEBUG(0, "%s: smc91c92 reset called.\n", dev->name);
+
+ /* The first interaction must be a write to bring the chip out
+ of sleep mode. */
+ SMC_SELECT_BANK(0);
+ /* Reset the chip. */
+ outw(RCR_SOFTRESET, ioaddr + RCR);
+ udelay(10);
+
+ /* Clear the transmit and receive configuration registers. */
+ outw(RCR_CLEAR, ioaddr + RCR);
+ outw(TCR_CLEAR, ioaddr + TCR);
+
+ /* Set the Window 1 control, configuration and station addr registers.
+ No point in writing the I/O base register ;-> */
+ SMC_SELECT_BANK(1);
+ /* Automatically release successfully transmitted packets,
+ Accept link errors, counter and Tx error interrupts. */
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ smc_set_xcvr(dev, dev->if_port);
+ /* Keep the Ositech AUI power bit in sync with the selected port. */
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ outw((dev->if_port == 2 ? OSI_AUI_PWR : 0) |
+ (inw(ioaddr-0x10+OSITECH_AUI_PWR) & 0xff00),
+ ioaddr - 0x10 + OSITECH_AUI_PWR);
+
+ /* Fill in the physical address. The databook is wrong about the order! */
+ for (i = 0; i < 6; i += 2)
+ outw((dev->dev_addr[i+1]<<8)+dev->dev_addr[i],
+ ioaddr + ADDR0 + i);
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK(2);
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ outw(0, ioaddr + INTERRUPT);
+
+ /* Re-enable the chip. */
+ SMC_SELECT_BANK(0);
+ outw(((smc->cfg & CFG_MII_SELECT) ? 0 : TCR_MONCSN) |
+ TCR_ENABLE | TCR_PAD_EN | smc->duplex, ioaddr + TCR);
+ set_rx_mode(dev);
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ /* Reset MII */
+ mdio_write(dev, smc->phy_id, 0, 0x8000);
+
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(dev, smc->phy_id, 4, 0x01e1);
+
+ /* Restart MII autonegotiation */
+ mdio_write(dev, smc->phy_id, 0, 0x0000);
+ mdio_write(dev, smc->phy_id, 0, 0x1200);
+ }
+
+ /* Enable interrupts. */
+ SMC_SELECT_BANK(2);
+ outw((IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) << 8,
+ ioaddr + INTERRUPT);
+}
+
+/*======================================================================
+
+ Media selection timer routine
+
+======================================================================*/
+
+/* Periodic (1 Hz) timer callback: watch for dropped interrupts, track
+ link/media changes, and on "auto" ports flip between 10baseT and
+ 10base2 as needed. Always re-arms itself via smc->media. */
+static void media_check(u_long arg)
+{
+ struct smc_private *smc = (struct smc_private *)(arg);
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short i, media, saved_bank;
+ u_short link;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ SMC_SELECT_BANK(2);
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+ i = inw(ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ media = inw(ioaddr + EPH) & EPH_LINK_OK;
+ SMC_SELECT_BANK(1);
+ /* bit 1 = AUI selected, bit 0 = TP selected */
+ media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
+
+ /* Check for pending interrupt with watchdog flag set: with
+ this, we can limp along even if the interrupt is blocked */
+ if (smc->watchdog++ && ((i>>8) & i)) {
+ if (!smc->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ smc_interrupt(dev->irq, smc, NULL);
+ smc->fast_poll = HZ;
+ }
+ /* While limping, poll every jiffy instead of every second. */
+ if (smc->fast_poll) {
+ smc->fast_poll--;
+ smc->media.expires = jiffies + 1;
+ add_timer(&smc->media);
+ SMC_SELECT_BANK(saved_bank);
+ return;
+ }
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ if (smc->phy_id < 0)
+ goto reschedule;
+
+ SMC_SELECT_BANK(3);
+ /* MII status register; it reads as 0xffff when no PHY responds. */
+ link = mdio_read(dev, smc->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ smc->phy_id = -1;
+ goto reschedule;
+ }
+
+ /* Keep only the link-status bit and act on transitions. */
+ link &= 0x0004;
+ if (link != smc->link_status) {
+ u_short p = mdio_read(dev, smc->phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
+ ? TCR_FDUPLX : 0);
+ if (link) {
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ (smc->duplex ? 'F' : 'H'));
+ }
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
+ smc->link_status = link;
+ }
+ goto reschedule;
+ }
+
+ /* Ignore collisions unless we've had no rx's recently */
+ if (jiffies - dev->last_rx > HZ) {
+ if (smc->tx_err || (smc->media_status & EPH_16COL))
+ media |= EPH_16COL;
+ }
+ smc->tx_err = 0;
+
+ if (media != smc->media_status) {
+ if ((media & smc->media_status & 1) &&
+ ((smc->media_status ^ media) & EPH_LINK_OK))
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (smc->media_status & EPH_LINK_OK ? "lost" : "found"));
+ else if ((media & smc->media_status & 2) &&
+ ((smc->media_status ^ media) & EPH_16COL))
+ printk(KERN_INFO "%s: coax cable %s\n", dev->name,
+ (media & EPH_16COL ? "problem" : "ok"));
+ /* On the "auto" port, switch transceivers when the current
+ one looks dead. */
+ if (dev->if_port == 0) {
+ if (media & 1) {
+ if (media & EPH_LINK_OK)
+ printk(KERN_INFO "%s: flipped to 10baseT\n",
+ dev->name);
+ else
+ smc_set_xcvr(dev, 2);
+ } else {
+ if (media & EPH_16COL)
+ smc_set_xcvr(dev, 1);
+ else
+ printk(KERN_INFO "%s: flipped to 10base2\n",
+ dev->name);
+ }
+ }
+ smc->media_status = media;
+ }
+
+reschedule:
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+ SMC_SELECT_BANK(saved_bank);
+}
+
+
+/*====================================================================*/
+
+/* Module entry point: verify the Card Services version, then register
+ the attach/detach callbacks with the PCMCIA subsystem. */
+static int __init init_smc91c92_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_ERR
+ "smc91c92_cs: Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ /* NOTE(review): the return value of register_pccard_driver is
+ not checked -- confirm failure here is impossible/benign. */
+ register_pccard_driver(&dev_info, &smc91c92_attach, &smc91c92_detach);
+ return 0;
+}
+
+/* Module exit: deregister from Card Services and tear down every
+ remaining device instance (detach is expected to unlink each entry
+ from dev_list, so this loop terminates). */
+static void __exit exit_smc91c92_cs(void)
+{
+ DEBUG(0, "smc91c92_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ smc91c92_detach(dev_list);
+}
+
+module_init(init_smc91c92_cs);
+module_exit(exit_smc91c92_cs);
diff --git a/linux/pcmcia-cs/clients/xirc2ps_cs.c b/linux/pcmcia-cs/clients/xirc2ps_cs.c
new file mode 100644
index 0000000..9db947d
--- /dev/null
+++ b/linux/pcmcia-cs/clients/xirc2ps_cs.c
@@ -0,0 +1,2091 @@
+/* [xirc2ps_cs.c wk 03.11.99] (1.40 1999/11/18 00:06:03)
+ * Xircom CreditCard Ethernet Adapter IIps driver
+ * Xircom Realport 10/100 (RE-100) driver
+ *
+ * This driver supports various Xircom CreditCard Ethernet adapters
+ * including the CE2, CE IIps, RE-10, CEM28, CEM33, CE33, CEM56,
+ * CE3-100, CE3B, RE-100, REM10BT, and REM56G-100.
+ *
+ * 2000-09-24 <psheer@icon.co.za> The Xircom CE3B-100 may not
+ * autodetect the media properly. In this case use the
+ * if_port=1 (for 10BaseT) or if_port=4 (for 100BaseT) options
+ * to force the media type.
+ *
+ * Written originally by Werner Koch based on David Hinds' skeleton of the
+ * PCMCIA driver.
+ *
+ * Copyright (c) 1997,1998 Werner Koch (dd9jn)
+ *
+ * This driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ *
+ * ALTERNATIVELY, this driver may be distributed under the terms of
+ * the following license, in which case the provisions of this license
+ * are required INSTEAD OF the GNU General Public License. (This clause
+ * is necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+#ifndef MANFID_COMPAQ
+ #define MANFID_COMPAQ 0x0138
+ #define MANFID_COMPAQ2 0x0183 /* is this correct? */
+#endif
+
+#include <pcmcia/ds.h>
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/****************
+ * Some constants used to access the hardware
+ */
+
+/* Register offsets and value constants */
+#define XIRCREG_CR 0 /* Command register (wr) */
+enum xirc_cr {
+ TransmitPacket = 0x01,
+ SoftReset = 0x02,
+ EnableIntr = 0x04,
+ ForceIntr = 0x08,
+ ClearTxFIFO = 0x10,
+ ClearRxOvrun = 0x20,
+ RestartTx = 0x40
+};
+#define XIRCREG_ESR 0 /* Ethernet status register (rd) */
+enum xirc_esr {
+ FullPktRcvd = 0x01, /* full packet in receive buffer */
+ PktRejected = 0x04, /* a packet has been rejected */
+ TxPktPend = 0x08, /* TX Packet Pending */
+ IncorPolarity = 0x10,
+ MediaSelect = 0x20 /* set if TP, clear if AUI */
+};
+#define XIRCREG_PR 1 /* Page Register select */
+#define XIRCREG_EDP 4 /* Ethernet Data Port Register */
+#define XIRCREG_ISR 6 /* Ethernet Interrupt Status Register */
+enum xirc_isr {
+ TxBufOvr = 0x01, /* TX Buffer Overflow */
+ PktTxed = 0x02, /* Packet Transmitted */
+ MACIntr = 0x04, /* MAC Interrupt occurred */
+ TxResGrant = 0x08, /* Tx Reservation Granted */
+ RxFullPkt = 0x20, /* Rx Full Packet */
+ RxPktRej = 0x40, /* Rx Packet Rejected */
+ ForcedIntr= 0x80 /* Forced Interrupt */
+};
+#define XIRCREG1_IMR0 12 /* Ethernet Interrupt Mask Register (on page 1)*/
+#define XIRCREG1_IMR1 13
+#define XIRCREG0_TSO 8 /* Transmit Space Open Register (on page 0)*/
+#define XIRCREG0_TRS 10 /* Transmit reservation Size Register (page 0)*/
+#define XIRCREG0_DO 12 /* Data Offset Register (page 0) (wr) */
+#define XIRCREG0_RSR 12 /* Receive Status Register (page 0) (rd) */
+enum xirc_rsr {
+ PhyPkt = 0x01, /* set:physical packet, clear: multicast packet */
+ BrdcstPkt = 0x02, /* set if it is a broadcast packet */
+ PktTooLong = 0x04, /* set if packet length > 1518 */
+ AlignErr = 0x10, /* incorrect CRC and last octet not complete */
+ CRCErr = 0x20, /* incorrect CRC and last octet is complete */
+ PktRxOk = 0x80 /* received ok */
+};
+#define XIRCREG0_PTR 13 /* packets transmitted register (rd) */
+#define XIRCREG0_RBC 14 /* receive byte count register (rd) */
+#define XIRCREG1_ECR 14 /* ethernet configuration register */
+enum xirc_ecr {
+ FullDuplex = 0x04, /* enable full duplex mode */
+ LongTPMode = 0x08, /* adjust for longer lengths of TP cable */
+ DisablePolCor = 0x10,/* disable auto polarity correction */
+ DisableLinkPulse = 0x20, /* disable link pulse generation */
+ DisableAutoTx = 0x40, /* disable auto-transmit */
+};
+#define XIRCREG2_RBS 8 /* receive buffer start register */
+#define XIRCREG2_LED 10 /* LED Configuration register */
+/* values for the leds: Bits 2-0 for led 1
+ * 0 disabled Bits 5-3 for led 2
+ * 1 collision
+ * 2 noncollision
+ * 3 link_detected
+ * 4 incor_polarity
+ * 5 jabber
+ * 6 auto_assertion
+ * 7 rx_tx_activity
+ */
+#define XIRCREG2_MSR 12 /* Mohawk specific register */
+
+#define XIRCREG4_GPR0 8 /* General Purpose Register 0 */
+#define XIRCREG4_GPR1 9 /* General Purpose Register 1 */
+#define XIRCREG2_GPR2 13 /* General Purpose Register 2 (page2!)*/
+#define XIRCREG4_BOV 10 /* Bonding Version Register */
+#define XIRCREG4_LMA 12 /* Local Memory Address Register */
+#define XIRCREG4_LMD 14 /* Local Memory Data Port */
+/* MAC register can only by accessed with 8 bit operations */
+#define XIRCREG40_CMD0 8 /* Command Register (wr) */
+enum xirc_cmd { /* Commands */
+ Transmit = 0x01,
+ EnableRecv = 0x04,
+ DisableRecv = 0x08,
+ Abort = 0x10,
+ Online = 0x20,
+ IntrAck = 0x40,
+ Offline = 0x80
+};
+#define XIRCREG5_RHSA0 10 /* Rx Host Start Address */
+#define XIRCREG40_RXST0 9 /* Receive Status Register */
+#define XIRCREG40_TXST0 11 /* Transmit Status Register 0 */
+#define XIRCREG40_TXST1 12 /* Transmit Status Register 1 */
+#define XIRCREG40_RMASK0 13 /* Receive Mask Register */
+#define XIRCREG40_TMASK0 14 /* Transmit Mask Register 0 */
+#define XIRCREG40_TMASK1 15 /* Transmit Mask Register 1 */
+#define XIRCREG42_SWC0 8 /* Software Configuration 0 */
+#define XIRCREG42_SWC1 9 /* Software Configuration 1 */
+#define XIRCREG42_BOC 10 /* Back-Off Configuration */
+#define XIRCREG44_TDR0 8 /* Time Domain Reflectometry 0 */
+#define XIRCREG44_TDR1 9 /* Time Domain Reflectometry 1 */
+#define XIRCREG44_RXBC_LO 10 /* Rx Byte Count 0 (rd) */
+#define XIRCREG44_RXBC_HI 11 /* Rx Byte Count 1 (rd) */
+#define XIRCREG45_REV 15 /* Revision Register (rd) */
+#define XIRCREG50_IA 8 /* Individual Address (8-13) */
+
+static char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; /* indexed by if_port */
+
+/****************
+ * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ * you do not define PCMCIA_DEBUG at all, all the debug code will be
+ * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ * be present but disabled -- but it can then be enabled for specific
+ * modules at load time with a 'pc_debug=#' option to insmod.
+ */
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+MODULE_PARM(pc_debug, "i");
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KDBG_XIRC args)
+#else
+#define DEBUG(n, args...)
+#endif
+static char *version =
+"xirc2ps_cs.c 1.31 1998/12/09 19:32:55 (dd9jn+kvh)";
+ /* !--- CVS revision */
+#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
+#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
+#define KWRN_XIRC KERN_WARNING "xirc2ps_cs: "
+#define KNOT_XIRC KERN_NOTICE "xirc2ps_cs: "
+#define KINF_XIRC KERN_INFO "xirc2ps_cs: "
+
+/* card types */
+#define XIR_UNKNOWN 0 /* unknown: not supported */
+#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
+#define XIR_CE2 2 /* (prodid 2) */
+#define XIR_CE3 3 /* (prodid 3) */
+#define XIR_CEM 4 /* (prodid 1) different hardware: not supported */
+#define XIR_CEM2 5 /* (prodid 2) */
+#define XIR_CEM3 6 /* (prodid 3) */
+#define XIR_CEM33 7 /* (prodid 4) */
+#define XIR_CEM56M 8 /* (prodid 5) */
+#define XIR_CEM56 9 /* (prodid 6) */
+#define XIR_CM28 10 /* (prodid 3) modem only: not supported here */
+#define XIR_CM33 11 /* (prodid 4) modem only: not supported here */
+#define XIR_CM56 12 /* (prodid 5) modem only: not supported here */
+#define XIR_CG 13 /* (prodid 1) GSM modem only: not supported */
+#define XIR_CBE 14 /* (prodid 1) cardbus ethernet: not supported */
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("Xircom PCMCIA ethernet driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+INT_MODULE_PARM(if_port, 0);
+INT_MODULE_PARM(full_duplex, 0);
+INT_MODULE_PARM(do_sound, 1);
+INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */
+
+/*====================================================================*/
+
+/* We do not process more than these number of bytes during one
+ * interrupt. (Of course we receive complete packets, so this is not
+ * an exact value).
+ * Something between 2000..22000; first value gives best interrupt latency,
+ * the second enables the usage of the complete on-chip buffer. We use the
+ * high value as the initial value.
+ */
+static unsigned maxrx_bytes = 22000;
+
+/* MII management prototypes */
+static void mii_idle(ioaddr_t ioaddr);
+static void mii_putbit(ioaddr_t ioaddr, unsigned data);
+static int mii_getbit(ioaddr_t ioaddr);
+static void mii_wbits(ioaddr_t ioaddr, unsigned data, int len);
+static unsigned mii_rd(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg);
+static void mii_wr(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg,
+ unsigned data, int len);
+
+/*
+ * The event() function is this driver's Card Services event handler.
+ * It will be called by Card Services when an appropriate card status
+ * event is received. The config() and release() entry points are
+ * used to configure or release a socket, in response to card insertion
+ * and ejection events. They are invoked from the event handler.
+ */
+
+static int has_ce2_string(dev_link_t * link);
+static void xirc2ps_config(dev_link_t * link);
+static void xirc2ps_release(u_long arg);
+static int xirc2ps_event(event_t event, int priority,
+ event_callback_args_t * args);
+
+/****************
+ * The attach() and detach() entry points are used to create and destroy
+ * "instances" of the driver, where each instance represents everything
+ * needed to manage one actual PCMCIA card.
+ */
+
+static dev_link_t *xirc2ps_attach(void);
+static void xirc2ps_detach(dev_link_t *);
+
+/****************
+ * You'll also need to prototype all the functions that will actually
+ * be used to talk to your device. See 'pcmem_cs' for a good example
+ * of a fully self-sufficient driver; the other drivers rely more or
+ * less on other parts of the kernel.
+ */
+
+static void xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*
+ * The dev_info variable is the "key" that is used to match up this
+ * device driver with appropriate cards, through the card configuration
+ * database.
+ */
+
+static dev_info_t dev_info = "xirc2ps_cs";
+
+/****************
+ * A linked list of "instances" of the device. Each actual
+ * PCMCIA card corresponds to one device instance, and is described
+ * by one dev_link_t structure (defined in ds.h).
+ *
+ * You may not want to use a linked list for this -- for example, the
+ * memory card driver uses an array of dev_link_t pointers, where minor
+ * device numbers are used to derive the corresponding array index.
+ */
+
+static dev_link_t *dev_list = NULL;
+
+/****************
+ * A dev_link_t structure has fields for most things that are needed
+ * to keep track of a socket, but there will usually be some device
+ * specific information that also needs to be kept track of. The
+ * 'priv' pointer in a dev_link_t structure can be used to point to
+ * a device-specific private data structure, like this.
+ *
+ * A driver needs to provide a dev_node_t structure for each device
+ * on a card. In some cases, there is only one device per card (for
+ * example, ethernet cards, modems). In other cases, there may be
+ * many actual or logical devices (SCSI adapters, memory cards with
+ * multiple partitions). The dev_node_t structures need to be kept
+ * in a linked list starting at the 'dev' field of a dev_link_t
+ * structure. We allocate them in the card's private data structure,
+ * because they generally can't be allocated dynamically.
+ */
+
+/* Per-card private state; allocated once per socket in xirc2ps_attach.
+ The dev_link_t, net_device and dev_node_t are embedded so the whole
+ instance lives in a single allocation. */
+typedef struct local_info_t {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats stats;
+ int card_type; /* presumably one of the XIR_* constants above -- confirm */
+ int probe_port; /* NOTE(review): meaning not visible here -- confirm */
+ int silicon; /* silicon revision. 0=old CE2, 1=Scipper, 4=Mohawk */
+ int mohawk; /* a CE3 type card */
+ int dingo; /* a CEM56 type card */
+ int new_mii; /* has full 10baseT/100baseT MII */
+ int modem; /* is a multi function card (i.e with a modem) */
+ caddr_t dingo_ccr; /* only used for CEM56 cards */
+ unsigned last_ptr_value; /* last packets transmitted value */
+ const char *manf_str; /* presumably the manufacturer string from the CIS */
+} local_info_t;
+
+/****************
+ * Some more prototypes
+ */
+static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void do_tx_timeout(struct net_device *dev);
+static struct net_device_stats *do_get_stats(struct net_device *dev);
+static void set_addresses(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static int set_card_type(dev_link_t *link, const void *s);
+static int do_config(struct net_device *dev, struct ifmap *map);
+static int do_open(struct net_device *dev);
+static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void hardreset(struct net_device *dev);
+static void do_reset(struct net_device *dev, int full);
+static int init_mii(struct net_device *dev);
+static void do_powerdown(struct net_device *dev);
+static int do_stop(struct net_device *dev);
+
+
+/*=============== Helper functions =========================*/
+/* Detach any instances that were marked DEV_STALE_LINK (i.e. whose
+ detach was postponed because they were still configured). */
+static void
+flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ /* 'next' is saved first because detach unlinks and frees 'link'. */
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ xirc2ps_detach(link);
+ }
+}
+
+/* Report a Card Services error for the given function code. */
+static void
+cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/* Run the given tuple-walk function (GetFirstTuple/GetNextTuple) and,
+ on success, fetch the tuple's raw data. Returns 0 or a CS error. */
+static int
+get_tuple_data(int fn, client_handle_t handle, tuple_t *tuple)
+{
+ int err;
+
+ if ((err=CardServices(fn, handle, tuple)))
+ return err;
+ return CardServices(GetTupleData, handle, tuple);
+}
+
+/* As get_tuple_data, but additionally parse the tuple into *parse. */
+static int
+get_tuple(int fn, client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+{
+ int err;
+
+ if ((err=get_tuple_data(fn, handle, tuple)))
+ return err;
+ return CardServices(ParseTuple, handle, tuple, parse);
+}
+
+#define first_tuple(a, b, c) get_tuple(GetFirstTuple, a, b, c)
+#define next_tuple(a, b, c) get_tuple(GetNextTuple, a, b, c)
+
+#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR)
+#define GetByte(reg) ((unsigned)inb(ioaddr + (reg)))
+#define GetWord(reg) ((unsigned)inw(ioaddr + (reg)))
+#define PutByte(reg,value) outb((value), ioaddr+(reg))
+#define PutWord(reg,value) outw((value), ioaddr+(reg))
+
+/* Wait for 'len' jiffies, busy-waiting in interrupt context and
+ sleeping otherwise. */
+static void
+busy_loop(u_long len)
+{
+#ifdef MACH
+ /* TODO: Is this really what we want? */
+ __udelay(1000000 / HZ * len);
+#else
+ if (in_interrupt()) {
+ /* Cannot sleep here: spin with interrupts enabled.
+ NOTE(review): 'timeout >= jiffies' is not wrap-safe; the
+ usual idiom is time_before()/time_after() -- confirm. */
+ u_long timeout = jiffies + len;
+ u_long flags;
+ save_flags(flags);
+ sti();
+ while (timeout >= jiffies)
+ ;
+ restore_flags(flags);
+ } else {
+ /* Process context: sleep instead of spinning. */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(len);
+ }
+#endif
+}
+
+
+/*====== Functions used for debugging =================================*/
+#if defined(PCMCIA_DEBUG) && 0 /* reading regs may change system status */
+/* Debug-only (compiled out above): dump the common registers plus the
+ readable per-page registers at pc_debug > 1. */
+static void
+PrintRegisters(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ if (pc_debug > 1) {
+ int i, page;
+
+ printk(KDBG_XIRC "Register common: ");
+ for (i = 0; i < 8; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ for (page = 0; page <= 8; page++) {
+ printk(KDBG_XIRC "Register page %2x: ", page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ }
+ for (page=0x40 ; page <= 0x5f; page++) {
+ /* Skip the pages that are not safe/meaningful to read. */
+ if (page == 0x43 || (page >= 0x46 && page <= 0x4f)
+ || (page >= 0x51 && page <=0x5e))
+ continue;
+ printk(KDBG_XIRC "Register page %2x: ", page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ }
+ }
+}
+#endif /* PCMCIA_DEBUG */
+
+/*============== MII Management functions ===============*/
+
+/****************
+ * Turn around for read
+ */
+/* Clock one idle cycle on the MII management interface
+ (GPR2 bit 0 drives MDCK). */
+static void
+mii_idle(ioaddr_t ioaddr)
+{
+ PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x04|1); /* and drive MDCK high */
+ udelay(1);
+}
+
+/****************
+ * Write a bit to MDI/O
+ */
+static void
+mii_putbit(ioaddr_t ioaddr, unsigned data)
+{
+ /* The #else branch is a disabled word-wide variant of the same
+ bit-banging sequence. */
+ #if 1
+ if (data) {
+ PutByte(XIRCREG2_GPR2, 0x0c|2|0); /* set MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|2|1); /* and drive MDCK high */
+ udelay(1);
+ } else {
+ PutByte(XIRCREG2_GPR2, 0x0c|0|0); /* clear MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|0|1); /* and drive MDCK high */
+ udelay(1);
+ }
+ #else
+ if (data) {
+ PutWord(XIRCREG2_GPR2-1, 0x0e0e);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0f0f);
+ udelay(1);
+ } else {
+ PutWord(XIRCREG2_GPR2-1, 0x0c0c);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0d0d);
+ udelay(1);
+ }
+ #endif
+}
+
+/****************
+ * Get a bit from MDI/O
+ */
+/* Sample one bit from MDIO. Returns 0 or the raw mask 0x20 (not 0/1);
+ callers only test the result for truth. */
+static int
+mii_getbit(ioaddr_t ioaddr)
+{
+ unsigned d;
+
+ PutByte(XIRCREG2_GPR2, 4|0); /* drive MDCK low */
+ udelay(1);
+ d = GetByte(XIRCREG2_GPR2); /* read MDIO */
+ PutByte(XIRCREG2_GPR2, 4|1); /* drive MDCK high again */
+ udelay(1);
+ return d & 0x20; /* read MDIO */
+}
+
+/* Shift out the low 'len' bits of 'data', most significant bit first. */
+static void
+mii_wbits(ioaddr_t ioaddr, unsigned data, int len)
+{
+ unsigned m = 1 << (len-1);
+ for (; m; m >>= 1)
+ mii_putbit(ioaddr, data & m);
+}
+
+/* Read a 16-bit PHY register via the bit-banged MII management
+ interface: 32-bit preamble, start/read opcode, 5-bit PHY address,
+ 5-bit register address, turnaround, then 16 data bits MSB first. */
+static unsigned
+mii_rd(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg)
+{
+ int i;
+ unsigned data=0, m;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x06, 4); /* Start and opcode for read */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY register to read */
+ mii_idle(ioaddr); /* turn around */
+ mii_getbit(ioaddr);
+
+ for (m = 1<<15; m; m >>= 1)
+ if (mii_getbit(ioaddr))
+ data |= m;
+ mii_idle(ioaddr);
+ return data;
+}
+
+/* Write the low 'len' bits of 'data' to a PHY register: preamble,
+ start/write opcode, PHY address, register address, turnaround (1,0),
+ then the data bits MSB first. */
+static void
+mii_wr(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len)
+{
+ int i;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x05, 4); /* Start and opcode for write */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY Register to write */
+ mii_putbit(ioaddr, 1); /* turn around */
+ mii_putbit(ioaddr, 0);
+ mii_wbits(ioaddr, data, len); /* And write the data */
+ mii_idle(ioaddr);
+}
+
+/*============= Main bulk of functions =========================*/
+
+/****************
+ * xirc2ps_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device. The device is registered
+ * with Card Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
+ */
+
+static dev_link_t *
+xirc2ps_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ local_info_t *local;
+ int err;
+
+ DEBUG(0, "attach()\n");
+ flush_stale_links();
+
+ /* Allocate the device structure; link, dev and node are all
+ embedded in the one local_info_t allocation. */
+ local = kmalloc(sizeof(*local), GFP_KERNEL);
+ if (!local) return NULL;
+ memset(local, 0, sizeof(*local));
+ link = &local->link; dev = &local->dev;
+ link->priv = dev->priv = local;
+
+ init_timer(&link->release);
+ link->release.function = &xirc2ps_release;
+ link->release.data = (u_long) link;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50; /* NOTE(review): presumably tenths of a volt (5.0V) -- confirm */
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+ link->irq.Handler = xirc2ps_interrupt;
+ link->irq.Instance = dev;
+
+ /* Fill in card specific entries */
+ dev->hard_start_xmit = &do_start_xmit;
+ dev->set_config = &do_config;
+ dev->get_stats = &do_get_stats;
+ dev->do_ioctl = &do_ioctl;
+ dev->set_multicast_list = &set_multicast_list;
+ ether_setup(dev);
+ init_dev_name(dev, local->node);
+ dev->open = &do_open;
+ dev->stop = &do_stop;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = do_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services; the instance is linked into
+ dev_list first so the failure path can detach it again. */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &xirc2ps_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ if ((err = CardServices(RegisterClient, &link->handle, &client_reg))) {
+ cs_error(link->handle, RegisterClient, err);
+ xirc2ps_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* xirc2ps_attach */
+
+/****************
+ * This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
+ */
+
+static void
+xirc2ps_detach(dev_link_t * link)
+{
+ local_info_t *local = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link)
+ break;
+ if (!*linkp) {
+ DEBUG(0, "detach(0x%p): dev_link lost\n", link);
+ return;
+ }
+
+ /*
+ * If the device is currently configured and active, we won't
+ * actually delete it yet. Instead, it is marked so that when
+ * the release() function is called, that will trigger a proper
+ * detach().
+ */
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ DEBUG(0, "detach postponed, '%s' still locked\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free it */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&local->dev);
+ kfree(local);
+
+} /* xirc2ps_detach */
+
+/****************
+ * Detect the type of the card. s is the buffer with the data of tuple 0x20
+ * Returns: 0 := not supported
+ * mediaid=11 and prodid=47
+ * Media-Id bits:
+ * Ethernet 0x01
+ * Tokenring 0x02
+ * Arcnet 0x04
+ * Wireless 0x08
+ * Modem 0x10
+ * GSM only 0x20
+ * Prod-Id bits:
+ * Pocket 0x10
+ * External 0x20
+ * Creditcard 0x40
+ * Cardbus 0x80
+ *
+ */
/* Decode tuple 0x20 (manufacturer ID data in s) into local->card_type
 * and the mohawk/dingo/modem capability flags.
 * Returns 1 if the card is supported, 0 otherwise. */
static int
set_card_type(dev_link_t *link, const void *s)
{
    local_info_t *local = link->priv;
    /* cisrev is only referenced inside DEBUG(), which compiles away
     * without PCMCIA_DEBUG -- hence the matching #ifdef here. */
    #ifdef PCMCIA_DEBUG
    unsigned cisrev = ((const unsigned char *)s)[2];
    #endif
    unsigned mediaid= ((const unsigned char *)s)[3];
    unsigned prodid = ((const unsigned char *)s)[4];

    DEBUG(0, "cisrev=%02x mediaid=%02x prodid=%02x\n",
          cisrev, mediaid, prodid);

    local->mohawk = 0;
    local->dingo = 0;
    local->modem = 0;
    local->card_type = XIR_UNKNOWN;
    /* Only "Creditcard" form-factor Ethernet cards are handled. */
    if (!(prodid & 0x40)) {
        printk(KNOT_XIRC "Ooops: Not a creditcard\n");
        return 0;
    }
    if (!(mediaid & 0x01)) {
        printk(KNOT_XIRC "Not an Ethernet card\n");
        return 0;
    }
    if (mediaid & 0x10) {
        /* Combined Ethernet+Modem card: map the low prodid nibble to
         * the CEM variants; CEM56/CEM56M are Mohawk-based, the CEM56
         * (and RealPort 10/56) additionally has the Dingo chip. */
        local->modem = 1;
        switch(prodid & 15) {
          case 1: local->card_type = XIR_CEM   ; break;
          case 2: local->card_type = XIR_CEM2  ; break;
          case 3: local->card_type = XIR_CEM3  ; break;
          case 4: local->card_type = XIR_CEM33 ; break;
          case 5: local->card_type = XIR_CEM56M;
                  local->mohawk = 1;
                  break;
          case 6:
          case 7: /* 7 is the RealPort 10/56 */
                  local->card_type = XIR_CEM56 ;
                  local->mohawk = 1;
                  local->dingo = 1;
                  break;
        }
    } else {
        /* Ethernet-only card; some CE2s claim to be CE, so double
         * check via the version string (has_ce2_string). */
        switch(prodid & 15) {
          case 1: local->card_type = has_ce2_string(link)? XIR_CE2 : XIR_CE ;
                  break;
          case 15:
          case 2: local->card_type = XIR_CE2; break;
          case 3: local->card_type = XIR_CE3;
                  local->mohawk = 1;
                  break;
        }
    }
    /* The first-generation CE and CEM cards are not supported. */
    if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
        printk(KNOT_XIRC "Sorry, this is an old CE card\n");
        return 0;
    }
    if (local->card_type == XIR_UNKNOWN)
        printk(KNOT_XIRC "unknown card (mediaid=%02x prodid=%02x)\n",
               mediaid, prodid);

    return 1;
}
+
+/****************
+ * There are some CE2 cards out which claim to be a CE card.
+ * This function looks for a "CE2" in the 3rd version field.
+ * Returns: true if this is a CE2
+ */
+static int
+has_ce2_string(dev_link_t * link)
+{
+ client_handle_t handle = link->handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[256];
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 254;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (!first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 2) {
+ if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
+ return 1;
+ }
+ return 0;
+}
+
+/****************
+ * xirc2ps_config() is scheduled to run after a CARD_INSERTION event
+ * is received, to configure the PCMCIA socket, and to make the
+ * ethernet device available to the system.
+ */
static void
xirc2ps_config(dev_link_t * link)
{
    client_handle_t handle = link->handle;
    local_info_t *local = link->priv;
    struct net_device *dev = &local->dev;
    tuple_t tuple;
    cisparse_t parse;
    ioaddr_t ioaddr;
    int err, i;
    u_char buf[64];
    /* Both pointers alias into `parse`; they become meaningful only
     * after the corresponding tuple has been parsed into it. */
    cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
    cistpl_cftable_entry_t *cf = &parse.cftable_entry;

    local->dingo_ccr = 0;

    DEBUG(0, "config(0x%p)\n", link);

    /*
     * This reads the card's CONFIG tuple to find its configuration
     * registers.
     */
    tuple.Attributes = 0;
    tuple.TupleData = buf;
    tuple.TupleDataMax = 64;
    tuple.TupleOffset = 0;

    /* Is this a valid card: the manufacturer ID must be one we know. */
    tuple.DesiredTuple = CISTPL_MANFID;
    if ((err=first_tuple(handle, &tuple, &parse))) {
        printk(KNOT_XIRC "manfid not found in CIS\n");
        goto failure;
    }

    switch(parse.manfid.manf) {
      case MANFID_XIRCOM:
        local->manf_str = "Xircom";
        break;
      case MANFID_ACCTON:
        local->manf_str = "Accton";
        break;
      case MANFID_COMPAQ:
      case MANFID_COMPAQ2:
        local->manf_str = "Compaq";
        break;
      case MANFID_INTEL:
        local->manf_str = "Intel";
        break;
      case MANFID_TOSHIBA:
        local->manf_str = "Toshiba";
        break;
      default:
        printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n",
               (unsigned)parse.manfid.manf);
        goto failure;
    }
    DEBUG(0, "found %s card\n", local->manf_str);

    /* `buf` still holds the raw MANFID tuple data at this point;
     * set_card_type() decodes it into the capability flags. */
    if (!set_card_type(link, buf)) {
        printk(KNOT_XIRC "this card is not supported\n");
        goto failure;
    }

    /* get configuration stuff */
    tuple.DesiredTuple = CISTPL_CONFIG;
    if ((err=first_tuple(handle, &tuple, &parse)))
        goto cis_error;
    link->conf.ConfigBase = parse.config.base;
    link->conf.Present = parse.config.rmask[0];

    /* get the ethernet address from the CIS */
    tuple.DesiredTuple = CISTPL_FUNCE;
    for (err = first_tuple(handle, &tuple, &parse); !err;
         err = next_tuple(handle, &tuple, &parse)) {
        /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries:
         * the first one with a length of zero the second correct -
         * so I skip all entries with length 0 */
        if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID
            && ((cistpl_lan_node_id_t *)parse.funce.data)->nb)
            break;
    }
    if (err) { /* not found: try to get the node-id from tuple 0x89 */
        tuple.DesiredTuple = 0x89;  /* data layout looks like tuple 0x22 */
        if (!(err = get_tuple_data(GetFirstTuple, handle, &tuple))) {
            if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID)
                memcpy(&parse, buf, 8);
            else
                err = -1;
        }
    }
    if (err) { /* another try (James Lehmer's CE2 version 4.1)*/
        tuple.DesiredTuple = CISTPL_FUNCE;
        for (err = first_tuple(handle, &tuple, &parse); !err;
             err = next_tuple(handle, &tuple, &parse)) {
            if (parse.funce.type == 0x02 && parse.funce.data[0] == 1
                && parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) {
                /* Re-tag the raw data so it parses as a node-id tuple. */
                buf[1] = 4;
                memcpy(&parse, buf+1, 8);
                break;
            }
        }
    }
    if (err) {
        printk(KNOT_XIRC "node-id not found in CIS\n");
        goto failure;
    }
    /* Re-derive the pointer: `parse` may have been overwritten by the
     * memcpy() fallbacks above. */
    node_id = (cistpl_lan_node_id_t *)parse.funce.data;
    if (node_id->nb != 6) {
        printk(KNOT_XIRC "malformed node-id in CIS\n");
        goto failure;
    }
    for (i=0; i < 6; i++)
        dev->dev_addr[i] = node_id->id[i];

    /* Configure card */
    link->state |= DEV_CONFIG;

    link->io.IOAddrLines =10;
    link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
    link->irq.Attributes = IRQ_HANDLE_PRESENT;
    link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
    /* Either take the module-supplied irq_mask or build one from the
     * explicit irq_list[] (up to four entries). */
    if (irq_list[0] == -1)
        link->irq.IRQInfo2 = irq_mask;
    else {
        for (i = 0; i < 4; i++)
            link->irq.IRQInfo2 |= 1 << irq_list[i];
    }
    if (local->modem) {
        int pass;

        if (do_sound) {
            link->conf.Attributes |= CONF_ENABLE_SPKR;
            link->conf.Status |= CCSR_AUDIO_ENA;
        }
        /* The modem half shares the interrupt with the network half. */
        link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ;
        link->io.NumPorts2 = 8;
        link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
        if (local->dingo) {
            /* Take the Modem IO port from the CIS and scan for a free
             * Ethernet port */
            link->io.NumPorts1 = 16; /* no Mako stuff anymore */
            tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
            for (err = first_tuple(handle, &tuple, &parse); !err;
                 err = next_tuple(handle, &tuple, &parse)) {
                if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
                    for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
                        link->conf.ConfigIndex = cf->index ;
                        link->io.BasePort2 = cf->io.win[0].base;
                        link->io.BasePort1 = ioaddr;
                        if (!(err=CardServices(RequestIO, link->handle,
                                               &link->io)))
                            goto port_found;
                    }
                }
            }
        } else {
            link->io.NumPorts1 = 18;
            /* We do 2 passes here: The first one uses the regular mapping and
             * the second tries again, thereby considering that the 32 ports are
             * mirrored every 32 bytes. Actually we use a mirrored port for
             * the Mako if (on the first pass) the COR bit 5 is set.
             */
            for (pass=0; pass < 2; pass++) {
                tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
                for (err = first_tuple(handle, &tuple, &parse); !err;
                     err = next_tuple(handle, &tuple, &parse)){
                    if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8){
                        link->conf.ConfigIndex = cf->index ;
                        link->io.BasePort2 = cf->io.win[0].base;
                        link->io.BasePort1 = link->io.BasePort2
                                    + (pass ? (cf->index & 0x20 ? -24:8)
                                            : (cf->index & 0x20 ?   8:-24));
                        if (!(err=CardServices(RequestIO, link->handle,
                                               &link->io)))
                            goto port_found;
                    }
                }
            }
            /* if special option:
             * try to configure as Ethernet only.
             * .... */
        }
        printk(KNOT_XIRC "no ports available\n");
    } else {
        /* Ethernet-only card: probe 0x300-0x3f0 for a free window,
         * then fall back to letting Card Services pick an address. */
        link->irq.Attributes |= IRQ_TYPE_EXCLUSIVE;
        link->io.NumPorts1 = 16;
        for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
            link->io.BasePort1 = ioaddr;
            if (!(err=CardServices(RequestIO, link->handle, &link->io)))
                goto port_found;
        }
        link->io.BasePort1 = 0; /* let CS decide */
        if ((err=CardServices(RequestIO, link->handle, &link->io))) {
            cs_error(link->handle, RequestIO, err);
            goto config_error;
        }
    }
  port_found:
    if (err)
        goto config_error;

    /****************
     * Now allocate an interrupt line. Note that this does not
     * actually assign a handler to the interrupt.
     */
    if ((err=CardServices(RequestIRQ, link->handle, &link->irq))) {
        cs_error(link->handle, RequestIRQ, err);
        goto config_error;
    }

    /****************
     * This actually configures the PCMCIA socket -- setting up
     * the I/O windows and the interrupt mapping.
     */
    if ((err=CardServices(RequestConfiguration,
                          link->handle, &link->conf))) {
        cs_error(link->handle, RequestConfiguration, err);
        goto config_error;
    }

    if (local->dingo) {
        conf_reg_t reg;
        win_req_t req;
        memreq_t mem;

        /* Reset the modem's BAR to the correct value
         * This is necessary because in the RequestConfiguration call,
         * the base address of the ethernet port (BasePort1) is written
         * to the BAR registers of the modem.
         */
        reg.Action = CS_WRITE;
        reg.Offset = CISREG_IOBASE_0;
        reg.Value = link->io.BasePort2 & 0xff;
        if ((err = CardServices(AccessConfigurationRegister, link->handle,
                                &reg))) {
            cs_error(link->handle, AccessConfigurationRegister, err);
            goto config_error;
        }
        reg.Action = CS_WRITE;
        reg.Offset = CISREG_IOBASE_1;
        reg.Value = (link->io.BasePort2 >> 8) & 0xff;
        if ((err = CardServices(AccessConfigurationRegister, link->handle,
                                &reg))) {
            cs_error(link->handle, AccessConfigurationRegister, err);
            goto config_error;
        }

        /* There is no config entry for the Ethernet part which
         * is at 0x0800. So we allocate a window into the attribute
         * memory and write direct to the CIS registers
         */
        req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
        req.Base = req.Size = 0;
        req.AccessSpeed = 0;
        link->win = (window_handle_t)link->handle;
        if ((err = CardServices(RequestWindow, &link->win, &req))) {
            cs_error(link->handle, RequestWindow, err);
            goto config_error;
        }
        /* dingo_ccr keeps the +0x0800 offset; xirc2ps_release()
         * subtracts it again before iounmap(). */
        local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
        mem.CardOffset = 0x0;
        mem.Page = 0;
        if ((err = CardServices(MapMemPage, link->win, &mem))) {
            cs_error(link->handle, MapMemPage, err);
            goto config_error;
        }

        /* Setup the CCRs; there are no infos in the CIS about the Ethernet
         * part.
         */
        writeb(0x47, local->dingo_ccr + CISREG_COR);
        ioaddr = link->io.BasePort1;
        writeb(ioaddr & 0xff      , local->dingo_ccr + CISREG_IOBASE_0);
        writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1);

        #if 0
        {
            u_char tmp;
            printk(KERN_INFO "ECOR:");
            for (i=0; i < 7; i++) {
                tmp = readb(local->dingo_ccr + i*2);
                printk(" %02x", tmp);
            }
            printk("\n");
            printk(KERN_INFO "DCOR:");
            for (i=0; i < 4; i++) {
                tmp = readb(local->dingo_ccr + 0x20 + i*2);
                printk(" %02x", tmp);
            }
            printk("\n");
            printk(KERN_INFO "SCOR:");
            for (i=0; i < 10; i++) {
                tmp = readb(local->dingo_ccr + 0x40 + i*2);
                printk(" %02x", tmp);
            }
            printk("\n");
        }
        #endif

        /* NOTE(review): magic DCOR writes; meanings are not documented
         * in the CIS (see comment above) -- taken from sample code. */
        writeb(0x01, local->dingo_ccr + 0x20);
        writeb(0x0c, local->dingo_ccr + 0x22);
        writeb(0x00, local->dingo_ccr + 0x24);
        writeb(0x00, local->dingo_ccr + 0x26);
        writeb(0x00, local->dingo_ccr + 0x28);
    }

    /* The if_port symbol can be set when the module is loaded */
    local->probe_port=0;
    if (!if_port) {
        local->probe_port = dev->if_port = 1;
    } else if ((if_port >= 1 && if_port <= 2) ||
               (local->mohawk && if_port==4))
        dev->if_port = if_port;
    else
        printk(KNOT_XIRC "invalid if_port requested\n");

    /* we can now register the device with the net subsystem */
    dev->irq = link->irq.AssignedIRQ;
    dev->base_addr = link->io.BasePort1;
    if ((err=register_netdev(dev))) {
        printk(KNOT_XIRC "register_netdev() failed\n");
        goto config_error;
    }

    copy_dev_name(local->node, dev);
    link->dev = &local->node;
    link->state &= ~DEV_CONFIG_PENDING;

    if (local->dingo)
        do_reset(dev, 1); /* a kludge to make the cem56 work */

    /* give some infos about the hardware */
    printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr",
           dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq);
    for (i = 0; i < 6; i++)
        printk("%c%02X", i?':':' ', dev->dev_addr[i]);
    printk("\n");

    return;

  config_error:
    link->state &= ~DEV_CONFIG_PENDING;
    xirc2ps_release((u_long)link);
    return;

  cis_error:
    printk(KNOT_XIRC "unable to parse CIS\n");
  failure:
    link->state &= ~DEV_CONFIG_PENDING;
} /* xirc2ps_config */
+
+/****************
+ * After a card is removed, xirc2ps_release() will unregister the net
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void
+xirc2ps_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *) arg;
+ local_info_t *local = link->priv;
+ struct net_device *dev = &local->dev;
+
+ DEBUG(0, "release(0x%p)\n", link);
+
+ /*
+ * If the device is currently in use, we won't release until it
+ * is actually closed.
+ */
+ if (link->open) {
+ DEBUG(0, "release postponed, '%s' "
+ "still open\n", link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ if (link->win) {
+ local_info_t *local = dev->priv;
+ if (local->dingo)
+ iounmap(local->dingo_ccr - 0x0800);
+ CardServices(ReleaseWindow, link->win);
+ }
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+
+} /* xirc2ps_release */
+
+/*====================================================================*/
+
+/****************
+ * The card status event handler. Mostly, this schedules other
+ * stuff to run after an event is received. A CARD_REMOVAL event
+ * also sets some flags to discourage the net drivers from trying
+ * to talk to the card any more.
+ *
+ * When a CARD_REMOVAL event is received, we immediately set a flag
+ * to block future accesses to this device. All the functions that
+ * actually access the device should check this flag to make sure
+ * the card is still present.
+ */
+
static int
xirc2ps_event(event_t event, int priority,
              event_callback_args_t * args)
{
    dev_link_t *link = args->client_data;
    local_info_t *lp = link->priv;
    struct net_device *dev = &lp->dev;

    DEBUG(0, "event(%d)\n", (int)event);

    switch (event) {
      case CS_EVENT_REGISTRATION_COMPLETE:
        DEBUG(0, "registration complete\n");
        break;
      case CS_EVENT_CARD_REMOVAL:
        /* Block further hardware access, detach from the stack, and
         * schedule the actual release ~50 ms later via the timer. */
        link->state &= ~DEV_PRESENT;
        if (link->state & DEV_CONFIG) {
            netif_device_detach(dev);
            mod_timer(&link->release, jiffies + HZ/20);
        }
        break;
      case CS_EVENT_CARD_INSERTION:
        link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
        xirc2ps_config(link);
        break;
      case CS_EVENT_PM_SUSPEND:
        link->state |= DEV_SUSPEND;
        /* Fall through... */
      case CS_EVENT_RESET_PHYSICAL:
        /* Quiesce an open device and drop the socket configuration
         * so the card can be powered down / reset safely. */
        if (link->state & DEV_CONFIG) {
            if (link->open) {
                netif_device_detach(dev);
                do_powerdown(dev);
            }
            CardServices(ReleaseConfiguration, link->handle);
        }
        break;
      case CS_EVENT_PM_RESUME:
        link->state &= ~DEV_SUSPEND;
        /* Fall through... */
      case CS_EVENT_CARD_RESET:
        /* Re-establish the socket configuration and, if the device
         * was open, fully reset the chip and re-attach it. */
        if (link->state & DEV_CONFIG) {
            CardServices(RequestConfiguration, link->handle, &link->conf);
            if (link->open) {
                do_reset(dev,1);
                netif_device_attach(dev);
            }
        }
        break;
    }
    return 0;
} /* xirc2ps_event */
+
+/*====================================================================*/
+
+/****************
+ * This is the Interrupt service route.
+ */
static void
xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_id;
    local_info_t *lp = dev->priv;
    ioaddr_t ioaddr;
    u_char saved_page;
    unsigned bytes_rcvd;
    unsigned int_status, eth_status, rx_status, tx_status;
    unsigned rsr, pktlen;
    ulong start_ticks = jiffies; /* fixme: jiffies rollover every 497 days
                                  * is this something to worry about?
                                  * -- on a laptop?
                                  */

    if (!netif_device_present(dev))
        return;

    ioaddr = dev->base_addr;
    if (lp->mohawk) { /* must disable the interrupt */
        PutByte(XIRCREG_CR, 0);
    }

    DEBUG(6, "%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr);

    /* The chip uses paged registers; remember the current page so it
     * can be restored before leaving the handler. */
    saved_page = GetByte(XIRCREG_PR);
    /* Read the ISR to see what's the cause for the interrupt.
     * This also clears the interrupt flags on CE2 cards
     */
    int_status = GetByte(XIRCREG_ISR);
    bytes_rcvd = 0;
  loop_entry:
    if (int_status == 0xff) { /* card may be ejected */
        DEBUG(3, "%s: interrupt %d for dead card\n", dev->name, irq);
        goto leave;
    }
    eth_status = GetByte(XIRCREG_ESR);

    /* Latch and clear the RX/TX status registers on page 0x40. */
    SelectPage(0x40);
    rx_status  = GetByte(XIRCREG40_RXST0);
    PutByte(XIRCREG40_RXST0, (~rx_status & 0xff));
    tx_status  = GetByte(XIRCREG40_TXST0);
    tx_status |= GetByte(XIRCREG40_TXST1) << 8;
    PutByte(XIRCREG40_TXST0, 0);
    PutByte(XIRCREG40_TXST1, 0);

    DEBUG(3, "%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n",
          dev->name, int_status, eth_status, rx_status, tx_status);

    /***** receive section ******/
    SelectPage(0);
    while (eth_status & FullPktRcvd) {
        rsr = GetByte(XIRCREG0_RSR);
        if (bytes_rcvd > maxrx_bytes && (rsr & PktRxOk)) {
            /* too many bytes received during this int, drop the rest of the
             * packets */
            lp->stats.rx_dropped++;
            DEBUG(2, "%s: RX drop, too much done\n", dev->name);
        } else if (rsr & PktRxOk) {
            struct sk_buff *skb;

            pktlen = GetWord(XIRCREG0_RBC);
            bytes_rcvd += pktlen;

            DEBUG(5, "rsr=%#02x packet_length=%u\n", rsr, pktlen);

            skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
            if (!skb) {
                printk(KNOT_XIRC "low memory, packet dropped (size=%u)\n",
                       pktlen);
                lp->stats.rx_dropped++;
            } else { /* okay get the packet */
                skb_reserve(skb, 2);
                if (lp->silicon == 0 ) { /* work around a hardware bug */
                    unsigned rhsa; /* receive start address */

                    /* On silicon rev 0 the packet may wrap around the
                     * 32 KB receive buffer; copy byte-by-byte across
                     * the wrap point when necessary. */
                    SelectPage(5);
                    rhsa = GetWord(XIRCREG5_RHSA0);
                    SelectPage(0);
                    rhsa += 3; /* skip control infos */
                    if (rhsa >= 0x8000)
                        rhsa = 0;
                    if (rhsa + pktlen > 0x8000) {
                        unsigned i;
                        u_char *buf = skb_put(skb, pktlen);
                        for (i=0; i < pktlen ; i++, rhsa++) {
                            buf[i] = GetByte(XIRCREG_EDP);
                            if (rhsa == 0x8000) {
                                rhsa = 0;
                                i--;
                            }
                        }
                    } else {
                        insw(ioaddr+XIRCREG_EDP,
                             skb_put(skb, pktlen), (pktlen+1)>>1);
                    }
                }
                #if 0
                else if (lp->mohawk) {
                    /* To use this 32 bit access we should use
                     * a manual optimized loop
                     * Also the words are swapped, we can get more
                     * performance by using 32 bit access and swapping
                     * the words in a register. Will need this for cardbus
                     *
                     * Note: don't forget to change the ALLOC_SKB to .. +3
                     */
                    unsigned i;
                    u_long *p = skb_put(skb, pktlen);
                    register u_long a;
                    ioaddr_t edpreg = ioaddr+XIRCREG_EDP-2;
                    for (i=0; i < len ; i += 4, p++) {
                        a = inl(edpreg);
                        __asm__("rorl $16,%0\n\t"
                                :"=q" (a)
                                : "0" (a));
                        *p = a;
                    }
                }
                #endif
                else {
                    insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen),
                         (pktlen+1)>>1);
                }
                skb->protocol = eth_type_trans(skb, dev);
                skb->dev = dev;
                netif_rx(skb);
                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
                add_rx_bytes(&lp->stats, pktlen);
                if (!(rsr & PhyPkt))
                    lp->stats.multicast++;
            }
        } else { /* bad packet */
            DEBUG(5, "rsr=%#02x\n", rsr);
        }
        if (rsr & PktTooLong) {
            lp->stats.rx_frame_errors++;
            DEBUG(3, "%s: Packet too long\n", dev->name);
        }
        if (rsr & CRCErr) {
            lp->stats.rx_crc_errors++;
            DEBUG(3, "%s: CRC error\n", dev->name);
        }
        if (rsr & AlignErr) {
            lp->stats.rx_fifo_errors++; /* okay ? */
            DEBUG(3, "%s: Alignment error\n", dev->name);
        }

        /* clear the received/dropped/error packet */
        PutWord(XIRCREG0_DO, 0x8000); /* issue cmd: skip_rx_packet */

        /* get the new ethernet status */
        eth_status = GetByte(XIRCREG_ESR);
    }
    if (rx_status & 0x10) { /* Receive overrun */
        lp->stats.rx_over_errors++;
        PutByte(XIRCREG_CR, ClearRxOvrun);
        DEBUG(3, "receive overrun cleared\n");
    }

    /***** transmit section ******/
    if (int_status & PktTxed) {
        unsigned n, nn;

        /* The packet counter (PTR) only tells us the current value;
         * derive how many packets completed since the last interrupt,
         * accounting for the 8-bit rollover. */
        n = lp->last_ptr_value;
        nn = GetByte(XIRCREG0_PTR);
        lp->last_ptr_value = nn;
        if (nn < n) /* rollover */
            lp->stats.tx_packets += 256 - n;
        else if (n == nn) { /* happens sometimes - don't know why */
            DEBUG(0, "PTR not changed?\n");
        } else
            lp->stats.tx_packets += lp->last_ptr_value - n;
        netif_wake_queue(dev);
    }
    if (tx_status & 0x0002) {   /* Execessive collissions */
        DEBUG(0, "tx restarted due to execssive collissions\n");
        PutByte(XIRCREG_CR, RestartTx);  /* restart transmitter process */
    }
    if (tx_status & 0x0040)
        lp->stats.tx_aborted_errors++;

    /* recalculate our work chunk so that we limit the duration of this
     * ISR to about 1/10 of a second.
     * Calculate only if we received a reasonable amount of bytes.
     */
    if (bytes_rcvd > 1000) {
        u_long duration = jiffies - start_ticks;

        if (duration >= HZ/10) { /* if more than about 1/10 second */
            maxrx_bytes = (bytes_rcvd * (HZ/10)) / duration;
            if (maxrx_bytes < 2000)
                maxrx_bytes = 2000;
            else if (maxrx_bytes > 22000)
                maxrx_bytes = 22000;
            DEBUG(1, "set maxrx=%u (rcvd=%u ticks=%lu)\n",
                  maxrx_bytes, bytes_rcvd, duration);
        } else if (!duration && maxrx_bytes < 22000) {
            /* now much faster */
            maxrx_bytes += 2000;
            if (maxrx_bytes > 22000)
                maxrx_bytes = 22000;
            DEBUG(1, "set maxrx=%u\n", maxrx_bytes);
        }
    }

  leave:
    /* Optionally re-read the ISR and loop back while the card keeps
     * raising new events (works around a lockup). */
    if (lockup_hack) {
        if (int_status != 0xff && (int_status = GetByte(XIRCREG_ISR)) != 0)
            goto loop_entry;
    }
    SelectPage(saved_page);
    PutByte(XIRCREG_CR, EnableIntr);  /* re-enable interrupts */
    /* Instead of dropping packets during a receive, we could
     * force an interrupt with this command:
     *   PutByte(XIRCREG_CR, EnableIntr|ForceIntr);
     */
} /* xirc2ps_interrupt */
+
+/*====================================================================*/
+
+static void
+do_tx_timeout(struct net_device *dev)
+{
+ local_info_t *lp = dev->priv;
+ printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
+ lp->stats.tx_errors++;
+ /* reset the card */
+ do_reset(dev,1);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
static int
do_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    local_info_t *lp = dev->priv;
    ioaddr_t ioaddr = dev->base_addr;
    int okay;
    unsigned freespace;
    unsigned pktlen = skb? skb->len : 0;

    DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n",
          skb, dev, pktlen);

    tx_timeout_check(dev, do_tx_timeout);
    skb_tx_check(dev, skb);

    /* adjust the packet length to min. required
     * and hope that the buffer is large enough
     * to provide some random data.
     * fixme: For Mohawk we can change this by sending
     * a larger packetlen than we actually have; the chip will
     * pad this in his buffer with random bytes
     */
    if (pktlen < ETH_ZLEN)
        pktlen = ETH_ZLEN;

    SelectPage(0);
    PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
    freespace = GetWord(XIRCREG0_TSO);
    /* Bit 15 of TSO would be the TRS "space available" flag, but the
     * assignment below is deliberately overwritten: TRS is broken
     * (see comment), so the space check is done manually instead. */
    okay = freespace & 0x8000;
    freespace &= 0x7fff;
    /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
    okay = pktlen +2 < freespace;
    DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n",
          dev->name, freespace, okay ? " (okay)":" (not enough)");
    if (!okay) { /* not enough space */
        return 1;  /* upper layer may decide to requeue this packet */
    }
    /* send the packet: write the length word, then the payload as
     * 16-bit words, with a trailing byte if the length is odd. */
    PutWord(XIRCREG_EDP, (u_short)pktlen);
    outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1);
    if (pktlen & 1)
        PutByte(XIRCREG_EDP, skb->data[pktlen-1]);

    if (lp->mohawk)
        PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);

    DEV_KFREE_SKB (skb);
    dev->trans_start = jiffies;
    add_tx_bytes(&lp->stats, pktlen);
    netif_start_queue(dev);
    return 0;
}
+
+static struct net_device_stats *
+do_get_stats(struct net_device *dev)
+{
+ local_info_t *lp = dev->priv;
+
+ /* lp->stats.rx_missed_errors = GetByte(?) */
+ return &lp->stats;
+}
+
+/****************
+ * Set all addresses: This first one is the individual address,
+ * the next 9 addresses are taken from the multicast list and
+ * the rest is filled with the individual address.
+ */
static void
set_addresses(struct net_device *dev)
{
    ioaddr_t ioaddr = dev->base_addr;
    local_info_t *lp = dev->priv;
    struct dev_mc_list *dmi = dev->mc_list;
    char *addr;
    int i,j,k,n;

    /* The address registers live at offsets 8..15 of register pages
     * starting at 0x50.  Loop variables:
     *   i - byte index within one 6-byte address (0..5)
     *   j - register offset within the current page (8..15)
     *   k - current register page (0x50 upward)
     *   n - address slot: 0 = own MAC, 1..9 = multicast entries
     */
    SelectPage(k=0x50);
    for (i=0,j=8,n=0; ; i++, j++) {
        if (i > 5) {            /* finished one address, next slot */
            if (++n > 9)
                break;
            i = 0;
        }
        if (j > 15) {           /* page full, advance to the next one */
            j = 8;
            k++;
            SelectPage(k);
        }

        /* Slots beyond the multicast list are padded with the
         * individual address. */
        if (n && n <= dev->mc_count && dmi) {
            addr = dmi->dmi_addr;
            dmi = dmi->next;
        } else
            addr = dev->dev_addr;

        /* Mohawk chips want the address bytes in reverse order. */
        if (lp->mohawk)
            PutByte(j, addr[5-i]);
        else
            PutByte(j, addr[i]);
    }
    SelectPage(0);
}
+
+/****************
+ * Set or clear the multicast filter for this adaptor.
+ * We can filter up to 9 addresses, if more are requested we set
+ * multicast promiscuous mode.
+ */
+
static void
set_multicast_list(struct net_device *dev)
{
    ioaddr_t ioaddr = dev->base_addr;

    SelectPage(0x42);
    if (dev->flags & IFF_PROMISC) { /* snoop */
        PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
    } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
        /* NOTE(review): this writes the same 0x06 as the promiscuous
         * branch although the comment claims only MPE -- confirm the
         * intended SWC1 value against the chip documentation. */
        PutByte(XIRCREG42_SWC1, 0x06); /* set MPE */
    } else if (dev->mc_count) {
        /* the chip can filter 9 addresses perfectly */
        PutByte(XIRCREG42_SWC1, 0x00);
        /* Take the MAC offline while rewriting the address filter,
         * then re-enable reception. */
        SelectPage(0x40);
        PutByte(XIRCREG40_CMD0, Offline);
        set_addresses(dev);
        SelectPage(0x40);
        PutByte(XIRCREG40_CMD0, EnableRecv | Online);
    } else { /* standard usage */
        PutByte(XIRCREG42_SWC1, 0x00);
    }
    SelectPage(0);
}
+
+static int
+do_config(struct net_device *dev, struct ifmap *map)
+{
+ local_info_t *local = dev->priv;
+
+ DEBUG(0, "do_config(%p)\n", dev);
+ if (map->port != 255 && map->port != dev->if_port) {
+ if (map->port > 4)
+ return -EINVAL;
+ if (!map->port) {
+ local->probe_port = 1;
+ dev->if_port = 1;
+ } else {
+ local->probe_port = 0;
+ dev->if_port = map->port;
+ }
+ printk(KERN_INFO "%s: switching to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ do_reset(dev,1); /* not the fine way :-) */
+ }
+ return 0;
+}
+
+/****************
+ * Open the driver
+ */
static int
do_open(struct net_device *dev)
{
    local_info_t *lp = dev->priv;
    dev_link_t *link = &lp->link;

    DEBUG(0, "do_open(%p)\n", dev);

    /* Check that the PCMCIA card is still here. */
    /* Physical device present signature. */
    if (!DEV_OK(link))
        return -ENODEV;

    /* okay: count the open (checked by release/detach) and pin the
     * module while the interface is up. */
    link->open++;
    MOD_INC_USE_COUNT;

    netif_start_queue(dev);
    netif_mark_up(dev);
    /* Full reset brings the chip online and programs the addresses. */
    do_reset(dev,1);

    return 0;
}
+
/* Private ioctls: MII PHY access (Mohawk-based cards only).
 * data[0]=PHY address, data[1]=register, data[2]=value to write,
 * data[3]=value read. */
static int
do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    local_info_t *local = dev->priv;
    ioaddr_t ioaddr = dev->base_addr;
    u16 *data = (u16 *)&rq->ifr_data;

    DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
          dev->name, rq->ifr_ifrn.ifrn_name, cmd,
          data[0], data[1], data[2], data[3]);

    /* Only Mohawk chips have an MII interface. */
    if (!local->mohawk)
        return -EOPNOTSUPP;

    switch(cmd) {
      case SIOCDEVPRIVATE:      /* Get the address of the PHY in use. */
        data[0] = 0;            /* we have only this address */
        /* fall through */
      case SIOCDEVPRIVATE+1:    /* Read the specified MII register. */
        data[3] = mii_rd(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
        break;
      case SIOCDEVPRIVATE+2:    /* Write the specified MII register */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        mii_wr(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2], 16);
        break;
      default:
        return -EOPNOTSUPP;
    }
    return 0;
}
+
+static void
+hardreset(struct net_device *dev)
+{
+ local_info_t *local = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ SelectPage(4);
+ udelay(1);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ busy_loop(HZ/25); /* wait 40 msec */
+ if (local->mohawk)
+ PutByte(XIRCREG4_GPR1, 1); /* set bit 0: power up */
+ else
+ PutByte(XIRCREG4_GPR1, 1 | 4); /* set bit 0: power up, bit 2: AIC */
+ busy_loop(HZ/50); /* wait 20 msec */
+}
+
+static void
+do_reset(struct net_device *dev, int full)
+{
+ local_info_t *local = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ unsigned value;
+
+ DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
+
+ hardreset(dev);
+ PutByte(XIRCREG_CR, SoftReset); /* set */
+ busy_loop(HZ/50); /* wait 20 msec */
+ PutByte(XIRCREG_CR, 0); /* clear */
+ busy_loop(HZ/25); /* wait 40 msec */
+ if (local->mohawk) {
+ SelectPage(4);
+ /* set pin GP1 and GP2 to output (0x0c)
+ * set GP1 to low to power up the ML6692 (0x00)
+ * set GP2 to high to power up the 10Mhz chip (0x02)
+ */
+ PutByte(XIRCREG4_GPR0, 0x0e);
+ }
+
+ /* give the circuits some time to power up */
+ busy_loop(HZ/2); /* about 500ms */
+
+ local->last_ptr_value = 0;
+ local->silicon = local->mohawk ? (GetByte(XIRCREG4_BOV) & 0x70) >> 4
+ : (GetByte(XIRCREG4_BOV) & 0x30) >> 4;
+
+ if (local->probe_port) {
+ if (!local->mohawk) {
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR0, 4);
+ local->probe_port = 0;
+ }
+ } else if (dev->if_port == 2) { /* enable 10Base2 */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ } else { /* enable 10BaseT */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0x80);
+ }
+ busy_loop(HZ/25); /* wait 40 msec to let it complete */
+
+ #ifdef PCMCIA_DEBUG
+ if (pc_debug) {
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value);
+ }
+ #endif
+
+ /* setup the ECR */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff); /* allow all ints */
+ PutByte(XIRCREG1_IMR1, 1 ); /* and Set TxUnderrunDetect */
+ value = GetByte(XIRCREG1_ECR);
+ #if 0
+ if (local->mohawk)
+ value |= DisableLinkPulse;
+ PutByte(XIRCREG1_ECR, value);
+ #endif
+ DEBUG(0, "%s: ECR is: %#02x\n", dev->name, value);
+
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */
+
+ if (local->silicon != 1) {
+ /* set the local memory dividing line.
+ * The comments in the sample code say that this is only
+ * settable with the scipper version 2 which is revision 0.
+ * Always for CE3 cards
+ */
+ SelectPage(2);
+ PutWord(XIRCREG2_RBS, 0x2000);
+ }
+
+ if (full)
+ set_addresses(dev);
+
+ /* Hardware workaround:
+ * The receive byte pointer after reset is off by 1 so we need
+ * to move the offset pointer back to 0.
+ */
+ SelectPage(0);
+ PutWord(XIRCREG0_DO, 0x2000); /* change offset command, off=0 */
+
+ /* setup MAC IMRs and clear status registers */
+ SelectPage(0x40); /* Bit 7 ... bit 0 */
+ PutByte(XIRCREG40_RMASK0, 0xff); /* ROK, RAB, rsv, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TMASK0, 0xff); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TMASK1, 0xb0); /* rsv, rsv, PTD, EXT, rsv,rsv,rsv, rsv*/
+ PutByte(XIRCREG40_RXST0, 0x00); /* ROK, RAB, REN, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TXST0, 0x00); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TXST1, 0x00); /* TEN, rsv, PTD, EXT, retry_counter:4 */
+
+ if (full && local->mohawk && init_mii(dev)) {
+ if (dev->if_port == 4 || local->dingo || local->new_mii) {
+ printk(KERN_INFO "%s: MII selected\n", dev->name);
+ SelectPage(2);
+ PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
+ busy_loop(HZ/50);
+ } else {
+ printk(KERN_INFO "%s: MII detected; using 10mbs\n",
+ dev->name);
+ SelectPage(0x42);
+ if (dev->if_port == 2) /* enable 10Base2 */
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ else /* enable 10BaseT */
+ PutByte(XIRCREG42_SWC1, 0x80);
+ busy_loop(HZ/25); /* wait 40 msec to let it complete */
+ }
+ if (full_duplex)
+ PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex);
+ } else { /* No MII */
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ dev->if_port = (value & MediaSelect) ? 1 : 2;
+ }
+
+ /* configure the LEDs */
+ SelectPage(2);
+ if (dev->if_port == 1 || dev->if_port == 4) /* TP: Link and Activity */
+ PutByte(XIRCREG2_LED, 0x3b);
+ else /* Coax: Not-Collision and Activity */
+ PutByte(XIRCREG2_LED, 0x3a);
+
+ if (local->dingo)
+ PutByte(0x0b, 0x04); /* 100 Mbit LED */
+
+ /* enable receiver and put the mac online */
+ if (full) {
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, EnableRecv | Online);
+ }
+
+ /* setup Ethernet IMR and enable interrupts */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff);
+ udelay(1);
+ SelectPage(0);
+ PutByte(XIRCREG_CR, EnableIntr);
+ if (local->modem && !local->dingo) { /* do some magic */
+ if (!(GetByte(0x10) & 0x01))
+ PutByte(0x10, 0x11); /* unmask master-int bit */
+ }
+
+ if (full)
+ printk(KERN_INFO "%s: media %s, silicon revision %d\n",
+ dev->name, if_names[dev->if_port], local->silicon);
+ /* We should switch back to page 0 to avoid a bug in revision 0
+ * where regs with offset below 8 can't be read after an access
+ * to the MAC registers */
+ SelectPage(0);
+}
+
+/****************
+ * Initialize the Media-Independent-Interface
+ * Returns: True if we have a good MII
+ */
+static int
+init_mii(struct net_device *dev)
+{
+ local_info_t *local = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ unsigned control, status, linkpartner;
+ int i;
+
+ if (if_port == 4 || if_port == 1) { /* force 100BaseT or 10BaseT */
+ dev->if_port = if_port;
+ local->probe_port = 0;
+ return 1;
+ }
+
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0xff00) != 0x7800)
+ return 0; /* No MII */
+
+ local->new_mii = (mii_rd(ioaddr, 0, 2) != 0xffff);
+
+ if (local->probe_port)
+ control = 0x1000; /* auto neg */
+ else if (dev->if_port == 4)
+ control = 0x2000; /* no auto neg, 100mbs mode */
+ else
+ control = 0x0000; /* no auto neg, 10mbs mode */
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ control = mii_rd(ioaddr, 0, 0);
+
+ if (control & 0x0400) {
+ printk(KERN_NOTICE "%s can't take PHY out of isolation mode\n",
+ dev->name);
+ local->probe_port = 0;
+ return 0;
+ }
+
+ if (local->probe_port) {
+ /* according to the DP83840A specs the auto negotiation process
+ * may take up to 3.5 sec, so we use this also for our ML6692
+ * Fixme: Better to use a timer here!
+ */
+ for (i=0; i < 35; i++) {
+ busy_loop(HZ/10); /* wait 100 msec */
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0x0020) && (status & 0x0004))
+ break;
+ }
+
+ if (!(status & 0x0020)) {
+ printk(KERN_INFO "%s: autonegotiation failed;"
+ " using 10mbs\n", dev->name);
+ if (!local->new_mii) {
+ control = 0x0000;
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ SelectPage(0);
+ dev->if_port = (GetByte(XIRCREG_ESR) & MediaSelect) ? 1 : 2;
+ }
+ } else {
+ linkpartner = mii_rd(ioaddr, 0, 5);
+ printk(KERN_INFO "%s: MII link partner: %04x\n",
+ dev->name, linkpartner);
+ if (linkpartner & 0x0080) {
+ dev->if_port = 4;
+ } else
+ dev->if_port = 1;
+ }
+ }
+
+ return 1;
+}
+
+static void
+do_powerdown(struct net_device *dev)
+{
+
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(0, "do_powerdown(%p)\n", dev);
+
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+}
+
+static int
+do_stop(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ local_info_t *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(0, "do_stop(%p)\n", dev);
+
+ if (!link)
+ return -ENODEV;
+
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ SelectPage(0);
+ PutByte(XIRCREG_CR, 0); /* disable interrupts */
+ SelectPage(0x01);
+ PutByte(XIRCREG1_IMR0, 0x00); /* forbid all ints */
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+
+ link->open--;
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int __init
+init_xirc2ps_cs(void)
+{
+ servinfo_t serv;
+
+ printk(KERN_INFO "%s\n", version);
+ if (lockup_hack)
+ printk(KINF_XIRC "lockup hack is enabled\n");
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KNOT_XIRC "Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ DEBUG(0, "pc_debug=%d\n", pc_debug);
+ register_pccard_driver(&dev_info, &xirc2ps_attach, &xirc2ps_detach);
+ return 0;
+}
+
+static void __exit
+exit_xirc2ps_cs(void)
+{
+ DEBUG(0, "unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list) {
+ if (dev_list->state & DEV_CONFIG)
+ xirc2ps_release((u_long)dev_list);
+ if (dev_list) /* xirc2ps_release() might already have detached... */
+ xirc2ps_detach(dev_list);
+ }
+}
+
+module_init(init_xirc2ps_cs);
+module_exit(exit_xirc2ps_cs);
+
diff --git a/linux/pcmcia-cs/glue/ds.c b/linux/pcmcia-cs/glue/ds.c
new file mode 100644
index 0000000..cc4b92b
--- /dev/null
+++ b/linux/pcmcia-cs/glue/ds.c
@@ -0,0 +1,454 @@
+/*
+ * pcmcia-socket `device' driver
+ *
+ * Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* This file is included from linux/pcmcia-cs/modules/ds.c. */
+
+/*
+ * This is really ugly. But this is glue code, so... It's about the `kfree'
+ * symbols in <linux/malloc.h> and <kern/kalloc.h>.
+ */
+#undef kfree
+
+/*
+ * <kern/sched_prim.h> defines another event_t which is not used in this
+ * file, so name it mach_event_t to avoid a clash.
+ */
+#define event_t mach_event_t
+#include <kern/sched_prim.h>
+#undef event_t
+
+#include <mach/port.h>
+#include <mach/notify.h>
+#include <mach/mig_errors.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+
+#include <device/device_emul.h>
+
+#include <device/device_reply.user.h>
+
+/* Eliminate the queue_empty macro from Mach header files. */
+#undef queue_empty
+
+struct device_emulation_ops linux_pcmcia_emulation_ops;
+
+/*
+ * We have our very own device emulation stack because we need to carry a
+ * pointer from the open call via read until the final close call: a
+ * pointer to the user's event queue.
+ */
+struct mach_socket_device {
+ /*
+ * Pointer to the mach_device we have allocated. This must be the
+ * first entry in this struct, in order to be able to cast to
+ * mach_device.
+ */
+ struct mach_device mach_dev;
+
+ /*
+ * Pointer to the user info of pcmcia data services.
+ */
+ user_info_t *user;
+
+ /*
+ * Cache for carrying data from set_status to get_status calls. This
+ * is needed for write ioctls.
+ */
+ ds_ioctl_arg_t carry;
+};
+
+
+static void
+ds_device_deallocate(void *p)
+{
+ mach_device_t device = (mach_device_t) p;
+
+ simple_lock(&device->ref_lock);
+ if (--device->ref_count > 0)
+ {
+ simple_unlock(&device->ref_lock);
+ return;
+ }
+
+ simple_unlock(&device->ref_lock);
+
+ /*
+ * do what the original ds_release would do, ...
+ */
+ socket_t i = device->dev_number;
+ socket_info_t *s;
+ user_info_t *user, **link;
+
+ s = &socket_table[i];
+ user = ((struct mach_socket_device *) device)->user;
+
+ /* allow to access the device again ... */
+ if(device->flag & D_WRITE)
+ s->state &= ~SOCKET_BUSY;
+
+ /* Unlink user data structure */
+ for (link = &s->user; *link; link = &(*link)->next)
+ if (*link == user) break;
+
+ if(*link)
+ {
+ *link = user->next;
+ user->user_magic = 0;
+ linux_kfree(user);
+ }
+
+ /* now finally reap the device */
+ linux_kfree(device);
+}
+
+/*
+ * Return the send right associated with this socket device incarnation.
+ */
+static ipc_port_t
+dev_to_port(void *d)
+{
+ struct mach_device *dev = d;
+
+ if(! dev)
+ return IP_NULL;
+
+ ipc_port_t port = ipc_port_make_send(dev->port);
+
+ ds_device_deallocate(dev);
+ return port;
+}
+
+
+static inline int
+atoi(const char *ptr)
+{
+ if(! ptr)
+ return 0;
+
+ int i = 0;
+ while(*ptr >= '0' && *ptr <= '9')
+ i = i * 10 + *(ptr ++) - '0';
+
+ return i;
+}
+
+
+/*
+ * Try to open the per-socket pseudo device `socket%d'.
+ */
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, char *name, device_t *devp /* out */)
+{
+ if(! socket_table)
+ return D_NO_SUCH_DEVICE;
+
+ if(strlen(name) < 7 || strncmp(name, "socket", 6))
+ return D_NO_SUCH_DEVICE;
+
+ socket_t i = atoi(name + 6);
+ if(i >= MAX_SOCKS || i >= sockets)
+ return D_NO_SUCH_DEVICE;
+
+ io_return_t err = D_SUCCESS;
+
+ struct mach_device *dev;
+ dev = linux_kmalloc(sizeof(struct mach_socket_device), GFP_KERNEL);
+ if(! dev)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+
+ memset(dev, 0, sizeof(struct mach_socket_device));
+ mach_device_reference(dev);
+
+ /* now do, what ds_open would do if it would be in charge */
+ socket_info_t *s = &socket_table[i];
+
+ if(mode & D_WRITE)
+ {
+ if(s->state & SOCKET_BUSY)
+ {
+ err = D_ALREADY_OPEN;
+ goto out;
+ }
+ else
+ s->state |= SOCKET_BUSY;
+ }
+
+ user_info_t *user = linux_kmalloc(sizeof(user_info_t), GFP_KERNEL);
+ if(! user)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+
+ user->event_tail = user->event_head = 0;
+ user->next = s->user;
+ user->user_magic = USER_MAGIC;
+ s->user = user;
+
+ ((struct mach_socket_device *) dev)->user = user;
+
+ if(s->state & SOCKET_PRESENT)
+ queue_event(user, CS_EVENT_CARD_INSERTION);
+
+ /* just set up the rest of our mach_device now ... */
+ dev->dev.emul_ops = &linux_pcmcia_emulation_ops;
+ dev->dev.emul_data = dev;
+
+ dev->dev_number = i;
+ dev->flag = mode;
+
+ dev->port = ipc_port_alloc_kernel();
+ if(dev->port == IP_NULL)
+ {
+ err = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+
+ mach_device_reference(dev);
+ ipc_kobject_set(dev->port, (ipc_kobject_t) &dev->dev, IKOT_DEVICE);
+
+ /* request no-senders notifications on device port */
+ ipc_port_t notify = ipc_port_make_sonce(dev->port);
+ ip_lock(dev->port);
+ ipc_port_nsrequest(dev->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+
+ out:
+ if(err)
+ {
+ if(dev)
+ {
+ if(dev->port != IP_NULL)
+ {
+ ipc_kobject_set(dev->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel(dev->port);
+ }
+
+ linux_kfree(dev);
+ dev = NULL;
+ }
+ }
+ else
+ dev->state = DEV_STATE_OPEN;
+
+ *devp = &dev->dev;
+
+ if (IP_VALID (reply_port))
+ ds_device_open_reply(reply_port, reply_port_type,
+ err, dev_to_port(dev));
+ return MIG_NO_REPLY;
+}
+
+
+/*
+ * Close the device DEV.
+ */
+static int
+device_close (void *devp)
+{
+ struct mach_device *dev = (struct mach_device *) devp;
+
+ dev->state = DEV_STATE_CLOSING;
+
+ /* check whether there is a blocked read request pending,
+ * in that case, abort that one before closing
+ */
+ while(dev->ref_count > 2)
+ {
+ socket_t i = dev->dev_number;
+ socket_info_t *s = &socket_table[i];
+ wake_up_interruptible(&s->queue);
+
+ /* wait for device_read to exit */
+ return D_INVALID_OPERATION;
+ }
+
+ dev_port_remove(dev);
+ ipc_port_dealloc_kernel(dev->port);
+
+ return 0;
+}
+
+static io_return_t
+device_read(void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int bytes_wanted,
+ io_buf_ptr_t *data, unsigned int *data_count)
+{
+ struct mach_device *dev = (struct mach_device *) d;
+
+ if(dev->state != DEV_STATE_OPEN)
+ return D_NO_SUCH_DEVICE;
+
+ if(! IP_VALID(reply_port)) {
+ printk(KERN_INFO "ds: device_read: invalid reply port.\n");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /* prepare an io request structure */
+ io_req_t ior;
+ io_req_alloc(ior, 0);
+
+ ior->io_device = dev;
+ ior->io_unit = dev->dev_number;
+ ior->io_op = IO_READ | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0;
+ ior->io_count = bytes_wanted;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(dev);
+
+ /* do the read finally */
+ io_return_t result = D_SUCCESS;
+
+ result = device_read_alloc(ior, ior->io_count);
+ if(result != KERN_SUCCESS)
+ goto out;
+
+ socket_t i = dev->dev_number;
+ socket_info_t *s = &socket_table[i];
+ user_info_t *user = ((struct mach_socket_device *) dev)->user;
+
+ if(ior->io_count < 4)
+ { result = D_INVALID_SIZE; goto out; }
+
+ if(CHECK_USER(user))
+ {
+ result = D_IO_ERROR;
+ goto out;
+ }
+
+ while(queue_empty(user))
+ {
+ if(ior->io_mode & D_NOWAIT)
+ {
+ result = D_WOULD_BLOCK;
+ goto out;
+ }
+ else
+ interruptible_sleep_on(&s->queue);
+
+ if(dev->state == DEV_STATE_CLOSING)
+ {
+ result = D_DEVICE_DOWN;
+ goto out;
+ }
+ }
+
+ event_t ev = get_queued_event(user);
+ memcpy(ior->io_data, &ev, sizeof(event_t));
+
+ ior->io_residual = ior->io_count - sizeof(event_t);
+
+ out:
+ /*
+ * Return result via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+
+static io_return_t
+device_set_status(void *d, dev_flavor_t req, dev_status_t arg,
+ mach_msg_type_number_t sz)
+{
+ struct mach_socket_device *dev = (struct mach_socket_device *) d;
+
+ if(sz * sizeof(int) > sizeof(ds_ioctl_arg_t))
+ return D_INVALID_OPERATION;
+
+ if(dev->mach_dev.state != DEV_STATE_OPEN)
+ return D_NO_SUCH_DEVICE;
+
+ unsigned int ioctl_sz = (req & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+ memcpy(&dev->carry, arg, ioctl_sz);
+
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_get_status(void *d, dev_flavor_t req, dev_status_t arg,
+ mach_msg_type_number_t *sz)
+{
+ struct mach_socket_device *dev = (struct mach_socket_device *) d;
+
+ if(dev->mach_dev.state != DEV_STATE_OPEN)
+ return D_NO_SUCH_DEVICE;
+
+ struct inode inode;
+ inode.i_rdev = dev->mach_dev.dev_number;
+ int ret = ds_ioctl(&inode, NULL, req, (u_long) &dev->carry);
+
+ if(ret)
+ return D_IO_ERROR;
+
+ unsigned int ioctl_sz = (req & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+ if(req & IOC_OUT) memcpy(arg, &dev->carry, ioctl_sz);
+
+ return D_SUCCESS;
+}
+
+
+struct device_emulation_ops linux_pcmcia_emulation_ops =
+ {
+ (void*) mach_device_reference,
+ ds_device_deallocate,
+ dev_to_port,
+ device_open,
+ device_close,
+ NULL, /* device_write */
+ NULL, /* write_inband */
+ device_read,
+ NULL, /* read_inband */
+ device_set_status,
+ device_get_status,
+ NULL, /* set_filter */
+ NULL, /* map */
+ NULL, /* no_senders */
+ NULL, /* write_trap */
+ NULL /* writev_trap */
+ };
diff --git a/linux/pcmcia-cs/glue/pcmcia.c b/linux/pcmcia-cs/glue/pcmcia.c
new file mode 100644
index 0000000..3beebe3
--- /dev/null
+++ b/linux/pcmcia-cs/glue/pcmcia.c
@@ -0,0 +1,121 @@
+/*
+ * pcmcia bridge initialization
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/pci.h>
+
+#include <asm/spinlock.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+
+extern int init_pcmcia_cs(void);
+extern int init_i82365(void);
+extern int init_pcmcia_ds(void);
+
+extern int pcmcia_modinit_pcnet_cs(void);
+extern int pcmcia_modinit_3c589_cs(void);
+extern int pcmcia_modinit_3c574_cs(void);
+extern int pcmcia_modinit_3c575_cb(void);
+extern int pcmcia_modinit_axnet_cs(void);
+extern int pcmcia_modinit_eepro100_cb(void);
+extern int pcmcia_modinit_epic_cb(void);
+extern int pcmcia_modinit_fmvj18x_cs(void);
+extern int pcmcia_modinit_nmclan_cs(void);
+extern int pcmcia_modinit_smc91c92_cs(void);
+extern int pcmcia_modinit_tulip_cb(void);
+extern int pcmcia_modinit_xirc2ps_cs(void);
+
+extern int pcmcia_modinit_orinoco_cs(void);
+
+/*
+ * pcmcia bridge initialisation.
+ */
+void
+pcmcia_init(void)
+{
+ init_pcmcia_cs();
+
+#ifdef CONFIG_I82365
+ init_i82365();
+#endif
+
+ init_pcmcia_ds();
+
+ /*
+ * Call the initialization routines of each driver.
+ */
+#ifdef CONFIG_PCNET_CS
+ pcmcia_modinit_pcnet_cs();
+#endif
+
+#ifdef CONFIG_3C589_CS
+ pcmcia_modinit_3c589_cs();
+#endif
+
+#ifdef CONFIG_3C574_CS
+ pcmcia_modinit_3c574_cs();
+#endif
+
+#ifdef CONFIG_3C575_CB
+ pcmcia_modinit_3c575_cb();
+#endif
+
+#ifdef CONFIG_AXNET_CS
+ pcmcia_modinit_axnet_cs();
+#endif
+
+#ifdef CONFIG_EEPRO100_CB
+ pcmcia_modinit_eepro100_cb();
+#endif
+
+#ifdef CONFIG_EPIC_CB
+ pcmcia_modinit_epic_cb();
+#endif
+
+#ifdef CONFIG_FMVJ18X_CS
+ pcmcia_modinit_fmvj18x_cs();
+#endif
+
+#ifdef CONFIG_NMCLAN_CS
+ pcmcia_modinit_nmclan_cs();
+#endif
+
+#ifdef CONFIG_SMC91C92_CS
+ pcmcia_modinit_smc91c92_cs();
+#endif
+
+#ifdef CONFIG_TULIP_CB
+ pcmcia_modinit_tulip_cb();
+#endif
+
+#ifdef CONFIG_XIRC2PS_CS
+ pcmcia_modinit_xirc2ps_cs();
+#endif
+
+#ifdef CONFIG_ORINOCO_CS
+ pcmcia_modinit_orinoco_cs();
+#endif
+}
diff --git a/linux/pcmcia-cs/glue/pcmcia_glue.h b/linux/pcmcia-cs/glue/pcmcia_glue.h
new file mode 100644
index 0000000..691c1b9
--- /dev/null
+++ b/linux/pcmcia-cs/glue/pcmcia_glue.h
@@ -0,0 +1,264 @@
+/*
+ * pcmcia card services glue code
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PCMCIA_GLUE_H
+#define _PCMCIA_GLUE_H
+
+/*
+ * pcmcia glue configuration
+ */
+#define PCMCIA_DEBUG 4
+/* Maximum number of sockets supported by the glue code. */
+#define MAX_SOCKS 8
+
+
+/*
+ * Linux kernel version handling.
+ */
+#include <linux/version.h>
+#define UTS_VERSION "" /* Hm. */
+#define KERNEL_VERSION(v,p,s) (((v)<<16)+(p<<8)+s)
+
+
+/*
+ * Some cardbus drivers want `CARDBUS' to be defined.
+ */
+#ifdef CONFIG_CARDBUS
+#define CARDBUS 1
+#endif
+
+
+/*
+ * Some includes.
+ */
+#include <linux/malloc.h>
+#include <pcmcia/driver_ops.h>
+
+
+/*
+ * ioremap and iounmap
+ */
+#include <linux/pci.h>
+#include <linux/compatmac.h>
+#define iounmap(x) (((long)x<0x100000)?0:vfree ((void*)x))
+
+
+/*
+ * These are implemented in rsrc_mgr.c.
+ */
+extern int check_mem_region(u_long base, u_long num);
+extern void request_mem_region(u_long base, u_long num, char *name);
+extern void release_mem_region(u_long base, u_long num);
+
+
+/*
+ * Timer and delaying functions.
+ */
+#include <linux/delay.h>
+#define mod_timer(a, b) \
+ do { del_timer(a); (a)->expires = (b); add_timer(a); } while (0)
+#define mdelay(x) \
+ do { int i; for (i=0;i<x;i++) __udelay(1000); } while (0)
+
+
+/*
+ * GNU Mach's Linux glue code doesn't have
+ * `interruptible_sleep_on_timeout'. For the moment let's use the
+ * non-timeout variant. :-/
+ */
+#define interruptible_sleep_on_timeout(w,t) \
+ interruptible_sleep_on(w)
+
+/*
+ * The macro implementation relies on current_set symbol, which doesn't
+ * appear to be available on GNU Mach. TODO: How to fix this properly?
+ */
+#undef signal_pending
+#define signal_pending(c) \
+ 0
+
+
+/*
+ * Byte order stuff. TODO: This does not work on big endian systems,
+ * does it? Move to asm-i386?
+ */
+#include <asm/byteorder.h>
+#ifndef le16_to_cpu
+#define le16_to_cpu(x) (x)
+#define le32_to_cpu(x) (x)
+#endif
+#ifndef cpu_to_le16
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#endif
+
+
+/*
+ * There is no `wake_up_interruptible' on GNU Mach. Use plain `wake_up'
+ * for the moment. TODO.
+ */
+#define wake_up_interruptible wake_up
+
+
+/* Eliminate the 4-arg versions from <linux/compatmac.h>. */
+#undef pci_read_config_word
+#undef pci_read_config_dword
+
+#define bus_number(pci_dev) ((pci_dev)->bus->number)
+#define devfn_number(pci_dev) ((pci_dev)->devfn)
+
+#define pci_read_config_byte(pdev, where, valp) \
+ pcibios_read_config_byte(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_word(pdev, where, valp) \
+ pcibios_read_config_word(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_dword(pdev, where, valp) \
+ pcibios_read_config_dword(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_write_config_byte(pdev, where, val) \
+ pcibios_write_config_byte(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_word(pdev, where, val) \
+ pcibios_write_config_word(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_dword(pdev, where, val) \
+ pcibios_write_config_dword(bus_number(pdev), devfn_number(pdev), where, val)
+
+
+/*
+ * From pcmcia-cs/include/linux/pci.h.
+ */
+#define pci_for_each_dev(p) for (p = pci_devices; p; p = p->next)
+
+
+
+/*
+ * These are defined in pci_fixup.c.
+ */
+extern struct pci_dev *pci_find_slot(u_int bus, u_int devfn);
+extern struct pci_dev *pci_find_class(u_int class, struct pci_dev *from);
+extern int pci_set_power_state(struct pci_dev *dev, int state);
+extern int pci_enable_device(struct pci_dev *dev);
+
+extern u32 pci_irq_mask;
+
+
+#ifdef PCMCIA_CLIENT
+/*
+ * Worse enough, we need to have `mach_device' as well (at least in ds.c)
+ * and this one is typedef'd to `device', therefore we cannot just
+ * include `netdevice.h' when we're compiling the core.
+ *
+ * For compilation of the clients `PCMCIA_CLIENT' is defined through the
+ * Makefile.
+ */
+#include <linux/netdevice.h>
+#include <linux/kcomp.h>
+
+
+/*
+ * init_dev_name and copy_dev_name glue (for `PCMCIA_CLIENT's only).
+ */
+static inline void
+init_dev_name(struct net_device *dev, dev_node_t node)
+{
+ /* just allocate some space for the device name,
+ * register_netdev will happily provide one to us
+ */
+ dev->name = kmalloc(8, GFP_KERNEL);
+ dev->name[0] = 0;
+
+ /*
+ * dev->init needs to be initialized in order for register_netdev to work
+ */
+ int stub(struct device *dev)
+ {
+ (void) dev;
+ return 0;
+ }
+ dev->init = stub;
+}
+
+#define copy_dev_name(node, dev) do { } while (0)
+#endif /* PCMCIA_CLIENT */
+
+
+/*
+ * Some network interface glue, additional to the one from
+ * <linux/kcomp.h>.
+ */
+#define netif_mark_up(dev) do { (dev)->start = 1; } while (0)
+#define netif_mark_down(dev) do { (dev)->start = 0; } while (0)
+#define netif_carrier_on(dev) do { dev->flags |= IFF_RUNNING; } while (0)
+#define netif_carrier_off(dev) do { dev->flags &= ~IFF_RUNNING; } while (0)
+#define tx_timeout_check(dev, tx_timeout) \
+ do { if (test_and_set_bit(0, (void *)&(dev)->tbusy) != 0) { \
+ if (jiffies - (dev)->trans_start < TX_TIMEOUT) return 1; \
+ tx_timeout(dev); \
+ } } while (0)
+
+
+/*
+ * Some `struct netdevice' interface glue (from the pcmcia-cs package).
+ */
+#define skb_tx_check(dev, skb) \
+ do { if (skb == NULL) { dev_tint(dev); return 0; } \
+ if (skb->len <= 0) return 0; } while (0)
+#define tx_timeout_check(dev, tx_timeout) \
+ do { if (test_and_set_bit(0, (void *)&(dev)->tbusy) != 0) { \
+ if (jiffies - (dev)->trans_start < TX_TIMEOUT) return 1; \
+ tx_timeout(dev); \
+ } } while (0)
+#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE)
+#define net_device_stats enet_statistics
+#define add_rx_bytes(stats, n) do { int x; x = (n); } while (0)
+#define add_tx_bytes(stats, n) do { int x; x = (n); } while (0)
+
+
+
+/*
+ * TODO: This is i386 dependent.
+ */
+#define readw_ns(p) readw(p)
+#define writew_ns(v,p) writew(v,p)
+
+
+
+
+/*
+ * We compile everything directly into the GNU Mach kernel, there are no
+ * modules.
+ */
+#define MODULE_PARM(a,b)
+#define MODULE_AUTHOR(a)
+#define MODULE_DESCRIPTION(a)
+#define MODULE_LICENSE(a)
+
+#define module_init(a) \
+ void pcmcia_mod ## a (void) { a(); return; }
+#define module_exit(a)
+
+/*
+ * TODO: We don't have `disable_irq_nosync', do we need it? This is used
+ * by the axnet_cs client driver only.
+ */
+#define disable_irq_nosync(irq) disable_irq(irq)
+
+
+#endif /* _PCMCIA_GLUE_H */
diff --git a/linux/pcmcia-cs/glue/wireless_glue.h b/linux/pcmcia-cs/glue/wireless_glue.h
new file mode 100644
index 0000000..61006b4
--- /dev/null
+++ b/linux/pcmcia-cs/glue/wireless_glue.h
@@ -0,0 +1,158 @@
+/*
+ * wireless network glue code
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _WIRELESS_GLUE_H
+#define _WIRELESS_GLUE_H
+
+/*
+ * Wireless glue configuration.
+ */
+
+/*
+ * Include the pcmcia glue as well, in case the kernel is configured for
+ * it.
+ */
+#ifdef CONFIG_PCMCIA
+#define PCMCIA_CLIENT
+#include "pcmcia_glue.h"
+#endif
+
+
+/*
+ * Definition of a `BUG' function: write message and panic out.
+ */
+#ifndef BUG
+#define BUG() \
+ do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ *(int *)0=0; } while (0)
+#endif
+
+
+#include <kern/debug.h>
+
+/*
+ * We need some `schedule_task' replacement. This is defined in
+ * kernel/context.c in the Linux kernel.
+ */
+static inline int
+schedule_task(struct tq_struct *task)
+{
+ printk(KERN_INFO "schedule_task: not implemented, task=%p\n", task);
+ Debugger("schedule_task");
+ return 0; /* fail */
+}
+
+
+/*
+ * min() and max() macros that also do strict type-checking. See the
+ * "unnecessary" pointer comparison.
+ */
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+/*
+ * ... and if you can't take the strict types, you can specify one
+ * yourself.
+ */
+#define min_t(type,x,y) \
+ ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
+#define max_t(type,x,y) \
+ ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
+
+
+#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE)
+
+
+/*
+ * TODO: this is i386 specific.
+ */
+#define le16_to_cpus(x) do { } while(0)
+
+
+/*
+ * Some wireless drivers check for a return value from `copy_to_user',
+ * however the `memcpy_tofs' implementation does return void.
+ */
+#undef copy_to_user
+#define copy_to_user(a,b,c) ((memcpy_tofs(a,b,c)), 0)
+
+
+/*
+ * Some more macros that are available on 2.2 and 2.4 Linux kernels.
+ */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+
+/*
+ * TQUEUE glue.
+ */
+#define PREPARE_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->next = 0; \
+ (_tq)->sync = 0; \
+ PREPARE_TQUEUE((_tq), (_routine), (_data)); \
+ } while (0)
+
+
+/*
+ * `etherdev' allocator.
+ */
+static inline struct net_device *
+alloc_etherdev(int sz)
+{
+ struct net_device *dev;
+ sz += sizeof(*dev) + 31;
+
+ if (!(dev = kmalloc(sz, GFP_KERNEL)))
+ return NULL;
+ memset(dev, 0, sz);
+
+ if (sz)
+ dev->priv = (void *)(((long)dev + sizeof(*dev) + 31) & ~31);
+
+ /* just allocate some space for the device name,
+ * register_netdev will happily provide one to us
+ */
+ dev->name = kmalloc(8, GFP_KERNEL);
+ dev->name[0] = 0;
+
+ ether_setup(dev);
+ return dev;
+}
+
+
+#endif /* _WIRELESS_GLUE_H */
diff --git a/linux/pcmcia-cs/include/linux/crc32.h b/linux/pcmcia-cs/include/linux/crc32.h
new file mode 100644
index 0000000..008a2da
--- /dev/null
+++ b/linux/pcmcia-cs/include/linux/crc32.h
@@ -0,0 +1,49 @@
+#ifndef _COMPAT_CRC32_H
+#define _COMPAT_CRC32_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18))
+
+#include_next <linux/crc32.h>
+
+#else
+
+static inline u_int ether_crc(int length, u_char *data)
+{
+ static const u_int ethernet_polynomial = 0x04c11db7U;
+ int crc = 0xffffffff; /* Initial value. */
+
+ while (--length >= 0) {
+ u_char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ }
+ return crc;
+}
+
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ static unsigned const ethernet_polynomial_le = 0xedb88320U;
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+#endif
+
+#endif /* _COMPAT_CRC32_H */
+
diff --git a/linux/pcmcia-cs/include/linux/slab.h b/linux/pcmcia-cs/include/linux/slab.h
new file mode 100644
index 0000000..634084d
--- /dev/null
+++ b/linux/pcmcia-cs/include/linux/slab.h
@@ -0,0 +1,12 @@
+#ifndef _COMPAT_SLAB_H
+#define _COMPAT_SLAB_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0))
+#include <linux/malloc.h>
+#else
+#include_next <linux/slab.h>
+#endif
+
+#endif /* _COMPAT_SLAB_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/bulkmem.h b/linux/pcmcia-cs/include/pcmcia/bulkmem.h
new file mode 100644
index 0000000..7748d44
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/bulkmem.h
@@ -0,0 +1,195 @@
+/*
+ * Definitions for bulk memory services
+ *
+ * bulkmem.h 1.13 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ * bulkmem.h 1.3 1995/05/27 04:49:49
+ */
+
+#ifndef _LINUX_BULKMEM_H
+#define _LINUX_BULKMEM_H
+
+/* For GetFirstRegion and GetNextRegion */
+typedef struct region_info_t {
+ u_int Attributes;
+ u_int CardOffset;
+ u_int RegionSize;
+ u_int AccessSpeed;
+ u_int BlockSize;
+ u_int PartMultiple;
+ u_char JedecMfr, JedecInfo;
+ memory_handle_t next;
+} region_info_t;
+
+#define REGION_TYPE 0x0001
+#define REGION_TYPE_CM 0x0000
+#define REGION_TYPE_AM 0x0001
+#define REGION_PREFETCH 0x0008
+#define REGION_CACHEABLE 0x0010
+#define REGION_BAR_MASK 0xe000
+#define REGION_BAR_SHIFT 13
+
+/* For OpenMemory */
+typedef struct open_mem_t {
+ u_int Attributes;
+ u_int Offset;
+} open_mem_t;
+
+/* Attributes for OpenMemory */
+#define MEMORY_TYPE 0x0001
+#define MEMORY_TYPE_CM 0x0000
+#define MEMORY_TYPE_AM 0x0001
+#define MEMORY_EXCLUSIVE 0x0002
+#define MEMORY_PREFETCH 0x0008
+#define MEMORY_CACHEABLE 0x0010
+#define MEMORY_BAR_MASK 0xe000
+#define MEMORY_BAR_SHIFT 13
+
+typedef struct eraseq_entry_t {
+ memory_handle_t Handle;
+ u_char State;
+ u_int Size;
+ u_int Offset;
+ void *Optional;
+} eraseq_entry_t;
+
+typedef struct eraseq_hdr_t {
+ int QueueEntryCnt;
+ eraseq_entry_t *QueueEntryArray;
+} eraseq_hdr_t;
+
+#define ERASE_QUEUED 0x00
+#define ERASE_IN_PROGRESS(n) (((n) > 0) && ((n) < 0x80))
+#define ERASE_IDLE 0xff
+#define ERASE_PASSED 0xe0
+#define ERASE_FAILED 0xe1
+
+#define ERASE_MISSING 0x80
+#define ERASE_MEDIA_WRPROT 0x84
+#define ERASE_NOT_ERASABLE 0x85
+#define ERASE_BAD_OFFSET 0xc1
+#define ERASE_BAD_TECH 0xc2
+#define ERASE_BAD_SOCKET 0xc3
+#define ERASE_BAD_VCC 0xc4
+#define ERASE_BAD_VPP 0xc5
+#define ERASE_BAD_SIZE 0xc6
+
+/* For CopyMemory */
+typedef struct copy_op_t {
+ u_int Attributes;
+ u_int SourceOffset;
+ u_int DestOffset;
+ u_int Count;
+} copy_op_t;
+
+/* For ReadMemory and WriteMemory */
+typedef struct mem_op_t {
+ u_int Attributes;
+ u_int Offset;
+ u_int Count;
+} mem_op_t;
+
+#define MEM_OP_BUFFER 0x01
+#define MEM_OP_BUFFER_USER 0x00
+#define MEM_OP_BUFFER_KERNEL 0x01
+#define MEM_OP_DISABLE_ERASE 0x02
+#define MEM_OP_VERIFY 0x04
+
+/* For RegisterMTD */
+typedef struct mtd_reg_t {
+ u_int Attributes;
+ u_int Offset;
+ u_long MediaID;
+} mtd_reg_t;
+
+/*
+ * Definitions for MTD requests
+ */
+
+typedef struct mtd_request_t {
+ u_int SrcCardOffset;
+ u_int DestCardOffset;
+ u_int TransferLength;
+ u_int Function;
+ u_long MediaID;
+ u_int Status;
+ u_int Timeout;
+} mtd_request_t;
+
+/* Fields in MTD Function */
+#define MTD_REQ_ACTION 0x003
+#define MTD_REQ_ERASE 0x000
+#define MTD_REQ_READ 0x001
+#define MTD_REQ_WRITE 0x002
+#define MTD_REQ_COPY 0x003
+#define MTD_REQ_NOERASE 0x004
+#define MTD_REQ_VERIFY 0x008
+#define MTD_REQ_READY 0x010
+#define MTD_REQ_TIMEOUT 0x020
+#define MTD_REQ_LAST 0x040
+#define MTD_REQ_FIRST 0x080
+#define MTD_REQ_KERNEL 0x100
+
+/* Status codes */
+#define MTD_WAITREQ 0x00
+#define MTD_WAITTIMER 0x01
+#define MTD_WAITRDY 0x02
+#define MTD_WAITPOWER 0x03
+
+/*
+ * Definitions for MTD helper functions
+ */
+
+/* For MTDModifyWindow */
+typedef struct mtd_mod_win_t {
+ u_int Attributes;
+ u_int AccessSpeed;
+ u_int CardOffset;
+} mtd_mod_win_t;
+
+/* For MTDSetVpp */
+typedef struct mtd_vpp_req_t {
+ u_char Vpp1, Vpp2;
+} mtd_vpp_req_t;
+
+/* For MTDRDYMask */
+typedef struct mtd_rdy_req_t {
+ u_int Mask;
+} mtd_rdy_req_t;
+
+enum mtd_helper {
+ MTDRequestWindow, MTDModifyWindow, MTDReleaseWindow,
+ MTDSetVpp, MTDRDYMask
+};
+
+#ifdef IN_CARD_SERVICES
+extern int MTDHelperEntry(int func, void *a1, void *a2);
+#else
+extern int MTDHelperEntry(int func, ...);
+#endif
+
+#endif /* _LINUX_BULKMEM_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/bus_ops.h b/linux/pcmcia-cs/include/pcmcia/bus_ops.h
new file mode 100644
index 0000000..d5f362a
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/bus_ops.h
@@ -0,0 +1,157 @@
+/*
+ * bus_ops.h 1.12 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_BUS_OPS_H
+#define _LINUX_BUS_OPS_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_VIRTUAL_BUS
+
+typedef struct bus_operations {
+ void *priv;
+ u32 (*b_in)(void *bus, u32 port, s32 sz);
+ void (*b_ins)(void *bus, u32 port, void *buf,
+ u32 count, s32 sz);
+ void (*b_out)(void *bus, u32 val, u32 port, s32 sz);
+ void (*b_outs)(void *bus, u32 port, void *buf,
+ u32 count, s32 sz);
+ void *(*b_ioremap)(void *bus, u_long ofs, u_long sz);
+ void (*b_iounmap)(void *bus, void *addr);
+ u32 (*b_read)(void *bus, void *addr, s32 sz);
+ void (*b_write)(void *bus, u32 val, void *addr, s32 sz);
+ void (*b_copy_from)(void *bus, void *d, void *s, u32 count);
+ void (*b_copy_to)(void *bus, void *d, void *s, u32 count);
+ int (*b_request_irq)(void *bus, u_int irq,
+ void (*handler)(int, void *,
+ struct pt_regs *),
+ u_long flags, const char *device,
+ void *dev_id);
+ void (*b_free_irq)(void *bus, u_int irq, void *dev_id);
+} bus_operations;
+
+#define bus_inb(b,p) (b)->b_in((b),(p),0)
+#define bus_inw(b,p) (b)->b_in((b),(p),1)
+#define bus_inl(b,p) (b)->b_in((b),(p),2)
+#define bus_inw_ns(b,p) (b)->b_in((b),(p),-1)
+#define bus_inl_ns(b,p) (b)->b_in((b),(p),-2)
+
+#define bus_insb(b,p,a,c) (b)->b_ins((b),(p),(a),(c),0)
+#define bus_insw(b,p,a,c) (b)->b_ins((b),(p),(a),(c),1)
+#define bus_insl(b,p,a,c) (b)->b_ins((b),(p),(a),(c),2)
+#define bus_insw_ns(b,p,a,c) (b)->b_ins((b),(p),(a),(c),-1)
+#define bus_insl_ns(b,p,a,c) (b)->b_ins((b),(p),(a),(c),-2)
+
+#define bus_outb(b,v,p) (b)->b_out((b),(v),(p),0)
+#define bus_outw(b,v,p) (b)->b_out((b),(v),(p),1)
+#define bus_outl(b,v,p) (b)->b_out((b),(v),(p),2)
+#define bus_outw_ns(b,v,p) (b)->b_out((b),(v),(p),-1)
+#define bus_outl_ns(b,v,p) (b)->b_out((b),(v),(p),-2)
+
+#define bus_outsb(b,p,a,c) (b)->b_outs((b),(p),(a),(c),0)
+#define bus_outsw(b,p,a,c) (b)->b_outs((b),(p),(a),(c),1)
+#define bus_outsl(b,p,a,c) (b)->b_outs((b),(p),(a),(c),2)
+#define bus_outsw_ns(b,p,a,c) (b)->b_outs((b),(p),(a),(c),-1)
+#define bus_outsl_ns(b,p,a,c) (b)->b_outs((b),(p),(a),(c),-2)
+
+#define bus_readb(b,a) (b)->b_read((b),(a),0)
+#define bus_readw(b,a) (b)->b_read((b),(a),1)
+#define bus_readl(b,a) (b)->b_read((b),(a),2)
+#define bus_readw_ns(b,a) (b)->b_read((b),(a),-1)
+#define bus_readl_ns(b,a) (b)->b_read((b),(a),-2)
+
+#define bus_writeb(b,v,a) (b)->b_write((b),(v),(a),0)
+#define bus_writew(b,v,a) (b)->b_write((b),(v),(a),1)
+#define bus_writel(b,v,a) (b)->b_write((b),(v),(a),2)
+#define bus_writew_ns(b,v,a) (b)->b_write((b),(v),(a),-1)
+#define bus_writel_ns(b,v,a) (b)->b_write((b),(v),(a),-2)
+
+#define bus_ioremap(b,s,n) (b)->b_ioremap((b),(s),(n))
+#define bus_iounmap(b,a) (b)->b_iounmap((b),(a))
+#define bus_memcpy_fromio(b,d,s,n) (b)->b_copy_from((b),(d),(s),(n))
+#define bus_memcpy_toio(b,d,s,n) (b)->b_copy_to((b),(d),(s),(n))
+
+#define bus_request_irq(b,i,h,f,n,d) \
+ (b)->b_request_irq((b),(i),(h),(f),(n),(d))
+#define bus_free_irq(b,i,d) (b)->b_free_irq((b),(i),(d))
+
+#else
+
+#define bus_inb(b,p) inb(p)
+#define bus_inw(b,p) inw(p)
+#define bus_inl(b,p) inl(p)
+#define bus_inw_ns(b,p) inw_ns(p)
+#define bus_inl_ns(b,p) inl_ns(p)
+
+#define bus_insb(b,p,a,c) insb(p,a,c)
+#define bus_insw(b,p,a,c) insw(p,a,c)
+#define bus_insl(b,p,a,c) insl(p,a,c)
+#define bus_insw_ns(b,p,a,c) insw_ns(p,a,c)
+#define bus_insl_ns(b,p,a,c) insl_ns(p,a,c)
+
+#define bus_outb(b,v,p) outb(b,v,p)
+#define bus_outw(b,v,p) outw(b,v,p)
+#define bus_outl(b,v,p) outl(b,v,p)
+#define bus_outw_ns(b,v,p) outw_ns(b,v,p)
+#define bus_outl_ns(b,v,p) outl_ns(b,v,p)
+
+#define bus_outsb(b,p,a,c) outsb(p,a,c)
+#define bus_outsw(b,p,a,c) outsw(p,a,c)
+#define bus_outsl(b,p,a,c) outsl(p,a,c)
+#define bus_outsw_ns(b,p,a,c) outsw_ns(p,a,c)
+#define bus_outsl_ns(b,p,a,c) outsl_ns(p,a,c)
+
+#define bus_readb(b,a) readb(a)
+#define bus_readw(b,a) readw(a)
+#define bus_readl(b,a) readl(a)
+#define bus_readw_ns(b,a) readw_ns(a)
+#define bus_readl_ns(b,a) readl_ns(a)
+
+#define bus_writeb(b,v,a) writeb(v,a)
+#define bus_writew(b,v,a) writew(v,a)
+#define bus_writel(b,v,a) writel(v,a)
+#define bus_writew_ns(b,v,a) writew_ns(v,a)
+#define bus_writel_ns(b,v,a) writel_ns(v,a)
+
+#define bus_ioremap(b,s,n) ioremap(s,n)
+#define bus_iounmap(b,a) iounmap(a)
+#define bus_memcpy_fromio(b,d,s,n) memcpy_fromio(d,s,n)
+#define bus_memcpy_toio(b,d,s,n) memcpy_toio(d,s,n)
+
+#ifdef CONFIG_8xx
+#define bus_request_irq(b,i,h,f,n,d) request_8xxirq((i),(h),(f),(n),(d))
+#else
+#define bus_request_irq(b,i,h,f,n,d) request_irq((i),(h),(f),(n),(d))
+#endif
+
+#define bus_free_irq(b,i,d) free_irq((i),(d))
+
+#endif /* CONFIG_VIRTUAL_BUS */
+
+#endif /* _LINUX_BUS_OPS_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/ciscode.h b/linux/pcmcia-cs/include/pcmcia/ciscode.h
new file mode 100644
index 0000000..e6bacef
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/ciscode.h
@@ -0,0 +1,138 @@
+/*
+ * ciscode.h 1.57 2002/11/03 20:38:14
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CISCODE_H
+#define _LINUX_CISCODE_H
+
+/* Manufacturer and Product ID codes */
+
+#define MANFID_3COM 0x0101
+#define PRODID_3COM_3CXEM556 0x0035
+#define PRODID_3COM_3CCFEM556 0x0556
+#define PRODID_3COM_3C562 0x0562
+
+#define MANFID_ACCTON 0x01bf
+#define PRODID_ACCTON_EN2226 0x010a
+
+#define MANFID_ADAPTEC 0x012f
+#define PRODID_ADAPTEC_SCSI 0x0001
+
+#define MANFID_ATT 0xffff
+#define PRODID_ATT_KIT 0x0100
+
+#define MANFID_CONTEC 0xc001
+
+#define MANFID_FUJITSU 0x0004
+#define PRODID_FUJITSU_MBH10302 0x0004
+#define PRODID_FUJITSU_MBH10304 0x1003
+#define PRODID_FUJITSU_LA501 0x2000
+
+#define MANFID_IBM 0x00a4
+#define PRODID_IBM_HOME_AND_AWAY 0x002e
+
+#define MANFID_INTEL 0x0089
+#define PRODID_INTEL_DUAL_RS232 0x0301
+#define PRODID_INTEL_2PLUS 0x8422
+
+#define MANFID_KME 0x0032
+#define PRODID_KME_KXLC005_A 0x0704
+#define PRODID_KME_KXLC005_B 0x2904
+
+#define MANFID_LINKSYS 0x0143
+#define PRODID_LINKSYS_PCMLM28 0xc0ab
+#define PRODID_LINKSYS_3400 0x3341
+
+#define MANFID_MEGAHERTZ 0x0102
+#define PRODID_MEGAHERTZ_VARIOUS 0x0000
+#define PRODID_MEGAHERTZ_EM3288 0x0006
+
+#define MANFID_MACNICA 0xc00b
+
+#define MANFID_MOTOROLA 0x0109
+#define PRODID_MOTOROLA_MARINER 0x0501
+
+#define MANFID_NATINST 0x010b
+#define PRODID_NATINST_QUAD_RS232 0xd180
+
+#define MANFID_NEW_MEDIA 0x0057
+
+#define MANFID_NOKIA 0x0124
+#define PRODID_NOKIA_CARDPHONE 0x0900
+
+#define MANFID_OLICOM 0x0121
+#define PRODID_OLICOM_OC2231 0x3122
+#define PRODID_OLICOM_OC2232 0x3222
+
+#define MANFID_OMEGA 0x0137
+#define PRODID_OMEGA_QSP_100 0x0025
+
+#define MANFID_OSITECH 0x0140
+#define PRODID_OSITECH_JACK_144 0x0001
+#define PRODID_OSITECH_JACK_288 0x0002
+#define PRODID_OSITECH_JACK_336 0x0007
+#define PRODID_OSITECH_SEVEN 0x0008
+
+#define MANFID_OXSEMI 0x0279
+
+#define MANFID_PIONEER 0x000b
+
+#define MANFID_PSION 0x016c
+#define PRODID_PSION_NET100 0x0023
+
+#define MANFID_QUATECH 0x0137
+#define PRODID_QUATECH_SPP100 0x0003
+#define PRODID_QUATECH_DUAL_RS232 0x0012
+#define PRODID_QUATECH_DUAL_RS232_D1 0x0007
+#define PRODID_QUATECH_DUAL_RS232_D2 0x0052
+#define PRODID_QUATECH_QUAD_RS232 0x001b
+#define PRODID_QUATECH_DUAL_RS422 0x000e
+#define PRODID_QUATECH_QUAD_RS422 0x0045
+
+#define MANFID_SMC 0x0108
+#define PRODID_SMC_ETHER 0x0105
+
+#define MANFID_SOCKET 0x0104
+#define PRODID_SOCKET_DUAL_RS232 0x0006
+#define PRODID_SOCKET_EIO 0x000a
+#define PRODID_SOCKET_LPE 0x000d
+#define PRODID_SOCKET_LPE_CF 0x0075
+
+#define MANFID_SUNDISK 0x0045
+
+#define MANFID_TDK 0x0105
+#define PRODID_TDK_CF010 0x0900
+#define PRODID_TDK_GN3410 0x4815
+
+#define MANFID_TOSHIBA 0x0098
+
+#define MANFID_UNGERMANN 0x02c0
+
+#define MANFID_XIRCOM 0x0105
+
+#endif /* _LINUX_CISCODE_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/cisreg.h b/linux/pcmcia-cs/include/pcmcia/cisreg.h
new file mode 100644
index 0000000..cb9fe39
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cisreg.h
@@ -0,0 +1,135 @@
+/*
+ * cisreg.h 1.18 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CISREG_H
+#define _LINUX_CISREG_H
+
+/*
+ * Offsets from ConfigBase for CIS registers
+ */
+#define CISREG_COR 0x00
+#define CISREG_CCSR 0x02
+#define CISREG_PRR 0x04
+#define CISREG_SCR 0x06
+#define CISREG_ESR 0x08
+#define CISREG_IOBASE_0 0x0a
+#define CISREG_IOBASE_1 0x0c
+#define CISREG_IOBASE_2 0x0e
+#define CISREG_IOBASE_3 0x10
+#define CISREG_IOSIZE 0x12
+
+/*
+ * Configuration Option Register
+ */
+#define COR_CONFIG_MASK 0x3f
+#define COR_MFC_CONFIG_MASK 0x38
+#define COR_FUNC_ENA 0x01
+#define COR_ADDR_DECODE 0x02
+#define COR_IREQ_ENA 0x04
+#define COR_LEVEL_REQ 0x40
+#define COR_SOFT_RESET 0x80
+
+/*
+ * Card Configuration and Status Register
+ */
+#define CCSR_INTR_ACK 0x01
+#define CCSR_INTR_PENDING 0x02
+#define CCSR_POWER_DOWN 0x04
+#define CCSR_AUDIO_ENA 0x08
+#define CCSR_IOIS8 0x20
+#define CCSR_SIGCHG_ENA 0x40
+#define CCSR_CHANGED 0x80
+
+/*
+ * Pin Replacement Register
+ */
+#define PRR_WP_STATUS 0x01
+#define PRR_READY_STATUS 0x02
+#define PRR_BVD2_STATUS 0x04
+#define PRR_BVD1_STATUS 0x08
+#define PRR_WP_EVENT 0x10
+#define PRR_READY_EVENT 0x20
+#define PRR_BVD2_EVENT 0x40
+#define PRR_BVD1_EVENT 0x80
+
+/*
+ * Socket and Copy Register
+ */
+#define SCR_SOCKET_NUM 0x0f
+#define SCR_COPY_NUM 0x70
+
+/*
+ * Extended Status Register
+ */
+#define ESR_REQ_ATTN_ENA 0x01
+#define ESR_REQ_ATTN 0x10
+
+/*
+ * CardBus Function Status Registers
+ */
+#define CBFN_EVENT 0x00
+#define CBFN_MASK 0x04
+#define CBFN_STATE 0x08
+#define CBFN_FORCE 0x0c
+
+/*
+ * These apply to all the CardBus function registers
+ */
+#define CBFN_WP 0x0001
+#define CBFN_READY 0x0002
+#define CBFN_BVD2 0x0004
+#define CBFN_BVD1 0x0008
+#define CBFN_GWAKE 0x0010
+#define CBFN_INTR 0x8000
+
+/*
+ * Extra bits in the Function Event Mask Register
+ */
+#define FEMR_BAM_ENA 0x0020
+#define FEMR_PWM_ENA 0x0040
+#define FEMR_WKUP_MASK 0x4000
+
+/*
+ * Indirect Addressing Registers for Zoomed Video: these are addresses
+ * in common memory space
+ */
+#define CISREG_ICTRL0 0x02 /* control registers */
+#define CISREG_ICTRL1 0x03
+#define CISREG_IADDR0 0x04 /* address registers */
+#define CISREG_IADDR1 0x05
+#define CISREG_IADDR2 0x06
+#define CISREG_IADDR3 0x07
+#define CISREG_IDATA0 0x08 /* data registers */
+#define CISREG_IDATA1 0x09
+
+#define ICTRL0_COMMON 0x01
+#define ICTRL0_AUTOINC 0x02
+#define ICTRL0_BYTEGRAN 0x04
+
+#endif /* _LINUX_CISREG_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/cistpl.h b/linux/pcmcia-cs/include/pcmcia/cistpl.h
new file mode 100644
index 0000000..1d4cac2
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cistpl.h
@@ -0,0 +1,604 @@
+/*
+ * cistpl.h 1.35 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CISTPL_H
+#define _LINUX_CISTPL_H
+
+#define CISTPL_NULL 0x00
+#define CISTPL_DEVICE 0x01
+#define CISTPL_LONGLINK_CB 0x02
+#define CISTPL_INDIRECT 0x03
+#define CISTPL_CONFIG_CB 0x04
+#define CISTPL_CFTABLE_ENTRY_CB 0x05
+#define CISTPL_LONGLINK_MFC 0x06
+#define CISTPL_BAR 0x07
+#define CISTPL_PWR_MGMNT 0x08
+#define CISTPL_EXTDEVICE 0x09
+#define CISTPL_CHECKSUM 0x10
+#define CISTPL_LONGLINK_A 0x11
+#define CISTPL_LONGLINK_C 0x12
+#define CISTPL_LINKTARGET 0x13
+#define CISTPL_NO_LINK 0x14
+#define CISTPL_VERS_1 0x15
+#define CISTPL_ALTSTR 0x16
+#define CISTPL_DEVICE_A 0x17
+#define CISTPL_JEDEC_C 0x18
+#define CISTPL_JEDEC_A 0x19
+#define CISTPL_CONFIG 0x1a
+#define CISTPL_CFTABLE_ENTRY 0x1b
+#define CISTPL_DEVICE_OC 0x1c
+#define CISTPL_DEVICE_OA 0x1d
+#define CISTPL_DEVICE_GEO 0x1e
+#define CISTPL_DEVICE_GEO_A 0x1f
+#define CISTPL_MANFID 0x20
+#define CISTPL_FUNCID 0x21
+#define CISTPL_FUNCE 0x22
+#define CISTPL_SWIL 0x23
+#define CISTPL_END 0xff
+/* Layer 2 tuples */
+#define CISTPL_VERS_2 0x40
+#define CISTPL_FORMAT 0x41
+#define CISTPL_GEOMETRY 0x42
+#define CISTPL_BYTEORDER 0x43
+#define CISTPL_DATE 0x44
+#define CISTPL_BATTERY 0x45
+#define CISTPL_FORMAT_A 0x47
+/* Layer 3 tuples */
+#define CISTPL_ORG 0x46
+#define CISTPL_SPCL 0x90
+
+typedef struct cistpl_longlink_t {
+ u_int addr;
+} cistpl_longlink_t;
+
+typedef struct cistpl_checksum_t {
+ u_short addr;
+ u_short len;
+ u_char sum;
+} cistpl_checksum_t;
+
+#define CISTPL_MAX_FUNCTIONS 8
+#define CISTPL_MFC_ATTR 0x00
+#define CISTPL_MFC_COMMON 0x01
+
+typedef struct cistpl_longlink_mfc_t {
+ u_char nfn;
+ struct {
+ u_char space;
+ u_int addr;
+ } fn[CISTPL_MAX_FUNCTIONS];
+} cistpl_longlink_mfc_t;
+
+#define CISTPL_MAX_ALTSTR_STRINGS 4
+
+typedef struct cistpl_altstr_t {
+ u_char ns;
+ u_char ofs[CISTPL_MAX_ALTSTR_STRINGS];
+ char str[254];
+} cistpl_altstr_t;
+
+#define CISTPL_DTYPE_NULL 0x00
+#define CISTPL_DTYPE_ROM 0x01
+#define CISTPL_DTYPE_OTPROM 0x02
+#define CISTPL_DTYPE_EPROM 0x03
+#define CISTPL_DTYPE_EEPROM 0x04
+#define CISTPL_DTYPE_FLASH 0x05
+#define CISTPL_DTYPE_SRAM 0x06
+#define CISTPL_DTYPE_DRAM 0x07
+#define CISTPL_DTYPE_FUNCSPEC 0x0d
+#define CISTPL_DTYPE_EXTEND 0x0e
+
+#define CISTPL_MAX_DEVICES 4
+
+typedef struct cistpl_device_t {
+ u_char ndev;
+ struct {
+ u_char type;
+ u_char wp;
+ u_int speed;
+ u_int size;
+ } dev[CISTPL_MAX_DEVICES];
+} cistpl_device_t;
+
+#define CISTPL_DEVICE_MWAIT 0x01
+#define CISTPL_DEVICE_3VCC 0x02
+
+typedef struct cistpl_device_o_t {
+ u_char flags;
+ cistpl_device_t device;
+} cistpl_device_o_t;
+
+#define CISTPL_VERS_1_MAX_PROD_STRINGS 4
+
+typedef struct cistpl_vers_1_t {
+ u_char major;
+ u_char minor;
+ u_char ns;
+ u_char ofs[CISTPL_VERS_1_MAX_PROD_STRINGS];
+ char str[254];
+} cistpl_vers_1_t;
+
+typedef struct cistpl_jedec_t {
+ u_char nid;
+ struct {
+ u_char mfr;
+ u_char info;
+ } id[CISTPL_MAX_DEVICES];
+} cistpl_jedec_t;
+
+typedef struct cistpl_manfid_t {
+ u_short manf;
+ u_short card;
+} cistpl_manfid_t;
+
+#define CISTPL_FUNCID_MULTI 0x00
+#define CISTPL_FUNCID_MEMORY 0x01
+#define CISTPL_FUNCID_SERIAL 0x02
+#define CISTPL_FUNCID_PARALLEL 0x03
+#define CISTPL_FUNCID_FIXED 0x04
+#define CISTPL_FUNCID_VIDEO 0x05
+#define CISTPL_FUNCID_NETWORK 0x06
+#define CISTPL_FUNCID_AIMS 0x07
+#define CISTPL_FUNCID_SCSI 0x08
+
+#define CISTPL_SYSINIT_POST 0x01
+#define CISTPL_SYSINIT_ROM 0x02
+
+typedef struct cistpl_funcid_t {
+ u_char func;
+ u_char sysinit;
+} cistpl_funcid_t;
+
+typedef struct cistpl_funce_t {
+ u_char type;
+ u_char data[0];
+} cistpl_funce_t;
+
+/*======================================================================
+
+ Modem Function Extension Tuples
+
+======================================================================*/
+
+#define CISTPL_FUNCE_SERIAL_IF 0x00
+#define CISTPL_FUNCE_SERIAL_CAP 0x01
+#define CISTPL_FUNCE_SERIAL_SERV_DATA 0x02
+#define CISTPL_FUNCE_SERIAL_SERV_FAX 0x03
+#define CISTPL_FUNCE_SERIAL_SERV_VOICE 0x04
+#define CISTPL_FUNCE_SERIAL_CAP_DATA 0x05
+#define CISTPL_FUNCE_SERIAL_CAP_FAX 0x06
+#define CISTPL_FUNCE_SERIAL_CAP_VOICE 0x07
+#define CISTPL_FUNCE_SERIAL_IF_DATA 0x08
+#define CISTPL_FUNCE_SERIAL_IF_FAX 0x09
+#define CISTPL_FUNCE_SERIAL_IF_VOICE 0x0a
+
+/* UART identification */
+#define CISTPL_SERIAL_UART_8250 0x00
+#define CISTPL_SERIAL_UART_16450 0x01
+#define CISTPL_SERIAL_UART_16550 0x02
+#define CISTPL_SERIAL_UART_8251 0x03
+#define CISTPL_SERIAL_UART_8530 0x04
+#define CISTPL_SERIAL_UART_85230 0x05
+
+/* UART capabilities */
+#define CISTPL_SERIAL_UART_SPACE 0x01
+#define CISTPL_SERIAL_UART_MARK 0x02
+#define CISTPL_SERIAL_UART_ODD 0x04
+#define CISTPL_SERIAL_UART_EVEN 0x08
+#define CISTPL_SERIAL_UART_5BIT 0x01
+#define CISTPL_SERIAL_UART_6BIT 0x02
+#define CISTPL_SERIAL_UART_7BIT 0x04
+#define CISTPL_SERIAL_UART_8BIT 0x08
+#define CISTPL_SERIAL_UART_1STOP 0x10
+#define CISTPL_SERIAL_UART_MSTOP 0x20
+#define CISTPL_SERIAL_UART_2STOP 0x40
+
+typedef struct cistpl_serial_t {
+ u_char uart_type;
+ u_char uart_cap_0;
+ u_char uart_cap_1;
+} cistpl_serial_t;
+
+typedef struct cistpl_modem_cap_t {
+ u_char flow;
+ u_char cmd_buf;
+ u_char rcv_buf_0, rcv_buf_1, rcv_buf_2;
+ u_char xmit_buf_0, xmit_buf_1, xmit_buf_2;
+} cistpl_modem_cap_t;
+
+#define CISTPL_SERIAL_MOD_103 0x01
+#define CISTPL_SERIAL_MOD_V21 0x02
+#define CISTPL_SERIAL_MOD_V23 0x04
+#define CISTPL_SERIAL_MOD_V22 0x08
+#define CISTPL_SERIAL_MOD_212A 0x10
+#define CISTPL_SERIAL_MOD_V22BIS 0x20
+#define CISTPL_SERIAL_MOD_V26 0x40
+#define CISTPL_SERIAL_MOD_V26BIS 0x80
+#define CISTPL_SERIAL_MOD_V27BIS 0x01
+#define CISTPL_SERIAL_MOD_V29 0x02
+#define CISTPL_SERIAL_MOD_V32 0x04
+#define CISTPL_SERIAL_MOD_V32BIS 0x08
+#define CISTPL_SERIAL_MOD_V34 0x10
+
+#define CISTPL_SERIAL_ERR_MNP2_4 0x01
+#define CISTPL_SERIAL_ERR_V42_LAPM 0x02
+
+#define CISTPL_SERIAL_CMPR_V42BIS 0x01
+#define CISTPL_SERIAL_CMPR_MNP5 0x02
+
+#define CISTPL_SERIAL_CMD_AT1 0x01
+#define CISTPL_SERIAL_CMD_AT2 0x02
+#define CISTPL_SERIAL_CMD_AT3 0x04
+#define CISTPL_SERIAL_CMD_MNP_AT 0x08
+#define CISTPL_SERIAL_CMD_V25BIS 0x10
+#define CISTPL_SERIAL_CMD_V25A 0x20
+#define CISTPL_SERIAL_CMD_DMCL 0x40
+
+typedef struct cistpl_data_serv_t {
+ u_char max_data_0;
+ u_char max_data_1;
+ u_char modulation_0;
+ u_char modulation_1;
+ u_char error_control;
+ u_char compression;
+ u_char cmd_protocol;
+ u_char escape;
+ u_char encrypt;
+ u_char misc_features;
+ u_char ccitt_code[0];
+} cistpl_data_serv_t;
+
+typedef struct cistpl_fax_serv_t {
+ u_char max_data_0;
+ u_char max_data_1;
+ u_char modulation;
+ u_char encrypt;
+ u_char features_0;
+ u_char features_1;
+ u_char ccitt_code[0];
+} cistpl_fax_serv_t;
+
+typedef struct cistpl_voice_serv_t {
+ u_char max_data_0;
+ u_char max_data_1;
+} cistpl_voice_serv_t;
+
+/*======================================================================
+
+ LAN Function Extension Tuples
+
+======================================================================*/
+
+#define CISTPL_FUNCE_LAN_TECH 0x01
+#define CISTPL_FUNCE_LAN_SPEED 0x02
+#define CISTPL_FUNCE_LAN_MEDIA 0x03
+#define CISTPL_FUNCE_LAN_NODE_ID 0x04
+#define CISTPL_FUNCE_LAN_CONNECTOR 0x05
+
+/* LAN technologies */
+#define CISTPL_LAN_TECH_ARCNET 0x01
+#define CISTPL_LAN_TECH_ETHERNET 0x02
+#define CISTPL_LAN_TECH_TOKENRING 0x03
+#define CISTPL_LAN_TECH_LOCALTALK 0x04
+#define CISTPL_LAN_TECH_FDDI 0x05
+#define CISTPL_LAN_TECH_ATM 0x06
+#define CISTPL_LAN_TECH_WIRELESS 0x07
+
+typedef struct cistpl_lan_tech_t {
+ u_char tech;
+} cistpl_lan_tech_t;
+
+typedef struct cistpl_lan_speed_t {
+ u_int speed;
+} cistpl_lan_speed_t;
+
+/* LAN media definitions */
+#define CISTPL_LAN_MEDIA_UTP 0x01
+#define CISTPL_LAN_MEDIA_STP 0x02
+#define CISTPL_LAN_MEDIA_THIN_COAX 0x03
+#define CISTPL_LAN_MEDIA_THICK_COAX 0x04
+#define CISTPL_LAN_MEDIA_FIBER 0x05
+#define CISTPL_LAN_MEDIA_900MHZ 0x06
+#define CISTPL_LAN_MEDIA_2GHZ 0x07
+#define CISTPL_LAN_MEDIA_5GHZ 0x08
+#define CISTPL_LAN_MEDIA_DIFF_IR 0x09
+#define CISTPL_LAN_MEDIA_PTP_IR 0x0a
+
+typedef struct cistpl_lan_media_t {
+ u_char media;
+} cistpl_lan_media_t;
+
+typedef struct cistpl_lan_node_id_t {
+ u_char nb;
+ u_char id[16];
+} cistpl_lan_node_id_t;
+
+typedef struct cistpl_lan_connector_t {
+ u_char code;
+} cistpl_lan_connector_t;
+
+/*======================================================================
+
+ IDE Function Extension Tuples
+
+======================================================================*/
+
+#define CISTPL_IDE_INTERFACE 0x01
+
+typedef struct cistpl_ide_interface_t {
+ u_char interface;
+} cistpl_ide_interface_t;
+
+/* First feature byte */
+#define CISTPL_IDE_SILICON 0x04
+#define CISTPL_IDE_UNIQUE 0x08
+#define CISTPL_IDE_DUAL 0x10
+
+/* Second feature byte */
+#define CISTPL_IDE_HAS_SLEEP 0x01
+#define CISTPL_IDE_HAS_STANDBY 0x02
+#define CISTPL_IDE_HAS_IDLE 0x04
+#define CISTPL_IDE_LOW_POWER 0x08
+#define CISTPL_IDE_REG_INHIBIT 0x10
+#define CISTPL_IDE_HAS_INDEX 0x20
+#define CISTPL_IDE_IOIS16 0x40
+
+typedef struct cistpl_ide_feature_t {
+ u_char feature1;
+ u_char feature2;
+} cistpl_ide_feature_t;
+
+#define CISTPL_FUNCE_IDE_IFACE 0x01
+#define CISTPL_FUNCE_IDE_MASTER 0x02
+#define CISTPL_FUNCE_IDE_SLAVE 0x03
+
+/*======================================================================
+
+ Configuration Table Entries
+
+======================================================================*/
+
+#define CISTPL_BAR_SPACE 0x07
+#define CISTPL_BAR_SPACE_IO 0x10
+#define CISTPL_BAR_PREFETCH 0x20
+#define CISTPL_BAR_CACHEABLE 0x40
+#define CISTPL_BAR_1MEG_MAP 0x80
+
+typedef struct cistpl_bar_t {
+ u_char attr;
+ u_int size;
+} cistpl_bar_t;
+
+typedef struct cistpl_config_t {
+ u_char last_idx;
+ u_int base;
+ u_int rmask[4];
+ u_char subtuples;
+} cistpl_config_t;
+
+/* These are bits in the 'present' field, and indices in 'param' */
+#define CISTPL_POWER_VNOM 0
+#define CISTPL_POWER_VMIN 1
+#define CISTPL_POWER_VMAX 2
+#define CISTPL_POWER_ISTATIC 3
+#define CISTPL_POWER_IAVG 4
+#define CISTPL_POWER_IPEAK 5
+#define CISTPL_POWER_IDOWN 6
+
+#define CISTPL_POWER_HIGHZ_OK 0x01
+#define CISTPL_POWER_HIGHZ_REQ 0x02
+
+typedef struct cistpl_power_t {
+ u_char present;
+ u_char flags;
+ u_int param[7];
+} cistpl_power_t;
+
+typedef struct cistpl_timing_t {
+ u_int wait, waitscale;
+ u_int ready, rdyscale;
+ u_int reserved, rsvscale;
+} cistpl_timing_t;
+
+#define CISTPL_IO_LINES_MASK 0x1f
+#define CISTPL_IO_8BIT 0x20
+#define CISTPL_IO_16BIT 0x40
+#define CISTPL_IO_RANGE 0x80
+
+#define CISTPL_IO_MAX_WIN 16
+
+typedef struct cistpl_io_t {
+ u_char flags;
+ u_char nwin;
+ struct {
+ u_int base;
+ u_int len;
+ } win[CISTPL_IO_MAX_WIN];
+} cistpl_io_t;
+
+typedef struct cistpl_irq_t {
+ u_int IRQInfo1;
+ u_int IRQInfo2;
+} cistpl_irq_t;
+
+#define CISTPL_MEM_MAX_WIN 8
+
+typedef struct cistpl_mem_t {
+ u_char flags;
+ u_char nwin;
+ struct {
+ u_int len;
+ u_int card_addr;
+ u_int host_addr;
+ } win[CISTPL_MEM_MAX_WIN];
+} cistpl_mem_t;
+
+#define CISTPL_CFTABLE_DEFAULT 0x0001
+#define CISTPL_CFTABLE_BVDS 0x0002
+#define CISTPL_CFTABLE_WP 0x0004
+#define CISTPL_CFTABLE_RDYBSY 0x0008
+#define CISTPL_CFTABLE_MWAIT 0x0010
+#define CISTPL_CFTABLE_AUDIO 0x0800
+#define CISTPL_CFTABLE_READONLY 0x1000
+#define CISTPL_CFTABLE_PWRDOWN 0x2000
+
+typedef struct cistpl_cftable_entry_t {
+ u_char index;
+ u_short flags;
+ u_char interface;
+ cistpl_power_t vcc, vpp1, vpp2;
+ cistpl_timing_t timing;
+ cistpl_io_t io;
+ cistpl_irq_t irq;
+ cistpl_mem_t mem;
+ u_char subtuples;
+} cistpl_cftable_entry_t;
+
+#define CISTPL_CFTABLE_MASTER 0x000100
+#define CISTPL_CFTABLE_INVALIDATE 0x000200
+#define CISTPL_CFTABLE_VGA_PALETTE 0x000400
+#define CISTPL_CFTABLE_PARITY 0x000800
+#define CISTPL_CFTABLE_WAIT 0x001000
+#define CISTPL_CFTABLE_SERR 0x002000
+#define CISTPL_CFTABLE_FAST_BACK 0x004000
+#define CISTPL_CFTABLE_BINARY_AUDIO 0x010000
+#define CISTPL_CFTABLE_PWM_AUDIO 0x020000
+
+typedef struct cistpl_cftable_entry_cb_t {
+ u_char index;
+ u_int flags;
+ cistpl_power_t vcc, vpp1, vpp2;
+ u_char io;
+ cistpl_irq_t irq;
+ u_char mem;
+ u_char subtuples;
+} cistpl_cftable_entry_cb_t;
+
+typedef struct cistpl_device_geo_t {
+ u_char ngeo;
+ struct {
+ u_char buswidth;
+ u_int erase_block;
+ u_int read_block;
+ u_int write_block;
+ u_int partition;
+ u_int interleave;
+ } geo[CISTPL_MAX_DEVICES];
+} cistpl_device_geo_t;
+
+typedef struct cistpl_vers_2_t {
+ u_char vers;
+ u_char comply;
+ u_short dindex;
+ u_char vspec8, vspec9;
+ u_char nhdr;
+ u_char vendor, info;
+ char str[244];
+} cistpl_vers_2_t;
+
+typedef struct cistpl_org_t {
+ u_char data_org;
+ char desc[30];
+} cistpl_org_t;
+
+#define CISTPL_ORG_FS 0x00
+#define CISTPL_ORG_APPSPEC 0x01
+#define CISTPL_ORG_XIP 0x02
+
+typedef struct cistpl_format_t {
+ u_char type;
+ u_char edc;
+ u_int offset;
+ u_int length;
+} cistpl_format_t;
+
+#define CISTPL_FORMAT_DISK 0x00
+#define CISTPL_FORMAT_MEM 0x01
+
+#define CISTPL_EDC_NONE 0x00
+#define CISTPL_EDC_CKSUM 0x01
+#define CISTPL_EDC_CRC 0x02
+#define CISTPL_EDC_PCC 0x03
+
+typedef union cisparse_t {
+ cistpl_device_t device;
+ cistpl_checksum_t checksum;
+ cistpl_longlink_t longlink;
+ cistpl_longlink_mfc_t longlink_mfc;
+ cistpl_vers_1_t version_1;
+ cistpl_altstr_t altstr;
+ cistpl_jedec_t jedec;
+ cistpl_manfid_t manfid;
+ cistpl_funcid_t funcid;
+ cistpl_funce_t funce;
+ cistpl_bar_t bar;
+ cistpl_config_t config;
+ cistpl_cftable_entry_t cftable_entry;
+ cistpl_cftable_entry_cb_t cftable_entry_cb;
+ cistpl_device_geo_t device_geo;
+ cistpl_vers_2_t vers_2;
+ cistpl_org_t org;
+ cistpl_format_t format;
+} cisparse_t;
+
+typedef struct tuple_t {
+ u_int Attributes;
+ cisdata_t DesiredTuple;
+ u_int Flags; /* internal use */
+ u_int LinkOffset; /* internal use */
+ u_int CISOffset; /* internal use */
+ cisdata_t TupleCode;
+ cisdata_t TupleLink;
+ cisdata_t TupleOffset;
+ cisdata_t TupleDataMax;
+ cisdata_t TupleDataLen;
+ cisdata_t *TupleData;
+} tuple_t;
+
+/* Special cisdata_t value */
+#define RETURN_FIRST_TUPLE 0xff
+
+/* Attributes for tuple calls */
+#define TUPLE_RETURN_LINK 0x01
+#define TUPLE_RETURN_COMMON 0x02
+
+/* For ValidateCIS */
+typedef struct cisinfo_t {
+ u_int Chains;
+} cisinfo_t;
+
+#define CISTPL_MAX_CIS_SIZE 0x200
+
+/* For ReplaceCIS */
+typedef struct cisdump_t {
+ u_int Length;
+ cisdata_t Data[CISTPL_MAX_CIS_SIZE];
+} cisdump_t;
+
+#endif /* LINUX_CISTPL_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/cs.h b/linux/pcmcia-cs/include/pcmcia/cs.h
new file mode 100644
index 0000000..8e202c6
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cs.h
@@ -0,0 +1,441 @@
+/*
+ * cs.h 1.74 2001/10/04 03:15:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CS_H
+#define _LINUX_CS_H
+
+/* For AccessConfigurationRegister */
+typedef struct conf_reg_t {
+ u_char Function;
+ u_int Action;
+ off_t Offset;
+ u_int Value;
+} conf_reg_t;
+
+/* Actions */
+#define CS_READ 1
+#define CS_WRITE 2
+
+/* for AdjustResourceInfo */
+typedef struct adjust_t {
+ u_int Action;
+ u_int Resource;
+ u_int Attributes;
+ union {
+ struct memory {
+ u_long Base;
+ u_long Size;
+ } memory;
+ struct io {
+ ioaddr_t BasePort;
+ ioaddr_t NumPorts;
+ u_int IOAddrLines;
+ } io;
+ struct irq {
+ u_int IRQ;
+ } irq;
+ } resource;
+} adjust_t;
+
+/* Action field */
+#define REMOVE_MANAGED_RESOURCE 1
+#define ADD_MANAGED_RESOURCE 2
+#define GET_FIRST_MANAGED_RESOURCE 3
+#define GET_NEXT_MANAGED_RESOURCE 4
+/* Resource field */
+#define RES_MEMORY_RANGE 1
+#define RES_IO_RANGE 2
+#define RES_IRQ 3
+/* Attribute field */
+#define RES_IRQ_TYPE 0x03
+#define RES_IRQ_TYPE_EXCLUSIVE 0
+#define RES_IRQ_TYPE_TIME 1
+#define RES_IRQ_TYPE_DYNAMIC 2
+#define RES_IRQ_CSC 0x04
+#define RES_SHARED 0x08
+#define RES_RESERVED 0x10
+#define RES_ALLOCATED 0x20
+#define RES_REMOVED 0x40
+
+typedef struct servinfo_t {
+ char Signature[2];
+ u_int Count;
+ u_int Revision;
+ u_int CSLevel;
+ char *VendorString;
+} servinfo_t;
+
+typedef struct event_callback_args_t {
+ client_handle_t client_handle;
+ void *info;
+ void *mtdrequest;
+ void *buffer;
+ void *misc;
+ void *client_data;
+ struct bus_operations *bus;
+} event_callback_args_t;
+
+/* for GetConfigurationInfo */
+typedef struct config_info_t {
+ u_char Function;
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+ u_int IntType;
+ u_int ConfigBase;
+ u_char Status, Pin, Copy, Option, ExtStatus;
+ u_int Present;
+ u_int CardValues;
+ u_int AssignedIRQ;
+ u_int IRQAttributes;
+ ioaddr_t BasePort1;
+ ioaddr_t NumPorts1;
+ u_int Attributes1;
+ ioaddr_t BasePort2;
+ ioaddr_t NumPorts2;
+ u_int Attributes2;
+ u_int IOAddrLines;
+} config_info_t;
+
+/* For CardValues field */
+#define CV_OPTION_VALUE 0x01
+#define CV_STATUS_VALUE 0x02
+#define CV_PIN_REPLACEMENT 0x04
+#define CV_COPY_VALUE 0x08
+#define CV_EXT_STATUS 0x10
+
+/* For GetFirst/NextClient */
+typedef struct client_req_t {
+ socket_t Socket;
+ u_int Attributes;
+} client_req_t;
+
+#define CLIENT_THIS_SOCKET 0x01
+
+/* For RegisterClient */
+typedef struct client_reg_t {
+ dev_info_t *dev_info;
+ u_int Attributes;
+ u_int EventMask;
+ int (*event_handler)(event_t event, int priority,
+ event_callback_args_t *);
+ event_callback_args_t event_callback_args;
+ u_int Version;
+} client_reg_t;
+
+/* ModifyConfiguration */
+typedef struct modconf_t {
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+} modconf_t;
+
+/* Attributes for ModifyConfiguration */
+#define CONF_IRQ_CHANGE_VALID 0x100
+#define CONF_VCC_CHANGE_VALID 0x200
+#define CONF_VPP1_CHANGE_VALID 0x400
+#define CONF_VPP2_CHANGE_VALID 0x800
+
+/* For RequestConfiguration */
+typedef struct config_req_t {
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+ u_int IntType;
+ u_int ConfigBase;
+ u_char Status, Pin, Copy, ExtStatus;
+ u_char ConfigIndex;
+ u_int Present;
+} config_req_t;
+
+/* Attributes for RequestConfiguration */
+#define CONF_ENABLE_IRQ 0x01
+#define CONF_ENABLE_DMA 0x02
+#define CONF_ENABLE_SPKR 0x04
+#define CONF_VALID_CLIENT 0x100
+
+/* IntType field */
+#define INT_MEMORY 0x01
+#define INT_MEMORY_AND_IO 0x02
+#define INT_CARDBUS 0x04
+#define INT_ZOOMED_VIDEO 0x08
+
+/* For RequestIO and ReleaseIO */
+typedef struct io_req_t {
+ ioaddr_t BasePort1;
+ ioaddr_t NumPorts1;
+ u_int Attributes1;
+ ioaddr_t BasePort2;
+ ioaddr_t NumPorts2;
+ u_int Attributes2;
+ u_int IOAddrLines;
+} io_req_t;
+
+/* Attributes for RequestIO and ReleaseIO */
+#define IO_SHARED 0x01
+#define IO_FIRST_SHARED 0x02
+#define IO_FORCE_ALIAS_ACCESS 0x04
+#define IO_DATA_PATH_WIDTH 0x18
+#define IO_DATA_PATH_WIDTH_8 0x00
+#define IO_DATA_PATH_WIDTH_16 0x08
+#define IO_DATA_PATH_WIDTH_AUTO 0x10
+
+/* For RequestIRQ and ReleaseIRQ */
+typedef struct irq_req_t {
+ u_int Attributes;
+ u_int AssignedIRQ;
+ u_int IRQInfo1, IRQInfo2;
+ void *Handler;
+ void *Instance;
+} irq_req_t;
+
+/* Attributes for RequestIRQ and ReleaseIRQ */
+#define IRQ_TYPE 0x03
+#define IRQ_TYPE_EXCLUSIVE 0x00
+#define IRQ_TYPE_TIME 0x01
+#define IRQ_TYPE_DYNAMIC_SHARING 0x02
+#define IRQ_FORCED_PULSE 0x04
+#define IRQ_FIRST_SHARED 0x08
+#define IRQ_HANDLE_PRESENT 0x10
+#define IRQ_PULSE_ALLOCATED 0x100
+
+/* Bits in IRQInfo1 field */
+#define IRQ_MASK 0x0f
+#define IRQ_NMI_ID 0x01
+#define IRQ_IOCK_ID 0x02
+#define IRQ_BERR_ID 0x04
+#define IRQ_VEND_ID 0x08
+#define IRQ_INFO2_VALID 0x10
+#define IRQ_LEVEL_ID 0x20
+#define IRQ_PULSE_ID 0x40
+#define IRQ_SHARE_ID 0x80
+
+typedef struct eventmask_t {
+ u_int Attributes;
+ u_int EventMask;
+} eventmask_t;
+
+#define CONF_EVENT_MASK_VALID 0x01
+
+/* Configuration registers present */
+#define PRESENT_OPTION 0x001
+#define PRESENT_STATUS 0x002
+#define PRESENT_PIN_REPLACE 0x004
+#define PRESENT_COPY 0x008
+#define PRESENT_EXT_STATUS 0x010
+#define PRESENT_IOBASE_0 0x020
+#define PRESENT_IOBASE_1 0x040
+#define PRESENT_IOBASE_2 0x080
+#define PRESENT_IOBASE_3 0x100
+#define PRESENT_IOSIZE 0x200
+
+/* For GetMemPage, MapMemPage */
+typedef struct memreq_t {
+ u_int CardOffset;
+ page_t Page;
+} memreq_t;
+
+/* For ModifyWindow */
+typedef struct modwin_t {
+ u_int Attributes;
+ u_int AccessSpeed;
+} modwin_t;
+
+/* For RequestWindow */
+typedef struct win_req_t {
+ u_int Attributes;
+ u_long Base;
+ u_int Size;
+ u_int AccessSpeed;
+} win_req_t;
+
+/* Attributes for RequestWindow */
+#define WIN_ADDR_SPACE 0x0001
+#define WIN_ADDR_SPACE_MEM 0x0000
+#define WIN_ADDR_SPACE_IO 0x0001
+#define WIN_MEMORY_TYPE 0x0002
+#define WIN_MEMORY_TYPE_CM 0x0000
+#define WIN_MEMORY_TYPE_AM 0x0002
+#define WIN_ENABLE 0x0004
+#define WIN_DATA_WIDTH 0x0018
+#define WIN_DATA_WIDTH_8 0x0000
+#define WIN_DATA_WIDTH_16 0x0008
+#define WIN_DATA_WIDTH_32 0x0010
+#define WIN_PAGED 0x0020
+#define WIN_SHARED 0x0040
+#define WIN_FIRST_SHARED 0x0080
+#define WIN_USE_WAIT 0x0100
+#define WIN_STRICT_ALIGN 0x0200
+#define WIN_MAP_BELOW_1MB 0x0400
+#define WIN_PREFETCH 0x0800
+#define WIN_CACHEABLE 0x1000
+#define WIN_BAR_MASK 0xe000
+#define WIN_BAR_SHIFT 13
+
+/* Attributes for RegisterClient */
+#define INFO_MASTER_CLIENT 0x01
+#define INFO_IO_CLIENT 0x02
+#define INFO_MTD_CLIENT 0x04
+#define INFO_MEM_CLIENT 0x08
+#define MAX_NUM_CLIENTS 3
+
+#define INFO_CARD_SHARE 0x10
+#define INFO_CARD_EXCL 0x20
+
+typedef struct cs_status_t {
+ u_char Function;
+ event_t CardState;
+ event_t SocketState;
+} cs_status_t;
+
+typedef struct error_info_t {
+ int func;
+ int retcode;
+} error_info_t;
+
+/* Special stuff for binding drivers to sockets */
+typedef struct bind_req_t {
+ socket_t Socket;
+ u_char Function;
+ dev_info_t *dev_info;
+} bind_req_t;
+
+/* Flag to bind to all functions */
+#define BIND_FN_ALL 0xff
+
+typedef struct mtd_bind_t {
+ socket_t Socket;
+ u_int Attributes;
+ u_int CardOffset;
+ dev_info_t *dev_info;
+} mtd_bind_t;
+
+/* Events */
+#define CS_EVENT_PRI_LOW 0
+#define CS_EVENT_PRI_HIGH 1
+
+#define CS_EVENT_WRITE_PROTECT 0x000001
+#define CS_EVENT_CARD_LOCK 0x000002
+#define CS_EVENT_CARD_INSERTION 0x000004
+#define CS_EVENT_CARD_REMOVAL 0x000008
+#define CS_EVENT_BATTERY_DEAD 0x000010
+#define CS_EVENT_BATTERY_LOW 0x000020
+#define CS_EVENT_READY_CHANGE 0x000040
+#define CS_EVENT_CARD_DETECT 0x000080
+#define CS_EVENT_RESET_REQUEST 0x000100
+#define CS_EVENT_RESET_PHYSICAL 0x000200
+#define CS_EVENT_CARD_RESET 0x000400
+#define CS_EVENT_REGISTRATION_COMPLETE 0x000800
+#define CS_EVENT_RESET_COMPLETE 0x001000
+#define CS_EVENT_PM_SUSPEND 0x002000
+#define CS_EVENT_PM_RESUME 0x004000
+#define CS_EVENT_INSERTION_REQUEST 0x008000
+#define CS_EVENT_EJECTION_REQUEST 0x010000
+#define CS_EVENT_MTD_REQUEST 0x020000
+#define CS_EVENT_ERASE_COMPLETE 0x040000
+#define CS_EVENT_REQUEST_ATTENTION 0x080000
+#define CS_EVENT_CB_DETECT 0x100000
+#define CS_EVENT_3VCARD 0x200000
+#define CS_EVENT_XVCARD 0x400000
+
+/* Return codes */
+#define CS_SUCCESS 0x00
+#define CS_BAD_ADAPTER 0x01
+#define CS_BAD_ATTRIBUTE 0x02
+#define CS_BAD_BASE 0x03
+#define CS_BAD_EDC 0x04
+#define CS_BAD_IRQ 0x06
+#define CS_BAD_OFFSET 0x07
+#define CS_BAD_PAGE 0x08
+#define CS_READ_FAILURE 0x09
+#define CS_BAD_SIZE 0x0a
+#define CS_BAD_SOCKET 0x0b
+#define CS_BAD_TYPE 0x0d
+#define CS_BAD_VCC 0x0e
+#define CS_BAD_VPP 0x0f
+#define CS_BAD_WINDOW 0x11
+#define CS_WRITE_FAILURE 0x12
+#define CS_NO_CARD 0x14
+#define CS_UNSUPPORTED_FUNCTION 0x15
+#define CS_UNSUPPORTED_MODE 0x16
+#define CS_BAD_SPEED 0x17
+#define CS_BUSY 0x18
+#define CS_GENERAL_FAILURE 0x19
+#define CS_WRITE_PROTECTED 0x1a
+#define CS_BAD_ARG_LENGTH 0x1b
+#define CS_BAD_ARGS 0x1c
+#define CS_CONFIGURATION_LOCKED 0x1d
+#define CS_IN_USE 0x1e
+#define CS_NO_MORE_ITEMS 0x1f
+#define CS_OUT_OF_RESOURCE 0x20
+#define CS_BAD_HANDLE 0x21
+
+#define CS_BAD_TUPLE 0x40
+
+#ifdef __KERNEL__
+
+/*
+ * Calls to set up low-level "Socket Services" drivers
+ */
+
+typedef int (*ss_entry_t)(u_int sock, u_int cmd, void *arg);
+extern int register_ss_entry(int nsock, ss_entry_t entry);
+extern void unregister_ss_entry(ss_entry_t entry);
+
+/*
+ * The main Card Services entry point
+ */
+
+enum service {
+ AccessConfigurationRegister, AddSocketServices,
+ AdjustResourceInfo, CheckEraseQueue, CloseMemory, CopyMemory,
+ DeregisterClient, DeregisterEraseQueue, GetCardServicesInfo,
+ GetClientInfo, GetConfigurationInfo, GetEventMask,
+ GetFirstClient, GetFirstPartion, GetFirstRegion, GetFirstTuple,
+ GetNextClient, GetNextPartition, GetNextRegion, GetNextTuple,
+ GetStatus, GetTupleData, MapLogSocket, MapLogWindow, MapMemPage,
+ MapPhySocket, MapPhyWindow, ModifyConfiguration, ModifyWindow,
+ OpenMemory, ParseTuple, ReadMemory, RegisterClient,
+ RegisterEraseQueue, RegisterMTD, RegisterTimer,
+ ReleaseConfiguration, ReleaseExclusive, ReleaseIO, ReleaseIRQ,
+ ReleaseSocketMask, ReleaseWindow, ReplaceSocketServices,
+ RequestConfiguration, RequestExclusive, RequestIO, RequestIRQ,
+ RequestSocketMask, RequestWindow, ResetCard, ReturnSSEntry,
+ SetEventMask, SetRegion, ValidateCIS, VendorSpecific,
+ WriteMemory, BindDevice, BindMTD, ReportError,
+ SuspendCard, ResumeCard, EjectCard, InsertCard, ReplaceCIS,
+ GetFirstWindow, GetNextWindow, GetMemPage
+};
+
+#ifdef IN_CARD_SERVICES
+extern int CardServices(int func, void *a1, void *a2, void *a3);
+#else
+extern int CardServices(int func, ...);
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_CS_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/cs_types.h b/linux/pcmcia-cs/include/pcmcia/cs_types.h
new file mode 100644
index 0000000..88471f9
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cs_types.h
@@ -0,0 +1,70 @@
+/*
+ * cs_types.h 1.20 2002/04/17 02:52:39
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CS_TYPES_H
+#define _LINUX_CS_TYPES_H
+
+#ifdef __linux__
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <sys/types.h>
+#endif
+#endif
+
+#ifdef __arm__
+typedef u_int ioaddr_t;
+#else
+typedef u_short ioaddr_t;
+#endif
+
+typedef u_short socket_t;
+typedef u_int event_t;
+typedef u_char cisdata_t;
+typedef u_short page_t;
+
+struct client_t;
+typedef struct client_t *client_handle_t;
+
+struct window_t;
+typedef struct window_t *window_handle_t;
+
+struct region_t;
+typedef struct region_t *memory_handle_t;
+
+struct eraseq_t;
+typedef struct eraseq_t *eraseq_handle_t;
+
+#ifndef DEV_NAME_LEN
+#define DEV_NAME_LEN 32
+#endif
+
+typedef char dev_info_t[DEV_NAME_LEN];
+
+#endif /* _LINUX_CS_TYPES_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/driver_ops.h b/linux/pcmcia-cs/include/pcmcia/driver_ops.h
new file mode 100644
index 0000000..9903e5b
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/driver_ops.h
@@ -0,0 +1,73 @@
+/*
+ * driver_ops.h 1.17 2001/10/04 03:15:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_DRIVER_OPS_H
+#define _LINUX_DRIVER_OPS_H
+
+#ifndef DEV_NAME_LEN
+#define DEV_NAME_LEN 32
+#endif
+
+#ifdef __KERNEL__
+
+typedef struct dev_node_t {
+ char dev_name[DEV_NAME_LEN];
+ u_short major, minor;
+ struct dev_node_t *next;
+} dev_node_t;
+
+typedef struct dev_locator_t {
+ enum { LOC_ISA, LOC_PCI } bus;
+ union {
+ struct {
+ u_short io_base_1, io_base_2;
+ u_long mem_base;
+ u_char irq, dma;
+ } isa;
+ struct {
+ u_char bus;
+ u_char devfn;
+ } pci;
+ } b;
+} dev_locator_t;
+
+typedef struct driver_operations {
+ char *name;
+ dev_node_t *(*attach) (dev_locator_t *loc);
+ void (*suspend) (dev_node_t *dev);
+ void (*resume) (dev_node_t *dev);
+ void (*detach) (dev_node_t *dev);
+} driver_operations;
+
+int register_driver(struct driver_operations *ops);
+void unregister_driver(struct driver_operations *ops);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DRIVER_OPS_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/ds.h b/linux/pcmcia-cs/include/pcmcia/ds.h
new file mode 100644
index 0000000..b372e59
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/ds.h
@@ -0,0 +1,148 @@
+/*
+ * ds.h 1.58 2001/10/04 03:15:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_DS_H
+#define _LINUX_DS_H
+
+#include <pcmcia/driver_ops.h>
+#include <pcmcia/bulkmem.h>
+
+typedef struct tuple_parse_t {
+ tuple_t tuple;
+ cisdata_t data[255];
+ cisparse_t parse;
+} tuple_parse_t;
+
+typedef struct win_info_t {
+ window_handle_t handle;
+ win_req_t window;
+ memreq_t map;
+} win_info_t;
+
+typedef struct bind_info_t {
+ dev_info_t dev_info;
+ u_char function;
+ struct dev_link_t *instance;
+ char name[DEV_NAME_LEN];
+ u_short major, minor;
+ void *next;
+} bind_info_t;
+
+typedef struct mtd_info_t {
+ dev_info_t dev_info;
+ u_int Attributes;
+ u_int CardOffset;
+} mtd_info_t;
+
+typedef union ds_ioctl_arg_t {
+ servinfo_t servinfo;
+ adjust_t adjust;
+ config_info_t config;
+ tuple_t tuple;
+ tuple_parse_t tuple_parse;
+ client_req_t client_req;
+ cs_status_t status;
+ conf_reg_t conf_reg;
+ cisinfo_t cisinfo;
+ region_info_t region;
+ bind_info_t bind_info;
+ mtd_info_t mtd_info;
+ win_info_t win_info;
+ cisdump_t cisdump;
+} ds_ioctl_arg_t;
+
+#define DS_GET_CARD_SERVICES_INFO _IOR ('d', 1, servinfo_t)
+#define DS_ADJUST_RESOURCE_INFO _IOWR('d', 2, adjust_t)
+#define DS_GET_CONFIGURATION_INFO _IOWR('d', 3, config_info_t)
+#define DS_GET_FIRST_TUPLE _IOWR('d', 4, tuple_t)
+#define DS_GET_NEXT_TUPLE _IOWR('d', 5, tuple_t)
+#define DS_GET_TUPLE_DATA _IOWR('d', 6, tuple_parse_t)
+#define DS_PARSE_TUPLE _IOWR('d', 7, tuple_parse_t)
+#define DS_RESET_CARD _IO ('d', 8)
+#define DS_GET_STATUS _IOWR('d', 9, cs_status_t)
+#define DS_ACCESS_CONFIGURATION_REGISTER _IOWR('d', 10, conf_reg_t)
+#define DS_VALIDATE_CIS _IOR ('d', 11, cisinfo_t)
+#define DS_SUSPEND_CARD _IO ('d', 12)
+#define DS_RESUME_CARD _IO ('d', 13)
+#define DS_EJECT_CARD _IO ('d', 14)
+#define DS_INSERT_CARD _IO ('d', 15)
+#define DS_GET_FIRST_REGION _IOWR('d', 16, region_info_t)
+#define DS_GET_NEXT_REGION _IOWR('d', 17, region_info_t)
+#define DS_REPLACE_CIS _IOWR('d', 18, cisdump_t)
+#define DS_GET_FIRST_WINDOW _IOR ('d', 19, win_info_t)
+#define DS_GET_NEXT_WINDOW _IOWR('d', 20, win_info_t)
+#define DS_GET_MEM_PAGE _IOWR('d', 21, win_info_t)
+
+#define DS_BIND_REQUEST _IOWR('d', 60, bind_info_t)
+#define DS_GET_DEVICE_INFO _IOWR('d', 61, bind_info_t)
+#define DS_GET_NEXT_DEVICE _IOWR('d', 62, bind_info_t)
+#define DS_UNBIND_REQUEST _IOW ('d', 63, bind_info_t)
+#define DS_BIND_MTD _IOWR('d', 64, mtd_info_t)
+
+#ifdef __KERNEL__
+
+typedef struct dev_link_t {
+ dev_node_t *dev;
+ u_int state, open;
+ wait_queue_head_t pending;
+ struct timer_list release;
+ client_handle_t handle;
+ io_req_t io;
+ irq_req_t irq;
+ config_req_t conf;
+ window_handle_t win;
+ void *priv;
+ struct dev_link_t *next;
+} dev_link_t;
+
+/* Flags for device state */
+#define DEV_PRESENT 0x01
+#define DEV_CONFIG 0x02
+#define DEV_STALE_CONFIG 0x04 /* release on close */
+#define DEV_STALE_LINK 0x08 /* detach on release */
+#define DEV_CONFIG_PENDING 0x10
+#define DEV_RELEASE_PENDING 0x20
+#define DEV_SUSPEND 0x40
+#define DEV_BUSY 0x80
+
+#define DEV_OK(l) \
+ ((l) && ((l->state & ~DEV_BUSY) == (DEV_CONFIG|DEV_PRESENT)))
+
+int register_pccard_driver(dev_info_t *dev_info,
+ dev_link_t *(*attach)(void),
+ void (*detach)(dev_link_t *));
+
+int unregister_pccard_driver(dev_info_t *dev_info);
+
+#define register_pcmcia_driver register_pccard_driver
+#define unregister_pcmcia_driver unregister_pccard_driver
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DS_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/mem_op.h b/linux/pcmcia-cs/include/pcmcia/mem_op.h
new file mode 100644
index 0000000..9230faa
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/mem_op.h
@@ -0,0 +1,133 @@
+/*
+ * mem_op.h 1.14 2001/08/24 12:16:13
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_MEM_OP_H
+#define _LINUX_MEM_OP_H
+
+#include <asm/uaccess.h>
+
+/*
+ If UNSAFE_MEMCPY is defined, we use the (optimized) system routines
+ to copy between a card and kernel memory. These routines do 32-bit
+ operations which may not work with all PCMCIA controllers. The
+ safe versions defined here will do only 8-bit and 16-bit accesses.
+*/
+
+#ifdef UNSAFE_MEMCPY
+
+#define copy_from_pc memcpy_fromio
+#define copy_to_pc memcpy_toio
+
+static inline void copy_pc_to_user(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 3);
+ n -= odd;
+ while (n) {
+ put_user(readl_ns(from), (int *)to);
+ from += 4; to += 4; n -= 4;
+ }
+ while (odd--)
+ put_user(readb((char *)from++), (char *)to++);
+}
+
+static inline void copy_user_to_pc(void *to, const void *from, size_t n)
+{
+ int l;
+ char c;
+ size_t odd = (n & 3);
+ n -= odd;
+ while (n) {
+ l = get_user((int *)from);
+ writel_ns(l, to);
+ to += 4; from += 4; n -= 4;
+ }
+ while (odd--) {
+ c = get_user((char *)from++);
+ writeb(c, (char *)to++);
+ }
+}
+
+#else /* UNSAFE_MEMCPY */
+
+static inline void copy_from_pc(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ *(u_short *)to = readw_ns(from);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd)
+ *(u_char *)to = readb(from);
+}
+
+static inline void copy_to_pc(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ writew_ns(*(u_short *)from, to);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd)
+ writeb(*(u_char *)from, to);
+}
+
+static inline void copy_pc_to_user(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ put_user(readw_ns(from), (short *)to);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd)
+ put_user(readb(from), (char *)to);
+}
+
+static inline void copy_user_to_pc(void *to, const void *from, size_t n)
+{
+ short s;
+ char c;
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ s = get_user((short *)from);
+ writew_ns(s, to);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd) {
+ c = get_user((char *)from);
+ writeb(c, to);
+ }
+}
+
+#endif /* UNSAFE_MEMCPY */
+
+#endif /* _LINUX_MEM_OP_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/ss.h b/linux/pcmcia-cs/include/pcmcia/ss.h
new file mode 100644
index 0000000..d197e42
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/ss.h
@@ -0,0 +1,133 @@
+/*
+ * ss.h 1.31 2001/08/24 12:16:13
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_SS_H
+#define _LINUX_SS_H
+
+/* For RegisterCallback */
+typedef struct ss_callback_t {
+ void (*handler)(void *info, u_int events);
+ void *info;
+} ss_callback_t;
+
+/* Definitions for card status flags for GetStatus */
+#define SS_WRPROT 0x0001
+#define SS_CARDLOCK 0x0002
+#define SS_EJECTION 0x0004
+#define SS_INSERTION 0x0008
+#define SS_BATDEAD 0x0010
+#define SS_BATWARN 0x0020
+#define SS_READY 0x0040
+#define SS_DETECT 0x0080
+#define SS_POWERON 0x0100
+#define SS_GPI 0x0200
+#define SS_STSCHG 0x0400
+#define SS_CARDBUS 0x0800
+#define SS_3VCARD 0x1000
+#define SS_XVCARD 0x2000
+#define SS_PENDING 0x4000
+
+/* for InquireSocket */
+typedef struct socket_cap_t {
+ u_int features;
+ u_int irq_mask;
+ u_int map_size;
+ u_char pci_irq;
+ u_char cardbus;
+ struct pci_bus *cb_bus;
+ struct bus_operations *bus;
+} socket_cap_t;
+
+/* InquireSocket capabilities */
+#define SS_CAP_PAGE_REGS 0x0001
+#define SS_CAP_VIRTUAL_BUS 0x0002
+#define SS_CAP_MEM_ALIGN 0x0004
+#define SS_CAP_STATIC_MAP 0x0008
+#define SS_CAP_PCCARD 0x4000
+#define SS_CAP_CARDBUS 0x8000
+
+/* for GetSocket, SetSocket */
+typedef struct socket_state_t {
+ u_int flags;
+ u_int csc_mask;
+ u_char Vcc, Vpp;
+ u_char io_irq;
+} socket_state_t;
+
+/* Socket configuration flags */
+#define SS_PWR_AUTO 0x0010
+#define SS_IOCARD 0x0020
+#define SS_RESET 0x0040
+#define SS_DMA_MODE 0x0080
+#define SS_SPKR_ENA 0x0100
+#define SS_OUTPUT_ENA 0x0200
+#define SS_ZVCARD 0x0400
+
+/* Flags for I/O port and memory windows */
+#define MAP_ACTIVE 0x01
+#define MAP_16BIT 0x02
+#define MAP_AUTOSZ 0x04
+#define MAP_0WS 0x08
+#define MAP_WRPROT 0x10
+#define MAP_ATTRIB 0x20
+#define MAP_USE_WAIT 0x40
+#define MAP_PREFETCH 0x80
+
+/* Use this just for bridge windows */
+#define MAP_IOSPACE 0x20
+
+typedef struct pccard_io_map {
+ u_char map;
+ u_char flags;
+ u_short speed;
+ u_short start, stop;
+} pccard_io_map;
+
+typedef struct pccard_mem_map {
+ u_char map;
+ u_char flags;
+ u_short speed;
+ u_long sys_start, sys_stop;
+ u_int card_start;
+} pccard_mem_map;
+
+typedef struct cb_bridge_map {
+ u_char map;
+ u_char flags;
+ u_int start, stop;
+} cb_bridge_map;
+
+enum ss_service {
+ SS_RegisterCallback, SS_InquireSocket,
+ SS_GetStatus, SS_GetSocket, SS_SetSocket,
+ SS_GetIOMap, SS_SetIOMap, SS_GetMemMap, SS_SetMemMap,
+ SS_GetBridge, SS_SetBridge, SS_ProcSetup
+};
+
+#endif /* _LINUX_SS_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/version.h b/linux/pcmcia-cs/include/pcmcia/version.h
new file mode 100644
index 0000000..bc2aef3
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/version.h
@@ -0,0 +1,9 @@
+/* version.h 1.118 2003/12/20 07:16:36 (David Hinds) */
+
+#define CS_PKG_RELEASE "3.2.8"
+#define CS_PKG_RELEASE_CODE 0x3208
+
+#define VERSION(v,p,s) (((v)<<16)+(p<<8)+s)
+
+#define CS_RELEASE CS_PKG_RELEASE
+#define CS_RELEASE_CODE CS_PKG_RELEASE_CODE
diff --git a/linux/pcmcia-cs/modules/bulkmem.c b/linux/pcmcia-cs/modules/bulkmem.c
new file mode 100644
index 0000000..558e6d9
--- /dev/null
+++ b/linux/pcmcia-cs/modules/bulkmem.c
@@ -0,0 +1,626 @@
+/*======================================================================
+
+ PCMCIA Bulk Memory Services
+
+ bulkmem.c 1.44 2002/06/29 06:23:09
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+#define IN_CARD_SERVICES
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+/*======================================================================
+
+ This function handles submitting an MTD request, and retrying
+ requests when an MTD is busy.
+
+ An MTD request should never block.
+
+======================================================================*/
+
+static int do_mtd_request(memory_handle_t handle, mtd_request_t *req,
+ caddr_t buf)
+{
+ int ret, tries;
+ client_t *mtd;
+ socket_info_t *s;
+
+ mtd = handle->mtd;
+ if (mtd == NULL)
+ return CS_GENERAL_FAILURE;
+ s = SOCKET(mtd);
+ for (ret = tries = 0; tries < 100; tries++) {
+ mtd->event_callback_args.mtdrequest = req;
+ mtd->event_callback_args.buffer = buf;
+ ret = EVENT(mtd, CS_EVENT_MTD_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret != CS_BUSY)
+ break;
+ switch (req->Status) {
+ case MTD_WAITREQ:
+ /* Not that we should ever need this... */
+ interruptible_sleep_on_timeout(&mtd->mtd_req, HZ);
+ break;
+ case MTD_WAITTIMER:
+ case MTD_WAITRDY:
+ interruptible_sleep_on_timeout(&mtd->mtd_req,
+ req->Timeout*HZ/1000);
+ req->Function |= MTD_REQ_TIMEOUT;
+ break;
+ case MTD_WAITPOWER:
+ interruptible_sleep_on(&mtd->mtd_req);
+ break;
+ }
+ if (signal_pending(current))
+ printk(KERN_NOTICE "cs: do_mtd_request interrupted!\n");
+ }
+ if (tries == 20) {
+ printk(KERN_NOTICE "cs: MTD request timed out!\n");
+ ret = CS_GENERAL_FAILURE;
+ }
+ wake_up_interruptible(&mtd->mtd_req);
+ retry_erase_list(&mtd->erase_busy, 0);
+ return ret;
+} /* do_mtd_request */
+
+/*======================================================================
+
+ This stuff is all for handling asynchronous erase requests. It
+ is complicated because all the retry stuff has to be dealt with
+ in timer interrupts or in the card status event handler.
+
+======================================================================*/
+
+static void insert_queue(erase_busy_t *head, erase_busy_t *entry)
+{
+ DEBUG(2, "cs: adding 0x%p to queue 0x%p\n", entry, head);
+ entry->next = head;
+ entry->prev = head->prev;
+ head->prev->next = entry;
+ head->prev = entry;
+}
+
+static void remove_queue(erase_busy_t *entry)
+{
+ DEBUG(2, "cs: unqueueing 0x%p\n", entry);
+ entry->next->prev = entry->prev;
+ entry->prev->next = entry->next;
+}
+
+static void retry_erase(erase_busy_t *busy, u_int cause)
+{
+ eraseq_entry_t *erase = busy->erase;
+ mtd_request_t req;
+ client_t *mtd;
+ socket_info_t *s;
+ int ret;
+
+ DEBUG(2, "cs: trying erase request 0x%p...\n", busy);
+ if (busy->next)
+ remove_queue(busy);
+ req.Function = MTD_REQ_ERASE | cause;
+ req.TransferLength = erase->Size;
+ req.DestCardOffset = erase->Offset + erase->Handle->info.CardOffset;
+ req.MediaID = erase->Handle->MediaID;
+ mtd = erase->Handle->mtd;
+ s = SOCKET(mtd);
+ mtd->event_callback_args.mtdrequest = &req;
+ ret = EVENT(mtd, CS_EVENT_MTD_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret == CS_BUSY) {
+ DEBUG(2, " Status = %d, requeueing.\n", req.Status);
+ switch (req.Status) {
+ case MTD_WAITREQ:
+ case MTD_WAITPOWER:
+ insert_queue(&mtd->erase_busy, busy);
+ break;
+ case MTD_WAITTIMER:
+ case MTD_WAITRDY:
+ if (req.Status == MTD_WAITRDY)
+ insert_queue(&s->erase_busy, busy);
+ mod_timer(&busy->timeout, jiffies + req.Timeout*HZ/1000);
+ break;
+ }
+ } else {
+ /* update erase queue status */
+ DEBUG(2, " Ret = %d\n", ret);
+ switch (ret) {
+ case CS_SUCCESS:
+ erase->State = ERASE_PASSED; break;
+ case CS_WRITE_PROTECTED:
+ erase->State = ERASE_MEDIA_WRPROT; break;
+ case CS_BAD_OFFSET:
+ erase->State = ERASE_BAD_OFFSET; break;
+ case CS_BAD_SIZE:
+ erase->State = ERASE_BAD_SIZE; break;
+ case CS_NO_CARD:
+ erase->State = ERASE_BAD_SOCKET; break;
+ default:
+ erase->State = ERASE_FAILED; break;
+ }
+ busy->client->event_callback_args.info = erase;
+ EVENT(busy->client, CS_EVENT_ERASE_COMPLETE, CS_EVENT_PRI_LOW);
+ kfree(busy);
+ /* Resubmit anything waiting for a request to finish */
+ wake_up_interruptible(&mtd->mtd_req);
+ retry_erase_list(&mtd->erase_busy, 0);
+ }
+} /* retry_erase */
+
+void retry_erase_list(erase_busy_t *list, u_int cause)
+{
+ erase_busy_t tmp = *list;
+
+ DEBUG(2, "cs: rescanning erase queue list 0x%p\n", list);
+ if (list->next == list)
+ return;
+ /* First, truncate the original list */
+ list->prev->next = &tmp;
+ list->next->prev = &tmp;
+ list->prev = list->next = list;
+ tmp.prev->next = &tmp;
+ tmp.next->prev = &tmp;
+
+ /* Now, retry each request, in order. */
+ while (tmp.next != &tmp)
+ retry_erase(tmp.next, cause);
+} /* retry_erase_list */
+
+static void handle_erase_timeout(u_long arg)
+{
+ DEBUG(0, "cs: erase timeout for entry 0x%lx\n", arg);
+ retry_erase((erase_busy_t *)arg, MTD_REQ_TIMEOUT);
+}
+
+static void setup_erase_request(client_handle_t handle, eraseq_entry_t *erase)
+{
+ erase_busy_t *busy;
+ region_info_t *info;
+
+ if (CHECK_REGION(erase->Handle))
+ erase->State = ERASE_BAD_SOCKET;
+ else {
+ info = &erase->Handle->info;
+ if ((erase->Offset >= info->RegionSize) ||
+ (erase->Offset & (info->BlockSize-1)))
+ erase->State = ERASE_BAD_OFFSET;
+ else if ((erase->Offset+erase->Size > info->RegionSize) ||
+ (erase->Size & (info->BlockSize-1)))
+ erase->State = ERASE_BAD_SIZE;
+ else {
+ erase->State = 1;
+ busy = kmalloc(sizeof(erase_busy_t), GFP_KERNEL);
+ busy->erase = erase;
+ busy->client = handle;
+ init_timer(&busy->timeout);
+ busy->timeout.data = (u_long)busy;
+ busy->timeout.function = &handle_erase_timeout;
+ busy->prev = busy->next = NULL;
+ retry_erase(busy, 0);
+ }
+ }
+} /* setup_erase_request */
+
+/*======================================================================
+
+ MTD helper functions
+
+======================================================================*/
+
+static int mtd_modify_window(window_handle_t win, mtd_mod_win_t *req)
+{
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ win->ctl.flags = MAP_16BIT | MAP_ACTIVE;
+ if (req->Attributes & WIN_USE_WAIT)
+ win->ctl.flags |= MAP_USE_WAIT;
+ if (req->Attributes & WIN_MEMORY_TYPE)
+ win->ctl.flags |= MAP_ATTRIB;
+ win->ctl.speed = req->AccessSpeed;
+ win->ctl.card_start = req->CardOffset;
+ win->sock->ss_entry(win->sock->sock, SS_SetMemMap, &win->ctl);
+ return CS_SUCCESS;
+}
+
+static int mtd_set_vpp(client_handle_t handle, mtd_vpp_req_t *req)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ if (req->Vpp1 != req->Vpp2)
+ return CS_BAD_VPP;
+ s = SOCKET(handle);
+ s->socket.Vpp = req->Vpp1;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_BAD_VPP;
+ return CS_SUCCESS;
+}
+
+static int mtd_rdy_mask(client_handle_t handle, mtd_rdy_req_t *req)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (req->Mask & CS_EVENT_READY_CHANGE)
+ s->socket.csc_mask |= SS_READY;
+ else
+ s->socket.csc_mask &= ~SS_READY;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_GENERAL_FAILURE;
+ return CS_SUCCESS;
+}
+
+int MTDHelperEntry(int func, void *a1, void *a2)
+{
+ switch (func) {
+ case MTDRequestWindow:
+ return CardServices(RequestWindow, a1, a2, NULL);
+ case MTDReleaseWindow:
+ return CardServices(ReleaseWindow, a1, NULL, NULL);
+ case MTDModifyWindow:
+ return mtd_modify_window(a1, a2); break;
+ case MTDSetVpp:
+ return mtd_set_vpp(a1, a2); break;
+ case MTDRDYMask:
+ return mtd_rdy_mask(a1, a2); break;
+ default:
+ return CS_UNSUPPORTED_FUNCTION; break;
+ }
+} /* MTDHelperEntry */
+
+/*======================================================================
+
+ This stuff is used by Card Services to initialize the table of
+ region info used for subsequent calls to GetFirstRegion and
+ GetNextRegion.
+
+======================================================================*/
+
+static void setup_regions(client_handle_t handle, int attr,
+ memory_handle_t *list)
+{
+ int i, code, has_jedec, has_geo;
+ u_int offset;
+ cistpl_device_t device;
+ cistpl_jedec_t jedec;
+ cistpl_device_geo_t geo;
+ memory_handle_t r;
+
+ DEBUG(1, "cs: setup_regions(0x%p, %d, 0x%p)\n",
+ handle, attr, list);
+
+ code = (attr) ? CISTPL_DEVICE_A : CISTPL_DEVICE;
+ if (read_tuple(handle, code, &device) != CS_SUCCESS)
+ return;
+ code = (attr) ? CISTPL_JEDEC_A : CISTPL_JEDEC_C;
+ has_jedec = (read_tuple(handle, code, &jedec) == CS_SUCCESS);
+ if (has_jedec && (device.ndev != jedec.nid)) {
+#ifdef PCMCIA_DEBUG
+ printk(KERN_DEBUG "cs: Device info does not match JEDEC info.\n");
+#endif
+ has_jedec = 0;
+ }
+ code = (attr) ? CISTPL_DEVICE_GEO_A : CISTPL_DEVICE_GEO;
+ has_geo = (read_tuple(handle, code, &geo) == CS_SUCCESS);
+ if (has_geo && (device.ndev != geo.ngeo)) {
+#ifdef PCMCIA_DEBUG
+ printk(KERN_DEBUG "cs: Device info does not match geometry tuple.\n");
+#endif
+ has_geo = 0;
+ }
+
+ offset = 0;
+ for (i = 0; i < device.ndev; i++) {
+ if ((device.dev[i].type != CISTPL_DTYPE_NULL) &&
+ (device.dev[i].size != 0)) {
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ r->region_magic = REGION_MAGIC;
+ r->state = 0;
+ r->dev_info[0] = '\0';
+ r->mtd = NULL;
+ r->info.Attributes = (attr) ? REGION_TYPE_AM : 0;
+ r->info.CardOffset = offset;
+ r->info.RegionSize = device.dev[i].size;
+ r->info.AccessSpeed = device.dev[i].speed;
+ if (has_jedec) {
+ r->info.JedecMfr = jedec.id[i].mfr;
+ r->info.JedecInfo = jedec.id[i].info;
+ } else
+ r->info.JedecMfr = r->info.JedecInfo = 0;
+ if (has_geo) {
+ r->info.BlockSize = geo.geo[i].buswidth *
+ geo.geo[i].erase_block * geo.geo[i].interleave;
+ r->info.PartMultiple =
+ r->info.BlockSize * geo.geo[i].partition;
+ } else
+ r->info.BlockSize = r->info.PartMultiple = 1;
+ r->info.next = *list; *list = r;
+ }
+ offset += device.dev[i].size;
+ }
+} /* setup_regions */
+
+/*======================================================================
+
+ This is tricky. When get_first_region() is called by Driver
+ Services, we initialize the region info table in the socket
+ structure. When it is called by an MTD, we can just scan the
+ table for matching entries.
+
+======================================================================*/
+
+static int match_region(client_handle_t handle, memory_handle_t list,
+ region_info_t *match)
+{
+ while (list != NULL) {
+ if (!(handle->Attributes & INFO_MTD_CLIENT) ||
+ (strcmp(handle->dev_info, list->dev_info) == 0)) {
+ *match = list->info;
+ return CS_SUCCESS;
+ }
+ list = list->info.next;
+ }
+ return CS_NO_MORE_ITEMS;
+} /* match_region */
+
+int get_first_region(client_handle_t handle, region_info_t *rgn)
+{
+ socket_info_t *s = SOCKET(handle);
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+
+ if ((handle->Attributes & INFO_MASTER_CLIENT) &&
+ (!(s->state & SOCKET_REGION_INFO))) {
+ setup_regions(handle, 0, &s->c_region);
+ setup_regions(handle, 1, &s->a_region);
+ s->state |= SOCKET_REGION_INFO;
+ }
+
+ if (rgn->Attributes & REGION_TYPE_AM)
+ return match_region(handle, s->a_region, rgn);
+ else
+ return match_region(handle, s->c_region, rgn);
+} /* get_first_region */
+
+int get_next_region(client_handle_t handle, region_info_t *rgn)
+{
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ return match_region(handle, rgn->next, rgn);
+} /* get_next_region */
+
+/*======================================================================
+
+ Connect an MTD with a memory region.
+
+======================================================================*/
+
+int register_mtd(client_handle_t handle, mtd_reg_t *reg)
+{
+ memory_handle_t list;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (reg->Attributes & REGION_TYPE_AM)
+ list = s->a_region;
+ else
+ list = s->c_region;
+ DEBUG(1, "cs: register_mtd(0x%p, '%s', 0x%x)\n",
+ handle, handle->dev_info, reg->Offset);
+ while (list) {
+ if (list->info.CardOffset == reg->Offset) break;
+ list = list->info.next;
+ }
+ if (list && (list->mtd == NULL) &&
+ (strcmp(handle->dev_info, list->dev_info) == 0)) {
+ list->info.Attributes = reg->Attributes;
+ list->MediaID = reg->MediaID;
+ list->mtd = handle;
+ handle->mtd_count++;
+ return CS_SUCCESS;
+ } else
+ return CS_BAD_OFFSET;
+} /* register_mtd */
+
+/*======================================================================
+
+ Erase queue management functions
+
+======================================================================*/
+
+int register_erase_queue(client_handle_t *handle, eraseq_hdr_t *header)
+{
+ eraseq_t *queue;
+
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ queue = kmalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) return CS_OUT_OF_RESOURCE;
+ queue->eraseq_magic = ERASEQ_MAGIC;
+ queue->handle = *handle;
+ queue->count = header->QueueEntryCnt;
+ queue->entry = header->QueueEntryArray;
+ *handle = (client_handle_t)queue;
+ return CS_SUCCESS;
+} /* register_erase_queue */
+
+int deregister_erase_queue(eraseq_handle_t eraseq)
+{
+ int i;
+ if (CHECK_ERASEQ(eraseq))
+ return CS_BAD_HANDLE;
+ for (i = 0; i < eraseq->count; i++)
+ if (ERASE_IN_PROGRESS(eraseq->entry[i].State)) break;
+ if (i < eraseq->count)
+ return CS_BUSY;
+ eraseq->eraseq_magic = 0;
+ kfree(eraseq);
+ return CS_SUCCESS;
+} /* deregister_erase_queue */
+
+int check_erase_queue(eraseq_handle_t eraseq)
+{
+ int i;
+ if (CHECK_ERASEQ(eraseq))
+ return CS_BAD_HANDLE;
+ for (i = 0; i < eraseq->count; i++)
+ if (eraseq->entry[i].State == ERASE_QUEUED)
+ setup_erase_request(eraseq->handle, &eraseq->entry[i]);
+ return CS_SUCCESS;
+} /* check_erase_queue */
+
+/*======================================================================
+
+ Look up the memory region matching the request, and return a
+ memory handle.
+
+======================================================================*/
+
+int open_memory(client_handle_t *handle, open_mem_t *open)
+{
+ socket_info_t *s;
+ memory_handle_t region;
+
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(*handle);
+ if (open->Attributes & MEMORY_TYPE_AM)
+ region = s->a_region;
+ else
+ region = s->c_region;
+ while (region) {
+ if (region->info.CardOffset == open->Offset) break;
+ region = region->info.next;
+ }
+ if (region && region->mtd) {
+ *handle = (client_handle_t)region;
+ DEBUG(1, "cs: open_memory(0x%p, 0x%x) = 0x%p\n",
+ handle, open->Offset, region);
+ return CS_SUCCESS;
+ } else
+ return CS_BAD_OFFSET;
+} /* open_memory */
+
+/*======================================================================
+
+ Close a memory handle from an earlier call to OpenMemory.
+
+ For the moment, I don't think this needs to do anything.
+
+======================================================================*/
+
+int close_memory(memory_handle_t handle)
+{
+ DEBUG(1, "cs: close_memory(0x%p)\n", handle);
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ return CS_SUCCESS;
+} /* close_memory */
+
+/*======================================================================
+
+ Read from a memory device, using a handle previously returned
+ by a call to OpenMemory.
+
+======================================================================*/
+
+int read_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf)
+{
+ mtd_request_t mtd;
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ if (req->Offset >= handle->info.RegionSize)
+ return CS_BAD_OFFSET;
+ if (req->Offset+req->Count > handle->info.RegionSize)
+ return CS_BAD_SIZE;
+
+ mtd.SrcCardOffset = req->Offset + handle->info.CardOffset;
+ mtd.TransferLength = req->Count;
+ mtd.MediaID = handle->MediaID;
+ mtd.Function = MTD_REQ_READ;
+ if (req->Attributes & MEM_OP_BUFFER_KERNEL)
+ mtd.Function |= MTD_REQ_KERNEL;
+ return do_mtd_request(handle, &mtd, buf);
+} /* read_memory */
+
+/*======================================================================
+
+ Write to a memory device, using a handle previously returned by
+ a call to OpenMemory.
+
+======================================================================*/
+
+int write_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf)
+{
+ mtd_request_t mtd;
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ if (req->Offset >= handle->info.RegionSize)
+ return CS_BAD_OFFSET;
+ if (req->Offset+req->Count > handle->info.RegionSize)
+ return CS_BAD_SIZE;
+
+ mtd.DestCardOffset = req->Offset + handle->info.CardOffset;
+ mtd.TransferLength = req->Count;
+ mtd.MediaID = handle->MediaID;
+ mtd.Function = MTD_REQ_WRITE;
+ if (req->Attributes & MEM_OP_BUFFER_KERNEL)
+ mtd.Function |= MTD_REQ_KERNEL;
+ return do_mtd_request(handle, &mtd, buf);
+} /* write_memory */
+
+/*======================================================================
+
+ This isn't needed for anything I could think of.
+
+======================================================================*/
+
+int copy_memory(memory_handle_t handle, copy_op_t *req)
+{
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ return CS_UNSUPPORTED_FUNCTION;
+}
+
diff --git a/linux/pcmcia-cs/modules/cirrus.h b/linux/pcmcia-cs/modules/cirrus.h
new file mode 100644
index 0000000..e3bb255
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cirrus.h
@@ -0,0 +1,188 @@
+/*
+ * cirrus.h 1.11 2003/09/09 07:05:40
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CIRRUS_H
+#define _LINUX_CIRRUS_H
+
+#ifndef PCI_VENDOR_ID_CIRRUS
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#endif
+#ifndef PCI_DEVICE_ID_CIRRUS_6729
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#endif
+#ifndef PCI_DEVICE_ID_CIRRUS_6832
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#endif
+
+#define PD67_MISC_CTL_1 0x16 /* Misc control 1 */
+#define PD67_FIFO_CTL 0x17 /* FIFO control */
+#define PD67_MISC_CTL_2 0x1E /* Misc control 2 */
+#define PD67_CHIP_INFO 0x1f /* Chip information */
+#define PD67_ATA_CTL 0x026 /* 6730: ATA control */
+#define PD67_EXT_INDEX 0x2e /* Extension index */
+#define PD67_EXT_DATA 0x2f /* Extension data */
+
+#define pd67_ext_get(s, r) \
+ (i365_set(s, PD67_EXT_INDEX, r), i365_get(s, PD67_EXT_DATA))
+#define pd67_ext_set(s, r, v) \
+ (i365_set(s, PD67_EXT_INDEX, r), i365_set(s, PD67_EXT_DATA, v))
+
+/* PD6722 extension registers -- indexed in PD67_EXT_INDEX */
+#define PD67_DATA_MASK0 0x01 /* Data mask 0 */
+#define PD67_DATA_MASK1 0x02 /* Data mask 1 */
+#define PD67_DMA_CTL 0x03 /* DMA control */
+
+/* PD6730 extension registers -- indexed in PD67_EXT_INDEX */
+#define PD67_EXT_CTL_1 0x03 /* Extension control 1 */
+#define PD67_MEM_PAGE(n) ((n)+5) /* PCI window bits 31:24 */
+#define PD67_EXTERN_DATA 0x0a
+#define PD67_EXT_CTL_2 0x0b
+#define PD67_MISC_CTL_3 0x25
+#define PD67_SMB_PWR_CTL 0x26
+
+/* I/O window address offset */
+#define PD67_IO_OFF(w) (0x36+((w)<<1))
+
+/* Timing register sets */
+#define PD67_TIME_SETUP(n) (0x3a + 3*(n))
+#define PD67_TIME_CMD(n) (0x3b + 3*(n))
+#define PD67_TIME_RECOV(n) (0x3c + 3*(n))
+
+/* Flags for PD67_MISC_CTL_1 */
+#define PD67_MC1_5V_DET 0x01 /* 5v detect */
+#define PD67_MC1_MEDIA_ENA 0x01 /* 6730: Multimedia enable */
+#define PD67_MC1_VCC_3V 0x02 /* 3.3v Vcc */
+#define PD67_MC1_PULSE_MGMT 0x04
+#define PD67_MC1_PULSE_IRQ 0x08
+#define PD67_MC1_SPKR_ENA 0x10
+#define PD67_MC1_INPACK_ENA 0x80
+
+/* Flags for PD67_FIFO_CTL */
+#define PD67_FIFO_EMPTY 0x80
+
+/* Flags for PD67_MISC_CTL_2 */
+#define PD67_MC2_FREQ_BYPASS 0x01
+#define PD67_MC2_DYNAMIC_MODE 0x02
+#define PD67_MC2_SUSPEND 0x04
+#define PD67_MC2_5V_CORE 0x08
+#define PD67_MC2_LED_ENA 0x10 /* IRQ 12 is LED enable */
+#define PD67_MC2_FAST_PCI 0x10 /* 6729: PCI bus > 25 MHz */
+#define PD67_MC2_3STATE_BIT7 0x20 /* Floppy change bit */
+#define PD67_MC2_DMA_MODE 0x40
+#define PD67_MC2_IRQ15_RI 0x80 /* IRQ 15 is ring enable */
+
+/* Flags for PD67_CHIP_INFO */
+#define PD67_INFO_SLOTS 0x20 /* 0 = 1 slot, 1 = 2 slots */
+#define PD67_INFO_CHIP_ID 0xc0
+#define PD67_INFO_REV 0x1c
+
+/* Fields in PD67_TIME_* registers */
+#define PD67_TIME_SCALE 0xc0
+#define PD67_TIME_SCALE_1 0x00
+#define PD67_TIME_SCALE_16 0x40
+#define PD67_TIME_SCALE_256 0x80
+#define PD67_TIME_SCALE_4096 0xc0
+#define PD67_TIME_MULT 0x3f
+
+/* Fields in PD67_DMA_CTL */
+#define PD67_DMA_MODE 0xc0
+#define PD67_DMA_OFF 0x00
+#define PD67_DMA_DREQ_INPACK 0x40
+#define PD67_DMA_DREQ_WP 0x80
+#define PD67_DMA_DREQ_BVD2 0xc0
+#define PD67_DMA_PULLUP 0x20 /* Disable socket pullups? */
+
+/* Fields in PD67_EXT_CTL_1 */
+#define PD67_EC1_VCC_PWR_LOCK 0x01
+#define PD67_EC1_AUTO_PWR_CLEAR 0x02
+#define PD67_EC1_LED_ENA 0x04
+#define PD67_EC1_INV_CARD_IRQ 0x08
+#define PD67_EC1_INV_MGMT_IRQ 0x10
+#define PD67_EC1_PULLUP_CTL 0x20
+
+/* Fields in PD67_EXTERN_DATA */
+#define PD67_EXD_VS1(s) (0x01 << ((s)<<1))
+#define PD67_EXD_VS2(s) (0x02 << ((s)<<1))
+
+/* Fields in PD67_EXT_CTL_2 */
+#define PD67_EC2_GPSTB_TOTEM 0x04
+#define PD67_EC2_GPSTB_IOR 0x08
+#define PD67_EC2_GPSTB_IOW 0x10
+#define PD67_EC2_GPSTB_HIGH 0x20
+
+/* Fields in PD67_MISC_CTL_3 */
+#define PD67_MC3_IRQ_MASK 0x03
+#define PD67_MC3_IRQ_PCPCI 0x00
+#define PD67_MC3_IRQ_EXTERN 0x01
+#define PD67_MC3_IRQ_PCIWAY 0x02
+#define PD67_MC3_IRQ_PCI 0x03
+#define PD67_MC3_PWR_MASK 0x0c
+#define PD67_MC3_PWR_SERIAL 0x00
+#define PD67_MC3_PWR_TI2202 0x08
+#define PD67_MC3_PWR_SMB 0x0c
+
+/* Register definitions for Cirrus PD6832 PCI-to-CardBus bridge */
+
+/* PD6832 extension registers -- indexed in PD67_EXT_INDEX */
+#define PD68_PCI_SPACE 0x22
+#define PD68_PCCARD_SPACE 0x23
+#define PD68_WINDOW_TYPE 0x24
+#define PD68_EXT_CSC 0x2e
+#define PD68_MISC_CTL_4 0x2f
+#define PD68_MISC_CTL_5 0x30
+#define PD68_MISC_CTL_6 0x31
+
+/* Extra flags in PD67_MISC_CTL_3 */
+#define PD68_MC3_HW_SUSP 0x10
+#define PD68_MC3_MM_EXPAND 0x40
+#define PD68_MC3_MM_ARM 0x80
+
+/* Bridge Control Register */
+#define PD6832_BCR_MGMT_IRQ_ENA 0x0800
+
+/* Socket Number Register */
+#define PD6832_SOCKET_NUMBER 0x004c /* 8 bit */
+
+/* Data structure for tracking vendor-specific state */
+typedef struct cirrus_state_t {
+ u_char misc1; /* PD67_MISC_CTL_1 */
+ u_char misc2; /* PD67_MISC_CTL_2 */
+ u_char ectl1; /* PD67_EXT_CTL_1 */
+ u_char timer[6]; /* PD67_TIME_* */
+} cirrus_state_t;
+
+#define CIRRUS_PCIC_ID \
+ IS_PD6729, IS_PD6730, IS_PD6832
+
+#define CIRRUS_PCIC_INFO \
+ { "Cirrus PD6729", IS_CIRRUS|IS_PCI, ID(CIRRUS, 6729) }, \
+ { "Cirrus PD6730", IS_CIRRUS|IS_PCI, PCI_VENDOR_ID_CIRRUS, -1 }, \
+ { "Cirrus PD6832", IS_CIRRUS|IS_CARDBUS, ID(CIRRUS, 6832) }
+
+#endif /* _LINUX_CIRRUS_H */
diff --git a/linux/pcmcia-cs/modules/cistpl.c b/linux/pcmcia-cs/modules/cistpl.c
new file mode 100644
index 0000000..404b8e4
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cistpl.c
@@ -0,0 +1,1502 @@
+/*======================================================================
+
+ PCMCIA Card Information Structure parser
+
+ cistpl.c 1.101 2003/12/15 03:58:03
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#define __NO_VERSION__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/bus_ops.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+static const u_char mantissa[] = {
+ 10, 12, 13, 15, 20, 25, 30, 35,
+ 40, 45, 50, 55, 60, 70, 80, 90
+};
+
+static const u_int exponent[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
+};
+
+/* Convert an extended speed byte to a time in nanoseconds */
+#define SPEED_CVT(v) \
+ (mantissa[(((v)>>3)&15)-1] * exponent[(v)&7] / 10)
+/* Convert a power byte to a current in 0.1 microamps */
+#define POWER_CVT(v) \
+ (mantissa[((v)>>3)&15] * exponent[(v)&7] / 10)
+#define POWER_SCALE(v) (exponent[(v)&7])
+
+/* Upper limit on reasonable # of tuples */
+#define MAX_TUPLES 200
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+INT_MODULE_PARM(cis_width, 0); /* 16-bit CIS? */
+
+/*======================================================================
+
+ Low-level functions to read and write CIS memory. I think the
+ write routine is only useful for writing one-byte registers.
+
+======================================================================*/
+
+/* Bits in attr field */
+#define IS_ATTR 1
+#define IS_INDIRECT 8
+
+static int setup_cis_mem(socket_info_t *s);
+
+static void set_cis_map(socket_info_t *s, pccard_mem_map *mem)
+{
+ s->ss_entry(s->sock, SS_SetMemMap, mem);
+ if (s->cap.features & SS_CAP_STATIC_MAP) {
+ if (s->cis_virt)
+ bus_iounmap(s->cap.bus, s->cis_virt);
+ s->cis_virt = bus_ioremap(s->cap.bus, mem->sys_start,
+ s->cap.map_size);
+ }
+}
+
+int read_cis_mem(socket_info_t *s, int attr, u_int addr,
+ u_int len, void *ptr)
+{
+ pccard_mem_map *mem = &s->cis_mem;
+ u_char *sys, *buf = ptr;
+
+ DEBUG(3, "cs: read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
+ if (setup_cis_mem(s) != 0) {
+ memset(ptr, 0xff, len);
+ return -1;
+ }
+ mem->flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) { addr *= 2; flags = ICTRL0_AUTOINC; }
+ mem->card_start = 0; mem->flags = MAP_ACTIVE;
+ set_cis_map(s, mem);
+ sys = s->cis_virt;
+ bus_writeb(s->cap.bus, flags, sys+CISREG_ICTRL0);
+ bus_writeb(s->cap.bus, addr & 0xff, sys+CISREG_IADDR0);
+ bus_writeb(s->cap.bus, (addr>>8) & 0xff, sys+CISREG_IADDR1);
+ bus_writeb(s->cap.bus, (addr>>16) & 0xff, sys+CISREG_IADDR2);
+ bus_writeb(s->cap.bus, (addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ *buf = bus_readb(s->cap.bus, sys+CISREG_IDATA0);
+ } else {
+ u_int inc = 1;
+ if (attr) { mem->flags |= MAP_ATTRIB; inc++; addr *= 2; }
+ sys += (addr & (s->cap.map_size-1));
+ mem->card_start = addr & ~(s->cap.map_size-1);
+ while (len) {
+ set_cis_map(s, mem);
+ sys = s->cis_virt + (addr & (s->cap.map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == s->cis_virt+s->cap.map_size) break;
+ *buf = bus_readb(s->cap.bus, sys);
+ }
+ mem->card_start += s->cap.map_size;
+ addr = 0;
+ }
+ }
+ DEBUG(3, "cs: %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
+ *(u_char *)(ptr+0), *(u_char *)(ptr+1),
+ *(u_char *)(ptr+2), *(u_char *)(ptr+3));
+ return 0;
+}
+
+void write_cis_mem(socket_info_t *s, int attr, u_int addr,
+ u_int len, void *ptr)
+{
+ pccard_mem_map *mem = &s->cis_mem;
+ u_char *sys, *buf = ptr;
+
+ DEBUG(3, "cs: write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
+ if (setup_cis_mem(s) != 0) return;
+ mem->flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) { addr *= 2; flags = ICTRL0_AUTOINC; }
+ mem->card_start = 0; mem->flags = MAP_ACTIVE;
+ set_cis_map(s, mem);
+ sys = s->cis_virt;
+ bus_writeb(s->cap.bus, flags, sys+CISREG_ICTRL0);
+ bus_writeb(s->cap.bus, addr & 0xff, sys+CISREG_IADDR0);
+ bus_writeb(s->cap.bus, (addr>>8) & 0xff, sys+CISREG_IADDR1);
+ bus_writeb(s->cap.bus, (addr>>16) & 0xff, sys+CISREG_IADDR2);
+ bus_writeb(s->cap.bus, (addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ bus_writeb(s->cap.bus, *buf, sys+CISREG_IDATA0);
+ } else {
+ int inc = 1;
+ if (attr & IS_ATTR) { mem->flags |= MAP_ATTRIB; inc++; addr *= 2; }
+ mem->card_start = addr & ~(s->cap.map_size-1);
+ while (len) {
+ set_cis_map(s, mem);
+ sys = s->cis_virt + (addr & (s->cap.map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == s->cis_virt+s->cap.map_size) break;
+ bus_writeb(s->cap.bus, *buf, sys);
+ }
+ mem->card_start += s->cap.map_size;
+ addr = 0;
+ }
+ }
+}
+
+/*======================================================================
+
+ This is tricky... when we set up CIS memory, we try to validate
+ the memory window space allocations.
+
+======================================================================*/
+
+/* Scratch pointer to the socket we use for validation */
+static socket_info_t *vs = NULL;
+
+/* Validation function for cards with a valid CIS */
+static int cis_readable(u_long base)
+{
+ cisinfo_t info1, info2;
+ int ret;
+ vs->cis_mem.sys_start = base;
+ vs->cis_mem.sys_stop = base+vs->cap.map_size-1;
+ vs->cis_virt = bus_ioremap(vs->cap.bus, base, vs->cap.map_size);
+ ret = validate_cis(vs->clients, &info1);
+ /* invalidate mapping and CIS cache */
+ bus_iounmap(vs->cap.bus, vs->cis_virt); vs->cis_used = 0;
+ if ((ret != 0) || (info1.Chains == 0))
+ return 0;
+ vs->cis_mem.sys_start = base+vs->cap.map_size;
+ vs->cis_mem.sys_stop = base+2*vs->cap.map_size-1;
+ vs->cis_virt = bus_ioremap(vs->cap.bus, base+vs->cap.map_size,
+ vs->cap.map_size);
+ ret = validate_cis(vs->clients, &info2);
+ bus_iounmap(vs->cap.bus, vs->cis_virt); vs->cis_used = 0;
+ return ((ret == 0) && (info1.Chains == info2.Chains));
+}
+
+/* Validation function for simple memory cards */
+static int checksum(u_long base)
+{
+ int i, a, b, d;
+ vs->cis_mem.sys_start = base;
+ vs->cis_mem.sys_stop = base+vs->cap.map_size-1;
+ vs->cis_virt = bus_ioremap(vs->cap.bus, base, vs->cap.map_size);
+ vs->cis_mem.card_start = 0;
+ vs->cis_mem.flags = MAP_ACTIVE;
+ vs->ss_entry(vs->sock, SS_SetMemMap, &vs->cis_mem);
+ /* Don't bother checking every word... */
+ a = 0; b = -1;
+ for (i = 0; i < vs->cap.map_size; i += 44) {
+ d = bus_readl(vs->cap.bus, vs->cis_virt+i);
+ a += d; b &= d;
+ }
+ bus_iounmap(vs->cap.bus, vs->cis_virt);
+ return (b == -1) ? -1 : (a>>1);
+}
+
+static int checksum_match(u_long base)
+{
+ int a = checksum(base), b = checksum(base+vs->cap.map_size);
+ return ((a == b) && (a >= 0));
+}
+
+static int setup_cis_mem(socket_info_t *s)
+{
+ if (!(s->cap.features & SS_CAP_STATIC_MAP) &&
+ (s->cis_mem.sys_start == 0)) {
+ int low = !(s->cap.features & SS_CAP_PAGE_REGS);
+ vs = s;
+ validate_mem(cis_readable, checksum_match, low);
+ s->cis_mem.sys_start = 0;
+ vs = NULL;
+ if (find_mem_region(&s->cis_mem.sys_start, s->cap.map_size,
+ s->cap.map_size, low, "card services")) {
+ printk(KERN_NOTICE "cs: unable to map card memory!\n");
+ return -1;
+ }
+ s->cis_mem.sys_stop = s->cis_mem.sys_start+s->cap.map_size-1;
+ s->cis_virt = bus_ioremap(s->cap.bus, s->cis_mem.sys_start,
+ s->cap.map_size);
+ }
+ return 0;
+}
+
+void release_cis_mem(socket_info_t *s)
+{
+ if (s->cis_mem.sys_start != 0) {
+ s->cis_mem.flags &= ~MAP_ACTIVE;
+ s->ss_entry(s->sock, SS_SetMemMap, &s->cis_mem);
+ if (!(s->cap.features & SS_CAP_STATIC_MAP))
+ release_mem_region(s->cis_mem.sys_start, s->cap.map_size);
+ bus_iounmap(s->cap.bus, s->cis_virt);
+ s->cis_mem.sys_start = 0;
+ s->cis_virt = NULL;
+ }
+}
+
+/*======================================================================
+
+ This is a wrapper around read_cis_mem, with the same interface,
+ but which caches information, for cards whose CIS may not be
+ readable all the time.
+
+======================================================================*/
+
+static void read_cis_cache(socket_info_t *s, int attr, u_int addr,
+ u_int len, void *ptr)
+{
+ int i, ret;
+ char *caddr;
+
+ if (s->fake_cis) {
+ if (s->fake_cis_len > addr+len)
+ memcpy(ptr, s->fake_cis+addr, len);
+ else
+ memset(ptr, 0xff, len);
+ return;
+ }
+ caddr = s->cis_cache;
+ for (i = 0; i < s->cis_used; i++) {
+ if ((s->cis_table[i].addr == addr) &&
+ (s->cis_table[i].len == len) &&
+ (s->cis_table[i].attr == attr)) break;
+ caddr += s->cis_table[i].len;
+ }
+ if (i < s->cis_used) {
+ memcpy(ptr, caddr, len);
+ return;
+ }
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS)
+ ret = read_cb_mem(s, 0, attr, addr, len, ptr);
+ else
+#endif
+ ret = read_cis_mem(s, attr, addr, len, ptr);
+ /* Copy data into the cache, if there is room */
+ if ((ret == 0) && (i < MAX_CIS_TABLE) &&
+ (caddr+len < s->cis_cache+MAX_CIS_DATA)) {
+ s->cis_table[i].addr = addr;
+ s->cis_table[i].len = len;
+ s->cis_table[i].attr = attr;
+ s->cis_used++;
+ memcpy(caddr, ptr, len);
+ }
+}
+
+/*======================================================================
+
+ This verifies if the CIS of a card matches what is in the CIS
+ cache.
+
+======================================================================*/
+
+int verify_cis_cache(socket_info_t *s)
+{
+ char *buf, *caddr;
+ int i;
+
+ buf = kmalloc(256, GFP_KERNEL);
+ if (buf == NULL)
+ return -1;
+ caddr = s->cis_cache;
+ for (i = 0; i < s->cis_used; i++) {
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS)
+ read_cb_mem(s, 0, s->cis_table[i].attr, s->cis_table[i].addr,
+ s->cis_table[i].len, buf);
+ else
+#endif
+ read_cis_mem(s, s->cis_table[i].attr, s->cis_table[i].addr,
+ s->cis_table[i].len, buf);
+ if (memcmp(buf, caddr, s->cis_table[i].len) != 0)
+ break;
+ caddr += s->cis_table[i].len;
+ }
+ kfree(buf);
+ return (i < s->cis_used);
+}
+
+/*======================================================================
+
+ For really bad cards, we provide a facility for uploading a
+ replacement CIS.
+
+======================================================================*/
+
+int replace_cis(client_handle_t handle, cisdump_t *cis)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (s->fake_cis != NULL) {
+ kfree(s->fake_cis);
+ s->fake_cis = NULL;
+ }
+ if (cis->Length > CISTPL_MAX_CIS_SIZE)
+ return CS_BAD_SIZE;
+ s->fake_cis = kmalloc(cis->Length, GFP_KERNEL);
+ if (s->fake_cis == NULL)
+ return CS_OUT_OF_RESOURCE;
+ s->fake_cis_len = cis->Length;
+ memcpy(s->fake_cis, cis->Data, cis->Length);
+ return CS_SUCCESS;
+}
+
+/*======================================================================
+
+ The high-level CIS tuple services
+
+======================================================================*/
+
+typedef struct tuple_flags {
+ u_int link_space:4;
+ u_int has_link:1;
+ u_int mfc_fn:3;
+ u_int space:4;
+} tuple_flags;
+
+#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space)
+#define HAS_LINK(f) (((tuple_flags *)(&(f)))->has_link)
+#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
+#define SPACE(f) (((tuple_flags *)(&(f)))->space)
+
+int get_next_tuple(client_handle_t handle, tuple_t *tuple);
+
+int get_first_tuple(client_handle_t handle, tuple_t *tuple)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ tuple->TupleLink = tuple->Flags = 0;
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS) {
+ u_int ptr;
+ pcibios_read_config_dword(s->cap.cardbus, 0, 0x28, &ptr);
+ tuple->CISOffset = ptr & ~7;
+ SPACE(tuple->Flags) = (ptr & 7);
+ } else
+#endif
+ {
+ /* Assume presence of a LONGLINK_C to address 0 */
+ tuple->CISOffset = tuple->LinkOffset = 0;
+ SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
+ }
+ if (!(s->state & SOCKET_CARDBUS) && (s->functions > 1) &&
+ !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
+ cisdata_t req = tuple->DesiredTuple;
+ tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
+ if (get_next_tuple(handle, tuple) == CS_SUCCESS) {
+ tuple->DesiredTuple = CISTPL_LINKTARGET;
+ if (get_next_tuple(handle, tuple) != CS_SUCCESS)
+ return CS_NO_MORE_ITEMS;
+ } else
+ tuple->CISOffset = tuple->TupleLink = 0;
+ tuple->DesiredTuple = req;
+ }
+ return get_next_tuple(handle, tuple);
+}
+
+static int follow_link(socket_info_t *s, tuple_t *tuple)
+{
+ u_char link[5];
+ u_int ofs;
+
+ if (MFC_FN(tuple->Flags)) {
+ /* Get indirect link from the MFC tuple */
+ read_cis_cache(s, LINK_SPACE(tuple->Flags),
+ tuple->LinkOffset, 5, link);
+ ofs = le32_to_cpu(*(u_int *)(link+1));
+ SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
+ /* Move to the next indirect link */
+ tuple->LinkOffset += 5;
+ MFC_FN(tuple->Flags)--;
+ } else if (HAS_LINK(tuple->Flags)) {
+ ofs = tuple->LinkOffset;
+ SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
+ HAS_LINK(tuple->Flags) = 0;
+ } else {
+ return -1;
+ }
+ if (!(s->state & SOCKET_CARDBUS) && SPACE(tuple->Flags)) {
+ /* This is ugly, but a common CIS error is to code the long
+ link offset incorrectly, so we check the right spot... */
+ read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
+ if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
+ (strncmp(link+2, "CIS", 3) == 0))
+ return ofs;
+ /* Then, we try the wrong spot... */
+ ofs = ofs >> 1;
+ }
+ read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
+ if ((link[0] != CISTPL_LINKTARGET) || (link[1] < 3) ||
+ (strncmp(link+2, "CIS", 3) != 0))
+ return -1;
+ return ofs;
+}
+
+int get_next_tuple(client_handle_t handle, tuple_t *tuple)
+{
+ socket_info_t *s;
+ u_char link[2], tmp;
+ int ofs, i, attr;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ link[1] = tuple->TupleLink;
+ ofs = tuple->CISOffset + tuple->TupleLink;
+ attr = SPACE(tuple->Flags);
+
+ for (i = 0; i < MAX_TUPLES; i++) {
+ if (link[1] == 0xff) {
+ link[0] = CISTPL_END;
+ } else {
+ read_cis_cache(s, attr, ofs, 2, link);
+ if (link[0] == CISTPL_NULL) {
+ ofs++; continue;
+ }
+ }
+
+ /* End of chain? Follow long link if possible */
+ if (link[0] == CISTPL_END) {
+ if ((ofs = follow_link(s, tuple)) < 0)
+ return CS_NO_MORE_ITEMS;
+ attr = SPACE(tuple->Flags);
+ read_cis_cache(s, attr, ofs, 2, link);
+ }
+
+ /* Is this a link tuple? Make a note of it */
+ if ((link[0] == CISTPL_LONGLINK_A) ||
+ (link[0] == CISTPL_LONGLINK_C) ||
+ (link[0] == CISTPL_LONGLINK_MFC) ||
+ (link[0] == CISTPL_LINKTARGET) ||
+ (link[0] == CISTPL_INDIRECT) ||
+ (link[0] == CISTPL_NO_LINK)) {
+ switch (link[0]) {
+ case CISTPL_LONGLINK_A:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
+ read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
+ break;
+ case CISTPL_LONGLINK_C:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
+ read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
+ break;
+ case CISTPL_INDIRECT:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = IS_ATTR | IS_INDIRECT;
+ tuple->LinkOffset = 0;
+ break;
+ case CISTPL_LONGLINK_MFC:
+ tuple->LinkOffset = ofs + 3;
+ LINK_SPACE(tuple->Flags) = attr;
+ if (handle->Function == BIND_FN_ALL) {
+ /* Follow all the MFC links */
+ read_cis_cache(s, attr, ofs+2, 1, &tmp);
+ MFC_FN(tuple->Flags) = tmp;
+ } else {
+ /* Follow exactly one of the links */
+ MFC_FN(tuple->Flags) = 1;
+ tuple->LinkOffset += handle->Function * 5;
+ }
+ break;
+ case CISTPL_NO_LINK:
+ HAS_LINK(tuple->Flags) = 0;
+ break;
+ }
+ if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
+ (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
+ break;
+ } else
+ if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
+ break;
+
+ if (link[0] == tuple->DesiredTuple)
+ break;
+ ofs += link[1] + 2;
+ }
+ if (i == MAX_TUPLES) {
+ DEBUG(1, "cs: overrun in get_next_tuple for socket %d\n",
+ handle->Socket);
+ return CS_NO_MORE_ITEMS;
+ }
+
+ tuple->TupleCode = link[0];
+ tuple->TupleLink = link[1];
+ tuple->CISOffset = ofs + 2;
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+#define _MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+int get_tuple_data(client_handle_t handle, tuple_t *tuple)
+{
+ socket_info_t *s;
+ u_int len;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+
+ s = SOCKET(handle);
+
+ if (tuple->TupleLink < tuple->TupleOffset)
+ return CS_NO_MORE_ITEMS;
+ len = tuple->TupleLink - tuple->TupleOffset;
+ tuple->TupleDataLen = tuple->TupleLink;
+ if (len == 0)
+ return CS_SUCCESS;
+ read_cis_cache(s, SPACE(tuple->Flags),
+ tuple->CISOffset + tuple->TupleOffset,
+ _MIN(len, tuple->TupleDataMax), tuple->TupleData);
+ return CS_SUCCESS;
+}
+
+/*======================================================================
+
+ Parsing routines for individual tuples
+
+======================================================================*/
+
+static int parse_device(tuple_t *tuple, cistpl_device_t *device)
+{
+ int i;
+ u_char scale;
+ u_char *p, *q;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ device->ndev = 0;
+ for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
+
+ if (*p == 0xff) break;
+ device->dev[i].type = (*p >> 4);
+ device->dev[i].wp = (*p & 0x08) ? 1 : 0;
+ switch (*p & 0x07) {
+ case 0: device->dev[i].speed = 0; break;
+ case 1: device->dev[i].speed = 250; break;
+ case 2: device->dev[i].speed = 200; break;
+ case 3: device->dev[i].speed = 150; break;
+ case 4: device->dev[i].speed = 100; break;
+ case 7:
+ if (++p == q) return CS_BAD_TUPLE;
+ device->dev[i].speed = SPEED_CVT(*p);
+ while (*p & 0x80)
+ if (++p == q) return CS_BAD_TUPLE;
+ break;
+ default:
+ return CS_BAD_TUPLE;
+ }
+
+ if (++p == q) return CS_BAD_TUPLE;
+ if (*p == 0xff) break;
+ scale = *p & 7;
+ if (scale == 7) return CS_BAD_TUPLE;
+ device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
+ device->ndev++;
+ if (++p == q) break;
+ }
+
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
+{
+ u_char *p;
+ if (tuple->TupleDataLen < 5)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ csum->addr = tuple->CISOffset+(short)le16_to_cpu(*(u_short *)p)-2;
+ csum->len = le16_to_cpu(*(u_short *)(p + 2));
+ csum->sum = *(p+4);
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
+{
+ if (tuple->TupleDataLen < 4)
+ return CS_BAD_TUPLE;
+ link->addr = le32_to_cpu(*(u_int *)tuple->TupleData);
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_longlink_mfc(tuple_t *tuple,
+ cistpl_longlink_mfc_t *link)
+{
+ u_char *p;
+ int i;
+
+ p = (u_char *)tuple->TupleData;
+
+ link->nfn = *p; p++;
+ if (tuple->TupleDataLen <= link->nfn*5)
+ return CS_BAD_TUPLE;
+ for (i = 0; i < link->nfn; i++) {
+ link->fn[i].space = *p; p++;
+ link->fn[i].addr = le32_to_cpu(*(u_int *)p); p += 4;
+ }
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_strings(u_char *p, u_char *q, int max,
+ char *s, u_char *ofs, u_char *found)
+{
+ int i, j, ns;
+
+ if (p == q) return CS_BAD_TUPLE;
+ ns = 0; j = 0;
+ for (i = 0; i < max; i++) {
+ if (*p == 0xff) break;
+ ofs[i] = j;
+ ns++;
+ for (;;) {
+ s[j++] = (*p == 0xff) ? '\0' : *p;
+ if ((*p == '\0') || (*p == 0xff)) break;
+ if (++p == q) return CS_BAD_TUPLE;
+ }
+ if ((*p == 0xff) || (++p == q)) break;
+ }
+ if (found) {
+ *found = ns;
+ return CS_SUCCESS;
+ } else {
+ return (ns == max) ? CS_SUCCESS : CS_BAD_TUPLE;
+ }
+}
+
+/*====================================================================*/
+
+static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
+{
+ u_char *p, *q;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ vers_1->major = *p; p++;
+ vers_1->minor = *p; p++;
+ if (p >= q) return CS_BAD_TUPLE;
+
+ return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
+ vers_1->str, vers_1->ofs, &vers_1->ns);
+}
+
+/*====================================================================*/
+
+static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
+{
+ u_char *p, *q;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
+ altstr->str, altstr->ofs, &altstr->ns);
+}
+
+/*====================================================================*/
+
+static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
+{
+ u_char *p, *q;
+ int nid;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
+ if (p > q-2) break;
+ jedec->id[nid].mfr = p[0];
+ jedec->id[nid].info = p[1];
+ p += 2;
+ }
+ jedec->nid = nid;
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
+{
+ u_short *p;
+ if (tuple->TupleDataLen < 4)
+ return CS_BAD_TUPLE;
+ p = (u_short *)tuple->TupleData;
+ m->manf = le16_to_cpu(p[0]);
+ m->card = le16_to_cpu(p[1]);
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f)
+{
+ u_char *p;
+ if (tuple->TupleDataLen < 2)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ f->func = p[0];
+ f->sysinit = p[1];
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_funce(tuple_t *tuple, cistpl_funce_t *f)
+{
+ u_char *p;
+ int i;
+ if (tuple->TupleDataLen < 1)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ f->type = p[0];
+ for (i = 1; i < tuple->TupleDataLen; i++)
+ f->data[i-1] = p[i];
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_config(tuple_t *tuple, cistpl_config_t *config)
+{
+ int rasz, rmsz, i;
+ u_char *p;
+
+ p = (u_char *)tuple->TupleData;
+ rasz = *p & 0x03;
+ rmsz = (*p & 0x3c) >> 2;
+ if (tuple->TupleDataLen < rasz+rmsz+4)
+ return CS_BAD_TUPLE;
+ config->last_idx = *(++p);
+ p++;
+ config->base = 0;
+ for (i = 0; i <= rasz; i++)
+ config->base += p[i] << (8*i);
+ p += rasz+1;
+ for (i = 0; i < 4; i++)
+ config->rmask[i] = 0;
+ for (i = 0; i <= rmsz; i++)
+ config->rmask[i>>2] += p[i] << (8*(i%4));
+ config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
+ return CS_SUCCESS;
+}
+
+/*======================================================================
+
+ The following routines are all used to parse the nightmarish
+ config table entries.
+
+======================================================================*/
+
+static u_char *parse_power(u_char *p, u_char *q,
+ cistpl_power_t *pwr)
+{
+ int i;
+ u_int scale;
+
+ if (p == q) return NULL;
+ pwr->present = *p;
+ pwr->flags = 0;
+ p++;
+ for (i = 0; i < 7; i++)
+ if (pwr->present & (1<<i)) {
+ if (p == q) return NULL;
+ pwr->param[i] = POWER_CVT(*p);
+ scale = POWER_SCALE(*p);
+ while (*p & 0x80) {
+ if (++p == q) return NULL;
+ if ((*p & 0x7f) < 100)
+ pwr->param[i] += (*p & 0x7f) * scale / 100;
+ else if (*p == 0x7d)
+ pwr->flags |= CISTPL_POWER_HIGHZ_OK;
+ else if (*p == 0x7e)
+ pwr->param[i] = 0;
+ else if (*p == 0x7f)
+ pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
+ else
+ return NULL;
+ }
+ p++;
+ }
+ return p;
+}
+
+/*====================================================================*/
+
+static u_char *parse_timing(u_char *p, u_char *q,
+ cistpl_timing_t *timing)
+{
+ u_char scale;
+
+ if (p == q) return NULL;
+ scale = *p;
+ if ((scale & 3) != 3) {
+ if (++p == q) return NULL;
+ timing->wait = SPEED_CVT(*p);
+ timing->waitscale = exponent[scale & 3];
+ } else
+ timing->wait = 0;
+ scale >>= 2;
+ if ((scale & 7) != 7) {
+ if (++p == q) return NULL;
+ timing->ready = SPEED_CVT(*p);
+ timing->rdyscale = exponent[scale & 7];
+ } else
+ timing->ready = 0;
+ scale >>= 3;
+ if (scale != 7) {
+ if (++p == q) return NULL;
+ timing->reserved = SPEED_CVT(*p);
+ timing->rsvscale = exponent[scale];
+ } else
+ timing->reserved = 0;
+ p++;
+ return p;
+}
+
+/*====================================================================*/
+
+static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
+{
+ int i, j, bsz, lsz;
+
+ if (p == q) return NULL;
+ io->flags = *p;
+
+ if (!(*p & 0x80)) {
+ io->nwin = 1;
+ io->win[0].base = 0;
+ io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
+ return p+1;
+ }
+
+ if (++p == q) return NULL;
+ io->nwin = (*p & 0x0f) + 1;
+ bsz = (*p & 0x30) >> 4;
+ if (bsz == 3) bsz++;
+ lsz = (*p & 0xc0) >> 6;
+ if (lsz == 3) lsz++;
+ p++;
+
+ for (i = 0; i < io->nwin; i++) {
+ io->win[i].base = 0;
+ io->win[i].len = 1;
+ for (j = 0; j < bsz; j++, p++) {
+ if (p == q) return NULL;
+ io->win[i].base += *p << (j*8);
+ }
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q) return NULL;
+ io->win[i].len += *p << (j*8);
+ }
+ }
+ return p;
+}
+
+/*====================================================================*/
+
+static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
+{
+ int i, j, asz, lsz, has_ha;
+ u_int len, ca, ha;
+
+ if (p == q) return NULL;
+
+ mem->nwin = (*p & 0x07) + 1;
+ lsz = (*p & 0x18) >> 3;
+ asz = (*p & 0x60) >> 5;
+ has_ha = (*p & 0x80);
+ if (++p == q) return NULL;
+
+ for (i = 0; i < mem->nwin; i++) {
+ len = ca = ha = 0;
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q) return NULL;
+ len += *p << (j*8);
+ }
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q) return NULL;
+ ca += *p << (j*8);
+ }
+ if (has_ha)
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q) return NULL;
+ ha += *p << (j*8);
+ }
+ mem->win[i].len = len << 8;
+ mem->win[i].card_addr = ca << 8;
+ mem->win[i].host_addr = ha << 8;
+ }
+ return p;
+}
+
+/*====================================================================*/
+
+static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
+{
+ if (p == q) return NULL;
+ irq->IRQInfo1 = *p; p++;
+ if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
+ if (p+2 > q) return NULL;
+ irq->IRQInfo2 = (p[1]<<8) + p[0];
+ p += 2;
+ }
+ return p;
+}
+
+/*====================================================================*/
+
+static int parse_cftable_entry(tuple_t *tuple,
+ cistpl_cftable_entry_t *entry)
+{
+ u_char *p, *q, features;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ entry->index = *p & 0x3f;
+ entry->flags = 0;
+ if (*p & 0x40)
+ entry->flags |= CISTPL_CFTABLE_DEFAULT;
+ if (*p & 0x80) {
+ if (++p == q) return CS_BAD_TUPLE;
+ if (*p & 0x10)
+ entry->flags |= CISTPL_CFTABLE_BVDS;
+ if (*p & 0x20)
+ entry->flags |= CISTPL_CFTABLE_WP;
+ if (*p & 0x40)
+ entry->flags |= CISTPL_CFTABLE_RDYBSY;
+ if (*p & 0x80)
+ entry->flags |= CISTPL_CFTABLE_MWAIT;
+ entry->interface = *p & 0x0f;
+ } else
+ entry->interface = 0;
+
+ /* Process optional features */
+ if (++p == q) return CS_BAD_TUPLE;
+ features = *p; p++;
+
+ /* Power options */
+ if ((features & 3) > 0) {
+ p = parse_power(p, q, &entry->vcc);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vcc.present = 0;
+ if ((features & 3) > 1) {
+ p = parse_power(p, q, &entry->vpp1);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vpp1.present = 0;
+ if ((features & 3) > 2) {
+ p = parse_power(p, q, &entry->vpp2);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vpp2.present = 0;
+
+ /* Timing options */
+ if (features & 0x04) {
+ p = parse_timing(p, q, &entry->timing);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else {
+ entry->timing.wait = 0;
+ entry->timing.ready = 0;
+ entry->timing.reserved = 0;
+ }
+
+ /* I/O window options */
+ if (features & 0x08) {
+ p = parse_io(p, q, &entry->io);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->io.nwin = 0;
+
+ /* Interrupt options */
+ if (features & 0x10) {
+ p = parse_irq(p, q, &entry->irq);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->irq.IRQInfo1 = 0;
+
+ switch (features & 0x60) {
+ case 0x00:
+ entry->mem.nwin = 0;
+ break;
+ case 0x20:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = le16_to_cpu(*(u_short *)p) << 8;
+ entry->mem.win[0].card_addr = 0;
+ entry->mem.win[0].host_addr = 0;
+ p += 2;
+ if (p > q) return CS_BAD_TUPLE;
+ break;
+ case 0x40:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = le16_to_cpu(*(u_short *)p) << 8;
+ entry->mem.win[0].card_addr =
+ le16_to_cpu(*(u_short *)(p+2)) << 8;
+ entry->mem.win[0].host_addr = 0;
+ p += 4;
+ if (p > q) return CS_BAD_TUPLE;
+ break;
+ case 0x60:
+ p = parse_mem(p, q, &entry->mem);
+ if (p == NULL) return CS_BAD_TUPLE;
+ break;
+ }
+
+ /* Misc features */
+ if (features & 0x80) {
+ if (p == q) return CS_BAD_TUPLE;
+ entry->flags |= (*p << 8);
+ while (*p & 0x80)
+ if (++p == q) return CS_BAD_TUPLE;
+ p++;
+ }
+
+ entry->subtuples = q-p;
+
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+#ifdef CONFIG_CARDBUS
+
+static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
+{
+ u_char *p;
+ if (tuple->TupleDataLen < 6)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ bar->attr = *p;
+ p += 2;
+ bar->size = le32_to_cpu(*(u_int *)p);
+ return CS_SUCCESS;
+}
+
+static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
+{
+ u_char *p;
+
+ p = (u_char *)tuple->TupleData;
+ if ((*p != 3) || (tuple->TupleDataLen < 6))
+ return CS_BAD_TUPLE;
+ config->last_idx = *(++p);
+ p++;
+ config->base = le32_to_cpu(*(u_int *)p);
+ config->subtuples = tuple->TupleDataLen - 6;
+ return CS_SUCCESS;
+}
+
+/* Parse a CISTPL_CFTABLE_ENTRY_CB tuple (CardBus configuration table
+   entry).  The first byte carries the entry index (low 6 bits) and a
+   "default entry" flag (bit 6); the second byte is a feature bitmap
+   that selects which optional fields follow: power descriptors,
+   an I/O bitmap byte, IRQ info, a memory bitmap byte, and misc flags.
+   Each field is bounds-checked against q (one past the tuple end);
+   running off the end yields CS_BAD_TUPLE. */
+static int parse_cftable_entry_cb(tuple_t *tuple,
+				  cistpl_cftable_entry_cb_t *entry)
+{
+    u_char *p, *q, features;
+
+    p = tuple->TupleData;
+    q = p + tuple->TupleDataLen;
+    entry->index = *p & 0x3f;
+    entry->flags = 0;
+    if (*p & 0x40)
+	entry->flags |= CISTPL_CFTABLE_DEFAULT;
+
+    /* Process optional features */
+    if (++p == q) return CS_BAD_TUPLE;
+    features = *p; p++;
+
+    /* Power options: the 2-bit count (features & 3) selects how many
+       of Vcc/Vpp1/Vpp2 descriptors are present, in that order. */
+    if ((features & 3) > 0) {
+	p = parse_power(p, q, &entry->vcc);
+	if (p == NULL) return CS_BAD_TUPLE;
+    } else
+	entry->vcc.present = 0;
+    if ((features & 3) > 1) {
+	p = parse_power(p, q, &entry->vpp1);
+	if (p == NULL) return CS_BAD_TUPLE;
+    } else
+	entry->vpp1.present = 0;
+    if ((features & 3) > 2) {
+	p = parse_power(p, q, &entry->vpp2);
+	if (p == NULL) return CS_BAD_TUPLE;
+    } else
+	entry->vpp2.present = 0;
+
+    /* I/O window options: a single bitmap byte for CardBus */
+    if (features & 0x08) {
+	if (p == q) return CS_BAD_TUPLE;
+	entry->io = *p; p++;
+    } else
+	entry->io = 0;
+
+    /* Interrupt options */
+    if (features & 0x10) {
+	p = parse_irq(p, q, &entry->irq);
+	if (p == NULL) return CS_BAD_TUPLE;
+    } else
+	entry->irq.IRQInfo1 = 0;
+
+    /* Memory window options: a single bitmap byte for CardBus */
+    if (features & 0x20) {
+	if (p == q) return CS_BAD_TUPLE;
+	entry->mem = *p; p++;
+    } else
+	entry->mem = 0;
+
+    /* Misc features: first two extension bytes are folded into
+       entry->flags (bits 8-15 and 16-23); any further extension
+       bytes (top bit set) are skipped. */
+    if (features & 0x80) {
+	if (p == q) return CS_BAD_TUPLE;
+	entry->flags |= (*p << 8);
+	if (*p & 0x80) {
+	    if (++p == q) return CS_BAD_TUPLE;
+	    entry->flags |= (*p << 16);
+	}
+	while (*p & 0x80)
+	    if (++p == q) return CS_BAD_TUPLE;
+	p++;
+    }
+
+    /* Whatever remains is subtuple data */
+    entry->subtuples = q-p;
+
+    return CS_SUCCESS;
+}
+
+#endif
+
+/*====================================================================*/
+
+/* Parse a CISTPL_DEVICE_GEO (or _A) tuple: a sequence of 6-byte
+   device geometry records.  Each byte after the bus width is a
+   power-of-two exponent (value n means 2^(n-1)).  Parsing stops when
+   fewer than 6 bytes remain or CISTPL_MAX_DEVICES entries are filled;
+   the count of parsed records is stored in geo->ngeo. */
+static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
+{
+    u_char *p, *q;
+    int n;
+
+    p = (u_char *)tuple->TupleData;
+    q = p + tuple->TupleDataLen;
+
+    for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
+	if (p > q-6) break;
+	geo->geo[n].buswidth = p[0];
+	geo->geo[n].erase_block = 1 << (p[1]-1);
+	geo->geo[n].read_block = 1 << (p[2]-1);
+	geo->geo[n].write_block = 1 << (p[3]-1);
+	geo->geo[n].partition = 1 << (p[4]-1);
+	geo->geo[n].interleave = 1 << (p[5]-1);
+	p += 6;
+    }
+    geo->ngeo = n;
+    return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+/* Parse a CISTPL_VERS_2 tuple: fixed header (version, compliance,
+   little-endian device index at offset 2, vendor-specific bytes at
+   6-7, header count at 8) followed by two NUL-separated strings
+   (vendor and product info) handled by parse_strings().  Requires at
+   least 10 bytes of tuple data. */
+static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
+{
+    u_char *p, *q;
+
+    if (tuple->TupleDataLen < 10)
+	return CS_BAD_TUPLE;
+
+    p = tuple->TupleData;
+    q = p + tuple->TupleDataLen;
+
+    v2->vers = p[0];
+    v2->comply = p[1];
+    v2->dindex = le16_to_cpu(*(u_short *)(p+2));
+    v2->vspec8 = p[6];
+    v2->vspec9 = p[7];
+    v2->nhdr = p[8];
+    p += 9;
+    return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
+}
+
+/*====================================================================*/
+
+/* Parse a CISTPL_ORG tuple: a data-organization code byte followed by
+   a NUL-terminated description string, copied into org->desc (at most
+   30 bytes).  NOTE(review): if the description fills all 30 bytes
+   without a NUL, desc is left unterminated — callers presumably treat
+   it as a bounded buffer; verify before printing it as a C string. */
+static int parse_org(tuple_t *tuple, cistpl_org_t *org)
+{
+    u_char *p, *q;
+    int i;
+
+    p = tuple->TupleData;
+    q = p + tuple->TupleDataLen;
+    if (p == q) return CS_BAD_TUPLE;
+    org->data_org = *p;
+    if (++p == q) return CS_BAD_TUPLE;
+    for (i = 0; i < 30; i++) {
+	org->desc[i] = *p;
+	if (*p == '\0') break;
+	if (++p == q) return CS_BAD_TUPLE;
+    }
+    return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+/* Parse a CISTPL_FORMAT (or _A) tuple: format type and error
+   detection code bytes, then little-endian 32-bit region offset and
+   length.  Requires at least 10 bytes of tuple data. */
+static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
+{
+    u_char *p;
+
+    if (tuple->TupleDataLen < 10)
+	return CS_BAD_TUPLE;
+
+    p = tuple->TupleData;
+
+    fmt->type = p[0];
+    fmt->edc = p[1];
+    fmt->offset = le32_to_cpu(*(u_int *)(p+2));
+    fmt->length = le32_to_cpu(*(u_int *)(p+6));
+
+    return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+/* Top-level tuple parser: dispatch on tuple->TupleCode to the
+   appropriate CISTPL_* parser, filling the matching member of the
+   cisparse_t union.  Rejects tuples whose declared data length
+   exceeds the buffer size (TupleDataMax).  CISTPL_NO_LINK and
+   CISTPL_LINKTARGET carry no payload of interest and succeed
+   trivially; unknown codes yield CS_UNSUPPORTED_FUNCTION.  The
+   handle argument is unused here. */
+int parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+{
+    int ret = CS_SUCCESS;
+
+    if (tuple->TupleDataLen > tuple->TupleDataMax)
+	return CS_BAD_TUPLE;
+    switch (tuple->TupleCode) {
+    case CISTPL_DEVICE:
+    case CISTPL_DEVICE_A:
+	ret = parse_device(tuple, &parse->device);
+	break;
+#ifdef CONFIG_CARDBUS
+    case CISTPL_BAR:
+	ret = parse_bar(tuple, &parse->bar);
+	break;
+    case CISTPL_CONFIG_CB:
+	ret = parse_config_cb(tuple, &parse->config);
+	break;
+    case CISTPL_CFTABLE_ENTRY_CB:
+	ret = parse_cftable_entry_cb(tuple, &parse->cftable_entry_cb);
+	break;
+#endif
+    case CISTPL_CHECKSUM:
+	ret = parse_checksum(tuple, &parse->checksum);
+	break;
+    case CISTPL_LONGLINK_A:
+    case CISTPL_LONGLINK_C:
+	ret = parse_longlink(tuple, &parse->longlink);
+	break;
+    case CISTPL_LONGLINK_MFC:
+	ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
+	break;
+    case CISTPL_VERS_1:
+	ret = parse_vers_1(tuple, &parse->version_1);
+	break;
+    case CISTPL_ALTSTR:
+	ret = parse_altstr(tuple, &parse->altstr);
+	break;
+    case CISTPL_JEDEC_A:
+    case CISTPL_JEDEC_C:
+	ret = parse_jedec(tuple, &parse->jedec);
+	break;
+    case CISTPL_MANFID:
+	ret = parse_manfid(tuple, &parse->manfid);
+	break;
+    case CISTPL_FUNCID:
+	ret = parse_funcid(tuple, &parse->funcid);
+	break;
+    case CISTPL_FUNCE:
+	ret = parse_funce(tuple, &parse->funce);
+	break;
+    case CISTPL_CONFIG:
+	ret = parse_config(tuple, &parse->config);
+	break;
+    case CISTPL_CFTABLE_ENTRY:
+	ret = parse_cftable_entry(tuple, &parse->cftable_entry);
+	break;
+    case CISTPL_DEVICE_GEO:
+    case CISTPL_DEVICE_GEO_A:
+	ret = parse_device_geo(tuple, &parse->device_geo);
+	break;
+    case CISTPL_VERS_2:
+	ret = parse_vers_2(tuple, &parse->vers_2);
+	break;
+    case CISTPL_ORG:
+	ret = parse_org(tuple, &parse->org);
+	break;
+    case CISTPL_FORMAT:
+    case CISTPL_FORMAT_A:
+	ret = parse_format(tuple, &parse->format);
+	break;
+    case CISTPL_NO_LINK:
+    case CISTPL_LINKTARGET:
+	ret = CS_SUCCESS;
+	break;
+    default:
+	ret = CS_UNSUPPORTED_FUNCTION;
+	break;
+    }
+    return ret;
+}
+
+/*======================================================================
+
+ This is used internally by Card Services to look up CIS stuff.
+
+======================================================================*/
+
+/* Convenience helper used internally by Card Services: locate the
+   first tuple with the given code, read its data into a temporary
+   255-byte buffer, and parse it into *parse via the Card Services
+   entry points (GetFirstTuple / GetTupleData / ParseTuple).  The
+   buffer is freed on all paths; returns the first failing service's
+   status, or CS_OUT_OF_RESOURCE if the buffer allocation fails. */
+int read_tuple(client_handle_t handle, cisdata_t code, void *parse)
+{
+    tuple_t tuple;
+    cisdata_t *buf;
+    int ret;
+
+    buf = kmalloc(255, GFP_KERNEL);
+    if (buf == NULL)
+	return CS_OUT_OF_RESOURCE;
+    tuple.DesiredTuple = code;
+    tuple.Attributes = TUPLE_RETURN_COMMON;
+    ret = CardServices(GetFirstTuple, handle, &tuple, NULL);
+    if (ret != CS_SUCCESS) goto done;
+    tuple.TupleData = buf;
+    tuple.TupleOffset = 0;
+    tuple.TupleDataMax = 255;
+    ret = CardServices(GetTupleData, handle, &tuple, NULL);
+    if (ret != CS_SUCCESS) goto done;
+    ret = CardServices(ParseTuple, handle, &tuple, parse);
+done:
+    kfree(buf);
+    return ret;
+}
+
+/*======================================================================
+
+ This tries to determine if a card has a sensible CIS. It returns
+ the number of tuples in the CIS, or 0 if the CIS looks bad. The
+ checks include making sure several critical tuples are present and
+ valid; seeing if the total number of tuples is reasonable; and
+ looking for tuples that use reserved codes.
+
+======================================================================*/
+
+/* Sanity-check a card's CIS.  The verdict is returned in
+   info->Chains: the number of tuples found, or 0 if the CIS looks
+   bad.  Heuristics: a DEVICE or CFTABLE_ENTRY tuple should exist
+   (dev_ok), some identification tuple (MANFID/VERS_1/VERS_2) should
+   exist (ident_ok), the tuple chain must terminate within MAX_TUPLES,
+   and no more than 5 tuples may use reserved codes.  NOTE(review):
+   the function itself always returns CS_SUCCESS (except for a bad
+   handle or allocation failure) — callers must inspect info->Chains. */
+int validate_cis(client_handle_t handle, cisinfo_t *info)
+{
+    tuple_t tuple;
+    cisparse_t *p;
+    int ret, reserved, dev_ok = 0, ident_ok = 0;
+
+    if (CHECK_HANDLE(handle))
+	return CS_BAD_HANDLE;
+    /* Scratch parse buffer; heap-allocated because cisparse_t is large */
+    p = kmalloc(sizeof(*p), GFP_KERNEL);
+    if (p == NULL)
+	return CS_OUT_OF_RESOURCE;
+
+    info->Chains = reserved = 0;
+    tuple.DesiredTuple = RETURN_FIRST_TUPLE;
+    tuple.Attributes = TUPLE_RETURN_COMMON;
+    ret = get_first_tuple(handle, &tuple);
+    if (ret != CS_SUCCESS)
+	goto done;
+
+    /* First tuple should be DEVICE; we should really have either that
+       or a CFTABLE_ENTRY of some sort */
+    if ((tuple.TupleCode == CISTPL_DEVICE) ||
+	(read_tuple(handle, CISTPL_CFTABLE_ENTRY, p) == CS_SUCCESS) ||
+	(read_tuple(handle, CISTPL_CFTABLE_ENTRY_CB, p) == CS_SUCCESS))
+	dev_ok++;
+
+    /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2
+       tuple, for card identification.  Certain old D-Link and Linksys
+       cards have only a broken VERS_2 tuple; hence the bogus test. */
+    if ((read_tuple(handle, CISTPL_MANFID, p) == CS_SUCCESS) ||
+	(read_tuple(handle, CISTPL_VERS_1, p) == CS_SUCCESS) ||
+	(read_tuple(handle, CISTPL_VERS_2, p) != CS_NO_MORE_ITEMS))
+	ident_ok++;
+
+    if (!dev_ok && !ident_ok)
+	goto done;
+
+    /* Walk the rest of the chain, counting tuples with reserved codes */
+    for (info->Chains = 1; info->Chains < MAX_TUPLES; info->Chains++) {
+	ret = get_next_tuple(handle, &tuple);
+	if (ret != CS_SUCCESS) break;
+	if (((tuple.TupleCode > 0x23) && (tuple.TupleCode < 0x40)) ||
+	    ((tuple.TupleCode > 0x47) && (tuple.TupleCode < 0x80)) ||
+	    ((tuple.TupleCode > 0x90) && (tuple.TupleCode < 0xff)))
+	    reserved++;
+    }
+    /* Reject: runaway chain, too many reserved codes, or a long chain
+       that still lacks both device and identification tuples */
+    if ((info->Chains == MAX_TUPLES) || (reserved > 5) ||
+	((!dev_ok || !ident_ok) && (info->Chains > 10)))
+	info->Chains = 0;
+
+done:
+    kfree(p);
+    return CS_SUCCESS;
+}
+
diff --git a/linux/pcmcia-cs/modules/cs.c b/linux/pcmcia-cs/modules/cs.c
new file mode 100644
index 0000000..949b190
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cs.c
@@ -0,0 +1,2399 @@
+/*======================================================================
+
+ PCMCIA Card Services -- core services
+
+ cs.c 1.287 2004/04/09 03:54:25
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+
+#define IN_CARD_SERVICES
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/bus_ops.h>
+#include "cs_internal.h"
+
+#ifdef CONFIG_PCI
+#define PCI_OPT " [pci]"
+#else
+#define PCI_OPT ""
+#endif
+#ifdef CONFIG_CARDBUS
+#define CB_OPT " [cardbus]"
+#else
+#define CB_OPT ""
+#endif
+#ifdef CONFIG_PM
+#define PM_OPT " [apm]"
+#else
+#define PM_OPT ""
+#endif
+#ifdef CONFIG_PNP_BIOS
+#define PNP_OPT " [pnp]"
+#else
+#define PNP_OPT ""
+#endif
+#if !defined(CONFIG_CARDBUS) && !defined(CONFIG_PCI) && \
+ !defined(CONFIG_PM) && !defined(CONFIG_PNP_BIOS)
+#define OPTIONS " none"
+#else
+#define OPTIONS PCI_OPT CB_OPT PM_OPT PNP_OPT
+#endif
+
+static const char *release = "Linux PCMCIA Card Services " CS_RELEASE;
+#ifdef UTS_RELEASE
+static const char *kernel = "kernel build: " UTS_RELEASE " " UTS_VERSION;
+#endif
+static const char *options = "options: " OPTIONS;
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Linux PCMCIA Card Services " CS_RELEASE
+ "\n options:" OPTIONS);
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+INT_MODULE_PARM(setup_delay, HZ/20); /* ticks */
+INT_MODULE_PARM(resume_delay, HZ/5); /* ticks */
+INT_MODULE_PARM(shutdown_delay, HZ/40); /* ticks */
+INT_MODULE_PARM(vcc_settle, HZ*4/10); /* ticks */
+INT_MODULE_PARM(reset_time, 10); /* usecs */
+INT_MODULE_PARM(unreset_delay, HZ/10); /* ticks */
+INT_MODULE_PARM(unreset_check, HZ/10); /* ticks */
+INT_MODULE_PARM(unreset_limit, 50); /* unreset_check's */
+
+/* Access speed for attribute memory windows */
+INT_MODULE_PARM(cis_speed, 300); /* ns */
+
+/* Access speed for IO windows */
+INT_MODULE_PARM(io_speed, 0); /* ns */
+
+/* Optional features */
+#ifdef CONFIG_PM
+INT_MODULE_PARM(do_apm, 1);
+#endif
+#ifdef CONFIG_PNP_BIOS
+INT_MODULE_PARM(do_pnp, 1);
+#endif
+
+#ifdef PCMCIA_DEBUG
+int pc_debug=PCMCIA_DEBUG;
+MODULE_PARM(pc_debug, "i");
+static const char *version =
+"cs.c 1.287 2004/04/09 03:54:25 (David Hinds)";
+#endif
+
+/*====================================================================*/
+
+/* Template state for a powered-down socket: all fields zero except
+   the SS_DETECT change-interrupt mask, so card detect still works. */
+static socket_state_t dead_socket = {
+    0, SS_DETECT, 0, 0, 0
+};
+
+/* Table of sockets: sockets is the count of live entries in
+   socket_table[], which is indexed by logical socket number. */
+socket_t sockets = 0;
+socket_info_t *socket_table[MAX_SOCK];
+
+#ifdef HAS_PROC_BUS
+/* /proc/bus/pccard directory, created elsewhere; NULL if absent */
+struct proc_dir_entry *proc_pccard = NULL;
+#endif
+
+/*====================================================================*/
+
+/* String tables for error messages */
+
+typedef struct lookup_t {
+ int key;
+ char *msg;
+} lookup_t;
+
+/* Maps CS_* status codes to human-readable messages for ReportError.
+   NOTE(review): "Usupported function" looks like a typo for
+   "Unsupported function" — it is a runtime string carried over from
+   the original source, left as-is here. */
+static const lookup_t error_table[] = {
+    { CS_SUCCESS, "Operation succeeded" },
+    { CS_BAD_ADAPTER, "Bad adapter" },
+    { CS_BAD_ATTRIBUTE, "Bad attribute", },
+    { CS_BAD_BASE, "Bad base address" },
+    { CS_BAD_EDC, "Bad EDC" },
+    { CS_BAD_IRQ, "Bad IRQ" },
+    { CS_BAD_OFFSET, "Bad offset" },
+    { CS_BAD_PAGE, "Bad page number" },
+    { CS_READ_FAILURE, "Read failure" },
+    { CS_BAD_SIZE, "Bad size" },
+    { CS_BAD_SOCKET, "Bad socket" },
+    { CS_BAD_TYPE, "Bad type" },
+    { CS_BAD_VCC, "Bad Vcc" },
+    { CS_BAD_VPP, "Bad Vpp" },
+    { CS_BAD_WINDOW, "Bad window" },
+    { CS_WRITE_FAILURE, "Write failure" },
+    { CS_NO_CARD, "No card present" },
+    { CS_UNSUPPORTED_FUNCTION, "Usupported function" },
+    { CS_UNSUPPORTED_MODE, "Unsupported mode" },
+    { CS_BAD_SPEED, "Bad speed" },
+    { CS_BUSY, "Resource busy" },
+    { CS_GENERAL_FAILURE, "General failure" },
+    { CS_WRITE_PROTECTED, "Write protected" },
+    { CS_BAD_ARG_LENGTH, "Bad argument length" },
+    { CS_BAD_ARGS, "Bad arguments" },
+    { CS_CONFIGURATION_LOCKED, "Configuration locked" },
+    { CS_IN_USE, "Resource in use" },
+    { CS_NO_MORE_ITEMS, "No more items" },
+    { CS_OUT_OF_RESOURCE, "Out of resource" },
+    { CS_BAD_HANDLE, "Bad handle" },
+    { CS_BAD_TUPLE, "Bad CIS tuple" }
+};
+#define ERROR_COUNT (sizeof(error_table)/sizeof(lookup_t))
+
+/* Maps Card Services function codes to their names, used when
+   formatting error reports (ReportError). */
+static const lookup_t service_table[] = {
+    { AccessConfigurationRegister, "AccessConfigurationRegister" },
+    { AddSocketServices, "AddSocketServices" },
+    { AdjustResourceInfo, "AdjustResourceInfo" },
+    { CheckEraseQueue, "CheckEraseQueue" },
+    { CloseMemory, "CloseMemory" },
+    { DeregisterClient, "DeregisterClient" },
+    { DeregisterEraseQueue, "DeregisterEraseQueue" },
+    { GetCardServicesInfo, "GetCardServicesInfo" },
+    { GetClientInfo, "GetClientInfo" },
+    { GetConfigurationInfo, "GetConfigurationInfo" },
+    { GetEventMask, "GetEventMask" },
+    { GetFirstClient, "GetFirstClient" },
+    { GetFirstRegion, "GetFirstRegion" },
+    { GetFirstTuple, "GetFirstTuple" },
+    { GetNextClient, "GetNextClient" },
+    { GetNextRegion, "GetNextRegion" },
+    { GetNextTuple, "GetNextTuple" },
+    { GetStatus, "GetStatus" },
+    { GetTupleData, "GetTupleData" },
+    { MapMemPage, "MapMemPage" },
+    { ModifyConfiguration, "ModifyConfiguration" },
+    { ModifyWindow, "ModifyWindow" },
+    { OpenMemory, "OpenMemory" },
+    { ParseTuple, "ParseTuple" },
+    { ReadMemory, "ReadMemory" },
+    { RegisterClient, "RegisterClient" },
+    { RegisterEraseQueue, "RegisterEraseQueue" },
+    { RegisterMTD, "RegisterMTD" },
+    { ReleaseConfiguration, "ReleaseConfiguration" },
+    { ReleaseIO, "ReleaseIO" },
+    { ReleaseIRQ, "ReleaseIRQ" },
+    { ReleaseWindow, "ReleaseWindow" },
+    { RequestConfiguration, "RequestConfiguration" },
+    { RequestIO, "RequestIO" },
+    { RequestIRQ, "RequestIRQ" },
+    { RequestSocketMask, "RequestSocketMask" },
+    { RequestWindow, "RequestWindow" },
+    { ResetCard, "ResetCard" },
+    { SetEventMask, "SetEventMask" },
+    { ValidateCIS, "ValidateCIS" },
+    { WriteMemory, "WriteMemory" },
+    { BindDevice, "BindDevice" },
+    { BindMTD, "BindMTD" },
+    { ReportError, "ReportError" },
+    { SuspendCard, "SuspendCard" },
+    { ResumeCard, "ResumeCard" },
+    { EjectCard, "EjectCard" },
+    { InsertCard, "InsertCard" },
+    { ReplaceCIS, "ReplaceCIS" }
+};
+#define SERVICE_COUNT (sizeof(service_table)/sizeof(lookup_t))
+
+/*======================================================================
+
+ Reset a socket to the default state
+
+======================================================================*/
+
+/* Reset a socket to the default (dead) state: apply dead_socket
+   state via the socket-services entry point, then clear both I/O
+   maps (the '1' in the io initializer presumably marks the map
+   stopped/disabled — confirm against pccard_io_map) and all five
+   memory maps, with sys_stop set from the socket's map size. */
+static void init_socket(socket_info_t *s)
+{
+    int i;
+    pccard_io_map io = { 0, 0, 0, 0, 1 };
+    pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
+
+    mem.sys_stop = s->cap.map_size;
+    s->socket = dead_socket;
+    s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+    for (i = 0; i < 2; i++) {
+	io.map = i;
+	s->ss_entry(s->sock, SS_SetIOMap, &io);
+    }
+    for (i = 0; i < 5; i++) {
+	mem.map = i;
+	s->ss_entry(s->sock, SS_SetMemMap, &mem);
+    }
+}
+
+/*====================================================================*/
+
+#if defined(HAS_PROC_BUS) && defined(PCMCIA_DEBUG)
+/* /proc read handler (debug builds only): dump one line per client
+   bound to this socket — function number, device name, attributes,
+   and state.  data is the socket_info_t registered at setup time. */
+static int proc_read_clients(char *buf, char **start, off_t pos,
+			     int count, int *eof, void *data)
+{
+    socket_info_t *s = data;
+    client_handle_t c;
+    char *p = buf;
+
+    for (c = s->clients; c; c = c->next)
+	p += sprintf(p, "fn %x: '%s' [attr 0x%04x] [state 0x%04x]\n",
+		     c->Function, c->dev_info, c->Attributes, c->state);
+    return (p - buf);
+}
+#endif
+
+/*======================================================================
+
+ Low-level PC Card interface drivers need to register with Card
+ Services using these calls.
+
+======================================================================*/
+
+static void setup_socket(u_long i);
+static void shutdown_socket(u_long i);
+static void reset_socket(u_long i);
+static void unreset_socket(u_long i);
+static void parse_events(void *info, u_int events);
+
+/* Register a low-level socket-services driver providing nsock
+   sockets.  For each socket: allocate and zero a socket_info_t, set
+   up its timers (setup/shutdown callbacks), CIS memory speed, erase
+   queue and lock, slot it into the first free socket_table[] entry,
+   reset it, query its capabilities (SS_InquireSocket), and create
+   its /proc directory when available.  Returns 0 on success;
+   on allocation failure returns (!ns), i.e. nonzero only if no
+   socket at all was registered.  NOTE(review): s->setup.data and
+   s->shutdown.data are set to the global 'sockets' counter before
+   the table slot is chosen — this assumes new sockets always append;
+   verify against the slot-reuse loop below. */
+int register_ss_entry(int nsock, ss_entry_t ss_entry)
+{
+    int i, ns;
+    socket_info_t *s;
+
+    DEBUG(0, "cs: register_ss_entry(%d, 0x%p)\n", nsock, ss_entry);
+
+    for (ns = 0; ns < nsock; ns++) {
+	s = kmalloc(sizeof(struct socket_info_t), GFP_KERNEL);
+	if (!s) {
+	    printk(KERN_NOTICE "cs: memory allocation failure!\n");
+	    return (!ns);
+	}
+	memset(s, 0, sizeof(socket_info_t));
+
+	s->ss_entry = ss_entry;
+	s->sock = ns;
+	s->setup.data = sockets;
+	s->setup.function = &setup_socket;
+	s->shutdown.data = sockets;
+	s->shutdown.function = &shutdown_socket;
+	/* base address = 0, map = 0 */
+	s->cis_mem.flags = 0;
+	s->cis_mem.speed = cis_speed;
+	s->erase_busy.next = s->erase_busy.prev = &s->erase_busy;
+	spin_lock_init(&s->lock);
+
+	/* Reuse a vacated table slot if one exists, else append */
+	for (i = 0; i < sockets; i++)
+	    if (socket_table[i] == NULL) break;
+	socket_table[i] = s;
+	if (i == sockets) sockets++;
+
+	init_socket(s);
+	ss_entry(ns, SS_InquireSocket, &s->cap);
+#ifdef HAS_PROC_BUS
+	if (proc_pccard) {
+	    char name[3];
+	    sprintf(name, "%02d", i);
+	    s->proc = proc_mkdir(name, proc_pccard);
+	    if (s->proc)
+		ss_entry(ns, SS_ProcSetup, s->proc);
+#ifdef PCMCIA_DEBUG
+	    if (s->proc)
+		create_proc_read_entry("clients", 0, s->proc,
+				       proc_read_clients, s);
+#endif
+	}
+#endif
+    }
+
+    return 0;
+} /* register_ss_entry */
+
+/*====================================================================*/
+
+/* Unregister every socket owned by the given socket-services entry
+   point.  First pass removes the /proc entries; the main loop then
+   repeatedly finds a matching socket, shuts it down, releases its
+   CIS memory and client list, frees it, and compacts socket_table[]
+   by shifting later entries down. */
+void unregister_ss_entry(ss_entry_t ss_entry)
+{
+    int i, j;
+    socket_info_t *s = NULL;
+    client_t *client;
+
+#ifdef HAS_PROC_BUS
+    for (i = 0; i < sockets; i++) {
+	s = socket_table[i];
+	if (s->ss_entry != ss_entry) continue;
+	if (proc_pccard) {
+	    char name[3];
+	    sprintf(name, "%02d", i);
+#ifdef PCMCIA_DEBUG
+	    remove_proc_entry("clients", s->proc);
+#endif
+	    remove_proc_entry(name, proc_pccard);
+	}
+    }
+#endif
+
+    for (;;) {
+	/* Find the next socket belonging to this driver */
+	for (i = 0; i < sockets; i++) {
+	    s = socket_table[i];
+	    if (s->ss_entry == ss_entry) break;
+	}
+	if (i == sockets)
+	    break;
+	shutdown_socket(i);
+	release_cis_mem(s);
+	while (s->clients) {
+	    client = s->clients;
+	    s->clients = s->clients->next;
+	    kfree(client);
+	}
+	s->ss_entry = NULL;
+	kfree(s);
+	/* Compact the table so live sockets stay contiguous */
+	socket_table[i] = NULL;
+	for (j = i; j < sockets-1; j++)
+	    socket_table[j] = socket_table[j+1];
+	sockets--;
+    }
+
+} /* unregister_ss_entry */
+
+/*======================================================================
+
+ Shutdown_Socket() and setup_socket() are scheduled using add_timer
+ calls by the main event handler when card insertion and removal
+ events are received. Shutdown_Socket() unconfigures a socket and
+ turns off socket power. Setup_socket() turns on socket power
+ and resets the socket, in two stages.
+
+======================================================================*/
+
+/* Free a singly-linked list of memory region handles, clearing each
+   handle's magic before freeing so stale pointers fail validation. */
+static void free_regions(memory_handle_t *list)
+{
+    memory_handle_t tmp;
+    while (*list != NULL) {
+	tmp = *list;
+	*list = tmp->info.next;
+	tmp->region_magic = 0;
+	kfree(tmp);
+    }
+}
+
+static int send_event(socket_info_t *s, event_t event, int priority);
+
+/* Timer callback (i is a socket_table index): unconfigure a socket
+   after card removal.  Clears all socket state except presence and
+   pending-setup flags, re-applies the dead state via init_socket,
+   frees the fake CIS, configuration array, CardBus resources, any
+   clients already marked unbound, and both region lists. */
+static void shutdown_socket(u_long i)
+{
+    socket_info_t *s = socket_table[i];
+    client_t **c;
+
+    DEBUG(1, "cs: shutdown_socket(%ld)\n", i);
+
+    /* Blank out the socket state */
+    s->state &= SOCKET_PRESENT|SOCKET_SETUP_PENDING;
+    init_socket(s);
+    s->irq.AssignedIRQ = s->irq.Config = 0;
+    s->lock_count = 0;
+    s->cis_used = 0;
+    if (s->fake_cis) {
+	kfree(s->fake_cis);
+	s->fake_cis = NULL;
+    }
+#ifdef CONFIG_CARDBUS
+    cb_release_cis_mem(s);
+    cb_free(s);
+#endif
+    s->functions = 0;
+    if (s->config) {
+	kfree(s->config);
+	s->config = NULL;
+    }
+    /* Unlink and free clients flagged CLIENT_UNBOUND; keep the rest */
+    for (c = &s->clients; *c; ) {
+	if ((*c)->state & CLIENT_UNBOUND) {
+	    client_t *d = *c;
+	    *c = (*c)->next;
+	    kfree(d);
+	} else {
+	    c = &((*c)->next);
+	}
+    }
+    free_regions(&s->a_region);
+    free_regions(&s->c_region);
+} /* shutdown_socket */
+
+/* Timer callback: first stage of powering up a freshly-inserted
+   card.  If voltage interrogation is still pending, retry every
+   HZ/10 ticks up to 100 times.  Once a card is detected, choose Vcc
+   from the voltage-sense keys (3.3V, 5V, or refuse X.XV cards),
+   apply power, and rearm the timer to run reset_socket after the
+   vcc_settle delay.  No card detected means the insertion event was
+   spurious and nothing further happens. */
+static void setup_socket(u_long i)
+{
+    int val;
+    socket_info_t *s = socket_table[i];
+
+    s->ss_entry(s->sock, SS_GetStatus, &val);
+    if (val & SS_PENDING) {
+	/* Does the socket need more time? */
+	DEBUG(2, "cs: setup_socket(%ld): status pending\n", i);
+	if (++s->setup_timeout > 100) {
+	    printk(KERN_NOTICE "cs: socket %ld voltage interrogation"
+		   " timed out\n", i);
+	} else {
+	    mod_timer(&s->setup, jiffies + HZ/10);
+	}
+    } else if (val & SS_DETECT) {
+	DEBUG(1, "cs: setup_socket(%ld): applying power\n", i);
+	s->state |= SOCKET_PRESENT;
+	s->socket.flags = 0;
+	if (val & SS_3VCARD)
+	    s->socket.Vcc = s->socket.Vpp = 33;
+	else if (!(val & SS_XVCARD))
+	    s->socket.Vcc = s->socket.Vpp = 50;
+	else {
+	    printk(KERN_NOTICE "cs: socket %ld: unsupported "
+		   "voltage key\n", i);
+	    s->socket.Vcc = 0;
+	}
+	if (val & SS_CARDBUS) {
+	    s->state |= SOCKET_CARDBUS;
+#ifndef CONFIG_CARDBUS
+	    printk(KERN_NOTICE "cs: unsupported card type detected!\n");
+#endif
+	}
+	s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+	/* Stage two: hard-reset the card once Vcc has settled */
+	s->setup.function = &reset_socket;
+	mod_timer(&s->setup, jiffies + vcc_settle);
+    } else
+	DEBUG(0, "cs: setup_socket(%ld): no card!\n", i);
+} /* setup_socket */
+
+/*======================================================================
+
+ Reset_socket() and unreset_socket() handle hard resets. Resets
+ have several causes: card insertion, a call to reset_socket, or
+ recovery from a suspend/resume cycle. Unreset_socket() sends
+ a CS event that matches the cause of the reset.
+
+======================================================================*/
+
+/* Timer callback: pulse the socket's RESET line.  Asserts
+   SS_OUTPUT_ENA|SS_RESET, holds it for reset_time microseconds,
+   deasserts RESET, then arms the timer to poll readiness in
+   unreset_socket after unreset_delay ticks. */
+static void reset_socket(u_long i)
+{
+    socket_info_t *s = socket_table[i];
+
+    DEBUG(1, "cs: resetting socket %ld\n", i);
+    s->socket.flags |= SS_OUTPUT_ENA | SS_RESET;
+    s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+    udelay((long)reset_time);
+    s->socket.flags &= ~SS_RESET;
+    s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+    s->setup_timeout = 0;
+    s->setup.function = &unreset_socket;
+    mod_timer(&s->setup, jiffies + unreset_delay);
+} /* reset_socket */
+
+/* State bits cleared once the reset sequence completes or times out */
+#define EVENT_MASK \
+(SOCKET_SETUP_PENDING|SOCKET_SUSPEND|SOCKET_RESET_PENDING)
+
+/* Timer callback: final reset stage.  Polls SS_READY; once ready,
+   sends the CS event matching why the reset happened — PM_RESUME
+   after suspend (or a full re-detect if the CIS cache no longer
+   matches, i.e. the card was swapped while suspended),
+   CARD_INSERTION for a new card (allocating CardBus resources
+   first), or CARD_RESET/RESET_COMPLETE for an explicit ResetCard
+   request.  If not ready, re-polls every unreset_check ticks up to
+   unreset_limit times before giving up. */
+static void unreset_socket(u_long i)
+{
+    socket_info_t *s = socket_table[i];
+    int val;
+
+    s->ss_entry(s->sock, SS_GetStatus, &val);
+    if (val & SS_READY) {
+	DEBUG(1, "cs: reset done on socket %ld\n", i);
+	if (s->state & SOCKET_SUSPEND) {
+	    s->state &= ~EVENT_MASK;
+	    if (verify_cis_cache(s) != 0)
+		parse_events(s, SS_DETECT);
+	    else
+		send_event(s, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
+	} else if (s->state & SOCKET_SETUP_PENDING) {
+#ifdef CONFIG_CARDBUS
+	    if (s->state & SOCKET_CARDBUS)
+		cb_alloc(s);
+#endif
+	    send_event(s, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
+	    s->state &= ~SOCKET_SETUP_PENDING;
+	} else {
+	    send_event(s, CS_EVENT_CARD_RESET, CS_EVENT_PRI_LOW);
+	    if (s->reset_handle) {
+		s->reset_handle->event_callback_args.info = NULL;
+		EVENT(s->reset_handle, CS_EVENT_RESET_COMPLETE,
+		      CS_EVENT_PRI_LOW);
+		s->state &= ~EVENT_MASK;
+	    }
+	}
+    } else {
+	DEBUG(2, "cs: socket %ld not ready yet\n", i);
+	if (++s->setup_timeout > unreset_limit) {
+	    printk(KERN_NOTICE "cs: socket %ld timed out during"
+		   " reset\n", i);
+	    s->state &= ~EVENT_MASK;
+	} else {
+	    mod_timer(&s->setup, jiffies + unreset_check);
+	}
+    }
+} /* unreset_socket */
+
+/*======================================================================
+
+ The central event handler. Send_event() sends an event to all
+ valid clients. Parse_events() interprets the event bits from
+ a card status change report. Do_shotdown() handles the high
+ priority stuff associated with a card removal.
+
+======================================================================*/
+
+/* Deliver a CS event to every bound, non-stale client of a socket
+   whose EventMask includes the event.  Delivery stops at the first
+   client whose handler returns nonzero, and that status is returned;
+   otherwise returns the last handler's status (0 if no client took
+   the event). */
+static int send_event(socket_info_t *s, event_t event, int priority)
+{
+    client_t *client = s->clients;
+    int ret;
+    DEBUG(1, "cs: send_event(sock %d, event %d, pri %d)\n",
+	  s->sock, event, priority);
+    ret = 0;
+    for (; client; client = client->next) {
+	if (client->state & (CLIENT_UNBOUND|CLIENT_STALE))
+	    continue;
+	if (client->EventMask & event) {
+	    ret = EVENT(client, event, priority);
+	    if (ret != 0)
+		return ret;
+	}
+    }
+    return ret;
+} /* send_event */
+
+/* High-priority part of card removal.  Idempotent via
+   SOCKET_SHUTDOWN_PENDING: broadcasts CARD_REMOVAL immediately,
+   marks all non-master clients stale, cancels any in-flight
+   setup/reset timer, and schedules the (slower) shutdown_socket
+   timer to finish unconfiguring the socket. */
+static void do_shutdown(socket_info_t *s)
+{
+    client_t *client;
+    if (s->state & SOCKET_SHUTDOWN_PENDING)
+	return;
+    s->state |= SOCKET_SHUTDOWN_PENDING;
+    send_event(s, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
+    for (client = s->clients; client; client = client->next)
+	if (!(client->Attributes & INFO_MASTER_CLIENT))
+	    client->state |= CLIENT_STALE;
+    if (s->state & (SOCKET_SETUP_PENDING|SOCKET_RESET_PENDING)) {
+	DEBUG(0, "cs: flushing pending setup\n");
+	del_timer(&s->setup);
+	s->state &= ~EVENT_MASK;
+    }
+    mod_timer(&s->shutdown, jiffies + shutdown_delay);
+    s->state &= ~SOCKET_PRESENT;
+}
+
+/* Central status-change event handler, called by socket drivers
+   (info is the socket_info_t).  SS_DETECT changes are handled under
+   the socket spinlock: a vanished (or suspend-swapped) card triggers
+   do_shutdown, and a present card (re)schedules setup_socket — with
+   the longer resume_delay when coming out of suspend.  Battery and
+   ready-change bits are forwarded to clients as CS events; ready
+   changes are suppressed while a reset is in progress, since
+   unreset_socket polls for readiness itself. */
+static void parse_events(void *info, u_int events)
+{
+    socket_info_t *s = info;
+    if (events & SS_DETECT) {
+	int status;
+	u_long flags;
+	spin_lock_irqsave(&s->lock, flags);
+	s->ss_entry(s->sock, SS_GetStatus, &status);
+	if ((s->state & SOCKET_PRESENT) &&
+	    (!(s->state & SOCKET_SUSPEND) ||
+	     !(status & SS_DETECT)))
+	    do_shutdown(s);
+	if (status & SS_DETECT) {
+	    if (s->state & SOCKET_SETUP_PENDING) {
+		del_timer(&s->setup);
+		DEBUG(1, "cs: delaying pending setup\n");
+	    }
+	    s->state |= SOCKET_SETUP_PENDING;
+	    s->setup.function = &setup_socket;
+	    s->setup_timeout = 0;
+	    if (s->state & SOCKET_SUSPEND)
+		s->setup.expires = jiffies + resume_delay;
+	    else
+		s->setup.expires = jiffies + setup_delay;
+	    add_timer(&s->setup);
+	}
+	spin_unlock_irqrestore(&s->lock, flags);
+    }
+    if (events & SS_BATDEAD)
+	send_event(s, CS_EVENT_BATTERY_DEAD, CS_EVENT_PRI_LOW);
+    if (events & SS_BATWARN)
+	send_event(s, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW);
+    if (events & SS_READY) {
+	if (!(s->state & SOCKET_RESET_PENDING))
+	    send_event(s, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW);
+	else DEBUG(1, "cs: ready change during reset\n");
+    }
+} /* parse_events */
+
+/*======================================================================
+
+ Another event handler, for power management events.
+
+ This does not comply with the latest PC Card spec for handling
+ power management events.
+
+======================================================================*/
+
+#ifdef CONFIG_PM
+/* Power-management callback (APM-style prototype on kernels before
+   2.3.43, pm_dev-style after).  On suspend: send PM_SUSPEND to each
+   powered socket's clients, cut power (dead_socket), and mark the
+   socket suspended.  On resume: reinitialize every socket and let
+   parse_events(SS_DETECT) sort out whether a card was present,
+   removed, or swapped.  The static 'down' flag suppresses duplicate
+   suspend/resume notifications. */
+#if (LINUX_VERSION_CODE < VERSION(2,3,43))
+static int handle_pm_event(apm_event_t rqst)
+#else
+static int handle_pm_event(struct pm_dev *dev, pm_request_t rqst,
+			   void *data)
+#endif
+{
+    int i, stat;
+    socket_info_t *s;
+    static int down = 0;
+
+    /* <linux/pm.h> hides a hack so this works with old APM support */
+    switch (rqst) {
+    case PM_SUSPEND:
+	DEBUG(1, "cs: received suspend notification\n");
+	if (down) {
+	    printk(KERN_DEBUG "cs: received extra suspend event\n");
+	    break;
+	}
+	down = 1;
+	for (i = 0; i < sockets; i++) {
+	    s = socket_table[i];
+	    if ((s->state & SOCKET_PRESENT) &&
+		!(s->state & SOCKET_SUSPEND)){
+		send_event(s, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW);
+		s->ss_entry(s->sock, SS_SetSocket, &dead_socket);
+		s->state |= SOCKET_SUSPEND;
+	    }
+	}
+	break;
+    case PM_RESUME:
+	DEBUG(1, "cs: received resume notification\n");
+	if (!down) {
+	    printk(KERN_DEBUG "cs: received bogus resume event\n");
+	    break;
+	}
+	down = 0;
+	for (i = 0; i < sockets; i++) {
+	    s = socket_table[i];
+	    /* Do this just to reinitialize the socket */
+	    init_socket(s);
+	    s->ss_entry(s->sock, SS_GetStatus, &stat);
+	    /* If there was or is a card here, we need to do something
+	       about it... but parse_events will sort it all out. */
+	    if ((s->state & SOCKET_PRESENT) || (stat & SS_DETECT))
+		parse_events(s, SS_DETECT);
+	}
+	break;
+    }
+    return 0;
+} /* handle_pm_event */
+#endif
+
+/*======================================================================
+
+ Special stuff for managing IO windows, because they are scarce.
+
+======================================================================*/
+
+/* Allocate I/O port space for a card function.  *base == 0 means
+   "anywhere"; otherwise 'lines' gives the number of decoded address
+   lines, from which the required alignment (1<<lines) is derived.
+   Tries, in order: detect an obvious conflict with an existing
+   window, claim a free window slot via find_io_region, or extend an
+   existing same-attribute window upward or downward to cover the
+   request.  Returns 0 on success, 1 on failure (note: inverted
+   sense relative to most CS_* returns). */
+static int alloc_io_space(socket_info_t *s, u_int attr, ioaddr_t *base,
+			  ioaddr_t num, u_int lines, char *name)
+{
+    int i;
+    ioaddr_t try, align;
+
+    align = (*base) ? (lines ? 1<<lines : 0) : 1;
+    if (align && (align < num)) {
+	if (*base) {
+	    DEBUG(0, "odd IO request: num %04x align %04x\n",
+		  num, align);
+	    align = 0;
+	} else
+	    /* Grow alignment until the window fits */
+	    while (align && (align < num)) align <<= 1;
+    }
+    if (*base & ~(align-1)) {
+	DEBUG(0, "odd IO request: base %04x align %04x\n",
+	      *base, align);
+	while (*base & ~(align-1)) align <<= 1;
+    }
+    /* Check for an already-allocated window that must conflict with
+       what was asked for.  It is a hack because it does not catch all
+       potential conflicts, just the most obvious ones. */
+    for (i = 0; i < MAX_IO_WIN; i++)
+	if ((s->io[i].NumPorts != 0) &&
+	    ((s->io[i].BasePort & (align-1)) == *base))
+	    return 1;
+    for (i = 0; i < MAX_IO_WIN; i++) {
+	if (s->io[i].NumPorts == 0) {
+	    /* Free slot: claim a fresh region */
+	    if (find_io_region(base, num, align, name) == 0) {
+		s->io[i].Attributes = attr;
+		s->io[i].BasePort = *base;
+		s->io[i].NumPorts = s->io[i].InUse = num;
+		break;
+	    } else
+		return 1;
+	} else if (s->io[i].Attributes != attr)
+	    continue;
+	/* Try to extend top of window */
+	try = s->io[i].BasePort + s->io[i].NumPorts;
+	if ((*base == 0) || (*base == try))
+	    if (find_io_region(&try, num, 0, name) == 0) {
+		*base = try;
+		s->io[i].NumPorts += num;
+		s->io[i].InUse += num;
+		break;
+	    }
+	/* Try to extend bottom of window */
+	try = s->io[i].BasePort - num;
+	if ((*base == 0) || (*base == try))
+	    if (find_io_region(&try, num, 0, name) == 0) {
+		s->io[i].BasePort = *base = try;
+		s->io[i].NumPorts += num;
+		s->io[i].InUse += num;
+		break;
+	    }
+    }
+    /* i == MAX_IO_WIN means every strategy failed */
+    return (i == MAX_IO_WIN);
+} /* alloc_io_space */
+
+/* Release an I/O port range previously handed out by
+   alloc_io_space: give the region back to the kernel, then decrement
+   the InUse count of every socket window containing the range,
+   freeing a window entirely (NumPorts = 0) once nothing uses it. */
+static void release_io_space(socket_info_t *s, ioaddr_t base,
+			     ioaddr_t num)
+{
+    int i;
+    release_region(base, num);
+    for (i = 0; i < MAX_IO_WIN; i++) {
+	if ((s->io[i].BasePort <= base) &&
+	    (s->io[i].BasePort+s->io[i].NumPorts >= base+num)) {
+	    s->io[i].InUse -= num;
+	    /* Free the window if no one else is using it */
+	    if (s->io[i].InUse == 0)
+		s->io[i].NumPorts = 0;
+	}
+    }
+}
+
+/*======================================================================
+
+ Access_configuration_register() reads and writes configuration
+ registers in attribute memory. Memory window 0 is reserved for
+ this and the tuple reading services.
+
+======================================================================*/
+
+/* Read or write one card configuration register in attribute memory.
+   The card must be present and its configuration locked.  Returns a
+   CS_* status code; on CS_READ the result is stored in reg->Value. */
+static int access_configuration_register(client_handle_t handle,
+ conf_reg_t *reg)
+{
+ socket_info_t *s;
+ config_t *c;
+ int addr;
+ u_char val;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ /* A client bound to all functions must name the target function;
+    otherwise use the client's own configuration. */
+ if (handle->Function == BIND_FN_ALL) {
+ if (reg->Function >= s->functions)
+ return CS_BAD_ARGS;
+ c = &s->config[reg->Function];
+ } else
+ c = CONFIG(handle);
+ if (!(c->state & CONFIG_LOCKED))
+ return CS_CONFIGURATION_LOCKED;
+
+ /* Configuration registers sit at even attribute-memory offsets,
+    hence the >> 1 when forming the CIS address. */
+ addr = (c->ConfigBase + reg->Offset) >> 1;
+
+ switch (reg->Action) {
+ case CS_READ:
+ read_cis_mem(s, 1, addr, 1, &val);
+ reg->Value = val;
+ break;
+ case CS_WRITE:
+ val = reg->Value;
+ write_cis_mem(s, 1, addr, 1, &val);
+ break;
+ default:
+ /* Dead "break" after this return removed. */
+ return CS_BAD_ARGS;
+ }
+ return CS_SUCCESS;
+} /* access_configuration_register */
+
+/*======================================================================
+
+ Bind_device() associates a device driver with a particular socket.
+ It is normally called by Driver Services after it has identified
+ a newly inserted card. An instance of that driver will then be
+ eligible to register as a client of this socket.
+
+======================================================================*/
+
+/* Create an unbound client record tying a driver (req->dev_info) to a
+   socket; register_client() will later claim it.  Returns a CS_* code. */
+static int bind_device(bind_req_t *req)
+{
+ client_t *client;
+ socket_info_t *s;
+
+ if (CHECK_SOCKET(req->Socket))
+ return CS_BAD_SOCKET;
+ s = SOCKET(req);
+
+ client = (client_t *)kmalloc(sizeof(client_t), GFP_KERNEL);
+ if (!client) return CS_OUT_OF_RESOURCE;
+ memset(client, '\0', sizeof(client_t));
+ client->client_magic = CLIENT_MAGIC;
+ /* NOTE(review): strncpy does not guarantee NUL termination when the
+    source is exactly DEV_NAME_LEN long — presumably dev_info is sized
+    to allow this; confirm against the client_t declaration. */
+ strncpy(client->dev_info, (char *)req->dev_info, DEV_NAME_LEN);
+ client->Socket = req->Socket;
+ client->Function = req->Function;
+ client->state = CLIENT_UNBOUND;
+ /* Initialize the erase-busy list as an empty circular list. */
+ client->erase_busy.next = &client->erase_busy;
+ client->erase_busy.prev = &client->erase_busy;
+ init_waitqueue_head(&client->mtd_req);
+ /* Push onto the head of the socket's client list. */
+ client->next = s->clients;
+ s->clients = client;
+ DEBUG(1, "cs: bind_device(): client 0x%p, sock %d, dev %s\n",
+ client, client->Socket, client->dev_info);
+ return CS_SUCCESS;
+} /* bind_device */
+
+/*======================================================================
+
+ Bind_mtd() associates a device driver with a particular memory
+ region. It is normally called by Driver Services after it has
+ identified a memory device type. An instance of the corresponding
+ driver will then be able to register to control this region.
+
+======================================================================*/
+
+/* Associate an MTD driver name with the memory region at the requested
+   card offset.  Fails if no region matches or one is already claimed. */
+static int bind_mtd(mtd_bind_t *req)
+{
+ socket_info_t *s;
+ memory_handle_t region;
+
+ if (CHECK_SOCKET(req->Socket))
+ return CS_BAD_SOCKET;
+ s = SOCKET(req);
+
+ /* Pick the attribute- or common-memory region list. */
+ if (req->Attributes & REGION_TYPE_AM)
+ region = s->a_region;
+ else
+ region = s->c_region;
+
+ /* Linear search for a region starting at the given card offset. */
+ while (region) {
+ if (region->info.CardOffset == req->CardOffset) break;
+ region = region->info.next;
+ }
+ if (!region || (region->mtd != NULL))
+ return CS_BAD_OFFSET;
+ strncpy(region->dev_info, (char *)req->dev_info, DEV_NAME_LEN);
+
+ DEBUG(1, "cs: bind_mtd(): attr 0x%x, offset 0x%x, dev %s\n",
+ req->Attributes, req->CardOffset, (char *)req->dev_info);
+ return CS_SUCCESS;
+} /* bind_mtd */
+
+/*====================================================================*/
+
+/* Undo register_client()/bind_device(): refuse while the client still
+   holds IRQ/IO/config/window resources, detach any MTD links, then
+   either free the client record (stale or master clients) or just mark
+   it unbound.  The last client to leave deregisters the socket's
+   status-change callback. */
+static int deregister_client(client_handle_t handle)
+{
+ client_t **client;
+ socket_info_t *s;
+ memory_handle_t region;
+ u_long flags;
+ int i, sn;
+
+ DEBUG(1, "cs: deregister_client(%p)\n", handle);
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ /* Client must have released all its resources first. */
+ if (handle->state &
+ (CLIENT_IRQ_REQ|CLIENT_IO_REQ|CLIENT_CONFIG_LOCKED))
+ return CS_IN_USE;
+ for (i = 0; i < MAX_WIN; i++)
+ if (handle->state & CLIENT_WIN_REQ(i))
+ return CS_IN_USE;
+
+ /* Disconnect all MTD links */
+ s = SOCKET(handle);
+ if (handle->mtd_count) {
+ for (region = s->a_region; region; region = region->info.next)
+ if (region->mtd == handle) region->mtd = NULL;
+ for (region = s->c_region; region; region = region->info.next)
+ if (region->mtd == handle) region->mtd = NULL;
+ }
+
+ sn = handle->Socket; s = socket_table[sn];
+
+ if ((handle->state & CLIENT_STALE) ||
+ (handle->Attributes & INFO_MASTER_CLIENT)) {
+ /* Unlink from the socket's client list under the socket lock,
+    then free the record; clearing client_magic invalidates any
+    dangling handles. */
+ spin_lock_irqsave(&s->lock, flags);
+ client = &s->clients;
+ while ((*client) && ((*client) != handle))
+ client = &(*client)->next;
+ if (*client == NULL) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return CS_BAD_HANDLE;
+ }
+ *client = handle->next;
+ handle->client_magic = 0;
+ kfree(handle);
+ spin_unlock_irqrestore(&s->lock, flags);
+ } else {
+ /* Keep the record around for a future register_client(). */
+ handle->state = CLIENT_UNBOUND;
+ handle->mtd_count = 0;
+ handle->event_handler = NULL;
+ }
+
+ /* Last real client gone: drop the socket-services callback. */
+ if (--s->real_clients == 0)
+ s->ss_entry(sn, SS_RegisterCallback, NULL);
+
+ return CS_SUCCESS;
+} /* deregister_client */
+
+/*====================================================================*/
+
+/* Fill *config with the current configuration of the client's socket
+   function: power levels always; IRQ/IO/window details only when a
+   configuration is actually locked.  CardBus sockets take a separate
+   path that reports the bridge configuration. */
+static int get_configuration_info(client_handle_t handle,
+ config_info_t *config)
+{
+ socket_info_t *s;
+ config_t *c;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ /* Clients bound to all functions pick the function via the request;
+    others always report their own function. */
+ if (handle->Function == BIND_FN_ALL) {
+ if (config->Function && (config->Function >= s->functions))
+ return CS_BAD_ARGS;
+ } else
+ config->Function = handle->Function;
+
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS) {
+ u_char fn = config->Function;
+ memset(config, 0, sizeof(config_info_t));
+ config->Function = fn;
+ config->Vcc = s->socket.Vcc;
+ config->Vpp1 = config->Vpp2 = s->socket.Vpp;
+ config->Option = s->cap.cardbus;
+ config->IntType = INT_CARDBUS;
+ /* This is a nasty hack */
+ pcibios_read_config_dword(s->cap.cardbus, 0, 0, &config->ConfigBase);
+ if (s->cb_config) {
+ config->Attributes = CONF_VALID_CLIENT;
+ config->AssignedIRQ = s->irq.AssignedIRQ;
+ if (config->AssignedIRQ)
+ config->Attributes |= CONF_ENABLE_IRQ;
+ config->BasePort1 = s->io[0].BasePort;
+ config->NumPorts1 = s->io[0].NumPorts;
+ }
+ return CS_SUCCESS;
+ }
+#endif
+
+ c = (s->config != NULL) ? &s->config[config->Function] : NULL;
+
+ /* No locked configuration: report only the power settings. */
+ if ((c == NULL) || !(c->state & CONFIG_LOCKED)) {
+ config->Attributes = 0;
+ config->Vcc = s->socket.Vcc;
+ config->Vpp1 = config->Vpp2 = s->socket.Vpp;
+ return CS_SUCCESS;
+ }
+
+ /* !!! This is a hack !!! */
+ memcpy(&config->Attributes, &c->Attributes, sizeof(config_t));
+ config->Attributes |= CONF_VALID_CLIENT;
+ config->CardValues = c->CardValues;
+ config->IRQAttributes = c->irq.Attributes;
+ config->AssignedIRQ = s->irq.AssignedIRQ;
+ config->BasePort1 = c->io.BasePort1;
+ config->NumPorts1 = c->io.NumPorts1;
+ config->Attributes1 = c->io.Attributes1;
+ config->BasePort2 = c->io.BasePort2;
+ config->NumPorts2 = c->io.NumPorts2;
+ config->Attributes2 = c->io.Attributes2;
+ config->IOAddrLines = c->io.IOAddrLines;
+
+ return CS_SUCCESS;
+} /* get_configuration_info */
+
+/*======================================================================
+
+ Return information about this version of Card Services.
+
+======================================================================*/
+
+/* Report version/identity of this Card Services implementation and the
+   number of sockets currently managed. */
+static int get_card_services_info(servinfo_t *info)
+{
+ info->Signature[0] = 'C';
+ info->Signature[1] = 'S';
+ info->Count = sockets;
+ info->Revision = CS_RELEASE_CODE;
+ info->CSLevel = 0x0210;	/* Card Services spec level 2.10 */
+ info->VendorString = (char *)release;
+ return CS_SUCCESS;
+} /* get_card_services_info */
+
+/*======================================================================
+
+ Note that get_first_client() *does* recognize the Socket field
+ in the request structure.
+
+======================================================================*/
+
+/* Return the first client of a socket: the requested socket when
+   CLIENT_THIS_SOCKET is set, otherwise socket 0. */
+static int get_first_client(client_handle_t *handle, client_req_t *req)
+{
+ socket_t s;
+ if (req->Attributes & CLIENT_THIS_SOCKET)
+ s = req->Socket;
+ else
+ s = 0;
+ /* NOTE(review): req->Socket is validated even when it is not used
+    (CLIENT_THIS_SOCKET clear) — presumably callers always fill it in;
+    confirm against the Card Services API users. */
+ if (CHECK_SOCKET(req->Socket))
+ return CS_BAD_SOCKET;
+ if (socket_table[s]->clients == NULL)
+ return CS_NO_MORE_ITEMS;
+ *handle = socket_table[s]->clients;
+ return CS_SUCCESS;
+} /* get_first_client */
+
+/*====================================================================*/
+
+/* Advance *handle to the next client.  At the end of one socket's list,
+   wrap to the start of the same socket's list unless the caller asked
+   for this socket only. */
+static int get_next_client(client_handle_t *handle, client_req_t *req)
+{
+ socket_info_t *s;
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ if ((*handle)->next == NULL) {
+ if (req->Attributes & CLIENT_THIS_SOCKET)
+ return CS_NO_MORE_ITEMS;
+ s = SOCKET(*handle);
+ if (s->clients == NULL)
+ return CS_NO_MORE_ITEMS;
+ *handle = s->clients;
+ } else
+ *handle = (*handle)->next;
+ return CS_SUCCESS;
+} /* get_next_client */
+
+/*====================================================================*/
+
+/* Common worker for get_first_window()/get_next_window(): find the
+   first allocated memory window with index >= idx, describe it in *req,
+   and return its handle through *handle.  On entry *handle is a client
+   handle when idx == 0, a window handle otherwise. */
+static int get_window(window_handle_t *handle, int idx, win_req_t *req)
+{
+ socket_info_t *s;
+ window_t *win;
+ int w;
+
+ if (idx == 0)
+ s = SOCKET((client_handle_t)*handle);
+ else
+ s = (*handle)->sock;
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ for (w = idx; w < MAX_WIN; w++)
+ if (s->state & SOCKET_WIN_REQ(w)) break;
+ if (w == MAX_WIN)
+ return CS_NO_MORE_ITEMS;
+ win = &s->win[w];
+ req->Base = win->ctl.sys_start;
+ req->Size = win->ctl.sys_stop - win->ctl.sys_start + 1;
+ req->AccessSpeed = win->ctl.speed;
+ /* Translate controller map flags back into request attributes. */
+ req->Attributes = 0;
+ if (win->ctl.flags & MAP_ATTRIB)
+ req->Attributes |= WIN_MEMORY_TYPE_AM;
+ if (win->ctl.flags & MAP_ACTIVE)
+ req->Attributes |= WIN_ENABLE;
+ if (win->ctl.flags & MAP_16BIT)
+ req->Attributes |= WIN_DATA_WIDTH_16;
+ if (win->ctl.flags & MAP_USE_WAIT)
+ req->Attributes |= WIN_USE_WAIT;
+ *handle = win;
+ return CS_SUCCESS;
+} /* get_window */
+
+/* Start a window enumeration for this client's socket. */
+static int get_first_window(client_handle_t *handle, win_req_t *req)
+{
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ return get_window((window_handle_t *)handle, 0, req);
+}
+
+/* Continue a window enumeration from the window after *win. */
+static int get_next_window(window_handle_t *win, win_req_t *req)
+{
+ if ((win == NULL) || ((*win)->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ return get_window(win, (*win)->index+1, req);
+}
+
+/*======================================================================
+
+ Get the current socket state bits. We don't support the latched
+ SocketState yet: I haven't seen any point for it.
+
+======================================================================*/
+
+/* Report the current card/socket state.  Hardware status bits come from
+   the socket-services SS_GetStatus call; for an I/O card with a locked
+   configuration, write-protect/ready/battery bits are read from the
+   card's Pin Replacement and Extended Status registers instead. */
+static int cs_get_status(client_handle_t handle, cs_status_t *status)
+{
+ socket_info_t *s;
+ config_t *c;
+ int val;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ s->ss_entry(s->sock, SS_GetStatus, &val);
+ status->CardState = status->SocketState = 0;
+ status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0;
+ status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0;
+ status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0;
+ status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0;
+ if (s->state & SOCKET_SUSPEND)
+ status->CardState |= CS_EVENT_PM_SUSPEND;
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (s->state & SOCKET_SETUP_PENDING)
+ status->CardState |= CS_EVENT_CARD_INSERTION;
+
+ /* Get info from the PRR, if necessary */
+ if (handle->Function == BIND_FN_ALL) {
+ if (status->Function && (status->Function >= s->functions))
+ return CS_BAD_ARGS;
+ c = (s->config != NULL) ? &s->config[status->Function] : NULL;
+ } else
+ c = CONFIG(handle);
+ if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
+ (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
+ u_char reg;
+ if (c->Present & PRESENT_PIN_REPLACE) {
+ read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
+ status->CardState |=
+ (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
+ status->CardState |=
+ (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0;
+ status->CardState |=
+ (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0;
+ status->CardState |=
+ (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0;
+ } else {
+ /* No PRR? Then assume we're always ready */
+ status->CardState |= CS_EVENT_READY_CHANGE;
+ }
+ if (c->Present & PRESENT_EXT_STATUS) {
+ read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
+ status->CardState |=
+ (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
+ }
+ return CS_SUCCESS;
+ }
+ /* Memory card (or unlocked config): take the bits straight from the
+    socket controller status. */
+ status->CardState |=
+ (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0;
+ status->CardState |=
+ (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0;
+ status->CardState |=
+ (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0;
+ status->CardState |=
+ (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0;
+ return CS_SUCCESS;
+} /* cs_get_status */
+
+/*======================================================================
+
+ Change the card address of an already open memory window.
+
+======================================================================*/
+
+/* Report the card offset currently mapped by a memory window.  Only
+   page 0 is supported. */
+static int get_mem_page(window_handle_t win, memreq_t *req)
+{
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ req->Page = 0;
+ req->CardOffset = win->ctl.card_start;
+ return CS_SUCCESS;
+} /* get_mem_page */
+
+/* Re-point an open memory window at a new card offset by reprogramming
+   the socket controller.  Only page 0 is supported. */
+static int map_mem_page(window_handle_t win, memreq_t *req)
+{
+ socket_info_t *s;
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ if (req->Page != 0)
+ return CS_BAD_PAGE;
+ s = win->sock;
+ win->ctl.card_start = req->CardOffset;
+ if (s->ss_entry(s->sock, SS_SetMemMap, &win->ctl) != 0)
+ return CS_BAD_OFFSET;
+ return CS_SUCCESS;
+} /* map_mem_page */
+
+/*======================================================================
+
+ Modify a locked socket configuration
+
+======================================================================*/
+
+/* Modify a locked configuration in place.  Supports toggling the IRQ
+   enable and changing Vpp (both rails together only); Vcc changes are
+   rejected outright. */
+static int modify_configuration(client_handle_t handle,
+ modconf_t *mod)
+{
+ socket_info_t *s;
+ config_t *c;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle); c = CONFIG(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (!(c->state & CONFIG_LOCKED))
+ return CS_CONFIGURATION_LOCKED;
+
+ if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
+ if (mod->Attributes & CONF_ENABLE_IRQ) {
+ c->Attributes |= CONF_ENABLE_IRQ;
+ s->socket.io_irq = s->irq.AssignedIRQ;
+ } else {
+ c->Attributes &= ~CONF_ENABLE_IRQ;
+ s->socket.io_irq = 0;
+ }
+ /* Push the new IRQ routing to the socket controller. */
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ }
+
+ if (mod->Attributes & CONF_VCC_CHANGE_VALID)
+ return CS_BAD_VCC;
+
+ /* We only allow changing Vpp1 and Vpp2 to the same value */
+ if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
+ (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
+ if (mod->Vpp1 != mod->Vpp2)
+ return CS_BAD_VPP;
+ c->Vpp1 = c->Vpp2 = s->socket.Vpp = mod->Vpp1;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_BAD_VPP;
+ } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
+ (mod->Attributes & CONF_VPP2_CHANGE_VALID))
+ return CS_BAD_VPP;
+
+ return CS_SUCCESS;
+} /* modify_configuration */
+
+/*======================================================================
+
+ Modify the attributes of a window returned by RequestWindow.
+
+======================================================================*/
+
+/* Change the attributes and access speed of an open memory window and
+   reprogram the socket controller accordingly. */
+static int modify_window(window_handle_t win, modwin_t *req)
+{
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+
+ /* Rebuild the attribute/active flags from the request; the 16-bit
+    and wait-state flags are only ever added here, never cleared. */
+ win->ctl.flags &= ~(MAP_ATTRIB|MAP_ACTIVE);
+ if (req->Attributes & WIN_MEMORY_TYPE)
+ win->ctl.flags |= MAP_ATTRIB;
+ if (req->Attributes & WIN_ENABLE)
+ win->ctl.flags |= MAP_ACTIVE;
+ if (req->Attributes & WIN_DATA_WIDTH_16)
+ win->ctl.flags |= MAP_16BIT;
+ if (req->Attributes & WIN_USE_WAIT)
+ win->ctl.flags |= MAP_USE_WAIT;
+ win->ctl.speed = req->AccessSpeed;
+ win->sock->ss_entry(win->sock->sock, SS_SetMemMap, &win->ctl);
+
+ return CS_SUCCESS;
+} /* modify_window */
+
+/*======================================================================
+
+ Register_client() uses the dev_info_t handle to match the
+ caller with a socket. The driver must have already been bound
+ to a socket with bind_device() -- in fact, bind_device()
+ allocates the client structure that will be used.
+
+======================================================================*/
+
+/* Bind a caller to the unbound client record created for its dev_info
+   by bind_device().  The first client of a socket installs the event
+   callback and may kick off socket setup; the first non-CardBus client
+   also sizes and allocates the per-function config array.  Pending
+   insertion events are delivered or queued at the end. */
+static int register_client(client_handle_t *handle, client_reg_t *req)
+{
+ client_t *client;
+ socket_info_t *s;
+ socket_t ns;
+
+ /* Look for unbound client with matching dev_info */
+ client = NULL;
+ for (ns = 0; ns < sockets; ns++) {
+ client = socket_table[ns]->clients;
+ while (client != NULL) {
+ if ((strcmp(client->dev_info, (char *)req->dev_info) == 0)
+ && (client->state & CLIENT_UNBOUND)) break;
+ client = client->next;
+ }
+ if (client != NULL) break;
+ }
+ if (client == NULL)
+ return CS_OUT_OF_RESOURCE;
+
+ s = socket_table[ns];
+ if (++s->real_clients == 1) {
+ ss_callback_t call;
+ int status;
+ call.handler = &parse_events;
+ call.info = s;
+ s->ss_entry(ns, SS_RegisterCallback, &call);
+ s->ss_entry(ns, SS_GetStatus, &status);
+ if ((status & SS_DETECT) &&
+ !(s->state & SOCKET_SETUP_PENDING)) {
+ s->state |= SOCKET_SETUP_PENDING;
+ setup_socket(ns);
+ }
+ }
+
+ *handle = client;
+ client->state &= ~CLIENT_UNBOUND;
+ client->Socket = ns;
+ client->Attributes = req->Attributes;
+ client->EventMask = req->EventMask;
+ client->event_handler = req->event_handler;
+ client->event_callback_args = req->event_callback_args;
+ client->event_callback_args.client_handle = client;
+ client->event_callback_args.bus = s->cap.bus;
+
+ if (s->state & SOCKET_CARDBUS)
+ client->state |= CLIENT_CARDBUS;
+
+ if ((!(s->state & SOCKET_CARDBUS)) && (s->functions == 0) &&
+ (client->Function != BIND_FN_ALL)) {
+ cistpl_longlink_mfc_t mfc;
+ if (read_tuple(client, CISTPL_LONGLINK_MFC, &mfc)
+ == CS_SUCCESS)
+ s->functions = mfc.nfn;
+ else
+ s->functions = 1;
+ s->config = kmalloc(sizeof(config_t) * s->functions,
+ GFP_KERNEL);
+ /* BUGFIX: guard against allocation failure — the memset below
+    would otherwise dereference NULL on OOM.  Resetting
+    s->functions lets a later registration retry the allocation. */
+ if (s->config == NULL) {
+ s->functions = 0;
+ return CS_OUT_OF_RESOURCE;
+ }
+ memset(s->config, 0, sizeof(config_t) * s->functions);
+ }
+
+ DEBUG(1, "cs: register_client(): client 0x%p, sock %d, dev %s\n",
+ client, client->Socket, client->dev_info);
+ if (client->EventMask & CS_EVENT_REGISTRATION_COMPLETE)
+ EVENT(client, CS_EVENT_REGISTRATION_COMPLETE, CS_EVENT_PRI_LOW);
+ if ((socket_table[ns]->state & SOCKET_PRESENT) &&
+ !(socket_table[ns]->state & SOCKET_SETUP_PENDING)) {
+ if (client->EventMask & CS_EVENT_CARD_INSERTION)
+ EVENT(client, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
+ else
+ client->PendingEvents |= CS_EVENT_CARD_INSERTION;
+ }
+ return CS_SUCCESS;
+} /* register_client */
+
+/*====================================================================*/
+
+/* Release a locked configuration: on the last lock holder, power the
+   socket back to its idle state; drop this client's references on the
+   I/O windows, disabling each window whose last reference is gone.
+   Stale clients (card already ejected) skip the hardware teardown. */
+static int release_configuration(client_handle_t handle,
+ config_req_t *req)
+{
+ socket_info_t *s;
+ /* Template for an inactive I/O map (used to shut windows down). */
+ pccard_io_map io = { 0, 0, 0, 0, 1 };
+ int i;
+
+ if (CHECK_HANDLE(handle) ||
+ !(handle->state & CLIENT_CONFIG_LOCKED))
+ return CS_BAD_HANDLE;
+ handle->state &= ~CLIENT_CONFIG_LOCKED;
+ s = SOCKET(handle);
+
+#ifdef CONFIG_CARDBUS
+ if (handle->state & CLIENT_CARDBUS) {
+ cb_disable(s);
+ s->lock_count = 0;
+ return CS_SUCCESS;
+ }
+#endif
+
+ if (!(handle->state & CLIENT_STALE)) {
+ config_t *c = CONFIG(handle);
+ if (--(s->lock_count) == 0) {
+ /* Last configuration released: reset socket power/IRQ. */
+ s->socket.flags = SS_OUTPUT_ENA;
+ s->socket.Vpp = 0;
+ s->socket.io_irq = 0;
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ }
+ if (c->state & CONFIG_IO_REQ)
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (s->io[i].NumPorts == 0)
+ continue;
+ s->io[i].Config--;
+ if (s->io[i].Config != 0)
+ continue;
+ io.map = i;
+ s->ss_entry(s->sock, SS_SetIOMap, &io);
+ }
+ c->state &= ~CONFIG_LOCKED;
+ }
+
+ return CS_SUCCESS;
+} /* release_configuration */
+
+/*======================================================================
+
+ Release_io() releases the I/O ranges allocated by a client. This
+ may be invoked some time after a card ejection has already dumped
+ the actual socket configuration, so if the client is "stale", we
+ don't bother checking the port ranges against the current socket
+ values.
+
+======================================================================*/
+
+/* Release the I/O port ranges a client obtained with request_io().
+   For live (non-stale) clients, the request must match what was
+   recorded and the configuration must not still be locked. */
+static int release_io(client_handle_t handle, io_req_t *req)
+{
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle) || !(handle->state & CLIENT_IO_REQ))
+ return CS_BAD_HANDLE;
+ handle->state &= ~CLIENT_IO_REQ;
+ s = SOCKET(handle);
+
+#ifdef CONFIG_CARDBUS
+ if (handle->state & CLIENT_CARDBUS) {
+ cb_release(s);
+ return CS_SUCCESS;
+ }
+#endif
+
+ if (!(handle->state & CLIENT_STALE)) {
+ config_t *c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ /* The release must describe exactly the ranges requested. */
+ if ((c->io.BasePort1 != req->BasePort1) ||
+ (c->io.NumPorts1 != req->NumPorts1) ||
+ (c->io.BasePort2 != req->BasePort2) ||
+ (c->io.NumPorts2 != req->NumPorts2))
+ return CS_BAD_ARGS;
+ c->state &= ~CONFIG_IO_REQ;
+ }
+
+ release_io_space(s, req->BasePort1, req->NumPorts1);
+ if (req->NumPorts2)
+ release_io_space(s, req->BasePort2, req->NumPorts2);
+
+ return CS_SUCCESS;
+} /* release_io */
+
+/*====================================================================*/
+
+/* Release an IRQ obtained with cs_request_irq(): validate the request
+   against the recorded assignment, drop the socket's IRQ refcount,
+   unhook the handler if one was installed, and (ISA) return the line
+   to the free pool unless it is the shared PCI interrupt. */
+static int cs_release_irq(client_handle_t handle, irq_req_t *req)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle) || !(handle->state & CLIENT_IRQ_REQ))
+ return CS_BAD_HANDLE;
+ handle->state &= ~CLIENT_IRQ_REQ;
+ s = SOCKET(handle);
+
+ if (!(handle->state & CLIENT_STALE)) {
+ config_t *c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if (c->irq.Attributes != req->Attributes)
+ return CS_BAD_ATTRIBUTE;
+ if (s->irq.AssignedIRQ != req->AssignedIRQ)
+ return CS_BAD_IRQ;
+ if (--s->irq.Config == 0) {
+ c->state &= ~CONFIG_IRQ_REQ;
+ s->irq.AssignedIRQ = 0;
+ }
+ }
+
+ if (req->Attributes & IRQ_HANDLE_PRESENT) {
+ bus_free_irq(s->cap.bus, req->AssignedIRQ, req->Instance);
+ }
+
+#ifdef CONFIG_ISA
+ if (req->AssignedIRQ != s->cap.pci_irq)
+ undo_irq(req->Attributes, req->AssignedIRQ);
+#endif
+
+ return CS_SUCCESS;
+} /* cs_release_irq */
+
+/*====================================================================*/
+
+/* Tear down a memory window: deactivate the mapping in the socket
+   controller, release the system memory region, clear the client's and
+   socket's window-request bits, and invalidate the handle. */
+static int release_window(window_handle_t win)
+{
+ socket_info_t *s;
+
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ s = win->sock;
+ if (!(win->handle->state & CLIENT_WIN_REQ(win->index)))
+ return CS_BAD_HANDLE;
+
+ /* Shut down memory window */
+ win->ctl.flags &= ~MAP_ACTIVE;
+ s->ss_entry(s->sock, SS_SetMemMap, &win->ctl);
+ s->state &= ~SOCKET_WIN_REQ(win->index);
+
+ /* Release system memory */
+ release_mem_region(win->base, win->size);
+ win->handle->state &= ~CLIENT_WIN_REQ(win->index);
+
+ /* Invalidate the handle so later use is caught by the magic check. */
+ win->magic = 0;
+
+ return CS_SUCCESS;
+} /* release_window */
+
+/*====================================================================*/
+
+/* Lock a socket configuration: set power (Vpp only — Vcc changes are
+   ignored), program socket flags and IRQ routing, write the card's CIS
+   configuration registers as listed in req->Present, and activate the
+   previously requested I/O windows.  CardBus sockets take a short
+   separate path.  The exact ordering of hardware writes here matters. */
+static int request_configuration(client_handle_t handle,
+ config_req_t *req)
+{
+ int i;
+ u_int base;
+ socket_info_t *s;
+ config_t *c;
+ pccard_io_map iomap;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+#ifdef CONFIG_CARDBUS
+ if (handle->state & CLIENT_CARDBUS) {
+ if (!(req->IntType & INT_CARDBUS))
+ return CS_UNSUPPORTED_MODE;
+ if (s->lock_count != 0)
+ return CS_CONFIGURATION_LOCKED;
+ cb_enable(s);
+ handle->state |= CLIENT_CONFIG_LOCKED;
+ s->lock_count++;
+ return CS_SUCCESS;
+ }
+#endif
+
+ if (req->IntType & INT_CARDBUS)
+ return CS_UNSUPPORTED_MODE;
+ c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+
+ /* Do power control. We don't allow changes in Vcc. */
+ if (s->socket.Vcc != req->Vcc)
+ printk(KERN_DEBUG "cs: ignoring requested Vcc\n");
+ if (req->Vpp1 != req->Vpp2)
+ return CS_BAD_VPP;
+ s->socket.Vpp = req->Vpp1;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_BAD_VPP;
+
+ c->Vcc = req->Vcc; c->Vpp1 = c->Vpp2 = req->Vpp1;
+
+ /* Pick memory or I/O card, DMA mode, interrupt */
+ c->IntType = req->IntType;
+ c->Attributes = req->Attributes;
+ if (req->IntType & INT_MEMORY_AND_IO)
+ s->socket.flags |= SS_IOCARD;
+ if (req->IntType & INT_ZOOMED_VIDEO)
+ s->socket.flags |= SS_ZVCARD;
+ if (req->Attributes & CONF_ENABLE_DMA)
+ s->socket.flags |= SS_DMA_MODE;
+ if (req->Attributes & CONF_ENABLE_SPKR)
+ s->socket.flags |= SS_SPKR_ENA;
+ if (req->Attributes & CONF_ENABLE_IRQ)
+ s->socket.io_irq = s->irq.AssignedIRQ;
+ else
+ s->socket.io_irq = 0;
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ s->lock_count++;
+
+ /* Set up CIS configuration registers */
+ base = c->ConfigBase = req->ConfigBase;
+ c->Present = c->CardValues = req->Present;
+ if (req->Present & PRESENT_COPY) {
+ c->Copy = req->Copy;
+ write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &c->Copy);
+ }
+ if (req->Present & PRESENT_OPTION) {
+ /* Build the Configuration Option Register: multifunction cards
+    use a different index mask and need the enable bits set. */
+ if (s->functions == 1) {
+ c->Option = req->ConfigIndex & COR_CONFIG_MASK;
+ } else {
+ c->Option = req->ConfigIndex & COR_MFC_CONFIG_MASK;
+ c->Option |= COR_FUNC_ENA|COR_IREQ_ENA;
+ if (req->Present & PRESENT_IOBASE_0)
+ c->Option |= COR_ADDR_DECODE;
+ }
+ if (c->state & CONFIG_IRQ_REQ)
+ if (!(c->irq.Attributes & IRQ_FORCED_PULSE))
+ c->Option |= COR_LEVEL_REQ;
+ write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &c->Option);
+ /* Give the card time to come up after the COR write. */
+ mdelay(40);
+ }
+ if (req->Present & PRESENT_STATUS) {
+ c->Status = req->Status;
+ write_cis_mem(s, 1, (base + CISREG_CCSR)>>1, 1, &c->Status);
+ }
+ if (req->Present & PRESENT_PIN_REPLACE) {
+ c->Pin = req->Pin;
+ write_cis_mem(s, 1, (base + CISREG_PRR)>>1, 1, &c->Pin);
+ }
+ if (req->Present & PRESENT_EXT_STATUS) {
+ c->ExtStatus = req->ExtStatus;
+ write_cis_mem(s, 1, (base + CISREG_ESR)>>1, 1, &c->ExtStatus);
+ }
+ if (req->Present & PRESENT_IOBASE_0) {
+ /* I/O base is written a byte at a time, low byte first. */
+ u_char b = c->io.BasePort1 & 0xff;
+ write_cis_mem(s, 1, (base + CISREG_IOBASE_0)>>1, 1, &b);
+ b = (c->io.BasePort1 >> 8) & 0xff;
+ write_cis_mem(s, 1, (base + CISREG_IOBASE_1)>>1, 1, &b);
+ }
+ if (req->Present & PRESENT_IOSIZE) {
+ u_char b = c->io.NumPorts1 + c->io.NumPorts2 - 1;
+ write_cis_mem(s, 1, (base + CISREG_IOSIZE)>>1, 1, &b);
+ }
+
+ /* Configure I/O windows */
+ if (c->state & CONFIG_IO_REQ) {
+ iomap.speed = io_speed;
+ for (i = 0; i < MAX_IO_WIN; i++)
+ if (s->io[i].NumPorts != 0) {
+ iomap.map = i;
+ iomap.flags = MAP_ACTIVE;
+ switch (s->io[i].Attributes & IO_DATA_PATH_WIDTH) {
+ case IO_DATA_PATH_WIDTH_16:
+ iomap.flags |= MAP_16BIT; break;
+ case IO_DATA_PATH_WIDTH_AUTO:
+ iomap.flags |= MAP_AUTOSZ; break;
+ default:
+ break;
+ }
+ iomap.start = s->io[i].BasePort;
+ iomap.stop = iomap.start + s->io[i].NumPorts - 1;
+ s->ss_entry(s->sock, SS_SetIOMap, &iomap);
+ s->io[i].Config++;
+ }
+ }
+
+ c->state |= CONFIG_LOCKED;
+ handle->state |= CLIENT_CONFIG_LOCKED;
+ return CS_SUCCESS;
+} /* request_configuration */
+
+/*======================================================================
+
+ Request_io() reserves ranges of port addresses for a socket.
+ I have not implemented range sharing or alias addressing.
+
+======================================================================*/
+
+/* Reserve the I/O port range(s) described in *req for this client.
+   Shared and alias-access attributes are not implemented.  On CardBus
+   sockets this instead triggers the CardBus configuration path.  If
+   the second range cannot be allocated, the first is rolled back. */
+static int request_io(client_handle_t handle, io_req_t *req)
+{
+ socket_info_t *s;
+ config_t *c;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ if (handle->state & CLIENT_CARDBUS) {
+#ifdef CONFIG_CARDBUS
+ int ret = cb_config(s);
+ if (ret == CS_SUCCESS)
+ handle->state |= CLIENT_IO_REQ;
+ return ret;
+#else
+ return CS_UNSUPPORTED_FUNCTION;
+#endif
+ }
+
+ if (!req)
+ return CS_UNSUPPORTED_MODE;
+ c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if (c->state & CONFIG_IO_REQ)
+ return CS_IN_USE;
+ if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))
+ return CS_BAD_ATTRIBUTE;
+ if ((req->NumPorts2 > 0) &&
+ (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)))
+ return CS_BAD_ATTRIBUTE;
+
+ if (alloc_io_space(s, req->Attributes1, &req->BasePort1,
+ req->NumPorts1, req->IOAddrLines,
+ handle->dev_info))
+ return CS_IN_USE;
+
+ if (req->NumPorts2) {
+ if (alloc_io_space(s, req->Attributes2, &req->BasePort2,
+ req->NumPorts2, req->IOAddrLines,
+ handle->dev_info)) {
+ /* Roll back the first allocation on failure. */
+ release_io_space(s, req->BasePort1, req->NumPorts1);
+ return CS_IN_USE;
+ }
+ }
+
+ /* Record the granted request for validation at release time. */
+ c->io = *req;
+ c->state |= CONFIG_IO_REQ;
+ handle->state |= CLIENT_IO_REQ;
+ return CS_SUCCESS;
+} /* request_io */
+
+/*======================================================================
+
+ Request_irq() reserves an irq for this client.
+
+ Also, since Linux only reserves irq's when they are actually
+ hooked, we don't guarantee that an irq will still be available
+ when the configuration is locked. Now that I think about it,
+ there might be a way to fix this using a dummy handler.
+
+======================================================================*/
+
+/* Reserve an interrupt line for this client.  On ISA, either validate
+   the already-assigned line against the caller's constraints or scan
+   the allowed mask (two passes via try_irq) for a free one; fall back
+   to the PCI interrupt when nothing else works.  Optionally installs
+   the caller's handler, shared when sharing is possible. */
+static int cs_request_irq(client_handle_t handle, irq_req_t *req)
+{
+ socket_info_t *s;
+ config_t *c;
+ int ret = CS_IN_USE, irq = 0;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if (c->state & CONFIG_IRQ_REQ)
+ return CS_IN_USE;
+
+#ifdef CONFIG_ISA
+ if (s->irq.AssignedIRQ != 0) {
+ /* If the interrupt is already assigned, it must match */
+ irq = s->irq.AssignedIRQ;
+ if (req->IRQInfo1 & IRQ_INFO2_VALID) {
+ u_int mask = req->IRQInfo2 & s->cap.irq_mask;
+ ret = ((mask >> irq) & 1) ? 0 : CS_BAD_ARGS;
+ } else
+ ret = ((req->IRQInfo1&IRQ_MASK) == irq) ? 0 : CS_BAD_ARGS;
+ } else {
+ if (req->IRQInfo1 & IRQ_INFO2_VALID) {
+ /* Scan the permitted IRQ mask; try_irq's third argument
+    distinguishes the two passes. */
+ u_int try, mask = req->IRQInfo2 & s->cap.irq_mask;
+ for (try = 0; try < 2; try++) {
+ for (irq = 0; irq < 16; irq++)
+ if ((mask >> irq) & 1) {
+ ret = try_irq(req->Attributes, irq, try);
+ if (ret == 0) break;
+ }
+ if (ret == 0) break;
+ }
+ } else {
+ irq = req->IRQInfo1 & IRQ_MASK;
+ ret = try_irq(req->Attributes, irq, 1);
+ }
+ }
+#endif
+ /* ISA allocation failed (or not ISA): fall back to the PCI IRQ. */
+ if (ret != 0) {
+ if (!s->cap.pci_irq)
+ return ret;
+ irq = s->cap.pci_irq;
+ }
+
+ if (req->Attributes & IRQ_HANDLE_PRESENT) {
+ /* Share the line for multifunction cards, PCI interrupts, or
+    when the caller explicitly allows dynamic sharing. */
+ if (bus_request_irq(s->cap.bus, irq, req->Handler,
+ ((req->Attributes & IRQ_TYPE_DYNAMIC_SHARING) ||
+ (s->functions > 1) ||
+ (irq == s->cap.pci_irq)) ? SA_SHIRQ : 0,
+ handle->dev_info, req->Instance))
+ return CS_IN_USE;
+ }
+
+ c->irq.Attributes = req->Attributes;
+ s->irq.AssignedIRQ = req->AssignedIRQ = irq;
+ s->irq.Config++;
+
+ c->state |= CONFIG_IRQ_REQ;
+ handle->state |= CLIENT_IRQ_REQ;
+ return CS_SUCCESS;
+} /* cs_request_irq */
+
+/*======================================================================
+
+ Request_window() establishes a mapping between card memory space
+ and system memory space.
+
+======================================================================*/
+
+/* Map card memory into system memory: pick a free window slot, find a
+   suitable system memory region (unless the bridge uses static maps),
+   program the socket controller, and return the window handle through
+   *handle (which arrives as a client handle). */
+static int request_window(client_handle_t *handle, win_req_t *req)
+{
+ socket_info_t *s;
+ window_t *win;
+ u_long align;
+ int w;
+
+ if (CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(*handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (req->Attributes & (WIN_PAGED | WIN_SHARED))
+ return CS_BAD_ATTRIBUTE;
+
+ /* Window size defaults to smallest available */
+ if (req->Size == 0)
+ req->Size = s->cap.map_size;
+ /* Align to the window size when the bridge requires it (or the
+    caller asked for strict alignment), else to the map granularity. */
+ align = (((s->cap.features & SS_CAP_MEM_ALIGN) ||
+ (req->Attributes & WIN_STRICT_ALIGN)) ?
+ req->Size : s->cap.map_size);
+ if (req->Size & (s->cap.map_size-1))
+ return CS_BAD_SIZE;
+ if ((req->Base && (s->cap.features & SS_CAP_STATIC_MAP)) ||
+ (req->Base & (align-1)))
+ return CS_BAD_BASE;
+ /* A caller-supplied base is used as-is; no alignment search. */
+ if (req->Base)
+ align = 0;
+
+ /* Allocate system memory window */
+ for (w = 0; w < MAX_WIN; w++)
+ if (!(s->state & SOCKET_WIN_REQ(w))) break;
+ if (w == MAX_WIN)
+ return CS_OUT_OF_RESOURCE;
+
+ win = &s->win[w];
+ win->magic = WINDOW_MAGIC;
+ win->index = w;
+ win->handle = *handle;
+ win->sock = s;
+ win->base = req->Base;
+ win->size = req->Size;
+
+ if (!(s->cap.features & SS_CAP_STATIC_MAP) &&
+ find_mem_region(&win->base, win->size, align,
+ (req->Attributes & WIN_MAP_BELOW_1MB) ||
+ !(s->cap.features & SS_CAP_PAGE_REGS),
+ (*handle)->dev_info))
+ return CS_IN_USE;
+ (*handle)->state |= CLIENT_WIN_REQ(w);
+
+ /* Configure the socket controller */
+ win->ctl.map = w+1;
+ win->ctl.flags = 0;
+ win->ctl.speed = req->AccessSpeed;
+ if (req->Attributes & WIN_MEMORY_TYPE)
+ win->ctl.flags |= MAP_ATTRIB;
+ if (req->Attributes & WIN_ENABLE)
+ win->ctl.flags |= MAP_ACTIVE;
+ if (req->Attributes & WIN_DATA_WIDTH_16)
+ win->ctl.flags |= MAP_16BIT;
+ if (req->Attributes & WIN_USE_WAIT)
+ win->ctl.flags |= MAP_USE_WAIT;
+ win->ctl.sys_start = win->base;
+ win->ctl.sys_stop = win->base + win->size-1;
+ win->ctl.card_start = 0;
+ if (s->ss_entry(s->sock, SS_SetMemMap, &win->ctl) != 0)
+ return CS_BAD_ARGS;
+ s->state |= SOCKET_WIN_REQ(w);
+
+ /* Return window handle */
+ req->Base = win->ctl.sys_start;
+ *handle = (client_handle_t)win;
+
+ return CS_SUCCESS;
+} /* request_window */
+
+/*======================================================================
+
+ I'm not sure which "reset" function this is supposed to use,
+ but for now, it uses the low-level interface's reset, not the
+ CIS register.
+
+======================================================================*/
+
+static int reset_card(client_handle_t handle, client_req_t *req)
+{
+ int i, ret;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (s->state & SOCKET_RESET_PENDING)
+ return CS_IN_USE;
+ s->state |= SOCKET_RESET_PENDING;
+
+ ret = send_event(s, CS_EVENT_RESET_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret != 0) {
+ s->state &= ~SOCKET_RESET_PENDING;
+ handle->event_callback_args.info = (void *)(u_long)ret;
+ EVENT(handle, CS_EVENT_RESET_COMPLETE, CS_EVENT_PRI_LOW);
+ } else {
+ DEBUG(1, "cs: resetting socket %d\n", i);
+ send_event(s, CS_EVENT_RESET_PHYSICAL, CS_EVENT_PRI_LOW);
+ s->reset_handle = handle;
+ reset_socket(i);
+ }
+ return CS_SUCCESS;
+} /* reset_card */
+
+/*======================================================================
+
+ These shut down or wake up a socket. They are sort of user
+ initiated versions of the APM suspend and resume actions.
+
+======================================================================*/
+
+static int suspend_card(client_handle_t handle, client_req_t *req)
+{
+ int i;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (s->state & SOCKET_SUSPEND)
+ return CS_IN_USE;
+
+ DEBUG(1, "cs: suspending socket %d\n", i);
+ send_event(s, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW);
+ s->ss_entry(s->sock, SS_SetSocket, &dead_socket);
+ s->state |= SOCKET_SUSPEND;
+
+ return CS_SUCCESS;
+} /* suspend_card */
+
+static int resume_card(client_handle_t handle, client_req_t *req)
+{
+ int i;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (!(s->state & SOCKET_SUSPEND))
+ return CS_IN_USE;
+
+ DEBUG(1, "cs: waking up socket %d\n", i);
+ setup_socket(i);
+
+ return CS_SUCCESS;
+} /* resume_card */
+
+/*======================================================================
+
+ These handle user requests to eject or insert a card.
+
+======================================================================*/
+
+static int eject_card(client_handle_t handle, client_req_t *req)
+{
+ int i, ret;
+ socket_info_t *s;
+ u_long flags;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ DEBUG(1, "cs: user eject request on socket %d\n", i);
+
+ ret = send_event(s, CS_EVENT_EJECTION_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret != 0)
+ return ret;
+
+ spin_lock_irqsave(&s->lock, flags);
+ do_shutdown(s);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return CS_SUCCESS;
+
+} /* eject_card */
+
+static int insert_card(client_handle_t handle, client_req_t *req)
+{
+ int i, status;
+ socket_info_t *s;
+ u_long flags;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (s->state & SOCKET_PRESENT)
+ return CS_IN_USE;
+
+ DEBUG(1, "cs: user insert request on socket %d\n", i);
+
+ spin_lock_irqsave(&s->lock, flags);
+ if (!(s->state & SOCKET_SETUP_PENDING)) {
+ s->state |= SOCKET_SETUP_PENDING;
+ spin_unlock_irqrestore(&s->lock, flags);
+ s->ss_entry(i, SS_GetStatus, &status);
+ if (status & SS_DETECT)
+ setup_socket(i);
+ else {
+ s->state &= ~SOCKET_SETUP_PENDING;
+ return CS_NO_CARD;
+ }
+ } else
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return CS_SUCCESS;
+} /* insert_card */
+
+/*======================================================================
+
+ Maybe this should send a CS_EVENT_CARD_INSERTION event if we
+ haven't sent one to this client yet?
+
+======================================================================*/
+
+static int set_event_mask(client_handle_t handle, eventmask_t *mask)
+{
+ u_int events, bit;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ if (handle->Attributes & CONF_EVENT_MASK_VALID)
+ return CS_BAD_SOCKET;
+ handle->EventMask = mask->EventMask;
+ events = handle->PendingEvents & handle->EventMask;
+ handle->PendingEvents -= events;
+ while (events != 0) {
+ bit = ((events ^ (events-1)) + 1) >> 1;
+ EVENT(handle, bit, CS_EVENT_PRI_LOW);
+ events -= bit;
+ }
+ return CS_SUCCESS;
+} /* set_event_mask */
+
+/*====================================================================*/
+
+static int report_error(client_handle_t handle, error_info_t *err)
+{
+ int i;
+ char *serv;
+
+ if (CHECK_HANDLE(handle))
+ printk(KERN_NOTICE);
+ else
+ printk(KERN_NOTICE "%s: ", handle->dev_info);
+
+ for (i = 0; i < SERVICE_COUNT; i++)
+ if (service_table[i].key == err->func) break;
+ if (i < SERVICE_COUNT)
+ serv = service_table[i].msg;
+ else
+ serv = "Unknown service number";
+
+ for (i = 0; i < ERROR_COUNT; i++)
+ if (error_table[i].key == err->retcode) break;
+ if (i < ERROR_COUNT)
+ printk("%s: %s\n", serv, error_table[i].msg);
+ else
+ printk("%s: Unknown error code %#x\n", serv, err->retcode);
+
+ return CS_SUCCESS;
+} /* report_error */
+
+/*====================================================================*/
+
+int CardServices(int func, void *a1, void *a2, void *a3)
+{
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 2) {
+ int i;
+ for (i = 0; i < SERVICE_COUNT; i++)
+ if (service_table[i].key == func) break;
+ if (i < SERVICE_COUNT)
+ printk(KERN_DEBUG "cs: CardServices(%s, 0x%p, 0x%p)\n",
+ service_table[i].msg, a1, a2);
+ else
+ printk(KERN_DEBUG "cs: CardServices(Unknown func %d, "
+ "0x%p, 0x%p)\n", func, a1, a2);
+ }
+#endif
+ switch (func) {
+ case AccessConfigurationRegister:
+ return access_configuration_register(a1, a2); break;
+ case AdjustResourceInfo:
+ return adjust_resource_info(a1, a2); break;
+ case CheckEraseQueue:
+ return check_erase_queue(a1); break;
+ case CloseMemory:
+ return close_memory(a1); break;
+ case CopyMemory:
+ return copy_memory(a1, a2); break;
+ case DeregisterClient:
+ return deregister_client(a1); break;
+ case DeregisterEraseQueue:
+ return deregister_erase_queue(a1); break;
+ case GetFirstClient:
+ return get_first_client(a1, a2); break;
+ case GetCardServicesInfo:
+ return get_card_services_info(a1); break;
+ case GetConfigurationInfo:
+ return get_configuration_info(a1, a2); break;
+ case GetNextClient:
+ return get_next_client(a1, a2); break;
+ case GetFirstRegion:
+ return get_first_region(a1, a2); break;
+ case GetFirstTuple:
+ return get_first_tuple(a1, a2); break;
+ case GetNextRegion:
+ return get_next_region(a1, a2); break;
+ case GetNextTuple:
+ return get_next_tuple(a1, a2); break;
+ case GetStatus:
+ return cs_get_status(a1, a2); break;
+ case GetTupleData:
+ return get_tuple_data(a1, a2); break;
+ case MapMemPage:
+ return map_mem_page(a1, a2); break;
+ case ModifyConfiguration:
+ return modify_configuration(a1, a2); break;
+ case ModifyWindow:
+ return modify_window(a1, a2); break;
+ case OpenMemory:
+ return open_memory(a1, a2);
+ case ParseTuple:
+ return parse_tuple(a1, a2, a3); break;
+ case ReadMemory:
+ return read_memory(a1, a2, a3); break;
+ case RegisterClient:
+ return register_client(a1, a2); break;
+ case RegisterEraseQueue:
+ return register_erase_queue(a1, a2); break;
+ case RegisterMTD:
+ return register_mtd(a1, a2); break;
+ case ReleaseConfiguration:
+ return release_configuration(a1, a2); break;
+ case ReleaseIO:
+ return release_io(a1, a2); break;
+ case ReleaseIRQ:
+ return cs_release_irq(a1, a2); break;
+ case ReleaseWindow:
+ return release_window(a1); break;
+ case RequestConfiguration:
+ return request_configuration(a1, a2); break;
+ case RequestIO:
+ return request_io(a1, a2); break;
+ case RequestIRQ:
+ return cs_request_irq(a1, a2); break;
+ case RequestWindow:
+ return request_window(a1, a2); break;
+ case ResetCard:
+ return reset_card(a1, a2); break;
+ case SetEventMask:
+ return set_event_mask(a1, a2); break;
+ case ValidateCIS:
+ return validate_cis(a1, a2); break;
+ case WriteMemory:
+ return write_memory(a1, a2, a3); break;
+ case BindDevice:
+ return bind_device(a1); break;
+ case BindMTD:
+ return bind_mtd(a1); break;
+ case ReportError:
+ return report_error(a1, a2); break;
+ case SuspendCard:
+ return suspend_card(a1, a2); break;
+ case ResumeCard:
+ return resume_card(a1, a2); break;
+ case EjectCard:
+ return eject_card(a1, a2); break;
+ case InsertCard:
+ return insert_card(a1, a2); break;
+ case ReplaceCIS:
+ return replace_cis(a1, a2); break;
+ case GetFirstWindow:
+ return get_first_window(a1, a2); break;
+ case GetNextWindow:
+ return get_next_window(a1, a2); break;
+ case GetMemPage:
+ return get_mem_page(a1, a2); break;
+ default:
+ return CS_UNSUPPORTED_FUNCTION; break;
+ }
+
+} /* CardServices */
+
+/*======================================================================
+
+ OS-specific module glue goes here
+
+======================================================================*/
+
+#include <linux/pci.h>
+
+#if (LINUX_VERSION_CODE <= VERSION(2,1,17))
+
+#undef CONFIG_MODVERSIONS
+static struct symbol_table cs_symtab = {
+#include <linux/symtab_begin.h>
+#undef X
+#define X(sym) { (void *)&sym, SYMBOL_NAME_STR(sym) }
+ X(register_ss_entry),
+ X(unregister_ss_entry),
+ X(CardServices),
+ X(MTDHelperEntry),
+#ifdef HAS_PROC_BUS
+ X(proc_pccard),
+#endif
+#ifndef HAVE_MEMRESERVE
+ X(request_mem_region),
+ X(release_mem_region),
+#endif
+#ifdef CONFIG_PNP_BIOS
+ X(check_pnp_irq),
+#endif
+#ifdef CONFIG_PCI
+ X(pci_irq_mask),
+ X(pci_devices),
+ X(pci_root),
+ X(pci_find_slot),
+ X(pci_find_class),
+ X(pci_enable_device),
+ X(pci_set_power_state),
+#endif
+#include <linux/symtab_end.h>
+};
+
+#else
+
+EXPORT_SYMBOL(register_ss_entry);
+EXPORT_SYMBOL(unregister_ss_entry);
+EXPORT_SYMBOL(CardServices);
+EXPORT_SYMBOL(MTDHelperEntry);
+#ifdef HAS_PROC_BUS
+EXPORT_SYMBOL(proc_pccard);
+#endif
+#ifndef HAVE_MEMRESERVE
+EXPORT_SYMBOL(request_mem_region);
+EXPORT_SYMBOL(release_mem_region);
+#endif
+#ifdef CONFIG_PNP_BIOS
+EXPORT_SYMBOL(check_pnp_irq);
+#endif
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_irq_mask);
+#if (LINUX_VERSION_CODE < VERSION(2,3,24))
+EXPORT_SYMBOL(pci_enable_device);
+EXPORT_SYMBOL(pci_set_power_state);
+#endif
+#endif
+
+#endif
+
+int __init init_pcmcia_cs(void)
+{
+ printk(KERN_INFO "%s\n", release);
+#ifdef UTS_RELEASE
+ printk(KERN_INFO " %s\n", kernel);
+#endif
+ printk(KERN_INFO " %s\n", options);
+ DEBUG(0, "%s\n", version);
+#ifdef CONFIG_PM
+ if (do_apm)
+ pm_register(PM_SYS_DEV, PM_SYS_PCMCIA, handle_pm_event);
+#endif
+#ifdef CONFIG_PCI
+ pci_fixup_init();
+#endif
+#ifdef CONFIG_PNP_BIOS
+ if (do_pnp) {
+ pnp_bios_init();
+ pnp_proc_init();
+ pnp_rsrc_init();
+ }
+#endif
+ register_symtab(&cs_symtab);
+#ifdef HAS_PROC_BUS
+ proc_pccard = proc_mkdir("pccard", proc_bus);
+#ifdef CONFIG_PNP_BIOS
+ if (proc_pccard) {
+ create_proc_read_entry("ioport", 0, proc_pccard,
+ proc_read_io, NULL);
+ create_proc_read_entry("irq", 0, proc_pccard,
+ proc_read_irq, NULL);
+ }
+#endif
+#ifndef HAVE_MEMRESERVE
+ if (proc_pccard)
+ create_proc_read_entry("memory", 0, proc_pccard,
+ proc_read_mem, NULL);
+#endif
+#endif
+ return 0;
+}
+
+static void __exit exit_pcmcia_cs(void)
+{
+ printk(KERN_INFO "unloading PCMCIA Card Services\n");
+#ifdef HAS_PROC_BUS
+ if (proc_pccard) {
+#ifdef CONFIG_PNP_BIOS
+ remove_proc_entry("ioport", proc_pccard);
+ remove_proc_entry("irq", proc_pccard);
+#endif
+#ifndef HAVE_MEMRESERVE
+ remove_proc_entry("memory", proc_pccard);
+#endif
+ remove_proc_entry("pccard", proc_bus);
+ }
+#endif
+#ifdef CONFIG_PM
+ if (do_apm)
+ pm_unregister_all(handle_pm_event);
+#endif
+#ifdef CONFIG_PCI
+ pci_fixup_done();
+#endif
+#ifdef CONFIG_PNP_BIOS
+ if (do_pnp) {
+ pnp_proc_done();
+ pnp_rsrc_done();
+ }
+#endif
+ release_resource_db();
+}
+
+module_init(init_pcmcia_cs);
+module_exit(exit_pcmcia_cs);
diff --git a/linux/pcmcia-cs/modules/cs_internal.h b/linux/pcmcia-cs/modules/cs_internal.h
new file mode 100644
index 0000000..c9f98f8
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cs_internal.h
@@ -0,0 +1,300 @@
+/*
+ * cs_internal.h 1.58 2004/04/25 17:58:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CS_INTERNAL_H
+#define _LINUX_CS_INTERNAL_H
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+
+typedef struct erase_busy_t {
+ eraseq_entry_t *erase;
+ client_handle_t client;
+ struct timer_list timeout;
+ struct erase_busy_t *prev, *next;
+} erase_busy_t;
+
+#define ERASEQ_MAGIC 0xFA67
+typedef struct eraseq_t {
+ u_short eraseq_magic;
+ client_handle_t handle;
+ int count;
+ eraseq_entry_t *entry;
+} eraseq_t;
+
+#define CLIENT_MAGIC 0x51E6
+typedef struct client_t {
+ u_short client_magic;
+ socket_t Socket;
+ u_char Function;
+ dev_info_t dev_info;
+ u_int Attributes;
+ u_int state;
+ event_t EventMask, PendingEvents;
+ int (*event_handler)(event_t event, int priority,
+ event_callback_args_t *);
+ event_callback_args_t event_callback_args;
+ struct client_t *next;
+ u_int mtd_count;
+ wait_queue_head_t mtd_req;
+ erase_busy_t erase_busy;
+} client_t;
+
+/* Flags in client state */
+#define CLIENT_CONFIG_LOCKED 0x0001
+#define CLIENT_IRQ_REQ 0x0002
+#define CLIENT_IO_REQ 0x0004
+#define CLIENT_UNBOUND 0x0008
+#define CLIENT_STALE 0x0010
+#define CLIENT_WIN_REQ(i) (0x20<<(i))
+#define CLIENT_CARDBUS 0x8000
+
+typedef struct io_window_t {
+ u_int Attributes;
+ ioaddr_t BasePort, NumPorts;
+ ioaddr_t InUse, Config;
+} io_window_t;
+
+#define WINDOW_MAGIC 0xB35C
+typedef struct window_t {
+ u_short magic;
+ u_short index;
+ client_handle_t handle;
+ struct socket_info_t *sock;
+ u_long base;
+ u_long size;
+ pccard_mem_map ctl;
+} window_t;
+
+#define REGION_MAGIC 0xE3C9
+typedef struct region_t {
+ u_short region_magic;
+ u_short state;
+ dev_info_t dev_info;
+ client_handle_t mtd;
+ u_int MediaID;
+ region_info_t info;
+} region_t;
+
+#define REGION_STALE 0x01
+
+/* Each card function gets one of these guys */
+typedef struct config_t {
+ u_int state;
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+ u_int IntType;
+ u_int ConfigBase;
+ u_char Status, Pin, Copy, Option, ExtStatus;
+ u_int Present;
+ u_int CardValues;
+ io_req_t io;
+ struct {
+ u_int Attributes;
+ } irq;
+} config_t;
+
+/* Maximum number of IO windows per socket */
+#define MAX_IO_WIN 2
+
+/* Maximum number of memory windows per socket */
+#define MAX_WIN 4
+
+/* The size of the CIS cache */
+#define MAX_CIS_TABLE 64
+#define MAX_CIS_DATA 512
+
+typedef struct socket_info_t {
+#ifdef __SMP__
+ spinlock_t lock;
+#endif
+ ss_entry_t ss_entry;
+ u_int sock;
+ socket_state_t socket;
+ socket_cap_t cap;
+ u_int state;
+ u_short functions;
+ u_short lock_count;
+ client_handle_t clients;
+ u_int real_clients;
+ client_handle_t reset_handle;
+ struct timer_list setup, shutdown;
+ u_long setup_timeout;
+ pccard_mem_map cis_mem;
+ u_char *cis_virt;
+ config_t *config;
+#ifdef CONFIG_CARDBUS
+ u_int cb_cis_space;
+ cb_bridge_map cb_cis_map;
+ u_char *cb_cis_virt;
+ struct cb_config_t *cb_config;
+#endif
+ struct {
+ u_int AssignedIRQ;
+ u_int Config;
+ } irq;
+ io_window_t io[MAX_IO_WIN];
+ window_t win[MAX_WIN];
+ region_t *c_region, *a_region;
+ erase_busy_t erase_busy;
+ int cis_used;
+ struct {
+ u_int addr;
+ u_short len;
+ u_short attr;
+ } cis_table[MAX_CIS_TABLE];
+ char cis_cache[MAX_CIS_DATA];
+ u_int fake_cis_len;
+ char *fake_cis;
+#ifdef HAS_PROC_BUS
+ struct proc_dir_entry *proc;
+#endif
+} socket_info_t;
+
+/* Flags in config state */
+#define CONFIG_LOCKED 0x01
+#define CONFIG_IRQ_REQ 0x02
+#define CONFIG_IO_REQ 0x04
+
+/* Flags in socket state */
+#define SOCKET_PRESENT 0x0008
+#define SOCKET_SETUP_PENDING 0x0010
+#define SOCKET_SHUTDOWN_PENDING 0x0020
+#define SOCKET_RESET_PENDING 0x0040
+#define SOCKET_SUSPEND 0x0080
+#define SOCKET_WIN_REQ(i) (0x0100<<(i))
+#define SOCKET_IO_REQ(i) (0x1000<<(i))
+#define SOCKET_REGION_INFO 0x4000
+#define SOCKET_CARDBUS 0x8000
+
+#define CHECK_HANDLE(h) \
+ (((h) == NULL) || ((h)->client_magic != CLIENT_MAGIC))
+
+#define CHECK_SOCKET(s) \
+ (((s) >= sockets) || (socket_table[s]->ss_entry == NULL))
+
+#define SOCKET(h) (socket_table[(h)->Socket])
+#define CONFIG(h) (&SOCKET(h)->config[(h)->Function])
+
+#define CHECK_REGION(r) \
+ (((r) == NULL) || ((r)->region_magic != REGION_MAGIC))
+
+#define CHECK_ERASEQ(q) \
+ (((q) == NULL) || ((q)->eraseq_magic != ERASEQ_MAGIC))
+
+#define EVENT(h, e, p) \
+ ((h)->event_handler((e), (p), &(h)->event_callback_args))
+
+/* In cardbus.c */
+int cb_alloc(socket_info_t *s);
+void cb_free(socket_info_t *s);
+int cb_config(socket_info_t *s);
+void cb_release(socket_info_t *s);
+void cb_enable(socket_info_t *s);
+void cb_disable(socket_info_t *s);
+int read_cb_mem(socket_info_t *s, u_char fn, int space,
+ u_int addr, u_int len, void *ptr);
+void cb_release_cis_mem(socket_info_t *s);
+
+/* In cistpl.c */
+int read_cis_mem(socket_info_t *s, int attr,
+ u_int addr, u_int len, void *ptr);
+void write_cis_mem(socket_info_t *s, int attr,
+ u_int addr, u_int len, void *ptr);
+void release_cis_mem(socket_info_t *s);
+int verify_cis_cache(socket_info_t *s);
+void preload_cis_cache(socket_info_t *s);
+int get_first_tuple(client_handle_t handle, tuple_t *tuple);
+int get_next_tuple(client_handle_t handle, tuple_t *tuple);
+int get_tuple_data(client_handle_t handle, tuple_t *tuple);
+int parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse);
+int validate_cis(client_handle_t handle, cisinfo_t *info);
+int replace_cis(client_handle_t handle, cisdump_t *cis);
+int read_tuple(client_handle_t handle, cisdata_t code, void *parse);
+
+/* In bulkmem.c */
+void retry_erase_list(struct erase_busy_t *list, u_int cause);
+int get_first_region(client_handle_t handle, region_info_t *rgn);
+int get_next_region(client_handle_t handle, region_info_t *rgn);
+int register_mtd(client_handle_t handle, mtd_reg_t *reg);
+int register_erase_queue(client_handle_t *handle, eraseq_hdr_t *header);
+int deregister_erase_queue(eraseq_handle_t eraseq);
+int check_erase_queue(eraseq_handle_t eraseq);
+int open_memory(client_handle_t *handle, open_mem_t *open);
+int close_memory(memory_handle_t handle);
+int read_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf);
+int write_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf);
+int copy_memory(memory_handle_t handle, copy_op_t *req);
+
+/* In rsrc_mgr */
+void validate_mem(int (*is_valid)(u_long), int (*do_cksum)(u_long),
+ int force_low);
+int find_io_region(ioaddr_t *base, ioaddr_t num, ioaddr_t align,
+ char *name);
+int find_mem_region(u_long *base, u_long num, u_long align,
+ int force_low, char *name);
+int try_irq(u_int Attributes, int irq, int specific);
+void undo_irq(u_int Attributes, int irq);
+int adjust_resource_info(client_handle_t handle, adjust_t *adj);
+void release_resource_db(void);
+int proc_read_io(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data);
+int proc_read_mem(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data);
+
+/* in pnp components */
+int proc_read_irq(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data);
+int check_pnp_irq(int n);
+void pnp_bios_init(void);
+void pnp_proc_init(void);
+void pnp_proc_done(void);
+void pnp_rsrc_init(void);
+void pnp_rsrc_done(void);
+
+/* in pci_fixup */
+void pci_fixup_init(void);
+void pci_fixup_done(void);
+
+#define MAX_SOCK 8
+extern socket_t sockets;
+extern socket_info_t *socket_table[MAX_SOCK];
+
+#ifdef HAS_PROC_BUS
+extern struct proc_dir_entry *proc_pccard;
+#endif
+
+#ifdef PCMCIA_DEBUG
+extern int pc_debug;
+#define DEBUG(n, args...) do { if (pc_debug>(n)) printk(KERN_DEBUG args); } while (0)
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif
+
+#endif /* _LINUX_CS_INTERNAL_H */
diff --git a/linux/pcmcia-cs/modules/ds.c b/linux/pcmcia-cs/modules/ds.c
new file mode 100644
index 0000000..f2f3341
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ds.c
@@ -0,0 +1,1039 @@
+/*======================================================================
+
+ PC Card Driver Services
+
+ ds.c 1.115 2002/10/12 19:03:44
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/ioctl.h>
+#include <linux/proc_fs.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,23))
+#include <linux/poll.h>
+#endif
+
+/*
+ * <pcmcia/cs.h> defines io_req_t which is not used in this file, but
+ * which clashes with the io_req_t needed for the Mach devices. Rename
+ * it to cardservice_io_req_t to avoid this clash.
+ */
+#define io_req_t cardservice_io_req_t
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#undef io_req_t
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("PCMCIA Driver Services " CS_RELEASE);
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static const char *version =
+"ds.c 1.115 2002/10/12 19:03:44 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+typedef struct driver_info_t {
+ dev_info_t dev_info;
+ int use_count, status;
+ dev_link_t *(*attach)(void);
+ void (*detach)(dev_link_t *);
+ struct driver_info_t *next;
+} driver_info_t;
+
+typedef struct socket_bind_t {
+ driver_info_t *driver;
+ u_char function;
+ dev_link_t *instance;
+ struct socket_bind_t *next;
+} socket_bind_t;
+
+/* Device user information */
+#define MAX_EVENTS 32
+#define USER_MAGIC 0x7ea4
+#define CHECK_USER(u) \
+ (((u) == NULL) || ((u)->user_magic != USER_MAGIC))
+typedef struct user_info_t {
+ u_int user_magic;
+ int event_head, event_tail;
+ event_t event[MAX_EVENTS];
+ struct user_info_t *next;
+} user_info_t;
+
+/* Socket state information */
+typedef struct socket_info_t {
+ client_handle_t handle;
+ int state;
+ user_info_t *user;
+ int req_pending, req_result;
+ wait_queue_head_t queue, request;
+ struct timer_list removal;
+ socket_bind_t *bind;
+} socket_info_t;
+
+#define SOCKET_PRESENT 0x01
+#define SOCKET_BUSY 0x02
+#define SOCKET_REMOVAL_PENDING 0x10
+
+/*====================================================================*/
+
+/* Device driver ID passed to Card Services */
+static dev_info_t dev_info = "Driver Services";
+
+/* Linked list of all registered device drivers */
+static driver_info_t *root_driver = NULL;
+
+static int sockets = 0, major_dev = -1;
+static socket_info_t *socket_table = NULL;
+
+extern struct proc_dir_entry *proc_pccard;
+
+/* We use this to distinguish in-kernel from modular drivers */
+static int init_status = 1;
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ Register_pccard_driver() and unregister_pccard_driver() are used
+ tell Driver Services that a PC Card client driver is available to
+ be bound to sockets.
+
+======================================================================*/
+
+int register_pccard_driver(dev_info_t *dev_info,
+ dev_link_t *(*attach)(void),
+ void (*detach)(dev_link_t *))
+{
+ driver_info_t *driver;
+ socket_bind_t *b;
+ int i;
+
+ DEBUG(0, "ds: register_pccard_driver('%s')\n", (char *)dev_info);
+ for (driver = root_driver; driver; driver = driver->next)
+ if (strncmp((char *)dev_info, (char *)driver->dev_info,
+ DEV_NAME_LEN) == 0)
+ break;
+ if (!driver) {
+ driver = kmalloc(sizeof(driver_info_t), GFP_KERNEL);
+ if (!driver) return -ENOMEM;
+ strncpy(driver->dev_info, (char *)dev_info, DEV_NAME_LEN);
+ driver->use_count = 0;
+ driver->status = init_status;
+ driver->next = root_driver;
+ root_driver = driver;
+ }
+
+ driver->attach = attach;
+ driver->detach = detach;
+ if (driver->use_count == 0) return 0;
+
+ /* Instantiate any already-bound devices */
+ for (i = 0; i < sockets; i++)
+ for (b = socket_table[i].bind; b; b = b->next) {
+ if (b->driver != driver) continue;
+ b->instance = driver->attach();
+ if (b->instance == NULL)
+ printk(KERN_NOTICE "ds: unable to create instance "
+ "of '%s'!\n", driver->dev_info);
+ }
+
+ return 0;
+} /* register_pccard_driver */
+
+/*====================================================================*/
+
+int unregister_pccard_driver(dev_info_t *dev_info)
+{
+ driver_info_t *target, **d = &root_driver;
+ socket_bind_t *b;
+ int i;
+
+ DEBUG(0, "ds: unregister_pccard_driver('%s')\n",
+ (char *)dev_info);
+ while ((*d) && (strncmp((*d)->dev_info, (char *)dev_info,
+ DEV_NAME_LEN) != 0))
+ d = &(*d)->next;
+ if (*d == NULL)
+ return -ENODEV;
+
+ target = *d;
+ if (target->use_count == 0) {
+ *d = target->next;
+ kfree(target);
+ } else {
+ /* Blank out any left-over device instances */
+ target->attach = NULL; target->detach = NULL;
+ for (i = 0; i < sockets; i++)
+ for (b = socket_table[i].bind; b; b = b->next)
+ if (b->driver == target) b->instance = NULL;
+ }
+ return 0;
+} /* unregister_pccard_driver */
+
+/*====================================================================*/
+
+#ifdef HAS_PROC_BUS
+static int proc_read_drivers(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ driver_info_t *d;
+ char *p = buf;
+ for (d = root_driver; d; d = d->next)
+ p += sprintf(p, "%-24.24s %d %d\n", d->dev_info,
+ d->status, d->use_count);
+ return (p - buf);
+}
+#endif
+
+/*======================================================================
+
+ These manage a ring buffer of events pending for one user process
+
+======================================================================*/
+
+static int queue_empty(user_info_t *user)
+{
+ return (user->event_head == user->event_tail);
+}
+
+static event_t get_queued_event(user_info_t *user)
+{
+ user->event_tail = (user->event_tail+1) % MAX_EVENTS;
+ return user->event[user->event_tail];
+}
+
+static void queue_event(user_info_t *user, event_t event)
+{
+ user->event_head = (user->event_head+1) % MAX_EVENTS;
+ if (user->event_head == user->event_tail)
+ user->event_tail = (user->event_tail+1) % MAX_EVENTS;
+ user->event[user->event_head] = event;
+}
+
+static void handle_event(socket_info_t *s, event_t event)
+{
+ user_info_t *user;
+ for (user = s->user; user; user = user->next)
+ queue_event(user, event);
+ wake_up_interruptible(&s->queue);
+}
+
+static int handle_request(socket_info_t *s, event_t event)
+{
+ if (s->req_pending != 0)
+ return CS_IN_USE;
+ if (s->state & SOCKET_BUSY)
+ s->req_pending = 1;
+ handle_event(s, event);
+ if (s->req_pending > 0) {
+ interruptible_sleep_on(&s->request);
+ if (signal_pending(current))
+ return CS_IN_USE;
+ else
+ return s->req_result;
+ }
+ return CS_SUCCESS;
+}
+
+static void handle_removal(u_long sn)
+{
+ socket_info_t *s = &socket_table[sn];
+ handle_event(s, CS_EVENT_CARD_REMOVAL);
+ s->state &= ~SOCKET_REMOVAL_PENDING;
+}
+
+/*======================================================================
+
+ The card status event handler.
+
+======================================================================*/
+
+static int ds_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ socket_info_t *s;
+ int i;
+
+ DEBUG(1, "ds: ds_event(0x%06x, %d, 0x%p)\n",
+ event, priority, args->client_handle);
+ s = args->client_data;
+ i = s - socket_table;
+
+ switch (event) {
+
+ case CS_EVENT_CARD_REMOVAL:
+ s->state &= ~SOCKET_PRESENT;
+ if (!(s->state & SOCKET_REMOVAL_PENDING)) {
+ s->state |= SOCKET_REMOVAL_PENDING;
+ s->removal.expires = jiffies + HZ/10;
+ add_timer(&s->removal);
+ }
+ break;
+
+ case CS_EVENT_CARD_INSERTION:
+ s->state |= SOCKET_PRESENT;
+ handle_event(s, event);
+ break;
+
+ case CS_EVENT_EJECTION_REQUEST:
+ return handle_request(s, event);
+ break;
+
+ default:
+ handle_event(s, event);
+ break;
+ }
+
+ return 0;
+} /* ds_event */
+
+/*======================================================================
+
+ bind_mtd() connects a memory region with an MTD client.
+
+======================================================================*/
+
+static int bind_mtd(int i, mtd_info_t *mtd_info)
+{
+ mtd_bind_t bind_req;
+ int ret;
+
+ bind_req.dev_info = &mtd_info->dev_info;
+ bind_req.Attributes = mtd_info->Attributes;
+ bind_req.Socket = i;
+ bind_req.CardOffset = mtd_info->CardOffset;
+ ret = CardServices(BindMTD, &bind_req);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, BindMTD, ret);
+ printk(KERN_NOTICE "ds: unable to bind MTD '%s' to socket %d"
+ " offset 0x%x\n",
+ (char *)bind_req.dev_info, i, bind_req.CardOffset);
+ return -ENODEV;
+ }
+ return 0;
+} /* bind_mtd */
+
+/*======================================================================
+
+ bind_request() connects a socket to a particular client driver.
+ It looks up the specified device ID in the list of registered
+ drivers, binds it to the socket, and tries to create an instance
+ of the device. unbind_request() deletes a driver instance.
+
+======================================================================*/
+
+static int bind_request(int i, bind_info_t *bind_info)
+{
+ struct driver_info_t *driver;
+ socket_bind_t *b;
+ bind_req_t bind_req;
+ socket_info_t *s = &socket_table[i];
+ int ret;
+
+ DEBUG(2, "bind_request(%d, '%s')\n", i,
+ (char *)bind_info->dev_info);
+ for (driver = root_driver; driver; driver = driver->next)
+ if (strcmp((char *)driver->dev_info,
+ (char *)bind_info->dev_info) == 0)
+ break;
+ if (driver == NULL) {
+ driver = kmalloc(sizeof(driver_info_t), GFP_KERNEL);
+ if (!driver) return -ENOMEM;
+ strncpy(driver->dev_info, bind_info->dev_info, DEV_NAME_LEN);
+ driver->use_count = 0;
+ driver->next = root_driver;
+ driver->attach = NULL; driver->detach = NULL;
+ root_driver = driver;
+ }
+
+ for (b = s->bind; b; b = b->next)
+ if ((driver == b->driver) &&
+ (bind_info->function == b->function))
+ break;
+ if (b != NULL) {
+ bind_info->instance = b->instance;
+ return -EBUSY;
+ }
+ b = kmalloc(sizeof(socket_bind_t), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ bind_req.Socket = i;
+ bind_req.Function = bind_info->function;
+ bind_req.dev_info = &driver->dev_info;
+ ret = CardServices(BindDevice, &bind_req);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, BindDevice, ret);
+ printk(KERN_NOTICE "ds: unable to bind '%s' to socket %d\n",
+ (char *)dev_info, i);
+ kfree(b);
+ return -ENODEV;
+ }
+
+ /* Add binding to list for this socket */
+ driver->use_count++;
+ b->driver = driver;
+ b->function = bind_info->function;
+ b->instance = NULL;
+ b->next = s->bind;
+ s->bind = b;
+
+ if (driver->attach) {
+ b->instance = driver->attach();
+ if (b->instance == NULL) {
+ printk(KERN_NOTICE "ds: unable to create instance "
+ "of '%s'!\n", (char *)bind_info->dev_info);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+} /* bind_request */
+
+/*====================================================================*/
+
+static int get_device_info(int i, bind_info_t *bind_info, int first)
+{
+ socket_info_t *s = &socket_table[i];
+ socket_bind_t *b;
+ dev_node_t *node;
+
+ for (b = s->bind; b; b = b->next)
+ if ((strcmp((char *)b->driver->dev_info,
+ (char *)bind_info->dev_info) == 0) &&
+ (b->function == bind_info->function))
+ break;
+ if (b == NULL) return -ENODEV;
+ if ((b->instance == NULL) ||
+ (b->instance->state & DEV_CONFIG_PENDING))
+ return -EAGAIN;
+ if (first)
+ node = b->instance->dev;
+ else
+ for (node = b->instance->dev; node; node = node->next)
+ if (node == bind_info->next) break;
+ if (node == NULL) return -ENODEV;
+
+ strncpy(bind_info->name, node->dev_name, DEV_NAME_LEN);
+ bind_info->name[DEV_NAME_LEN-1] = '\0';
+ bind_info->major = node->major;
+ bind_info->minor = node->minor;
+ bind_info->next = node->next;
+
+ return 0;
+} /* get_device_info */
+
+/*====================================================================*/
+
+static int unbind_request(int i, bind_info_t *bind_info)
+{
+ socket_info_t *s = &socket_table[i];
+ socket_bind_t **b, *c;
+
+ DEBUG(2, "unbind_request(%d, '%s')\n", i,
+ (char *)bind_info->dev_info);
+ for (b = &s->bind; *b; b = &(*b)->next)
+ if ((strcmp((char *)(*b)->driver->dev_info,
+ (char *)bind_info->dev_info) == 0) &&
+ ((*b)->function == bind_info->function))
+ break;
+ if (*b == NULL)
+ return -ENODEV;
+
+ c = *b;
+ c->driver->use_count--;
+ if (c->driver->detach) {
+ if (c->instance)
+ c->driver->detach(c->instance);
+ } else {
+ if (c->driver->use_count == 0) {
+ driver_info_t **d;
+ for (d = &root_driver; *d; d = &((*d)->next))
+ if (c->driver == *d) break;
+ *d = (*d)->next;
+ kfree(c->driver);
+ }
+ }
+ *b = c->next;
+ kfree(c);
+
+ return 0;
+} /* unbind_request */
+
+/*======================================================================
+
+ The user-mode PC Card device interface
+
+======================================================================*/
+
+/* Disable all the ds filesystem operations. */
+#ifndef MACH
+
+static int ds_open(struct inode *inode, struct file *file)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(0, "ds_open(socket %d)\n", i);
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ s = &socket_table[i];
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ if (s->state & SOCKET_BUSY)
+ return -EBUSY;
+ else
+ s->state |= SOCKET_BUSY;
+ }
+
+ MOD_INC_USE_COUNT;
+ user = kmalloc(sizeof(user_info_t), GFP_KERNEL);
+ if (!user) {
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+ user->event_tail = user->event_head = 0;
+ user->next = s->user;
+ user->user_magic = USER_MAGIC;
+ s->user = user;
+ file->private_data = user;
+
+ if (s->state & SOCKET_PRESENT)
+ queue_event(user, CS_EVENT_CARD_INSERTION);
+ return 0;
+} /* ds_open */
+
+/*====================================================================*/
+
+static FS_RELEASE_T ds_release(struct inode *inode, struct file *file)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ user_info_t *user, **link;
+
+ DEBUG(0, "ds_release(socket %d)\n", i);
+ if ((i >= sockets) || (sockets == 0))
+ return (FS_RELEASE_T)0;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return (FS_RELEASE_T)0;
+
+ /* Unlink user data structure */
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+ s->state &= ~SOCKET_BUSY;
+ file->private_data = NULL;
+ for (link = &s->user; *link; link = &(*link)->next)
+ if (*link == user) break;
+ if (link == NULL)
+ return (FS_RELEASE_T)0;
+ *link = user->next;
+ user->user_magic = 0;
+ kfree(user);
+
+ MOD_DEC_USE_COUNT;
+ return (FS_RELEASE_T)0;
+} /* ds_release */
+
+/*====================================================================*/
+
+static ssize_t ds_read FOPS(struct inode *inode,
+ struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ socket_t i = MINOR(F_INODE(file)->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_read(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ if (count < 4)
+ return -EINVAL;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return -EIO;
+
+ if (queue_empty(user)) {
+ interruptible_sleep_on(&s->queue);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ put_user(get_queued_event(user), (int *)buf);
+ return 4;
+} /* ds_read */
+
+/*====================================================================*/
+
+static ssize_t ds_write FOPS(struct inode *inode,
+ struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ socket_t i = MINOR(F_INODE(file)->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_write(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ if (count != 4)
+ return -EINVAL;
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EBADF;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return -EIO;
+
+ if (s->req_pending) {
+ s->req_pending--;
+ get_user(s->req_result, (int *)buf);
+ if ((s->req_result != 0) || (s->req_pending == 0))
+ wake_up_interruptible(&s->request);
+ } else
+ return -EIO;
+
+ return 4;
+} /* ds_write */
+
+/*====================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,1,23))
+
+static int ds_select(struct inode *inode, struct file *file,
+ int sel_type, select_table *wait)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_select(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return -EIO;
+ if (sel_type != SEL_IN)
+ return 0;
+ if (!queue_empty(user))
+ return 1;
+ select_wait(&s->queue, wait);
+ return 0;
+} /* ds_select */
+
+#else
+
+static u_int ds_poll(struct file *file, poll_table *wait)
+{
+ socket_t i = MINOR(F_INODE(file)->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_poll(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return POLLERR;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return POLLERR;
+ POLL_WAIT(file, &s->queue, wait);
+ if (!queue_empty(user))
+ return POLLIN | POLLRDNORM;
+ return 0;
+} /* ds_poll */
+
+#endif
+
+/*====================================================================*/
+
+#endif /* !defined(MACH) */
+
+static int ds_ioctl(struct inode * inode, struct file * file,
+ u_int cmd, u_long arg)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ u_int size;
+ int ret, err;
+ ds_ioctl_arg_t buf;
+
+ DEBUG(2, "ds_ioctl(socket %d, %#x, %#lx)\n", i, cmd, arg);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ s = &socket_table[i];
+
+ size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+ if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL;
+
+ /* Permission check */
+ if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+#ifndef MACH
+ if (cmd & IOC_IN) {
+ err = verify_area(VERIFY_READ, (char *)arg, size);
+ if (err) {
+ DEBUG(3, "ds_ioctl(): verify_read = %d\n", err);
+ return err;
+ }
+ }
+ if (cmd & IOC_OUT) {
+ err = verify_area(VERIFY_WRITE, (char *)arg, size);
+ if (err) {
+ DEBUG(3, "ds_ioctl(): verify_write = %d\n", err);
+ return err;
+ }
+ }
+#endif
+
+ err = ret = 0;
+
+#ifndef MACH
+ if (cmd & IOC_IN) copy_from_user((char *)&buf, (char *)arg, size);
+#else
+ if (cmd & IOC_IN) memcpy((char *) &buf, (char *) arg, size);
+#endif
+
+ switch (cmd) {
+ case DS_ADJUST_RESOURCE_INFO:
+ ret = CardServices(AdjustResourceInfo, s->handle, &buf.adjust);
+ break;
+ case DS_GET_CARD_SERVICES_INFO:
+ ret = CardServices(GetCardServicesInfo, &buf.servinfo);
+ break;
+ case DS_GET_CONFIGURATION_INFO:
+ ret = CardServices(GetConfigurationInfo, s->handle, &buf.config);
+ break;
+ case DS_GET_FIRST_TUPLE:
+ ret = CardServices(GetFirstTuple, s->handle, &buf.tuple);
+ break;
+ case DS_GET_NEXT_TUPLE:
+ ret = CardServices(GetNextTuple, s->handle, &buf.tuple);
+ break;
+ case DS_GET_TUPLE_DATA:
+ buf.tuple.TupleData = buf.tuple_parse.data;
+ buf.tuple.TupleDataMax = sizeof(buf.tuple_parse.data);
+ ret = CardServices(GetTupleData, s->handle, &buf.tuple);
+ break;
+ case DS_PARSE_TUPLE:
+ buf.tuple.TupleData = buf.tuple_parse.data;
+ ret = CardServices(ParseTuple, s->handle, &buf.tuple,
+ &buf.tuple_parse.parse);
+ break;
+ case DS_RESET_CARD:
+ ret = CardServices(ResetCard, s->handle, NULL);
+ break;
+ case DS_GET_STATUS:
+ ret = CardServices(GetStatus, s->handle, &buf.status);
+ break;
+ case DS_VALIDATE_CIS:
+ ret = CardServices(ValidateCIS, s->handle, &buf.cisinfo);
+ break;
+ case DS_SUSPEND_CARD:
+ ret = CardServices(SuspendCard, s->handle, NULL);
+ break;
+ case DS_RESUME_CARD:
+ ret = CardServices(ResumeCard, s->handle, NULL);
+ break;
+ case DS_EJECT_CARD:
+ ret = CardServices(EjectCard, s->handle, NULL);
+ break;
+ case DS_INSERT_CARD:
+ ret = CardServices(InsertCard, s->handle, NULL);
+ break;
+ case DS_ACCESS_CONFIGURATION_REGISTER:
+ if ((buf.conf_reg.Action == CS_WRITE) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = CardServices(AccessConfigurationRegister, s->handle,
+ &buf.conf_reg);
+ break;
+ case DS_GET_FIRST_REGION:
+ ret = CardServices(GetFirstRegion, s->handle, &buf.region);
+ break;
+ case DS_GET_NEXT_REGION:
+ ret = CardServices(GetNextRegion, s->handle, &buf.region);
+ break;
+ case DS_GET_FIRST_WINDOW:
+ buf.win_info.handle = (window_handle_t)s->handle;
+ ret = CardServices(GetFirstWindow, &buf.win_info.handle,
+ &buf.win_info.window);
+ break;
+ case DS_GET_NEXT_WINDOW:
+ ret = CardServices(GetNextWindow, &buf.win_info.handle,
+ &buf.win_info.window);
+ break;
+ case DS_GET_MEM_PAGE:
+ ret = CardServices(GetMemPage, buf.win_info.handle,
+ &buf.win_info.map);
+ break;
+ case DS_REPLACE_CIS:
+ ret = CardServices(ReplaceCIS, s->handle, &buf.cisdump);
+ break;
+ case DS_BIND_REQUEST:
+ if (!capable(CAP_SYS_ADMIN)) return -EPERM;
+ err = bind_request(i, &buf.bind_info);
+ break;
+ case DS_GET_DEVICE_INFO:
+ err = get_device_info(i, &buf.bind_info, 1);
+ break;
+ case DS_GET_NEXT_DEVICE:
+ err = get_device_info(i, &buf.bind_info, 0);
+ break;
+ case DS_UNBIND_REQUEST:
+ err = unbind_request(i, &buf.bind_info);
+ break;
+ case DS_BIND_MTD:
+ if (!capable(CAP_SYS_ADMIN)) return -EPERM;
+ err = bind_mtd(i, &buf.mtd_info);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if ((err == 0) && (ret != CS_SUCCESS)) {
+ DEBUG(2, "ds_ioctl: ret = %d\n", ret);
+ switch (ret) {
+ case CS_BAD_SOCKET: case CS_NO_CARD:
+ err = -ENODEV; break;
+ case CS_BAD_ARGS: case CS_BAD_ATTRIBUTE: case CS_BAD_IRQ:
+ case CS_BAD_TUPLE:
+ err = -EINVAL; break;
+ case CS_IN_USE:
+ err = -EBUSY; break;
+ case CS_OUT_OF_RESOURCE:
+ err = -ENOSPC; break;
+ case CS_NO_MORE_ITEMS:
+ err = -ENODATA; break;
+ case CS_UNSUPPORTED_FUNCTION:
+ err = -ENOSYS; break;
+ default:
+ err = -EIO; break;
+ }
+ }
+
+#ifndef MACH
+ if (cmd & IOC_OUT) copy_to_user((char *)arg, (char *)&buf, size);
+#else
+ if (cmd & IOC_OUT) memcpy((char *) arg, (char *) &buf, size);
+#endif
+
+ return err;
+} /* ds_ioctl */
+
+/*====================================================================*/
+
+#ifndef MACH
+
+static struct file_operations ds_fops = {
+ open: ds_open,
+ release: ds_release,
+ ioctl: ds_ioctl,
+ read: ds_read,
+ write: ds_write,
+#if (LINUX_VERSION_CODE < VERSION(2,1,23))
+ select: ds_select
+#else
+ poll: ds_poll
+#endif
+};
+
+#if (LINUX_VERSION_CODE <= VERSION(2,1,17))
+
+#undef CONFIG_MODVERSIONS
+static struct symbol_table ds_symtab = {
+#include <linux/symtab_begin.h>
+#undef X
+#define X(sym) { (void *)&sym, SYMBOL_NAME_STR(sym) }
+ X(register_pccard_driver),
+ X(unregister_pccard_driver),
+#include <linux/symtab_end.h>
+};
+
+#else
+
+EXPORT_SYMBOL(register_pccard_driver);
+EXPORT_SYMBOL(unregister_pccard_driver);
+
+#endif
+
+#endif /* !defined(MACH) */
+
+/*====================================================================*/
+
+int __init init_pcmcia_ds(void)
+{
+ client_reg_t client_reg;
+ servinfo_t serv;
+ bind_req_t bind;
+ socket_info_t *s;
+ int i, ret;
+
+ DEBUG(0, "%s\n", version);
+
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "ds: Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ if (serv.Count == 0) {
+ printk(KERN_NOTICE "ds: no socket drivers\n");
+ return -1;
+ }
+
+ sockets = serv.Count;
+ socket_table = kmalloc(sockets*sizeof(socket_info_t), GFP_KERNEL);
+ if (!socket_table) return -1;
+ for (i = 0, s = socket_table; i < sockets; i++, s++) {
+ s->state = 0;
+ s->user = NULL;
+ s->req_pending = 0;
+ init_waitqueue_head(&s->queue);
+ init_waitqueue_head(&s->request);
+ s->handle = NULL;
+ init_timer(&s->removal);
+ s->removal.data = i;
+ s->removal.function = &handle_removal;
+ s->bind = NULL;
+ }
+
+ /* Set up hotline to Card Services */
+ client_reg.dev_info = bind.dev_info = &dev_info;
+ client_reg.Attributes = INFO_MASTER_CLIENT;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_EJECTION_REQUEST | CS_EVENT_INSERTION_REQUEST |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &ds_event;
+ client_reg.Version = 0x0210;
+ for (i = 0; i < sockets; i++) {
+ bind.Socket = i;
+ bind.Function = BIND_FN_ALL;
+ ret = CardServices(BindDevice, &bind);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, BindDevice, ret);
+ break;
+ }
+ client_reg.event_callback_args.client_data = &socket_table[i];
+ ret = CardServices(RegisterClient, &socket_table[i].handle,
+ &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, RegisterClient, ret);
+ break;
+ }
+ }
+
+#ifndef MACH
+ /* Set up character device for user mode clients */
+ i = register_chrdev(0, "pcmcia", &ds_fops);
+ if (i == -EBUSY)
+ printk(KERN_NOTICE "unable to find a free device # for "
+ "Driver Services\n");
+ else
+ major_dev = i;
+ register_symtab(&ds_symtab);
+#endif
+
+#ifdef HAS_PROC_BUS
+ if (proc_pccard)
+ create_proc_read_entry("drivers", 0, proc_pccard,
+ proc_read_drivers, NULL);
+ init_status = 0;
+#endif
+ return 0;
+}
+
+#ifdef MODULE
+
+int __init init_module(void)
+{
+ return init_pcmcia_ds();
+}
+
+void __exit cleanup_module(void)
+{
+ int i;
+#ifdef HAS_PROC_BUS
+ if (proc_pccard)
+ remove_proc_entry("drivers", proc_pccard);
+#endif
+ if (major_dev != -1)
+ unregister_chrdev(major_dev, "pcmcia");
+ for (i = 0; i < sockets; i++)
+ CardServices(DeregisterClient, socket_table[i].handle);
+ sockets = 0;
+ kfree(socket_table);
+}
+
+#endif
+
+/*====================================================================*/
+
+/* Include the interface glue code to GNU Mach. */
+#include "../glue/ds.c"
+
+/*====================================================================*/
diff --git a/linux/pcmcia-cs/modules/ene.h b/linux/pcmcia-cs/modules/ene.h
new file mode 100644
index 0000000..6b9b18b
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ene.h
@@ -0,0 +1,59 @@
+/*
+ * ene.h 1.2 2001/08/24 12:15:33
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_ENE_H
+#define _LINUX_ENE_H
+
+#ifndef PCI_VENDOR_ID_ENE
+#define PCI_VENDOR_ID_ENE 0x1524
+#endif
+
+#ifndef PCI_DEVICE_ID_ENE_1211
+#define PCI_DEVICE_ID_ENE_1211 0x1211
+#endif
+#ifndef PCI_DEVICE_ID_ENE_1225
+#define PCI_DEVICE_ID_ENE_1225 0x1225
+#endif
+#ifndef PCI_DEVICE_ID_ENE_1410
+#define PCI_DEVICE_ID_ENE_1410 0x1410
+#endif
+#ifndef PCI_DEVICE_ID_ENE_1420
+#define PCI_DEVICE_ID_ENE_1420 0x1420
+#endif
+
+#define ENE_PCIC_ID \
+ IS_ENE1211, IS_ENE1225, IS_ENE1410, IS_ENE1420
+
+#define ENE_PCIC_INFO \
+ { "ENE 1211", IS_TI|IS_CARDBUS, ID(ENE, 1211) }, \
+ { "ENE 1225", IS_TI|IS_CARDBUS, ID(ENE, 1225) }, \
+ { "ENE 1410", IS_TI|IS_CARDBUS, ID(ENE, 1410) }, \
+ { "ENE 1420", IS_TI|IS_CARDBUS, ID(ENE, 1420) }
+
+#endif /* _LINUX_ENE_H */
diff --git a/linux/pcmcia-cs/modules/i82365.c b/linux/pcmcia-cs/modules/i82365.c
new file mode 100644
index 0000000..17ddf66
--- /dev/null
+++ b/linux/pcmcia-cs/modules/i82365.c
@@ -0,0 +1,2588 @@
+/*======================================================================
+
+ Device driver for Intel 82365 and compatible PC Card controllers,
+ and Yenta-compatible PCI-to-CardBus controllers.
+
+ i82365.c 1.358 2003/09/13 17:34:01
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+
+/* ISA-bus controllers */
+#include "i82365.h"
+#include "cirrus.h"
+#include "vg468.h"
+#include "ricoh.h"
+#include "o2micro.h"
+
+/* PCI-bus controllers */
+#include "yenta.h"
+#include "ti113x.h"
+#include "smc34c90.h"
+#include "topic.h"
+#include "ene.h"
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Intel ExCA/Yenta PCMCIA socket driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* General options */
+INT_MODULE_PARM(poll_interval, 0); /* in ticks, 0 means never */
+INT_MODULE_PARM(cycle_time, 120); /* in ns, 120 ns = 8.33 MHz */
+INT_MODULE_PARM(do_scan, 1); /* Probe free interrupts? */
+
+/* Cirrus options */
+INT_MODULE_PARM(has_dma, -1);
+INT_MODULE_PARM(has_led, -1);
+INT_MODULE_PARM(has_ring, -1);
+INT_MODULE_PARM(has_vsense, 0);
+INT_MODULE_PARM(dynamic_mode, 0);
+INT_MODULE_PARM(freq_bypass, -1);
+INT_MODULE_PARM(setup_time, -1);
+INT_MODULE_PARM(cmd_time, -1);
+INT_MODULE_PARM(recov_time, -1);
+
+#ifdef CONFIG_ISA
+INT_MODULE_PARM(i365_base, 0x3e0); /* IO address for probes */
+INT_MODULE_PARM(extra_sockets, 0); /* Probe at i365_base+2? */
+INT_MODULE_PARM(ignore, -1); /* Ignore this socket # */
+INT_MODULE_PARM(cs_irq, 0); /* card status irq */
+INT_MODULE_PARM(irq_mask, 0xffff); /* bit map of irq's to use */
+static int irq_list[16] = { -1 };
+MODULE_PARM(irq_list, "1-16i");
+INT_MODULE_PARM(async_clock, -1); /* Vadem specific */
+INT_MODULE_PARM(cable_mode, -1);
+INT_MODULE_PARM(wakeup, 0); /* Cirrus specific */
+#endif
+
+#ifdef CONFIG_PCI
+static int pci_irq_list[8] = { 0 }; /* PCI interrupt assignments */
+MODULE_PARM(pci_irq_list, "1-8i");
+INT_MODULE_PARM(do_pci_probe, 1); /* Scan for PCI bridges? */
+INT_MODULE_PARM(fast_pci, -1);
+INT_MODULE_PARM(cb_write_post, -1);
+INT_MODULE_PARM(irq_mode, -1); /* Override BIOS routing? */
+INT_MODULE_PARM(hold_time, -1); /* Ricoh specific */
+INT_MODULE_PARM(p2cclk, -1); /* TI specific */
+#endif
+
+#if defined(CONFIG_ISA) && defined(CONFIG_PCI)
+INT_MODULE_PARM(pci_csc, 1); /* PCI card status irqs? */
+INT_MODULE_PARM(pci_int, 1); /* PCI IO card irqs? */
+#elif defined(CONFIG_ISA) && !defined(CONFIG_PCI)
+#define pci_csc 0
+#define pci_int 0
+#elif !defined(CONFIG_ISA) && defined(CONFIG_PCI)
+#define pci_csc 0
+#define pci_int 1 /* We must use PCI irq's */
+#else
+#error "No bus architectures defined!"
+#endif
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static const char *version =
+"i82365.c 1.358 2003/09/13 17:34:01 (David Hinds)";
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif
+
+/*====================================================================*/
+
+typedef struct socket_info_t {
+ u_short type, flags;
+ socket_cap_t cap;
+ ioaddr_t ioaddr;
+ u_short psock;
+ u_char cs_irq, intr;
+ void (*handler)(void *info, u_int events);
+ void *info;
+#ifdef HAS_PROC_BUS
+ struct proc_dir_entry *proc;
+#endif
+ u_char pci_irq_code;
+#ifdef CONFIG_PCI
+ u_short vendor, device;
+ u_char revision, bus, devfn;
+ u_short bcr;
+ u_char pci_lat, cb_lat, sub_bus, cache;
+ u_int cb_phys;
+ char *cb_virt;
+#endif
+ union {
+ cirrus_state_t cirrus;
+ vg46x_state_t vg46x;
+ o2micro_state_t o2micro;
+ ti113x_state_t ti113x;
+ ricoh_state_t ricoh;
+ topic_state_t topic;
+ } state;
+} socket_info_t;
+
+/* Where we keep track of our sockets... */
+static int sockets = 0;
+static socket_info_t socket[8] = {
+ { 0, }, /* ... */
+};
+
+#ifdef CONFIG_ISA
+static int grab_irq;
+#ifdef __SMP__
+static spinlock_t isa_lock = SPIN_LOCK_UNLOCKED;
+#endif
+#define ISA_LOCK(s, f) \
+ if (!((s)->flags & IS_CARDBUS)) spin_lock_irqsave(&isa_lock, f)
+#define ISA_UNLOCK(n, f) \
+ if (!((s)->flags & IS_CARDBUS)) spin_unlock_irqrestore(&isa_lock, f)
+#else
+#define ISA_LOCK(n, f) do { } while (0)
+#define ISA_UNLOCK(n, f) do { } while (0)
+#endif
+
+static void pcic_interrupt_wrapper(u_long data);
+static struct timer_list poll_timer = {
+ function: pcic_interrupt_wrapper
+};
+
+#define flip(v,b,f) (v = ((f)<0) ? v : ((f) ? ((v)|(b)) : ((v)&(~b))))
+
+/*====================================================================*/
+
+/* Some PCI shortcuts */
+
+#ifdef CONFIG_PCI
+static int pci_readb(socket_info_t *s, int r, u_char *v)
+{ return pcibios_read_config_byte(s->bus, s->devfn, r, v); }
+static int pci_writeb(socket_info_t *s, int r, u_char v)
+{ return pcibios_write_config_byte(s->bus, s->devfn, r, v); }
+static int pci_readw(socket_info_t *s, int r, u_short *v)
+{ return pcibios_read_config_word(s->bus, s->devfn, r, v); }
+static int pci_writew(socket_info_t *s, int r, u_short v)
+{ return pcibios_write_config_word(s->bus, s->devfn, r, v); }
+static int pci_readl(socket_info_t *s, int r, u_int *v)
+{ return pcibios_read_config_dword(s->bus, s->devfn, r, v); }
+static int pci_writel(socket_info_t *s, int r, u_int v)
+{ return pcibios_write_config_dword(s->bus, s->devfn, r, v); }
+#endif
+
+#define cb_readb(s, r) readb((s)->cb_virt + (r))
+#define cb_readl(s, r) readl((s)->cb_virt + (r))
+#define cb_writeb(s, r, v) writeb(v, (s)->cb_virt + (r))
+#define cb_writel(s, r, v) writel(v, (s)->cb_virt + (r))
+
+/*====================================================================*/
+
+/* These definitions must match the pcic table! */
+typedef enum pcic_id {
+#ifdef CONFIG_ISA
+ IS_I82365A, IS_I82365B, IS_I82365DF, IS_IBM, IS_RF5Cx96,
+ IS_VLSI, IS_VG468, IS_VG469, IS_PD6710, IS_PD672X, IS_VT83C469,
+#endif
+#ifdef CONFIG_PCI
+ IS_I82092AA, IS_OM82C092G, CIRRUS_PCIC_ID, O2MICRO_PCIC_ID,
+ RICOH_PCIC_ID, SMC_PCIC_ID, TI_PCIC_ID, ENE_PCIC_ID,
+ TOPIC_PCIC_ID, IS_UNK_PCI, IS_UNK_CARDBUS
+#endif
+} pcic_id;
+
+/* Flags for classifying groups of controllers */
+#define IS_VADEM 0x0001
+#define IS_CIRRUS 0x0002
+#define IS_TI 0x0004
+#define IS_O2MICRO 0x0008
+#define IS_TOPIC 0x0020
+#define IS_RICOH 0x0040
+#define IS_UNKNOWN 0x0400
+#define IS_VG_PWR 0x0800
+#define IS_DF_PWR 0x1000
+#define IS_PCI 0x2000
+#define IS_CARDBUS 0x4000
+#define IS_ALIVE 0x8000
+
+typedef struct pcic_t {
+ char *name;
+ u_short flags;
+#ifdef CONFIG_PCI
+ u_short vendor, device;
+#endif
+} pcic_t;
+
+#define ID(a,b) PCI_VENDOR_ID_##a,PCI_DEVICE_ID_##a##_##b
+
+static pcic_t pcic[] = {
+#ifdef CONFIG_ISA
+ { "Intel i82365sl A step", 0 },
+ { "Intel i82365sl B step", 0 },
+ { "Intel i82365sl DF", IS_DF_PWR },
+ { "IBM Clone", 0 },
+ { "Ricoh RF5C296/396", 0 },
+ { "VLSI 82C146", 0 },
+ { "Vadem VG-468", IS_VADEM },
+ { "Vadem VG-469", IS_VADEM|IS_VG_PWR },
+ { "Cirrus PD6710", IS_CIRRUS },
+ { "Cirrus PD672x", IS_CIRRUS },
+ { "VIA VT83C469", IS_CIRRUS },
+#endif
+#ifdef CONFIG_PCI
+ { "Intel 82092AA", IS_PCI, ID(INTEL, 82092AA_0) },
+ { "Omega Micro 82C092G", IS_PCI, ID(OMEGA, 82C092G) },
+ CIRRUS_PCIC_INFO, O2MICRO_PCIC_INFO, RICOH_PCIC_INFO,
+ SMC_PCIC_INFO, TI_PCIC_INFO, ENE_PCIC_INFO, TOPIC_PCIC_INFO,
+ { "Unknown", IS_PCI|IS_UNKNOWN, 0, 0 },
+ { "Unknown", IS_CARDBUS|IS_UNKNOWN, 0, 0 }
+#endif
+};
+
+#define PCIC_COUNT (sizeof(pcic)/sizeof(pcic_t))
+
+/*====================================================================*/
+
+static u_char i365_get(socket_info_t *s, u_short reg)
+{
+#ifdef CONFIG_PCI
+ if (s->cb_virt)
+ return cb_readb(s, 0x0800 + reg);
+#endif
+ outb(I365_REG(s->psock, reg), s->ioaddr);
+ return inb(s->ioaddr+1);
+}
+
+static void i365_set(socket_info_t *s, u_short reg, u_char data)
+{
+#ifdef CONFIG_PCI
+ if (s->cb_virt) {
+ cb_writeb(s, 0x0800 + reg, data);
+ return;
+ }
+#endif
+ outb(I365_REG(s->psock, reg), s->ioaddr);
+ outb(data, s->ioaddr+1);
+}
+
+static void i365_bset(socket_info_t *s, u_short reg, u_char mask)
+{
+ i365_set(s, reg, i365_get(s, reg) | mask);
+}
+
+static void i365_bclr(socket_info_t *s, u_short reg, u_char mask)
+{
+ i365_set(s, reg, i365_get(s, reg) & ~mask);
+}
+
+static void i365_bflip(socket_info_t *s, u_short reg, u_char mask, int b)
+{
+ u_char d = i365_get(s, reg);
+ i365_set(s, reg, (b) ? (d | mask) : (d & ~mask));
+}
+
+static u_short i365_get_pair(socket_info_t *s, u_short reg)
+{
+ return (i365_get(s, reg) + (i365_get(s, reg+1) << 8));
+}
+
+static void i365_set_pair(socket_info_t *s, u_short reg, u_short data)
+{
+ i365_set(s, reg, data & 0xff);
+ i365_set(s, reg+1, data >> 8);
+}
+
+/*======================================================================
+
+ Code to save and restore global state information for Cirrus
+ PD67xx controllers, and to set and report global configuration
+ options.
+
+ The VIA controllers also use these routines, as they are mostly
+ Cirrus lookalikes, without the timing registers.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+static int __init get_pci_irq(socket_info_t *s)
+{
+ u8 irq = pci_irq_list[s - socket];
+ if (!irq)
+ irq = pci_find_slot(s->bus, s->devfn)->irq;
+ if (irq >= NR_IRQS) irq = 0;
+ s->cap.pci_irq = irq;
+ return irq;
+}
+
+#endif
+
+/* Snapshot the global configuration of a Cirrus PD67xx bridge:
+ the two misc control registers, extension control 1 (PCI bridges
+ only) and the six timing setup/command/recovery registers. */
+static void __init cirrus_get_state(socket_info_t *s)
+{
+ cirrus_state_t *p = &s->state.cirrus;
+ int i;
+
+ p->misc1 = i365_get(s, PD67_MISC_CTL_1);
+ /* only these two MISC_CTL_1 bits are preserved/restored */
+ p->misc1 &= (PD67_MC1_MEDIA_ENA | PD67_MC1_INPACK_ENA);
+ p->misc2 = i365_get(s, PD67_MISC_CTL_2);
+ if (s->flags & IS_PCI)
+ p->ectl1 = pd67_ext_get(s, PD67_EXT_CTL_1);
+ for (i = 0; i < 6; i++)
+ p->timer[i] = i365_get(s, PD67_TIME_SETUP(0)+i);
+}
+
+/* Restore the saved Cirrus global state.  MISC_CTL_2 is written
+ first because it holds the suspend bit: if the chip was suspended
+ we must wake it and wait before touching anything else. */
+static void cirrus_set_state(socket_info_t *s)
+{
+ cirrus_state_t *p = &s->state.cirrus;
+ u_char misc;
+ int i;
+
+ misc = i365_get(s, PD67_MISC_CTL_2);
+ i365_set(s, PD67_MISC_CTL_2, p->misc2);
+ /* chip was suspended: give it time to wake up */
+ if (misc & PD67_MC2_SUSPEND) mdelay(50);
+ misc = i365_get(s, PD67_MISC_CTL_1);
+ /* merge the two saved bits into the current register value */
+ misc &= ~(PD67_MC1_MEDIA_ENA | PD67_MC1_INPACK_ENA);
+ i365_set(s, PD67_MISC_CTL_1, misc | p->misc1);
+ if (s->flags & IS_PCI)
+ pd67_ext_set(s, PD67_EXT_CTL_1, p->ectl1);
+ else if (has_vsense) {
+ /* vsense strobe lives on socket 0's extension register;
+ NOTE(review): s+1 here presumably maps to the sibling
+ socket entry — confirm pairing of socket table entries */
+ socket_info_t *t = (s->psock) ? s : s+1;
+ pd67_ext_set(t, PD67_EXT_CTL_2, PD67_EC2_GPSTB_IOR);
+ }
+ for (i = 0; i < 6; i++)
+ i365_set(s, PD67_TIME_SETUP(0)+i, p->timer[i]);
+}
+
+#ifdef CONFIG_PCI
+/* Route management (status change) interrupts for Cirrus CardBus
+ bridges: enable the bridge management irq only when PCI status
+ change interrupts are NOT in use.  Always returns 0. */
+static int cirrus_set_irq_mode(socket_info_t *s, int pcsc, int pint)
+{
+ flip(s->bcr, PD6832_BCR_MGMT_IRQ_ENA, !pcsc);
+ return 0;
+}
+#endif
+
+/* Apply module options to a Cirrus (or VIA lookalike) bridge state,
+ append a human-readable summary of the chosen options to 'buf',
+ and return a mask of ISA irqs that remain usable for cards (bits
+ cleared for irq lines consumed by ring indicate, LED, DMA, etc.). */
+static u_int __init cirrus_set_opts(socket_info_t *s, char *buf)
+{
+ cirrus_state_t *p = &s->state.cirrus;
+ u_int mask = 0xffff;
+
+ p->misc1 |= PD67_MC1_SPKR_ENA;
+ /* ring indicate support defaults to on for Cirrus parts */
+ if (has_ring == -1) has_ring = 1;
+ flip(p->misc2, PD67_MC2_IRQ15_RI, has_ring);
+ flip(p->misc2, PD67_MC2_DYNAMIC_MODE, dynamic_mode);
+ if (p->misc2 & PD67_MC2_IRQ15_RI)
+ strcat(buf, " [ring]");
+ if (p->misc2 & PD67_MC2_DYNAMIC_MODE)
+ strcat(buf, " [dyn mode]");
+ if (p->misc1 & PD67_MC1_INPACK_ENA)
+ strcat(buf, " [inpack]");
+ if (!(s->flags & (IS_PCI|IS_CARDBUS))) {
+ /* pure ISA bridge: freq bypass, LED and DMA options */
+ flip(p->misc2, PD67_MC2_FREQ_BYPASS, freq_bypass);
+ if (p->misc2 & PD67_MC2_FREQ_BYPASS)
+ strcat(buf, " [freq bypass]");
+ /* irq 15 is consumed by ring indicate */
+ if (p->misc2 & PD67_MC2_IRQ15_RI)
+ mask &= ~0x8000;
+ if (has_led > 0) {
+ strcat(buf, " [led]");
+ mask &= ~0x1000;
+ }
+ if (has_dma > 0) {
+ strcat(buf, " [dma]");
+ mask &= ~0x0600;
+ }
+#ifdef CONFIG_PCI
+ } else {
+ /* PCI/CardBus bridge: disable ISA-only features */
+ p->misc1 &= ~PD67_MC1_MEDIA_ENA;
+ p->misc1 &= ~(PD67_MC1_PULSE_MGMT | PD67_MC1_PULSE_IRQ);
+ p->ectl1 &= ~(PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ);
+ flip(p->misc2, PD67_MC2_FAST_PCI, fast_pci);
+ if (p->misc2 & PD67_MC2_IRQ15_RI)
+ mask &= (s->type == IS_PD6730) ? ~0x0400 : ~0x8000;
+ if ((s->flags & IS_PCI) && (irq_mode == 1) && get_pci_irq(s)) {
+ /* Configure PD6729 bridge for PCI interrupts */
+ p->ectl1 |= PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ;
+ s->pci_irq_code = 3; /* PCI INTA = "irq 3" */
+ buf += strlen(buf);
+ sprintf(buf, " [pci irq %d]", s->cap.pci_irq);
+ mask = 0;
+ }
+#endif
+ }
+#ifdef CONFIG_ISA
+ /* the VIA VT83C469 lacks the Cirrus timing registers */
+ if (s->type != IS_VT83C469)
+#endif
+ {
+ /* timer[0..2] = setup/cmd/recovery set 0, [3..5] = set 1 */
+ if (setup_time >= 0)
+ p->timer[0] = p->timer[3] = setup_time;
+ if (cmd_time > 0) {
+ p->timer[1] = cmd_time;
+ p->timer[4] = cmd_time*2+4;
+ }
+ if (p->timer[1] == 0) {
+ /* fall back to conservative default timings */
+ p->timer[1] = 6; p->timer[4] = 16;
+ if (p->timer[0] == 0)
+ p->timer[0] = p->timer[3] = 1;
+ }
+ if (recov_time >= 0)
+ p->timer[2] = p->timer[5] = recov_time;
+ buf += strlen(buf);
+ sprintf(buf, " [%d/%d/%d] [%d/%d/%d]", p->timer[0], p->timer[1],
+ p->timer[2], p->timer[3], p->timer[4], p->timer[5]);
+ }
+ return mask;
+}
+
+/*======================================================================
+
+ Code to save and restore global state information for Vadem VG468
+ and VG469 controllers, and to set and report global configuration
+ options.
+
+======================================================================*/
+
+#ifdef CONFIG_ISA
+
+/* Snapshot Vadem VG468/VG469 global state: the control register,
+ plus the extended mode register on the VG469 only. */
+static void __init vg46x_get_state(socket_info_t *s)
+{
+ vg46x_state_t *p = &s->state.vg46x;
+ p->ctl = i365_get(s, VG468_CTL);
+ if (s->type == IS_VG469)
+ p->ema = i365_get(s, VG469_EXT_MODE);
+}
+
+/* Restore the saved Vadem VG468/VG469 global state (mirror of
+ vg46x_get_state). */
+static void vg46x_set_state(socket_info_t *s)
+{
+ vg46x_state_t *p = &s->state.vg46x;
+ i365_set(s, VG468_CTL, p->ctl);
+ if (s->type == IS_VG469)
+ i365_set(s, VG469_EXT_MODE, p->ema);
+}
+
+/* Apply module options (async clock, cable mode) to a Vadem bridge
+ and report the resulting configuration in 'buf'.  No irq lines are
+ consumed by these options, so the full 0xffff mask is returned. */
+static u_int __init vg46x_set_opts(socket_info_t *s, char *buf)
+{
+ vg46x_state_t *p = &s->state.vg46x;
+
+ flip(p->ctl, VG468_CTL_ASYNC, async_clock);
+ flip(p->ema, VG469_MODE_CABLE, cable_mode);
+ if (p->ctl & VG468_CTL_ASYNC)
+ strcat(buf, " [async]");
+ if (p->ctl & VG468_CTL_INPACK)
+ strcat(buf, " [inpack]");
+ if (s->type == IS_VG469) {
+ /* VG469-only reporting: voltage select and mode bits */
+ u_char vsel = i365_get(s, VG469_VSELECT);
+ if (vsel & VG469_VSEL_EXT_STAT) {
+ strcat(buf, " [ext mode]");
+ if (vsel & VG469_VSEL_EXT_BUS)
+ strcat(buf, " [isa buf]");
+ }
+ if (p->ema & VG469_MODE_CABLE)
+ strcat(buf, " [cable]");
+ if (p->ema & VG469_MODE_COMPAT)
+ strcat(buf, " [c step]");
+ }
+ return 0xffff;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for TI 1130 and
+ TI 1131 controllers, and to set and report global configuration
+ options.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+/* Snapshot TI 113x/12xx bridge global state from PCI config space:
+ system control, card control, device control, the TI1250
+ diagnostic register and the TI12xx irq mux routing register. */
+static void __init ti113x_get_state(socket_info_t *s)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ pci_readl(s, TI113X_SYSTEM_CONTROL, &p->sysctl);
+ pci_readb(s, TI113X_CARD_CONTROL, &p->cardctl);
+ pci_readb(s, TI113X_DEVICE_CONTROL, &p->devctl);
+ pci_readb(s, TI1250_DIAGNOSTIC, &p->diag);
+ pci_readl(s, TI12XX_IRQMUX, &p->irqmux);
+}
+
+/* Restore the saved TI bridge state and reset the multimedia
+ control and legacy I/O window offsets to a clean baseline. */
+static void ti113x_set_state(socket_info_t *s)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ pci_writel(s, TI113X_SYSTEM_CONTROL, p->sysctl);
+ pci_writeb(s, TI113X_CARD_CONTROL, p->cardctl);
+ pci_writeb(s, TI113X_DEVICE_CONTROL, p->devctl);
+ pci_writeb(s, TI1250_MULTIMEDIA_CTL, 0);
+ pci_writeb(s, TI1250_DIAGNOSTIC, p->diag);
+ pci_writel(s, TI12XX_IRQMUX, p->irqmux);
+ /* clear both ExCA I/O window offsets */
+ i365_set_pair(s, TI113X_IO_OFFSET(0), 0);
+ i365_set_pair(s, TI113X_IO_OFFSET(1), 0);
+}
+
+/* Select how card status change (pcsc) and card functional (pint)
+ interrupts are routed on TI bridges.  For TI1130/1131 the routing
+ bits live in the card control register; for the TI1250A they live
+ in the diagnostic register.  Always returns 0. */
+static int ti113x_set_irq_mode(socket_info_t *s, int pcsc, int pint)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ s->intr = (pcsc) ? I365_INTR_ENA : 0;
+ if (s->type <= IS_TI1131) {
+ p->cardctl &= ~(TI113X_CCR_PCI_IRQ_ENA |
+ TI113X_CCR_PCI_IREQ | TI113X_CCR_PCI_CSC);
+ if (pcsc)
+ p->cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_CSC;
+ if (pint)
+ p->cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_IREQ;
+ } else if (s->type == IS_TI1250A) {
+ /* Fix: clear only the two routing bits before re-setting
+ them.  The previous "p->diag &= MASK" (no ~) kept ONLY
+ those bits and wiped every other diagnostic bit, unlike
+ the parallel cardctl logic above. */
+ p->diag &= ~(TI1250_DIAG_PCI_CSC | TI1250_DIAG_PCI_IREQ);
+ if (pcsc)
+ p->diag |= TI1250_DIAG_PCI_CSC;
+ if (pint)
+ p->diag |= TI1250_DIAG_PCI_IREQ;
+ }
+ return 0;
+}
+
+/* Apply module options to a TI bridge, report them in 'buf', and
+ return the mask of ISA irqs still usable for cards.  'old' selects
+ the TI1130/1131 behavior (vs. the later 12xx family). */
+static u_int __init ti113x_set_opts(socket_info_t *s, char *buf)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ u_int mask = 0xffff;
+ int old = (s->type <= IS_TI1131);
+
+ flip(p->cardctl, TI113X_CCR_RIENB, has_ring);
+ p->cardctl &= ~TI113X_CCR_ZVENABLE;
+ p->cardctl |= TI113X_CCR_SPKROUTEN;
+ if (!old) flip(p->sysctl, TI122X_SCR_P2CCLK, p2cclk);
+ /* irq_mode: 0 = parallel PCI, 1 = ISA, 2 = serial irq,
+ 3 = fully serialized (12xx only); default = leave as-is */
+ switch (irq_mode) {
+ case 0:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->irqmux = (p->irqmux & ~0x0f) | 0x02; /* route INTA */
+ if (!(p->sysctl & TI122X_SCR_INTRTIE))
+ p->irqmux = (p->irqmux & ~0xf0) | 0x20; /* route INTB */
+ break;
+ case 1:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->devctl |= TI113X_DCR_IMODE_ISA;
+ break;
+ case 2:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->devctl |= TI113X_DCR_IMODE_SERIAL;
+ break;
+ case 3:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->devctl |= TI12XX_DCR_IMODE_ALL_SERIAL;
+ break;
+ default:
+ /* Feeble fallback: if PCI-only but no PCI irq, try ISA */
+ if (((p->devctl & TI113X_DCR_IMODE_MASK) == 0) &&
+ (s->cap.pci_irq == 0))
+ p->devctl |= TI113X_DCR_IMODE_ISA;
+ }
+ if (p->cardctl & TI113X_CCR_RIENB) {
+ strcat(buf, " [ring]");
+ /* ring indicate consumes irq 15 on old parts */
+ if (old) mask &= ~0x8000;
+ }
+ if (old && (p->sysctl & TI113X_SCR_CLKRUN_ENA)) {
+ /* CLKRUN consumes irq 12 or 10 depending on select bit */
+ if (p->sysctl & TI113X_SCR_CLKRUN_SEL) {
+ strcat(buf, " [clkrun irq 12]");
+ mask &= ~0x1000;
+ } else {
+ strcat(buf, " [clkrun irq 10]");
+ mask &= ~0x0400;
+ }
+ }
+ switch (p->devctl & TI113X_DCR_IMODE_MASK) {
+ case TI12XX_DCR_IMODE_PCI_ONLY:
+ strcat(buf, " [pci only]");
+ mask = 0;
+ break;
+ case TI113X_DCR_IMODE_ISA:
+ strcat(buf, " [isa irq]");
+ if (old) mask &= ~0x0018;
+ break;
+ case TI113X_DCR_IMODE_SERIAL:
+ strcat(buf, " [pci + serial irq]");
+ mask = 0xffff;
+ break;
+ case TI12XX_DCR_IMODE_ALL_SERIAL:
+ strcat(buf, " [serial pci & irq]");
+ mask = 0xffff;
+ break;
+ }
+ return mask;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for the Ricoh
+ RL5C4XX controllers, and to set and report global configuration
+ options.
+
+ The interrupt test doesn't seem to be reliable with Ricoh
+ bridges. It seems to depend on what type of card is in the
+ socket, and on the history of that socket, in some way that
+ doesn't show up in the current socket state.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+/* Snapshot Ricoh RL5C4XX bridge global state from PCI config space:
+ config, misc, 16-bit control, and the I/O/memory timing words. */
+static void __init ricoh_get_state(socket_info_t *s)
+{
+ ricoh_state_t *p = &s->state.ricoh;
+ pci_readw(s, RL5C4XX_CONFIG, &p->config);
+ pci_readw(s, RL5C4XX_MISC, &p->misc);
+ pci_readw(s, RL5C4XX_16BIT_CTL, &p->ctl);
+ pci_readw(s, RL5C4XX_16BIT_IO_0, &p->io);
+ pci_readw(s, RL5C4XX_16BIT_MEM_0, &p->mem);
+}
+
+/* Restore the saved Ricoh RL5C4XX global state (mirror of
+ ricoh_get_state). */
+static void ricoh_set_state(socket_info_t *s)
+{
+ ricoh_state_t *p = &s->state.ricoh;
+ pci_writew(s, RL5C4XX_CONFIG, p->config);
+ pci_writew(s, RL5C4XX_MISC, p->misc);
+ pci_writew(s, RL5C4XX_16BIT_CTL, p->ctl);
+ pci_writew(s, RL5C4XX_16BIT_IO_0, p->io);
+ pci_writew(s, RL5C4XX_16BIT_MEM_0, p->mem);
+}
+
+/* Apply module options (timings, irq mode) to a Ricoh bridge,
+ report them in 'buf', and return the usable ISA irq mask.
+ 'old' selects RL5C46x-era behavior. */
+static u_int __init ricoh_set_opts(socket_info_t *s, char *buf)
+{
+ ricoh_state_t *p = &s->state.ricoh;
+ u_int mask = 0xffff;
+ int old = (s->type < IS_RL5C475);
+
+ p->ctl = RL5C4XX_16CTL_IO_TIMING | RL5C4XX_16CTL_MEM_TIMING;
+ if (old)
+ p->ctl |= RL5C46X_16CTL_LEVEL_1 | RL5C46X_16CTL_LEVEL_2;
+ else
+ p->config |= RL5C4XX_CONFIG_PREFETCH;
+
+ /* fold user setup/cmd/hold timings into the I/O and memory
+ timing words; note the I/O setup field is biased by +1 */
+ if (setup_time >= 0) {
+ p->io = (p->io & ~RL5C4XX_SETUP_MASK) +
+ ((setup_time+1) << RL5C4XX_SETUP_SHIFT);
+ p->mem = (p->mem & ~RL5C4XX_SETUP_MASK) +
+ (setup_time << RL5C4XX_SETUP_SHIFT);
+ }
+ if (cmd_time >= 0) {
+ p->io = (p->io & ~RL5C4XX_CMD_MASK) +
+ (cmd_time << RL5C4XX_CMD_SHIFT);
+ p->mem = (p->mem & ~RL5C4XX_CMD_MASK) +
+ (cmd_time << RL5C4XX_CMD_SHIFT);
+ }
+ if (hold_time >= 0) {
+ p->io = (p->io & ~RL5C4XX_HOLD_MASK) +
+ (hold_time << RL5C4XX_HOLD_SHIFT);
+ p->mem = (p->mem & ~RL5C4XX_HOLD_MASK) +
+ (hold_time << RL5C4XX_HOLD_SHIFT);
+ }
+ if (irq_mode == 0) {
+ /* PCI-only: no ISA irqs usable at all */
+ mask = 0;
+ p->misc &= ~RL5C47X_MISC_SRIRQ_ENA;
+ sprintf(buf, " [pci only]");
+ buf += strlen(buf);
+ } else if (!old) {
+ switch (irq_mode) {
+ case 1:
+ p->misc &= ~RL5C47X_MISC_SRIRQ_ENA; break;
+ case 2:
+ p->misc |= RL5C47X_MISC_SRIRQ_ENA; break;
+ }
+ if (p->misc & RL5C47X_MISC_SRIRQ_ENA)
+ sprintf(buf, " [serial irq]");
+ else
+ sprintf(buf, " [isa irq]");
+ buf += strlen(buf);
+ }
+ sprintf(buf, " [io %d/%d/%d] [mem %d/%d/%d]",
+ (p->io & RL5C4XX_SETUP_MASK) >> RL5C4XX_SETUP_SHIFT,
+ (p->io & RL5C4XX_CMD_MASK) >> RL5C4XX_CMD_SHIFT,
+ (p->io & RL5C4XX_HOLD_MASK) >> RL5C4XX_HOLD_SHIFT,
+ (p->mem & RL5C4XX_SETUP_MASK) >> RL5C4XX_SETUP_SHIFT,
+ (p->mem & RL5C4XX_CMD_MASK) >> RL5C4XX_CMD_SHIFT,
+ (p->mem & RL5C4XX_HOLD_MASK) >> RL5C4XX_HOLD_SHIFT);
+ return mask;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for O2Micro
+ controllers, and to set and report global configuration options.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+/* Snapshot O2Micro bridge global state.  Some revisions (0x34,
+ 0x62) and the OZ6812 expose mode registers A/B at alternate
+ offsets.  CardBus parts have three extra registers. */
+static void __init o2micro_get_state(socket_info_t *s)
+{
+ o2micro_state_t *p = &s->state.o2micro;
+ if ((s->revision == 0x34) || (s->revision == 0x62) ||
+ (s->type == IS_OZ6812)) {
+ p->mode_a = i365_get(s, O2_MODE_A_2);
+ p->mode_b = i365_get(s, O2_MODE_B_2);
+ } else {
+ p->mode_a = i365_get(s, O2_MODE_A);
+ p->mode_b = i365_get(s, O2_MODE_B);
+ }
+ p->mode_c = i365_get(s, O2_MODE_C);
+ p->mode_d = i365_get(s, O2_MODE_D);
+ if (s->flags & IS_CARDBUS) {
+ p->mhpg = i365_get(s, O2_MHPG_DMA);
+ p->fifo = i365_get(s, O2_FIFO_ENA);
+ p->mode_e = i365_get(s, O2_MODE_E);
+ }
+}
+
+/* Restore the saved O2Micro global state (mirror of
+ o2micro_get_state, including the alternate-offset quirk). */
+static void o2micro_set_state(socket_info_t *s)
+{
+ o2micro_state_t *p = &s->state.o2micro;
+ if ((s->revision == 0x34) || (s->revision == 0x62) ||
+ (s->type == IS_OZ6812)) {
+ i365_set(s, O2_MODE_A_2, p->mode_a);
+ i365_set(s, O2_MODE_B_2, p->mode_b);
+ } else {
+ i365_set(s, O2_MODE_A, p->mode_a);
+ i365_set(s, O2_MODE_B, p->mode_b);
+ }
+ i365_set(s, O2_MODE_C, p->mode_c);
+ i365_set(s, O2_MODE_D, p->mode_d);
+ if (s->flags & IS_CARDBUS) {
+ i365_set(s, O2_MHPG_DMA, p->mhpg);
+ i365_set(s, O2_FIFO_ENA, p->fifo);
+ i365_set(s, O2_MODE_E, p->mode_e);
+ }
+}
+
+/* Apply module options to an O2Micro bridge, report them in 'buf',
+ and return the usable ISA irq mask (0 when in PCI-only irq mode). */
+static u_int __init o2micro_set_opts(socket_info_t *s, char *buf)
+{
+ o2micro_state_t *p = &s->state.o2micro;
+ u_int mask = 0xffff;
+
+ p->mode_b = (p->mode_b & ~O2_MODE_B_IDENT) | O2_MODE_B_ID_CSTEP;
+ flip(p->mode_b, O2_MODE_B_IRQ15_RI, has_ring);
+ p->mode_c &= ~(O2_MODE_C_ZVIDEO | O2_MODE_C_DREQ_MASK);
+ if (s->flags & IS_CARDBUS) {
+ p->mode_d &= ~O2_MODE_D_W97_IRQ;
+ p->mode_e &= ~O2_MODE_E_MHPG_DMA;
+ p->mhpg = O2_MHPG_CINT_ENA | O2_MHPG_CSC_ENA;
+ /* NOTE(review): magic mode_c value for revision 0x34 —
+ presumably a chip erratum workaround; confirm */
+ if (s->revision == 0x34)
+ p->mode_c = 0x20;
+ } else {
+ /* ring indicate consumes irq 15 on non-CardBus parts */
+ if (p->mode_b & O2_MODE_B_IRQ15_RI) mask &= ~0x8000;
+ }
+ if (p->mode_b & O2_MODE_B_IRQ15_RI)
+ strcat(buf, " [ring]");
+ /* irq_mode is written verbatim into mode_d when given */
+ if (irq_mode != -1)
+ p->mode_d = irq_mode;
+ if (p->mode_d & O2_MODE_D_ISA_IRQ) {
+ strcat(buf, " [pci+isa]");
+ } else {
+ switch (p->mode_d & O2_MODE_D_IRQ_MODE) {
+ case O2_MODE_D_IRQ_PCPCI:
+ strcat(buf, " [pc/pci]"); break;
+ case O2_MODE_D_IRQ_PCIWAY:
+ strcat(buf, " [pci/way]"); break;
+ case O2_MODE_D_IRQ_PCI:
+ strcat(buf, " [pci only]"); mask = 0; break;
+ }
+ }
+ if (s->flags & IS_CARDBUS) {
+ if (p->mode_d & O2_MODE_D_W97_IRQ)
+ strcat(buf, " [win97]");
+ }
+ return mask;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for the Toshiba
+ ToPIC 95 and 97 controllers, and to set and report global
+ configuration options.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+/* Snapshot Toshiba ToPIC bridge global state: slot/card/detect
+ registers from PCI config space plus the indexed function
+ control register. */
+static void __init topic_get_state(socket_info_t *s)
+{
+ topic_state_t *p = &s->state.topic;
+ pci_readb(s, TOPIC_SLOT_CONTROL, &p->slot);
+ pci_readb(s, TOPIC_CARD_CONTROL, &p->ccr);
+ pci_readb(s, TOPIC_CARD_DETECT, &p->cdr);
+ pci_readl(s, TOPIC_REGISTER_CONTROL, &p->rcr);
+ p->fcr = i365_get(s, TOPIC_FUNCTION_CONTROL);
+}
+
+/* Restore the saved ToPIC global state, then nudge the CardBus
+ socket: clear the control register if no 32-bit card is seen,
+ and re-run voltage sense if it has not completed. */
+static void topic_set_state(socket_info_t *s)
+{
+ topic_state_t *p = &s->state.topic;
+ u_int state;
+ pci_writeb(s, TOPIC_SLOT_CONTROL, p->slot);
+ pci_writeb(s, TOPIC_CARD_CONTROL, p->ccr);
+ pci_writeb(s, TOPIC_CARD_DETECT, p->cdr);
+ pci_writel(s, TOPIC_REGISTER_CONTROL, p->rcr);
+ i365_set(s, TOPIC_FUNCTION_CONTROL, p->fcr);
+ state = cb_readl(s, CB_SOCKET_STATE);
+ if (!(state & CB_SS_32BIT))
+ cb_writel(s, CB_SOCKET_CONTROL, 0);
+ if (!(state & CB_SS_VSENSE))
+ cb_writel(s, CB_SOCKET_FORCE, CB_SF_CVSTEST);
+}
+
+/* Force a sane baseline configuration on a ToPIC bridge (slot
+ enabled, PC-card 32 mode, 3V enabled, irq select) and dump the
+ register values into 'buf'.  All ISA irqs remain usable. */
+static u_int __init topic_set_opts(socket_info_t *s, char *buf)
+{
+ topic_state_t *p = &s->state.topic;
+
+ p->slot |= TOPIC_SLOT_SLOTON|TOPIC_SLOT_SLOTEN;
+ p->slot &= ~TOPIC_SLOT_ID_LOCK;
+ p->cdr |= TOPIC_CDR_MODE_PC32;
+ p->cdr &= ~(TOPIC_CDR_SW_DETECT);
+ p->ccr |= TOPIC97_ICR_IRQSEL;
+ p->fcr |= TOPIC_FCR_3V_ENA;
+ sprintf(buf, " [slot 0x%02x] [ccr 0x%02x] [cdr 0x%02x] [rcr 0x%02x]",
+ p->slot, p->ccr, p->cdr, p->rcr);
+ return 0xffff;
+}
+
+#endif
+
+/*======================================================================
+
+ Routines to handle common CardBus options
+
+======================================================================*/
+
+/* Default settings for PCI command configuration register */
+#define CMD_DFLT (PCI_COMMAND_IO|PCI_COMMAND_MEMORY| \
+ PCI_COMMAND_MASTER|PCI_COMMAND_WAIT)
+
+#ifdef CONFIG_PCI
+
+/* Snapshot generic CardBus bridge state from PCI config space
+ (cache line size, latency timers, bus numbers, bridge control)
+ and resolve the bridge's PCI interrupt. */
+static void __init cb_get_state(socket_info_t *s)
+{
+ pci_readb(s, PCI_CACHE_LINE_SIZE, &s->cache);
+ pci_readb(s, PCI_LATENCY_TIMER, &s->pci_lat);
+ pci_readb(s, CB_LATENCY_TIMER, &s->cb_lat);
+ pci_readb(s, CB_CARDBUS_BUS, &s->cap.cardbus);
+ pci_readb(s, CB_SUBORD_BUS, &s->sub_bus);
+ pci_readw(s, CB_BRIDGE_CONTROL, &s->bcr);
+ get_pci_irq(s);
+}
+
+/* Restore generic CardBus bridge state: wake the device to D0,
+ disable legacy-mode register access, re-point the register
+ window, and rewrite the saved config-space values. */
+static void cb_set_state(socket_info_t *s)
+{
+ pci_set_power_state(pci_find_slot(s->bus, s->devfn), 0);
+ pci_writel(s, CB_LEGACY_MODE_BASE, 0);
+ pci_writel(s, PCI_BASE_ADDRESS_0, s->cb_phys);
+ pci_writew(s, PCI_COMMAND, CMD_DFLT);
+ pci_writeb(s, PCI_CACHE_LINE_SIZE, s->cache);
+ pci_writeb(s, PCI_LATENCY_TIMER, s->pci_lat);
+ pci_writeb(s, CB_LATENCY_TIMER, s->cb_lat);
+ pci_writeb(s, CB_CARDBUS_BUS, s->cap.cardbus);
+ pci_writeb(s, CB_SUBORD_BUS, s->sub_bus);
+ pci_writew(s, CB_BRIDGE_CONTROL, s->bcr);
+}
+
+/* Report whether the bridge currently routes card interrupts to
+ PCI: true when the ISA-IRQ bit in bridge control is clear. */
+static int cb_get_irq_mode(socket_info_t *s)
+{
+ if (s->bcr & CB_BCR_ISA_IRQ)
+ return 0;
+ return 1;
+}
+
+/* Select PCI vs ISA interrupt routing on a CardBus bridge, then
+ delegate chip-specific routing to the Cirrus or TI handler.
+ Returns nonzero when ISA status-change irqs are NOT available. */
+static int cb_set_irq_mode(socket_info_t *s, int pcsc, int pint)
+{
+ flip(s->bcr, CB_BCR_ISA_IRQ, !(pint));
+ if (s->flags & IS_CIRRUS)
+ return cirrus_set_irq_mode(s, pcsc, pint);
+ else if (s->flags & IS_TI)
+ return ti113x_set_irq_mode(s, pcsc, pint);
+ /* By default, assume that we can't do ISA status irqs */
+ return (!pcsc);
+}
+
+/* Apply generic CardBus options: write posting, default latency
+ and cache-line values, page-register capability.  Appends a
+ summary of irq, latency, and bus numbers to 'buf'. */
+static void __init cb_set_opts(socket_info_t *s, char *buf)
+{
+ s->bcr |= CB_BCR_WRITE_POST;
+ /* some TI1130's seem to exhibit problems with write posting */
+ if (((s->type == IS_TI1130) && (s->revision == 4) &&
+ (cb_write_post < 0)) || (cb_write_post == 0))
+ s->bcr &= ~CB_BCR_WRITE_POST;
+ /* fill in sane defaults where the BIOS left zeros */
+ if (s->cache == 0) s->cache = 8;
+ if (s->pci_lat == 0) s->pci_lat = 0xa8;
+ if (s->cb_lat == 0) s->cb_lat = 0xb0;
+ if (s->cap.pci_irq == 0)
+ strcat(buf, " [no pci irq]");
+ else
+ sprintf(buf, " [pci irq %d]", s->cap.pci_irq);
+ buf += strlen(buf);
+ if (!(s->flags & IS_TOPIC))
+ s->cap.features |= SS_CAP_PAGE_REGS;
+ sprintf(buf, " [lat %d/%d] [bus %d/%d]",
+ s->pci_lat, s->cb_lat, s->cap.cardbus, s->sub_bus);
+}
+
+#endif
+
+/*======================================================================
+
+ Power control for Cardbus controllers: used both for 16-bit and
+ Cardbus cards.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+/* Decode the current Vcc/Vpp settings (in tenths of a volt:
+ 33 = 3.3V, 50 = 5V, 120 = 12V) from the CardBus socket control
+ register into 'state'.  Unknown encodings read back as 0. */
+static void cb_get_power(socket_info_t *s, socket_state_t *state)
+{
+ u_int reg = cb_readl(s, CB_SOCKET_CONTROL);
+ state->Vcc = state->Vpp = 0;
+ switch (reg & CB_SC_VCC_MASK) {
+ case CB_SC_VCC_3V: state->Vcc = 33; break;
+ case CB_SC_VCC_5V: state->Vcc = 50; break;
+ }
+ switch (reg & CB_SC_VPP_MASK) {
+ case CB_SC_VPP_3V: state->Vpp = 33; break;
+ case CB_SC_VPP_5V: state->Vpp = 50; break;
+ case CB_SC_VPP_12V: state->Vpp = 120; break;
+ }
+}
+
+/* Program socket power via the CardBus socket control register.
+ Vcc/Vpp are in tenths of a volt (0, 33, 50, 120); returns
+ -EINVAL for unsupported values.  Powering fully off also
+ restarts card voltage sensing if it has not completed. */
+static int cb_set_power(socket_info_t *s, socket_state_t *state)
+{
+ u_int reg = 0;
+ /* restart card voltage detection if it seems appropriate */
+ if ((state->Vcc == 0) && (state->Vpp == 0) &&
+ !(cb_readl(s, CB_SOCKET_STATE) & CB_SS_VSENSE))
+ cb_writel(s, CB_SOCKET_FORCE, CB_SF_CVSTEST);
+ switch (state->Vcc) {
+ case 0: reg = 0; break;
+ case 33: reg = CB_SC_VCC_3V; break;
+ case 50: reg = CB_SC_VCC_5V; break;
+ default: return -EINVAL;
+ }
+ switch (state->Vpp) {
+ case 0: break;
+ case 33: reg |= CB_SC_VPP_3V; break;
+ case 50: reg |= CB_SC_VPP_5V; break;
+ case 120: reg |= CB_SC_VPP_12V; break;
+ default: return -EINVAL;
+ }
+ /* avoid a redundant hardware write when nothing changes */
+ if (reg != cb_readl(s, CB_SOCKET_CONTROL))
+ cb_writel(s, CB_SOCKET_CONTROL, reg);
+ return 0;
+}
+
+#endif
+
+/*======================================================================
+
+ Generic routines to get and set controller options
+
+======================================================================*/
+
+/* Dispatch to the chip-specific state snapshot routine based on
+ the socket's flags; CardBus sockets additionally snapshot the
+ generic CardBus bridge registers. */
+static void __init get_bridge_state(socket_info_t *s)
+{
+ if (s->flags & IS_CIRRUS)
+ cirrus_get_state(s);
+#ifdef CONFIG_ISA
+ else if (s->flags & IS_VADEM)
+ vg46x_get_state(s);
+#endif
+#ifdef CONFIG_PCI
+ else if (s->flags & IS_O2MICRO)
+ o2micro_get_state(s);
+ else if (s->flags & IS_TI)
+ ti113x_get_state(s);
+ else if (s->flags & IS_RICOH)
+ ricoh_get_state(s);
+ else if (s->flags & IS_TOPIC)
+ topic_get_state(s);
+ if (s->flags & IS_CARDBUS)
+ cb_get_state(s);
+#endif
+}
+
+/* Restore bridge state: generic CardBus registers first, then the
+ chip-specific state.  Non-Cirrus chips get their global/general
+ control registers zeroed to a known baseline. */
+static void set_bridge_state(socket_info_t *s)
+{
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS)
+ cb_set_state(s);
+#endif
+ if (s->flags & IS_CIRRUS) {
+ cirrus_set_state(s);
+ } else {
+ i365_set(s, I365_GBLCTL, 0x00);
+ i365_set(s, I365_GENCTL, 0x00);
+ /* Trouble: changes timing of memory operations */
+ /* i365_bset(s, I365_ADDRWIN, I365_ADDR_MEMCS16); */
+ }
+ /* reflect the socket's interrupt-enable choice */
+ i365_bflip(s, I365_INTCTL, I365_INTR_ENA, s->intr);
+#ifdef CONFIG_ISA
+ if (s->flags & IS_VADEM)
+ vg46x_set_state(s);
+#endif
+#ifdef CONFIG_PCI
+ if (s->flags & IS_O2MICRO)
+ o2micro_set_state(s);
+ else if (s->flags & IS_TI)
+ ti113x_set_state(s);
+ else if (s->flags & IS_RICOH)
+ ricoh_set_state(s);
+ else if (s->flags & IS_TOPIC)
+ topic_set_state(s);
+#endif
+}
+
+/* Apply module options to 'ns' consecutive sockets starting at 's'.
+ For each socket: snapshot state, run the chip-specific option
+ handler (which fills 'buf' with a report), restore state, and log
+ the report.  Returns the intersection with the global PCI irq
+ exclusion mask; NOTE(review): 'm' keeps only the LAST socket's
+ mask when ns > 1 — presumably sockets on one chip agree. */
+static u_int __init set_bridge_opts(socket_info_t *s, u_short ns)
+{
+ u_short i;
+ u_int m = 0xffff;
+ char buf[128];
+
+ for (i = 0; i < ns; i++) {
+ if (s[i].flags & IS_ALIVE) {
+ printk(KERN_INFO " host opts [%d]: already alive!\n", i);
+ continue;
+ }
+ buf[0] = '\0';
+ get_bridge_state(s+i);
+ if (s[i].flags & IS_CIRRUS)
+ m = cirrus_set_opts(s+i, buf);
+#ifdef CONFIG_ISA
+ else if (s[i].flags & IS_VADEM)
+ m = vg46x_set_opts(s+i, buf);
+#endif
+#ifdef CONFIG_PCI
+ else if (s[i].flags & IS_O2MICRO)
+ m = o2micro_set_opts(s+i, buf);
+ else if (s[i].flags & IS_TI)
+ m = ti113x_set_opts(s+i, buf);
+ else if (s[i].flags & IS_RICOH)
+ m = ricoh_set_opts(s+i, buf);
+ else if (s[i].flags & IS_TOPIC)
+ m = topic_set_opts(s+i, buf);
+ if (s[i].flags & IS_CARDBUS)
+ cb_set_opts(s+i, buf+strlen(buf));
+#endif
+ set_bridge_state(s+i);
+ printk(KERN_INFO " host opts [%d]:%s\n", i,
+ (*buf) ? buf : " none");
+ }
+#ifdef CONFIG_PCI
+ m &= ~pci_irq_mask;
+#endif
+ return m;
+}
+
+/*======================================================================
+
+ Interrupt testing code, for ISA and PCI interrupts
+
+======================================================================*/
+
+static volatile u_int irq_hits, irq_shared;
+static volatile socket_info_t *irq_sock;
+
+/* Interrupt handler used during irq probing: counts hits on the
+ probed line and acknowledges the socket's status-change event so
+ the line can fire again.  If an exclusively-owned irq fires
+ continuously it is assumed wedged and gets disabled. */
+static void irq_count(int irq, void *dev, struct pt_regs *regs)
+{
+ irq_hits++;
+ DEBUG(2, "-> hit on irq %d\n", irq);
+ if (!irq_shared && (irq_hits > 100)) {
+ printk(KERN_INFO " PCI irq %d seems to be wedged!\n", irq);
+ disable_irq(irq);
+ return;
+ }
+#ifdef CONFIG_PCI
+ if (irq_sock->flags & IS_CARDBUS) {
+ cb_writel(irq_sock, CB_SOCKET_EVENT, -1);
+ } else
+#endif
+ /* reading CSC acknowledges the 16-bit status change */
+ i365_get((socket_info_t *)irq_sock, I365_CSC);
+ return;
+}
+
+/* Probe one interrupt line: install irq_count, trigger exactly one
+ status-change interrupt in software, and check the hit count.
+ For PCI irqs (which may be shared) success means "no spurious
+ hits"; for ISA irqs success means "exactly one hit".  Returns 0
+ on success, nonzero if the line is unusable.
+ NOTE: the MACH arm opens a brace closed by a later #ifdef MACH
+ block — the two preprocessor arms must stay in sync. */
+static u_int __init test_irq(socket_info_t *s, int irq, int pci)
+{
+ u_char csc = (pci) ? 0 : irq;
+
+#ifdef CONFIG_PNP_BIOS
+ extern int check_pnp_irq(int);
+ if (!pci && check_pnp_irq(irq)) return 1;
+#endif
+
+ DEBUG(2, " testing %s irq %d\n", pci ? "PCI" : "ISA", irq);
+ irq_sock = s; irq_shared = irq_hits = 0;
+ if (request_irq(irq, irq_count, 0, "scan", socket)) {
+ /* exclusive grab failed; for PCI retry as shared */
+ irq_shared++;
+ if (!pci || request_irq(irq, irq_count, SA_SHIRQ, "scan", socket))
+ return 1;
+ }
+ irq_hits = 0;
+
+#ifndef MACH
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
+#else
+ /* TODO: Is this really what we want? */
+ {
+ unsigned long flags;
+
+ save_flags(flags);
+
+ mdelay(1);
+#endif
+
+ if (irq_hits && !irq_shared) {
+ /* interrupt fired before we triggered anything */
+ free_irq(irq, socket);
+ DEBUG(2, " spurious hit!\n");
+ return 1;
+ }
+
+ /* Generate one interrupt */
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS) {
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ i365_set(s, I365_CSCINT, I365_CSC_STSCHG | (csc << 4));
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ cb_writel(s, CB_SOCKET_MASK, CB_SM_CSTSCHG);
+ cb_writel(s, CB_SOCKET_FORCE, CB_SE_CSTSCHG);
+ mdelay(1);
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ cb_writel(s, CB_SOCKET_MASK, 0);
+ } else
+#endif
+ {
+ i365_set(s, I365_CSCINT, I365_CSC_DETECT | (csc << 4));
+ i365_bset(s, I365_GENCTL, I365_CTL_SW_IRQ);
+ mdelay(1);
+ }
+
+#ifdef MACH
+ restore_flags(flags);
+ }
+
+#endif
+
+ free_irq(irq, socket);
+
+ /* mask all interrupts */
+ i365_set(s, I365_CSCINT, 0);
+ DEBUG(2, " hits = %d\n", irq_hits);
+
+ return pci ? (irq_hits == 0) : (irq_hits != 1);
+}
+
+#ifdef CONFIG_ISA
+/* Check whether 'irq' can be allocated with the given flags by
+ briefly requesting and releasing it.  Returns 0 if available,
+ -1 otherwise (or if the PnP BIOS has claimed the line). */
+static int _check_irq(int irq, int flags)
+{
+ int rc;
+#ifdef CONFIG_PNP_BIOS
+ extern int check_pnp_irq(int);
+ if ((flags != SA_SHIRQ) && check_pnp_irq(irq))
+ return -1;
+#endif
+ rc = request_irq(irq, irq_count, flags, "x", irq_count);
+ if (rc != 0)
+ return -1;
+ free_irq(irq, irq_count);
+ return 0;
+}
+
+/* Determine which ISA irqs in 'mask0' actually work for this
+ socket.  First tries an active scan (each candidate tested twice
+ with test_irq); if that yields nothing, falls back to listing
+ merely-unallocated irqs and enables status polling.  Prints the
+ result and returns the surviving irq bitmask. */
+static u_int __init isa_scan(socket_info_t *s, u_int mask0)
+{
+ u_int mask1 = 0;
+ int i;
+
+#ifdef CONFIG_PCI
+ /* Only scan if we can select ISA csc irq's */
+ if (!(s->flags & IS_CARDBUS) || (cb_set_irq_mode(s, 0, 0) == 0))
+#endif
+ if (do_scan) {
+ set_bridge_state(s);
+ i365_set(s, I365_CSCINT, 0);
+ /* first pass: collect irqs that respond */
+ for (i = 0; i < 16; i++)
+ if ((mask0 & (1 << i)) && (test_irq(s, i, 0) == 0))
+ mask1 |= (1 << i);
+ /* second pass: drop irqs that fail a retest */
+ for (i = 0; i < 16; i++)
+ if ((mask1 & (1 << i)) && (test_irq(s, i, 0) != 0))
+ mask1 ^= (1 << i);
+ }
+
+ printk(KERN_INFO " ISA irqs (");
+ /* we trust TI bridges to do this right */
+ if (mask1 || (s->flags & IS_TI)) {
+ printk("scanned");
+ } else {
+ /* Fallback: just find interrupts that aren't in use */
+ for (i = 0; i < 16; i++)
+ if ((mask0 & (1 << i)) && (_check_irq(i, 0) == 0))
+ mask1 |= (1 << i);
+ printk("default");
+ /* If scan failed, default to polled status */
+ if (!cs_irq && (poll_interval == 0)) poll_interval = HZ;
+ }
+ printk(") = ");
+
+ for (i = 0; i < 16; i++)
+ if (mask1 & (1<<i))
+ printk("%s%d", ((mask1 & ((1<<i)-1)) ? "," : ""), i);
+ if (mask1 == 0) printk("none!");
+
+ return mask1;
+}
+#endif /* CONFIG_ISA */
+
+#ifdef CONFIG_PCI
+/* Validate the socket's PCI interrupt.  For bridges we cannot (or
+ choose not to) actively test — Ricoh, non-CardBus, or scanning
+ disabled — just confirm the line is not wedged.  Otherwise run
+ test_irq twice and require both to pass.  Returns nonzero when
+ the PCI irq is usable. */
+static int __init pci_scan(socket_info_t *s)
+{
+ int ret;
+ if ((s->flags & IS_RICOH) || !(s->flags & IS_CARDBUS) || !do_scan) {
+ /* for PCI-to-PCMCIA bridges, just check for wedged irq */
+ irq_sock = s; irq_hits = 0;
+ if (request_irq(s->cap.pci_irq, irq_count, 0, "scan", socket))
+ return 1;
+ udelay(50);
+ free_irq(s->cap.pci_irq, socket);
+ return (!irq_hits);
+ }
+ cb_set_irq_mode(s, 1, 0);
+ set_bridge_state(s);
+ i365_set(s, I365_CSCINT, 0);
+ ret = ((test_irq(s, s->cap.pci_irq, 1) == 0) &&
+ (test_irq(s, s->cap.pci_irq, 1) == 0));
+ if (!ret)
+ printk(KERN_INFO " PCI irq %d test failed\n",
+ s->cap.pci_irq);
+ return ret;
+}
+#endif /* CONFIG_PCI */
+
+/*====================================================================*/
+
+#ifdef CONFIG_ISA
+
+/* Identify the controller chip behind ISA 'port' / socket 'sock'.
+ Probes the identification register, then applies vendor-specific
+ unlock/probe sequences for Vadem, Ricoh, Cirrus and VIA parts
+ (later probes override earlier guesses).  Returns an IS_* type
+ code, or -1 if nothing i82365-compatible responds. */
+static int __init isa_identify(u_short port, u_short sock)
+{
+ socket_info_t *s = socket+sockets;
+ u_char val;
+ int type = -1;
+
+ /* Use the next free entry in the socket table */
+ s->ioaddr = port;
+ s->psock = sock;
+
+ /* Wake up a sleepy Cirrus controller */
+ if (wakeup) {
+ i365_bclr(s, PD67_MISC_CTL_2, PD67_MC2_SUSPEND);
+ /* Pause at least 50 ms */
+ mdelay(50);
+ }
+
+ /* high nibble 0x3x is the only valid ident family */
+ if ((val = i365_get(s, I365_IDENT)) & 0x70)
+ return -1;
+ switch (val) {
+ case 0x82:
+ type = IS_I82365A; break;
+ case 0x83:
+ type = IS_I82365B; break;
+ case 0x84:
+ type = IS_I82365DF; break;
+ case 0x88: case 0x89: case 0x8a:
+ type = IS_IBM; break;
+ }
+
+ /* Check for Vadem VG-468 chips */
+ /* magic 0x0e/0x37 sequence unlocks the Vadem register set */
+ outb(0x0e, port);
+ outb(0x37, port);
+ i365_bset(s, VG468_MISC, VG468_MISC_VADEMREV);
+ val = i365_get(s, I365_IDENT);
+ if (val & I365_IDENT_VADEM) {
+ i365_bclr(s, VG468_MISC, VG468_MISC_VADEMREV);
+ type = ((val & 7) >= 4) ? IS_VG469 : IS_VG468;
+ }
+
+ /* Check for Ricoh chips */
+ val = i365_get(s, RF5C_CHIP_ID);
+ if ((val == RF5C_CHIP_RF5C296) || (val == RF5C_CHIP_RF5C396))
+ type = IS_RF5Cx96;
+
+ /* Check for Cirrus CL-PD67xx chips */
+ /* the chip-info register toggles its ID bits on repeated reads */
+ i365_set(s, PD67_CHIP_INFO, 0);
+ val = i365_get(s, PD67_CHIP_INFO);
+ if ((val & PD67_INFO_CHIP_ID) == PD67_INFO_CHIP_ID) {
+ val = i365_get(s, PD67_CHIP_INFO);
+ if ((val & PD67_INFO_CHIP_ID) == 0) {
+ type = (val & PD67_INFO_SLOTS) ? IS_PD672X : IS_PD6710;
+ /* VIA clones lack the writable extension index */
+ i365_set(s, PD67_EXT_INDEX, 0xe5);
+ if (i365_get(s, PD67_EXT_INDEX) != 0xe5)
+ type = IS_VT83C469;
+ }
+ }
+ return type;
+} /* isa_identify */
+
+#endif
+
+/*======================================================================
+
+ See if a card is present, powered up, in IO mode, and already
+ bound to a (non PC Card) Linux driver. We leave these alone.
+
+ We make an exception for cards that seem to be serial devices.
+
+======================================================================*/
+
+/* Heuristically detect a socket whose card is already powered up,
+ in I/O mode, with a plausible non-serial I/O window that some
+ other (non PC Card) driver has claimed.  Returns 1 to mean
+ "leave this socket alone".  The 0xfeef/0x02e8 test excludes
+ windows that look like serial-port addresses. */
+static int __init is_alive(socket_info_t *s)
+{
+ u_char stat;
+ u_short start, stop;
+
+ stat = i365_get(s, I365_STATUS);
+ start = i365_get_pair(s, I365_IO(0)+I365_W_START);
+ stop = i365_get_pair(s, I365_IO(0)+I365_W_STOP);
+ if ((stop - start < 0x40) && (stop - start >= 0x07) &&
+ ((start & 0xfeef) != 0x02e8) && (start >= 0x100) &&
+ (stat & I365_CS_DETECT) && (stat & I365_CS_POWERON) &&
+ (i365_get(s, I365_INTCTL) & I365_PC_IOCARD) &&
+ (i365_get(s, I365_ADDRWIN) & I365_ENA_IO(0)) &&
+ (check_region(start, stop-start+1) != 0))
+ return 1;
+ else
+ return 0;
+}
+
+/*====================================================================*/
+
+/* Register one socket in the global socket table: record its port,
+ sub-socket number and chip type, copy the type's default flags,
+ and mark it IS_ALIVE if another driver already owns its card. */
+static void __init add_socket(u_int port, int psock, int type)
+{
+ socket_info_t *s = socket+sockets;
+ s->ioaddr = port;
+ s->psock = psock;
+ s->type = type;
+ s->flags = pcic[type].flags;
+ if (is_alive(s))
+ s->flags |= IS_ALIVE;
+ sockets++;
+}
+
+/* Finish bringing up a controller with 'ns' freshly added sockets:
+ reserve its I/O region, log its identity, apply bridge options,
+ probe PCI and ISA interrupts, pick a status-change irq (or fall
+ back to polling), and fill in each socket's capabilities. */
+static void __init add_pcic(int ns, int type)
+{
+ u_int mask = 0, i;
+ int use_pci = 0, isa_irq = 0;
+ socket_info_t *s = &socket[sockets-ns];
+
+ if (s->ioaddr > 0) request_region(s->ioaddr, 2, "i82365");
+
+ printk(KERN_INFO " %s", pcic[type].name);
+#ifdef CONFIG_PCI
+ if (s->flags & IS_UNKNOWN)
+ printk(" [%04x %04x]", s->vendor, s->device);
+ printk(" rev %02x", s->revision);
+ if (s->flags & IS_CARDBUS)
+ printk(" PCI-to-CardBus at slot %02x:%02x, mem %#08x\n",
+ s->bus, PCI_SLOT(s->devfn), s->cb_phys);
+ else if (s->flags & IS_PCI)
+ printk(" PCI-to-PCMCIA at slot %02x:%02x, port %#x\n",
+ s->bus, PCI_SLOT(s->devfn), s->ioaddr);
+ else
+#endif
+ printk(" ISA-to-PCMCIA at port %#x ofs 0x%02x\n",
+ s->ioaddr, s->psock*0x40);
+
+#ifdef CONFIG_ISA
+ /* build the candidate irq mask from module parameters */
+ if (irq_list[0] == -1)
+ mask = irq_mask;
+ else
+ for (i = mask = 0; i < 16; i++)
+ mask |= (1<<irq_list[i]);
+#endif
+ /* Set host options, build basic interrupt mask */
+ mask &= I365_ISA_IRQ_MASK & set_bridge_opts(s, ns);
+
+#ifdef CONFIG_PCI
+ /* Can we use PCI interrupts for card status changes? */
+ if (pci_csc || pci_int) {
+ for (i = 0; i < ns; i++)
+ if (!s[i].cap.pci_irq || !pci_scan(&s[i])) break;
+ use_pci = (i == ns);
+ }
+#endif
+#ifdef CONFIG_ISA
+ /* Scan, report ISA card interrupts */
+ if (mask)
+ mask = isa_scan(s, mask);
+#endif
+
+#ifdef CONFIG_PCI
+ if (!mask)
+ printk(KERN_INFO " %s card interrupts,",
+ (use_pci && pci_int) ? "PCI" : "*NO*");
+ if (use_pci && pci_csc)
+ printk(" PCI status changes\n");
+#endif
+
+#ifdef CONFIG_ISA
+ /* Poll if only two sensible interrupts available */
+ /* tmp&(tmp-1) twice checks for fewer than 3 set bits */
+ if (!(use_pci && pci_csc) && !poll_interval) {
+ u_int tmp = (mask & 0xff20);
+ tmp = tmp & (tmp-1);
+ if ((tmp & (tmp-1)) == 0)
+ poll_interval = HZ;
+ }
+ /* Only try an ISA cs_irq if this is the first controller */
+ if (!(use_pci && pci_csc) && !grab_irq &&
+ (cs_irq || !poll_interval)) {
+ /* Avoid irq 12 unless it is explicitly requested */
+ u_int cs_mask = mask & ((cs_irq) ? (1<<cs_irq) : ~(1<<12));
+ for (isa_irq = 15; isa_irq > 0; isa_irq--)
+ if (cs_mask & (1 << isa_irq)) break;
+ if (isa_irq) {
+ grab_irq = 1;
+ cs_irq = isa_irq;
+ printk(" status change on irq %d\n", isa_irq);
+ }
+ }
+#endif
+
+ if (!(use_pci && pci_csc) && !isa_irq) {
+ if (poll_interval == 0)
+ poll_interval = HZ;
+ printk(" polling interval = %d ms\n", poll_interval*1000/HZ);
+ }
+
+ /* Update socket interrupt information, capabilities */
+ for (i = 0; i < ns; i++) {
+ s[i].cap.features |= SS_CAP_PCCARD;
+ s[i].cap.map_size = 0x1000;
+ s[i].cap.irq_mask = mask;
+ if (!use_pci)
+ s[i].cap.pci_irq = 0;
+ s[i].cs_irq = isa_irq;
+#ifdef CONFIG_PCI
+ if (s[i].flags & IS_CARDBUS) {
+ s[i].cap.features |= SS_CAP_CARDBUS;
+ cb_set_irq_mode(s+i, pci_csc && s[i].cap.pci_irq, 0);
+ }
+#endif
+ }
+
+} /* add_pcic */
+
+/*====================================================================*/
+
+#ifdef CONFIG_PCI
+
+/* Find the next PCI device of the given class, continuing from
+ '*id'.  On success stores the device, bus and devfn through the
+ out-parameters and returns 0; on failure '*id' becomes NULL and
+ -1 is returned. */
+static int __init pci_lookup(u_int class, struct pci_dev **id,
+ u_char *bus, u_char *devfn)
+{
+ struct pci_dev *dev = pci_find_class(class<<8, *id);
+ *id = dev;
+ if (dev == NULL)
+ return -1;
+ *bus = dev->bus->number;
+ *devfn = dev->devfn;
+ return 0;
+}
+
+static void __init add_pci_bridge(int type, u_short v, u_short d)
+{
+ socket_info_t *s = &socket[sockets];
+ u_int addr, ns;
+
+ pci_enable_device(pci_find_slot(s->bus, s->devfn));
+ pci_writew(s, PCI_COMMAND, CMD_DFLT);
+
+ if (type == PCIC_COUNT) type = IS_UNK_PCI;
+ pci_readl(s, PCI_BASE_ADDRESS_0, &addr);
+ addr &= ~0x1;
+ for (ns = 0; ns < ((type == IS_I82092AA) ? 4 : 2); ns++) {
+ s[ns].bus = s->bus; s[ns].devfn = s->devfn;
+ s[ns].vendor = v; s[ns].device = d;
+ add_socket(addr, ns, type);
+ }
+ add_pcic(ns, type);
+}
+
+static int check_cb_mapping(socket_info_t *s)
+{
+ u_int state = cb_readl(s, CB_SOCKET_STATE) >> 16;
+ /* A few sanity checks to validate the bridge mapping */
+ if ((cb_readb(s, 0x800+I365_IDENT) & 0x70) ||
+ (cb_readb(s, 0x800+I365_CSC) && cb_readb(s, 0x800+I365_CSC) &&
+ cb_readb(s, 0x800+I365_CSC)) || cb_readl(s, CB_SOCKET_FORCE) ||
+ ((state & ~0x3000) || !(state & 0x3000)))
+ return 1;
+ return 0;
+}
+
+static void __init add_cb_bridge(int type, u_short v, u_short d0)
+{
+ socket_info_t *s = &socket[sockets];
+ u_char bus = s->bus, devfn = s->devfn;
+ u_short d, ns;
+ u_char a, r, max;
+
+ /* PCI bus enumeration is broken on some systems */
+ for (ns = 0; ns < sockets; ns++)
+ if ((socket[ns].bus == bus) &&
+ (socket[ns].devfn == devfn))
+ return;
+
+ if (type == PCIC_COUNT) type = IS_UNK_CARDBUS;
+ pci_readb(s, PCI_HEADER_TYPE, &a);
+ pci_readb(s, PCI_CLASS_REVISION, &r);
+ max = (a & 0x80) ? 8 : 1;
+ for (ns = 0; ns < max; ns++, s++, devfn++) {
+ s->bus = bus; s->devfn = devfn;
+ if (pci_readw(s, PCI_DEVICE_ID, &d) || (d != d0))
+ break;
+ s->vendor = v; s->device = d; s->revision = r;
+
+ pci_enable_device(pci_find_slot(bus, devfn));
+ pci_set_power_state(pci_find_slot(bus, devfn), 0);
+ pci_writew(s, PCI_COMMAND, CMD_DFLT);
+
+ /* Set up CardBus register mapping */
+ pci_writel(s, CB_LEGACY_MODE_BASE, 0);
+ pci_readl(s, PCI_BASE_ADDRESS_0, &s->cb_phys);
+ if (s->cb_phys == 0) {
+ printk("\n" KERN_NOTICE " Bridge register mapping failed:"
+ " check cb_mem_base setting\n");
+ break;
+ }
+ s->cb_virt = ioremap(s->cb_phys, 0x1000);
+ if (check_cb_mapping(s) != 0) {
+ printk("\n" KERN_NOTICE " Bad bridge mapping at "
+ "0x%08x!\n", s->cb_phys);
+ break;
+ }
+
+ request_mem_region(s->cb_phys, 0x1000, "i82365");
+ add_socket(0, 0, type);
+ }
+ if (ns == 0) return;
+
+ add_pcic(ns, type);
+
+ /* Look up PCI bus bridge structures if needed */
+ s -= ns;
+ for (a = 0; a < ns; a++) {
+ struct pci_dev *self = pci_find_slot(bus, s[a].devfn);
+#if (LINUX_VERSION_CODE >= VERSION(2,3,40))
+ s[a].cap.cb_bus = self->subordinate;
+#else
+ struct pci_bus *child;
+ for (child = self->bus->children; child; child = child->next)
+ if (child->number == s[a].cap.cardbus) break;
+ s[a].cap.cb_bus = child;
+#endif
+ }
+}
+
+static void __init pci_probe(u_int class)
+{
+ socket_info_t *s = &socket[sockets];
+ u_short i, v, d;
+ struct pci_dev *id;
+
+ id = 0;
+ while (pci_lookup(class, &id, &s->bus, &s->devfn) == 0) {
+ if (PCI_FUNC(s->devfn) != 0) continue;
+ pci_readw(s, PCI_VENDOR_ID, &v);
+ pci_readw(s, PCI_DEVICE_ID, &d);
+ for (i = 0; i < PCIC_COUNT; i++)
+ if ((pcic[i].vendor == v) && (pcic[i].device == d)) break;
+ /* The "ToPIC95-A" is unusable as a CardBus bridge */
+ if (i == IS_TOPIC95_A)
+ continue;
+ if (((i < PCIC_COUNT) && (pcic[i].flags & IS_CARDBUS)) ||
+ (class == PCI_CLASS_BRIDGE_CARDBUS))
+ add_cb_bridge(i, v, d);
+ else
+ add_pci_bridge(i, v, d);
+ s = &socket[sockets];
+ }
+}
+
+#endif
+
+/*====================================================================*/
+
+#ifdef CONFIG_ISA
+
+static void __init isa_probe(ioaddr_t base)
+{
+ int i, j, sock, k, ns, id;
+ ioaddr_t port;
+
+ if (check_region(base, 2) != 0) {
+ if (sockets == 0)
+ printk("port conflict at %#x\n", base);
+ return;
+ }
+
+ id = isa_identify(base, 0);
+ if ((id == IS_I82365DF) && (isa_identify(base, 1) != id)) {
+ for (i = 0; i < 4; i++) {
+ if (i == ignore) continue;
+ port = base + ((i & 1) << 2) + ((i & 2) << 1);
+ sock = (i & 1) << 1;
+ if (isa_identify(port, sock) == IS_I82365DF) {
+ add_socket(port, sock, IS_VLSI);
+ add_pcic(1, IS_VLSI);
+ }
+ }
+ } else {
+ for (i = 0; i < 4; i += 2) {
+ port = base + 2*(i>>2);
+ sock = (i & 3);
+ id = isa_identify(port, sock);
+ if (id < 0) continue;
+
+ for (j = ns = 0; j < 2; j++) {
+ /* Does the socket exist? */
+ if ((ignore == i+j) || (isa_identify(port, sock+j) < 0))
+ continue;
+ /* Check for bad socket decode */
+ for (k = 0; k <= sockets; k++)
+ i365_set(socket+k, I365_MEM(0)+I365_W_OFF, k);
+ for (k = 0; k <= sockets; k++)
+ if (i365_get(socket+k, I365_MEM(0)+I365_W_OFF) != k)
+ break;
+ if (k <= sockets) break;
+ add_socket(port, sock+j, id); ns++;
+ }
+ if (ns != 0) add_pcic(ns, id);
+ }
+ }
+}
+
+#endif
+
+/*======================================================================
+
+ The card status event handler. This may either be interrupt
+ driven or polled. It monitors mainly for card insert and eject
+ events; there are various other kinds of events that can be
+ monitored (ready/busy, status change, etc), but they are almost
+ never used.
+
+======================================================================*/
+
+static void pcic_interrupt(int irq, void *dev, struct pt_regs *regs)
+{
+ int i, j, csc;
+ u_int events, active;
+#ifdef CONFIG_ISA
+ u_long flags = 0;
+#endif
+
+ DEBUG(2, "i82365: pcic_interrupt(%d)\n", irq);
+
+ for (j = 0; j < 20; j++) {
+ active = 0;
+ for (i = 0; i < sockets; i++) {
+ socket_info_t *s = &socket[i];
+ if ((s->cs_irq != irq) && (s->cap.pci_irq != irq))
+ continue;
+ ISA_LOCK(s, flags);
+ csc = i365_get(s, I365_CSC);
+#ifdef CONFIG_PCI
+ if ((s->flags & IS_CARDBUS) &&
+ (cb_readl(s, CB_SOCKET_EVENT) & CB_SE_CCD)) {
+ cb_writel(s, CB_SOCKET_EVENT, CB_SE_CCD);
+ csc |= I365_CSC_DETECT;
+ }
+#endif
+ if ((csc == 0) || (!s->handler) ||
+ (i365_get(s, I365_IDENT) & 0x70)) {
+ ISA_UNLOCK(s, flags);
+ continue;
+ }
+ events = (csc & I365_CSC_DETECT) ? SS_DETECT : 0;
+ if (i365_get(s, I365_INTCTL) & I365_PC_IOCARD) {
+ events |= (csc & I365_CSC_STSCHG) ? SS_STSCHG : 0;
+ } else {
+ events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0;
+ events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0;
+ events |= (csc & I365_CSC_READY) ? SS_READY : 0;
+ }
+ ISA_UNLOCK(s, flags);
+ DEBUG(1, "i82365: socket %d event 0x%04x\n", i, events);
+ if (events)
+ s->handler(s->info, events);
+ active |= events;
+ }
+ if (!active) break;
+ }
+ if (j == 20)
+ printk(KERN_NOTICE "i82365: infinite loop in interrupt "
+ "handler: active = 0x%04x\n", active);
+
+ DEBUG(2, "i82365: interrupt done\n");
+} /* pcic_interrupt */
+
+static void pcic_interrupt_wrapper(u_long data)
+{
+ pcic_interrupt(0, NULL, NULL);
+ poll_timer.expires = jiffies + poll_interval;
+ add_timer(&poll_timer);
+}
+
+/*====================================================================*/
+
+static int pcic_register_callback(socket_info_t *s, ss_callback_t *call)
+{
+ if (call == NULL) {
+ s->handler = NULL;
+ MOD_DEC_USE_COUNT;
+ } else {
+ MOD_INC_USE_COUNT;
+ s->handler = call->handler;
+ s->info = call->info;
+ }
+ return 0;
+} /* pcic_register_callback */
+
+/*====================================================================*/
+
+static int pcic_inquire_socket(socket_info_t *s, socket_cap_t *cap)
+{
+ *cap = s->cap;
+ return 0;
+}
+
+/*====================================================================*/
+
+static int i365_get_status(socket_info_t *s, u_int *value)
+{
+ u_int status;
+
+ status = i365_get(s, I365_STATUS);
+ *value = ((status & I365_CS_DETECT) == I365_CS_DETECT)
+ ? SS_DETECT : 0;
+ if (i365_get(s, I365_INTCTL) & I365_PC_IOCARD) {
+ *value |= (status & I365_CS_STSCHG) ? 0 : SS_STSCHG;
+ } else {
+ *value |= (status & I365_CS_BVD1) ? 0 : SS_BATDEAD;
+ *value |= (status & I365_CS_BVD2) ? 0 : SS_BATWARN;
+ }
+ *value |= (status & I365_CS_WRPROT) ? SS_WRPROT : 0;
+ *value |= (status & I365_CS_READY) ? SS_READY : 0;
+ *value |= (status & I365_CS_POWERON) ? SS_POWERON : 0;
+
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS) {
+ status = cb_readl(s, CB_SOCKET_STATE);
+ *value |= (status & CB_SS_32BIT) ? SS_CARDBUS : 0;
+ *value |= (status & CB_SS_3VCARD) ? SS_3VCARD : 0;
+ *value |= (status & CB_SS_XVCARD) ? SS_XVCARD : 0;
+ *value |= (status & CB_SS_VSENSE) ? 0 : SS_PENDING;
+ } else if (s->flags & IS_O2MICRO) {
+ status = i365_get(s, O2_MODE_B);
+ *value |= (status & O2_MODE_B_VS1) ? 0 : SS_3VCARD;
+ *value |= (status & O2_MODE_B_VS2) ? 0 : SS_XVCARD;
+ }
+#endif
+ if ((s->flags & IS_CIRRUS) &&
+ ((s->flags & IS_PCI) || has_vsense)) {
+ socket_info_t *t = (s->psock) ? s : s+1;
+ status = pd67_ext_get(t, PD67_EXTERN_DATA);
+ *value |= (status & PD67_EXD_VS1(s->psock)) ? 0 : SS_3VCARD;
+ *value |= (status & PD67_EXD_VS2(s->psock)) ? 0 : SS_XVCARD;
+ }
+#ifdef CONFIG_ISA
+ if (s->type == IS_VG469) {
+ status = i365_get(s, VG469_VSENSE);
+ if (s->psock & 1) {
+ *value |= (status & VG469_VSENSE_B_VS1) ? 0 : SS_3VCARD;
+ *value |= (status & VG469_VSENSE_B_VS2) ? 0 : SS_XVCARD;
+ } else {
+ *value |= (status & VG469_VSENSE_A_VS1) ? 0 : SS_3VCARD;
+ *value |= (status & VG469_VSENSE_A_VS2) ? 0 : SS_XVCARD;
+ }
+ }
+#endif
+ /* For now, ignore cards with unsupported voltage keys */
+ if (*value & SS_XVCARD)
+ *value &= ~(SS_DETECT|SS_3VCARD|SS_XVCARD);
+ DEBUG(1, "i82365: GetStatus(%d) = %#4.4x\n", s-socket, *value);
+ return 0;
+} /* i365_get_status */
+
+/*====================================================================*/
+
+static int i365_get_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_char reg, vcc, vpp;
+
+ reg = i365_get(s, I365_POWER);
+ state->flags = (reg & I365_PWR_AUTO) ? SS_PWR_AUTO : 0;
+ state->flags |= (reg & I365_PWR_OUT) ? SS_OUTPUT_ENA : 0;
+ vcc = reg & I365_VCC_MASK; vpp = reg & I365_VPP1_MASK;
+ state->Vcc = state->Vpp = 0;
+#ifdef CONFIG_PCI
+ if ((s->flags & IS_CARDBUS) && !(s->flags & IS_TOPIC)) {
+ cb_get_power(s, state);
+ } else
+#endif
+ {
+ if ((s->flags & IS_CIRRUS) && (reg & I365_VCC_5V)) {
+ state->Vcc = (i365_get(s, PD67_MISC_CTL_1) &
+ PD67_MC1_VCC_3V) ? 33 : 50;
+ } else if ((s->flags & IS_VG_PWR) && (reg & I365_VCC_5V)) {
+ state->Vcc = (i365_get(s, VG469_VSELECT) &
+ VG469_VSEL_VCC) ? 33 : 50;
+ } else if ((s->flags & IS_DF_PWR) || (s->flags & IS_TOPIC)) {
+ if (vcc == I365_VCC_3V) state->Vcc = 33;
+ if (vcc == I365_VCC_5V) state->Vcc = 50;
+ } else {
+ if (reg & I365_VCC_5V) state->Vcc = 50;
+ }
+ if (vpp == I365_VPP1_5V)
+ state->Vpp = (s->flags & IS_DF_PWR) ? 50 : state->Vcc;
+ if (vpp == I365_VPP1_12V) state->Vpp = 120;
+ }
+
+ /* IO card, RESET flags, IO interrupt */
+ reg = i365_get(s, I365_INTCTL);
+ state->flags |= (reg & I365_PC_RESET) ? 0 : SS_RESET;
+ state->flags |= (reg & I365_PC_IOCARD) ? SS_IOCARD : 0;
+#ifdef CONFIG_PCI
+ if (cb_get_irq_mode(s) != 0)
+ state->io_irq = s->cap.pci_irq;
+ else
+#endif
+ state->io_irq = reg & I365_IRQ_MASK;
+
+ /* Card status change mask */
+ reg = i365_get(s, I365_CSCINT);
+ state->csc_mask = (reg & I365_CSC_DETECT) ? SS_DETECT : 0;
+ if (state->flags & SS_IOCARD) {
+ state->csc_mask |= (reg & I365_CSC_STSCHG) ? SS_STSCHG : 0;
+ } else {
+ state->csc_mask |= (reg & I365_CSC_BVD1) ? SS_BATDEAD : 0;
+ state->csc_mask |= (reg & I365_CSC_BVD2) ? SS_BATWARN : 0;
+ state->csc_mask |= (reg & I365_CSC_READY) ? SS_READY : 0;
+ }
+
+ DEBUG(2, "i82365: GetSocket(%d) = flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+ return 0;
+} /* i365_get_socket */
+
+/*====================================================================*/
+
+static int i365_set_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_char reg;
+
+ DEBUG(2, "i82365: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x)\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+
+ /* First set global controller options */
+#ifdef CONFIG_PCI
+ if (s->cap.pci_irq)
+ cb_set_irq_mode(s, pci_csc, (s->cap.pci_irq == state->io_irq));
+ s->bcr &= ~CB_BCR_CB_RESET;
+#endif
+ set_bridge_state(s);
+
+ /* IO card, RESET flag, IO interrupt */
+ reg = s->intr | ((state->io_irq == s->cap.pci_irq) ?
+ s->pci_irq_code : state->io_irq);
+ reg |= (state->flags & SS_RESET) ? 0 : I365_PC_RESET;
+ reg |= (state->flags & SS_IOCARD) ? I365_PC_IOCARD : 0;
+ i365_set(s, I365_INTCTL, reg);
+
+ reg = I365_PWR_NORESET;
+ if (state->flags & SS_PWR_AUTO) reg |= I365_PWR_AUTO;
+ if (state->flags & SS_OUTPUT_ENA) reg |= I365_PWR_OUT;
+
+#ifdef CONFIG_PCI
+ if ((s->flags & IS_CARDBUS) && !(s->flags & IS_TOPIC)) {
+ cb_set_power(s, state);
+ reg |= i365_get(s, I365_POWER) & (I365_VCC_MASK|I365_VPP1_MASK);
+ } else
+#endif
+ {
+ int new = s->flags & (IS_TOPIC|IS_CIRRUS|IS_VG_PWR|IS_DF_PWR);
+ int vcc3 = (state->Vcc == 33), df = (s->flags & IS_DF_PWR);
+
+ if (state->Vcc == 50) {
+ reg |= I365_VCC_5V;
+ } else if (new && vcc3) {
+ reg |= ((s->flags & (IS_TOPIC|IS_DF_PWR)) ?
+ I365_VCC_3V : I365_VCC_5V);
+ } else if (state->Vcc)
+ return -EINVAL;
+ if (s->flags & IS_CIRRUS)
+ i365_bflip(s, PD67_MISC_CTL_1, PD67_MC1_VCC_3V, vcc3);
+ if (s->flags & IS_VG_PWR)
+ i365_bflip(s, VG469_VSELECT, VG469_VSEL_VCC, vcc3);
+
+ if (state->Vpp == 120) {
+ reg |= I365_VPP1_12V | (new ? 0 : I365_VPP2_12V);
+ } else if (state->Vpp == (df ? 50 : state->Vcc)) {
+ reg |= I365_VPP1_5V | (new ? 0 : I365_VPP2_5V);
+ } else if (state->Vpp)
+ return -EINVAL;
+ }
+
+ if (reg != i365_get(s, I365_POWER))
+ i365_set(s, I365_POWER, reg);
+
+ /* Card status change interrupt mask */
+ reg = (s->cap.pci_irq ? s->pci_irq_code : s->cs_irq) << 4;
+ if (state->csc_mask & SS_DETECT) reg |= I365_CSC_DETECT;
+ if (state->flags & SS_IOCARD) {
+ if (state->csc_mask & SS_STSCHG) reg |= I365_CSC_STSCHG;
+ } else {
+ if (state->csc_mask & SS_BATDEAD) reg |= I365_CSC_BVD1;
+ if (state->csc_mask & SS_BATWARN) reg |= I365_CSC_BVD2;
+ if (state->csc_mask & SS_READY) reg |= I365_CSC_READY;
+ }
+ i365_set(s, I365_CSCINT, reg);
+ i365_get(s, I365_CSC);
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS) {
+ if (s->cs_irq || (pci_csc && s->cap.pci_irq))
+ cb_writel(s, CB_SOCKET_MASK, CB_SM_CCD);
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ }
+#endif
+
+ return 0;
+} /* i365_set_socket */
+
+/*====================================================================*/
+
+static int i365_get_io_map(socket_info_t *s, struct pccard_io_map *io)
+{
+ u_char map, ioctl, addr;
+
+ map = io->map;
+ if (map > 1) return -EINVAL;
+ io->start = i365_get_pair(s, I365_IO(map)+I365_W_START);
+ io->stop = i365_get_pair(s, I365_IO(map)+I365_W_STOP);
+ ioctl = i365_get(s, I365_IOCTL);
+ addr = i365_get(s, I365_ADDRWIN);
+ io->speed = (ioctl & I365_IOCTL_WAIT(map)) ? cycle_time : 0;
+ io->flags = (addr & I365_ENA_IO(map)) ? MAP_ACTIVE : 0;
+ io->flags |= (ioctl & I365_IOCTL_0WS(map)) ? MAP_0WS : 0;
+ io->flags |= (ioctl & I365_IOCTL_16BIT(map)) ? MAP_16BIT : 0;
+ io->flags |= (ioctl & I365_IOCTL_IOCS16(map)) ? MAP_AUTOSZ : 0;
+ DEBUG(3, "i82365: GetIOMap(%d, %d) = %#2.2x, %d ns, %#4.4x-%#4.4x\n",
+ s-socket, map, io->flags, io->speed, io->start, io->stop);
+ return 0;
+} /* i365_get_io_map */
+
+/*====================================================================*/
+
+static int i365_set_io_map(socket_info_t *s, struct pccard_io_map *io)
+{
+ u_char map, ioctl;
+
+ DEBUG(3, "i82365: SetIOMap(%d, %d, %#2.2x, %d ns, %#4.4x-%#4.4x)\n",
+ s-socket, io->map, io->flags, io->speed, io->start, io->stop);
+ map = io->map;
+ if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
+ (io->stop < io->start)) return -EINVAL;
+ /* Turn off the window before changing anything */
+ if (i365_get(s, I365_ADDRWIN) & I365_ENA_IO(map))
+ i365_bclr(s, I365_ADDRWIN, I365_ENA_IO(map));
+ i365_set_pair(s, I365_IO(map)+I365_W_START, io->start);
+ i365_set_pair(s, I365_IO(map)+I365_W_STOP, io->stop);
+ ioctl = i365_get(s, I365_IOCTL) & ~I365_IOCTL_MASK(map);
+ if (io->speed) ioctl |= I365_IOCTL_WAIT(map);
+ if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map);
+ if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map);
+ if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map);
+ i365_set(s, I365_IOCTL, ioctl);
+ /* Turn on the window if necessary */
+ if (io->flags & MAP_ACTIVE)
+ i365_bset(s, I365_ADDRWIN, I365_ENA_IO(map));
+ return 0;
+} /* i365_set_io_map */
+
+/*====================================================================*/
+
+static int i365_get_mem_map(socket_info_t *s, struct pccard_mem_map *mem)
+{
+ u_short base, i;
+ u_char map, addr;
+
+ map = mem->map;
+ if (map > 4) return -EINVAL;
+ addr = i365_get(s, I365_ADDRWIN);
+ mem->flags = (addr & I365_ENA_MEM(map)) ? MAP_ACTIVE : 0;
+ base = I365_MEM(map);
+
+ i = i365_get_pair(s, base+I365_W_START);
+ mem->flags |= (i & I365_MEM_16BIT) ? MAP_16BIT : 0;
+ mem->flags |= (i & I365_MEM_0WS) ? MAP_0WS : 0;
+ mem->sys_start = ((u_long)(i & 0x0fff) << 12);
+
+ i = i365_get_pair(s, base+I365_W_STOP);
+ mem->speed = (i & I365_MEM_WS0) ? 1 : 0;
+ mem->speed += (i & I365_MEM_WS1) ? 2 : 0;
+ mem->speed *= cycle_time;
+ mem->sys_stop = ((u_long)(i & 0x0fff) << 12) + 0x0fff;
+
+ i = i365_get_pair(s, base+I365_W_OFF);
+ mem->flags |= (i & I365_MEM_WRPROT) ? MAP_WRPROT : 0;
+ mem->flags |= (i & I365_MEM_REG) ? MAP_ATTRIB : 0;
+ mem->card_start = ((u_int)(i & 0x3fff) << 12) + mem->sys_start;
+ mem->card_start &= 0x3ffffff;
+
+#ifdef CONFIG_PCI
+ /* Take care of high byte, for PCI controllers */
+ if (s->type == IS_PD6729) {
+ addr = pd67_ext_get(s, PD67_MEM_PAGE(map)) << 24;
+ mem->sys_stop += addr; mem->sys_start += addr;
+ } else if (s->flags & IS_CARDBUS) {
+ addr = i365_get(s, CB_MEM_PAGE(map)) << 24;
+ mem->sys_stop += addr; mem->sys_start += addr;
+ }
+#endif
+
+ DEBUG(3, "i82365: GetMemMap(%d, %d) = %#2.2x, %d ns, %#5.5lx-%#5."
+ "5lx, %#5.5x\n", s-socket, mem->map, mem->flags, mem->speed,
+ mem->sys_start, mem->sys_stop, mem->card_start);
+ return 0;
+} /* i365_get_mem_map */
+
+/*====================================================================*/
+
+static int i365_set_mem_map(socket_info_t *s, struct pccard_mem_map *mem)
+{
+ u_short base, i;
+ u_char map;
+
+ DEBUG(3, "i82365: SetMemMap(%d, %d, %#2.2x, %d ns, %#5.5lx-%#5.5"
+ "lx, %#5.5x)\n", s-socket, mem->map, mem->flags, mem->speed,
+ mem->sys_start, mem->sys_stop, mem->card_start);
+
+ map = mem->map;
+ if ((map > 4) || (mem->card_start > 0x3ffffff) ||
+ (mem->sys_start > mem->sys_stop) || (mem->speed > 1000))
+ return -EINVAL;
+ if (!(s->flags & (IS_PCI|IS_CARDBUS)) &&
+ ((mem->sys_start > 0xffffff) || (mem->sys_stop > 0xffffff)))
+ return -EINVAL;
+
+ /* Turn off the window before changing anything */
+ if (i365_get(s, I365_ADDRWIN) & I365_ENA_MEM(map))
+ i365_bclr(s, I365_ADDRWIN, I365_ENA_MEM(map));
+
+#ifdef CONFIG_PCI
+ /* Take care of high byte, for PCI controllers */
+ if (s->type == IS_PD6729) {
+ pd67_ext_set(s, PD67_MEM_PAGE(map), (mem->sys_start >> 24));
+ } else if (s->flags & IS_CARDBUS)
+ i365_set(s, CB_MEM_PAGE(map), mem->sys_start >> 24);
+#endif
+
+ base = I365_MEM(map);
+ i = (mem->sys_start >> 12) & 0x0fff;
+ if (mem->flags & MAP_16BIT) i |= I365_MEM_16BIT;
+ if (mem->flags & MAP_0WS) i |= I365_MEM_0WS;
+ i365_set_pair(s, base+I365_W_START, i);
+
+ i = (mem->sys_stop >> 12) & 0x0fff;
+ switch (mem->speed / cycle_time) {
+ case 0: break;
+ case 1: i |= I365_MEM_WS0; break;
+ case 2: i |= I365_MEM_WS1; break;
+ default: i |= I365_MEM_WS1 | I365_MEM_WS0; break;
+ }
+ i365_set_pair(s, base+I365_W_STOP, i);
+
+ i = ((mem->card_start - mem->sys_start) >> 12) & 0x3fff;
+ if (mem->flags & MAP_WRPROT) i |= I365_MEM_WRPROT;
+ if (mem->flags & MAP_ATTRIB) i |= I365_MEM_REG;
+ i365_set_pair(s, base+I365_W_OFF, i);
+
+ /* Turn on the window if necessary */
+ if (mem->flags & MAP_ACTIVE)
+ i365_bset(s, I365_ADDRWIN, I365_ENA_MEM(map));
+ return 0;
+} /* i365_set_mem_map */
+
+/*======================================================================
+
+ The few things that are strictly for Cardbus cards goes here.
+
+======================================================================*/
+
+#ifdef CONFIG_CARDBUS
+
+static int cb_get_status(socket_info_t *s, u_int *value)
+{
+ u_int state = cb_readl(s, CB_SOCKET_STATE);
+ *value = (state & CB_SS_32BIT) ? SS_CARDBUS : 0;
+ *value |= (state & CB_SS_CCD) ? 0 : SS_DETECT;
+ *value |= (state & CB_SS_CSTSCHG) ? SS_STSCHG : 0;
+ *value |= (state & CB_SS_PWRCYCLE) ? (SS_POWERON|SS_READY) : 0;
+ *value |= (state & CB_SS_3VCARD) ? SS_3VCARD : 0;
+ *value |= (state & CB_SS_XVCARD) ? SS_XVCARD : 0;
+ *value |= (state & CB_SS_VSENSE) ? 0 : SS_PENDING;
+ DEBUG(1, "yenta: GetStatus(%d) = %#4.4x\n", s-socket, *value);
+ return 0;
+} /* cb_get_status */
+
+static int cb_get_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_short bcr;
+
+ cb_get_power(s, state);
+ pci_readw(s, CB_BRIDGE_CONTROL, &bcr);
+ state->flags |= (bcr & CB_BCR_CB_RESET) ? SS_RESET : 0;
+ if (cb_get_irq_mode(s) != 0)
+ state->io_irq = s->cap.pci_irq;
+ else
+ state->io_irq = i365_get(s, I365_INTCTL) & I365_IRQ_MASK;
+ DEBUG(2, "yenta: GetSocket(%d) = flags %#3.3x, Vcc %d, Vpp %d"
+ ", io_irq %d, csc_mask %#2.2x\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+ return 0;
+} /* cb_get_socket */
+
+static int cb_set_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_int reg;
+
+ DEBUG(2, "yenta: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x)\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+
+ /* First set global controller options */
+ if (s->cap.pci_irq)
+ cb_set_irq_mode(s, pci_csc, (s->cap.pci_irq == state->io_irq));
+ s->bcr &= ~CB_BCR_CB_RESET;
+ s->bcr |= (state->flags & SS_RESET) ? CB_BCR_CB_RESET : 0;
+ set_bridge_state(s);
+
+ cb_set_power(s, state);
+
+ /* Handle IO interrupt using ISA routing */
+ reg = s->intr;
+ if (state->io_irq != s->cap.pci_irq) reg |= state->io_irq;
+ i365_set(s, I365_INTCTL, reg);
+
+ /* Handle CSC mask */
+ if (!s->cs_irq && (!pci_csc || !s->cap.pci_irq))
+ return 0;
+ reg = (s->cs_irq << 4);
+ if (state->csc_mask & SS_DETECT) reg |= I365_CSC_DETECT;
+ i365_set(s, I365_CSCINT, reg);
+ i365_get(s, I365_CSC);
+ cb_writel(s, CB_SOCKET_MASK, CB_SM_CCD);
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+
+ return 0;
+} /* cb_set_socket */
+
+static int cb_get_bridge(socket_info_t *s, struct cb_bridge_map *m)
+{
+ u_char map = m->map;
+
+ if (map > 1) return -EINVAL;
+ m->flags &= MAP_IOSPACE;
+ map += (m->flags & MAP_IOSPACE) ? 2 : 0;
+ pci_readl(s, CB_MEM_BASE(map), &m->start);
+ pci_readl(s, CB_MEM_LIMIT(map), &m->stop);
+ if (m->start || m->stop) {
+ m->flags |= MAP_ACTIVE;
+ m->stop |= (map > 1) ? 3 : 0x0fff;
+ }
+ if (map > 1) {
+ u_short bcr;
+ pci_readw(s, CB_BRIDGE_CONTROL, &bcr);
+ m->flags |= (bcr & CB_BCR_PREFETCH(map)) ? MAP_PREFETCH : 0;
+ }
+ DEBUG(3, "yenta: GetBridge(%d, %d) = %#2.2x, %#4.4x-%#4.4x\n",
+ s-socket, map, m->flags, m->start, m->stop);
+ return 0;
+}
+
+static int cb_set_bridge(socket_info_t *s, struct cb_bridge_map *m)
+{
+ u_char map;
+
+ DEBUG(3, "yenta: SetBridge(%d, %d, %#2.2x, %#4.4x-%#4.4x)\n",
+ s-socket, m->map, m->flags, m->start, m->stop);
+ map = m->map;
+ if (!(s->flags & IS_CARDBUS) || (map > 1) || (m->stop < m->start))
+ return -EINVAL;
+ if (m->flags & MAP_IOSPACE) {
+ if ((m->stop > 0xffff) || (m->start & 3) ||
+ ((m->stop & 3) != 3))
+ return -EINVAL;
+ map += 2;
+ } else {
+ if ((m->start & 0x0fff) || ((m->stop & 0x0fff) != 0x0fff))
+ return -EINVAL;
+ s->bcr &= ~CB_BCR_PREFETCH(map);
+ s->bcr |= (m->flags & MAP_PREFETCH) ? CB_BCR_PREFETCH(map) : 0;
+ pci_writew(s, CB_BRIDGE_CONTROL, s->bcr);
+ }
+ if (m->flags & MAP_ACTIVE) {
+ pci_writel(s, CB_MEM_BASE(map), m->start);
+ pci_writel(s, CB_MEM_LIMIT(map), m->stop);
+ } else {
+ pci_writel(s, CB_MEM_LIMIT(map), 0);
+ pci_writel(s, CB_MEM_BASE(map), 0);
+ }
+ return 0;
+}
+
+#endif /* CONFIG_CARDBUS */
+
+/*======================================================================
+
+ Routines for accessing socket information and register dumps via
+ /proc/bus/pccard/...
+
+======================================================================*/
+
+#ifdef HAS_PROC_BUS
+
+static int proc_read_info(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ p += sprintf(p, "type: %s\npsock: %d\n",
+ pcic[s->type].name, s->psock);
+#ifdef CONFIG_PCI
+ if (s->flags & (IS_PCI|IS_CARDBUS))
+ p += sprintf(p, "bus: %02x\ndevfn: %02x.%1x\n",
+ s->bus, PCI_SLOT(s->devfn), PCI_FUNC(s->devfn));
+ if (s->flags & IS_CARDBUS)
+ p += sprintf(p, "cardbus: %02x\n", s->cap.cardbus);
+#endif
+ return (p - buf);
+}
+
+static int proc_read_exca(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ int i, top;
+
+#ifdef CONFIG_ISA
+ u_long flags = 0;
+#endif
+ ISA_LOCK(s, flags);
+ top = 0x40;
+ if (s->flags & IS_CARDBUS)
+ top = (s->flags & IS_CIRRUS) ? 0x140 : 0x50;
+ for (i = 0; i < top; i += 4) {
+ if (i == 0x50) {
+ p += sprintf(p, "\n");
+ i = 0x100;
+ }
+ p += sprintf(p, "%02x %02x %02x %02x%s",
+ i365_get(s,i), i365_get(s,i+1),
+ i365_get(s,i+2), i365_get(s,i+3),
+ ((i % 16) == 12) ? "\n" : " ");
+ }
+ ISA_UNLOCK(s, flags);
+ return (p - buf);
+}
+
+#ifdef CONFIG_PCI
+static int proc_read_pci(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ u_int a, b, c, d;
+ int i;
+
+ for (i = 0; i < 0xc0; i += 0x10) {
+ pci_readl(s, i, &a);
+ pci_readl(s, i+4, &b);
+ pci_readl(s, i+8, &c);
+ pci_readl(s, i+12, &d);
+ p += sprintf(p, "%08x %08x %08x %08x\n", a, b, c, d);
+ }
+ return (p - buf);
+}
+
+static int proc_read_cardbus(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ int i, top;
+
+ top = (s->flags & IS_O2MICRO) ? 0x30 : 0x20;
+ for (i = 0; i < top; i += 0x10)
+ p += sprintf(p, "%08x %08x %08x %08x\n",
+ cb_readl(s,i+0x00), cb_readl(s,i+0x04),
+ cb_readl(s,i+0x08), cb_readl(s,i+0x0c));
+ return (p - buf);
+}
+#endif
+
+static void pcic_proc_setup(socket_info_t *s, struct proc_dir_entry *base)
+{
+ create_proc_read_entry("info", 0, base, proc_read_info, s);
+ create_proc_read_entry("exca", 0, base, proc_read_exca, s);
+#ifdef CONFIG_PCI
+ if (s->flags & (IS_PCI|IS_CARDBUS))
+ create_proc_read_entry("pci", 0, base, proc_read_pci, s);
+ if (s->flags & IS_CARDBUS)
+ create_proc_read_entry("cardbus", 0, base, proc_read_cardbus, s);
+#endif
+ s->proc = base;
+}
+
+static void pcic_proc_remove(socket_info_t *s)
+{
+ struct proc_dir_entry *base = s->proc;
+ if (base == NULL) return;
+ remove_proc_entry("info", base);
+ remove_proc_entry("exca", base);
+#ifdef CONFIG_PCI
+ if (s->flags & (IS_PCI|IS_CARDBUS))
+ remove_proc_entry("pci", base);
+ if (s->flags & IS_CARDBUS)
+ remove_proc_entry("cardbus", base);
+#endif
+}
+
+#endif /* HAS_PROC_BUS */
+
+/*====================================================================*/
+
+typedef int (*subfn_t)(socket_info_t *, void *);
+
+static subfn_t pcic_service_table[] = {
+ (subfn_t)&pcic_register_callback,
+ (subfn_t)&pcic_inquire_socket,
+ (subfn_t)&i365_get_status,
+ (subfn_t)&i365_get_socket,
+ (subfn_t)&i365_set_socket,
+ (subfn_t)&i365_get_io_map,
+ (subfn_t)&i365_set_io_map,
+ (subfn_t)&i365_get_mem_map,
+ (subfn_t)&i365_set_mem_map,
+#ifdef CONFIG_CARDBUS
+ (subfn_t)&cb_get_bridge,
+ (subfn_t)&cb_set_bridge,
+#else
+ NULL, NULL,
+#endif
+#ifdef HAS_PROC_BUS
+ (subfn_t)&pcic_proc_setup
+#endif
+};
+
+#define NFUNC (sizeof(pcic_service_table)/sizeof(subfn_t))
+
+static int pcic_service(u_int sock, u_int cmd, void *arg)
+{
+ socket_info_t *s = &socket[sock];
+ subfn_t fn;
+ int ret;
+#ifdef CONFIG_ISA
+ u_long flags = 0;
+#endif
+
+ if (cmd >= NFUNC)
+ return -EINVAL;
+
+ if (s->flags & IS_ALIVE) {
+ if (cmd == SS_GetStatus)
+ *(u_int *)arg = 0;
+ return -EINVAL;
+ }
+
+ fn = pcic_service_table[cmd];
+#ifdef CONFIG_CARDBUS
+ if ((s->flags & IS_CARDBUS) &&
+ (cb_readl(s, CB_SOCKET_STATE) & CB_SS_32BIT)) {
+ if (cmd == SS_GetStatus)
+ fn = (subfn_t)&cb_get_status;
+ else if (cmd == SS_GetSocket)
+ fn = (subfn_t)&cb_get_socket;
+ else if (cmd == SS_SetSocket)
+ fn = (subfn_t)&cb_set_socket;
+ }
+#endif
+
+ ISA_LOCK(s, flags);
+ ret = (fn == NULL) ? -EINVAL : fn(s, arg);
+ ISA_UNLOCK(s, flags);
+ return ret;
+} /* pcic_service */
+
+/*====================================================================*/
+
+int __init init_i82365(void)
+{
+ servinfo_t serv;
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "i82365: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ DEBUG(0, "%s\n", version);
+
+#ifdef CONFIG_PCI
+ if (pcic[IS_UNK_CARDBUS].flags != (IS_CARDBUS|IS_UNKNOWN)) {
+ printk(KERN_NOTICE "i82365: bad pcic_id enumeration!\n");
+ return -EINVAL;
+ }
+#endif
+
+ printk(KERN_INFO "Intel ISA/PCI/CardBus PCIC probe:\n");
+ sockets = 0;
+
+#ifdef CONFIG_PCI
+ if (do_pci_probe && pcibios_present()) {
+ pci_probe(PCI_CLASS_BRIDGE_CARDBUS);
+ pci_probe(PCI_CLASS_BRIDGE_PCMCIA);
+ }
+#endif
+
+#ifdef CONFIG_ISA
+ isa_probe(i365_base);
+ if (!sockets || extra_sockets)
+ isa_probe(i365_base+2);
+#endif
+
+ if (sockets == 0) {
+ printk(KERN_INFO " no bridges found.\n");
+ return -ENODEV;
+ }
+
+ /* Set up interrupt handler(s) */
+#ifdef CONFIG_ISA
+ if (grab_irq != 0)
+ request_irq(cs_irq, pcic_interrupt, 0, "i82365", socket);
+#endif
+#ifdef CONFIG_PCI
+ if (pci_csc) {
+ u_int i, irq, mask = 0;
+ for (i = 0; i < sockets; i++) {
+ irq = socket[i].cap.pci_irq;
+ if (irq && !(mask & (1<<irq)))
+ request_irq(irq, pcic_interrupt, SA_SHIRQ, "i82365", socket);
+ mask |= (1<<irq);
+ }
+ }
+#endif
+
+ if (register_ss_entry(sockets, &pcic_service) != 0)
+ printk(KERN_NOTICE "i82365: register_ss_entry() failed\n");
+
+ /* Finally, schedule a polling interrupt */
+ if (poll_interval != 0) {
+ poll_timer.expires = jiffies + poll_interval;
+ add_timer(&poll_timer);
+ }
+
+ return 0;
+
+} /* init_i82365 */
+
+static void __exit exit_i82365(void)
+{
+ int i;
+#ifdef HAS_PROC_BUS
+ for (i = 0; i < sockets; i++)
+ pcic_proc_remove(&socket[i]);
+#endif
+ unregister_ss_entry(&pcic_service);
+ if (poll_interval != 0)
+ del_timer(&poll_timer);
+#ifdef CONFIG_ISA
+ if (grab_irq != 0)
+ free_irq(cs_irq, socket);
+#endif
+#ifdef CONFIG_PCI
+ if (pci_csc) {
+ u_int irq, mask = 0;
+ for (i = 0; i < sockets; i++) {
+ irq = socket[i].cap.pci_irq;
+ if (irq && !(mask & (1<<irq)))
+ free_irq(irq, socket);
+ mask |= (1<<irq);
+ }
+ }
+#endif
+ for (i = 0; i < sockets; i++) {
+ socket_info_t *s = &socket[i];
+ /* Turn off all interrupt sources! */
+ i365_set(s, I365_CSCINT, 0);
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS)
+ cb_writel(s, CB_SOCKET_MASK, 0);
+ if (s->cb_virt) {
+ iounmap(s->cb_virt);
+ release_mem_region(s->cb_phys, 0x1000);
+ } else
+#endif
+ release_region(s->ioaddr, 2);
+ }
+} /* exit_i82365 */
+
+module_init(init_i82365);
+module_exit(exit_i82365);
diff --git a/linux/pcmcia-cs/modules/i82365.h b/linux/pcmcia-cs/modules/i82365.h
new file mode 100644
index 0000000..27ee583
--- /dev/null
+++ b/linux/pcmcia-cs/modules/i82365.h
@@ -0,0 +1,154 @@
+/*
+ * i82365.h 1.21 2001/08/24 12:15:33
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_I82365_H
+#define _LINUX_I82365_H
+
+/* register definitions for the Intel 82365SL PCMCIA controller */
+
+/* Offsets for PCIC registers */
+#define I365_IDENT 0x00 /* Identification and revision */
+#define I365_STATUS 0x01 /* Interface status */
+#define I365_POWER 0x02 /* Power and RESETDRV control */
+#define I365_INTCTL 0x03 /* Interrupt and general control */
+#define I365_CSC 0x04 /* Card status change */
+#define I365_CSCINT 0x05 /* Card status change interrupt control */
+#define I365_ADDRWIN 0x06 /* Address window enable */
+#define I365_IOCTL 0x07 /* I/O control */
+#define I365_GENCTL 0x16 /* Card detect and general control */
+#define I365_GBLCTL 0x1E /* Global control register */
+
+/* Offsets for I/O and memory window registers */
+#define I365_IO(map) (0x08+((map)<<2))
+#define I365_MEM(map) (0x10+((map)<<3))
+#define I365_W_START 0
+#define I365_W_STOP 2
+#define I365_W_OFF 4
+
+/* Flags for I365_STATUS */
+#define I365_CS_BVD1 0x01
+#define I365_CS_STSCHG 0x01
+#define I365_CS_BVD2 0x02
+#define I365_CS_SPKR 0x02
+#define I365_CS_DETECT 0x0C
+#define I365_CS_WRPROT 0x10
+#define I365_CS_READY 0x20 /* Inverted */
+#define I365_CS_POWERON 0x40
+#define I365_CS_GPI 0x80
+
+/* Flags for I365_POWER */
+#define I365_PWR_OFF 0x00 /* Turn off the socket */
+#define I365_PWR_OUT 0x80 /* Output enable */
+#define I365_PWR_NORESET 0x40 /* Disable RESETDRV on resume */
+#define I365_PWR_AUTO 0x20 /* Auto pwr switch enable */
+#define I365_VCC_MASK 0x18 /* Mask for turning off Vcc */
+/* There are different layouts for B-step and DF-step chips: the B
+ step has independent Vpp1/Vpp2 control, and the DF step has only
+ Vpp1 control, plus 3V control */
+#define I365_VCC_5V 0x10 /* Vcc = 5.0v */
+#define I365_VCC_3V 0x18 /* Vcc = 3.3v */
+#define I365_VPP2_MASK 0x0c /* Mask for turning off Vpp2 */
+#define I365_VPP2_5V 0x04 /* Vpp2 = 5.0v */
+#define I365_VPP2_12V 0x08 /* Vpp2 = 12.0v */
+#define I365_VPP1_MASK 0x03 /* Mask for turning off Vpp1 */
+#define I365_VPP1_5V 0x01 /* Vpp2 = 5.0v */
+#define I365_VPP1_12V 0x02 /* Vpp2 = 12.0v */
+
+/* Flags for I365_INTCTL */
+#define I365_RING_ENA 0x80
+#define I365_PC_RESET 0x40
+#define I365_PC_IOCARD 0x20
+#define I365_INTR_ENA 0x10
+#define I365_IRQ_MASK 0x0F
+
+/* Flags for I365_CSC and I365_CSCINT*/
+#define I365_CSC_BVD1 0x01
+#define I365_CSC_STSCHG 0x01
+#define I365_CSC_BVD2 0x02
+#define I365_CSC_READY 0x04
+#define I365_CSC_DETECT 0x08
+#define I365_CSC_ANY 0x0F
+#define I365_CSC_GPI 0x10
+
+/* Flags for I365_ADDRWIN */
+#define I365_ADDR_MEMCS16 0x20
+#define I365_ENA_IO(map) (0x40 << (map))
+#define I365_ENA_MEM(map) (0x01 << (map))
+
+/* Flags for I365_IOCTL */
+#define I365_IOCTL_MASK(map) (0x0F << (map<<2))
+#define I365_IOCTL_WAIT(map) (0x08 << (map<<2))
+#define I365_IOCTL_0WS(map) (0x04 << (map<<2))
+#define I365_IOCTL_IOCS16(map) (0x02 << (map<<2))
+#define I365_IOCTL_16BIT(map) (0x01 << (map<<2))
+
+/* Flags for I365_GENCTL */
+#define I365_CTL_16DELAY 0x01
+#define I365_CTL_RESET 0x02
+#define I365_CTL_GPI_ENA 0x04
+#define I365_CTL_GPI_CTL 0x08
+#define I365_CTL_RESUME 0x10
+#define I365_CTL_SW_IRQ 0x20
+
+/* Flags for I365_GBLCTL */
+#define I365_GBL_PWRDOWN 0x01
+#define I365_GBL_CSC_LEV 0x02
+#define I365_GBL_WRBACK 0x04
+#define I365_GBL_IRQ_0_LEV 0x08
+#define I365_GBL_IRQ_1_LEV 0x10
+
+/* Flags for memory window registers */
+#define I365_MEM_16BIT 0x8000 /* In memory start high byte */
+#define I365_MEM_0WS 0x4000
+#define I365_MEM_WS1 0x8000 /* In memory stop high byte */
+#define I365_MEM_WS0 0x4000
+#define I365_MEM_WRPROT 0x8000 /* In offset high byte */
+#define I365_MEM_REG 0x4000
+
+#define I365_REG(slot, reg) (((slot) << 6) | (reg))
+
+/* Default ISA interrupt mask */
+#define I365_ISA_IRQ_MASK 0xdeb8 /* irq's 3-5,7,9-12,14,15 */
+
+/* Device ID's for PCI-to-PCMCIA bridges */
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82092AA_0
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#endif
+#ifndef PCI_VENDOR_ID_OMEGA
+#define PCI_VENDOR_ID_OMEGA 0x119b
+#endif
+#ifndef PCI_DEVICE_ID_OMEGA_82C092G
+#define PCI_DEVICE_ID_OMEGA_82C092G 0x1221
+#endif
+
+#endif /* _LINUX_I82365_H */
diff --git a/linux/pcmcia-cs/modules/o2micro.h b/linux/pcmcia-cs/modules/o2micro.h
new file mode 100644
index 0000000..fd15234
--- /dev/null
+++ b/linux/pcmcia-cs/modules/o2micro.h
@@ -0,0 +1,160 @@
+/*
+ * o2micro.h 1.20 2002/03/03 14:16:57
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_O2MICRO_H
+#define _LINUX_O2MICRO_H
+
+#ifndef PCI_VENDOR_ID_O2
+#define PCI_VENDOR_ID_O2 0x1217
+#endif
+#ifndef PCI_DEVICE_ID_O2_6729
+#define PCI_DEVICE_ID_O2_6729 0x6729
+#endif
+#ifndef PCI_DEVICE_ID_O2_6730
+#define PCI_DEVICE_ID_O2_6730 0x673a
+#endif
+#ifndef PCI_DEVICE_ID_O2_6832
+#define PCI_DEVICE_ID_O2_6832 0x6832
+#endif
+#ifndef PCI_DEVICE_ID_O2_6836
+#define PCI_DEVICE_ID_O2_6836 0x6836
+#endif
+#ifndef PCI_DEVICE_ID_O2_6812
+#define PCI_DEVICE_ID_O2_6812 0x6872
+#endif
+#ifndef PCI_DEVICE_ID_O2_6922
+#define PCI_DEVICE_ID_O2_6922 0x6825
+#endif
+#ifndef PCI_DEVICE_ID_O2_6933
+#define PCI_DEVICE_ID_O2_6933 0x6933
+#endif
+#ifndef PCI_DEVICE_ID_O2_6912
+#define PCI_DEVICE_ID_O2_6912 0x6972
+#endif
+
+/* Additional PCI configuration registers */
+
+#define O2_MUX_CONTROL 0x90 /* 32 bit */
+#define O2_MUX_RING_OUT 0x0000000f
+#define O2_MUX_SKTB_ACTV 0x000000f0
+#define O2_MUX_SCTA_ACTV_ENA 0x00000100
+#define O2_MUX_SCTB_ACTV_ENA 0x00000200
+#define O2_MUX_SER_IRQ_ROUTE 0x0000e000
+#define O2_MUX_SER_PCI 0x00010000
+
+#define O2_MUX_SKTA_TURBO 0x000c0000 /* for 6833, 6860 */
+#define O2_MUX_SKTB_TURBO 0x00300000
+#define O2_MUX_AUX_VCC_3V 0x00400000
+#define O2_MUX_PCI_VCC_5V 0x00800000
+#define O2_MUX_PME_MUX 0x0f000000
+
+/* Additional ExCA registers */
+
+#define O2_MODE_A 0x38
+#define O2_MODE_A_2 0x26 /* for 6833B, 6860C */
+#define O2_MODE_A_CD_PULSE 0x04
+#define O2_MODE_A_SUSP_EDGE 0x08
+#define O2_MODE_A_HOST_SUSP 0x10
+#define O2_MODE_A_PWR_MASK 0x60
+#define O2_MODE_A_QUIET 0x80
+
+#define O2_MODE_B 0x39
+#define O2_MODE_B_2 0x2e /* for 6833B, 6860C */
+#define O2_MODE_B_IDENT 0x03
+#define O2_MODE_B_ID_BSTEP 0x00
+#define O2_MODE_B_ID_CSTEP 0x01
+#define O2_MODE_B_ID_O2 0x02
+#define O2_MODE_B_VS1 0x04
+#define O2_MODE_B_VS2 0x08
+#define O2_MODE_B_IRQ15_RI 0x80
+
+#define O2_MODE_C 0x3a
+#define O2_MODE_C_DREQ_MASK 0x03
+#define O2_MODE_C_DREQ_INPACK 0x01
+#define O2_MODE_C_DREQ_WP 0x02
+#define O2_MODE_C_DREQ_BVD2 0x03
+#define O2_MODE_C_ZVIDEO 0x08
+#define O2_MODE_C_IREQ_SEL 0x30
+#define O2_MODE_C_MGMT_SEL 0xc0
+
+#define O2_MODE_D 0x3b
+#define O2_MODE_D_IRQ_MODE 0x03
+#define O2_MODE_D_IRQ_PCPCI 0x00
+#define O2_MODE_D_IRQ_PCIWAY 0x02
+#define O2_MODE_D_IRQ_PCI 0x03
+#define O2_MODE_D_PCI_CLKRUN 0x04
+#define O2_MODE_D_CB_CLKRUN 0x08
+#define O2_MODE_D_SKT_ACTV 0x20
+#define O2_MODE_D_PCI_FIFO 0x40 /* for OZ6729, OZ6730 */
+#define O2_MODE_D_W97_IRQ 0x40
+#define O2_MODE_D_ISA_IRQ 0x80
+
+#define O2_MHPG_DMA 0x3c
+#define O2_MHPG_CHANNEL 0x07
+#define O2_MHPG_CINT_ENA 0x08
+#define O2_MHPG_CSC_ENA 0x10
+
+#define O2_FIFO_ENA 0x3d
+#define O2_FIFO_ZVIDEO_3 0x08
+#define O2_FIFO_PCI_FIFO 0x10
+#define O2_FIFO_POSTWR 0x40
+#define O2_FIFO_BUFFER 0x80
+
+#define O2_MODE_E 0x3e
+#define O2_MODE_E_MHPG_DMA 0x01
+#define O2_MODE_E_SPKR_OUT 0x02
+#define O2_MODE_E_LED_OUT 0x08
+#define O2_MODE_E_SKTA_ACTV 0x10
+
+/* Data structure for tracking vendor-specific state */
+typedef struct o2micro_state_t {
+ u_char mode_a; /* O2_MODE_A */
+ u_char mode_b; /* O2_MODE_B */
+ u_char mode_c; /* O2_MODE_C */
+ u_char mode_d; /* O2_MODE_D */
+ u_char mhpg; /* O2_MHPG_DMA */
+ u_char fifo; /* O2_FIFO_ENA */
+ u_char mode_e; /* O2_MODE_E */
+} o2micro_state_t;
+
+#define O2MICRO_PCIC_ID \
+ IS_OZ6729, IS_OZ6730, IS_OZ6832, IS_OZ6836, IS_OZ6812, \
+ IS_OZ6922, IS_OZ6933, IS_OZ6912
+
+#define O2MICRO_PCIC_INFO \
+ { "O2Micro OZ6729", IS_O2MICRO|IS_PCI|IS_VG_PWR, ID(O2, 6729) }, \
+ { "O2Micro OZ6730", IS_O2MICRO|IS_PCI|IS_VG_PWR, ID(O2, 6730) }, \
+ { "O2Micro OZ6832/33", IS_O2MICRO|IS_CARDBUS, ID(O2, 6832) }, \
+ { "O2Micro OZ6836/60", IS_O2MICRO|IS_CARDBUS, ID(O2, 6836) }, \
+ { "O2Micro OZ6812", IS_O2MICRO|IS_CARDBUS, ID(O2, 6812) }, \
+ { "O2Micro OZ6922", IS_O2MICRO|IS_CARDBUS, ID(O2, 6922) }, \
+ { "O2Micro OZ6933", IS_O2MICRO|IS_CARDBUS, ID(O2, 6933) }, \
+ { "O2Micro OZ6912", IS_O2MICRO|IS_CARDBUS, ID(O2, 6912) }
+
+#endif /* _LINUX_O2MICRO_H */
diff --git a/linux/pcmcia-cs/modules/pci_fixup.c b/linux/pcmcia-cs/modules/pci_fixup.c
new file mode 100644
index 0000000..6cbcd03
--- /dev/null
+++ b/linux/pcmcia-cs/modules/pci_fixup.c
@@ -0,0 +1,677 @@
+/*======================================================================
+
+ Kernel fixups for PCI device support
+
+ pci_fixup.c 1.33 2002/10/12 19:02:59
+
+ PCI bus fixups: various bits of code that don't really belong in
+ the PCMCIA subsystem, but may or may not be available from the
+ kernel, depending on kernel version. The basic idea is to make
+ 2.0.* and 2.2.* kernels look like they have the 2.3.* features.
+
+======================================================================*/
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/* We use these for setting up CardBus bridges */
+#include "yenta.h"
+#include "i82365.h"
+
+#define VERSION KERNEL_VERSION
+#if (LINUX_VERSION_CODE < VERSION(2,3,24))
+
+/* Default memory base addresses for CardBus controllers */
+static u_int cb_mem_base[] = { 0x0, 0x68000000, 0xf8000000 };
+MODULE_PARM(cb_mem_base, "i");
+
+/* PCI bus number overrides for CardBus controllers */
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+INT_MODULE_PARM(cb_bus_base, 0);
+INT_MODULE_PARM(cb_bus_step, 2);
+INT_MODULE_PARM(cb_pci_irq, 0);
+
+#endif
+
+/* (exported) mask of interrupts reserved for PCI devices */
+u32 pci_irq_mask = 0;
+
+/*======================================================================
+
+ Basic PCI services missing from older kernels: device lookup, etc
+
+======================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,1,0))
+#ifndef MACH
+/* Already defined in drivers/pci/pci.c. */
+struct pci_dev *pci_devices = NULL;
+struct pci_bus pci_root = {
+ parent: NULL,
+ children: NULL,
+ next: NULL,
+ self: NULL,
+ devices: NULL,
+ number: 0
+};
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < VERSION(2,1,93))
+
+struct pci_dev *pci_find_slot(u_int bus, u_int devfn)
+{
+ struct pci_dev *dev;
+ for (dev = pci_devices; dev; dev = dev->next)
+ if ((dev->devfn == devfn) && (bus == dev->bus->number))
+ return dev;
+#if (LINUX_VERSION_CODE > VERSION(2,1,0))
+ return NULL;
+#else
+ {
+ struct pci_bus *b;
+ u8 hdr;
+ u32 id, class;
+
+ if (pcibios_read_config_byte(bus, devfn & ~7, PCI_HEADER_TYPE, &hdr))
+ return NULL;
+ if (PCI_FUNC(devfn) && !(hdr & 0x80))
+ return NULL;
+ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id);
+ if ((id == 0) || (id == 0xffffffff))
+ return NULL;
+ dev = kmalloc(sizeof *dev, GFP_ATOMIC);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, sizeof *dev);
+ dev->devfn = devfn;
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &dev->irq);
+ dev->vendor = id & 0xffff;
+ dev->device = id >> 16;
+ pcibios_read_config_dword(bus, devfn, PCI_CLASS_REVISION, &class);
+ if (dev->irq == 255)
+ dev->irq = 0;
+ dev->class = class >> 8;
+ for (b = &pci_root; b; b = b->next)
+ if (b->number == bus) break;
+ if (!b) {
+ b = kmalloc(sizeof *b, GFP_ATOMIC);
+ if (!b) {
+ kfree(dev);
+ return NULL;
+ }
+ memset(b, 0, sizeof *b);
+ b->number = bus;
+ b->next = pci_root.next;
+ pci_root.next = b;
+ }
+ dev->bus = b;
+ return dev;
+ }
+#endif
+}
+
+struct pci_dev *pci_find_class(u_int class, struct pci_dev *from)
+{
+ static u16 index = 0;
+ u8 bus, devfn;
+ if (from == NULL)
+ index = 0;
+ if (pcibios_find_class(class, index++, &bus, &devfn) == 0)
+ return pci_find_slot(bus, devfn);
+ else
+ return NULL;
+}
+
+#endif /* (LINUX_VERSION_CODE < VERSION(2,1,93)) */
+
+/*======================================================================
+
+ PCI Interrupt Routing Table parser
+
+ This only needs to be done once per boot: we scan the BIOS for
+ the routing table, and then look for devices that have interrupt
+ assignments that the kernel doesn't know about. If we find any,
+ we update their pci_dev structures and write the PCI interrupt
+ line registers.
+
+======================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,3,24)) && defined(__i386__)
+
+#pragma pack(1)
+
+struct slot_entry {
+ u8 bus, devfn;
+ struct pirq_pin {
+ u8 link;
+ u16 irq_map;
+ } pin[4];
+ u8 slot;
+ u8 reserved;
+};
+
+struct routing_table {
+ u32 signature;
+ u8 minor, major;
+ u16 size;
+ u8 bus, devfn;
+ u16 pci_mask;
+ u32 compat;
+ u32 miniport;
+ u8 reserved[11];
+ u8 checksum;
+ struct slot_entry entry[0];
+};
+
+#pragma pack()
+
+/*
+ The meaning of the link bytes in the routing table is vendor
+ specific. We need code to get and set the routing information.
+*/
+
+static u8 pIIx_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq;
+ /* link should be 0x60, 0x61, 0x62, 0x63 */
+ pci_read_config_byte(router, link, &pirq);
+ return (pirq < 16) ? pirq : 0;
+}
+
+static void pIIx_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ pci_write_config_byte(router, link, irq);
+}
+
+static u8 via_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq = 0;
+ /* link should be 1, 2, 3, 5 */
+ if (link < 6)
+ pci_read_config_byte(router, 0x55 + (link>>1), &pirq);
+ return (link & 1) ? (pirq >> 4) : (pirq & 15);
+}
+
+static void via_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ u8 pirq;
+ pci_read_config_byte(router, 0x55 + (link>>1), &pirq);
+ pirq &= (link & 1) ? 0x0f : 0xf0;
+ pirq |= (link & 1) ? (irq << 4) : (irq & 15);
+ pci_write_config_byte(router, 0x55 + (link>>1), pirq);
+}
+
+static u8 opti_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq = 0;
+ /* link should be 0x02, 0x12, 0x22, 0x32 */
+ if ((link & 0xcf) == 0x02)
+ pci_read_config_byte(router, 0xb8 + (link >> 5), &pirq);
+ return (link & 0x10) ? (pirq >> 4) : (pirq & 15);
+}
+
+static void opti_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ u8 pirq;
+ pci_read_config_byte(router, 0xb8 + (link >> 5), &pirq);
+ pirq &= (link & 0x10) ? 0x0f : 0xf0;
+ pirq |= (link & 0x10) ? (irq << 4) : (irq & 15);
+ pci_write_config_byte(router, 0xb8 + (link >> 5), pirq);
+}
+
+static u8 ali_link(struct pci_dev *router, u8 link)
+{
+ /* No, you're not dreaming */
+ static const u8 map[] =
+ { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
+ u8 pirq;
+ /* link should be 0x01..0x08 */
+ pci_read_config_byte(router, 0x48 + ((link-1)>>1), &pirq);
+ return (link & 1) ? map[pirq&15] : map[pirq>>4];
+}
+
+static void ali_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ /* Inverse of map in ali_link */
+ static const u8 map[] =
+ { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
+ u8 pirq;
+ pci_read_config_byte(router, 0x48 + ((link-1)>>1), &pirq);
+ pirq &= (link & 1) ? 0x0f : 0xf0;
+ pirq |= (link & 1) ? (map[irq] << 4) : (map[irq] & 15);
+ pci_write_config_byte(router, 0x48 + ((link-1)>>1), pirq);
+}
+
+static u8 cyrix_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq;
+ /* link should be 1, 2, 3, 4 */
+ link--;
+ pci_read_config_byte(router, 0x5c + (link>>1), &pirq);
+ return ((link & 1) ? pirq >> 4 : pirq & 15);
+}
+
+static void cyrix_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ u8 pirq;
+ link--;
+ pci_read_config_byte(router, 0x5c + (link>>1), &pirq);
+ pirq &= (link & 1) ? 0x0f : 0xf0;
+ pirq |= (link & 1) ? (irq << 4) : (irq & 15);
+ pci_write_config_byte(router, 0x5c + (link>>1), pirq);
+}
+
+/*
+ A table of all the PCI interrupt routers for which we know how to
+ interpret the link bytes.
+*/
+
+#ifndef PCI_DEVICE_ID_INTEL_82371FB_0
+#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82371SB_0
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82371AB_0
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82443MX_1
+#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7198
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82443MX_1
+#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7198
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801AA_0
+#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801AB_0
+#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801BA_0
+#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801BAM_0
+#define PCI_DEVICE_ID_INTEL_82801BAM_0 0x244c
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C586_0
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C596
+#define PCI_DEVICE_ID_VIA_82C596 0x0596
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C686
+#define PCI_DEVICE_ID_VIA_82C686 0x0686
+#endif
+#ifndef PCI_DEVICE_ID_SI
+#define PCI_DEVICE_ID_SI 0x1039
+#endif
+#ifndef PCI_DEVICE_ID_SI_503
+#define PCI_DEVICE_ID_SI_503 0x0008
+#endif
+#ifndef PCI_DEVICE_ID_SI_496
+#define PCI_DEVICE_ID_SI_496 0x0496
+#endif
+
+#define ID(a,b) PCI_VENDOR_ID_##a,PCI_DEVICE_ID_##a##_##b
+
+struct router {
+ u16 vendor, device;
+ u8 (*xlate)(struct pci_dev *, u8);
+ void (*init)(struct pci_dev *, u8, u8);
+} router_table[] = {
+ { ID(INTEL, 82371FB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82371SB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82371AB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82443MX_1), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801AA_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801AB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801BA_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801BAM_0), &pIIx_link, &pIIx_init },
+ { ID(VIA, 82C586_0), &via_link, &via_init },
+ { ID(VIA, 82C596), &via_link, &via_init },
+ { ID(VIA, 82C686), &via_link, &via_init },
+ { ID(OPTI, 82C700), &opti_link, &opti_init },
+ { ID(AL, M1533), &ali_link, &ali_init },
+ { ID(SI, 503), &pIIx_link, &pIIx_init },
+ { ID(SI, 496), &pIIx_link, &pIIx_init },
+ { ID(CYRIX, 5530_LEGACY), &cyrix_link, &cyrix_init }
+};
+#define ROUTER_COUNT (sizeof(router_table)/sizeof(router_table[0]))
+
+/* Global variables for current interrupt routing table */
+static struct routing_table *pirq = NULL;
+static struct pci_dev *router_dev = NULL;
+static struct router *router_info = NULL;
+
+#ifndef __va
+#define __va(x) (x)
+#endif
+
+static void scan_pirq_table(void)
+{
+ struct routing_table *r;
+ struct pci_dev *router, *dev;
+ u8 pin, fn, *p;
+ int i, j;
+ struct slot_entry *e;
+
+ /* Scan the BIOS for the routing table signature */
+ for (p = (u8 *)__va(0xf0000); p < (u8 *)__va(0xfffff); p += 16)
+ if ((p[0] == '$') && (p[1] == 'P') &&
+ (p[2] == 'I') && (p[3] == 'R')) break;
+ if (p >= (u8 *)__va(0xfffff))
+ return;
+
+ pirq = r = (struct routing_table *)p;
+ printk(KERN_INFO "PCI routing table version %d.%d at %#06x\n",
+ r->major, r->minor, (u32)r & 0xfffff);
+ for (i = j = 0; i < 16; i++)
+ j += (r->pci_mask >> i) & 1;
+ if (j > 4)
+ printk(KERN_NOTICE " bogus PCI irq mask %#04x!\n",
+ r->pci_mask);
+ else
+ pci_irq_mask |= r->pci_mask;
+
+ router_dev = router = pci_find_slot(r->bus, r->devfn);
+ if (router) {
+ for (i = 0; i < ROUTER_COUNT; i++) {
+ if ((router->vendor == router_table[i].vendor) &&
+ (router->device == router_table[i].device))
+ break;
+ if (((r->compat & 0xffff) == router_table[i].vendor) &&
+ ((r->compat >> 16) == router_table[i].device))
+ break;
+ }
+ if (i == ROUTER_COUNT)
+ printk(KERN_INFO " unknown PCI interrupt router %04x:%04x\n",
+ router->vendor, router->device);
+ else
+ router_info = &router_table[i];
+ }
+
+ for (e = r->entry; (u8 *)e < p+r->size; e++) {
+ for (fn = 0; fn < 8; fn++) {
+ dev = pci_find_slot(e->bus, e->devfn | fn);
+ if ((dev == NULL) || (dev->irq != 0)) continue;
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if ((pin == 0) || (pin == 255)) continue;
+ if (router_info) {
+ dev->irq = router_info->xlate(router, e->pin[pin-1].link);
+ } else {
+ /* Fallback: see if only one irq possible */
+ int map = e->pin[pin-1].irq_map;
+ if (map && (!(map & (map-1))))
+ dev->irq = ffs(map)-1;
+ }
+ if (dev->irq) {
+ printk(KERN_INFO " %02x:%02x.%1x -> irq %d\n",
+ e->bus, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), dev->irq);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ dev->irq);
+ }
+ }
+ }
+}
+
+#endif /* (LINUX_VERSION_CODE < VERSION(2,3,24)) && defined(__i386__) */
+
+/*======================================================================
+
+ PCI device enabler
+
+ This is not at all generic... it is mostly a hack to correctly
+ configure CardBus bridges.
+
+======================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,3,24))
+
+static int check_cb_mapping(u_int phys)
+{
+ /* A few sanity checks to validate the bridge mapping */
+ char *virt = ioremap(phys, 0x1000);
+ int ret = ((readb(virt+0x800+I365_IDENT) & 0x70) ||
+ (readb(virt+0x800+I365_CSC) &&
+ readb(virt+0x800+I365_CSC) &&
+ readb(virt+0x800+I365_CSC)));
+ int state = readl(virt+CB_SOCKET_STATE) >> 16;
+ ret |= (state & ~0x3000) || !(state & 0x3000);
+ ret |= readl(virt+CB_SOCKET_FORCE);
+ iounmap(virt);
+ return ret;
+}
+
+static void setup_cb_bridge(struct pci_dev *dev)
+{
+ u8 bus, sub;
+ u32 phys;
+ int i;
+
+ /* This is nasty, but where else can we put it? */
+ if (PCI_FUNC(dev->devfn) == 0) {
+ struct pci_dev *sib;
+ sib = pci_find_slot(dev->bus->number, dev->devfn+1);
+ if (sib) {
+ u8 a, b;
+ u32 c, d;
+ /* Check for bad PCI bus numbering */
+ pci_read_config_byte(dev, CB_CARDBUS_BUS, &a);
+ pci_read_config_byte(sib, CB_CARDBUS_BUS, &b);
+ if (a == b) {
+ pci_write_config_byte(dev, CB_CARDBUS_BUS, 0);
+ pci_write_config_byte(sib, CB_CARDBUS_BUS, 0);
+ }
+ /* check for bad register mapping */
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &c);
+ pci_read_config_dword(sib, PCI_BASE_ADDRESS_0, &d);
+ if ((c != 0) && (c == d)) {
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
+ pci_write_config_dword(sib, PCI_BASE_ADDRESS_0, 0);
+ }
+ }
+ }
+
+ /* Assign PCI bus numbers, if needed */
+ pci_read_config_byte(dev, CB_CARDBUS_BUS, &bus);
+ pci_read_config_byte(dev, CB_SUBORD_BUS, &sub);
+ if ((cb_bus_base > 0) || (bus == 0)) {
+ if (cb_bus_base <= 0) cb_bus_base = 0x20;
+ bus = cb_bus_base;
+ sub = cb_bus_base+cb_bus_step;
+ cb_bus_base += cb_bus_step+1;
+ pci_write_config_byte(dev, CB_CARDBUS_BUS, bus);
+ pci_write_config_byte(dev, CB_SUBORD_BUS, sub);
+ }
+
+ /* Create pci_bus structure for the CardBus, if needed */
+ {
+ struct pci_bus *child, *parent = dev->bus;
+ for (child = parent->children; child; child = child->next)
+ if (child->number == bus) break;
+ if (!child) {
+ child = kmalloc(sizeof(struct pci_bus), GFP_KERNEL);
+ memset(child, 0, sizeof(struct pci_bus));
+ child->self = dev;
+ child->primary = bus;
+ child->number = child->secondary = bus;
+ child->subordinate = sub;
+ child->parent = parent;
+#if (LINUX_VERSION_CODE >= VERSION(2,3,15))
+ child->ops = parent->ops;
+#endif
+ child->next = parent->children;
+ parent->children = child;
+ }
+ }
+
+ /* Map the CardBus bridge registers, if needed */
+ pci_write_config_dword(dev, CB_LEGACY_MODE_BASE, 0);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &phys);
+ if ((phys == 0) || (cb_mem_base[0] != 0)) {
+ /* Make sure the bridge is awake so we can test it */
+ pci_set_power_state(dev, 0);
+ for (i = 0; i < sizeof(cb_mem_base)/sizeof(u_int); i++) {
+ phys = cb_mem_base[i];
+ if (phys == 0) continue;
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, phys);
+ if ((i == 0) || (check_cb_mapping(phys) == 0)) break;
+ }
+ if (i == sizeof(cb_mem_base)/sizeof(u_int)) {
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
+ } else {
+ cb_mem_base[0] = cb_mem_base[i] + 0x1000;
+ }
+ }
+}
+
+#ifdef __i386__
+
+static u8 pirq_init(struct pci_dev *router, struct pirq_pin *pin)
+{
+ u16 map = pin->irq_map;
+ u8 irq = 0;
+ if (pirq->pci_mask)
+ map &= pirq->pci_mask;
+ if (cb_pci_irq)
+ map = 1<<cb_pci_irq;
+ /* Be conservative: only init irq if the mask is unambiguous */
+ if (map && (!(map & (map-1)))) {
+ irq = ffs(map)-1;
+ router_info->init(router, pin->link, irq);
+ pci_irq_mask |= (1<<irq);
+ }
+ return irq;
+}
+
+static void setup_cb_bridge_irq(struct pci_dev *dev)
+{
+ struct slot_entry *e;
+ u8 pin;
+ u32 phys;
+ char *virt;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &phys);
+ if (!pin || !phys)
+ return;
+ virt = ioremap(phys, 0x1000);
+ if (virt) {
+ /* Disable any pending interrupt sources */
+ writel(0, virt+CB_SOCKET_MASK);
+ writel(-1, virt+CB_SOCKET_EVENT);
+ iounmap(virt);
+ }
+ for (e = pirq->entry; (u8 *)e < (u8 *)pirq + pirq->size; e++) {
+ if ((e->bus != dev->bus->number) ||
+ (e->devfn != (dev->devfn & ~7)))
+ continue;
+ dev->irq = pirq_init(router_dev, &e->pin[pin-1]);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ break;
+ }
+}
+
+#endif
+
+int pci_enable_device(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
+ setup_cb_bridge(dev);
+ }
+#ifdef __i386__
+ /* In certain cases, if the interrupt can be deduced, but was
+ unrouted when the pirq table was scanned, we'll try to set it
+ up now. */
+ if (!dev->irq && pirq && (router_info) &&
+ ((dev->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS)) {
+ setup_cb_bridge_irq(dev);
+ }
+#endif
+ return 0;
+}
+
+int pci_set_power_state(struct pci_dev *dev, int state)
+{
+ u16 tmp, cmd;
+ u32 base, bus;
+ u8 a, b, pmcs;
+ pci_read_config_byte(dev, PCI_STATUS, &a);
+ if (a & PCI_STATUS_CAPLIST) {
+ pci_read_config_byte(dev, PCI_CB_CAPABILITY_POINTER, &b);
+ while (b != 0) {
+ pci_read_config_byte(dev, b+PCI_CAPABILITY_ID, &a);
+ if (a == PCI_CAPABILITY_PM) {
+ pmcs = b + PCI_PM_CONTROL_STATUS;
+ /* Make sure we're in D0 state */
+ pci_read_config_word(dev, pmcs, &tmp);
+ if (!(tmp & PCI_PMCS_PWR_STATE_MASK)) break;
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &base);
+ pci_read_config_dword(dev, CB_PRIMARY_BUS, &bus);
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_write_config_word(dev, pmcs, PCI_PMCS_PWR_STATE_D0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, base);
+ pci_write_config_dword(dev, CB_PRIMARY_BUS, bus);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ break;
+ }
+ pci_read_config_byte(dev, b+PCI_NEXT_CAPABILITY, &b);
+ }
+ }
+ return 0;
+}
+
+#endif /* (LINUX_VERSION_CODE < VERSION(2,3,24)) */
+
+/*======================================================================
+
+ General setup and cleanup entry points
+
+======================================================================*/
+
+void pci_fixup_init(void)
+{
+ struct pci_dev *p;
+
+#if (LINUX_VERSION_CODE < VERSION(2,3,24)) && defined(__i386__)
+ scan_pirq_table();
+ pci_for_each_dev(p)
+ if (((p->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) &&
+ (p->irq == 0)) break;
+ if (p && !pirq)
+ printk(KERN_INFO "No PCI interrupt routing table!\n");
+ if (!pirq && cb_pci_irq)
+ printk(KERN_INFO "cb_pci_irq will be ignored.\n");
+#endif
+
+ pci_for_each_dev(p)
+ pci_irq_mask |= (1<<p->irq);
+
+#ifdef __alpha__
+#define PIC 0x4d0
+ pci_irq_mask |= inb(PIC) | (inb(PIC+1) << 8);
+#endif
+}
+
+void pci_fixup_done(void)
+{
+#if (LINUX_VERSION_CODE < VERSION(2,1,0))
+ struct pci_dev *d, *dn;
+ struct pci_bus *b, *bn;
+ for (d = pci_devices; d; d = dn) {
+ dn = d->next;
+ kfree(d);
+ }
+ for (b = pci_root.next; b; b = bn) {
+ bn = b->next;
+ kfree(b);
+ }
+#endif
+}
diff --git a/linux/pcmcia-cs/modules/ricoh.h b/linux/pcmcia-cs/modules/ricoh.h
new file mode 100644
index 0000000..de62f8b
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ricoh.h
@@ -0,0 +1,161 @@
+/*
+ * ricoh.h 1.16 2002/08/13 15:17:14
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_RICOH_H
+#define _LINUX_RICOH_H
+
+#define RF5C_MODE_CTL 0x1f /* Mode control */
+#define RF5C_PWR_CTL 0x2f /* Mixed voltage control */
+#define RF5C_CHIP_ID 0x3a /* Chip identification */
+#define RF5C_MODE_CTL_3 0x3b /* Mode control 3 */
+
+/* I/O window address offset */
+#define RF5C_IO_OFF(w) (0x36+((w)<<1))
+
+/* Flags for RF5C_MODE_CTL */
+#define RF5C_MODE_ATA 0x01 /* ATA mode */
+#define RF5C_MODE_LED_ENA 0x02 /* IRQ 12 is LED */
+#define RF5C_MODE_CA21 0x04
+#define RF5C_MODE_CA22 0x08
+#define RF5C_MODE_CA23 0x10
+#define RF5C_MODE_CA24 0x20
+#define RF5C_MODE_CA25 0x40
+#define RF5C_MODE_3STATE_BIT7 0x80
+
+/* Flags for RF5C_PWR_CTL */
+#define RF5C_PWR_VCC_3V 0x01
+#define RF5C_PWR_IREQ_HIGH 0x02
+#define RF5C_PWR_INPACK_ENA 0x04
+#define RF5C_PWR_5V_DET 0x08
+#define RF5C_PWR_TC_SEL 0x10 /* Terminal Count: irq 11 or 15 */
+#define RF5C_PWR_DREQ_LOW 0x20
+#define RF5C_PWR_DREQ_OFF 0x00 /* DREQ steering control */
+#define RF5C_PWR_DREQ_INPACK 0x40
+#define RF5C_PWR_DREQ_SPKR 0x80
+#define RF5C_PWR_DREQ_IOIS16 0xc0
+
+/* Values for RF5C_CHIP_ID */
+#define RF5C_CHIP_RF5C296 0x32
+#define RF5C_CHIP_RF5C396 0xb2
+
+/* Flags for RF5C_MODE_CTL_3 */
+#define RF5C_MCTL3_DISABLE 0x01 /* Disable PCMCIA interface */
+#define RF5C_MCTL3_DMA_ENA 0x02
+
+/* Register definitions for Ricoh PCI-to-CardBus bridges */
+
+#ifndef PCI_VENDOR_ID_RICOH
+#define PCI_VENDOR_ID_RICOH 0x1180
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C465
+#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C466
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C475
+#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C476
+#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C477
+#define PCI_DEVICE_ID_RICOH_RL5C477 0x0477
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C478
+#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+#endif
+
+/* Extra bits in CB_BRIDGE_CONTROL */
+#define RL5C46X_BCR_3E0_ENA 0x0800
+#define RL5C46X_BCR_3E2_ENA 0x1000
+
+/* Bridge Configuration Register */
+#define RL5C4XX_CONFIG 0x80 /* 16 bit */
+#define RL5C4XX_CONFIG_IO_1_MODE 0x0200
+#define RL5C4XX_CONFIG_IO_0_MODE 0x0100
+#define RL5C4XX_CONFIG_PREFETCH 0x0001
+
+/* Misc Control Register */
+#define RL5C4XX_MISC 0x82 /* 16 bit */
+#define RL5C4XX_MISC_HW_SUSPEND_ENA 0x0002
+#define RL5C4XX_MISC_VCCEN_POL 0x0100
+#define RL5C4XX_MISC_VPPEN_POL 0x0200
+#define RL5C46X_MISC_SUSPEND 0x0001
+#define RL5C46X_MISC_PWR_SAVE_2 0x0004
+#define RL5C46X_MISC_IFACE_BUSY 0x0008
+#define RL5C46X_MISC_B_LOCK 0x0010
+#define RL5C46X_MISC_A_LOCK 0x0020
+#define RL5C46X_MISC_PCI_LOCK 0x0040
+#define RL5C47X_MISC_IFACE_BUSY 0x0004
+#define RL5C47X_MISC_PCI_INT_MASK 0x0018
+#define RL5C47X_MISC_PCI_INT_DIS 0x0020
+#define RL5C47X_MISC_SUBSYS_WR 0x0040
+#define RL5C47X_MISC_SRIRQ_ENA 0x0080
+#define RL5C47X_MISC_5V_DISABLE 0x0400
+#define RL5C47X_MISC_LED_POL 0x0800
+
+/* 16-bit Interface Control Register */
+#define RL5C4XX_16BIT_CTL 0x84 /* 16 bit */
+#define RL5C4XX_16CTL_IO_TIMING 0x0100
+#define RL5C4XX_16CTL_MEM_TIMING 0x0200
+#define RL5C46X_16CTL_LEVEL_1 0x0010
+#define RL5C46X_16CTL_LEVEL_2 0x0020
+
+/* 16-bit IO and memory timing registers */
+#define RL5C4XX_16BIT_IO_0 0x88 /* 16 bit */
+#define RL5C4XX_16BIT_MEM_0 0x8a /* 16 bit */
+#define RL5C4XX_SETUP_MASK 0x0007
+#define RL5C4XX_SETUP_SHIFT 0
+#define RL5C4XX_CMD_MASK 0x01f0
+#define RL5C4XX_CMD_SHIFT 4
+#define RL5C4XX_HOLD_MASK 0x1c00
+#define RL5C4XX_HOLD_SHIFT 10
+
/* Data structure for tracking vendor-specific state */
/* Saved copies of the Ricoh bridge extension registers, one value per
   register; the code that saves/restores them lives outside this chunk. */
typedef struct ricoh_state_t {
    u_short config;	/* RL5C4XX_CONFIG */
    u_short misc;	/* RL5C4XX_MISC */
    u_short ctl;	/* RL5C4XX_16BIT_CTL */
    u_short io;		/* RL5C4XX_16BIT_IO_0 */
    u_short mem;	/* RL5C4XX_16BIT_MEM_0 */
} ricoh_state_t;
+
+#define RICOH_PCIC_ID \
+ IS_RL5C465, IS_RL5C466, IS_RL5C475, IS_RL5C476, IS_RL5C477, IS_RL5C478
+
+#define RICOH_PCIC_INFO \
+ { "Ricoh RL5C465", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C465) }, \
+ { "Ricoh RL5C466", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C466) }, \
+ { "Ricoh RL5C475", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C475) }, \
+ { "Ricoh RL5C476", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C476) }, \
+ { "Ricoh RL5C477", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C477) }, \
+ { "Ricoh RL5C478", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C478) }
+
+#endif /* _LINUX_RICOH_H */
diff --git a/linux/pcmcia-cs/modules/rsrc_mgr.c b/linux/pcmcia-cs/modules/rsrc_mgr.c
new file mode 100644
index 0000000..a94926a
--- /dev/null
+++ b/linux/pcmcia-cs/modules/rsrc_mgr.c
@@ -0,0 +1,877 @@
+/*======================================================================
+
+ Resource management routines
+
+ rsrc_mgr.c 1.94 2003/12/12 17:12:53
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#define __NO_VERSION__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+INT_MODULE_PARM(probe_mem, 1); /* memory probe? */
+#ifdef CONFIG_ISA
+INT_MODULE_PARM(probe_io, 1); /* IO port probe? */
+INT_MODULE_PARM(mem_limit, 0x10000);
+#endif
+
+/*======================================================================
+
+ The resource_map_t structures are used to track what resources are
+ available for allocation for PC Card devices.
+
+======================================================================*/
+
+typedef struct resource_map_t {
+ u_long base, num;
+ struct resource_map_t *next;
+} resource_map_t;
+
+/* Memory resource database */
+static resource_map_t mem_db = { 0, 0, &mem_db };
+
+/* IO port resource database */
+static resource_map_t io_db = { 0, 0, &io_db };
+
+#ifdef CONFIG_ISA
+
+typedef struct irq_info_t {
+ u_int Attributes;
+ int time_share, dyn_share;
+ struct socket_info_t *Socket;
+} irq_info_t;
+
+/* Table of ISA IRQ assignments */
+static irq_info_t irq_table[16] = { { 0, 0, 0 }, /* etc */ };
+
+#endif
+
+/*======================================================================
+
+ Linux resource management extensions
+
+======================================================================*/
+
+#ifndef CONFIG_PNP_BIOS
+#define check_io_region(b,n) (0)
+#endif
+
+#if defined(CONFIG_PNP_BIOS) || !defined(HAVE_MEMRESERVE)
+
+#ifdef __SMP__
+static spinlock_t rsrc_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
+typedef struct resource_entry_t {
+ u_long base, num;
+ char *name;
+ struct resource_entry_t *next;
+} resource_entry_t;
+
+/* Ordered linked lists of allocated IO and memory blocks */
+#ifdef CONFIG_PNP_BIOS
+static resource_entry_t io_list = { 0, 0, NULL, NULL };
+#endif
+#ifndef HAVE_MEMRESERVE
+static resource_entry_t mem_list = { 0, 0, NULL, NULL };
+#endif
+
+static resource_entry_t *find_gap(resource_entry_t *root,
+ resource_entry_t *entry)
+{
+ resource_entry_t *p;
+
+ if (entry->base > entry->base+entry->num-1)
+ return NULL;
+ for (p = root; ; p = p->next) {
+ if ((p != root) && (p->base+p->num-1 >= entry->base)) {
+ p = NULL;
+ break;
+ }
+ if ((p->next == NULL) ||
+ (p->next->base > entry->base+entry->num-1))
+ break;
+ }
+ return p;
+}
+
/* Insert the range [base, base+num) into an ordered resource list.
   Returns -ENOMEM if the node can't be allocated, -EBUSY if the range
   overlaps an already-registered block, 0 on success.  The node is
   allocated before taking the lock so GFP_ATOMIC work stays minimal. */
static int register_my_resource(resource_entry_t *list,
				u_long base, u_long num, char *name)
{
    u_long flags;
    resource_entry_t *p, *entry;

    entry = kmalloc(sizeof(resource_entry_t), GFP_ATOMIC);
    if (!entry) return -ENOMEM;
    entry->base = base;
    entry->num = num;
    entry->name = name;

    spin_lock_irqsave(&rsrc_lock, flags);
    p = find_gap(list, entry);
    if (p == NULL) {
	/* Overlap (or wrapped range): back out and free the node. */
	spin_unlock_irqrestore(&rsrc_lock, flags);
	kfree(entry);
	return -EBUSY;
    }
    /* Splice in right after the gap's predecessor. */
    entry->next = p->next;
    p->next = entry;
    spin_unlock_irqrestore(&rsrc_lock, flags);
    return 0;
}
+
/* Remove the entry that exactly matches (base, num) from the list and
   free it.  Silently does nothing if no exact match exists. */
static void release_my_resource(resource_entry_t *list,
				u_long base, u_long num)
{
    u_long flags;
    resource_entry_t *p, *q;

    spin_lock_irqsave(&rsrc_lock, flags);
    /* p trails q so the predecessor link can be patched on a match. */
    for (p = list; ; p = q) {
	q = p->next;
	if (q == NULL) break;
	if ((q->base == base) && (q->num == num)) {
	    p->next = q->next;
	    kfree(q);
	    spin_unlock_irqrestore(&rsrc_lock, flags);
	    return;
	}
    }
    spin_unlock_irqrestore(&rsrc_lock, flags);
    return;
}
+
+static int check_my_resource(resource_entry_t *list,
+ u_long base, u_long num)
+{
+ if (register_my_resource(list, base, num, NULL) != 0)
+ return -EBUSY;
+ release_my_resource(list, base, num);
+ return 0;
+}
+
+#ifdef CONFIG_PNP_BIOS
/* Public wrappers (CONFIG_PNP_BIOS): mirror the kernel's IO-region
   API on top of our private io_list database. */
int check_io_region(u_long base, u_long num)
{
    return check_my_resource(&io_list, base, num);
}
/* Record an IO range as in use; 'name' appears in the /proc listing. */
void request_io_region(u_long base, u_long num, char *name)
{
    register_my_resource(&io_list, base, num, name);
}
/* Drop a previously requested IO range (must match base/num exactly). */
void release_io_region(u_long base, u_long num)
{
    release_my_resource(&io_list, base, num);
}
+#ifdef HAS_PROC_BUS
/* /proc read handler: list the registered IO regions, one per line.
   NOTE(review): 'pos' and 'count' are ignored and output is written
   unboundedly into 'buf' — assumes everything fits in one page;
   confirm against the proc registration. */
int proc_read_io(char *buf, char **start, off_t pos,
		 int count, int *eof, void *data)
{
    resource_entry_t *r;
    u_long flags;
    char *p = buf;

    spin_lock_irqsave(&rsrc_lock, flags);
    for (r = io_list.next; r; r = r->next)
	p += sprintf(p, "%04lx-%04lx : %s\n", r->base,
		     r->base+r->num-1, r->name);
    spin_unlock_irqrestore(&rsrc_lock, flags);
    return (p - buf);
}
+#endif
+#endif
+
+#ifndef HAVE_MEMRESERVE
/* Fallback memory-region API (kernels without HAVE_MEMRESERVE),
   implemented over our private mem_list database. */
int check_mem_region(u_long base, u_long num)
{
    return check_my_resource(&mem_list, base, num);
}
/* Record a memory range as in use; 'name' appears in /proc output. */
void request_mem_region(u_long base, u_long num, char *name)
{
    register_my_resource(&mem_list, base, num, name);
}
/* Drop a previously requested range (must match base/num exactly). */
void release_mem_region(u_long base, u_long num)
{
    release_my_resource(&mem_list, base, num);
}
+#ifdef HAS_PROC_BUS
/* /proc read handler: list the registered memory regions.
   NOTE(review): like proc_read_io, ignores 'pos'/'count' and assumes
   the whole listing fits in 'buf' — confirm. */
int proc_read_mem(char *buf, char **start, off_t pos,
		  int count, int *eof, void *data)
{
    resource_entry_t *r;
    u_long flags;
    char *p = buf;

    spin_lock_irqsave(&rsrc_lock, flags);
    for (r = mem_list.next; r; r = r->next)
	p += sprintf(p, "%08lx-%08lx : %s\n", r->base,
		     r->base+r->num-1, r->name);
    spin_unlock_irqrestore(&rsrc_lock, flags);
    return (p - buf);
}
+#endif
+#endif
+
+#endif /* defined(CONFIG_PNP_BIOS) || !defined(HAVE_MEMRESERVE) */
+
+/*======================================================================
+
+ These manage the internal databases of available resources.
+
+======================================================================*/
+
/* Add [base, base+num) to a circular resource map, keeping the list
   sorted.  Returns CS_SUCCESS, CS_OUT_OF_RESOURCE on allocation
   failure, or -1 if the range overlaps an existing interval.
   NOTE(review): the -1 mixes with CS_* codes; callers only test for
   nonzero, but unifying the convention would be cleaner. */
static int add_interval(resource_map_t *map, u_long base, u_long num)
{
    resource_map_t *p, *q;

    for (p = map; ; p = p->next) {
	/* Walked past a node that reaches into our range: overlap. */
	if ((p != map) && (p->base+p->num-1 >= base))
	    return -1;
	/* Insert before the first node starting beyond our end. */
	if ((p->next == map) || (p->next->base > base+num-1))
	    break;
    }
    q = kmalloc(sizeof(resource_map_t), GFP_KERNEL);
    if (!q) return CS_OUT_OF_RESOURCE;
    q->base = base; q->num = num;
    q->next = p->next; p->next = q;
    return CS_SUCCESS;
}
+
+/*====================================================================*/
+
/* Remove [base, base+num) from a circular resource map.  Each interval
   that overlaps the range is deleted, trimmed at the front or back, or
   split into two pieces.  Returns CS_OUT_OF_RESOURCE only if a split
   needs a node and allocation fails; otherwise CS_SUCCESS. */
static int sub_interval(resource_map_t *map, u_long base, u_long num)
{
    resource_map_t *p, *q;

    for (p = map; ; p = q) {
	q = p->next;
	if (q == map)
	    break;
	/* Does interval q overlap [base, base+num)? */
	if ((q->base+q->num > base) && (base+num > q->base)) {
	    if (q->base >= base) {
		if (q->base+q->num <= base+num) {
		    /* Delete whole block */
		    p->next = q->next;
		    kfree(q);
		    /* don't advance the pointer yet */
		    q = p;
		} else {
		    /* Cut off bit from the front */
		    q->num = q->base + q->num - base - num;
		    q->base = base + num;
		}
	    } else if (q->base+q->num <= base+num) {
		/* Cut off bit from the end */
		q->num = base - q->base;
	    } else {
		/* Split the block into two pieces */
		p = kmalloc(sizeof(resource_map_t), GFP_KERNEL);
		if (!p) return CS_OUT_OF_RESOURCE;
		p->base = base+num;
		p->num = q->base+q->num - p->base;
		q->num = base - q->base;
		p->next = q->next ; q->next = p;
	    }
	}
    }
    return CS_SUCCESS;
}
+
+/*======================================================================
+
+ These routines examine a region of IO or memory addresses to
+ determine what ranges might be genuinely available.
+
+======================================================================*/
+
+#ifdef CONFIG_ISA
/* Probe an IO port range in 8-port chunks, excluding from io_db any
   chunk that doesn't read back like a "floating" (unconnected) port.
   Ports already claimed through the kernel or our own database are
   skipped entirely. */
static void do_io_probe(ioaddr_t base, ioaddr_t num)
{

    ioaddr_t i, j, bad, any;
    u_char *b, hole, most;

    printk(KERN_INFO "cs: IO port probe 0x%04x-0x%04x:",
	   base, base+num-1);

    /* First, what does a floating port look like? */
    /* Histogram the byte returned by uniform 8-port chunks; the most
       common value is taken as the floating-bus pattern. */
    b = kmalloc(256, GFP_KERNEL);
    if (!b) {
	printk(KERN_INFO " kmalloc failed!\n");
	return;
    }
    memset(b, 0, 256);
    for (i = base, most = 0; i < base+num; i += 8) {
	if (check_region(i, 8) || check_io_region(i, 8))
	    continue;
	hole = inb(i);
	for (j = 1; j < 8; j++)
	    if (inb(i+j) != hole) break;
	if ((j == 8) && (++b[hole] > b[most]))
	    most = hole;
	/* Enough evidence collected — stop early. */
	if (b[most] == 127) break;
    }
    kfree(b);

    /* Second pass: exclude chunks that don't match the pattern. */
    bad = any = 0;
    for (i = base; i < base+num; i += 8) {
	if (check_region(i, 8) || check_io_region(i, 8))
	    continue;
	for (j = 0; j < 8; j++)
	    if (inb(i+j) != most) break;
	if (j < 8) {
	    if (!any)
		printk(" excluding");
	    if (!bad)
		bad = any = i;	/* start of a bad run */
	} else {
	    if (bad) {
		/* End of a bad run: drop it from the database. */
		sub_interval(&io_db, bad, i-bad);
		printk(" %#04x-%#04x", bad, i-1);
		bad = 0;
	    }
	}
    }
    if (bad) {
	/* Bad run extends to the end of the range. */
	if ((num > 16) && (bad == base) && (i == base+num)) {
	    printk(" nothing: probe failed.\n");
	    return;
	} else {
	    sub_interval(&io_db, bad, i-bad);
	    printk(" %#04x-%#04x", bad, i-1);
	}
    }

    printk(any ? "\n" : " clean.\n");
}
+
static int io_scan; /* = 0 */	/* nonzero once the IO probe has run */

/* Let validate_io() probe again after the IO database changes. */
static void invalidate_io(void)
{
    io_scan = 0;
}

/* One-shot probe of every window in io_db (when probe_io is set). */
static void validate_io(void)
{
    resource_map_t *m, *n;
    if (!probe_io || io_scan++)
	return;
    for (m = io_db.next; m != &io_db; m = n) {
	/* do_io_probe() may delete m via sub_interval(); save next. */
	n = m->next;
	do_io_probe(m->base, m->num);
    }
}
+
+#else /* CONFIG_ISA */
+
+#define validate_io() do { } while (0)
+#define invalidate_io() do { } while (0)
+
+#endif /* CONFIG_ISA */
+
+/*======================================================================
+
+ The memory probe. If the memory list includes a 64K-aligned block
+ below 1MB, we probe in 64K chunks, and as soon as we accumulate at
+ least mem_limit free space, we quit.
+
+======================================================================*/
+
/* Probe [base, base+num) for usable card memory in 'step'-sized
   chunks.  is_valid() is the primary test; if no chunk in a sweep
   responds at all, fall back to the weaker do_cksum() pairs test.
   Unusable stretches are removed from mem_db.  Returns the number of
   bytes that survived the probe. */
static int do_mem_probe(u_long base, u_long num,
			int (*is_valid)(u_long), int (*do_cksum)(u_long))
{
    u_long i, j, bad, fail, step;

    printk(KERN_INFO "cs: memory probe 0x%06lx-0x%06lx:",
	   base, base+num-1);
    bad = fail = 0;
    /* Chunk size: 8K for small windows, else ~1/16 of the window
       rounded down to an 8K multiple. */
    step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff);
    for (i = j = base; i < base+num; i = j + step) {
	if (!fail) {
	    /* Find the next chunk that is free and looks valid. */
	    for (j = i; j < base+num; j += step)
		if ((check_mem_region(j, step) == 0) && is_valid(j))
		    break;
	    /* Nothing at all validated: switch to the checksum test. */
	    fail = ((i == base) && (j == base+num));
	}
	if (fail) {
	    for (j = i; j < base+num; j += 2*step)
		if ((check_mem_region(j, 2*step) == 0) &&
		    do_cksum(j) && do_cksum(j+step))
		    break;
	}
	if (i != j) {
	    /* [i, j) failed the test: drop it from the database. */
	    if (!bad) printk(" excluding");
	    printk(" %#05lx-%#05lx", i, j-1);
	    sub_interval(&mem_db, i, j-i);
	    bad += j-i;
	}
    }
    printk(bad ? "\n" : " clean.\n");
    return (num - bad);
}
+
+#ifdef CONFIG_ISA
+
/* Recursively probe mem_db from the tail toward the head, considering
   only high-memory (>= 1MB) windows.  As soon as a later window yields
   usable space, earlier high windows are dropped unprobed (we only
   need one).  Returns the number of usable bytes found. */
static u_long inv_probe(int (*is_valid)(u_long),
			int (*do_cksum)(u_long),
			resource_map_t *m)
{
    u_long ok;
    if (m == &mem_db)		/* hit the sentinel: list exhausted */
	return 0;
    ok = inv_probe(is_valid, do_cksum, m->next);
    if (ok) {
	/* A later window already worked: discard this high window. */
	if (m->base >= 0x100000)
	    sub_interval(&mem_db, m->base, m->num);
	return ok;
    }
    if (m->base < 0x100000)	/* low memory: handled elsewhere */
	return 0;
    return do_mem_probe(m->base, m->num, is_valid, do_cksum);
}
+
/* One-shot latches for the high (>=1MB) and low memory probe passes */
static int hi_scan, lo_scan; /* = 0 */

/* Let validate_mem() probe again after the memory database changes. */
static void invalidate_mem(void)
{
    hi_scan = lo_scan = 0;
}
+
/* ISA flavor of the memory probe.  Unless force_low is set, first try
   high memory via inv_probe(); if that yields nothing, fall through to
   the sub-1MB windows.  64K-aligned low blocks are probed in the
   preferred order 0xd0000, 0xe0000, 0xc0000, 0xf0000 until at least
   mem_limit bytes have validated.  Each pass runs at most once
   (hi_scan/lo_scan latches, reset by invalidate_mem()). */
void validate_mem(int (*is_valid)(u_long), int (*do_cksum)(u_long),
		  int force_low)
{
    resource_map_t *m, mm;
    static u_char order[] = { 0xd0, 0xe0, 0xc0, 0xf0 };
    u_long b, i, ok = 0;

    if (!probe_mem) return;
    /* We do up to four passes through the list */
    if (!force_low) {
	if (hi_scan++ || (inv_probe(is_valid, do_cksum, mem_db.next) > 0))
	    return;
	printk(KERN_NOTICE "cs: warning: no high memory space "
	       "available!\n");
    }
    if (lo_scan++) return;
    for (m = mem_db.next; m != &mem_db; m = mm.next) {
	/* Work on a copy: do_mem_probe() may free m via sub_interval(). */
	mm = *m;
	/* Only probe < 1 MB */
	if (mm.base >= 0x100000) continue;
	if ((mm.base | mm.num) & 0xffff) {
	    /* Not 64K-aligned: probe the window as-is. */
	    ok += do_mem_probe(mm.base, mm.num, is_valid, do_cksum);
	    continue;
	}
	/* Special probe for 64K-aligned block */
	for (i = 0; i < 4; i++) {
	    b = order[i] << 12;
	    if ((b >= mm.base) && (b+0x10000 <= mm.base+mm.num)) {
		if (ok >= mem_limit)
		    /* Enough space found: discard extra 64K blocks. */
		    sub_interval(&mem_db, b, 0x10000);
		else
		    ok += do_mem_probe(b, 0x10000, is_valid, do_cksum);
	    }
	}
    }
}
+
+#else /* CONFIG_ISA */
+
+#define invalidate_mem() do { } while (0)
+
+void validate_mem(int (*is_valid)(u_long), int (*do_cksum)(u_long),
+ int force_low)
+{
+ resource_map_t *m, *n;
+ static int done = 0;
+
+ if (!probe_mem || done++)
+ return;
+ for (m = mem_db.next; m != &mem_db; m = n)
+ n = m->next;
+ if (do_mem_probe(m->base, m->num, is_valid, do_cksum))
+ return;
+}
+
+#endif /* CONFIG_ISA */
+
+/*======================================================================
+
+ These find ranges of I/O ports or memory addresses that are not
+ currently allocated by other devices.
+
+ The 'align' field should reflect the number of bits of address
+ that need to be preserved from the initial value of *base. It
+ should be a power of two, greater than or equal to 'num'. A value
+ of 0 means that all bits of *base are significant. *base should
+ also be strictly less than 'align'.
+
+======================================================================*/
+
/* Find a free window of 'num' IO ports.  On entry *base holds the
   required low-order address bits; 'align' is the candidate step (a
   power of two >= num, or 0 meaning *base names the only candidate).
   On success, stores the chosen address in *base, registers it under
   'name' if given, and returns 0; returns -1 if nothing fits. */
int find_io_region(ioaddr_t *base, ioaddr_t num, ioaddr_t align,
		   char *name)
{
    ioaddr_t try;
    resource_map_t *m;

    validate_io();
    for (m = io_db.next; m != &io_db; m = m->next) {
	/* First aligned candidate in this window with offset *base. */
	try = (m->base & ~(align-1)) + *base;
	for (try = (try >= m->base) ? try : try+align;
	     (try >= m->base) && (try+num <= m->base+m->num);
	     try += align) {
	    if ((check_region(try, num) == 0) &&
		(check_io_region(try, num) == 0)) {
		*base = try;
		if (name) request_region(try, num, name);
		return 0;
	    }
	    /* align == 0: exactly one address to try per window. */
	    if (!align) break;
	}
    }
    return -1;
}
+
/* Find a free memory window of 'num' bytes; same *base/'align'
   contract as find_io_region().  Unless force_low is set, windows
   above 1MB are tried first, then those below.  On success stores the
   address in *base, optionally registers it under 'name', and returns
   0; returns -1 if nothing fits. */
int find_mem_region(u_long *base, u_long num, u_long align,
		    int force_low, char *name)
{
    u_long try;
    resource_map_t *m;

    while (1) {
	for (m = mem_db.next; m != &mem_db; m = m->next) {
	    /* first pass >1MB, second pass <1MB */
	    if ((force_low != 0) ^ (m->base < 0x100000)) continue;
	    try = (m->base & ~(align-1)) + *base;
	    for (try = (try >= m->base) ? try : try+align;
		 (try >= m->base) && (try+num <= m->base+m->num);
		 try += align) {
		if (check_mem_region(try, num) == 0) {
		    if (name) request_mem_region(try, num, name);
		    *base = try;
		    return 0;
		}
		/* align == 0: exactly one address to try per window. */
		if (!align) break;
	    }
	}
	/* force_low nonzero after pass one: both passes done. */
	if (force_low) break;
	force_low++;
    }
    return -1;
}
+
+/*======================================================================
+
+ This checks to see if an interrupt is available, with support
+ for interrupt sharing. We don't support reserving interrupts
+ yet. If the interrupt is available, we allocate it.
+
+======================================================================*/
+
+#ifdef CONFIG_ISA
+
/* Dummy handler used only while testing whether an IRQ is free. */
static void fake_irq(int i, void *d, struct pt_regs *r) { }
/* Probe an IRQ: if request_irq() succeeds the line is unclaimed.
   Returns 0 when available, -1 when some driver already owns it. */
static inline int check_irq(int irq)
{
    if (request_irq(irq, fake_irq, 0, "bogus", NULL) != 0)
	return -1;
    free_irq(irq, NULL);
    return 0;
}
+
/* Try to claim ISA interrupt 'irq' under the requested sharing model
   (exclusive, time-multiplexed, or dynamic).  The first claimant of a
   shared line must pass IRQ_FIRST_SHARED; later sharers must not, and
   must request the same sharing model already in force.  'specific'
   allows claiming a line that was administratively reserved.
   Returns 0 on success or a CS_* error code. */
int try_irq(u_int Attributes, int irq, int specific)
{
    irq_info_t *info = &irq_table[irq];
    if (info->Attributes & RES_ALLOCATED) {
	/* Line already in use: only compatible sharing can join. */
	switch (Attributes & IRQ_TYPE) {
	case IRQ_TYPE_EXCLUSIVE:
	    return CS_IN_USE;
	case IRQ_TYPE_TIME:
	    if ((info->Attributes & RES_IRQ_TYPE)
		!= RES_IRQ_TYPE_TIME)
		return CS_IN_USE;
	    /* Joiners must not claim to be the first sharer. */
	    if (Attributes & IRQ_FIRST_SHARED)
		return CS_BAD_ATTRIBUTE;
	    info->Attributes |= RES_IRQ_TYPE_TIME | RES_ALLOCATED;
	    info->time_share++;
	    break;
	case IRQ_TYPE_DYNAMIC_SHARING:
	    if ((info->Attributes & RES_IRQ_TYPE)
		!= RES_IRQ_TYPE_DYNAMIC)
		return CS_IN_USE;
	    if (Attributes & IRQ_FIRST_SHARED)
		return CS_BAD_ATTRIBUTE;
	    info->Attributes |= RES_IRQ_TYPE_DYNAMIC | RES_ALLOCATED;
	    info->dyn_share++;
	    break;
	}
    } else {
	/* Free line: honor reservations, then verify it's really
	   unclaimed in the kernel before taking it. */
	if ((info->Attributes & RES_RESERVED) && !specific)
	    return CS_IN_USE;
	if (check_irq(irq) != 0)
	    return CS_IN_USE;
	switch (Attributes & IRQ_TYPE) {
	case IRQ_TYPE_EXCLUSIVE:
	    info->Attributes |= RES_ALLOCATED;
	    break;
	case IRQ_TYPE_TIME:
	    /* The first sharer must say so explicitly. */
	    if (!(Attributes & IRQ_FIRST_SHARED))
		return CS_BAD_ATTRIBUTE;
	    info->Attributes |= RES_IRQ_TYPE_TIME | RES_ALLOCATED;
	    info->time_share = 1;
	    break;
	case IRQ_TYPE_DYNAMIC_SHARING:
	    if (!(Attributes & IRQ_FIRST_SHARED))
		return CS_BAD_ATTRIBUTE;
	    info->Attributes |= RES_IRQ_TYPE_DYNAMIC | RES_ALLOCATED;
	    info->dyn_share = 1;
	    break;
	}
    }
    return 0;
}
+
+#endif
+
+/*====================================================================*/
+
+#ifdef CONFIG_ISA
+
+void undo_irq(u_int Attributes, int irq)
+{
+ irq_info_t *info;
+
+ info = &irq_table[irq];
+ switch (Attributes & IRQ_TYPE) {
+ case IRQ_TYPE_EXCLUSIVE:
+ info->Attributes &= RES_RESERVED;
+ break;
+ case IRQ_TYPE_TIME:
+ info->time_share--;
+ if (info->time_share == 0)
+ info->Attributes &= RES_RESERVED;
+ break;
+ case IRQ_TYPE_DYNAMIC_SHARING:
+ info->dyn_share--;
+ if (info->dyn_share == 0)
+ info->Attributes &= RES_RESERVED;
+ break;
+ }
+}
+
+#endif
+
+/*======================================================================
+
+ The various adjust_* calls form the external interface to the
+ resource database.
+
+======================================================================*/
+
/* Add or remove a managed memory range (ioctl backend).  Rejects
   empty or address-wrapping ranges.  On removal, also invalidates the
   probe state and drops any CIS mappings that might live in the
   removed range.  Returns a Card Services code. */
static int adjust_memory(adjust_t *adj)
{
    u_long base, num;
    int i, ret;

    base = adj->resource.memory.Base;
    num = adj->resource.memory.Size;
    /* Reject zero-length and end-of-address-space-wrapping ranges. */
    if ((num == 0) || (base+num-1 < base))
	return CS_BAD_SIZE;

    ret = CS_SUCCESS;
    switch (adj->Action) {
    case ADD_MANAGED_RESOURCE:
	ret = add_interval(&mem_db, base, num);
	break;
    case REMOVE_MANAGED_RESOURCE:
	ret = sub_interval(&mem_db, base, num);
	if (ret == CS_SUCCESS) {
	    /* Force a re-probe and unmap CIS windows on every socket. */
	    invalidate_mem();
	    for (i = 0; i < sockets; i++) {
		release_cis_mem(socket_table[i]);
#ifdef CONFIG_CARDBUS
		cb_release_cis_mem(socket_table[i]);
#endif
	    }
	}
	break;
    default:
	ret = CS_UNSUPPORTED_FUNCTION;
    }

    return ret;
}
+
+/*====================================================================*/
+
+static int adjust_io(adjust_t *adj)
+{
+ int base, num;
+
+ base = adj->resource.io.BasePort;
+ num = adj->resource.io.NumPorts;
+ if ((base < 0) || (base > 0xffff))
+ return CS_BAD_BASE;
+ if ((num <= 0) || (base+num > 0x10000) || (base+num <= base))
+ return CS_BAD_SIZE;
+
+ switch (adj->Action) {
+ case ADD_MANAGED_RESOURCE:
+ if (add_interval(&io_db, base, num) != 0)
+ return CS_IN_USE;
+ break;
+ case REMOVE_MANAGED_RESOURCE:
+ sub_interval(&io_db, base, num);
+ invalidate_io();
+ break;
+ default:
+ return CS_UNSUPPORTED_FUNCTION;
+ break;
+ }
+
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int adjust_irq(adjust_t *adj)
+{
+#ifdef CONFIG_ISA
+ int irq;
+ irq_info_t *info;
+
+ irq = adj->resource.irq.IRQ;
+ if ((irq < 0) || (irq > 15))
+ return CS_BAD_IRQ;
+ info = &irq_table[irq];
+
+ switch (adj->Action) {
+ case ADD_MANAGED_RESOURCE:
+ if (info->Attributes & RES_REMOVED)
+ info->Attributes &= ~(RES_REMOVED|RES_ALLOCATED);
+ else
+ if (adj->Attributes & RES_ALLOCATED)
+ return CS_IN_USE;
+ if (adj->Attributes & RES_RESERVED)
+ info->Attributes |= RES_RESERVED;
+ else
+ info->Attributes &= ~RES_RESERVED;
+ break;
+ case REMOVE_MANAGED_RESOURCE:
+ if (info->Attributes & RES_REMOVED)
+ return 0;
+ if (info->Attributes & RES_ALLOCATED)
+ return CS_IN_USE;
+ info->Attributes |= RES_ALLOCATED|RES_REMOVED;
+ info->Attributes &= ~RES_RESERVED;
+ break;
+ default:
+ return CS_UNSUPPORTED_FUNCTION;
+ break;
+ }
+#endif
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+int adjust_resource_info(client_handle_t handle, adjust_t *adj)
+{
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+
+ switch (adj->Resource) {
+ case RES_MEMORY_RANGE:
+ return adjust_memory(adj);
+ break;
+ case RES_IO_RANGE:
+ return adjust_io(adj);
+ break;
+ case RES_IRQ:
+ return adjust_irq(adj);
+ break;
+ }
+ return CS_UNSUPPORTED_FUNCTION;
+}
+
+/*====================================================================*/
+
/* Module-unload cleanup: free every node of the interval databases
   and, where we maintain them ourselves, the private allocation
   lists.  mem_db/io_db are circular lists headed by static sentinels;
   io_list/mem_list are NULL-terminated. */
void release_resource_db(void)
{
    resource_map_t *p, *q;
#if defined(CONFIG_PNP_BIOS) || !defined(HAVE_MEMRESERVE)
    resource_entry_t *u, *v;
#endif

    for (p = mem_db.next; p != &mem_db; p = q) {
	q = p->next;
	kfree(p);
    }
    for (p = io_db.next; p != &io_db; p = q) {
	q = p->next;
	kfree(p);
    }
#ifdef CONFIG_PNP_BIOS
    for (u = io_list.next; u; u = v) {
	v = u->next;
	kfree(u);
    }
#endif
#ifndef HAVE_MEMRESERVE
    for (u = mem_list.next; u; u = v) {
	v = u->next;
	kfree(u);
    }
#endif
}
diff --git a/linux/pcmcia-cs/modules/smc34c90.h b/linux/pcmcia-cs/modules/smc34c90.h
new file mode 100644
index 0000000..0f3ddc0
--- /dev/null
+++ b/linux/pcmcia-cs/modules/smc34c90.h
@@ -0,0 +1,58 @@
+/*
+ * smc34c90.h 1.10 2001/08/24 12:15:34
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_SMC34C90_H
+#define _LINUX_SMC34C90_H
+
+#ifndef PCI_VENDOR_ID_SMC
+#define PCI_VENDOR_ID_SMC 0x10b3
+#endif
+
+#ifndef PCI_DEVICE_ID_SMC_34C90
+#define PCI_DEVICE_ID_SMC_34C90 0xb106
+#endif
+
+/* Register definitions for SMC 34C90 PCI-to-CardBus bridge */
+
+/* EEPROM Information Register */
+#define SMC34C90_EEINFO 0x0088
+#define SMC34C90_EEINFO_ONE_SOCKET 0x0001
+#define SMC34C90_EEINFO_5V_ONLY 0x0002
+#define SMC34C90_EEINFO_ISA_IRQ 0x0004
+#define SMC34C90_EEINFO_ZV_PORT 0x0008
+#define SMC34C90_EEINFO_RING 0x0010
+#define SMC34C90_EEINFO_LED 0x0020
+
+#define SMC_PCIC_ID \
+ IS_SMC34C90
+
+#define SMC_PCIC_INFO \
+ { "SMC 34C90", IS_CARDBUS, ID(SMC, 34C90) }
+
+#endif /* _LINUX_SMC34C90_H */
diff --git a/linux/pcmcia-cs/modules/ti113x.h b/linux/pcmcia-cs/modules/ti113x.h
new file mode 100644
index 0000000..c224d7a
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ti113x.h
@@ -0,0 +1,264 @@
+/*
+ * ti113x.h 1.32 2003/02/13 06:28:09
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_TI113X_H
+#define _LINUX_TI113X_H
+
+#ifndef PCI_VENDOR_ID_TI
+#define PCI_VENDOR_ID_TI 0x104c
+#endif
+
+#ifndef PCI_DEVICE_ID_TI_1130
+#define PCI_DEVICE_ID_TI_1130 0xac12
+#endif
+#ifndef PCI_DEVICE_ID_TI_1031
+#define PCI_DEVICE_ID_TI_1031 0xac13
+#endif
+#ifndef PCI_DEVICE_ID_TI_1131
+#define PCI_DEVICE_ID_TI_1131 0xac15
+#endif
+#ifndef PCI_DEVICE_ID_TI_1210
+#define PCI_DEVICE_ID_TI_1210 0xac1a
+#endif
+#ifndef PCI_DEVICE_ID_TI_1211
+#define PCI_DEVICE_ID_TI_1211 0xac1e
+#endif
+#ifndef PCI_DEVICE_ID_TI_1220
+#define PCI_DEVICE_ID_TI_1220 0xac17
+#endif
+#ifndef PCI_DEVICE_ID_TI_1221
+#define PCI_DEVICE_ID_TI_1221 0xac19
+#endif
+#ifndef PCI_DEVICE_ID_TI_1250A
+#define PCI_DEVICE_ID_TI_1250A 0xac16
+#endif
+#ifndef PCI_DEVICE_ID_TI_1225
+#define PCI_DEVICE_ID_TI_1225 0xac1c
+#endif
+#ifndef PCI_DEVICE_ID_TI_1251A
+#define PCI_DEVICE_ID_TI_1251A 0xac1d
+#endif
+#ifndef PCI_DEVICE_ID_TI_1251B
+#define PCI_DEVICE_ID_TI_1251B 0xac1f
+#endif
+#ifndef PCI_DEVICE_ID_TI_1410
+#define PCI_DEVICE_ID_TI_1410 0xac50
+#endif
+#ifndef PCI_DEVICE_ID_TI_1420
+#define PCI_DEVICE_ID_TI_1420 0xac51
+#endif
+#ifndef PCI_DEVICE_ID_TI_1450
+#define PCI_DEVICE_ID_TI_1450 0xac1b
+#endif
+#ifndef PCI_DEVICE_ID_TI_1451
+#define PCI_DEVICE_ID_TI_1451 0xac52
+#endif
+#ifndef PCI_DEVICE_ID_TI_1510
+#define PCI_DEVICE_ID_TI_1510 0xac56
+#endif
+#ifndef PCI_DEVICE_ID_TI_1520
+#define PCI_DEVICE_ID_TI_1520 0xac55
+#endif
+#ifndef PCI_DEVICE_ID_TI_1620
+#define PCI_DEVICE_ID_TI_1620 0xac54
+#endif
+#ifndef PCI_DEVICE_ID_TI_4410
+#define PCI_DEVICE_ID_TI_4410 0xac41
+#endif
+#ifndef PCI_DEVICE_ID_TI_4450
+#define PCI_DEVICE_ID_TI_4450 0xac40
+#endif
+#ifndef PCI_DEVICE_ID_TI_4451
+#define PCI_DEVICE_ID_TI_4451 0xac42
+#endif
+#ifndef PCI_DEVICE_ID_TI_4510
+#define PCI_DEVICE_ID_TI_4510 0xac44
+#endif
+#ifndef PCI_DEVICE_ID_TI_4520
+#define PCI_DEVICE_ID_TI_4520 0xac46
+#endif
+#ifndef PCI_DEVICE_ID_TI_7410
+#define PCI_DEVICE_ID_TI_7410 0xac49
+#endif
+#ifndef PCI_DEVICE_ID_TI_7510
+#define PCI_DEVICE_ID_TI_7510 0xac47
+#endif
+#ifndef PCI_DEVICE_ID_TI_7610
+#define PCI_DEVICE_ID_TI_7610 0xac48
+#endif
+
+/* Register definitions for TI 113X PCI-to-CardBus bridges */
+
+/* System Control Register */
+#define TI113X_SYSTEM_CONTROL 0x80 /* 32 bit */
+#define TI113X_SCR_SMIROUTE 0x04000000
+#define TI113X_SCR_SMISTATUS 0x02000000
+#define TI113X_SCR_SMIENB 0x01000000
+#define TI113X_SCR_VCCPROT 0x00200000
+#define TI113X_SCR_REDUCEZV 0x00100000
+#define TI113X_SCR_CDREQEN 0x00080000
+#define TI113X_SCR_CDMACHAN 0x00070000
+#define TI113X_SCR_SOCACTIVE 0x00002000
+#define TI113X_SCR_PWRSTREAM 0x00000800
+#define TI113X_SCR_DELAYUP 0x00000400
+#define TI113X_SCR_DELAYDOWN 0x00000200
+#define TI113X_SCR_INTERROGATE 0x00000100
+#define TI113X_SCR_CLKRUN_SEL 0x00000080
+#define TI113X_SCR_PWRSAVINGS 0x00000040
+#define TI113X_SCR_SUBSYSRW 0x00000020
+#define TI113X_SCR_CB_DPAR 0x00000010
+#define TI113X_SCR_CDMA_EN 0x00000008
+#define TI113X_SCR_ASYNC_IRQ 0x00000004
+#define TI113X_SCR_KEEPCLK 0x00000002
+#define TI113X_SCR_CLKRUN_ENA 0x00000001
+
+#define TI122X_SCR_SER_STEP 0xc0000000
+#define TI122X_SCR_INTRTIE 0x20000000
+#define TI122X_SCR_P2CCLK 0x08000000
+#define TI122X_SCR_CBRSVD 0x00400000
+#define TI122X_SCR_MRBURSTDN 0x00008000
+#define TI122X_SCR_MRBURSTUP 0x00004000
+#define TI122X_SCR_RIMUX 0x00000001
+
+/* Multimedia Control Register */
+#define TI1250_MULTIMEDIA_CTL 0x84 /* 8 bit */
+#define TI1250_MMC_ZVOUTEN 0x80
+#define TI1250_MMC_PORTSEL 0x40
+#define TI1250_MMC_ZVEN1 0x02
+#define TI1250_MMC_ZVEN0 0x01
+
+#define TI1250_GENERAL_STATUS 0x85 /* 8 bit */
+#define TI1250_GPIO0_CONTROL 0x88 /* 8 bit */
+#define TI1250_GPIO1_CONTROL 0x89 /* 8 bit */
+#define TI1250_GPIO2_CONTROL 0x8a /* 8 bit */
+#define TI1250_GPIO3_CONTROL 0x8b /* 8 bit */
+#define TI12XX_IRQMUX 0x8c /* 32 bit */
+
+/* Retry Status Register */
+#define TI113X_RETRY_STATUS 0x90 /* 8 bit */
+#define TI113X_RSR_PCIRETRY 0x80
+#define TI113X_RSR_CBRETRY 0x40
+#define TI113X_RSR_TEXP_CBB 0x20
+#define TI113X_RSR_MEXP_CBB 0x10
+#define TI113X_RSR_TEXP_CBA 0x08
+#define TI113X_RSR_MEXP_CBA 0x04
+#define TI113X_RSR_TEXP_PCI 0x02
+#define TI113X_RSR_MEXP_PCI 0x01
+
+/* Card Control Register */
+#define TI113X_CARD_CONTROL 0x91 /* 8 bit */
+#define TI113X_CCR_RIENB 0x80
+#define TI113X_CCR_ZVENABLE 0x40
+#define TI113X_CCR_PCI_IRQ_ENA 0x20
+#define TI113X_CCR_PCI_IREQ 0x10
+#define TI113X_CCR_PCI_CSC 0x08
+#define TI113X_CCR_SPKROUTEN 0x02
+#define TI113X_CCR_IFG 0x01
+
+#define TI1220_CCR_PORT_SEL 0x20
+#define TI122X_CCR_AUD2MUX 0x04
+
+/* Device Control Register */
+#define TI113X_DEVICE_CONTROL 0x92 /* 8 bit */
+#define TI113X_DCR_5V_FORCE 0x40
+#define TI113X_DCR_3V_FORCE 0x20
+#define TI113X_DCR_IMODE_MASK 0x06
+#define TI113X_DCR_IMODE_ISA 0x02
+#define TI113X_DCR_IMODE_SERIAL 0x04
+
+#define TI12XX_DCR_IMODE_PCI_ONLY 0x00
+#define TI12XX_DCR_IMODE_ALL_SERIAL 0x06
+
+/* Buffer Control Register */
+#define TI113X_BUFFER_CONTROL 0x93 /* 8 bit */
+#define TI113X_BCR_CB_READ_DEPTH 0x08
+#define TI113X_BCR_CB_WRITE_DEPTH 0x04
+#define TI113X_BCR_PCI_READ_DEPTH 0x02
+#define TI113X_BCR_PCI_WRITE_DEPTH 0x01
+
+/* Diagnostic Register */
+#define TI1250_DIAGNOSTIC 0x93 /* 8 bit */
+#define TI1250_DIAG_TRUE_VALUE 0x80
+#define TI1250_DIAG_PCI_IREQ 0x40
+#define TI1250_DIAG_PCI_CSC 0x20
+#define TI1250_DIAG_ASYNC_CSC 0x01
+
+/* DMA Registers */
+#define TI113X_DMA_0 0x94 /* 32 bit */
+#define TI113X_DMA_1 0x98 /* 32 bit */
+
+/* ExCA IO offset registers */
+#define TI113X_IO_OFFSET(map) (0x36+((map)<<1))
+
+/* Data structure for tracking vendor-specific state */
+typedef struct ti113x_state_t {
+ u32 sysctl; /* TI113X_SYSTEM_CONTROL */
+ u8 cardctl; /* TI113X_CARD_CONTROL */
+ u8 devctl; /* TI113X_DEVICE_CONTROL */
+ u8 diag; /* TI1250_DIAGNOSTIC */
+ u32 irqmux; /* TI12XX_IRQMUX */
+} ti113x_state_t;
+
+#define TI_PCIC_ID \
+ IS_TI1130, IS_TI1131, IS_TI1031, IS_TI1210, IS_TI1211, \
+ IS_TI1220, IS_TI1221, IS_TI1225, IS_TI1250A, IS_TI1251A, \
+ IS_TI1251B, IS_TI1410, IS_TI1420, IS_TI1450, IS_TI1451, \
+ IS_TI1510, IS_TI1520, IS_TI1620, IS_TI4410, IS_TI4450, \
+ IS_TI4451, IS_TI4510, IS_TI4520, IS_TI7410, IS_TI7510, \
+ IS_TI7610
+
+#define TI_PCIC_INFO \
+ { "TI 1130", IS_TI|IS_CARDBUS, ID(TI, 1130) }, \
+ { "TI 1131", IS_TI|IS_CARDBUS, ID(TI, 1131) }, \
+ { "TI 1031", IS_TI|IS_CARDBUS, ID(TI, 1031) }, \
+ { "TI 1210", IS_TI|IS_CARDBUS, ID(TI, 1210) }, \
+ { "TI 1211", IS_TI|IS_CARDBUS, ID(TI, 1211) }, \
+ { "TI 1220", IS_TI|IS_CARDBUS, ID(TI, 1220) }, \
+ { "TI 1221", IS_TI|IS_CARDBUS, ID(TI, 1221) }, \
+ { "TI 1225", IS_TI|IS_CARDBUS, ID(TI, 1225) }, \
+ { "TI 1250A", IS_TI|IS_CARDBUS, ID(TI, 1250A) }, \
+ { "TI 1251A", IS_TI|IS_CARDBUS, ID(TI, 1251A) }, \
+ { "TI 1251B", IS_TI|IS_CARDBUS, ID(TI, 1251B) }, \
+ { "TI 1410", IS_TI|IS_CARDBUS, ID(TI, 1410) }, \
+ { "TI 1420", IS_TI|IS_CARDBUS, ID(TI, 1420) }, \
+ { "TI 1450", IS_TI|IS_CARDBUS, ID(TI, 1450) }, \
+ { "TI 1451", IS_TI|IS_CARDBUS, ID(TI, 1451) }, \
+ { "TI 1510", IS_TI|IS_CARDBUS, ID(TI, 1510) }, \
+ { "TI 1520", IS_TI|IS_CARDBUS, ID(TI, 1520) }, \
+ { "TI 1620", IS_TI|IS_CARDBUS, ID(TI, 1620) }, \
+ { "TI 4410", IS_TI|IS_CARDBUS, ID(TI, 4410) }, \
+ { "TI 4450", IS_TI|IS_CARDBUS, ID(TI, 4450) }, \
+ { "TI 4451", IS_TI|IS_CARDBUS, ID(TI, 4451) }, \
+ { "TI 4510", IS_TI|IS_CARDBUS, ID(TI, 4510) }, \
+ { "TI 4520", IS_TI|IS_CARDBUS, ID(TI, 4520) }, \
+ { "TI 7410", IS_TI|IS_CARDBUS, ID(TI, 7410) }, \
+ { "TI 7510", IS_TI|IS_CARDBUS, ID(TI, 7510) }, \
+ { "TI 7610", IS_TI|IS_CARDBUS, ID(TI, 7610) }
+
+#endif /* _LINUX_TI113X_H */
diff --git a/linux/pcmcia-cs/modules/topic.h b/linux/pcmcia-cs/modules/topic.h
new file mode 100644
index 0000000..88662c4
--- /dev/null
+++ b/linux/pcmcia-cs/modules/topic.h
@@ -0,0 +1,123 @@
+/*
+ * topic.h 1.15 2002/02/27 01:21:09
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ * topic.h $Release$ 2002/02/27 01:21:09
+ */
+
+#ifndef _LINUX_TOPIC_H
+#define _LINUX_TOPIC_H
+
+#ifndef PCI_VENDOR_ID_TOSHIBA
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC95_A
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95_A 0x0603
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC95_B
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95_B 0x060a
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC97
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC100
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617
+#endif
+
+/* Register definitions for Toshiba ToPIC95 controllers */
+
+#define TOPIC_SOCKET_CONTROL 0x0090 /* 32 bit */
+#define TOPIC_SCR_IRQSEL 0x00000001
+
+#define TOPIC_SLOT_CONTROL 0x00a0 /* 8 bit */
+#define TOPIC_SLOT_SLOTON 0x80
+#define TOPIC_SLOT_SLOTEN 0x40
+#define TOPIC_SLOT_ID_LOCK 0x20
+#define TOPIC_SLOT_ID_WP 0x10
+#define TOPIC_SLOT_PORT_MASK 0x0c
+#define TOPIC_SLOT_PORT_SHIFT 2
+#define TOPIC_SLOT_OFS_MASK 0x03
+
+#define TOPIC_CARD_CONTROL 0x00a1 /* 8 bit */
+#define TOPIC_CCR_INTB 0x20
+#define TOPIC_CCR_INTA 0x10
+#define TOPIC_CCR_CLOCK 0x0c
+#define TOPIC_CCR_PCICLK 0x0c
+#define TOPIC_CCR_PCICLK_2 0x08
+#define TOPIC_CCR_CCLK 0x04
+
+#define TOPIC97_INT_CONTROL 0x00a1 /* 8 bit */
+#define TOPIC97_ICR_INTB 0x20
+#define TOPIC97_ICR_INTA 0x10
+#define TOPIC97_ICR_STSIRQNP 0x04
+#define TOPIC97_ICR_IRQNP 0x02
+#define TOPIC97_ICR_IRQSEL 0x01
+
+#define TOPIC_CARD_DETECT 0x00a3 /* 8 bit */
+#define TOPIC_CDR_MODE_PC32 0x80
+#define TOPIC_CDR_VS1 0x04
+#define TOPIC_CDR_VS2 0x02
+#define TOPIC_CDR_SW_DETECT 0x01
+
+#define TOPIC_REGISTER_CONTROL 0x00a4 /* 32 bit */
+#define TOPIC_RCR_RESUME_RESET 0x80000000
+#define TOPIC_RCR_REMOVE_RESET 0x40000000
+#define TOPIC97_RCR_CLKRUN_ENA 0x20000000
+#define TOPIC97_RCR_TESTMODE 0x10000000
+#define TOPIC97_RCR_IOPLUP 0x08000000
+#define TOPIC_RCR_BUFOFF_PWROFF 0x02000000
+#define TOPIC_RCR_BUFOFF_SIGOFF 0x01000000
+#define TOPIC97_RCR_CB_DEV_MASK 0x0000f800
+#define TOPIC97_RCR_CB_DEV_SHIFT 11
+#define TOPIC97_RCR_RI_DISABLE 0x00000004
+#define TOPIC97_RCR_CAUDIO_OFF 0x00000002
+#define TOPIC_RCR_CAUDIO_INVERT 0x00000001
+
+#define TOPIC_FUNCTION_CONTROL 0x3e
+#define TOPIC_FCR_PWR_BUF_ENA 0x40
+#define TOPIC_FCR_CTR_ENA 0x08
+#define TOPIC_FCR_VS_ENA 0x02
+#define TOPIC_FCR_3V_ENA 0x01
+
+/* Data structure for tracking vendor-specific state */
+typedef struct topic_state_t {
+ u_char slot; /* TOPIC_SLOT_CONTROL */
+ u_char ccr; /* TOPIC_CARD_CONTROL */
+ u_char cdr; /* TOPIC_CARD_DETECT */
+ u_int rcr; /* TOPIC_REGISTER_CONTROL */
+ u_char fcr; /* TOPIC_FUNCTION_CONTROL */
+} topic_state_t;
+
+#define TOPIC_PCIC_ID \
+ IS_TOPIC95_A, IS_TOPIC95_B, IS_TOPIC97, IS_TOPIC100
+
+#define TOPIC_PCIC_INFO \
+ { "Toshiba ToPIC95-A", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC95_A) }, \
+ { "Toshiba ToPIC95-B", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC95_B) }, \
+ { "Toshiba ToPIC97", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC97) }, \
+ { "Toshiba ToPIC100", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC100) }
+
+#endif /* _LINUX_TOPIC_H */
diff --git a/linux/pcmcia-cs/modules/vg468.h b/linux/pcmcia-cs/modules/vg468.h
new file mode 100644
index 0000000..93dc00b
--- /dev/null
+++ b/linux/pcmcia-cs/modules/vg468.h
@@ -0,0 +1,112 @@
+/*
+ * vg468.h 1.14 2001/08/24 12:15:34
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_VG468_H
+#define _LINUX_VG468_H
+
+/* Special bit in I365_IDENT used for Vadem chip detection */
+#define I365_IDENT_VADEM 0x08
+
+/* Special definitions in I365_POWER */
+#define VG468_VPP2_MASK 0x0c
+#define VG468_VPP2_5V 0x04
+#define VG468_VPP2_12V 0x08
+
+/* Unique Vadem registers */
+#define VG469_VSENSE 0x1f /* Card voltage sense */
+#define VG469_VSELECT 0x2f /* Card voltage select */
+#define VG468_CTL 0x38 /* Control register */
+#define VG468_TIMER 0x39 /* Timer control */
+#define VG468_MISC 0x3a /* Miscellaneous */
+#define VG468_GPIO_CFG 0x3b /* GPIO configuration */
+#define VG469_EXT_MODE 0x3c /* Extended mode register */
+#define VG468_SELECT 0x3d /* Programmable chip select */
+#define VG468_SELECT_CFG 0x3e /* Chip select configuration */
+#define VG468_ATA 0x3f /* ATA control */
+
+/* Flags for VG469_VSENSE */
+#define VG469_VSENSE_A_VS1 0x01
+#define VG469_VSENSE_A_VS2 0x02
+#define VG469_VSENSE_B_VS1 0x04
+#define VG469_VSENSE_B_VS2 0x08
+
+/* Flags for VG469_VSELECT */
+#define VG469_VSEL_VCC 0x03
+#define VG469_VSEL_5V 0x00
+#define VG469_VSEL_3V 0x03
+#define VG469_VSEL_MAX 0x0c
+#define VG469_VSEL_EXT_STAT 0x10
+#define VG469_VSEL_EXT_BUS 0x20
+#define VG469_VSEL_MIXED 0x40
+#define VG469_VSEL_ISA 0x80
+
+/* Flags for VG468_CTL */
+#define VG468_CTL_SLOW 0x01 /* 600ns memory timing */
+#define VG468_CTL_ASYNC 0x02 /* Asynchronous bus clocking */
+#define VG468_CTL_TSSI 0x08 /* Tri-state some outputs */
+#define VG468_CTL_DELAY 0x10 /* Card detect debounce */
+#define VG468_CTL_INPACK 0x20 /* Obey INPACK signal? */
+#define VG468_CTL_POLARITY 0x40 /* VCCEN polarity */
+#define VG468_CTL_COMPAT 0x80 /* Compatibility stuff */
+
+#define VG469_CTL_WS_COMPAT 0x04 /* Wait state compatibility */
+#define VG469_CTL_STRETCH 0x10 /* LED stretch */
+
+/* Flags for VG468_TIMER */
+#define VG468_TIMER_ZEROPWR 0x10 /* Zero power control */
+#define VG468_TIMER_SIGEN 0x20 /* Power up */
+#define VG468_TIMER_STATUS 0x40 /* Activity timer status */
+#define VG468_TIMER_RES 0x80 /* Timer resolution */
+#define VG468_TIMER_MASK 0x0f /* Activity timer timeout */
+
+/* Flags for VG468_MISC */
+#define VG468_MISC_GPIO 0x04 /* General-purpose IO */
+#define VG468_MISC_DMAWSB 0x08 /* DMA wait state control */
+#define VG469_MISC_LEDENA 0x10 /* LED enable */
+#define VG468_MISC_VADEMREV 0x40 /* Vadem revision control */
+#define VG468_MISC_UNLOCK 0x80 /* Unique register lock */
+
+/* Flags for VG469_EXT_MODE_A */
+#define VG469_MODE_VPPST 0x03 /* Vpp steering control */
+#define VG469_MODE_INT_SENSE 0x04 /* Internal voltage sense */
+#define VG469_MODE_CABLE 0x08
+#define VG469_MODE_COMPAT 0x10 /* i82365sl B or DF step */
+#define VG469_MODE_TEST 0x20
+#define VG469_MODE_RIO 0x40 /* Steer RIO to INTR? */
+
+/* Flags for VG469_EXT_MODE_B */
+#define VG469_MODE_B_3V 0x01 /* 3.3v for socket B */
+
+/* Data structure for tracking vendor-specific state */
+typedef struct vg46x_state_t {
+ u_char ctl; /* VG468_CTL */
+    u_char		ema;		/* VG469_EXT_MODE */
+} vg46x_state_t;
+
+#endif /* _LINUX_VG468_H */
diff --git a/linux/pcmcia-cs/modules/yenta.h b/linux/pcmcia-cs/modules/yenta.h
new file mode 100644
index 0000000..525d8ec
--- /dev/null
+++ b/linux/pcmcia-cs/modules/yenta.h
@@ -0,0 +1,156 @@
+/*
+ * yenta.h 1.20 2001/08/24 12:15:34
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_YENTA_H
+#define _LINUX_YENTA_H
+
+/* PCI Configuration Registers */
+
+#define PCI_STATUS_CAPLIST 0x10
+#define PCI_CB_CAPABILITY_POINTER 0x14 /* 8 bit */
+#define PCI_CAPABILITY_ID 0x00 /* 8 bit */
+#define PCI_CAPABILITY_PM 0x01
+#define PCI_NEXT_CAPABILITY 0x01 /* 8 bit */
+#define PCI_PM_CAPABILITIES 0x02 /* 16 bit */
+#define PCI_PMCAP_PME_D3COLD 0x8000
+#define PCI_PMCAP_PME_D3HOT 0x4000
+#define PCI_PMCAP_PME_D2 0x2000
+#define PCI_PMCAP_PME_D1 0x1000
+#define PCI_PMCAP_PME_D0 0x0800
+#define PCI_PMCAP_D2_CAP 0x0400
+#define PCI_PMCAP_D1_CAP 0x0200
+#define PCI_PMCAP_DYN_DATA 0x0100
+#define PCI_PMCAP_DSI 0x0020
+#define PCI_PMCAP_AUX_PWR 0x0010
+#define PCI_PMCAP_PMECLK 0x0008
+#define PCI_PMCAP_VERSION_MASK 0x0007
+#define PCI_PM_CONTROL_STATUS 0x04 /* 16 bit */
+#define PCI_PMCS_PME_STATUS 0x8000
+#define PCI_PMCS_DATASCALE_MASK 0x6000
+#define PCI_PMCS_DATASCALE_SHIFT 13
+#define PCI_PMCS_DATASEL_MASK 0x1e00
+#define PCI_PMCS_DATASEL_SHIFT 9
+#define PCI_PMCS_PME_ENABLE 0x0100
+#define PCI_PMCS_PWR_STATE_MASK 0x0003
+#define PCI_PMCS_PWR_STATE_D0 0x0000
+#define PCI_PMCS_PWR_STATE_D1 0x0001
+#define PCI_PMCS_PWR_STATE_D2 0x0002
+#define PCI_PMCS_PWR_STATE_D3 0x0003
+#define PCI_PM_BRIDGE_EXT 0x06 /* 8 bit */
+#define PCI_PM_DATA 0x07 /* 8 bit */
+
+#define CB_PRIMARY_BUS 0x18 /* 8 bit */
+#define CB_CARDBUS_BUS 0x19 /* 8 bit */
+#define CB_SUBORD_BUS 0x1a /* 8 bit */
+#define CB_LATENCY_TIMER 0x1b /* 8 bit */
+
+#define CB_MEM_BASE(m) (0x1c + 8*(m))
+#define CB_MEM_LIMIT(m) (0x20 + 8*(m))
+#define CB_IO_BASE(m) (0x2c + 8*(m))
+#define CB_IO_LIMIT(m) (0x30 + 8*(m))
+
+#define CB_BRIDGE_CONTROL 0x3e /* 16 bit */
+#define CB_BCR_PARITY_ENA 0x0001
+#define CB_BCR_SERR_ENA 0x0002
+#define CB_BCR_ISA_ENA 0x0004
+#define CB_BCR_VGA_ENA 0x0008
+#define CB_BCR_MABORT 0x0020
+#define CB_BCR_CB_RESET 0x0040
+#define CB_BCR_ISA_IRQ 0x0080
+#define CB_BCR_PREFETCH(m) (0x0100 << (m))
+#define CB_BCR_WRITE_POST 0x0400
+
+#define CB_LEGACY_MODE_BASE 0x44
+
+/* Memory mapped registers */
+
+#define CB_SOCKET_EVENT 0x0000
+#define CB_SE_CSTSCHG 0x00000001
+#define CB_SE_CCD 0x00000006
+#define CB_SE_CCD1 0x00000002
+#define CB_SE_CCD2 0x00000004
+#define CB_SE_PWRCYCLE 0x00000008
+
+#define CB_SOCKET_MASK 0x0004
+#define CB_SM_CSTSCHG 0x00000001
+#define CB_SM_CCD 0x00000006
+#define CB_SM_PWRCYCLE 0x00000008
+
+#define CB_SOCKET_STATE 0x0008
+#define CB_SS_CSTSCHG 0x00000001
+#define CB_SS_CCD 0x00000006
+#define CB_SS_CCD1 0x00000002
+#define CB_SS_CCD2 0x00000004
+#define CB_SS_PWRCYCLE 0x00000008
+#define CB_SS_16BIT 0x00000010
+#define CB_SS_32BIT 0x00000020
+#define CB_SS_CINT 0x00000040
+#define CB_SS_BADCARD 0x00000080
+#define CB_SS_DATALOST 0x00000100
+#define CB_SS_BADVCC 0x00000200
+#define CB_SS_5VCARD 0x00000400
+#define CB_SS_3VCARD 0x00000800
+#define CB_SS_XVCARD 0x00001000
+#define CB_SS_YVCARD 0x00002000
+#define CB_SS_VSENSE 0x00003c86
+#define CB_SS_5VSOCKET 0x10000000
+#define CB_SS_3VSOCKET 0x20000000
+#define CB_SS_XVSOCKET 0x40000000
+#define CB_SS_YVSOCKET 0x80000000
+
+#define CB_SOCKET_FORCE 0x000c
+#define CB_SF_CVSTEST 0x00004000
+
+#define CB_SOCKET_CONTROL 0x0010
+#define CB_SC_VPP_MASK 0x00000007
+#define CB_SC_VPP_OFF 0x00000000
+#define CB_SC_VPP_12V 0x00000001
+#define CB_SC_VPP_5V 0x00000002
+#define CB_SC_VPP_3V 0x00000003
+#define CB_SC_VPP_XV 0x00000004
+#define CB_SC_VPP_YV 0x00000005
+#define CB_SC_VCC_MASK 0x00000070
+#define CB_SC_VCC_OFF 0x00000000
+#define CB_SC_VCC_5V 0x00000020
+#define CB_SC_VCC_3V 0x00000030
+#define CB_SC_VCC_XV 0x00000040
+#define CB_SC_VCC_YV 0x00000050
+#define CB_SC_CCLK_STOP 0x00000080
+
+#define CB_SOCKET_POWER 0x0020
+#define CB_SP_CLK_CTRL 0x00000001
+#define CB_SP_CLK_CTRL_ENA 0x00010000
+#define CB_SP_CLK_MODE 0x01000000
+#define CB_SP_ACCESS 0x02000000
+
+/* Address bits 31..24 for memory windows for 16-bit cards,
+   accessible only by memory mapping the 16-bit register set */
+#define CB_MEM_PAGE(map) (0x40 + (map))
+
+#endif /* _LINUX_YENTA_H */
diff --git a/linux/pcmcia-cs/wireless/hermes.c b/linux/pcmcia-cs/wireless/hermes.c
new file mode 100644
index 0000000..d5ec3de
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/hermes.c
@@ -0,0 +1,552 @@
+/* hermes.c
+ *
+ * Driver core for the "Hermes" wireless MAC controller, as used in
+ * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
+ * work on the hfa3841 and hfa3842 MAC controller chips used in the
+ * Prism II chipsets.
+ *
+ * This is not a complete driver, just low-level access routines for
+ * the MAC controller itself.
+ *
+ * Based on the prism2 driver from Absolute Value Systems' linux-wlan
+ * project, the Linux wvlan_cs driver, Lucent's HCF-Light
+ * (wvlan_hcf.c) library, and the NetBSD wireless driver (in no
+ * particular order).
+ *
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ * Copyright (C) 2001, David Gibson, IBM <hermes@gibson.dropbear.id.au>
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/errno.h>
+
+#include "hermes.h"
+
+MODULE_DESCRIPTION("Low-level driver helper for Lucent Hermes chipset and Prism II HFA384x wireless MAC controller");
+MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual MPL/GPL");
+#endif
+
+/* These are maximum timeouts. Most often, card will react much faster */
+#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */
+#define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */
+#define CMD_COMPL_TIMEOUT (20000) /* in iterations of ~10us */
+#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
+
+/*
+ * Debugging helpers
+ */
+
+#define IO_TYPE(hw) ((hw)->io_space ? "IO " : "MEM ")
+#define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %s0x%x: " , IO_TYPE(hw), hw->iobase); \
+ printk(stuff);} while (0)
+
+#undef HERMES_DEBUG
+#ifdef HERMES_DEBUG
+#include <stdarg.h>
+
+#define DEBUG(lvl, stuff...) if ( (lvl) <= HERMES_DEBUG) DMSG(stuff)
+
+#else /* ! HERMES_DEBUG */
+
+#define DEBUG(lvl, stuff...) do { } while (0)
+
+#endif /* ! HERMES_DEBUG */
+
+
+/*
+ * Internal functions
+ */
+
+/* Issue a command to the chip. Waiting for it to complete is the caller's
+ problem.
+
+ Returns -EBUSY if the command register is busy, 0 on success.
+
+ Callable from any context.
+*/
+static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
+{
+ int k = CMD_BUSY_TIMEOUT;
+ u16 reg;
+
+ /* First wait for the command register to unbusy */
+ reg = hermes_read_regn(hw, CMD);
+ while ( (reg & HERMES_CMD_BUSY) && k ) {
+ k--;
+ udelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+ if (reg & HERMES_CMD_BUSY) {
+ return -EBUSY;
+ }
+
+ hermes_write_regn(hw, PARAM2, 0);
+ hermes_write_regn(hw, PARAM1, 0);
+ hermes_write_regn(hw, PARAM0, param0);
+ hermes_write_regn(hw, CMD, cmd);
+
+ return 0;
+}
+
+/*
+ * Function definitions
+ */
+
+void hermes_struct_init(hermes_t *hw, ulong address,
+ int io_space, int reg_spacing)
+{
+ hw->iobase = address;
+ hw->io_space = io_space;
+ hw->reg_spacing = reg_spacing;
+ hw->inten = 0x0;
+
+#ifdef HERMES_DEBUG_BUFFER
+ hw->dbufp = 0;
+ memset(&hw->dbuf, 0xff, sizeof(hw->dbuf));
+ memset(&hw->profile, 0, sizeof(hw->profile));
+#endif
+}
+
+int hermes_init(hermes_t *hw)
+{
+ u16 status, reg;
+ int err = 0;
+ int k;
+
+ /* We don't want to be interrupted while resetting the chipset */
+ hw->inten = 0x0;
+ hermes_write_regn(hw, INTEN, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+
+ /* Normally it's a "can't happen" for the command register to
+ be busy when we go to issue a command because we are
+ serializing all commands. However we want to have some
+ chance of resetting the card even if it gets into a stupid
+ state, so we actually wait to see if the command register
+ will unbusy itself here. */
+ k = CMD_BUSY_TIMEOUT;
+ reg = hermes_read_regn(hw, CMD);
+ while (k && (reg & HERMES_CMD_BUSY)) {
+ if (reg == 0xffff) /* Special case - the card has probably been removed,
+ so don't wait for the timeout */
+ return -ENODEV;
+
+ k--;
+ udelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+
+ /* No need to explicitly handle the timeout - if we've timed
+ out hermes_issue_cmd() will probably return -EBUSY below */
+
+ /* According to the documentation, EVSTAT may contain
+ obsolete event occurrence information. We have to acknowledge
+ it by writing EVACK. */
+ reg = hermes_read_regn(hw, EVSTAT);
+ hermes_write_regn(hw, EVACK, reg);
+
+ /* We don't use hermes_docmd_wait here, because the reset wipes
+ the magic constant in SWSUPPORT0 away, and it gets confused */
+ err = hermes_issue_cmd(hw, HERMES_CMD_INIT, 0);
+ if (err)
+ return err;
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = CMD_INIT_TIMEOUT;
+ while ( (! (reg & HERMES_EV_CMD)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
+
+ if (! hermes_present(hw)) {
+ DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
+ hw->iobase);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (! (reg & HERMES_EV_CMD)) {
+ printk(KERN_ERR "hermes @ %s0x%lx: "
+ "Timeout waiting for card to reset (reg=0x%04x)!\n",
+ IO_TYPE(hw), hw->iobase, reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = hermes_read_regn(hw, STATUS);
+
+ hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
+
+ if (status & HERMES_STATUS_RESULT)
+ err = -EIO;
+
+ out:
+ return err;
+}
+
+/* Issue a command to the chip, and (busy!) wait for it to
+ * complete.
+ *
+ * Returns: < 0 on internal error, 0 on success, > 0 on error returned by the firmware
+ *
+ * Callable from any context, but locking is your problem. */
+int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ hermes_response_t *resp)
+{
+ int err;
+ int k;
+ u16 reg;
+ u16 status;
+
+ err = hermes_issue_cmd(hw, cmd, parm0);
+ if (err) {
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Card removed while issuing command.\n",
+ IO_TYPE(hw), hw->iobase);
+ err = -ENODEV;
+ } else
+ printk(KERN_ERR "hermes @ %s0x%lx: Error %d issuing command.\n",
+ IO_TYPE(hw), hw->iobase, err);
+ goto out;
+ }
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = CMD_COMPL_TIMEOUT;
+ while ( (! (reg & HERMES_EV_CMD)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Card removed while waiting for command completion.\n",
+ IO_TYPE(hw), hw->iobase);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (! (reg & HERMES_EV_CMD)) {
+ printk(KERN_ERR "hermes @ %s0x%lx: "
+ "Timeout waiting for command completion.\n",
+ IO_TYPE(hw), hw->iobase);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = hermes_read_regn(hw, STATUS);
+ if (resp) {
+ resp->status = status;
+ resp->resp0 = hermes_read_regn(hw, RESP0);
+ resp->resp1 = hermes_read_regn(hw, RESP1);
+ resp->resp2 = hermes_read_regn(hw, RESP2);
+ }
+
+ hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
+
+ if (status & HERMES_STATUS_RESULT)
+ err = -EIO;
+
+ out:
+ return err;
+}
+
+int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
+{
+ int err = 0;
+ int k;
+ u16 reg;
+
+ if ( (size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX) )
+ return -EINVAL;
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL);
+ if (err) {
+ return err;
+ }
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = ALLOC_COMPL_TIMEOUT;
+ while ( (! (reg & HERMES_EV_ALLOC)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Card removed waiting for frame allocation.\n",
+ IO_TYPE(hw), hw->iobase);
+ return -ENODEV;
+ }
+
+ if (! (reg & HERMES_EV_ALLOC)) {
+ printk(KERN_ERR "hermes @ %s0x%lx: "
+ "Timeout waiting for frame allocation\n",
+ IO_TYPE(hw), hw->iobase);
+ return -ETIMEDOUT;
+ }
+
+ *fid = hermes_read_regn(hw, ALLOCFID);
+ hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC);
+
+ return 0;
+}
+
+
+/* Set up a BAP to read a particular chunk of data from card's internal buffer.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, >0 on error
+ * from firmware
+ *
+ * Callable from any context */
+static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
+{
+ int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0;
+ int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0;
+ int k;
+ u16 reg;
+
+ /* Paranoia.. */
+ if ( (offset > HERMES_BAP_OFFSET_MAX) || (offset % 2) )
+ return -EINVAL;
+
+ k = HERMES_BAP_BUSY_TIMEOUT;
+ reg = hermes_read_reg(hw, oreg);
+ while ((reg & HERMES_OFFSET_BUSY) && k) {
+ k--;
+ udelay(1);
+ reg = hermes_read_reg(hw, oreg);
+ }
+
+#ifdef HERMES_DEBUG_BUFFER
+ hw->profile[HERMES_BAP_BUSY_TIMEOUT - k]++;
+
+ if (k < HERMES_BAP_BUSY_TIMEOUT) {
+ struct hermes_debug_entry *e =
+ &hw->dbuf[(hw->dbufp++) % HERMES_DEBUG_BUFSIZE];
+ e->bap = bap;
+ e->id = id;
+ e->offset = offset;
+ e->cycles = HERMES_BAP_BUSY_TIMEOUT - k;
+ }
+#endif
+
+ if (reg & HERMES_OFFSET_BUSY)
+ return -ETIMEDOUT;
+
+ /* Now we actually set up the transfer */
+ hermes_write_reg(hw, sreg, id);
+ hermes_write_reg(hw, oreg, offset);
+
+ /* Wait for the BAP to be ready */
+ k = HERMES_BAP_BUSY_TIMEOUT;
+ reg = hermes_read_reg(hw, oreg);
+ while ( (reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) {
+ k--;
+ udelay(1);
+ reg = hermes_read_reg(hw, oreg);
+ }
+
+ if (reg & HERMES_OFFSET_BUSY) {
+ return -ETIMEDOUT;
+ }
+
+ if (reg & HERMES_OFFSET_ERR) {
+ return -EIO;
+ }
+
+
+ return 0;
+}
+
+/* Read a block of data from the chip's buffer, via the
+ * BAP. Synchronization/serialization is the caller's problem. len
+ * must be even.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
+ */
+int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
+ u16 id, u16 offset)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+
+ if ( (len < 0) || (len % 2) )
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, id, offset);
+ if (err)
+ goto out;
+
+ /* Actually do the transfer */
+ hermes_read_words(hw, dreg, buf, len/2);
+
+ out:
+ return err;
+}
+
+/* Write a block of data to the chip's buffer, via the
+ * BAP. Synchronization/serialization is the caller's problem. len
+ * must be even.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
+ */
+int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
+ u16 id, u16 offset)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+
+ if ( (len < 0) || (len % 2) )
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, id, offset);
+ if (err)
+ goto out;
+
+ /* Actually do the transfer */
+ hermes_write_words(hw, dreg, buf, len/2);
+
+ out:
+ return err;
+}
+
+/* Read a Length-Type-Value record from the card.
+ *
+ * If length is NULL, we ignore the length read from the card, and
+ * read the entire buffer regardless. This is useful because some of
+ * the configuration records appear to have incorrect lengths in
+ * practice.
+ *
+ * Callable from user or bh context. */
+int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
+ u16 *length, void *buf)
+{
+ int err = 0;
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ u16 rlength, rtype;
+ unsigned nwords;
+
+ if ( (bufsize < 0) || (bufsize % 2) )
+ return -EINVAL;
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL);
+ if (err)
+ return err;
+
+ err = hermes_bap_seek(hw, bap, rid, 0);
+ if (err)
+ return err;
+
+ rlength = hermes_read_reg(hw, dreg);
+
+ if (! rlength)
+ return -ENOENT;
+
+ rtype = hermes_read_reg(hw, dreg);
+
+ if (length)
+ *length = rlength;
+
+ if (rtype != rid)
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "hermes_read_ltv(): rid (0x%04x) does not match type (0x%04x)\n",
+ IO_TYPE(hw), hw->iobase, rid, rtype);
+ if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Truncating LTV record from %d to %d bytes. "
+ "(rid=0x%04x, len=0x%04x)\n",
+ IO_TYPE(hw), hw->iobase,
+ HERMES_RECLEN_TO_BYTES(rlength), bufsize, rid, rlength);
+
+ nwords = min((unsigned)rlength - 1, bufsize / 2);
+ hermes_read_words(hw, dreg, buf, nwords);
+
+ return 0;
+}
+
+int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+ unsigned count;
+
+ if (length == 0)
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, rid, 0);
+ if (err)
+ return err;
+
+ hermes_write_reg(hw, dreg, length);
+ hermes_write_reg(hw, dreg, rid);
+
+ count = length - 1;
+
+ hermes_write_words(hw, dreg, value, count);
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
+ rid, NULL);
+
+ return err;
+}
+
+EXPORT_SYMBOL(hermes_struct_init);
+EXPORT_SYMBOL(hermes_init);
+EXPORT_SYMBOL(hermes_docmd_wait);
+EXPORT_SYMBOL(hermes_allocate);
+
+EXPORT_SYMBOL(hermes_bap_pread);
+EXPORT_SYMBOL(hermes_bap_pwrite);
+EXPORT_SYMBOL(hermes_read_ltv);
+EXPORT_SYMBOL(hermes_write_ltv);
+
+static int __init init_hermes(void)
+{
+ return 0;
+}
+
+static void __exit exit_hermes(void)
+{
+}
+
+module_init(init_hermes);
+module_exit(exit_hermes);
diff --git a/linux/pcmcia-cs/wireless/hermes.h b/linux/pcmcia-cs/wireless/hermes.h
new file mode 100644
index 0000000..b43fa0c
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/hermes.h
@@ -0,0 +1,456 @@
+/* hermes.h
+ *
+ * Driver core for the "Hermes" wireless MAC controller, as used in
+ * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
+ * work on the hfa3841 and hfa3842 MAC controller chips used in the
+ * Prism I & II chipsets.
+ *
+ * This is not a complete driver, just low-level access routines for
+ * the MAC controller itself.
+ *
+ * Based on the prism2 driver from Absolute Value Systems' linux-wlan
+ * project, the Linux wvlan_cs driver, Lucent's HCF-Light
+ * (wvlan_hcf.c) library, and the NetBSD wireless driver.
+ *
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ *
+ * Portions taken from hfa384x.h, Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ *
+ * This file distributed under the GPL, version 2.
+ */
+
+#ifndef _HERMES_H
+#define _HERMES_H
+
+/* Notes on locking:
+ *
+ * As a module of low level hardware access routines, there is no
+ * locking. Users of this module should ensure that they serialize
+ * access to the hermes_t structure, and to the hardware
+*/
+
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/*
+ * Limits and constants
+ */
+#define HERMES_ALLOC_LEN_MIN (4)
+#define HERMES_ALLOC_LEN_MAX (2400)
+#define HERMES_LTV_LEN_MAX (34)
+#define HERMES_BAP_DATALEN_MAX (4096)
+#define HERMES_BAP_OFFSET_MAX (4096)
+#define HERMES_PORTID_MAX (7)
+#define HERMES_NUMPORTS_MAX (HERMES_PORTID_MAX+1)
+#define HERMES_PDR_LEN_MAX (260) /* in bytes, from EK */
+#define HERMES_PDA_RECS_MAX (200) /* a guess */
+#define HERMES_PDA_LEN_MAX (1024) /* in bytes, from EK */
+#define HERMES_SCANRESULT_MAX (35)
+#define HERMES_CHINFORESULT_MAX (8)
+#define HERMES_MAX_MULTICAST (16)
+#define HERMES_MAGIC (0x7d1f)
+
+/*
+ * Hermes register offsets
+ */
+#define HERMES_CMD (0x00)
+#define HERMES_PARAM0 (0x02)
+#define HERMES_PARAM1 (0x04)
+#define HERMES_PARAM2 (0x06)
+#define HERMES_STATUS (0x08)
+#define HERMES_RESP0 (0x0A)
+#define HERMES_RESP1 (0x0C)
+#define HERMES_RESP2 (0x0E)
+#define HERMES_INFOFID (0x10)
+#define HERMES_RXFID (0x20)
+#define HERMES_ALLOCFID (0x22)
+#define HERMES_TXCOMPLFID (0x24)
+#define HERMES_SELECT0 (0x18)
+#define HERMES_OFFSET0 (0x1C)
+#define HERMES_DATA0 (0x36)
+#define HERMES_SELECT1 (0x1A)
+#define HERMES_OFFSET1 (0x1E)
+#define HERMES_DATA1 (0x38)
+#define HERMES_EVSTAT (0x30)
+#define HERMES_INTEN (0x32)
+#define HERMES_EVACK (0x34)
+#define HERMES_CONTROL (0x14)
+#define HERMES_SWSUPPORT0 (0x28)
+#define HERMES_SWSUPPORT1 (0x2A)
+#define HERMES_SWSUPPORT2 (0x2C)
+#define HERMES_AUXPAGE (0x3A)
+#define HERMES_AUXOFFSET (0x3C)
+#define HERMES_AUXDATA (0x3E)
+
+/*
+ * CMD register bitmasks
+ */
+#define HERMES_CMD_BUSY (0x8000)
+#define HERMES_CMD_AINFO (0x7f00)
+#define HERMES_CMD_MACPORT (0x0700)
+#define HERMES_CMD_RECL (0x0100)
+#define HERMES_CMD_WRITE (0x0100)
+#define HERMES_CMD_PROGMODE (0x0300)
+#define HERMES_CMD_CMDCODE (0x003f)
+
+/*
+ * STATUS register bitmasks
+ */
+#define HERMES_STATUS_RESULT (0x7f00)
+#define HERMES_STATUS_CMDCODE (0x003f)
+
+/*
+ * OFFSET register bitmasks
+ */
+#define HERMES_OFFSET_BUSY (0x8000)
+#define HERMES_OFFSET_ERR (0x4000)
+#define HERMES_OFFSET_DATAOFF (0x0ffe)
+
+/*
+ * Event register bitmasks (INTEN, EVSTAT, EVACK)
+ */
+#define HERMES_EV_TICK (0x8000)
+#define HERMES_EV_WTERR (0x4000)
+#define HERMES_EV_INFDROP (0x2000)
+#define HERMES_EV_INFO (0x0080)
+#define HERMES_EV_DTIM (0x0020)
+#define HERMES_EV_CMD (0x0010)
+#define HERMES_EV_ALLOC (0x0008)
+#define HERMES_EV_TXEXC (0x0004)
+#define HERMES_EV_TX (0x0002)
+#define HERMES_EV_RX (0x0001)
+
+/*
+ * Command codes
+ */
+/*--- Controller Commands --------------------------*/
+#define HERMES_CMD_INIT (0x0000)
+#define HERMES_CMD_ENABLE (0x0001)
+#define HERMES_CMD_DISABLE (0x0002)
+#define HERMES_CMD_DIAG (0x0003)
+
+/*--- Buffer Mgmt Commands --------------------------*/
+#define HERMES_CMD_ALLOC (0x000A)
+#define HERMES_CMD_TX (0x000B)
+#define HERMES_CMD_CLRPRST (0x0012)
+
+/*--- Regulate Commands --------------------------*/
+#define HERMES_CMD_NOTIFY (0x0010)
+#define HERMES_CMD_INQUIRE (0x0011)
+
+/*--- Configure Commands --------------------------*/
+#define HERMES_CMD_ACCESS (0x0021)
+#define HERMES_CMD_DOWNLD (0x0022)
+
+/*--- Debugging Commands -----------------------------*/
+#define HERMES_CMD_MONITOR (0x0038)
+#define HERMES_MONITOR_ENABLE (0x000b)
+#define HERMES_MONITOR_DISABLE (0x000f)
+
+/*
+ * Frame structures and constants
+ */
+
+#define HERMES_DESCRIPTOR_OFFSET 0
+#define HERMES_802_11_OFFSET (14)
+#define HERMES_802_3_OFFSET (14+32)
+#define HERMES_802_2_OFFSET (14+32+14)
+
+struct hermes_rx_descriptor {
+ u16 status;
+ u32 time;
+ u8 silence;
+ u8 signal;
+ u8 rate;
+ u8 rxflow;
+ u32 reserved;
+} __attribute__ ((packed));
+
+#define HERMES_RXSTAT_ERR (0x0003)
+#define HERMES_RXSTAT_BADCRC (0x0001)
+#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
+#define HERMES_RXSTAT_MACPORT (0x0700)
+#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */
+#define HERMES_RXSTAT_MSGTYPE (0xE000)
+#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */
+#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */
+#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
+
+struct hermes_tx_descriptor {
+ u16 status;
+ u16 reserved1;
+ u16 reserved2;
+ u32 sw_support;
+ u8 retry_count;
+ u8 tx_rate;
+ u16 tx_control;
+} __attribute__ ((packed));
+
+#define HERMES_TXSTAT_RETRYERR (0x0001)
+#define HERMES_TXSTAT_AGEDERR (0x0002)
+#define HERMES_TXSTAT_DISCON (0x0004)
+#define HERMES_TXSTAT_FORMERR (0x0008)
+
+#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */
+#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */
+#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */
+#define HERMES_TXCTRL_ALT_RTRY (0x0020)
+
+/* Inquiry constants and data types */
+
+#define HERMES_INQ_TALLIES (0xF100)
+#define HERMES_INQ_SCAN (0xF101)
+#define HERMES_INQ_LINKSTATUS (0xF200)
+
+struct hermes_tallies_frame {
+ u16 TxUnicastFrames;
+ u16 TxMulticastFrames;
+ u16 TxFragments;
+ u16 TxUnicastOctets;
+ u16 TxMulticastOctets;
+ u16 TxDeferredTransmissions;
+ u16 TxSingleRetryFrames;
+ u16 TxMultipleRetryFrames;
+ u16 TxRetryLimitExceeded;
+ u16 TxDiscards;
+ u16 RxUnicastFrames;
+ u16 RxMulticastFrames;
+ u16 RxFragments;
+ u16 RxUnicastOctets;
+ u16 RxMulticastOctets;
+ u16 RxFCSErrors;
+ u16 RxDiscards_NoBuffer;
+ u16 TxDiscardsWrongSA;
+ u16 RxWEPUndecryptable;
+ u16 RxMsgInMsgFragments;
+ u16 RxMsgInBadMsgFragments;
+ /* Those last are probably not available in very old firmwares */
+ u16 RxDiscards_WEPICVError;
+ u16 RxDiscards_WEPExcluded;
+} __attribute__ ((packed));
+
+/* Grabbed from wlan-ng - Thanks Mark... - Jean II
+ * This is the result of a scan inquiry command */
+/* Structure describing info about an Access Point */
+struct hermes_scan_apinfo {
+ u16 channel; /* Channel where the AP sits */
+ u16 noise; /* Noise level */
+ u16 level; /* Signal level */
+ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
+ u16 beacon_interv; /* Beacon interval ? */
+ u16 capabilities; /* Capabilities ? */
+ u8 essid[32]; /* ESSID of the network */
+ u8 rates[10]; /* Bit rate supported */
+ u16 proberesp_rate; /* ???? */
+} __attribute__ ((packed));
+/* Container */
+struct hermes_scan_frame {
+ u16 rsvd; /* ??? */
+ u16 scanreason; /* ??? */
+ struct hermes_scan_apinfo aps[35]; /* Scan result */
+} __attribute__ ((packed));
+#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
+#define HERMES_LINKSTATUS_CONNECTED (0x0001)
+#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
+#define HERMES_LINKSTATUS_AP_CHANGE (0x0003)
+#define HERMES_LINKSTATUS_AP_OUT_OF_RANGE (0x0004)
+#define HERMES_LINKSTATUS_AP_IN_RANGE (0x0005)
+#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006)
+
+struct hermes_linkstatus {
+ u16 linkstatus; /* Link status */
+} __attribute__ ((packed));
+
+// #define HERMES_DEBUG_BUFFER 1
+#define HERMES_DEBUG_BUFSIZE 4096
+struct hermes_debug_entry {
+ int bap;
+ u16 id, offset;
+ int cycles;
+};
+
+#ifdef __KERNEL__
+
+/* Timeouts */
+#define HERMES_BAP_BUSY_TIMEOUT (500) /* In iterations of ~1us */
+
+/* Basic control structure */
+typedef struct hermes {
+ unsigned long iobase;
+ int io_space; /* 1 if we IO-mapped IO, 0 for memory-mapped IO? */
+#define HERMES_IO 1
+#define HERMES_MEM 0
+ int reg_spacing;
+#define HERMES_16BIT_REGSPACING 0
+#define HERMES_32BIT_REGSPACING 1
+
+ u16 inten; /* Which interrupts should be enabled? */
+
+#ifdef HERMES_DEBUG_BUFFER
+ struct hermes_debug_entry dbuf[HERMES_DEBUG_BUFSIZE];
+ unsigned long dbufp;
+ unsigned long profile[HERMES_BAP_BUSY_TIMEOUT+1];
+#endif
+} hermes_t;
+
+typedef struct hermes_response {
+ u16 status, resp0, resp1, resp2;
+} hermes_response_t;
+
+/* Register access convenience macros */
+#define hermes_read_reg(hw, off) ((hw)->io_space ? \
+ inw((hw)->iobase + ( (off) << (hw)->reg_spacing )) : \
+ readw((hw)->iobase + ( (off) << (hw)->reg_spacing )))
+#define hermes_write_reg(hw, off, val) ((hw)->io_space ? \
+ outw_p((val), (hw)->iobase + ( (off) << (hw)->reg_spacing )) : \
+ writew((val), (hw)->iobase + ( (off) << (hw)->reg_spacing )))
+
+#define hermes_read_regn(hw, name) (hermes_read_reg((hw), HERMES_##name))
+#define hermes_write_regn(hw, name, val) (hermes_write_reg((hw), HERMES_##name, (val)))
+
+/* Function prototypes */
+void hermes_struct_init(hermes_t *hw, ulong address, int io_space, int reg_spacing);
+int hermes_init(hermes_t *hw);
+int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, hermes_response_t *resp);
+int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
+
+int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
+ u16 id, u16 offset);
+int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
+ u16 id, u16 offset);
+int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
+ u16 *length, void *buf);
+int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value);
+
+/* Inline functions */
+
+static inline int hermes_present(hermes_t *hw)
+{
+ return hermes_read_regn(hw, SWSUPPORT0) == HERMES_MAGIC;
+}
+
+static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
+{
+ hw->inten = events;
+ hermes_write_regn(hw, INTEN, events);
+}
+
+static inline int hermes_enable_port(hermes_t *hw, int port)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
+ 0, NULL);
+}
+
+static inline int hermes_disable_port(hermes_t *hw, int port)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
+ 0, NULL);
+}
+
+/* Initiate an INQUIRE command (tallies or scan). The result will come as an
+ * information frame in __orinoco_ev_info() */
+static inline int hermes_inquire(hermes_t *hw, u16 rid)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
+}
+
+#define HERMES_BYTES_TO_RECLEN(n) ( (((n)+1)/2) + 1 )
+#define HERMES_RECLEN_TO_BYTES(n) ( ((n)-1) * 2 )
+
+/* Note that for the next two, the count is in 16-bit words, not bytes */
+static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsigned count)
+{
+ off = off << hw->reg_spacing;;
+
+ if (hw->io_space) {
+ insw(hw->iobase + off, buf, count);
+ } else {
+ unsigned i;
+ u16 *p;
+
+ /* This needs to *not* byteswap (like insw()) but
+ * readw() does byteswap hence the conversion. I hope
+ * gcc is smart enough to fold away the two swaps on
+ * big-endian platforms. */
+ for (i = 0, p = buf; i < count; i++) {
+ *p++ = cpu_to_le16(readw(hw->iobase + off));
+ }
+ }
+}
+
+static inline void hermes_write_words(struct hermes *hw, int off, const void *buf, unsigned count)
+{
+ off = off << hw->reg_spacing;;
+
+ if (hw->io_space) {
+ outsw(hw->iobase + off, buf, count);
+ } else {
+ unsigned i;
+ const u16 *p;
+
+ /* This needs to *not* byteswap (like outsw()) but
+ * writew() does byteswap hence the conversion. I
+ * hope gcc is smart enough to fold away the two swaps
+ * on big-endian platforms. */
+ for (i = 0, p = buf; i < count; i++) {
+ writew(le16_to_cpu(*p++), hw->iobase + off);
+ }
+ }
+}
+
+static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count)
+{
+ unsigned i;
+
+ off = off << hw->reg_spacing;;
+
+ if (hw->io_space) {
+ for (i = 0; i < count; i++)
+ outw(0, hw->iobase + off);
+ } else {
+ for (i = 0; i < count; i++)
+ writew(0, hw->iobase + off);
+ }
+}
+
+#define HERMES_READ_RECORD(hw, bap, rid, buf) \
+ (hermes_read_ltv((hw),(bap),(rid), sizeof(*buf), NULL, (buf)))
+#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
+ (hermes_write_ltv((hw),(bap),(rid),HERMES_BYTES_TO_RECLEN(sizeof(*buf)),(buf)))
+
+static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
+{
+ u16 rec;
+ int err;
+
+ err = HERMES_READ_RECORD(hw, bap, rid, &rec);
+ *word = le16_to_cpu(rec);
+ return err;
+}
+
+static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
+{
+ u16 rec = cpu_to_le16(word);
+ return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
+}
+
+#else /* ! __KERNEL__ */
+
+/* These are provided for the benefit of userspace drivers and testing programs
+ which use ioperm() or iopl() */
+
+#define hermes_read_reg(base, off) (inw((base) + (off)))
+#define hermes_write_reg(base, off, val) (outw((val), (base) + (off)))
+
+#define hermes_read_regn(base, name) (hermes_read_reg((base), HERMES_##name))
+#define hermes_write_regn(base, name, val) (hermes_write_reg((base), HERMES_##name, (val)))
+
+/* Note that for the next two, the count is in 16-bit words, not bytes */
+#define hermes_read_data(base, off, buf, count) (insw((base) + (off), (buf), (count)))
+#define hermes_write_data(base, off, buf, count) (outsw((base) + (off), (buf), (count)))
+
+#endif /* ! __KERNEL__ */
+
+#endif /* _HERMES_H */
diff --git a/linux/pcmcia-cs/wireless/hermes_rid.h b/linux/pcmcia-cs/wireless/hermes_rid.h
new file mode 100644
index 0000000..761c542
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/hermes_rid.h
@@ -0,0 +1,153 @@
+#ifndef _HERMES_RID_H
+#define _HERMES_RID_H
+
+/*
+ * Configuration RIDs
+ */
+#define HERMES_RID_CNFPORTTYPE 0xFC00 /* used */
+#define HERMES_RID_CNFOWNMACADDR 0xFC01 /* used */
+#define HERMES_RID_CNFDESIREDSSID 0xFC02 /* used */
+#define HERMES_RID_CNFOWNCHANNEL 0xFC03 /* used */
+#define HERMES_RID_CNFOWNSSID 0xFC04 /* used */
+#define HERMES_RID_CNFOWNATIMWINDOW 0xFC05
+#define HERMES_RID_CNFSYSTEMSCALE 0xFC06 /* used */
+#define HERMES_RID_CNFMAXDATALEN 0xFC07
+#define HERMES_RID_CNFWDSADDRESS 0xFC08
+#define HERMES_RID_CNFPMENABLED 0xFC09 /* used */
+#define HERMES_RID_CNFPMEPS 0xFC0A
+#define HERMES_RID_CNFMULTICASTRECEIVE 0xFC0B /* used */
+#define HERMES_RID_CNFMAXSLEEPDURATION 0xFC0C /* used */
+#define HERMES_RID_CNFPMHOLDOVERDURATION 0xFC0D /* used */
+#define HERMES_RID_CNFOWNNAME 0xFC0E /* used */
+#define HERMES_RID_CNFOWNDTIMPERIOD 0xFC10
+#define HERMES_RID_CNFWDSADDRESS1 0xFC11
+#define HERMES_RID_CNFWDSADDRESS2 0xFC12
+#define HERMES_RID_CNFWDSADDRESS3 0xFC13
+#define HERMES_RID_CNFWDSADDRESS4 0xFC14
+#define HERMES_RID_CNFWDSADDRESS5 0xFC15
+#define HERMES_RID_CNFWDSADDRESS6 0xFC16
+#define HERMES_RID_CNFMULTICASTPMBUFFERING 0xFC17
+#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20 /* used */
+#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21
+#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23 /* used */
+#define HERMES_RID_CNFDEFAULTKEY0 0xFC24 /* used */
+#define HERMES_RID_CNFDEFAULTKEY1 0xFC25 /* used */
+#define HERMES_RID_CNFMWOROBUST_AGERE 0xFC25 /* used */
+#define HERMES_RID_CNFDEFAULTKEY2 0xFC26 /* used */
+#define HERMES_RID_CNFDEFAULTKEY3 0xFC27 /* used */
+#define HERMES_RID_CNFWEPFLAGS_INTERSIL 0xFC28 /* used */
+#define HERMES_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
+#define HERMES_RID_CNFAUTHENTICATION 0xFC2A /* used */
+#define HERMES_RID_CNFMAXASSOCSTA 0xFC2B
+#define HERMES_RID_CNFKEYLENGTH_SYMBOL 0xFC2B
+#define HERMES_RID_CNFTXCONTROL 0xFC2C
+#define HERMES_RID_CNFROAMINGMODE 0xFC2D
+#define HERMES_RID_CNFHOSTAUTHENTICATION 0xFC2E
+#define HERMES_RID_CNFRCVCRCERROR 0xFC30
+#define HERMES_RID_CNFMMLIFE 0xFC31
+#define HERMES_RID_CNFALTRETRYCOUNT 0xFC32
+#define HERMES_RID_CNFBEACONINT 0xFC33
+#define HERMES_RID_CNFAPPCFINFO 0xFC34
+#define HERMES_RID_CNFSTAPCFINFO 0xFC35
+#define HERMES_RID_CNFPRIORITYQUSAGE 0xFC37
+#define HERMES_RID_CNFTIMCTRL 0xFC40
+#define HERMES_RID_CNFTHIRTY2TALLY 0xFC42
+#define HERMES_RID_CNFENHSECURITY 0xFC43
+#define HERMES_RID_CNFGROUPADDRESSES 0xFC80 /* used */
+#define HERMES_RID_CNFCREATEIBSS 0xFC81 /* used */
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD 0xFC82 /* used */
+#define HERMES_RID_CNFRTSTHRESHOLD 0xFC83 /* used */
+#define HERMES_RID_CNFTXRATECONTROL 0xFC84 /* used */
+#define HERMES_RID_CNFPROMISCUOUSMODE 0xFC85 /* used */
+#define HERMES_RID_CNFBASICRATES_SYMBOL 0xFC8A
+#define HERMES_RID_CNFPREAMBLE_SYMBOL 0xFC8C /* used */
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD0 0xFC90
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD1 0xFC91
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD2 0xFC92
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD3 0xFC93
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD4 0xFC94
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD5 0xFC95
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD6 0xFC96
+#define HERMES_RID_CNFRTSTHRESHOLD0 0xFC97
+#define HERMES_RID_CNFRTSTHRESHOLD1 0xFC98
+#define HERMES_RID_CNFRTSTHRESHOLD2 0xFC99
+#define HERMES_RID_CNFRTSTHRESHOLD3 0xFC9A
+#define HERMES_RID_CNFRTSTHRESHOLD4 0xFC9B
+#define HERMES_RID_CNFRTSTHRESHOLD5 0xFC9C
+#define HERMES_RID_CNFRTSTHRESHOLD6 0xFC9D
+#define HERMES_RID_CNFSHORTPREAMBLE 0xFCB0
+#define HERMES_RID_CNFWEPKEYS_AGERE 0xFCB0 /* used */
+#define HERMES_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
+#define HERMES_RID_CNFTXKEY_AGERE 0xFCB1 /* used */
+#define HERMES_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
+#define HERMES_RID_CNFBASICRATES 0xFCB3
+#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4
+#define HERMES_RID_CNFTICKTIME 0xFCE0 /* used */
+#define HERMES_RID_CNFSCANREQUEST 0xFCE1
+#define HERMES_RID_CNFJOINREQUEST 0xFCE2
+#define HERMES_RID_CNFAUTHENTICATESTATION 0xFCE3
+#define HERMES_RID_CNFCHANNELINFOREQUEST 0xFCE4
+
+/*
+ * Information RIDs
+ */
+#define HERMES_RID_MAXLOADTIME 0xFD00
+#define HERMES_RID_DOWNLOADBUFFER 0xFD01
+#define HERMES_RID_PRIID 0xFD02
+#define HERMES_RID_PRISUPRANGE 0xFD03
+#define HERMES_RID_CFIACTRANGES 0xFD04
+#define HERMES_RID_NICSERNUM 0xFD0A
+#define HERMES_RID_NICID 0xFD0B
+#define HERMES_RID_MFISUPRANGE 0xFD0C
+#define HERMES_RID_CFISUPRANGE 0xFD0D
+#define HERMES_RID_CHANNELLIST 0xFD10 /* used */
+#define HERMES_RID_REGULATORYDOMAINS 0xFD11
+#define HERMES_RID_TEMPTYPE 0xFD12
+#define HERMES_RID_CIS 0xFD13
+#define HERMES_RID_STAID 0xFD20 /* used */
+#define HERMES_RID_STASUPRANGE 0xFD21
+#define HERMES_RID_MFIACTRANGES 0xFD22
+#define HERMES_RID_CFIACTRANGES2 0xFD23
+#define HERMES_RID_SECONDARYVERSION_SYMBOL 0xFD24 /* used */
+#define HERMES_RID_PORTSTATUS 0xFD40
+#define HERMES_RID_CURRENTSSID 0xFD41 /* used */
+#define HERMES_RID_CURRENTBSSID 0xFD42 /* used */
+#define HERMES_RID_COMMSQUALITY 0xFD43 /* used */
+#define HERMES_RID_CURRENTTXRATE 0xFD44 /* used */
+#define HERMES_RID_CURRENTBEACONINTERVAL 0xFD45
+#define HERMES_RID_CURRENTSCALETHRESHOLDS 0xFD46
+#define HERMES_RID_PROTOCOLRSPTIME 0xFD47
+#define HERMES_RID_SHORTRETRYLIMIT 0xFD48 /* used */
+#define HERMES_RID_LONGRETRYLIMIT 0xFD49 /* used */
+#define HERMES_RID_MAXTRANSMITLIFETIME 0xFD4A /* used */
+#define HERMES_RID_MAXRECEIVELIFETIME 0xFD4B
+#define HERMES_RID_CFPOLLABLE 0xFD4C
+#define HERMES_RID_AUTHENTICATIONALGORITHMS 0xFD4D
+#define HERMES_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
+#define HERMES_RID_CURRENTTXRATE1 0xFD80
+#define HERMES_RID_CURRENTTXRATE2 0xFD81
+#define HERMES_RID_CURRENTTXRATE3 0xFD82
+#define HERMES_RID_CURRENTTXRATE4 0xFD83
+#define HERMES_RID_CURRENTTXRATE5 0xFD84
+#define HERMES_RID_CURRENTTXRATE6 0xFD85
+#define HERMES_RID_OWNMACADDR 0xFD86
+#define HERMES_RID_SCANRESULTSTABLE 0xFD88
+#define HERMES_RID_PHYTYPE 0xFDC0
+#define HERMES_RID_CURRENTCHANNEL 0xFDC1 /* used */
+#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2
+#define HERMES_RID_CCAMODE 0xFDC3
+#define HERMES_RID_SUPPORTEDDATARATES 0xFDC6 /* used */
+#define HERMES_RID_BUILDSEQ 0xFFFE
+#define HERMES_RID_FWID 0xFFFF
+
+/* "ID" structure - used for ESSID and station nickname */
+struct hermes_idstring {
+ u16 len;
+ u16 val[16];
+} __attribute__ ((packed));
+
+typedef struct hermes_multicast {
+ u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
+} __attribute__ ((packed)) hermes_multicast_t;
+
+#endif
diff --git a/linux/pcmcia-cs/wireless/ieee802_11.h b/linux/pcmcia-cs/wireless/ieee802_11.h
new file mode 100644
index 0000000..07d626e
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/ieee802_11.h
@@ -0,0 +1,79 @@
+#ifndef _IEEE802_11_H
+#define _IEEE802_11_H
+
+#define IEEE802_11_DATA_LEN 2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+ 6.2.1.1.2.
+
+ The figure in section 7.1.2 suggests a body size of up to 2312
+ bytes is allowed, which is a bit confusing, I suspect this
+ represents the 2304 bytes of real data, plus a possible 8 bytes of
+ WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
+
+
+#define IEEE802_11_HLEN 30
+#define IEEE802_11_FRAME_LEN (IEEE802_11_DATA_LEN + IEEE802_11_HLEN)
+
+struct ieee802_11_hdr {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+} __attribute__ ((packed));
+
+/* Frame control field constants */
+#define IEEE802_11_FCTL_VERS 0x0002
+#define IEEE802_11_FCTL_FTYPE 0x000c
+#define IEEE802_11_FCTL_STYPE 0x00f0
+#define IEEE802_11_FCTL_TODS 0x0100
+#define IEEE802_11_FCTL_FROMDS 0x0200
+#define IEEE802_11_FCTL_MOREFRAGS 0x0400
+#define IEEE802_11_FCTL_RETRY 0x0800
+#define IEEE802_11_FCTL_PM 0x1000
+#define IEEE802_11_FCTL_MOREDATA 0x2000
+#define IEEE802_11_FCTL_WEP 0x4000
+#define IEEE802_11_FCTL_ORDER 0x8000
+
+#define IEEE802_11_FTYPE_MGMT 0x0000
+#define IEEE802_11_FTYPE_CTL 0x0004
+#define IEEE802_11_FTYPE_DATA 0x0008
+
+/* management */
+#define IEEE802_11_STYPE_ASSOC_REQ 0x0000
+#define IEEE802_11_STYPE_ASSOC_RESP 0x0010
+#define IEEE802_11_STYPE_REASSOC_REQ 0x0020
+#define IEEE802_11_STYPE_REASSOC_RESP 0x0030
+#define IEEE802_11_STYPE_PROBE_REQ 0x0040
+#define IEEE802_11_STYPE_PROBE_RESP 0x0050
+#define IEEE802_11_STYPE_BEACON 0x0080
+#define IEEE802_11_STYPE_ATIM 0x0090
+#define IEEE802_11_STYPE_DISASSOC 0x00A0
+#define IEEE802_11_STYPE_AUTH 0x00B0
+#define IEEE802_11_STYPE_DEAUTH 0x00C0
+
+/* control */
+#define IEEE802_11_STYPE_PSPOLL 0x00A0
+#define IEEE802_11_STYPE_RTS 0x00B0
+#define IEEE802_11_STYPE_CTS 0x00C0
+#define IEEE802_11_STYPE_ACK 0x00D0
+#define IEEE802_11_STYPE_CFEND 0x00E0
+#define IEEE802_11_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define IEEE802_11_STYPE_DATA 0x0000
+#define IEEE802_11_STYPE_DATA_CFACK 0x0010
+#define IEEE802_11_STYPE_DATA_CFPOLL 0x0020
+#define IEEE802_11_STYPE_DATA_CFACKPOLL 0x0030
+#define IEEE802_11_STYPE_NULLFUNC 0x0040
+#define IEEE802_11_STYPE_CFACK 0x0050
+#define IEEE802_11_STYPE_CFPOLL 0x0060
+#define IEEE802_11_STYPE_CFACKPOLL 0x0070
+
+#define IEEE802_11_SCTL_FRAG 0x000F
+#define IEEE802_11_SCTL_SEQ 0xFFF0
+
+#endif /* _IEEE802_11_H */
+
diff --git a/linux/pcmcia-cs/wireless/orinoco.c b/linux/pcmcia-cs/wireless/orinoco.c
new file mode 100644
index 0000000..1f70b6e
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/orinoco.c
@@ -0,0 +1,4230 @@
+/* orinoco.c 0.13e - (formerly known as dldwd_cs.c and orinoco_cs.c)
+ *
+ * A driver for Hermes or Prism 2 chipset based PCMCIA wireless
+ * adaptors, with Lucent/Agere, Intersil or Symbol firmware.
+ *
+ * Copyright (C) 2000 David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ * With some help from :
+ * Copyright (C) 2001 Jean Tourrilhes, HP Labs <jt@hpl.hp.com>
+ * Copyright (C) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *
+ * Based on dummy_cs.c 1.27 2000/06/12 21:27:25
+ *
+ * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy@fasta.fh-dortmund.de>
+ * http://www.fasta.fh-dortmund.de/users/andy/wvlan/
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David
+ * A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
+ * Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL. */
+
+/*
+ * v0.01 -> v0.02 - 21/3/2001 - Jean II
+ * o Allow to use regular ethX device name instead of dldwdX
+ * o Warning on IBSS with ESSID=any for firmware 6.06
+ * o Put proper range.throughput values (optimistic)
+ * o IWSPY support (IOCTL and stat gather in Rx path)
+ * o Allow setting frequency in Ad-Hoc mode
+ * o Disable WEP setting if !has_wep to work on old firmware
+ * o Fix txpower range
+ * o Start adding support for Samsung/Compaq firmware
+ *
+ * v0.02 -> v0.03 - 23/3/2001 - Jean II
+ * o Start adding Symbol support - need to check all that
+ * o Fix Prism2/Symbol WEP to accept 128 bits keys
+ * o Add Symbol WEP (add authentication type)
+ * o Add Prism2/Symbol rate
+ * o Add PM timeout (holdover duration)
+ * o Enable "iwconfig eth0 key off" and friends (toggle flags)
+ * o Enable "iwconfig eth0 power unicast/all" (toggle flags)
+ * o Try with an intel card. It reports firmware 1.01, behaves like
+ * an antiquated firmware, however on windows it says 2.00. Yuck !
+ * o Workaround firmware bug in allocate buffer (Intel 1.01)
+ * o Finish external renaming to orinoco...
+ * o Testing with various Wavelan firmwares
+ *
+ * v0.03 -> v0.04 - 30/3/2001 - Jean II
+ * o Update to Wireless 11 -> add retry limit/lifetime support
+ * o Tested with a D-Link DWL 650 card, fill in firmware support
+ * o Warning on Vcc mismatch (D-Link 3.3v card in Lucent 5v only slot)
+ * o Fixed the Prism2 WEP bugs that I introduced in v0.03 :-(
+ * It work on D-Link *only* after a tcpdump. Weird...
+ * And still doesn't work on Intel card. Grrrr...
+ * o Update the mode after a setport3
+ * o Add preamble setting for Symbol cards (not yet enabled)
+ * o Don't complain as much about Symbol cards...
+ *
+ * v0.04 -> v0.04b - 22/4/2001 - David Gibson
+ * o Removed the 'eth' parameter - always use ethXX as the
+ * interface name instead of dldwdXX. The other was racy
+ * anyway.
+ * o Clean up RID definitions in hermes.h, other cleanups
+ *
+ * v0.04b -> v0.04c - 24/4/2001 - Jean II
+ * o Tim Hurley <timster@seiki.bliztech.com> reported a D-Link card
+ * with vendor 02 and firmware 0.08. Added in the capabilities...
+ * o Tested Lucent firmware 7.28, everything works...
+ *
+ * v0.04c -> v0.05 - 3/5/2001 - Benjamin Herrenschmidt
+ * o Spin-off Pcmcia code. This file is renamed orinoco.c,
+ * and orinoco_cs.c now contains only the Pcmcia specific stuff
+ * o Add Airport driver support on top of orinoco.c (see airport.c)
+ *
+ * v0.05 -> v0.05a - 4/5/2001 - Jean II
+ * o Revert to old Pcmcia code to fix breakage of Ben's changes...
+ *
+ * v0.05a -> v0.05b - 4/5/2001 - Jean II
+ * o add module parameter 'ignore_cis_vcc' for D-Link @ 5V
+ * o D-Link firmware doesn't support multicast. We just print a few
+ * error messages, but otherwise everything works...
+ * o For David : set/getport3 works fine, just upgrade iwpriv...
+ *
+ * v0.05b -> v0.05c - 5/5/2001 - Benjamin Herrenschmidt
+ * o Adapt airport.c to latest changes in orinoco.c
+ * o Remove deferred power enabling code
+ *
+ * v0.05c -> v0.05d - 5/5/2001 - Jean II
+ * o Workaround to SNAP decapsulate frame from LinkSys AP
+ * original patch from : Dong Liu <dliu@research.bell-labs.com>
+ * (note : the memcmp bug was mine - fixed)
+ * o Remove set_retry stuff, no firmware support it (bloat--).
+ *
+ * v0.05d -> v0.06 - 25/5/2001 - Jean II
+ * Original patch from "Hong Lin" <alin@redhat.com>,
+ * "Ian Kinner" <ikinner@redhat.com>
+ * and "David Smith" <dsmith@redhat.com>
+ * o Init of priv->tx_rate_ctrl in firmware specific section.
+ * o Prism2/Symbol rate, upto should be 0xF and not 0x15. Doh !
+ * o Spectrum card always need cor_reset (for every reset)
+ * o Fix cor_reset to not lose bit 7 in the register
+ * o flush_stale_links to remove zombie Pcmcia instances
+ * o Ack previous hermes event before reset
+ * Me (with my little hands)
+ * o Allow orinoco.c to call cor_reset via priv->card_reset_handler
+ * o Add priv->need_card_reset to toggle this feature
+ * o Fix various buglets when setting WEP in Symbol firmware
+ * Now, encryption is fully functional on Symbol cards. Youpi !
+ *
+ * v0.06 -> v0.06b - 25/5/2001 - Jean II
+ * o IBSS on Symbol use port_mode = 4. Please don't ask...
+ *
+ * v0.06b -> v0.06c - 29/5/2001 - Jean II
+ * o Show first spy address in /proc/net/wireless for IBSS mode as well
+ *
+ * v0.06c -> v0.06d - 6/7/2001 - David Gibson
+ * o Change a bunch of KERN_INFO messages to KERN_DEBUG, as per Linus'
+ * wishes to reduce the number of unnecessary messages.
+ * o Removed bogus message on CRC error.
+ * o Merged fixes for v0.08 Prism 2 firmware from William Waghorn
+ * <willwaghorn@yahoo.co.uk>
+ * o Slight cleanup/re-arrangement of firmware detection code.
+ *
+ * v0.06d -> v0.06e - 1/8/2001 - David Gibson
+ * o Removed some redundant global initializers (orinoco_cs.c).
+ * o Added some module metadata
+ *
+ * v0.06e -> v0.06f - 14/8/2001 - David Gibson
+ * o Wording fix to license
+ * o Added a 'use_alternate_encaps' module parameter for APs which need an
+ * oui of 00:00:00. We really need a better way of handling this, but
+ * the module flag is better than nothing for now.
+ *
+ * v0.06f -> v0.07 - 20/8/2001 - David Gibson
+ * o Removed BAP error retries from hermes_bap_seek(). For Tx we now
+ * let the upper layers handle the retry, we retry explicitly in the
+ * Rx path, but don't make as much noise about it.
+ * o Firmware detection cleanups.
+ *
+ * v0.07 -> v0.07a - 1/10/2001 - Jean II
+ * o Add code to read Symbol firmware revision, inspired by latest code
+ * in Spectrum24 by Lee John Keyser-Allen - Thanks Lee !
+ * o Thanks to Jared Valentine <hidden@xmission.com> for "providing" me
+ * a 3Com card with a recent firmware, fill out Symbol firmware
+ * capabilities of latest rev (2.20), as well as older Symbol cards.
+ * o Disable Power Management in newer Symbol firmware, the API
+ * has changed (documentation needed).
+ *
+ * v0.07a -> v0.08 - 3/10/2001 - David Gibson
+ * o Fixed a possible buffer overrun found by the Stanford checker (in
+ * dldwd_ioctl_setiwencode()). Can only be called by root anyway, so not
+ * a big problem.
+ * o Turned has_big_wep on for Intersil cards. That's not true for all of
+ * them but we should at least let the capable ones try.
+ * o Wait for BUSY to clear at the beginning of hermes_bap_seek(). I
+ * realised that my assumption that the driver's serialization
+ * would prevent the BAP being busy on entry was possibly false, because
+ * things other than seeks may make the BAP busy.
+ * o Use "alternate" (oui 00:00:00) encapsulation by default.
+ * Setting use_old_encaps will mimic the old behaviour, but I think we
+ * will be able to eliminate this.
+ * o Don't try to make __initdata const (the version string). This can't
+ * work because of the way the __initdata sectioning works.
+ * o Added MODULE_LICENSE tags.
+ * o Support for PLX (transparent PCMCIA->PCI bridge) cards.
+ * o Changed to using the new type-facist min/max.
+ *
+ * v0.08 -> v0.08a - 9/10/2001 - David Gibson
+ * o Inserted some missing acknowledgements/info into the Changelog.
+ * o Fixed some bugs in the normalisation of signal level reporting.
+ * o Fixed bad bug in WEP key handling on Intersil and Symbol firmware,
+ * which led to an instant crash on big-endian machines.
+ *
+ * v0.08a -> v0.08b - 20/11/2001 - David Gibson
+ * o Lots of cleanup and bugfixes in orinoco_plx.c
+ * o Cleanup to handling of Tx rate setting.
+ * o Removed support for old encapsulation method.
+ * o Removed old "dldwd" names.
+ * o Split RID constants into a new file hermes_rid.h
+ * o Renamed RID constants to match linux-wlan-ng and prism2.o
+ * o Bugfixes in hermes.c
+ * o Poke the PLX's INTCSR register, so it actually starts
+ * generating interrupts. These cards might actually work now.
+ * o Update to wireless extensions v12 (Jean II)
+ * o Support for tallies and inquire command (Jean II)
+ * o Airport updates for newer PPC kernels (BenH)
+ *
+ * v0.08b -> v0.09 - 21/12/2001 - David Gibson
+ * o Some new PCI IDs for PLX cards.
+ * o Removed broken attempt to do ALLMULTI reception. Just use
+ * promiscuous mode instead
+ * o Preliminary work for list-AP (Jean II)
+ * o Airport updates from (BenH)
+ * o Eliminated racy hw_ready stuff
+ * o Fixed generation of fake events in irq handler. This should
+ * finally kill the EIO problems (Jean II & dgibson)
+ * o Fixed breakage of bitrate set/get on Agere firmware (Jean II)
+ *
+ * v0.09 -> v0.09a - 2/1/2002 - David Gibson
+ * o Fixed stupid mistake in multicast list handling, triggering
+ * a BUG()
+ *
+ * v0.09a -> v0.09b - 16/1/2002 - David Gibson
+ * o Fixed even stupider mistake in new interrupt handling, which
+ * seriously broke things on big-endian machines.
+ * o Removed a bunch of redundant includes and exports.
+ * o Removed a redundant MOD_{INC,DEC}_USE_COUNT pair in airport.c
+ * o Don't attempt to do hardware level multicast reception on
+ * Intersil firmware, just go promisc instead.
+ * o Typo fixed in hermes_issue_cmd()
+ * o Eliminated WIRELESS_SPY #ifdefs
+ * o Status code reported on Tx exceptions
+ * o Moved netif_wake_queue() from ALLOC interrupts to TX and TXEXC
+ * interrupts, which should fix the timeouts we're seeing.
+ *
+ * v0.09b -> v0.10 - 25 Feb 2002 - David Gibson
+ * o Removed nested structures used for header parsing, so the
+ * driver should now work without hackery on ARM
+ * o Fix for WEP handling on Intersil (Hawk Newton)
+ * o Eliminated the /proc/hermes/ethXX/regs debugging file. It
+ * was never very useful.
+ * o Make Rx errors less noisy.
+ *
+ * v0.10 -> v0.11 - 5 Apr 2002 - David Gibson
+ * o Laid the groundwork in hermes.[ch] for devices which map
+ * into PCI memory space rather than IO space.
+ * o Fixed bug in multicast handling (cleared multicast list when
+ * leaving promiscuous mode).
+ * o Relegated Tx error messages to debug.
+ * o Cleaned up / corrected handling of allocation lengths.
+ * o Set OWNSSID in IBSS mode for WinXP interoperability (jimc).
+ * o Change to using alloc_etherdev() for structure allocations.
+ * o Check for and drop undersized packets.
+ * o Fixed a race in stopping/waking the queue. This should fix
+ * the timeout problems (Pavel Roskin)
+ * o Reverted to netif_wake_queue() on the ALLOC event.
+ * o Fixes for recent Symbol firmwares which lack AP density
+ * (Pavel Roskin).
+ *
+ * v0.11 -> v0.11a - 29 Apr 2002 - David Gibson
+ * o Handle different register spacing, necessary for Prism 2.5
+ * PCI adaptors (Steve Hill).
+ * o Cleaned up initialization of card structures in orinoco_cs
+ * and airport. Removed card->priv field.
+ * o Make response structure optional for hermes_docmd_wait()
+ * (Pavel Roskin)
+ * o Added PCI id for Nortel emobility to orinoco_plx.c.
+ * o Cleanup to handling of Symbol's allocation bug. (Pavel Roskin)
+ * o Cleanups to firmware capability detection.
+ * o Arrange for orinoco_pci.c to override firmware detection.
+ * We should be able to support the PCI Intersil cards now.
+ * o Cleanup handling of reset_cor and hard_reset (Pavel Roskin).
+ * o Remove erroneous use of USER_BAP in the TxExc handler (Jouni
+ * Malinen).
+ * o Makefile changes for better integration into David Hinds
+ * pcmcia-cs package.
+ *
+ * v0.11a -> v0.11b - 1 May 2002 - David Gibson
+ * o Better error reporting in orinoco_plx_init_one()
+ * o Fixed multiple bad kfree() bugs introduced by the
+ * alloc_orinocodev() changes.
+ *
+ * v0.11b -> v0.12 - 19 Jun 2002 - David Gibson
+ * o Support changing the MAC address.
+ * o Correct display of Intersil firmware revision numbers.
+ * o Entirely revised locking scheme. Should be both simpler and
+ * better.
+ * o Merged some common code in orinoco_plx, orinoco_pci and
+ * airport by creating orinoco_default_{open,stop,reset}()
+ * which are used as the dev->open, dev->stop, priv->reset
+ * callbacks if none are specified when alloc_orinocodev() is
+ * called.
+ * o Removed orinoco_plx_interrupt() and orinoco_pci_interrupt().
+ * They didn't do anything.
+ *
+ * v0.12 -> v0.12a - 4 Jul 2002 - David Gibson
+ * o Some rearrangement of code.
+ * o Numerous fixups to locking and reset handling, particularly
+ * for PCMCIA.
+ * o This allows open and stop net_device methods to be in
+ * orinoco.c now, rather than in the init modules.
+ * o In orinoco_cs.c link->priv now points to the struct
+ * net_device not to the struct orinoco_private.
+ * o Added a check for undersized SNAP frames, which could cause
+ * crashes.
+ *
+ * v0.12a -> v0.12b - 11 Jul 2002 - David Gibson
+ * o Fix hw->num_init testing code, so num_init is actually
+ * incremented.
+ * o Fix very stupid bug in orinoco_cs which broke compile with
+ * CONFIG_SMP.
+ * o Squashed a warning.
+ *
+ * v0.12b -> v0.12c - 26 Jul 2002 - David Gibson
+ * o Change to C9X style designated initializers.
+ * o Add support for 3Com AirConnect PCI.
+ * o No longer ignore the hard_reset argument to
+ * alloc_orinocodev(). Oops.
+ *
+ * v0.12c -> v0.13beta1 - 13 Sep 2002 - David Gibson
+ * o Revert the broken 0.12* locking scheme and go to a new yet
+ * simpler scheme.
+ * o Do firmware resets only in orinoco_init() and when waking
+ * the card from hard sleep.
+ *
+ * v0.13beta1 -> v0.13 - 27 Sep 2002 - David Gibson
+ * o Re-introduced full resets (via schedule_task()) on Tx
+ * timeout.
+ *
+ * v0.13 -> v0.13a - 30 Sep 2002 - David Gibson
+ * o Minor cleanups to info frame handling. Add basic support
+ * for linkstatus info frames.
+ * o Include required kernel headers in orinoco.h, to avoid
+ * compile problems.
+ *
+ * v0.13a -> v0.13b - 10 Feb 2003 - David Gibson
+ * o Implemented hard reset for Airport cards
+ * o Experimental suspend/resume implementation for orinoco_pci
+ * o Abolished /proc debugging support, replaced with a debugging
+ * iwpriv. Now it's ugly and simple instead of ugly and complex.
+ * o Bugfix in hermes.c if the firmware returned a record length
+ * of 0, we could go clobbering memory.
+ * o Bugfix in orinoco_stop() - it used to fail if hw_unavailable
+ * was set, which was usually true on PCMCIA hot removes.
+ * o Track LINKSTATUS messages, silently drop Tx packets before
+ * we are connected (avoids confusing the firmware), and only
+ * give LINKSTATUS printk()s if the status has changed.
+ *
+ * v0.13b -> v0.13c - 11 Mar 2003 - David Gibson
+ * o Cleanup: use dev instead of priv in various places.
+ * o Bug fix: Don't ReleaseConfiguration on RESET_PHYSICAL event
+ * if we're in the middle of a (driver initiated) hard reset.
+ * o Bug fix: ETH_ZLEN is supposed to include the header
+ * (Dionysus Blazakis & Manish Karir)
+ * o Convert to using workqueues instead of taskqueues (and
+ * backwards compatibility macros for pre 2.5.41 kernels).
+ * o Drop redundant (I think...) MOD_{INC,DEC}_USE_COUNT in
+ * airport.c
+ * o New orinoco_tmd.c init module from Joerg Dorchain for
+ * TMD7160 based PCI to PCMCIA bridges (similar to
+ * orinoco_plx.c).
+ *
+ * v0.13c -> v0.13d - 22 Apr 2003 - David Gibson
+ * o Make hw_unavailable a counter, rather than just a flag, this
+ * is necessary to avoid some races (such as a card being
+ * removed in the middle of orinoco_reset()).
+ * o Restore Release/RequestConfiguration in the PCMCIA event handler
+ * when dealing with a driver initiated hard reset. This is
+ * necessary to prevent hangs due to a spurious interrupt while
+ * the reset is in progress.
+ * o Clear the 802.11 header when transmitting, even though we
+ * don't use it. This fixes a long standing bug on some
+ * firmwares, which seem to get confused if that isn't done.
+ * o Be less eager to de-encapsulate SNAP frames, only do so if
+ * the OUI is 00:00:00 or 00:00:f8, leave others alone. The old
+ * behaviour broke CDP (Cisco Discovery Protocol).
+ * o Use dev instead of priv for free_irq() as well as
+ * request_irq() (oops).
+ * o Attempt to reset rather than giving up if we get too many
+ * IRQs.
+ * o Changed semantics of __orinoco_down() so it can be called
+ * safely with hw_unavailable set. It also now clears the
+ * linkstatus (since we're going to have to reassociate).
+ *
+ * v0.13d -> v0.13e - 12 May 2003 - David Gibson
+ * o Support for post-2.5.68 return values from irq handler.
+ * o Fixed bug where underlength packets would be double counted
+ * in the rx_dropped statistics.
+ * o Provided a module parameter to suppress linkstatus messages.
+ *
+ * TODO
+ * o New wireless extensions API (patch from Moustafa
+ * Youssef, updated by Jim Carter and Pavel Roskin).
+ * o Handle de-encapsulation within network layer, provide 802.11
+ * headers (patch from Thomas 'Dent' Mirlacher)
+ * o RF monitor mode support
+ * o Fix possible races in SPY handling.
+ * o Disconnect wireless extensions from fundamental configuration.
+ * o (maybe) Software WEP support (patch from Stano Meduna).
+ * o (maybe) Use multiple Tx buffers - driver handling queue
+ * rather than firmware. */
+
+/* Locking and synchronization:
+ *
+ * The basic principle is that everything is serialized through a
+ * single spinlock, priv->lock. The lock is used in user, bh and irq
+ * context, so when taken outside hardirq context it should always be
+ * taken with interrupts disabled. The lock protects both the
+ * hardware and the struct orinoco_private.
+ *
+ * Another flag, priv->hw_unavailable indicates that the hardware is
+ * unavailable for an extended period of time (e.g. suspended, or in
+ * the middle of a hard reset). This flag is protected by the
+ * spinlock. All code which touches the hardware should check the
+ * flag after taking the lock, and if it is set, give up on whatever
+ * they are doing and drop the lock again. The orinoco_lock()
+ * function handles this (it unlocks and returns -EBUSY if
+ * hw_unavailable is non-zero). */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "hermes.h"
+#include "hermes_rid.h"
+#include "orinoco.h"
+#include "ieee802_11.h"
+
+/********************************************************************/
+/* Module information */
+/********************************************************************/
+
+MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based and similar wireless cards");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual MPL/GPL");
+#endif
+
+/* Level of debugging. Used in the macros in orinoco.h */
+#ifdef ORINOCO_DEBUG
+int orinoco_debug = ORINOCO_DEBUG;
+MODULE_PARM(orinoco_debug, "i");
+EXPORT_SYMBOL(orinoco_debug);
+#endif
+
+static int suppress_linkstatus; /* = 0 */
+MODULE_PARM(suppress_linkstatus, "i");
+
+/********************************************************************/
+/* Compile time configuration and compatibility stuff */
+/********************************************************************/
+
+/* Wireless extensions backwards compatibility */
+#ifndef SIOCIWFIRSTPRIV
+#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
+#endif /* SIOCIWFIRSTPRIV */
+#ifndef SIOCIWLASTPRIV
+#define SIOCIWLASTPRIV SIOCDEVPRIVATE+0xF
+#endif /* SIOCIWLASTPRIV */
+
+/* We do this this way to avoid ifdefs in the actual code */
+#ifdef WIRELESS_SPY
+#define SPY_NUMBER(priv) (priv->spy_number)
+#else
+#define SPY_NUMBER(priv) 0
+#endif /* WIRELESS_SPY */
+
+/********************************************************************/
+/* Internal constants */
+/********************************************************************/
+
+#define ORINOCO_MIN_MTU 256
+#define ORINOCO_MAX_MTU (IEEE802_11_DATA_LEN - ENCAPS_OVERHEAD)
+
+#define SYMBOL_MAX_VER_LEN (14)
+#define USER_BAP 0
+#define IRQ_BAP 1
+#define MAX_IRQLOOPS_PER_IRQ 10
+#define MAX_IRQLOOPS_PER_JIFFY (20000/HZ) /* Based on a guestimate of
+ * how many events the
+ * device could
+ * legitimately generate */
+#define SMALL_KEY_SIZE 5
+#define LARGE_KEY_SIZE 13
+#define TX_NICBUF_SIZE_BUG 1585 /* Bug in Symbol firmware */
+
+#define DUMMY_FID 0xFFFF
+
+#define RUP_EVEN(a) (((a) + 1) & (~1))
+
+/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
+ HERMES_MAX_MULTICAST : 0)*/
+#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
+
+/*
+ * MACH related stuff...
+ */
+
+#ifdef MACH
+
+#undef copy_to_user
+#define copy_to_user(a,b,c) (memcpy(a,b,c), 0)
+
+#define verify_area(a,b,c) (0)
+#define copy_from_user(a,b,c) (memcpy(a,b,c), 0)
+
+#endif
+
+/********************************************************************/
+/* Data tables */
+/********************************************************************/
+
+/* The frequency of each channel in MHz */
+const long channel_frequency[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
+#define NUM_CHANNELS ( sizeof(channel_frequency) / sizeof(channel_frequency[0]) )
+
+/* This table gives the actual meanings of the bitrate IDs returned by the firmware. */
+struct {
+ int bitrate; /* in 100s of kilobits */
+ int automatic;
+ u16 agere_txratectrl;
+ u16 intersil_txratectrl;
+} bitrate_table[] = {
+ {110, 1, 3, 15}, /* Entry 0 is the default */
+ {10, 0, 1, 1},
+ {10, 1, 1, 1},
+ {20, 0, 2, 2},
+ {20, 1, 6, 3},
+ {55, 0, 4, 4},
+ {55, 1, 7, 7},
+ {110, 0, 5, 8},
+};
+#define BITRATE_TABLE_SIZE (sizeof(bitrate_table) / sizeof(bitrate_table[0]))
+
+/********************************************************************/
+/* Data types */
+/********************************************************************/
+
+struct header_struct {
+ /* 802.3 */
+ u8 dest[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u16 len;
+ /* 802.2 */
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl;
+ /* SNAP */
+ u8 oui[3];
+ u16 ethertype;
+} __attribute__ ((packed));
+
+/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
+u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
+
+/********************************************************************/
+/* Function prototypes */
+/********************************************************************/
+
+static void orinoco_stat_gather(struct net_device *dev,
+ struct sk_buff *skb,
+ struct hermes_rx_descriptor *desc);
+
+static struct net_device_stats *orinoco_get_stats(struct net_device *dev);
+static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev);
+
+/* Hardware control routines */
+
+static int __orinoco_program_rids(struct net_device *dev);
+
+static int __orinoco_hw_set_bitrate(struct orinoco_private *priv);
+static int __orinoco_hw_setup_wep(struct orinoco_private *priv);
+static int orinoco_hw_get_bssid(struct orinoco_private *priv, char buf[ETH_ALEN]);
+static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
+ char buf[IW_ESSID_MAX_SIZE+1]);
+static long orinoco_hw_get_freq(struct orinoco_private *priv);
+static int orinoco_hw_get_bitratelist(struct orinoco_private *priv, int *numrates,
+ s32 *rates, int max);
+static void __orinoco_set_multicast_list(struct net_device *dev);
+
+/* Interrupt handling routines */
+static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw);
+
+/* ioctl() routines */
+static int orinoco_debug_dump_recs(struct net_device *dev);
+
+/********************************************************************/
+/* Function prototypes */
+/********************************************************************/
+
+int __orinoco_up(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = __orinoco_program_rids(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d configuring card\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Fire things up again */
+ hermes_set_irqmask(hw, ORINOCO_INTEN);
+ err = hermes_enable_port(hw, 0);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d enabling MAC port\n",
+ dev->name, err);
+ return err;
+ }
+
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+
+ return 0;
+}
+
+int __orinoco_down(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ if (! priv->hw_unavailable) {
+ if (! priv->broken_disableport) {
+ err = hermes_disable_port(hw, 0);
+ if (err) {
+ /* Some firmwares (e.g. Intersil 1.3.x) seem
+ * to have problems disabling the port, oh
+ * well, too bad. */
+ printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
+ dev->name, err);
+ priv->broken_disableport = 1;
+ }
+ }
+ hermes_set_irqmask(hw, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+ }
+
+ /* firmware will have to reassociate */
+ priv->last_linkstatus = 0xffff;
+ priv->connected = 0;
+
+ return 0;
+}
+
+int orinoco_reinit_firmware(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = hermes_init(hw);
+ if (err)
+ return err;
+
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err == -EIO) {
+ /* Try workaround for old Symbol firmware bug */
+ printk(KERN_WARNING "%s: firmware ALLOC bug detected "
+ "(old Symbol firmware?). Trying to work around... ",
+ dev->name);
+
+ priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err)
+ printk("failed!\n");
+ else
+ printk("ok.\n");
+ }
+
+ return err;
+}
+
+static int orinoco_open(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ unsigned long flags;
+ int err;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = __orinoco_up(dev);
+
+ if (! err)
+ priv->open = 1;
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+int orinoco_stop(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+
+ /* We mustn't use orinoco_lock() here, because we need to be
+ able to close the interface even if hw_unavailable is set
+ (e.g. as we're released after a PC Card removal) */
+ spin_lock_irq(&priv->lock);
+
+ priv->open = 0;
+
+ err = __orinoco_down(dev);
+
+ spin_unlock_irq(&priv->lock);
+
+ return err;
+}
+
+static int __orinoco_program_rids(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err;
+ struct hermes_idstring idbuf;
+
+ /* Set the MAC address */
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting MAC address\n", dev->name, err);
+ return err;
+ }
+
+ /* Set up the link mode */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE, priv->port_type);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting port type\n", dev->name, err);
+ return err;
+ }
+ /* Set the channel/frequency */
+ if (priv->channel == 0) {
+ printk(KERN_DEBUG "%s: Channel is 0 in __orinoco_program_rids()\n", dev->name);
+ if (priv->createibss)
+ priv->channel = 10;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL, priv->channel);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting channel\n", dev->name, err);
+ return err;
+ }
+
+ if (priv->has_ibss) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFCREATEIBSS,
+ priv->createibss);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n", dev->name, err);
+ return err;
+ }
+
+ if ((strlen(priv->desired_essid) == 0) && (priv->createibss)
+ && (!priv->has_ibss_any)) {
+ printk(KERN_WARNING "%s: This firmware requires an \
+ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
+ /* With wvlan_cs, in this case, we would crash.
+ * hopefully, this driver will behave better...
+ * Jean II */
+ }
+ }
+
+ /* Set the desired ESSID */
+ idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
+ memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
+ /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting OWNSSID\n", dev->name, err);
+ return err;
+ }
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n", dev->name, err);
+ return err;
+ }
+
+ /* Set the station name */
+ idbuf.len = cpu_to_le16(strlen(priv->nick));
+ memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting nickname\n", dev->name, err);
+ return err;
+ }
+
+ /* Set AP density */
+ if (priv->has_sensitivity) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
+ priv->ap_density);
+ if (err) {
+ printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
+ "Disabling sensitivity control\n", dev->name, err);
+
+ priv->has_sensitivity = 0;
+ }
+ }
+
+ /* Set RTS threshold */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, priv->rts_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting RTS threshold\n", dev->name, err);
+ return err;
+ }
+
+ /* Set fragmentation threshold or MWO robustness */
+ if (priv->has_mwo)
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ priv->mwo_robust);
+ else
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ priv->frag_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting framentation\n", dev->name, err);
+ return err;
+ }
+
+ /* Set bitrate */
+ err = __orinoco_hw_set_bitrate(priv);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting bitrate\n", dev->name, err);
+ return err;
+ }
+
+ /* Set power management */
+ if (priv->has_pm) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED,
+ priv->pm_on);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMULTICASTRECEIVE,
+ priv->pm_mcast);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION,
+ priv->pm_period);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMHOLDOVERDURATION,
+ priv->pm_timeout);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set preamble - only for Symbol so far... */
+ if (priv->has_preamble) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPREAMBLE_SYMBOL,
+ priv->preamble);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting preamble\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set up encryption */
+ if (priv->has_wep) {
+ err = __orinoco_hw_setup_wep(priv);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d activating WEP\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set promiscuity / multicast*/
+ priv->promiscuous = 0;
+ priv->mc_count = 0;
+ __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
+
+ return 0;
+}
+
+/* Reprogram the firmware configuration on a live card: disable the
+ * port, rewrite the RIDs, then re-enable the port.  If the firmware
+ * cannot disable the port (priv->broken_disableport) or any step
+ * fails, fall back to a full reset via reset_work and report success
+ * to the caller.  Returns 0 or a negative error from orinoco_lock(). */
+static int orinoco_reconfigure(struct net_device *dev)
+{
+	struct orinoco_private *priv = dev->priv;
+	struct hermes *hw = &priv->hw;
+	unsigned long flags;
+	int err = 0;
+
+	if (priv->broken_disableport) {
+		schedule_work(&priv->reset_work);
+		return 0;
+	}
+
+	err = orinoco_lock(priv, &flags);
+	if (err)
+		return err;
+
+
+	err = hermes_disable_port(hw, 0);
+	if (err) {
+		printk(KERN_WARNING "%s: Unable to disable port while reconfiguring card\n",
+		       dev->name);
+		/* Remember that this firmware can't do it, so we don't
+		 * try the fast path again */
+		priv->broken_disableport = 1;
+		goto out;
+	}
+
+	err = __orinoco_program_rids(dev);
+	if (err) {
+		printk(KERN_WARNING "%s: Unable to reconfigure card\n",
+		       dev->name);
+		goto out;
+	}
+
+	err = hermes_enable_port(hw, 0);
+	if (err) {
+		printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
+		       dev->name);
+		goto out;
+	}
+
+ out:
+	if (err) {
+		/* Any failure above: schedule a full reset instead, and
+		 * clear err so the caller sees success */
+		printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
+		schedule_work(&priv->reset_work);
+		err = 0;
+	}
+
+	orinoco_unlock(priv, &flags);
+	return err;
+
+}
+
+/* Full reset of the hardware: quiesce interrupts, perform the
+ * driver-supplied hard reset (if any), re-initialize the firmware and
+ * bring the interface back up if it was open.
+ * This must be called from user context, without locks held - use
+ * schedule_work() */
+static void orinoco_reset(struct net_device *dev)
+{
+	struct orinoco_private *priv = dev->priv;
+	struct hermes *hw = &priv->hw;
+	int err;
+	unsigned long flags;
+
+	err = orinoco_lock(priv, &flags);
+	if (err)
+		/* When the hardware becomes available again, whatever
+		 * detects that is responsible for re-initializing
+		 * it. So no need for anything further*/
+		return;
+
+	netif_stop_queue(dev);
+
+	/* Shut off interrupts. Depending on what state the hardware
+	 * is in, this might not work, but we'll try anyway */
+	hermes_set_irqmask(hw, 0);
+	hermes_write_regn(hw, EVACK, 0xffff);
+
+	/* Mark the hardware unavailable so concurrent paths back off
+	 * until the reset completes */
+	priv->hw_unavailable++;
+	priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */
+	priv->connected = 0;
+
+	orinoco_unlock(priv, &flags);
+
+	/* NOTE(review): if priv->hard_reset is NULL, err is still 0 here
+	 * (orinoco_lock() succeeded above), so the error check below is
+	 * safe even though err is not re-assigned in that case */
+	if (priv->hard_reset)
+		err = (*priv->hard_reset)(priv);
+	if (err) {
+		printk(KERN_ERR "%s: orinoco_reset: Error %d performing hard reset\n",
+		       dev->name, err);
+		/* FIXME: shutdown of some sort */
+		return;
+	}
+
+	err = orinoco_reinit_firmware(dev);
+	if (err) {
+		printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
+		       dev->name, err);
+		return;
+	}
+
+	spin_lock_irq(&priv->lock); /* This has to be called from user context */
+
+	priv->hw_unavailable--;
+
+	/* priv->open or priv->hw_unavailable might have changed while
+	 * we dropped the lock */
+	if (priv->open && (! priv->hw_unavailable)) {
+		err = __orinoco_up(dev);
+		if (err) {
+			printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
+			       dev->name, err);
+		} else
+			dev->trans_start = jiffies;
+	}
+
+	spin_unlock_irq(&priv->lock);
+
+	return;
+}
+
+/********************************************************************/
+/* Internal helper functions */
+/********************************************************************/
+
+/* Derive priv->port_type and priv->createibss from the wireless
+ * extensions mode (priv->iw_mode).  Ad-hoc mode uses either the
+ * proprietary "port 3" demo mode or the firmware's IBSS port,
+ * depending on priv->prefer_port3. */
+static inline void
+set_port_type(struct orinoco_private *priv)
+{
+	switch (priv->iw_mode) {
+	case IW_MODE_INFRA:
+		priv->port_type = 1;
+		priv->createibss = 0;
+		break;
+	case IW_MODE_ADHOC:
+		if (priv->prefer_port3) {
+			priv->port_type = 3;
+			priv->createibss = 0;
+		} else {
+			priv->port_type = priv->ibss_port;
+			priv->createibss = 1;
+		}
+		break;
+	default:
+		printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
+		       priv->ndev->name);
+	}
+}
+
+/* Does the frame have a SNAP header indicating it should be
+ * de-encapsulated to Ethernet-II? */
+static inline int
+is_ethersnap(struct header_struct *hdr)
+{
+	/* We de-encapsulate all packets which, a) have SNAP headers
+	 * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header
+	 * and where b) the OUI of the SNAP header is 00:00:00 or
+	 * 00:00:f8 - we need both because different APs appear to use
+	 * different OUIs for some reason */
+	/* The 5-byte memcmp against encaps_hdr covers DSAP, SSAP, CTRL
+	 * and the first two OUI bytes; only oui[2] is checked explicitly
+	 * (encaps_hdr is defined elsewhere in this file) */
+	return (memcmp(&hdr->dsap, &encaps_hdr, 5) == 0)
+		&& ( (hdr->oui[2] == 0x00) || (hdr->oui[2] == 0xf8) );
+}
+
+/* net_device multicast-list hook: take the driver lock and delegate
+ * to __orinoco_set_multicast_list().  Bails out with a debug message
+ * if the hardware is currently unavailable. */
+static void
+orinoco_set_multicast_list(struct net_device *dev)
+{
+	struct orinoco_private *priv = dev->priv;
+	unsigned long flags;
+
+	if (orinoco_lock(priv, &flags) != 0) {
+		printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
+		       "called when hw_unavailable\n", dev->name);
+		return;
+	}
+
+	__orinoco_set_multicast_list(dev);
+	orinoco_unlock(priv, &flags);
+}
+
+/********************************************************************/
+/* Hardware control functions */
+/********************************************************************/
+
+
+/* Program the transmit rate control RID from the bitrate_table entry
+ * selected by priv->bitratemode.  The RID value encoding differs
+ * between Agere and Intersil/Symbol firmware, hence the switch.
+ * Returns 0, a hermes error, or -EINVAL for an out-of-range mode. */
+static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
+{
+	hermes_t *hw = &priv->hw;
+	int err = 0;
+
+	if (priv->bitratemode >= BITRATE_TABLE_SIZE) {
+		printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
+		       priv->ndev->name, priv->bitratemode);
+		return -EINVAL;
+	}
+
+	switch (priv->firmware_type) {
+	case FIRMWARE_TYPE_AGERE:
+		err = hermes_write_wordrec(hw, USER_BAP,
+					   HERMES_RID_CNFTXRATECONTROL,
+					   bitrate_table[priv->bitratemode].agere_txratectrl);
+		break;
+	case FIRMWARE_TYPE_INTERSIL:
+	case FIRMWARE_TYPE_SYMBOL:
+		err = hermes_write_wordrec(hw, USER_BAP,
+					   HERMES_RID_CNFTXRATECONTROL,
+					   bitrate_table[priv->bitratemode].intersil_txratectrl);
+		break;
+	default:
+		BUG();
+	}
+
+	return err;
+}
+
+
+/* Program the WEP configuration into the firmware.  The RIDs and key
+ * layout differ substantially between Agere firmware (all keys in one
+ * record) and Intersil/Symbol firmware (one RID per key, plus explicit
+ * authentication and master WEP flags).  Returns 0 or a negative
+ * error; -EINVAL if WEP is enabled on firmware that can't do it. */
+static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
+{
+	hermes_t *hw = &priv->hw;
+	int err = 0;
+	int master_wep_flag;
+	int auth_flag;
+
+	switch (priv->firmware_type) {
+	case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
+		if (priv->wep_on) {
+			err = hermes_write_wordrec(hw, USER_BAP,
+						   HERMES_RID_CNFTXKEY_AGERE,
+						   priv->tx_key);
+			if (err)
+				return err;
+
+			err = HERMES_WRITE_RECORD(hw, USER_BAP,
+						  HERMES_RID_CNFWEPKEYS_AGERE,
+						  &priv->keys);
+			if (err)
+				return err;
+		}
+		err = hermes_write_wordrec(hw, USER_BAP,
+					   HERMES_RID_CNFWEPENABLED_AGERE,
+					   priv->wep_on);
+		if (err)
+			return err;
+		break;
+
+	case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
+	case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
+		master_wep_flag = 0; /* Off */
+		if (priv->wep_on) {
+			int keylen;
+			int i;
+
+			/* Fudge around firmware weirdness */
+			/* NOTE(review): the tx key's length is used when
+			 * writing all four keys (see the commented-out
+			 * per-key length below) - deliberate workaround */
+			keylen = le16_to_cpu(priv->keys[priv->tx_key].len);
+
+			/* Write all 4 keys */
+			for(i = 0; i < ORINOCO_MAX_KEYS; i++) {
+/*				int keylen = le16_to_cpu(priv->keys[i].len); */
+
+				if (keylen > LARGE_KEY_SIZE) {
+					printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
+					       priv->ndev->name, i, keylen);
+					return -E2BIG;
+				}
+
+				err = hermes_write_ltv(hw, USER_BAP,
+						       HERMES_RID_CNFDEFAULTKEY0 + i,
+						       HERMES_BYTES_TO_RECLEN(keylen),
+						       priv->keys[i].data);
+				if (err)
+					return err;
+			}
+
+			/* Write the index of the key used in transmission */
+			err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPDEFAULTKEYID,
+						   priv->tx_key);
+			if (err)
+				return err;
+
+			if (priv->wep_restrict) {
+				auth_flag = 2;
+				master_wep_flag = 3;
+			} else {
+				/* Authentication is where Intersil and Symbol
+				 * firmware differ... */
+				auth_flag = 1;
+				if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)
+					master_wep_flag = 3; /* Symbol */
+				else
+					master_wep_flag = 1; /* Intersil */
+			}
+
+
+			err = hermes_write_wordrec(hw, USER_BAP,
+						   HERMES_RID_CNFAUTHENTICATION, auth_flag);
+			if (err)
+				return err;
+		}
+
+		/* Master WEP setting : on/off */
+		err = hermes_write_wordrec(hw, USER_BAP,
+					   HERMES_RID_CNFWEPFLAGS_INTERSIL,
+					   master_wep_flag);
+		if (err)
+			return err;
+
+		break;
+
+	default:
+		if (priv->wep_on) {
+			printk(KERN_ERR "%s: WEP enabled, although not supported!\n",
+			       priv->ndev->name);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Read the BSSID the firmware is currently using into buf.
+ * Takes and releases the driver lock.  Returns 0 or a negative error. */
+static int orinoco_hw_get_bssid(struct orinoco_private *priv,
+				char buf[ETH_ALEN])
+{
+	hermes_t *hw = &priv->hw;
+	int err = 0;
+	unsigned long flags;
+
+	err = orinoco_lock(priv, &flags);
+	if (err)
+		return err;
+
+	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+			      ETH_ALEN, NULL, buf);
+
+	orinoco_unlock(priv, &flags);
+
+	return err;
+}
+
+/* Read the current ESSID into buf (NUL-terminated).  *active is set
+ * to 1 when a specific ESSID is configured (read back from the card's
+ * own/desired SSID RID), or 0 when using "any" ESSID (read the SSID
+ * actually in use).  Returns 0 or a negative error. */
+static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
+				char buf[IW_ESSID_MAX_SIZE+1])
+{
+	hermes_t *hw = &priv->hw;
+	int err = 0;
+	struct hermes_idstring essidbuf;
+	char *p = (char *)(&essidbuf.val);
+	int len;
+	unsigned long flags;
+
+	err = orinoco_lock(priv, &flags);
+	if (err)
+		return err;
+
+	if (strlen(priv->desired_essid) > 0) {
+		/* We read the desired SSID from the hardware rather
+		   than from priv->desired_essid, just in case the
+		   firmware is allowed to change it on us. I'm not
+		   sure about this */
+		/* My guess is that the OWNSSID should always be whatever
+		 * we set to the card, whereas CURRENT_SSID is the one that
+		 * may change... - Jean II */
+		u16 rid;
+
+		*active = 1;
+
+		/* port 3 (ad-hoc demo mode) uses OWNSSID, otherwise the
+		 * desired-SSID RID */
+		rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
+			HERMES_RID_CNFDESIREDSSID;
+
+		err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
+				      NULL, &essidbuf);
+		if (err)
+			goto fail_unlock;
+	} else {
+		*active = 0;
+
+		err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
+				      sizeof(essidbuf), NULL, &essidbuf);
+		if (err)
+			goto fail_unlock;
+	}
+
+	len = le16_to_cpu(essidbuf.len);
+
+	memset(buf, 0, IW_ESSID_MAX_SIZE+1);
+	memcpy(buf, p, len);
+	buf[len] = '\0';
+
+ fail_unlock:
+	orinoco_unlock(priv, &flags);
+
+	return err;
+}
+
+/* Return the current operating frequency, derived from the firmware's
+ * current channel via the channel_frequency[] table (scaled by
+ * 100000; exact units depend on that table, defined elsewhere), or a
+ * negative error.  -EBUSY covers channel 0 (interface down on some
+ * Intersil firmware) and out-of-range channels. */
+static long orinoco_hw_get_freq(struct orinoco_private *priv)
+{
+
+	hermes_t *hw = &priv->hw;
+	int err = 0;
+	u16 channel;
+	long freq = 0;
+	unsigned long flags;
+
+	err = orinoco_lock(priv, &flags);
+	if (err)
+		return err;
+
+	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel);
+	if (err)
+		goto out;
+
+	/* Intersil firmware 1.3.5 returns 0 when the interface is down */
+	if (channel == 0) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if ( (channel < 1) || (channel > NUM_CHANNELS) ) {
+		printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
+		       priv->ndev->name, channel);
+		err = -EBUSY;
+		goto out;
+
+	}
+	freq = channel_frequency[channel-1] * 100000;
+
+ out:
+	orinoco_unlock(priv, &flags);
+
+	/* Normalize positive hermes status codes to an error */
+	if (err > 0)
+		err = -EBUSY;
+	return err ? err : freq;
+}
+
+/* Read the firmware's supported data-rate list.  Stores up to max
+ * rates (converted to bps) into rates[] and the total advertised
+ * count into *numrates.  Returns 0 or a negative error. */
+static int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
+				      int *numrates, s32 *rates, int max)
+{
+	hermes_t *hw = &priv->hw;
+	struct hermes_idstring list;
+	unsigned char *p = (unsigned char *)&list.val;
+	int err = 0;
+	int num;
+	int i;
+	unsigned long flags;
+
+	err = orinoco_lock(priv, &flags);
+	if (err)
+		return err;
+
+	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
+			      sizeof(list), NULL, &list);
+	orinoco_unlock(priv, &flags);
+
+	if (err)
+		return err;
+
+	num = le16_to_cpu(list.len);
+	*numrates = num;
+	num = min(num, max);
+
+	for (i = 0; i < num; i++) {
+		/* low 7 bits are the rate in units of 500 kbps */
+		rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
+	}
+
+	return 0;
+}
+
+#if 0
+/* Debug-only helper (currently compiled out): dump every field of a
+ * received frame - Rx descriptor, 802.11 and 802.3 headers, and the
+ * 802.2 LLC/SNAP header - to the kernel log. */
+static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
+{
+	printk(KERN_DEBUG "RX descriptor:\n");
+	printk(KERN_DEBUG "  status      = 0x%04x\n", frame->desc.status);
+	printk(KERN_DEBUG "  time        = 0x%08x\n", frame->desc.time);
+	printk(KERN_DEBUG "  silence     = 0x%02x\n", frame->desc.silence);
+	printk(KERN_DEBUG "  signal      = 0x%02x\n", frame->desc.signal);
+	printk(KERN_DEBUG "  rate        = 0x%02x\n", frame->desc.rate);
+	printk(KERN_DEBUG "  rxflow      = 0x%02x\n", frame->desc.rxflow);
+	printk(KERN_DEBUG "  reserved    = 0x%08x\n", frame->desc.reserved);
+
+	printk(KERN_DEBUG "IEEE 802.11 header:\n");
+	printk(KERN_DEBUG "  frame_ctl   = 0x%04x\n",
+	       frame->p80211.frame_ctl);
+	printk(KERN_DEBUG "  duration_id = 0x%04x\n",
+	       frame->p80211.duration_id);
+	printk(KERN_DEBUG "  addr1       = %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       frame->p80211.addr1[0], frame->p80211.addr1[1],
+	       frame->p80211.addr1[2], frame->p80211.addr1[3],
+	       frame->p80211.addr1[4], frame->p80211.addr1[5]);
+	printk(KERN_DEBUG "  addr2       = %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       frame->p80211.addr2[0], frame->p80211.addr2[1],
+	       frame->p80211.addr2[2], frame->p80211.addr2[3],
+	       frame->p80211.addr2[4], frame->p80211.addr2[5]);
+	printk(KERN_DEBUG "  addr3       = %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       frame->p80211.addr3[0], frame->p80211.addr3[1],
+	       frame->p80211.addr3[2], frame->p80211.addr3[3],
+	       frame->p80211.addr3[4], frame->p80211.addr3[5]);
+	printk(KERN_DEBUG "  seq_ctl     = 0x%04x\n",
+	       frame->p80211.seq_ctl);
+	printk(KERN_DEBUG "  addr4       = %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       frame->p80211.addr4[0], frame->p80211.addr4[1],
+	       frame->p80211.addr4[2], frame->p80211.addr4[3],
+	       frame->p80211.addr4[4], frame->p80211.addr4[5]);
+	printk(KERN_DEBUG "  data_len    = 0x%04x\n",
+	       frame->p80211.data_len);
+
+	printk(KERN_DEBUG "IEEE 802.3 header:\n");
+	printk(KERN_DEBUG "  dest        = %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       frame->p8023.h_dest[0], frame->p8023.h_dest[1],
+	       frame->p8023.h_dest[2], frame->p8023.h_dest[3],
+	       frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
+	printk(KERN_DEBUG "  src         = %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       frame->p8023.h_source[0], frame->p8023.h_source[1],
+	       frame->p8023.h_source[2], frame->p8023.h_source[3],
+	       frame->p8023.h_source[4], frame->p8023.h_source[5]);
+	printk(KERN_DEBUG "  len         = 0x%04x\n", frame->p8023.h_proto);
+
+	printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
+	printk(KERN_DEBUG "  DSAP        = 0x%02x\n", frame->p8022.dsap);
+	printk(KERN_DEBUG "  SSAP        = 0x%02x\n", frame->p8022.ssap);
+	printk(KERN_DEBUG "  ctrl        = 0x%02x\n", frame->p8022.ctrl);
+	printk(KERN_DEBUG "  OUI         = %02x:%02x:%02x\n",
+	       frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
+	printk(KERN_DEBUG "  ethertype  = 0x%04x\n", frame->ethertype);
+}
+#endif /* 0 */
+
+/*
+ * Interrupt handler: dispatch each pending Hermes event to its
+ * __orinoco_ev_* handler, acknowledging events via EVACK as we go.
+ * Bails out and schedules a reset if the handler loops too often in
+ * one jiffy (runaway-interrupt protection).
+ */
+irqreturn_t orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct orinoco_private *priv = dev->priv;
+	hermes_t *hw = &priv->hw;
+	int count = MAX_IRQLOOPS_PER_IRQ;
+	u16 evstat, events;
+	/* These are used to detect a runaway interrupt situation */
+	/* If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
+	 * we panic and shut down the hardware */
+	static int last_irq_jiffy = 0; /* jiffies value the last time we were called */
+	static int loops_this_jiffy = 0;
+	unsigned long flags;
+
+	if (orinoco_lock(priv, &flags) != 0) {
+		/* If hw is unavailable - we don't know if the irq was
+		 * for us or not */
+		return IRQ_HANDLED;
+	}
+
+	/* Only handle events we have actually enabled */
+	evstat = hermes_read_regn(hw, EVSTAT);
+	events = evstat & hw->inten;
+	if (! events) {
+		orinoco_unlock(priv, &flags);
+		return IRQ_NONE;
+	}
+
+	if (jiffies != last_irq_jiffy)
+		loops_this_jiffy = 0;
+	last_irq_jiffy = jiffies;
+
+	while (events && count--) {
+		if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) {
+			printk(KERN_WARNING "%s: IRQ handler is looping too "
+			       "much! Resetting.\n", dev->name);
+			/* Disable interrupts for now */
+			hermes_set_irqmask(hw, 0);
+			schedule_work(&priv->reset_work);
+			break;
+		}
+
+		/* Check the card hasn't been removed */
+		if (! hermes_present(hw)) {
+			DEBUG(0, "orinoco_interrupt(): card removed\n");
+			break;
+		}
+
+		if (events & HERMES_EV_TICK)
+			__orinoco_ev_tick(dev, hw);
+		if (events & HERMES_EV_WTERR)
+			__orinoco_ev_wterr(dev, hw);
+		if (events & HERMES_EV_INFDROP)
+			__orinoco_ev_infdrop(dev, hw);
+		if (events & HERMES_EV_INFO)
+			__orinoco_ev_info(dev, hw);
+		if (events & HERMES_EV_RX)
+			__orinoco_ev_rx(dev, hw);
+		if (events & HERMES_EV_TXEXC)
+			__orinoco_ev_txexc(dev, hw);
+		if (events & HERMES_EV_TX)
+			__orinoco_ev_tx(dev, hw);
+		if (events & HERMES_EV_ALLOC)
+			__orinoco_ev_alloc(dev, hw);
+
+		/* Acknowledge what we handled, then re-poll for more */
+		hermes_write_regn(hw, EVACK, events);
+
+		evstat = hermes_read_regn(hw, EVSTAT);
+		events = evstat & hw->inten;
+	};
+
+	orinoco_unlock(priv, &flags);
+	return IRQ_HANDLED;
+}
+
+/* Hermes "tick" event: nothing to do but log it */
+static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
+{
+	printk(KERN_DEBUG "%s: TICK\n", dev->name);
+}
+
+/* Hermes WTERR (MAC controller error) event */
+static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
+{
+	/* This seems to happen a fair bit under load, but ignoring it
+	   seems to work fine...*/
+	printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
+	       dev->name);
+}
+
+/* Hermes event: the card dropped an information frame; just warn */
+static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
+{
+	printk(KERN_WARNING "%s: Information frame lost.\n", dev->name);
+}
+
+/* Log a human-readable description of a firmware link-status code,
+ * unless suppressed via the suppress_linkstatus module option. */
+static void print_linkstatus(struct net_device *dev, u16 status)
+{
+	char * s;
+
+	if (suppress_linkstatus)
+		return;
+
+	switch (status) {
+	case HERMES_LINKSTATUS_NOT_CONNECTED:
+		s = "Not Connected";
+		break;
+	case HERMES_LINKSTATUS_CONNECTED:
+		s = "Connected";
+		break;
+	case HERMES_LINKSTATUS_DISCONNECTED:
+		s = "Disconnected";
+		break;
+	case HERMES_LINKSTATUS_AP_CHANGE:
+		s = "AP Changed";
+		break;
+	case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
+		s = "AP Out of Range";
+		break;
+	case HERMES_LINKSTATUS_AP_IN_RANGE:
+		s = "AP In Range";
+		break;
+	case HERMES_LINKSTATUS_ASSOC_FAILED:
+		s = "Association Failed";
+		break;
+	default:
+		s = "UNKNOWN";
+	}
+
+	printk(KERN_INFO "%s: New link status: %s (%04x)\n",
+	       dev->name, s, status);
+}
+
+/* Hermes "info" event: the card has an information frame for us,
+ * either in answer to an earlier INQUIRE or generated spontaneously.
+ * Handles tally frames (folded into the wireless stats) and
+ * link-status frames (tracked in priv->connected and
+ * priv->last_linkstatus); other types are only logged. */
+static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+{
+	struct orinoco_private *priv = dev->priv;
+	u16 infofid;
+	struct {
+		u16 len;
+		u16 type;
+	} __attribute__ ((packed)) info;
+	int len, type;
+	int err;
+
+	/* This is an answer to an INQUIRE command that we did earlier,
+	 * or an information "event" generated by the card
+	 * The controller return to us a pseudo frame containing
+	 * the information in question - Jean II */
+	infofid = hermes_read_regn(hw, INFOFID);
+
+	/* Read the info frame header - don't try too hard */
+	err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
+			       infofid, 0);
+	if (err) {
+		printk(KERN_ERR "%s: error %d reading info frame. "
+		       "Frame dropped.\n", dev->name, err);
+		return;
+	}
+
+	len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
+	type = le16_to_cpu(info.type);
+
+	switch (type) {
+	case HERMES_INQ_TALLIES: {
+		struct hermes_tallies_frame tallies;
+		struct iw_statistics *wstats = &priv->wstats;
+
+		if (len > sizeof(tallies)) {
+			printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
+			       dev->name, len);
+			len = sizeof(tallies);
+		}
+
+		/* Read directly the data (no seek) */
+		hermes_read_words(hw, HERMES_DATA1, (void *) &tallies,
+				  len / 2); /* FIXME: blech! */
+
+		/* Increment our various counters */
+		/* wstats->discard.nwid - no wrong BSSID stuff */
+		wstats->discard.code +=
+			le16_to_cpu(tallies.RxWEPUndecryptable);
+		if (len == sizeof(tallies))
+			wstats->discard.code +=
+				le16_to_cpu(tallies.RxDiscards_WEPICVError) +
+				le16_to_cpu(tallies.RxDiscards_WEPExcluded);
+		wstats->discard.misc +=
+			le16_to_cpu(tallies.TxDiscardsWrongSA);
+#if WIRELESS_EXT > 11
+		wstats->discard.fragment +=
+			le16_to_cpu(tallies.RxMsgInBadMsgFragments);
+		wstats->discard.retries +=
+			le16_to_cpu(tallies.TxRetryLimitExceeded);
+		/* wstats->miss.beacon - no match */
+#endif /* WIRELESS_EXT > 11 */
+	}
+	break;
+	case HERMES_INQ_LINKSTATUS: {
+		struct hermes_linkstatus linkstatus;
+		u16 newstatus;
+
+		if (len != sizeof(linkstatus)) {
+			printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
+			       dev->name, len);
+			break;
+		}
+
+		hermes_read_words(hw, HERMES_DATA1, (void *) &linkstatus,
+				  len / 2);
+		newstatus = le16_to_cpu(linkstatus.linkstatus);
+
+		/* Track connection state from the status code */
+		if ( (newstatus == HERMES_LINKSTATUS_CONNECTED)
+		     || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
+		     || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE) )
+			priv->connected = 1;
+		else if ( (newstatus == HERMES_LINKSTATUS_NOT_CONNECTED)
+			  || (newstatus == HERMES_LINKSTATUS_DISCONNECTED)
+			  || (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE)
+			  || (newstatus == HERMES_LINKSTATUS_ASSOC_FAILED) )
+			priv->connected = 0;
+
+		/* Only log actual changes */
+		if (newstatus != priv->last_linkstatus)
+			print_linkstatus(dev, newstatus);
+
+		priv->last_linkstatus = newstatus;
+	}
+	break;
+	default:
+		printk(KERN_DEBUG "%s: Unknown information frame received (type %04x).\n",
+		      dev->name, type);
+		/* We don't actually do anything about it */
+		break;
+	}
+}
+
+/* Hermes Rx event: read the Rx descriptor and frame from the card,
+ * validate it, de-encapsulate LLC/SNAP frames back to Ethernet-II
+ * where appropriate, and hand the resulting skb to the network stack.
+ * Errors and under/over-sized frames are counted and dropped. */
+static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+{
+	struct orinoco_private *priv = dev->priv;
+	struct net_device_stats *stats = &priv->stats;
+	struct iw_statistics *wstats = &priv->wstats;
+	struct sk_buff *skb = NULL;
+	u16 rxfid, status;
+	int length, data_len, data_off;
+	char *p;
+	struct hermes_rx_descriptor desc;
+	struct header_struct hdr;
+	struct ethhdr *eh;
+	int err;
+
+	rxfid = hermes_read_regn(hw, RXFID);
+
+	err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc),
+			       rxfid, 0);
+	if (err) {
+		printk(KERN_ERR "%s: error %d reading Rx descriptor. "
+		       "Frame dropped.\n", dev->name, err);
+		stats->rx_errors++;
+		goto drop;
+	}
+
+	status = le16_to_cpu(desc.status);
+
+	if (status & HERMES_RXSTAT_ERR) {
+		if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
+			wstats->discard.code++;
+			DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
+			       dev->name);
+		} else {
+			stats->rx_crc_errors++;
+			DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", dev->name);
+		}
+		stats->rx_errors++;
+		goto drop;
+	}
+
+	/* For now we ignore the 802.11 header completely, assuming
+           that the card's firmware has handled anything vital */
+
+	err = hermes_bap_pread(hw, IRQ_BAP, &hdr, sizeof(hdr),
+			       rxfid, HERMES_802_3_OFFSET);
+	if (err) {
+		printk(KERN_ERR "%s: error %d reading frame header. "
+		       "Frame dropped.\n", dev->name, err);
+		stats->rx_errors++;
+		goto drop;
+	}
+
+	length = ntohs(hdr.len);
+
+	/* Sanity checks */
+	if (length < 3) { /* No for even an 802.2 LLC header */
+		/* At least on Symbol firmware with PCF we get quite a
+                   lot of these legitimately - Poll frames with no
+                   data. */
+		stats->rx_dropped++;
+		goto drop;
+	}
+	if (length > IEEE802_11_DATA_LEN) {
+		printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
+		       dev->name, length);
+		stats->rx_length_errors++;
+		stats->rx_errors++;
+		goto drop;
+	}
+
+	/* We need space for the packet data itself, plus an ethernet
+	   header, plus 2 bytes so we can align the IP header on a
+	   32bit boundary, plus 1 byte so we can read in odd length
+	   packets from the card, which has an IO granularity of 16
+	   bits */
+	skb = dev_alloc_skb(length+ETH_HLEN+2+1);
+	if (!skb) {
+		printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
+		       dev->name);
+		goto drop;
+	}
+
+	skb_reserve(skb, 2); /* This way the IP header is aligned */
+
+	/* Handle decapsulation
+	 * In most cases, the firmware tell us about SNAP frames.
+	 * For some reason, the SNAP frames sent by LinkSys APs
+	 * are not properly recognised by most firmwares.
+	 * So, check ourselves */
+	if(((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
+	   ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
+	   is_ethersnap(&hdr)) {
+		/* These indicate a SNAP within 802.2 LLC within
+		   802.11 frame which we'll need to de-encapsulate to
+		   the original EthernetII frame. */
+
+		if (length < ENCAPS_OVERHEAD) { /* No room for full LLC+SNAP */
+			stats->rx_length_errors++;
+			goto drop;
+		}
+
+		/* Remove SNAP header, reconstruct EthernetII frame */
+		data_len = length - ENCAPS_OVERHEAD;
+		data_off = HERMES_802_3_OFFSET + sizeof(hdr);
+
+		eh = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+
+		/* Copy dest + source MACs; the ethertype comes from the
+		 * SNAP header */
+		memcpy(eh, &hdr, 2 * ETH_ALEN);
+		eh->h_proto = hdr.ethertype;
+	} else {
+		/* All other cases indicate a genuine 802.3 frame. No
+		   decapsulation needed. We just throw the whole
+		   thing in, and hope the protocol layer can deal with
+		   it as 802.3 */
+		data_len = length;
+		data_off = HERMES_802_3_OFFSET;
+		/* FIXME: we re-read from the card data we already read here */
+	}
+
+	/* RUP_EVEN: the card can only transfer an even number of bytes */
+	p = skb_put(skb, data_len);
+	err = hermes_bap_pread(hw, IRQ_BAP, p, RUP_EVEN(data_len),
+			       rxfid, data_off);
+	if (err) {
+		printk(KERN_ERR "%s: error %d reading frame. "
+		       "Frame dropped.\n", dev->name, err);
+		stats->rx_errors++;
+		goto drop;
+	}
+
+	dev->last_rx = jiffies;
+	skb->dev = dev;
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Process the wireless stats if needed */
+	orinoco_stat_gather(dev, skb, &desc);
+
+	/* Pass the packet to the networking stack */
+	netif_rx(skb);
+	stats->rx_packets++;
+
+	return;
+
+ drop:
+	stats->rx_dropped++;
+
+	if (skb)
+		dev_kfree_skb_irq(skb);
+	return;
+}
+
+/* Hermes Tx-exception event: a transmit failed.  Read the descriptor
+ * for diagnostics, bump the error count and re-arm TXCOMPLFID. */
+static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
+{
+	struct orinoco_private *priv = dev->priv;
+	struct net_device_stats *stats = &priv->stats;
+	u16 fid = hermes_read_regn(hw, TXCOMPLFID);
+	struct hermes_tx_descriptor desc;
+	int err = 0;
+
+	if (fid == DUMMY_FID)
+		return; /* Nothing's really happened */
+
+	err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc), fid, 0);
+	if (err) {
+		printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
+		       "(FID=%04X error %d)\n",
+		       dev->name, fid, err);
+	} else {
+		DEBUG(1, "%s: Tx error, status %d\n",
+		      dev->name, le16_to_cpu(desc.status));
+	}
+
+	stats->tx_errors++;
+
+	hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+/* Hermes Tx-complete event: count the packet and re-arm TXCOMPLFID. */
+static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
+{
+	struct orinoco_private *priv = dev->priv;
+	struct net_device_stats *stats = &priv->stats;
+
+	stats->tx_packets++;
+
+	hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+/* Hermes alloc event: the card finished allocating a transmit
+ * buffer.  If it is our Tx FID (priv->txfid), the queue can run
+ * again; any other non-dummy FID is unexpected and only warned about. */
+static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
+{
+	struct orinoco_private *priv = dev->priv;
+
+	u16 fid = hermes_read_regn(hw, ALLOCFID);
+
+	if (fid != priv->txfid) {
+		if (fid != DUMMY_FID)
+			printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
+			       dev->name, fid);
+		return;
+	} else {
+		netif_wake_queue(dev);
+	}
+
+	hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
+}
+
+/* Station identity record as returned by HERMES_RID_STAID */
+struct sta_id {
+	u16 id, variant, major, minor;
+} __attribute__ ((packed));
+
+/* Classify the firmware vendor from the station identity record:
+ * variant 1 is Agere; variant 2 with version 1.1 or 2.1 is Symbol;
+ * anything else is treated as Intersil. */
+static int determine_firmware_type(struct net_device *dev, struct sta_id *sta_id)
+{
+	/* FIXME: this is fundamentally broken */
+	unsigned int firmver = ((u32)sta_id->major << 16) | sta_id->minor;
+
+	if (sta_id->variant == 1)
+		return FIRMWARE_TYPE_AGERE;
+	else if ((sta_id->variant == 2) &&
+		 ((firmver == 0x10001) || (firmver == 0x20001)))
+		return FIRMWARE_TYPE_SYMBOL;
+	else
+		return FIRMWARE_TYPE_INTERSIL;
+}
+
+/* Read the firmware's station identity, determine the vendor
+ * (Agere / Symbol / Intersil) and fill in the priv->has_* capability
+ * flags and priv->ibss_port according to the detected version. */
+static void determine_firmware(struct net_device *dev)
+{
+	struct orinoco_private *priv = dev->priv;
+	hermes_t *hw = &priv->hw;
+	int err;
+	struct sta_id sta_id;
+	unsigned int firmver;
+	char tmp[SYMBOL_MAX_VER_LEN+1];
+
+	/* Get the firmware version */
+	err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
+	if (err) {
+		printk(KERN_WARNING "%s: Error %d reading firmware info. Wildly guessing capabilities...\n",
+		       dev->name, err);
+		memset(&sta_id, 0, sizeof(sta_id));
+	}
+	le16_to_cpus(&sta_id.id);
+	le16_to_cpus(&sta_id.variant);
+	le16_to_cpus(&sta_id.major);
+	le16_to_cpus(&sta_id.minor);
+
+	printk(KERN_DEBUG "%s: Station identity %04x:%04x:%04x:%04x\n",
+	       dev->name, sta_id.id, sta_id.variant,
+	       sta_id.major, sta_id.minor);
+
+	/* A firmware_type forced by the user (module parameter,
+	 * presumably) takes precedence over detection */
+	if (! priv->firmware_type)
+		priv->firmware_type = determine_firmware_type(dev, &sta_id);
+
+	/* Default capabilities */
+	priv->has_sensitivity = 1;
+	priv->has_mwo = 0;
+	priv->has_preamble = 0;
+	priv->has_port3 = 1;
+	priv->has_ibss = 1;
+	priv->has_ibss_any = 0;
+	priv->has_wep = 0;
+	priv->has_big_wep = 0;
+
+	/* Determine capabilities from the firmware version */
+	switch (priv->firmware_type) {
+	case FIRMWARE_TYPE_AGERE:
+		/* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
+		   ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
+		printk(KERN_DEBUG "%s: Looks like a Lucent/Agere firmware "
+		       "version %d.%02d\n", dev->name,
+		       sta_id.major, sta_id.minor);
+
+		firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
+
+		priv->has_ibss = (firmver >= 0x60006);
+		priv->has_ibss_any = (firmver >= 0x60010);
+		priv->has_wep = (firmver >= 0x40020);
+		priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell
+					  Gold cards from the others? */
+		priv->has_mwo = (firmver >= 0x60000);
+		priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */
+		priv->ibss_port = 1;
+
+		/* Tested with Agere firmware :
+		 *	1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
+		 * Tested CableTron firmware : 4.32 => Anton */
+		break;
+	case FIRMWARE_TYPE_SYMBOL:
+		/* Symbol , 3Com AirConnect, Intel, Ericsson WLAN */
+		/* Intel MAC : 00:02:B3:* */
+		/* 3Com MAC : 00:50:DA:* */
+		memset(tmp, 0, sizeof(tmp));
+		/* Get the Symbol firmware version */
+		err = hermes_read_ltv(hw, USER_BAP,
+				      HERMES_RID_SECONDARYVERSION_SYMBOL,
+				      SYMBOL_MAX_VER_LEN, NULL, &tmp);
+		if (err) {
+			printk(KERN_WARNING
+			       "%s: Error %d reading Symbol firmware info. Wildly guessing capabilities...\n",
+			       dev->name, err);
+			firmver = 0;
+			tmp[0] = '\0';
+		} else {
+			/* The firmware revision is a string, the format is
+			 * something like : "V2.20-01".
+			 * Quick and dirty parsing... - Jean II
+			 */
+			firmver = ((tmp[1] - '0') << 16) | ((tmp[3] - '0') << 12)
+				| ((tmp[4] - '0') << 8) | ((tmp[6] - '0') << 4)
+				| (tmp[7] - '0');
+
+			tmp[SYMBOL_MAX_VER_LEN] = '\0';
+		}
+
+		printk(KERN_DEBUG "%s: Looks like a Symbol firmware "
+		       "version [%s] (parsing to %X)\n", dev->name,
+		       tmp, firmver);
+
+		priv->has_ibss = (firmver >= 0x20000);
+		priv->has_wep = (firmver >= 0x15012);
+		priv->has_big_wep = (firmver >= 0x20000);
+		priv->has_pm = (firmver >= 0x20000) && (firmver < 0x22000);
+		priv->has_preamble = (firmver >= 0x20000);
+		priv->ibss_port = 4;
+		/* Tested with Intel firmware : 0x20015 => Jean II */
+		/* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
+		break;
+	case FIRMWARE_TYPE_INTERSIL:
+		/* D-Link, Linksys, Adtron, ZoomAir, and many others...
+		 * Samsung, Compaq 100/200 and Proxim are slightly
+		 * different and less well tested */
+		/* D-Link MAC : 00:40:05:* */
+		/* Addtron MAC : 00:90:D1:* */
+		printk(KERN_DEBUG "%s: Looks like an Intersil firmware "
+		       "version %d.%d.%d\n", dev->name,
+		       sta_id.major, sta_id.minor, sta_id.variant);
+
+		firmver = ((unsigned long)sta_id.major << 16) |
+			((unsigned long)sta_id.minor << 8) | sta_id.variant;
+
+		priv->has_ibss = (firmver >= 0x000700); /* FIXME */
+		priv->has_big_wep = priv->has_wep = (firmver >= 0x000800);
+		priv->has_pm = (firmver >= 0x000700);
+
+		if (firmver >= 0x000800)
+			priv->ibss_port = 0;
+		else {
+			printk(KERN_NOTICE "%s: Intersil firmware earlier "
+			       "than v0.8.x - several features not supported\n",
+			       dev->name);
+			priv->ibss_port = 1;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * struct net_device methods
+ */
+
+static int
+orinoco_init(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ struct hermes_idstring nickbuf;
+ u16 reclen;
+ int len;
+
+ TRACE_ENTER(dev->name);
+
+ /* No need to lock, the hw_unavailable flag is already set in
+ * alloc_orinocodev() */
+ priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN;
+
+ /* Initialize the firmware */
+ err = hermes_init(hw);
+ if (err != 0) {
+ printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
+ dev->name, err);
+ goto out;
+ }
+
+ determine_firmware(dev);
+
+ if (priv->has_port3)
+ printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name);
+ if (priv->has_ibss)
+ printk(KERN_DEBUG "%s: IEEE standard IBSS ad-hoc mode supported\n",
+ dev->name);
+ if (priv->has_wep) {
+ printk(KERN_DEBUG "%s: WEP supported, ", dev->name);
+ if (priv->has_big_wep)
+ printk("104-bit key\n");
+ else
+ printk("40-bit key\n");
+ }
+
+ /* Get the MAC address */
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ ETH_ALEN, NULL, dev->dev_addr);
+ if (err) {
+ printk(KERN_WARNING "%s: failed to read MAC address!\n",
+ dev->name);
+ goto out;
+ }
+
+ printk(KERN_DEBUG "%s: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev->name, dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ /* Get the station name */
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ sizeof(nickbuf), &reclen, &nickbuf);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read station name\n",
+ dev->name);
+ goto out;
+ }
+ if (nickbuf.len)
+ len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len));
+ else
+ len = min(IW_ESSID_MAX_SIZE, 2 * reclen);
+ memcpy(priv->nick, &nickbuf.val, len);
+ priv->nick[len] = '\0';
+
+ printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
+
+ /* Get allowed channels */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
+ &priv->channel_mask);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read channel list!\n",
+ dev->name);
+ goto out;
+ }
+
+ /* Get initial AP density */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
+ &priv->ap_density);
+ if (err || priv->ap_density < 1 || priv->ap_density > 3) {
+ priv->has_sensitivity = 0;
+ }
+
+ /* Get initial RTS threshold */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
+ &priv->rts_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read RTS threshold!\n", dev->name);
+ goto out;
+ }
+
+ /* Get initial fragmentation settings */
+ if (priv->has_mwo)
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ &priv->mwo_robust);
+ else
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ &priv->frag_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read fragmentation settings!\n", dev->name);
+ goto out;
+ }
+
+ /* Power management setup */
+ if (priv->has_pm) {
+ priv->pm_on = 0;
+ priv->pm_mcast = 1;
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION,
+ &priv->pm_period);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read power management period!\n",
+ dev->name);
+ goto out;
+ }
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMHOLDOVERDURATION,
+ &priv->pm_timeout);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read power management timeout!\n",
+ dev->name);
+ goto out;
+ }
+ }
+
+ /* Preamble setup */
+ if (priv->has_preamble) {
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL,
+ &priv->preamble);
+ if (err)
+ goto out;
+ }
+
+ /* Set up the default configuration */
+ priv->iw_mode = IW_MODE_INFRA;
+ /* By default use IEEE/IBSS ad-hoc mode if we have it */
+ priv->prefer_port3 = priv->has_port3 && (! priv->has_ibss);
+ set_port_type(priv);
+ priv->channel = 10; /* default channel, more-or-less arbitrary */
+
+ priv->promiscuous = 0;
+ priv->wep_on = 0;
+ priv->tx_key = 0;
+
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err == -EIO) {
+ /* Try workaround for old Symbol firmware bug */
+ printk(KERN_WARNING "%s: firmware ALLOC bug detected "
+ "(old Symbol firmware?). Trying to work around... ",
+ dev->name);
+
+ priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err)
+ printk("failed!\n");
+ else
+ printk("ok.\n");
+ }
+ if (err) {
+ printk("%s: Error %d allocating Tx buffer\n", dev->name, err);
+ goto out;
+ }
+
+ /* Make the hardware available, as long as it hasn't been
+ * removed elsewhere (e.g. by PCMCIA hot unplug) */
+ spin_lock_irq(&priv->lock);
+ priv->hw_unavailable--;
+ spin_unlock_irq(&priv->lock);
+
+ printk(KERN_DEBUG "%s: ready\n", dev->name);
+
+ out:
+ TRACE_EXIT(dev->name);
+ return err;
+}
+
+struct net_device_stats *
+orinoco_get_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+
+ return &priv->stats;
+}
+
+struct iw_statistics *
+orinoco_get_wireless_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ struct iw_statistics *wstats = &priv->wstats;
+ int err = 0;
+ unsigned long flags;
+
+ if (! netif_device_present(dev)) {
+ printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
+ dev->name);
+ return NULL; /* FIXME: Can we do better than this? */
+ }
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return NULL; /* FIXME: Erg, we've been signalled, how
+ * do we propagate this back up? */
+
+ if (priv->iw_mode == IW_MODE_ADHOC) {
+ memset(&wstats->qual, 0, sizeof(wstats->qual));
+ /* If a spy address is defined, we report stats of the
+ * first spy address - Jean II */
+ if (SPY_NUMBER(priv)) {
+ wstats->qual.qual = priv->spy_stat[0].qual;
+ wstats->qual.level = priv->spy_stat[0].level;
+ wstats->qual.noise = priv->spy_stat[0].noise;
+ wstats->qual.updated = priv->spy_stat[0].updated;
+ }
+ } else {
+ struct {
+ u16 qual, signal, noise;
+ } __attribute__ ((packed)) cq;
+
+ err = HERMES_READ_RECORD(hw, USER_BAP,
+ HERMES_RID_COMMSQUALITY, &cq);
+
+ wstats->qual.qual = (int)le16_to_cpu(cq.qual);
+ wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
+ wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
+ wstats->qual.updated = 7;
+ }
+
+ /* We can't really wait for the tallies inquiry command to
+ * complete, so we just use the previous results and trigger
+ * a new tallies inquiry command for next time - Jean II */
+ /* FIXME: We're in user context (I think?), so we should just
+ wait for the tallies to come through */
+ err = hermes_inquire(hw, HERMES_INQ_TALLIES);
+
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return NULL;
+
+ return wstats;
+}
+
+static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
+ int level, int noise)
+{
+ struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
+ int i;
+
+ /* Gather wireless spy statistics: for each packet, compare the
+ * source address with out list, and if match, get the stats... */
+ for (i = 0; i < priv->spy_number; i++)
+ if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) {
+ priv->spy_stat[i].level = level - 0x95;
+ priv->spy_stat[i].noise = noise - 0x95;
+ priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
+ priv->spy_stat[i].updated = 7;
+ }
+}
+
+void
+orinoco_stat_gather(struct net_device *dev,
+ struct sk_buff *skb,
+ struct hermes_rx_descriptor *desc)
+{
+ struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
+
+ /* Using spy support with lots of Rx packets, like in an
+ * infrastructure (AP), will really slow down everything, because
+ * the MAC address must be compared to each entry of the spy list.
+ * If the user really asks for it (set some address in the
+ * spy list), we do it, but he will pay the price.
+ * Note that to get here, you need both WIRELESS_SPY
+ * compiled in AND some addresses in the list !!!
+ */
+ /* Note : gcc will optimise the whole section away if
+ * WIRELESS_SPY is not defined... - Jean II */
+ if (SPY_NUMBER(priv)) {
+ orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN,
+ desc->signal, desc->silence);
+ }
+}
+
/*
 * Hard-start-transmit handler: copy one skb into the adapter's
 * pre-allocated Tx buffer (priv->txfid) through the BAP and start the
 * transmission.  Returns 0 when the packet was consumed (sent or
 * deliberately dropped) and non-zero to ask the stack to requeue it.
 */
static int
orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
	struct net_device_stats *stats = &priv->stats;
	hermes_t *hw = &priv->hw;
	int err = 0;
	u16 txfid = priv->txfid;
	char *p;
	struct ethhdr *eh;
	int len, data_len, data_off;
	struct hermes_tx_descriptor desc;
	unsigned long flags;

	TRACE_ENTER(dev->name);

	if (! netif_running(dev)) {
		printk(KERN_ERR "%s: Tx on stopped device!\n",
		       dev->name);
		TRACE_EXIT(dev->name);
		return 1;
	}

	if (netif_queue_stopped(dev)) {
		printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
		       dev->name);
		TRACE_EXIT(dev->name);
		return 1;
	}

	if (orinoco_lock(priv, &flags) != 0) {
		printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
		       dev->name);
		TRACE_EXIT(dev->name);
/* BUG(); */
		return 1;
	}

	if (! priv->connected) {
		/* Oops, the firmware hasn't established a connection,
		   silently drop the packet (this seems to be the
		   safest approach). */
		stats->tx_errors++;
		orinoco_unlock(priv, &flags);
		dev_kfree_skb(skb, FREE_WRITE);
		TRACE_EXIT(dev->name);
		return 0;
	}

	/* Length of the packet body */
	/* FIXME: what if the skb is smaller than this? */
	len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);

	eh = (struct ethhdr *)skb->data;

	/* Tx descriptor: request both Tx-complete and Tx-exception
	 * events from the firmware for this frame */
	memset(&desc, 0, sizeof(desc));
	desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX);
	err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0);
	if (err) {
		printk(KERN_ERR "%s: Error %d writing Tx descriptor to BAP\n",
		       dev->name, err);
		stats->tx_errors++;
		goto fail;
	}

	/* Clear the 802.11 header and data length fields - some
	 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
	 * if this isn't done. */
	hermes_clear_words(hw, HERMES_DATA0,
			   HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);

	/* Encapsulate Ethernet-II frames */
	if (ntohs(eh->h_proto) > 1500) { /* Ethernet-II frame */
		/* EtherType > 1500: DIX/Ethernet-II frame, so prepend an
		 * 802.3 + 802.2/SNAP header around the payload */
		struct header_struct hdr;
		data_len = len;
		data_off = HERMES_802_3_OFFSET + sizeof(hdr);
		p = skb->data + ETH_HLEN;

		/* 802.3 header */
		memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
		memcpy(hdr.src, eh->h_source, ETH_ALEN);
		hdr.len = htons(data_len + ENCAPS_OVERHEAD);

		/* 802.2 header */
		memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));

		hdr.ethertype = eh->h_proto;
		err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
					txfid, HERMES_802_3_OFFSET);
		if (err) {
			printk(KERN_ERR "%s: Error %d writing packet header to BAP\n",
			       dev->name, err);
			stats->tx_errors++;
			goto fail;
		}
	} else { /* IEEE 802.3 frame */
		data_len = len + ETH_HLEN;
		data_off = HERMES_802_3_OFFSET;
		p = skb->data;
	}

	/* Round up for odd length packets */
	err = hermes_bap_pwrite(hw, USER_BAP, p, RUP_EVEN(data_len), txfid, data_off);
	if (err) {
		printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
		       dev->name, err);
		stats->tx_errors++;
		goto fail;
	}

	/* Finally, we actually initiate the send */
	netif_stop_queue(dev);

	err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL, txfid, NULL);
	if (err) {
		netif_start_queue(dev);
		printk(KERN_ERR "%s: Error %d transmitting packet\n", dev->name, err);
		stats->tx_errors++;
		goto fail;
	}

	dev->trans_start = jiffies;

	orinoco_unlock(priv, &flags);

	DEV_KFREE_SKB(skb);

	TRACE_EXIT(dev->name);

	return 0;
 fail:
	/* NOTE(review): the skb is intentionally not freed here - a
	 * non-zero return asks the stack to requeue it */
	TRACE_EXIT(dev->name);

	orinoco_unlock(priv, &flags);
	return err;
}
+
+#ifdef HAVE_TX_TIMEOUT
+static void
+orinoco_tx_timeout(struct net_device *dev)
+{
+ struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
+ struct net_device_stats *stats = &priv->stats;
+ struct hermes *hw = &priv->hw;
+
+ printk(KERN_WARNING "%s: Tx timeout! "
+ "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
+ dev->name, hermes_read_regn(hw, ALLOCFID),
+ hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
+
+ stats->tx_errors++;
+
+ schedule_work(&priv->reset_work);
+}
+#endif
+
+static int
+orinoco_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct orinoco_private *priv = dev->priv;
+
+ if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
+ return -EINVAL;
+
+ if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) >
+ (priv->nicbuf_size - ETH_HLEN) )
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/* FIXME: return int? */
+static void
+__orinoco_set_multicast_list(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int promisc, mc_count;
+
+ /* The Hermes doesn't seem to have an allmulti mode, so we go
+ * into promiscuous mode and let the upper levels deal. */
+ if ( (dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > MAX_MULTICAST(priv)) ) {
+ promisc = 1;
+ mc_count = 0;
+ } else {
+ promisc = 0;
+ mc_count = dev->mc_count;
+ }
+
+ if (promisc != priv->promiscuous) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPROMISCUOUSMODE,
+ promisc);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n",
+ dev->name, err);
+ } else
+ priv->promiscuous = promisc;
+ }
+
+ if (! promisc && (mc_count || priv->mc_count) ) {
+ struct dev_mc_list *p = dev->mc_list;
+ hermes_multicast_t mclist;
+ int i;
+
+ for (i = 0; i < mc_count; i++) {
+ /* Paranoia: */
+ if (! p)
+ BUG(); /* Multicast list shorter than mc_count */
+ if (p->dmi_addrlen != ETH_ALEN)
+ BUG(); /* Bad address size in multicast list */
+
+ memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
+ p = p->next;
+ }
+
+ if (p)
+ printk(KERN_WARNING "Multicast list is longer than mc_count\n");
+
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES,
+ HERMES_BYTES_TO_RECLEN(priv->mc_count * ETH_ALEN),
+ &mclist);
+ if (err)
+ printk(KERN_ERR "%s: Error %d setting multicast list.\n",
+ dev->name, err);
+ else
+ priv->mc_count = mc_count;
+ }
+
+ /* Since we can set the promiscuous flag when it wasn't asked
+ for, make sure the net_device knows about it. */
+ if (priv->promiscuous)
+ dev->flags |= IFF_PROMISC;
+ else
+ dev->flags &= ~IFF_PROMISC;
+}
+
+/********************************************************************/
+/* Wireless extensions support */
+/********************************************************************/
+
+static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+ int mode;
+ struct iw_range range;
+ int numrates;
+ int i, k;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ err = verify_area(VERIFY_WRITE, rrq->pointer, sizeof(range));
+ if (err)
+ return err;
+
+ rrq->length = sizeof(range);
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ mode = priv->iw_mode;
+ orinoco_unlock(priv, &flags);
+
+ memset(&range, 0, sizeof(range));
+
+ /* Much of this shamelessly taken from wvlan_cs.c. No idea
+ * what it all means -dgibson */
+#if WIRELESS_EXT > 10
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 11;
+#endif /* WIRELESS_EXT > 10 */
+
+ range.min_nwid = range.max_nwid = 0; /* We don't use nwids */
+
+ /* Set available channels/frequencies */
+ range.num_channels = NUM_CHANNELS;
+ k = 0;
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (priv->channel_mask & (1 << i)) {
+ range.freq[k].i = i + 1;
+ range.freq[k].m = channel_frequency[i] * 100000;
+ range.freq[k].e = 1;
+ k++;
+ }
+
+ if (k >= IW_MAX_FREQUENCIES)
+ break;
+ }
+ range.num_frequency = k;
+
+ range.sensitivity = 3;
+
+ if ((mode == IW_MODE_ADHOC) && (priv->spy_number == 0)){
+ /* Quality stats meaningless in ad-hoc mode */
+ range.max_qual.qual = 0;
+ range.max_qual.level = 0;
+ range.max_qual.noise = 0;
+#if WIRELESS_EXT > 11
+ range.avg_qual.qual = 0;
+ range.avg_qual.level = 0;
+ range.avg_qual.noise = 0;
+#endif /* WIRELESS_EXT > 11 */
+
+ } else {
+ range.max_qual.qual = 0x8b - 0x2f;
+ range.max_qual.level = 0x2f - 0x95 - 1;
+ range.max_qual.noise = 0x2f - 0x95 - 1;
+#if WIRELESS_EXT > 11
+ /* Need to get better values */
+ range.avg_qual.qual = 0x24;
+ range.avg_qual.level = 0xC2;
+ range.avg_qual.noise = 0x9E;
+#endif /* WIRELESS_EXT > 11 */
+ }
+
+ err = orinoco_hw_get_bitratelist(priv, &numrates,
+ range.bitrate, IW_MAX_BITRATES);
+ if (err)
+ return err;
+ range.num_bitrates = numrates;
+
+ /* Set an indication of the max TCP throughput in bit/s that we can
+ * expect using this interface. May be use for QoS stuff...
+ * Jean II */
+ if(numrates > 2)
+ range.throughput = 5 * 1000 * 1000; /* ~5 Mb/s */
+ else
+ range.throughput = 1.5 * 1000 * 1000; /* ~1.5 Mb/s */
+
+ range.min_rts = 0;
+ range.max_rts = 2347;
+ range.min_frag = 256;
+ range.max_frag = 2346;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ if (priv->has_wep) {
+ range.max_encoding_tokens = ORINOCO_MAX_KEYS;
+
+ range.encoding_size[0] = SMALL_KEY_SIZE;
+ range.num_encoding_sizes = 1;
+
+ if (priv->has_big_wep) {
+ range.encoding_size[1] = LARGE_KEY_SIZE;
+ range.num_encoding_sizes = 2;
+ }
+ } else {
+ range.num_encoding_sizes = 0;
+ range.max_encoding_tokens = 0;
+ }
+ orinoco_unlock(priv, &flags);
+
+ range.min_pmp = 0;
+ range.max_pmp = 65535000;
+ range.min_pmt = 0;
+ range.max_pmt = 65535 * 1000; /* ??? */
+ range.pmp_flags = IW_POWER_PERIOD;
+ range.pmt_flags = IW_POWER_TIMEOUT;
+ range.pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_UNICAST_R;
+
+ range.num_txpower = 1;
+ range.txpower[0] = 15; /* 15dBm */
+ range.txpower_capa = IW_TXPOW_DBM;
+
+#if WIRELESS_EXT > 10
+ range.retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
+ range.retry_flags = IW_RETRY_LIMIT;
+ range.r_time_flags = IW_RETRY_LIFETIME;
+ range.min_retry = 0;
+ range.max_retry = 65535; /* ??? */
+ range.min_r_time = 0;
+ range.max_r_time = 65535 * 1000; /* ??? */
+#endif /* WIRELESS_EXT > 10 */
+
+ if (copy_to_user(rrq->pointer, &range, sizeof(range)))
+ return -EFAULT;
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+}
+
/*
 * SIOCSIWENCODE handler: program a WEP key, select the Tx key and/or
 * switch WEP on or off.  Changes are recorded in priv only; they are
 * pushed to the hardware at the next reconfiguration.
 */
static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_point *erq)
{
	struct orinoco_private *priv = dev->priv;
	int index = (erq->flags & IW_ENCODE_INDEX) - 1;
	int setindex = priv->tx_key;
	int enable = priv->wep_on;
	int restricted = priv->wep_restrict;
	u16 xlen = 0;
	int err = 0;
	char keybuf[ORINOCO_MAX_KEY_SIZE];
	unsigned long flags;

	if (erq->pointer) {
		/* We actually have a key to set */
		if ( (erq->length < SMALL_KEY_SIZE) || (erq->length > ORINOCO_MAX_KEY_SIZE) )
			return -EINVAL;

		if (copy_from_user(keybuf, erq->pointer, erq->length))
			return -EFAULT;
	}

	err = orinoco_lock(priv, &flags);
	if (err)
		return err;

	if (erq->pointer) {
		/* NOTE(review): erq->length was already bounds-checked
		 * above before the copy_from_user; the E2BIG re-check
		 * here is redundant but harmless */
		if (erq->length > ORINOCO_MAX_KEY_SIZE) {
			err = -E2BIG;
			goto out;
		}

		if ( (erq->length > LARGE_KEY_SIZE)
		     || ( ! priv->has_big_wep && (erq->length > SMALL_KEY_SIZE)) ) {
			err = -EINVAL;
			goto out;
		}

		/* An out-of-range index means "use the current Tx key" */
		if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
			index = priv->tx_key;

		/* Round the stored length up to the standard 40-bit or
		 * 104-bit key sizes */
		if (erq->length > SMALL_KEY_SIZE) {
			xlen = LARGE_KEY_SIZE;
		} else if (erq->length > 0) {
			xlen = SMALL_KEY_SIZE;
		} else
			xlen = 0;

		/* Switch on WEP if off */
		if ((!enable) && (xlen > 0)) {
			setindex = index;
			enable = 1;
		}
	} else {
		/* Important note : if the user do "iwconfig eth0 enc off",
		 * we will arrive there with an index of -1. This is valid
		 * but need to be taken care off... Jean II */
		if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) {
			if((index != -1) || (erq->flags == 0)) {
				err = -EINVAL;
				goto out;
			}
		} else {
			/* Set the index : Check that the key is valid */
			if(priv->keys[index].len == 0) {
				err = -EINVAL;
				goto out;
			}
			setindex = index;
		}
	}

	if (erq->flags & IW_ENCODE_DISABLED)
		enable = 0;
	/* Only for Prism2 & Symbol cards (so far) - Jean II */
	if (erq->flags & IW_ENCODE_OPEN)
		restricted = 0;
	if (erq->flags & IW_ENCODE_RESTRICTED)
		restricted = 1;

	if (erq->pointer) {
		/* Store the key; its length is kept little-endian */
		priv->keys[index].len = cpu_to_le16(xlen);
		memset(priv->keys[index].data, 0, sizeof(priv->keys[index].data));
		memcpy(priv->keys[index].data, keybuf, erq->length);
	}
	priv->tx_key = setindex;
	priv->wep_on = enable;
	priv->wep_restrict = restricted;


 out:
	orinoco_unlock(priv, &flags);

	return err;
}
+
+static int orinoco_ioctl_getiwencode(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int index = (erq->flags & IW_ENCODE_INDEX) - 1;
+ u16 xlen = 0;
+ char keybuf[ORINOCO_MAX_KEY_SIZE];
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
+ index = priv->tx_key;
+
+ erq->flags = 0;
+ if (! priv->wep_on)
+ erq->flags |= IW_ENCODE_DISABLED;
+ erq->flags |= index + 1;
+
+ /* Only for symbol cards - Jean II */
+ if (priv->firmware_type != FIRMWARE_TYPE_AGERE) {
+ if(priv->wep_restrict)
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ else
+ erq->flags |= IW_ENCODE_OPEN;
+ }
+
+ xlen = le16_to_cpu(priv->keys[index].len);
+
+ erq->length = xlen;
+
+ if (erq->pointer) {
+ memcpy(keybuf, priv->keys[index].data, ORINOCO_MAX_KEY_SIZE);
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ if (erq->pointer) {
+ if (copy_to_user(erq->pointer, keybuf, xlen))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int orinoco_ioctl_setessid(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char essidbuf[IW_ESSID_MAX_SIZE+1];
+ int err;
+ unsigned long flags;
+
+ /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it
+ * anyway... - Jean II */
+
+ memset(&essidbuf, 0, sizeof(essidbuf));
+
+ if (erq->flags) {
+ if (erq->length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ if (copy_from_user(&essidbuf, erq->pointer, erq->length))
+ return -EFAULT;
+
+ essidbuf[erq->length] = '\0';
+ }
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ memcpy(priv->desired_essid, essidbuf, sizeof(priv->desired_essid));
+
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getessid(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char essidbuf[IW_ESSID_MAX_SIZE+1];
+ int active;
+ int err = 0;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ if (netif_running(dev)) {
+ err = orinoco_hw_get_essid(priv, &active, essidbuf);
+ if (err)
+ return err;
+ } else {
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ memcpy(essidbuf, priv->desired_essid, sizeof(essidbuf));
+ orinoco_unlock(priv, &flags);
+ }
+
+ erq->flags = 1;
+ erq->length = strlen(essidbuf) + 1;
+ if (erq->pointer)
+ if (copy_to_user(erq->pointer, essidbuf, erq->length))
+ return -EFAULT;
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setnick(struct net_device *dev, struct iw_point *nrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char nickbuf[IW_ESSID_MAX_SIZE+1];
+ int err;
+ unsigned long flags;
+
+ if (nrq->length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ memset(nickbuf, 0, sizeof(nickbuf));
+
+ if (copy_from_user(nickbuf, nrq->pointer, nrq->length))
+ return -EFAULT;
+
+ nickbuf[nrq->length] = '\0';
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ memcpy(priv->nick, nickbuf, sizeof(priv->nick));
+
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getnick(struct net_device *dev, struct iw_point *nrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char nickbuf[IW_ESSID_MAX_SIZE+1];
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ memcpy(nickbuf, priv->nick, IW_ESSID_MAX_SIZE+1);
+ orinoco_unlock(priv, &flags);
+
+ nrq->length = strlen(nickbuf)+1;
+
+#ifdef MACH
+ if(! nrq->pointer) {
+ printk(KERN_INFO "orinoco_ioctl_getnick: no nrq pointer.\n");
+ return -EFAULT;
+ }
+#endif
+
+ if (copy_to_user(nrq->pointer, nickbuf, sizeof(nickbuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int orinoco_ioctl_setfreq(struct net_device *dev, struct iw_freq *frq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int chan = -1;
+ int err;
+ unsigned long flags;
+
+ /* We can only use this in Ad-Hoc demo mode to set the operating
+ * frequency, or in IBSS mode to set the frequency where the IBSS
+ * will be created - Jean II */
+ if (priv->iw_mode != IW_MODE_ADHOC)
+ return -EOPNOTSUPP;
+
+ if ( (frq->e == 0) && (frq->m <= 1000) ) {
+ /* Setting by channel number */
+ chan = frq->m;
+ } else {
+ /* Setting by frequency - search the table */
+ int mult = 1;
+ int i;
+
+ for (i = 0; i < (6 - frq->e); i++)
+ mult *= 10;
+
+ for (i = 0; i < NUM_CHANNELS; i++)
+ if (frq->m == (channel_frequency[i] * mult))
+ chan = i+1;
+ }
+
+ if ( (chan < 1) || (chan > NUM_CHANNELS) ||
+ ! (priv->channel_mask & (1 << (chan-1)) ) )
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ priv->channel = chan;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getsens(struct net_device *dev, struct iw_param *srq)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ u16 val;
+ int err;
+ unsigned long flags;
+
+ if (!priv->has_sensitivity)
+ return -EOPNOTSUPP;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &val);
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return err;
+
+ srq->value = val;
+ srq->fixed = 0; /* auto */
+
+ return 0;
+}
+
+static int orinoco_ioctl_setsens(struct net_device *dev, struct iw_param *srq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = srq->value;
+ int err;
+ unsigned long flags;
+
+ if (!priv->has_sensitivity)
+ return -EOPNOTSUPP;
+
+ if ((val < 1) || (val > 3))
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ priv->ap_density = val;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setrts(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = rrq->value;
+ int err;
+ unsigned long flags;
+
+ if (rrq->disabled)
+ val = 2347;
+
+ if ( (val < 0) || (val > 2347) )
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ priv->rts_thresh = val;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setfrag(struct net_device *dev, struct iw_param *frq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if (priv->has_mwo) {
+ if (frq->disabled)
+ priv->mwo_robust = 0;
+ else {
+ if (frq->fixed)
+ printk(KERN_WARNING "%s: Fixed fragmentation not \
+supported on this firmware. Using MWO robust instead.\n", dev->name);
+ priv->mwo_robust = 1;
+ }
+ } else {
+ if (frq->disabled)
+ priv->frag_thresh = 2346;
+ else {
+ if ( (frq->value < 256) || (frq->value > 2346) )
+ err = -EINVAL;
+ else
+ priv->frag_thresh = frq->value & ~0x1; /* must be even */
+ }
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getfrag(struct net_device *dev, struct iw_param *frq)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 val;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if (priv->has_mwo) {
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ &val);
+ if (err)
+ val = 0;
+
+ frq->value = val ? 2347 : 0;
+ frq->disabled = ! val;
+ frq->fixed = 0;
+ } else {
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ &val);
+ if (err)
+ val = 0;
+
+ frq->value = val;
+ frq->disabled = (val >= 2346);
+ frq->fixed = 1;
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_setrate(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+ int ratemode = -1;
+ int bitrate; /* 100s of kilobits */
+ int i;
+ unsigned long flags;
+
+ /* As the user space doesn't know our highest rate, it uses -1
+ * to ask us to set the highest rate. Test it using "iwconfig
+ * ethX rate auto" - Jean II */
+ if (rrq->value == -1)
+ bitrate = 110;
+ else {
+ if (rrq->value % 100000)
+ return -EINVAL;
+ bitrate = rrq->value / 100000;
+ }
+
+ if ( (bitrate != 10) && (bitrate != 20) &&
+ (bitrate != 55) && (bitrate != 110) )
+ return -EINVAL;
+
+ for (i = 0; i < BITRATE_TABLE_SIZE; i++)
+ if ( (bitrate_table[i].bitrate == bitrate) &&
+ (bitrate_table[i].automatic == ! rrq->fixed) ) {
+ ratemode = i;
+ break;
+ }
+
+ if (ratemode == -1)
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ priv->bitratemode = ratemode;
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
/*
 * SIOCGIWRATE handler: report the configured bitrate mode and, when
 * the interface is up, refine it using the firmware's current Tx rate.
 */
static int orinoco_ioctl_getrate(struct net_device *dev, struct iw_param *rrq)
{
	struct orinoco_private *priv = dev->priv;
	hermes_t *hw = &priv->hw;
	int err = 0;
	int ratemode;
	int i;
	u16 val;
	unsigned long flags;

	err = orinoco_lock(priv, &flags);
	if (err)
		return err;

	ratemode = priv->bitratemode;

	/* bitratemode is only ever assigned from bitrate_table indices,
	 * so anything else means corrupted driver state */
	if ( (ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE) )
		BUG();

	rrq->value = bitrate_table[ratemode].bitrate * 100000;
	rrq->fixed = ! bitrate_table[ratemode].automatic;
	rrq->disabled = 0;

	/* If the interface is running we try to find more about the
	   current mode */
	if (netif_running(dev)) {
		err = hermes_read_wordrec(hw, USER_BAP,
					  HERMES_RID_CURRENTTXRATE, &val);
		if (err)
			goto out;

		switch (priv->firmware_type) {
		case FIRMWARE_TYPE_AGERE: /* Lucent style rate */
			/* Note : in Lucent firmware, the return value of
			 * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s,
			 * and therefore is totally different from the
			 * encoding of HERMES_RID_CNFTXRATECONTROL.
			 * Don't forget that 6Mb/s is really 5.5Mb/s */
			if (val == 6)
				rrq->value = 5500000;
			else
				rrq->value = val * 1000000;
			break;
		case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
		case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
			/* Map the firmware rate-control value back to a
			 * table index; if it is unknown, fall back to the
			 * configured mode reported above */
			for (i = 0; i < BITRATE_TABLE_SIZE; i++)
				if (bitrate_table[i].intersil_txratectrl == val) {
					ratemode = i;
					break;
				}
			if (i >= BITRATE_TABLE_SIZE)
				printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
				       dev->name, val);

			rrq->value = bitrate_table[ratemode].bitrate * 100000;
			break;
		default:
			BUG();
		}
	}

 out:
	orinoco_unlock(priv, &flags);

	return err;
}
+
/*
 * SIOCSIWPOWER handler: record the requested power-management mode
 * (unicast/multicast wakeup, timeout and period) in priv.  The values
 * are pushed to the firmware at the next reconfiguration.
 */
static int orinoco_ioctl_setpower(struct net_device *dev, struct iw_param *prq)
{
	struct orinoco_private *priv = dev->priv;
	int err = 0;
	unsigned long flags;

	err = orinoco_lock(priv, &flags);
	if (err)
		return err;

	if (prq->disabled) {
		priv->pm_on = 0;
	} else {
		switch (prq->flags & IW_POWER_MODE) {
		case IW_POWER_UNICAST_R:
			priv->pm_mcast = 0;
			priv->pm_on = 1;
			break;
		case IW_POWER_ALL_R:
			priv->pm_mcast = 1;
			priv->pm_on = 1;
			break;
		case IW_POWER_ON:
			/* No flags : but we may have a value - Jean II */
			break;
		default:
			err = -EINVAL;
		}
		if (err)
			goto out;

		/* A timeout or period value also implicitly enables PM */
		if (prq->flags & IW_POWER_TIMEOUT) {
			priv->pm_on = 1;
			priv->pm_timeout = prq->value / 1000;
		}
		if (prq->flags & IW_POWER_PERIOD) {
			priv->pm_on = 1;
			priv->pm_period = prq->value / 1000;
		}
		/* It's valid to not have a value if we are just toggling
		 * the flags... Jean II */
		if(!priv->pm_on) {
			err = -EINVAL;
			goto out;
		}
	}

 out:
	orinoco_unlock(priv, &flags);

	return err;
}
+
+static int orinoco_ioctl_getpower(struct net_device *dev, struct iw_param *prq) /* SIOCGIWPOWER: read PM state back from four firmware records */
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 enable, period, timeout, mcast;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, &enable);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION, &period);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &timeout);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, &mcast);
+ if (err)
+ goto out;
+
+ prq->disabled = !enable;
+ /* Note : by default, display the period */
+ if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ prq->flags = IW_POWER_TIMEOUT;
+ prq->value = timeout * 1000; /* presumably ms -> usec for userspace -- TODO confirm units */
+ } else {
+ prq->flags = IW_POWER_PERIOD;
+ prq->value = period * 1000;
+ }
+ if (mcast)
+ prq->flags |= IW_POWER_ALL_R;
+ else
+ prq->flags |= IW_POWER_UNICAST_R;
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+#if WIRELESS_EXT > 10
+static int orinoco_ioctl_getretry(struct net_device *dev, struct iw_param *rrq) /* SIOCGIWRETRY: report retry limits / tx lifetime (read-only here) */
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 short_limit, long_limit, lifetime;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
+ &short_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
+ &long_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
+ &lifetime);
+ if (err)
+ goto out;
+
+ rrq->disabled = 0; /* Can't be disabled */
+
+ /* Note : by default, display the retry number */
+ if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ rrq->flags = IW_RETRY_LIFETIME;
+ rrq->value = lifetime * 1000; /* ??? original author unsure of units */
+ } else {
+ /* By default, display the min number */
+ if ((rrq->flags & IW_RETRY_MAX)) {
+ rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ rrq->value = long_limit;
+ } else {
+ rrq->flags = IW_RETRY_LIMIT;
+ rrq->value = short_limit;
+ if(short_limit != long_limit)
+ rrq->flags |= IW_RETRY_MIN;
+ }
+ }
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+#endif /* WIRELESS_EXT > 10 */
+
+static int orinoco_ioctl_setibssport(struct net_device *dev, struct iwreq *wrq) /* private ioctl set_ibssport: select the firmware port used for IBSS */
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = *( (int *) wrq->u.name ); /* value passed inline in the iwreq, not via a user pointer */
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ priv->ibss_port = val ;
+
+ /* Actually update the mode we are using */
+ set_port_type(priv);
+
+ orinoco_unlock(priv, &flags);
+ return 0;
+}
+
+static int orinoco_ioctl_getibssport(struct net_device *dev, struct iwreq *wrq) /* private ioctl get_ibssport: return priv->ibss_port inline in the iwreq */
+{
+ struct orinoco_private *priv = dev->priv;
+ int *val = (int *)wrq->u.name;
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ *val = priv->ibss_port;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setport3(struct net_device *dev, struct iwreq *wrq) /* private ioctl set_port3: pick IEEE (0) vs Lucent proprietary (1) ad-hoc */
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = *( (int *) wrq->u.name );
+ int err = 0;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ switch (val) {
+ case 0: /* Try to do IEEE ad-hoc mode */
+ if (! priv->has_ibss) { /* firmware must support real IBSS */
+ err = -EINVAL;
+ break;
+ }
+ priv->prefer_port3 = 0;
+
+ break;
+
+ case 1: /* Try to do Lucent proprietary ad-hoc mode */
+ if (! priv->has_port3) {
+ err = -EINVAL;
+ break;
+ }
+ priv->prefer_port3 = 1;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (! err)
+ /* Actually update the mode we are using */
+ set_port_type(priv);
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getport3(struct net_device *dev, struct iwreq *wrq) /* private ioctl get_port3: return priv->prefer_port3 inline in the iwreq */
+{
+ struct orinoco_private *priv = dev->priv;
+ int *val = (int *)wrq->u.name;
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ *val = priv->prefer_port3;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+/* Spy is used for link quality/strength measurements in Ad-Hoc mode
+ * Jean II */
+static int orinoco_ioctl_setspy(struct net_device *dev, struct iw_point *srq) /* SIOCSIWSPY: install the list of MAC addresses to monitor */
+{
+ struct orinoco_private *priv = dev->priv;
+ struct sockaddr address[IW_MAX_SPY];
+ int number = srq->length;
+ int i;
+ int err = 0;
+ unsigned long flags;
+
+ /* Check the number of addresses */
+ if (number > IW_MAX_SPY)
+ return -E2BIG;
+
+ /* Get the data in the driver. NOTE(review): if pointer is NULL but
+ * number > 0, address[] below is read uninitialized -- confirm callers
+ * always pass a pointer with a non-zero length */
+ if (srq->pointer) {
+ if (copy_from_user(address, srq->pointer,
+ sizeof(struct sockaddr) * number))
+ return -EFAULT;
+ }
+
+ /* Make sure nobody mess with the structure while we do */
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ /* orinoco_lock() doesn't disable interrupts, so make sure the
+ * interrupt rx path don't get confused while we copy */
+ priv->spy_number = 0;
+
+ if (number > 0) {
+ /* Extract the addresses */
+ for (i = 0; i < number; i++)
+ memcpy(priv->spy_address[i], address[i].sa_data,
+ ETH_ALEN);
+ /* Reset stats */
+ memset(priv->spy_stat, 0,
+ sizeof(struct iw_quality) * IW_MAX_SPY);
+ /* Set number of addresses */
+ priv->spy_number = number;
+ }
+
+ /* Now, let the others play */
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getspy(struct net_device *dev, struct iw_point *srq) /* SIOCGIWSPY: return monitored addresses plus their link-quality stats */
+{
+ struct orinoco_private *priv = dev->priv;
+ struct sockaddr address[IW_MAX_SPY];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+ int number;
+ int i;
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ number = priv->spy_number;
+ if ((number > 0) && (srq->pointer)) {
+ /* Create address struct */
+ for (i = 0; i < number; i++) {
+ memcpy(address[i].sa_data, priv->spy_address[i],
+ ETH_ALEN);
+ address[i].sa_family = AF_UNIX;
+ }
+ /* Copy stats */
+ /* In theory, we should disable irqs while copying the stats
+ * because the rx path migh update it in the middle...
+ * Bah, who care ? - Jean II */
+ memcpy(&spy_stat, priv->spy_stat,
+ sizeof(struct iw_quality) * IW_MAX_SPY);
+ for (i=0; i < number; i++)
+ priv->spy_stat[i].updated = 0; /* mark stats as read */
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ /* Push stuff to user space. NOTE(review): srq->pointer is not
+ * re-checked here; if it is NULL with number > 0, address[] is
+ * uninitialized and copy_to_user() will fail with -EFAULT -- confirm */
+ srq->length = number;
+ if(copy_to_user(srq->pointer, address,
+ sizeof(struct sockaddr) * number))
+ return -EFAULT;
+ if(copy_to_user(srq->pointer + (sizeof(struct sockaddr)*number),
+ &spy_stat, sizeof(struct iw_quality) * number))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int /* master do_ioctl handler: dispatches all wireless-extensions requests */
+orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct iwreq *wrq = (struct iwreq *)rq; /* WE ioctls pass an iwreq disguised as an ifreq */
+ int err = 0;
+ int tmp;
+ int changed = 0; /* set by SET handlers; triggers a reconfigure at the end */
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ /* In theory, we could allow most of the SET stuff to be
+ * done. In practice, the lapse of time at startup when the
+ * card is not ready is very short, so why bother... Note
+ * that netif_device_present is different from up/down
+ * (ifconfig), when the device is not yet up, it is usually
+ * already ready... Jean II */
+ if (! netif_device_present(dev))
+ return -ENODEV;
+
+ switch (cmd) { /* standard WE ioctls first, then the driver-private ones */
+ case SIOCGIWNAME:
+ strcpy(wrq->u.name, "IEEE 802.11-DS");
+ break;
+
+ case SIOCGIWAP:
+ wrq->u.ap_addr.sa_family = ARPHRD_ETHER;
+ err = orinoco_hw_get_bssid(priv, wrq->u.ap_addr.sa_data);
+ break;
+
+ case SIOCGIWRANGE:
+ err = orinoco_ioctl_getiwrange(dev, &wrq->u.data);
+ break;
+
+ case SIOCSIWMODE:
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ switch (wrq->u.mode) {
+ case IW_MODE_ADHOC:
+ if (! (priv->has_ibss || priv->has_port3) )
+ err = -EINVAL;
+ else {
+ priv->iw_mode = IW_MODE_ADHOC;
+ changed = 1;
+ }
+ break;
+
+ case IW_MODE_INFRA:
+ priv->iw_mode = IW_MODE_INFRA;
+ changed = 1;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ set_port_type(priv);
+ orinoco_unlock(priv, &flags);
+ break;
+
+ case SIOCGIWMODE:
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ wrq->u.mode = priv->iw_mode;
+ orinoco_unlock(priv, &flags);
+ break;
+
+ case SIOCSIWENCODE:
+ if (! priv->has_wep) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ err = orinoco_ioctl_setiwencode(dev, &wrq->u.encoding);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWENCODE:
+ if (! priv->has_wep) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (! capable(CAP_NET_ADMIN)) { /* reading keys is privileged */
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_getiwencode(dev, &wrq->u.encoding);
+ break;
+
+ case SIOCSIWESSID:
+ err = orinoco_ioctl_setessid(dev, &wrq->u.essid);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWESSID:
+ err = orinoco_ioctl_getessid(dev, &wrq->u.essid);
+ break;
+
+ case SIOCSIWNICKN:
+ err = orinoco_ioctl_setnick(dev, &wrq->u.data);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWNICKN:
+ err = orinoco_ioctl_getnick(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWFREQ:
+ tmp = orinoco_hw_get_freq(priv);
+ if (tmp < 0) {
+ err = tmp;
+ } else {
+ wrq->u.freq.m = tmp;
+ wrq->u.freq.e = 1;
+ }
+ break;
+
+ case SIOCSIWFREQ:
+ err = orinoco_ioctl_setfreq(dev, &wrq->u.freq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWSENS:
+ err = orinoco_ioctl_getsens(dev, &wrq->u.sens);
+ break;
+
+ case SIOCSIWSENS:
+ err = orinoco_ioctl_setsens(dev, &wrq->u.sens);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWRTS:
+ wrq->u.rts.value = priv->rts_thresh;
+ wrq->u.rts.disabled = (wrq->u.rts.value == 2347); /* 2347 == RTS off per 802.11 convention */
+ wrq->u.rts.fixed = 1;
+ break;
+
+ case SIOCSIWRTS:
+ err = orinoco_ioctl_setrts(dev, &wrq->u.rts);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCSIWFRAG:
+ err = orinoco_ioctl_setfrag(dev, &wrq->u.frag);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWFRAG:
+ err = orinoco_ioctl_getfrag(dev, &wrq->u.frag);
+ break;
+
+ case SIOCSIWRATE:
+ err = orinoco_ioctl_setrate(dev, &wrq->u.bitrate);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWRATE:
+ err = orinoco_ioctl_getrate(dev, &wrq->u.bitrate);
+ break;
+
+ case SIOCSIWPOWER:
+ err = orinoco_ioctl_setpower(dev, &wrq->u.power);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWPOWER:
+ err = orinoco_ioctl_getpower(dev, &wrq->u.power);
+ break;
+
+ case SIOCGIWTXPOW:
+ /* The card only supports one tx power, so this is easy */
+ wrq->u.txpower.value = 15; /* dBm */
+ wrq->u.txpower.fixed = 1;
+ wrq->u.txpower.disabled = 0;
+ wrq->u.txpower.flags = IW_TXPOW_DBM;
+ break;
+
+#if WIRELESS_EXT > 10
+ case SIOCSIWRETRY:
+ err = -EOPNOTSUPP; /* retry limits are read-only on this hardware */
+ break;
+
+ case SIOCGIWRETRY:
+ err = orinoco_ioctl_getretry(dev, &wrq->u.retry);
+ break;
+#endif /* WIRELESS_EXT > 10 */
+
+ case SIOCSIWSPY:
+ err = orinoco_ioctl_setspy(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWSPY:
+ err = orinoco_ioctl_getspy(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWPRIV:
+ if (wrq->u.data.pointer) {
+ struct iw_priv_args privtab[] = { /* advertises the private ioctls handled below */
+ { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" },
+ { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
+ { SIOCIWFIRSTPRIV + 0x2,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_port3" },
+ { SIOCIWFIRSTPRIV + 0x3, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_port3" },
+ { SIOCIWFIRSTPRIV + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_preamble" },
+ { SIOCIWFIRSTPRIV + 0x5, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_preamble" },
+ { SIOCIWFIRSTPRIV + 0x6,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_ibssport" },
+ { SIOCIWFIRSTPRIV + 0x7, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_ibssport" },
+ { SIOCIWLASTPRIV, 0, 0, "dump_recs" },
+ };
+
+ err = verify_area(VERIFY_WRITE, wrq->u.data.pointer, sizeof(privtab)); /* NOTE(review): verify_area() is obsolete in later kernels (access_ok) */
+ if (err)
+ break;
+
+ wrq->u.data.length = sizeof(privtab) / sizeof(privtab[0]);
+ if (copy_to_user(wrq->u.data.pointer, privtab, sizeof(privtab)))
+ err = -EFAULT;
+ }
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x0: /* force_reset */
+ case SIOCIWFIRSTPRIV + 0x1: /* card_reset */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
+
+ schedule_work(&priv->reset_work); /* reset runs from process context, not here */
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x2: /* set_port3 */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_setport3(dev, wrq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x3: /* get_port3 */
+ err = orinoco_ioctl_getport3(dev, wrq);
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x4: /* set_preamble */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ /* 802.11b has recently defined some short preamble.
+ * Basically, the Phy header has been reduced in size.
+ * This increase performance, especially at high rates
+ * (the preamble is transmitted at 1Mb/s), unfortunately
+ * this give compatibility troubles... - Jean II */
+ if(priv->has_preamble) {
+ int val = *( (int *) wrq->u.name );
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ if (val)
+ priv->preamble = 1;
+ else
+ priv->preamble = 0;
+ orinoco_unlock(priv, &flags);
+ changed = 1;
+ } else
+ err = -EOPNOTSUPP;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x5: /* get_preamble */
+ if(priv->has_preamble) {
+ int *val = (int *)wrq->u.name;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ *val = priv->preamble;
+ orinoco_unlock(priv, &flags);
+ } else
+ err = -EOPNOTSUPP;
+ break;
+ case SIOCIWFIRSTPRIV + 0x6: /* set_ibssport */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_setibssport(dev, wrq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x7: /* get_ibssport */
+ err = orinoco_ioctl_getibssport(dev, wrq);
+ break;
+
+ case SIOCIWLASTPRIV:
+ err = orinoco_debug_dump_recs(dev);
+ if (err)
+ printk(KERN_ERR "%s: Unable to dump records (%d)\n",
+ dev->name, err);
+ break;
+
+
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (! err && changed && netif_running(dev)) { /* push new settings to the hardware */
+ err = orinoco_reconfigure(dev);
+ }
+
+ TRACE_EXIT(dev->name);
+
+ return err;
+}
+
+struct { /* table of Hermes RIDs dumped by orinoco_debug_dump_recs(), with display format */
+ u16 rid;
+ char *name;
+ int displaytype;
+#define DISPLAY_WORDS 0
+#define DISPLAY_BYTES 1
+#define DISPLAY_STRING 2
+#define DISPLAY_XSTRING 3
+} record_table[] = {
+#define DEBUG_REC(name,type) { HERMES_RID_##name, #name, DISPLAY_##type } /* shorthand: RID constant, its name, its format */
+ DEBUG_REC(CNFPORTTYPE,WORDS),
+ DEBUG_REC(CNFOWNMACADDR,BYTES),
+ DEBUG_REC(CNFDESIREDSSID,STRING),
+ DEBUG_REC(CNFOWNCHANNEL,WORDS),
+ DEBUG_REC(CNFOWNSSID,STRING),
+ DEBUG_REC(CNFOWNATIMWINDOW,WORDS),
+ DEBUG_REC(CNFSYSTEMSCALE,WORDS),
+ DEBUG_REC(CNFMAXDATALEN,WORDS),
+ DEBUG_REC(CNFPMENABLED,WORDS),
+ DEBUG_REC(CNFPMEPS,WORDS),
+ DEBUG_REC(CNFMULTICASTRECEIVE,WORDS),
+ DEBUG_REC(CNFMAXSLEEPDURATION,WORDS),
+ DEBUG_REC(CNFPMHOLDOVERDURATION,WORDS),
+ DEBUG_REC(CNFOWNNAME,STRING),
+ DEBUG_REC(CNFOWNDTIMPERIOD,WORDS),
+ DEBUG_REC(CNFMULTICASTPMBUFFERING,WORDS),
+ DEBUG_REC(CNFWEPENABLED_AGERE,WORDS),
+ DEBUG_REC(CNFMANDATORYBSSID_SYMBOL,WORDS),
+ DEBUG_REC(CNFWEPDEFAULTKEYID,WORDS),
+ DEBUG_REC(CNFDEFAULTKEY0,BYTES),
+ DEBUG_REC(CNFDEFAULTKEY1,BYTES),
+ DEBUG_REC(CNFMWOROBUST_AGERE,WORDS),
+ DEBUG_REC(CNFDEFAULTKEY2,BYTES),
+ DEBUG_REC(CNFDEFAULTKEY3,BYTES),
+ DEBUG_REC(CNFWEPFLAGS_INTERSIL,WORDS),
+ DEBUG_REC(CNFWEPKEYMAPPINGTABLE,WORDS),
+ DEBUG_REC(CNFAUTHENTICATION,WORDS),
+ DEBUG_REC(CNFMAXASSOCSTA,WORDS),
+ DEBUG_REC(CNFKEYLENGTH_SYMBOL,WORDS),
+ DEBUG_REC(CNFTXCONTROL,WORDS),
+ DEBUG_REC(CNFROAMINGMODE,WORDS),
+ DEBUG_REC(CNFHOSTAUTHENTICATION,WORDS),
+ DEBUG_REC(CNFRCVCRCERROR,WORDS),
+ DEBUG_REC(CNFMMLIFE,WORDS),
+ DEBUG_REC(CNFALTRETRYCOUNT,WORDS),
+ DEBUG_REC(CNFBEACONINT,WORDS),
+ DEBUG_REC(CNFAPPCFINFO,WORDS),
+ DEBUG_REC(CNFSTAPCFINFO,WORDS),
+ DEBUG_REC(CNFPRIORITYQUSAGE,WORDS),
+ DEBUG_REC(CNFTIMCTRL,WORDS),
+ DEBUG_REC(CNFTHIRTY2TALLY,WORDS),
+ DEBUG_REC(CNFENHSECURITY,WORDS),
+ DEBUG_REC(CNFGROUPADDRESSES,BYTES),
+ DEBUG_REC(CNFCREATEIBSS,WORDS),
+ DEBUG_REC(CNFFRAGMENTATIONTHRESHOLD,WORDS),
+ DEBUG_REC(CNFRTSTHRESHOLD,WORDS),
+ DEBUG_REC(CNFTXRATECONTROL,WORDS),
+ DEBUG_REC(CNFPROMISCUOUSMODE,WORDS),
+ DEBUG_REC(CNFBASICRATES_SYMBOL,WORDS),
+ DEBUG_REC(CNFPREAMBLE_SYMBOL,WORDS),
+ DEBUG_REC(CNFSHORTPREAMBLE,WORDS),
+ DEBUG_REC(CNFWEPKEYS_AGERE,BYTES),
+ DEBUG_REC(CNFEXCLUDELONGPREAMBLE,WORDS),
+ DEBUG_REC(CNFTXKEY_AGERE,WORDS),
+ DEBUG_REC(CNFAUTHENTICATIONRSPTO,WORDS),
+ DEBUG_REC(CNFBASICRATES,WORDS),
+ DEBUG_REC(CNFSUPPORTEDRATES,WORDS),
+ DEBUG_REC(CNFTICKTIME,WORDS),
+ DEBUG_REC(CNFSCANREQUEST,WORDS),
+ DEBUG_REC(CNFJOINREQUEST,WORDS),
+ DEBUG_REC(CNFAUTHENTICATESTATION,WORDS),
+ DEBUG_REC(CNFCHANNELINFOREQUEST,WORDS),
+ DEBUG_REC(MAXLOADTIME,WORDS),
+ DEBUG_REC(DOWNLOADBUFFER,WORDS),
+ DEBUG_REC(PRIID,WORDS),
+ DEBUG_REC(PRISUPRANGE,WORDS),
+ DEBUG_REC(CFIACTRANGES,WORDS),
+ DEBUG_REC(NICSERNUM,XSTRING),
+ DEBUG_REC(NICID,WORDS),
+ DEBUG_REC(MFISUPRANGE,WORDS),
+ DEBUG_REC(CFISUPRANGE,WORDS),
+ DEBUG_REC(CHANNELLIST,WORDS),
+ DEBUG_REC(REGULATORYDOMAINS,WORDS),
+ DEBUG_REC(TEMPTYPE,WORDS),
+/* DEBUG_REC(CIS,BYTES), */
+ DEBUG_REC(STAID,WORDS),
+ DEBUG_REC(CURRENTSSID,STRING),
+ DEBUG_REC(CURRENTBSSID,BYTES),
+ DEBUG_REC(COMMSQUALITY,WORDS),
+ DEBUG_REC(CURRENTTXRATE,WORDS),
+ DEBUG_REC(CURRENTBEACONINTERVAL,WORDS),
+ DEBUG_REC(CURRENTSCALETHRESHOLDS,WORDS),
+ DEBUG_REC(PROTOCOLRSPTIME,WORDS),
+ DEBUG_REC(SHORTRETRYLIMIT,WORDS),
+ DEBUG_REC(LONGRETRYLIMIT,WORDS),
+ DEBUG_REC(MAXTRANSMITLIFETIME,WORDS),
+ DEBUG_REC(MAXRECEIVELIFETIME,WORDS),
+ DEBUG_REC(CFPOLLABLE,WORDS),
+ DEBUG_REC(AUTHENTICATIONALGORITHMS,WORDS),
+ DEBUG_REC(PRIVACYOPTIONIMPLEMENTED,WORDS),
+ DEBUG_REC(OWNMACADDR,BYTES),
+ DEBUG_REC(SCANRESULTSTABLE,WORDS),
+ DEBUG_REC(PHYTYPE,WORDS),
+ DEBUG_REC(CURRENTCHANNEL,WORDS),
+ DEBUG_REC(CURRENTPOWERSTATE,WORDS),
+ DEBUG_REC(CCAMODE,WORDS),
+ DEBUG_REC(SUPPORTEDDATARATES,WORDS),
+ DEBUG_REC(BUILDSEQ,BYTES),
+ DEBUG_REC(FWID,XSTRING)
+#undef DEBUG_REC
+};
+
+#define DEBUG_LTV_SIZE 128 /* max bytes fetched per record */
+
+static int orinoco_debug_dump_recs(struct net_device *dev) /* dump every RID in record_table to the kernel log (dump_recs private ioctl) */
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ u8 *val8; /* byte view of the shared read buffer */
+ u16 *val16; /* word view of the same buffer */
+ int i,j;
+ u16 length;
+ int err;
+
+ /* I'm not sure: we might have a lock here, so we'd better go
+ atomic, just in case. */
+ val8 = kmalloc(DEBUG_LTV_SIZE + 2, GFP_ATOMIC);
+ if (! val8)
+ return -ENOMEM;
+ val16 = (u16 *)val8;
+
+ for (i = 0; i < ARRAY_SIZE(record_table); i++) {
+ u16 rid = record_table[i].rid;
+ int len;
+
+ memset(val8, 0, DEBUG_LTV_SIZE + 2);
+
+ err = hermes_read_ltv(hw, USER_BAP, rid, DEBUG_LTV_SIZE,
+ &length, val8);
+ if (err) {
+ DEBUG(0, "Error %d reading RID 0x%04x\n", err, rid);
+ continue; /* unreadable record: skip, keep dumping the rest */
+ }
+ val16 = (u16 *)val8;
+ if (length == 0)
+ continue;
+
+ printk(KERN_DEBUG "%-15s (0x%04x): length=%d (%d bytes)\tvalue=",
+ record_table[i].name,
+ rid, length, (length-1)*2);
+ len = min(((int)length-1)*2, DEBUG_LTV_SIZE); /* length counts 16-bit words incl. header, hence (length-1)*2 data bytes */
+
+ switch (record_table[i].displaytype) {
+ case DISPLAY_WORDS:
+ for (j = 0; j < len / 2; j++)
+ printk("%04X-", le16_to_cpu(val16[j]));
+ break;
+
+ case DISPLAY_BYTES:
+ default:
+ for (j = 0; j < len; j++)
+ printk("%02X:", val8[j]);
+ break;
+
+ case DISPLAY_STRING:
+ len = min(len, le16_to_cpu(val16[0])+2); /* first word is the string's byte count */
+ val8[len] = '\0';
+ printk("\"%s\"", (char *)&val16[1]);
+ break;
+
+ case DISPLAY_XSTRING:
+ printk("'%s'", (char *)val8);
+ }
+
+ printk("\n");
+ }
+
+ kfree(val8);
+
+ return 0;
+}
+
+struct net_device *alloc_orinocodev(int sizeof_card, int (*hard_reset)(struct orinoco_private *)) /* allocate net_device + orinoco_private (+ bus-specific area) and wire up callbacks */
+{
+ struct net_device *dev;
+ struct orinoco_private *priv;
+
+ dev = alloc_etherdev(sizeof(struct orinoco_private) + sizeof_card);
+ priv = (struct orinoco_private *)dev->priv; /* NOTE(review): alloc_etherdev() result is not NULL-checked -- this dereference oopses on allocation failure */
+ priv->ndev = dev;
+ if (sizeof_card)
+ priv->card = (void *)((unsigned long)dev->priv + sizeof(struct orinoco_private)); /* card-specific data lives right after priv */
+ else
+ priv->card = NULL;
+
+ /* Setup / override net_device fields */
+ dev->init = orinoco_init;
+ dev->hard_start_xmit = orinoco_xmit;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = orinoco_tx_timeout;
+ dev->watchdog_timeo = HZ; /* 1 second timeout */
+#endif
+ dev->get_stats = orinoco_get_stats;
+ dev->get_wireless_stats = orinoco_get_wireless_stats;
+ dev->do_ioctl = orinoco_ioctl;
+ dev->change_mtu = orinoco_change_mtu;
+ dev->set_multicast_list = orinoco_set_multicast_list;
+ /* we use the default eth_mac_addr for setting the MAC addr */
+
+ /* Set up default callbacks */
+ dev->open = orinoco_open;
+ dev->stop = orinoco_stop;
+ priv->hard_reset = hard_reset; /* bus driver's reset hook; may be NULL */
+
+ spin_lock_init(&priv->lock);
+ priv->open = 0;
+ priv->hw_unavailable = 1; /* orinoco_init() must clear this
+ * before anything else touches the
+ * hardware */
+ INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
+
+ priv->last_linkstatus = 0xffff;
+ priv->connected = 0;
+
+ return dev;
+
+}
+
+/********************************************************************/
+/* Module initialization */
+/********************************************************************/
+
+EXPORT_SYMBOL(alloc_orinocodev);
+
+EXPORT_SYMBOL(__orinoco_up);
+EXPORT_SYMBOL(__orinoco_down);
+EXPORT_SYMBOL(orinoco_stop);
+EXPORT_SYMBOL(orinoco_reinit_firmware);
+
+EXPORT_SYMBOL(orinoco_interrupt);
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = "orinoco.c 0.13e (David Gibson <hermes@gibson.dropbear.id.au> and others)";
+
+static int __init init_orinoco(void) /* module load: just announce the driver version */
+{
+ printk(KERN_DEBUG "%s\n", version);
+ return 0;
+}
+
+static void __exit exit_orinoco(void) /* module unload: nothing to clean up; bus drivers own the devices */
+{
+}
+
+module_init(init_orinoco);
+module_exit(exit_orinoco);
diff --git a/linux/pcmcia-cs/wireless/orinoco.h b/linux/pcmcia-cs/wireless/orinoco.h
new file mode 100644
index 0000000..6eb9e85
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/orinoco.h
@@ -0,0 +1,166 @@
+/* orinoco.h
+ *
+ * Common definitions to all pieces of the various orinoco
+ * drivers
+ */
+
+#ifndef _ORINOCO_H
+#define _ORINOCO_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/version.h>
+#include "hermes.h"
+
+/* Workqueue / task queue backwards compatibility stuff */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#define INIT_WORK INIT_TQUEUE
+#define schedule_work schedule_task
+#endif
+
+/* Interrupt handler backwards compatibility stuff */
+#ifndef IRQ_NONE
+
+#define IRQ_NONE
+#define IRQ_HANDLED
+typedef void irqreturn_t;
+
+#endif
+
+/* To enable debug messages */
+//#define ORINOCO_DEBUG 3
+
+#if (! defined (WIRELESS_EXT)) || (WIRELESS_EXT < 10)
+#error "orinoco driver requires Wireless extensions v10 or later."
+#endif /* (! defined (WIRELESS_EXT)) || (WIRELESS_EXT < 10) */
+#define WIRELESS_SPY // enable iwspy support
+
+#define ORINOCO_MAX_KEY_SIZE 14
+#define ORINOCO_MAX_KEYS 4
+
+struct orinoco_key { /* one WEP key in firmware-record layout, hence packed */
+ u16 len; /* always stored as little-endian */
+ char data[ORINOCO_MAX_KEY_SIZE];
+} __attribute__ ((packed));
+
+#define ORINOCO_INTEN ( HERMES_EV_RX | HERMES_EV_ALLOC | HERMES_EV_TX | \
+ HERMES_EV_TXEXC | HERMES_EV_WTERR | HERMES_EV_INFO | \
+ HERMES_EV_INFDROP )
+
+
+struct orinoco_private { /* per-device driver state, hung off net_device->priv */
+ void *card; /* Pointer to card dependent structure */
+ int (*hard_reset)(struct orinoco_private *); /* bus-specific reset hook supplied to alloc_orinocodev() */
+
+ /* Synchronisation stuff */
+ spinlock_t lock;
+ int hw_unavailable; /* non-zero while hardware must not be touched; orinoco_lock() fails then */
+ struct work_struct reset_work;
+
+ /* driver state */
+ int open;
+ u16 last_linkstatus;
+ int connected;
+
+ /* Net device stuff */
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ struct iw_statistics wstats;
+
+ /* Hardware control variables */
+ hermes_t hw;
+ u16 txfid; /* presumably the pre-allocated transmit frame ID -- confirm against tx path */
+
+
+ /* Capabilities of the hardware/firmware */
+ int firmware_type;
+#define FIRMWARE_TYPE_AGERE 1
+#define FIRMWARE_TYPE_INTERSIL 2
+#define FIRMWARE_TYPE_SYMBOL 3
+ int has_ibss, has_port3, has_ibss_any, ibss_port;
+ int has_wep, has_big_wep;
+ int has_mwo;
+ int has_pm;
+ int has_preamble;
+ int has_sensitivity;
+ int nicbuf_size;
+ u16 channel_mask;
+ int broken_disableport;
+
+ /* Configuration paramaters */
+ u32 iw_mode;
+ int prefer_port3; /* prefer Lucent proprietary (port 3) ad-hoc over IEEE IBSS */
+ u16 wep_on, wep_restrict, tx_key;
+ struct orinoco_key keys[ORINOCO_MAX_KEYS];
+ int bitratemode;
+ char nick[IW_ESSID_MAX_SIZE+1];
+ char desired_essid[IW_ESSID_MAX_SIZE+1];
+ u16 frag_thresh, mwo_robust;
+ u16 channel;
+ u16 ap_density, rts_thresh;
+ u16 pm_on, pm_mcast, pm_period, pm_timeout; /* power management settings, see orinoco_ioctl_setpower() */
+ u16 preamble;
+#ifdef WIRELESS_SPY
+ int spy_number; /* number of valid entries in spy_address/spy_stat */
+ u_char spy_address[IW_MAX_SPY][ETH_ALEN];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+#endif
+
+ /* Configuration dependent variables */
+ int port_type, createibss;
+ int promiscuous, mc_count;
+};
+
+#ifdef ORINOCO_DEBUG
+extern int orinoco_debug;
+#define DEBUG(n, args...) do { if (orinoco_debug>(n)) printk(KERN_DEBUG args); } while(0)
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif /* ORINOCO_DEBUG */
+
+#define TRACE_ENTER(devname) DEBUG(2, "%s: -> " __FUNCTION__ "()\n", devname);
+#define TRACE_EXIT(devname) DEBUG(2, "%s: <- " __FUNCTION__ "()\n", devname);
+
+extern struct net_device *alloc_orinocodev(int sizeof_card,
+ int (*hard_reset)(struct orinoco_private *));
+extern int __orinoco_up(struct net_device *dev);
+extern int __orinoco_down(struct net_device *dev);
+extern int orinoco_stop(struct net_device *dev);
+extern int orinoco_reinit_firmware(struct net_device *dev);
+extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *regs);
+
+/********************************************************************/
+/* Locking and synchronization functions */
+/********************************************************************/
+
+/* These functions *must* be inline or they will break horribly on
+ * SPARC, due to its weird semantics for save/restore flags. extern
+ * inline should prevent the kernel from linking or module from
+ * loading if they are not inlined. */
+extern inline int orinoco_lock(struct orinoco_private *priv, /* take priv->lock with irqs saved; -EBUSY if hardware is unavailable */
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&priv->lock, *flags);
+ if (priv->hw_unavailable) {
+ printk(KERN_DEBUG "orinoco_lock() called with hw_unavailable (dev=%p)\n",
+ priv->ndev);
+ spin_unlock_irqrestore(&priv->lock, *flags); /* drop the lock again before failing */
+ return -EBUSY;
+ }
+ return 0;
+}
+
+extern inline void orinoco_unlock(struct orinoco_private *priv, /* release the lock taken by a successful orinoco_lock() */
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&priv->lock, *flags);
+}
+
+#endif /* _ORINOCO_H */
diff --git a/linux/pcmcia-cs/wireless/orinoco_cs.c b/linux/pcmcia-cs/wireless/orinoco_cs.c
new file mode 100644
index 0000000..a3f6357
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/orinoco_cs.c
@@ -0,0 +1,705 @@
+/* orinoco_cs.c 0.13e - (formerly known as dldwd_cs.c)
+ *
+ * A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
+ * as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
+ * EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and others).
+ * It should also be usable on various Prism II based cards such as the
+ * Linksys, D-Link and Farallon Skyline. It should also work on Symbol
+ * cards such as the 3Com AirConnect and Ericsson WLAN.
+ *
+ * Copyright notice & release notes in file orinoco.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "orinoco.h"
+
+/********************************************************************/
+/* Module stuff */
+/********************************************************************/
+
+MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco, Prism II based and similar wireless cards");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual MPL/GPL");
+#endif
+
+/* Module parameters */
+
+/* The old way: bit map of interrupts to choose from */
+/* This means pick from 15, 14, 12, 11, 10, 9, 7, 5, 4, and 3 */
+static uint irq_mask = 0xdeb8;
+/* Newer, simpler way of listing specific interrupts */
+static int irq_list[4] = { -1 };
+
+/* Some D-Link cards have buggy CIS. They do work at 5v properly, but
+ * don't have any CIS entry for it. This workaround it... */
+static int ignore_cis_vcc; /* = 0 */
+
+MODULE_PARM(irq_mask, "i");
+MODULE_PARM(irq_list, "1-4i");
+MODULE_PARM(ignore_cis_vcc, "i");
+
+/********************************************************************/
+/* Magic constants */
+/********************************************************************/
+
+/*
+ * The dev_info variable is the "key" that is used to match up this
+ * device driver with appropriate cards, through the card
+ * configuration database.
+ */
+static dev_info_t dev_info = "orinoco_cs";
+
+/********************************************************************/
+/* Data structures */
+/********************************************************************/
+
+/* PCMCIA specific device information (goes in the card field of
+ * struct orinoco_private */
+struct orinoco_pccard {
+ dev_link_t link;
+ dev_node_t node;
+
+ /* Used to handle hard reset */
+ /* yuck, we need this hack to work around the insanity of the
+ * PCMCIA layer */
+ unsigned long hard_reset_in_progress;
+};
+
+/*
+ * A linked list of "instances" of the device. Each actual PCMCIA
+ * card corresponds to one device instance, and is described by one
+ * dev_link_t structure (defined in ds.h).
+ */
+static dev_link_t *dev_list; /* = NULL */
+
+/********************************************************************/
+/* Function prototypes */
+/********************************************************************/
+
+/* device methods */
+static int orinoco_cs_hard_reset(struct orinoco_private *priv);
+
+/* PCMCIA gumpf */
+static void orinoco_cs_config(dev_link_t * link);
+static void orinoco_cs_release(u_long arg);
+static int orinoco_cs_event(event_t event, int priority,
+ event_callback_args_t * args);
+
+static dev_link_t *orinoco_cs_attach(void);
+static void orinoco_cs_detach(dev_link_t *);
+
+/********************************************************************/
+/* Device methods */
+/********************************************************************/
+
+static int
+orinoco_cs_hard_reset(struct orinoco_private *priv)
+{
+ struct orinoco_pccard *card = priv->card;
+ dev_link_t *link = &card->link;
+ int err;
+
+ /* We need atomic ops here, because we're not holding the lock */
+ set_bit(0, &card->hard_reset_in_progress);
+
+ err = CardServices(ResetCard, link->handle, NULL);
+ if (err)
+ return err;
+
+ clear_bit(0, &card->hard_reset_in_progress);
+
+ return 0;
+}
+
+/********************************************************************/
+/* PCMCIA stuff */
+/********************************************************************/
+
+/* In 2.5 (as of 2.5.69 at least) there is a cs_error exported which
+ * does this, but it's not in 2.4 so we do our own for now. */
+static void
+orinoco_cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+
+/* Remove zombie instances (card removed, detach pending) */
+static void
+flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+
+ TRACE_ENTER("");
+
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK) {
+ orinoco_cs_detach(link);
+ }
+ }
+ TRACE_EXIT("");
+}
+
+/*
+ * This creates an "instance" of the driver, allocating local data
+ * structures for one device. The device is registered with Card
+ * Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a card
+ * insertion event. */
+static dev_link_t *
+orinoco_cs_attach(void)
+{
+ struct net_device *dev;
+ struct orinoco_private *priv;
+ struct orinoco_pccard *card;
+ dev_link_t *link;
+ client_reg_t client_reg;
+ int ret, i;
+
+ /* A bit of cleanup */
+ flush_stale_links();
+
+ dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset);
+ if (! dev)
+ return NULL;
+ priv = dev->priv;
+ card = priv->card;
+
+ /* Link both structures together */
+ link = &card->link;
+ link->priv = dev;
+
+ /* Initialize the dev_link_t structure */
+ init_timer(&link->release);
+ link->release.function = &orinoco_cs_release;
+ link->release.data = (u_long) link;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = NULL;
+
+ /* General socket configuration defaults can go here. In this
+ * client, we assume very little, and rely on the CIS for
+ * almost everything. In most clients, many details (i.e.,
+ * number, sizes, and attributes of IO windows) are fixed by
+ * the nature of the device, and can be hard-wired here. */
+ link->conf.Attributes = 0;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* Register with Card Services */
+ /* FIXME: need a lock? */
+ link->next = dev_list;
+ dev_list = link;
+
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &orinoco_cs_event;
+ client_reg.Version = 0x0210; /* FIXME: what does this mean? */
+ client_reg.event_callback_args.client_data = link;
+
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ orinoco_cs_error(link->handle, RegisterClient, ret);
+ orinoco_cs_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* orinoco_cs_attach */
+
+/*
+ * This deletes a driver "instance". The device is de-registered with
+ * Card Services. If it has been released, all local data structures
+ * are freed. Otherwise, the structures will be freed when the device
+ * is released.
+ */
+static void
+orinoco_cs_detach(dev_link_t * link)
+{
+ dev_link_t **linkp;
+ struct net_device *dev = link->priv;
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link)
+ break;
+ if (*linkp == NULL) {
+ BUG();
+ return;
+ }
+
+ if (link->state & DEV_CONFIG) {
+ orinoco_cs_release((u_long)link);
+ if (link->state & DEV_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, and free it */
+ *linkp = link->next;
+ DEBUG(0, "orinoco_cs: detach: link=%p link->dev=%p\n", link, link->dev);
+ if (link->dev) {
+ DEBUG(0, "orinoco_cs: About to unregister net device %p\n",
+ dev);
+ unregister_netdev(dev);
+ }
+ kfree(dev);
+} /* orinoco_cs_detach */
+
+/*
+ * orinoco_cs_config() is scheduled to run after a CARD_INSERTION
+ * event is received, to configure the PCMCIA socket, and to make the
+ * device available to the system.
+ */
+
+#define CS_CHECK(fn, args...) \
+ while ((last_ret=CardServices(last_fn=(fn),args))!=0) goto cs_failed
+
+#define CFG_CHECK(fn, args...) \
+ if (CardServices(fn, args) != 0) goto next_entry
+
+static void
+orinoco_cs_config(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ client_handle_t handle = link->handle;
+ struct orinoco_private *priv = dev->priv;
+ struct orinoco_pccard *card = priv->card;
+ hermes_t *hw = &priv->hw;
+ int last_fn, last_ret;
+ u_char buf[64];
+ config_info_t conf;
+ cisinfo_t info;
+ tuple_t tuple;
+ cisparse_t parse;
+
+ CS_CHECK(ValidateCIS, handle, &info);
+
+ /*
+ * This reads the card's CONFIG tuple to find its
+ * configuration registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up the current Vcc */
+ CS_CHECK(GetConfigurationInfo, handle, &conf);
+ link->conf.Vcc = conf.Vcc;
+
+ /*
+ * In this loop, we scan the CIS for configuration table
+ * entries, each of which describes a valid card
+ * configuration, including voltage, IO window, memory window,
+ * and interrupt settings.
+ *
+ * We make no assumptions about the card to be configured: we
+ * use just the information available in the CIS. In an ideal
+ * world, this would work for any PCMCIA card, but it requires
+ * a complete and accurate CIS. In practice, a driver usually
+ * "knows" most of these things without consulting the CIS,
+ * and most client drivers will only use the CIS to fill in
+ * implementation-defined details.
+ */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ while (1) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_cftable_entry_t dflt = { .index = 0 };
+
+ CFG_CHECK(GetTupleData, handle, &tuple);
+ CFG_CHECK(ParseTuple, handle, &tuple, &parse);
+
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
+ dflt = *cfg;
+ if (cfg->index == 0)
+ goto next_entry;
+ link->conf.ConfigIndex = cfg->index;
+
+ /* Does this card need audio output? */
+ if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ /* Use power settings for Vcc and Vpp if present */
+ /* Note that the CIS values need to be rescaled */
+ if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
+ DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
+ if (!ignore_cis_vcc)
+ goto next_entry;
+ }
+ } else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) {
+ DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
+ if(!ignore_cis_vcc)
+ goto next_entry;
+ }
+ }
+
+ if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+ else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
+
+ /* Do we need to allocate an interrupt? */
+ if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
+ link->conf.Attributes |= CONF_ENABLE_IRQ;
+
+ /* IO window settings */
+ link->io.NumPorts1 = link->io.NumPorts2 = 0;
+ if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
+ cistpl_io_t *io =
+ (cfg->io.nwin) ? &cfg->io : &dflt.io;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (!(io->flags & CISTPL_IO_8BIT))
+ link->io.Attributes1 =
+ IO_DATA_PATH_WIDTH_16;
+ if (!(io->flags & CISTPL_IO_16BIT))
+ link->io.Attributes1 =
+ IO_DATA_PATH_WIDTH_8;
+ link->io.IOAddrLines =
+ io->flags & CISTPL_IO_LINES_MASK;
+ link->io.BasePort1 = io->win[0].base;
+ link->io.NumPorts1 = io->win[0].len;
+ if (io->nwin > 1) {
+ link->io.Attributes2 =
+ link->io.Attributes1;
+ link->io.BasePort2 = io->win[1].base;
+ link->io.NumPorts2 = io->win[1].len;
+ }
+
+ /* This reserves IO space but doesn't actually enable it */
+ CFG_CHECK(RequestIO, link->handle, &link->io);
+ }
+
+
+ /* If we got this far, we're cool! */
+
+ break;
+
+ next_entry:
+ if (link->io.NumPorts1)
+ CardServices(ReleaseIO, link->handle, &link->io);
+ last_ret = CardServices(GetNextTuple, handle, &tuple);
+ if (last_ret == CS_NO_MORE_ITEMS) {
+ printk(KERN_ERR "GetNextTuple(). No matching CIS configuration, "
+ "maybe you need the ignore_cis_vcc=1 parameter.\n");
+ goto cs_failed;
+ }
+ }
+
+ /*
+ * Allocate an interrupt line. Note that this does not assign
+ * a handler to the interrupt, unless the 'Handler' member of
+ * the irq structure is initialized.
+ */
+ if (link->conf.Attributes & CONF_ENABLE_IRQ) {
+ int i;
+
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i=0; i<4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+
+ link->irq.Handler = orinoco_interrupt;
+ link->irq.Instance = dev;
+
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ }
+
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
+ hermes_struct_init(hw, link->io.BasePort1,
+ HERMES_IO, HERMES_16BIT_REGSPACING);
+
+ /*
+ * This actually configures the PCMCIA socket -- setting up
+ * the I/O windows and the interrupt mapping, and putting the
+ * card and host interface into "Memory and IO" mode.
+ */
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+
+ /* Ok, we have the configuration, prepare to register the netdev */
+ dev->base_addr = link->io.BasePort1;
+ dev->irq = link->irq.AssignedIRQ;
+ SET_MODULE_OWNER(dev);
+ card->node.major = card->node.minor = 0;
+
+ /* register_netdev will give us an ethX name */
+ dev->name[0] = '\0';
+ /* Tell the stack we exist */
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "orinoco_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ /* At this point, the dev_node_t structure(s) needs to be
+ * initialized and arranged in a linked list at link->dev. */
+ strcpy(card->node.dev_name, dev->name);
+ link->dev = &card->node; /* link->dev being non-NULL is also
+ used to indicate that the
+ net_device has been registered */
+ link->state &= ~DEV_CONFIG_PENDING;
+
+ /* Finally, report what we've done */
+ printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d",
+ dev->name, link->conf.ConfigIndex,
+ link->conf.Vcc / 10, link->conf.Vcc % 10);
+ if (link->conf.Vpp1)
+ printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
+ link->conf.Vpp1 % 10);
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ printk(", irq %d", link->irq.AssignedIRQ);
+ if (link->io.NumPorts1)
+ printk(", io 0x%04x-0x%04x", link->io.BasePort1,
+ link->io.BasePort1 + link->io.NumPorts1 - 1);
+ if (link->io.NumPorts2)
+ printk(" & 0x%04x-0x%04x", link->io.BasePort2,
+ link->io.BasePort2 + link->io.NumPorts2 - 1);
+ printk("\n");
+
+ return;
+
+ cs_failed:
+ orinoco_cs_error(link->handle, last_fn, last_ret);
+
+ failed:
+ orinoco_cs_release((u_long) link);
+} /* orinoco_cs_config */
+
+/*
+ * After a card is removed, orinoco_cs_release() will unregister the
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void
+orinoco_cs_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *) arg;
+ struct net_device *dev = link->priv;
+ struct orinoco_private *priv = dev->priv;
+ unsigned long flags;
+
+ /* We're committed to taking the device away now, so mark the
+ * hardware as unavailable */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->hw_unavailable++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Don't bother checking to see if these succeed or not */
+ CardServices(ReleaseConfiguration, link->handle);
+ if (link->io.NumPorts1)
+ CardServices(ReleaseIO, link->handle, &link->io);
+ if (link->irq.AssignedIRQ)
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+} /* orinoco_cs_release */
+
+/*
+ * The card status event handler. Mostly, this schedules other stuff
+ * to run after an event is received.
+ */
+static int
+orinoco_cs_event(event_t event, int priority,
+ event_callback_args_t * args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+ struct orinoco_private *priv = dev->priv;
+ struct orinoco_pccard *card = priv->card;
+ int err = 0;
+ unsigned long flags;
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ orinoco_lock(priv, &flags);
+
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
+
+ orinoco_unlock(priv, &flags);
+ }
+ break;
+
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ orinoco_cs_config(link);
+ break;
+
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ /* Mark the device as stopped, to block IO until later */
+ if (link->state & DEV_CONFIG) {
+ /* This is probably racy, but I can't think of
+ a better way, short of rewriting the PCMCIA
+ layer to not suck :-( */
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: %s: Error %d downing interface\n",
+ dev->name,
+ event == CS_EVENT_PM_SUSPEND ? "SUSPEND" : "RESET_PHYSICAL",
+ err);
+
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ /* FIXME: should we double check that this is
+ * the same card as we had before */
+ CardServices(RequestConfiguration, link->handle,
+ &link->conf);
+
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
+ dev->name, err);
+ break;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netif_device_attach(dev);
+ priv->hw_unavailable--;
+
+ if (priv->open && ! priv->hw_unavailable) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card\n",
+ dev->name, err);
+
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+ break;
+ }
+
+ return err;
+} /* orinoco_cs_event */
+
+/********************************************************************/
+/* Module initialization */
+/********************************************************************/
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = "orinoco_cs.c 0.13e (David Gibson <hermes@gibson.dropbear.id.au> and others)";
+
+static int __init
+init_orinoco_cs(void)
+{
+ servinfo_t serv;
+
+ printk(KERN_DEBUG "%s\n", version);
+
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "orinoco_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+
+ register_pccard_driver(&dev_info, &orinoco_cs_attach, &orinoco_cs_detach);
+
+ return 0;
+}
+
+static void __exit
+exit_orinoco_cs(void)
+{
+ unregister_pccard_driver(&dev_info);
+
+ if (dev_list)
+ DEBUG(0, "orinoco_cs: Removing leftover devices.\n");
+ while (dev_list != NULL) {
+ if (dev_list->state & DEV_CONFIG)
+ orinoco_cs_release((u_long) dev_list);
+ orinoco_cs_detach(dev_list);
+ }
+}
+
+module_init(init_orinoco_cs);
+module_exit(exit_orinoco_cs);
+
diff --git a/linux/src/COPYING b/linux/src/COPYING
new file mode 100644
index 0000000..6dc77dc
--- /dev/null
+++ b/linux/src/COPYING
@@ -0,0 +1,351 @@
+
+ NOTE! This copyright does *not* cover user programs that use kernel
+ services by normal system calls - this is merely considered normal use
+ of the kernel, and does *not* fall under the heading of "derived work".
+ Also note that the GPL below is copyrighted by the Free Software
+ Foundation, but the instance of code that it refers to (the Linux
+ kernel) is copyrighted by me and others who actually wrote it.
+
+ Linus Torvalds
+
+----------------------------------------
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 675 Mass Ave, Cambridge, MA 02139, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ Appendix: How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) 19yy <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) 19yy name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/linux/src/arch/i386/kernel/bios32.c b/linux/src/arch/i386/kernel/bios32.c
new file mode 100644
index 0000000..bb0e89c
--- /dev/null
+++ b/linux/src/arch/i386/kernel/bios32.c
@@ -0,0 +1,916 @@
+/*
+ * bios32.c - BIOS32, PCI BIOS functions.
+ *
+ * $Id: bios32.c,v 1.1 1999/04/26 05:50:57 tb Exp $
+ *
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * Drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ *
+ *
+ * CHANGELOG :
+ * Jun 17, 1994 : Modified to accommodate the broken pre-PCI BIOS SPECIFICATION
+ * Revision 2.0 present on <thys@dennis.ee.up.ac.za>'s ASUS mainboard.
+ *
+ * Jan 5, 1995 : Modified to probe PCI hardware at boot time by Frederic
+ * Potter, potter@cao-vlsi.ibp.fr
+ *
+ * Jan 10, 1995 : Modified to store the information about configured pci
+ * devices into a list, which can be accessed via /proc/pci by
+ * Curtis Varner, cvarner@cs.ucr.edu
+ *
+ * Jan 12, 1995 : CPU-PCI bridge optimization support by Frederic Potter.
+ * Alpha version. Intel & UMC chipset support only.
+ *
+ * Apr 16, 1995 : Source merge with the DEC Alpha PCI support. Most of the code
+ * moved to drivers/pci/pci.c.
+ *
+ * Dec 7, 1996 : Added support for direct configuration access of boards
+ * with Intel compatible access schemes (tsbogend@alpha.franken.de)
+ *
+ * Feb 3, 1997 : Set internal functions to static, save/restore flags
+ * avoid dead locks reading broken PCI BIOS, werner@suse.de
+ *
+ * Apr 26, 1997 : Fixed case when there is BIOS32, but not PCI BIOS
+ * (mj@atrey.karlin.mff.cuni.cz)
+ *
+ * May 7, 1997 : Added some missing cli()'s. [mj]
+ *
+ * Jun 20, 1997 : Corrected problems in "conf1" type accesses.
+ * (paubert@iram.es)
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX
+#define PCIBIOS_PCI_BIOS_PRESENT 0xb101
+#define PCIBIOS_FIND_PCI_DEVICE 0xb102
+#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103
+#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
+#define PCIBIOS_READ_CONFIG_BYTE 0xb108
+#define PCIBIOS_READ_CONFIG_WORD 0xb109
+#define PCIBIOS_READ_CONFIG_DWORD 0xb10a
+#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b
+#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c
+#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d
+
+
+/* BIOS32 signature: "_32_" */
+#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
+
+/* PCI signature: "PCI " */
+#define PCI_SIGNATURE (('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24))
+
+/* PCI service signature: "$PCI" */
+#define PCI_SERVICE (('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24))
+
+/*
+ * This is the standard structure used to identify the entry point
+ * to the BIOS32 Service Directory, as documented in
+ * Standard BIOS 32-bit Service Directory Proposal
+ * Revision 0.4 May 24, 1993
+ * Phoenix Technologies Ltd.
+ * Norwood, MA
+ * and the PCI BIOS specification.
+ */
+
+union bios32 {
+ struct {
+ unsigned long signature; /* _32_ */
+ unsigned long entry; /* 32 bit physical address */
+ unsigned char revision; /* Revision level, 0 */
+ unsigned char length; /* Length in paragraphs should be 01 */
+ unsigned char checksum; /* All bytes must add up to zero */
+ unsigned char reserved[5]; /* Must be zero */
+ } fields;
+ char chars[16];
+};
+
+#ifdef CONFIG_PCI
+/*
+ * Physical address of the service directory. I don't know if we're
+ * allowed to have more than one of these or not, so just in case
+ * we'll make pcibios_present() take a memory start parameter and store
+ * the array there.
+ */
+
+static unsigned long bios32_entry = 0;
+static struct {
+ unsigned long address;
+ unsigned short segment;
+} bios32_indirect = { 0, KERNEL_CS };
+
+
+/*
+ * function table for accessing PCI configuration space
+ */
+struct pci_access {
+ int (*find_device)(unsigned short, unsigned short, unsigned short, unsigned char *, unsigned char *);
+ int (*find_class)(unsigned int, unsigned short, unsigned char *, unsigned char *);
+ int (*read_config_byte)(unsigned char, unsigned char, unsigned char, unsigned char *);
+ int (*read_config_word)(unsigned char, unsigned char, unsigned char, unsigned short *);
+ int (*read_config_dword)(unsigned char, unsigned char, unsigned char, unsigned int *);
+ int (*write_config_byte)(unsigned char, unsigned char, unsigned char, unsigned char);
+ int (*write_config_word)(unsigned char, unsigned char, unsigned char, unsigned short);
+ int (*write_config_dword)(unsigned char, unsigned char, unsigned char, unsigned int);
+};
+
+/*
+ * pointer to selected PCI access function table
+ */
+static struct pci_access *access_pci = NULL;
+
+
+
+/*
+ * Returns the entry point for the given service, NULL on error
+ */
+
+static unsigned long bios32_service(unsigned long service)
+{
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%edi); cld"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+ "D" (&bios32_indirect));
+ restore_flags(flags);
+
+ switch (return_code) {
+ case 0:
+ return address + entry;
+ case 0x80: /* Not present */
+ printk("bios32_service(0x%lx) : not present\n", service);
+ return 0;
+ default: /* Shouldn't happen */
+ printk("bios32_service(0x%lx) : returned 0x%x, mail drew@colorado.edu\n",
+ service, return_code);
+ return 0;
+ }
+}
+
+static long pcibios_entry = 0;
+static struct {
+ unsigned long address;
+ unsigned short segment;
+} pci_indirect = { 0, KERNEL_CS };
+
+
+static int check_pcibios(void)
+{
+ unsigned long signature;
+ unsigned char present_status;
+ unsigned char major_revision;
+ unsigned char minor_revision;
+ unsigned long flags;
+ int pack;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+ pci_indirect.address = phystokv(pcibios_entry);
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%edi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:\tshl $8, %%eax\n\t"
+ "movw %%bx, %%ax"
+ : "=d" (signature),
+ "=a" (pack)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+ "D" (&pci_indirect)
+ : "bx", "cx");
+ restore_flags(flags);
+
+ present_status = (pack >> 16) & 0xff;
+ major_revision = (pack >> 8) & 0xff;
+ minor_revision = pack & 0xff;
+ if (present_status || (signature != PCI_SIGNATURE)) {
+ printk ("pcibios_init : %s : BIOS32 Service Directory says PCI BIOS is present,\n"
+ " but PCI_BIOS_PRESENT subfunction fails with present status of 0x%x\n"
+ " and signature of 0x%08lx (%c%c%c%c). mail drew@Colorado.EDU\n",
+ (signature == PCI_SIGNATURE) ? "WARNING" : "ERROR",
+ present_status, signature,
+ (char) (signature >> 0), (char) (signature >> 8),
+ (char) (signature >> 16), (char) (signature >> 24));
+
+ if (signature != PCI_SIGNATURE)
+ pcibios_entry = 0;
+ }
+ if (pcibios_entry) {
+ printk ("pcibios_init : PCI BIOS revision %x.%02x entry at 0x%lx\n",
+ major_revision, minor_revision, pcibios_entry);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int pci_bios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *device_fn)
+{
+ unsigned long bx;
+ unsigned long ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__ ("lcall *(%%edi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=b" (bx),
+ "=a" (ret)
+ : "1" (PCIBIOS_FIND_PCI_CLASS_CODE),
+ "c" (class_code),
+ "S" ((int) index),
+ "D" (&pci_indirect));
+ restore_flags(flags);
+ *bus = (bx >> 8) & 0xff;
+ *device_fn = bx & 0xff;
+ return (int) (ret & 0xff00) >> 8;
+}
+
+
+static int pci_bios_find_device (unsigned short vendor, unsigned short device_id,
+ unsigned short index, unsigned char *bus, unsigned char *device_fn)
+{
+ unsigned short bx;
+ unsigned short ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%edi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=b" (bx),
+ "=a" (ret)
+ : "1" (PCIBIOS_FIND_PCI_DEVICE),
+ "c" (device_id),
+ "d" (vendor),
+ "S" ((int) index),
+ "D" (&pci_indirect));
+ restore_flags(flags);
+ *bus = (bx >> 8) & 0xff;
+ *device_fn = bx & 0xff;
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_read_config_byte(unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_read_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_read_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_write_config_byte (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_BYTE),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_write_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_WORD),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_write_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_DWORD),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+/*
+ * function table for BIOS32 access
+ */
+static struct pci_access pci_bios_access = {
+ pci_bios_find_device,
+ pci_bios_find_class,
+ pci_bios_read_config_byte,
+ pci_bios_read_config_word,
+ pci_bios_read_config_dword,
+ pci_bios_write_config_byte,
+ pci_bios_write_config_word,
+ pci_bios_write_config_dword
+};
+
+
+
+/*
+ * Given the vendor and device ids, find the n'th instance of that device
+ * in the system.
+ */
+static int pci_direct_find_device (unsigned short vendor, unsigned short device_id,
+ unsigned short index, unsigned char *bus,
+ unsigned char *devfn)
+{
+ unsigned int curr = 0;
+ struct pci_dev *dev;
+
+ for (dev = pci_devices; dev; dev = dev->next) {
+ if (dev->vendor == vendor && dev->device == device_id) {
+ if (curr == index) {
+ *devfn = dev->devfn;
+ *bus = dev->bus->number;
+ return PCIBIOS_SUCCESSFUL;
+ }
+ ++curr;
+ }
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+
+/*
+ * Given the class, find the n'th instance of that device
+ * in the system.
+ */
+static int pci_direct_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *devfn)
+{
+ unsigned int curr = 0;
+ struct pci_dev *dev;
+
+ for (dev = pci_devices; dev; dev = dev->next) {
+ if (dev->class == class_code) {
+ if (curr == index) {
+ *devfn = dev->devfn;
+ *bus = dev->bus->number;
+ return PCIBIOS_SUCCESSFUL;
+ }
+ ++curr;
+ }
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+/*
+ * Functions for accessing PCI configuration space with type 1 accesses
+ */
+#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (bus << 16) | (device_fn << 8) | (where & ~3))
+
+static int pci_conf1_read_config_byte(unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char *value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ *value = inb(0xCFC + (where&3));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_read_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short *value)
+{
+ unsigned long flags;
+
+ if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ *value = inw(0xCFC + (where&2));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_read_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int *value)
+{
+ unsigned long flags;
+
+ if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ *value = inl(0xCFC);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config_byte (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ outb(value, 0xCFC + (where&3));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config_word (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned short value)
+{
+ unsigned long flags;
+
+ if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ outw(value, 0xCFC + (where&2));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int value)
+{
+ unsigned long flags;
+
+ if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ outl(value, 0xCFC);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+#undef CONFIG_CMD
+
+/*
+ * functiontable for type 1
+ */
+static struct pci_access pci_direct_conf1 = {
+ pci_direct_find_device,
+ pci_direct_find_class,
+ pci_conf1_read_config_byte,
+ pci_conf1_read_config_word,
+ pci_conf1_read_config_dword,
+ pci_conf1_write_config_byte,
+ pci_conf1_write_config_word,
+ pci_conf1_write_config_dword
+};
+
+/*
+ * Functions for accessing PCI configuration space with type 2 accesses
+ */
+#define IOADDR(devfn, where) ((0xC000 | ((devfn & 0x78) << 5)) + where)
+#define FUNC(devfn) (((devfn & 7) << 1) | 0xf0)
+
+static int pci_conf2_read_config_byte(unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char *value)
+{
+ unsigned long flags;
+
+ if (device_fn & 0x80)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ *value = inb(IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_read_config_word (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned short *value)
+{
+ unsigned long flags;
+
+ if (device_fn & 0x80)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ *value = inw(IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_read_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int *value)
+{
+ unsigned long flags;
+
+ if (device_fn & 0x80)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ *value = inl (IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_write_config_byte (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ outb (value, IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_write_config_word (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned short value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ outw (value, IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_write_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ outl (value, IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+#undef IOADDR
+#undef FUNC
+
+/*
+ * functiontable for type 2
+ */
+static struct pci_access pci_direct_conf2 = {
+ pci_direct_find_device,
+ pci_direct_find_class,
+ pci_conf2_read_config_byte,
+ pci_conf2_read_config_word,
+ pci_conf2_read_config_dword,
+ pci_conf2_write_config_byte,
+ pci_conf2_write_config_word,
+ pci_conf2_write_config_dword
+};
+
+
+static struct pci_access *check_direct_pci(void)
+{
+ unsigned int tmp;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+
+ /*
+ * check if configuration type 1 works
+ */
+ outb (0x01, 0xCFB);
+ tmp = inl (0xCF8);
+ outl (0x80000000, 0xCF8);
+ if (inl (0xCF8) == 0x80000000) {
+ outl (tmp, 0xCF8);
+ restore_flags(flags);
+ printk("pcibios_init: Using configuration type 1\n");
+ return &pci_direct_conf1;
+ }
+ outl (tmp, 0xCF8);
+
+ /*
+ * check if configuration type 2 works
+ */
+ outb (0x00, 0xCFB);
+ outb (0x00, 0xCF8);
+ outb (0x00, 0xCFA);
+ if (inb (0xCF8) == 0x00 && inb (0xCFB) == 0x00) {
+ restore_flags(flags);
+ printk("pcibios_init: Using configuration type 2\n");
+ return &pci_direct_conf2;
+ }
+ restore_flags(flags);
+ printk("pcibios_init: Not supported chipset for direct PCI access !\n");
+ return NULL;
+}
+
+
+/*
+ * access defined pcibios functions via
+ * the function table
+ */
+
+int pcibios_present(void)
+{
+ return access_pci ? 1 : 0;
+}
+
+int pcibios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *device_fn)
+{
+ if (access_pci && access_pci->find_class)
+ return access_pci->find_class(class_code, index, bus, device_fn);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_find_device (unsigned short vendor, unsigned short device_id,
+ unsigned short index, unsigned char *bus, unsigned char *device_fn)
+{
+ if (access_pci && access_pci->find_device)
+ return access_pci->find_device(vendor, device_id, index, bus, device_fn);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_read_config_byte (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char *value)
+{
+ if (access_pci && access_pci->read_config_byte)
+ return access_pci->read_config_byte(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_read_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short *value)
+{
+ if (access_pci && access_pci->read_config_word)
+ return access_pci->read_config_word(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_read_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int *value)
+{
+ if (access_pci && access_pci->read_config_dword)
+ return access_pci->read_config_dword(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_write_config_byte (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char value)
+{
+ if (access_pci && access_pci->write_config_byte)
+ return access_pci->write_config_byte(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_write_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short value)
+{
+ if (access_pci && access_pci->write_config_word)
+ return access_pci->write_config_word(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_write_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int value)
+{
+ if (access_pci && access_pci->write_config_dword)
+ return access_pci->write_config_dword(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+const char *pcibios_strerror (int error)
+{
+ static char buf[80];
+
+ switch (error) {
+ case PCIBIOS_SUCCESSFUL:
+ return "SUCCESSFUL";
+
+ case PCIBIOS_FUNC_NOT_SUPPORTED:
+ return "FUNC_NOT_SUPPORTED";
+
+ case PCIBIOS_BAD_VENDOR_ID:
+ return "SUCCESSFUL";
+
+ case PCIBIOS_DEVICE_NOT_FOUND:
+ return "DEVICE_NOT_FOUND";
+
+ case PCIBIOS_BAD_REGISTER_NUMBER:
+ return "BAD_REGISTER_NUMBER";
+
+ case PCIBIOS_SET_FAILED:
+ return "SET_FAILED";
+
+ case PCIBIOS_BUFFER_TOO_SMALL:
+ return "BUFFER_TOO_SMALL";
+
+ default:
+ sprintf (buf, "UNKNOWN RETURN 0x%x", error);
+ return buf;
+ }
+}
+
+
+unsigned long pcibios_fixup(unsigned long mem_start, unsigned long mem_end)
+{
+ return mem_start;
+}
+
+#endif
+
+unsigned long pcibios_init(unsigned long memory_start, unsigned long memory_end)
+{
+#ifdef CONFIG_PCI
+ union bios32 *check;
+ unsigned char sum;
+ int i, length;
+
+ /*
+ * Follow the standard procedure for locating the BIOS32 Service
+ * directory by scanning the permissible address range from
+ * 0xe0000 through 0xfffff for a valid BIOS32 structure.
+ *
+ */
+
+ for (check = (union bios32 *) phystokv(0xe0000);
+ check <= (union bios32 *) phystokv(0xffff0);
+ ++check) {
+ if (check->fields.signature != BIOS32_SIGNATURE)
+ continue;
+ length = check->fields.length * 16;
+ if (!length)
+ continue;
+ sum = 0;
+ for (i = 0; i < length ; ++i)
+ sum += check->chars[i];
+ if (sum != 0)
+ continue;
+ if (check->fields.revision != 0) {
+ printk("pcibios_init : unsupported revision %d at 0x%lx, mail drew@colorado.edu\n",
+ check->fields.revision, _kvtophys(check));
+ continue;
+ }
+ printk ("pcibios_init : BIOS32 Service Directory structure at 0x%lx\n", _kvtophys(check));
+ if (!bios32_entry) {
+ if (check->fields.entry >= 0x100000) {
+ printk("pcibios_init: entry in high memory, trying direct PCI access\n");
+ access_pci = check_direct_pci();
+ } else {
+ bios32_entry = check->fields.entry;
+ printk ("pcibios_init : BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
+ bios32_indirect.address = phystokv(bios32_entry);
+ }
+ }
+ }
+ if (bios32_entry && check_pcibios())
+ access_pci = &pci_bios_access;
+ else
+ access_pci = check_direct_pci();
+#endif
+ return memory_start;
+}
diff --git a/linux/src/arch/i386/kernel/irq.c b/linux/src/arch/i386/kernel/irq.c
new file mode 100644
index 0000000..6db6115
--- /dev/null
+++ b/linux/src/arch/i386/kernel/irq.c
@@ -0,0 +1,582 @@
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+/*
+ * IRQ's are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/smp.h>
+
+#define CR0_NE 32
+
+static unsigned char cache_21 = 0xff;
+static unsigned char cache_A1 = 0xff;
+
+#ifdef __SMP_PROF__
+static unsigned int int_count[NR_CPUS][NR_IRQS] = {{0},};
+#endif
+
+static inline void mask_irq(unsigned int irq_nr)
+{
+ unsigned char mask;
+
+ mask = 1 << (irq_nr & 7);
+ if (irq_nr < 8) {
+ cache_21 |= mask;
+ outb(cache_21,0x21);
+ } else {
+ cache_A1 |= mask;
+ outb(cache_A1,0xA1);
+ }
+}
+
+static inline void unmask_irq(unsigned int irq_nr)
+{
+ unsigned char mask;
+
+ mask = ~(1 << (irq_nr & 7));
+ if (irq_nr < 8) {
+ cache_21 &= mask;
+ outb(cache_21,0x21);
+ } else {
+ cache_A1 &= mask;
+ outb(cache_A1,0xA1);
+ }
+}
+
/*
 * Externally visible wrapper: mask an IRQ line with local
 * interrupts disabled, so the read-modify-write of the cached
 * PIC mask cannot race with an interrupt handler.
 */
void disable_irq(unsigned int irq_nr)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	mask_irq(irq_nr);
	restore_flags(flags);
}
+
/*
 * Externally visible wrapper: unmask an IRQ line with local
 * interrupts disabled (mirror of disable_irq).
 */
void enable_irq(unsigned int irq_nr)
{
	unsigned long flags;
	save_flags(flags);
	cli();
	unmask_irq(irq_nr);
	restore_flags(flags);
}
+
+/*
+ * This builds up the IRQ handler stubs using some ugly macros in irq.h
+ *
+ * These macros create the low-level assembly IRQ routines that do all
+ * the operations that are needed to keep the AT interrupt-controller
+ * happy. They are also written to be fast - and to disable interrupts
+ * as little as humanly possible.
+ *
+ * NOTE! These macros expand to three different handlers for each line: one
+ * complete handler that does all the fancy stuff (including signal handling),
+ * and one fast handler that is meant for simple IRQ's that want to be
+ * atomic. The specific handler is chosen depending on the SA_INTERRUPT
+ * flag when installing a handler. Finally, one "bad interrupt" handler, that
+ * is used when no handler is present.
+ *
+ * The timer interrupt is handled specially to insure that the jiffies
+ * variable is updated at all times. Specifically, the timer interrupt is
+ * just like the complete handlers except that it is invoked with interrupts
+ * disabled and should never re-enable them. If other interrupts were
+ * allowed to be processed while the timer interrupt is active, then the
+ * other interrupts would have to avoid using the jiffies variable for delay
+ * and interval timing operations to avoid hanging the system.
+ */
+BUILD_TIMER_IRQ(FIRST,0,0x01)
+BUILD_IRQ(FIRST,1,0x02)
+BUILD_IRQ(FIRST,2,0x04)
+BUILD_IRQ(FIRST,3,0x08)
+BUILD_IRQ(FIRST,4,0x10)
+BUILD_IRQ(FIRST,5,0x20)
+BUILD_IRQ(FIRST,6,0x40)
+BUILD_IRQ(FIRST,7,0x80)
+BUILD_IRQ(SECOND,8,0x01)
+BUILD_IRQ(SECOND,9,0x02)
+BUILD_IRQ(SECOND,10,0x04)
+BUILD_IRQ(SECOND,11,0x08)
+BUILD_IRQ(SECOND,12,0x10)
+#ifdef __SMP__
+BUILD_MSGIRQ(SECOND,13,0x20)
+#else
+BUILD_IRQ(SECOND,13,0x20)
+#endif
+BUILD_IRQ(SECOND,14,0x40)
+BUILD_IRQ(SECOND,15,0x80)
+#ifdef __SMP__
+BUILD_RESCHEDIRQ(16)
+#endif
+
+/*
+ * Pointers to the low-level handlers: first the general ones, then the
+ * fast ones, then the bad ones.
+ */
+static void (*interrupt[17])(void) = {
+ IRQ0_interrupt, IRQ1_interrupt, IRQ2_interrupt, IRQ3_interrupt,
+ IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
+ IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
+ IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt
+#ifdef __SMP__
+ ,IRQ16_interrupt
+#endif
+};
+
+static void (*fast_interrupt[16])(void) = {
+ fast_IRQ0_interrupt, fast_IRQ1_interrupt,
+ fast_IRQ2_interrupt, fast_IRQ3_interrupt,
+ fast_IRQ4_interrupt, fast_IRQ5_interrupt,
+ fast_IRQ6_interrupt, fast_IRQ7_interrupt,
+ fast_IRQ8_interrupt, fast_IRQ9_interrupt,
+ fast_IRQ10_interrupt, fast_IRQ11_interrupt,
+ fast_IRQ12_interrupt, fast_IRQ13_interrupt,
+ fast_IRQ14_interrupt, fast_IRQ15_interrupt
+};
+
+static void (*bad_interrupt[16])(void) = {
+ bad_IRQ0_interrupt, bad_IRQ1_interrupt,
+ bad_IRQ2_interrupt, bad_IRQ3_interrupt,
+ bad_IRQ4_interrupt, bad_IRQ5_interrupt,
+ bad_IRQ6_interrupt, bad_IRQ7_interrupt,
+ bad_IRQ8_interrupt, bad_IRQ9_interrupt,
+ bad_IRQ10_interrupt, bad_IRQ11_interrupt,
+ bad_IRQ12_interrupt, bad_IRQ13_interrupt,
+ bad_IRQ14_interrupt, bad_IRQ15_interrupt
+};
+
+/*
+ * Initial irq handlers.
+ */
+
/* Placeholder handler: claims an IRQ (e.g. the cascade line) but does nothing. */
static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+
+#ifdef __SMP__
+
+/*
+ * On SMP boards, irq13 is used for interprocessor interrupts (IPI's).
+ */
+static struct irqaction irq13 = { smp_message_irq, SA_INTERRUPT, 0, "IPI", NULL, NULL };
+
+#else
+
+/*
+ * Note that on a 486, we don't want to do a SIGFPE on a irq13
+ * as the irq is unreliable, and exception 16 works correctly
+ * (ie as explained in the intel literature). On a 386, you
+ * can't use exception 16 due to bad IBM design, so we have to
+ * rely on the less exact irq13.
+ *
+ * Careful.. Not only is IRQ13 unreliable, but it is also
+ * leads to races. IBM designers who came up with it should
+ * be shot.
+ */
+
+
/*
 * IRQ13 handler (non-SMP): the legacy FPU error interrupt.
 * The write to port 0xF0 clears the coprocessor's busy latch;
 * the error is then reported unless IRQ13 reporting is disabled
 * or there is no hardware FPU.
 */
static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
	outb(0,0xF0);
	if (ignore_irq13 || !hard_math)
		return;
	math_error();
}
+
+static struct irqaction irq13 = { math_error_irq, 0, 0, "math error", NULL, NULL };
+
+#endif
+
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
+
+static struct irqaction *irq_action[16] = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
/*
 * Format a human-readable table of installed interrupt handlers
 * into buf (one line per IRQ: count, '+' for SA_INTERRUPT "fast"
 * handlers, and the device names of all sharers).  Returns the
 * number of bytes written.
 */
int get_irq_list(char *buf)
{
	int i, len = 0;
	struct irqaction * action;

	for (i = 0 ; i < 16 ; i++) {
		action = irq_action[i];
		if (!action)
			continue;
		len += sprintf(buf+len, "%2d: %10u %c %s",
			i, kstat.interrupts[i],
			(action->flags & SA_INTERRUPT) ? '+' : ' ',
			action->name);
		/* Append the names of any handlers sharing this line. */
		for (action=action->next; action; action = action->next) {
			len += sprintf(buf+len, ",%s %s",
				(action->flags & SA_INTERRUPT) ? " +" : "",
				action->name);
		}
		len += sprintf(buf+len, "\n");
	}
/*
 * Linus - should you add NMI counts here ?????
 */
#ifdef __SMP_PROF__
	len+=sprintf(buf+len, "IPI: %8lu received\n",
		ipi_count);
#endif
	return len;
}
+
+#ifdef __SMP_PROF__
+
+int get_smp_prof_list(char *buf) {
+ int i,j, len = 0;
+ struct irqaction * action;
+ unsigned long sum_spins = 0;
+ unsigned long sum_spins_syscall = 0;
+ unsigned long sum_spins_sys_idle = 0;
+ unsigned long sum_smp_idle_count = 0;
+
+ for (i=0;i<smp_num_cpus;i++) {
+ int cpunum = cpu_logical_map[i];
+ sum_spins+=smp_spins[cpunum];
+ sum_spins_syscall+=smp_spins_syscall[cpunum];
+ sum_spins_sys_idle+=smp_spins_sys_idle[cpunum];
+ sum_smp_idle_count+=smp_idle_count[cpunum];
+ }
+
+ len += sprintf(buf+len,"CPUS: %10i \n", smp_num_cpus);
+ len += sprintf(buf+len," SUM ");
+ for (i=0;i<smp_num_cpus;i++)
+ len += sprintf(buf+len," P%1d ",cpu_logical_map[i]);
+ len += sprintf(buf+len,"\n");
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = *(i + irq_action);
+ if (!action || !action->handler)
+ continue;
+ len += sprintf(buf+len, "%3d: %10d ",
+ i, kstat.interrupts[i]);
+ for (j=0;j<smp_num_cpus;j++)
+ len+=sprintf(buf+len, "%10d ",
+ int_count[cpu_logical_map[j]][i]);
+ len += sprintf(buf+len, "%c %s",
+ (action->flags & SA_INTERRUPT) ? '+' : ' ',
+ action->name);
+ for (action=action->next; action; action = action->next) {
+ len += sprintf(buf+len, ",%s %s",
+ (action->flags & SA_INTERRUPT) ? " +" : "",
+ action->name);
+ }
+ len += sprintf(buf+len, "\n");
+ }
+ len+=sprintf(buf+len, "LCK: %10lu",
+ sum_spins);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_spins[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," spins from int\n");
+
+ len+=sprintf(buf+len, "LCK: %10lu",
+ sum_spins_syscall);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_spins_syscall[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," spins from syscall\n");
+
+ len+=sprintf(buf+len, "LCK: %10lu",
+ sum_spins_sys_idle);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_spins_sys_idle[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," spins from sysidle\n");
+ len+=sprintf(buf+len,"IDLE %10lu",sum_smp_idle_count);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_idle_count[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," idle ticks\n");
+
+ len+=sprintf(buf+len, "IPI: %10lu received\n",
+ ipi_count);
+
+ return len;
+}
+#endif
+
+
+
+/*
+ * do_IRQ handles IRQ's that have been installed without the
+ * SA_INTERRUPT flag: it uses the full signal-handling return
+ * and runs with other interrupts enabled. All relatively slow
+ * IRQ's should use this format: notably the keyboard/timer
+ * routines.
+ */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction * action = *(irq + irq_action);	/* == irq_action[irq] */
	int do_random = 0;
	int c,intm,mask;	/* cached mask, live PIC mask, this IRQ's bit */
#ifdef IRQ_DEBUG
	static int count;
	if (smp_processor_id() != 0 && count++ < 1000)
		printk("IRQ %d: done by CPU %d\n",irq,smp_processor_id());
#endif
	/* Pick the PIC (master 0x21 / slave 0xA1) this line belongs to. */
	if (irq >= 8) {
		c = cache_A1;
		intm = inb(0xA1);
		mask = 1 << (irq - 8);
	} else {
		c = cache_21;
		intm = inb(0x21);
		mask = 1 << irq;
	}
	/* If the line is not masked in both our software cache and the
	   hardware mask register, this interrupt is spurious/unexpected:
	   drop it rather than dispatch handlers. */
	if (!(c & mask) || !(intm & mask)) {
#ifdef IRQ_DEBUG
		printk("IRQ %d (proc %d):cache_x1=0x%x,INT mask=0x%x\n", irq, smp_processor_id(),c,intm);
#endif
		/* better to return because the interrupt may be asserted again,
		   the bad thing is that we may lose some interrupts */
		return;
	}
#ifdef __SMP__
	/* Sanity check: only the active kernel processor may take IRQs. */
	if(smp_threads_ready && active_kernel_processor!=smp_processor_id())
		panic("IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
#endif

	kstat.interrupts[irq]++;
#ifdef __SMP_PROF__
	int_count[smp_processor_id()][irq]++;
#endif
	/* Walk the (possibly shared) handler chain for this line. */
	while (action) {
		do_random |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	}
	/* Feed the entropy pool if any handler asked for it. */
	if (do_random & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
}
+
+/*
+ * do_fast_IRQ handles IRQ's that don't need the fancy interrupt return
+ * stuff - the handler is also running with interrupts disabled unless
+ * it explicitly enables them later.
+ */
/*
 * do_fast_IRQ handles IRQ's that don't need the fancy interrupt return
 * stuff - the handler is also running with interrupts disabled unless
 * it explicitly enables them later.
 */
asmlinkage void do_fast_IRQ(int irq)
{
	struct irqaction * action = *(irq + irq_action);	/* == irq_action[irq] */
	int do_random = 0;

#ifdef __SMP__
	/* IRQ 13 is allowed - that's a flush tlb */
	if(smp_threads_ready && active_kernel_processor!=smp_processor_id() && irq!=13)
		panic("fast_IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
#endif

	kstat.interrupts[irq]++;
#ifdef __SMP_PROF__
	int_count[smp_processor_id()][irq]++;
#endif
	/* Dispatch the handler chain; fast handlers get no pt_regs. */
	while (action) {
		do_random |= action->flags;
		action->handler(irq, action->dev_id, NULL);
		action = action->next;
	}
	if (do_random & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
}
+
/*
 * Install an irqaction on a line.  Sharing is permitted only if
 * every party sets SA_SHIRQ and all agree on SA_INTERRUPT (fast
 * vs. slow handlers use different low-level stubs and cannot be
 * mixed).  The first handler on a line selects the IDT stub and
 * unmasks the line.  Returns 0 or -EBUSY.
 */
int setup_x86_irq(int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;

	p = irq_action + irq;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ))
			return -EBUSY;

		/* Can't share interrupts unless both are same type */
		if ((old->flags ^ new->flags) & SA_INTERRUPT)
			return -EBUSY;

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	if (new->flags & SA_SAMPLE_RANDOM)
		rand_initialize_irq(irq);

	/* The list splice and gate setup must be atomic w.r.t. the
	   interrupt itself, hence the cli() region. */
	save_flags(flags);
	cli();
	*p = new;

	if (!shared) {
		/* First handler on this line: route the IDT vector to the
		   fast or slow stub and enable the line at the PIC. */
		if (new->flags & SA_INTERRUPT)
			set_intr_gate(0x20+irq,fast_interrupt[irq]);
		else
			set_intr_gate(0x20+irq,interrupt[irq]);
		unmask_irq(irq);
	}
	restore_flags(flags);
	return 0;
}
+
+int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char * devname,
+ void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+ if (irq > 15)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_x86_irq(irq, action);
+
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction * action, **p;
+ unsigned long flags;
+
+ if (irq > 15) {
+ printk("Trying to free IRQ%d\n",irq);
+ return;
+ }
+ for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now free it */
+ save_flags(flags);
+ cli();
+ *p = action->next;
+ if (!irq[irq_action]) {
+ mask_irq(irq);
+ set_intr_gate(0x20+irq,bad_interrupt[irq]);
+ }
+ restore_flags(flags);
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+}
+
/*
 * Start an IRQ autoprobe: enable every line that has no handler,
 * wait ~100ms so spurious interrupts get masked again by the
 * bad-interrupt stubs, then return the bitmask of candidate
 * lines that are still enabled.
 */
unsigned long probe_irq_on (void)
{
	unsigned int i, irqs = 0, irqmask;
	unsigned long delay;

	/* first, enable any unassigned irqs */
	for (i = 15; i > 0; i--) {
		if (!irq_action[i]) {
			enable_irq(i);
			irqs |= (1 << i);
		}
	}

	/* wait for spurious interrupts to mask themselves out again */
	for (delay = jiffies + HZ/10; delay > jiffies; )
		/* about 100ms delay */;

	/* now filter out any obviously spurious interrupts */
	/* irqmask = current 16-bit PIC mask (slave in the high byte). */
	irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21;
	return irqs & ~irqmask;
}
+
/*
 * Finish an autoprobe: of the candidate lines in irqs, keep those
 * now masked (i.e. that fired and hit the bad-interrupt stub).
 * Returns 0 if none fired, the IRQ number if exactly one did, or
 * the negated lowest IRQ number if several fired (ambiguous).
 */
int probe_irq_off (unsigned long irqs)
{
	unsigned int i, irqmask;

	irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21;
#ifdef DEBUG
	printk("probe_irq_off: irqs=0x%04lx irqmask=0x%04x\n", irqs, irqmask);
#endif
	irqs &= irqmask;
	if (!irqs)
		return 0;
	/* ffz(~irqs) == index of the lowest set bit in irqs. */
	i = ffz(~irqs);
	if (irqs != (irqs & (1 << i)))
		i = -i;
	return i;
}
+
/*
 * One-time interrupt setup: program the PIT for HZ ticks, point
 * all 16 IRQ vectors at the bad-interrupt stubs, claim the PIC
 * I/O regions and install the cascade (IRQ2) and FPU/IPI (IRQ13)
 * actions.  The smptrap latch makes repeat calls (from secondary
 * CPUs) a no-op.
 */
void init_IRQ(void)
{
	int i;
	static unsigned char smptrap=0;
	if(smptrap)
		return;
	smptrap=1;

	/* set the clock to 100 Hz */
	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
	for (i = 0; i < 16 ; i++)
		set_intr_gate(0x20+i,bad_interrupt[i]);
	/* This bit is a hack because we don't send timer messages to all processors yet */
	/* It has to be here .. it doesn't work if you put it down the bottom - assembler explodes 8) */
#ifdef __SMP__
	/* NOTE: relies on i == 16 after the loop above. */
	set_intr_gate(0x20+i, interrupt[i]);	/* IRQ '16' - IPI for rescheduling */
#endif
	request_region(0x20,0x20,"pic1");
	request_region(0xa0,0x20,"pic2");
	setup_x86_irq(2, &irq2);
	setup_x86_irq(13, &irq13);
}
diff --git a/linux/src/arch/i386/lib/delay.c b/linux/src/arch/i386/lib/delay.c
new file mode 100644
index 0000000..04ccf16
--- /dev/null
+++ b/linux/src/arch/i386/lib/delay.c
@@ -0,0 +1,45 @@
+/*
+ * Precise Delay Loops for i386
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors. The additional
+ * jump magic is needed to get the timing stable on all the CPU's
+ * we have to worry about.
+ */
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#ifdef __SMP__
+#include <asm/smp.h>
+#endif
+
/*
 * Calibrated busy-wait: spin for `loops` iterations of the inner
 * decrement loop.  Deliberately NOT inlined (see file header):
 * the jump/.align dance pins the loop's alignment so its timing
 * is stable across call sites and CPUs.
 */
void __delay(unsigned long loops)
{
	int d0;
	__asm__ __volatile__(
		"\tjmp 1f\n"
		".align 16\n"
		"1:\tjmp 2f\n"
		".align 16\n"
		"2:\tdecl %0\n\tjns 2b"
		:"=&a" (d0)
		:"0" (loops));
}
+
/*
 * Scale a fixed-point microsecond count into loop iterations:
 * mull computes (xloops * loops_per_sec), and the high 32 bits
 * of the 64-bit product (left in %edx by mull) become the loop
 * count handed to __delay().
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;
	__asm__("mull %0"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops),"0" (loops_per_sec));
	__delay(xloops);
}
+
/*
 * Busy-wait for `usecs` microseconds.  0x10c6 is the 32.32
 * fixed-point representation of 1/1000000 (2**32 / 10**6),
 * converting microseconds into the units __const_udelay expects.
 */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c6);  /* 2**32 / 1000000 */
}
diff --git a/linux/src/arch/i386/lib/semaphore.S b/linux/src/arch/i386/lib/semaphore.S
new file mode 100644
index 0000000..e09655c
--- /dev/null
+++ b/linux/src/arch/i386/lib/semaphore.S
@@ -0,0 +1,35 @@
+/*
+ * linux/arch/i386/lib/semaphore.S
+ *
+ * Copyright (C) 1996 Linus Torvalds
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * "down_failed" is called with the eventual return address
+ * in %eax, and the address of the semaphore in %ecx. We need
+ * to increment the number of waiters on the semaphore,
+ * call "__down()", and then eventually return to try again.
+ */
/* Trampolines for the semaphore slow paths.  Each is entered with
 * the eventual return address in %eax and the semaphore in %ecx.
 * Both are pushed: %ecx becomes the C function's argument, and the
 * pushed %eax is what the final `ret` pops and jumps through —
 * only %ecx is explicitly popped, by design. */
ENTRY(down_failed)
	pushl %eax		/* becomes the return target of `ret` below */
	pushl %ecx		/* argument: struct semaphore * */
	call SYMBOL_NAME(__down)
	popl %ecx
	ret

ENTRY(up_wakeup)
	pushl %eax
	pushl %ecx
	call SYMBOL_NAME(__up)
	popl %ecx
	ret

ENTRY(down_failed_interruptible)
	pushl %eax
	pushl %ecx
	call SYMBOL_NAME(__down_interruptible)
	popl %ecx
	ret
+
diff --git a/linux/src/drivers/block/cmd640.c b/linux/src/drivers/block/cmd640.c
new file mode 100644
index 0000000..b8132dc
--- /dev/null
+++ b/linux/src/drivers/block/cmd640.c
@@ -0,0 +1,850 @@
+/*
+ * linux/drivers/block/cmd640.c Version 1.02 Sep 01, 1996
+ *
+ * Copyright (C) 1995-1996 Linus Torvalds & authors (see below)
+ */
+
+/*
+ * Original author: abramov@cecmow.enet.dec.com (Igor Abramov)
+ *
+ * Maintained by: mlord@pobox.com (Mark Lord)
+ * with fanatical support from a legion of hackers!
+ *
+ * This file provides support for the advanced features and bugs
+ * of IDE interfaces using the CMD Technologies 0640 IDE interface chip.
+ *
+ * These chips are basically fucked by design, and getting this driver
+ * to work on every motherboard design that uses this screwed chip seems
+ * bloody well impossible. However, we're still trying.
+ *
+ * Version 0.97 worked for everybody.
+ *
+ * User feedback is essential. Many thanks to the beta test team:
+ *
+ * A.Hartgers@stud.tue.nl, JZDQC@CUNYVM.CUNY.edu, abramov@cecmow.enet.dec.com,
+ * bardj@utopia.ppp.sn.no, bart@gaga.tue.nl, bbol001@cs.auckland.ac.nz,
+ * chrisc@dbass.demon.co.uk, dalecki@namu26.Num.Math.Uni-Goettingen.de,
+ * derekn@vw.ece.cmu.edu, florian@btp2x3.phy.uni-bayreuth.de,
+ * flynn@dei.unipd.it, gadio@netvision.net.il, godzilla@futuris.net,
+ * j@pobox.com, jkemp1@mises.uni-paderborn.de, jtoppe@hiwaay.net,
+ * kerouac@ssnet.com, meskes@informatik.rwth-aachen.de, hzoli@cs.elte.hu,
+ * peter@udgaard.isgtec.com, phil@tazenda.demon.co.uk, roadcapw@cfw.com,
+ * s0033las@sun10.vsz.bme.hu, schaffer@tam.cornell.edu, sjd@slip.net,
+ * steve@ei.org, ulrpeg@bigcomm.gun.de, ism@tardis.ed.ac.uk, mack@cray.com
+ * liug@mama.indstate.edu, and others.
+ *
+ * Version 0.01 Initial version, hacked out of ide.c,
+ * and #include'd rather than compiled separately.
+ * This will get cleaned up in a subsequent release.
+ *
+ * Version 0.02 Fixes for vlb initialization code, enable prefetch
+ * for versions 'B' and 'C' of chip by default,
+ * some code cleanup.
+ *
+ * Version 0.03 Added reset of secondary interface,
+ * and black list for devices which are not compatible
+ * with prefetch mode. Separate function for setting
+ * prefetch is added, possibly it will be called some
+ * day from ioctl processing code.
+ *
+ * Version 0.04 Now configs/compiles separate from ide.c
+ *
+ * Version 0.05 Major rewrite of interface timing code.
+ * Added new function cmd640_set_mode to set PIO mode
+ * from ioctl call. New drives added to black list.
+ *
+ * Version 0.06 More code cleanup. Prefetch is enabled only for
+ * detected hard drives, not included in prefetch
+ * black list.
+ *
+ * Version 0.07 Changed to more conservative drive tuning policy.
+ * Unknown drives, which report PIO < 4 are set to
+ * (reported_PIO - 1) if it is supported, or to PIO0.
+ * List of known drives extended by info provided by
+ * CMD at their ftp site.
+ *
+ * Version 0.08 Added autotune/noautotune support.
+ *
+ * Version 0.09 Try to be smarter about 2nd port enabling.
+ * Version 0.10 Be nice and don't reset 2nd port.
+ * Version 0.11 Try to handle more wierd situations.
+ *
+ * Version 0.12 Lots of bug fixes from Laszlo Peter
+ * irq unmasking disabled for reliability.
+ * try to be even smarter about the second port.
+ * tidy up source code formatting.
+ * Version 0.13 permit irq unmasking again.
+ * Version 0.90 massive code cleanup, some bugs fixed.
+ * defaults all drives to PIO mode0, prefetch off.
+ * autotune is OFF by default, with compile time flag.
+ * prefetch can be turned OFF/ON using "hdparm -p8/-p9"
+ * (requires hdparm-3.1 or newer)
+ * Version 0.91 first release to linux-kernel list.
+ * Version 0.92 move initial reg dump to separate callable function
+ * change "readahead" to "prefetch" to avoid confusion
+ * Version 0.95 respect original BIOS timings unless autotuning.
+ * tons of code cleanup and rearrangement.
+ * added CONFIG_BLK_DEV_CMD640_ENHANCED option
+ * prevent use of unmask when prefetch is on
+ * Version 0.96 prevent use of io_32bit when prefetch is off
+ * Version 0.97 fix VLB secondary interface for sjd@slip.net
+ * other minor tune-ups: 0.96 was very good.
+ * Version 0.98 ignore PCI version when disabled by BIOS
+ * Version 0.99 display setup/active/recovery clocks with PIO mode
+ * Version 1.00 Mmm.. cannot depend on PCMD_ENA in all systems
+ * Version 1.01 slow/fast devsel can be selected with "hdparm -p6/-p7"
+ * ("fast" is necessary for 32bit I/O in some systems)
+ * Version 1.02 fix bug that resulted in slow "setup times"
+ * (patch courtesy of Zoltan Hidvegi)
+ */
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+#define CMD640_PREFETCH_MASKS 1
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+#include "ide.h"
+#include "ide_modes.h"
+
+/*
+ * This flag is set in ide.c by the parameter: ide0=cmd640_vlb
+ */
+int cmd640_vlb = 0;
+
+/*
+ * CMD640 specific registers definition.
+ */
+
+#define VID 0x00
+#define DID 0x02
+#define PCMD 0x04
+#define PCMD_ENA 0x01
+#define PSTTS 0x06
+#define REVID 0x08
+#define PROGIF 0x09
+#define SUBCL 0x0a
+#define BASCL 0x0b
+#define BaseA0 0x10
+#define BaseA1 0x14
+#define BaseA2 0x18
+#define BaseA3 0x1c
+#define INTLINE 0x3c
+#define INPINE 0x3d
+
+#define CFR 0x50
+#define CFR_DEVREV 0x03
+#define CFR_IDE01INTR 0x04
+#define CFR_DEVID 0x18
+#define CFR_AT_VESA_078h 0x20
+#define CFR_DSA1 0x40
+#define CFR_DSA0 0x80
+
+#define CNTRL 0x51
+#define CNTRL_DIS_RA0 0x40
+#define CNTRL_DIS_RA1 0x80
+#define CNTRL_ENA_2ND 0x08
+
+#define CMDTIM 0x52
+#define ARTTIM0 0x53
+#define DRWTIM0 0x54
+#define ARTTIM1 0x55
+#define DRWTIM1 0x56
+#define ARTTIM23 0x57
+#define ARTTIM23_DIS_RA2 0x04
+#define ARTTIM23_DIS_RA3 0x08
+#define DRWTIM23 0x58
+#define BRST 0x59
+
+/*
+ * Registers and masks for easy access by drive index:
+ */
+static byte prefetch_regs[4] = {CNTRL, CNTRL, ARTTIM23, ARTTIM23};
+static byte prefetch_masks[4] = {CNTRL_DIS_RA0, CNTRL_DIS_RA1, ARTTIM23_DIS_RA2, ARTTIM23_DIS_RA3};
+
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+
+static byte arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
+static byte drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM23, DRWTIM23};
+
+/*
+ * Current cmd640 timing values for each drive.
+ * The defaults for each are the slowest possible timings.
+ */
+static byte setup_counts[4] = {4, 4, 4, 4}; /* Address setup count (in clocks) */
+static byte active_counts[4] = {16, 16, 16, 16}; /* Active count (encoded) */
+static byte recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
+
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+
+/*
+ * These are initialized to point at the devices we control
+ */
+static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
+static ide_drive_t *cmd_drives[4];
+
+/*
+ * Interface to access cmd640x registers
+ */
+static unsigned int cmd640_key;
+static void (*put_cmd640_reg)(unsigned short reg, byte val);
+static byte (*get_cmd640_reg)(unsigned short reg);
+
+/*
+ * This is read from the CFR reg, and is used in several places.
+ */
+static unsigned int cmd640_chip_version;
+
+/*
+ * The CMD640x chip does not support DWORD config write cycles, but some
+ * of the BIOSes use them to implement the config services.
+ * Therefore, we must use direct IO instead.
+ */
+
+/* PCI method 1 access */
+
/* Write a cmd640 register via PCI configuration mechanism #1:
 * select the dword address on CONFIG_ADDRESS (0xcf8), then write
 * the target byte lane of CONFIG_DATA (0xcfc).  Done with byte
 * I/O because the chip mishandles dword config writes. */
static void put_cmd640_reg_pci1 (unsigned short reg, byte val)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
	outb_p(val, (reg & 3) | 0xcfc);
	restore_flags(flags);
}
+
/* Read a cmd640 register via PCI configuration mechanism #1
 * (address on 0xcf8, byte lane of 0xcfc), under cli() so the
 * two-port sequence is atomic. */
static byte get_cmd640_reg_pci1 (unsigned short reg)
{
	byte b;
	unsigned long flags;

	save_flags(flags);
	cli();
	outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
	b = inb_p((reg & 3) | 0xcfc);
	restore_flags(flags);
	return b;
}
+
+/* PCI method 2 access (from CMD datasheet) */
+
/* Write a cmd640 register via PCI configuration mechanism #2
 * (from the CMD datasheet): enable config space through port
 * 0xcf8, poke the register at cmd640_key + reg, then disable. */
static void put_cmd640_reg_pci2 (unsigned short reg, byte val)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	outb_p(0x10, 0xcf8);
	outb_p(val, cmd640_key + reg);
	outb_p(0, 0xcf8);
	restore_flags(flags);
}
+
/* Read a cmd640 register via PCI configuration mechanism #2:
 * enable config space (0x10 -> 0xcf8), read, then disable. */
static byte get_cmd640_reg_pci2 (unsigned short reg)
{
	byte b;
	unsigned long flags;

	save_flags(flags);
	cli();
	outb_p(0x10, 0xcf8);
	b = inb_p(cmd640_key + reg);
	outb_p(0, 0xcf8);
	restore_flags(flags);
	return b;
}
+
+/* VLB access */
+
/* Write a cmd640 register on a VLB chip: index port at
 * cmd640_key, data port at cmd640_key + 4. */
static void put_cmd640_reg_vlb (unsigned short reg, byte val)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	outb_p(reg, cmd640_key);
	outb_p(val, cmd640_key + 4);
	restore_flags(flags);
}
+
/* Read a cmd640 register on a VLB chip (index/data port pair
 * at cmd640_key / cmd640_key + 4). */
static byte get_cmd640_reg_vlb (unsigned short reg)
{
	byte b;
	unsigned long flags;

	save_flags(flags);
	cli();
	outb_p(reg, cmd640_key);
	b = inb_p(cmd640_key + 4);
	restore_flags(flags);
	return b;
}
+
/* Check whether the device at the current cmd640_key is a CMD640:
 * config bytes 0-3 must read as vendor 0x1095, device 0x0640
 * (little-endian byte sequence 95 10 40 06).  Returns 1 on match. */
static int match_pci_cmd640_device (void)
{
	const byte ven_dev[4] = {0x95,0x10,0x40,0x06};
	unsigned int i;
	for (i = 0; i < 4; i++) {
		if (get_cmd640_reg(i) != ven_dev[i])
			return 0;
	}
#ifdef STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT
	/* PCMD_ENA is unreliable on real hardware, hence opt-in only. */
	if ((get_cmd640_reg(PCMD) & PCMD_ENA) == 0) {
		printk("ide: cmd640 on PCI disabled by BIOS\n");
		return 0;
	}
#endif /* STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT */
	return 1; /* success */
}
+
+/*
+ * Probe for CMD640x -- pci method 1
+ */
+static int probe_for_cmd640_pci1 (void)
+{
+ get_cmd640_reg = get_cmd640_reg_pci1;
+ put_cmd640_reg = put_cmd640_reg_pci1;
+ for (cmd640_key = 0x80000000; cmd640_key <= 0x8000f800; cmd640_key += 0x800) {
+ if (match_pci_cmd640_device())
+ return 1; /* success */
+ }
+ return 0;
+}
+
+/*
+ * Probe for CMD640x -- pci method 2
+ */
+static int probe_for_cmd640_pci2 (void)
+{
+ get_cmd640_reg = get_cmd640_reg_pci2;
+ put_cmd640_reg = put_cmd640_reg_pci2;
+ for (cmd640_key = 0xc000; cmd640_key <= 0xcf00; cmd640_key += 0x100) {
+ if (match_pci_cmd640_device())
+ return 1; /* success */
+ }
+ return 0;
+}
+
+/*
+ * Probe for CMD640x -- vlb
+ */
/*
 * Probe for CMD640x -- vlb.  Tries index port 0x178 first, then
 * 0x78; CFR must read something plausible (not 0x00/0xff) and its
 * CFR_AT_VESA_078h bit must agree with the port actually in use.
 */
static int probe_for_cmd640_vlb (void)
{
	byte b;

	get_cmd640_reg = get_cmd640_reg_vlb;
	put_cmd640_reg = put_cmd640_reg_vlb;
	cmd640_key = 0x178;
	b = get_cmd640_reg(CFR);
	if (b == 0xff || b == 0x00 || (b & CFR_AT_VESA_078h)) {
		cmd640_key = 0x78;
		b = get_cmd640_reg(CFR);
		if (b == 0xff || b == 0x00 || !(b & CFR_AT_VESA_078h))
			return 0;
	}
	return 1; /* success */
}
+
+/*
+ * Returns 1 if an IDE interface/drive exists at 0x170,
+ * Returns 0 otherwise.
+ */
/*
 * Returns 1 if an IDE interface/drive exists at 0x170,
 * Returns 0 otherwise.
 *
 * Works by writing a drive-select value to the 0x170 select
 * register and reading it back: a present drive latches the
 * value, an empty bus does not.  Tries drive 0, then drive 1.
 */
static int secondary_port_responding (void)
{
	unsigned long flags;

	save_flags(flags);
	cli();

	outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET);	/* select drive0 */
	udelay(100);
	if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) {
		outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */
		udelay(100);
		if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) {
			restore_flags(flags);
			return 0; /* nothing responded */
		}
	}
	restore_flags(flags);
	return 1; /* success */
}
+
+#ifdef CMD640_DUMP_REGS
+/*
+ * Dump out all cmd640 registers. May be called from ide.c
+ */
/*
 * Dump out all cmd640 registers.  May be called from ide.c.
 * VLB chips only expose 0x50..0x59; PCI chips are dumped from
 * 0x00 so the config header is included.  Output is 16 bytes
 * per printed row.
 */
void cmd640_dump_regs (void)
{
	unsigned int reg = cmd640_vlb ? 0x50 : 0x00;

	/* Dump current state of chip registers */
	printk("ide: cmd640 internal register dump:");
	for (; reg <= 0x59; reg++) {
		if (!(reg & 0x0f))
			printk("\n%04x:", reg);
		printk(" %02x", get_cmd640_reg(reg));
	}
	printk("\n");
}
+#endif
+
+/*
+ * Check whether prefetch is on for a drive,
+ * and initialize the unmask flags for safe operation.
+ */
/*
 * Check whether prefetch is on for a drive,
 * and initialize the unmask flags for safe operation:
 * prefetch OFF forbids 32-bit I/O; prefetch ON (optionally)
 * forbids IRQ unmasking, which is unsafe while prefetching.
 */
static void check_prefetch (unsigned int index)
{
	ide_drive_t *drive = cmd_drives[index];
	byte b = get_cmd640_reg(prefetch_regs[index]);

	if (b & prefetch_masks[index]) {	/* is prefetch off? */
		drive->no_unmask = 0;
		drive->no_io_32bit = 1;
		drive->io_32bit = 0;
	} else {
#if CMD640_PREFETCH_MASKS
		drive->no_unmask = 1;
		drive->unmask = 0;
#endif
		drive->no_io_32bit = 0;
	}
}
+
+/*
+ * Figure out which devices we control
+ */
+static void setup_device_ptrs (void)
+{
+ unsigned int i;
+
+ cmd_hwif0 = &ide_hwifs[0]; /* default, if not found below */
+ cmd_hwif1 = &ide_hwifs[1]; /* default, if not found below */
+ for (i = 0; i < MAX_HWIFS; i++) {
+ ide_hwif_t *hwif = &ide_hwifs[i];
+ if (hwif->chipset == ide_unknown || hwif->chipset == ide_generic) {
+ if (hwif->io_base == 0x1f0)
+ cmd_hwif0 = hwif;
+ else if (hwif->io_base == 0x170)
+ cmd_hwif1 = hwif;
+ }
+ }
+ cmd_drives[0] = &cmd_hwif0->drives[0];
+ cmd_drives[1] = &cmd_hwif0->drives[1];
+ cmd_drives[2] = &cmd_hwif1->drives[0];
+ cmd_drives[3] = &cmd_hwif1->drives[1];
+}
+
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+
+/*
+ * Sets prefetch mode for a drive.
+ */
/*
 * Sets prefetch mode for a drive: updates the drive's
 * unmask/io_32bit policy flags to stay safe, then flips the
 * disable-read-ahead bit in the chip register (the bit is a
 * DISABLE bit, so it is cleared to turn prefetch on).
 */
static void set_prefetch_mode (unsigned int index, int mode)
{
	ide_drive_t *drive = cmd_drives[index];
	int reg = prefetch_regs[index];
	byte b;
	unsigned long flags;

	save_flags(flags);
	cli();
	b = get_cmd640_reg(reg);
	if (mode) {	/* want prefetch on? */
#if CMD640_PREFETCH_MASKS
		drive->no_unmask = 1;
		drive->unmask = 0;
#endif
		drive->no_io_32bit = 0;
		b &= ~prefetch_masks[index];	/* enable prefetch */
	} else {
		drive->no_unmask = 0;
		drive->no_io_32bit = 1;
		drive->io_32bit = 0;
		b |= prefetch_masks[index];	/* disable prefetch */
	}
	put_cmd640_reg(reg, b);
	restore_flags(flags);
}
+
+/*
+ * Dump out current drive clocks settings
+ */
/*
 * Dump out current drive clocks settings
 * (setup/active/recovery counts), adjusting the encoded values
 * the same way the chip interprets them so the printed numbers
 * reflect the real timing.
 */
static void display_clocks (unsigned int index)
{
	byte active_count, recovery_count;

	active_count = active_counts[index];
	if (active_count == 1)
		++active_count;
	recovery_count = recovery_counts[index];
	if (active_count > 3 && recovery_count == 1)
		++recovery_count;
	if (cmd640_chip_version > 1)
		recovery_count += 1;  /* cmd640b uses (count + 1)*/
	printk(", clocks=%d/%d/%d\n", setup_counts[index], active_count, recovery_count);
}
+
+/*
+ * Pack active and recovery counts into single byte representation
+ * used by controller
+ */
+inline static byte pack_nibbles (byte upper, byte lower)
+{
+ return ((upper & 0x0f) << 4) | (lower & 0x0f);
+}
+
+/*
+ * This routine retrieves the initial drive timings from the chipset.
+ */
/*
 * This routine retrieves the initial drive timings from the chipset,
 * decoding the BIOS-programmed register values back into clock
 * counts so later reprogramming can preserve them.
 */
static void retrieve_drive_counts (unsigned int index)
{
	byte b;

	/*
	 * Get the internal setup timing, and convert to clock count
	 * (the two setup bits live in the top of the ARTTIM register;
	 * ~0x3f == 0xc0 isolates them).
	 */
	b = get_cmd640_reg(arttim_regs[index]) & ~0x3f;
	switch (b) {
		case 0x00: b = 4; break;
		case 0x80: b = 3; break;
		case 0x40: b = 2; break;
		default:   b = 5; break;
	}
	setup_counts[index] = b;

	/*
	 * Get the active/recovery counts
	 * (high/low nibble of DRWTIM; an encoded 0 means 16 clocks).
	 */
	b = get_cmd640_reg(drwtim_regs[index]);
	active_counts[index]   = (b >> 4)   ? (b >> 4)   : 0x10;
	recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10;
}
+
+
+/*
+ * This routine writes the prepared setup/active/recovery counts
+ * for a drive into the cmd640 chipset registers to activate them.
+ */
+static void program_drive_counts (unsigned int index)
+{
+ unsigned long flags;
+ byte setup_count = setup_counts[index];
+ byte active_count = active_counts[index];
+ byte recovery_count = recovery_counts[index];
+
+ /*
+ * Set up address setup count and drive read/write timing registers.
+ * Primary interface has individual count/timing registers for
+ * each drive. Secondary interface has one common set of registers,
+ * so we merge the timings, using the slowest value for each timing.
+ */
+ if (index > 1) {
+ unsigned int mate;
+ if (cmd_drives[mate = index ^ 1]->present) {
+ if (setup_count < setup_counts[mate])
+ setup_count = setup_counts[mate];
+ if (active_count < active_counts[mate])
+ active_count = active_counts[mate];
+ if (recovery_count < recovery_counts[mate])
+ recovery_count = recovery_counts[mate];
+ }
+ }
+
+ /*
+ * Convert setup_count to internal chipset representation
+ */
+ switch (setup_count) {
+ case 4: setup_count = 0x00; break;
+ case 3: setup_count = 0x80; break;
+ case 1:
+ case 2: setup_count = 0x40; break;
+ default: setup_count = 0xc0; /* case 5 */
+ }
+
+ /*
+ * Now that everything is ready, program the new timings
+ */
+ save_flags (flags);
+ cli();
+ /*
+ * Program the address_setup clocks into ARTTIM reg,
+ * and then the active/recovery counts into the DRWTIM reg
+ * (this converts counts of 16 into counts of zero -- okay).
+ */
+ setup_count |= get_cmd640_reg(arttim_regs[index]) & 0x3f;
+ put_cmd640_reg(arttim_regs[index], setup_count);
+ put_cmd640_reg(drwtim_regs[index], pack_nibbles(active_count, recovery_count));
+ restore_flags(flags);
+}
+
+/*
+ * Set a specific pio_mode for a drive
+ */
+static void cmd640_set_mode (unsigned int index, byte pio_mode, unsigned int cycle_time)
+{
+ int setup_time, active_time, recovery_time, clock_time;
+ byte setup_count, active_count, recovery_count, recovery_count2, cycle_count;
+ int bus_speed = ide_system_bus_speed();
+
+ if (pio_mode > 5)
+ pio_mode = 5;
+ setup_time = ide_pio_timings[pio_mode].setup_time;
+ active_time = ide_pio_timings[pio_mode].active_time;
+ recovery_time = cycle_time - (setup_time + active_time);
+ clock_time = 1000 / bus_speed;
+ cycle_count = (cycle_time + clock_time - 1) / clock_time;
+
+ setup_count = (setup_time + clock_time - 1) / clock_time;
+
+ active_count = (active_time + clock_time - 1) / clock_time;
+ if (active_count < 2)
+ active_count = 2; /* minimum allowed by cmd640 */
+
+ recovery_count = (recovery_time + clock_time - 1) / clock_time;
+ recovery_count2 = cycle_count - (setup_count + active_count);
+ if (recovery_count2 > recovery_count)
+ recovery_count = recovery_count2;
+ if (recovery_count < 2)
+ recovery_count = 2; /* minimum allowed by cmd640 */
+ if (recovery_count > 17) {
+ active_count += recovery_count - 17;
+ recovery_count = 17;
+ }
+ if (active_count > 16)
+ active_count = 16; /* maximum allowed by cmd640 */
+ if (cmd640_chip_version > 1)
+ recovery_count -= 1; /* cmd640b uses (count + 1)*/
+ if (recovery_count > 16)
+ recovery_count = 16; /* maximum allowed by cmd640 */
+
+ setup_counts[index] = setup_count;
+ active_counts[index] = active_count;
+ recovery_counts[index] = recovery_count;
+
+ /*
+ * In a perfect world, we might set the drive pio mode here
+ * (using WIN_SETFEATURE) before continuing.
+ *
+ * But we do not, because:
+ * 1) this is the wrong place to do it (proper is do_special() in ide.c)
+ * 2) in practice this is rarely, if ever, necessary
+ */
+ program_drive_counts (index);
+}
+
+/*
+ * Drive PIO mode selection:
+ */
+static void cmd640_tune_drive (ide_drive_t *drive, byte mode_wanted)
+{
+ byte b;
+ ide_pio_data_t d;
+ unsigned int index = 0;
+
+ while (drive != cmd_drives[index]) {
+ if (++index > 3) {
+ printk("%s: bad news in cmd640_tune_drive\n", drive->name);
+ return;
+ }
+ }
+ switch (mode_wanted) {
+ case 6: /* set fast-devsel off */
+ case 7: /* set fast-devsel on */
+ mode_wanted &= 1;
+ b = get_cmd640_reg(CNTRL) & ~0x27;
+ if (mode_wanted)
+ b |= 0x27;
+ put_cmd640_reg(CNTRL, b);
+ printk("%s: %sabled cmd640 fast host timing (devsel)\n", drive->name, mode_wanted ? "en" : "dis");
+ return;
+
+ case 8: /* set prefetch off */
+ case 9: /* set prefetch on */
+ mode_wanted &= 1;
+ set_prefetch_mode(index, mode_wanted);
+ printk("%s: %sabled cmd640 prefetch\n", drive->name, mode_wanted ? "en" : "dis");
+ return;
+ }
+
+ (void) ide_get_best_pio_mode (drive, mode_wanted, 5, &d);
+ cmd640_set_mode (index, d.pio_mode, d.cycle_time);
+
+ printk ("%s: selected cmd640 PIO mode%d (%dns) %s/IORDY%s",
+ drive->name,
+ d.pio_mode,
+ d.cycle_time,
+ d.use_iordy ? "w" : "wo",
+ d.overridden ? " (overriding vendor mode)" : "");
+ display_clocks(index);
+}
+
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+
+/*
+ * Probe for a cmd640 chipset, and initialize it if found. Called from ide.c
+ */
+int ide_probe_for_cmd640x (void)
+{
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ int second_port_toggled = 0;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ int second_port_cmd640 = 0;
+ const char *bus_type, *port2;
+ unsigned int index;
+ byte b, cfr;
+
+ if (cmd640_vlb && probe_for_cmd640_vlb()) {
+ bus_type = "VLB";
+ } else {
+ cmd640_vlb = 0;
+ if (probe_for_cmd640_pci1())
+ bus_type = "PCI (type1)";
+ else if (probe_for_cmd640_pci2())
+ bus_type = "PCI (type2)";
+ else
+ return 0;
+ }
+ /*
+ * Undocumented magic (there is no 0x5b reg in specs)
+ */
+ put_cmd640_reg(0x5b, 0xbd);
+ if (get_cmd640_reg(0x5b) != 0xbd) {
+ printk("ide: cmd640 init failed: wrong value in reg 0x5b\n");
+ return 0;
+ }
+ put_cmd640_reg(0x5b, 0);
+
+#ifdef CMD640_DUMP_REGS
+ CMD640_DUMP_REGS;
+#endif
+
+ /*
+ * Documented magic begins here
+ */
+ cfr = get_cmd640_reg(CFR);
+ cmd640_chip_version = cfr & CFR_DEVREV;
+ if (cmd640_chip_version == 0) {
+ printk ("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
+ return 0;
+ }
+
+ /*
+ * Initialize data for primary port
+ */
+ setup_device_ptrs ();
+ printk("%s: buggy cmd640%c interface on %s, config=0x%02x\n",
+ cmd_hwif0->name, 'a' + cmd640_chip_version - 1, bus_type, cfr);
+ cmd_hwif0->chipset = ide_cmd640;
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ cmd_hwif0->tuneproc = &cmd640_tune_drive;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+
+ /*
+ * Ensure compatibility by always using the slowest timings
+ * for access to the drive's command register block,
+ * and reset the prefetch burst size to default (512 bytes).
+ *
+ * Maybe we need a way to NOT do these on *some* systems?
+ */
+ put_cmd640_reg(CMDTIM, 0);
+ put_cmd640_reg(BRST, 0x40);
+
+ /*
+ * Try to enable the secondary interface, if not already enabled
+ */
+ if (cmd_hwif1->noprobe) {
+ port2 = "not probed";
+ } else {
+ b = get_cmd640_reg(CNTRL);
+ if (secondary_port_responding()) {
+ if ((b & CNTRL_ENA_2ND)) {
+ second_port_cmd640 = 1;
+ port2 = "okay";
+ } else if (cmd640_vlb) {
+ second_port_cmd640 = 1;
+ port2 = "alive";
+ } else
+ port2 = "not cmd640";
+ } else {
+ put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */
+ if (secondary_port_responding()) {
+ second_port_cmd640 = 1;
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ second_port_toggled = 1;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ port2 = "enabled";
+ } else {
+ put_cmd640_reg(CNTRL, b); /* restore original setting */
+ port2 = "not responding";
+ }
+ }
+ }
+
+ /*
+ * Initialize data for secondary cmd640 port, if enabled
+ */
+ if (second_port_cmd640) {
+ cmd_hwif0->serialized = 1;
+ cmd_hwif1->serialized = 1;
+ cmd_hwif1->chipset = ide_cmd640;
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ cmd_hwif1->tuneproc = &cmd640_tune_drive;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ }
+ printk("%s: %sserialized, secondary interface %s\n", cmd_hwif1->name,
+ cmd_hwif0->serialized ? "" : "not ", port2);
+
+ /*
+ * Establish initial timings/prefetch for all drives.
+ * Do not unnecessarily disturb any prior BIOS setup of these.
+ */
+ for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) {
+ ide_drive_t *drive = cmd_drives[index];
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ if (drive->autotune || ((index > 1) && second_port_toggled)) {
+ /*
+ * Reset timing to the slowest speed and turn off prefetch.
+ * This way, the drive identify code has a better chance.
+ */
+ setup_counts [index] = 4; /* max possible */
+ active_counts [index] = 16; /* max possible */
+ recovery_counts [index] = 16; /* max possible */
+ program_drive_counts (index);
+ set_prefetch_mode (index, 0);
+ printk("cmd640: drive%d timings/prefetch cleared\n", index);
+ } else {
+ /*
+ * Record timings/prefetch without changing them.
+ * This preserves any prior BIOS setup.
+ */
+ retrieve_drive_counts (index);
+ check_prefetch (index);
+ printk("cmd640: drive%d timings/prefetch(%s) preserved",
+ index, drive->no_io_32bit ? "off" : "on");
+ display_clocks(index);
+ }
+#else
+ /*
+ * Set the drive unmask flags to match the prefetch setting
+ */
+ check_prefetch (index);
+ printk("cmd640: drive%d timings/prefetch(%s) preserved\n",
+ index, drive->no_io_32bit ? "off" : "on");
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ }
+
+#ifdef CMD640_DUMP_REGS
+ CMD640_DUMP_REGS;
+#endif
+ return 1;
+}
+
diff --git a/linux/src/drivers/block/floppy.c b/linux/src/drivers/block/floppy.c
new file mode 100644
index 0000000..1b96c44
--- /dev/null
+++ b/linux/src/drivers/block/floppy.c
@@ -0,0 +1,4284 @@
+/*
+ * linux/kernel/floppy.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1993, 1994 Alain Knaff
+ */
+/*
+ * 02.12.91 - Changed to static variables to indicate need for reset
+ * and recalibrate. This makes some things easier (output_byte reset
+ * checking etc), and means less interrupt jumping in case of errors,
+ * so the code is hopefully easier to understand.
+ */
+
+/*
+ * This file is certainly a mess. I've tried my best to get it working,
+ * but I don't like programming floppies, and I have only one anyway.
+ * Urgel. I should check for more errors, and do more graceful error
+ * recovery. Seems there are problems with several drives. I've tried to
+ * correct them. No promises.
+ */
+
+/*
+ * As with hd.c, all routines within this file can (and will) be called
+ * by interrupts, so extreme caution is needed. A hardware interrupt
+ * handler may not sleep, or a kernel panic will happen. Thus I cannot
+ * call "floppy-on" directly, but have to set a special timer interrupt
+ * etc.
+ */
+
+/*
+ * 28.02.92 - made track-buffering routines, based on the routines written
+ * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
+ */
+
+/*
+ * Automatic floppy-detection and formatting written by Werner Almesberger
+ * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
+ * the floppy-change signal detection.
+ */
+
+/*
+ * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
+ * FDC data overrun bug, added some preliminary stuff for vertical
+ * recording support.
+ *
+ * 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
+ *
+ * TODO: Errors are still not counted properly.
+ */
+
+/* 1992/9/20
+ * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
+ * modeled after the freeware MS-DOS program fdformat/88 V1.8 by
+ * Christoph H. Hochst\"atter.
+ * I have fixed the shift values to the ones I always use. Maybe a new
+ * ioctl() should be created to be able to modify them.
+ * There is a bug in the driver that makes it impossible to format a
+ * floppy as the first thing after bootup.
+ */
+
+/*
+ * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
+ * this helped the floppy driver as well. Much cleaner, and still seems to
+ * work.
+ */
+
+/* 1994/6/24 --bbroad-- added the floppy table entries and made
+ * minor modifications to allow 2.88 floppies to be run.
+ */
+
+/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
+ * disk types.
+ */
+
+/*
+ * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
+ * format bug fixes, but unfortunately some new bugs too...
+ */
+
+/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
+ * errors to allow safe writing by specialized programs.
+ */
+
+/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
+ * by defining bit 1 of the "stretch" parameter to mean put sectors on the
+ * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
+ * drives are "upside-down").
+ */
+
+/*
+ * 1995/8/26 -- Andreas Busse -- added Mips support.
+ */
+
+/*
+ * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
+ * features to asm/floppy.h.
+ */
+
+
+#define FLOPPY_SANITY_CHECK
+#undef FLOPPY_SILENT_DCL_CLEAR
+
+#define REALLY_SLOW_IO
+
+#define DEBUGT 2
+#define DCL_DEBUG /* debug disk change line */
+
+/* do print messages for unexpected interrupts */
+static int print_unex=1;
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+/* the following is the mask of allowed drives. By default units 2 and
+ * 3 of both floppy controllers are disabled, because switching on the
+ * motor of these drives causes system hangs on some PCI computers. drive
+ * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
+ * a drive is allowed. */
+static int FLOPPY_IRQ=6;
+static int FLOPPY_DMA=2;
+static int allowed_drive_mask = 0x33;
+
+static int irqdma_allocated = 0;
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/tqueue.h>
+#define FDPATCHES
+#include <linux/fdreg.h>
+
+
+#include <linux/fd.h>
+
+
+#define OLDFDRAWCMD 0x020d /* send a raw command to the FDC */
+
+struct old_floppy_raw_cmd {
+ void *data;
+ long length;
+
+ unsigned char rate;
+ unsigned char flags;
+ unsigned char cmd_count;
+ unsigned char cmd[9];
+ unsigned char reply_count;
+ unsigned char reply[7];
+ int track;
+};
+
+#include <linux/errno.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+
+static int use_virtual_dma=0; /* virtual DMA for Intel */
+static unsigned short virtual_dma_port=0x3f0;
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static int set_dor(int fdc, char mask, char data);
+static inline int __get_order(unsigned long size);
+#include <asm/floppy.h>
+
+
+#define MAJOR_NR FLOPPY_MAJOR
+
+#include <linux/blk.h>
+#include <linux/cdrom.h> /* for the compatibility eject ioctl */
+
+#include <linux/dev/glue/glue.h>
+
+
+#ifndef FLOPPY_MOTOR_MASK
+#define FLOPPY_MOTOR_MASK 0xf0
+#endif
+
+#ifndef fd_get_dma_residue
+#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
+#endif
+
+/* Dma Memory related stuff */
+
+/* Pure 2^n version of get_order */
+static inline int __get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#ifndef fd_dma_mem_free
+#define fd_dma_mem_free(addr, size) free_pages(addr, __get_order(size))
+#endif
+
+#ifndef fd_dma_mem_alloc
+#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,__get_order(size))
+#endif
+
+/* End dma memory related stuff */
+
+static unsigned int fake_change = 0;
+static int initialising=1;
+
+static inline int TYPE(kdev_t x) {
+ return (MINOR(x)>>2) & 0x1f;
+}
+static inline int DRIVE(kdev_t x) {
+ return (MINOR(x)&0x03) | ((MINOR(x)&0x80) >> 5);
+}
+#define ITYPE(x) (((x)>>2) & 0x1f)
+#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
+#define UNIT(x) ((x) & 0x03) /* drive on fdc */
+#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
+#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
+ /* reverse mapping from unit and fdc to drive */
+#define DP (&drive_params[current_drive])
+#define DRS (&drive_state[current_drive])
+#define DRWE (&write_errors[current_drive])
+#define FDCS (&fdc_state[fdc])
+#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags))
+#define SETF(x) (set_bit(x##_BIT, &DRS->flags))
+#define TESTF(x) (test_bit(x##_BIT, &DRS->flags))
+
+#define UDP (&drive_params[drive])
+#define UDRS (&drive_state[drive])
+#define UDRWE (&write_errors[drive])
+#define UFDCS (&fdc_state[FDC(drive)])
+#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags))
+#define USETF(x) (set_bit(x##_BIT, &UDRS->flags))
+#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags))
+
+#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args)
+
+#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2)
+#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
+
+#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x)))
+
+#define INT_OFF save_flags(flags); cli()
+#define INT_ON restore_flags(flags)
+
+/* read/write */
+#define COMMAND raw_cmd->cmd[0]
+#define DR_SELECT raw_cmd->cmd[1]
+#define TRACK raw_cmd->cmd[2]
+#define HEAD raw_cmd->cmd[3]
+#define SECTOR raw_cmd->cmd[4]
+#define SIZECODE raw_cmd->cmd[5]
+#define SECT_PER_TRACK raw_cmd->cmd[6]
+#define GAP raw_cmd->cmd[7]
+#define SIZECODE2 raw_cmd->cmd[8]
+#define NR_RW 9
+
+/* format */
+#define F_SIZECODE raw_cmd->cmd[2]
+#define F_SECT_PER_TRACK raw_cmd->cmd[3]
+#define F_GAP raw_cmd->cmd[4]
+#define F_FILL raw_cmd->cmd[5]
+#define NR_F 6
+
+/*
+ * Maximum disk size (in kilobytes). This default is used whenever the
+ * current disk size is unknown.
+ * [Now it is rather a minimum]
+ */
+#define MAX_DISK_SIZE 4 /* 3984*/
+
+#define K_64 0x10000 /* 64KB */
+
+/*
+ * globals used by 'result()'
+ */
+#define MAX_REPLIES 16
+static unsigned char reply_buffer[MAX_REPLIES];
+static int inr; /* size of reply buffer, when called from interrupt */
+#define ST0 (reply_buffer[0])
+#define ST1 (reply_buffer[1])
+#define ST2 (reply_buffer[2])
+#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
+#define R_TRACK (reply_buffer[3])
+#define R_HEAD (reply_buffer[4])
+#define R_SECTOR (reply_buffer[5])
+#define R_SIZECODE (reply_buffer[6])
+
+#define SEL_DLY (2*HZ/100)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+/*
+ * this struct defines the different floppy drive types.
+ */
+static struct {
+ struct floppy_drive_params params;
+ const char *name; /* name printed while booting */
+} default_drive_params[]= {
+/* NOTE: the time values in jiffies should be in msec!
+ CMOS drive type
+ | Maximum data rate supported by drive type
+ | | Head load time, msec
+ | | | Head unload time, msec (not used)
+ | | | | Step rate interval, usec
+ | | | | | Time needed for spinup time (jiffies)
+ | | | | | | Timeout for spinning down (jiffies)
+ | | | | | | | Spindown offset (where disk stops)
+ | | | | | | | | Select delay
+ | | | | | | | | | RPS
+ | | | | | | | | | | Max number of tracks
+ | | | | | | | | | | | Interrupt timeout
+ | | | | | | | | | | | | Max nonintlv. sectors
+ | | | | | | | | | | | | | -Max Errors- flags */
+{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
+
+{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
+
+{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
+
+{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
+
+{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
+
+{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
+
+{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
+/* | --autodetected formats--- | | |
+ * read_track | | Name printed when booting
+ * | Native format
+ * Frequency of disk change checks */
+};
+
+static struct floppy_drive_params drive_params[N_DRIVE];
+static struct floppy_drive_struct drive_state[N_DRIVE];
+static struct floppy_write_errors write_errors[N_DRIVE];
+static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
+
+/*
+ * This struct defines the different floppy types.
+ *
+ * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
+ * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
+ * tells if the disk is in Commodore 1581 format, which means side 0 sectors
+ * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
+ * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
+ * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
+ * side 0 is on physical side 0 (but with the misnamed sector IDs).
+ * 'stretch' should probably be renamed to something more general, like
+ * 'options'. Other parameters should be self-explanatory (see also
+ * setfdprm(8)).
+ */
+static struct floppy_struct floppy_type[32] = {
+ { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
+ { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
+ { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
+ { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
+ { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
+ { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
+ { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
+ { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
+ { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
+ { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120"}, /* 9 3.12MB 3.5" */
+
+ { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
+ { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
+ { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
+ { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
+ { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
+ { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
+ { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
+ { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
+ { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
+ { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
+
+ { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
+ { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
+ { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
+ { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
+ { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
+ { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
+ { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
+ { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
+
+ { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
+ { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
+};
+
+#define NUMBER(x) (sizeof(x) / sizeof(*(x)))
+#define SECTSIZE (_FD_SECTSIZE(*floppy))
+
+/* Auto-detection: Disk type used until the next media change occurs. */
+static struct floppy_struct *current_type[N_DRIVE] = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+/*
+ * User-provided type information. current_type points to
+ * the respective entry of this array.
+ */
+static struct floppy_struct user_params[N_DRIVE];
+
+static int floppy_sizes[256];
+static int floppy_blocksizes[256] = { 0, };
+
+/*
+ * The driver is trying to determine the correct media format
+ * while probing is set. rw_interrupt() clears it after a
+ * successful access.
+ */
+static int probing = 0;
+
+/* Synchronization of FDC access. */
+#define FD_COMMAND_NONE -1
+#define FD_COMMAND_ERROR 2
+#define FD_COMMAND_OKAY 3
+
+static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0;
+static struct wait_queue *fdc_wait = NULL, *command_done = NULL;
+#define NO_SIGNAL (!(current->signal & ~current->blocked) || !interruptible)
+#define CALL(x) if ((x) == -EINTR) return -EINTR
+#define ECALL(x) if ((ret = (x))) return ret;
+#define _WAIT(x,i) CALL(ret=wait_til_done((x),i))
+#define WAIT(x) _WAIT((x),interruptible)
+#define IWAIT(x) _WAIT((x),1)
+
+/* Errors during formatting are counted here. */
+static int format_errors;
+
+/* Format request descriptor. */
+static struct format_descr format_req;
+
+/*
+ * Rate is 0 for 500 kbps, 1 for 300 kbps, 2 for 250 kbps
+ * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
+ * H is head unload time (1=16ms, 2=32ms, etc)
+ */
+
+/*
+ * Track buffer
+ * Because these are written to by the DMA controller, they must
+ * not contain a 64k byte boundary crossing, or data will be
+ * corrupted/lost.
+ */
+static char *floppy_track_buffer=0;
+static int max_buffer_sectors=0;
+
+static int *errors;
+typedef void (*done_f)(int);
+static struct cont_t {
+ void (*interrupt)(void); /* this is called after the interrupt of the
+ * main command */
+ void (*redo)(void); /* this is called to retry the operation */
+ void (*error)(void); /* this is called to tally an error */
+ done_f done; /* this is called to say if the operation has
+ * succeeded/failed */
+} *cont=NULL;
+
+static void floppy_ready(void);
+static void floppy_start(void);
+static void process_fd_request(void);
+static void recalibrate_floppy(void);
+static void floppy_shutdown(void);
+
+static int floppy_grab_irq_and_dma(void);
+static void floppy_release_irq_and_dma(void);
+
+/*
+ * The "reset" variable should be tested whenever an interrupt is scheduled,
+ * after the commands have been sent. This is to ensure that the driver doesn't
+ * get wedged when the interrupt doesn't come because of a failed command.
+ * reset doesn't need to be tested before sending commands, because
+ * output_byte is automatically disabled when reset is set.
+ */
+#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } }
+static void reset_fdc(void);
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+#define NO_TRACK -1
+#define NEED_1_RECAL -2
+#define NEED_2_RECAL -3
+
+/* */
+static int usage_count = 0;
+
+
+/* buffer related variables */
+static int buffer_track = -1;
+static int buffer_drive = -1;
+static int buffer_min = -1;
+static int buffer_max = -1;
+
+/* fdc related variables, should end up in a struct */
+static struct floppy_fdc_state fdc_state[N_FDC];
+static int fdc; /* current fdc */
+
+static struct floppy_struct *_floppy = floppy_type;
+static unsigned char current_drive = 0;
+static long current_count_sectors = 0;
+static unsigned char sector_t; /* sector in track */
+
+#ifndef fd_eject
+#define fd_eject(x) -EINVAL
+#endif
+
+
+#ifdef DEBUGT
+static long unsigned debugtimer;
+#endif
+
+/*
+ * Debugging
+ * =========
+ */
+static inline void set_debugt(void)
+{
+#ifdef DEBUGT
+ debugtimer = jiffies;
+#endif
+}
+
+static inline void debugt(const char *message)
+{
+#ifdef DEBUGT
+ if (DP->flags & DEBUGT)
+ printk("%s dtime=%lu\n", message, jiffies-debugtimer);
+#endif
+}
+
+typedef void (*timeout_fn)(unsigned long);
+static struct timer_list fd_timeout ={ NULL, NULL, 0, 0,
+ (timeout_fn) floppy_shutdown };
+
+static const char *timeout_message;
+
+#ifdef FLOPPY_SANITY_CHECK
+static void is_alive(const char *message)
+{
+ /* this routine checks whether the floppy driver is "alive" */
+ if (fdc_busy && command_status < 2 && !fd_timeout.prev){
+ DPRINT("timeout handler died: %s\n",message);
+ }
+}
+#endif
+
+#ifdef FLOPPY_SANITY_CHECK
+
+#define OLOGSIZE 20
+
+static void (*lasthandler)(void) = NULL;
+static int interruptjiffies=0;
+static int resultjiffies=0;
+static int resultsize=0;
+static int lastredo=0;
+
+static struct output_log {
+ unsigned char data;
+ unsigned char status;
+ unsigned long jiffies;
+} output_log[OLOGSIZE];
+
+static int output_log_pos=0;
+#endif
+
+#define CURRENTD -1
+#define MAXTIMEOUT -2
+
+static void reschedule_timeout(int drive, const char *message, int marg)
+{
+ if (drive == CURRENTD)
+ drive = current_drive;
+ del_timer(&fd_timeout);
+ if (drive < 0 || drive > N_DRIVE) {
+ fd_timeout.expires = jiffies + 20*HZ;
+ drive=0;
+ } else
+ fd_timeout.expires = jiffies + UDP->timeout;
+ add_timer(&fd_timeout);
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("reschedule timeout ");
+ printk(message, marg);
+ printk("\n");
+ }
+ timeout_message = message;
+}
+
+static int maximum(int a, int b)
+{
+ if(a > b)
+ return a;
+ else
+ return b;
+}
+#define INFBOUND(a,b) (a)=maximum((a),(b));
+
+static int minimum(int a, int b)
+{
+ if(a < b)
+ return a;
+ else
+ return b;
+}
+#define SUPBOUND(a,b) (a)=minimum((a),(b));
+
+
+/*
+ * Bottom half floppy driver.
+ * ==========================
+ *
+ * This part of the file contains the code talking directly to the hardware,
+ * and also the main service loop (seek-configure-spinup-command)
+ */
+
+/*
+ * disk change.
+ * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
+ * and the last_checked date.
+ *
+ * last_checked is the date of the last check which showed 'no disk change'
+ * FD_DISK_CHANGE is set under two conditions:
+ * 1. The floppy has been changed after some i/o to that floppy already
+ * took place.
+ * 2. No floppy disk is in the drive. This is done in order to ensure that
+ * requests are quickly flushed in case there is no disk in the drive. It
+ * follows that FD_DISK_CHANGE can only be cleared if there is a disk in
+ * the drive.
+ *
+ * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
+ * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
+ * each seek. If a disk is present, the disk change line should also be
+ * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
+ * change line is set, this means either that no disk is in the drive, or
+ * that it has been removed since the last seek.
+ *
+ * This means that we really have a third possibility too:
+ * The floppy has been changed after the last seek.
+ */
+
+/* Sample the disk-change line for DRIVE (bit 7 of the Digital Input
+ * Register).  Returns 1 if the media may have changed or no disk is
+ * present, 0 otherwise.  See the heuristics described in the comment
+ * above (maxblock / FD_DISK_NEWCHANGE). */
+static int disk_change(int drive)
+{
+ int fdc=FDC(drive);
+#ifdef FLOPPY_SANITY_CHECK
+ /* The DCL is only meaningful once the select delay has elapsed and
+ * the drive is actually selected in the DOR. */
+ if (jiffies - UDRS->select_date < UDP->select_delay)
+ DPRINT("WARNING disk change called early\n");
+ if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
+ (FDCS->dor & 3) != UNIT(drive) ||
+ fdc != FDC(drive)){
+ DPRINT("probing disk change on unselected drive\n");
+ DPRINT("drive=%d fdc=%d dor=%x\n",drive, FDC(drive),
+ FDCS->dor);
+ }
+#endif
+
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("checking disk change line for drive %d\n",drive);
+ DPRINT("jiffies=%ld\n", jiffies);
+ DPRINT("disk change line=%x\n",fd_inb(FD_DIR)&0x80);
+ DPRINT("flags=%x\n",UDRS->flags);
+ }
+#endif
+ /* Drives with a broken DCL: trust the software flag instead. */
+ if (UDP->flags & FD_BROKEN_DCL)
+ return UTESTF(FD_DISK_CHANGED);
+ /* XOR with UDP->flags allows a per-drive inverted DCL polarity. */
+ if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80){
+ USETF(FD_VERIFY); /* verify write protection */
+ if (UDRS->maxblock){
+ /* mark it changed */
+ USETF(FD_DISK_CHANGED);
+ }
+
+ /* invalidate its geometry */
+ if (UDRS->keep_data >= 0) {
+ if ((UDP->flags & FTD_MSG) &&
+ current_type[drive] != NULL)
+ DPRINT("Disk type is undefined after "
+ "disk change\n");
+ current_type[drive] = NULL;
+ floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE;
+ }
+
+ /*USETF(FD_DISK_NEWCHANGE);*/
+ return 1;
+ } else {
+ UDRS->last_checked=jiffies;
+ UCLEARF(FD_DISK_NEWCHANGE);
+ }
+ return 0;
+}
+
+/* True iff UNIT's motor bit is on and UNIT is the drive currently
+ * selected by the low two bits of the DOR value. */
+static inline int is_selected(int dor, int unit)
+{
+ return ((dor & (0x10 << unit)) && (dor &3) == unit);
+}
+
+/* Update the Digital Output Register of FDC: newdor = (olddor & mask)|data.
+ * Runs disk_change() on a drive that loses selection, timestamps a drive
+ * that gains selection, and grabs/releases irq+dma according to the motor
+ * bits.  Returns the previous DOR value, or -1 if this fdc has no i/o
+ * address configured. */
+static int set_dor(int fdc, char mask, char data)
+{
+ register unsigned char drive, unit, newdor,olddor;
+
+ if (FDCS->address == -1)
+ return -1;
+
+ olddor = FDCS->dor;
+ newdor = (olddor & mask) | data;
+ if (newdor != olddor){
+ unit = olddor & 0x3;
+ if (is_selected(olddor, unit) && !is_selected(newdor,unit)){
+ /* last chance to read the DCL before deselection */
+ drive = REVDRIVE(fdc,unit);
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("calling disk change from set_dor\n");
+ }
+#endif
+ disk_change(drive);
+ }
+ FDCS->dor = newdor;
+ fd_outb(newdor, FD_DOR);
+
+ unit = newdor & 0x3;
+ if (!is_selected(olddor, unit) && is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+ UDRS->select_date = jiffies;
+ }
+ }
+
+ /* FIXME: we should be more graceful here */
+
+ if (newdor & FLOPPY_MOTOR_MASK)
+ floppy_grab_irq_and_dma();
+ if (olddor & FLOPPY_MOTOR_MASK)
+ floppy_release_irq_and_dma();
+ return olddor;
+}
+
+/* Momentarily drop and restore the motor bit of the current drive.
+ * Used to clear the disk-change line on certain drive/controller
+ * combinations; skipped when the drive needs a select delay. */
+static void twaddle(void)
+{
+ if (DP->select_delay)
+ return;
+ fd_outb(FDCS->dor & ~(0x10<<UNIT(current_drive)),FD_DOR);
+ fd_outb(FDCS->dor, FD_DOR);
+ DRS->select_date = jiffies;
+}
+
+/* reset all driver information about the current fdc. This is needed after
+ * a reset, and after a raw command.
+ * mode != 0 forces NEED_2_RECAL even on drives already at NEED_1_RECAL. */
+static void reset_fdc_info(int mode)
+{
+ int drive;
+
+ FDCS->spec1 = FDCS->spec2 = -1;
+ FDCS->need_configure = 1;
+ FDCS->perp_mode = 1;
+ FDCS->rawcmd = 0;
+ for (drive = 0; drive < N_DRIVE; drive++)
+ if (FDC(drive) == fdc &&
+ (mode || UDRS->track != NEED_1_RECAL))
+ UDRS->track = NEED_2_RECAL;
+}
+
+/* selects the fdc and drive, and enables the fdc's input/dma.
+ * Side effect: updates the globals `fdc' and `current_drive'.  Also
+ * deselects the other controller (when N_FDC > 1) and flags a reset if
+ * the controller does not report ready. */
+static void set_fdc(int drive)
+{
+ if (drive >= 0 && drive < N_DRIVE){
+ fdc = FDC(drive);
+ current_drive = drive;
+ }
+ if (fdc != 1 && fdc != 0) {
+ printk("bad fdc value\n");
+ return;
+ }
+ set_dor(fdc,~0,8); /* bit 3: enable DMA/irq gate */
+#if N_FDC > 1
+ set_dor(1-fdc, ~8, 0);
+#endif
+ if (FDCS->rawcmd == 2)
+ reset_fdc_info(1);
+ if (fd_inb(FD_STATUS) != STATUS_READY)
+ FDCS->reset = 1;
+}
+
+/* locks the driver.  Returns 0 on success; -1 if called with a zero
+ * usage count, -EBUSY if irq/dma cannot be grabbed, -EINTR if the wait
+ * for the busy driver was interrupted by a signal.
+ * NOTE(review): the `interruptible' argument is not used in this body —
+ * the sleep is always interruptible; confirm against callers. */
+static int lock_fdc(int drive, int interruptible)
+{
+ unsigned long flags;
+
+ if (!usage_count){
+ printk(KERN_ERR "trying to lock fdc while usage count=0\n");
+ return -1;
+ }
+ if(floppy_grab_irq_and_dma()==-1)
+ return -EBUSY;
+ INT_OFF;
+ /* sleep until the driver becomes free; a signal aborts the wait */
+ while (fdc_busy && NO_SIGNAL)
+ interruptible_sleep_on(&fdc_wait);
+ if (fdc_busy){
+ INT_ON;
+ return -EINTR;
+ }
+ fdc_busy = 1;
+ INT_ON;
+ command_status = FD_COMMAND_NONE;
+ reschedule_timeout(drive, "lock fdc", 0);
+ set_fdc(drive);
+ return 0;
+}
+
+/* NOTE(review): collapses every lock_fdc failure (-1/-EBUSY) to -EINTR. */
+#define LOCK_FDC(drive,interruptible) \
+if (lock_fdc(drive,interruptible)) return -EINTR;
+
+
+/* unlocks the driver: clears command state, stops the request timeout,
+ * releases irq/dma and wakes anybody sleeping in lock_fdc(). */
+static inline void unlock_fdc(void)
+{
+ raw_cmd = 0;
+ if (!fdc_busy)
+ DPRINT("FDC access conflict!\n");
+
+ if (DEVICE_INTR)
+ DPRINT("device interrupt still active at FDC release: %p!\n",
+ DEVICE_INTR);
+ command_status = FD_COMMAND_NONE;
+ del_timer(&fd_timeout);
+ cont = NULL;
+ fdc_busy = 0;
+ floppy_release_irq_and_dma();
+ wake_up(&fdc_wait);
+}
+
+/* switches the motor off after a given timeout.
+ * nr is the drive number, passed through the timer's data field. */
+static void motor_off_callback(unsigned long nr)
+{
+ unsigned char mask = ~(0x10 << UNIT(nr));
+
+ set_dor(FDC(nr), mask, 0);
+}
+
+/* One motor-off timer per drive; the fourth initializer field is the
+ * drive number handed to motor_off_callback (assumes the old
+ * {next, prev, expires, data, function} struct timer_list layout —
+ * TODO confirm against the timer header this tree uses). */
+static struct timer_list motor_off_timer[N_DRIVE] = {
+ { NULL, NULL, 0, 0, motor_off_callback },
+ { NULL, NULL, 0, 1, motor_off_callback },
+ { NULL, NULL, 0, 2, motor_off_callback },
+ { NULL, NULL, 0, 3, motor_off_callback },
+ { NULL, NULL, 0, 4, motor_off_callback },
+ { NULL, NULL, 0, 5, motor_off_callback },
+ { NULL, NULL, 0, 6, motor_off_callback },
+ { NULL, NULL, 0, 7, motor_off_callback }
+};
+
+/* schedules motor off: (re)arms the per-drive motor_off_timer.  Does
+ * nothing if the motor is already off. */
+static void floppy_off(unsigned int drive)
+{
+ unsigned long volatile delta;
+ register int fdc=FDC(drive);
+
+ if (!(FDCS->dor & (0x10 << UNIT(drive))))
+ return;
+
+ del_timer(motor_off_timer+drive);
+
+ /* make spindle stop in a position which minimizes spinup time
+ * next time.  delta is the phase of the spindle (in jiffies,
+ * modulo one revolution) derived from the rotations/second. */
+ if (UDP->rps){
+ delta = jiffies - UDRS->first_read_date + HZ -
+ UDP->spindown_offset;
+ delta = ((delta * UDP->rps) % HZ) / UDP->rps;
+ motor_off_timer[drive].expires = jiffies + UDP->spindown - delta;
+ }
+ add_timer(motor_off_timer+drive);
+}
+
+/*
+ * cycle through all N_DRIVE floppy drives, for disk change testing.
+ * stopping at current drive. This is done before any long operation, to
+ * be sure to have up to date disk change information.
+ */
+static void scandrives(void)
+{
+ int i, drive, saved_drive;
+
+ if (DP->select_delay)
+ return;
+
+ saved_drive = current_drive;
+ for (i=0; i < N_DRIVE; i++){
+ drive = (saved_drive + i + 1) % N_DRIVE;
+ if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
+ continue; /* skip closed drives */
+ set_fdc(drive);
+ /* selecting the drive runs disk_change() via set_dor */
+ if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
+ (0x10 << UNIT(drive))))
+ /* switch the motor off again, if it was off to
+ * begin with */
+ set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
+ }
+ set_fdc(saved_drive);
+}
+
+/* no-op continuation / handler placeholder */
+static void empty(void)
+{
+}
+
+/* task-queue entry used to defer interrupt handlers to bottom-half time */
+static struct tq_struct floppy_tq =
+{ 0, 0, 0, 0 };
+
+/* general-purpose driver timer (watchdog, spinup/select delays) */
+static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 };
+
+/* abort whatever the driver is currently doing: clear the pending
+ * interrupt handler, neutralize the task queue and stop fd_timer */
+static void cancel_activity(void)
+{
+ CLEAR_INTR;
+ floppy_tq.routine = (void *)(void *) empty;
+ del_timer(&fd_timer);
+}
+
+/* this function makes sure that the disk stays in the drive during the
+ * transfer.  Re-arms itself every HZ/10 via fd_timer; aborts the
+ * transfer and resets the fdc if the disk was pulled. */
+static void fd_watchdog(void)
+{
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from watchdog\n");
+ }
+#endif
+
+ if (disk_change(current_drive)){
+ DPRINT("disk removed during i/o\n");
+ cancel_activity();
+ cont->done(0);
+ reset_fdc();
+ } else {
+ del_timer(&fd_timer);
+ fd_timer.function = (timeout_fn) fd_watchdog;
+ fd_timer.expires = jiffies + HZ / 10;
+ add_timer(&fd_timer);
+ }
+}
+
+/* interrupt handler for the main r/w commands: stop the watchdog timer
+ * and hand over to the current continuation */
+static void main_command_interrupt(void)
+{
+ del_timer(&fd_timer);
+ cont->interrupt();
+}
+
+/* waits for a delay (spinup or select) to pass.
+ * `delay' is an absolute deadline in jiffies despite its name.  Returns
+ * 1 if the caller must back off (a reset was scheduled, or fd_timer was
+ * armed to call `function' at the deadline), 0 if the deadline already
+ * passed and the caller may proceed immediately. */
+static int wait_for_completion(int delay, timeout_fn function)
+{
+ if (FDCS->reset){
+ reset_fdc(); /* do the reset during sleep to win time
+ * if we don't need to sleep, it's a good
+ * occasion anyways */
+ return 1;
+ }
+
+ /* signed comparison so jiffies wraparound is handled */
+ if ((signed) (jiffies - delay) < 0){
+ del_timer(&fd_timer);
+ fd_timer.function = function;
+ fd_timer.expires = delay;
+ add_timer(&fd_timer);
+ return 1;
+ }
+ return 0;
+}
+
+/* On some boxes the idle HLT instruction disturbs pseudo-DMA transfers;
+ * these two helpers disable/re-enable it around DMA, guarded by a flag
+ * so the calls are idempotent.  INT_OFF/INT_ON protect the flag. */
+static int hlt_disabled=0;
+static void floppy_disable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (!hlt_disabled){
+ hlt_disabled=1;
+#ifdef HAVE_DISABLE_HLT
+ disable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+static void floppy_enable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (hlt_disabled){
+ hlt_disabled=0;
+#ifdef HAVE_DISABLE_HLT
+ enable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+
+/* Program the DMA controller for the transfer described by raw_cmd.
+ * Sanity checks (size 0, misaligned buffer, 64K-boundary crossing)
+ * abort the command and schedule an fdc reset instead. */
+static void setup_DMA(void)
+{
+ unsigned long flags;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (raw_cmd->length == 0){
+ int i;
+
+ printk("zero dma transfer size:");
+ for (i=0; i < raw_cmd->cmd_count; i++)
+ printk("%x,", raw_cmd->cmd[i]);
+ printk("\n");
+ cont->done(0);
+ FDCS->reset = 1;
+ return;
+ }
+ if ((long) raw_cmd->kernel_data % 512){
+ printk("non aligned address: %p\n", raw_cmd->kernel_data);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+ if (CROSS_64KB(raw_cmd->kernel_data, raw_cmd->length)) {
+ /* ISA DMA cannot cross a 64K physical boundary */
+ printk("DMA crossing 64-K boundary %p-%p\n",
+ raw_cmd->kernel_data,
+ raw_cmd->kernel_data + raw_cmd->length);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+#endif
+ INT_OFF;
+ fd_disable_dma();
+ fd_clear_dma_ff();
+ fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
+ fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ)?
+ DMA_MODE_READ : DMA_MODE_WRITE);
+ fd_set_dma_addr(virt_to_bus(raw_cmd->kernel_data));
+ fd_set_dma_count(raw_cmd->length);
+ virtual_dma_port = FDCS->address;
+ fd_enable_dma();
+ INT_ON;
+ floppy_disable_hlt();
+}
+
+void show_floppy(void);
+
+/* waits until the fdc becomes ready.
+ * Busy-polls the main status register up to 10000 times; returns the
+ * status byte on success, or -1 (and schedules a reset) on timeout or
+ * if a reset is already pending. */
+static int wait_til_ready(void)
+{
+ int counter, status;
+ if(FDCS->reset)
+ return -1;
+ for (counter = 0; counter < 10000; counter++) {
+ status = fd_inb(FD_STATUS);
+ if (status & STATUS_READY)
+ return status;
+ }
+ if (!initialising) {
+ DPRINT("Getstatus times out (%x) on fdc %d\n",
+ status, fdc);
+ show_floppy();
+ }
+ FDCS->reset = 1;
+ return -1;
+}
+
+/* sends a command byte to the fdc.
+ * Returns 0 on success, -1 (and schedules a reset) if the fdc is not
+ * ready to accept input.  With FLOPPY_SANITY_CHECK, logs every byte
+ * into the circular output_log for post-mortem via show_floppy(). */
+static int output_byte(char byte)
+{
+ int status;
+
+ if ((status = wait_til_ready()) < 0)
+ return -1;
+ /* fdc must be ready, in input direction, non-DMA */
+ if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY){
+ fd_outb(byte,FD_DATA);
+#ifdef FLOPPY_SANITY_CHECK
+ output_log[output_log_pos].data = byte;
+ output_log[output_log_pos].status = status;
+ output_log[output_log_pos].jiffies = jiffies;
+ output_log_pos = (output_log_pos + 1) % OLOGSIZE;
+#endif
+ return 0;
+ }
+ FDCS->reset = 1;
+ if (!initialising) {
+ DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
+ byte, fdc, status);
+ show_floppy();
+ }
+ return -1;
+}
+/* send a final command byte; on failure, reset and return from caller */
+#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;}
+
+/* gets the response from the fdc.
+ * Reads up to MAX_REPLIES result bytes into reply_buffer[]; returns the
+ * number of bytes read, or -1 (and schedules a reset) on protocol
+ * error/timeout. */
+static int result(void)
+{
+ int i, status;
+
+ for(i=0; i < MAX_REPLIES; i++) {
+ if ((status = wait_til_ready()) < 0)
+ break;
+ status &= STATUS_DIR|STATUS_READY|STATUS_BUSY|STATUS_DMA;
+ if ((status & ~STATUS_BUSY) == STATUS_READY){
+ /* result phase over: fdc back to idle/input */
+#ifdef FLOPPY_SANITY_CHECK
+ resultjiffies = jiffies;
+ resultsize = i;
+#endif
+ return i;
+ }
+ if (status == (STATUS_DIR|STATUS_READY|STATUS_BUSY))
+ reply_buffer[i] = fd_inb(FD_DATA);
+ else
+ break;
+ }
+ if(!initialising) {
+ DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
+ fdc, status, i);
+ show_floppy();
+ }
+ FDCS->reset = 1;
+ return -1;
+}
+
+#define MORE_OUTPUT -2
+/* does the fdc need more output?
+ * Returns MORE_OUTPUT if the fdc expects further command bytes,
+ * otherwise the result() byte count (the command was rejected or
+ * completed early), or -1 on error. */
+static int need_more_output(void)
+{
+ int status;
+ if( (status = wait_til_ready()) < 0)
+ return -1;
+ if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY)
+ return MORE_OUTPUT;
+ return result();
+}
+
+/* Set perpendicular mode as required, based on data rate, if supported.
+ * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
+ */
+static inline void perpendicular_mode(void)
+{
+ unsigned char perp_mode;
+
+ /* bit 6 of raw_cmd->rate requests perpendicular recording */
+ if (raw_cmd->rate & 0x40){
+ switch(raw_cmd->rate & 3){
+ case 0:
+ perp_mode=2; /* 500 Kbps */
+ break;
+ case 3:
+ perp_mode=3; /* 1 Mbps */
+ break;
+ default:
+ DPRINT("Invalid data rate for perpendicular mode!\n");
+ cont->done(0);
+ FDCS->reset = 1; /* convenient way to return to
+ * redo without too much hassle (deep
+ * stack et al.) */
+ return;
+ }
+ } else
+ perp_mode = 0;
+
+ if (FDCS->perp_mode == perp_mode)
+ return;
+ if (FDCS->version >= FDC_82077_ORIG) {
+ output_byte(FD_PERPENDICULAR);
+ output_byte(perp_mode);
+ FDCS->perp_mode = perp_mode;
+ } else if (perp_mode) {
+ DPRINT("perpendicular mode not supported by this FDC.\n");
+ }
+} /* perpendicular_mode */
+
+static int fifo_depth = 0xa; /* FIFO threshold, 0..15 */
+static int no_fifo = 0; /* bit 0x20 set disables the FIFO */
+
+/* Issue the CONFIGURE command (FIFO on, threshold, no pre-compensation).
+ * Returns 1 if the fdc accepted it, 0 if it doesn't know CONFIGURE. */
+static int fdc_configure(void)
+{
+ /* Turn on FIFO */
+ output_byte(FD_CONFIGURE);
+ if(need_more_output() != MORE_OUTPUT)
+ return 0;
+ output_byte(0);
+ output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
+ output_byte(0); /* pre-compensation from track
+ 0 upwards */
+ return 1;
+}
+
+#define NOMINAL_DTR 500
+
+/* Issue a "SPECIFY" command to set the step rate time, head unload time,
+ * head load time, and DMA disable flag to values needed by floppy.
+ *
+ * The value "dtr" is the data transfer rate in Kbps. It is needed
+ * to account for the data rate-based scaling done by the 82072 and 82077
+ * FDC types. This parameter is ignored for other types of FDCs (i.e.
+ * 8272a).
+ *
+ * Note that changing the data transfer rate has a (probably deleterious)
+ * effect on the parameters subject to scaling for 82072/82077 FDCs, so
+ * fdc_specify is called again after each data transfer rate
+ * change.
+ *
+ * srt: 1000 to 16000 in microseconds
+ * hut: 16 to 240 milliseconds
+ * hlt: 2 to 254 milliseconds
+ *
+ * These values are rounded up to the next highest available delay time.
+ */
+static void fdc_specify(void)
+{
+ unsigned char spec1, spec2;
+ int srt, hlt, hut;
+ unsigned long dtr = NOMINAL_DTR;
+ unsigned long scale_dtr = NOMINAL_DTR;
+ int hlt_max_code = 0x7f;
+ int hut_max_code = 0xf;
+
+ /* configure the FIFO once per controller, if supported */
+ if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
+ fdc_configure();
+ FDCS->need_configure = 0;
+ /*DPRINT("FIFO enabled\n");*/
+ }
+
+ switch (raw_cmd->rate & 0x03) {
+ case 3:
+ dtr = 1000;
+ break;
+ case 1:
+ dtr = 300;
+ if (FDCS->version >= FDC_82078) {
+ /* chose the default rate table, not the one
+ * where 1 = 2 Mbps */
+ output_byte(FD_DRIVESPEC);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(UNIT(current_drive));
+ output_byte(0xc0);
+ }
+ }
+ break;
+ case 2:
+ dtr = 250;
+ break;
+ }
+
+ if (FDCS->version >= FDC_82072) {
+ scale_dtr = dtr;
+ hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
+ hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
+ }
+
+ /* Convert step rate from microseconds to milliseconds and 4 bits */
+ srt = 16 - (DP->srt*scale_dtr/1000 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ SUPBOUND(srt, 0xf);
+ INFBOUND(srt, 0);
+
+ hlt = (DP->hlt*scale_dtr/2 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hlt < 0x01)
+ hlt = 0x01;
+ else if (hlt > 0x7f)
+ hlt = hlt_max_code;
+
+ hut = (DP->hut*scale_dtr/16 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hut < 0x1)
+ hut = 0x1;
+ else if (hut > 0xf)
+ hut = hut_max_code;
+
+ /* spec1: SRT in high nibble, HUT in low; spec2: HLT<<1 | ND bit */
+ spec1 = (srt << 4) | hut;
+ spec2 = (hlt << 1) | (use_virtual_dma & 1);
+
+ /* If these parameters did not change, just return with success */
+ if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
+ /* Go ahead and set spec1 and spec2 */
+ output_byte(FD_SPECIFY);
+ output_byte(FDCS->spec1 = spec1);
+ output_byte(FDCS->spec2 = spec2);
+ }
+} /* fdc_specify */
+
+/* Set the FDC's data transfer rate on behalf of the specified drive.
+ * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
+ * of the specify command (i.e. using the fdc_specify function).
+ * Returns 0 if the rate was already set; otherwise non-zero after
+ * arming a 2-jiffy stabilization delay that re-enters floppy_ready. */
+static int fdc_dtr(void)
+{
+ /* If data rate not already set to desired value, set it. */
+ if ((raw_cmd->rate & 3) == FDCS->dtr)
+ return 0;
+
+ /* Set dtr */
+ fd_outb(raw_cmd->rate & 3, FD_DCR);
+
+ /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
+ * need a stabilization period of several milliseconds to be
+ * enforced after data rate changes before R/W operations.
+ * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
+ */
+ FDCS->dtr = raw_cmd->rate & 3;
+ return(wait_for_completion(jiffies+2*HZ/100,
+ (timeout_fn) floppy_ready));
+} /* fdc_dtr */
+
+/* append the current chs position (from the reply buffer) to an error
+ * message already started with printk */
+static void tell_sector(void)
+{
+ printk(": track %d, head %d, sector %d, size %d",
+ R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
+} /* tell_sector */
+
+
+/*
+ * OK, this error interpreting routine is called after a
+ * DMA read/write has succeeded
+ * or failed, so we check the results, and copy any buffers.
+ * hhb: Added better error reporting.
+ * ak: Made this into a separate routine.
+ */
+static int interpret_errors(void)
+{
+ char bad;
+
+ if (inr!=7) {
+ DPRINT("-- FDC reply error");
+ FDCS->reset = 1;
+ return 1;
+ }
+
+ /* check IC to find cause of interrupt */
+ switch (ST0 & ST0_INTR) {
+ case 0x40: /* error occurred during command execution */
+ if (ST1 & ST1_EOC)
+ return 0; /* occurs with pseudo-DMA */
+ bad = 1;
+ if (ST1 & ST1_WP) {
+ DPRINT("Drive is write protected\n");
+ CLEARF(FD_DISK_WRITABLE);
+ cont->done(0);
+ bad = 2;
+ } else if (ST1 & ST1_ND) {
+ SETF(FD_NEED_TWADDLE);
+ } else if (ST1 & ST1_OR) {
+ if (DP->flags & FTD_MSG)
+ DPRINT("Over/Underrun - retrying\n");
+ bad = 0;
+ }else if (*errors >= DP->max_errors.reporting){
+ DPRINT("");
+ if (ST0 & ST0_ECE) {
+ printk("Recalibrate failed!");
+ } else if (ST2 & ST2_CRC) {
+ printk("data CRC error");
+ tell_sector();
+ } else if (ST1 & ST1_CRC) {
+ printk("CRC error");
+ tell_sector();
+ } else if ((ST1 & (ST1_MAM|ST1_ND)) || (ST2 & ST2_MAM)) {
+ if (!probing) {
+ printk("sector not found");
+ tell_sector();
+ } else
+ printk("probe failed...");
+ } else if (ST2 & ST2_WC) { /* seek error */
+ printk("wrong cylinder");
+ } else if (ST2 & ST2_BC) { /* cylinder marked as bad */
+ printk("bad cylinder");
+ } else {
+ printk("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x", ST0, ST1, ST2);
+ tell_sector();
+ }
+ printk("\n");
+
+ }
+ if (ST2 & ST2_WC || ST2 & ST2_BC)
+ /* wrong cylinder => recal */
+ DRS->track = NEED_2_RECAL;
+ return bad;
+ case 0x80: /* invalid command given */
+ DPRINT("Invalid FDC command given!\n");
+ cont->done(0);
+ return 2;
+ case 0xc0:
+ DPRINT("Abnormal termination caused by polling\n");
+ cont->error();
+ return 2;
+ default: /* (0) Normal command termination */
+ return 0;
+ }
+}
+
+/*
+ * This routine is called when everything should be correctly set up
+ * for the transfer (i.e. floppy motor is on, the correct floppy is
+ * selected, and the head is sitting on the right track).
+ */
+static void setup_rw_floppy(void)
+{
+ int i,ready_date,r, flags,dflags;
+ timeout_fn function;
+
+ flags = raw_cmd->flags;
+ if (flags & (FD_RAW_READ | FD_RAW_WRITE))
+ flags |= FD_RAW_INTR;
+
+ if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)){
+ ready_date = DRS->spinup_date + DP->spinup;
+ /* If spinup will take a long time, rerun scandrives
+ * again just before spinup completion. Beware that
+ * after scandrives, we must again wait for selection.
+ */
+ if ((signed) (ready_date - jiffies) > DP->select_delay){
+ ready_date -= DP->select_delay;
+ function = (timeout_fn) floppy_start;
+ } else
+ function = (timeout_fn) setup_rw_floppy;
+
+ /* wait until the floppy is spinning fast enough */
+ if (wait_for_completion(ready_date,function))
+ return;
+ }
+ dflags = DRS->flags;
+
+ if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
+ setup_DMA();
+
+ if (flags & FD_RAW_INTR)
+ SET_INTR(main_command_interrupt);
+
+ /* shove the whole command byte string at the fdc */
+ r=0;
+ for (i=0; i< raw_cmd->cmd_count; i++)
+ r|=output_byte(raw_cmd->cmd[i]);
+
+#ifdef DEBUGT
+ debugt("rw_command: ");
+#endif
+ if (r){
+ cont->error();
+ reset_fdc();
+ return;
+ }
+
+ if (!(flags & FD_RAW_INTR)){
+ /* command with no interrupt: poll for the result now */
+ inr = result();
+ cont->interrupt();
+ } else if (flags & FD_RAW_NEED_DISK)
+ fd_watchdog();
+}
+
+/* set when a seek was issued with the motor off to clear the DCL; the
+ * resulting track value cannot be trusted for NEWCHANGE bookkeeping */
+static int blind_seek;
+
+/*
+ * This is the routine called after every seek (or recalibrate) interrupt
+ * from the floppy controller.
+ */
+static void seek_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("seek interrupt:");
+#endif
+ /* expect 2 result bytes and "seek end" without error bits in ST0 */
+ if (inr != 2 || (ST0 & 0xF8) != 0x20) {
+ DPRINT("seek failed\n");
+ DRS->track = NEED_2_RECAL;
+ cont->error();
+ cont->redo();
+ return;
+ }
+ if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek){
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of effective seek\n");
+ DPRINT("jiffies=%ld\n", jiffies);
+ }
+#endif
+ CLEARF(FD_DISK_NEWCHANGE); /* effective seek */
+ DRS->select_date = jiffies;
+ }
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+/* If a verify is pending, issue GETSTATUS and update the
+ * FD_DISK_WRITABLE flag from the write-protect bit of ST3. */
+static void check_wp(void)
+{
+ if (TESTF(FD_VERIFY)) {
+ /* check write protection */
+ output_byte(FD_GETSTATUS);
+ output_byte(UNIT(current_drive));
+ if (result() != 1){
+ FDCS->reset = 1;
+ return;
+ }
+ CLEARF(FD_VERIFY);
+ CLEARF(FD_NEED_TWADDLE);
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("checking whether disk is write protected\n");
+ DPRINT("wp=%x\n",ST3 & 0x40);
+ }
+#endif
+ if (!(ST3 & 0x40))
+ SETF(FD_DISK_WRITABLE);
+ else
+ CLEARF(FD_DISK_WRITABLE);
+ }
+}
+
+/* Decide whether a seek (or recalibrate) is needed before the transfer,
+ * and issue it; otherwise go straight to setup_rw_floppy().  Also
+ * performs the "dummy seek" trick used to clear a pending media-changed
+ * condition. */
+static void seek_floppy(void)
+{
+ int track;
+
+ blind_seek=0;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from seek\n");
+ }
+#endif
+
+ if (!TESTF(FD_DISK_NEWCHANGE) &&
+ disk_change(current_drive) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK)){
+ /* the media changed flag should be cleared after the seek.
+ * If it isn't, this means that there is really no disk in
+ * the drive.
+ */
+ SETF(FD_DISK_CHANGED);
+ cont->done(0);
+ cont->redo();
+ return;
+ }
+ if (DRS->track <= NEED_1_RECAL){
+ recalibrate_floppy();
+ return;
+ } else if (TESTF(FD_DISK_NEWCHANGE) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK) &&
+ (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
+ /* we seek to clear the media-changed condition. Does anybody
+ * know a more elegant way, which works on all drives? */
+ if (raw_cmd->track)
+ track = raw_cmd->track - 1;
+ else {
+ /* seek quietly with the motor off if the drive
+ * tolerates it */
+ if (DP->flags & FD_SILENT_DCL_CLEAR){
+ set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
+ blind_seek = 1;
+ raw_cmd->flags |= FD_RAW_NEED_SEEK;
+ }
+ track = 1;
+ }
+ } else {
+ check_wp();
+ if (raw_cmd->track != DRS->track &&
+ (raw_cmd->flags & FD_RAW_NEED_SEEK))
+ track = raw_cmd->track;
+ else {
+ setup_rw_floppy();
+ return;
+ }
+ }
+
+ SET_INTR(seek_interrupt);
+ output_byte(FD_SEEK);
+ output_byte(UNIT(current_drive));
+ LAST_OUT(track);
+#ifdef DEBUGT
+ debugt("seek command:");
+#endif
+}
+
+/* interrupt handler for RECALIBRATE: interpret ST0 and update the
+ * drive's track / recalibration state, then re-enter floppy_ready() */
+static void recal_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("recal interrupt:");
+#endif
+ if (inr !=2)
+ FDCS->reset = 1;
+ else if (ST0 & ST0_ECE) {
+ switch(DRS->track){
+ case NEED_1_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 1 recal:");
+#endif
+ /* after a second recalibrate, we still haven't
+ * reached track 0. Probably no drive. Raise an
+ * error, as failing immediately might upset
+ * computers possessed by the Devil :-) */
+ cont->error();
+ cont->redo();
+ return;
+ case NEED_2_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 2 recal:");
+#endif
+ /* If we already did a recalibrate,
+ * and we are not at track 0, this
+ * means we have moved. (The only way
+ * not to move at recalibration is to
+ * be already at track 0.) Clear the
+ * new change flag */
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of second recalibrate\n");
+ }
+#endif
+
+ CLEARF(FD_DISK_NEWCHANGE);
+ DRS->select_date = jiffies;
+ /* fall through */
+ default:
+#ifdef DEBUGT
+ debugt("recal interrupt default:");
+#endif
+ /* Recalibrate moves the head by at
+ * most 80 steps. If after one
+ * recalibrate we haven't reached
+ * track 0, this might mean that we
+ * started beyond track 80. Try
+ * again. */
+ DRS->track = NEED_1_RECAL;
+ break;
+ }
+ } else
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+/* debug helper: dump the inr reply bytes with a message prefix */
+static void print_result(char *message, int inr)
+{
+ int i;
+
+ DPRINT("%s ", message);
+ if (inr >= 0)
+ for (i=0; i<inr; i++)
+ printk("repl[%d]=%x ", i, reply_buffer[i]);
+ printk("\n");
+}
+
+/* interrupt handler: reads the fdc result, drains pending SENSEI
+ * interrupts, and dispatches to the handler installed via SET_INTR
+ * (possibly deferred through the immediate task queue). */
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ void (*handler)(void) = DEVICE_INTR;
+ int do_print;
+
+ lasthandler = handler;
+ interruptjiffies = jiffies;
+
+ fd_disable_dma();
+ floppy_enable_hlt();
+ CLEAR_INTR;
+ if (fdc >= N_FDC || FDCS->address == -1){
+ /* we don't even know which FDC is the culprit */
+ printk("DOR0=%x\n", fdc_state[0].dor);
+ printk("floppy interrupt on bizarre fdc %d\n",fdc);
+ printk("handler=%p\n", handler);
+ is_alive("bizarre fdc");
+ return;
+ }
+
+ FDCS->reset = 0;
+ /* We have to clear the reset flag here, because apparently on boxes
+ * with level triggered interrupts (PS/2, Sparc, ...), it is needed to
+ * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
+ * emission of the SENSEI's.
+ * It is OK to emit floppy commands because we are in an interrupt
+ * handler here, and thus we have to fear no interference of other
+ * activity.
+ */
+
+ do_print = !handler && print_unex && !initialising;
+
+ inr = result();
+ if(do_print)
+ print_result("unexpected interrupt", inr);
+ if (inr == 0){
+ /* no result phase: issue SENSEI until we hit our drive
+ * (at most 4 times, for drive polling) */
+ int max_sensei = 4;
+ do {
+ output_byte(FD_SENSEI);
+ inr = result();
+ if(do_print)
+ print_result("sensei", inr);
+ max_sensei--;
+ } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei);
+ }
+ if (handler) {
+ if(intr_count >= 2) {
+ /* expected interrupt: defer the handler to
+ * bottom-half time via the immediate queue */
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task_irq(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ } else
+ handler();
+ } else
+ FDCS->reset = 1;
+ is_alive("normal interrupt end");
+}
+
+/* issue a RECALIBRATE for the current drive; completion is handled by
+ * recal_interrupt() */
+static void recalibrate_floppy(void)
+{
+#ifdef DEBUGT
+ debugt("recalibrate floppy:");
+#endif
+ SET_INTR(recal_interrupt);
+ output_byte(FD_RECALIBRATE);
+ LAST_OUT(UNIT(current_drive));
+}
+
+/*
+ * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
+ */
+static void reset_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("reset interrupt:");
+#endif
+ result(); /* get the status ready for set_fdc */
+ if (FDCS->reset) {
+ printk("reset set in interrupt, calling %p\n", cont->error);
+ cont->error(); /* a reset just after a reset. BAD! */
+ }
+ cont->redo();
+}
+
+/*
+ * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
+ * or by setting the self clearing bit 7 of STATUS (newer FDCs)
+ */
+static void reset_fdc(void)
+{
+ SET_INTR(reset_interrupt);
+ FDCS->reset = 0;
+ reset_fdc_info(0);
+
+ /* Pseudo-DMA may intercept 'reset finished' interrupt. */
+ /* Irrelevant for systems with true DMA (i386). */
+ fd_disable_dma();
+
+ if (FDCS->version >= FDC_82072A)
+ /* soft reset via DSR, preserving the data rate bits */
+ fd_outb(0x80 | (FDCS->dtr &3), FD_STATUS);
+ else {
+ fd_outb(FDCS->dor & ~0x04, FD_DOR);
+ udelay(FD_RESET_DELAY);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+}
+
+/* dump the whole driver state to the console, for post-mortem debugging
+ * (called from timeouts and protocol errors) */
+void show_floppy(void)
+{
+ int i;
+
+ printk("\n");
+ printk("floppy driver state\n");
+ printk("-------------------\n");
+ printk("now=%ld last interrupt=%d last called handler=%p\n",
+ jiffies, interruptjiffies, lasthandler);
+
+
+#ifdef FLOPPY_SANITY_CHECK
+ printk("timeout_message=%s\n", timeout_message);
+ printk("last output bytes:\n");
+ for (i=0; i < OLOGSIZE; i++)
+ printk("%2x %2x %ld\n",
+ output_log[(i+output_log_pos) % OLOGSIZE].data,
+ output_log[(i+output_log_pos) % OLOGSIZE].status,
+ output_log[(i+output_log_pos) % OLOGSIZE].jiffies);
+ printk("last result at %d\n", resultjiffies);
+ printk("last redo_fd_request at %d\n", lastredo);
+ for (i=0; i<resultsize; i++){
+ printk("%2x ", reply_buffer[i]);
+ }
+ printk("\n");
+#endif
+
+ printk("status=%x\n", fd_inb(FD_STATUS));
+ printk("fdc_busy=%d\n", fdc_busy);
+ if (DEVICE_INTR)
+ printk("DEVICE_INTR=%p\n", DEVICE_INTR);
+ if (floppy_tq.sync)
+ printk("floppy_tq.routine=%p\n", floppy_tq.routine);
+ if (fd_timer.prev)
+ printk("fd_timer.function=%p\n", fd_timer.function);
+ if (fd_timeout.prev){
+ printk("timer_table=%p\n",fd_timeout.function);
+ printk("expires=%ld\n",fd_timeout.expires-jiffies);
+ printk("now=%ld\n",jiffies);
+ }
+ printk("cont=%p\n", cont);
+ printk("CURRENT=%p\n", CURRENT);
+ printk("command_status=%d\n", command_status);
+ printk("\n");
+}
+
+/* called when the request timeout fires: dump state, abort all
+ * activity, schedule a reset and fail the current continuation */
+static void floppy_shutdown(void)
+{
+ if (!initialising)
+ show_floppy();
+ cancel_activity();
+ sti();
+
+ floppy_enable_hlt();
+ fd_disable_dma();
+ /* avoid dma going to a random drive after shutdown */
+
+ if (!initialising)
+ DPRINT("floppy timeout called\n");
+ FDCS->reset = 1;
+ if (cont){
+ cont->done(0);
+ cont->redo(); /* this will recall reset when needed */
+ } else {
+ printk("no cont in shutdown!\n");
+ process_fd_request();
+ }
+ is_alive("floppy shutdown");
+}
+/*typedef void (*timeout_fn)(unsigned long);*/
+
+/* start motor, check media-changed condition and write protection.
+ * Selects the current drive, turns its motor on unless FD_RAW_NO_MOTOR,
+ * and arms `function' to run once the select delay has elapsed.
+ * Returns non-zero if the caller must back off and wait. */
+static int start_motor(void (*function)(void) )
+{
+ int mask, data;
+
+ mask = 0xfc; /* keep motor bits, replace drive-select bits */
+ data = UNIT(current_drive);
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)){
+ if (!(FDCS->dor & (0x10 << UNIT(current_drive)))){
+ set_debugt();
+ /* no read since this drive is running */
+ DRS->first_read_date = 0;
+ /* note motor start time if motor is not yet running */
+ DRS->spinup_date = jiffies;
+ data |= (0x10 << UNIT(current_drive));
+ }
+ } else
+ if (FDCS->dor & (0x10 << UNIT(current_drive)))
+ mask &= ~(0x10 << UNIT(current_drive));
+
+ /* starts motor and selects floppy */
+ del_timer(motor_off_timer + current_drive);
+ set_dor(fdc, mask, data);
+
+ /* wait_for_completion also schedules reset if needed. */
+ return(wait_for_completion(DRS->select_date+DP->select_delay,
+ (timeout_fn) function));
+}
+
+/* re-entrant state machine step: once motor and data rate are settled,
+ * set up perpendicular/specify parameters and seek, or start the
+ * transfer directly */
+static void floppy_ready(void)
+{
+ CHECK_RESET;
+ if (start_motor(floppy_ready)) return;
+ if (fdc_dtr()) return;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from floppy_ready\n");
+ }
+#endif
+
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
+ disk_change(current_drive) &&
+ !DP->select_delay)
+ twaddle(); /* this clears the dcl on certain drive/controller
+ * combinations */
+
+ if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)){
+ perpendicular_mode();
+ fdc_specify(); /* must be done here because of hut, hlt ... */
+ seek_floppy();
+ } else
+ setup_rw_floppy();
+}
+
+/* entry point for a queued operation: restart the request timeout,
+ * refresh disk-change state on all drives, and kick floppy_ready() */
+static void floppy_start(void)
+{
+ reschedule_timeout(CURRENTD, "floppy start", 0);
+
+ scandrives();
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("setting NEWCHANGE in floppy_start\n");
+ }
+#endif
+ SETF(FD_DISK_NEWCHANGE);
+ floppy_ready();
+}
+
+/*
+ * ========================================================================
+ * here ends the bottom half. Exported routines are:
+ * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
+ * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
+ * Initialization also uses output_byte, result, set_dor, floppy_interrupt
+ * and set_dor.
+ * ========================================================================
+ */
+/*
+ * General purpose continuations.
+ * ==============================
+ */
+
+/* continuation used by wait_til_done(): marks the command finished and
+ * wakes the sleeping caller */
+static void do_wakeup(void)
+{
+ reschedule_timeout(MAXTIMEOUT, "do wakeup", 0);
+ cont = 0;
+ command_status += 2;
+ wake_up(&command_done);
+}
+
+/* cont_t initializers below appear to be in {interrupt, redo, error,
+ * done} order (matches format_cont further down) — TODO confirm against
+ * the struct declaration, which is outside this chunk. */
+static struct cont_t wakeup_cont={
+ empty,
+ do_wakeup,
+ empty,
+ (done_f)empty
+};
+
+
+static struct cont_t intr_cont={
+ empty,
+ process_fd_request,
+ empty,
+ (done_f) empty
+};
+
+/* queue `handler' on the immediate task queue and sleep until the
+ * command completes.  Returns 0 on success, -EIO on command failure,
+ * -EINTR if interrupted by a signal (in which case all activity is
+ * cancelled and the fdc is reset). */
+static int wait_til_done(void (*handler)(void), int interruptible)
+{
+ int ret;
+ unsigned long flags;
+
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ INT_OFF;
+ /* command_status reaches 2 when do_wakeup() runs */
+ while(command_status < 2 && NO_SIGNAL){
+ is_alive("wait_til_done");
+ if (interruptible)
+ interruptible_sleep_on(&command_done);
+ else
+ sleep_on(&command_done);
+ }
+ if (command_status < 2){
+ cancel_activity();
+ cont = &intr_cont;
+ reset_fdc();
+ INT_ON;
+ return -EINTR;
+ }
+ INT_ON;
+
+ if (FDCS->reset)
+ command_status = FD_COMMAND_ERROR;
+ if (command_status == FD_COMMAND_OKAY)
+ ret=0;
+ else
+ ret=-EIO;
+ command_status = FD_COMMAND_NONE;
+ return ret;
+}
+
+/* generic continuation helpers: record the result and switch to the
+ * wakeup continuation so wait_til_done() returns */
+static void generic_done(int result)
+{
+ command_status = result;
+ cont = &wakeup_cont;
+}
+
+/* report success to the current continuation */
+static void generic_success(void)
+{
+ cont->done(1);
+}
+
+/* report failure to the current continuation */
+static void generic_failure(void)
+{
+ cont->done(0);
+}
+
+/* success + immediately schedule the next step */
+static void success_and_wakeup(void)
+{
+ generic_success();
+ cont->redo();
+}
+
+
+/*
+ * formatting and rw support.
+ * ==========================
+ */
+
+/* advance DRS->probed_format to the next autodetect entry that has a
+ * usable geometry.  Returns 0 on success, 1 when the autodetect list
+ * (max 8 entries) is exhausted (probed_format is then reset to 0). */
+static int next_valid_format(void)
+{
+ int probed_format;
+
+ probed_format = DRS->probed_format;
+ while(1){
+ if (probed_format >= 8 ||
+ !DP->autodetect[probed_format]){
+ DRS->probed_format = 0;
+ return 1;
+ }
+ if (floppy_type[DP->autodetect[probed_format]].sect){
+ DRS->probed_format = probed_format;
+ return 0;
+ }
+ probed_format++;
+ }
+}
+
+/* error continuation: bump the error counters and escalate (abort,
+ * reset, or recalibrate) once the per-drive thresholds are exceeded */
+static void bad_flp_intr(void)
+{
+ if (probing){
+ DRS->probed_format++;
+ if (!next_valid_format())
+ return;
+ }
+ (*errors)++;
+ INFBOUND(DRWE->badness, *errors);
+ if (*errors > DP->max_errors.abort)
+ cont->done(0);
+ if (*errors > DP->max_errors.reset)
+ FDCS->reset = 1;
+ else if (*errors > DP->max_errors.recal)
+ DRS->track = NEED_2_RECAL;
+}
+
+/* select the geometry for DEVICE: an explicit type encoded in the minor
+ * number wins over the autodetected per-drive type */
+static void set_floppy(kdev_t device)
+{
+ if (TYPE(device))
+ _floppy = TYPE(device) + floppy_type;
+ else
+ _floppy = current_type[ DRIVE(device) ];
+}
+
+/*
+ * formatting support.
+ * ===================
+ */
+/* interrupt continuation for FORMAT: translate interpret_errors() into
+ * done/error callbacks and retry via redo */
+static void format_interrupt(void)
+{
+ switch (interpret_errors()){
+ case 1:
+ cont->error();
+ /* fall through */
+ case 2:
+ break;
+ case 0:
+ cont->done(1);
+ }
+ cont->redo();
+}
+
+#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2)
+/* mask off the MFM bit of command y when the geometry asks for FM */
+#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1))
+#define CT(x) ((x) | 0x40)
+/* Build the raw FORMAT command and the per-sector ID field table (in
+ * floppy_track_buffer) for the given physical track, including sector
+ * interleave and skew across heads/cylinders. */
+static void setup_format_params(int track)
+{
+ struct fparm {
+ unsigned char track,head,sect,size;
+ } *here = (struct fparm *)floppy_track_buffer;
+ int il,n;
+ int count,head_shift,track_shift;
+
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->track = track;
+
+ raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
+ FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+ raw_cmd->rate = _floppy->rate & 0x43;
+ raw_cmd->cmd_count = NR_F;
+ COMMAND = FM_MODE(_floppy,FD_FORMAT);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,format_req.head);
+ F_SIZECODE = FD_SIZECODE(_floppy);
+ F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
+ F_GAP = _floppy->fmt_gap;
+ F_FILL = FD_FILL_BYTE;
+
+ raw_cmd->kernel_data = floppy_track_buffer;
+ raw_cmd->length = 4 * F_SECT_PER_TRACK;
+
+ /* allow for about 30ms for data transport per track */
+ head_shift = (F_SECT_PER_TRACK + 5) / 6;
+
+ /* a ``cylinder'' is two tracks plus a little stepping time */
+ track_shift = 2 * head_shift + 3;
+
+ /* position of logical sector 1 on this track */
+ n = (track_shift * format_req.track + head_shift * format_req.head)
+ % F_SECT_PER_TRACK;
+
+ /* determine interleave */
+ il = 1;
+ if (_floppy->fmt_gap < 0x22)
+ il++;
+
+ /* initialize field */
+ for (count = 0; count < F_SECT_PER_TRACK; ++count) {
+ here[count].track = format_req.track;
+ here[count].head = format_req.head;
+ here[count].sect = 0;
+ here[count].size = F_SIZECODE;
+ }
+ /* place logical sectors */
+ for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
+ here[n].sect = count;
+ n = (n+il) % F_SECT_PER_TRACK;
+ if (here[n].sect) { /* sector busy, find next free sector */
+ ++n;
+ if (n>= F_SECT_PER_TRACK) {
+ n-=F_SECT_PER_TRACK;
+ while (here[n].sect) ++n;
+ }
+ }
+ }
+}
+
+/* (Re)start formatting of the requested track: invalidate the track
+ * buffer, build the sector-ID table and kick the FDC. */
+static void redo_format(void)
+{
+	buffer_track = -1;
+	setup_format_params(format_req.track << STRETCH(_floppy));
+	floppy_start();
+#ifdef DEBUGT
+	debugt("queue format request");
+#endif
+}
+
+/* Continuation used while formatting; hook order matches the other
+ * cont_t initializers in this file: interrupt, redo, error, done. */
+static struct cont_t format_cont={
+	format_interrupt,
+	redo_format,
+	bad_flp_intr,
+	generic_done };
+
+/* FDFMTTRK backend: validate the requested track/head against the
+ * current geometry and run the format state machine to completion.
+ * Returns 0 or a negative errno.  NOTE(review): LOCK_FDC and IWAIT are
+ * macros defined elsewhere; `ret' is presumably assigned inside them
+ * (it is never set explicitly here) -- confirm against the macro
+ * definitions. */
+static int do_format(kdev_t device, struct format_descr *tmp_format_req)
+{
+	int ret;
+	int drive=DRIVE(device);
+
+	LOCK_FDC(drive,1);
+	set_floppy(device);
+	/* reject impossible geometries and sector sizes that do not
+	 * divide the track evenly */
+	if (!_floppy ||
+	    _floppy->track > DP->tracks ||
+	    tmp_format_req->track >= _floppy->track ||
+	    tmp_format_req->head >= _floppy->head ||
+	    (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
+	    !_floppy->fmt_gap) {
+		process_fd_request();
+		return -EINVAL;
+	}
+	format_req = *tmp_format_req;
+	format_errors = 0;
+	cont = &format_cont;
+	errors = &format_errors;
+	IWAIT(redo_format);
+	process_fd_request();
+	return ret;
+}
+
+/*
+ * Buffer read/write and support
+ * =============================
+ */
+
+/* new request_done. Can handle physical sectors which are smaller than a
+ * logical buffer */
+/* Complete the current block-device request.  On success, retire as
+ * many chained buffer heads as were actually transferred (handles
+ * physical sectors smaller than a logical buffer); on failure, record
+ * write-error statistics and fail the request. */
+static void request_done(int uptodate)
+{
+	int block;
+
+	probing = 0;
+	reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
+
+	if (!CURRENT){
+		DPRINT("request list destroyed in floppy request done\n");
+		return;
+	}
+
+	if (uptodate){
+		/* maintain values for invalidation on geometry
+		 * change */
+		block = current_count_sectors + CURRENT->sector;
+		INFBOUND(DRS->maxblock, block);
+		if (block > _floppy->sect)
+			DRS->maxtrack = 1;
+
+		/* unlock chained buffers */
+		while (current_count_sectors && CURRENT &&
+		       current_count_sectors >= CURRENT->current_nr_sectors){
+			current_count_sectors -= CURRENT->current_nr_sectors;
+			CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+			CURRENT->sector += CURRENT->current_nr_sectors;
+			end_request(1);
+		}
+		if (current_count_sectors && CURRENT){
+			/* "unlock" last subsector */
+			CURRENT->buffer += current_count_sectors <<9;
+			CURRENT->current_nr_sectors -= current_count_sectors;
+			CURRENT->nr_sectors -= current_count_sectors;
+			CURRENT->sector += current_count_sectors;
+			return;
+		}
+
+		if (current_count_sectors && !CURRENT)
+			DPRINT("request list destroyed in floppy request done\n");
+
+	} else {
+		if (CURRENT->cmd == WRITE) {
+			/* record write error information */
+			DRWE->write_errors++;
+			if (DRWE->write_errors == 1) {
+				DRWE->first_error_sector = CURRENT->sector;
+				DRWE->first_error_generation = DRS->generation;
+			}
+			DRWE->last_error_sector = CURRENT->sector;
+			DRWE->last_error_generation = DRS->generation;
+		}
+		end_request(0);
+	}
+}
+
+/* Interrupt handler evaluating the result of the r/w operation */
+static void rw_interrupt(void)
+{
+ int nr_sectors, ssize, eoc;
+
+ if (!DRS->first_read_date)
+ DRS->first_read_date = jiffies;
+
+ nr_sectors = 0;
+ CODE2SIZE;
+
+ if(ST1 & ST1_EOC)
+ eoc = 1;
+ else
+ eoc = 0;
+ nr_sectors = ((R_TRACK-TRACK)*_floppy->head+R_HEAD-HEAD) *
+ _floppy->sect + ((R_SECTOR-SECTOR+eoc) << SIZECODE >> 2) -
+ (sector_t % _floppy->sect) % ssize;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (nr_sectors > current_count_sectors + ssize -
+ (current_count_sectors + sector_t) % ssize +
+ sector_t % ssize){
+ DPRINT("long rw: %x instead of %lx\n",
+ nr_sectors, current_count_sectors);
+ printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
+ printk("rh=%d h=%d\n", R_HEAD, HEAD);
+ printk("rt=%d t=%d\n", R_TRACK, TRACK);
+ printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
+ sector_t, ssize);
+ }
+#endif
+ INFBOUND(nr_sectors,0);
+ SUPBOUND(current_count_sectors, nr_sectors);
+
+ switch (interpret_errors()){
+ case 2:
+ cont->redo();
+ return;
+ case 1:
+ if (!current_count_sectors){
+ cont->error();
+ cont->redo();
+ return;
+ }
+ break;
+ case 0:
+ if (!current_count_sectors){
+ cont->redo();
+ return;
+ }
+ current_type[current_drive] = _floppy;
+ floppy_sizes[TOMINOR(current_drive) ]= _floppy->size>>1;
+ break;
+ }
+
+ if (probing) {
+ if (DP->flags & FTD_MSG)
+ DPRINT("Auto-detected floppy type %s in fd%d\n",
+ _floppy->name,current_drive);
+ current_type[current_drive] = _floppy;
+ floppy_sizes[TOMINOR(current_drive)] = _floppy->size >> 1;
+ probing = 0;
+ }
+
+ if (CT(COMMAND) != FD_READ ||
+ raw_cmd->kernel_data == CURRENT->buffer){
+ /* transfer directly from buffer */
+ cont->done(1);
+ } else if (CT(COMMAND) == FD_READ){
+ buffer_track = raw_cmd->track;
+ buffer_drive = current_drive;
+ INFBOUND(buffer_max, nr_sectors + sector_t);
+ }
+ cont->redo();
+}
+
+/* Compute maximal contiguous buffer size. */
+static int buffer_chain_size(void)
+{
+ struct buffer_head *bh;
+ int size;
+ char *base;
+
+ base = CURRENT->buffer;
+ size = CURRENT->current_nr_sectors << 9;
+ bh = CURRENT->bh;
+
+ if (bh){
+ bh = bh->b_reqnext;
+ while (bh && bh->b_data == base + size){
+ size += bh->b_size;
+ bh = bh->b_reqnext;
+ }
+ }
+ return size >> 9;
+}
+
+/* Compute the maximal transfer size */
+static int transfer_size(int ssize, int max_sector, int max_size)
+{
+ SUPBOUND(max_sector, sector_t + max_size);
+
+ /* alignment */
+ max_sector -= (max_sector % _floppy->sect) % ssize;
+
+ /* transfer size, beginning not aligned */
+ current_count_sectors = max_sector - sector_t ;
+
+ return max_sector;
+}
+
+/*
+ * Move data from/to the track buffer to/from the buffer cache.
+ */
+static void copy_buffer(int ssize, int max_sector, int max_sector_2)
+{
+ int remaining; /* number of transferred 512-byte sectors */
+ struct buffer_head *bh;
+ char *buffer, *dma_buffer;
+ int size;
+
+ max_sector = transfer_size(ssize,
+ minimum(max_sector, max_sector_2),
+ CURRENT->nr_sectors);
+
+ if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
+ buffer_max > sector_t + CURRENT->nr_sectors)
+ current_count_sectors = minimum(buffer_max - sector_t,
+ CURRENT->nr_sectors);
+
+ remaining = current_count_sectors << 9;
+#ifdef FLOPPY_SANITY_CHECK
+ if ((remaining >> 9) > CURRENT->nr_sectors &&
+ CT(COMMAND) == FD_WRITE){
+ DPRINT("in copy buffer\n");
+ printk("current_count_sectors=%ld\n", current_count_sectors);
+ printk("remaining=%d\n", remaining >> 9);
+ printk("CURRENT->nr_sectors=%ld\n",CURRENT->nr_sectors);
+ printk("CURRENT->current_nr_sectors=%ld\n",
+ CURRENT->current_nr_sectors);
+ printk("max_sector=%d\n", max_sector);
+ printk("ssize=%d\n", ssize);
+ }
+#endif
+
+ buffer_max = maximum(max_sector, buffer_max);
+
+ dma_buffer = floppy_track_buffer + ((sector_t - buffer_min) << 9);
+
+ bh = CURRENT->bh;
+ size = CURRENT->current_nr_sectors << 9;
+ buffer = CURRENT->buffer;
+
+ while (remaining > 0){
+ SUPBOUND(size, remaining);
+#ifdef FLOPPY_SANITY_CHECK
+ if (dma_buffer + size >
+ floppy_track_buffer + (max_buffer_sectors << 10) ||
+ dma_buffer < floppy_track_buffer){
+ DPRINT("buffer overrun in copy buffer %d\n",
+ (int) ((floppy_track_buffer - dma_buffer) >>9));
+ printk("sector_t=%d buffer_min=%d\n",
+ sector_t, buffer_min);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
+ if (CT(COMMAND) == FD_READ)
+ printk("read\n");
+ if (CT(COMMAND) == FD_READ)
+ printk("write\n");
+ break;
+ }
+ if (((unsigned long)buffer) % 512)
+ DPRINT("%p buffer not aligned\n", buffer);
+#endif
+ if (CT(COMMAND) == FD_READ)
+ memcpy(buffer, dma_buffer, size);
+ else
+ memcpy(dma_buffer, buffer, size);
+ remaining -= size;
+ if (!remaining)
+ break;
+
+ dma_buffer += size;
+ bh = bh->b_reqnext;
+#ifdef FLOPPY_SANITY_CHECK
+ if (!bh){
+ DPRINT("bh=null in copy buffer after copy\n");
+ break;
+ }
+#endif
+ size = bh->b_size;
+ buffer = bh->b_data;
+ }
+#ifdef FLOPPY_SANITY_CHECK
+ if (remaining){
+ if (remaining > 0)
+ max_sector -= remaining >> 9;
+ DPRINT("weirdness: remaining %d\n", remaining>>9);
+ }
+#endif
+}
+
+/*
+ * Formulate a read/write request.
+ * this routine decides where to load the data (directly to buffer, or to
+ * tmp floppy area), how much data to load (the size of the buffer, the whole
+ * track, or a single sector)
+ * All floppy_track_buffer handling goes in here. If we ever add track buffer
+ * allocation on the fly, it should be done here. No other part should need
+ * modification.
+ */
+
+/* Formulate a read/write request: decide where to load the data
+ * (directly into the request's buffers or via the track buffer) and
+ * how much to transfer.  Returns 2 to issue the command, 1 when the
+ * request was satisfied from the track buffer, 0 on error. */
+static int make_raw_rw_request(void)
+{
+	int aligned_sector_t;
+	int max_sector, max_size, tracksize, ssize;
+
+	set_fdc(DRIVE(CURRENT->rq_dev));
+
+	raw_cmd = &default_raw_cmd;
+	/* bug fix: FD_RAW_NEED_DISK was OR'ed in twice */
+	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK |
+		FD_RAW_NEED_SEEK;
+	raw_cmd->cmd_count = NR_RW;
+	if (CURRENT->cmd == READ){
+		raw_cmd->flags |= FD_RAW_READ;
+		COMMAND = FM_MODE(_floppy,FD_READ);
+	} else if (CURRENT->cmd == WRITE){
+		raw_cmd->flags |= FD_RAW_WRITE;
+		COMMAND = FM_MODE(_floppy,FD_WRITE);
+	} else {
+		DPRINT("make_raw_rw_request: unknown command\n");
+		return 0;
+	}
+
+	max_sector = _floppy->sect * _floppy->head;
+
+	TRACK = CURRENT->sector / max_sector;
+	sector_t = CURRENT->sector % max_sector;
+	if (_floppy->track && TRACK >= _floppy->track)
+		return 0;
+	HEAD = sector_t / _floppy->sect;
+
+	if (((_floppy->stretch & FD_SWAPSIDES) || TESTF(FD_NEED_TWADDLE)) &&
+	    sector_t < _floppy->sect)
+		max_sector = _floppy->sect;
+
+	/* 2M disks have phantom sectors on the first track */
+	if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)){
+		max_sector = 2 * _floppy->sect / 3;
+		if (sector_t >= max_sector){
+			current_count_sectors = minimum(_floppy->sect - sector_t,
+							CURRENT->nr_sectors);
+			return 1;
+		}
+		SIZECODE = 2;
+	} else
+		SIZECODE = FD_SIZECODE(_floppy);
+	raw_cmd->rate = _floppy->rate & 0x43;
+	if ((_floppy->rate & FD_2M) &&
+	    (TRACK || HEAD) &&
+	    raw_cmd->rate == 2)
+		raw_cmd->rate = 1;
+
+	if (SIZECODE)
+		SIZECODE2 = 0xff;
+	else
+		SIZECODE2 = 0x80;
+	raw_cmd->track = TRACK << STRETCH(_floppy);
+	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,HEAD);
+	GAP = _floppy->gap;
+	CODE2SIZE;	/* sets ssize */
+	SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
+	SECTOR = ((sector_t % _floppy->sect) << 2 >> SIZECODE) + 1;
+	/* handle tracks whose length is not a multiple of the physical
+	 * sector size (mixed-size last sector) */
+	tracksize = _floppy->sect - _floppy->sect % ssize;
+	if (tracksize < _floppy->sect){
+		SECT_PER_TRACK ++;
+		if (tracksize <= sector_t % _floppy->sect)
+			SECTOR--;
+		while (tracksize <= sector_t % _floppy->sect){
+			while(tracksize + ssize > _floppy->sect){
+				SIZECODE--;
+				ssize >>= 1;
+			}
+			SECTOR++; SECT_PER_TRACK ++;
+			tracksize += ssize;
+		}
+		max_sector = HEAD * _floppy->sect + tracksize;
+	} else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing)
+		max_sector = _floppy->sect;
+
+	aligned_sector_t = sector_t - (sector_t % _floppy->sect) % ssize;
+	max_size = CURRENT->nr_sectors;
+	if ((raw_cmd->track == buffer_track) &&
+	    (current_drive == buffer_drive) &&
+	    (sector_t >= buffer_min) && (sector_t < buffer_max)) {
+		/* data already in track buffer */
+		if (CT(COMMAND) == FD_READ) {
+			copy_buffer(1, max_sector, buffer_max);
+			return 1;
+		}
+	} else if (aligned_sector_t != sector_t || CURRENT->nr_sectors < ssize){
+		/* unaligned or partial-sector write: must read the
+		 * surrounding sectors first */
+		if (CT(COMMAND) == FD_WRITE){
+			if (sector_t + CURRENT->nr_sectors > ssize &&
+			    sector_t + CURRENT->nr_sectors < ssize + ssize)
+				max_size = ssize + ssize;
+			else
+				max_size = ssize;
+		}
+		raw_cmd->flags &= ~FD_RAW_WRITE;
+		raw_cmd->flags |= FD_RAW_READ;
+		COMMAND = FM_MODE(_floppy,FD_READ);
+	} else if ((unsigned long)CURRENT->buffer < MAX_DMA_ADDRESS) {
+		unsigned long dma_limit;
+		int direct, indirect;
+
+		indirect= transfer_size(ssize,max_sector,max_buffer_sectors*2) -
+			sector_t;
+
+		/*
+		 * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
+		 * on a 64 bit machine!
+		 */
+		max_size = buffer_chain_size();
+		dma_limit = (MAX_DMA_ADDRESS - ((unsigned long) CURRENT->buffer)) >> 9;
+		if ((unsigned long) max_size > dma_limit) {
+			max_size = dma_limit;
+		}
+		/* 64 kb boundaries */
+		if (CROSS_64KB(CURRENT->buffer, max_size << 9))
+			max_size = (K_64 - ((long) CURRENT->buffer) % K_64)>>9;
+		direct = transfer_size(ssize,max_sector,max_size) - sector_t;
+		/*
+		 * We try to read tracks, but if we get too many errors, we
+		 * go back to reading just one sector at a time.
+		 *
+		 * This means we should be able to read a sector even if there
+		 * are other bad sectors on this track.
+		 */
+		if (!direct ||
+		    (indirect * 2 > direct * 3 &&
+		     *errors < DP->max_errors.read_track &&
+		     /*!TESTF(FD_NEED_TWADDLE) &&*/
+		     ((!probing || (DP->read_track&(1<<DRS->probed_format)))))){
+			max_size = CURRENT->nr_sectors;
+		} else {
+			/* direct DMA to/from the request's own buffer */
+			raw_cmd->kernel_data = CURRENT->buffer;
+			raw_cmd->length = current_count_sectors << 9;
+			if (raw_cmd->length == 0){
+				DPRINT("zero dma transfer attempted from make_raw_request\n");
+				DPRINT("indirect=%d direct=%d sector_t=%d",
+				       indirect, direct, sector_t);
+				return 0;
+			}
+			return 2;
+		}
+	}
+
+	if (CT(COMMAND) == FD_READ)
+		max_size = max_sector; /* unbounded */
+
+	/* claim buffer track if needed */
+	if (buffer_track != raw_cmd->track || /* bad track */
+	    buffer_drive !=current_drive || /* bad drive */
+	    sector_t > buffer_max ||
+	    sector_t < buffer_min ||
+	    ((CT(COMMAND) == FD_READ ||
+	      (aligned_sector_t == sector_t && CURRENT->nr_sectors >= ssize))&&
+	     max_sector > 2 * max_buffer_sectors + buffer_min &&
+	     max_size + sector_t > 2 * max_buffer_sectors + buffer_min)
+	    /* not enough space */){
+		buffer_track = -1;
+		buffer_drive = current_drive;
+		buffer_max = buffer_min = aligned_sector_t;
+	}
+	raw_cmd->kernel_data = floppy_track_buffer +
+		((aligned_sector_t-buffer_min)<<9);
+
+	if (CT(COMMAND) == FD_WRITE){
+		/* copy write buffer to track buffer.
+		 * if we get here, we know that the write
+		 * is either aligned or the data already in the buffer
+		 * (buffer will be overwritten) */
+#ifdef FLOPPY_SANITY_CHECK
+		if (sector_t != aligned_sector_t && buffer_track == -1)
+			DPRINT("internal error offset !=0 on write\n");
+#endif
+		buffer_track = raw_cmd->track;
+		buffer_drive = current_drive;
+		copy_buffer(ssize, max_sector, 2*max_buffer_sectors+buffer_min);
+	} else
+		transfer_size(ssize, max_sector,
+			      2*max_buffer_sectors+buffer_min-aligned_sector_t);
+
+	/* round up current_count_sectors to get dma xfer size */
+	raw_cmd->length = sector_t+current_count_sectors-aligned_sector_t;
+	raw_cmd->length = ((raw_cmd->length -1)|(ssize-1))+1;
+	raw_cmd->length <<= 9;
+#ifdef FLOPPY_SANITY_CHECK
+	if ((raw_cmd->length < current_count_sectors << 9) ||
+	    (raw_cmd->kernel_data != CURRENT->buffer &&
+	     CT(COMMAND) == FD_WRITE &&
+	     (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
+	      aligned_sector_t < buffer_min)) ||
+	    raw_cmd->length % (128 << SIZECODE) ||
+	    raw_cmd->length <= 0 || current_count_sectors <= 0){
+		DPRINT("fractionary current count b=%lx s=%lx\n",
+		       raw_cmd->length, current_count_sectors);
+		if (raw_cmd->kernel_data != CURRENT->buffer)
+			printk("addr=%d, length=%ld\n",
+			       (int) ((raw_cmd->kernel_data -
+				       floppy_track_buffer) >> 9),
+			       current_count_sectors);
+		printk("st=%d ast=%d mse=%d msi=%d\n",
+		       sector_t, aligned_sector_t, max_sector, max_size);
+		printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
+		printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
+		       COMMAND, SECTOR, HEAD, TRACK);
+		printk("buffer drive=%d\n", buffer_drive);
+		printk("buffer track=%d\n", buffer_track);
+		printk("buffer_min=%d\n", buffer_min);
+		printk("buffer_max=%d\n", buffer_max);
+		return 0;
+	}
+
+	if (raw_cmd->kernel_data != CURRENT->buffer){
+		if (raw_cmd->kernel_data < floppy_track_buffer ||
+		    current_count_sectors < 0 ||
+		    raw_cmd->length < 0 ||
+		    raw_cmd->kernel_data + raw_cmd->length >
+		    floppy_track_buffer + (max_buffer_sectors << 10)){
+			DPRINT("buffer overrun in schedule dma\n");
+			printk("sector_t=%d buffer_min=%d current_count=%ld\n",
+			       sector_t, buffer_min,
+			       raw_cmd->length >> 9);
+			printk("current_count_sectors=%ld\n",
+			       current_count_sectors);
+			if (CT(COMMAND) == FD_READ)
+				printk("read\n");
+			/* bug fix: this tested FD_READ a second time */
+			if (CT(COMMAND) == FD_WRITE)
+				printk("write\n");
+			return 0;
+		}
+	} else if (raw_cmd->length > CURRENT->nr_sectors << 9 ||
+		   current_count_sectors > CURRENT->nr_sectors){
+		DPRINT("buffer overrun in direct transfer\n");
+		return 0;
+	} else if (raw_cmd->length < current_count_sectors << 9){
+		DPRINT("more sectors than bytes\n");
+		printk("bytes=%ld\n", raw_cmd->length >> 9);
+		printk("sectors=%ld\n", current_count_sectors);
+	}
+	if (raw_cmd->length == 0){
+		DPRINT("zero dma transfer attempted from make_raw_request\n");
+		return 0;
+	}
+#endif
+	return 2;
+}
+
+/* Main request loop: pull requests off the queue, run autodetection if
+ * no geometry is known, build the raw command and hand it to the FDC.
+ * Returns (keeping the FDC locked) once a command has been queued, or
+ * unlocks the FDC when the queue is empty. */
+static void redo_fd_request(void)
+{
+#define REPEAT {request_done(0); continue; }
+	kdev_t device;
+	int tmp;
+
+	lastredo = jiffies;
+	if (current_drive < N_DRIVE)
+		floppy_off(current_drive);
+
+	if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+		CLEAR_INTR;
+		unlock_fdc();
+		return;
+	}
+
+	while(1){
+		if (!CURRENT) {
+			CLEAR_INTR;
+			unlock_fdc();
+			return;
+		}
+		if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
+			panic(DEVICE_NAME ": request list destroyed");
+		if (CURRENT->bh && !buffer_locked(CURRENT->bh))
+			panic(DEVICE_NAME ": block not locked");
+
+		device = CURRENT->rq_dev;
+		set_fdc(DRIVE(device));
+		reschedule_timeout(CURRENTD, "redo fd request", 0);
+
+		set_floppy(device);
+		raw_cmd = & default_raw_cmd;
+		raw_cmd->flags = 0;
+		/* start_motor re-invokes us once the motor is up */
+		if (start_motor(redo_fd_request)) return;
+		disk_change(current_drive);
+		if (test_bit(current_drive, &fake_change) ||
+		    TESTF(FD_DISK_CHANGED)){
+			DPRINT("disk absent or changed during operation\n");
+			REPEAT;
+		}
+		if (!_floppy) { /* Autodetection */
+			if (!probing){
+				DRS->probed_format = 0;
+				if (next_valid_format()){
+					DPRINT("no autodetectable formats\n");
+					_floppy = NULL;
+					REPEAT;
+				}
+			}
+			probing = 1;
+			_floppy = floppy_type+DP->autodetect[DRS->probed_format];
+		} else
+			probing = 0;
+		errors = & (CURRENT->errors);
+		tmp = make_raw_rw_request();
+		if (tmp < 2){
+			/* 0 = error, 1 = satisfied from track buffer */
+			request_done(tmp);
+			continue;
+		}
+
+		if (TESTF(FD_NEED_TWADDLE))
+			twaddle();
+		/* issue the command from the immediate bottom half */
+		floppy_tq.routine = (void *)(void *) floppy_start;
+		queue_task(&floppy_tq, &tq_immediate);
+		mark_bh(IMMEDIATE_BH);
+#ifdef DEBUGT
+		debugt("queue fd request");
+#endif
+		return;
+	}
+#undef REPEAT
+}
+
+/* Continuation for ordinary read/write requests
+ * (interrupt, redo, error, done). */
+static struct cont_t rw_cont={
+	rw_interrupt,
+	redo_fd_request,
+	bad_flp_intr,
+	request_done };
+
+/* Bottom-half task used to (re)start request-queue processing. */
+static struct tq_struct request_tq =
+{ 0, 0, (void *) (void *) redo_fd_request, 0 };
+
+/* Install the standard r/w continuation and schedule redo_fd_request
+ * to run from the immediate bottom-half queue. */
+static void process_fd_request(void)
+{
+	cont = &rw_cont;
+	queue_task(&request_tq, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+/* Block-device request entry point.  If the FDC is already busy the
+ * new request is picked up when the current one completes; otherwise
+ * grab the FDC and start processing. */
+static void do_fd_request(void)
+{
+	sti();
+	if (fdc_busy){
+		/* fdc busy, this new request will be treated when the
+		   current one is done */
+		is_alive("do fd request, old request running");
+		return;
+	}
+	lock_fdc(MAXTIMEOUT,0);
+	process_fd_request();
+	is_alive("do fd request");
+}
+
+/* Continuation for drive polling (interrupt, redo, error, done). */
+static struct cont_t poll_cont={
+	success_and_wakeup,
+	floppy_ready,
+	generic_failure,
+	generic_done };
+
+/* Poll the drive (e.g. to notice a disk change) by issuing an empty
+ * raw command with the given flags.  NOTE(review): WAIT is a macro
+ * defined elsewhere that presumably assigns `ret' and may return
+ * early -- confirm against the macro definition. */
+static int poll_drive(int interruptible, int flag)
+{
+	int ret;
+	/* no auto-sense, just clear dcl */
+	raw_cmd = &default_raw_cmd;
+	raw_cmd->flags= flag;
+	raw_cmd->track=0;
+	raw_cmd->cmd_count=0;
+	cont = &poll_cont;
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG){
+		DPRINT("setting NEWCHANGE in poll_drive\n");
+	}
+#endif
+	SETF(FD_DISK_NEWCHANGE);
+	WAIT(floppy_ready);
+	return ret;
+}
+
+/*
+ * User triggered reset
+ * ====================
+ */
+
+/* An FDC reset should never raise a data interrupt; log it if one
+ * arrives anyway. */
+static void reset_intr(void)
+{
+	printk("weird, reset interrupt called\n");
+}
+
+/* Continuation for user-triggered FDC resets
+ * (interrupt, redo, error, done). */
+static struct cont_t reset_cont={
+	reset_intr,
+	success_and_wakeup,
+	generic_failure,
+	generic_done };
+
+/* FDRESET backend: reset the FDC on user request.  With
+ * FD_RESET_ALWAYS the reset is forced; otherwise it only happens when
+ * one is already pending.  LOCK_FDC/WAIT are macros that may return
+ * early and set `ret'. */
+static int user_reset_fdc(int drive, int arg, int interruptible)
+{
+	int ret;
+
+	ret=0;
+	LOCK_FDC(drive,interruptible);
+	if (arg == FD_RESET_ALWAYS)
+		FDCS->reset=1;
+	if (FDCS->reset){
+		cont = &reset_cont;
+		WAIT(reset_fdc);
+	}
+	process_fd_request();
+	return ret;
+}
+
+/*
+ * Misc Ioctl's and support
+ * ========================
+ */
+/* Copy `size' bytes from kernel `address' to user-space `param'.
+ * Returns 0, or the error from verify_area (ECALL returns early on
+ * failure). */
+static int fd_copyout(void *param, const void *address, int size)
+{
+	int ret;
+
+	ECALL(verify_area(VERIFY_WRITE,param,size));
+	memcpy_tofs(param,(void *) address, size);
+	return 0;
+}
+
+/* Copy `size' bytes from user-space `param' into kernel `address'
+ * (note the argument order of memcpy_fromfs: destination first).
+ * Returns 0, or the error from verify_area. */
+static int fd_copyin(void *param, void *address, int size)
+{
+	int ret;
+
+	ECALL(verify_area(VERIFY_READ,param,size));
+	memcpy_fromfs((void *) address, param, size);
+	return 0;
+}
+
+/* Copy object x to/from the user pointer `param' (in scope at the call
+ * site); return early from the enclosing function on fault. */
+#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x)))
+#define COPYIN(x) ECALL(fd_copyin((void *)param, &(x), sizeof(x)))
+
+/* Return a printable name for the format selected by `type' (or, when
+ * type is 0, the drive's native format).  Falls back to "(null)" when
+ * no format or no name is available. */
+static inline const char *drive_name(int type, int drive)
+{
+	struct floppy_struct *fl;
+
+	if (type)
+		fl = floppy_type + type;
+	else if (UDP->native_format)
+		fl = floppy_type + UDP->native_format;
+	else
+		return "(null)";
+
+	return fl->name ? fl->name : "(null)";
+}
+
+
+/* raw commands */
+static void raw_cmd_done(int flag)
+{
+ int i;
+
+ if (!flag) {
+ raw_cmd->flags |= FD_RAW_FAILURE;
+ raw_cmd->flags |= FD_RAW_HARDFAILURE;
+ } else {
+ raw_cmd->reply_count = inr;
+ for (i=0; i< raw_cmd->reply_count; i++)
+ raw_cmd->reply[i] = reply_buffer[i];
+
+ if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE))
+ raw_cmd->length = fd_get_dma_residue();
+
+ if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
+ (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
+ raw_cmd->flags |= FD_RAW_FAILURE;
+
+ if (disk_change(current_drive))
+ raw_cmd->flags |= FD_RAW_DISK_CHANGE;
+ else
+ raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
+ if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
+ motor_off_callback(current_drive);
+
+ if (raw_cmd->next &&
+ (!(raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
+ ((raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags &FD_RAW_STOP_IF_SUCCESS))) {
+ raw_cmd = raw_cmd->next;
+ return;
+ }
+ }
+ generic_done(flag);
+}
+
+
+/* Continuation for raw commands (interrupt, redo, error, done). */
+static struct cont_t raw_cmd_cont={
+	success_and_wakeup,
+	floppy_start,
+	generic_failure,
+	raw_cmd_done
+};
+
+/* Copy the results of a raw-command chain back to user space, in the
+ * old (OLDFDRAWCMD) or new layout, including any data read from the
+ * disk.  COPYOUT/ECALL return early on fault. */
+static inline int raw_cmd_copyout(int cmd, char *param,
+				  struct floppy_raw_cmd *ptr)
+{
+	struct old_floppy_raw_cmd old_raw_cmd;
+	int ret;
+
+	while(ptr) {
+		if (cmd == OLDFDRAWCMD) {
+			old_raw_cmd.flags = ptr->flags;
+			old_raw_cmd.data = ptr->data;
+			old_raw_cmd.length = ptr->length;
+			old_raw_cmd.rate = ptr->rate;
+			old_raw_cmd.reply_count = ptr->reply_count;
+			memcpy(old_raw_cmd.reply, ptr->reply, 7);
+			COPYOUT(old_raw_cmd);
+			param += sizeof(old_raw_cmd);
+		} else {
+			COPYOUT(*ptr);
+			param += sizeof(struct floppy_raw_cmd);
+		}
+
+		/* ptr->length holds the DMA residue here, so the amount
+		 * actually read is buffer_length - length */
+		if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length){
+			if (ptr->length>=0 && ptr->length<=ptr->buffer_length)
+				ECALL(fd_copyout(ptr->data,
+						 ptr->kernel_data,
+						 ptr->buffer_length -
+						 ptr->length));
+		}
+		ptr = ptr->next;
+	}
+	return 0;
+}
+
+
+/* Release a raw-command chain: free each element's DMA buffer (if any)
+ * and the element itself, clearing the caller's pointer first. */
+static void raw_cmd_free(struct floppy_raw_cmd **ptr)
+{
+	struct floppy_raw_cmd *cur = *ptr;
+
+	*ptr = 0;
+	while (cur) {
+		struct floppy_raw_cmd *nxt = cur->next;
+
+		if (cur->buffer_length) {
+			fd_dma_mem_free((unsigned long)cur->kernel_data,
+					cur->buffer_length);
+			cur->buffer_length = 0;
+		}
+		kfree(cur);
+		cur = nxt;
+	}
+}
+
+
+/* Build a kernel raw-command chain from the user-space description
+ * (old or new layout), allocating a DMA buffer per element that reads
+ * or writes data.  On any error the partially built chain is left in
+ * *rcmd for the caller to release with raw_cmd_free() (see
+ * raw_cmd_ioctl).  COPYIN/ECALL return early on fault. */
+static inline int raw_cmd_copyin(int cmd, char *param,
+				 struct floppy_raw_cmd **rcmd)
+{
+	struct floppy_raw_cmd *ptr;
+	struct old_floppy_raw_cmd old_raw_cmd;
+	int ret;
+	int i;
+
+	*rcmd = 0;
+	while(1) {
+		ptr = (struct floppy_raw_cmd *)
+			kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+		if (!ptr)
+			return -ENOMEM;
+		/* link into the chain before any further failure so the
+		 * caller can free it */
+		*rcmd = ptr;
+		if (cmd == OLDFDRAWCMD){
+			COPYIN(old_raw_cmd);
+			ptr->flags = old_raw_cmd.flags;
+			ptr->data = old_raw_cmd.data;
+			ptr->length = old_raw_cmd.length;
+			ptr->rate = old_raw_cmd.rate;
+			ptr->cmd_count = old_raw_cmd.cmd_count;
+			ptr->track = old_raw_cmd.track;
+			ptr->phys_length = 0;
+			ptr->next = 0;
+			ptr->buffer_length = 0;
+			memcpy(ptr->cmd, old_raw_cmd.cmd, 9);
+			param += sizeof(struct old_floppy_raw_cmd);
+			if (ptr->cmd_count > 9)
+				return -EINVAL;
+		} else {
+			COPYIN(*ptr);
+			ptr->next = 0;
+			ptr->buffer_length = 0;
+			param += sizeof(struct floppy_raw_cmd);
+			if (ptr->cmd_count > 33)
+				/* the command may now also take up the space
+				 * initially intended for the reply & the
+				 * reply count. Needed for long 82078 commands
+				 * such as RESTORE, which takes ... 17 command
+				 * bytes. Murphy's law #137: When you reserve
+				 * 16 bytes for a structure, you'll one day
+				 * discover that you really need 17...
+				 */
+				return -EINVAL;
+		}
+
+		for (i=0; i< 16; i++)
+			ptr->reply[i] = 0;
+		ptr->resultcode = 0;
+		ptr->kernel_data = 0;
+
+		if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+			if (ptr->length <= 0)
+				return -EINVAL;
+			ptr->kernel_data =(char*)fd_dma_mem_alloc(ptr->length);
+			if (!ptr->kernel_data)
+				return -ENOMEM;
+			ptr->buffer_length = ptr->length;
+		}
+		if ( ptr->flags & FD_RAW_READ )
+			ECALL( verify_area( VERIFY_WRITE, ptr->data,
+					    ptr->length ));
+		if (ptr->flags & FD_RAW_WRITE)
+			ECALL(fd_copyin(ptr->data, ptr->kernel_data,
+					ptr->length));
+		/* next element (if any) will be linked at ptr->next */
+		rcmd = & (ptr->next);
+		if (!(ptr->flags & FD_RAW_MORE))
+			return 0;
+		ptr->rate &= 0x43;
+	}
+}
+
+
+/* FDRAWCMD/OLDFDRAWCMD backend: copy the command chain in, execute it
+ * through the raw-command continuation, and copy the results back.
+ * Always frees the chain before returning. */
+static int raw_cmd_ioctl(int cmd, void *param)
+{
+	int drive, ret, ret2;
+	struct floppy_raw_cmd *my_raw_cmd;
+
+	/* rawcmd level 2 means other users hold the same FDC open;
+	 * caches will need flushing afterwards */
+	if (FDCS->rawcmd <= 1)
+		FDCS->rawcmd = 1;
+	for (drive= 0; drive < N_DRIVE; drive++){
+		if (FDC(drive) != fdc)
+			continue;
+		if (drive == current_drive){
+			if (UDRS->fd_ref > 1){
+				FDCS->rawcmd = 2;
+				break;
+			}
+		} else if (UDRS->fd_ref){
+			FDCS->rawcmd = 2;
+			break;
+		}
+	}
+
+	if (FDCS->reset)
+		return -EIO;
+
+	ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
+	if (ret) {
+		raw_cmd_free(&my_raw_cmd);
+		return ret;
+	}
+
+	raw_cmd = my_raw_cmd;
+	cont = &raw_cmd_cont;
+	ret=wait_til_done(floppy_start,1);
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG){
+		DPRINT("calling disk change from raw_cmd ioctl\n");
+	}
+#endif
+
+	if (ret != -EINTR && FDCS->reset)
+		ret = -EIO;
+
+	DRS->track = NO_TRACK;
+
+	ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
+	if (!ret)
+		ret = ret2;
+	raw_cmd_free(&my_raw_cmd);
+	return ret;
+}
+
+/* Force a reread of the drive's media: fake a disk change, restart the
+ * request queue, and let check_disk_change flush cached buffers. */
+static int invalidate_drive(kdev_t rdev)
+{
+	/* invalidate the buffer track to force a reread */
+	set_bit(DRIVE(rdev), &fake_change);
+	process_fd_request();
+	check_disk_change(rdev);
+	return 0;
+}
+
+
+/* FDWERRORCLR backend: zero the drive's write-error statistics. */
+static inline void clear_write_error(int drive)
+{
+	CLEARSTRUCT(UDRWE);
+}
+
+/* FDSETPRM/FDDEFPRM backend.  With a non-zero `type' (superuser only)
+ * redefine a global format table entry and invalidate every drive that
+ * uses it; otherwise install per-drive user parameters, invalidating
+ * cached data only when the geometry actually shrinks. */
+static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+			       int drive, int type, kdev_t device)
+{
+	int cnt;
+
+	/* sanity checking for parameters.*/
+	if (g->sect <= 0 ||
+	    g->head <= 0 ||
+	    g->track <= 0 ||
+	    g->track > UDP->tracks>>STRETCH(g) ||
+	    /* check if reserved bits are set */
+	    (g->stretch&~(FD_STRETCH|FD_SWAPSIDES)) != 0)
+		return -EINVAL;
+	if (type){
+		if (!suser())
+			return -EPERM;
+		LOCK_FDC(drive,1);
+		for (cnt = 0; cnt < N_DRIVE; cnt++){
+			if (ITYPE(drive_state[cnt].fd_device) == type &&
+			    drive_state[cnt].fd_ref)
+				set_bit(drive, &fake_change);
+		}
+		floppy_type[type] = *g;
+		floppy_type[type].name="user format";
+		/* update the four minors sharing this type (+0x80 for
+		 * the second FDC's minors) */
+		for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
+			floppy_sizes[cnt]= floppy_sizes[cnt+0x80]=
+				floppy_type[type].size>>1;
+		process_fd_request();
+		for (cnt = 0; cnt < N_DRIVE; cnt++){
+			if (ITYPE(drive_state[cnt].fd_device) == type &&
+			    drive_state[cnt].fd_ref)
+				check_disk_change(
+					MKDEV(FLOPPY_MAJOR,
+					      drive_state[cnt].fd_device));
+		}
+	} else {
+		LOCK_FDC(drive,1);
+		if (cmd != FDDEFPRM)
+			/* notice a disk change immediately, else
+			 * we lose our settings immediately*/
+			CALL(poll_drive(1, FD_RAW_NEED_DISK));
+		user_params[drive] = *g;
+		if (buffer_drive == drive)
+			SUPBOUND(buffer_max, user_params[drive].sect);
+		current_type[drive] = &user_params[drive];
+		floppy_sizes[drive] = user_params[drive].size >> 1;
+		if (cmd == FDDEFPRM)
+			DRS->keep_data = -1;
+		else
+			DRS->keep_data = 1;
+		/* invalidation. Invalidate only when needed, i.e.
+		 * when there are already sectors in the buffer cache
+		 * whose number will change. This is useful, because
+		 * mtools often changes the geometry of the disk after
+		 * looking at the boot block */
+		if (DRS->maxblock > user_params[drive].sect || DRS->maxtrack)
+			invalidate_drive(device);
+		else
+			process_fd_request();
+	}
+	return 0;
+}
+
+/* handle obsolete ioctl's */
+static struct translation_entry {
+ int newcmd;
+ int oldcmd;
+ int oldsize; /* size of 0x00xx-style ioctl. Reflects old structures, thus
+ * use numeric values. NO SIZEOFS */
+} translation_table[]= {
+ {FDCLRPRM, 0, 0},
+ {FDSETPRM, 1, 28},
+ {FDDEFPRM, 2, 28},
+ {FDGETPRM, 3, 28},
+ {FDMSGON, 4, 0},
+ {FDMSGOFF, 5, 0},
+ {FDFMTBEG, 6, 0},
+ {FDFMTTRK, 7, 12},
+ {FDFMTEND, 8, 0},
+ {FDSETEMSGTRESH, 10, 0},
+ {FDFLUSH, 11, 0},
+ {FDSETMAXERRS, 12, 20},
+ {OLDFDRAWCMD, 30, 0},
+ {FDGETMAXERRS, 14, 20},
+ {FDGETDRVTYP, 16, 16},
+ {FDSETDRVPRM, 20, 88},
+ {FDGETDRVPRM, 21, 88},
+ {FDGETDRVSTAT, 22, 52},
+ {FDPOLLDRVSTAT, 23, 52},
+ {FDRESET, 24, 0},
+ {FDGETFDCSTAT, 25, 40},
+ {FDWERRORCLR, 27, 0},
+ {FDWERRORGET, 28, 24},
+ {FDRAWCMD, 0, 0},
+ {FDEJECT, 0, 0},
+ {FDTWADDLE, 40, 0} };
+
+/* Canonicalize a 0x02xx-style ioctl number: look it up in the
+ * translation table (ignoring the encoded size bits), report the
+ * caller-supplied size, and reject requests whose structure is larger
+ * than the current one. */
+static inline int normalize_0x02xx_ioctl(int *cmd, int *size)
+{
+	int i;
+
+	for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+		if ((*cmd & 0xffff) != (translation_table[i].newcmd & 0xffff))
+			continue;
+		*size = _IOC_SIZE(*cmd);
+		*cmd = translation_table[i].newcmd;
+		if (*size > _IOC_SIZE(*cmd)) {
+			printk("ioctl not yet supported\n");
+			return -EFAULT;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/* Translate a pre-1.3.33 (0x00xx) ioctl number into the modern one,
+ * warning on newer kernels and refusing entirely once the version
+ * cutoffs are reached. */
+static inline int xlate_0x00xx_ioctl(int *cmd, int *size)
+{
+	int i;
+	/* old ioctls' for kernels <= 1.3.33 */
+	/* When the next even release will come around, we'll start
+	 * warning against these.
+	 * When the next odd release will come around, we'll fail with
+	 * -EINVAL */
+	if(strcmp(system_utsname.version, "1.4.0") >= 0)
+		printk("obsolete floppy ioctl %x\n", *cmd);
+	if((system_utsname.version[0] == '1' &&
+	    strcmp(system_utsname.version, "1.5.0") >= 0) ||
+	   (system_utsname.version[0] >= '2' &&
+	    strcmp(system_utsname.version, "2.1.0") >= 0))
+		return -EINVAL;
+	for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+		if (*cmd == translation_table[i].oldcmd) {
+			*size = translation_table[i].oldsize;
+			*cmd = translation_table[i].newcmd;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/* Main floppy ioctl entry point.  Normalizes legacy command numbers,
+ * performs permission checks, copies the parameter block in from user
+ * space where the command direction requires it, dispatches on the
+ * (now-normalized) command, and copies results back out.
+ * Returns 0 on success or a negative errno.
+ * NOTE(review): the IN/OUT macros below hide 'case' labels inside the
+ * big switch; OUT sets outparam for the common copyout at the bottom,
+ * IN copies one member of the inparam union into a driver structure. */
+static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long param)
+{
+#define IOCTL_MODE_BIT 8
+#define OPEN_WRITE_BIT 16
+#define IOCTL_ALLOWED (filp && (filp->f_mode & IOCTL_MODE_BIT))
+#define OUT(c,x) case c: outparam = (const char *) (x); break
+#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
+
+ int i,drive,type;
+ kdev_t device;
+ int ret;
+ int size;
+ union inparam {
+ struct floppy_struct g; /* geometry */
+ struct format_descr f;
+ struct floppy_max_errors max_errors;
+ struct floppy_drive_params dp;
+ } inparam; /* parameters coming from user space */
+ const char *outparam; /* parameters passed back to user space */
+
+ device = inode->i_rdev;
+ /* generic read-only ioctls (BLKROSET etc.) are handled here and
+ * return from inside the macro expansion */
+ switch (cmd) {
+ RO_IOCTLS(device,param);
+ }
+ type = TYPE(device);
+ drive = DRIVE(device);
+
+ /* convert compatibility eject ioctls into floppy eject ioctl.
+ * We do this in order to provide a means to eject floppy disks before
+ * installing the new fdutils package */
+ if(cmd == CDROMEJECT || /* CD-ROM eject */
+ cmd == 0x6470 /* SunOS floppy eject */) {
+ DPRINT("obsolete eject ioctl\n");
+ DPRINT("please use floppycontrol --eject\n");
+ cmd = FDEJECT;
+ }
+
+ /* convert the old style command into a new style command */
+ if ((cmd & 0xff00) == 0x0200) {
+ ECALL(normalize_0x02xx_ioctl(&cmd, &size));
+ } else if ((cmd & 0xff00) == 0x0000) {
+ ECALL(xlate_0x00xx_ioctl(&cmd, &size));
+ } else
+ return -EINVAL;
+
+ /* permission checks: bit 0x80 commands need superuser, bit 0x40
+ * commands need write-capable open (IOCTL_MODE_BIT) */
+ if (((cmd & 0x80) && !suser()) ||
+ ((cmd & 0x40) && !IOCTL_ALLOWED))
+ return -EPERM;
+
+ /* verify writability of result, and fail early */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ ECALL(verify_area(VERIFY_WRITE,(void *) param, size));
+
+ /* copyin */
+ CLEARSTRUCT(&inparam);
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ ECALL(fd_copyin((void *)param, &inparam, size))
+
+ switch (cmd) {
+ case FDEJECT:
+ if(UDRS->fd_ref != 1)
+ /* somebody else has this drive open */
+ return -EBUSY;
+ LOCK_FDC(drive,1);
+
+ /* do the actual eject. Fails on
+ * non-Sparc architectures */
+ ret=fd_eject(UNIT(drive));
+
+ USETF(FD_DISK_CHANGED);
+ USETF(FD_VERIFY);
+ process_fd_request();
+ return ret;
+ case FDCLRPRM:
+ LOCK_FDC(drive,1);
+ current_type[drive] = NULL;
+ floppy_sizes[drive] = MAX_DISK_SIZE;
+ UDRS->keep_data = 0;
+ return invalidate_drive(device);
+ case FDSETPRM:
+ case FDDEFPRM:
+ return set_geometry(cmd, & inparam.g,
+ drive, type, device);
+ case FDGETPRM:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1,0));
+ process_fd_request();
+ if (type)
+ outparam = (char *) &floppy_type[type];
+ else
+ outparam = (char *) current_type[drive];
+ if(!outparam)
+ return -ENODEV;
+ break;
+
+ case FDMSGON:
+ UDP->flags |= FTD_MSG;
+ return 0;
+ case FDMSGOFF:
+ UDP->flags &= ~FTD_MSG;
+ return 0;
+
+ case FDFMTBEG:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ ret = UDRS->flags;
+ process_fd_request();
+ if(ret & FD_VERIFY)
+ return -ENODEV;
+ if(!(ret & FD_DISK_WRITABLE))
+ return -EROFS;
+ return 0;
+ case FDFMTTRK:
+ if (UDRS->fd_ref != 1)
+ return -EBUSY;
+ return do_format(device, &inparam.f);
+ case FDFMTEND:
+ case FDFLUSH:
+ LOCK_FDC(drive,1);
+ return invalidate_drive(device);
+
+ case FDSETEMSGTRESH:
+ UDP->max_errors.reporting =
+ (unsigned short) (param & 0x0f);
+ return 0;
+ OUT(FDGETMAXERRS, &UDP->max_errors);
+ IN(FDSETMAXERRS, &UDP->max_errors, max_errors);
+
+ case FDGETDRVTYP:
+ outparam = drive_name(type,drive);
+ SUPBOUND(size,strlen(outparam)+1);
+ break;
+
+ IN(FDSETDRVPRM, UDP, dp);
+ OUT(FDGETDRVPRM, UDP);
+
+ case FDPOLLDRVSTAT:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ process_fd_request();
+ /* fall through */
+ OUT(FDGETDRVSTAT, UDRS);
+
+ case FDRESET:
+ return user_reset_fdc(drive, (int)param, 1);
+
+ OUT(FDGETFDCSTAT,UFDCS);
+
+ case FDWERRORCLR:
+ CLEARSTRUCT(UDRWE);
+ return 0;
+ OUT(FDWERRORGET,UDRWE);
+
+ case OLDFDRAWCMD:
+ case FDRAWCMD:
+ if (type)
+ return -EINVAL;
+ LOCK_FDC(drive,1);
+ set_floppy(device);
+ CALL(i = raw_cmd_ioctl(cmd,(void *) param));
+ process_fd_request();
+ return i;
+
+ case FDTWADDLE:
+ LOCK_FDC(drive,1);
+ twaddle();
+ process_fd_request();
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* common copyout path for the OUT() cases that 'break' above */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ return fd_copyout((void *)param, outparam, size);
+ else
+ return 0;
+#undef IOCTL_ALLOWED
+#undef OUT
+#undef IN
+}
+
+/* Determine the configured drive types.  Reads CMOS types for the first
+ * two physical drives (FLOPPY0_TYPE/FLOPPY1_TYPE) unless already set,
+ * then copies the matching entry of default_drive_params into each
+ * drive's parameter block and prints a one-line summary.
+ * NOTE: UDP expands in terms of the local 'drive' variable, which is
+ * why 'drive' is assigned before each UDP access below. */
+static void config_types(void)
+{
+ int first=1;
+ int drive;
+
+ /* read drive info out of physical CMOS */
+ drive=0;
+ if (!UDP->cmos)
+ UDP->cmos= FLOPPY0_TYPE;
+ drive=1;
+ if (!UDP->cmos && FLOPPY1_TYPE)
+ UDP->cmos = FLOPPY1_TYPE;
+
+ /* XXX */
+ /* additional physical CMOS drive detection should go here */
+
+ for (drive=0; drive < N_DRIVE; drive++){
+ /* out-of-range CMOS codes are treated as "no drive" */
+ if (UDP->cmos >= 16)
+ UDP->cmos = 0;
+ if (UDP->cmos >= 0 && UDP->cmos <= NUMBER(default_drive_params))
+ memcpy((char *) UDP,
+ (char *) (&default_drive_params[(int)UDP->cmos].params),
+ sizeof(struct floppy_drive_params));
+ if (UDP->cmos){
+ if (first)
+ printk(KERN_INFO "Floppy drive(s): ");
+ else
+ printk(", ");
+ first=0;
+ if (UDP->cmos > 0){
+ /* known type: enable the drive */
+ allowed_drive_mask |= 1 << drive;
+ printk("fd%d is %s", drive,
+ default_drive_params[(int)UDP->cmos].name);
+ } else
+ printk("fd%d is unknown type %d",drive,
+ UDP->cmos);
+ }
+ }
+ if (!first)
+ printk("\n");
+}
+
+/* read() entry point: refuse with -ENXIO if the disk was changed since
+ * the last successful access, otherwise defer to the generic block
+ * device read path. */
+static int floppy_read(struct inode * inode, struct file * filp,
+ char * buf, int count)
+{
+ int drive = DRIVE(inode->i_rdev);
+
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ return block_read(inode, filp, buf, count);
+}
+
+/* write() entry point: fail with -ENXIO on a changed disk and -EROFS
+ * on a write-protected one, track the highest block written (so later
+ * change detection has something to compare), then defer to the
+ * generic block device write path. */
+static int floppy_write(struct inode * inode, struct file * filp,
+ const char * buf, int count)
+{
+ int block;
+ int ret;
+ int drive = DRIVE(inode->i_rdev);
+
+ if (!UDRS->maxblock)
+ UDRS->maxblock=1;/* make change detectable */
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ if (!UTESTF(FD_DISK_WRITABLE))
+ return -EROFS;
+ /* last 512-byte block this write will touch */
+ block = (filp->f_pos + count) >> 9;
+ INFBOUND(UDRS->maxblock, block);
+ ret= block_write(inode, filp, buf, count);
+ return ret;
+}
+
+/* release() entry point: fsync writable opens, drop the drive
+ * reference count (an exclusive open is stored as fd_ref == -1 and is
+ * reset to 0 here), and release IRQ/DMA resources. */
+static void floppy_release(struct inode * inode, struct file * filp)
+{
+ int drive;
+
+ drive = DRIVE(inode->i_rdev);
+
+ if (!filp || (filp->f_mode & (2 | OPEN_WRITE_BIT)))
+ /* if the file is mounted OR (writable now AND writable at
+ * open time) Linus: Does this cover all cases? */
+ block_fsync(inode,filp);
+
+ if (UDRS->fd_ref < 0)
+ UDRS->fd_ref=0;
+ else if (!UDRS->fd_ref--) {
+ /* post-decrement: this fires when the count was already 0,
+ * i.e. an unbalanced release; clamp back to 0 */
+ DPRINT("floppy_release with fd_ref == 0");
+ UDRS->fd_ref = 0;
+ }
+ floppy_release_irq_and_dma();
+}
+
+/*
+ * floppy_open check for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc), and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+/* RETERR: undo the open (release refcount/IRQ/DMA) and return -x */
+#define RETERR(x) do{floppy_release(inode,filp); return -(x);}while(0)
+
+/* open() entry point.  Validates the drive/type, enforces the aliasing
+ * and O_EXCL rules described above, grabs IRQ/DMA, allocates the track
+ * buffer on first open, and (unless O_NDELAY) verifies that a readable
+ * -- and for write opens, writable -- disk is present.
+ * Returns 0 or a negative errno. */
+static int floppy_open(struct inode * inode, struct file * filp)
+{
+ int drive;
+ int old_dev;
+ int try;
+ char *tmp;
+
+ if (!filp) {
+ DPRINT("Weird, open called with filp=0\n");
+ return -EIO;
+ }
+
+ drive = DRIVE(inode->i_rdev);
+ if (drive >= N_DRIVE ||
+ !(allowed_drive_mask & (1 << drive)) ||
+ fdc_state[FDC(drive)].version == FDC_NONE)
+ return -ENXIO;
+
+ if (TYPE(inode->i_rdev) >= NUMBER(floppy_type))
+ return -ENXIO;
+ old_dev = UDRS->fd_device;
+ /* same physical drive already open via a different minor? */
+ if (UDRS->fd_ref && old_dev != MINOR(inode->i_rdev))
+ return -EBUSY;
+
+ if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)){
+ USETF(FD_DISK_CHANGED);
+ USETF(FD_VERIFY);
+ }
+
+ /* fd_ref == -1 marks an existing exclusive open */
+ if (UDRS->fd_ref == -1 ||
+ (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
+ return -EBUSY;
+
+ if (floppy_grab_irq_and_dma())
+ return -EBUSY;
+
+ if (filp->f_flags & O_EXCL)
+ UDRS->fd_ref = -1;
+ else
+ UDRS->fd_ref++;
+
+ if (!floppy_track_buffer){
+ /* if opening an ED drive, reserve a big buffer,
+ * else reserve a small one */
+ if ((UDP->cmos == 6) || (UDP->cmos == 5))
+ try = 64; /* Only 48 actually useful */
+ else
+ try = 32; /* Only 24 actually useful */
+
+ tmp=(char *)fd_dma_mem_alloc(1024 * try);
+ if (!tmp) {
+ try >>= 1; /* buffer only one side */
+ INFBOUND(try, 16);
+ tmp= (char *)fd_dma_mem_alloc(1024*try);
+ }
+ if (!tmp) {
+ DPRINT("Unable to allocate DMA memory\n");
+ RETERR(ENXIO);
+ }
+ /* someone may have raced us to the allocation */
+ if (floppy_track_buffer)
+ fd_dma_mem_free((unsigned long)tmp,try*1024);
+ else {
+ buffer_min = buffer_max = -1;
+ floppy_track_buffer = tmp;
+ max_buffer_sectors = try;
+ }
+ }
+
+ UDRS->fd_device = MINOR(inode->i_rdev);
+ if (old_dev != -1 && old_dev != MINOR(inode->i_rdev)) {
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ invalidate_buffers(MKDEV(FLOPPY_MAJOR,old_dev));
+ }
+
+ /* Allow ioctls if we have write-permissions even if read-only open */
+ if ((filp->f_mode & 2) || (permission(inode,2) == 0))
+ filp->f_mode |= IOCTL_MODE_BIT;
+ if (filp->f_mode & 2)
+ filp->f_mode |= OPEN_WRITE_BIT;
+
+ if (UFDCS->rawcmd == 1)
+ UFDCS->rawcmd = 2;
+
+ /* O_NDELAY opens skip the media checks entirely */
+ if (filp->f_flags & O_NDELAY)
+ return 0;
+ if (filp->f_mode & 3) {
+ UDRS->last_checked = 0;
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ RETERR(ENXIO);
+ }
+ if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
+ RETERR(EROFS);
+ return 0;
+#undef RETERR
+}
+
+/*
+ * Check if the disk has been changed or if a change has been faked.
+ */
+/* media_change hook: returns 1 if the disk has (or may have) been
+ * changed, 0 otherwise.  Polls the drive if the last check is older
+ * than UDP->checkfreq jiffies. */
+static int check_floppy_change(kdev_t dev)
+{
+ int drive = DRIVE(dev);
+
+ if (MAJOR(dev) != MAJOR_NR) {
+ DPRINT("check_floppy_change: not a floppy\n");
+ return 0;
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
+ return 1;
+
+ /* NOTE(review): plain '<' comparison on jiffies arithmetic --
+ * presumably tolerable here, but misbehaves across jiffies wrap */
+ if (UDP->checkfreq < jiffies - UDRS->last_checked){
+ lock_fdc(drive,0);
+ poll_drive(0,0);
+ process_fd_request();
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ (!TYPE(dev) && !current_type[drive]))
+ return 1;
+ return 0;
+}
+
+/* revalidate the floppy disk, i.e. trigger format autodetection by reading
+ * the bootblock (block 0). "Autodetection" is also needed to check whether
+ * there is a disk in the drive at all... Thus we also do it for fixed
+ * geometry formats */
+/* revalidate hook: re-read the bootblock to trigger geometry
+ * autodetection (and to verify a disk is actually present).  Returns 0
+ * on success, 1 if block 0 could not be buffered. */
+static int floppy_revalidate(kdev_t dev)
+{
+/* true when no geometry is known for this minor (autodetect needed) */
+#define NO_GEOM (!current_type[drive] && !TYPE(dev))
+ struct buffer_head * bh;
+ int drive=DRIVE(dev);
+ int cf;
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ NO_GEOM){
+ lock_fdc(drive,0);
+ /* re-test under the lock: another thread may have already
+ * completed the revalidation while we waited */
+ cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY);
+ if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)){
+ process_fd_request(); /*already done by another thread*/
+ return 0;
+ }
+ UDRS->maxblock = 0;
+ UDRS->maxtrack = 0;
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ clear_bit(drive, &fake_change);
+ UCLEARF(FD_DISK_CHANGED);
+ if (cf)
+ UDRS->generation++;
+ if (NO_GEOM){
+ /* auto-sensing */
+ int size = floppy_blocksizes[MINOR(dev)];
+ if (!size)
+ size = 1024;
+ if (!(bh = getblk(dev,0,size))){
+ process_fd_request();
+ return 1;
+ }
+ if (bh && !buffer_uptodate(bh))
+ ll_rw_block(READ, 1, &bh);
+ process_fd_request();
+ wait_on_buffer(bh);
+ brelse(bh);
+ return 0;
+ }
+ if (cf)
+ poll_drive(0, FD_RAW_NEED_DISK);
+ process_fd_request();
+ }
+ return 0;
+}
+
+/* File operations table for the floppy block device (positional
+ * initializer, pre-2.2 struct file_operations layout). */
+static struct file_operations floppy_fops = {
+ NULL, /* lseek - default */
+ floppy_read, /* read - general block-dev read */
+ floppy_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ fd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ floppy_open, /* open */
+ floppy_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_floppy_change, /* media_change */
+ floppy_revalidate, /* revalidate */
+};
+
+/*
+ * Floppy Driver initialization
+ * =============================
+ */
+
+/* Determine the floppy disk controller type */
+/* This routine was written by David C. Niemi */
+/* Probe the current FDC ('fdc' global) by issuing progressively newer
+ * commands (DUMPREGS, CONFIGURE, PERPENDICULAR, UNLOCK, PARTID) and
+ * classifying the controller from the replies.  Returns one of the
+ * FDC_* version constants, or FDC_NONE if no controller responds. */
+static char get_fdc_version(void)
+{
+ int r;
+
+ output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
+ if (FDCS->reset)
+ return FDC_NONE;
+ if ((r = result()) <= 0x00)
+ return FDC_NONE; /* No FDC present ??? */
+ if ((r==1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is an 8272A\n",fdc);
+ return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
+ }
+ if (r != 10) {
+ printk("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+
+ if(!fdc_configure()) {
+ printk(KERN_INFO "FDC %d is an 82072\n",fdc);
+ return FDC_82072; /* 82072 doesn't know CONFIGURE */
+ }
+
+ output_byte(FD_PERPENDICULAR);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(0);
+ } else {
+ printk(KERN_INFO "FDC %d is an 82072A\n", fdc);
+ return FDC_82072A; /* 82072A as found on Sparcs. */
+ }
+
+ output_byte(FD_UNLOCK);
+ r = result();
+ if ((r == 1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc);
+ return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
+ * LOCK/UNLOCK */
+ }
+ if ((r != 1) || (reply_buffer[0] != 0x00)) {
+ printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_PARTID);
+ r = result();
+ if (r != 1) {
+ printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ if (reply_buffer[0] == 0x80) {
+ printk(KERN_INFO "FDC %d is a post-1991 82077\n",fdc);
+ return FDC_82077; /* Revised 82077AA passes all the tests */
+ }
+ /* the top three PARTID bits distinguish the 82078 variants */
+ switch (reply_buffer[0] >> 5) {
+ case 0x0:
+ /* Either a 82078-1 or a 82078SL running at 5Volt */
+ printk(KERN_INFO "FDC %d is an 82078.\n",fdc);
+ return FDC_82078;
+ case 0x1:
+ printk(KERN_INFO "FDC %d is a 44pin 82078\n",fdc);
+ return FDC_82078;
+ case 0x2:
+ printk(KERN_INFO "FDC %d is a S82078B\n", fdc);
+ return FDC_S82078B;
+ case 0x3:
+ printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n", fdc);
+ return FDC_87306;
+ default:
+ printk(KERN_INFO "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
+ fdc, reply_buffer[0] >> 5);
+ return FDC_82078_UNKN;
+ }
+} /* get_fdc_version */
+
+/* lilo configuration */
+
+/* we make the invert_dcl function global. One day, somebody might
+ * want to centralize all thinkpad related options into one lilo option,
+ * there are just so many thinkpad related quirks! */
+/* lilo option handler: set (param != 0) or clear the inverted-DCL flag
+ * (bit 0x80) on every entry of default_drive_params, for ThinkPads
+ * whose disk-change line has inverted polarity. */
+void floppy_invert_dcl(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param)
+ default_drive_params[i].params.flags |= 0x80;
+ else
+ default_drive_params[i].params.flags &= ~0x80;
+ }
+ DPRINT("Configuring drives for inverted dcl\n");
+}
+
+/* lilo option handler: with param != 0 assume well-behaved hardware
+ * (no select delay, silent DCL clear); with param == 0 restore the
+ * conservative defaults for broken hardware. */
+static void daring(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param){
+ default_drive_params[i].params.select_delay = 0;
+ default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR;
+ } else {
+ default_drive_params[i].params.select_delay = 2*HZ/100;
+ default_drive_params[i].params.flags &= ~FD_SILENT_DCL_CLEAR;
+ }
+ }
+ DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
+}
+
+/* lilo option handler for "cmos": ints[1] is the drive number (0-7),
+ * ints[2] the CMOS type code to assign.  Drives 4-7 imply a second
+ * controller, so FDC2 is defaulted to 0x370 if unset.  Bad input is
+ * reported and ignored. */
+static void set_cmos(int *ints, int dummy)
+{
+ int current_drive=0;
+
+ if (ints[0] != 2){
+ DPRINT("wrong number of parameter for cmos\n");
+ return;
+ }
+ current_drive = ints[1];
+ if (current_drive < 0 || current_drive >= 8){
+ DPRINT("bad drive for set_cmos\n");
+ return;
+ }
+ if (current_drive >= 4 && !FDC2)
+ FDC2 = 0x370;
+ /* code 16 is accepted as an explicit "unknown" escape value */
+ if (ints[2] <= 0 ||
+ (ints[2] >= NUMBER(default_drive_params) && ints[2] != 16)){
+ DPRINT("bad cmos code %d\n", ints[2]);
+ return;
+ }
+ DP->cmos = ints[2];
+ DPRINT("setting cmos code to %d\n", ints[2]);
+}
+
+/* Table of recognized "floppy=" boot/module options.  Each entry names
+ * an option and either a handler function (fn) or an int variable to
+ * set (var); def_param is used when the user supplies no value. */
+static struct param_table {
+ const char *name; /* option keyword */
+ void (*fn)(int *ints, int param); /* handler, or 0 */
+ int *var; /* variable to assign, or 0 */
+ int def_param; /* value when none given */
+} config_params[]={
+ { "allowed_drive_mask", 0, &allowed_drive_mask, 0xff },
+ { "all_drives", 0, &allowed_drive_mask, 0xff },
+ { "asus_pci", 0, &allowed_drive_mask, 0x33 },
+
+ { "daring", daring, 0, 1},
+
+ { "two_fdc", 0, &FDC2, 0x370 },
+ { "one_fdc", 0, &FDC2, 0 },
+
+ { "thinkpad", floppy_invert_dcl, 0, 1 },
+
+ { "nodma", 0, &use_virtual_dma, 1 },
+ { "omnibook", 0, &use_virtual_dma, 1 },
+ { "dma", 0, &use_virtual_dma, 0 },
+
+ { "fifo_depth", 0, &fifo_depth, 0xa },
+ { "nofifo", 0, &no_fifo, 0x20 },
+ { "usefifo", 0, &no_fifo, 0 },
+
+ { "cmos", set_cmos, 0, 0 },
+
+ { "unexpected_interrupts", 0, &print_unex, 1 },
+ { "no_unexpected_interrupts", 0, &print_unex, 0 },
+ { "L40SX", 0, &print_unex, 0 } };
+
+#define FLOPPY_SETUP
+/* Parse one "floppy=" option string.  Looks str up in config_params;
+ * on a match, runs the entry's handler and/or assigns its variable
+ * (using ints[1] if the user gave a value, else def_param).  Unknown
+ * or malformed options print a diagnostic listing valid names. */
+void floppy_setup(char *str, int *ints)
+{
+ int i;
+ int param;
+ if (str)
+ for (i=0; i< ARRAY_SIZE(config_params); i++){
+ if (strcmp(str,config_params[i].name) == 0){
+ if (ints[0])
+ param = ints[1];
+ else
+ param = config_params[i].def_param;
+ if(config_params[i].fn)
+ config_params[i].fn(ints,param);
+ if(config_params[i].var) {
+ DPRINT("%s=%d\n", str, param);
+ *config_params[i].var = param;
+ }
+ return;
+ }
+ }
+ if (str) {
+ DPRINT("unknown floppy option [%s]\n", str);
+
+ DPRINT("allowed options are:");
+ for (i=0; i< ARRAY_SIZE(config_params); i++)
+ printk(" %s",config_params[i].name);
+ printk("\n");
+ } else
+ DPRINT("botched floppy option\n");
+ DPRINT("Read linux/drivers/block/README.fd\n");
+}
+
+/* Driver initialization: register the block device, set up size
+ * tables, probe each controller and reset it, and register the I/O
+ * regions.  Returns 0 if at least one working FDC was found, else a
+ * negative errno (and unregisters everything again). */
+int floppy_init(void)
+{
+ int i,unit,drive;
+ int have_no_fdc= -EIO;
+
+ raw_cmd = 0;
+
+ if (register_blkdev(MAJOR_NR,"fd",&floppy_fops)) {
+ printk("Unable to get major %d for floppy\n",MAJOR_NR);
+ return -EBUSY;
+ }
+
+ /* per-minor capacity table: fixed formats get their real size,
+ * autodetected minors get the maximum */
+ for (i=0; i<256; i++)
+ if (ITYPE(i))
+ floppy_sizes[i] = floppy_type[ITYPE(i)].size >> 1;
+ else
+ floppy_sizes[i] = MAX_DISK_SIZE;
+
+ blk_size[MAJOR_NR] = floppy_sizes;
+ blksize_size[MAJOR_NR] = floppy_blocksizes;
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
+ config_types();
+
+ /* initialise controller state (FDCS refers to the 'fdc' global) */
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ CLEARSTRUCT(FDCS);
+ FDCS->dtr = -1;
+ FDCS->dor = 0x4;
+#ifdef __sparc__
+ /*sparcs don't have a DOR reset which we can fall back on to*/
+ FDCS->version = FDC_82072A;
+#endif
+ }
+
+ fdc_state[0].address = FDC1;
+#if N_FDC > 1
+ fdc_state[1].address = FDC2;
+#endif
+
+ if (floppy_grab_irq_and_dma()){
+ del_timer(&fd_timeout);
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ return -EBUSY;
+ }
+
+ /* initialise drive state */
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ CLEARSTRUCT(UDRS);
+ CLEARSTRUCT(UDRWE);
+ UDRS->flags = FD_VERIFY | FD_DISK_NEWCHANGE | FD_DISK_CHANGED;
+ UDRS->fd_device = -1;
+ floppy_track_buffer = NULL;
+ max_buffer_sectors = 0;
+ }
+
+ /* probe each controller; failures mark it FDC_NONE/address -1 */
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ FDCS->driver_version = FD_DRIVER_VERSION;
+ for (unit=0; unit<4; unit++)
+ FDCS->track[unit] = 0;
+ if (FDCS->address == -1)
+ continue;
+ FDCS->rawcmd = 2;
+ if (user_reset_fdc(-1,FD_RESET_ALWAYS,0)){
+ FDCS->address = -1;
+ FDCS->version = FDC_NONE;
+ continue;
+ }
+ /* Try to determine the floppy controller type */
+ FDCS->version = get_fdc_version();
+ if (FDCS->version == FDC_NONE){
+ FDCS->address = -1;
+ continue;
+ }
+
+ request_region(FDCS->address, 6, "floppy");
+ request_region(FDCS->address+7, 1, "floppy DIR");
+ /* address + 6 is reserved, and may be taken by IDE.
+ * Unfortunately, Adaptec doesn't know this :-(, */
+
+ have_no_fdc = 0;
+ /* Not all FDCs seem to be able to handle the version command
+ * properly, so force a reset for the standard FDC clones,
+ * to avoid interrupt garbage.
+ */
+ user_reset_fdc(-1,FD_RESET_ALWAYS,0);
+ }
+ fdc=0;
+ del_timer(&fd_timeout);
+ current_drive = 0;
+ floppy_release_irq_and_dma();
+ initialising=0;
+ if (have_no_fdc) {
+ DPRINT("no floppy controllers found\n");
+ request_tq.routine = (void *)(void *) empty;
+ /*
+ * When we return we may be unloaded. This little
+ * trick forces the immediate_bh handler to have run
+ * before we unload it, lest we cause bad things.
+ */
+ mark_bh(IMMEDIATE_BH);
+ schedule();
+ if (usage_count)
+ floppy_release_irq_and_dma();
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ }
+ return have_no_fdc;
+}
+
+/* Acquire the floppy IRQ and DMA channel on first use (refcounted via
+ * usage_count), reset controller state, and program each controller's
+ * DOR.  Returns 0 on success, -1 on failure (with the refcount and
+ * module count rolled back). */
+static int floppy_grab_irq_and_dma(void)
+{
+ int i;
+ unsigned long flags;
+
+ INT_OFF;
+ /* already grabbed by an earlier open: just bump the refcount */
+ if (usage_count++){
+ INT_ON;
+ return 0;
+ }
+ INT_ON;
+ MOD_INC_USE_COUNT;
+ for (i=0; i< N_FDC; i++){
+ if (fdc_state[i].address != -1){
+ fdc = i;
+ reset_fdc_info(1);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+ }
+ fdc = 0;
+ set_dor(0, ~0, 8); /* avoid immediate interrupt */
+
+ if (fd_request_irq()) {
+ DPRINT("Unable to grab IRQ%d for the floppy driver\n",
+ FLOPPY_IRQ);
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ if (fd_request_dma()) {
+ DPRINT("Unable to grab DMA%d for the floppy driver\n",
+ FLOPPY_DMA);
+ fd_free_irq();
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ fd_outb(FDCS->dor, FD_DOR);
+ fdc = 0;
+ fd_enable_irq();
+ irqdma_allocated=1;
+ return 0;
+}
+
+/* Drop one reference on the floppy IRQ/DMA resources; on the last
+ * release, free IRQ and DMA, quiesce the controllers, free the track
+ * buffer, and (with FLOPPY_SANITY_CHECK) warn about timers or task
+ * queue entries that are still pending. */
+static void floppy_release_irq_and_dma(void)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ int drive;
+#endif
+ long tmpsize;
+ unsigned long tmpaddr;
+ unsigned long flags;
+
+ INT_OFF;
+ /* still in use by another opener */
+ if (--usage_count){
+ INT_ON;
+ return;
+ }
+ INT_ON;
+ if(irqdma_allocated)
+ {
+ fd_disable_dma();
+ fd_free_dma();
+ fd_disable_irq();
+ fd_free_irq();
+ irqdma_allocated=0;
+ }
+
+ set_dor(0, ~0, 8);
+#if N_FDC > 1
+ set_dor(1, ~8, 0);
+#endif
+ floppy_enable_hlt();
+
+ /* detach the track buffer before freeing it */
+ if (floppy_track_buffer && max_buffer_sectors) {
+ tmpsize = max_buffer_sectors*1024;
+ tmpaddr = (unsigned long)floppy_track_buffer;
+ floppy_track_buffer = 0;
+ max_buffer_sectors = 0;
+ buffer_min = buffer_max = -1;
+ fd_dma_mem_free(tmpaddr, tmpsize);
+ }
+
+#ifdef FLOPPY_SANITY_CHECK
+#ifndef __sparc__
+ for (drive=0; drive < N_FDC * 4; drive++)
+ if (motor_off_timer[drive].next)
+ printk("motor off timer %d still active\n", drive);
+#endif
+
+ if (fd_timeout.next)
+ printk("floppy timer still active:%s\n", timeout_message);
+ if (fd_timer.next)
+ printk("auxiliary floppy timer still active\n");
+ if (floppy_tq.sync)
+ printk("task queue still active\n");
+#endif
+ MOD_DEC_USE_COUNT;
+}
+
+
+#ifdef MODULE
+
+char *floppy=NULL;
+
+/* Split a whitespace-separated module option string ("floppy=...")
+ * into individual tokens and feed each through get_options() to
+ * floppy_setup().  Mutates cfg in place (writes '\0' separators). */
+static void parse_floppy_cfg_string(char *cfg)
+{
+ char *ptr;
+ int ints[11];
+
+ while(*cfg) {
+ for(ptr = cfg;*cfg && *cfg != ' ' && *cfg != '\t'; cfg++);
+ if(*cfg) {
+ *cfg = '\0';
+ cfg++;
+ }
+ if(*ptr)
+ floppy_setup(get_options(ptr,ints),ints);
+ }
+}
+
+/* Scan the current process's environment strings (read byte-by-byte
+ * via get_fs_byte) for an occurrence of 'pattern' (e.g. "floppy="),
+ * then pass each whitespace-separated token that follows it through
+ * get_options() to the supplied setup handler.  Tokens are clipped to
+ * the 100-byte local buffer. */
+static void mod_setup(char *pattern, void (*setup)(char *, int *))
+{
+ unsigned long i;
+ char c;
+ int j;
+ int match;
+ char buffer[100];
+ int ints[11];
+ int length = strlen(pattern)+1;
+
+ match=0;
+ j=1;
+
+ for (i=current->mm->env_start; i< current->mm->env_end; i ++){
+ c= get_fs_byte(i);
+ if (match){
+ /* force termination when the buffer is full */
+ if (j==99)
+ c='\0';
+ buffer[j] = c;
+ if (!c || c == ' ' || c == '\t'){
+ if (j){
+ buffer[j] = '\0';
+ setup(get_options(buffer,ints),ints);
+ }
+ j=0;
+ } else
+ j++;
+ if (!c)
+ break;
+ continue;
+ }
+ /* pattern matching state machine: j counts matched chars,
+ * a pattern hit must start right after a '\0' (j==0, c=='\0'
+ * resets/advances via the first clause) */
+ if ((!j && !c) || (j && c == pattern[j-1]))
+ j++;
+ else
+ j=0;
+ if (j==length){
+ match=1;
+ j=0;
+ }
+ }
+}
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Module entry point: apply either the "floppy" module parameter or
+ * any "floppy=" settings found in the environment, then run the
+ * normal driver initialization. */
+int init_module(void)
+{
+ printk(KERN_INFO "inserting floppy driver for %s\n", kernel_version);
+
+ if(floppy)
+ parse_floppy_cfg_string(floppy);
+ else
+ mod_setup("floppy=", floppy_setup);
+
+ return floppy_init();
+}
+
+/* Module exit: release the I/O regions of both possible controllers,
+ * unregister the block device, and eject any disk left in drive 0.
+ * (The local 'fdc' shadows the global so FDCS resolves per-loop.) */
+void cleanup_module(void)
+{
+ int fdc, dummy;
+
+ for (fdc=0; fdc<2; fdc++)
+ if (FDCS->address != -1){
+ release_region(FDCS->address, 6);
+ release_region(FDCS->address+7, 1);
+ }
+
+ unregister_blkdev(MAJOR_NR, "fd");
+
+ blk_dev[MAJOR_NR].request_fn = 0;
+ /* eject disk, if any */
+ dummy = fd_eject(0);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#else
+/* eject the boot floppy (if we need the drive for a different root floppy) */
+/* This should only be called at boot time when we're sure that there's no
+ * resource contention. */
+/* eject the boot floppy (if we need the drive for a different root floppy) */
+/* This should only be called at boot time when we're sure that there's no
+ * resource contention. */
+void floppy_eject(void)
+{
+ int dummy;
+ /* only proceed if we actually obtained IRQ/DMA */
+ if(floppy_grab_irq_and_dma()==0)
+ {
+ lock_fdc(MAXTIMEOUT,0);
+ dummy=fd_eject(0);
+ process_fd_request();
+ floppy_release_irq_and_dma();
+ }
+}
+#endif
diff --git a/linux/src/drivers/block/genhd.c b/linux/src/drivers/block/genhd.c
new file mode 100644
index 0000000..ebee7ff
--- /dev/null
+++ b/linux/src/drivers/block/genhd.c
@@ -0,0 +1,761 @@
+/*
+ * Code extracted from
+ * linux/kernel/hd.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *
+ * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * in the early extended-partition checks and added DM partitions
+ *
+ * Support for DiskManager v6.0x added by Mark Lord,
+ * with information provided by OnTrack. This now works for linux fdisk
+ * and LILO, as well as loadlin and bootln. Note that disks other than
+ * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ * More flexible handling of extended partitions - aeb, 950831
+ *
+ * Check partition table on IDE disks for common CHS translations
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+
+#include <asm/system.h>
+
+/*
+ * Many architectures don't like unaligned accesses, which is
+ * frequently the case with the nr_sects and start_sect partition
+ * table entries.
+ */
+#include <asm/unaligned.h>
+
+#define SYS_IND(p) get_unaligned(&p->sys_ind)
+#define NR_SECTS(p) get_unaligned(&p->nr_sects)
+#define START_SECT(p) get_unaligned(&p->start_sect)
+
+
+struct gendisk *gendisk_head = NULL;
+
+static int current_minor = 0;
+extern int *blk_size[];
+extern void rd_load(void);
+extern void initrd_load(void);
+
+extern int chr_dev_init(void);
+extern int blk_dev_init(void);
+extern int scsi_dev_init(void);
+extern int net_dev_init(void);
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf)
+{
+ unsigned int part;
+ const char *maj = hd->major_name;
+ char unit = (minor >> hd->minor_shift) + 'a';
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /*
+ * IDE devices use multiple major numbers, but the drives
+ * are named as: {hda,hdb}, {hdc,hdd}, {hde,hdf}, {hdg,hdh}..
+ * This requires special handling here.
+ * (deliberate fall-through: each higher major adds 2 units)
+ */
+ switch (hd->major) {
+ case IDE3_MAJOR:
+ unit += 2;
+ case IDE2_MAJOR:
+ unit += 2;
+ case IDE1_MAJOR:
+ unit += 2;
+ case IDE0_MAJOR:
+ maj = "hd";
+ }
+#endif
+ part = minor & ((1 << hd->minor_shift) - 1);
+ /* partition 0 is the whole disk: no numeric suffix */
+ if (part)
+ sprintf(buf, "%s%c%d", maj, unit, part);
+ else
+ sprintf(buf, "%s%c", maj, unit);
+ return buf;
+}
+
+/* Record a partition's start sector and length in hd->part[minor] and
+ * print its device name as part of the partition-scan output line. */
+static void add_partition (struct gendisk *hd, int minor, int start, int size)
+{
+ char buf[8];
+ hd->part[minor].start_sect = start;
+ hd->part[minor].nr_sects = size;
+ printk(" %s", disk_name(hd, minor, buf));
+}
+
+/* True if the partition table entry carries one of the three extended
+ * partition system IDs (DOS, Win98, or Linux extended). */
+static inline int is_extended_partition(struct partition *p)
+{
+ return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+ SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+ SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+}
+
+#ifdef CONFIG_MSDOS_PARTITION
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries. The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start). The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+/* Walk the linked list of logical partitions inside an extended
+ * partition (see the comment block above), creating a Linux partition
+ * for each data entry.  Stops when the minor numbers for this disk are
+ * exhausted, a sector read fails, or the chain ends. */
+static void extended_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned long first_sector, first_size, this_sector, this_size;
+ int mask = (1 << hd->minor_shift) - 1;
+ int i;
+
+ first_sector = hd->part[MINOR(dev)].start_sect;
+ first_size = hd->part[MINOR(dev)].nr_sects;
+ this_sector = first_sector;
+
+ while (1) {
+ /* no free minor left on this disk */
+ if ((current_minor & mask) == 0)
+ return;
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ /*
+ * This block is from a device that we're about to stomp on.
+ * So make sure nobody thinks this block is usable.
+ */
+ bh->b_state = 0;
+
+ /* 0xAA55 boot signature at offset 510 */
+ if (*(unsigned short *) (bh->b_data+510) != 0xAA55)
+ goto done;
+
+ p = (struct partition *) (0x1BE + bh->b_data);
+
+ this_size = hd->part[MINOR(dev)].nr_sects;
+
+ /*
+ * Usually, the first entry is the real data partition,
+ * the 2nd entry is the next extended partition, or empty,
+ * and the 3rd and 4th entries are unused.
+ * However, DRDOS sometimes has the extended partition as
+ * the first entry (when the data partition is empty),
+ * and OS/2 seems to use all four entries.
+ */
+
+ /*
+ * First process the data partition(s)
+ */
+ for (i=0; i<4; i++, p++) {
+ if (!NR_SECTS(p) || is_extended_partition(p))
+ continue;
+
+ /* Check the 3rd and 4th entries -
+ these sometimes contain random garbage */
+ if (i >= 2
+ && START_SECT(p) + NR_SECTS(p) > this_size
+ && (this_sector + START_SECT(p) < first_sector ||
+ this_sector + START_SECT(p) + NR_SECTS(p) >
+ first_sector + first_size))
+ continue;
+
+ add_partition(hd, current_minor, this_sector+START_SECT(p), NR_SECTS(p));
+ current_minor++;
+ if ((current_minor & mask) == 0)
+ goto done;
+ }
+ /*
+ * Next, process the (first) extended partition, if present.
+ * (So far, there seems to be no reason to make
+ * extended_partition() recursive and allow a tree
+ * of extended partitions.)
+ * It should be a link to the next logical partition.
+ * Create a minor for this just long enough to get the next
+ * partition table. The minor will be reused for the next
+ * data partition.
+ */
+ p -= 4;
+ for (i=0; i<4; i++, p++)
+ if(NR_SECTS(p) && is_extended_partition(p))
+ break;
+ if (i == 4)
+ goto done; /* nothing left to do */
+
+ hd->part[current_minor].nr_sects = NR_SECTS(p);
+ hd->part[current_minor].start_sect = first_sector + START_SECT(p);
+ this_sector = first_sector + START_SECT(p);
+ dev = MKDEV(hd->major, current_minor);
+ brelse(bh);
+ }
+done:
+ brelse(bh);
+}
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See extended_partition() for more information.
+ */
+/*
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See extended_partition() for more information.
+ *
+ * Fix: the loop condition used "<= BSD_MAXPARTITIONS", which walks one
+ * struct bsd_partition past the end of l->d_partitions[] (an array of
+ * BSD_MAXPARTITIONS entries) and could register a garbage partition
+ * from out-of-bounds memory.  Use "<" so exactly BSD_MAXPARTITIONS
+ * entries are examined, as in later kernel versions.
+ */
+static void bsd_disklabel_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct bsd_disklabel *l;
+ struct bsd_partition *p;
+ int mask = (1 << hd->minor_shift) - 1;
+
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ /* buffer is about to be reinterpreted; keep it out of the cache */
+ bh->b_state = 0;
+ /* the disklabel lives in the second 512-byte sector of the block */
+ l = (struct bsd_disklabel *) (bh->b_data+512);
+ if (l->d_magic != BSD_DISKMAGIC) {
+ brelse(bh);
+ return;
+ }
+
+ p = &l->d_partitions[0];
+ while (p - &l->d_partitions[0] < BSD_MAXPARTITIONS) {
+ /* stop when this disk's minor numbers are exhausted */
+ if ((current_minor & mask) >= (4 + hd->max_p))
+ break;
+
+ if (p->p_fstype != BSD_FS_UNUSED) {
+ add_partition(hd, current_minor, p->p_offset, p->p_size);
+ current_minor++;
+ }
+ p++;
+ }
+ brelse(bh);
+
+}
+#endif
+
+static int msdos_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, minor = current_minor;
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned char *data;
+ int mask = (1 << hd->minor_shift) - 1;
+#ifdef CONFIG_BLK_DEV_IDE
+ int tested_for_xlate = 0;
+
+read_mbr:
+#endif
+ if (!(bh = bread(dev,0,1024))) {
+ printk(" unable to read partition table\n");
+ return -1;
+ }
+ data = bh->b_data;
+ /* In some cases we modify the geometry */
+ /* of the drive (below), so ensure that */
+ /* nobody else tries to re-use this data. */
+ bh->b_state = 0;
+#ifdef CONFIG_BLK_DEV_IDE
+check_table:
+#endif
+ if (*(unsigned short *) (0x1fe + data) != 0xAA55) {
+ brelse(bh);
+ return 0;
+ }
+ p = (struct partition *) (0x1be + data);
+
+#ifdef CONFIG_BLK_DEV_IDE
+ if (!tested_for_xlate++) { /* Do this only once per disk */
+ /*
+ * Look for various forms of IDE disk geometry translation
+ */
+ extern int ide_xlate_1024(kdev_t, int, const char *);
+ unsigned int sig = *(unsigned short *)(data + 2);
+ if (SYS_IND(p) == EZD_PARTITION) {
+ /*
+ * The remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, -1, " [EZD]")) {
+ data += 512;
+ goto check_table;
+ }
+ } else if (SYS_IND(p) == DM6_PARTITION) {
+
+ /*
+ * Everything on the disk is offset by 63 sectors,
+ * including a "new" MBR with its own partition table,
+ * and the remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, 1, " [DM6:DDO]")) {
+ brelse(bh);
+ goto read_mbr; /* start over with new MBR */
+ }
+ } else if (sig <= 0x1ae && *(unsigned short *)(data + sig) == 0x55AA
+ && (1 & *(unsigned char *)(data + sig + 2)) )
+ {
+ /*
+ * DM6 signature in MBR, courtesy of OnTrack
+ */
+ (void) ide_xlate_1024 (dev, 0, " [DM6:MBR]");
+ } else if (SYS_IND(p) == DM6_AUX1PARTITION || SYS_IND(p) == DM6_AUX3PARTITION) {
+ /*
+ * DM6 on other than the first (boot) drive
+ */
+ (void) ide_xlate_1024(dev, 0, " [DM6:AUX]");
+ } else {
+ /*
+ * Examine the partition table for common translations.
+				 * This is necessary for drives in situations where
+ * the translated geometry is unavailable from the BIOS.
+ */
+ for (i = 0; i < 4 ; i++) {
+ struct partition *q = &p[i];
+ if (NR_SECTS(q)
+ && (q->sector & 63) == 1
+ && (q->end_sector & 63) == 63) {
+ unsigned int heads = q->end_head + 1;
+ if (heads == 32 || heads == 64 || heads == 128 || heads == 255) {
+
+ (void) ide_xlate_1024(dev, heads, " [PTBL]");
+ break;
+ }
+ }
+ }
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDE */
+
+ current_minor += 4; /* first "extra" minor (for extended partitions) */
+ for (i=1 ; i<=4 ; minor++,i++,p++) {
+ if (!NR_SECTS(p))
+ continue;
+ add_partition(hd, minor, first_sector+START_SECT(p), NR_SECTS(p));
+ if (is_extended_partition(p)) {
+ printk(" <");
+ /*
+ * If we are rereading the partition table, we need
+ * to set the size of the partition so that we will
+ * be able to bread the block containing the extended
+ * partition info.
+ */
+ hd->sizes[minor] = hd->part[minor].nr_sects
+ >> (BLOCK_SIZE_BITS - 9);
+ extended_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ /* prevent someone doing mkfs or mkswap on an
+ extended partition, but leave room for LILO */
+ if (hd->part[minor].nr_sects > 2)
+ hd->part[minor].nr_sects = 2;
+ }
+#ifdef CONFIG_BSD_DISKLABEL
+ if (SYS_IND(p) == BSD_PARTITION) {
+ printk(" <");
+ bsd_disklabel_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ }
+#endif
+ }
+ /*
+ * Check for old-style Disk Manager partition table
+ */
+ if (*(unsigned short *) (data+0xfc) == 0x55AA) {
+ p = (struct partition *) (0x1be + data);
+ for (i = 4 ; i < 16 ; i++, current_minor++) {
+ p--;
+ if ((current_minor & mask) == 0)
+ break;
+ if (!(START_SECT(p) && NR_SECTS(p)))
+ continue;
+ add_partition(hd, current_minor, START_SECT(p), NR_SECTS(p));
+ }
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_MSDOS_PARTITION */
+
+#ifdef CONFIG_OSF_PARTITION
+
+static int osf_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ int i;
+ int mask = (1 << hd->minor_shift) - 1;
+ struct buffer_head *bh;
+ struct disklabel {
+ u32 d_magic;
+ u16 d_type,d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ u32 d_secsize;
+ u32 d_nsectors;
+ u32 d_ntracks;
+ u32 d_ncylinders;
+ u32 d_secpercyl;
+ u32 d_secprtunit;
+ u16 d_sparespertrack;
+ u16 d_sparespercyl;
+ u32 d_acylinders;
+ u16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ u32 d_headswitch, d_trkseek, d_flags;
+ u32 d_drivedata[5];
+ u32 d_spare[5];
+ u32 d_magic2;
+ u16 d_checksum;
+ u16 d_npartitions;
+ u32 d_bbsize, d_sbsize;
+ struct d_partition {
+ u32 p_size;
+ u32 p_offset;
+ u32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ u16 p_cpg;
+ } d_partitions[8];
+ } * label;
+ struct d_partition * partition;
+#define DISKLABELMAGIC (0x82564557UL)
+
+ if (!(bh = bread(dev,0,1024))) {
+ printk("unable to read partition table\n");
+ return -1;
+ }
+ label = (struct disklabel *) (bh->b_data+64);
+ partition = label->d_partitions;
+ if (label->d_magic != DISKLABELMAGIC) {
+ printk("magic: %08x\n", label->d_magic);
+ brelse(bh);
+ return 0;
+ }
+ if (label->d_magic2 != DISKLABELMAGIC) {
+ printk("magic2: %08x\n", label->d_magic2);
+ brelse(bh);
+ return 0;
+ }
+ for (i = 0 ; i < label->d_npartitions; i++, partition++) {
+ if ((current_minor & mask) == 0)
+ break;
+ if (partition->p_size)
+ add_partition(hd, current_minor,
+ first_sector+partition->p_offset,
+ partition->p_size);
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_OSF_PARTITION */
+
+#ifdef CONFIG_SUN_PARTITION
+
+static int sun_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, csum;
+ unsigned short *ush;
+ struct buffer_head *bh;
+ struct sun_disklabel {
+ unsigned char info[128]; /* Informative text string */
+ unsigned char spare[292]; /* Boot information etc. */
+ unsigned short rspeed; /* Disk rotational speed */
+ unsigned short pcylcount; /* Physical cylinder count */
+ unsigned short sparecyl; /* extra sects per cylinder */
+ unsigned char spare2[4]; /* More magic... */
+ unsigned short ilfact; /* Interleave factor */
+ unsigned short ncyl; /* Data cylinder count */
+ unsigned short nacyl; /* Alt. cylinder count */
+ unsigned short ntrks; /* Tracks per cylinder */
+ unsigned short nsect; /* Sectors per track */
+ unsigned char spare3[4]; /* Even more magic... */
+ struct sun_partition {
+ __u32 start_cylinder;
+ __u32 num_sectors;
+ } partitions[8];
+ unsigned short magic; /* Magic number */
+ unsigned short csum; /* Label xor'd checksum */
+ } * label;
+ struct sun_partition *p;
+ int other_endian;
+ unsigned long spc;
+#define SUN_LABEL_MAGIC 0xDABE
+#define SUN_LABEL_MAGIC_SWAPPED 0xBEDA
+/* No need to optimize these macros since they are called only when reading
+ * the partition table. This occurs only at each disk change. */
+#define SWAP16(x) (other_endian ? (((__u16)(x) & 0xFF) << 8) \
+ | (((__u16)(x) & 0xFF00) >> 8) \
+ : (__u16)(x))
+#define SWAP32(x) (other_endian ? (((__u32)(x) & 0xFF) << 24) \
+ | (((__u32)(x) & 0xFF00) << 8) \
+ | (((__u32)(x) & 0xFF0000) >> 8) \
+ | (((__u32)(x) & 0xFF000000) >> 24) \
+ : (__u32)(x))
+
+ if(!(bh = bread(dev, 0, 1024))) {
+ printk("Dev %s: unable to read partition table\n",
+ kdevname(dev));
+ return -1;
+ }
+ label = (struct sun_disklabel *) bh->b_data;
+ p = label->partitions;
+ if (label->magic != SUN_LABEL_MAGIC && label->magic != SUN_LABEL_MAGIC_SWAPPED) {
+ printk("Dev %s Sun disklabel: bad magic %04x\n",
+ kdevname(dev), label->magic);
+ brelse(bh);
+ return 0;
+ }
+ other_endian = (label->magic == SUN_LABEL_MAGIC_SWAPPED);
+ /* Look at the checksum */
+ ush = ((unsigned short *) (label+1)) - 1;
+ for(csum = 0; ush >= ((unsigned short *) label);)
+ csum ^= *ush--;
+ if(csum) {
+ printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
+ kdevname(dev));
+ brelse(bh);
+ return 0;
+ }
+ /* All Sun disks have 8 partition entries */
+ spc = SWAP16(label->ntrks) * SWAP16(label->nsect);
+ for(i=0; i < 8; i++, p++) {
+ unsigned long st_sector;
+
+ /* We register all partitions, even if zero size, so that
+ * the minor numbers end up ok as per SunOS interpretation.
+ */
+ st_sector = first_sector + SWAP32(p->start_cylinder) * spc;
+ add_partition(hd, current_minor, st_sector, SWAP32(p->num_sectors));
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+#undef SWAP16
+#undef SWAP32
+}
+
+#endif /* CONFIG_SUN_PARTITION */
+
+#ifdef CONFIG_AMIGA_PARTITION
+#include <asm/byteorder.h>
+#include <linux/affs_hardblocks.h>
+
+static __inline__ __u32
+checksum_block(__u32 *m, int size)
+{
+ __u32 sum = 0;
+
+ while (size--)
+ sum += htonl(*m++);
+ return sum;
+}
+
+static int
+amiga_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ struct buffer_head *bh;
+ struct RigidDiskBlock *rdb;
+ struct PartitionBlock *pb;
+ int start_sect;
+ int nr_sects;
+ int blk;
+ int part, res;
+
+ set_blocksize(dev,512);
+ res = 0;
+
+ for (blk = 0; blk < RDB_ALLOCATION_LIMIT; blk++) {
+ if(!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read RDB block %d\n",dev,blk);
+ goto rdb_done;
+ }
+ if (*(__u32 *)bh->b_data == htonl(IDNAME_RIGIDDISK)) {
+ rdb = (struct RigidDiskBlock *)bh->b_data;
+ if (checksum_block((__u32 *)bh->b_data,htonl(rdb->rdb_SummedLongs) & 0x7F)) {
+ printk("Dev %d: RDB in block %d has bad checksum\n",dev,blk);
+ brelse(bh);
+ continue;
+ }
+ printk(" RDSK");
+ blk = htonl(rdb->rdb_PartitionList);
+ brelse(bh);
+ for (part = 1; blk > 0 && part <= 16; part++) {
+ if (!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read partition block %d\n",
+ dev,blk);
+ goto rdb_done;
+ }
+ pb = (struct PartitionBlock *)bh->b_data;
+ blk = htonl(pb->pb_Next);
+ if (pb->pb_ID == htonl(IDNAME_PARTITION) && checksum_block(
+ (__u32 *)pb,htonl(pb->pb_SummedLongs) & 0x7F) == 0 ) {
+
+ /* Tell Kernel about it */
+
+ if (!(nr_sects = (htonl(pb->pb_Environment[10]) + 1 -
+ htonl(pb->pb_Environment[9])) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]))) {
+ continue;
+ }
+ start_sect = htonl(pb->pb_Environment[9]) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]);
+ add_partition(hd,current_minor,start_sect,nr_sects);
+ current_minor++;
+ res = 1;
+ }
+ brelse(bh);
+ }
+ printk("\n");
+ break;
+ }
+ }
+
+rdb_done:
+ set_blocksize(dev,BLOCK_SIZE);
+ return res;
+}
+#endif /* CONFIG_AMIGA_PARTITION */
+
+static void check_partition(struct gendisk *hd, kdev_t dev)
+{
+ static int first_time = 1;
+ unsigned long first_sector;
+ char buf[8];
+
+ if (first_time)
+ printk("Partition check:\n");
+ first_time = 0;
+ first_sector = hd->part[MINOR(dev)].start_sect;
+
+ /*
+ * This is a kludge to allow the partition check to be
+ * skipped for specific drives (e.g. IDE cd-rom drives)
+ */
+ if ((int)first_sector == -1) {
+ hd->part[MINOR(dev)].start_sect = 0;
+ return;
+ }
+
+ printk(" %s:", disk_name(hd, MINOR(dev), buf));
+#ifdef CONFIG_MSDOS_PARTITION
+ if (msdos_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ if (osf_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ if(sun_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+ if(amiga_partition(hd, dev, first_sector))
+ return;
+#endif
+ printk(" unknown partition table\n");
+}
+
+/* This function is used to re-read partition tables for removable disks.
+ Much of the cleanup from the old partition tables should have already been
+ done */
+
+/* This function will re-read the partition tables for a given device,
+and set things back up again. There are some important caveats,
+however. You must ensure that no one is using the device, and no one
+can start using the device while this function is being executed. */
+
+void resetup_one_dev(struct gendisk *dev, int drive)
+{
+ int i;
+ int first_minor = drive << dev->minor_shift;
+ int end_minor = first_minor + dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+
+ /*
+ * We need to set the sizes array before we will be able to access
+ * any of the partitions on this device.
+ */
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = first_minor; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+static void setup_dev(struct gendisk *dev)
+{
+ int i, drive;
+ int end_minor = dev->max_nr * dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ for (i = 0 ; i < end_minor; i++) {
+ dev->part[i].start_sect = 0;
+ dev->part[i].nr_sects = 0;
+ }
+ dev->init(dev);
+ for (drive = 0 ; drive < dev->nr_real ; drive++) {
+ int first_minor = drive << dev->minor_shift;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+ }
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = 0; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+void device_setup(void)
+{
+ extern void console_map_init(void);
+ struct gendisk *p;
+ int nr=0;
+
+ chr_dev_init();
+ blk_dev_init();
+ sti();
+#ifdef CONFIG_SCSI
+ scsi_dev_init();
+#endif
+#ifdef CONFIG_INET
+ net_dev_init();
+#endif
+ console_map_init();
+
+ for (p = gendisk_head ; p ; p=p->next) {
+ setup_dev(p);
+ nr += p->nr_real;
+ }
+#ifdef CONFIG_BLK_DEV_RAM
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start && mount_initrd) initrd_load();
+ else
+#endif
+ rd_load();
+#endif
+}
diff --git a/linux/src/drivers/block/ide-cd.c b/linux/src/drivers/block/ide-cd.c
new file mode 100644
index 0000000..020a831
--- /dev/null
+++ b/linux/src/drivers/block/ide-cd.c
@@ -0,0 +1,2802 @@
+/* #define VERBOSE_IDE_CD_ERRORS 1 */
+/*
+ * linux/drivers/block/ide-cd.c
+ * ATAPI cd-rom driver. To be used with ide.c.
+ * See Documentation/cdrom/ide-cd for usage information.
+ *
+ * Copyright (C) 1994, 1995, 1996 scott snyder <snyder@fnald0.fnal.gov>
+ * Copyright (C) 1996, 1997 Erik Andersen <andersee@debian.org>
+ * Copyright (C) 1998 Jens Axboe and Chris Zwilling
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ * see linux/COPYING for more information.
+ *
+ * 1.00 Oct 31, 1994 -- Initial version.
+ * 1.01 Nov 2, 1994 -- Fixed problem with starting request in
+ * cdrom_check_status.
+ * 1.03 Nov 25, 1994 -- leaving unmask_intr[] as a user-setting (as for disks)
+ * (from mlord) -- minor changes to cdrom_setup()
+ * -- renamed ide_dev_s to ide_drive_t, enable irq on command
+ * 2.00 Nov 27, 1994 -- Generalize packet command interface;
+ * add audio ioctls.
+ * 2.01 Dec 3, 1994 -- Rework packet command interface to handle devices
+ * which send an interrupt when ready for a command.
+ * 2.02 Dec 11, 1994 -- Cache the TOC in the driver.
+ * Don't use SCMD_PLAYAUDIO_TI; it's not included
+ * in the current version of ATAPI.
+ * Try to use LBA instead of track or MSF addressing
+ * when possible.
+ * Don't wait for READY_STAT.
+ * 2.03 Jan 10, 1995 -- Rewrite block read routines to handle block sizes
+ * other than 2k and to move multiple sectors in a
+ * single transaction.
+ * 2.04 Apr 21, 1995 -- Add work-around for Creative Labs CD220E drives.
+ * Thanks to Nick Saw <cwsaw@pts7.pts.mot.com> for
+ * help in figuring this out. Ditto for Acer and
+ * Aztech drives, which seem to have the same problem.
+ * 2.04b May 30, 1995 -- Fix to match changes in ide.c version 3.16 -ml
+ * 2.05 Jun 8, 1995 -- Don't attempt to retry after an illegal request
+ * or data protect error.
+ * Use HWIF and DEV_HWIF macros as in ide.c.
+ * Always try to do a request_sense after
+ * a failed command.
+ * Include an option to give textual descriptions
+ * of ATAPI errors.
+ * Fix a bug in handling the sector cache which
+ * showed up if the drive returned data in 512 byte
+ * blocks (like Pioneer drives). Thanks to
+ * Richard Hirst <srh@gpt.co.uk> for diagnosing this.
+ * Properly supply the page number field in the
+ * MODE_SELECT command.
+ * PLAYAUDIO12 is broken on the Aztech; work around it.
+ * 2.05x Aug 11, 1995 -- lots of data structure renaming/restructuring in ide.c
+ * (my apologies to Scott, but now ide-cd.c is independent)
+ * 3.00 Aug 22, 1995 -- Implement CDROMMULTISESSION ioctl.
+ * Implement CDROMREADAUDIO ioctl (UNTESTED).
+ * Use input_ide_data() and output_ide_data().
+ * Add door locking.
+ * Fix usage count leak in cdrom_open, which happened
+ * when a read-write mount was attempted.
+ * Try to load the disk on open.
+ * Implement CDROMEJECT_SW ioctl (off by default).
+ * Read total cdrom capacity during open.
+ * Rearrange logic in cdrom_decode_status. Issue
+ * request sense commands for failed packet commands
+ * from here instead of from cdrom_queue_packet_command.
+ * Fix a race condition in retrieving error information.
+ * Suppress printing normal unit attention errors and
+ * some drive not ready errors.
+ * Implement CDROMVOLREAD ioctl.
+ * Implement CDROMREADMODE1/2 ioctls.
+ * Fix race condition in setting up interrupt handlers
+ * when the `serialize' option is used.
+ * 3.01 Sep 2, 1995 -- Fix ordering of reenabling interrupts in
+ * cdrom_queue_request.
+ * Another try at using ide_[input,output]_data.
+ * 3.02 Sep 16, 1995 -- Stick total disk capacity in partition table as well.
+ * Make VERBOSE_IDE_CD_ERRORS dump failed command again.
+ * Dump out more information for ILLEGAL REQUEST errs.
+ * Fix handling of errors occurring before the
+ * packet command is transferred.
+ * Fix transfers with odd bytelengths.
+ * 3.03 Oct 27, 1995 -- Some Creative drives have an id of just `CD'.
+ * `DCI-2S10' drives are broken too.
+ * 3.04 Nov 20, 1995 -- So are Vertos drives.
+ * 3.05 Dec 1, 1995 -- Changes to go with overhaul of ide.c and ide-tape.c
+ * 3.06 Dec 16, 1995 -- Add support needed for partitions.
+ * More workarounds for Vertos bugs (based on patches
+ * from Holger Dietze <dietze@aix520.informatik.uni-leipzig.de>).
+ * Try to eliminate byteorder assumptions.
+ * Use atapi_cdrom_subchnl struct definition.
+ * Add STANDARD_ATAPI compilation option.
+ * 3.07 Jan 29, 1996 -- More twiddling for broken drives: Sony 55D,
+ * Vertos 300.
+ * Add NO_DOOR_LOCKING configuration option.
+ * Handle drive_cmd requests w/NULL args (for hdparm -t).
+ * Work around sporadic Sony55e audio play problem.
+ * 3.07a Feb 11, 1996 -- check drive->id for NULL before dereferencing, to fix
+ * problem with "hde=cdrom" with no drive present. -ml
+ * 3.08 Mar 6, 1996 -- More Vertos workarounds.
+ * 3.09 Apr 5, 1996 -- Add CDROMCLOSETRAY ioctl.
+ * Switch to using MSF addressing for audio commands.
+ * Reformat to match kernel tabbing style.
+ * Add CDROM_GET_UPC ioctl.
+ * 3.10 Apr 10, 1996 -- Fix compilation error with STANDARD_ATAPI.
+ * 3.11 Apr 29, 1996 -- Patch from Heiko Eissfeldt <heiko@colossus.escape.de>
+ * to remove redundant verify_area calls.
+ * 3.12 May 7, 1996 -- Rudimentary changer support. Based on patches
+ * from Gerhard Zuber <zuber@berlin.snafu.de>.
+ * Let open succeed even if there's no loaded disc.
+ * 3.13 May 19, 1996 -- Fixes for changer code.
+ * 3.14 May 29, 1996 -- Add work-around for Vertos 600.
+ * (From Hennus Bergman <hennus@sky.ow.nl>.)
+ * 3.15 July 2, 1996 -- Added support for Sanyo 3 CD changers
+ * from Ben Galliart <bgallia@luc.edu> with
+ * special help from Jeff Lightfoot
+ * <jeffml@netcom.com>
+ * 3.15a July 9, 1996 -- Improved Sanyo 3 CD changer identification
+ * 3.16 Jul 28, 1996 -- Fix from Gadi to reduce kernel stack usage for ioctl.
+ * 3.17 Sep 17, 1996 -- Tweak audio reads for some drives.
+ * Start changing CDROMLOADFROMSLOT to CDROM_SELECT_DISC.
+ *
+ * 3.19 Nov 5, 1996 -- New ide-cd maintainer:
+ * Erik B. Andersen <andersee@debian.org>
+ * 3.20 Jan 13,1997 -- Bug Fixes:
+ * Fix errors on CDROMSTOP (If you have a "Dolphin",
+ * you must define IHAVEADOLPHIN)
+ * Added identifier so new Sanyo CD-changer works
+ * Better detection if door locking isn't supported
+ * 3.21 Jun 16,1997 -- Add work-around for GCD-R580B
+ *
+ * 3.22 Nov 13, 1998 -- New ide-cd maintainers:
+ * Jens Axboe <axboe@image.dk>
+ * Chris Zwilling <chris@cloudnet.com>
+ *
+ * NOTE: Direct audio reads will only work on some types of drive.
+ * So far, I've received reports of success for Sony and Toshiba drives.
+ *
+ * ALSO NOTE:
+ *
+ * The ide cdrom driver has undergone extensive changes for the
+ * latest development kernel. If you wish to add new features to
+ * this driver, make your changes to the latest version in the
+ * development kernel. Only Bug fixes will be accepted for this
+ * version.
+ *
+ * For those wishing to work on this driver, please be sure you download
+ * and comply with the latest ATAPI standard. This document can be
+ * obtained by anonymous ftp from fission.dt.wdc.com in directory:
+ * /pub/standards/atapi/spec/SFF8020-r2.6/PDF/8020r26.pdf
+ *
+ */
+
+
+/***************************************************************************/
+
+#ifdef MACH
+#include <kern/sched_prim.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/cdrom.h>
+#include <linux/ucdrom.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/segment.h>
+#include <asm/unaligned.h>
+
+#include "ide.h"
+
+
+
+/* Turn this on to have the driver print out the meanings of the
+ ATAPI error codes. This will use up additional kernel-space
+ memory, though. */
+
+#ifndef VERBOSE_IDE_CD_ERRORS
+#define VERBOSE_IDE_CD_ERRORS 0
+#endif
+
+
+/* Turning this on will remove code to work around various nonstandard
+ ATAPI implementations. If you know your drive follows the standard,
+ this will give you a slightly smaller kernel. */
+
+#ifndef STANDARD_ATAPI
+#define STANDARD_ATAPI 0
+#endif
+
+
+/* Turning this on will disable the door-locking functionality.
+ This is apparently needed for supermount. */
+
+#ifndef NO_DOOR_LOCKING
+#define NO_DOOR_LOCKING 0
+#endif
+
+
+/* Size of buffer to allocate, in blocks, for audio reads. */
+
+#ifndef CDROM_NBLOCKS_BUFFER
+#define CDROM_NBLOCKS_BUFFER 8
+#endif
+
+
+/************************************************************************/
+
+#define SECTOR_SIZE 512
+#define SECTOR_BITS 9
+#define SECTORS_PER_FRAME (CD_FRAMESIZE / SECTOR_SIZE)
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+
+/* special command codes for strategy routine. */
+#define PACKET_COMMAND 4315
+#define REQUEST_SENSE_COMMAND 4316
+#define RESET_DRIVE_COMMAND 4317
+
+/* Some ATAPI command opcodes (just like SCSI).
+ (Some other cdrom-specific codes are in cdrom.h.) */
+#define TEST_UNIT_READY 0x00
+#define REQUEST_SENSE 0x03
+#define START_STOP 0x1b
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define MODE_SENSE_10 0x5a
+#define MODE_SELECT_10 0x55
+#define READ_CD 0xbe
+
+#define LOAD_UNLOAD 0xa6
+
+
+/* ATAPI sense keys (mostly copied from scsi.h). */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define ABORTED_COMMAND 0x0b
+#define MISCOMPARE 0x0e
+
+/* We want some additional flags for cd-rom drives.
+   To save space in the ide_drive_t struct, use some fields which
+   don't make sense for cd-roms -- `bios_sect' and `bios_head'. */
+
+/* Configuration flags. These describe the capabilities of the drive.
+ They generally do not change after initialization, unless we learn
+ more about the drive from stuff failing. */
+struct ide_cd_config_flags {
+ __u8 drq_interrupt : 1; /* Device sends an interrupt when ready
+ for a packet command. */
+ __u8 no_doorlock : 1; /* Drive cannot lock the door. */
+#if ! STANDARD_ATAPI
+ __u8 old_readcd : 1; /* Drive uses old READ CD opcode. */
+ __u8 playmsf_as_bcd : 1; /* PLAYMSF command takes BCD args. */
+ __u8 tocaddr_as_bcd : 1; /* TOC addresses are in BCD. */
+ __u8 toctracks_as_bcd : 1; /* TOC track numbers are in BCD. */
+ __u8 subchan_as_bcd : 1; /* Subchannel info is in BCD. */
+#endif /* not STANDARD_ATAPI */
+ __u8 reserved : 1;
+};
+#define CDROM_CONFIG_FLAGS(drive) ((struct ide_cd_config_flags *)&((drive)->bios_sect))
+
+
+/* State flags. These give information about the current state of the
+ drive, and will change during normal operation. */
+struct ide_cd_state_flags {
+ __u8 media_changed : 1; /* Driver has noticed a media change. */
+ __u8 toc_valid : 1; /* Saved TOC information is current. */
+ __u8 door_locked : 1; /* We think that the drive door is locked. */
+ __u8 eject_on_close: 1; /* Drive should eject when device is closed. */
+ __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */
+ __u8 reserved : 2;
+};
+#define CDROM_STATE_FLAGS(drive) ((struct ide_cd_state_flags *)&((drive)->bios_head))
+
+
+#define SECTOR_BUFFER_SIZE CD_FRAMESIZE
+
+
+
+/****************************************************************************
+ * Routines to read and write data from/to the drive, using
+ * the routines input_ide_data() and output_ide_data() from ide.c.
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd bytecount is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+
+
+static inline
+void cdrom_in_bytes (ide_drive_t *drive, void *buffer, uint bytecount)
+{
+ ++bytecount;
+ ide_input_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2) {
+ insw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+ }
+}
+
+
+static inline
+void cdrom_out_bytes (ide_drive_t *drive, void *buffer, uint bytecount)
+{
+ ++bytecount;
+ ide_output_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2) {
+ outsw (IDE_DATA_REG,
+ ((byte *)buffer) + (bytecount & ~0x03), 1);
+ }
+}
+
+
+
+/****************************************************************************
+ * Descriptions of ATAPI error codes.
+ */
+
+#define ARY_LEN(a) ((sizeof(a) / sizeof(a[0])))
+
+#if VERBOSE_IDE_CD_ERRORS
+
+/* From Table 124 of the ATAPI 1.2 spec. */
+
+char *sense_key_texts[16] = {
+ "No sense data",
+ "Recovered error",
+ "Not ready",
+ "Medium error",
+ "Hardware error",
+ "Illegal request",
+ "Unit attention",
+ "Data protect",
+ "(reserved)",
+ "(reserved)",
+ "(reserved)",
+ "Aborted command",
+ "(reserved)",
+ "(reserved)",
+ "Miscompare",
+ "(reserved)",
+};
+
+
+/* From Table 125 of the ATAPI 1.2 spec. */
+
+struct {
+ short asc_ascq;
+ char *text;
+} sense_data_texts[] = {
+ { 0x0000, "No additional sense information" },
+ { 0x0011, "Audio play operation in progress" },
+ { 0x0012, "Audio play operation paused" },
+ { 0x0013, "Audio play operation successfully completed" },
+ { 0x0014, "Audio play operation stopped due to error" },
+ { 0x0015, "No current audio status to return" },
+
+ { 0x0200, "No seek complete" },
+
+ { 0x0400, "Logical unit not ready - cause not reportable" },
+ { 0x0401,
+ "Logical unit not ready - in progress (sic) of becoming ready" },
+ { 0x0402, "Logical unit not ready - initializing command required" },
+ { 0x0403, "Logical unit not ready - manual intervention required" },
+
+ { 0x0600, "No reference position found" },
+
+ { 0x0900, "Track following error" },
+ { 0x0901, "Tracking servo failure" },
+ { 0x0902, "Focus servo failure" },
+ { 0x0903, "Spindle servo failure" },
+
+ { 0x1100, "Unrecovered read error" },
+ { 0x1106, "CIRC unrecovered error" },
+
+ { 0x1500, "Random positioning error" },
+ { 0x1501, "Mechanical positioning error" },
+ { 0x1502, "Positioning error detected by read of medium" },
+
+ { 0x1700, "Recovered data with no error correction applied" },
+ { 0x1701, "Recovered data with retries" },
+ { 0x1702, "Recovered data with positive head offset" },
+ { 0x1703, "Recovered data with negative head offset" },
+ { 0x1704, "Recovered data with retries and/or CIRC applied" },
+ { 0x1705, "Recovered data using previous sector ID" },
+
+ { 0x1800, "Recovered data with error correction applied" },
+ { 0x1801, "Recovered data with error correction and retries applied" },
+ { 0x1802, "Recovered data - the data was auto-reallocated" },
+ { 0x1803, "Recovered data with CIRC" },
+ { 0x1804, "Recovered data with L-EC" },
+ { 0x1805, "Recovered data - recommend reassignment" },
+ { 0x1806, "Recovered data - recommend rewrite" },
+
+ { 0x1a00, "Parameter list length error" },
+
+ { 0x2000, "Invalid command operation code" },
+
+ { 0x2100, "Logical block address out of range" },
+
+ { 0x2400, "Invalid field in command packet" },
+
+ { 0x2600, "Invalid field in parameter list" },
+ { 0x2601, "Parameter not supported" },
+ { 0x2602, "Parameter value invalid" },
+ { 0x2603, "Threshold parameters not supported" },
+
+ { 0x2800, "Not ready to ready transition, medium may have changed" },
+
+ { 0x2900, "Power on, reset or bus device reset occurred" },
+
+ { 0x2a00, "Parameters changed" },
+ { 0x2a01, "Mode parameters changed" },
+
+ { 0x3000, "Incompatible medium installed" },
+ { 0x3001, "Cannot read medium - unknown format" },
+ { 0x3002, "Cannot read medium - incompatible format" },
+
+ { 0x3700, "Rounded parameter" },
+
+ { 0x3900, "Saving parameters not supported" },
+
+ { 0x3a00, "Medium not present" },
+
+ { 0x3f00, "ATAPI CD-ROM drive operating conditions have changed" },
+ { 0x3f01, "Microcode has been changed" },
+ { 0x3f02, "Changed operating definition" },
+ { 0x3f03, "Inquiry data has changed" },
+
+ { 0x4000, "Diagnostic failure on component (ASCQ)" },
+
+ { 0x4400, "Internal ATAPI CD-ROM drive failure" },
+
+ { 0x4e00, "Overlapped commands attempted" },
+
+ { 0x5300, "Media load or eject failed" },
+ { 0x5302, "Medium removal prevented" },
+
+ { 0x5700, "Unable to recover table of contents" },
+
+ { 0x5a00, "Operator request or state change input (unspecified)" },
+ { 0x5a01, "Operator medium removal request" },
+
+ { 0x5b00, "Threshold condition met" },
+
+ { 0x5c00, "Status change" },
+
+ { 0x6300, "End of user area encountered on this track" },
+
+ { 0x6400, "Illegal mode for this track" },
+
+ { 0xbf00, "Loss of streaming" },
+};
+#endif
+
+
+
+/****************************************************************************
+ * Generic packet command support and error handling routines.
+ */
+
+
+static
+void cdrom_analyze_sense_data (ide_drive_t *drive,
+ struct atapi_request_sense *reqbuf,
+ struct packet_command *failed_command)
+{
+ /* Don't print not ready or unit attention errors for READ_SUBCHANNEL.
+ Workman (and probably other programs) uses this command to poll
+ the drive, and we don't want to fill the syslog
+ with useless errors. */
+ if (failed_command &&
+ failed_command->c[0] == SCMD_READ_SUBCHANNEL &&
+ (reqbuf->sense_key == NOT_READY ||
+ reqbuf->sense_key == UNIT_ATTENTION))
+ return;
+
+#if VERBOSE_IDE_CD_ERRORS
+ {
+ int i;
+ char *s;
+ char buf[80];
+
+ printk ("ATAPI device %s:\n", drive->name);
+
+ printk (" Error code: 0x%02x\n", reqbuf->error_code);
+
+ if (reqbuf->sense_key >= 0 &&
+ reqbuf->sense_key < ARY_LEN (sense_key_texts))
+ s = sense_key_texts[reqbuf->sense_key];
+ else
+ s = "(bad sense key)";
+
+ printk (" Sense key: 0x%02x - %s\n", reqbuf->sense_key, s);
+
+ if (reqbuf->asc == 0x40) {
+ sprintf (buf, "Diagnostic failure on component 0x%02x",
+ reqbuf->ascq);
+ s = buf;
+ } else {
+ int lo, hi;
+ int key = (reqbuf->asc << 8);
+ if ( ! (reqbuf->ascq >= 0x80 && reqbuf->ascq <= 0xdd) )
+ key |= reqbuf->ascq;
+
+ lo = 0;
+ hi = ARY_LEN (sense_data_texts);
+ s = NULL;
+
+ while (hi > lo) {
+ int mid = (lo + hi) / 2;
+ if (sense_data_texts[mid].asc_ascq == key) {
+ s = sense_data_texts[mid].text;
+ break;
+ }
+ else if (sense_data_texts[mid].asc_ascq > key)
+ hi = mid;
+ else
+ lo = mid+1;
+ }
+ }
+
+ if (s == NULL) {
+ if (reqbuf->asc > 0x80)
+ s = "(vendor-specific error)";
+ else
+ s = "(reserved error code)";
+ }
+
+ printk (" Additional sense data: 0x%02x, 0x%02x - %s\n",
+ reqbuf->asc, reqbuf->ascq, s);
+
+ if (failed_command != NULL) {
+ printk (" Failed packet command: ");
+ for (i=0; i<sizeof (failed_command->c); i++)
+ printk ("%02x ", failed_command->c[i]);
+ printk ("\n");
+ }
+
+ if (reqbuf->sense_key == ILLEGAL_REQUEST &&
+ (reqbuf->sense_key_specific[0] & 0x80) != 0) {
+ printk (" Error in %s byte %d",
+ (reqbuf->sense_key_specific[0] & 0x40) != 0
+ ? "command packet"
+ : "command data",
+ (reqbuf->sense_key_specific[1] << 8) +
+ reqbuf->sense_key_specific[2]);
+
+ if ((reqbuf->sense_key_specific[0] & 0x40) != 0) {
+ printk (" bit %d",
+ reqbuf->sense_key_specific[0] & 0x07);
+ }
+
+ printk ("\n");
+ }
+ }
+
+#else /* not VERBOSE_IDE_CD_ERRORS */
+
+ /* Suppress printing unit attention and `in progress of becoming ready'
+ errors when we're not being verbose. */
+
+ if (reqbuf->sense_key == UNIT_ATTENTION ||
+ (reqbuf->sense_key == NOT_READY && (reqbuf->asc == 4 ||
+ reqbuf->asc == 0x3a)))
+ return;
+
+ printk ("%s: code: 0x%02x key: 0x%02x asc: 0x%02x ascq: 0x%02x\n",
+ drive->name,
+ reqbuf->error_code, reqbuf->sense_key,
+ reqbuf->asc, reqbuf->ascq);
+#endif /* not VERBOSE_IDE_CD_ERRORS */
+}
+
+
+/* Fix up a possibly partially-processed request so that we can
+ start it over entirely, or even put it back on the request queue. */
+static void restore_request (struct request *rq)
+{
+ if (rq->buffer != rq->bh->b_data) {
+ int n = (rq->buffer - rq->bh->b_data) / SECTOR_SIZE;
+ rq->buffer = rq->bh->b_data;
+ rq->nr_sectors += n;
+ rq->sector -= n;
+ }
+ rq->current_nr_sectors = rq->bh->b_size >> SECTOR_BITS;
+}
+
+
+/* Queue a REQUEST SENSE packet command, preempting the current request,
+   to fetch sense data after a failed command.  SEM, if non-NULL, is
+   attached to the new request so the waiter wakes only when the sense
+   fetch completes.  REQBUF is where the sense data should land (NULL
+   means use the drive's statically allocated buffer).  FAILED_COMMAND
+   is the packet command which provoked this, or NULL. */
+static void cdrom_queue_request_sense (ide_drive_t *drive,
+				       struct semaphore *sem,
+				       struct atapi_request_sense *reqbuf,
+				       struct packet_command *failed_command)
+{
+	struct request *rq;
+	struct packet_command *pc;
+	int len;
+
+	/* If the request didn't explicitly specify where
+	   to put the sense data, use the statically allocated structure. */
+	if (reqbuf == NULL)
+		reqbuf = &drive->cdrom_info.sense_data;
+
+	/* Make up a new request to retrieve sense information. */
+
+	pc = &HWIF(drive)->request_sense_pc;
+	memset (pc, 0, sizeof (*pc));
+
+	/* The request_sense structure has an odd number of (16-bit) words,
+	   which won't work well with 32-bit transfers.  However, we don't care
+	   about the last two bytes, so just truncate the structure down
+	   to an even length. */
+	len = sizeof (*reqbuf) / 4;
+	len *= 4;
+
+	pc->c[0] = REQUEST_SENSE;
+	pc->c[4] = len;
+	pc->buffer = (unsigned char *)reqbuf;
+	pc->buflen = len;
+	/* NOTE: sense_data is reused here to smuggle a pointer to the
+	   FAILED command through to cdrom_end_request; it is not sense
+	   data for this request. */
+	pc->sense_data = (struct atapi_request_sense *)failed_command;
+
+	/* stuff the sense request in front of our current request */
+
+	rq = &HWIF(drive)->request_sense_request;
+	ide_init_drive_cmd (rq);
+	rq->cmd = REQUEST_SENSE_COMMAND;
+	rq->buffer = (char *)pc;
+	rq->sem = sem;
+	(void) ide_do_drive_cmd (drive, rq, ide_preempt);
+}
+
+
+/* Finish the current request.  If it was a successful request-sense
+   command, decode and log the sense data first. */
+static void cdrom_end_request (int uptodate, ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+
+	if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate && !rq->quiet) {
+		struct packet_command *pc = (struct packet_command *)
+			rq->buffer;
+		/* NOTE(review): this assumes the transfer advanced pc->buffer
+		   by exactly c[4] bytes, so subtracting c[4] recovers the
+		   start of the sense buffer.  pc->sense_data actually holds
+		   the failed packet command (stashed there by
+		   cdrom_queue_request_sense), hence the cast. */
+		cdrom_analyze_sense_data (drive,
+					  (struct atapi_request_sense *)
+					  	(pc->buffer - pc->c[4]),
+					  (struct packet_command *)
+					  	pc->sense_data);
+	}
+
+	ide_end_request (uptodate, HWGROUP(drive));
+}
+
+
+/* Mark that we've seen a media change, and invalidate our internal
+   buffers. */
+static void cdrom_saw_media_change (ide_drive_t *drive)
+{
+	CDROM_STATE_FLAGS (drive)->media_changed = 1;
+	CDROM_STATE_FLAGS (drive)->toc_valid = 0;	   /* force a TOC re-read */
+	drive->cdrom_info.nsectors_buffered = 0;	   /* drop cached sectors */
+}
+
+
+/* Examine the drive status after an interrupt and decide what to do.
+   GOOD_STAT is the set of status bits expected on success; the raw
+   status byte is stored through STAT_RET for the caller.
+   Returns 0 if the request should be continued.
+   Returns 1 if the request was ended. */
+static int cdrom_decode_status (ide_drive_t *drive, int good_stat,
+				int *stat_ret)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	int stat, err, sense_key, cmd;
+
+	/* Check for errors. */
+	stat = GET_STAT();
+	*stat_ret = stat;
+
+	if (OK_STAT (stat, good_stat, BAD_R_STAT))
+		return 0;
+
+	/* Got an error. */
+	err = IN_BYTE (IDE_ERROR_REG);
+	/* The high nibble of the ATAPI error register is the sense key. */
+	sense_key = err >> 4;
+
+	if (rq == NULL)
+		printk ("%s : missing request in cdrom_decode_status\n",
+			drive->name);
+	else {
+		cmd = rq->cmd;
+
+		if (cmd == REQUEST_SENSE_COMMAND) {
+			/* We got an error trying to get sense info
+			   from the drive (probably while trying
+			   to recover from a former error).  Just give up. */
+
+			struct packet_command *pc = (struct packet_command *)
+				                      rq->buffer;
+			pc->stat = 1;
+			/* End with uptodate==1: the failure is reported via
+			   pc->stat, not via the request status. */
+			cdrom_end_request (1, drive);
+			ide_error (drive, "request sense failure", stat);
+			return 1;
+
+		} else if (cmd == PACKET_COMMAND) {
+			/* All other functions, except for READ. */
+
+			struct packet_command *pc = (struct packet_command *)
+				                      rq->buffer;
+			struct semaphore *sem = NULL;
+
+			/* Check for tray open. */
+			if (sense_key == NOT_READY) {
+				cdrom_saw_media_change (drive);
+
+				/* Print an error message to the syslog.
+				   Exception: don't print anything if this
+				   is a read subchannel command.  This is
+				   because workman constantly polls the drive
+				   with this command, and we don't want
+				   to uselessly fill up the syslog. */
+				if (pc->c[0] != SCMD_READ_SUBCHANNEL && !rq->quiet)
+					printk ("%s : tray open or drive not ready\n",
+						drive->name);
+			} else if (sense_key == UNIT_ATTENTION) {
+				/* Check for media change. */
+				cdrom_saw_media_change (drive);
+				if (!rq->quiet)
+					printk ("%s: media changed\n", drive->name);
+			} else {
+				/* Otherwise, print an error. */
+				if (!rq->quiet)
+					ide_dump_status (drive, "packet command error",
+							 stat);
+			}
+
+			/* Set the error flag and complete the request.
+			   Then, if we have a CHECK CONDITION status,
+			   queue a request sense command.  We must be careful,
+			   though: we don't want the thread in
+			   cdrom_queue_packet_command to wake up until
+			   the request sense has completed.  We do this
+			   by transferring the semaphore from the packet
+			   command request to the request sense request. */
+
+			if ((stat & ERR_STAT) != 0) {
+				sem = rq->sem;
+				rq->sem = NULL;
+			}
+
+			pc->stat = 1;
+			cdrom_end_request (1, drive);
+
+			if ((stat & ERR_STAT) != 0)
+				cdrom_queue_request_sense (drive, sem,
+							   pc->sense_data, pc);
+		} else {
+			/* Handle errors from READ requests. */
+
+			if (sense_key == NOT_READY) {
+				/* Tray open. */
+				cdrom_saw_media_change (drive);
+
+				/* Fail the request. */
+				if (!rq->quiet)
+					printk ("%s : tray open\n", drive->name);
+				cdrom_end_request (0, drive);
+			} else if (sense_key == UNIT_ATTENTION) {
+				/* Media change. */
+				cdrom_saw_media_change (drive);
+
+				/* Arrange to retry the request.
+				   But be sure to give up if we've retried
+				   too many times. */
+				if (++rq->errors > ERROR_MAX)
+					cdrom_end_request (0, drive);
+			} else if (sense_key == ILLEGAL_REQUEST ||
+				   sense_key == DATA_PROTECT) {
+				/* No point in retrying after an illegal
+				   request or data protect error.*/
+				if (!rq->quiet)
+					ide_dump_status (drive, "command error", stat);
+				cdrom_end_request (0, drive);
+			} else if ((err & ~ABRT_ERR) != 0) {
+				/* Go to the default handler
+				   for other errors. */
+				ide_error (drive, "cdrom_decode_status", stat);
+				return 1;
+			} else if ((++rq->errors > ERROR_MAX)) {
+				/* We've racked up too many retries.  Abort. */
+				cdrom_end_request (0, drive);
+			}
+
+			/* If we got a CHECK_CONDITION status,
+			   queue a request sense command. */
+			if ((stat & ERR_STAT) != 0)
+				cdrom_queue_request_sense (drive,
+							   NULL, NULL, NULL);
+		}
+	}
+
+	/* Retry, or handle the next request. */
+	return 1;
+}
+
+
+/* Set up the device registers for transferring a packet command on DEV,
+   expecting to later transfer XFERLEN bytes.  HANDLER is the routine
+   which actually transfers the command to the drive.  If this is a
+   drq_interrupt device, this routine will arrange for HANDLER to be
+   called when the interrupt from the drive arrives.  Otherwise, HANDLER
+   will be called immediately after the drive is prepared for the transfer.
+   Returns 1 if the controller never went idle, 0 otherwise. */
+
+static int cdrom_start_packet_command (ide_drive_t *drive, int xferlen,
+				       ide_handler_t *handler)
+{
+	/* Wait for the controller to be idle. */
+	if (ide_wait_stat (drive, 0, BUSY_STAT, WAIT_READY)) return 1;
+
+	/* Set up the controller registers. */
+	OUT_BYTE (0, IDE_FEATURE_REG);
+	OUT_BYTE (0, IDE_NSECTOR_REG);
+	OUT_BYTE (0, IDE_SECTOR_REG);
+
+	/* Byte-count registers carry the expected transfer length. */
+	OUT_BYTE (xferlen & 0xff, IDE_LCYL_REG);
+	OUT_BYTE (xferlen >> 8 , IDE_HCYL_REG);
+	OUT_BYTE (drive->ctl, IDE_CONTROL_REG);
+
+	if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
+		/* Arm the handler BEFORE issuing the command; the drive
+		   will interrupt when ready to accept the packet. */
+		ide_set_handler (drive, handler, WAIT_CMD);
+		OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
+	} else {
+		OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
+		(*handler) (drive);
+	}
+
+	return 0;
+}
+
+
+/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN.
+   The device registers must have already been prepared
+   by cdrom_start_packet_command.
+   HANDLER is the interrupt handler to call when the command completes
+   or there's data ready.
+   Returns 1 if the request was ended (error path), 0 otherwise. */
+static int cdrom_transfer_packet_command (ide_drive_t *drive,
+                                          unsigned char *cmd_buf, int cmd_len,
+					  ide_handler_t *handler)
+{
+	if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
+		/* Here we should have been called after receiving an interrupt
+		   from the device.  DRQ should now be set. */
+		int stat_dum;
+
+		/* Check for errors. */
+		if (cdrom_decode_status (drive, DRQ_STAT, &stat_dum)) return 1;
+	} else {
+		/* Otherwise, we must wait for DRQ to get set. */
+		if (ide_wait_stat (drive, DRQ_STAT, BUSY_STAT, WAIT_READY))
+			return 1;
+	}
+
+	/* Arm the interrupt handler. */
+	ide_set_handler (drive, handler, WAIT_CMD);
+
+	/* Send the command to the device. */
+	cdrom_out_bytes (drive, cmd_buf, cmd_len);
+
+	return 0;
+}
+
+
+
+/****************************************************************************
+ * Block read functions.
+ */
+
+/*
+ * Buffer up to SECTORS_TO_TRANSFER sectors from the drive in our sector
+ * buffer.  Once the first sector is added, any subsequent sectors are
+ * assumed to be continuous (until the buffer is cleared).  For the first
+ * sector added, SECTOR is its sector number.  (SECTOR is then ignored until
+ * the buffer is cleared.)
+ */
+static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
+                                  int sectors_to_transfer)
+{
+	struct cdrom_info *info = &drive->cdrom_info;
+
+	/* Number of sectors to read into the buffer (limited by the
+	   free space remaining in the buffer). */
+	int sectors_to_buffer = MIN (sectors_to_transfer,
+				     (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
+				       info->nsectors_buffered);
+
+	char *dest;
+
+	/* If we don't yet have a sector buffer, try to allocate one.
+	   If we can't get one atomically, it's not fatal -- we'll just throw
+	   the data away rather than caching it. */
+	if (info->sector_buffer == NULL) {
+		info->sector_buffer = (char *) kmalloc (SECTOR_BUFFER_SIZE,
+							GFP_ATOMIC);
+
+		/* If we couldn't get a buffer,
+		   don't try to buffer anything... */
+		if (info->sector_buffer == NULL)
+			sectors_to_buffer = 0;
+	}
+
+	/* If this is the first sector in the buffer, remember its number. */
+	if (info->nsectors_buffered == 0)
+		info->sector_buffered = sector;
+
+	/* Read the data into the buffer. */
+	dest = info->sector_buffer + info->nsectors_buffered * SECTOR_SIZE;
+	while (sectors_to_buffer > 0) {
+		cdrom_in_bytes (drive, dest, SECTOR_SIZE);
+		--sectors_to_buffer;
+		--sectors_to_transfer;
+		++info->nsectors_buffered;
+		dest += SECTOR_SIZE;
+	}
+
+	/* Throw away any remaining data.  We must drain the drive even
+	   when not caching, so that the transfer completes. */
+	while (sectors_to_transfer > 0) {
+		char dum[SECTOR_SIZE];
+		cdrom_in_bytes (drive, dum, sizeof (dum));
+		--sectors_to_transfer;
+	}
+}
+
+
+/*
+ * Check the contents of the interrupt reason register from the cdrom
+ * and attempt to recover if there are problems.  Returns 0 if everything's
+ * ok; nonzero if the request has been terminated.
+ */
+static inline
+int cdrom_read_check_ireason (ide_drive_t *drive, int len, int ireason)
+{
+	/* Only the low two bits (CoD/IO) are meaningful here;
+	   2 means "data from drive to host", which is what we expect. */
+	ireason &= 3;
+	if (ireason == 2) return 0;
+
+	if (ireason == 0) {
+		/* Whoops... The drive is expecting to receive data from us! */
+		printk ("%s: cdrom_read_intr: "
+			"Drive wants to transfer data the wrong way!\n",
+			drive->name);
+
+		/* Throw some data at the drive so it doesn't hang
+		   and quit this request. */
+		while (len > 0) {
+			int dum = 0;
+			cdrom_out_bytes (drive, &dum, sizeof (dum));
+			len -= sizeof (dum);
+		}
+	} else {
+		/* Drive wants a command packet, or invalid ireason... */
+		printk ("%s: cdrom_read_intr: bad interrupt reason %d\n",
+			drive->name, ireason);
+	}
+
+	cdrom_end_request (0, drive);
+	return -1;
+}
+
+
+/*
+ * Interrupt routine.  Called when a read request has completed.
+ * Moves the data from the drive into the request's buffers, possibly
+ * spilling extra sectors into the drive-local cache, then re-arms
+ * itself for the next interrupt.
+ */
+static void cdrom_read_intr (ide_drive_t *drive)
+{
+	int stat;
+	int ireason, len, sectors_to_transfer, nskip;
+
+	struct request *rq = HWGROUP(drive)->rq;
+
+	/* Check for errors. */
+	if (cdrom_decode_status (drive, 0, &stat)) return;
+
+	/* Read the interrupt reason and the transfer length. */
+	ireason = IN_BYTE (IDE_NSECTOR_REG);
+	len = IN_BYTE (IDE_LCYL_REG) + 256 * IN_BYTE (IDE_HCYL_REG);
+
+	/* If DRQ is clear, the command has completed. */
+	if ((stat & DRQ_STAT) == 0) {
+		/* If we're not done filling the current buffer, complain.
+		   Otherwise, complete the command normally. */
+		if (rq->current_nr_sectors > 0) {
+			printk ("%s: cdrom_read_intr: data underrun (%ld blocks)\n",
+				drive->name, rq->current_nr_sectors);
+			cdrom_end_request (0, drive);
+		} else
+			cdrom_end_request (1, drive);
+
+		return;
+	}
+
+	/* Check that the drive is expecting to do the same thing we are. */
+	if (cdrom_read_check_ireason (drive, len, ireason)) return;
+
+	/* Assume that the drive will always provide data in multiples
+	   of at least SECTOR_SIZE, as it gets hairy to keep track
+	   of the transfers otherwise. */
+	if ((len % SECTOR_SIZE) != 0) {
+		printk ("%s: cdrom_read_intr: Bad transfer size %d\n",
+			drive->name, len);
+		printk ("  This drive is not supported by this version of the driver\n");
+		cdrom_end_request (0, drive);
+		return;
+	}
+
+	/* The number of sectors we need to read from the drive. */
+	sectors_to_transfer = len / SECTOR_SIZE;
+
+	/* First, figure out if we need to bit-bucket
+	   any of the leading sectors.  (A current_nr_sectors value larger
+	   than the buffer-head size encodes leading sectors to skip --
+	   see cdrom_start_read_continuation.) */
+	nskip = MIN ((int)(rq->current_nr_sectors -
+			   (rq->bh->b_size >> SECTOR_BITS)),
+		     sectors_to_transfer);
+
+	while (nskip > 0) {
+		/* We need to throw away a sector. */
+		char dum[SECTOR_SIZE];
+		cdrom_in_bytes (drive, dum, sizeof (dum));
+
+		--rq->current_nr_sectors;
+		--nskip;
+		--sectors_to_transfer;
+	}
+
+	/* Now loop while we still have data to read from the drive. */
+	while (sectors_to_transfer > 0) {
+		int this_transfer;
+
+		/* If we've filled the present buffer but there's another
+		   chained buffer after it, move on. */
+		if (rq->current_nr_sectors == 0 &&
+		    rq->nr_sectors > 0)
+			cdrom_end_request (1, drive);
+
+		/* If the buffers are full, cache the rest of the data in our
+		   internal buffer. */
+		if (rq->current_nr_sectors == 0) {
+			cdrom_buffer_sectors (drive,
+					      rq->sector, sectors_to_transfer);
+			sectors_to_transfer = 0;
+		} else {
+			/* Transfer data to the buffers.
+			   Figure out how many sectors we can transfer
+			   to the current buffer. */
+			this_transfer = MIN (sectors_to_transfer,
+					     rq->current_nr_sectors);
+
+			/* Read this_transfer sectors
+			   into the current buffer. */
+			while (this_transfer > 0) {
+				cdrom_in_bytes (drive
+						, rq->buffer, SECTOR_SIZE);
+				rq->buffer += SECTOR_SIZE;
+				--rq->nr_sectors;
+				--rq->current_nr_sectors;
+				++rq->sector;
+				--this_transfer;
+				--sectors_to_transfer;
+			}
+		}
+	}
+
+	/* Done moving data!
+	   Wait for another interrupt. */
+	ide_set_handler (drive, &cdrom_read_intr, WAIT_CMD);
+}
+
+
+/*
+ * Try to satisfy some of the current read request from our cached data.
+ * Returns nonzero if the request has been completed, zero otherwise.
+ */
+static int cdrom_read_from_buffer (ide_drive_t *drive)
+{
+	struct cdrom_info *info = &drive->cdrom_info;
+	struct request *rq = HWGROUP(drive)->rq;
+
+	/* Can't do anything if there's no buffer. */
+	if (info->sector_buffer == NULL) return 0;
+
+	/* Loop while this request needs data and the next block is present
+	   in our cache. */
+	while (rq->nr_sectors > 0 &&
+	       rq->sector >= info->sector_buffered &&
+	       rq->sector < info->sector_buffered + info->nsectors_buffered) {
+		/* Current buffer head exhausted -- advance to the next one. */
+		if (rq->current_nr_sectors == 0)
+			cdrom_end_request (1, drive);
+
+		memcpy (rq->buffer,
+			info->sector_buffer +
+			(rq->sector - info->sector_buffered) * SECTOR_SIZE,
+			SECTOR_SIZE);
+		rq->buffer += SECTOR_SIZE;
+		--rq->current_nr_sectors;
+		--rq->nr_sectors;
+		++rq->sector;
+	}
+
+	/* If we've satisfied the current request,
+	   terminate it successfully. */
+	if (rq->nr_sectors == 0) {
+		cdrom_end_request (1, drive);
+		return -1;
+	}
+
+	/* Move on to the next buffer if needed. */
+	if (rq->current_nr_sectors == 0)
+		cdrom_end_request (1, drive);
+
+	/* If this condition does not hold, then the kluge i use to
+	   represent the number of sectors to skip at the start of a transfer
+	   will fail.  I think that this will never happen, but let's be
+	   paranoid and check. */
+	if (rq->current_nr_sectors < (rq->bh->b_size >> SECTOR_BITS) &&
+	    (rq->sector % SECTORS_PER_FRAME) != 0) {
+		printk ("%s: cdrom_read_from_buffer: buffer botch (%ld)\n",
+			drive->name, rq->sector);
+		cdrom_end_request (0, drive);
+		return -1;
+	}
+
+	return 0;
+}
+
+
+
+/*
+ * Routine to send a read packet command to the drive.
+ * This is usually called directly from cdrom_start_read.
+ * However, for drq_interrupt devices, it is called from an interrupt
+ * when the drive is ready to accept the command.
+ */
+static void cdrom_start_read_continuation (ide_drive_t *drive)
+{
+	struct packet_command pc;
+	struct request *rq = HWGROUP(drive)->rq;
+
+	int nsect, sector, nframes, frame, nskip;
+
+	/* Number of sectors to transfer. */
+	nsect = rq->nr_sectors;
+
+#if !STANDARD_ATAPI
+	if (nsect > drive->cdrom_info.max_sectors)
+		nsect = drive->cdrom_info.max_sectors;
+#endif  /* not STANDARD_ATAPI */
+
+	/* Starting sector. */
+	sector = rq->sector;
+
+	/* If the requested sector doesn't start on a cdrom block boundary,
+	   we must adjust the start of the transfer so that it does,
+	   and remember to skip the first few sectors.
+	   If the CURRENT_NR_SECTORS field is larger than the size
+	   of the buffer, it will mean that we're to skip a number
+	   of sectors equal to the amount by which CURRENT_NR_SECTORS
+	   is larger than the buffer size. */
+	nskip = (sector % SECTORS_PER_FRAME);
+	if (nskip > 0) {
+		/* Sanity check... */
+		if (rq->current_nr_sectors !=
+		    (rq->bh->b_size >> SECTOR_BITS)) {
+			printk ("%s: cdrom_start_read_continuation: buffer botch (%ld)\n",
+				drive->name, rq->current_nr_sectors);
+			cdrom_end_request (0, drive);
+			return;
+		}
+
+		sector -= nskip;
+		nsect += nskip;
+		rq->current_nr_sectors += nskip;
+	}
+
+	/* Convert from sectors to cdrom blocks, rounding up the transfer
+	   length if needed. */
+	nframes = (nsect + SECTORS_PER_FRAME-1) / SECTORS_PER_FRAME;
+	frame = sector / SECTORS_PER_FRAME;
+
+	/* Largest number of frames we can transfer at once is 64k-1. */
+	nframes = MIN (nframes, 65535);
+
+	/* Set up the command */
+	memset (&pc.c, 0, sizeof (pc.c));
+	pc.c[0] = READ_10;
+	pc.c[7] = (nframes >> 8);
+	pc.c[8] = (nframes & 0xff);
+	/* READ(10) takes a big-endian LBA in bytes 2-5. */
+	put_unaligned(htonl (frame), (unsigned int *) &pc.c[2]);
+
+	/* Send the command to the drive and return. */
+	(void) cdrom_transfer_packet_command (drive, pc.c, sizeof (pc.c),
+					      &cdrom_read_intr);
+}
+
+
+/*
+ * Start a read request from the CD-ROM.
+ * BLOCK is the absolute starting sector when the request came in
+ * relative to a partition.
+ */
+static void cdrom_start_read (ide_drive_t *drive, unsigned int block)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	int minor = MINOR (rq->rq_dev);
+
+	/* If the request is relative to a partition, fix it up to refer to the
+	   absolute address. */
+	if ((minor & PARTN_MASK) != 0) {
+		rq->sector = block;
+		minor &= ~PARTN_MASK;
+		rq->rq_dev = MKDEV (MAJOR(rq->rq_dev), minor);
+	}
+
+	/* We may be retrying this request after an error.  Fix up
+	   any weirdness which might be present in the request packet. */
+	restore_request (rq);
+
+	/* Satisfy whatever we can of this request from our cached sector. */
+	if (cdrom_read_from_buffer (drive))
+		return;
+
+	/* Clear the local sector buffer. */
+	drive->cdrom_info.nsectors_buffered = 0;
+
+	/* Start sending the read request to the drive.
+	   32768 is the advertised per-DRQ byte count limit. */
+	cdrom_start_packet_command (drive, 32768,
+				    cdrom_start_read_continuation);
+}
+
+
+
+
+/****************************************************************************
+ * Execute all other packet commands.
+ */
+
+/* Forward declarations. */
+static int
+cdrom_lockdoor (ide_drive_t *drive, int lockflag,
+ struct atapi_request_sense *reqbuf);
+
+
+
+/* Interrupt routine for packet command completion.
+   pc->buflen uses a sign convention: positive means data is expected
+   FROM the drive (read), negative means data is to be sent TO the
+   drive (write) -- see the direction checks below. */
+static void cdrom_pc_intr (ide_drive_t *drive)
+{
+	int ireason, len, stat, thislen;
+	struct request *rq = HWGROUP(drive)->rq;
+	struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+	/* Check for errors. */
+	if (cdrom_decode_status (drive, 0, &stat)) return;
+
+	/* Read the interrupt reason and the transfer length. */
+	ireason = IN_BYTE (IDE_NSECTOR_REG);
+	len = IN_BYTE (IDE_LCYL_REG) + 256 * IN_BYTE (IDE_HCYL_REG);
+
+	/* If DRQ is clear, the command has completed.
+	   Complain if we still have data left to transfer. */
+	if ((stat & DRQ_STAT) == 0) {
+		/* Some of the trailing request sense fields are optional, and
+		   some drives don't send them.  Sigh. */
+		if (pc->c[0] == REQUEST_SENSE &&
+		    pc->buflen > 0 &&
+		    pc->buflen <= 5) {
+			/* Zero-fill the missing tail so callers see a
+			   fully-initialized sense buffer. */
+			while (pc->buflen > 0) {
+				*pc->buffer++ = 0;
+				--pc->buflen;
+			}
+		}
+
+		if (pc->buflen == 0)
+			cdrom_end_request (1, drive);
+		else {
+			printk ("%s: cdrom_pc_intr: data underrun %d\n",
+				drive->name, pc->buflen);
+			pc->stat = 1;
+			cdrom_end_request (1, drive);
+		}
+		return;
+	}
+
+	/* Figure out how much data to transfer. */
+	thislen = pc->buflen;
+	if (thislen < 0) thislen = -thislen;
+	if (thislen > len) thislen = len;
+
+	/* The drive wants to be written to. */
+	if ((ireason & 3) == 0) {
+		/* Check that we want to write (buflen > 0 means a read
+		   was expected -- direction mismatch). */
+		if (pc->buflen > 0) {
+			printk ("%s: cdrom_pc_intr: Drive wants "
+				"to transfer data the wrong way!\n",
+				drive->name);
+			pc->stat = 1;
+			thislen = 0;
+		}
+
+		/* Transfer the data. */
+		cdrom_out_bytes (drive, pc->buffer, thislen);
+
+		/* If we haven't moved enough data to satisfy the drive,
+		   add some padding. */
+		while (len > thislen) {
+			int dum = 0;
+			cdrom_out_bytes (drive, &dum, sizeof (dum));
+			len -= sizeof (dum);
+		}
+
+		/* Keep count of how much data we've moved.
+		   (buflen is negative for writes, so += moves it toward 0.) */
+		pc->buffer += thislen;
+		pc->buflen += thislen;
+	}
+
+	/* Same drill for reading. */
+	else if ((ireason & 3) == 2) {
+		/* Check that we want to read. */
+		if (pc->buflen < 0) {
+			printk ("%s: cdrom_pc_intr: Drive wants to "
+				"transfer data the wrong way!\n",
+				drive->name);
+			pc->stat = 1;
+			thislen = 0;
+		}
+
+		/* Transfer the data. */
+		cdrom_in_bytes (drive, pc->buffer, thislen);
+
+		/* If we haven't moved enough data to satisfy the drive,
+		   add some padding. */
+		while (len > thislen) {
+			int dum = 0;
+			cdrom_in_bytes (drive, &dum, sizeof (dum));
+			len -= sizeof (dum);
+		}
+
+		/* Keep count of how much data we've moved. */
+		pc->buffer += thislen;
+		pc->buflen -= thislen;
+	} else {
+		printk ("%s: cdrom_pc_intr: The drive "
+			"appears confused (ireason = 0x%2x)\n",
+			drive->name, ireason);
+		pc->stat = 1;
+	}
+
+	/* Now we wait for another interrupt. */
+	ide_set_handler (drive, &cdrom_pc_intr, WAIT_CMD);
+}
+
+
+/* Continuation routine: the drive is ready to accept the packet bytes
+   for the pending packet command; send them. */
+static void cdrom_do_pc_continuation (ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+	/* Send the command to the drive and return. */
+	cdrom_transfer_packet_command (drive, pc->c,
+				       sizeof (pc->c), &cdrom_pc_intr);
+}
+
+
+/* Begin execution of a general packet command stored in the current
+   request's buffer field. */
+static void cdrom_do_packet_command (ide_drive_t *drive)
+{
+	int len;
+	struct request *rq = HWGROUP(drive)->rq;
+	struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+	/* buflen is negative for writes; the transfer length is its
+	   absolute value. */
+	len = pc->buflen;
+	if (len < 0) len = -len;
+
+	pc->stat = 0;
+
+	/* Start sending the command to the drive. */
+	cdrom_start_packet_command (drive, len, cdrom_do_pc_continuation);
+}
+
+/* Sleep for TIME jiffies.
+   Not to be called from an interrupt handler. */
+#ifdef MACH
+static
+void cdrom_sleep (int time)
+{
+	int xxx;
+
+	/* The address of the local is used only as a unique wait-event
+	   token; nobody ever wakes this event, so we rely on the timeout. */
+	assert_wait ((event_t) &xxx, TRUE);
+	thread_set_timeout (time);
+	schedule ();
+}
+#else
+static
+void cdrom_sleep (int time)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	current->timeout = jiffies + time;
+	schedule ();
+}
+#endif
+
+/* Synchronously execute packet command PC on DRIVE, retrying up to 10
+   times on unit-attention or not-ready (loading) conditions.  QUIET
+   suppresses error logging.  Returns 0 on success, -EIO on failure. */
+static
+int cdrom_queue_packet_command (ide_drive_t *drive, struct packet_command *pc, int quiet)
+{
+	struct atapi_request_sense my_reqbuf;
+	int retries = 10;
+	struct request req;
+
+	/* If our caller has not provided a place to stick any sense data,
+	   use our own area. */
+	if (pc->sense_data == NULL)
+		pc->sense_data = &my_reqbuf;
+	pc->sense_data->sense_key = 0;
+
+	/* Start of retry loop. */
+	do {
+		ide_init_drive_cmd (&req);
+		req.cmd = PACKET_COMMAND;
+		req.buffer = (char *)pc;
+		req.quiet = quiet;
+		(void) ide_do_drive_cmd (drive, &req, ide_wait);
+
+		if (pc->stat != 0) {
+			/* The request failed.  Retry if it was due to a unit
+			   attention status
+			   (usually means media was changed). */
+			struct atapi_request_sense *reqbuf = pc->sense_data;
+
+			if (reqbuf->sense_key == UNIT_ATTENTION)
+				;	/* just retry */
+			else if (reqbuf->sense_key == NOT_READY &&
+				 reqbuf->asc == 4) {
+				/* The drive is in the process of loading
+				   a disk.  Retry, but wait a little to give
+				   the drive time to complete the load. */
+				cdrom_sleep (HZ);
+			} else
+				/* Otherwise, don't retry.
+				   (retries goes to -1 below, ending the loop.) */
+				retries = 0;
+
+			--retries;
+		}
+
+		/* End of retry loop. */
+	} while (pc->stat != 0 && retries >= 0);
+
+
+	/* Return an error if the command failed. */
+	if (pc->stat != 0)
+		return -EIO;
+	else {
+		/* The command succeeded.  If it was anything other than
+		   a request sense, eject, or door lock command,
+		   and we think that the door is presently unlocked, lock it
+		   again.  (The door was probably unlocked via an explicit
+		   CDROMEJECT ioctl.) */
+		if (CDROM_STATE_FLAGS (drive)->door_locked == 0 &&
+		    (pc->c[0] != REQUEST_SENSE &&
+		     pc->c[0] != ALLOW_MEDIUM_REMOVAL &&
+		     pc->c[0] != START_STOP)) {
+			(void) cdrom_lockdoor (drive, 1, NULL);
+		}
+		return 0;
+	}
+}
+
+
+/****************************************************************************
+ * cdrom driver request routine.
+ */
+
+/* Main request dispatch entry point for the cdrom driver: route the
+   current request to the packet-command, reset, or read paths. */
+void ide_do_rw_cdrom (ide_drive_t *drive, unsigned long block)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+
+	if (rq -> cmd == PACKET_COMMAND || rq -> cmd == REQUEST_SENSE_COMMAND)
+		cdrom_do_packet_command (drive);
+	else if (rq -> cmd == RESET_DRIVE_COMMAND) {
+		cdrom_end_request (1, drive);
+		ide_do_reset (drive);
+		return;
+	} else if (rq -> cmd != READ) {
+		/* Writes are not supported; anything else is a driver bug. */
+		printk ("ide-cd: bad cmd %d\n", rq -> cmd);
+		cdrom_end_request (0, drive);
+	} else
+		cdrom_start_read (drive, block);
+}
+
+
+
+/****************************************************************************
+ * Ioctl handling.
+ *
+ * Routines which queue packet commands take as a final argument a pointer
+ * to an atapi_request_sense struct. If execution of the command results
+ * in an error with a CHECK CONDITION status, this structure will be filled
+ * with the results of the subsequent request sense command. The pointer
+ * can also be NULL, in which case no sense information is returned.
+ */
+
+#if ! STANDARD_ATAPI
+/* Convert a binary value (0-99) to packed BCD:
+   ones digit in the low nibble, tens digit in the high nibble. */
+static inline
+int bin2bcd (int x)
+{
+	int ones = x % 10;
+	int tens = x / 10;
+	return (tens << 4) | ones;
+}
+
+
+/* Convert a packed BCD byte back to binary:
+   tens digit in the high nibble, ones digit in the low nibble. */
+static inline
+int bcd2bin (int x)
+{
+	int tens = x >> 4;
+	int ones = x & 0x0f;
+	return 10 * tens + ones;
+}
+
+/* Convert all three fields of an MSF address in place from BCD to
+   binary (for drives which report TOC addresses in BCD). */
+static
+void msf_from_bcd (struct atapi_msf *msf)
+{
+	msf->minute = bcd2bin (msf->minute);
+	msf->second = bcd2bin (msf->second);
+	msf->frame = bcd2bin (msf->frame);
+}
+
+#endif /* not STANDARD_ATAPI */
+
+
+/* Convert a logical block address to minute/second/frame form,
+   applying the standard CD block offset. */
+static inline
+void lba_to_msf (int lba, byte *m, byte *s, byte *f)
+{
+	lba += CD_BLOCK_OFFSET;
+	lba &= 0xffffff;		/* negative lbas use only 24 bits */
+	*m = lba / (CD_SECS * CD_FRAMES);
+	lba %= (CD_SECS * CD_FRAMES);
+	*s = lba / CD_FRAMES;
+	*f = lba % CD_FRAMES;
+}
+
+
+/* Convert a minute/second/frame address to a logical block address,
+   removing the standard CD block offset. */
+static inline
+int msf_to_lba (byte m, byte s, byte f)
+{
+	return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_BLOCK_OFFSET;
+}
+
+
+/* Issue a TEST UNIT READY to DRIVE, storing any sense data in REQBUF
+   (may be NULL).  Returns 0 if the unit is ready. */
+static int
+cdrom_check_status (ide_drive_t *drive,
+		    struct atapi_request_sense *reqbuf)
+{
+	struct packet_command pc;
+
+	memset (&pc, 0, sizeof (pc));
+
+	pc.sense_data = reqbuf;
+	pc.c[0] = TEST_UNIT_READY;
+
+	/* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
+	   switch CDs instead of supporting the LOAD_UNLOAD opcode   */
+
+	pc.c[7] = CDROM_STATE_FLAGS (drive)->sanyo_slot % 3;
+
+	/* Quiet: a not-ready drive here is routine, not an error. */
+	return cdrom_queue_packet_command (drive, &pc, 1);
+}
+
+
+/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise.
+   Sense data is stored through REQBUF if non-NULL.  Drives which
+   cannot lock the door are detected and remembered, and treated
+   as success thereafter. */
+static int
+cdrom_lockdoor (ide_drive_t *drive, int lockflag,
+		struct atapi_request_sense *reqbuf)
+{
+	struct atapi_request_sense my_reqbuf;
+	int stat;
+	struct packet_command pc;
+
+	if (reqbuf == NULL)
+		reqbuf = &my_reqbuf;
+
+	/* If the drive cannot lock the door, just pretend. */
+	if (CDROM_CONFIG_FLAGS (drive)->no_doorlock)
+		stat = 0;
+	else {
+		memset (&pc, 0, sizeof (pc));
+		pc.sense_data = reqbuf;
+
+		pc.c[0] = ALLOW_MEDIUM_REMOVAL;
+		pc.c[4] = (lockflag != 0);
+		stat = cdrom_queue_packet_command (drive, &pc, 0);
+	}
+
+	if (stat == 0)
+		CDROM_STATE_FLAGS (drive)->door_locked = lockflag;
+	else {
+		/* If we got an illegal field error, the drive
+		   probably cannot lock the door.
+		   (0x24 = invalid field in CDB, 0x20 = invalid opcode.) */
+		if (reqbuf->sense_key == ILLEGAL_REQUEST &&
+		    (reqbuf->asc == 0x24 || reqbuf->asc == 0x20)) {
+			printk ("%s: door locking not supported\n",
+				drive->name);
+			CDROM_CONFIG_FLAGS (drive)->no_doorlock = 1;
+			stat = 0;
+			CDROM_STATE_FLAGS (drive)->door_locked = lockflag;
+		}
+	}
+	return stat;
+}
+
+
+/* Eject the disk if EJECTFLAG is 0.
+   If EJECTFLAG is 1, try to reload the disk. */
+static int
+cdrom_eject (ide_drive_t *drive, int ejectflag,
+	     struct atapi_request_sense *reqbuf)
+{
+	struct packet_command pc;
+
+	memset (&pc, 0, sizeof (pc));
+	pc.sense_data = reqbuf;
+
+	pc.c[0] = START_STOP;
+	/* Byte 4: 2 = eject (LoEj), 3 = load (LoEj + Start). */
+	pc.c[4] = 2 + (ejectflag != 0);
+	return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+/* Pause audio playback if PAUSEFLAG is nonzero, resume it otherwise. */
+static int
+cdrom_pause (ide_drive_t *drive, int pauseflag,
+	     struct atapi_request_sense *reqbuf)
+{
+	struct packet_command pc;
+
+	memset (&pc, 0, sizeof (pc));
+	pc.sense_data = reqbuf;
+
+	pc.c[0] = SCMD_PAUSE_RESUME;
+	/* Byte 8 is the resume bit: 1 = resume, 0 = pause. */
+	pc.c[8] = !pauseflag;
+	return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+/* Spin the drive up (STARTFLAG nonzero) or down (STARTFLAG zero). */
+static int
+cdrom_startstop (ide_drive_t *drive, int startflag,
+		 struct atapi_request_sense *reqbuf)
+{
+	struct packet_command pc;
+
+	memset (&pc, 0, sizeof (pc));
+	pc.sense_data = reqbuf;
+
+	pc.c[0] = START_STOP;
+	pc.c[1] = 1;		/* immediate: return before operation completes */
+	pc.c[4] = startflag;
+	return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+/* Query the drive for its capacity; on success store the last LBA
+   through CAPACITY.  Sense data goes to REQBUF if non-NULL. */
+static int
+cdrom_read_capacity (ide_drive_t *drive, unsigned *capacity,
+		     struct atapi_request_sense *reqbuf)
+{
+	/* READ CAPACITY returns two big-endian 32-bit values;
+	   only the LBA is used, the block length is ignored. */
+	struct {
+		unsigned lba;
+		unsigned blocklen;
+	} capbuf;
+
+	int stat;
+	struct packet_command pc;
+
+	memset (&pc, 0, sizeof (pc));
+	pc.sense_data = reqbuf;
+
+	pc.c[0] = READ_CAPACITY;
+	pc.buffer = (unsigned char *)&capbuf;
+	pc.buflen = sizeof (capbuf);
+
+	stat = cdrom_queue_packet_command (drive, &pc, 1);
+	if (stat == 0)
+		*capacity = ntohl (capbuf.lba);
+
+	return stat;
+}
+
+
+/* Issue a READ TOC command for track TRACKNO into BUF (BUFLEN bytes).
+   MSF_FLAG requests MSF rather than LBA addressing; FORMAT selects
+   the TOC data format (byte 9, bits 6-7).  Quiet on failure. */
+static int
+cdrom_read_tocentry (ide_drive_t *drive, int trackno, int msf_flag,
+                     int format, char *buf, int buflen,
+		     struct atapi_request_sense *reqbuf)
+{
+	struct packet_command pc;
+
+	memset (&pc, 0, sizeof (pc));
+	pc.sense_data = reqbuf;
+
+	pc.buffer = (unsigned char *)buf;
+	pc.buflen = buflen;
+	pc.c[0] = SCMD_READ_TOC;
+	pc.c[6] = trackno;
+	pc.c[7] = (buflen >> 8);
+	pc.c[8] = (buflen & 0xff);
+	pc.c[9] = (format << 6);
+	if (msf_flag) pc.c[1] = 2;
+	return cdrom_queue_packet_command (drive, &pc, 1);
+}
+
+
+/* Try to read the entire TOC for the disk into our internal buffer,
+   converting BCD fields and MSF addresses as needed, and update the
+   device size tables from the disk capacity.  Returns 0 on success. */
+static int
+cdrom_read_toc (ide_drive_t *drive,
+		struct atapi_request_sense *reqbuf)
+{
+	int stat, ntracks, i;
+	struct atapi_toc *toc = drive->cdrom_info.toc;
+	struct {
+		struct atapi_toc_header hdr;
+		struct atapi_toc_entry  ent;
+	} ms_tmp;
+
+	if (toc == NULL) {
+		/* Try to allocate space. */
+		toc = (struct atapi_toc *) kmalloc (sizeof (struct atapi_toc),
+						    GFP_KERNEL);
+		drive->cdrom_info.toc = toc;
+	}
+
+	if (toc == NULL) {
+		printk ("%s: No cdrom TOC buffer!\n", drive->name);
+		return -EIO;
+	}
+
+	/* Check to see if the existing data is still valid.
+	   If it is, just return.  (The status check may clear toc_valid
+	   via the media-change path, which is why we test it again.) */
+	if (CDROM_STATE_FLAGS (drive)->toc_valid)
+		(void) cdrom_check_status (drive, NULL);
+
+	if (CDROM_STATE_FLAGS (drive)->toc_valid) return 0;
+
+	/* First read just the header, so we know how long the TOC is. */
+	stat = cdrom_read_tocentry (drive, 0, 1, 0, (char *)&toc->hdr,
+				    sizeof (struct atapi_toc_header) +
+				    sizeof (struct atapi_toc_entry),
+				    reqbuf);
+	if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+	if (CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd) {
+		toc->hdr.first_track = bcd2bin (toc->hdr.first_track);
+		toc->hdr.last_track  = bcd2bin (toc->hdr.last_track);
+	}
+#endif  /* not STANDARD_ATAPI */
+
+	ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+	if (ntracks <= 0) return -EIO;
+	if (ntracks > MAX_TRACKS) ntracks = MAX_TRACKS;
+
+	/* Now read the whole schmeer. */
+	stat = cdrom_read_tocentry (drive, 0, 1, 0, (char *)&toc->hdr,
+				    sizeof (struct atapi_toc_header) +
+				    (ntracks+1) *
+				      sizeof (struct atapi_toc_entry),
+				    reqbuf);
+	if (stat) return stat;
+	toc->hdr.toc_length = ntohs (toc->hdr.toc_length);
+
+#if ! STANDARD_ATAPI
+	if (CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd) {
+		toc->hdr.first_track = bcd2bin (toc->hdr.first_track);
+		toc->hdr.last_track  = bcd2bin (toc->hdr.last_track);
+	}
+#endif  /* not STANDARD_ATAPI */
+
+	/* Convert every entry's MSF address to LBA (entry ntracks is the
+	   lead-out, hence <=). */
+	for (i=0; i<=ntracks; i++) {
+#if ! STANDARD_ATAPI
+		if (CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd) {
+			if (CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd)
+				toc->ent[i].track = bcd2bin (toc->ent[i].track);
+			msf_from_bcd (&toc->ent[i].addr.msf);
+		}
+#endif  /* not STANDARD_ATAPI */
+		toc->ent[i].addr.lba = msf_to_lba (toc->ent[i].addr.msf.minute,
+						   toc->ent[i].addr.msf.second,
+						   toc->ent[i].addr.msf.frame);
+	}
+
+	/* Read the multisession information. */
+	stat = cdrom_read_tocentry (drive, 0, 1, 1,
+				    (char *)&ms_tmp, sizeof (ms_tmp),
+				    reqbuf);
+	if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+	if (CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd)
+		msf_from_bcd (&ms_tmp.ent.addr.msf);
+#endif  /* not STANDARD_ATAPI */
+
+	toc->last_session_lba = msf_to_lba (ms_tmp.ent.addr.msf.minute,
+					    ms_tmp.ent.addr.msf.second,
+					    ms_tmp.ent.addr.msf.frame);
+
+	toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
+
+	/* Now try to get the total cdrom capacity. */
+	stat = cdrom_read_capacity (drive, &toc->capacity, reqbuf);
+	if (stat) toc->capacity = 0x1fffff;
+
+	HWIF(drive)->gd->sizes[drive->select.b.unit << PARTN_BITS]
+		= toc->capacity * SECTORS_PER_FRAME;
+	drive->part[0].nr_sects = toc->capacity * SECTORS_PER_FRAME;
+
+	/* Remember that we've read this stuff. */
+	CDROM_STATE_FLAGS (drive)->toc_valid = 1;
+
+	return 0;
+}
+
+
+static int
+cdrom_read_subchannel (ide_drive_t *drive, int format,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *) buf;
+ pc.buflen = buflen;
+ pc.c[0] = SCMD_READ_SUBCHANNEL;
+ pc.c[1] = 2; /* MSF addressing */
+ pc.c[2] = 0x40; /* request subQ data */
+ pc.c[3] = format,
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+/* modeflag: 0 = current, 1 = changeable mask, 2 = default, 3 = saved */
+static int
+cdrom_mode_sense (ide_drive_t *drive, int pageno, int modeflag,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *)buf;
+ pc.buflen = buflen;
+ pc.c[0] = MODE_SENSE_10;
+ pc.c[2] = pageno | (modeflag << 6);
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+static int
+cdrom_mode_select (ide_drive_t *drive, int pageno, char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *)buf;
+ pc.buflen = - buflen;
+ pc.c[0] = MODE_SELECT_10;
+ pc.c[1] = 0x10;
+ pc.c[2] = pageno;
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+static int
+cdrom_play_lba_range_1 (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = SCMD_PLAYAUDIO_MSF;
+ lba_to_msf (lba_start, &pc.c[3], &pc.c[4], &pc.c[5]);
+ lba_to_msf (lba_end-1, &pc.c[6], &pc.c[7], &pc.c[8]);
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd) {
+ pc.c[3] = bin2bcd (pc.c[3]);
+ pc.c[4] = bin2bcd (pc.c[4]);
+ pc.c[5] = bin2bcd (pc.c[5]);
+ pc.c[6] = bin2bcd (pc.c[6]);
+ pc.c[7] = bin2bcd (pc.c[7]);
+ pc.c[8] = bin2bcd (pc.c[8]);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+/* Play audio starting at LBA LBA_START and finishing with the
+ LBA before LBA_END. */
+static int
+cdrom_play_lba_range (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ int i, stat;
+ struct atapi_request_sense my_reqbuf;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+		/* Some drives will, for certain audio CDs,
+ give an error if you ask them to play the entire cd using the
+ values which are returned in the TOC. The play will succeed,
+ however, if the ending address is adjusted downwards
+ by a few frames. */
+ for (i=0; i<75; i++) {
+ stat = cdrom_play_lba_range_1 (drive, lba_start, lba_end,
+ reqbuf);
+
+ if (stat == 0 ||
+ !(reqbuf->sense_key == ILLEGAL_REQUEST &&
+ reqbuf->asc == 0x24))
+ return stat;
+
+ --lba_end;
+ if (lba_end <= lba_start) break;
+ }
+
+ return stat;
+}
+
+
+static
+int cdrom_get_toc_entry (ide_drive_t *drive, int track,
+ struct atapi_toc_entry **ent,
+ struct atapi_request_sense *reqbuf)
+{
+ int stat, ntracks;
+ struct atapi_toc *toc;
+
+ /* Make sure our saved TOC is valid. */
+ stat = cdrom_read_toc (drive, reqbuf);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ /* Check validity of requested track number. */
+ ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+ if (track == CDROM_LEADOUT)
+ *ent = &toc->ent[ntracks];
+ else if (track < toc->hdr.first_track ||
+ track > toc->hdr.last_track)
+ return -EINVAL;
+ else
+ *ent = &toc->ent[track - toc->hdr.first_track];
+
+ return 0;
+}
+
+
+static int
+cdrom_read_block (ide_drive_t *drive, int format, int lba, int nblocks,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+ struct atapi_request_sense my_reqbuf;
+ int stat;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *)buf;
+ pc.buflen = buflen;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->old_readcd)
+ pc.c[0] = 0xd4;
+ else
+#endif /* not STANDARD_ATAPI */
+ pc.c[0] = READ_CD;
+
+ pc.c[1] = (format << 2);
+ put_unaligned(htonl(lba), (unsigned int *) &pc.c[2]);
+ pc.c[8] = (nblocks & 0xff);
+ pc.c[7] = ((nblocks>>8) & 0xff);
+ pc.c[6] = ((nblocks>>16) & 0xff);
+ if (format <= 1)
+ pc.c[9] = 0xf8;
+ else
+ pc.c[9] = 0x10;
+
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
+
+#if ! STANDARD_ATAPI
+ /* If the drive doesn't recognize the READ CD opcode, retry the command
+ with an older opcode for that command. */
+ if (stat && reqbuf->sense_key == ILLEGAL_REQUEST &&
+ reqbuf->asc == 0x20 &&
+ CDROM_CONFIG_FLAGS (drive)->old_readcd == 0) {
+ printk ("%s: Drive does not recognize READ_CD;"
+ "trying opcode 0xd4\n",
+ drive->name);
+ CDROM_CONFIG_FLAGS (drive)->old_readcd = 1;
+ return cdrom_read_block (drive, format, lba, nblocks,
+ buf, buflen, reqbuf);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ return stat;
+}
+
+
+/* If SLOT<0, unload the current slot. Otherwise, try to load SLOT. */
+static int
+cdrom_load_unload (ide_drive_t *drive, int slot,
+ struct atapi_request_sense *reqbuf)
+{
+ /* if the drive is a Sanyo 3 CD changer then TEST_UNIT_READY
+ (used in the cdrom_check_status function) is used to
+ switch CDs instead of LOAD_UNLOAD */
+
+ if (CDROM_STATE_FLAGS (drive)->sanyo_slot > 0) {
+
+ if ((slot == 1) || (slot == 2)) {
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = slot;
+ } else if (slot >= 0) {
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = 3;
+ } else {
+ return 0;
+ }
+
+ return cdrom_check_status (drive, NULL);
+
+ } else {
+
+ /* ATAPI Rev. 2.2+ standard for requesting switching of
+ CDs in a multiplatter device */
+
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = LOAD_UNLOAD;
+ pc.c[4] = 2 + (slot >= 0);
+ pc.c[8] = slot;
+ return cdrom_queue_packet_command (drive, &pc, 0);
+
+ }
+}
+
+
+int ide_cdrom_ioctl (ide_drive_t *drive, struct inode *inode,
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case CDROMEJECT: {
+ int stat;
+
+ if (drive->usage > 1)
+ return -EBUSY;
+
+ stat = cdrom_lockdoor (drive, 0, NULL);
+ if (stat) return stat;
+
+ return cdrom_eject (drive, 0, NULL);
+ }
+
+ case CDROMCLOSETRAY: {
+ int stat;
+ if (drive->usage > 1)
+ return -EBUSY;
+
+ stat = cdrom_eject (drive, 1, NULL);
+ if (stat) return stat;
+
+ return cdrom_lockdoor (drive, 1, NULL);
+ }
+
+ case CDROMEJECT_SW: {
+ CDROM_STATE_FLAGS (drive)->eject_on_close = arg;
+ return 0;
+ }
+
+ case CDROMPAUSE:
+ return cdrom_pause (drive, 1, NULL);
+
+ case CDROMRESUME:
+ return cdrom_pause (drive, 0, NULL);
+
+ case CDROMSTART:
+ return cdrom_startstop (drive, 1, NULL);
+
+ case CDROMSTOP: {
+#ifdef IHAVEADOLPHIN
+		/* Certain drives require this. Most don't,
+		   and will produce errors upon CDROMSTOP.
+		   Pit says the Dolphin needs this. If you
+		   own a Dolphin, just define IHAVEADOLPHIN somewhere */
+ int stat;
+ stat = cdrom_startstop (drive, 0, NULL);
+ if (stat) return stat;
+ return cdrom_eject (drive, 1, NULL);
+#endif /* end of IHAVEADOLPHIN */
+ return cdrom_startstop (drive, 0, NULL);
+ }
+
+ case CDROMPLAYMSF: {
+ struct cdrom_msf msf;
+ int stat, lba_start, lba_end;
+
+ stat = verify_area (VERIFY_READ, (void *)arg, sizeof (msf));
+ if (stat) return stat;
+
+ memcpy_fromfs (&msf, (void *) arg, sizeof(msf));
+
+ lba_start = msf_to_lba (msf.cdmsf_min0, msf.cdmsf_sec0,
+ msf.cdmsf_frame0);
+ lba_end = msf_to_lba (msf.cdmsf_min1, msf.cdmsf_sec1,
+ msf.cdmsf_frame1) + 1;
+
+ if (lba_end <= lba_start) return -EINVAL;
+
+ return cdrom_play_lba_range (drive, lba_start, lba_end, NULL);
+ }
+
+ /* Like just about every other Linux cdrom driver, we ignore the
+ index part of the request here. */
+ case CDROMPLAYTRKIND: {
+ int stat, lba_start, lba_end;
+ struct cdrom_ti ti;
+ struct atapi_toc_entry *first_toc, *last_toc;
+
+ stat = verify_area (VERIFY_READ, (void *)arg, sizeof (ti));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ti, (void *) arg, sizeof(ti));
+
+ stat = cdrom_get_toc_entry (drive, ti.cdti_trk0, &first_toc,
+ NULL);
+ if (stat) return stat;
+ stat = cdrom_get_toc_entry (drive, ti.cdti_trk1, &last_toc,
+ NULL);
+ if (stat) return stat;
+
+ if (ti.cdti_trk1 != CDROM_LEADOUT) ++last_toc;
+ lba_start = first_toc->addr.lba;
+ lba_end = last_toc->addr.lba;
+
+ if (lba_end <= lba_start) return -EINVAL;
+
+ return cdrom_play_lba_range (drive, lba_start, lba_end, NULL);
+ }
+
+ case CDROMREADTOCHDR: {
+ int stat;
+ struct cdrom_tochdr tochdr;
+ struct atapi_toc *toc;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (tochdr));
+ if (stat) return stat;
+
+ /* Make sure our saved TOC is valid. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+ tochdr.cdth_trk0 = toc->hdr.first_track;
+ tochdr.cdth_trk1 = toc->hdr.last_track;
+
+ memcpy_tofs ((void *) arg, &tochdr, sizeof (tochdr));
+
+ return stat;
+ }
+
+ case CDROMREADTOCENTRY: {
+ int stat;
+ struct cdrom_tocentry tocentry;
+ struct atapi_toc_entry *toce;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (tocentry));
+ if (stat) return stat;
+
+ memcpy_fromfs (&tocentry, (void *) arg, sizeof (tocentry));
+
+ stat = cdrom_get_toc_entry (drive, tocentry.cdte_track, &toce,
+ NULL);
+ if (stat) return stat;
+
+ tocentry.cdte_ctrl = toce->control;
+ tocentry.cdte_adr = toce->adr;
+
+ if (tocentry.cdte_format == CDROM_MSF) {
+ /* convert to MSF */
+ lba_to_msf (toce->addr.lba,
+ &tocentry.cdte_addr.msf.minute,
+ &tocentry.cdte_addr.msf.second,
+ &tocentry.cdte_addr.msf.frame);
+ } else
+ tocentry.cdte_addr.lba = toce->addr.lba;
+
+ memcpy_tofs ((void *) arg, &tocentry, sizeof (tocentry));
+
+ return stat;
+ }
+
+ case CDROMSUBCHNL: {
+ struct atapi_cdrom_subchnl scbuf;
+ int stat;
+ struct cdrom_subchnl subchnl;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (subchnl));
+ if (stat) return stat;
+
+ memcpy_fromfs (&subchnl, (void *) arg, sizeof (subchnl));
+
+ stat = cdrom_read_subchannel (drive, 1, /* current position */
+ (char *)&scbuf, sizeof (scbuf),
+ NULL);
+ if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd) {
+ msf_from_bcd (&scbuf.acdsc_absaddr.msf);
+ msf_from_bcd (&scbuf.acdsc_reladdr.msf);
+ }
+ if (CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd)
+ scbuf.acdsc_trk = bcd2bin (scbuf.acdsc_trk);
+#endif /* not STANDARD_ATAPI */
+
+ if (subchnl.cdsc_format == CDROM_MSF) {
+ subchnl.cdsc_absaddr.msf.minute =
+ scbuf.acdsc_absaddr.msf.minute;
+ subchnl.cdsc_absaddr.msf.second =
+ scbuf.acdsc_absaddr.msf.second;
+ subchnl.cdsc_absaddr.msf.frame =
+ scbuf.acdsc_absaddr.msf.frame;
+
+ subchnl.cdsc_reladdr.msf.minute =
+ scbuf.acdsc_reladdr.msf.minute;
+ subchnl.cdsc_reladdr.msf.second =
+ scbuf.acdsc_reladdr.msf.second;
+ subchnl.cdsc_reladdr.msf.frame =
+ scbuf.acdsc_reladdr.msf.frame;
+ } else {
+ subchnl.cdsc_absaddr.lba =
+ msf_to_lba (scbuf.acdsc_absaddr.msf.minute,
+ scbuf.acdsc_absaddr.msf.second,
+ scbuf.acdsc_absaddr.msf.frame);
+ subchnl.cdsc_reladdr.lba =
+ msf_to_lba (scbuf.acdsc_reladdr.msf.minute,
+ scbuf.acdsc_reladdr.msf.second,
+ scbuf.acdsc_reladdr.msf.frame);
+ }
+
+ subchnl.cdsc_audiostatus = scbuf.acdsc_audiostatus;
+ subchnl.cdsc_ctrl = scbuf.acdsc_ctrl;
+ subchnl.cdsc_trk = scbuf.acdsc_trk;
+ subchnl.cdsc_ind = scbuf.acdsc_ind;
+
+ memcpy_tofs ((void *) arg, &subchnl, sizeof (subchnl));
+
+ return stat;
+ }
+
+ case CDROMVOLCTRL: {
+ struct cdrom_volctrl volctrl;
+ char buffer[24], mask[24];
+ int stat;
+
+ stat = verify_area (VERIFY_READ, (void *) arg,
+ sizeof (volctrl));
+ if (stat) return stat;
+ memcpy_fromfs (&volctrl, (void *) arg, sizeof (volctrl));
+
+ stat = cdrom_mode_sense (drive, 0x0e, 0, buffer,
+ sizeof (buffer), NULL);
+ if (stat) return stat;
+ stat = cdrom_mode_sense (drive, 0x0e, 1, mask,
+ sizeof (buffer), NULL);
+ if (stat) return stat;
+
+ buffer[1] = buffer[2] = 0;
+
+ buffer[17] = volctrl.channel0 & mask[17];
+ buffer[19] = volctrl.channel1 & mask[19];
+ buffer[21] = volctrl.channel2 & mask[21];
+ buffer[23] = volctrl.channel3 & mask[23];
+
+ return cdrom_mode_select (drive, 0x0e, buffer,
+ sizeof (buffer), NULL);
+ }
+
+ case CDROMVOLREAD: {
+ struct cdrom_volctrl volctrl;
+ char buffer[24];
+ int stat;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (volctrl));
+ if (stat) return stat;
+
+ stat = cdrom_mode_sense (drive, 0x0e, 0, buffer,
+ sizeof (buffer), NULL);
+ if (stat) return stat;
+
+ volctrl.channel0 = buffer[17];
+ volctrl.channel1 = buffer[19];
+ volctrl.channel2 = buffer[21];
+ volctrl.channel3 = buffer[23];
+
+ memcpy_tofs ((void *) arg, &volctrl, sizeof (volctrl));
+
+ return 0;
+ }
+
+ case CDROMMULTISESSION: {
+ struct cdrom_multisession ms_info;
+ struct atapi_toc *toc;
+ int stat;
+
+ stat = verify_area (VERIFY_WRITE, (void *)arg,
+ sizeof (ms_info));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ms_info, (void *)arg, sizeof (ms_info));
+
+ /* Make sure the TOC information is valid. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ if (ms_info.addr_format == CDROM_MSF)
+ lba_to_msf (toc->last_session_lba,
+ &ms_info.addr.msf.minute,
+ &ms_info.addr.msf.second,
+ &ms_info.addr.msf.frame);
+ else if (ms_info.addr_format == CDROM_LBA)
+ ms_info.addr.lba = toc->last_session_lba;
+ else
+ return -EINVAL;
+
+ ms_info.xa_flag = toc->xa_flag;
+
+ memcpy_tofs ((void *)arg, &ms_info, sizeof (ms_info));
+
+ return 0;
+ }
+
+ /* Read 2352 byte blocks from audio tracks. */
+ case CDROMREADAUDIO: {
+ int stat, lba;
+ struct atapi_toc *toc;
+ struct cdrom_read_audio ra;
+ char *buf;
+
+ /* Make sure the TOC is up to date. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ stat = verify_area (VERIFY_READ, (char *)arg, sizeof (ra));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ra, (void *)arg, sizeof (ra));
+
+ if (ra.nframes < 0 || ra.nframes > toc->capacity)
+ return -EINVAL;
+ else if (ra.nframes == 0)
+ return 0;
+
+ stat = verify_area (VERIFY_WRITE, (char *)ra.buf,
+ ra.nframes * CD_FRAMESIZE_RAW);
+ if (stat) return stat;
+
+ if (ra.addr_format == CDROM_MSF)
+ lba = msf_to_lba (ra.addr.msf.minute,
+ ra.addr.msf.second,
+ ra.addr.msf.frame);
+ else if (ra.addr_format == CDROM_LBA)
+ lba = ra.addr.lba;
+ else
+ return -EINVAL;
+
+ if (lba < 0 || lba >= toc->capacity)
+ return -EINVAL;
+
+ buf = (char *) kmalloc (CDROM_NBLOCKS_BUFFER*CD_FRAMESIZE_RAW,
+ GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ while (ra.nframes > 0) {
+ int this_nblocks = ra.nframes;
+ if (this_nblocks > CDROM_NBLOCKS_BUFFER)
+ this_nblocks = CDROM_NBLOCKS_BUFFER;
+ stat = cdrom_read_block
+ (drive, 1, lba, this_nblocks,
+ buf, this_nblocks * CD_FRAMESIZE_RAW, NULL);
+ if (stat) break;
+
+ memcpy_tofs (ra.buf, buf,
+ this_nblocks * CD_FRAMESIZE_RAW);
+ ra.buf += this_nblocks * CD_FRAMESIZE_RAW;
+ ra.nframes -= this_nblocks;
+ lba += this_nblocks;
+ }
+
+ kfree (buf);
+ return stat;
+ }
+ case CDROMREADRAW:
+ case CDROMREADMODE1:
+ case CDROMREADMODE2: {
+ struct cdrom_msf msf;
+ int blocksize, format, stat, lba;
+ char *buf;
+
+ if (cmd == CDROMREADMODE1) {
+ blocksize = CD_FRAMESIZE;
+ format = 2;
+ } else if (cmd == CDROMREADMODE2) {
+ blocksize = CD_FRAMESIZE_RAW0;
+ format = 3;
+ } else {
+ blocksize = CD_FRAMESIZE_RAW;
+ format = 0;
+ }
+
+ stat = verify_area (VERIFY_WRITE, (char *)arg, blocksize);
+ if (stat) return stat;
+
+ memcpy_fromfs (&msf, (void *)arg, sizeof (msf));
+
+ lba = msf_to_lba (msf.cdmsf_min0,
+ msf.cdmsf_sec0,
+ msf.cdmsf_frame0);
+
+ /* DON'T make sure the TOC is up to date. */
+ /* stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ if (lba < 0 || lba >= toc->capacity)
+ return -EINVAL; */
+
+ buf = (char *) kmalloc (CD_FRAMESIZE_RAW, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ stat = cdrom_read_block (drive, format, lba, 1, buf, blocksize,
+ NULL);
+ if (stat == 0)
+ memcpy_tofs ((char *)arg, buf, blocksize);
+
+ kfree (buf);
+ return stat;
+ }
+
+ case CDROM_GET_UPC: {
+ int stat;
+ char mcnbuf[24];
+ struct cdrom_mcn mcn;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (mcn));
+ if (stat) return stat;
+
+ stat = cdrom_read_subchannel (drive, 2, /* get MCN */
+ mcnbuf, sizeof (mcnbuf),
+ NULL);
+ if (stat) return stat;
+
+ memcpy (mcn.medium_catalog_number, mcnbuf+9,
+ sizeof (mcn.medium_catalog_number)-1);
+ mcn.medium_catalog_number[sizeof (mcn.medium_catalog_number)-1]
+ = '\0';
+
+ memcpy_tofs ((void *) arg, &mcn, sizeof (mcn));
+
+ return stat;
+ }
+
+ case CDROMLOADFROMSLOT:
+ printk ("%s: Use CDROM_SELECT_DISC "
+ " instead of CDROMLOADFROMSLOT.\n", drive->name);
+ /* Fall through. */
+
+ case CDROM_SELECT_DISC: {
+ struct atapi_request_sense my_reqbuf;
+ int stat;
+
+ if (drive->usage > 1)
+ return -EBUSY;
+
+ (void) cdrom_load_unload (drive, -1, NULL);
+
+ cdrom_saw_media_change (drive);
+ if (arg == -1) {
+ (void) cdrom_lockdoor (drive, 0, NULL);
+ return 0;
+ }
+ (void) cdrom_load_unload (drive, (int)arg, NULL);
+
+ stat = cdrom_check_status (drive, &my_reqbuf);
+ if (stat && my_reqbuf.sense_key == NOT_READY) {
+ return -ENOENT;
+ }
+
+ /* And try to read the TOC information now. */
+ return cdrom_read_toc (drive, &my_reqbuf);
+ }
+
+#if 0 /* Doesn't work reliably yet. */
+ case CDROMRESET: {
+ struct request req;
+ ide_init_drive_cmd (&req);
+ req.cmd = RESET_DRIVE_COMMAND;
+ return ide_do_drive_cmd (drive, &req, ide_wait);
+ }
+#endif
+
+
+#ifdef TEST
+ case 0x1234: {
+ int stat;
+ struct packet_command pc;
+ int len, lena;
+
+ memset (&pc, 0, sizeof (pc));
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (pc.c));
+ if (stat) return stat;
+ memcpy_fromfs (&pc.c, (void *) arg, sizeof (pc.c));
+ arg += sizeof (pc.c);
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (len));
+ if (stat) return stat;
+ memcpy_fromfs (&len, (void *) arg , sizeof (len));
+ arg += sizeof (len);
+
+ if (len > 0) {
+ stat = verify_area (VERIFY_WRITE, (void *) arg, len);
+ if (stat) return stat;
+ }
+
+ lena = len;
+ if (lena < 0) lena = 0;
+
+ {
+ char buf[lena];
+ if (len > 0) {
+ pc.buflen = len;
+ pc.buffer = buf;
+ }
+
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
+
+ if (len > 0)
+ memcpy_tofs ((void *)arg, buf, len);
+ }
+
+ return stat;
+ }
+#endif
+
+ default:
+ return -EPERM;
+ }
+
+}
+
+
+
+/****************************************************************************
+ * Other driver requests (open, close, check media change).
+ */
+
+int ide_cdrom_check_media_change (ide_drive_t *drive)
+{
+ int retval;
+
+ (void) cdrom_check_status (drive, NULL);
+
+ retval = CDROM_STATE_FLAGS (drive)->media_changed;
+ CDROM_STATE_FLAGS (drive)->media_changed = 0;
+
+ return retval;
+}
+
+
+int ide_cdrom_open (struct inode *ip, struct file *fp, ide_drive_t *drive)
+{
+ /* no write access */
+ if (fp->f_mode & 2) {
+ --drive->usage;
+ return -EROFS;
+ }
+
+ /* If this is the first open, check the drive status. */
+ if (drive->usage == 1) {
+ int stat;
+ struct atapi_request_sense my_reqbuf;
+ my_reqbuf.sense_key = 0;
+
+ /* Get the drive status. */
+ stat = cdrom_check_status (drive, &my_reqbuf);
+
+ /* If the tray is open, try to close it. */
+ if (stat && my_reqbuf.sense_key == NOT_READY) {
+ cdrom_eject (drive, 1, &my_reqbuf);
+ stat = cdrom_check_status (drive, &my_reqbuf);
+ }
+
+ /* If things worked ok, lock the door and read the
+ TOC information. */
+ if (stat == 0 || my_reqbuf.sense_key == UNIT_ATTENTION) {
+ (void) cdrom_lockdoor (drive, 1, &my_reqbuf);
+ (void) cdrom_read_toc (drive, &my_reqbuf);
+ } else {
+ /* Otherwise return as missing */
+ --drive->usage;
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Close down the device. Invalidate all cached blocks.
+ */
+
+void ide_cdrom_release (struct inode *inode, struct file *file,
+ ide_drive_t *drive)
+{
+ if (drive->usage == 0) {
+ invalidate_buffers (inode->i_rdev);
+
+ /* Unlock the door. */
+ (void) cdrom_lockdoor (drive, 0, NULL);
+
+ /* Do an eject if we were requested to do so. */
+ if (CDROM_STATE_FLAGS (drive)->eject_on_close)
+ (void) cdrom_eject (drive, 0, NULL);
+ }
+}
+
+
+
+/****************************************************************************
+ * Device initialization.
+ */
+
+void ide_cdrom_setup (ide_drive_t *drive)
+{
+ blksize_size[HWIF(drive)->major][drive->select.b.unit << PARTN_BITS] =
+ CD_FRAMESIZE;
+
+ drive->special.all = 0;
+ drive->ready_stat = 0;
+
+ CDROM_STATE_FLAGS (drive)->media_changed = 0;
+ CDROM_STATE_FLAGS (drive)->toc_valid = 0;
+ CDROM_STATE_FLAGS (drive)->door_locked = 0;
+
+ /* Turn this off by default, since many people don't like it. */
+ CDROM_STATE_FLAGS (drive)->eject_on_close= 0;
+
+#if NO_DOOR_LOCKING
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 1;
+#else
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 0;
+#endif
+
+ /* by default Sanyo 3 CD changer support is turned off and
+ ATAPI Rev 2.2+ standard support for CD changers is used */
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = 0;
+
+ if (drive->id != NULL)
+ CDROM_CONFIG_FLAGS (drive)->drq_interrupt =
+ ((drive->id->config & 0x0060) == 0x20);
+ else
+ CDROM_CONFIG_FLAGS (drive)->drq_interrupt = 0;
+
+#if ! STANDARD_ATAPI
+ drive->cdrom_info.max_sectors = 252;
+
+ CDROM_CONFIG_FLAGS (drive)->old_readcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 0;
+
+ if (drive->id != NULL) {
+ const char *model = (const char *)drive->id->model;
+ const char *fw_rev = (const char *)drive->id->fw_rev;
+
+ if (strcmp (model, "V003S0DS") == 0 &&
+ fw_rev[4] == '1' &&
+ fw_rev[6] <= '2') {
+ /* Vertos 300.
+ Some versions of this drive like to talk BCD. */
+ CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
+ }
+
+ else if (strcmp (model, "V006E0DS") == 0 &&
+ fw_rev[4] == '1' &&
+ fw_rev[6] <= '2') {
+ /* Vertos 600 ESD. */
+ CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 1;
+ }
+
+ else if (strcmp (model, "GCD-R580B") == 0)
+ drive->cdrom_info.max_sectors = 124;
+
+ else if (strcmp (model,
+ "NEC CD-ROM DRIVE:260") == 0 &&
+ strcmp (fw_rev, "1.01") == 0) {
+ /* Old NEC260 (not R). */
+ CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
+ }
+
+ else if (strcmp (model, "WEARNES CDD-120") == 0 &&
+ strcmp (fw_rev, "A1.1") == 0) {
+ /* Wearnes */
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
+ }
+
+ /* Sanyo 3 CD changer uses a non-standard command
+ for CD changing */
+ else if ((strcmp(model, "CD-ROM CDR-C3 G") == 0) ||
+ (strcmp(model, "CD-ROM CDR-C3G") == 0) ||
+ (strcmp(model, "CD-ROM CDR_C36") == 0)) {
+ /* uses CD in slot 0 when value is set to 3 */
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = 3;
+ }
+
+ }
+#endif /* not STANDARD_ATAPI */
+
+ drive->cdrom_info.toc = NULL;
+ drive->cdrom_info.sector_buffer = NULL;
+ drive->cdrom_info.sector_buffered = 0;
+ drive->cdrom_info.nsectors_buffered = 0;
+}
+
+
+
+/*
+ * TODO (for 2.1?):
+ * Avoid printing error messages for expected errors from the drive.
+ * Integrate with generic cdrom driver.
+ * Query the drive to find what features are available
+ * before trying to use them.
+ * Integrate spindown time adjustment patch.
+ * Modularize.
+ * CDROMRESET ioctl.
+ * Better support for changers.
+ */
+
+
+
+/*==========================================================================*/
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/linux/src/drivers/block/ide.c b/linux/src/drivers/block/ide.c
new file mode 100644
index 0000000..c8dee84
--- /dev/null
+++ b/linux/src/drivers/block/ide.c
@@ -0,0 +1,3926 @@
+/*
+ * linux/drivers/block/ide.c Version 5.53 Jun 24, 1997
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors (see below)
+ */
+#define _IDE_C /* needed by <linux/blk.h> */
+
+/*
+ * Maintained by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ *
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * Primary: ide0, port 0x1f0; major=3; hda is minor=0; hdb is minor=64
+ * Secondary: ide1, port 0x170; major=22; hdc is minor=0; hdd is minor=64
+ * Tertiary: ide2, port 0x???; major=33; hde is minor=0; hdf is minor=64
+ * Quaternary: ide3, port 0x???; major=34; hdg is minor=0; hdh is minor=64
+ *
+ * It is easy to extend ide.c to handle more than four interfaces:
+ *
+ * Change the MAX_HWIFS constant in ide.h.
+ *
+ * Define some new major numbers (in major.h), and insert them into
+ * the ide_hwif_to_major table in ide.c.
+ *
+ * Fill in the extra values for the new interfaces into the two tables
+ * inside ide.c: default_io_base[] and default_irqs[].
+ *
+ * Create the new request handlers by cloning "do_ide3_request()"
+ * for each new interface, and add them to the switch statement
+ * in the ide_init() function in ide.c.
+ *
+ * Recompile, create the new /dev/ entries, and it will probably work.
+ *
+ * From hd.c:
+ * |
+ * | It traverses the request-list, using interrupts to jump between functions.
+ * | As nearly all functions can be called within interrupts, we may not sleep.
+ * | Special care is recommended. Have Fun!
+ * |
+ * | modified by Drew Eckhardt to check nr of hd's from the CMOS.
+ * |
+ * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * | in the early extended-partition checks and added DM partitions.
+ * |
+ * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI).
+ * |
+ * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
+ * | and general streamlining by Mark Lord (mlord@pobox.com).
+ *
+ * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by:
+ *
+ * Mark Lord (mlord@pobox.com) (IDE Perf.Pkg)
+ * Delman Lee (delman@mipg.upenn.edu) ("Mr. atdisk2")
+ * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom)
+ *
+ * This was a rewrite of just about everything from hd.c, though some original
+ * code is still sprinkled about. Think of it as a major evolution, with
+ * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
+ *
+ * Version 1.0 ALPHA initial code, primary i/f working okay
+ * Version 1.3 BETA dual i/f on shared irq tested & working!
+ * Version 1.4 BETA added auto probing for irq(s)
+ * Version 1.5 BETA added ALPHA (untested) support for IDE cd-roms,
+ * ...
+ * Version 3.5 correct the bios_cyl field if it's too small
+ * (linux 1.1.76) (to help fdisk with brain-dead BIOSs)
+ * Version 3.6 cosmetic corrections to comments and stuff
+ * (linux 1.1.77) reorganise probing code to make it understandable
+ * added halfway retry to probing for drive identification
+ * added "hdx=noprobe" command line option
+ * allow setting multmode even when identification fails
+ * Version 3.7 move set_geometry=1 from do_identify() to ide_init()
+ * increase DRQ_WAIT to eliminate nuisance messages
+ * wait for DRQ_STAT instead of DATA_READY during probing
+ * (courtesy of Gary Thomas gary@efland.UU.NET)
+ * Version 3.8 fixed byte-swapping for confused Mitsumi cdrom drives
+ * update of ide-cd.c from Scott, allows blocksize=1024
+ * cdrom probe fixes, inspired by jprang@uni-duisburg.de
+ * Version 3.9 don't use LBA if lba_capacity looks funny
+ * correct the drive capacity calculations
+ * fix probing for old Seagates without IDE_ALTSTATUS_REG
+ * fix byte-ordering for some NEC cdrom drives
+ * Version 3.10 disable multiple mode by default; was causing trouble
+ * Version 3.11 fix mis-identification of old WD disks as cdroms
+ *  Version 3.12		simplify logic for selecting initial mult_count
+ * (fixes problems with buggy WD drives)
+ * Version 3.13 remove excess "multiple mode disabled" messages
+ * Version 3.14 fix ide_error() handling of BUSY_STAT
+ * fix byte-swapped cdrom strings (again.. arghh!)
+ * ignore INDEX bit when checking the ALTSTATUS reg
+ * Version 3.15 add SINGLE_THREADED flag for use with dual-CMD i/f
+ * ignore WRERR_STAT for non-write operations
+ * added vlb_sync support for DC-2000A & others,
+ * (incl. some Promise chips), courtesy of Frank Gockel
+ * Version 3.16 convert vlb_32bit and vlb_sync into runtime flags
+ * add ioctls to get/set VLB flags (HDIO_[SG]ET_CHIPSET)
+ * rename SINGLE_THREADED to SUPPORT_SERIALIZE,
+ * add boot flag to "serialize" operation for CMD i/f
+ * add optional support for DTC2278 interfaces,
+ * courtesy of andy@cercle.cts.com (Dyan Wile).
+ * add boot flag to enable "dtc2278" probe
+ * add probe to avoid EATA (SCSI) interfaces,
+ * courtesy of neuffer@goofy.zdv.uni-mainz.de.
+ * Version 4.00 tidy up verify_area() calls - heiko@colossus.escape.de
+ * add flag to ignore WRERR_STAT for some drives
+ * courtesy of David.H.West@um.cc.umich.edu
+ * assembly syntax tweak to vlb_sync
+ * removable drive support from scuba@cs.tu-berlin.de
+ * add transparent support for DiskManager-6.0x "Dynamic
+ * Disk Overlay" (DDO), most of this is in genhd.c
+ * eliminate "multiple mode turned off" message at boot
+ * Version 4.10 fix bug in ioctl for "hdparm -c3"
+ * fix DM6:DDO support -- now works with LILO, fdisk, ...
+ * don't treat some naughty WD drives as removable
+ * Version 4.11 updated DM6 support using info provided by OnTrack
+ * Version 5.00 major overhaul, multmode setting fixed, vlb_sync fixed
+ * added support for 3rd/4th/alternative IDE ports
+ * created ide.h; ide-cd.c now compiles separate from ide.c
+ * hopefully fixed infinite "unexpected_intr" from cdroms
+ * zillions of other changes and restructuring
+ * somehow reduced overall memory usage by several kB
+ * probably slowed things down slightly, but worth it
+ * Version 5.01 AT LAST!! Finally understood why "unexpected_intr"
+ * was happening at various times/places: whenever the
+ * ide-interface's ctl_port was used to "mask" the irq,
+ * it also would trigger an edge in the process of masking
+ * which would result in a self-inflicted interrupt!!
+ * (such a stupid way to build a hardware interrupt mask).
+ * This is now fixed (after a year of head-scratching).
+ * Version 5.02 got rid of need for {enable,disable}_irq_list()
+ * Version 5.03 tune-ups, comments, remove "busy wait" from drive resets
+ * removed PROBE_FOR_IRQS option -- no longer needed
+ * OOOPS! fixed "bad access" bug for 2nd drive on an i/f
+ * Version 5.04 changed "ira %d" to "irq %d" in DEBUG message
+ * added more comments, cleaned up unexpected_intr()
+ * OOOPS! fixed null pointer problem in ide reset code
+ * added autodetect for Triton chipset -- no effect yet
+ * Version 5.05 OOOPS! fixed bug in revalidate_disk()
+ * OOOPS! fixed bug in ide_do_request()
+ * added ATAPI reset sequence for cdroms
+ * Version 5.10 added Bus-Mastered DMA support for Triton Chipset
+ * some (mostly) cosmetic changes
+ * Version 5.11 added ht6560b support by malafoss@snakemail.hut.fi
+ * reworked PCI scanning code
+ * added automatic RZ1000 detection/support
+ * added automatic PCI CMD640 detection/support
+ * added option for VLB CMD640 support
+ * tweaked probe to find cdrom on hdb with disks on hda,hdc
+ * Version 5.12 some performance tuning
+ * added message to alert user to bad /dev/hd[cd] entries
+ * OOOPS! fixed bug in atapi reset
+ * driver now forces "serialize" again for all cmd640 chips
+ * noticed REALLY_SLOW_IO had no effect, moved it to ide.c
+ * made do_drive_cmd() into public ide_do_drive_cmd()
+ * Version 5.13 fixed typo ('B'), thanks to houston@boyd.geog.mcgill.ca
+ * fixed ht6560b support
+ * Version 5.13b (sss) fix problem in calling ide_cdrom_setup()
+ * don't bother invalidating nonexistent partitions
+ * Version 5.14 fixes to cmd640 support.. maybe it works now(?)
+ * added & tested full EZ-DRIVE support -- don't use LILO!
+ * don't enable 2nd CMD640 PCI port during init - conflict
+ * Version 5.15 bug fix in init_cmd640_vlb()
+ * bug fix in interrupt sharing code
+ * Version 5.16 ugh.. fix "serialize" support, broken in 5.15
+ * remove "Huh?" from cmd640 code
+ * added qd6580 interface speed select from Colten Edwards
+ * Version 5.17 kludge around bug in BIOS32 on Intel triton motherboards
+ * Version 5.18 new CMD640 code, moved to cmd640.c, #include'd for now
+ * new UMC8672 code, moved to umc8672.c, #include'd for now
+ * disallow turning on DMA when h/w not capable of DMA
+ * Version 5.19 fix potential infinite timeout on resets
+ * extend reset poll into a general purpose polling scheme
+ * add atapi tape drive support from Gadi Oxman
+ * simplify exit from _intr routines -- no IDE_DO_REQUEST
+ * Version 5.20 leave current rq on blkdev request list during I/O
+ * generalized ide_do_drive_cmd() for tape/cdrom driver use
+ * Version 5.21 fix nasty cdrom/tape bug (ide_preempt was messed up)
+ * Version 5.22 fix ide_xlate_1024() to work with/without drive->id
+ * Version 5.23 miscellaneous touch-ups
+ * Version 5.24 fix #if's for SUPPORT_CMD640
+ * Version 5.25 more touch-ups, fix cdrom resets, ...
+ * cmd640.c now configs/compiles separate from ide.c
+ * Version 5.26 keep_settings now maintains the using_dma flag
+ * fix [EZD] remap message to only output at boot time
+ * fix "bad /dev/ entry" message to say hdc, not hdc0
+ * fix ide_xlate_1024() to respect user specified CHS
+ * use CHS from partn table if it looks translated
+ * re-merged flags chipset,vlb_32bit,vlb_sync into io_32bit
+ * keep track of interface chipset type, when known
+ * add generic PIO mode "tuneproc" mechanism
+ * fix cmd640_vlb option
+ * fix ht6560b support (was completely broken)
+ * umc8672.c now configures/compiles separate from ide.c
+ * move dtc2278 support to dtc2278.c
+ * move ht6560b support to ht6560b.c
+ * move qd6580 support to qd6580.c
+ * add ali14xx support in ali14xx.c
+ * Version 5.27 add [no]autotune parameters to help cmd640
+ * move rz1000 support to rz1000.c
+ * Version 5.28 #include "ide_modes.h"
+ * fix disallow_unmask: now per-interface "no_unmask" bit
+ * force io_32bit to be the same on drive pairs of dtc2278
+ * improved IDE tape error handling, and tape DMA support
+ * bugfix in ide_do_drive_cmd() for cdroms + serialize
+ * Version 5.29 fixed non-IDE check for too many physical heads
+ * don't use LBA if capacity is smaller than CHS
+ * Version 5.30 remove real_devices kludge, formerly used by genhd.c
+ * Version 5.32 change "KB" to "kB"
+ * fix serialize (was broken in kernel 1.3.72)
+ * add support for "hdparm -I"
+ * use common code for disk/tape/cdrom IDE_DRIVE_CMDs
+ * add support for Promise DC4030VL caching card
+ * improved serialize support
+ * put partition check back into alphabetical order
+ * add config option for PCMCIA baggage
+ * try to make PCMCIA support safer to use
+ * improve security on ioctls(): all are suser() only
+ * Version 5.33 improve handling of HDIO_DRIVE_CMDs that read data
+ * Version 5.34 fix irq-sharing problem from 5.33
+ * fix cdrom ioctl problem from 5.33
+ * Version 5.35 cosmetic changes
+ * fix cli() problem in try_to_identify()
+ * Version 5.36 fixes to optional PCMCIA support
+ * Version 5.37 don't use DMA when "noautotune" is specified
+ * Version 5.37a (go) fix shared irq probing (was broken in kernel 1.3.72)
+ * call unplug_device() from ide_do_drive_cmd()
+ * Version 5.38 add "hdx=none" option, courtesy of Joel Maslak
+ * mask drive irq after use, if sharing with another hwif
+ * add code to help debug weird cmd640 problems
+ * Version 5.39 fix horrible error in earlier irq sharing "fix"
+ * Version 5.40 fix serialization -- was broken in 5.39
+ * help sharing by masking device irq after probing
+ * Version 5.41 more fixes to irq sharing/serialize detection
+ * disable io_32bit by default on drive reset
+ * Version 5.42 simplify irq-masking after probe
+ * fix NULL pointer deref in save_match()
+ * Version 5.43 Ugh.. unexpected_intr is back: try to exterminate it
+ * Version 5.44 Fix for "irq probe failed" on cmd640
+ * change path on message regarding MAKEDEV.ide
+ * add a throttle to the unexpected_intr() messages
+ * Version 5.45 fix ugly parameter parsing bugs (thanks Derek)
+ * include Gadi's magic fix for cmd640 unexpected_intr
+ * include mc68000 patches from Geert Uytterhoeven
+ * add Gadi's fix for PCMCIA cdroms
+ * Version 5.46 remove the mc68000 #ifdefs for 2.0.x
+ * Version 5.47 fix set_tune race condition
+ * fix bug in earlier PCMCIA cdrom update
+ * Version 5.48 if def'd, invoke CMD640_DUMP_REGS when irq probe fails
+ * lengthen the do_reset1() pulse, for laptops
+ * add idebus=xx parameter for cmd640 and ali chipsets
+ * no_unmask flag now per-drive instead of per-hwif
+ * fix tune_req so that it gets done immediately
+ * fix missing restore_flags() in ide_ioctl
+ * prevent use of io_32bit on cmd640 with no prefetch
+ * Version 5.49 fix minor quirks in probing routines
+ * Version 5.50 allow values as small as 20 for idebus=
+ * Version 5.51 force non io_32bit in drive_cmd_intr()
+ * change delay_10ms() to delay_50ms() to fix problems
+ * Version 5.52 fix incorrect invalidation of removable devices
+ * add "hdx=slow" command line option
+ * Version 5.53 add ATAPI floppy drive support
+ * change default media for type 0 to floppy
+ * add support for Exabyte Nest
+ * add missing set_blocksize() in revalidate_disk()
+ * handle bad status bit sequencing in ide_wait_stat()
+ * support partition table translations with 255 heads
+ * probe all interfaces by default
+ * add probe for the i82371AB chipset
+ * acknowledge media change on removable drives
+ * add work-around for BMI drives
+ * remove "LBA" from boot messages
+ * Version 5.53.1 add UDMA "CRC retry" support
+ * Version 5.53.2 add Promise/33 auto-detection and DMA support
+ * fix MC_ERR handling
+ * fix mis-detection of NEC cdrom as floppy
+ * issue ATAPI reset and re-probe after "no response"
+ *
+ * Some additional driver compile-time options are in ide.h
+ *
+ * To do, in likely order of completion:
+ * - modify kernel to obtain BIOS geometry for drives on 2nd/3rd/4th i/f
+*/
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/malloc.h>
+
+#include <ahci.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_PCI
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#endif /* CONFIG_PCI */
+
+#include "ide.h"
+#include "ide_modes.h"
+
+#ifdef CONFIG_BLK_DEV_PROMISE
+#include "promise.h"
+#define IS_PROMISE_DRIVE (HWIF(drive)->chipset == ide_promise)
+#else
+#define IS_PROMISE_DRIVE (0) /* auto-NULLs out Promise code */
+#endif /* CONFIG_BLK_DEV_PROMISE */
+
+static const byte ide_hwif_to_major[MAX_HWIFS] = {IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR};
+static unsigned short default_io_base[MAX_HWIFS] = {0x1f0, 0x170, 0x1e8, 0x168};
+static const byte default_irqs[MAX_HWIFS] = {14, 15, 11, 10};
+static int idebus_parameter; /* holds the "idebus=" parameter */
+static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */
+
+/*
+ * This is declared extern in ide.h, for access by other IDE modules:
+ */
+ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
+
+#if (DISK_RECOVERY_TIME > 0)
+/*
+ * For really screwy hardware (hey, at least it *can* be used with Linux)
+ * we can enforce a minimum delay time between successive operations.
+ */
+/*
+ * Return a fine-grained timestamp by combining the jiffies counter
+ * with the current countdown value of PIT channel 0 (ports 0x40/0x43).
+ * Interrupts are disabled so the two reads are coherent.
+ * NOTE(review): the 11932 scale factor presumably matches the PIT
+ * reload value per jiffy on i386 -- confirm for this kernel's HZ.
+ */
+static unsigned long read_timer(void)
+{
+	unsigned long t, flags;
+	int i;
+
+	save_flags(flags);
+	cli();
+	t = jiffies * 11932;
+	outb_p(0, 0x43);	/* latch the count of PIT counter 0 */
+	i = inb_p(0x40);	/* low byte of latched count */
+	i |= inb(0x40) << 8;	/* high byte */
+	restore_flags(flags);
+	return (t - i);		/* counter counts down, so subtract */
+}
+
+/* Record the current time on the interface, so a minimum delay can be
+ * enforced between successive operations (see DISK_RECOVERY_TIME). */
+static void set_recovery_timer (ide_hwif_t *hwif)
+{
+	hwif->last_time = read_timer();
+}
+#define SET_RECOVERY_TIMER(drive) set_recovery_timer (drive)
+
+#else
+
+#define SET_RECOVERY_TIMER(drive)
+
+#endif /* DISK_RECOVERY_TIME */
+
+/* Called by other drivers to disable the legacy IDE driver on a given IDE base. */
+/*
+ * Called by other drivers to disable the legacy IDE driver on a given
+ * IDE base.  Zeroing the default io_base entry prevents the probe code
+ * from ever touching that interface.
+ */
+void ide_disable_base(unsigned base)
+{
+	unsigned i;
+	for (i = 0; i < MAX_HWIFS; i++)
+		if (default_io_base[i] == base)
+			default_io_base[i] = 0;
+}
+
+
+/*
+ * Do not even *think* about calling this!
+ */
+/*
+ * Zero-fill one hwif structure (and its embedded drive structures),
+ * then fill in the non-zero defaults: io base, irq, control port,
+ * major number, interface name ("ide0".."ide3") and per-drive state.
+ * Do not even *think* about calling this -- init_ide_data() is the
+ * only legitimate caller.
+ */
+static void init_hwif_data (unsigned int index)
+{
+	byte *p;
+	unsigned int unit;
+	ide_hwif_t *hwif = &ide_hwifs[index];
+
+	/* bulk initialize hwif & drive info with zeros */
+	p = ((byte *) hwif) + sizeof(ide_hwif_t);
+	do {
+		*--p = 0;
+	} while (p > (byte *) hwif);
+
+	/* fill in any non-zero initial values */
+	hwif->index = index;
+	hwif->io_base = default_io_base[index];
+	hwif->irq = default_irqs[index];
+	/* control/altstatus port is at io_base+0x206 when an io base exists */
+	hwif->ctl_port = hwif->io_base ? hwif->io_base+0x206 : 0x000;
+#ifdef CONFIG_BLK_DEV_HD
+	if (hwif->io_base == HD_DATA)
+		hwif->noprobe = 1; /* may be overridden by ide_setup() */
+#endif /* CONFIG_BLK_DEV_HD */
+	hwif->major = ide_hwif_to_major[index];
+	hwif->name[0] = 'i';
+	hwif->name[1] = 'd';
+	hwif->name[2] = 'e';
+	hwif->name[3] = '0' + index;
+#ifdef CONFIG_BLK_DEV_IDETAPE
+	hwif->tape_drive = NULL;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+	for (unit = 0; unit < MAX_DRIVES; ++unit) {
+		ide_drive_t *drive = &hwif->drives[unit];
+
+		/* 0xa0 plus the unit bit (bit 4) forms the ATA drive-select byte */
+		drive->select.all = (unit<<4)|0xa0;
+		drive->hwif = hwif;
+		drive->ctl = 0x08;
+		drive->ready_stat = READY_STAT;
+		drive->bad_wstat = BAD_W_STAT;
+		/* force an initial recalibrate + set_geometry on first use */
+		drive->special.b.recalibrate = 1;
+		drive->special.b.set_geometry = 1;
+		drive->name[0] = 'h';
+		drive->name[1] = 'd';
+#ifdef MACH
+		/* Mach names drives numerically: hd0, hd1, ... */
+		drive->name[2] = '0' + (index * MAX_DRIVES) + unit;
+#else
+		/* Linux names drives alphabetically: hda, hdb, ... */
+		drive->name[2] = 'a' + (index * MAX_DRIVES) + unit;
+#endif
+	}
+}
+
+/*
+ * init_ide_data() sets reasonable default values into all fields
+ * of all instances of the hwifs and drives, but only on the first call.
+ * Subsequent calls have no effect (they don't wipe out anything).
+ *
+ * This routine is normally called at driver initialization time,
+ * but may also be called MUCH earlier during kernel "command-line"
+ * parameter processing. As such, we cannot depend on any other parts
+ * of the kernel (such as memory allocation) to be functioning yet.
+ *
+ * This is too bad, as otherwise we could dynamically allocate the
+ * ide_drive_t structs as needed, rather than always consuming memory
+ * for the max possible number (MAX_HWIFS * MAX_DRIVES) of them.
+ */
+#define MAGIC_COOKIE 0x12345678
+/*
+ * Initialize all hwif/drive defaults, exactly once.  The static
+ * magic_cookie guards against repeat calls: the first call clears it,
+ * so later calls return immediately without wiping user settings
+ * already applied by command-line parsing.
+ */
+static void init_ide_data (void)
+{
+	unsigned int index;
+	static unsigned long magic_cookie = MAGIC_COOKIE;
+
+	if (magic_cookie != MAGIC_COOKIE)
+		return;		/* already initialized */
+	magic_cookie = 0;
+
+	for (index = 0; index < MAX_HWIFS; ++index)
+		init_hwif_data(index);
+
+	idebus_parameter = 0;
+	system_bus_speed = 0;
+}
+
+/*
+ * ide_system_bus_speed() returns what we think is the system VESA/PCI
+ * bus speed (in Mhz). This is used for calculating interface PIO timings.
+ * The default is 40 for known PCI systems, 50 otherwise.
+ * The "idebus=xx" parameter can be used to override this value.
+ * The actual value to be used is computed/displayed the first time through.
+ */
+/*
+ * Return the assumed VESA/PCI bus speed in MHz, used for PIO timing
+ * calculations.  Priority: user "idebus=" parameter, then 40 on PCI
+ * systems, then 50 otherwise.  The value is computed (and announced
+ * via printk) only on the first call, then cached.
+ */
+int ide_system_bus_speed (void)
+{
+	if (!system_bus_speed) {
+		if (idebus_parameter)
+			system_bus_speed = idebus_parameter;	/* user supplied value */
+#ifdef CONFIG_PCI
+		else if (pcibios_present())
+			system_bus_speed = 40;	/* safe default value for PCI */
+#endif /* CONFIG_PCI */
+		else
+			system_bus_speed = 50;	/* safe default value for VESA and PCI */
+		printk("ide: Assuming %dMhz system bus speed for PIO modes; override with idebus=xx\n", system_bus_speed);
+	}
+	return system_bus_speed;
+}
+
+#if SUPPORT_VLB_SYNC
+/*
+ * Some localbus EIDE interfaces require a special access sequence
+ * when using 32-bit I/O instructions to transfer data. We call this
+ * the "vlb_sync" sequence, which consists of three successive reads
+ * of the sector count register location, with interrupts disabled
+ * to ensure that the reads all happen together.
+ */
+/* The "vlb_sync" access sequence: three successive reads of the sector
+ * count register, required by some VLB EIDE chips before 32-bit I/O.
+ * Caller must have interrupts disabled so the reads happen together. */
+static inline void do_vlb_sync (unsigned short port) {
+	(void) inb (port);
+	(void) inb (port);
+	(void) inb (port);
+}
+#endif /* SUPPORT_VLB_SYNC */
+
+/*
+ * This is used for most PIO data transfers *from* the IDE interface
+ */
+/*
+ * PIO data transfer *from* the IDE interface into `buffer`.
+ * wcount is in 32-bit words for the insl paths; for 16-bit transfers
+ * it is doubled (wcount<<1) to yield the same byte count.
+ * io_32bit bit 1 selects the vlb_sync prelude (interrupts disabled,
+ * re-enabled afterwards only if the drive allows irq unmasking).
+ */
+void ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+	unsigned short io_base  = HWIF(drive)->io_base;
+	unsigned short data_reg = io_base+IDE_DATA_OFFSET;
+	byte io_32bit = drive->io_32bit;
+
+	if (io_32bit) {
+#if SUPPORT_VLB_SYNC
+		if (io_32bit & 2) {
+			cli();
+			do_vlb_sync(io_base+IDE_NSECTOR_OFFSET);
+			insl(data_reg, buffer, wcount);
+			if (drive->unmask)
+				sti();
+		} else
+#endif /* SUPPORT_VLB_SYNC */
+			insl(data_reg, buffer, wcount);
+	} else {
+#if SUPPORT_SLOW_DATA_PORTS
+		if (drive->slow) {
+			/* byte-at-a-time pacing via the *_p port variants */
+			unsigned short *ptr = (unsigned short *) buffer;
+			while (wcount--) {
+				*ptr++ = inw_p(data_reg);
+				*ptr++ = inw_p(data_reg);
+			}
+		} else
+#endif /* SUPPORT_SLOW_DATA_PORTS */
+			insw(data_reg, buffer, wcount<<1);
+	}
+}
+
+/*
+ * This is used for most PIO data transfers *to* the IDE interface
+ */
+/*
+ * PIO data transfer *to* the IDE interface from `buffer`.
+ * Mirror image of ide_input_data(): same wcount semantics, same
+ * vlb_sync and slow-port handling, using the out* instructions.
+ */
+void ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+	unsigned short io_base  = HWIF(drive)->io_base;
+	unsigned short data_reg = io_base+IDE_DATA_OFFSET;
+	byte io_32bit = drive->io_32bit;
+
+	if (io_32bit) {
+#if SUPPORT_VLB_SYNC
+		if (io_32bit & 2) {
+			cli();
+			do_vlb_sync(io_base+IDE_NSECTOR_OFFSET);
+			outsl(data_reg, buffer, wcount);
+			if (drive->unmask)
+				sti();
+		} else
+#endif /* SUPPORT_VLB_SYNC */
+			outsl(data_reg, buffer, wcount);
+	} else {
+#if SUPPORT_SLOW_DATA_PORTS
+		if (drive->slow) {
+			/* byte-at-a-time pacing via the *_p port variants */
+			unsigned short *ptr = (unsigned short *) buffer;
+			while (wcount--) {
+				outw_p(*ptr++, data_reg);
+				outw_p(*ptr++, data_reg);
+			}
+		} else
+#endif /* SUPPORT_SLOW_DATA_PORTS */
+			outsw(data_reg, buffer, wcount<<1);
+	}
+}
+
+/*
+ * The following routines are mainly used by the ATAPI drivers.
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd bytecount is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+/*
+ * ATAPI helper: read `bytecount` bytes from the drive.  Rounds the
+ * count up to an even number (hence the ++bytecount), moves the bulk
+ * as 32-bit words, then picks up a trailing 16-bit word if the
+ * rounded count is not a multiple of 4.  Callers passing an odd
+ * bytecount must provide at least one spare byte in the buffer.
+ */
+void atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
+{
+	++bytecount;
+	ide_input_data (drive, buffer, bytecount / 4);
+	if ((bytecount & 0x03) >= 2)
+		insw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+}
+
+/*
+ * ATAPI helper: write `bytecount` bytes to the drive.  Same rounding
+ * and tail-word handling as atapi_input_bytes(), in the output
+ * direction.
+ */
+void atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
+{
+	++bytecount;
+	ide_output_data (drive, buffer, bytecount / 4);
+	if ((bytecount & 0x03) >= 2)
+		outsw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+}
+
+/*
+ * This should get invoked any time we exit the driver to
+ * wait for an interrupt response from a drive. handler() points
+ * at the appropriate code to handle the next interrupt, and a
+ * timer is started to prevent us from waiting forever in case
+ * something goes wrong (see the timer_expiry() handler later on).
+ */
+/*
+ * Install `handler` as the routine to run on the next interrupt from
+ * this drive's hwgroup, and start a watchdog timer (`timeout` jiffies)
+ * so a lost interrupt is eventually noticed.  Invoked whenever the
+ * driver exits while awaiting an interrupt response from a drive.
+ */
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout)
+{
+	ide_hwgroup_t *hwgroup = HWGROUP(drive);
+#ifdef DEBUG
+	/* a non-NULL old handler means two operations are racing */
+	if (hwgroup->handler != NULL) {
+		printk("%s: ide_set_handler: handler not null; old=%p, new=%p\n",
+			drive->name, hwgroup->handler, handler);
+	}
+#endif
+	hwgroup->handler       = handler;
+	hwgroup->timer.expires = jiffies + timeout;
+	add_timer(&(hwgroup->timer));
+}
+
+/*
+ * lba_capacity_is_ok() performs a sanity check on the claimed "lba_capacity"
+ * value for this drive (from its reported identification information).
+ *
+ * Returns: 1 if lba_capacity looks sensible
+ * 0 otherwise
+ *
+ * Note: we must not change id->cyls here, otherwise a second call
+ * of this routine might no longer find lba_capacity ok.
+ */
+/*
+ * lba_capacity_is_ok() performs a sanity check on the claimed "lba_capacity"
+ * value for this drive (from its reported identification information).
+ *
+ * Returns:	1 if lba_capacity looks sensible
+ *		0 otherwise
+ *
+ * Note: we must not change id->cyls here, otherwise a second call
+ * of this routine might no longer find lba_capacity ok.
+ * (The word-order fix-up of id->lba_capacity below is deliberately
+ * allowed, since the corrected value passes the same test again.)
+ */
+static int lba_capacity_is_ok (struct hd_driveid *id)
+{
+	unsigned long lba_sects   = id->lba_capacity;
+	unsigned long chs_sects   = id->cyls * id->heads * id->sectors;
+	unsigned long _10_percent = chs_sects / 10;
+
+	/*
+	 * The ATA spec tells large drives to return
+	 * C/H/S = 16383/16/63 independent of their size.
+	 * Some drives can be jumpered to use 15 heads instead of 16.
+	 */
+	if (id->cyls == 16383 && id->sectors == 63 &&
+	    (id->heads == 15 || id->heads == 16) &&
+	    id->lba_capacity >= 16383*63*id->heads)
+		return 1;	/* lba_capacity is our only option */
+
+	/* perform a rough sanity check on lba_sects:  within 10% is "okay" */
+	if ((lba_sects - chs_sects) < _10_percent)
+		return 1;	/* lba_capacity is good */
+
+	/* some drives have the word order reversed */
+	lba_sects = (lba_sects << 16) | (lba_sects >> 16);
+	if ((lba_sects - chs_sects) < _10_percent) {
+		id->lba_capacity = lba_sects;	/* fix it */
+		return 1;	/* lba_capacity is (now) good */
+	}
+	return 0;	/* lba_capacity value is bad */
+}
+
+/*
+ * current_capacity() returns the capacity (in sectors) of a drive
+ * according to its current geometry/LBA settings.
+ *
+ * It also sets select.b.lba.
+ */
+/*
+ * current_capacity() returns the capacity (in sectors) of a drive
+ * according to its current geometry/LBA settings.
+ *
+ * Side effect: sets drive->select.b.lba when LBA addressing is chosen.
+ * Non-disk, non-floppy media report a huge dummy capacity (0x7fffffff).
+ */
+static unsigned long current_capacity (ide_drive_t *drive)
+{
+	struct hd_driveid *id = drive->id;
+	unsigned long capacity;
+
+	if (!drive->present)
+		return 0;
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+	if (drive->media == ide_floppy)
+		return idefloppy_capacity(drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+	if (drive->media != ide_disk)
+		return 0x7fffffff;	/* cdrom or tape */
+
+	drive->select.b.lba = 0;
+	/* Determine capacity, and use LBA if the drive properly supports it */
+	capacity = drive->cyl * drive->head * drive->sect;
+	/* id->capability bit 1 advertises LBA support */
+	if (id != NULL && (id->capability & 2) && lba_capacity_is_ok(id)) {
+		/* prefer LBA only when it does not shrink the drive */
+		if (id->lba_capacity >= capacity) {
+			capacity = id->lba_capacity;
+			drive->select.b.lba = 1;
+		}
+	}
+	/* drive->sect0 sectors are reserved (e.g. by disk-manager overlays) */
+	return (capacity - drive->sect0);
+}
+
+/*
+ * ide_geninit() is called exactly *once* for each major, from genhd.c,
+ * at the beginning of the initial partition check for the drives.
+ */
+/*
+ * ide_geninit() is called exactly *once* for each major, from genhd.c,
+ * at the beginning of the initial partition check for the drives.
+ * It runs the media-specific setup hooks (cdrom/tape/floppy), records
+ * each drive's capacity, and marks drives that should skip the
+ * partition check by setting start_sect to -1.
+ */
+static void ide_geninit (struct gendisk *gd)
+{
+	unsigned int unit;
+	ide_hwif_t *hwif = gd->real_devices;
+
+	for (unit = 0; unit < gd->nr_real; ++unit) {
+		ide_drive_t *drive = &hwif->drives[unit];
+#ifdef CONFIG_BLK_DEV_IDECD
+		if (drive->present && drive->media == ide_cdrom)
+			ide_cdrom_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+		if (drive->present && drive->media == ide_tape)
+			idetape_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+		if (drive->present && drive->media == ide_floppy)
+			idefloppy_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+		drive->part[0].nr_sects = current_capacity(drive);
+		/* only disks and floppies with a real capacity get scanned */
+		if (!drive->present || (drive->media != ide_disk && drive->media != ide_floppy) ||
+		    !drive->part[0].nr_sects) {
+			drive->part[0].start_sect = -1; /* skip partition check */
+		}
+	}
+}
+
+/*
+ * init_gendisk() (as opposed to ide_geninit) is called for each major device,
+ * after probing for drives, to allocate partition tables and other data
+ * structures needed for the routines in genhd.c. ide_geninit() gets called
+ * somewhat later, during the partition check.
+ */
+/*
+ * init_gendisk() (as opposed to ide_geninit) is called for each major device,
+ * after probing for drives, to allocate partition tables and other data
+ * structures needed for the routines in genhd.c.  ide_geninit() gets called
+ * somewhat later, during the partition check.
+ *
+ * NOTE(review): the kmalloc() results are used unchecked; an allocation
+ * failure at boot would oops here.
+ */
+static void init_gendisk (ide_hwif_t *hwif)
+{
+	struct gendisk *gd, **gdp;
+	unsigned int unit, units, minors;
+	int *bs;
+
+	/* figure out maximum drive number on the interface */
+	for (units = MAX_DRIVES; units > 0; --units) {
+		if (hwif->drives[units-1].present)
+			break;
+	}
+	/* one minor per partition, PARTN_BITS partitions per drive */
+	minors    = units * (1<<PARTN_BITS);
+	gd        = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
+	gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
+	gd->part  = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
+	bs        = kmalloc (minors*sizeof(int), GFP_KERNEL);
+
+	memset(gd->part, 0, minors * sizeof(struct hd_struct));
+
+	/* cdroms and msdos f/s are examples of non-1024 blocksizes */
+	blksize_size[hwif->major] = bs;
+	for (unit = 0; unit < minors; ++unit)
+		*bs++ = BLOCK_SIZE;
+
+	/* point each drive at its slice of the partition table */
+	for (unit = 0; unit < units; ++unit)
+		hwif->drives[unit].part = &gd->part[unit << PARTN_BITS];
+
+	gd->major	= hwif->major;		/* our major device number */
+	gd->major_name	= IDE_MAJOR_NAME;	/* treated special in genhd.c */
+	gd->minor_shift	= PARTN_BITS;		/* num bits for partitions */
+	gd->max_p	= 1<<PARTN_BITS;	/* 1 + max partitions / drive */
+	gd->max_nr	= units;		/* max num real drives */
+	gd->nr_real	= units;		/* current num real drives */
+	gd->init	= ide_geninit;		/* initialization function */
+	gd->real_devices= hwif;			/* ptr to internal data */
+	gd->next	= NULL;			/* linked list of major devs */
+
+	/* walk to the end of the gendisk list and append ourselves */
+	for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next)) ;
+	hwif->gd = *gdp = gd;			/* link onto tail of list */
+}
+
+static void do_reset1 (ide_drive_t *, int); /* needed below */
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+/*
+ * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an atapi drive reset operation. If the drive has not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+/*
+ * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an atapi drive reset operation. If the drive has not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.  On timeout, fall back to a full interface reset
+ * via do_reset1(drive, 1).
+ */
+static void atapi_reset_pollfunc (ide_drive_t *drive)
+{
+	ide_hwgroup_t *hwgroup = HWGROUP(drive);
+	byte stat;
+
+	/* reselect the drive before sampling status */
+	OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+	udelay (10);
+
+	if (OK_STAT(stat=GET_STAT(), 0, BUSY_STAT)) {
+		printk("%s: ATAPI reset complete\n", drive->name);
+	} else {
+		if (jiffies < hwgroup->poll_timeout) {
+			ide_set_handler (drive, &atapi_reset_pollfunc, HZ/20);
+			return;	/* continue polling */
+		}
+		hwgroup->poll_timeout = 0;	/* end of polling */
+		printk("%s: ATAPI reset timed-out, status=0x%02x\n", drive->name, stat);
+		do_reset1 (drive, 1);	/* do it the old fashioned way */
+		return;
+	}
+	hwgroup->poll_timeout = 0;	/* done polling */
+}
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+/*
+ * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an ide reset operation. If the drives have not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+/*
+ * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an ide reset operation. If the drives have not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.  Once BUSY clears, the error register is decoded:
+ * 1 means success; other values are diagnostic codes for the master,
+ * with bit 7 indicating a slave failure.
+ */
+static void reset_pollfunc (ide_drive_t *drive)
+{
+	ide_hwgroup_t *hwgroup = HWGROUP(drive);
+	ide_hwif_t *hwif = HWIF(drive);
+	byte tmp;
+
+	if (!OK_STAT(tmp=GET_STAT(), 0, BUSY_STAT)) {
+		if (jiffies < hwgroup->poll_timeout) {
+			ide_set_handler (drive, &reset_pollfunc, HZ/20);
+			return;	/* continue polling */
+		}
+		printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
+	} else  {
+		printk("%s: reset: ", hwif->name);
+		if ((tmp = GET_ERR()) == 1)
+			printk("success\n");
+		else {
+#if FANCY_STATUS_DUMPS
+			/* decode the diagnostic code (low 7 bits = master) */
+			printk("master: ");
+			switch (tmp & 0x7f) {
+				case 1: printk("passed");
+					break;
+				case 2: printk("formatter device error");
+					break;
+				case 3: printk("sector buffer error");
+					break;
+				case 4: printk("ECC circuitry error");
+					break;
+				case 5: printk("controlling MPU error");
+					break;
+				default:printk("error (0x%02x?)", tmp);
+			}
+			if (tmp & 0x80)
+				printk("; slave: failed");
+			printk("\n");
+#else
+			printk("failed\n");
+#endif /* FANCY_STATUS_DUMPS */
+		}
+	}
+	hwgroup->poll_timeout = 0;	/* done polling */
+}
+
+/*
+ * do_reset1() attempts to recover a confused drive by resetting it.
+ * Unfortunately, resetting a disk drive actually resets all devices on
+ * the same interface, so it can really be thought of as resetting the
+ * interface rather than resetting the drive.
+ *
+ * ATAPI devices have their own reset mechanism which allows them to be
+ * individually reset without clobbering other devices on the same interface.
+ *
+ * Unfortunately, the IDE interface does not generate an interrupt to let
+ * us know when the reset operation has finished, so we must poll for this.
+ * Equally poor, though, is the fact that this may take a very long time to complete,
+ * (up to 30 seconds worstcase). So, instead of busy-waiting here for it,
+ * we set a timer to poll at 50ms intervals.
+ */
+static void do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+{
+ unsigned int unit;
+ unsigned long flags;
+ ide_hwif_t *hwif = HWIF(drive);
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+
+ save_flags(flags);
+ cli(); /* Why ? */
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ /* For an ATAPI device, first try an ATAPI SRST. */
+ if (drive->media != ide_disk) {
+ if (!do_not_try_atapi) {
+ if (!drive->keep_settings) {
+ drive->unmask = 0;
+ drive->io_32bit = 0;
+ }
+ OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+ udelay (20);
+ OUT_BYTE (WIN_SRST, IDE_COMMAND_REG);
+ hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
+ ide_set_handler (drive, &atapi_reset_pollfunc, HZ/20);
+ restore_flags (flags);
+ return;
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+ /*
+ * First, reset any device state data we were maintaining
+ * for any of the drives on this interface.
+ */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *rdrive = &hwif->drives[unit];
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (rdrive->media == ide_tape)
+ rdrive->tape.reset_issued = 1;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ rdrive->special.all = 0;
+ rdrive->special.b.set_geometry = 1;
+ rdrive->special.b.recalibrate = 1;
+ if (OK_TO_RESET_CONTROLLER)
+ rdrive->mult_count = 0;
+ if (!rdrive->keep_settings) {
+ rdrive->mult_req = 0;
+ rdrive->unmask = 0;
+ rdrive->io_32bit = 0;
+ if (rdrive->using_dma) {
+ rdrive->using_dma = 0;
+ printk("%s: disabled DMA\n", rdrive->name);
+ }
+ }
+ if (rdrive->mult_req != rdrive->mult_count)
+ rdrive->special.b.set_multmode = 1;
+ }
+
+#if OK_TO_RESET_CONTROLLER
+ /*
+ * Note that we also set nIEN while resetting the device,
+ * to mask unwanted interrupts from the interface during the reset.
+ * However, due to the design of PC hardware, this will cause an
+ * immediate interrupt due to the edge transition it produces.
+ * This single interrupt gives us a "fast poll" for drives that
+ * recover from reset very quickly, saving us the first 50ms wait time.
+ */
+ OUT_BYTE(drive->ctl|6,IDE_CONTROL_REG); /* set SRST and nIEN */
+ udelay(10); /* more than enough time */
+ OUT_BYTE(drive->ctl|2,IDE_CONTROL_REG); /* clear SRST, leave nIEN */
+ udelay(10); /* more than enough time */
+ hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
+ ide_set_handler (drive, &reset_pollfunc, HZ/20);
+#endif /* OK_TO_RESET_CONTROLLER */
+
+ restore_flags (flags);
+}
+
+/*
+ * ide_do_reset() is the entry point to the drive/interface reset code.
+ *
+ * For tape drives, reset_issued is also set here because do_reset1()'s
+ * ATAPI-SRST path returns before reaching the per-drive loop that would
+ * otherwise set it.
+ */
+void ide_do_reset (ide_drive_t *drive)
+{
+ do_reset1 (drive, 0);
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ drive->tape.reset_issued=1;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+}
+
+/*
+ * Clean up after success/failure of an explicit drive cmd
+ *
+ * For IDE_DRIVE_CMD requests the status, error and sector-count registers
+ * are copied back into the caller-supplied buffer (args[0..2]).  The
+ * request is then dequeued under cli() and anyone sleeping on rq->sem
+ * is woken up.
+ */
+void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err)
+{
+ unsigned long flags;
+ struct request *rq = HWGROUP(drive)->rq;
+
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ byte *args = (byte *) rq->buffer;
+ rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+ if (args) {
+ args[0] = stat;
+ args[1] = err;
+ args[2] = IN_BYTE(IDE_NSECTOR_REG);
+ }
+ }
+ /* dequeue the request atomically w.r.t. interrupts */
+ save_flags(flags);
+ cli();
+ blk_dev[MAJOR(rq->rq_dev)].current_request = rq->next;
+ HWGROUP(drive)->rq = NULL;
+ rq->rq_status = RQ_INACTIVE;
+ if (rq->sem != NULL)
+ up(rq->sem); /* wake up an ide_wait caller, if any */
+ restore_flags(flags);
+}
+
+/*
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ *
+ * Prints a decoded status byte and, if the drive reports an error (ERR set
+ * while not BUSY), also reads and decodes the error register.  Returns the
+ * raw error register value (0 if none was read).
+ */
+byte ide_dump_status (ide_drive_t *drive, const char *msg, byte stat)
+{
+ unsigned long flags;
+ byte err = 0;
+
+ save_flags (flags);
+ sti(); /* interrupts stay enabled while we print */
+ printk("%s: %s: status=0x%02x", drive->name, msg, stat);
+#if FANCY_STATUS_DUMPS
+ if (drive->media == ide_disk) {
+ printk(" { ");
+ if (stat & BUSY_STAT)
+ printk("Busy ");
+ else {
+ if (stat & READY_STAT) printk("DriveReady ");
+ if (stat & WRERR_STAT) printk("DeviceFault ");
+ if (stat & SEEK_STAT) printk("SeekComplete ");
+ if (stat & DRQ_STAT) printk("DataRequest ");
+ if (stat & ECC_STAT) printk("CorrectedError ");
+ if (stat & INDEX_STAT) printk("Index ");
+ if (stat & ERR_STAT) printk("Error ");
+ }
+ printk("}");
+ }
+#endif /* FANCY_STATUS_DUMPS */
+ printk("\n");
+ /* error register contents are only valid when ERR is set and BUSY clear */
+ if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
+ err = GET_ERR();
+ printk("%s: %s: error=0x%02x", drive->name, msg, err);
+#if FANCY_STATUS_DUMPS
+ if (drive->media == ide_disk) {
+ printk(" { ");
+ if (err & ICRC_ERR) printk((err & ABRT_ERR) ? "BadCRC " : "BadSector ");
+ if (err & ECC_ERR) printk("UncorrectableError ");
+ if (err & ID_ERR) printk("SectorIdNotFound ");
+ if (err & ABRT_ERR) printk("DriveStatusError ");
+ if (err & TRK0_ERR) printk("TrackZeroNotFound ");
+ if (err & MARK_ERR) printk("AddrMarkNotFound ");
+ printk("}");
+ if (err & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
+ byte cur = IN_BYTE(IDE_SELECT_REG);
+ if (cur & 0x40) { /* using LBA? */
+ /* reassemble the 28-bit LBA from the four task-file regs */
+ printk(", LBAsect=%ld", (unsigned long)
+ ((cur&0xf)<<24)
+ |(IN_BYTE(IDE_HCYL_REG)<<16)
+ |(IN_BYTE(IDE_LCYL_REG)<<8)
+ | IN_BYTE(IDE_SECTOR_REG));
+ } else {
+ printk(", CHS=%d/%d/%d",
+ (IN_BYTE(IDE_HCYL_REG)<<8) +
+ IN_BYTE(IDE_LCYL_REG),
+ cur & 0xf,
+ IN_BYTE(IDE_SECTOR_REG));
+ }
+ if (HWGROUP(drive)->rq)
+ printk(", sector=%ld", HWGROUP(drive)->rq->sector);
+ }
+ }
+#endif /* FANCY_STATUS_DUMPS */
+ printk("\n");
+ }
+ restore_flags (flags);
+ return err;
+}
+
+/*
+ * try_to_flush_leftover_data() is invoked in response to a drive
+ * unexpectedly having its DRQ_STAT bit set. As an alternative to
+ * resetting the drive, this routine tries to clear the condition
+ * by reading a sector's worth of data from the drive. Of course,
+ * this may not help if the drive is *waiting* for data from *us*.
+ */
+static void try_to_flush_leftover_data (ide_drive_t *drive)
+{
+ /* drain one full transfer unit: mult_count sectors, or a single one */
+ int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
+
+ while (i > 0) {
+ /* pull the data in 16-longword (64-byte) chunks and discard it */
+ unsigned long buffer[16];
+ unsigned int wcount = (i > 16) ? 16 : i;
+ i -= wcount;
+ ide_input_data (drive, buffer, wcount);
+ }
+}
+
+/*
+ * ide_error() takes action based on the error returned by the controller.
+ *
+ * Depending on the status/error bits it will either finish the current
+ * request immediately, mark it for retry/recalibration, or schedule a
+ * drive/interface reset.  Called from interrupt context with the failing
+ * status byte in 'stat'.
+ */
+void ide_error (ide_drive_t *drive, const char *msg, byte stat)
+{
+ struct request *rq;
+ byte err;
+
+ /*
+ * Guard against a NULL drive *before* touching it.  The original
+ * code tested "drive == NULL" only after HWGROUP(drive) and
+ * ide_dump_status(drive, ...) had already dereferenced the pointer,
+ * making the check useless.
+ */
+ if (drive == NULL)
+ return;
+ err = ide_dump_status(drive, msg, stat);
+ if ((rq = HWGROUP(drive)->rq) == NULL)
+ return;
+ /* retry only "normal" I/O: special drive commands get one shot only */
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ rq->errors = 1;
+ ide_end_drive_cmd(drive, stat, err);
+ return;
+ }
+ if (stat & BUSY_STAT) { /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else {
+ if (drive->media == ide_disk && (stat & ERR_STAT)) {
+ /* err has different meaning on cdrom and tape */
+ if (err == ABRT_ERR) {
+ if (drive->select.b.lba && IN_BYTE(IDE_COMMAND_REG) == WIN_SPECIFY)
+ return; /* some newer drives don't support WIN_SPECIFY */
+ } else if ((err & (ABRT_ERR | ICRC_ERR)) == (ABRT_ERR | ICRC_ERR))
+ ; /* UDMA crc error -- just retry the operation */
+ else if (err & (BBD_ERR | ECC_ERR)) /* retries won't help these */
+ rq->errors = ERROR_MAX;
+ else if (err & TRK0_ERR) /* help it find track zero */
+ rq->errors |= ERROR_RECAL;
+ else if (err & MC_ERR)
+ drive->special.b.mc = 1;
+ }
+ /* drive has leftover read data pending: drain it (not for writes) */
+ if ((stat & DRQ_STAT) && rq->cmd != WRITE)
+ try_to_flush_leftover_data(drive);
+ }
+ if (GET_STAT() & (BUSY_STAT|DRQ_STAT))
+ rq->errors |= ERROR_RESET; /* Mmmm.. timing problem */
+
+ if (rq->errors >= ERROR_MAX) {
+ /* out of retries: fail the request via the media-specific hook */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape) {
+ rq->errors = 0;
+ idetape_end_request(0, HWGROUP(drive));
+ } else
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->media == ide_floppy) {
+ rq->errors = 0;
+ idefloppy_end_request(0, HWGROUP(drive));
+ } else
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ if (drive->media == ide_scsi) {
+ rq->errors = 0;
+ idescsi_end_request(0, HWGROUP(drive));
+ } else
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ ide_end_request(0, HWGROUP(drive));
+ }
+ else {
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ ide_do_reset(drive);
+ return;
+ } else if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
+ drive->special.b.recalibrate = 1;
+ ++rq->errors;
+ }
+}
+
+/*
+ * read_intr() is the handler for disk read/multread interrupts
+ *
+ * Transfers one sector (or up to mult_count sectors for multread) per
+ * interrupt, advancing the request's sector/buffer bookkeeping as it goes.
+ */
+static void read_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ unsigned int msect, nsect;
+ struct request *rq;
+
+ if (!OK_STAT(stat=GET_STAT(),DATA_READY,BAD_R_STAT)) {
+ ide_error(drive, "read_intr", stat);
+ return;
+ }
+ msect = drive->mult_count; /* sectors remaining in this multread burst */
+read_next:
+ rq = HWGROUP(drive)->rq;
+ if (msect) {
+ if ((nsect = rq->current_nr_sectors) > msect)
+ nsect = msect;
+ msect -= nsect;
+ } else
+ nsect = 1;
+ i = rq->nr_sectors - nsect;
+ /* re-arm the handler *before* the PIO transfer: the drive may raise
+ * the next interrupt as soon as the data has been read */
+ if (i > 0 && !msect)
+ ide_set_handler (drive, &read_intr, WAIT_CMD);
+ ide_input_data(drive, rq->buffer, nsect * SECTOR_WORDS);
+#ifdef DEBUG
+ printk("%s: read: sectors(%ld-%ld), buffer=0x%08lx, remaining=%ld\n",
+ drive->name, rq->sector, rq->sector+nsect-1,
+ (unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect);
+#endif
+ rq->sector += nsect;
+ rq->buffer += nsect<<9; /* 512 bytes per sector */
+ rq->errors = 0;
+ rq->nr_sectors = i;
+ if ((rq->current_nr_sectors -= nsect) <= 0)
+ ide_end_request(1, HWGROUP(drive));
+ /* within a multread burst, keep consuming without a new interrupt */
+ if (i > 0 && msect)
+ goto read_next;
+}
+
+/*
+ * write_intr() is the handler for disk write interrupts
+ *
+ * Each interrupt acknowledges one sector written; if more remain, the
+ * handler is re-armed and the next sector is transferred.
+ */
+static void write_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ struct request *rq = hwgroup->rq;
+
+ if (OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
+#ifdef DEBUG
+ printk("%s: write: sector %ld, buffer=0x%08lx, remaining=%ld\n",
+ drive->name, rq->sector, (unsigned long) rq->buffer,
+ rq->nr_sectors-1);
+#endif
+ /* sanity: DRQ should be asserted exactly when this was NOT the
+ * last sector (XOR of the two conditions detects a mismatch) */
+ if ((rq->nr_sectors == 1) ^ ((stat & DRQ_STAT) != 0)) {
+ rq->sector++;
+ rq->buffer += 512;
+ rq->errors = 0;
+ i = --rq->nr_sectors;
+ --rq->current_nr_sectors;
+ if (rq->current_nr_sectors <= 0)
+ ide_end_request(1, hwgroup);
+ if (i > 0) {
+ /* re-arm before starting the next sector's PIO transfer */
+ ide_set_handler (drive, &write_intr, WAIT_CMD);
+ ide_output_data (drive, rq->buffer, SECTOR_WORDS);
+ }
+ return;
+ }
+ }
+ ide_error(drive, "write_intr", stat);
+}
+
+/*
+ * ide_multwrite() transfers a block of up to mcount sectors of data
+ * to a drive as part of a disk multiple-sector write operation.
+ *
+ * Works on the hwgroup's scratchpad copy of the request (wrq), walking
+ * the buffer-head chain as sectors are consumed.
+ */
+void ide_multwrite (ide_drive_t *drive, unsigned int mcount)
+{
+ struct request *rq = &HWGROUP(drive)->wrq;
+
+ do {
+ unsigned int nsect = rq->current_nr_sectors;
+ if (nsect > mcount)
+ nsect = mcount;
+ mcount -= nsect;
+
+ /* nsect<<7 == nsect * SECTOR_WORDS (128 32-bit words per sector) */
+ ide_output_data(drive, rq->buffer, nsect<<7);
+#ifdef DEBUG
+ printk("%s: multwrite: sector %ld, buffer=0x%08lx, count=%d, remaining=%ld\n",
+ drive->name, rq->sector, (unsigned long) rq->buffer,
+ nsect, rq->nr_sectors - nsect);
+#endif
+ if ((rq->nr_sectors -= nsect) <= 0)
+ break;
+ if ((rq->current_nr_sectors -= nsect) == 0) {
+ /* current buffer head exhausted: advance to the next one */
+ if ((rq->bh = rq->bh->b_reqnext) != NULL) {
+ rq->current_nr_sectors = rq->bh->b_size>>9;
+ rq->buffer = rq->bh->b_data;
+ } else {
+ panic("%s: buffer list corrupted\n", drive->name);
+ break;
+ }
+ } else {
+ rq->buffer += nsect << 9;
+ }
+ } while (mcount);
+}
+
+/*
+ * multwrite_intr() is the handler for disk multwrite interrupts
+ *
+ * DRQ set   -> the drive wants the next burst of sectors.
+ * DRQ clear -> if nothing is left, complete the original request(s).
+ * Anything else is treated as an error.
+ */
+static void multwrite_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ struct request *rq = &hwgroup->wrq;
+
+ if (OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
+ if (stat & DRQ_STAT) {
+ if (rq->nr_sectors) {
+ /* re-arm before feeding the next burst */
+ ide_set_handler (drive, &multwrite_intr, WAIT_CMD);
+ ide_multwrite(drive, drive->mult_count);
+ return;
+ }
+ } else {
+ if (!rq->nr_sectors) { /* all done? */
+ /* retire the real request, one buffer head at a time */
+ rq = hwgroup->rq;
+ for (i = rq->nr_sectors; i > 0;){
+ i -= rq->current_nr_sectors;
+ ide_end_request(1, hwgroup);
+ }
+ return;
+ }
+ }
+ }
+ ide_error(drive, "multwrite_intr", stat);
+}
+
+/*
+ * Issue a simple drive command
+ * The drive must be selected beforehand.
+ *
+ * The completion handler is installed *before* the command byte is
+ * written, so the interrupt cannot race past us.
+ */
+static void ide_cmd(ide_drive_t *drive, byte cmd, byte nsect, ide_handler_t *handler)
+{
+ ide_set_handler (drive, handler, WAIT_CMD);
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(nsect,IDE_NSECTOR_REG);
+ OUT_BYTE(cmd,IDE_COMMAND_REG);
+}
+
+/*
+ * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
+ *
+ * On success the requested multiple-sector count takes effect; on failure
+ * multmode is disabled and a recalibrate is scheduled.
+ */
+static void set_multmode_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (OK_STAT(stat,READY_STAT,BAD_STAT)) {
+ drive->mult_count = drive->mult_req;
+ } else {
+ drive->mult_req = drive->mult_count = 0;
+ drive->special.b.recalibrate = 1;
+ (void) ide_dump_status(drive, "set_multmode", stat);
+ }
+}
+
+/*
+ * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
+ * Only a failed status is acted upon (via ide_error()).
+ */
+static void set_geometry_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "set_geometry_intr", stat);
+}
+
+/*
+ * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
+ * Only a failed status is acted upon (via ide_error()).
+ */
+static void recal_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "recal_intr", stat);
+}
+
+/*
+ * mc_intr() is invoked on completion of a WIN_ACKMC cmd.
+ * Clears the media-change special flag regardless of outcome.
+ */
+static void mc_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "mc_intr", stat);
+ drive->special.b.mc = 0;
+}
+
+/*
+ * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
+ *
+ * If the command returns data (DRQ set and args[3] gives a sector count),
+ * it is read into args[4...] with 32-bit I/O temporarily disabled.
+ */
+static void drive_cmd_intr (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ byte *args = (byte *) rq->buffer;
+ byte stat = GET_STAT();
+
+ sti();
+ if ((stat & DRQ_STAT) && args && args[3]) {
+ /* force 16-bit PIO for this transfer, then restore the setting */
+ byte io_32bit = drive->io_32bit;
+ drive->io_32bit = 0;
+ ide_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
+ drive->io_32bit = io_32bit;
+ stat = GET_STAT(); /* re-read status after the data phase */
+ }
+ if (OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_end_drive_cmd (drive, stat, GET_ERR());
+ else
+ ide_error(drive, "drive_cmd", stat); /* calls ide_end_drive_cmd */
+}
+
+/*
+ * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
+ * commands to a drive. It used to do much more, but has been scaled back.
+ *
+ * Exactly one pending special flag is consumed and acted upon per call;
+ * the flag is cleared before the command is issued.
+ */
+static inline void do_special (ide_drive_t *drive)
+{
+ special_t *s = &drive->special;
+
+#ifdef DEBUG
+ printk("%s: do_special: 0x%02x\n", drive->name, s->all);
+#endif
+ if (s->b.set_geometry) {
+ s->b.set_geometry = 0;
+ if (drive->media == ide_disk && !drive->no_geom) {
+ /* program sectors/cylinders/heads into the task file */
+ OUT_BYTE(drive->sect,IDE_SECTOR_REG);
+ OUT_BYTE(drive->cyl,IDE_LCYL_REG);
+ OUT_BYTE(drive->cyl>>8,IDE_HCYL_REG);
+ OUT_BYTE(((drive->head-1)|drive->select.all)&0xBF,IDE_SELECT_REG);
+ if (!IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_SPECIFY, drive->sect, &set_geometry_intr);
+ }
+ } else if (s->b.recalibrate) {
+ s->b.recalibrate = 0;
+ if (drive->media == ide_disk && !IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_RESTORE, drive->sect, &recal_intr);
+ } else if (s->b.set_tune) {
+ /* interface-timing tuning is delegated to the chipset hook */
+ ide_tuneproc_t *tuneproc = HWIF(drive)->tuneproc;
+ s->b.set_tune = 0;
+ if (tuneproc != NULL)
+ tuneproc(drive, drive->tune_req);
+ } else if (s->b.set_multmode) {
+ s->b.set_multmode = 0;
+ if (drive->media == ide_disk) {
+ /* clamp the request to what the drive advertises */
+ if (drive->id && drive->mult_req > drive->id->max_multsect)
+ drive->mult_req = drive->id->max_multsect;
+ if (!IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_SETMULT, drive->mult_req, &set_multmode_intr);
+ } else
+ drive->mult_req = 0;
+ } else if (s->b.mc) {
+ s->b.mc = 0;
+ if (drive->media == ide_disk && !IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_ACKMC, drive->sect, &mc_intr);
+ } else if (s->all) {
+ int special = s->all;
+ s->all = 0;
+ printk("%s: bad special flag: 0x%02x\n", drive->name, special);
+ }
+}
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return 1 after invoking ide_error() -- caller should just return.
+ *
+ * This routine should get fixed to not hog the cpu during extra long waits..
+ * That could be done by busy-waiting for the first jiffy or two, and then
+ * setting a timer to wake up at half second intervals thereafter,
+ * until timeout is achieved, before timing out.
+ */
+int ide_wait_stat (ide_drive_t *drive, byte good, byte bad, unsigned long timeout)
+{
+ byte stat;
+ unsigned long flags;
+
+ udelay(1); /* spec allows drive 400ns to assert "BUSY" */
+ if ((stat = GET_STAT()) & BUSY_STAT) {
+ /* busy-wait (with interrupts enabled) until BUSY drops or
+ * the deadline passes.
+ * NOTE(review): "jiffies > timeout" misbehaves if jiffies
+ * wraps during the wait -- confirm whether that matters here. */
+ save_flags(flags);
+ sti();
+ timeout += jiffies;
+ while ((stat = GET_STAT()) & BUSY_STAT) {
+ if (jiffies > timeout) {
+ restore_flags(flags);
+ ide_error(drive, "status timeout", stat);
+ return 1;
+ }
+ }
+ restore_flags(flags);
+ }
+ udelay(1); /* allow status to settle, then read it again */
+ if (OK_STAT((stat = GET_STAT()), good, bad))
+ return 0;
+ ide_error(drive, "status error", stat);
+ return 1;
+}
+
+/*
+ * do_rw_disk() issues READ and WRITE commands to a disk,
+ * using LBA if supported, or CHS otherwise, to address sectors.
+ * It also takes care of issuing special DRIVE_CMDs.
+ */
+static inline void do_rw_disk (ide_drive_t *drive, struct request *rq, unsigned long block)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ unsigned short io_base = hwif->io_base;
+#ifdef CONFIG_BLK_DEV_PROMISE
+ int use_promise_io = 0;
+#endif /* CONFIG_BLK_DEV_PROMISE */
+
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(rq->nr_sectors,io_base+IDE_NSECTOR_OFFSET);
+#ifdef CONFIG_BLK_DEV_PROMISE
+ if (IS_PROMISE_DRIVE) {
+ if (hwif->is_promise2 || rq->cmd == READ) {
+ use_promise_io = 1;
+ }
+ }
+ if (drive->select.b.lba || use_promise_io) {
+#else /* !CONFIG_BLK_DEV_PROMISE */
+ if (drive->select.b.lba) {
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ /* 28-bit LBA addressing: the block number must fit in 28 bits */
+ if (block >= 1UL << 28) {
+ printk("block %lu beyond LBA28\n", block);
+ ide_end_request(0, hwif->hwgroup);
+ return;
+ }
+#ifdef DEBUG
+ printk("%s: %sing: LBAsect=%ld, sectors=%ld, buffer=0x%08lx\n",
+ drive->name, (rq->cmd==READ)?"read":"writ",
+ block, rq->nr_sectors, (unsigned long) rq->buffer);
+#endif
+ /* split the LBA across the four task-file registers,
+ * low byte first (note: block is shifted in place) */
+ OUT_BYTE(block,io_base+IDE_SECTOR_OFFSET);
+ OUT_BYTE(block>>=8,io_base+IDE_LCYL_OFFSET);
+ OUT_BYTE(block>>=8,io_base+IDE_HCYL_OFFSET);
+ OUT_BYTE(((block>>8)&0x0f)|drive->select.all,io_base+IDE_SELECT_OFFSET);
+ } else {
+ /* CHS addressing: convert the linear block number */
+ unsigned int sect,head,cyl,track;
+ track = block / drive->sect;
+ sect = block % drive->sect + 1; /* sectors are 1-based */
+ OUT_BYTE(sect,io_base+IDE_SECTOR_OFFSET);
+ head = track % drive->head;
+ cyl = track / drive->head;
+
+ if (cyl >= 1 << 16) {
+ printk("block %lu cylinder %u beyond CHS\n", block, cyl);
+ ide_end_request(0, hwif->hwgroup);
+ return;
+ }
+
+ OUT_BYTE(cyl,io_base+IDE_LCYL_OFFSET);
+ OUT_BYTE(cyl>>8,io_base+IDE_HCYL_OFFSET);
+ OUT_BYTE(head|drive->select.all,io_base+IDE_SELECT_OFFSET);
+#ifdef DEBUG
+ printk("%s: %sing: CHS=%d/%d/%d, sectors=%ld, buffer=0x%08lx\n",
+ drive->name, (rq->cmd==READ)?"read":"writ", cyl,
+ head, sect, rq->nr_sectors, (unsigned long) rq->buffer);
+#endif
+ }
+#ifdef CONFIG_BLK_DEV_PROMISE
+ if (use_promise_io) {
+ do_promise_io (drive, rq);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ if (rq->cmd == READ) {
+#ifdef CONFIG_BLK_DEV_TRITON
+ /* hand the transfer to the DMA engine if it accepts it */
+ if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_read, drive)))
+ return;
+#endif /* CONFIG_BLK_DEV_TRITON */
+ ide_set_handler(drive, &read_intr, WAIT_CMD);
+ OUT_BYTE(drive->mult_count ? WIN_MULTREAD : WIN_READ, io_base+IDE_COMMAND_OFFSET);
+ return;
+ }
+ if (rq->cmd == WRITE) {
+#ifdef CONFIG_BLK_DEV_TRITON
+ if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_write, drive)))
+ return;
+#endif /* CONFIG_BLK_DEV_TRITON */
+ if (drive->mult_count)
+ ide_set_handler (drive, &multwrite_intr, WAIT_CMD);
+ else
+ ide_set_handler (drive, &write_intr, WAIT_CMD);
+ OUT_BYTE(drive->mult_count ? WIN_MULTWRITE : WIN_WRITE, io_base+IDE_COMMAND_OFFSET);
+ /* for PIO writes *we* must supply the first data block once
+ * the drive raises DRQ */
+ if (ide_wait_stat(drive, DATA_READY, drive->bad_wstat, WAIT_DRQ)) {
+ printk("%s: no DRQ after issuing %s\n", drive->name,
+ drive->mult_count ? "MULTWRITE" : "WRITE");
+ return;
+ }
+ if (!drive->unmask)
+ cli();
+ if (drive->mult_count) {
+ HWGROUP(drive)->wrq = *rq; /* scratchpad */
+ ide_multwrite(drive, drive->mult_count);
+ } else {
+ ide_output_data(drive, rq->buffer, SECTOR_WORDS);
+ }
+ return;
+ }
+ printk("%s: bad command: %d\n", drive->name, rq->cmd);
+ ide_end_request(0, HWGROUP(drive));
+}
+
+/*
+ * execute_drive_cmd() issues a special drive command,
+ * usually initiated by ioctl() from the external hdparm program.
+ *
+ * args layout (when non-NULL): [0]=command byte, [1]=sector count,
+ * [2]=feature byte; completion is handled by drive_cmd_intr().
+ */
+static void execute_drive_cmd (ide_drive_t *drive, struct request *rq)
+{
+ byte *args = (byte *)rq->buffer;
+ if (args) {
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD cmd=0x%02x sc=0x%02x fr=0x%02x xx=0x%02x\n",
+ drive->name, args[0], args[1], args[2], args[3]);
+#endif
+ OUT_BYTE(args[2],IDE_FEATURE_REG);
+ ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
+ return;
+ } else {
+ /*
+ * NULL is actually a valid way of waiting for
+ * all current requests to be flushed from the queue.
+ */
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD (null)\n", drive->name);
+#endif
+ ide_end_drive_cmd(drive, GET_STAT(), GET_ERR());
+ return;
+ }
+}
+
+/*
+ * do_request() initiates handling of a new I/O request
+ *
+ * Validates the device number and sector range, applies the partition
+ * offset, selects and waits for the drive, then dispatches to the
+ * media-specific handler (or do_special() if special flags are pending).
+ */
+static inline void do_request (ide_hwif_t *hwif, struct request *rq)
+{
+ unsigned int minor, unit;
+ unsigned long block, blockend;
+ ide_drive_t *drive;
+
+ sti();
+#ifdef DEBUG
+ printk("%s: do_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);
+#endif
+ minor = MINOR(rq->rq_dev);
+ unit = minor >> PARTN_BITS;
+ if (MAJOR(rq->rq_dev) != hwif->major || unit >= MAX_DRIVES) {
+ printk("%s: bad device number: %s\n",
+ hwif->name, kdevname(rq->rq_dev));
+ goto kill_rq;
+ }
+ drive = &hwif->drives[unit];
+#ifdef DEBUG
+ if (rq->bh && !buffer_locked(rq->bh)) {
+ printk("%s: block not locked\n", drive->name);
+ goto kill_rq;
+ }
+#endif
+ /* reject requests that overflow or exceed the partition size */
+ block = rq->sector;
+ blockend = block + rq->nr_sectors;
+ if ((blockend < block) || (blockend > drive->part[minor&PARTN_MASK].nr_sects)) {
+#ifdef MACH
+ /* fixed: original format string read "nr_sects%ld" (missing '='),
+ * gluing the value onto the label in the log output */
+ printk ("%s%c: bad access: block=%ld, count=%ld, blockend=%ld, nr_sects=%ld\n",
+ drive->name, (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ',
+ block, rq->nr_sectors, blockend, drive->part[minor&PARTN_MASK].nr_sects);
+#else
+ printk("%s%c: bad access: block=%ld, count=%ld\n", drive->name,
+ (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ', block, rq->nr_sectors);
+#endif
+ goto kill_rq;
+ }
+ /* translate partition-relative sector to an absolute one */
+ block += drive->part[minor&PARTN_MASK].start_sect + drive->sect0;
+#if FAKE_FDISK_FOR_EZDRIVE
+ if (block == 0 && drive->remap_0_to_1)
+ block = 1; /* redirect MBR access to EZ-Drive partn table */
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ ((ide_hwgroup_t *)hwif->hwgroup)->drive = drive;
+#if (DISK_RECOVERY_TIME > 0)
+ while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
+#endif
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ POLL_HWIF_TAPE_DRIVE; /* macro from ide-tape.h */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ SELECT_DRIVE(hwif,drive);
+ if (ide_wait_stat(drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
+ printk("%s: drive not ready for command\n", drive->name);
+ return;
+ }
+
+ if (!drive->special.all) {
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ execute_drive_cmd(drive, rq);
+ return;
+ }
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ switch (drive->media) {
+ case ide_disk:
+ do_rw_disk (drive, rq, block);
+ return;
+#ifdef CONFIG_BLK_DEV_IDECD
+ case ide_cdrom:
+ ide_do_rw_cdrom (drive, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ case ide_tape:
+ idetape_do_request (drive, rq, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ case ide_floppy:
+ idefloppy_do_request (drive, rq, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ case ide_scsi:
+ idescsi_do_request (drive, rq, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+
+ default:
+ printk("%s: media type %d not supported\n",
+ drive->name, drive->media);
+ goto kill_rq;
+ }
+#else
+ do_rw_disk (drive, rq, block); /* simpler and faster */
+ return;
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ }
+ /* pending special flags take priority over normal I/O */
+ do_special(drive);
+ return;
+kill_rq:
+ ide_end_request(0, hwif->hwgroup);
+}
+
+/*
+ * The driver enables interrupts as much as possible. In order to do this,
+ * (a) the device-interrupt is always masked before entry, and
+ * (b) the timeout-interrupt is always disabled before entry.
+ *
+ * If we enter here from, say irq14, and then start a new request for irq15,
+ * (possible with "serialize" option) then we cannot ensure that we exit
+ * before the irq15 hits us. So, we must be careful not to let this bother us.
+ *
+ * Interrupts are still masked (by default) whenever we are exchanging
+ * data/cmds with a drive, because some drives seem to have very poor
+ * tolerance for latency during I/O. For devices which don't suffer from
+ * this problem (most don't), the unmask flag can be set using the "hdparm"
+ * utility, to permit other interrupts during data/cmd transfers.
+ */
+void ide_do_request (ide_hwgroup_t *hwgroup)
+{
+ cli(); /* paranoia */
+ if (hwgroup->handler != NULL) {
+ printk("%s: EEeekk!! handler not NULL in ide_do_request()\n", hwgroup->hwif->name);
+ return;
+ }
+ /* keep issuing requests until one installs an interrupt handler
+ * (i.e. until a command is in flight) or no work remains */
+ do {
+ ide_hwif_t *hwif = hwgroup->hwif;
+ struct request *rq;
+ if ((rq = hwgroup->rq) == NULL) {
+ if (hwif->sharing_irq && hwgroup->drive) /* set nIEN */
+ OUT_BYTE(hwgroup->drive->ctl|2,hwif->ctl_port);
+ /*
+ * hwgroup->next_hwif is different from hwgroup->hwif
+ * only when a request is inserted using "ide_next".
+ * This saves wear and tear on IDE tapes.
+ */
+ hwif = hwgroup->next_hwif;
+ do {
+ rq = blk_dev[hwif->major].current_request;
+ if (rq != NULL && rq->rq_status != RQ_INACTIVE)
+ goto got_rq;
+ } while ((hwif = hwif->next) != hwgroup->next_hwif);
+ hwgroup->active = 0;
+ return; /* no work left for this hwgroup */
+ }
+ got_rq:
+ do_request(hwgroup->hwif = hwgroup->next_hwif = hwif, hwgroup->rq = rq);
+ cli();
+ } while (hwgroup->handler == NULL);
+}
+
+/*
+ * do_hwgroup_request() invokes ide_do_request() after first masking
+ * all possible interrupts for the current hwgroup. This prevents race
+ * conditions in the event that an unexpected interrupt occurs while
+ * we are in the driver.
+ *
+ * Note that when an interrupt is used to reenter the driver, the first level
+ * handler will already have masked the irq that triggered, but any other ones
+ * for the hwgroup will still be unmasked. The driver tries to be careful
+ * about such things.
+ */
+static void do_hwgroup_request (ide_hwgroup_t *hwgroup)
+{
+ /* nothing to do if a request is already in flight on this group */
+ if (hwgroup->handler == NULL) {
+ ide_hwif_t *hgif = hwgroup->hwif;
+ ide_hwif_t *hwif = hgif;
+ hwgroup->active = 1;
+ /* mask every interface irq in the group, run, then unmask */
+ do {
+ disable_irq(hwif->irq);
+ } while ((hwif = hwif->next) != hgif);
+ ide_do_request (hwgroup);
+ do {
+ enable_irq(hwif->irq);
+ } while ((hwif = hwif->next) != hgif);
+ }
+}
+
+/* block-layer request_fn for the first IDE interface's hwgroup */
+static void do_ide0_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[0].hwgroup);
+}
+
+#if MAX_HWIFS > 1
+/* block-layer request_fn for the second IDE interface's hwgroup */
+static void do_ide1_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[1].hwgroup);
+}
+#endif
+
+#if MAX_HWIFS > 2
+/* block-layer request_fn for the third IDE interface's hwgroup */
+static void do_ide2_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[2].hwgroup);
+}
+#endif
+
+#if MAX_HWIFS > 3
+/* block-layer request_fn for the fourth IDE interface's hwgroup */
+static void do_ide3_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[3].hwgroup);
+}
+#endif
+
+/*
+ * timer_expiry() fires when a drive has not interrupted within the time
+ * allotted by ide_set_handler().  Three cases:
+ *  - a reset-poll is in progress: just call the poll handler again;
+ *  - no handler pending: the drive answered just as the timer expired;
+ *  - otherwise: a real timeout, abort the operation via ide_error().
+ */
+static void timer_expiry (unsigned long data)
+{
+ ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
+ ide_drive_t *drive = hwgroup->drive;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ if (hwgroup->poll_timeout != 0) { /* polling in progress? */
+ ide_handler_t *handler = hwgroup->handler;
+ hwgroup->handler = NULL;
+ handler(drive);
+ } else if (hwgroup->handler == NULL) { /* not waiting for anything? */
+ sti(); /* drive must have responded just as the timer expired */
+ printk("%s: marginal timeout\n", drive->name);
+ } else {
+ hwgroup->handler = NULL; /* abort the operation */
+ if (hwgroup->hwif->dmaproc)
+ (void) hwgroup->hwif->dmaproc (ide_dma_abort, drive);
+ ide_error(drive, "irq timeout", GET_STAT());
+ }
+ /* restart request processing unless a new command is in flight */
+ if (hwgroup->handler == NULL)
+ do_hwgroup_request (hwgroup);
+ restore_flags(flags);
+}
+
+/*
+ * There's nothing really useful we can do with an unexpected interrupt,
+ * other than reading the status register (to clear it), and logging it.
+ * There should be no way that an irq can happen before we're ready for it,
+ * so we needn't worry much about losing an "important" interrupt here.
+ *
+ * On laptops (and "green" PCs), an unexpected interrupt occurs whenever the
+ * drive enters "idle", "standby", or "sleep" mode, so if the status looks
+ * "good", we just ignore the interrupt completely.
+ *
+ * This routine assumes cli() is in effect when called.
+ *
+ * If an unexpected interrupt happens on irq15 while we are handling irq14
+ * and if the two interfaces are "serialized" (CMD640), then it looks like
+ * we could screw up by interfering with a new request being set up for irq15.
+ *
+ * In reality, this is a non-issue. The new command is not sent unless the
+ * drive is ready to accept one, in which case we know the drive is not
+ * trying to interrupt us. And ide_set_handler() is always invoked before
+ * completing the issuance of any new drive command, so we will not be
+ * accidently invoked as a result of any valid command completion interrupt.
+ *
+ */
+static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
+{
+ byte stat;
+ unsigned int unit;
+ ide_hwif_t *hwif = hwgroup->hwif;
+
+ /*
+ * handle the unexpected interrupt
+ */
+ do {
+ if (hwif->irq == irq) {
+ /* poll every drive on the offending interface */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (!drive->present)
+ continue;
+ SELECT_DRIVE(hwif,drive);
+ udelay(100); /* Ugly, but wait_stat() may not be safe here */
+ if (!OK_STAT(stat=GET_STAT(), drive->ready_stat, BAD_STAT)) {
+ /* Try to not flood the console with msgs */
+ static unsigned long last_msgtime = 0;
+ if ((last_msgtime + (HZ/2)) < jiffies) {
+ last_msgtime = jiffies;
+ (void) ide_dump_status(drive, "unexpected_intr", stat);
+ }
+ }
+ if ((stat & DRQ_STAT))
+ try_to_flush_leftover_data(drive);
+ }
+ }
+ } while ((hwif = hwif->next) != hwgroup->hwif);
+ /* reselect the drive that was active before we started poking around */
+ SELECT_DRIVE(hwif,hwgroup->drive); /* Ugh.. probably interrupts current I/O */
+ udelay(100); /* Ugly, but wait_stat() may not be safe here */
+}
+
+/*
+ * entry point for all interrupts, caller does cli() for us
+ *
+ * If a handler is pending for this irq, clear it, cancel the timeout
+ * timer, and dispatch; otherwise treat the irq as unexpected.
+ */
+void ide_intr (int irq, void *dev_id, struct pt_regs *regs)
+{
+ ide_hwgroup_t *hwgroup = dev_id;
+ ide_handler_t *handler;
+
+ if (irq == hwgroup->hwif->irq && (handler = hwgroup->handler) != NULL) {
+ ide_drive_t *drive = hwgroup->drive;
+ hwgroup->handler = NULL; /* consume the handler before dispatch */
+ del_timer(&(hwgroup->timer));
+ if (drive->unmask)
+ sti(); /* per-drive option: allow other irqs during handling */
+ handler(drive);
+ cli(); /* this is necessary, as next rq may be different irq */
+ if (hwgroup->handler == NULL) {
+ /* no new command in flight: pick up the next request */
+ SET_RECOVERY_TIMER(HWIF(drive));
+ ide_do_request(hwgroup);
+ }
+ } else {
+ unexpected_intr(irq, hwgroup);
+ }
+ cli();
+}
+
+/*
+ * get_info_ptr() returns the (ide_drive_t *) for a given device number.
+ * It returns NULL if the given device number does not match any present drives.
+ */
+static ide_drive_t *get_info_ptr (kdev_t i_rdev)
+{
+ int major = MAJOR(i_rdev);
+ unsigned int h;
+
+ /* scan the interfaces for one registered under this major number */
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ if (hwif->present && major == hwif->major) {
+ unsigned unit = DEVICE_NR(i_rdev);
+ if (unit < MAX_DRIVES) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (drive->present)
+ return drive;
+ } else if (major == IDE0_MAJOR && unit < 4) {
+ /* old /dev entries created with 8 partition bits */
+ printk("ide: probable bad entry for /dev/hd%c\n", 'a'+unit);
+ printk("ide: to fix it, run: /usr/src/linux/scripts/MAKEDEV.ide\n");
+ }
+ break;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Prepare a request structure for use with ide_do_drive_cmd().
+ * Every field the request machinery inspects is given a neutral value;
+ * the caller fills in any command specifics afterwards.
+ */
+void ide_init_drive_cmd (struct request *rq)
+{
+ rq->cmd = IDE_DRIVE_CMD;
+ rq->quiet = 0;
+
+ /* no data area or buffer-head chain attached yet */
+ rq->buffer = NULL;
+ rq->bh = NULL;
+ rq->bhtail = NULL;
+
+ /* no sector range */
+ rq->sector = 0;
+ rq->nr_sectors = 0;
+ rq->current_nr_sectors = 0;
+
+ /* not queued, nobody waiting on it */
+ rq->sem = NULL;
+ rq->next = NULL;
+
+#if 0 /* these are done each time through ide_do_drive_cmd() */
+ rq->errors = 0;
+ rq->rq_status = RQ_ACTIVE;
+ rq->rq_dev = ????;
+#endif
+}
+
+/*
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * If action is ide_wait, then the rq is queued at the end of the
+ * request queue, and the function sleeps until it has been processed.
+ * This is for use when invoked from an ioctl handler.
+ *
+ * If action is ide_preempt, then the rq is queued at the head of
+ * the request queue, displacing the currently-being-processed
+ * request and this function returns immediately without waiting
+ * for the new rq to be completed. This is VERY DANGEROUS, and is
+ * intended for careful use by the ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_next, then the rq is queued immediately after
+ * the currently-being-processed-request (if any), and the function
+ * returns without waiting for the new rq to be completed. As above,
+ * This is VERY DANGEROUS, and is intended for careful use by the
+ * ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_end, then the rq is queued at the end of the
+ * request queue, and the function returns immediately without waiting
+ * for the new rq to be completed. This is again intended for careful
+ * use by the ATAPI tape/cdrom driver code. (Currently used by ide-tape.c,
+ * when operating in the pipelined operation mode).
+ */
+int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
+{
+	unsigned long flags;
+	unsigned int major = HWIF(drive)->major;
+	struct request *cur_rq;
+	struct blk_dev_struct *bdev = &blk_dev[major];
+	struct semaphore sem = MUTEX_LOCKED;	/* on-stack; upped when rq completes */
+
+	if (IS_PROMISE_DRIVE && rq->buffer != NULL)
+		return -ENOSYS; /* special drive cmds not supported */
+	rq->errors = 0;
+	rq->rq_status = RQ_ACTIVE;
+	rq->rq_dev = MKDEV(major,(drive->select.b.unit)<<PARTN_BITS);
+	if (action == ide_wait)
+		rq->sem = &sem;	/* completion path will up() this for us */
+	unplug_device(bdev);
+
+	save_flags(flags);
+	cli();	/* protect the request list while splicing */
+	if (action == ide_next)
+		HWGROUP(drive)->next_hwif = HWIF(drive);
+	cur_rq = bdev->current_request;
+
+	if (cur_rq == NULL || action == ide_preempt) {
+		/* empty queue, or forcibly jump to the head of it */
+		rq->next = cur_rq;
+		bdev->current_request = rq;
+		if (action == ide_preempt)
+			HWGROUP(drive)->rq = NULL;	/* displace the in-flight rq */
+	} else {
+		if (action == ide_wait || action == ide_end) {
+			while (cur_rq->next != NULL)	/* find end of list */
+				cur_rq = cur_rq->next;
+		}
+		/* for ide_next this inserts immediately after cur_rq */
+		rq->next = cur_rq->next;
+		cur_rq->next = rq;
+	}
+	if (!HWGROUP(drive)->active) {
+		/* hwgroup is idle: start servicing the queue ourselves */
+		do_hwgroup_request(HWGROUP(drive));
+		cli();	/* request servicing may have re-enabled irqs */
+	}
+	if (action == ide_wait && rq->rq_status != RQ_INACTIVE)
+		down(&sem);	/* wait for it to be serviced */
+	restore_flags(flags);
+	return rq->errors ? -EIO : 0;	/* return -EIO if errors */
+}
+
+/*
+ * ide_open() handles open() on any of our block device nodes: it waits
+ * out a busy drive, bumps the usage count, dispatches to the configured
+ * media-specific open routine when one exists, and door-locks removable
+ * disks on first open.
+ */
+static int ide_open(struct inode * inode, struct file * filp)
+{
+	ide_drive_t *drive;
+	unsigned long flags;
+
+	if ((drive = get_info_ptr(inode->i_rdev)) == NULL)
+		return -ENXIO;
+	save_flags(flags);
+	cli();
+	/* serialize against revalidate_disk(), which sets drive->busy */
+	while (drive->busy)
+		sleep_on(&drive->wqueue);
+	drive->usage++;		/* open count */
+	restore_flags(flags);
+#ifdef CONFIG_BLK_DEV_IDECD
+	if (drive->media == ide_cdrom)
+		return ide_cdrom_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+	if (drive->media == ide_tape)
+		return idetape_blkdev_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+	if (drive->media == ide_floppy)
+		return idefloppy_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+	if (drive->media == ide_scsi)
+		return idescsi_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+	if (drive->removable && drive->usage == 1) {
+		/* first opener of a removable disk: lock the door */
+		byte door_lock[] = {WIN_DOORLOCK,0,0,0};
+		struct request rq;
+		check_disk_change(inode->i_rdev);
+		ide_init_drive_cmd (&rq);
+		rq.buffer = (char *)door_lock;
+		/*
+		 * Ignore the return code from door_lock,
+		 * since the open() has already succeeded,
+		 * and the door_lock is irrelevant at this point.
+		 */
+		(void) ide_do_drive_cmd(drive, &rq, ide_wait);
+	}
+	return 0;
+}
+
+/*
+ * Releasing a block device means we sync() it, so that it can safely
+ * be forgotten about...
+ *
+ * Mirrors ide_open(): drops the usage count, dispatches to the
+ * media-specific release routine when one exists, and door-unlocks a
+ * removable disk on last close.
+ */
+static void ide_release(struct inode * inode, struct file * file)
+{
+	ide_drive_t *drive;
+
+	if ((drive = get_info_ptr(inode->i_rdev)) != NULL) {
+		fsync_dev(inode->i_rdev);	/* flush dirty buffers first */
+		drive->usage--;
+#ifdef CONFIG_BLK_DEV_IDECD
+		if (drive->media == ide_cdrom) {
+			ide_cdrom_release (inode, file, drive);
+			return;
+		}
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+		if (drive->media == ide_tape) {
+			idetape_blkdev_release (inode, file, drive);
+			return;
+		}
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+		if (drive->media == ide_floppy) {
+			idefloppy_release (inode, file, drive);
+			return;
+		}
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+		if (drive->media == ide_scsi) {
+			idescsi_ide_release (inode, file, drive);
+			return;
+		}
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+		if (drive->removable && !drive->usage) {
+			/* last closer of a removable disk: unlock the door */
+			byte door_unlock[] = {WIN_DOORUNLOCK,0,0,0};
+			struct request rq;
+			invalidate_buffers(inode->i_rdev);
+			ide_init_drive_cmd (&rq);
+			rq.buffer = (char *)door_unlock;
+			(void) ide_do_drive_cmd(drive, &rq, ide_wait);
+		}
+	}
+}
+
+/*
+ * This routine is called to flush all partitions and partition tables
+ * for a changed disk, and then re-read the new partition table.
+ * If we are revalidating a disk because of a media change, then we
+ * enter with usage == 0. If we are using an ioctl, we automatically have
+ * usage == 1 (we need an open channel to use an ioctl :-), so this
+ * is our limit.
+ */
+static int revalidate_disk(kdev_t i_rdev)
+{
+	ide_drive_t *drive;
+	unsigned int p, major, minor;
+	long flags;
+
+	if ((drive = get_info_ptr(i_rdev)) == NULL)
+		return -ENODEV;
+
+	major = MAJOR(i_rdev);
+	minor = drive->select.b.unit << PARTN_BITS;	/* first minor of this unit */
+	save_flags(flags);
+	cli();
+	/* refuse if the drive is mid-revalidate or has other openers */
+	if (drive->busy || (drive->usage > 1)) {
+		restore_flags(flags);
+		return -EBUSY;
+	};
+	drive->busy = 1;	/* blocks new opens (see ide_open) */
+	restore_flags(flags);
+
+	/* flush and forget every partition of this unit */
+	for (p = 0; p < (1<<PARTN_BITS); ++p) {
+		if (drive->part[p].nr_sects > 0) {
+			kdev_t devp = MKDEV(major, minor+p);
+			fsync_dev (devp);
+			invalidate_inodes (devp);
+			invalidate_buffers (devp);
+			set_blocksize(devp, 1024);
+		}
+		drive->part[p].start_sect = 0;
+		drive->part[p].nr_sects = 0;
+	};
+
+	/* re-establish whole-disk size, then re-read the partition table */
+	drive->part[0].nr_sects = current_capacity(drive);
+	if ((drive->media != ide_disk && drive->media != ide_floppy) || !drive->part[0].nr_sects)
+		drive->part[0].start_sect = -1;	/* marks "no partition scan" */
+	resetup_one_dev(HWIF(drive)->gd, drive->select.b.unit);
+
+	drive->busy = 0;
+	wake_up(&drive->wqueue);	/* release anyone stuck in ide_open() */
+	return 0;
+}
+
+/*
+ * write_fs_long() stores a single long result at a user-supplied
+ * address, after validating the pointer.  Helper for the various
+ * HDIO_GET_* / BLKRAGET / BLKGETSIZE ioctls below.
+ *
+ * Returns 0 on success, -EINVAL for a NULL address, or the error
+ * returned by verify_area().
+ *
+ * Fix: the value was previously cast to (unsigned) before the store,
+ * needlessly truncating it to unsigned int; put_user() sizes the
+ * store from the pointer type (long *), so pass the value through
+ * unmodified.
+ */
+static int write_fs_long (unsigned long useraddr, long value)
+{
+	int err;
+
+	if (NULL == (long *)useraddr)
+		return -EINVAL;
+	if ((err = verify_area(VERIFY_WRITE, (long *)useraddr, sizeof(long))))
+		return err;
+	put_user(value, (long *) useraddr);
+	return 0;
+}
+
+/*
+ * ide_ioctl() is the block-device ioctl entry point for IDE drives:
+ * geometry/identify queries, per-drive setting get/set, partition
+ * re-reading, and raw drive commands.  Commands not recognized here
+ * are handed to the media-specific ioctl handler, if any.
+ */
+static int ide_ioctl (struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int err;
+	ide_drive_t *drive;
+	unsigned long flags;
+	struct request rq;
+
+	if (!inode || !(inode->i_rdev))
+		return -EINVAL;
+	if ((drive = get_info_ptr(inode->i_rdev)) == NULL)
+		return -ENODEV;
+	ide_init_drive_cmd (&rq);	/* pre-built for the drive-cmd paths below */
+	switch (cmd) {
+		case HDIO_GETGEO:	/* report the BIOS (logical) geometry */
+		{
+			struct hd_geometry *loc = (struct hd_geometry *) arg;
+			if (!loc || (drive->media != ide_disk && drive->media != ide_floppy)) return -EINVAL;
+#ifdef MACH
+			/* Mach: the caller's buffer is directly addressable */
+			loc->heads = drive->bios_head;
+			loc->sectors = drive->bios_sect;
+			loc->cylinders = drive->bios_cyl;
+			loc->start
+			  = (drive->part[MINOR(inode->i_rdev)&PARTN_MASK]
+			     .start_sect);
+#else
+			err = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+			if (err) return err;
+			put_user(drive->bios_head, (byte *) &loc->heads);
+			put_user(drive->bios_sect, (byte *) &loc->sectors);
+			put_user(drive->bios_cyl, (unsigned short *) &loc->cylinders);
+			put_user((unsigned)drive->part[MINOR(inode->i_rdev)&PARTN_MASK].start_sect,
+				(unsigned long *) &loc->start);
+#endif
+			return 0;
+		}
+		case BLKFLSBUF:	/* flush and discard cached buffers */
+			if (!suser()) return -EACCES;
+			fsync_dev(inode->i_rdev);
+			invalidate_buffers(inode->i_rdev);
+			return 0;
+
+		case BLKRASET:	/* set per-major read-ahead (sectors) */
+			if (!suser()) return -EACCES;
+			if(arg > 0xff) return -EINVAL;
+			read_ahead[MAJOR(inode->i_rdev)] = arg;
+			return 0;
+
+		case BLKRAGET:
+			return write_fs_long(arg, read_ahead[MAJOR(inode->i_rdev)]);
+
+		case BLKGETSIZE:   /* Return device size */
+			return write_fs_long(arg, drive->part[MINOR(inode->i_rdev)&PARTN_MASK].nr_sects);
+		case BLKRRPART: /* Re-read partition tables */
+			if (!suser()) return -EACCES;
+			return revalidate_disk(inode->i_rdev);
+
+		case HDIO_GET_KEEPSETTINGS:
+			return write_fs_long(arg, drive->keep_settings);
+
+		case HDIO_GET_UNMASKINTR:
+			return write_fs_long(arg, drive->unmask);
+
+		case HDIO_GET_DMA:
+			return write_fs_long(arg, drive->using_dma);
+
+		case HDIO_GET_32BIT:
+			return write_fs_long(arg, drive->io_32bit);
+
+		case HDIO_GET_MULTCOUNT:
+			return write_fs_long(arg, drive->mult_count);
+
+		case HDIO_GET_IDENTITY:	/* copy out the raw identify data */
+			if (!arg || (MINOR(inode->i_rdev) & PARTN_MASK))
+				return -EINVAL;
+			if (drive->id == NULL)
+				return -ENOMSG;
+			err = verify_area(VERIFY_WRITE, (char *)arg, sizeof(*drive->id));
+			if (!err)
+				memcpy_tofs((char *)arg, (char *)drive->id, sizeof(*drive->id));
+			return err;
+
+			case HDIO_GET_NOWERR:
+			return write_fs_long(arg, drive->bad_wstat == BAD_R_STAT);
+
+		case HDIO_SET_DMA:
+			if (!suser()) return -EACCES;
+#ifdef CONFIG_BLK_DEV_IDECD
+			if (drive->media == ide_cdrom)
+				return -EPERM;
+#endif /* CONFIG_BLK_DEV_IDECD */
+			if (!drive->id || !(drive->id->capability & 1) || !HWIF(drive)->dmaproc)
+				return -EPERM;
+			/* fall through: shares validation with the other SET_* cmds */
+		case HDIO_SET_KEEPSETTINGS:
+		case HDIO_SET_UNMASKINTR:
+		case HDIO_SET_NOWERR:
+			if (arg > 1)
+				return -EINVAL;
+			/* fall through */
+		case HDIO_SET_32BIT:
+			if (!suser()) return -EACCES;
+			if ((MINOR(inode->i_rdev) & PARTN_MASK))
+				return -EINVAL;
+			save_flags(flags);
+			cli();	/* settings are read from irq-driven code */
+			switch (cmd) {
+				case HDIO_SET_DMA:
+					if (!(HWIF(drive)->dmaproc)) {
+						restore_flags(flags);
+						return -EPERM;
+					}
+					drive->using_dma = arg;
+					break;
+				case HDIO_SET_KEEPSETTINGS:
+					drive->keep_settings = arg;
+					break;
+				case HDIO_SET_UNMASKINTR:
+					if (arg && drive->no_unmask) {
+						restore_flags(flags);
+						return -EPERM;
+					}
+					drive->unmask = arg;
+					break;
+				case HDIO_SET_NOWERR:
+					drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
+					break;
+				case HDIO_SET_32BIT:
+					if (arg > (1 + (SUPPORT_VLB_SYNC<<1))) {
+						restore_flags(flags);
+						return -EINVAL;
+					}
+					if (arg && drive->no_io_32bit) {
+						restore_flags(flags);
+						return -EPERM;
+					}
+					drive->io_32bit = arg;
+#ifdef CONFIG_BLK_DEV_DTC2278
+					/* DTC2278: both units share the 32-bit I/O setting */
+					if (HWIF(drive)->chipset == ide_dtc2278)
+						HWIF(drive)->drives[!drive->select.b.unit].io_32bit = arg;
+#endif /* CONFIG_BLK_DEV_DTC2278 */
+					break;
+			}
+			restore_flags(flags);
+			return 0;
+
+		case HDIO_SET_MULTCOUNT:	/* queue a SET MULTIPLE MODE */
+			if (!suser()) return -EACCES;
+			if (MINOR(inode->i_rdev) & PARTN_MASK)
+				return -EINVAL;
+			if (drive->id && arg > drive->id->max_multsect)
+				return -EINVAL;
+			save_flags(flags);
+			cli();
+			if (drive->special.b.set_multmode) {
+				restore_flags(flags);
+				return -EBUSY;
+			}
+			drive->mult_req = arg;
+			drive->special.b.set_multmode = 1;
+			restore_flags(flags);
+			/* empty drive cmd just to force the "special" to execute */
+			(void) ide_do_drive_cmd (drive, &rq, ide_wait);
+			return (drive->mult_count == arg) ? 0 : -EIO;
+
+		case HDIO_DRIVE_CMD:	/* pass a raw taskfile command through */
+		{
+			byte args[4], *argbuf = args;
+			int argsize = 4;
+			if (!suser() || securelevel > 0) return -EACCES;
+			if (NULL == (void *) arg) {
+				err = ide_do_drive_cmd(drive, &rq, ide_wait);
+			} else if (!(err = verify_area(VERIFY_READ,(void *)arg, 4))) {
+				memcpy_fromfs(args, (void *)arg, 4);
+				/* args[3] = sector count to transfer back */
+				if (args[3]) {
+					argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
+					argbuf = kmalloc(argsize, GFP_KERNEL);
+					if (argbuf == NULL)
+						return -ENOMEM;
+					argbuf[0] = args[0];
+					argbuf[1] = args[1];
+					argbuf[2] = args[2];
+					argbuf[3] = args[3];
+				}
+				if (!(err = verify_area(VERIFY_WRITE,(void *)arg, argsize))) {
+					rq.buffer = (char *)argbuf;
+					err = ide_do_drive_cmd(drive, &rq, ide_wait);
+					memcpy_tofs((void *)arg, argbuf, argsize);
+				}
+				if (argsize > 4)
+					kfree(argbuf);
+			}
+			return err;
+		}
+		case HDIO_SET_PIO_MODE:	/* queue a PIO-mode retune */
+			if (!suser()) return -EACCES;
+			if (MINOR(inode->i_rdev) & PARTN_MASK)
+				return -EINVAL;
+			if (!HWIF(drive)->tuneproc)
+				return -ENOSYS;
+			save_flags(flags);
+			cli();
+			if (drive->special.b.set_tune) {
+				restore_flags(flags);
+				return -EBUSY;
+			}
+			drive->tune_req = (byte) arg;
+			drive->special.b.set_tune = 1;
+			restore_flags(flags);
+			(void) ide_do_drive_cmd (drive, &rq, ide_wait);
+			return 0;
+
+		RO_IOCTLS(inode->i_rdev, arg);	/* macro expanding to generic read-only ioctl cases */
+
+		default:
+#ifdef CONFIG_BLK_DEV_IDECD
+			if (drive->media == ide_cdrom)
+				return ide_cdrom_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+			if (drive->media == ide_tape)
+				return idetape_blkdev_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+			if (drive->media == ide_floppy)
+				return idefloppy_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+			if (drive->media == ide_scsi)
+				return idescsi_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+			return -EPERM;
+	}
+}
+
+/*
+ * ide_check_media_change() reports whether the medium in a drive may
+ * have changed since the last open.  Media-specific handlers get first
+ * say; otherwise any removable drive is conservatively reported as
+ * "changed".
+ */
+static int ide_check_media_change (kdev_t i_rdev)
+{
+	ide_drive_t *drive;
+
+	if ((drive = get_info_ptr(i_rdev)) == NULL)
+		return -ENODEV;
+#ifdef CONFIG_BLK_DEV_IDECD
+	if (drive->media == ide_cdrom)
+		return ide_cdrom_check_media_change (drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+	if (drive->media == ide_floppy)
+		return idefloppy_media_change (drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+	if (drive->removable) /* for disks */
+		return 1;	/* always assume it was changed */
+	return 0;
+}
+
+/*
+ * ide_fixstring() cleans up an ATA/ATAPI identify string in place:
+ * optionally swaps each 16-bit word from big-endian to host order,
+ * strips leading blanks, collapses runs of internal blanks to a single
+ * blank, drops trailing blanks, and NUL-pads the remainder of the
+ * buffer.  bytecount is rounded down to an even value.
+ */
+void ide_fixstring (byte *s, const int bytecount, const int byteswap)
+{
+	byte *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */
+
+	if (byteswap) {
+		/* convert from big-endian to host byte order */
+		for (p = end ; p != s;) {
+			unsigned short *pp = (unsigned short *) (p -= 2);
+			*pp = ntohs(*pp);
+		}
+	}
+	/* p == s again here; the compaction below writes behind the read
+	 * cursor s, so it is safe in place */
+
+	/* strip leading blanks */
+	while (s != end && *s == ' ')
+		++s;
+
+	/* compress internal blanks and strip trailing blanks */
+	while (s != end && *s) {
+		/* copy *s unless it is a blank followed by another blank,
+		 * a NUL, or the end of the buffer */
+		if (*s++ != ' ' || (s != end && *s && *s != ' '))
+			*p++ = *(s-1);
+	}
+
+	/* wipe out trailing garbage */
+	while (p != end)
+		*p++ = '\0';
+}
+
+/*
+ * do_identify() consumes the 512 bytes of identify data a drive has
+ * ready after a successful WIN_IDENTIFY / WIN_PIDENTIFY: it fixes
+ * byte order, cleans up the id strings, classifies ATAPI devices by
+ * type, and (for disks) establishes geometry, capacity and multmode/
+ * DMA settings.  Entered with irqs disabled; re-enables them once the
+ * data register has been drained.
+ */
+static inline void do_identify (ide_drive_t *drive, byte cmd)
+{
+	int bswap;
+	struct hd_driveid *id;
+	unsigned long capacity, check;
+
+	/* NOTE(review): kmalloc() result is used without a NULL check; an
+	 * allocation failure here would dereference NULL in
+	 * ide_input_data() — confirm against later upstream versions,
+	 * which drain the data and bail out instead. */
+	id = drive->id = kmalloc (SECTOR_WORDS*4, GFP_KERNEL);
+	ide_input_data(drive, id, SECTOR_WORDS);/* read 512 bytes of id info */
+	sti();
+
+#if defined (CONFIG_SCSI_EATA_DMA) || defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA)
+	/*
+	 * EATA SCSI controllers do a hardware ATA emulation:
+	 * Ignore them if there is a driver for them available.
+	 */
+	if ((id->model[0] == 'P' && id->model[1] == 'M')
+	 || (id->model[0] == 'S' && id->model[1] == 'K')) {
+		printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model);
+		drive->present = 0;
+		return;
+	}
+#endif
+
+	/*
+	 *  WIN_IDENTIFY returns little-endian info,
+	 *  WIN_PIDENTIFY *usually* returns little-endian info.
+	 */
+	bswap = 1;
+	if (cmd == WIN_PIDENTIFY) {
+		if ((id->model[0] == 'N' && id->model[1] == 'E') /* NEC */
+		 || (id->model[0] == 'F' && id->model[1] == 'X') /* Mitsumi */
+		 || (id->model[0] == 'P' && id->model[1] == 'i'))/* Pioneer */
+			bswap = 0;	/* Vertos drives may still be weird */
+	}
+	ide_fixstring (id->model,     sizeof(id->model),     bswap);
+	ide_fixstring (id->fw_rev,    sizeof(id->fw_rev),    bswap);
+	ide_fixstring (id->serial_no, sizeof(id->serial_no), bswap);
+
+	/* Exabyte "nest": left alone here; probe_for_drive() will call
+	 * enable_nest() for it */
+	if (strstr((char *)id->model, "E X A B Y T E N E S T"))
+		return;
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+	/*
+	 * Check for an ATAPI device
+	 */
+	if (cmd == WIN_PIDENTIFY) {
+		byte type = (id->config >> 8) & 0x1f;	/* ATAPI device type */
+		printk("%s: %s, ATAPI ", drive->name, id->model);
+#ifdef CONFIG_BLK_DEV_PROMISE
+		if (HWIF(drive)->is_promise2) {
+			printk(" -- not supported on 2nd Promise port\n");
+			drive->present = 0;
+			return;
+		}
+#endif /* CONFIG_BLK_DEV_PROMISE */
+		if (!drive->ide_scsi) switch (type) {
+			case 0:		/* direct-access: floppy-class device */
+				if (!strstr((char *)id->model, "oppy") &&
+				    !strstr((char *)id->model, "poyp") &&
+				    !strstr((char *)id->model, "ZIP"))
+					printk("cdrom or floppy?, assuming ");
+				if (drive->media != ide_cdrom &&
+				    !strstr((char *)id->model, "CD-ROM")) {
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+					printk("FLOPPY drive\n");
+					drive->media = ide_floppy;
+					if (idefloppy_identify_device(drive, id))
+						drive->present = 1;
+					return;
+#else
+					printk("FLOPPY ");
+					break;
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+				}
+				/* Early cdrom models used zero */
+				/* fall through */
+			case 5:		/* CD-ROM */
+#ifdef CONFIG_BLK_DEV_IDECD
+				printk ("CDROM drive\n");
+				drive->media = ide_cdrom;
+				drive->present = 1;
+				drive->removable = 1;
+				return;
+#else
+				printk ("CDROM ");
+				break;
+#endif /* CONFIG_BLK_DEV_IDECD */
+			case 1:		/* sequential-access: tape */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+				printk ("TAPE drive");
+				if (idetape_identify_device (drive,id)) {
+					drive->media = ide_tape;
+					drive->present = 1;
+					drive->removable = 1;
+					if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL && !drive->nodma) {
+						if (!HWIF(drive)->dmaproc(ide_dma_check, drive))
+							printk(", DMA");
+					}
+					printk("\n");
+				}
+				else {
+					drive->present = 0;
+					printk ("\nide-tape: the tape is not supported by this version of the driver\n");
+				}
+				return;
+#else
+				printk ("TAPE ");
+				break;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+			default:
+				drive->present = 0;
+				printk("Type %d - Unknown device\n", type);
+				return;
+		}
+		/* reached via the "break" cases above: no native driver built */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+		printk("drive - enabling SCSI emulation\n");
+		drive->media = ide_scsi;
+		drive->present = 1;
+		idescsi_setup(drive);
+#else
+		drive->present = 0;
+		printk("- not supported by this kernel\n");
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+		return;
+	}
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+	/* check for removable disks (eg. SYQUEST), ignore 'WD' drives */
+	if (id->config & (1<<7)) {	/* removable disk ? */
+		if (id->model[0] != 'W' || id->model[1] != 'D')
+			drive->removable = 1;
+	}
+
+	/* SunDisk drives: treat as non-removable, force one unit */
+	if (id->model[0] == 'S' && id->model[1] == 'u') {
+		drive->removable = 0;
+		if (drive->select.all & (1<<4)) {
+			drive->present = 0;	/* no slave unit on SunDisk */
+			return;
+		}
+	}
+
+	drive->media = ide_disk;
+	/* Extract geometry if we did not already have one for the drive */
+	if (!drive->present) {
+		drive->present = 1;
+		drive->cyl     = drive->bios_cyl  = id->cyls;
+		drive->head    = drive->bios_head = id->heads;
+		drive->sect    = drive->bios_sect = id->sectors;
+	}
+	/* Handle logical geometry translation by the drive */
+	if ((id->field_valid & 1) && id->cur_cyls && id->cur_heads
+	 && (id->cur_heads <= 16) && id->cur_sectors) {
+		/*
+		 * Extract the physical drive geometry for our use.
+		 * Note that we purposely do *not* update the bios info.
+		 * This way, programs that use it (like fdisk) will
+		 * still have the same logical view as the BIOS does,
+		 * which keeps the partition table from being screwed.
+		 *
+		 * An exception to this is the cylinder count,
+		 * which we reexamine later on to correct for 1024 limitations.
+		 */
+		drive->cyl  = id->cur_cyls;
+		drive->head = id->cur_heads;
+		drive->sect = id->cur_sectors;
+
+		/* check for word-swapped "capacity" field in id information */
+		capacity = drive->cyl * drive->head * drive->sect;
+		check = (id->cur_capacity0 << 16) | id->cur_capacity1;
+		if (check == capacity) {	/* was it swapped? */
+			/* yes, bring it into little-endian order: */
+			id->cur_capacity0 = (capacity >>  0) & 0xffff;
+			id->cur_capacity1 = (capacity >> 16) & 0xffff;
+		}
+	}
+	/* Use physical geometry if what we have still makes no sense */
+	if ((!drive->head || drive->head > 16) &&
+	    id->heads && id->heads <= 16) {
+		drive->cyl  = id->cyls;
+		drive->head = id->heads;
+		drive->sect = id->sectors;
+	}
+
+	/* calculate drive capacity, and select LBA if possible */
+	capacity = current_capacity (drive);
+
+	/*
+	 * if possible, give fdisk access to more of the drive,
+	 * by correcting bios_cyls:
+	 */
+	if (capacity > drive->bios_cyl * drive->bios_head * drive->bios_sect
+	    && !drive->forced_geom && drive->bios_sect && drive->bios_head) {
+		int cyl = (capacity / drive->bios_sect) / drive->bios_head;
+		if (cyl <= 65535)
+			drive->bios_cyl = cyl;
+		else {
+			/* OK until 539 GB */
+			drive->bios_sect = 63;
+			drive->bios_head = 255;
+			drive->bios_cyl = capacity / (63*255);
+		}
+	}
+
+	/* BMI enhanced-IDE drives in LBA mode: suppress geometry display */
+	if (!strncmp((char *)id->model, "BMI ", 4) &&
+	    strstr((char *)id->model, " ENHANCED IDE ") &&
+	    drive->select.b.lba)
+		drive->no_geom = 1;
+
+	printk ("%s: %.40s, %ldMB w/%dkB Cache, CHS=%d/%d/%d",
+	 drive->name, id->model, current_capacity(drive)/2048L, id->buf_size/2,
+	 drive->bios_cyl, drive->bios_head, drive->bios_sect);
+
+	/* request multmode if the drive supports it (processed later via
+	 * the "special" mechanism) */
+	drive->mult_count = 0;
+	if (id->max_multsect) {
+		drive->mult_req = INITIAL_MULT_COUNT;
+		if (drive->mult_req > id->max_multsect)
+			drive->mult_req = id->max_multsect;
+		if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect))
+			drive->special.b.set_multmode = 1;
+	}
+	if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL && !drive->nodma) {
+		if (!(HWIF(drive)->dmaproc(ide_dma_check, drive))) {
+			if ((id->field_valid & 4) && (id->dma_ultra & (id->dma_ultra >> 8) & 7))
+				printk(", UDMA");
+			else
+				printk(", DMA");
+		}
+	}
+	printk("\n");
+}
+
+/*
+ * Delay for *at least* 50ms.  As we don't know how much time is left
+ * until the next tick occurs, we wait an extra tick to be safe.
+ * This is used only during the probing/polling for drives at boot time.
+ */
+static void delay_50ms (void)
+{
+	unsigned long timer = jiffies + ((HZ + 19)/20) + 1;	/* ceil(HZ/20)+1 ticks */
+	/* NOTE(review): the plain ">" comparison misbehaves if jiffies
+	 * wraps during the wait — harmless for boot-time probing, where
+	 * this is used. */
+	while (timer > jiffies);	/* busy-wait */
+}
+
+/*
+ * try_to_identify() sends an ATA(PI) IDENTIFY request to a drive
+ * and waits for a response.  It also monitors irqs while this is
+ * happening, in hope of automatically determining which one is
+ * being used by the interface.
+ *
+ * Returns:	0  device was identified
+ *		1  device timed-out (no response to identify request)
+ *		2  device aborted the command (refused to identify itself),
+ *		   or the interface has no IRQ assigned (legacy IRQ probing
+ *		   is disabled in this port)
+ */
+static int try_to_identify (ide_drive_t *drive, byte cmd)
+{
+	int hd_status, rc;
+	unsigned long timeout;
+	unsigned long irqs_on = 0;	/* stays 0: legacy IRQ probing disabled */
+	int irq_off;
+
+	if (!HWIF(drive)->irq) {	/* interface has no IRQ assigned yet? */
+		/*
+		 * Legacy IRQ auto-probing is intentionally disabled here:
+		 * bail out instead of touching probe_irq_on()/probe_irq_off().
+		 * (Fix: the probing statements that used to sit, unreachably,
+		 * after this return have been removed, and the stray ')'
+		 * in the message corrected.)
+		 */
+		printk("%s: Not probing legacy IRQs\n", drive->name);
+		return 2;
+	}
+
+	delay_50ms();				/* take a deep breath */
+	if ((IN_BYTE(IDE_ALTSTATUS_REG) ^ IN_BYTE(IDE_STATUS_REG)) & ~INDEX_STAT) {
+		printk("%s: probing with STATUS instead of ALTSTATUS\n", drive->name);
+		hd_status = IDE_STATUS_REG;	/* ancient Seagate drives */
+	} else
+		hd_status = IDE_ALTSTATUS_REG;	/* use non-intrusive polling */
+
+#if CONFIG_BLK_DEV_PROMISE
+	if (IS_PROMISE_DRIVE) {
+		if (promise_cmd(drive,PROMISE_IDENTIFY)) {
+			if (irqs_on)
+				(void) probe_irq_off(irqs_on);
+			return 1;
+		}
+	} else
+#endif /* CONFIG_BLK_DEV_PROMISE */
+	OUT_BYTE(cmd,IDE_COMMAND_REG);		/* ask drive for ID */
+	timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
+	timeout += jiffies;
+	do {
+		if (jiffies > timeout) {
+			if (irqs_on)
+				(void) probe_irq_off(irqs_on);
+			return 1;	/* drive timed-out */
+		}
+		delay_50ms();		/* give drive a breather */
+	} while (IN_BYTE(hd_status) & BUSY_STAT);
+
+	delay_50ms();		/* wait for IRQ and DRQ_STAT */
+	if (OK_STAT(GET_STAT(),DRQ_STAT,BAD_R_STAT)) {
+		unsigned long flags;
+		save_flags(flags);
+		cli();			/* some systems need this */
+		do_identify(drive, cmd); /* drive returned ID */
+		rc = 0;			/* drive responded with ID */
+		(void) GET_STAT();	/* clear drive IRQ */
+		restore_flags(flags);
+	} else
+		rc = 2;			/* drive refused ID */
+	if (!HWIF(drive)->irq) {
+		/* Unreachable while the early return above is in place;
+		 * kept for the day legacy IRQ probing is re-enabled. */
+		irq_off = probe_irq_off(irqs_on);	/* get our irq number */
+		if (irq_off > 0) {
+			HWIF(drive)->irq = irq_off;	/* save it for later */
+			irqs_on = probe_irq_on();
+			OUT_BYTE(drive->ctl|2,IDE_CONTROL_REG); /* mask device irq */
+			udelay(5);
+			(void) probe_irq_off(irqs_on);
+			(void) probe_irq_off(probe_irq_on()); /* clear self-inflicted irq */
+			(void) GET_STAT();		/* clear drive IRQ */
+
+		} else {	/* Mmmm.. multiple IRQs.. don't know which was ours */
+			printk("%s: IRQ probe failed (%d)\n", drive->name, irq_off);
+#ifdef CONFIG_BLK_DEV_CMD640
+#ifdef CMD640_DUMP_REGS
+			if (HWIF(drive)->chipset == ide_cmd640) {
+				printk("%s: Hmmm.. probably a driver problem.\n", drive->name);
+				CMD640_DUMP_REGS;
+			}
+#endif /* CMD640_DUMP_REGS */
+#endif /* CONFIG_BLK_DEV_CMD640 */
+		}
+	}
+	return rc;
+}
+
+/*
+ * do_probe() has the difficult job of finding a drive if it exists,
+ * without getting hung up if it doesn't exist, without trampling on
+ * ethernet cards, and without leaving any IRQs dangling to haunt us later.
+ *
+ * If a drive is "known" to exist (from CMOS or kernel parameters),
+ * but does not respond right away, the probe will "hang in there"
+ * for the maximum wait time (about 30 seconds), otherwise it will
+ * exit much more quickly.
+ *
+ * Returns:	0  device was identified
+ *		1  device timed-out (no response to identify request)
+ *		2  device aborted the command (refused to identify itself)
+ *		3  bad status from device (possible for ATAPI drives)
+ *		4  probe was not attempted because failure was obvious
+ */
+static int do_probe (ide_drive_t *drive, byte cmd)
+{
+	int rc;
+	ide_hwif_t *hwif = HWIF(drive);
+	unsigned long timeout;
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+	if (drive->present) {	/* avoid waiting for inappropriate probes */
+		if ((drive->media != ide_disk) && (cmd == WIN_IDENTIFY))
+			return 4;
+	}
+#endif	/* CONFIG_BLK_DEV_IDEATAPI */
+#ifdef DEBUG
+	printk("probing for %s: present=%d, media=%d, probetype=%s\n",
+		drive->name, drive->present, drive->media,
+		(cmd == WIN_IDENTIFY) ? "ATA" : "ATAPI");
+#endif
+	SELECT_DRIVE(hwif,drive);
+	delay_50ms();
+	/* if the select register does not read back, there is no device
+	 * (and possibly no interface at all) at this address */
+	if (IN_BYTE(IDE_SELECT_REG) != drive->select.all && !drive->present) {
+		OUT_BYTE(0xa0,IDE_SELECT_REG);	/* exit with drive0 selected */
+		delay_50ms();		/* allow BUSY_STAT to assert & clear */
+		return 3;    /* no i/f present: avoid killing ethernet cards */
+	}
+
+	if (OK_STAT(GET_STAT(),READY_STAT,BUSY_STAT)
+	 || drive->present || cmd == WIN_PIDENTIFY)
+	{
+		if ((rc = try_to_identify(drive,cmd)))   /* send cmd and wait */
+			rc = try_to_identify(drive,cmd); /* failed: try again */
+		if (rc == 1 && cmd == WIN_PIDENTIFY && drive->autotune != 2) {
+			/* ATAPI device not answering: soft-reset it and retry */
+			printk("%s: no response (status = 0x%02x), resetting drive\n", drive->name, GET_STAT());
+			delay_50ms();
+			OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+			delay_50ms();
+			OUT_BYTE(WIN_SRST, IDE_COMMAND_REG);
+			timeout = jiffies;
+			while ((GET_STAT() & BUSY_STAT) && jiffies < timeout + WAIT_WORSTCASE)
+				delay_50ms();
+			rc = try_to_identify(drive, cmd);
+		}
+		if (rc == 1)
+			printk("%s: no response (status = 0x%02x)\n", drive->name, GET_STAT());
+		(void) GET_STAT();		/* ensure drive irq is clear */
+	} else {
+		rc = 3;				/* not present or maybe ATAPI */
+	}
+	if (drive->select.b.unit != 0) {
+		OUT_BYTE(0xa0,IDE_SELECT_REG);	/* exit with drive0 selected */
+		delay_50ms();
+		(void) GET_STAT();		/* ensure drive irq is clear */
+	}
+	return rc;
+}
+
+/*
+ * enable_nest() sends the EXABYTE_ENABLE_NEST command to an Exabyte
+ * "nest" device (detected via its identify model string), waits for
+ * the command to complete, and then re-probes for the drive that the
+ * nest exposes.
+ */
+static void enable_nest (ide_drive_t *drive)
+{
+	unsigned long timeout;
+
+	printk("%s: enabling %s -- ", HWIF(drive)->name, drive->id->model);
+	SELECT_DRIVE(HWIF(drive), drive);
+	delay_50ms();
+	OUT_BYTE(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG);
+	timeout = jiffies + WAIT_WORSTCASE;
+	/* poll until the drive drops BUSY, or give up */
+	do {
+		if (jiffies > timeout) {
+			printk("failed (timeout)\n");
+			return;
+		}
+		delay_50ms();
+	} while (GET_STAT() & BUSY_STAT);
+	delay_50ms();
+	if (!OK_STAT(GET_STAT(), 0, BAD_STAT))
+		printk("failed (status = 0x%02x)\n", GET_STAT());
+	else
+		printk("success\n");
+	/* re-probe: the nested device should now answer identify */
+	if (do_probe(drive, WIN_IDENTIFY) >= 2) {	/* if !(success||timed-out) */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+		(void) do_probe(drive, WIN_PIDENTIFY);	/* look for ATAPI device */
+#endif	/* CONFIG_BLK_DEV_IDEATAPI */
+	}
+}
+
+/*
+ * probe_for_drive() tests for existence of a given drive using do_probe().
+ *
+ * Returns:	0  no device was found
+ *		1  device was found (note: drive->present might still be 0)
+ */
+static inline byte probe_for_drive (ide_drive_t *drive)
+{
+	if (drive->noprobe)			/* skip probing? */
+		return drive->present;
+	/* try ATA identify first; fall back to ATAPI identify */
+	if (do_probe(drive, WIN_IDENTIFY) >= 2) { /* if !(success||timed-out) */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+		(void) do_probe(drive, WIN_PIDENTIFY); /* look for ATAPI device */
+#endif	/* CONFIG_BLK_DEV_IDEATAPI */
+	}
+	/* Exabyte nest device: enable it and probe the nested drive */
+	if (drive->id && strstr((char *)drive->id->model, "E X A B Y T E N E S T"))
+		enable_nest(drive);
+	if (!drive->present)
+		return 0;			/* drive not found */
+	if (drive->id == NULL) {		/* identification failed? */
+		/* non-identifying disks (e.g. from CMOS geometry) are kept */
+		if (drive->media == ide_disk) {
+			printk ("%s: non-IDE drive, CHS=%d/%d/%d\n",
+			 drive->name, drive->cyl, drive->head, drive->sect);
+		}
+#ifdef CONFIG_BLK_DEV_IDECD
+		else if (drive->media == ide_cdrom) {
+			printk("%s: ATAPI cdrom (?)\n", drive->name);
+		}
+#endif	/* CONFIG_BLK_DEV_IDECD */
+		else {
+			drive->present = 0;	/* nuke it */
+		}
+	}
+	return 1;	/* drive was found */
+}
+
+/*
+ * We query CMOS about hard disks : it could be that we have a SCSI/ESDI/etc
+ * controller that is BIOS compatible with ST-506, and thus showing up in our
+ * BIOS table, but not register compatible, and therefore not present in CMOS.
+ *
+ * Furthermore, we will assume that our ST-506 drives <if any> are the primary
+ * drives in the system -- the ones reflected as drive 1 or 2.  The first
+ * drive is stored in the high nibble of CMOS byte 0x12, the second in the low
+ * nibble.  This will be either a 4 bit drive type or 0xf indicating use byte
+ * 0x19 for an 8 bit type, drive 1, 0x1a for drive 2 in CMOS.  A non-zero value
+ * means we have an AT controller hard disk for that drive.
+ *
+ * Of course, there is no guarantee that either drive is actually on the
+ * "primary" IDE interface, but we don't bother trying to sort that out here.
+ * If a drive is not actually on the primary interface, then these parameters
+ * will be ignored.  This results in the user having to supply the logical
+ * drive geometry as a boot parameter for each drive not on the primary i/f.
+ *
+ * The only "perfect" way to handle this would be to modify the setup.[cS] code
+ * to do BIOS calls Int13h/Fn08h and Int13h/Fn48h to get all of the drive info
+ * for us during initialization.  I have the necessary docs -- any takers?  -ml
+ */
+static void probe_cmos_for_drives (ide_hwif_t *hwif)
+{
+#ifdef __i386__
+	extern struct drive_info_struct drive_info;
+	byte cmos_disks, *BIOS = (byte *) &drive_info;	/* BIOS drive table, 16 bytes per drive */
+	int unit;
+
+#ifdef CONFIG_BLK_DEV_PROMISE
+	if (hwif->is_promise2)
+		return;
+#endif /* CONFIG_BLK_DEV_PROMISE */
+	outb_p(0x12,0x70);		/* specify CMOS address 0x12 */
+	cmos_disks = inb_p(0x71);	/* read the data from 0x12 */
+	/* Extract drive geometry from CMOS+BIOS if not already setup */
+	for (unit = 0; unit < MAX_DRIVES; ++unit) {
+		ide_drive_t *drive = &hwif->drives[unit];
+		/* high nibble = drive 0, low nibble = drive 1 */
+		if ((cmos_disks & (0xf0 >> (unit*4))) && !drive->present && !drive->nobios) {
+			unsigned short cyl = *(unsigned short *)BIOS;
+			unsigned char head = *(BIOS+2);
+			unsigned char sect = *(BIOS+14);
+			unsigned char ctl  = *(BIOS+8);
+			/* sanity-check the BIOS values before trusting them */
+			if (cyl > 0 && head > 0 && sect > 0 && sect < 64 && head < 255) {
+				drive->cyl   = drive->bios_cyl  = cyl;
+				drive->head  = drive->bios_head = head;
+				drive->sect  = drive->bios_sect = sect;
+				drive->ctl   = ctl;
+				drive->present = 1;
+				printk("hd%d: got CHS=%d/%d/%d CTL=%x from BIOS\n",
+				       unit, cyl, head, sect, ctl);
+
+			} else {
+				printk("hd%d: CHS=%d/%d/%d CTL=%x from BIOS ignored\n",
+				       unit, cyl, head, sect, ctl);
+			}
+		}
+		BIOS += 16;	/* advance to next BIOS drive entry */
+	}
+#endif
+}
+
+/*
+ * This routine only knows how to look for drive units 0 and 1
+ * on an interface, so any setting of MAX_DRIVES > 2 won't work here.
+ *
+ * It also claims the interface's I/O regions once at least one drive
+ * has been found, and applies PIO auto-tuning where configured.
+ */
+static void probe_hwif (ide_hwif_t *hwif)
+{
+	unsigned int unit;
+
+	if (hwif->noprobe)
+		return;
+	if (hwif->io_base == HD_DATA)
+		probe_cmos_for_drives (hwif);	/* primary i/f: consult CMOS/BIOS */
+#if CONFIG_BLK_DEV_PROMISE
+	if (!hwif->is_promise2 &&
+	   (check_region(hwif->io_base,8) || check_region(hwif->ctl_port,1))) {
+#else
+	if (check_region(hwif->io_base,8) || check_region(hwif->ctl_port,1)) {
+#endif /* CONFIG_BLK_DEV_PROMISE */
+		/* ports claimed by someone else: drop any pre-set drives */
+		int msgout = 0;
+		for (unit = 0; unit < MAX_DRIVES; ++unit) {
+			ide_drive_t *drive = &hwif->drives[unit];
+			if (drive->present) {
+				drive->present = 0;
+				printk("%s: ERROR, PORTS ALREADY IN USE\n", drive->name);
+				msgout = 1;
+			}
+		}
+		if (!msgout)
+			printk("%s: ports already in use, skipping probe\n", hwif->name);
+	} else {
+		unsigned long flags;
+		save_flags(flags);
+
+		sti();	/* needed for jiffies and irq probing */
+		/*
+		 * Second drive should only exist if first drive was found,
+		 * but a lot of cdrom drives are configured as single slaves.
+		 */
+		for (unit = 0; unit < MAX_DRIVES; ++unit) {
+			ide_drive_t *drive = &hwif->drives[unit];
+			(void) probe_for_drive (drive);
+			/* reject disks with impossible head counts (non-LBA) */
+			if (drive->present && drive->media == ide_disk) {
+				if ((!drive->head || drive->head > 16) && !drive->select.b.lba) {
+					printk("%s: INVALID GEOMETRY: %d PHYSICAL HEADS?\n",
+					 drive->name, drive->head);
+					drive->present = 0;
+				}
+			}
+			/* first drive found: claim the interface's ports */
+			if (drive->present && !hwif->present) {
+				hwif->present = 1;
+				request_region(hwif->io_base,  8, hwif->name);
+				request_region(hwif->ctl_port, 1, hwif->name);
+			}
+		}
+		restore_flags(flags);
+		/* PIO auto-tune any non-tape drives that asked for it */
+		for (unit = 0; unit < MAX_DRIVES; ++unit) {
+			ide_drive_t *drive = &hwif->drives[unit];
+			if (drive->present && drive->media != ide_tape) {
+				ide_tuneproc_t *tuneproc = HWIF(drive)->tuneproc;
+				if (tuneproc != NULL && drive->autotune == 1)
+					tuneproc(drive, 255);	/* auto-tune PIO mode */
+			}
+		}
+	}
+}
+
+/*
+ * stridx() returns the offset of c within s,
+ * or -1 if c is '\0' or not found within s.
+ */
+static int stridx (const char *s, char c)
+{
+ /* strchr(s,'\0') would match the terminator, hence the extra test on c */
+ char *i = strchr(s, c);
+ return (i && c) ? i - s : -1;
+}
+
+/*
+ * match_parm() does parsing for ide_setup():
+ *
+ * 1. the first char of s must be '='.
+ * 2. if the remainder matches one of the supplied keywords,
+ * the index (1 based) of the keyword is negated and returned.
+ * 3. if the remainder is a series of no more than max_vals numbers
+ * separated by commas, the numbers are saved in vals[] and a
+ * count of how many were saved is returned. Base10 is assumed,
+ * and base16 is allowed when prefixed with "0x".
+ * 4. otherwise, zero is returned.
+ */
+static int match_parm (char *s, const char *keywords[], int vals[], int max_vals)
+{
+ static const char *decimal = "0123456789";
+ static const char *hex = "0123456789abcdef";
+ int i, n;
+
+ if (*s++ == '=') {
+ /*
+ * Try matching against the supplied keywords,
+ * and return -(index+1) if we match one
+ */
+ if (keywords != NULL) {
+ for (i = 0; *keywords != NULL; ++i) {
+ if (!strcmp(s, *keywords++))
+ return -(i+1);
+ }
+ }
+ /*
+ * Look for a series of no more than "max_vals"
+ * numeric values separated by commas, in base10,
+ * or base16 when prefixed with "0x".
+ * Return a count of how many were found.
+ */
+ for (n = 0; (i = stridx(decimal, *s)) >= 0;) {
+ vals[n] = i;
+ /* accumulate decimal digits */
+ while ((i = stridx(decimal, *++s)) >= 0)
+ vals[n] = (vals[n] * 10) + i;
+ /* "0x" prefix: value so far is 0, so restart in base 16 */
+ if (*s == 'x' && !vals[n]) {
+ while ((i = stridx(hex, *++s)) >= 0)
+ vals[n] = (vals[n] * 0x10) + i;
+ }
+ if (++n == max_vals)
+ break;
+ if (*s == ',')
+ ++s;
+ }
+ /* accept only if the entire string was consumed */
+ if (!*s)
+ return n;
+ }
+ return 0; /* zero = nothing matched */
+}
+
+/*
+ * ide_setup() gets called VERY EARLY during initialization,
+ * to handle kernel "command line" strings beginning with "hdx="
+ * or "ide". Here is the complete set currently supported:
+ *
+ * "hdx=" is recognized for all "x" from "a" to "h", such as "hdc".
+ * "idex=" is recognized for all "x" from "0" to "3", such as "ide1".
+ *
+ * "hdx=noprobe" : drive may be present, but do not probe for it
+ * "hdx=none" : drive is NOT present, ignore cmos and do not probe
+ * "hdx=nowerr" : ignore the WRERR_STAT bit on this drive
+ * "hdx=cdrom" : drive is present, and is a cdrom drive
+ * "hdx=cyl,head,sect" : disk drive is present, with specified geometry
+ * "hdx=autotune" : driver will attempt to tune interface speed
+ * to the fastest PIO mode supported,
+ * if possible for this drive only.
+ * Not fully supported by all chipset types,
+ * and quite likely to cause trouble with
+ * older/odd IDE drives.
+ * "hdx=nodma" : disallow DMA for the drive
+ *
+ * "idebus=xx" : inform IDE driver of VESA/PCI bus speed in MHz,
+ * where "xx" is between 20 and 66 inclusive,
+ * used when tuning chipset PIO modes.
+ * For PCI bus, 25 is correct for a P75 system,
+ * 30 is correct for P90,P120,P180 systems,
+ * and 33 is used for P100,P133,P166 systems.
+ * If in doubt, use idebus=33 for PCI.
+ * As for VLB, it is safest to not specify it.
+ *
+ * "idex=noprobe" : do not attempt to access/use this interface
+ * "idex=base" : probe for an interface at the addr specified,
+ * where "base" is usually 0x1f0 or 0x170
+ * and "ctl" is assumed to be "base"+0x206
+ * "idex=base,ctl" : specify both base and ctl
+ * "idex=base,ctl,irq" : specify base, ctl, and irq number
+ * "idex=autotune" : driver will attempt to tune interface speed
+ * to the fastest PIO mode supported,
+ * for all drives on this interface.
+ * Not fully supported by all chipset types,
+ * and quite likely to cause trouble with
+ * older/odd IDE drives.
+ * "idex=noautotune" : driver will NOT attempt to tune interface speed
+ * This is the default for most chipsets,
+ * except the cmd640.
+ * "idex=serialize" : do not overlap operations on idex and ide(x^1)
+ *
+ * The following are valid ONLY on ide0,
+ * and the defaults for the base,ctl ports must not be altered.
+ *
+ * "ide0=dtc2278" : probe/support DTC2278 interface
+ * "ide0=ht6560b" : probe/support HT6560B interface
+ * "ide0=cmd640_vlb" : *REQUIRED* for VLB cards with the CMD640 chip
+ * (not for PCI -- automatically detected)
+ * "ide0=qd6580" : probe/support qd6580 interface
+ * "ide0=ali14xx" : probe/support ali14xx chipsets (ALI M1439, M1443, M1445)
+ * "ide0=umc8672" : probe/support umc8672 chipsets
+ */
+void ide_setup (char *s)
+{
+ int i, vals[3];
+ ide_hwif_t *hwif;
+ ide_drive_t *drive;
+ unsigned int hw, unit;
+ /* Mach names drives "hd0".."hdN"; stock Linux uses "hda".."hdh" */
+#ifdef MACH
+ const char max_drive = '0' + ((MAX_HWIFS * MAX_DRIVES) - 1);
+#else
+ const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
+#endif
+ const char max_hwif = '0' + (MAX_HWIFS - 1);
+
+ printk("ide_setup: %s", s);
+ init_ide_data ();
+
+ /*
+ * Look for drive options: "hdx="
+ */
+#ifdef MACH
+ if (s[0] == 'h' && s[1] == 'd' && s[2] >= '0' && s[2] <= max_drive) {
+#else
+ if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
+#endif
+ const char *hd_words[] = {"none", "noprobe", "nowerr", "cdrom",
+ "serialize", "autotune", "noautotune",
+ "slow", "ide-scsi", "nodma", NULL};
+#ifdef MACH
+ unit = s[2] - '0';
+#else
+ unit = s[2] - 'a';
+#endif
+ /* map the linear drive number onto (interface, unit) */
+ hw = unit / MAX_DRIVES;
+ unit = unit % MAX_DRIVES;
+ hwif = &ide_hwifs[hw];
+ drive = &hwif->drives[unit];
+ switch (match_parm(&s[3], hd_words, vals, 3)) {
+ case -1: /* "none" */
+ drive->nobios = 1; /* drop into "noprobe" */
+ /* fall through */
+ case -2: /* "noprobe" */
+ drive->noprobe = 1;
+ goto done;
+ case -3: /* "nowerr" */
+ drive->bad_wstat = BAD_R_STAT;
+ hwif->noprobe = 0;
+ goto done;
+ case -4: /* "cdrom" */
+ drive->present = 1;
+ drive->media = ide_cdrom;
+ hwif->noprobe = 0;
+ goto done;
+ case -5: /* "serialize" */
+ printk(" -- USE \"ide%d=serialize\" INSTEAD", hw);
+ goto do_serialize;
+ case -6: /* "autotune" */
+ drive->autotune = 1;
+ goto done;
+ case -7: /* "noautotune" */
+ drive->autotune = 2;
+ goto done;
+ case -8: /* "slow" */
+ drive->slow = 1;
+ goto done;
+ case -9: /* "ide-scsi" */
+ drive->ide_scsi = 1;
+ goto done;
+ case -10: /* "nodma" */
+ drive->nodma = 1;
+ goto done;
+ case 3: /* cyl,head,sect */
+ drive->media = ide_disk;
+ drive->cyl = drive->bios_cyl = vals[0];
+ drive->head = drive->bios_head = vals[1];
+ drive->sect = drive->bios_sect = vals[2];
+ drive->present = 1;
+ drive->forced_geom = 1;
+ hwif->noprobe = 0;
+ goto done;
+ default:
+ goto bad_option;
+ }
+ }
+
+ if (s[0] != 'i' || s[1] != 'd' || s[2] != 'e')
+ goto bad_option;
+ /*
+ * Look for bus speed option: "idebus="
+ */
+ if (s[3] == 'b' && s[4] == 'u' && s[5] == 's') {
+ if (match_parm(&s[6], NULL, vals, 1) != 1)
+ goto bad_option;
+ if (vals[0] >= 20 && vals[0] <= 66)
+ idebus_parameter = vals[0];
+ else
+ printk(" -- BAD BUS SPEED! Expected value from 20 to 66");
+ goto done;
+ }
+ /*
+ * Look for interface options: "idex="
+ */
+ if (s[3] >= '0' && s[3] <= max_hwif) {
+ /*
+ * Be VERY CAREFUL changing this: note hardcoded indexes below
+ */
+ const char *ide_words[] = {"noprobe", "serialize", "autotune", "noautotune",
+ "qd6580", "ht6560b", "cmd640_vlb", "dtc2278", "umc8672", "ali14xx", "dc4030", NULL};
+ hw = s[3] - '0';
+ hwif = &ide_hwifs[hw];
+ i = match_parm(&s[4], ide_words, vals, 3);
+
+ /*
+ * Cryptic check to ensure chipset not already set for hwif:
+ * port settings (i > 0) and chipset keywords (i <= -5) are
+ * rejected once a chipset has been established, and chipset
+ * keywords are further restricted to ide0.
+ */
+ if (i > 0 || i <= -5) {
+ if (hwif->chipset != ide_unknown)
+ goto bad_option;
+ if (i <= -5) {
+ if (ide_hwifs[1].chipset != ide_unknown)
+ goto bad_option;
+ /*
+ * Interface keywords work only for ide0:
+ */
+ if (hw != 0)
+ goto bad_hwif;
+ }
+ }
+
+ switch (i) {
+#ifdef CONFIG_BLK_DEV_PROMISE
+ case -11: /* "dc4030" */
+ {
+ setup_dc4030(hwif);
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_PROMISE */
+#ifdef CONFIG_BLK_DEV_ALI14XX
+ case -10: /* "ali14xx" */
+ {
+ extern void init_ali14xx (void);
+ init_ali14xx();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_ALI14XX */
+#ifdef CONFIG_BLK_DEV_UMC8672
+ case -9: /* "umc8672" */
+ {
+ extern void init_umc8672 (void);
+ init_umc8672();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_UMC8672 */
+#ifdef CONFIG_BLK_DEV_DTC2278
+ case -8: /* "dtc2278" */
+ {
+ extern void init_dtc2278 (void);
+ init_dtc2278();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_DTC2278 */
+#ifdef CONFIG_BLK_DEV_CMD640
+ case -7: /* "cmd640_vlb" */
+ {
+ extern int cmd640_vlb; /* flag for cmd640.c */
+ cmd640_vlb = 1;
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_CMD640 */
+#ifdef CONFIG_BLK_DEV_HT6560B
+ case -6: /* "ht6560b" */
+ {
+ extern void init_ht6560b (void);
+ init_ht6560b();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_HT6560B */
+#if CONFIG_BLK_DEV_QD6580
+ case -5: /* "qd6580" (has secondary i/f) */
+ {
+ extern void init_qd6580 (void);
+ init_qd6580();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_QD6580 */
+ case -4: /* "noautotune" */
+ hwif->drives[0].autotune = 2;
+ hwif->drives[1].autotune = 2;
+ goto done;
+ case -3: /* "autotune" */
+ hwif->drives[0].autotune = 1;
+ hwif->drives[1].autotune = 1;
+ goto done;
+ case -2: /* "serialize" */
+ do_serialize:
+ ide_hwifs[hw].serialized = 1; /* serialize */
+ ide_hwifs[hw^1].serialized = 1; /* with mate */
+ goto done;
+
+ case -1: /* "noprobe" */
+ hwif->noprobe = 1;
+ goto done;
+
+ case 1: /* base */
+ vals[1] = vals[0] + 0x206; /* default ctl */
+ /* fall through */
+ case 2: /* base,ctl */
+ vals[2] = 0; /* default irq = probe for it */
+ /* fall through */
+ case 3: /* base,ctl,irq */
+ hwif->io_base = vals[0];
+ hwif->ctl_port = vals[1];
+ hwif->irq = vals[2];
+ hwif->noprobe = 0;
+ hwif->chipset = ide_generic;
+ goto done;
+
+ case 0: goto bad_option;
+ default:
+ printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
+ return;
+ }
+ }
+bad_option:
+ printk(" -- BAD OPTION\n");
+ return;
+bad_hwif:
+ printk("-- NOT SUPPORTED ON ide%d", hw);
+ /* fall through to done for the trailing newline */
+done:
+ printk("\n");
+}
+
+/*
+ * This routine is called from the partition-table code in genhd.c
+ * to "convert" a drive to a logical geometry with fewer than 1024 cyls.
+ *
+ * The second parameter, "xparm", determines exactly how the translation
+ * will be handled:
+ * 0 = convert to CHS with fewer than 1024 cyls
+ * using the same method as Ontrack DiskManager.
+ * 1 = same as "0", plus offset everything by 63 sectors.
+ * -1 = similar to "0", plus redirect sector 0 to sector 1.
+ * >1 = convert to a CHS geometry with "xparm" heads.
+ *
+ * Returns 0 if the translation was not possible, if the device was not
+ * an IDE disk drive, or if a geometry was "forced" on the commandline.
+ * Returns 1 if the geometry translation was successful.
+ */
+
+int ide_xlate_1024 (kdev_t i_rdev, int xparm, const char *msg)
+{
+ ide_drive_t *drive;
+ /* candidate head counts to try until cyls drops below 1024 */
+ static const byte head_vals[] = {4, 8, 16, 32, 64, 128, 255, 0};
+ const byte *heads = head_vals;
+ unsigned long tracks;
+
+ drive = get_info_ptr(i_rdev);
+ if (!drive)
+ return 0;
+
+ if (drive->forced_geom)
+ return 0;
+
+ if (xparm > 1 && xparm <= drive->bios_head && drive->bios_sect == 63)
+ return 0; /* we already have a translation */
+
+ /* NOTE(review): msg is printed even when the small-disk test
+ below still returns 0 */
+ printk("%s ", msg);
+
+ if (xparm < 0 && (drive->bios_cyl * drive->bios_head * drive->bios_sect) < (1024 * 16 * 63)) {
+ return 0; /* small disk: no translation needed */
+ }
+
+ /* start over from the drive's own reported physical geometry */
+ if (drive->id) {
+ drive->cyl = drive->id->cyls;
+ drive->head = drive->id->heads;
+ drive->sect = drive->id->sectors;
+ }
+ drive->bios_cyl = drive->cyl;
+ drive->bios_head = drive->head;
+ drive->bios_sect = drive->sect;
+ drive->special.b.set_geometry = 1;
+
+ /* re-express the capacity as 63-sector tracks */
+ tracks = drive->bios_cyl * drive->bios_head * drive->bios_sect / 63;
+ drive->bios_sect = 63;
+ if (xparm > 1) {
+ drive->bios_head = xparm;
+ drive->bios_cyl = tracks / drive->bios_head;
+ } else {
+ /* grow the head count until the cylinder count fits below 1024 */
+ while (drive->bios_cyl >= 1024) {
+ drive->bios_head = *heads;
+ drive->bios_cyl = tracks / drive->bios_head;
+ if (0 == *++heads)
+ break;
+ }
+#if FAKE_FDISK_FOR_EZDRIVE
+ if (xparm == -1) {
+ drive->remap_0_to_1 = 1;
+ msg = "0->1";
+ } else
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ if (xparm == 1) {
+ /* skip the first track; one track fewer is addressable */
+ drive->sect0 = 63;
+ drive->bios_cyl = (tracks - 1) / drive->bios_head;
+ msg = "+63";
+ }
+ printk("[remap %s] ", msg);
+ }
+ drive->part[0].nr_sects = current_capacity(drive);
+ printk("[%d/%d/%d]", drive->bios_cyl, drive->bios_head, drive->bios_sect);
+ return 1;
+}
+
+#if MAX_HWIFS > 1
+/*
+ * save_match() is used to simplify logic in init_irq() below.
+ *
+ * A loophole here is that we may not know about a particular
+ * hwif's irq until after that hwif is actually probed/initialized..
+ * This could be a problem for the case where an hwif is on a
+ * dual interface that requires serialization (eg. cmd640) and another
+ * hwif using one of the same irqs is initialized beforehand.
+ *
+ * This routine detects and reports such situations, but does not fix them.
+ */
+static void save_match (ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
+{
+ ide_hwif_t *m = *match;
+
+ /* warn when two different, already-formed hwgroups would collide */
+ if (m && m->hwgroup && m->hwgroup != new->hwgroup) {
+ if (!new->hwgroup)
+ return;
+ printk("%s: potential irq problem with %s and %s\n", hwif->name, new->name, m->name);
+ }
+ if (!m || m->irq != hwif->irq) /* don't undo a prior perfect match */
+ *match = new;
+}
+#endif /* MAX_HWIFS > 1 */
+
+/*
+ * This routine sets up the irq for an ide interface, and creates a new
+ * hwgroup for the irq/hwif if none was previously assigned.
+ *
+ * Much of the code is for correctly detecting/handling irq sharing
+ * and irq serialization situations. This is somewhat complex because
+ * it handles static as well as dynamic (PCMCIA) IDE interfaces.
+ *
+ * The SA_INTERRUPT in sa_flags means ide_intr() is always entered with
+ * interrupts completely disabled. This can be bad for interrupt latency,
+ * but anything else has led to problems on some machines. We re-enable
+ * interrupts as much as we can safely do in most places.
+ */
+static int init_irq (ide_hwif_t *hwif)
+{
+ unsigned long flags;
+#if MAX_HWIFS > 1
+ unsigned int index;
+#endif /* MAX_HWIFS > 1 */
+ ide_hwgroup_t *hwgroup;
+ ide_hwif_t *match = NULL;
+
+ save_flags(flags);
+ cli();
+
+ hwif->hwgroup = NULL;
+#if MAX_HWIFS > 1
+ /*
+ * Group up with any other hwifs that share our irq(s).
+ */
+ for (index = 0; index < MAX_HWIFS; index++) {
+ ide_hwif_t *h = &ide_hwifs[index];
+ if (h->hwgroup) { /* scan only initialized hwif's */
+ if (hwif->irq == h->irq) {
+ hwif->sharing_irq = h->sharing_irq = 1;
+ save_match(hwif, h, &match);
+ }
+ if (hwif->serialized) {
+ ide_hwif_t *mate = &ide_hwifs[hwif->index^1];
+ if (index == mate->index || h->irq == mate->irq)
+ save_match(hwif, h, &match);
+ }
+ if (h->serialized) {
+ ide_hwif_t *mate = &ide_hwifs[h->index^1];
+ if (hwif->irq == mate->irq)
+ save_match(hwif, h, &match);
+ }
+ }
+ }
+#endif /* MAX_HWIFS > 1 */
+ /*
+ * If we are still without a hwgroup, then form a new one
+ */
+ if (match) {
+ hwgroup = match->hwgroup;
+ } else {
+ hwgroup = kmalloc(sizeof(ide_hwgroup_t), GFP_KERNEL);
+ if (hwgroup == NULL) {
+ /* out of memory: fail cleanly instead of oopsing below */
+ restore_flags(flags);
+ return 1;
+ }
+ hwgroup->hwif = hwgroup->next_hwif = hwif->next = hwif;
+ hwgroup->rq = NULL;
+ hwgroup->handler = NULL;
+ if (hwif->drives[0].present)
+ hwgroup->drive = &hwif->drives[0];
+ else
+ hwgroup->drive = &hwif->drives[1];
+ hwgroup->poll_timeout = 0;
+ hwgroup->active = 0;
+ init_timer(&hwgroup->timer);
+ hwgroup->timer.function = &timer_expiry;
+ hwgroup->timer.data = (unsigned long) hwgroup;
+ }
+
+ /*
+ * Allocate the irq, if not already obtained for another hwif
+ */
+ if (!match || match->irq != hwif->irq) {
+ if (request_irq(hwif->irq, ide_intr, SA_INTERRUPT, hwif->name, hwgroup)) {
+ if (!match)
+ kfree(hwgroup);
+ restore_flags(flags);
+ return 1;
+ }
+ }
+
+ /*
+ * Everything is okay, so link us into the hwgroup
+ */
+ hwif->hwgroup = hwgroup;
+ hwif->next = hwgroup->hwif->next;
+ hwgroup->hwif->next = hwif;
+
+ restore_flags(flags); /* safe now that hwif->hwgroup is set up */
+
+ printk("%s at 0x%03x-0x%03x,0x%03x on irq %d", hwif->name,
+ hwif->io_base, hwif->io_base+7, hwif->ctl_port, hwif->irq);
+ if (match)
+ printk(" (%sed with %s)", hwif->sharing_irq ? "shar" : "serializ", match->name);
+ printk("\n");
+ return 0;
+}
+
+/*
+ * Block-device entry points for the IDE driver. Fields follow the
+ * historical struct file_operations layout; NULL means none/default.
+ */
+static struct file_operations ide_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ ide_ioctl, /* ioctl */
+ NULL, /* mmap */
+ ide_open, /* open */
+ ide_release, /* release */
+ block_fsync /* fsync */
+ ,NULL, /* fasync */
+ ide_check_media_change, /* check_media_change */
+ revalidate_disk /* revalidate */
+};
+
+#ifdef CONFIG_PCI
+#if defined(CONFIG_BLK_DEV_RZ1000) || defined(CONFIG_BLK_DEV_TRITON)
+
+typedef void (ide_pci_init_proc_t)(byte, byte);
+
+/*
+ * ide_probe_pci() scans PCI for a specific vendor/device function,
+ * and invokes the supplied init routine for each instance detected.
+ */
+static void ide_probe_pci (unsigned short vendor, unsigned short device, ide_pci_init_proc_t *init, int func_adj)
+{
+ unsigned long flags;
+ unsigned index;
+ byte fn, bus;
+
+ save_flags(flags);
+ cli();
+ /* walk every matching vendor/device instance on the PCI bus */
+ for (index = 0; !pcibios_find_device (vendor, device, index, &bus, &fn); ++index) {
+ /* func_adj lets callers redirect to a neighbouring PCI function */
+ init (bus, fn + func_adj);
+ }
+ restore_flags(flags);
+}
+
+#endif /* defined(CONFIG_BLK_DEV_RZ1000) || defined(CONFIG_BLK_DEV_TRITON) */
+
+/*
+ * Detect a Promise 20246 (Ultra-DMA 33) PCI controller and hook its
+ * two channels up as ide2/ide3, using the i/o port addresses found
+ * in the chip's PCI base-address registers.
+ */
+static void ide_probe_promise_20246(void)
+{
+ byte fn, bus;
+ unsigned short io[6], count = 0;
+ unsigned int reg, tmp, i;
+ ide_hwif_t *hwif;
+
+ memset(io, 0, 6 * sizeof(unsigned short));
+ if (pcibios_find_device(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246, 0, &bus, &fn))
+ return;
+ printk("ide: Promise Technology IDE Ultra-DMA 33 on PCI bus %d function %d\n", bus, fn);
+ /* gather every i/o-space BAR, in order */
+ for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) {
+ pcibios_read_config_dword(bus, fn, reg, &tmp);
+ if (tmp & PCI_BASE_ADDRESS_SPACE_IO)
+ io[count++] = tmp & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ /* each channel consumes two consecutive BARs: base and control */
+ for (i = 2; i < 4; i++) {
+ hwif = ide_hwifs + i;
+ if (hwif->chipset == ide_generic) {
+ printk("ide%d: overridden with command line parameter\n", i);
+ return;
+ }
+ tmp = (i - 2) * 2;
+ if (!io[tmp] || !io[tmp + 1]) {
+ printk("ide%d: invalid port address %x, %x -- aborting\n", i, io[tmp], io[tmp + 1]);
+ return;
+ }
+ hwif->io_base = io[tmp];
+ hwif->ctl_port = io[tmp + 1] + 2;
+ hwif->noprobe = 0;
+ }
+}
+
+#endif /* CONFIG_PCI */
+
+/*
+ * probe_for_hwifs() finds/initializes "known" PCI IDE interfaces
+ *
+ * This routine should ideally be using pcibios_find_class() to find
+ * all IDE interfaces, but that function causes some systems to "go weird".
+ */
+static void probe_for_hwifs (void)
+{
+#ifdef CONFIG_PCI
+ /*
+ * Find/initialize PCI IDE interfaces
+ */
+ if (pcibios_present()) {
+#ifdef CONFIG_BLK_DEV_RZ1000
+ ide_pci_init_proc_t init_rz1000;
+ ide_probe_pci (PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000, &init_rz1000, 0);
+ ide_probe_pci (PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001, &init_rz1000, 0);
+#endif /* CONFIG_BLK_DEV_RZ1000 */
+#ifdef CONFIG_BLK_DEV_TRITON
+ /*
+ * Apparently the BIOS32 services on Intel motherboards are
+ * buggy and won't find the PCI_DEVICE_ID_INTEL_82371_1 for us.
+ * So instead, we search for PCI_DEVICE_ID_INTEL_82371_0,
+ * and then add 1.
+ */
+ ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371_0, &ide_init_triton, 1);
+ ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_1, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229, &ide_init_triton, 0);
+#endif /* CONFIG_BLK_DEV_TRITON */
+ ide_probe_promise_20246();
+ }
+#endif /* CONFIG_PCI */
+#ifdef CONFIG_BLK_DEV_CMD640
+ {
+ extern void ide_probe_for_cmd640x (void);
+ ide_probe_for_cmd640x();
+ }
+#endif
+#ifdef CONFIG_BLK_DEV_PROMISE
+ init_dc4030();
+#endif
+ /* skip AHCI probing when "noahci" is on the kernel command line,
+ either as the very first word or preceded by a space */
+ extern char *kernel_cmdline;
+ if (strncmp(kernel_cmdline, "noahci", 6) &&
+ !strstr(kernel_cmdline, " noahci"))
+ ahci_probe_pci();
+}
+
+static int hwif_init (int h)
+{
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ void (*rfn)(void);
+
+ if (!hwif->present)
+ return 0;
+ /* fall back to the compiled-in default irq for this interface */
+ if (!hwif->irq) {
+ if (!(hwif->irq = default_irqs[h])) {
+ printk("%s: DISABLED, NO IRQ\n", hwif->name);
+ return (hwif->present = 0);
+ }
+ }
+#ifdef CONFIG_BLK_DEV_HD
+ if (hwif->irq == HD_IRQ && hwif->io_base != HD_DATA) {
+ printk("%s: CANNOT SHARE IRQ WITH OLD HARDDISK DRIVER (hd.c)\n", hwif->name);
+ return (hwif->present = 0);
+ }
+#endif /* CONFIG_BLK_DEV_HD */
+
+ hwif->present = 0; /* we set it back to 1 if all is ok below */
+ /* each interface has its own request function, selected by major */
+ switch (hwif->major) {
+ case IDE0_MAJOR: rfn = &do_ide0_request; break;
+#if MAX_HWIFS > 1
+ case IDE1_MAJOR: rfn = &do_ide1_request; break;
+#endif
+#if MAX_HWIFS > 2
+ case IDE2_MAJOR: rfn = &do_ide2_request; break;
+#endif
+#if MAX_HWIFS > 3
+ case IDE3_MAJOR: rfn = &do_ide3_request; break;
+#endif
+ default:
+ printk("%s: request_fn NOT DEFINED\n", hwif->name);
+ return (hwif->present = 0);
+ }
+ /* register the major, then the irq; undo the blkdev registration
+ if the irq cannot be obtained */
+ if (register_blkdev (hwif->major, hwif->name, &ide_fops)) {
+ printk("%s: UNABLE TO GET MAJOR NUMBER %d\n", hwif->name, hwif->major);
+ } else if (init_irq (hwif)) {
+ printk("%s: UNABLE TO GET IRQ %d\n", hwif->name, hwif->irq);
+ (void) unregister_blkdev (hwif->major, hwif->name);
+ } else {
+ init_gendisk(hwif);
+ blk_dev[hwif->major].request_fn = rfn;
+ read_ahead[hwif->major] = 8; /* (4kB) */
+ hwif->present = 1; /* success */
+ }
+ return hwif->present;
+}
+
+/*
+ * This gets invoked once during initialization, to set *everything* up
+ */
+int ide_init (void)
+{
+ int index;
+
+ init_ide_data ();
+ /*
+ * Probe for special "known" interface chipsets
+ */
+ probe_for_hwifs ();
+
+ /*
+ * Probe for drives in the usual way.. CMOS/BIOS, then poke at ports
+ */
+ for (index = 0; index < MAX_HWIFS; ++index)
+ probe_hwif (&ide_hwifs[index]);
+ /* second pass: register majors, irqs and gendisks for what we found */
+ for (index = 0; index < MAX_HWIFS; ++index)
+ hwif_init (index);
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ idetape_register_chrdev(); /* Register character device interface to the ide tape */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_IDE_PCMCIA
+/*
+ * ide_register() hot-adds an interface (PCMCIA support): it finds a
+ * free hwif slot, probes and initializes it, and revalidates the
+ * disks found. Returns the hwif index on success, or -1 on failure
+ * or if the ports are already registered.
+ */
+int ide_register(int io_base, int ctl_port, int irq)
+{
+ int index, i, rc = -1;
+ ide_hwif_t *hwif;
+ ide_drive_t *drive;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ for (index = 0; index < MAX_HWIFS; ++index) {
+ hwif = &ide_hwifs[index];
+ if (hwif->present) {
+ if (hwif->io_base == io_base || hwif->ctl_port == ctl_port)
+ break; /* this ide port already exists */
+ } else {
+ /* claim this free slot, then probe and initialize it */
+ hwif->io_base = io_base;
+ hwif->ctl_port = ctl_port;
+ hwif->irq = irq;
+ hwif->noprobe = 0;
+ probe_hwif(hwif);
+ if (!hwif_init(index))
+ break;
+ /* re-read partition tables for each unit just probed */
+ for (i = 0; i < hwif->gd->nr_real; i++) {
+ drive = &hwif->drives[i];
+ revalidate_disk(MKDEV(hwif->major, i<<PARTN_BITS));
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->present && drive->media == ide_cdrom)
+ ide_cdrom_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+ }
+ rc = index;
+ break;
+ }
+ }
+ restore_flags(flags);
+ return rc;
+}
+
+/*
+ * ide_unregister() tears down an interface set up by ide_register():
+ * frees the irq if no other hwif shares it, releases the i/o regions,
+ * unlinks the hwif from its hwgroup, unregisters the block device and
+ * gendisk, then restores the hwif slot to its pristine state.
+ * Silently does nothing if the hwif is absent or a drive is busy.
+ */
+void ide_unregister (unsigned int index)
+{
+ struct gendisk *gd, **gdp;
+ ide_hwif_t *hwif, *g;
+ ide_hwgroup_t *hwgroup;
+ int irq_count = 0;
+ unsigned long flags;
+
+ if (index >= MAX_HWIFS)
+ return;
+ save_flags(flags);
+ cli();
+ hwif = &ide_hwifs[index];
+ if (!hwif->present || hwif->drives[0].busy || hwif->drives[1].busy) {
+ restore_flags(flags);
+ return;
+ }
+ hwif->present = 0;
+ hwgroup = hwif->hwgroup;
+
+ /*
+ * free the irq if we were the only hwif using it
+ */
+ g = hwgroup->hwif;
+ do {
+ if (g->irq == hwif->irq)
+ ++irq_count;
+ g = g->next;
+ } while (g != hwgroup->hwif);
+ if (irq_count == 1)
+ free_irq(hwif->irq, hwgroup);
+
+ /*
+ * Note that we only release the standard ports,
+ * and do not even try to handle any extra ports
+ * allocated for weird IDE interface chipsets.
+ */
+ release_region(hwif->io_base, 8);
+ release_region(hwif->ctl_port, 1);
+
+ /*
+ * Remove us from the hwgroup, and free
+ * the hwgroup if we were the only member
+ */
+ while (hwgroup->hwif->next != hwif)
+ hwgroup->hwif = hwgroup->hwif->next;
+ hwgroup->hwif->next = hwif->next;
+ if (hwgroup->hwif == hwif)
+ hwgroup->hwif = hwif->next;
+ if (hwgroup->next_hwif == hwif)
+ hwgroup->next_hwif = hwif->next;
+ /* still pointing at ourselves => we were the only member */
+ if (hwgroup->hwif == hwif)
+ kfree(hwgroup);
+
+ /*
+ * Remove us from the kernel's knowledge
+ */
+ unregister_blkdev(hwif->major, hwif->name);
+ kfree(blksize_size[hwif->major]);
+ blk_dev[hwif->major].request_fn = NULL;
+ blksize_size[hwif->major] = NULL;
+ /* unlink our gendisk from the global chain, then free it */
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
+ if (*gdp == hwif->gd)
+ break;
+ if (*gdp == NULL)
+ printk("gd not in disk chain!\n");
+ else {
+ gd = *gdp; *gdp = gd->next;
+ kfree(gd->sizes);
+ kfree(gd->part);
+ kfree(gd);
+ }
+ init_hwif_data (index); /* restore hwif data to pristine status */
+ restore_flags(flags);
+}
+#endif /* CONFIG_BLK_DEV_IDE_PCMCIA */
diff --git a/linux/src/drivers/block/ide.h b/linux/src/drivers/block/ide.h
new file mode 100644
index 0000000..28e371b
--- /dev/null
+++ b/linux/src/drivers/block/ide.h
@@ -0,0 +1,750 @@
+/*
+ * linux/drivers/block/ide.h
+ *
+ * Copyright (C) 1994, 1995 Linus Torvalds & authors
+ */
+
+#include <linux/config.h>
+
+/*
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * Primary i/f: ide0: major=3; (hda) minor=0; (hdb) minor=64
+ * Secondary i/f: ide1: major=22; (hdc or hd1a) minor=0; (hdd or hd1b) minor=64
+ * Tertiary i/f: ide2: major=33; (hde) minor=0; (hdf) minor=64
+ * Quaternary i/f: ide3: major=34; (hdg) minor=0; (hdh) minor=64
+ */
+
+/******************************************************************************
+ * IDE driver configuration options (play with these as desired):
+ *
+ * REALLY_SLOW_IO can be defined in ide.c and ide-cd.c, if necessary
+ */
+#undef REALLY_FAST_IO /* define if ide ports are perfect */
+#define INITIAL_MULT_COUNT 16 /* off=0; on=2,4,8,16,32, etc.. */
+
+#ifndef SUPPORT_SLOW_DATA_PORTS /* 1 to support slow data ports */
+#define SUPPORT_SLOW_DATA_PORTS 1 /* 0 to reduce kernel size */
+#endif
+#ifndef SUPPORT_VLB_SYNC /* 1 to support weird 32-bit chips */
+#define SUPPORT_VLB_SYNC 1 /* 0 to reduce kernel size */
+#endif
+#ifndef DISK_RECOVERY_TIME /* off=0; on=access_delay_time */
+#define DISK_RECOVERY_TIME 0 /* for hardware that needs it */
+#endif
+#ifndef OK_TO_RESET_CONTROLLER /* 1 needed for good error recovery */
+#define OK_TO_RESET_CONTROLLER 1 /* 0 for use with AH2372A/B interface */
+#endif
+#ifndef FAKE_FDISK_FOR_EZDRIVE /* 1 to help linux fdisk with EZDRIVE */
+#define FAKE_FDISK_FOR_EZDRIVE 1 /* 0 to reduce kernel size */
+#endif
+#ifndef FANCY_STATUS_DUMPS /* 1 for human-readable drive errors */
+#define FANCY_STATUS_DUMPS 1 /* 0 to reduce kernel size */
+#endif
+
+#ifdef CONFIG_BLK_DEV_CMD640
+#if 0 /* change to 1 when debugging cmd640 problems */
+void cmd640_dump_regs (void);
+#define CMD640_DUMP_REGS cmd640_dump_regs() /* for debugging cmd640 chipset */
+#endif
+#endif /* CONFIG_BLK_DEV_CMD640 */
+
+#if defined(CONFIG_BLK_DEV_IDECD) || defined(CONFIG_BLK_DEV_IDETAPE) || \
+ defined(CONFIG_BLK_DEV_IDEFLOPPY) || defined(CONFIG_BLK_DEV_IDESCSI)
+#define CONFIG_BLK_DEV_IDEATAPI 1
+#endif
+
+/*
+ * IDE_DRIVE_CMD is used to implement many features of the hdparm utility
+ */
+#define IDE_DRIVE_CMD 99 /* (magic) undef to reduce kernel size*/
+
+/*
+ * "No user-serviceable parts" beyond this point :)
+ *****************************************************************************/
+
+#if defined(CONFIG_BLK_DEV_IDESCSI) && !defined(CONFIG_SCSI)
+#error "SCSI must also be selected"
+#endif
+
+typedef unsigned char byte; /* used everywhere */
+
+/*
+ * Probably not wise to fiddle with these
+ */
+#define ERROR_MAX 8 /* Max read/write errors per sector */
+#define ERROR_RESET 3 /* Reset controller every 4th retry */
+#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
+
+/*
+ * Ensure that various configuration flags have compatible settings
+ */
+#ifdef REALLY_SLOW_IO
+#undef REALLY_FAST_IO
+#endif
+
+/*
+ * Definitions for accessing IDE controller registers
+ */
+
+#define HWIF(drive) ((ide_hwif_t *)((drive)->hwif))
+#define HWGROUP(drive) ((ide_hwgroup_t *)(HWIF(drive)->hwgroup))
+
+#define IDE_DATA_OFFSET (0)
+#define IDE_ERROR_OFFSET (1)
+#define IDE_NSECTOR_OFFSET (2)
+#define IDE_SECTOR_OFFSET (3)
+#define IDE_LCYL_OFFSET (4)
+#define IDE_HCYL_OFFSET (5)
+#define IDE_SELECT_OFFSET (6)
+#define IDE_STATUS_OFFSET (7)
+#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET
+#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET
+
+#define IDE_DATA_REG (HWIF(drive)->io_base+IDE_DATA_OFFSET)
+#define IDE_ERROR_REG (HWIF(drive)->io_base+IDE_ERROR_OFFSET)
+#define IDE_NSECTOR_REG (HWIF(drive)->io_base+IDE_NSECTOR_OFFSET)
+#define IDE_SECTOR_REG (HWIF(drive)->io_base+IDE_SECTOR_OFFSET)
+#define IDE_LCYL_REG (HWIF(drive)->io_base+IDE_LCYL_OFFSET)
+#define IDE_HCYL_REG (HWIF(drive)->io_base+IDE_HCYL_OFFSET)
+#define IDE_SELECT_REG (HWIF(drive)->io_base+IDE_SELECT_OFFSET)
+#define IDE_STATUS_REG (HWIF(drive)->io_base+IDE_STATUS_OFFSET)
+#define IDE_CONTROL_REG (HWIF(drive)->ctl_port)
+#define IDE_FEATURE_REG IDE_ERROR_REG
+#define IDE_COMMAND_REG IDE_STATUS_REG
+#define IDE_ALTSTATUS_REG IDE_CONTROL_REG
+#define IDE_IREASON_REG IDE_NSECTOR_REG
+#define IDE_BCOUNTL_REG IDE_LCYL_REG
+#define IDE_BCOUNTH_REG IDE_HCYL_REG
+
+#ifdef REALLY_FAST_IO
+#define OUT_BYTE(b,p) outb((b),(p))
+#define IN_BYTE(p) (byte)inb(p)
+#else
+#define OUT_BYTE(b,p) outb_p((b),(p))
+#define IN_BYTE(p) (byte)inb_p(p)
+#endif /* REALLY_FAST_IO */
+
+#define GET_ERR() IN_BYTE(IDE_ERROR_REG)
+#define GET_STAT() IN_BYTE(IDE_STATUS_REG)
+#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
+#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
+#define BAD_W_STAT (BAD_R_STAT | WRERR_STAT)
+#define BAD_STAT (BAD_R_STAT | DRQ_STAT)
+#define DRIVE_READY (READY_STAT | SEEK_STAT)
+#define DATA_READY (DRQ_STAT)
+
+/*
+ * Some more useful definitions
+ */
+#define IDE_MAJOR_NAME "ide" /* the same for all i/f; see also genhd.c */
+#define MAJOR_NAME IDE_MAJOR_NAME
+#define PARTN_BITS 6 /* number of minor dev bits for partitions */
+#define PARTN_MASK ((1<<PARTN_BITS)-1) /* a useful bit mask */
+#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
+#ifndef MAX_HWIFS
+#define MAX_HWIFS 4 /* an arbitrary, but realistic limit */
+#endif
+#define SECTOR_WORDS (512 / 4) /* number of 32bit words per sector */
+
+/*
+ * Timeouts for various operations:
+ */
+#define WAIT_DRQ (1*HZ) /* 1s - spec allows up to 20ms, but CF
+ * cards and SSD drives need more */
+#ifdef CONFIG_APM
+#define WAIT_READY (5*HZ) /* 5sec - some laptops are very slow */
+#else
+#define WAIT_READY (3*HZ/100) /* 30msec - should be instantaneous */
+#endif /* CONFIG_APM */
+#define WAIT_PIDENTIFY (1*HZ) /* 1sec - should be less than 3ms (?) */
+#define WAIT_WORSTCASE (30*HZ) /* 30sec - worst case when spinning up */
+#define WAIT_CMD (10*HZ) /* 10sec - maximum wait for an IRQ to happen */
+
+#if defined(CONFIG_BLK_DEV_HT6560B) || defined(CONFIG_BLK_DEV_PROMISE)
+#define SELECT_DRIVE(hwif,drive) \
+{ \
+ if (hwif->selectproc) \
+ hwif->selectproc(drive); \
+ else \
+ OUT_BYTE((drive)->select.all, hwif->io_base+IDE_SELECT_OFFSET); \
+}
+#else
+#define SELECT_DRIVE(hwif,drive) OUT_BYTE((drive)->select.all, hwif->io_base+IDE_SELECT_OFFSET);
+#endif /* CONFIG_BLK_DEV_HT6560B || CONFIG_BLK_DEV_PROMISE */
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+#include "ide-tape.h"
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+#ifdef CONFIG_BLK_DEV_IDECD
+
+struct atapi_request_sense {
+ unsigned char error_code : 7;
+ unsigned char valid : 1;
+ byte reserved1;
+ unsigned char sense_key : 4;
+ unsigned char reserved2 : 1;
+ unsigned char ili : 1;
+ unsigned char reserved3 : 2;
+ byte info[4];
+ byte sense_len;
+ byte command_info[4];
+ byte asc;
+ byte ascq;
+ byte fru;
+ byte sense_key_specific[3];
+};
+
+struct packet_command {
+ unsigned char *buffer;
+ int buflen;
+ int stat;
+ struct atapi_request_sense *sense_data;
+ unsigned char c[12];
+};
+
+
+/* Structure of a MSF cdrom address. */
+struct atapi_msf {
+ byte reserved;
+ byte minute;
+ byte second;
+ byte frame;
+};
+
+
+/* Space to hold the disk TOC. */
+
+#define MAX_TRACKS 99
+struct atapi_toc_header {
+ unsigned short toc_length;
+ byte first_track;
+ byte last_track;
+};
+
+struct atapi_toc_entry {
+ byte reserved1;
+ unsigned control : 4;
+ unsigned adr : 4;
+ byte track;
+ byte reserved2;
+ union {
+ unsigned lba;
+ struct atapi_msf msf;
+ } addr;
+};
+
+struct atapi_toc {
+ int last_session_lba;
+ int xa_flag;
+ unsigned capacity;
+ struct atapi_toc_header hdr;
+ struct atapi_toc_entry ent[MAX_TRACKS+1];
+ /* One extra for the leadout. */
+};
+
+
+/* This structure is annoyingly close to, but not identical with,
+ the cdrom_subchnl structure from cdrom.h. */
+struct atapi_cdrom_subchnl
+{
+ u_char acdsc_reserved;
+ u_char acdsc_audiostatus;
+ u_short acdsc_length;
+ u_char acdsc_format;
+
+ u_char acdsc_adr: 4;
+ u_char acdsc_ctrl: 4;
+ u_char acdsc_trk;
+ u_char acdsc_ind;
+ union {
+ struct atapi_msf msf;
+ int lba;
+ } acdsc_absaddr;
+ union {
+ struct atapi_msf msf;
+ int lba;
+ } acdsc_reladdr;
+};
+
+
+/* Extra per-device info for cdrom drives. */
+struct cdrom_info {
+
+ /* Buffer for table of contents. NULL if we haven't allocated
+ a TOC buffer for this device yet. */
+
+ struct atapi_toc *toc;
+
+ /* Sector buffer. If a read request wants only the first part
+ of a cdrom block, we cache the rest of the block here,
+ in the expectation that that data is going to be wanted soon.
+ SECTOR_BUFFERED is the number of the first buffered sector,
+ and NSECTORS_BUFFERED is the number of sectors in the buffer.
+ Before the buffer is allocated, we should have
+ sector_buffer == NULL and nsectors_buffered == 0. */
+
+ unsigned long sector_buffered;
+ unsigned long nsectors_buffered;
+ char *sector_buffer;
+
+ /* The result of the last successful request sense command
+ on this device. */
+ struct atapi_request_sense sense_data;
+
+ int max_sectors;
+};
+
+#endif /* CONFIG_BLK_DEV_IDECD */
+
+/*
+ * Now for the data we need to maintain per-drive: ide_drive_t
+ */
+
+typedef enum {ide_disk, ide_cdrom, ide_tape, ide_floppy, ide_scsi} ide_media_t;
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned set_geometry : 1; /* respecify drive geometry */
+ unsigned recalibrate : 1; /* seek to cyl 0 */
+ unsigned set_multmode : 1; /* set multmode count */
+ unsigned set_tune : 1; /* tune interface for drive */
+ unsigned mc : 1; /* acknowledge media change */
+ unsigned reserved : 3; /* unused */
+ } b;
+ } special_t;
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned head : 4; /* always zeros here */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit7 : 1; /* always 1 */
+ } b;
+ } select_t;
+
+typedef struct ide_drive_s {
+ special_t special; /* special action flags */
+ unsigned present : 1; /* drive is physically present */
+ unsigned noprobe : 1; /* from: hdx=noprobe */
+ unsigned keep_settings : 1; /* restore settings after drive reset */
+ unsigned busy : 1; /* currently doing revalidate_disk() */
+ unsigned removable : 1; /* 1 if need to do check_media_change */
+ unsigned using_dma : 1; /* disk is using dma for read/write */
+ unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
+ unsigned unmask : 1; /* flag: okay to unmask other irqs */
+ unsigned no_unmask : 1; /* disallow setting unmask bit */
+ unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
+ unsigned nobios : 1; /* flag: do not probe bios for drive */
+ unsigned slow : 1; /* flag: slow data port */
+ unsigned autotune : 2; /* 1=autotune, 2=noautotune, 0=default */
+ unsigned nodma : 1; /* disk should not use dma for read/write */
+#if FAKE_FDISK_FOR_EZDRIVE
+ unsigned remap_0_to_1 : 1; /* flag: partitioned with ezdrive */
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ unsigned no_geom : 1; /* flag: do not set geometry */
+ ide_media_t media; /* disk, cdrom, tape, floppy */
+ select_t select; /* basic drive/head select reg value */
+ byte ctl; /* "normal" value for IDE_CONTROL_REG */
+ byte ready_stat; /* min status value for drive ready */
+ byte mult_count; /* current multiple sector setting */
+ byte mult_req; /* requested multiple sector setting */
+ byte tune_req; /* requested drive tuning setting */
+ byte io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
+ byte bad_wstat; /* used for ignoring WRERR_STAT */
+ byte sect0; /* offset of first sector for DM6:DDO */
+ byte usage; /* current "open()" count for drive */
+ byte head; /* "real" number of heads */
+ byte sect; /* "real" sectors per track */
+ byte bios_head; /* BIOS/fdisk/LILO number of heads */
+ byte bios_sect; /* BIOS/fdisk/LILO sectors per track */
+ unsigned short bios_cyl; /* BIOS/fdisk/LILO number of cyls */
+ unsigned short cyl; /* "real" number of cyls */
+ void *hwif; /* actually (ide_hwif_t *) */
+ struct wait_queue *wqueue; /* used to wait for drive in open() */
+ struct hd_driveid *id; /* drive model identification info */
+ struct hd_struct *part; /* drive partition table */
+ char name[4]; /* drive name, such as "hda" */
+#ifdef CONFIG_BLK_DEV_IDECD
+ struct cdrom_info cdrom_info; /* for ide-cd.c */
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ idetape_tape_t tape; /* for ide-tape.c */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ void *floppy; /* for ide-floppy.c */
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ void *scsi; /* for ide-scsi.c */
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ byte ide_scsi; /* use ide-scsi driver */
+ } ide_drive_t;
+
+/*
+ * An ide_dmaproc_t() initiates/aborts DMA read/write operations on a drive.
+ *
+ * The caller is assumed to have selected the drive and programmed the drive's
+ * sector address using CHS or LBA. All that remains is to prepare for DMA
+ * and then issue the actual read/write DMA/PIO command to the drive.
+ *
+ * Returns 0 if all went well.
+ * Returns 1 if DMA read/write could not be started, in which case the caller
+ * should either try again later, or revert to PIO for the current request.
+ */
+typedef enum { ide_dma_read = 0, ide_dma_write = 1,
+ ide_dma_abort = 2, ide_dma_check = 3,
+ ide_dma_status_bad = 4, ide_dma_transferred = 5,
+ ide_dma_begin = 6 }
+ ide_dma_action_t;
+
+typedef int (ide_dmaproc_t)(ide_dma_action_t, ide_drive_t *);
+
+
+/*
+ * An ide_tuneproc_t() is used to set the speed of an IDE interface
+ * to a particular PIO mode. The "byte" parameter is used
+ * to select the PIO mode by number (0,1,2,3,4,5), and a value of 255
+ * indicates that the interface driver should "auto-tune" the PIO mode
+ * according to the drive capabilities in drive->id;
+ *
+ * Not all interface types support tuning, and not all of those
+ * support all possible PIO settings. They may silently ignore
+ * or round values as they see fit.
+ */
+typedef void (ide_tuneproc_t)(ide_drive_t *, byte);
+
+/*
+ * This is used to provide HT6560B & PROMISE interface support.
+ */
+typedef void (ide_selectproc_t) (ide_drive_t *);
+
+/*
+ * hwif_chipset_t is used to keep track of the specific hardware
+ * chipset used by each IDE interface, if known.
+ */
+typedef enum { ide_unknown, ide_generic, ide_triton,
+ ide_cmd640, ide_dtc2278, ide_ali14xx,
+ ide_qd6580, ide_umc8672, ide_ht6560b,
+ ide_promise, ide_hpt343, ide_udma,
+ ide_ultra66 }
+ hwif_chipset_t;
+
+typedef struct hwif_s {
+ struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
+ void *hwgroup; /* actually (ide_hwgroup_t *) */
+ unsigned short io_base; /* base io port addr */
+ unsigned short ctl_port; /* usually io_base+0x206 */
+ ide_drive_t drives[MAX_DRIVES]; /* drive info */
+ struct gendisk *gd; /* gendisk structure */
+ ide_tuneproc_t *tuneproc; /* routine to tune PIO mode for drives */
+#if defined(CONFIG_BLK_DEV_HT6560B) || defined(CONFIG_BLK_DEV_PROMISE)
+ ide_selectproc_t *selectproc; /* tweaks hardware to select drive */
+#endif
+ ide_dmaproc_t *dmaproc; /* dma read/write/abort routine */
+ unsigned long *dmatable; /* dma physical region descriptor table */
+ unsigned short dma_base; /* base addr for dma ports (triton) */
+ byte irq; /* our irq number */
+ byte major; /* our major number */
+ char name[5]; /* name of interface, eg. "ide0" */
+ byte index; /* 0 for ide0; 1 for ide1; ... */
+ hwif_chipset_t chipset; /* sub-module for tuning.. */
+ unsigned noprobe : 1; /* don't probe for this interface */
+ unsigned present : 1; /* this interface exists */
+ unsigned serialized : 1; /* serialized operation with mate hwif */
+ unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
+#ifdef CONFIG_BLK_DEV_PROMISE
+ unsigned is_promise2: 1; /* 2nd i/f on promise DC4030 */
+#endif /* CONFIG_BLK_DEV_PROMISE */
+#if (DISK_RECOVERY_TIME > 0)
+ unsigned long last_time; /* time when previous rq was done */
+#endif
+#ifdef CONFIG_BLK_DEV_IDECD
+ struct request request_sense_request; /* from ide-cd.c */
+ struct packet_command request_sense_pc; /* from ide-cd.c */
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ ide_drive_t *tape_drive; /* Pointer to the tape on this interface */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ } ide_hwif_t;
+
+/*
+ * internal ide interrupt handler type
+ */
+typedef void (ide_handler_t)(ide_drive_t *);
+
+typedef struct hwgroup_s {
+ ide_handler_t *handler;/* irq handler, if active */
+ ide_drive_t *drive; /* current drive */
+ ide_hwif_t *hwif; /* ptr to current hwif in linked-list */
+ ide_hwif_t *next_hwif; /* next selected hwif (for tape) */
+ struct request *rq; /* current request */
+ struct timer_list timer; /* failsafe timer */
+ struct request wrq; /* local copy of current write rq */
+ unsigned long poll_timeout; /* timeout value during long polls */
+ int active; /* set when servicing requests */
+ } ide_hwgroup_t;
+
+/*
+ * ide_hwifs[] is the master data structure used to keep track
+ * of just about everything in ide.c. Whenever possible, routines
+ * should be using pointers to a drive (ide_drive_t *) or
+ * pointers to a hwif (ide_hwif_t *), rather than indexing this
+ * structure directly (the allocation/layout may change!).
+ *
+ */
+#ifndef _IDE_C
+extern ide_hwif_t ide_hwifs[]; /* master data repository */
+#endif
+
+/*
+ * One final include file, which references some of the data/defns from above
+ */
+#define IDE_DRIVER /* "parameter" for blk.h */
+#include <linux/blk.h>
+
+#if (DISK_RECOVERY_TIME > 0)
+void ide_set_recovery_timer (ide_hwif_t *);
+#define SET_RECOVERY_TIMER(drive) ide_set_recovery_timer (drive)
+#else
+#define SET_RECOVERY_TIMER(drive)
+#endif
+
+/*
+ * This is used for (nearly) all data transfers from the IDE interface
+ */
+void ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount);
+
+/*
+ * This is used for (nearly) all data transfers to the IDE interface
+ */
+void ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount);
+
+/*
+ * This is used for (nearly) all ATAPI data transfers from/to the IDE interface
+ */
+void atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount);
+void atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount);
+
+/*
+ * This is used on exit from the driver, to designate the next irq handler
+ * and also to start the safety timer.
+ */
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout);
+
+/*
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ */
+byte ide_dump_status (ide_drive_t *drive, const char *msg, byte stat);
+
+/*
+ * ide_error() takes action based on the error returned by the controller.
+ * The calling function must return afterwards, to restart the request.
+ */
+void ide_error (ide_drive_t *drive, const char *msg, byte stat);
+
+/*
+ * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
+ * removing leading/trailing blanks and compressing internal blanks.
+ * It is primarily used to tidy up the model name/number fields as
+ * returned by the WIN_[P]IDENTIFY commands.
+ */
+void ide_fixstring (byte *s, const int bytecount, const int byteswap);
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return 1 after invoking ide_error() -- caller should return.
+ *
+ */
+int ide_wait_stat (ide_drive_t *drive, byte good, byte bad, unsigned long timeout);
+
+/*
+ * This routine is called from the partition-table code in genhd.c
+ * to "convert" a drive to a logical geometry with fewer than 1024 cyls.
+ *
+ * The second parameter, "xparm", determines exactly how the translation
+ * will be handled:
+ * 0 = convert to CHS with fewer than 1024 cyls
+ * using the same method as Ontrack DiskManager.
+ * 1 = same as "0", plus offset everything by 63 sectors.
+ * -1 = similar to "0", plus redirect sector 0 to sector 1.
+ * >1 = convert to a CHS geometry with "xparm" heads.
+ *
+ * Returns 0 if the translation was not possible, if the device was not
+ * an IDE disk drive, or if a geometry was "forced" on the commandline.
+ * Returns 1 if the geometry translation was successful.
+ */
+int ide_xlate_1024 (kdev_t, int, const char *);
+
+/*
+ * Start a reset operation for an IDE interface.
+ * The caller should return immediately after invoking this.
+ */
+void ide_do_reset (ide_drive_t *);
+
+/*
+ * This function is intended to be used prior to invoking ide_do_drive_cmd().
+ */
+void ide_init_drive_cmd (struct request *rq);
+
+/*
+ * "action" parameter type for ide_do_drive_cmd() below.
+ */
+typedef enum
+ {ide_wait, /* insert rq at end of list, and wait for it */
+ ide_next, /* insert rq immediately after current request */
+ ide_preempt, /* insert rq in front of current request */
+ ide_end} /* insert rq at end of list, but don't wait for it */
+ ide_action_t;
+
+/*
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * If action is ide_wait, then the rq is queued at the end of the
+ * request queue, and the function sleeps until it has been processed.
+ * This is for use when invoked from an ioctl handler.
+ *
+ * If action is ide_preempt, then the rq is queued at the head of
+ * the request queue, displacing the currently-being-processed
+ * request and this function returns immediately without waiting
+ * for the new rq to be completed. This is VERY DANGEROUS, and is
+ * intended for careful use by the ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_next, then the rq is queued immediately after
+ * the currently-being-processed-request (if any), and the function
+ * returns without waiting for the new rq to be completed. As above,
+ * this is VERY DANGEROUS, and is intended for careful use by the
+ * ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_end, then the rq is queued at the end of the
+ * request queue, and the function returns immediately without waiting
+ * for the new rq to be completed. This is again intended for careful
+ * use by the ATAPI tape/cdrom driver code. (Currently used by ide-tape.c,
+ * when operating in the pipelined operation mode).
+ */
+int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action);
+
+/*
+ * Clean up after success/failure of an explicit drive cmd.
+ * stat/err are used only when (HWGROUP(drive)->rq->cmd == IDE_DRIVE_CMD).
+ */
+void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err);
+
+/*
+ * ide_system_bus_speed() returns what we think is the system VESA/PCI
+ * bus speed (in MHz). This is used for calculating interface PIO timings.
+ * The default is 40 for known PCI systems, 50 otherwise.
+ * The "idebus=xx" parameter can be used to override this value.
+ */
+int ide_system_bus_speed (void);
+
+/*
+ * ide_multwrite() transfers a block of up to mcount sectors of data
+ * to a drive as part of a disk multwrite operation.
+ */
+void ide_multwrite (ide_drive_t *drive, unsigned int mcount);
+
+#ifdef CONFIG_BLK_DEV_IDECD
+/*
+ * These are routines in ide-cd.c invoked from ide.c
+ */
+void ide_do_rw_cdrom (ide_drive_t *, unsigned long);
+int ide_cdrom_ioctl (ide_drive_t *, struct inode *, struct file *, unsigned int, unsigned long);
+int ide_cdrom_check_media_change (ide_drive_t *);
+int ide_cdrom_open (struct inode *, struct file *, ide_drive_t *);
+void ide_cdrom_release (struct inode *, struct file *, ide_drive_t *);
+void ide_cdrom_setup (ide_drive_t *);
+#endif /* CONFIG_BLK_DEV_IDECD */
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+
+/*
+ * Functions in ide-tape.c which are invoked from ide.c:
+ */
+
+/*
+ * idetape_identify_device is called during device probing stage to
+ * probe for an ide atapi tape drive and to initialize global variables
+ * in ide-tape.c which provide the link between the character device
+ * and the corresponding block device.
+ *
+ * Returns 1 if an ide tape was detected and is supported.
+ * Returns 0 otherwise.
+ */
+
+int idetape_identify_device (ide_drive_t *drive,struct hd_driveid *id);
+
+/*
+ * idetape_setup is called a bit later than idetape_identify_device,
+ * during the search for disk partitions, to initialize various tape
+ * state variables in ide_drive_t *drive.
+ */
+
+void idetape_setup (ide_drive_t *drive);
+
+/*
+ * idetape_do_request is our request function. It is called by ide.c
+ * to process a new request.
+ */
+
+void idetape_do_request (ide_drive_t *drive, struct request *rq, unsigned long block);
+
+/*
+ * idetape_end_request is used to finish servicing a request, and to
+ * insert a pending pipeline request into the main device queue.
+ */
+
+void idetape_end_request (byte uptodate, ide_hwgroup_t *hwgroup);
+
+/*
+ * Block device interface functions.
+ */
+
+int idetape_blkdev_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+int idetape_blkdev_open (struct inode *inode, struct file *filp, ide_drive_t *drive);
+void idetape_blkdev_release (struct inode *inode, struct file *filp, ide_drive_t *drive);
+
+/*
+ * idetape_register_chrdev initializes the character device interface to
+ * the ide tape drive.
+ */
+
+void idetape_register_chrdev (void);
+
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+int idefloppy_identify_device (ide_drive_t *drive,struct hd_driveid *id);
+void idefloppy_setup (ide_drive_t *drive);
+void idefloppy_do_request (ide_drive_t *drive, struct request *rq, unsigned long block);
+void idefloppy_end_request (byte uptodate, ide_hwgroup_t *hwgroup);
+int idefloppy_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+int idefloppy_open (struct inode *inode, struct file *filp, ide_drive_t *drive);
+void idefloppy_release (struct inode *inode, struct file *filp, ide_drive_t *drive);
+int idefloppy_media_change (ide_drive_t *drive);
+unsigned long idefloppy_capacity (ide_drive_t *drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+
+#ifdef CONFIG_BLK_DEV_IDESCSI
+void idescsi_setup (ide_drive_t *drive);
+void idescsi_do_request (ide_drive_t *drive, struct request *rq, unsigned long block);
+void idescsi_end_request (byte uptodate, ide_hwgroup_t *hwgroup);
+int idescsi_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+int idescsi_open (struct inode *inode, struct file *filp, ide_drive_t *drive);
+void idescsi_ide_release (struct inode *inode, struct file *filp, ide_drive_t *drive);
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+
+#ifdef CONFIG_BLK_DEV_TRITON
+void ide_init_triton (byte, byte);
+void ide_init_promise (byte bus, byte fn, ide_hwif_t *hwif0, ide_hwif_t *hwif1, unsigned short dma);
+#endif /* CONFIG_BLK_DEV_TRITON */
diff --git a/linux/src/drivers/block/ide_modes.h b/linux/src/drivers/block/ide_modes.h
new file mode 100644
index 0000000..589fbfa
--- /dev/null
+++ b/linux/src/drivers/block/ide_modes.h
@@ -0,0 +1,226 @@
+#ifndef _IDE_MODES_H
+#define _IDE_MODES_H
+/*
+ * linux/drivers/block/ide_modes.h
+ *
+ * Copyright (C) 1996 Linus Torvalds, Igor Abramov, and Mark Lord
+ */
+
+#include <linux/config.h>
+
+/*
+ * Shared data/functions for determining best PIO mode for an IDE drive.
+ * Most of this stuff originally lived in cmd640.c, and changes to the
+ * ide_pio_blacklist[] table should be made with EXTREME CAUTION to avoid
+ * breaking the fragile cmd640.c support.
+ */
+
+#if defined(CONFIG_BLK_DEV_CMD640) || defined(CONFIG_IDE_CHIPSETS)
+
+/*
+ * Standard (generic) timings for PIO modes, from ATA2 specification.
+ * These timings are for access to the IDE data port register *only*.
+ * Some drives may specify a mode, while also specifying a different
+ * value for cycle_time (from drive identification data).
+ */
+typedef struct ide_pio_timings_s {
+ int setup_time; /* Address setup (ns) minimum */
+ int active_time; /* Active pulse (ns) minimum */
+ int cycle_time; /* Cycle time (ns) minimum = (setup + active + recovery) */
+} ide_pio_timings_t;
+
+typedef struct ide_pio_data_s {
+ byte pio_mode;
+ byte use_iordy;
+ byte overridden;
+ byte blacklisted;
+ unsigned int cycle_time;
+} ide_pio_data_t;
+
+#ifndef _IDE_C
+
+int ide_scan_pio_blacklist (char *model);
+byte ide_get_best_pio_mode (ide_drive_t *drive, byte mode_wanted, byte max_mode, ide_pio_data_t *d);
+extern const ide_pio_timings_t ide_pio_timings[6];
+
+#else /* _IDE_C */
+
+const ide_pio_timings_t ide_pio_timings[6] = {
+ { 70, 165, 600 }, /* PIO Mode 0 */
+ { 50, 125, 383 }, /* PIO Mode 1 */
+ { 30, 100, 240 }, /* PIO Mode 2 */
+ { 30, 80, 180 }, /* PIO Mode 3 with IORDY */
+ { 25, 70, 120 }, /* PIO Mode 4 with IORDY */
+ { 20, 50, 100 } /* PIO Mode 5 with IORDY (nonstandard) */
+};
+
+/*
+ * Black list. Some drives incorrectly report their maximal PIO mode,
+ * at least in respect to CMD640. Here we keep info on some known drives.
+ */
+static struct ide_pio_info {
+ const char *name;
+ int pio;
+} ide_pio_blacklist [] = {
+/* { "Conner Peripherals 1275MB - CFS1275A", 4 }, */
+ { "Conner Peripherals 540MB - CFS540A", 3 },
+
+ { "WDC AC2700", 3 },
+ { "WDC AC2540", 3 },
+ { "WDC AC2420", 3 },
+ { "WDC AC2340", 3 },
+ { "WDC AC2250", 0 },
+ { "WDC AC2200", 0 },
+ { "WDC AC21200", 4 },
+ { "WDC AC2120", 0 },
+ { "WDC AC2850", 3 },
+ { "WDC AC1270", 3 },
+ { "WDC AC1170", 1 },
+ { "WDC AC1210", 1 },
+ { "WDC AC280", 0 },
+/* { "WDC AC21000", 4 }, */
+ { "WDC AC31000", 3 },
+ { "WDC AC31200", 3 },
+/* { "WDC AC31600", 4 }, */
+
+ { "Maxtor 7131 AT", 1 },
+ { "Maxtor 7171 AT", 1 },
+ { "Maxtor 7213 AT", 1 },
+ { "Maxtor 7245 AT", 1 },
+ { "Maxtor 7345 AT", 1 },
+ { "Maxtor 7546 AT", 3 },
+ { "Maxtor 7540 AV", 3 },
+
+ { "SAMSUNG SHD-3121A", 1 },
+ { "SAMSUNG SHD-3122A", 1 },
+ { "SAMSUNG SHD-3172A", 1 },
+
+/* { "ST51080A", 4 },
+ * { "ST51270A", 4 },
+ * { "ST31220A", 4 },
+ * { "ST31640A", 4 },
+ * { "ST32140A", 4 },
+ * { "ST3780A", 4 },
+ */
+ { "ST5660A", 3 },
+ { "ST3660A", 3 },
+ { "ST3630A", 3 },
+ { "ST3655A", 3 },
+ { "ST3391A", 3 },
+ { "ST3390A", 1 },
+ { "ST3600A", 1 },
+ { "ST3290A", 0 },
+ { "ST3144A", 0 },
+
+ { "QUANTUM ELS127A", 0 },
+ { "QUANTUM ELS170A", 0 },
+ { "QUANTUM LPS240A", 0 },
+ { "QUANTUM LPS210A", 3 },
+ { "QUANTUM LPS270A", 3 },
+ { "QUANTUM LPS365A", 3 },
+ { "QUANTUM LPS540A", 3 },
+ { "QUANTUM LIGHTNING 540A", 3 },
+ { "QUANTUM LIGHTNING 730A", 3 },
+ { "QUANTUM FIREBALL", 3 }, /* For models 540/640/1080/1280 */
+ /* 1080A works fine in mode4 with triton */
+ { NULL, 0 }
+};
+
+/*
+ * This routine searches the ide_pio_blacklist for an entry
+ * matching the start/whole of the supplied model name.
+ *
+ * Returns -1 if no match found.
+ * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
+ */
+int ide_scan_pio_blacklist (char *model)
+{
+ struct ide_pio_info *p;
+
+ for (p = ide_pio_blacklist; p->name != NULL; p++) {
+ if (strncmp(p->name, model, strlen(p->name)) == 0)
+ return p->pio;
+ }
+ return -1;
+}
+
+/*
+ * This routine returns the recommended PIO settings for a given drive,
+ * based on the drive->id information and the ide_pio_blacklist[].
+ * This is used by most chipset support modules when "auto-tuning".
+ */
+
+/*
+ * Drive PIO mode auto selection
+ */
+byte ide_get_best_pio_mode (ide_drive_t *drive, byte mode_wanted, byte max_mode, ide_pio_data_t *d)
+{
+ int pio_mode;
+ int cycle_time = 0;
+ int use_iordy = 0;
+ struct hd_driveid* id = drive->id;
+ int overridden = 0;
+ int blacklisted = 0;
+
+ if (mode_wanted != 255) {
+ pio_mode = mode_wanted;
+ } else if (!drive->id) {
+ pio_mode = 0;
+ } else if ((pio_mode = ide_scan_pio_blacklist(id->model)) != -1) {
+ overridden = 1;
+ blacklisted = 1;
+ use_iordy = (pio_mode > 2);
+ } else {
+ pio_mode = id->tPIO;
+ if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
+ pio_mode = 2;
+ overridden = 1;
+ }
+ if (id->field_valid & 2) { /* drive implements ATA2? */
+ if (id->capability & 8) { /* drive supports use_iordy? */
+ use_iordy = 1;
+ cycle_time = id->eide_pio_iordy;
+ if (id->eide_pio_modes & 7) {
+ overridden = 0;
+ if (id->eide_pio_modes & 4)
+ pio_mode = 5;
+ else if (id->eide_pio_modes & 2)
+ pio_mode = 4;
+ else
+ pio_mode = 3;
+ }
+ } else {
+ cycle_time = id->eide_pio;
+ }
+ }
+
+ /*
+ * Conservative "downgrade" for all pre-ATA2 drives
+ */
+ if (pio_mode && pio_mode < 4) {
+ pio_mode--;
+ overridden = 1;
+#if 0
+ use_iordy = (pio_mode > 2);
+#endif
+ if (cycle_time && cycle_time < ide_pio_timings[pio_mode].cycle_time)
+ cycle_time = 0; /* use standard timing */
+ }
+ }
+ if (pio_mode > max_mode) {
+ pio_mode = max_mode;
+ cycle_time = 0;
+ }
+ if (d) {
+ d->pio_mode = pio_mode;
+ d->cycle_time = cycle_time ? cycle_time : ide_pio_timings[pio_mode].cycle_time;
+ d->use_iordy = use_iordy;
+ d->overridden = overridden;
+ d->blacklisted = blacklisted;
+ }
+ return pio_mode;
+}
+
+#endif /* _IDE_C */
+#endif /* defined(CONFIG_BLK_DEV_CMD640) || defined(CONFIG_IDE_CHIPSETS) */
+#endif /* _IDE_MODES_H */
diff --git a/linux/src/drivers/block/rz1000.c b/linux/src/drivers/block/rz1000.c
new file mode 100644
index 0000000..41b26f2
--- /dev/null
+++ b/linux/src/drivers/block/rz1000.c
@@ -0,0 +1,59 @@
+/*
+ * linux/drivers/block/rz1000.c Version 0.03 Mar 20, 1996
+ *
+ * Copyright (C) 1995-1996 Linus Torvalds & author (see below)
+ */
+
+/*
+ * Principal Author/Maintainer: mlord@pobox.com (Mark Lord)
+ *
+ * This file provides support for disabling the buggy read-ahead
+ * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
+ */
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include "ide.h"
+
+/* Report a failed PCI BIOS config-space access on the kernel log. */
+static void ide_pci_access_error (int rc)
+{
+ printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
+}
+
+/*
+ * Disable the buggy read-ahead mode of an RZ1000 IDE interface at PCI
+ * (bus, fn).  If the disable bit cannot be written, fall back to
+ * serializing both interfaces and turning off IRQ unmasking, which is
+ * the safe (but slower) way to run this chipset.
+ */
+void init_rz1000 (byte bus, byte fn)
+{
+ int rc;
+ unsigned short reg;
+
+ printk("ide0: buggy RZ1000 interface: ");
+ if ((rc = pcibios_read_config_word (bus, fn, PCI_COMMAND, &reg))) {
+ ide_pci_access_error (rc);
+ } else if (!(reg & 1)) {
+ /* PCI I/O decode bit clear: interface not enabled, nothing to fix */
+ printk("not enabled\n");
+ } else {
+ /* clear bit 13 of config register 0x40 to disable read-ahead */
+ if ((rc = pcibios_read_config_word(bus, fn, 0x40, &reg))
+ || (rc = pcibios_write_config_word(bus, fn, 0x40, reg & 0xdfff)))
+ {
+ /* read-ahead could not be disabled: use the slow workaround */
+ ide_hwifs[0].drives[0].no_unmask = 1;
+ ide_hwifs[0].drives[1].no_unmask = 1;
+ ide_hwifs[1].drives[0].no_unmask = 1;
+ ide_hwifs[1].drives[1].no_unmask = 1;
+ ide_hwifs[0].serialized = 1;
+ ide_hwifs[1].serialized = 1;
+ ide_pci_access_error (rc);
+ printk("serialized, disabled unmasking\n");
+ } else
+ printk("disabled read-ahead\n");
+ }
+}
diff --git a/linux/src/drivers/block/triton.c b/linux/src/drivers/block/triton.c
new file mode 100644
index 0000000..f4633d2
--- /dev/null
+++ b/linux/src/drivers/block/triton.c
@@ -0,0 +1,996 @@
+/*
+ * linux/drivers/block/triton.c Version 1.13 Aug 12, 1996
+ * Version 1.13a June 1998 - new chipsets
+ * Version 1.13b July 1998 - DMA blacklist
+ * Version 1.14 June 22, 1999
+ *
+ * Copyright (c) 1998-1999 Andre Hedrick
+ * Copyright (c) 1995-1996 Mark Lord
+ * May be copied or modified under the terms of the GNU General Public License
+ */
+
+/*
+ * This module provides support for Bus Master IDE DMA functions in various
+ * motherboard chipsets and PCI controller cards.
+ * Please check /Documentation/ide.txt and /Documentation/udma.txt for details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#include "ide.h"
+
+#undef DISPLAY_TRITON_TIMINGS /* define this to display timings */
+#undef DISPLAY_APOLLO_TIMINGS /* define this for extensive debugging information */
+#undef DISPLAY_ALI15X3_TIMINGS /* define this for extensive debugging information */
+
+#if defined(CONFIG_PROC_FS)
+#include <linux/stat.h>
+#include <linux/proc_fs.h>
+#ifdef DISPLAY_APOLLO_TIMINGS
+#include <linux/via_ide_dma.h>
+#endif
+#ifdef DISPLAY_ALI15X3_TIMINGS
+#include <linux/ali_ide_dma.h>
+#endif
+#endif
+
+/*
+ * good_dma_drives() lists the model names (from "hdparm -i")
+ * of drives which do not support mword2 DMA but which are
+ * known to work fine with this interface under Linux.
+ *
+ * NULL-terminated; matched with strcmp() against the identify-block
+ * model string in config_drive_for_dma().
+ */
+const char *good_dma_drives[] = {"Micropolis 2112A",
+ "CONNER CTMA 4000",
+ "CONNER CTT8000-A",
+ "QEMU HARDDISK",
+ NULL};
+
+/*
+ * bad_dma_drives() lists the model names (from "hdparm -i")
+ * of drives which supposedly support (U)DMA but which are
+ * known to corrupt data with this interface under Linux.
+ *
+ * Note: the list was generated by statistical analysis of problem
+ * reports. It's not clear if there are problems with the drives,
+ * or with some combination of drive/controller or what.
+ *
+ * You can forcibly override this if you wish. This is the kernel
+ * 'Tread carefully' list.
+ *
+ * Finally see http://www.wdc.com/quality/err-rec.html if you have
+ * one of the listed drives.
+ *
+ * NULL-terminated; consulted before the "good" list in
+ * config_drive_for_dma(), so a blacklist hit always disables DMA.
+ */
+const char *bad_dma_drives[] = {"WDC AC11000H",
+ "WDC AC22100H",
+ "WDC AC32500H",
+ "WDC AC33100H",
+ NULL};
+
+/*
+ * Our Physical Region Descriptor (PRD) table should be large enough
+ * to handle the biggest I/O request we are likely to see. Since requests
+ * can have no more than 256 sectors, and since the typical blocksize is
+ * two sectors, we could get by with a limit of 128 entries here for the
+ * usual worst case. Most requests seem to include some contiguous blocks,
+ * further reducing the number of table entries required.
+ *
+ * The driver reverts to PIO mode for individual requests that exceed
+ * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
+ * 100% of all crazy scenarios here is not necessary.
+ *
+ * As it turns out though, we must allocate a full 4KB page for this,
+ * so the two PRD tables (ide0 & ide1) will each get half of that,
+ * allowing each to have about 256 entries (8 bytes each) from this.
+ */
+#define PRD_BYTES 8 /* one PRD entry: 32-bit phys addr + 32-bit count/EOT word */
+#define PRD_ENTRIES (PAGE_SIZE / (2 * PRD_BYTES)) /* half a page per interface */
+#define DEFAULT_BMIBA 0xe800 /* in case BIOS did not init it */
+#define DEFAULT_BMCRBA 0xcc00 /* VIA's default value */
+#define DEFAULT_BMALIBA 0xd400 /* ALI's default value */
+
+/*
+ * dma_intr() is the handler for disk read/write DMA interrupts
+ *
+ * Stops the DMA engine, then: on a clean completion (drive ready and
+ * DMA status == transfer done) ends every chunk of the current request;
+ * otherwise logs the bad DMA status and hands the drive status to
+ * ide_error() for recovery.
+ */
+static void dma_intr (ide_drive_t *drive)
+{
+ byte stat, dma_stat;
+ int i;
+ struct request *rq = HWGROUP(drive)->rq;
+ unsigned short dma_base = HWIF(drive)->dma_base;
+
+ dma_stat = inb(dma_base+2); /* get DMA status */
+ outb(inb(dma_base)&~1, dma_base); /* stop DMA operation */
+ stat = GET_STAT(); /* get drive status */
+ if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
+ if ((dma_stat & 7) == 4) { /* verify good DMA status */
+ rq = HWGROUP(drive)->rq;
+ /* complete the request one current_nr_sectors chunk at a time */
+ for (i = rq->nr_sectors; i > 0;) {
+ i -= rq->current_nr_sectors;
+ ide_end_request(1, HWGROUP(drive));
+ }
+ return;
+ }
+ printk("%s: bad DMA status: 0x%02x\n", drive->name, dma_stat);
+ }
+ sti();
+ ide_error(drive, "dma_intr", stat);
+}
+
+/*
+ * build_dmatable() prepares a dma request.
+ * Returns 0 if all went okay, returns 1 otherwise.
+ *
+ * Walks the request's buffer list, merging physically-contiguous
+ * buffers, and fills the interface's PRD table with (address, count)
+ * pairs, splitting entries so that none crosses a 64kB boundary.
+ * The last entry is tagged with the End-Of-Table bit.
+ */
+static int build_dmatable (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ struct buffer_head *bh = rq->bh;
+ unsigned long size, addr, *table = HWIF(drive)->dmatable;
+ unsigned int count = 0;
+
+ do {
+ /*
+ * Determine addr and size of next buffer area. We assume that
+ * individual virtual buffers are always composed linearly in
+ * physical memory. For example, we assume that any 8kB buffer
+ * is always composed of two adjacent physical 4kB pages rather
+ * than two possibly non-adjacent physical 4kB pages.
+ */
+ if (bh == NULL) { /* paging and tape requests have (rq->bh == NULL) */
+ addr = virt_to_bus (rq->buffer);
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ size = drive->tape.pc->request_transfer;
+ else
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ size = rq->nr_sectors << 9;
+ } else {
+ /* group sequential buffers into one large buffer */
+ addr = virt_to_bus (bh->b_data);
+ size = bh->b_size;
+ while ((bh = bh->b_reqnext) != NULL) {
+ if ((addr + size) != virt_to_bus (bh->b_data))
+ break;
+ size += bh->b_size;
+ }
+ }
+
+ /*
+ * Fill in the dma table, without crossing any 64kB boundaries.
+ * We assume 16-bit alignment of all blocks.
+ */
+ while (size) {
+ if (++count >= PRD_ENTRIES) {
+ printk("%s: DMA table too small\n", drive->name);
+ return 1; /* revert to PIO for this request */
+ } else {
+ /* bytes remaining before the next 64kB boundary */
+ unsigned long bcount = 0x10000 - (addr & 0xffff);
+ if (bcount > size)
+ bcount = size;
+ *table++ = addr;
+ *table++ = bcount & 0xffff;
+ addr += bcount;
+ size -= bcount;
+ }
+ }
+ } while (bh != NULL);
+ if (count) {
+ *--table |= 0x80000000; /* set End-Of-Table (EOT) bit */
+ return 0;
+ }
+ printk("%s: empty DMA table?\n", drive->name);
+ return 1; /* let the PIO routines handle this weirdness */
+}
+
+/*
+ * We will only enable drives with multi-word (mode2) (U)DMA capabilities,
+ * and ignore the very rare cases of drives that can only do single-word
+ * (modes 0 & 1) (U)DMA transfers. We also discard "blacklisted" hard disks.
+ *
+ * Sets drive->using_dma as a side effect.
+ * Returns 0 when DMA was enabled for the drive, 1 otherwise
+ * (note: the QEMU special case returns 1 even though DMA is enabled).
+ */
+static int config_drive_for_dma (ide_drive_t *drive)
+{
+#ifndef CONFIG_BLK_DEV_FORCE_DMA
+ const char **list;
+ struct hd_driveid *id = drive->id;
+#endif
+
+#ifdef CONFIG_BLK_DEV_FORCE_DMA
+ drive->using_dma = 1;
+ return 0;
+#else
+ if (HWIF(drive)->chipset == ide_hpt343) {
+ drive->using_dma = 0; /* no DMA */
+ return 1; /* DMA disabled */
+ }
+
+ if (id && (id->capability & 1)) {
+ /* Consult the list of known "bad" drives */
+ list = bad_dma_drives;
+ while (*list) {
+ if (!strcmp(*list++,id->model)) {
+ drive->using_dma = 0; /* no DMA */
+ printk("ide: Disabling DMA modes on %s drive (%s).\n", drive->name, id->model);
+ return 1; /* DMA disabled */
+ }
+ }
+
+ if (!strcmp("QEMU HARDDISK", id->model)) {
+ /* Virtual disks don't have issues with DMA :) */
+ drive->using_dma = 1;
+ /* And keep enabled even if some requests time out due to emulation lag. */
+ drive->keep_settings = 1;
+ return 1; /* DMA enabled */
+ }
+ /* Enable DMA on any drive that has mode 4 or 2 UltraDMA enabled */
+ if (id->field_valid & 4) { /* UltraDMA */
+ /* Enable DMA on any drive that has mode 4 UltraDMA enabled */
+ if (((id->dma_ultra & 0x1010) == 0x1010) &&
+ (id->word93 & 0x2000) &&
+ (HWIF(drive)->chipset == ide_ultra66)) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ } else
+ /* Enable DMA on any drive that has mode 2 UltraDMA enabled */
+ if ((id->dma_ultra & 0x404) == 0x404) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ }
+ /* Enable DMA on any drive that has mode2 DMA enabled */
+ if (id->field_valid & 2) /* regular DMA */
+ if ((id->dma_mword & 0x404) == 0x404) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ /* Consult the list of known "good" drives */
+ list = good_dma_drives;
+ while (*list) {
+ if (!strcmp(*list++,id->model)) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ }
+ }
+ return 1; /* DMA not enabled */
+#endif
+}
+
+/*
+ * triton_dmaproc() initiates/aborts DMA read/write operations on a drive.
+ *
+ * The caller is assumed to have selected the drive and programmed the drive's
+ * sector address using CHS or LBA. All that remains is to prepare for DMA
+ * and then issue the actual read/write DMA/PIO command to the drive.
+ *
+ * For ATAPI devices, we just prepare for DMA and return. The caller should
+ * then issue the packet command to the drive and call us again with
+ * ide_dma_begin afterwards.
+ *
+ * Returns 0 if all went well.
+ * Returns 1 if DMA read/write could not be started, in which case
+ * the caller should revert to PIO for the current request.
+ */
+static int triton_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
+{
+ unsigned long dma_base = HWIF(drive)->dma_base;
+ unsigned int reading = (1 << 3);
+
+ switch (func) {
+ case ide_dma_abort:
+ outb(inb(dma_base)&~1, dma_base); /* stop DMA */
+ return 0;
+ case ide_dma_check:
+ return config_drive_for_dma (drive);
+ case ide_dma_write:
+ reading = 0;
+ /* fall through: read and write share the setup code below */
+ case ide_dma_read:
+ break;
+ case ide_dma_status_bad:
+ return ((inb(dma_base+2) & 7) != 4); /* verify good DMA status */
+ case ide_dma_transferred:
+#if 0
+ return (number of bytes actually transferred);
+#else
+ return (0);
+#endif
+ case ide_dma_begin:
+ outb(inb(dma_base)|1, dma_base); /* begin DMA */
+ return 0;
+ default:
+ printk("triton_dmaproc: unsupported func: %d\n", func);
+ return 1;
+ }
+ /* common setup path for ide_dma_read / ide_dma_write */
+ if (build_dmatable (drive))
+ return 1;
+ outl(virt_to_bus (HWIF(drive)->dmatable), dma_base + 4); /* PRD table */
+ outb(reading, dma_base); /* specify r/w */
+ outb(inb(dma_base+2)|0x06, dma_base+2); /* clear status bits */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ if (drive->media != ide_disk)
+ return 0; /* ATAPI: caller issues the packet cmd, then ide_dma_begin */
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ ide_set_handler(drive, &dma_intr, WAIT_CMD); /* issue cmd to drive */
+ OUT_BYTE(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
+ outb(inb(dma_base)|1, dma_base); /* begin DMA */
+ return 0;
+}
+
+#ifdef DISPLAY_TRITON_TIMINGS
+/*
+ * print_triton_drive_flags() displays the currently programmed options
+ * in the i82371 (Triton) for a given drive.
+ *
+ * If fastDMA is "no", then slow ISA timings are used for DMA data xfers.
+ * If fastPIO is "no", then slow ISA timings are used for PIO data xfers.
+ * If IORDY is "no", then IORDY is assumed to always be asserted.
+ * If PreFetch is "no", then data pre-fetch/post are not used.
+ *
+ * When "fastPIO" and/or "fastDMA" are "yes", then faster PCI timings and
+ * back-to-back 16-bit data transfers are enabled, using the sample_CLKs
+ * and recovery_CLKs (PCI clock cycles) timing parameters for that interface.
+ */
+/* Decode and log one drive's Triton timing flag nibble (see above). */
+static void print_triton_drive_flags (unsigned int unit, byte flags)
+{
+ printk(" %s ", unit ? "slave :" : "master:");
+ printk( "fastDMA=%s", (flags&9) ? "on " : "off");
+ printk(" PreFetch=%s", (flags&4) ? "on " : "off");
+ printk(" IORDY=%s", (flags&2) ? "on " : "off");
+ printk(" fastPIO=%s\n", ((flags&9)==1) ? "on " : "off");
+}
+#endif /* DISPLAY_TRITON_TIMINGS */
+
+/*
+ * Set up bus-master DMA for one interface: claim the 8-port BM-DMA
+ * I/O region at `base`, carve a PRD table out of a shared page
+ * (allocated once, then split between successive interfaces), point
+ * the controller at it and install triton_dmaproc as the dmaproc.
+ * On any failure the hwif is simply left without DMA.
+ */
+static void init_triton_dma (ide_hwif_t *hwif, unsigned short base)
+{
+ static unsigned long dmatable = 0;
+
+ printk(" %s: BM-DMA at 0x%04x-0x%04x", hwif->name, base, base+7);
+ if (check_region(base, 8)) {
+ printk(" -- ERROR, PORTS ALREADY IN USE");
+ } else {
+ request_region(base, 8, "IDE DMA");
+ hwif->dma_base = base;
+ if (!dmatable) {
+ /*
+ * The BM-DMA uses a full 32-bits, so we can
+ * safely use __get_free_page() here instead
+ * of __get_dma_pages() -- no ISA limitations.
+ */
+ dmatable = __get_free_pages(GFP_KERNEL, 1, 0);
+ }
+ if (dmatable) {
+ hwif->dmatable = (unsigned long *) dmatable;
+ /* advance the static cursor so the next hwif gets its own slice */
+ dmatable += (PRD_ENTRIES * PRD_BYTES);
+ outl(virt_to_bus(hwif->dmatable), base + 4);
+ hwif->dmaproc = &triton_dmaproc;
+ }
+ }
+ printk("\n");
+}
+
+/*
+ * Set VIA Chipset Timings for (U)DMA modes enabled.
+ *
+ * OR the given `post` bits into config register 0x41 (read prefetch /
+ * post-write buffers) and the `flush` bits into register 0x46 (channel
+ * read and end-of-sector FIFO flush).
+ * Returns 0 on success, 1 on any PCI config access failure.
+ */
+static int set_via_timings (byte bus, byte fn, byte post, byte flush)
+{
+ byte via_config = 0;
+ int rc = 0;
+
+ /* setting IDE read prefetch buffer and IDE post write buffer */
+ if ((rc = pcibios_read_config_byte(bus, fn, 0x41, &via_config)))
+ return (1);
+ if ((rc = pcibios_write_config_byte(bus, fn, 0x41, via_config | post)))
+ return (1);
+
+ /* setting Channel read and End-of-sector FIFO flush: */
+ if ((rc = pcibios_read_config_byte(bus, fn, 0x46, &via_config)))
+ return (1);
+ if ((rc = pcibios_write_config_byte(bus, fn, 0x46, via_config | flush)))
+ return (1);
+
+ return (0);
+}
+
+/*
+ * ALI15X3 (Aladdin) bridge setup: make the IDE channel enable/disable
+ * state readable by setting bit 6 of the prog-if byte (config reg 0x09),
+ * temporarily unlocking it via register 0x4D when write-protected.
+ * Returns 1 on success, 0 if any config access failed.
+ */
+static int setup_aladdin (byte bus, byte fn)
+{
+ byte confreg0 = 0, confreg1 = 0, progif = 0;
+ int errors = 0;
+
+ if (pcibios_read_config_byte(bus, fn, 0x50, &confreg1))
+ goto veryspecialsettingserror;
+ if (!(confreg1 & 0x02))
+ if (pcibios_write_config_byte(bus, fn, 0x50, confreg1 | 0x02))
+ goto veryspecialsettingserror;
+
+ if (pcibios_read_config_byte(bus, fn, 0x09, &progif))
+ goto veryspecialsettingserror;
+ if (!(progif & 0x40)) {
+ /*
+ * The way to enable them is to set progif
+ * writable at 0x4Dh register, and set bit 6
+ * of progif to 1:
+ */
+ if (pcibios_read_config_byte(bus, fn, 0x4d, &confreg0))
+ goto veryspecialsettingserror;
+ if (confreg0 & 0x80)
+ if (pcibios_write_config_byte(bus, fn, 0x4d, confreg0 & ~0x80))
+ goto veryspecialsettingserror;
+ if (pcibios_write_config_byte(bus, fn, 0x09, progif | 0x40))
+ goto veryspecialsettingserror;
+ /* restore the original write-protect state of 0x4D */
+ if (confreg0 & 0x80)
+ if (pcibios_write_config_byte(bus, fn, 0x4d, confreg0))
+ errors++;
+ }
+
+ /* verify the bit actually stuck */
+ if ((pcibios_read_config_byte(bus, fn, 0x09, &progif)) || (!(progif & 0x40)))
+ goto veryspecialsettingserror;
+
+ printk("ide: ALI15X3: enabled read of IDE channels state (en/dis-abled) %s.\n",
+ errors ? "with Error(s)" : "Succeeded" );
+ return 1;
+veryspecialsettingserror:
+ printk("ide: ALI15X3: impossible to enable read of IDE channels state (en/dis-abled)!\n");
+ return 0;
+}
+
+/*
+ * Reserve the extra I/O port range (beyond the standard BM-DMA block)
+ * used by Promise PDC20246/PDC20262 and HPT343 controllers, if it is
+ * still free.  Unknown devices are silently ignored.
+ */
+void set_promise_hpt343_extra (unsigned short device, unsigned int bmiba)
+{
+ switch(device) {
+ case PCI_DEVICE_ID_PROMISE_20246:
+ if(!check_region((bmiba+16), 16))
+ request_region((bmiba+16), 16, "PDC20246");
+ break;
+ case PCI_DEVICE_ID_PROMISE_20262:
+ if (!check_region((bmiba+48), 48))
+ request_region((bmiba+48), 48, "PDC20262");
+ break;
+ case PCI_DEVICE_ID_TTI_HPT343:
+ if(!check_region((bmiba+16), 16))
+ request_region((bmiba+16), 16, "HPT343");
+ break;
+ default:
+ break;
+ }
+}
+
+#define HPT343_PCI_INIT_REG 0x80 /* HPT343 init register, cleared before probing */
+
+/*
+ * ide_init_triton() prepares the IDE driver for DMA operation.
+ * This routine is called once, from ide.c during driver initialization,
+ * for each BM-DMA chipset which is found (rarely more than one).
+ *
+ * Outline: identify the controller by PCI vendor/device id, do any
+ * vendor-specific pre-setup, make sure the PCI I/O and bus-master bits
+ * are enabled, locate (or assign) the BM-DMA base address, verify that
+ * the IDE ports are enabled, and finally claim free ide_hwifs[] slots
+ * and wire them up with init_triton_dma().
+ */
+void ide_init_triton (byte bus, byte fn)
+{
+ byte bridgebus, bridgefn, bridgeset = 0, hpt34x_flag = 0;
+ unsigned char irq = 0;
+ int dma_enabled = 0, rc = 0, h;
+ unsigned short io[6], count = 0, step_count = 0, pass_count = 0;
+ unsigned short pcicmd, vendor, device, class;
+ unsigned int bmiba, timings, reg, tmp;
+ unsigned int addressbios = 0;
+ unsigned long flags;
+ unsigned index;
+
+#if defined(DISPLAY_APOLLO_TIMINGS) || defined(DISPLAY_ALI15X3_TIMINGS)
+ bmide_bus = bus;
+ bmide_fn = fn;
+#endif /* DISPLAY_APOLLO_TIMINGS || DISPLAY_ALI15X3_TIMINGS */
+
+/*
+ * We pick up the vendor, device, and class info for selecting the correct
+ * controller that is supported. Since we can access this routine more than
+ * once with the use of onboard and off-board EIDE controllers, a method
+ * of determining "who is who for what" is needed.
+ */
+
+ pcibios_read_config_word (bus, fn, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word (bus, fn, PCI_DEVICE_ID, &device);
+ pcibios_read_config_word (bus, fn, PCI_CLASS_DEVICE, &class);
+ pcibios_read_config_byte (bus, fn, PCI_INTERRUPT_LINE, &irq);
+
+ /* vendor-specific identification banner and pre-setup */
+ switch(vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ printk("ide: Intel 82371 ");
+ switch(device) {
+ case PCI_DEVICE_ID_INTEL_82371_0:
+ printk("PIIX (single FIFO) ");
+ break;
+ case PCI_DEVICE_ID_INTEL_82371SB_1:
+ printk("PIIX3 (dual FIFO) ");
+ break;
+ case PCI_DEVICE_ID_INTEL_82371AB:
+ printk("PIIX4 (dual FIFO) ");
+ break;
+ default:
+ printk(" (unknown) 0x%04x ", device);
+ break;
+ }
+ printk("DMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_SI:
+ printk("ide: SiS 5513 (dual FIFO) DMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_VIA:
+ printk("ide: VIA VT82C586B (split FIFO) UDMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_TTI:
+ /*PCI_CLASS_STORAGE_UNKNOWN == class */
+ if (device == PCI_DEVICE_ID_TTI_HPT343) {
+ pcibios_write_config_byte(bus, fn, HPT343_PCI_INIT_REG, 0x00);
+ pcibios_read_config_word(bus, fn, PCI_COMMAND, &pcicmd);
+ /* memory decode on means the chip runs as HPT345 */
+ hpt34x_flag = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
+#if 1
+ if (!hpt34x_flag) {
+ /* program the four BARs from the BM-DMA base, with
+ * I/O decode off while rewriting them */
+ save_flags(flags);
+ cli();
+ pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd & ~PCI_COMMAND_IO);
+ pcibios_read_config_dword(bus, fn, PCI_BASE_ADDRESS_4, &bmiba);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_0, bmiba | 0x20);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_1, bmiba | 0x34);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_2, bmiba | 0x28);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_3, bmiba | 0x3c);
+ pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd);
+ bmiba = 0;
+ restore_flags(flags);
+ }
+#endif
+ pcibios_write_config_byte(bus, fn, PCI_LATENCY_TIMER, 0x20);
+ goto hpt343_jump_in;
+ } else {
+ printk("ide: HPTXXX did == 0x%04X unsupport chipset error.\n", device);
+ return;
+ }
+ case PCI_VENDOR_ID_PROMISE:
+ /*
+ * I have been able to make my Promise Ultra33 UDMA card change class.
+ * It has reported as both PCI_CLASS_STORAGE_RAID and PCI_CLASS_STORAGE_IDE.
+ * Since the PCI_CLASS_STORAGE_RAID mode should automatically mirror the
+ * two halves of the PCI_CONFIG register data, but sometimes it forgets.
+ * Thus we guarantee that they are identical, with a quick check and
+ * correction if needed.
+ * PDC20246 (primary) PDC20247 (secondary) IDE hwif's.
+ *
+ * PDC20262 Promise Ultra66 UDMA.
+ *
+ * Note that Promise "stories,fibs,..." about this device not being
+ * capable of ATAPI and AT devices.
+ */
+ if (class != PCI_CLASS_STORAGE_IDE) {
+ unsigned char irq_mirror = 0;
+
+ pcibios_read_config_byte(bus, fn, (PCI_INTERRUPT_LINE)|0x80, &irq_mirror);
+ if (irq != irq_mirror) {
+ pcibios_write_config_byte(bus, fn, (PCI_INTERRUPT_LINE)|0x80, irq);
+ }
+ }
+ /* fall through: Promise shares the BAR scan with ARTOP below */
+ case PCI_VENDOR_ID_ARTOP:
+ /* PCI_CLASS_STORAGE_SCSI == class */
+ /*
+ * I have found that by stroking rom_enable_bit on both the AEC6210U/UF and
+ * PDC20246 controller cards, the features desired are almost guaranteed
+ * to be enabled and compatible. This ROM may not be registered in the
+ * config data, but it can be turned on. Registration failure has only
+ * been observed if and only if Linux sets up the pci_io_address in the
+ * 0x6000 range. If they are setup in the 0xef00 range it is reported.
+ * WHY??? got me.........
+ */
+hpt343_jump_in:
+ printk("ide: %s UDMA Bus Mastering ",
+ (device == PCI_DEVICE_ID_ARTOP_ATP850UF) ? "AEC6210" :
+ (device == PCI_DEVICE_ID_PROMISE_20246) ? "PDC20246" :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? "PDC20262" :
+ (hpt34x_flag && (device == PCI_DEVICE_ID_TTI_HPT343)) ? "HPT345" :
+ (device == PCI_DEVICE_ID_TTI_HPT343) ? "HPT343" : "UNKNOWN");
+ pcibios_read_config_dword(bus, fn, PCI_ROM_ADDRESS, &addressbios);
+ if (addressbios) {
+ pcibios_write_config_byte(bus, fn, PCI_ROM_ADDRESS, addressbios | PCI_ROM_ADDRESS_ENABLE);
+ printk("with ROM enabled at 0x%08x", addressbios);
+ }
+ /*
+ * This was stripped out of 2.1.XXX kernel code and parts from a patch called
+ * promise_update. This finds the PCI_BASE_ADDRESS spaces and makes them
+ * available for configuration later.
+ * PCI_BASE_ADDRESS_0 hwif0->io_base
+ * PCI_BASE_ADDRESS_1 hwif0->ctl_port
+ * PCI_BASE_ADDRESS_2 hwif1->io_base
+ * PCI_BASE_ADDRESS_3 hwif1->ctl_port
+ * PCI_BASE_ADDRESS_4 bmiba
+ */
+ memset(io, 0, 6 * sizeof(unsigned short));
+ for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) {
+ pcibios_read_config_dword(bus, fn, reg, &tmp);
+ if (tmp & PCI_BASE_ADDRESS_SPACE_IO)
+ io[count++] = tmp & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ break;
+ case PCI_VENDOR_ID_AL:
+ save_flags(flags);
+ cli();
+ /* find the M1533 ISA bridge to route IRQs through (see below) */
+ for (index = 0; !pcibios_find_device (PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, index, &bridgebus, &bridgefn); ++index) {
+ bridgeset = setup_aladdin(bus, fn);
+ }
+ restore_flags(flags);
+ printk("ide: ALI15X3 (dual FIFO) DMA Bus Mastering IDE ");
+ break;
+ default:
+ return;
+ }
+
+ printk("\n Controller on PCI bus %d function %d\n", bus, fn);
+
+ /*
+ * See if IDE and BM-DMA features are enabled:
+ */
+ if ((rc = pcibios_read_config_word(bus, fn, PCI_COMMAND, &pcicmd)))
+ goto quit;
+ if ((pcicmd & 1) == 0) {
+ printk("ide: ports are not enabled (BIOS)\n");
+ goto quit;
+ }
+ if ((pcicmd & 4) == 0) {
+ printk("ide: BM-DMA feature is not enabled (BIOS), enabling\n");
+ pcicmd |= 4;
+ pcibios_write_config_word(bus, fn, 0x04, pcicmd);
+ if ((rc = pcibios_read_config_word(bus, fn, 0x04, &pcicmd))) {
+ printk("ide: Couldn't read back PCI command\n");
+ goto quit;
+ }
+ }
+
+ if ((pcicmd & 4) == 0) {
+ printk("ide: BM-DMA feature couldn't be enabled\n");
+ } else {
+ /*
+ * Get the bmiba base address
+ */
+ int try_again = 1;
+ do {
+ if ((rc = pcibios_read_config_dword(bus, fn, PCI_BASE_ADDRESS_4, &bmiba)))
+ goto quit;
+ bmiba &= 0xfff0; /* extract port base address */
+ if (bmiba) {
+ dma_enabled = 1;
+ break;
+ } else {
+ /* BIOS left BAR4 unprogrammed: try assigning the
+ * vendor-specific default base once */
+ printk("ide: BM-DMA base register is invalid (0x%04x, PnP BIOS problem)\n", bmiba);
+ if (inb(((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA)) != 0xff || !try_again)
+ break;
+ printk("ide: setting BM-DMA base register to 0x%04x\n",
+ ((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA));
+ if ((rc = pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd&~1)))
+ goto quit;
+ rc = pcibios_write_config_dword(bus, fn, 0x20,
+ ((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA)|1);
+ if (pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd|5) || rc)
+ goto quit;
+ }
+ } while (try_again--);
+ }
+
+ /*
+ * See if ide port(s) are enabled
+ */
+ if ((rc = pcibios_read_config_dword(bus, fn,
+ (vendor == PCI_VENDOR_ID_PROMISE) ? 0x50 :
+ (vendor == PCI_VENDOR_ID_ARTOP) ? 0x54 :
+ (vendor == PCI_VENDOR_ID_SI) ? 0x48 :
+ (vendor == PCI_VENDOR_ID_AL) ? 0x08 :
+ 0x40, &timings)))
+ goto quit;
+ /*
+ * We do a vendor check since the Ultra33/66 and AEC6210
+ * holds their timings in a different location.
+ */
+#if 0
+ printk("ide: timings == %08x\n", timings);
+#endif
+ /*
+ * The switch preserves some stuff that was original.
+ */
+ switch(vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ if (!(timings & 0x80008000)) {
+ printk("ide: INTEL: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_VIA:
+ if(!(timings & 0x03)) {
+ printk("ide: VIA: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_AL:
+ /* isolate the channel-enable bits of the ALI register */
+ timings <<= 16;
+ timings >>= 24;
+ if (!(timings & 0x30)) {
+ printk("ide: ALI15X3: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_SI:
+ timings <<= 8;
+ timings >>= 24;
+ if (!(timings & 0x06)) {
+ printk("ide: SIS5513: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_PROMISE:
+ printk(" (U)DMA Burst Bit %sABLED " \
+ "Primary %s Mode " \
+ "Secondary %s Mode.\n",
+ (inb(bmiba + 0x001f) & 1) ? "EN" : "DIS",
+ (inb(bmiba + 0x001a) & 1) ? "MASTER" : "PCI",
+ (inb(bmiba + 0x001b) & 1) ? "MASTER" : "PCI" );
+#if 0
+ if (!(inb(bmiba + 0x001f) & 1)) {
+ outb(inb(bmiba + 0x001f)|0x01, (bmiba + 0x001f));
+ printk(" (U)DMA Burst Bit Forced %sABLED.\n",
+ (inb(bmiba + 0x001f) & 1) ? "EN" : "DIS");
+ }
+#endif
+ break;
+ case PCI_VENDOR_ID_ARTOP:
+ case PCI_VENDOR_ID_TTI:
+ default:
+ break;
+ }
+
+ /*
+ * Save the dma_base port addr for each interface
+ */
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ byte channel = ((h == 1) || (h == 3) || (h == 5)) ? 1 : 0;
+
+ /*
+ * This prevents the first contoller from accidentally
+ * initalizing the hwif's that it does not use and block
+ * an off-board ide-pci from getting in the game.
+ */
+ if ((step_count >= 2) || (pass_count >= 2)) {
+ goto quit;
+ }
+
+#if 0
+ if (hwif->chipset == ide_unknown)
+ printk("ide: index == %d channel(%d)\n", h, channel);
+#endif
+
+#ifdef CONFIG_BLK_DEV_OFFBOARD
+ /*
+ * This is a forced override for the onboard ide controller
+ * to be enabled, if one chooses to have an offboard ide-pci
+ * card as the primary booting device. This beasty is
+ * for offboard UDMA upgrades with hard disks, but saving
+ * the onboard DMA2 controllers for CDROMS, TAPES, ZIPS, etc...
+ */
+ if (((vendor == PCI_VENDOR_ID_INTEL) ||
+ (vendor == PCI_VENDOR_ID_SI) ||
+ (vendor == PCI_VENDOR_ID_VIA) ||
+ (vendor == PCI_VENDOR_ID_AL)) && (h >= 2)) {
+ hwif->io_base = channel ? 0x170 : 0x1f0;
+ hwif->ctl_port = channel ? 0x376 : 0x3f6;
+ hwif->irq = channel ? 15 : 14;
+ hwif->noprobe = 0;
+ }
+#endif /* CONFIG_BLK_DEV_OFFBOARD */
+ /*
+ * If the chipset is listed as "ide_unknown", lets get a
+ * hwif while they last. This does the first check on
+ * the current availability of the ide_hwifs[h] in question.
+ */
+ if (hwif->chipset != ide_unknown) {
+ continue;
+ } else if (vendor == PCI_VENDOR_ID_INTEL) {
+ unsigned short time;
+#ifdef DISPLAY_TRITON_TIMINGS
+ byte s_clks, r_clks;
+ unsigned short devid;
+#endif /* DISPLAY_TRITON_TIMINGS */
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ time = timings & 0xffff;
+ if ((time & 0x8000) == 0) /* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ time = timings >> 16;
+ if ((time & 0x8000) == 0) /* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
+ continue;
+ }
+#ifdef DISPLAY_TRITON_TIMINGS
+ s_clks = ((~time >> 12) & 3) + 2;
+ r_clks = ((~time >> 8) & 3) + 1;
+ printk(" %s timing: (0x%04x) sample_CLKs=%d, recovery_CLKs=%d\n",
+ hwif->name, time, s_clks, r_clks);
+ if ((time & 0x40) && !pcibios_read_config_word(bus, fn, PCI_DEVICE_ID, &devid)
+ && devid == PCI_DEVICE_ID_INTEL_82371SB_1) {
+ byte stime;
+ if (pcibios_read_config_byte(bus, fn, 0x44, &stime)) {
+ if (hwif->io_base == 0x1f0) {
+ s_clks = ~stime >> 6;
+ r_clks = ~stime >> 4;
+ } else {
+ s_clks = ~stime >> 2;
+ r_clks = ~stime;
+ }
+ s_clks = (s_clks & 3) + 2;
+ r_clks = (r_clks & 3) + 1;
+ printk(" slave: sample_CLKs=%d, recovery_CLKs=%d\n",
+ s_clks, r_clks);
+ }
+ }
+ print_triton_drive_flags (0, time & 0xf);
+ print_triton_drive_flags (1, (time >> 4) & 0xf);
+#endif /* DISPLAY_TRITON_TIMINGS */
+ } else if (vendor == PCI_VENDOR_ID_SI) {
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x02) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x04) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if (vendor == PCI_VENDOR_ID_VIA) {
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x02) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ if (set_via_timings(bus, fn, 0xc0, 0xa0))
+ goto quit;
+#ifdef DISPLAY_APOLLO_TIMINGS
+ proc_register_dynamic(&proc_root, &via_proc_entry);
+#endif /* DISPLAY_APOLLO_TIMINGS */
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x01) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ if (set_via_timings(bus, fn, 0x30, 0x50))
+ goto quit;
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if (vendor == PCI_VENDOR_ID_AL) {
+ byte ideic, inmir;
+ byte irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
+ 1, 11, 0, 12, 0, 14, 0, 15 };
+
+ if (bridgeset) {
+ /* route the hwif IRQ through the M1533 ISA bridge's
+ * interrupt mapping registers */
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x58, &ideic);
+ ideic = ideic & 0x03;
+ if ((channel && ideic == 0x03) || (!channel && !ideic)) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x44, &inmir);
+ inmir = inmir & 0x0f;
+ hwif->irq = irq_routing_table[inmir];
+ } else if (channel && !(ideic & 0x01)) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x75, &inmir);
+ inmir = inmir & 0x0f;
+ hwif->irq = irq_routing_table[inmir];
+ }
+ }
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x20) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ outb(inb(bmiba+2) & 0x60, bmiba+2);
+ if (inb(bmiba+2) & 0x80)
+ printk("ALI15X3: simplex device: DMA forced\n");
+#ifdef DISPLAY_ALI15X3_TIMINGS
+ proc_register_dynamic(&proc_root, &ali_proc_entry);
+#endif /* DISPLAY_ALI15X3_TIMINGS */
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x10) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ outb(inb(bmiba+10) & 0x60, bmiba+10);
+ if (inb(bmiba+10) & 0x80)
+ printk("ALI15X3: simplex device: DMA forced\n");
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if ((vendor == PCI_VENDOR_ID_PROMISE) ||
+ (vendor == PCI_VENDOR_ID_ARTOP) ||
+ (vendor == PCI_VENDOR_ID_TTI)) {
+ pass_count++;
+ if (vendor == PCI_VENDOR_ID_TTI) {
+ if ((!hpt34x_flag) && (h < 2)) {
+ goto quit;
+ } else if (hpt34x_flag) {
+ /* HPT345 mode: ports live inside the BM-DMA block */
+ hwif->io_base = channel ? (bmiba + 0x28) : (bmiba + 0x20);
+ hwif->ctl_port = channel ? (bmiba + 0x3e) : (bmiba + 0x36);
+ } else {
+ goto io_temps;
+ }
+ } else {
+io_temps:
+ /* take the io/ctl bases from the BARs scanned earlier */
+ tmp = channel ? 2 : 0;
+ hwif->io_base = io[tmp];
+ hwif->ctl_port = io[tmp + 1] + 2;
+ }
+ hwif->irq = irq;
+ hwif->noprobe = 0;
+
+ if (device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
+ hwif->serialized = 1;
+ }
+
+ if ((vendor == PCI_VENDOR_ID_PROMISE) ||
+ (vendor == PCI_VENDOR_ID_TTI)) {
+ set_promise_hpt343_extra(device, bmiba);
+ }
+
+ if (dma_enabled) {
+ if ((!check_region(bmiba, 8)) && (!channel)) {
+ hwif->chipset = ((vendor == PCI_VENDOR_ID_TTI) && !hpt34x_flag) ? ide_hpt343 :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? ide_ultra66 : ide_udma;
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if ((!check_region((bmiba + 0x08), 8)) && (channel)) {
+ hwif->chipset = ((vendor == PCI_VENDOR_ID_TTI) && !hpt34x_flag) ? ide_hpt343 :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? ide_ultra66 : ide_udma;
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
+ continue;
+ }
+ }
+ }
+ }
+
+ quit: if (rc) printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
+}
diff --git a/linux/src/drivers/net/3c501.c b/linux/src/drivers/net/3c501.c
new file mode 100644
index 0000000..200b95c
--- /dev/null
+++ b/linux/src/drivers/net/3c501.c
@@ -0,0 +1,856 @@
+/* 3c501.c: A 3Com 3c501 ethernet driver for linux. */
+/*
+ Written 1992,1993,1994 Donald Becker
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This is a device driver for the 3Com Etherlink 3c501.
+ Do not purchase this card, even as a joke. Its performance is horrible,
+ and it breaks in many ways.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Fixed (again!) the missing interrupt locking on TX/RX shifting.
+ Alan Cox <Alan.Cox@linux.org>
+
+ Removed calls to init_etherdev since they are no longer needed, and
+ cleaned up modularization just a bit. The driver still allows only
+ the default address for cards when loaded as a module, but that's
+ really less braindead than anyone using a 3c501 board. :)
+ 19950208 (invid@msen.com)
+
+ Added traps for interrupts hitting the window as we clear and TX load
+ the board. Now getting 150K/second FTP with a 3c501 card. Still playing
+ with a TX-TX optimisation to see if we can touch 180-200K/second as seems
+ theoretically maximum.
+ 19950402 Alan Cox <Alan.Cox@linux.org>
+
+ Some notes on this thing if you have to hack it. [Alan]
+
+ 1] Some documentation is available from 3Com. Due to the board's age
+ standard responses when you ask for this will range from 'be serious'
+ to 'give it to a museum'. The documentation is incomplete and mostly
+ of historical interest anyway.
+
+ 2] The basic system is a single buffer which can be used to receive or
+ transmit a packet. A third command mode exists when you are setting
+ things up.
+
+ 3] If it's transmitting it's not receiving and vice versa. In fact the
+ time to get the board back into useful state after an operation is
+ quite large.
+
+ 4] The driver works by keeping the board in receive mode waiting for a
+ packet to arrive. When one arrives it is copied out of the buffer
+ and delivered to the kernel. The card is reloaded and off we go.
+
+ 5] When transmitting dev->tbusy is set and the card is reset (from
+ receive mode) [possibly losing a packet just received] to command
+ mode. A packet is loaded and transmit mode triggered. The interrupt
+ handler runs different code for transmit interrupts and can handle
+ returning to receive mode or retransmissions (yes you have to help
+ out with those too).
+
+ Problems:
+ There are a wide variety of undocumented error returns from the card
+ and you basically have to kick the board and pray if they turn up. Most
+ only occur under extreme load or if you do something the board doesn't
+ like (eg touching a register at the wrong time).
+
+ The driver is less efficient than it could be. It switches through
+ receive mode even if more transmits are queued. If this worries you buy
+ a real ethernet card.
+
+ The combination of slow receive restart and no real multicast
+ filter makes the board unusable with a kernel compiled for IP
+ multicasting in a real multicast environment. That's down to the board,
+ but even with no multicast programs running a multicast IP kernel is
+ in group 224.0.0.1 and you will therefore be listening to all multicasts.
+ One nv conference running over that ethernet and you can give up.
+
+*/
+
+static const char *version =
+ "3c501.c: 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov).\n";
+
+/*
+ * Braindamage remaining:
+ * The 3c501 board.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/config.h> /* for CONFIG_IP_MULTICAST */
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#define BLOCKOUT_2
+
+/* A zero-terminated list of I/O addresses to be probed.
+ The 3c501 can be at many locations, but here are the popular ones. */
+static unsigned int netcard_portlist[] =
+ { 0x280, 0x300, 0};
+
+
+/*
+ * Index to functions.
+ */
+
+int el1_probe(struct device *dev);
+static int el1_probe1(struct device *dev, int ioaddr);
+static int el_open(struct device *dev);
+static int el_start_xmit(struct sk_buff *skb, struct device *dev);
+static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void el_receive(struct device *dev);
+static void el_reset(struct device *dev);
+static int el1_close(struct device *dev);
+static struct enet_statistics *el1_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+#define EL1_IO_EXTENT 16
+
+#ifndef EL_DEBUG
+#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
+#endif /* Anything above 5 is wordy death! */
+static int el_debug = EL_DEBUG;
+
+/*
+ * Board-specific info in dev->priv.
+ */
+
+struct net_local
+{
+ struct enet_statistics stats;
+ int tx_pkt_start; /* The length of the current Tx packet. */
+ int collisions; /* Tx collisions this packet */
+ int loading; /* Spot buffer load collisions */
+};
+
+
+#define RX_STATUS (ioaddr + 0x06)
+#define RX_CMD RX_STATUS
+#define TX_STATUS (ioaddr + 0x07)
+#define TX_CMD TX_STATUS
+#define GP_LOW (ioaddr + 0x08)
+#define GP_HIGH (ioaddr + 0x09)
+#define RX_BUF_CLR (ioaddr + 0x0A)
+#define RX_LOW (ioaddr + 0x0A)
+#define RX_HIGH (ioaddr + 0x0B)
+#define SAPROM (ioaddr + 0x0C)
+#define AX_STATUS (ioaddr + 0x0E)
+#define AX_CMD AX_STATUS
+#define DATAPORT (ioaddr + 0x0F)
+#define TX_RDY 0x08 /* In TX_STATUS */
+
+#define EL1_DATAPTR 0x08
+#define EL1_RXPTR 0x0A
+#define EL1_SAPROM 0x0C
+#define EL1_DATAPORT 0x0f
+
+/*
+ * Writes to the ax command register.
+ */
+
+#define AX_OFF 0x00 /* Irq off, buffer access on */
+#define AX_SYS 0x40 /* Load the buffer */
+#define AX_XMIT 0x44 /* Transmit a packet */
+#define AX_RX 0x48 /* Receive a packet */
+#define AX_LOOP 0x0C /* Loopback mode */
+#define AX_RESET 0x80
+
+/*
+ * Normal receive mode written to RX_STATUS. We must intr on short packets
+ * to avoid bogus rx lockups.
+ */
+
+#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
+#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
+#define RX_MULT 0xE8 /* Accept multicast packets. */
+#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
+
+/*
+ * TX_STATUS register.
+ */
+
+#define TX_COLLISION 0x02
+#define TX_16COLLISIONS 0x04
+#define TX_READY 0x08
+
+#define RX_RUNT 0x08
+#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
+#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
+
+
+/*
+ * The boilerplate probe code.
+ */
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry el1_drv = {"3c501", el1_probe1, EL1_IO_EXTENT, netcard_portlist};
+#else
+
+int el1_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el1_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++)
+ {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL1_IO_EXTENT))
+ continue;
+ if (el1_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/*
+ * The actual probe.
+ */
+
+static int el1_probe1(struct device *dev, int ioaddr)
+{
+ const char *mname; /* Vendor name */
+ unsigned char station_addr[6];
+ int autoirq = 0;
+ int i;
+
+ /*
+ * Read the station address PROM data from the special port.
+ */
+
+ for (i = 0; i < 6; i++)
+ {
+ outw(i, ioaddr + EL1_DATAPTR);
+ station_addr[i] = inb(ioaddr + EL1_SAPROM);
+ }
+ /*
+ * Check the first three octets of the S.A. for 3Com's prefix, or
+ * for the Sager NP943 prefix.
+ */
+
+ if (station_addr[0] == 0x02 && station_addr[1] == 0x60
+ && station_addr[2] == 0x8c)
+ {
+ mname = "3c501";
+ } else if (station_addr[0] == 0x00 && station_addr[1] == 0x80
+ && station_addr[2] == 0xC8)
+ {
+ mname = "NP943";
+ }
+ else
+ return ENODEV;
+
+ /*
+ * Grab the region so we can find another board if autoIRQ fails.
+ */
+
+ request_region(ioaddr, EL1_IO_EXTENT,"3c501");
+
+ /*
+ * We auto-IRQ by shutting off the interrupt line and letting it float
+ * high.
+ */
+
+ if (dev->irq < 2)
+ {
+ autoirq_setup(2);
+ inb(RX_STATUS); /* Clear pending interrupts. */
+ inb(TX_STATUS);
+ outb(AX_LOOP + 1, AX_CMD);
+
+ outb(0x00, AX_CMD);
+
+ autoirq = autoirq_report(1);
+
+ if (autoirq == 0)
+ {
+ printk("%s probe at %#x failed to detect IRQ line.\n",
+ mname, ioaddr);
+ return EAGAIN;
+ }
+ }
+
+ outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
+ dev->base_addr = ioaddr;
+ memcpy(dev->dev_addr, station_addr, ETH_ALEN);
+
+ if (dev->mem_start & 0xf)
+ el_debug = dev->mem_start & 0x7;
+ if (autoirq)
+ dev->irq = autoirq;
+
+ printk("%s: %s EtherLink at %#lx, using %sIRQ %d.\n", dev->name, mname, dev->base_addr,
+ autoirq ? "auto":"assigned ", dev->irq);
+
+#ifdef CONFIG_IP_MULTICAST
+ printk("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
+#endif
+
+ if (el_debug)
+ printk("%s", version);
+
+ /*
+ * Initialize the device structure.
+ */
+
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ /*
+ * The EL1-specific entries in the device structure.
+ */
+
+ dev->open = &el_open;
+ dev->hard_start_xmit = &el_start_xmit;
+ dev->stop = &el1_close;
+ dev->get_stats = &el1_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /*
+ * Setup the generic properties
+ */
+
+ ether_setup(dev);
+
+ return 0;
+}
+
+/*
+ * Open/initialize the board.
+ */
+
+static int el_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ printk("%s: Doing el_open()...", dev->name);
+
+ if (request_irq(dev->irq, &el_interrupt, 0, "3c501", NULL))
+ return -EAGAIN;
+
+ irq2dev_map[dev->irq] = dev;
+ el_reset(dev);
+
+ dev->start = 1;
+
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int el_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ if(dev->interrupt) /* May be unloading, don't stamp on */
+ return 1; /* the packet buffer this time */
+
+ if (dev->tbusy)
+ {
+ if (jiffies - dev->trans_start < 20)
+ {
+ if (el_debug > 2)
+ printk(" transmitter busy, deferred.\n");
+ return 1;
+ }
+ if (el_debug)
+ printk ("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
+ dev->name, inb(TX_STATUS), inb(AX_STATUS), inb(RX_STATUS));
+ lp->stats.tx_errors++;
+ outb(TX_NORM, TX_CMD);
+ outb(RX_NORM, RX_CMD);
+ outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ }
+
+ if (skb == NULL)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ save_flags(flags);
+
+ /*
+ * Avoid incoming interrupts between us flipping tbusy and flipping
+ * mode as the driver assumes tbusy is a faithful indicator of card
+ * state
+ */
+
+ cli();
+
+ /*
+ * Avoid timer-based retransmission conflicts.
+ */
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ {
+ restore_flags(flags);
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ }
+ else
+ {
+ int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ unsigned char *buf = skb->data;
+
+load_it_again_sam:
+ lp->tx_pkt_start = gp_start;
+ lp->collisions = 0;
+
+ /*
+ * Command mode with status cleared should [in theory]
+ * mean no more interrupts can be pending on the card.
+ */
+
+#ifdef BLOCKOUT_1
+ disable_irq(dev->irq);
+#endif
+ outb_p(AX_SYS, AX_CMD);
+ inb_p(RX_STATUS);
+ inb_p(TX_STATUS);
+
+ lp->loading=1;
+
+ /*
+ * Turn interrupts back on while we spend a pleasant afternoon
+ * loading bytes into the board
+ */
+
+ restore_flags(flags);
+ outw(0x00, RX_BUF_CLR); /* Set rx packet area to 0. */
+ outw(gp_start, GP_LOW); /* aim - packet will be loaded into buffer start */
+ outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */
+ outw(gp_start, GP_LOW); /* the board reuses the same register */
+#ifndef BLOCKOUT_1
+ if(lp->loading==2) /* A receive upset our load, despite our best efforts */
+ {
+ if(el_debug>2)
+ printk("%s: burped during tx load.\n", dev->name);
+ goto load_it_again_sam; /* Sigh... */
+ }
+#endif
+ outb(AX_XMIT, AX_CMD); /* fire ... Trigger xmit. */
+ lp->loading=0;
+#ifdef BLOCKOUT_1
+ enable_irq(dev->irq);
+#endif
+ dev->trans_start = jiffies;
+ }
+
+ if (el_debug > 2)
+ printk(" queued xmit.\n");
+ dev_kfree_skb (skb, FREE_WRITE);
+ return 0;
+}
+
+
+/*
+ * The typical workload of the driver:
+ * Handle the ether interface interrupts.
+ */
+
+static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr;
+ int axsr; /* Aux. status reg. */
+
+ if (dev == NULL || dev->irq != irq)
+ {
+ printk ("3c501 driver: irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /*
+ * What happened ?
+ */
+
+ axsr = inb(AX_STATUS);
+
+ /*
+ * Log it
+ */
+
+ if (el_debug > 3)
+ printk("%s: el_interrupt() aux=%#02x", dev->name, axsr);
+ if (dev->interrupt)
+ printk("%s: Reentering the interrupt driver!\n", dev->name);
+ dev->interrupt = 1;
+#ifndef BLOCKOUT_1
+ if(lp->loading==1 && !dev->tbusy)
+ printk("%s: Inconsistent state loading while not in tx\n",
+ dev->name);
+#endif
+#ifdef BLOCKOUT_3
+ lp->loading=2; /* So we can spot loading interruptions */
+#endif
+
+ if (dev->tbusy)
+ {
+
+ /*
+ * Board in transmit mode. May be loading. If we are
+ * loading we shouldn't have got this.
+ */
+
+ int txsr = inb(TX_STATUS);
+#ifdef BLOCKOUT_2
+ if(lp->loading==1)
+ {
+ if(el_debug > 2)
+ {
+ printk("%s: Interrupt while loading [", dev->name);
+ printk(" txsr=%02x gp=%04x rp=%04x]\n", txsr, inw(GP_LOW),inw(RX_LOW));
+ }
+ lp->loading=2; /* Force a reload */
+ dev->interrupt = 0;
+ return;
+ }
+#endif
+ if (el_debug > 6)
+ printk(" txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
+
+ if ((axsr & 0x80) && (txsr & TX_READY) == 0)
+ {
+ /*
+ * FIXME: is there a logic to whether to keep on trying or
+ * reset immediately ?
+ */
+ if(el_debug>1)
+ printk("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x"
+ " gp=%03x rp=%03x.\n", dev->name, txsr, axsr,
+ inw(ioaddr + EL1_DATAPTR), inw(ioaddr + EL1_RXPTR));
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ else if (txsr & TX_16COLLISIONS)
+ {
+ /*
+ * Timed out
+ */
+ if (el_debug)
+ printk("%s: Transmit failed 16 times, ethernet jammed?\n",dev->name);
+ outb(AX_SYS, AX_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ else if (txsr & TX_COLLISION)
+ {
+ /*
+ * Retrigger xmit.
+ */
+
+ if (el_debug > 6)
+ printk(" retransmitting after a collision.\n");
+ /*
+ * Poor little chip can't reset its own start pointer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ outw(lp->tx_pkt_start, GP_LOW);
+ outb(AX_XMIT, AX_CMD);
+ lp->stats.collisions++;
+ dev->interrupt = 0;
+ return;
+ }
+ else
+ {
+ /*
+ * It worked.. we will now fall through and receive
+ */
+ lp->stats.tx_packets++;
+ if (el_debug > 6)
+ printk(" Tx succeeded %s\n",
+ (txsr & TX_RDY) ? "." : "but tx is busy!");
+ /*
+ * This is safe: the interrupt is atomic WRT itself.
+ */
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* In case more to transmit */
+ }
+ }
+ else
+ {
+ /*
+ * In receive mode.
+ */
+
+ int rxsr = inb(RX_STATUS);
+ if (el_debug > 5)
+ printk(" rxsr=%02x txsr=%02x rp=%04x", rxsr, inb(TX_STATUS),inw(RX_LOW));
+ /*
+ * Just reading rx_status fixes most errors.
+ */
+ if (rxsr & RX_MISSED)
+ lp->stats.rx_missed_errors++;
+ else if (rxsr & RX_RUNT)
+ { /* Handled to avoid board lock-up. */
+ lp->stats.rx_length_errors++;
+ if (el_debug > 5)
+ printk(" runt.\n");
+ }
+ else if (rxsr & RX_GOOD)
+ {
+ /*
+ * Receive worked.
+ */
+ el_receive(dev);
+ }
+ else
+ {
+ /*
+ * Nothing? Something is broken!
+ */
+ if (el_debug > 2)
+ printk("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
+ dev->name, rxsr);
+ el_reset(dev);
+ }
+ if (el_debug > 3)
+ printk(".\n");
+ }
+
+ /*
+ * Move into receive mode
+ */
+
+ outb(AX_RX, AX_CMD);
+ outw(0x00, RX_BUF_CLR);
+ inb(RX_STATUS); /* Be certain that interrupts are cleared. */
+ inb(TX_STATUS);
+ dev->interrupt = 0;
+ return;
+}
+
+
+/*
+ * We have a good packet. Well, not really "good", just mostly not broken.
+ * We must check everything to see if it is good.
+ */
+
+static void el_receive(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int pkt_len;
+ struct sk_buff *skb;
+
+ pkt_len = inw(RX_LOW);
+
+ if (el_debug > 4)
+ printk(" el_receive %d.\n", pkt_len);
+
+ if ((pkt_len < 60) || (pkt_len > 1536))
+ {
+ if (el_debug)
+ printk("%s: bogus packet, length=%d\n", dev->name, pkt_len);
+ lp->stats.rx_over_errors++;
+ return;
+ }
+
+ /*
+ * Command mode so we can empty the buffer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ skb = dev_alloc_skb(pkt_len+2);
+
+ /*
+ * Start of frame
+ */
+
+ outw(0x00, GP_LOW);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ return;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* Force 16 byte alignment */
+ skb->dev = dev;
+ /*
+ * The read increments through the bytes. The interrupt
+ * handler will fix the pointer when it returns to
+ * receive mode.
+ */
+ insb(DATAPORT, skb_put(skb,pkt_len), pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ return;
+}
+
+static void el_reset(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug> 2)
+ printk("3c501 reset...");
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ outb(AX_LOOP, AX_CMD); /* Aux control, irq and loopback enabled */
+ {
+ int i;
+ for (i = 0; i < 6; i++) /* Set the station address. */
+ outb(dev->dev_addr[i], ioaddr + i);
+ }
+
+ outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
+ cli(); /* Avoid glitch on writes to CMD regs */
+ outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
+ outb(RX_NORM, RX_CMD); /* Set Rx commands. */
+ inb(RX_STATUS); /* Clear status. */
+ inb(TX_STATUS);
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ sti();
+}
+
+static int el1_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ printk("%s: Shutting down ethercard at %#x.\n", dev->name, ioaddr);
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /*
+ * Free and disable the IRQ.
+ */
+
+ free_irq(dev->irq, NULL);
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ irq2dev_map[dev->irq] = 0;
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static struct enet_statistics *el1_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * best-effort filtering.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ outb(RX_PROM, RX_CMD);
+ inb(RX_STATUS);
+ }
+ else if (dev->mc_list || dev->flags&IFF_ALLMULTI)
+ {
+ outb(RX_MULT, RX_CMD); /* Multicast or all multicast is the same */
+ inb(RX_STATUS); /* Clear status. */
+ }
+ else
+ {
+ outb(RX_NORM, RX_CMD);
+ inb(RX_STATUS);
+ }
+}
+
+#ifdef MODULE
+
+static char devicename[9] = { 0, };
+
+static struct device dev_3c501 =
+{
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x280, 5,
+ 0, 0, 0, NULL, el1_probe
+};
+
+static int io=0x280;
+static int irq=5;
+
+int init_module(void)
+{
+ dev_3c501.irq=irq;
+ dev_3c501.base_addr=io;
+ if (register_netdev(&dev_3c501) != 0)
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ /*
+ * No need to check MOD_IN_USE, as sys_delete_module() checks.
+ */
+
+ unregister_netdev(&dev_3c501);
+
+ /*
+ * Free up the private structure, or leak memory :-)
+ */
+
+ kfree(dev_3c501.priv);
+ dev_3c501.priv = NULL; /* gets re-allocated by el1_probe1 */
+
+ /*
+ * If we don't do this, we can't re-insmod it later.
+ */
+ release_region(dev_3c501.base_addr, EL1_IO_EXTENT);
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -m486 -c -o 3c501.o 3c501.c"
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c503.c b/linux/src/drivers/net/3c503.c
new file mode 100644
index 0000000..8ce488d
--- /dev/null
+++ b/linux/src/drivers/net/3c503.c
@@ -0,0 +1,690 @@
+/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver should work with the 3c503 and 3c503/16. It should be used
+ in shared memory mode for best performance, although it may also work
+ in programmed-I/O mode.
+
+ Sources:
+ EtherLink II Technical Reference Manual,
+ EtherLink II/16 Technical Reference Manual Supplement,
+ 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
+
+ The Crynwr 3c503 packet driver.
+
+ Changelog:
+
+ Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
+ Paul Gortmaker : multiple card support for module users.
+ rjohnson@analogic.com : Fix up PIO interface for efficient operation.
+
+*/
+
+static const char *version =
+ "3c503.c:v1.10 9/23/93 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+
+#include "8390.h"
+#include "3c503.h"
+#define WRD_COUNT 4
+
+int el2_probe(struct device *dev);
+int el2_pio_probe(struct device *dev);
+int el2_probe1(struct device *dev, int ioaddr);
+
+/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
+static unsigned int netcard_portlist[] =
+ { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
+
+#define EL2_IO_EXTENT 16
+
+#ifdef HAVE_DEVLIST
+/* The 3c503 uses two entries, one for the safe memory-mapped probe and
+ the other for the typical I/O probe. */
+struct netdev_entry el2_drv =
+{"3c503", el2_probe, EL1_IO_EXTENT, 0};
+struct netdev_entry el2pio_drv =
+{"3c503pio", el2_pioprobe1, EL1_IO_EXTENT, netcard_portlist};
+#endif
+
+static int el2_open(struct device *dev);
+static int el2_close(struct device *dev);
+static void el2_reset_8390(struct device *dev);
+static void el2_init_card(struct device *dev);
+static void el2_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void el2_block_input(struct device *dev, int count, struct sk_buff *skb,
+ int ring_offset);
+static void el2_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* This routine probes for a memory-mapped 3c503 board by looking for
+ the "location register" at the end of the jumpered boot PROM space.
+ This works even if a PROM isn't there.
+
+ If the ethercard isn't found there is an optional probe for
+ ethercard jumpered to programmed-I/O mode.
+ */
+int
+el2_probe(struct device *dev)
+{
+ int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (addr = addrs; *addr; addr++) {
+ int i;
+ unsigned int base_bits = readb(*addr);
+ /* Find first set bit. */
+ for(i = 7; i >= 0; i--, base_bits >>= 1)
+ if (base_bits & 0x1)
+ break;
+ if (base_bits != 1)
+ continue;
+ if (check_region(netcard_portlist[i], EL2_IO_EXTENT))
+ continue;
+ if (el2_probe1(dev, netcard_portlist[i]) == 0)
+ return 0;
+ }
+#if ! defined(no_probe_nonshared_memory) && ! defined (HAVE_DEVLIST)
+ return el2_pio_probe(dev);
+#else
+ return ENODEV;
+#endif
+}
+
+#ifndef HAVE_DEVLIST
+/* Try all of the locations that aren't obviously empty. This touches
+ a lot of locations, and is much riskier than the code above. */
+int
+el2_pio_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL2_IO_EXTENT))
+ continue;
+ if (el2_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* Probe for the Etherlink II card at I/O port base IOADDR,
+ returning non-zero on success. If found, set the station
+ address and memory parameters in DEVICE. */
+int
+el2_probe1(struct device *dev, int ioaddr)
+{
+ int i, iobase_reg, membase_reg, saved_406, wordlength;
+ static unsigned version_printed = 0;
+ unsigned long vendor_id;
+
+ /* Reset and/or avoid any lurking NE2000 */
+ if (inb(ioaddr + 0x408) == 0xff) {
+ udelay(1000);
+ return ENODEV;
+ }
+
+ /* We verify that it's a 3C503 board by checking the first three octets
+ of its ethernet address. */
+ iobase_reg = inb(ioaddr+0x403);
+ membase_reg = inb(ioaddr+0x404);
+ /* ASIC location registers should be 0 or have only a single bit set. */
+ if ( (iobase_reg & (iobase_reg - 1))
+ || (membase_reg & (membase_reg - 1))) {
+ return ENODEV;
+ }
+ saved_406 = inb_p(ioaddr + 0x406);
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
+ outb_p(ECNTRL_THIN, ioaddr + 0x406);
+ /* Map the station addr PROM into the lower I/O ports. We now check
+ for both the old and new 3Com prefix */
+ outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
+ vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
+ if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
+ /* Restore the register we frobbed. */
+ outb(saved_406, ioaddr + 0x406);
+ return ENODEV;
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("3c503.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ dev->base_addr = ioaddr;
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk ("3c503: unable to allocate memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ printk("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ /* Map the 8390 back into the window. */
+ outb(ECNTRL_THIN, ioaddr + 0x406);
+
+ /* Check for EL2/16 as described in tech. man. */
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+ outb_p(0, ioaddr + EN0_DCFG);
+ outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
+ wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+
+ /* Probe for, turn on and clear the board's shared memory. */
+ if (ei_debug > 2) printk(" memory jumpers %2.2x ", membase_reg);
+ outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
+
+ /* This should be probed for (or set via an ioctl()) at run-time.
+ Right now we use a sleazy hack to pass in the interface number
+ at boot-time via the low bits of the mem_end field. That value is
+ unused, and the low bits would be discarded even if it was used. */
+#if defined(EI8390_THICK) || defined(EL2_AUI)
+ ei_status.interface_num = 1;
+#else
+ ei_status.interface_num = dev->mem_end & 0xf;
+#endif
+ printk(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
+
+ if ((membase_reg & 0xf0) == 0) {
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ } else {
+ dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
+ ((membase_reg & 0xA0) ? 0x4000 : 0);
+
+#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256
+#ifdef EL2MEMTEST
+ /* This has never found an error, but someone might care.
+ Note that it only tests the 2nd 8kB on 16kB 3c503/16
+ cards between card addr. 0x2000 and 0x3fff. */
+ { /* Check the card's memory. */
+ unsigned long mem_base = dev->mem_start;
+ unsigned int test_val = 0xbbadf00d;
+ writel(0xba5eba5e, mem_base);
+ for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
+ writel(test_val, mem_base + i);
+ if (readl(mem_base) != 0xba5eba5e
+ || readl(mem_base + i) != test_val) {
+ printk("3c503: memory failure or memory address conflict.\n");
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ break;
+ }
+ test_val += 0x55555555;
+ writel(0, mem_base + i);
+ }
+ }
+#endif /* EL2MEMTEST */
+
+ dev->mem_end = dev->rmem_end = dev->mem_start + EL2_MEMSIZE;
+
+ if (wordlength) { /* No Tx pages to skip over to get to Rx */
+ dev->rmem_start = dev->mem_start;
+ ei_status.name = "3c503/16";
+ } else {
+ dev->rmem_start = TX_PAGES*256 + dev->mem_start;
+ ei_status.name = "3c503";
+ }
+ }
+
+ /*
+ Divide up the memory on the card. This is the same regardless of
+ whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
+ we use the entire 8k of bank1 for an Rx ring. We only use 3k
+ of the bank0 for 2 full size Tx packet slots. For 8 bit cards,
+ (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining
+ 5kB for an Rx ring. */
+
+ if (wordlength) {
+ ei_status.tx_start_page = EL2_MB0_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG;
+ } else {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ }
+
+ /* Finish setting the board's parameters. */
+ ei_status.stop_page = EL2_MB1_STOP_PG;
+ ei_status.word16 = wordlength;
+ ei_status.reset_8390 = &el2_reset_8390;
+ ei_status.get_8390_hdr = &el2_get_8390_hdr;
+ ei_status.block_input = &el2_block_input;
+ ei_status.block_output = &el2_block_output;
+
+ request_region(ioaddr, EL2_IO_EXTENT, ei_status.name);
+
+ if (dev->irq == 2)
+ dev->irq = 9;
+ else if (dev->irq > 5 && dev->irq != 9) {
+ printk("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
+ dev->irq);
+ dev->irq = 0;
+ }
+
+ ei_status.saved_irq = dev->irq;
+
+ dev->start = 0;
+ dev->open = &el2_open;
+ dev->stop = &el2_close;
+
+ if (dev->mem_start)
+ printk("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
+ dev->name, ei_status.name, (wordlength+1)<<3,
+ dev->mem_start, dev->mem_end-1);
+
+ else
+ {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ printk("\n%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
+ dev->name, ei_status.name, (wordlength+1)<<3);
+ }
+ return 0;
+}
+
+static int
+el2_open(struct device *dev)
+{
+
+ if (dev->irq < 2) {
+ int irqlist[] = {5, 9, 3, 4, 0};
+ int *irqp = irqlist;
+
+ outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
+ do {
+ if (request_irq (*irqp, NULL, 0, "bogus", NULL) != -EBUSY) {
+ /* Twinkle the interrupt, and check if it's seen. */
+ autoirq_setup(0);
+ outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
+ outb_p(0x00, E33G_IDCFR);
+ if (*irqp == autoirq_report(0) /* It's a good IRQ line! */
+ && request_irq (dev->irq = *irqp, &ei_interrupt, 0, ei_status.name, NULL) == 0)
+ break;
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+ return -EAGAIN;
+ }
+ } else {
+ if (request_irq(dev->irq, &ei_interrupt, 0, ei_status.name, NULL)) {
+ return -EAGAIN;
+ }
+ }
+
+ el2_init_card(dev);
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+el2_close(struct device *dev)
+{
+ free_irq(dev->irq, NULL);
+ dev->irq = ei_status.saved_irq;
+ irq2dev_map[dev->irq] = NULL;
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* This is called whenever we have a unrecoverable failure:
+ transmit timeout
+ Bad ring buffer packet header
+ */
+static void
+el2_reset_8390(struct device *dev)
+{
+ if (ei_debug > 1) {
+ printk("%s: Resetting the 3c503 board...", dev->name);
+ printk("%#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
+ E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
+ }
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
+ ei_status.txing = 0;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ el2_init_card(dev);
+ if (ei_debug > 1) printk("done\n");
+}
+
+/* Initialize the 3c503 GA registers after a reset. */
+static void
+el2_init_card(struct device *dev)
+{
+ /* Unmap the station PROM and select the DIX or BNC connector. */
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+
+ /* Set ASIC copy of rx's first and last+1 buffer pages */
+ /* These must be the same as in the 8390. */
+ outb(ei_status.rx_start_page, E33G_STARTPG);
+ outb(ei_status.stop_page, E33G_STOPPG);
+
+ /* Point the vector pointer registers somewhere ?harmless?. */
+ outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
+ outb(0xff, E33G_VP1);
+ outb(0x00, E33G_VP0);
+ /* Turn off all interrupts until we're opened. */
+ outb_p(0x00, dev->base_addr + EN0_IMR);
+ /* Enable IRQs iff started. */
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ /* Set the interrupt line. */
+ outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
+ outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */
+ outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
+ outb_p(0x00, E33G_DMAAL);
+ return; /* We always succeed */
+}
+
+/*
+ * Either use the shared memory (if enabled on the board) or put the packet
+ * out through the ASIC FIFO.
+ */
+static void
+el2_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ unsigned short int *wrd;
+ int boguscount; /* timeout counter */
+ unsigned short word; /* temporary for better machine code */
+
+ if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
+ outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
+ else
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ if (dev->mem_start) { /* Shared memory transfer */
+ unsigned long dest_addr = dev->mem_start +
+ ((start_page - ei_status.tx_start_page) << 8);
+ memcpy_toio(dest_addr, buf, count);
+ outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */
+ return;
+ }
+
+/*
+ * No shared memory, put the packet out the other way.
+ * Set up then start the internal memory transfer to Tx Start Page
+ */
+
+ word = (unsigned short)start_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I am going to write data to the FIFO as quickly as possible.
+ * Note that E33G_FIFOH is defined incorrectly. It is really
+ * E33G_FIFOL, the lowest port address for both the byte and
+ * word write. Variable 'count' is NOT checked. Caller must supply a
+ * valid count. Note that I may write a harmless extra byte to the
+ * 8390 if the byte-count was not even.
+ */
+ wrd = (unsigned short int *) buf;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_block_output.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ outsw(E33G_FIFOH, wrd, WRD_COUNT);
+ wrd += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ outsw(E33G_FIFOH, wrd, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ return;
+}
+
+/* Read the 4 byte, page aligned 8390 specific header. */
+static void
+el2_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int boguscount;
+ unsigned long hdr_start = dev->mem_start + ((ring_page - EL2_MB1_START_PG)<<8);
+ unsigned short word;
+
+ if (dev->mem_start) { /* Use the shared memory. */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+
+ word = (unsigned short)ring_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
+ memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+
+static void
+el2_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int boguscount = 0;
+ unsigned short int *buf;
+ unsigned short word;
+
+ int end_of_ring = dev->rmem_end;
+
+ /* Maybe enable shared memory just be to be safe... nahh.*/
+ if (dev->mem_start) { /* Use the shared memory. */
+ ring_offset -= (EL2_MB1_START_PG<<8);
+ if (dev->mem_start + ring_offset + count > end_of_ring) {
+ /* We must wrap the input move. */
+ int semi_count = end_of_ring - (dev->mem_start + ring_offset);
+ memcpy_fromio(skb->data, dev->mem_start + ring_offset, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, dev->mem_start + ring_offset, count, 0);
+ }
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+ word = (unsigned short) ring_offset;
+ outb(word>>8, E33G_DMAAH);
+ outb(word&0xFF, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I also try to get data as fast as possible. I am betting that I
+ * can read one extra byte without clobbering anything in the kernel because
+ * this would only occur on an odd byte-count and allocation of skb->data
+ * is word-aligned. Variable 'count' is NOT checked. Caller must check
+ * for a valid count.
+ * [This is currently quite safe.... but if one day the 3c503 explodes
+ * you know where to come looking ;)]
+ */
+
+ buf = (unsigned short int *) skb->data;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_block_input.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ insw(E33G_FIFOH, buf, WRD_COUNT);
+ buf += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ insw(E33G_FIFOH, buf, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ return;
+}
+#ifdef MODULE
+#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
+#define NAMELEN 8 /* #of chars for storing dev->name */
+
+static char namelist[NAMELEN * MAX_EL2_CARDS] = { 0, };
+static struct device dev_el2[MAX_EL2_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_EL2_CARDS] = { 0, };
+static int irq[MAX_EL2_CARDS] = { 0, };
+static int xcvr[MAX_EL2_CARDS] = { 0, }; /* choose int. or ext. xcvr */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct device *dev = &dev_el2[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ dev->init = el2_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "3c503.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct device *dev = &dev_el2[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: el2_close() handles free_irq + irq2dev map */
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(dev->base_addr, EL2_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c503.h b/linux/src/drivers/net/3c503.h
new file mode 100644
index 0000000..b9f8a46
--- /dev/null
+++ b/linux/src/drivers/net/3c503.h
@@ -0,0 +1,91 @@
+/* Definitions for the 3Com 3c503 Etherlink 2. */
+/* This file is distributed under the GPL.
+ Many of these names and comments are directly from the Crynwr packet
+ drivers, which are released under the GPL. */
+
+#define EL2H (dev->base_addr + 0x400)
+#define EL2L (dev->base_addr)
+
+/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
+ out of available addresses on the first one... */
+
+#define OLD_3COM_ID 0x02608c
+#define NEW_3COM_ID 0x0020af
+
+/* Shared memory management parameters. NB: The 8 bit cards have only
+ one bank (MB1) which serves both Tx and Rx packet space. The 16bit
+ cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
+ You choose which bank appears in the sh. mem window with EGACFR_MBSn */
+
+#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
+#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
+#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
+
+/* 3Com 3c503 ASIC registers */
+#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
+#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
+#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
+#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
+ /* (non-useful, but it also appears at the end of EPROM space) */
+#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
+#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
+#define E33G_CNTRL (EL2H+6) /* Board's main control register */
+#define E33G_STATUS (EL2H+7) /* Status on completions. */
+#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
+ /* (Which IRQ to assert, DMA chan to use) */
+#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
+#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
+/* "Vector pointer" - if this address matches a read, the EPROM (rather than
+ shared RAM) is mapped into memory space. */
+#define E33G_VP2 (EL2H+11)
+#define E33G_VP1 (EL2H+12)
+#define E33G_VP0 (EL2H+13)
+#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
+#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
+
+/* Bits in E33G_CNTRL register: */
+
+#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
+#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
+#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
+#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
+#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
+#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
+#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
+#define ECNTRL_START (0x80) /* Start the DMA logic */
+
+/* Bits in E33G_STATUS register: */
+
+#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
+#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
+#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
+#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
+#define ESTAT_DIP (0x08) /* DMA In Progress */
+
+/* Bits in E33G_GACFR register: */
+
+#define EGACFR_NIM (0x80) /* NIC interrupt mask */
+#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
+#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
+#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
+#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
+#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
+
+#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
+#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
+
+/*
+ MBS2 MBS1 MBS0 Sh. mem windows card mem at:
+ ---- ---- ---- -----------------------------
+ 0 0 0 0x0000 -- bank 0
+ 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
+ 0 1 0 0x4000 -- bank 2, not used
+ 0 1 1 0x6000 -- bank 3, not used
+
+There was going to be a 32k card that used bank 2 and 3, but it
+never got produced.
+
+*/
+
+
+/* End of 3C503 parameter definitions */
diff --git a/linux/src/drivers/net/3c505.c b/linux/src/drivers/net/3c505.c
new file mode 100644
index 0000000..d78dad5
--- /dev/null
+++ b/linux/src/drivers/net/3c505.c
@@ -0,0 +1,1732 @@
+/*
+ * Linux ethernet device driver for the 3Com Etherlink Plus (3C505)
+ * By Craig Southeren, Juha Laiho and Philip Blundell
+ *
+ * 3c505.c This module implements an interface to the 3Com
+ * Etherlink Plus (3c505) ethernet card. Linux device
+ * driver interface reverse engineered from the Linux 3C509
+ * device drivers. Some 3C505 information gleaned from
+ * the Crynwr packet driver. Still this driver would not
+ * be here without 3C505 technical reference provided by
+ * 3Com.
+ *
+ * $Id: 3c505.c,v 1.1 1999/04/26 05:51:48 tb Exp $
+ *
+ * Authors: Linux 3c505 device driver by
+ * Craig Southeren, <craigs@ineluki.apana.org.au>
+ * Final debugging by
+ * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
+ * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
+ * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
+ * Linux 3C509 driver by
+ * Donald Becker, <becker@super.org>
+ * Crynwr packet driver by
+ * Krishnan Gopalan and Gregg Stefancik,
+ * Clemson University Engineering Computer Operations.
+ * Portions of the code have been adapted from the 3c505
+ * driver for NCSA Telnet by Bruce Orchard and later
+ * modified by Warren Van Houten and krus@diku.dk.
+ * 3C505 technical information provided by
+ * Terry Murphy, of 3Com Network Adapter Division
+ * Linux 1.3.0 changes by
+ * Alan Cox <Alan.Cox@linux.org>
+ * More debugging and DMA version by Philip Blundell
+ */
+
+/* Theory of operation:
+
+ * The 3c505 is quite an intelligent board. All communication with it is done
+ * by means of Primary Command Blocks (PCBs); these are transferred using PIO
+ * through the command register. The card has 256k of on-board RAM, which is
+ * used to buffer received packets. It might seem at first that more buffers
+ * are better, but in fact this isn't true. From my tests, it seems that
+ * more than about 10 buffers are unnecessary, and there is a noticeable
+ * performance hit in having more active on the card. So the majority of the
+ * card's memory isn't, in fact, used.
+ *
+ * We keep up to 4 "receive packet" commands active on the board at a time.
+ * When a packet comes in, so long as there is a receive command active, the
+ * board will send us a "packet received" PCB and then add the data for that
+ * packet to the DMA queue. If a DMA transfer is not already in progress, we
+ * set one up to start uploading the data. We have to maintain a list of
+ * backlogged receive packets, because the card may decide to tell us about
+ * a newly-arrived packet at any time, and we may not be able to start a DMA
+ * transfer immediately (ie one may already be going on). We can't NAK the
+ * PCB, because then it would throw the packet away.
+ *
+ * Trying to send a PCB to the card at the wrong moment seems to have bad
+ * effects. If we send it a transmit PCB while a receive DMA is happening,
+ * it will just NAK the PCB and so we will have wasted our time. Worse, it
+ * sometimes seems to interrupt the transfer. The majority of the low-level
+ * code is protected by one huge semaphore -- "busy" -- which is set whenever
+ * it probably isn't safe to do anything to the card. The receive routine
+ * must gain a lock on "busy" before it can start a DMA transfer, and the
+ * transmit routine must gain a lock before it sends the first PCB to the card.
+ * The send_pcb() routine also has an internal semaphore to protect it against
+ * being re-entered (which would be disastrous) -- this is needed because
+ * several things can happen asynchronously (re-priming the receiver and
+ * asking the card for statistics, for example). send_pcb() will also refuse
+ * to talk to the card at all if a DMA upload is happening. The higher-level
+ * networking code will reschedule a later retry if some part of the driver
+ * is blocked. In practice, this doesn't seem to happen very often.
+ */
+
+/* This driver will not work with revision 2 hardware, because the host
+ * control register is write-only. It should be fairly easy to arrange to
+ * keep our own soft-copy of the intended contents of this register, if
+ * somebody has the time. There may be firmware differences that cause
+ * other problems, though, and I don't have an old card to test.
+ */
+
+/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly
+ * to make it more reliable, and secondly to add DMA mode. Many things could
+ * probably be done better; the concurrency protection is particularly awful.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "3c505.h"
+
+#define ELP_DMA 6 /* DMA channel to use */
+#define ELP_RX_PCBS 4
+
+/*********************************************************
+ *
+ * define debug messages here as common strings to reduce space
+ *
+ *********************************************************/
+
+static const char *filename = __FILE__;
+
+static const char *timeout_msg = "*** timeout at %s:%s (line %d) ***\n";
+#define TIMEOUT_MSG(lineno) \
+ printk(timeout_msg, filename,__FUNCTION__,(lineno))
+
+static const char *invalid_pcb_msg =
+"*** invalid pcb length %d at %s:%s (line %d) ***\n";
+#define INVALID_PCB_MSG(len) \
+ printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__)
+
+static const char *search_msg = "%s: Looking for 3c505 adapter at address %#x...";
+
+static const char *stilllooking_msg = "still looking...";
+
+static const char *found_msg = "found.\n";
+
+static const char *notfound_msg = "not found (reason = %d)\n";
+
+static const char *couldnot_msg = "%s: 3c505 not found\n";
+
+/*********************************************************
+ *
+ * various other debug stuff
+ *
+ *********************************************************/
+
+#ifdef ELP_DEBUG
+static const int elp_debug = ELP_DEBUG;
+#else
+static const int elp_debug = 0;
+#endif
+
+/*
+ * 0 = no messages (well, some)
+ * 1 = messages when high level commands performed
+ * 2 = messages when low level commands performed
+ * 3 = messages when interrupts received
+ */
+
+/*****************************************************************
+ *
+ * useful macros
+ *
+ *****************************************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+
+/*****************************************************************
+ *
+ * List of I/O-addresses we try to auto-sense
+ * Last element MUST BE 0!
+ *****************************************************************/
+
+const int addr_list[] = {0x300, 0x280, 0x310, 0};
+
+/* Dma Memory related stuff */
+
+/* Pure 2^n version of get_order */
+static inline int __get_order(unsigned long size)
+{
+ int order;
+
+ size = (size - 1) >> (PAGE_SHIFT - 1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+static unsigned long dma_mem_alloc(int size)
+{
+ int order = __get_order(size);
+
+ return __get_dma_pages(GFP_KERNEL, order);
+}
+
+
+/*****************************************************************
+ *
+ * Functions for I/O (note the inline !)
+ *
+ *****************************************************************/
+
+static inline unsigned char inb_status(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_STATUS);
+}
+
+static inline unsigned char inb_control(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_CONTROL);
+}
+
+static inline int inb_command(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_COMMAND);
+}
+
+static inline void outb_control(unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr + PORT_CONTROL);
+}
+
+static inline void outb_command(unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr + PORT_COMMAND);
+}
+
+static inline unsigned int inw_data(unsigned int base_addr)
+{
+ return inw(base_addr + PORT_DATA);
+}
+
+static inline void outw_data(unsigned int val, unsigned int base_addr)
+{
+ outw(val, base_addr + PORT_DATA);
+}
+
+
+/*****************************************************************
+ *
+ * structure to hold context information for adapter
+ *
+ *****************************************************************/
+
+#define DMA_BUFFER_SIZE 1600
+#define BACKLOG_SIZE 4
+
+typedef struct {
+ volatile short got[NUM_TRANSMIT_CMDS]; /* flags for command completion */
+ pcb_struct tx_pcb; /* PCB for foreground sending */
+ pcb_struct rx_pcb; /* PCB for foreground receiving */
+ pcb_struct itx_pcb; /* PCB for background sending */
+ pcb_struct irx_pcb; /* PCB for background receiving */
+ struct enet_statistics stats;
+
+ void *dma_buffer;
+
+ struct {
+ unsigned int length[BACKLOG_SIZE];
+ unsigned int in;
+ unsigned int out;
+ } rx_backlog;
+
+ struct {
+ unsigned int direction;
+ unsigned int length;
+ unsigned int copy_flag;
+ struct sk_buff *skb;
+ long int start_time;
+ } current_dma;
+
+ /* flags */
+ unsigned long send_pcb_semaphore;
+ unsigned int dmaing;
+ unsigned long busy;
+
+ unsigned int rx_active; /* number of receive PCBs */
+} elp_device;
+
+static inline unsigned int backlog_next(unsigned int n)
+{
+ return (n + 1) % BACKLOG_SIZE;
+}
+
+/*****************************************************************
+ *
+ * useful functions for accessing the adapter
+ *
+ *****************************************************************/
+
+/*
+ * use this routine when accessing the ASF bits as they are
+ * changed asynchronously by the adapter
+ */
+
+/* get adapter PCB status */
+#define GET_ASF(addr) \
+ (get_status(addr)&ASF_PCB_MASK)
+
+static inline int get_status(unsigned int base_addr)
+{
+ int timeout = jiffies + 10;
+ register int stat1;
+ do {
+ stat1 = inb_status(base_addr);
+ } while (stat1 != inb_status(base_addr) && jiffies < timeout);
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ return stat1;
+}
+
+static inline void set_hsf(unsigned int base_addr, int hsf)
+{
+ cli();
+ outb_control((inb_control(base_addr) & ~HSF_PCB_MASK) | hsf, base_addr);
+ sti();
+}
+
+static int start_receive(struct device *, pcb_struct *);
+
+inline static void adapter_reset(struct device *dev)
+{
+ int timeout;
+ unsigned char orig_hcr = inb_control(dev->base_addr);
+
+ elp_device *adapter = dev->priv;
+
+ outb_control(0, dev->base_addr);
+
+ if (inb_status(dev->base_addr) & ACRF) {
+ do {
+ inb_command(dev->base_addr);
+ timeout = jiffies + 2;
+ while ((jiffies <= timeout) && !(inb_status(dev->base_addr) & ACRF));
+ } while (inb_status(dev->base_addr) & ACRF);
+ set_hsf(dev->base_addr, HSF_PCB_NAK);
+ }
+ outb_control(inb_control(dev->base_addr) | ATTN | DIR, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+ outb_control(inb_control(dev->base_addr) & ~ATTN, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+ outb_control(inb_control(dev->base_addr) | FLSH, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+ outb_control(inb_control(dev->base_addr) & ~FLSH, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+
+ outb_control(orig_hcr, dev->base_addr);
+ if (!start_receive(dev, &adapter->tx_pcb))
+ printk("%s: start receive command failed \n", dev->name);
+}
+
+/* Check to make sure that a DMA transfer hasn't timed out. This should never happen
+ * in theory, but seems to occur occasionally if the card gets prodded at the wrong
+ * time.
+ */
+static inline void check_dma(struct device *dev)
+{
+ elp_device *adapter = dev->priv;
+ if (adapter->dmaing && (jiffies > (adapter->current_dma.start_time + 10))) {
+ unsigned long flags;
+ printk("%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? "download" : "upload", get_dma_residue(dev->dma));
+ save_flags(flags);
+ cli();
+ adapter->dmaing = 0;
+ adapter->busy = 0;
+ disable_dma(dev->dma);
+ if (adapter->rx_active)
+ adapter->rx_active--;
+ outb_control(inb_control(dev->base_addr) & ~(DMAE | TCEN | DIR), dev->base_addr);
+ restore_flags(flags);
+ }
+}
+
+/* Primitive functions used by send_pcb() */
+static inline unsigned int send_pcb_slow(unsigned int base_addr, unsigned char byte)
+{
+ unsigned int timeout;
+ outb_command(byte, base_addr);
+ for (timeout = jiffies + 5; jiffies < timeout;) {
+ if (inb_status(base_addr) & HCRE)
+ return FALSE;
+ }
+ printk("3c505: send_pcb_slow timed out\n");
+ return TRUE;
+}
+
+static inline unsigned int send_pcb_fast(unsigned int base_addr, unsigned char byte)
+{
+ unsigned int timeout;
+ outb_command(byte, base_addr);
+ for (timeout = 0; timeout < 40000; timeout++) {
+ if (inb_status(base_addr) & HCRE)
+ return FALSE;
+ }
+ printk("3c505: send_pcb_fast timed out\n");
+ return TRUE;
+}
+
+/* Check to see if the receiver needs restarting, and kick it if so */
+static inline void prime_rx(struct device *dev)
+{
+ elp_device *adapter = dev->priv;
+ while (adapter->rx_active < ELP_RX_PCBS && dev->start) {
+ if (!start_receive(dev, &adapter->itx_pcb))
+ break;
+ }
+}
+
+/*****************************************************************
+ *
+ * send_pcb
+ * Send a PCB to the adapter.
+ *
+ * output byte to command reg --<--+
+ * wait until HCRE is non zero |
+ * loop until all bytes sent -->--+
+ * set HSF1 and HSF2 to 1
+ * output pcb length
+ * wait until ASF give ACK or NAK
+ * set HSF1 and HSF2 to 0
+ *
+ *****************************************************************/
+
+/* This can be quite slow -- the adapter is allowed to take up to 40ms
+ * to respond to the initial interrupt.
+ *
+ * We run initially with interrupts turned on, but with a semaphore set
+ * so that nobody tries to re-enter this code. Once the first byte has
+ * gone through, we turn interrupts off and then send the others (the
+ * timeout is reduced to 500us).
+ */
+
+static int send_pcb(struct device *dev, pcb_struct * pcb)
+{
+ int i;
+ int timeout;
+ elp_device *adapter = dev->priv;
+
+ check_dma(dev);
+
+ if (adapter->dmaing && adapter->current_dma.direction == 0)
+ return FALSE;
+
+ /* Avoid contention */
+ if (set_bit(1, &adapter->send_pcb_semaphore)) {
+ if (elp_debug >= 3) {
+ printk("%s: send_pcb entered while threaded\n", dev->name);
+ }
+ return FALSE;
+ }
+ /*
+ * load each byte into the command register and
+ * wait for the HCRE bit to indicate the adapter
+ * had read the byte
+ */
+ set_hsf(dev->base_addr, 0);
+
+ if (send_pcb_slow(dev->base_addr, pcb->command))
+ goto abort;
+
+ cli();
+
+ if (send_pcb_fast(dev->base_addr, pcb->length))
+ goto sti_abort;
+
+ for (i = 0; i < pcb->length; i++) {
+ if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
+ goto sti_abort;
+ }
+
+ outb_control(inb_control(dev->base_addr) | 3, dev->base_addr); /* signal end of PCB */
+ outb_command(2 + pcb->length, dev->base_addr);
+
+ /* now wait for the acknowledgement */
+ sti();
+
+ for (timeout = jiffies + 5; jiffies < timeout;) {
+ switch (GET_ASF(dev->base_addr)) {
+ case ASF_PCB_ACK:
+ adapter->send_pcb_semaphore = 0;
+ return TRUE;
+ break;
+ case ASF_PCB_NAK:
+ printk("%s: send_pcb got NAK\n", dev->name);
+ goto abort;
+ break;
+ }
+ }
+
+ if (elp_debug >= 1)
+ printk("%s: timeout waiting for PCB acknowledge (status %02x)\n", dev->name, inb_status(dev->base_addr));
+
+ sti_abort:
+ sti();
+ abort:
+ adapter->send_pcb_semaphore = 0;
+ return FALSE;
+}
+
+
+/*****************************************************************
+ *
+ * receive_pcb
+ * Read a PCB from the adapter
+ *
+ * wait for ACRF to be non-zero ---<---+
+ * input a byte |
+ * if ASF1 and ASF2 were not both one |
+ * before byte was read, loop --->---+
+ * set HSF1 and HSF2 for ack
+ *
+ *****************************************************************/
+
+static int receive_pcb(struct device *dev, pcb_struct * pcb)
+{
+ int i, j;
+ int total_length;
+ int stat;
+ int timeout;
+
+ elp_device *adapter = dev->priv;
+
+ set_hsf(dev->base_addr, 0);
+
+ /* get the command code */
+ timeout = jiffies + 2;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && jiffies < timeout);
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+ pcb->command = inb_command(dev->base_addr);
+
+ /* read the data length */
+ timeout = jiffies + 3;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && jiffies < timeout);
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ printk("%s: status %02x\n", dev->name, stat);
+ return FALSE;
+ }
+ pcb->length = inb_command(dev->base_addr);
+
+ if (pcb->length > MAX_PCB_DATA) {
+ INVALID_PCB_MSG(pcb->length);
+ adapter_reset(dev);
+ return FALSE;
+ }
+ /* read the data */
+ cli();
+ i = 0;
+ do {
+ j = 0;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && j++ < 20000);
+ pcb->data.raw[i++] = inb_command(dev->base_addr);
+ if (i > MAX_PCB_DATA)
+ INVALID_PCB_MSG(i);
+ } while ((stat & ASF_PCB_MASK) != ASF_PCB_END && j < 20000);
+ sti();
+ if (j >= 20000) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+ /* woops, the last "data" byte was really the length! */
+ total_length = pcb->data.raw[--i];
+
+ /* safety check total length vs data length */
+ if (total_length != (pcb->length + 2)) {
+ if (elp_debug >= 2)
+ printk("%s: mangled PCB received\n", dev->name);
+ set_hsf(dev->base_addr, HSF_PCB_NAK);
+ return FALSE;
+ }
+
+ if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
+ if (set_bit(0, (void *) &adapter->busy)) {
+ if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
+ set_hsf(dev->base_addr, HSF_PCB_NAK);
+ printk("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
+ pcb->command = 0;
+ return TRUE;
+ } else {
+ pcb->command = 0xff;
+ }
+ }
+ }
+ set_hsf(dev->base_addr, HSF_PCB_ACK);
+ return TRUE;
+}
+
+/******************************************************
+ *
+ * queue a receive command on the adapter so we will get an
+ * interrupt when a packet is received.
+ *
+ ******************************************************/
+
+static int start_receive(struct device *dev, pcb_struct * tx_pcb)
+{
+	elp_device *adapter = dev->priv;
+	int sent;
+
+	if (elp_debug >= 3)
+		printk("%s: restarting receiver\n", dev->name);
+
+	/* Build a CMD_RECEIVE_PACKET PCB; buffer segment/offset are unused
+	 * by this driver, so they are cleared. */
+	tx_pcb->command = CMD_RECEIVE_PACKET;
+	tx_pcb->length = sizeof(struct Rcv_pkt);
+	tx_pcb->data.rcv_pkt.buf_seg = 0;	/* Unused */
+	tx_pcb->data.rcv_pkt.buf_ofs = 0;	/* Unused */
+	tx_pcb->data.rcv_pkt.buf_len = 1600;
+	tx_pcb->data.rcv_pkt.timeout = 0;	/* set timeout to zero */
+
+	/* Count one more outstanding receive only if the PCB went out. */
+	sent = send_pcb(dev, tx_pcb);
+	if (sent)
+		adapter->rx_active++;
+	return sent;
+}
+
+/******************************************************
+ *
+ * extract a packet from the adapter
+ * this routine is only called from within the interrupt
+ * service routine, so no cli/sti calls are needed
+ * note that the length is always assumed to be even
+ *
+ ******************************************************/
+
+/*
+ * Start a DMA transfer that pulls one received frame off the adapter,
+ * either directly into a freshly allocated sk_buff or via the driver's
+ * bounce buffer (when no skb is available or the skb lies above
+ * MAX_DMA_ADDRESS).  Completion is picked up by elp_interrupt() via the
+ * DONE status bit; per the banner above, this runs in interrupt context.
+ */
+static void receive_packet(struct device *dev, int len)
+{
+	int rlen;
+	elp_device *adapter = dev->priv;
+	unsigned long target;
+	struct sk_buff *skb;
+
+	/* round the length up to an even number of bytes */
+	rlen = (len + 1) & ~1;
+	skb = dev_alloc_skb(rlen + 2);
+
+	adapter->current_dma.copy_flag = 0;
+
+	if (!skb) {
+		/* no skb: DMA into the bounce buffer; the data is dropped */
+		printk("%s: memory squeeze, dropping packet\n", dev->name);
+		target = virt_to_bus(adapter->dma_buffer);
+	} else {
+		/* reserve 2 bytes (conventional header-alignment offset) */
+		skb_reserve(skb, 2);
+		target = virt_to_bus(skb_put(skb, rlen));
+		if ((target + rlen) >= MAX_DMA_ADDRESS) {
+			/* skb not ISA-DMA addressable: bounce, copy later */
+			target = virt_to_bus(adapter->dma_buffer);
+			adapter->current_dma.copy_flag = 1;
+		}
+	}
+	/* if this happens, we die */
+	if (set_bit(0, (void *) &adapter->dmaing))
+		printk("%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);
+
+	/* record the transfer so the interrupt handler can finish it */
+	adapter->current_dma.direction = 0;	/* 0 == receive */
+	adapter->current_dma.length = rlen;
+	adapter->current_dma.skb = skb;
+	adapter->current_dma.start_time = jiffies;
+
+	/* board side: adapter->host direction, TC interrupt, DMA enable */
+	outb_control(inb_control(dev->base_addr) | DIR | TCEN | DMAE, dev->base_addr);
+
+	/* host side: program the ISA DMA controller */
+	disable_dma(dev->dma);
+	clear_dma_ff(dev->dma);
+	set_dma_mode(dev->dma, 0x04);	/* dma read */
+	set_dma_addr(dev->dma, target);
+	set_dma_count(dev->dma, rlen);
+	enable_dma(dev->dma);
+
+	if (elp_debug >= 3) {
+		printk("%s: rx DMA transfer started\n", dev->name);
+	}
+	if (adapter->rx_active)
+		adapter->rx_active--;
+
+	if (!adapter->busy)
+		printk("%s: receive_packet called, busy not set.\n", dev->name);
+}
+
+/******************************************************
+ *
+ * interrupt handler
+ *
+ ******************************************************/
+
+/*
+ * Main interrupt service routine.  Loops (up to 5 rounds) handling two
+ * event sources: completed DMA transfers (deliver rx frames / free tx
+ * skbs, then restart any backlogged receive) and PCB responses queued
+ * by the adapter.  Finally re-primes the receive queue.
+ */
+static void elp_interrupt(int irq, void *dev_id, struct pt_regs *reg_ptr)
+{
+	int len;
+	int dlen;
+	int icount = 0;
+	struct device *dev;
+	elp_device *adapter;
+	int timeout;
+
+	/* sanity-check the IRQ and map it back to our device */
+	if (irq < 0 || irq > 15) {
+		printk("elp_interrupt(): illegal IRQ number found in interrupt routine (%i)\n", irq);
+		return;
+	}
+	dev = irq2dev_map[irq];
+
+	if (dev == NULL) {
+		printk("elp_interrupt(): irq %d for unknown device.\n", irq);
+		return;
+	}
+	adapter = (elp_device *) dev->priv;
+
+	/* guard against re-entering the handler */
+	if (dev->interrupt) {
+		printk("%s: re-entering the interrupt handler!\n", dev->name);
+		return;
+	}
+	dev->interrupt = 1;
+
+	do {
+		/*
+		 * has a DMA transfer finished?
+		 */
+		if (inb_status(dev->base_addr) & DONE) {
+			if (!adapter->dmaing) {
+				printk("%s: phantom DMA completed\n", dev->name);
+			}
+			if (elp_debug >= 3) {
+				printk("%s: %s DMA complete, status %02x\n", dev->name, adapter->current_dma.direction ? "tx" : "rx", inb_status(dev->base_addr));
+			}
+
+			/* switch DMA off on the board side */
+			outb_control(inb_control(dev->base_addr) & ~(DMAE | TCEN | DIR), dev->base_addr);
+			if (adapter->current_dma.direction) {
+				/* tx complete: the frame data is on the wire */
+				dev_kfree_skb(adapter->current_dma.skb, FREE_WRITE);
+			} else {
+				/* rx complete: hand the frame to the stack,
+				 * copying out of the bounce buffer if used */
+				struct sk_buff *skb = adapter->current_dma.skb;
+				if (skb) {
+					skb->dev = dev;
+					if (adapter->current_dma.copy_flag) {
+						memcpy(skb_put(skb, adapter->current_dma.length), adapter->dma_buffer, adapter->current_dma.length);
+					}
+					skb->protocol = eth_type_trans(skb,dev);
+					netif_rx(skb);
+				}
+			}
+			adapter->dmaing = 0;
+			/* start the next backlogged receive, else go idle */
+			if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
+				int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
+				adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
+				if (elp_debug >= 2)
+					printk("%s: receiving backlogged packet (%d)\n", dev->name, t);
+				receive_packet(dev, t);
+			} else {
+				adapter->busy = 0;
+			}
+		} else {
+			/* has one timed out? */
+			check_dma(dev);
+		}
+
+		sti();
+
+		/*
+		 * receive a PCB from the adapter
+		 */
+		timeout = jiffies + 3;
+		while ((inb_status(dev->base_addr) & ACRF) != 0 && jiffies < timeout) {
+			if (receive_pcb(dev, &adapter->irx_pcb)) {
+				switch (adapter->irx_pcb.command) {
+				case 0:
+					/* rejected by receive_pcb (backlog full) */
+					break;
+					/*
+					 * received a packet - this must be handled fast
+					 */
+				case 0xff:
+				case CMD_RECEIVE_PACKET_COMPLETE:
+					/* if the device isn't open, don't pass packets up the stack */
+					if (dev->start == 0)
+						break;
+					cli();
+					len = adapter->irx_pcb.data.rcv_resp.pkt_len;
+					dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
+					if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
+						printk("%s: interrupt - packet not received correctly\n", dev->name);
+						sti();
+					} else {
+						if (elp_debug >= 3) {
+							sti();
+							printk("%s: interrupt - packet received of length %i (%i)\n", dev->name, len, dlen);
+							cli();
+						}
+						/* 0xff marks a PCB that receive_pcb
+						 * queued while a DMA was in flight */
+						if (adapter->irx_pcb.command == 0xff) {
+							if (elp_debug >= 2)
+								printk("%s: adding packet to backlog (len = %d)\n", dev->name, dlen);
+							adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
+							adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
+						} else {
+							receive_packet(dev, dlen);
+						}
+						sti();
+						if (elp_debug >= 3)
+							printk("%s: packet received\n", dev->name);
+					}
+					break;
+
+					/*
+					 * 82586 configured correctly
+					 */
+				case CMD_CONFIGURE_82586_RESPONSE:
+					adapter->got[CMD_CONFIGURE_82586] = 1;
+					if (elp_debug >= 3)
+						printk("%s: interrupt - configure response received\n", dev->name);
+					break;
+
+					/*
+					 * Adapter memory configuration
+					 */
+				case CMD_CONFIGURE_ADAPTER_RESPONSE:
+					adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
+					if (elp_debug >= 3)
+						printk("%s: Adapter memory configuration %s.\n", dev->name,
+						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+					break;
+
+					/*
+					 * Multicast list loading
+					 */
+				case CMD_LOAD_MULTICAST_RESPONSE:
+					adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
+					if (elp_debug >= 3)
+						printk("%s: Multicast address list loading %s.\n", dev->name,
+						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+					break;
+
+					/*
+					 * Station address setting
+					 */
+				case CMD_SET_ADDRESS_RESPONSE:
+					adapter->got[CMD_SET_STATION_ADDRESS] = 1;
+					if (elp_debug >= 3)
+						printk("%s: Ethernet address setting %s.\n", dev->name,
+						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+					break;
+
+
+					/*
+					 * received board statistics
+					 */
+				case CMD_NETWORK_STATISTICS_RESPONSE:
+					adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
+					adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
+					adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
+					adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
+					adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
+					adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
+					adapter->got[CMD_NETWORK_STATISTICS] = 1;
+					if (elp_debug >= 3)
+						printk("%s: interrupt - statistics response received\n", dev->name);
+					break;
+
+					/*
+					 * sent a packet
+					 */
+				case CMD_TRANSMIT_PACKET_COMPLETE:
+					if (elp_debug >= 3)
+						printk("%s: interrupt - packet sent\n", dev->name);
+					if (dev->start == 0)
+						break;
+					switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
+					case 0xffff:
+						adapter->stats.tx_aborted_errors++;
+						printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
+						break;
+					case 0xfffe:
+						adapter->stats.tx_fifo_errors++;
+						printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
+						break;
+					}
+					/* wake the queue for the next packet */
+					dev->tbusy = 0;
+					mark_bh(NET_BH);
+					break;
+
+					/*
+					 * some unknown PCB
+					 */
+				default:
+					printk(KERN_DEBUG "%s: unknown PCB received - %2.2x\n", dev->name, adapter->irx_pcb.command);
+					break;
+				}
+			} else {
+				printk("%s: failed to read PCB on interrupt\n", dev->name);
+				adapter_reset(dev);
+			}
+		}
+
+	} while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));
+
+	/* keep receive PCBs queued on the adapter */
+	prime_rx(dev);
+
+	/*
+	 * indicate no longer in interrupt routine
+	 */
+	dev->interrupt = 0;
+}
+
+
+/******************************************************
+ *
+ * open the board
+ *
+ ******************************************************/
+
+/*
+ * Open the board: reset the adapter, claim the IRQ, DMA channel and
+ * bounce buffer, then send the initial memory/82586 configuration
+ * commands and prime the receive queue.
+ * Returns 0 on success, -EAGAIN or -ENOMEM on resource failure.
+ */
+static int elp_open(struct device *dev)
+{
+	elp_device *adapter;
+
+	adapter = dev->priv;
+
+	if (elp_debug >= 3)
+		printk("%s: request to open device\n", dev->name);
+
+	/*
+	 * make sure we actually found the device
+	 */
+	if (adapter == NULL) {
+		printk("%s: Opening a non-existent physical device\n", dev->name);
+		return -EAGAIN;
+	}
+	/*
+	 * disable interrupts on the board
+	 */
+	outb_control(0x00, dev->base_addr);
+
+	/*
+	 * clear any pending interrupts
+	 */
+	inb_command(dev->base_addr);
+	adapter_reset(dev);
+
+	/*
+	 * interrupt routine not entered
+	 */
+	dev->interrupt = 0;
+
+	/*
+	 * transmitter not busy
+	 */
+	dev->tbusy = 0;
+
+	/*
+	 * no receive PCBs active
+	 */
+	adapter->rx_active = 0;
+
+	adapter->busy = 0;
+	adapter->send_pcb_semaphore = 0;
+	adapter->rx_backlog.in = 0;
+	adapter->rx_backlog.out = 0;
+
+	/*
+	 * make sure we can find the device header given the interrupt number
+	 */
+	irq2dev_map[dev->irq] = dev;
+
+	/*
+	 * install our interrupt service routine
+	 */
+	if (request_irq(dev->irq, &elp_interrupt, 0, "3c505", NULL)) {
+		irq2dev_map[dev->irq] = NULL;
+		return -EAGAIN;
+	}
+	if (request_dma(dev->dma, "3c505")) {
+		printk("%s: could not allocate DMA channel\n", dev->name);
+		/* BUGFIX: release the IRQ claimed above so a failed open
+		 * does not leak it (and clear the irq->dev mapping) */
+		free_irq(dev->irq, NULL);
+		irq2dev_map[dev->irq] = NULL;
+		return -EAGAIN;
+	}
+	adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
+	if (!adapter->dma_buffer) {
+		printk("Could not allocate DMA buffer\n");
+		/* BUGFIX: a NULL bounce buffer would be dereferenced later
+		 * by the rx/tx DMA paths; release everything and fail now */
+		free_dma(dev->dma);
+		free_irq(dev->irq, NULL);
+		irq2dev_map[dev->irq] = NULL;
+		return -ENOMEM;
+	}
+	adapter->dmaing = 0;
+
+	/*
+	 * enable interrupts on the board
+	 */
+	outb_control(CMDE, dev->base_addr);
+
+	/*
+	 * device is now officially open!
+	 */
+	dev->start = 1;
+
+	/*
+	 * configure adapter memory: we need 10 multicast addresses, default==0
+	 */
+	if (elp_debug >= 3)
+		printk("%s: sending 3c505 memory configuration command\n", dev->name);
+	adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+	adapter->tx_pcb.data.memconf.cmd_q = 10;
+	adapter->tx_pcb.data.memconf.rcv_q = 20;
+	adapter->tx_pcb.data.memconf.mcast = 10;
+	adapter->tx_pcb.data.memconf.frame = 20;
+	adapter->tx_pcb.data.memconf.rcv_b = 20;
+	adapter->tx_pcb.data.memconf.progs = 0;
+	adapter->tx_pcb.length = sizeof(struct Memconf);
+	adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
+	if (!send_pcb(dev, &adapter->tx_pcb))
+		printk("%s: couldn't send memory configuration command\n", dev->name);
+	else {
+		/* busy-wait for the response flag set by elp_interrupt() */
+		int timeout = jiffies + TIMEOUT;
+		while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && jiffies < timeout);
+		if (jiffies >= timeout)
+			TIMEOUT_MSG(__LINE__);
+	}
+
+
+	/*
+	 * configure adapter to receive broadcast messages and wait for response
+	 */
+	if (elp_debug >= 3)
+		printk("%s: sending 82586 configure command\n", dev->name);
+	adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+	adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+	adapter->tx_pcb.length = 2;
+	adapter->got[CMD_CONFIGURE_82586] = 0;
+	if (!send_pcb(dev, &adapter->tx_pcb))
+		printk("%s: couldn't send 82586 configure command\n", dev->name);
+	else {
+		int timeout = jiffies + TIMEOUT;
+		while (adapter->got[CMD_CONFIGURE_82586] == 0 && jiffies < timeout);
+		if (jiffies >= timeout)
+			TIMEOUT_MSG(__LINE__);
+	}
+
+	/* enable burst-mode DMA */
+	outb(0x1, dev->base_addr + PORT_AUXDMA);
+
+	/*
+	 * queue receive commands to provide buffering
+	 */
+	prime_rx(dev);
+	if (elp_debug >= 3)
+		printk("%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
+
+	MOD_INC_USE_COUNT;
+
+	return 0;		/* Always succeed */
+}
+
+
+/******************************************************
+ *
+ * send a packet to the adapter
+ *
+ ******************************************************/
+
+/*
+ * Hand one outgoing frame to the adapter: send a CMD_TRANSMIT_PACKET
+ * PCB, then start a host->board DMA transfer of the frame data.
+ * Returns TRUE if the transfer was started, FALSE if the adapter was
+ * busy or the PCB could not be sent.
+ */
+static int send_packet(struct device *dev, struct sk_buff *skb)
+{
+	elp_device *adapter = dev->priv;
+	unsigned long target;
+
+	/*
+	 * make sure the length is even and no shorter than 60 bytes
+	 */
+	unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);
+
+	/* claim the adapter; refuse if a transfer is already in flight */
+	if (set_bit(0, (void *) &adapter->busy)) {
+		if (elp_debug >= 2)
+			printk("%s: transmit blocked\n", dev->name);
+		return FALSE;
+	}
+	/* NOTE(review): redundant - adapter was already set above */
+	adapter = dev->priv;
+
+	/*
+	 * send the adapter a transmit packet command. Ignore segment and offset
+	 * and make sure the length is even
+	 */
+	adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
+	adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
+	adapter->tx_pcb.data.xmit_pkt.buf_ofs
+	    = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0;	/* Unused */
+	adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
+
+	if (!send_pcb(dev, &adapter->tx_pcb)) {
+		/* PCB refused: release the adapter and report failure */
+		adapter->busy = 0;
+		return FALSE;
+	}
+	/* if this happens, we die */
+	if (set_bit(0, (void *) &adapter->dmaing))
+		printk("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
+
+	adapter->current_dma.direction = 1;	/* 1 == transmit */
+	adapter->current_dma.start_time = jiffies;
+
+	/* bounce via dma_buffer if the data is not ISA-DMA addressable */
+	target = virt_to_bus(skb->data);
+	if ((target + nlen) >= MAX_DMA_ADDRESS) {
+		memcpy(adapter->dma_buffer, skb->data, nlen);
+		target = virt_to_bus(adapter->dma_buffer);
+	}
+	adapter->current_dma.skb = skb;
+	/* NOTE(review): cli() here has no matching sti() in this function;
+	 * interrupts appear to be restored by the caller's context - confirm */
+	cli();
+	disable_dma(dev->dma);
+	clear_dma_ff(dev->dma);
+	set_dma_mode(dev->dma, 0x08);	/* dma memory -> io */
+	set_dma_addr(dev->dma, target);
+	set_dma_count(dev->dma, nlen);
+	enable_dma(dev->dma);
+	/* board side: enable DMA and terminal-count interrupt */
+	outb_control(inb_control(dev->base_addr) | DMAE | TCEN, dev->base_addr);
+	if (elp_debug >= 3)
+		printk("%s: DMA transfer started\n", dev->name);
+
+	return TRUE;
+}
+
+/******************************************************
+ *
+ * start the transmitter
+ * return 0 if sent OK, else return 1
+ *
+ ******************************************************/
+
+/*
+ * Network-layer transmit entry point.  Handles transmit-timeout
+ * recovery, the NULL-skb "missed tx interrupt" convention of this
+ * kernel generation, and hands real frames to send_packet().
+ * Returns 0 if the packet was accepted, 1 to ask the caller to retry.
+ */
+static int elp_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+	/* transmitting from inside our own ISR would deadlock the PCB
+	 * handshake, so refuse */
+	if (dev->interrupt) {
+		printk("%s: start_xmit aborted (in irq)\n", dev->name);
+		return 1;
+	}
+
+	check_dma(dev);
+
+	/*
+	 * if the transmitter is still busy, we have a transmit timeout...
+	 */
+	if (dev->tbusy) {
+		elp_device *adapter = dev->priv;
+		int tickssofar = jiffies - dev->trans_start;
+		int stat;
+
+		/* not stuck long enough yet: just report busy */
+		if (tickssofar < 1000)
+			return 1;
+
+		stat = inb_status(dev->base_addr);
+		printk("%s: transmit timed out, lost %s?\n", dev->name, (stat & ACRF) ? "interrupt" : "command");
+		if (elp_debug >= 1)
+			printk("%s: status %#02x\n", dev->name, stat);
+		/* reset the timeout clock and force the transmitter free */
+		dev->trans_start = jiffies;
+		dev->tbusy = 0;
+		adapter->stats.tx_dropped++;
+	}
+
+	/* Some upper layer thinks we've missed a tx-done interrupt */
+	if (skb == NULL) {
+		dev_tint(dev);
+		return 0;
+	}
+
+	/* silently discard degenerate frames */
+	if (skb->len <= 0)
+		return 0;
+
+	if (elp_debug >= 3)
+		printk("%s: request to send packet of length %d\n", dev->name, (int) skb->len);
+
+	/* atomically claim the transmitter */
+	if (set_bit(0, (void *) &dev->tbusy)) {
+		printk("%s: transmitter access conflict\n", dev->name);
+		return 1;
+	}
+	/*
+	 * send the packet at skb->data for skb->len
+	 */
+	if (!send_packet(dev, skb)) {
+		if (elp_debug >= 2) {
+			printk("%s: failed to transmit packet\n", dev->name);
+		}
+		/* release the transmitter so the caller can retry */
+		dev->tbusy = 0;
+		return 1;
+	}
+	if (elp_debug >= 3)
+		printk("%s: packet of length %d sent\n", dev->name, (int) skb->len);
+
+	/*
+	 * start the transmit timeout
+	 */
+	dev->trans_start = jiffies;
+
+	/* keep receive PCBs queued while we are at it */
+	prime_rx(dev);
+
+	return 0;
+}
+
+/******************************************************
+ *
+ * return statistics on the board
+ *
+ ******************************************************/
+
+static struct enet_statistics *elp_get_stats(struct device *dev)
+{
+	elp_device *adapter = (elp_device *) dev->priv;
+
+	if (elp_debug >= 3)
+		printk("%s: request for stats\n", dev->name);
+
+	/* With the interface down there are no interrupts, so we cannot
+	 * query the board - hand back whatever we last collected. */
+	if (!dev->start)
+		return &adapter->stats;
+
+	/* Ask the board for fresh numbers, then spin until the interrupt
+	 * handler flags the response (or the deadline passes). */
+	adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
+	adapter->tx_pcb.length = 0;
+	adapter->got[CMD_NETWORK_STATISTICS] = 0;
+	if (send_pcb(dev, &adapter->tx_pcb)) {
+		int deadline = jiffies + TIMEOUT;
+		while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && jiffies < deadline);
+		if (jiffies >= deadline)
+			TIMEOUT_MSG(__LINE__);
+	} else {
+		printk("%s: couldn't send get statistics command\n", dev->name);
+	}
+
+	/* statistics are now as up to date as we could make them */
+	return &adapter->stats;
+}
+
+/******************************************************
+ *
+ * close the board
+ *
+ ******************************************************/
+
+/*
+ * Close the board: snapshot the statistics while interrupts still
+ * work, then quiesce the hardware and release IRQ, DMA channel and
+ * the bounce buffer.  Always returns 0.
+ */
+static int elp_close(struct device *dev)
+{
+	elp_device *adapter;
+
+	adapter = dev->priv;
+
+	if (elp_debug >= 3)
+		printk("%s: request to close device\n", dev->name);
+
+	/* Someone may request the device statistic information even when
+	 * the interface is closed. The following will update the statistics
+	 * structure in the driver, so we'll be able to give current statistics.
+	 */
+	(void) elp_get_stats(dev);
+
+	/*
+	 * disable interrupts on the board
+	 */
+	outb_control(0x00, dev->base_addr);
+
+	/*
+	 * flag transmitter as busy (i.e. not available)
+	 */
+	dev->tbusy = 1;
+
+	/*
+	 * indicate device is closed
+	 */
+	dev->start = 0;
+
+	/*
+	 * release the IRQ
+	 */
+	free_irq(dev->irq, NULL);
+
+	/*
+	 * and we no longer have to map irq to dev either
+	 */
+	irq2dev_map[dev->irq] = 0;
+
+	/* release the DMA channel and the bounce buffer allocated in
+	 * elp_open() */
+	free_dma(dev->dma);
+	free_pages((unsigned long) adapter->dma_buffer, __get_order(DMA_BUFFER_SIZE));
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+
+/************************************************************
+ *
+ * Set multicast list
+ * num_addrs==0: clear mc_list
+ * num_addrs==-1: set promiscuous mode
+ * num_addrs>0: set mc_list
+ *
+ ************************************************************/
+
+/*
+ * Load the multicast address list into the adapter (unless promiscuous
+ * or all-multi is requested), then reconfigure the 82586's receive
+ * mode accordingly.  Each command busy-waits for the response flag set
+ * by elp_interrupt().
+ */
+static void elp_set_mc_list(struct device *dev)
+{
+	elp_device *adapter = (elp_device *) dev->priv;
+	struct dev_mc_list *dmi = dev->mc_list;
+	int i;
+
+	if (elp_debug >= 3)
+		printk("%s: request to set multicast list\n", dev->name);
+
+	if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+		/* send a "load multicast list" command to the board, max 10 addrs/cmd */
+		/* if num_addrs==0 the list will be cleared */
+		adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
+		adapter->tx_pcb.length = 6 * dev->mc_count;
+		for (i = 0; i < dev->mc_count; i++) {
+			memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
+			dmi = dmi->next;
+		}
+		adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
+		if (!send_pcb(dev, &adapter->tx_pcb))
+			printk("%s: couldn't send set_multicast command\n", dev->name);
+		else {
+			/* busy-wait for the response flag from the ISR */
+			int timeout = jiffies + TIMEOUT;
+			while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && jiffies < timeout);
+			if (jiffies >= timeout) {
+				TIMEOUT_MSG(__LINE__);
+			}
+		}
+		/* choose the receive mode for the configure command below */
+		if (dev->mc_count)
+			adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
+		else		/* num_addrs == 0 */
+			adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+	} else
+		adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
+	/*
+	 * configure adapter to receive messages (as specified above)
+	 * and wait for response
+	 */
+	if (elp_debug >= 3)
+		printk("%s: sending 82586 configure command\n", dev->name);
+	adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+	adapter->tx_pcb.length = 2;
+	adapter->got[CMD_CONFIGURE_82586] = 0;
+	if (!send_pcb(dev, &adapter->tx_pcb))
+		printk("%s: couldn't send 82586 configure command\n", dev->name);
+	else {
+		int timeout = jiffies + TIMEOUT;
+		while (adapter->got[CMD_CONFIGURE_82586] == 0 && jiffies < timeout);
+		if (jiffies >= timeout)
+			TIMEOUT_MSG(__LINE__);
+	}
+}
+
+/******************************************************
+ *
+ * initialise Etherlink Plus board
+ *
+ ******************************************************/
+
+static void elp_init(struct device *dev)
+{
+	elp_device *adapter = dev->priv;
+
+	/* Hook up the driver entry points. */
+	dev->open = elp_open;
+	dev->stop = elp_close;
+	dev->get_stats = elp_get_stats;
+	dev->hard_start_xmit = elp_start_xmit;
+	dev->set_multicast_list = elp_set_mc_list;
+
+	/* Fill in the generic ethernet fields. */
+	ether_setup(dev);
+
+	/* Start with a clean statistics block. */
+	memset(&(adapter->stats), 0, sizeof(struct enet_statistics));
+
+	/* No shared-memory window is used by this driver. */
+	dev->mem_start = 0;
+	dev->mem_end = 0;
+	dev->rmem_start = 0;
+	dev->rmem_end = 0;
+}
+
+/************************************************************
+ *
+ * A couple of tests to see if there's 3C505 or not
+ * Called only by elp_autodetect
+ ************************************************************/
+
+/*
+ * Probe one I/O address for a 3c505 by checking that the HSR.DIR bit
+ * tracks HCR.DIR when we toggle it.  Returns 0 if the board appears
+ * present, -1 otherwise.  Uses busy-wait delays on jiffies, so it
+ * briefly enables interrupts.
+ */
+static int elp_sense(struct device *dev)
+{
+	int timeout;
+	int addr = dev->base_addr;
+	const char *name = dev->name;
+	/* NOTE(review): save_flags/restore_flags conventionally take an
+	 * unsigned long - confirm 'long' is intended here */
+	long flags;
+	byte orig_HCR, orig_HSR;
+
+	/* someone else already owns this I/O region */
+	if (check_region(addr, 0xf))
+		return -1;
+
+	orig_HCR = inb_control(addr);
+	orig_HSR = inb_status(addr);
+
+	if (elp_debug > 0)
+		printk(search_msg, name, addr);
+
+	if (((orig_HCR == 0xff) && (orig_HSR == 0xff)) ||
+	    ((orig_HCR & DIR) != (orig_HSR & DIR))) {
+		if (elp_debug > 0)
+			printk(notfound_msg, 1);
+		return -1;	/* It can't be 3c505 if HCR.DIR != HSR.DIR */
+	}
+	/* Enable interrupts - we need timers! */
+	save_flags(flags);
+	sti();
+
+	/* Wait for a while; the adapter may still be booting up */
+	if (elp_debug > 0)
+		printk("%s", stilllooking_msg);
+	if (orig_HCR & DIR) {
+		/* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
+		outb_control(orig_HCR & ~DIR, addr);
+		timeout = jiffies + 30;
+		while (jiffies < timeout);
+		restore_flags(flags);
+		if (inb_status(addr) & DIR) {
+			/* DIR did not follow: not a 3c505; undo our write */
+			outb_control(orig_HCR, addr);
+			if (elp_debug > 0)
+				printk(notfound_msg, 2);
+			return -1;
+		}
+	} else {
+		/* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
+		outb_control(orig_HCR | DIR, addr);
+		timeout = jiffies + 30;
+		while (jiffies < timeout);
+		restore_flags(flags);
+		if (!(inb_status(addr) & DIR)) {
+			outb_control(orig_HCR, addr);
+			if (elp_debug > 0)
+				printk(notfound_msg, 3);
+			return -1;
+		}
+	}
+	/*
+	 * It certainly looks like a 3c505. If it has DMA enabled, it needs
+	 * a hard reset. Also, do a hard reset if selected at the compile time.
+	 */
+	if (elp_debug > 0)
+		printk("%s", found_msg);
+
+	return 0;
+}
+
+/*************************************************************
+ *
+ * Search through addr_list[] and try to find a 3C505
+ * Called only by eplus_probe
+ *************************************************************/
+
+static int elp_autodetect(struct device *dev)
+{
+	int i;
+
+	/* A preset base address means we probe only that one location;
+	 * base_addr == 0 requests a scan of the whole address table. */
+	if (dev->base_addr != 0) {	/* dev->base_addr == 0 ==> plain autodetect */
+		if (elp_sense(dev) == 0)
+			return dev->base_addr;
+	} else {
+		for (i = 0; (dev->base_addr = addr_list[i]) != 0; i++) {
+			if (elp_sense(dev) == 0)
+				return dev->base_addr;
+		}
+	}
+
+	/* could not find an adapter */
+	if (elp_debug > 0)
+		printk(couldnot_msg, dev->name);
+
+	return 0;		/* Because of this, the layer above will return -ENODEV */
+}
+
+
+/******************************************************
+ *
+ * probe for an Etherlink Plus board at the specified address
+ *
+ ******************************************************/
+
+/* There are three situations we need to be able to detect here:
+
+ * a) the card is idle
+ * b) the card is still booting up
+ * c) the card is stuck in a strange state (some DOS drivers do this)
+ *
+ * In case (a), all is well. In case (b), we wait 10 seconds to see if the
+ * card finishes booting, and carry on if so. In case (c), we do a hard reset,
+ * loop round, and hope for the best.
+ *
+ * This is all very unpleasant, but hopefully avoids the problems with the old
+ * probe code (which had a 15-second delay if the card was idle, and didn't
+ * work at all if it was in a weird state).
+ */
+
+/*
+ * Probe for a 3c505 at the detected base address, verify it responds
+ * sanely (retrying with a hard reset when it doesn't), detect the IRQ,
+ * read the station address and adapter info, reconfigure its memory
+ * and register the device.  Returns 0 on success, -ENODEV otherwise.
+ */
+int elplus_probe(struct device *dev)
+{
+	elp_device *adapter;
+	int i, tries, tries1, timeout, okay;
+
+	/*
+	 * setup adapter structure
+	 */
+
+	dev->base_addr = elp_autodetect(dev);
+	if (!(dev->base_addr))
+		return -ENODEV;
+
+	/*
+	 * setup ptr to adapter specific information
+	 */
+	adapter = (elp_device *) (dev->priv = kmalloc(sizeof(elp_device), GFP_KERNEL));
+	if (adapter == NULL) {
+		printk("%s: out of memory\n", dev->name);
+		return -ENODEV;
+	}
+
+	for (tries1 = 0; tries1 < 3; tries1++) {
+		outb_control((inb_control(dev->base_addr) | CMDE) & ~DIR, dev->base_addr);
+		/* First try to write just one byte, to see if the card is
+		 * responding at all normally.
+		 */
+		timeout = jiffies + 5;
+		okay = 0;
+		while (jiffies < timeout && !(inb_status(dev->base_addr) & HCRE));
+		if ((inb_status(dev->base_addr) & HCRE)) {
+			outb_command(0, dev->base_addr);	/* send a spurious byte */
+			timeout = jiffies + 5;
+			while (jiffies < timeout && !(inb_status(dev->base_addr) & HCRE));
+			if (inb_status(dev->base_addr) & HCRE)
+				okay = 1;
+		}
+		if (!okay) {
+			/* Nope, it's ignoring the command register. This means that
+			 * either it's still booting up, or it's died.
+			 */
+			printk("%s: command register wouldn't drain, ", dev->name);
+			if ((inb_status(dev->base_addr) & 7) == 3) {
+				/* If the adapter status is 3, it *could* still be booting.
+				 * Give it the benefit of the doubt for 10 seconds.
+				 */
+				printk("assuming 3c505 still starting\n");
+				timeout = jiffies + 10 * HZ;
+				while (jiffies < timeout && (inb_status(dev->base_addr) & 7));
+				if (inb_status(dev->base_addr) & 7) {
+					printk("%s: 3c505 failed to start\n", dev->name);
+				} else {
+					okay = 1;	/* It started */
+				}
+			} else {
+				/* Otherwise, it must just be in a strange state. We probably
+				 * need to kick it.
+				 */
+				printk("3c505 is sulking\n");
+			}
+		}
+		for (tries = 0; tries < 5 && okay; tries++) {
+
+			/*
+			 * Try to set the Ethernet address, to make sure that the board
+			 * is working.
+			 */
+			adapter->tx_pcb.command = CMD_STATION_ADDRESS;
+			adapter->tx_pcb.length = 0;
+			autoirq_setup(0);
+			if (!send_pcb(dev, &adapter->tx_pcb)) {
+				printk("%s: could not send first PCB\n", dev->name);
+				autoirq_report(0);
+				continue;
+			}
+			if (!receive_pcb(dev, &adapter->rx_pcb)) {
+				printk("%s: could not read first PCB\n", dev->name);
+				autoirq_report(0);
+				continue;
+			}
+			if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
+			    (adapter->rx_pcb.length != 6)) {
+				printk("%s: first PCB wrong (%d, %d)\n", dev->name, adapter->rx_pcb.command, adapter->rx_pcb.length);
+				autoirq_report(0);
+				continue;
+			}
+			goto okay;
+		}
+		/* It's broken. Do a hard reset to re-initialise the board,
+		 * and try again.
+		 */
+		printk(KERN_INFO "%s: resetting adapter\n", dev->name);
+		outb_control(inb_control(dev->base_addr) | FLSH | ATTN, dev->base_addr);
+		outb_control(inb_control(dev->base_addr) & ~(FLSH | ATTN), dev->base_addr);
+	}
+	printk("%s: failed to initialise 3c505\n", dev->name);
+	/* BUGFIX: free the private structure allocated above instead of
+	 * leaking it on a failed probe */
+	kfree(dev->priv);
+	dev->priv = NULL;
+	return -ENODEV;
+
+      okay:
+	if (dev->irq) {		/* Is there a preset IRQ? */
+		int rpt = autoirq_report(0);
+		if (dev->irq != rpt) {
+			printk("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
+			/* BUGFIX: free dev->priv on this failure path too */
+			kfree(dev->priv);
+			dev->priv = NULL;
+			return -ENODEV;
+		}
+		/* if dev->irq == autoirq_report(0), all is well */
+	} else			/* No preset IRQ; just use what we can detect */
+		dev->irq = autoirq_report(0);
+	switch (dev->irq) {	/* Legal, sane? */
+	case 0:
+		printk("%s: No IRQ reported by autoirq_report().\n", dev->name);
+		printk("%s: Check the jumpers of your 3c505 board.\n", dev->name);
+		/* BUGFIX: free dev->priv before failing */
+		kfree(dev->priv);
+		dev->priv = NULL;
+		return -ENODEV;
+	case 1:
+	case 6:
+	case 8:
+	case 13:
+		printk("%s: Impossible IRQ %d reported by autoirq_report().\n",
+		       dev->name, dev->irq);
+		/* BUGFIX: free dev->priv before failing */
+		kfree(dev->priv);
+		dev->priv = NULL;
+		return -ENODEV;
+	}
+	/*
+	 * Now we have the IRQ number so we can disable the interrupts from
+	 * the board until the board is opened.
+	 */
+	outb_control(inb_control(dev->base_addr) & ~CMDE, dev->base_addr);
+
+	/*
+	 * copy ethernet address into structure
+	 */
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i];
+
+	/* set up the DMA channel */
+	dev->dma = ELP_DMA;
+
+	/*
+	 * print remainder of startup message
+	 */
+	printk("%s: 3c505 at %#lx, irq %d, dma %d, ",
+	       dev->name, dev->base_addr, dev->irq, dev->dma);
+	printk("addr %02x:%02x:%02x:%02x:%02x:%02x, ",
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	/*
+	 * read more information from the adapter
+	 */
+
+	adapter->tx_pcb.command = CMD_ADAPTER_INFO;
+	adapter->tx_pcb.length = 0;
+	if (!send_pcb(dev, &adapter->tx_pcb) ||
+	    !receive_pcb(dev, &adapter->rx_pcb) ||
+	    (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
+	    (adapter->rx_pcb.length != 10)) {
+		printk("%s: not responding to second PCB\n", dev->name);
+	}
+	printk("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
+
+	/*
+	 * reconfigure the adapter memory to better suit our purposes
+	 */
+	adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+	adapter->tx_pcb.length = 12;
+	adapter->tx_pcb.data.memconf.cmd_q = 8;
+	adapter->tx_pcb.data.memconf.rcv_q = 8;
+	adapter->tx_pcb.data.memconf.mcast = 10;
+	adapter->tx_pcb.data.memconf.frame = 10;
+	adapter->tx_pcb.data.memconf.rcv_b = 10;
+	adapter->tx_pcb.data.memconf.progs = 0;
+	if (!send_pcb(dev, &adapter->tx_pcb) ||
+	    !receive_pcb(dev, &adapter->rx_pcb) ||
+	    (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
+	    (adapter->rx_pcb.length != 2)) {
+		printk("%s: could not configure adapter memory\n", dev->name);
+	}
+	if (adapter->rx_pcb.data.configure) {
+		printk("%s: adapter configuration failed\n", dev->name);
+	}
+	/*
+	 * and reserve the address region
+	 */
+	request_region(dev->base_addr, ELP_IO_EXTENT, "3c505");
+
+	/*
+	 * initialise the device
+	 */
+	elp_init(dev);
+
+	return 0;
+}
+
+#ifdef MODULE
+/* Interface name buffer; filled in by the networking core. */
+static char devicename[9] = {0,};
+/* Template device structure; only the probe routine is preset. */
+static struct device dev_3c505 =
+{
+	devicename,		/* device name is inserted by linux/drivers/net/net_init.c */
+	0, 0, 0, 0,
+	0, 0,
+	0, 0, 0, NULL, elplus_probe};
+
+int io = 0x300;			/* module parameter: I/O base (0 => autoprobe) */
+int irq = 0;			/* module parameter: IRQ (0 => autodetect) */
+
+int init_module(void)
+{
+	/* Autoprobing from a module ties up the machine; warn about it. */
+	if (io == 0)
+		printk("3c505: You should not use auto-probing with insmod!\n");
+
+	dev_3c505.base_addr = io;
+	dev_3c505.irq = irq;
+
+	/* Registration invokes elplus_probe() via the probe hook. */
+	return (register_netdev(&dev_3c505) != 0) ? -EIO : 0;
+}
+
+/*
+ * Module unload: detach from the network layer, free the private state
+ * allocated by elplus_probe() and give back the I/O region.
+ */
+void cleanup_module(void)
+{
+	unregister_netdev(&dev_3c505);
+	kfree(dev_3c505.priv);
+	dev_3c505.priv = NULL;
+
+	/* If we don't do this, we can't re-insmod it later. */
+	release_region(dev_3c505.base_addr, ELP_IO_EXTENT);
+}
+
+#endif /* MODULE */
+
+
+/*
+ * Local Variables:
+ * c-file-style: "linux"
+ * tab-width: 8
+ * compile-command: "gcc -D__KERNEL__ -I/discs/bibble/src/linux-1.3.69/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strength-reduce -pipe -m486 -DCPU=486 -DMODULE -c 3c505.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c505.h b/linux/src/drivers/net/3c505.h
new file mode 100644
index 0000000..0598ca2
--- /dev/null
+++ b/linux/src/drivers/net/3c505.h
@@ -0,0 +1,245 @@
+/*****************************************************************
+ *
+ * defines for 3Com Etherlink Plus adapter
+ *
+ *****************************************************************/
+
+/*
+ * I/O register offsets
+ */
+#define PORT_COMMAND 0x00 /* read/write, 8-bit */
+#define PORT_STATUS 0x02 /* read only, 8-bit */
+#define PORT_AUXDMA 0x02 /* write only, 8-bit */
+#define PORT_DATA 0x04 /* read/write, 16-bit */
+#define PORT_CONTROL 0x06 /* read/write, 8-bit */
+
+#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
+
+/*
+ * host control registers bits
+ */
+#define ATTN 0x80 /* attention */
+#define FLSH 0x40 /* flush data register */
+#define DMAE 0x20 /* DMA enable */
+#define DIR 0x10 /* direction */
+#define TCEN 0x08 /* terminal count interrupt enable */
+#define CMDE 0x04 /* command register interrupt enable */
+#define HSF2 0x02 /* host status flag 2 */
+#define HSF1 0x01 /* host status flag 1 */
+
+/*
+ * combinations of HSF flags used for PCB transmission
+ */
+#define HSF_PCB_ACK HSF1
+#define HSF_PCB_NAK HSF2
+#define HSF_PCB_END (HSF2|HSF1)
+#define HSF_PCB_MASK (HSF2|HSF1)
+
+/*
+ * host status register bits
+ */
+#define HRDY 0x80 /* data register ready */
+#define HCRE 0x40 /* command register empty */
+#define ACRF 0x20 /* adapter command register full */
+/* #define DIR 0x10 direction - same as in control register */
+#define DONE 0x08 /* DMA done */
+#define ASF3 0x04 /* adapter status flag 3 */
+#define ASF2 0x02 /* adapter status flag 2 */
+#define ASF1 0x01 /* adapter status flag 1 */
+
+/*
+ * combinations of ASF flags used for PCB reception
+ */
+#define ASF_PCB_ACK ASF1
+#define ASF_PCB_NAK ASF2
+#define ASF_PCB_END (ASF2|ASF1)
+#define ASF_PCB_MASK (ASF2|ASF1)
+
+/*
+ * host aux DMA register bits
+ */
+#define DMA_BRST 0x01 /* DMA burst */
+
+/*
+ * maximum amount of data allowed in a PCB
+ */
+#define MAX_PCB_DATA 62
+
+/*****************************************************************
+ *
+ * timeout value
+ * this is a rough value used for loops to stop them from
+ * locking up the whole machine in the case of failure or
+ * error conditions
+ *
+ *****************************************************************/
+
+#define TIMEOUT 300
+
+/*****************************************************************
+ *
+ * PCB commands
+ *
+ *****************************************************************/
+
+enum {
+ /*
+ * host PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
+ CMD_CONFIGURE_82586 = 0x02,
+ CMD_STATION_ADDRESS = 0x03,
+ CMD_DMA_DOWNLOAD = 0x04,
+ CMD_DMA_UPLOAD = 0x05,
+ CMD_PIO_DOWNLOAD = 0x06,
+ CMD_PIO_UPLOAD = 0x07,
+ CMD_RECEIVE_PACKET = 0x08,
+ CMD_TRANSMIT_PACKET = 0x09,
+ CMD_NETWORK_STATISTICS = 0x0a,
+ CMD_LOAD_MULTICAST_LIST = 0x0b,
+ CMD_CLEAR_PROGRAM = 0x0c,
+ CMD_DOWNLOAD_PROGRAM = 0x0d,
+ CMD_EXECUTE_PROGRAM = 0x0e,
+ CMD_SELF_TEST = 0x0f,
+ CMD_SET_STATION_ADDRESS = 0x10,
+ CMD_ADAPTER_INFO = 0x11,
+ NUM_TRANSMIT_CMDS,
+
+ /*
+ * adapter PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
+ CMD_CONFIGURE_82586_RESPONSE = 0x32,
+ CMD_ADDRESS_RESPONSE = 0x33,
+ CMD_DOWNLOAD_DATA_REQUEST = 0x34,
+ CMD_UPLOAD_DATA_REQUEST = 0x35,
+ CMD_RECEIVE_PACKET_COMPLETE = 0x38,
+ CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
+ CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
+ CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
+ CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
+ CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
+ CMD_EXECUTE_RESPONSE = 0x3e,
+ CMD_SELF_TEST_RESPONSE = 0x3f,
+ CMD_SET_ADDRESS_RESPONSE = 0x40,
+ CMD_ADAPTER_INFO_RESPONSE = 0x41
+};
+
+/* Definitions for the PCB data structure */
+
+/* Data units */
+typedef unsigned char byte;
+typedef unsigned short int word;
+typedef unsigned long int dword;
+
+/* Data structures */
+struct Memconf {
+ word cmd_q,
+ rcv_q,
+ mcast,
+ frame,
+ rcv_b,
+ progs;
+};
+
+struct Rcv_pkt {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ timeout;
+};
+
+struct Xmit_pkt {
+ word buf_ofs,
+ buf_seg,
+ pkt_len;
+};
+
+struct Rcv_resp {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ pkt_len,
+ timeout,
+ status;
+ dword timetag;
+};
+
+struct Xmit_resp {
+ word buf_ofs,
+ buf_seg,
+ c_stat,
+ status;
+};
+
+
+struct Netstat {
+ dword tot_recv,
+ tot_xmit;
+ word err_CRC,
+ err_align,
+ err_res,
+ err_ovrrun;
+};
+
+
+struct Selftest {
+ word error;
+ union {
+ word ROM_cksum;
+ struct {
+ word ofs, seg;
+ } RAM;
+ word i82586;
+ } failure;
+};
+
+struct Info {
+ byte minor_vers,
+ major_vers;
+ word ROM_cksum,
+ RAM_sz,
+ free_ofs,
+ free_seg;
+};
+
+struct Memdump {
+ word size,
+ off,
+ seg;
+};
+
+/*
+Primary Command Block. The most important data structure. All communication
+between the host and the adapter is done with these. (Except for the actual
+ethernet data, which has different packaging.)
+*/
+typedef struct {
+ byte command;
+ byte length;
+ union {
+ struct Memconf memconf;
+ word configure;
+ struct Rcv_pkt rcv_pkt;
+ struct Xmit_pkt xmit_pkt;
+ byte multicast[10][6];
+ byte eth_addr[6];
+ byte failed;
+ struct Rcv_resp rcv_resp;
+ struct Xmit_resp xmit_resp;
+ struct Netstat netstat;
+ struct Selftest selftest;
+ struct Info info;
+ struct Memdump memdump;
+ byte raw[62];
+ } data;
+} pcb_struct;
+
+/* These defines for 'configure' */
+#define RECV_STATION 0x00
+#define RECV_BROAD 0x01
+#define RECV_MULTI 0x02
+#define RECV_PROMISC 0x04
+#define NO_LOOPBACK 0x00
+#define INT_LOOPBACK 0x08
+#define EXT_LOOPBACK 0x10
diff --git a/linux/src/drivers/net/3c507.c b/linux/src/drivers/net/3c507.c
new file mode 100644
index 0000000..58ba2d7
--- /dev/null
+++ b/linux/src/drivers/net/3c507.c
@@ -0,0 +1,924 @@
+/* 3c507.c: An EtherLink16 device driver for Linux. */
+/*
+ Written 1993,1994 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings)
+ and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
+ Mark Salazar <leslie@access.digex.net> made the changes for cards with
+ only 16K packet buffers.
+
+ Things remaining to do:
+ Verify that the tx and rx buffers don't have fencepost errors.
+ Move the theory of operation and memory map documentation.
+ The statistics need to be updated correctly.
+*/
+
+static const char *version =
+ "3c507.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+
+/*
+ Sources:
+ This driver wouldn't have been written with the availability of the
+ Crynwr driver source code. It provided a known-working implementation
+ that filled in the gaping holes of the Intel documentation. Three cheers
+ for Russ Nelson.
+
+ Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
+ info that the casual reader might think that it documents the i82586 :-<.
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+
+
+/* use 0 for production, 1 for verification, 2..7 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* A zero-terminated list of common I/O addresses to be probed. */
+static unsigned int netcard_portlist[] =
+ { 0x300, 0x320, 0x340, 0x280, 0};
+
+static void init_rx_bufs(struct device *dev);
+
+/*
+ Details of the i82586.
+
+ You'll really need the databook to understand the details of this part,
+ but the outline is that the i82586 has two separate processing units.
+ Both are started from a list of three configuration tables, of which only
+ the last, the System Control Block (SCB), is used after reset-time. The SCB
+ has the following fields:
+ Status word
+ Command word
+ Tx/Command block addr.
+ Rx block addr.
+ The command word accepts the following controls for the Tx and Rx units:
+ */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+
+/* The Rx unit uses a list of frame descriptors and a list of data buffer
+ descriptors. We use full-sized (1518 byte) data buffers, so there is
+ a one-to-one pairing of frame descriptors to buffer descriptors.
+
+ The Tx ("command") unit executes a list of commands that look like:
+ Status word Written by the 82586 when the command is done.
+ Command word Command in lower 3 bits, post-command action in upper 3
+ Link word The address of the next command.
+ Parameters (as needed).
+
+ Some definitions related to the Command Word are:
+ */
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ int last_restart;
+ ushort rx_head;
+ ushort rx_tail;
+ ushort tx_head;
+ ushort tx_cmd_link;
+ ushort tx_reap;
+};
+
+/*
+ Details of the EtherLink16 Implementation
+ The 3c507 is a generic shared-memory i82586 implementation.
+ The host can map 16K, 32K, 48K, or 64K of the 64K memory into
+ 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
+ */
+
+/* Offsets from the base I/O address. */
+#define SA_DATA 0 /* Station address data, or 3Com signature. */
+#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
+#define RESET_IRQ 10 /* Reset the latched IRQ line. */
+#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
+#define ROM_CONFIG 13
+#define MEM_CONFIG 14
+#define IRQ_CONFIG 15
+#define EL16_IO_EXTENT 16
+
+/* The ID port is used at boot-time to locate the ethercard. */
+#define ID_PORT 0x100
+
+/* Offsets to registers in the mailbox (SCB). */
+#define iSCB_STATUS 0x8
+#define iSCB_CMD 0xA
+#define iSCB_CBL 0xC /* Command BLock offset. */
+#define iSCB_RFA 0xE /* Rx Frame Area offset. */
+
+/* Since the 3c507 maps the shared memory window so that the last byte is
+ at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
+ 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
+ We can account for this by setting the 'SCB Base' entry in the ISCP table
+ below for all the 16 bit offset addresses, and also adding the 'SCB Base'
+ value to all 24 bit physical addresses (in the SCP table and the TX and RX
+ Buffer Descriptors).
+ -Mark
+ */
+#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
+
+/*
+ What follows in 'init_words[]' is the "program" that is downloaded to the
+ 82586 memory. It's mostly tables and command blocks, and starts at the
+ reset address 0xfffff6. This is designed to be similar to the EtherExpress,
+ thus the unusual location of the SCB at 0x0008.
+
+ Even with the additional "don't care" values, doing it this way takes less
+ program space than initializing the individual tables, and I feel it's much
+ cleaner.
+
+ The databook is particularly useless for the first two structures, I had
+ to use the Crynwr driver as an example.
+
+ The memory setup is as follows:
+ */
+
+#define CONFIG_CMD 0x0018
+#define SET_SA_CMD 0x0024
+#define SA_OFFSET 0x002A
+#define IDLELOOP 0x30
+#define TDR_CMD 0x38
+#define TDR_TIME 0x3C
+#define DUMP_CMD 0x40
+#define DIAG_CMD 0x48
+#define SET_MC_CMD 0x4E
+#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */
+
+#define TX_BUF_START 0x0100
+#define NUM_TX_BUFS 4
+#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
+
+#define RX_BUF_START 0x2000
+#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
+#define RX_BUF_END (dev->mem_end - dev->mem_start)
+
+/*
+ That's it: only 86 bytes to set up the beast, including every extra
+ command available. The 170 byte buffer at DUMP_DATA is shared between the
+ Dump command (called only by the diagnostic program) and the SetMulticastList
+ command.
+
+ To complete the memory setup you only have to write the station address at
+ SA_OFFSET and create the Tx & Rx buffer lists.
+
+ The Tx command chain and buffer list is setup as follows:
+ A Tx command table, with the data buffer pointing to...
+ A Tx data buffer descriptor. The packet is in a single buffer, rather than
+ chaining together several smaller buffers.
+ A NoOp command, which initially points to itself,
+ And the packet data.
+
+ A transmit is done by filling in the Tx command table and data buffer,
+ re-writing the NoOp command, and finally changing the offset of the last
+ command to point to the current Tx command. When the Tx command is finished,
+ it jumps to the NoOp, where it loops until the next Tx command changes the
+ "link offset" in the NoOp. This way the 82586 never has to go through the
+ slow restart sequence.
+
+ The Rx buffer list is set up in the obvious ring structure. We have enough
+ memory (and low enough interrupt latency) that we can avoid the complicated
+ Rx buffer linked lists by always associating a full-size Rx data buffer with
+ each Rx data frame.
+
+ I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
+ use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
+
+ */
+
+unsigned short init_words[] = {
+ /* System Configuration Pointer (SCP). */
+ 0x0000, /* Set bus size to 16 bits. */
+ 0,0, /* pad words. */
+ 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
+
+ /* Intermediate System Configuration Pointer (ISCP). */
+ 0x0001, /* Status word that's cleared when init is done. */
+ 0x0008,0,0, /* SCB offset, (skip, skip) */
+
+ /* System Control Block (SCB). */
+ 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
+ CONFIG_CMD, /* Command list pointer, points to Configure. */
+ RX_BUF_START, /* Rx block list. */
+ 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
+
+ /* 0x0018: Configure command. Change to put MAC data with packet. */
+ 0, CmdConfigure, /* Status, command. */
+ SET_SA_CMD, /* Next command is Set Station Addr. */
+ 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
+ 0x2e40, /* Magic values, including MAC data location. */
+ 0, /* Unused pad word. */
+
+ /* 0x0024: Setup station address command. */
+ 0, CmdSASetup,
+ SET_MC_CMD, /* Next command. */
+ 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
+
+ /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
+ 0, CmdNOp, IDLELOOP, 0 /* pad */,
+
+ /* 0x0038: An unused Time-Domain Reflectometer command. */
+ 0, CmdTDR, IDLELOOP, 0,
+
+ /* 0x0040: An unused Dump State command. */
+ 0, CmdDump, IDLELOOP, DUMP_DATA,
+
+ /* 0x0048: An unused Diagnose command. */
+ 0, CmdDiagnose, IDLELOOP,
+
+ /* 0x004E: An empty set-multicast-list command. */
+ 0, CmdMulticastList, IDLELOOP, 0,
+};
+
+/* Index to functions, as function prototypes. */
+
+extern int el16_probe(struct device *dev); /* Called from Space.c */
+
+static int el16_probe1(struct device *dev, int ioaddr);
+static int el16_open(struct device *dev);
+static int el16_send_packet(struct sk_buff *skb, struct device *dev);
+static void el16_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void el16_rx(struct device *dev);
+static int el16_close(struct device *dev);
+static struct enet_statistics *el16_get_stats(struct device *dev);
+
+static void hardware_send_packet(struct device *dev, void *buf, short length);
+void init_82586_mem(struct device *dev);
+
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"3c507", el16_probe1, EL16_IO_EXTENT, netcard_portlist};
+#endif
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, (detachable devices only) allocate space for the
+ device and return success.
+ */
+int
+el16_probe(struct device *dev)
+{
+ int base_addr = dev ? dev->base_addr : 0;
+ int i;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el16_probe1(dev, base_addr);
+ else if (base_addr != 0)
+ return ENXIO; /* Don't probe at all. */
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL16_IO_EXTENT))
+ continue;
+ if (el16_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+int el16_probe1(struct device *dev, int ioaddr)
+{
+ static unsigned char init_ID_done = 0, version_printed = 0;
+ int i, irq, irqval;
+
+ if (init_ID_done == 0) {
+ ushort lrs_state = 0xff;
+ /* Send the ID sequence to the ID_PORT to enable the board(s). */
+ outb(0x00, ID_PORT);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, ID_PORT);
+ lrs_state <<= 1;
+ if (lrs_state & 0x100)
+ lrs_state ^= 0xe7;
+ }
+ outb(0x00, ID_PORT);
+ init_ID_done = 1;
+ }
+
+ if (inb(ioaddr) == '*' && inb(ioaddr+1) == '3'
+ && inb(ioaddr+2) == 'C' && inb(ioaddr+3) == 'O')
+ ;
+ else
+ return ENODEV;
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (net_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: 3c507 at %#x,", dev->name, ioaddr);
+
+ /* We should make a few more checks here, like the first three octets of
+ the S.A. for the manufacturer's code. */
+
+ irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ irqval = request_irq(irq, &el16_interrupt, 0, "3c507", NULL);
+ if (irqval) {
+ printk ("unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ return EAGAIN;
+ }
+
+ /* We've committed to using the board, and can start filling in *dev. */
+ request_region(ioaddr, EL16_IO_EXTENT, "3c507");
+ dev->base_addr = ioaddr;
+
+ outb(0x01, ioaddr + MISC_CTRL);
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = inb(ioaddr + i);
+ printk(" %02x", dev->dev_addr[i]);
+ }
+
+ if ((dev->mem_start & 0xf) > 0)
+ net_debug = dev->mem_start & 7;
+
+#ifdef MEM_BASE
+ dev->mem_start = MEM_BASE;
+ dev->mem_end = dev->mem_start + 0x10000;
+#else
+ {
+ int base;
+ int size;
+ char mem_config = inb(ioaddr + MEM_CONFIG);
+ if (mem_config & 0x20) {
+ size = 64*1024;
+ base = 0xf00000 + (mem_config & 0x08 ? 0x080000
+ : ((mem_config & 3) << 17));
+ } else {
+ size = ((mem_config & 3) + 1) << 14;
+ base = 0x0c0000 + ( (mem_config & 0x18) << 12);
+ }
+ dev->mem_start = base;
+ dev->mem_end = base + size;
+ }
+#endif
+
+ dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
+ dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ printk(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
+ dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = el16_open;
+ dev->stop = el16_close;
+ dev->hard_start_xmit = el16_send_packet;
+ dev->get_stats = el16_get_stats;
+
+ ether_setup(dev); /* Generic ethernet behaviour */
+
+ dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */
+
+ return 0;
+}
+
+
+
+static int
+el16_open(struct device *dev)
+{
+ irq2dev_map[dev->irq] = dev;
+
+ /* Initialize the 82586 memory and start it. */
+ init_82586_mem(dev);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+el16_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ short *shmem = (short*)dev->mem_start;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ if (net_debug > 1)
+ printk("%s: transmit timed out, %s? ", dev->name,
+ shmem[iSCB_STATUS>>1] & 0x8000 ? "IRQ conflict" :
+ "network cable problem");
+ /* Try to restart the adaptor. */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ if (net_debug > 1) printk("Resetting board.\n");
+ /* Completely reset the adaptor. */
+ init_82586_mem(dev);
+ } else {
+ /* Issue the channel attention signal and hope it "gets better". */
+ if (net_debug > 1) printk("Kicking board.\n");
+ shmem[iSCB_CMD>>1] = 0xf000|CUC_START|RX_START;
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ /* Enable the 82586 interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+el16_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ ushort ack_cmd = 0;
+ ushort *shmem;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+ shmem = ((ushort*)dev->mem_start);
+
+ status = shmem[iSCB_STATUS>>1];
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* Reap the Tx packet buffers. */
+ while (lp->tx_reap != lp->tx_head) {
+ unsigned short tx_status = shmem[lp->tx_reap>>1];
+
+ if (tx_status == 0) {
+ if (net_debug > 5) printk("Couldn't reap %#x.\n", lp->tx_reap);
+ break;
+ }
+ if (tx_status & 0x2000) {
+ lp->stats.tx_packets++;
+ lp->stats.collisions += tx_status & 0xf;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->stats.tx_errors++;
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ }
+ if (net_debug > 5)
+ printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
+ lp->tx_reap += TX_BUF_SIZE;
+ if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_reap = TX_BUF_START;
+ if (++boguscount > 4)
+ break;
+ }
+
+ if (status & 0x4000) { /* Packet received. */
+ if (net_debug > 5)
+ printk("Received packet, rx_head %04x.\n", lp->rx_head);
+ el16_rx(dev);
+ }
+
+ /* Acknowledge the interrupt sources. */
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x0700) != 0x0200 && dev->start) {
+ if (net_debug)
+ printk("%s: Command unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ /* If this ever occurs we should really re-write the idle loop, reset
+ the Tx list, and do a complete restart of the command unit.
+ For now we rely on the Tx timeout if the resume doesn't work. */
+ ack_cmd |= CUC_RESUME;
+ }
+
+ if ((status & 0x0070) != 0x0040 && dev->start) {
+ /* The Rx unit is not ready, it must be hung. Restart the receiver by
+ initializing the rx buffers, and issuing an Rx start command. */
+ if (net_debug)
+ printk("%s: Rx unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ init_rx_bufs(dev);
+ shmem[iSCB_RFA >> 1] = RX_BUF_START;
+ ack_cmd |= RX_START;
+ }
+
+ shmem[iSCB_CMD>>1] = ack_cmd;
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+
+ /* Clear the latched interrupt. */
+ outb(0, ioaddr + RESET_IRQ);
+
+ /* Enable the 82586's interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+
+ return;
+}
+
+static int
+el16_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ ushort *shmem = (short*)dev->mem_start;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Flush the Tx and disable Rx. */
+ shmem[iSCB_CMD >> 1] = RX_SUSPEND | CUC_SUSPEND;
+ outb(0, ioaddr + SIGNAL_CA);
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* We always physically use the IRQ line, so we don't do free_irq().
+ We do remove ourselves from the map. */
+
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+el16_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ /* ToDo: decide if there are any useful statistics from the SCB. */
+
+ return &lp->stats;
+}
+
+/* Initialize the Rx-block list. */
+static void
+init_rx_bufs(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short *write_ptr;
+ unsigned short SCB_base = SCB_BASE;
+
+ int cur_rxbuf = lp->rx_head = RX_BUF_START;
+
+ /* Initialize each Rx frame + data buffer. */
+ do { /* While there is room for one more. */
+
+ write_ptr = (unsigned short *)(dev->mem_start + cur_rxbuf);
+
+ *write_ptr++ = 0x0000; /* Status */
+ *write_ptr++ = 0x0000; /* Command */
+ *write_ptr++ = cur_rxbuf + RX_BUF_SIZE; /* Link */
+ *write_ptr++ = cur_rxbuf + 22; /* Buffer offset */
+ *write_ptr++ = 0x0000; /* Pad for dest addr. */
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000; /* Pad for source addr. */
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000; /* Pad for protocol. */
+
+ *write_ptr++ = 0x0000; /* Buffer: Actual count */
+ *write_ptr++ = -1; /* Buffer: Next (none). */
+ *write_ptr++ = cur_rxbuf + 0x20 + SCB_base; /* Buffer: Address low */
+ *write_ptr++ = 0x0000;
+ /* Finally, the number of bytes in the buffer. */
+ *write_ptr++ = 0x8000 + RX_BUF_SIZE-0x20;
+
+ lp->rx_tail = cur_rxbuf;
+ cur_rxbuf += RX_BUF_SIZE;
+ } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
+
+ /* Terminate the list by setting the EOL bit, and wrap the pointer to make
+ the list a ring. */
+ write_ptr = (unsigned short *)
+ (dev->mem_start + lp->rx_tail + 2);
+ *write_ptr++ = 0xC000; /* Command, mark as last. */
+ *write_ptr++ = lp->rx_head; /* Link */
+
+}
+
+void
+init_82586_mem(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ ushort *shmem = (short*)dev->mem_start;
+
+ /* Enable loopback to protect the wire while starting up,
+ and hold the 586 in reset during the memory initialization. */
+ outb(0x20, ioaddr + MISC_CTRL);
+
+ /* Fix the ISCP address and base. */
+ init_words[3] = SCB_BASE;
+ init_words[7] = SCB_BASE;
+
+ /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
+ memcpy((void*)dev->mem_end-10, init_words, 10);
+
+ /* Write the words at 0x0000. */
+ memcpy((char*)dev->mem_start, init_words + 5, sizeof(init_words) - 10);
+
+ /* Fill in the station address. */
+ memcpy((char*)dev->mem_start+SA_OFFSET, dev->dev_addr,
+ sizeof(dev->dev_addr));
+
+ /* The Tx-block list is written as needed. We just set up the values. */
+ lp->tx_cmd_link = IDLELOOP + 4;
+ lp->tx_head = lp->tx_reap = TX_BUF_START;
+
+ init_rx_bufs(dev);
+
+ /* Start the 586 by releasing the reset line, but leave loopback. */
+ outb(0xA0, ioaddr + MISC_CTRL);
+
+ /* This was time consuming to track down: you need to give two channel
+ attention signals to reliably start up the i82586. */
+ outb(0, ioaddr + SIGNAL_CA);
+
+ {
+ int boguscnt = 50;
+ while (shmem[iSCB_STATUS>>1] == 0)
+ if (--boguscnt == 0) {
+ printk("%s: i82586 initialization timed out with status %04x,"
+ "cmd %04x.\n", dev->name,
+ shmem[iSCB_STATUS>>1], shmem[iSCB_CMD>>1]);
+ break;
+ }
+ /* Issue channel-attn -- the 82586 won't start. */
+ outb(0, ioaddr + SIGNAL_CA);
+ }
+
+ /* Disable loopback and enable interrupts. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ if (net_debug > 4)
+ printk("%s: Initialized 82586, status %04x.\n", dev->name,
+ shmem[iSCB_STATUS>>1]);
+ return;
+}
+
+static void
+hardware_send_packet(struct device *dev, void *buf, short length)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ ushort tx_block = lp->tx_head;
+ ushort *write_ptr = (ushort *)(dev->mem_start + tx_block);
+
+ /* Set the write pointer to the Tx block, and put out the header. */
+ *write_ptr++ = 0x0000; /* Tx status */
+ *write_ptr++ = CMD_INTR|CmdTx; /* Tx command */
+ *write_ptr++ = tx_block+16; /* Next command is a NoOp. */
+ *write_ptr++ = tx_block+8; /* Data Buffer offset. */
+
+ /* Output the data buffer descriptor. */
+ *write_ptr++ = length | 0x8000; /* Byte count parameter. */
+ *write_ptr++ = -1; /* No next data buffer. */
+ *write_ptr++ = tx_block+22+SCB_BASE;/* Buffer follows the NoOp command. */
+ *write_ptr++ = 0x0000; /* Buffer address high bits (always zero). */
+
+ /* Output the Loop-back NoOp command. */
+ *write_ptr++ = 0x0000; /* Tx status */
+ *write_ptr++ = CmdNOp; /* Tx command */
+ *write_ptr++ = tx_block+16; /* Next is myself. */
+
+ /* Output the packet at the write pointer. */
+ memcpy(write_ptr, buf, length);
+
+ /* Set the old command link pointing to this send packet. */
+ *(ushort*)(dev->mem_start + lp->tx_cmd_link) = tx_block;
+ lp->tx_cmd_link = tx_block + 20;
+
+ /* Set the next free tx region. */
+ lp->tx_head = tx_block + TX_BUF_SIZE;
+ if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_head = TX_BUF_START;
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
+ dev->name, ioaddr, length, tx_block, lp->tx_head);
+ }
+
+ if (lp->tx_head != lp->tx_reap)
+ dev->tbusy = 0;
+}
+
+static void
+el16_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short *shmem = (short*)dev->mem_start;
+ ushort rx_head = lp->rx_head;
+ ushort rx_tail = lp->rx_tail;
+ ushort boguscount = 10;
+ short frame_status;
+
+ while ((frame_status = shmem[rx_head>>1]) < 0) { /* Command complete */
+ ushort *read_frame = (short *)(dev->mem_start + rx_head);
+ ushort rfd_cmd = read_frame[1];
+ ushort next_rx_frame = read_frame[2];
+ ushort data_buffer_addr = read_frame[3];
+ ushort *data_frame = (short *)(dev->mem_start + data_buffer_addr);
+ ushort pkt_len = data_frame[0];
+
+ if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
+ || (pkt_len & 0xC000) != 0xC000) {
+ printk("%s: Rx frame at %#x corrupted, status %04x cmd %04x"
+ "next %04x data-buf @%04x %04x.\n", dev->name, rx_head,
+ frame_status, rfd_cmd, next_rx_frame, data_buffer_addr,
+ pkt_len);
+ } else if ((frame_status & 0x2000) == 0) {
+ /* Frame Rxed, but with error. */
+ lp->stats.rx_errors++;
+ if (frame_status & 0x0800) lp->stats.rx_crc_errors++;
+ if (frame_status & 0x0400) lp->stats.rx_frame_errors++;
+ if (frame_status & 0x0200) lp->stats.rx_fifo_errors++;
+ if (frame_status & 0x0100) lp->stats.rx_over_errors++;
+ if (frame_status & 0x0080) lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb_reserve(skb,2);
+ skb->dev = dev;
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb,pkt_len), data_frame + 5, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+
+ /* Clear the status word and set End-of-List on the rx frame. */
+ read_frame[0] = 0;
+ read_frame[1] = 0xC000;
+ /* Clear the end-of-list on the prev. RFD. */
+ *(short*)(dev->mem_start + rx_tail + 2) = 0x0000;
+
+ rx_tail = rx_head;
+ rx_head = next_rx_frame;
+ if (--boguscount == 0)
+ break;
+ }
+
+ lp->rx_head = rx_head;
+ lp->rx_tail = rx_tail;
+}
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_3c507 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, el16_probe
+};
+
+static int io = 0x300;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("3c507: You should not use auto-probing with insmod!\n");
+ dev_3c507.base_addr = io;
+ dev_3c507.irq = irq;
+ if (register_netdev(&dev_3c507) != 0) {
+ printk("3c507: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_3c507);
+ kfree(dev_3c507.priv);
+ dev_3c507.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_3c507.irq, NULL);
+ release_region(dev_3c507.base_addr, EL16_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -I/usr/src/linux/drivers/net -Wall -Wstrict-prototypes -O6 -m486 -c 3c507.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c509.c b/linux/src/drivers/net/3c509.c
new file mode 100644
index 0000000..727595c
--- /dev/null
+++ b/linux/src/drivers/net/3c509.c
@@ -0,0 +1,842 @@
+/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
+/*
+ Written 1993-1998 by Donald Becker.
+
+ Copyright 1994-1998 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This driver is for the 3Com EtherLinkIII series.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Known limitations:
+ Because of the way 3c509 ISA detection works it's difficult to predict
+ a priori which of several ISA-mode cards will be detected first.
+
+ This driver does not use predictive interrupt mode, resulting in higher
+ packet latency but lower overhead. If interrupts are disabled for an
+ unusually long time it could also result in missed packets, but in
+ practice this rarely happens.
+
+
+ FIXES:
+ Alan Cox: Removed the 'Unexpected interrupt' bug.
+ Michael Meskes: Upgraded to Donald Becker's version 1.07.
+ Alan Cox: Increased the eeprom delay. Regardless of
+ what the docs say some people definitely
+ get problems with lower (but in card spec)
+ delays
+ v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
+ other cleanups. -djb
+ v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb
+ v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb
+ v1.15 1/31/98 Faster recovery for Tx errors. -djb
+ v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb
+*/
+
+static char *version = "3c509.c:1.16 2/3/98 becker@cesdis.gsfc.nasa.gov\n";
+/* A few values that may be tweaked. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (400*HZ/1000)
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 10;
+
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/config.h> /* for CONFIG_MCA */
+#include <linux/delay.h> /* for udelay() */
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef EL3_DEBUG
+int el3_debug = EL3_DEBUG;
+#else
+int el3_debug = 2;
+#endif
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ anyway if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+#define EEPROM_READ 0x80
+
+#define EL3_IO_EXTENT 16
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,};
+
+enum c509status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+
+/*
+ * Must be a power of two (we use a binary and in the
+ * circular queue)
+ */
+#define SKB_QUEUE_SIZE 64
+
+/* Per-board private state; boards are chained through next_dev so
+   cleanup_module() can walk and free every probed card. */
+struct el3_private {
+	struct enet_statistics stats;
+	struct device *next_dev;	/* Next probed board (see el3_probe/cleanup_module). */
+	/* skb send-queue */
+	/* NOTE(review): head/size/queue appear unused in the visible code of
+	   this file -- confirm before relying on them. */
+	int head, size;
+	struct sk_buff *queue[SKB_QUEUE_SIZE];
+};
+static int id_port = 0x110; /* Start with 0x110 to avoid new sound cards.*/
+static struct device *el3_root_dev = NULL;
+
+static ushort id_read_eeprom(int index);
+static ushort read_eeprom(int ioaddr, int index);
+static int el3_open(struct device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct device *dev);
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(int addr, struct device *dev);
+static struct enet_statistics *el3_get_stats(struct device *dev);
+static int el3_rx(struct device *dev);
+static int el3_close(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+
+/* Probe for and initialize one 3c509 board.
+   Detection order: EISA slots, then MCA (if configured), then the ISA
+   ID-port contention-select sequence.  Repeated calls find successive
+   cards via the static 'current_tag'.  Fills in dev (allocating one if
+   dev == NULL) and returns 0 on success, -ENODEV or -ENOMEM on failure. */
+int el3_probe(struct device *dev)
+{
+	short lrs_state = 0xff, i;
+	int ioaddr, irq, if_port;
+	u16 phys_addr[3];
+	static int current_tag = 0;
+
+	/* First check all slots of the EISA bus. The next slot address to
+	   probe is kept in 'eisa_addr' to support multiple probe() calls. */
+	if (EISA_bus) {
+		static int eisa_addr = 0x1000;
+		while (eisa_addr < 0x9000) {
+			ioaddr = eisa_addr;
+			eisa_addr += 0x1000;
+
+			/* Check the standard EISA ID register for an encoded '3Com'. */
+			if (inw(ioaddr + 0xC80) != 0x6d50)
+				continue;
+
+			/* Change the register set to the configuration window 0. */
+			outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
+
+			irq = inw(ioaddr + WN0_IRQ) >> 12;
+			if_port = inw(ioaddr + 6)>>14;
+			for (i = 0; i < 3; i++)
+				phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+			/* Restore the "Product ID" to the EEPROM read register. */
+			read_eeprom(ioaddr, 3);
+
+			/* Was the EISA code an add-on hack? Nahhhhh... */
+			goto found;
+		}
+	}
+
+#ifdef CONFIG_MCA
+	if (MCA_bus) {
+		mca_adaptor_select_mode(1);
+		for (i = 0; i < 8; i++)
+			if ((mca_adaptor_id(i) | 1) == 0x627c) {
+				ioaddr = mca_pos_base_addr(i);
+				irq = inw(ioaddr + WN0_IRQ) >> 12;
+				if_port = inw(ioaddr + 6)>>14;
+				for (i = 0; i < 3; i++)
+					phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+				mca_adaptor_select_mode(0);
+				goto found;
+			}
+		mca_adaptor_select_mode(0);
+
+	}
+#endif
+
+	/* Reset the ISA PnP mechanism on 3c509b. */
+	outb(0x02, 0x279); /* Select PnP config control register. */
+	outb(0x02, 0xA79); /* Return to WaitForKey state. */
+	/* Select an open I/O location at 0x1*0 to do contention select. */
+	for ( ; id_port < 0x200; id_port += 0x10) {
+		if (check_region(id_port, 1))
+			continue;
+		outb(0x00, id_port);
+		outb(0xff, id_port);
+		if (inb(id_port) & 0x01)
+			break;
+	}
+	if (id_port >= 0x200) { /* GCC optimizes this test out. */
+		/* Rare -- do we really need a warning? */
+		printk(" WARNING: No I/O port available for 3c509 activation.\n");
+		return -ENODEV;
+	}
+	/* Next check for all ISA bus boards by sending the ID sequence to the
+	   ID_PORT. We find cards past the first by setting the 'current_tag'
+	   on cards as they are found. Cards with their tag set will not
+	   respond to subsequent ID sequences. */
+
+	outb(0x00, id_port);
+	outb(0x00, id_port);
+	for(i = 0; i < 255; i++) {
+		outb(lrs_state, id_port);
+		lrs_state <<= 1;
+		lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
+	}
+
+	/* For the first probe, clear all board's tag registers. */
+	if (current_tag == 0)
+		outb(0xd0, id_port);
+	else /* Otherwise kill off already-found boards. */
+		outb(0xd8, id_port);
+
+	if (id_read_eeprom(7) != 0x6d50) {
+		return -ENODEV;
+	}
+
+	/* Read in EEPROM data, which does contention-select.
+	   Only the lowest address board will stay "on-line".
+	   3Com got the byte order backwards. */
+	for (i = 0; i < 3; i++) {
+		phys_addr[i] = htons(id_read_eeprom(i));
+	}
+
+	{
+		unsigned int iobase = id_read_eeprom(8);
+		if_port = iobase >> 14;
+		ioaddr = 0x200 + ((iobase & 0x1f) << 4);
+	}
+	irq = id_read_eeprom(9) >> 12;
+
+	if (dev) { /* Set passed-in IRQ or I/O Addr. */
+		if (dev->irq > 1 && dev->irq < 16)
+			irq = dev->irq;
+
+		if (dev->base_addr) {
+			if (dev->mem_end == 0x3c509 /* Magic key */
+				&& dev->base_addr >= 0x200 && dev->base_addr <= 0x3e0)
+				ioaddr = dev->base_addr & 0x3f0;
+			else if (dev->base_addr != ioaddr)
+				return -ENODEV;
+		}
+	}
+
+	/* Set the adaptor tag so that the next card can be found. */
+	outb(0xd0 + ++current_tag, id_port);
+
+	/* Activate the adaptor at the EEPROM location. */
+	outb((ioaddr >> 4) | 0xe0, id_port);
+
+	EL3WINDOW(0);
+	if (inw(ioaddr) != 0x6d50)
+		return -ENODEV;
+
+	/* Free the interrupt so that some other card can use it. */
+	outw(0x0f00, ioaddr + WN0_IRQ);
+ found:
+	if (dev == NULL) {
+		dev = init_etherdev(dev, sizeof(struct el3_private));
+	}
+	memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+	dev->if_port = (dev->mem_start & 0x1f) ? dev->mem_start & 3 : if_port;
+
+	request_region(dev->base_addr, EL3_IO_EXTENT, "3c509");
+
+	{
+		const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
+		printk("%s: 3c509 at %#3.3lx tag %d, %s port, address ",
+			   dev->name, dev->base_addr, current_tag, if_names[dev->if_port]);
+	}
+
+	/* Read in the station address. */
+	for (i = 0; i < 6; i++)
+		printk(" %2.2x", dev->dev_addr[i]);
+	printk(", IRQ %d.\n", dev->irq);
+
+	/* Make up an EL3-specific-data structure. */
+	if (dev->priv == NULL)
+		dev->priv = kmalloc(sizeof(struct el3_private), GFP_KERNEL);
+	if (dev->priv == NULL)
+		return -ENOMEM;
+	memset(dev->priv, 0, sizeof(struct el3_private));
+
+	/* Chain onto the driver's device list for cleanup_module(). */
+	((struct el3_private *)dev->priv)->next_dev = el3_root_dev;
+	el3_root_dev = dev;
+
+	if (el3_debug > 0)
+		printk("%s", version);
+
+	/* The EL3-specific entries in the device structure. */
+	dev->open = &el3_open;
+	dev->hard_start_xmit = &el3_start_xmit;
+	dev->stop = &el3_close;
+	dev->get_stats = &el3_get_stats;
+	dev->set_multicast_list = &set_multicast_list;
+
+	/* Fill in the generic fields of the device structure. */
+	ether_setup(dev);
+	return 0;
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static ushort read_eeprom(int ioaddr, int index)
+{
+	/* NOTE(review): offsets +10/+12 are presumably the window-0 EEPROM
+	   command and data registers -- confirm against the 3c509 docs. */
+	outw(EEPROM_READ + index, ioaddr + 10);
+	/* Pause for at least 162 us. for the read to take place. */
+	udelay (500);
+	return inw(ioaddr + 12);
+}
+
+/* Read a word from the EEPROM when in the ISA ID probe state.
+   The 16 bits arrive serially, one bit per read of the ID port, MSB
+   first. */
+static ushort id_read_eeprom(int index)
+{
+	int bit, word = 0;
+
+	/* Issue read command, and pause for at least 162 us. for it to complete.
+	   Assume extra-fast 16Mhz bus. */
+	outb(EEPROM_READ + index, id_port);
+
+	/* Pause for at least 162 us. for the read to take place. */
+	udelay (500);
+
+	for (bit = 15; bit >= 0; bit--)
+		word = (word << 1) + (inb(id_port) & 0x01);
+
+	if (el3_debug > 3)
+		printk(" 3c509 EEPROM word %d %#4.4x.\n", index, word);
+
+	return word;
+}
+
+
+
+/* Bring the interface up: reset Tx/Rx, claim the IRQ, program the
+   station address, start the selected transceiver, clear the on-board
+   statistics by reading them, and enable interrupts.  Returns -EAGAIN
+   only if the IRQ cannot be acquired; otherwise always 0. */
+static int
+el3_open(struct device *dev)
+{
+	int ioaddr = dev->base_addr;
+	int i;
+
+	outw(TxReset, ioaddr + EL3_CMD);
+	outw(RxReset, ioaddr + EL3_CMD);
+	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+	if (request_irq(dev->irq, &el3_interrupt, 0, "3c509", dev)) {
+		return -EAGAIN;
+	}
+
+	EL3WINDOW(0);
+	if (el3_debug > 3)
+		printk("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
+			   dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
+
+	/* Activate board: this is probably unnecessary. */
+	outw(0x0001, ioaddr + 4);
+
+	/* Set the IRQ line. */
+	outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);
+
+	/* Set the station address in window 2 each time opened. */
+	EL3WINDOW(2);
+
+	for (i = 0; i < 6; i++)
+		outb(dev->dev_addr[i], ioaddr + i);
+
+	if (dev->if_port == 3)
+		/* Start the thinnet transceiver. We should really wait 50ms...*/
+		outw(StartCoax, ioaddr + EL3_CMD);
+	else if (dev->if_port == 0) {
+		/* 10baseT interface, enabled link beat and jabber check. */
+		EL3WINDOW(4);
+		outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
+	}
+
+	/* Switch to the stats window, and clear all stats by reading. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+	EL3WINDOW(6);
+	for (i = 0; i < 9; i++)
+		inb(ioaddr + i);
+	inw(ioaddr + 10);
+	inw(ioaddr + 12);
+
+	/* Switch to register set 1 for normal use. */
+	EL3WINDOW(1);
+
+	/* Accept b-cast and phys addr only. */
+	outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+	dev->interrupt = 0;
+	dev->tbusy = 0;
+	dev->start = 1;
+
+	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+	/* Allow status bits to be seen. */
+	outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+	/* Ack all pending events, and set active indicator mask. */
+	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+		 ioaddr + EL3_CMD);
+	outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull,
+		 ioaddr + EL3_CMD);
+
+	if (el3_debug > 3)
+		printk("%s: Opened 3c509 IRQ %d status %4.4x.\n",
+			   dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
+
+	MOD_INC_USE_COUNT;
+	return 0; /* Always succeed */
+}
+
+/* Queue one packet for transmission.
+   First recovers from a hung transmitter (TX_TIMEOUT jiffies without
+   progress), then copies the frame into the Tx FIFO with programmed I/O.
+   Returns 1 to ask the network layer to requeue while busy, 0 once the
+   skb has been consumed (it is freed here in all 0-return paths). */
+static int
+el3_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+	struct el3_private *lp = (struct el3_private *)dev->priv;
+	int ioaddr = dev->base_addr;
+
+	/* Transmitter timeout, serious problems. */
+	if (dev->tbusy) {
+		int tickssofar = jiffies - dev->trans_start;
+		if (tickssofar < TX_TIMEOUT)
+			return 1;
+		printk("%s: transmit timed out, Tx_status %2.2x status %4.4x "
+			   "Tx FIFO room %d.\n",
+			   dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
+			   inw(ioaddr + TX_FREE));
+		lp->stats.tx_errors++;
+		dev->trans_start = jiffies;
+		/* Issue TX_RESET and TX_START commands. */
+		outw(TxReset, ioaddr + EL3_CMD);
+		outw(TxEnable, ioaddr + EL3_CMD);
+		dev->tbusy = 0;
+	}
+
+	if (el3_debug > 4) {
+		printk("%s: el3_start_xmit(length = %ld) called, status %4.4x.\n",
+			   dev->name, skb->len, inw(ioaddr + EL3_STATUS));
+	}
+#if 0
+#ifndef final_version
+	{ /* Error-checking code, delete someday. */
+		ushort status = inw(ioaddr + EL3_STATUS);
+		if (status & 0x0001 /* IRQ line active, missed one. */
+			&& inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
+			printk("%s: Missed interrupt, status then %04x now %04x"
+				   " Tx %2.2x Rx %4.4x.\n", dev->name, status,
+				   inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
+				   inw(ioaddr + RX_STATUS));
+			/* Fake interrupt trigger by masking, acknowledge interrupts. */
+			outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+			outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+				 ioaddr + EL3_CMD);
+			outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+		}
+	}
+#endif
+#endif
+	/* Avoid timer-based retransmission conflicts. */
+	if (set_bit(0, (void*)&dev->tbusy) != 0)
+		printk("%s: Transmitter access conflict.\n", dev->name);
+	else {
+		/* Put out the doubleword header... */
+		outw(skb->len, ioaddr + TX_FIFO);
+		outw(0x00, ioaddr + TX_FIFO);
+		/* ... and the packet rounded to a doubleword. */
+#ifdef __powerpc__
+		outsl_unswapped(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#else
+		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#endif
+
+		dev->trans_start = jiffies;
+		if (inw(ioaddr + TX_FREE) > 1536) {
+			dev->tbusy = 0;
+		} else
+			/* Interrupt us when the FIFO has room for max-sized packet. */
+			outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+	}
+
+	dev_kfree_skb (skb, FREE_WRITE);
+
+	/* Clear the Tx status stack. */
+	{
+		short tx_status;
+		int i = 4;
+
+		while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+			if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+			if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+			if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+			outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+		}
+	}
+	return 0;
+}
+
+/* The EL3 interrupt handler.
+   Services Rx-complete, Tx-room, statistics-full, Tx-error and
+   adapter-failure events; the event loop is bounded by
+   max_interrupt_work to avoid a livelock on a wedged board. */
+static void
+el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct device *dev = (struct device *)dev_id;
+	int ioaddr, status;
+	int i = max_interrupt_work;
+
+	if (dev == NULL) {
+		printk ("el3_interrupt(): irq %d for unknown device.\n", irq);
+		return;
+	}
+
+	if (dev->interrupt)
+		printk("%s: Re-entering the interrupt handler.\n", dev->name);
+	dev->interrupt = 1;
+
+	ioaddr = dev->base_addr;
+	status = inw(ioaddr + EL3_STATUS);
+
+	if (el3_debug > 4)
+		printk("%s: interrupt, status %4.4x.\n", dev->name, status);
+
+	while ((status = inw(ioaddr + EL3_STATUS)) &
+		   (IntLatch | RxComplete | StatsFull)) {
+
+		if (status & RxComplete)
+			el3_rx(dev);
+
+		if (status & TxAvailable) {
+			if (el3_debug > 5)
+				printk(" TX room bit was handled.\n");
+			/* There's room in the FIFO for a full-sized packet. */
+			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+			dev->tbusy = 0;
+			mark_bh(NET_BH);
+		}
+		if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) {
+			/* Handle all uncommon interrupts. */
+			if (status & StatsFull) /* Empty statistics. */
+				update_stats(ioaddr, dev);
+			if (status & RxEarly) { /* Rx early is unused. */
+				el3_rx(dev);
+				outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+			}
+			if (status & TxComplete) { /* Really Tx error. */
+				struct el3_private *lp = (struct el3_private *)dev->priv;
+				short tx_status;
+				int i = 4;
+
+				/* Drain (at most 3 entries of) the Tx status stack. */
+				while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+					if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+					if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+					if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+					outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+				}
+			}
+			if (status & AdapterFailure) {
+				/* Adapter failure requires Rx reset and reinit. */
+				outw(RxReset, ioaddr + EL3_CMD);
+				/* Set the Rx filter to the current state. */
+				outw(SetRxFilter | RxStation | RxBroadcast
+					 | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
+					 | (dev->flags & IFF_PROMISC ? RxProm : 0),
+					 ioaddr + EL3_CMD);
+				outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+				outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+			}
+		}
+
+		if (--i < 0) {
+			printk("%s: Infinite loop in interrupt, status %4.4x.\n",
+				   dev->name, status);
+			/* Clear all interrupts. */
+			outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+			break;
+		}
+		/* Acknowledge the IRQ. */
+		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */
+	}
+
+	if (el3_debug > 4) {
+		printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
+			   inw(ioaddr + EL3_STATUS));
+	}
+
+	dev->interrupt = 0;
+	return;
+}
+
+
+/* Return the accumulated statistics.  Interrupts are disabled around
+   update_stats() because it temporarily switches register windows. */
+static struct enet_statistics *
+el3_get_stats(struct device *dev)
+{
+	struct el3_private *lp = (struct el3_private *)dev->priv;
+	unsigned long flags;
+
+	save_flags(flags);
+	cli();
+	update_stats(dev->base_addr, dev);
+	restore_flags(flags);
+	return &lp->stats;
+}
+
+/* Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+ */
+static void update_stats(int ioaddr, struct device *dev)
+{
+	struct el3_private *lp = (struct el3_private *)dev->priv;
+
+	if (el3_debug > 5)
+		printk(" Updating the statistics.\n");
+	/* Turn off statistics updates while reading. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+	/* Switch to the stats window, and read everything. */
+	/* Unused counters are still read so they are cleared (the board's
+	   counters clear on read -- see the "clear by reading" in el3_open). */
+	EL3WINDOW(6);
+	lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+	lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+	/* Multiple collisions. */ inb(ioaddr + 2);
+	lp->stats.collisions += inb(ioaddr + 3);
+	lp->stats.tx_window_errors += inb(ioaddr + 4);
+	lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+	lp->stats.tx_packets += inb(ioaddr + 6);
+	/* Rx packets */ inb(ioaddr + 7);
+	/* Tx deferrals */ inb(ioaddr + 8);
+	inw(ioaddr + 10);	/* Total Rx and Tx octets. */
+	inw(ioaddr + 12);
+
+	/* Back to window 1, and turn statistics back on. */
+	EL3WINDOW(1);
+	outw(StatsEnable, ioaddr + EL3_CMD);
+	return;
+}
+
+/* Drain all completed packets from the Rx FIFO.
+   Error frames only update the error counters; good frames are copied
+   into a freshly allocated skb and handed to netif_rx().  Allocation
+   failure drops the frame.  Always returns 0. */
+static int
+el3_rx(struct device *dev)
+{
+	struct el3_private *lp = (struct el3_private *)dev->priv;
+	int ioaddr = dev->base_addr;
+	short rx_status;
+
+	if (el3_debug > 5)
+		printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+			   inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+	while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
+		if (rx_status & 0x4000) { /* Error, update stats. */
+			short error = rx_status & 0x3800;
+
+			outw(RxDiscard, ioaddr + EL3_CMD);
+			lp->stats.rx_errors++;
+			switch (error) {
+			case 0x0000: lp->stats.rx_over_errors++; break;
+			case 0x0800: lp->stats.rx_length_errors++; break;
+			case 0x1000: lp->stats.rx_frame_errors++; break;
+			case 0x1800: lp->stats.rx_length_errors++; break;
+			case 0x2000: lp->stats.rx_frame_errors++; break;
+			case 0x2800: lp->stats.rx_crc_errors++; break;
+			}
+		} else {
+			short pkt_len = rx_status & 0x7ff;
+			struct sk_buff *skb;
+
+			skb = dev_alloc_skb(pkt_len+5);
+			if (el3_debug > 4)
+				printk("Receiving packet size %d status %4.4x.\n",
+					   pkt_len, rx_status);
+			if (skb != NULL) {
+				skb->dev = dev;
+				skb_reserve(skb, 2); /* Align IP on 16 byte */
+
+				/* 'skb->data' points to the start of sk_buff data area. */
+#ifdef __powerpc__
+				insl_unswapped(ioaddr+RX_FIFO, skb_put(skb,pkt_len),
+							   (pkt_len + 3) >> 2);
+#else
+				insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
+					 (pkt_len + 3) >> 2);
+#endif
+
+				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+				skb->protocol = eth_type_trans(skb,dev);
+				netif_rx(skb);
+				lp->stats.rx_packets++;
+				continue;
+			}
+			outw(RxDiscard, ioaddr + EL3_CMD);
+			lp->stats.rx_dropped++;
+			if (el3_debug)
+				printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+					   dev->name, pkt_len);
+		}
+		inw(ioaddr + EL3_STATUS); /* Delay. */
+		while (inw(ioaddr + EL3_STATUS) & 0x1000)
+			printk(" Waiting for 3c509 to discard packet, status %x.\n",
+				   inw(ioaddr + EL3_STATUS) );
+	}
+
+	return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+/* Program the Rx filter from the interface flags: promiscuous mode
+   accepts everything; any multicast membership (or IFF_ALLMULTI)
+   accepts all multicast; otherwise only station + broadcast. */
+static void
+set_multicast_list(struct device *dev)
+{
+	int ioaddr = dev->base_addr;
+	if (el3_debug > 1) {
+		static int old = 0;
+		if (old != dev->mc_count) {
+			old = dev->mc_count;
+			printk("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+		}
+	}
+	if (dev->flags&IFF_PROMISC) {
+		outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+			 ioaddr + EL3_CMD);
+	}
+	else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+		outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
+	}
+	else
+		outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+/* Shut the interface down: stop statistics, disable Rx/Tx, power down
+   the active transceiver, release the IRQ and take a final statistics
+   snapshot.  Always returns 0. */
+static int
+el3_close(struct device *dev)
+{
+	int ioaddr = dev->base_addr;
+
+	if (el3_debug > 2)
+		printk("%s: Shutting down ethercard.\n", dev->name);
+
+	dev->tbusy = 1;
+	dev->start = 0;
+
+	/* Turn off statistics ASAP. We update lp->stats below. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+
+	/* Disable the receiver and transmitter. */
+	outw(RxDisable, ioaddr + EL3_CMD);
+	outw(TxDisable, ioaddr + EL3_CMD);
+
+	if (dev->if_port == 3)
+		/* Turn off thinnet power. Green! */
+		outw(StopCoax, ioaddr + EL3_CMD);
+	else if (dev->if_port == 0) {
+		/* Disable link beat and jabber, if_port may change ere next open(). */
+		EL3WINDOW(4);
+		outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
+	}
+
+	free_irq(dev->irq, dev);
+	/* Switching back to window 0 disables the IRQ. */
+	EL3WINDOW(0);
+	/* But we explicitly zero the IRQ line select anyway. */
+	outw(0x0f00, ioaddr + WN0_IRQ);
+
+	update_stats(ioaddr, dev);
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+#ifdef MODULE
+/* Parameters that may be passed into the module. */
+static int debug = -1;
+static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Module entry point for the 3c509 driver: probe repeatedly, applying
+   the per-card insmod overrides for IRQ and transceiver type.  Returns
+   0 if at least one card was found, -ENODEV otherwise. */
+int
+init_module(void)
+{
+	int el3_cards = 0;
+
+	if (debug >= 0)
+		el3_debug = debug;
+
+	el3_root_dev = NULL;
+	while (el3_probe(0) == 0) {
+		/* el3_probe() pushes each new device onto el3_root_dev. */
+		if (irq[el3_cards] > 1)
+			el3_root_dev->irq = irq[el3_cards];
+		if (xcvr[el3_cards] >= 0)
+			el3_root_dev->if_port = xcvr[el3_cards];
+		el3_cards++;
+	}
+
+	return el3_cards ? 0 : -ENODEV;
+}
+
+/* Module exit for the 3c509 driver: walk the device list built by
+   el3_probe(), unregistering each card and releasing its I/O region
+   and device structure. */
+void
+cleanup_module(void)
+{
+	struct device *next_dev;
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (el3_root_dev) {
+		next_dev = ((struct el3_private *)el3_root_dev->priv)->next_dev;
+		unregister_netdev(el3_root_dev);
+		release_region(el3_root_dev->base_addr, EL3_IO_EXTENT);
+		kfree(el3_root_dev);
+		el3_root_dev = next_dev;
+	}
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c509.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c515.c b/linux/src/drivers/net/3c515.c
new file mode 100644
index 0000000..52f4703
--- /dev/null
+++ b/linux/src/drivers/net/3c515.c
@@ -0,0 +1,1501 @@
+/* 3c515.c: A 3Com ISA EtherLink XL "Corkscrew" ethernet driver for linux. */
+/*
+ Written 1997-1998 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+*/
+
+static char *version = "3c515.c:v0.99 4/7/98 becker@cesdis.gsfc.nasa.gov\n";
+#define CORKSCREW 1
+
+/* "Knobs" that adjust features and parameters. */
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+static const int rx_copybreak = 200;
+/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+static const int mtu = 1500;
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Enable the automatic media selection code -- usually set. */
+#define AUTOMEDIA 1
+
+/* Allow the use of fragment bus master transfers instead of only
+ programmed-I/O for Vortex cards. Full-bus-master transfers are always
+ enabled by default on Boomerang cards. If VORTEX_BUS_MASTER is defined,
+ the feature may be turned on using 'options'. */
+#define VORTEX_BUS_MASTER
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <linux/timer.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#if (LINUX_VERSION_CODE >= 0x10344)
+#define NEW_MULTICAST
+#include <linux/delay.h>
+#else
+#define udelay(microsec) do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+#endif
+
+/* Kernel version compatibility functions. */
+#define RUN_AT(x) (jiffies + (x))
+#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum, dev)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n, instance)
+#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+
+#if (LINUX_VERSION_CODE < 0x20123)
+//#define test_and_set_bit(val, addr) set_bit(val, addr)
+#elif defined(MODULE)
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_interrupt_work, "i");
+#endif
+
+/* "Knobs" for adjusting internal parameters. */
+/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
+#define DRIVER_DEBUG 1
+/* Some values here only for performance evaluation and path-coverage
+ debugging. */
+static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0;
+
+/* Number of times to check to see if the Tx FIFO has space, used in some
+ limited cases. */
+#define WAIT_TX_AVAIL 200
+
+/* Operational parameters that usually are not changed. */
+#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */
+
+/* The size here is somewhat misleading: the Corkscrew also uses the ISA
+ aliased registers at <base>+0x400.
+ */
+#define CORKSCREW_TOTAL_SIZE 0x20
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry tc515_drv =
+{"3c515", tc515_probe, CORKSCREW_TOTAL_SIZE, NULL};
+#endif
+
+#ifdef DRIVER_DEBUG
+int vortex_debug = DRIVER_DEBUG;
+#else
+int vortex_debug = 1;
+#endif
+
+#define CORKSCREW_ID 10
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL,
+3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout,
+it's not practical to integrate this driver with the other EtherLink drivers.
+
+II. Board-specific settings
+
+The Corkscrew has an EEPROM for configuration, but no special settings are
+needed for Linux.
+
+III. Driver operation
+
+The 3c515 series use an interface that's very similar to the 3c900 "Boomerang"
+PCI cards, with the bus master interface extensively modified to work with
+the ISA bus.
+
+The card is capable of full-bus-master transfers with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3.
+
+This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate
+receive buffer. This scheme allocates full-sized skbuffs as receive
+buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is
+chosen to trade-off the memory wasted by passing the full-sized skbuff to
+the queue layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
+
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Terry Murphy of 3Com for providing documentation and a development
+board.
+
+The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com
+project names. I use these names to eliminate confusion -- 3Com product
+numbers and names are very similar and often confused.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes!
+This driver only supports ethernet frames because of the recent MTU limit
+of 1.5K, but the changes to support 4.5K are minimal.
+*/
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+   Note that 11 parameter bits were fine for ethernet, but the new chips
+ can handle FDDI length frames (~4500 octets) and now parameters count
+ 32-bit 'Dwords' rather than octets. */
+
+enum vortex_cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+ UpStall = 6<<11, UpUnstall = (6<<11)+1,
+ DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+ RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+ StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Bits in the general status register. */
+enum vortex_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+ DMAInProgress = 1<<11, /* DMA controller is still busy.*/
+ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0IRQ = 0x08,
+#if defined(CORKSCREW)
+ Wn0EepromCmd = 0x200A, /* Corkscrew EEPROM command register. */
+ Wn0EepromData = 0x200C, /* Corkscrew EEPROM results register. */
+#else
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+#endif
+};
+enum Win0_EEPROM_bits {
+ EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+ EtherLink3ID=7, };
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+union wn3_config {
+ int i;
+ struct w3_config_fields {
+ unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+ int pad8:8;
+ unsigned int ram_split:2, pad18:2, xcvr:3, pad21:1, autoselect:1;
+ int pad24:7;
+ } u;
+};
+
+enum Window4 {
+ Wn4_NetDiag = 6, Wn4_Media = 10, /* Window 4: Xcvr/media bits. */
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 { /* Window 7: Bus Master control. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+};
+/* Boomerang-style bus master control registers. Note ISA aliases! */
+enum MasterCtrl {
+ PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408, FragLen = 0x40c,
+ TxFreeThreshold = 0x40f, UpPktStatus = 0x410, UpListPtr = 0x418,
+};
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
+   alignment constraint on tx_ring[] and rx_ring[]. */
+struct boom_rx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete=0x00008000, RxDError=0x4000,
+ /* See boomerang_rx() for actual error bits */
+};
+
+struct boom_tx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+
+struct vortex_private {
+ char devname[8]; /* "ethN" string, also for kernel debug. */
+ const char *product_name;
+ struct device *next_module;
+ /* The Rx and Tx rings are here to keep them quad-word-aligned. */
+ struct boom_rx_desc rx_ring[RX_RING_SIZE];
+ struct boom_tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ struct timer_list timer; /* Media selection timer. */
+ int capabilities; /* Adapter capabilities word. */
+ int options; /* User-settable misc. driver options. */
+ int last_rx_packets; /* For media autoselection. */
+ unsigned int available_media:8, /* From Wn3_Options */
+ media_override:3, /* Passed-in media type. */
+ default_media:3, /* Read from the EEPROM. */
+ full_duplex:1, autoselect:1,
+ bus_master:1, /* Vortex can only do a fragment bus-m. */
+ full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */
+ tx_full:1;
+};
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII=6, XCVR_Default=8,
+};
+
+static struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config.*/
+ next:8; /* The media type to try next. */
+ short wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
+ { "MII", 0, 0x40, XCVR_10baseT, 3*HZ },
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
+
+static int vortex_scan(struct device *dev);
+static struct device *vortex_found_device(struct device *dev, int ioaddr,
+ int irq, int product_index,
+ int options);
+static int vortex_probe1(struct device *dev);
+static int vortex_open(struct device *dev);
+static void vortex_timer(unsigned long arg);
+static int vortex_start_xmit(struct sk_buff *skb, struct device *dev);
+static int vortex_rx(struct device *dev);
+static int boomerang_rx(struct device *dev);
+static void vortex_interrupt IRQ(int irq, void *dev_id, struct pt_regs *regs);
+static int vortex_close(struct device *dev);
+static void update_stats(int addr, struct device *dev);
+static struct enet_statistics *vortex_get_stats(struct device *dev);
+static void set_rx_mode(struct device *dev);
+
+
+/* Unlike the other PCI cards the 59x cards don't need a large contiguous
+ memory region, so making the driver a loadable module is feasible.
+
+ Unfortunately maximizing the shared code between the integrated and
+ module version of the driver results in a complicated set of initialization
+ procedures.
+ init_module() -- modules / tc59x_init() -- built-in
+ The wrappers for vortex_scan()
+ vortex_scan() The common routine that scans for PCI and EISA cards
+ vortex_found_device() Allocate a device structure when we find a card.
+ Different versions exist for modules and built-in.
+ vortex_probe1() Fill in the device structure -- this is separated
+ so that the modules code can put it in dev->init.
+*/
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Note: this is the only limit on the number of cards supported!! */
+static int options[8] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+
+#ifdef MODULE
+static int debug = -1;
+/* A list of all installed Vortex devices, for removing the driver module. */
+static struct device *root_vortex_dev = NULL;
+
+int
+init_module(void)
+{
+	int cards_found;
+
+	if (debug >= 0)
+		vortex_debug = debug;	/* module parameter overrides compiled-in default */
+	if (vortex_debug)
+		printk("%s", version);
+
+	root_vortex_dev = NULL;
+	cards_found = vortex_scan(0);
+	return cards_found ? 0 : -ENODEV;	/* refuse to load when no card was found */
+}
+
+#else
+int tc515_probe(struct device *dev)
+{
+	int cards_found = 0;
+
+	cards_found = vortex_scan(dev);
+
+	if (vortex_debug > 0 && cards_found)
+		printk("%s", version);	/* announce the driver version once a card is found */
+
+	return cards_found ? 0 : -ENODEV;
+}
+#endif /* not MODULE */
+
+static int vortex_scan(struct device *dev)
+{
+	int cards_found = 0;
+	static int ioaddr = 0x100;	/* static: later calls resume where the last scan stopped */
+
+	/* Check all locations on the ISA bus -- evil! */
+	for (; ioaddr < 0x400; ioaddr += 0x20) {
+		int irq;
+		if (check_region(ioaddr, CORKSCREW_TOTAL_SIZE))
+			continue;
+		/* Check the resource configuration for a matching ioaddr. */
+		if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0))
+			continue;
+		/* Verify by reading the device ID from the EEPROM. */
+		{
+			int timer;
+			outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd);
+			/* Pause for at least 162 us. for the read to take place. */
+			for (timer = 4; timer >= 0; timer--) {
+				udelay(162);
+				if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+					break;
+			}
+			if (inw(ioaddr + Wn0EepromData) != 0x6d50)
+				continue;
+		}
+		printk("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
+			   inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
+		irq = inw(ioaddr + 0x2002) & 15;
+		vortex_found_device(dev, ioaddr, irq, CORKSCREW_ID, dev && dev->mem_start
+							? dev->mem_start : options[cards_found]);
+		dev = 0;
+		cards_found++;
+	}
+
+	if (vortex_debug)
+		printk("%d 3c515 cards found.\n", cards_found);
+	return cards_found;
+}
+
+static struct device *vortex_found_device(struct device *dev, int ioaddr,
+										  int irq, int product_index,
+										  int options)
+{
+	struct vortex_private *vp;
+
+#ifdef MODULE
+	/* Allocate and fill new device structure. */
+	int dev_size = sizeof(struct device) +
+		sizeof(struct vortex_private) + 15;		/* Pad for alignment */
+
+	dev = (struct device *) kmalloc(dev_size, GFP_KERNEL);	/* FIXME(review): result unchecked; memset below derefs NULL on OOM */
+	memset(dev, 0, dev_size);
+	/* Align the Rx and Tx ring entries. */
+	dev->priv = (void *)(((long)dev + sizeof(struct device) + 15) & ~15);
+	vp = (struct vortex_private *)dev->priv;
+	dev->name = vp->devname; /* An empty string. */
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+	dev->dma = (product_index == CORKSCREW_ID ? inw(ioaddr + 0x2000) & 7 : 0);
+	dev->init = vortex_probe1;
+	vp->product_name = "3c515";
+	vp->options = options;
+	if (options >= 0) {
+		vp->media_override = ((options & 7) == 2) ? 0 : options & 7;	/* presumably maps xcvr 2 onto 10baseT -- confirm */
+		vp->full_duplex = (options & 8) ? 1 : 0;
+		vp->bus_master = (options & 16) ? 1 : 0;
+	} else {
+		vp->media_override = 7;	/* 7 == "no override"; see vortex_probe1/vortex_open */
+		vp->full_duplex = 0;
+		vp->bus_master = 0;
+	}
+	ether_setup(dev);
+	vp->next_module = root_vortex_dev;	/* push onto module-unload list */
+	root_vortex_dev = dev;
+	if (register_netdev(dev) != 0)
+		return 0;
+#else /* not a MODULE */
+	if (dev) {
+		/* Caution: quad-word alignment required for rings! */
+		dev->priv = kmalloc(sizeof (struct vortex_private), GFP_KERNEL);	/* FIXME(review): unchecked allocation */
+		memset(dev->priv, 0, sizeof (struct vortex_private));
+	}
+	dev = init_etherdev(dev, sizeof(struct vortex_private));
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+	dev->dma = (product_index == CORKSCREW_ID ? inw(ioaddr + 0x2000) & 7 : 0);
+	vp = (struct vortex_private *)dev->priv;
+	vp->product_name = "3c515";
+	vp->options = options;
+	if (options >= 0) {
+		vp->media_override = ((options & 7) == 2) ? 0 : options & 7;
+		vp->full_duplex = (options & 8) ? 1 : 0;
+		vp->bus_master = (options & 16) ? 1 : 0;
+	} else {
+		vp->media_override = 7;
+		vp->full_duplex = 0;
+		vp->bus_master = 0;
+	}
+
+	vortex_probe1(dev);
+#endif /* MODULE */
+	return dev;
+}
+
+static int vortex_probe1(struct device *dev)
+{
+	int ioaddr = dev->base_addr;
+	struct vortex_private *vp = (struct vortex_private *)dev->priv;
+	unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents; only 0x18 words are read below */
+	int i;
+
+	printk("%s: 3Com %s at %#3x,", dev->name,
+		   vp->product_name, ioaddr);
+
+	/* Read the station address from the EEPROM. */
+	EL3WINDOW(0);
+	for (i = 0; i < 0x18; i++) {
+		short *phys_addr = (short *)dev->dev_addr;
+		int timer;
+		outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
+		/* Pause for at least 162 us. for the read to take place. */
+		for (timer = 4; timer >= 0; timer--) {
+			udelay(162);
+			if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+				break;
+		}
+		eeprom[i] = inw(ioaddr + Wn0EepromData);
+		checksum ^= eeprom[i];
+		if (i < 3)
+			phys_addr[i] = htons(eeprom[i]);	/* MAC is stored big-endian in the EEPROM */
+	}
+	checksum = (checksum ^ (checksum >> 8)) & 0xff;	/* fold XOR of all words down to 8 bits */
+	if (checksum != 0x00)
+		printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+	for (i = 0; i < 6; i++)
+		printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+	if (eeprom[16] == 0x11c7) {	/* Corkscrew */
+		if (request_dma(dev->dma, "3c515")) {
+			printk(", DMA %d allocation failed", dev->dma);
+			dev->dma = 0;	/* 0 means "no DMA"; vortex_open refuses bus-master without it */
+		} else
+			printk(", DMA %d", dev->dma);
+	}
+	printk(", IRQ %d\n", dev->irq);
+	/* Tell them about an invalid IRQ. */
+	if (vortex_debug && (dev->irq <= 0 || dev->irq > 15))
+		printk(" *** Warning: this IRQ is unlikely to work! ***\n");
+
+	{
+		char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+		union wn3_config config;
+		EL3WINDOW(3);
+		vp->available_media = inw(ioaddr + Wn3_Options);
+		config.i = inl(ioaddr + Wn3_Config);
+		if (vortex_debug > 1)
+			printk("  Internal config register is %4.4x, transceivers %#x.\n",
+				   config.i, inw(ioaddr + Wn3_Options));
+		printk("  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+			   8 << config.u.ram_size,
+			   config.u.ram_width ? "word" : "byte",
+			   ram_split[config.u.ram_split],
+			   config.u.autoselect ? "autoselect/" : "",
+			   media_tbl[config.u.xcvr].name);
+		dev->if_port = config.u.xcvr;
+		vp->default_media = config.u.xcvr;
+		vp->autoselect = config.u.autoselect;
+	}
+	if (vp->media_override != 7) {
+		printk("  Media override to transceiver type %d (%s).\n",
+			   vp->media_override, media_tbl[vp->media_override].name);
+		dev->if_port = vp->media_override;
+	}
+
+	vp->capabilities = eeprom[16];
+	vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0;
+	/* Rx is broken at 10mbps, so we always disable it. */
+	/* vp->full_bus_master_rx = 0;*/
+	vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0;	/* NOTE(review): contradicts comment above -- Rx bus-master IS enabled here */
+
+	/* We do a request_region() to register /proc/ioports info. */
+	request_region(ioaddr, CORKSCREW_TOTAL_SIZE, vp->product_name);
+
+	/* The 3c59x-specific entries in the device structure. */
+	dev->open = &vortex_open;
+	dev->hard_start_xmit = &vortex_start_xmit;
+	dev->stop = &vortex_close;
+	dev->get_stats = &vortex_get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+
+	return 0;
+}
+
+
+static int
+vortex_open(struct device *dev)
+{
+	int ioaddr = dev->base_addr;
+	struct vortex_private *vp = (struct vortex_private *)dev->priv;
+	union wn3_config config;
+	int i;
+
+	/* Before initializing select the active media port. */
+	EL3WINDOW(3);
+	if (vp->full_duplex)
+		outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */
+	config.i = inl(ioaddr + Wn3_Config);
+
+	if (vp->media_override != 7) {
+		if (vortex_debug > 1)
+			printk("%s: Media override to transceiver %d (%s).\n",
+				   dev->name, vp->media_override,
+				   media_tbl[vp->media_override].name);
+		dev->if_port = vp->media_override;
+	} else if (vp->autoselect) {
+		/* Find first available media type, starting with 100baseTx. */
+		dev->if_port = 4;
+		while (! (vp->available_media & media_tbl[dev->if_port].mask))
+			dev->if_port = media_tbl[dev->if_port].next;
+
+		if (vortex_debug > 1)
+			printk("%s: Initial media type %s.\n",
+				   dev->name, media_tbl[dev->if_port].name);
+
+		init_timer(&vp->timer);
+		vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
+		vp->timer.data = (unsigned long)dev;
+		vp->timer.function = &vortex_timer;    /* timer handler */
+		add_timer(&vp->timer);
+	} else
+		dev->if_port = vp->default_media;
+
+	config.u.xcvr = dev->if_port;
+	outl(config.i, ioaddr + Wn3_Config);
+
+	if (vortex_debug > 1) {
+		printk("%s: vortex_open() InternalConfig %8.8x.\n",
+			   dev->name, config.i);
+	}
+
+	outw(TxReset, ioaddr + EL3_CMD);
+	for (i = 20; i >= 0 ; i--)	/* bounded spin for the reset to complete */
+		if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+			break;
+
+	outw(RxReset, ioaddr + EL3_CMD);
+	/* Wait a few ticks for the RxReset command to complete. */
+	for (i = 20; i >= 0 ; i--)
+		if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+			break;
+
+	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+	/* Use the now-standard shared IRQ implementation. */
+	if (vp->capabilities == 0x11c7) {
+		/* Corkscrew: Cannot share ISA resources. */
+		if (dev->irq == 0
+			|| dev->dma == 0
+			|| request_irq(dev->irq, &vortex_interrupt, 0,
+						   vp->product_name, dev))
+			return -EAGAIN;
+		enable_dma(dev->dma);
+		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+	} else if (request_irq(dev->irq, &vortex_interrupt, SA_SHIRQ,
+						   vp->product_name, dev)) {
+		return -EAGAIN;
+	}
+
+	if (vortex_debug > 1) {
+		EL3WINDOW(4);
+		printk("%s: vortex_open() irq %d media status %4.4x.\n",
+			   dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+	}
+
+	/* Set the station address and mask in window 2 each time opened. */
+	EL3WINDOW(2);
+	for (i = 0; i < 6; i++)
+		outb(dev->dev_addr[i], ioaddr + i);
+	for (; i < 12; i+=2)
+		outw(0, ioaddr + i);	/* clear the address mask registers */
+
+	if (dev->if_port == 3)
+		/* Start the thinnet transceiver. We should really wait 50ms...*/
+		outw(StartCoax, ioaddr + EL3_CMD);
+	EL3WINDOW(4);
+	outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+		 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+	/* Switch to the stats window, and clear all stats by reading. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+	EL3WINDOW(6);
+	for (i = 0; i < 10; i++)
+		inb(ioaddr + i);
+	inw(ioaddr + 10);
+	inw(ioaddr + 12);
+	/* New: On the Vortex we must also clear the BadSSD counter. */
+	EL3WINDOW(4);
+	inb(ioaddr + 12);
+	/* ..and on the Boomerang we enable the extra statistics bits. */
+	outw(0x0040, ioaddr + Wn4_NetDiag);
+
+	/* Switch to register set 7 for normal use. */
+	EL3WINDOW(7);
+
+	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+		vp->cur_rx = vp->dirty_rx = 0;
+		if (vortex_debug > 2)
+			printk("%s:  Filling in the Rx ring.\n", dev->name);
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			struct sk_buff *skb;
+			if (i < (RX_RING_SIZE - 1))
+				vp->rx_ring[i].next = virt_to_bus(&vp->rx_ring[i+1]);
+			else
+				vp->rx_ring[i].next = 0;
+			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
+			vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
+			skb = dev_alloc_skb(PKT_BUF_SZ);
+			vp->rx_skbuff[i] = skb;
+			if (skb == NULL)
+				break;			/* Bad news!  */
+			skb->dev = dev;			/* Mark as being used by this device. */
+			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+			vp->rx_ring[i].addr = virt_to_bus(skb->tail);
+		}
+		vp->rx_ring[i-1].next = virt_to_bus(&vp->rx_ring[0]);	/* Wrap the ring. */
+		outl(virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
+	}
+	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
+		vp->cur_tx = vp->dirty_tx = 0;
+		outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+		/* Clear the Tx ring. */
+		for (i = 0; i < TX_RING_SIZE; i++)
+			vp->tx_skbuff[i] = 0;
+		outl(0, ioaddr + DownListPtr);
+	}
+	/* Set receiver mode: presumably accept broadcast and phys addr only. */
+	set_rx_mode(dev);
+	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+
+	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+	/* Allow status bits to be seen. */
+	outw(SetStatusEnb | AdapterFailure|IntReq|StatsFull |
+		 (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+		 (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+		 (vp->bus_master ? DMADone : 0),
+		 ioaddr + EL3_CMD);
+	/* Ack all pending events, and set active indicator mask. */
+	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+		 ioaddr + EL3_CMD);
+	outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+		 | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete,
+		 ioaddr + EL3_CMD);
+
+	MOD_INC_USE_COUNT;
+
+	return 0;
+}
+
+static void vortex_timer(unsigned long data)
+{
+#ifdef AUTOMEDIA
+	struct device *dev = (struct device *)data;
+	struct vortex_private *vp = (struct vortex_private *)dev->priv;
+	int ioaddr = dev->base_addr;
+	unsigned long flags;
+	int ok = 0;
+
+	if (vortex_debug > 1)
+		printk("%s: Media selection timer tick happened, %s.\n",
+			   dev->name, media_tbl[dev->if_port].name);
+
+	save_flags(flags);	cli(); {
+		int old_window = inw(ioaddr + EL3_CMD) >> 13;
+		int media_status;
+		EL3WINDOW(4);
+		media_status = inw(ioaddr + Wn4_Media);
+		switch (dev->if_port) {
+		  case 0:  case 4:  case 5:	/* 10baseT, 100baseTX, 100baseFX  */
+			if (media_status & Media_LnkBeat) {
+			  ok = 1;
+			  if (vortex_debug > 1)
+				printk("%s: Media %s has link beat, %x.\n",
+					   dev->name, media_tbl[dev->if_port].name, media_status);
+			} else if (vortex_debug > 1)
+			  printk("%s: Media %s has no link beat, %x.\n",
+					   dev->name, media_tbl[dev->if_port].name, media_status);
+
+			break;
+		  default:					/* Other media types handled by Tx timeouts. */
+			if (vortex_debug > 1)
+			  printk("%s: Media %s has no indication, %x.\n",
+					 dev->name, media_tbl[dev->if_port].name, media_status);
+			ok = 1;
+		}
+		if ( ! ok) {
+			union wn3_config config;
+
+			do {
+				dev->if_port = media_tbl[dev->if_port].next;
+			} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
+			if (dev->if_port == 8) { /* Go back to default. */
+			  dev->if_port = vp->default_media;
+			  if (vortex_debug > 1)
+				printk("%s: Media selection failing, using default %s port.\n",
+					   dev->name, media_tbl[dev->if_port].name);
+			} else {
+			  if (vortex_debug > 1)
+				printk("%s: Media selection failed, now trying %s port.\n",
+					   dev->name, media_tbl[dev->if_port].name);
+			  vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
+			  add_timer(&vp->timer);
+			}
+			outw((media_status & ~(Media_10TP|Media_SQE)) |
+				 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+			EL3WINDOW(3);
+			config.i = inl(ioaddr + Wn3_Config);
+			config.u.xcvr = dev->if_port;
+			outl(config.i, ioaddr + Wn3_Config);
+
+			outw(dev->if_port == 3 ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+		}
+		EL3WINDOW(old_window);
+	}   restore_flags(flags);
+	if (vortex_debug > 1)
+	  printk("%s: Media selection timer finished, %s.\n",
+			 dev->name, media_tbl[dev->if_port].name);
+
+#endif /* AUTOMEDIA*/
+	return;
+}
+
+static int
+vortex_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ int i;
+
+ /* Min. wait before assuming a Tx failed == 400ms. */
+
+ if (tickssofar < 400*HZ/1000) /* We probably aren't empty. */
+ return 1;
+ printk("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, inb(ioaddr + TxStatus),
+ inw(ioaddr + EL3_STATUS));
+ /* Slight code bloat to be user friendly. */
+ if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
+ printk("%s: Transmitter encountered 16 collisions -- network"
+ " network cable problem?\n", dev->name);
+#ifndef final_version
+ printk(" Flags; bus-master %d, full %d; dirty %d current %d.\n",
+ vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, vp->cur_tx);
+ printk(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr),
+ &vp->tx_ring[0]);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(" %d: %p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+ vp->tx_ring[i].length,
+ vp->tx_ring[i].status);
+ }
+#endif
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+ /* dev->tbusy = 0;*/
+ vp->stats.tx_errors++;
+ vp->stats.tx_dropped++;
+ return 0; /* Yes, silently *drop* the packet! */
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ If this ever occurs the queue layer is doing something evil! */
+ if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */
+ /* Calculate the next Tx descriptor entry. */
+ int entry = vp->cur_tx % TX_RING_SIZE;
+ struct boom_tx_desc *prev_entry;
+ unsigned long flags, i;
+
+ if (vp->tx_full) /* No room to transmit with */
+ return 1;
+ if (vp->cur_tx != 0)
+ prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+ else
+ prev_entry = NULL;
+ if (vortex_debug > 3)
+ printk("%s: Trying to send a packet, Tx index %d.\n",
+ dev->name, vp->cur_tx);
+ /* vp->tx_full = 1; */
+ vp->tx_skbuff[entry] = skb;
+ vp->tx_ring[entry].next = 0;
+ vp->tx_ring[entry].addr = virt_to_bus(skb->data);
+ vp->tx_ring[entry].length = skb->len | 0x80000000;
+ vp->tx_ring[entry].status = skb->len | 0x80000000;
+
+ save_flags(flags);
+ cli();
+ outw(DownStall, ioaddr + EL3_CMD);
+ /* Wait for the stall to complete. */
+ for (i = 20; i >= 0 ; i--)
+ if ( (inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
+ break;
+ if (prev_entry)
+ prev_entry->next = virt_to_bus(&vp->tx_ring[entry]);
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(virt_to_bus(&vp->tx_ring[entry]), ioaddr + DownListPtr);
+ queued_packet++;
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ restore_flags(flags);
+
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
+ vp->tx_full = 1;
+ else { /* Clear previous interrupt enable. */
+ if (prev_entry)
+ prev_entry->status &= ~0x80000000;
+ dev->tbusy = 0;
+ }
+ dev->trans_start = jiffies;
+ return 0;
+ }
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+#ifdef VORTEX_BUS_MASTER
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ outl((int)(skb->data), ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ /* dev->tbusy will be cleared at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb, FREE_WRITE);
+ if (inw(ioaddr + TxFree) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+#else
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb, FREE_WRITE);
+ if (inw(ioaddr + TxFree) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+#endif /* bus master */
+
+ dev->trans_start = jiffies;
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (vortex_debug > 2)
+ printk("%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 20; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void vortex_interrupt IRQ(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* Use the now-standard shared IRQ implementation. */
+ struct device *dev = dev_id;
+ struct vortex_private *lp;
+ int ioaddr, status;
+ int latency;
+ /* Work budget: leave the handler after this many event-loop passes. */
+ int i = max_interrupt_work;
+
+ /* Re-entry guard; cleared again just before returning. */
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ /* Snapshot of the chip's Timer register, reported only in debug output. */
+ latency = inb(ioaddr + Timer);
+ lp = (struct vortex_private *)dev->priv;
+
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (vortex_debug > 4)
+ printk("%s: interrupt, status %4.4x, timer %d.\n", dev->name,
+ status, latency);
+ if ((status & 0xE000) != 0xE000) {
+ static int donedidthis=0;
+ /* Some interrupt controllers store a bogus interrupt from boot-time.
+ Ignore a single early interrupt, but don't hang the machine for
+ other interrupt problems. */
+ if (donedidthis++ > 100) {
+ printk("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
+ dev->name, status, dev->start);
+ FREE_IRQ(dev->irq, dev);
+ }
+ }
+
+ /* Service events until IntLatch/RxComplete deassert or the budget runs out. */
+ do {
+ if (vortex_debug > 5)
+ printk("%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ vortex_rx(dev);
+
+ if (status & TxAvailable) {
+ if (vortex_debug > 5)
+ printk(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ if (status & DownComplete) {
+ unsigned int dirty_tx = lp->dirty_tx;
+
+ /* Reclaim Tx ring entries the NIC has finished transmitting. */
+ while (lp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ if (inl(ioaddr + DownListPtr) ==
+ virt_to_bus(&lp->tx_ring[entry]))
+ break; /* It still hasn't been processed. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+ lp->dirty_tx = dirty_tx;
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ /* The ring has free space again: let the queue layer resume. */
+ if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
+ lp->tx_full= 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ }
+#ifdef VORTEX_BUS_MASTER
+ if (status & DMADone) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ dev->tbusy = 0;
+ dev_kfree_skb (lp->tx_skb, FREE_WRITE); /* Release the transfered buffer */
+ mark_bh(NET_BH);
+ }
+#endif
+ if (status & UpComplete) {
+ boomerang_rx(dev);
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ }
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts at once. */
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat = 0;
+ if (vortex_debug > 4)
+ printk("%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* DEBUG HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ inw(ioaddr + EL3_STATUS) & StatsFull) {
+ int win, reg;
+ printk("%s: Updating stats failed, disabling stats as an"
+ " interrupt source.\n", dev->name);
+ /* Dump all eight register windows to aid diagnosis. */
+ for (win = 0; win < 8; win++) {
+ EL3WINDOW(win);
+ printk("\n Vortex window %d:", win);
+ for (reg = 0; reg < 16; reg++)
+ printk(" %2.2x", inb(ioaddr+reg));
+ }
+ EL3WINDOW(7);
+ outw(SetIntrEnb | TxAvailable | RxComplete | AdapterFailure
+ | UpComplete | DownComplete | TxComplete,
+ ioaddr + EL3_CMD);
+ DoneDidThat++;
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--i < 0) {
+ printk("%s: Too much work in interrupt, status %4.4x. "
+ "Disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ if (vortex_debug > 4)
+ printk("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
+
+ /* Clear the re-entry guard set on entry. */
+ dev->interrupt = 0;
+ return;
+}
+
+/* Programmed-I/O receive path: drain completed packets from the Rx FIFO
+   into freshly allocated skbuffs and hand them to the network layer.
+   Always returns 0. */
+static int
+vortex_rx(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (vortex_debug > 5)
+ printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (vortex_debug > 2)
+ printk(" Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K!. */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = DEV_ALLOC_SKB(pkt_len + 5);
+ if (vortex_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+#if LINUX_VERSION_CODE >= 0x10300
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ skb->len = pkt_len;
+ /* 'skb->data' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO, skb->data, (pkt_len + 3) >> 2);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+#endif /* KERNEL_1_3_0 */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (vortex_debug)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ }
+ /* Error frame or allocation failure: drop the packet from the FIFO. */
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ vp->stats.rx_dropped++;
+ /* Wait a limited time to skip this packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+
+ return 0;
+}
+
+/* Bus-master (Boomerang) receive path: walk the Rx descriptor ring,
+   pass each completed frame up (copying small frames per rx_copybreak),
+   then refill empty ring slots with fresh skbuffs.  Always returns 0. */
+static int
+boomerang_rx(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ int ioaddr = dev->base_addr;
+ int rx_status;
+
+ if (vortex_debug > 5)
+ printk(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+ if (vortex_debug > 2)
+ printk(" Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K!. */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ if (vortex_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = DEV_ALLOC_SKB(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ bus_to_virt(vp->rx_ring[entry].addr),
+ pkt_len);
+ rx_copy++;
+ } else{
+ void *temp;
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+ /* Remove this checking code for final release. */
+ if (bus_to_virt(vp->rx_ring[entry].addr) != temp)
+ printk("%s: Warning -- the skbuff addresses do not match"
+ " in boomerang_rx: %p vs. %p / %p.\n", dev->name,
+ bus_to_virt(vp->rx_ring[entry].addr),
+ skb->head, temp);
+ rx_nocopy++;
+ }
+#if LINUX_VERSION_CODE > 0x10300
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ skb->len = pkt_len;
+#endif
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->dirty_rx < vp->cur_rx; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+#if LINUX_VERSION_CODE > 0x10300
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[entry].addr = virt_to_bus(skb->tail);
+#else
+ vp->rx_ring[entry].addr = virt_to_bus(skb->data);
+#endif
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ }
+ return 0;
+}
+
+/* Shut the interface down: stop the receiver/transmitter and statistics,
+   free the IRQ, and release any Boomerang bus-master ring buffers.
+   Always returns 0. */
+static int
+vortex_close(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (vortex_debug > 1) {
+ printk("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+ printk("%s: vortex close stats: rx_nocopy %d rx_copy %d"
+ " tx_queued %d.\n",
+ dev->name, rx_nocopy, rx_copy, queued_packet);
+ }
+
+ del_timer(&vp->timer);
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+#ifdef SA_SHIRQ
+ free_irq(dev->irq, dev);
+#else
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ /* Mask all interrupt sources. */
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ outl(0, ioaddr + UpListPtr);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ vp->rx_skbuff[i]->free = 1;
+#endif
+ dev_kfree_skb (vp->rx_skbuff[i], FREE_WRITE);
+ vp->rx_skbuff[i] = 0;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ outl(0, ioaddr + DownListPtr);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (vp->tx_skbuff[i]) {
+ dev_kfree_skb(vp->tx_skbuff[i], FREE_WRITE);
+ vp->tx_skbuff[i] = 0;
+ }
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Return the interface statistics, refreshing the counters from the chip
+   first (with interrupts disabled) if the interface is running. */
+static struct enet_statistics *
+vortex_get_stats(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ unsigned long flags;
+
+ if (dev->start) {
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+ restore_flags(flags);
+ }
+ return &vp->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(int ioaddr, struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ /* The counters clear when read, so '+=' accumulates running totals. */
+ EL3WINDOW(6);
+ vp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ vp->stats.collisions += inb(ioaddr + 3);
+ vp->stats.tx_window_errors += inb(ioaddr + 4);
+ vp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ vp->stats.tx_packets += inb(ioaddr + 6);
+ /* Upper bits of the Tx packet count live in register 9. */
+ vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* We change back to window 7 (not 1) with the Vortex. */
+ EL3WINDOW(7);
+ return;
+}
+
+/* This new version of set_rx_mode() supports v1.4 kernels.
+ The Vortex chip has no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. At least
+ the chip has a very clean way to set the mode, unlike many others. */
+static void
+set_rx_mode(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ short new_mode;
+
+ /* Choose the filter bits from the device flags and multicast list. */
+ if (dev->flags & IFF_PROMISC) {
+ if (vortex_debug > 3)
+ printk("%s: Setting promiscuous mode.\n", dev->name);
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+ } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ /* A single command word programs the Rx filter. */
+ outw(new_mode, ioaddr + EL3_CMD);
+}
+
+#ifdef MODULE
+/* Module unload: walk the driver's device list, unregistering each
+   interface, resetting the chip, and releasing its resources. */
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_vortex_dev) {
+ /* Save the list link before freeing the device structure. */
+ next_dev = ((struct vortex_private *)root_vortex_dev->priv)->next_module;
+ if (root_vortex_dev->dma)
+ free_dma(root_vortex_dev->dma);
+ unregister_netdev(root_vortex_dev);
+ outw(TotalReset, root_vortex_dev->base_addr + EL3_CMD);
+ release_region(root_vortex_dev->base_addr, CORKSCREW_TOTAL_SIZE);
+ kfree(root_vortex_dev);
+ root_vortex_dev = next_dev;
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c515.c"
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c59x.c b/linux/src/drivers/net/3c59x.c
new file mode 100644
index 0000000..a6b89cd
--- /dev/null
+++ b/linux/src/drivers/net/3c59x.c
@@ -0,0 +1,2648 @@
+/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
+/*
+ Written 1996-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+ Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+ and the EtherLink XL 3c900 and 3c905 cards.
+
+ The original author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates are available at
+ http://www.scyld.com/network/vortex.html
+*/
+
+static const char versionA[] =
+"3c59x.c:v0.99Za 4/17/2003 Donald Becker, becker@scyld.com\n";
+static const char versionB[] =
+" http://www.scyld.com/network/vortex.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc.
+ See media_tbl[] and the web page for the possible types.
+ There is no limit on card count, MAX_UNITS limits only module options. */
+#define MAX_UNITS 8
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+static const int rx_copybreak = 200;
+
+/* Allow setting MTU to a larger size, bypassing the normal Ethernet setup. */
+static const int mtu = 1500;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Cyclones and later have a 64 or 256 element hash table based on the
+ Ethernet CRC. */
+static int multicast_filter_limit = 64;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Do not increase the Tx ring beyond 256.
+ Large receive rings waste memory and confound network buffer limits.
+ These values have been carefully studied: changing these might mask a
+ problem, it won't fix it.
+ */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. The 1536 value is not
+ a limit, or directly related to MTU, but rather a way to keep a
+ consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
+#include <linux/module.h>
+#include <linux/modversions.h>
+#else
+#include <linux/modversions.h>
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability.
+ Compatibility defines are now in kern_compat.h */
+
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com EtherLink XL (3c590/3c900 series) driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+#ifdef MODULE_PARM_DESC
+MODULE_PARM_DESC(debug, "3c59x message level (0-31)");
+MODULE_PARM_DESC(options, "3c59x force fixed media type");
+MODULE_PARM_DESC(full_duplex,
+ "3c59x set to 1 to force full duplex (deprecated)");
+MODULE_PARM_DESC(rx_copybreak,
+ "3c59x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work,
+ "3c59x maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast address count before switching to Rx-all-multicast");
+#endif
+
+/* Operational parameters that usually are not changed. */
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Performance and path-coverage information. */
+static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs
+versions of the FastEtherLink cards. The supported product IDs are
+in the pci_tbl[] list.
+
+The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+with the kernel source.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+The EEPROM settings for media type and forced-full-duplex are observed.
+The EEPROM media type should be left at the default "autoselect" unless using
+10base2 or AUI connections which cannot be reliably detected.
+
+III. Driver operation
+
+The 3c59x series use an interface that's very similar to the previous 3c5x9
+series. The primary interface is two programmed-I/O FIFOs, with an
+alternate single-contiguous-region bus-master transfer (see next).
+
+The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3. The first chip version retains a compatible
+programmed-I/O interface that has been removed in 'B' and subsequent board
+revisions.
+
+One extension that is advertised in a very large font is that the adapters
+are capable of being bus masters. On the Vortex chip this capability was
+only for a single contiguous region making it far less useful than the full
+bus master capability. There is a significant performance impact of taking
+an extra interrupt or polling for the completion of each transfer, as well
+as difficulty sharing the single transfer engine between the transmit and
+receive threads. Using DMA transfers is a win only with large blocks or
+with the flawed versions of the Intel Orion motherboard PCI controller.
+
+The Boomerang chip's full-bus-master interface is useful, and has the
+currently-unused advantages over other similar chips that queued transmit
+packets may be reordered and receive buffer groups are associated with a
+single frame.
+
+With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
+Rather than a fixed intermediate receive buffer, this scheme allocates
+full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as
+the copying breakpoint: it is chosen to trade-off the memory wasted by
+passing the full-sized skbuff to the queue layer for all frames vs. the
+copying cost of copying a frame to a correctly-sized skbuff.
+
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
+3c590, 3c595, and 3c900 boards.
+The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
+the EISA version is called "Demon". According to Terry these names come
+from rides at the local amusement park.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes.
+This driver only supports ethernet packets on some kernels because of the
+skbuff allocation limit of 4K.
+*/
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+ runtime register window, window 1, is now always mapped in.
+ The Boomerang size is twice as large as the Vortex -- it has additional
+ bus master control registers. */
+#define VORTEX_SIZE 0x20
+#define BOOMERANG_SIZE 0x40
+#define CYCLONE_SIZE 0x80
+enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=0x804, IS_TORNADO=0x08,
+ HAS_PWR_CTRL=0x10, HAS_MII=0x20, HAS_NWAY=0x40, HAS_CB_FNS=0x80,
+ EEPROM_8BIT=0x200, INVERT_LED_PWR=0x400, MII_XCVR_PWR=0x4000,
+ HAS_V2_TX=0x800, WN0_XCVR_PWR=0x1000,
+};
+/* Base feature sets for the generations. */
+#define FEATURE_BOOMERANG (HAS_MII) /* 905 */
+#define FEATURE_CYCLONE (IS_CYCLONE|HAS_V2_TX) /* 905B */
+#define FEATURE_TORNADO (IS_TORNADO|HAS_NWAY|HAS_V2_TX) /* 905C */
+
+static void *vortex_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int pwr_event(void *dev_instance, int event);
+#ifdef USE_MEM_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#endif
+
+/* PCI device table: { name, { device ID, ID mask [, subsystem ID, mask] },
+   I/O type, register-window size, feature flags }.  Matched in order, so
+   exact IDs precede the masked catch-all entries.  Zero-terminated. */
+static struct pci_id_info pci_tbl[] = {
+ {"3c590 Vortex 10Mbps", { 0x590010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c595 Vortex 100baseTx", { 0x595010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c595 Vortex 100baseT4", { 0x595110B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c595 Vortex 100base-MII",{ 0x595210B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ /* Change EISA_scan if these move from index 4 and 5. */
+ {"3c592 EISA Vortex", { 0x592010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c597 EISA Vortex", { 0x597010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"Vortex (unknown)", { 0x590010B7, 0xff00ffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c900 Boomerang 10baseT", { 0x900010B7, 0xffffffff },
+ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG, },
+ {"3c900 Boomerang 10Mbps Combo", { 0x900110B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG, },
+ {"3c900 Cyclone 10Mbps TPO", { 0x900410B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c900 Cyclone 10Mbps Combo", { 0x900510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c900 Cyclone 10Mbps TPC", { 0x900610B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c900B-FL Cyclone 10base-FL",{ 0x900A10B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c905 Boomerang 100baseTx",{ 0x905010B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
+ {"3c905 Boomerang 100baseT4",{ 0x905110B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
+ {"3c905B Cyclone 100baseTx",{ 0x905510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE|HAS_NWAY, },
+ {"3c905B Cyclone 10/100/BNC",{ 0x905810B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE|HAS_NWAY, },
+ {"3c905B-FX Cyclone 100baseFx",{ 0x905A10B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c905C Tornado",{ 0x920010B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c920 Tornado",{ 0x920110B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c920 series Tornado",{ 0x920010B7, 0xfff0ffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c982 Server Tornado",{ 0x980510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c980 Cyclone",{ 0x980010B7, 0xfff0ffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE|HAS_NWAY, },
+ {"3cSOHO100-TX Hurricane", { 0x764610B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE, },
+ {"3c555 Laptop Hurricane", { 0x505510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE, },
+ {"3c556 Laptop Tornado",{ 0x605510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO|EEPROM_8BIT, },
+ {"3c556 series Laptop Tornado",{ 0x605510B7, 0xf0ffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO|EEPROM_8BIT, },
+ {"3c1556B-5 mini-PCI",{ 0x605610B7, 0xffffffff, 0x655610b7, 0xffffffff, },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|EEPROM_8BIT|INVERT_LED_PWR|WN0_XCVR_PWR, },
+ {"3c1556B mini-PCI",{ 0x605610B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|EEPROM_8BIT|HAS_CB_FNS|INVERT_LED_PWR|MII_XCVR_PWR, },
+ {"3c1556B series mini-PCI",{ 0x605610B7, 0xf0ffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|EEPROM_8BIT|HAS_CB_FNS|INVERT_LED_PWR|MII_XCVR_PWR, },
+ {"3c575 Boomerang CardBus", { 0x505710B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, },
+ {"3CCFE575BT Cyclone CardBus",{ 0x515710B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_CYCLONE | HAS_CB_FNS | EEPROM_8BIT | INVERT_LED_PWR, },
+ {"3CCFE575CT Tornado CardBus",{ 0x525710B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|HAS_CB_FNS|EEPROM_8BIT|MII_XCVR_PWR, },
+ {"3CCFE656 Cyclone CardBus",{ 0x656010B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ IS_CYCLONE|HAS_NWAY|HAS_CB_FNS| INVERT_LED_PWR | MII_XCVR_PWR, },
+ {"3CCFE656B Cyclone+Winmodem CardBus",{ 0x656210B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_CYCLONE/*|HAS_NWAY*/ |HAS_CB_FNS|EEPROM_8BIT|INVERT_LED_PWR|MII_XCVR_PWR, },
+ {"3CCFE656C Tornado+Winmodem CardBus",{ 0x656410B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ (FEATURE_TORNADO & ~HAS_NWAY)|HAS_CB_FNS|EEPROM_8BIT | MII_XCVR_PWR, },
+ {"3c450 HomePNA Tornado",{ 0x450010B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c575 series CardBus (unknown version)", {0x505710B7, 0xf0ffffff },
+ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
+ {"3Com Boomerang (unknown version)",{ 0x900010B7, 0xff00ffff },
+ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG, },
+ {0,}, /* 0 terminated list. */
+};
+
+/* Registration record handed to the generic PCI probe code: driver name,
+   hotswap capability, PCI class to match, the device ID table above, and
+   the per-device probe and power-event callbacks. */
+struct drv_id_info vortex_drv_id = {
+ "vortex", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
+ vortex_probe1, pwr_event };
+
+/* This driver was written to use I/O operations.
+ However there are performance benefits to using memory operations, so
+ that mode is now an option.
+ Compiling for memory ops turns off EISA support.
+*/
+#ifdef USE_MEM_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for Ethernet, but the new chip
+ can handle FDDI length frames (~4500 octets) and now parameters count
+ 32-bit 'Dwords' rather than octets. */
+
+enum vortex_cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+ UpStall = 6<<11, UpUnstall = (6<<11)+1,
+ DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+ RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+ StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8,
+ RxMulticastHash = 0x10,
+};
+
+/* Bits in the general status register. */
+enum vortex_status {
+ IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+ DMAInProgress = 1<<11, /* DMA controller is still busy.*/
+ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Vortex this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+ EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
+ NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
+ DriverTune=13, Checksum=15};
+
+enum Window2 { /* Window 2. */
+ Wn2_ResetOptions=12,
+};
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+enum Window5 {
+ Wn5_TxThreshold = 0, Wn5_RxFilter = 8,
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 {
+ /* Bus Master control on Vortex. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+ /* On Cyclone and later, VLAN and PowerMgt control. */
+ Wn7_VLAN_Mask = 0, Wn7_VLAN_EtherType = 4, Wn7_PwrMgmtEvent = 12,
+};
+
+/* Boomerang and Cyclone bus master control registers. */
+enum MasterCtrl {
+ PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+ DownPollRate = 0x2d, TxFreeThreshold = 0x2f,
+ UpPktStatus = 0x30, UpListPtr = 0x38,
+ /* Cyclone+. */
+ TxPktID=0x18, RxPriorityThresh = 0x3c,
+};
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
+ alignment constraint on tx_ring[] and rx_ring[]. */
+#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */
+/* Rx descriptor consumed by the bus-master engine.  Fields are 32-bit
+   little-endian values as seen by the chip (see rx_desc_status for the
+   bits the NIC writes back into 'status'). */
+struct boom_rx_desc {
+ u32 next; /* Last entry points to 0. */
+ s32 status; /* Completion/error bits written back by the NIC. */
+ u32 addr; /* Up to 63 addr/len pairs possible. */
+ s32 length; /* Set LAST_FRAG to indicate last pair. */
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete=0x00008000, RxDError=0x4000,
+ /* See boomerang_rx() for actual error bits */
+ IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
+ IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
+};
+
+/* Tx descriptor fed to the bus-master engine; same layout conventions as
+   boom_rx_desc (see tx_desc_status for the 'status' control bits). */
+struct boom_tx_desc {
+ u32 next; /* Last entry points to 0. */
+ s32 status; /* bits 0:12 length, others see below. */
+ u32 addr;
+ s32 length;
+};
+
+/* Values for the Tx status entry. */
+enum tx_desc_status {
+ CRCDisable=0x2000, TxIntrDnComplete=0x8000, TxDownComplete=0x10000,
+ AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
+ TxNoRoundup=0x10000000, /* HAS_V2_TX should not word-pad packet. */
+ TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */
+};
+
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20, CapNoTxLength=0x0200, CapPwrMgmt=0x2000 };
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Per-adapter driver state, pointed to by dev->priv.  Allocated with
+   PRIV_ALIGN slack and rounded up so the descriptor rings at the front
+   meet the chip's alignment requirement (see vortex_probe1). */
+struct vortex_private {
+ /* The Rx and Tx rings should be quad-word-aligned. */
+ struct boom_rx_desc rx_ring[RX_RING_SIZE];
+ struct boom_tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device *next_module; /* Link in the root_vortex_dev list. */
+ void *priv_addr; /* Unaligned kmalloc pointer, kept for kfree(). */
+ /* Keep the Rx and Tx variables grouped on their own cache lines. */
+ struct boom_rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ struct boom_tx_desc *tx_desc_tail;
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1, restart_tx:1;
+
+ long last_reset;
+ spinlock_t window_lock; /* Serializes EL3WINDOW register-window flips. */
+ struct net_device_stats stats;
+ char *cb_fn_base; /* CardBus function status addr space. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev; /* PCI configuration space information. */
+
+ /* The remainder are related to chip state, mostly media selection. */
+ int multicast_filter_limit;
+ u32 mc_filter[8];
+ int max_interrupt_work;
+ int rx_mode;
+ struct timer_list timer; /* Media selection timer. */
+ int options; /* User-settable misc. driver options. */
+ unsigned int media_override:4, /* Passed-in media type. */
+ default_media:4, /* Read from the EEPROM/Wn3_Config. */
+ full_duplex:1, medialock:1, autoselect:1,
+ bus_master:1, /* Vortex can only do a fragment bus-m. */
+ full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
+ hw_csums:1, /* Has hardware checksums. */
+ restore_intr_mask:1,
+ polling:1;
+ u16 status_enable;
+ u16 intr_enable;
+ u16 available_media; /* From Wn3_Options. */
+ u16 wn3_mac_ctrl; /* Current settings. */
+ u16 capabilities, info1, info2; /* Various, from EEPROM. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
+};
+
+/* Media-selection state machine: each entry names a transceiver type,
+   the Wn4_Media bits to program for it, the Wn3_Config availability bit
+   to test, and ('next') the media type to try after 'wait' jiffies if no
+   link is found.  Indexed by the xcvr_types values above. */
+static struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config.*/
+ next:8; /* The media type to try next. */
+ int wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
+ { "MII", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ},
+ { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
+
+#if ! defined(CARDBUS) && ! defined(USE_MEM_OPS)
+static int eisa_scan(struct net_device *dev);
+#endif
+static int vortex_open(struct net_device *dev);
+static void set_media_type(struct net_device *dev);
+static void activate_xcvr(struct net_device *dev);
+static void start_operation(struct net_device *dev);
+static void start_operation1(struct net_device *dev);
+static void mdio_sync(long ioaddr, int bits);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static void mdio_write(long ioaddr, int phy_id, int location, int value);
+static void vortex_timer(unsigned long arg);
+static void vortex_tx_timeout(struct net_device *dev);
+static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int vortex_rx(struct net_device *dev);
+static int boomerang_rx(struct net_device *dev);
+static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int vortex_close(struct net_device *dev);
+static void update_stats(long ioaddr, struct net_device *dev);
+static struct net_device_stats *vortex_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#if defined(NO_PCI)
+#define acpi_set_WOL(dev) do {} while(0);
+#define acpi_wake(pci_dev) do {} while(0);
+#define acpi_set_pwr_state(pci_dev, state) do {} while(0);
+#else
+static void acpi_set_WOL(struct net_device *dev);
+#endif
+
+
+/* A list of all installed Vortex devices, for removing the driver module. */
+static struct net_device *root_vortex_dev = NULL;
+
+
+#if defined(MODULE) && defined(CARDBUS)
+
+#include <pcmcia/driver_ops.h>
+
+/* Old-style CardBus attach hook: read the card's I/O base, IRQ and PCI ID
+   from config space, match it against pci_tbl, and hand off to
+   vortex_probe1().  Returns a dev_node_t for the PCMCIA layer, or NULL on
+   any failure. */
+static dev_node_t *vortex_attach(dev_locator_t *loc)
+{
+ u32 io, pci_id;
+ u8 bus, devfn, irq;
+ struct net_device *dev;
+ int chip_idx;
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &io);
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &pci_id);
+ printk(KERN_INFO "vortex_attach(bus %d, function %d, device %8.8x)\n",
+ bus, devfn, pci_id);
+ io &= ~3; /* Strip the PCI BAR I/O-space indicator bits. */
+ if (io == 0 || irq == 0) {
+ printk(KERN_ERR "The 3Com CardBus Ethernet interface was not "
+ "assigned an %s.\n" KERN_ERR " It will not be activated.\n",
+ io == 0 ? "I/O address" : "IRQ");
+ return NULL;
+ }
+ for (chip_idx = 0; pci_tbl[chip_idx].id.pci; chip_idx++)
+ if ((pci_id & pci_tbl[chip_idx].id.pci_mask) ==
+ pci_tbl[chip_idx].id.pci)
+ break;
+ if (pci_tbl[chip_idx].id.pci == 0) { /* Compiled out! */
+ printk(KERN_INFO "Unable to match chip type %8.8x in "
+ "vortex_attach().\n", pci_id);
+ return NULL;
+ }
+ /* MAX_UNITS+1 deliberately falls outside the options[] range. */
+ dev = vortex_probe1(pci_find_slot(bus, devfn), NULL, io, irq, chip_idx, MAX_UNITS+1);
+ if (dev) {
+ /* NOTE(review): kmalloc result is used unchecked; a failed
+    allocation here would oops in strcpy().  Confirm intended. */
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+ node->major = node->minor = 0;
+ node->next = NULL;
+ MOD_INC_USE_COUNT;
+ return node;
+ }
+ return NULL;
+}
+
+/* Old-style CardBus detach hook: find the named device in the
+   root_vortex_dev list, shut it down, unlink it and free everything.
+   Ordering is subtle: 'next' points at vp->next_module inside the
+   separately allocated private area, so it is still valid after
+   kfree(dev); vp itself (priv_addr) is freed only after the unlink. */
+static void vortex_detach(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_DEBUG "vortex_detach(%s)\n", node->dev_name);
+ for (devp = &root_vortex_dev; *devp; devp = next) {
+ next = &((struct vortex_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ struct net_device *dev = *devp;
+ struct vortex_private *vp = dev->priv;
+ if (dev->flags & IFF_UP)
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ unregister_netdev(dev);
+ if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
+ kfree(dev);
+ *devp = *next; /* Unlink: reads vp->next_module, still live here. */
+ kfree(vp->priv_addr);
+ kfree(node);
+ MOD_DEC_USE_COUNT;
+ }
+}
+
+/* Operations table registered with the old PCMCIA/CardBus driver layer:
+   name, attach, (unused suspend/resume), detach. */
+struct driver_operations vortex_ops = {
+ "3c575_cb", vortex_attach, NULL, NULL, vortex_detach
+};
+
+#endif /* Old-style Cardbus module support */
+
+#if defined(MODULE) || (LINUX_VERSION_CODE >= 0x020400)
+
+#if ! defined(MODULE) /* Must be a 2.4 kernel */
+module_init(init_module);
+module_exit(cleanup_module);
+#endif
+
+/* Module entry point.  CardBus builds register with the PCMCIA layer;
+   otherwise we try an EISA scan first (unless compiled for memory ops)
+   and fall back to generic PCI registration. */
+int init_module(void)
+{
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+#ifdef CARDBUS
+ register_driver(&vortex_ops);
+ return 0;
+#else
+#ifndef USE_MEM_OPS
+ /* Not strictly correct: a found EISA card skips the PCI scan,
+    but a system with both EISA and PCI cards is unlikely. */
+ if (eisa_scan(0) >= 0)
+ return 0;
+#if defined(NO_PCI)
+ return 0;
+#endif
+#endif
+
+ return pci_drv_register(&vortex_drv_id, NULL);
+#endif
+}
+
+#else
+/* Compiled-in (non-module) probe entry point.  Tries PCI first (the
+   passed-in dev is consumed by the first successful probe, so it is
+   cleared afterwards), then EISA.  Returns 0 if any card was found,
+   -ENODEV otherwise. */
+int tc59x_probe(struct net_device *dev)
+{
+ int retval = -ENODEV;
+
+ /* Allow an EISA-only driver. */
+#if ! defined(NO_PCI)
+ if (pci_drv_register(&vortex_drv_id, dev) >= 0) {
+ retval = 0;
+ dev = 0; /* dev was consumed; EISA scan must allocate its own. */
+ }
+#endif
+#ifndef USE_MEM_OPS
+ if (eisa_scan(dev) >= 0)
+ retval = 0;
+#endif
+ if (retval >= 0)
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ return retval;
+}
+#endif /* not MODULE */
+
+#if ! defined(CARDBUS) && ! defined(USE_MEM_OPS)
+/* Scan EISA slot I/O ranges (0x1000-0x8000, one slot per 0x1000) for
+   3Com 3c59x boards and probe each one found.  Returns 0 if at least one
+   card was found, -ENODEV otherwise.  Note that 'ioaddr' is static, so a
+   repeat call resumes after the last slot scanned rather than rescanning. */
+static int eisa_scan(struct net_device *dev)
+{
+ int cards_found = 0;
+
+ /* Check the slots of the EISA bus. */
+ if (EISA_bus) {
+ static long ioaddr = 0x1000;
+ for ( ; ioaddr < 0x9000; ioaddr += 0x1000) {
+ int device_id;
+ if (check_region(ioaddr, VORTEX_SIZE))
+ continue;
+ /* Check the standard EISA ID register for an encoded '3Com'. */
+ if (inw(ioaddr + 0xC80) != 0x6d50)
+ continue;
+ /* Check for a product that we support, 3c59{2,7} any rev. */
+ device_id = (inb(ioaddr + 0xC82)<<8) + inb(ioaddr + 0xC83);
+ if ((device_id & 0xFF00) != 0x5900)
+ continue;
+ /* Chip index 5 = 3c597, 4 = 3c592; IRQ is in ID reg 0xC88. */
+ vortex_probe1(0, dev, ioaddr, inw(ioaddr + 0xC88) >> 12,
+ (device_id & 0xfff0) == 0x5970 ? 5 : 4, cards_found);
+ dev = 0; /* First probe consumed the caller's device. */
+ cards_found++;
+ }
+ }
+
+ return cards_found ? 0 : -ENODEV;
+}
+#endif /* ! Cardbus */
+
+/* Issue an EEPROM command through the window-0 command register, busy-wait
+   for the chip to finish (worst-case roughly 162 us), and return the
+   contents of the EEPROM data register. */
+static int do_eeprom_op(long ioaddr, int ee_cmd)
+{
+ int spins = 1620;
+
+ outw(ee_cmd, ioaddr + Wn0EepromCmd);
+ /* Poll the busy bit (0x8000) until it clears or the budget runs out. */
+ while (spins-- >= 0 && (inw(ioaddr + Wn0EepromCmd) & 0x8000))
+ ;
+ return inw(ioaddr + Wn0EepromData);
+}
+
+/* Probe and initialize one adapter: allocate the net_device and aligned
+   private area, read and checksum the EEPROM, program the station
+   address, detect available media and MII transceivers, and install the
+   device methods.  Returns the net_device, or NULL on failure. */
+static void *vortex_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt)
+{
+ struct net_device *dev;
+ struct vortex_private *vp;
+ void *priv_mem;
+ int option;
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ int ee_read_cmd;
+ int drv_flags = pci_tbl[chip_idx].drv_flags;
+ int i;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+#if ! defined(NO_PCI)
+ /* Check the PCI latency value. On the 3c590 series the latency timer
+ must be set to the maximum value to avoid data corruption that occurs
+ when the timer expires during a transfer. This bug exists in the
+ Vortex chip only. */
+ if (pdev) {
+ u8 pci_latency;
+ u8 new_latency = (drv_flags & IS_VORTEX) ? 248 : 32;
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < new_latency) {
+ printk(KERN_INFO "%s: Overriding PCI latency"
+ " timer (CFLT) setting of %d, new value is %d.\n",
+ dev->name, pci_latency, new_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+ }
+ }
+#endif
+
+ printk(KERN_INFO "%s: 3Com %s at 0x%lx, ",
+ dev->name, pci_tbl[chip_idx].name, ioaddr);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*vp) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL) {
+ printk(" INTERFACE MEMORY ALLOCATION FAILURE.\n");
+ return NULL;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->mtu = mtu;
+
+ /* Round the private area up to a PRIV_ALIGN+1 byte boundary; the
+    original kmalloc pointer is kept in priv_addr for kfree(). */
+ dev->priv = vp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(vp, 0, sizeof(*vp));
+ vp->priv_addr = priv_mem;
+
+ vp->next_module = root_vortex_dev;
+ root_vortex_dev = dev;
+
+ vp->chip_id = chip_idx;
+ vp->pci_dev = pdev;
+ vp->drv_flags = drv_flags;
+ vp->msg_level = (1 << debug) - 1;
+ vp->rx_copybreak = rx_copybreak;
+ vp->max_interrupt_work = max_interrupt_work;
+ vp->multicast_filter_limit = multicast_filter_limit;
+
+ /* The lower four bits are the media type. */
+ if (dev->mem_start)
+ option = dev->mem_start;
+ else if (find_cnt < MAX_UNITS)
+ option = options[find_cnt];
+ else
+ option = -1;
+
+ if (option >= 0) {
+ vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
+ vp->full_duplex = (option & 0x200) ? 1 : 0;
+ vp->bus_master = (option & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ vp->full_duplex = 1;
+
+ vp->options = option;
+
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
+ /* Figure out the size and offset of the EEPROM table.
+ This is complicated by potential discontiguous address bits. */
+
+ /* Locate the opcode bits, 0xC0 or 0x300.  Write a test pattern to
+    the data register; if a short-opcode read leaves it unchanged the
+    chip must want the longer opcode. */
+ outw(0x5555, ioaddr + Wn0EepromData);
+ ee_read_cmd = do_eeprom_op(ioaddr, 0x80) == 0x5555 ? 0x200 : 0x80;
+ /* Locate the table base for CardBus cards. */
+ if (do_eeprom_op(ioaddr, ee_read_cmd + 0x37) == 0x6d50)
+ ee_read_cmd += 0x30;
+
+ for (i = 0; i < 0x40; i++) {
+ int cmd_and_addr = ee_read_cmd + i;
+ if (ee_read_cmd == 0xB0) { /* Correct for discontinuity. */
+ int offset = 0x30 + i;
+ cmd_and_addr = 0x80 + (offset & 0x3f) + ((offset<<2) & 0x0f00);
+ }
+ eeprom[i] = do_eeprom_op(ioaddr, cmd_and_addr);
+ }
+ for (i = 0; i < 0x18; i++)
+ checksum ^= eeprom[i];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */
+ while (i < 0x21)
+ checksum ^= eeprom[i++];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ }
+ if (checksum != 0x00 && !(drv_flags & IS_TORNADO))
+ printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+
+ /* EEPROM words are big-endian within the word; htons fixes them up. */
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ printk(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (dev->irq <= 0)
+ printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
+ dev->irq);
+
+#if ! defined(NO_PCI)
+ if (drv_flags & HAS_CB_FNS) {
+ u32 fn_st_addr; /* Cardbus function status space */
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_2, &fn_st_addr);
+ if (fn_st_addr)
+ vp->cb_fn_base = ioremap(fn_st_addr & ~3, 128);
+ printk(KERN_INFO "%s: CardBus functions mapped %8.8x->%p.\n",
+ dev->name, fn_st_addr, vp->cb_fn_base);
+ }
+#endif
+
+ /* Extract our information from the EEPROM data. */
+ vp->info1 = eeprom[13];
+ vp->info2 = eeprom[15];
+ vp->capabilities = eeprom[16];
+
+ if (vp->info1 & 0x8000)
+ vp->full_duplex = 1;
+ if (vp->full_duplex)
+ vp->medialock = 1;
+
+ /* Turn on the transceiver. */
+ activate_xcvr(dev);
+
+ {
+ char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ int i_cfg;
+ EL3WINDOW(3);
+ vp->available_media = inw(ioaddr + Wn3_Options);
+ if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
+ vp->available_media = 0x40;
+ i_cfg = inl(ioaddr + Wn3_Config); /* Internal Configuration */
+ vp->default_media = (i_cfg >> 20) & 15;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG " Internal config register is %8.8x, "
+ "transceivers %#x.\n", i_cfg, inw(ioaddr + Wn3_Options));
+ printk(KERN_INFO " %dK buffer %s Rx:Tx split, %s%s interface.\n",
+ 8 << (i_cfg & 7),
+ ram_split[(i_cfg >> 16) & 3],
+ i_cfg & 0x01000000 ? "autoselect/" : "",
+ vp->default_media > XCVR_ExtMII ? "<invalid transceiver>" :
+ media_tbl[vp->default_media].name);
+ vp->autoselect = i_cfg & 0x01000000 ? 1 : 0;
+ }
+
+ if (vp->media_override != 7) {
+ printk(KERN_INFO " Media override to transceiver type %d (%s).\n",
+ vp->media_override, media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else
+ dev->if_port = vp->default_media;
+
+ /* Scan the MII bus for physical transceivers if the card can have
+    one (MII/NWay capable or configured for it). */
+ if ((vp->available_media & 0x41) || (drv_flags & HAS_NWAY) ||
+ dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int phy, phy_idx = 0;
+ EL3WINDOW(4);
+ mii_preamble_required++;
+ mdio_sync(ioaddr, 32);
+ mdio_read(ioaddr, 24, 1);
+ for (phy = 1; phy <= 32 && phy_idx < sizeof(vp->phys); phy++) {
+ int mii_status, phyx = phy & 0x1f;
+ mii_status = mdio_read(ioaddr, phyx, 1);
+ if ((mii_status & 0xf800) && mii_status != 0xffff) {
+ vp->phys[phy_idx++] = phyx;
+ printk(KERN_INFO " MII transceiver found at address %d,"
+ " status %4x.\n", phyx, mii_status);
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required++;
+ }
+ }
+ mii_preamble_required--;
+ if (phy_idx == 0) {
+ printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n");
+ vp->phys[0] = 24;
+ } else {
+ if (mii_preamble_required == 0 &&
+ mdio_read(ioaddr, vp->phys[0], 1) == 0) {
+ printk(KERN_INFO "%s: MII transceiver has preamble bug.\n",
+ dev->name);
+ mii_preamble_required = 1;
+ }
+ vp->advertising = mdio_read(ioaddr, vp->phys[0], 4);
+ if (vp->full_duplex) {
+ /* Only advertise the FD media types. */
+ vp->advertising &= ~0x02A0;
+ mdio_write(ioaddr, vp->phys[0], 4, vp->advertising);
+ }
+ }
+ } else {
+ /* We will emulate MII management. */
+ vp->phys[0] = 32;
+ }
+
+ if (vp->capabilities & CapBusMaster) {
+ vp->full_bus_master_tx = 1;
+ printk(KERN_INFO" Using bus-master transmits and %s receives.\n",
+ (vp->info2 & 1) ? "early" : "whole-frame" );
+ vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+ }
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+
+ /* The 3c59x-specific entries in the device structure. */
+ dev->open = &vortex_open;
+ dev->hard_start_xmit = &vortex_start_xmit;
+ dev->stop = &vortex_close;
+ dev->get_stats = &vortex_get_stats;
+ dev->do_ioctl = &vortex_ioctl;
+ dev->set_multicast_list = &set_rx_mode;
+
+ return dev;
+}
+
+
+/* dev->open method: wake the chip, choose the active media port, program
+   interrupt/status masks, clear statistics, set up the bus-master Rx/Tx
+   rings where present, and start the media-selection timer.  Returns 0
+   on success or -EAGAIN if the IRQ cannot be acquired. */
+static int vortex_open(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ MOD_INC_USE_COUNT;
+
+ acpi_wake(vp->pci_dev);
+ vp->window_lock = SPIN_LOCK_UNLOCKED;
+ activate_xcvr(dev);
+
+ /* Before initializing select the active media port. */
+ if (vp->media_override != 7) {
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+ if (vp->drv_flags & HAS_NWAY)
+ dev->if_port = XCVR_NWAY;
+ else {
+ /* Find first available media type, starting with 100baseTx. */
+ dev->if_port = XCVR_100baseTx;
+ while (! (vp->available_media & media_tbl[dev->if_port].mask))
+ dev->if_port = media_tbl[dev->if_port].next;
+ }
+ } else
+ dev->if_port = vp->default_media;
+
+ if (! vp->medialock)
+ vp->full_duplex = 0;
+
+ vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0);
+ vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | RxComplete |
+ StatsFull | HostError | TxComplete | IntReq
+ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Initial media type %s %s-duplex.\n",
+ dev->name, media_tbl[dev->if_port].name,
+ vp->full_duplex ? "full":"half");
+
+ set_media_type(dev);
+ start_operation(dev);
+
+ /* Use the now-standard shared IRQ implementation. */
+ if (request_irq(dev->irq, &vortex_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ spin_lock(&vp->window_lock);
+
+ if (vp->msg_level & NETIF_MSG_IFUP) {
+ EL3WINDOW(4);
+ printk(KERN_DEBUG "%s: vortex_open() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ /* ..and on the Boomerang we enable the extra statistics bits. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+#if defined(CONFIG_VLAN)
+ /* If this value is set no MTU adjustment is needed for 802.1Q. */
+ outw(0x8100, ioaddr + Wn7_VLAN_EtherType);
+#endif
+ spin_unlock(&vp->window_lock);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
+ /* Use 1518/+18 if the CRC is transferred. */
+ vp->rx_buf_sz = dev->mtu + 14;
+ if (vp->rx_buf_sz < PKT_BUF_SZ)
+ vp->rx_buf_sz = PKT_BUF_SZ;
+
+ /* Initialize the RxEarly register as recommended. */
+ outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ outl(0x0020, ioaddr + PktStatus);
+ /* Chain each descriptor to the next; the final link is fixed up
+    to wrap back to entry 0 just below. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ vp->rx_ring[i].length = cpu_to_le32(vp->rx_buf_sz | LAST_FRAG);
+ vp->rx_ring[i].status = 0;
+ vp->rx_ring[i].next = virt_to_le32desc(&vp->rx_ring[i+1]);
+ vp->rx_skbuff[i] = 0;
+ }
+ /* Wrap the ring. */
+ vp->rx_head_desc = &vp->rx_ring[0];
+ vp->rx_ring[i-1].next = virt_to_le32desc(&vp->rx_ring[0]);
+
+ /* NOTE(review): on allocation failure the loop just stops, leaving
+    later ring entries with no buffer attached — confirm the Rx path
+    tolerates a partially populated ring. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(vp->rx_buf_sz);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ }
+ outl(virt_to_bus(vp->rx_head_desc), ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ dev->hard_start_xmit = &boomerang_start_xmit;
+ vp->cur_tx = vp->dirty_tx = 0;
+ vp->tx_desc_tail = &vp->tx_ring[TX_RING_SIZE - 1];
+ if (vp->drv_flags & IS_BOOMERANG) {
+ /* Room for a packet, to avoid long DownStall delays. */
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ } else if (vp->drv_flags & HAS_V2_TX)
+ outb(20, ioaddr + DownPollRate);
+
+ /* Clear the Tx ring. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = 0;
+ outl(0, ioaddr + DownListPtr);
+ vp->tx_full = 0;
+ vp->restart_tx = 1;
+ }
+ /* The multicast filter is an ill-considered, write-only design.
+ The semantics are not documented, so we assume but do not rely
+ on the table being cleared with an RxReset.
+ Here we do an explicit clear of the largest known table.
+ */
+ if (vp->drv_flags & HAS_V2_TX)
+ for (i = 0; i < 0x100; i++)
+ outw(SetFilterBit | i, ioaddr + EL3_CMD);
+ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
+
+ /* Set receiver mode: presumably accept b-case and phys addr only. */
+ vp->rx_mode = 0;
+ set_rx_mode(dev);
+
+ start_operation1(dev);
+
+ init_timer(&vp->timer);
+ vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
+ vp->timer.data = (unsigned long)dev;
+ vp->timer.function = &vortex_timer; /* timer handler */
+ add_timer(&vp->timer);
+
+ return 0;
+}
+
+/* Program the chip for the media type in dev->if_port: set the xcvr
+   select field in Wn3_Config, query the MII link partner for duplex when
+   applicable, set the Wn4_Media bits from media_tbl, and update the MAC
+   control (duplex / oversized-frame) register. */
+static void set_media_type(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i_cfg;
+
+ EL3WINDOW(3);
+ i_cfg = inl(ioaddr + Wn3_Config);
+ i_cfg &= ~0x00f00000; /* Clear the xcvr-select field (bits 20-23). */
+ if (vp->drv_flags & HAS_NWAY)
+ outl(i_cfg | 0x00800000, ioaddr + Wn3_Config);
+ else
+ outl(i_cfg | (dev->if_port << 20), ioaddr + Wn3_Config);
+
+ if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int mii_reg1, mii_reg5;
+ EL3WINDOW(4);
+ /* Read BMSR (reg1) only to clear old status. */
+ mii_reg1 = mdio_read(ioaddr, vp->phys[0], 1);
+ mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
+ if (mii_reg5 == 0xffff || mii_reg5 == 0x0000)
+ ; /* No MII device or no link partner report */
+ else if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
+ || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
+ vp->full_duplex = 1;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
+ " setting %s-duplex.\n", dev->name, vp->phys[0],
+ mii_reg1, mii_reg5, vp->full_duplex ? "full" : "half");
+ EL3WINDOW(3);
+ }
+ if (dev->if_port == XCVR_10base2)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ EL3WINDOW(4);
+ if (dev->if_port != XCVR_NWAY) {
+ outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+ }
+ /* Do we require link beat to transmit? */
+ if (vp->info1 & 0x4000)
+ outw(inw(ioaddr + Wn4_Media) & ~Media_Lnk, ioaddr + Wn4_Media);
+
+ /* Set the full-duplex and oversized frame bits. */
+ EL3WINDOW(3);
+
+ vp->wn3_mac_ctrl = vp->full_duplex ? 0x0120 : 0;
+ if (dev->mtu > 1500)
+ vp->wn3_mac_ctrl |= (dev->mtu == 1504 ? 0x0400 : 0x0040);
+ outb(vp->wn3_mac_ctrl, ioaddr + Wn3_MAC_Ctrl);
+
+ if (vp->drv_flags & HAS_V2_TX)
+ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
+}
+
+/* Apply per-chip transceiver power quirks from drv_flags: LED-power
+   inversion and MII transceiver power via Wn2_ResetOptions, plus a
+   window-0 write for chips flagged WN0_XCVR_PWR. */
+static void activate_xcvr(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int reset_opts;
+
+ /* Correct some magic bits. */
+ EL3WINDOW(2);
+ reset_opts = inw(ioaddr + Wn2_ResetOptions);
+ if (vp->drv_flags & INVERT_LED_PWR)
+ reset_opts |= 0x0010;
+ if (vp->drv_flags & MII_XCVR_PWR)
+ reset_opts |= 0x4000;
+ outw(reset_opts, ioaddr + Wn2_ResetOptions);
+ if (vp->drv_flags & WN0_XCVR_PWR) {
+ EL3WINDOW(0);
+ /* Magic value; presumably powers the transceiver through the
+    window-0 base register — confirm against 3Com documentation. */
+ outw(0x0900, ioaddr);
+ }
+}
+
+/* First-stage (re)start: reset the Tx and Rx engines, poll for command
+   completion, reprogram jumbo/VLAN sizing, and rewrite the station
+   address and mask.  Ring/interrupt enabling happens later in
+   start_operation1(). */
+static void start_operation(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 2000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(RxReset | 0x04, ioaddr + EL3_CMD);
+ /* Assume this cleared the filter. */
+ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
+
+ /* Wait a few ticks for the RxReset command to complete. */
+ for (i = 0; i < 200000; i++)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ /* NOTE(review): the warning threshold (200) is far below the poll
+    budget (200000), so the message fires whenever more than 200 polls
+    were needed — confirm that is the intended behavior. */
+ if (i >= 200 && (vp->msg_level & NETIF_MSG_DRV))
+ printk(KERN_DEBUG "%s: Rx Reset took an unexpectedly long time"
+ " to finish, %d ticks.\n",
+ dev->name, i);
+
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+ /* Handle VLANs and jumbo frames. */
+ if ((vp->drv_flags & HAS_V2_TX) && dev->mtu > 1500) {
+ EL3WINDOW(3);
+ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
+ if (dev->mtu > 2033) {
+ outl(inl(ioaddr + Wn3_Config) | 0x0000C000, ioaddr + Wn3_Config);
+ outw(SetTxStart + (2000>>2), ioaddr + EL3_CMD);
+ }
+ }
+ /* Reset the station address and mask. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+ if (vp->drv_flags & IS_BOOMERANG) {
+ /* Room for a packet, to avoid long DownStall delays. */
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ } else if (vp->drv_flags & HAS_V2_TX) {
+ outb(20, ioaddr + DownPollRate);
+ vp->restart_tx = 1;
+ }
+}
+
+/* Second half of chip initialization: prime the bus-master Rx ring,
+ * enable statistics, receiver and transmitter, unmask interrupts, and
+ * mark the software Tx queue as started.  Must run after
+ * start_operation(). */
+static void start_operation1(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (vp->full_bus_master_rx) { /* post-Vortex bus master. */
+ /* Initialize the RxEarly register as recommended. */
+ outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ outl(0x0020, ioaddr + PktStatus);
+ /* Hand the chip the current Rx descriptor to resume uploads from. */
+ outl(virt_to_bus(&vp->rx_ring[vp->cur_rx % RX_RING_SIZE]),
+ ioaddr + UpListPtr);
+ }
+
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ /* CardBus function register: route interrupts to the bridge. */
+ writel(0x8000, vp->cb_fn_base + 4);
+ netif_start_tx_queue(dev);
+}
+
+/* Periodic media-selection and watchdog timer.  'data' is the
+ * struct net_device pointer cast to unsigned long.  Duties:
+ *  - detect a stuck transmitter (blocked IRQ line -> fall back to
+ *    polled interrupt handling; otherwise run the Tx-timeout path);
+ *  - verify link beat / MII autonegotiation, updating full-duplex;
+ *  - if the current medium has no link, step to the next available
+ *    one from media_tbl[];
+ *  - re-arm itself (next_tick defaults to 60 s; shorter while probing).
+ * Runs with the device IRQ disabled around the window accesses. */
+static void vortex_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+ int ok = 0;
+ int media_status, old_window;
+
+ if (vp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer tick happened, "
+ "%s %s duplex.\n",
+ dev->name, media_tbl[dev->if_port].name,
+ vp->full_duplex ? "full" : "half");
+
+ /* This only works with bus-master (non-3c590) chips. */
+ if (vp->cur_tx - vp->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ /* Check for blocked interrupts. */
+ if (inw(ioaddr + EL3_STATUS) & IntLatch) {
+ /* We have a blocked IRQ line. This should never happen, but
+ we recover as best we can.*/
+ if ( ! vp->polling) {
+ /* Rate-limit the complaint to once per 10 seconds. */
+ if (jiffies - vp->last_reset > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is physically blocked! "
+ "Failing back to low-rate polling.\n",
+ dev->name, dev->irq);
+ vp->last_reset = jiffies;
+ }
+ vp->polling = 1;
+ }
+ /* Run the ISR by hand since the hardware IRQ never arrives. */
+ vortex_interrupt(dev->irq, dev, 0);
+ next_tick = jiffies + 2;
+ } else {
+ vortex_tx_timeout(dev);
+ vp->last_reset = jiffies;
+ }
+ }
+
+ disable_irq(dev->irq);
+ /* The window number is in the top 3 bits of the status/command word. */
+ old_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ media_status = inw(ioaddr + Wn4_Media);
+ switch (dev->if_port) {
+ case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
+ /* Fixed media: link is good iff link beat is present. */
+ if (media_status & Media_LnkBeat) {
+ ok = 1;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ } else if (vp->msg_level & NETIF_MSG_LINK)
+ /* NOTE(review): "is has" is a typo in this log string; it is
+  * runtime output, left untouched here. */
+ printk(KERN_DEBUG "%s: Media %s is has no link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ break;
+ case XCVR_MII: case XCVR_NWAY: {
+ /* MII/NWAY: consult the PHY instead of the Wn4 media bits. */
+ int mii_status = mdio_read(ioaddr, vp->phys[0], 1);
+ int mii_reg5, negotiated, duplex;
+ ok = 1;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
+ dev->name, mii_status);
+ if (vp->medialock)
+ break;
+ /* BMSR bit 0x0004 = link up; re-check sooner if it is down. */
+ if ((mii_status & 0x0004) == 0) {
+ next_tick = 5*HZ;
+ break;
+ }
+ /* Derive duplex from our advertised abilities ANDed with the
+  * link partner ability register (MII register 5). */
+ mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
+ negotiated = mii_reg5 & vp->advertising;
+ duplex = (negotiated & 0x0100) || (negotiated & 0x03C0) == 0x0040;
+ if (mii_reg5 == 0xffff || vp->full_duplex == duplex)
+ break;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on "
+ "MII #%d link partner capability of %4.4x.\n",
+ dev->name, vp->full_duplex ? "full" : "half",
+ vp->phys[0], mii_reg5);
+ vp->full_duplex = duplex;
+ /* Set the full-duplex bit. */
+ EL3WINDOW(3);
+ if (duplex)
+ vp->wn3_mac_ctrl |= 0x120;
+ else
+ vp->wn3_mac_ctrl &= ~0x120;
+ outb(vp->wn3_mac_ctrl, ioaddr + Wn3_MAC_Ctrl);
+ break;
+ }
+ default: /* Other media types handled by Tx timeouts. */
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media %s is has no indication, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ ok = 1;
+ }
+ if ( ! ok) {
+ int i_cfg;
+
+ /* Advance to the next medium this board actually supports. */
+ do {
+ dev->if_port = media_tbl[dev->if_port].next;
+ } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
+ if (dev->if_port == XCVR_Default) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media selection failing, using default "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ } else {
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media selection failed, now trying "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ /* Re-check this candidate after its table-specified settle time. */
+ next_tick = media_tbl[dev->if_port].wait;
+ }
+ outw((media_status & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ /* Record the selected transceiver in InternalConfig bits 20-23. */
+ EL3WINDOW(3);
+ i_cfg = inl(ioaddr + Wn3_Config);
+ i_cfg &= ~0x00f00000;
+ i_cfg |= (dev->if_port << 20);
+ outl(i_cfg, ioaddr + Wn3_Config);
+
+ outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
+ ioaddr + EL3_CMD);
+ }
+ EL3WINDOW(old_window);
+ enable_irq(dev->irq);
+ /* If the ISR throttled interrupts, trigger one so it can restore them. */
+ if (vp->restore_intr_mask)
+ outw(FakeIntr, ioaddr + EL3_CMD);
+
+ if (vp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ vp->timer.expires = jiffies + next_tick;
+ add_timer(&vp->timer);
+ return;
+}
+
+/* Recover from a transmitter hang.  Diagnoses the likely cause
+ * (cable collisions, blocked IRQ), optionally dumps the Tx ring in
+ * debug builds, resets the Tx engine, restarts the descriptor ring
+ * (bus-master parts) or drops the packet (PIO parts), and re-enables
+ * transmission.  Called from the watchdog/timer paths above. */
+static void vortex_tx_timeout(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int tx_status = inb(ioaddr + TxStatus);
+ int intr_status = inw(ioaddr + EL3_STATUS);
+ int j;
+
+ printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, tx_status, intr_status);
+ /* Slight code bloat to be user friendly. */
+ if ((tx_status & 0x88) == 0x88)
+ printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
+ " network cable problem?\n", dev->name);
+ if (intr_status & IntLatch) {
+ printk(KERN_ERR "%s: Interrupt posted but not delivered --"
+ " IRQ blocked by another device?\n", dev->name);
+ /* Race condition possible, but we handle a few events. */
+ vortex_interrupt(dev->irq, dev, 0);
+ }
+
+#if ! defined(final_version) && LINUX_VERSION_CODE >= 0x10300
+ /* Debug aid: dump bus-master Tx bookkeeping and the whole ring. */
+ if (vp->full_bus_master_tx) {
+ int i;
+ printk(KERN_DEBUG " Flags: bus-master %d full %d dirty %d "
+ "current %d restart_tx %d.\n",
+ vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, vp->cur_tx,
+ vp->restart_tx);
+ printk(KERN_DEBUG " Transmit list %8.8x vs. %p, packet ID %2.2x.\n",
+ (int)inl(ioaddr + DownListPtr),
+ &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE],
+ inb(ioaddr + TxPktID));
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %d: @%p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+ le32_to_cpu(vp->tx_ring[i].length),
+ le32_to_cpu(vp->tx_ring[i].status));
+ }
+ }
+#endif
+ /* Reset the Tx engine; bounded wait for command completion. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 200; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ vp->stats.tx_errors++;
+
+ if (vp->full_bus_master_tx) {
+ if (vp->drv_flags & HAS_V2_TX)
+ outb(20, ioaddr + DownPollRate);
+ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n",
+ dev->name);
+ /* If unsent packets remain and the chip has no list pointer,
+  * restart it at the oldest unacknowledged descriptor; otherwise
+  * defer the reload to the next transmit. */
+ if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
+ outl(virt_to_bus(&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + DownListPtr);
+ else
+ vp->restart_tx = 1;
+ if (vp->drv_flags & IS_BOOMERANG) {
+ /* Room for a packet, to avoid long DownStall delays. */
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ } else {
+ if (dev->mtu > 2033)
+ outw(SetTxStart + (2000>>2), ioaddr + EL3_CMD);
+ }
+
+ /* Wake the queue if the ring has drained below the full mark. */
+ if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_QUEUE_LEN - 1)) {
+ vp->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ }
+ } else {
+ /* PIO parts: the in-flight packet is simply lost. */
+ netif_unpause_tx_queue(dev);
+ vp->stats.tx_dropped++;
+ }
+
+ /* Issue Tx Enable */
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+}
+
+/*
+ * Handle uncommon interrupt sources. This is a separate routine to minimize
+ * the cache impact.
+ */
+/*
+ * Handle uncommon interrupt sources. This is a separate routine to minimize
+ * the cache impact.
+ *
+ * 'status' is the EL3_STATUS word read by the ISR.  Handles, in order:
+ * TxComplete (which on this chip means a Tx error), RxEarly, StatsFull
+ * (with a one-shot fallback that disables stats interrupts if they
+ * cannot be cleared), IntReq (restore interrupt masks after throttling),
+ * and HostError (PCI bus faults and FIFO problems, possibly requiring a
+ * full reset).  A Tx reset, when needed, is deferred to the end.
+ */
+static void
+vortex_error(struct net_device *dev, int status)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int do_tx_reset = 0;
+ int i;
+
+ if (status & TxComplete) { /* Really "TxError" for us. */
+ unsigned char tx_status = inb(ioaddr + TxStatus);
+ /* Presumably a tx-timeout. We must merely re-enable. */
+ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG"%s: Transmit error, Tx status register %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ /* Writing TxStatus pops the error off the status stack. */
+ outb(0, ioaddr + TxStatus);
+ /* 0x30: jabber/underrun — these require a Tx engine reset. */
+ if (tx_status & 0x30)
+ do_tx_reset = 1;
+ else { /* Merely re-enable the transmitter. */
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ }
+ }
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat = 0;
+ if (vp->msg_level & NETIF_MSG_MISC)
+ printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ inw(ioaddr + EL3_STATUS) & StatsFull) {
+ printk(KERN_WARNING "%s: Updating statistics failed, disabling "
+ "stats as an interrupt source.\n", dev->name);
+ EL3WINDOW(5);
+ /* Window 5 offset 10 holds the current interrupt-enable mask. */
+ outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+ EL3WINDOW(7);
+ DoneDidThat++;
+ }
+ }
+ if (status & IntReq) { /* Restore all interrupt sources. */
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
+ vp->restore_intr_mask = 0;
+ }
+ if (status & HostError) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ if (vp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Host error, status %x, FIFO diagnostic "
+ "register %4.4x.\n",
+ dev->name, status, fifo_diag);
+ /* Adapter failure requires Tx/Rx reset and reinit. */
+ if (vp->full_bus_master_tx) {
+ int bus_status = inl(ioaddr + PktStatus);
+ /* 0x80000000 PCI master abort. */
+ /* 0x40000000 PCI target abort. */
+ outw(TotalReset | 0xff, ioaddr + EL3_CMD);
+ for (i = 2000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ if (vp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: PCI bus error, bus status %8.8x, reset "
+ "had %d tick left.\n",
+ dev->name, bus_status, i);
+ /* Re-enable the receiver. */
+ outw(RxEnable, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ } else if (fifo_diag & 0x0400)
+ do_tx_reset = 1;
+ /* 0x3000: Rx FIFO over/underrun — reset and reprogram the receiver. */
+ if (fifo_diag & 0x3000) {
+ outw(RxReset | 7, ioaddr + EL3_CMD);
+ for (i = 200000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ if ((vp->drv_flags & HAS_V2_TX) && dev->mtu > 1500) {
+ EL3WINDOW(3);
+ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
+ }
+ /* Set the Rx filter to the current state. */
+ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
+ vp->rx_mode = 0;
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | HostError, ioaddr + EL3_CMD);
+ }
+ }
+ if (do_tx_reset) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 200; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ }
+
+}
+
+
+/* Transmit path for first-generation (Vortex) chips: one packet at a
+ * time, pushed into the Tx FIFO either by simple bus-master DMA or by
+ * programmed I/O.  Returns 0 when the packet was accepted, 1 when the
+ * queue is busy and the caller must requeue the skb.  Also drains the
+ * chip's Tx status stack on every call. */
+static int
+vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ vortex_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ outl(virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
+ /* Length rounded up to a doubleword. */
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ /* skb stays live until the DMADone interrupt frees it. */
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ netif_stop_tx_queue(dev);
+ /* Tx busy will be cleared at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_free_skb(skb);
+ if (inw(ioaddr + TxFree) <= 1536) {
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ }
+
+ dev->trans_start = jiffies;
+
+ /* Clear the Tx status stack. */
+ {
+ int tx_status;
+ int i = 32;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ /* Jabber/underrun: reset the Tx engine before re-enabling. */
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 200; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* Transmit path for Boomerang/Cyclone bus-master chips: append the skb
+ * to the Tx descriptor ring.  Boomerang parts require a DownStall /
+ * DownUnstall bracket (with interrupts off) while linking the new
+ * descriptor; later parts poll the ring and only need DownListPtr
+ * reloaded after a stall.  Returns 0 on acceptance, 1 to requeue. */
+static int
+boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int entry;
+ struct boom_tx_desc *prev_entry;
+ unsigned long flags;
+ int i;
+
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ vortex_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = vp->cur_tx % TX_RING_SIZE;
+ prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+
+ if (vp->msg_level & NETIF_MSG_TX_QUEUED)
+ printk(KERN_DEBUG "%s: Queuing Tx packet, index %d.\n",
+ dev->name, vp->cur_tx);
+ /* Impossible error. */
+ if (vp->tx_full) {
+ printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
+ dev->name);
+ return 1;
+ }
+ /* Fill in the descriptor: single fragment, interrupt on completion. */
+ vp->tx_skbuff[entry] = skb;
+ vp->tx_ring[entry].next = 0;
+ vp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+ if (vp->capabilities & CapNoTxLength)
+ vp->tx_ring[entry].status =
+ cpu_to_le32(TxNoRoundup | TxIntrUploaded | (entry << 2));
+ else
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+
+ if (vp->drv_flags & IS_BOOMERANG) {
+ /* Stall the download engine so it cannot race our list update. */
+ save_flags(flags);
+ cli();
+ outw(DownStall, ioaddr + EL3_CMD);
+ /* Wait for the stall to complete. */
+ for (i = 600; i >= 0 ; i--)
+ if ( (inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
+ break;
+ vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
+ vp->tx_desc_tail = &vp->tx_ring[entry];
+ /* If the engine ran off the end of the list, restart it here. */
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(virt_to_bus(&vp->tx_ring[entry]), ioaddr + DownListPtr);
+ queued_packet++;
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ restore_flags(flags);
+ } else {
+ /* Polling parts: just link in; reload DownListPtr only if the
+  * engine was marked stalled by a reset. */
+ vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
+ vp->tx_desc_tail = &vp->tx_ring[entry];
+ if (vp->restart_tx) {
+ outl(virt_to_bus(vp->tx_desc_tail), ioaddr + DownListPtr);
+ vp->restart_tx = 0;
+ queued_packet++;
+ }
+ }
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx >= TX_QUEUE_LEN) {
+ vp->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (vp->cur_tx - (volatile unsigned int)vp->dirty_tx
+ < TX_QUEUE_LEN - 2) {
+ vp->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else { /* Clear previous interrupt enable. */
+#if defined(tx_interrupt_mitigation)
+ prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
+#endif
+ netif_unpause_tx_queue(dev); /* Typical path */
+ }
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread.
+
+ Loops while IntLatch/RxComplete remain set, dispatching:
+ RxComplete -> vortex_rx(), UpComplete -> boomerang_rx(),
+ TxAvailable -> resume queue, DownComplete -> reap finished Tx
+ descriptors, DMADone -> free the single bus-master Tx skb, and all
+ uncommon events -> vortex_error().  A per-call work budget
+ (vp->max_interrupt_work) guards against interrupt storms: when
+ exhausted, pending sources are masked and the media timer restores
+ them (vp->restore_intr_mask).  A status of 0xffff means the card was
+ ejected/suspended. */
+static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr;
+ int latency, status;
+ int work_done = vp->max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+ latency = inb(ioaddr + Timer);
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (status == 0xffff)
+ goto handler_exit;
+ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+ dev->name, status, latency);
+ do {
+ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ vortex_rx(dev);
+ if (status & UpComplete) {
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ boomerang_rx(dev);
+ }
+
+ if (status & TxAvailable) {
+ if (vp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_resume_tx_queue(dev);
+ }
+
+ if (status & DownComplete) {
+ /* Reap completed Tx descriptors and free their skbs. */
+ unsigned int dirty_tx = vp->dirty_tx;
+
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ while (vp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int tx_status = le32_to_cpu(vp->tx_ring[entry].status);
+ /* Completion test differs by chip: status bit vs. chip's
+  * current list pointer. */
+ if (vp->capabilities & CapNoTxLength) {
+ if ( ! (tx_status & TxDownComplete))
+ break;
+ } else if (inl(ioaddr + DownListPtr) ==
+ virt_to_bus(&vp->tx_ring[entry]))
+ break; /* It still hasn't been processed. */
+ if (vp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (vp->tx_skbuff[entry]) {
+ dev_free_skb_irq(vp->tx_skbuff[entry]);
+ vp->tx_skbuff[entry] = 0;
+ }
+ /* vp->stats.tx_packets++; Counted below. */
+ dirty_tx++;
+ }
+ vp->dirty_tx = dirty_tx;
+ /* 4 entry hysteresis before marking the queue non-full. */
+ if (vp->tx_full && (vp->cur_tx - dirty_tx < TX_QUEUE_LEN - 4)) {
+ vp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+ }
+ if (status & DMADone) {
+ if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ /* Release the transfered buffer */
+ dev_free_skb_irq(vp->tx_skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_resume_tx_queue(dev);
+ } else /* Interrupt when FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+ }
+ /* Check for all uncommon interrupts at once. */
+ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
+ if (status == 0xffff)
+ break;
+ vortex_error(dev, status);
+ }
+
+ if (--work_done < 0) {
+ /* Budget exhausted: if only Up/Down completions remain, just
+  * ack them; otherwise throttle interrupts entirely. */
+ if ((status & (0x7fe - (UpComplete | DownComplete))) == 0) {
+ /* Just ack these and return. */
+ outw(AckIntr | UpComplete | DownComplete, ioaddr + EL3_CMD);
+ } else {
+ printk(KERN_WARNING "%s: Too much work in interrupt, status "
+ "%4.4x. Temporarily disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ /* The timer will reenable interrupts. */
+ vp->restore_intr_mask = 1;
+ break;
+ }
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ writel(0x8000, vp->cb_fn_base + 4);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, status);
+handler_exit:
+ return;
+}
+
+/* Receive path for non-ring (Vortex) chips: pull each packet out of
+ * the Rx FIFO — via simple bus-master DMA when available and idle,
+ * otherwise by programmed I/O — allocate an skb per packet and hand it
+ * to the network stack.  Errored or unallocatable packets are popped
+ * with RxDiscard and counted.  Always returns 0. */
+static int vortex_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ /* rx_status < 0 means "incomplete" (bit 15 set); 0 means empty. */
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K!. */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5);
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ if (vp->bus_master &&
+ ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+ /* DMA the packet straight into the skb, then spin until
+  * the master-in-progress bit clears. */
+ outl(virt_to_bus(skb_put(skb, pkt_len)),
+ ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ outw(StartDMAUp, ioaddr + EL3_CMD);
+ while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
+ ;
+ } else {
+ /* PIO copy, rounded up to doublewords. */
+ insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ vp->stats.rx_bytes += pkt_len;
+#endif
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
+ "size %d.\n", dev->name, pkt_len);
+ }
+ /* Error or allocation-failure path: drop the packet. */
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ vp->stats.rx_dropped++;
+ /* Wait a limited time to skip this packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+
+ return 0;
+}
+
+/* Receive path for bus-master ring (Boomerang+) chips: walk the Rx
+ * descriptor ring from cur_rx, copying small packets into fresh skbs
+ * (copybreak) and passing large ones up in-place, applying the chip's
+ * hardware IP/TCP/UDP checksum result.  Afterwards, refill any empty
+ * ring slots and unstall the upload engine.  Always returns 0. */
+static int
+boomerang_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ long ioaddr = dev->base_addr;
+ int rx_status;
+ /* Never process more entries than the refill loop can replace. */
+ int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In boomerang_rx(), status %4.4x, rx_status "
+ "%8.8x.\n",
+ inw(ioaddr+EL3_STATUS), (int)inl(ioaddr+UpPktStatus));
+ while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
+ if (--rx_work_limit < 0)
+ break;
+ if (rx_status & RxDError) { /* Error, update stats. */
+ /* Error flags live in the upper halfword of the status. */
+ unsigned char rx_error = rx_status >> 16;
+ if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x01) {
+ vp->stats.rx_over_errors++;
+ if (vp->drv_flags & HAS_V2_TX) {
+ /* Overrun: raise the Rx priority threshold a notch to buy
+  * the upload engine more bus time (capped at 0x20). */
+ int cur_rx_thresh = inb(ioaddr + RxPriorityThresh);
+ if (cur_rx_thresh < 0x20)
+ outb(cur_rx_thresh + 1, ioaddr + RxPriorityThresh);
+ else
+ printk(KERN_WARNING "%s: Excessive PCI latency causing"
+ " packet corruption.\n", dev->name);
+ }
+ }
+ } else {
+ /* The packet length: up to 4.5K!. */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < vp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ le32desc_to_virt(vp->rx_ring[entry].addr), pkt_len);
+ rx_copy++;
+ } else {
+ void *temp;
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+ /* Remove this checking code for final release. */
+ if (le32desc_to_virt(vp->rx_ring[entry].addr) != temp)
+ printk(KERN_ERR "%s: Warning -- the skbuff addresses do not match"
+ " in boomerang_rx: %p vs. %p.\n", dev->name,
+ bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
+ temp);
+ rx_nocopy++;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ { /* Use hardware checksum info. */
+ int csum_bits = rx_status & 0xee000000;
+ if (csum_bits &&
+ (csum_bits == (IPChksumValid | TCPChksumValid) ||
+ csum_bits == (IPChksumValid | UDPChksumValid))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_csumhits++;
+ }
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ vp->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(vp->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ outw(UpUnstall, ioaddr + EL3_CMD);
+ }
+ return 0;
+}
+
+/* Quiesce the chip without releasing resources: stop statistics,
+ * disable Rx/Tx, power down thinnet, mask interrupts, take a final
+ * statistics snapshot, and clear the bus-master list pointers.
+ * Used by vortex_close() and (presumably) suspend paths. */
+static void
+vortex_down(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Turn off statistics ASAP. We update vp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+ /* Mask every interrupt source. */
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx)
+ outl(0, ioaddr + UpListPtr);
+ if (vp->full_bus_master_tx)
+ outl(0, ioaddr + DownListPtr);
+}
+
+/* dev->stop handler: stop the queue and media timer, quiesce the chip
+ * via vortex_down(), release the IRQ, issue a final partial TotalReset,
+ * and free all skbs still held by the Rx/Tx descriptor rings.
+ * Always returns 0. */
+static int
+vortex_close(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (vp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+ printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
+ " tx_queued %d Rx pre-checksummed %d.\n",
+ dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
+ }
+
+ del_timer(&vp->timer);
+ vortex_down(dev);
+ free_irq(dev->irq, dev);
+ /* Partial reset (mask 0x34 selects which subsystems to reset). */
+ outw(TotalReset | 0x34, ioaddr + EL3_CMD);
+
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ vp->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = 0;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (vp->tx_skbuff[i]) {
+ dev_free_skb(vp->tx_skbuff[i]);
+ vp->tx_skbuff[i] = 0;
+ }
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* dev->get_stats handler: if the interface is up, fold the chip's
+ * hardware counters into vp->stats (with interrupts off to avoid
+ * racing the ISR's window switches), then return the accumulated
+ * statistics. */
+static struct net_device_stats *vortex_get_stats(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+ restore_flags(flags);
+ }
+ return &vp->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+/* Read-and-clear the Window 6 hardware statistics registers into
+ * vp->stats.  The counters clear on read, so every register must be
+ * read even when its value is discarded.  Restores the register
+ * window that was active on entry. */
+static void update_stats(long ioaddr, struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ /* Reading EL3_CMD returns the status word; window # is in bits 15-13. */
+ int old_window = inw(ioaddr + EL3_CMD);
+
+ if (old_window == 0xffff) /* Chip suspended or ejected. */
+ return;
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ vp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ vp->stats.collisions += inb(ioaddr + 3);
+ vp->stats.tx_window_errors += inb(ioaddr + 4);
+ vp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ vp->stats.tx_packets += inb(ioaddr + 6);
+ /* Register 9 bits 4-5 are the Tx-packets counter's high bits. */
+ vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ /* Rx Bytes is unreliable */ inw(ioaddr + 10);
+#if LINUX_VERSION_CODE > 0x020119
+ vp->stats.tx_bytes += inw(ioaddr + 12);
+#else
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+#endif
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* We change back to window 7 (not 1) with the Vortex. */
+ EL3WINDOW(old_window >> 13);
+ return;
+}
+
+/* dev->do_ioctl handler.  Implements the MII management ioctls
+ * (the hex pairs are the old/new SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
+ * command numbers), emulating an MII PHY at address 32 for chips with
+ * no real MII, plus the driver-private SIOCGPARAMS/SIOCSPARAMS tuning
+ * interface.  Write operations require CAP_NET_ADMIN.
+ * Returns 0, -EPERM, or -EOPNOTSUPP. */
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ /* MII ioctls pass phy_id/reg_num/val in/out as an array of u16. */
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+ int phy = vp->phys[0];
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = phy;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ if (data[0] == 32) { /* Emulate MII for 3c59*, 3c900. */
+ data[3] = 0;
+ switch (data[1]) {
+ case 0:
+ /* Synthesize a BMCR: speed and duplex bits only. */
+ if (dev->if_port == XCVR_100baseTx) data[3] |= 0x2000;
+ if (vp->full_duplex) data[3] |= 0x0100;
+ break;
+ case 1:
+ /* Synthesize a BMSR from available media + live link beat. */
+ if (vp->available_media & 0x02) data[3] |= 0x6000;
+ if (vp->available_media & 0x08) data[3] |= 0x1800;
+ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ if (inw(ioaddr + Wn4_Media) & Media_LnkBeat) data[3] |= 0x0004;
+ spin_unlock(&vp->window_lock);
+ break;
+ case 2: data[3] = 0x0280; break; /* OUI 00:a0:24 */
+ case 3: data[3] = 0x9000; break;
+ default: break;
+ }
+ return 0;
+ }
+ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ spin_unlock(&vp->window_lock);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == vp->phys[0]) {
+ u16 value = data[2];
+ if (vp->phys[0] == 32) {
+ /* Emulated PHY: only BMCR writes have any effect. */
+ if (data[1] == 0) {
+ vp->media_override = (value & 0x2000) ?
+ XCVR_100baseTx : XCVR_10baseT;
+ vp->full_duplex = (value & 0x0100) ? 1 : 0;
+ vp->medialock = 1;
+ }
+ return 0;
+ }
+ /* Real PHY: track writes that change our cached media state. */
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ vp->medialock = (value & 0x9000) ? 0 : 1;
+ if (vp->medialock)
+ vp->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: vp->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ spin_unlock(&vp->window_lock);
+ return 0;
+ case SIOCGPARAMS:
+ /* Export the driver tunables to user space. */
+ data32[0] = vp->msg_level;
+ data32[1] = vp->multicast_filter_limit;
+ data32[2] = vp->max_interrupt_work;
+ data32[3] = vp->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ vp->msg_level = data32[0];
+ vp->multicast_filter_limit = data32[1];
+ vp->max_interrupt_work = data32[2];
+ vp->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+/* Compute the Ethernet CRC-32 of LENGTH bytes at DATA, bit-serially,
+   seeded with all-ones and processing each octet LSB-first.  The result
+   is used only to select a multicast hash filter bit. */
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ /* (crc < 0) tests the top CRC bit; XOR against the next data bit
+    decides whether the polynomial is folded in. */
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
+
+/* Pre-Cyclone chips have no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. Cyclone and later
+ chips have a write-only table of unknown size.
+ At least the chip has a very clean way to set the other filter modes. */
+/* Program the chip's Rx filter to match dev->flags and the multicast list.
+   Promiscuous and all-multicast fall back to the broad filter modes; on
+   chips with the hash filter (HAS_V2_TX capability flag here) individual
+   multicast addresses are hashed into the write-only filter table. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct vortex_private *vp = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log a net tap. */
+ printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else if ((vp->drv_flags & HAS_V2_TX) &&
+ dev->mc_count < vp->multicast_filter_limit) {
+ struct dev_mc_list *mclist;
+ int i;
+ /* Set any filter bits not already set; vp->mc_filter caches which
+    bits have been written (the hardware table is write-only). */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int filter_bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0xff;
+ if (test_bit(filter_bit, vp->mc_filter))
+ continue;
+ outw(SetFilterBit | 0x0400 | filter_bit, ioaddr + EL3_CMD);
+ set_bit(filter_bit, vp->mc_filter);
+ }
+
+ new_mode = SetRxFilter|RxStation|RxMulticastHash|RxBroadcast;
+ } else if (dev->mc_count) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ /* Only touch the hardware when the mode actually changes. */
+ if (vp->rx_mode != new_mode) {
+ vp->rx_mode = new_mode;
+ outw(new_mode, ioaddr + EL3_CMD);
+ }
+}
+
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define mdio_delay() inl(mdio_addr)
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+/* Clock out BITS logic-one bits on the MDIO line to (re)synchronize the
+   transceiver's serial state machine.  Caller must have window 4 selected
+   (Wn4_PhysicalMgmt is a window-4 register). */
+static void mdio_sync(long ioaddr, int bits)
+{
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+}
+
+/* Read MII register LOCATION of PHY_ID by bit-banging the management
+   frame on Wn4_PhysicalMgmt.  Returns the 16-bit register value, or
+   0xffff if the PHY did not drive the turnaround bit (no device).
+   Caller must have window 4 selected. */
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition and 16 data bits. */
+ for (i = 18; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Bit 16 is the turnaround bit: set means the PHY failed to respond. */
+ return retval & 0x10000 ? 0xffff : retval & 0xffff;
+}
+
+/* Write VALUE to MII register LOCATION of PHY_ID using the same
+   bit-banged MDIO interface as mdio_read().  Caller must have window 4
+   selected. */
+static void mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ /* 0x5002 = start(01) + write-op(01) + turnaround(10) prefix bits. */
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Leave the interface idle. */
+ mdio_sync(ioaddr, 32);
+
+ return;
+}
+
+#if ! defined(NO_PCI)
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+/* Arm Wake-on-LAN (Magic Packet only) and drop the board into the D3
+   power-down state via its PCI power-management registers. */
+static void acpi_set_WOL(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+ EL3WINDOW(7);
+ outw(2, ioaddr + 0x0c);
+ /* The RxFilter must accept the WOL frames. */
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ /* Change the power state to D3; RxEnable doesn't take effect. */
+ /* NOTE(review): 0xe0 is presumably the PCI PM control/status register
+    on these boards — confirm against the 3Com datasheet. */
+ pci_write_config_word(vp->pci_dev, 0xe0, 0x8103);
+}
+#endif
+
+/* Power-management / driver-lifetime event callback.
+   DEV_INSTANCE is the net_device; EVENT is one of the DRV_* codes.
+   Returns 0 on success, -1 if the event cannot be honored. */
+static int pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct vortex_private *np = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Quiesce the chip; arm WOL if the hardware supports it. */
+ vortex_down(dev);
+ netif_stop_tx_queue(dev);
+ if (np->capabilities & CapPwrMgmt)
+ acpi_set_WOL(dev);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ activate_xcvr(dev);
+ set_media_type(dev);
+ start_operation(dev);
+ /* Force set_rx_mode() to reprogram the filter from scratch. */
+ np->rx_mode = 0;
+ set_rx_mode(dev);
+ start_operation1(dev);
+ break;
+ case DRV_DETACH: {
+ /* Tear the device down completely: close, unregister, release
+    resources, unlink from the driver's module list, and free. */
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_vortex_dev; *devp; devp = next) {
+ next = &((struct vortex_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ case DRV_PWR_WakeOn:
+ if ( ! (np->capabilities & CapPwrMgmt))
+ return -1;
+ EL3WINDOW(7);
+ /* Power up on: 1=Downloaded Filter, 2=Magic Packets, 4=Link Status.*/
+ outw(2, ioaddr + 12);
+ /* This RxEnable doesn't take effect if we immediately change to D3. */
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ acpi_set_pwr_state(np->pci_dev, ACPI_D3);
+ break;
+ }
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Module unload: unregister the driver, then walk the device list
+   resetting each board (arming WOL where supported), releasing its
+   I/O resources and freeing its structures. */
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&vortex_ops);
+#elif ! defined(NO_PCI)
+ pci_drv_unregister(&vortex_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_vortex_dev) {
+ struct vortex_private *vp=(void *)(root_vortex_dev->priv);
+ unregister_netdev(root_vortex_dev);
+ outw(TotalReset | 0x14, root_vortex_dev->base_addr + EL3_CMD);
+ if (vp->capabilities & CapPwrMgmt)
+ acpi_set_WOL(root_vortex_dev);
+#ifdef USE_MEM_OPS
+ iounmap((char *)root_vortex_dev->base_addr);
+#else
+ release_region(root_vortex_dev->base_addr,
+ pci_tbl[vp->chip_id].io_size);
+#endif
+ /* Save the link before freeing the device that holds it. */
+ next_dev = vp->next_module;
+ if (vp->priv_addr)
+ kfree(vp->priv_addr);
+ kfree(root_vortex_dev);
+ root_vortex_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` 3c59x.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c 3c59x.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c 3c59x.c -o 3c575_cb.o -I/usr/src/pcmcia/include/"
+ * eisa-only-compile: "gcc -DNO_PCI -DMODULE -O6 -c 3c59x.c -o 3c597.o"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/8390.c b/linux/src/drivers/net/8390.c
new file mode 100644
index 0000000..747ccb0
--- /dev/null
+++ b/linux/src/drivers/net/8390.c
@@ -0,0 +1,829 @@
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Seeing how at least eight drivers use this code, (not counting the
+ PCMCIA ones either) it is easy to break some card by what seems like
+ a simple innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+static const char *version =
+ "8390.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "8390.h"
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef EI_DEBUG
+int ei_debug = EI_DEBUG;
+#else
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct device *dev);
+static void ei_tx_err(struct device *dev);
+static void ei_receive(struct device *dev);
+static void ei_rx_overrun(struct device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct device *dev);
+
+
+/* Open/initialize the board. This routine goes all-out, setting everything
+ up anew at each open, even though many of these registers should only
+ need to be set once at boot.
+ */
+/* Bring the 8390 interface up: map the IRQ to this device, fully
+   (re)initialize the chip, and mark the device started.
+   Returns 0, or -ENXIO if ethdev_init() was never called. */
+int ei_open(struct device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* This can't happen unless somebody forgot to call ethdev_init(). */
+ if (ei_local == NULL) {
+ printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
+ return -ENXIO;
+ }
+
+ irq2dev_map[dev->irq] = dev;
+ NS8390_init(dev, 1);
+ dev->start = 1;
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+/* Opposite of above. Only used when "ifconfig <devname> down" is done. */
+/* Opposite of above. Only used when "ifconfig <devname> down" is done. */
+/* Stop the chip (init with startp==0 leaves it in the stopped state)
+   and mark the device not started.  Always returns 0. */
+int ei_close(struct device *dev)
+{
+ NS8390_init(dev, 0);
+ dev->start = 0;
+ return 0;
+}
+
+/* Queue SKB for transmission.  Handles Tx timeouts when called with
+   dev->tbusy set, uploads the packet to the card's buffer(s), and
+   triggers the send.  Returns 0 on success, 1 to tell the network
+   layer to retry later. */
+static int ei_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int length, send_length, output_page;
+
+/*
+ * We normally shouldn't be called if dev->tbusy is set, but the
+ * existing code does anyway. If it has been too long since the
+ * last Tx, we assume the board has died and kick it.
+ */
+
+ if (dev->tbusy) { /* Do timeouts, just like the 8003 driver. */
+ int txsr = inb(e8390_base+EN0_TSR), isr;
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < TX_TIMEOUT || (tickssofar < (TX_TIMEOUT+5) && ! (txsr & ENTSR_PTX))) {
+ return 1;
+ }
+ isr = inb(e8390_base+EN0_ISR);
+ if (dev->start == 0) {
+ printk("%s: xmit on stopped card\n", dev->name);
+ return 1;
+ }
+
+ /*
+ * Note that if the Tx posted a TX_ERR interrupt, then the
+ * error will have been handled from the interrupt handler.
+ * and not here.
+ */
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets) {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ NS8390_init(dev, 1);
+ dev->trans_start = jiffies;
+ }
+
+ /* Sending a NULL skb means some higher layer thinks we've missed an
+ tx-done interrupt. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ length = skb->len;
+ if (skb->len <= 0)
+ return 0;
+
+ /* Mask interrupts from the ethercard. */
+ outb_p(0x00, e8390_base + EN0_IMR);
+ if (dev->interrupt) {
+ printk("%s: Tx request while isr active.\n",dev->name);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ return 1;
+ }
+ ei_local->irqlock = 1;
+
+ /* Pad runt packets up to the Ethernet minimum on the wire. */
+ send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
+
+#ifdef EI_PINGPONG
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0) {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk("%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ } else if (ei_local->tx2 == 0) {
+ output_page = ei_local->tx_start_page + TX_1X_PAGES;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk("%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ } else { /* We should never get here. */
+ if (ei_debug)
+ printk("%s: No Tx buffers free! irq=%d tx1=%d tx2=%d last=%d\n",
+ dev->name, dev->interrupt, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ dev->tbusy = 1;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ return 1;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ ei_block_output(dev, length, skb->data, output_page);
+ if (! ei_local->txing) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ /* -1/-2 in tx1/tx2 and lasttx mean "this slot is on the wire";
+    ei_tx_intr() decodes these markers. */
+ if (output_page == ei_local->tx_start_page) {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ } else {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ } else
+ ei_local->txqueue++;
+
+ dev->tbusy = (ei_local->tx1 && ei_local->tx2);
+
+#else /* EI_PINGPONG */
+
+ /*
+ * Only one Tx buffer in use. You need two Tx bufs to come close to
+ * back-to-back transmits. Expect a 20 -> 25% performance hit on
+ * reasonable hardware if you only use one Tx buffer.
+ */
+
+ ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ dev->tbusy = 1;
+
+#endif /* EI_PINGPONG */
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the ether interface interrupts. */
+/* The typical workload of the driver:
+   Handle the ether interface interrupts.  Services Rx, Tx, Tx-error,
+   overrun and counter-overflow events in a loop, bounded by MAX_SERVICE
+   iterations to avoid livelock under heavy traffic. */
+void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int e8390_base;
+ int interrupts, nr_serviced = 0;
+ struct ei_device *ei_local;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ e8390_base = dev->base_addr;
+ ei_local = (struct ei_device *) dev->priv;
+ if (dev->interrupt || ei_local->irqlock) {
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+ return;
+ }
+
+ dev->interrupt = 1;
+
+ /* Change to page 0 and read the intr status reg. */
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+ if (ei_debug > 3)
+ printk("%s: interrupt(isr=%#2.2x).\n", dev->name,
+ inb_p(e8390_base + EN0_ISR));
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE) {
+ if (dev->start == 0) {
+ printk("%s: interrupt from stopped card\n", dev->name);
+ interrupts = 0;
+ break;
+ }
+ /* Overrun takes priority over normal receive. */
+ if (interrupts & ENISR_OVER) {
+ ei_rx_overrun(dev);
+ } else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX) {
+ ei_tx_intr(dev);
+ } else if (interrupts & ENISR_TX_ERR) {
+ ei_tx_err(dev);
+ }
+
+ if (interrupts & ENISR_COUNTERS) {
+ /* Counter registers clear on read; fold them into the stats. */
+ ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
+ outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
+ }
+
+ /* Ignore any RDC interrupts that make it back to here. */
+ if (interrupts & ENISR_RDC) {
+ outb_p(ENISR_RDC, e8390_base + EN0_ISR);
+ }
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ }
+
+ if (interrupts && ei_debug) {
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ if (nr_serviced >= MAX_SERVICE) {
+ printk("%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk("%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+ dev->interrupt = 0;
+ return;
+}
+
+/*
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ */
+
+/* Handle a TX_ERR interrupt: log the cause (when VERBOSE_ERROR_DUMP),
+   ack the interrupt, and if the transmit was aborted re-enter the
+   normal Tx-done path so the next packet goes out immediately. */
+static void ei_tx_err(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ unsigned char txsr = inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+#ifdef VERBOSE_ERROR_DUMP
+ printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ if (txsr & ENTSR_ABT)
+ printk("excess-collisions ");
+ if (txsr & ENTSR_ND)
+ printk("non-deferral ");
+ if (txsr & ENTSR_CRS)
+ printk("lost-carrier ");
+ if (txsr & ENTSR_FU)
+ printk("FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ printk("lost-heartbeat ");
+ printk("\n");
+#endif
+
+ outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+
+ /*
+ * Note: NCR reads zero on 16 collisions so we add them
+ * in by hand. Somebody might care...
+ */
+ if (txsr & ENTSR_ABT)
+ ei_local->stat.collisions += 16;
+
+}
+
+/* We have finished a transmit: check for errors and then trigger the next
+ packet to be sent. */
+/* We have finished a transmit: check for errors and then trigger the next
+   packet to be sent.  In ping-pong mode, lasttx encodes which slot was on
+   the wire (-1/1 = slot 1, -2/2 = slot 2; 10/20 = idle), and tx1/tx2 hold
+   the queued length (>0), in-flight marker (-1), or free (0). */
+static void ei_tx_intr(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ int status = inb(e8390_base + EN0_TSR);
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
+
+#ifdef EI_PINGPONG
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+ if (ei_local->tx1 < 0) {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk("%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ dev->tbusy = 0;
+ if (ei_local->tx2 > 0) {
+ /* Slot 2 is loaded and waiting: send it now. */
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1,
+ ei_local->lasttx = 2;
+ } else
+ ei_local->lasttx = 20, ei_local->txing = 0;
+ } else if (ei_local->tx2 < 0) {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ dev->tbusy = 0;
+ if (ei_local->tx1 > 0) {
+ /* Slot 1 is loaded and waiting: send it now. */
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ } else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ } else
+ printk("%s: unexpected TX-done interrupt, lasttx=%d.\n",
+ dev->name, ei_local->lasttx);
+
+#else /* EI_PINGPONG */
+ /*
+ * Single Tx buffer: mark it free so another packet can be loaded.
+ */
+ ei_local->txing = 0;
+ dev->tbusy = 0;
+#endif
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT) ei_local->stat.tx_aborted_errors++;
+ if (status & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU) ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+
+ /* Wake the queue layer so it can feed us the next packet. */
+ mark_bh (NET_BH);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+
+/* Drain received frames from the card's ring buffer: up to 9 packets per
+   call are validated, copied into fresh skbs and handed to netif_rx().
+   The BOUNDARY register always trails the current page by one. */
+static void ei_receive(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+ int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
+
+ while (++rx_pkt_count < 10) {
+ int pkt_len;
+
+ /* Get the rx page (incoming packet pointer). */
+ outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
+ rxing_page = inb_p(e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone claimed to have a problem.) */
+ if (ei_debug > 0 && this_frame != ei_local->current_page)
+ printk("%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+
+ /* Pages the frame should span: header (4) + data, 256 bytes/page. */
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ /* Check for bogosity warned by 3c503 book: the status byte is never
+ written. This happened a lot during testing! This code should be
+ cleaned up someday. */
+ if (rx_frame.next != next_frame
+ && rx_frame.next != next_frame + 1
+ && rx_frame.next != next_frame - num_rx_pages
+ && rx_frame.next != next_frame + 1 - num_rx_pages) {
+ /* Header is implausible: flush the whole ring and resync. */
+ ei_local->current_page = rxing_page;
+ outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
+ ei_local->stat.rx_errors++;
+ continue;
+ }
+
+ if (pkt_len < 60 || pkt_len > 1518) {
+ if (ei_debug)
+ printk("%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ } else if ((rx_frame.status & 0x0F) == ENRSR_RXOK) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ if (ei_debug > 1)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ break;
+ } else {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ ei_local->stat.rx_packets++;
+ }
+ } else {
+ int errs = rx_frame.status;
+ if (ei_debug)
+ printk("%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ if (errs & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here for avoiding bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ /* We used to also ack ENISR_OVER here, but that would sometimes mask
+ a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
+ outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
+ return;
+}
+
+/*
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ */
+/* Recover from a receiver ring-buffer overrun, following the exact
+   stop / wait / clear-RBCR / loopback / restart sequence National
+   Semiconductor prescribes; deviating can wedge the NIC. */
+static void ei_rx_overrun(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ unsigned long wait_start_time;
+ unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk("%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+ wait_start_time = jiffies;
+ while (jiffies - wait_start_time <= 1*HZ/100)
+ barrier();
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ outb_p(0x00, e8390_base+EN0_RCNTLO);
+ outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+ if (was_txing) {
+ unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed) must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+ outb_p(ENISR_OVER, e8390_base+EN0_ISR);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
+ if (must_resend)
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+
+}
+
+/* Return the interface statistics, first folding in the chip's
+   clear-on-read error counters (only when the card is running). */
+static struct enet_statistics *get_stats(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* If the card is stopped, just return the present stats. */
+ if (dev->start == 0) return &ei_local->stat;
+
+ /* Read the counter registers, assuming we are in page 0. */
+ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
+
+ return &ei_local->stat;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+/* Program the Rx configuration register from dev->flags: promiscuous
+   (0x18 = accept-all + accept-multicast), multicast (0x08), or the
+   default unicast+broadcast mode. */
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ outb_p(E8390_RXCONFIG | 0x18, ioaddr + EN0_RXCR);
+ }
+ else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ {
+ /* The multicast-accept list is initialized to accept-all, and we
+ rely on higher-level filtering for now. */
+ outb_p(E8390_RXCONFIG | 0x08, ioaddr + EN0_RXCR);
+ }
+ else
+ outb_p(E8390_RXCONFIG, ioaddr + EN0_RXCR);
+}
+
+/* Initialize the rest of the 8390 device structure. */
+/* Initialize the rest of the 8390 device structure. */
+/* Allocates the per-board ei_device (if the board driver has not done
+   so), installs the generic 8390 method pointers and runs the common
+   Ethernet setup.  Returns 0 or -ENOMEM. */
+int ethdev_init(struct device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s", version);
+
+ if (dev->priv == NULL) {
+ dev->priv = kmalloc(sizeof(struct ei_device), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct ei_device));
+ }
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ ether_setup(dev);
+
+ return 0;
+}
+
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+/* Full chip (re)initialization.  STARTP nonzero also starts the chip
+   (interrupts unmasked, transmitter/receiver enabled); zero leaves it
+   stopped, as used by ei_close(). */
+void NS8390_init(struct device *dev, int startp)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int i;
+ int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+ unsigned long flags;
+
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital!. */
+ outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers,
+ and set the multicast hash bitmap to receive all multicasts. */
+ save_flags(flags);
+ cli();
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base); /* 0x61 */
+ for(i = 0; i < 6; i++) {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS + i);
+ }
+ /* Initialize the multicast list to accept-all. If we enable multicast
+ the higher levels can do the filtering. */
+ for(i = 0; i < 8; i++)
+ outb_p(0xff, e8390_base + EN1_MULT + i);
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base);
+ restore_flags(flags);
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+ if (startp) {
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base);
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
+ dev->set_multicast_list(dev); /* Get the multicast status right if this
+ was a reset. */
+ }
+ return;
+}
+
+/* Trigger a transmit start, assuming the length is valid. */
+/* Trigger a transmit start, assuming the length is valid. */
+/* Loads the byte count and start page, then issues the TRANS command.
+   Caller must have LENGTH already padded to the Ethernet minimum.
+   Silently refuses (with a log message) if the transmitter is busy. */
+static void NS8390_trigger_send(struct device *dev, unsigned int length,
+ int start_page)
+{
+ int e8390_base = dev->base_addr;
+
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base);
+
+ if (inb_p(e8390_base) & E8390_TRANS) {
+ printk("%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base);
+ return;
+}
+
+#ifdef MODULE
+
+/* Module entry points: 8390.c is a support library for board drivers,
+   so loading and unloading the module itself does nothing. */
+int init_module(void)
+{
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 8390.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/8390.h b/linux/src/drivers/net/8390.h
new file mode 100644
index 0000000..9cc0ddc
--- /dev/null
+++ b/linux/src/drivers/net/8390.h
@@ -0,0 +1,175 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+
+#define TX_2X_PAGES 12
+#define TX_1X_PAGES 6
+
+/* Should always use two Tx slots to get back-to-back transmits. */
+#define EI_PINGPONG
+
+#ifdef EI_PINGPONG
+#define TX_PAGES TX_2X_PAGES
+#else
+#define TX_PAGES TX_1X_PAGES
+#endif
+
+#define ETHER_ADDR_LEN 6
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
+
+/* From 8390.c */
+extern int ei_debug;
+extern struct sigaction ei_sigaction;
+
+extern int ethif_init(struct device *dev);
+extern int ethdev_init(struct device *dev);
+extern void NS8390_init(struct device *dev, int startp);
+extern int ei_open(struct device *dev);
+extern int ei_close(struct device *dev);
+extern void ei_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+#ifndef HAVE_AUTOIRQ
+/* From auto_irq.c */
+extern struct device *irq2dev_map[16];
+extern int autoirq_setup(int waittime);
+extern int autoirq_report(int waittime);
+#endif
+
+/* Most of these entries should be in 'struct device' (or most of the
+ things in there should be here!) */
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct device *);
+ void (*get_8390_hdr)(struct device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct device *, int, const unsigned char *, int);
+ void (*block_input)(struct device *, int, struct sk_buff *, int);
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
+ unsigned char txqueue; /* Tx Packet buffer queue length. */
+ short tx1, tx2; /* Packet lengths for ping-pong tx. */
+ short lasttx; /* Alpha version consistency check. */
+ unsigned char reg0; /* Register '0' in a WD8013 */
+ unsigned char reg5; /* Register '5' in a WD8013 */
+ unsigned char saved_irq; /* Original dev->irq value. */
+ /* The new statistics table. */
+ struct enet_statistics stat;
+};
+
+/* The maximum number of 8390 interrupt service routines called per IRQ. */
+#define MAX_SERVICE 12
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
+#define TX_TIMEOUT (20*HZ/100)
+
+#define ei_status (*(struct ei_device *)(dev->priv))
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
+
+#define E8390_CMD 0x00 /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */
+#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */
+#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */
+#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */
+#define EN0_TSR 0x04 /* Transmit status reg RD */
+#define EN0_TPSR 0x04 /* Transmit starting page WR */
+#define EN0_NCR 0x05 /* Number of collision reg RD */
+#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */
+#define EN0_FIFO 0x06 /* FIFO RD */
+#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */
+#define EN0_ISR 0x07 /* Interrupt status reg RD WR */
+#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */
+#define EN0_RSARLO 0x08 /* Remote start address reg 0 */
+#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */
+#define EN0_RSARHI 0x09 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */
+#define EN0_RSR 0x0c /* rx status reg RD */
+#define EN0_RXCR 0x0c /* RX configuration reg WR */
+#define EN0_TXCR 0x0d /* TX configuration reg WR */
+#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */
+#define EN0_DCFG 0x0e /* Data configuration reg WR */
+#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */
+#define EN0_IMR 0x0f /* Interrupt mask reg WR */
+#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS 0x01 /* This board's physical enet addr RD WR */
+#define EN1_CURPAG 0x07 /* Current memory page RD WR */
+#define EN1_MULT 0x08 /* Multicast filter mask array (8 bytes) RD WR */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY 0x20 /* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+#endif /* _8390_h */
diff --git a/linux/src/drivers/net/Space.c b/linux/src/drivers/net/Space.c
new file mode 100644
index 0000000..083cdeb
--- /dev/null
+++ b/linux/src/drivers/net/Space.c
@@ -0,0 +1,541 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Holds initial configuration information for devices.
+ *
+ * NOTE: This file is a nice idea, but its current format does not work
+ * well for drivers that support multiple units, like the SLIP
+ * driver. We should actually have only one pointer to a driver
+ * here, with the driver knowing how many units it supports.
+ * Currently, the SLIP driver abuses the "base_addr" integer
+ * field of the 'device' structure to store the unit number...
+ * -FvK
+ *
+ * Version: @(#)Space.c 1.0.8 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald J. Becker, <becker@super.org>
+ *
+ * FIXME:
+ * Sort the device chain fastest first.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+
+#define NEXT_DEV NULL
+
+
+/* A unified ethernet device probe. This is the easiest way to have every
+ ethernet adaptor have the name "eth[0123...]".
+ */
+
+extern int tulip_probe(struct device *dev);
+extern int hp100_probe(struct device *dev);
+extern int ultra_probe(struct device *dev);
+extern int ultra32_probe(struct device *dev);
+extern int wd_probe(struct device *dev);
+extern int el2_probe(struct device *dev);
+extern int ne_probe(struct device *dev);
+extern int ne2k_pci_probe(struct device *dev);
+extern int hp_probe(struct device *dev);
+extern int hp_plus_probe(struct device *dev);
+extern int znet_probe(struct device *);
+extern int express_probe(struct device *);
+extern int eepro_probe(struct device *);
+extern int el3_probe(struct device *);
+extern int at1500_probe(struct device *);
+extern int at1700_probe(struct device *);
+extern int fmv18x_probe(struct device *);
+extern int eth16i_probe(struct device *);
+extern int depca_probe(struct device *);
+extern int apricot_probe(struct device *);
+extern int ewrk3_probe(struct device *);
+extern int de4x5_probe(struct device *);
+extern int el1_probe(struct device *);
+extern int via_rhine_probe(struct device *);
+#if defined(CONFIG_WAVELAN)
+extern int wavelan_probe(struct device *);
+#endif /* defined(CONFIG_WAVELAN) */
+extern int el16_probe(struct device *);
+extern int elplus_probe(struct device *);
+extern int ac3200_probe(struct device *);
+extern int e2100_probe(struct device *);
+extern int ni52_probe(struct device *);
+extern int ni65_probe(struct device *);
+extern int SK_init(struct device *);
+extern int seeq8005_probe(struct device *);
+extern int tc59x_probe(struct device *);
+extern int dgrs_probe(struct device *);
+extern int smc_init( struct device * );
+extern int sparc_lance_probe(struct device *);
+extern int atarilance_probe(struct device *);
+extern int a2065_probe(struct device *);
+extern int ariadne_probe(struct device *);
+extern int hydra_probe(struct device *);
+extern int yellowfin_probe(struct device *);
+extern int eepro100_probe(struct device *);
+extern int epic100_probe(struct device *);
+extern int rtl8139_probe(struct device *);
+extern int tlan_probe(struct device *);
+extern int isa515_probe(struct device *);
+extern int pcnet32_probe(struct device *);
+extern int lance_probe(struct device *);
+/* Detachable devices ("pocket adaptors") */
+extern int atp_init(struct device *);
+extern int de600_probe(struct device *);
+extern int de620_probe(struct device *);
+/* The shaper hook */
+extern int shaper_probe(struct device *);
+/* Red Creek PCI hook */
+extern int rcpci_probe(struct device *);
+
+static int
+ethif_probe(struct device *dev)
+{
+ u_long base_addr = dev->base_addr;
+
+ if ((base_addr == 0xffe0) || (base_addr == 1))
+ return 1; /* ENXIO */
+
+ if (1
+ /* All PCI probes are safe, and thus should be first. */
+#ifdef CONFIG_DE4X5 /* DEC DE425, DE434, DE435 adapters */
+ && de4x5_probe(dev)
+#endif
+#ifdef CONFIG_DGRS
+ && dgrs_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO100B /* Intel EtherExpress Pro100B */
+ && eepro100_probe(dev)
+#endif
+#ifdef CONFIG_EPIC
+ && epic100_probe(dev)
+#endif
+#if defined(CONFIG_HP100)
+ && hp100_probe(dev)
+#endif
+#if defined(CONFIG_NE2K_PCI)
+ && ne2k_pci_probe(dev)
+#endif
+#ifdef CONFIG_PCNET32
+ && pcnet32_probe(dev)
+#endif
+#ifdef CONFIG_RTL8139
+ && rtl8139_probe(dev)
+#endif
+#if defined(CONFIG_VIA_RHINE)
+ && via_rhine_probe(dev)
+#endif
+#if defined(CONFIG_VORTEX)
+ && tc59x_probe(dev)
+#endif
+#if defined(CONFIG_DEC_ELCP)
+ && tulip_probe(dev)
+#endif
+#ifdef CONFIG_YELLOWFIN
+ && yellowfin_probe(dev)
+#endif
+ /* Next mostly-safe EISA-only drivers. */
+#ifdef CONFIG_AC3200 /* Ansel Communications EISA 3200. */
+ && ac3200_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA32)
+ && ultra32_probe(dev)
+#endif
+ /* Third, sensitive ISA boards. */
+#ifdef CONFIG_AT1700
+ && at1700_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA)
+ && ultra_probe(dev)
+#endif
+#if defined(CONFIG_SMC9194)
+ && smc_init(dev)
+#endif
+#if defined(CONFIG_WD80x3)
+ && wd_probe(dev)
+#endif
+#if defined(CONFIG_EL2) /* 3c503 */
+ && el2_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN)
+ && hp_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN_PLUS)
+ && hp_plus_probe(dev)
+#endif
+#if defined(CONFIG_SEEQ8005)
+ && seeq8005_probe(dev)
+#endif
+#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
+ && e2100_probe(dev)
+#endif
+#if defined(CONFIG_NE2000)
+ && ne_probe(dev)
+#endif
+#ifdef CONFIG_AT1500
+ && at1500_probe(dev)
+#endif
+#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */
+ && fmv18x_probe(dev)
+#endif
+#ifdef CONFIG_ETH16I
+ && eth16i_probe(dev) /* ICL EtherTeam 16i/32 */
+#endif
+#ifdef CONFIG_EL3 /* 3c509 */
+ && el3_probe(dev)
+#endif
+#ifdef CONFIG_3C515 /* 3c515 */
+ && tc515_probe(dev)
+#endif
+#ifdef CONFIG_ZNET /* Zenith Z-Note and some IBM Thinkpads. */
+ && znet_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
+ && express_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
+ && eepro_probe(dev)
+#endif
+#ifdef CONFIG_DEPCA /* DEC DEPCA */
+ && depca_probe(dev)
+#endif
+#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
+ && ewrk3_probe(dev)
+#endif
+#ifdef CONFIG_APRICOT /* Apricot I82596 */
+ && apricot_probe(dev)
+#endif
+#ifdef CONFIG_EL1 /* 3c501 */
+ && el1_probe(dev)
+#endif
+#if defined(CONFIG_WAVELAN) /* WaveLAN */
+ && wavelan_probe(dev)
+#endif /* defined(CONFIG_WAVELAN) */
+#ifdef CONFIG_EL16 /* 3c507 */
+ && el16_probe(dev)
+#endif
+#ifdef CONFIG_ELPLUS /* 3c505 */
+ && elplus_probe(dev)
+#endif
+#ifdef CONFIG_DE600 /* D-Link DE-600 adapter */
+ && de600_probe(dev)
+#endif
+#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
+ && de620_probe(dev)
+#endif
+#if defined(CONFIG_SK_G16)
+ && SK_init(dev)
+#endif
+#ifdef CONFIG_NI52
+ && ni52_probe(dev)
+#endif
+#ifdef CONFIG_NI65
+ && ni65_probe(dev)
+#endif
+#ifdef CONFIG_LANCE /* ISA LANCE boards */
+ && lance_probe(dev)
+#endif
+#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
+ && atarilance_probe(dev)
+#endif
+#ifdef CONFIG_A2065 /* Commodore/Ameristar A2065 Ethernet Board */
+ && a2065_probe(dev)
+#endif
+#ifdef CONFIG_ARIADNE /* Village Tronic Ariadne Ethernet Board */
+ && ariadne_probe(dev)
+#endif
+#ifdef CONFIG_HYDRA /* Hydra Systems Amiganet Ethernet board */
+ && hydra_probe(dev)
+#endif
+#ifdef CONFIG_SUNLANCE
+ && sparc_lance_probe(dev)
+#endif
+#ifdef CONFIG_TLAN
+ && tlan_probe(dev)
+#endif
+#ifdef CONFIG_LANCE
+ && lance_probe(dev)
+#endif
+ && 1 ) {
+ return 1; /* -ENODEV or -EAGAIN would be more accurate. */
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SDLA
+ extern int sdla_init(struct device *);
+ static struct device sdla0_dev = { "sdla0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, sdla_init, };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&sdla0_dev)
+#endif
+
+/* Run-time ATtachable (Pocket) devices have a different (not "eth#") name. */
+#ifdef CONFIG_ATP /* AT-LAN-TEC (RealTek) pocket adaptor. */
+static struct device atp_dev = {
+ "atp0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, atp_init, /* ... */ };
+# undef NEXT_DEV
+# define NEXT_DEV (&atp_dev)
+#endif
+
+#ifdef CONFIG_ARCNET
+ extern int arcnet_probe(struct device *dev);
+ static struct device arcnet_dev = {
+ "arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, };
+# undef NEXT_DEV
+# define NEXT_DEV (&arcnet_dev)
+#endif
+
+/* The first device defaults to I/O base '0', which means autoprobe. */
+#ifndef ETH0_ADDR
+# define ETH0_ADDR 0
+#endif
+#ifndef ETH0_IRQ
+# define ETH0_IRQ 0
+#endif
+/* "eth0" defaults to autoprobe (== 0), others use a base of 0xffe0 (== -0x20),
+ which means "don't probe". These entries exist only to provide empty
+ slots which may be enabled at boot-time. */
+
+static struct device eth7_dev = {
+ "eth7", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, NEXT_DEV, ethif_probe };
+static struct device eth6_dev = {
+ "eth6", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth7_dev, ethif_probe };
+static struct device eth5_dev = {
+ "eth5", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth6_dev, ethif_probe };
+static struct device eth4_dev = {
+ "eth4", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth5_dev, ethif_probe };
+static struct device eth3_dev = {
+ "eth3", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth4_dev, ethif_probe };
+static struct device eth2_dev = {
+ "eth2", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth3_dev, ethif_probe };
+static struct device eth1_dev = {
+ "eth1", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth2_dev, ethif_probe };
+
+static struct device eth0_dev = {
+ "eth0", 0, 0, 0, 0, ETH0_ADDR, ETH0_IRQ, 0, 0, 0, &eth1_dev, ethif_probe };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&eth0_dev)
+
+#if defined(PLIP) || defined(CONFIG_PLIP)
+ extern int plip_init(struct device *);
+ static struct device plip2_dev = {
+ "plip2", 0, 0, 0, 0, 0x278, 2, 0, 0, 0, NEXT_DEV, plip_init, };
+ static struct device plip1_dev = {
+ "plip1", 0, 0, 0, 0, 0x378, 7, 0, 0, 0, &plip2_dev, plip_init, };
+ static struct device plip0_dev = {
+ "plip0", 0, 0, 0, 0, 0x3BC, 5, 0, 0, 0, &plip1_dev, plip_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&plip0_dev)
+#endif /* PLIP */
+
+#if defined(SLIP) || defined(CONFIG_SLIP)
+ /* To be exact, this node just hooks the initialization
+ routines to the device structures. */
+extern int slip_init_ctrl_dev(struct device *);
+static struct device slip_bootstrap = {
+ "slip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, slip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&slip_bootstrap)
+#endif /* SLIP */
+
+#if defined(CONFIG_MKISS)
+ /* To be exact, this node just hooks the initialization
+ routines to the device structures. */
+extern int mkiss_init_ctrl_dev(struct device *);
+static struct device mkiss_bootstrap = {
+ "mkiss_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, mkiss_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&mkiss_bootstrap)
+#endif /* MKISS */
+
+#if defined(CONFIG_STRIP)
+extern int strip_init_ctrl_dev(struct device *);
+static struct device strip_bootstrap = {
+ "strip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, strip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&strip_bootstrap)
+#endif /* STRIP */
+
+#if defined(CONFIG_SHAPER)
+static struct device shaper_bootstrap = {
+ "shaper", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, shaper_probe, };
+#undef NEXT_DEV
+#define NEXT_DEV (&shaper_bootstrap)
+#endif /* SHAPER */
+
+#if defined(CONFIG_RCPCI)
+static struct device rcpci_bootstrap = {
+ "rcpci", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, rcpci_probe, };
+#undef NEXT_DEV
+#define NEXT_DEV (&rcpci_bootstrap)
+#endif /* RCPCI */
+
+#if defined(CONFIG_PPP)
+extern int ppp_init(struct device *);
+static struct device ppp_bootstrap = {
+ "ppp_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, ppp_init, };
+#undef NEXT_DEV
+#define NEXT_DEV (&ppp_bootstrap)
+#endif /* PPP */
+
+#ifdef CONFIG_DUMMY
+ extern int dummy_init(struct device *dev);
+ static struct device dummy_dev = {
+ "dummy", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, dummy_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&dummy_dev)
+#endif
+
+#ifdef CONFIG_EQUALIZER
+extern int eql_init(struct device *dev);
+struct device eql_dev = {
+ "eql", /* Master device for IP traffic load
+ balancing */
+ 0x0, 0x0, 0x0, 0x0, /* recv end/start; mem end/start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ eql_init /* set up the rest */
+};
+# undef NEXT_DEV
+# define NEXT_DEV (&eql_dev)
+#endif
+
+#ifdef CONFIG_IBMTR
+
+ extern int tok_probe(struct device *dev);
+ static struct device ibmtr_dev1 = {
+ "tr1", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa24, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev1)
+
+
+ static struct device ibmtr_dev0 = {
+ "tr0", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa20, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev0)
+
+#endif
+
+#ifdef CONFIG_DEFXX
+ extern int dfx_probe(struct device *dev);
+ static struct device fddi7_dev =
+ {"fddi7", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, dfx_probe};
+ static struct device fddi6_dev =
+ {"fddi6", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi7_dev, dfx_probe};
+ static struct device fddi5_dev =
+ {"fddi5", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi6_dev, dfx_probe};
+ static struct device fddi4_dev =
+ {"fddi4", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi5_dev, dfx_probe};
+ static struct device fddi3_dev =
+ {"fddi3", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi4_dev, dfx_probe};
+ static struct device fddi2_dev =
+ {"fddi2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi3_dev, dfx_probe};
+ static struct device fddi1_dev =
+ {"fddi1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi2_dev, dfx_probe};
+ static struct device fddi0_dev =
+ {"fddi0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi1_dev, dfx_probe};
+
+#undef NEXT_DEV
+#define NEXT_DEV (&fddi0_dev)
+#endif
+
+#ifdef CONFIG_NET_IPIP
+ extern int tunnel_init(struct device *);
+
+ static struct device tunnel_dev1 =
+ {
+ "tunl1", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+
+ static struct device tunnel_dev0 =
+ {
+ "tunl0", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ &tunnel_dev1, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&tunnel_dev0)
+
+#endif
+
+#ifdef CONFIG_APFDDI
+ extern int apfddi_init(struct device *dev);
+ static struct device fddi_dev = {
+ "fddi", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, apfddi_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&fddi_dev)
+#endif
+
+#ifdef CONFIG_APBIF
+ extern int bif_init(struct device *dev);
+ static struct device bif_dev = {
+ "bif", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, bif_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&bif_dev)
+#endif
+
+extern int loopback_init(struct device *dev);
+struct device loopback_dev = {
+ "lo", /* Software Loopback interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ loopback_init /* loopback_init should set up the rest */
+};
+
+struct device *dev_base = &loopback_dev;
diff --git a/linux/src/drivers/net/ac3200.c b/linux/src/drivers/net/ac3200.c
new file mode 100644
index 0000000..600949f
--- /dev/null
+++ b/linux/src/drivers/net/ac3200.c
@@ -0,0 +1,385 @@
+/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
+/*
+ Written 1993, 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov, or
+ C/O Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is the driver for the Ansel Communications Model 3200 EISA Ethernet LAN
+ Adapter. The programming information is from the users manual, as related
+ by glee@ardnassak.math.clemson.edu.
+ */
+
+static const char *version =
+ "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+/* Offsets from the base address. */
+#define AC_NIC_BASE 0x00
+#define AC_SA_PROM 0x16 /* The station address PROM. */
+#define AC_ADDR0 0x00 /* Prefix station address values. */
+#define AC_ADDR1 0x40 /* !!!!These are just guesses!!!! */
+#define AC_ADDR2 0x90
+#define AC_ID_PORT 0xC80
+#define AC_EISA_ID 0x0110d305
+#define AC_RESET_PORT 0xC84
+#define AC_RESET 0x00
+#define AC_ENABLE 0x01
+#define AC_CONFIG 0xC90 /* The configuration port. */
+
+#define AC_IO_EXTENT 0x10 /* IS THIS REALLY TRUE ??? */
+ /* Actually accessed is:
+ * AC_NIC_BASE (0-15)
+ * AC_SA_PROM (0-5)
+ * AC_ID_PORT (0-3)
+ * AC_RESET_PORT
+ * AC_CONFIG
+ */
+
+/* Decoding of the configuration register. */
+static unsigned char config2irqmap[8] = {15, 12, 11, 10, 9, 7, 5, 3};
+static int addrmap[8] =
+{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
+static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
+
+#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
+#define config2mem(configval) addrmap[(configval) & 7]
+#define config2name(configval) port_name[((configval) >> 6) & 3]
+
+/* First and last 8390 pages. */
+#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
+#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
+
+int ac3200_probe(struct device *dev);
+static int ac_probe1(int ioaddr, struct device *dev);
+
+static int ac_open(struct device *dev);
+static void ac_reset_8390(struct device *dev);
+static void ac_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ac_block_output(struct device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static void ac_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int ac_close_card(struct device *dev);
+
+
+/* Probe for the AC3200.
+
+ The AC3200 can be identified by either the EISA configuration registers,
+ or the unique value in the station address PROM.
+ */
+
+int ac3200_probe(struct device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+
+ if (ioaddr > 0x1ff) /* Check a single specified location. */
+ return ac_probe1(ioaddr, dev);
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return ENXIO;
+
+ /* If you have a pre 0.99pl15 machine you should delete this line. */
+ if ( ! EISA_bus)
+ return ENXIO;
+
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (check_region(ioaddr, AC_IO_EXTENT))
+ continue;
+ if (ac_probe1(ioaddr, dev) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+static int ac_probe1(int ioaddr, struct device *dev)
+{
+ int i;
+
+#ifndef final_version
+ printk("AC3200 ethercard probe at %#3x:", ioaddr);
+
+ for(i = 0; i < 6; i++)
+ printk(" %02x", inb(ioaddr + AC_SA_PROM + i));
+#endif
+
+ /* !!!!The values of AC_ADDRn (see above) should be corrected when we
+ find out the correct station address prefix!!!! */
+ if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
+ || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
+ || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
+#ifndef final_version
+ printk(" not found (invalid prefix).\n");
+#endif
+ return ENODEV;
+ }
+
+ /* The correct probe method is to check the EISA ID. */
+ for (i = 0; i < 4; i++)
+ if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
+ printk("EISA ID mismatch, %8x vs %8x.\n",
+ inl(ioaddr + AC_ID_PORT), AC_EISA_ID);
+ return ENODEV;
+ }
+
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("ac3200.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
+
+#ifndef final_version
+ printk("\nAC3200 ethercard configuration register is %#02x,"
+ " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
+ inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
+ inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
+#endif
+
+ /* Assign and allocate the interrupt now. */
+ if (dev->irq == 0)
+ dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
+ else if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "ac3200", NULL)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to allocate memory for dev->priv.\n");
+ free_irq(dev->irq, NULL);
+ return -ENOMEM;
+ }
+
+ request_region(ioaddr, AC_IO_EXTENT, "ac3200");
+
+ dev->base_addr = ioaddr;
+
+#ifdef notyet
+ if (dev->mem_start) { /* Override the value from the board. */
+ for (i = 0; i < 7; i++)
+ if (addrmap[i] == dev->mem_start)
+ break;
+ if (i >= 7)
+ i = 0;
+ outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
+ }
+#endif
+
+ dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
+ dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start
+ + (AC_STOP_PG - AC_START_PG)*256;
+
+ ei_status.name = "AC3200";
+ ei_status.tx_start_page = AC_START_PG;
+ ei_status.rx_start_page = AC_START_PG + TX_PAGES;
+ ei_status.stop_page = AC_STOP_PG;
+ ei_status.word16 = 1;
+
+ printk("\n%s: AC3200 at %#x, IRQ %d, %s port, shared memory %#lx-%#lx.\n",
+ dev->name, ioaddr, dev->irq, port_name[dev->if_port],
+ dev->mem_start, dev->mem_end-1);
+
+ if (ei_debug > 0)
+ printk("%s", version);
+
+ ei_status.reset_8390 = &ac_reset_8390;
+ ei_status.block_input = &ac_block_input;
+ ei_status.block_output = &ac_block_output;
+ ei_status.get_8390_hdr = &ac_get_8390_hdr;
+
+ dev->open = &ac_open;
+ dev->stop = &ac_close_card;
+ NS8390_init(dev, 0);
+ return 0;
+}
+
+static int ac_open(struct device *dev)
+{
+#ifdef notyet
+ /* Someday we may enable the IRQ and shared memory here. */
+ int ioaddr = dev->base_addr;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "ac3200", NULL))
+ return -EAGAIN;
+#endif
+
+ ei_open(dev);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static void ac_reset_8390(struct device *dev)
+{
+ ushort ioaddr = dev->base_addr;
+
+ outb(AC_RESET, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
+
+ ei_status.txing = 0;
+ outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ac_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = dev->mem_start + ((ring_page - AC_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void ac_block_input(struct device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + ring_offset - (AC_START_PG<<8);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+}
+
+static void ac_block_output(struct device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ unsigned long shmem = dev->mem_start + ((start_page - AC_START_PG)<<8);
+
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ac_close_card(struct device *dev)
+{
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+#ifdef notyet
+ /* We should someday disable shared memory and interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ ei_close(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+#ifdef MODULE
+#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_AC32_CARDS] = { 0, };
+static struct device dev_ac32[MAX_AC32_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_AC32_CARDS] = { 0, };
+static int irq[MAX_AC32_CARDS] = { 0, };
+static int mem[MAX_AC32_CARDS] = { 0, };
+
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct device *dev = &dev_ac32[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
+ dev->init = ac3200_probe;
+ /* Default is to only install one card. */
+ if (io[this_dev] == 0 && this_dev != 0) break;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct device *dev = &dev_ac32[this_dev];
+ if (dev->priv != NULL) {
+ kfree(dev->priv);
+ dev->priv = NULL;
+ /* Someday free_irq + irq2dev may be in ac_close_card() */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(dev->base_addr, AC_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c ac3200.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/apricot.c b/linux/src/drivers/net/apricot.c
new file mode 100644
index 0000000..57fccaf
--- /dev/null
+++ b/linux/src/drivers/net/apricot.c
@@ -0,0 +1,1046 @@
+/* apricot.c: An Apricot 82596 ethernet driver for linux. */
+/*
+ Apricot
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@super.org or
+ C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+
+
+*/
+
+static const char *version = "apricot.c:v0.2 05/12/94\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#ifndef HAVE_PORTRESERVE
+#define check_region(addr, size) 0
+#define request_region(addr, size,name) do ; while(0)
+#endif
+
+#ifndef HAVE_ALLOC_SKB
+#define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority)
+#define kfree_skbmem(buff, size) kfree_s(buff,size)
+#endif
+
+#define APRICOT_DEBUG 1
+
+#ifdef APRICOT_DEBUG
+int i596_debug = APRICOT_DEBUG;
+#else
+int i596_debug = 1;
+#endif
+
+#define APRICOT_TOTAL_SIZE 17
+
+#define I596_NULL -1
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+struct i596_cmd {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *next;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ struct i596_tbd *next;
+ unsigned char *data;
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ struct i596_tbd *tbd;
+ unsigned short size;
+ unsigned short pad;
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ struct i596_rfd *next;
+ long rbd;
+ unsigned short count;
+ unsigned short size;
+ unsigned char data[1532];
+};
+
+#define RX_RING_SIZE 8
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ unsigned long crc_err;
+ unsigned long align_err;
+ unsigned long resource_err;
+ unsigned long over_err;
+ unsigned long rcvdt_err;
+ unsigned long short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ unsigned long stat;
+ struct i596_scb *scb;
+};
+
+struct i596_scp {
+ unsigned long sysbus;
+ unsigned long pad;
+ struct i596_iscp *iscp;
+};
+
+struct i596_private {
+ volatile struct i596_scp scp;
+ volatile struct i596_iscp iscp;
+ volatile struct i596_scb scb;
+ volatile struct i596_cmd set_add;
+ char eth_addr[8];
+ volatile struct i596_cmd set_conf;
+ char i596_config[16];
+ volatile struct i596_cmd tdr;
+ unsigned long stat;
+ int last_restart;
+ struct i596_rfd *rx_tail;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ struct enet_statistics stats;
+};
+
+char init_setup[] = {
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+ 0x80, /* don't save bad frames */
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
+
+static int i596_open(struct device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct device *dev);
+static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int i596_close(struct device *dev);
+static struct enet_statistics *i596_get_stats(struct device *dev);
+static void i596_add_cmd(struct device *dev, struct i596_cmd *cmd);
+static void print_eth(unsigned char *);
+static void set_multicast_list(struct device *dev);
+
+
+/*
+ * Allocate up to 'num' receive frame descriptors and build them into a
+ * circular list headed at lp->scb.rfd.  The first RFD allocated becomes
+ * the tail (marked CMD_EOL) and, once the loop is done, its next
+ * pointer is bent back to the head to close the ring.  Returns the
+ * number of RFDs actually allocated (may be < num on kmalloc failure).
+ */
+static inline int
+init_rx_bufs(struct device *dev, int num)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int i;
+ struct i596_rfd *rfd;
+
+ lp->scb.rfd = (struct i596_rfd *)I596_NULL;
+
+ if (i596_debug > 1) printk ("%s: init_rx_bufs %d.\n", dev->name, num);
+
+ for (i = 0; i < num; i++)
+ {
+ if (!(rfd = (struct i596_rfd *)kmalloc(sizeof(struct i596_rfd), GFP_KERNEL)))
+ break;
+
+ rfd->stat = 0x0000;
+ rfd->rbd = I596_NULL;
+ rfd->count = 0;
+ rfd->size = 1532;
+ if (i == 0)
+ {
+ /* First descriptor ends the list and becomes the ring tail. */
+ rfd->cmd = CMD_EOL;
+ lp->rx_tail = rfd;
+ }
+ else
+ rfd->cmd = 0x0000;
+
+ /* Prepend, so the list grows backwards from the tail. */
+ rfd->next = lp->scb.rfd;
+ lp->scb.rfd = rfd;
+ }
+
+ if (i != 0)
+ lp->rx_tail->next = lp->scb.rfd; /* close the ring */
+
+ return (i);
+}
+
+/*
+ * Free the circular RFD ring built by init_rx_bufs().  The ring is
+ * first broken open at the tail, then walked from the head until the
+ * tail is reached (the tail itself is freed by falling out of the
+ * do/while after its kfree_s on the final iteration's predecessor —
+ * NOTE(review): the loop stops when rfd == rx_tail, so the tail node
+ * itself appears not to be freed; also assumes the ring is non-empty.
+ * Confirm against init_rx_bufs() failure paths.)
+ */
+static inline void
+remove_rx_bufs(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ struct i596_rfd *rfd = lp->scb.rfd;
+
+ lp->rx_tail->next = (struct i596_rfd *)I596_NULL;
+
+ do
+ {
+ lp->scb.rfd = rfd->next;
+ kfree_s(rfd, sizeof(struct i596_rfd));
+ rfd = lp->scb.rfd;
+ }
+ while (rfd != lp->rx_tail);
+}
+
+/*
+ * Initialise the 82596's shared-memory structures (SCP -> ISCP -> SCB)
+ * and bring the chip up: point it at the SCP, wait for the ISCP busy
+ * flag to clear, queue the Configure / SASetup / TDR commands, then
+ * start the receive unit.  All waits are bounded busy-polls.
+ *
+ * NOTE(review): the "status, command" comma expressions read the
+ * volatile SCB status (forcing a bus access) and then test command;
+ * presumably intentional for this hardware — confirm before changing.
+ */
+static inline void
+init_i596_mem(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ short ioaddr = dev->base_addr;
+ int boguscnt = 100;
+
+ /* change the scp address */
+ outw(0, ioaddr);
+ outw(0, ioaddr);
+ outb(4, ioaddr+0xf);
+ outw(((((int)&lp->scp) & 0xffff) | 2), ioaddr);
+ outw((((int)&lp->scp)>>16) & 0xffff, ioaddr);
+
+ lp->last_cmd = jiffies;
+
+ lp->scp.sysbus = 0x00440000;
+ lp->scp.iscp = &(lp->iscp);
+ lp->iscp.scb = &(lp->scb);
+ lp->iscp.stat = 0x0001;
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = lp->scb.cmd = (struct i596_cmd *) I596_NULL;
+
+ if (i596_debug > 2) printk("%s: starting i82596.\n", dev->name);
+
+ /* Clear any latched interrupt, then channel-attention the chip. */
+ (void) inb (ioaddr+0x10);
+ outb(4, ioaddr+0xf);
+ outw(0, ioaddr+4);
+
+ /* Chip clears iscp.stat when initialisation completes. */
+ while (lp->iscp.stat)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i82596 initialization timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.command = 0;
+
+ memcpy (lp->i596_config, init_setup, 14);
+ lp->set_conf.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->set_conf);
+
+ memcpy (lp->eth_addr, dev->dev_addr, 6);
+ lp->set_add.command = CmdSASetup;
+ i596_add_cmd(dev, &lp->set_add);
+
+ lp->tdr.command = CmdTDR;
+ i596_add_cmd(dev, &lp->tdr);
+
+ /* Wait for the chip to accept the previous SCB command. */
+ boguscnt = 200;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: receive unit start timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.command = RX_START;
+ outw(0, ioaddr+4);
+
+ boguscnt = 200;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i82596 init timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ return;
+}
+
+/*
+ * Drain completed receive frame descriptors.  For each RFD whose
+ * STAT_C (complete) bit is set: copy good frames into a fresh skb and
+ * hand them to netif_rx(); count error frames into lp->stats.  The
+ * processed RFD is then recycled to the ring tail (its CMD_EOL bit
+ * moves with it).  Always returns 0.
+ */
+static inline int
+i596_rx(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int frames = 0;
+
+ if (i596_debug > 3) printk ("i596_rx()\n");
+
+ while ((lp->scb.rfd->stat) & STAT_C)
+ {
+ if (i596_debug >2) print_eth(lp->scb.rfd->data);
+
+ if ((lp->scb.rfd->stat) & STAT_OK)
+ {
+ /* a good frame */
+ int pkt_len = lp->scb.rfd->count & 0x3fff;
+ struct sk_buff *skb = dev_alloc_skb(pkt_len);
+
+ frames++;
+
+ if (skb == NULL)
+ {
+ printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+
+ if (i596_debug > 4) print_eth(skb->data);
+ }
+ else
+ {
+ /* Map the 82596 status bits onto the generic rx error counters. */
+ lp->stats.rx_errors++;
+ if ((lp->scb.rfd->stat) & 0x0001) lp->stats.collisions++;
+ if ((lp->scb.rfd->stat) & 0x0080) lp->stats.rx_length_errors++;
+ if ((lp->scb.rfd->stat) & 0x0100) lp->stats.rx_over_errors++;
+ if ((lp->scb.rfd->stat) & 0x0200) lp->stats.rx_fifo_errors++;
+ if ((lp->scb.rfd->stat) & 0x0400) lp->stats.rx_frame_errors++;
+ if ((lp->scb.rfd->stat) & 0x0800) lp->stats.rx_crc_errors++;
+ if ((lp->scb.rfd->stat) & 0x1000) lp->stats.rx_length_errors++;
+ }
+
+ /* Recycle this RFD: clear it, advance head, make it the new tail. */
+ lp->scb.rfd->stat = 0;
+ lp->rx_tail->cmd = 0;
+ lp->rx_tail = lp->scb.rfd;
+ lp->scb.rfd = lp->scb.rfd->next;
+ lp->rx_tail->count = 0;
+ lp->rx_tail->cmd = CMD_EOL;
+
+ }
+
+ if (i596_debug > 3) printk ("frames %d\n", frames);
+
+ return 0;
+}
+
+/*
+ * Abort and free every command still queued on lp->cmd_head.  Pending
+ * transmits are counted as aborted tx errors and their skb/tx_cmd
+ * storage is released; multicast-list commands free their variable
+ * sized buffer; the statically embedded commands (configure, setup,
+ * TDR) are merely unlinked.  Finishes by syncing lp->scb.cmd with the
+ * (now empty) queue after a bounded wait for the chip.
+ */
+static inline void
+i596_cleanup_cmd(struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_cleanup_cmd\n");
+
+ while (lp->cmd_head != (struct i596_cmd *) I596_NULL)
+ {
+ ptr = lp->cmd_head;
+
+ lp->cmd_head = lp->cmd_head->next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7)
+ {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ /* The skb header lives immediately before its data buffer. */
+ struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)tx_cmd, (sizeof (struct tx_cmd) + sizeof (struct i596_tbd)));
+ break;
+ }
+ case CmdMulticastList:
+ {
+ /* The address count is stored right after the command block. */
+ unsigned short count = *((unsigned short *) (ptr + 1));
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)ptr, (sizeof (struct i596_cmd) + count + 2));
+ break;
+ }
+ default:
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ }
+ }
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_cleanup_cmd timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.cmd = lp->cmd_head;
+}
+
+/*
+ * Hard-reset the command and receive units: wait for the chip to go
+ * idle, issue CUC_ABORT|RX_ABORT, wait for the abort to complete,
+ * discard all queued commands and pending receive frames, then
+ * re-run the full memory initialisation.  The device is marked
+ * stopped/busy for the duration.
+ */
+static inline void
+i596_reset(struct device *dev, struct i596_private *lp, int ioaddr)
+{
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_reset\n");
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_reset timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ lp->scb.command = CUC_ABORT|RX_ABORT;
+ outw(0, ioaddr+4);
+
+ /* wait for shutdown */
+ boguscnt = 400;
+
+ /* (was "(status, command) || command" -- the second test of
+ scb.command was redundant; same poll as every other wait loop) */
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_reset 2 timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ i596_cleanup_cmd(lp);
+ i596_rx(dev);
+
+ dev->start = 1;
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ init_i596_mem(dev);
+}
+
+/*
+ * Append a command block to the chip's command chain.  With interrupts
+ * disabled: if the chain is non-empty the command is simply linked at
+ * the tail (the chip will reach it via CMD_EOL/CMD_INTR chaining);
+ * otherwise the chip is idle-polled and restarted with CUC_START.  If
+ * the backlog exceeds 16 entries and the oldest command has been
+ * pending for >= 25 jiffies, the controller is assumed wedged and
+ * reset.
+ */
+static void i596_add_cmd(struct device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_add_cmd\n");
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL|CMD_INTR);
+ cmd->next = (struct i596_cmd *) I596_NULL;
+
+ save_flags(flags);
+ cli();
+ if (lp->cmd_head != (struct i596_cmd *) I596_NULL)
+ lp->cmd_tail->next = cmd;
+ else
+ {
+ /* Queue was empty: hand the command to the chip directly. */
+ lp->cmd_head = cmd;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_add_cmd timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.cmd = cmd;
+ lp->scb.command = CUC_START;
+ outw (0, ioaddr+4);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ lp->cmd_head = lp->scb.cmd;
+ restore_flags(flags);
+
+ if (lp->cmd_backlog > 16)
+ {
+ int tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < 25) return;
+
+ printk("%s: command unit timed out, status resetting.\n", dev->name);
+
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+/*
+ * Bring the interface up: grab the IRQ, record the irq->dev mapping,
+ * allocate the receive ring, mark the device running and initialise
+ * the 82596.  Fails with -EAGAIN if the IRQ is taken or fewer than 4
+ * receive buffers could be allocated.
+ */
+static int
+i596_open(struct device *dev)
+{
+ int i;
+
+ if (i596_debug > 1)
+ printk("%s: i596_open() irq %d.\n", dev->name, dev->irq);
+
+ if (request_irq(dev->irq, &i596_interrupt, 0, "apricot", NULL))
+ return -EAGAIN;
+
+ irq2dev_map[dev->irq] = dev;
+
+ /* Build the receive ring exactly once (the original code called
+ init_rx_bufs() twice here, leaking the first ring of buffers). */
+ if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
+ printk("%s: only able to allocate %d receive buffers\n", dev->name, i);
+
+ if (i < 4)
+ {
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+ return -EAGAIN;
+ }
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+
+ /* Initialize the 82596 memory */
+ init_i596_mem(dev);
+
+ return 0; /* Always succeed */
+}
+
+/*
+ * Queue a frame for transmission.  Handles the classic 2.0-era driver
+ * duties: detect a stuck transmitter (tbusy + stale trans_start) and
+ * kick or reset the board; pass NULL skbs to dev_tint(); otherwise
+ * build a tx_cmd + tbd pair around the skb data and hand it to
+ * i596_add_cmd().  Returns 1 to ask the stack to retry, 0 otherwise.
+ */
+static int
+i596_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ struct tx_cmd *tx_cmd;
+
+ if (i596_debug > 2) printk ("%s: Apricot start xmit\n", dev->name);
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk("%s: transmit timed out, status resetting.\n",
+ dev->name);
+ lp->stats.tx_errors++;
+ /* Try to restart the adaptor */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ /* Kicking alone did not help last time: full reset. */
+ if (i596_debug > 1) printk ("Resetting board.\n");
+
+ /* Shutdown and restart */
+ i596_reset(dev,lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ if (i596_debug > 1) printk ("Kicking board.\n");
+
+ lp->scb.command = CUC_START|RX_START;
+ outw(0, ioaddr+4);
+
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ }
+
+ /* If some higher level thinks we've misses a tx-done interrupt
+ we are passed NULL. n.b. dev_tint handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* shouldn't happen */
+ if (skb->len <= 0) return 0;
+
+ if (i596_debug > 3) printk("%s: i596_start_xmit() called\n", dev->name);
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else
+ {
+ /* Pad short frames up to the Ethernet minimum. */
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ dev->trans_start = jiffies;
+
+ tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
+ if (tx_cmd == NULL)
+ {
+ printk ("%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.tx_dropped++;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ else
+ {
+ /* The tbd is allocated in the same chunk, right after tx_cmd. */
+ tx_cmd->tbd = (struct i596_tbd *) (tx_cmd + 1);
+ tx_cmd->tbd->next = (struct i596_tbd *) I596_NULL;
+
+ tx_cmd->cmd.command = CMD_FLEX|CmdTx;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tx_cmd->tbd->pad = 0;
+ tx_cmd->tbd->size = EOF | length;
+
+ tx_cmd->tbd->data = skb->data;
+
+ if (i596_debug > 3) print_eth(skb->data);
+
+ i596_add_cmd(dev, (struct i596_cmd *)tx_cmd);
+
+ lp->stats.tx_packets++;
+ }
+ }
+
+ dev->tbusy = 0;
+
+ return 0;
+}
+
+
+static void print_eth(unsigned char *add)
+{
+ int i;
+
+ printk ("Dest ");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", add[i]);
+ printk ("\n");
+
+ printk ("Source");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", add[i+6]);
+ printk ("\n");
+ printk ("type %2.2X%2.2X\n", add[12], add[13]);
+}
+
+/*
+ * Probe for the on-board Apricot 82596 interface, which can only live
+ * at I/O 0x300.  Validates the EEPROM station address (checksum must be
+ * a multiple of 0x100 and the OUI must be 00:00:49), claims the I/O
+ * region, fills in the device methods and allocates/aligns the private
+ * area.  Returns 0 on success, ENODEV otherwise.
+ */
+int apricot_probe(struct device *dev)
+{
+ int i;
+ struct i596_private *lp;
+ int checksum = 0;
+ int ioaddr = 0x300;
+ char eth_addr[8];
+
+ /* this is easy the ethernet interface can only be at 0x300 */
+ /* first check nothing is already registered here */
+
+ if (check_region(ioaddr, APRICOT_TOTAL_SIZE))
+ return ENODEV;
+
+ for (i = 0; i < 8; i++)
+ {
+ eth_addr[i] = inb(ioaddr+8+i);
+ checksum += eth_addr[i];
+ }
+
+ /* checksum is a multiple of 0x100, got this wrong first time
+ some machines have 0x100, some 0x200. The DOS driver doesn't
+ even bother with the checksum */
+
+ if (checksum % 0x100) return ENODEV;
+
+ /* Some other boards trip the checksum.. but then appear as ether
+ address 0. Trap these - AC */
+
+ if(memcmp(eth_addr,"\x00\x00\x49",3)!= 0)
+ return ENODEV;
+
+ request_region(ioaddr, APRICOT_TOTAL_SIZE, "apricot");
+
+ dev->base_addr = ioaddr;
+ ether_setup(dev);
+ printk("%s: Apricot 82596 at %#3x,", dev->name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
+
+ /* NOTE(review): base_addr was already set above; this second
+ assignment is redundant but harmless. */
+ dev->base_addr = ioaddr;
+ dev->irq = 10;
+ printk(" IRQ %d.\n", dev->irq);
+
+ if (i596_debug > 0) printk("%s", version);
+
+ /* The APRICOT-specific entries in the device structure. */
+ dev->open = &i596_open;
+ dev->stop = &i596_close;
+ dev->hard_start_xmit = &i596_start_xmit;
+ dev->get_stats = &i596_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* NOTE(review): kmalloc result is not checked; a NULL return here
+ would be dereferenced below -- confirm and add a check. */
+ dev->mem_start = (int)kmalloc(sizeof(struct i596_private)+ 0x0f, GFP_KERNEL);
+ /* align for scp */
+ dev->priv = (void *)((dev->mem_start + 0xf) & 0xfffffff0);
+
+ lp = (struct i596_private *)dev->priv;
+ memset((void *)lp, 0, sizeof(struct i596_private));
+ lp->scb.command = 0;
+ lp->scb.cmd = (struct i596_cmd *) I596_NULL;
+ lp->scb.rfd = (struct i596_rfd *)I596_NULL;
+
+ return 0;
+}
+
+/*
+ * Interrupt handler.  Reads the SCB status, then:
+ *  - on command-complete / CU-idle (0x8000 / 0x2000): reaps every
+ *    finished command off cmd_head, freeing tx buffers, multicast
+ *    lists, and reporting TDR results;
+ *  - on frame-received / RU-not-ready (0x4000 / 0x1000): runs
+ *    i596_rx() and requests RX_START if the device is up.
+ * Finally acks the interrupt by writing the latched status bits back
+ * to scb.command and issuing a channel attention.
+ */
+static void
+i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct i596_private *lp;
+ short ioaddr;
+ int boguscnt = 200;
+ unsigned short status, ack_cmd = 0;
+
+ if (dev == NULL) {
+ printk ("i596_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (i596_debug > 3) printk ("%s: i596_interrupt(): irq %d\n",dev->name, irq);
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+
+ lp = (struct i596_private *)dev->priv;
+
+ /* Wait for any previous SCB command to be accepted by the chip. */
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ status = lp->scb.status;
+
+ if (i596_debug > 4)
+ printk("%s: i596 interrupt, status %4.4x.\n", dev->name, status);
+
+ /* Acknowledge exactly the event bits we observed. */
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x8000) || (status & 0x2000))
+ {
+ struct i596_cmd *ptr;
+
+ if ((i596_debug > 4) && (status & 0x8000))
+ printk("%s: i596 interrupt completed command.\n", dev->name);
+ if ((i596_debug > 4) && (status & 0x2000))
+ printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700);
+
+ /* Reap every command the chip has marked complete (STAT_C). */
+ while ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (lp->cmd_head->status & STAT_C))
+ {
+ ptr = lp->cmd_head;
+
+ lp->cmd_head = lp->cmd_head->next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7)
+ {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ /* The skb header sits immediately before its data buffer. */
+ struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ if ((ptr->status) & STAT_OK)
+ {
+ if (i596_debug >2) print_eth(skb->data);
+ }
+ else
+ {
+ lp->stats.tx_errors++;
+ if ((ptr->status) & 0x0020) lp->stats.collisions++;
+ if (!((ptr->status) & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if ((ptr->status) & 0x0400) lp->stats.tx_carrier_errors++;
+ if ((ptr->status) & 0x0800) lp->stats.collisions++;
+ if ((ptr->status) & 0x1000) lp->stats.tx_aborted_errors++;
+ }
+
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)tx_cmd, (sizeof (struct tx_cmd) + sizeof (struct i596_tbd)));
+ break;
+ }
+ case CmdMulticastList:
+ {
+ /* Address count is stored right after the command block. */
+ unsigned short count = *((unsigned short *) (ptr + 1));
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)ptr, (sizeof (struct i596_cmd) + count + 2));
+ break;
+ }
+ case CmdTDR:
+ {
+ /* TDR result word follows the command block. */
+ unsigned long status = *((unsigned long *) (ptr + 1));
+
+ if (status & 0x8000)
+ {
+ if (i596_debug > 3)
+ printk("%s: link ok.\n", dev->name);
+ }
+ else
+ {
+ if (status & 0x4000)
+ printk("%s: Transceiver problem.\n", dev->name);
+ if (status & 0x2000)
+ printk("%s: Termination problem.\n", dev->name);
+ if (status & 0x1000)
+ printk("%s: Short circuit.\n", dev->name);
+
+ printk("%s: Time %ld.\n", dev->name, status & 0x07ff);
+ }
+ }
+ /* NOTE(review): no break above -- CmdTDR falls through to the
+ default unlink/last_cmd update; presumably intentional. */
+ default:
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+
+ lp->last_cmd = jiffies;
+ }
+ }
+
+ /* Clear SUSP/INTR chaining bits on all still-queued commands. */
+ ptr = lp->cmd_head;
+ while ((ptr != (struct i596_cmd *) I596_NULL) && (ptr != lp->cmd_tail))
+ {
+ ptr->command &= 0x1fff;
+ ptr = ptr->next;
+ }
+
+ if ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd |= CUC_START;
+ lp->scb.cmd = lp->cmd_head;
+ }
+
+ if ((status & 0x1000) || (status & 0x4000))
+ {
+ if ((i596_debug > 4) && (status & 0x4000))
+ printk("%s: i596 interrupt received a frame.\n", dev->name);
+ if ((i596_debug > 4) && (status & 0x1000))
+ printk("%s: i596 interrupt receive unit inactive %x.\n", dev->name, status & 0x0070);
+
+ i596_rx(dev);
+
+ if (dev->start) ack_cmd |= RX_START;
+ }
+
+ /* acknowledge the interrupt */
+
+/*
+ if ((lp->scb.cmd != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd | = CUC_START;
+*/
+ boguscnt = 100;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ lp->scb.command = ack_cmd;
+
+ /* Clear the board's interrupt latch and channel-attention the chip. */
+ (void) inb (ioaddr+0x10);
+ outb (4, ioaddr+0xf);
+ outw (0, ioaddr+4);
+
+ if (i596_debug > 4)
+ printk("%s: exiting interrupt.\n", dev->name);
+
+ dev->interrupt = 0;
+ return;
+}
+
+/*
+ * Take the interface down: abort the command and receive units, free
+ * all queued commands, wait (bounded) for the abort to land, then
+ * release the IRQ, the irq->dev mapping and the receive ring.
+ */
+static int
+i596_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int boguscnt = 200;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (i596_debug > 1)
+ printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status);
+
+ lp->scb.command = CUC_ABORT|RX_ABORT;
+ outw(0, ioaddr+4);
+
+ i596_cleanup_cmd(lp);
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: close timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+ remove_rx_bufs(dev);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Return the interface statistics accumulated in the private area. */
+static struct enet_statistics *
+i596_get_stats(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+/*
+ * Set or clear the multicast filter for this adaptor.  With a
+ * non-empty list, build a CmdMulticastList block (count word followed
+ * by the packed 6-byte addresses) and queue it; otherwise toggle the
+ * promiscuous bit in the configure block and re-queue CmdConfigure.
+ */
+static void set_multicast_list(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ struct i596_cmd *cmd;
+
+ if (i596_debug > 1)
+ printk ("%s: set multicast list %d\n", dev->name, dev->mc_count);
+
+ if (dev->mc_count > 0)
+ {
+ struct dev_mc_list *dmi;
+ char *cp;
+ cmd = (struct i596_cmd *) kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ if (cmd == NULL)
+ {
+ printk ("%s: set_multicast Memory squeeze.\n", dev->name);
+ return;
+ }
+ cmd->command = CmdMulticastList;
+ *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ cp=((char *)(cmd + 1))+2;
+ for(dmi=dev->mc_list;dmi!=NULL;dmi=dmi->next)
+ {
+ /* Copy the hardware address, not the head of the list node
+ (the original "memcpy(cp, dmi, 6)" copied the next pointer). */
+ memcpy(cp, dmi->dmi_addr, 6);
+ cp+=6;
+ }
+ print_eth (((unsigned char *)(cmd + 1)) + 2);
+ i596_add_cmd(dev, cmd);
+ }
+ else
+ {
+ /* A configure command is already queued; don't queue another. */
+ if (lp->set_conf.next != (struct i596_cmd * ) I596_NULL)
+ return;
+ if (dev->mc_count == 0 && !(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
+ {
+ if(dev->flags&IFF_ALLMULTI)
+ dev->flags|=IFF_PROMISC;
+ lp->i596_config[8] &= ~0x01;
+ }
+ else
+ lp->i596_config[8] |= 0x01;
+
+ i596_add_cmd(dev, &lp->set_conf);
+ }
+}
+
+#ifdef HAVE_DEVLIST
+static unsigned int apricot_portlist[] = {0x300, 0};
+struct netdev_entry apricot_drv =
+{"apricot", apricot_probe, APRICOT_TOTAL_SIZE, apricot_portlist};
+#endif
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_apricot = {
+ devicename, /* device name inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 10,
+ 0, 0, 0, NULL, apricot_probe };
+
+static int io = 0x300;
+static int irq = 10;
+
+/*
+ * Module entry point for the apricot driver: apply the io/irq module
+ * parameters to the static device and register it (apricot_probe runs
+ * via dev->init).  Returns 0 on success, -EIO if registration fails.
+ */
+int
+init_module(void)
+{
+ dev_apricot.base_addr = io;
+ dev_apricot.irq = irq;
+ if (register_netdev(&dev_apricot) != 0)
+ return -EIO;
+ return 0;
+}
+
+/*
+ * Module exit for the apricot driver: unregister the device, free the
+ * (unaligned) private-area allocation saved in mem_start, and release
+ * the I/O region so the module can be loaded again.
+ */
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_apricot);
+ kfree_s((void *)dev_apricot.mem_start, sizeof(struct i596_private) + 0xf);
+ dev_apricot.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_apricot.base_addr, APRICOT_TOTAL_SIZE);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c apricot.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/at1700.c b/linux/src/drivers/net/at1700.c
new file mode 100644
index 0000000..f4025f4
--- /dev/null
+++ b/linux/src/drivers/net/at1700.c
@@ -0,0 +1,756 @@
+/* at1700.c: A network device driver for the Allied Telesis AT1700.
+
+ Written 1993-98 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a device driver for the Allied Telesis AT1700, which is a
+ straight-forward Fujitsu MB86965 implementation.
+
+ Sources:
+ The Fujitsu MB86965 datasheet.
+
+ After the initial version of this driver was written Gerry Sawkins of
+ ATI provided their EEPROM configuration code header file.
+ Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
+
+ Bugs:
+ The MB86965 has a design flaw that makes all probes unreliable. Not
+ only is it difficult to detect, it also moves around in I/O space in
+ response to inb()s from other device probes!
+*/
+
+static const char *version =
+ "at1700.c:v1.15 4/7/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Tunable parameters. */
+
+/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */
+#define MC_FILTERBREAK 64
+
+/* These unusual address orders are used to verify the CONFIG register. */
+static int at1700_probe_list[] =
+{0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0};
+static int fmv18x_probe_list[] =
+{0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0};
+
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ unsigned char mc_filter[8];
+ uint jumpered:1; /* Set iff the board has jumper config. */
+ uint tx_started:1; /* Packets are on the Tx queue. */
+ uint invalid_irq:1;
+ uchar tx_queue; /* Number of packet on the Tx queue. */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define MODE13 13
+/* Configuration registers only on the '865A/B chips. */
+#define EEPROM_Ctrl 16
+#define EEPROM_Data 17
+#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */
+#define IOCONFIG1 19
+#define SAPROM 20 /* The station address PROM, if no EEPROM. */
+#define RESET 31 /* Write to reset some parts of the chip. */
+#define AT1700_IO_EXTENT 32
+/* Index to functions, as function prototypes. */
+
+extern int at1700_probe(struct device *dev);
+
+static int at1700_probe1(struct device *dev, int ioaddr);
+static int read_eeprom(int ioaddr, int location);
+static int net_open(struct device *dev);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_rx_mode(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+   boilerplate below. */
+struct netdev_entry at1700_drv =
+{"at1700", at1700_probe1, AT1700_IO_EXTENT, at1700_probe_list};
+#else
+int
+at1700_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return at1700_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; at1700_probe_list[i]; i++) {
+ int ioaddr = at1700_probe_list[i];
+ if (check_region(ioaddr, AT1700_IO_EXTENT))
+ continue;
+ if (at1700_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into an
+ EEPROM read. */
+
+int at1700_probe1(struct device *dev, int ioaddr)
+{
+ char fmv_irqmap[4] = {3, 7, 10, 15};
+ char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
+ unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe for.
+ */
+#ifdef notdef
+ printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
+ ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
+ read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
+#endif
+ /* We must check for the EEPROM-config boards first, else accessing
+ IOCONFIG0 will move the board! */
+ if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr
+ && read_eeprom(ioaddr, 4) == 0x0000
+ && (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
+ is_at1700 = 1;
+ else if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] == ioaddr
+ && inb(ioaddr + SAPROM ) == 0x00
+ && inb(ioaddr + SAPROM + 1) == 0x00
+ && inb(ioaddr + SAPROM + 2) == 0x0e)
+ is_fmv18x = 1;
+ else
+ return -ENODEV;
+
+ /* Reset the internal state machines. */
+ outb(0, ioaddr + RESET);
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (is_at1700)
+ irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04)
+ | (read_eeprom(ioaddr, 0)>>14)];
+ else
+ irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
+
+ /* Grab the region so that we can find another board if the IRQ request
+ fails. */
+ request_region(ioaddr, AT1700_IO_EXTENT, dev->name);
+
+ printk("%s: AT1700 found at %#3x, IRQ %d, address ", dev->name,
+ ioaddr, irq);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ for(i = 0; i < 3; i++) {
+ unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
+ printk("%04x", eeprom_val);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
+ }
+
+ /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
+ ushort setup_value = read_eeprom(ioaddr, 12);
+
+ dev->if_port = setup_value >> 8;
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
+
+ /* Set the station address in bank zero. */
+ outb(0xe0, ioaddr + CONFIG_1);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0xe4, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+ /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
+ bus access, two 4K Tx queues, and disabled Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Switch to bank 2 and lock our I/O address. */
+ outb(0xe8, ioaddr + CONFIG_1);
+ outb(dev->if_port, MODE13);
+
+ /* Power-down the chip. Aren't we green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+
+ /* Fill in the fields of 'dev' with ethernet-generic values. */
+ ether_setup(dev);
+
+ {
+ struct net_local *lp = (struct net_local *)dev->priv;
+ lp->jumpered = is_fmv18x;
+ /* Snarf the interrupt vector now. */
+ if (request_irq(irq, &net_interrupt, 0, dev->name, dev)) {
+ printk (" AT1700 at %#3x is unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, irq);
+ lp->invalid_irq = 1;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
+#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
+#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
+#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay() do {} while (0);
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
+
+static int read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ int ee_addr = ioaddr + EEPROM_Ctrl;
+ int ee_daddr = ioaddr + EEPROM_Data;
+ int read_cmd = location | EE_READ_CMD;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_CS, ee_addr);
+ outb(dataval, ee_daddr);
+ eeprom_delay();
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+ }
+ outb(EE_DATA_WRITE, ee_daddr);
+ for (i = 16; i > 0; i--) {
+ outb(EE_CS, ee_addr);
+ eeprom_delay();
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(EE_CS, ee_addr);
+ eeprom_delay();
+ outb(EE_SHIFT_CLK, ee_addr);
+ outb(0, ee_addr);
+ return retval;
+}
+
+
+
+static int net_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ /* Powerup the chip, initialize config register 1, and select bank 0. */
+ outb(0xe0, ioaddr + CONFIG_1);
+
+ /* Set the station address in bank zero. */
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0xe4, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+ /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
+ bus access, and two 4K Tx queues. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Switch to register bank 2, enable the Rx and Tx. */
+ outw(0xe85a, ioaddr + CONFIG_0);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on Rx interrupts, leave Tx interrupts off until packet Tx. */
+ outb(0x00, ioaddr + TX_INTR);
+ outb(0x81, ioaddr + RX_INTR);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ printk("%s: transmit timed out with status %04x, %s?\n", dev->name,
+ inw(ioaddr + STATUS), inb(ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, inw(ioaddr + 0), inw(ioaddr + 2), inw(ioaddr + 4),
+ inw(ioaddr + 6), inw(ioaddr + 8), inw(ioaddr + 10),
+ inw(ioaddr + 12), inw(ioaddr + 14));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ outw(0xffff, ioaddr + 24);
+ outw(0xffff, ioaddr + TX_STATUS);
+ outw(0xe85a, ioaddr + CONFIG_0);
+ outw(0x8100, ioaddr + TX_INTR);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ }
+
+	/* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* Turn off the possible Tx interrupts. */
+ outb(0x00, ioaddr + TX_INTR);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ } else if (lp->tx_queue_len < 4096 - 1502)
+ /* Yes, there is room for one more packet. */
+ dev->tbusy = 0;
+
+		/* Turn Tx interrupts back on. */
+ outb(0x82, ioaddr + TX_INTR);
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+
+ if (dev == NULL) {
+ printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (status & 0xff00
+ || (inb(ioaddr + RX_MODE) & 0x40) == 0) { /* Got a packet(s). */
+ net_rx(dev);
+ }
+ if (status & 0x00ff) {
+ if (status & 0x80) {
+ lp->stats.tx_packets++;
+ if (lp->tx_queue) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->tx_started = 0;
+				/* Turn Tx interrupts off. */
+ outb(0x00, ioaddr + TX_INTR);
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ }
+ }
+
+ dev->interrupt = 0;
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 5;
+
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ ushort status = inw(ioaddr + DATAPORT);
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + 14);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x08) lp->stats.rx_length_errors++;
+ if (status & 0x04) lp->stats.rx_frame_errors++;
+ if (status & 0x02) lp->stats.rx_crc_errors++;
+ if (status & 0x01) lp->stats.rx_over_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The AT1700 claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + 14);
+ }
+
+ if (net_debug > 5)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct device *dev)
+{
+#if 0
+ struct net_local *lp = (struct net_local *)dev->priv;
+#endif
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* No statistic counters on the chip to update. */
+
+#if 0
+ /* Disable the IRQ on boards where it is feasible. */
+ if (lp->jumpered) {
+ outb(0x00, ioaddr + IOCONFIG1);
+ free_irq(dev->irq, dev);
+ }
+#endif
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics.
+ This may be called with the card open or closed.
+ There are no on-chip counters, so this function is trivial.
+*/
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+/* The little-endian AUTODIN II ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+static void
+set_rx_mode(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ long flags;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (dev->mc_count > MC_FILTERBREAK
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (dev->mc_count == 0) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26,
+ mc_filter);
+ }
+
+ save_flags(flags);
+ cli();
+ if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
+ int saved_bank = inw(ioaddr + CONFIG_0);
+ /* Switch to bank 1 and set the multicast table. */
+ outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0);
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + 8 + i);
+ memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
+ outw(saved_bank, ioaddr + CONFIG_0);
+ }
+ restore_flags(flags);
+ return;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_at1700 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, at1700_probe };
+
+static int io = 0x260;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("at1700: You should not use auto-probing with insmod!\n");
+ dev_at1700.base_addr = io;
+ dev_at1700.irq = irq;
+ if (register_netdev(&dev_at1700) != 0) {
+ printk("at1700: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_at1700);
+ kfree(dev_at1700.priv);
+ dev_at1700.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_at1700.irq, NULL);
+ release_region(dev_at1700.base_addr, AT1700_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
+ * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
+ * tab-width: 4
+ * c-basic-offset: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/atp.c b/linux/src/drivers/net/atp.c
new file mode 100644
index 0000000..a9445ea
--- /dev/null
+++ b/linux/src/drivers/net/atp.c
@@ -0,0 +1,977 @@
+/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
+/*
+ This is a driver for commonly OEMed pocket (parallel port)
+ ethernet adapters based on the Realtek RTL8002 and RTL8012 chips.
+
+ Written 1993-95,1997 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ The timer-based reset code was written by Bill Carlson, wwc@super.org.
+*/
+
+static const char *version =
+ "atp.c:v1.08 4/1/97 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+/* Operational parameters that may be safely changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/*
+ This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
+ ethernet adapter. This is a common low-cost OEM pocket ethernet
+ adapter, sold under many names.
+
+ Sources:
+ This driver was written from the packet driver assembly code provided by
+ Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
+ device works just from the assembly code? It ain't pretty. The following
+ description is written based on guesses and writing lots of special-purpose
+ code to test my theorized operation.
+
+ In 1997 Realtek made available the documentation for the second generation
+  RTL8012 chip, which has led to several driver improvements.
+ http://www.realtek.com.tw/cn/cn.html
+
+ Theory of Operation
+
+ The RTL8002 adapter seems to be built around a custom spin of the SEEQ
+ controller core. It probably has a 16K or 64K internal packet buffer, of
+ which the first 4K is devoted to transmit and the rest to receive.
+ The controller maintains the queue of received packet and the packet buffer
+ access pointer internally, with only 'reset to beginning' and 'skip to next
+ packet' commands visible. The transmit packet queue holds two (or more?)
+ packets: both 'retransmit this packet' (due to collision) and 'transmit next
+ packet' commands must be started by hand.
+
+ The station address is stored in a standard bit-serial EEPROM which must be
+ read (ughh) by the device driver. (Provisions have been made for
+ substituting a 74S288 PROM, but I haven't gotten reports of any models
+ using it.) Unlike built-in devices, a pocket adapter can temporarily lose
+ power without indication to the device driver. The major effect is that
+ the station address, receive filter (promiscuous, etc.) and transceiver
+ must be reset.
+
+ The controller itself has 16 registers, some of which use only the lower
+ bits. The registers are read and written 4 bits at a time. The four bit
+ register address is presented on the data lines along with a few additional
+ timing and control bits. The data is then read from status port or written
+ to the data port.
+
+ Correction: the controller has two banks of 16 registers. The second
+ bank contains only the multicast filter table (now used) and the EEPROM
+ access registers.
+
+ Since the bulk data transfer of the actual packets through the slow
+ parallel port dominates the driver's running time, four distinct data
+ (non-register) transfer modes are provided by the adapter, two in each
+ direction. In the first mode timing for the nibble transfers is
+ provided through the data port. In the second mode the same timing is
+ provided through the control port. In either case the data is read from
+ the status port and written to the data port, just as it is accessing
+ registers.
+
+  In addition to the basic data transfer methods, several more modes are
+ created by adding some delay by doing multiple reads of the data to allow
+ it to stabilize. This delay seems to be needed on most machines.
+
+ The data transfer mode is stored in the 'dev->if_port' field. Its default
+ value is '4'. It may be overridden at boot-time using the third parameter
+ to the "ether=..." initialization.
+
+ The header file <atp.h> provides inline functions that encapsulate the
+ register and data access methods. These functions are hand-tuned to
+ generate reasonable object code. This header file also documents my
+ interpretations of the device registers.
+*/
+#include <linux/config.h>
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "atp.h"
+
+/* Kernel compatibility defines, common to David Hind's PCMCIA package.
+ This is only in the support-all-kernels source code. */
+#include <linux/version.h>		/* Evil, but necessary */
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10300
+#define RUN_AT(x) (x) /* What to put in timer->expires. */
+#define DEV_ALLOC_SKB(len) alloc_skb(len, GFP_ATOMIC)
+#define virt_to_bus(addr) ((unsigned long)addr)
+#define bus_to_virt(addr) ((void*)addr)
+
+#else /* 1.3.0 and later */
+#define RUN_AT(x) (jiffies + (x))
+#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+#endif
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10338
+#ifdef MODULE
+#if !defined(CONFIG_MODVERSIONS) && !defined(__NO_VERSION__)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#else
+#undef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT
+#undef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#endif /* 1.3.38 */
+
+#if (LINUX_VERSION_CODE >= 0x10344)
+#define NEW_MULTICAST
+#include <linux/delay.h>
+#endif
+
+#ifdef SA_SHIRQ
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum, dev)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n, instance)
+#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+#else
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n)
+#define IRQ(irq, dev_id, pt_regs) (irq, pt_regs)
+#endif
+/* End of kernel compatibility defines. */
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define ETHERCARD_TOTAL_SIZE 3
+
+/* Sequence to switch an 8012 from printer mux to ethernet mode. */
+static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,};
+
+/* This code, written by wwc@super.org, resets the adapter every
+ TIMED_CHECKER ticks. This recovers from an unknown error which
+ hangs the device. */
+#define TIMED_CHECKER (HZ/4)
+#ifdef TIMED_CHECKER
+#include <linux/timer.h>
+static void atp_timed_checker(unsigned long ignored);
+#endif
+
+/* Index to functions, as function prototypes. */
+
+extern int atp_init(struct device *dev);
+
+static int atp_probe1(struct device *dev, short ioaddr);
+static void get_node_ID(struct device *dev);
+static unsigned short eeprom_op(short ioaddr, unsigned int cmd);
+static int net_open(struct device *dev);
+static void hardware_init(struct device *dev);
+static void write_packet(short ioaddr, int length, unsigned char *packet, int mode);
+static void trigger_send(short ioaddr, int length);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt IRQ(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static void read_block(short ioaddr, int length, unsigned char *buffer, int data_mode);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+#ifdef NEW_MULTICAST
+static void set_rx_mode_8002(struct device *dev);
+static void set_rx_mode_8012(struct device *dev);
+#else
+static void set_rx_mode_8002(struct device *dev, int num_addrs, void *addrs);
+static void set_rx_mode_8012(struct device *dev, int num_addrs, void *addrs);
+#endif
+
+
+/* A list of all installed ATP devices, for removing the driver module. */
+static struct device *root_atp_dev = NULL;
+
+/* Check for a network adapter of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+int
+atp_init(struct device *dev)
+{
+ int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return atp_probe1(dev, base_addr);
+ else if (base_addr == 1) /* Don't probe at all. */
+ return ENXIO;
+
+ for (port = ports; *port; port++) {
+ int ioaddr = *port;
+ outb(0x57, ioaddr + PAR_DATA);
+ if (inb(ioaddr + PAR_DATA) != 0x57)
+ continue;
+ if (atp_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+static int atp_probe1(struct device *dev, short ioaddr)
+{
+ struct net_local *lp;
+ int saved_ctrl_reg, status, i;
+
+ outb(0xff, ioaddr + PAR_DATA);
+ /* Save the original value of the Control register, in case we guessed
+ wrong. */
+ saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
+ /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */
+ outb(0x04, ioaddr + PAR_CONTROL);
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+ eeprom_delay(2048);
+ status = read_nibble(ioaddr, CMR1);
+
+ if ((status & 0x78) != 0x08) {
+ /* The pocket adapter probe failed, restore the control register. */
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return 1;
+ }
+ status = read_nibble(ioaddr, CMR2_h);
+ if ((status & 0x78) != 0x10) {
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return 1;
+ }
+
+ dev = init_etherdev(dev, sizeof(struct net_local));
+
+ /* Find the IRQ used by triggering an interrupt. */
+ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */
+
+ /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
+ if (ioaddr == 0x378)
+ dev->irq = 7;
+ else
+ dev->irq = 5;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+
+ dev->base_addr = ioaddr;
+
+ /* Read the station address PROM. */
+ get_node_ID(dev);
+
+ printk("%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
+ "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr,
+ dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+
+ if (net_debug)
+ printk(version);
+
+ /* Initialize the device structure. */
+ ether_setup(dev);
+ if (dev->priv == NULL)
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ lp = (struct net_local *)dev->priv;
+ lp->chip_type = RTL8002;
+ lp->addr_mode = CMR2h_Normal;
+
+ lp->next_module = root_atp_dev;
+ root_atp_dev = dev;
+
+ /* For the ATP adapter the "if_port" is really the data transfer mode. */
+ dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4;
+ if (dev->mem_end & 0xf)
+ net_debug = dev->mem_end & 7;
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list =
+ lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012;
+
+ return 0;
+}
+
+/* Read the station address PROM, usually a word-wide EEPROM. */
+static void get_node_ID(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ int sa_offset = 0;
+ int i;
+
+ write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
+
+ /* Some adapters have the station address at offset 15 instead of offset
+ zero. Check for it, and fix it if needed. */
+ if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
+ sa_offset = 15;
+
+ for (i = 0; i < 3; i++)
+ ((unsigned short *)dev->dev_addr)[i] =
+ ntohs(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+}
+
+/*
+ An EEPROM read command starts by shifting out 0x60+address, and then
+ shifting in the serial data. See the NatSemi databook for details.
+ * ________________
+ * CS : __|
+ * ___ ___
+ * CLK: ______| |___| |
+ * __ _______ _______
+ * DI : __X_______X_______X
+ * DO : _________X_______X
+ */
+
+static unsigned short eeprom_op(short ioaddr, unsigned int cmd)
+{
+ unsigned eedata_out = 0;
+ int num_bits = EE_CMD_SIZE;
+
+ while (--num_bits >= 0) {
+ char outval = test_bit(num_bits, &cmd) ? EE_DATA_WRITE : 0;
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
+ eeprom_delay(5);
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
+ eedata_out <<= 1;
+ if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
+ eedata_out++;
+ eeprom_delay(5);
+ }
+ write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
+ return eedata_out;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine sets everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+
+ This is an attachable device: if there is no dev->priv entry then it wasn't
+ probed for at boot-time, and we need to probe for it again.
+ */
+static int net_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ /* The interrupt line is turned off (tri-stated) when the device isn't in
+ use. That's especially important for "attached" interfaces where the
+ port or interrupt may be shared. */
+#ifndef SA_SHIRQ
+ if (irq2dev_map[dev->irq] != 0
+ || (irq2dev_map[dev->irq] = dev) == 0
+ || REQUEST_IRQ(dev->irq, &net_interrupt, 0, "ATP", dev)) {
+ return -EAGAIN;
+ }
+#else
+ if (request_irq(dev->irq, &net_interrupt, 0, "ATP Ethernet", dev))
+ return -EAGAIN;
+#endif
+
+ MOD_INC_USE_COUNT;
+ hardware_init(dev);
+ dev->start = 1;
+
+ init_timer(&lp->timer);
+ lp->timer.expires = RUN_AT(TIMED_CHECKER);
+ lp->timer.data = (unsigned long)dev;
+ lp->timer.function = &atp_timed_checker; /* timer handler */
+ add_timer(&lp->timer);
+
+ return 0;
+}
+
+/* This routine resets the hardware. We initialize everything, assuming that
+ the hardware may have been temporarily detached. */
+static void hardware_init(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+
+ if (net_debug > 2) {
+ printk("%s: Reset: current Rx mode %d.\n", dev->name,
+ (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
+ }
+
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+
+ /* Enable the interrupt line from the serial port. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+
+ /* Unmask the interesting interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+
+ lp->tx_unit_busy = 0;
+ lp->pac_cnt_in_tx_buf = 0;
+ lp->saved_tx_size = 0;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+}
+
+static void trigger_send(short ioaddr, int length)
+{
+ write_reg_byte(ioaddr, TxCNT0, length & 0xff);
+ write_reg(ioaddr, TxCNT1, length >> 8);
+ write_reg(ioaddr, CMR1, CMR1_Xmit);
+}
+
+static void write_packet(short ioaddr, int length, unsigned char *packet, int data_mode)
+{
+ length = (length + 1) & ~1; /* Round up to word length. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ if ((data_mode & 1) == 0) {
+ /* Write the packet out, starting with the write addr. */
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+ do {
+ write_byte_mode0(ioaddr, *packet++);
+ } while (--length > 0) ;
+ } else {
+ /* Write the packet out in slow mode. */
+ unsigned char outbyte = *packet++;
+
+ outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+
+ outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outbyte >>= 4;
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ while (--length > 0)
+ write_byte_mode1(ioaddr, *packet++);
+ }
+ /* Terminate the Tx frame. End of write: ECB. */
+ outb(0xff, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+#ifndef final_version
+ if (skb == NULL || skb->len <= 0) {
+ printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
+ dev->name);
+ dev_tint(dev);
+ return 0;
+ }
+#endif
+
+ /* Use transmit-while-tbusy as a crude error timer. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ if (jiffies - dev->trans_start < TX_TIMEOUT)
+ return 1;
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
+ : "IRQ conflict");
+ lp->stats.tx_errors++;
+ /* Try to restart the adapter. */
+ hardware_init(dev);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ return 1;
+ } else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+ int flags;
+
+ /* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
+ This sequence must not be interrupted by an incoming packet. */
+ save_flags(flags);
+ cli();
+ write_reg(ioaddr, IMR, 0);
+ write_reg_high(ioaddr, IMR, 0);
+ restore_flags(flags);
+
+ write_packet(ioaddr, length, buf, dev->if_port);
+
+ lp->pac_cnt_in_tx_buf++;
+ if (lp->tx_unit_busy == 0) {
+ trigger_send(ioaddr, length);
+ lp->saved_tx_size = 0; /* Redundant */
+ lp->re_tx = 0;
+ lp->tx_unit_busy = 1;
+ } else
+ lp->saved_tx_size = length;
+
+ dev->trans_start = jiffies;
+ /* Re-enable the LPT interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt IRQ(int irq, void *dev_instance, struct pt_regs * regs)
+{
+#ifdef SA_SHIRQ
+ struct device *dev = (struct device *)dev_instance;
+#else
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+#endif
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 20;
+ static int num_tx_since_rx = 0;
+
+ if (dev == NULL) {
+ printk ("ATP_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /* Disable additional spurious interrupts. */
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+
+ /* The adapter's output is currently the IRQ line, switch it to data. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+ write_reg(ioaddr, IMR, 0);
+
+ if (net_debug > 5) printk("%s: In interrupt ", dev->name);
+ while (--boguscount > 0) {
+ status = read_nibble(ioaddr, ISR);
+ if (net_debug > 5) printk("loop status %02x..", status);
+
+ if (status & (ISR_RxOK<<3)) {
+ write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
+ do {
+ int read_status = read_nibble(ioaddr, CMR1);
+ if (net_debug > 6)
+ printk("handling Rx packet %02x..", read_status);
+ /* We acknowledged the normal Rx interrupt, so if the interrupt
+ is still outstanding we must have a Rx error. */
+ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
+ lp->stats.rx_over_errors++;
+ /* Set to no-accept mode long enough to remove a packet. */
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+ net_rx(dev);
+ /* Clear the interrupt and return to normal Rx mode. */
+ write_reg_high(ioaddr, ISR, ISRh_RxErr);
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
+ net_rx(dev);
+ dev->last_rx = jiffies;
+ num_tx_since_rx = 0;
+ } else
+ break;
+ } while (--boguscount > 0);
+ } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
+ if (net_debug > 6) printk("handling Tx done..");
+ /* Clear the Tx interrupt. We should check for too many failures
+ and reinitialize the adapter. */
+ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
+ if (status & (ISR_TxErr<<3)) {
+ lp->stats.collisions++;
+ if (++lp->re_tx > 15) {
+ lp->stats.tx_aborted_errors++;
+ hardware_init(dev);
+ break;
+ }
+ /* Attempt to retransmit. */
+ if (net_debug > 6) printk("attempting to ReTx");
+ write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
+ } else {
+ /* Finish up the transmit. */
+ lp->stats.tx_packets++;
+ lp->pac_cnt_in_tx_buf--;
+ if ( lp->saved_tx_size) {
+ trigger_send(ioaddr, lp->saved_tx_size);
+ lp->saved_tx_size = 0;
+ lp->re_tx = 0;
+ } else
+ lp->tx_unit_busy = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ num_tx_since_rx++;
+ } else if (num_tx_since_rx > 8
+ && jiffies > dev->last_rx + 100) {
+ if (net_debug > 2)
+ printk("%s: Missed packet? No Rx after %d Tx and %ld jiffies"
+ " status %02x CMR1 %02x.\n", dev->name,
+ num_tx_since_rx, jiffies - dev->last_rx, status,
+ (read_nibble(ioaddr, CMR1) >> 3) & 15);
+ lp->stats.rx_missed_errors++;
+ hardware_init(dev);
+ num_tx_since_rx = 0;
+ break;
+ } else
+ break;
+ }
+
+	/* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+ {
+ int i;
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+ }
+
+ /* Tell the adapter that it can go back to using the output line as IRQ. */
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ /* Enable the physical interrupt line, which is sure to be low until.. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ /* .. we enable the interrupt sources. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */
+
+ if (net_debug > 5) printk("exiting interrupt.\n");
+
+ dev->interrupt = 0;
+
+ return;
+}
+
+#ifdef TIMED_CHECKER
+/* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+static void atp_timed_checker(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int tickssofar = jiffies - lp->last_rx_time;
+ int i;
+
+ if (tickssofar > 2*HZ && dev->interrupt == 0) {
+#if 1
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+ lp->last_rx_time = jiffies;
+#else
+ for (i = 0; i < 6; i++)
+ if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i])
+ {
+ struct net_local *lp = (struct net_local *)atp_timed_dev->priv;
+ write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
+ if (i == 2)
+ lp->stats.tx_errors++;
+ else if (i == 3)
+ lp->stats.tx_dropped++;
+ else if (i == 4)
+ lp->stats.collisions++;
+ else
+ lp->stats.rx_errors++;
+ }
+#endif
+ }
+ lp->timer.expires = RUN_AT(TIMED_CHECKER);
+ add_timer(&lp->timer);
+}
+#endif
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ struct rx_header rx_head;
+
+ /* Process the received packet. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
+ if (net_debug > 5)
+ printk(" rx_count %04x %04x %04x %04x..", rx_head.pad,
+ rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
+ if ((rx_head.rx_status & 0x77) != 0x01) {
+ lp->stats.rx_errors++;
+ if (rx_head.rx_status & 0x0004) lp->stats.rx_frame_errors++;
+ else if (rx_head.rx_status & 0x0002) lp->stats.rx_crc_errors++;
+ if (net_debug > 3) printk("%s: Unknown ATP Rx error %04x.\n",
+ dev->name, rx_head.rx_status);
+ if (rx_head.rx_status & 0x0020) {
+ lp->stats.rx_fifo_errors++;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+ } else if (rx_head.rx_status & 0x0050)
+ hardware_init(dev);
+ return;
+ } else {
+		/* Malloc up new buffer. The "-4" omits the FCS (CRC). */
+ int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+ skb = DEV_ALLOC_SKB(pkt_len + 2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ goto done;
+ }
+ skb->dev = dev;
+
+#if LINUX_VERSION_CODE >= 0x10300
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ read_block(ioaddr, pkt_len, skb->data, dev->if_port);
+ skb->len = pkt_len;
+#endif
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ done:
+ write_reg(ioaddr, CMR1, CMR1_NextPkt);
+ lp->last_rx_time = jiffies;
+ return;
+}
+
+static void read_block(short ioaddr, int length, unsigned char *p, int data_mode)
+{
+
+ if (data_mode <= 3) { /* Mode 0 or 1 */
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
+ ioaddr + PAR_DATA);
+ if (data_mode <= 1) { /* Mode 0 or 1 */
+ do *p++ = read_byte_mode0(ioaddr); while (--length > 0);
+ } else /* Mode 2 or 3 */
+ do *p++ = read_byte_mode2(ioaddr); while (--length > 0);
+ } else if (data_mode <= 5)
+ do *p++ = read_byte_mode4(ioaddr); while (--length > 0);
+ else
+ do *p++ = read_byte_mode6(ioaddr); while (--length > 0);
+
+ outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ del_timer(&lp->timer);
+
+ /* Flush the Tx and disable Rx here. */
+ lp->addr_mode = CMR2h_OFF;
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+
+ /* Free the IRQ line. */
+ outb(0x00, ioaddr + PAR_CONTROL);
+ FREE_IRQ(dev->irq, dev);
+#ifndef SA_SHIRQ
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+static void
+#ifdef NEW_MULTICAST
+set_rx_mode_8002(struct device *dev)
+#else
+static void set_rx_mode_8002(struct device *dev, int num_addrs, void *addrs);
+#endif
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+
+ if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) {
+ /* We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+ lp->addr_mode = CMR2h_PROMISC;
+ } else
+ lp->addr_mode = CMR2h_Normal;
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+}
+
+static void
+#ifdef NEW_MULTICAST
+set_rx_mode_8012(struct device *dev)
+#else
+static void set_rx_mode_8012(struct device *dev, int num_addrs, void *addrs);
+#endif
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ new_mode = CMR2h_PROMISC;
+ } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ new_mode = CMR2h_Normal;
+ } else {
+ struct dev_mc_list *mclist;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ new_mode = CMR2h_Normal;
+ }
+ lp->addr_mode = new_mode;
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
+ for (i = 0; i < 8; i++)
+ write_reg_byte(ioaddr, i, mc_filter[i]);
+ if (net_debug > 2 || 1) {
+ lp->addr_mode = 1;
+ printk("%s: Mode %d, setting multicast filter to",
+ dev->name, lp->addr_mode);
+ for (i = 0; i < 8; i++)
+ printk(" %2.2x", mc_filter[i]);
+ printk(".\n");
+ }
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
+}
+
+#ifdef MODULE
+static int debug = 1;
+int
+init_module(void)
+{
+ net_debug = debug;
+ root_atp_dev = NULL;
+ atp_init(0);
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ /* No need to release_region(), since we never snarf it. */
+ while (root_atp_dev) {
+ next_dev = ((struct net_local *)root_atp_dev->priv)->next_module;
+ unregister_netdev(root_atp_dev);
+ kfree(root_atp_dev);
+ root_atp_dev = next_dev;
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c atp.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/atp.h b/linux/src/drivers/net/atp.h
new file mode 100644
index 0000000..b4d1933
--- /dev/null
+++ b/linux/src/drivers/net/atp.h
@@ -0,0 +1,274 @@
+/* Linux header file for the ATP pocket ethernet adapter. */
+/* v1.04 4/1/97 becker@cesdis.gsfc.nasa.gov. */
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+struct net_local {
+#ifdef __KERNEL__
+ struct enet_statistics stats;
+#endif
+ struct device *next_module;
+ struct timer_list timer; /* Media selection timer. */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ ushort saved_tx_size;
+ unsigned tx_unit_busy:1;
+ unsigned char re_tx, /* Number of packet retransmissions. */
+ addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
+ pac_cnt_in_tx_buf,
+ chip_type;
+};
+
+struct rx_header {
+ ushort pad; /* Pad. */
+ ushort rx_count;
+ ushort rx_status; /* Unknown bit assignments :-<. */
+ ushort cur_addr; /* Apparently the current buffer address(?) */
+};
+
+#define PAR_DATA 0
+#define PAR_STATUS 1
+#define PAR_CONTROL 2
+
+enum chip_type { RTL8002, RTL8012 };
+
+#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
+#define Ctrl_HNibRead 0
+#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
+#define Ctrl_HNibWrite 0
+#define Ctrl_SelData 0x04 /* LP_PINITP */
+#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
+
+#define EOW 0xE0
+#define EOC 0xE0
+#define WrAddr 0x40 /* Set address of EPLC read, write register. */
+#define RdAddr 0xC0
+#define HNib 0x10
+
+enum page0_regs
+{
+ /* The first six registers hold the ethernet physical station address. */
+ PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+ TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
+ TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
+ ISR = 10, IMR = 11, /* Interrupt status and mask. */
+ CMR1 = 12, /* Command register 1. */
+ CMR2 = 13, /* Command register 2. */
+ MODSEL = 14, /* Mode select register. */
+ MAR = 14, /* Memory address register (?). */
+ CMR2_h = 0x1d, };
+
+enum eepage_regs
+{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
+
+
+#define ISR_TxOK 0x01
+#define ISR_RxOK 0x04
+#define ISR_TxErr 0x02
+#define ISRh_RxErr 0x11 /* ISR, high nibble */
+
+#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */
+#define CMR1h_RESET 0x04 /* Reset. */
+#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
+#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
+#define CMR1h_TxRxOFF 0x00
+#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
+#define CMR1_Xmit 0x04 /* Trigger a transmit. */
+#define CMR1_IRQ 0x02 /* Interrupt active. */
+#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
+#define CMR1_NextPkt 0x01 /* Enable the buffer(?). */
+
+#define CMR2_NULL 8
+#define CMR2_IRQOUT 9
+#define CMR2_RAMTEST 10
+#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
+
+#define CMR2h_OFF 0 /* No accept mode. */
+#define CMR2h_Physical 1 /* Accept a physical address match only. */
+#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
+#define CMR2h_PROMISC 3 /* Promiscuous mode. */
+
+/* An inline function used below: it differs from inb() by explicitly returning an
+ unsigned char, saving a truncation. */
+extern inline unsigned char inbyte(unsigned short port)
+{
+ unsigned char _v;
+ __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
+ return _v;
+}
+
+/* Read register OFFSET.
+ This command should always be terminated with read_end(). */
+extern inline unsigned char read_nibble(short port, unsigned char offset)
+{
+ unsigned char retval;
+ outb(EOC+offset, port + PAR_DATA);
+ outb(RdAddr+offset, port + PAR_DATA);
+ inbyte(port + PAR_STATUS); /* Settling time delay */
+ retval = inbyte(port + PAR_STATUS);
+ outb(EOC+offset, port + PAR_DATA);
+
+ return retval;
+}
+
+/* Functions for bulk data read. The interrupt line is always disabled. */
+/* Get a byte using read mode 0, reading data from the control lines. */
+extern inline unsigned char read_byte_mode0(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
+extern inline unsigned char read_byte_mode2(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register. */
+extern inline unsigned char read_byte_mode4(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register, double reading to allow settling. */
+extern inline unsigned char read_byte_mode6(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+extern inline void
+write_reg(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA);
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval &= 0xf0;
+ outval |= value;
+ outb(outval, port + PAR_DATA);
+ outval &= 0x1f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | outval, port + PAR_DATA);
+}
+
+extern inline void
+write_reg_high(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval = EOC | HNib | reg;
+
+ outb(outval, port + PAR_DATA);
+ outval &= WrAddr | HNib | 0x0f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval = WrAddr | HNib | value;
+ outb(outval, port + PAR_DATA);
+ outval &= HNib | 0x0f; /* HNib | value */
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | HNib | outval, port + PAR_DATA);
+}
+
+/* Write a byte out using nibble mode. The low nibble is written first. */
+extern inline void
+write_reg_byte(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+ outb(value & 0x0f, port + PAR_DATA);
+ value >>= 4;
+ outb(value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+
+ outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
+}
+
+/*
+ * Bulk data writes to the packet buffer. The interrupt line remains enabled.
+ * The first, faster method uses only the dataport (data modes 0, 2 & 4).
+ * The second (backup) method uses data and control regs (modes 1, 3 & 5).
+ * It should only be needed when there is skew between the individual data
+ * lines.
+ */
+extern inline void write_byte_mode0(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+}
+
+extern inline void write_byte_mode1(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+}
+
+/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
+extern inline void write_word_mode0(short ioaddr, unsigned short value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+}
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_CLK_HIGH 0x12
+#define EE_CLK_LOW 0x16
+#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay(ticks) \
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
+#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
+#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
+#define EE_CMD_SIZE 27 /* The command+address+data size. */
diff --git a/linux/src/drivers/net/auto_irq.c b/linux/src/drivers/net/auto_irq.c
new file mode 100644
index 0000000..82bc7b1
--- /dev/null
+++ b/linux/src/drivers/net/auto_irq.c
@@ -0,0 +1,123 @@
+/* auto_irq.c: Auto-configure IRQ lines for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This code is a general-purpose IRQ line detector for devices with
+ jumpered IRQ lines. If you can make the device raise an IRQ (and
+ that IRQ line isn't already being used), these routines will tell
+ you what IRQ line it's using -- perfect for those oh-so-cool boot-time
+ device probes!
+
+ To use this, first call autoirq_setup(timeout). TIMEOUT is how many
+ 'jiffies' (1/100 sec.) to detect other devices that have active IRQ lines,
+ and can usually be zero at boot. 'autoirq_setup()' returns the bit
+ vector of nominally-available IRQ lines (lines may be physically in-use,
+ but not yet registered to a device).
+ Next, set up your device to trigger an interrupt.
+ Finally call autoirq_report(TIMEOUT) to find out which IRQ line was
+ most recently active. The TIMEOUT should usually be zero, but may
+ be set to the number of jiffies to wait for a slow device to raise an IRQ.
+
+ The idea of using the setup timeout to filter out bogus IRQs came from
+ the serial driver.
+*/
+
+
+#ifdef version
+static const char *version=
+"auto_irq.c:v1.11 Donald Becker (becker@cesdis.gsfc.nasa.gov)";
+#endif
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/netdevice.h>
+
+struct device *irq2dev_map[NR_IRQS] = {0, 0, /* ... zeroed */};
+
+unsigned long irqs_busy = 0x2147; /* The set of fixed IRQs (keyboard, timer, etc) */
+unsigned long irqs_used = 0x0001; /* The set of fixed IRQs sometimes enabled. */
+unsigned long irqs_reserved = 0x0000; /* An advisory "reserved" table. */
+unsigned long irqs_shared = 0x0000; /* IRQ lines "shared" among conforming cards.*/
+
+static volatile unsigned long irq_bitmap; /* The irqs we actually found. */
+static unsigned long irq_handled; /* The irq lines we have a handler on. */
+static volatile int irq_number; /* The latest irq number we actually found. */
+
+static void autoirq_probe(int irq, void *dev_id, struct pt_regs * regs)
+{
+ irq_number = irq;
+ set_bit(irq, (void *)&irq_bitmap); /* irq_bitmap |= 1 << irq; */
+ /* This code used to disable the irq. However, the interrupt stub
+ * would then re-enable the interrupt with (potentially) disastrous
+ * consequences
+ */
+ free_irq(irq, dev_id);
+ return;
+}
+
+int autoirq_setup(int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies + waittime;
+ unsigned long boguscount = (waittime*loops_per_sec) / 100;
+
+ irq_handled = 0;
+ irq_bitmap = 0;
+
+ for (i = 0; i < 16; i++) {
+ if (test_bit(i, &irqs_busy) == 0
+ && request_irq(i, autoirq_probe, SA_INTERRUPT, "irq probe", NULL) == 0)
+ set_bit(i, (void *)&irq_handled); /* irq_handled |= 1 << i;*/
+ }
+ /* Update our USED lists. */
+ irqs_used |= ~irq_handled;
+
+ /* Hang out at least <waittime> jiffies waiting for bogus IRQ hits. */
+ while (timeout > jiffies && --boguscount > 0)
+ ;
+
+ irq_handled &= ~irq_bitmap;
+
+ irq_number = 0; /* We are interested in new interrupts from now on */
+
+ return irq_handled;
+}
+
+int autoirq_report(int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies+waittime;
+ unsigned long boguscount = (waittime*loops_per_sec) / 100;
+
+ /* Hang out at least <waittime> jiffies waiting for the IRQ. */
+
+ while (timeout > jiffies && --boguscount > 0)
+ if (irq_number)
+ break;
+
+ irq_handled &= ~irq_bitmap; /* This eliminates the already reset handlers */
+
+ /* Retract the irq handlers that we installed. */
+ for (i = 0; i < 16; i++) {
+ if (test_bit(i, (void *)&irq_handled))
+ free_irq(i, NULL);
+ }
+ return irq_number;
+}
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c auto_irq.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/cb_shim.c b/linux/src/drivers/net/cb_shim.c
new file mode 100644
index 0000000..599b5bb
--- /dev/null
+++ b/linux/src/drivers/net/cb_shim.c
@@ -0,0 +1,296 @@
+/* cb_shim.c: Linux CardBus device support code. */
+/*
+ Written 1999-2002 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by
+ reference. This is not a documented interface. Drivers incorporating
+ or interacting with these functions are derivative works and thus
+ are covered the GPL. They must include an explicit GPL notice.
+
+ This code provides a shim to allow newer drivers to interact with the
+ older Cardbus driver activation code. The functions supported are
+ attach, suspend, power-off, resume and eject.
+
+ The author may be reached as becker@scyld.com, or
+ Donald Becker
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/drivers.html
+
+ Other contributors: (none yet)
+*/
+
+static const char version1[] =
+"cb_shim.c:v1.03 7/12/2002 Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/linux/drivers.html\n";
+
+/* Module options. */
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/* These might be awkward to locate. */
+#include <pcmcia/driver_ops.h>
+#include "pci-scan.h"
+#include "kern_compat.h"
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Hot-swap-PCI and Cardbus event dispatch");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Enable additional status messages (0-7)");
+
+/* Note: this is used in a slightly sleazy manner: it is passed to routines
+ that expect and return just dev_node_t. However using the too-simple
+ dev_node_t complicates devices management -- older drivers had to
+ look up dev_node_t.name in their private list. */
+
+struct registered_pci_device {
+ struct dev_node_t node;
+ int magic;
+ struct registered_pci_device *next;
+ struct drv_id_info *drv_info;
+ struct pci_dev *pci_loc;
+ void *dev_instance;
+} static *root_pci_devs = 0;
+
+struct drv_shim {
+ struct drv_id_info *did;
+ struct driver_operations drv_ops;
+ int magic;
+ struct drv_shim *next;
+} static *root_drv_id = 0;
+
+static void drv_power_op(struct dev_node_t *node, enum drv_pwr_action action)
+{
+ struct registered_pci_device **devp, **next, *rpin = (void *)node, *rp;
+ if (debug > 1)
+ printk(KERN_DEBUG "power operation(%s, %d).\n",
+ rpin->drv_info->name, action);
+ /* With our wrapper structure we can almost do
+ rpin->drv_info->pwr_event(rpin->dev_instance, action);
+ But the detach operation requires us to remove the object from the
+ list, so we check for uncontrolled "ghost" devices. */
+ for (devp = &root_pci_devs; *devp; devp = next) {
+ rp = *devp;
+ next = &rp->next;
+ if (rp == rpin) {
+ if (rp->drv_info->pwr_event)
+ rp->drv_info->pwr_event((*devp)->dev_instance, action);
+ else
+ printk(KERN_ERR "No power event hander for driver %s.\n",
+ rpin->drv_info->name);
+ if (action == DRV_DETACH) {
+ kfree(rp);
+ *devp = *next;
+ MOD_DEC_USE_COUNT;
+ }
+ return;
+ }
+ }
+ if (debug)
+ printk(KERN_WARNING "power operation(%s, %d) for a ghost device.\n",
+ node->dev_name, action);
+}
+/* Wrappers / static lambdas. */
+static void drv_suspend(struct dev_node_t *node)
+{
+ drv_power_op(node, DRV_SUSPEND);
+}
+static void drv_resume(struct dev_node_t *node)
+{
+ drv_power_op(node, DRV_RESUME);
+}
+static void drv_detach(struct dev_node_t *node)
+{
+ drv_power_op(node, DRV_DETACH);
+}
+
+/* The CardBus interaction does not identify the driver the attach() is
+ for, thus we must search for the ID in all PCI device tables.
+ While ugly, we likely only have one driver loaded anyway.
+*/
+static dev_node_t *drv_attach(struct dev_locator_t *loc)
+{
+ struct drv_shim *dp;
+ struct drv_id_info *drv_id = NULL;
+ struct pci_id_info *pci_tbl = NULL;
+ u32 pci_id, subsys_id, pci_rev, pciaddr;
+ u8 irq;
+ int chip_idx = 0, pci_flags, bus, devfn;
+ long ioaddr;
+ void *newdev;
+
+ if (debug > 1)
+ printk(KERN_INFO "drv_attach()\n");
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ if (debug > 1)
+ printk(KERN_DEBUG "drv_attach(bus %d, function %d)\n", bus, devfn);
+
+ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &pci_id);
+ pcibios_read_config_dword(bus, devfn, PCI_SUBSYSTEM_ID, &subsys_id);
+ pcibios_read_config_dword(bus, devfn, PCI_REVISION_ID, &pci_rev);
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ for (dp = root_drv_id; dp; dp = dp->next) {
+ drv_id = dp->did;
+ pci_tbl = drv_id->pci_dev_tbl;
+ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+ struct pci_id_info *chip = &pci_tbl[chip_idx];
+ if ((pci_id & chip->id.pci_mask) == chip->id.pci
+ && (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
+ && (pci_rev & chip->id.revision_mask) == chip->id.revision)
+ break;
+ }
+ if (pci_tbl[chip_idx].name) /* Compiled out! */
+ break;
+ }
+ if (dp == 0) {
+ printk(KERN_WARNING "No driver match for device %8.8x at %d/%d.\n",
+ pci_id, bus, devfn);
+ return 0;
+ }
+ pci_flags = pci_tbl[chip_idx].pci_flags;
+ pcibios_read_config_dword(bus, devfn, ((pci_flags >> 2) & 0x1C) + 0x10,
+ &pciaddr);
+ if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+ } else
+ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size);
+ if (ioaddr == 0 || irq == 0) {
+ printk(KERN_ERR "The %s at %d/%d was not assigned an %s.\n"
+ KERN_ERR " It will not be activated.\n",
+ pci_tbl[chip_idx].name, bus, devfn,
+ ioaddr == 0 ? "address" : "IRQ");
+ return NULL;
+ }
+ printk(KERN_INFO "Found a %s at %d/%d address 0x%x->0x%lx IRQ %d.\n",
+ pci_tbl[chip_idx].name, bus, devfn, pciaddr, ioaddr, irq);
+ {
+ u16 pci_command;
+ pcibios_read_config_word(bus, devfn, PCI_COMMAND, &pci_command);
+ printk(KERN_INFO "%s at %d/%d command 0x%x.\n",
+ pci_tbl[chip_idx].name, bus, devfn, pci_command);
+ }
+
+ newdev = drv_id->probe1(pci_find_slot(bus, devfn), 0,
+ ioaddr, irq, chip_idx, 0);
+ if (newdev) {
+ struct registered_pci_device *hsdev =
+ kmalloc(sizeof(struct registered_pci_device), GFP_KERNEL);
+ if (drv_id->pci_class == PCI_CLASS_NETWORK_ETHERNET<<8)
+ strcpy(hsdev->node.dev_name, ((struct net_device *)newdev)->name);
+ hsdev->node.major = hsdev->node.minor = 0;
+ hsdev->node.next = NULL;
+ hsdev->drv_info = drv_id;
+ hsdev->dev_instance = newdev;
+ hsdev->next = root_pci_devs;
+ root_pci_devs = hsdev;
+ drv_id->pwr_event(newdev, DRV_ATTACH);
+ MOD_INC_USE_COUNT;
+ return &hsdev->node;
+ }
+ return NULL;
+}
+
+/* Add/remove a driver ID structure to our private list of known drivers. */
+int do_cb_register(struct drv_id_info *did)
+{
+ struct driver_operations *dop;
+ struct drv_shim *dshim = kmalloc(sizeof(*dshim), GFP_KERNEL);
+ if (dshim == 0)
+ return 0;
+ if (debug > 1)
+ printk(KERN_INFO "Registering driver support for '%s'.\n",
+ did->name);
+ MOD_INC_USE_COUNT;
+ dshim->did = did;
+ dop = &dshim->drv_ops;
+ dop->name = (char *)did->name;
+ dop->attach = drv_attach;
+ dop->suspend = drv_suspend;
+ dop->resume = drv_resume;
+ dop->detach = drv_detach;
+ dshim->next = root_drv_id;
+ root_drv_id = dshim;
+ return register_driver(dop);
+}
+
+void do_cb_unregister(struct drv_id_info *did)
+{
+ struct drv_shim **dp;
+ for (dp = &root_drv_id; *dp; dp = &(*dp)->next)
+ if ((*dp)->did == did) {
+ struct drv_shim *dshim = *dp;
+ unregister_driver(&dshim->drv_ops);
+ *dp = dshim->next;
+ kfree(dshim);
+ MOD_DEC_USE_COUNT;
+ return;
+ }
+}
+
+extern int (*register_hotswap_hook)(struct drv_id_info *did);
+extern void (*unregister_hotswap_hook)(struct drv_id_info *did);
+
+int (*old_cb_hook)(struct drv_id_info *did);
+void (*old_un_cb_hook)(struct drv_id_info *did);
+
+int init_module(void)
+{
+ if (debug)
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ old_cb_hook = register_hotswap_hook;
+ old_un_cb_hook = unregister_hotswap_hook;
+ register_hotswap_hook = do_cb_register;
+ unregister_hotswap_hook = do_cb_unregister;
+ return 0;
+}
+void cleanup_module(void)
+{
+ register_hotswap_hook = old_cb_hook;
+ unregister_hotswap_hook = old_un_cb_hook;
+ return;
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c cb_shim.c -I/usr/include/ -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
+
diff --git a/linux/src/drivers/net/de4x5.c b/linux/src/drivers/net/de4x5.c
new file mode 100644
index 0000000..c85bcdb
--- /dev/null
+++ b/linux/src/drivers/net/de4x5.c
@@ -0,0 +1,5942 @@
+/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
+ ethernet driver for Linux.
+
+ Copyright 1994, 1995 Digital Equipment Corporation.
+
+ Testing resources for this driver have been made available
+ in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2 of the License, or (at your
+ option) any later version.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Originally, this driver was written for the Digital Equipment
+ Corporation series of EtherWORKS ethernet cards:
+
+ DE425 TP/COAX EISA
+ DE434 TP PCI
+ DE435 TP/COAX/AUI PCI
+ DE450 TP/COAX/AUI PCI
+ DE500 10/100 PCI Fasternet
+
+ but it will now attempt to support all cards which conform to the
+ Digital Semiconductor SROM Specification. The driver currently
+ recognises the following chips:
+
+ DC21040 (no SROM)
+ DC21041[A]
+ DC21140[A]
+ DC21142
+ DC21143
+
+ So far the driver is known to work with the following cards:
+
+ KINGSTON
+ Linksys
+ ZNYX342
+ SMC8432
+ SMC9332 (w/new SROM)
+ ZNYX31[45]
+ ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
+
+ The driver has been tested on a relatively busy network using the DE425,
+ DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
+ 16M of data to a DECstation 5000/200 as follows:
+
+ TCP UDP
+ TX RX TX RX
+ DE425 1030k 997k 1170k 1128k
+ DE434 1063k 995k 1170k 1125k
+ DE435 1063k 995k 1170k 1125k
+ DE500 1063k 998k 1170k 1125k in 10Mb/s mode
+
+ All values are typical (in kBytes/sec) from a sample of 4 for each
+ measurement. Their error is +/-20k on a quiet (private) network and also
+ depend on what load the CPU has.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'ewrk3.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious. With the module autoload of
+ every usable DECchip board, I pinched Donald's 'next_module' field to
+ link my modules together.
+
+ Up to 15 EISA cards can be supported under this driver, limited primarily
+ by the available IRQ lines. I have checked different configurations of
+ multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
+ problem yet (provided you have at least depca.c v0.38) ...
+
+ PCI support has been added to allow the driver to work with the DE434,
+ DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
+ to the differences in the EISA and PCI CSR address offsets from the base
+ address.
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). Loadable module support under PCI and EISA has been
+ achieved by letting the driver autoprobe as if it were compiled into the
+ kernel. Do make sure you're not sharing interrupts with anything that
+ cannot accommodate interrupt sharing!
+
+ To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) for fixed autoprobes (not recommended), edit the source code near
+ line 5594 to reflect the I/O address you're using, or assign these when
+ loading by:
+
+ insmod de4x5 io=0xghh where g = bus number
+ hh = device number
+
+ NB: autoprobing for modules is now supported by default. You may just
+ use:
+
+ insmod de4x5
+
+ to load all available boards. For a specific board, still use
+ the 'io=?' above.
+ 3) compile de4x5.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the de4x5 configuration turned off and reboot.
+ 5) insmod de4x5 [io=0xghh]
+ 6) run the net startup bits for your new eth?? interface(s) manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ To unload a module, turn off the associated interface(s)
+ 'ifconfig eth?? down' then 'rmmod de4x5'.
+
+ Automedia detection is included so that in principle you can disconnect
+ from, e.g. TP, reconnect to BNC and things will still work (after a
+ pause whilst the driver figures out where its media went). My tests
+ using ping showed that it appears to work....
+
+ By default, the driver will now autodetect any DECchip based card.
+ Should you have a need to restrict the driver to DIGITAL only cards, you
+ can compile with a DEC_ONLY define, or if loading as a module, use the
+ 'dec_only=1' parameter.
+
+ I've changed the timing routines to use the kernel timer and scheduling
+ functions so that the hangs and other assorted problems that occurred
+ while autosensing the media should be gone. A bonus for the DC21040
+ auto media sense algorithm is that it can now use one that is more in
+ line with the rest (the DC21040 chip doesn't have a hardware timer).
+ The downside is the 1 'jiffies' (10ms) resolution.
+
+ IEEE 802.3u MII interface code has been added in anticipation that some
+ products may use it in the future.
+
+ The SMC9332 card has a non-compliant SROM which needs fixing - I have
+ patched this driver to detect it because the SROM format used complies
+ to a previous DEC-STD format.
+
+ I have removed the buffer copies needed for receive on Intels. I cannot
+ remove them for Alphas since the Tulip hardware only does longword
+ aligned DMA transfers and the Alphas get alignment traps with non
+ longword aligned data copies (which makes them really slow). No comment.
+
+ I have added SROM decoding routines to make this driver work with any
+ card that supports the Digital Semiconductor SROM spec. This will help
+ all cards running the dc2114x series chips in particular. Cards using
+ the dc2104x chips should run correctly with the basic driver. I'm in
+ debt to <mjacob@feral.com> for the testing and feedback that helped get
+ this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
+ (with the latest SROM complying with the SROM spec V3: their first was
+ broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315
+ (quad 21041 MAC) cards also appear to work despite their incorrectly
+ wired IRQs.
+
+ I have added a temporary fix for interrupt problems when some SCSI cards
+ share the same interrupt as the DECchip based cards. The problem occurs
+ because the SCSI card wants to grab the interrupt as a fast interrupt
+ (runs the service routine with interrupts turned off) vs. this card
+ which really needs to run the service routine with interrupts turned on.
+ This driver will now add the interrupt service routine as a fast
+ interrupt if it is bounced from the slow interrupt. THIS IS NOT A
+ RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
+ until people sort out their compatibility issues and the kernel
+ interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
+ INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
+ run on the same interrupt. PCMCIA/CardBus is another can of worms...
+
+ Finally, I think I have really fixed the module loading problem with
+ more than one DECchip based card. As a side effect, I don't mess with
+ the device structure any more which means that if more than 1 card in
+ 2.0.x is installed (4 in 2.1.x), the user will have to edit
+ linux/drivers/net/Space.c to make room for them. Hence, module loading
+ is the preferred way to use this driver, since it doesn't have this
+ limitation.
+
+ Where SROM media detection is used and full duplex is specified in the
+ SROM, the feature is ignored unless lp->params.fdx is set at compile
+ time OR during a module load (insmod de4x5 args='eth??:fdx' [see
+ below]). This is because there is no way to automatically detect full
+ duplex links except through autonegotiation. When I include the
+ autonegotiation feature in the SROM autoconf code, this detection will
+ occur automatically for that case.
+
+ Command line arguments are now allowed, similar to passing arguments
+ through LILO. This will allow a per adapter board set up of full duplex
+ and media. The only lexical constraints are: the board name (dev->name)
+ appears in the list before its parameters. The list of parameters ends
+ either at the end of the parameter list or with another board name. The
+ following parameters are allowed:
+
+ fdx for full duplex
+ autosense to set the media/speed; with the following
+ sub-parameters:
+ TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
+
+ Case sensitivity is important for the sub-parameters. They *must* be
+ upper case. Examples:
+
+ insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+
+ For a compiled in driver, in linux/drivers/net/CONFIG, place e.g.
+ DE4X5_OPTS = -DDE4X5_PARM='"eth0:fdx autosense=AUI eth2:autosense=TP"'
+
+ Yes, I know full duplex isn't permissible on BNC or AUI; they're just
+ examples. By default, full duplex is turned off and AUTO is the default
+ autosense setting. In reality, I expect only the full duplex option to
+ be used. Note the use of single quotes in the two examples above and the
+ lack of commas to separate items.
+
+ TO DO:
+ ------
+
+ o check what revision numbers the 21142 and 21143 have
+ o
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 17-Nov-94 Initial writing. ALPHA code release.
+ 0.2 13-Jan-95 Added PCI support for DE435's.
+ 0.21 19-Jan-95 Added auto media detection.
+ 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
+ Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ Add request/release_region code.
+ Add loadable modules support for PCI.
+ Clean up loadable modules support.
+ 0.23 28-Feb-95 Added DC21041 and DC21140 support.
+ Fix missed frame counter value and initialisation.
+ Fixed EISA probe.
+ 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
+ Change TX_BUFFS_AVAIL macro.
+ Change media autodetection to allow manual setting.
+ Completed DE500 (DC21140) support.
+ 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
+ 0.242 10-May-95 Minor changes.
+ 0.30 12-Jun-95 Timer fix for DC21140.
+ Portability changes.
+ Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
+ Add DE500 semi automatic autosense.
+ Add Link Fail interrupt TP failure detection.
+ Add timer based link change detection.
+ Plugged a memory leak in de4x5_queue_pkt().
+ 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
+ 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
+ suggestion by <heiko@colossus.escape.de>.
+ 0.33 8-Aug-95 Add shared interrupt support (not released yet).
+ 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
+ Fix de4x5_interrupt().
+ Fix dc21140_autoconf() mess.
+ No shared interrupt support.
+ 0.332 11-Sep-95 Added MII management interface routines.
+ 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
+ Add kernel timer code (h/w is too flaky).
+ Add MII based PHY autosense.
+ Add new multicasting code.
+ Add new autosense algorithms for media/mode
+ selection using kernel scheduling/timing.
+ Re-formatted.
+ Made changes suggested by <jeff@router.patch.net>:
+ Change driver to detect all DECchip based cards
+ with DEC_ONLY restriction a special case.
+ Changed driver to autoprobe as a module. No irq
+ checking is done now - assume BIOS is good!
+ Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
+ 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
+ only <niles@axp745gsfc.nasa.gov>
+ Fix for multiple PCI cards reported by <jos@xos.nl>
+ Duh, put the SA_SHIRQ flag into request_interrupt().
+ Fix SMC ethernet address in enet_det[].
+ Print chip name instead of "UNKNOWN" during boot.
+ 0.42 26-Apr-96 Fix MII write TA bit error.
+ Fix bug in dc21040 and dc21041 autosense code.
+ Remove buffer copies on receive for Intels.
+ Change sk_buff handling during media disconnects to
+ eliminate DUP packets.
+ Add dynamic TX thresholding.
+ Change all chips to use perfect multicast filtering.
+ Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 21-Jun-96 Fix unconnected media TX retry bug.
+ Add Accton to the list of broken cards.
+ Fix TX under-run bug for non DC21140 chips.
+ Fix boot command probe bug in alloc_device() as
+ reported by <koen.gadeyne@barco.com> and
+ <orava@nether.tky.hut.fi>.
+ Add cache locks to prevent a race condition as
+ reported by <csd@microplex.com> and
+ <baba@beckman.uiuc.edu>.
+ Upgraded alloc_device() code.
+ 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
+ with <csd@microplex.com>
+ 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
+ Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
+ and <michael@compurex.com>.
+ 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
+ with a loopback packet.
+ 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
+ by <bhat@mundook.cs.mu.OZ.AU>
+ 0.45 8-Dec-96 Include endian functions for PPC use, from work
+ by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
+ 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
+ suggestion from <mjacob@feral.com>.
+ 0.5 30-Jan-97 Added SROM decoding functions.
+ Updated debug flags.
+ Fix sleep/wakeup calls for PCI cards, bug reported
+ by <cross@gweep.lkg.dec.com>.
+ Added multi-MAC, one SROM feature from discussion
+ with <mjacob@feral.com>.
+ Added full module autoprobe capability.
+ Added attempt to use an SMC9332 with broken SROM.
+ Added fix for ZYNX multi-mac cards that didn't
+ get their IRQs wired correctly.
+ 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
+ <paubert@iram.es>
+ Fix init_connection() to remove extra device reset.
+ Fix MAC/PHY reset ordering in dc21140m_autoconf().
+ Fix initialisation problem with lp->timeout in
+ typeX_infoblock() from <paubert@iram.es>.
+ Fix MII PHY reset problem from work done by
+ <paubert@iram.es>.
+ 0.52 26-Apr-97 Some changes may not credit the right people -
+ a disk crash meant I lost some mail.
+ Change RX interrupt routine to drop rather than
+ defer packets to avoid hang reported by
+ <g.thomas@opengroup.org>.
+ Fix srom_exec() to return for COMPACT and type 1
+ infoblocks.
+ Added DC21142 and DC21143 functions.
+ Added byte counters from <phil@tazenda.demon.co.uk>
+ Added SA_INTERRUPT temporary fix from
+ <mjacob@feral.com>.
+ 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
+ module load: bug reported by
+ <Piete.Brooks@cl.cam.ac.uk>
+ Fix multi-MAC, one SROM, to work with 2114x chips:
+ bug reported by <cmetz@inner.net>.
+ Make above search independent of BIOS device scan
+ direction.
+ Completed DC2114[23] autosense functions.
+ 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
+ <robin@intercore.com>
+ Fix type1_infoblock() bug introduced in 0.53, from
+ problem reports by
+ <parmee@postecss.ncrfran.france.ncr.com> and
+ <jo@ice.dillingen.baynet.de>.
+ Added argument list to set up each board from either
+ a module's command line or a compiled in #define.
+ Added generic MII PHY functionality to deal with
+ newer PHY chips.
+ Fix the mess in 2.1.67.
+ 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
+ <redhat@cococo.net>.
+ Fix bug in pci_probe() for 64 bit systems reported
+ by <belliott@accessone.com>.
+ 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
+ 0.534 24-Jan-98 Fix last (?) endian bug from
+ <Geert.Uytterhoeven@cs.kuleuven.ac.be>
+ 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
+ 0.5351 4-Oct-98 Atomicize assertion of dev->interrupt for SMP (not
+ for Alpha arch.) from <lma@varesearch.com>
+ Add TP, AUI and BNC cases to 21140m_autoconf() for
+ case where a 21140 under SROM control uses, e.g. AUI
+ from problem report by <delchini@lpnp09.in2p3.fr>
+ Add MII parallel detection to 2114x_autoconf() for
+ case where no autonegotiation partner exists from
+ problem report by <mlapsley@ndirect.co.uk>.
+ Add ability to force connection type directly even
+ when using SROM control from problem report by
+ <earl@exis.net>.
+ Fix is_anc_capable() bug reported by
+ <Austin.Donnelly@cl.cam.ac.uk>.
+ Fix type[13]_infoblock() bug: during MII search, PHY
+ lp->rst not run because lp->ibn not initialised -
+ from report & fix by <paubert@iram.es>.
+ Fix probe bug with EISA & PCI cards present from
+ report by <eirik@netcom.com>.
+ Fix compiler problems associated with i386-string
+ ops from multiple bug reports and temporary fix
+ from <paubert@iram.es>.
+ Add an_exception() for old ZYNX346 and fix compile
+ warning on PPC & SPARC, from <ecd@skynet.be>.
+ Fix lastPCI to correctly work with compiled in
+ kernels and modules from bug report by
+ <Zlatko.Calusic@CARNet.hr> et al.
+ Fix dc2114x_autoconf() to stop multiple messages
+ when media is unconnected.
+ Change dev->interrupt to lp->interrupt to ensure
+ alignment for Alpha's and avoid their unaligned
+ access traps. This flag is merely for log messages:
+ should do something more definitive though...
+
+ =========================================================================
+*/
+
+static const char *version = "de4x5.c:V0.5351 1998/10/4 davies@maniac.ultranet.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+
+#include "de4x5.h"
+
+#define c_char const char
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+# define __initfunc(__arginit) __arginit
+//# define test_and_set_bit set_bit
+# define net_device_stats enet_statistics
+# define copy_to_user(a,b,c) memcpy_tofs(a,b,c)
+# define copy_from_user(a,b,c) memcpy_fromfs(a,b,c)
+# define le16_to_cpu(a) cpu_to_le16(a)
+# define le32_to_cpu(a) cpu_to_le32(a)
+# ifdef __powerpc__
+# define cpu_to_le16(a) ((((a) & 0x00ffU) << 8) | (((a) & 0xff00U) >> 8))
+# define cpu_to_le32(a) ((((a) & 0x000000ffU) << 24) |\
+ (((a) & 0x0000ff00U) << 8) |\
+ (((a) & 0x00ff0000U) >> 8) |\
+ (((a) & 0xff000000U) >> 24))
+# else
+# define cpu_to_le16(a) (a)
+# define cpu_to_le32(a) (a)
+# endif /* __powerpc__ */
+# include <asm/segment.h>
+#else
+# include <asm/uaccess.h>
+# include <linux/init.h>
+#endif /* LINUX_VERSION_CODE */
+#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((u_short *)(a)))
+
+/*
+** MII Information
+*/
+struct phy_table {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time - 802.3u is confusing here */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+};
+
+/*
+** Per-PHY state for an MII-attached transceiver. Up to DE4X5_MAX_PHY of
+** these are kept in de4x5_private.phy[]; the first three fields and spd
+** mirror the static phy_table entries, the rest is per-board SROM/NWay
+** state filled in at probe time.
+*/
+struct mii_phy {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+ int addr; /* MII address for the PHY */
+ u_char *gep; /* Start of GEP sequence block in SROM */
+ u_char *rst; /* Start of reset sequence in SROM */
+ u_int mc; /* Media Capabilities */
+ u_int ana; /* NWay Advertisement */
+ u_int fdx; /* Full duplex capabilities for each media */
+ u_int ttm; /* Transmit Threshold Mode for each media */
+ u_int mci; /* 21142 MII Connector Interrupt info */
+};
+
+#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
+
+struct sia_phy {
+ u_char mc; /* Media Code */
+ u_char ext; /* csr13-15 valid when set */
+ int csr13; /* SIA Connectivity Register */
+ int csr14; /* SIA TX/RX Register */
+ int csr15; /* SIA General Register */
+ int gepc; /* SIA GEP Control Information */
+ int gep; /* SIA GEP Data */
+};
+
+/*
+** Define the known universe of PHY devices that can be
+** recognised by this driver.
+*/
+static struct phy_table phy_info[] = {
+ {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
+ {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
+ {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
+ {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
+ {0, 0x7810 , 1, {0x05, 0x0380, 0x0380}} /* Level One? */
+};
+
+/*
+** These GENERIC values assumes that the PHY devices follow 802.3u and
+** allow parallel detection to set the link partner ability register.
+** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
+*/
+#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
+#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
+#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
+
+/*
+** Define special SROM detection cases
+*/
+static c_char enet_det[][ETH_ALEN] = {
+ {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
+};
+
+#define SMC 1
+#define ACCTON 2
+
+/*
+** SROM Repair definitions. If a broken SROM is detected a card may
+** use this information to help figure out what to do. This is a
+** "stab in the dark" and so far for SMC9332's only.
+*/
+static c_char srom_repair_info[][100] = {
+ {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
+ 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
+ 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
+ 0x00,0x18,}
+};
+
+
+#ifdef DE4X5_DEBUG
+static int de4x5_debug = DE4X5_DEBUG;
+#else
+/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
+static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
+#endif
+
+/*
+** Allow per adapter set up. For modules this is simply a command line
+** parameter, e.g.:
+** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+**
+** For a compiled in driver, place e.g.
+** DE4X5_OPTS = -DDE4X5_PARM='"eth0:fdx autosense=AUI eth2:autosense=TP"'
+** in linux/drivers/net/CONFIG
+*/
+#ifdef DE4X5_PARM
+static char *args = DE4X5_PARM;
+#else
+static char *args = NULL;
+#endif
+
+struct parameters {
+ int fdx;
+ int autosense;
+};
+
+#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
+
+#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Ethernet Info
+*/
+#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
+#define IEEE802_3_SZ 1518 /* Packet + CRC */
+#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
+#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
+#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
+#define PKT_HDR_LEN 14 /* Addresses and data length info */
+#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
+#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
+
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+/*
+** EISA bus defines
+*/
+#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
+
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
+
+#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
+#define DE4X5_NAME_LENGTH 8
+
+/*
+** Ethernet PROM defines for DC21040 (identical redefinition of the
+** PROBE_LENGTH/ETH_PROM_SIG values defined earlier; harmless but redundant)
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** PCI Bus defines
+*/
+#define PCI_MAX_BUS_NUM 8
+#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
+#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
+#define NO_MORE_PCI -2 /* PCI bus search all done */
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define ALIGN8 ((u_long)8 - 1) /* 2 longword align */
+#define ALIGN16 ((u_long)16 - 1) /* 4 longword align */
+#define ALIGN32 ((u_long)32 - 1) /* 8 longword align */
+#define ALIGN64 ((u_long)64 - 1) /* 16 longword align */
+#define ALIGN128 ((u_long)128 - 1) /* 32 longword align */
+
+#define ALIGN ALIGN32 /* Keep the DC21040 happy... */
+#define CACHE_ALIGN CAL_16LONG
+#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
+/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
+#define DESC_ALIGN
+
+#ifndef DEC_ONLY /* See README.de4x5 for using this */
+static int dec_only = 0;
+#else
+static int dec_only = 1;
+#endif
+
+/*
+** DE4X5 IRQ ENABLE/DISABLE
+*/
+#define ENABLE_IRQs { \
+ imr |= lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Disable the IRQs */\
+}
+
+#define UNMASK_IRQs {\
+ imr |= lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
+}
+
+#define MASK_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Mask the IRQs */\
+}
+
+/*
+** DE4X5 START/STOP
+*/
+#define START_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr |= OMR_ST | OMR_SR;\
+ outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr &= ~(OMR_ST|OMR_SR);\
+ outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
+}
+
+/*
+** DE4X5 SIA RESET
+*/
+#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
+
+/*
+** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
+*/
+#define DE4X5_AUTOSENSE_MS 250
+
+/*
+** SROM Structure
+*/
+/*
+** In-memory image of the board's serial ROM (SROM). A copy is cached in
+** both struct bus_type and de4x5_private; the info[] area holds the
+** media/controller infoleaf parsed by srom_infoleaf_info()/srom_init().
+*/
+struct de4x5_srom {
+ char sub_vendor_id[2]; /* Subsystem vendor ID */
+ char sub_system_id[2]; /* Subsystem ID */
+ char reserved[12];
+ char id_block_crc; /* CRC over the ID block */
+ char reserved2;
+ char version; /* SROM format version */
+ char num_controllers; /* Controllers sharing this SROM */
+ char ieee_addr[6]; /* Station (MAC) address */
+ char info[100]; /* Infoleaf/infoblock media data */
+ short chksum; /* Checksum over the whole SROM */
+};
+#define SUB_VENDOR_ID 0x500a
+
+/*
+** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
+** and have sizes of both a power of 2 and a multiple of 4.
+** A size of 256 bytes for each buffer could be chosen because over 90% of
+** all packets in our network are <256 bytes long and 64 longword alignment
+** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
+** descriptors are needed for machines with an ALPHA CPU.
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 32 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
+ /* Multiple of 4 for DC21040 */
+ /* Allows 512 byte alignment */
+/*
+** One DC21040-family DMA descriptor (4 longwords, little-endian on the
+** wire). 'status' is volatile because the chip writes ownership/status
+** bits asynchronously; DESC_ALIGN optionally pads to force alignment
+** (must agree with DESC_SKIP_LEN above).
+*/
+struct de4x5_desc {
+ volatile s32 status; /* Ownership + RX/TX completion status */
+ u32 des1; /* Control bits and buffer byte counts */
+ u32 buf; /* Bus address of the data buffer */
+ u32 next; /* Bus address of next descriptor (chained mode) */
+ DESC_ALIGN
+};
+
+/*
+** The DE4X5 private structure
+*/
+#define DE4X5_PKT_STAT_SZ 16
+#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DE4X5_PKT_STAT_SZ */
+
+struct de4x5_private {
+ char adapter_name[80]; /* Adapter name */
+ u_long interrupt; /* Aligned ISR flag */
+ struct de4x5_desc rx_ring[NUM_RX_DESC]; /* RX descriptor ring */
+ struct de4x5_desc tx_ring[NUM_TX_DESC]; /* TX descriptor ring */
+ struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
+ struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
+ int rx_new, rx_old; /* RX descriptor ring pointers */
+ int tx_new, tx_old; /* TX descriptor ring pointers */
+ char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
+ char frame[64]; /* Min sized packet for loopback*/
+ struct net_device_stats stats; /* Public stats */
+ struct {
+ u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
+ u_int unicast;
+ u_int multicast;
+ u_int broadcast;
+ u_int excessive_collisions;
+ u_int tx_underruns;
+ u_int excessive_underruns;
+ u_int rx_runt_frames;
+ u_int rx_collision;
+ u_int rx_dribble;
+ u_int rx_overflow;
+ } pktStats;
+ char rxRingSize;
+ char txRingSize;
+ int bus; /* EISA or PCI */
+ int bus_num; /* PCI Bus number */
+ int device; /* Device number on PCI bus */
+ int state; /* Adapter OPENED or CLOSED */
+ int chipset; /* DC21040, DC21041 or DC21140 */
+ s32 irq_mask; /* Interrupt Mask (Enable) bits */
+ s32 irq_en; /* Summary interrupt bits */
+ int media; /* Media (eg TP), mode (eg 100B)*/
+ int c_media; /* Remember the last media conn */
+ int fdx; /* media full duplex flag */
+ int linkOK; /* Link is OK */
+ int autosense; /* Allow/disallow autosensing */
+ int tx_enable; /* Enable descriptor polling */
+ int setup_f; /* Setup frame filtering type */
+ int local_state; /* State within a 'media' state */
+ struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
+ struct sia_phy sia; /* SIA PHY Information */
+ int active; /* Index to active PHY device */
+ int mii_cnt; /* Number of attached PHY's */
+ int timeout; /* Scheduling counter */
+ struct timer_list timer; /* Timer info for kernel */
+ int tmp; /* Temporary global per card */
+ struct {
+ void *priv; /* Original kmalloc'd mem addr */
+ void *buf; /* Original kmalloc'd mem addr */
+ u_long lock; /* Lock the cache accesses */
+ s32 csr0; /* Saved Bus Mode Register */
+ s32 csr6; /* Saved Operating Mode Reg. */
+ s32 csr7; /* Saved IRQ Mask Register */
+ s32 gep; /* Saved General Purpose Reg. */
+ s32 gepc; /* Control info for GEP */
+ s32 csr13; /* Saved SIA Connectivity Reg. */
+ s32 csr14; /* Saved SIA TX/RX Register */
+ s32 csr15; /* Saved SIA General Register */
+ int save_cnt; /* Flag if state already saved */
+ struct sk_buff *skb; /* Save the (re-ordered) skb's */
+ } cache;
+ struct de4x5_srom srom; /* A copy of the SROM */
+ struct device *next_module; /* Link to the next module */
+ int rx_ovf; /* Check for 'RX overflow' tag */
+ int useSROM; /* For non-DEC card use SROM */
+ int useMII; /* Infoblock using the MII */
+ int asBitValid; /* Autosense bits in GEP? */
+ int asPolarity; /* 0 => asserted high */
+ int asBit; /* Autosense bit number in GEP */
+ int defMedium; /* SROM default medium */
+ int tcount; /* Last infoblock number */
+ int infoblock_init; /* Initialised this infoblock? */
+ int infoleaf_offset; /* SROM infoleaf for controller */
+ s32 infoblock_csr6; /* csr6 value in SROM infoblock */
+ int infoblock_media; /* infoblock media */
+ int (*infoleaf_fn)(struct device *); /* Pointer to infoleaf function */
+ u_char *rst; /* Pointer to Type 5 reset info */
+ u_char ibn; /* Infoblock number */
+ struct parameters params; /* Command line/ #defined params */
+};
+
+/*
+** Kludge to get around the fact that the CSR addresses have different
+** offsets in the PCI and EISA boards. Also note that the ethernet address
+** PROM is accessed differently.
+*/
+static struct bus_type {
+ int bus;
+ int bus_num;
+ int device;
+ int chipset;
+ struct de4x5_srom srom;
+ int autosense;
+ int useSROM;
+} bus;
+
+/*
+** To get around certain poxy cards that don't provide an SROM
+** for the second and more DECchip, I have to key off the first
+** chip's address. I'll assume there's not a bad SROM iff:
+**
+** o the chipset is the same
+** o the bus number is the same and > 0
+** o the sum of all the returned hw address bytes is 0 or 0x5fa
+**
+** Also have to save the irq for those cards whose hardware designers
+** can't follow the PCI to PCI Bridge Architecture spec.
+*/
+static struct {
+ int chipset;
+ int bus;
+ int irq;
+ u_char addr[ETH_ALEN];
+} last = {0,};
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingSize-lp->tx_new-1:\
+ lp->tx_old -lp->tx_new-1)
+
+#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
+
+/*
+** Public Functions
+*/
+static int de4x5_open(struct device *dev);
+static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
+static void de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int de4x5_close(struct device *dev);
+static struct net_device_stats *de4x5_get_stats(struct device *dev);
+static void de4x5_local_stats(struct device *dev, char *buf, int pkt_len);
+static void set_multicast_list(struct device *dev);
+static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int de4x5_hw_init(struct device *dev, u_long iobase);
+static int de4x5_init(struct device *dev);
+static int de4x5_sw_reset(struct device *dev);
+static int de4x5_rx(struct device *dev);
+static int de4x5_tx(struct device *dev);
+static int de4x5_ast(struct device *dev);
+static int de4x5_txur(struct device *dev);
+static int de4x5_rx_ovfc(struct device *dev);
+
+static int autoconf_media(struct device *dev);
+static void create_packet(struct device *dev, char *frame, int len);
+static void de4x5_us_delay(u32 usec);
+static void de4x5_ms_delay(u32 msec);
+static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
+static int dc21040_autoconf(struct device *dev);
+static int dc21041_autoconf(struct device *dev);
+static int dc21140m_autoconf(struct device *dev);
+static int dc2114x_autoconf(struct device *dev);
+static int srom_autoconf(struct device *dev);
+static int de4x5_suspect_state(struct device *dev, int timeout, int prev_state, int (*fn)(struct device *, int), int (*asfn)(struct device *));
+static int dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct device *, int));
+static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
+static int test_for_100Mb(struct device *dev, int msec);
+static int wait_for_link(struct device *dev);
+static int test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec);
+static int is_spd_100(struct device *dev);
+static int is_100_up(struct device *dev);
+static int is_10_up(struct device *dev);
+static int is_anc_capable(struct device *dev);
+static int ping_media(struct device *dev, int msec);
+static struct sk_buff *de4x5_alloc_rx_buff(struct device *dev, int index, int len);
+static void de4x5_free_rx_buffs(struct device *dev);
+static void de4x5_free_tx_buffs(struct device *dev);
+static void de4x5_save_skbs(struct device *dev);
+static void de4x5_rst_desc_ring(struct device *dev);
+static void de4x5_cache_state(struct device *dev, int flag);
+static void de4x5_put_cache(struct device *dev, struct sk_buff *skb);
+static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb);
+static struct sk_buff *de4x5_get_cache(struct device *dev);
+static void de4x5_setup_intr(struct device *dev);
+static void de4x5_init_connection(struct device *dev);
+static int de4x5_reset_phy(struct device *dev);
+static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
+static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
+static int test_tp(struct device *dev, s32 msec);
+static int EISA_signature(char *name, s32 eisa_id);
+static int PCI_signature(char *name, struct bus_type *lp);
+static void DevicePresent(u_long iobase);
+static void enet_addr_rst(u_long aprom_addr);
+static int de4x5_bad_srom(struct bus_type *lp);
+static short srom_rd(u_long address, u_char offset);
+static void srom_latch(u_int command, u_long address);
+static void srom_command(u_int command, u_long address);
+static void srom_address(u_int command, u_long address, u_char offset);
+static short srom_data(u_int command, u_long address);
+/*static void srom_busy(u_int command, u_long address);*/
+static void sendto_srom(u_int command, u_long addr);
+static int getfrom_srom(u_long addr);
+static int srom_map_media(struct device *dev);
+static int srom_infoleaf_info(struct device *dev);
+static void srom_init(struct device *dev);
+static void srom_exec(struct device *dev, u_char *p);
+static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
+static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
+static int mii_rdata(u_long ioaddr);
+static void mii_wdata(int data, int len, u_long ioaddr);
+static void mii_ta(u_long rw, u_long ioaddr);
+static int mii_swap(int data, int len);
+static void mii_address(u_char addr, u_long ioaddr);
+static void sendto_mii(u32 command, int data, u_long ioaddr);
+static int getfrom_mii(u32 command, u_long ioaddr);
+static int mii_get_oui(u_char phyaddr, u_long ioaddr);
+static int mii_get_phy(struct device *dev);
+static void SetMulticastFilter(struct device *dev);
+static int get_hw_addr(struct device *dev);
+static void srom_repair(struct device *dev, int card);
+static int test_bad_enet(struct device *dev, int status);
+static int an_exception(struct bus_type *lp);
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+static void eisa_probe(struct device *dev, u_long iobase);
+#endif
+static void pci_probe(struct device *dev, u_long iobase);
+static void srom_search(int index);
+static char *build_setup_frame(struct device *dev, int mode);
+static void disable_ast(struct device *dev);
+static void enable_ast(struct device *dev, u32 time_out);
+static long de4x5_switch_mac_port(struct device *dev);
+static int gep_rd(struct device *dev);
+static void gep_wr(s32 data, struct device *dev);
+static void timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec);
+static void yawn(struct device *dev, int state);
+static void link_modules(struct device *dev, struct device *tmp);
+static void de4x5_parse_params(struct device *dev);
+static void de4x5_dbg_open(struct device *dev);
+static void de4x5_dbg_mii(struct device *dev, int k);
+static void de4x5_dbg_media(struct device *dev);
+static void de4x5_dbg_srom(struct de4x5_srom *p);
+static void de4x5_dbg_rx(struct sk_buff *skb, int len);
+static int de4x5_strncmp(char *a, char *b, int n);
+static int dc21041_infoleaf(struct device *dev);
+static int dc21140_infoleaf(struct device *dev);
+static int dc21142_infoleaf(struct device *dev);
+static int dc21143_infoleaf(struct device *dev);
+static int type0_infoblock(struct device *dev, u_char count, u_char *p);
+static int type1_infoblock(struct device *dev, u_char count, u_char *p);
+static int type2_infoblock(struct device *dev, u_char count, u_char *p);
+static int type3_infoblock(struct device *dev, u_char count, u_char *p);
+static int type4_infoblock(struct device *dev, u_char count, u_char *p);
+static int type5_infoblock(struct device *dev, u_char count, u_char *p);
+static int compact_infoblock(struct device *dev, u_char count, u_char *p);
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static struct device *unlink_modules(struct device *p);
+static struct device *insert_device(struct device *dev, u_long iobase,
+ int (*init)(struct device *));
+static int count_adapters(void);
+static int loading_module = 1;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+MODULE_PARM(de4x5_debug, "i");
+MODULE_PARM(dec_only, "i");
+MODULE_PARM(args, "s");
+#endif /* LINUX_VERSION_CODE */
+# else
+static int loading_module = 0;
+#endif /* MODULE */
+
+static char name[DE4X5_NAME_LENGTH + 1];
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
+static int lastEISA = 0;
+#else
+static int lastEISA = MAX_EISA_SLOTS; /* Only PCI probes */
+#endif
+static int num_de4x5s = 0;
+static int cfrv = 0, useSROM = 0;
+static int lastPCI = -1;
+static struct device *lastModule = NULL;
+
+/*
+** List the SROM infoleaf functions and chipsets
+*/
+struct InfoLeaf {
+ int chipset;
+ int (*fn)(struct device *);
+};
+static struct InfoLeaf infoleaf_array[] = {
+ {DC21041, dc21041_infoleaf},
+ {DC21140, dc21140_infoleaf},
+ {DC21142, dc21142_infoleaf},
+ {DC21143, dc21143_infoleaf}
+};
+#define INFOLEAF_SIZE (sizeof(infoleaf_array)/(sizeof(int)+sizeof(int *)))
+
+/*
+** List the SROM info block functions
+*/
+static int (*dc_infoblock[])(struct device *dev, u_char, u_char *) = {
+ type0_infoblock,
+ type1_infoblock,
+ type2_infoblock,
+ type3_infoblock,
+ type4_infoblock,
+ type5_infoblock,
+ compact_infoblock
+};
+
+#define COMPACT (sizeof(dc_infoblock)/sizeof(int *) - 1)
+
+/*
+** Miscellaneous defines...
+*/
+#define RESET_DE4X5 {\
+ int i;\
+ i=inl(DE4X5_BMR);\
+ de4x5_ms_delay(1);\
+ outl(i | BMR_SWR, DE4X5_BMR);\
+ de4x5_ms_delay(1);\
+ outl(i, DE4X5_BMR);\
+ de4x5_ms_delay(1);\
+ for (i=0;i<5;i++) {inl(DE4X5_BMR); de4x5_ms_delay(1);}\
+ de4x5_ms_delay(1);\
+}
+
+#define PHY_HARD_RESET {\
+ outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
+ udelay(1000); /* Assert for 1ms */\
+ outl(0x00, DE4X5_GEP);\
+ udelay(2000); /* Wait for 2ms */\
+}
+
+
+/*
+** Autoprobing in modules is allowed here. See the top of the file for
+** more info.
+*/
+__initfunc(int
+de4x5_probe(struct device *dev))
+{
+ u_long iobase = dev->base_addr;
+
+ /* Scan EISA slots first, where the build supports them... */
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+ eisa_probe(dev, iobase);
+#endif
+ /* ...then fall through to PCI once all EISA slots are exhausted */
+ if (lastEISA == MAX_EISA_SLOTS) {
+ pci_probe(dev, iobase);
+ }
+
+ /* A successful probe leaves dev->priv set by de4x5_hw_init() */
+ return (dev->priv ? 0 : -ENODEV);
+}
+
+/*
+** Probe-time initialisation of one adapter: wake the chip, soft-reset it,
+** identify the board (PCI or EISA signature), read and verify the station
+** address, allocate the longword-aligned private area and the RX/TX
+** descriptor rings, parse any SROM infoleaf, then register the driver
+** entry points and put the chip back to sleep.
+** Returns 0 on success or a negative errno on failure.
+*/
+__initfunc(static int
+de4x5_hw_init(struct device *dev, u_long iobase))
+{
+ struct bus_type *lp = &bus;
+ int i, status=0;
+ char *tmp;
+
+ /* Ensure we're not sleeping */
+ if (lp->bus == EISA) {
+ outb(WAKEUP, PCI_CFPM);
+ } else {
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, WAKEUP);
+ }
+ de4x5_ms_delay(10);
+
+ /* Software reset via BMR_SWR (see RESET_DE4X5 macro) */
+ RESET_DE4X5;
+
+ if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
+ return -ENXIO; /* Hardware could not reset */
+ }
+
+ /*
+ ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
+ */
+ useSROM = FALSE;
+ if (lp->bus == PCI) {
+ PCI_signature(name, lp);
+ } else {
+ EISA_signature(name, EISA_ID0);
+ }
+
+ if (*name == '\0') { /* Not found a board signature */
+ return -ENXIO;
+ }
+
+ dev->base_addr = iobase;
+ if (lp->bus == EISA) {
+ printk("%s: %s at 0x%04lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase>>12)&0x0f));
+ } else { /* PCI port address */
+ printk("%s: %s at 0x%04lx (PCI bus %d, device %d)", dev->name, name,
+ iobase, lp->bus_num, lp->device);
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ if (status != 0) {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ return -ENXIO;
+ } else {
+ struct de4x5_private *lp;
+
+ /*
+ ** Reserve a section of kernel memory for the adapter
+ ** private area and the TX/RX descriptor rings.
+ */
+ dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
+ GFP_KERNEL);
+ if (dev->priv == NULL) {
+ return -ENOMEM;
+ }
+
+ /*
+ ** Align to a longword boundary
+ */
+ tmp = dev->priv;
+ dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
+ lp = (struct de4x5_private *)dev->priv;
+ memset(dev->priv, 0, sizeof(struct de4x5_private));
+ lp->bus = bus.bus;
+ lp->bus_num = bus.bus_num;
+ lp->device = bus.device;
+ lp->chipset = bus.chipset;
+ lp->cache.priv = tmp; /* keep the unaligned pointer for kfree() */
+ lp->cache.gepc = GEP_INIT;
+ lp->asBit = GEP_SLNK;
+ lp->asPolarity = GEP_SLNK;
+ lp->asBitValid = TRUE;
+ lp->timeout = -1;
+ lp->useSROM = useSROM;
+ memcpy((char *)&lp->srom,(char *)&bus.srom,sizeof(struct de4x5_srom));
+ de4x5_parse_params(dev);
+
+ /*
+ ** Choose correct autosensing in case someone messed up
+ */
+ lp->autosense = lp->params.autosense;
+ if (lp->chipset != DC21140) {
+ if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
+ lp->params.autosense = TP;
+ }
+ if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
+ lp->params.autosense = BNC;
+ }
+ }
+ lp->fdx = lp->params.fdx;
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+
+ /*
+ ** Set up the RX descriptor ring (Intels)
+ ** Allocate contiguous receive buffers, long word aligned (Alphas)
+ */
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = RX_BUFF_SZ;
+ lp->rx_ring[i].buf = 0;
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+
+#else
+ if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
+ GFP_KERNEL)) == NULL) {
+ kfree(lp->cache.priv);
+ return -ENOMEM;
+ }
+
+ lp->cache.buf = tmp;
+ tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
+ lp->rx_ring[i].buf = cpu_to_le32(virt_to_bus(tmp+i*RX_BUFF_SZ));
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+#endif
+
+ barrier();
+
+ request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
+ DE4X5_EISA_TOTAL_SIZE),
+ lp->adapter_name);
+
+ lp->rxRingSize = NUM_RX_DESC;
+ lp->txRingSize = NUM_TX_DESC;
+
+ /* Write the end of list marker to the descriptor lists */
+ lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
+ lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
+
+ /* Tell the adapter where the TX/RX rings are located. */
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ /* Initialise the IRQ mask and Enable/Disable */
+ lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
+ lp->irq_en = IMR_NIM | IMR_AIM;
+
+ /* Create a loopback packet frame for later media probing */
+ create_packet(dev, lp->frame, sizeof(lp->frame));
+
+ /* Check if the RX overflow bug needs testing for */
+ i = cfrv & 0x000000fe; /* chip revision from the PCI CFRV register */
+ if ((lp->chipset == DC21140) && (i == 0x20)) {
+ lp->rx_ovf = 1;
+ }
+
+ /* Initialise the SROM pointers if possible */
+ if (lp->useSROM) {
+ lp->state = INITIALISED;
+ if (srom_infoleaf_info(dev)) {
+ return -ENXIO;
+ }
+ srom_init(dev);
+ }
+
+ lp->state = CLOSED;
+
+ /*
+ ** Check for an MII interface
+ */
+ if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
+ mii_get_phy(dev);
+ }
+
+#ifndef __sparc_v9__
+ printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
+#else
+ printk(" and requires IRQ%x (provided by %s).\n", dev->irq,
+#endif
+ ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
+ }
+
+ if (de4x5_debug & DEBUG_VERSION) {
+ printk("%s", version);
+ }
+
+ /* The DE4X5-specific entries in the device structure. */
+ dev->open = &de4x5_open;
+ dev->hard_start_xmit = &de4x5_queue_pkt;
+ dev->stop = &de4x5_close;
+ dev->get_stats = &de4x5_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &de4x5_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic fields of the device structure. */
+ ether_setup(dev);
+
+ /* Let the adapter sleep to save power */
+ yawn(dev, SLEEP);
+
+ return status;
+}
+
+
+/*
+** Bring the interface up: allocate RX buffers, wake and re-initialise the
+** chip, claim the IRQ (shared, with a fast-IRQ fallback), then start the
+** TX/RX engines and enable interrupts.
+** Returns 0 on success, -EAGAIN if buffers or the IRQ cannot be obtained.
+*/
+static int
+de4x5_open(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ s32 omr;
+
+ /* Allocate the RX buffers */
+ for (i=0; i<lp->rxRingSize; i++) {
+ if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
+ de4x5_free_rx_buffs(dev);
+ return -EAGAIN;
+ }
+ }
+
+ /*
+ ** Wake up the adapter
+ */
+ yawn(dev, WAKEUP);
+
+ /*
+ ** Re-initialize the DE4X5...
+ */
+ status = de4x5_init(dev);
+
+ lp->state = OPEN;
+ de4x5_dbg_open(dev);
+
+ /* Try a shared (slow) IRQ first; retry with SA_INTERRUPT (fast) if busy */
+ if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
+ lp->adapter_name, dev)) {
+ printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
+ if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
+ lp->adapter_name, dev)) {
+ printk("\n Cannot get IRQ- reconfigure your hardware.\n");
+ disable_ast(dev);
+ de4x5_free_rx_buffs(dev);
+ de4x5_free_tx_buffs(dev);
+ yawn(dev, SLEEP);
+ lp->state = CLOSED;
+ return -EAGAIN;
+ } else {
+ printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
+ printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
+ }
+ }
+
+ dev->tbusy = 0; /* clear the lock taken in de4x5_init() */
+ dev->start = 1;
+ lp->interrupt = UNMASK_INTERRUPTS;
+ dev->trans_start = jiffies;
+
+ START_DE4X5;
+
+ de4x5_setup_intr(dev);
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
+ printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
+ printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
+ printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
+ printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
+ printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
+ printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
+ printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/*
+** Initialize the DE4X5 operating conditions. NB: a chip problem with the
+** DC21140 requires using perfect filtering mode for that chip. Since I can't
+** see why I'd want > 14 multicast addresses, I have changed all chips to use
+** the perfect filtering mode. Keep the DMA burst length at 8: there seems
+** to be data corruption problems if it is larger (UDP errors seen from a
+** ttcp source).
+*/
+static int
+de4x5_init(struct device *dev)
+{
+ /* Lock out other processes whilst setting up the hardware */
+ /* NOTE: tbusy is left set here; de4x5_open() clears it once running */
+ test_and_set_bit(0, (void *)&dev->tbusy);
+
+ de4x5_sw_reset(dev);
+
+ /* Autoconfigure the connected port */
+ autoconf_media(dev);
+
+ return 0;
+}
+
+/*
+** Software reset/reconfigure of the MAC: select the MII or SRL port,
+** program the DMA burst length, rebuild and re-arm the RX/TX descriptor
+** rings, then load the (perfect-filtering) setup frame and poll for its
+** completion with adapter interrupts disabled.
+** Returns 0 on success, -EIO if the setup frame times out.
+*/
+static int
+de4x5_sw_reset(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 bmr, omr;
+
+ /* Select the MII or SRL port now and RESET the MAC */
+ if (!lp->useSROM) {
+ if (lp->phy[lp->active].id != 0) {
+ lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
+ } else {
+ lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
+ }
+ de4x5_switch_mac_port(dev);
+ }
+
+ /*
+ ** Set the programmable burst length to 8 longwords for all the DC21140
+ ** Fasternet chips and 4 longwords for all others: DMA errors result
+ ** without these values. Cache align 16 long.
+ */
+ bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | CACHE_ALIGN;
+ bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
+ outl(bmr, DE4X5_BMR);
+
+ omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
+ if (lp->chipset == DC21140) {
+ omr |= (OMR_SDP | OMR_SB);
+ }
+ lp->setup_f = PERFECT;
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ /* Give all RX descriptors back to the chip, reclaim all TX descriptors */
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ barrier();
+
+ /* Build the setup frame depending on filtering mode */
+ SetMulticastFilter(dev);
+
+ load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
+ outl(omr|OMR_ST, DE4X5_OMR);
+
+ /* Poll for setup frame completion (adapter interrupts are disabled now) */
+ sti(); /* Ensure timer interrupts */
+ for (j=0, i=0;(i<500) && (j==0);i++) { /* Upto 500ms delay */
+ udelay(1000);
+ if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
+ }
+ outl(omr, DE4X5_OMR); /* Stop everything! */
+
+ if (j == 0) {
+ printk("%s: Setup frame timed out, status %08x\n", dev->name,
+ inl(DE4X5_STS));
+ status = -EIO;
+ }
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ lp->tx_old = lp->tx_new;
+
+ return status;
+}
+
+/*
+** Writes a socket buffer address to the next available transmit descriptor.
+*/
static int
de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    int status = 0;

    test_and_set_bit(0, (void*)&dev->tbusy); /* Stop send re-tries */
    if (lp->tx_enable == NO) {          /* Cannot send for now */
	return -1;
    }

    /*
    ** Clean out the TX ring asynchronously to interrupts - sometimes the
    ** interrupts are lost by delayed descriptor status updates relative to
    ** the irq assertion, especially with a busy PCI bus.
    */
    cli();
    de4x5_tx(dev);
    sti();

    /* Test if cache is already locked - requeue skb if so */
    if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
	return -1;

    /* Transmit descriptor ring full or stale skb */
    if (dev->tbusy || lp->tx_skb[lp->tx_new]) {
	if (lp->interrupt) {
	    de4x5_putb_cache(dev, skb); /* Requeue the buffer */
	} else {
	    de4x5_put_cache(dev, skb);
	}
	if (de4x5_debug & DEBUG_TX) {
	    printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%ld\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), dev->tbusy, inl(DE4X5_IMR), inl(DE4X5_OMR), (lp->tx_skb[lp->tx_new] ? "YES" : "NO"));
	}
    } else if (skb->len > 0) {
	/* If we already have stuff queued locally, use that first */
	if (lp->cache.skb && !lp->interrupt) {
	    de4x5_put_cache(dev, skb);
	    skb = de4x5_get_cache(dev);
	}

	/* Load descriptors until the ring fills or the local cache drains */
	while (skb && !dev->tbusy && !lp->tx_skb[lp->tx_new]) {
	    cli();                      /* Descriptor load must not race irqs */
	    test_and_set_bit(0, (void*)&dev->tbusy);
	    load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
#if LINUX_VERSION_CODE >= ((2 << 16) | (1 << 8))
	    lp->stats.tx_bytes += skb->len;
#endif
	    outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */

	    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
	    dev->trans_start = jiffies;

	    if (TX_BUFFS_AVAIL) {
		dev->tbusy = 0;         /* Another pkt may be queued */
	    }
	    skb = de4x5_get_cache(dev);
	    sti();
	}
	/* Ring filled before the cache drained: requeue the leftover skb */
	if (skb) de4x5_putb_cache(dev, skb);
    }

    lp->cache.lock = 0;                 /* Release the cache lock */

    return status;
}
+
+/*
+** The DE4X5 interrupt handler.
+**
+** I/O Read/Writes through intermediate PCI bridges are never 'posted',
+** so that the asserted interrupt always has some real data to work with -
+** if these I/O accesses are ever changed to memory accesses, ensure the
+** STS write is read immediately to complete the transaction if the adapter
+** is not on bus 0. Lost interrupts can still occur when the PCI bus load
+** is high and descriptor status bits cannot be set before the associated
+** interrupt is asserted and this routine entered.
+*/
static void
de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct device *dev = (struct device *)dev_id;
    struct de4x5_private *lp;
    s32 imr, omr, sts, limit;           /* imr/omr are scratch, presumably
					   used by the IRQ macros below */
    u_long iobase;

    if (dev == NULL) {
	printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
	return;
    }
    lp = (struct de4x5_private *)dev->priv;
    iobase = dev->base_addr;

    DISABLE_IRQs;                       /* Ensure non re-entrancy */

    if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
	printk("%s: Re-entering the interrupt handler.\n", dev->name);

#if LINUX_VERSION_CODE >= ((2 << 16) | (1 << 8))
    synchronize_irq();
#endif

    /* Service at most 8 rounds of events per invocation */
    for (limit=0; limit<8; limit++) {
	sts = inl(DE4X5_STS);           /* Read IRQ status */
	outl(sts, DE4X5_STS);           /* Reset the board interrupts */

	if (!(sts & lp->irq_mask)) break;/* All done */

	if (sts & (STS_RI | STS_RU))    /* Rx interrupt (packet[s] arrived) */
	    de4x5_rx(dev);

	if (sts & (STS_TI | STS_TU))    /* Tx interrupt (packet sent) */
	    de4x5_tx(dev);

	if (sts & STS_LNF) {            /* TP Link has failed */
	    lp->irq_mask &= ~IMR_LFM;
	}

	if (sts & STS_UNF) {            /* Transmit underrun */
	    de4x5_txur(dev);
	}

	if (sts & STS_SE) {             /* Bus Error */
	    STOP_DE4X5;
	    printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
		   dev->name, sts);
	    /* NOTE(review): this path returns with the adapter IRQs still
	    ** masked and lp->interrupt still set - the device is left dead
	    ** after a fatal bus error; confirm this is intentional.
	    */
	    return;
	}
    }

    /* Load the TX ring with any locally stored packets */
    if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
	while (lp->cache.skb && !dev->tbusy && lp->tx_enable) {
	    de4x5_queue_pkt(de4x5_get_cache(dev), dev);
	}
	lp->cache.lock = 0;
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    ENABLE_IRQs;

    return;
}
+
/*
** Receive handler: walk every RX descriptor the chip has handed back to
** the host (OWN bit clear, i.e. status >= 0), push complete good frames
** up the stack, account errors, and return descriptor ownership to the
** adapter.
*/
static int
de4x5_rx(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    int entry;
    s32 status;

    for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
	 entry=lp->rx_new) {
	status = (s32)le32_to_cpu(lp->rx_ring[entry].status);

	/* An earlier overflow was flagged: resync the ring if the chip's
	** missed-frame counter confirms a FIFO overflow condition.
	*/
	if (lp->rx_ovf) {
	    if (inl(DE4X5_MFC) & MFC_FOCM) {
		de4x5_rx_ovfc(dev);
		break;
	    }
	}

	if (status & RD_FS) {           /* Remember the start of frame */
	    lp->rx_old = entry;
	}

	if (status & RD_LS) {           /* Valid frame status */
	    if (lp->tx_enable) lp->linkOK++;
	    if (status & RD_ES) {       /* There was an error. */
		lp->stats.rx_errors++;  /* Update the error stats. */
		if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
		if (status & RD_CE) lp->stats.rx_crc_errors++;
		if (status & RD_OF) lp->stats.rx_fifo_errors++;
		if (status & RD_TL) lp->stats.rx_length_errors++;
		if (status & RD_RF) lp->pktStats.rx_runt_frames++;
		if (status & RD_CS) lp->pktStats.rx_collision++;
		if (status & RD_DB) lp->pktStats.rx_dribble++;
		if (status & RD_OF) lp->pktStats.rx_overflow++;
	    } else {                    /* A valid frame received */
		struct sk_buff *skb;
		/* Frame length lives in the top 16 bits; drop the 4 CRC
		** bytes.
		*/
		short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
					>> 16) - 4;

		if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
		    printk("%s: Insufficient memory; nuking packet.\n",
			   dev->name);
		    lp->stats.rx_dropped++;
		} else {
		    de4x5_dbg_rx(skb, pkt_len);

		    /* Push up the protocol stack */
		    skb->protocol=eth_type_trans(skb,dev);
		    netif_rx(skb);

		    /* Update stats */
		    lp->stats.rx_packets++;
#if LINUX_VERSION_CODE >= ((2 << 16) | (1 << 8))
		    lp->stats.rx_bytes += pkt_len;
#endif
		    de4x5_local_stats(dev, skb->data, pkt_len);
		}
	    }

	    /* Change buffer ownership for this frame, back to the adapter */
	    for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
		lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
		barrier();
	    }
	    lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
	    barrier();
	}

	/*
	** Update entry information
	*/
	lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
    }

    return 0;
}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
static int
de4x5_tx(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    int entry;
    s32 status;

    /* Reclaim completed descriptors from tx_old up to (not including) tx_new */
    for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
	status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
	if (status < 0) {               /* Buffer not sent yet */
	    break;
	} else if (status != 0x7fffffff) { /* Not setup frame */
	    if (status & TD_ES) {       /* An error happened */
		lp->stats.tx_errors++;
		if (status & TD_NC) lp->stats.tx_carrier_errors++;
		if (status & TD_LC) lp->stats.tx_window_errors++;
		if (status & TD_UF) lp->stats.tx_fifo_errors++;
		if (status & TD_EC) lp->pktStats.excessive_collisions++;
		if (status & TD_DE) lp->stats.tx_aborted_errors++;

		if (TX_PKT_PENDING) {
		    outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
		}
	    } else {                    /* Packet sent */
		lp->stats.tx_packets++;
		if (lp->tx_enable) lp->linkOK++;
	    }
	    /* Update the collision counter: 16 on excessive collisions,
	    ** otherwise the TD_CC field extracted from the status.
	    */
	    lp->stats.collisions += ((status & TD_EC) ? 16 :
				     ((status & TD_CC) >> 3));

	    /* Free the buffer. */
	    if (lp->tx_skb[entry] != NULL) {
		dev_kfree_skb(lp->tx_skb[entry], FREE_WRITE);
		lp->tx_skb[entry] = NULL;
	    }
	}

	/* Update all the pointers */
	lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
    }

    if (TX_BUFFS_AVAIL && dev->tbusy) { /* Any resources available? */
	dev->tbusy = 0;                 /* Clear TX busy flag */
	if (lp->interrupt) mark_bh(NET_BH);
    }

    return 0;
}
+
+static int
+de4x5_ast(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ disable_ast(dev);
+
+ if (lp->useSROM) {
+ next_tick = srom_autoconf(dev);
+ } else if (lp->chipset == DC21140) {
+ next_tick = dc21140m_autoconf(dev);
+ } else if (lp->chipset == DC21041) {
+ next_tick = dc21041_autoconf(dev);
+ } else if (lp->chipset == DC21040) {
+ next_tick = dc21040_autoconf(dev);
+ }
+ lp->linkOK = 0;
+ enable_ast(dev, next_tick);
+
+ return 0;
+}
+
/*
** Handle a transmit underrun (STS_UNF): stop the transmitter/receiver, then
** either step the TX threshold field up one notch or, once it saturates,
** set OMR_SF (presumably store-and-forward mode - confirm against the
** DC21x4x data sheet), and restart.
*/
static int
de4x5_txur(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    int omr;

    omr = inl(DE4X5_OMR);
    if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
	omr &= ~(OMR_ST|OMR_SR);        /* Stop TX and RX */
	outl(omr, DE4X5_OMR);
	while (inl(DE4X5_STS) & STS_TS); /* Busy-wait for TX to really stop */
	if ((omr & OMR_TR) < OMR_TR) {
	    omr += 0x4000;              /* Next step of the OMR_TR field */
	} else {
	    omr |= OMR_SF;              /* Field saturated: set OMR_SF */
	}
	outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); /* Restart TX and RX */
    }

    return 0;
}
+
/*
** Recover from a receive overflow: stop the receiver, hand every
** host-owned RX descriptor back to the adapter, then restart with the
** saved operating mode.
*/
static int
de4x5_rx_ovfc(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    int omr;

    omr = inl(DE4X5_OMR);
    outl(omr & ~OMR_SR, DE4X5_OMR);     /* Clear OMR_SR to stop the receiver */
    while (inl(DE4X5_STS) & STS_RS);    /* Busy-wait for RX to really stop */

    /* Return ownership of every host-owned descriptor to the adapter */
    for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
	lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
	lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
    }

    outl(omr, DE4X5_OMR);               /* Restore the previous mode */

    return 0;
}
+
/*
** Shut the interface down: stop the autosense timer, mask the adapter
** interrupts, halt TX/RX, release the IRQ and all ring buffers, and
** finally power the chip down.
*/
static int
de4x5_close(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    s32 imr, omr;                       /* scratch, presumably used by the
					   DISABLE_IRQs/STOP_DE4X5 macros */

    disable_ast(dev);                   /* No more autosense callbacks */
    dev->start = 0;
    dev->tbusy = 1;                     /* Refuse further transmits */

    if (de4x5_debug & DEBUG_CLOSE) {
	printk("%s: Shutting down ethercard, status was %8.8x.\n",
	       dev->name, inl(DE4X5_STS));
    }

    /*
    ** We stop the DE4X5 here... mask interrupts and stop TX & RX
    */
    DISABLE_IRQs;
    STOP_DE4X5;

    /* Free the associated irq */
    free_irq(dev->irq, dev);
    lp->state = CLOSED;

    /* Free any socket buffers */
    de4x5_free_rx_buffs(dev);
    de4x5_free_tx_buffs(dev);

    MOD_DEC_USE_COUNT;

    /* Put the adapter to sleep to save power */
    yawn(dev, SLEEP);

    return 0;
}
+
+static struct net_device_stats *
+de4x5_get_stats(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
+
+ return &lp->stats;
+}
+
+static void
+de4x5_local_stats(struct device *dev, char *buf, int pkt_len)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DE4X5_PKT_STAT_SZ;
+ }
+ }
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+
+ return;
+}
+
/*
** Fill in the next TX descriptor: point it at 'buf', set the control
** flags and only then pass ownership to the chip.  'skb' may be NULL (the
** setup-frame callers pass NULL); otherwise it is remembered so de4x5_tx()
** can free it when transmission completes.
*/
static void
load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;

    lp->tx_ring[lp->tx_new].buf = cpu_to_le32(virt_to_bus(buf));
    /* Preserve only the ring-end bit, then OR in the new control flags */
    lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
    lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
    lp->tx_skb[lp->tx_new] = skb;
    barrier();                  /* Descriptor fully set up before OWN flips */
    lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
    barrier();

    return;
}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
static void
set_multicast_list(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */

    /* First, double check that the adapter is open */
    if (lp->state == OPEN) {
	if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
	    u32 omr;
	    omr = inl(DE4X5_OMR);
	    omr |= OMR_PR;
	    outl(omr, DE4X5_OMR);
	} else {
	    /* Rebuild the setup frame and queue it on the TX ring */
	    SetMulticastFilter(dev);
	    load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
			SETUP_FRAME_LEN, NULL);

	    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
	    outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
	    dev->trans_start = jiffies;
	}
    }

    return;
}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+*/
static void
SetMulticastFilter(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    struct dev_mc_list *dmi=dev->mc_list;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    int i, j, bit, byte;
    u16 hashcode;
    u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
    char *pa;
    unsigned char *addrs;

    omr = inl(DE4X5_OMR);
    omr &= ~(OMR_PR | OMR_PM);          /* Clear promiscuous/pass-all bits */
    pa = build_setup_frame(dev, ALL);   /* Build the basic frame */

    if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
	omr |= OMR_PM;                  /* Pass all multicasts */
    } else if (lp->setup_f == HASH_PERF) {   /* Hash Filtering */
	for (i=0;i<dev->mc_count;i++) {      /* for each address in the list */
	    addrs=dmi->dmi_addr;
	    dmi=dmi->next;
	    if ((*addrs & 0x01) == 1) {      /* multicast address? */
		crc = 0xffffffff;            /* init CRC for each address */
		for (byte=0;byte<ETH_ALEN;byte++) {/* for each address byte */
		    /* process each address bit */
		    for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
			crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
		    }
		}
		hashcode = crc & HASH_BITS;  /* hashcode is 9 LSb of CRC */

		byte = hashcode >> 3;        /* bit[3-8] -> byte in filter */
		bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */

		byte <<= 1;                  /* calc offset into setup frame */
		if (byte & 0x02) {
		    byte -= 1;
		}
		lp->setup_frame[byte] |= bit;
	    }
	}
    } else {                            /* Perfect filtering */
	/* Copy each address into the setup frame's interleaved layout:
	** two bytes of address per 4-byte step of the frame pointer.
	*/
	for (j=0; j<dev->mc_count; j++) {
	    addrs=dmi->dmi_addr;
	    dmi=dmi->next;
	    for (i=0; i<ETH_ALEN; i++) {
		*(pa + (i&1)) = *addrs++;
		if (i & 0x01) pa += 4;
	    }
	}
    }
    outl(omr, DE4X5_OMR);

    return;
}
+
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard. Upto 15 EISA devices are supported.
+*/
__initfunc(static void
eisa_probe(struct device *dev, u_long ioaddr))
{
    int i, maxSlots, status, device;
    u_char irq;
    u_short vendor;
    u32 cfid;
    u_long iobase;
    struct bus_type *lp = &bus;
    char name[DE4X5_STRLEN];

    if (lastEISA == MAX_EISA_SLOTS) return;/* No more EISA devices to search */

    lp->bus = EISA;

    if (ioaddr == 0) {                  /* Autoprobing */
	iobase = EISA_SLOT_INC;         /* Get the first slot address */
	i = 1;
	maxSlots = MAX_EISA_SLOTS;
    } else {                            /* Probe a specific location */
	iobase = ioaddr;
	i = (ioaddr >> 12);
	maxSlots = i + 1;
    }

    for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
	if (EISA_signature(name, EISA_ID)) {
	    cfid = (u32) inl(PCI_CFID);
	    /* NOTE(review): cfrv has no local declaration here - presumably
	    ** a file-scope variable shared with the PCI probe code; verify.
	    */
	    cfrv = (u_short) inl(PCI_CFRV);
	    device = (cfid >> 8) & 0x00ffff00;
	    vendor = (u_short) cfid;

	    /* Read the EISA Configuration Registers */
	    irq = inb(EISA_REG0);
	    irq = de4x5_irq[(irq >> 1) & 0x03];

	    if (is_DC2114x) device |= (cfrv & CFRV_RN);
	    lp->chipset = device;

	    /* Write the PCI Configuration Registers */
	    outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
	    outl(0x00006000, PCI_CFLT);
	    outl(iobase, PCI_CBIO);

	    DevicePresent(EISA_APROM);
	    if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
		dev->irq = irq;
		if ((status = de4x5_hw_init(dev, iobase)) == 0) {
		    num_de4x5s++;
		    if (loading_module) link_modules(lastModule, dev);
		    lastEISA = i;       /* Resume from this slot next call */
		    return;
		}
	    } else if (ioaddr != 0) {
		printk("%s: region already allocated at 0x%04lx.\n", dev->name,iobase);
	    }
	}
    }

    if (ioaddr == 0) lastEISA = i;

    return;
}
+#endif /* !(__sparc_v9__) && !(__powerpc__) && !defined(__alpha__)*/
+
+/*
+** PCI bus I/O device probe
+** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
+** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be
+** enabled by the user first in the set up utility. Hence we just check for
+** enabled features and silently ignore the card if they're not.
+**
+** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering
+** bit. Here, check for I/O accesses and then set BM. If you put the card in
+** a non BM slot, you're on your own (and complain to the PC vendor that your
+** PC doesn't conform to the PCI standard)!
+*/
+#define PCI_DEVICE (dev_num << 3)
+#define PCI_LAST_DEV 32
+
__initfunc(static void
pci_probe(struct device *dev, u_long ioaddr))
{
    u_char pb, pbus, dev_num, dnum, dev_fn, timer, tirq;
    u_short dev_id, vendor, index, status;
    u_int tmp, irq = 0, device, class = DE4X5_CLASS_CODE;
    u_long iobase = 0;                  /* Clear upper 32 bits in Alphas */
    struct bus_type *lp = &bus;

    if (lastPCI == NO_MORE_PCI) return; /* Previous scan exhausted the bus */

    if (!pcibios_present()) {
	lastPCI = NO_MORE_PCI;
	return;                         /* No PCI bus in this machine! */
    }

    lp->bus = PCI;
    lp->bus_num = 0;

    /* A small module-load ioaddr encodes <bus><device> to probe directly */
    if ((ioaddr < 0x1000) && loading_module) {
	pbus = (u_short)(ioaddr >> 8);
	dnum = (u_short)(ioaddr & 0xff);
    } else {
	pbus = 0;
	dnum = 0;
    }

    /* Resume the class-code scan where the previous call left off */
    for (index=lastPCI+1;
	 (pcibios_find_class(class, index, &pb, &dev_fn)== PCIBIOS_SUCCESSFUL);
	 index++) {
	dev_num = PCI_SLOT(dev_fn);
	if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
#ifdef __sparc_v9__
	    struct pci_dev *pdev;
	    for (pdev = pci_devices; pdev; pdev = pdev->next) {
		if ((pdev->bus->number==pb) && (pdev->devfn==dev_fn)) break;
	    }
#endif
	    device = 0;
	    pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
	    pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &dev_id);
	    device = dev_id;
	    device <<= 8;
	    /* Skip anything that is not a DECchip we recognise */
	    if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) {
		continue;
	    }

	    /* Search for an SROM on this bus */
	    if (lp->bus_num != pb) {
		lp->bus_num = pb;
		srom_search(index);
	    }

	    /* Get the chip configuration revision register */
	    pcibios_read_config_dword(pb, PCI_DEVICE, PCI_REVISION_ID, &cfrv);

	    /* Set the device number information */
	    lp->device = dev_num;
	    lp->bus_num = pb;

	    /* Set the chipset information */
	    if (is_DC2114x) device |= (cfrv & CFRV_RN);
	    lp->chipset = device;

	    /* Get the board I/O address (64 bits on sparc64) */
#ifndef __sparc_v9__
	    pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &tmp);
	    iobase = tmp;
#else
	    iobase = pdev->base_address[0];
#endif
	    iobase &= CBIO_MASK;

	    /* Fetch the IRQ to be used */
#ifndef __sparc_v9__
	    pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &tirq);
	    irq = tirq;
#else
	    irq = pdev->irq;
#endif
	    if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;

	    /* Check if I/O accesses and Bus Mastering are enabled */
	    pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
#ifdef __powerpc__
	    /* Some PPC firmware leaves I/O access off: try to enable it */
	    if (!(status & PCI_COMMAND_IO)) {
		status |= PCI_COMMAND_IO;
		pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
		pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
	    }
#endif /* __powerpc__ */
	    if (!(status & PCI_COMMAND_IO)) continue;

	    /* Some BIOS's require the driver to enable bus mastering itself */
	    if (!(status & PCI_COMMAND_MASTER)) {
		status |= PCI_COMMAND_MASTER;
		pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
		pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
	    }
	    if (!(status & PCI_COMMAND_MASTER)) continue;

	    /* Check the latency timer for values >= 0x60 */
	    pcibios_read_config_byte(pb, PCI_DEVICE, PCI_LATENCY_TIMER, &timer);
	    if (timer < 0x60) {
		pcibios_write_config_byte(pb, PCI_DEVICE, PCI_LATENCY_TIMER, 0x60);
	    }

	    DevicePresent(DE4X5_APROM);
	    if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
		dev->irq = irq;
		if ((status = de4x5_hw_init(dev, iobase)) == 0) {
		    num_de4x5s++;
		    lastPCI = index;    /* Resume from here on the next call */
		    if (loading_module) link_modules(lastModule, dev);
		    return;
		}
	    } else if (ioaddr != 0) {
		printk("%s: region already allocated at 0x%04lx.\n", dev->name,
		       iobase);
	    }
	}
    }

    lastPCI = NO_MORE_PCI;

    return;
}
+
+/*
+** This function searches the current bus (which is >0) for a DECchip with an
+** SROM, so that in multiport cards that have one SROM shared between multiple
+** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
+** For single port cards this is a time waster...
+*/
+__initfunc(static void
+srom_search(int index))
+{
+ u_char pb, dev_fn, tirq;
+ u_short dev_id, dev_num, vendor, status;
+ u_int tmp, irq = 0, device, class = DE4X5_CLASS_CODE;
+ u_long iobase = 0; /* Clear upper 32 bits in Alphas */
+ int i, j;
+ struct bus_type *lp = &bus;
+
+ for (;
+ (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
+ index++) {
+
+ if (lp->bus_num != pb) return;
+ dev_num = PCI_SLOT(dev_fn);
+#ifdef __sparc_v9__
+ struct pci_dev *pdev;
+ for (pdev = pci_devices; pdev; pdev = pdev->next) {
+ if ((pdev->bus->number == pb) && (pdev->devfn == dev_fn)) break;
+ }
+#endif
+ device = 0;
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &dev_id);
+ device = dev_id;
+ device <<= 8;
+ if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) {
+ continue;
+ }
+
+ /* Get the chip configuration revision register */
+ pcibios_read_config_dword(pb, PCI_DEVICE, PCI_REVISION_ID, &cfrv);
+
+ /* Set the device number information */
+ lp->device = dev_num;
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ if (is_DC2114x) device |= (cfrv & CFRV_RN);
+ lp->chipset = device;
+
+ /* Get the board I/O address (64 bits on sparc64) */
+#ifndef __sparc_v9__
+ pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &tmp);
+ iobase = tmp;
+#else
+ iobase = pdev->base_address[0];
+#endif
+ iobase &= CBIO_MASK;
+
+ /* Fetch the IRQ to be used */
+#ifndef __sparc_v9__
+ pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &tirq);
+ irq = tirq;
+#else
+ irq = pdev->irq;
+#endif
+ if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
+
+ /* Check if I/O accesses are enabled */
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
+ if (!(status & PCI_COMMAND_IO)) continue;
+
+ /* Search for a valid SROM attached to this DECchip */
+ DevicePresent(DE4X5_APROM);
+ for (j=0, i=0; i<ETH_ALEN; i++) {
+ j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ if ((j != 0) && (j != 0x5fa)) {
+ last.chipset = device;
+ last.bus = pb;
+ last.irq = irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ return;
+ }
+ }
+
+ return;
+}
+
+__initfunc(static void
+link_modules(struct device *dev, struct device *tmp))
+{
+ struct device *p=dev;
+
+ if (p) {
+ while (((struct de4x5_private *)(p->priv))->next_module) {
+ p = ((struct de4x5_private *)(p->priv))->next_module;
+ }
+
+ if (dev != tmp) {
+ ((struct de4x5_private *)(p->priv))->next_module = tmp;
+ } else {
+ ((struct de4x5_private *)(p->priv))->next_module = NULL;
+ }
+ }
+
+ return;
+}
+
+/*
+** Auto configure the media here rather than setting the port at compile
+** time. This routine is called by de4x5_init() and when a loss of media is
+** detected (excessive collisions, loss of carrier, no carrier or link fail
+** [TP] or no recent receive activity) to check whether the user has been
+** sneaky and changed the port on us.
+*/
static int
autoconf_media(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_MFC macro */
    int next_tick = DE4X5_AUTOSENSE_MS;

    lp->linkOK = 0;
    lp->c_media = AUTO;                 /* Bogus last media */
    disable_ast(dev);                   /* Stop any running autosense timer */
    inl(DE4X5_MFC);                     /* Zero the lost frames counter */
    lp->media = INIT;                   /* Restart the media state machine */
    lp->tcount = 0;

    /* Dispatch to the chip-specific autoconf state machine */
    if (lp->useSROM) {
	next_tick = srom_autoconf(dev);
    } else if (lp->chipset == DC21040) {
	next_tick = dc21040_autoconf(dev);
    } else if (lp->chipset == DC21041) {
	next_tick = dc21041_autoconf(dev);
    } else if (lp->chipset == DC21140) {
	next_tick = dc21140m_autoconf(dev);
    }

    enable_ast(dev, next_tick);         /* Re-arm the autosense timer */

    return (lp->media);
}
+
+/*
+** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
+** from BNC as the port has a jumper to set thick or thin wire. When set for
+** BNC, the BNC port will indicate activity if it's not terminated correctly.
+** The only way to test for that is to place a loopback packet onto the
+** network and watch for errors. Since we're messing with the interrupt mask
+** register, disable the board interrupts and do not allow any more packets to
+** be queued to the hardware. Re-enable everything only when the media is
+** found.
+** I may have to "age out" locally queued packets so that the higher layer
+** timeouts don't effectively duplicate packets on the network.
+*/
static int
dc21040_autoconf(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;     /* used by the DE4X5_* CSR macros */
    int next_tick = DE4X5_AUTOSENSE_MS;
    s32 imr;                            /* scratch, presumably used by the
					   DISABLE_IRQs macro */

    /* State machine: INIT picks the first candidate medium from the
    ** configured autosense mode; each medium state then tests the port
    ** and either connects or falls through to the next medium; NC
    ** (no connection) defaults back to TP and restarts.
    */
    switch (lp->media) {
      case INIT:
	DISABLE_IRQs;
	lp->tx_enable = NO;
	lp->timeout = -1;
	de4x5_save_skbs(dev);           /* Park queued skb's while testing */
	if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
	    lp->media = TP;
	} else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
	    lp->media = BNC_AUI;
	} else if (lp->autosense == EXT_SIA) {
	    lp->media = EXT_SIA;
	} else {
	    lp->media = NC;
	}
	lp->local_state = 0;
	next_tick = dc21040_autoconf(dev); /* Recurse into the chosen state */
	break;

      case TP:
	next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
				  TP_SUSPECT, test_tp);
	break;

      case TP_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
	break;

      case BNC:
      case AUI:
      case BNC_AUI:
	next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
				  BNC_AUI_SUSPECT, ping_media);
	break;

      case BNC_AUI_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
	break;

      case EXT_SIA:
	next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
				  NC, EXT_SIA_SUSPECT, ping_media);
	break;

      case EXT_SIA_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
	break;

      case NC:
	/* default to TP for all */
	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;               /* Start over on the next tick */
	lp->tx_enable = NO;
	break;
    }

    return next_tick;
}
+
+static int
+dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout,
+ int next_state, int suspect_state,
+ int (*fn)(struct device *, int))
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 0:
+ reset_init_sia(dev, csr13, csr14, csr15);
+ lp->local_state++;
+ next_tick = 500;
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else {
+ if (linkBad && (lp->autosense == AUTO)) {
+ lp->local_state = 0;
+ lp->media = next_state;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = suspect_state;
+ next_tick = 3000;
+ }
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+de4x5_suspect_state(struct device *dev, int timeout, int prev_state,
+ int (*fn)(struct device *, int),
+ int (*asfn)(struct device *))
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 1:
+ if (lp->linkOK) {
+ lp->media = prev_state;
+ } else {
+ lp->local_state++;
+ next_tick = asfn(dev);
+ }
+ break;
+
+ case 2:
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else if (!linkBad) {
+ lp->local_state--;
+ lp->media = prev_state;
+ } else {
+ lp->media = INIT;
+ lp->tcount++;
+ }
+ }
+
+ return next_tick;
+}
+
+/*
+** Autoconfigure the media when using the DC21041. AUI needs to be tested
+** before BNC, because the BNC port will indicate activity if it's not
+** terminated correctly. The only way to test for that is to place a loopback
+** packet onto the network and watch for errors. Since we're messing with
+** the interrupt mask register, disable the board interrupts and do not allow
+** any more packets to be queued to the hardware. Re-enable everything only
+** when the media is found.
+*/
+static int
+dc21041_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, irqs, irq_mask, imr, omr;
+ /* Next poll interval in ms; helpers return negative values tagged with
+ ** TIMER_CB which are masked off before being handed back to the timer. */
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ DISABLE_IRQs;
+ lp->tx_enable = NO;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
+ lp->media = TP; /* On chip auto negotiation is broken */
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = NC;
+ }
+ lp->local_state = 0;
+ /* Re-enter immediately to act on the media state chosen above */
+ next_tick = dc21041_autoconf(dev);
+ break;
+
+ case TP_NW:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts & STS_LNP) {
+ lp->media = ANS;
+ } else {
+ lp->media = AUI;
+ }
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ if (!lp->tx_enable) {
+ irqs = STS_LNP;
+ irq_mask = IMR_LPM;
+ sts = test_ans(dev, irqs, irq_mask, 3000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ lp->media = TP;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = ANS_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case ANS_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
+ break;
+
+ case TP:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ if (inl(DE4X5_SISR) & SISR_NRA) {
+ lp->media = AUI; /* Non selected port activity */
+ } else {
+ lp->media = BNC;
+ }
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = TP_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case TP_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ /* BNC cannot report link state passively; send a loopback
+ ** packet (ping_media) to verify the cable is really there. */
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->media = NC;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
+ break;
+
+ case NC:
+ /* No connection found: re-init the SIA, log any media change,
+ ** then drop back to INIT so the whole scan restarts. */
+ omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = NO;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** Some autonegotiation chips are broken in that they do not return the
+** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
+** register, except at the first power up negotiation.
+**
+** Media autosense state machine for the DC21140 with an MII PHY; same
+** re-entrant structure as dc21041_autoconf() (returns the next poll
+** interval in ms, helpers return TIMER_CB-tagged negative deferrals).
+*/
+static int
+dc21140m_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int ana, anlpa, cap, cr, slnk, sr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ u_long imr, omr, iobase = dev->base_addr;
+
+ switch(lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = FALSE;
+ lp->linkOK = 0;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->useSROM) {
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ return next_tick;
+ }
+ srom_exec(dev, lp->phy[lp->active].gep);
+ if (lp->infoblock_media == ANS) {
+ ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ } else {
+ lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
+ SET_10Mb;
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if ((lp->autosense == AUTO) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ /* Advertise our abilities (from the PHY status reg),
+ ** masking full duplex out unless the user allowed it. */
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ } else if (lp->autosense == AUTO) {
+ lp->media = SPD_DET;
+ } else if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = NC;
+ }
+ }
+ lp->local_state = 0;
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ /* Kick off (restart) autonegotiation, then wait for the
+ ** restart bit to self-clear. */
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ /* Intersect our advertisement with the link partner's
+ ** to pick the highest common speed/duplex. */
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc21140m_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (lp->timeout < 0) {
+ lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
+ (~gep_rd(dev) & GEP_LNP));
+ SET_100Mb_PDET;
+ }
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ next_tick = slnk & ~TIMER_CB;
+ } else {
+ if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = NC;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case _100Mb: /* Set 100Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ /* Already running: watch for a dropped link and restart
+ ** the autosense scan if it goes away. */
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case BNC:
+ case AUI:
+ case _10Mb: /* Set 10Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case NC:
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = FALSE;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** This routine may be merged into dc21140m_autoconf() sometime as I'm
+** changing how I figure out the media - but trying to keep it backwards
+** compatible with the de500-xa and de500-aa.
+** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
+** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
+** This routine just has to figure out whether 10Mb/s or 100Mb/s is
+** active.
+** When autonegotiation is working, the ANS part searches the SROM for
+** the highest common speed (TP) link that both can run and if that can
+** be full duplex. That infoblock is executed and then the link speed set.
+**
+** Only _10Mb and _100Mb are tested here.
+*/
+static int
+dc2114x_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = FALSE;
+ lp->linkOK = 0;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if (lp->params.autosense & ~AUTO) {
+ srom_map_media(dev); /* Fixed media requested */
+ if (lp->media != lp->params.autosense) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ lp->media = INIT;
+ }
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = SPD_DET;
+ if ((lp->infoblock_media == ANS) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ /* Advertise our abilities, masking out full duplex
+ ** unless the user allowed it. */
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ }
+ }
+ lp->local_state = 0;
+ /* Re-enter immediately to act on the media state chosen above */
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ /* Restart autonegotiation, then wait for the restart bit
+ ** to self-clear. */
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ /* Intersect our advertisement with the link partner's
+ ** to pick the highest common speed/duplex. */
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc2114x_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ /* BNC cannot report link state passively; send a loopback
+ ** packet (ping_media) to verify the cable is really there. */
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->tcount++;
+ lp->media = INIT;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ if (lp->media == _100Mb) {
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ lp->media = SPD_DET;
+ return (slnk & ~TIMER_CB);
+ }
+ } else {
+ if (wait_for_link(dev) < 0) {
+ lp->media = SPD_DET;
+ return PDET_LINK_WAIT;
+ }
+ }
+ if (lp->media == ANS) { /* Do MII parallel detection */
+ if (is_spd_100(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = _10Mb;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
+ (((lp->media == _10Mb) || (lp->media == TP) ||
+ (lp->media == BNC) || (lp->media == AUI)) &&
+ is_10_up(dev))) {
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->tcount++;
+ lp->media = INIT;
+ }
+ break;
+
+ case _10Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case _100Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ default:
+ /* Unexpected media state: log it and restart the scan */
+ lp->tcount++;
+printk("Huh?: media:%02x\n", lp->media);
+ lp->media = INIT;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** Dispatch to the chip/infoleaf-specific autoconf routine that was
+** installed (in lp->infoleaf_fn) when the SROM infoleaf was parsed.
+*/
+static int
+srom_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ return lp->infoleaf_fn(dev);
+}
+
+/*
+** This mapping keeps the original media codes and FDX flag unchanged.
+** While it isn't strictly necessary, it helps me for the moment...
+** The early return avoids a media state / SROM media space clash.
+** Returns 0 on success, -1 when the SROM media conflicts with the
+** user's duplex request or is unrecognised.
+*/
+static int
+srom_map_media(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ lp->fdx = 0;
+ if (lp->infoblock_media == lp->media)
+ return 0;
+
+ switch(lp->infoblock_media) {
+ case SROM_10BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
+ /* fall through: shared 10BaseT handling */
+ case SROM_10BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = TP;
+ }
+ break;
+
+ case SROM_10BASE2:
+ lp->media = BNC;
+ break;
+
+ case SROM_10BASE5:
+ lp->media = AUI;
+ break;
+
+ case SROM_100BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
+ /* fall through: shared 100BaseT handling */
+ case SROM_100BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASET4:
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASEFF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
+ /* fall through: shared 100BaseF handling */
+ case SROM_100BASEF:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case ANS:
+ lp->media = ANS;
+ break;
+
+ default:
+ printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
+ lp->infoblock_media);
+ return -1;
+ break;
+ }
+
+ return 0;
+}
+
+/*
+** Bring the selected media connection live: reset the descriptor rings,
+** re-enable interrupts and transmission, then kick the transmitter.
+** The cli()/sti() pair guards the ring reset and interrupt setup from
+** the device interrupt handler (pre-SMP-era locking).
+*/
+static void
+de4x5_init_connection(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media; /* Stop scrolling media messages */
+ }
+
+ cli();
+ de4x5_rst_desc_ring(dev);
+ de4x5_setup_intr(dev);
+ lp->tx_enable = YES;
+ dev->tbusy = 0;
+ sti();
+ outl(POLL_DEMAND, DE4X5_TPD);
+ mark_bh(NET_BH);
+
+ return;
+}
+
+/*
+** General PHY reset function. Some MII devices don't reset correctly
+** since their MII address pins can float at voltages that are dependent
+** on the signal pin use. Do a double reset to ensure a reset.
+** Returns 0, or a (possibly TIMER_CB-tagged) tick value from the MII
+** reset-bit poll when an MII PHY is in use.
+*/
+static int
+de4x5_reset_phy(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int next_tick = 0;
+
+ if ((lp->useSROM) || (lp->phy[lp->active].id)) {
+ if (lp->timeout < 0) {
+ if (lp->useSROM) {
+ /* Run the SROM reset sequence twice (see note above) */
+ if (lp->phy[lp->active].rst) {
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].rst);
+ } else if (lp->rst) { /* Type 5 infoblock reset */
+ srom_exec(dev, lp->rst);
+ srom_exec(dev, lp->rst);
+ }
+ } else {
+ PHY_HARD_RESET;
+ }
+ if (lp->useMII) {
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ }
+ if (lp->useMII) {
+ /* Wait for the PHY's self-clearing reset bit to drop */
+ next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500);
+ }
+ } else if (lp->chipset == DC21140) {
+ PHY_HARD_RESET;
+ }
+
+ return next_tick;
+}
+
+/*
+** Program the SIA (csr13/14/15) and then poll the status register for
+** any of the requested interrupt bits for up to 'msec' ms. Returns the
+** masked status on completion, or (100 | TIMER_CB) to request another
+** 100ms deferred poll via the driver timer.
+*/
+static int
+test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, csr12;
+
+ if (lp->timeout < 0) {
+ /* First entry for this poll: initialise hardware and timer */
+ lp->timeout = msec/100;
+ if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
+ reset_init_sia(dev, csr13, csr14, csr15);
+ }
+
+ /* set up the interrupt mask */
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ /* clear csr12 NRA and SRA bits */
+ if ((lp->chipset == DC21041) || lp->useSROM) {
+ csr12 = inl(DE4X5_SISR);
+ outl(csr12, DE4X5_SISR);
+ }
+ }
+
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ if (!(sts & irqs) && --lp->timeout) {
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
+/*
+** Poll the SIA status for TP link-fail / no-carrier indications for up
+** to 'msec' ms. Returns 0 when the link looks good, a non-zero fault
+** mask on timeout, or (100 | TIMER_CB) to defer the next sample.
+*/
+static int
+test_tp(struct device *dev, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
+
+ if (sisr && --lp->timeout) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** Samples the 100Mb Link State Signal. The sample interval is important
+** because too fast a rate can give erroneous results and confuse the
+** speed sense algorithm.
+** An initial SAMPLE_DELAY settles the link before sampling starts.
+*/
+#define SAMPLE_INTERVAL 500 /* ms */
+#define SAMPLE_DELAY 2000 /* ms */
+static int
+test_for_100Mb(struct device *dev, int msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ /* DC2114x: any non-zero sample counts; others: require GEP_SLNK */
+ int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
+
+ if (lp->timeout < 0) {
+ if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
+ if (msec > SAMPLE_DELAY) {
+ lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
+ gep = SAMPLE_DELAY | TIMER_CB;
+ return gep;
+ } else {
+ lp->timeout = msec/SAMPLE_INTERVAL;
+ }
+ }
+
+ if (lp->phy[lp->active].id || lp->useSROM) {
+ gep = is_100_up(dev) | is_spd_100(dev);
+ } else {
+ gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
+ }
+ if (!(gep & ret) && --lp->timeout) {
+ gep = SAMPLE_INTERVAL | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return gep;
+}
+
+/*
+** Single-shot link settle delay: the first call returns TIMER_CB to
+** schedule one deferred tick, the next call returns 0 (wait elapsed).
+*/
+static int
+wait_for_link(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ if (lp->timeout < 0) {
+ lp->timeout = 1;
+ }
+
+ if (lp->timeout--) {
+ return TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return 0;
+}
+
+/*
+** Poll an MII register until the 'mask' bits reach the wanted polarity
+** ('pol' true => wait for bits set, false => wait for bits clear) or
+** until 'msec' ms have elapsed. Returns the masked register value, or
+** (100 | TIMER_CB) to request another 100ms deferred poll.
+*/
+static int
+test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int test;
+ u_long iobase = dev->base_addr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ if (pol) pol = ~0;
+ reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
+ test = (reg ^ pol) & mask;
+
+ if (test && --lp->timeout) {
+ reg = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return reg;
+}
+
+/*
+** Report whether the link is running at 100Mb/s, reading whichever
+** source applies: the PHY's speed register (MII), the GEP pins
+** (de500-xa), or the SROM-described GEP/SISR bits.
+** Returns non-zero for 100Mb/s (value is a bit mask, not a boolean).
+*/
+static int
+is_spd_100(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int spd;
+
+ if (lp->useMII) {
+ spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
+ spd = ~(spd ^ lp->phy[lp->active].spd.value);
+ spd &= lp->phy[lp->active].spd.mask;
+ } else if (!lp->useSROM) { /* de500-xa */
+ spd = ((~gep_rd(dev)) & GEP_SLNK);
+ } else {
+ /* NOTE(review): ibn==2 presumably means no usable GEP speed bit
+ ** for this infoblock type — confirm against the SROM spec. */
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+
+ spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
+ }
+
+ return spd;
+}
+
+/*
+** Report whether the 100Mb/s link is up. Non-zero means link up.
+*/
+static int
+is_100_up(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ } else if (!lp->useSROM) { /* de500-xa */
+ return ((~gep_rd(dev)) & GEP_SLNK);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+
+ return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid));
+ }
+}
+
+/*
+** Report whether the 10Mb/s link is up. Non-zero means link up.
+** Mirrors is_100_up() but tests the 10Mb link/SISR bits.
+*/
+static int
+is_10_up(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ } else if (!lp->useSROM) { /* de500-xa */
+ return ((~gep_rd(dev)) & GEP_LNP);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return (((lp->chipset & ~0x00ff) == DC2114x) ?
+ (~inl(DE4X5_SISR)&SISR_LS10):
+ 0);
+
+ return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid));
+ }
+}
+
+/*
+** Report whether the link is capable of autonegotiation: the MII status
+** register when an MII PHY is present, else the DC2114x SISR LPN bit.
+** Returns 0 when autonegotiation is not available.
+*/
+static int
+is_anc_capable(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
+ } else {
+ return 0;
+ }
+}
+
+/*
+** Send a packet onto the media and watch for send errors that indicate the
+** media is bad or unconnected.
+** Returns 0 when the loopback frame went out cleanly, 1 on a carrier /
+** descriptor error, or (100 | TIMER_CB) while the frame is still owned
+** by the chip and the timer has not expired.
+*/
+static int
+ping_media(struct device *dev, int msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+
+ lp->tmp = lp->tx_new; /* Remember the ring position */
+ load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), NULL);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD);
+ }
+
+ sisr = inl(DE4X5_SISR);
+
+ /* status < 0 means the descriptor is still owned by the chip (T_OWN) */
+ if ((!(sisr & SISR_NCR)) &&
+ ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
+ (--lp->timeout)) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ if ((!(sisr & SISR_NCR)) &&
+ !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
+ lp->timeout) {
+ sisr = 0;
+ } else {
+ sisr = 1;
+ }
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** This function does 2 things: on Intels it kmalloc's another buffer to
+** replace the one about to be passed up. On Alpha's it kmallocs a buffer
+** into which the packet is copied.
+** Returns the skb to hand up the stack, NULL on allocation failure, or
+** the dummy value (struct sk_buff *)1 in the copy path before open.
+*/
+static struct sk_buff *
+de4x5_alloc_rx_buff(struct device *dev, int index, int len)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p;
+
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+ struct sk_buff *ret;
+ u_long i=0, tmp;
+
+ p = dev_alloc_skb(IEEE802_3_SZ + ALIGN + 2);
+ if (!p) return NULL;
+
+ p->dev = dev;
+ /* Align the DMA buffer start to the chip's required boundary */
+ tmp = virt_to_bus(p->data);
+ i = ((tmp + ALIGN) & ~ALIGN) - tmp;
+ skb_reserve(p, i);
+ lp->rx_ring[index].buf = tmp + i;
+
+ /* Swap the fresh skb into the ring; hand back the filled one */
+ ret = lp->rx_skb[index];
+ lp->rx_skb[index] = p;
+
+ if ((u_long) ret > 1) {
+ skb_put(ret, len);
+ }
+
+ return ret;
+
+#else
+ if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
+
+ p = dev_alloc_skb(len + 2);
+ if (!p) return NULL;
+
+ p->dev = dev;
+ skb_reserve(p, 2); /* Align */
+ if (index < lp->rx_old) { /* Wrapped buffer */
+ short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
+ memcpy(skb_put(p,tlen),
+ bus_to_virt(le32_to_cpu(lp->rx_ring[lp->rx_old].buf)),tlen);
+ memcpy(skb_put(p,len-tlen),
+ bus_to_virt(le32_to_cpu(lp->rx_ring[0].buf)), len-tlen);
+ } else { /* Linear buffer */
+ memcpy(skb_put(p,len),
+ bus_to_virt(le32_to_cpu(lp->rx_ring[lp->rx_old].buf)),len);
+ }
+
+ return p;
+#endif
+}
+
+/*
+** Release all receive-ring skb's and mark each ring slot with the
+** (struct sk_buff *)1 dummy so the slot is known to be empty.
+*/
+static void
+de4x5_free_rx_buffs(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ for (i=0; i<lp->rxRingSize; i++) {
+ if ((u_long) lp->rx_skb[i] > 1) {
+ dev_kfree_skb(lp->rx_skb[i], FREE_WRITE);
+ }
+ lp->rx_ring[i].status = 0;
+ lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
+ }
+
+ return;
+}
+
+/*
+** Release all transmit-ring skb's and drain the locally cached
+** (software-queued) packets as well.
+*/
+static void
+de4x5_free_tx_buffs(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ for (i=0; i<lp->txRingSize; i++) {
+ if (lp->tx_skb[i]) {
+ dev_kfree_skb(lp->tx_skb[i], FREE_WRITE);
+ lp->tx_skb[i] = NULL;
+ }
+ lp->tx_ring[i].status = 0;
+ }
+
+ /* Unload the locally queued packets */
+ while (lp->cache.skb) {
+ dev_kfree_skb(de4x5_get_cache(dev), FREE_WRITE);
+ }
+
+ return;
+}
+
+/*
+** When a user pulls a connection, the DECchip can end up in a
+** 'running - waiting for end of transmission' state. This means that we
+** have to perform a chip soft reset to ensure that we can synchronize
+** the hardware and software and make any media probes using a loopback
+** packet meaningful.
+** save_cnt makes this idempotent: only the first call does the reset;
+** de4x5_rst_desc_ring() decrements the count on restore.
+*/
+static void
+de4x5_save_skbs(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ if (!lp->cache.save_cnt) {
+ STOP_DE4X5;
+ de4x5_tx(dev); /* Flush any sent skb's */
+ de4x5_free_tx_buffs(dev);
+ de4x5_cache_state(dev, DE4X5_SAVE_STATE);
+ de4x5_sw_reset(dev);
+ de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
+ lp->cache.save_cnt++;
+ START_DE4X5;
+ }
+
+ return;
+}
+
+/*
+** Reset both descriptor rings to an empty state and hand the RX
+** descriptors back to the chip. Only runs when a matching
+** de4x5_save_skbs() has bumped save_cnt.
+*/
+static void
+de4x5_rst_desc_ring(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i;
+ s32 omr;
+
+ if (lp->cache.save_cnt) {
+ STOP_DE4X5;
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ /* Make the descriptor writes visible before restarting the chip */
+ barrier();
+ lp->cache.save_cnt--;
+ START_DE4X5;
+ }
+
+ return;
+}
+
+/*
+** Save or restore the chip setup registers (CSR0/6/7 plus the SIA or
+** GEP state) across a soft reset, selected by 'flag'.
+*/
+static void
+de4x5_cache_state(struct device *dev, int flag)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ switch(flag) {
+ case DE4X5_SAVE_STATE:
+ lp->cache.csr0 = inl(DE4X5_BMR);
+ /* Strip the start bits so restore doesn't re-enable TX/RX early */
+ lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
+ lp->cache.csr7 = inl(DE4X5_IMR);
+ break;
+
+ case DE4X5_RESTORE_STATE:
+ outl(lp->cache.csr0, DE4X5_BMR);
+ outl(lp->cache.csr6, DE4X5_OMR);
+ outl(lp->cache.csr7, DE4X5_IMR);
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
+ lp->cache.csr15);
+ }
+ break;
+ }
+
+ return;
+}
+
+/*
+** Append an skb to the tail of the driver's software packet queue
+** (singly linked through skb->next).
+*/
+static void
+de4x5_put_cache(struct device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p;
+
+ if (lp->cache.skb) {
+ /* Walk to the current tail */
+ for (p=lp->cache.skb; p->next; p=p->next);
+ p->next = skb;
+ } else {
+ lp->cache.skb = skb;
+ }
+ skb->next = NULL;
+
+ return;
+}
+
+/*
+** Push an skb back onto the head of the software packet queue
+** (used to requeue a packet that could not be loaded).
+*/
+static void
+de4x5_putb_cache(struct device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p = lp->cache.skb;
+
+ lp->cache.skb = skb;
+ skb->next = p;
+
+ return;
+}
+
+/*
+** Pop and return the skb at the head of the software packet queue,
+** or NULL when the queue is empty.
+*/
+static struct sk_buff *
+de4x5_get_cache(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p = lp->cache.skb;
+
+ if (p) {
+ lp->cache.skb = p->next;
+ p->next = NULL;
+ }
+
+ return p;
+}
+
+/*
+** Check the Auto Negotiation State. Return OK when a link pass interrupt
+** is received and the auto-negotiation status is NWAY OK.
+** Returns the masked status register, or (100 | TIMER_CB) to request
+** another 100ms deferred poll while negotiation is still in progress.
+*/
+static int
+test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, ans;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+ }
+
+ ans = inl(DE4X5_SISR) & SISR_ANS;
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ /* Keep polling until the irq fires, NWAY completes, or time runs out */
+ if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
+/*
+** Clear any stale interrupts and unmask the board interrupts, but only
+** once the chip's TX/RX are enabled (OMR_SR set).
+*/
+static void
+de4x5_setup_intr(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 imr, sts;
+
+ if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
+ imr = 0;
+ UNMASK_IRQs;
+ sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
+ outl(sts, DE4X5_STS);
+ ENABLE_IRQs;
+ }
+
+ return;
+}
+
+/*
+** Reset the SIA and reprogram csr13/14/15, using either the caller's
+** values or (for SROM-driven boards) the cached SROM values.
+*/
+static void
+reset_init_sia(struct device *dev, s32 csr13, s32 csr14, s32 csr15)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ RESET_SIA;
+ if (lp->useSROM) {
+ if (lp->ibn == 3) {
+ /* Type 3 infoblock: reset/GEP sequences come from the SROM */
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].gep);
+ outl(1, DE4X5_SICR);
+ return;
+ } else {
+ csr15 = lp->cache.csr15;
+ csr14 = lp->cache.csr14;
+ csr13 = lp->cache.csr13;
+ outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
+ outl(csr15 | lp->cache.gep, DE4X5_SIGR);
+ }
+ } else {
+ outl(csr15, DE4X5_SIGR);
+ }
+ outl(csr14, DE4X5_STRR);
+ outl(csr13, DE4X5_SICR);
+
+ /* Let the SIA settle before use */
+ de4x5_ms_delay(10);
+
+ return;
+}
+
+/*
+** Create a loopback ethernet packet: destination and source are both
+** our own MAC address, followed by a 2-byte length field of 1.
+** NOTE(review): the 'len' parameter is unused here; the frame size is
+** fixed by the caller's buffer.
+*/
+static void
+create_packet(struct device *dev, char *frame, int len)
+{
+ int i;
+ char *buf = frame;
+
+ for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
+ *buf++ = dev->dev_addr[i];
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
+ *buf++ = dev->dev_addr[i];
+ }
+
+ *buf++ = 0; /* Packet length (2 bytes) */
+ *buf++ = 1;
+
+ return;
+}
+
+/*
+** Known delay in microseconds (busy-wait via udelay).
+*/
+static void
+de4x5_us_delay(u32 usec)
+{
+ udelay(usec);
+
+ return;
+}
+
+/*
+** Known delay in milliseconds, in millisecond steps (busy-wait).
+*/
+static void
+de4x5_ms_delay(u32 msec)
+{
+ u_int i;
+
+ for (i=0; i<msec; i++) {
+ de4x5_us_delay(1000);
+ }
+
+ return;
+}
+
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int
+EISA_signature(char *name, s32 eisa_id)
+{
+ static c_char *signatures[] = DE4X5_SIGNATURE;
+ char ManCode[DE4X5_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int i, status = 0, siglen = sizeof(signatures)/sizeof(c_char *);
+
+ *name = '\0';
+ Eisa.ID = inl(eisa_id);
+
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;i<siglen;i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ break;
+ }
+ }
+
+ return status; /* return the device name string */
+}
+
+/*
+** Look for a particular board name in the PCI configuration space
+**
+** For a DC21040 the name is hardwired; otherwise the product name is
+** pulled out of the SROM.  When the name is not a known DEC signature,
+** either blank it (dec_only) or substitute the bare chip name, and
+** flag that the SROM infoleaf must be used for media setup.
+** Always returns 0 (status is never set non-zero here).
+*/
+static int
+PCI_signature(char *name, struct bus_type *lp)
+{
+ static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
+ int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
+
+ if (lp->chipset == DC21040) {
+ strcpy(name, "DE434/5");
+ return status;
+ } else { /* Search for a DEC name in the SROM */
+ /* reuse outer i (was a shadowing inner declaration) */
+ i = *((char *)&lp->srom + 19) * 3;
+ strncpy(name, (char *)&lp->srom + 26 + i, 8);
+ }
+ name[8] = '\0';
+ for (i=0; i<siglen; i++) {
+ if (strstr(name,de4x5_signatures[i])!=NULL) break;
+ }
+ if (i == siglen) {
+ if (dec_only) {
+ *name = '\0';
+ } else { /* Use chip name to avoid confusion */
+ /* DC21040 cannot reach here (early return above), so its arm
+ ** was removed from this ternary chain.
+ */
+ strcpy(name, ((lp->chipset == DC21041) ? "DC21041" :
+ ((lp->chipset == DC21140) ? "DC21140" :
+ ((lp->chipset == DC21142) ? "DC21142" :
+ ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
+ )))));
+ }
+ if (lp->chipset != DC21041) {
+ useSROM = TRUE; /* card is not recognisably DEC */
+ }
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ useSROM = TRUE;
+ }
+
+ return status;
+}
+
+/*
+** Set up the Ethernet PROM counter to the start of the Ethernet address on
+** the DC21040, else read the SROM for the other chips.
+** The SROM may not be present in a multi-MAC card, so first read the
+** MAC address and check for a bad address. If there is a bad one then exit
+** immediately with the prior srom contents intact (the h/w address will
+** be fixed up later).
+*/
+static void
+DevicePresent(u_long aprom_addr)
+{
+ int i, j=0;
+ struct bus_type *lp = &bus;
+
+ if (lp->chipset == DC21040) {
+ if (lp->bus == EISA) {
+ enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ } else {
+ outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ }
+ } else { /* Read new srom */
+ u_short tmp, *p = (short *)((char *)&lp->srom + SROM_HWADD);
+ /* Read just the 3 hardware-address words first; j accumulates them */
+ for (i=0; i<(ETH_ALEN>>1); i++) {
+ tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
+ *p = le16_to_cpu(tmp);
+ j += *p++;
+ }
+ /* j==0 -> all zeros; j==0x2fffd == 3*0xffff -> blank (all ones).
+ ** Either way there is no usable SROM address: keep prior contents.
+ */
+ if ((j == 0) || (j == 0x2fffd)) {
+ return;
+ }
+
+ p=(short *)&lp->srom;
+ for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
+ tmp = srom_rd(aprom_addr, i);
+ *p++ = le16_to_cpu(tmp);
+ }
+ de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ }
+
+ return;
+}
+
+/*
+** Since the write on the Enet PROM register doesn't seem to reset the PROM
+** pointer correctly (at least on my DE425 EISA card), this routine should do
+** it...from depca.c.
+**
+** Scans up to PROBE_LENGTH+sigLength-1 bytes from the address PROM for
+** the 8-byte signature (ETH_PROM_SIG twice); when the loop exits the
+** PROM read pointer sits just past the signature, i.e. at the start of
+** the station address.
+*/
+static void
+enet_addr_rst(u_long aprom_addr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength=0;
+ s8 data;
+ int i, j;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(aprom_addr);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** For the bad status case and no SROM, then add one to the previous
+** address. However, need to add one backwards in case we have 0xff
+** as one or more of the bytes. Only the last 3 bytes should be checked
+** as the first three are invariant - assigned to an organisation.
+**
+** Reads the 6-byte station address (from the DC21040 address PROM, the
+** SROM image, or the raw SROM for known-broken cards), accumulating the
+** DEC rotating checksum in k, then compares against the stored checksum
+** where one exists.  Returns 0 on success, -1 on checksum failure (in
+** dec_only mode) or a bad address that could not be repaired.
+*/
+static int
+get_hw_addr(struct device *dev)
+{
+ u_long iobase = dev->base_addr;
+ int broken, i, k, tmp, status = 0;
+ u_short j,chksum;
+ struct bus_type *lp = &bus;
+
+ broken = de4x5_bad_srom(lp);
+
+ /* 3 iterations of 2 bytes each; k is a 16-bit rotating checksum */
+ for (i=0,k=0,j=0;j<3;j++) {
+ k <<= 1;
+ if (k > 0xffff) k-=0xffff;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ /* APROM reads return <0 until a byte is ready */
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_char) tmp;
+ dev->dev_addr[i++] = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_short) (tmp << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ } else if (!broken) {
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ } else if ((broken == SMC) || (broken == ACCTON)) {
+ /* broken SROMs keep the address at the very start */
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ }
+ } else {
+ k += (u_char) (tmp = inb(EISA_APROM));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ }
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+
+ /* Verify against the checksum stored after the address, if any */
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum |= (u_short) (tmp << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+ } else {
+ chksum = (u_char) inb(EISA_APROM);
+ chksum |= (u_short) (inb(EISA_APROM) << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+
+ /* If possible, try to fix a broken card - SMC only so far */
+ srom_repair(dev, broken);
+
+#ifdef CONFIG_PMAC
+ /*
+ ** If the address starts with 00 a0, we have to bit-reverse
+ ** each byte of the address.
+ */
+ if (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0xa0) {
+ for (i = 0; i < ETH_ALEN; ++i) {
+ int x = dev->dev_addr[i];
+ x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
+ x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
+ dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ }
+ }
+#endif /* CONFIG_PMAC */
+
+ /* Test for a bad enet address */
+ status = test_bad_enet(dev, status);
+
+ return status;
+}
+
+/*
+** Test for enet addresses in the first 32 bytes. The built-in strncmp
+** didn't seem to work here...?
+**
+** Compares the first 3 bytes at SROM offsets 0 and 0x10 against each
+** known broken-vendor prefix in enet_det[].  Returns SMC or ACCTON for
+** a recognised broken card, 0 for a good SROM.
+*/
+static int
+de4x5_bad_srom(struct bus_type *lp)
+{
+ int i, status = 0;
+
+ for (i=0; i<sizeof(enet_det)/ETH_ALEN; i++) {
+ if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
+ !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
+ if (i == 0) {
+ status = SMC;
+ } else if (i == 1) {
+ status = ACCTON;
+ }
+ break;
+ }
+ }
+
+ return status;
+}
+
+/*
+** Local bounded byte compare (see de4x5_bad_srom): returns 0 when the
+** first n bytes of a and b match, otherwise the difference of the
+** first mismatching pair.  Unlike strncmp it ignores NUL terminators.
+*/
+static int
+de4x5_strncmp(char *a, char *b, int n)
+{
+ int diff = 0;
+
+ while (n && !diff) {
+ diff = *a++ - *b++;
+ n--;
+ }
+
+ return diff;
+}
+
+/*
+** Rebuild a usable SROM image in bus.srom for known-broken cards.
+** Only SMC is handled: the image is zeroed, the already-read MAC
+** address is copied in, and a canned infoleaf from srom_repair_info
+** is installed, after which the SROM decode path is enabled.
+** NOTE(review): the 100-byte copy assumes srom_repair_info[SMC-1] and
+** lp->srom.info are both at least that large -- confirm against their
+** declarations.
+*/
+static void
+srom_repair(struct device *dev, int card)
+{
+ struct bus_type *lp = &bus;
+
+ switch(card) {
+ case SMC:
+ memset((char *)&bus.srom, 0, sizeof(struct de4x5_srom));
+ memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
+ memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
+ useSROM = TRUE;
+ break;
+ }
+
+ return;
+}
+
+/*
+** Assume that the irq's do not follow the PCI spec - this is seems
+** to be true so far (2 for 2).
+**
+** A bad address sums to 0 (all zeros) or 0x5fa (all 0xff bytes:
+** 6*0xff).  For a bad address on a multi-MAC board, derive a new one
+** by incrementing the previously-probed card's address (last.addr);
+** for a good address, remember it in `last` for the next card.
+*/
+static int
+test_bad_enet(struct device *dev, int status)
+{
+ struct bus_type *lp = &bus;
+ int i, tmp;
+
+ for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
+ if ((tmp == 0) || (tmp == 0x5fa)) {
+ if ((lp->chipset == last.chipset) &&
+ (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
+ for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
+ /* Increment only the 3 NIC-specific bytes, with carry */
+ for (i=ETH_ALEN-1; i>2; --i) {
+ dev->dev_addr[i] += 1;
+ if (dev->dev_addr[i] != 0) break;
+ }
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ if (!an_exception(lp)) {
+ dev->irq = last.irq;
+ }
+
+ status = 0;
+ }
+ } else if (!status) {
+ last.chipset = lp->chipset;
+ last.bus = lp->bus_num;
+ last.irq = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ }
+
+ return status;
+}
+
+/*
+** List of board exceptions with correctly wired IRQs
+**
+** Returns -1 for boards whose IRQ must NOT be copied from the previous
+** MAC (matched by SROM subvendor/subsystem IDs), 0 otherwise.
+*/
+static int
+an_exception(struct bus_type *lp)
+{
+ if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
+ (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+** SROM Read
+**
+** Bit-bangs one 16-bit word out of the serial (93C46-style) SROM:
+** select the chip, clock out the READ opcode and 6-bit word offset,
+** then clock in the data.
+*/
+static short
+srom_rd(u_long addr, u_char offset)
+{
+ sendto_srom(SROM_RD | SROM_SR, addr);
+
+ srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
+ srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
+ srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
+
+ return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
+}
+
+/* Clock one bit into the SROM: data low/high transition on DT_CLK. */
+static void
+srom_latch(u_int command, u_long addr)
+{
+ sendto_srom(command, addr);
+ sendto_srom(command | DT_CLK, addr);
+ sendto_srom(command, addr);
+
+ return;
+}
+
+/* Clock the start bit + opcode into the SROM, then drop the data line
+** (keeping chip select) ready for the address phase.
+*/
+static void
+srom_command(u_int command, u_long addr)
+{
+ srom_latch(command, addr);
+ srom_latch(command, addr);
+ srom_latch((command & 0x0000ff00) | DT_CS, addr);
+
+ return;
+}
+
+/* Clock the 6-bit word offset into the SROM, MSB first (the offset is
+** pre-shifted so successive left shifts expose each bit in the sign
+** position).
+*/
+static void
+srom_address(u_int command, u_long addr, u_char offset)
+{
+ int i;
+ char a;
+
+ a = (char)(offset << 2);
+ for (i=0; i<6; i++, a <<= 1) {
+ srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
+ }
+ de4x5_us_delay(1);
+
+ /* Dummy read of the data-out bit; the value is deliberately unused,
+ ** the bus cycle itself is what matters here.
+ */
+ i = (getfrom_srom(addr) >> 3) & 0x01;
+
+ return;
+}
+
+/* Clock 16 data bits out of the SROM, MSB first, then deselect it. */
+static short
+srom_data(u_int command, u_long addr)
+{
+ int i;
+ short word = 0;
+ s32 tmp;
+
+ for (i=0; i<16; i++) {
+ sendto_srom(command | DT_CLK, addr);
+ tmp = getfrom_srom(addr);
+ sendto_srom(command, addr);
+
+ word = (word << 1) | ((tmp >> 3) & 0x01);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr); /* drop chip select */
+
+ return word;
+}
+
+/*
+static void
+srom_busy(u_int command, u_long addr)
+{
+ sendto_srom((command & 0x0000ff00) | DT_CS, addr);
+
+ while (!((getfrom_srom(addr) >> 3) & 0x01)) {
+ de4x5_ms_delay(1);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return;
+}
+*/
+
+/* Write to the SROM/MII control register with the required settle time. */
+static void
+sendto_srom(u_int command, u_long addr)
+{
+ outl(command, addr);
+ udelay(1);
+
+ return;
+}
+
+/* Read the SROM/MII control register with the required settle time. */
+static int
+getfrom_srom(u_long addr)
+{
+ s32 tmp;
+
+ tmp = inl(addr);
+ udelay(1);
+
+ return tmp;
+}
+
+/*
+** Locate this chip's SROM infoleaf decoder and the infoleaf offset for
+** this PCI device.  On success sets lp->infoleaf_fn and
+** lp->infoleaf_offset and returns 0; otherwise disables SROM decoding
+** and returns -ENXIO.
+*/
+static int
+srom_infoleaf_info(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i, count;
+ u_char *p;
+
+ /* Find the infoleaf decoder function that matches this chipset */
+ for (i=0; i<INFOLEAF_SIZE; i++) {
+ if (lp->chipset == infoleaf_array[i].chipset) break;
+ }
+ if (i == INFOLEAF_SIZE) {
+ lp->useSROM = FALSE;
+ printk("%s: Cannot find correct chipset for SROM decoding!\n",
+ dev->name);
+ return -ENXIO;
+ }
+
+ lp->infoleaf_fn = infoleaf_array[i].fn;
+
+ /* Find the information offset that this function should use.
+ ** Offset 19 holds the controller count; each 3-byte entry from
+ ** offset 26 is a (device, 16-bit leaf offset) record.
+ */
+ count = *((u_char *)&lp->srom + 19);
+ p = (u_char *)&lp->srom + 26;
+
+ if (count > 1) {
+ for (i=count; i; --i, p+=3) {
+ if (lp->device == *p) break;
+ }
+ if (i == 0) {
+ lp->useSROM = FALSE;
+ printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
+ dev->name, lp->device);
+ return -ENXIO;
+ }
+ }
+
+ lp->infoleaf_offset = TWIDDLE(p+1);
+
+ return 0;
+}
+
+/*
+** This routine loads any type 1 or 3 MII info into the mii device
+** struct and executes any type 5 code to reset PHY devices for this
+** controller.
+** The info for the MII devices will be valid since the index used
+** will follow the discovery process from MII address 1-31 then 0.
+**
+** Called with count=1 passed down to the type handlers so they run in
+** "initialise" mode rather than autosense mode.
+*/
+static void
+srom_init(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ u_char count;
+
+ p+=2; /* skip the leaf's connection-type word */
+ if (lp->chipset == DC21140) {
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+ gep_wr(lp->cache.gepc, dev);
+ }
+
+ /* Block count */
+ count = *p++;
+
+ /* Jump the infoblocks to find types; first byte <128 marks a
+ ** COMPACT block, else byte 1 is the block type and the low bits of
+ ** byte 0 the block length.
+ */
+ for (;count; --count) {
+ if (*p < 128) {
+ p += COMPACT_LEN;
+ } else if (*(p+1) == 5) {
+ type5_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 4) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 3) {
+ type3_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 2) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 1) {
+ type1_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else {
+ p += ((*p & BLOCK_LEN) + 1);
+ }
+ }
+
+ return;
+}
+
+/*
+** A generic routine that writes GEP control, data and reset information
+** to the GEP register (21140) or csr15 GEP portion (2114[23]).
+**
+** p points at a counted sequence: byte count followed by either bytes
+** (21140) or 16-bit little-endian words (2114x / infoblock 5).
+*/
+static void
+srom_exec(struct device *dev, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char count = (p ? *p++ : 0);
+ u_short *w = (u_short *)p;
+
+ /* Only infoblock types 1, 3 and 5 carry GEP sequences */
+ if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
+
+ if (lp->chipset != DC21140) RESET_SIA;
+
+ while (count--) {
+ gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
+ *p++ : TWIDDLE(w++)), dev);
+ udelay(2000); /* 2ms per action */
+ }
+
+ if (lp->chipset != DC21140) {
+ /* Restore the cached SIA settings clobbered by RESET_SIA */
+ outl(lp->cache.csr14, DE4X5_STRR);
+ outl(lp->cache.csr13, DE4X5_SICR);
+ }
+
+ return;
+}
+
+/*
+** Basically this function is a NOP since it will never be called,
+** unless I implement the DC21041 SROM functions. There's no need
+** since the existing code will be satisfactory for all boards.
+**
+** Returns the standard autosense polling interval.
+*/
+static int
+dc21041_infoleaf(struct device *dev)
+{
+ return DE4X5_AUTOSENSE_MS;
+}
+
+/*
+** Walk the DC21140 SROM infoleaf: read the GEP control byte, then
+** dispatch to the first infoblock handler (the handlers chain through
+** the remaining blocks themselves).  When every block has been tried
+** (tcount == count) declare "no connection" and reset the autosense
+** state.  Returns the next autosense tick with the timer-callback flag
+** cleared.
+*/
+static int
+dc21140_infoleaf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* GEP control */
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+/*
+** DC21142 variant of the infoleaf walker: identical to the DC21140
+** version except there is no GEP control byte in the leaf.
+*/
+static int
+dc21142_infoleaf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+/*
+** DC21143 variant of the infoleaf walker; same logic as the DC21142
+** version.
+*/
+static int
+dc21143_infoleaf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+/*
+** The compact infoblock is only designed for DC21140[A] chips, so
+** we'll reuse the dc21140m_autoconf function. Non MII media only.
+**
+** Like all infoblock handlers: first skip forward through later blocks
+** until the one selected by the trial counter (tcount) is reached,
+** then load its media/GEP/CSR6 parameters and run autoconf.
+*/
+static int
+compact_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char flags, csr6;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+COMPACT_LEN) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
+ } else {
+ return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = COMPACT;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ lp->infoblock_media = (*p++) & COMPACT_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ /* Decode the media-sense flag bits into mask/polarity words */
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/*
+** This block describes non MII media for the DC21140[A] only.
+**
+** Extended-format equivalent of the compact block: same parameters,
+** but with a length byte so later blocks can be skipped generically.
+*/
+static int
+type0_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 0;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ p+=2; /* skip length/type header */
+ lp->infoblock_media = (*p++) & BLOCK0_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/* These functions are under construction! */
+
+/*
+** Type 1 block: MII PHY media for the DC21140.  During INITIALISED
+** state it records the PHY's GEP/reset sequences and capability words;
+** during autosense it just selects MII mode and runs autoconf.
+*/
+static int
+type1_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2; /* skip length/type header */
+ if (lp->state == INITIALISED) {
+ lp->ibn = 1;
+ lp->active = *p++;
+ /* gep/rst point at counted byte sequences inside the SROM image */
+ lp->phy[lp->active].gep = (*p ? p : 0); p += (*p + 1);
+ lp->phy[lp->active].rst = (*p ? p : 0); p += (*p + 1);
+ lp->phy[lp->active].mc = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ttm = TWIDDLE(p);
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 1;
+ lp->active = *p;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = TRUE;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/*
+** Type 2 block: non-MII (SIA) media for the DC2114x.  Loads the SIA
+** CSR13-15 values (explicit when the extended-field bit is set, else
+** the hard-coded defaults) plus the GEP control/data words.
+*/
+static int
+type2_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 2;
+ lp->active = 0;
+ p += 2; /* skip length/type header */
+ lp->infoblock_media = (*p) & MEDIA_CODE;
+
+ if ((*p++) & EXT_FIELD) {
+ lp->cache.csr13 = TWIDDLE(p); p += 2;
+ lp->cache.csr14 = TWIDDLE(p); p += 2;
+ lp->cache.csr15 = TWIDDLE(p); p += 2;
+ } else {
+ lp->cache.csr13 = CSR13;
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ }
+ /* GEP words live in the top half of csr15 on the 2114x */
+ lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(TWIDDLE(p)) << 16);
+ lp->infoblock_csr6 = OMR_SIA;
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+/*
+** Type 3 block: MII PHY media for the DC2114x.  Like type 1 but the
+** gep/rst sequences are counted in 16-bit words (hence the 2*(*p)
+** strides) and an extra media-capabilities byte (mci) follows.
+*/
+static int
+type3_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2; /* skip length/type header */
+ if (lp->state == INITIALISED) {
+ lp->ibn = 3;
+ lp->active = *p++;
+ lp->phy[lp->active].gep = (*p ? p : 0); p += (2 * (*p) + 1);
+ lp->phy[lp->active].rst = (*p ? p : 0); p += (2 * (*p) + 1);
+ lp->phy[lp->active].mc = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ttm = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].mci = *p;
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 3;
+ lp->active = *p;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = TRUE;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+/*
+** Type 4 block: SYM media for the DC2114x.  Uses default SIA CSR
+** values and carries GEP control/data plus CSR6 command and flag bytes
+** like the type 0 block.
+*/
+static int
+type4_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 4;
+ lp->active = 0;
+ p+=2; /* skip length/type header */
+ lp->infoblock_media = (*p++) & MEDIA_CODE;
+ lp->cache.csr13 = CSR13; /* Hard coded defaults */
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+/*
+** This block type provides information for resetting external devices
+** (chips) through the General Purpose Register.
+**
+** Records the reset sequence pointer and executes it via srom_exec();
+** it never selects media itself, so it just returns the poll interval.
+*/
+static int
+type5_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ /* Must be initializing to run this code */
+ if ((lp->state == INITIALISED) || (lp->media == INIT)) {
+ p+=2; /* skip length/type header */
+ lp->rst = p;
+ srom_exec(dev, lp->rst);
+ }
+
+ return DE4X5_AUTOSENSE_MS;
+}
+
+/*
+** MII Read/Write
+*/
+
+/* Read one 16-bit MII management register: preamble, start-of-frame +
+** read opcode, PHY and register addresses, turnaround, then data in.
+*/
+static int
+mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to read */
+ mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
+
+ return mii_rdata(ioaddr); /* Read data */
+}
+
+/* Write one 16-bit MII management register (write counterpart of
+** mii_rd); data is bit-reversed before transmission since mii_wdata
+** clocks LSB first.
+*/
+static void
+mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to write */
+ mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
+ data = mii_swap(data, 16); /* Swap data bit ordering */
+ mii_wdata(data, 16, ioaddr); /* Write data */
+
+ return;
+}
+
+/* Clock 16 data bits in from the MII, MSB first. */
+static int
+mii_rdata(u_long ioaddr)
+{
+ int i;
+ s32 tmp = 0;
+
+ for (i=0; i<16; i++) {
+ tmp <<= 1;
+ tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
+ }
+
+ return tmp;
+}
+
+/* Clock len bits of data out to the MII, LSB first. */
+static void
+mii_wdata(int data, int len, u_long ioaddr)
+{
+ int i;
+
+ for (i=0; i<len; i++) {
+ sendto_mii(MII_MWR | MII_WR, data, ioaddr);
+ data >>= 1;
+ }
+
+ return;
+}
+
+/* Clock a 5-bit PHY or register address out to the MII, MSB first
+** (bit-reversed then shifted out LSB first).
+*/
+static void
+mii_address(u_char addr, u_long ioaddr)
+{
+ int i;
+
+ addr = mii_swap(addr, 5);
+ for (i=0; i<5; i++) {
+ sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
+ addr >>= 1;
+ }
+
+ return;
+}
+
+/* Drive the 2-clock MII turnaround: <1,0> for a write, tri-state MDIO
+** for a read.
+*/
+static void
+mii_ta(u_long rw, u_long ioaddr)
+{
+ if (rw == MII_STWR) {
+ sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
+ } else {
+ getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
+ }
+
+ return;
+}
+
+/* Reverse the low len bits of data (used to reorder MII frame bits). */
+static int
+mii_swap(int data, int len)
+{
+ int k, reversed = 0;
+
+ for (k=0; k<len; k++) {
+ reversed = (reversed << 1) | (data & 1);
+ data >>= 1;
+ }
+
+ return reversed;
+}
+
+/* Clock one bit (bit 0 of data, placed on MDIO) out over the MII
+** management interface with an MDC low/high transition.
+*/
+static void
+sendto_mii(u32 command, int data, u_long ioaddr)
+{
+ u32 j;
+
+ j = (data & 1) << 17; /* MDIO data-out bit position */
+ outl(command | j, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC | j, ioaddr);
+ udelay(1);
+
+ return;
+}
+
+/* Clock MDC once and sample the MDIO data-in bit (bit 19). */
+static int
+getfrom_mii(u32 command, u_long ioaddr)
+{
+ outl(command, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC, ioaddr);
+ udelay(1);
+
+ return ((inl(ioaddr) >> 19) & 1);
+}
+
+/*
+** Here's 3 ways to calculate the OUI from the ID registers.
+**
+** Only the final "my way" variant is live: it returns the raw value of
+** MII ID register 0.  Everything between the two mii_rd() calls and
+** the live return is one large disabled comment (note the deliberate
+** "* /" sequences that keep it from closing early) holding the
+** SEEQ/Cypress bit-shuffle and the National/Broadcom formula.
+*/
+static int
+mii_get_oui(u_char phyaddr, u_long ioaddr)
+{
+/*
+ union {
+ u_short reg;
+ u_char breg[2];
+ } a;
+ int i, r2, r3, ret=0;*/
+ int r2, r3;
+
+ /* Read r2 and r3 */
+ r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
+ r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
+ /* SEEQ and Cypress way * /
+ / * Shuffle r2 and r3 * /
+ a.reg=0;
+ r3 = ((r3>>10)|(r2<<6))&0x0ff;
+ r2 = ((r2>>2)&0x3fff);
+
+ / * Bit reverse r3 * /
+ for (i=0;i<8;i++) {
+ ret<<=1;
+ ret |= (r3&1);
+ r3>>=1;
+ }
+
+ / * Bit reverse r2 * /
+ for (i=0;i<16;i++) {
+ a.reg<<=1;
+ a.reg |= (r2&1);
+ r2>>=1;
+ }
+
+ / * Swap r2 bytes * /
+ i=a.breg[0];
+ a.breg[0]=a.breg[1];
+ a.breg[1]=i;
+
+ return ((a.reg<<8)|ret); */ /* SEEQ and Cypress way */
+/* return ((r2<<6)|(u_int)(r3>>10)); */ /* NATIONAL and BROADCOM way */
+ return r2; /* (I did it) My way */
+}
+
+/*
+** The SROM spec forces us to search addresses [1-31 0]. Bummer.
+**
+** Probes every MII address for a PHY, matching discovered IDs against
+** phy_info[] (falling back to a generic entry), then resets each PHY
+** found.  Returns the number of PHYs discovered; clears lp->useMII
+** when there are none.
+**
+** FIX: the free-slot scans previously tested lp->phy[k].id BEFORE the
+** k < DE4X5_MAX_PHY bound, reading one element past the end of
+** lp->phy[] whenever the table was full.  The bound is now checked
+** first (as in mainline Linux de4x5.c).
+*/
+static int
+mii_get_phy(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table);
+ int id;
+
+ lp->active = 0;
+ lp->useMII = TRUE;
+
+ /* Search the MII address space for possible PHY devices */
+ for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
+ lp->phy[lp->active].addr = i;
+ if (i==0) n++; /* Count cycles */
+ while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
+ id = mii_get_oui(i, DE4X5_MII);
+ if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
+ for (j=0; j<limit; j++) { /* Search PHY table */
+ if (id != phy_info[j].id) continue; /* ID match? */
+ for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++);
+ if (k < DE4X5_MAX_PHY) {
+ memcpy((char *)&lp->phy[k],
+ (char *)&phy_info[j], sizeof(struct phy_table));
+ lp->phy[k].addr = i;
+ lp->mii_cnt++;
+ lp->active++;
+ } else {
+ goto purgatory; /* Stop the search */
+ }
+ break;
+ }
+ if ((j == limit) && (i < DE4X5_MAX_MII)) {
+ /* Unknown ID: install a generic PHY table entry.
+ ** NOTE(review): as in mainline, the write below is not
+ ** guarded against a full table; unreachable in practice
+ ** because a full table exits via purgatory above.
+ */
+ for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++);
+ lp->phy[k].addr = i;
+ lp->phy[k].id = id;
+ lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
+ lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
+ lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
+ lp->mii_cnt++;
+ lp->active++;
+ printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name);
+ j = de4x5_debug;
+ de4x5_debug |= DEBUG_MII;
+ de4x5_dbg_mii(dev, k);
+ de4x5_debug = j;
+ printk("\n");
+ }
+ }
+ purgatory:
+ lp->active = 0;
+ if (lp->phy[0].id) { /* Reset the PHY devices */
+ for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++) { /*For each PHY*/
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
+ while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
+
+ de4x5_dbg_mii(dev, k);
+ }
+ }
+ if (!lp->mii_cnt) lp->useMII = FALSE;
+
+ return lp->mii_cnt;
+}
+
+/*
+** Build the 21x4x setup frame holding the perfect-filter or hash-filter
+** address table.  In hash mode only our own address is installed as a
+** perfect entry (plus the broadcast hash bit); in perfect mode our
+** address and the broadcast address are written as the first entries.
+** Each address byte pair occupies the low 16 bits of a 32-bit word,
+** hence the pa stepping.  Returns a pointer past the last entry
+** written.
+*/
+static char *
+build_setup_frame(struct device *dev, int mode)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+ char *pa = lp->setup_frame;
+
+ /* Initialise the setup frame */
+ if (mode == ALL) {
+ memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
+ }
+
+ if (lp->setup_f == HASH_PERF) {
+ for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
+ *(pa + i) = dev->dev_addr[i]; /* Host address */
+ if (i & 0x01) pa += 2;
+ }
+ *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80; /* broadcast bit */
+ } else {
+ for (i=0; i<ETH_ALEN; i++) { /* Host address */
+ *(pa + (i&1)) = dev->dev_addr[i];
+ if (i & 0x01) pa += 4;
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
+ *(pa + (i&1)) = (char) 0xff;
+ if (i & 0x01) pa += 4;
+ }
+ }
+
+ return pa; /* Points to the next entry */
+}
+
+/* Schedule the autosense timer (de4x5_ast) to fire in time_out ms. */
+static void
+enable_ast(struct device *dev, u32 time_out)
+{
+ timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
+
+ return;
+}
+
+/* Cancel any pending autosense timer for this device. */
+static void
+disable_ast(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ del_timer(&lp->timer);
+
+ return;
+}
+
+/*
+** Switch the MAC between its media ports: stop the chip, rewrite the
+** port-select bits of CSR6 from the current infoblock, soft reset,
+** restore the GEP (21140) or SIA (2114x) state, and re-assert CSR6.
+** Returns the CSR6 (OMR) value written.
+*/
+static long
+de4x5_switch_mac_port(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ STOP_DE4X5;
+
+ /* Assert the OMR_PS bit in CSR6 */
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
+ OMR_FDX));
+ omr |= lp->infoblock_csr6;
+ if (omr & OMR_PS) omr |= OMR_HBD; /* no heartbeat on the serial port */
+ outl(omr, DE4X5_OMR);
+
+ /* Soft Reset */
+ RESET_DE4X5;
+
+ /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else if ((lp->chipset & ~0x0ff) == DC2114x) {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
+ }
+
+ /* Restore CSR6 */
+ outl(omr, DE4X5_OMR);
+
+ /* Reset CSR8 */
+ inl(DE4X5_MFC); /* read clears the missed-frame counter */
+
+ return omr;
+}
+
+/* Write the General Purpose register: the real GEP on a DC21140, or
+** the GEP field in the top half of CSR15 on a DC2114x.
+*/
+static void
+gep_wr(s32 data, struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ outl(data, DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
+ }
+
+ return;
+}
+
+/* Read the General Purpose register (chip-dependent location); returns
+** 0 for chips without one.
+*/
+static int
+gep_rd(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ return inl(DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return (inl(DE4X5_SIGR) & 0x000fffff);
+ }
+
+ return 0;
+}
+
+/*
+** (Re)arm the per-device timer to call fn(data) after msec
+** milliseconds (minimum one tick).  Any pending event is cancelled
+** first, so only one timer is outstanding per device.
+*/
+static void
+timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int dt;
+
+ /* First, cancel any pending timer events */
+ del_timer(&lp->timer);
+
+ /* Convert msec to ticks */
+ dt = (msec * HZ) / 1000;
+ if (dt==0) dt=1;
+
+ /* Set up timer */
+ lp->timer.expires = jiffies + dt;
+ lp->timer.function = fn;
+ lp->timer.data = data;
+ add_timer(&lp->timer);
+
+ return;
+}
+
+/*
+** Power management: put the chip into WAKEUP, SNOOZE or SLEEP state
+** via the EISA CFPM register or the PCI CFDA configuration byte.
+** DC21040/DC21140 have no power management, so this is a no-op there.
+** SLEEP also disconnects the SIA first; WAKEUP allows 10ms to settle.
+*/
+static void
+yawn(struct device *dev, int state)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
+
+ if(lp->bus == EISA) {
+ switch(state) {
+ case WAKEUP:
+ outb(WAKEUP, PCI_CFPM);
+ de4x5_ms_delay(10);
+ break;
+
+ case SNOOZE:
+ outb(SNOOZE, PCI_CFPM);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ outb(SLEEP, PCI_CFPM);
+ break;
+ }
+ } else {
+ switch(state) {
+ case WAKEUP:
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, WAKEUP);
+ de4x5_ms_delay(10);
+ break;
+
+ case SNOOZE:
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, SNOOZE);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, SLEEP);
+ break;
+ }
+ }
+
+ return;
+}
+
+/*
+** Parse the per-device options from the module `args` string: an
+** "fdx"/"FDX" flag and an autosense media keyword, scanned only within
+** the substring belonging to this device name.
+**
+** FIX: substrings must be tested longest-first, otherwise "TP" matches
+** inside "TP_NW" and "BNC"/"AUI" match inside "BNC_AUI", making those
+** settings unreachable (ordering as fixed in mainline Linux de4x5.c).
+*/
+static void
+de4x5_parse_params(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ char *p, *q, t;
+
+ lp->params.fdx = 0;
+ lp->params.autosense = AUTO;
+
+ if (args == NULL) return;
+
+ if ((p = strstr(args, dev->name))) {
+ /* Temporarily terminate at the next device's options */
+ if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
+ t = *q;
+ *q = '\0';
+
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+
+ if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
+ if (strstr(p, "TP_NW")) {
+ lp->params.autosense = TP_NW;
+ } else if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "BNC")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "AUI")) {
+ lp->params.autosense = AUI;
+ } else if (strstr(p, "10Mb")) {
+ lp->params.autosense = _10Mb;
+ } else if (strstr(p, "100Mb")) {
+ lp->params.autosense = _100Mb;
+ } else if (strstr(p, "AUTO")) {
+ lp->params.autosense = AUTO;
+ }
+ }
+ *q = t; /* restore the clobbered character */
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_open(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
+ printk("\tphysical address: ");
+ for (i=0;i<6;i++) {
+ printk("%2.2x:",(short)dev->dev_addr[i]);
+ }
+ printk("\n");
+ printk("Descriptor head addresses:\n");
+ printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
+ printk("Descriptor addresses:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
+ printk("Descriptor buffers:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
+ printk("Ring size: \nRX: %d\nTX: %d\n",
+ (short)lp->rxRingSize,
+ (short)lp->txRingSize);
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_mii(struct device *dev, int k)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (de4x5_debug & DEBUG_MII) {
+ printk("\nMII device address: %d\n", lp->phy[k].addr);
+ printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
+ }
+ printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
+ printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
+ } else {
+ printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_media(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ if (lp->media != lp->c_media) {
+ if (de4x5_debug & DEBUG_MEDIA) {
+ printk("%s: media is %s%s\n", dev->name,
+ (lp->media == NC ? "unconnected, link down or incompatible connection" :
+ (lp->media == TP ? "TP" :
+ (lp->media == ANS ? "TP/Nway" :
+ (lp->media == BNC ? "BNC" :
+ (lp->media == AUI ? "AUI" :
+ (lp->media == BNC_AUI ? "BNC/AUI" :
+ (lp->media == EXT_SIA ? "EXT SIA" :
+ (lp->media == _100Mb ? "100Mb/s" :
+ (lp->media == _10Mb ? "10Mb/s" :
+ "???"
+ ))))))))), (lp->fdx?" full duplex.":"."));
+ }
+ lp->c_media = lp->media;
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_srom(struct de4x5_srom *p)
+{
+ int i;
+
+ if (de4x5_debug & DEBUG_SROM) {
+ printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
+ printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
+ printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
+ printk("SROM version: %02x\n", (u_char)(p->version));
+ printk("# controllers: %02x\n", (u_char)(p->num_controllers));
+
+ printk("Hardware Address: ");
+ for (i=0;i<ETH_ALEN-1;i++) {
+ printk("%02x:", (u_char)*(p->ieee_addr+i));
+ }
+ printk("%02x\n", (u_char)*(p->ieee_addr+i));
+ printk("CRC checksum: %04x\n", (u_short)(p->chksum));
+ for (i=0; i<64; i++) {
+ printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_rx(struct sk_buff *skb, int len)
+{
+ int i, j;
+
+ if (de4x5_debug & DEBUG_RX) {
+ printk("R: %02x:%02x:%02x:%02x:%02x:%02x <- %02x:%02x:%02x:%02x:%02x:%02x len/SAP:%02x%02x [%d]\n",
+ (u_char)skb->data[0],
+ (u_char)skb->data[1],
+ (u_char)skb->data[2],
+ (u_char)skb->data[3],
+ (u_char)skb->data[4],
+ (u_char)skb->data[5],
+ (u_char)skb->data[6],
+ (u_char)skb->data[7],
+ (u_char)skb->data[8],
+ (u_char)skb->data[9],
+ (u_char)skb->data[10],
+ (u_char)skb->data[11],
+ (u_char)skb->data[12],
+ (u_char)skb->data[13],
+ len);
+ if (de4x5_debug & DEBUG_RX) {
+ for (j=0; len>0;j+=16, len-=16) {
+ printk(" %03x: ",j);
+ for (i=0; i<16 && i<len; i++) {
+ printk("%02x ",(u_char)skb->data[i+j]);
+ }
+ printk("\n");
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases. In the normal course of events
+** this function is only used for my testing.
+*/
+static int
+de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 omr;
+ union {
+ u8 addr[144];
+ u16 sval[72];
+ u32 lval[36];
+ } tmp;
+
+ switch(ioc->cmd) {
+ case DE4X5_GET_HWADDR: /* Get the hardware address */
+ ioc->len = ETH_ALEN;
+ status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
+ if (status)
+ break;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ copy_to_user(ioc->data, tmp.addr, ioc->len);
+
+ break;
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+ status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
+ if (status)
+ break;
+ status = -EPERM;
+ if (!suser())
+ break;
+ status = 0;
+ copy_from_user(tmp.addr, ioc->data, ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ build_setup_frame(dev, PHYS_ADDR_ONLY);
+ /* Set up the descriptor and give ownership to the card */
+ while (test_and_set_bit(0, (void *)&dev->tbusy) != 0);
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, NULL);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ dev->tbusy = 0; /* Unlock the TX ring */
+
+ break;
+ case DE4X5_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ dev->flags |= IFF_PROMISC;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr &= ~OMR_PR;
+ outb(omr, DE4X5_OMR);
+ dev->flags &= ~IFF_PROMISC;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PM;
+ outl(omr, DE4X5_OMR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_STATS: /* Get the driver statistics */
+ ioc->len = sizeof(lp->pktStats);
+ status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
+ if (status)
+ break;
+
+ cli();
+ copy_to_user(ioc->data, &lp->pktStats, ioc->len);
+ sti();
+
+ break;
+ case DE4X5_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_OMR: /* Get the OMR Register contents */
+ tmp.addr[0] = inl(DE4X5_OMR);
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
+ copy_to_user(ioc->data, tmp.addr, 1);
+ }
+
+ break;
+ case DE4X5_SET_OMR: /* Set the OMR Register contents */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
+ copy_from_user(tmp.addr, ioc->data, 1);
+ outl(tmp.addr[0], DE4X5_OMR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_REG: /* Get the DE4X5 Registers */
+ j = 0;
+ tmp.lval[0] = inl(DE4X5_STS); j+=4;
+ tmp.lval[1] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[3] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[4] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[5] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[6] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
+ ioc->len = j;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ copy_to_user(ioc->data, tmp.addr, ioc->len);
+ }
+ break;
+
+#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
+/*
+ case DE4X5_DUMP:
+ j = 0;
+ tmp.addr[j++] = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[j++] = dev->dev_addr[i];
+ }
+ tmp.addr[j++] = lp->rxRingSize;
+ tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
+ tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+
+ for (i=0;i<lp->rxRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
+ }
+ for (i=0;i<lp->txRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
+ }
+
+ tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[j>>2] = lp->chipset; j+=4;
+ if (lp->chipset == DC21140) {
+ tmp.lval[j>>2] = gep_rd(dev); j+=4;
+ } else {
+ tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
+ }
+ tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ tmp.lval[j>>2] = lp->active; j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ } else {
+ tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ }
+
+ tmp.addr[j++] = lp->txRingSize;
+ tmp.addr[j++] = dev->tbusy;
+
+ ioc->len = j;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ copy_to_user(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+*/
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+#ifdef MODULE
+/*
+** Note now that module autoprobing is allowed under EISA and PCI. The
+** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
+** to "do the right thing".
+*/
+#define LP(a) ((struct de4x5_private *)(a))
+static struct device *mdev = NULL;
+static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+MODULE_PARM(io, "i");
+#endif /* LINUX_VERSION_CODE */
+
+int
+init_module(void)
+{
+ int i, num, status = -EIO;
+ struct device *p;
+
+ num = count_adapters();
+
+ for (i=0; i<num; i++) {
+ if ((p = insert_device(NULL, io, de4x5_probe)) == NULL)
+ return -ENOMEM;
+
+ if (!mdev) mdev = p;
+
+ if (register_netdev(p) != 0) {
+ kfree(p);
+ } else {
+ status = 0; /* At least one adapter will work */
+ lastModule = p;
+ }
+ }
+
+ return status;
+}
+
+void
+cleanup_module(void)
+{
+ while (mdev != NULL) {
+ mdev = unlink_modules(mdev);
+ }
+
+ return;
+}
+
+static struct device *
+unlink_modules(struct device *p)
+{
+ struct device *next = NULL;
+
+ if (p->priv) { /* Private areas allocated? */
+ struct de4x5_private *lp = (struct de4x5_private *)p->priv;
+
+ next = lp->next_module;
+ if (lp->cache.buf) { /* MAC buffers allocated? */
+ kfree(lp->cache.buf); /* Free the MAC buffers */
+ }
+ kfree(lp->cache.priv); /* Free the private area */
+ release_region(p->base_addr, (lp->bus == PCI ?
+ DE4X5_PCI_TOTAL_SIZE :
+ DE4X5_EISA_TOTAL_SIZE));
+ }
+ unregister_netdev(p);
+ kfree(p); /* Free the device structure */
+
+ return next;
+}
+
+static int
+count_adapters(void)
+{
+ int i, j=0;
+ u_char pb, dev_fn, dev_num;
+ u_short dev_id, vendor;
+ u_int class = DE4X5_CLASS_CODE;
+ u_int device;
+
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+ char name[DE4X5_STRLEN];
+ u_long iobase = 0x1000;
+
+ for (i=1; i<MAX_EISA_SLOTS; i++, iobase+=EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID)) j++;
+ }
+#endif
+ if (!pcibios_present()) return j;
+
+ for (i=0;
+ (pcibios_find_class(class, i, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
+ i++) {
+ dev_num = PCI_SLOT(dev_fn);
+ device = 0;
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &dev_id);
+ device = dev_id;
+ device <<= 8;
+ if (is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x) j++;
+ }
+
+ return j;
+}
+
+/*
+** If at end of eth device list and can't use current entry, malloc
+** one up. If memory could not be allocated, print an error message.
+*/
+__initfunc(static struct device *
+insert_device(struct device *dev, u_long iobase, int (*init)(struct device *)))
+{
+ struct device *new;
+
+ new = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
+ if (new == NULL) {
+ printk("de4x5.c: Device not initialised, insufficient memory\n");
+ return NULL;
+ } else {
+ memset((char *)new, 0, sizeof(struct device)+8);
+ new->name = (char *)(new + 1);
+ new->base_addr = iobase; /* assign the io address */
+ new->init = init; /* initialisation routine */
+ }
+
+ return new;
+}
+
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c de4x5.c"
+ *
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c de4x5.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/de4x5.h b/linux/src/drivers/net/de4x5.h
new file mode 100644
index 0000000..c0c58cc
--- /dev/null
+++ b/linux/src/drivers/net/de4x5.h
@@ -0,0 +1,1028 @@
+/*
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** DC21040 CSR<1..15> Register Address Map
+*/
+#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
+#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
+#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
+#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
+#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
+#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
+#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
+#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
+#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
+#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
+#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
+#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
+#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
+#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
+#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
+#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
+#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
+#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
+#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
+#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
+#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
+
+/*
+** EISA Register Address Map
+*/
+#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase+0x0c84 /* EISA Control Register */
+#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
+#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
+#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
+#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
+#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
+
+/*
+** PCI/EISA Configuration Registers Address Map
+*/
+#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
+#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
+#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
+#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
+#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
+#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
+#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
+#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
+#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
+#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
+#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
+
+/*
+** EISA Configuration Register 0 bit definitions
+*/
+#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
+#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
+#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
+#define ER0_ISTS 0x10 /* Interrupt Status (X) */
+#define ER0_LI 0x08 /* Latch Interrupts */
+#define ER0_INTL 0x06 /* INTerrupt Level */
+#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
+
+/*
+** EISA Configuration Register 1 bit definitions
+*/
+#define ER1_IAM 0xe0 /* ISA Address Mode */
+#define ER1_IAE 0x10 /* ISA Addressing Enable */
+#define ER1_UPIN 0x0f /* User Pins */
+
+/*
+** EISA Configuration Register 2 bit definitions
+*/
+#define ER2_BRS 0xc0 /* Boot ROM Size */
+#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
+
+/*
+** EISA Configuration Register 3 bit definitions
+*/
+#define ER3_BWE 0x40 /* Burst Write Enable */
+#define ER3_BRE 0x04 /* Burst Read Enable */
+#define ER3_LSR 0x02 /* Local Software Reset */
+
+/*
+** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
+** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
+** the configuration revision register step number.
+*/
+#define CFID_DID 0xff00 /* Device ID */
+#define CFID_VID 0x00ff /* Vendor ID */
+#define DC21040_DID 0x0200 /* Unique Device ID # */
+#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
+#define DC21041_DID 0x1400 /* Unique Device ID # */
+#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
+#define DC21140_DID 0x0900 /* Unique Device ID # */
+#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
+#define DC2114x_DID 0x1900 /* Unique Device ID # */
+#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
+
+/*
+** Chipset defines
+*/
+#define DC21040 DC21040_DID
+#define DC21041 DC21041_DID
+#define DC21140 DC21140_DID
+#define DC2114x DC2114x_DID
+#define DC21142 (DC2114x_DID | 0x0010)
+#define DC21143 (DC2114x_DID | 0x0030)
+
+#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
+#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
+#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
+#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
+#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
+#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
+
+/*
+** PCI Configuration Command/Status Register (PCI_CFCS)
+*/
+#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
+#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
+#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
+#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
+#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
+#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
+#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
+#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
+#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
+#define CFCS_MO 0x00000004 /* Master Operation (C) */
+#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
+#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
+
+/*
+** PCI Configuration Revision Register (PCI_CFRV)
+*/
+#define CFRV_BC 0xff000000 /* Base Class */
+#define CFRV_SC 0x00ff0000 /* Subclass */
+#define CFRV_RN 0x000000f0 /* Revision Number */
+#define CFRV_SN 0x0000000f /* Step Number */
+#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
+#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
+#define STEP_NUMBER 0x00000020 /* Increments for future chips */
+#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
+#define CFRV_MASK 0xffff0000 /* Register mask */
+
+/*
+** PCI Configuration Latency Timer Register (PCI_CFLT)
+*/
+#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
+
+/*
+** PCI Configuration Base I/O Address Register (PCI_CBIO)
+*/
+#define CBIO_MASK -128 /* Base I/O Address Mask */
+#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
+
+/*
+** PCI Configuration Card Information Structure Register (PCI_CCIS)
+*/
+#define CCIS_ROMI 0xf0000000 /* ROM Image */
+#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
+#define CCIS_ASI 0x00000007 /* Address Space Indicator */
+
+/*
+** PCI Configuration Subsystem ID Register (PCI_SSID)
+*/
+#define SSID_SSID 0xffff0000 /* Subsystem ID */
+#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
+
+/*
+** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
+*/
+#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
+#define CBER_ROME 0x00000001 /* ROM Enable */
+
+/*
+** PCI Configuration Interrupt Register (PCI_CFIT)
+*/
+#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
+#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
+#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
+#define CFIT_IRQL 0x000000ff /* Interrupt Line */
+
+/*
+** PCI Configuration Power Management Area Register (PCI_CFPM)
+*/
+#define SLEEP 0x80 /* Power Saving Sleep Mode */
+#define SNOOZE 0x40 /* Power Saving Snooze Mode */
+#define WAKEUP 0x00 /* Power Saving Wakeup */
+
+#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
+#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
+
+/*
+** DC21040 Bus Mode Register (DE4X5_BMR)
+*/
+#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
+#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
+#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
+#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
+#define BMR_CAL 0x0000c000 /* Cache Alignment */
+#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
+#define BMR_BLE 0x00000080 /* Big/Little Endian */
+#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
+#define BMR_BAR 0x00000002 /* Bus ARbitration */
+#define BMR_SWR 0x00000001 /* Software Reset */
+
+ /* Timings here are for 10BASE-T/AUI only*/
+#define TAP_NOPOLL 0x00000000 /* No automatic polling */
+#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
+#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
+#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
+#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
+#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
+#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
+#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
+
+#define CAL_NOUSE 0x00000000 /* Not used */
+#define CAL_8LONG 0x00004000 /* 8-longword alignment */
+#define CAL_16LONG 0x00008000 /* 16-longword alignment */
+#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
+
+#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
+#define PBL_1 0x00000100 /* 1 longword DMA burst length */
+#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
+#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
+#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
+#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
+#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
+
+#define DSL_0 0x00000000 /* 0 longword / descriptor */
+#define DSL_1 0x00000004 /* 1 longword / descriptor */
+#define DSL_2 0x00000008 /* 2 longwords / descriptor */
+#define DSL_4 0x00000010 /* 4 longwords / descriptor */
+#define DSL_8 0x00000020 /* 8 longwords / descriptor */
+#define DSL_16 0x00000040 /* 16 longwords / descriptor */
+#define DSL_32 0x00000080 /* 32 longwords / descriptor */
+
+/*
+** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
+*/
+#define TPD 0x00000001 /* Transmit Poll Demand */
+
+/*
+** DC21040 Receive Poll Demand Register (DE4X5_RPD)
+*/
+#define RPD 0x00000001 /* Receive Poll Demand */
+
+/*
+** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
+*/
+#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
+
+/*
+** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
+*/
+#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
+
+/*
+** Status Register (DE4X5_STS)
+*/
+#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
+#define STS_BE 0x03800000 /* Bus Error Bits */
+#define STS_TS 0x00700000 /* Transmit Process State */
+#define STS_RS 0x000e0000 /* Receive Process State */
+#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define STS_ER 0x00004000 /* Early Receive */
+#define STS_FBE 0x00002000 /* Fatal Bus Error */
+#define STS_SE 0x00002000 /* System Error */
+#define STS_LNF 0x00001000 /* Link Fail */
+#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
+#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
+#define STS_ETI 0x00000400 /* Early Transmit Interupt */
+#define STS_AT 0x00000400 /* AUI/TP Pin */
+#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
+#define STS_RPS 0x00000100 /* Receive Process Stopped */
+#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define STS_RI 0x00000040 /* Receive Interrupt */
+#define STS_UNF 0x00000020 /* Transmit Underflow */
+#define STS_LNP 0x00000010 /* Link Pass */
+#define STS_ANC 0x00000010 /* Autonegotiation Complete */
+#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
+#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define STS_TPS 0x00000002 /* Transmit Process Stopped */
+#define STS_TI 0x00000001 /* Transmit Interrupt */
+
+#define EB_PAR 0x00000000 /* Parity Error */
+#define EB_MA 0x00800000 /* Master Abort */
+#define EB_TA 0x01000000 /* Target Abort */
+#define EB_RES0 0x01800000 /* Reserved */
+#define EB_RES1 0x02000000 /* Reserved */
+
+#define TS_STOP 0x00000000 /* Stopped */
+#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
+#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
+#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
+#define TS_RES 0x00400000 /* Reserved */
+#define TS_SPKT 0x00500000 /* Setup Packet */
+#define TS_SUSP 0x00600000 /* Suspended */
+#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
+
+#define RS_STOP 0x00000000 /* Stopped */
+#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
+#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
+#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
+#define RS_SUSP 0x00080000 /* Suspended */
+#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
+#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
+#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
+
+#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
+
+/*
+** Operation Mode Register (DE4X5_OMR)
+*/
+#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
+#define OMR_RA 0x40000000 /* Receive All */
+#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
+#define OMR_SCR 0x01000000 /* Scrambler Mode */
+#define OMR_PCS 0x00800000 /* PCS Function */
+#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
+#define OMR_SF 0x00200000 /* Store and Forward */
+#define OMR_HBD 0x00080000 /* HeartBeat Disable */
+#define OMR_PS 0x00040000 /* Port Select */
+#define OMR_CA 0x00020000 /* Capture Effect Enable */
+#define OMR_BP 0x00010000 /* Back Pressure */
+#define OMR_TR 0x0000c000 /* Threshold Control Bits */
+#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
+#define OMR_FC 0x00001000 /* Force Collision Mode */
+#define OMR_OM 0x00000c00 /* Operating Mode */
+#define OMR_FDX 0x00000200 /* Full Duplex Mode */
+#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
+#define OMR_PM 0x00000080 /* Pass All Multicast */
+#define OMR_PR 0x00000040 /* Promiscuous Mode */
+#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
+#define OMR_IF 0x00000010 /* Inverse Filtering */
+#define OMR_PB 0x00000008 /* Pass Bad Frames */
+#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
+#define OMR_SR 0x00000002 /* Start/Stop Receive */
+#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
+
+#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
+#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
+#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
+#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
+
+#define OMR_DEF (OMR_SDP)
+#define OMR_SIA (OMR_SDP | OMR_TTM)
+#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
+#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
+#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
+
+/*
+** DC21040 Interrupt Mask Register (DE4X5_IMR)
+*/
+#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
+#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
+#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
+#define IMR_ERM 0x00004000 /* Early Receive Mask */
+#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
+#define IMR_SEM 0x00002000 /* System Error Mask */
+#define IMR_LFM 0x00001000 /* Link Fail Mask */
+#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
+#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
+#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
+#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
+#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
+#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
+#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
+#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
+#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
+#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
+#define IMR_LPM 0x00000010 /* Link Pass */
+#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
+#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
+#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
+#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
+
+/*
+** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
+*/
+#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
+#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
+#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
+#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
+#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
+
+/*
+** DC21040 Ethernet Address PROM (DE4X5_APROM)
+*/
+#define APROM_DN 0x80000000 /* Data Not Valid */
+#define APROM_DT 0x000000ff /* Address Byte */
+
+/*
+** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
+*/
+#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define BROM_RD 0x00004000 /* Read from Boot ROM */
+#define BROM_WR 0x00002000 /* Write to Boot ROM */
+#define BROM_BR 0x00001000 /* Select Boot ROM when set */
+#define BROM_SR 0x00000800 /* Select Serial ROM when set */
+#define BROM_REG 0x00000400 /* External Register Select */
+#define BROM_DT 0x000000ff /* Data Byte */
+
+/*
+** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
+*/
+#define MII_MDI 0x00080000 /* MII Management Data In */
+#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
+#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
+#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
+#define MII_MDT 0x00020000 /* MII Management Data Out */
+#define MII_MDC 0x00010000 /* MII Management Clock */
+#define MII_RD 0x00004000 /* Read from MII */
+#define MII_WR 0x00002000 /* Write to MII */
+#define MII_SEL 0x00000800 /* Select MII when RESET */
+
+#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define SROM_RD 0x00004000 /* Read from Boot ROM */
+#define SROM_WR 0x00002000 /* Write to Boot ROM */
+#define SROM_BR 0x00001000 /* Select Boot ROM when set */
+#define SROM_SR 0x00000800 /* Select Serial ROM when set */
+#define SROM_REG 0x00000400 /* External Register Select */
+#define SROM_DT 0x000000ff /* Data Byte */
+
+#define DT_OUT 0x00000008 /* Serial Data Out */
+#define DT_IN 0x00000004 /* Serial Data In */
+#define DT_CLK 0x00000002 /* Serial ROM Clock */
+#define DT_CS 0x00000001 /* Serial ROM Chip Select */
+
+#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
+#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
+#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
+#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
+
+#define MII_CR 0x00 /* MII Management Control Register */
+#define MII_SR 0x01 /* MII Management Status Register */
+#define MII_ID0 0x02 /* PHY Identifier Register 0 */
+#define MII_ID1 0x03 /* PHY Identifier Register 1 */
+#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
+#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
+#define MII_ANE 0x06 /* Auto Negotiation Expansion */
+#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
+
+#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
+
+/*
+** MII Management Control Register
+*/
+#define MII_CR_RST 0x8000 /* RESET the PHY chip */
+#define MII_CR_LPBK 0x4000 /* Loopback enable */
+#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
+#define MII_CR_10 0x0000 /* Set 10Mb/s */
+#define MII_CR_100 0x2000 /* Set 100Mb/s */
+#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
+#define MII_CR_PD 0x0800 /* Power Down */
+#define MII_CR_ISOL 0x0400 /* Isolate Mode */
+#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
+#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
+#define MII_CR_CTE 0x0080 /* Collision Test Enable */
+
+/*
+** MII Management Status Register
+*/
+#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
+#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
+#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
+#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
+#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
+#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/
+#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
+#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
+#define MII_SR_LKS 0x0004 /* Link Status */
+#define MII_SR_JABD 0x0002 /* Jabber Detect */
+#define MII_SR_XC 0x0001 /* Extended Capabilities */
+
+/*
+** MII Management Auto Negotiation Advertisement Register
+*/
+#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
+
+/*
+** MII Management Auto Negotiation Remote End Register
+*/
+#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
+#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
+#define MII_ANLPA_RF 0x2000 /* Remote Fault */
+#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
+
+/*
+** SROM Media Definitions (ABG SROM Section)
+*/
+#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
+#define MEDIA_MII 0x0040 /* MII Present on the adapter */
+#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
+#define MEDIA_AUI 0x0004 /* AUI Media present */
+#define MEDIA_TP 0x0002 /* TP Media present */
+#define MEDIA_BNC 0x0001 /* BNC Media present */
+
+/*
+** SROM Definitions (Digital Semiconductor Format)
+*/
+#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
+#define SROM_SSID 0x0002 /* Sub-system ID offset */
+#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
+#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
+#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/
+#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
+#define SROM_SFV 0x0012 /* SROM Format Version offset */
+#define SROM_CCNT 0x0013 /* Controller Count offset */
+#define SROM_HWADD 0x0014 /* Hardware Address offset */
+#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/
+#define SROM_CRC 0x007e /* SROM CRC offset */
+
+/*
+** SROM Media Connection Definitions
+*/
+#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
+#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
+#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
+#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
+#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
+#define SROM_100BT4 0x0006 /* 100BASE-T4 */
+#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
+#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
+#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
+#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
+#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
+#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
+#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
+#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
+#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
+#define SROM_PAO 0x8800 /* Powerup Autosense Only */
+#define SROM_NSMI 0xffff /* No Selected Media Information */
+
+/*
+** SROM Media Definitions
+*/
+#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
+#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
+#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
+#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
+#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
+#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
+
+#define BLOCK_LEN 0x7f /* Extended blocks length mask */
+#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
+#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
+
+/*
+** SROM Compact Format Block Masks
+*/
+#define COMPACT_FI 0x80 /* Format Indicator */
+#define COMPACT_LEN 0x04 /* Length */
+#define COMPACT_MC 0x3f /* Media Code */
+
+/*
+** SROM Extended Format Block Type 0 Masks
+*/
+#define BLOCK0_FI 0x80 /* Format Indicator */
+#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
+#define BLOCK0_MC 0x3f /* Media Code */
+
+/*
+** DC21040 Full Duplex Register (DE4X5_FDR)
+*/
+#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
+
+/*
+** DC21041 General Purpose Timer Register (DE4X5_GPT)
+*/
+#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
+#define GPT_VAL 0x0000ffff /* Timer Value */
+
+/*
+** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
+*/
+/* Valid ONLY for DE500 hardware */
+#define GEP_LNP 0x00000080 /* Link Pass (input) */
+#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
+#define GEP_SDET 0x00000020 /* Signal Detect (input) */
+#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
+#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
+#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
+#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
+#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
+#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
+#define GEP_CTRL 0x00000100 /* GEP control bit */
+
+/*
+** SIA Register Defaults
+*/
+#define CSR13 0x00000001
+#define CSR14 0x0003ff7f /* Autonegotiation disabled */
+#define CSR15 0x00000008
+
+/*
+** SIA Status Register (DE4X5_SISR)
+*/
+#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
+#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
+#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
+#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
+#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
+#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
+#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
+#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
+#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
+#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
+#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
+#define SISR_DAO 0x00000080 /* PLL All One */
+#define SISR_DAZ 0x00000040 /* PLL All Zero */
+#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
+#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
+#define SISR_APS 0x00000008 /* Auto Polarity State */
+#define SISR_LKF 0x00000004 /* Link Fail Status */
+#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
+#define SISR_NCR 0x00000002 /* Network Connection Error */
+#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
+#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
+#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
+
+#define ANS_NDIS 0x00000000 /* Nway disable */
+#define ANS_TDIS 0x00001000 /* Transmit Disable */
+#define ANS_ADET 0x00002000 /* Ability Detect */
+#define ANS_ACK 0x00003000 /* Acknowledge */
+#define ANS_CACK 0x00004000 /* Complete Acknowledge */
+#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
+#define ANS_LCHK 0x00006000 /* Link Check */
+
+#define SISR_RST 0x00000301 /* CSR12 reset */
+#define SISR_ANR 0x00001301 /* Autonegotiation restart */
+
+/*
+** SIA Connectivity Register (DE4X5_SICR)
+*/
+#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
+#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
+#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
+#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
+#define SICR_IE 0x00001000 /* Input Enable */
+#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
+#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
+#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
+#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - APLL Sigs (comment said "DPLL"; name suggests APLL -- verify vs. chip manual) */
+#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
+#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
+#define SICR_ASE 0x00000080 /* APLL Start Enable*/
+#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
+#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
+#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
+#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
+#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
+#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
+#define SICR_SRL 0x00000001 /* SIA Reset */
+#define SIA_RESET 0x00000000 /* SIA Reset Value */
+
+/*
+** SIA Transmit and Receive Register (DE4X5_STRR)
+*/
+#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
+#define STRR_SPP 0x00004000 /* Set Polarity Plus */
+#define STRR_APE 0x00002000 /* Auto Polarity Enable */
+#define STRR_LTE 0x00001000 /* Link Test Enable */
+#define STRR_SQE 0x00000800 /* Signal Quality Enable */
+#define STRR_CLD 0x00000400 /* Collision Detect Enable */
+#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
+#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
+#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
+#define STRR_HDE 0x00000040 /* Half Duplex Enable */
+#define STRR_CPEN 0x00000030 /* Compensation Enable */
+#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
+#define STRR_DREN 0x00000004 /* Driver Enable */
+#define STRR_LBK 0x00000002 /* Loopback Enable */
+#define STRR_ECEN 0x00000001 /* Encoder Enable */
+#define STRR_RESET 0xffffffff /* Reset value for STRR */
+
+/*
+** SIA General Register (DE4X5_SIGR)
+*/
+#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
+#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
+#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
+#define SIGR_CWE 0x08000000 /* Control Write Enable */
+#define SIGR_RME 0x04000000 /* Receive Match Enable */
+#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
+#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
+#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
+#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
+#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
+#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
+#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
+#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
+#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
+#define SIGR_FRL 0x00002000 /* Force Receiver Low */
+#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
+#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
+#define SIGR_FLF 0x00000400 /* Force Link Fail */
+#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
+#define SIGR_TSCK 0x00000100 /* Test Clock */
+#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
+#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
+#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
+#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
+#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
+#define SIGR_JCK 0x00000004 /* Jabber Clock */
+#define SIGR_HUJ 0x00000002 /* Host Unjab */
+#define SIGR_JBD 0x00000001 /* Jabber Disable */
+#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
+
+/*
+** Receive Descriptor Bit Summary
+*/
+#define R_OWN 0x80000000 /* Own Bit */
+#define RD_FF 0x40000000 /* Filtering Fail */
+#define RD_FL 0x3fff0000 /* Frame Length */
+#define RD_ES 0x00008000 /* Error Summary */
+#define RD_LE 0x00004000 /* Length Error */
+#define RD_DT 0x00003000 /* Data Type */
+#define RD_RF 0x00000800 /* Runt Frame */
+#define RD_MF 0x00000400 /* Multicast Frame */
+#define RD_FS 0x00000200 /* First Descriptor */
+#define RD_LS 0x00000100 /* Last Descriptor */
+#define RD_TL 0x00000080 /* Frame Too Long */
+#define RD_CS 0x00000040 /* Collision Seen */
+#define RD_FT 0x00000020 /* Frame Type */
+#define RD_RJ 0x00000010 /* Receive Watchdog */
+#define RD_RE 0x00000008 /* Report on MII Error */
+#define RD_DB 0x00000004 /* Dribbling Bit */
+#define RD_CE 0x00000002 /* CRC Error */
+#define RD_OF 0x00000001 /* Overflow */
+
+#define RD_RER 0x02000000 /* Receive End Of Ring */
+#define RD_RCH 0x01000000 /* Second Address Chained */
+#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
+#define RD_RBS1 0x000007ff /* Buffer 1 Size */
+
+/*
+** Transmit Descriptor Bit Summary
+*/
+#define T_OWN 0x80000000 /* Own Bit */
+#define TD_ES 0x00008000 /* Error Summary */
+#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
+#define TD_LO 0x00000800 /* Loss Of Carrier */
+#define TD_NC 0x00000400 /* No Carrier */
+#define TD_LC 0x00000200 /* Late Collision */
+#define TD_EC 0x00000100 /* Excessive Collisions */
+#define TD_HF 0x00000080 /* Heartbeat Fail */
+#define TD_CC 0x00000078 /* Collision Counter */
+#define TD_LF 0x00000004 /* Link Fail */
+#define TD_UF 0x00000002 /* Underflow Error */
+#define TD_DE 0x00000001 /* Deferred */
+
+#define TD_IC 0x80000000 /* Interrupt On Completion */
+#define TD_LS 0x40000000 /* Last Segment */
+#define TD_FS 0x20000000 /* First Segment */
+#define TD_FT1 0x10000000 /* Filtering Type */
+#define TD_SET 0x08000000 /* Setup Packet */
+#define TD_AC 0x04000000 /* Add CRC Disable */
+#define TD_TER 0x02000000 /* Transmit End Of Ring */
+#define TD_TCH 0x01000000 /* Second Address Chained */
+#define TD_DPD 0x00800000 /* Disabled Padding */
+#define TD_FT0 0x00400000 /* Filtering Type */
+#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
+#define TD_TBS1 0x000007ff /* Buffer 1 Size */
+
+#define PERFECT_F 0x00000000
+#define HASH_F TD_FT0
+#define INVERSE_F TD_FT1
+#define HASH_O_F (TD_FT1 | TD_F0)
+
+/*
+** Media / mode state machine definitions
+** User selectable:
+*/
+#define TP 0x0001 /* 10Base-T */
+#define TP_NW 0x0002 /* 10Base-T with Nway */
+#define BNC 0x0004 /* Thinwire */
+#define AUI 0x0008 /* Thickwire */
+#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
+#define _10Mb 0x0040 /* 10Mb/s Ethernet */
+#define _100Mb 0x0080 /* 100Mb/s Ethernet */
+#define AUTO 0x4000 /* Auto sense the media or speed */
+
+/*
+** Internal states
+*/
+#define NC 0x0000 /* No Connection */
+#define ANS 0x0020 /* Intermediate AutoNegotiation State */
+#define SPD_DET 0x0100 /* Parallel speed detection */
+#define INIT 0x0200 /* Initial state */
+#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
+#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
+#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
+#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
+#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
+#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
+#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
+#define MII 0x1000 /* MII on the 21143 */
+
+#define TIMER_CB 0x80000000 /* Timer callback detection */
+
+/*
+** DE4X5 DEBUG Options
+*/
+#define DEBUG_NONE 0x0000 /* No DEBUG messages */
+#define DEBUG_VERSION 0x0001 /* Print version message */
+#define DEBUG_MEDIA 0x0002 /* Print media messages */
+#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
+#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
+#define DEBUG_SROM 0x0010 /* Print SROM messages */
+#define DEBUG_MII 0x0020 /* Print MII messages */
+#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
+#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
+#define DEBUG_PCICFG 0x0100 /* Print PCI configuration messages */
+#define DEBUG_ALL 0x01ff /* OR of all the debug classes above */
+
+/*
+** Miscellaneous
+*/
+#define PCI 0
+#define EISA 1
+
+#define HASH_TABLE_LEN 512 /* Bits */
+#define HASH_BITS 0x01ff /* 9 LS bits */
+
+#define SETUP_FRAME_LEN 192 /* Bytes */
+#define IMPERF_PA_OFFSET 156 /* Bytes */
+
+#define POLL_DEMAND 1
+
+#define LOST_MEDIA_THRESHOLD 3
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define DE4X5_STRLEN 8
+
+#define DE4X5_INIT 0 /* Initialisation time */
+#define DE4X5_RUN 1 /* Run time */
+
+#define DE4X5_SAVE_STATE 0
+#define DE4X5_RESTORE_STATE 1
+
+/*
+** Address Filtering Modes
+*/
+#define PERFECT 0 /* 16 perfect physical addresses */
+#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
+#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
+#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
+
+#define ALL 0 /* Clear out all the setup frame */
+#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
+
+/*
+** Booleans
+*/
+#define NO 0
+#define FALSE 0
+
+#define YES ~0
+#define TRUE ~0
+
+/*
+** Adapter state
+*/
+#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
+#define CLOSED 1 /* Ready for opening */
+#define OPEN 2 /* Running */
+
+/*
+** Various wait times
+*/
+#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
+#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
+
+/*
+** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
+** the vendors seem split 50-50 on how to calculate the OUI register values
+** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
+*/
+#define NATIONAL_TX 0x2000
+#define BROADCOM_T4 0x03e0
+#define SEEQ_T4 0x0016
+#define CYPRESS_T4 0x0014
+
+/*
+** Speed Selection stuff
+**
+** NOTE(review): these are statement macros, not functions. They expand in
+** driver functions that must already declare an `omr' local and have `lp'
+** (private data) and `dev' in scope -- confirm at each expansion site.
+** SET_10Mb forces 10Mb/s via one of three paths: MII PHY write, SROM
+** info-block CSR6 value, or direct OMR/GEP programming.
+*/
+#define SET_10Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+/* Force 100Mb/s via the MII PHY, the SROM info-block CSR6 value, or direct
+** OMR/GEP programming. Full duplex is only requested on the MII path when
+** the PHY status register rules out 100BASE-T4-only operation.
+** NOTE(review): statement macro; expansion site must declare `omr' and `sr'
+** locals and have `lp' and `dev' in scope.
+*/
+#define SET_100Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ int fdx=0;\
+ if (lp->phy[lp->active].id == NATIONAL_TX) {\
+ mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
+ 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
+ if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ if (fdx) omr |= OMR_FDX;\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+/* FIX ME so I don't jam 10Mb networks */
+/* Parallel-detection variant: advertises 100Mb/s with auto speed select
+** (MII_CR_ASSE) and never requests full duplex.
+** NOTE(review): statement macro; expansion site must declare an `omr' local
+** and have `lp' and `dev' in scope.
+*/
+#define SET_100Mb_PDET {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
+ lp->cache.gep = (GEP_FDXD | GEP_MODE);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DE4X5IOCTL SIOCDEVPRIVATE
+
+/* User <-> driver exchange block for the private DE4X5IOCTL ioctl.
+** NOTE(review): `data' appears to be a user-space pointer supplied by the
+** ioctl caller -- verify the handler only accesses it through the proper
+** user-copy helpers.
+*/
+struct de4x5_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
+#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
+#define DE4X5_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DE4X5_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
+#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
+#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
+#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
+#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
+#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
+#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
+#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
diff --git a/linux/src/drivers/net/de600.c b/linux/src/drivers/net/de600.c
new file mode 100644
index 0000000..ce96942
--- /dev/null
+++ b/linux/src/drivers/net/de600.c
@@ -0,0 +1,853 @@
+static const char *version =
+ "de600.c: $Revision: 1.1 $, Bjorn Ekwall (bj0rn@blox.se)\n";
+/*
+ * de600.c
+ *
+ * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
+ * The Author may be reached as bj0rn@blox.se
+ *
+ * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
+ * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
+ * For DE600.asm:
+ * Portions (C) Copyright 1990 D-Link, Inc.
+ * Copyright, 1988-1992, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+ *
+ * compile-command:
+ * "gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer \
+ * -m486 -c de600.c"
+ *
+ **************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************/
+/* Add another "; SLOW_DOWN_IO" here if your adapter won't work OK: */
+#define DE600_SLOW_DOWN SLOW_DOWN_IO; SLOW_DOWN_IO; SLOW_DOWN_IO
+
+ /*
+ * If you still have trouble reading/writing to the adapter,
+ * modify the following "#define": (see <asm/io.h> for more info)
+#define REALLY_SLOW_IO
+ */
+#define SLOW_IO_BY_JUMPING /* Looks "better" than dummy write to port 0x80 :-) */
+
+/*
+ * If you want to enable automatic continuous checking for the DE600,
+ * keep this #define enabled.
+ * It doesn't cost much per packet, so I think it is worth it!
+ * If you disagree, comment away the #define, and live with it...
+ *
+ */
+#define CHECK_LOST_DE600
+
+/*
+ * Enable this #define if you want the adapter to do a "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ * (Depends on the CHECK_LOST_DE600 above)
+ *
+ */
+#define SHUTDOWN_WHEN_LOST
+
+/*
+ * See comment at "de600_rspace()"!
+ * This is an *ugly* hack, but for now it achieves its goal of
+ * faking a TCP flow-control that will not flood the poor DE600.
+ *
+ * Tricks TCP to announce a small max window (max 2 fast packets please :-)
+ *
+ * Comment away at your own risk!
+ *
+ * Update: Use the more general per-device maxwindow parameter instead.
+ */
+#undef FAKE_SMALL_MAX
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef DE600_DEBUG
+#define PRINTK(x) if (de600_debug >= 2) printk x
+#else
+#define DE600_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+unsigned int de600_debug = DE600_DEBUG;
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/in.h>
+#include <linux/ptrace.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#ifdef FAKE_SMALL_MAX
+static unsigned long de600_rspace(struct sock *sk);
+#include <net/sock.h>
+#endif
+
+#define netstats enet_statistics
+typedef unsigned char byte;
+
+/**************************************************
+ * *
+ * Definition of D-Link Ethernet Pocket adapter *
+ * *
+ **************************************************/
+/*
+ * D-Link Ethernet pocket adapter ports
+ */
+/*
+ * OK, so I'm cheating, but there are an awful lot of
+ * reads and writes in order to get anything in and out
+ * of the DE-600 with 4 bits at a time in the parallel port,
+ * so every saved instruction really helps :-)
+ *
+ * That is, I don't care what the device struct says
+ * but hope that Space.c will keep the rest of the drivers happy.
+ */
+#ifndef DE600_IO
+#define DE600_IO 0x378
+#endif
+
+#define DATA_PORT (DE600_IO)
+#define STATUS_PORT (DE600_IO + 1)
+#define COMMAND_PORT (DE600_IO + 2)
+
+#ifndef DE600_IRQ
+#define DE600_IRQ 7
+#endif
+/*
+ * It really should look like this, and autoprobing as well...
+ *
+#define DATA_PORT (dev->base_addr + 0)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+#define DE600_IRQ dev->irq
+ */
+
+/*
+ * D-Link COMMAND_PORT commands
+ */
+#define SELECT_NIC 0x04 /* select Network Interface Card */
+#define SELECT_PRN 0x1c /* select Printer */
+#define NML_PRN 0xec /* normal Printer situation */
+#define IRQEN 0x10 /* enable IRQ line */
+
+/*
+ * D-Link STATUS_PORT
+ */
+#define RX_BUSY 0x80
+#define RX_GOOD 0x40
+#define TX_FAILED16 0x10
+#define TX_BUSY 0x08
+
+/*
+ * D-Link DATA_PORT commands
+ * command in low 4 bits
+ * data in high 4 bits
+ * select current data nibble with HI_NIBBLE bit
+ */
+#define WRITE_DATA 0x00 /* write memory */
+#define READ_DATA 0x01 /* read memory */
+#define STATUS 0x02 /* read status register */
+#define COMMAND 0x03 /* write command register (see COMMAND below) */
+#define NULL_COMMAND 0x04 /* null command */
+#define RX_LEN 0x05 /* read received packet length */
+#define TX_ADDR 0x06 /* set adapter transmit memory address */
+#define RW_ADDR 0x07 /* set adapter read/write memory address */
+#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
+ or-ed with rest of command */
+
+/*
+ * command register, accessed through DATA_PORT with low bits = COMMAND
+ */
+#define RX_ALL 0x01 /* PROMISCUOUS */
+#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
+#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
+
+#define TX_ENABLE 0x04 /* bit 2 */
+#define RX_ENABLE 0x08 /* bit 3 */
+
+#define RESET 0x80 /* set bit 7 high */
+#define STOP_RESET 0x00 /* set bit 7 low */
+
+/*
+ * data to command register
+ * (high 4 bits in write to DATA_PORT)
+ */
+#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
+#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
+#define FLIP_IRQ 0x40 /* bit 6 */
+
+/*
+ * D-Link adapter internal memory:
+ *
+ * 0-2K 1:st transmit page (send from pointer up to 2K)
+ * 2-4K 2:nd transmit page (send from pointer up to 4K)
+ *
+ * 4-6K 1:st receive page (data from 4K upwards)
+ * 6-8K 2:nd receive page (data from 6K upwards)
+ *
+ * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
+ */
+#define MEM_2K 0x0800 /* 2048 */
+#define MEM_4K 0x1000 /* 4096 */
+#define MEM_6K 0x1800 /* 6144 */
+#define NODE_ADDRESS 0x2000 /* 8192 */
+
+#define RUNT 60 /* Too small Ethernet packet */
+
+/**************************************************
+ * *
+ * End of definition *
+ * *
+ **************************************************/
+
+/*
+ * Index to functions, as function prototypes.
+ */
+/* Routines used internally. (See "convenience macros") */
+static byte de600_read_status(struct device *dev);
+static byte de600_read_byte(unsigned char type, struct device *dev);
+
+/* Put in the device structure. */
+static int de600_open(struct device *dev);
+static int de600_close(struct device *dev);
+static struct netstats *get_stats(struct device *dev);
+static int de600_start_xmit(struct sk_buff *skb, struct device *dev);
+
+/* Dispatch from interrupts. */
+static void de600_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int de600_tx_intr(struct device *dev, int irq_status);
+static void de600_rx_intr(struct device *dev);
+
+/* Initialization */
+static void trigger_interrupt(struct device *dev);
+int de600_probe(struct device *dev);
+static int adapter_init(struct device *dev);
+
+/*
+ * D-Link driver variables:
+ */
+static volatile int rx_page = 0;
+
+#define TX_PAGES 2
+static volatile int tx_fifo[TX_PAGES];
+static volatile int tx_fifo_in = 0;
+static volatile int tx_fifo_out = 0;
+static volatile int free_tx_pages = TX_PAGES;
+static int was_down = 0;
+
+/*
+ * Convenience macros/functions for D-Link adapter
+ */
+
+/* Switch the parallel port between printer and NIC mode.
+ * NOTE(review): each expands to TWO statements (command write plus
+ * DE600_SLOW_DOWN) — unsafe inside an unbraced if/else. */
+#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
+#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
+
+/* Write one byte to adapter memory as two nibble transfers on DATA_PORT:
+ * low nibble first, then the HI_NIBBLE half. */
+/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
+#define de600_put_byte(data) ( \
+ outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
+ outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
+
+/* Write the command register: the global rx_page bits are always
+ * included so the receive page selection is preserved. */
+/*
+ * The first two outb_p()'s below could perhaps be deleted if there
+ * would be more delay in the last two. Not certain about it yet...
+ */
+#define de600_put_command(cmd) ( \
+ outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
+ outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
+ outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
+ outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
+
+/* Load a 12-bit adapter memory address (three nibbles) for the given
+ * access type (TX_ADDR / RW_ADDR). */
+#define de600_setup_address(addr,type) ( \
+ outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
+ outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
+ outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
+ outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
+
+/* Base address of the currently selected receive page (4K or 6K). */
+#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
+
+/* Flip bit, only 2 pages */
+#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
+
+/* End address of tx page a (packets are written backwards from here). */
+#define tx_page_adr(a) (((a) + 1) * MEM_2K)
+
+/*
+ * Read the adapter status byte: select the STATUS register via
+ * DATA_PORT, sample STATUS_PORT, then park the bus with a NULL_COMMAND
+ * so the next transfer starts from a known state.
+ */
+static inline byte
+de600_read_status(struct device *dev)
+{
+ byte status;
+
+ outb_p(STATUS, DATA_PORT);
+ status = inb(STATUS_PORT);
+ outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
+
+ return status;
+}
+
+/*
+ * Read one byte from adapter memory, 4 bits at a time over the parallel
+ * port: the first STATUS_PORT read yields the low nibble (in its top
+ * half, shifted down), the HI_NIBBLE access yields the high nibble.
+ */
+static inline byte
+de600_read_byte(unsigned char type, struct device *dev) { /* dev used by macros */
+ byte lo;
+
+ (void)outb_p((type), DATA_PORT);
+ lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
+ (void)outb_p((type) | HI_NIBBLE, DATA_PORT);
+ return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+de600_open(struct device *dev)
+{
+ /* The IRQ is shared with the printer port; dev_id is NULL. */
+ if (request_irq(DE600_IRQ, de600_interrupt, 0, "de600", NULL)) {
+ printk ("%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
+ return 1;
+ }
+ irq2dev_map[DE600_IRQ] = dev; /* let the handler find us from irq */
+
+ MOD_INC_USE_COUNT;
+ dev->start = 1;
+ /* NOTE(review): if adapter_init() fails, the IRQ and the module use
+ * count are NOT released here before returning failure. */
+ if (adapter_init(dev)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * The inverse routine to de600_open().
+ */
+/*
+ * Shut the adapter down: hard reset, return the parallel port to
+ * printer mode, and release the IRQ if the interface was up.
+ * Always returns 0.
+ */
+static int
+de600_close(struct device *dev)
+{
+ select_nic();
+ rx_page = 0;
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ de600_put_command(0);
+ select_prn();
+
+ if (dev->start) {
+ free_irq(DE600_IRQ, NULL);
+ irq2dev_map[DE600_IRQ] = NULL;
+ dev->start = 0;
+ MOD_DEC_USE_COUNT;
+ }
+ return 0;
+}
+
+/* Return the driver statistics block; dev->priv points at a static
+ * struct netstats set up in de600_probe(). */
+static struct netstats *
+get_stats(struct device *dev)
+{
+ return (struct netstats *)(dev->priv);
+}
+
+/*
+ * Generate a faked interrupt edge from the adapter: toggle FLIP_IRQ in
+ * the command register while bouncing the port through printer mode,
+ * then clear the command again. Used to re-enter the interrupt handler
+ * when more tx work is pending.
+ */
+static inline void
+trigger_interrupt(struct device *dev)
+{
+ de600_put_command(FLIP_IRQ);
+ select_prn();
+ DE600_SLOW_DOWN;
+ select_nic();
+ de600_put_command(0);
+}
+
+/*
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int
+de600_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ int transmit_from;
+ int len;
+ int tickssofar;
+ /* NOTE(review): skb->data is dereferenced here, BEFORE the NULL
+ * check below — a NULL skb would fault on this line. */
+ byte *buffer = skb->data;
+
+ /*
+ * If some higher layer thinks we've missed a
+ * tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 5)
+ return 1;
+
+ /* else */
+ printk("%s: transmit timed out (%d), %s?\n",
+ dev->name,
+ tickssofar,
+ "network cable problem"
+ );
+ /* Restart the adapter. */
+ if (adapter_init(dev)) {
+ return 1;
+ }
+ }
+
+ /* Start real output */
+ PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages));
+
+ /* Pad short frames up to the minimum Ethernet size. */
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+
+ cli();
+ select_nic();
+ /* Packets are stored backwards from the end of the tx page. */
+ tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
+ tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
+
+#ifdef CHECK_LOST_DE600
+ /* Sanity check: the ROM at NODE_ADDRESS must still read 0xde,
+ * otherwise the pocket adapter was unplugged — reinitialize. */
+ /* This costs about 40 instructions per packet... */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
+ if (adapter_init(dev)) {
+ sti();
+ return 1;
+ }
+ }
+#endif
+
+ /* Copy the frame into adapter memory, one byte (two nibbles) at
+ * a time. */
+ de600_setup_address(transmit_from, RW_ADDR);
+ for ( ; len > 0; --len, ++buffer)
+ de600_put_byte(*buffer);
+
+ if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
+ dev->trans_start = jiffies;
+ dev->tbusy = 0; /* allow more packets into adapter */
+ /* Send page and generate a faked interrupt */
+ de600_setup_address(transmit_from, TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ }
+ else {
+ /* A transmit is in flight; the tx interrupt will start this
+ * one from the fifo. Busy when no pages remain. */
+ dev->tbusy = !free_tx_pages;
+ select_prn();
+ }
+
+ sti(); /* interrupts back on */
+
+#ifdef FAKE_SMALL_MAX
+ /* This will "patch" the socket TCP proto at an early moment */
+ if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) &&
+ (skb->sk->prot->rspace != &de600_rspace))
+ skb->sk->prot->rspace = de600_rspace; /* Ugh! */
+#endif
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static void
+de600_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = irq2dev_map[irq]; /* set up in de600_open() */
+ byte irq_status;
+ int retrig = 0;
+ int boguscount = 0;
+
+ /* This might just as well be deleted now, no crummy drivers present :-) */
+ if ((dev == NULL) || (dev->start == 0) || (DE600_IRQ != irq)) {
+ printk("%s: bogus interrupt %d\n", dev?dev->name:"DE-600", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+ select_nic();
+ irq_status = de600_read_status(dev);
+
+ /* Service rx and tx until nothing more is pending (bounded by
+ * boguscount to avoid spinning forever on a wedged adapter). */
+ do {
+ PRINTK(("de600_interrupt (%02X)\n", irq_status));
+
+ if (irq_status & RX_GOOD)
+ de600_rx_intr(dev);
+ else if (!(irq_status & RX_BUSY))
+ de600_put_command(RX_ENABLE);
+
+ /* Any transmission in progress? */
+ if (free_tx_pages < TX_PAGES)
+ retrig = de600_tx_intr(dev, irq_status);
+ else
+ retrig = 0;
+
+ irq_status = de600_read_status(dev);
+ } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
+ /*
+ * Yeah, it _looks_ like busy waiting, smells like busy waiting
+ * and I know it's not PC, but please, it will only occur once
+ * in a while and then only for a loop or so (< 1ms for sure!)
+ */
+
+ /* Enable adapter interrupts */
+ dev->interrupt = 0;
+ select_prn();
+
+ /* More tx work pending: fake another interrupt to come back. */
+ if (retrig)
+ trigger_interrupt(dev);
+
+ sti();
+ return;
+}
+
+/*
+ * Handle the transmit side of an interrupt.
+ * On success advances the tx fifo and bumps tx_packets; on TX_FAILED16
+ * (16 collisions) the same page is retransmitted.
+ * Returns 1 while a transmission is still pending, 0 when done.
+ */
+static int
+de600_tx_intr(struct device *dev, int irq_status)
+{
+ /*
+ * Returns 1 if tx still not done
+ */
+
+ mark_bh(NET_BH);
+ /* Check if current transmission is done yet */
+ if (irq_status & TX_BUSY)
+ return 1; /* tx not done, try again */
+
+ /* else */
+ /* If last transmission OK then bump fifo index */
+ if (!(irq_status & TX_FAILED16)) {
+ tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
+ ++free_tx_pages;
+ ((struct netstats *)(dev->priv))->tx_packets++;
+ dev->tbusy = 0;
+ }
+
+ /* More to send, or resend last packet? */
+ if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
+ dev->trans_start = jiffies;
+ de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ return 1;
+ }
+ /* else */
+
+ return 0;
+}
+
+/*
+ * We have a good packet, get it out of the adapter.
+ */
+static void
+de600_rx_intr(struct device *dev)
+{
+ struct sk_buff *skb;
+ int i;
+ int read_from;
+ int size;
+ register unsigned char *buffer;
+
+ cli();
+ /* Get size of received packet */
+ size = de600_read_byte(RX_LEN, dev); /* low byte */
+ size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
+ size -= 4; /* Ignore trailing 4 CRC-bytes */
+
+ /* Tell adapter where to store next incoming packet, enable receiver */
+ read_from = rx_page_adr();
+ next_rx_page(); /* flip to the other 2K receive page */
+ de600_put_command(RX_ENABLE);
+ sti();
+
+ /* Sanity-check the length; a wildly large value means the adapter
+ * itself is confused, so reinitialize it. */
+ if ((size < 32) || (size > 1535)) {
+ printk("%s: Bogus packet size %d.\n", dev->name, size);
+ if (size > 10000)
+ adapter_init(dev);
+ return;
+ }
+
+ skb = dev_alloc_skb(size+2);
+ sti();
+ if (skb == NULL) {
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, size);
+ return;
+ }
+ /* else */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ buffer = skb_put(skb,size);
+
+ /* copy the packet into the buffer, one byte at a time */
+ de600_setup_address(read_from, RW_ADDR);
+ for (i = size; i > 0; --i, ++buffer)
+ *buffer = de600_read_byte(READ_DATA, dev);
+
+ ((struct netstats *)(dev->priv))->rx_packets++; /* count all receives */
+
+ skb->protocol=eth_type_trans(skb,dev);
+
+ netif_rx(skb);
+ /*
+ * If any worth-while packets have been received, netif_rx()
+ * has done a mark_bh(INET_BH) for us and will work on them
+ * when we get to the bottom-half routine.
+ */
+}
+
+/*
+ * Probe for a DE-600 on the printer port: reset it, verify the magic
+ * bytes 0xde/0x15 in the address ROM, then fill in the device struct.
+ * NOTE(review): returns positive ENODEV/errno values (old 2.0 probe
+ * convention), not negative ones.
+ */
+int
+de600_probe(struct device *dev)
+{
+ int i;
+ static struct netstats de600_netstats;
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+
+ printk("%s: D-Link DE-600 pocket adapter", dev->name);
+ /* Alpha testers must have the version number to report bugs. */
+ if (de600_debug > 1)
+ printk("%s", version);
+
+ /* probe for adapter */
+ rx_page = 0;
+ select_nic();
+ (void)de600_read_status(dev);
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ /* After a reset the high status nibble must be clear. */
+ if (de600_read_status(dev) & 0xf0) {
+ printk(": not at I/O %#3x.\n", DATA_PORT);
+ return ENODEV;
+ }
+
+ /*
+ * Maybe we found one,
+ * have to check if it is a D-Link DE-600 adapter...
+ */
+
+ /* Get the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
+ dev->broadcast[i] = 0xff;
+ }
+
+ /* Check magic code */
+ if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
+ /* OK, install real address: D-Link OUI 00:80:c8 plus the
+ * low nibble of byte 3 or-ed with 0x70. */
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0xc8;
+ dev->dev_addr[3] &= 0x0f;
+ dev->dev_addr[3] |= 0x70;
+ } else {
+ printk(" not identified in the printer port\n");
+ return ENODEV;
+ }
+
+#if 0 /* Not yet */
+ if (check_region(DE600_IO, 3)) {
+ printk(", port 0x%x busy\n", DE600_IO);
+ return EBUSY;
+ }
+#endif
+ request_region(DE600_IO, 3, "de600");
+
+ printk(", Ethernet Address: %02X", dev->dev_addr[0]);
+ for (i = 1; i < ETH_ALEN; i++)
+ printk(":%02X",dev->dev_addr[i]);
+ printk("\n");
+
+ /* Initialize the device structure. */
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+ dev->priv = &de600_netstats;
+
+ memset(dev->priv, 0, sizeof(struct netstats));
+ dev->get_stats = get_stats;
+
+ dev->open = de600_open;
+ dev->stop = de600_close;
+ dev->hard_start_xmit = &de600_start_xmit;
+
+ ether_setup(dev);
+
+ /* The hardware command set has RX_MBP but multicast is not wired
+ * up in this driver. */
+ dev->flags&=~IFF_MULTICAST;
+
+ select_prn();
+ return 0;
+}
+
+/*
+ * (Re)initialize the adapter from scratch: hard reset, verify the ROM
+ * magic bytes (if CHECK_LOST_DE600), reload the station address, reset
+ * the tx fifo bookkeeping and enable the receiver.
+ * Runs with interrupts disabled. Returns 0 on success, 1 on failure.
+ */
+static int
+adapter_init(struct device *dev)
+{
+ int i;
+ long flags;
+
+ save_flags(flags);
+ cli();
+
+ select_nic();
+ rx_page = 0; /* used by RESET */
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+#ifdef CHECK_LOST_DE600
+ /* Check if it is still there... */
+ /* Get some bytes of the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
+ (de600_read_byte(READ_DATA, dev) != 0x15)) {
+ /* was: if (de600_read_status(dev) & 0xf0) { */
+ printk("Something has happened to the DE-600! Please check it"
+#ifdef SHUTDOWN_WHEN_LOST
+ " and do a new ifconfig"
+#endif /* SHUTDOWN_WHEN_LOST */
+ "!\n");
+#ifdef SHUTDOWN_WHEN_LOST
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de600_close(dev);
+#endif /* SHUTDOWN_WHEN_LOST */
+ was_down = 1;
+ dev->tbusy = 1; /* Transmit busy... */
+ restore_flags(flags);
+ return 1; /* failed */
+ }
+#endif /* CHECK_LOST_DE600 */
+ if (was_down) {
+ printk("Thanks, I feel much better now!\n");
+ was_down = 0;
+ }
+
+ dev->tbusy = 0; /* clear transmit-busy flag */
+ dev->interrupt = 0;
+ tx_fifo_in = 0;
+ tx_fifo_out = 0;
+ free_tx_pages = TX_PAGES;
+
+ /* set the ether address. */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++)
+ de600_put_byte(dev->dev_addr[i]);
+
+ /* where to start saving incoming packets */
+ rx_page = RX_BP | RX_BASE_PAGE;
+ de600_setup_address(MEM_4K, RW_ADDR);
+ /* Enable receiver */
+ de600_put_command(RX_ENABLE);
+ select_prn();
+ restore_flags(flags);
+
+ return 0; /* OK */
+}
+
+#ifdef FAKE_SMALL_MAX
+/*
+ * The new router code (coming soon 8-) ) will fix this properly.
+ */
+#define DE600_MIN_WINDOW 1024
+#define DE600_MAX_WINDOW 2048
+#define DE600_TCP_WINDOW_DIFF 1024
+/*
+ * Copied from "net/inet/sock.c"
+ *
+ * Sets a lower max receive window in order to achieve <= 2
+ * packets arriving at the adapter in fast succession.
+ * (No way that a DE-600 can keep up with a net saturated
+ * with packets homing in on it :-( )
+ *
+ * Since there are only 2 receive buffers in the DE-600
+ * and it takes some time to copy from the adapter,
+ * this is absolutely necessary for any TCP performance whatsoever!
+ *
+ * Note that the returned window info will never be smaller than
+ * DE600_MIN_WINDOW, i.e. 1024
+ * This differs from the standard function, that can return an
+ * arbitrarily small window!
+ */
+#define min(a,b) ((a)<(b)?(a):(b))
+/*
+ * Replacement for the stock TCP rspace(): report at most
+ * DE600_MAX_WINDOW bytes of receive space so the DE-600's two receive
+ * buffers are never flooded. Returns 0 when memory is tight.
+ */
+static unsigned long
+de600_rspace(struct sock *sk)
+{
+ int amt;
+
+ if (sk != NULL) {
+/*
+ * Hack! You might want to play with commenting away the following line,
+ * if you know what you do!
+ sk->max_unacked = DE600_MAX_WINDOW - DE600_TCP_WINDOW_DIFF;
+ */
+
+ if (sk->rmem_alloc >= sk->rcvbuf-2*DE600_MIN_WINDOW) return(0);
+ amt = min((sk->rcvbuf-sk->rmem_alloc)/2/*-DE600_MIN_WINDOW*/, DE600_MAX_WINDOW);
+ if (amt < 0) return(0);
+ return(amt);
+ }
+ return(0);
+}
+#endif
+
+#ifdef MODULE
+static char nullname[8];
+static struct device de600_dev = {
+ nullname, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, de600_probe };
+
+/* Module entry point: register the device, which triggers
+ * de600_probe() via the probe hook in de600_dev. */
+int
+init_module(void)
+{
+ if (register_netdev(&de600_dev) != 0)
+ return -EIO;
+ return 0;
+}
+
+/* Module exit point: unregister the device and release the three
+ * parallel-port I/O ports claimed in de600_probe(). */
+void
+cleanup_module(void)
+{
+ unregister_netdev(&de600_dev);
+ release_region(DE600_IO, 3);
+}
+#endif /* MODULE */
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/de620.c b/linux/src/drivers/net/de620.c
new file mode 100644
index 0000000..0e0c552
--- /dev/null
+++ b/linux/src/drivers/net/de620.c
@@ -0,0 +1,1045 @@
+/*
+ * de620.c $Revision: 1.1 $ BETA
+ *
+ *
+ * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
+ *
+ * Based on adapter information gathered from DOS packetdriver
+ * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
+ * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
+ * Copyright, 1988, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * (Now at <becker@cesdis.gsfc.nasa.gov>)
+ *
+ * Valuable assistance from:
+ * J. Joshua Kopper <kopper@rtsg.mot.com>
+ * Olav Kvittem <Olav.Kvittem@uninett.no>
+ * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
+ * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
+ *
+ *****************************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+static const char *version =
+ "de620.c: $Revision: 1.1 $, Bjorn Ekwall <bj0rn@blox.se>\n";
+
+/***********************************************************************
+ *
+ * "Tuning" section.
+ *
+ * Compile-time options: (see below for descriptions)
+ * -DDE620_IO=0x378 (lpt1)
+ * -DDE620_IRQ=7 (lpt1)
+ * -DDE602_DEBUG=...
+ * -DSHUTDOWN_WHEN_LOST
+ * -DCOUNT_LOOPS
+ * -DLOWSPEED
+ * -DREAD_DELAY
+ * -DWRITE_DELAY
+ */
+
+/*
+ * This driver assumes that the printer port is a "normal",
+ * dumb, uni-directional port!
+ * If your port is "fancy" in any way, please try to set it to "normal"
+ * with your BIOS setup. I have no access to machines with bi-directional
+ * ports, so I can't test such a driver :-(
+ * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
+ *
+ * There are some clones of DE620 out there, with different names.
+ * If the current driver does not recognize a clone, try to change
+ * the following #define to:
+ *
+ * #define DE620_CLONE 1
+ */
+#define DE620_CLONE 0
+
+/*
+ * If the adapter has problems with high speeds, enable this #define
+ * otherwise full printerport speed will be attempted.
+ *
+ * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
+ *
+#define LOWSPEED
+ */
+
+#ifndef READ_DELAY
+#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
+#endif
+
+#ifndef WRITE_DELAY
+#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
+#endif
+
+/*
+ * Enable this #define if you want the adapter to do a "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ *
+#define SHUTDOWN_WHEN_LOST
+ */
+
+/*
+ * Enable debugging by "-DDE620_DEBUG=3" when compiling,
+ * OR in "./CONFIG"
+ * OR by enabling the following #define
+ *
+ * use 0 for production, 1 for verification, >2 for debug
+ *
+#define DE620_DEBUG 3
+ */
+
+#ifdef LOWSPEED
+/*
+ * Enable this #define if you want to see debugging output that show how long
+ * we have to wait before the DE-620 is ready for the next read/write/command.
+ *
+#define COUNT_LOOPS
+ */
+#endif
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/in.h>
+#include <linux/ptrace.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Constant definitions for the DE-620 registers, commands and bits */
+#include "de620.h"
+
+#define netstats enet_statistics
+typedef unsigned char byte;
+
+/*******************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * See also "de620.h" *
+ * *
+ *******************************************************/
+#ifndef DE620_IO /* Compile-time configurable */
+#define DE620_IO 0x378
+#endif
+
+#ifndef DE620_IRQ /* Compile-time configurable */
+#define DE620_IRQ 7
+#endif
+
+#define DATA_PORT (dev->base_addr)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+
+#define RUNT 60 /* Too small Ethernet packet */
+#define GIANT 1514 /* largest legal size packet, no fcs */
+
+#ifdef DE620_DEBUG /* Compile-time configurable */
+#define PRINTK(x) if (de620_debug >= 2) printk x
+#else
+#define DE620_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+
+
+/*
+ * Force media with insmod:
+ * insmod de620.o bnc=1
+ * or
+ * insmod de620.o utp=1
+ *
+ * Force io and/or irq with insmod:
+ * insmod de620.o io=0x378 irq=7
+ *
+ * Make a clone skip the Ethernet-address range check:
+ * insmod de620.o clone=1
+ */
+static int bnc = 0;
+static int utp = 0;
+static int io = DE620_IO;
+static int irq = DE620_IRQ;
+static int clone = DE620_CLONE;
+
+static unsigned int de620_debug = DE620_DEBUG;
+
+/***********************************************
+ * *
+ * Index to functions, as function prototypes. *
+ * *
+ ***********************************************/
+
+/*
+ * Routines used internally. (See also "convenience macros.. below")
+ */
+
+/* Put in the device structure. */
+static int de620_open(struct device *);
+static int de620_close(struct device *);
+static struct netstats *get_stats(struct device *);
+static void de620_set_multicast_list(struct device *);
+static int de620_start_xmit(struct sk_buff *, struct device *);
+
+/* Dispatch from interrupts. */
+static void de620_interrupt(int, void *, struct pt_regs *);
+static int de620_rx_intr(struct device *);
+
+/* Initialization */
+static int adapter_init(struct device *);
+int de620_probe(struct device *);
+static int read_eeprom(struct device *);
+
+
+/*
+ * D-Link driver variables:
+ */
+#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
+#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
+#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
+#define DEF_NIC_CMD IRQEN | ICEN | DS1
+
+static volatile byte NIC_Cmd;
+static volatile byte next_rx_page;
+static byte first_rx_page;
+static byte last_rx_page;
+static byte EIPRegister;
+
+/* Adapter identification data (station address, RAM size, model,
+ * media, SCR). Presumably filled in from the on-board EEPROM by
+ * read_eeprom() — not visible here, confirm against its definition. */
+static struct nic {
+ byte NodeID[6];
+ byte RAM_Size;
+ byte Model;
+ byte Media;
+ byte SCR;
+} nic_data;
+
+/**********************************************************
+ * *
+ * Convenience macros/functions for D-Link DE-620 adapter *
+ * *
+ **********************************************************/
+#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
+#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
+
+/* Check for ready-status, and return a nibble (high 4 bits) for data input */
+#ifdef COUNT_LOOPS
+static int tot_cnt;
+#endif
+/*
+ * Busy-wait (at most ~1000 polls) for the adapter READY bit, then
+ * return the high nibble of the status port (the data nibble).
+ * NOTE(review): a timeout is not reported — the stale value is
+ * returned even if READY never came up.
+ */
+static inline byte
+de620_ready(struct device *dev)
+{
+ byte value;
+ register short int cnt = 0;
+
+ while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
+ ++cnt;
+
+#ifdef COUNT_LOOPS
+ tot_cnt += cnt;
+#endif
+ return value & 0xf0; /* nibble */
+}
+
+/*
+ * Send a command byte to the adapter: wait for READY, put the command
+ * on DATA_PORT, strobe it in by toggling CS0 on the command port, then
+ * restore the normal NIC_Cmd state.
+ */
+static inline void
+de620_send_command(struct device *dev, byte cmd)
+{
+ de620_ready(dev);
+ /* W_DUMMY: rewrite NIC_Cmd first to terminate a block transfer. */
+ if (cmd == W_DUMMY)
+ outb(NIC_Cmd, COMMAND_PORT);
+
+ outb(cmd, DATA_PORT);
+
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+ de620_ready(dev);
+ outb(NIC_Cmd, COMMAND_PORT);
+}
+
+/* Write one data byte: wait for READY, present the byte, and clock it
+ * in by flipping the DS0/DS1 strobe bits. */
+static inline void
+de620_put_byte(struct device *dev, byte value)
+{
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ de620_ready(dev);
+ outb(value, DATA_PORT);
+ de620_flip_ds(dev);
+}
+
+/* Read one data byte as two nibbles: high nibble first, then flip the
+ * DS strobes and read the low nibble (shifted down). */
+static inline byte
+de620_read_byte(struct device *dev)
+{
+ byte value;
+
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ value = de620_ready(dev); /* High nibble */
+ de620_flip_ds(dev);
+ value |= de620_ready(dev) >> 4; /* Low nibble */
+ return value;
+}
+
/*
 * Copy 'count' bytes from 'buffer' into the adapter's transmit memory.
 * LOWSPEED builds push bytes one at a time through de620_put_byte();
 * the fast path writes byte pairs, toggling the data strobes between
 * them by alternating the precomputed uflip/dflip command values.
 * NOTE(review): the fast path consumes two bytes per iteration, so the
 * caller must supply an even count (de620_start_xmit rounds len up).
 */
static inline void
de620_write_block(struct device *dev, byte *buffer, int count)
{
#ifndef LOWSPEED
	byte uflip = NIC_Cmd ^ (DS0 | DS1);	/* data strobes flipped */
	byte dflip = NIC_Cmd;			/* data strobes at idle */
#else /* LOWSPEED */
#ifdef COUNT_LOOPS
	int bytes = count;
#endif /* COUNT_LOOPS */
#endif /* LOWSPEED */

#ifdef LOWSPEED
#ifdef COUNT_LOOPS
	tot_cnt = 0;
#endif /* COUNT_LOOPS */
	/* No further optimization useful, the limit is in the adapter. */
	for ( ; count > 0; --count, ++buffer) {
		de620_put_byte(dev,*buffer);
	}
	de620_send_command(dev,W_DUMMY);
#ifdef COUNT_LOOPS
	/* trial debug output: loops per byte in de620_ready() */
	printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
#endif /* COUNT_LOOPS */
#else /* not LOWSPEED */
	for ( ; count > 0; count -=2) {
		outb(*buffer++, DATA_PORT);
		outb(uflip, COMMAND_PORT);
		outb(*buffer++, DATA_PORT);
		outb(dflip, COMMAND_PORT);
	}
	de620_send_command(dev,W_DUMMY);
#endif /* LOWSPEED */
}
+
/*
 * Copy 'count' bytes from the adapter's receive memory into 'data'.
 * LOWSPEED builds read bytes one at a time via de620_read_byte(); the
 * fast path reads two nibbles from the status port per byte, flipping
 * the data strobes with the precomputed uflip/dflip command values.
 */
static inline void
de620_read_block(struct device *dev, byte *data, int count)
{
#ifndef LOWSPEED
	byte value;
	byte uflip = NIC_Cmd ^ (DS0 | DS1);	/* data strobes flipped */
	byte dflip = NIC_Cmd;			/* data strobes at idle */
#else /* LOWSPEED */
#ifdef COUNT_LOOPS
	int bytes = count;

	tot_cnt = 0;
#endif /* COUNT_LOOPS */
#endif /* LOWSPEED */

#ifdef LOWSPEED
	/* No further optimization useful, the limit is in the adapter. */
	while (count-- > 0) {
		*data++ = de620_read_byte(dev);
		de620_flip_ds(dev);
	}
#ifdef COUNT_LOOPS
	/* trial debug output: loops per byte in de620_ready() */
	printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
#endif /* COUNT_LOOPS */
#else /* not LOWSPEED */
	while (count-- > 0) {
		value = inb(STATUS_PORT) & 0xf0; /* High nibble */
		outb(uflip, COMMAND_PORT);
		*data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
		outb(dflip , COMMAND_PORT);
	}
#endif /* LOWSPEED */
}
+
/*
 * Program the adapter's delay factor register (W_DFR): write delay
 * first, then read delay.  LOWSPEED builds use the WRITE_DELAY and
 * READ_DELAY tunables; otherwise both delays are set to zero.
 */
static inline void
de620_set_delay(struct device *dev)
{
	de620_ready(dev);
	outb(W_DFR, DATA_PORT);
	outb(NIC_Cmd ^ CS0, COMMAND_PORT);	/* latch the register select */

	de620_ready(dev);
#ifdef LOWSPEED
	outb(WRITE_DELAY, DATA_PORT);
#else
	outb(0, DATA_PORT);
#endif
	de620_flip_ds(dev);

	de620_ready(dev);
#ifdef LOWSPEED
	outb(READ_DELAY, DATA_PORT);
#else
	outb(0, DATA_PORT);
#endif
	de620_flip_ds(dev);
}
+
/* Write 'value' into the adapter register selected by 'reg'. */
static inline void
de620_set_register(struct device *dev, byte reg, byte value)
{
	de620_ready(dev);
	outb(reg, DATA_PORT);
	outb(NIC_Cmd ^ CS0, COMMAND_PORT);	/* latch the register select */

	de620_put_byte(dev, value);
}
+
/* Read and return the contents of the adapter register selected by 'reg'. */
static inline byte
de620_get_register(struct device *dev, byte reg)
{
	byte value;

	de620_send_command(dev,reg);
	value = de620_read_byte(dev);
	de620_send_command(dev,W_DUMMY);	/* leave the adapter idle */

	return value;
}
+
+/*********************************************************************
+ *
+ * Open/initialize the board.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ *
+ */
+static int
+de620_open(struct device *dev)
+{
+ if (request_irq(dev->irq, de620_interrupt, 0, "de620", NULL)) {
+ printk ("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return 1;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ MOD_INC_USE_COUNT;
+ if (adapter_init(dev)) {
+ return 1;
+ }
+ dev->start = 1;
+ return 0;
+}
+
+/************************************************
+ *
+ * The inverse routine to de620_open().
+ *
+ */
/*
 * Bring the interface down: disable reception, release the IRQ and
 * its irq2dev_map slot, and drop the module use count.  Always 0.
 */
static int
de620_close(struct device *dev)
{
	/* disable recv */
	de620_set_register(dev, W_TCR, RXOFF);

	free_irq(dev->irq, NULL);
	irq2dev_map[dev->irq] = NULL;

	dev->start = 0;
	MOD_DEC_USE_COUNT;
	return 0;
}
+
+/*********************************************
+ *
+ * Return current statistics
+ *
+ */
+static struct netstats *
+get_stats(struct device *dev)
+{
+ return (struct netstats *)(dev->priv);
+}
+
+/*********************************************
+ *
+ * Set or clear the multicast filter for this adaptor.
+ * (no real multicast implemented for the DE-620, but she can be promiscuous...)
+ *
+ */
+
+static void de620_set_multicast_list(struct device *dev)
+{
+ if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ { /* Enable promiscuous mode */
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
+ }
+ else
+ { /* Disable promiscuous mode, use normal mode */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+ }
+}
+
+/*******************************************************
+ *
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int
+de620_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ unsigned long flags;
+ int len;
+ int tickssofar;
+ byte *buffer = skb->data;
+ byte using_txbuf;
+
+ /*
+ * If some higher layer thinks we've missed a
+ * tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
+ dev->tbusy = (using_txbuf == (TXBF0 | TXBF1)); /* Boolean! */
+
+ if (dev->tbusy) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 5)
+ return 1;
+
+ /* else */
+ printk("%s: transmit timed out (%d), %s?\n",
+ dev->name,
+ tickssofar,
+ "network cable problem"
+ );
+ /* Restart the adapter. */
+ if (adapter_init(dev)) /* maybe close it */
+ return 1;
+ }
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+ if (len & 1) /* send an even number of bytes */
+ ++len;
+
+ /* Start real output */
+ save_flags(flags);
+ cli();
+
+ PRINTK(("de620_start_xmit: len=%d, bufs 0x%02x\n",
+ (int)skb->len, using_txbuf));
+
+ /* select a free tx buffer. if there is one... */
+ switch (using_txbuf) {
+ default: /* both are free: use TXBF0 */
+ case TXBF1: /* use TXBF0 */
+ de620_send_command(dev,W_CR | RW0);
+ using_txbuf |= TXBF0;
+ break;
+
+ case TXBF0: /* use TXBF1 */
+ de620_send_command(dev,W_CR | RW1);
+ using_txbuf |= TXBF1;
+ break;
+
+ case (TXBF0 | TXBF1): /* NONE!!! */
+ printk("de620: Ouch! No tx-buffer available!\n");
+ restore_flags(flags);
+ return 1;
+ break;
+ }
+ de620_write_block(dev, buffer, len);
+
+ dev->trans_start = jiffies;
+ dev->tbusy = (using_txbuf == (TXBF0 | TXBF1)); /* Boolean! */
+
+ ((struct netstats *)(dev->priv))->tx_packets++;
+
+ restore_flags(flags); /* interrupts maybe back on */
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/*****************************************************
+ *
+ * Handle the network interface interrupts.
+ *
+ */
/*
 * IRQ handler: drain good received packets (bounded to 100 rounds)
 * and refresh dev->tbusy from the transmit-buffer status bits.
 * Runs with interrupts disabled between the cli()/sti() pair.
 */
static void
de620_interrupt(int irq_in, void *dev_id, struct pt_regs *regs)
{
	struct device *dev = irq2dev_map[irq_in];
	byte irq_status;
	int bogus_count = 0;
	int again = 0;

	/* This might be deleted now, no crummy drivers present :-) Or..? */
	/* 'irq' is the module-level configured IRQ (see de620_probe). */
	if ((dev == NULL) || (irq != irq_in)) {
		printk("%s: bogus interrupt %d\n", dev?dev->name:"de620", irq_in);
		return;
	}

	cli();
	dev->interrupt = 1;

	/* Read the status register (_not_ the status port) */
	irq_status = de620_get_register(dev, R_STS);

	PRINTK(("de620_interrupt (%2.2X)\n", irq_status));

	if (irq_status & RXGOOD) {
		/* Keep pulling packets while de620_rx_intr says more wait. */
		do {
			again = de620_rx_intr(dev);
			PRINTK(("again=%d\n", again));
		}
		while (again && (++bogus_count < 100));
	}

	/* Transmitter is busy only while both tx buffers are in use. */
	dev->tbusy = (de620_tx_buffs(dev) == (TXBF0 | TXBF1)); /* Boolean! */

	dev->interrupt = 0;
	sti();
	return;
}
+
+/**************************************
+ *
+ * Get a packet from the adapter
+ *
+ * Send it "upstairs"
+ *
+ */
/*
 * Pull one received packet out of the adapter's page ring, sanity-check
 * the page header, and hand the frame to the network layer.
 * Returns non-zero if the adapter's current-page register indicates
 * more packets are waiting (caller loops on this).
 */
static int
de620_rx_intr(struct device *dev)
{
	/* 4-byte per-packet header as laid out in adapter memory */
	struct header_buf {
		byte status;
		byte Rx_NextPage;	/* page where the next packet starts */
		unsigned short Rx_ByteCount;	/* length incl. this header */
	} header_buf;
	struct sk_buff *skb;
	int size;
	byte *buffer;
	byte pagelink;
	byte curr_page;

	PRINTK(("de620_rx_intr: next_rx_page = %d\n", next_rx_page));

	/* Tell the adapter that we are going to read data, and from where */
	de620_send_command(dev, W_CR | RRN);
	de620_set_register(dev, W_RSA1, next_rx_page);
	de620_set_register(dev, W_RSA0, 0);

	/* Deep breath, and away we goooooo */
	de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
	PRINTK(("page status=0x%02x, nextpage=%d, packetsize=%d\n",
		header_buf.status, header_buf.Rx_NextPage, header_buf.Rx_ByteCount));

	/* Plausible page header? */
	pagelink = header_buf.Rx_NextPage;
	if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
		/* Ouch... Forget it! Skip all and start afresh... */
		printk("%s: Ring overrun? Restoring...\n", dev->name);
		/* You win some, you loose some. And sometimes plenty... */
		adapter_init(dev);
		((struct netstats *)(dev->priv))->rx_over_errors++;
		return 0;
	}

	/* OK, this look good, so far. Let's see if it's consistent... */
	/* Let's compute the start of the next packet, based on where we are */
	/* (byte count + 4-byte header, rounded up to whole 256-byte pages) */
	pagelink = next_rx_page +
		((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);

	/* Are we going to wrap around the page counter? */
	if (pagelink > last_rx_page)
		pagelink -= (last_rx_page - first_rx_page + 1);

	/* Is the _computed_ next page number equal to what the adapter says? */
	if (pagelink != header_buf.Rx_NextPage) {
		/* Naah, we'll skip this packet. Probably bogus data as well */
		printk("%s: Page link out of sync! Restoring...\n", dev->name);
		next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
		de620_send_command(dev, W_DUMMY);
		de620_set_register(dev, W_NPRF, next_rx_page);
		((struct netstats *)(dev->priv))->rx_over_errors++;
		return 0;
	}
	next_rx_page = pagelink;

	/* Payload size excludes the 4-byte page header read above. */
	size = header_buf.Rx_ByteCount - 4;
	if ((size < RUNT) || (GIANT < size)) {
		printk("%s: Illegal packet size: %d!\n", dev->name, size);
	}
	else { /* Good packet? */
		skb = dev_alloc_skb(size+2);
		if (skb == NULL) { /* Yeah, but no place to put it... */
			printk("%s: Couldn't allocate a sk_buff of size %d.\n",
				dev->name, size);
			((struct netstats *)(dev->priv))->rx_dropped++;
		}
		else { /* Yep! Go get it! */
			skb_reserve(skb,2); /* Align */
			skb->dev = dev;
			skb->free = 1;
			/* skb->data points to the start of sk_buff data area */
			buffer = skb_put(skb,size);
			/* copy the packet into the buffer */
			de620_read_block(dev, buffer, size);
			PRINTK(("Read %d bytes\n", size));
			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb); /* deliver it "upstairs" */
			/* count all receives */
			((struct netstats *)(dev->priv))->rx_packets++;
		}
	}

	/* Let's peek ahead to see if we have read the last current packet */
	/* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
	curr_page = de620_get_register(dev, R_CPR);
	de620_set_register(dev, W_NPRF, next_rx_page);
	PRINTK(("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page));

	return (next_rx_page != curr_page); /* That was slightly tricky... */
}
+
+/*********************************************
+ *
+ * Reset the adapter to a known state
+ *
+ */
/*
 * Reset the adapter to a known state: select the media interface,
 * program the station address and receive-ring page registers, then
 * sanity-check the status register to see the hardware is still there.
 * Returns 0 on success, 1 if the adapter appears to be gone.
 * NOTE(review): register writes below follow a hardware init sequence;
 * do not reorder.
 */
static int
adapter_init(struct device *dev)
{
	int i;
	static int was_down = 0;	/* remembers a prior failed check */

	/* Select media interface from the EEPROM model/media bytes,
	 * unless the utp/bnc options (set elsewhere) force a choice. */
	if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
		EIPRegister = NCTL0;
		if (nic_data.Media != 1)
			EIPRegister |= NIS0; /* not BNC */
	}
	else if (nic_data.Model == 2) { /* UTP */
		EIPRegister = NCTL0 | NIS0;
	}

	if (utp)
		EIPRegister = NCTL0 | NIS0;
	if (bnc)
		EIPRegister = NCTL0;

	de620_send_command(dev, W_CR | RNOP | CLEAR);
	de620_send_command(dev, W_CR | RNOP);

	de620_set_register(dev, W_SCR, SCR_DEF);
	/* disable recv to wait init */
	de620_set_register(dev, W_TCR, RXOFF);

	/* Set the node ID in the adapter */
	for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
		de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
	}

	de620_set_register(dev, W_EIP, EIPRegister);

	/* Receive ring: pages after the tx reservation up to end of RAM. */
	next_rx_page = first_rx_page = DE620_RX_START_PAGE;
	if (nic_data.RAM_Size)
		last_rx_page = nic_data.RAM_Size - 1;
	else /* 64k RAM */
		last_rx_page = 255;

	de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
	de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
	de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
	de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
	de620_send_command(dev, W_DUMMY);
	de620_set_delay(dev);

	/* Final sanity check: Anybody out there? */
	/* Let's hope some bits from the statusregister make a good check */
#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
	/* success: X 0 0 X 0 0 X X */
	/* ignore: EEDI RXGOOD COLS LNKS*/

	if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
		printk("Something has happened to the DE-620! Please check it"
#ifdef SHUTDOWN_WHEN_LOST
			" and do a new ifconfig"
#endif
			"! (%02x)\n", i);
#ifdef SHUTDOWN_WHEN_LOST
		/* Goodbye, cruel world... */
		dev->flags &= ~IFF_UP;
		de620_close(dev);
#endif
		was_down = 1;
		return 1; /* failed */
	}
	if (was_down) {
		printk("Thanks, I feel much better now!\n");
		was_down = 0;
	}

	/* All OK, go ahead... */
	de620_set_register(dev, W_TCR, TCR_DEF);

	return 0; /* all ok */
}
+
+/******************************************************************************
+ *
+ * Only start-up code below
+ *
+ */
+/****************************************
+ *
+ * Check if there is a DE-620 connected
+ */
/*
 * Probe for a DE-620 on the configured printer port: write/read-back a
 * check byte, read the EEPROM, and on success fill in the device
 * structure (address, handlers, I/O region).  Returns 0 on success,
 * ENODEV if no adapter answers.
 * 'io', 'irq', 'clone' and 'de620_debug' are module/compile-time
 * options defined elsewhere in this file.
 */
int
de620_probe(struct device *dev)
{
	static struct netstats de620_netstats;	/* backs dev->priv below */
	int i;
	byte checkbyte = 0xa5;

	/*
	 * This is where the base_addr and irq gets set.
	 * Tunable at compile-time and insmod-time
	 */
	dev->base_addr = io;
	dev->irq = irq;

	if (de620_debug)
		printk("%s", version);

	printk("D-Link DE-620 pocket adapter");

	/* Initially, configure basic nibble mode, so we can read the EEPROM */
	NIC_Cmd = DEF_NIC_CMD;
	de620_set_register(dev, W_EIP, EIPRegister);

	/* Anybody out there? (write a byte, see if it reads back) */
	de620_set_register(dev, W_CPR, checkbyte);
	checkbyte = de620_get_register(dev, R_CPR);

	if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
		printk(" not identified in the printer port\n");
		return ENODEV;
	}

#if 0 /* Not yet */
	if (check_region(dev->base_addr, 3)) {
		printk(", port 0x%x busy\n", dev->base_addr);
		return EBUSY;
	}
#endif
	request_region(dev->base_addr, 3, "de620");

	/* else, got it! Copy the EEPROM station address into the device. */
	printk(", Ethernet Address: %2.2X",
		dev->dev_addr[0] = nic_data.NodeID[0]);
	for (i = 1; i < ETH_ALEN; i++) {
		printk(":%2.2X", dev->dev_addr[i] = nic_data.NodeID[i]);
		dev->broadcast[i] = 0xff;
	}

	printk(" (%dk RAM,",
		(nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);

	if (nic_data.Media == 1)
		printk(" BNC)\n");
	else
		printk(" UTP)\n");

	/* Initialize the device structure. */
	/*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
	dev->priv = &de620_netstats;

	memset(dev->priv, 0, sizeof(struct netstats));
	dev->get_stats = get_stats;
	dev->open = de620_open;
	dev->stop = de620_close;
	dev->hard_start_xmit = &de620_start_xmit;
	dev->set_multicast_list = &de620_set_multicast_list;
	/* base_addr and irq are already set, see above! */

	ether_setup(dev);

	/* dump eeprom */
	if (de620_debug) {
		printk("\nEEPROM contents:\n");
		printk("RAM_Size = 0x%02X\n", nic_data.RAM_Size);
		printk("NodeID = %02X:%02X:%02X:%02X:%02X:%02X\n",
			nic_data.NodeID[0], nic_data.NodeID[1],
			nic_data.NodeID[2], nic_data.NodeID[3],
			nic_data.NodeID[4], nic_data.NodeID[5]);
		printk("Model = %d\n", nic_data.Model);
		printk("Media = %d\n", nic_data.Media);
		printk("SCR = 0x%02x\n", nic_data.SCR);
	}

	return 0;
}
+
+/**********************************
+ *
+ * Read info from on-board EEPROM
+ *
+ * Note: Bitwise serial I/O to/from the EEPROM vi the status _register_!
+ */
+#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
+
/*
 * Bit-bang one 16-bit word out of the on-board EEPROM at address 'from'.
 * Each sendit() writes one pattern of chip-select/data-in/clock bits to
 * the EEPROM interface port; the waveform of each step is drawn in the
 * cs/di/sck comments.  Result bits are read back serially through EEDI
 * (bit 7) of the status register.
 * NOTE(review): this is a serial hardware protocol -- do not reorder.
 */
static unsigned short
ReadAWord(struct device *dev, int from)
{
	unsigned short data;
	int nbits;

	/* cs [__~~] SET SEND STATE */
	/* di [____] */
	/* sck [_~~_] */
	sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);

	/* Send the 9-bit address from where we want to read the 16-bit word */
	for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
		if (from & 0x0100) { /* bit set? */
			/* cs [~~~~] SEND 1 */
			/* di [~~~~] */
			/* sck [_~~_] */
			sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
		}
		else {
			/* cs [~~~~] SEND 0 */
			/* di [____] */
			/* sck [_~~_] */
			sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
		}
	}

	/* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
	for (data = 0, nbits = 16; nbits > 0; --nbits) {
		/* cs [~~~~] SEND 0 */
		/* di [____] */
		/* sck [_~~_] */
		sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
		data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
	}
	/* cs [____] RESET SEND STATE */
	/* di [____] */
	/* sck [_~~_] */
	sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);

	return data;
}
+
/*
 * Fill in nic_data from the on-board EEPROM.
 * Returns 0 on success, -1 if the station address prefix does not match
 * D-Link's 00:80:c8 series (the 'clone' option skips that check).
 */
static int
read_eeprom(struct device *dev)
{
	unsigned short wrd;

	/* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */
	wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
	if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
		return -1; /* Nope, not a DE-620 */
	nic_data.NodeID[0] = wrd & 0xff;
	nic_data.NodeID[1] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
	if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
		return -1; /* Nope, not a DE-620 */
	nic_data.NodeID[2] = wrd & 0xff;
	nic_data.NodeID[3] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
	nic_data.NodeID[4] = wrd & 0xff;
	nic_data.NodeID[5] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
	nic_data.RAM_Size = (wrd >> 8);

	wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
	nic_data.Model = (wrd & 0xff);

	wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
	nic_data.Media = (wrd & 0xff);

	wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
	nic_data.SCR = (wrd >> 8);

	return 0; /* no errors */
}
+
+/******************************************************************************
+ *
+ * Loadable module skeleton
+ *
+ */
+#ifdef MODULE
+static char nullname[8] = "";
+static struct device de620_dev = {
+ nullname, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, de620_probe };
+
+int
+init_module(void)
+{
+ if (register_netdev(&de620_dev) != 0)
+ return -EIO;
+ return 0;
+}
+
/* Module exit: drop the interface and release the 3-port I/O region. */
void
cleanup_module(void)
{
	unregister_netdev(&de620_dev);
	release_region(de620_dev.base_addr, 3);
}
+#endif /* MODULE */
+
+/*
+ * (add '-DMODULE' when compiling as loadable module)
+ *
+ * compile-command:
+ * gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O2 \
+ * -fomit-frame-pointer -m486 \
+ * -I/usr/src/linux/include -I../../net/inet -c de620.c
+*/
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/de620.h b/linux/src/drivers/net/de620.h
new file mode 100644
index 0000000..e8d9a88
--- /dev/null
+++ b/linux/src/drivers/net/de620.h
@@ -0,0 +1,117 @@
+/*********************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * *
+ *********************************************************/
+
+/* DE-620's CMD port Command */
+#define CS0 0x08 /* 1->0 command strobe */
+#define ICEN 0x04 /* 0=enable DL3520 host interface */
+#define DS0 0x02 /* 1->0 data strobe 0 */
+#define DS1 0x01 /* 1->0 data strobe 1 */
+
+#define WDIR 0x20 /* general 0=read 1=write */
+#define RDIR 0x00 /* (not 100% confirm ) */
+#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
+#define PS2RDIR 0x20
+
+#define IRQEN 0x10 /* 1 = enable printer IRQ line */
+#define SELECTIN 0x08 /* 1 = select printer */
+#define INITP 0x04 /* 0 = initial printer */
+#define AUTOFEED 0x02 /* 1 = printer auto form feed */
+#define STROBE 0x01 /* 0->1 data strobe */
+
+#define RESET 0x08
+#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
+#define NCTL0 0x10
+
+/* DE-620 DIC Command */
+#define W_DUMMY 0x00 /* DIC reserved command */
+#define W_CR 0x20 /* DIC write command register */
+#define W_NPR 0x40 /* DIC write Next Page Register */
+#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
+#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
+
+/* DE-620's STAT port bits 7-4 */
+#define EMPTY 0x80 /* 1 = receive buffer empty */
+#define INTLEVEL 0x40 /* 1 = interrupt level is high */
+#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
+#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
+#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
+
+/* IDC 1 Command */
+#define W_RSA1 0xa0 /* write remote start address 1 */
+#define W_RSA0 0xa1 /* write remote start address 0 */
+#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
+#define W_DFR 0xa3 /* write delay factor register */
+#define W_CPR 0xa4 /* write current page register */
+#define W_SPR 0xa5 /* write start page register */
+#define W_EPR 0xa6 /* write end page register */
+#define W_SCR 0xa7 /* write system configuration register */
+#define W_TCR 0xa8 /* write Transceiver Configuration reg */
+#define W_EIP 0xa9 /* write EEPM Interface port */
+#define W_PAR0 0xaa /* write physical address register 0 */
+#define W_PAR1 0xab /* write physical address register 1 */
+#define W_PAR2 0xac /* write physical address register 2 */
+#define W_PAR3 0xad /* write physical address register 3 */
+#define W_PAR4 0xae /* write physical address register 4 */
+#define W_PAR5 0xaf /* write physical address register 5 */
+
+/* IDC 2 Command */
+#define R_STS 0xc0 /* read status register */
+#define R_CPR 0xc1 /* read current page register */
+#define R_BPR 0xc2 /* read boundary page register */
+#define R_TDR 0xc3 /* read time domain reflectometry reg */
+
+/* STATUS Register */
+#define EEDI 0x80 /* EEPM DO pin */
+#define TXSUC 0x40 /* tx success */
+#define T16 0x20 /* tx fail 16 times */
+#define TS1 0x40 /* 0=Tx success, 1=T16 */
+#define TS0 0x20 /* 0=Tx success, 1=T16 */
+#define RXGOOD 0x10 /* rx a good packet */
+#define RXCRC 0x08 /* rx a CRC error packet */
+#define RXSHORT 0x04 /* rx a short packet */
+#define COLS 0x02 /* coaxial collision status */
+#define LNKS 0x01 /* UTP link status */
+
+/* Command Register */
+#define CLEAR 0x10 /* reset part of hardware */
+#define NOPER 0x08 /* No Operation */
+#define RNOP 0x08
+#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
+#define RRN 0x04 /* Normal Remote Read mode */
+#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
+#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
+#define TXEN 0x01 /* 0->1 tx enable */
+
+/* System Configuration Register */
+#define TESTON 0x80 /* test host data transfer reliability */
+#define SLEEP 0x40 /* sleep mode */
+#if 0
+#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x02 /* byte mode */
+#else
+#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x10 /* byte mode */
+#endif
+#define NIBBLEMODE 0x00 /* nibble mode */
+#define IRQINV 0x08 /* turn off IRQ line inverter */
+#define IRQNML 0x00 /* turn on IRQ line inverter */
+#define INTON 0x04
+#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
+#define AUTOTX 0x01 /* auto tx when leave RW mode */
+
+/* Transceiver Configuration Register */
+#define JABBER 0x80 /* generate jabber condition */
+#define TXSUCINT 0x40 /* enable tx success interrupt */
+#define T16INT 0x20 /* enable T16 interrupt */
+#define RXERRPKT 0x10 /* accept CRC error or short packet */
+#define EXTERNALB2 0x0C /* external loopback 2 */
+#define EXTERNALB1 0x08 /* external loopback 1 */
+#define INTERNALB 0x04 /* internal loopback */
+#define NMLOPERATE 0x00 /* normal operation */
+#define RXPBM 0x03 /* rx physical, broadcast, multicast */
+#define RXPB 0x02 /* rx physical, broadcast */
+#define RXALL 0x01 /* rx all packet */
+#define RXOFF 0x00 /* rx disable */
diff --git a/linux/src/drivers/net/depca.c b/linux/src/drivers/net/depca.c
new file mode 100644
index 0000000..2048812
--- /dev/null
+++ b/linux/src/drivers/net/depca.c
@@ -0,0 +1,1890 @@
+/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
+
+ Written 1994, 1995 by David C. Davies.
+
+
+ Copyright 1994 David C. Davies
+ and
+ United States Government
+ (as represented by the Director, National Security Agency).
+
+ Copyright 1995 Digital Equipment Corporation.
+
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of DEPCA and EtherWORKS ethernet cards:
+
+ DEPCA (the original)
+ DE100
+ DE101
+ DE200 Turbo
+ DE201 Turbo
+ DE202 Turbo (TP BNC)
+ DE210
+ DE422 (EISA)
+
+ The driver has been tested on DE100, DE200 and DE202 cards in a
+ relatively busy network. The DE422 has been tested a little.
+
+ This driver will NOT work for the DE203, DE204 and DE205 series of
+ cards, since they have a new custom ASIC in place of the AMD LANCE
+ chip. See the 'ewrk3.c' driver in the Linux source tree for running
+ those cards.
+
+ I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
+ a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com
+
+ =========================================================================
+
+ The driver was originally based on the 'lance.c' driver from Donald
+ Becker which is included with the standard driver distribution for
+ linux. V0.4 is a complete re-write with only the kernel interface
+ remaining from the original code.
+
+ 1) Lance.c code in /linux/drivers/net/
+ 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
+ AMD, 1992 [(800) 222-9323].
+ 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
+ AMD, Pub. #17881, May 1993.
+ 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
+ AMD, Pub. #16907, May 1992
+ 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
+ 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
+ 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
+ Digital Equipment Corporation, 1989
+ 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
+
+
+ Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
+ driver.
+
+ The original DEPCA card requires that the ethernet ROM address counter
+ be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
+ only done when a 0x08 is read as the first address octet (to minimise
+ the chances of writing over some other hardware's I/O register). The
+ NICSR accesses have been changed to byte accesses for all the cards
+ supported by this driver, since there is only one useful bit in the MSB
+ (remote boot timeout) and it is not used. Also, there is a maximum of
+ only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
+ help debugging all this (and holding my feet to the fire until I got it
+ right).
+
+ The DE200 series boards have on-board 64kB RAM for use as a shared
+ memory network buffer. Only the DE100 cards make use of a 2kB buffer
+ mode which has not been implemented in this driver (only the 32kB and
+ 64kB modes are supported [16kB/48kB for the original DEPCA]).
+
+ At the most only 2 DEPCA cards can be supported on the ISA bus because
+ there is only provision for two I/O base addresses on each card (0x300
+ and 0x200). The I/O address is detected by searching for a byte sequence
+ in the Ethernet station address PROM at the expected I/O address for the
+ Ethernet PROM. The shared memory base address is 'autoprobed' by
+ looking for the self test PROM and detecting the card name. When a
+ second DEPCA is detected, information is placed in the base_addr
+ variable of the next device structure (which is created if necessary),
+ thus enabling ethif_probe initialization for the device. More than 2
+ EISA cards can be supported, but care will be needed assigning the
+ shared memory to ensure that each slot has the correct IRQ, I/O address
+ and shared memory address assigned.
+
+ ************************************************************************
+
+ NOTE: If you are using two ISA DEPCAs, it is important that you assign
+ the base memory addresses correctly. The driver autoprobes I/O 0x300
+ then 0x200. The base memory address for the first device must be less
+ than that of the second so that the auto probe will correctly assign the
+ I/O and memory addresses on the same card. I can't think of a way to do
+ this unambiguously at the moment, since there is nothing on the cards to
+ tie I/O and memory information together.
+
+ I am unable to test 2 cards together for now, so this code is
+ unchecked. All reports, good or bad, are welcome.
+
+ ************************************************************************
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
+ {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
+ really IRQ9 in machines with 16 IRQ lines.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been added. To
+ utilise this ability, you have to do <8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy depca.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) if you wish, edit the source code near line 1530 to reflect the I/O
+ address and IRQ you're using (see also 5).
+ 3) compile depca.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the depca configuration turned off and reboot.
+ 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
+ [Alan Cox: Changed the code to allow command line irq/io assignments]
+ [Dave Davies: Changed the code to allow command line mem/name
+ assignments]
+ 6) run the net startup bits for your eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod depca'.
+
+ To assign a base memory address for the shared memory when running as a
+ loadable module, see 5 above. To include the adapter name (if you have
+ no PROM but know the card name) also see 5 above. Note that this last
+ option will not work with kernel built-in depca's.
+
+ The shared memory assignment for a loadable module makes sense to avoid
+ the 'memory autoprobe' picking the wrong shared memory (for the case of
+ 2 depca's in a PC).
+
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 25-jan-94 Initial writing.
+ 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
+ 0.3 1-feb-94 Added multiple DEPCA support.
+ 0.31 4-feb-94 Added DE202 recognition.
+ 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
+ 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
+ Add jabber packet fix from murf@perftech.com
+ and becker@super.org
+ 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
+ 0.35 8-mar-94 Added DE201 recognition. Tidied up.
+ 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
+ 0.36 16-may-94 DE422 fix released.
+ 0.37 22-jul-94 Added MODULE support
+ 0.38 15-aug-94 Added DBR ROM switch in depca_close().
+ Multi DEPCA bug fix.
+ 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
+ 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
+ 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
+ <stromain@alf.dec.com>
+ 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
+ 0.385 3-apr-95 Fix a recognition bug reported by
+ <ryan.niemi@lastfrontier.com>
+ 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
+ 0.40 25-May-95 Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
+ suggestion by <heiko@colossus.escape.de>
+ 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
+ modules.
+ Add 'adapter_name' for loadable modules when no PROM.
+ Both above from a suggestion by
+ <pchen@woodruffs121.residence.gatech.edu>.
+ Add new multicasting code.
+ 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
+ 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+
+ =========================================================================
+*/
+
+static const char *version = "depca.c:v0.43 96/8/16 davies@maniac.ultranet.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/segment.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+
+#include "depca.h"
+
+#ifdef DEPCA_DEBUG
+static int depca_debug = DEPCA_DEBUG;
+#else
+static int depca_debug = 1;
+#endif
+
+#define DEPCA_NDA 0xffe0 /* No Device Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Set the number of Tx and Rx buffers. Ensure that the memory requested
+** here is <= to the amount of shared memory set up by the board switches.
+** The number of descriptors MUST BE A POWER OF 2.
+**
+** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 8 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
+#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+/*
+** EISA bus defines
+*/
+#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+
+/*
+** ISA Bus defines
+*/
+#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
+#define DEPCA_IO_PORTS {0x300, 0x200, 0}
+#define DEPCA_TOTAL_SIZE 0x10
+static short mem_chkd = 0;
+
+/*
+** Name <-> Adapter mapping
+*/
+#define DEPCA_SIGNATURE {"DEPCA",\
+ "DE100","DE101",\
+ "DE200","DE201","DE202",\
+ "DE210",\
+ "DE422",\
+ ""}
+static enum {DEPCA, de100, de101, de200, de201, de202, de210, de422, unknown} adapter;
+
+/*
+** Miscellaneous info...
+*/
+#define DEPCA_STRLEN 16
+#define MAX_NUM_DEPCAS 2
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
+#define ALIGN ALIGN8 /* Keep the LANCE happy... */
+
+/*
+** The DEPCA Rx and Tx ring descriptors.
+*/
+struct depca_rx_desc {
+ volatile s32 base;
+ s16 buf_length; /* This length is negative 2's complement! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct depca_tx_desc {
+ volatile s32 base;
+ s16 length; /* This length is negative 2's complement! */
+ s16 misc; /* Errors and TDR info */
+};
+
+#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
+ to LANCE memory address space */
+
+/*
+** The Lance initialization block, described in databook, in common memory.
+*/
+struct depca_init {
+ u16 mode; /* Mode register */
+ u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
+ u8 mcast_table[8]; /* Multicast Hash Table. */
+ u32 rx_ring; /* Rx ring base pointer & ring length */
+ u32 tx_ring; /* Tx ring base pointer & ring length */
+};
+
+#define DEPCA_PKT_STAT_SZ 16
+#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DEPCA_PKT_STAT_SZ */
+struct depca_private {
+ char devname[DEPCA_STRLEN]; /* Device Product String */
+ char adapter_name[DEPCA_STRLEN];/* /proc/ioports string */
+ char adapter; /* Adapter type */
+ struct depca_rx_desc *rx_ring; /* Pointer to start of RX descriptor ring */
+ struct depca_tx_desc *tx_ring; /* Pointer to start of TX descriptor ring */
+ struct depca_init init_block;/* Shadow Initialization block */
+ char *rx_memcpy[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
+ char *tx_memcpy[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
+ u_long bus_offset; /* (E)ISA bus address offset vs LANCE */
+ u_long sh_mem; /* Physical start addr of shared mem area */
+ u_long dma_buffs; /* LANCE Rx and Tx buffers start address. */
+ int rx_new, tx_new; /* The next free ring entry */
+ int rx_old, tx_old; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ struct { /* Private stats counters */
+ u32 bins[DEPCA_PKT_STAT_SZ];
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ int txRingMask; /* TX ring mask */
+ int rxRingMask; /* RX ring mask */
+ s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
+ s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
+};
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingMask = tx_new Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingMask-lp->tx_new:\
+ lp->tx_old -lp->tx_new-1)
+
+/*
+** Public Functions
+*/
+static int depca_open(struct device *dev);
+static int depca_start_xmit(struct sk_buff *skb, struct device *dev);
+static void depca_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static int depca_close(struct device *dev);
+static int depca_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+static struct enet_statistics *depca_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+/*
+** Private functions
+*/
+static int depca_hw_init(struct device *dev, u_long ioaddr);
+static void depca_init_ring(struct device *dev);
+static int depca_rx(struct device *dev);
+static int depca_tx(struct device *dev);
+
+static void LoadCSRs(struct device *dev);
+static int InitRestartDepca(struct device *dev);
+static void DepcaSignature(char *name, u_long paddr);
+static int DevicePresent(u_long ioaddr);
+static int get_hw_addr(struct device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+static void SetMulticastFilter(struct device *dev);
+static void isa_probe(struct device *dev, u_long iobase);
+static void eisa_probe(struct device *dev, u_long iobase);
+static struct device *alloc_device(struct device *dev, u_long iobase);
+static int depca_dev_index(char *s);
+static struct device *insert_device(struct device *dev, u_long iobase, int (*init)(struct device *));
+static int load_packet(struct device *dev, struct sk_buff *skb);
+static void depca_dbg_open(struct device *dev);
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+/* As a module: never autoprobe (interrupts are live), so pretend both done */
+static int autoprobed = 1, loading_module = 1;
+# else
+/* Zero-terminated candidate IRQ lists, selected per adapter family */
+static u_char de1xx_irq[] = {2,3,4,5,7,9,0};
+static u_char de2xx_irq[] = {5,9,10,11,15,0};
+static u_char de422_irq[] = {5,9,10,11,0};
+static u_char *depca_irq;
+static int autoprobed = 0, loading_module = 0;
+#endif /* MODULE */
+
+static char name[DEPCA_STRLEN];
+static int num_depcas = 0, num_eth = 0;
+static int mem=0; /* For loadable module assignment
+ use insmod mem=0x????? .... */
+/* NOTE(review): '\0' is the integer constant 0, i.e. a null pointer
+ constant here — legal, but NULL would state the intent more clearly */
+static char *adapter_name = '\0'; /* If no PROM when loadable module
+ use insmod adapter_name=DE??? ...
+ */
+/*
+** Miscellaneous defines...
+*/
+/* Halt the LANCE: select CSR0 then write STOP. Assumes 'ioaddr' is in
+ scope at every expansion site (DEPCA_ADDR/DEPCA_DATA are ioaddr-relative). */
+#define STOP_DEPCA \
+ outw(CSR0, DEPCA_ADDR);\
+ outw(STOP, DEPCA_DATA)
+
+
+
+/*
+** Kernel probe entry point: scan the ISA then EISA buses for DEPCA /
+** EtherWORKS boards and initialise any found into the device list.
+** Returns 0 if at least one device came up, -EIO when asked to autoprobe
+** from a module, -ENODEV otherwise.
+*/
+int depca_probe(struct device *dev)
+{
+ int tmp = num_depcas, status = -ENODEV;
+ u_long iobase = dev->base_addr;
+
+ if ((iobase == 0) && loading_module){
+ printk("Autoprobing is not supported when loading a module based driver.\n");
+ status = -EIO;
+ } else {
+ isa_probe(dev, iobase);
+ eisa_probe(dev, iobase);
+
+ /* num_depcas unchanged => neither probe found hardware at 'iobase' */
+ if ((tmp == num_depcas) && (iobase != 0) && loading_module) {
+ printk("%s: depca_probe() cannot find device at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+
+ /*
+ ** Walk the device list to check that at least one device
+ ** initialised OK
+ */
+ for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
+
+ if (dev->priv) status = 0;
+ if (iobase == 0) autoprobed = 1;
+ }
+
+ return status;
+}
+
+/*
+** One-time hardware initialisation for a board already located at 'ioaddr':
+** stop the LANCE, identify the adapter via its shared-memory signature,
+** read the station address PROM, size and carve up the shared RAM into
+** init block + Rx/Tx descriptor rings + buffers, then (built-in only)
+** auto-detect the IRQ. Fills in dev->priv and the generic device methods.
+** Returns 0 on success, -ENOMEM/-ENXIO on failure.
+*/
+static int
+depca_hw_init(struct device *dev, u_long ioaddr)
+{
+ struct depca_private *lp;
+ int i, j, offset, netRAM, mem_len, status=0;
+ s16 nicsr;
+ u_long mem_start=0, mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
+
+ STOP_DEPCA;
+
+ /* Quiesce the board: shadow RAM/remote boot/interrupts off, mask on */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
+ outb(nicsr, DEPCA_NICSR);
+
+ /* Reading back STOP confirms a LANCE answered the stop command */
+ if (inw(DEPCA_DATA) == STOP) {
+ /* Try each candidate RAM base (or the insmod-supplied one) until a
+ recognised adapter signature is found */
+ do {
+ strcpy(name, (adapter_name ? adapter_name : ""));
+ mem_start = (mem ? mem & 0xf0000 : mem_base[mem_chkd++]);
+ DepcaSignature(name, mem_start);
+ } while (!mem && mem_base[mem_chkd] && (adapter == unknown));
+
+ if ((adapter != unknown) && mem_start) { /* found a DEPCA device */
+ dev->base_addr = ioaddr;
+
+ if ((ioaddr&0x0fff)==DEPCA_EISA_IO_PORTS) {/* EISA slot address */
+ printk("%s: %s at 0x%04lx (EISA slot %d)",
+ dev->name, name, ioaddr, (int)((ioaddr>>12)&0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at 0x%04lx", dev->name, name, ioaddr);
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ for (i=0; i<ETH_ALEN - 1; i++) { /* get the ethernet address */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x", dev->dev_addr[i]);
+
+ if (status == 0) {
+ /* Set up the maximum amount of network RAM(kB) */
+ netRAM = ((adapter != DEPCA) ? 64 : 48);
+ if ((nicsr & _128KB) && (adapter == de422)) netRAM = 128;
+ offset = 0x0000;
+
+ /* Shared Memory Base Address */
+ if (nicsr & BUF) {
+ offset = 0x8000; /* 32kbyte RAM offset*/
+ nicsr &= ~BS; /* DEPCA RAM in top 32k */
+ netRAM -= 32;
+ }
+ mem_start += offset; /* (E)ISA start address */
+ /* Check the ring+buffer+init-block layout fits the on-board RAM */
+ if ((mem_len = (NUM_RX_DESC*(sizeof(struct depca_rx_desc)+RX_BUFF_SZ) +
+ NUM_TX_DESC*(sizeof(struct depca_tx_desc)+TX_BUFF_SZ) +
+ sizeof(struct depca_init))) <=
+ (netRAM<<10)) {
+ printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
+
+ /* Enable the shadow RAM. */
+ if (adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Define the device private memory */
+ dev->priv = (void *) kmalloc(sizeof(struct depca_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ lp = (struct depca_private *)dev->priv;
+ memset((char *)dev->priv, 0, sizeof(struct depca_private));
+ lp->adapter = adapter;
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ request_region(ioaddr, DEPCA_TOTAL_SIZE, lp->adapter_name);
+
+ /* Initialisation Block */
+ lp->sh_mem = mem_start;
+ mem_start += sizeof(struct depca_init);
+
+ /* Tx & Rx descriptors (aligned to a quadword boundary) */
+ mem_start = (mem_start + ALIGN) & ~ALIGN;
+ lp->rx_ring = (struct depca_rx_desc *)mem_start;
+
+ mem_start += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
+ lp->tx_ring = (struct depca_tx_desc *)mem_start;
+
+ mem_start += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
+ /* The LANCE only sees 16 bits of address; remember the CPU-side
+ offset so rx/tx_memcpy can map back to virtual addresses */
+ lp->bus_offset = mem_start & 0x00ff0000;
+ mem_start &= LA_MASK; /* LANCE re-mapped start address */
+
+ lp->dma_buffs = mem_start;
+
+ /* Finish initialising the ring information. */
+ lp->rxRingMask = NUM_RX_DESC - 1;
+ lp->txRingMask = NUM_TX_DESC - 1;
+
+ /* Calculate Tx/Rx RLEN size for the descriptors: i = log2(ring
+ size), packed into bits 29-31 as the LANCE expects */
+ for (i=0, j = lp->rxRingMask; j>0; i++) {
+ j >>= 1;
+ }
+ lp->rx_rlen = (s32)(i << 29);
+ for (i=0, j = lp->txRingMask; j>0; i++) {
+ j >>= 1;
+ }
+ lp->tx_rlen = (s32)(i << 29);
+
+ /* Load the initialisation block */
+ depca_init_ring(dev);
+
+ /* Initialise the control and status registers */
+ LoadCSRs(dev);
+
+ /* Enable DEPCA board interrupts for autoprobing */
+ nicsr = ((nicsr & ~IM)|IEN);
+ outb(nicsr, DEPCA_NICSR);
+
+ /* To auto-IRQ we enable the initialization-done and DMA err,
+ interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ unsigned char irqnum;
+ autoirq_setup(0);
+
+ /* Assign the correct irq list */
+ switch (lp->adapter) {
+ case DEPCA:
+ case de100:
+ case de101:
+ depca_irq = de1xx_irq;
+ break;
+ case de200:
+ case de201:
+ case de202:
+ case de210:
+ depca_irq = de2xx_irq;
+ break;
+ case de422:
+ depca_irq = de422_irq;
+ break;
+ }
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(INEA | INIT, DEPCA_DATA);
+
+ irqnum = autoirq_report(1);
+ if (!irqnum) {
+ printk(" and failed to detect IRQ line.\n");
+ status = -ENXIO;
+ } else {
+ /* Accept the detected IRQ only if it is legal for this board */
+ for (dev->irq=0,i=0; (depca_irq[i]) && (!dev->irq); i++) {
+ if (irqnum == depca_irq[i]) {
+ dev->irq = irqnum;
+ printk(" and uses IRQ%d.\n", dev->irq);
+ }
+ }
+
+ if (!dev->irq) {
+ printk(" but incorrect IRQ line detected.\n");
+ status = -ENXIO;
+ }
+ }
+#endif /* MODULE */
+ } else {
+ printk(" and assigned IRQ%d.\n", dev->irq);
+ }
+ if (status) release_region(ioaddr, DEPCA_TOTAL_SIZE);
+ } else {
+ printk(",\n requests %dkB RAM: only %dkB is available!\n",
+ (mem_len>>10), netRAM);
+ status = -ENXIO;
+ }
+ } else {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ status = -ENXIO;
+ }
+ } else {
+ status = -ENXIO;
+ }
+ if (!status) {
+ if (depca_debug > 1) {
+ printk("%s", version);
+ }
+
+ /* The DEPCA-specific entries in the device structure. */
+ dev->open = &depca_open;
+ dev->hard_start_xmit = &depca_start_xmit;
+ dev->stop = &depca_close;
+ dev->get_stats = &depca_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &depca_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+ } else { /* Incorrectly initialised hardware */
+ if (dev->priv) {
+ kfree_s(dev->priv, sizeof(struct depca_private));
+ dev->priv = NULL;
+ }
+ }
+ } else {
+ status = -ENXIO;
+ }
+
+ return status;
+}
+
+
+/*
+** Bring the interface up (dev->open): re-enable shadow RAM, rebuild the
+** descriptor rings, claim the IRQ and (re)start the LANCE.
+** Returns 0 on success or -EAGAIN if the IRQ is busy.
+*/
+static int
+depca_open(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ s16 nicsr;
+ int status = 0;
+
+ irq2dev_map[dev->irq] = dev;
+ STOP_DEPCA;
+ nicsr = inb(DEPCA_NICSR);
+
+ /* Make sure the shadow RAM is enabled */
+ if (adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Re-initialize the DEPCA... */
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+
+ depca_dbg_open(dev);
+
+ if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name, NULL)) {
+ printk("depca_open(): Requested IRQ%d is busy\n",dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /* Enable DEPCA board interrupts and turn off LED */
+ nicsr = ((nicsr & ~IM & ~LED)|IEN);
+ outb(nicsr, DEPCA_NICSR);
+ outw(CSR0,DEPCA_ADDR);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ status = InitRestartDepca(dev);
+
+ if (depca_debug > 1){
+ printk("CSR0: 0x%4.4x\n",inw(DEPCA_DATA));
+ printk("nicsr: 0x%02x\n",inb(DEPCA_NICSR));
+ }
+ }
+
+ /* NOTE(review): the use count is bumped even when request_irq failed;
+ depca_close() will rebalance it on the matching 'down' — confirm */
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/* Initialize the lance Rx and Tx descriptor rings. */
+/*
+** Resets the ring indices, gives every Rx descriptor (with its negated
+** buffer length) back to the LANCE via R_OWN, points the Tx descriptors
+** at their buffers, and rebuilds the shadow init block (ring pointers
+** packed with the RLEN bits, station address, multicast filter).
+*/
+static void
+depca_init_ring(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_int i;
+ u_long p;
+
+ /* Lock out other processes whilst setting up the hardware */
+ set_bit(0, (void *)&dev->tbusy);
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Initialize the base addresses and length of each buffer in the ring */
+ for (i = 0; i <= lp->rxRingMask; i++) {
+ writel((p=lp->dma_buffs+i*RX_BUFF_SZ) | R_OWN, &lp->rx_ring[i].base);
+ writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
+ /* CPU-visible alias of the LANCE-space buffer address */
+ lp->rx_memcpy[i]=(char *)(p+lp->bus_offset);
+ }
+ for (i = 0; i <= lp->txRingMask; i++) {
+ writel((p=lp->dma_buffs+(i+lp->txRingMask+1)*TX_BUFF_SZ) & 0x00ffffff,
+ &lp->tx_ring[i].base);
+ lp->tx_memcpy[i]=(char *)(p+lp->bus_offset);
+ }
+
+ /* Set up the initialization block */
+ lp->init_block.rx_ring = ((u32)((u_long)lp->rx_ring)&LA_MASK) | lp->rx_rlen;
+ lp->init_block.tx_ring = ((u32)((u_long)lp->tx_ring)&LA_MASK) | lp->tx_rlen;
+
+ SetMulticastFilter(dev);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ }
+
+ lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
+
+ return;
+}
+
+/*
+** Writes a socket buffer to TX descriptor ring and starts transmission
+**
+** Returns 0 when the packet was queued (skb freed here), or -1 to make
+** the network layer requeue the skb (busy, access conflict, or ring full).
+** A stalled transmitter (>1s since trans_start) triggers a full reset.
+*/
+static int
+depca_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int status = 0;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 1*HZ) {
+ status = -1;
+ } else {
+ printk("%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, inw(DEPCA_DATA));
+
+ STOP_DEPCA;
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+ dev->interrupt = UNMASK_INTERRUPTS;
+ dev->start = 1;
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ InitRestartDepca(dev);
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ return status;
+ } else if (skb == NULL) {
+ /* NULL skb is the 2.0-era "kick the transmitter" convention */
+ dev_tint(dev);
+ } else if (skb->len > 0) {
+ /* Enforce 1 process per h/w access */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ status = -1;
+ } else {
+ if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
+ status = load_packet(dev, skb);
+
+ if (!status) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ /* Keep tbusy set (flow control) only when the ring is now full */
+ if (TX_BUFFS_AVAIL) {
+ dev->tbusy=0;
+ }
+ } else {
+ status = -1;
+ }
+ }
+ }
+
+ return status;
+}
+
+/*
+** The DEPCA interrupt handler.
+**
+** Masks board interrupts (and lights the LED) for the duration, acks all
+** pending CSR0 sources, then dispatches to depca_rx()/depca_tx() and
+** restarts the queue if Tx ring space became available.
+*/
+static void
+depca_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct depca_private *lp;
+ s16 csr0, nicsr;
+ u_long ioaddr;
+
+ if (dev == NULL) {
+ printk ("depca_interrupt(): irq %d for unknown device.\n", irq);
+ } else {
+ lp = (struct depca_private *)dev->priv;
+ ioaddr = dev->base_addr;
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = MASK_INTERRUPTS;
+
+ /* mask the DEPCA board interrupts and turn on the LED */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= (IM|LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ outw(CSR0, DEPCA_ADDR);
+ csr0 = inw(DEPCA_DATA);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & INTE, DEPCA_DATA);
+
+ if (csr0 & RINT) /* Rx interrupt (packet arrived) */
+ depca_rx(dev);
+
+ if (csr0 & TINT) /* Tx interrupt (packet sent) */
+ depca_tx(dev);
+
+ if ((TX_BUFFS_AVAIL >= 0) && dev->tbusy) { /* any resources available? */
+ dev->tbusy = 0; /* clear TX busy flag */
+ mark_bh(NET_BH);
+ }
+
+ /* Unmask the DEPCA board interrupts and turn off the LED */
+ nicsr = (nicsr & ~IM & ~LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ dev->interrupt = UNMASK_INTERRUPTS;
+ }
+
+ return;
+}
+
+/*
+** Receive path: walk the Rx ring from rx_new while descriptors are owned
+** by the host (R_OWN clear). Frames may span several descriptors
+** (R_STP..R_ENP); completed good frames are copied out of shared memory
+** into a fresh skb and handed to the stack, then every consumed
+** descriptor is given back to the LANCE. Always returns 0.
+*/
+static int
+depca_rx(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int i, entry;
+ s32 status;
+
+ for (entry=lp->rx_new;
+ !(readl(&lp->rx_ring[entry].base) & R_OWN);
+ entry=lp->rx_new){
+ /* Status bits live in the top 16 bits of the descriptor base word */
+ status = readl(&lp->rx_ring[entry].base) >> 16 ;
+ if (status & R_STP) { /* Remember start of frame */
+ lp->rx_old = entry;
+ }
+ if (status & R_ENP) { /* Valid frame status */
+ if (status & R_ERR) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & R_FRAM) lp->stats.rx_frame_errors++;
+ if (status & R_OFLO) lp->stats.rx_over_errors++;
+ if (status & R_CRC) lp->stats.rx_crc_errors++;
+ if (status & R_BUFF) lp->stats.rx_fifo_errors++;
+ } else {
+ short len, pkt_len = readw(&lp->rx_ring[entry].msg_length);
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb != NULL) {
+ unsigned char *buf;
+ skb_reserve(skb,2); /* 16 byte align the IP header */
+ buf = skb_put(skb,pkt_len);
+ skb->dev = dev;
+ if (entry < lp->rx_old) { /* Wrapped buffer */
+ len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
+ memcpy_fromio(buf, lp->rx_memcpy[lp->rx_old], len);
+ memcpy_fromio(buf + len, lp->rx_memcpy[0], pkt_len-len);
+ } else { /* Linear buffer */
+ memcpy_fromio(buf, lp->rx_memcpy[lp->rx_old], pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats: bin packet sizes in DEPCA_PKT_BIN_SZ steps
+ */
+ lp->stats.rx_packets++;
+ for (i=1; i<DEPCA_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DEPCA_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DEPCA_PKT_STAT_SZ;
+ }
+ }
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s16 *)&buf[0] == -1) &&
+ (*(s16 *)&buf[2] == -1) &&
+ (*(s16 *)&buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s16 *)&buf[0] == *(s16 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&buf[2] == *(s16 *)&dev->dev_addr[2]) &&
+ (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ /* No skb: leave the frame in the ring and retry next interrupt */
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ /* Change buffer ownership for this last frame, back to the adapter */
+ for (; lp->rx_old!=entry; lp->rx_old=(lp->rx_old+1)&lp->rxRingMask) {
+ writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN,
+ &lp->rx_ring[lp->rx_old].base);
+ }
+ writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
+ }
+
+ return 0;
+}
+
+/*
+** Buffer sent - check for buffer errors.
+**
+** Reaps completed Tx descriptors from tx_old up to tx_new, folding the
+** per-descriptor status into the device statistics. Stops at the first
+** descriptor still owned by the LANCE (sign bit of the status half-word).
+** Always returns 0.
+*/
+static int
+depca_tx(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int entry;
+ s32 status;
+ u_long ioaddr = dev->base_addr;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = readl(&lp->tx_ring[entry].base) >> 16 ;
+
+ if (status < 0) { /* Packet not yet sent! */
+ break;
+ } else if (status & T_ERR) { /* An error occurred. */
+ status = readl(&lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (status & TMD3_RTRY) lp->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR) lp->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL) lp->stats.tx_window_errors++;
+ if (status & TMD3_UFLO) lp->stats.tx_fifo_errors++;
+ if (status & (TMD3_BUFF | TMD3_UFLO)) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+ }
+ } else if (status & (T_MORE | T_ONE)) {
+ lp->stats.collisions++;
+ } else {
+ lp->stats.tx_packets++;
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
+ }
+
+ return 0;
+}
+
+/*
+** Take the interface down (dev->stop): stop the LANCE, hand the boot ROM
+** back (disable shadow RAM) for non-DEPCA boards, and release the IRQ.
+** Always returns 0.
+*/
+static int
+depca_close(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ s16 nicsr;
+ u_long ioaddr = dev->base_addr;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ outw(CSR0, DEPCA_ADDR);
+
+ if (depca_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(DEPCA_DATA));
+ }
+
+ /*
+ ** We stop the DEPCA here -- it occasionally polls
+ ** memory if we don't.
+ */
+ outw(STOP, DEPCA_DATA);
+
+ /*
+ ** Give back the ROM in case the user wants to go to DOS
+ */
+ if (lp->adapter != DEPCA) {
+ nicsr = inb(DEPCA_NICSR);
+ nicsr &= ~SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /*
+ ** Free the associated irq
+ */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static void LoadCSRs(struct device *dev)
+{
+  /*
+  ** Program the LANCE with the location of the initialisation block:
+  ** CSR1/CSR2 get the low/high words of the shared-memory address and
+  ** CSR3 gets the bus (ALE) control bits.  Leaves CSR0 selected so the
+  ** caller can immediately issue commands.
+  */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+  u_long ioaddr = dev->base_addr;       /* needed by the DEPCA_* port macros */
+
+  outw(CSR1, DEPCA_ADDR);                /* initialisation block address LSW */
+  outw((u16)(lp->sh_mem & LA_MASK), DEPCA_DATA);
+  outw(CSR2, DEPCA_ADDR);                /* initialisation block address MSW */
+  outw((u16)((lp->sh_mem & LA_MASK) >> 16), DEPCA_DATA);
+  outw(CSR3, DEPCA_ADDR);                /* ALE control */
+  outw(ACON, DEPCA_DATA);
+
+  outw(CSR0, DEPCA_ADDR);                /* Point back to CSR0 */
+
+  return;
+}
+
+static int InitRestartDepca(struct device *dev)
+{
+  /*
+  ** Copy the shadow initialisation block into shared memory, tell the
+  ** LANCE to initialise, then poll (up to 100 reads) for IDON.  On
+  ** success, acknowledge IDON, enable interrupts and start the chip.
+  ** Returns 0 on success, -1 if IDON never appeared.
+  */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+  u_long ioaddr = dev->base_addr;       /* needed by the DEPCA_* port macros */
+  int i, status=0;
+
+  /* Copy the shadow init_block to shared memory */
+  memcpy_toio((char *)lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
+
+  outw(CSR0, DEPCA_ADDR);                /* point back to CSR0 */
+  outw(INIT, DEPCA_DATA);                /* initialize DEPCA */
+
+  /* wait for lance to complete initialisation */
+  for (i=0;(i<100) && !(inw(DEPCA_DATA) & IDON); i++);
+
+  if (i!=100) {                          /* IDON seen before the poll limit */
+    /* clear IDON by writing a "1", enable interrupts and start lance */
+    outw(IDON | INEA | STRT, DEPCA_DATA);
+    if (depca_debug > 2) {
+      printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n",
+	     dev->name, i, lp->sh_mem, inw(DEPCA_DATA));
+    }
+  } else {                               /* initialisation timed out */
+    printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n",
+	   dev->name, i, lp->sh_mem, inw(DEPCA_DATA));
+    status = -1;
+  }
+
+  return status;
+}
+
+static struct enet_statistics *
+depca_get_stats(struct device *dev)
+{
+  /* Return a pointer to the per-device statistics kept in the private
+  ** area; the counters are updated by the rx/tx paths as they run. */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+
+  /* Null body since there is no framing error counter */
+
+  return &lp->stats;
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+  /*
+  ** Reprogram the LANCE multicast filter / promiscuous mode.  Only acts
+  ** when the device is open (irq2dev_map entry set).  The chip must be
+  ** stopped and re-initialised for the mode change to take effect.
+  */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+  u_long ioaddr = dev->base_addr;       /* needed by STOP_DEPCA macro */
+
+  if (irq2dev_map[dev->irq] != NULL) {
+    while(dev->tbusy);                /* Stop ring access (busy-wait) */
+    set_bit(0, (void*)&dev->tbusy);
+    while(lp->tx_old != lp->tx_new);  /* Wait for the ring to empty (spin) */
+
+    STOP_DEPCA;                       /* Temporarily stop the depca. */
+    depca_init_ring(dev);             /* Initialize the descriptor rings */
+
+    if (dev->flags & IFF_PROMISC) {   /* Set promiscuous mode */
+      lp->init_block.mode |= PROM;
+    } else {
+      SetMulticastFilter(dev);        /* rebuild the 64-bit hash filter */
+      lp->init_block.mode &= ~PROM;   /* Unset promiscuous mode */
+    }
+
+    LoadCSRs(dev);                    /* Reload CSR3 */
+    InitRestartDepca(dev);            /* Resume normal operation. */
+    dev->tbusy = 0;                   /* Unlock the TX ring */
+  }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Big endian crc one liner is mine, all mine, ha ha ha ha!
+** LANCE calculates its hash codes big endian.
+*/
+static void SetMulticastFilter(struct device *dev)
+{
+  /*
+  ** Build the 64-bit logical address filter in the shadow init block.
+  ** IFF_ALLMULTI sets every bit; otherwise a big-endian CRC-32 is
+  ** computed over each multicast address and its 6 most significant
+  ** bits (taken in reverse order) select a bit in the hash table.
+  ** Only takes effect after the init block is reloaded into the chip.
+  */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+  struct dev_mc_list *dmi=dev->mc_list;
+  char *addrs;
+  int i, j, bit, byte;
+  u16 hashcode;
+  s32 crc, poly = CRC_POLYNOMIAL_BE;
+
+  if (dev->flags & IFF_ALLMULTI) {         /* Set all multicast bits */
+    for (i=0; i<(HASH_TABLE_LEN>>3); i++) {
+      lp->init_block.mcast_table[i] = (char)0xff;
+    }
+  } else {
+    for (i=0; i<(HASH_TABLE_LEN>>3); i++){ /* Clear the multicast table */
+      lp->init_block.mcast_table[i]=0;
+    }
+    /* Add multicast addresses */
+    for (i=0;i<dev->mc_count;i++) {        /* for each address in the list */
+      addrs=dmi->dmi_addr;
+      dmi=dmi->next;
+      if ((*addrs & 0x01) == 1) {          /* multicast address? */
+	crc = 0xffffffff;                  /* init CRC for each address */
+	for (byte=0;byte<ETH_ALEN;byte++) {/* for each address byte */
+	  /* process each address bit */
+	  for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+	    /* (crc<0) tests the CRC's top bit; XOR with the data bit
+	    ** decides whether to fold in the polynomial */
+	    crc = (crc << 1) ^ ((((crc<0?1:0) ^ bit) & 0x01) ? poly : 0);
+	  }
+	}
+	hashcode = (crc & 1);              /* hashcode is 6 LSb of CRC ... */
+	for (j=0;j<5;j++) {                /* ... in reverse order. */
+	  hashcode = (hashcode << 1) | ((crc>>=1) & 1);
+	}
+
+
+	byte = hashcode >> 3;              /* bit[3-5] -> byte in filter */
+	bit = 1 << (hashcode & 0x07);      /* bit[0-2] -> bit in byte */
+	lp->init_block.mcast_table[byte] |= bit;
+      }
+    }
+  }
+
+  return;
+}
+
+/*
+** ISA bus I/O device probe
+*/
+static void isa_probe(struct device *dev, u_long ioaddr)
+{
+  /*
+  ** Probe the ISA bus for DEPCA adapters.  ioaddr == 0 means scan the
+  ** whole DEPCA_IO_PORTS table; a non-zero ioaddr probes just that one
+  ** location.  Addresses above 0x400 are EISA and handled elsewhere.
+  ** Registers each adapter found via alloc_device()/depca_hw_init().
+  */
+  int i = num_depcas, maxSlots;
+  s32 ports[] = DEPCA_IO_PORTS;
+
+  if (!ioaddr && autoprobed) return ;     /* Been here before ! */
+  if (ioaddr > 0x400) return;             /* EISA Address */
+  if (i >= MAX_NUM_DEPCAS) return;        /* Too many ISA adapters */
+
+  if (ioaddr == 0) {                      /* Autoprobing */
+    maxSlots = MAX_NUM_DEPCAS;
+  } else {                                /* Probe a specific location */
+    ports[i] = ioaddr;
+    maxSlots = i + 1;
+  }
+
+  for (; (i<maxSlots) && (dev!=NULL) && ports[i]; i++) {
+    if (DevicePresent(ports[i]) == 0) {   /* address PROM signature found */
+      if (check_region(ports[i], DEPCA_TOTAL_SIZE) == 0) {
+	if ((dev = alloc_device(dev, ports[i])) != NULL) {
+	  if (depca_hw_init(dev, ports[i]) == 0) {
+	    num_depcas++;                 /* successfully initialised */
+	  }
+	  num_eth++;
+	}
+      } else if (autoprobed) {
+	printk("%s: region already allocated at 0x%04x.\n", dev->name,ports[i]);
+      }
+    }
+  }
+
+  return;
+}
+
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard. Up to 15 EISA devices are supported.
+*/
+static void eisa_probe(struct device *dev, u_long ioaddr)
+{
+  /*
+  ** Probe EISA slots for DEPCA adapters.  ioaddr == 0 scans slots 1..
+  ** MAX_EISA_SLOTS (slot 0 is usually the motherboard); otherwise the
+  ** slot number is derived from bits 12+ of the given address.
+  ** Note: the EISA_ID macro expands in terms of the local 'iobase'.
+  */
+  int i, maxSlots;
+  u_long iobase;
+  char name[DEPCA_STRLEN];
+
+  if (!ioaddr && autoprobed) return ;          /* Been here before ! */
+  if ((ioaddr < 0x400) && (ioaddr > 0)) return;/* ISA Address */
+
+  if (ioaddr == 0) {                           /* Autoprobing */
+    iobase = EISA_SLOT_INC;                    /* Get the first slot address */
+    i = 1;
+    maxSlots = MAX_EISA_SLOTS;
+  } else {                                     /* Probe a specific location */
+    iobase = ioaddr;
+    i = (ioaddr >> 12);                        /* slot number from address */
+    maxSlots = i + 1;
+  }
+  /* A bare slot address gets the standard I/O port offset added */
+  if ((iobase & 0x0fff) == 0) iobase += DEPCA_EISA_IO_PORTS;
+
+  for (; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
+    if (EISA_signature(name, EISA_ID)) {       /* recognised EISA ID word? */
+      if (DevicePresent(iobase) == 0) {        /* address PROM signature ok */
+	if (check_region(iobase, DEPCA_TOTAL_SIZE) == 0) {
+	  if ((dev = alloc_device(dev, iobase)) != NULL) {
+	    if (depca_hw_init(dev, iobase) == 0) {
+	      num_depcas++;                    /* successfully initialised */
+	    }
+	    num_eth++;
+	  }
+	} else if (autoprobed) {
+	  printk("%s: region already allocated at 0x%04lx.\n",dev->name,iobase);
+	}
+      }
+    }
+  }
+
+  return;
+}
+
+/*
+** Search the entire 'eth' device list for a fixed probe. If a match isn't
+** found then check for an autoprobe or unused device location. If they
+** are not available then insert a new device structure at the end of
+** the current list.
+*/
+static struct device *
+alloc_device(struct device *dev, u_long iobase)
+{
+  /*
+  ** Find a 'struct device' to attach this adapter to.  Preference
+  ** order: an entry fixed to this iobase, then the first autoprobe/
+  ** unused entry (base_addr 0 or DEPCA_NDA), else a freshly inserted
+  ** entry at the end of the 'eth' list.  Returns NULL only if
+  ** insert_device() fails.  When loading as a module the passed-in
+  ** device is used directly.
+  */
+  struct device *adev = NULL;             /* first available (unused) slot */
+  int fixed = 0, new_dev = 0;
+
+  num_eth = depca_dev_index(dev->name);
+  if (loading_module) return dev;
+
+  while (1) {
+    if (((dev->base_addr == DEPCA_NDA) || (dev->base_addr==0)) && !adev) {
+      adev=dev;                           /* remember first free slot */
+    } else if ((dev->priv == NULL) && (dev->base_addr==iobase)) {
+      fixed = 1;                          /* user fixed this iobase here */
+    } else {
+      if (dev->next == NULL) {
+	new_dev = 1;                      /* end of list - must extend it */
+      } else if (strncmp(dev->next->name, "eth", 3) != 0) {
+	new_dev = 1;                      /* end of the 'eth' run */
+      }
+    }
+    if ((dev->next == NULL) || new_dev || fixed) break;
+    dev = dev->next;
+    num_eth++;
+  }
+  if (adev && !fixed) {                   /* fall back to the free slot */
+    dev = adev;
+    num_eth = depca_dev_index(dev->name);
+    new_dev = 0;
+  }
+
+  if (((dev->next == NULL) &&
+       ((dev->base_addr != DEPCA_NDA) && (dev->base_addr != 0)) && !fixed) ||
+      new_dev) {
+    num_eth++;			/* New device */
+    dev = insert_device(dev, iobase, depca_probe);
+  }
+
+  return dev;
+}
+
+/*
+** If at end of eth device list and can't use current entry, malloc
+** one up. If memory could not be allocated, print an error message.
+*/
+static struct device *
+insert_device(struct device *dev, u_long iobase, int (*init)(struct device *))
+{
+  /*
+  ** Allocate a new 'struct device' and link it in after 'dev'.  The
+  ** extra 8 bytes after the struct hold the generated "ethN" name.
+  ** Returns the new device, or NULL (with a console message) when
+  ** kmalloc fails.
+  */
+  struct device *new;
+
+  new = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
+  if (new == NULL) {
+    printk("eth%d: Device not initialised, insufficient memory\n",num_eth);
+    return NULL;
+  } else {
+    new->next = dev->next;
+    dev->next = new;
+    dev = dev->next;                     /* point to the new device */
+    dev->name = (char *)(dev + 1);       /* name lives in the trailing 8 bytes */
+    if (num_eth > 9999) {
+      sprintf(dev->name,"eth????");      /* New device name */
+    } else {
+      sprintf(dev->name,"eth%d", num_eth);/* New device name */
+    }
+    dev->base_addr = iobase;             /* assign the io address */
+    dev->init = init;                    /* initialisation routine */
+  }
+
+  return dev;
+}
+
+static int
+depca_dev_index(char *s)
+{
+  /*
+  ** Extract the first run of decimal digits from a device name
+  ** (e.g. "eth2" -> 2).  Scanning stops at the first non-digit that
+  ** follows a digit.  Returns 0 when the string contains no digits.
+  */
+  int i=0, j=0;                          /* i: accumulated value, j: seen-digit flag */
+
+  for (;*s; s++) {
+    if (isdigit(*s)) {
+      j=1;
+      i = (i * 10) + (*s - '0');
+    } else if (j) break;                 /* digits ended - stop */
+  }
+
+  return i;
+}
+
+/*
+** Look for a particular board name in the on-board Remote Diagnostics
+** and Boot (RDB) ROM. This will also give us a clue to the network RAM
+** base address.
+*/
+static void DepcaSignature(char *name, u_long paddr)
+{
+  /*
+  ** Identify the adapter type.  Reads 16 bytes of the on-board boot ROM
+  ** at paddr+0xc000 and scans them for one of the known DEPCA_SIGNATURE
+  ** strings; if the ROM yields nothing and a name was supplied, the name
+  ** itself is matched against the table.  On return 'name' holds the
+  ** matched signature and the file-scope 'adapter' holds its index
+  ** ('unknown' when no match; relies on the table ending with "").
+  */
+  u_int i,j,k;
+  const char *signatures[] = DEPCA_SIGNATURE;
+  char tmpstr[16];
+
+  /* Copy the first 16 bytes of ROM */
+  for (i=0;i<16;i++) {
+    tmpstr[i] = readb(paddr+0xc000+i);
+  }
+
+  /* Check if PROM contains a valid string */
+  for (i=0;*signatures[i]!='\0';i++) {
+    for (j=0,k=0;j<16 && k<strlen(signatures[i]);j++) {
+      if (signatures[i][k] == tmpstr[j]) {    /* track signature */
+	k++;
+      } else {                     /* lost signature; begin search again */
+	k=0;
+      }
+    }
+    if (k == strlen(signatures[i])) break;
+  }
+
+  /* Check if name string is valid, provided there's no PROM */
+  if (*name && (i == unknown)) {
+    for (i=0;*signatures[i]!='\0';i++) {
+      if (strcmp(name,signatures[i]) == 0) break;
+    }
+  }
+
+  /* Update search results */
+  strcpy(name,signatures[i]);
+  adapter = i;
+
+  return;
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all DEPCA products. Note that the original DEPCA needs
+** its ROM address counter to be initialized and enabled. Only enable
+** if the first address octet is a 0x08 - this minimises the chances of
+** messing around with some other hardware, but it assumes that this DEPCA
+** card initialized itself correctly.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+static int DevicePresent(u_long ioaddr)
+{
+  /*
+  ** Search the Ethernet address ROM for the 8-byte product signature
+  ** (ETH_PROM_SIG repeated twice).  The ROM's internal address counter
+  ** may start anywhere, so up to PROBE_LENGTH+sigLength-1 bytes are
+  ** scanned; the search stops as soon as the signature completes so
+  ** the counter is left positioned at the station address.
+  ** Returns 0 when found, -ENODEV otherwise.
+  */
+  union {
+    struct {
+      u32 a;
+      u32 b;
+    } llsig;
+    char Sig[sizeof(u32) << 1];         /* the 8 signature bytes as a char array */
+  } dev;
+  short sigLength=0;
+  s8 data;
+  s16 nicsr;
+  int i, j, status = 0;
+
+  data = inb(DEPCA_PROM);                /* clear counter on DEPCA */
+  data = inb(DEPCA_PROM);                /* read data */
+
+  /* 0x08 is the DEC OUI's first octet; only then touch the counter
+  ** enable, to avoid upsetting unrelated hardware. */
+  if (data == 0x08) {                    /* Enable counter on DEPCA */
+    nicsr = inb(DEPCA_NICSR);
+    nicsr |= AAC;
+    outb(nicsr, DEPCA_NICSR);
+  }
+
+  dev.llsig.a = ETH_PROM_SIG;
+  dev.llsig.b = ETH_PROM_SIG;
+  sigLength = sizeof(u32) << 1;
+
+  for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+    data = inb(DEPCA_PROM);
+    if (dev.Sig[j] == data) {            /* track signature */
+      j++;
+    } else {                    /* lost signature; begin search again */
+      if (data == dev.Sig[0]) {          /* rare case.... */
+	j=1;
+      } else {
+	j=0;
+      }
+    }
+  }
+
+  if (j!=sigLength) {
+    status = -ENODEV;                    /* search failed */
+  }
+
+  return status;
+}
+
+/*
+** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
+** reason: access the upper half of the PROM with x=0; access the lower half
+** with x=1.
+*/
+static int get_hw_addr(struct device *dev)
+{
+  /*
+  ** Read the 6-byte station address from the address PROM into
+  ** dev->dev_addr while accumulating a rotating one's-complement
+  ** checksum (rotate-left with end-around carry, mod 0xffff), then
+  ** compare it against the 16-bit checksum stored after the address.
+  ** DE100/DE101 need the non-standard +1 port offset (see note above).
+  ** Returns 0 when the checksum matches, -1 otherwise.
+  */
+  u_long ioaddr = dev->base_addr;
+  int i, k, tmp, status = 0;
+  u_short j, x, chksum;
+
+  x = (((adapter == de100) || (adapter == de101)) ? 1 : 0);
+
+  for (i=0,k=0,j=0;j<3;j++) {            /* three 16-bit words of address */
+    k <<= 1 ;                            /* rotate the running checksum */
+    if (k > 0xffff) k-=0xffff;           /* end-around carry */
+
+    k += (u_char) (tmp = inb(DEPCA_PROM + x));
+    dev->dev_addr[i++] = (u_char) tmp;
+    k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
+    dev->dev_addr[i++] = (u_char) tmp;
+
+    if (k > 0xffff) k-=0xffff;
+  }
+  if (k == 0xffff) k=0;                  /* normalise one's-complement zero */
+
+  chksum = (u_char) inb(DEPCA_PROM + x); /* stored checksum, little-endian */
+  chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
+  if (k != chksum) status = -1;
+
+  return status;
+}
+
+/*
+** Load a packet into the shared memory
+*/
+static int load_packet(struct device *dev, struct sk_buff *skb)
+{
+  /*
+  ** Copy skb into the shared-memory TX buffers and build the descriptor
+  ** chain.  The write ORDER is deliberate: data and descriptor fields
+  ** first, then T_STP/T_ENP markers, then T_OWN handed over last (and
+  ** backwards, last descriptor to first) so the LANCE never sees a
+  ** partially built chain.  Returns 0 on success, -1 if the ring lacks
+  ** room (last needed descriptor still owned by the chip).
+  */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+  int i, entry, end, len, status = 0;
+
+  entry = lp->tx_new;                    /* Ring around buffer number. */
+  end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
+  if (!(readl(&lp->tx_ring[end].base) & T_OWN)) {/* Enough room? */
+    /*
+    ** Caution: the write order is important here... don't set up the
+    ** ownership rights until all the other information is in place.
+    */
+    if (end < entry) {                   /* wrapped buffer */
+      len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
+      memcpy_toio(lp->tx_memcpy[entry], skb->data, len);
+      memcpy_toio(lp->tx_memcpy[0], skb->data + len, skb->len - len);
+    } else {                             /* linear buffer */
+      memcpy_toio(lp->tx_memcpy[entry], skb->data, skb->len);
+    }
+
+    /* set up the buffer descriptors */
+    len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;  /* pad short frames */
+    for (i = entry; i != end; i = (i + 1) & lp->txRingMask) {
+      /* clean out flags */
+      writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
+      writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
+      /* buffer lengths are stored as negative (two's complement) values */
+      writew(-TX_BUFF_SZ, &lp->tx_ring[i].length);/* packet length in buffer */
+      len -= TX_BUFF_SZ;
+    }
+    /* clean out flags */
+    writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
+    writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
+    writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
+
+    /* start of packet */
+    writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
+    /* end of packet */
+    writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
+
+    /* hand descriptors to the chip in REVERSE order; first one last */
+    for (i=end; i!=entry; --i) {
+      /* ownership of packet */
+      writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
+      if (i == 0) i=lp->txRingMask+1;    /* wrap the backwards walk */
+    }
+    writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
+
+    lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
+  } else {
+    status = -1;                         /* ring full - caller must retry */
+  }
+
+  return status;
+}
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int EISA_signature(char *name, s32 eisa_id)
+{
+  /*
+  ** Read the 32-bit EISA product identifier at 'eisa_id' and unpack it
+  ** into the standard 3-letter manufacturer code + 2 hex product digits
+  ** (compressed 5-bits-per-letter encoding).  If the decoded code
+  ** contains one of the DEPCA_SIGNATURE strings, copy it to 'name' and
+  ** return 1; otherwise return 0 with *name == '\0'.
+  */
+  u_int i;
+  const char *signatures[] = DEPCA_SIGNATURE;
+  char ManCode[DEPCA_STRLEN];
+  union {
+    s32 ID;
+    char Id[4];
+  } Eisa;
+  int status = 0;
+
+  *name = '\0';
+  Eisa.ID = inl(eisa_id);
+
+  /* Each letter is 5 bits, offset from 'A'-1 (0x40); digits are BCD. */
+  ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+  ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+  ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+  ManCode[3]=(( Eisa.Id[2]&0x0f)+0x30);
+  ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+  ManCode[5]='\0';
+
+  for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
+    if (strstr(ManCode, signatures[i]) != NULL) {
+      strcpy(name,ManCode);
+      status = 1;
+    }
+  }
+
+  return status;
+}
+
+static void depca_dbg_open(struct device *dev)
+{
+  /*
+  ** Debug helper called at open time when depca_debug > 1: refreshes
+  ** the shared-memory init block and dumps the descriptor rings, the
+  ** init block contents and the LANCE CSR1-CSR3 values to the log.
+  ** Purely informational - no state is changed beyond the init-block
+  ** copy and the CSR address-port selection.
+  */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+  u_long ioaddr = dev->base_addr;
+  struct depca_init *p = (struct depca_init *)lp->sh_mem;
+  int i;
+
+  if (depca_debug > 1){
+    /* Copy the shadow init_block to shared memory */
+    memcpy_toio((char *)lp->sh_mem,&lp->init_block,sizeof(struct depca_init));
+
+    printk("%s: depca open with irq %d\n",dev->name,dev->irq);
+    printk("Descriptor head addresses:\n");
+    printk("\t0x%lx 0x%lx\n",(u_long)lp->rx_ring, (u_long)lp->tx_ring);
+    printk("Descriptor addresses:\nRX: ");
+    /* print only the first 3 entries, then the last one */
+    for (i=0;i<lp->rxRingMask;i++){
+      if (i < 3) {
+	printk("0x%8.8lx ", (long) &lp->rx_ring[i].base);
+      }
+    }
+    printk("...0x%8.8lx\n", (long) &lp->rx_ring[i].base);
+    printk("TX: ");
+    for (i=0;i<lp->txRingMask;i++){
+      if (i < 3) {
+	printk("0x%8.8lx ", (long) &lp->tx_ring[i].base);
+      }
+    }
+    printk("...0x%8.8lx\n", (long) &lp->tx_ring[i].base);
+    printk("\nDescriptor buffers:\nRX: ");
+    for (i=0;i<lp->rxRingMask;i++){
+      if (i < 3) {
+	printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
+      }
+    }
+    printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
+    printk("TX: ");
+    for (i=0;i<lp->txRingMask;i++){
+      if (i < 3) {
+	printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
+      }
+    }
+    printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
+    printk("Initialisation block at 0x%8.8lx\n",lp->sh_mem);
+    printk("\tmode: 0x%4.4x\n",readw(&p->mode));
+    printk("\tphysical address: ");
+    for (i=0;i<ETH_ALEN-1;i++){
+      printk("%2.2x:",(u_char)readb(&p->phys_addr[i]));
+    }
+    printk("%2.2x\n",(u_char)readb(&p->phys_addr[i]));
+    printk("\tmulticast hash table: ");
+    for (i=0;i<(HASH_TABLE_LEN >> 3)-1;i++){
+      printk("%2.2x:",(u_char)readb(&p->mcast_table[i]));
+    }
+    printk("%2.2x\n",(u_char)readb(&p->mcast_table[i]));
+    printk("\trx_ring at: 0x%8.8x\n",readl(&p->rx_ring));
+    printk("\ttx_ring at: 0x%8.8x\n",readl(&p->tx_ring));
+    printk("dma_buffs: 0x%8.8lx\n",lp->dma_buffs);
+    printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n",
+	   (int)lp->rxRingMask + 1,
+	   lp->rx_rlen);
+    printk("TX: %d Log2(txRingMask): 0x%8.8x\n",
+	   (int)lp->txRingMask + 1,
+	   lp->tx_rlen);
+    outw(CSR2,DEPCA_ADDR);                /* CSR2 then CSR1 read back-to-back */
+    printk("CSR2&1: 0x%4.4x",inw(DEPCA_DATA));
+    outw(CSR1,DEPCA_ADDR);
+    printk("%4.4x\n",inw(DEPCA_DATA));
+    outw(CSR3,DEPCA_ADDR);
+    printk("CSR3: 0x%4.4x\n",inw(DEPCA_DATA));
+  }
+
+  return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+** All MCA IOCTLs will not work here and are for testing purposes only.
+*/
+static int depca_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+  /*
+  ** Private ioctl dispatcher.  'ioc' aliases the depca_ioctl struct
+  ** embedded in the ifreq; ioc->data points at a user-space buffer, so
+  ** every transfer goes through verify_area() + memcpy_tofs/fromfs.
+  ** Privileged sub-commands require suser().  Returns 0 or a negative
+  ** errno (-EPERM, -EOPNOTSUPP, or a verify_area() failure code).
+  */
+  struct depca_private *lp = (struct depca_private *)dev->priv;
+  struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_data;
+  int i, status = 0;
+  u_long ioaddr = dev->base_addr;
+  union {
+    u8  addr[(HASH_TABLE_LEN * ETH_ALEN)];
+    u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+    u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
+  } tmp;                                 /* bounce buffer for user transfers */
+
+  switch(ioc->cmd) {
+  case DEPCA_GET_HWADDR:             /* Get the hardware address */
+    for (i=0; i<ETH_ALEN; i++) {
+      tmp.addr[i] = dev->dev_addr[i];
+    }
+    ioc->len = ETH_ALEN;
+    if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+      memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+    }
+
+    break;
+  case DEPCA_SET_HWADDR:             /* Set the hardware address */
+    if (suser()) {
+      if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN))) {
+	memcpy_fromfs(tmp.addr,ioc->data,ETH_ALEN);
+	for (i=0; i<ETH_ALEN; i++) {
+	  dev->dev_addr[i] = tmp.addr[i];
+	}
+	/* Restart sequence: quiesce, re-init rings, reload, restart */
+	while(dev->tbusy);              /* Stop ring access (busy-wait) */
+	set_bit(0, (void*)&dev->tbusy);
+	while(lp->tx_old != lp->tx_new);/* Wait for the ring to empty */
+
+	STOP_DEPCA;                     /* Temporarily stop the depca. */
+	depca_init_ring(dev);           /* Initialize the descriptor rings */
+	LoadCSRs(dev);                  /* Reload CSR3 */
+	InitRestartDepca(dev);          /* Resume normal operation. */
+	dev->tbusy = 0;                 /* Unlock the TX ring */
+      }
+    } else {
+      status = -EPERM;
+    }
+
+    break;
+  case DEPCA_SET_PROM:               /* Set Promiscuous Mode */
+    if (suser()) {
+      while(dev->tbusy);              /* Stop ring access */
+      set_bit(0, (void*)&dev->tbusy);
+      while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+      STOP_DEPCA;                     /* Temporarily stop the depca. */
+      depca_init_ring(dev);           /* Initialize the descriptor rings */
+      lp->init_block.mode |= PROM;    /* Set promiscuous mode */
+
+      LoadCSRs(dev);                  /* Reload CSR3 */
+      InitRestartDepca(dev);          /* Resume normal operation. */
+      dev->tbusy = 0;                 /* Unlock the TX ring */
+    } else {
+      status = -EPERM;
+    }
+
+    break;
+  case DEPCA_CLR_PROM:               /* Clear Promiscuous Mode */
+    if (suser()) {
+      while(dev->tbusy);              /* Stop ring access */
+      set_bit(0, (void*)&dev->tbusy);
+      while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+      STOP_DEPCA;                     /* Temporarily stop the depca. */
+      depca_init_ring(dev);           /* Initialize the descriptor rings */
+      lp->init_block.mode &= ~PROM;   /* Clear promiscuous mode */
+
+      LoadCSRs(dev);                  /* Reload CSR3 */
+      InitRestartDepca(dev);          /* Resume normal operation. */
+      dev->tbusy = 0;                 /* Unlock the TX ring */
+    } else {
+      status = -EPERM;
+    }
+
+    break;
+  case DEPCA_SAY_BOO:                /* Say "Boo!" to the kernel log file */
+    printk("%s: Boo!\n", dev->name);
+
+    break;
+  case DEPCA_GET_MCA:                /* Get the multicast address table */
+    ioc->len = (HASH_TABLE_LEN >> 3);
+    if (!(status = verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+      memcpy_tofs(ioc->data, lp->init_block.mcast_table, ioc->len);
+    }
+
+    break;
+  case DEPCA_SET_MCA:                /* Set a multicast address */
+    if (suser()) {
+      /* NOTE(review): the addresses are copied into tmp.addr but
+      ** set_multicast_list() rebuilds the filter from dev->mc_list,
+      ** not from tmp - confirm this copy is intentional. */
+      if (!(status=verify_area(VERIFY_READ, ioc->data, ETH_ALEN*ioc->len))) {
+	memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+	set_multicast_list(dev);
+      }
+    } else {
+      status = -EPERM;
+    }
+
+    break;
+  case DEPCA_CLR_MCA:                /* Clear all multicast addresses */
+    if (suser()) {
+      set_multicast_list(dev);
+    } else {
+      status = -EPERM;
+    }
+
+    break;
+  case DEPCA_MCA_EN:                 /* Enable pass all multicast addressing */
+    if (suser()) {
+      set_multicast_list(dev);
+    } else {
+      status = -EPERM;
+    }
+
+    break;
+  case DEPCA_GET_STATS:              /* Get the driver statistics */
+    cli();                           /* keep the counters consistent */
+    ioc->len = sizeof(lp->pktStats);
+    if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+      memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
+    }
+    sti();
+
+    break;
+  case DEPCA_CLR_STATS:              /* Zero out the driver statistics */
+    if (suser()) {
+      cli();
+      memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+      sti();
+    } else {
+      status = -EPERM;
+    }
+
+    break;
+  case DEPCA_GET_REG:                /* Get the DEPCA Registers */
+    i=0;
+    tmp.sval[i++] = inw(DEPCA_NICSR);
+    outw(CSR0, DEPCA_ADDR);          /* status register */
+    tmp.sval[i++] = inw(DEPCA_DATA);
+    memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
+    /* NOTE(review): 'i' counts 16-bit words (2 registers = 4 bytes) but
+    ** is added to a byte count here - verify ioc->len is what the
+    ** user-space consumer expects. */
+    ioc->len = i+sizeof(struct depca_init);
+    if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+      memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+    }
+
+    break;
+  default:
+    status = -EOPNOTSUPP;
+  }
+
+  return status;
+}
+
+#ifdef MODULE
+/* Loadable-module glue: one statically declared device probed at the
+** compiled-in (or insmod-overridden) io/irq values. */
+static char devicename[9] = { 0, };
+static struct device thisDepca = {
+  devicename, /* device name is inserted by /linux/drivers/net/net_init.c */
+  0, 0, 0, 0,
+  0x200, 7,   /* I/O address, IRQ */
+  0, 0, 0, NULL, depca_probe };
+
+static int irq=7;	/* EDIT THESE LINE FOR YOUR CONFIGURATION */
+static int io=0x200;    /* Or use the irq= io= options to insmod */
+
+/* See depca_probe() for autoprobe messages when a module */
+int
+init_module(void)
+{
+  /* Apply the insmod parameters, then register (which triggers
+  ** depca_probe via the init hook).  Returns -EIO on probe failure. */
+  thisDepca.irq=irq;
+  thisDepca.base_addr=io;
+
+  if (register_netdev(&thisDepca) != 0)
+    return -EIO;
+
+  return 0;
+}
+
+void
+cleanup_module(void)
+{
+  /* Release the private data, unregister the device and give back the
+  ** I/O region claimed at probe time. */
+  if (thisDepca.priv) {
+    kfree(thisDepca.priv);
+    thisDepca.priv = NULL;
+  }
+  thisDepca.irq=0;
+
+  unregister_netdev(&thisDepca);
+  release_region(thisDepca.base_addr, DEPCA_TOTAL_SIZE);
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c depca.c"
+ *
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c depca.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/depca.h b/linux/src/drivers/net/depca.h
new file mode 100644
index 0000000..012f739
--- /dev/null
+++ b/linux/src/drivers/net/depca.h
@@ -0,0 +1,185 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 David C. Davies. This software may be used and distributed
+ according to the terms of the GNU Public License, incorporated herein by
+ reference.
+*/
+
+/*
+** I/O addresses. Note that the 2k buffer option is not supported in
+** this driver.
+*/
+#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
+#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
+#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
+#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
+#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
+#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
+#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
+#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
+
+/*
+** These are LANCE registers addressable through DEPCA_ADDR
+*/
+#define CSR0 0
+#define CSR1 1
+#define CSR2 2
+#define CSR3 3
+
+/*
+** NETWORK INTERFACE CSR (NI_CSR) bit definitions
+*/
+
+#define TO 0x0100 /* Time Out for remote boot */
+#define SHE 0x0080 /* SHadow memory Enable */
+#define BS 0x0040 /* Bank Select */
+#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
+#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
+#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
+#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
+#define IM 0x0004 /* Interrupt Mask (1->mask) */
+#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
+#define LED 0x0001 /* LED control */
+
+/*
+** Control and Status Register 0 (CSR0) bit definitions
+*/
+
+#define ERR 0x8000 /* Error summary */
+#define BABL 0x4000 /* Babble transmitter timeout error */
+#define CERR 0x2000 /* Collision Error */
+#define MISS 0x1000 /* Missed packet */
+#define MERR 0x0800 /* Memory Error */
+#define RINT 0x0400 /* Receiver Interrupt */
+#define TINT 0x0200 /* Transmit Interrupt */
+#define IDON 0x0100 /* Initialization Done */
+#define INTR 0x0080 /* Interrupt Flag */
+#define INEA 0x0040 /* Interrupt Enable */
+#define RXON 0x0020 /* Receiver on */
+#define TXON 0x0010 /* Transmitter on */
+#define TDMD 0x0008 /* Transmit Demand */
+#define STOP 0x0004 /* Stop */
+#define STRT 0x0002 /* Start */
+#define INIT 0x0001 /* Initialize */
+#define INTM 0xff00 /* Interrupt Mask */
+#define INTE 0xfff0 /* Interrupt Enable */
+
+/*
+** CONTROL AND STATUS REGISTER 3 (CSR3)
+*/
+
+#define BSWP 0x0004 /* Byte SWaP */
+#define ACON 0x0002 /* ALE control */
+#define BCON 0x0001 /* Byte CONtrol */
+
+/*
+** Initialization Block Mode Register
+*/
+
+#define PROM 0x8000 /* Promiscuous Mode */
+#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
+#define INTL 0x0040 /* Internal Loopback */
+#define DRTY 0x0020 /* Disable Retry */
+#define COLL 0x0010 /* Force Collision */
+#define DTCR 0x0008 /* Disable Transmit CRC */
+#define LOOP 0x0004 /* Loopback */
+#define DTX 0x0002 /* Disable the Transmitter */
+#define DRX 0x0001 /* Disable the Receiver */
+
+/*
+** Receive Message Descriptor 1 (RMD1) bit definitions.
+*/
+
+#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define R_ERR 0x4000 /* Error Summary */
+#define R_FRAM 0x2000 /* Framing Error */
+#define R_OFLO 0x1000 /* Overflow Error */
+#define R_CRC 0x0800 /* CRC Error */
+#define R_BUFF 0x0400 /* Buffer Error */
+#define R_STP 0x0200 /* Start of Packet */
+#define R_ENP 0x0100 /* End of Packet */
+
+/*
+** Transmit Message Descriptor 1 (TMD1) bit definitions.
+*/
+
+#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define T_ERR 0x4000 /* Error Summary */
+#define T_ADD_FCS 0x2000 /* Add FCS (CRC) to the transmitted frame */
+#define T_MORE 0x1000 /* >1 retry to transmit packet */
+#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
+#define T_DEF 0x0400 /* Deferred */
+#define T_STP 0x02000000 /* Start of Packet */
+#define T_ENP 0x01000000 /* End of Packet */
+#define T_FLAGS 0xff000000 /* TX Flags Field */
+
+/*
+** Transmit Message Descriptor 3 (TMD3) bit definitions.
+*/
+
+#define TMD3_BUFF 0x8000 /* BUFFer error */
+#define TMD3_UFLO 0x4000 /* UnderFLOw error */
+#define TMD3_RES 0x2000 /* REServed */
+#define TMD3_LCOL 0x1000 /* Late COLlision */
+#define TMD3_LCAR 0x0800 /* Loss of CARrier */
+#define TMD3_RTRY 0x0400 /* ReTRY error */
+
+/*
+** EISA configuration Register (CNFG) bit definitions
+*/
+
+#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
+#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
+#define IRQ11 0x0040 /* Enable -> 1 */
+#define IRQ10 0x0020 /* Enable -> 1 */
+#define IRQ9 0x0010 /* Enable -> 1 */
+#define IRQ5 0x0008 /* Enable -> 1 */
+#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
+#define PADR16 0x0002 /* RAM on 64kB boundary */
+#define PADR17 0x0001 /* RAM on 128kB boundary */
+
+/*
+** Miscellaneous
+*/
+#define HASH_TABLE_LEN 64 /* Bits */
+#define HASH_BITS 0x003f /* 6 LS bits */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
+#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DEPCAIOCTL SIOCDEVPRIVATE
+
+/* Argument block for the DEPCAIOCTL private ioctl; embedded in the
+** ifreq by the caller.  'data' points into user space. */
+struct depca_ioctl {
+	unsigned short cmd;                /* Command to run */
+	unsigned short len;                /* Length of the data buffer */
+	unsigned char  *data;              /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
+#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
+#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DEPCA_GET_MCA 0x06 /* Get the multicast address table */
+#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
+#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */
+#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */
+#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
+#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DEPCA_GET_REG 0x0c /* Get the Register contents */
+#define DEPCA_SET_REG 0x0d /* Set the Register contents */
+#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
+
diff --git a/linux/src/drivers/net/e2100.c b/linux/src/drivers/net/e2100.c
new file mode 100644
index 0000000..be4185a
--- /dev/null
+++ b/linux/src/drivers/net/e2100.c
@@ -0,0 +1,456 @@
+/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
+/*
+ Written 1993-1994 by Donald Becker.
+
+ Copyright 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This is a driver for the Cabletron E2100 series ethercards.
+
+ The Author may be reached as becker@cesdis.gsfc.nasa.gov, or
+ C/O Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ The E2100 series ethercard is a fairly generic shared memory 8390
+ implementation. The only unusual aspect is the way the shared memory
+ registers are set: first you do an inb() in what is normally the
+ station address region, and the low three bits of next outb() *address*
+ is used as the write value for that register. Either someone wasn't
+ too used to dem bit en bites, or they were trying to obfuscate the
+ programming interface.
+
+ There is an additional complication when setting the window on the packet
+ buffer. You must first do a read into the packet buffer region with the
+ low 8 address bits the address setting the page for the start of the packet
+ buffer window, and then do the above operation. See mem_on() for details.
+
+ One bug on the chip is that even a hard reset won't disable the memory
+ window, usually resulting in a hung machine if mem_off() isn't called.
+ If this happens, you must power down the machine for about 30 seconds.
+*/
+
+static const char *version =
+ "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
+
+/* Offsets from the base_addr.
+ Read from the ASIC register, and the low three bits of the next outb()
+ address is used to set the corresponding register. */
+#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
+#define E21_ASIC 0x10
+#define E21_MEM_ENABLE 0x10
+#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
+#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
+#define E21_MEM_BASE 0x11
+#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
+#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
+#define E21_MEDIA 0x14 /* (alias). */
+#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
+#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
+#define E21_SAPROM 0x10 /* Offset to station address data. */
+#define E21_IO_EXTENT 0x20
+
+static inline void mem_on(short port, volatile char *mem_base,
+ unsigned char start_page )
+{
+ /* This is a little weird: set the shared memory window by doing a
+ read. The low address bits specify the starting page. */
+ mem_base[start_page];
+ inb(port + E21_MEM_ENABLE);
+ outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
+}
+
+static inline void mem_off(short port)
+{
+ inb(port + E21_MEM_ENABLE);
+ outb(0x00, port + E21_MEM_ENABLE);
+}
+
+/* In other drivers I put the TX pages first, but the E2100 window circuitry
+ is designed to have a 4K Tx region last. The windowing circuitry wraps the
+ window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
+ appear contiguously in the window. */
+#define E21_RX_START_PG 0x00 /* First page of RX buffer */
+#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
+#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
+#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
+
+int e2100_probe(struct device *dev);
+int e21_probe1(struct device *dev, int ioaddr);
+
+static int e21_open(struct device *dev);
+static void e21_reset_8390(struct device *dev);
+static void e21_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void e21_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void e21_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int e21_close(struct device *dev);
+
+
+/* Probe for the E2100 series ethercards. These cards have an 8390 at the
+ base address and the station address at both offset 0x10 and 0x18. I read
+ the station address from offset 0x18 to avoid the dataport of NE2000
+ ethercards, and look for Ctron's unique ID (first three octets of the
+ station address).
+ */
+
+int e2100_probe(struct device *dev)
+{
+ int *port;
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return e21_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (port = e21_probe_list; *port; port++) {
+ if (check_region(*port, E21_IO_EXTENT))
+ continue;
+ if (e21_probe1(dev, *port) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+int e21_probe1(struct device *dev, int ioaddr)
+{
+ int i, status;
+ unsigned char *station_addr = dev->dev_addr;
+ static unsigned version_printed = 0;
+
+ /* First check the station address for the Ctron prefix. */
+ if (inb(ioaddr + E21_SAPROM + 0) != 0x00
+ || inb(ioaddr + E21_SAPROM + 1) != 0x00
+ || inb(ioaddr + E21_SAPROM + 2) != 0x1d)
+ return ENODEV;
+
+ /* Verify by making certain that there is a 8390 at there. */
+ outb(E8390_NODMA + E8390_STOP, ioaddr);
+ SLOW_DOWN_IO;
+ status = inb(ioaddr);
+ if (status != 0x21 && status != 0x23)
+ return ENODEV;
+
+ /* Read the station address PROM. */
+ for (i = 0; i < 6; i++)
+ station_addr[i] = inb(ioaddr + E21_SAPROM + i);
+
+ inb(ioaddr + E21_MEDIA); /* Point to media selection. */
+ outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("e2100.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ printk("%s: E21** at %#3x,", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %02X", station_addr[i]);
+
+ if (dev->irq < 2) {
+ int irqlist[] = {15,11,10,12,5,9,3,4}, i;
+ for (i = 0; i < 8; i++)
+ if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
+ dev->irq = irqlist[i];
+ break;
+ }
+ if (i >= 8) {
+ printk(" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+ } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* Grab the region so we can find a different board if IRQ select fails. */
+ request_region(ioaddr, E21_IO_EXTENT, "e2100");
+
+ /* The 8390 is at the base address. */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "E2100";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = E21_TX_START_PG;
+ ei_status.rx_start_page = E21_RX_START_PG;
+ ei_status.stop_page = E21_RX_STOP_PG;
+ ei_status.saved_irq = dev->irq;
+
+ /* Check the media port used. The port can be passed in on the
+ low mem_end bits. */
+ if (dev->mem_end & 15)
+ dev->if_port = dev->mem_end & 7;
+ else {
+ dev->if_port = 0;
+ inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
+ for(i = 0; i < 6; i++)
+ if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
+ dev->if_port = 1;
+ break;
+ }
+ }
+
+ /* Never map in the E21 shared memory unless you are actively using it.
+   Also, the shared memory has effectively only one setting -- spread all
+ over the 128K region! */
+ if (dev->mem_start == 0)
+ dev->mem_start = 0xd0000;
+
+#ifdef notdef
+ /* These values are unused. The E2100 has a 2K window into the packet
+ buffer. The window can be set to start on any page boundary. */
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start + 2*1024;
+#endif
+
+ printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
+ dev->if_port ? "secondary" : "primary", dev->mem_start);
+
+ ei_status.reset_8390 = &e21_reset_8390;
+ ei_status.block_input = &e21_block_input;
+ ei_status.block_output = &e21_block_output;
+ ei_status.get_8390_hdr = &e21_get_8390_hdr;
+ dev->open = &e21_open;
+ dev->stop = &e21_close;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static int
+e21_open(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "e2100", NULL)) {
+ return EBUSY;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Set the interrupt line and memory base on the hardware. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
+ + (dev->if_port ? E21_ALT_IFPORT : 0));
+ inb(ioaddr + E21_MEM_BASE);
+ outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
+
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+e21_reset_8390(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outb(0x01, ioaddr);
+ if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. We put the 2k window so the header page
+ appears at the start of the shared memory. */
+
+static void
+e21_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ short ioaddr = dev->base_addr;
+ char *shared_mem = (char *)dev->mem_start;
+
+ mem_on(ioaddr, shared_mem, ring_page);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(shared_mem);
+#endif
+
+ /* Turn off memory access: we would need to reprogram the window anyway. */
+ mem_off(ioaddr);
+
+}
+
+/* Block input and output are easy on shared memory ethercards.
+ The E21xx makes block_input() especially easy by wrapping the top
+ ring buffer to the bottom automatically. */
+static void
+e21_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ short ioaddr = dev->base_addr;
+ char *shared_mem = (char *)dev->mem_start;
+
+ mem_on(ioaddr, shared_mem, (ring_offset>>8));
+
+ /* Packet is always in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0);
+
+ mem_off(ioaddr);
+}
+
+static void
+e21_block_output(struct device *dev, int count, const unsigned char *buf,
+ const int start_page)
+{
+ short ioaddr = dev->base_addr;
+ volatile char *shared_mem = (char *)dev->mem_start;
+
+ /* Set the shared memory window start by doing a read, with the low address
+ bits specifying the starting page. */
+ readb(shared_mem + start_page);
+ mem_on(ioaddr, shared_mem, start_page);
+
+ memcpy_toio(shared_mem, buf, count);
+ mem_off(ioaddr);
+}
+
+static int
+e21_close(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ free_irq(dev->irq, NULL);
+ dev->irq = ei_status.saved_irq;
+
+ /* Shut off the interrupt line and secondary interface. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC);
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC);
+
+ irq2dev_map[dev->irq] = NULL;
+
+ ei_close(dev);
+
+ /* Double-check that the memory has been turned off, because really
+ really bad things happen if it isn't. */
+ mem_off(ioaddr);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry e21_drv =
+{"e21", e21_probe1, E21_IO_EXTENT, e21_probe_list};
+#endif
+
+
+#ifdef MODULE
+#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_E21_CARDS] = { 0, };
+static struct device dev_e21[MAX_E21_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_E21_CARDS] = { 0, };
+static int irq[MAX_E21_CARDS] = { 0, };
+static int mem[MAX_E21_CARDS] = { 0, };
+static int xcvr[MAX_E21_CARDS] = { 0, }; /* choose int. or ext. xcvr */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct device *dev = &dev_e21[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ dev->init = e2100_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct device *dev = &dev_e21[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: e21_close() handles free_irq + irq2dev map */
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(dev->base_addr, E21_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c e2100.c"
+ * version-control: t
+ * tab-width: 4
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/eepro.c b/linux/src/drivers/net/eepro.c
new file mode 100644
index 0000000..3d4fc57
--- /dev/null
+++ b/linux/src/drivers/net/eepro.c
@@ -0,0 +1,1407 @@
+/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
+/*
+ Written 1994-1998 by Bao C. Ha.
+
+ Copyright (C) 1994-1998 by Bao C. Ha.
+
+ This software may be used and distributed
+ according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ The author may be reached at bao@hacom.net
+ or Hacom, 2477 Wrightsboro Rd., Augusta, GA 30904.
+
+ Things remaining to do:
+ Better record keeping of errors.
+ Eliminate transmit interrupt to reduce overhead.
+ Implement "concurrent processing". I won't be doing it!
+
+ Bugs:
+
+ If you have a problem of not detecting the 82595 during a
+ reboot (warm reset), disable the FLASH memory should fix it.
+ This is a compatibility hardware problem.
+
+ Versions:
+
+ 0.10c Some cosmetic changes. (9/28/98, BCH)
+
+ 0.10b Should work now with (some) Pro/10+. At least for
+ me (and my two cards) it does. _No_ guarantee for
+ function with non-Pro/10+ cards! (don't have any)
+ (RMC, 9/11/96)
+
+ 0.10 Added support for the Etherexpress Pro/10+. The
+ IRQ map was changed significantly from the old
+ pro/10. The new interrupt map was provided by
+ Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu).
+ (BCH, 9/3/96)
+
+ 0.09 Fixed a race condition in the transmit algorithm,
+ which causes crashes under heavy load with fast
+ pentium computers. The performance should also
+ improve a bit. The size of RX buffer, and hence
+ TX buffer, can also be changed via lilo or insmod.
+ (BCH, 7/31/96)
+
+ 0.08 Implement 32-bit I/O for the 82595TX and 82595FX
+ based lan cards. Disable full-duplex mode if TPE
+ is not used. (BCH, 4/8/96)
+
+ 0.07a Fix a stat report which counts every packet as a
+ heart-beat failure. (BCH, 6/3/95)
+
+ 0.07 Modified to support all other 82595-based lan cards.
+ The IRQ vector of the EtherExpress Pro will be set
+ according to the value saved in the EEPROM. For other
+ cards, I will do autoirq_request() to grab the next
+ available interrupt vector. (BCH, 3/17/95)
+
+ 0.06a,b Interim released. Minor changes in the comments and
+ print out format. (BCH, 3/9/95 and 3/14/95)
+
+ 0.06 First stable release that I am comfortable with. (BCH,
+ 3/2/95)
+
+ 0.05 Complete testing of multicast. (BCH, 2/23/95)
+
+ 0.04 Adding multicast support. (BCH, 2/14/95)
+
+ 0.03 First widely alpha release for public testing.
+ (BCH, 2/14/95)
+
+*/
+
+static const char *version =
+ "eepro.c: v0.10c 9/28/98 Bao C. Ha (bao@hacom.net)\n";
+
+#include <linux/module.h>
+
+/*
+ Sources:
+
+ This driver wouldn't have been written without the availability
+ of the Crynwr's Lan595 driver source code. It helps me to
+ familiarize with the 82595 chipset while waiting for the Intel
+ documentation. I also learned how to detect the 82595 using
+ the packet driver's technique.
+
+ This driver is written by cutting and pasting the skeleton.c driver
+ provided by Donald Becker. I also borrowed the EEPROM routine from
+ Donald Becker's 82586 driver.
+
+ Datasheet for the Intel 82595 (including the TX and FX version). It
+ provides just enough info that the casual reader might think that it
+ documents the i82595.
+
+ The User Manual for the 82595. It provides a lot of the missing
+ information.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* First, a few definitions that the brave might change. */
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int eepro_portlist[] =
+ { 0x300, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+
+#define EEPRO_IO_EXTENT 16
+
+/* Different 82595 chips */
+
+#define LAN595 0
+#define LAN595TX 1
+#define LAN595FX 2
+
+/* Information that needs to be kept for each board. */
+struct eepro_local {
+ struct enet_statistics stats;
+ unsigned rx_start;
+ unsigned tx_start; /* start of the transmit chain */
+ int tx_last; /* pointer to last packet in the transmit chain */
+ unsigned tx_end; /* end of the transmit chain (plus 1) */
+ int eepro; /* 1 for the EtherExpress Pro/10,
+ 2 for the EtherExpress Pro/10+,
+ 0 for other 82595-based lan cards. */
+ int version; /* a flag to indicate if this is a TX or FX
+ version of the 82595 chip. */
+ int stepping;
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+
+#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */
+#define SA_ADDR1 0xaa
+#define SA_ADDR2 0x00
+
+#define SA2_ADDR0 0x00 /* Etherexpress Pro/10+ */
+#define SA2_ADDR1 0xa0
+#define SA2_ADDR2 0xc9
+
+#define SA3_ADDR0 0x00 /* more Etherexpress Pro/10+ */
+#define SA3_ADDR1 0xaa
+#define SA3_ADDR2 0x00
+#define SA3_ADDR3 0xc9
+
+/* Index to functions, as function prototypes. */
+
+extern int eepro_probe(struct device *dev);
+
+static int eepro_probe1(struct device *dev, short ioaddr);
+static int eepro_open(struct device *dev);
+static int eepro_send_packet(struct sk_buff *skb, struct device *dev);
+static void eepro_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void eepro_rx(struct device *dev);
+static void eepro_transmit_interrupt(struct device *dev);
+static int eepro_close(struct device *dev);
+static struct enet_statistics *eepro_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+static int read_eeprom(int ioaddr, int location);
+static void hardware_send_packet(struct device *dev, void *buf, short length);
+static int eepro_grab_irq(struct device *dev);
+
+/*
+ Details of the i82595.
+
+You will need either the datasheet or the user manual to understand what
+is going on here. The 82595 is very different from the 82586, 82593.
+
+The receive algorithm in eepro_rx() is just an implementation of the
+RCV ring structure that the Intel 82595 imposes at the hardware level.
+The receive buffer is set at 24K, and the transmit buffer is 8K. I
+am assuming that the total buffer memory is 32K, which is true for the
+Intel EtherExpress Pro/10. If it is less than that on a generic card,
+the driver will be broken.
+
+The transmit algorithm in the hardware_send_packet() is similar to the
+one in the eepro_rx(). The transmit buffer is a ring linked list.
+I just queue the next available packet to the end of the list. In my
+system, the 82595 is so fast that the list seems to always contain a
+single packet. In other systems with faster computers and more congested
+network traffics, the ring linked list should improve performance by
+allowing up to 8K worth of packets to be queued.
+
+The sizes of the receive and transmit buffers can now be changed via lilo
+or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0"
+where rx-buffer is in KB unit. Modules uses the parameter mem which is
+also in KB unit, for example "insmod io=io-address irq=0 mem=rx-buffer."
+The receive buffer has to be more than 3K or less than 29K. Otherwise,
+it is reset to the default of 24K, and, hence, 8K for the transmit
+buffer (transmit-buffer = 32K - receive-buffer).
+
+*/
+
+#define RAM_SIZE 0x8000
+#define RCV_HEADER 8
+#define RCV_RAM 0x6000 /* 24KB default for RCV buffer */
+#define RCV_LOWER_LIMIT 0x00 /* 0x0000 */
+
+/* #define RCV_UPPER_LIMIT ((RCV_RAM - 2) >> 8) */ /* 0x5ffe */
+#define RCV_UPPER_LIMIT (((rcv_ram) - 2) >> 8)
+
+/* #define XMT_RAM (RAM_SIZE - RCV_RAM) */ /* 8KB for XMT buffer */
+#define XMT_RAM (RAM_SIZE - (rcv_ram)) /* 8KB for XMT buffer */
+
+/* #define XMT_LOWER_LIMIT (RCV_RAM >> 8) */ /* 0x6000 */
+#define XMT_LOWER_LIMIT ((rcv_ram) >> 8)
+#define XMT_UPPER_LIMIT ((RAM_SIZE - 2) >> 8) /* 0x7ffe */
+#define XMT_HEADER 8
+
+#define RCV_DONE 0x0008
+#define RX_OK 0x2000
+#define RX_ERROR 0x0d81
+
+#define TX_DONE_BIT 0x0080
+#define CHAIN_BIT 0x8000
+#define XMT_STATUS 0x02
+#define XMT_CHAIN 0x04
+#define XMT_COUNT 0x06
+
+#define BANK0_SELECT 0x00
+#define BANK1_SELECT 0x40
+#define BANK2_SELECT 0x80
+
+/* Bank 0 registers */
+
+#define COMMAND_REG 0x00 /* Register 0 */
+#define MC_SETUP 0x03
+#define XMT_CMD 0x04
+#define DIAGNOSE_CMD 0x07
+#define RCV_ENABLE_CMD 0x08
+#define RCV_DISABLE_CMD 0x0a
+#define STOP_RCV_CMD 0x0b
+#define RESET_CMD 0x0e
+#define POWER_DOWN_CMD 0x18
+#define RESUME_XMT_CMD 0x1c
+#define SEL_RESET_CMD 0x1e
+#define STATUS_REG 0x01 /* Register 1 */
+#define RX_INT 0x02
+#define TX_INT 0x04
+#define EXEC_STATUS 0x30
+#define ID_REG 0x02 /* Register 2 */
+#define R_ROBIN_BITS 0xc0 /* round robin counter */
+#define ID_REG_MASK 0x2c
+#define ID_REG_SIG 0x24
+#define AUTO_ENABLE 0x10
+#define INT_MASK_REG 0x03 /* Register 3 */
+#define RX_STOP_MASK 0x01
+#define RX_MASK 0x02
+#define TX_MASK 0x04
+#define EXEC_MASK 0x08
+#define ALL_MASK 0x0f
+#define IO_32_BIT 0x10
+#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
+#define RCV_STOP 0x06
+#define XMT_BAR 0x0a
+#define HOST_ADDRESS_REG 0x0c
+#define IO_PORT 0x0e
+#define IO_PORT_32_BIT 0x0c
+
+/* Bank 1 registers */
+
+#define REG1 0x01
+#define WORD_WIDTH 0x02
+#define INT_ENABLE 0x80
+#define INT_NO_REG 0x02
+#define RCV_LOWER_LIMIT_REG 0x08
+#define RCV_UPPER_LIMIT_REG 0x09
+#define XMT_LOWER_LIMIT_REG 0x0a
+#define XMT_UPPER_LIMIT_REG 0x0b
+
+/* Bank 2 registers */
+
+#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
+#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
+#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
+#define REG2 0x02
+#define PRMSC_Mode 0x01
+#define Multi_IA 0x20
+#define REG3 0x03
+#define TPE_BIT 0x04
+#define BNC_BIT 0x20
+#define REG13 0x0d
+#define FDX 0x00
+#define A_N_ENABLE 0x02
+
+#define I_ADD_REG0 0x04
+#define I_ADD_REG1 0x05
+#define I_ADD_REG2 0x06
+#define I_ADD_REG3 0x07
+#define I_ADD_REG4 0x08
+#define I_ADD_REG5 0x09
+
+#define EEPROM_REG 0x0a
+#define EESK 0x01
+#define EECS 0x02
+#define EEDI 0x04
+#define EEDO 0x08
+
+/* Check for a network adaptor of this type, and return '0' if one exists.
+
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+
+ */
+
+#ifdef HAVE_DEVLIST
+
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+
+struct netdev_entry netcard_drv =
+{"eepro", eepro_probe1, EEPRO_IO_EXTENT, eepro_portlist};
+
+#else
+
+int
+eepro_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return eepro_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; eepro_portlist[i]; i++) {
+ int ioaddr = eepro_portlist[i];
+ if (check_region(ioaddr, EEPRO_IO_EXTENT))
+ continue;
+
+ if (eepro_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* This is the real probe routine. Linux has a history of friendly device
+   probes on the ISA bus.  A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+int
+eepro_probe1(struct device *dev, short ioaddr)
+{
+ unsigned short station_addr[6], id, counter;
+ int i;
+ int eepro;
+ const char *ifmap[] = {"AUI", "10Base2", "10BaseT"};
+ enum iftype { AUI=0, BNC=1, TPE=2 };
+
+ /* Now, we are going to check for the signature of the
+ ID_REG (register 2 of bank 0) */
+ if (((id=inb(ioaddr + ID_REG)) & ID_REG_MASK) == ID_REG_SIG) {
+
+ /* We seem to have the 82595 signature, let's
+ play with its counter (last 2 bits of
+ register 2 of bank 0) to be sure. */
+
+ counter = (id & R_ROBIN_BITS);
+ if (((id=inb(ioaddr+ID_REG)) & R_ROBIN_BITS) ==
+ (counter + 0x40)) {
+
+ /* Yes, the 82595 has been found */
+
+ /* Now, get the ethernet hardware address from
+ the EEPROM */
+
+ station_addr[0] = read_eeprom(ioaddr, 2);
+ station_addr[1] = read_eeprom(ioaddr, 3);
+ station_addr[2] = read_eeprom(ioaddr, 4);
+
+ /* Check the station address for the manufacturer's code */
+
+ if ((station_addr[2] == 0x00aa) && (station_addr[1]!= 0x00c9)) {
+ eepro = 1;
+ printk("%s: Intel EtherExpress Pro/10 ISA at %#x,",
+ dev->name, ioaddr);
+ } else
+ if ( (station_addr[2] == 0x00a0)
+ || ((station_addr[2] == 0x00aa) && (station_addr[1] == 0x00c9) )) {
+ eepro = 2;
+ printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
+ dev->name, ioaddr);
+ }
+ else {
+ eepro = 0;
+ printk("%s: Intel 82595-based lan card at %#x,",
+ dev->name, ioaddr);
+ }
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ for (i=0; i < 6; i++) {
+ dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+ }
+
+ if ((dev->mem_end & 0x3f) < 3 || /* RX buffer must be more than 3K */
+ (dev->mem_end & 0x3f) > 29) /* and less than 29K */
+ dev->mem_end = RCV_RAM; /* or it will be set to 24K */
+ else dev->mem_end = 1024*dev->mem_end; /* Maybe I should shift << 10 */
+
+ /* From now on, dev->mem_end contains the actual size of rx buffer */
+
+ if (net_debug > 3)
+ printk(", %dK RCV buffer", (int)(dev->mem_end)/1024);
+
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ id = inb(ioaddr + REG3);
+ if (id & TPE_BIT)
+ dev->if_port = TPE;
+ else dev->if_port = BNC;
+
+ if (net_debug>3)
+ printk("id: %x\n", id);
+
+ if (dev->irq < 2 && eepro) {
+ i = read_eeprom(ioaddr, 1);
+ if (eepro == 1)
+ switch (i & 0x07) {
+ case 0: dev->irq = 9; break;
+ case 1: dev->irq = 3; break;
+ case 2: dev->irq = 5; break;
+ case 3: dev->irq = 10; break;
+ case 4: dev->irq = 11; break;
+ default: /* should never get here !!!!! */
+ printk(" illegal interrupt vector stored in EEPROM.\n");
+ return ENODEV;
+ }
+ else switch (i & 0x07) {
+ case 0: dev->irq = 3; break;
+ case 1: dev->irq = 4; break;
+ case 2: dev->irq = 5; break;
+ case 3: dev->irq = 7; break;
+ case 4: dev->irq = 9; break;
+ case 5: dev->irq = 10; break;
+ case 6: dev->irq = 11; break;
+ case 7: dev->irq = 12; break;
+ }
+ }
+ else if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (dev->irq > 2) {
+ printk(", IRQ %d, %s.\n", dev->irq,
+ ifmap[dev->if_port]);
+ if (request_irq(dev->irq, &eepro_interrupt, 0, "eepro", NULL)) {
+ printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ }
+ else printk(", %s.\n", ifmap[dev->if_port]);
+
+ if ((dev->mem_start & 0xf) > 0) /* I don't know if this is */
+ net_debug = dev->mem_start & 7; /* still useful or not */
+
+ if (net_debug > 3) {
+ i = read_eeprom(ioaddr, 5);
+ if (i & 0x2000) /* bit 13 of EEPROM word 5 */
+ printk("%s: Concurrent Processing is enabled but not used!\n",
+ dev->name);
+ }
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ request_region(ioaddr, EEPRO_IO_EXTENT, "eepro");
+
+ /* Initialize the device structure */
+ dev->priv = kmalloc(sizeof(struct eepro_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct eepro_local));
+
+ dev->open = eepro_open;
+ dev->stop = eepro_close;
+ dev->hard_start_xmit = eepro_send_packet;
+ dev->get_stats = eepro_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of the device structure with
+ ethernet generic values */
+
+ ether_setup(dev);
+
+ outb(RESET_CMD, ioaddr); /* RESET the 82595 */
+
+ return 0;
+ }
+ else return ENODEV;
+ }
+ else if (net_debug > 3)
+ printk ("EtherExpress Pro probed failed!\n");
+ return ENODEV;
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is non-reboot way to recover if something goes wrong.
+ */
+
+static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
+static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
+
+static int
+eepro_grab_irq(struct device *dev)
+{
+ int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12 };
+ int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
+
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Enable the interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg | INT_ENABLE, ioaddr + REG1);
+
+ outb(BANK0_SELECT, ioaddr); /* be CAREFUL, BANK 0 now */
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Let EXEC event to interrupt */
+ outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG);
+
+ do {
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+ if (request_irq (*irqp, NULL, 0, "bogus", NULL) != EBUSY) {
+ /* Twinkle the interrupt, and check if it's seen */
+ autoirq_setup(0);
+ outb(DIAGNOSE_CMD, ioaddr); /* RESET the 82595 */
+
+ if (*irqp == autoirq_report(2) && /* It's a good IRQ line */
+ (request_irq(dev->irq = *irqp, &eepro_interrupt, 0, "eepro", NULL) == 0))
+ break;
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+ }
+ } while (*++irqp);
+
+ outb(BANK1_SELECT, ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Mask all the interrupts. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ return dev->irq;
+}
+
+static int
+eepro_open(struct device *dev)
+{
+ unsigned short temp_reg, old8, old9;
+ int i, ioaddr = dev->base_addr, rcv_ram = dev->mem_end;
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+
+ if (net_debug > 3)
+ printk("eepro: entering eepro_open routine.\n");
+
+ if ((dev->dev_addr[0] == SA_ADDR0 &&
+ dev->dev_addr[1] == SA_ADDR1 &&
+ dev->dev_addr[2] == SA_ADDR2)&&
+ (dev->dev_addr[3] != SA3_ADDR3))
+ {
+ lp->eepro = 1;
+ if (net_debug > 3) printk("p->eepro = 1;\n");
+ } /* Yes, an Intel EtherExpress Pro/10 */
+
+ else if ((dev->dev_addr[0] == SA2_ADDR0 &&
+ dev->dev_addr[1] == SA2_ADDR1 &&
+ dev->dev_addr[2] == SA2_ADDR2)||
+ (dev->dev_addr[0] == SA3_ADDR0 &&
+ dev->dev_addr[1] == SA3_ADDR1 &&
+ dev->dev_addr[2] == SA3_ADDR2 &&
+ dev->dev_addr[3] == SA3_ADDR3))
+ {
+ lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */
+ if (net_debug > 3) printk("p->eepro = 2;\n");
+ }
+
+ else lp->eepro = 0; /* No, it is a generic 82585 lan card */
+
+ /* Get the interrupt vector for the 82595 */
+ if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
+ printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ if (irq2dev_map[dev->irq] != 0
+ || (irq2dev_map[dev->irq] = dev) == 0)
+ return -EAGAIN;
+
+ /* Initialize the 82595. */
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + EEPROM_REG);
+ lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */
+
+ if (net_debug > 3)
+ printk("The stepping of the 82595 is %d\n", lp->stepping);
+ if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
+ outb(temp_reg & 0xef, ioaddr + EEPROM_REG);
+ for (i=0; i < 6; i++)
+ outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
+
+ temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
+ outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
+ | RCV_Discard_BadFrame, ioaddr + REG1);
+ temp_reg = inb(ioaddr + REG2); /* Match broadcast */
+ outb(temp_reg | 0x14, ioaddr + REG2);
+ temp_reg = inb(ioaddr + REG3);
+ outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
+
+ /* Set the receiving mode */
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Set the interrupt vector */
+ temp_reg = inb(ioaddr + INT_NO_REG);
+
+ if (lp->eepro == 2)
+ outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+
+ if (lp->eepro == 2)
+ outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+ if (net_debug > 3)
+ printk("eepro_open: content of INT Reg is %x\n", temp_reg);
+
+
+ /* Initialize the RCV and XMT upper and lower limits */
+ outb(RCV_LOWER_LIMIT, ioaddr + RCV_LOWER_LIMIT_REG);
+ outb(RCV_UPPER_LIMIT, ioaddr + RCV_UPPER_LIMIT_REG);
+ outb(XMT_LOWER_LIMIT, ioaddr + XMT_LOWER_LIMIT_REG);
+ outb(XMT_UPPER_LIMIT, ioaddr + XMT_UPPER_LIMIT_REG);
+
+ /* Enable the interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg | INT_ENABLE, ioaddr + REG1);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Let RX and TX events to interrupt */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Initialize RCV */
+ outw(RCV_LOWER_LIMIT << 8, ioaddr + RCV_BAR);
+ lp->rx_start = (RCV_LOWER_LIMIT << 8) ;
+ outw((RCV_UPPER_LIMIT << 8) | 0xfe, ioaddr + RCV_STOP);
+
+ /* Initialize XMT */
+ outw(XMT_LOWER_LIMIT << 8, ioaddr + XMT_BAR);
+
+ /* Check for the i82595TX and i82595FX */
+ old8 = inb(ioaddr + 8);
+ outb(~old8, ioaddr + 8);
+
+ if ((temp_reg = inb(ioaddr + 8)) == old8) {
+ if (net_debug > 3)
+ printk("i82595 detected!\n");
+ lp->version = LAN595;
+ }
+ else {
+ lp->version = LAN595TX;
+ outb(old8, ioaddr + 8);
+ old9 = inb(ioaddr + 9);
+ outb(~old9, ioaddr + 9);
+
+ if (((temp_reg = inb(ioaddr + 9)) == ( (~old9)&0xff) )) {
+ enum iftype { AUI=0, BNC=1, TPE=2 };
+
+ if (net_debug > 3) {
+ printk("temp_reg: %#x ~old9: %#x\n",temp_reg, ~old9);
+ printk("i82595FX detected!\n");
+ }
+
+ lp->version = LAN595FX;
+ outb(old9, ioaddr + 9);
+
+ if (dev->if_port != TPE) { /* Hopefully, this will fix the
+ problem of using Pentiums and
+ pro/10 w/ BNC. */
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + REG13);
+
+ /* disable the full duplex mode since it is not
+ applicable with the 10Base2 cable. */
+ outb(temp_reg & ~(FDX | A_N_ENABLE), REG13);
+ outb(BANK0_SELECT, ioaddr); /* be CAREFUL, BANK 0 now */
+ }
+ }
+ else if (net_debug > 3) {
+ printk("temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff));
+ printk("i82595TX detected!\n");
+ }
+ }
+
+ outb(SEL_RESET_CMD, ioaddr);
+
+ /* We are supposed to wait for 2 us after a SEL_RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8; /* or = RCV_RAM */
+ lp->tx_last = 0;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ if (net_debug > 3)
+ printk("eepro: exiting eepro_open routine.\n");
+
+ outb(RCV_ENABLE_CMD, ioaddr);
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+eepro_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int rcv_ram = dev->mem_end;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_send_packet routine.\n");
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+
+ int tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 40)
+ return 1;
+
+ if (net_debug > 1)
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ "network cable problem");
+
+ lp->stats.tx_errors++;
+
+ /* Try to restart the adaptor. */
+ outb(SEL_RESET_CMD, ioaddr);
+
+ /* We are supposed to wait for 2 us after a SEL_RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ /* Do I also need to flush the transmit buffers here? YES? */
+ lp->tx_start = lp->tx_end = rcv_ram;
+ lp->tx_last = 0;
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ outb(RCV_ENABLE_CMD, ioaddr);
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. */
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+ /* lp->stats.tx_aborted_errors++; */
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_send_packet routine.\n");
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+
+static void
+eepro_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int ioaddr, status, boguscount = 20;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_interrupt routine.\n");
+
+ if (dev == NULL) {
+ printk ("eepro_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+
+ do {
+ status = inb(ioaddr + STATUS_REG);
+
+ if (status & RX_INT) {
+ if (net_debug > 4)
+ printk("eepro: packet received interrupt.\n");
+ /* Acknowledge the RX_INT */
+ outb(RX_INT, ioaddr + STATUS_REG);
+ /* Get the received packets */
+ eepro_rx(dev);
+ }
+ else if (status & TX_INT) {
+ if (net_debug > 4)
+ printk("eepro: packet transmit interrupt.\n");
+ /* Acknowledge the TX_INT */
+ outb(TX_INT, ioaddr + STATUS_REG);
+ /* Process the status of transmitted packets */
+ eepro_transmit_interrupt(dev);
+ }
+
+ } while ((boguscount-- > 0) && (status & 0x06));
+
+ dev->interrupt = 0;
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_interrupt routine.\n");
+
+ return;
+}
+
+static int
+eepro_close(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int rcv_ram = dev->mem_end;
+ short temp_reg;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(BANK1_SELECT, ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Flush the Tx and disable Rx. */
+ outb(STOP_RCV_CMD, ioaddr);
+
+ lp->tx_start = lp->tx_end = rcv_ram ;
+ lp->tx_last = 0;
+
+ /* Mask all the interrupts. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Reset the 82595 */
+ outb(RESET_CMD, ioaddr);
+
+ /* release the interrupt */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. What statistics? */
+ /* We are supposed to wait for 200 us after a RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO; /* May not be enough? */
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+eepro_get_stats(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void
+set_multicast_list(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ unsigned short mode;
+ struct dev_mc_list *dmi=dev->mc_list;
+
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. If it was a promisc request the
+ * flag is already set. If not we assert it.
+ */
+ dev->flags|=IFF_PROMISC;
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | PRMSC_Mode, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ printk("%s: promiscuous mode enabled.\n", dev->name);
+ }
+
+ else if (dev->mc_count==0 )
+ {
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ }
+
+ else
+ {
+ unsigned short status, *eaddrs;
+ int i, boguscount = 0;
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | Multi_IA, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
+ outw(MC_SETUP, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+
+ for (i = 0; i < dev->mc_count; i++)
+ {
+ eaddrs=(unsigned short *)dmi->dmi_addr;
+ dmi=dmi->next;
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ }
+
+ eaddrs = (unsigned short *) dev->dev_addr;
+ outw(eaddrs[0], ioaddr + IO_PORT);
+ outw(eaddrs[1], ioaddr + IO_PORT);
+ outw(eaddrs[2], ioaddr + IO_PORT);
+ outw(lp->tx_end, ioaddr + XMT_BAR);
+ outb(MC_SETUP, ioaddr);
+
+ /* Update the transmit queue */
+ i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+
+ if (lp->tx_start != lp->tx_end)
+ {
+ /* update the next address and the chain bit in the
+ last packet */
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(i, ioaddr + IO_PORT);
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+ lp->tx_end = i ;
+ }
+ else {
+ lp->tx_start = lp->tx_end = i ;
+ }
+
+ /* Acknowledge that the MC setup is done */
+ do { /* We should be doing this in the eepro_interrupt()! */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ if (inb(ioaddr + STATUS_REG) & 0x08)
+ {
+ i = inb(ioaddr);
+ outb(0x08, ioaddr + STATUS_REG);
+
+ if (i & 0x20) { /* command ABORTed */
+ printk("%s: multicast setup failed.\n",
+ dev->name);
+ break;
+ } else if ((i & 0x0f) == 0x03) { /* MC-Done */
+ printk("%s: set Rx mode to %d addresses.\n",
+ dev->name, dev->mc_count);
+ break;
+ }
+ }
+ } while (++boguscount < 100);
+
+ /* Re-enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ }
+ outb(RCV_ENABLE_CMD, ioaddr);
+}
+
+/* The horrible routine to read a word from the serial EEPROM. */
+/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
+/* The delay between EEPROM clock transitions. */
+
+#define eeprom_delay() { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
+#define EE_READ_CMD (6 << 6)
+
+int
+read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ short ee_addr = ioaddr + EEPROM_REG;
+ int read_cmd = location | EE_READ_CMD;
+ short ctrl_val = EECS ;
+
+ outb(BANK2_SELECT, ioaddr);
+ outb(ctrl_val, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 8; i >= 0; i--) {
+ short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
+ : ctrl_val;
+ outb(outval, ee_addr);
+ outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+ outb(outval, ee_addr); /* Finish EEPROM a clock tick. */
+ eeprom_delay();
+ }
+ outb(ctrl_val, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(ctrl_val | EESK, ee_addr); eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
+ outb(ctrl_val, ee_addr); eeprom_delay();
+ }
+ /* Terminate the EEPROM access. */
+ ctrl_val &= ~EECS;
+ outb(ctrl_val | EESK, ee_addr);
+ eeprom_delay();
+ outb(ctrl_val, ee_addr);
+ eeprom_delay();
+ outb(BANK0_SELECT, ioaddr);
+ return retval;
+}
+
+static void
+hardware_send_packet(struct device *dev, void *buf, short length)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ int rcv_ram = dev->mem_end;
+ unsigned status, tx_available, last, end, boguscount = 100;
+
+ if (net_debug > 5)
+ printk("eepro: entering hardware_send_packet routine.\n");
+
+ while (boguscount-- > 0) {
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ if (dev->interrupt == 1) {
+ /* Enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+ continue;
+ }
+
+ /* determine how much of the transmit buffer space is available */
+ if (lp->tx_end > lp->tx_start)
+ tx_available = XMT_RAM - (lp->tx_end - lp->tx_start);
+ else if (lp->tx_end < lp->tx_start)
+ tx_available = lp->tx_start - lp->tx_end;
+ else tx_available = XMT_RAM;
+
+ if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER)
+ >= tx_available) /* No space available ??? */
+ {
+ eepro_transmit_interrupt(dev); /* Clean up the transmitting queue */
+ /* Enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+ continue;
+ }
+
+ last = lp->tx_end;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+ if (end >= RAM_SIZE) { /* the transmit buffer is wrapped around */
+
+ if ((RAM_SIZE - last) <= XMT_HEADER) {
+ /* Arrrr!!!, must keep the xmt header together,
+ several days were lost to chase this one down. */
+ last = rcv_ram;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+ }
+
+ else end = rcv_ram + (end - RAM_SIZE);
+ }
+
+ outw(last, ioaddr + HOST_ADDRESS_REG);
+ outw(XMT_CMD, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(end, ioaddr + IO_PORT);
+ outw(length, ioaddr + IO_PORT);
+
+ if (lp->version == LAN595)
+ outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
+
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ /* A dummy read to flush the DRAM write pipeline */
+ status = inw(ioaddr + IO_PORT);
+
+ if (lp->tx_start == lp->tx_end) {
+ outw(last, ioaddr + XMT_BAR);
+ outb(XMT_CMD, ioaddr);
+ lp->tx_start = last; /* I don't like to change tx_start here */
+ }
+ else {
+ /* update the next address and the chain bit in the
+ last packet */
+
+ if (lp->tx_end != last) {
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(last, ioaddr + IO_PORT);
+ }
+
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+
+ /* Continue the transmit command */
+ outb(RESUME_XMT_CMD, ioaddr);
+ }
+ lp->tx_last = last;
+ lp->tx_end = end;
+
+ /* Enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ if (dev->tbusy) {
+ dev->tbusy = 0;
+ }
+
+ if (net_debug > 5)
+ printk("eepro: exiting hardware_send_packet routine.\n");
+
+ return;
+ }
+ dev->tbusy = 1;
+
+ if (net_debug > 5)
+ printk("eepro: exiting hardware_send_packet routine.\n");
+}
+
+static void
+eepro_rx(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr, rcv_ram = dev->mem_end;
+ short boguscount = 20;
+ short rcv_car = lp->rx_start;
+ unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_rx routine.\n");
+
+ /* Set the read pointer to the start of the RCV */
+ outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
+
+ rcv_event = inw(ioaddr + IO_PORT);
+ while (rcv_event == RCV_DONE) {
+
+ rcv_status = inw(ioaddr + IO_PORT);
+ rcv_next_frame = inw(ioaddr + IO_PORT);
+ rcv_size = inw(ioaddr + IO_PORT);
+
+ if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
+
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+ rcv_size &= 0x3fff;
+ skb = dev_alloc_skb(rcv_size+5);
+
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ if (lp->version == LAN595)
+ insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
+
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), (rcv_size + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+
+ else { /* Not sure will ever reach here,
+ I set the 595 to discard bad received frames */
+ lp->stats.rx_errors++;
+
+ if (rcv_status & 0x0100)
+ lp->stats.rx_over_errors++;
+
+ else if (rcv_status & 0x0400)
+ lp->stats.rx_frame_errors++;
+
+ else if (rcv_status & 0x0800)
+ lp->stats.rx_crc_errors++;
+
+ printk("%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
+ dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
+ }
+
+ if (rcv_status & 0x1000)
+ lp->stats.rx_length_errors++;
+
+ if (--boguscount == 0)
+ break;
+
+ rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
+ lp->rx_start = rcv_next_frame;
+ outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
+ rcv_event = inw(ioaddr + IO_PORT);
+ }
+ if (rcv_car == 0)
+ rcv_car = (RCV_UPPER_LIMIT << 8) | 0xff;
+
+ outw(rcv_car - 1, ioaddr + RCV_STOP);
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_rx routine.\n");
+}
+
+static void
+eepro_transmit_interrupt(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short boguscount = 20;
+ short xmt_status;
+
+ while (lp->tx_start != lp->tx_end) {
+
+ outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
+ xmt_status = inw(ioaddr+IO_PORT);
+
+ if ((xmt_status & TX_DONE_BIT) == 0) break;
+
+ xmt_status = inw(ioaddr+IO_PORT);
+ lp->tx_start = inw(ioaddr+IO_PORT);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+
+ if (xmt_status & 0x2000)
+ lp->stats.tx_packets++;
+ else {
+ lp->stats.tx_errors++;
+ if (xmt_status & 0x0400)
+ lp->stats.tx_carrier_errors++;
+ printk("%s: XMT status = %#x\n",
+ dev->name, xmt_status);
+ }
+
+ if (xmt_status & 0x000f) {
+ lp->stats.collisions += (xmt_status & 0x000f);
+ }
+
+ if ((xmt_status & 0x0040) == 0x0) {
+ lp->stats.tx_heartbeat_errors++;
+ }
+
+ if (--boguscount == 0)
+ break;
+ }
+}
+
+#ifdef MODULE
+
+static char devicename[9] = { 0, };
+static struct device dev_eepro = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, eepro_probe };
+static int io = 0x200;
+static int irq = 0;
+static int mem = (RCV_RAM/1024); /* Size of the rx buffer in KB */
+
+int
+init_module(void)
+{
+ if (io == 0)
+ printk("eepro: You should not use auto-probing with insmod!\n");
+
+ dev_eepro.base_addr = io;
+ dev_eepro.irq = irq;
+ dev_eepro.mem_end = mem;
+
+ if (register_netdev(&dev_eepro) != 0)
+ return -EIO;
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_eepro);
+
+ kfree_s(dev_eepro.priv,sizeof(struct eepro_local));
+ dev_eepro.priv=NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_eepro.base_addr, EEPRO_IO_EXTENT);
+}
+#endif /* MODULE */
diff --git a/linux/src/drivers/net/eepro100.c b/linux/src/drivers/net/eepro100.c
new file mode 100644
index 0000000..d03462c
--- /dev/null
+++ b/linux/src/drivers/net/eepro100.c
@@ -0,0 +1,2155 @@
+/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This driver is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+ It should work with all i82557/558/559 boards.
+
+ To use as a module, use the compile-command at the end of the file.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ For updates see
+ http://www.scyld.com/network/eepro100.html
+ For installation instructions
+ http://www.scyld.com/network/modules.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"eepro100.c:v1.28 7/22/2003 Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/eepro100.html\n";
+
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.
+ The first five are undocumented and spelled per Intel recommendations.
+*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+static int congenb = 0; /* Enable congestion control in the DP83840. */
+static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
+static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
+/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+static int txdmacount = 128;
+static int rxdmacount = 0;
+
+/* Set the copy breakpoint for the copy-only-tiny-frame Rx method.
+ Lower values use more memory, but are faster.
+ Setting to > 1518 disables this feature. */
+static int rx_copybreak = 200;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+static int multicast_filter_limit = 64;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability, however setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* The ring sizes should be a power of two for efficiency. */
+#define TX_RING_SIZE 32 /* Effectively 2 entries fewer. */
+#define RX_RING_SIZE 32
+/* Actual number of TX packets queued, must be <= TX_RING_SIZE-2. */
+#define TX_QUEUE_LIMIT 12
+#define TX_QUEUE_UNFULL 8 /* Hysteresis marking queue as no longer full. */
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed bus+endian portability operations. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Intel PCI EtherExpressPro 100 driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(congenb, "i");
+MODULE_PARM(txfifo, "i");
+MODULE_PARM(rxfifo, "i");
+MODULE_PARM(txdmacount, "i");
+MODULE_PARM(rxdmacount, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+#ifdef MODULE_PARM_DESC
+MODULE_PARM_DESC(debug, "EEPro100 message level (0-31)");
+MODULE_PARM_DESC(options,
+ "EEPro100: force fixed speed+duplex 0x10 0x20 0x100 0x200");
+MODULE_PARM_DESC(max_interrupt_work,
+ "EEPro100 maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "EEPro100 set to forced full duplex when not 0"
+ " (deprecated)");
+MODULE_PARM_DESC(rx_copybreak,
+ "EEPro100 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "EEPro100 breakpoint for switching to Rx-all-multicast");
+/* Other settings are undocumented per Intel recommendation. */
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
+single-chip fast Ethernet controller for PCI, as used on the Intel
+EtherExpress Pro 100 adapter.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line. While it's
+possible to share PCI interrupt lines, it negatively impacts performance and
+only recent kernels support it.
+
+III. Driver operation
+
+IIIA. General
+The Speedo3 is very similar to other Intel network chips, that is to say
+"apparently designed on a different planet". This chip retains the complex
+Rx and Tx descriptors and multiple buffers pointers as previous chips, but
+also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
+Tx mode, but in a simplified lower-overhead manner: it associates only a
+single buffer descriptor with each frame descriptor.
+
+Despite the extra space overhead in each receive skbuff, the driver must use
+the simplified Rx buffer mode to assure that only a single data buffer is
+associated with each RxFD. The driver implements this by reserving space
+for the Rx descriptor at the head of each Rx skbuff.
+
+The Speedo-3 has receive and command unit base addresses that are added to
+almost all descriptor pointers. The driver sets these to zero, so that all
+pointer fields are absolute addresses.
+
+The System Control Block (SCB) of some previous Intel chips exists on the
+chip in both PCI I/O and memory space. This driver uses the I/O space
+registers, but might switch to memory mapped mode to better support non-x86
+processors.
+
+IIIB. Transmit structure
+
+The driver must use the complex Tx command+descriptor mode in order to
+have an indirect pointer to the skbuff data section. Each Tx command block
+(TxCB) is associated with two immediately appended Tx Buffer Descriptor
+(TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
+speedo_private data structure for each adapter instance.
+
+The i82558 and later explicitly supports this structure, and can read the two
+TxBDs in the same PCI burst as the TxCB.
+
+This ring structure is used for all normal transmit packets, but the
+transmit packet descriptors aren't long enough for most non-Tx commands such
+as CmdConfigure. This is complicated by the possibility that the chip has
+already loaded the link address in the previous descriptor. So for these
+commands we convert the next free descriptor on the ring to a NoOp, and point
+that descriptor's link to the complex command.
+
+An additional complexity of these non-transmit commands is that they may be
+added asynchronous to the normal transmit queue, so we set a lock
+whenever the Tx descriptor ring is manipulated.
+
+A notable aspect of these special configure commands is that they do
+work with the normal Tx ring entry scavenge method. The Tx ring scavenge
+is done at interrupt time using the 'dirty_tx' index, and checking for the
+command-complete bit. While the setup frames may have the NoOp command on the
+Tx ring marked as complete, but not have completed the setup command, this
+is not a problem. The tx_ring entry can be still safely reused, as the
+tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
+
+Commands may have bits set e.g. CmdSuspend in the command word to either
+suspend or stop the transmit/command unit. This driver always initializes
+the current command with CmdSuspend before erasing the CmdSuspend in the
+previous command, and only then issues a CU_RESUME.
+
+Note: In previous generation Intel chips, restarting the command unit was a
+notoriously slow process. This is presumably no longer true.
+
+IIIC. Receive structure
+
+Because of the bus-master support on the Speedo3 this driver uses the
+SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+This scheme allocates full-sized skbuffs as receive buffers. The value
+SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+trade-off the memory wasted by passing the full-sized skbuff to the queue
+layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
+
+For small frames the copying cost is negligible (esp. considering that we
+are pre-loading the cache with immediately useful header information), so we
+allocate a new, minimally-sized skbuff. For large frames the copying cost
+is non-trivial, and the larger copy might flush the cache of useful data, so
+we pass up the skbuff the packet was received into.
+
+IIID. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'sp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'sp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+that stated that I could disclose the information. But I still resent
+having to sign an Intel NDA when I'm helping Intel sell their own product!
+
+*/
+
+/* This table drives the PCI probe routines. */
+static void *speedo_found1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt);
+static int speedo_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags { ResetMII=1, HasChksum=2};
+
+/* I/O registers beyond 0x18 do not exist on the i82557. */
+#ifdef USE_IO_OPS
+#define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR1
+#define SPEEDO_SIZE 32
+#else
+#define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR0
+#define SPEEDO_SIZE 0x1000
+#endif
+
+struct pci_id_info static pci_id_tbl[] = {
+ {"Intel PCI EtherExpress Pro100 82865", { 0x12278086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI EtherExpress Pro100 Smart (i960RP/RD)",
+ { 0x12288086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel i82559 rev 8", { 0x12298086, ~0, 0,0, 8,0xff},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, HasChksum, },
+ {"Intel PCI EtherExpress Pro100", { 0x12298086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel EtherExpress Pro/100+ i82559ER", { 0x12098086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, ResetMII, },
+ {"Intel EtherExpress Pro/100 type 1029", { 0x10298086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel EtherExpress Pro/100 type 1030", { 0x10308086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 V Network", { 0x24498086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI LAN0 Controller 82801E", { 0x24598086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI LAN1 Controller 82801E", { 0x245D8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1031)", { 0x10318086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1032)", { 0x10328086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1033)", { 0x10338086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1034)", { 0x10348086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1035)", { 0x10358086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 1038)", { 0x10388086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 1039)", { 0x10398086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 103a)", { 0x103a8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"HP/Compaq D510 Intel Pro/100 VM",
+ { 0x103b8086, 0xffffffff, 0x00120e11, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 103b)", { 0x103b8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 103D)", { 0x103d8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 103E)", { 0x103e8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel EtherExpress Pro/100 865G Northbridge type 1051",
+ { 0x10518086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI to PCI Bridge EtherExpress Pro100 Server Adapter",
+ { 0x52008086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI EtherExpress Pro100 Server Adapter",
+ { 0x52018086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (unknown type series 1030)",
+ { 0x10308086, 0xfff0ffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 (unknown type series 1050)",
+ { 0x10508086, 0xfff0ffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info eepro100_drv_id = {
+ "eepro100", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ speedo_found1, speedo_pwr_event, };
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Offsets to the various registers.
+ All accesses need not be longword aligned. */
+enum speedo_offsets {
+ SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
+ SCBPointer = 4, /* General purpose pointer. */
+ SCBPort = 8, /* Misc. commands and operands. */
+ SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
+ SCBCtrlMDI = 16, /* MDI interface control. */
+ SCBEarlyRx = 20, /* Early receive byte count. */
+};
+/* Commands that can be put in a command list entry. */
+enum commands {
+ CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
+ CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
+ CmdDump = 0x60000, CmdDiagnose = 0x70000,
+ CmdSuspend = 0x40000000, /* Suspend after completion. */
+ CmdIntr = 0x20000000, /* Interrupt after completion. */
+ CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
+};
+/* Do atomically if possible. */
+#if defined(__i386__)
+#define clear_suspend(cmd) ((char *)(&(cmd)->cmd_status))[3] &= ~0x40
+#elif defined(__alpha__) || defined(__x86_64) || defined(__ia64)
+#define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status)
+#elif defined(__powerpc__) || defined(__sparc__) || (__BIG_ENDIAN)
+#define clear_suspend(cmd) clear_bit(6, &(cmd)->cmd_status)
+#else
+#warning Undefined architecture.
+#define clear_suspend(cmd) (cmd)->cmd_status &= cpu_to_le32(~CmdSuspend)
+#endif
+
+enum SCBCmdBits {
+ SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+ SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+ SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+ /* The rest are Rx and Tx commands. */
+ CUStart=0x0010, CUResume=0x0020, CUHiPriStart=0x0030, CUStatsAddr=0x0040,
+ CUShowStats=0x0050,
+ CUCmdBase=0x0060, /* CU Base address (set to zero) . */
+ CUDumpStats=0x0070, /* Dump then reset stats counters. */
+ CUHiPriResume=0x00b0, /* Resume for the high priority Tx queue. */
+ RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+ RxResumeNoResources=0x0007,
+};
+
+enum intr_status_bits {
+ IntrCmdDone=0x8000, IntrRxDone=0x4000, IntrCmdIdle=0x2000,
+ IntrRxSuspend=0x1000, IntrMIIDone=0x0800, IntrDrvrIntr=0x0400,
+ IntrAllNormal=0xfc00,
+};
+
+enum SCBPort_cmds {
+ PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
+};
+
+/* The Speedo3 Rx and Tx frame/buffer descriptors. */
+struct descriptor { /* A generic descriptor. */
+ s32 cmd_status; /* All command and status fields. */
+ u32 link; /* struct descriptor * */
+ unsigned char params[0];
+};
+
+/* The Speedo3 Rx and Tx buffer descriptors. */
+struct RxFD { /* Receive frame descriptor. */
+ s32 status;
+ u32 link; /* struct RxFD * */
+ u32 rx_buf_addr; /* void * */
+ u32 count;
+};
+
+/* Selected elements of the Tx/RxFD.status word. */
+enum RxFD_bits {
+ RxComplete=0x8000, RxOK=0x2000,
+ RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
+ RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
+ TxUnderrun=0x1000, StatusComplete=0x8000,
+};
+
+struct TxFD { /* Transmit frame descriptor set. */
+ s32 status;
+ u32 link; /* void * */
+ u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
+ s32 count; /* # of TBD (=1), Tx start thresh., etc. */
+ /* This constitutes two "TBD" entries. Non-zero-copy uses only one. */
+ u32 tx_buf_addr0; /* void *, frame to be transmitted. */
+ s32 tx_buf_size0; /* Length of Tx frame. */
+ u32 tx_buf_addr1; /* Used only for zero-copy data section. */
+ s32 tx_buf_size1; /* Length of second data buffer (0). */
+};
+
+/* Elements of the dump_statistics block. This block must be lword aligned. */
+struct speedo_stats {
+ u32 tx_good_frames;
+ u32 tx_coll16_errs;
+ u32 tx_late_colls;
+ u32 tx_underruns;
+ u32 tx_lost_carrier;
+ u32 tx_deferred;
+ u32 tx_one_colls;
+ u32 tx_multi_colls;
+ u32 tx_total_colls;
+ u32 rx_good_frames;
+ u32 rx_crc_errs;
+ u32 rx_align_errs;
+ u32 rx_resource_errs;
+ u32 rx_overrun_errs;
+ u32 rx_colls_errs;
+ u32 rx_runt_errs;
+ u32 done_marker;
+};
+
+/* Do not change the position (alignment) of the first few elements!
+ The later elements are grouped for cache locality. */
+struct speedo_private {
+ struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */
+ struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
+ struct speedo_stats lstats; /* Statistics and self-test region */
+
+ /* The addresses of a Tx/Rx-in-place packets/buffers. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+
+ /* Transmit and other commands control. */
+ struct descriptor *last_cmd; /* Last command sent. */
+ unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
+ spinlock_t lock; /* Group with Tx control cache line. */
+ u32 tx_threshold; /* The value for txdesc.count. */
+ unsigned long last_cmd_time; /* Jiffies of last queued command. */
+
+ /* Rx control, one cache line. */
+ struct RxFD *last_rxf; /* Most recent Rx frame. */
+ unsigned int cur_rx, dirty_rx; /* The next free ring entry */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ int rx_copybreak; /* Copy-vs-pass-up size threshold. */
+
+ int msg_level; /* NETIF_MSG_* bitmap for log verbosity. */
+ int max_interrupt_work; /* Event limit per interrupt invocation. */
+ struct net_device *next_module; /* Driver-global device list link. */
+ void *priv_addr; /* Unaligned address for kfree */
+ struct net_device_stats stats; /* Standard interface statistics. */
+ int alloc_failures; /* Count of Rx skbuff allocation failures. */
+ int chip_id, drv_flags; /* Index into and flags from pci_id_tbl. */
+ struct pci_dev *pci_dev;
+ unsigned char acpi_pwr; /* Power state saved at probe time. */
+ struct timer_list timer; /* Media selection timer. */
+ /* Multicast filter command. */
+ int mc_setup_frm_len; /* The length of an allocated.. */
+ struct descriptor *mc_setup_frm; /* ..multicast setup frame. */
+ int mc_setup_busy; /* Avoid double-use of setup frame. */
+ int multicast_filter_limit; /* Max addresses before ALLMULTI. */
+
+ int in_interrupt; /* Word-aligned dev->interrupt */
+ int rx_mode; /* Current PROMISC/ALLMULTI setting. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
+ unsigned int rx_bug:1; /* Work around receiver hang errata. */
+ unsigned int rx_bug10:1; /* Receiver might hang at 10mbps. */
+ unsigned int rx_bug100:1; /* Receiver might hang at 100mbps. */
+ unsigned int polling:1; /* Hardware blocked interrupt line. */
+ unsigned int medialock:1; /* The media speed/duplex is fixed. */
+ unsigned char default_port; /* Last dev->if_port value. */
+ unsigned short phy[2]; /* PHY media interfaces available. */
+ unsigned short advertising; /* Current PHY advertised caps. */
+ unsigned short partner; /* Link partner caps. */
+ long last_reset; /* Jiffies of last recovery action. */
+};
+
+/* Our internal RxMode state, not tied to the hardware bits. */
+enum rx_mode_bits {
+ AcceptAllMulticast=0x01, AcceptAllPhys=0x02,
+ AcceptErr=0x80, AcceptRunt=0x10,
+ AcceptBroadcast=0x08, AcceptMulticast=0x04,
+ AcceptMyPhys=0x01, RxInvalidMode=0x7f
+};
+
+/* The parameters for a CmdConfigure operation.
+ There are so many options that it would be difficult to document each bit.
+ We mostly use the default or recommended settings. */
+const char i82557_config_cmd[22] = {
+ 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
+ 0, 0x2E, 0, 0x60, 0,
+ 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
+ 0x3f, 0x05, };
+const char i82558_config_cmd[22] = {
+ 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
+ 0, 0x2E, 0, 0x60, 0x08, 0x88,
+ 0x68, 0, 0x40, 0xf2, 0xBD, /* 0xBD->0xFD=Force full-duplex */
+ 0x31, 0x05, };
+
+/* PHY media interface chips, defined by the databook. */
+static const char *phys[] = {
+ "None", "i82553-A/B", "i82553-C", "i82503",
+ "DP83840", "80c240", "80c24", "i82555",
+ "unknown-8", "unknown-9", "DP83840A", "unknown-11",
+ "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
+enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
+ S80C24, I82555, DP83840A=10, };
+static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
+
+/* Standard serial configuration EEPROM commands. */
+#define EE_READ_CMD (6)
+
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static int mdio_write(long ioaddr, int phy_id, int location, int value);
+static int speedo_open(struct net_device *dev);
+static void speedo_resume(struct net_device *dev);
+static void speedo_timer(unsigned long data);
+static void speedo_init_rx_ring(struct net_device *dev);
+static void speedo_tx_timeout(struct net_device *dev);
+static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int speedo_rx(struct net_device *dev);
+static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int speedo_close(struct net_device *dev);
+static struct net_device_stats *speedo_get_stats(struct net_device *dev);
+static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+#ifdef honor_default_port
+/* Optional driver feature to allow forcing the transceiver setting.
+ Not recommended. */
+static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
+ 0x2000, 0x2100, 0x0400, 0x3100};
+#endif
+
+/* A list of all installed Speedo devices, for removing the driver module. */
+static struct net_device *root_speedo_dev = NULL;
+
+/* Probe-time setup for one adapter: register a net_device, read the
+   station address from the serial EEPROM, reset the chip, optionally run
+   the chip self-test, allocate and fill the speedo_private state, and
+   install the net_device method pointers.  Returns the new net_device,
+   or NULL on failure. */
+static void *speedo_found1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct speedo_private *sp;
+ void *priv_mem;
+ int i, option;
+ u16 eeprom[0x100];
+ int acpi_idle_state = 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ /* Media/duplex override: dev->mem_start wins, then the per-card
+    module option, else -1 for "no override". */
+ if (dev->mem_start > 0)
+ option = dev->mem_start;
+ else if (card_idx >= 0 && options[card_idx] >= 0)
+ option = options[card_idx];
+ else
+ option = -1;
+
+ /* Wake the chip to D0 for probing; prior state is restored below. */
+ acpi_idle_state = acpi_set_pwr_state(pdev, ACPI_D0);
+
+ /* Read the station address EEPROM before doing the reset.
+ Nominally this should even be done before accepting the device, but
+ then we wouldn't have a device name with which to report the error.
+ The size test is for 6 bit vs. 8 bit address serial EEPROMs.
+ */
+ {
+ u16 sum = 0;
+ int j;
+ int read_cmd, ee_size;
+
+ /* A read with a 6-bit address field returns all-ones in the upper
+    bits on large (8-bit-address) EEPROMs; use that to size it. */
+ if ((do_eeprom_cmd(ioaddr, EE_READ_CMD << 24, 27) & 0xffe0000)
+ == 0xffe0000) {
+ ee_size = 0x100;
+ read_cmd = EE_READ_CMD << 24;
+ } else {
+ ee_size = 0x40;
+ read_cmd = EE_READ_CMD << 22;
+ }
+
+ /* Dump the whole EEPROM; words 0-2 are the MAC address. */
+ for (j = 0, i = 0; i < ee_size; i++) {
+ u16 value = do_eeprom_cmd(ioaddr, read_cmd | (i << 16), 27);
+ eeprom[i] = value;
+ sum += value;
+ if (i < 3) {
+ dev->dev_addr[j++] = value;
+ dev->dev_addr[j++] = value >> 8;
+ }
+ }
+ if (sum != 0xBABA)
+ printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
+ "check settings before activating this device!\n",
+ dev->name, sum);
+ /* Don't unregister_netdev(dev); as the EEPro may actually be
+ usable, especially if the MAC address is set later. */
+ }
+
+ /* Reset the chip: stop Tx and Rx processes and clear counters.
+ This takes less than 10usec and will easily finish before the next
+ action. */
+ outl(PortReset, ioaddr + SCBPort);
+
+ printk(KERN_INFO "%s: %s%s at %#3lx, ", dev->name,
+ eeprom[3] & 0x0100 ? "OEM " : "", pci_id_tbl[chip_idx].name,
+ ioaddr);
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2X:", dev->dev_addr[i]);
+ printk("%2.2X, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* We have decided to accept this device. */
+ /* Allocate cached private storage.
+ The PCI coherent descriptor rings are allocated at each open. */
+ sp = priv_mem = kmalloc(sizeof(*sp), GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ /* NOTE(review): on this failure path the net_device registered by
+    init_etherdev() above is not unregistered or freed -- verify
+    against the rest of the driver before changing. */
+ if (priv_mem == NULL)
+ return NULL;
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+#ifndef kernel_bloat
+ /* OK, this is pure kernel bloat. I don't like it when other drivers
+ waste non-pageable kernel space to emit similar messages, but I need
+ them for bug reports. */
+ {
+ const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
+ /* The self-test results must be paragraph aligned. */
+ s32 *volatile self_test_results;
+ int boguscnt = 16000; /* Timeout for set-test. */
+ printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
+ " connectors present:",
+ eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
+ for (i = 0; i < 4; i++)
+ if (eeprom[5] & (1<<i))
+ printk("%s", connectors[i]);
+ printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
+ phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
+ if (eeprom[7] & 0x0700)
+ printk(KERN_INFO " Secondary interface chip %s.\n",
+ phys[(eeprom[7]>>8)&7]);
+ /* DP83840-specific register 23 tweaks (EEPROM word 6 names the
+    primary transceiver type). */
+ if (((eeprom[6]>>8) & 0x3f) == DP83840
+ || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
+ int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
+ if (congenb)
+ mdi_reg23 |= 0x0100;
+ printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
+ mdi_reg23);
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
+ }
+ if ((option >= 0) && (option & 0x330)) {
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (option & 0x220 ? "full" : "half"));
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+ } else {
+ int mii_bmcrctrl = mdio_read(dev, eeprom[6] & 0x1f, 0);
+ /* Reset out of a transceiver left in 10baseT-fixed mode. */
+ if ((mii_bmcrctrl & 0x3100) == 0)
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 0, 0x8000);
+ }
+ if (eeprom[10] & 0x0002)
+ printk(KERN_INFO "\n" KERN_INFO " ** The configuration "
+ "EEPROM enables Sleep Mode.\n" KERN_INFO "\n"
+ " ** This will cause PCI bus errors!\n"
+ KERN_INFO " ** Update the configuration EEPROM "
+ "with the eepro100-diag program.\n" );
+ if (eeprom[6] == 0)
+ printk(KERN_INFO " ** The configuration EEPROM does not have a "
+ "transceiver type set.\n" KERN_INFO "\n"
+ " ** This will cause configuration problems and prevent "
+ "monitoring the link!\n"
+ KERN_INFO " ** Update the configuration EEPROM "
+ "with the eepro100-diag program.\n" );
+
+ /* Perform a system self-test. */
+ /* lstats is borrowed as the self-test result buffer here; it is
+    zeroed again (memset below) before real use. */
+ self_test_results = (s32*)(&sp->lstats);
+ self_test_results[0] = 0;
+ self_test_results[1] = -1;
+ outl(virt_to_bus(self_test_results) | PortSelfTest, ioaddr + SCBPort);
+ do {
+ udelay(10);
+ } while (self_test_results[1] == -1 && --boguscnt >= 0);
+
+ if (boguscnt < 0) { /* Test optimized out. */
+ printk(KERN_ERR "Self test failed, status %8.8x:\n"
+ KERN_ERR " Failure to initialize the i82557.\n"
+ KERN_ERR " Verify that the card is a bus-master"
+ " capable slot.\n",
+ self_test_results[1]);
+ } else
+ printk(KERN_INFO " General self-test: %s.\n"
+ KERN_INFO " Serial sub-system self-test: %s.\n"
+ KERN_INFO " Internal registers self-test: %s.\n"
+ KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
+ self_test_results[1] & 0x1000 ? "failed" : "passed",
+ self_test_results[1] & 0x0020 ? "failed" : "passed",
+ self_test_results[1] & 0x0008 ? "failed" : "passed",
+ self_test_results[1] & 0x0004 ? "failed" : "passed",
+ self_test_results[0]);
+ }
+#endif /* kernel_bloat */
+
+ outl(PortReset, ioaddr + SCBPort);
+
+ /* Return the chip to its original power state. */
+ acpi_set_pwr_state(pdev, acpi_idle_state);
+
+ /* We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ dev->priv = sp; /* Allocated above. */
+ memset(sp, 0, sizeof(*sp));
+ sp->next_module = root_speedo_dev;
+ root_speedo_dev = dev;
+
+ sp->priv_addr = priv_mem;
+ sp->pci_dev = pdev;
+ sp->chip_id = chip_idx;
+ sp->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ sp->acpi_pwr = acpi_idle_state;
+ sp->msg_level = (1 << debug) - 1;
+ sp->rx_copybreak = rx_copybreak;
+ sp->max_interrupt_work = max_interrupt_work;
+ sp->multicast_filter_limit = multicast_filter_limit;
+
+ /* Duplex: option bits 0x220 force full duplex; an explicit
+    per-card full_duplex[] module option overrides that. */
+ sp->full_duplex = option >= 0 && (option & 0x220) ? 1 : 0;
+ if (card_idx >= 0) {
+ if (full_duplex[card_idx] >= 0)
+ sp->full_duplex = full_duplex[card_idx];
+ }
+ sp->default_port = option >= 0 ? (option & 0x0f) : 0;
+ if (sp->full_duplex)
+ sp->medialock = 1;
+
+ sp->phy[0] = eeprom[6];
+ sp->phy[1] = eeprom[7];
+ sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
+
+ if (sp->rx_bug)
+ printk(KERN_INFO " Receiver lock-up workaround activated.\n");
+
+ /* The Speedo-specific entries in the device structure. */
+ dev->open = &speedo_open;
+ dev->hard_start_xmit = &speedo_start_xmit;
+ dev->stop = &speedo_close;
+ dev->get_stats = &speedo_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &speedo_ioctl;
+
+ return dev;
+}
+
+/* How to wait for the command unit to accept a command.
+ Typically this takes 0 ticks. */
+
+static inline void wait_for_cmd_done(struct net_device *dev)
+{
+ long cmd_ioaddr = dev->base_addr + SCBCmd;
+ int wait = 0;
+ int delayed_cmd;
+ /* Fast path: the SCB command byte reads zero once the chip has
+    accepted the previous command, usually on the first poll. */
+ do
+ if (inb(cmd_ioaddr) == 0) return;
+ while(++wait <= 100);
+ /* Slow path: latch the still-pending command byte for the report,
+    then poll much longer before giving up. */
+ delayed_cmd = inb(cmd_ioaddr);
+ do
+ if (inb(cmd_ioaddr) == 0) break;
+ while(++wait <= 10000);
+ /* Logged even if the command was eventually accepted: it records
+    that acceptance was delayed, and by how many polls. */
+ printk(KERN_ERR "%s: Command %2.2x was not immediately accepted, "
+ "%d ticks!\n",
+ dev->name, delayed_cmd, wait);
+}
+
+/* Perform a SCB command known to be slow.
+ This function checks the status both before and after command execution. */
+static void do_slow_command(struct net_device *dev, int cmd)
+{
+ long cmd_ioaddr = dev->base_addr + SCBCmd;
+ int wait = 0;
+ /* First make sure any previous command has been accepted. */
+ do
+ if (inb(cmd_ioaddr) == 0) break;
+ while(++wait <= 200);
+ if (wait > 100)
+ printk(KERN_ERR "%s: Command %4.4x was never accepted (%d polls)!\n",
+ dev->name, inb(cmd_ioaddr), wait);
+ /* Issue the new command, then wait for it to be accepted: a short
+    tight poll first, then up to 20ms with 1us pauses per poll. */
+ outb(cmd, cmd_ioaddr);
+ for (wait = 0; wait <= 100; wait++)
+ if (inb(cmd_ioaddr) == 0) return;
+ for (; wait <= 20000; wait++)
+ if (inb(cmd_ioaddr) == 0) return;
+ else udelay(1);
+ printk(KERN_ERR "%s: Command %4.4x was not accepted after %d polls!"
+ " Current status %8.8x.\n",
+ dev->name, cmd, wait, (int)inl(dev->base_addr + SCBStatus));
+}
+
+
+/* Serial EEPROM section.
+ A "bit" grungy, but we work our way through bit-by-bit :->. */
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+#define EE_ENB (0x4800 | EE_CS)
+#define EE_WRITE_0 0x4802
+#define EE_WRITE_1 0x4806
+#define EE_OFFSET SCBeeprom
+
+/* Delay between EEPROM clock transitions.
+ The code works with no delay on 33Mhz PCI. */
+#ifndef USE_IO_OPS
+#define eeprom_delay(ee_addr) writew(readw(ee_addr), ee_addr)
+#else
+#define eeprom_delay(ee_addr) inw(ee_addr)
+#endif
+
+/* Bit-bang one transaction on the serial (Microwire-style) EEPROM.
+   'cmd' holds the opcode+address left-justified so that bit 'cmd_len'
+   is shifted out first; each clock cycle shifts out one command bit and
+   shifts in one data bit.  Returns the accumulated input bits (for a
+   read, the low 16 bits are the data word). */
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
+{
+ unsigned retval = 0;
+ long ee_addr = ioaddr + SCBeeprom;
+
+ /* Assert chip select with the clock high to start the transaction. */
+ outw(EE_ENB | EE_SHIFT_CLK, ee_addr);
+
+ /* Shift the command bits out. */
+ do {
+ short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
+ outw(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ outw(dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay(ee_addr);
+ /* Sample the data-out pin on each rising clock edge. */
+ retval = (retval << 1) | ((inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ } while (--cmd_len >= 0);
+ outw(EE_ENB, ee_addr);
+
+ /* Terminate the EEPROM access. */
+ outw(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+/* Read one 16-bit PHY management register through the SCB MDI control
+   register.  Opcode 0x08000000 is an MDI read; bit 28 of the readback
+   signals completion.  Returns the register value, or the low 16 bits
+   of whatever was last read if the poll times out (error is logged). */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long ioaddr = dev->base_addr;
+ int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+
+ outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+ do {
+ val = inl(ioaddr + SCBCtrlMDI);
+ if (--boguscnt < 0) {
+ printk(KERN_ERR "%s: mdio_read() timed out with val = %8.8x.\n",
+ dev->name, val);
+ break;
+ }
+ } while (! (val & 0x10000000));
+ return val & 0xffff;
+}
+
+/* Write one 16-bit PHY management register (MDI opcode 0x04000000) and
+   poll bit 28 for completion.  Note the asymmetric signature: unlike
+   mdio_read() this takes the raw ioaddr rather than the net_device, so
+   the timeout message cannot include the device name. */
+static int mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+ outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
+ ioaddr + SCBCtrlMDI);
+ do {
+ val = inl(ioaddr + SCBCtrlMDI);
+ if (--boguscnt < 0) {
+ printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
+ break;
+ }
+ } while (! (val & 0x10000000));
+ return val & 0xffff;
+}
+
+
+/* dev->open handler: power up the chip, reset the Tx bookkeeping,
+   optionally kick MII autonegotiation, claim the (shared) IRQ, build the
+   Rx ring, start the hardware, program the Rx mode, and arm the media/
+   Rx-hang watchdog timer.  Returns 0 or -EAGAIN if the IRQ is busy. */
+static int
+speedo_open(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ MOD_INC_USE_COUNT;
+ acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
+
+ if (sp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
+
+ /* Set up the Tx queue early.. */
+ sp->cur_tx = 0;
+ sp->dirty_tx = 0;
+ sp->last_cmd = 0;
+ sp->tx_full = 0;
+ sp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+ sp->polling = sp->in_interrupt = 0;
+
+ dev->if_port = sp->default_port;
+
+ /* phy[0] bit 0x8000 clear means an MII PHY is present; cache its
+    advertised capabilities (MII register 4). */
+ if ((sp->phy[0] & 0x8000) == 0)
+ sp->advertising = mdio_read(dev, sp->phy[0] & 0x1f, 4);
+ /* With some transceivers we must retrigger negotiation to reset
+ power-up errors. */
+ if ((sp->drv_flags & ResetMII) &&
+ (sp->phy[0] & 0x8000) == 0) {
+ int phy_addr = sp->phy[0] & 0x1f ;
+ /* Use 0x3300 for restarting NWay, other values to force xcvr:
+ 0x0000 10-HD
+ 0x0100 10-FD
+ 0x2000 100-HD
+ 0x2100 100-FD
+ */
+#ifdef honor_default_port
+ mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
+#else
+ mdio_write(ioaddr, phy_addr, 0, 0x3300);
+#endif
+ }
+
+ /* We can safely take handler calls during init.
+ Doing this after speedo_init_rx_ring() results in a memory leak. */
+ if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ speedo_init_rx_ring(dev);
+
+ /* Fire up the hardware. */
+ speedo_resume(dev);
+ netif_start_tx_queue(dev);
+
+ /* Setup the chip and configure the multicast list. */
+ sp->mc_setup_frm = NULL;
+ sp->mc_setup_frm_len = 0;
+ sp->mc_setup_busy = 0;
+ sp->rx_mode = RxInvalidMode; /* Invalid -> always reset the mode. */
+ sp->flow_ctrl = sp->partner = 0;
+ set_rx_mode(dev);
+
+ if (sp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
+ dev->name, (int)inw(ioaddr + SCBStatus));
+
+ /* Set the timer. The timer serves a dual purpose:
+ 1) to monitor the media interface (e.g. link beat) and perhaps switch
+ to an alternate media type
+ 2) to monitor Rx activity, and restart the Rx process if the receiver
+ hangs. */
+ init_timer(&sp->timer);
+ sp->timer.expires = jiffies + 3*HZ;
+ sp->timer.data = (unsigned long)dev;
+ sp->timer.function = &speedo_timer; /* timer handler */
+ add_timer(&sp->timer);
+
+ /* No need to wait for the command unit to accept here. */
+ if ((sp->phy[0] & 0x8000) == 0)
+ mdio_read(dev, sp->phy[0] & 0x1f, 0);
+ return 0;
+}
+
+/* Start the chip hardware after a full reset. */
+static void speedo_resume(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ outw(SCBMaskAll, ioaddr + SCBCmd);
+
+ /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
+ sp->tx_threshold = 0x01208000;
+
+ /* Set the segment registers to '0'. */
+ wait_for_cmd_done(dev);
+ if (inb(ioaddr + SCBCmd)) {
+ outl(PortPartialReset, ioaddr + SCBPort);
+ udelay(10);
+ }
+ outl(0, ioaddr + SCBPointer);
+ inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ udelay(10); /* Bogus, but it avoids the bug. */
+ /* Note: these next two operations can take a while. */
+ do_slow_command(dev, RxAddrLoad);
+ do_slow_command(dev, CUCmdBase);
+
+ /* Load the statistics block and rx ring addresses. */
+ outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
+ inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ outb(CUStatsAddr, ioaddr + SCBCmd);
+ sp->lstats.done_marker = 0;
+ wait_for_cmd_done(dev);
+
+ outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+ ioaddr + SCBPointer);
+ inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ /* Note: RxStart should complete instantly. */
+ do_slow_command(dev, RxStart);
+ do_slow_command(dev, CUDumpStats);
+
+ /* Fill the first command with our physical address. */
+ {
+ int entry = sp->cur_tx++ % TX_RING_SIZE;
+ struct descriptor *cur_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ /* Avoid a bug(?!) here by marking the command already completed. */
+ cur_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
+ cur_cmd->link =
+ virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+ memcpy(cur_cmd->params, dev->dev_addr, 6);
+ if (sp->last_cmd)
+ clear_suspend(sp->last_cmd);
+ sp->last_cmd = cur_cmd;
+ }
+
+ /* Start the chip's Tx process and unmask interrupts. */
+ outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + SCBPointer);
+ outw(CUStart, ioaddr + SCBCmd);
+}
+
+/* Media monitoring and control. */
+/* Watchdog/media timer, normally run every 2 seconds.  Handles four
+   jobs: (1) low-rate polling when the IRQ line is blocked, (2) tracking
+   the MII link partner and flow-control state, (3) detecting Tx stalls
+   (missing chip, blocked IRQ, or genuine Tx timeout), and (4) kicking
+   the receiver out of the lock-up erratum via set_rx_mode(). */
+static void speedo_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int phy_num = sp->phy[0] & 0x1f;
+ int status = inw(ioaddr + SCBStatus);
+
+ if (sp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Interface monitor tick, chip status %4.4x.\n",
+ dev->name, status);
+
+ /* Normally we check every two seconds. */
+ sp->timer.expires = jiffies + 2*HZ;
+
+ if (sp->polling) {
+ /* Continue to be annoying. */
+ /* IRQ line is blocked: service pending events from the timer and
+    drop back to normal mode after 10s of quiet. */
+ if (status & 0xfc00) {
+ speedo_interrupt(dev->irq, dev, 0);
+ if (jiffies - sp->last_reset > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
+ dev->name, dev->irq);
+ sp->last_reset = jiffies;
+ }
+ } else if (jiffies - sp->last_reset > 10*HZ)
+ sp->polling = 0;
+ sp->timer.expires = jiffies + 2;
+ }
+ /* We have MII and lost link beat. */
+ if ((sp->phy[0] & 0x8000) == 0) {
+ int partner = mdio_read(dev, phy_num, 5);
+ if (partner != sp->partner) {
+ /* 802.3x pause is used only if both ends advertise it. */
+ int flow_ctrl = sp->advertising & partner & 0x0400 ? 1 : 0;
+ sp->partner = partner;
+ if (flow_ctrl != sp->flow_ctrl) {
+ sp->flow_ctrl = flow_ctrl;
+ sp->rx_mode = RxInvalidMode; /* Trigger a reload. */
+ }
+ /* Clear sticky bit. */
+ mdio_read(dev, phy_num, 1);
+ /* If link beat has returned... */
+ if (mdio_read(dev, phy_num, 1) & 0x0004)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ }
+ }
+
+ /* This no longer has a false-trigger window. */
+ if (sp->cur_tx - sp->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT &&
+ (jiffies - sp->last_cmd_time) > TX_TIMEOUT) {
+ /* status 0xffff means reads float high: card gone/powered off. */
+ if (status == 0xffff) {
+ if (jiffies - sp->last_reset > 10*HZ) {
+ sp->last_reset = jiffies;
+ printk(KERN_ERR "%s: The EEPro100 chip is missing!\n",
+ dev->name);
+ }
+ } else if (status & 0xfc00) {
+ /* We have a blocked IRQ line. This should never happen, but
+ we recover as best we can.*/
+ if ( ! sp->polling) {
+ if (jiffies - sp->last_reset > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is physically blocked! (%4.4x)"
+ "Failing back to low-rate polling.\n",
+ dev->name, dev->irq, status);
+ sp->last_reset = jiffies;
+ }
+ sp->polling = 1;
+ }
+ speedo_interrupt(dev->irq, dev, 0);
+ sp->timer.expires = jiffies + 2; /* Avoid */
+ } else {
+ speedo_tx_timeout(dev);
+ sp->last_reset = jiffies;
+ }
+ }
+ if (sp->rx_mode == RxInvalidMode ||
+ (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
+ /* We haven't received a packet in a Long Time. We might have been
+ bitten by the receiver hang bug. This can be cleared by sending
+ a set multicast list command. */
+ set_rx_mode(dev);
+ }
+ add_timer(&sp->timer);
+}
+
+/* Dump driver and chip state for debugging: the Tx ring (only when
+   NETIF_MSG_DRV messages are enabled), the Rx ring, and the interesting
+   PHY registers (0-5 and 21-30).  Output goes to the kernel log. */
+static void speedo_show_state(struct net_device *dev)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	int phy_num = sp->phy[0] & 0x1f;
+	int i;
+
+	/* Print a few items for debugging. */
+	if (sp->msg_level & NETIF_MSG_DRV) {
+		/* Note: the inner loop reuses the function-scope 'i'; a second
+		   declaration here used to shadow it. */
+		printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %d / %d:\n", dev->name,
+			   sp->cur_tx, sp->dirty_tx);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(KERN_DEBUG "%s: %c%c%d %8.8x.\n", dev->name,
+				   i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
+				   i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
+				   i, sp->tx_ring[i].status);
+	}
+	printk(KERN_DEBUG "%s:Printing Rx ring (next to receive into %d).\n",
+		   dev->name, sp->cur_rx);
+
+	for (i = 0; i < RX_RING_SIZE; i++)
+		printk(KERN_DEBUG "  Rx ring entry %d  %8.8x.\n",
+			   i, sp->rx_ringp[i] ? (int)sp->rx_ringp[i]->status : 0);
+
+	/* Registers 6..20 are reserved/uninteresting; jump ahead to 21. */
+	for (i = 0; i < 16; i++) {
+		if (i == 6) i = 21;
+		printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
+			   phy_num, i, mdio_read(dev, phy_num, i));
+	}
+
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+/* Allocates an skbuff per Rx ring slot, builds the chained RxFD list in
+   the skbuff headroom, and marks the final descriptor end-of-list.  A
+   failed allocation simply leaves the ring short; speedo_rx() refills. */
+static void
+speedo_init_rx_ring(struct net_device *dev)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	struct RxFD *rxf, *last_rxf = NULL;
+	int i;
+
+	sp->cur_rx = 0;
+#if defined(CONFIG_VLAN)
+	/* Note that buffer sizing is not a run-time check! */
+	sp->rx_buf_sz = dev->mtu + 14 + sizeof(struct RxFD) + 4;
+#else
+	sp->rx_buf_sz = dev->mtu + 14 + sizeof(struct RxFD);
+#endif
+	if (sp->rx_buf_sz < PKT_BUF_SZ)
+		sp->rx_buf_sz = PKT_BUF_SZ;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb;
+		skb = dev_alloc_skb(sp->rx_buf_sz);
+		sp->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;			/* OK. Just initially short of Rx bufs. */
+		skb->dev = dev;			/* Mark as being used by this device. */
+		rxf = (struct RxFD *)skb->tail;
+		sp->rx_ringp[i] = rxf;
+		skb_reserve(skb, sizeof(struct RxFD));
+		if (last_rxf)
+			last_rxf->link = virt_to_le32desc(rxf);
+		last_rxf = rxf;
+		rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
+		rxf->link = 0;			/* None yet. */
+		/* This field unused by i82557, we use it as a consistency check. */
+#ifdef final_version
+		rxf->rx_buf_addr = 0xffffffff;
+#else
+		rxf->rx_buf_addr = virt_to_bus(skb->tail);
+#endif
+		rxf->count = cpu_to_le32((sp->rx_buf_sz - sizeof(struct RxFD)) << 16);
+	}
+	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+	/* Mark the last entry as end-of-list.  Guard against the pathological
+	   case where not even the first buffer could be allocated: the old
+	   code dereferenced a NULL last_rxf here. */
+	if (last_rxf)
+		last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
+	sp->last_rxf = last_rxf;
+}
+
+/* Transmit-timeout recovery.  Logs the chip and Tx ring state, triggers a
+   stats dump, then either restarts the command unit or fully resets the
+   chip, and finally resets the MII transceiver. */
+static void speedo_tx_timeout(struct net_device *dev)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int status = inw(ioaddr + SCBStatus);
+
+	printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
+		   " %4.4x at %d/%d commands %8.8x %8.8x %8.8x.\n",
+		   dev->name, status, (int)inw(ioaddr + SCBCmd),
+		   sp->dirty_tx, sp->cur_tx,
+		   sp->tx_ring[(sp->dirty_tx+0) % TX_RING_SIZE].status,
+		   sp->tx_ring[(sp->dirty_tx+1) % TX_RING_SIZE].status,
+		   sp->tx_ring[(sp->dirty_tx+2) % TX_RING_SIZE].status);
+
+	/* Trigger a stats dump to give time before the reset. */
+	speedo_get_stats(dev);
+
+	speedo_show_state(dev);
+	/* NOTE(review): the trailing "&& 0" disables the restart-only branch,
+	   so the full chip-reset path is always taken -- confirm intended. */
+	if ((status & 0x00C0) != 0x0080
+		&& (status & 0x003C) == 0x0010 && 0) {
+		/* Only the command unit has stopped. */
+		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
+			   dev->name);
+		outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+			 ioaddr + SCBPointer);
+		outw(CUStart, ioaddr + SCBCmd);
+	} else {
+		printk(KERN_WARNING "%s: Restarting the chip...\n",
+			   dev->name);
+		/* Reset the Tx and Rx units. */
+		outl(PortReset, ioaddr + SCBPort);
+		if (sp->msg_level & NETIF_MSG_TX_ERR)
+			speedo_show_state(dev);
+		udelay(10);
+		speedo_resume(dev);
+	}
+	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
+	if ((sp->phy[0] & 0x8000) == 0) {
+		int phy_addr = sp->phy[0] & 0x1f;
+		int advertising = mdio_read(dev, phy_addr, 4);
+		int mii_bmcr = mdio_read(dev, phy_addr, 0);
+		/* NOTE(review): mdio_write() is passed ioaddr while mdio_read()
+		   takes dev -- confirm the two prototypes really differ. */
+		mdio_write(ioaddr, phy_addr, 0, 0x0400);
+		mdio_write(ioaddr, phy_addr, 1, 0x0000);
+		mdio_write(ioaddr, phy_addr, 4, 0x0000);
+		mdio_write(ioaddr, phy_addr, 0, 0x8000);
+#ifdef honor_default_port
+		mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
+#else
+		mdio_read(dev, phy_addr, 0);
+		mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
+		mdio_write(ioaddr, phy_addr, 4, advertising);
+#endif
+	}
+	sp->stats.tx_errors++;
+	dev->trans_start = jiffies;
+	return;
+}
+
+/* Handle the interrupt cases when something unexpected happens.
+   Only Rx-suspend conditions are acted on: either resume the receiver
+   after running out of buffers, or restart it after an unknown error.
+   Every such event is counted as an Rx error. */
+static void speedo_intr_error(struct net_device *dev, int intr_status)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int rx_state;
+
+	if (!(intr_status & IntrRxSuspend))
+		return;
+
+	rx_state = intr_status & 0x003c;
+	if (rx_state == 0x0028) {
+		/* No more Rx buffers. */
+		outb(RxResumeNoResources, ioaddr + SCBCmd);
+	} else if (rx_state == 0x0008) {
+		/* No resources (why?!) */
+		printk(KERN_DEBUG "%s: Unknown receiver error, status=%#4.4x.\n",
+			   dev->name, intr_status);
+		/* No idea of what went wrong. Restart the receiver. */
+		outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+			 ioaddr + SCBPointer);
+		outb(RxStart, ioaddr + SCBCmd);
+	}
+	sp->stats.rx_errors++;
+}
+
+
+/* Queue one packet for transmission.  Returns 0 on success, 1 when the
+   queue is busy (the caller requeues the skb).  Builds the next Tx ring
+   descriptor, links it after the previous command, clears the previous
+   command's suspend bit and resumes the command unit. */
+static int
+speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int entry;
+
+	/* Block a timer-based transmit from overlapping. This could better be
+	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+	   If this ever occurs the queue layer is doing something evil! */
+	if (netif_pause_tx_queue(dev) != 0) {
+		int tickssofar = jiffies - dev->trans_start;
+		if (tickssofar < TX_TIMEOUT - 2)
+			return 1;
+		if (tickssofar < TX_TIMEOUT) {
+			/* Reap sent packets from the full Tx queue. */
+			outw(SCBTriggerIntr, ioaddr + SCBCmd);
+			return 1;
+		}
+		speedo_tx_timeout(dev);
+		return 1;
+	}
+
+	/* Caution: the write order is important here, set the base address
+	   with the "ownership" bits last. */
+
+	{	/* Prevent interrupts from changing the Tx ring from underneath us. */
+		unsigned long flags;
+
+		spin_lock_irqsave(&sp->lock, flags);
+		/* Calculate the Tx descriptor entry. */
+		entry = sp->cur_tx % TX_RING_SIZE;
+
+		sp->tx_skbuff[entry] = skb;
+		/* Todo: be a little more clever about setting the interrupt bit. */
+		sp->tx_ring[entry].status =
+			cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+		sp->cur_tx++;
+		sp->tx_ring[entry].link =
+			virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+		/* We may nominally release the lock here. */
+		sp->tx_ring[entry].tx_desc_addr =
+			virt_to_le32desc(&sp->tx_ring[entry].tx_buf_addr0);
+		/* The data region is always in one buffer descriptor. */
+		sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+		sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32desc(skb->data);
+		sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+		/* Todo: perhaps leave the interrupt bit set if the Tx queue is more
+		   than half full. Argument against: we should be receiving packets
+		   and scavenging the queue. Argument for: if so, it shouldn't
+		   matter. */
+		{	/* Chain this command after the previous one; remember it so the
+			   next transmit can clear its suspend bit in turn. */
+			struct descriptor *last_cmd = sp->last_cmd;
+			sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+			clear_suspend(last_cmd);
+		}
+		if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT) {
+			sp->tx_full = 1;
+			netif_stop_tx_queue(dev);
+		} else
+			netif_unpause_tx_queue(dev);
+		spin_unlock_irqrestore(&sp->lock, flags);
+	}
+	wait_for_cmd_done(dev);
+	outb(CUResume, ioaddr + SCBCmd);
+	dev->trans_start = jiffies;
+
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread.  It loops acknowledging and servicing interrupt
+   sources until none remain or the work limit is exhausted. */
+static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *)dev_instance;
+	struct speedo_private *sp;
+	long ioaddr;
+	int work_limit;
+	u16 status;
+
+	ioaddr = dev->base_addr;
+	sp = (struct speedo_private *)dev->priv;
+	work_limit = sp->max_interrupt_work;
+#ifndef final_version
+	/* A lock to prevent simultaneous entry on SMP machines. */
+	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
+		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+			   dev->name);
+		sp->in_interrupt = 0;	/* Avoid halting machine. */
+		return;
+	}
+#endif
+
+	do {
+		status = inw(ioaddr + SCBStatus);
+
+		/* An all-ones read means the chip is absent from the bus. */
+		if ((status & IntrAllNormal) == 0 || status == 0xffff)
+			break;
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		outw(status & IntrAllNormal, ioaddr + SCBStatus);
+
+		if (sp->msg_level & NETIF_MSG_INTR)
+			printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
+				   dev->name, status);
+
+		if (status & (IntrRxDone|IntrRxSuspend))
+			speedo_rx(dev);
+
+		/* The command unit did something, scavenge finished Tx entries. */
+		if (status & (IntrCmdDone | IntrCmdIdle | IntrDrvrIntr)) {
+			unsigned int dirty_tx;
+			/* We should nominally not need this lock. */
+			spin_lock(&sp->lock);
+
+			dirty_tx = sp->dirty_tx;
+			while (sp->cur_tx - dirty_tx > 0) {
+				int entry = dirty_tx % TX_RING_SIZE;
+				int status = le32_to_cpu(sp->tx_ring[entry].status);
+
+				if (sp->msg_level & NETIF_MSG_INTR)
+					printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
+						   entry, status);
+				if ((status & StatusComplete) == 0) {
+					/* Special case error check: look for descriptor that the
+					   chip skipped(?). */
+					if (sp->cur_tx - dirty_tx > 2 &&
+						(sp->tx_ring[(dirty_tx+1) % TX_RING_SIZE].status
+						 & cpu_to_le32(StatusComplete))) {
+						printk(KERN_ERR "%s: Command unit failed to mark "
+							   "command %8.8x as complete at %d.\n",
+							   dev->name, status, dirty_tx);
+					} else
+						break;			/* It still hasn't been processed. */
+				}
+				/* Adaptively raise the Tx FIFO threshold on underrun. */
+				if ((status & TxUnderrun) &&
+					(sp->tx_threshold < 0x01e08000)) {
+					sp->tx_threshold += 0x00040000;
+					if (sp->msg_level & NETIF_MSG_TX_ERR)
+						printk(KERN_DEBUG "%s: Tx threshold increased, "
+							   "%#8.8x.\n", dev->name, sp->tx_threshold);
+				}
+				/* Free the original skb. */
+				if (sp->tx_skbuff[entry]) {
+					sp->stats.tx_packets++;	/* Count only user packets. */
+#if LINUX_VERSION_CODE > 0x20127
+					sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+#endif
+					dev_free_skb_irq(sp->tx_skbuff[entry]);
+					sp->tx_skbuff[entry] = 0;
+				} else if ((status & 0x70000) == CmdNOp)
+					/* A NoOp marks the completed multicast setup frame. */
+					sp->mc_setup_busy = 0;
+				dirty_tx++;
+			}
+
+#ifndef final_version
+			if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
+				printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
+					   " full=%d.\n",
+					   dirty_tx, sp->cur_tx, sp->tx_full);
+				dirty_tx += TX_RING_SIZE;
+			}
+#endif
+
+			sp->dirty_tx = dirty_tx;
+			if (sp->tx_full
+				&& sp->cur_tx - dirty_tx < TX_QUEUE_UNFULL) {
+				/* The ring is no longer full, clear tbusy. */
+				sp->tx_full = 0;
+				netif_resume_tx_queue(dev);
+			}
+			spin_unlock(&sp->lock);
+		}
+
+		if (status & IntrRxSuspend)
+			speedo_intr_error(dev, status);
+
+		if (--work_limit < 0) {
+			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
+				   dev->name, status);
+			/* Clear all interrupt sources. */
+			outl(0xfc00, ioaddr + SCBStatus);
+			break;
+		}
+	} while (1);
+
+	if (sp->msg_level & NETIF_MSG_INTR)
+		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+			   dev->name, (int)inw(ioaddr + SCBStatus));
+
+	clear_bit(0, (void*)&sp->in_interrupt);
+	return;
+}
+
+/* Receive-side work: walk completed RxFDs, hand packets to the network
+   stack (copying small frames when below rx_copybreak), then refill and
+   re-chain the Rx ring.  Always returns 0. */
+static int
+speedo_rx(struct net_device *dev)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	int entry = sp->cur_rx % RX_RING_SIZE;
+	int status;
+	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+
+	if (sp->msg_level & NETIF_MSG_RX_STATUS)
+		printk(KERN_DEBUG " In speedo_rx().\n");
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while (sp->rx_ringp[entry] != NULL &&
+		   (status = le32_to_cpu(sp->rx_ringp[entry]->status)) & RxComplete) {
+		int desc_count = le32_to_cpu(sp->rx_ringp[entry]->count);
+		int pkt_len = desc_count & 0x07ff;
+
+		if (--rx_work_limit < 0)
+			break;
+		if (sp->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
+				   pkt_len);
+		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+			if (status & RxErrTooBig)
+				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
+					   "status %8.8x!\n", dev->name, status);
+			else if ( ! (status & RxOK)) {
+				/* There was a fatal error. This *should* be impossible. */
+				sp->stats.rx_errors++;
+				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
+					   "status %8.8x.\n", dev->name, status);
+			}
+		} else {
+			struct sk_buff *skb;
+
+			/* Chips that append a checksum deliver two extra bytes. */
+			if (sp->drv_flags & HasChksum)
+				pkt_len -= 2;
+
+			/* Check if the packet is long enough to just accept without
+			   copying to a properly sized skbuff. */
+			if (pkt_len < sp->rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+				/* 'skb_put()' points to the start of sk_buff data area. */
+				/* Packet is in one chunk -- we can copy + cksum. */
+				eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
+			} else {
+				void *temp;
+				/* Pass up the already-filled skbuff. */
+				skb = sp->rx_skbuff[entry];
+				if (skb == NULL) {
+					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
+						   dev->name);
+					break;
+				}
+				sp->rx_skbuff[entry] = NULL;
+				temp = skb_put(skb, pkt_len);
+#if !defined(final_version) && !defined(__powerpc__)
+				if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
+					printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
+						   "addresses do not match in speedo_rx: %p vs. %p "
+						   "/ %p.\n", dev->name,
+						   bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
+						   skb->head, temp);
+#endif
+				sp->rx_ringp[entry] = NULL;
+			}
+			skb->protocol = eth_type_trans(skb, dev);
+			if (sp->drv_flags & HasChksum) {
+				/* NOTE(review): disabled code -- the get_unaligned() line
+				   below lacks a semicolon and would not compile if enabled. */
+#if 0
+				u16 csum = get_unaligned((u16*)(skb->head + pkt_len))
+				if (desc_count & 0x8000)
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+			}
+			netif_rx(skb);
+			sp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+			sp->stats.rx_bytes += pkt_len;
+#endif
+		}
+		entry = (++sp->cur_rx) % RX_RING_SIZE;
+	}
+
+	/* Refill the Rx ring buffers. */
+	for (; sp->cur_rx - sp->dirty_rx > 0; sp->dirty_rx++) {
+		struct RxFD *rxf;
+		entry = sp->dirty_rx % RX_RING_SIZE;
+		if (sp->rx_skbuff[entry] == NULL) {
+			struct sk_buff *skb;
+			/* Get a fresh skbuff to replace the consumed one. */
+			skb = dev_alloc_skb(sp->rx_buf_sz);
+			sp->rx_skbuff[entry] = skb;
+			if (skb == NULL) {
+				sp->rx_ringp[entry] = NULL;
+				sp->alloc_failures++;
+				break;			/* Better luck next time!  */
+			}
+			rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+			skb->dev = dev;
+			skb_reserve(skb, sizeof(struct RxFD));
+			rxf->rx_buf_addr = virt_to_le32desc(skb->tail);
+		} else {
+			rxf = sp->rx_ringp[entry];
+		}
+		rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
+		rxf->link = 0;			/* None yet. */
+		rxf->count = cpu_to_le32((sp->rx_buf_sz - sizeof(struct RxFD)) << 16);
+		/* Append to the chain and move the end-of-list marker forward. */
+		sp->last_rxf->link = virt_to_le32desc(rxf);
+		sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
+		sp->last_rxf = rxf;
+	}
+
+	sp->last_rx_time = jiffies;
+	return 0;
+}
+
+/* Take the interface down: stop the monitor timer, reset the chip,
+   release the IRQ, free all Rx/Tx ring skbuffs and the multicast setup
+   frame, then drop the chip into a low ACPI power state.  Returns 0. */
+static int
+speedo_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	int i;
+
+	netif_stop_tx_queue(dev);
+
+	if (sp->msg_level & NETIF_MSG_IFDOWN)
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n"
+			   KERN_DEBUG "%s: Cumlative allocation failures: %d.\n",
+			   dev->name, (int)inw(ioaddr + SCBStatus),
+			   dev->name, sp->alloc_failures);
+
+	/* Shut off the media monitoring timer. */
+	del_timer(&sp->timer);
+
+	/* Shutting down the chip nicely fails to disable flow control. So.. */
+	outl(PortPartialReset, ioaddr + SCBPort);
+
+	free_irq(dev->irq, dev);
+
+	/* Free all the skbuffs in the Rx and Tx queues. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb = sp->rx_skbuff[i];
+		sp->rx_skbuff[i] = NULL;
+		/* Clear the Rx descriptors. */
+		if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+			skb->free = 1;
+#endif
+			dev_free_skb(skb);
+		}
+	}
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		struct sk_buff *skb = sp->tx_skbuff[i];
+		sp->tx_skbuff[i] = NULL;
+		/* Clear the Tx descriptors. */
+		if (skb)
+			dev_free_skb(skb);
+	}
+	if (sp->mc_setup_frm) {
+		kfree(sp->mc_setup_frm);
+		/* Clear the stale pointer: the reallocation path in set_rx_mode()
+		   would otherwise kfree() it a second time after the interface is
+		   reopened (double free). */
+		sp->mc_setup_frm = NULL;
+		sp->mc_setup_frm_len = 0;
+	}
+
+	/* Print a few items for debugging. */
+	if (sp->msg_level & NETIF_MSG_IFDOWN)
+		speedo_show_state(dev);
+
+	/* Alt: acpi_set_pwr_state(pdev, sp->acpi_pwr); */
+	acpi_set_pwr_state(sp->pci_dev, ACPI_D2);
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+/* The Speedo-3 has an especially awkward and unusable method of getting
+   statistics out of the chip. It takes an unpredictable length of time
+   for the dump-stats command to complete. To avoid a busy-wait loop we
+   update the stats with the previous dump results, and then trigger a
+   new dump.
+
+   These problems are mitigated by the current /proc implementation, which
+   calls this routine first to judge the output length, and then to emit the
+   output.
+
+   Oh, and incoming frames are dropped while executing dump-stats!
+   */
+static struct net_device_stats *speedo_get_stats(struct net_device *dev)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	/* Update only if the previous dump finished (0xA007 is the chip's
+	   dump-complete marker written into the stats block). */
+	if (sp->lstats.done_marker == le32_to_cpu(0xA007)) {
+		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats.tx_coll16_errs);
+		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats.tx_late_colls);
+		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_underruns);
+		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_lost_carrier);
+		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats.tx_deferred);*/
+		sp->stats.collisions += le32_to_cpu(sp->lstats.tx_total_colls);
+		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats.rx_crc_errs);
+		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats.rx_align_errs);
+		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats.rx_resource_errs);
+		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
+		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
+		sp->lstats.done_marker = 0x0000;
+		if (netif_running(dev)) {
+			/* Kick off the next asynchronous dump. */
+			wait_for_cmd_done(dev);
+			outb(CUDumpStats, ioaddr + SCBCmd);
+		}
+	}
+	return &sp->stats;
+}
+
+/* Driver-private ioctl handler: MII register access (both the old 0x89Fx
+   and the standard 0x894x ioctl numbers) plus get/set of driver tuning
+   parameters.  Writes require CAP_NET_ADMIN. */
+static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
+	int phy = sp->phy[0] & 0x1f;
+	int saved_acpi;
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = phy;
+		/* Fall Through */
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		/* Power the chip up around the MII access. */
+		saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
+		data[3] = mdio_read(dev, data[0], data[1]);
+		acpi_set_pwr_state(sp->pci_dev, saved_acpi);
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		/* Mirror writes to our own PHY into the driver's media state. */
+		if (data[0] == sp->phy[0]) {
+			u16 value = data[2];
+			switch (data[1]) {
+			case 0:
+				/* Check for autonegotiation on or reset. */
+				sp->medialock = (value & 0x9000) ? 0 : 1;
+				if (sp->medialock) {
+					sp->full_duplex = (value & 0x0100) ? 1 : 0;
+					sp->rx_mode = RxInvalidMode;
+				}
+				break;
+			case 4: sp->advertising = value; break;
+			}
+		}
+		saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
+		mdio_write(ioaddr, data[0], data[1], data[2]);
+		acpi_set_pwr_state(sp->pci_dev, saved_acpi);
+		return 0;
+	case SIOCGPARAMS:
+		data32[0] = sp->msg_level;
+		data32[1] = sp->multicast_filter_limit;
+		data32[2] = sp->max_interrupt_work;
+		data32[3] = sp->rx_copybreak;
+#if 0
+		/* No room in the ioctl() to set these. */
+		data32[4] = txfifo;
+		data32[5] = rxfifo;
+#endif
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		sp->msg_level = data32[0];
+		sp->multicast_filter_limit = data32[1];
+		sp->max_interrupt_work = data32[2];
+		sp->rx_copybreak = data32[3];
+#if 0
+		/* No room in the ioctl() to set these. */
+		if (data32[4] < 16)
+			txfifo = data32[4];
+		if (data32[5] < 16)
+			rxfifo = data32[5];
+#endif
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   This is very ugly with Intel chips -- we usually have to execute an
+   entire configuration command, plus process a multicast command.
+   This is complicated. We must put a large configuration command and
+   an arbitrarily-sized multicast command in the transmit list.
+   To minimize the disruption -- the previous command might have already
+   loaded the link -- we convert the current command block, normally a Tx
+   command, into a no-op and link it to the new command.
+*/
+/* Three paths: (1) the Rx mode changed -> queue a CmdConfigure frame;
+   (2) 0-3 multicast addresses -> queue a CmdMulticastList that fits in
+   one tx_ring entry; (3) larger lists -> build a separately allocated
+   setup frame and splice it in via a NoOp ring entry. */
+static void set_rx_mode(struct net_device *dev)
+{
+	struct speedo_private *sp = (struct speedo_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	struct descriptor *last_cmd;
+	char new_rx_mode;
+	unsigned long flags;
+	int entry, i;
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		new_rx_mode = AcceptAllMulticast | AcceptAllPhys;
+	} else if ((dev->flags & IFF_ALLMULTI) ||
+			   dev->mc_count > sp->multicast_filter_limit) {
+		new_rx_mode = AcceptAllMulticast;
+	} else
+		new_rx_mode = 0;
+
+	if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
+		/* The Tx ring is full -- don't add anything! Presumably the new mode
+		   is in config_cmd_data and will be added anyway, otherwise we wait
+		   for a timer tick or the mode to change again. */
+		sp->rx_mode = RxInvalidMode;
+		return;
+	}
+
+	if (new_rx_mode != sp->rx_mode) {
+		u8 *config_cmd_data;
+
+		spin_lock_irqsave(&sp->lock, flags);
+		entry = sp->cur_tx % TX_RING_SIZE;
+		last_cmd = sp->last_cmd;
+		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+		sp->tx_skbuff[entry] = 0;			/* Redundant. */
+		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
+		sp->cur_tx++;
+		sp->tx_ring[entry].link =
+			virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+		/* We may nominally release the lock here. */
+
+		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
+		/* Construct a full CmdConfig frame. */
+		memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
+		config_cmd_data[1] = (txfifo << 4) | rxfifo;
+		config_cmd_data[4] = rxdmacount;
+		config_cmd_data[5] = txdmacount + 0x80;
+		config_cmd_data[6] |= (new_rx_mode & AcceptErr) ? 0x80 : 0;
+		config_cmd_data[7] &= (new_rx_mode & AcceptRunt) ? ~0x01 : ~0;
+		if (sp->drv_flags & HasChksum)
+			config_cmd_data[9] |= 1;
+		config_cmd_data[15] |= (new_rx_mode & AcceptAllPhys) ? 1 : 0;
+		config_cmd_data[19] = sp->flow_ctrl ? 0xBD : 0x80;
+		config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
+		config_cmd_data[21] = (new_rx_mode & AcceptAllMulticast) ? 0x0D : 0x05;
+		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
+			config_cmd_data[15] |= 0x80;
+			config_cmd_data[8] = 0;
+		}
+		/* Trigger the command unit resume. */
+		wait_for_cmd_done(dev);
+		clear_suspend(last_cmd);
+		outb(CUResume, ioaddr + SCBCmd);
+		spin_unlock_irqrestore(&sp->lock, flags);
+		sp->last_cmd_time = jiffies;
+	}
+
+	if (new_rx_mode == 0 && dev->mc_count < 4) {
+		/* The simple case of 0-3 multicast list entries occurs often, and
+		   fits within one tx_ring[] entry. */
+		struct dev_mc_list *mclist;
+		u16 *setup_params, *eaddrs;
+
+		spin_lock_irqsave(&sp->lock, flags);
+		entry = sp->cur_tx % TX_RING_SIZE;
+		last_cmd = sp->last_cmd;
+		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+		sp->tx_skbuff[entry] = 0;
+		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
+		sp->cur_tx++;
+		sp->tx_ring[entry].link =
+			virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+		/* We may nominally release the lock here. */
+		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
+		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+		*setup_params++ = cpu_to_le16(dev->mc_count*6);
+		/* Fill in the multicast addresses. */
+		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+			 i++, mclist = mclist->next) {
+			eaddrs = (u16 *)mclist->dmi_addr;
+			*setup_params++ = *eaddrs++;
+			*setup_params++ = *eaddrs++;
+			*setup_params++ = *eaddrs++;
+		}
+
+		wait_for_cmd_done(dev);
+		clear_suspend(last_cmd);
+		/* Immediately trigger the command unit resume. */
+		outb(CUResume, ioaddr + SCBCmd);
+		spin_unlock_irqrestore(&sp->lock, flags);
+		sp->last_cmd_time = jiffies;
+	} else if (new_rx_mode == 0) {
+		struct dev_mc_list *mclist;
+		u16 *setup_params, *eaddrs;
+		struct descriptor *mc_setup_frm = sp->mc_setup_frm;
+		int i;
+
+		/* (Re)allocate the setup frame when it is absent or too small. */
+		if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
+			|| sp->mc_setup_frm == NULL) {
+			/* Allocate a full setup frame, 10bytes + <max addrs>. */
+			if (sp->mc_setup_frm)
+				kfree(sp->mc_setup_frm);
+			sp->mc_setup_busy = 0;
+			sp->mc_setup_frm_len = 10 + sp->multicast_filter_limit*6;
+			sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
+			if (sp->mc_setup_frm == NULL) {
+				printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
+					   dev->name);
+				sp->rx_mode = RxInvalidMode; /* We failed, try again. */
+				return;
+			}
+		}
+		/* If we are busy, someone might be quickly adding to the MC list.
+		   Try again later when the list updates stop. */
+		if (sp->mc_setup_busy) {
+			sp->rx_mode = RxInvalidMode;
+			return;
+		}
+		mc_setup_frm = sp->mc_setup_frm;
+		/* Fill the setup frame. */
+		if (sp->msg_level & NETIF_MSG_RXFILTER)
+			printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
+				   "%d bytes.\n",
+				   dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
+		mc_setup_frm->cmd_status =
+			cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
+		/* Link set below. */
+		setup_params = (u16 *)&mc_setup_frm->params;
+		*setup_params++ = cpu_to_le16(dev->mc_count*6);
+		/* Fill in the multicast addresses. */
+		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+			 i++, mclist = mclist->next) {
+			eaddrs = (u16 *)mclist->dmi_addr;
+			*setup_params++ = *eaddrs++;
+			*setup_params++ = *eaddrs++;
+			*setup_params++ = *eaddrs++;
+		}
+
+		/* Disable interrupts while playing with the Tx Cmd list. */
+		spin_lock_irqsave(&sp->lock, flags);
+		entry = sp->cur_tx % TX_RING_SIZE;
+		last_cmd = sp->last_cmd;
+		sp->last_cmd = mc_setup_frm;
+		sp->mc_setup_busy++;
+
+		/* Change the command to a NoOp, pointing to the CmdMulti command. */
+		sp->tx_skbuff[entry] = 0;
+		sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
+		sp->cur_tx++;
+		sp->tx_ring[entry].link = virt_to_le32desc(mc_setup_frm);
+		/* We may nominally release the lock here. */
+
+		/* Set the link in the setup frame. */
+		mc_setup_frm->link =
+			virt_to_le32desc(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));
+
+		wait_for_cmd_done(dev);
+		clear_suspend(last_cmd);
+		/* Immediately trigger the command unit resume. */
+		outb(CUResume, ioaddr + SCBCmd);
+		spin_unlock_irqrestore(&sp->lock, flags);
+		sp->last_cmd_time = jiffies;
+		if (sp->msg_level & NETIF_MSG_RXFILTER)
+			printk(KERN_DEBUG " CmdMCSetup frame length %d in entry %d.\n",
+				   dev->mc_count, entry);
+	}
+
+	sp->rx_mode = new_rx_mode;
+}
+
+/* Power-management and driver-lifecycle event dispatcher.  Handles
+   attach/detach, suspend/resume, and ACPI power transitions for one
+   device.  Returns 0 on success, -1 for unhandled events. */
+static int speedo_pwr_event(void *dev_instance, int event)
+{
+	struct net_device *dev = dev_instance;
+	struct speedo_private *np = (struct speedo_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND:
+		outl(PortPartialReset, ioaddr + SCBPort);
+		break;
+	case DRV_RESUME:
+		speedo_resume(dev);
+		/* Force a reload of the Rx mode and renegotiated link state. */
+		np->rx_mode = RxInvalidMode;
+		np->flow_ctrl = np->partner = 0;
+		set_rx_mode(dev);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		/* Unlink this device from the driver's module-global list. */
+		for (devp = &root_speedo_dev; *devp; devp = next) {
+			next = &((struct speedo_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	case DRV_PWR_DOWN:
+	case DRV_PWR_UP:
+		acpi_set_pwr_state(np->pci_dev, event==DRV_PWR_DOWN ? ACPI_D3:ACPI_D0);
+		break;
+	case DRV_PWR_WakeOn:
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+
+#if defined(MODULE) || (LINUX_VERSION_CODE >= 0x020400)
+
+/* Module entry point: print the driver version and register with the
+   PCI driver core.  Returns the number of cards found, or a negative
+   value when none were detected. */
+int init_module(void)
+{
+	int found;
+
+	/* Emit version even if no cards detected. */
+	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+
+	found = pci_drv_register(&eepro100_drv_id, NULL);
+	if (found < 0)
+		printk(KERN_INFO "eepro100: No cards found, driver not installed.\n");
+	return found;
+}
+
+/* Module exit: unregister from the PCI core, then walk the driver's
+   device list, unregistering each device, releasing its I/O resources,
+   restoring its original ACPI power state, and freeing its memory. */
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+	pci_drv_unregister(&eepro100_drv_id);
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_speedo_dev) {
+		struct speedo_private *sp = (void *)root_speedo_dev->priv;
+		unregister_netdev(root_speedo_dev);
+#ifdef USE_IO_OPS
+		release_region(root_speedo_dev->base_addr,
+					   pci_id_tbl[sp->chip_id].io_size);
+#else
+		iounmap((char *)root_speedo_dev->base_addr);
+#endif
+		acpi_set_pwr_state(sp->pci_dev, sp->acpi_pwr);
+		/* Save the link before freeing the device that carries it. */
+		next_dev = sp->next_module;
+		if (sp->priv_addr)
+			kfree(sp->priv_addr);
+		kfree(root_speedo_dev);
+		root_speedo_dev = next_dev;
+	}
+}
+
+#if (LINUX_VERSION_CODE >= 0x020400) && 0
+module_init(init_module);
+module_exit(cleanup_module);
+#endif
+
+#else /* not MODULE */
+
+/* Compiled-in probe entry point: register with the PCI driver core and,
+   when at least zero cards were found, announce the driver version.
+   Returns the pci_drv_register() result. */
+int eepro100_probe(struct net_device *dev)
+{
+	int found = pci_drv_register(&eepro100_drv_id, dev);
+
+	/* Only emit the version if the driver is being used. */
+	if (found >= 0)
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	return found;
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` eepro100.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c eepro100.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c eepro100.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/eexpress.c b/linux/src/drivers/net/eexpress.c
new file mode 100644
index 0000000..9c816ee
--- /dev/null
+++ b/linux/src/drivers/net/eexpress.c
@@ -0,0 +1,1285 @@
+/* $Id: eexpress.c,v 1.1 1999/04/26 05:52:09 tb Exp $
+ *
+ * Intel EtherExpress device driver for Linux
+ *
+ * Original version written 1993 by Donald Becker
+ * Modularized by Pauline Middelink <middelin@polyware.iaf.nl>
+ * Changed to support io= irq= by Alan Cox <Alan.Cox@linux.org>
+ * Reworked 1995 by John Sullivan <js10039@cam.ac.uk>
+ * More fixes by Philip Blundell <pjb27@cam.ac.uk>
+ * Added the Compaq LTE Alan Cox <alan@redhat.com>
+ *
+ * Note - this driver is experimental still - it has problems on faster
+ * machines. Someone needs to sit down and go through it line by line with
+ * a databook...
+ */
+
+/*
+ * The original EtherExpress driver was just about usable, but
+ * suffered from a long startup delay, a hard limit of 16k memory
+ * usage on the card (EtherExpress 16s have either 32k or 64k),
+ * and random locks under load. The last was particularly annoying
+ * and made running eXceed/W preferable to Linux/XFree. After hacking
+ * through the driver for a couple of days, I had fixed most of the
+ * card handling errors, at the expense of turning the code into
+ * a complete jungle, but still hadn't tracked down the lock-ups.
+ * I had hoped these would be an IP bug, but failed to reproduce them
+ * under other drivers, so decided to start from scratch and rewrite
+ * the driver cleanly. And here it is.
+ *
+ * It's still not quite there, but self-corrects a lot more problems.
+ * the 'CU wedged, resetting...' message shouldn't happen at all, but
+ * at least we recover. It still locks occasionally, any ideas welcome.
+ *
+ * The original startup delay experienced by some people was due to the
+ * first ARP request for the address of the default router getting lost.
+ * (mostly the reply we were getting back was arriving before our
+ * hardware address was set up, or before the configuration sequence
+ * had told the card NOT to strip of the frame header). If you a long
+ * startup delay, you may have lost this ARP request/reply, although
+ * the original cause has been fixed. However, it is more likely that
+ * you've just locked under this version.
+ *
+ * The main changes are in the 586 initialization procedure (which was
+ * just broken before - the EExp is a strange beasty and needs careful
+ * handling) the receive buffer handling (we now use a non-terminating
+ * circular list of buffers, which stops the card giving us out-of-
+ * resources errors), and the transmit code. The driver is also more
+ * structured, and I have tried to keep the kernel interface separate
+ * from the hardware interface (although some routines naturally want
+ * to do both).
+ *
+ * John Sullivan
+ *
+ * 18/5/95:
+ *
+ * The lock-ups seem to happen when you access card memory after a 586
+ * reset. This happens only 1 in 12 resets, on a random basis, and
+ * completely locks the machine. As far as I can see there is no
+ * workaround possible - the only thing to be done is make sure we
+ * never reset the card *after* booting the kernel - once at probe time
+ * must be sufficient, and we'll just have to put up with that failing
+ * occasionally (or buy a new NIC). By the way, this looks like a
+ * definite card bug, since Intel's own driver for DOS does exactly the
+ * same.
+ *
+ * This bug makes switching in and out of promiscuous mode a risky
+ * business, since we must do a 586 reset each time.
+ */
+
+/*
+ * Sources:
+ *
+ * The original eexpress.c by Donald Becker
+ * Sources: the Crynwr EtherExpress driver source.
+ * the Intel Microcommunications Databook Vol.1 1990
+ *
+ * wavelan.c and i82586.h
+ * This was invaluable for the complete '586 configuration details
+ * and command format.
+ *
+ * The Crynwr sources (again)
+ * Not as useful as the Wavelan driver, but then I had eexpress.c to
+ * go off.
+ *
+ * The Intel EtherExpress 16 ethernet card
+ * Provided the only reason I want to see a working etherexpress driver.
+ * A lot of fixes came from just observing how the card (mis)behaves when
+ * you prod it.
+ *
+ */
+
+static char version[] =
+"eexpress.c: v0.10 04-May-95 John Sullivan <js10039@cam.ac.uk>\n"
+" v0.14 19-May-96 Philip Blundell <phil@tazenda.demon.co.uk>\n"
+" v0.15 04-Aug-98 Alan Cox <alan@redhat.com>\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+
+/*
+ * Not actually used yet - may be implemented when the driver has
+ * been debugged!
+ *
+ * Debug Level Driver Status
+ * 0 Final release
+ * 1 Beta test
+ * 2
+ * 3
+ * 4 Report timeouts & 586 errors (normal debug level)
+ * 5 Report all major events
+ * 6 Dump sent/received packet contents
+ * 7 Report function entry/exit
+ */
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 4
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+#undef F_DEB
+
+#include "eth82586.h"
+
+#define PRIV(x) ((struct net_local *)(x)->priv)
+#define EEXP_IO_EXTENT 16
+
+/*
+ * Private data declarations
+ */
+
+/* Per-device driver state.  The rx/tx fields are 16-bit offsets into
+ * the card's shared-memory window (accessed through the READ_PTR /
+ * WRITE_PTR ports), not host pointers. */
+struct net_local
+{
+ struct enet_statistics stats;
+ unsigned long init_time; /* jiffies when eexp_hw_init586 called */
+ unsigned short rx_first; /* first rx buf, same as RX_BUF_START */
+ unsigned short rx_last; /* last rx buf */
+ unsigned short tx_head; /* next free tx buf */
+ unsigned short tx_reap; /* first in-use tx buf */
+ unsigned short tx_tail; /* previous tx buf to tx_head */
+ unsigned short tx_link; /* last known-executing tx buf */
+ unsigned short last_tx_restart; /* set to tx_link when we restart the CU */
+ unsigned char started; /* bit 0: CU configured; bit 1: RU seen ready */
+ unsigned char promisc; /* promiscuous state recorded at last init586 */
+ unsigned short rx_buf_start;
+ unsigned short rx_buf_end;
+ unsigned short num_tx_bufs;
+ unsigned short num_rx_bufs;
+};
+
+/* Bootstrap image copied verbatim into card memory at offset 0 by
+ * eexp_hw_init586(): the i82586 SCP, ISCP and SCB system blocks,
+ * followed by the initial Configure / IA-setup / Nop command chain.
+ * CONF_LINK and CONF_HW_ADDR below are byte offsets into this image. */
+unsigned short start_code[] = {
+ 0x0000, /* SCP: set bus to 16 bits */
+ 0x0000,0x0000, /* junk */
+ 0x0000,0x0000, /* address of ISCP (lo,hi) */
+
+ 0x0001, /* ISCP: busy - cleared after reset */
+ 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */
+
+ 0x0000,0x0000, /* SCB: status, commands */
+ 0x0000,0x0000, /* links to first command block, first receive descriptor */
+ 0x0000,0x0000, /* CRC error, alignment error counts */
+ 0x0000,0x0000, /* out of resources, overrun error counts */
+
+ 0x0000,0x0000, /* pad */
+ 0x0000,0x0000,
+
+ 0x0000,Cmd_Config, /* startup configure sequence, at 0x0020 */
+ 0x0032, /* link to next command */
+ 0x080c, /* 12 bytes follow : fifo threshold=8 */
+ 0x2e40, /* don't rx bad frames : SRDY/ARDY => ext. sync. : preamble len=8
+ * take addresses from data buffers : 6 bytes/address */
+ 0x6000, /* default backoff method & priority : interframe spacing = 0x60 */
+ 0xf200, /* slot time=0x200 : max collision retry = 0xf */
+ 0x0000, /* no HDLC : normal CRC : enable broadcast : disable promiscuous/multicast modes */
+ 0x003c, /* minimum frame length = 60 octets) */
+
+ 0x0000,Cmd_INT|Cmd_SetAddr,
+ 0x003e, /* link to next command */
+ 0x0000,0x0000,0x0000, /* hardware address placed here, 0x0038 */
+ 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */
+ 0x003e,
+
+ 0x0000
+
+};
+
+#define CONF_LINK 0x0020
+#define CONF_HW_ADDR 0x0038
+
+/* maps irq number to EtherExpress magic value */
+static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
+
+/*
+ * Prototypes for Linux interface
+ */
+
+extern int express_probe(struct device *dev);
+static int eexp_open (struct device *dev);
+static int eexp_close(struct device *dev);
+static struct enet_statistics *eexp_stats(struct device *dev);
+static int eexp_xmit (struct sk_buff *buf, struct device *dev);
+
+static void eexp_irq (int irq, void *dev_addr, struct pt_regs *regs);
+static void eexp_set_multicast(struct device *dev);
+
+/*
+ * Prototypes for hardware access functions
+ */
+
+static void eexp_hw_rx (struct device *dev);
+static void eexp_hw_tx (struct device *dev, unsigned short *buf, unsigned short len);
+static int eexp_hw_probe (struct device *dev,unsigned short ioaddr);
+static unsigned short eexp_hw_readeeprom(unsigned short ioaddr, unsigned char location);
+
+static unsigned short eexp_hw_lasttxstat(struct device *dev);
+static void eexp_hw_txrestart (struct device *dev);
+
+static void eexp_hw_txinit (struct device *dev);
+static void eexp_hw_rxinit (struct device *dev);
+
+static void eexp_hw_init586 (struct device *dev);
+static void eexp_hw_ASICrst (struct device *dev);
+
+/*
+ * Linux interface
+ */
+
+/*
+ * checks for presence of EtherExpress card
+ */
+
+/* Autoprobe entry point.  A caller-supplied base address >= 0x200 is
+ * probed directly; a smaller nonzero address is rejected.  Otherwise
+ * each candidate port is tested for the EtherExpress ID pattern: four
+ * successive reads of ID_PORT each return a nibble of the signature
+ * plus its position, and a real card reassembles to 0xbaba.
+ * (Follows the old probe convention of returning positive errnos.) */
+int express_probe(struct device *dev)
+{
+ unsigned short ports[] = { 0x0300,0x0270,0x0320,0x0340,0 };
+ unsigned short base = dev->base_addr;
+ unsigned short *p;
+
+ if (base & 0xfe00)
+ return eexp_hw_probe(dev, base);
+ if (base)
+ return ENXIO;
+
+ for (p = &ports[0]; *p; p++)
+ {
+ unsigned short id_sum = 0;
+ int nibble;
+
+ for (nibble = 0; nibble < 4; nibble++)
+ {
+ unsigned short v;
+ v = inb(*p + ID_PORT);
+ id_sum |= (v >> 4) << ((v & 0x03) << 2);
+ }
+ if (id_sum == 0xbaba && eexp_hw_probe(dev, *p) == 0)
+ return 0;
+ }
+ return ENODEV;
+}
+
+/*
+ * open and initialize the adapter, ready for use
+ */
+
+/* Bring the interface up: validate the IRQ, claim it (recording the
+ * device in irq2dev_map for the handler), reserve the I/O window and
+ * kick off the 586 configuration sequence.  Returns 0 or -errno. */
+static int eexp_open(struct device *dev)
+{
+ int irq = dev->irq;
+ unsigned short ioaddr = dev->base_addr;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_open()\n", dev->name);
+#endif
+
+ /* irqrmap[irq]==0 means this IRQ cannot be routed by the card. */
+ if (!irq || !irqrmap[irq])
+ return -ENXIO;
+
+ /* The middle clause is a comma expression that records dev in the
+  * map and yields 0, so evaluation falls through to request_irq();
+  * NOTE(review): the map entry is not cleared if request_irq fails. */
+ if (irq2dev_map[irq] ||
+ /* more consistent, surely? */
+ ((irq2dev_map[irq]=dev),0) ||
+ request_irq(irq,&eexp_irq,0,"eexpress",NULL))
+ return -EAGAIN;
+
+ request_region(ioaddr, EEXP_IO_EXTENT, "eexpress");
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ /* Start the (asynchronous) 586 setup; the IRQ handler completes it. */
+ eexp_hw_init586(dev);
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*
+ * close and disable the interface, leaving
+ * the 586 in reset
+ */
+/*
+ * close and disable the interface, leaving
+ * the 586 in reset
+ */
+/* Mirror image of eexp_open(): mask the card IRQ, suspend the command
+ * and receive units, release the IRQ and I/O window, and hold the 586
+ * in reset.  Always returns 0. */
+static int eexp_close(struct device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
+ PRIV(dev)->started = 0;
+ outw(SCB_CUsuspend|SCB_RUsuspend,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ free_irq(irq,NULL);
+ irq2dev_map[irq] = NULL;
+ outb(i586_RST,ioaddr+EEPROM_Ctrl);
+ /* Release exactly what eexp_open() reserved (was a magic "16"). */
+ release_region(ioaddr,EEXP_IO_EXTENT);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Return interface stats
+ */
+
+/* Report the statistics accumulated in the driver's private area.
+ * The card also keeps error counters in the SCB which are not folded
+ * in here, so some error classes (slow DMA, out-of-resources) may be
+ * under-reported. */
+static struct enet_statistics *eexp_stats(struct device *dev)
+{
+ struct net_local *priv = (struct net_local *)dev->priv;
+ return &priv->stats;
+}
+
+/*
+ * Called to transmit a packet, or to allow us to right ourselves
+ * if the kernel thinks we've died.
+ */
+
+/* Transmit entry point.  Also doubles (old-kernel convention) as the
+ * watchdog: when called with dev->tbusy set it inspects how long the
+ * transmitter has been stuck and escalates through CU restart, TX-ring
+ * re-init, and a full 586 re-init.  buf==NULL is the kernel's
+ * "poll/unjam" call, not a real packet. */
+static int eexp_xmit(struct sk_buff *buf, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
+#endif
+
+ /* Mask the card interrupt while we manipulate shared TX state. */
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ if (dev->tbusy)
+ {
+ /* This will happen, but hopefully not as often as when
+ * tbusy==0. If it happens too much, we probably ought
+ * to think about unwedging ourselves...
+ */
+ if (test_bit(0,(void *)&PRIV(dev)->started))
+ {
+ if ((jiffies - dev->trans_start)>5)
+ {
+ /* No progress since the last restart from this same spot:
+  * assume the TX ring itself is bad and rebuild it. */
+ if (lp->tx_link==lp->last_tx_restart)
+ {
+ unsigned short boguscount=200,rsst;
+ printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n",
+ dev->name,inw(ioaddr+SCB_STATUS));
+ eexp_hw_txinit(dev);
+ lp->last_tx_restart = 0;
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ /* Spin until the CU acknowledges; re-kick every 200 polls.
+  * NOTE(review): this loop has no overall bound. */
+ while (!SCB_complete(rsst=inw(ioaddr+SCB_STATUS)))
+ {
+ if (!--boguscount)
+ {
+ boguscount=200;
+ printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n",
+ dev->name,rsst);
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ else
+ {
+ unsigned short status = inw(ioaddr+SCB_STATUS);
+ if (SCB_CUdead(status))
+ {
+ /* CU stopped but ring looks sane: just restart it. */
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n",
+ dev->name, status, txstatus);
+ eexp_hw_txrestart(dev);
+ }
+ else
+ {
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ /* CU claims to be alive but produced no status:
+  * wedged — last resort is a full 586 re-init. */
+ if (dev->tbusy && !txstatus)
+ {
+ printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
+ dev->name,status,txstatus);
+ eexp_hw_init586(dev);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ /* Initialisation itself (started==0) has taken too long. */
+ if ((jiffies-lp->init_time)>10)
+ {
+ unsigned short status = inw(ioaddr+SCB_STATUS);
+ printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n",
+ dev->name, status);
+ eexp_hw_init586(dev);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ }
+ }
+
+ /* Kernel poll: reap finished transmits, restart a dead CU, and let
+  * queued packets flow.  NOTE(review): dev_kfree_skb() is called here
+  * with buf==NULL — harmless only if the kernel's dev_kfree_skb
+  * tolerates NULL; verify. */
+ if (buf==NULL)
+ {
+ unsigned short status = inw(ioaddr+SCB_STATUS);
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ if (SCB_CUdead(status))
+ {
+ printk(KERN_WARNING "%s: CU has died! status %04x %04x, attempting to restart...\n",
+ dev->name, status, txstatus);
+ lp->stats.tx_errors++;
+ eexp_hw_txrestart(dev);
+ }
+ dev_tint(dev);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ dev_kfree_skb(buf, FREE_WRITE);
+ return 0;
+ }
+
+ /* Atomically claim the transmitter; if it was already busy the
+  * packet is dropped (freed below) rather than requeued. */
+ if (set_bit(0,(void *)&dev->tbusy))
+ {
+ lp->stats.tx_dropped++;
+ }
+ else
+ {
+ /* Pad short frames up to the Ethernet minimum. */
+ unsigned short length = (ETH_ZLEN < buf->len) ? buf->len : ETH_ZLEN;
+ unsigned short *data = (unsigned short *)buf->data;
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ eexp_hw_tx(dev,data,length);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ }
+ dev_kfree_skb(buf, FREE_WRITE);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ return 0;
+}
+
+/*
+ * Handle an EtherExpress interrupt
+ * If we've finished initializing, start the RU and CU up.
+ * If we've already started, reap tx buffers, handle any received packets,
+ * check to make sure we've not become wedged.
+ */
+
+/* Interrupt handler.  Saves and restores the card's READ/WRITE shared
+ * memory pointers around all work (they are global card state an
+ * interrupted code path may be using).  Completes the deferred 586
+ * start-up on the first SCB-complete event, otherwise reaps finished
+ * transmits, drains received frames, and restarts the RU if it has
+ * left the ready state. */
+static void eexp_irq(int irq, void *dev_info, struct pt_regs *regs)
+{
+ struct device *dev = irq2dev_map[irq];
+ struct net_local *lp;
+ unsigned short ioaddr,status,ack_cmd;
+ unsigned short old_rp,old_wp;
+
+ if (dev==NULL)
+ {
+ printk(KERN_WARNING "net_interrupt(): irq %d for unknown device caught by EExpress\n",irq);
+ return;
+ }
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: interrupt\n", dev->name);
+#endif
+
+ dev->interrupt = 1; /* should this be reset on exit? */
+
+ lp = (struct net_local *)dev->priv;
+ ioaddr = dev->base_addr;
+
+ outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
+ old_rp = inw(ioaddr+READ_PTR);
+ old_wp = inw(ioaddr+WRITE_PTR);
+ status = inw(ioaddr+SCB_STATUS);
+ /* ack_cmd starts as the acknowledge bits for every event we saw;
+  * CU/RU start commands may be OR'd in below before one write-back. */
+ ack_cmd = SCB_ack(status);
+
+ if (PRIV(dev)->started==0 && SCB_complete(status))
+ {
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: SCBcomplete event received\n", dev->name);
+#endif
+ /* Configure sequence finished: wait for the CU to go idle, then
+  * hand it the real TX ring and the RU its buffer list. */
+ while (SCB_CUstat(status)==2)
+ status = inw_p(ioaddr+SCB_STATUS);
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: CU went non-active (status = %08x)\n", dev->name, status);
+#endif
+ PRIV(dev)->started=1;
+ outw_p(lp->tx_link,ioaddr+SCB_CBL);
+ outw_p(PRIV(dev)->rx_buf_start,ioaddr+SCB_RFA);
+ ack_cmd |= SCB_CUstart | SCB_RUstart;
+ }
+ else if (PRIV(dev)->started)
+ {
+ /* Reap completed transmit buffers (return value unused here). */
+ unsigned short txstatus;
+ txstatus = eexp_hw_lasttxstat(dev);
+ }
+
+ if (SCB_rxdframe(status))
+ {
+ eexp_hw_rx(dev);
+ }
+
+ /* started bit 1 means the RU has been seen ready (state 4) once;
+  * any later state change is treated as an RU failure. */
+ if ((PRIV(dev)->started&2)!=0 && SCB_RUstat(status)!=4)
+ {
+ printk(KERN_WARNING "%s: RU stopped status %04x, restarting...\n",
+ dev->name,status);
+ lp->stats.rx_errors++;
+ eexp_hw_rxinit(dev);
+ outw(PRIV(dev)->rx_buf_start,ioaddr+SCB_RFA);
+ ack_cmd |= SCB_RUstart;
+ }
+ else if (PRIV(dev)->started==1 && SCB_RUstat(status)==4)
+ PRIV(dev)->started|=2;
+
+ /* Single write-back of acks + any start commands, then channel
+  * attention to make the 586 notice. */
+ outw(ack_cmd,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+ outb(SIRQ_en|irqrmap[irq],ioaddr+SET_IRQ);
+ dev->interrupt = 0;
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: leaving eexp_irq()\n", dev->name);
+#endif
+ return;
+}
+
+/*
+ * Hardware access functions
+ */
+
+/*
+ * Check all the receive buffers, and hand any received packets
+ * to the upper levels. Basic sanity check on each frame
+ * descriptor
+ */
+
+/* Walk the circular receive descriptor list starting at rx_first,
+ * sanity-check each completed frame descriptor, pass good frames up
+ * the stack, and re-arm each descriptor for reuse.  The card's
+ * READ/WRITE pointers are saved and restored around the walk. */
+static void eexp_hw_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short old_rp = inw(ioaddr+READ_PTR);
+ unsigned short rx_block = lp->rx_first;
+ unsigned short boguscount = lp->num_rx_bufs;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name);
+#endif
+
+ /* Each inw() after setting READ_PTR auto-advances through the
+  * descriptor: status, command, next-link, buffer pointer. */
+ while (outw(rx_block,ioaddr+READ_PTR),boguscount--)
+ {
+ unsigned short status = inw(ioaddr);
+ unsigned short rfd_cmd = inw(ioaddr);
+ unsigned short rx_next = inw(ioaddr);
+ unsigned short pbuf = inw(ioaddr);
+ unsigned short pkt_len;
+
+ if (FD_Done(status))
+ {
+ outw(pbuf,ioaddr+READ_PTR);
+ pkt_len = inw(ioaddr);
+
+ /* Descriptor invariants set up by eexp_hw_rxinit(): no
+  * command bits, data area 0x16 bytes in, and both
+  * end-of-frame/buffer-used bits (0xc000) set in the count. */
+ if (rfd_cmd!=0x0000 || pbuf!=rx_block+0x16
+ || (pkt_len & 0xc000)!=0xc000)
+ {
+ printk(KERN_WARNING "%s: Rx frame at %04x corrupted, status %04x, cmd %04x, "
+ "next %04x, pbuf %04x, len %04x\n",dev->name,rx_block,
+ status,rfd_cmd,rx_next,pbuf,pkt_len);
+ /* Don't let a corrupt descriptor consume our loop budget. */
+ boguscount++;
+ continue;
+ }
+ else if (!FD_OK(status))
+ {
+ lp->stats.rx_errors++;
+ if (FD_CRC(status))
+ lp->stats.rx_crc_errors++;
+ if (FD_Align(status))
+ lp->stats.rx_frame_errors++;
+ if (FD_Resrc(status))
+ lp->stats.rx_fifo_errors++;
+ if (FD_DMA(status))
+ lp->stats.rx_over_errors++;
+ if (FD_Short(status))
+ lp->stats.rx_length_errors++;
+ }
+ else
+ {
+ struct sk_buff *skb;
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+16);
+ if (skb == NULL)
+ {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ /* 2-byte reserve aligns the IP header on a 4-byte boundary. */
+ skb_reserve(skb, 2);
+ /* Frame data lives 10 bytes into the buffer descriptor. */
+ outw(pbuf+10,ioaddr+READ_PTR);
+ insw(ioaddr,skb_put(skb,pkt_len),(pkt_len+1)>>1);
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ /* Clear status and command so the 586 can reuse this slot. */
+ outw(rx_block,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ }
+ rx_block = rx_next;
+ }
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/*
+ * Hand a packet to the card for transmission
+ * If we get here, we MUST have already checked
+ * to make sure there is room in the transmit
+ * buffer region
+ */
+
+/* Write one frame into the TX slot at tx_head: a transmit command
+ * block, its buffer descriptor, and the frame data, then link the
+ * previous slot's next-pointer to this one so the CU picks it up.
+ * Caller must already have verified there is a free slot and hold
+ * the card IRQ masked.  The write sequence mirrors the slot layout
+ * built by eexp_hw_txinit() (offsets +0x08 = BD, +0x0e = link,
+ * +0x16 = data). */
+static void eexp_hw_tx(struct device *dev, unsigned short *buf, unsigned short len)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+
+ outw(lp->tx_head,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(Cmd_INT|Cmd_Xmit,ioaddr);
+ outw(lp->tx_head+0x08,ioaddr);
+ outw(lp->tx_head+0x0e,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(lp->tx_head+0x08,ioaddr);
+ /* 0x8000 marks end-of-frame in the buffer descriptor count. */
+ outw(0x8000|len,ioaddr);
+ outw(-1,ioaddr);
+ outw(lp->tx_head+0x16,ioaddr);
+ outw(0,ioaddr);
+ outsw(ioaddr,buf,(len+1)>>1);
+ /* Patch the previous command block's link to chain in this one. */
+ outw(lp->tx_tail+0x0c,ioaddr+WRITE_PTR);
+ outw(lp->tx_head,ioaddr);
+ dev->trans_start = jiffies;
+ lp->tx_tail = lp->tx_head;
+ /* Advance tx_head circularly through the slot ring. */
+ if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_head = TX_BUF_START;
+ else
+ lp->tx_head += TX_BUF_SIZE;
+ /* Ring not full yet: allow further transmits. */
+ if (lp->tx_head != lp->tx_reap)
+ dev->tbusy = 0;
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/*
+ * Sanity check the suspected EtherExpress card
+ * Read hardware address, reset card, size memory and
+ * initialize buffer memory pointers. These should
+ * probably be held in dev->priv, in case someone has 2
+ * differently configured cards in their box (Arghhh!)
+ */
+
+/* Verify a suspected EtherExpress at ioaddr: read and validate the
+ * station address from EEPROM, decode IRQ and media selection, reset
+ * the ASIC, size the on-board memory (32k vs 64k) by a read-back test
+ * through the two memory banks, and wire up the device methods.
+ * Returns 0 on success or -errno. */
+static int eexp_hw_probe(struct device *dev, unsigned short ioaddr)
+{
+ unsigned short hw_addr[3];
+ int i;
+ unsigned char *chw_addr = (unsigned char *)hw_addr;
+
+ printk("%s: EtherExpress at %#x, ",dev->name,ioaddr);
+
+ /* EEPROM words 2..4 hold the station address, most significant
+  * word first. */
+ hw_addr[0] = eexp_hw_readeeprom(ioaddr,2);
+ hw_addr[1] = eexp_hw_readeeprom(ioaddr,3);
+ hw_addr[2] = eexp_hw_readeeprom(ioaddr,4);
+
+ /* Standard Address or Compaq LTE Address */
+ if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) ||
+ (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00))))
+ {
+ printk("rejected: invalid address %04x%04x%04x\n",
+ hw_addr[2],hw_addr[1],hw_addr[0]);
+ return -ENODEV;
+ }
+
+ dev->base_addr = ioaddr;
+ /* EEPROM stores the address byte-reversed relative to wire order. */
+ for ( i=0 ; i<6 ; i++ )
+ dev->dev_addr[i] = chw_addr[5-i];
+
+ {
+ char irqmap[]={0, 9, 3, 4, 5, 10, 11, 0};
+ char *ifmap[]={"AUI", "BNC", "10baseT"};
+ enum iftype {AUI=0, BNC=1, TP=2};
+ unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
+
+ /* Top three bits of EEPROM word 0 select the IRQ; bit 12 plus
+  * EEPROM word 5 bit 0 select the media port. */
+ dev->irq = irqmap[setupval>>13];
+ dev->if_port = !(setupval & 0x1000) ? AUI :
+ eexp_hw_readeeprom(ioaddr,5) & 0x1 ? TP : BNC;
+
+ printk("IRQ %d, Interface %s, ",dev->irq,ifmap[dev->if_port]);
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ outb(0,ioaddr+SET_IRQ);
+ }
+
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (!dev->priv)
+ return -ENOMEM;
+
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ eexp_hw_ASICrst(dev);
+
+ {
+ /* Memory sizing: write distinct patterns at the same offset in
+  * the low (i586mso) and high (0x8000+i586mso) banks.  On a 32k
+  * card the banks alias, so the patterns interfere; on a 64k
+  * card both read back intact. */
+ unsigned short i586mso = 0x023e;
+ unsigned short old_wp,old_rp,old_a0,old_a1;
+ unsigned short a0_0,a1_0,a0_1,a1_1;
+
+ old_wp = inw(ioaddr+WRITE_PTR);
+ old_rp = inw(ioaddr+READ_PTR);
+ outw(0x8000+i586mso,ioaddr+READ_PTR);
+ old_a1 = inw(ioaddr);
+ outw(i586mso,ioaddr+READ_PTR);
+ old_a0 = inw(ioaddr);
+ outw(i586mso,ioaddr+WRITE_PTR);
+ outw(0x55aa,ioaddr);
+ outw(i586mso,ioaddr+READ_PTR);
+ a0_0 = inw(ioaddr);
+ outw(0x8000+i586mso,ioaddr+WRITE_PTR);
+ outw(0x5a5a,ioaddr);
+ outw(0x8000+i586mso,ioaddr+READ_PTR);
+ a1_0 = inw(ioaddr);
+ outw(i586mso,ioaddr+READ_PTR);
+ a0_1 = inw(ioaddr);
+ outw(i586mso,ioaddr+WRITE_PTR);
+ outw(0x1234,ioaddr);
+ outw(0x8000+i586mso,ioaddr+READ_PTR);
+ a1_1 = inw(ioaddr);
+
+ if ((a0_0 != a0_1) || (a1_0 != a1_1) ||
+ (a1_0 != 0x5a5a) || (a0_0 != 0x55aa))
+ {
+ printk("32k\n");
+ PRIV(dev)->rx_buf_end = 0x7ff6;
+ PRIV(dev)->num_tx_bufs = 4;
+ /* rx_buf_start for this case is set by eexp_hw_txinit(). */
+ }
+ else
+ {
+ printk("64k\n");
+ PRIV(dev)->num_tx_bufs = 8;
+ PRIV(dev)->rx_buf_start = TX_BUF_START + (PRIV(dev)->num_tx_bufs*TX_BUF_SIZE);
+ PRIV(dev)->rx_buf_end = 0xfff6;
+ }
+
+ /* Restore the bytes and pointers the sizing test clobbered. */
+ outw(0x8000+i586mso,ioaddr+WRITE_PTR);
+ outw(old_a1,ioaddr);
+ outw(i586mso,ioaddr+WRITE_PTR);
+ outw(old_a0,ioaddr);
+ outw(old_wp,ioaddr+WRITE_PTR);
+ outw(old_rp,ioaddr+READ_PTR);
+ }
+
+ if (net_debug)
+ printk("%s", version);
+ dev->open = eexp_open;
+ dev->stop = eexp_close;
+ dev->hard_start_xmit = eexp_xmit;
+ dev->get_stats = eexp_stats;
+ dev->set_multicast_list = &eexp_set_multicast;
+ ether_setup(dev);
+ return 0;
+}
+
+/*
+ * Read a word from eeprom location (0-63?)
+ */
+/* Bit-bang one 16-bit word out of the serial EEPROM: clock out the
+ * read opcode plus address (9 bits, MSB first), then clock in 16 data
+ * bits.  The i586_RST bit is kept asserted throughout so the 586
+ * stays quiet while we own the control port. */
+static unsigned short eexp_hw_readeeprom(unsigned short ioaddr, unsigned char location)
+{
+ /* 0x180 = read opcode; low bits select the word address. */
+ unsigned short cmd = 0x180|(location&0x7f);
+ unsigned short rval = 0,wval = EC_CS|i586_RST;
+ int i;
+
+ outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl);
+ /* Shift out the 9 command bits, strobing EC_Clk for each. */
+ for ( i=0x100 ; i ; i>>=1 )
+ {
+ if (cmd&i)
+ wval |= EC_Wr;
+ else
+ wval &= ~EC_Wr;
+
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ wval &= ~EC_Wr;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ /* Clock in the 16 data bits, MSB first. */
+ for ( i=0x8000 ; i ; i>>=1 )
+ {
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd)
+ rval |= i;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ /* Deselect the EEPROM with one final clock. */
+ wval &= ~EC_CS;
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ return rval;
+}
+
+/*
+ * Reap tx buffers and return last transmit status.
+ * if ==0 then either:
+ * a) we're not transmitting anything, so why are we here?
+ * b) we've died.
+ * otherwise, Stat_Busy(return) means we've still got some packets
+ * to transmit, Stat_Done(return) means our buffers should be empty
+ * again
+ */
+
+/* Reap completed transmit command blocks from tx_reap forward,
+ * folding their status into the statistics, and return the status of
+ * the last block examined.  0x0000 means nothing was in flight (or
+ * the CU died before producing any status).  Stops at the first
+ * still-busy block, remembering it in tx_link for CU restarts. */
+static unsigned short eexp_hw_lasttxstat(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_rp = inw(ioaddr+READ_PTR);
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short tx_block = lp->tx_reap;
+ unsigned short status;
+
+ /* Nothing queued and transmitter idle: nothing to reap. */
+ if (!test_bit(0,(void *)&dev->tbusy) && lp->tx_head==lp->tx_reap)
+ return 0x0000;
+
+ do
+ {
+ outw(tx_block,ioaddr+READ_PTR);
+ status = inw(ioaddr);
+ if (!Stat_Done(status))
+ {
+ /* This block is still executing: record it as the CU's
+  * current position and stop reaping. */
+ lp->tx_link = tx_block;
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+ return status;
+ }
+ else
+ {
+ lp->last_tx_restart = 0;
+ lp->stats.collisions += Stat_NoColl(status);
+ if (!Stat_OK(status))
+ {
+ if (Stat_Abort(status))
+ lp->stats.tx_aborted_errors++;
+ if (Stat_TNoCar(status) || Stat_TNoCTS(status))
+ lp->stats.tx_carrier_errors++;
+ if (Stat_TNoDMA(status))
+ lp->stats.tx_fifo_errors++;
+ }
+ else
+ lp->stats.tx_packets++;
+ }
+ /* Advance tx_reap circularly; a slot is now free, so clear
+  * tbusy and let queued sends proceed. */
+ if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_reap = tx_block = TX_BUF_START;
+ else
+ lp->tx_reap = tx_block += TX_BUF_SIZE;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ while (lp->tx_reap != lp->tx_head);
+
+ /* Ring fully drained: park tx_link on the last block's BD. */
+ lp->tx_link = lp->tx_tail + 0x08;
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+
+ return status;
+}
+
+/*
+ * This should never happen. It is called when some higher
+ * routine detects the CU has stopped, to try to restart
+ * it from the last packet we knew we were working on,
+ * or the idle loop if we had finished for the time.
+ */
+
+/* Restart a stopped command unit from tx_link (the last block we know
+ * it was executing).  Retries the CU-start a few times; if the CU
+ * still refuses to run, falls back to a full 586 re-init. */
+static void eexp_hw_txrestart(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+
+ /* Remember where we restarted so eexp_xmit() can detect a CU that
+  * keeps dying at the same spot. */
+ lp->last_tx_restart = lp->tx_link;
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outw(0,ioaddr+SCB_STATUS);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ {
+ unsigned short boguscount=50,failcount=5;
+ /* Poll for any SCB status; re-issue the start every 50 (then
+  * 100) polls, giving up after 5 failures. */
+ while (!inw(ioaddr+SCB_STATUS))
+ {
+ if (!--boguscount)
+ {
+ if (--failcount)
+ {
+ printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n",
+ dev->name, inw(ioaddr+SCB_STATUS), inw(ioaddr+SCB_CMD));
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ boguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name);
+ eexp_hw_init586(dev);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ return;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Writes down the list of transmit buffers into card
+ * memory. Initial separate, repeated transmits link
+ * them into a circular list, such that the CU can
+ * be constantly active, and unlink them as we reap
+ * transmitted packet buffers, so the CU doesn't loop
+ * and endlessly transmit packets. (Try hacking the driver
+ * to send continuous broadcast messages, say ARP requests
+ * on a subnet with Windows boxes running on Novell and
+ * LAN Workplace with EMM386. Amusing to watch them all die
+ * horribly leaving the Linux boxes up!)
+ */
+
+/* Lay out num_tx_bufs empty transmit slots in card memory starting at
+ * TX_BUF_START.  Each slot is a no-op-shaped command block (+0x00),
+ * buffer descriptor (+0x08), self-link (+0x0e) and data area (+0x16),
+ * matching the offsets eexp_hw_tx() later fills in.  Resets the
+ * head/reap/tail/link ring pointers and marks where receive buffers
+ * begin (just past the last TX slot). */
+static void eexp_hw_txinit(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short tx_block = TX_BUF_START;
+ unsigned short curtbuf;
+
+ for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
+ {
+ outw(tx_block,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(Cmd_INT|Cmd_Xmit,ioaddr);
+ outw(tx_block+0x08,ioaddr);
+ outw(tx_block+0x0e,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(tx_block+0x08,ioaddr);
+ outw(0x8000,ioaddr);
+ outw(-1,ioaddr);
+ outw(tx_block+0x16,ioaddr);
+ outw(0x0000,ioaddr);
+ tx_block += TX_BUF_SIZE;
+ }
+ lp->tx_head = TX_BUF_START;
+ lp->tx_reap = TX_BUF_START;
+ lp->tx_tail = tx_block - TX_BUF_SIZE;
+ lp->tx_link = lp->tx_tail + 0x08;
+ lp->rx_buf_start = tx_block;
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/* is this a standard test pattern, or dbecker randomness? */
+
+unsigned short rx_words[] =
+{
+ 0xfeed,0xf00d,0xf001,0x0505,0x2424,0x6565,0xdeaf
+};
+
+/*
+ * Write the circular list of receive buffer descriptors to
+ * card memory. Note, we no longer mark the end of the list,
+ * so if all the buffers fill up, the 82586 will loop until
+ * we free one. This may sound dodgy, but it works, and
+ * it makes the error detection in the interrupt handler
+ * a lot simpler.
+ */
+
+/* Build the circular receive descriptor list in card memory between
+ * rx_buf_start and rx_buf_end.  Each slot gets a frame descriptor
+ * (pre-seeded with the rx_words pattern), a buffer descriptor at
+ * +0x16 and a data area at +0x20.  The final write links the last
+ * slot back to the first, so the list never terminates (see the
+ * comment above: the 82586 loops until we free a buffer). */
+static void eexp_hw_rxinit(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short rx_block = lp->rx_buf_start;
+
+ lp->num_rx_bufs = 0;
+ lp->rx_first = rx_block;
+ do
+ {
+ lp->num_rx_bufs++;
+ outw(rx_block,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(rx_block+RX_BUF_SIZE,ioaddr);
+ outw(rx_block+0x16,ioaddr);
+ outsw(ioaddr, rx_words, sizeof(rx_words)>>1);
+ outw(0x8000,ioaddr);
+ outw(-1,ioaddr);
+ outw(rx_block+0x20,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(0x8000|(RX_BUF_SIZE-0x20),ioaddr);
+ lp->rx_last = rx_block;
+ rx_block += RX_BUF_SIZE;
+ } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
+
+ /* Close the ring: point the last descriptor's link at the first. */
+ outw(lp->rx_last+4,ioaddr+WRITE_PTR);
+ outw(lp->rx_first,ioaddr);
+
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/*
+ * Reset the 586, fill memory (including calls to
+ * eexp_hw_[(rx)(tx)]init()) unreset, and start
+ * the configuration sequence. We don't wait for this
+ * to finish, but allow the interrupt handler to start
+ * the CU and RU for us. We can't start the receive/
+ * transmission system up before we know that the
+ * hardware is configured correctly
+ */
+/* Reset the 586, zero-fill its shared memory, download the bootstrap
+ * image (start_code) with the promiscuous bit and station address
+ * patched in, rebuild the TX/RX rings, release the reset and kick off
+ * the configure command chain.  The CU/RU are started later by the
+ * interrupt handler once configuration completes (see eexp_irq()). */
+static void eexp_hw_init586(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+
+#if NET_DEBUG > 6
+ printk("%s: eexp_hw_init586()\n", dev->name);
+#endif
+
+ lp->started = 0;
+ set_loopback;
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ outb_p(i586_RST,ioaddr+EEPROM_Ctrl);
+ udelay(2000); /* delay 20ms */
+ /* Clear all card memory up to the receive area. */
+ {
+ unsigned long ofs;
+ for (ofs = 0; ofs < lp->rx_buf_end; ofs += 32) {
+ unsigned long i;
+ outw_p(ofs, ioaddr+SM_PTR);
+ for (i = 0; i < 16; i++) {
+ outw_p(0, ioaddr+SM_ADDR(i<<1));
+ }
+ }
+ }
+
+ outw_p(lp->rx_buf_end,ioaddr+WRITE_PTR);
+ /* Patch the promiscuous bit into the Configure command image. */
+ start_code[28] = (dev->flags & IFF_PROMISC)?(start_code[28] | 1):(start_code[28] & ~1);
+ /* Record the promiscuous state as a boolean.  IFF_PROMISC is a
+  * flag bit that would be truncated to zero when stored directly
+  * into the unsigned char lp->promisc, making the saved state
+  * useless to eexp_set_multicast(). */
+ lp->promisc = (dev->flags & IFF_PROMISC) != 0;
+ /* We may die here */
+ outsw(ioaddr, start_code, sizeof(start_code)>>1);
+ outw(CONF_HW_ADDR,ioaddr+WRITE_PTR);
+ outsw(ioaddr,dev->dev_addr,3);
+ eexp_hw_txinit(dev);
+ eexp_hw_rxinit(dev);
+ /* Release the ISCP busy flag and the 586 reset, then issue a
+  * channel attention so the chip reads the SCP. */
+ outw(0,ioaddr+WRITE_PTR);
+ outw(1,ioaddr);
+ outb(0,ioaddr+EEPROM_Ctrl);
+ outw(0,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ /* Wait for the 586 to clear the ISCP busy word, kicking it along
+  * every 50 (then 100) polls, giving up after 5 failed kicks. */
+ {
+ unsigned short rboguscount=50,rfailcount=5;
+ while (outw(0,ioaddr+READ_PTR),inw(ioaddr))
+ {
+ if (!--rboguscount)
+ {
+ printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n",
+ dev->name);
+ outw(0,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ rboguscount = 100;
+ if (!--rfailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 not responding, giving up.\n",
+ dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ /* Point the CU at the configure sequence and start it. */
+ outw(CONF_LINK,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(0xf000|SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ /* Same poll/kick/give-up pattern for the CU start. */
+ {
+ unsigned short iboguscount=50,ifailcount=5;
+ while (!inw(ioaddr+SCB_STATUS))
+ {
+ if (!--iboguscount)
+ {
+ if (--ifailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n",
+ dev->name, inw(ioaddr+SCB_STATUS), inw(ioaddr+SCB_CMD));
+ outw(CONF_LINK,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(0xf000|SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ iboguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ clear_loopback;
+ lp->init_time = jiffies;
+#if NET_DEBUG > 6
+ printk("%s: leaving eexp_hw_init586()\n", dev->name);
+#endif
+ return;
+}
+
+/*
+ * completely reset the EtherExpress hardware. We will most likely get
+ * an interrupt during this whether we want one or not. It is best,
+ * therefore, to call this while we don't have a request_irq() on.
+ */
+
+/* Full ASIC + 586 reset.  After asserting the reset line, verify the
+ * shared memory interface works by writing an incrementing pattern at
+ * offset 0 and reading it back; the reset is considered complete only
+ * after 20 consecutive successful read-backs.  May raise a spurious
+ * interrupt, so call it before request_irq() (see comment above). */
+static void eexp_hw_ASICrst(struct device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short wrval = 0x0001,succount=0,boguscount=500;
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+
+ PRIV(dev)->started = 0;
+ outb(ASIC_RST|i586_RST,ioaddr+EEPROM_Ctrl);
+ while (succount<20)
+ {
+ /* Skip 0xffff/0x0000 wrap so the pattern never matches a
+  * floating bus. */
+ if (wrval == 0xffff)
+ wrval = 0x0001;
+ outw(0,ioaddr+WRITE_PTR);
+ outw(wrval,ioaddr);
+ outw(0,ioaddr+READ_PTR);
+ if (wrval++ == inw(ioaddr))
+ succount++;
+ else
+ {
+ /* Any mismatch restarts the success count; after 500 such
+  * failures, complain and pulse the reset again. */
+ succount = 0;
+ if (!boguscount--)
+ {
+ boguscount = 500;
+ printk("%s: Having problems resetting EtherExpress ASIC, continuing...\n",
+ dev->name);
+ wrval = 0x0001;
+ outb(ASIC_RST|i586_RST,ioaddr+EEPROM_Ctrl);
+ }
+ }
+ }
+ /* Leave the 586 held in reset; eexp_hw_init586() releases it. */
+ outb(i586_RST,ioaddr+EEPROM_Ctrl);
+}
+
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * We have to do a complete 586 restart for this to take effect.
+ * At the moment only promiscuous mode is supported.
+ */
+/* Re-initialize the 586 only when the promiscuous state actually
+ * changed.  lp->promisc is an unsigned char used as a boolean, while
+ * IFF_PROMISC is a flag bit too wide to survive truncation to char,
+ * so both sides must be normalized before comparing — otherwise the
+ * test is true on every call in promiscuous mode and we would reset
+ * the (fragile — see the lock-up note at the top of the file) 586
+ * each time the filter list is touched. */
+static void
+eexp_set_multicast(struct device *dev)
+{
+ if (!(dev->flags & IFF_PROMISC) != !PRIV(dev)->promisc)
+ eexp_hw_init586(dev);
+}
+
+
+/*
+ * MODULE stuff
+ */
+#ifdef MODULE
+
+#define EEXP_MAX_CARDS 4 /* max number of cards to support */
+#define NAMELEN 8 /* max length of dev->name (inc null) */
+
+static char namelist[NAMELEN * EEXP_MAX_CARDS] = { 0, };
+
+static struct device dev_eexp[EEXP_MAX_CARDS] =
+{
+ { NULL, /* will allocate dynamically */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, express_probe },
+};
+
+int irq[EEXP_MAX_CARDS] = {0, };
+int io[EEXP_MAX_CARDS] = {0, };
+
+/* Ideally the user would give us io=, irq= for every card. If any parameters
+ * are specified, we verify and then use them. If no parameters are given, we
+ * autoprobe for one card only.
+ */
+/* Module load entry for eexpress: register up to EEXP_MAX_CARDS
+ * devices using the user-supplied io=/irq= arrays.  With no io= given
+ * only the first slot is tried (autoprobe).  Returns 0 if at least
+ * one card registered (or none were requested), -ENXIO if the very
+ * first registration fails. */
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+ struct device *dev = &dev_eexp[this_dev];
+ dev->name = namelist + (NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (io[this_dev] == 0) {
+ /* Autoprobe only for the first card; stop at the first
+  * unconfigured slot otherwise. */
+ if (this_dev) break;
+ printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]);
+ if (found != 0) return 0;
+ return -ENXIO;
+ }
+ found++;
+ }
+ return 0;
+}
+
+/* Module unload: tear down every card that was successfully probed
+ * (indicated by a non-NULL priv).  The interface must be removed from
+ * the network stack *before* its private state is freed and its I/O
+ * region released — the original order freed dev->priv while the
+ * device was still registered, leaving a window where the stack could
+ * touch freed memory. */
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+ struct device *dev = &dev_eexp[this_dev];
+ if (dev->priv != NULL) {
+ unregister_netdev(dev);
+ release_region(dev->base_addr, EEXP_IO_EXTENT);
+ kfree(dev->priv);
+ dev->priv = NULL;
+ }
+ }
+}
+#endif
+
+/*
+ * Local Variables:
+ * c-file-style: "linux"
+ * tab-width: 8
+ * compile-command: "gcc -D__KERNEL__ -I/discs/bibble/src/linux-1.3.69/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strength-reduce -pipe -m486 -DCPU=486 -DMODULE -c 3c505.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/epic100.c b/linux/src/drivers/net/epic100.c
new file mode 100644
index 0000000..b44f291
--- /dev/null
+++ b/linux/src/drivers/net/epic100.c
@@ -0,0 +1,1560 @@
+/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
+/*
+ Written/copyright 1997-2002 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the SMC83c170/175 "EPIC" series, as used on the
+ SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Information and updates available at
+ http://www.scyld.com/network/epic100.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] =
+"epic100.c:v1.18 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/epic100.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 32;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Used to set a special media speed or duplex.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for operational efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Too-large receive rings only waste memory. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+/* Bytes transferred to chip before transmission starts. */
+/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
+#define TX_FIFO_THRESH 256
+#define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex.\n"
+"Values are 0x10/0x20/0x100/0x200.");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the SMC "EPIC/100", the SMC
+single-chip Ethernet controllers for PCI. This chip is used on
+the SMC EtherPower II boards.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+IVb. References
+
+http://www.smsc.com/main/datasheets/83c171.pdf
+http://www.smsc.com/main/datasheets/83c175.pdf
+http://scyld.com/expert/NWay.html
+http://www.national.com/pf/DP/DP83840A.html
+
+IVc. Errata
+
+*/
+
+static void *epic_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int epic_pwr_event(void *dev_instance, int event);
+
+enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
+
+#define EPIC_TOTAL_SIZE 0x100
+#ifdef USE_IO_OPS
+#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
+#else
+#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"SMSC EPIC 83c172", {0x000510B8, 0xffffffff, 0,0, 9,0xff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
+ {"SMSC EPIC 83c171", {0x000510B8, 0xffffffff, 0,0, 6,0xff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
+ {"SMSC EPIC/100 83c170", {0x000510B8, 0xffffffff, 0x0ab41092, 0xffffffff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN, },
+ {"SMSC EPIC/100 83c170", {0x000510B8, 0xffffffff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR, },
+ {"SMSC EPIC/C 83c175", {0x000610B8, 0xffffffff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
+ {0,},
+};
+
+struct drv_id_info epic_drv_id = {
+ "epic100", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ epic_probe1, epic_pwr_event };
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Offsets to registers, using the (ugh) SMC names. */
+enum epic_registers {
+ COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
+ PCIBurstCnt=0x18,
+ TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
+ MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
+ LAN0=64, /* MAC address. */
+ MC0=80, /* Multicast filter table. */
+ RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
+ PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatus {
+ TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
+ PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
+ RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
+ TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
+ RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
+};
+enum CommandBits {
+ StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
+ StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
+};
+
+/* The EPIC100 Rx and Tx buffer descriptors. */
+
+struct epic_tx_desc {
+ u32 txstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+struct epic_rx_desc {
+ u32 rxstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+enum desc_status_bits {
+ DescOwn=0x8000,
+};
+
#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Per-adapter driver state.  Allocated PRIV_ALIGN-over-sized and
   rounded up in epic_probe1() so the descriptor rings at the front
   stay 16-byte (paragraph) aligned for the chip's DMA engine. */
struct epic_private {
	/* Tx and Rx rings first so that they remain paragraph aligned. */
	struct epic_rx_desc rx_ring[RX_RING_SIZE];
	struct epic_tx_desc tx_ring[TX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	struct net_device *next_module;	/* Link in root_epic_dev list (module unload). */
	void *priv_addr;				/* Unaligned address for kfree */

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	unsigned int cur_tx, dirty_tx;	/* Tx producer / reclaim indices (mod ring size). */
	struct descriptor *last_tx_desc;

	unsigned int cur_rx, dirty_rx;	/* Rx consumer / refill indices. */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct descriptor *last_rx_desc;
	long last_rx_time;				/* Last Rx, in jiffies. */
	int rx_copybreak;				/* Copy-only-tiny-frames threshold. */

	int msg_level;					/* NETIF_MSG_* bitmask from 'debug'. */
	int max_interrupt_work;			/* Event budget per interrupt. */
	struct pci_dev *pci_dev;		/* PCI bus location. */
	int chip_id, chip_flags;		/* Index and drv_flags from pci_id_tbl. */

	struct net_device_stats stats;
	struct timer_list timer;		/* Media selection timer. */
	int tx_threshold;				/* Tx FIFO threshold; bumped on underrun. */
	int genctl;						/* Including Rx threshold. */
	u32 cur_rx_mode;				/* Cached RxCtrl value set by set_rx_mode(). */
	unsigned char mc_filter[8];		/* 64-bit multicast hash filter shadow. */
	int multicast_filter_limit;

	signed char phys[4];			/* MII device addresses. */
	u16 mii_bmcr;					/* MII control register */
	u16 advertising;				/* NWay media advertisement */
	int mii_phy_cnt;				/* Number of MII transceivers found. */
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int full_duplex:1;		/* Current duplex setting. */
	unsigned int duplex_lock:1;		/* Duplex forced by the user. */
	unsigned int default_port;		/* Last dev->if_port value. */
	unsigned int media2:4;			/* Secondary monitored media port. */
	unsigned int medialock:1;		/* Don't sense media type. */
	unsigned int mediasense:1;		/* Media sensing in progress. */
};
+
+static int epic_open(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
+static void epic_start(struct net_device *dev, int restart);
+static void check_media(struct net_device *dev);
+static void epic_timer(unsigned long data);
+static void epic_tx_timeout(struct net_device *dev);
+static void epic_init_ring(struct net_device *dev);
+static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int epic_rx(struct net_device *dev);
+static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int epic_close(struct net_device *dev);
+static struct net_device_stats *epic_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+
+/* A list of all installed EPIC devices, for removing the driver module. */
+static struct net_device *root_epic_dev = NULL;
+
/* Probe and set up one EPIC board: allocate the net_device, wake the
   chip, read the station address, locate the MII transceivers and
   install the driver entry points.  Returns the new device, or NULL
   on failure. */
static void *epic_probe1(struct pci_dev *pdev, void *init_dev,
						 long ioaddr, int irq, int chip_idx, int card_idx)
{
	struct net_device *dev;
	struct epic_private *ep;
	void *priv_mem;
	int i, option = 0, duplex = 0;

	dev = init_etherdev(init_dev, 0);
	if (!dev)
		return NULL;

	/* Per-card forced media: dev->mem_start (boot line) takes
	   precedence over the options[]/full_duplex[] module parameters. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0 && card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	dev->base_addr = ioaddr;
	dev->irq = irq;
	printk(KERN_INFO "%s: %s at %#lx, %2.2x:%2.2x IRQ %d, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr,
		   pci_bus_number(pdev), pci_devfn(pdev)>>3, dev->irq);

	/* Bring the chip out of low-power mode. */
	outl(0x4200, ioaddr + GENCTL);
	/* Magic from SMSC app note 7.15 */
	outl(0x0008, ioaddr + TEST1);

	/* Turn on the MII transceiver. */
	outl(0x12, ioaddr + MIICfg);
	if (pci_id_tbl[chip_idx].drv_flags & NO_MII)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	outl(0x0200, ioaddr + GENCTL);

	if (((1 << debug) - 1) & NETIF_MSG_MISC) {
		printk(KERN_DEBUG "%s: EEPROM contents\n", dev->name);
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ioaddr, i),
				   i % 16 == 15 ? "\n" : "");
	}

	/* Note: the '175 does not have a serial EEPROM. */
	/* The chip loads the station address into LAN0..LAN2 at reset;
	   read it back 16 bits at a time. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));

	/* i == 5 after this loop, so the final printk emits the sixth
	   (last) address byte. */
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x.\n", dev->dev_addr[i]);

	/* Make certain elements e.g. descriptor lists are aligned. */
	priv_mem = kmalloc(sizeof(*ep) + PRIV_ALIGN, GFP_KERNEL);
	/* Check for the very unlikely case of no memory. */
	if (priv_mem == NULL)
		/* NOTE(review): the init_etherdev'd dev is not unwound here —
		   a small leak on OOM. */
		return NULL;

	/* We do a request_region() to register /proc/ioports info. */
	request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);

	/* Round up to the next 16-byte boundary (PRIV_ALIGN is a mask). */
	dev->priv = ep = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
	memset(ep, 0, sizeof(*ep));
	ep->priv_addr = priv_mem;

	/* Chain onto the list walked at module unload. */
	ep->next_module = root_epic_dev;
	root_epic_dev = dev;

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->msg_level = (1 << debug) - 1;
	ep->rx_copybreak = rx_copybreak;
	ep->max_interrupt_work = max_interrupt_work;
	ep->multicast_filter_limit = multicast_filter_limit;

	/* The lower four bits are non-TP media types. */
	if (option > 0) {
		if (option & 0x220)
			ep->duplex_lock = ep->full_duplex = 1;
		ep->default_port = option & 0xFFFF;
		ep->medialock = 1;
	}
	if (duplex) {
		ep->duplex_lock = ep->full_duplex = 1;
		printk(KERN_INFO "%s: Forced full duplex operation requested.\n",
			   dev->name);
	}
	dev->if_port = ep->default_port;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			/* MII register 1 is the status register; all-ones or
			   all-zeros means no PHY at this address. */
			int mii_status = mdio_read(dev, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				printk(KERN_INFO "%s: Located MII transceiver #%d control "
					   "%4.4x status %4.4x.\n",
					   dev->name, phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
	}
	if (ep->mii_phy_cnt == 0 && ! (ep->chip_flags & NO_MII)) {
		printk(KERN_WARNING "%s: ***WARNING***: No MII transceiver found!\n",
			   dev->name);
		/* Use the known PHY address of the EPII. */
		ep->phys[0] = 3;
	}

	if (ep->mii_phy_cnt) {
		int phy = ep->phys[0];
		int xcvr = ep->default_port & 0x330;
		if (xcvr) {
			printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
				   (xcvr & 0x300 ? 100 : 10),
				   (xcvr & 0x220 ? "full" : "half"));
			ep->mii_bmcr = xcvr & 0x300 ? 0x2000 : 0; /* 10/100mbps? */
			ep->mii_bmcr |= xcvr & 0x220 ? 0x0100 : 0; /* duplex */
			mdio_write(dev, phy, 0, ep->mii_bmcr);
		} else {
			/* 0x3000: autonegotiation-enable + 100Mbps bits in BMCR. */
			ep->mii_bmcr = 0x3000;
			ep->advertising = mdio_read(dev, phy, 4);
			printk(KERN_INFO "%s: Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   dev->name, ep->advertising, mdio_read(dev, phy, 5));
		}
	}

#if EPIC_POWER_SAVE
	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
#endif
	/* Leave the chip in low-power mode until open(). */
	outl(0x0008, ioaddr + GENCTL);

	/* The Epic-specific entries in the device structure. */
	dev->open = &epic_open;
	dev->hard_start_xmit = &epic_start_xmit;
	dev->stop = &epic_close;
	dev->get_stats = &epic_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &mii_ioctl;

	return dev;
}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x09
+#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
+#define EE_ENB (0x0001 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ This serves to flush the operation to the PCI bus.
+ */
+
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the alway-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ64_CMD (6 << 6)
+#define EE_READ256_CMD (6 << 8)
+#define EE_ERASE_CMD (7 << 6)
+
/* Read one 16-bit word from the serial EEPROM at 'location'.
   The EEPROM is bit-banged through the EECTL register: shift the
   command/address out one bit per clock, then clock 16 data bits in.
   Bit 0x40 of EECTL selects the short (64-word) vs. long (256-word)
   read command format. */
static int read_eeprom(long ioaddr, int location)
{
	int i;
	int retval = 0;
	long ee_addr = ioaddr + EECTL;
	int read_cmd = location |
		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Assert chip select. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	outl(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		outl(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
	}
	outl(EE_ENB, ee_addr);

	/* Clock the 16 data bits in, MSB first. */
	for (i = 16; i > 0; i--) {
		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		outl(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
+
+#define MII_READOP 1
+#define MII_WRITEOP 2
/* Read a 16-bit MII management register through the chip's
   MIICtrl/MIIData pair.  Polls until the chip clears the read-op bit;
   returns 0xffff if the operation never completes. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	outl(read_cmd, ioaddr + MIICtrl);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--)
		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug: retry a suspicious
			   all-ones result for low registers of PHY 1. */
			if (phy_id == 1 && location < 6
				&& inw(ioaddr + MIIData) == 0xffff) {
				outl(read_cmd, ioaddr + MIICtrl);
				continue;
			}
			return inw(ioaddr + MIIData);
		}
	return 0xffff;	/* Timed out. */
}
+
/* Write a 16-bit value to MII management register 'loc' of PHY
   'phy_id'.  The data goes to MIIData first, then the command to
   MIICtrl; we then poll until the chip clears the write-op bit (or
   give up silently after 10000 polls). */
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	long ioaddr = dev->base_addr;
	int i;

	outw(value, ioaddr + MIIData);
	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
	for (i = 10000; i > 0; i--) {
		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
	return;
}
+
+
/* Bring the interface up: acquire the (shared) IRQ, initialize the
   descriptor rings, start the chip, and arm the media-monitor timer.
   Returns 0 on success or -EAGAIN if the IRQ is unavailable. */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = (struct epic_private *)dev->priv;

	MOD_INC_USE_COUNT;

	if (request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)) {
		MOD_DEC_USE_COUNT;	/* Undo the count taken above. */
		return -EAGAIN;
	}

	epic_init_ring(dev);
	check_media(dev);
	epic_start(dev, 0);

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;	/* First tick in three seconds. */
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = &epic_timer; /* timer handler */
	add_timer(&ep->timer);

	return 0;
}
+
/* Quiesce the chip: mask all interrupts, stop the Rx/Tx DMA engines,
   fold the hardware error counters into the stats, and drain packets
   already on the Rx ring.  Used to recover from a PCI transaction
   error and before a restart; may be called at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = (struct epic_private *)dev->priv;

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0x00000000, ioaddr + INTMASK);
	/* Stop the chip's Tx and Rx DMA processes. */
	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

	/* Update the error counts; 0xffff means the chip is gone. */
	if (inw(ioaddr + COMMAND) != 0xffff) {
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev);
}
+
+static void epic_start(struct net_device *dev, int restart)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int i;
+
+ if (restart) {
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+ printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+ dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+ udelay(1);
+
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+ }
+
+#if defined(__powerpc__) || defined(__sparc__) || defined(__BIG_ENDIAN)
+ ep->genctl = 0x0432 | (RX_FIFO_THRESH<<8);
+#elif defined(__LITTLE_ENDIAN) || defined(__i386__)
+ ep->genctl = 0x0412 | (RX_FIFO_THRESH<<8);
+#else
+#error The byte order of this architecture is not defined.
+#endif
+
+ /* Power and reset the PHY. */
+ if (ep->chip_flags & MII_PWRDWN)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ if (restart) {
+ outl(ep->genctl | 0x4000, ioaddr + GENCTL);
+ inl(ioaddr + GENCTL);
+ }
+ outl(ep->genctl, ioaddr + GENCTL);
+
+ if (dev->if_port == 2 || dev->if_port == 5) { /* 10base2 or AUI */
+ outl(0x13, ioaddr + MIICfg);
+ printk(KERN_INFO "%s: Disabling MII PHY to use 10base2/AUI.\n",
+ dev->name);
+ mdio_write(dev, ep->phys[0], 0, 0x0C00);
+ } else {
+ outl(0x12, ioaddr + MIICfg);
+ mdio_write(dev, ep->phys[0], 0, ep->advertising);
+ mdio_write(dev, ep->phys[0], 0, ep->mii_bmcr);
+ check_media(dev);
+ }
+
+ for (i = 0; i < 3; i++)
+ outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+
+ ep->tx_threshold = TX_FIFO_THRESH;
+ outl(ep->tx_threshold, ioaddr + TxThresh);
+ outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ outl(virt_to_bus(&ep->rx_ring[ep->cur_rx % RX_RING_SIZE]),
+ ioaddr + PRxCDAR);
+ outl(virt_to_bus(&ep->tx_ring[ep->dirty_tx % TX_RING_SIZE]),
+ ioaddr + PTxCDAR);
+
+ /* Start the chip's Rx process. */
+ set_rx_mode(dev);
+ outl(StartRx | RxQueued, ioaddr + COMMAND);
+
+ if ( ! restart)
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun | TxDone | TxEmpty
+ | RxError | RxOverflow | RxFull | RxHeader | RxDone,
+ ioaddr + INTMASK);
+ if (ep->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: epic_start() done, cmd status %4.4x, "
+ "ctl %4.4x interrupt %4.4x.\n",
+ dev->name, (int)inl(ioaddr + COMMAND),
+ (int)inl(ioaddr + GENCTL), (int)inl(ioaddr + INTSTAT));
+ return;
+}
+
+static void check_media(struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_reg5 = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], 5) : 0;
+ int negotiated = mii_reg5 & ep->advertising;
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+
+ if (ep->duplex_lock)
+ return;
+ if (mii_reg5 == 0xffff) /* Bogus read */
+ return;
+ if (ep->full_duplex != duplex) {
+ ep->full_duplex = duplex;
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ ep->full_duplex ? "full" : "half", ep->phys[0], mii_reg5);
+ outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ }
+}
+
+static void epic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 5*HZ;
+
+ if (ep->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
+ dev->name, (int)inl(ioaddr + TxSTAT));
+ printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
+ "IntStatus %4.4x RxStatus %4.4x.\n",
+ dev->name, (int)inl(ioaddr + INTMASK),
+ (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+ }
+
+ if (ep->cur_tx - ep->dirty_tx > 1 &&
+ jiffies - dev->trans_start > TX_TIMEOUT) {
+ printk(KERN_WARNING "%s: Tx hung, %d vs. %d.\n",
+ dev->name, ep->cur_tx, ep->dirty_tx);
+ epic_tx_timeout(dev);
+ }
+
+ check_media(dev);
+
+ ep->timer.expires = jiffies + next_tick;
+ add_timer(&ep->timer);
+}
+
/* Recover from a transmit timeout.  A Tx FIFO underflow only needs
   the transmitter restarted; anything else gets a full chip restart
   followed by a Tx queue kick. */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = (struct epic_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int tx_status = inw(ioaddr + TxSTAT);

	printk(KERN_WARNING "%s: EPIC transmit timeout, Tx status %4.4x.\n",
		   dev->name, tx_status);
	if (ep->msg_level & NETIF_MSG_TX_ERR)
		printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
			   dev->name, ep->dirty_tx, ep->cur_tx);
	if (tx_status & 0x10) { /* Tx FIFO underflow. */
		ep->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_start(dev, 1);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies;	/* Reset the hang detector. */
	ep->stats.tx_errors++;
	return;
}
+
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = (struct epic_private *)dev->priv;
	int i;

	ep->tx_full = 0;
	ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->last_rx_time = jiffies;
	/* Standard buffer for normal MTU, else MTU + header slack. */
	ep->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = virt_to_bus(&ep->rx_ring[i+1]);
		ep->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring.  (i == RX_RING_SIZE
	   here, so i-1 is the last descriptor.) */
	ep->rx_ring[i-1].next = virt_to_bus(&ep->rx_ring[0]);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;	/* Stop filling; epic_rx() will refill later. */
		skb->dev = dev; /* Mark as being used by this device. */
		skb_reserve(skb, 2); /* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = virt_to_bus(skb->tail);
		ep->rx_ring[i].rxstatus = DescOwn;	/* Hand to the chip. */
	}
	/* If any allocation failed, dirty_rx lags cur_rx by the shortfall. */
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = 0;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = virt_to_bus(&ep->tx_ring[i+1]);
	}
	ep->tx_ring[i-1].next = virt_to_bus(&ep->tx_ring[0]);
	return;
}
+
/* Queue one packet for transmission.  Returns 0 on success, or 1 when
   the queue is paused and the caller must retry later. */
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = (struct epic_private *)dev->priv;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	/* Block a timer-based transmit from overlapping. */
	if (netif_pause_tx_queue(dev) != 0) {
		/* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			epic_tx_timeout(dev);
		return 1;
	}

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = virt_to_bus(skb->data);

	/* Only request a Tx-done interrupt at the queue midpoint and when
	   the ring is about to fill, to limit interrupt load. */
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* Length field pads short frames to the Ethernet minimum; writing
	   DescOwn hands the descriptor to the chip. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
		| DescOwn;

	ep->cur_tx++;
	if (ep->tx_full) {
		/* Check for a just-cleared queue. */
		if (ep->cur_tx - (volatile int)ep->dirty_tx < TX_QUEUE_LEN - 2) {
			netif_unpause_tx_queue(dev);
			ep->tx_full = 0;
		} else
			netif_stop_tx_queue(dev);
	} else
		netif_unpause_tx_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (ep->msg_level & NETIF_MSG_TX_QUEUED)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return 0;
}
+
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Loops until no summary bit is set or the
   per-interrupt work budget (boguscnt) is exhausted. */
static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct epic_private *ep = (struct epic_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int status, boguscnt = max_interrupt_work;

	do {
		status = inl(ioaddr + INTSTAT);
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(status & 0x00007fff, ioaddr + INTSTAT);

		if (ep->msg_level & NETIF_MSG_INTR)
			printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n",
				   dev->name, status, (int)inl(ioaddr + INTSTAT));

		if ((status & IntrSummary) == 0)
			break;

		if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
			epic_rx(dev);

		if (status & (TxEmpty | TxDone)) {
			unsigned int dirty_tx, cur_tx;

			/* Note: if this lock becomes a problem we can narrow the locked
			   region at the cost of occasionally grabbing the lock more
			   times. */
			spin_lock(&ep->lock);
			cur_tx = ep->cur_tx;
			dirty_tx = ep->dirty_tx;
			/* Reclaim descriptors the chip has released back to us. */
			for (; cur_tx - dirty_tx > 0; dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int txstatus = ep->tx_ring[entry].txstatus;

				if (txstatus & DescOwn)
					break; /* It still hasn't been Txed */

				if ( ! (txstatus & 0x0001)) {
					/* There was an major error, log it. */
#ifndef final_version
					if (ep->msg_level & NETIF_MSG_TX_ERR)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, txstatus);
#endif
					ep->stats.tx_errors++;
					if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
					if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
					if (txstatus & 0x0040) ep->stats.tx_window_errors++;
					if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
					if (txstatus & 0x1000) ep->stats.collisions16++;
#endif
				} else {
					if (ep->msg_level & NETIF_MSG_TX_DONE)
						printk(KERN_DEBUG "%s: Transmit done, Tx status "
							   "%8.8x.\n", dev->name, txstatus);
#ifdef ETHER_STATS
					if ((txstatus & 0x0002) != 0) ep->stats.tx_deferred++;
#endif
					ep->stats.collisions += (txstatus >> 8) & 15;
					ep->stats.tx_packets++;
#if LINUX_VERSION_CODE > 0x20127
					ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
#endif
				}

				/* Free the original skb. */
				dev_free_skb_irq(ep->tx_skbuff[entry]);
				ep->tx_skbuff[entry] = 0;
			}

#ifndef final_version
			if (cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, cur_tx, ep->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			ep->dirty_tx = dirty_tx;
			if (ep->tx_full
				&& cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
				/* The ring is no longer full, allow new TX entries. */
				ep->tx_full = 0;
				spin_unlock(&ep->lock);
				netif_resume_tx_queue(dev);
			} else
				spin_unlock(&ep->lock);
		}

		/* Check uncommon events all at once. */
		if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
					  PCIBusErr170 | PCIBusErr175)) {
			if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
				break;
			/* Always update the error counts to avoid overhead later. */
			ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
			ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
			ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

			if (status & TxUnderrun) { /* Tx FIFO underflow. */
				ep->stats.tx_fifo_errors++;
				/* Raise the Tx threshold to reduce future underruns. */
				outl(ep->tx_threshold += 128, ioaddr + TxThresh);
				/* Restart the transmit process. */
				outl(RestartTx, ioaddr + COMMAND);
			}
			if (status & RxOverflow) { /* Missed a Rx frame. */
				ep->stats.rx_errors++;
			}
			if (status & (RxOverflow | RxFull))
				outw(RxQueued, ioaddr + COMMAND);
			if (status & PCIBusErr170) {
				printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
					   dev->name, status);
				epic_pause(dev);
				epic_start(dev, 1);
			}
			/* Clear all error sources. */
			outl(status & 0x7f18, ioaddr + INTSTAT);
		}
		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "IntrStatus=0x%8.8x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			outl(0x0001ffff, ioaddr + INTSTAT);
			/* Ill-advised: Slowly stop emitting this message. */
			max_interrupt_work++;
			break;
		}
	} while (1);

	if (ep->msg_level & NETIF_MSG_INTR)
		printk(KERN_DEBUG "%s: Exiting interrupt, intr_status=%#4.4x.\n",
			   dev->name, status);

	return;
}
+
+/* Receive-ring service routine.  Drains descriptors the chip has handed
+   back (DescOwn clear), delivers good frames to the stack — copying frames
+   shorter than rx_copybreak into a fresh, cache-aligned skb — and then
+   refills the ring.  Returns the number of units of work done.  Runs in
+   interrupt context, so it must not sleep. */
+static int epic_rx(struct net_device *dev)
+{
+	struct epic_private *ep = (struct epic_private *)dev->priv;
+	int entry = ep->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
+	int work_done = 0;
+
+	if (ep->msg_level & NETIF_MSG_RX_STATUS)
+		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+			   ep->rx_ring[entry].rxstatus);
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
+		int status = ep->rx_ring[entry].rxstatus;
+
+		if (ep->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
+		if (--rx_work_limit < 0)
+			break;
+		if (status & 0x2006) {
+			if (ep->msg_level & NETIF_MSG_RX_ERR)
+				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
+					   dev->name, status);
+			if (status & 0x2000) {
+				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+					   "multiple buffers, status %4.4x!\n", dev->name, status);
+				ep->stats.rx_length_errors++;
+			} else if (status & 0x0006)
+				/* Rx Frame errors are counted in hardware. */
+				ep->stats.rx_errors++;
+		} else {
+			/* Malloc up new buffer, compatible with net-2e. */
+			/* Omit the four octet CRC from the length. */
+			short pkt_len = (status >> 16) - 4;
+			struct sk_buff *skb;
+
+			if (pkt_len > PKT_BUF_SZ - 4) {
+				/* Fix: the arguments were swapped relative to the format
+				   string — pkt_len was printed as the %x status and the
+				   status as the %d byte count. */
+				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
+					   "%d bytes.\n",
+					   dev->name, status, pkt_len);
+				pkt_len = 1514;
+			}
+			if (ep->msg_level & NETIF_MSG_RX_STATUS)
+				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
+					   ", bogus_cnt %d.\n", pkt_len, rx_work_limit);
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+			if (pkt_len < rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+#if 1 /* HAS_IP_COPYSUM */
+				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
+#else
+				memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
+					   pkt_len);
+#endif
+			} else {
+				/* Hand the ring skb straight up; the refill loop below
+				   replaces it. */
+				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
+				ep->rx_skbuff[entry] = NULL;
+			}
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+			ep->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+			ep->stats.rx_bytes += pkt_len;
+#endif
+		}
+		work_done++;
+		entry = (++ep->cur_rx) % RX_RING_SIZE;
+	}
+
+	/* Refill the Rx ring buffers. */
+	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
+		entry = ep->dirty_rx % RX_RING_SIZE;
+		if (ep->rx_skbuff[entry] == NULL) {
+			struct sk_buff *skb;
+			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+			if (skb == NULL)
+				break;
+			skb->dev = dev;			/* Mark as being used by this device. */
+			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+			ep->rx_ring[entry].bufaddr = virt_to_bus(skb->tail);
+			work_done++;
+		}
+		ep->rx_ring[entry].rxstatus = DescOwn;
+	}
+	return work_done;
+}
+
+/* ifdown handler: stop the transmit queue, quiesce the chip, release the
+   IRQ and free every skb still attached to the Rx/Tx rings, then leave the
+   chip in low-power mode.  Counterpart of the driver's open routine. */
+static int epic_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct epic_private *ep = (struct epic_private *)dev->priv;
+	int i;
+
+	netif_stop_tx_queue(dev);
+
+	if (ep->msg_level & NETIF_MSG_IFDOWN)
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x.\n",
+			   dev->name, (int)inl(ioaddr + INTSTAT));
+
+	/* Order matters: stop the chip and the IRQ before tearing the rings
+	   down below, so DMA cannot touch freed buffers. */
+	epic_pause(dev);
+	del_timer(&ep->timer);
+	free_irq(dev->irq, dev);
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb = ep->rx_skbuff[i];
+		ep->rx_skbuff[i] = 0;
+		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
+		ep->rx_ring[i].buflength = 0;
+		ep->rx_ring[i].bufaddr = 0xBADF00D0;	/* An invalid address. */
+		if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+			skb->free = 1;
+#endif
+			dev_free_skb(skb);
+		}
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (ep->tx_skbuff[i])
+			dev_free_skb(ep->tx_skbuff[i]);
+		ep->tx_skbuff[i] = 0;
+	}
+
+	/* Green! Leave the chip in low-power mode. */
+	outl(0x440008, ioaddr + GENCTL);
+
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/* Return the accumulated interface statistics.  While the interface is up,
+   first fold in the chip's on-board error counters.  NOTE(review): the
+   read-and-accumulate pattern here and in the interrupt handler suggests
+   these counters clear on read — confirm against the chip databook. */
+static struct net_device_stats *epic_get_stats(struct net_device *dev)
+{
+	struct epic_private *ep = (struct epic_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (netif_running(dev)) {
+		/* Update the error counts. */
+		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+	}
+
+	return &ep->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling ep->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+/* The little-endian AUTODIN II ethernet CRC calculation.
+   N.B. Do not use for bulk data, use a table-based routine instead.
+   This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+/* Bit-at-a-time CRC-32 (reflected polynomial 0xedb88320, initial value
+   all-ones) over 'length' bytes of 'data'.  Only used to hash multicast
+   addresses into the 64-bit filter, so speed is irrelevant here. */
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+	unsigned int crc = 0xffffffff;	/* Initial value. */
+	while(--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		for (bit = 8; --bit >= 0; current_octet >>= 1) {
+			if ((crc ^ current_octet) & 1) {
+				crc >>= 1;
+				crc ^= ethernet_polynomial_le;
+			} else
+				crc >>= 1;
+		}
+	}
+	return crc;
+}
+
+/* Load the Rx filter mode and multicast hash table to match dev->flags and
+   the multicast list.  Register writes are skipped when nothing changed.
+   Note: perfect multicast filtering is deliberately disabled (chip bug per
+   the comment below), so any multicast membership becomes accept-all. */
+static void set_rx_mode(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct epic_private *ep = (struct epic_private *)dev->priv;
+	unsigned char mc_filter[8];		 /* Multicast hash filter */
+	u32 new_rx_mode;
+	int i;
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		new_rx_mode = 0x002C;
+		/* Unconditionally log net taps. */
+		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+		memset(mc_filter, 0xff, sizeof(mc_filter));
+	} else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+		/* There is apparently a chip bug, so the multicast filter
+		   is never enabled. */
+		/* Too many to filter perfectly -- accept all multicasts. */
+		memset(mc_filter, 0xff, sizeof(mc_filter));
+		new_rx_mode = 0x000C;
+	} else if (dev->mc_count == 0) {
+		memset(mc_filter, 0, sizeof(mc_filter));
+		new_rx_mode = 0x0004;
+	} else {			/* Never executed, for now. */
+		struct dev_mc_list *mclist;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			 i++, mclist = mclist->next)
+			set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+					mc_filter);
+		new_rx_mode = 0x000C;
+	}
+	if (ep->cur_rx_mode != new_rx_mode) {
+		ep->cur_rx_mode = new_rx_mode;
+		outl(new_rx_mode, ioaddr + RxCtrl);
+	}
+	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
+	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
+		/* Hash registers appear to be 16 bits wide, 4 bytes apart. */
+		for (i = 0; i < 4; i++)
+			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
+	}
+	return;
+}
+
+/* Private ioctl handler: MII PHY register access (numeric command codes
+   kept alongside the symbolic ones for binary compatibility with older
+   tools) plus get/set of driver tuning parameters.  Write access requires
+   CAP_NET_ADMIN. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct epic_private *ep = (void *)dev->priv;
+	long ioaddr = dev->base_addr;
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = ep->phys[0] & 0x1f;
+		/* Fall Through */
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		if (! netif_running(dev)) {
+			/* Interface is down: temporarily power up the MII. */
+			outl(0x0200, ioaddr + GENCTL);
+			outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		}
+		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+#if defined(PWRDWN_AFTER_IOCTL)
+		if (! netif_running(dev)) {
+			outl(0x0008, ioaddr + GENCTL);
+			outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+		}
+#endif
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (! netif_running(dev)) {
+			outl(0x0200, ioaddr + GENCTL);
+			outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		}
+		if (data[0] == ep->phys[0]) {
+			u16 value = data[2];
+			/* Shadow writes to the primary PHY so the driver's notion
+			   of duplex and advertising stays in sync. */
+			switch (data[1]) {
+			case 0:
+				/* Check for autonegotiation on or reset. */
+				ep->duplex_lock = (value & 0x9000) ? 0 : 1;
+				if (ep->duplex_lock)
+					ep->full_duplex = (value & 0x0100) ? 1 : 0;
+				break;
+			case 4: ep->advertising = value; break;
+			}
+			/* Perhaps check_duplex(dev), depending on chip semantics. */
+		}
+		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+#if defined(PWRDWN_AFTER_IOCTL)
+		if (! netif_running(dev)) {
+			outl(0x0008, ioaddr + GENCTL);
+			outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+		}
+#endif
+		return 0;
+	case SIOCGPARAMS:
+		data32[0] = ep->msg_level;
+		data32[1] = ep->multicast_filter_limit;
+		data32[2] = ep->max_interrupt_work;
+		data32[3] = ep->rx_copybreak;
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		ep->msg_level = data32[0];
+		ep->multicast_filter_limit = data32[1];
+		ep->max_interrupt_work = data32[2];
+		ep->rx_copybreak = data32[3];
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Driver power-management callback: suspend, resume or fully detach the
+   interface.  'dev_instance' is the net_device registered at probe time. */
+static int epic_pwr_event(void *dev_instance, int event)
+{
+	struct net_device *dev = dev_instance;
+	struct epic_private *ep = (struct epic_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	if (ep->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_SUSPEND:
+		epic_pause(dev);
+		/* Put the chip into low-power mode. */
+		outl(0x0008, ioaddr + GENCTL);
+		break;
+	case DRV_RESUME:
+		epic_start(dev, 1);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[ep->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		/* Unlink from the module's device list before freeing. */
+		for (devp = &root_epic_dev; *devp; devp = next) {
+			next = &((struct epic_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (ep->priv_addr)
+			kfree(ep->priv_addr);
+		kfree(dev);
+		/*MOD_DEC_USE_COUNT;*/
+		break;
+	}
+	}
+
+	return 0;
+}
+
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+/* CardBus attach: map the card's registers, read its IRQ and device ID
+   from PCI config space, then run the common probe.  Returns a dev_node_t
+   for Card Services on success, NULL on any failure. */
+static dev_node_t *epic_attach(dev_locator_t *loc)
+{
+	struct net_device *dev;
+	u16 dev_id;
+	u32 pciaddr;
+	u8 bus, devfn, irq;
+	long ioaddr;
+
+	if (loc->bus != LOC_PCI) return NULL;
+	bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+	printk(KERN_DEBUG "epic_attach(bus %d, function %d)\n", bus, devfn);
+#ifdef USE_IO_OPS
+	pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &pciaddr);
+	ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+	pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &pciaddr);
+	ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+						   pci_id_tbl[1].io_size);
+#endif
+	pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+	pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+	if (ioaddr == 0 || irq == 0) {
+		printk(KERN_ERR "The EPIC/C CardBus Ethernet interface at %d/%d was "
+			   "not assigned an %s.\n"
+			   KERN_ERR "  It will not be activated.\n",
+			   bus, devfn, ioaddr == 0 ? "address" : "IRQ");
+		return NULL;
+	}
+	dev = epic_probe1(pci_find_slot(bus, devfn), NULL, ioaddr, irq, 1, 0);
+	if (dev) {
+		dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+		/* Fix: the allocation was previously dereferenced without a
+		   NULL check (CERT MEM32-C). */
+		if (node == NULL)
+			return NULL;
+		strcpy(node->dev_name, dev->name);
+		node->major = node->minor = 0;
+		node->next = NULL;
+		MOD_INC_USE_COUNT;
+		return node;
+	}
+	return NULL;
+}
+
+/* Card Services suspend hook: find the interface by name on the module's
+   device list, pause it, and drop the chip into low-power mode. */
+static void epic_suspend(dev_node_t *node)
+{
+	struct net_device **devp, **next;
+	printk(KERN_INFO "epic_suspend(%s)\n", node->dev_name);
+	for (devp = &root_epic_dev; *devp; devp = next) {
+		next = &((struct epic_private *)(*devp)->priv)->next_module;
+		if (strcmp((*devp)->name, node->dev_name) == 0) break;
+	}
+	if (*devp) {
+		long ioaddr = (*devp)->base_addr;
+		epic_pause(*devp);
+		/* Put the chip into low-power mode. */
+		outl(0x0008, ioaddr + GENCTL);
+	}
+}
+/* Card Services resume hook: find the interface by name on the module's
+   device list and restart it. */
+static void epic_resume(dev_node_t *node)
+{
+	struct net_device **devp, **next;
+	printk(KERN_INFO "epic_resume(%s)\n", node->dev_name);
+	for (devp = &root_epic_dev; *devp; devp = next) {
+		next = &((struct epic_private *)(*devp)->priv)->next_module;
+		if (strcmp((*devp)->name, node->dev_name) == 0) break;
+	}
+	if (*devp) {
+		epic_start(*devp, 1);
+	}
+}
+/* Card Services detach hook: unregister the interface, release its I/O
+   resources, and unlink it from the module's device list. */
+static void epic_detach(dev_node_t *node)
+{
+	struct net_device **devp, **next;
+	printk(KERN_INFO "epic_detach(%s)\n", node->dev_name);
+	for (devp = &root_epic_dev; *devp; devp = next) {
+		next = &((struct epic_private *)(*devp)->priv)->next_module;
+		if (strcmp((*devp)->name, node->dev_name) == 0) break;
+	}
+	if (*devp) {
+		struct net_device *doomed = *devp;
+		unregister_netdev(doomed);
+		release_region(doomed->base_addr, EPIC_TOTAL_SIZE);
+#ifndef USE_IO_OPS
+		iounmap((char *)doomed->base_addr);
+#endif
+		/* Fix: unlink BEFORE freeing.  'next' points into the device's
+		   private area, so the previous "kfree(*devp); *devp = *next;"
+		   order read freed memory (cf. the DRV_DETACH path in
+		   epic_pwr_event, which unlinks first). */
+		*devp = *next;
+		kfree(doomed);
+		kfree(node);
+		MOD_DEC_USE_COUNT;
+	}
+}
+
+/* Card Services driver hooks: name, attach, suspend, resume, detach. */
+struct driver_operations epic_ops = {
+	"epic_cb", epic_attach, epic_suspend, epic_resume, epic_detach
+};
+
+#endif /* Cardbus support */
+
+
+#ifdef MODULE
+
+/* Module entry point: announce the driver version, then register either
+   the CardBus hooks or the generic PCI probe, depending on build flavour. */
+int init_module(void)
+{
+	/* Emit version even if no cards detected. */
+	printk(KERN_INFO "%s", version);
+
+#ifdef CARDBUS
+	register_driver(&epic_ops);
+	return 0;
+#else
+	return pci_drv_register(&epic_drv_id, NULL);
+#endif
+}
+
+/* Module unload: unregister the driver, then tear down every device still
+   on the list — unregister, release I/O resources, free private storage. */
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+#ifdef CARDBUS
+	unregister_driver(&epic_ops);
+#else
+	pci_drv_unregister(&epic_drv_id);
+#endif
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_epic_dev) {
+		struct epic_private *ep = (struct epic_private *)root_epic_dev->priv;
+		unregister_netdev(root_epic_dev);
+		release_region(root_epic_dev->base_addr, pci_id_tbl[ep->chip_id].io_size);
+#ifndef USE_IO_OPS
+		iounmap((char *)root_epic_dev->base_addr);
+#endif
+		next_dev = ep->next_module;	/* save the link before freeing */
+		if (ep->priv_addr)
+			kfree(ep->priv_addr);
+		kfree(root_epic_dev);
+		root_epic_dev = next_dev;
+	}
+}
+#else
+/* Compiled-in (non-module) probe entry point: register with the PCI driver
+   core and print the version banner on success. */
+int epic100_probe(struct net_device *dev)
+{
+	int retval = pci_drv_register(&epic_drv_id, dev);
+	if (retval >= 0)
+		printk(KERN_INFO "%s", version);
+	return retval;
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c epic100.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c epic100.c -o epic_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/eth16i.c b/linux/src/drivers/net/eth16i.c
new file mode 100644
index 0000000..244c3e7
--- /dev/null
+++ b/linux/src/drivers/net/eth16i.c
@@ -0,0 +1,1604 @@
+/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
+
+ Written 1994-1998 by Mika Kuoppala
+
+ Copyright (C) 1994-1998 by Mika Kuoppala
+ Based on skeleton.c and heavily on at1700.c by Donald Becker
+
+ This software may be used and distributed according to the terms
+ of the GNU Public Licence, incorporated herein by reference.
+
+ The author may be reached as miku@iki.fi
+
+ This driver supports following cards :
+ - ICL EtherTeam 16i
+ - ICL EtherTeam 32 EISA
+	     (Uses true 32 bit transfers rather than 16i compatibility mode)
+
+ Example Module usage:
+ insmod eth16i.o ioaddr=0x2a0 mediatype=bnc
+
+ mediatype can be one of the following: bnc,tp,dix,auto,eprom
+
+ 'auto' will try to autoprobe mediatype.
+ 'eprom' will use whatever type defined in eprom.
+
+ I have benchmarked driver with PII/300Mhz as a ftp client
+ and 486/33Mhz as a ftp server. Top speed was 1128.37 kilobytes/sec.
+
+ Sources:
+ - skeleton.c a sample network driver core for linux,
+ written by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ - at1700.c a driver for Allied Telesis AT1700, written
+ by Donald Becker.
+ - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
+ written by Markku Viima
+ - The Fujitsu MB86965 databook.
+
+   The author thanks the following persons for their valuable assistance:
+ Markku Viima (ICL)
+ Ari Valve (ICL)
+ Donald Becker
+ Kurt Huwig <kurt@huwig.de>
+
+ Revision history:
+
+ Version Date Description
+
+ 0.01 15.12-94 Initial version (card detection)
+ 0.02 23.01-95 Interrupt is now hooked correctly
+ 0.03 01.02-95 Rewrote initialization part
+ 0.04 07.02-95 Base skeleton done...
+ Made a few changes to signature checking
+ to make it a bit reliable.
+ - fixed bug in tx_buf mapping
+ - fixed bug in initialization (DLC_EN
+ wasn't enabled when initialization
+ was done.)
+ 0.05 08.02-95 If there were more than one packet to send,
+ transmit was jammed due to invalid
+ register write...now fixed
+ 0.06 19.02-95 Rewrote interrupt handling
+ 0.07 13.04-95 Wrote EEPROM read routines
+ Card configuration now set according to
+ data read from EEPROM
+ 0.08 23.06-95 Wrote part that tries to probe used interface
+ port if AUTO is selected
+
+ 0.09 01.09-95 Added module support
+
+ 0.10 04.09-95 Fixed receive packet allocation to work
+ with kernels > 1.3.x
+
+ 0.20 20.09-95 Added support for EtherTeam32 EISA
+
+ 0.21 17.10-95 Removed the unnecessary extern
+ init_etherdev() declaration. Some
+ other cleanups.
+
+ 0.22 22.02-96 Receive buffer was not flushed
+ correctly when faulty packet was
+ received. Now fixed.
+
+ 0.23 26.02-96 Made resetting the adapter
+ more reliable.
+
+ 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx
+
+ 0.25 22.05-96 kfree() was missing from cleanup_module.
+
+ 0.26 11.06-96 Sometimes card was not found by
+ check_signature(). Now made more reliable.
+
+ 0.27 23.06-96 Oops. 16 consecutive collisions halted
+ adapter. Now will try to retransmit
+ MAX_COL_16 times before finally giving up.
+
+ 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq
+
+ 0.29 29.10-97 Multiple card support for module users
+
+ 0.30 30.10-97 Fixed irq allocation bug.
+ (request_irq moved from probe to open)
+
+ 0.30a 21.08-98 Card detection made more relaxed. Driver
+ had problems with some TCP/IP-PROM boots
+ to find the card. Suggested by
+ Kurt Huwig <kurt@huwig.de>
+
+ 0.31 28.08-98 Media interface port can now be selected
+ with module parameters or kernel
+ boot parameters.
+
+ 0.32 31.08-98 IRQ was never freed if open/close
+ pair wasn't called. Now fixed.
+
+ 0.33 10.09-98 When eth16i_open() was called after
+ eth16i_close() chip never recovered.
+ Now more shallow reset is made on
+ close.
+
+ Bugs:
+ In some cases the media interface autoprobing code doesn't find
+ the correct interface type. In this case you can
+ manually choose the interface type in DOS with E16IC.EXE which is
+ configuration software for EtherTeam16i and EtherTeam32 cards.
+ This is also true for IRQ setting. You cannot use module
+ parameter to configure IRQ of the card (yet).
+
+ To do:
+ - Real multicast support
+ - Rewrite the media interface autoprobing code. Its _horrible_ !
+ - Possibly merge all the MB86965 specific code to external
+ module for use by eth16.c and Donald's at1700.c
+ - IRQ configuration with module parameter. I will do
+ this when i will get enough info about setting
+ irq without configuration utility.
+*/
+
+static char *version =
+ "eth16i.c: v0.33 10-09-98 Mika Kuoppala (miku@iki.fi)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#if LINUX_VERSION_CODE >= 0x20123
+#include <linux/init.h>
+#else
+#define __init
+#define __initdata
+#define __initfunc(x) x
+#endif
+
+#if LINUX_VERSION_CODE < 0x20138
+//#define test_and_set_bit(val,addr) set_bit(val,addr)
+#endif
+
+#if LINUX_VERSION_CODE < 0x020100
+typedef struct enet_statistics eth16i_stats_type;
+#else
+typedef struct net_device_stats eth16i_stats_type;
+#endif
+
+/* Few macros */
+#define BIT(a) ( (1 << (a)) )
+#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
+#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
+
+/* This is the I/O address space for Etherteam 16i adapter. */
+#define ETH16I_IO_EXTENT 32
+
+/* Ticks before deciding that transmit has timed out */
+#define TX_TIMEOUT (400*HZ/1000)
+
+/* Maximum loop count when receiving packets */
+#define MAX_RX_LOOP 20
+
+/* Some interrupt masks */
+#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */
+#define ETH16I_INTR_OFF 0x0000
+
+/* Buffers header status byte meanings */
+#define PKT_GOOD BIT(5)
+#define PKT_GOOD_RMT BIT(4)
+#define PKT_SHORT BIT(3)
+#define PKT_ALIGN_ERR BIT(2)
+#define PKT_CRC_ERR BIT(1)
+#define PKT_RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit status register (DLCR0) */
+#define TX_STATUS_REG 0
+#define TX_DONE BIT(7)
+#define NET_BUSY BIT(6)
+#define TX_PKT_RCD BIT(5)
+#define CR_LOST BIT(4)
+#define TX_JABBER_ERR BIT(3)
+#define COLLISION BIT(2)
+#define COLLISIONS_16 BIT(1)
+
+/* Receive status register (DLCR1) */
+#define RX_STATUS_REG 1
+#define RX_PKT BIT(7) /* Packet received */
+#define BUS_RD_ERR BIT(6)
+#define SHORT_PKT_ERR BIT(3)
+#define ALIGN_ERR BIT(2)
+#define CRC_ERR BIT(1)
+#define RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit Interrupt Enable Register (DLCR2) */
+#define TX_INTR_REG 2
+#define TX_INTR_DONE BIT(7)
+#define TX_INTR_COL BIT(2)
+#define TX_INTR_16_COL BIT(1)
+
+/* Receive Interrupt Enable Register (DLCR3) */
+#define RX_INTR_REG 3
+#define RX_INTR_RECEIVE BIT(7)
+#define RX_INTR_SHORT_PKT BIT(3)
+#define RX_INTR_CRC_ERR BIT(1)
+#define RX_INTR_BUF_OVERFLOW BIT(0)
+
+/* Transmit Mode Register (DLCR4) */
+#define TRANSMIT_MODE_REG 4
+#define LOOPBACK_CONTROL BIT(1)
+#define CONTROL_OUTPUT BIT(2)
+
+/* Receive Mode Register (DLCR5) */
+#define RECEIVE_MODE_REG 5
+#define RX_BUFFER_EMPTY BIT(6)
+#define ACCEPT_BAD_PACKETS BIT(5)
+#define RECEIVE_SHORT_ADDR BIT(4)
+#define ACCEPT_SHORT_PACKETS BIT(3)
+#define REMOTE_RESET BIT(2)
+
+#define ADDRESS_FILTER_MODE BIT(1) | BIT(0)
+#define REJECT_ALL 0
+#define ACCEPT_ALL 3
+#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
+#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
+
+/* Configuration Register 0 (DLCR6) */
+#define CONFIG_REG_0 6
+#define DLC_EN BIT(7)
+#define SRAM_CYCLE_TIME_100NS BIT(6)
+#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
+#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
+#define TBS1 BIT(3)
+#define TBS0 BIT(2)
+#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */
+#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */
+
+#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
+#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */
+#endif
+#define TX_BUF_1x2048 0
+#define TX_BUF_2x2048 1
+#define TX_BUF_2x4098 2
+#define TX_BUF_2x8192 3
+
+/* Configuration Register 1 (DLCR7) */
+#define CONFIG_REG_1 7
+#define POWERUP BIT(5)
+
+/* Transmit start register */
+#define TRANSMIT_START_REG 10
+#define TRANSMIT_START_RB 2
+#define TX_START BIT(7) /* Rest of register bit indicate*/
+ /* number of packets in tx buffer*/
+/* Node ID registers (DLCR8-13) */
+#define NODE_ID_0 8
+#define NODE_ID_RB 0
+
+/* Hash Table registers (HT8-15) */
+#define HASH_TABLE_0 8
+#define HASH_TABLE_RB 1
+
+/* Buffer memory ports */
+#define BUFFER_MEM_PORT_LB 8
+#define DATAPORT BUFFER_MEM_PORT_LB
+#define BUFFER_MEM_PORT_HB 9
+
+/* 16 Collision control register (BMPR11) */
+#define COL_16_REG 11
+#define HALT_ON_16 0x00
+#define RETRANS_AND_HALT_ON_16 0x02
+
+/* Maximum number of attempts to send after 16 consecutive collisions */
+#define MAX_COL_16 10
+
+/* DMA Burst and Transceiver Mode Register (BMPR13) */
+#define TRANSCEIVER_MODE_REG 13
+#define TRANSCEIVER_MODE_RB 2
+#define IO_BASE_UNLOCK BIT(7)
+#define LOWER_SQUELCH_TRESH BIT(6)
+#define LINK_TEST_DISABLE BIT(5)
+#define AUI_SELECT BIT(4)
+#define DIS_AUTO_PORT_SEL BIT(3)
+
+/* Filter Self Receive Register (BMPR14) */
+#define FILTER_SELF_RX_REG 14
+#define SKIP_RX_PACKET BIT(2)
+#define FILTER_SELF_RECEIVE BIT(0)
+
+/* EEPROM Control Register (BMPR 16) */
+#define EEPROM_CTRL_REG 16
+
+/* EEPROM Data Register (BMPR 17) */
+#define EEPROM_DATA_REG 17
+
+/* NMC93CSx6 EEPROM Control Bits */
+#define CS_0 0x00
+#define CS_1 0x20
+#define SK_0 0x00
+#define SK_1 0x40
+#define DI_0 0x00
+#define DI_1 0x80
+
+/* NMC93CSx6 EEPROM Instructions */
+#define EEPROM_READ 0x80
+
+/* NMC93CSx6 EEPROM Addresses */
+#define E_NODEID_0 0x02
+#define E_NODEID_1 0x03
+#define E_NODEID_2 0x04
+#define E_PORT_SELECT 0x14
+ #define E_PORT_BNC 0x00
+ #define E_PORT_DIX 0x01
+ #define E_PORT_TP 0x02
+ #define E_PORT_AUTO 0x03
+ #define E_PORT_FROM_EPROM 0x04
+#define E_PRODUCT_CFG 0x30
+
+
+/* Macro to slow down io between EEPROM clock transitions */
+#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0)
+
+/* Jumperless Configuration Register (BMPR19) */
+#define JUMPERLESS_CONFIG 19
+
+/* ID ROM registers, writing to them also resets some parts of chip */
+#define ID_ROM_0 24
+#define ID_ROM_7 31
+#define RESET ID_ROM_0
+
+/* This is the I/O address list to be probed when seeking the card */
+static unsigned int eth16i_portlist[] =
+ { 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 };
+
+static unsigned int eth32i_portlist[] =
+ { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
+ 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0 };
+
+/* This is the Interrupt lookup table for Eth16i card */
+static unsigned int eth16i_irqmap[] = { 9, 10, 5, 15, 0 };
+#define NUM_OF_ISA_IRQS 4
+
+/* This is the Interrupt lookup table for Eth32i card */
+static unsigned int eth32i_irqmap[] = { 3, 5, 7, 9, 10, 11, 12, 15, 0 };
+#define EISA_IRQ_REG 0xc89
+#define NUM_OF_EISA_IRQS 8
+
+static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
+static unsigned int boot = 1;
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef ETH16I_DEBUG
+#define ETH16I_DEBUG 0
+#endif
+static unsigned int eth16i_debug = ETH16I_DEBUG;
+
+/* Information for each board */
+
+struct eth16i_local {
+ eth16i_stats_type stats;
+ unsigned char tx_started;
+ unsigned char tx_buf_busy;
+ unsigned short tx_queue; /* Number of packets in transmit buffer */
+ unsigned short tx_queue_len;
+ unsigned int tx_buf_size;
+ unsigned long open_time;
+ unsigned long tx_buffered_packets;
+ unsigned long col_16;
+};
+
+/* Function prototypes */
+
+extern int eth16i_probe(struct device *dev);
+
+static int eth16i_probe1(struct device *dev, int ioaddr);
+static int eth16i_check_signature(int ioaddr);
+static int eth16i_probe_port(int ioaddr);
+static void eth16i_set_port(int ioaddr, int porttype);
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l);
+static int eth16i_receive_probe_packet(int ioaddr);
+static int eth16i_get_irq(int ioaddr);
+static int eth16i_read_eeprom(int ioaddr, int offset);
+static int eth16i_read_eeprom_word(int ioaddr);
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
+static int eth16i_open(struct device *dev);
+static int eth16i_close(struct device *dev);
+static int eth16i_tx(struct sk_buff *skb, struct device *dev);
+static void eth16i_rx(struct device *dev);
+static void eth16i_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void eth16i_reset(struct device *dev);
+static void eth16i_skip_packet(struct device *dev);
+static void eth16i_multicast(struct device *dev);
+static void eth16i_select_regbank(unsigned char regbank, int ioaddr);
+static void eth16i_initialize(struct device *dev);
+
+#if 0
+static int eth16i_set_irq(struct device *dev);
+#endif
+
+#ifdef MODULE
+static ushort eth16i_parse_mediatype(const char* s);
+#endif
+
+static struct enet_statistics *eth16i_get_stats(struct device *dev);
+
+static char *cardname = "ICL EtherTeam 16i/32";
+
+#ifdef HAVE_DEVLIST
+
+/* Support for alternate probe manager.
+   Fix: a stray leading '/' before 'struct' made this declaration a syntax
+   error whenever HAVE_DEVLIST was defined. */
+struct netdev_entry eth16i_drv =
+	{"eth16i", eth16i_probe1, ETH16I_IO_EXTENT, eth16i_probe_list};
+
+#else /* Not HAVE_DEVLIST */
+
+/* Probe for EtherTeam 16i/32 adapters.  base_addr above 0x1ff means
+   "check exactly this address"; 1..0x1ff means "don't probe at all";
+   0 scans first the ISA port list, then the EISA one. */
+__initfunc(int eth16i_probe(struct device *dev))
+{
+	int i;
+	int ioaddr;
+	int base_addr = dev ? dev->base_addr : 0;
+
+	if(eth16i_debug > 4)
+		printk(KERN_DEBUG "Probing started for %s\n", cardname);
+
+	if(base_addr > 0x1ff)           /* Check only single location */
+		return eth16i_probe1(dev, base_addr);
+	else if(base_addr != 0)         /* Don't probe at all */
+		return ENXIO;
+
+	/* Seek card from the ISA io address space */
+	for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++) {
+		if(check_region(ioaddr, ETH16I_IO_EXTENT))
+			continue;
+		if(eth16i_probe1(dev, ioaddr) == 0)
+			return 0;
+	}
+
+	/* Seek card from the EISA io address space */
+	for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++) {
+		if(check_region(ioaddr, ETH16I_IO_EXTENT))
+			continue;
+		if(eth16i_probe1(dev, ioaddr) == 0)
+			return 0;
+	}
+
+	return ENODEV;
+}
+
+__initfunc(static int eth16i_probe1(struct device *dev, int ioaddr))
+{
+ static unsigned version_printed = 0;
+ boot = 1; /* To inform initilization that we are in boot probe */
+
+ /*
+ The MB86985 chip has on register which holds information in which
+ io address the chip lies. First read this register and compare
+ it to our current io address and if match then this could
+ be our chip.
+ */
+
+ if(ioaddr < 0x1000) {
+
+ if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)]
+ != ioaddr)
+ return -ENODEV;
+ }
+
+ /* Now we will go a bit deeper and try to find the chip's signature */
+
+ if(eth16i_check_signature(ioaddr) != 0)
+ return -ENODEV;
+
+ /*
+ Now it seems that we have found a ethernet chip in this particular
+ ioaddr. The MB86985 chip has this feature, that when you read a
+ certain register it will increase it's io base address to next
+ configurable slot. Now when we have found the chip, first thing is
+ to make sure that the chip's ioaddr will hold still here.
+ */
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
+
+ outb(0x00, ioaddr + RESET); /* Reset some parts of chip */
+ BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */
+
+ if(dev == NULL)
+ dev = init_etherdev(0, 0);
+
+ if( (eth16i_debug & version_printed++) == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev->base_addr = ioaddr;
+
+#if 0
+ if(dev->irq) {
+ if(eth16i_set_irq(dev)) {
+ dev->irq = eth16i_get_irq(ioaddr);
+ }
+
+ }
+ else {
+#endif
+
+ dev->irq = eth16i_get_irq(ioaddr);
+
+ /* Try to obtain interrupt vector */
+
+ if (request_irq(dev->irq, (void *)&eth16i_interrupt, 0, "eth16i", dev)) {
+ printk(KERN_WARNING "%s: %s at %#3x, but is unusable due conflicting IRQ %d.\n",
+ dev->name, cardname, ioaddr, dev->irq);
+ return -EAGAIN;
+ }
+
+#if 0
+ irq2dev_map[dev->irq] = dev;
+#endif
+
+ printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ",
+ dev->name, cardname, ioaddr, dev->irq);
+
+ /* Let's grab the region */
+ request_region(ioaddr, ETH16I_IO_EXTENT, "eth16i");
+
+ /* Now we will have to lock the chip's io address */
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
+
+ eth16i_initialize(dev); /* Initialize rest of the chip's registers */
+
+ /* Now let's save some energy by shutting down the chip ;) */
+ BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
+
+ /* Initialize the device structure */
+ if(dev->priv == NULL) {
+ dev->priv = kmalloc(sizeof(struct eth16i_local), GFP_KERNEL);
+ if(dev->priv == NULL)
+ return -ENOMEM;
+ }
+
+ memset(dev->priv, 0, sizeof(struct eth16i_local));
+
+ dev->open = eth16i_open;
+ dev->stop = eth16i_close;
+ dev->hard_start_xmit = eth16i_tx;
+ dev->get_stats = eth16i_get_stats;
+ dev->set_multicast_list = &eth16i_multicast;
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ boot = 0;
+
+ return 0;
+}
+
+
+static void eth16i_initialize(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i, node_w = 0;
+ unsigned char node_byte = 0;
+
+ /* Setup station address */
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+ for(i = 0 ; i < 3 ; i++) {
+ unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
+ }
+
+ for(i = 0; i < 6; i++) {
+ outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
+ if(boot) {
+ printk("%02x", inb(ioaddr + NODE_ID_0 + i));
+ if(i != 5)
+ printk(":");
+ }
+ }
+
+ /* Now we will set multicast addresses to accept none */
+ eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
+ for(i = 0; i < 8; i++)
+ outb(0x00, ioaddr + HASH_TABLE_0 + i);
+
+ /*
+ Now let's disable the transmitter and receiver, set the buffer ram
+ cycle time, bus width and buffer data path width. Also we shall
+ set transmit buffer size and total buffer size.
+ */
+
+ eth16i_select_regbank(2, ioaddr);
+
+ node_byte = 0;
+ node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
+
+ if( (node_w & 0xFF00) == 0x0800)
+ node_byte |= BUFFER_WIDTH_8;
+
+ node_byte |= SRAM_BS1;
+
+ if( (node_w & 0x00FF) == 64)
+ node_byte |= SRAM_BS0;
+
+ node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
+
+ outb(node_byte, ioaddr + CONFIG_REG_0);
+
+ /* We shall halt the transmitting, if 16 collisions are detected */
+ outb(HALT_ON_16, ioaddr + COL_16_REG);
+
+#ifdef MODULE
+ /* if_port already set by init_module() */
+#else
+ dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ?
+ dev->mem_start : E_PORT_FROM_EPROM;
+#endif
+
+ /* Set interface port type */
+ if(boot) {
+ char *porttype[] = {"BNC", "DIX", "TP", "AUTO", "FROM_EPROM" };
+
+ switch(dev->if_port)
+ {
+
+ case E_PORT_FROM_EPROM:
+ dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
+ break;
+
+ case E_PORT_AUTO:
+ dev->if_port = eth16i_probe_port(ioaddr);
+ break;
+
+ case E_PORT_BNC:
+ case E_PORT_TP:
+ case E_PORT_DIX:
+ break;
+ }
+
+ printk(" %s interface.\n", porttype[dev->if_port]);
+
+ eth16i_set_port(ioaddr, dev->if_port);
+ }
+
+ /* Set Receive Mode to normal operation */
+ outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
+}
+
+static int eth16i_probe_port(int ioaddr)
+{
+ int i;
+ int retcode;
+ unsigned char dummy_packet[64] = { 0 };
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+
+ for(i = 0; i < 6; i++) {
+ dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
+ dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
+ }
+
+ dummy_packet[12] = 0x00;
+ dummy_packet[13] = 0x04;
+
+ eth16i_select_regbank(2, ioaddr);
+
+ for(i = 0; i < 3; i++) {
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+ eth16i_set_port(ioaddr, i);
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Set port number %d\n", i);
+
+ retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
+ if(retcode == 0) {
+ retcode = eth16i_receive_probe_packet(ioaddr);
+ if(retcode != -1) {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Eth16i interface port found at %d\n", i);
+ return i;
+ }
+ }
+ else {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n");
+ }
+ }
+
+ if( eth16i_debug > 1)
+ printk(KERN_DEBUG "Using default port\n");
+
+ return E_PORT_BNC;
+}
+
+static void eth16i_set_port(int ioaddr, int porttype)
+{
+ unsigned short temp = 0;
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
+
+ temp |= DIS_AUTO_PORT_SEL;
+
+ switch(porttype) {
+
+ case E_PORT_BNC :
+ temp |= AUI_SELECT;
+ break;
+
+ case E_PORT_TP :
+ break;
+
+ case E_PORT_DIX :
+ temp |= AUI_SELECT;
+ BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
+ break;
+ }
+
+ outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
+ printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n",
+ inb(ioaddr+TRANSCEIVER_MODE_REG));
+ }
+}
+
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
+{
+ int starttime;
+
+ outb(0xff, ioaddr + TX_STATUS_REG);
+
+ outw(l, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
+
+ starttime = jiffies;
+ outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
+
+ while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int eth16i_receive_probe_packet(int ioaddr)
+{
+ int starttime;
+
+ starttime = jiffies;
+
+ while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Timeout occured waiting transmit packet received\n");
+ starttime = jiffies;
+ while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Timeout occured waiting receive packet\n");
+ return -1;
+ }
+ }
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "RECEIVE_PACKET\n");
+ return(0); /* Found receive packet */
+ }
+ }
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
+ printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
+ }
+
+ return(0); /* Return success */
+}
+
+#if 0
+static int eth16i_set_irq(struct device* dev)
+{
+ const int ioaddr = dev->base_addr;
+ const int irq = dev->irq;
+ int i = 0;
+
+ if(ioaddr < 0x1000) {
+ while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq)
+ i++;
+
+ if(i < NUM_OF_ISA_IRQS) {
+ u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ cbyte = (cbyte & 0x3F) | (i << 6);
+ outb(cbyte, ioaddr + JUMPERLESS_CONFIG);
+ return 0;
+ }
+ }
+ else {
+ printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name);
+ }
+
+ return -1;
+
+}
+#endif
+
+static int eth16i_get_irq(int ioaddr)
+{
+ unsigned char cbyte;
+
+ if( ioaddr < 0x1000) {
+ cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ return( eth16i_irqmap[ ((cbyte & 0xC0) >> 6) ] );
+ } else { /* Oh..the card is EISA so the method of getting the IRQ is different */
+ unsigned short index = 0;
+ cbyte = inb(ioaddr + EISA_IRQ_REG);
+ while( (cbyte & 0x01) == 0) {
+ cbyte = cbyte >> 1;
+ index++;
+ }
+ return( eth32i_irqmap[ index ] );
+ }
+}
+
+static int eth16i_check_signature(int ioaddr)
+{
+ int i;
+ unsigned char creg[4] = { 0 };
+
+ for(i = 0; i < 4 ; i++) {
+
+ creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
+
+ if(eth16i_debug > 1)
+ printk("eth16i: read signature byte %x at %x\n",
+ creg[i],
+ ioaddr + TRANSMIT_MODE_REG + i);
+ }
+
+ creg[0] &= 0x0F; /* Mask collision cnr */
+ creg[2] &= 0x7F; /* Mask DCLEN bit */
+
+#if 0
+ /*
+ This was removed because the card was sometimes left in a state
+ from which it couldn't be found anymore. If a more strict
+ check is ever needed, this still has to be fixed.
+ */
+ if( ! ((creg[0] == 0x06) && (creg[1] == 0x41)) ) {
+ if(creg[1] != 0x42)
+ return -1;
+ }
+#endif
+
+ if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
+ creg[2] &= 0x40;
+ creg[3] &= 0x03;
+
+ if( !((creg[2] == 0x40) && (creg[3] == 0x00)) )
+ return -1;
+ }
+
+ if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
+ return -1;
+
+ if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
+ return -1;
+
+ return 0;
+}
+
+static int eth16i_read_eeprom(int ioaddr, int offset)
+{
+ int data = 0;
+
+ eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
+ outb(CS_1, ioaddr + EEPROM_CTRL_REG);
+ data = eth16i_read_eeprom_word(ioaddr);
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+
+ return(data);
+}
+
+static int eth16i_read_eeprom_word(int ioaddr)
+{
+ int i;
+ int data = 0;
+
+ for(i = 16; i > 0; i--) {
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ data = (data << 1) |
+ ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
+
+ eeprom_slow_io();
+ }
+
+ return(data);
+}
+
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
+{
+ int i;
+
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_0, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_1, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+
+ for(i = 7; i >= 0; i--) {
+ short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
+ outb(cmd, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ }
+}
+
+static int eth16i_open(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ /* Initialize the chip */
+ eth16i_initialize(dev);
+
+ /* Set the transmit buffer size */
+ lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
+
+ if(eth16i_debug > 0)
+ printk(KERN_DEBUG "%s: transmit buffer size %d\n",
+ dev->name, lp->tx_buf_size);
+
+ /* Now enable Transmitter and Receiver sections */
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Now switch to register bank 2, for run time operation */
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->open_time = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on interrupts*/
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int eth16i_close(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ eth16i_reset(dev);
+
+ /* Turn off interrupts*/
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ lp->open_time = 0;
+
+ /* Disable transmit and receive */
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Reset the chip */
+ /* outb(0xff, ioaddr + RESET); */
+ /* outw(0xffff, ioaddr + TX_STATUS_REG); */
+
+ outb(0x00, ioaddr + CONFIG_REG_1);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int eth16i_tx(struct sk_buff *skb, struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int status = 0;
+
+ if(dev->tbusy) {
+
+ /*
+ If we get here, some higher level has decided that
+ we are broken. There should really be a "kick me"
+ function call instead.
+ */
+
+ int tickssofar = jiffies - dev->trans_start;
+ if(tickssofar < TX_TIMEOUT)
+ return 1;
+
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n",
+ dev->name,
+ inw(ioaddr + TX_STATUS_REG),
+ (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
+ "IRQ conflict" : "network cable problem");
+
+ dev->trans_start = jiffies;
+
+ /* Let's dump all registers */
+ if(eth16i_debug > 0) {
+ printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
+ dev->name, inb(ioaddr + 0),
+ inb(ioaddr + 1), inb(ioaddr + 2),
+ inb(ioaddr + 3), inb(ioaddr + 4),
+ inb(ioaddr + 5),
+ inb(ioaddr + 6), inb(ioaddr + 7));
+
+ printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n",
+ dev->name, inb(ioaddr + TRANSMIT_START_REG),
+ inb(ioaddr + COL_16_REG));
+
+ printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue);
+ printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len);
+ printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started);
+
+ }
+
+ lp->stats.tx_errors++;
+
+ eth16i_reset(dev);
+
+ dev->trans_start = jiffies;
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ }
+
+ /*
+ If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself
+ */
+
+ if(skb == NULL) {
+#if LINUX_VERSION_CODE < 0x020100
+ dev_tint(dev);
+#endif
+ if(eth16i_debug > 0)
+ printk(KERN_WARNING "%s: Missed tx-done interrupt.\n", dev->name);
+ return 0;
+ }
+
+ /* Block a timer based transmitter from overlapping.
+ This could better be done with atomic_swap(1, dev->tbusy),
+ but set_bit() works as well. */
+
+ set_bit(0, (void *)&lp->tx_buf_busy);
+
+ /* Turn off TX interrupts */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ if(test_and_set_bit(0, (void *)&dev->tbusy) != 0) {
+ printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
+ status = -1;
+ }
+ else {
+ ushort length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
+ if(eth16i_debug > 0)
+ printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
+ }
+ else {
+ outw(length, ioaddr + DATAPORT);
+
+ if( ioaddr < 0x1000 )
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+ else {
+ unsigned char frag = length % 4;
+
+ outsl(ioaddr + DATAPORT, buf, length >> 2);
+
+ if( frag != 0 ) {
+ outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
+ if( frag == 3 )
+ outsw(ioaddr + DATAPORT,
+ (buf + (length & 0xFFFC) + 2), 1);
+ }
+ }
+
+ lp->tx_buffered_packets++;
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ }
+
+ lp->tx_buf_busy = 0;
+
+ if(lp->tx_started == 0) {
+ /* If the transmitter is idle..always trigger a transmit */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ }
+ else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+ /* There is still more room for one more packet in tx buffer */
+ dev->tbusy = 0;
+ }
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ /* Turn TX interrupts back on */
+ /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
+ status = 0;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020100
+ dev_kfree_skb(skb);
+#else
+ dev_kfree_skb(skb, FREE_WRITE);
+#endif
+
+ return status;
+}
+
+static void eth16i_rx(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = MAX_RX_LOOP;
+
+ /* Loop until all packets have been read */
+ while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
+
+ /* Read status byte from receive buffer */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ /* Get the size of the packet from receive buffer */
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if(eth16i_debug > 4)
+ printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n",
+ dev->name,
+ inb(ioaddr + RECEIVE_MODE_REG), status);
+
+ if( !(status & PKT_GOOD) ) {
+ lp->stats.rx_errors++;
+
+ if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) {
+ lp->stats.rx_length_errors++;
+ eth16i_reset(dev);
+ return;
+ }
+ else {
+ eth16i_skip_packet(dev);
+ lp->stats.rx_dropped++;
+ }
+ }
+ else { /* Ok so now we should have a good packet */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 3);
+ if( skb == NULL ) {
+ printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n",
+ dev->name, pkt_len);
+ eth16i_skip_packet(dev);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ /*
+ Now let's get the packet out of the buffer.
+ The size is (pkt_len + 1) >> 1, because we are now reading words
+ and it has to be evenly aligned.
+ */
+
+ if(ioaddr < 0x1000)
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ else {
+ unsigned char *buf = skb_put(skb, pkt_len);
+ unsigned char frag = pkt_len % 4;
+
+ insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
+
+ if(frag != 0) {
+ unsigned short rest[2];
+ rest[0] = inw( ioaddr + DATAPORT );
+ if(frag == 3)
+ rest[1] = inw( ioaddr + DATAPORT );
+
+ memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
+ }
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+
+ if( eth16i_debug > 5 ) {
+ int i;
+ printk(KERN_DEBUG "%s: Received packet of length %d.\n",
+ dev->name, pkt_len);
+ for(i = 0; i < 14; i++)
+ printk(KERN_DEBUG " %02x", skb->data[i]);
+ printk(KERN_DEBUG ".\n");
+ }
+
+ } /* else */
+
+ if(--boguscount <= 0)
+ break;
+
+ } /* while */
+
+#if 0
+ {
+ int i;
+
+ for(i = 0; i < 20; i++) {
+ if( (inb(ioaddr+RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) ==
+ RX_BUFFER_EMPTY)
+ break;
+ inw(ioaddr + DATAPORT);
+ outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ }
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "%s: Flushed receive buffer.\n", dev->name);
+ }
+#endif
+
+ return;
+}
+
+static void eth16i_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = dev_id;
+ struct eth16i_local *lp;
+ int ioaddr = 0,
+ status;
+
+ if(dev == NULL) {
+ printk(KERN_WARNING "eth16i_interrupt(): irq %d for unknown device. \n", irq);
+ return;
+ }
+
+ /* Turn off all interrupts from adapter */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ set_bit(0, (void *)&dev->tbusy); /* Set the device busy so that */
+ /* eth16i_tx wont be called */
+
+ if(dev->interrupt)
+ printk(KERN_WARNING "%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct eth16i_local *)dev->priv;
+ status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
+ outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
+
+ if(eth16i_debug > 3)
+ printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status);
+
+ if( status & 0x7f00 ) {
+
+ lp->stats.rx_errors++;
+
+ if(status & (BUS_RD_ERR << 8) )
+ printk(KERN_WARNING "%s: Bus read error.\n",dev->name);
+ if(status & (SHORT_PKT_ERR << 8) ) lp->stats.rx_length_errors++;
+ if(status & (ALIGN_ERR << 8) ) lp->stats.rx_frame_errors++;
+ if(status & (CRC_ERR << 8) ) lp->stats.rx_crc_errors++;
+ if(status & (RX_BUF_OVERFLOW << 8) ) lp->stats.rx_over_errors++;
+ }
+ if( status & 0x001a) {
+
+ lp->stats.tx_errors++;
+
+ if(status & CR_LOST) lp->stats.tx_carrier_errors++;
+ if(status & TX_JABBER_ERR) lp->stats.tx_window_errors++;
+
+#if 0
+ if(status & COLLISION) {
+ lp->stats.collisions +=
+ ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4);
+ }
+#endif
+ if(status & COLLISIONS_16) {
+ if(lp->col_16 < MAX_COL_16) {
+ lp->col_16++;
+ lp->stats.collisions++;
+ /* Resume transmitting, skip failed packet */
+ outb(0x02, ioaddr + COL_16_REG);
+ }
+ else {
+ printk(KERN_WARNING "%s: bailing out due to many consecutive 16-in-a-row collisions. Network cable problem?\n", dev->name);
+ }
+ }
+ }
+
+ if( status & 0x00ff ) { /* Let's check the transmit status reg */
+
+ if(status & TX_DONE) { /* The transmit has been done */
+ lp->stats.tx_packets = lp->tx_buffered_packets;
+ lp->col_16 = 0;
+
+ if(lp->tx_queue) { /* Is there still packets ? */
+ /* There were packet(s) so start transmitting and also write
+ how many packets there are to be sent */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->tx_started = 1;
+ dev->trans_start = jiffies;
+ mark_bh(NET_BH);
+ }
+ else {
+ lp->tx_started = 0;
+ mark_bh(NET_BH);
+ }
+ }
+ }
+
+ if( ( status & 0x8000 ) ||
+ ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
+ eth16i_rx(dev); /* We have packet in receive buffer */
+ }
+
+ dev->interrupt = 0;
+
+ /* Turn interrupts back on */
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+ /* There is still more room for one more packet in tx buffer */
+ dev->tbusy = 0;
+ }
+
+ return;
+}
+
+static void eth16i_skip_packet(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0);
+}
+
+static void eth16i_reset(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting device.\n", dev->name);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ outw(0xffff, ioaddr + TX_STATUS_REG);
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->tx_started = 0;
+ lp->tx_buf_busy = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ dev->interrupt = 0;
+ dev->start = 1;
+ dev->tbusy = 0;
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+}
+
+static void eth16i_multicast(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ {
+ dev->flags|=IFF_PROMISC; /* Must do this */
+ outb(3, ioaddr + RECEIVE_MODE_REG);
+ } else {
+ outb(2, ioaddr + RECEIVE_MODE_REG);
+ }
+}
+
+static struct enet_statistics *eth16i_get_stats(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+static void eth16i_select_regbank(unsigned char banknbr, int ioaddr)
+{
+ unsigned char data;
+
+ data = inb(ioaddr + CONFIG_REG_1);
+ outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
+}
+
+#ifdef MODULE
+
+static ushort eth16i_parse_mediatype(const char* s)
+{
+ if(!s)
+ return E_PORT_FROM_EPROM;
+
+ if (!strncmp(s, "bnc", 3))
+ return E_PORT_BNC;
+ else if (!strncmp(s, "tp", 2))
+ return E_PORT_TP;
+ else if (!strncmp(s, "dix", 3))
+ return E_PORT_DIX;
+ else if (!strncmp(s, "auto", 4))
+ return E_PORT_AUTO;
+ else
+ return E_PORT_FROM_EPROM;
+}
+
+#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */
+#define NAMELEN 8 /* number of chars for storing dev->name */
+
+static char namelist[NAMELEN * MAX_ETH16I_CARDS] = { 0, };
+static struct device dev_eth16i[MAX_ETH16I_CARDS] = {
+ {
+ NULL,
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int ioaddr[MAX_ETH16I_CARDS] = { 0, };
+#if 0
+static int irq[MAX_ETH16I_CARDS] = { 0, };
+#endif
+static char* mediatype[MAX_ETH16I_CARDS] = { 0, };
+static int debug = -1;
+
+#if (LINUX_VERSION_CODE >= 0x20115)
+MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>");
+MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
+
+MODULE_PARM(ioaddr, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "i");
+MODULE_PARM_DESC(ioaddr, "eth16i io base address");
+
+#if 0
+MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "i");
+MODULE_PARM_DESC(irq, "eth16i interrupt request number");
+#endif
+
+MODULE_PARM(mediatype, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "s");
+MODULE_PARM_DESC(mediatype, "eth16i interfaceport mediatype");
+
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "eth16i debug level (0-4)");
+#endif
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++)
+ {
+ struct device *dev = &dev_eth16i[this_dev];
+
+ dev->name = namelist + (NAMELEN*this_dev);
+ dev->irq = 0; /* irq[this_dev]; */
+ dev->base_addr = ioaddr[this_dev];
+ dev->init = eth16i_probe;
+
+ if(debug != -1)
+ eth16i_debug = debug;
+
+ if(eth16i_debug > 1)
+ printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" );
+
+ dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
+
+ if(ioaddr[this_dev] == 0)
+ {
+ if(this_dev != 0) break; /* Only autoprobe 1st one */
+
+ printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+
+ if(register_netdev(dev) != 0)
+ {
+ printk(KERN_WARNING "eth16i.c No Eth16i card found (i/o = 0x%x).\n",
+ ioaddr[this_dev]);
+
+ if(found != 0) return 0;
+ return -ENXIO;
+ }
+
+ found++;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++)
+ {
+ struct device* dev = &dev_eth16i[this_dev];
+
+ if(dev->priv != NULL)
+ {
+ unregister_netdev(dev);
+ kfree(dev->priv);
+ dev->priv = NULL;
+
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ETH16I_IO_EXTENT);
+
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eth16i.c"
+ * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict -prototypes -O6 -c eth16i.c"
+ * tab-width: 8
+ * c-basic-offset: 8
+ * c-indent-level: 8
+ * End:
+ */
+
+/* End of file eth16i.c */
diff --git a/linux/src/drivers/net/eth82586.h b/linux/src/drivers/net/eth82586.h
new file mode 100644
index 0000000..c2178ff
--- /dev/null
+++ b/linux/src/drivers/net/eth82586.h
@@ -0,0 +1,172 @@
+/*
+ * eth82586.h: Intel EtherExpress defines
+ *
+ * Written 1995 by John Sullivan
+ * See eexpress.c for further details
+ * documentation and usage to do.
+ */
+
+/*
+ * EtherExpress card register addresses
+ * as offsets from the base IO region (dev->base_addr)
+ */
+
+#define DATAPORT 0x0000
+#define WRITE_PTR 0x0002
+#define READ_PTR 0x0004
+#define SIGNAL_CA 0x0006
+#define SET_IRQ 0x0007
+#define SM_PTR 0x0008
+#define MEM_Ctrl 0x000b
+#define MEM_Page_Ctrl 0x000c
+#define Config 0x000d
+#define EEPROM_Ctrl 0x000e
+#define ID_PORT 0x000f
+
+/*
+ * offset to shadowed memory, 0 <= x <= 31. We don't use this yet,
+ * but may in the future. Is shadow memory access any faster than
+ * dataport access?
+ */
+#define SM_ADDR(x) (0x4000+((x&0x10)<<10)+(x&0xf))
+
+/* Always mirrors eexp-memory at 0x0008-0x000f */
+#define SCB_STATUS 0xc008
+#define SCB_CMD 0xc00a
+#define SCB_CBL 0xc00c
+#define SCB_RFA 0xc00e
+
+
+
+/*
+ * card register defines
+ */
+
+/* SET_IRQ */
+#define SIRQ_en 0x08
+#define SIRQ_dis 0x00
+
+/* Config */
+#define set_loopback outb(inb(ioaddr+Config)|0x02,ioaddr+Config)
+#define clear_loopback outb(inb(ioaddr+Config)&0xfd,ioaddr+Config)
+
+/* EEPROM_Ctrl */
+#define EC_Clk 0x01
+#define EC_CS 0x02
+#define EC_Wr 0x04
+#define EC_Rd 0x08
+#define ASIC_RST 0x40
+#define i586_RST 0x80
+
+#define eeprom_delay() { int _i = 40; while (--_i>0) { __SLOW_DOWN_IO; }}
+
+/*
+ * i82586 Memory Configuration
+ */
+
+/* (System Configuration Pointer) System start up block, read after 586_RST */
+#define SCP_START 0xfff6
+
+
+/* Intermediate System Configuration Pointer */
+#define ISCP_START 0x0000
+/* System Command Block */
+#define SCB_START 0x0008
+
+/*
+ * Start of buffer region. If we have 64k memory, eexp_hw_probe() may raise
+ * NUM_TX_BUFS. RX_BUF_END is set to the end of memory, and all space between
+ * the transmit buffer region and the end of memory is used for as many
+ * receive buffers as we can fit. See eexp_hw_[(rx)(tx)]init().
+ */
+#define TX_BUF_START 0x0100
+#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
+#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
+
+
+
+/*
+ * SCB defines
+ */
+
+/* these functions take the SCB status word and test the relevant status bit */
+#define SCB_complete(s) ((s&0x8000)!=0)
+#define SCB_rxdframe(s) ((s&0x4000)!=0)
+#define SCB_CUdead(s) ((s&0x2000)!=0)
+#define SCB_RUdead(s) ((s&0x1000)!=0)
+#define SCB_ack(s) (s & 0xf000)
+
+/* Command unit status: 0=idle, 1=suspended, 2=active */
+#define SCB_CUstat(s) ((s&0x0300)>>8)
+
+/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
+#define SCB_RUstat(s) ((s&0x0070)>>4)
+
+/* SCB commands */
+#define SCB_CUnop 0x0000
+#define SCB_CUstart 0x0100
+#define SCB_CUresume 0x0200
+#define SCB_CUsuspend 0x0300
+#define SCB_CUabort 0x0400
+
+/* ? */
+#define SCB_resetchip 0x0080
+
+#define SCB_RUnop 0x0000
+#define SCB_RUstart 0x0010
+#define SCB_RUresume 0x0020
+#define SCB_RUsuspend 0x0030
+#define SCB_RUabort 0x0040
+
+
+/*
+ * Command block defines
+ */
+
+#define Stat_Done(s) ((s&0x8000)!=0)
+#define Stat_Busy(s) ((s&0x4000)!=0)
+#define Stat_OK(s) ((s&0x2000)!=0)
+#define Stat_Abort(s) ((s&0x1000)!=0)
+#define Stat_STFail ((s&0x0800)!=0)
+#define Stat_TNoCar(s) ((s&0x0400)!=0)
+#define Stat_TNoCTS(s) ((s&0x0200)!=0)
+#define Stat_TNoDMA(s) ((s&0x0100)!=0)
+#define Stat_TDefer(s) ((s&0x0080)!=0)
+#define Stat_TColl(s) ((s&0x0040)!=0)
+#define Stat_TXColl(s) ((s&0x0020)!=0)
+#define Stat_NoColl(s) (s&0x000f)
+
+/* Cmd_END will end AFTER the command if this is the first
+ * command block after an SCB_CUstart, but BEFORE the command
+ * for all subsequent commands. Best strategy is to place
+ * Cmd_INT on the last command in the sequence, followed by a
+ * dummy Cmd_Nop with Cmd_END after this.
+ */
+#define Cmd_END 0x8000
+#define Cmd_SUS 0x4000
+#define Cmd_INT 0x2000
+
+#define Cmd_Nop 0x0000
+#define Cmd_SetAddr 0x0001
+#define Cmd_Config 0x0002
+#define Cmd_MCast 0x0003
+#define Cmd_Xmit 0x0004
+#define Cmd_TDR 0x0005
+#define Cmd_Dump 0x0006
+#define Cmd_Diag 0x0007
+
+
+/*
+ * Frame Descriptor (Receive block) defines
+ */
+
+#define FD_Done(s) ((s&0x8000)!=0)
+#define FD_Busy(s) ((s&0x4000)!=0)
+#define FD_OK(s) ((s&0x2000)!=0)
+
+#define FD_CRC(s) ((s&0x0800)!=0)
+#define FD_Align(s) ((s&0x0400)!=0)
+#define FD_Resrc(s) ((s&0x0200)!=0)
+#define FD_DMA(s) ((s&0x0100)!=0)
+#define FD_Short(s) ((s&0x0080)!=0)
+#define FD_NoEOF(s) ((s&0x0040)!=0)
diff --git a/linux/src/drivers/net/ewrk3.c b/linux/src/drivers/net/ewrk3.c
new file mode 100644
index 0000000..07b0f13
--- /dev/null
+++ b/linux/src/drivers/net/ewrk3.c
@@ -0,0 +1,1920 @@
+/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
+
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of EtherWORKS ethernet cards:
+
+ DE203 Turbo (BNC)
+ DE204 Turbo (TP)
+ DE205 Turbo (TP BNC)
+
+ The driver has been tested on a relatively busy network using the DE205
+ card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
+ (7.8Mb/s) to a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'depca.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious.
+
+ The DE203/4/5 boards all use a new proprietary chip in place of the
+ LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
+ Use the depca.c driver in the standard distribution for the LANCE based
+ cards from DIGITAL; this driver will not work with them.
+
+ The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
+ only makes all the card accesses through I/O transactions and no high
+ (shared) memory is used. This mode provides a >48% performance penalty
+ and is deprecated in this driver, although allowed to provide initial
+ setup when hardstrapped.
+
+ The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
+ no point in using any mode other than the 2kB mode - their performances
+ are virtually identical, although the driver has been tested in the 2kB
+ and 32kB modes. I would suggest you uncomment the line:
+
+ FORCE_2K_MODE;
+
+ to allow the driver to configure the card as a 2kB card at your current
+ base address, thus leaving more room to clutter your system box with
+ other memory hungry boards.
+
+ As many ISA and EISA cards can be supported under this driver as you
+ wish, limited primarily by the available IRQ lines, rather than by the
+ available I/O addresses (24 ISA, 16 EISA). I have checked different
+ configurations of multiple depca cards and ewrk3 cards and have not
+ found a problem yet (provided you have at least depca.c v0.38) ...
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. All these cards are at
+ {5,10,11,15}.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) edit the source code near line 1898 to reflect the I/O address and
+ IRQ you're using.
+ 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the ewrk3 configuration turned off and reboot.
+ 5) insmod ewrk3.o
+ [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
+ 6) run the net startup bits for your new eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod ewrk3'.
+
+ Promiscuous mode has been turned off in this driver, but all the
+ multicast address bits have been turned on. This improved the send
+ performance on a busy network by about 13%.
+
+ Ioctl's have now been provided (primarily because I wanted to grab some
+ packet size statistics). They are patterned after 'plipconfig.c' from a
+ suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
+ mode, add/delete multicast addresses, change the hardware address, get
+ packet size distribution statistics and muck around with the control and
+ status register. I'll add others if and when the need arises.
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 26-aug-94 Initial writing. ALPHA code release.
+ 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
+ LeMAC version calc.,
+ IRQ vector assignments during autoprobe.
+ 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
+ Fixed up MCA hash table algorithm.
+ 0.20 4-sep-94 Added IOCTL functionality.
+ 0.21 14-sep-94 Added I/O mode.
+ 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
+ 0.22 16-sep-94 Added more IOCTLs & tidied up.
+ 0.23 21-sep-94 Added transmit cut through.
+ 0.24 31-oct-94 Added uid checks in some ioctls.
+ 0.30 1-nov-94 BETA code release.
+ 0.31 5-dec-94 Added check/allocate region code.
+ 0.32 16-jan-95 Broadcast packet fix.
+ 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
+ Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ Added verify_area() calls in ewrk3_ioctl() from
+ suggestion by <heiko@colossus.escape.de>.
+ Add new multicasting code.
+ 0.41 20-Jan-96 Fix IRQ set up problem reported by
+ <kenneth@bbs.sas.ntu.ac.sg>.
+ 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+
+ =========================================================================
+*/
+
+static const char *version = "ewrk3.c:v0.43 96/8/16 davies@maniac.ultranet.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/segment.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+
+#include "ewrk3.h"
+
+#ifdef EWRK3_DEBUG
+static int ewrk3_debug = EWRK3_DEBUG;
+#else
+static int ewrk3_debug = 1;
+#endif
+
+#define EWRK3_NDA 0xffe0 /* No Device Address */
+
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+#ifndef EWRK3_SIGNATURE
+#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
+#define EWRK3_STRLEN 8
+#endif
+
+#ifndef EWRK3_RAM_BASE_ADDRESSES
+#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
+#endif
+
+/*
+** Sets up the I/O area for the autoprobe.
+*/
+#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
+#define EWRK3_IOP_INC 0x20 /* I/O address increment */
+#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
+
+#ifndef MAX_NUM_EWRK3S
+#define MAX_NUM_EWRK3S 21
+#endif
+
+#ifndef EWRK3_EISA_IO_PORTS
+#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#endif
+
+#ifndef MAX_EISA_SLOTS
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+#endif
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */
+
+/*
+** EtherWORKS 3 shared memory window sizes
+*/
+#define IO_ONLY 0x00
+#define SHMEM_2K 0x800
+#define SHMEM_32K 0x8000
+#define SHMEM_64K 0x10000
+
+/*
+** EtherWORKS 3 IRQ ENABLE/DISABLE
+*/
+#define ENABLE_IRQs { \
+ icr |= lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs { \
+ icr = inb(EWRK3_ICR);\
+ icr &= ~lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Disable the IRQs */\
+}
+
+/*
+** EtherWORKS 3 START/STOP
+*/
+#define START_EWRK3 { \
+ csr = inb(EWRK3_CSR);\
+ csr &= ~(CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_EWRK3 { \
+ csr = (CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
+}
+
+/*
+** The EtherWORKS 3 private structure
+*/
+#define EWRK3_PKT_STAT_SZ 16
+#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase EWRK3_PKT_STAT_SZ */
+
+struct ewrk3_private {
+ char adapter_name[80]; /* Name exported to /proc/ioports */
+ u_long shmem_base; /* Shared memory start address */
+ u_long shmem_length; /* Shared memory window length */
+ struct enet_statistics stats; /* Public stats */
+ struct {
+ u32 bins[EWRK3_PKT_STAT_SZ]; /* Private stats counters */
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ u_char irq_mask; /* Adapter IRQ mask bits */
+ u_char mPage; /* Maximum 2kB Page number */
+ u_char lemac; /* Chip rev. level */
+ u_char hard_strapped; /* Don't allow a full open */
+ u_char lock; /* Lock the page register */
+ u_char txc; /* Transmit cut through */
+ u_char *mctbl; /* Pointer to the multicast table */
+};
+
+/*
+** Force the EtherWORKS 3 card to be in 2kB MODE
+*/
+#define FORCE_2K_MODE { \
+ shmem_length = SHMEM_2K;\
+ outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
+}
+
+/*
+** Public Functions
+*/
+static int ewrk3_open(struct device *dev);
+static int ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev);
+static void ewrk3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int ewrk3_close(struct device *dev);
+static struct enet_statistics *ewrk3_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static int ewrk3_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int ewrk3_hw_init(struct device *dev, u_long iobase);
+static void ewrk3_init(struct device *dev);
+static int ewrk3_rx(struct device *dev);
+static int ewrk3_tx(struct device *dev);
+
+static void EthwrkSignature(char * name, char *eeprom_image);
+static int DevicePresent(u_long iobase);
+static void SetMulticastFilter(struct device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+
+static int Read_EEPROM(u_long iobase, u_char eaddr);
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
+static u_char get_hw_addr (struct device *dev, u_char *eeprom_image, char chipType);
+
+static void isa_probe(struct device *dev, u_long iobase);
+static void eisa_probe(struct device *dev, u_long iobase);
+static struct device *alloc_device(struct device *dev, u_long iobase);
+static int ewrk3_dev_index(char *s);
+static struct device *insert_device(struct device *dev, u_long iobase, int (*init)(struct device *));
+
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static int autoprobed = 1, loading_module = 1;
+
+# else
+static u_char irq[] = {5,0,10,3,11,9,15,12};
+static int autoprobed = 0, loading_module = 0;
+
+#endif /* MODULE */
+
+static char name[EWRK3_STRLEN + 1];
+static int num_ewrk3s = 0, num_eth = 0;
+
+/*
+** Miscellaneous defines...
+*/
+#define INIT_EWRK3 {\
+ outb(EEPROM_INIT, EWRK3_IOPR);\
+ udelay(1000);\
+}
+
+
+
+
+int ewrk3_probe(struct device *dev)
+{
+ int tmp = num_ewrk3s, status = -ENODEV;
+ u_long iobase = dev->base_addr;
+
+ if ((iobase == 0) && loading_module){
+ printk("Autoprobing is not supported when loading a module based driver.\n");
+ status = -EIO;
+ } else { /* First probe for the Ethernet */
+ /* Address PROM pattern */
+ isa_probe(dev, iobase);
+ eisa_probe(dev, iobase);
+
+ if ((tmp == num_ewrk3s) && (iobase != 0) && loading_module) {
+ printk("%s: ewrk3_probe() cannot find device at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+
+ /*
+ ** Walk the device list to check that at least one device
+ ** initialised OK
+ */
+ for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
+
+ if (dev->priv) status = 0;
+ if (iobase == 0) autoprobed = 1;
+ }
+
+ return status;
+}
+
+static int
+ewrk3_hw_init(struct device *dev, u_long iobase)
+{
+ struct ewrk3_private *lp;
+ int i, status=0;
+ u_long mem_start, shmem_length;
+ u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
+ u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
+
+ /*
+ ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
+ ** This also disables the EISA_ENABLE bit in the EISA Control Register.
+ */
+ if (iobase > 0x400) eisa_cr = inb(EISA_CR);
+ INIT_EWRK3;
+
+ nicsr = inb(EWRK3_CSR);
+
+ icr = inb(EWRK3_ICR);
+ icr &= 0x70;
+ outb(icr, EWRK3_ICR); /* Disable all the IRQs */
+
+ if (nicsr == (CSR_TXD|CSR_RXD)) {
+
+ /* Check that the EEPROM is alive and well and not living on Pluto... */
+ for (chksum=0, i=0; i<EEPROM_MAX; i+=2) {
+ union {
+ short val;
+ char c[2];
+ } tmp;
+
+ tmp.val = (short)Read_EEPROM(iobase, (i>>1));
+ eeprom_image[i] = tmp.c[0];
+ eeprom_image[i+1] = tmp.c[1];
+ chksum += eeprom_image[i] + eeprom_image[i+1];
+ }
+
+ if (chksum != 0) { /* Bad EEPROM Data! */
+ printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
+ status = -ENXIO;
+ } else {
+ EthwrkSignature(name, eeprom_image);
+ if (*name != '\0') { /* found a EWRK3 device */
+ dev->base_addr = iobase;
+
+ if (iobase > 0x400) {
+ outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
+ }
+
+ lemac = eeprom_image[EEPROM_CHIPVER];
+ cmr = inb(EWRK3_CMR);
+
+ if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
+ ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ hard_strapped = 1;
+ } else if ((iobase&0x0fff)==EWRK3_EISA_IO_PORTS) {
+ /* EISA slot address */
+ printk("%s: %s at %#4lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase>>12)&0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ }
+
+ if (!status) {
+ printk(", h/w address ");
+ if (lemac!=LeMAC2) DevicePresent(iobase);/* need after EWRK3_INIT */
+ status = get_hw_addr(dev, eeprom_image, lemac);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ if (status) {
+ printk(" which has an EEPROM CRC error.\n");
+ status = -ENXIO;
+ } else {
+ if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
+ cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
+ if (eeprom_image[EEPROM_MISC0] & READ_AHEAD) cmr |= CMR_RA;
+ if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND) cmr |= CMR_WB;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL) cmr |= CMR_POLARITY;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK) cmr |= CMR_LINK;
+ if (eeprom_image[EEPROM_MISC0] & _0WS_ENA) cmr |= CMR_0WS;
+ }
+ if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM) cmr |= CMR_DRAM;
+ outb(cmr, EWRK3_CMR);
+
+ cr = inb(EWRK3_CR); /* Set up the Control Register */
+ cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
+ if (cr & SETUP_APD) cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
+ cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
+ cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
+ outb(cr, EWRK3_CR);
+
+ /*
+ ** Determine the base address and window length for the EWRK3
+ ** RAM from the memory base register.
+ */
+ mem_start = inb(EWRK3_MBR);
+ shmem_length = 0;
+ if (mem_start != 0) {
+ if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
+ mem_start *= SHMEM_64K;
+ shmem_length = SHMEM_64K;
+ } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
+ mem_start *= SHMEM_32K;
+ shmem_length = SHMEM_32K;
+ } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
+ mem_start = mem_start * SHMEM_2K + 0x80000;
+ shmem_length = SHMEM_2K;
+ } else {
+ status = -ENXIO;
+ }
+ }
+
+ /*
+ ** See the top of this source code for comments about
+ ** uncommenting this line.
+ */
+/* FORCE_2K_MODE;*/
+
+ if (!status) {
+ if (hard_strapped) {
+ printk(" is hard strapped.\n");
+ } else if (mem_start) {
+ printk(" has a %dk RAM window", (int)(shmem_length >> 10));
+ printk(" at 0x%.5lx", mem_start);
+ } else {
+ printk(" is in I/O only mode");
+ }
+
+ /* private area & initialise */
+ dev->priv = (void *) kmalloc(sizeof(struct ewrk3_private),
+ GFP_KERNEL);
+ if (dev->priv == NULL) {
+ return -ENOMEM;
+ }
+ lp = (struct ewrk3_private *)dev->priv;
+ memset(dev->priv, 0, sizeof(struct ewrk3_private));
+ lp->shmem_base = mem_start;
+ lp->shmem_length = shmem_length;
+ lp->lemac = lemac;
+ lp->hard_strapped = hard_strapped;
+
+ lp->mPage = 64;
+ if (cmr & CMR_DRAM) lp->mPage <<= 1 ;/* 2 DRAMS on module */
+
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ request_region(iobase, EWRK3_TOTAL_SIZE, lp->adapter_name);
+
+ lp->irq_mask = ICR_TNEM|ICR_TXDM|ICR_RNEM|ICR_RXDM;
+
+ if (!hard_strapped) {
+ /*
+ ** Enable EWRK3 board interrupts for autoprobing
+ */
+ icr |= ICR_IE; /* Enable interrupts */
+ outb(icr, EWRK3_ICR);
+
+ /* The DMA channel may be passed in on this parameter. */
+ dev->dma = 0;
+
+ /* To auto-IRQ we enable the initialization-done and DMA err,
+ interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ u_char irqnum;
+
+ autoirq_setup(0);
+
+ /*
+ ** Trigger a TNE interrupt.
+ */
+ icr |=ICR_TNEM;
+ outb(1,EWRK3_TDQ); /* Write to the TX done queue */
+ outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */
+
+ irqnum = irq[((icr & IRQ_SEL) >> 4)];
+
+ dev->irq = autoirq_report(1);
+ if ((dev->irq) && (irqnum == dev->irq)) {
+ printk(" and uses IRQ%d.\n", dev->irq);
+ } else {
+ if (!dev->irq) {
+ printk(" and failed to detect IRQ line.\n");
+ } else if ((irqnum == 1) && (lemac == LeMAC2)) {
+ printk(" and an illegal IRQ line detected.\n");
+ } else {
+ printk(", but incorrect IRQ line detected.\n");
+ }
+ status = -ENXIO;
+ }
+
+ DISABLE_IRQs; /* Mask all interrupts */
+
+#endif /* MODULE */
+ } else {
+ printk(" and requires IRQ%d.\n", dev->irq);
+ }
+ }
+ if (status) release_region(iobase, EWRK3_TOTAL_SIZE);
+ } else {
+ status = -ENXIO;
+ }
+ }
+ }
+ } else {
+ status = -ENXIO;
+ }
+ }
+
+ if (!status) {
+ if (ewrk3_debug > 1) {
+ printk("%s", version);
+ }
+
+ /* The EWRK3-specific entries in the device structure. */
+ dev->open = &ewrk3_open;
+ dev->hard_start_xmit = &ewrk3_queue_pkt;
+ dev->stop = &ewrk3_close;
+ dev->get_stats = &ewrk3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &ewrk3_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+ }
+ } else {
+ status = -ENXIO;
+ }
+
+ return status;
+}
+
+
+static int
+ewrk3_open(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char icr, csr;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ if (!lp->hard_strapped) {
+ irq2dev_map[dev->irq] = dev; /* For latched interrupts */
+
+ if (request_irq(dev->irq, (void *)ewrk3_interrupt, 0, "ewrk3", NULL)) {
+ printk("ewrk3_open(): Requested IRQ%d is busy\n",dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /*
+ ** Re-initialize the EWRK3...
+ */
+ ewrk3_init(dev);
+
+ if (ewrk3_debug > 1){
+ printk("%s: ewrk3 open with irq %d\n",dev->name,dev->irq);
+ printk(" physical address: ");
+ for (i=0;i<5;i++){
+ printk("%2.2x:",(u_char)dev->dev_addr[i]);
+ }
+ printk("%2.2x\n",(u_char)dev->dev_addr[i]);
+ if (lp->shmem_length == 0) {
+ printk(" no shared memory, I/O only mode\n");
+ } else {
+ printk(" start of shared memory: 0x%08lx\n",lp->shmem_base);
+ printk(" window length: 0x%04lx\n",lp->shmem_length);
+ }
+ printk(" # of DRAMS: %d\n",((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
+ printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
+ printk(" cr: 0x%02x\n", inb(EWRK3_CR));
+ printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
+ printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
+ printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
+ }
+
+ dev->tbusy = 0;
+ dev->start = 1;
+ dev->interrupt = UNMASK_INTERRUPTS;
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ icr = inb(EWRK3_ICR);
+ ENABLE_IRQs;
+
+ }
+ } else {
+ dev->start = 0;
+ dev->tbusy = 1;
+ printk("%s: ewrk3 available for hard strapped set up only.\n", dev->name);
+ printk(" Run the 'ewrk3setup' utility or remove the hard straps.\n");
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/*
+** Initialize the EtherWORKS 3 operating conditions
+*/
+static void
+ewrk3_init(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_char csr, page;
+ u_long iobase = dev->base_addr;
+
+ /*
+ ** Enable any multicasts
+ */
+ set_multicast_list(dev);
+
+ /*
+ ** Clean out any remaining entries in all the queues here
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+ while (inb(EWRK3_FMQ));
+
+ /*
+ ** Write a clean free memory queue
+ */
+ for (page=1;page<lp->mPage;page++) { /* Write the free page numbers */
+ outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
+ }
+
+ lp->lock = 0; /* Ensure there are no locks */
+
+ START_EWRK3; /* Enable the TX and/or RX */
+}
+
+/*
+** Writes a socket buffer to the free page queue
+*/
+static int
+ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int status = 0;
+ u_char icr, csr;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy || lp->lock) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < QUEUE_PKT_TIMEOUT) {
+ status = -1;
+ } else if (!lp->hard_strapped) {
+ printk("%s: transmit timed/locked out, status %04x, resetting.\n",
+ dev->name, inb(EWRK3_CSR));
+
+ /*
+ ** Mask all board interrupts
+ */
+ DISABLE_IRQs;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ ewrk3_init(dev);
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ ENABLE_IRQs;
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ } else if (skb == NULL) {
+ dev_tint(dev);
+ } else if (skb->len > 0) {
+
+ /*
+ ** Block a timer-based transmit from overlapping. This could better be
+ ** done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+
+ DISABLE_IRQs; /* So that the page # remains correct */
+
+ /*
+ ** Get a free page from the FMQ when resources are available
+ */
+ if (inb(EWRK3_FMQC) > 0) {
+ u_long buf = 0;
+ u_char page;
+
+ if ((page = inb(EWRK3_FMQ)) < lp->mPage) {
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem_base;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = ((((short)page << 11) & 0x7800) + lp->shmem_base);
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = ((((short)page << 11) & 0xf800) + lp->shmem_base);
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n",dev->name);
+ }
+
+ if (!status) {
+
+ /*
+ ** Set up the buffer control structures and copy the data from
+ ** the socket buffer to the shared memory .
+ */
+
+ if (lp->shmem_length == IO_ONLY) {
+ int i;
+ u_char *p = skb->data;
+
+ outb((char)(TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
+ outb((char)(skb->len & 0xff), EWRK3_DATA);
+ outb((char)((skb->len >> 8) & 0xff), EWRK3_DATA);
+ outb((char)0x04, EWRK3_DATA);
+ for (i=0; i<skb->len; i++) {
+ outb(*p++, EWRK3_DATA);
+ }
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ } else {
+ writeb((char)(TCR_QMODE|TCR_PAD|TCR_IFC), (char *)buf);/* ctrl byte*/
+ buf+=1;
+ writeb((char)(skb->len & 0xff), (char *)buf);/* length (16 bit xfer)*/
+ buf+=1;
+ if (lp->txc) {
+ writeb((char)(((skb->len >> 8) & 0xff) | XCT), (char *)buf);
+ buf+=1;
+ writeb(0x04, (char *)buf); /* index byte */
+ buf+=1;
+ writeb(0x00, (char *)(buf + skb->len)); /* Write the XCT flag */
+ memcpy_toio(buf, skb->data, PRELOAD);/* Write PRELOAD bytes*/
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ memcpy_toio(buf+PRELOAD, skb->data+PRELOAD, skb->len-PRELOAD);
+ writeb(0xff, (char *)(buf + skb->len)); /* Write the XCT flag */
+ } else {
+ writeb((char)((skb->len >> 8) & 0xff), (char *)buf);
+ buf+=1;
+ writeb(0x04, (char *)buf); /* index byte */
+ buf+=1;
+ memcpy_toio((char *)buf, skb->data, skb->len);/* Write data bytes */
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ }
+ }
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ } else { /* return unused page to the free memory queue */
+ outb(page, EWRK3_FMQ);
+ }
+ lp->lock = 0; /* unlock the page register */
+ } else {
+ printk("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
+ (u_char) page);
+ }
+ } else {
+ printk("ewrk3_queue_pkt(): No free resources...\n");
+ printk("ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",inb(EWRK3_CSR),inb(EWRK3_ICR),inb(EWRK3_FMQC));
+ }
+
+ /* Check for free resources: clear 'tbusy' if there are some */
+ if (inb(EWRK3_FMQC) > 0) {
+ dev->tbusy = 0;
+ }
+
+ ENABLE_IRQs;
+ }
+
+ return status;
+}
+
+/*
+** The EWRK3 interrupt handler.
+*/
+static void
+ewrk3_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct ewrk3_private *lp;
+ u_long iobase;
+ u_char icr, cr, csr;
+
+ if (dev == NULL) {
+ printk ("ewrk3_interrupt(): irq %d for unknown device.\n", irq);
+ } else {
+ lp = (struct ewrk3_private *)dev->priv;
+ iobase = dev->base_addr;
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = MASK_INTERRUPTS;
+
+ /* get the interrupt information */
+ csr = inb(EWRK3_CSR);
+
+ /*
+ ** Mask the EWRK3 board interrupts and turn on the LED
+ */
+ DISABLE_IRQs;
+
+ cr = inb(EWRK3_CR);
+ cr |= CR_LED;
+ outb(cr, EWRK3_CR);
+
+ if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
+ ewrk3_rx(dev);
+
+ if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
+ ewrk3_tx(dev);
+
+ /*
+ ** Now deal with the TX/RX disable flags. These are set when there
+ ** are no more resources. If resources free up then enable these
+ ** interrupts, otherwise mask them - failure to do this will result
+ ** in the system hanging in an interrupt loop.
+ */
+ if (inb(EWRK3_FMQC)) { /* any resources available? */
+ lp->irq_mask |= ICR_TXDM|ICR_RXDM;/* enable the interrupt source */
+ csr &= ~(CSR_TXD|CSR_RXD);/* ensure restart of a stalled TX or RX */
+ outb(csr, EWRK3_CSR);
+ dev->tbusy = 0; /* clear TX busy flag */
+ mark_bh(NET_BH);
+ } else {
+ lp->irq_mask &= ~(ICR_TXDM|ICR_RXDM);/* disable the interrupt source */
+ }
+
+ /* Unmask the EWRK3 board interrupts and turn off the LED */
+ cr &= ~CR_LED;
+ outb(cr, EWRK3_CR);
+
+ dev->interrupt = UNMASK_INTERRUPTS;
+ ENABLE_IRQs;
+ }
+
+ return;
+}
+
+static int
+ewrk3_rx(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char page, tmpPage = 0, tmpLock = 0;
+ u_long buf = 0;
+
+ while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
+ if ((page = inb(EWRK3_RQ)) < lp->mPage) {/* Get next entry's buffer page */
+ /*
+ ** Preempt any process using the current page register. Check for
+ ** an existing lock to reduce time taken in I/O transactions.
+ */
+ if ((tmpLock = set_bit(0, (void *)&lp->lock)) == 1) { /* Assert lock */
+ if (lp->shmem_length == IO_ONLY) { /* Get existing page */
+ tmpPage = inb(EWRK3_IOPR);
+ } else {
+ tmpPage = inb(EWRK3_MPR);
+ }
+ }
+
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem_base;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = ((((short)page << 11) & 0x7800) + lp->shmem_base);
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = ((((short)page << 11) & 0xf800) + lp->shmem_base);
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n",dev->name);
+ }
+
+ if (!status) {
+ char rx_status;
+ int pkt_len;
+
+ if (lp->shmem_length == IO_ONLY) {
+ rx_status = inb(EWRK3_DATA);
+ pkt_len = inb(EWRK3_DATA);
+ pkt_len |= ((u_short)inb(EWRK3_DATA) << 8);
+ } else {
+ rx_status = readb(buf);
+ buf+=1;
+ pkt_len = readw(buf);
+ buf+=3;
+ }
+
+ if (!(rx_status & R_ROK)) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (rx_status & R_DBE) lp->stats.rx_frame_errors++;
+ if (rx_status & R_CRC) lp->stats.rx_crc_errors++;
+ if (rx_status & R_PLL) lp->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
+ unsigned char *p;
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align to 16 bytes */
+ p = skb_put(skb,pkt_len);
+
+ if (lp->shmem_length == IO_ONLY) {
+ *p = inb(EWRK3_DATA); /* dummy read */
+ for (i=0; i<pkt_len; i++) {
+ *p++ = inb(EWRK3_DATA);
+ }
+ } else {
+ memcpy_fromio(p, buf, pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ lp->stats.rx_packets++;
+ for (i=1; i<EWRK3_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < i*EWRK3_PKT_BIN_SZ) {
+ lp->pktStats.bins[i]++;
+ i = EWRK3_PKT_STAT_SZ;
+ }
+ }
+ p = skb->data; /* Look at the dest addr */
+ if (p[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s32 *)&p[0] == -1) && (*(s16 *)&p[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s32 *)&p[0] == *(s32 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&p[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Insufficient memory; nuking packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ }
+ /*
+ ** Return the received buffer to the free memory queue
+ */
+ outb(page, EWRK3_FMQ);
+
+ if (tmpLock) { /* If a lock was preempted */
+ if (lp->shmem_length == IO_ONLY) { /* Replace old page */
+ outb(tmpPage, EWRK3_IOPR);
+ } else {
+ outb(tmpPage, EWRK3_MPR);
+ }
+ }
+ lp->lock = 0; /* Unlock the page register */
+ } else {
+ printk("ewrk3_rx(): Illegal page number, page %d\n",page);
+ printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n",inb(EWRK3_CSR),inb(EWRK3_ICR),inb(EWRK3_FMQC));
+ }
+ }
+ return status;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
+static int
+ewrk3_tx(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char tx_status;
+
+ while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
+ if (tx_status & T_VSTS) { /* The status is valid */
+ if (tx_status & T_TXE) {
+ lp->stats.tx_errors++;
+ if (tx_status & T_NCL) lp->stats.tx_carrier_errors++;
+ if (tx_status & T_LCL) lp->stats.tx_window_errors++;
+ if (tx_status & T_CTU) {
+ if ((tx_status & T_COLL) ^ T_XUR) {
+ lp->pktStats.tx_underruns++;
+ } else {
+ lp->pktStats.excessive_underruns++;
+ }
+ } else if (tx_status & T_COLL) {
+ if ((tx_status & T_COLL) ^ T_XCOLL) {
+ lp->stats.collisions++;
+ } else {
+ lp->pktStats.excessive_collisions++;
+ }
+ }
+ } else {
+ lp->stats.tx_packets++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+** Take the interface down: mark the device stopped, mask interrupts,
+** stop TX/RX, drain the hardware queues, and release the IRQ (unless
+** the board is hard strapped and never registered one).  Returns 0.
+*/
+static int
+ewrk3_close(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr; /* referenced implicitly by EWRK3_* macros */
+ u_char icr, csr; /* scratch registers for the DISABLE_IRQs/STOP_EWRK3 macros */
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ewrk3_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inb(EWRK3_CSR));
+ }
+
+ /*
+ ** We stop the EWRK3 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+
+ STOP_EWRK3;
+
+ /*
+ ** Clean out the TX and RX queues here (note that one entry
+ ** may get added to either the TXD or RX queues if the TX or RX
+ ** just starts processing a packet before the STOP_EWRK3 command
+ ** is received. This will be flushed in the ewrk3_open() call).
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+
+ if (!lp->hard_strapped) {
+ free_irq(dev->irq, NULL);
+
+ irq2dev_map[dev->irq] = 0; /* forget the irq->device mapping */
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+** Return a pointer to the interface statistics kept in the private area.
+** No hardware counters are read here.
+*/
+static struct enet_statistics *
+ewrk3_get_stats(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+
+ /* Null body since there is no framing error counter */
+
+ return &lp->stats;
+}
+
+/*
+** Set or clear the multicast filter for this adapter.
+*/
+/*
+** Configure the receive filter.  IFF_PROMISC selects promiscuous mode
+** (CSR_PME); otherwise the hash table is rebuilt from dev->mc_list via
+** SetMulticastFilter() and multicast matching (CSR_MCE) is enabled.
+** Does nothing unless the device has been opened (irq2dev_map entry set).
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr; /* referenced implicitly by EWRK3_* macros */
+ u_char csr;
+
+ if (irq2dev_map[dev->irq] != NULL) {
+ csr = inb(EWRK3_CSR);
+
+ /* Locate the hash table: a page-0 offset for I/O-only boards,
+ ** otherwise an address inside the shared memory window. */
+ if (lp->shmem_length == IO_ONLY) {
+ lp->mctbl = (char *) PAGE0_HTE;
+ } else {
+ lp->mctbl = (char *)(lp->shmem_base + PAGE0_HTE);
+ }
+
+ csr &= ~(CSR_PME | CSR_MCE); /* start from neither mode */
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ csr |= CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ SetMulticastFilter(dev);
+ csr |= CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ }
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+**
+** Note that when clearing the table, the broadcast bit must remain asserted
+** to receive broadcast messages.
+*/
+/*
+** Rebuild the 512-bit multicast hash filter from dev->mc_list.
+** Each multicast address is hashed with a little-endian CRC-32 (one-liner
+** from Matt Thomas, DEC); the low 9 bits of the CRC select the filter bit.
+** IFF_ALLMULTI sets every bit.  Otherwise the table is cleared except for
+** the broadcast bit, then one bit is set per multicast address.
+** The page-register lock (lp->lock) is held for the duration.
+*/
+static void SetMulticastFilter(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ u_long iobase = dev->base_addr;
+ int i;
+ char *addrs, j, bit, byte;
+ short *p = (short *) lp->mctbl;
+ u16 hashcode;
+ s32 crc, poly = CRC_POLYNOMIAL_LE;
+
+ /* set_bit() here acts as a test-and-set: spin until we own the lock */
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+
+ /* Select page 0 and, for I/O-only access, point the page index at the
+ ** hash table so successive EWRK3_DATA accesses walk through it. */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(EEPROM_OFFSET(lp->mctbl), EWRK3_PIR1);
+ } else {
+ outb(0, EWRK3_MPR);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i=0; i<(HASH_TABLE_LEN >> 3); i++) {
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0xff, EWRK3_DATA);
+ } else { /* memset didn't work here */
+ /* writew() covers two table bytes per pass, hence the extra i++ */
+ writew(0xffff, p);
+ p++; i++;
+ }
+ }
+ } else {
+ /* Clear table except for broadcast bit */
+ if (lp->shmem_length == IO_ONLY) {
+ for (i=0; i<(HASH_TABLE_LEN >> 4) - 1; i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ outb(0x80, EWRK3_DATA); i++; /* insert the broadcast bit */
+ for (; i<(HASH_TABLE_LEN >> 3); i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ } else {
+ memset_io(lp->mctbl, 0, (HASH_TABLE_LEN >> 3));
+ writeb(0x80, (char *)(lp->mctbl + (HASH_TABLE_LEN >> 4) - 1));
+ }
+
+ /* Update table */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = 0xffffffff; /* init CRC for each address */
+ for (byte=0;byte<ETH_ALEN;byte++) { /* for each address byte */
+ /* process each address bit */
+ for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+ crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
+ }
+ }
+ hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+
+ if (lp->shmem_length == IO_ONLY) {
+ u_char tmp;
+
+ /* read-modify-write one filter byte through the data port */
+ outw((short)((long)lp->mctbl) + byte, EWRK3_PIR1);
+ tmp = inb(EWRK3_DATA);
+ tmp |= bit;
+ outw((short)((long)lp->mctbl) + byte, EWRK3_PIR1);
+ outb(tmp, EWRK3_DATA);
+ } else {
+ writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
+ }
+ }
+ }
+ }
+
+ lp->lock = 0; /* Unlock the page register */
+
+ return;
+}
+
+/*
+** ISA bus I/O device probe
+*/
+/*
+** ISA bus I/O device probe.  With ioaddr == 0, autoprobe up to 24 slot
+** addresses starting at EWRK3_IO_BASE; otherwise probe the single given
+** address.  Each free I/O region that answers DevicePresent() gets a
+** device structure and a hardware init attempt.
+*/
+static void isa_probe(struct device *dev, u_long ioaddr)
+{
+ int i = num_ewrk3s, maxSlots;
+ u_long iobase;
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr >= 0x400) return; /* Not ISA */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EWRK3_IO_BASE; /* Get the first slot address */
+ maxSlots = 24;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ maxSlots = i + 1; /* exactly one iteration */
+ }
+
+ for (; (i<maxSlots) && (dev!=NULL);iobase+=EWRK3_IOP_INC, i++) {
+ if (!check_region(iobase, EWRK3_TOTAL_SIZE)) { /* region still free? */
+ if (DevicePresent(iobase) == 0) { /* signature found in address PROM */
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (ewrk3_hw_init(dev, iobase) == 0) {
+ num_ewrk3s++;
+ }
+ num_eth++;
+ }
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
+ }
+ }
+
+ return;
+}
+
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard.
+*/
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard.  A slot is accepted when its EISA ID registers carry a
+** recognised signature, the region is free, and DevicePresent() succeeds.
+*/
+static void eisa_probe(struct device *dev, u_long ioaddr)
+{
+ int i, maxSlots;
+ u_long iobase;
+ char name[EWRK3_STRLEN];
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr < 0x1000) return; /* Not EISA */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12); /* slot number from the address */
+ maxSlots = i + 1;
+ }
+
+ /* NOTE(review): the for statement resets i to 1, so a fixed-location
+ ** probe (maxSlots = slot+1) walks slots 1..slot instead of only the
+ ** requested one, while iobase starts at the requested address -- the
+ ** two can get out of step.  Verify against later ewrk3.c revisions. */
+ for (i=1; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
+ /* EISA_ID expands using iobase, so this reads the current slot's ID */
+ if (EISA_signature(name, EISA_ID) == 0) {
+ if (!check_region(iobase, EWRK3_TOTAL_SIZE)) {
+ if (DevicePresent(iobase) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (ewrk3_hw_init(dev, iobase) == 0) {
+ num_ewrk3s++;
+ }
+ num_eth++;
+ }
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Search the entire 'eth' device list for a fixed probe. If a match isn't
+** found then check for an autoprobe or unused device location. If they
+** are not available then insert a new device structure at the end of
+** the current list.
+*/
+/*
+** Search the entire 'eth' device list for a fixed probe. If a match isn't
+** found then check for an autoprobe or unused device location. If they
+** are not available then insert a new device structure at the end of
+** the current list.
+**
+** Returns the device structure to use for this iobase, or NULL if a new
+** one could not be allocated.  When loading as a module the passed-in
+** device is used unchanged.
+*/
+static struct device *
+alloc_device(struct device *dev, u_long iobase)
+{
+ struct device *adev = NULL; /* first autoprobe/unused slot seen */
+ int fixed = 0, new_dev = 0;
+
+ num_eth = ewrk3_dev_index(dev->name);
+ if (loading_module) return dev;
+
+ while (1) {
+ /* remember the first entry open for autoprobing */
+ if (((dev->base_addr == EWRK3_NDA) || (dev->base_addr==0)) && !adev) {
+ adev=dev;
+ } else if ((dev->priv == NULL) && (dev->base_addr==iobase)) {
+ fixed = 1; /* user pinned this iobase to this entry */
+ } else {
+ /* reached the end of the 'eth' entries? */
+ if (dev->next == NULL) {
+ new_dev = 1;
+ } else if (strncmp(dev->next->name, "eth", 3) != 0) {
+ new_dev = 1;
+ }
+ }
+ if ((dev->next == NULL) || new_dev || fixed) break;
+ dev = dev->next;
+ num_eth++;
+ }
+ if (adev && !fixed) { /* prefer the autoprobe slot over appending */
+ dev = adev;
+ num_eth = ewrk3_dev_index(dev->name);
+ new_dev = 0;
+ }
+
+ if (((dev->next == NULL) &&
+ ((dev->base_addr != EWRK3_NDA) && (dev->base_addr != 0)) && !fixed) ||
+ new_dev) {
+ num_eth++; /* New device */
+ dev = insert_device(dev, iobase, ewrk3_probe);
+ }
+
+ return dev;
+}
+
+/*
+** If at end of eth device list and can't use current entry, malloc
+** one up. If memory could not be allocated, print an error message.
+*/
+/*
+** If at end of eth device list and can't use current entry, malloc
+** one up. If memory could not be allocated, print an error message.
+** The extra 8 bytes after the struct hold the "ethN" name ("eth????"
+** plus NUL is exactly 8 bytes).  Returns the new entry, or NULL on OOM.
+*/
+static struct device *
+insert_device(struct device *dev, u_long iobase, int (*init)(struct device *))
+{
+ struct device *new;
+
+ new = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
+ if (new == NULL) {
+ printk("eth%d: Device not initialised, insufficient memory\n",num_eth);
+ return NULL;
+ } else {
+ new->next = dev->next; /* link into the list after 'dev' */
+ dev->next = new;
+ dev = dev->next; /* point to the new device */
+ dev->name = (char *)(dev + 1); /* name lives in the trailing 8 bytes */
+ if (num_eth > 9999) {
+ sprintf(dev->name,"eth????");/* New device name */
+ } else {
+ sprintf(dev->name,"eth%d", num_eth);/* New device name */
+ }
+ dev->base_addr = iobase; /* assign the io address */
+ dev->init = init; /* initialisation routine */
+ }
+
+ return dev;
+}
+
+/*
+** Extract the first run of decimal digits from a device name
+** (e.g. "eth3" -> 3).  Returns 0 when the name contains no digits.
+*/
+static int
+ewrk3_dev_index(char *s)
+{
+ int i=0, j=0; /* i: accumulated value, j: "seen a digit" flag */
+
+ for (;*s; s++) {
+ if (isdigit(*s)) {
+ j=1;
+ i = (i * 10) + (*s - '0');
+ } else if (j) break; /* stop at the end of the first digit run */
+ }
+
+ return i;
+}
+
+/*
+** Read the EWRK3 EEPROM using this routine
+*/
+/*
+** Read one 16-bit word from the on-board EEPROM at word address 'eaddr'
+** (6 significant bits).  The inb() loop is a crude ~1ms busy-wait for the
+** EEPROM read to complete.
+*/
+static int Read_EEPROM(u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+
+ return inw(EWRK3_EPROM1); /* 16 bits data return */
+}
+
+/*
+** Write the EWRK3 EEPROM using this routine
+*/
+/*
+** Write one 16-bit word to the on-board EEPROM at word address 'eaddr'
+** (6 significant bits).  Write access is bracketed by the WRITE ENABLE /
+** WRITE DISABLE commands; the inb() loops are busy-wait delays.
+** Always returns 0 (the hardware status is not checked).
+*/
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+ outw(data, EWRK3_EPROM1); /* write data to register */
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
+ for (i=0;i<75000;i++) inb(EWRK3_CSR); /* wait 15msec */
+ outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+
+ return 0;
+}
+
+/*
+** Look for a particular board name in the on-board EEPROM.
+*/
+/*
+** Look for a particular board name in the on-board EEPROM image.
+** Scans the product-name bytes (EEPROM_PNAME7..EEPROM_PNAME0) for any of
+** the EWRK3_SIGNATURE strings.  On a match the product name is copied
+** into 'name'; otherwise 'name' is left as the empty string.
+** 'name' must be at least EWRK3_STRLEN+1 bytes (the NUL is stored at
+** name[EWRK3_STRLEN]).
+*/
+static void EthwrkSignature(char *name, char *eeprom_image)
+{
+ u_long i,j,k;
+ char *signatures[] = EWRK3_SIGNATURE;
+
+ strcpy(name, "");
+ for (i=0;*signatures[i] != '\0' && *name == '\0';i++) {
+ for (j=EEPROM_PNAME7,k=0;j<=EEPROM_PNAME0 && k<strlen(signatures[i]);j++) {
+ if (signatures[i][k] == eeprom_image[j]) { /* track signature */
+ k++;
+ } else { /* lost signature; begin search again */
+ k=0;
+ }
+ }
+ if (k == strlen(signatures[i])) { /* whole signature matched */
+ for (k=0; k<EWRK3_STRLEN; k++) {
+ name[k] = eeprom_image[EEPROM_PNAME7 + k];
+ name[EWRK3_STRLEN] = '\0';
+ }
+ }
+ }
+
+ return; /* device name (or "") is returned through 'name' */
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all EWRK3 products.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+
+/*
+** Search the Ethernet address PROM for the 8-byte signature (ETH_PROM_SIG
+** repeated twice).  The PROM's internal address counter may start anywhere,
+** so up to PROBE_LENGTH + sigLength - 1 bytes are examined; the search
+** stops as soon as the signature completes so the counter is left at the
+** start of the station address.  Returns 0 on success, -ENODEV otherwise.
+*/
+static int DevicePresent(u_long iobase)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1]; /* signature viewed as 8 bytes */
+ } dev;
+ short sigLength;
+ char data;
+ int i, j, status = 0;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(EWRK3_APROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* current byte may start a new match */
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ if (j!=sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ return status;
+}
+
+/*
+** Load the station address into dev->dev_addr and the PAR registers, and
+** verify it.  LeMAC2 boards take the address from the EEPROM image and
+** check it against an 8-bit LFSR CRC (EEPROM_PA_CRC); older boards read
+** it two bytes at a time from the address PROM and verify a ones-
+** complement style 16-bit checksum that follows the address.
+** Returns 0 on success, (u_char)-1 on a checksum mismatch.
+*/
+static u_char get_hw_addr(struct device *dev, u_char *eeprom_image, char chipType)
+{
+ int i, j, k;
+ u_short chksum;
+ u_char crc, lfsr, sd, status = 0;
+ u_long iobase = dev->base_addr;
+ u16 tmp;
+
+ if (chipType == LeMAC2) {
+ for (crc=0x6a, j=0; j<ETH_ALEN; j++) {
+ sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
+ outb(dev->dev_addr[j], EWRK3_PAR0 + j);
+ for (k=0; k<8; k++, sd >>= 1) { /* fold each bit into the LFSR */
+ lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
+ crc = (crc >> 1) + lfsr;
+ }
+ }
+ if (crc != eeprom_image[EEPROM_PA_CRC]) status = -1;
+ } else {
+ /* k accumulates a rotate-and-add checksum over the 3 address words */
+ for (i=0,k=0;i<ETH_ALEN;) {
+ k <<= 1 ;
+ if (k > 0xffff) k-=0xffff; /* end-around carry */
+
+ k += (u_char) (tmp = inb(EWRK3_APROM));
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+ k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+ chksum = inb(EWRK3_APROM); /* stored checksum, low byte first */
+ chksum |= (inb(EWRK3_APROM)<<8);
+ if (k != chksum) status = -1;
+ }
+
+ return status;
+}
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+/*
+** Look for a particular board name in the EISA configuration space.
+** Reads the 4 EISA ID registers, decodes them into the 5-character
+** compressed manufacturer/product code, and compares against the
+** EWRK3_SIGNATURE list.  On a match the code is copied into 'name'
+** and 1 is returned; otherwise 'name' is empty and 0 is returned.
+*/
+static int EISA_signature(char *name, s32 eisa_id)
+{
+ u_long i;
+ char *signatures[] = EWRK3_SIGNATURE;
+ char ManCode[EWRK3_STRLEN];
+ union {
+ s32 ID;
+ char Id[4]; /* the ID registers viewed byte-wise */
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ for (i=0; i<4; i++) {
+ Eisa.Id[i] = inb(eisa_id + i);
+ }
+
+ /* Decode the compressed EISA ID: 3 five-bit letters + 3 hex digits */
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ }
+ }
+
+ return status; /* 1 if recognised; name returned through 'name' */
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+*/
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+**
+** The sub-command and user buffer arrive in a struct ewrk3_ioctl overlaid
+** on rq->ifr_data.  Data is staged through the on-stack 'tmp' union and
+** moved across the user/kernel boundary with memcpy_tofs/memcpy_fromfs
+** after verify_area().  Returns 0 or a negative errno.
+*/
+static int ewrk3_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_data;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ u_char csr;
+ union {
+ u_char addr[HASH_TABLE_LEN * ETH_ALEN];
+ u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ } tmp; /* staging buffer for all user-space transfers */
+
+ switch(ioc->cmd) {
+ case EWRK3_GET_HWADDR: /* Get the hardware address */
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_HWADDR: /* Set the hardware address */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN))) {
+ csr = inb(EWRK3_CSR);
+ csr |= (CSR_TXD|CSR_RXD);
+ outb(csr, EWRK3_CSR); /* Disable the TX and RX */
+
+ memcpy_fromfs(tmp.addr,ioc->data,ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ outb(tmp.addr[i], EWRK3_PAR0 + i);
+ }
+
+ csr &= ~(CSR_TXD|CSR_RXD); /* Enable the TX and RX */
+ outb(csr, EWRK3_CSR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_PME;
+ csr &= ~CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case EWRK3_GET_MCA: /* Get the multicast address table */
+ if (!(status = verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(PAGE0_HTE, EWRK3_PIR1);
+ for (i=0; i<(HASH_TABLE_LEN >> 3); i++) {
+ tmp.addr[i] = inb(EWRK3_DATA);
+ }
+ } else {
+ outb(0, EWRK3_MPR);
+ memcpy_fromio(tmp.addr, (char *)(lp->shmem_base + PAGE0_HTE), (HASH_TABLE_LEN >> 3));
+ }
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+ /* NOTE(review): the unlock runs even when verify_area failed and the
+ ** lock was never taken -- harmless here, but worth confirming. */
+ lp->lock = 0; /* Unlock the page register */
+
+ break;
+ case EWRK3_SET_MCA: /* Set a multicast address */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, ETH_ALEN*ioc->len))) {
+ /* NOTE(review): the copied addresses in tmp.addr are never used;
+ ** set_multicast_list() rebuilds from dev->mc_list instead. */
+ memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+ set_multicast_list(dev);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_MCA: /* Clear all multicast addresses */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_MCA_EN: /* Enable multicast addressing */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_MCE;
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_STATS: /* Get the driver statistics */
+ cli(); /* snapshot the counters with interrupts off */
+ ioc->len = sizeof(lp->pktStats);
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
+ }
+ sti();
+
+ break;
+ case EWRK3_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CSR: /* Get the CSR Register contents */
+ tmp.addr[0] = inb(EWRK3_CSR);
+ ioc->len = 1;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_CSR: /* Set the CSR Register contents */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, 1))) {
+ memcpy_fromfs(tmp.addr, ioc->data, 1);
+ outb(tmp.addr[0], EWRK3_CSR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
+ if (suser()) {
+ for (i=0; i<(EEPROM_MAX>>1); i++) {
+ tmp.val[i] = (short)Read_EEPROM(iobase, i);
+ }
+ i = EEPROM_MAX;
+ tmp.addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
+ for (j=0;j<ETH_ALEN;j++) {
+ tmp.addr[i++] = inb(EWRK3_PAR0 + j);
+ }
+ ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, EEPROM_MAX))) {
+ memcpy_fromfs(tmp.addr, ioc->data, EEPROM_MAX);
+ for (i=0; i<(EEPROM_MAX>>1); i++) {
+ Write_EEPROM(tmp.val[i], iobase, i);
+ }
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CMR: /* Get the CMR Register contents */
+ tmp.addr[0] = inb(EWRK3_CMR);
+ ioc->len = 1;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
+ if (suser()) {
+ lp->txc = 1;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
+ if (suser()) {
+ lp->txc = 0;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device thisEthwrk = {
+ devicename, /* device name is inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 5, /* I/O address, IRQ */
+ 0, 0, 0, NULL, ewrk3_probe };
+
+static int io=0x300; /* <--- EDIT THESE LINES FOR YOUR CONFIGURATION */
+static int irq=5; /* or use the insmod io= irq= options */
+
+/*
+** Module entry point: copy the insmod-supplied io/irq parameters into the
+** static device structure and register it (which triggers ewrk3_probe).
+** Returns 0 on success, -EIO if registration fails.
+*/
+int
+init_module(void)
+{
+ thisEthwrk.base_addr=io;
+ thisEthwrk.irq=irq;
+ if (register_netdev(&thisEthwrk) != 0)
+ return -EIO;
+ return 0;
+}
+
+/*
+** Module exit point: release the private area, unregister the device and
+** give back the I/O region.
+** NOTE(review): priv is freed *before* unregister_netdev() -- confirm no
+** callback can still reach dev->priv in this kernel's teardown order.
+*/
+void
+cleanup_module(void)
+{
+ if (thisEthwrk.priv) {
+ kfree(thisEthwrk.priv);
+ thisEthwrk.priv = NULL;
+ }
+ thisEthwrk.irq = 0;
+
+ unregister_netdev(&thisEthwrk);
+ release_region(thisEthwrk.base_addr, EWRK3_TOTAL_SIZE);
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
+ *
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
+ * End:
+ */
+
diff --git a/linux/src/drivers/net/ewrk3.h b/linux/src/drivers/net/ewrk3.h
new file mode 100644
index 0000000..554a18a
--- /dev/null
+++ b/linux/src/drivers/net/ewrk3.h
@@ -0,0 +1,322 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** I/O Address Register Map
+*/
+#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
+#define EWRK3_CR iobase+0x01 /* Control Register */
+#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
+#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
+#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
+#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
+#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
+#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
+#define EWRK3_RQ iobase+0x08 /* Receive Queue */
+#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
+#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
+#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
+#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
+#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
+#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
+#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
+#define EWRK3_DATA iobase+0x10 /* Data Register */
+#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
+#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
+#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
+#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
+#define EWRK3_APROM iobase+0x15 /* Address PROM */
+#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
+#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
+#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
+#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
+#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
+#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
+#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
+#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
+#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
+
+/*
+** Control Page Map
+*/
+#define PAGE0_FMQ 0x000 /* Free Memory Queue */
+#define PAGE0_RQ 0x080 /* Receive Queue */
+#define PAGE0_TQ 0x100 /* Transmit Queue */
+#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
+#define PAGE0_HTE 0x200 /* Hash Table Entries */
+#define PAGE0_RSVD 0x240 /* RESERVED */
+#define PAGE0_USRD 0x600 /* User Data */
+
+/*
+** Control and Status Register bit definitions (EWRK3_CSR)
+*/
+#define CSR_RA 0x80 /* Runt Accept */
+#define CSR_PME 0x40 /* Promiscuous Mode Enable */
+#define CSR_MCE 0x20 /* Multicast Enable */
+#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
+#define CSR_RNE 0x04 /* RX Queue Not Empty */
+#define CSR_TXD 0x02 /* TX Disable */
+#define CSR_RXD 0x01 /* RX Disable */
+
+/*
+** Control Register bit definitions (EWRK3_CR)
+*/
+#define CR_APD 0x80 /* Auto Port Disable */
+#define CR_PSEL 0x40 /* Port Select (0->TP port) */
+#define CR_LBCK 0x20 /* LoopBaCK enable */
+#define CR_FDUP 0x10 /* Full DUPlex enable */
+#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
+#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
+#define CR_LED 0x02 /* LED (1-> turn on) */
+
+/*
+** Interrupt Control Register bit definitions (EWRK3_ICR)
+*/
+#define ICR_IE 0x80 /* Interrupt Enable */
+#define ICR_IS 0x60 /* Interrupt Selected */
+#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
+#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
+#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
+#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
+
+/*
+** Transmit Status Register bit definitions (EWRK3_TSR)
+*/
+#define TSR_NCL 0x80 /* No Carrier Loopback */
+#define TSR_ID 0x40 /* Initially Deferred */
+#define TSR_LCL 0x20 /* Late CoLlision */
+#define TSR_ECL 0x10 /* Excessive CoLlisions */
+#define TSR_RCNTR 0x0f /* Retries CouNTeR */
+
+/*
+** I/O Page Register bit definitions (EWRK3_IOPR)
+*/
+#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
+#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
+#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
+#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
+#define EEPROM_RD 0xe0 /* EEPROM READ command */
+
+/*
+** I/O Base Register bit definitions (EWRK3_IOBR)
+*/
+#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
+#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
+
+/*
+** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
+*/
+#define CMR_RA 0x80 /* Read Ahead */
+#define CMR_WB 0x40 /* Write Behind */
+#define CMR_LINK 0x20 /* 0->TP */
+#define CMR_POLARITY 0x10 /* Informational */
+#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
+#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
+#define CMR_PNP 0x04 /* Plug 'n Play */
+#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
+#define CMR_0WS 0x01 /* Zero Wait State */
+
+/*
+** MAC Receive Status Register bit definitions
+*/
+
+#define R_ROK 0x80 /* Receive OK summary */
+#define R_IAM 0x10 /* Individual Address Match */
+#define R_MCM 0x08 /* MultiCast Match */
+#define R_DBE 0x04 /* Dribble Bit Error */
+#define R_CRC 0x02 /* CRC error */
+#define R_PLL 0x01 /* Phase Lock Lost */
+
+/*
+** MAC Transmit Control Register bit definitions
+*/
+
+#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
+#define TCR_SED 0x20 /* Stop when Error Detected */
+#define TCR_QMODE 0x10 /* Q_MODE */
+#define TCR_LAB 0x08 /* Less Aggressive Backoff */
+#define TCR_PAD 0x04 /* PAD Runt Packets */
+#define TCR_IFC 0x02 /* Insert Frame Check */
+#define TCR_ISA 0x01 /* Insert Source Address */
+
+/*
+** MAC Transmit Status Register bit definitions
+*/
+
+#define T_VSTS 0x80 /* Valid STatuS */
+#define T_CTU 0x40 /* Cut Through Used */
+#define T_SQE 0x20 /* Signal Quality Error */
+#define T_NCL 0x10 /* No Carrier Loopback */
+#define T_LCL 0x08 /* Late Collision */
+#define T_ID 0x04 /* Initially Deferred */
+#define T_COLL 0x03 /* COLLision status */
+#define T_XCOLL 0x03 /* Excessive Collisions */
+#define T_MCOLL 0x02 /* Multiple Collisions */
+#define T_OCOLL 0x01 /* One Collision */
+#define T_NOCOLL 0x00 /* No Collisions */
+#define T_XUR 0x03 /* Excessive Underruns */
+#define T_TXE 0x7f /* TX Errors */
+
+/*
+** EISA Configuration Register bit definitions
+*/
+
+#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
+
+/*
+** EEPROM BYTES
+*/
+#define EEPROM_MEMB 0x00
+#define EEPROM_IOB 0x01
+#define EEPROM_EISA_ID0 0x02
+#define EEPROM_EISA_ID1 0x03
+#define EEPROM_EISA_ID2 0x04
+#define EEPROM_EISA_ID3 0x05
+#define EEPROM_MISC0 0x06
+#define EEPROM_MISC1 0x07
+#define EEPROM_PNAME7 0x08
+#define EEPROM_PNAME6 0x09
+#define EEPROM_PNAME5 0x0a
+#define EEPROM_PNAME4 0x0b
+#define EEPROM_PNAME3 0x0c
+#define EEPROM_PNAME2 0x0d
+#define EEPROM_PNAME1 0x0e
+#define EEPROM_PNAME0 0x0f
+#define EEPROM_SWFLAGS 0x10
+#define EEPROM_HWCAT 0x11
+#define EEPROM_NETMAN2 0x12
+#define EEPROM_REVLVL 0x13
+#define EEPROM_NETMAN0 0x14
+#define EEPROM_NETMAN1 0x15
+#define EEPROM_CHIPVER 0x16
+#define EEPROM_SETUP 0x17
+#define EEPROM_PADDR0 0x18
+#define EEPROM_PADDR1 0x19
+#define EEPROM_PADDR2 0x1a
+#define EEPROM_PADDR3 0x1b
+#define EEPROM_PADDR4 0x1c
+#define EEPROM_PADDR5 0x1d
+#define EEPROM_PA_CRC 0x1e
+#define EEPROM_CHKSUM 0x1f
+
+/*
+** EEPROM bytes for checksumming
+*/
+#define EEPROM_MAX 32 /* bytes */
+
+/*
+** EEPROM MISCELLANEOUS FLAGS
+*/
+#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
+#define READ_AHEAD 0x0080 /* Read Ahead feature */
+#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
+#define IRQ_SEL 0x0060 /* IRQ line selection */
+#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
+#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
+#define WRITE_BEHIND 0x0002 /* Write Behind feature */
+#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
+
+/*
+** EEPROM NETWORK MANAGEMENT FLAGS
+*/
+#define NETMAN_POL 0x04 /* Polarity defeat */
+#define NETMAN_LINK 0x02 /* Link defeat */
+#define NETMAN_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM SW FLAGS
+*/
+#define SW_SQE 0x10 /* Signal Quality Error */
+#define SW_LAB 0x08 /* Less Aggressive Backoff */
+#define SW_INIT 0x04 /* Initialized */
+#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
+#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
+
+/*
+** EEPROM SETUP FLAGS
+*/
+#define SETUP_APD 0x80 /* AutoPort Disable */
+#define SETUP_PS 0x40 /* Port Select */
+#define SETUP_MP 0x20 /* MultiPort */
+#define SETUP_1TP 0x10 /* 1 port, TP */
+#define SETUP_1COAX 0x00 /* 1 port, Coax */
+#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
+
+/*
+** EEPROM MANAGEMENT FLAGS
+*/
+#define MGMT_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM VERSIONS
+*/
+#define LeMAC 0x11
+#define LeMAC2 0x12
+
+/*
+** Miscellaneous
+*/
+
+#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+
+#define HASH_TABLE_LEN 512 /* Bits */
+
+#define XCT 0x80 /* Transmit Cut Through */
+#define PRELOAD 16 /* 4 long words */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define EWRK3IOCTL SIOCDEVPRIVATE
+
+/* Argument block for the EWRK3IOCTL private ioctl, overlaid on
+** ifreq.ifr_data by both the driver and user-space tools. */
+struct ewrk3_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
+#define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */
+#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define EWRK3_GET_MCA 0x06 /* Get the multicast address table */
+#define EWRK3_SET_MCA 0x07 /* Set a multicast address */
+#define EWRK3_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define EWRK3_MCA_EN 0x09 /* Enable a multicast address group */
+#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
+#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
+#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
+#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
+#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
+#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
+#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
+#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
diff --git a/linux/src/drivers/net/fmv18x.c b/linux/src/drivers/net/fmv18x.c
new file mode 100644
index 0000000..b29ddf0
--- /dev/null
+++ b/linux/src/drivers/net/fmv18x.c
@@ -0,0 +1,664 @@
+/* fmv18x.c: A network device driver for the Fujitsu FMV-181/182/183/184.
+
+ Original: at1700.c (1993-94 by Donald Becker).
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Modified by Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)
+ Copyright 1994 Fujitsu Laboratories Ltd.
+ Special thanks to:
+ Masayoshi UTAKA (utaka@ace.yk.fujitsu.co.jp)
+ for testing this driver.
+ H. NEGISHI (agy, negishi@sun45.psd.cs.fujitsu.co.jp)
+ for suggestion of some program modification.
+ Masahiro SEKIGUCHI <seki@sysrap.cs.fujitsu.co.jp>
+ for suggestion of some program modification.
+ Kazutoshi MORIOKA (morioka@aurora.oaks.cs.fujitsu.co.jp)
+ for testing this driver.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This is a device driver for the Fujitsu FMV-181/182/183/184, which
+ is a straight-forward Fujitsu MB86965 implementation.
+
+ Sources:
+ at1700.c
+ The Fujitsu MB86965 datasheet.
+ The Fujitsu FMV-181/182 user's guide
+*/
+
+static const char *version =
+ "fmv18x.c:v1.3.71e 03/04/96 Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+static int fmv18x_probe_list[] =
+{0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that need to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ long open_time; /* Useless example local info. */
+ uint tx_started:1; /* Nonzero while a transmit is in progress. */
+ uchar tx_queue; /* Number of packet on the Tx queue. */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define COL16CNTL 11
+#define MODE13 13
+/* Fujitsu FMV-18x Card Configuration */
+#define FJ_STATUS0 0x10
+#define FJ_STATUS1 0x11
+#define FJ_CONFIG0 0x12
+#define FJ_CONFIG1 0x13
+#define FJ_MACADDR 0x14 /* 0x14 - 0x19 */
+#define FJ_BUFCNTL 0x1A
+#define FJ_BUFDATA 0x1C
+#define FMV18X_IO_EXTENT 32
+
+/* Index to functions, as function prototypes. */
+
+extern int fmv18x_probe(struct device *dev);
+
+static int fmv18x_probe1(struct device *dev, short ioaddr);
+static int net_open(struct device *dev);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry fmv18x_drv =
+{"fmv18x", fmv18x_probe1, FMV18X_IO_EXTENT, fmv18x_probe_list};
+#else
+int
+fmv18x_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return fmv18x_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; fmv18x_probe_list[i]; i++) {
+ int ioaddr = fmv18x_probe_list[i];
+ if (check_region(ioaddr, FMV18X_IO_EXTENT))
+ continue;
+ if (fmv18x_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into MAC
+ address check. */
+
+int fmv18x_probe1(struct device *dev, short ioaddr)
+{
+ char irqmap[4] = {3, 7, 10, 15};
+ unsigned int i, irq;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe for.
+ */
+
+ /* Check I/O address configuration and Fujitsu vendor code */
+ if (fmv18x_probe_list[inb(ioaddr + FJ_CONFIG0) & 0x07] != ioaddr
+ || inb(ioaddr+FJ_MACADDR ) != 0x00
+ || inb(ioaddr+FJ_MACADDR+1) != 0x00
+ || inb(ioaddr+FJ_MACADDR+2) != 0x0e)
+ return -ENODEV;
+
+ irq = irqmap[(inb(ioaddr + FJ_CONFIG0)>>6) & 0x03];
+
+ /* Snarf the interrupt vector now. */
+ if (request_irq(irq, &net_interrupt, 0, "fmv18x", NULL)) {
+ printk ("FMV-18x found at %#3x, but it's unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, irq);
+ return EAGAIN;
+ }
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ /* Grab the region so that we can find another board if the IRQ request
+ fails. */
+ request_region(ioaddr, FMV18X_IO_EXTENT, "fmv18x");
+
+ printk("%s: FMV-18x found at %#3x, IRQ %d, address ", dev->name,
+ ioaddr, irq);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ irq2dev_map[irq] = dev;
+
+ for(i = 0; i < 6; i++) {
+ unsigned char val = inb(ioaddr + FJ_MACADDR + i);
+ printk("%02x", val);
+ dev->dev_addr[i] = val;
+ }
+
+ /* "FJ_STATUS0" 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2/5"};
+ ushort setup_value = inb(ioaddr + FJ_STATUS0);
+
+ switch( setup_value & 0x07 ){
+ case 0x01 /* 10base5 */:
+ case 0x02 /* 10base2 */: dev->if_port = 0x18; break;
+ case 0x04 /* 10baseT */: dev->if_port = 0x08; break;
+ default /* auto-sense*/: dev->if_port = 0x00; break;
+ }
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
+
+ /* Initialize LAN Controller and LAN Card */
+ outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
+ outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
+ outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
+ outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure (TAMIYA) */
+
+ /* wait for a while */
+ udelay(200);
+
+ /* Set the station address in bank zero. */
+ outb(0x00, ioaddr + CONFIG_1);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0x04, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+ /* Switch to bank 2 and lock our I/O address. */
+ outb(0x08, ioaddr + CONFIG_1);
+ outb(dev->if_port, ioaddr + MODE13);
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of 'dev' with ethernet-generic values. */
+
+ ether_setup(dev);
+ return 0;
+}
+
+
+static int net_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Set the configuration register 0 to 32K 100ns. byte-wide memory,
+ 16 bit bus access, and two 4K Tx, enable the Rx and Tx. */
+ outb(0x5a, ioaddr + CONFIG_0);
+
+ /* Powerup and switch to register bank 2 for the run-time registers. */
+ outb(0xe8, ioaddr + CONFIG_1);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Clear Tx and Rx Status */
+ outb(0xff, ioaddr + TX_STATUS);
+ outb(0xff, ioaddr + RX_STATUS);
+ lp->open_time = jiffies;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ /* Enable the IRQ of the LAN Card */
+ outb(0x80, ioaddr + FJ_CONFIG1);
+
+ /* Enable both Tx and Rx interrupts */
+ outw(0x8182, ioaddr+TX_INTR);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ printk("%s: transmit timed out with status %04x, %s?\n", dev->name,
+ htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, htons(inw(ioaddr + 0)),
+ htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
+ htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
+ htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
+ htons(inw(ioaddr +14)));
+ printk("eth card: %04x %04x\n",
+ htons(inw(ioaddr+FJ_STATUS0)),
+ htons(inw(ioaddr+FJ_CONFIG0)));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ cli();
+
+ /* Initialize LAN Controller and LAN Card */
+ outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
+ outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
+ outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
+ outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure */
+ net_open(dev);
+
+ sti();
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ if (length > ETH_FRAME_LEN) {
+ if (net_debug)
+ printk("%s: Attempting to send a large packet (%d bytes).\n",
+ dev->name, length);
+ return 1;
+ }
+
+ if (net_debug > 4)
+ printk("%s: Transmitting a packet of length %lu.\n", dev->name,
+ (unsigned long)skb->len);
+
+ /* Disable both interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ } else if (lp->tx_queue_len < 4096 - 1502)
+ /* Yes, there is room for one more packet. */
+ dev->tbusy = 0;
+
+ /* Re-enable interrupts */
+ outw(0x8182, ioaddr + TX_INTR);
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status;
+
+ if (dev == NULL) {
+ printk ("fmv18x_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /* Avoid multiple interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (status & 0xff00
+ || (inb(ioaddr + RX_MODE) & 0x40) == 0) { /* Got a packet(s). */
+ net_rx(dev);
+ }
+ if (status & 0x00ff) {
+ if (status & 0x80) {
+ lp->stats.tx_packets++;
+ if (lp->tx_queue) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->tx_started = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ }
+ if (status & 0x02 ) {
+ if (net_debug > 4)
+ printk("%s: 16 Collision occur during Txing.\n", dev->name);
+ /* Retry to send the packet */
+ outb(0x02, ioaddr + COL16CNTL);
+ }
+ }
+
+ dev->interrupt = 0;
+ outw(0x8182, ioaddr + TX_INTR);
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 10; /* 5 -> 10: by agy 19940922 */
+
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ /* Clear PKT_RDY bit: by agy 19940922 */
+ /* outb(0x80, ioaddr + RX_STATUS); */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + 14);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x08) lp->stats.rx_length_errors++;
+ if (status & 0x04) lp->stats.rx_frame_errors++;
+ if (status & 0x02) lp->stats.rx_crc_errors++;
+ if (status & 0x01) lp->stats.rx_over_errors++;
+ } else {
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The FMV-18x claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+
+ if (net_debug > 5) {
+ int i;
+ printk("%s: Rxed packet of length %d: ", dev->name, pkt_len);
+ for (i = 0; i < 14; i++)
+ printk(" %02x", skb->data[i]);
+ printk(".\n");
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ (void)inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + 14);
+ }
+
+ if (net_debug > 5 && i > 0)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ ((struct net_local *)dev->priv)->open_time = 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ MOD_DEC_USE_COUNT;
+
+ /* Set the ethernet adaptor disable IRQ */
+ outb(0x00, ioaddr + FJ_CONFIG1);
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ cli();
+ /* ToDo: Update the statistics from the device registers. */
+ sti();
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ if (dev->mc_count || dev->flags&(IFF_PROMISC|IFF_ALLMULTI))
+ {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ }
+ else
+ outb(2, ioaddr + RX_MODE); /* Disable promiscuous, use normal mode */
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_fmv18x = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, fmv18x_probe };
+
+static int io = 0x220;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("fmv18x: You should not use auto-probing with insmod!\n");
+ dev_fmv18x.base_addr = io;
+ dev_fmv18x.irq = irq;
+ if (register_netdev(&dev_fmv18x) != 0) {
+ printk("fmv18x: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_fmv18x);
+ kfree(dev_fmv18x.priv);
+ dev_fmv18x.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_fmv18x.irq, NULL);
+ irq2dev_map[dev_fmv18x.irq] = NULL;
+ release_region(dev_fmv18x.base_addr, FMV18X_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c fmv18x.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hamachi.c b/linux/src/drivers/net/hamachi.c
new file mode 100644
index 0000000..fdcf43d
--- /dev/null
+++ b/linux/src/drivers/net/hamachi.c
@@ -0,0 +1,1315 @@
+/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
+/*
+ Written 1998-2002 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
+ adapter.
+
+ Support and updates available at
+ http://www.scyld.com/network/hamachi.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"hamachi.c:v1.04 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/hamachi.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: hamachi_probe
+config-in: tristate 'Packet Engines "Hamachi" PCI Gigabit Ethernet support' CONFIG_HAMACHI
+c-help-name: Packet Engines "Hamachi" PCI Gigabit Ethernet support
+c-help-symbol: CONFIG_HAMACHI
+c-help: This driver is for the Packet Engines "Hamachi" GNIC-2 Gigabit Ethernet
+c-help: adapter.
+c-help: Usage information and updates are available from
+c-help: http://www.scyld.com/network/hamachi.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 40;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Hamachi has a 64 element perfect filter. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* A override for the hardware detection of bus width.
+ Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit.
+ Add 2 to disable parity detection.
+*/
+static int force32 = 0;
+
+/* Used to pass the media type, etc.
+ These exist for driver interoperability.
+ Only 1 Gigabit is supported by the chip.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 64
+#define TX_QUEUE_LEN 60 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 128
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#if ADDRLEN == 64
+#define virt_to_desc(addr) cpu_to_le64(virt_to_bus(addr))
+#else
+#define virt_to_desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(force32, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(unused, deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+MODULE_PARM_DESC(force32, "Set to 1 to force 32 bit PCI bus use.");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Hamachi"
+Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit
+66Mhz PCI card.
+
+II. Board-specific settings
+
+No jumpers exist on the board. The chip supports software correction of
+various motherboard wiring errors, however this driver does not support
+that feature.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Hamachi uses a typical descriptor based bus-master architecture.
+The descriptor list is similar to that used by the Digital Tulip.
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+This driver uses a zero-copy receive and transmit scheme similar to my other
+network drivers.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Hamachi as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. Gigabit cards are typically used on generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.
+
+IIIb/c. Transmit/Receive Structure
+
+The Rx and Tx descriptor structure are straight-forward, with no historical
+baggage that must be explained. Unlike the awkward DBDMA structure, there
+are no unused fields or option bits that had only one allowable setting.
+
+Two details should be noted about the descriptors: The chip supports both 32
+bit and 64 bit address structures, and the length field is overwritten on
+the receive descriptors. The descriptor length is set in the control word
+for each channel. The development driver uses 32 bit addresses only, however
+64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha.
+
+IIId. Synchronization
+
+This driver is very similar to my other network drivers.
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'hmp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
+
+IVb. References
+
+Hamachi Engineering Design Specification, 5/15/97
+(Note: This version was marked "Confidential".)
+
+IVc. Errata
+
+None noted.
+*/
+
+
+/* The table for PCI detection and activation. */
+
+static void *hamachi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+enum chip_capability_flags { CanHaveMII=1, };
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Packet Engines GNIC-II \"Hamachi\"", { 0x09111318, 0xffffffff,},
+ PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR0 | PCI_ADDR_64BITS, 0x400, 0, },
+ { 0,},
+};
+
+struct drv_id_info hamachi_drv_id = {
+ "hamachi", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ hamachi_probe1, 0,
+};
+
+/* Offsets to the Hamachi registers. Various sizes. */
+enum hamachi_offsets {
+ TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
+ RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
+ PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
+ LEDCtrl=0x06C, VirtualJumpers=0x06D,
+ TxChecksum=0x074, RxChecksum=0x076,
+ TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
+ InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
+ EventStatus=0x08C,
+ MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
+ /* See enum MII_offsets below. */
+ MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
+ AddrMode=0x0D0, StationAddr=0x0D2,
+ /* Gigabit AutoNegotiation. */
+ ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
+ ANLinkPartnerAbility=0x0EA,
+ EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
+ FIFOcfg=0x0F8,
+};
+
+/* Offsets to the MII-mode registers. */
+enum MII_offsets {
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
+ IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
+ LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
+
+/* The Hamachi Rx and Tx buffer descriptors. */
+struct hamachi_desc {
+ u32 status_n_length;
+#if ADDRLEN == 64
+ u32 pad;
+ u64 addr;
+#else
+ u32 addr;
+#endif
+};
+
+/* Bits in hamachi_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
+ DescIntr=0x10000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Per-device private state.  hamachi_probe1() allocates this with
+   PRIV_ALIGN bytes of slack and rounds the pointer up to a 16-byte
+   boundary so the descriptor rings leading the structure are aligned;
+   the unrounded pointer is kept in priv_addr for kfree(). */
+struct hamachi_private {
+ /* Descriptor rings first for alignment. Tx requires a second descriptor
+ for status. */
+ struct hamachi_desc rx_ring[RX_RING_SIZE];
+ struct hamachi_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ int msg_level;
+ int max_interrupt_work;
+ long in_interrupt; /* Re-entry guard bit for the interrupt handler. */
+
+ struct hamachi_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+ int multicast_filter_limit;
+ int rx_mode; /* Last value written to AddrMode. */
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int read_eeprom(struct net_device *dev, int location);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static void mdio_write(long ioaddr, int phy_id, int location, int value);
+static int hamachi_open(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#ifdef HAVE_CHANGE_MTU
+static int change_mtu(struct net_device *dev, int new_mtu);
+#endif
+static void hamachi_timer(unsigned long data);
+static void hamachi_tx_timeout(struct net_device *dev);
+static void hamachi_init_ring(struct net_device *dev);
+static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int hamachi_rx(struct net_device *dev);
+static void hamachi_error(struct net_device *dev, int intr_status);
+static int hamachi_close(struct net_device *dev);
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_hamachi_dev = NULL;
+
+#ifndef MODULE
+/* Legacy (non-module) probe entry point: register with the PCI driver
+   support code and, on success, emit the driver version banner. */
+int hamachi_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&hamachi_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+/* Initialize one Hamachi board: read the station address from the EEPROM,
+   allocate and align the private structure, hook up the net_device
+   methods, scan for MII transceivers, and start gigabit autonegotiation.
+   Returns the net_device on success, NULL on allocation failure. */
+static void *hamachi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct hamachi_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s type %x at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, (int)readl(ioaddr + ChipRev),
+ ioaddr);
+
+ /* The station address lives at EEPROM bytes 4..9. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = read_eeprom(dev, 4 + i);
+ /* Alternate: readb(ioaddr + StationAddr + i); */
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ i = readb(ioaddr + PCIClkMeas);
+ printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers "
+ "%2.2x, LPA %4.4x.\n",
+ dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
+ i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
+ (int)readw(ioaddr + ANLinkPartnerAbility));
+
+ /* Hmmm, do we really need to reset the chip???. */
+ writeb(1, ioaddr + ChipReset);
+
+ /* If the bus size is misidentified, do the following. */
+ if (force32)
+ writeb(force32, ioaddr + VirtualJumpers);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Round the private area up to the next 16-byte boundary
+ (PRIV_ALIGN is the alignment mask). */
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_hamachi_dev;
+ root_hamachi_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ /* The CAM filter below has 64 entries, so cap the limit there. */
+ np->multicast_filter_limit =
+ multicast_filter_limit < 64 ? multicast_filter_limit : 64;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x2220)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port & 0x3330)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The Hamachi-specific entries in the device structure. */
+ dev->open = &hamachi_open;
+ dev->hard_start_xmit = &hamachi_start_xmit;
+ dev->stop = &hamachi_close;
+ dev->get_stats = &hamachi_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+#ifdef HAVE_CHANGE_MTU
+ dev->change_mtu = change_mtu;
+#endif
+
+ /* Scan all 32 MII addresses, recording up to 4 responding PHYs. */
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(ioaddr, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(ioaddr, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+#ifdef notyet
+ /* Disable PCI Parity Error (0x02) or PCI 64 Bit (0x01) for miswired
+ motherboards. */
+ /* NOTE(review): the statement below lacks a trailing semicolon;
+ harmless while 'notyet' stays undefined, but would not compile. */
+ if (readb(ioaddr + VirtualJumpers) != 0x30)
+ writeb(0x33, ioaddr + VirtualJumpers)
+#endif
+ /* Configure gigabit autonegotiation. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */
+ writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */
+
+ return dev;
+}
+
+/* Read one byte from the serial EEPROM at 'location'.
+   Issues a read command (0x02) and busy-waits, up to 1000 polls, for the
+   busy bit (0x40) in EECmdStatus to clear before returning the data. */
+static int read_eeprom(struct net_device *dev, int location)
+{
+ struct hamachi_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int bogus_cnt = 1000;
+
+ writew(location, ioaddr + EEAddr);
+ writeb(0x02, ioaddr + EECmdStatus);
+ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0)
+ ;
+ if (np->msg_level & NETIF_MSG_MISC)
+ printk(KERN_DEBUG " EEPROM status is %2.2x after %d ticks.\n",
+ (int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt);
+ return readb(ioaddr + EEData);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+/* Read one MII PHY register.  Starts a read command and polls the MDIO
+   busy bit (MII_Status bit 0) for up to 10000 iterations, then returns
+   whatever is in the read-data register. */
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+ int i;
+
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(1, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return readw(ioaddr + MII_Rd_Data);
+}
+
+/* Write one MII PHY register, then poll the MDIO busy bit (MII_Status
+   bit 0) for up to 10000 iterations so the controller is idle again on
+   return (the precondition mdio_read() relies on). */
+static void mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ int i;
+
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return;
+}
+
+
+/* Bring the interface up: hook the shared IRQ, build the descriptor
+   rings, program the station address, MAC, flow-control and interrupt
+   mitigation registers, then start the Rx engine and the media-monitor
+   timer.  Returns 0 on success or -EAGAIN if the IRQ is unavailable. */
+static int hamachi_open(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Do we need to reset the chip??? */
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (hmp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ hamachi_init_ring(dev);
+
+ /* Hand the (bus) addresses of both rings to the chip; 64-bit builds
+ also write the high half of each pointer. */
+#if ADDRLEN == 64
+ writel(virt_to_bus(hmp->rx_ring), ioaddr + RxPtr);
+ writel(virt_to_bus(hmp->rx_ring) >> 32, ioaddr + RxPtr + 4);
+ writel(virt_to_bus(hmp->tx_ring), ioaddr + TxPtr);
+ writel(virt_to_bus(hmp->tx_ring) >> 32, ioaddr + TxPtr + 4);
+#else
+ writel(virt_to_bus(hmp->rx_ring), ioaddr + RxPtr);
+ writel(virt_to_bus(hmp->tx_ring), ioaddr + TxPtr);
+#endif
+
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers: with so many, this will eventually be
+ converted to an offset/value list. */
+ /* Configure the FIFO for 512K external, 16K used for Tx. */
+ writew(0x0028, ioaddr + FIFOcfg);
+
+ if (dev->if_port == 0)
+ dev->if_port = hmp->default_port;
+ hmp->in_interrupt = 0;
+
+ /* Setting the Rx mode will start the Rx process. */
+ /* We are always in full-duplex mode with gigabit! */
+ hmp->full_duplex = 1;
+ writew(0x0001, ioaddr + RxChecksum); /* Enable Rx IP partial checksum. */
+ writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
+ writew(0x215F, ioaddr + MACCnfg);
+ writew(0x000C, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
+ writew(0x1018, ioaddr + FrameGap1);
+ writew(0x2780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ writel(0x0030FFFF, ioaddr + FlowCtrl);
+ writew(dev->mtu+19, ioaddr + MaxFrameSize); /* hmp->rx_buf_sz ??? */
+
+ /* Enable legacy links. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ /* Initial Link LED to blinking red. */
+ writeb(0x03, ioaddr + LEDCtrl);
+
+ /* Configure interrupt mitigation. This has a great effect on
+ performance, so systems tuning should start here!. */
+ writel(0x00080000, ioaddr + TxIntrCtrl);
+ writel(0x00000020, ioaddr + RxIntrCtrl);
+
+ hmp->rx_mode = 0; /* Force Rx mode write. */
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(0x80878787, ioaddr + InterruptEnable);
+ writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+
+ /* Configure and start the DMA channels. */
+ /* Burst sizes are in the low three bits: size = 4<<(val&7) */
+#if ADDRLEN == 64
+ writew(0x0055, ioaddr + RxDMACtrl); /* 128 dword bursts */
+ writew(0x0055, ioaddr + TxDMACtrl);
+#else
+ writew(0x0015, ioaddr + RxDMACtrl);
+ writew(0x0015, ioaddr + TxDMACtrl);
+#endif
+ writew(1, dev->base_addr + RxCmd); /* Start the Rx channel. */
+
+ if (hmp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
+ dev->name, (int)readw(ioaddr + RxStatus),
+ (int)readw(ioaddr + TxStatus));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&hmp->timer);
+ hmp->timer.expires = jiffies + 3*HZ;
+ hmp->timer.data = (unsigned long)dev;
+ hmp->timer.function = &hamachi_timer; /* timer handler */
+ add_timer(&hmp->timer);
+
+ return 0;
+}
+
+/* Media monitoring timer: optionally logs the autonegotiation registers,
+   and invokes hamachi_tx_timeout() if the Tx queue has been paused with
+   pending work past TX_TIMEOUT.  Reschedules itself every 10 seconds. */
+static void hamachi_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (hmp->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
+ "%4.4x.\n", dev->name, (int)readw(ioaddr + ANStatus),
+ (int)readw(ioaddr + ANLinkPartnerAbility));
+ printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
+ "%4.4x %4.4x %4.4x.\n", dev->name,
+ (int)readw(ioaddr + 0x0e0),
+ (int)readw(ioaddr + 0x0e2),
+ (int)readw(ioaddr + 0x0e4),
+ (int)readw(ioaddr + 0x0e6),
+ (int)readw(ioaddr + 0x0e8),
+ (int)readw(ioaddr + 0x0eA));
+ }
+ /* This has a small false-trigger window. */
+ if (netif_queue_paused(dev) &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT
+ && hmp->cur_tx - hmp->dirty_tx > 1) {
+ hamachi_tx_timeout(dev);
+ }
+ /* We could do something here... nah. */
+ hmp->timer.expires = jiffies + next_tick;
+ add_timer(&hmp->timer);
+}
+
+/* Handle a transmit timeout: log the Tx status, optionally dump both
+   descriptor rings, then prod the Tx/Rx command registers (stop=2,
+   start=1) to restart the channels and count a tx_error. */
+static void hamachi_tx_timeout(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
+
+ if (hmp->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)hmp->rx_ring[i].status_n_length);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", hmp->tx_ring[i].status_n_length);
+ printk("\n");
+ }
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes . */
+
+ /* Trigger an immediate transmit demand. */
+ writew(2, dev->base_addr + TxCmd);
+ writew(1, dev->base_addr + TxCmd);
+ writew(1, dev->base_addr + RxCmd);
+
+ dev->trans_start = jiffies;
+ hmp->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits.
+   Allocates one receive-in-place skbuff per Rx descriptor; allocation
+   failure is tolerated (dirty_rx records the shortfall so hamachi_rx()
+   can refill later). */
+static void hamachi_init_ring(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ int i;
+
+ hmp->tx_full = 0;
+ hmp->cur_rx = hmp->cur_tx = 0;
+ hmp->dirty_rx = hmp->dirty_tx = 0;
+
+ /* Size of each temporary Rx buffer. Add 8 if you do Rx checksumming! */
+ hmp->rx_buf_sz = dev->mtu + 18 + 8;
+ /* Match other driver's allocation size when possible. */
+ if (hmp->rx_buf_sz < PKT_BUF_SZ)
+ hmp->rx_buf_sz = PKT_BUF_SZ;
+ hmp->rx_head_desc = &hmp->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ hmp->rx_ring[i].status_n_length = 0;
+ hmp->rx_skbuff[i] = 0;
+ }
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ hmp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ hmp->rx_ring[i].addr = virt_to_desc(skb->tail);
+ hmp->rx_ring[i].status_n_length =
+ cpu_to_le32(DescOwn | DescEndPacket | DescIntr | hmp->rx_buf_sz);
+ }
+ hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ /* Mark the last entry as wrapping the ring. */
+ /* NOTE(review): if the very first allocation failed (i == 0) this
+ indexes rx_ring[-1], before the array -- confirm intended. */
+ hmp->rx_ring[i-1].status_n_length |= cpu_to_le32(DescEndRing);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ hmp->tx_skbuff[i] = 0;
+ hmp->tx_ring[i].status_n_length = 0;
+ }
+ return;
+}
+
+/* Queue one packet for transmission.  Fills the next Tx descriptor,
+   hands ownership to the chip (DescOwn set last), kicks the Tx channel,
+   and manages the tx_full / queue-pause state.  Returns 0 on success or
+   1 if the queue was busy (caller will requeue). */
+static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ hamachi_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = hmp->cur_tx % TX_RING_SIZE;
+
+ hmp->tx_skbuff[entry] = skb;
+
+ hmp->tx_ring[entry].addr = virt_to_desc(skb->data);
+ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
+ hmp->tx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket|DescEndRing|DescIntr | skb->len);
+ else
+ hmp->tx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket | skb->len);
+ hmp->cur_tx++;
+
+ /* Architecture-specific: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ writew(1, dev->base_addr + TxCmd);
+
+ if (hmp->cur_tx - hmp->dirty_tx >= TX_QUEUE_LEN - 1) {
+ hmp->tx_full = 1;
+ /* NOTE(review): this inner test can never succeed immediately
+ after the outer '>=' check above, so the unpause path below
+ looks unreachable -- confirm against other Becker drivers. */
+ if (hmp->cur_tx - hmp->dirty_tx < TX_QUEUE_LEN - 1) {
+ netif_unpause_tx_queue(dev);
+ hmp->tx_full = 0;
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ dev->trans_start = jiffies;
+
+ if (hmp->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Hamachi transmit frame #%d length %d queued "
+ "in slot %d.\n", dev->name, hmp->cur_tx, (int)skb->len, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread.  Reading InterruptClear both returns and clears
+   the pending status; the loop services events until the status is zero
+   or the work budget is exhausted. */
+static void hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct hamachi_private *hmp;
+ long ioaddr;
+ /* NOTE(review): budget comes from the global max_interrupt_work, not
+ hmp->max_interrupt_work, so per-device SIOCSPARAMS updates are not
+ seen here -- confirm intended. */
+ int boguscnt = max_interrupt_work;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ hmp = (struct hamachi_private *)dev->priv;
+ if (test_and_set_bit(0, (void*)&hmp->in_interrupt)) {
+ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+ hmp->in_interrupt = 0; /* Avoid future hang on bug */
+ return;
+ }
+
+ do {
+ u32 intr_status = readl(ioaddr + InterruptClear);
+
+ if (hmp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & IntrRxDone)
+ hamachi_rx(dev);
+
+ /* Reap completed Tx descriptors (chip clears DescOwn when done). */
+ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
+ int entry = hmp->dirty_tx % TX_RING_SIZE;
+ if (!(hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn)))
+ break;
+ if (hmp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, hmp->tx_ring[entry].status_n_length);
+ /* Free the original skb. */
+ dev_free_skb_irq(hmp->tx_skbuff[entry]);
+ hmp->tx_skbuff[entry] = 0;
+ hmp->stats.tx_packets++;
+ }
+ if (hmp->tx_full
+ && hmp->cur_tx - hmp->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ hmp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status &
+ (IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
+ LinkChange | NegotiationChange | StatsMax))
+ hamachi_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (hmp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+ clear_bit(0, (void*)&hmp->in_interrupt);
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation.
+   Walks the Rx ring from rx_head_desc, delivering completed packets
+   (copying small ones when under rx_copybreak), then refills empty ring
+   slots and restarts the Rx engine.  Always returns 0. */
+static int hamachi_rx(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ int entry = hmp->cur_rx % RX_RING_SIZE;
+ int boguscnt = hmp->dirty_rx + RX_RING_SIZE - hmp->cur_rx;
+
+ if (hmp->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
+ entry, hmp->rx_ring[entry].status_n_length);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ( ! (hmp->rx_head_desc->status_n_length & cpu_to_le32(DescOwn))) {
+ struct hamachi_desc *desc = hmp->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->status_n_length);
+ u16 data_size = desc_status; /* Implicit truncate */
+ u8 *buf_addr = hmp->rx_skbuff[entry]->tail;
+ /* The frame status word is read from a trailer the chip appends
+ at the end of the received data (12 bytes back). */
+ s32 frame_status =
+ le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
+
+ if (hmp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & DescEndPacket)) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, hmp->cur_rx, data_size, desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+ dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status"
+ " %x last status %x.\n", dev->name,
+ hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length,
+ hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length);
+ hmp->stats.rx_length_errors++;
+ } /* else Omit for prototype errata??? */
+ if (frame_status & 0x00380000) {
+ /* There was a error. */
+ if (hmp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
+ frame_status);
+ hmp->stats.rx_errors++;
+ if (frame_status & 0x00600000) hmp->stats.rx_length_errors++;
+ if (frame_status & 0x00080000) hmp->stats.rx_frame_errors++;
+ if (frame_status & 0x00100000) hmp->stats.rx_crc_errors++;
+ if (frame_status < 0) hmp->stats.rx_dropped++;
+ } else {
+ struct sk_buff *skb;
+ u16 pkt_len = (frame_status & 0x07ff) - 4; /* Omit CRC */
+
+#if ! defined(final_version) && 0
+ if (hmp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+ if (hmp->msg_level & NETIF_MSG_PKTDATA)
+ printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name,
+ *(s32*)&(buf_addr[data_size - 20]),
+ *(s32*)&(buf_addr[data_size - 16]),
+ *(s32*)&(buf_addr[data_size - 12]),
+ *(s32*)&(buf_addr[data_size - 8]),
+ *(s32*)&(buf_addr[data_size - 4]));
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ eth_copy_and_sum(skb, hmp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ } else {
+ /* Pass the in-place skbuff up; the slot is refilled below. */
+ char *temp = skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
+ hmp->rx_skbuff[entry] = NULL;
+#if ! defined(final_version)
+ if (bus_to_virt(desc->addr) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in hamachi_rx: %p vs. %p / %p.\n",
+ dev->name, bus_to_virt(desc->addr),
+ skb->head, temp);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ hmp->stats.rx_packets++;
+ }
+ entry = (++hmp->cur_rx) % RX_RING_SIZE;
+ hmp->rx_head_desc = &hmp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = hmp->dirty_rx % RX_RING_SIZE;
+ if (hmp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(hmp->rx_buf_sz);
+ hmp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ hmp->rx_ring[entry].addr = virt_to_desc(skb->tail);
+ }
+ if (entry >= RX_RING_SIZE-1) /* Wrap ring */
+ hmp->rx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket|DescEndRing|DescIntr | hmp->rx_buf_sz);
+ else
+ hmp->rx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket|DescIntr | hmp->rx_buf_sz);
+ }
+
+ /* Restart Rx engine if stopped. */
+ writew(1, dev->base_addr + RxCmd);
+ return 0;
+}
+
+/* This is more properly named "uncommon interrupt events", as it covers more
+   than just errors: link/negotiation changes update the LED and link state,
+   StatsMax harvests the hardware counters, and PCI faults are counted as
+   FIFO errors. */
+static void hamachi_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+
+ if (intr_status & (LinkChange|NegotiationChange)) {
+ if (hmp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
+ " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
+ dev->name, (int)readw(ioaddr + 0x0E0),
+ (int)readw(ioaddr + 0x0E2),
+ (int)readw(ioaddr + ANLinkPartnerAbility),
+ (int)readl(ioaddr + IntrStatus));
+ if (readw(ioaddr + ANStatus) & 0x20) {
+ writeb(0x01, ioaddr + LEDCtrl); /* Solid LED: link up. */
+ netif_link_up(dev);
+ } else {
+ writeb(0x03, ioaddr + LEDCtrl); /* Blinking LED: link down. */
+ netif_link_down(dev);
+ }
+ }
+ if (intr_status & StatsMax) {
+ hamachi_get_stats(dev);
+ /* Read the overflow bits to clear. */
+ readl(ioaddr + 0x36C);
+ readl(ioaddr + 0x3F0);
+ }
+ if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange))
+ && (hmp->msg_level & NETIF_MSG_DRV))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ hmp->stats.tx_fifo_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ hmp->stats.rx_fifo_errors++;
+}
+
+/* Shut the interface down: stop the queue, mask interrupts, halt both
+   DMA channels, kill the media timer, release the IRQ and free every
+   skbuff still held by the rings. */
+static int hamachi_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (hmp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, (int)readw(ioaddr + TxStatus),
+ (int)readw(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx,
+ hmp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + InterruptEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ /* NOTE(review): 32-bit write to RxCmd here while every other RxCmd
+ access in this driver uses writew -- confirm intended. */
+ writel(2, ioaddr + RxCmd);
+ writew(2, ioaddr + TxCmd);
+
+ del_timer(&hmp->timer);
+
+#ifdef __i386__
+ if (hmp->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(hmp->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %c #%d desc. %8.8x %8.8x.\n",
+ readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
+ i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(hmp->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x\n",
+ readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
+ i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
+ if (*(u8*)hmp->rx_ring[i].addr != 0x69) {
+ int j;
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x", ((u16*)hmp->rx_ring[i].addr)[j]);
+ printk("\n");
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ hmp->rx_ring[i].status_n_length = 0;
+ hmp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (hmp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ hmp->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(hmp->rx_skbuff[i]);
+ }
+ hmp->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (hmp->tx_skbuff[i])
+ dev_free_skb(hmp->tx_skbuff[i]);
+ hmp->tx_skbuff[i] = 0;
+ }
+
+ writeb(0x00, ioaddr + LEDCtrl); /* LED off. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Harvest the hardware statistics counters into hmp->stats and return
+   the accumulated structure.  Register offsets are the chip's on-board
+   statistics block. */
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+
+ /* We should lock this segment of code for SMP eventually, although
+ the vulnerability window is very small and statistics are
+ non-critical. */
+#if LINUX_VERSION_CODE >= 0x20119
+ hmp->stats.rx_bytes += readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */
+ hmp->stats.tx_bytes += readl(ioaddr + 0x3B0); /* Total Uni+Brd+Multi */
+#endif
+ hmp->stats.multicast += readl(ioaddr + 0x320); /* Multicast Rx */
+
+ hmp->stats.rx_length_errors += readl(ioaddr + 0x368); /* Over+Undersized */
+ hmp->stats.rx_over_errors += readl(ioaddr + 0x35C); /* Jabber */
+ hmp->stats.rx_crc_errors += readl(ioaddr + 0x360);
+ hmp->stats.rx_frame_errors += readl(ioaddr + 0x364); /* Symbol Errs */
+ hmp->stats.rx_missed_errors += readl(ioaddr + 0x36C); /* Dropped */
+
+ return &hmp->stats;
+}
+
+/* Program the Rx filter: promiscuous (0x000F), all-multicast (0x000B),
+   CAM-filtered multicast (0x0003, up to 64 CAM entries at 0x100), or
+   unicast/broadcast only (0x0001).  AddrMode is only rewritten when the
+   mode actually changes. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct hamachi_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ new_rx_mode = 0x000F;
+ } else if (dev->mc_count > np->multicast_filter_limit ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ new_rx_mode = 0x000B;
+ } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ struct dev_mc_list *mclist;
+ int i;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
+ ioaddr + 0x104 + i*8);
+ }
+ /* Clear remaining entries. */
+ for (; i < 64; i++)
+ writel(0, ioaddr + 0x104 + i*8);
+ new_rx_mode = 0x0003;
+ } else { /* Normal, unicast/broadcast-only mode. */
+ new_rx_mode = 0x0001;
+ }
+ if (np->rx_mode != new_rx_mode) {
+ np->rx_mode = new_rx_mode;
+ writew(new_rx_mode, ioaddr + AddrMode);
+ }
+}
+
+/* Private ioctl handler: MII register access (numeric codes are the
+   SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG pairs) plus the driver-specific
+   SIOCGPARAMS/SIOCSPARAMS tuning interface.  Write operations require
+   CAP_NET_ADMIN. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct hamachi_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* We are always full duplex. Skip recording the advertised value. */
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS: {
+ /* Set rx,tx intr params, from Eric Kasten. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->max_interrupt_work = data32[2];
+ writel(data32[1], dev->base_addr + TxIntrCtrl);
+ writel(data32[3], dev->base_addr + RxIntrCtrl);
+ printk(KERN_INFO "%s: Set interrupt mitigate paramters tx %08x, "
+ "rx %08x.\n", dev->name,
+ (int) readl(dev->base_addr + TxIntrCtrl),
+ (int) readl(dev->base_addr + RxIntrCtrl));
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#ifdef HAVE_CHANGE_MTU
+/* Change the interface MTU.  Accepts 68..1536 bytes and only while the
+   interface is down (the Rx buffer size is fixed at open time). */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1536))
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ printk(KERN_NOTICE "%s: Changing MTU to %d.\n", dev->name, new_mtu);
+ dev->mtu = new_mtu;
+ return 0;
+}
+#endif
+
+
+#ifdef MODULE
+/* Module load entry point: print the version banner (when debugging is
+   enabled) and register the PCI driver. */
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&hamachi_drv_id, NULL);
+}
+
+/* Module unload: unregister the PCI driver, then walk the device list
+   releasing each net_device, its mapped I/O region and the unaligned
+   private allocation (priv_addr). */
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&hamachi_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_hamachi_dev) {
+ struct hamachi_private *hmp = (void *)(root_hamachi_dev->priv);
+ unregister_netdev(root_hamachi_dev);
+ iounmap((char *)root_hamachi_dev->base_addr);
+ next_dev = hmp->next_module;
+ if (hmp->priv_addr)
+ kfree(hmp->priv_addr);
+ kfree(root_hamachi_dev);
+ root_hamachi_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` hamachi.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c hamachi.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c hamachi.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp-plus.c b/linux/src/drivers/net/hp-plus.c
new file mode 100644
index 0000000..c2b7116
--- /dev/null
+++ b/linux/src/drivers/net/hp-plus.c
@@ -0,0 +1,483 @@
+/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
+ These cards are sold under several model numbers, usually 2724*.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ As is often the case, a great deal of credit is owed to Russ Nelson.
+ The Crynwr packet driver was my primary source of HP-specific
+ programming information.
+*/
+
+static const char *version =
+"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/string.h> /* Important -- this inlines word moves. */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hpplus_portlist[] =
+{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
+
+/*
+ The HP EtherTwist chip implementation is a fairly routine DP8390
+ implementation. It allows both shared memory and programmed-I/O buffer
+ access, using a custom interface for both. The programmed-I/O mode is
+ entirely implemented in the HP EtherTwist chip, bypassing the problem
+ ridden built-in 8390 facilities used on NE2000 designs. The shared
+ memory mode is likewise special, with an offset register used to make
+ packets appear at the shared memory base. Both modes use a base and bounds
+ page register to hide the Rx ring buffer wrap -- a packet that spans the
+ end of physical buffer memory appears continuous to the driver. (c.f. the
+ 3c503 and Cabletron E2100)
+
+ A special note: the internal buffer of the board is only 8 bits wide.
+ This lays several nasty traps for the unaware:
+ - the 8390 must be programmed for byte-wide operations
+ - all I/O and memory operations must work on whole words (the access
+ latches are serially preloaded and have no byte-swapping ability).
+
+ This board is laid out in I/O space much like the earlier HP boards:
+ the first 16 locations are for the board registers, and the second 16 are
+ for the 8390. The board is easy to identify, with both a dedicated 16 bit
+ ID register and a constant 0x530* value in the upper bits of the paging
+ register.
+*/
+
+#define HP_ID 0x00 /* ID register, always 0x4850. */
+#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
+#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
+#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
+#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
+#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
+#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
+#define HP_IO_EXTENT 32
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* The register set selected in HP_PAGING. */
+enum PageName {
+ Perf_Page = 0, /* Normal operation. */
+ MAC_Page = 1, /* The ethernet address (+checksum). */
+ HW_Page = 2, /* EEPROM-loaded hardware parameters. */
+ LAN_Page = 4, /* Transceiver selection, testing, etc. */
+ ID_Page = 6 };
+
+/* The bit definitions for the HPP_OPTION register. */
+enum HP_Option {
+ NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
+ EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
+ MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
+
+int hp_plus_probe(struct device *dev);
+int hpp_probe1(struct device *dev, int ioaddr);
+
+static void hpp_reset_8390(struct device *dev);
+static int hpp_open(struct device *dev);
+static int hpp_close(struct device *dev);
+static void hpp_mem_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_mem_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void hpp_mem_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hpp_io_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_io_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void hpp_io_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* Probe a list of addresses for an HP LAN+ adaptor.
+ This routine is almost boilerplate. */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+   boilerplate below. */
+struct netdev_entry hpplus_drv =
+{"hpplus", hpp_probe1, HP_IO_EXTENT, hpplus_portlist};
+#else
+
+/* Probe for an HP PC LAN+ adaptor.  A dev->base_addr above 0x1ff checks
+   only that port; any other nonzero value disables probing; zero scans
+   the hpplus_portlist[] table.  Returns 0 on success, otherwise a
+   positive ENXIO/ENODEV (the historical convention for these probes). */
+int hp_plus_probe(struct device *dev)
+{
+	int i;
+	int base_addr = dev ? dev->base_addr : 0;
+
+	if (base_addr > 0x1ff)		/* Check a single specified location. */
+		return hpp_probe1(dev, base_addr);
+	else if (base_addr != 0)	/* Don't probe at all. */
+		return ENXIO;
+
+	for (i = 0; hpplus_portlist[i]; i++) {
+		int ioaddr = hpplus_portlist[i];
+		if (check_region(ioaddr, HP_IO_EXTENT))	/* Port already claimed. */
+			continue;
+		if (hpp_probe1(dev, ioaddr) == 0)
+			return 0;
+	}
+
+	return ENODEV;
+}
+#endif
+
+/* Do the interesting part of the probe at a single address. */
+/* Do the interesting part of the probe at a single address.
+   Verifies the HP+ ID signature, checksums the EEPROM station address,
+   reads the software-configured IRQ and optional shared-memory base,
+   then fills in the 8390 driver hooks: programmed-I/O transfer routines
+   by default, shared-memory routines when the MemEnable option is set.
+   Returns 0 on success; positive ENODEV or negative -ENOMEM on failure
+   (the mixed error-sign convention is inherited from the original
+   driver and matched by the callers of this era). */
+int hpp_probe1(struct device *dev, int ioaddr)
+{
+	int i;
+	unsigned char checksum = 0;
+	const char *name = "HP-PC-LAN+";
+	int mem_start;
+	static unsigned version_printed = 0;
+
+	/* Check for the HP+ signature, 50 48 0x 53. */
+	if (inw(ioaddr + HP_ID) != 0x4850
+		|| (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300)
+		return ENODEV;
+
+	/* We should have a "dev" from Space.c or the static module table. */
+	if (dev == NULL) {
+		printk("hp-plus.c: Passed a NULL device.\n");
+		dev = init_etherdev(0, 0);
+	}
+
+	if (ei_debug && version_printed++ == 0)
+		printk("%s", version);
+
+	printk("%s: %s at %#3x,", dev->name, name, ioaddr);
+
+	/* Retrieve and checksum the station address. */
+	outw(MAC_Page, ioaddr + HP_PAGING);
+
+	for(i = 0; i < ETHER_ADDR_LEN; i++) {
+		unsigned char inval = inb(ioaddr + 8 + i);
+		dev->dev_addr[i] = inval;
+		checksum += inval;
+		printk(" %2.2x", inval);
+	}
+	checksum += inb(ioaddr + 14);	/* Checksum byte follows the address. */
+
+	if (checksum != 0xff) {
+		printk(" bad checksum %2.2x.\n", checksum);
+		return ENODEV;
+	} else {
+		/* Point at the Software Configuration Flags. */
+		outw(ID_Page, ioaddr + HP_PAGING);
+		printk(" ID %4.4x", inw(ioaddr + 12));
+	}
+
+	/* Allocate dev->priv and fill in 8390 specific dev fields. */
+	if (ethdev_init(dev)) {
+		printk ("hp-plus.c: unable to allocate memory for dev->priv.\n");
+		return -ENOMEM;
+	}
+
+	/* Grab the region so we can find another board if something fails. */
+	request_region(ioaddr, HP_IO_EXTENT,"hp-plus");
+
+	/* Read the IRQ line. */
+	outw(HW_Page, ioaddr + HP_PAGING);
+	{
+		int irq = inb(ioaddr + 13) & 0x0f;
+		int option = inw(ioaddr + HPP_OPTION);
+
+		dev->irq = irq;
+		if (option & MemEnable) {
+			mem_start = inw(ioaddr + 9) << 8;
+			printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
+		} else {
+			mem_start = 0;
+			printk(", IRQ %d, programmed-I/O mode.\n", irq);
+		}
+	}
+
+	/* Set the wrap registers for string I/O reads.
+	   NOTE(review): TX_2X_PAGES presumably reserves a double-sized Tx
+	   buffer ahead of the Rx ring -- it is defined in 8390.h, not here. */
+	outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+	/* Set the base address to point to the NIC, not the "real" base! */
+	dev->base_addr = ioaddr + NIC_OFFSET;
+
+	dev->open = &hpp_open;
+	dev->stop = &hpp_close;
+
+	ei_status.name = name;
+	ei_status.word16 = 0;		/* Agggghhhhh! Debug time: 2 days! */
+	ei_status.tx_start_page = HP_START_PG;
+	ei_status.rx_start_page = HP_START_PG + TX_2X_PAGES;
+	ei_status.stop_page = HP_STOP_PG;
+
+	ei_status.reset_8390 = &hpp_reset_8390;
+	ei_status.block_input = &hpp_io_block_input;
+	ei_status.block_output = &hpp_io_block_output;
+	ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
+
+	/* Check if the memory_enable flag is set in the option register. */
+	if (mem_start) {
+		ei_status.block_input = &hpp_mem_block_input;
+		ei_status.block_output = &hpp_mem_block_output;
+		ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
+		dev->mem_start = mem_start;
+		dev->rmem_start = dev->mem_start + TX_2X_PAGES*256;
+		dev->mem_end = dev->rmem_end
+			= dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
+	}
+
+	outw(Perf_Page, ioaddr + HP_PAGING);
+	NS8390_init(dev, 0);
+	/* Leave the 8390 and HP chip reset. */
+	outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
+
+	return 0;
+}
+
+/* Open the interface: claim the IRQ, hard-reset then un-reset the 8390
+   and HP chip (the reset bits are active low, "really UNreset"), program
+   the ring-wrap registers for programmed I/O, and start the 8390. */
+static int
+hpp_open(struct device *dev)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	int option_reg;
+
+	if (request_irq(dev->irq, &ei_interrupt, 0, "hp-plus", NULL)) {
+	    return -EAGAIN;
+	}
+
+	/* Reset the 8390 and HP chip. */
+	option_reg = inw(ioaddr + HPP_OPTION);
+	outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+	SLOW_DOWN_IO; SLOW_DOWN_IO;	/* Let the hardware reset settle. */
+	/* Unreset the board and enable interrupts. */
+	outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+	/* Set the wrap registers for programmed-I/O operation. */
+	outw(HW_Page, ioaddr + HP_PAGING);
+	outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+	/* Select the operational page. */
+	outw(Perf_Page, ioaddr + HP_PAGING);
+
+	ei_open(dev);
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+/* Close the interface: release the IRQ, stop the 8390, and park the
+   board with interrupts masked and both chips held in reset. */
+static int
+hpp_close(struct device *dev)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	int option_reg = inw(ioaddr + HPP_OPTION);
+
+	free_irq(dev->irq, NULL);
+	irq2dev_map[dev->irq] = NULL;
+	ei_close(dev);
+	/* Reset bits are active low, so writing them here UN-resets; the
+	   board is left with memory and IRQ disabled. */
+	outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
+		 ioaddr + HPP_OPTION);
+
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/* Hard-reset the 8390 and HP chip after an error, then re-enable them
+   and verify the 8390 reports reset complete in its ISR. */
+static void
+hpp_reset_8390(struct device *dev)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	int option_reg = inw(ioaddr + HPP_OPTION);
+
+	if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+
+	outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+	/* Pause a few cycles for the hardware reset to take place. */
+	SLOW_DOWN_IO;
+	SLOW_DOWN_IO;
+	ei_status.txing = 0;		/* Any transmit in flight is gone now. */
+	outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+	SLOW_DOWN_IO; SLOW_DOWN_IO;
+
+
+	if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+		printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+	if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+	return;
+}
+
+/* The programmed-I/O version of reading the 4 byte 8390 specific header.
+ Note that transfer with the EtherTwist+ must be on word boundaries. */
+
+/* The programmed-I/O version of reading the 4 byte 8390 specific header.
+   Note that transfer with the EtherTwist+ must be on word boundaries:
+   the input address is set to the start of ring page 'ring_page' and the
+   header is read as words through the data port. */
+static void
+hpp_io_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+
+	outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+	insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. */
+
+/* Programmed-I/O block input, similar to the Crynwr packet driver.
+   Reads 'count' bytes from ring offset 'ring_offset' into the skb; an
+   odd trailing byte is fetched as a word and only its low byte kept. */
+static void
+hpp_io_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	char *buf = skb->data;
+
+	outw(ring_offset, ioaddr + HPP_IN_ADDR);
+	insw(ioaddr + HP_DATAPORT, buf, count>>1);
+	if (count & 0x01)
+		buf[count-1] = inw(ioaddr + HP_DATAPORT);	/* Low byte of the word. */
+}
+
+/* The corresponding shared memory versions of the above 2 functions. */
+
+/* Shared-memory version of reading the 8390 packet header.  Memory
+   access is briefly enabled (MemDisable/BootROMEnb cleared), the header
+   copied from the window at dev->mem_start, then the option register is
+   restored.  The count is rounded up to a multiple of 4 so that the
+   subsequent block_input can rely on whole-word transfers. */
+static void
+hpp_mem_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	int option_reg = inw(ioaddr + HPP_OPTION);
+
+	outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+	outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+	memcpy_fromio(hdr, dev->mem_start, sizeof(struct e8390_pkt_hdr));
+	outw(option_reg, ioaddr + HPP_OPTION);
+	hdr->count = (hdr->count + 3) & ~3;	/* Round up allocation. */
+}
+
+/* Shared-memory block input: map the ring offset into the memory
+   window, enable memory access, copy the packet, restore the option
+   register. */
+static void
+hpp_mem_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	int option_reg = inw(ioaddr + HPP_OPTION);
+
+	outw(ring_offset, ioaddr + HPP_IN_ADDR);
+
+	outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+
+	/* Caution: this relies on get_8390_hdr() rounding up count!
+	   Also note that we *can't* use eth_io_copy_and_sum() because
+	   it will not always copy "count" bytes (e.g. padded IP).  */
+
+	memcpy_fromio(skb->data, dev->mem_start, count);
+	outw(option_reg, ioaddr + HPP_OPTION);
+}
+
+/* A special note: we *must* always transfer >=16 bit words.
+ It's always safe to round up, so we do. */
+/* A special note: we *must* always transfer >=16 bit words.
+   It's always safe to round up, so we do.  Programmed-I/O block output:
+   the 'outsl' below transfers (count+3)/4 32-bit words, i.e. count
+   rounded up to a multiple of 4 bytes. */
+static void
+hpp_io_block_output(struct device *dev, int count,
+					const unsigned char *buf, const int start_page)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+	outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
+	return;
+}
+
+/* Shared-memory block output: enable memory access, copy the frame
+   (rounded up to a multiple of 4 bytes, matching the word-transfer
+   requirement noted above), then restore the option register. */
+static void
+hpp_mem_block_output(struct device *dev, int count,
+				const unsigned char *buf, const int start_page)
+{
+	int ioaddr = dev->base_addr - NIC_OFFSET;
+	int option_reg = inw(ioaddr + HPP_OPTION);
+
+	outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+	outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+	memcpy_toio(dev->mem_start, buf, (count + 3) & ~3);
+	outw(option_reg, ioaddr + HPP_OPTION);
+
+	return;
+}
+
+
+#ifdef MODULE
+#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_HPP_CARDS] = { 0, };
+static struct device dev_hpp[MAX_HPP_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_HPP_CARDS] = { 0, };
+static int irq[MAX_HPP_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+/* Module entry: register up to MAX_HPP_CARDS devices using the io[] and
+   irq[] module parameters.  Only the first device may autoprobe (io==0);
+   register_netdev() triggers dev->init == hp_plus_probe for each. */
+int
+init_module(void)
+{
+	int this_dev, found = 0;
+
+	for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+		struct device *dev = &dev_hpp[this_dev];
+		dev->name = namelist+(NAMELEN*this_dev);	/* Static name slot. */
+		dev->irq = irq[this_dev];
+		dev->base_addr = io[this_dev];
+		dev->init = hp_plus_probe;
+		if (io[this_dev] == 0)  {
+			if (this_dev != 0) break; /* only autoprobe 1st one */
+			printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
+		}
+		if (register_netdev(dev) != 0) {
+			printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
+			if (found != 0) return 0;	/* Got at least one. */
+			return -ENXIO;
+		}
+		found++;
+	}
+
+	return 0;
+}
+
+/* Module exit: for each device that actually probed (priv != NULL),
+   free its private data, release the I/O region, and unregister it.
+   NOTE(review): freeing dev->priv before unregister_netdev() follows
+   this driver's original ordering; the IRQ itself was already released
+   by hpp_close(). */
+void
+cleanup_module(void)
+{
+	int this_dev;
+
+	for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+		struct device *dev = &dev_hpp[this_dev];
+		if (dev->priv != NULL) {
+			/* NB: hpp_close() handles free_irq + irq2dev map */
+			int ioaddr = dev->base_addr - NIC_OFFSET;
+			kfree(dev->priv);
+			dev->priv = NULL;
+			release_region(ioaddr, HP_IO_EXTENT);
+			unregister_netdev(dev);
+		}
+	}
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp-plus.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp.c b/linux/src/drivers/net/hp.c
new file mode 100644
index 0000000..6ddbfd2
--- /dev/null
+++ b/linux/src/drivers/net/hp.c
@@ -0,0 +1,451 @@
+/* hp.c: A HP LAN ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for the HP PC-LAN adaptors.
+
+ Sources:
+ The Crynwr packet driver.
+*/
+
+static const char *version =
+ "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hppclan_portlist[] =
+{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
+
+#define HP_IO_EXTENT 32
+
+#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
+#define HP_ID 0x07
+#define HP_CONFIGURE 0x08 /* Configuration register. */
+#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
+#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
+#define HP_DATAON 0x10 /* Turn on dataport */
+#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
+#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
+
+int hp_probe(struct device *dev);
+int hp_probe1(struct device *dev, int ioaddr);
+
+static int hp_open(struct device *dev);
+static int hp_close(struct device *dev);
+static void hp_reset_8390(struct device *dev);
+static void hp_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hp_block_input(struct device *dev, int count,
+ struct sk_buff *skb , int ring_offset);
+static void hp_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+
+static void hp_init_card(struct device *dev);
+
+/* The map from IRQ number to HP_CONFIGURE register setting. */
+/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
+static char irqmap[16] = { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
+
+
+/* Probe for an HP LAN adaptor.
+ Also initialize the card and fill in STATION_ADDR with the station
+ address. */
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"hp", hp_probe1, HP_IO_EXTENT, hppclan_portlist};
+#else
+
+/* Probe for an HP PC-LAN adaptor.  A dev->base_addr above 0x1ff checks
+   only that port; any other nonzero value disables probing; zero scans
+   the hppclan_portlist[] table.  Returns 0 on success, otherwise a
+   positive ENXIO/ENODEV (historical probe convention). */
+int hp_probe(struct device *dev)
+{
+	int i;
+	int base_addr = dev ? dev->base_addr : 0;
+
+	if (base_addr > 0x1ff)		/* Check a single specified location. */
+		return hp_probe1(dev, base_addr);
+	else if (base_addr != 0)	/* Don't probe at all. */
+		return ENXIO;
+
+	for (i = 0; hppclan_portlist[i]; i++) {
+		int ioaddr = hppclan_portlist[i];
+		if (check_region(ioaddr, HP_IO_EXTENT))	/* Port already claimed. */
+			continue;
+		if (hp_probe1(dev, ioaddr) == 0)
+			return 0;
+	}
+
+	return ENODEV;
+}
+#endif
+
+/* Probe one I/O address for an HP PC-LAN board.  Verifies the HP OUI in
+   the station address PROM, distinguishes the 16-bit HP27247 from the
+   8-bit HP27250 via the board-ID register, autoprobes or claims the IRQ,
+   and fills in the 8390 driver hooks.  Returns 0 on success; positive
+   ENODEV/EBUSY or negative -ENOMEM on failure (mixed sign convention
+   inherited from the original driver). */
+int hp_probe1(struct device *dev, int ioaddr)
+{
+	int i, board_id, wordmode;
+	const char *name;
+	static unsigned version_printed = 0;
+
+	/* Check for the HP physical address, 08 00 09 xx xx xx. */
+	/* This really isn't good enough: we may pick up HP LANCE boards
+	   also! Avoid the lance 0x5757 signature. */
+	if (inb(ioaddr) != 0x08
+		|| inb(ioaddr+1) != 0x00
+		|| inb(ioaddr+2) != 0x09
+		|| inb(ioaddr+14) == 0x57)
+		return ENODEV;
+
+	/* Set up the parameters based on the board ID.
+	   If you have additional mappings, please mail them to me -djb. */
+	if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
+		name = "HP27247";
+		wordmode = 1;
+	} else {
+		name = "HP27250";
+		wordmode = 0;
+	}
+
+	/* We should have a "dev" from Space.c or the static module table. */
+	if (dev == NULL) {
+		printk("hp.c: Passed a NULL device.\n");
+		dev = init_etherdev(0, 0);
+	}
+
+	if (ei_debug && version_printed++ == 0)
+		printk("%s", version);
+
+	printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
+
+	for(i = 0; i < ETHER_ADDR_LEN; i++)
+		printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+	/* Snarf the interrupt now.  Someday this could be moved to open(). */
+	if (dev->irq < 2) {
+		/* Candidate IRQ lists, terminated by 0; the 16-bit board
+		   supports more lines than the 8-bit one. */
+		int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
+		int irq_8list[] = { 7, 5, 3, 4, 9, 0};
+		int *irqp = wordmode ? irq_16list : irq_8list;
+		do {
+			int irq = *irqp;
+			/* NOTE(review): request_irq() with a NULL handler here is
+			   presumably the old "is this line free?" test -- confirm
+			   against the kernel version this patch targets. */
+			if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
+				autoirq_setup(0);
+				/* Twinkle the interrupt, and check if it's seen. */
+				outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
+				outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
+				if (irq == autoirq_report(0)	 /* It's a good IRQ line! */
+					&& request_irq (irq, &ei_interrupt, 0, "hp", NULL) == 0) {
+					printk(" selecting IRQ %d.\n", irq);
+					dev->irq = *irqp;
+					break;
+				}
+			}
+		} while (*++irqp);
+		if (*irqp == 0) {
+			printk(" no free IRQ lines.\n");
+			return EBUSY;
+		}
+	} else {
+		if (dev->irq == 2)		/* IRQ2 is really the cascaded IRQ9. */
+			dev->irq = 9;
+		if (request_irq(dev->irq, ei_interrupt, 0, "hp", NULL)) {
+			printk (" unable to get IRQ %d.\n", dev->irq);
+			return EBUSY;
+		}
+	}
+
+	/* Allocate dev->priv and fill in 8390 specific dev fields. */
+	if (ethdev_init(dev)) {
+		printk (" unable to get memory for dev->priv.\n");
+		free_irq(dev->irq, NULL);
+		return -ENOMEM;
+	}
+
+	/* Grab the region so we can find another board if something fails. */
+	request_region(ioaddr, HP_IO_EXTENT,"hp");
+
+	/* Set the base address to point to the NIC, not the "real" base! */
+	dev->base_addr = ioaddr + NIC_OFFSET;
+	dev->open = &hp_open;
+	dev->stop = &hp_close;
+
+	ei_status.name = name;
+	ei_status.word16 = wordmode;
+	ei_status.tx_start_page = HP_START_PG;
+	ei_status.rx_start_page = HP_START_PG + TX_PAGES;
+	ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
+
+	ei_status.reset_8390 = &hp_reset_8390;
+	ei_status.get_8390_hdr = &hp_get_8390_hdr;
+	ei_status.block_input = &hp_block_input;
+	ei_status.block_output = &hp_block_output;
+	hp_init_card(dev);
+
+	return 0;
+}
+
+/* Open the interface: the IRQ was already claimed at probe time, so
+   just start the 8390 and bump the module use count. */
+static int
+hp_open(struct device *dev)
+{
+	ei_open(dev);
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+/* Close the interface: stop the 8390 and drop the module use count.
+   The IRQ is kept until module/driver teardown. */
+static int
+hp_close(struct device *dev)
+{
+	ei_close(dev);
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/* Hard-reset the board by dropping HP_RUN in the configure register,
+   then restore the saved configuration and verify the 8390 reports
+   reset complete in its ISR. */
+static void
+hp_reset_8390(struct device *dev)
+{
+	int hp_base = dev->base_addr - NIC_OFFSET;
+	int saved_config = inb_p(hp_base + HP_CONFIGURE);
+
+	if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+	outb_p(0x00, hp_base + HP_CONFIGURE);
+	ei_status.txing = 0;		/* Any transmit in flight is gone now. */
+	/* Pause just a few cycles for the hardware reset to take place. */
+	SLOW_DOWN_IO;
+	SLOW_DOWN_IO;
+
+	outb_p(saved_config, hp_base + HP_CONFIGURE);
+	SLOW_DOWN_IO; SLOW_DOWN_IO;
+
+	if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+		printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+	if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+	return;
+}
+
+/* Read the 4-byte 8390 packet header via the board's "remote DMA"
+   dataport: enable the dataport, program a remote read of one header's
+   worth of bytes from the start of 'ring_page', transfer it in word or
+   byte mode depending on the board width, then disable the dataport. */
+static void
+hp_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+	int nic_base = dev->base_addr;
+	int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+	outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+	outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+	outb_p(0, nic_base + EN0_RCNTHI);
+	outb_p(0, nic_base + EN0_RSARLO);	/* On page boundary */
+	outb_p(ring_page, nic_base + EN0_RSARHI);
+	outb_p(E8390_RREAD+E8390_START, nic_base);
+
+	if (ei_status.word16)
+	  insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+	else
+	  insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+	outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you are
+ porting to a new ethercard look at the packet driver source for hints.
+ The HP LAN doesn't use shared memory -- we put the packet
+ out through the "remote DMA" dataport. */
+
+/* Block input via the "remote DMA" dataport: program a remote read of
+   'count' bytes at 'ring_offset', transfer in word or byte mode, and
+   (in this ALPHA version) cross-check the DMA termination address. */
+static void
+hp_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+	int nic_base = dev->base_addr;
+	int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+	int xfer_count = count;		/* Bytes actually moved over the bus. */
+	char *buf = skb->data;
+
+	outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+	outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+	outb_p(count >> 8, nic_base + EN0_RCNTHI);
+	outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+	outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+	outb_p(E8390_RREAD+E8390_START, nic_base);
+	if (ei_status.word16) {
+	  insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
+	  if (count & 0x01)
+		/* Odd trailing byte: read one more word, keep the low byte. */
+		buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++;
+	} else {
+		insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+	}
+	/* This is for the ALPHA version only, remove for later releases. */
+	if (ei_debug > 0) {			/* DMA termination address check... */
+	  int high = inb_p(nic_base + EN0_RSARHI);
+	  int low = inb_p(nic_base + EN0_RSARLO);
+	  int addr = (high << 8) + low;
+	  /* Check only the lower 8 bits so we can ignore ring wrap. */
+	  if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
+		printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
+			   dev->name, ring_offset + xfer_count, addr);
+	}
+	outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+/* Block output via the "remote DMA" dataport: round odd counts up for
+   word-mode boards, program a remote write to 'start_page', push the
+   buffer out in word or byte mode, and (ALPHA only) cross-check the
+   DMA termination address. */
+static void
+hp_block_output(struct device *dev, int count,
+				const unsigned char *buf, const int start_page)
+{
+	int nic_base = dev->base_addr;
+	int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+	outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+	/* Round the count up for word writes.  Do we need to do this?
+	   What effect will an odd byte count have on the 8390?
+	   I should check someday. */
+	if (ei_status.word16 && (count & 0x01))
+		count++;
+	/* We should already be in page 0, but to be safe... */
+	outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
+
+#ifdef NE8390_RW_BUGFIX
+	/* Handle the read-before-write bug the same way as the
+	   Crynwr packet driver -- the NatSemi method doesn't work. */
+	outb_p(0x42, nic_base + EN0_RCNTLO);
+	outb_p(0, nic_base + EN0_RCNTHI);
+	outb_p(0xff, nic_base + EN0_RSARLO);
+	outb_p(0x00, nic_base + EN0_RSARHI);
+#define NE_CMD 0x00
+	outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+	/* Make certain that the dummy read has occurred. */
+	inb_p(0x61);
+	inb_p(0x61);
+#endif
+
+	outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+	outb_p(count >> 8, nic_base + EN0_RCNTHI);
+	outb_p(0x00, nic_base + EN0_RSARLO);
+	outb_p(start_page, nic_base + EN0_RSARHI);
+
+	outb_p(E8390_RWRITE+E8390_START, nic_base);
+	if (ei_status.word16) {
+		/* Use the 'rep' sequence for 16 bit boards. */
+		outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
+	} else {
+		outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+	}
+
+	/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
+
+	/* This is for the ALPHA version only, remove for later releases. */
+	if (ei_debug > 0) {			/* DMA termination address check... */
+	  int high = inb_p(nic_base + EN0_RSARHI);
+	  int low = inb_p(nic_base + EN0_RSARLO);
+	  int addr = (high << 8) + low;
+	  if ((start_page << 8) + count != addr)
+		printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
+			   dev->name, (start_page << 8) + count, addr);
+	}
+	outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+	return;
+}
+
+/* This function resets the ethercard if something screws up:
+   reinitialize the 8390 and re-select the board's software-configured
+   IRQ line while setting it running (HP_RUN). */
+static void
+hp_init_card(struct device *dev)
+{
+	int irq = dev->irq;
+	NS8390_init(dev, 0);
+	outb_p(irqmap[irq&0x0f] | HP_RUN,
+		   dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
+	return;
+}
+
+#ifdef MODULE
+#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_HP_CARDS] = { 0, };
+static struct device dev_hp[MAX_HP_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_HP_CARDS] = { 0, };
+static int irq[MAX_HP_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+/* Module entry: register up to MAX_HP_CARDS devices using the io[] and
+   irq[] module parameters.  Only the first device may autoprobe (io==0);
+   register_netdev() triggers dev->init == hp_probe for each. */
+int
+init_module(void)
+{
+	int this_dev, found = 0;
+
+	for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+		struct device *dev = &dev_hp[this_dev];
+		dev->name = namelist+(NAMELEN*this_dev);	/* Static name slot. */
+		dev->irq = irq[this_dev];
+		dev->base_addr = io[this_dev];
+		dev->init = hp_probe;
+		if (io[this_dev] == 0)  {
+			if (this_dev != 0) break; /* only autoprobe 1st one */
+			printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
+		}
+		if (register_netdev(dev) != 0) {
+			printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
+			if (found != 0) return 0;	/* Got at least one. */
+			return -ENXIO;
+		}
+		found++;
+	}
+
+	return 0;
+}
+
+/* Module exit: for each device that actually probed (priv != NULL),
+   free its private data, release the IRQ (claimed at probe time, unlike
+   hp-plus which releases it in close), clear the irq2dev map, release
+   the I/O region, and unregister the device. */
+void
+cleanup_module(void)
+{
+	int this_dev;
+
+	for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+		struct device *dev = &dev_hp[this_dev];
+		if (dev->priv != NULL) {
+			int ioaddr = dev->base_addr - NIC_OFFSET;
+			kfree(dev->priv);
+			dev->priv = NULL;
+			free_irq(dev->irq, NULL);
+			irq2dev_map[dev->irq] = NULL;
+			release_region(ioaddr, HP_IO_EXTENT);
+			unregister_netdev(dev);
+		}
+	}
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp100.c b/linux/src/drivers/net/hp100.c
new file mode 100644
index 0000000..0b86ef4
--- /dev/null
+++ b/linux/src/drivers/net/hp100.c
@@ -0,0 +1,3121 @@
+/*
+** hp100.c
+** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters
+**
+** $Id: hp100.c,v 1.1.4.1 2005/06/02 18:52:39 ams Exp $
+**
+** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz>
+** Extended for new busmaster capable chipsets by
+** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>
+**
+** Maintained by: Jaroslav Kysela <perex@jcu.cz>
+**
+** This driver has only been tested with
+** -- HP J2585B 10/100 Mbit/s PCI Busmaster
+** -- HP J2585A 10/100 Mbit/s PCI
+** -- HP J2970 10 Mbit/s PCI Combo 10base-T/BNC
+** -- HP J2973 10 Mbit/s PCI 10base-T
+** -- HP J2573 10/100 ISA
+** -- Compex ReadyLink ENET100-VG4 10/100 Mbit/s PCI / EISA
+** -- Compex FreedomLine 100/VG 10/100 Mbit/s ISA / EISA / PCI
+**
+** but it should also work with the other CASCADE based adapters.
+**
+** TODO:
+** - J2573 seems to hang sometimes when in shared memory mode.
+** - Mode for Priority TX
+** - Check PCI registers, performance might be improved?
+** - To reduce interrupt load in busmaster, one could switch off
+** the interrupts that are used to refill the queues whenever the
+** queues are filled up to more than a certain threshold.
+** - some updates for EISA version of card
+**
+**
+** This source/code is public free; you can distribute it and/or modify
+** it under terms of the GNU General Public License (published by the
+** Free Software Foundation) either version two of this License, or any
+** later version.
+**
+** 1.55 -> 1.56
+** - removed printk in misc. interrupt and update statistics to allow
+** monitoring of card status
+** - timing changes in xmit routines, relogin to 100VG hub added when
+** driver does reset
+** - included fix for Compex FreedomLine PCI adapter
+**
+** 1.54 -> 1.55
+** - fixed bad initialization in init_module
+** - added Compex FreedomLine adapter
+** - some fixes in card initialization
+**
+** 1.53 -> 1.54
+** - added hardware multicast filter support (doesn't work)
+** - little changes in hp100_sense_lan routine
+** - added support for Coax and AUI (J2970)
+** - fix for multiple cards and hp100_mode parameter (insmod)
+** - fix for shared IRQ
+**
+** 1.52 -> 1.53
+** - fixed bug in multicast support
+**
+*/
+
+#define HP100_DEFAULT_PRIORITY_TX 0
+
+#undef HP100_DEBUG
+#undef HP100_DEBUG_B /* Trace */
+#undef HP100_DEBUG_BM /* Debug busmaster code (PDL stuff) */
+
+#undef HP100_DEBUG_TRAINING /* Debug login-to-hub procedure */
+#undef HP100_DEBUG_TX
+#undef HP100_DEBUG_IRQ
+#undef HP100_DEBUG_RX
+
+#undef HP100_MULTICAST_FILTER /* Need to be debugged... */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/types.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+#include <linux/delay.h>
+
+#if LINUX_VERSION_CODE < 0x020100
+#define ioremap vremap
+#define iounmap vfree
+typedef struct enet_statistics hp100_stats_t;
+#else
+#define LINUX_2_1
+typedef struct net_device_stats hp100_stats_t;
+#endif
+
+#ifndef __initfunc
+#define __initfunc(__initarg) __initarg
+#else
+#include <linux/init.h>
+#endif
+
+#include "hp100.h"
+
+/*
+ * defines
+ */
+
+#define HP100_BUS_ISA 0
+#define HP100_BUS_EISA 1
+#define HP100_BUS_PCI 2
+
+#ifndef PCI_DEVICE_ID_HP_J2585B
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+#endif
+#ifndef PCI_VENDOR_ID_COMPEX
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#endif
+#ifndef PCI_DEVICE_ID_COMPEX_ENET100VG4
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+#endif
+#ifndef PCI_VENDOR_ID_COMPEX2
+#define PCI_VENDOR_ID_COMPEX2 0x101a
+#endif
+#ifndef PCI_DEVICE_ID_COMPEX2_100VG
+#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005
+#endif
+
+#define HP100_REGION_SIZE 0x20 /* for ioports */
+
+#define HP100_MAX_PACKET_SIZE (1536+4)
+#define HP100_MIN_PACKET_SIZE 60
+
+#ifndef HP100_DEFAULT_RX_RATIO
+/* default - 75% of the onboard memory on the card is used for RX packets */
+#define HP100_DEFAULT_RX_RATIO 75
+#endif
+
+#ifndef HP100_DEFAULT_PRIORITY_TX
+/* default - don't enable transmit outgoing packets as priority */
+#define HP100_DEFAULT_PRIORITY_TX 0
+#endif
+
+/*
+ * structures
+ */
+
+struct hp100_eisa_id {
+ u_int id;
+ const char *name;
+ u_char bus;
+};
+
+struct hp100_pci_id {
+ u_short vendor;
+ u_short device;
+};
+
+struct hp100_private {
+ struct hp100_eisa_id *id;
+ u_short chip;
+ u_short soft_model;
+ u_int memory_size;
+ u_int virt_memory_size;
+ u_short rx_ratio; /* 1 - 99 */
+ u_short priority_tx; /* != 0 - priority tx */
+ u_short mode; /* PIO, Shared Mem or Busmaster */
+ u_char bus;
+ u_char pci_bus;
+ u_char pci_device_fn;
+ short mem_mapped; /* memory mapped access */
+ u_int *mem_ptr_virt; /* virtual memory mapped area, maybe NULL */
+ u_int *mem_ptr_phys; /* physical memory mapped area */
+ short lan_type; /* 10Mb/s, 100Mb/s or -1 (error) */
+ int hub_status; /* was login to hub successful? */
+ u_char mac1_mode;
+ u_char mac2_mode;
+ u_char hash_bytes[ 8 ];
+ hp100_stats_t stats;
+
+ /* Rings for busmaster mode: */
+ hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */
+ hp100_ring_t *rxrtail; /* Tail (newest) index into rxring */
+ hp100_ring_t *txrhead; /* Head (oldest) index into txring */
+ hp100_ring_t *txrtail; /* Tail (newest) index into txring */
+
+ hp100_ring_t rxring[ MAX_RX_PDL ];
+ hp100_ring_t txring[ MAX_TX_PDL ];
+
+ u_int *page_vaddr; /* Virtual address of allocated page */
+ u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
+ int rxrcommit; /* # Rx PDLs committed to adapter */
+ int txrcommit; /* # Tx PDLs committed to adapter */
+};
+
+/*
+ * variables
+ */
+
+static struct hp100_eisa_id hp100_eisa_ids[] = {
+
+ /* 10/100 EISA card with revision A Cascade chip */
+ { 0x80F1F022, "HP J2577 rev A", HP100_BUS_EISA },
+
+ /* 10/100 ISA card with revision A Cascade chip */
+ { 0x50F1F022, "HP J2573 rev A", HP100_BUS_ISA },
+
+ /* 10 only EISA card with Cascade chip */
+ { 0x2019F022, "HP 27248B", HP100_BUS_EISA },
+
+ /* 10/100 EISA card with Cascade chip */
+ { 0x4019F022, "HP J2577", HP100_BUS_EISA },
+
+ /* 10/100 ISA card with Cascade chip */
+ { 0x5019F022, "HP J2573", HP100_BUS_ISA },
+
+ /* 10/100 PCI card - old J2585A */
+ { 0x1030103c, "HP J2585A", HP100_BUS_PCI },
+
+ /* 10/100 PCI card - new J2585B - master capable */
+ { 0x1041103c, "HP J2585B", HP100_BUS_PCI },
+
+ /* 10 Mbit Combo Adapter */
+ { 0x1042103c, "HP J2970", HP100_BUS_PCI },
+
+ /* 10 Mbit 10baseT Adapter */
+ { 0x1040103c, "HP J2973", HP100_BUS_PCI },
+
+ /* 10/100 EISA card from Compex */
+ { 0x0103180e, "ReadyLink ENET100-VG4", HP100_BUS_EISA },
+
+ /* 10/100 EISA card from Compex - FreedomLine (sq5bpf) */
+ /* Note: plhbrod@mbox.vol.cz reported that the ISA version */
+ /* of the adapter has the same ID, too... */
+ { 0x0104180e, "FreedomLine 100/VG", HP100_BUS_EISA },
+
+ /* 10/100 PCI card from Compex - FreedomLine
+ *
+ * I think this card doesn't like aic7178 scsi controller, but
+ * I haven't tested this much. It works fine on diskless machines.
+ * Jacek Lipkowski <sq5bpf@acid.ch.pw.edu.pl>
+ */
+ { 0x021211f6, "FreedomLine 100/VG", HP100_BUS_PCI },
+
+ /* 10/100 PCI card from Compex (J2585A compatible) */
+ { 0x011211f6, "ReadyLink ENET100-VG4", HP100_BUS_PCI }
+
+};
+
+#define HP100_EISA_IDS_SIZE (sizeof(hp100_eisa_ids)/sizeof(struct hp100_eisa_id))
+
+static struct hp100_pci_id hp100_pci_ids[] = {
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A },
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B },
+ { PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4 },
+ { PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG }
+};
+
+#define HP100_PCI_IDS_SIZE (sizeof(hp100_pci_ids)/sizeof(struct hp100_pci_id))
+
+static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
+static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
+static int hp100_mode = 1;
+
+#ifdef LINUX_2_1
+MODULE_PARM( hp100_rx_ratio, "1i" );
+MODULE_PARM( hp100_priority_tx, "1i" );
+MODULE_PARM( hp100_mode, "1i" );
+#endif
+
+/*
+ * prototypes
+ */
+
+static int hp100_probe1( struct device *dev, int ioaddr, u_char bus, u_char pci_bus, u_char pci_device_fn );
+static int hp100_open( struct device *dev );
+static int hp100_close( struct device *dev );
+static int hp100_start_xmit( struct sk_buff *skb, struct device *dev );
+static int hp100_start_xmit_bm (struct sk_buff *skb, struct device *dev );
+static void hp100_rx( struct device *dev );
+static hp100_stats_t *hp100_get_stats( struct device *dev );
+static void hp100_misc_interrupt( struct device *dev );
+static void hp100_update_stats( struct device *dev );
+static void hp100_clear_stats( int ioaddr );
+static void hp100_set_multicast_list( struct device *dev);
+static void hp100_interrupt( int irq, void *dev_id, struct pt_regs *regs );
+static void hp100_start_interface( struct device *dev );
+static void hp100_stop_interface( struct device *dev );
+static void hp100_load_eeprom( struct device *dev, u_short ioaddr );
+static int hp100_sense_lan( struct device *dev );
+static int hp100_login_to_vg_hub( struct device *dev, u_short force_relogin );
+static int hp100_down_vg_link( struct device *dev );
+static void hp100_cascade_reset( struct device *dev, u_short enable );
+static void hp100_BM_shutdown( struct device *dev );
+static void hp100_mmuinit( struct device *dev );
+static void hp100_init_pdls( struct device *dev );
+static int hp100_init_rxpdl( struct device *dev, register hp100_ring_t *ringptr, register u_int *pdlptr);
+static int hp100_init_txpdl( struct device *dev, register hp100_ring_t *ringptr, register u_int *pdlptr);
+static void hp100_rxfill( struct device *dev );
+static void hp100_hwinit( struct device *dev );
+static void hp100_clean_txring( struct device *dev );
+#ifdef HP100_DEBUG
+static void hp100_RegisterDump( struct device *dev );
+#endif
+
+/* TODO: This function should not really be needed in a good design... */
+static void wait( void )
+{
+ udelay( 1000 );
+}
+
+/*
+ * probe functions
+ * These functions should - if possible - avoid doing write operations
+ * since this could cause problems when the card is not installed.
+ */
+
+__initfunc(int hp100_probe( struct device *dev ))
+{
+ int base_addr = dev ? dev -> base_addr : 0;
+ int ioaddr = 0;
+#ifdef CONFIG_PCI
+ int pci_start_index = 0;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4200, TRACE );
+ printk( "hp100: %s: probe\n", dev->name );
+#endif
+
+ if ( base_addr > 0xff ) /* Check a single specified location. */
+ {
+ if ( check_region( base_addr, HP100_REGION_SIZE ) ) return -EINVAL;
+ if ( base_addr < 0x400 )
+ return hp100_probe1( dev, base_addr, HP100_BUS_ISA, 0, 0 );
+ if ( EISA_bus && base_addr >= 0x1c38 && ( (base_addr - 0x1c38) & 0x3ff ) == 0 )
+ return hp100_probe1( dev, base_addr, HP100_BUS_EISA, 0, 0 );
+#ifdef CONFIG_PCI
+ printk( "hp100: %s: You may specify card # in i/o address parameter for PCI bus...", dev->name );
+ return hp100_probe1( dev, base_addr, HP100_BUS_PCI, 0, 0 );
+#else
+ return -ENODEV;
+#endif
+ }
+ else
+#ifdef CONFIG_PCI
+ if ( base_addr > 0 && base_addr < 8 + 1 )
+ pci_start_index = 0x100 | ( base_addr - 1 );
+ else
+#endif
+ if ( base_addr != 0 ) return -ENXIO;
+
+ /* at first - scan PCI bus(es) */
+
+#ifdef CONFIG_PCI
+ if ( pcibios_present() )
+ {
+ int pci_index;
+
+#ifdef HP100_DEBUG_PCI
+ printk( "hp100: %s: PCI BIOS is present, checking for devices..\n", dev->name );
+#endif
+ for ( pci_index = pci_start_index & 7; pci_index < 8; pci_index++ )
+ {
+ u_char pci_bus, pci_device_fn;
+ u_short pci_command;
+ int pci_id_index;
+
+ for ( pci_id_index = 0; pci_id_index < HP100_PCI_IDS_SIZE; pci_id_index++ )
+ if ( pcibios_find_device( hp100_pci_ids[ pci_id_index ].vendor,
+ hp100_pci_ids[ pci_id_index ].device,
+ pci_index, &pci_bus,
+ &pci_device_fn ) == 0 ) goto __pci_found;
+ break;
+
+ __pci_found:
+ pcibios_read_config_dword( pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &ioaddr );
+
+ ioaddr &= ~3; /* remove I/O space marker in bit 0. */
+
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+
+ pcibios_read_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command );
+ if ( !( pci_command & PCI_COMMAND_IO ) )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name );
+#endif
+ pci_command |= PCI_COMMAND_IO;
+ pcibios_write_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command );
+ }
+ if ( !( pci_command & PCI_COMMAND_MASTER ) )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name );
+#endif
+ pci_command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command );
+ }
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr );
+#endif
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_PCI, pci_bus, pci_device_fn ) == 0 )
+ return 0;
+ }
+ }
+ if ( pci_start_index > 0 ) return -ENODEV;
+#endif /* CONFIG_PCI */
+
+ /* Second: Probe all possible EISA port regions (if EISA bus present) */
+ for ( ioaddr = 0x1c38; EISA_bus && ioaddr < 0x10000; ioaddr += 0x400 )
+ {
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_EISA, 0, 0 ) == 0 ) return 0;
+ }
+
+ /* Third: Probe all possible ISA port regions */
+ for ( ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20 )
+ {
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_ISA, 0, 0 ) == 0 ) return 0;
+ }
+
+ return -ENODEV;
+}
+
+
+__initfunc(static int hp100_probe1( struct device *dev, int ioaddr, u_char bus, u_char pci_bus, u_char pci_device_fn ))
+{
+ int i;
+
+ u_char uc, uc_1;
+ u_int eisa_id;
+ u_int chip;
+ u_int memory_size = 0, virt_memory_size = 0;
+ u_short local_mode, lsw;
+ short mem_mapped;
+ u_int *mem_ptr_phys, *mem_ptr_virt;
+ struct hp100_private *lp;
+ struct hp100_eisa_id *eid;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4201, TRACE );
+ printk("hp100: %s: probe1\n",dev->name);
+#endif
+
+ if ( dev == NULL )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100_probe1: %s: dev == NULL ?\n", dev->name );
+#endif
+ return EIO;
+ }
+
+ if ( hp100_inw( HW_ID ) != HP100_HW_ID_CASCADE )
+ {
+ return -ENODEV;
+ }
+ else
+ {
+ chip = hp100_inw( PAGING ) & HP100_CHIPID_MASK;
+#ifdef HP100_DEBUG
+ if ( chip == HP100_CHIPID_SHASTA )
+ printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if ( chip == HP100_CHIPID_RAINIER )
+ printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if ( chip == HP100_CHIPID_LASSEN )
+ printk("hp100: %s: Lassen Chip detected.\n", dev->name);
+ else
+ printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n",dev->name,chip);
+#endif
+ }
+
+ dev->base_addr = ioaddr;
+
+ hp100_page( ID_MAC_ADDR );
+ for ( i = uc = eisa_id = 0; i < 4; i++ )
+ {
+ eisa_id >>= 8;
+ uc_1 = hp100_inb( BOARD_ID + i );
+ eisa_id |= uc_1 << 24;
+ uc += uc_1;
+ }
+ uc += hp100_inb( BOARD_ID + 4 );
+
+ if ( uc != 0xff ) /* bad checksum? */
+ {
+ printk("hp100_probe: %s: bad EISA ID checksum at base port 0x%x\n", dev->name, ioaddr );
+ return -ENODEV;
+ }
+
+ for ( i=0; i < HP100_EISA_IDS_SIZE; i++)
+ if ( hp100_eisa_ids[ i ].id == eisa_id )
+ break;
+ if ( i >= HP100_EISA_IDS_SIZE ) {
+ for ( i = 0; i < HP100_EISA_IDS_SIZE; i++)
+ if ( ( hp100_eisa_ids[ i ].id & 0xf0ffffff ) == ( eisa_id & 0xf0ffffff ) )
+ break;
+ if ( i >= HP100_EISA_IDS_SIZE ) {
+ printk( "hp100_probe: %s: card at port 0x%x isn't known (id = 0x%x)\n", dev -> name, ioaddr, eisa_id );
+ return -ENODEV;
+ }
+ }
+ eid = &hp100_eisa_ids[ i ];
+ if ( ( eid->id & 0x0f000000 ) < ( eisa_id & 0x0f000000 ) )
+ {
+ printk( "hp100_probe: %s: newer version of card %s at port 0x%x - unsupported\n",
+ dev->name, eid->name, ioaddr );
+ return -ENODEV;
+ }
+
+ for ( i = uc = 0; i < 7; i++ )
+ uc += hp100_inb( LAN_ADDR + i );
+ if ( uc != 0xff )
+ {
+ printk("hp100_probe: %s: bad lan address checksum (card %s at port 0x%x)\n",
+ dev->name, eid->name, ioaddr );
+ return -EIO;
+ }
+
+ /* Make sure, that all registers are correctly updated... */
+
+ hp100_load_eeprom( dev, ioaddr );
+ wait();
+
+ /*
+ * Determine driver operation mode
+ *
+ * Use the variable "hp100_mode" upon insmod or as kernel parameter to
+ * force driver modes:
+ * hp100_mode=1 -> default, use busmaster mode if configured.
+ * hp100_mode=2 -> enable shared memory mode
+ * hp100_mode=3 -> force use of i/o mapped mode.
+ * hp100_mode=4 -> same as 1, but re-set the enable bit on the card.
+ */
+
+ /*
+ * LSW values:
+ * 0x2278 -> J2585B, PnP shared memory mode
+ * 0x2270 -> J2585B, shared memory mode, 0xdc000
+ * 0xa23c -> J2585B, I/O mapped mode
+ * 0x2240 -> EISA COMPEX, BusMaster (Shasta Chip)
+ * 0x2220 -> EISA HP, I/O (Shasta Chip)
+ * 0x2260 -> EISA HP, BusMaster (Shasta Chip)
+ */
+
+#if 0
+ local_mode = 0x2270;
+ hp100_outw(0xfefe,OPTION_LSW);
+ hp100_outw(local_mode|HP100_SET_LB|HP100_SET_HB,OPTION_LSW);
+#endif
+
+ /* hp100_mode value may be used in the future by another card */
+ local_mode=hp100_mode;
+ if ( local_mode < 1 || local_mode > 4 )
+ local_mode = 1; /* default */
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: original LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW) );
+#endif
+
+ if(local_mode==3)
+ {
+ hp100_outw(HP100_MEM_EN|HP100_RESET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN|HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE|HP100_BM_READ|HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: %s: IO mapped mode forced.\n", dev->name);
+ }
+ else if(local_mode==2)
+ {
+ hp100_outw(HP100_MEM_EN|HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN |HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE|HP100_BM_READ|HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: %s: Shared memory mode requested.\n", dev->name);
+ }
+ else if(local_mode==4)
+ {
+ if(chip==HP100_CHIPID_LASSEN)
+ {
+ hp100_outw(HP100_BM_WRITE|
+ HP100_BM_READ | HP100_SET_HB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN |
+ HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+ printk("hp100: %s: Busmaster mode requested.\n",dev->name);
+ }
+ local_mode=1;
+ }
+
+ if(local_mode==1) /* default behaviour */
+ {
+ lsw = hp100_inw(OPTION_LSW);
+
+ if ( (lsw & HP100_IO_EN) &&
+ (~lsw & HP100_MEM_EN) &&
+ (~lsw & (HP100_BM_WRITE|HP100_BM_READ)) )
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: IO_EN bit is set on card.\n",dev->name);
+#endif
+ local_mode=3;
+ }
+ else if ( chip == HP100_CHIPID_LASSEN &&
+ ( lsw & (HP100_BM_WRITE|HP100_BM_READ) ) ==
+ (HP100_BM_WRITE|HP100_BM_READ) )
+ {
+ printk("hp100: %s: Busmaster mode enabled.\n",dev->name);
+ hp100_outw(HP100_MEM_EN|HP100_IO_EN|HP100_RESET_LB, OPTION_LSW);
+ }
+ else
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name );
+ printk("hp100: %s: Trying shared memory mode.\n", dev->name);
+#endif
+ /* In this case, try shared memory mode */
+ local_mode=2;
+ hp100_outw(HP100_MEM_EN|HP100_SET_LB, OPTION_LSW);
+ /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */
+ }
+ }
+
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW) );
+#endif
+
+ /* Check for shared memory on the card; remap it if necessary */
+ hp100_page( HW_MAP );
+ mem_mapped = (( hp100_inw( OPTION_LSW ) & ( HP100_MEM_EN ) ) != 0);
+ mem_ptr_phys = mem_ptr_virt = NULL;
+ memory_size = (8192<<( (hp100_inb(SRAM)>>5)&0x07));
+ virt_memory_size = 0;
+
+ /* For memory mapped or busmaster mode, we want the memory address */
+ if ( mem_mapped || (local_mode==1))
+ {
+ mem_ptr_phys = (u_int *)( hp100_inw( MEM_MAP_LSW ) |
+ ( hp100_inw( MEM_MAP_MSW ) << 16 ) );
+ mem_ptr_phys = (u_int *) ((u_int) mem_ptr_phys & ~0x1fff); /* 8k alignment */
+ if ( bus == HP100_BUS_ISA && ( (u_long)mem_ptr_phys & ~0xfffff ) != 0 )
+ {
+ printk("hp100: %s: Can only use programmed i/o mode.\n", dev->name);
+ mem_ptr_phys = NULL;
+ mem_mapped = 0;
+ local_mode=3; /* Use programmed i/o */
+ }
+
+ /* We do not need access to shared memory in busmaster mode */
+ /* However in slave mode we need to remap high (>1GB) card memory */
+ if(local_mode!=1) /* = not busmaster */
+ {
+ if ( bus == HP100_BUS_PCI && mem_ptr_phys >= (u_int *)0x100000 )
+ {
+ /* We try with smaller memory sizes, if ioremap fails */
+ for(virt_memory_size = memory_size; virt_memory_size>16383; virt_memory_size>>=1)
+ {
+ if((mem_ptr_virt=ioremap((u_long)mem_ptr_phys,virt_memory_size))==NULL)
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, (u_long)mem_ptr_phys );
+#endif
+ }
+ else
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to 0x%lx.\n", dev->name, virt_memory_size, (u_long)mem_ptr_phys, (u_long)mem_ptr_virt);
+#endif
+ break;
+ }
+ }
+
+ if(mem_ptr_virt==NULL) /* all ioremap tries failed */
+ {
+ printk("hp100: %s: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n", dev->name);
+ local_mode=3;
+ virt_memory_size = 0;
+ }
+ }
+ }
+
+ }
+
+ if(local_mode==3) /* io mapped forced */
+ {
+ mem_mapped = 0;
+ mem_ptr_phys = mem_ptr_virt = NULL;
+ printk("hp100: %s: Using (slow) programmed i/o mode.\n", dev->name);
+ }
+
+ /* Initialise the "private" data structure for this card. */
+ if ( (dev->priv=kmalloc(sizeof(struct hp100_private), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset( dev->priv, 0, sizeof(struct hp100_private) );
+
+ lp = (struct hp100_private *)dev->priv;
+ lp->id = eid;
+ lp->chip = chip;
+ lp->mode = local_mode;
+ lp->pci_bus = pci_bus;
+ lp->bus = bus;
+ lp->pci_device_fn = pci_device_fn;
+ lp->priority_tx = hp100_priority_tx;
+ lp->rx_ratio = hp100_rx_ratio;
+ lp->mem_ptr_phys = mem_ptr_phys;
+ lp->mem_ptr_virt = mem_ptr_virt;
+ hp100_page( ID_MAC_ADDR );
+ lp->soft_model = hp100_inb( SOFT_MODEL );
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset( &lp->hash_bytes, 0x00, 8 );
+
+ dev->base_addr = ioaddr;
+
+ lp->memory_size = memory_size;
+ lp->virt_memory_size = virt_memory_size;
+ lp->rx_ratio = hp100_rx_ratio; /* can be conf'd with insmod */
+
+ /* memory region for programmed i/o */
+ request_region( dev->base_addr, HP100_REGION_SIZE, eid->name );
+
+ dev->open = hp100_open;
+ dev->stop = hp100_close;
+
+ if (lp->mode==1) /* busmaster */
+ dev->hard_start_xmit = hp100_start_xmit_bm;
+ else
+ dev->hard_start_xmit = hp100_start_xmit;
+
+ dev->get_stats = hp100_get_stats;
+ dev->set_multicast_list = &hp100_set_multicast_list;
+
+ /* Ask the card for which IRQ line it is configured */
+ hp100_page( HW_MAP );
+ dev->irq = hp100_inb( IRQ_CHANNEL ) & HP100_IRQMASK;
+ if ( dev->irq == 2 )
+ dev->irq = 9;
+
+ if(lp->mode==1) /* busmaster */
+ dev->dma=4;
+
+ /* Ask the card for its MAC address and store it for later use. */
+ hp100_page( ID_MAC_ADDR );
+ for ( i = uc = 0; i < 6; i++ )
+ dev->dev_addr[ i ] = hp100_inb( LAN_ADDR + i );
+
+ /* Reset statistics (counters) */
+ hp100_clear_stats( ioaddr );
+
+ ether_setup( dev );
+
+ /* If busmaster mode is wanted, a dma-capable memory area is needed for
+ * the rx and tx PDLs
+ * PCI cards can access the whole PC memory. Therefore GFP_DMA is not
+ * needed for the allocation of the memory area.
+ */
+
+ /* TODO: We do not need this with old cards, where PDLs are stored
+ * in the cards shared memory area. But currently, busmaster has been
+ * implemented/tested only with the lassen chip anyway... */
+ if(lp->mode==1) /* busmaster */
+ {
+ /* Get physically contiguous memory for TX & RX PDLs */
+ if ( (lp->page_vaddr=kmalloc(MAX_RINGSIZE+0x0f,GFP_KERNEL) ) == NULL)
+ return -ENOMEM;
+ lp->page_vaddr_algn=((u_int *) ( ((u_int)(lp->page_vaddr)+0x0f) &~0x0f));
+ memset(lp->page_vaddr, 0, MAX_RINGSIZE+0x0f);
+
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n",
+ dev->name,
+ (u_int)lp->page_vaddr_algn,
+ (u_int)lp->page_vaddr_algn+MAX_RINGSIZE);
+#endif
+ lp->rxrcommit = lp->txrcommit = 0;
+ lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+ lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ }
+
+ /* Initialise the card. */
+ /* (I'm not really sure if it's a good idea to do this during probing, but
+ * like this it's assured that the lan connection type can be sensed
+ * correctly)
+ */
+ hp100_hwinit( dev );
+
+ /* Try to find out which kind of LAN the card is connected to. */
+ lp->lan_type = hp100_sense_lan( dev );
+
+ /* Print out a message about what we think we have probed. */
+ printk( "hp100: %s: %s at 0x%x, IRQ %d, ",
+ dev->name, lp->id->name, ioaddr, dev->irq );
+ switch ( bus ) {
+ case HP100_BUS_EISA: printk( "EISA" ); break;
+ case HP100_BUS_PCI: printk( "PCI" ); break;
+ default: printk( "ISA" ); break;
+ }
+ printk( " bus, %dk SRAM (rx/tx %d%%).\n",
+ lp->memory_size >> 10, lp->rx_ratio );
+
+ if ( lp->mode==2 ) /* memory mapped */
+ {
+ printk( "hp100: %s: Memory area at 0x%lx-0x%lx",
+ dev->name,(u_long)mem_ptr_phys,
+ ((u_long)mem_ptr_phys+(mem_ptr_phys>(u_int *)0x100000?(u_long)lp->memory_size:16*1024))-1 );
+ if ( mem_ptr_virt )
+ printk( " (virtual base 0x%lx)", (u_long)mem_ptr_virt );
+ printk( ".\n" );
+
+ /* Set for info when doing ifconfig */
+ dev->mem_start = (u_long)mem_ptr_phys;
+ dev->mem_end = (u_long)mem_ptr_phys+(u_long)lp->memory_size;
+ }
+ printk( "hp100: %s: ", dev->name );
+ if ( lp->lan_type != HP100_LAN_ERR )
+ printk( "Adapter is attached to " );
+ switch ( lp->lan_type ) {
+ case HP100_LAN_100:
+ printk( "100Mb/s Voice Grade AnyLAN network.\n" );
+ break;
+ case HP100_LAN_10:
+ printk( "10Mb/s network.\n" );
+ break;
+ default:
+ printk( "Warning! Link down.\n" );
+ }
+
+ return 0;
+}
+
+
+/* This procedure puts the card into a stable init state */
+static void hp100_hwinit( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4202, TRACE );
+ printk("hp100: %s: hwinit\n", dev->name);
+#endif
+
+ /* Initialise the card. -------------------------------------------- */
+
+ /* Clear all pending Ints and disable Ints */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* clear all pending ints */
+
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB, OPTION_LSW );
+ hp100_outw( HP100_TRI_INT | HP100_SET_HB, OPTION_LSW );
+
+ if(lp->mode==1)
+ {
+ hp100_BM_shutdown( dev ); /* disables BM, puts cascade in reset */
+ wait();
+ }
+ else
+ {
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB, OPTION_LSW );
+ hp100_cascade_reset( dev, TRUE );
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN|HP100_TX_EN), MAC_CFG_1);
+ }
+
+ /* Initiate EEPROM reload */
+ hp100_load_eeprom( dev, 0 );
+
+ wait();
+
+ /* Go into reset again. */
+ hp100_cascade_reset( dev, TRUE );
+
+ /* Set Option Registers to a safe state */
+ hp100_outw( HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN |
+ HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB |
+ HP100_FAKE_INT |
+ HP100_INT_EN |
+ HP100_MEM_EN |
+ HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+
+ hp100_outw( HP100_TRI_INT |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW );
+
+ hp100_outb( HP100_PRIORITY_TX |
+ HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW );
+
+ /* TODO: Configure MMU for Ram Test. */
+ /* TODO: Ram Test. */
+
+ /* Re-check if adapter is still at same i/o location */
+ /* (If the base i/o in eeprom has been changed but the */
+ /* registers had not been changed, a reload of the eeprom */
+ /* would move the adapter to the address stored in eeprom */
+
+ /* TODO: Code to implement. */
+
+ /* Until here it was code from HWdiscover procedure. */
+ /* Next comes code from mmuinit procedure of SCO BM driver which is
+ * called from HWconfigure in the SCO driver. */
+
+ /* Initialise MMU, eventually switch on Busmaster Mode, initialise
+ * multicast filter...
+ */
+ hp100_mmuinit( dev );
+
+ /* We don't turn the interrupts on here - this is done by start_interface. */
+ wait(); /* TODO: Do we really need this? */
+
+ /* Enable Hardware (e.g. unreset) */
+ hp100_cascade_reset( dev, FALSE );
+
+ /* ------- initialisation complete ----------- */
+
+ /* Finally try to log in the Hub if there may be a VG connection. */
+ if( lp->lan_type != HP100_LAN_10 )
+ hp100_login_to_vg_hub( dev, FALSE ); /* relogin */
+}
+
+
+/*
+ * mmuinit - Reinitialise Cascade MMU and MAC settings.
+ * Note: Must already be in reset and leaves card in reset.
+ */
+static void hp100_mmuinit( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int i;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4203, TRACE );
+ printk("hp100: %s: mmuinit\n",dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ if( 0!=(hp100_inw(OPTION_LSW)&HP100_HW_RST) )
+ {
+ printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n",dev->name);
+ return;
+ }
+#endif
+
+ /* Make sure IRQs are masked off and ack'ed. */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack IRQ */
+
+ /*
+ * Enable Hardware
+ * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En
+ * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable
+ * - Clear Priority, Advance Pkt and Xmit Cmd
+ */
+
+ hp100_outw( HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN | HP100_RESET_HB |
+ HP100_IO_EN |
+ HP100_FAKE_INT |
+ HP100_INT_EN | HP100_RESET_LB, OPTION_LSW );
+
+ hp100_outw( HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+ if(lp->mode==1) /* busmaster */
+ {
+ hp100_outw( HP100_BM_WRITE |
+ HP100_BM_READ |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW );
+ }
+ else if(lp->mode==2) /* memory mapped */
+ {
+ hp100_outw( HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB, OPTION_LSW );
+ hp100_outw( HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW );
+ hp100_outw( HP100_MEM_EN | HP100_SET_LB, OPTION_LSW );
+ hp100_outw( HP100_IO_EN | HP100_SET_LB, OPTION_LSW );
+ }
+ else if( lp->mode==3 ) /* i/o mapped mode */
+ {
+ hp100_outw( HP100_MMAP_DIS | HP100_SET_HB |
+ HP100_IO_EN | HP100_SET_LB, OPTION_LSW );
+ }
+
+ hp100_page( HW_MAP );
+ hp100_outb( 0, EARLYRXCFG );
+ hp100_outw( 0, EARLYTXCFG );
+
+ /*
+ * Enable Bus Master mode
+ */
+ if(lp->mode==1) /* busmaster */
+ {
+ /* Experimental: Set some PCI configuration bits */
+ hp100_page( HW_MAP );
+ hp100_andb( ~HP100_PDL_USE3, MODECTRL1 ); /* BM engine read maximum */
+ hp100_andb( ~HP100_TX_DUALQ, MODECTRL1 ); /* No Queue for Priority TX */
+
+ /* PCI Bus failures should result in a Misc. Interrupt */
+ hp100_orb( HP100_EN_BUS_FAIL, MODECTRL2);
+
+ hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW );
+ hp100_page( HW_MAP );
+ /* Use Burst Mode and switch on PAGE_CK */
+ hp100_orb( HP100_BM_BURST_RD |
+ HP100_BM_BURST_WR, BM);
+ if((lp->chip==HP100_CHIPID_RAINIER)||(lp->chip==HP100_CHIPID_SHASTA))
+ hp100_orb( HP100_BM_PAGE_CK, BM );
+ hp100_orb( HP100_BM_MASTER, BM );
+ }
+ else /* not busmaster */
+ {
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM );
+ }
+
+ /*
+ * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs
+ */
+ hp100_page( MMU_CFG );
+ if(lp->mode==1) /* only needed for Busmaster */
+ {
+ int xmit_stop, recv_stop;
+
+ if((lp->chip==HP100_CHIPID_RAINIER)||(lp->chip==HP100_CHIPID_SHASTA))
+ {
+ int pdl_stop;
+
+ /*
+ * Each pdl is 508 bytes long. (63 frags * 4 bytes for address and
+ * 4 bytes for header). We will leave NUM_RXPDLS * 508 (rounded
+ * to the next higher 1k boundary) bytes for the rx-pdl's
+ * Note: For non-etr chips the transmit stop register must be
+ * programmed on a 1k boundary, i.e. bits 9:0 must be zero.
+ */
+ pdl_stop = lp->memory_size;
+ xmit_stop = ( pdl_stop-508*(MAX_RX_PDL)-16 )& ~(0x03ff);
+ recv_stop = ( xmit_stop * (lp->rx_ratio)/100 ) &~(0x03ff);
+ hp100_outw( (pdl_stop>>4)-1, PDL_MEM_STOP );
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop);
+#endif
+ }
+ else /* ETR chip (Lassen) in busmaster mode */
+ {
+ xmit_stop = ( lp->memory_size ) - 1;
+ recv_stop = ( ( lp->memory_size * lp->rx_ratio ) / 100 ) & ~(0x03ff);
+ }
+
+ hp100_outw( xmit_stop>>4 , TX_MEM_STOP );
+ hp100_outw( recv_stop>>4 , RX_MEM_STOP );
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: TX_STOP = 0x%x\n",dev->name,xmit_stop>>4);
+ printk("hp100: %s: RX_STOP = 0x%x\n",dev->name,recv_stop>>4);
+#endif
+ }
+ else /* Slave modes (memory mapped and programmed io) */
+ {
+ hp100_outw( (((lp->memory_size*lp->rx_ratio)/100)>>4), RX_MEM_STOP );
+ hp100_outw( ((lp->memory_size - 1 )>>4), TX_MEM_STOP );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name,hp100_inw(TX_MEM_STOP));
+ printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name,hp100_inw(RX_MEM_STOP));
+#endif
+ }
+
+ /* Write MAC address into page 1 */
+ hp100_page( MAC_ADDRESS );
+ for ( i = 0; i < 6; i++ )
+ hp100_outb( dev->dev_addr[ i ], MAC_ADDR + i );
+
+ /* Zero the multicast hash registers */
+ for ( i = 0; i < 8; i++ )
+ hp100_outb( 0x0, HASH_BYTE0 + i );
+
+ /* Set up MAC defaults */
+ hp100_page( MAC_CTRL );
+
+ /* Go to LAN Page and zero all filter bits */
+ /* Zero accept error, accept multicast, accept broadcast and accept */
+ /* all directed packet bits */
+ hp100_andb( ~(HP100_RX_EN|
+ HP100_TX_EN|
+ HP100_ACC_ERRORED|
+ HP100_ACC_MC|
+ HP100_ACC_BC|
+ HP100_ACC_PHY), MAC_CFG_1 );
+
+ hp100_outb( 0x00, MAC_CFG_2 );
+
+ /* Zero the frame format bit. This works around a training bug in the */
+ /* new hubs. */
+ hp100_outb( 0x00, VG_LAN_CFG_2); /* (use 802.3) */
+
+ if(lp->priority_tx)
+ hp100_outb( HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW );
+ else
+ hp100_outb( HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW );
+
+ hp100_outb( HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW );
+
+ /* If busmaster, initialize the PDLs */
+ if(lp->mode==1)
+ hp100_init_pdls( dev );
+
+ /* Go to performance page and initalize isr and imr registers */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack IRQ */
+}
+
+
+/*
+ * open/close functions
+ */
+
+/* Bring the interface up: claim the IRQ line (shared on PCI/EISA),
+ * sense the attached LAN type, then reset and restart the card.
+ * Returns 0 on success, -EAGAIN if the IRQ could not be obtained. */
+static int hp100_open( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+#ifdef HP100_DEBUG_B
+ int ioaddr=dev->base_addr; /* needed only by the TRACE register write below */
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4204, TRACE );
+ printk("hp100: %s: open\n",dev->name);
+#endif
+
+ /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
+ if ( request_irq(dev->irq, hp100_interrupt,
+ lp->bus==HP100_BUS_PCI||lp->bus==HP100_BUS_EISA?SA_SHIRQ:SA_INTERRUPT,
+ lp->id->name, dev))
+ {
+ printk( "hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq );
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT; /* pin the module while the interface is up */
+
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ lp->lan_type = hp100_sense_lan( dev ); /* detect 10Mb/s vs 100Mb/s link */
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset( &lp->hash_bytes, 0x00, 8 ); /* clear the multicast hash filter */
+
+ hp100_stop_interface( dev );
+
+ hp100_hwinit( dev ); /* full hardware (re)initialisation */
+
+ hp100_start_interface( dev ); /* sets mac modes, enables interrupts */
+
+ return 0;
+}
+
+
+/* The close function is called when the interface is to be brought down */
+/* The close function is called when the interface is to be brought down:
+ * mask all board interrupts, stop rx/tx, log out of the VG hub when on a
+ * 100Mb/s link, and release the IRQ.  Always returns 0. */
+static int hp100_close( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4205, TRACE );
+ printk("hp100: %s: close\n", dev->name);
+#endif
+
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all IRQs */
+
+ hp100_stop_interface( dev );
+
+ /* On a 100Mb/s VG link, sign off from the hub before going down */
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status=hp100_login_to_vg_hub( dev, FALSE );
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ free_irq( dev->irq, dev );
+
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: close LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW) );
+#endif
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+
+/*
+ * Configure the PDL Rx rings and LAN
+ */
+/* Build the Rx and Tx PDL (packet descriptor list) rings inside the
+ * pre-allocated DMA page (lp->page_vaddr_algn).  Each ring is a circular
+ * singly-linked list of hp100_ring_t entries; head == tail == first entry
+ * and commit counters start at zero.  Busmaster mode only. */
+static void hp100_init_pdls( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ringptr;
+ u_int *pageptr;
+ int i;
+
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4206, TRACE );
+ printk("hp100: %s: init pdls\n", dev->name);
+#endif
+
+ if(0==lp->page_vaddr_algn)
+ printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n",dev->name);
+ else
+ {
+ /* pageptr shall point into the DMA accessible memory region */
+ /* we use this pointer to status the upper limit of allocated */
+ /* memory in the allocated page. */
+ /* note: align the pointers to the pci cache line size */
+ memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE); /* Zero Rx/Tx ring page */
+ pageptr=lp->page_vaddr_algn;
+
+ lp->rxrcommit =0;
+ ringptr = lp->rxrhead = lp-> rxrtail = &(lp->rxring[0]);
+
+ /* Initialise Rx Ring */
+ /* Walk backwards so each entry's .next points at the entry after it,
+ * closing the circle at rxring[0]. init_rxpdl returns the number of
+ * u_int words it consumed from the DMA page. */
+ for (i=MAX_RX_PDL-1; i>=0; i--)
+ {
+ lp->rxring[i].next = ringptr;
+ ringptr=&(lp->rxring[i]);
+ pageptr+=hp100_init_rxpdl(dev, ringptr, pageptr);
+ }
+
+ /* Initialise Tx Ring */
+ lp->txrcommit = 0;
+ ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ for (i=MAX_TX_PDL-1; i>=0; i--)
+ {
+ lp->txring[i].next = ringptr;
+ ringptr=&(lp->txring[i]);
+ pageptr+=hp100_init_txpdl(dev, ringptr, pageptr);
+ }
+ }
+}
+
+
+/* These functions "format" the entries in the pdl structure */
+/* They return how much memory the fragments need. */
+/* These functions "format" the entries in the pdl structure */
+/* They return how much memory the fragments need (counted in u_int
+ * words, since the caller advances a u_int* by the return value). */
+static int hp100_init_rxpdl( struct device *dev, register hp100_ring_t *ringptr, register u32 *pdlptr )
+{
+ /* pdlptr is starting adress for this pdl */
+
+ /* The card requires 16-byte alignment of the PDL area; warn if violated */
+ if( 0!=( ((unsigned)pdlptr) & 0xf) )
+ printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%x.\n",dev->name,(unsigned)pdlptr);
+
+ ringptr->pdl = pdlptr+1;
+ ringptr->pdl_paddr = virt_to_bus(pdlptr+1);
+ ringptr->skb = (void *) NULL;
+
+ /*
+ * Write address and length of first PDL Fragment (which is used for
+ * storing the RX-Header
+ * We use the 4 bytes _before_ the PDH in the pdl memory area to
+ * store this information. (PDH is at offset 0x04)
+ */
+ /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */
+
+ *(pdlptr+2) =(u_int) virt_to_bus(pdlptr); /* Address Frag 1 */
+ *(pdlptr+3) = 4; /* Length Frag 1 */
+
+ /* Reserve room for up to MAX_RX_FRAG address/length pairs plus the
+ * PDH and header slot, rounded up to a 16-byte multiple. */
+ return( ( ((MAX_RX_FRAG*2+2)+3) /4)*4 );
+}
+
+
+/* Format one Tx PDL entry: point the ring slot at its slice of the DMA
+ * page and return the number of u_int words reserved for it. */
+static int hp100_init_txpdl( struct device *dev, register hp100_ring_t *ringptr, register u32 *pdlptr )
+{
+ /* Card requires 16-byte aligned PDLs; warn if the pointer is off */
+ if( 0!=( ((unsigned)pdlptr) & 0xf) )
+ printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%x.\n",dev->name,(unsigned) pdlptr);
+
+ /* Unlike rxpdl, no extra header word is reserved before the PDH */
+ ringptr->pdl = pdlptr; /* +1; */
+ ringptr->pdl_paddr = virt_to_bus(pdlptr); /* +1 */
+ ringptr->skb = (void *) NULL;
+
+ return((((MAX_TX_FRAG*2+2)+3)/4)*4);
+}
+
+
+/*
+ * hp100_build_rx_pdl allocates an skb_buff of maximum size plus two bytes
+ * for possible odd word alignment rounding up to next dword and set PDL
+ * address for fragment#2
+ * Returns: 0 if unable to allocate skb_buff
+ * 1 if successful
+ */
+/*
+ * hp100_build_rx_pdl allocates an skb_buff of maximum size plus two bytes
+ * for possible odd word alignment rounding up to next dword and set PDL
+ * address for fragment#2
+ * Returns: 0 if unable to allocate skb_buff
+ * 1 if successful
+ */
+int hp100_build_rx_pdl( hp100_ring_t *ringptr, struct device *dev )
+{
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+#ifdef HP100_DEBUG_BM
+ u_int *p;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4207, TRACE );
+ printk("hp100: %s: build rx pdl\n", dev->name);
+#endif
+
+ /* Allocate skb buffer of maximum size */
+ /* Note: This depends on the alloc_skb functions allocating more
+ * space than requested, i.e. aligning to 16bytes */
+
+ ringptr->skb = dev_alloc_skb( ((MAX_ETHER_SIZE+2+3)/4)*4 );
+
+ if(NULL!=ringptr->skb)
+ {
+ /*
+ * Reserve 2 bytes at the head of the buffer to land the IP header
+ * on a long word boundary (According to the Network Driver section
+ * in the Linux KHG, this should help to increase performance.)
+ */
+ skb_reserve(ringptr->skb, 2);
+
+ ringptr->skb->dev=dev;
+ ringptr->skb->data=(u_char *)skb_put(ringptr->skb, MAX_ETHER_SIZE );
+
+ /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
+ /* Note: 1st Fragment is used for the 4 byte packet status
+ * (receive header). Its PDL entries are set up by init_rxpdl. So
+ * here we only have to set up the PDL fragment entries for the data
+ * part. Those 4 bytes will be stored in the DMA memory region
+ * directly before the PDL.
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
+ dev->name,
+ (u_int) ringptr->pdl,
+ ((MAX_ETHER_SIZE+2+3)/4)*4,
+ (unsigned int) ringptr->skb->data);
+#endif
+
+ ringptr->pdl[0] = 0x00020000; /* Write PDH: 2 fragments */
+ ringptr->pdl[3] = ((u_int)virt_to_bus(ringptr->skb->data)); /* Frag 2: skb data address */
+ ringptr->pdl[4] = MAX_ETHER_SIZE; /* Length of Data */
+
+#ifdef HP100_DEBUG_BM
+ for(p=(ringptr->pdl); p<(ringptr->pdl+5); p++)
+ printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n",dev->name,(u_int) p,(u_int) *p );
+#endif
+ return(1);
+ }
+ /* else: */
+ /* alloc_skb failed (no memory) -> still can receive the header
+ * fragment into PDL memory. make PDL safe by clearing msgptr and
+ * making the PDL only 1 fragment (i.e. the 4 byte packet status)
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n",
+ dev->name,
+ (u_int) ringptr->pdl);
+#endif
+
+ ringptr->pdl[0]=0x00010000; /* PDH: Count=1 Fragment */
+
+ return(0);
+}
+
+
+/*
+ * hp100_rxfill - attempt to fill the Rx Ring will empty skb's
+ *
+ * Makes assumption that skb's are always contiguous memory areas and
+ * therefore PDLs contain only 2 physical fragments.
+ * - While the number of Rx PDLs with buffers is less than maximum
+ * a. Get a maximum packet size skb
+ * b. Put the physical address of the buffer into the PDL.
+ * c. Output physical address of PDL to adapter.
+ */
+/* Fill the Rx ring with empty skb-backed PDLs and hand each one to the
+ * card via the RX_PDA register, until the ring holds MAX_RX_PDL entries
+ * or an skb allocation fails.  Requires/forces the PERFORMANCE page. */
+static void hp100_rxfill( struct device *dev )
+{
+ int ioaddr=dev->base_addr;
+
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4208, TRACE );
+ printk("hp100: %s: rxfill\n",dev->name);
+#endif
+
+ hp100_page( PERFORMANCE );
+
+ while (lp->rxrcommit < MAX_RX_PDL)
+ {
+ /*
+ ** Attempt to get a buffer and build a Rx PDL.
+ */
+ ringptr = lp->rxrtail;
+ if (0 == hp100_build_rx_pdl( ringptr, dev ))
+ {
+ return; /* None available, return */
+ }
+
+ /* Hand this PDL over to the card */
+ /* Note: This needs performance page selected! */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n",
+ dev->name,
+ lp->rxrcommit,
+ (u_int)ringptr->pdl,
+ (u_int)ringptr->pdl_paddr,
+ (u_int)ringptr->pdl[3]);
+#endif
+
+ hp100_outl( (u32)ringptr->pdl_paddr, RX_PDA);
+
+ lp->rxrcommit += 1;
+ lp->rxrtail = ringptr->next;
+ }
+}
+
+
+/*
+ * BM_shutdown - shutdown bus mastering and leave chip in reset state
+ */
+
+/* Shut down bus-master DMA in an orderly way and leave the chip in
+ * reset.  The wait loops below are bounded spin polls on status bits;
+ * the iteration counts are the driver's approximations of the timeouts
+ * stated in the comments. */
+static void hp100_BM_shutdown( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ unsigned long time;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4209, TRACE );
+ printk("hp100: %s: bm shutdown\n",dev->name);
+#endif
+
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* Ack all ints */
+
+ /* Ensure Interrupts are off */
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB , OPTION_LSW );
+
+ /* Disable all MAC activity */
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 ); /* stop rx/tx */
+
+ /* If cascade MMU is not already in reset */
+ if (0 != (hp100_inw(OPTION_LSW)&HP100_HW_RST) )
+ {
+ /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so
+ * MMU pointers will not be reset out from underneath
+ */
+ hp100_page( MAC_CTRL );
+ for(time=0; time<5000; time++)
+ {
+ if( (hp100_inb(MAC_CFG_1)&(HP100_TX_IDLE|HP100_RX_IDLE))==
+ (HP100_TX_IDLE|HP100_RX_IDLE) ) break;
+ }
+
+ /* Shutdown algorithm depends on the generation of Cascade */
+ if( lp->chip==HP100_CHIPID_LASSEN )
+ { /* ETR shutdown/reset */
+ /* Disable Busmaster mode and wait for bit to go to zero. */
+ hp100_page(HW_MAP);
+ hp100_andb( ~HP100_BM_MASTER, BM );
+ /* 100 ms timeout */
+ for(time=0; time<32000; time++)
+ {
+ if ( 0 == (hp100_inb( BM ) & HP100_BM_MASTER) ) break;
+ }
+ }
+ else
+ { /* Shasta or Rainier Shutdown/Reset */
+ /* To ensure all bus master inloading activity has ceased,
+ * wait for no Rx PDAs or no Rx packets on card.
+ */
+ hp100_page( PERFORMANCE );
+ /* 100 ms timeout */
+ for(time=0; time<10000; time++)
+ {
+ /* RX_PDL: PDLs not executed. */
+ /* RX_PKT_CNT: RX'd packets on card. */
+ if ( (hp100_inb( RX_PDL ) == 0) &&
+ (hp100_inb( RX_PKT_CNT ) == 0) ) break;
+ }
+
+ if(time>=10000)
+ printk("hp100: %s: BM shutdown error.\n", dev->name);
+
+ /* To ensure all bus master outloading activity has ceased,
+ * wait until the Tx PDA count goes to zero or no more Tx space
+ * available in the Tx region of the card.
+ */
+ /* 100 ms timeout */
+ for(time=0; time<10000; time++) {
+ if ( (0 == hp100_inb( TX_PKT_CNT )) &&
+ (0 != (hp100_inb( TX_MEM_FREE )&HP100_AUTO_COMPARE))) break;
+ }
+
+ /* Disable Busmaster mode */
+ hp100_page(HW_MAP);
+ hp100_andb( ~HP100_BM_MASTER, BM );
+ } /* end of shutdown procedure for non-etr parts */
+
+ hp100_cascade_reset( dev, TRUE );
+ }
+ hp100_page( PERFORMANCE );
+ /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */
+ /* Busmaster mode should be shut down now. */
+}
+
+
+
+/*
+ * transmit functions
+ */
+
+/* tx function for busmaster mode */
+/* tx function for busmaster mode.
+ * Queues the skb on the Tx PDL ring and hands its physical PDL address
+ * to the card.  Returns 0 on success, -EAGAIN when the ring is full (the
+ * "full" path also re-senses the LAN and may reset the interface), or
+ * -EIO when no link can be detected at all.  The skb is freed later by
+ * hp100_clean_txring once the card reports it transmitted. */
+static int hp100_start_xmit_bm( struct sk_buff *skb, struct device *dev )
+{
+ unsigned long flags;
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4210, TRACE );
+ printk("hp100: %s: start_xmit_bm\n",dev->name);
+#endif
+
+ if ( skb==NULL )
+ {
+#ifndef LINUX_2_1
+ dev_tint( dev );
+#endif
+ return 0;
+ }
+
+ if ( skb->len <= 0 ) return 0;
+
+ /* Get Tx ring tail pointer */
+ if( lp->txrtail->next==lp->txrhead )
+ {
+ /* No memory. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
+#endif
+ /* not waited long enough since last tx? */
+ if ( jiffies - dev->trans_start < HZ ) return -EAGAIN;
+
+ if ( lp->lan_type < 0 ) /* no LAN type detected yet? */
+ {
+ hp100_stop_interface( dev );
+ if ( ( lp->lan_type = hp100_sense_lan( dev ) ) < 0 )
+ {
+ printk( "hp100: %s: no connection found - check wire\n", dev->name );
+ hp100_start_interface( dev ); /* 10Mb/s RX pkts maybe handled */
+ return -EIO;
+ }
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE ); /* relogin */
+ hp100_start_interface( dev );
+ }
+
+ if ( lp->lan_type == HP100_LAN_100 && lp->hub_status < 0 )
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ {
+ printk( "hp100: %s: login to 100Mb/s hub retry\n", dev->name );
+ hp100_stop_interface( dev );
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ hp100_ints_off();
+ i = hp100_sense_lan( dev );
+ hp100_ints_on();
+ if ( i == HP100_LAN_ERR )
+ printk( "hp100: %s: link down detected\n", dev->name );
+ else
+ if ( lp->lan_type != i ) /* cable change! */
+ {
+ /* it's very hard - all network setting must be changed!!! */
+ printk( "hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name );
+ lp->lan_type = i;
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ printk( "hp100: %s: interface reset\n", dev->name );
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ }
+
+ dev->trans_start = jiffies;
+ return -EAGAIN;
+ }
+
+ /*
+ * we have to turn int's off before modifying this, otherwise
+ * a tx_pdl_cleanup could occur at the same time
+ */
+ save_flags( flags );
+ cli();
+ ringptr=lp->txrtail;
+ lp->txrtail=ringptr->next;
+
+ /* Check whether packet has minimal packet size */
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+ ringptr->skb=skb;
+ ringptr->pdl[0]=((1<<16) | i); /* PDH: 1 Fragment & length */
+ ringptr->pdl[1]=(u32)virt_to_bus(skb->data); /* 1st Frag: Adr. of data */
+ if(lp->chip==HP100_CHIPID_SHASTA)
+ {
+ /* TODO:Could someone who has the EISA card please check if this works? */
+ ringptr->pdl[2]=i;
+ }
+ else /* Lassen */
+ {
+ /* In the PDL, don't use the padded size but the real packet size: */
+ ringptr->pdl[2]=skb->len; /* 1st Frag: Length of frag */
+ }
+
+ /* Hand this PDL to the card. */
+ hp100_outl( ringptr->pdl_paddr, TX_PDA_L ); /* Low Prio. Queue */
+
+ lp->txrcommit++;
+ restore_flags( flags );
+
+ /* Update statistics */
+ lp->stats.tx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.tx_bytes += skb->len;
+#endif
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+
+/* clean_txring checks if packets have been sent by the card by reading
+ * the TX_PDL register from the performance page and comparing it to the
+ * number of commited packets. It then frees the skb's of the packets that
+ * obviously have been sent to the network.
+ *
+ * Needs the PERFORMANCE page selected.
+ */
+/* clean_txring checks if packets have been sent by the card by reading
+ * the TX_PDL register from the performance page and comparing it to the
+ * number of commited packets. It then frees the skb's of the packets that
+ * obviously have been sent to the network.
+ *
+ * Needs the PERFORMANCE page selected.
+ */
+static void hp100_clean_txring( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int donecount;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4211, TRACE );
+ printk("hp100: %s: clean txring\n", dev->name);
+#endif
+
+ /* How many PDLs have been transmitted?
+ * TX_PDL holds the number of PDLs still pending on the card, so the
+ * difference against our commit count is the number completed. */
+ donecount=(lp->txrcommit)-hp100_inb(TX_PDL);
+
+#ifdef HP100_DEBUG
+ if(donecount>MAX_TX_PDL)
+ printk("hp100: %s: Warning: More PDLs transmitted than commited to card???\n",dev->name);
+#endif
+
+ /* Free the skb of every completed PDL, advancing the ring head */
+ for( ; 0!=donecount; donecount-- )
+ {
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n",
+ dev->name,
+ (u_int) lp->txrhead->skb->data,
+ lp->txrcommit,
+ hp100_inb(TX_PDL),
+ donecount);
+#endif
+#ifdef LINUX_2_1
+ dev_kfree_skb( lp->txrhead->skb );
+#else
+ dev_kfree_skb( lp->txrhead->skb, FREE_WRITE );
+#endif
+ lp->txrhead->skb=(void *)NULL;
+ lp->txrhead=lp->txrhead->next;
+ lp->txrcommit--;
+ }
+}
+
+
+/* tx function for slave modes */
+/* tx function for slave modes (memory-mapped and programmed I/O).
+ * Copies the packet into the card's transmit memory (padding short
+ * frames up to HP100_MIN_PACKET_SIZE) and issues the TX command.
+ * Returns 0 on success, -EAGAIN when the card has no room yet (after
+ * possibly re-sensing/resetting the link), or -EIO with no link. */
+static int hp100_start_xmit( struct sk_buff *skb, struct device *dev )
+{
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ u_short val;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4212, TRACE );
+ printk("hp100: %s: start_xmit\n", dev->name);
+#endif
+
+ if ( skb==NULL )
+ {
+#ifndef LINUX_2_1
+ dev_tint( dev );
+#endif
+ return 0;
+ }
+
+ if ( skb->len <= 0 ) return 0;
+
+ if ( lp->lan_type < 0 ) /* no LAN type detected yet? */
+ {
+ hp100_stop_interface( dev );
+ if ( ( lp->lan_type = hp100_sense_lan( dev ) ) < 0 )
+ {
+ printk( "hp100: %s: no connection found - check wire\n", dev->name );
+ hp100_start_interface( dev ); /* 10Mb/s RX packets maybe handled */
+ return -EIO;
+ }
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE ); /* relogin */
+ hp100_start_interface( dev );
+ }
+
+ /* If there is not enough free memory on the card... */
+ i=hp100_inl(TX_MEM_FREE)&0x7fffffff;
+ if ( !(((i/2)-539)>(skb->len+16) && (hp100_inb(TX_PKT_CNT)<255)) )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i );
+#endif
+ /* not waited long enough since last failed tx try? */
+ if ( jiffies - dev->trans_start < HZ )
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: trans_start timing problem\n", dev->name);
+#endif
+ return -EAGAIN;
+ }
+ if ( lp->lan_type == HP100_LAN_100 && lp->hub_status < 0 )
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ {
+ printk( "hp100: %s: login to 100Mb/s hub retry\n", dev->name );
+ hp100_stop_interface( dev );
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ hp100_ints_off();
+ i = hp100_sense_lan( dev );
+ hp100_ints_on();
+ if ( i == HP100_LAN_ERR )
+ printk( "hp100: %s: link down detected\n", dev->name );
+ else
+ if ( lp->lan_type != i ) /* cable change! */
+ {
+ /* it's very hard - all network setting must be changed!!! */
+ printk( "hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name );
+ lp->lan_type = i;
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ printk( "hp100: %s: interface reset\n", dev->name );
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ udelay(1000);
+ }
+ }
+ dev->trans_start = jiffies;
+ return -EAGAIN;
+ }
+
+ /* Spin (bounded) until any previous TX command has been accepted */
+ for ( i=0; i<6000 && ( hp100_inb( OPTION_MSW ) & HP100_TX_CMD ); i++ )
+ {
+#ifdef HP100_DEBUG_TX
+ printk( "hp100: %s: start_xmit: busy\n", dev->name );
+#endif
+ }
+
+ hp100_ints_off();
+ val = hp100_inw( IRQ_STATUS );
+ /* Ack / clear the interrupt TX_COMPLETE interrupt - this interrupt is set
+ * when the current packet being transmitted on the wire is completed. */
+ hp100_outw( HP100_TX_COMPLETE, IRQ_STATUS );
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n",dev->name,val,hp100_inw(IRQ_MASK),(int)skb->len );
+#endif
+
+ /* Pad short frames up to the minimum packet size */
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+ hp100_outw( i, DATA32 ); /* tell card the total packet length */
+ hp100_outw( i, FRAGMENT_LEN ); /* and first/only fragment length */
+
+ if ( lp->mode==2 ) /* memory mapped */
+ {
+ if ( lp->mem_ptr_virt ) /* high pci memory was remapped */
+ {
+ /* Note: The J2585B needs alignment to 32bits here! */
+ memcpy( lp->mem_ptr_virt, skb->data, ( skb->len + 3 ) & ~3 );
+ if ( !ok_flag )
+ memset( lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len );
+ }
+ else
+ {
+ /* Note: The J2585B needs alignment to 32bits here! */
+ memcpy_toio( lp->mem_ptr_phys, skb->data, (skb->len + 3) & ~3 );
+ if ( !ok_flag )
+ memset_io( lp->mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb->len );
+ }
+ }
+ else /* programmed i/o */
+ {
+ outsl( ioaddr + HP100_REG_DATA32, skb->data, ( skb->len + 3 ) >> 2 );
+ if ( !ok_flag )
+ for ( i = ( skb->len + 3 ) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4 )
+ hp100_outl( 0, DATA32 );
+ }
+
+ hp100_outb( HP100_TX_CMD | HP100_SET_LB, OPTION_MSW ); /* send packet */
+
+ lp->stats.tx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.tx_bytes += skb->len;
+#endif
+ dev->trans_start=jiffies;
+ hp100_ints_on();
+
+ /* Packet data was copied to the card, so the skb can go now */
+#ifdef LINUX_2_1
+ dev_kfree_skb( skb );
+#else
+ dev_kfree_skb( skb, FREE_WRITE );
+#endif
+
+#ifdef HP100_DEBUG_TX
+ printk( "hp100: %s: start_xmit: end\n", dev->name );
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Receive Function (Non-Busmaster mode)
+ * Called when an "Receive Packet" interrupt occurs, i.e. the receive
+ * packet counter is non-zero.
+ * For non-busmaster, this function does the whole work of transfering
+ * the packet to the host memory and then up to higher layers via skb
+ * and netif_rx.
+ */
+
+/*
+ * Receive Function (Non-Busmaster mode)
+ * Called when an "Receive Packet" interrupt occurs, i.e. the receive
+ * packet counter is non-zero.
+ * For non-busmaster, this function does the whole work of transfering
+ * the packet to the host memory and then up to higher layers via skb
+ * and netif_rx.
+ */
+
+static void hp100_rx( struct device *dev )
+{
+ int packets, pkt_len;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ u_int header;
+ struct sk_buff *skb;
+
+/* Fix: was "#ifdef DEBUG_B", which never matched the HP100_DEBUG_B
+ * macro used by every other routine, so this trace point was dead. */
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4213, TRACE );
+ printk("hp100: %s: rx\n", dev->name);
+#endif
+
+ /* First get indication of received lan packet */
+ /* RX_PKT_CND indicates the number of packets which have been fully */
+ /* received onto the card but have not been fully transfered of the card */
+ packets = hp100_inb( RX_PKT_CNT );
+#ifdef HP100_DEBUG_RX
+ if ( packets > 1 )
+ printk( "hp100: %s: rx: waiting packets = %d\n", dev->name,packets );
+#endif
+
+ while ( packets-- > 0 )
+ {
+ /* If ADV_NXT_PKT is still set, we have to wait until the card has */
+ /* really advanced to the next packet. (bounded spin poll) */
+ for (pkt_len=0; pkt_len<6000 &&(hp100_inb(OPTION_MSW)&HP100_ADV_NXT_PKT);
+ pkt_len++ )
+ {
+#ifdef HP100_DEBUG_RX
+ printk( "hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets );
+#endif
+ }
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
+ if( lp->mode==2 ) /* memory mapped mode */
+ {
+ if ( lp->mem_ptr_virt ) /* if memory was remapped */
+ header = *(__u32 *)lp->mem_ptr_virt;
+ else
+ header = readl( lp->mem_ptr_phys );
+ }
+ else /* programmed i/o */
+ header = hp100_inl( DATA32 );
+
+ /* Round the packet length up to the next 32-bit boundary */
+ pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3;
+
+#ifdef HP100_DEBUG_RX
+ printk( "hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name,
+ header & HP100_PKT_LEN_MASK, (header>>16)&0xfff8,
+ (header>>16)&7);
+#endif
+
+ /* Now we allocate the skb and transfer the data into it. */
+ skb = dev_alloc_skb( pkt_len );
+ if ( skb == NULL ) /* Not enough memory->drop packet */
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: rx: couldn't allocate a sk_buff of size %d\n", dev->name, pkt_len );
+#endif
+ lp->stats.rx_dropped++;
+ }
+ else /* skb successfully allocated */
+ {
+ u_char *ptr;
+
+ skb->dev = dev;
+
+ /* ptr to start of the sk_buff data area */
+ ptr = (u_char *)skb_put( skb, pkt_len );
+
+ /* Now transfer the data from the card into that area */
+ if ( lp->mode==2 )
+ {
+ if ( lp->mem_ptr_virt )
+ memcpy( ptr, lp->mem_ptr_virt, pkt_len );
+ /* Note alignment to 32bit transfers */
+ else
+ memcpy_fromio( ptr, lp->mem_ptr_phys, pkt_len );
+ }
+ else /* io mapped */
+ insl( ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2 );
+
+ skb->protocol = eth_type_trans( skb, dev );
+
+ netif_rx( skb );
+ lp->stats.rx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.rx_bytes += skb->len;
+#endif
+
+#ifdef HP100_DEBUG_RX
+ printk( "hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ dev->name,
+ ptr[ 0 ], ptr[ 1 ], ptr[ 2 ], ptr[ 3 ], ptr[ 4 ], ptr[ 5 ],
+ ptr[ 6 ], ptr[ 7 ], ptr[ 8 ], ptr[ 9 ], ptr[ 10 ], ptr[ 11 ] );
+#endif
+ }
+
+ /* Indicate the card that we have got the packet */
+ hp100_outb( HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW );
+
+ /* Count multicast frames (hashed or exact multicast match) */
+ switch ( header & 0x00070000 ) {
+ case (HP100_MULTI_ADDR_HASH<<16):
+ case (HP100_MULTI_ADDR_NO_HASH<<16):
+ lp->stats.multicast++; break;
+ }
+ } /* end of while(there are packets) loop */
+#ifdef HP100_DEBUG_RX
+ printk( "hp100_rx: %s: end\n", dev->name );
+#endif
+}
+
+
+/*
+ * Receive Function for Busmaster Mode
+ */
+/*
+ * Receive Function for Busmaster Mode.
+ * Drains every completed Rx PDL (the card DMA'd the packet into the skb
+ * already), passes good frames upstream via netif_rx, and refills the
+ * ring tail with a fresh PDL for each one consumed.
+ */
+static void hp100_rx_bm( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ptr;
+ u_int header;
+ int pkt_len;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4214, TRACE );
+ printk("hp100: %s: rx_bm\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ /* Sanity checks: we should never be called with an empty ring, nor
+ * see more received packets than PDLs we handed out. */
+ if(0==lp->rxrcommit)
+ {
+ printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name);
+ return;
+ }
+ else
+
+ /* RX_PKT_CNT states how many PDLs are currently formatted and available to
+ * the cards BM engine */
+ if( (hp100_inw(RX_PKT_CNT)&0x00ff) >= lp->rxrcommit)
+ {
+ printk("hp100: %s: More packets received than commited? RX_PKT_CNT=0x%x, commit=0x%x\n", dev->name, hp100_inw(RX_PKT_CNT)&0x00ff, lp->rxrcommit);
+ return;
+ }
+#endif
+
+ /* RX_PDL holds the PDLs still pending; everything beyond that in our
+ * commit count has completed and can be processed. */
+ while( (lp->rxrcommit > hp100_inb(RX_PDL)) )
+ {
+ /*
+ * The packet was received into the pdl pointed to by lp->rxrhead (
+ * the oldest pdl in the ring
+ */
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
+
+ ptr=lp->rxrhead;
+
+ /* The 4-byte receive header was DMA'd into the word just before
+ * the PDH (see hp100_init_rxpdl). */
+ header = *(ptr->pdl-1);
+ pkt_len = (header & HP100_PKT_LEN_MASK);
+
+#ifdef HP100_DEBUG_BM
+ printk( "hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name,
+ (u_int) (ptr->pdl-1),(u_int) header,
+ pkt_len,
+ (header>>16)&0xfff8,
+ (header>>16)&7);
+ printk( "hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n",
+ dev->name,
+ hp100_inb( RX_PDL ),
+ hp100_inb( TX_PDL ),
+ hp100_inb( RX_PKT_CNT ),
+ (u_int) *(ptr->pdl),
+ (u_int) *(ptr->pdl+3),
+ (u_int) *(ptr->pdl+4));
+#endif
+
+ if( (pkt_len>=MIN_ETHER_SIZE) &&
+ (pkt_len<=MAX_ETHER_SIZE) )
+ {
+ if(ptr->skb==NULL)
+ {
+ printk("hp100: %s: rx_bm: skb null\n", dev->name);
+ /* can happen if we only allocated room for the pdh due to memory shortage. */
+ lp->stats.rx_dropped++;
+ }
+ else
+ {
+ skb_trim( ptr->skb, pkt_len ); /* Shorten it */
+ ptr->skb->protocol = eth_type_trans( ptr->skb, dev );
+
+ netif_rx( ptr->skb ); /* Up and away... */
+
+ lp->stats.rx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.rx_bytes += ptr->skb->len;
+#endif
+ }
+
+ /* Count multicast frames (hashed or exact multicast match) */
+ switch ( header & 0x00070000 ) {
+ case (HP100_MULTI_ADDR_HASH<<16):
+ case (HP100_MULTI_ADDR_NO_HASH<<16):
+ lp->stats.multicast++; break;
+ }
+ }
+ else
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n",dev->name,pkt_len);
+#endif
+ if(ptr->skb!=NULL)
+#ifdef LINUX_2_1
+ dev_kfree_skb( ptr->skb );
+#else
+ dev_kfree_skb( ptr->skb, FREE_READ );
+#endif
+ lp->stats.rx_errors++;
+ }
+
+ lp->rxrhead=lp->rxrhead->next;
+
+ /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */
+ if (0 == hp100_build_rx_pdl( lp->rxrtail, dev ))
+ {
+ /* No space for skb, header can still be received. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name);
+#endif
+ return;
+ }
+ else
+ { /* successfully allocated new PDL - put it in ringlist at tail. */
+ hp100_outl((u32)lp->rxrtail->pdl_paddr, RX_PDA);
+ lp->rxrtail=lp->rxrtail->next;
+ }
+
+ }
+}
+
+
+
+/*
+ * statistics
+ */
+/* Return the device statistics, first pulling the latest counters from
+ * the hardware with interrupts masked (the counters clear on read, so
+ * the update must not race the interrupt handler). */
+static hp100_stats_t *hp100_get_stats( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4215, TRACE );
+#endif
+
+ hp100_ints_off();
+ hp100_update_stats( dev );
+ hp100_ints_on();
+ return &((struct hp100_private *)dev->priv)->stats;
+}
+
+/* Accumulate the hardware error counters (MAC_CTRL page) into the
+ * driver's stats structure.  Must be called with board interrupts off;
+ * leaves the PERFORMANCE page selected on exit. */
+static void hp100_update_stats( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ u_short val;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4216, TRACE );
+ printk("hp100: %s: update-stats\n", dev->name);
+#endif
+
+ /* Note: Statistics counters clear when read. */
+ hp100_page( MAC_CTRL );
+ val = hp100_inw( DROPPED ) & 0x0fff;
+ lp->stats.rx_errors += val;
+ lp->stats.rx_over_errors += val;
+ val = hp100_inb( CRC );
+ lp->stats.rx_errors += val;
+ lp->stats.rx_crc_errors += val;
+ val = hp100_inb( ABORT );
+ lp->stats.tx_errors += val;
+ lp->stats.tx_aborted_errors += val;
+ hp100_page( PERFORMANCE );
+}
+
+/* Handler for the card's "Misc." interrupt (e.g. bus failures enabled
+ * via HP100_EN_BUS_FAIL): just book one rx and one tx error. */
+static void hp100_misc_interrupt( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ /* NOTE(review): trace id 0x4216 duplicates hp100_update_stats' id;
+ * every other routine uses a unique code - looks like a copy-paste
+ * leftover, confirm intended value. */
+ hp100_outw( 0x4216, TRACE );
+ printk("hp100: %s: misc_interrupt\n", dev->name);
+#endif
+
+ /* Attribute the event to both directions, since the source of the
+ * miscellaneous error is not decoded here. */
+ lp->stats.rx_errors++;
+ lp->stats.tx_errors++;
+}
+
+/* Discard any pending hardware statistics by reading the MAC_CTRL page
+ * counters (they clear on read) without accumulating them.  Runs with
+ * local interrupts disabled so the paged register access is atomic.
+ * Note: receives only the I/O base, not the struct device. */
+static void hp100_clear_stats( int ioaddr )
+{
+ unsigned long flags;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4217, TRACE );
+ /* Fix: previous code printed dev->name, but no 'dev' is in scope in
+ * this function - that broke the build whenever HP100_DEBUG_B was
+ * defined. Print without the interface name instead. */
+ printk("hp100: clear_stats\n");
+#endif
+
+ save_flags( flags );
+ cli();
+ hp100_page( MAC_CTRL ); /* get all statistics bytes */
+ hp100_inw( DROPPED );
+ hp100_inb( CRC );
+ hp100_inb( ABORT );
+ hp100_page( PERFORMANCE );
+ restore_flags( flags );
+}
+
+
+/*
+ * multicast setup
+ */
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void hp100_set_multicast_list( struct device *dev )
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4218, TRACE );
+ printk("hp100: %s: set_mc_list\n", dev->name);
+#endif
+
+ save_flags( flags );
+ cli();
+ hp100_ints_off();
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 ); /* stop rx/tx */
+
+ if ( dev->flags & IFF_PROMISC )
+ {
+ lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
+ lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
+ memset( &lp->hash_bytes, 0xff, 8 );
+ }
+ else if ( dev->mc_count || (dev->flags&IFF_ALLMULTI) )
+ {
+ lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
+ lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
+#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
+ if ( dev -> flags & IFF_ALLMULTI )
+ {
+ /* set hash filter to receive all multicast packets */
+ memset( &lp->hash_bytes, 0xff, 8 );
+ }
+ else
+ {
+ int i, j, idx;
+ u_char *addrs;
+ struct dev_mc_list *dmi;
+
+ memset( &lp->hash_bytes, 0x00, 8 );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: computing hash filter - mc_count = %i\n", dev -> name, dev -> mc_count );
+#endif
+ for ( i = 0, dmi = dev -> mc_list; i < dev -> mc_count; i++, dmi = dmi -> next )
+ {
+ addrs = dmi -> dmi_addr;
+ if ( ( *addrs & 0x01 ) == 0x01 ) /* multicast address? */
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast = %02x:%02x:%02x:%02x:%02x:%02x, ",
+ dev -> name,
+ addrs[ 0 ], addrs[ 1 ], addrs[ 2 ],
+ addrs[ 3 ], addrs[ 4 ], addrs[ 5 ] );
+#endif
+ for ( j = idx = 0; j < 6; j++ )
+ {
+ idx ^= *addrs++ & 0x3f;
+ /* printk( ":%02x:", idx ); */ /* debug only - keep out of non-debug builds */
+ }
+#ifdef HP100_DEBUG
+ printk("idx = %i\n", idx );
+#endif
+ lp->hash_bytes[ idx >> 3 ] |= ( 1 << ( idx & 7 ) );
+ }
+ }
+ }
+#else
+ memset( &lp->hash_bytes, 0xff, 8 );
+#endif
+ }
+ else
+ {
+ lp->mac2_mode = HP100_MAC2MODE3; /* normal mode = get packets for me */
+ lp->mac1_mode = HP100_MAC1MODE3; /* and broadcasts */
+ memset( &lp->hash_bytes, 0x00, 8 );
+ }
+
+ if ( ( (hp100_inb(MAC_CFG_1) & 0x0f)!=lp->mac1_mode ) ||
+ ( hp100_inb(MAC_CFG_2)!=lp->mac2_mode ) )
+ {
+ int i;
+
+ hp100_outb( lp->mac2_mode, MAC_CFG_2 );
+ hp100_andb( HP100_MAC1MODEMASK, MAC_CFG_1 ); /* clear mac1 mode bits */
+ hp100_orb( lp->mac1_mode, MAC_CFG_1 ); /* and set the new mode */
+
+ hp100_page( MAC_ADDRESS );
+ for ( i = 0; i < 8; i++ )
+ hp100_outb( lp->hash_bytes[ i ], HASH_BYTE0 + i );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, lp->mac1_mode, lp->mac2_mode,
+ lp->hash_bytes[ 0 ], lp->hash_bytes[ 1 ],
+ lp->hash_bytes[ 2 ], lp->hash_bytes[ 3 ],
+ lp->hash_bytes[ 4 ], lp->hash_bytes[ 5 ],
+ lp->hash_bytes[ 6 ], lp->hash_bytes[ 7 ]
+ );
+#endif
+
+ if(lp->lan_type==HP100_LAN_100)
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status=hp100_login_to_vg_hub( dev, TRUE ); /* force a relogin to the hub */
+ }
+ }
+ else
+ {
+ int i;
+ u_char old_hash_bytes[ 8 ];
+
+ hp100_page( MAC_ADDRESS );
+ for ( i = 0; i < 8; i++ )
+ old_hash_bytes[ i ] = hp100_inb( HASH_BYTE0 + i );
+ if ( memcmp( old_hash_bytes, &lp->hash_bytes, 8 ) )
+ {
+ for ( i = 0; i < 8; i++ )
+ hp100_outb( lp->hash_bytes[ i ], HASH_BYTE0 + i );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ lp->hash_bytes[ 0 ], lp->hash_bytes[ 1 ],
+ lp->hash_bytes[ 2 ], lp->hash_bytes[ 3 ],
+ lp->hash_bytes[ 4 ], lp->hash_bytes[ 5 ],
+ lp->hash_bytes[ 6 ], lp->hash_bytes[ 7 ]
+ );
+#endif
+
+ if(lp->lan_type==HP100_LAN_100)
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status=hp100_login_to_vg_hub( dev, TRUE ); /* force a relogin to the hub */
+ }
+ }
+ }
+
+ hp100_page( MAC_CTRL );
+ hp100_orb( HP100_RX_EN | HP100_RX_IDLE | /* enable rx */
+ HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1 ); /* enable tx */
+
+ hp100_page( PERFORMANCE );
+ hp100_ints_on();
+ restore_flags( flags );
+}
+
+
+/*
+ * hardware interrupt handling
+ */
+
+static void hp100_interrupt( int irq, void *dev_id, struct pt_regs *regs )
+{
+ struct device *dev = (struct device *)dev_id;
+ struct hp100_private *lp;
+ int ioaddr;
+ u_int val;
+
+ if ( dev == NULL ) return;
+ lp = (struct hp100_private *)dev->priv; /* only touch dev after NULL check */
+ ioaddr = dev->base_addr;
+
+ if ( dev->interrupt )
+ printk( "hp100: %s: re-entering the interrupt handler\n", dev->name );
+ hp100_ints_off();
+ dev->interrupt = 1; /* mark that we are inside the handler */
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4219, TRACE );
+#endif
+
+ /* hp100_page( PERFORMANCE ); */
+ val = hp100_inw( IRQ_STATUS );
+#ifdef HP100_DEBUG_IRQ
+ printk( "hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x TXPKTCNT=0x%.2x TXPDL=0x%.2x\n",
+ dev->name,
+ lp->mode,
+ (u_int)val,
+ hp100_inb( RX_PKT_CNT ),
+ hp100_inb( RX_PDL ),
+ hp100_inb( TX_PKT_CNT ),
+ hp100_inb( TX_PDL )
+ );
+#endif
+
+ if(val==0) /* might be a shared interrupt */
+ {
+ dev->interrupt=0;
+ hp100_ints_on();
+ return;
+ }
+ /* We're only interested in those interrupts we really enabled. */
+ /* val &= hp100_inw( IRQ_MASK ); */
+
+ /*
+ * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL
+ * is considered executed whenever the RX_PDL data structure is no longer
+ * needed.
+ */
+ if ( val & HP100_RX_PDL_FILL_COMPL )
+ {
+ if(lp->mode==1)
+ hp100_rx_bm( dev );
+ else
+ {
+ printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name);
+ }
+ }
+
+ /*
+ * The RX_PACKET interrupt is set, when the receive packet counter is
+ * non zero. We use this interrupt for receiving in slave mode. In
+ * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill
+ * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then
+ * we somehow have missed a rx_pdl_fill_compl interrupt.
+ */
+
+ if ( val & HP100_RX_PACKET ) /* Receive Packet Counter is non zero */
+ {
+ if(lp->mode!=1) /* non busmaster */
+ hp100_rx( dev );
+ else if ( !(val & HP100_RX_PDL_FILL_COMPL ))
+ {
+ /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt? */
+ hp100_rx_bm( dev );
+ }
+ }
+
+ /*
+ * Ack. that we have noticed the interrupt and thereby allow next one.
+ * Note that this is now done after the slave rx function, since first
+ * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt
+ * on the J2573.
+ */
+ hp100_outw( val, IRQ_STATUS );
+
+ /*
+ * RX_ERROR is set when a packet is dropped due to no memory resources on
+ * the card or when a RCV_ERR occurs.
+ * TX_ERROR is set when a TX_ABORT condition occurs in the MAC->exists
+ * only in the 802.3 MAC and happens when 16 collisions occur during a TX
+ */
+ if ( val & ( HP100_TX_ERROR | HP100_RX_ERROR ) )
+ {
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: TX/RX Error IRQ\n", dev->name);
+#endif
+ hp100_update_stats( dev );
+ if(lp->mode==1)
+ {
+ hp100_rxfill( dev );
+ hp100_clean_txring( dev );
+ }
+ }
+
+ /*
+ * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero.
+ */
+ if ( (lp->mode==1)&&(val &(HP100_RX_PDA_ZERO)) )
+ hp100_rxfill( dev );
+
+ /*
+ * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire
+ * is completed
+ */
+ if ( (lp->mode==1) && ( val & ( HP100_TX_COMPLETE )) )
+ hp100_clean_txring( dev );
+
+ /*
+ * MISC_ERROR is set when either the LAN link goes down or a detected
+ * bus error occurs.
+ */
+ if ( val & HP100_MISC_ERROR ) /* New for J2585B */
+ {
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: Misc. Error Interrupt - Check cabling.\n", dev->name);
+#endif
+ if(lp->mode==1)
+ {
+ hp100_clean_txring( dev );
+ hp100_rxfill( dev );
+ }
+ hp100_misc_interrupt( dev );
+ }
+
+ dev->interrupt = 0;
+ hp100_ints_on();
+}
+
+
+/*
+ * some misc functions
+ */
+
+static void hp100_start_interface( struct device *dev )
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4220, TRACE );
+ printk("hp100: %s: hp100_start_interface\n",dev->name);
+#endif
+
+ save_flags( flags );
+ cli();
+
+ /* Ensure the adapter does not want to request an interrupt when */
+ /* enabling the IRQ line to be active on the bus (i.e. not tri-stated) */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack all IRQs */
+ hp100_outw( HP100_FAKE_INT|HP100_INT_EN|HP100_RESET_LB, OPTION_LSW);
+ /* Un Tri-state int. TODO: Check if shared interrupts can be realised? */
+ hp100_outw( HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW );
+
+ if(lp->mode==1)
+ {
+ /* Make sure BM bit is set... */
+ hp100_page(HW_MAP);
+ hp100_orb( HP100_BM_MASTER, BM );
+ hp100_rxfill( dev );
+ }
+ else if(lp->mode==2)
+ {
+ /* Enable memory mapping. Note: Don't do this when busmaster. */
+ hp100_outw( HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW );
+ }
+
+ hp100_page(PERFORMANCE);
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack IRQ */
+
+ /* enable a few interrupts: */
+ if(lp->mode==1) /* busmaster mode */
+ {
+ hp100_outw( HP100_RX_PDL_FILL_COMPL |
+ HP100_RX_PDA_ZERO |
+ HP100_RX_ERROR |
+ /* HP100_RX_PACKET | */
+ /* HP100_RX_EARLY_INT | */ HP100_SET_HB |
+ /* HP100_TX_PDA_ZERO | */
+ HP100_TX_COMPLETE |
+ /* HP100_MISC_ERROR | */
+ HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK );
+ }
+ else
+ {
+ hp100_outw( HP100_RX_PACKET |
+ HP100_RX_ERROR | HP100_SET_HB |
+ HP100_TX_ERROR | HP100_SET_LB , IRQ_MASK );
+ }
+
+ /* Enable MAC Tx and RX, set MAC modes, ... */
+ hp100_set_multicast_list( dev );
+
+ restore_flags( flags );
+}
+
+
+static void hp100_stop_interface( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ u_int val;
+
+#ifdef HP100_DEBUG_B
+ printk("hp100: %s: hp100_stop_interface\n",dev->name);
+ hp100_outw( 0x4221, TRACE );
+#endif
+
+ if (lp->mode==1)
+ hp100_BM_shutdown( dev );
+ else
+ {
+ /* Note: MMAP_DIS will be reenabled by start_interface */
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB |
+ HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW );
+ val = hp100_inw( OPTION_LSW );
+
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 );
+
+ if ( !(val & HP100_HW_RST) ) return; /* If reset, imm. return ... */
+ /* ... else: busy wait until idle */
+ for ( val = 0; val < 6000; val++ )
+ if ( ( hp100_inb( MAC_CFG_1 ) & (HP100_TX_IDLE | HP100_RX_IDLE) ) ==
+ (HP100_TX_IDLE | HP100_RX_IDLE) )
+ {
+ hp100_page(PERFORMANCE);
+ return;
+ }
+ printk( "hp100: %s: hp100_stop_interface - timeout\n", dev->name );
+ hp100_page(PERFORMANCE);
+ }
+}
+
+
+static void hp100_load_eeprom( struct device *dev, u_short probe_ioaddr )
+{
+ int i;
+ int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4222, TRACE );
+#endif
+
+ hp100_page( EEPROM_CTRL );
+ hp100_andw( ~HP100_EEPROM_LOAD, EEPROM_CTRL );
+ hp100_orw( HP100_EEPROM_LOAD, EEPROM_CTRL );
+ for ( i = 0; i < 10000; i++ )
+ if ( !( hp100_inb( OPTION_MSW ) & HP100_EE_LOAD ) ) return;
+ printk( "hp100: %s: hp100_load_eeprom - timeout\n", dev->name );
+}
+
+
+/* Sense connection status.
+ * return values: LAN_10 - Connected to 10Mbit/s network
+ * LAN_100 - Connected to 100Mbit/s network
+ * LAN_ERR - not connected or 100Mbit/s Hub down
+ */
+static int hp100_sense_lan( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ u_short val_VG, val_10;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4223, TRACE );
+#endif
+
+ hp100_page( MAC_CTRL );
+ val_10 = hp100_inb( 10_LAN_CFG_1 );
+ val_VG = hp100_inb( VG_LAN_CFG_1 );
+ hp100_page( PERFORMANCE );
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n", dev->name, val_VG, val_10 );
+#endif
+
+ if ( val_10 & HP100_LINK_BEAT_ST ) /* 10Mb connection is active */
+ return HP100_LAN_10;
+
+ if ( val_10 & HP100_AUI_ST ) /* have we BNC or AUI onboard? */
+ {
+ val_10 |= HP100_AUI_SEL | HP100_LOW_TH;
+ hp100_page( MAC_CTRL );
+ hp100_outb( val_10, 10_LAN_CFG_1 );
+ hp100_page( PERFORMANCE );
+ return HP100_LAN_10;
+ }
+
+ if ( (lp->id->id == 0x02019F022) ||
+ (lp->id->id == 0x01042103c) ||
+ (lp->id->id == 0x01040103c) )
+ return HP100_LAN_ERR; /* Those cards don't have a 100 Mbit connector */
+
+ if ( val_VG & HP100_LINK_CABLE_ST ) /* Can hear the HUBs tone. */
+ return HP100_LAN_100;
+ return HP100_LAN_ERR;
+}
+
+
+
+static int hp100_down_vg_link( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long time;
+ long savelan, newlan;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4224, TRACE );
+ printk("hp100: %s: down_vg_link\n", dev->name);
+#endif
+
+ hp100_page( MAC_CTRL );
+ time=jiffies+(HZ/4);
+ do{
+ if ( hp100_inb( VG_LAN_CFG_1 ) & HP100_LINK_CABLE_ST ) break;
+ } while (time>jiffies);
+
+ if ( jiffies >= time ) /* no signal->no logout */
+ return 0;
+
+ /* Drop the VG Link by clearing the link up cmd and load addr.*/
+
+ hp100_andb( ~( HP100_LOAD_ADDR| HP100_LINK_CMD), VG_LAN_CFG_1);
+ hp100_orb( HP100_VG_SEL, VG_LAN_CFG_1);
+
+ /* Conditionally stall for >250ms on Link-Up Status (to go down) */
+ time=jiffies+(HZ/2);
+ do{
+ if ( !(hp100_inb( VG_LAN_CFG_1) & HP100_LINK_UP_ST) ) break;
+ } while(time>jiffies);
+
+#ifdef HP100_DEBUG
+ if (jiffies>=time)
+ printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name);
+#endif
+
+ /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */
+ /* logout under traffic (even though all the status bits are cleared), */
+ /* do this workaround to get the Rev 1 MAC in its idle state */
+ if ( lp->chip==HP100_CHIPID_LASSEN )
+ {
+ /* Reset VG MAC to insure it leaves the logoff state even if */
+ /* the Hub is still emitting tones */
+ hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1);
+ udelay(1500); /* wait for >1ms */
+ hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); /* Release Reset */
+ udelay(1500);
+ }
+
+ /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */
+ /* to get the VG mac to full reset. This is not req.d with later chips */
+ /* Note: It will take the between 1 and 2 seconds for the VG mac to be */
+ /* selected again! This will be left to the connect hub function to */
+ /* perform if desired. */
+ if (lp->chip==HP100_CHIPID_LASSEN)
+ {
+ /* Have to write to 10 and 100VG control registers simultaneously */
+ savelan=newlan=hp100_inl(10_LAN_CFG_1); /* read 10+100 LAN_CFG regs */
+ newlan &= ~(HP100_VG_SEL<<16);
+ newlan |= (HP100_DOT3_MAC)<<8;
+ hp100_andb( ~HP100_AUTO_MODE, MAC_CFG_3); /* Autosel off */
+ hp100_outl(newlan, 10_LAN_CFG_1);
+
+ /* Conditionally stall for 5sec on VG selected. */
+ time=jiffies+(HZ*5);
+ do{
+ if( !(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST) ) break;
+ } while(time>jiffies);
+
+ hp100_orb( HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */
+ hp100_outl(savelan, 10_LAN_CFG_1);
+ }
+
+ time=jiffies+(3*HZ); /* Timeout 3s */
+ do {
+ if ( (hp100_inb( VG_LAN_CFG_1 )&HP100_LINK_CABLE_ST) == 0) break;
+ } while (time>jiffies);
+
+ if(time<=jiffies)
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: down_vg_link: timeout\n", dev->name );
+#endif
+ return -EIO;
+ }
+
+ time=jiffies+(2*HZ); /* This seems to take a while.... */
+ do {} while (time>jiffies);
+
+ return 0;
+}
+
+
+static int hp100_login_to_vg_hub( struct device *dev, u_short force_relogin )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ u_short val=0;
+ unsigned long time;
+ int startst;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4225, TRACE );
+ printk("hp100: %s: login_to_vg_hub\n", dev->name);
+#endif
+
+ /* Initiate a login sequence iff VG MAC is enabled and either Load Address
+ * bit is zero or the force relogin flag is set (e.g. due to MAC address or
+ * promiscuous mode change)
+ */
+ hp100_page( MAC_CTRL );
+ startst=hp100_inb( VG_LAN_CFG_1 );
+ if((force_relogin==TRUE)||(hp100_inb( MAC_CFG_4 )&HP100_MAC_SEL_ST))
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Start training\n", dev->name);
+#endif
+
+ /* Ensure VG Reset bit is 1 (i.e., do not reset)*/
+ hp100_orb( HP100_VG_RESET , VG_LAN_CFG_1 );
+
+ /* If Lassen AND auto-select-mode AND VG tones were sensed on */
+ /* entry then temporarily put them into force 100Mbit mode */
+ if((lp->chip==HP100_CHIPID_LASSEN)&&( startst & HP100_LINK_CABLE_ST ) )
+ hp100_andb( ~HP100_DOT3_MAC, 10_LAN_CFG_2 );
+
+ /* Drop the VG link by zeroing Link Up Command and Load Address */
+ hp100_andb( ~(HP100_LINK_CMD/* |HP100_LOAD_ADDR */), VG_LAN_CFG_1);
+
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Bring down the link\n", dev->name);
+#endif
+
+ /* Wait for link to drop */
+ time = jiffies + (HZ/10);
+ do {
+ if ( !(hp100_inb( VG_LAN_CFG_1 ) & HP100_LINK_UP_ST) ) break;
+ } while (time>jiffies);
+
+ /* Start an addressed training and optionally request promiscuous port */
+ if ( (dev->flags) & IFF_PROMISC )
+ {
+ hp100_orb( HP100_PROM_MODE, VG_LAN_CFG_2);
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ hp100_orw( HP100_MACRQ_PROMSC, TRAIN_REQUEST );
+ }
+ else
+ {
+ hp100_andb( ~HP100_PROM_MODE, VG_LAN_CFG_2);
+ /* For ETR parts we need to reset the prom. bit in the training
+ * register, otherwise promiscuous mode won't be disabled.
+ */
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ hp100_andw( ~HP100_MACRQ_PROMSC, TRAIN_REQUEST );
+ }
+ }
+
+ /* With ETR parts, frame format request bits can be set. */
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ hp100_orb( HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST);
+
+ hp100_orb( HP100_LINK_CMD|HP100_LOAD_ADDR|HP100_VG_RESET, VG_LAN_CFG_1);
+
+ /* Note: Next wait could be omitted for Hood and earlier chips under */
+ /* certain circumstances */
+ /* TODO: check if hood/earlier and skip wait. */
+
+ /* Wait for either short timeout for VG tones or long for login */
+ /* Wait for the card hardware to signalise link cable status ok... */
+ hp100_page( MAC_CTRL );
+ time = jiffies + ( 1*HZ ); /* 1 sec timeout for cable st */
+ do {
+ if ( hp100_inb( VG_LAN_CFG_1 ) & HP100_LINK_CABLE_ST ) break;
+ } while ( jiffies < time );
+
+ if ( jiffies >= time )
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: Link cable status not ok? Training aborted.\n", dev->name );
+#endif
+ }
+ else
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: HUB tones detected. Trying to train.\n", dev->name);
+#endif
+
+ time = jiffies + ( 2*HZ ); /* again a timeout */
+ do {
+ val = hp100_inb( VG_LAN_CFG_1 );
+ if ( (val & ( HP100_LINK_UP_ST )) )
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: Passed training.\n", dev->name);
+#endif
+ break;
+ }
+ } while ( time > jiffies );
+ }
+
+ /* If LINK_UP_ST is set, then we are logged into the hub. */
+ if ( (jiffies<=time) && (val & HP100_LINK_UP_ST) )
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: Successfully logged into the HUB.\n", dev->name);
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ val = hp100_inw(TRAIN_ALLOW);
+ printk( "hp100: %s: Card supports 100VG MAC Version \"%s\" ",
+ dev->name,(hp100_inw(TRAIN_REQUEST)&HP100_CARD_MACVER) ? "802.12" : "Pre");
+ printk( "Driver will use MAC Version \"%s\"\n",
+ ( val & HP100_HUB_MACVER) ? "802.12" : "Pre" );
+ printk( "hp100: %s: Frame format is %s.\n",dev->name,(val&HP100_MALLOW_FRAMEFMT)?"802.5":"802.3");
+ }
+#endif
+ }
+ else
+ {
+ /* If LINK_UP_ST is not set, login was not successful */
+ printk("hp100: %s: Problem logging into the HUB.\n",dev->name);
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ /* Check allowed Register to find out why there is a problem. */
+ val = hp100_inw( TRAIN_ALLOW ); /* wont work on non-ETR card */
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val);
+#endif
+ if ( val & HP100_MALLOW_ACCDENIED )
+ printk("hp100: %s: HUB access denied.\n", dev->name);
+ if ( val & HP100_MALLOW_CONFIGURE )
+ printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name);
+ if ( val & HP100_MALLOW_DUPADDR )
+ printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name);
+ }
+ }
+
+ /* If we have put the chip into forced 100 Mbit mode earlier, go back */
+ /* to auto-select mode */
+
+ if( (lp->chip==HP100_CHIPID_LASSEN)&&(startst & HP100_LINK_CABLE_ST) )
+ {
+ hp100_page( MAC_CTRL );
+ hp100_orb( HP100_DOT3_MAC, 10_LAN_CFG_2 );
+ }
+
+ val=hp100_inb(VG_LAN_CFG_1);
+
+ /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */
+ hp100_page(PERFORMANCE);
+ hp100_outw( HP100_MISC_ERROR, IRQ_STATUS);
+
+ if (val&HP100_LINK_UP_ST)
+ return(0); /* login was ok */
+ else
+ {
+ printk("hp100: %s: Training failed.\n", dev->name);
+ hp100_down_vg_link( dev );
+ return -EIO;
+ }
+ }
+ /* no forced relogin & already link there->no training. */
+ return -EIO;
+}
+
+
+static void hp100_cascade_reset( struct device *dev, u_short enable )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int i;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4226, TRACE );
+ printk("hp100: %s: cascade_reset\n", dev->name);
+#endif
+
+ if (enable==TRUE)
+ {
+ hp100_outw( HP100_HW_RST | HP100_RESET_LB, OPTION_LSW );
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ /* Lassen requires a PCI transmit fifo reset */
+ hp100_page( HW_MAP );
+ hp100_andb( ~HP100_PCI_RESET, PCICTRL2 );
+ hp100_orb( HP100_PCI_RESET, PCICTRL2 );
+ /* Wait for min. 300 ns */
+ /* we cant use jiffies here, because it may be */
+ /* that we have disabled the timer... */
+ for (i=0; i<0xffff; i++);
+ hp100_andb( ~HP100_PCI_RESET, PCICTRL2 );
+ hp100_page( PERFORMANCE );
+ }
+ }
+ else
+ { /* bring out of reset */
+ hp100_outw(HP100_HW_RST|HP100_SET_LB, OPTION_LSW);
+ for (i=0; i<0xffff; i++ );
+ hp100_page(PERFORMANCE);
+ }
+}
+
+#ifdef HP100_DEBUG
+void hp100_RegisterDump( struct device *dev )
+{
+ int ioaddr=dev->base_addr;
+ int Page;
+ int Register;
+
+ /* Dump common registers */
+ printk("hp100: %s: Cascade Register Dump\n", dev->name);
+ printk("hardware id #1: 0x%.2x\n",hp100_inb(HW_ID));
+ printk("hardware id #2/paging: 0x%.2x\n",hp100_inb(PAGING));
+ printk("option #1: 0x%.4x\n",hp100_inw(OPTION_LSW));
+ printk("option #2: 0x%.4x\n",hp100_inw(OPTION_MSW));
+
+ /* Dump paged registers */
+ for (Page = 0; Page < 8; Page++)
+ {
+ /* Dump registers */
+ printk("page: 0x%.2x\n",Page);
+ outw( Page, ioaddr+0x02);
+ for (Register = 0x8; Register < 0x22; Register += 2)
+ {
+ /* Display Register contents except data port */
+ if (((Register != 0x10) && (Register != 0x12)) || (Page > 0))
+ {
+ printk("0x%.2x = 0x%.4x\n",Register,inw(ioaddr+Register));
+ }
+ }
+ }
+ hp100_page(PERFORMANCE);
+}
+#endif
+
+
+
+/*
+ * module section
+ */
+
+#ifdef MODULE
+
+/* Parameters set by insmod */
+int hp100_port[5] = { 0, -1, -1, -1, -1 };
+#ifdef LINUX_2_1
+MODULE_PARM(hp100_port, "1-5i");
+#endif
+
+#ifdef LINUX_2_1
+char hp100_name[5][IFNAMSIZ] = { "", "", "", "", "" };
+MODULE_PARM(hp100_name, "1-5c" __MODULE_STRING(IFNAMSIZ));
+#else
+static char devname[5][IFNAMSIZ] = { "", "", "", "", "" };
+static char *hp100_name[5] = { devname[0], devname[1],
+ devname[2], devname[3],
+ devname[4] };
+#endif
+
+/* List of devices */
+static struct device *hp100_devlist[5] = { NULL, NULL, NULL, NULL, NULL };
+
+/*
+ * Note: if you have more than five 100vg cards in your pc, feel free to
+ * increase this value
+ */
+
+/*
+ * Note: to register three eisa or pci devices, use:
+ * option hp100 hp100_port=0,0,0
+ * to register one card at io 0x280 as eth239, use:
+ * option hp100 hp100_port=0x280 hp100_name=eth239
+ */
+
+int init_module( void )
+{
+ int i, cards;
+
+ if (hp100_port[0] == 0 && !EISA_bus && !pcibios_present())
+ printk("hp100: You should not use auto-probing with insmod!\n");
+
+ /* Loop on all possible base addresses */
+ i = -1; cards = 0;
+ while((hp100_port[++i] != -1) && (i < 5))
+ {
+ /* Create device and set basics args */
+ hp100_devlist[i] = kmalloc(sizeof(struct device), GFP_KERNEL);
+ memset(hp100_devlist[i], 0x00, sizeof(struct device));
+ hp100_devlist[i]->name = hp100_name[i];
+ hp100_devlist[i]->base_addr = hp100_port[i];
+ hp100_devlist[i]->init = &hp100_probe;
+
+ /* Try to create the device */
+ if(register_netdev(hp100_devlist[i]) != 0)
+ {
+ /* DeAllocate everything */
+ /* Note: if dev->priv is mallocated, there is no way to fail */
+ kfree_s(hp100_devlist[i], sizeof(struct device));
+ hp100_devlist[i] = (struct device *) NULL;
+ }
+ else
+ cards++;
+ } /* Loop over all devices */
+
+ return cards > 0 ? 0 : -ENODEV;
+}
+
+void cleanup_module( void )
+{
+ int i;
+
+ /* TODO: Check if all skb's are released/freed. */
+ for(i = 0; i < 5; i++)
+ if(hp100_devlist[i] != (struct device *) NULL)
+ {
+ unregister_netdev( hp100_devlist[i] );
+ release_region( hp100_devlist[i]->base_addr, HP100_REGION_SIZE );
+ if( ((struct hp100_private *)hp100_devlist[i]->priv)->mode==1 ) /* busmaster */
+ kfree_s( ((struct hp100_private *)hp100_devlist[i]->priv)->page_vaddr, MAX_RINGSIZE+0x0f);
+ if ( ((struct hp100_private *)hp100_devlist[i]->priv) -> mem_ptr_virt )
+ iounmap( ((struct hp100_private *)hp100_devlist[i]->priv) -> mem_ptr_virt );
+ kfree_s( hp100_devlist[i]->priv, sizeof( struct hp100_private ) );
+ hp100_devlist[i]->priv = NULL;
+ kfree_s(hp100_devlist[i], sizeof(struct device));
+ hp100_devlist[i] = (struct device *) NULL;
+ }
+}
+
+#endif /* MODULE */
+
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp100.c"
+ * c-indent-level: 2
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp100.h b/linux/src/drivers/net/hp100.h
new file mode 100644
index 0000000..e1884aa
--- /dev/null
+++ b/linux/src/drivers/net/hp100.h
@@ -0,0 +1,626 @@
+/*
+ * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
+ *
+ * $Id: hp100.h,v 1.1 1999/04/26 05:52:20 tb Exp $
+ *
+ * Authors: Jaroslav Kysela, <perex@pf.jcu.cz>
+ * Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de>
+ *
+ * This driver is based on the 'hpfepkt' crynwr packet driver.
+ *
+ * This source/code is public free; you can distribute it and/or modify
+ * it under terms of the GNU General Public License (published by the
+ * Free Software Foundation) either version two of this License, or any
+ * later version.
+ */
+
+/****************************************************************************
+ * Hardware Constants
+ ****************************************************************************/
+
+/*
+ * Page Identifiers
+ * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02)
+ */
+
+#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */
+#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */
+#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */
+#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */
+#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */
+#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */
+#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */
+#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */
+
+
+/* Registers that are present on all pages */
+
+#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */
+#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */
+#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */
+ /* W: (16),3:0 Switch pages */
+#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */
+#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */
+
+/* Page 0 - Performance */
+
+#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */
+#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */
+#define HP100_REG_FRAGMENT_LEN 0x0c /* W: (16)12:0 Current fragment len */
+/* Note: For 32 bit systems, fragment len and offset registers are available */
+/* at offset 0x28 and 0x2c, where they can be written as 32bit values. */
+#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */
+#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */
+#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */
+#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */
+#define HP100_REG_TX_PDA_L 0x14 /* W: (32) BM: Ptr to PDL, Low Pri */
+#define HP100_REG_TX_PDA_H 0x1c /* W: (32) BM: Ptr to PDL, High Pri */
+#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */
+#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */
+#define HP100_REG_RX_PDL 0x1a /* R: (8) BM: # rx pdl not executed */
+#define HP100_REG_TX_PDL 0x1b /* R: (8) BM: # tx pdl not executed */
+#define HP100_REG_RX_PDA 0x18 /* W: (32) BM: Up to 31 addresses */
+ /* which point to a PDL */
+#define HP100_REG_SL_EARLY 0x1c /* (32) Enhanced Slave Early Rx */
+#define HP100_REG_STAT_DROPPED 0x20 /* R (12) Dropped Packet Counter */
+#define HP100_REG_STAT_ERRORED 0x22 /* R (8) Errored Packet Counter */
+#define HP100_REG_STAT_ABORT 0x23 /* R (8) Abort Counter/OW Coll. Flag */
+#define HP100_REG_RX_RING 0x24 /* W (32) Slave: RX Ring Pointers */
+#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */
+#define HP100_REG_32_OFFSET 0x2c /* W (16) Slave: Offset Register */
+
+/* Page 1 - MAC Address/Hash Table */
+
+#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */
+#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */
+
+/* Page 2 - Hardware Mapping */
+
+#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */
+#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */
+#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */
+#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */
+#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */
+#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */
+
+/* New on Page 2 for ETR chips: */
+#define HP100_REG_MODECTRL1 0x10 /* RW: (8) Mode Control 1 */
+#define HP100_REG_MODECTRL2 0x11 /* RW: (8) Mode Control 2 */
+#define HP100_REG_PCICTRL1 0x12 /* RW: (8) PCI Cfg 1 */
+#define HP100_REG_PCICTRL2 0x13 /* RW: (8) PCI Cfg 2 */
+#define HP100_REG_PCIBUSMLAT 0x15 /* RW: (8) PCI Bus Master Latency */
+#define HP100_REG_EARLYTXCFG 0x16 /* RW: (16) Early TX Cfg/Cntrl Reg */
+#define HP100_REG_EARLYRXCFG 0x18 /* RW: (8) Early RX Cfg/Cntrl Reg */
+#define HP100_REG_ISAPNPCFG1 0x1a /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */
+#define HP100_REG_ISAPNPCFG2 0x1b /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */
+
+/* Page 3 - EEPROM/Boot ROM */
+
+#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */
+#define HP100_REG_BOOTROM_CTRL 0x0a
+
+/* Page 4 - LAN Configuration (MAC_CTRL) */
+
+#define HP100_REG_10_LAN_CFG_1 0x08 /* RW: (8) Set 10M XCVR functions */
+#define HP100_REG_10_LAN_CFG_2 0x09 /* RW: (8) 10M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_1 0x0a /* RW: (8) Set 100M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_2 0x0b /* RW: (8) 100M LAN Training cfgregs */
+#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */
+#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */
+#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts cant fit in mem*/
+#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */
+#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */
+#define HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register.*/
+#define HP100_REG_TRAIN_ALLOW 0x16 /* R: (16) Hub allowed register */
+
+/* Page 5 - MMU */
+
+#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */
+#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */
+#define HP100_REG_PDL_MEM_STOP 0x10 /* Not used by 802.12 devices */
+#define HP100_REG_ECB_MEM_STOP 0x14 /* I've no idea what this is */
+
+/* Page 6 - Card ID/Physical LAN Address */
+
+#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */
+#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */
+#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */
+#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */
+#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */
+
+/* Page 7 - MMU Current Pointers */
+
+#define HP100_REG_PTR_RXSTART 0x08 /* R: (16) Current begin of Rx ring */
+#define HP100_REG_PTR_RXEND 0x0a /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_TXSTART 0x0c /* R: (16) Current begin of Tx ring */
+#define HP100_REG_PTR_TXEND 0x0e /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_RPDLSTART 0x10
+#define HP100_REG_PTR_RPDLEND 0x12
+#define HP100_REG_PTR_RINGPTRS 0x14
+#define HP100_REG_PTR_MEMDEBUG 0x1a
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Hardware ID Register I (Always available, HW_ID, Offset 0x00)
+ */
+#define HP100_HW_ID_CASCADE 0x4850 /* Identifies Cascade Chip */
+
+/*
+ * Hardware ID Register 2 & Paging Register
+ * (Always available, PAGING, Offset 0x02)
+ * Bits 15:4 are for the Chip ID
+ */
+#define HP100_CHIPID_MASK 0xFFF0
+#define HP100_CHIPID_SHASTA 0x5350 /* Not 802.12 compliant */
+ /* EISA BM/SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_RAINIER 0x5360 /* Not 802.12 compliant EISA BM,*/
+ /* PCI SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_LASSEN 0x5370 /* 802.12 compliant PCI BM, PCI SL */
+ /* LRF supported */
+
+/*
+ * Option Registers I and II
+ * (Always available, OPTION_LSW, Offset 0x04-0x05)
+ */
+#define HP100_DEBUG_EN 0x8000 /* 0:Dis., 1:Enable Debug Dump Ptr. */
+#define HP100_RX_HDR 0x4000 /* 0:Dis., 1:Enable putting pkt into */
+ /* system mem. before Rx interrupt */
+#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable mem.mapping. */
+ /* MMAP_DIS must be 0 and MEM_EN */
+ /* must be 1 for memory-mapped */
+ /* mode to be enabled */
+#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */
+#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */
+#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */
+#define HP100_TRI_INT 0x0200 /* 0:Don't, 1:Do tri-state the int */
+#define HP100_MEM_EN 0x0040 /* Config program set this to */
+ /* 0:Disable, 1:Enable mem map. */
+ /* See MMAP_DIS. */
+#define HP100_IO_EN 0x0020 /* 1:Enable I/O transfers */
+#define HP100_BOOT_EN 0x0010 /* 1:Enable boot ROM access */
+#define HP100_FAKE_INT 0x0008 /* 1:int */
+#define HP100_INT_EN 0x0004 /* 1:Enable ints from card */
+#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */
+ /* NIC reset on 0 to 1 transition */
+
+/*
+ * Option Register III
+ * (Always available, OPTION_MSW, Offset 0x06)
+ */
+#define HP100_PRIORITY_TX 0x0080 /* 1:Do all Tx pkts as priority */
+#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */
+#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue */
+ /* h/w will set to 0 when done */
+#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w */
+ /* will set to 0 when done */
+
+/*
+ * Interrupt Status Registers I and II
+ * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09)
+ * Note: With old chips, these Registers will clear when 1 is written to them
+ * with new chips this depends on setting of CLR_ISMODE
+ */
+#define HP100_RX_EARLY_INT 0x2000
+#define HP100_RX_PDA_ZERO 0x1000
+#define HP100_RX_PDL_FILL_COMPL 0x0800
+#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */
+#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */
+#define HP100_TX_PDA_ZERO 0x0020 /* 1 when PDA count goes to zero */
+#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */
+#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */
+#define HP100_MISC_ERROR 0x0004 /* 0:No, 1:Lan Link down or bus error*/
+#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */
+
+/*
+ * Xmit Memory Free Count
+ * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit)
+ */
+#define HP100_AUTO_COMPARE 0x80000000 /* Tx Space avail & pkts<255 */
+#define HP100_FREE_SPACE 0x7fffffe0 /* Tx free memory */
+
+/*
+ * IRQ Channel
+ * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d)
+ */
+#define HP100_ZERO_WAIT_EN 0x80 /* 0:No, 1:Yes asserts NOWS signal */
+#define HP100_IRQ_SCRAMBLE 0x40
+#define HP100_BOND_HP 0x20
+#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */
+ /* (Only valid on EISA cards) */
+#define HP100_IRQMASK 0x0F /* Isolate the IRQ bits */
+
+/*
+ * SRAM Parameters
+ * (Page HW_MAP, SRAM, Offset 0x0e)
+ */
+#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */
+#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count(put index in lwr bits)*/
+
+/*
+ * Bus Master Register
+ * (Page HW_MAP, BM, Offset 0x0f)
+ */
+#define HP100_BM_BURST_RD 0x01 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (tx) */
+#define HP100_BM_BURST_WR 0x02 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (rx) */
+#define HP100_BM_MASTER 0x04 /* 0:Slave, 1:BM mode */
+#define HP100_BM_PAGE_CK 0x08 /* This bit should be set whenever in*/
+ /* an EISA system */
+#define HP100_BM_PCI_8CLK 0x40 /* ... cycles 8 clocks apart */
+
+
+/*
+ * Mode Control Register I
+ * (Page HW_MAP, MODECTRL1, Offset 0x10)
+ *
+ * NOTE(review): HP100_TX_DUALQ and HP100_PDL_USE3 are both 0x10 —
+ * presumably they alias the same bit with mode-dependent meaning,
+ * but this should be confirmed against the Cascade chip spec.
+ */
+#define HP100_TX_DUALQ 0x10
+ /* If set and BM -> dual tx pda queues*/
+#define HP100_ISR_CLRMODE 0x02 /* If set ISR will clear all pending */
+ /* interrupts on read (etr only?) */
+#define HP100_EE_NOLOAD 0x04 /* Status whether res will be loaded */
+ /* from the eeprom */
+#define HP100_TX_CNT_FLG 0x08 /* Controls Early TX Reg Cnt Field */
+#define HP100_PDL_USE3 0x10 /* If set BM engine will read only */
+ /* first three data elements of a PDL */
+ /* on the first access. */
+#define HP100_BUSTYPE_MASK 0xe0 /* Three bit bus type info */
+
+/*
+ * Mode Control Register II
+ * (Page HW_MAP, MODECTRL2, Offset0x11)
+ */
+#define HP100_EE_MASK 0x0f /* Tell EEPROM circuit not to load */
+ /* certain resources */
+#define HP100_DIS_CANCEL 0x20 /* For tx dualq mode operation */
+#define HP100_EN_PDL_WB 0x40 /* 1: Status of PDL completion may be */
+ /* written back to system mem */
+#define HP100_EN_BUS_FAIL 0x80 /* Enables bus-fail portion of misc */
+ /* interrupt */
+
+/*
+ * PCI Configuration and Control Register I
+ * (Page HW_MAP, PCICTRL1, Offset 0x12)
+ */
+#define HP100_LO_MEM 0x01 /* 1: Mapped Mem requested below 1MB */
+#define HP100_NO_MEM 0x02 /* 1: Disables Req for sysmem to PCI */
+ /* bios */
+#define HP100_USE_ISA 0x04 /* 1: isa type decodes will occur */
+ /* simultaneously with PCI decodes */
+#define HP100_IRQ_HI_MASK 0xf0 /* pgmed by pci bios */
+#define HP100_PCI_IRQ_HI_MASK 0x78 /* Isolate 4 bits for PCI IRQ */
+
+/*
+ * PCI Configuration and Control Register II
+ * (Page HW_MAP, PCICTRL2, Offset 0x13)
+ */
+#define HP100_RD_LINE_PDL 0x01 /* 1: PCI command Memory Read Line en */
+#define HP100_RD_TX_DATA_MASK 0x06 /* choose PCI memread cmds for TX */
+#define HP100_MWI 0x08 /* 1: en. PCI memory write invalidate */
+#define HP100_ARB_MODE 0x10 /* Select PCI arbitor type */
+#define HP100_STOP_EN 0x20 /* Enables PCI state machine to issue */
+ /* pci stop if cascade not ready */
+#define HP100_IGNORE_PAR 0x40 /* 1: PCI state machine ignores parity*/
+#define HP100_PCI_RESET 0x80 /* 0->1: Reset PCI block */
+
+/*
+ * Early TX Configuration and Control Register
+ * (Page HW_MAP, EARLYTXCFG, Offset 0x16)
+ */
+#define HP100_EN_EARLY_TX 0x8000 /* 1=Enable Early TX */
+#define HP100_EN_ADAPTIVE 0x4000 /* 1=Enable adaptive mode */
+#define HP100_EN_TX_UR_IRQ 0x2000 /* reserved, must be 0 */
+#define HP100_EN_LOW_TX 0x1000 /* reserved, must be 0 */
+#define HP100_ET_CNT_MASK 0x0fff /* bits 11..0: ET counters */
+
+/*
+ * Early RX Configuration and Control Register
+ * (Page HW_MAP, EARLYRXCFG, Offset 0x18)
+ */
+#define HP100_EN_EARLY_RX 0x80 /* 1=Enable Early RX */
+#define HP100_EN_LOW_RX 0x40 /* reserved, must be 0 */
+#define HP100_RX_TRIP_MASK 0x1f /* bits 4..0: threshold at which the
+ * early rx circuit will start the
+ * dma of received packet into system
+ * memory for BM */
+
+/*
+ * Serial Devices Control Register
+ * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08)
+ */
+#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads EEPROM into registers. */
+ /* When it goes back to 0, load is */
+ /* complete. This should take ~600us.*/
+
+/*
+ * 10MB LAN Control and Configuration Register I
+ * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08)
+ */
+#define HP100_MAC10_SEL 0xc0 /* Get bits to indicate MAC */
+#define HP100_AUI_SEL 0x20 /* Status of AUI selection */
+#define HP100_LOW_TH 0x10 /* 0:No, 1:Yes allow better cabling */
+#define HP100_LINK_BEAT_DIS 0x08 /* 0:Enable, 1:Disable link beat */
+#define HP100_LINK_BEAT_ST 0x04 /* 0:No, 1:Yes link beat being Rx */
+#define HP100_R_ROL_ST 0x02 /* 0:No, 1:Yes Rx twisted pair has */
+ /* been reversed */
+#define HP100_AUI_ST 0x01 /* 0:No, 1:Yes use AUI on TP card */
+
+/*
+ * 10 MB LAN Control and Configuration Register II
+ * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09)
+ */
+#define HP100_SQU_ST 0x01 /* 0:No, 1:Yes collision signal sent */
+ /* after Tx.Only used for AUI. */
+#define HP100_FULLDUP 0x02 /* 1: LXT901 XCVR fullduplx enabled */
+#define HP100_DOT3_MAC 0x04 /* 1: DOT 3 Mac sel. unless Autosel */
+
+/*
+ * MAC Selection, use with MAC10_SEL bits
+ */
+#define HP100_AUTO_SEL_10 0x0 /* Auto select */
+#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */
+#define HP100_XCVR_7213 0x2 /* 7213 transceiver */
+#define HP100_XCVR_82503 0x3 /* 82503 transceiver */
+
+/*
+ * 100MB LAN Training Register
+ * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12)
+ */
+#define HP100_FRAME_FORMAT 0x08 /* 0:802.3, 1:802.5 frames */
+#define HP100_BRIDGE 0x04 /* 0:No, 1:Yes tell hub i am a bridge */
+#define HP100_PROM_MODE 0x02 /* 0:No, 1:Yes tell hub card is */
+ /* promiscuous */
+#define HP100_REPEATER 0x01 /* 0:No, 1:Yes tell hub MAC wants to */
+ /* be a cascaded repeater */
+
+/*
+ * 100MB LAN Control and Configuration Register
+ * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a)
+ */
+#define HP100_VG_SEL 0x80 /* 0:No, 1:Yes use 100 Mbit MAC */
+#define HP100_LINK_UP_ST 0x40 /* 0:No, 1:Yes endnode logged in */
+#define HP100_LINK_CABLE_ST 0x20 /* 0:No, 1:Yes cable can hear tones */
+ /* from hub */
+#define HP100_LOAD_ADDR 0x10 /* 0->1 card addr will be sent */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_LINK_CMD 0x08 /* 0->1 link will attempt to log in. */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_TRN_DONE 0x04 /* NEW ETR-Chips only: Will be reset */
+ /* after LinkUp Cmd is given and set */
+ /* when training has completed. */
+#define HP100_LINK_GOOD_ST 0x02 /* 0:No, 1:Yes cable passed training */
+#define HP100_VG_RESET 0x01 /* 0:Yes, 1:No reset the 100VG MAC */
+
+
+/*
+ * MAC Configuration Register I
+ * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c)
+ */
+#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */
+#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */
+#define HP100_RX_EN 0x20 /* 1: allow receiving of pkts */
+#define HP100_TX_EN 0x10 /* 1: allow transmitting of pkts */
+#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */
+#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */
+#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */
+#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL phys. pkts */
+#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */
+#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */
+#define HP100_MAC1MODE2 0x00
+/* Parenthesize the compound modes: an unparenthesized `A | B` body
+ * mis-expands in contexts like `if (x == HP100_MAC1MODE3)`, where ==
+ * binds tighter than | and silently yields the wrong result. */
+#define HP100_MAC1MODE3 (HP100_MAC1MODE2 | HP100_ACC_BC)
+#define HP100_MAC1MODE4 (HP100_MAC1MODE3 | HP100_ACC_MC)
+#define HP100_MAC1MODE5 HP100_MAC1MODE4 /* set mc hash to all ones also */
+#define HP100_MAC1MODE6 (HP100_MAC1MODE5 | HP100_ACC_PHY) /* Promiscuous */
+/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
+ a mode 7 defined to be LAN Analyzer mode, which will receive errored and
+ runt packets, and keep the CRC bytes. */
+#define HP100_MAC1MODE7 (HP100_MAC1MODE6 | HP100_ACC_ERRORED)
+
+/*
+ * MAC Configuration Register II
+ * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d)
+ */
+#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */
+#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */
+#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */
+ /* transceiver */
+#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */
+#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */
+#define HP100_ACCNA 0x04 /* 1: For 802.5: Accept only token ring
+ * group addr that maches NA mask */
+#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. */
+ /* The length will reflect this. */
+#define HP100_ACCFA 0x01 /* 1: For 802.5: Accept only functional
+ * addrs that match FA mask (page1) */
+#define HP100_MAC2MODEMASK 0x02
+#define HP100_MAC2MODE1 0x00
+#define HP100_MAC2MODE2 0x00
+#define HP100_MAC2MODE3 0x00
+#define HP100_MAC2MODE4 0x00
+#define HP100_MAC2MODE5 0x00
+#define HP100_MAC2MODE6 0x00
+/* Fix: was the bare identifier KEEP_CRC, which is undefined anywhere in
+ * this header; the intended bit is HP100_KEEP_CRC (0x02), matching
+ * HP100_MAC2MODEMASK above. */
+#define HP100_MAC2MODE7 HP100_KEEP_CRC
+
+/*
+ * MAC Configuration Register III
+ * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e)
+ */
+#define HP100_PACKET_PACE 0x03 /* Packet Pacing:
+ * 00: No packet pacing
+ * 01: 8 to 16 uS delay
+ * 10: 16 to 32 uS delay
+ * 11: 32 to 64 uS delay
+ */
+#define HP100_LRF_EN 0x04 /* 1: External LAN Rcv Filter and
+ * TCP/IP Checksumming enabled. */
+#define HP100_AUTO_MODE 0x10 /* 1: AutoSelect between 10/100 */
+
+/*
+ * MAC Configuration Register IV
+ * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f)
+ */
+#define HP100_MAC_SEL_ST 0x01 /* (R): Status of external VGSEL
+ * Signal, 1=100VG, 0=10Mbit sel. */
+#define HP100_LINK_FAIL_ST 0x02 /* (R): Status of Link Fail portion
+ * of the Misc. Interrupt */
+
+/*
+ * 100 MB LAN Training Request/Allowed Registers
+ * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only)
+ */
+#define HP100_MACRQ_REPEATER 0x0001 /* 1: MAC tells HUB it wants to be
+ * a cascaded repeater
+ * 0: ... wants to be a DTE */
+#define HP100_MACRQ_PROMSC 0x0006 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MACRQ_FRAMEFMT_EITHER 0x0018 /* 11: either format allowed */
+#define HP100_MACRQ_FRAMEFMT_802_3 0x0000 /* 00: 802.3 is requested */
+#define HP100_MACRQ_FRAMEFMT_802_5 0x0010 /* 10: 802.5 format is requested */
+#define HP100_CARD_MACVER 0xe000 /* R: 3 bit Cards 100VG MAC version */
+#define HP100_MALLOW_REPEATER 0x0001 /* If reset, requested access as an
+ * end node is allowed */
+#define HP100_MALLOW_PROMSC 0x0004 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MALLOW_FRAMEFMT 0x00e0 /* 2 bits: Frame Format
+ * 00: 802.3 format will be used
+ * 10: 802.5 format will be used */
+#define HP100_MALLOW_ACCDENIED 0x0400 /* N bit */
+#define HP100_MALLOW_CONFIGURE 0x0f00 /* C bit */
+#define HP100_MALLOW_DUPADDR 0x1000 /* D bit */
+#define HP100_HUB_MACVER 0xe000 /* R: 3 bit 802.12 MAC/RMAC training */
+ /* protocol of repeater */
+
+/* ****************************************************************************** */
+
+/*
+ * Set/Reset bits
+ */
+#define HP100_SET_HB 0x0100 /* 0:Set fields to 0 whose mask is 1 */
+#define HP100_SET_LB 0x0001 /* HB sets upper byte, LB sets lower byte */
+#define HP100_RESET_HB 0x0000 /* For readability when resetting bits */
+#define HP100_RESET_LB 0x0000 /* For readability when resetting bits */
+
+/*
+ * Misc. Constants
+ */
+#define HP100_LAN_100 100 /* lan_type value for VG */
+#define HP100_LAN_10 10 /* lan_type value for 10BaseT */
+#define HP100_LAN_ERR (-1) /* lan_type value for link down */
+
+#define TRUE 1
+#define FALSE 0
+
+
+/*
+ * Bus Master Data Structures ----------------------------------------------
+ */
+
+#define MAX_RX_PDL 30 /* Card limit = 31 */
+#define MAX_RX_FRAG 2 /* Don't need more... */
+#define MAX_TX_PDL 29
+#define MAX_TX_FRAG 2 /* Limit = 31 */
+
+/* Define total PDL area size in bytes (should be 4096) */
+/* This is the size of kernel (dma) memory that will be allocated. */
+#define MAX_RINGSIZE ((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16
+
+/* Ethernet Packet Sizes */
+#define MIN_ETHER_SIZE 60
+#define MAX_ETHER_SIZE 1514 /* Needed for preallocation of */
+ /* skb buffer when busmastering */
+
+/* Tx or Rx Ring Entry */
+/* One entry of the busmaster Tx or Rx descriptor ring: a linked list
+ * of PDL (Packet Descriptor List) slots, each optionally paired with
+ * the sk_buff currently using it. */
+typedef struct hp100_ring {
+ u_int *pdl; /* Address of PDLs PDH, dword before
+ * this address is used for rx hdr */
+ u_int pdl_paddr; /* Physical address of PDL */
+ struct sk_buff *skb; /* Buffer owned by this ring slot (NULL if free) */
+ struct hp100_ring *next; /* Circularly links the ring entries */
+} hp100_ring_t;
+
+
+
+/* Mask for Header Descriptor */
+#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length */
+
+
+/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED
+ bit in the MAC Configuration Register 1 is set. */
+#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */
+#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */
+#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */
+#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */
+#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */
+ /* marker */
+#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */
+#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */
+#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */
+#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */
+ /* Length Reg. */
+#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */
+#define HP100_CRC_ERR 0x0008 /* 0:No, 1:Yes CRC occurred. */
+
+/* The last three bits indicate the type of destination address */
+
+#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */
+#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */
+#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */
+#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */
+#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */
+
+/*
+ * macros
+ *
+ * All of these expand uses of a variable named `ioaddr`, which must be
+ * in scope at every call site (the card's I/O base address).
+ * NOTE(review): the or/and read-modify-write macros expand `reg` twice;
+ * this is safe because `reg` is always a bare register-name token, never
+ * an expression with side effects.
+ */
+
+#define hp100_inb( reg ) \
+ inb( ioaddr + HP100_REG_##reg )
+#define hp100_inw( reg ) \
+ inw( ioaddr + HP100_REG_##reg )
+#define hp100_inl( reg ) \
+ inl( ioaddr + HP100_REG_##reg )
+#define hp100_outb( data, reg ) \
+ outb( data, ioaddr + HP100_REG_##reg )
+#define hp100_outw( data, reg ) \
+ outw( data, ioaddr + HP100_REG_##reg )
+#define hp100_outl( data, reg ) \
+ outl( data, ioaddr + HP100_REG_##reg )
+#define hp100_orb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_orw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_andb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+#define hp100_andw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+
+/* Page switching and the SET_/RESET_ HB/LB convention: writing the
+ * option word with SET_LB/RESET_LB (HB) set/clears the masked bits. */
+#define hp100_page( page ) \
+ outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
+#define hp100_ints_off() \
+ outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_ints_on() \
+ outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_enable() \
+ outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_disable() \
+ outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
+
+
+/*
+ * Local variables:
+ * c-indent-level: 2
+ * tab-width: 8
+ * End:
+*/
diff --git a/linux/src/drivers/net/i82586.h b/linux/src/drivers/net/i82586.h
new file mode 100644
index 0000000..d41702e
--- /dev/null
+++ b/linux/src/drivers/net/i82586.h
@@ -0,0 +1,413 @@
+/*
+ * Intel 82586 IEEE 802.3 Ethernet LAN Coprocessor.
+ *
+ * See:
+ * Intel Microcommunications 1991
+ * p1-1 to p1-37
+ * Intel order No. 231658
+ * ISBN 1-55512-119-5
+ *
+ * Unfortunately, the above chapter mentions neither
+ * the System Configuration Pointer (SCP) nor the
+ * Intermediate System Configuration Pointer (ISCP),
+ * so we probably need to look elsewhere for the
+ * whole story -- some recommend the "Intel LAN
+ * Components manual" but I have neither a copy
+ * nor a full reference. But "elsewhere" may be
+ * in the same publication...
+ * The description of a later device, the
+ * "82596CA High-Performance 32-Bit Local Area Network
+ * Coprocessor", (ibid. p1-38 to p1-109) does mention
+ * the SCP and ISCP and also has an i82586 compatibility
+ * mode. Even more useful is "AP-235 An 82586 Data Link
+ * Driver" (ibid. p1-337 to p1-417).
+ */
+
+#define I82586_MEMZ (64 * 1024)
+
+#define I82586_SCP_ADDR (I82586_MEMZ - sizeof(scp_t))
+
+#define ADDR_LEN 6
+#define I82586NULL 0xFFFF
+
+#define toff(t,p,f) (unsigned short)((void *)(&((t *)((void *)0 + (p)))->f) - (void *)0)
+
+/*
+ * System Configuration Pointer (SCP).
+ */
+typedef struct scp_t scp_t;
+struct scp_t
+{
+ unsigned short scp_sysbus; /* 82586 bus width: */
+#define SCP_SY_16BBUS (0x0 << 0) /* 16 bits */
+#define SCP_SY_8BBUS (0x1 << 0) /* 8 bits. */
+ unsigned short scp_junk[2]; /* Unused */
+ unsigned short scp_iscpl; /* lower 16 bits of ISCP_ADDR */
+ unsigned short scp_iscph; /* upper 16 bits of ISCP_ADDR */
+};
+
+/*
+ * Intermediate System Configuration Pointer (ISCP).
+ */
+typedef struct iscp_t iscp_t;
+struct iscp_t
+{
+ unsigned short iscp_busy; /* set by CPU before first CA, */
+ /* cleared by 82586 after read. */
+ unsigned short iscp_offset; /* offset of SCB */
+ unsigned short iscp_basel; /* base of SCB */
+ unsigned short iscp_baseh; /* " */
+};
+
+/*
+ * System Control Block (SCB).
+ * The 82586 writes its status to scb_status and then
+ * raises an interrupt to alert the CPU.
+ * The CPU writes a command to scb_command and
+ * then issues a Channel Attention (CA) to alert the 82586.
+ */
+typedef struct scb_t scb_t;
+struct scb_t
+{
+ unsigned short scb_status; /* Status of 82586 */
+#define SCB_ST_INT (0xF << 12) /* Some of: */
+#define SCB_ST_CX (0x1 << 15) /* Cmd completed */
+#define SCB_ST_FR (0x1 << 14) /* Frame received */
+#define SCB_ST_CNA (0x1 << 13) /* Cmd unit not active */
+#define SCB_ST_RNR (0x1 << 12) /* Rcv unit not ready */
+#define SCB_ST_JUNK0 (0x1 << 11) /* 0 */
+#define SCB_ST_CUS (0x7 << 8) /* Cmd unit status */
+#define SCB_ST_CUS_IDLE (0 << 8) /* Idle */
+#define SCB_ST_CUS_SUSP (1 << 8) /* Suspended */
+#define SCB_ST_CUS_ACTV (2 << 8) /* Active */
+#define SCB_ST_JUNK1 (0x1 << 7) /* 0 */
+#define SCB_ST_RUS (0x7 << 4) /* Rcv unit status */
+#define SCB_ST_RUS_IDLE (0 << 4) /* Idle */
+#define SCB_ST_RUS_SUSP (1 << 4) /* Suspended */
+#define SCB_ST_RUS_NRES (2 << 4) /* No resources */
+#define SCB_ST_RUS_RDY (4 << 4) /* Ready */
+ unsigned short scb_command; /* Next command */
+#define SCB_CMD_ACK_CX (0x1 << 15) /* Ack cmd completion */
+#define SCB_CMD_ACK_FR (0x1 << 14) /* Ack frame received */
+#define SCB_CMD_ACK_CNA (0x1 << 13) /* Ack CU not active */
+#define SCB_CMD_ACK_RNR (0x1 << 12) /* Ack RU not ready */
+#define SCB_CMD_JUNKX (0x1 << 11) /* Unused */
+#define SCB_CMD_CUC (0x7 << 8) /* Command Unit command */
+#define SCB_CMD_CUC_NOP (0 << 8) /* Nop */
+#define SCB_CMD_CUC_GO (1 << 8) /* Start cbl_offset */
+#define SCB_CMD_CUC_RES (2 << 8) /* Resume execution */
+#define SCB_CMD_CUC_SUS (3 << 8) /* Suspend " */
+#define SCB_CMD_CUC_ABT (4 << 8) /* Abort " */
+#define SCB_CMD_RESET (0x1 << 7) /* Reset chip (hardware) */
+#define SCB_CMD_RUC (0x7 << 4) /* Receive Unit command */
+#define SCB_CMD_RUC_NOP (0 << 4) /* Nop */
+#define SCB_CMD_RUC_GO (1 << 4) /* Start rfa_offset */
+#define SCB_CMD_RUC_RES (2 << 4) /* Resume reception */
+#define SCB_CMD_RUC_SUS (3 << 4) /* Suspend " */
+#define SCB_CMD_RUC_ABT (4 << 4) /* Abort " */
+ unsigned short scb_cbl_offset; /* Offset of first command unit */
+ /* Action Command */
+ unsigned short scb_rfa_offset; /* Offset of first Receive */
+ /* Frame Descriptor in the */
+ /* Receive Frame Area */
+ unsigned short scb_crcerrs; /* Properly aligned frames */
+ /* received with a CRC error */
+ unsigned short scb_alnerrs; /* Misaligned frames received */
+ /* with a CRC error */
+ unsigned short scb_rscerrs; /* Frames lost due to no space */
+ unsigned short scb_ovrnerrs; /* Frames lost due to slow bus */
+};
+
+#define scboff(p,f) toff(scb_t, p, f)
+
+/*
+ * The eight Action Commands.
+ */
+typedef enum acmd_e acmd_e;
+enum acmd_e
+{
+ acmd_nop = 0, /* Do nothing */
+ acmd_ia_setup = 1, /* Load an (ethernet) address into the */
+ /* 82586 */
+ acmd_configure = 2, /* Update the 82586 operating parameters */
+ acmd_mc_setup = 3, /* Load a list of (ethernet) multicast */
+ /* addresses into the 82586 */
+ acmd_transmit = 4, /* Transmit a frame */
+ acmd_tdr = 5, /* Perform a Time Domain Reflectometer */
+ /* test on the serial link */
+ acmd_dump = 6, /* Copy 82586 registers to memory */
+ acmd_diagnose = 7, /* Run an internal self test */
+};
+
+/*
+ * Generic Action Command header.
+ */
+typedef struct ach_t ach_t;
+struct ach_t
+{
+ unsigned short ac_status; /* Command status: */
+#define AC_SFLD_C (0x1 << 15) /* Command completed */
+#define AC_SFLD_B (0x1 << 14) /* Busy executing */
+#define AC_SFLD_OK (0x1 << 13) /* Completed error free */
+#define AC_SFLD_A (0x1 << 12) /* Command aborted */
+#define AC_SFLD_FAIL (0x1 << 11) /* Selftest failed */
+#define AC_SFLD_S10 (0x1 << 10) /* No carrier sense */
+ /* during transmission */
+#define AC_SFLD_S9 (0x1 << 9) /* Tx unsuccessful: */
+ /* (stopped) lost CTS */
+#define AC_SFLD_S8 (0x1 << 8) /* Tx unsuccessful: */
+ /* (stopped) slow DMA */
+#define AC_SFLD_S7 (0x1 << 7) /* Tx deferred: */
+ /* other link traffic */
+#define AC_SFLD_S6 (0x1 << 6) /* Heart Beat: collision */
+ /* detect after last tx */
+#define AC_SFLD_S5 (0x1 << 5) /* Tx stopped: */
+ /* excessive collisions */
+#define AC_SFLD_MAXCOL (0xF << 0) /* Collision count */
+ unsigned short ac_command; /* Command specifier: */
+#define AC_CFLD_EL (0x1 << 15) /* End of command list */
+#define AC_CFLD_S (0x1 << 14) /* Suspend on completion */
+#define AC_CFLD_I (0x1 << 13) /* Interrupt on completion */
+#define AC_CFLD_CMD (0x7 << 0) /* acmd_e */
+ unsigned short ac_link; /* Next Action Command */
+};
+
+#define acoff(p,f) toff(ach_t, p, f)
+
+/*
+ * The Nop Action Command.
+ */
+typedef struct ac_nop_t ac_nop_t;
+struct ac_nop_t
+{
+ ach_t nop_h;
+};
+
+/*
+ * The IA-Setup Action Command.
+ */
+typedef struct ac_ias_t ac_ias_t;
+struct ac_ias_t
+{
+ ach_t ias_h;
+ unsigned char ias_addr[ADDR_LEN]; /* The (ethernet) address */
+};
+
+/*
+ * The Configure Action Command.
+ */
+typedef struct ac_cfg_t ac_cfg_t;
+struct ac_cfg_t
+{
+ ach_t cfg_h;
+ unsigned char cfg_byte_cnt; /* Size foll data: 4-12 */
+#define AC_CFG_BYTE_CNT(v) (((v) & 0xF) << 0)
+ unsigned char cfg_fifolim; /* FIFO threshold */
+#define AC_CFG_FIFOLIM(v) (((v) & 0xF) << 0)
+ unsigned char cfg_byte8;
+#define AC_CFG_SAV_BF(v) (((v) & 0x1) << 7) /* Save rxd bad frames */
+#define AC_CFG_SRDY(v) (((v) & 0x1) << 6) /* SRDY/ARDY pin means */
+ /* external sync. */
+ unsigned char cfg_byte9;
+#define AC_CFG_ELPBCK(v) (((v) & 0x1) << 7) /* External loopback */
+#define AC_CFG_ILPBCK(v) (((v) & 0x1) << 6) /* Internal loopback */
+#define AC_CFG_PRELEN(v) (((v) & 0x3) << 4) /* Preamble length */
+#define AC_CFG_PLEN_2 0 /* 2 bytes */
+#define AC_CFG_PLEN_4 1 /* 4 bytes */
+#define AC_CFG_PLEN_8 2 /* 8 bytes */
+#define AC_CFG_PLEN_16 3 /* 16 bytes */
+#define AC_CFG_ALOC(v) (((v) & 0x1) << 3) /* Addr/len data is */
+ /* explicit in buffers */
+#define AC_CFG_ADDRLEN(v) (((v) & 0x7) << 0) /* Bytes per address */
+ unsigned char cfg_byte10;
+#define AC_CFG_BOFMET(v) (((v) & 0x1) << 7) /* Use alternate expo. */
+ /* backoff method */
+#define AC_CFG_ACR(v) (((v) & 0x7) << 4) /* Accelerated cont. res. */
+#define AC_CFG_LINPRIO(v) (((v) & 0x7) << 0) /* Linear priority */
+ unsigned char cfg_ifs; /* Interframe spacing */
+ unsigned char cfg_slotl; /* Slot time (low byte) */
+ unsigned char cfg_byte13;
+#define AC_CFG_RETRYNUM(v) (((v) & 0xF) << 4) /* Max. collision retry */
+#define AC_CFG_SLTTMHI(v) (((v) & 0x7) << 0) /* Slot time (high bits) */
+ unsigned char cfg_byte14;
+#define AC_CFG_FLGPAD(v) (((v) & 0x1) << 7) /* Pad with HDLC flags */
+#define AC_CFG_BTSTF(v) (((v) & 0x1) << 6) /* Do HDLC bitstuffing */
+#define AC_CFG_CRC16(v) (((v) & 0x1) << 5) /* 16 bit CCITT CRC */
+#define AC_CFG_NCRC(v) (((v) & 0x1) << 4) /* Insert no CRC */
+#define AC_CFG_TNCRS(v) (((v) & 0x1) << 3) /* Tx even if no carrier */
+#define AC_CFG_MANCH(v) (((v) & 0x1) << 2) /* Manchester coding */
+#define AC_CFG_BCDIS(v) (((v) & 0x1) << 1) /* Disable broadcast */
+#define AC_CFG_PRM(v) (((v) & 0x1) << 0) /* Promiscuous mode */
+ unsigned char cfg_byte15;
+#define AC_CFG_ICDS(v) (((v) & 0x1) << 7) /* Internal collision */
+ /* detect source */
+#define AC_CFG_CDTF(v) (((v) & 0x7) << 4) /* Collision detect */
+ /* filter in bit times */
+#define AC_CFG_ICSS(v) (((v) & 0x1) << 3) /* Internal carrier */
+ /* sense source */
+#define AC_CFG_CSTF(v) (((v) & 0x7) << 0) /* Carrier sense */
+ /* filter in bit times */
+ unsigned short cfg_min_frm_len;
+#define AC_CFG_MNFRM(v) (((v) & 0xFF) << 0) /* Min. bytes/frame (<= 255) */
+};
+
+/*
+ * The MC-Setup Action Command.
+ */
+typedef struct ac_mcs_t ac_mcs_t;
+struct ac_mcs_t
+{
+ ach_t mcs_h;
+ unsigned short mcs_cnt; /* No. of bytes of MC addresses */
+#if 0
+ unsigned char mcs_data[ADDR_LEN]; /* The first MC address .. */
+ ...
+#endif
+};
+
+#define I82586_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
+
+/*
+ * The Transmit Action Command.
+ */
+typedef struct ac_tx_t ac_tx_t;
+struct ac_tx_t
+{
+ ach_t tx_h;
+ unsigned short tx_tbd_offset; /* Address of list of buffers. */
+#if 0
+Linux packets are passed down with the destination MAC address
+and length/type field already prepended to the data,
+so we do not need to insert it. Consistent with this
+we must also set the AC_CFG_ALOC(..) flag during the
+ac_cfg_t action command.
+ unsigned char tx_addr[ADDR_LEN]; /* The frame dest. address */
+ unsigned short tx_length; /* The frame length */
+#endif /* 0 */
+};
+
+/*
+ * The Time Domain Reflectometer Action Command.
+ */
+typedef struct ac_tdr_t ac_tdr_t;
+struct ac_tdr_t
+{
+ ach_t tdr_h;
+ unsigned short tdr_result; /* Result. */
+#define AC_TDR_LNK_OK (0x1 << 15) /* No link problem */
+#define AC_TDR_XCVR_PRB (0x1 << 14) /* Txcvr cable problem */
+#define AC_TDR_ET_OPN (0x1 << 13) /* Open on the link */
+#define AC_TDR_ET_SRT (0x1 << 12) /* Short on the link */
+#define AC_TDR_TIME (0x7FF << 0) /* Distance to problem */
+ /* site in transmit */
+ /* clock cycles */
+};
+
+/*
+ * The Dump Action Command.
+ */
+typedef struct ac_dmp_t ac_dmp_t;
+struct ac_dmp_t
+{
+ ach_t dmp_h;
+ unsigned short dmp_offset; /* Result. */
+};
+
+/*
+ * Size of the result of the dump command.
+ */
+#define DUMPBYTES 170
+
+/*
+ * The Diagnose Action Command.
+ */
+typedef struct ac_dgn_t ac_dgn_t;
+struct ac_dgn_t
+{
+ ach_t dgn_h;
+};
+
+/*
+ * Transmit Buffer Descriptor (TBD).
+ */
+typedef struct tbd_t tbd_t;
+struct tbd_t
+{
+ unsigned short tbd_status; /* Written by the CPU */
+#define TBD_STATUS_EOF (0x1 << 15) /* This TBD is the */
+ /* last for this frame */
+#define TBD_STATUS_ACNT (0x3FFF << 0) /* Actual count of data */
+ /* bytes in this buffer */
+ unsigned short tbd_next_bd_offset; /* Next in list */
+ unsigned short tbd_bufl; /* Buffer address (low) */
+ unsigned short tbd_bufh; /* " " (high) */
+};
+
+/*
+ * Receive Buffer Descriptor (RBD).
+ */
+typedef struct rbd_t rbd_t;
+struct rbd_t
+{
+ unsigned short rbd_status; /* Written by the 82586 */
+#define RBD_STATUS_EOF (0x1 << 15) /* This RBD is the */
+ /* last for this frame */
+#define RBD_STATUS_F (0x1 << 14) /* ACNT field is valid */
+#define RBD_STATUS_ACNT (0x3FFF << 0) /* Actual no. of data */
+ /* bytes in this buffer */
+ unsigned short rbd_next_rbd_offset; /* Next rbd in list */
+ unsigned short rbd_bufl; /* Data pointer (low) */
+ unsigned short rbd_bufh; /* " " (high) */
+ unsigned short rbd_el_size; /* EL+Data buf. size */
+#define RBD_EL (0x1 << 15) /* This BD is the */
+ /* last in the list */
+#define RBD_SIZE (0x3FFF << 0) /* No. of bytes the */
+ /* buffer can hold */
+};
+
+#define rbdoff(p,f) toff(rbd_t, p, f)
+
+/*
+ * Frame Descriptor (FD).
+ */
+typedef struct fd_t fd_t;
+struct fd_t
+{
+ unsigned short fd_status; /* Written by the 82586 */
+#define FD_STATUS_C (0x1 << 15) /* Completed storing frame */
+#define FD_STATUS_B (0x1 << 14) /* FD was consumed by RU */
+#define FD_STATUS_OK (0x1 << 13) /* Frame rxd successfully */
+#define FD_STATUS_S11 (0x1 << 11) /* CRC error */
+#define FD_STATUS_S10 (0x1 << 10) /* Alignment error */
+#define FD_STATUS_S9 (0x1 << 9) /* Ran out of resources */
+#define FD_STATUS_S8 (0x1 << 8) /* Rx DMA overrun */
+#define FD_STATUS_S7 (0x1 << 7) /* Frame too short */
+#define FD_STATUS_S6 (0x1 << 6) /* No EOF flag */
+ unsigned short fd_command; /* Command */
+#define FD_COMMAND_EL (0x1 << 15) /* Last FD in list */
+#define FD_COMMAND_S (0x1 << 14) /* Suspend RU after rx */
+ unsigned short fd_link_offset; /* Next FD */
+ unsigned short fd_rbd_offset; /* First RBD (data) */
+ /* Prepared by CPU, */
+ /* updated by 82586 */
+#if 0
+I think the rest is unused since we
+have set AC_CFG_ALOC(..). However, just
+in case, we leave the space.
+#endif /* 0 */
+ unsigned char fd_dest[ADDR_LEN]; /* Destination address */
+ /* Written by 82586 */
+ unsigned char fd_src[ADDR_LEN]; /* Source address */
+ /* Written by 82586 */
+ unsigned short fd_length; /* Frame length or type */
+ /* Written by 82586 */
+};
+
+#define fdoff(p,f) toff(fd_t, p, f)
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/linux/src/drivers/net/intel-gige.c b/linux/src/drivers/net/intel-gige.c
new file mode 100644
index 0000000..5884ffb
--- /dev/null
+++ b/linux/src/drivers/net/intel-gige.c
@@ -0,0 +1,1450 @@
+/* intel-gige.c: A Linux device driver for Intel Gigabit Ethernet adapters. */
+/*
+ Written 2000-2002 by Donald Becker.
+ Copyright Scyld Computing Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ You should have received a copy of the GPL with this file.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/ethernet.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"intel-gige.c:v0.14 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/ethernet.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: igige_probe
+config-in: tristate 'Intel PCI Gigabit Ethernet support' CONFIG_IGIGE
+
+c-help-name: Intel PCI Gigabit Ethernet support
+c-help-symbol: CONFIG_IGIGE
+c-help: This driver is for the Intel PCI Gigabit Ethernet
+c-help: adapter series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip has a 16 element perfect filter, and an unusual 4096 bit
+ hash filter based directly on address bits, not the Ethernet CRC.
+ It is costly to recalculate a large, frequently changing table.
+ However even a large table may be useful in some nearly-static environments.
+*/
+static int multicast_filter_limit = 15;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ The media type is passed in 'options[]'. The full_duplex[] table only
+ allows the duplex to be forced on, implicitly disabling autonegotiation.
+ Setting the entry to zero still allows a link to autonegotiate to full
+ duplex.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* The delay before announcing a Rx or Tx has completed. */
+static int rx_intr_holdoff = 0;
+static int tx_intr_holdoff = 128;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two to avoid divides.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#if ! defined(final_version) /* Stress the driver. */
+#define TX_RING_SIZE 8
+#define TX_QUEUE_LEN 5
+#define RX_RING_SIZE 4
+#else
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Intel Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to set forced full duplex (deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Intel Gigabit Ethernet adapter.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Descriptor Rings
+
+This driver uses two statically allocated fixed-size descriptor arrays
+treated as rings by the hardware. The ring sizes are set at compile time
+by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+When unaligned buffers are permitted by the hardware (and always on copies)
+frames are put into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.
+One is the send-packet routine which is single-threaded by the queue
+layer. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring. At the
+start of a transmit attempt netif_pause_tx_queue(dev) is called. If the
+transmit attempt fills the Tx queue controlled by the chip, the driver
+informs the software queue layer by not calling
+netif_unpause_tx_queue(dev) on exit.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IIId. SMP semantics
+
+The following are serialized with respect to each other via the "xmit_lock".
+ dev->hard_start_xmit() Transmit a packet
+ dev->tx_timeout() Transmit watchdog for stuck Tx
+ dev->set_multicast_list() Set the receive filter.
+Note: The Tx timeout watchdog code is implemented by the timer routine in
+kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
+driver interface.
+
+The following fall under the global kernel lock. The module will not be
+unloaded during the call, unless a call with a potential reschedule e.g.
+kmalloc() is called. No other synchronization assertion is made.
+ dev->open()
+ dev->do_ioctl()
+ dev->get_stats()
+Caution: The lock for dev->open() is commonly broken with request_irq() or
+kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
+get_stats(), or additional module locking code must be implemented.
+
+The following is self-serialized (no simultaneous entry)
+ A handler registered with request_irq().
+
+IV. Notes
+
+IVb. References
+
+Intel has also released a Linux driver for this product, "e1000".
+
+IVc. Errata
+
+*/
+
+
+
+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int netdev_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags { CanHaveMII=1, };
+#define PCI_IOTYPE ()
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Intel Gigabit Ethernet adapter", {0x10008086, 0xffffffff, },
+ PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0, 0x1ffff, 0},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info igige_drv_id = {
+ "intel-gige", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ igige_probe1, netdev_pwr_event };
+
+/* This hardware only has a PCI memory space BAR, not I/O space. */
+#ifdef USE_IO_OPS
+#error This driver only works with PCI memory space access.
+#endif
+
+/* Offsets to the device registers.
+*/
+enum register_offsets {
+ ChipCtrl=0x00, ChipStatus=0x08, EECtrl=0x10,
+ FlowCtrlAddrLo=0x028, FlowCtrlAddrHi=0x02c, FlowCtrlType=0x030,
+ VLANetherType=0x38,
+
+ RxAddrCAM=0x040,
+ IntrStatus=0x0C0, /* Interrupt, Clear on Read, AKA ICR */
+ IntrEnable=0x0D0, /* Set enable mask when '1' AKA IMS */
+ IntrDisable=0x0D8, /* Clear enable mask when '1' */
+
+ RxControl=0x100,
+ RxQ0IntrDelay=0x108, /* Rx list #0 interrupt delay timer. */
+ RxRingPtr=0x110, /* Rx Desc. list #0 base address, 64bits */
+ RxRingLen=0x118, /* Num bytes of Rx descriptors in ring. */
+ RxDescHead=0x120,
+ RxDescTail=0x128,
+
+ RxQ1IntrDelay=0x130, /* Rx list #1 interrupt delay timer. */
+ RxRing1Ptr=0x138, /* Rx Desc. list #1 base address, 64bits */
+ RxRing1Len=0x140, /* Num bytes of Rx descriptors in ring. */
+ RxDesc1Head=0x148,
+ RxDesc1Tail=0x150,
+
+ FlowCtrlTimer=0x170, FlowCtrlThrshHi=0x160, FlowCtrlThrshLo=0x168,
+ TxConfigReg=0x178,
+ RxConfigReg=0x180,
+ MulticastArray=0x200,
+
+ TxControl=0x400,
+ TxQState=0x408, /* 64 bit queue state */
+ TxIPG=0x410, /* Inter-Packet Gap */
+ TxRingPtr=0x420, TxRingLen=0x428,
+ TxDescHead=0x430, TxDescTail=0x438, TxIntrDelay=0x440,
+
+ RxCRCErrs=0x4000, RxMissed=0x4010,
+
+ TxStatus=0x408,
+ RxStatus=0x180,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrTxDone=0x0001, /* Tx packet queued */
+ IntrLinkChange=0x0004, /* Link Status Change */
+ IntrRxSErr=0x0008, /* Rx Symbol/Sequence error */
+ IntrRxEmpty=0x0010, /* Rx queue 0 Empty */
+ IntrRxQ1Empty=0x0020, /* Rx queue 1 Empty */
+ IntrRxDone=0x0080, /* Rx Done, Queue 0*/
+ IntrRxDoneQ1=0x0100, /* Rx Done, Queue 1*/
+ IntrPCIErr=0x0200, /* PCI Bus Error */
+
+ IntrTxEmpty=0x0002, /* Guess */
+ StatsMax=0x1000, /* Unknown */
+};
+
+/* Bits in the RxFilterMode register. */
+enum rx_mode_bits {
+ RxCtrlReset=0x01, RxCtrlEnable=0x02, RxCtrlAllUnicast=0x08,
+ RxCtrlAllMulticast=0x10,
+ RxCtrlLoopback=0xC0, /* We never configure loopback */
+ RxCtrlAcceptBroadcast=0x8000,
+ /* Aliased names.*/
+ AcceptAllPhys=0x08, AcceptAllMulticast=0x10, AcceptBroadcast=0x8000,
+ AcceptMyPhys=0,
+ AcceptMulticast=0,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+ u32 buf_addr;
+ u32 buf_addr_hi;
+ u32 csum_length; /* Checksum and length */
+ u32 status; /* Errors and status. */
+};
+
+struct tx_desc {
+ u32 buf_addr;
+ u32 buf_addr_hi;
+ u32 cmd_length;
+ u32 status; /* And errors */
+};
+
+/* Bits in tx_desc.cmd_length */
+enum tx_cmd_bits {
+ TxDescEndPacket=0x02000000, TxCmdIntrDelay=0x80000000,
+ TxCmdAddCRC=0x02000000, TxCmdDoTx=0x13000000,
+};
+enum tx_status_bits {
+ TxDescDone=0x0001, TxDescEndPkt=0x0002,
+};
+
+/* Bits in tx_desc.status */
+enum rx_status_bits {
+ RxDescDone=0x0001, RxDescEndPkt=0x0002,
+};
+
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Keep frequently used values adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int max_interrupt_work;
+ int intr_enable;
+ long in_interrupt; /* Word-long for SMP locks. */
+
+ struct rx_desc *rx_ring;
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ struct tx_desc *tx_ring;
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ unsigned int rx_mode;
+ unsigned int tx_config;
+ int multicast_filter_limit;
+ /* These values track the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int netdev_open(struct net_device *dev);
+static int change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int igige_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&igige_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+/* Initialize one adapter instance: register a net_device, read the station
+   address from the EEPROM, allocate the (aligned) private structure, reset
+   the chip, and fill in the device method pointers.
+   Returns the new net_device, or NULL on failure. */
+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ /* The station address occupies the first three 16-bit EEPROM words. */
+ for (i = 0; i < 3; i++)
+ ((u16*)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory.
+ NOTE(review): returning here leaks the net_device registered by
+ init_etherdev() above -- confirm whether unwinding is worthwhile. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* Do bogusness checks before this point.
+ We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(0x04000000, ioaddr + ChipCtrl);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Round up to the alignment boundary; assumes PRIV_ALIGN is a
+ (2^k)-1 style mask -- verify against its definition. */
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ /* Link into the module-global list used by cleanup_module(). */
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ /* A non-zero per-device "mem" setting overrides the options[] word. */
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x2220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3330;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex)
+ np->duplex_lock = 1;
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+ if (np->msg_level & NETIF_MSG_MISC) {
+ int sum = 0;
+ for (i = 0; i < 0x40; i++) {
+ int eeval = eeprom_read(ioaddr, i);
+ printk("%4.4x%s", eeval, i % 16 != 15 ? " " : "\n");
+ sum += eeval;
+ }
+ printk(KERN_DEBUG "%s: EEPROM checksum %4.4X (expected value 0xBABA).\n",
+ dev->name, sum & 0xffff);
+ }
+#endif
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+ dev->change_mtu = &change_mtu;
+
+ /* Turn off VLAN and clear the VLAN filter. */
+ writel(0x04000000, ioaddr + VLANetherType);
+ for (i = 0x600; i < 0x800; i+=4)
+ writel(0, ioaddr + i);
+ np->tx_config = 0x80000020;
+ writel(np->tx_config, ioaddr + TxConfigReg);
+ {
+ /* EEPROM word 10 supplies chip-control configuration bits. */
+ int eeword10 = eeprom_read(ioaddr, 10);
+ writel(((eeword10 & 0x01e0) << 17) | ((eeword10 & 0x0010) << 3),
+ ioaddr + ChipCtrl);
+ }
+
+ return dev;
+}
+
+
+/* Read the EEPROM interface with a serial bit streams generated by the
+ host processor.
+ The example below is for the common 93c46 EEPROM, 64 16 bit words. */
+
+/* Delay between EEPROM clock transitions.
+ The read effectively flushes the write cache to prevent quick double-writes.
+*/
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x01, EE_ChipSelect=0x02, EE_DataIn=0x08, EE_DataOut=0x04,
+};
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataOut)
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
+
+/* Read one 16-bit word from the serial EEPROM at word offset LOCATION.
+   The 93-series bus is bit-banged through the EECtrl register; BASEVAL
+   preserves the register's non-EEPROM control bits across the access. */
+static int eeprom_read(long addr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = addr + EECtrl;
+ /* Command word (start bits + read opcode + 6-bit address), pre-shifted
+ left 16 so the 16 data bits clock in after the command bits. */
+ int read_cmd = ((EE_ReadCmd<<6) | location) << 16 ;
+ int cmd_len = 2+6+16;
+ u32 baseval = readl(ee_addr) & ~0x0f;
+
+ writel(EE_Write0 | baseval, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = cmd_len; i >= 0; i--) {
+ int dataval = baseval |
+ ((read_cmd & (1 << i)) ? EE_Write1 : EE_Write0);
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ /* Raise the clock, then sample the data-in bit. */
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(baseval | EE_Write0, ee_addr);
+ writel(baseval & ~EE_ChipSelect, ee_addr);
+ return retval;
+}
+
+
+
+/* Bring the interface up: allocate the descriptor-ring pages, hook the
+   interrupt, program the Rx/Tx engines and station address, enable
+   interrupts, and start the media-watch timer.
+   Returns 0 on success or a negative errno. */
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Some chips may need to be reset. */
+
+ MOD_INC_USE_COUNT;
+
+ /* Each ring occupies one page; the pages persist until module unload. */
+ if (np->tx_ring == 0)
+ np->tx_ring = (void *)get_free_page(GFP_KERNEL);
+ if (np->tx_ring == 0) {
+ /* Drop the refcount taken above (the original leaked it here). */
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+ if (np->rx_ring == 0)
+ np->rx_ring = (void *)get_free_page(GFP_KERNEL);
+ /* Bug fix: the original retested tx_ring here, so an rx_ring
+ allocation failure went undetected. Check rx_ring and unwind. */
+ if (np->rx_ring == 0) {
+ free_page((long)np->tx_ring);
+ np->tx_ring = 0;
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+
+ /* Note that both request_irq() and init_ring() call kmalloc(), which
+ break the global kernel lock protecting this routine. */
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ /* Hand the Rx ring to the chip (64-bit-safe high word handling). */
+ writel(0, ioaddr + RxControl);
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+#if ADDRLEN == 64
+ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtr + 4);
+#else
+ writel(0, ioaddr + RxRingPtr + 4);
+#endif
+
+ writel(RX_RING_SIZE * sizeof(struct rx_desc), ioaddr + RxRingLen);
+ writel(0x80000000 | rx_intr_holdoff, ioaddr + RxQ0IntrDelay);
+ writel(0, ioaddr + RxDescHead);
+ writel(np->dirty_rx + RX_RING_SIZE, ioaddr + RxDescTail);
+
+ /* Zero the unused Rx ring #1. */
+ writel(0, ioaddr + RxQ1IntrDelay);
+ writel(0, ioaddr + RxRing1Ptr);
+ writel(0, ioaddr + RxRing1Ptr + 4);
+ writel(0, ioaddr + RxRing1Len);
+ writel(0, ioaddr + RxDesc1Head);
+ writel(0, ioaddr + RxDesc1Tail);
+
+ /* Use 0x002000FA for half duplex. */
+ writel(0x000400FA, ioaddr + TxControl);
+
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+#if ADDRLEN == 64
+ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtr + 4);
+#else
+ writel(0, ioaddr + TxRingPtr + 4);
+#endif
+
+ writel(TX_RING_SIZE * sizeof(struct tx_desc), ioaddr + TxRingLen);
+ writel(0, ioaddr + TxDescHead);
+ writel(0, ioaddr + TxDescTail);
+ writel(0, ioaddr + TxQState);
+ writel(0, ioaddr + TxQState + 4);
+
+ /* Set IPG register with Ethernet standard values. */
+ writel(0x00A0080A, ioaddr + TxIPG);
+ /* The delay before announcing a Tx has completed. */
+ writel(tx_intr_holdoff, ioaddr + TxIntrDelay);
+
+ /* Load the station address into the first CAM slot. */
+ writel(((u32*)dev->dev_addr)[0], ioaddr + RxAddrCAM);
+ writel(0x80000000 | ((((u32*)dev->dev_addr)[1]) & 0xffff),
+ ioaddr + RxAddrCAM + 4);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ np->in_interrupt = 0;
+
+ np->rx_mode = RxCtrlEnable;
+ set_rx_mode(dev);
+
+ /* Tx mode */
+ np->tx_config = 0x80000020;
+ writel(np->tx_config, ioaddr + TxConfigReg);
+
+ /* Flow control */
+ writel(0x00C28001, ioaddr + FlowCtrlAddrLo);
+ writel(0x00000100, ioaddr + FlowCtrlAddrHi);
+ writel(0x8808, ioaddr + FlowCtrlType);
+ writel(0x0100, ioaddr + FlowCtrlTimer);
+ writel(0x8000, ioaddr + FlowCtrlThrshHi);
+ writel(0x4000, ioaddr + FlowCtrlThrshLo);
+
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(IntrTxDone | IntrLinkChange | IntrRxDone | IntrPCIErr
+ | IntrRxEmpty | IntrRxSErr, ioaddr + IntrEnable);
+
+ /* writel(1, dev->base_addr + RxCmd);*/
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x Rx %x Tx %x.\n",
+ dev->name, (int)readl(ioaddr + ChipStatus),
+ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + TxStatus));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+/* Update for jumbo frames...
+ Changing the MTU while active is not allowed.
+ */
+/* Set a new MTU.  Jumbo frames are not supported here: the accepted range
+   is 68..1500 bytes, and the interface must be down.
+   Returns 0 on success, -EINVAL for an out-of-range size, or -EBUSY when
+   the interface is running. */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ int status = 0;
+
+ if (new_mtu < 68 || new_mtu > 1500)
+ status = -EINVAL;
+ else if (netif_running(dev))
+ status = -EBUSY;
+ else
+ dev->mtu = new_mtu;
+ return status;
+}
+
+/* Re-examine the link configuration after a (possible) link change.
+   Currently this only reads and reports the relevant registers; the
+   forced-duplex handling is an unfinished stub. */
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int chip_ctrl = readl(ioaddr + ChipCtrl);
+ int rx_cfg = readl(ioaddr + RxConfigReg);
+ int tx_cfg = readl(ioaddr + TxConfigReg);
+#if 0
+ int chip_status = readl(ioaddr + ChipStatus);
+#endif
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Link changed status. Ctrl %x rxcfg %8.8x "
+ "txcfg %8.8x.\n",
+ dev->name, chip_ctrl, rx_cfg, tx_cfg);
+ if (np->medialock) {
+ /* Intentionally empty: forced-media handling not implemented. */
+ if (np->full_duplex)
+ ;
+ }
+ /* writew(new_tx_mode, ioaddr + TxMode); */
+}
+
+/* Media-monitor / watchdog timer, rescheduled every 10 seconds.  Dumps
+   status when timer debugging is enabled, fires the Tx watchdog when a
+   transmit has been outstanding too long with work still pending, then
+   rechecks the duplex setting and re-arms itself. */
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x, "
+ "Tx %x Rx %x.\n",
+ dev->name, (int)readl(ioaddr + ChipStatus),
+ (int)readl(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
+
+ /* This will either have a small false-trigger window or will not catch
+ tbusy incorrectly set when the queue is empty. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT
+ && (np->cur_tx - np->dirty_tx > 0 || netif_queue_paused(dev)))
+ tx_timeout(dev);
+
+ check_duplex(dev);
+
+ /* Re-arm for the next tick. */
+ np->timer.expires = jiffies + 10*HZ;
+ add_timer(&np->timer);
+}
+
+/* Tx watchdog: invoked when a transmit has been outstanding longer than
+   TX_TIMEOUT.  Logs diagnostic register/ring state and counts the error;
+   the actual chip stop/restart is left as a stub. */
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + ChipStatus));
+
+#ifndef __alpha__
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ /* Dump the Tx register block and both descriptor rings. */
+ printk(KERN_DEBUG " Tx registers: ");
+ for (i = 0x400; i < 0x444; i += 8)
+ printk(" %8.8x", (int)readl(ioaddr + i));
+ printk("\n"KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", np->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes . */
+
+ /* Trigger an immediate transmit demand. */
+
+ /* Reset the watchdog clock and record the error. */
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits.
+   Called from netdev_open() after the ring pages exist.  Rx buffer
+   allocation failures are tolerated; the shortfall is recorded in
+   dirty_rx so the refill loop in netdev_rx() can make it up later. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ /* Standard-frame buffer, or MTU plus slack for larger MTUs. */
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_skbuff[i] = 0;
+ }
+
+ /* The number of ring descriptors is set by the ring length register,
+ thus the chip does not use 'next_desc' chains. */
+
+ /* Fill in the Rx buffers. Allocation failures are acceptable. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
+ np->rx_ring[i].buf_addr_hi = 0;
+ np->rx_ring[i].status = 0;
+ }
+ /* Negative shortfall stored as unsigned: cur_rx - dirty_rx then counts
+ the buffers still owned by the driver. */
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+/* Queue one sk_buff for transmission.  Fills the next Tx descriptor,
+   manages the software full/paused queue state, and kicks the chip by
+   advancing TxDescTail.  Returns 0 when the packet was queued, 1 when
+   the queue is busy (caller will retry). */
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ /* Note: Descriptors may be uncached. Write each field only once. */
+ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
+ np->tx_ring[entry].buf_addr_hi = 0;
+ np->tx_ring[entry].cmd_length = cpu_to_le32(TxCmdDoTx | skb->len);
+ np->tx_ring[entry].status = 0;
+
+ /* Non-CC architectures: explicitly flush descriptor and packet.
+ cache_flush(np->tx_ring[entry], sizeof np->tx_ring[entry]);
+ cache_flush(skb->data, skb->len);
+ */
+
+ np->cur_tx++;
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile int)np->dirty_tx < TX_QUEUE_LEN - 2) {
+ netif_unpause_tx_queue(dev);
+ np->tx_full = 0;
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ /* Inform the chip we have another Tx. */
+ if (np->msg_level & NETIF_MSG_TX_QUEUED)
+ printk(KERN_DEBUG "%s: Tx queued to slot %d, desc tail now %d "
+ "writing %d.\n",
+ dev->name, entry, (int)readl(dev->base_addr + TxDescTail),
+ np->cur_tx % TX_RING_SIZE);
+ writel(np->cur_tx % TX_RING_SIZE, dev->base_addr + TxDescTail);
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d (%x) queued in slot %d.\n",
+ dev->name, np->cur_tx, (int)virt_to_bus(&np->tx_ring[entry]),
+ entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread.  Loops until the status register reads clear or
+ max_interrupt_work events have been handled. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int work_limit;
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ work_limit = np->max_interrupt_work;
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#endif
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ /* All-ones presumably indicates absent/unresponsive hardware. */
+ if (intr_status == 0 || intr_status == 0xffffffff)
+ break;
+
+ if (intr_status & IntrRxDone)
+ netdev_rx(dev);
+
+ /* Reap completed Tx descriptors (non-zero status) and free skbs. */
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if (np->tx_ring[entry].status == 0)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, np->tx_ring[entry].status);
+ np->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis to mark the queue non-full. */
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | IntrLinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+
+ if (--work_limit < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ clear_bit(0, (void*)&dev->interrupt);
+#endif
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation.  Walks the Rx ring passing
+ completed packets up the stack, then refills empty ring slots. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ /* Number of ring slots the driver currently owns. */
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (np->rx_head_desc->status & cpu_to_le32(RxDescDone)) {
+ struct rx_desc *desc = np->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->status);
+ int data_size = le32_to_cpu(desc->csum_length);
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RxDescEndPkt)) {
+ /* Frame spanned multiple buffers: counted as an error; the
+ buffer is recycled by the refill loop below. */
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, np->cur_rx, data_size, desc_status);
+ np->stats.rx_length_errors++;
+ } else {
+ struct sk_buff *skb;
+ /* Reported length should omit the CRC. */
+ int pkt_len = (data_size & 0xffff) - 4;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ /* Small packet: copy into a fresh skb and keep the ring
+ buffer in place for reuse. */
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ /* Large packet (or small-skb alloc failure): pass the ring
+ buffer itself up; the refill loop replaces it. */
+ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (le32desc_to_virt(np->rx_ring[entry].buf_addr) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in netdev_rx: %p vs. %p / %p.\n",
+ dev->name,
+ le32desc_to_virt(np->rx_ring[entry].buf_addr),
+ skb->head, temp);
+#endif
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (np->msg_level & NETIF_MSG_PKTDATA)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+ "%d.%d.%d.%d.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13],
+ skb->data[14], skb->data[15], skb->data[16],
+ skb->data[17]);
+#endif
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
+ }
+ /* Clearing status returns the descriptor to the chip. */
+ np->rx_ring[entry].status = 0;
+ }
+
+ /* Restart Rx engine if stopped. */
+ /* writel(1, dev->base_addr + RxCmd); */
+ return 0;
+}
+
+/* Handle the uncommon interrupt causes: link changes, statistics counter
+   overflow, PCI faults, and anything else flagged in intr_status. */
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & IntrLinkChange) {
+ int ctrl = readl(ioaddr + ChipCtrl);
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_ERR "%s: Link changed: Autonegotiation on-going.\n",
+ dev->name);
+ /* Bit 0 of the control register is tested as the link state. */
+ if (ctrl & 1)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+
+ /* Fold the hardware counters into the stats before they overflow. */
+ if (intr_status & StatsMax)
+ get_stats(dev);
+
+ if ((np->msg_level & NETIF_MSG_DRV)
+ && (intr_status & ~(IntrLinkChange|StatsMax)))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr)
+ np->stats.tx_fifo_errors++;
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int crc_errs = readl(ioaddr + RxCRCErrs);
+
+ if (crc_errs != 0xffffffff) {
+ /* We need not lock this segment of code for SMP.
+ The non-atomic-add vulnerability is very small
+ and statistics are non-critical. */
+ np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+ }
+
+ return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+ A big-endian version is also available.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c.
+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+/* Bit-serial little-endian AUTODIN II CRC-32 over LENGTH bytes of DATA.
+   Initial value 0xffffffff; returns the raw (non-inverted) CRC. */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ int i;
+
+ for (i = 0; i < length; i++) {
+ unsigned char octet = data[i];
+ int bit;
+
+ for (bit = 0; bit < 8; bit++) {
+ unsigned int lsb_differs = (crc ^ octet) & 1;
+
+ crc >>= 1;
+ if (lsb_differs)
+ crc ^= ethernet_polynomial_le;
+ octet >>= 1;
+ }
+ }
+ return crc;
+}
+
+/* Program the Rx filter: promiscuous, all-multicast, or a perfect-filter
+   CAM for the first 15 multicast addresses plus a 4096-bit hash for the
+   rest.  Writes RxControl only when the mode actually changes. */
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u32 new_mc_filter[128]; /* Multicast filter table */
+ u32 new_rx_mode = np->rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ new_rx_mode |=
+ RxCtrlAcceptBroadcast | RxCtrlAllMulticast | RxCtrlAllUnicast;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ new_rx_mode &= ~RxCtrlAllUnicast;
+ new_rx_mode |= RxCtrlAcceptBroadcast | RxCtrlAllMulticast;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(new_mc_filter, 0, sizeof(new_mc_filter));
+ /* The first 15 addresses go into the perfect-filter CAM slots. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < 15;
+ i++, mclist = mclist->next) {
+ writel(((u32*)mclist->dmi_addr)[0], ioaddr + RxAddrCAM + 8 + i*8);
+ writel((((u32*)mclist->dmi_addr)[1] & 0xffff) | 0x80000000,
+ ioaddr + RxAddrCAM + 12 + i*8);
+ }
+ /* Remaining addresses are folded into the hash filter. */
+ for (; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ set_bit(((u32*)mclist->dmi_addr)[1] & 0xfff,
+ new_mc_filter);
+ }
+ /* Fixed operator precedence: the original "&= ~A | B" cleared only
+ RxCtrlAllUnicast, leaving RxCtrlAllMulticast stuck on after a
+ return from promiscuous/all-multicast mode. */
+ new_rx_mode &= ~(RxCtrlAllUnicast | RxCtrlAllMulticast);
+ new_rx_mode |= RxCtrlAcceptBroadcast;
+ if (dev->mc_count > 15)
+ for (i = 0; i < 128; i++)
+ writel(new_mc_filter[i], ioaddr + MulticastArray + (i<<2));
+ }
+ if (np->rx_mode != new_rx_mode)
+ writel(np->rx_mode = new_rx_mode, ioaddr + RxControl);
+}
+
+/* Handle the driver-private SIOCGPARAMS/SIOCSPARAMS ioctls that read and
+   write the run-time tunables through rq->ifr_data.  Writing requires
+   CAP_NET_ADMIN; unknown commands return -EOPNOTSUPP. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ if (cmd == SIOCGPARAMS) {
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ }
+ if (cmd != SIOCSPARAMS)
+ return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+}
+
+/* Bring the interface down: stop the queue, mask interrupts, reset the
+   chip, kill the media timer, release the IRQ and free every skb still
+   attached to the rings.  The ring pages themselves are kept until
+   module unload.  Always returns 0. */
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, (int)readl(ioaddr + TxStatus),
+ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(~0, ioaddr + IntrDisable);
+ readl(ioaddr + IntrStatus);
+
+ /* Reset everything. */
+ writel(0x04000000, ioaddr + ChipCtrl);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ /* Developer aid: dump both rings, plus the contents of any Rx
+ buffer whose first byte differs from 0x69 (apparently a debug
+ fill pattern -- cannot be confirmed from this file). */
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. buf %8.8x, length %8.8x, status %8.8x.\n",
+ i, np->tx_ring[i].buf_addr, np->tx_ring[i].cmd_length,
+ np->tx_ring[i].status);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].csum_length,
+ np->rx_ring[i].status, np->rx_ring[i].buf_addr);
+ if (np->rx_ring[i].buf_addr) {
+ if (*(u8*)np->rx_skbuff[i]->tail != 0x69) {
+ u16 *pkt_buf = (void *)np->rx_skbuff[i]->tail;
+ int j;
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x", pkt_buf[j]);
+ printk("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Driver power/hot-plug event callback (pci-scan framework).
+   ATTACH/SUSPEND/RESUME adjust the module count or the chip state;
+   DETACH fully tears down and frees the device.  Always returns 0. */
+static int netdev_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writel(~0, ioaddr + IntrDisable);
+ /* writel(2, ioaddr + RxCmd); */
+ /* writew(2, ioaddr + TxCmd); */
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ set_rx_mode(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+ iounmap((char *)dev->base_addr);
+ /* Unlink this device from the root_net_dev list by walking
+ pointer-to-pointer through each node's next_module field. */
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Module load entry point: print the two-line version banner and register
+   the PCI probe table.  Returns pci_drv_register()'s status. */
+int init_module(void)
+{
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&igige_drv_id, NULL);
+}
+
+/* Module unload: unregister every device on the root_net_dev list and
+   release its I/O region, descriptor-ring pages and private storage. */
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&igige_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+ iounmap((char *)(root_net_dev->base_addr));
+ next_dev = np->next_module;
+ /* Bug fix: the tests were inverted ("== 0"), so the ring pages
+ were freed only when they had never been allocated -- leaking
+ both pages on every unload of an opened device. */
+ if (np->tx_ring != 0)
+ free_page((long)np->tx_ring);
+ if (np->rx_ring != 0)
+ free_page((long)np->rx_ring);
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` intel-gige.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c intel-gige.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c intel-gige.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/iow.h b/linux/src/drivers/net/iow.h
new file mode 100644
index 0000000..6e15688
--- /dev/null
+++ b/linux/src/drivers/net/iow.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IOW_H
+#define _ASM_IOW_H
+
+/* no longer used */
+
+#endif
diff --git a/linux/src/drivers/net/kern_compat.h b/linux/src/drivers/net/kern_compat.h
new file mode 100644
index 0000000..39e1934
--- /dev/null
+++ b/linux/src/drivers/net/kern_compat.h
@@ -0,0 +1,285 @@
+#ifndef _KERN_COMPAT_H
+#define _KERN_COMPAT_H
+/* kern_compat.h: Linux PCI network adapter backward compatibility code. */
+/*
+ $Revision: 1.1.2.2 $ $Date: 2007/08/04 21:02:21 $
+
+ Kernel compatibility defines.
+ This file provides macros to mask the difference between kernel versions.
+ It is designed primarily to allow device drivers to be written so that
+ they work with a range of kernel versions.
+
+ Written 1999-2003 Donald Becker, Scyld Computing Corporation
+ This software may be used and distributed according to the terms
+ of the GNU General Public License (GPL), incorporated herein by
+ reference. Drivers interacting with these functions are derivative
+ works and thus are covered the GPL. They must include an explicit
+ GPL notice.
+
+ This code also provides inline scan and activate functions for PCI network
+ interfaces. It has an interface identical to pci-scan.c, but is
+ intended as an include file to simplify using updated drivers with older
+ kernel versions.
+ This code version matches pci-scan.c:v0.05 9/16/99
+
+ The author may be reached as becker@scyld.com, or
+ Donald Becker
+ Penguin Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Other contributors:
+ <none>
+*/
+
+/* We try to use defined values to decide when an interface has changed or
+ added features, but we must have the kernel version number for a few. */
+#if ! defined(LINUX_VERSION_CODE) || (LINUX_VERSION_CODE < 0x10000)
+#include <linux/version.h>
+#endif
+/* Older kernel versions didn't include modversions automatically. */
+#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+/* There was no support for PCI address space mapping in 2.0, but the
+ Alpha needed it. See the 2.2 documentation. */
+#if LINUX_VERSION_CODE < 0x20100 && ! defined(__alpha__)
+#define ioremap(a,b)\
+ (((unsigned long)(a) >= 0x100000) ? vremap(a,b) : (void*)(a))
+#define iounmap(v)\
+ do { if ((unsigned long)(v) >= 0x100000) vfree(v);} while (0)
+#endif
+
+/* Support for adding info about the purpose of and parameters for kernel
+ modules was added in 2.1. */
+#if LINUX_VERSION_CODE < 0x20115
+#define MODULE_AUTHOR(name) extern int nonesuch
+#define MODULE_DESCRIPTION(string) extern int nonesuch
+#define MODULE_PARM(varname, typestring) extern int nonesuch
+#define MODULE_PARM_DESC(var,desc) extern int nonesuch
+#endif
+#if !defined(MODULE_LICENSE)
+#define MODULE_LICENSE(license) \
+static const char __module_license[] __attribute__((section(".modinfo"))) = \
+"license=" license
+#endif
+#if !defined(MODULE_PARM_DESC)
+#define MODULE_PARM_DESC(var,desc) \
+const char __module_parm_desc_##var[] \
+__attribute__((section(".modinfo"))) = \
+"parm_desc_" __MODULE_STRING(var) "=" desc
+#endif
+
+/* SMP and better multiarchitecture support were added.
+ Using an older kernel means we assume a little-endian uniprocessor.
+*/
+#if LINUX_VERSION_CODE < 0x20123
+#define hard_smp_processor_id() smp_processor_id()
+//#define test_and_set_bit(val, addr) set_bit(val, addr)
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#define le16_to_cpu(val) (val)
+#define le16_to_cpus(val) /* In-place conversion. */
+#define le32_to_cpu(val) (val)
+#define cpu_to_be16(val) ((((val) & 0xff) << 8) + (((val) >> 8) & 0xff))
+#define cpu_to_be32(val) ((cpu_to_be16(val) << 16) + cpu_to_be16((val) >> 16))
+typedef long spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+#define spin_lock(lock)
+#define spin_unlock(lock)
+#define spin_lock_irqsave(lock, flags) do {save_flags(flags); cli();} while(0)
+#define spin_unlock_irqrestore(lock, flags) restore_flags(flags)
+#endif
+
+#if LINUX_VERSION_CODE <= 0x20139
+#define net_device_stats enet_statistics
+#else
+#define NETSTATS_VER2
+#endif
+
+/* These are used by the netdrivers to report values from the
+ MII (Media Independent Interface) management registers.
+*/
+#ifndef SIOCGMIIPHY
+#define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Get the PHY in use. */
+#define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read a PHY register. */
+#define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write a PHY register. */
+#endif
+#ifndef SIOCGPARAMS
+#define SIOCGPARAMS (SIOCDEVPRIVATE+3) /* Read operational parameters. */
+#define SIOCSPARAMS (SIOCDEVPRIVATE+4) /* Set operational parameters. */
+#endif
+
+#if !defined(HAVE_NETIF_MSG)
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ /* 2000 is reserved. */
+ NETIF_MSG_WOL = 0x4000,
+ NETIF_MSG_MISC = 0x8000,
+ NETIF_MSG_RXFILTER = 0x10000,
+};
+#define NETIF_MSG_MAX 0x10000
+#endif
+
+#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x8000
+#define NETIF_MSG_MISC 0x8000
+#endif
+#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x10000
+#define NETIF_MSG_RXFILTER 0x10000
+#endif
+
+#if LINUX_VERSION_CODE < 0x20155
+#include <linux/bios32.h>
+#define PCI_SUPPORT_VER1
+/* A minimal version of the 2.2.* PCI support that handles configuration
+ space access.
+ Drivers that actually use pci_dev fields must do explicit compatibility.
+ Note that the struct pci_dev * "pointer" is actually a byte mapped integer!
+*/
+#if LINUX_VERSION_CODE < 0x20014
+struct pci_dev { int not_used; };
+#endif
+
+#define pci_find_slot(bus, devfn) (struct pci_dev*)((bus<<8) | devfn | 0xf0000)
+#define bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
+#define devfn_number(pci_dev) (((int)(pci_dev)) & 0xff)
+#define pci_bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
+#define pci_devfn(pci_dev) (((int)(pci_dev)) & 0xff)
+
+#ifndef CONFIG_PCI
+/* Stub for kernels built without PCI support: report no PCI bus present. */
+extern inline int pci_present(void) { return 0; }
+#else
+#define pci_present pcibios_present
+#endif
+
+#define pci_read_config_byte(pdev, where, valp)\
+ pcibios_read_config_byte(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_word(pdev, where, valp)\
+ pcibios_read_config_word(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_dword(pdev, where, valp)\
+ pcibios_read_config_dword(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_write_config_byte(pdev, where, val)\
+ pcibios_write_config_byte(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_word(pdev, where, val)\
+ pcibios_write_config_word(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_dword(pdev, where, val)\
+ pcibios_write_config_dword(bus_number(pdev), devfn_number(pdev), where, val)
+#else
+#define PCI_SUPPORT_VER2
+#define pci_bus_number(pci_dev) ((pci_dev)->bus->number)
+#define pci_devfn(pci_dev) ((pci_dev)->devfn)
+#endif
+
+/* The arg count changed, but function name did not.
+ We cover that bad choice by defining a new name.
+*/
+#if LINUX_VERSION_CODE < 0x20159
+#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE)
+#define dev_free_skb_irq(skb) dev_kfree_skb(skb, FREE_WRITE)
+#elif LINUX_VERSION_CODE < 0x20400
+#define dev_free_skb(skb) dev_kfree_skb(skb)
+#define dev_free_skb_irq(skb) dev_kfree_skb(skb)
+#else
+#define dev_free_skb(skb) dev_kfree_skb(skb)
+#define dev_free_skb_irq(skb) dev_kfree_skb_irq(skb)
+#endif
+
+/* Added at the suggestion of Jes Sorensen. */
+#if LINUX_VERSION_CODE > 0x20153
+#include <linux/init.h>
+#else
+#define __init
+#define __initdata
+#define __initfunc(__arginit) __arginit
+#endif
+
+/* The old 'struct device' used a too-generic name. */
+#if LINUX_VERSION_CODE < 0x2030d
+#define net_device device
+#endif
+
+/* More changes for the 2.4 kernel, some in the zillion 2.3.99 releases. */
+#if LINUX_VERSION_CODE < 0x20363
+#define DECLARE_MUTEX(name) struct semaphore (name) = MUTEX;
+#define down_write(semaphore_p) down(semaphore_p)
+#define down_read(semaphore_p) down(semaphore_p)
+#define up_write(semaphore_p) up(semaphore_p)
+#define up_read(semaphore_p) up(semaphore_p)
+/* Note that the kernel version has a broken time_before()! */
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_before(a,b) ((long)(a) - (long)(b) < 0)
+#else
+#define get_free_page get_zeroed_page
+#endif
+
+/* The 2.2 kernels added the start of capability-based security for operations
+ that formerly could only be done by root.
+*/
+#if ! defined(CAP_NET_ADMIN)
+#define capable(CAP_XXX) (suser())
+#endif
+
+#if ! defined(HAVE_NETIF_QUEUE)
+#define netif_wake_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_start_tx_queue(dev) do { (dev)->tbusy = 0; dev->start = 1; } while (0)
+#define netif_stop_tx_queue(dev) do { (dev)->tbusy = 1; dev->start = 0; } while (0)
+#define netif_queue_paused(dev) ((dev)->tbusy != 0)
+/* Splitting these lines exposes a bug in some preprocessors. */
+#define netif_pause_tx_queue(dev) (test_and_set_bit( 0, (void*)&(dev)->tbusy))
+#define netif_unpause_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); } while (0)
+#define netif_resume_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
+
+#define netif_running(dev) ((dev)->start != 0)
+#define netif_device_attach(dev) do {; } while (0)
+#define netif_device_detach(dev) do {; } while (0)
+#define netif_device_present(dev) (1)
+#define netif_set_tx_timeout(dev, func, deltajiffs) do {; } while (0)
+#define netif_link_down(dev) (dev)->flags &= ~IFF_RUNNING
+#define netif_link_up(dev) (dev)->flags |= IFF_RUNNING
+
+#else
+
+#define netif_start_tx_queue(dev) netif_start_queue(dev)
+#define netif_stop_tx_queue(dev) netif_stop_queue(dev)
+#define netif_queue_paused(dev) netif_queue_stopped(dev)
+#define netif_resume_tx_queue(dev) netif_wake_queue(dev)
+/* Only used in transmit path. No function in 2.4. */
+#define netif_pause_tx_queue(dev) 0
+#define netif_unpause_tx_queue(dev) do {; } while (0)
+
+#ifdef __LINK_STATE_NOCARRIER
+#define netif_link_down(dev) netif_carrier_off(dev)
+#define netif_link_up(dev) netif_carrier_on(dev)
+#else
+#define netif_link_down(dev) (dev)->flags &= ~IFF_RUNNING
+#define netif_link_up(dev) (dev)->flags |= IFF_RUNNING
+#endif
+
+#endif
+#ifndef PCI_DMA_BUS_IS_PHYS
+#define pci_dma_sync_single(pci_dev, base_addr, extent, tofrom) do {; } while (0)
+#define pci_map_single(pci_dev, base_addr, extent, dir) virt_to_bus(base_addr)
+#define pci_unmap_single(pci_dev, base_addr, extent, dir) do {; } while (0)
+#endif
+
+#endif
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/lance.c b/linux/src/drivers/net/lance.c
new file mode 100644
index 0000000..fe3cf68
--- /dev/null
+++ b/linux/src/drivers/net/lance.c
@@ -0,0 +1,1293 @@
+/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
+/*
+ Written/copyright 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
+ with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Fixing alignment problem with 1.3.* kernel and some minor changes
+ by Andrey V. Savochkin, 1996.
+
+ Problems or questions may be sent to Donald Becker (see above) or to
+ Andrey Savochkin -- saw@shade.msu.ru or
+ Laboratory of Computation Methods,
+ Department of Mathematics and Mechanics,
+ Moscow State University,
+ Leninskye Gory, Moscow 119899
+
+ But I should to inform you that I'm not an expert in the LANCE card
+ and it may occurs that you will receive no answer on your mail
+ to Donald Becker. I didn't receive any answer on all my letters
+ to him. Who knows why... But may be you are more lucky? ;->
+ SAW
+
+ Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
+ - added support for Linux/Alpha, but removed most of it, because
+ it worked only for the PCI chip.
+ - added hook for the 32bit lance driver
+ - added PCnetPCI II (79C970A) to chip table
+ Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
+ - hopefully fix above so Linux/Alpha can use ISA cards too.
+ 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
+ v1.12 10/27/97 Module support -djb
+ v1.14 2/3/98 Module support modified, made PCI support optional -djb
+*/
+
+static const char *version = "lance.c:v1.14 2/3/1998 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
+
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+static unsigned int lance_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0};
+int lance_probe(struct device *dev);
+int lance_probe1(struct device *dev, int ioaddr, int irq, int options);
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry lance_drv =
+{"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
+#endif
+
+#ifdef LANCE_DEBUG
+int lance_debug = LANCE_DEBUG;
+#else
+int lance_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the AMD 79C960, the "PCnet-ISA
+single-chip ethernet controller for ISA". This chip is used in a wide
+variety of boards from vendors such as Allied Telesis, HP, Kingston,
+and Boca. This driver is also intended to work with older AMD 7990
+designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
+I use the name LANCE to refer to all of the AMD chips, even though it properly
+refers only to the original 7990.
+
+II. Board-specific settings
+
+The driver is designed to work the boards that use the faster
+bus-master mode, rather than in shared memory mode. (Only older designs
+have on-board buffer memory needed to support the slower shared memory mode.)
+
+Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
+channel. This driver probes the likely base addresses:
+{0x300, 0x320, 0x340, 0x360}.
+After the board is found it generates a DMA-timeout interrupt and uses
+autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
+of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
+probed for by enabling each free DMA channel in turn and checking if
+initialization succeeds.
+
+The HP-J2405A board is an exception: with this board it is easy to read the
+EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
+_know_ the base address -- that field is for writing the EEPROM.)
+
+III. Driver operation
+
+IIIa. Ring buffers
+The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
+the base and length of the data buffer, along with status bits. The length
+of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
+the buffer length (rather than being directly the buffer length) for
+implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to
+ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries
+needlessly uses extra space and reduces the chance that an upper layer will
+be able to reorder queued Tx packets based on priority. Decreasing the number
+of entries makes it more difficult to achieve back-to-back packet transmission
+and increases the chance that Rx ring will overflow. (Consider the worst case
+of receiving back-to-back minimum-sized packets.)
+
+The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
+statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
+avoid the administrative overhead. For the Rx side this avoids dynamically
+allocating full-sized buffers "just in case", at the expense of a
+memory-to-memory data copy for each packet received. For most systems this
+is a good tradeoff: the Rx buffer will always be in low memory, the copy
+is inexpensive, and it primes the cache for later packet processing. For Tx
+the buffers are only used when needed as low-memory bounce buffers.
+
+IIIB. 16M memory limitations.
+For the ISA bus master mode all structures used directly by the LANCE,
+the initialization block, Rx and Tx rings, and data buffers, must be
+accessible from the ISA bus, i.e. in the lower 16M of real memory.
+This is a problem for current Linux kernels on >16M machines. The network
+devices are initialized after memory initialization, and the kernel doles out
+memory from the top of memory downward. The current solution is to have a
+special network initialization routine that's called before memory
+initialization; this will eventually be generalized for all network devices.
+As mentioned before, low-memory "bounce-buffers" are used when needed.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+*/
+
+/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
+ That translates to 4 and 4 (16 == 2^^4).
+ This is a compile-time option for efficiency.
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define LANCE_DATA 0x10
+#define LANCE_ADDR 0x12
+#define LANCE_RESET 0x14
+#define LANCE_BUS_IF 0x16
+#define LANCE_TOTAL_SIZE 0x18
+
+/* The LANCE Rx and Tx ring descriptors.  These layouts are read by the
+   chip itself via bus-master DMA, so field order and widths must match
+   the datasheet exactly. */
+struct lance_rx_head {
+	s32 base;
+	s16 buf_length;			/* This length is 2s complement (negative)! */
+	s16 msg_length;			/* This length is "normal". */
+};
+
+struct lance_tx_head {
+	s32 base;
+	s16 length;				/* Length is 2s complement (negative)! */
+	s16 misc;
+};
+
+/* The LANCE initialization block, described in databook.  Also DMA-read
+   by the chip during (re)initialization. */
+struct lance_init_block {
+	u16 mode;			/* Pre-set mode (reg. 15) */
+	u8 phys_addr[6];	/* Physical ethernet address */
+	u32 filter[2];			/* Multicast filter (unused). */
+	/* Receive and transmit ring base, along with extra bits. */
+	u32 rx_ring;			/* Tx and Rx ring base pointers */
+	u32 tx_ring;
+};
+
+/* Per-device driver state.  The rings are placed first so that the
+   8-byte alignment of the structure itself aligns the descriptors. */
+struct lance_private {
+	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+	struct lance_rx_head rx_ring[RX_RING_SIZE];
+	struct lance_tx_head tx_ring[TX_RING_SIZE];
+	struct lance_init_block	init_block;
+	const char *name;
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	/* The addresses of receive-in-place skbuffs. */
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
+	/* Tx low-memory "bounce buffer" address. */
+	char (*tx_bounce_buffs)[PKT_BUF_SZ];
+	int cur_rx, cur_tx;			/* The next free ring entry */
+	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
+	int dma;					/* ISA DMA channel in use (4 = none). */
+	struct enet_statistics stats;
+	unsigned char chip_version;	/* See lance_chip_type. */
+	char tx_full;				/* Set when the Tx ring has no free slot. */
+	unsigned long lock;			/* Simple flag lock for the xmit path. */
+};
+
+#define LANCE_MUST_PAD 0x00000001
+#define LANCE_ENABLE_AUTOSELECT 0x00000002
+#define LANCE_MUST_REINIT_RING 0x00000004
+#define LANCE_MUST_UNRESET 0x00000008
+#define LANCE_HAS_MISSED_FRAME 0x00000010
+
+/* A mapping from the chip ID number to the part number and features.
+ These are from the datasheets -- in real life the '970 version
+ reportedly has the same ID as the '965. */
+static struct lance_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x0000, "LANCE 7990", /* Ancient lance chip. */
+ LANCE_MUST_PAD + LANCE_MUST_UNRESET},
+ {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
+ it the PCnet32. */
+ {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x0, "PCnet (unknown)",
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+};
+
+enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
+
+/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
+static unsigned char pci_irq_line = 0;
+
+/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
+ Assume yes until we know the memory size. */
+static unsigned char lance_need_isa_bounce_buffers = 1;
+
+static int lance_open(struct device *dev);
+static int lance_open_fail(struct device *dev);
+static void lance_init_ring(struct device *dev, int mode);
+static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
+static int lance_rx(struct device *dev);
+static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int lance_close(struct device *dev);
+static struct enet_statistics *lance_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+
+#ifdef MODULE
+#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
+#define IF_NAMELEN 8 /* # of chars for storing dev->name */
+
+static int io[MAX_CARDS] = { 0, };
+static int dma[MAX_CARDS] = { 0, };
+static int irq[MAX_CARDS] = { 0, };
+
+static char ifnames[MAX_CARDS][IF_NAMELEN] = { {0, }, };
+static struct device dev_lance[MAX_CARDS] =
+{{
+ 0, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL}};
+
+/* Module load: register one interface per "io=" value supplied on the
+   insmod command line.  Autoprobing is deliberately refused for the
+   module build; the first missing io[] entry stops the scan. */
+int init_module(void)
+{
+	int this_dev, found = 0;
+
+	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+		struct device *dev = &dev_lance[this_dev];
+		dev->name = ifnames[this_dev];
+		dev->irq = irq[this_dev];
+		dev->base_addr = io[this_dev];
+		dev->dma = dma[this_dev];
+		dev->init = lance_probe;	/* register_netdev() invokes this probe. */
+		if (io[this_dev] == 0) {
+			if (this_dev != 0) break; /* only complain once */
+			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
+			return -EPERM;
+		}
+		if (register_netdev(dev) != 0) {
+			printk(KERN_WARNING "lance.c: No PCnet/LANCE card found (i/o = 0x%x).\n", io[this_dev]);
+			if (found != 0) return 0;	/* Got at least one. */
+			return -ENXIO;
+		}
+		found++;
+	}
+
+	return 0;
+}
+
+/* Module unload: tear down each interface that was successfully probed,
+   identified by a non-NULL dev->priv. */
+void
+cleanup_module(void)
+{
+	int this_dev;
+
+	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+		struct device *dev = &dev_lance[this_dev];
+		if (dev->priv != NULL) {
+			/* NOTE(review): lp->rx_buffs and lp->tx_bounce_buffs are
+			   kmalloc'd in lance_probe1() but never freed here (leak).
+			   Also dev->priv holds the 8-byte-aligned (possibly offset)
+			   pointer, not the raw kmalloc() return, so this kfree()
+			   may not match the original allocation -- verify. */
+			kfree(dev->priv);
+			dev->priv = NULL;
+			free_dma(dev->dma);
+			release_region(dev->base_addr, LANCE_TOTAL_SIZE);
+			unregister_netdev(dev);
+		}
+	}
+}
+#endif /* MODULE */
+
+/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
+ board probes now that kmalloc() can allocate ISA DMA-able regions.
+ This also allows the LANCE driver to be used as a module.
+ */
+/* Probe for a LANCE/PCnet board.  PCI devices are tried first (when a
+   PCI BIOS is present), then the fixed ISA port list is scanned, reading
+   the station-address PROM signature bytes before committing to the full
+   lance_probe1() sequence.  Returns 0 on success, -ENODEV otherwise. */
+int lance_probe(struct device *dev)
+{
+	int *port, result;
+
+	/* Bounce buffers are only needed when ISA DMA cannot reach all of
+	   physical memory (more than 16MB installed). */
+	if (high_memory <= 16*1024*1024)
+		lance_need_isa_bounce_buffers = 0;
+
+#if defined(CONFIG_PCI)
+	if (pcibios_present()) {
+		int pci_index;
+		if (lance_debug > 1)
+			printk("lance.c: PCI bios is present, checking for devices...\n");
+		for (pci_index = 0; pci_index < 8; pci_index++) {
+			unsigned char pci_bus, pci_device_fn;
+			unsigned int pci_ioaddr;
+			unsigned short pci_command;
+
+			if (pcibios_find_device (PCI_VENDOR_ID_AMD,
+									 PCI_DEVICE_ID_AMD_LANCE, pci_index,
+									 &pci_bus, &pci_device_fn) != 0)
+				break;
+			pcibios_read_config_byte(pci_bus, pci_device_fn,
+									 PCI_INTERRUPT_LINE, &pci_irq_line);
+			pcibios_read_config_dword(pci_bus, pci_device_fn,
+									  PCI_BASE_ADDRESS_0, &pci_ioaddr);
+			/* Remove I/O space marker in bit 0. */
+			pci_ioaddr &= ~3;
+			/* PCI Spec 2.1 states that it is either the driver or PCI card's
+			 * responsibility to set the PCI Master Enable Bit if needed.
+			 * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
+			 */
+			pcibios_read_config_word(pci_bus, pci_device_fn,
+									 PCI_COMMAND, &pci_command);
+			if ( ! (pci_command & PCI_COMMAND_MASTER)) {
+				printk("PCI Master Bit has not been set. Setting...\n");
+				pci_command |= PCI_COMMAND_MASTER;
+				pcibios_write_config_word(pci_bus, pci_device_fn,
+										  PCI_COMMAND, pci_command);
+			}
+			printk("Found PCnet/PCI at %#x, irq %d.\n",
+				   pci_ioaddr, pci_irq_line);
+			result = lance_probe1(dev, pci_ioaddr, pci_irq_line, 0);
+			/* Clear the file-scope flag so later ISA probes are not
+			   mistaken for PCI cards. */
+			pci_irq_line = 0;
+			if (!result) return 0;
+		}
+	}
+#endif  /* defined(CONFIG_PCI) */
+
+	for (port = lance_portlist; *port; port++) {
+		int ioaddr = *port;
+
+		if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
+			/* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
+			   signatures w/ minimal I/O reads */
+			char offset15, offset14 = inb(ioaddr + 14);
+
+			if ((offset14 == 0x52 || offset14 == 0x57) &&
+				((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44)) {
+				result = lance_probe1(dev, ioaddr, 0, 0);
+				if ( !result ) return 0;
+			}
+		}
+	}
+	return -ENODEV;
+}
+
+int lance_probe1(struct device *dev, int ioaddr, int irq, int options)
+{
+ struct lance_private *lp;
+ short dma_channels; /* Mark spuriously-busy DMA channels */
+ int i, reset_val, lance_version;
+ const char *chipname;
+ /* Flags for specific chips or boards. */
+ unsigned char hpJ2405A = 0; /* HP ISA adaptor */
+ int hp_builtin = 0; /* HP on-board ethernet. */
+ static int did_version = 0; /* Already printed version info. */
+
+ /* First we look for special cases.
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
+ There are two HP versions, check the BIOS for the configuration port.
+ This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
+ */
+ if (readw(0x000f0102) == 0x5048) {
+ static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
+ int hp_port = (readl(0x000f00f1) & 1) ? 0x499 : 0x99;
+ /* We can have boards other than the built-in! Verify this is on-board. */
+ if ((inb(hp_port) & 0xc0) == 0x80
+ && ioaddr_table[inb(hp_port) & 3] == ioaddr)
+ hp_builtin = hp_port;
+ }
+ /* We also recognize the HP Vectra on-board here, but check below. */
+ hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
+ && inb(ioaddr+2) == 0x09);
+
+ /* Reset the LANCE. */
+ reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
+
+ /* The Un-Reset needed is only needed for the real NE2100, and will
+ confuse the HP board. */
+ if (!hpJ2405A)
+ outw(reset_val, ioaddr+LANCE_RESET);
+
+ outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+LANCE_DATA) != 0x0004)
+ return -ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+LANCE_ADDR);
+ if (inw(ioaddr+LANCE_ADDR) != 88) {
+ lance_version = 0;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+LANCE_DATA);
+ outw(89, ioaddr+LANCE_ADDR);
+ chip_version |= inw(ioaddr+LANCE_DATA) << 16;
+ if (lance_debug > 2)
+ printk(" LANCE chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
+ if (chip_table[lance_version].id_number == chip_version)
+ break;
+ }
+ }
+
+ /* We can't use init_etherdev() to allocate dev->priv because it must
+ a ISA DMA-able region. */
+ dev = init_etherdev(dev, 0);
+ dev->open = lance_open_fail;
+ chipname = chip_table[lance_version].name;
+ printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ dev->base_addr = ioaddr;
+ request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
+
+ /* Make certain the data structures used by the LANCE are aligned and DMAble. */
+
+ lp = (struct lance_private *)(((unsigned long)kmalloc(sizeof(*lp)+7,
+ GFP_DMA | GFP_KERNEL)+7) & ~7);
+ if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
+ memset(lp, 0, sizeof(*lp));
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (lance_need_isa_bounce_buffers)
+ lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ else
+ lp->tx_bounce_buffs = NULL;
+
+ lp->chip_version = lance_version;
+
+ lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+
+ if (irq) { /* Set iff PCI card. */
+ dev->dma = 4; /* Native bus-master, no DMA channel needed. */
+ dev->irq = irq;
+ } else if (hp_builtin) {
+ static const char dma_tbl[4] = {3, 5, 6, 0};
+ static const char irq_tbl[4] = {3, 4, 5, 9};
+ unsigned char port_val = inb(hp_builtin);
+ dev->dma = dma_tbl[(port_val >> 4) & 3];
+ dev->irq = irq_tbl[(port_val >> 2) & 3];
+ printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (hpJ2405A) {
+ static const char dma_tbl[4] = {3, 5, 6, 7};
+ static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
+ short reset_val = inw(ioaddr+LANCE_RESET);
+ dev->dma = dma_tbl[(reset_val >> 2) & 3];
+ dev->irq = irq_tbl[(reset_val >> 4) & 7];
+ printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
+ short bus_info;
+ outw(8, ioaddr+LANCE_ADDR);
+ bus_info = inw(ioaddr+LANCE_BUS_IF);
+ dev->dma = bus_info & 0x07;
+ dev->irq = (bus_info >> 4) & 0x0F;
+ } else {
+ /* The DMA channel may be passed in PARAM1. */
+ if (dev->mem_start & 0x07)
+ dev->dma = dev->mem_start & 0x07;
+ }
+
+ if (dev->dma == 0) {
+ /* Read the DMA channel status register, so that we can avoid
+ stuck DMA channels in the DMA detection below. */
+ dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ }
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d", dev->irq);
+ else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ boards will work. */
+ autoirq_setup(0);
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ dev->irq = autoirq_report(2);
+ if (dev->irq)
+ printk(", probed IRQ %d", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ return -ENODEV;
+ }
+
+ /* Check for the initialization done bit, 0x0100, which means
+ that we don't need a DMA channel. */
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ dev->dma = 4;
+ }
+
+ if (dev->dma == 4) {
+ printk(", no DMA needed.\n");
+ } else if (dev->dma) {
+ if (request_dma(dev->dma, chipname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ return -ENODEV;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else { /* OK, we have to auto-DMA. */
+ for (i = 0; i < 4; i++) {
+ static const char dmas[] = { 5, 6, 7, 3 };
+ int dma = dmas[i];
+ int boguscnt;
+
+ /* Don't enable a permanently busy DMA channel, or the machine
+ will hang. */
+ if (test_bit(dma, &dma_channels))
+ continue;
+ outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
+ if (request_dma(dma, chipname))
+ continue;
+ set_dma_mode(dma, DMA_MODE_CASCADE);
+ enable_dma(dma);
+
+ /* Trigger an initialization. */
+ outw(0x0001, ioaddr+LANCE_DATA);
+ for (boguscnt = 100; boguscnt > 0; --boguscnt)
+ if (inw(ioaddr+LANCE_DATA) & 0x0900)
+ break;
+ if (inw(ioaddr+LANCE_DATA) & 0x0100) {
+ dev->dma = dma;
+ printk(", DMA %d.\n", dev->dma);
+ break;
+ } else {
+ disable_dma(dma);
+ free_dma(dma);
+ }
+ }
+ if (i == 4) { /* Failure: bail. */
+ printk("DMA detection failed.\n");
+ return -ENODEV;
+ }
+ }
+
+ if (lance_version == 0 && dev->irq == 0) {
+ /* We may auto-IRQ now that we have a DMA channel. */
+ /* Trigger an initialization just for the interrupt. */
+ autoirq_setup(0);
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ dev->irq = autoirq_report(4);
+ if (dev->irq == 0) {
+ printk(" Failed to detect the 7990 IRQ line.\n");
+ return -ENODEV;
+ }
+ printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
+ }
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* Turn on auto-select of media (10baseT or BNC) so that the user
+ can watch the LEDs even if the board isn't opened. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Don't touch 10base2 power bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 0 && did_version++ == 0)
+ printk("%s", version);
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->open = lance_open;
+ dev->hard_start_xmit = lance_start_xmit;
+ dev->stop = lance_close;
+ dev->get_stats = lance_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+
+ return 0;
+}
+
+/* Stub dev->open handler: unconditionally refuses to open the device.
+ Presumably installed in place of lance_open when setup fails --
+ the installer is outside this chunk, so confirm against the caller. */
+static int
+lance_open_fail(struct device *dev)
+{
+ return -ENODEV;
+}
+
+
+
+/* Open the interface: claim the IRQ, put the (already allocated) DMA
+ channel into cascade mode, reset the LANCE, load the init block
+ address into CSR1/CSR2, initialize the Rx/Tx rings and start the
+ chip. Returns 0 on success, -EAGAIN when no IRQ is set or
+ request_irq() fails. */
+static int
+lance_open(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ /* We used to allocate DMA here, but that was silly.
+ DMA lines can't be shared! We now permanently allocate them. */
+
+ /* Reset the LANCE */
+ inw(ioaddr+LANCE_RESET);
+
+ /* The DMA controller is used as a no-operation slave, "cascade mode". */
+ /* dma == 4 means "bus master, no ISA DMA channel" (see probe code). */
+ if (dev->dma != 4) {
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ }
+
+ /* Un-Reset the LANCE, needed only for the NE2100. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
+ outw(0, ioaddr+LANCE_RESET);
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Only touch autoselect bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 1)
+ printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, dev->dma,
+ (u32) virt_to_bus(lp->tx_ring),
+ (u32) virt_to_bus(lp->rx_ring),
+ (u32) virt_to_bus(&lp->init_block));
+
+ lance_init_ring(dev, GFP_KERNEL);
+ /* Re-initialize the LANCE, and start it when done. */
+ /* CSR1 = low 16 bits of the init block bus address... */
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ /* ...CSR2 = high bits. Registers are selected via LANCE_ADDR,
+ then read/written through LANCE_DATA. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+
+ /* CSR4 = 0x0915: feature/mask setup -- see the AMD datasheet for
+ the bit meanings (value kept as-is from the original driver). */
+ outw(0x0004, ioaddr+LANCE_ADDR);
+ outw(0x0915, ioaddr+LANCE_DATA);
+
+ /* CSR0 = 0x0001 (INIT): trigger initialization from the init block. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0001, ioaddr+LANCE_DATA);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ i = 0;
+ /* Poll CSR0 for the init-done bit (0x0100), bounded at 100 reads. */
+ while (i++ < 100)
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+LANCE_DATA);
+
+ if (lance_debug > 2)
+ printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/* The LANCE has been halted for one reason or another (busmaster memory
+ arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ etc.). Modern LANCE variants always reload their ring-buffer
+ configuration when restarted, so we must reinitialize our ring
+ context before restarting. As part of this reinitialization,
+ find all packets still on the Tx ring and pretend that they had been
+ sent (in effect, drop the packets on the floor) - the higher-level
+ protocols will time out and retransmit. It'd be better to shuffle
+ these skbs to a temp list and then actually re-Tx them after
+ restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+*/
+
+/* Free every sk_buff still queued on the Tx ring, effectively dropping
+ those packets on the floor; higher-level protocols will retransmit.
+ Called before the rings are reinitialized on a chip restart. */
+static void
+lance_purge_tx_ring(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the LANCE Rx and Tx rings.
+ 'gfp' is the allocation type: GFP_KERNEL from lance_open(),
+ GFP_ATOMIC when called from lance_restart() in interrupt context.
+ GFP_DMA is always OR'ed in since the buffers must be DMA-reachable. */
+static void
+lance_init_ring(struct device *dev, int gfp)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ lp->lock = 0, lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ void *rx_buff;
+
+ skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
+ lp->rx_skbuff[i] = skb;
+ if (skb) {
+ skb->dev = dev;
+ rx_buff = skb->tail;
+ } else
+ /* No skb available: fall back to a bare DMA buffer. */
+ rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
+ if (rx_buff == NULL)
+ /* base == 0: entry not owned by the chip, no buffer. */
+ lp->rx_ring[i].base = 0;
+ else
+ /* 0x80000000 is the ownership bit: give it to the LANCE. */
+ lp->rx_ring[i].base = (u32)virt_to_bus(rx_buff) | 0x80000000;
+ lp->rx_ring[i].buf_length = -PKT_BUF_SZ; /* chip takes negated length */
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_skbuff[i] = 0;
+ lp->tx_ring[i].base = 0;
+ }
+
+ lp->init_block.mode = 0x0000;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ /* Ring base addresses are 24-bit; the length is encoded in the
+ high bits via the *_RING_LEN_BITS constants. */
+ lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+}
+
+/* Restart a stopped/errored chip. 'csr0_bits' is written to CSR0 to
+ restart it. If 'must_reinit' is set, or the chip type requires a
+ ring reload on restart (LANCE_MUST_REINIT_RING), the Tx ring is
+ purged and both rings rebuilt first. Safe in interrupt context
+ (ring rebuild uses GFP_ATOMIC). */
+static void
+lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+
+ if (must_reinit ||
+ (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
+ lance_purge_tx_ring(dev);
+ lance_init_ring(dev, GFP_ATOMIC);
+ }
+ /* Select CSR0 and write the restart bits. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(csr0_bits, dev->base_addr + LANCE_DATA);
+}
+
+/* Queue a packet for transmission. Returns 0 on success, 1 when the
+ caller should retry later (transmitter busy or queue locked).
+ Also doubles as the watchdog: if tbusy has been set for >= 20 ticks
+ the chip is assumed hung and is reset and restarted. */
+static int
+lance_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 20)
+ return 1;
+ outw(0, ioaddr+LANCE_ADDR);
+ printk("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+ /* CSR0 = 0x0004 (STOP): halt the chip before restarting. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+ lp->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0 ; i < RX_RING_SIZE; i++)
+ printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length);
+ for (i = 0 ; i < TX_RING_SIZE; i++)
+ printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc);
+ printk("\n");
+ }
+#endif
+ lance_restart(dev, 0x0043, 1);
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+ if (lance_debug > 3) {
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+LANCE_DATA));
+ outw(0x0000, ioaddr+LANCE_DATA);
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ (Old-kernel set_bit() returns the previous bit value.) */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ if (set_bit(0, (void*)&lp->lock) != 0) {
+ if (lance_debug > 0)
+ printk("%s: tx queue lock!.\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return 1;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* The old LANCE chips don't automatically pad buffers to min. size. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
+ lp->tx_ring[entry].length =
+ -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ } else
+ lp->tx_ring[entry].length = -skb->len; /* chip takes negated length */
+
+ lp->tx_ring[entry].misc = 0x0000;
+
+ /* If any part of this buffer is >16M we must copy it to a low-memory
+ buffer. */
+ if ((u32)virt_to_bus(skb->data) + skb->len > 0x01000000) {
+ if (lance_debug > 5)
+ printk("%s: bouncing a high-memory packet (%#x).\n",
+ dev->name, (u32)virt_to_bus(skb->data));
+ memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+ /* Data is now in the bounce buffer; the skb can go at once. */
+ dev_kfree_skb (skb, FREE_WRITE);
+ } else {
+ /* Zero-copy: the skb is freed from the interrupt handler. */
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+ }
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0048, ioaddr+LANCE_DATA);
+
+ dev->trans_start = jiffies;
+
+ /* Release the queue lock; decide tbusy with interrupts off so the
+ Tx-done interrupt cannot race the tx_full decision. */
+ save_flags(flags);
+ cli();
+ lp->lock = 0;
+ if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
+ dev->tbusy=0;
+ else
+ lp->tx_full = 1;
+ restore_flags(flags);
+
+ return 0;
+}
+
+/* The LANCE interrupt handler. Loops (bounded by boguscnt) while CSR0
+ reports pending events: dispatches Rx work to lance_rx(), reaps
+ completed Tx descriptors (freeing skbs and accumulating error
+ statistics), logs misc errors, and restarts the chip after fatal
+ conditions (Tx FIFO error, bus-master arbitration failure). */
+static void
+lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)dev_id;
+ struct lance_private *lp;
+ int csr0, ioaddr, boguscnt=10;
+ int must_restart;
+
+ if (dev == NULL) {
+ printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct lance_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ /* Select CSR0; all status reads/acks below go through LANCE_DATA. */
+ outw(0x00, dev->base_addr + LANCE_ADDR);
+ while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
+ && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
+
+ must_restart = 0;
+
+ if (lance_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ lance_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = lp->tx_ring[entry].base;
+
+ /* Sign bit of .base is the ownership bit. */
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x40000000) {
+ /* There was a major error, log it. */
+ int err_status = lp->tx_ring[entry].misc;
+ lp->stats.tx_errors++;
+ if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x1000) lp->stats.tx_window_errors++;
+ if (err_status & 0x4000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x18000000)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb if it's not a data-only copy
+ in the bounce buffer. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x0004, dev->base_addr + LANCE_DATA);
+ lance_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x7940, dev->base_addr + LANCE_DATA);
+
+ if (lance_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + LANCE_ADDR),
+ inw(dev->base_addr + LANCE_DATA));
+
+ dev->interrupt = 0;
+ return;
+}
+
+/* Service the receive ring: for each descriptor we own, either record
+ the error statistics or copy the frame into a freshly allocated skb
+ and hand it to the network stack, then return the buffer to the
+ chip. Called only from the interrupt handler. Always returns 0. */
+static int
+lance_rx(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up.
+ (Sign bit of .base is the chip-ownership bit.) */
+ while (lp->rx_ring[entry].base >= 0) {
+ int status = lp->rx_ring[entry].base >> 24;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].base &= 0x03ffffff;
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net3. */
+ /* msg_length includes the 4-byte trailing CRC. */
+ short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len<60)
+ {
+ printk("%s: Runt packet!\n",dev->name);
+ lp->stats.rx_errors++;
+ }
+ else
+ {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ /* Count how many ring entries the chip still owns;
+ if nearly none remain, drop this frame so the
+ ring cannot deadlock while memory is tight. */
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].base |= 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
+ pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
+ lp->rx_ring[entry].base |= 0x80000000;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+/* Close the interface: capture the final missed-frame count (where the
+ chip supports it), stop the LANCE, release the DMA channel and IRQ,
+ and free every buffer still attached to the rings. */
+static int
+lance_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ /* CSR112 holds the missed-frame counter on these chips. */
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ }
+ outw(0, ioaddr+LANCE_ADDR);
+
+ if (lance_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+
+ /* dma == 4 means bus master: no ISA DMA channel to release. */
+ if (dev->dma != 4)
+ disable_dma(dev->dma);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = lp->rx_skbuff[i];
+ lp->rx_skbuff[i] = 0;
+ lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
+ if (skb) {
+ skb->free = 1;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i])
+ dev_kfree_skb(lp->tx_skbuff[i], FREE_WRITE);
+ lp->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Return the interface statistics. On chips that keep a missed-frame
+ counter (CSR112), refresh it first -- done with interrupts disabled
+ and the register-address pointer saved/restored, since the interrupt
+ handler also banks through LANCE_ADDR. */
+static struct enet_statistics *
+lance_get_stats(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ save_flags(flags);
+ cli();
+ saved_addr = inw(ioaddr+LANCE_ADDR);
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ outw(saved_addr, ioaddr+LANCE_ADDR);
+ restore_flags(flags);
+ }
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+/* Reprogram the Rx filter: promiscuous mode via the CSR15 mode bit, or
+ a (currently all-ones/all-zeros) multicast hash in CSR8-11. The
+ chip must be stopped while the registers change, then is restarted. */
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outw(0, ioaddr+LANCE_ADDR);
+ outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int i;
+ int num_addrs=dev->mc_count;
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ /* num_addrs == 0: reject all multicast; otherwise accept all. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ /* CSR8..CSR11 hold the 64-bit logical-address filter. */
+ outw(8 + i, ioaddr+LANCE_ADDR);
+ outw(multicast_table[i], ioaddr+LANCE_DATA);
+ }
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
+ }
+
+ lance_restart(dev, 0x0142, 0); /* Resume normal operation */
+
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/myson803.c b/linux/src/drivers/net/myson803.c
new file mode 100644
index 0000000..545d124
--- /dev/null
+++ b/linux/src/drivers/net/myson803.c
@@ -0,0 +1,1650 @@
+/* myson803.c: A Linux device driver for the Myson mtd803 Ethernet chip. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/myson803.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"myson803.c:v1.05 3/10/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/drivers.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: myson803_probe
+config-in: tristate 'Myson MTD803 series Ethernet support' CONFIG_MYSON_ETHER
+
+c-help-name: Myson MTD803 PCI Ethernet support
+c-help-symbol: CONFIG_MYSON_ETHER
+c-help: This driver is for the Myson MTD803 Ethernet adapter series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 40;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit Tx ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/* Kernels before 2.1.0 cannot map the high addrs assigned by some BIOSes. */
+#if (LINUX_VERSION_CODE < 0x20100) || ! defined(MODULE)
+#define USE_IO_OPS
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Myson mtd803 Ethernet driver");
+MODULE_LICENSE("GPL");
+/* List in order of common use. */
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(full_duplex, "Non-zero to force full duplex, "
+ "non-negotiated link (deprecated).");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Maximum events handled per interrupt");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Myson mtd803 chip.
+It should work with other Myson 800 series chips.
+
+II. Board-specific settings
+
+None.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+When unaligned buffers are permitted by the hardware (and always on copies)
+frames are put into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IIId. SMP semantics
+
+The following are serialized with respect to each other via the "xmit_lock".
+ dev->hard_start_xmit() Transmit a packet
+ dev->tx_timeout() Transmit watchdog for stuck Tx
+ dev->set_multicast_list() Set the receive filter.
+Note: The Tx timeout watchdog code is implemented by the timer routine in
+kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
+driver interface.
+
+The following fall under the global kernel lock. The module will not be
+unloaded during the call, unless a call with a potential reschedule e.g.
+kmalloc() is called. No other synchronization assertion is made.
+ dev->open()
+ dev->do_ioctl()
+ dev->get_stats()
+Caution: The lock for dev->open() is commonly broken with request_irq() or
+kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
+get_stats(), or additional module locking code must be implemented.
+
+The following is self-serialized (no simultaneous entry)
+ A handler registered with request_irq().
+
+IV. Notes
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://scyld.com/expert/NWay.html
+http://www.myson.com.hk/mtd/datasheet/mtd803.pdf
+ Myson does not require a NDA to read the datasheet.
+
+IVc. Errata
+
+No undocumented errata.
+*/
+
+
+
+/* PCI probe routines. */
+
+static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int netdev_pwr_event(void *dev_instance, int event);
+
+/* Chips prior to the 803 have an external MII transceiver. */
+enum chip_capability_flags { HasMIIXcvr=1, HasChipXcvr=2 };
+
+/* Access-mode/BAR selection and region size for pci-scan, chosen at
+ compile time: port I/O (BAR0) or memory-mapped I/O (BAR1). */
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#define PCI_IOSIZE 256
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#define PCI_IOSIZE 1024
+#endif
+
+/* Supported devices, matched on the 32-bit device+vendor ID word
+ (low word 0x1516 is presumably Myson's vendor ID -- confirm). */
+static struct pci_id_info pci_id_tbl[] = {
+ {"Myson mtd803 Fast Ethernet", {0x08031516, 0xffffffff, },
+ PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
+ {"Myson mtd891 Gigabit Ethernet", {0x08911516, 0xffffffff, },
+ PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
+ {0,}, /* 0 terminated list. */
+};
+
+/* Driver registration record handed to the pci-scan framework:
+ name, flags, PCI class, ID table, probe and power-event hooks. */
+struct drv_id_info myson803_drv_id = {
+ "myson803", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, myson_probe1,
+ netdev_pwr_event };
+
+/* This driver was written to use PCI memory space, however x86-oriented
+ hardware sometimes works only with I/O space accesses. */
+#ifdef USE_IO_OPS
+/* Remap the MMIO accessors to their port-I/O equivalents so the body
+ of the driver is written once against readb()/writel() etc. */
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the various registers.
+ Most accesses must be longword aligned. */
+enum register_offsets {
+ StationAddr=0x00, MulticastFilter0=0x08, MulticastFilter1=0x0C,
+ FlowCtrlAddr=0x10, RxConfig=0x18, TxConfig=0x1a, PCIBusCfg=0x1c,
+ TxStartDemand=0x20, RxStartDemand=0x24,
+ RxCurrentPtr=0x28, TxRingPtr=0x2c, RxRingPtr=0x30,
+ IntrStatus=0x34, IntrEnable=0x38,
+ FlowCtrlThreshold=0x3c,
+ MIICtrl=0x40, EECtrl=0x40, RxErrCnts=0x44, TxErrCnts=0x48,
+ PHYMgmt=0x4c,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxErr=0x0002, IntrRxDone=0x0004, IntrTxDone=0x0008,
+ IntrTxEmpty=0x0010, IntrRxEmpty=0x0020, StatsMax=0x0040, RxEarly=0x0080,
+ TxEarly=0x0100, RxOverflow=0x0200, TxUnderrun=0x0400,
+ IntrPCIErr=0x2000, NWayDone=0x4000, LinkChange=0x8000,
+};
+
+/* Bits in the RxMode (np->txrx_config) register.
+ Note: Rx filter and Tx mode share one register in this chip. */
+enum rx_mode_bits {
+ RxEnable=0x01, RxFilter=0xfe,
+ AcceptErr=0x02, AcceptRunt=0x08, AcceptBroadcast=0x40,
+ AcceptMulticast=0x20, AcceptAllPhys=0x80, AcceptMyPhys=0x00,
+ RxFlowCtrl=0x2000,
+ TxEnable=0x40000, TxModeFDX=0x00100000, TxThreshold=0x00e00000,
+};
+
+/* Misc. bits. */
+enum misc_bits {
+ BCR_Reset=1, /* PCIBusCfg */
+ TxThresholdInc=0x200000, /* Step used on Tx underrun recovery. */
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ u32 status; /* Ownership + completion/error bits below. */
+ u32 ctrl_length; /* Command bits and buffer/frame length. */
+ u32 buf_addr; /* Bus address of the data buffer. */
+ u32 next_desc; /* Bus address of the next descriptor. */
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000,
+ RxDescStartPacket=0x0800, RxDescEndPacket=0x0400, RxDescWholePkt=0x0c00,
+ RxDescErrSum=0x80, RxErrRunt=0x40, RxErrLong=0x20, RxErrFrame=0x10,
+ RxErrCRC=0x08, RxErrCode=0x04,
+ TxErrAbort=0x2000, TxErrCarrier=0x1000, TxErrLate=0x0800,
+ TxErr16Colls=0x0400, TxErrDefer=0x0200, TxErrHeartbeat=0x0100,
+ TxColls=0x00ff,
+};
+/* Bits in network_desc.ctrl_length */
+enum ctrl_length_bits {
+ TxIntrOnDone=0x80000000, TxIntrOnFIFO=0x40000000,
+ TxDescEndPacket=0x20000000, TxDescStartPacket=0x10000000,
+ TxAppendCRC=0x08000000, TxPadTo64=0x04000000, TxNormalPkt=0x3C000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask (15 -> 16-byte alignment). */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc rx_ring[RX_RING_SIZE];
+ struct netdev_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level; /* Bitmask of NETIF_MSG_* message categories. */
+ int max_interrupt_work; /* Work budget per interrupt invocation. */
+ int intr_enable; /* Shadow of the IntrEnable register. */
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+
+ struct netdev_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak; /* Copy small frames below this size. */
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int rx_died:1; /* Rx ring ran out of buffers. */
+ unsigned int txrx_config; /* Shadow of the RxConfig register. */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+
+ unsigned int mcast_filter[2]; /* Shadow of the multicast hash filter. */
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* Number of MII transceivers found. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+/* Forward declarations of the driver-internal entry points.
+ (The original listed netdev_error() twice; the duplicate is removed.) */
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id,
+ unsigned int location);
+static void mdio_write(struct net_device *dev, int phy_id,
+ unsigned int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+/* Built-in (non-module) probe entry point: register the driver with
+ the PCI scan code and report the driver version.
+ Returns 0 on success, -ENODEV if registration failed. */
+int myson803_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&myson803_drv_id, dev) < 0)
+ return -ENODEV;
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+/* Probe and set up one detected Myson chip: allocate the net_device and
+ aligned private state, read the station address from the EEPROM,
+ reset the chip, install the driver entry points and locate the MII
+ transceiver(s). Returns the new net_device, or NULL on failure.
+ Fix vs. original: the MII scan accepted up to 4 transceivers while
+ np->phys[] holds only 2, overrunning the array; the scan is now
+ bounded by the array size.
+ NOTE(review): on kmalloc() failure the net_device from
+ init_etherdev() is not released here -- confirm the cleanup path. */
+static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ /* The station address lives in EEPROM words 8-10. */
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i + 8));
+ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+ /* Fill a temp addr with the "locally administered" bit set. */
+ memcpy(dev->dev_addr, ">Linux", 6);
+ }
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+ if (debug > 4)
+ for (i = 0; i < 0x40; i++)
+ printk("%4.4x%s",
+ eeprom_read(ioaddr, i), i % 16 != 15 ? " " : "\n");
+#endif
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* Do bogusness checks before this point.
+ We do a request_region() only to register /proc/ioports info. */
+#ifdef USE_IO_OPS
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(BCR_Reset, ioaddr + PCIBusCfg);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Round the private area up to the next PRIV_ALIGN+1 boundary. */
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ /* Link into the driver-wide device list used at module unload. */
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ if (np->drv_flags & HasMIIXcvr) {
+ /* Scan the MDIO bus, keeping at most as many transceivers as
+ np->phys[] can hold. */
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32
+ && phy_idx < (int)(sizeof(np->phys)/sizeof(np->phys[0]));
+ phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+ if (np->drv_flags & HasChipXcvr) {
+ np->phys[np->mii_cnt++] = 32;
+ printk(KERN_INFO "%s: Internal PHY status 0x%4.4x"
+ " advertising %4.4x.\n",
+ dev->name, mdio_read(dev, 32, 1), mdio_read(dev, 32, 4));
+ }
+ /* Allow forcing the media type. */
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (option & 0x220)
+ np->full_duplex = 1;
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
+ often serial bit streams generated by the host processor.
+ The example below is for the common 93c46 EEPROM, 64 16 bit words. */
+
+/* This "delay" forces out buffered PCI writes.
+ The udelay() is unreliable for timing, but some Myson NICs shipped with
+ absurdly slow EEPROMs.
+ */
+#define eeprom_delay(ee_addr) readl(ee_addr); udelay(2); readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x04<<16, EE_ChipSelect=0x88<<16,
+ EE_DataOut=0x02<<16, EE_DataIn=0x01<<16,
+ EE_Write0=0x88<<16, EE_Write1=0x8a<<16,
+};
+
+/* The EEPROM commands always start with 01.. preamble bits.
+ Commands are prepended to the variable-length address. */
+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
+
+/* Read one 16-bit word from the serial 93c46-style EEPROM at word
+ offset 'location'. The read command and the data are bit-banged
+ through the EECtrl register, most significant bit first.
+ Returns the 16-bit word read. */
+static int eeprom_read(long addr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = addr + EECtrl;
+ int read_cmd = location | (EE_ReadCmd<<6);
+
+ writel(EE_ChipSelect, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ /* Clock in the 16 data bits, MSB first. */
+ for (i = 16; i > 0; i--) {
+ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_ChipSelect, ee_addr);
+ writel(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+ The maximum data clock rate is 2.5 Mhz.
+ The timing is decoupled from the processor clock by flushing the write
+ from the CPU write buffer with a following read, and using PCI
+ transaction timing. */
+/* All MDIO bit-banging goes through the MIICtrl register; the delay is
+ a dummy read that flushes posted PCI writes. */
+#define mdio_in(mdio_addr) readl(mdio_addr)
+#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
+#define mdio_delay(mdio_addr) readl(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Bit positions in the MIICtrl register used for bit-banged MDIO. */
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+/* Convenience patterns: tristate the data pin, or drive a 0/1 bit. */
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Send the synchronization preamble on the MDIO management interface,
+ as needed for initial synchronization and by a few older
+ transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+ int i;
+
+ /* Establish sync by clocking out at least 32 logic one bits. */
+ for (i = 0; i < 32; i++) {
+ mdio_out(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+/* Read a 16-bit MII management register. phy_id values of 32 and up
+ select the chip's internal transceiver, whose MII registers are
+ mapped into the PHYMgmt window instead of the serial MDIO bus.
+ Returns the register value, or 0xffff for an out-of-range location. */
+static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
+{
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (location >= 32)
+ return 0xffff;
+ if (phy_id >= 32) {
+ /* Internal PHY: translate standard MII register numbers to
+ PHYMgmt word offsets. */
+ if (location < 6)
+ return readw(ioaddr + PHYMgmt + location*2);
+ else if (location == 16)
+ return readw(ioaddr + PHYMgmt + 6*2);
+ else if (location == 17)
+ return readw(ioaddr + PHYMgmt + 7*2);
+ else if (location == 18)
+ return readw(ioaddr + PHYMgmt + 10*2);
+ else
+ return 0;
+ }
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Drop the trailing idle bit and mask to the 16 data bits. */
+ return (retval>>1) & 0xffff;
+}
+
+/* Write a 16-bit MII management register. Mirrors mdio_read():
+ phy_id 32 addresses the internal transceiver through the PHYMgmt
+ window. A write to the autonegotiation advertisement register (4)
+ of the primary PHY is also cached in np->advertising. */
+static void mdio_write(struct net_device *dev, int phy_id,
+ unsigned int location, int value)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (location == 4 && phy_id == np->phys[0])
+ np->advertising = value;
+ else if (location >= 32)
+ return;
+
+ /* NOTE(review): mdio_read() treats any phy_id >= 32 as the internal
+ PHY, but this checks == 32 only -- confirm the asymmetry is
+ intended. */
+ if (phy_id == 32) {
+ if (location < 6)
+ writew(value, ioaddr + PHYMgmt + location*2);
+ else if (location == 16)
+ writew(value, ioaddr + PHYMgmt + 6*2);
+ else if (location == 17)
+ writew(value, ioaddr + PHYMgmt + 7*2);
+ return;
+ }
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+
+/* Bring the interface up: hook the interrupt, build the descriptor
+ rings, program the station/flow-control addresses, receive mode and
+ interrupt mask, and start the media-monitoring timer.
+ Returns 0 on success or -EAGAIN if the IRQ cannot be acquired. */
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Some chips may need to be reset. */
+
+ MOD_INC_USE_COUNT;
+
+ /* Clear any stale interrupt sources. */
+ writel(~0, ioaddr + IntrStatus);
+
+ /* Note that both request_irq() and init_ring() call kmalloc(), which
+ break the global kernel lock protecting this routine. */
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ /* Address register must be written as words. */
+ /* NOTE(review): cpu_to_le32()/cpu_to_le16() are each applied twice
+ below; the two swaps cancel on big-endian -- confirm intended. */
+ writel(cpu_to_le32(cpu_to_le32(get_unaligned((u32 *)dev->dev_addr))),
+ ioaddr + StationAddr);
+ writel(cpu_to_le16(cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)))),
+ ioaddr + StationAddr + 4);
+ /* Set the flow control address, 01:80:c2:00:00:01. */
+ writel(0x00c28001, ioaddr + FlowCtrlAddr);
+ writel(0x00000100, ioaddr + FlowCtrlAddr + 4);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ writel(0x01f8, ioaddr + PCIBusCfg);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ np->txrx_config = TxEnable | RxEnable | RxFlowCtrl | 0x00600000;
+ np->mcast_filter[0] = np->mcast_filter[1] = 0;
+ np->rx_died = 0;
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty | IntrTxDone
+ | IntrTxEmpty | StatsMax | RxOverflow | TxUnderrun | IntrPCIErr
+ | NWayDone | LinkChange;
+ writel(np->intr_enable, ioaddr + IntrEnable);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), PHY status: %x %x.\n",
+ dev->name, (int)readw(ioaddr + PHYMgmt),
+ (int)readw(ioaddr + PHYMgmt + 2));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+/* Re-check the negotiated duplex setting against the primary MII
+ transceiver and update the chip's Tx mode to match. Called from the
+ timer and from link-change interrupts. Does nothing when the media
+ type is locked or duplex is forced. */
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_tx_mode = np->txrx_config;
+
+ if (np->medialock) {
+ /* Media type forced by the user: leave the Tx mode alone. */
+ } else {
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ /* Full duplex iff 100baseTx-FDX, or 10baseT-FDX is the best
+ common mode. */
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
+ if (duplex)
+ new_tx_mode |= TxModeFDX;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+ " negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ }
+ }
+ /* NOTE(review): np->txrx_config is not updated after this write, so
+ the register is rewritten on every call while in FDX -- confirm. */
+ if (np->txrx_config != new_tx_mode)
+ writel(new_tx_mode, ioaddr + RxConfig);
+}
+
+/* Periodic (10 second) media-monitoring timer: watch for a stuck
+ transmitter and a starved receiver, re-check duplex, and
+ reschedule itself. */
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
+ dev->name, (int)readw(ioaddr + PHYMgmt + 10));
+ }
+ /* This will either have a small false-trigger window or will not catch
+ tbusy incorrectly set when the queue is empty. */
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ tx_timeout(dev);
+ }
+ /* It's dead Jim, no race condition. */
+ if (np->rx_died)
+ netdev_rx(dev);
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+/* Recover from a transmit hang: optionally dump the ring state, then
+ stop the Tx engine, repoint it at the first unacknowledged
+ descriptor, restart it and kick an immediate transmit. */
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", np->tx_ring[i].status);
+ printk("\n");
+ }
+
+ /* Stop and restart the chip's Tx processes . */
+ writel(np->txrx_config & ~TxEnable, ioaddr + RxConfig);
+ writel(virt_to_bus(np->tx_ring + (np->dirty_tx%TX_RING_SIZE)),
+ ioaddr + TxRingPtr);
+ writel(np->txrx_config, ioaddr + RxConfig);
+ /* Trigger an immediate transmit demand. */
+ writel(0, dev->base_addr + TxStartDemand);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx descriptor rings, along with various 'dev'
+ bits. The Rx ring is populated with freshly allocated receive
+ buffers; allocation failure is tolerated and recovered later from
+ the refill path. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int idx;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 4);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Chain each Rx descriptor to its successor, with no buffer yet. */
+ for (idx = 0; idx < RX_RING_SIZE; idx++) {
+ np->rx_ring[idx].ctrl_length = cpu_to_le32(np->rx_buf_sz);
+ np->rx_ring[idx].status = 0;
+ np->rx_ring[idx].next_desc = virt_to_le32desc(&np->rx_ring[idx+1]);
+ np->rx_skbuff[idx] = 0;
+ }
+ /* Close the circle: the final descriptor wraps back to the first. */
+ np->rx_ring[RX_RING_SIZE-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+ /* Attach a receive skbuff to each Rx descriptor and hand ownership
+ to the chip, stopping early if memory runs out. */
+ for (idx = 0; idx < RX_RING_SIZE; idx++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[idx] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[idx].buf_addr = virt_to_le32desc(skb->tail);
+ np->rx_ring[idx].status = cpu_to_le32(DescOwn);
+ }
+ np->dirty_rx = (unsigned int)(idx - RX_RING_SIZE);
+
+ /* The Tx ring starts empty, chained into the same circular shape. */
+ for (idx = 0; idx < TX_RING_SIZE; idx++) {
+ np->tx_skbuff[idx] = 0;
+ np->tx_ring[idx].status = 0;
+ np->tx_ring[idx].next_desc = virt_to_le32desc(&np->tx_ring[idx+1]);
+ }
+ np->tx_ring[TX_RING_SIZE-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+}
+
+/* Queue one packet for transmission. Returns 0 on success, or 1 if
+ the queue was busy (the caller requeues the skb). */
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
+ /* The length appears both in the control field (<<11) and the low
+ bits of the same word. */
+ np->tx_ring[entry].ctrl_length =
+ cpu_to_le32(TxIntrOnDone | TxNormalPkt | (skb->len << 11) | skb->len);
+ np->tx_ring[entry].status = cpu_to_le32(DescOwn);
+ np->cur_tx++;
+
+ /* On some architectures: explicitly flushing cache lines here speeds
+ operation. */
+
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_QUEUE_LEN - 2) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ /* Wake the potentially-idle transmit channel. */
+ writel(0, dev->base_addr + TxStartDemand);
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. Loops until the chip reports no pending
+ sources or the work budget (np->max_interrupt_work) is exhausted. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+ "device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#endif
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ writel(intr_status, ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & IntrRxDone)
+ netdev_rx(dev);
+
+ /* Reap completed Tx descriptors: gather statistics and free the
+ transmitted skbs. Stops at the first still-owned descriptor. */
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ int tx_status = le32_to_cpu(np->tx_ring[entry].status);
+ if (tx_status & DescOwn)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
+ | TxErr16Colls | TxErrHeartbeat)) {
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ np->stats.tx_errors++;
+ if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
+ if (tx_status & TxErrLate) np->stats.tx_window_errors++;
+ if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+ if (tx_status & TxErr16Colls) np->stats.collisions16++;
+ if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
+#else
+ if (tx_status & (TxErr16Colls|TxErrAbort))
+ np->stats.tx_aborted_errors++;
+#endif
+ } else {
+ np->stats.tx_packets++;
+ np->stats.collisions += tx_status & TxColls;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+#ifdef ETHER_STATS
+ if (tx_status & TxErrDefer) np->stats.tx_deferred++;
+#endif
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis to mark the queue non-full. */
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrRxErr | IntrRxEmpty | StatsMax | RxOverflow
+ | TxUnderrun | IntrPCIErr | NWayDone | LinkChange))
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ clear_bit(0, (void*)&dev->interrupt);
+#endif
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. Drains ready Rx
+ descriptors, passes good packets up the stack, and refills the ring
+ with fresh buffers. Returns the number of descriptors refilled. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ int refilled = 0;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ( ! (np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
+ struct netdev_desc *desc = np->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->status);
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, np->cur_rx, desc_status >> 16, desc_status);
+ np->stats.rx_length_errors++;
+ } else if (desc_status & RxDescErrSum) {
+ /* There was a error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & (RxErrLong|RxErrRunt))
+ np->stats.rx_length_errors++;
+ if (desc_status & (RxErrFrame|RxErrCode))
+ np->stats.rx_frame_errors++;
+ if (desc_status & RxErrCRC)
+ np->stats.rx_crc_errors++;
+ } else {
+ struct sk_buff *skb;
+ /* Reported length should omit the CRC. */
+ u16 pkt_len = ((desc_status >> 16) & 0xfff) - 4;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ } else {
+ /* Pass the ring buffer itself up the stack; the slot is
+ replenished by the refill loop below. */
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (np->msg_level & NETIF_MSG_PKTDATA)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+ "%d.%d.%d.%d.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13],
+ skb->data[14], skb->data[15], skb->data[16],
+ skb->data[17]);
+#endif
+ skb->mac.raw = skb->data;
+ /* Protocol lookup disabled until verified with all kernels. */
+ if (0 && ntohs(skb->mac.ethernet->h_proto) >= 0x0800) {
+ struct ethhdr *eth = skb->mac.ethernet;
+ skb->protocol = eth->h_proto;
+ if (desc_status & 0x1000) {
+ if ((dev->flags & IFF_PROMISC) &&
+ memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+ skb->pkt_type = PACKET_OTHERHOST;
+ } else if (desc_status & 0x2000)
+ skb->pkt_type = PACKET_BROADCAST;
+ else if (desc_status & 0x4000)
+ skb->pkt_type = PACKET_MULTICAST;
+ } else
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
+ }
+ np->rx_ring[entry].ctrl_length = cpu_to_le32(np->rx_buf_sz);
+ np->rx_ring[entry].status = cpu_to_le32(DescOwn);
+ refilled++;
+ }
+
+ /* Restart Rx engine if stopped. */
+ if (refilled) { /* Perhaps "&& np->rx_died" */
+ writel(0, dev->base_addr + RxStartDemand);
+ np->rx_died = 0;
+ }
+ return refilled;
+}
+
+/* Handle the uncommon interrupt sources: link/negotiation changes,
+ Tx underrun (bumping the Tx FIFO threshold), an exhausted or
+ overflowed Rx ring, statistics-counter overflow, and PCI faults. */
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & (LinkChange | NWayDone)) {
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], 4),
+ mdio_read(dev, np->phys[0], 5));
+ /* Clear sticky bit first. */
+ readw(ioaddr + PHYMgmt + 2);
+ if (readw(ioaddr + PHYMgmt + 2) & 0x0004)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ /* Raise the Tx FIFO threshold one step per underrun, up to its max. */
+ if ((intr_status & TxUnderrun)
+ && (np->txrx_config & TxThreshold) != TxThreshold) {
+ np->txrx_config += TxThresholdInc;
+ writel(np->txrx_config, ioaddr + RxConfig);
+ np->stats.tx_fifo_errors++;
+ }
+ if (intr_status & IntrRxEmpty) {
+ printk(KERN_WARNING "%s: Out of receive buffers: no free memory.\n",
+ dev->name);
+ /* Refill Rx descriptors */
+ np->rx_died = 1;
+ netdev_rx(dev);
+ }
+ if (intr_status & RxOverflow) {
+ printk(KERN_WARNING "%s: Receiver overflow.\n", dev->name);
+ np->stats.rx_over_errors++;
+ netdev_rx(dev); /* Refill Rx descriptors */
+ get_stats(dev); /* Empty dropped counter. */
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if ((intr_status & ~(LinkChange|NWayDone|StatsMax|TxUnderrun|RxOverflow
+ |TxEarly|RxEarly|0x001e))
+ && (np->msg_level & NETIF_MSG_DRV))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr) {
+ const char *const pcierr[4] =
+ { "Parity Error", "Master Abort", "Target Abort", "Unknown Error" };
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_WARNING "%s: PCI Bus %s, %x.\n",
+ dev->name, pcierr[(intr_status>>11) & 3], intr_status);
+ }
+}
+
+/* We do not bother to spinlock statistics.
+   A window only exists if we have non-atomic adds, the error counts are
+   typically zero, and statistics are non-critical. */
+/* Fold the chip's hardware error counters into the software
+   net_device_stats and return a pointer to it.
+   NOTE(review): the code accumulates (+=) the register values, which
+   presumes the counters clear on read -- confirm with the datasheet. */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned int rxerrs = readl(ioaddr + RxErrCnts);
+ unsigned int txerrs = readl(ioaddr + TxErrCnts);
+
+ /* The chip only need report frames silently dropped. */
+ /* RxErrCnts packs CRC errors in the high 16 bits, missed frames low. */
+ np->stats.rx_crc_errors += rxerrs >> 16;
+ np->stats.rx_missed_errors += rxerrs & 0xffff;
+
+ /* These stats are required when the descriptor is closed before Tx. */
+ /* TxErrCnts packs aborts [31:24], late collisions [23:16],
+    total collisions [15:0]. */
+ np->stats.tx_aborted_errors += txerrs >> 24;
+ np->stats.tx_window_errors += (txerrs >> 16) & 0xff;
+ np->stats.collisions += txerrs & 0xffff;
+
+ return &np->stats;
+}
+
+/* Big-endian AUTODIN II ethernet CRC calculations.
+   This is slow but compact code.  Do not use this routine for bulk data,
+   use a table-based routine instead.
+   This is common code and may be in the kernel with Linux 2.5+.
+
+   Returns the 32-bit CRC of 'length' bytes at 'data', MSB-first,
+   as used to index the chip's multicast hash filter.
+*/
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ u32 crc = ~0;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ /* Feed the polynomial back when the CRC's top bit differs from
+    the incoming data bit.  BUGFIX: 'crc' is unsigned, so the old
+    '(crc < 0)' sign test was always false and the CRC came out
+    wrong; test bit 31 explicitly instead. */
+ crc = (crc << 1) ^
+ (((crc >> 31) ^ (current_octet & 1)) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
+
+/* Program the chip's Rx filter from dev->flags and the multicast list:
+   promiscuous, accept-all-multicast, or a 64-bit hash filter built
+   from the Ethernet CRC of each multicast address.  The filter and
+   mode registers are only rewritten when the computed values differ
+   from the cached copies in np. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ mc_filter[1] = mc_filter[0] = ~0;
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+ | AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ mc_filter[1] = mc_filter[0] = ~0;
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ mc_filter[1] = mc_filter[0] = 0;
+ /* Hash each address into one of 64 filter bits using the top
+    six bits of its Ethernet CRC. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) & 0x3f,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ }
+ /* Avoid redundant register writes: update only on change. */
+ if (mc_filter[0] != np->mcast_filter[0] ||
+ mc_filter[1] != np->mcast_filter[1]) {
+ writel(mc_filter[0], ioaddr + MulticastFilter0);
+ writel(mc_filter[1], ioaddr + MulticastFilter1);
+ np->mcast_filter[0] = mc_filter[0];
+ np->mcast_filter[1] = mc_filter[1];
+ }
+ if ((np->txrx_config & RxFilter) != rx_mode) {
+ np->txrx_config &= ~RxFilter;
+ np->txrx_config |= rx_mode;
+ writel(np->txrx_config, ioaddr + RxConfig);
+ }
+}
+
+/*
+  Handle user-level ioctl() calls.
+  We must use two numeric constants as the key because some clueless person
+  changed the value for the symbolic name.
+  Supported: MII PHY get/read/write (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
+  under both their old and new numeric values) and the driver-private
+  SIOCG/SPARAMS tuning block.  Returns 0, -EPERM, or -EOPNOTSUPP.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ /* The MII requests view ifr_data as an array of u16: [0]=phy id,
+    [1]=register, [2]=value in, [3]=value out. */
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0];
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0], data[1]);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ /* Shadow writes to the primary PHY so the driver's notion of
+    media control stays in sync with the transceiver. */
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(dev, data[0], data[1], data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ /* Export the driver tuning knobs. */
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* The inverse of netdev_open(): stop the transmit queue, mask all
+   interrupts, halt the Tx/Rx engines, release the IRQ and free every
+   skbuff still held in the Rx and Tx rings.  Always returns 0. */
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x.\n",
+ dev->name, (int)readl(ioaddr + RxConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ np->txrx_config = 0;
+ writel(0, ioaddr + RxConfig);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ /* Debug-only dump of both descriptor rings. */
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %x %x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].ctrl_length,
+ np->tx_ring[i].buf_addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].ctrl_length,
+ np->rx_ring[i].buf_addr);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ /* Poison the buffer pointer so stray DMA is detectable. */
+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ /* Free any packets still queued for transmit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Power-management / hot-plug event callback registered with the
+   pci-scan layer.  Handles module refcounting on attach, quiescing the
+   chip on suspend, reprogramming it on resume, and full teardown
+   (unregister, unmap, unlink from the driver list, free) on detach.
+   Always returns 0. */
+static int netdev_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writel(0, ioaddr + IntrEnable);
+ writel(0, ioaddr + RxConfig);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ set_rx_mode(dev);
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ /* Unlink this device from the singly-linked driver list by
+    walking the chain of next_module pointers. */
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Module entry point: announce the driver and register with the
+   pci-scan layer, which probes for matching devices. */
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&myson803_drv_id, NULL);
+}
+
+/* Module exit: unregister the PCI driver, then walk the device list
+   unregistering, unmapping and freeing each installed card. */
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&myson803_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)(root_net_dev->base_addr));
+#endif
+ /* Save the link before freeing the device that holds it. */
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` myson803.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c myson803.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c myson803.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/natsemi.c b/linux/src/drivers/net/natsemi.c
new file mode 100644
index 0000000..0d98bea
--- /dev/null
+++ b/linux/src/drivers/net/natsemi.c
@@ -0,0 +1,1448 @@
+/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP83810 series. */
+/*
+ Written/copyright 1999-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL. License for under other terms may be
+ available. Contact the original author for details.
+
+ The original author may be reached as becker@scyld.com, or at
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/natsemi.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"natsemi.c:v1.17a 8/09/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/natsemi.html\n";
+/* Updated to recommendations in pci-skeleton v2.11. */
+
+/* Automatically extracted configuration info:
+probe-func: natsemi_probe
+config-in: tristate 'National Semiconductor DP8381x series PCI Ethernet support' CONFIG_NATSEMI
+
+c-help-name: National Semiconductor DP8381x series PCI Ethernet support
+c-help-symbol: CONFIG_NATSEMI
+c-help: This driver is for the National Semiconductor DP83810 series,
+c-help: including the 83815 chip.
+c-help: Usage information and updates are available from
+c-help: http://www.scyld.com/network/natsemi.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 512 element hash table based on the Ethernet CRC.
+ Some chip versions are reported to have unreliable multicast filter
+ circuitry. To work around an observed problem set this value to '0',
+ which will immediately switch to Rx-all-multicast.
+*/
+static int multicast_filter_limit = 100;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can only receive into aligned buffers, so architectures such
+ as the Alpha AXP might benefit from a copy-align.
+*/
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability, however setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ Understand the implications before changing these settings!
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Too-large receive rings waste memory and confound network buffer limits. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung.
+ Re-autonegotiation may take up to 3 seconds.
+ */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("National Semiconductor DP83810 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
+It also works with other chips in the DP83810 series.
+The most common board is the Netgear FA311 using the 83815.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be valid.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list, thus rings can be arbitrarily sized. Before changing the
+ring sizes you should understand the flow and cache effects of the
+full/available/empty hysteresis.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. On copies frames are put into the
+skbuff at an offset of "+2", 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+The older dp83810 chips are so uncommon that support is not relevant.
+No NatSemi datasheet was publicly available at the initial release date,
+but the dp83815 has now been published.
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+
+IVc. Errata
+
+Questionable multicast filter implementation.
+The EEPROM format is obviously the result of a chip bug.
+*/
+
+
+
+static void *natsemi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int power_event(void *dev_instance, int event);
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Netgear FA311 (NatSemi DP83815)",
+ { 0x0020100B, 0xffffffff, 0xf3111385, 0xffffffff, },
+ PCI_IOTYPE, 256, 0},
+ {"NatSemi DP83815", { 0x0020100B, 0xffffffff },
+ PCI_IOTYPE, 256, 0},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info natsemi_drv_id = {
+ "natsemi", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ natsemi_probe1, power_event };
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. Please do not change these names without good reason.
+*/
+enum register_offsets {
+ ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
+ IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18,
+ TxRingPtr=0x20, TxConfig=0x24,
+ RxRingPtr=0x30, RxConfig=0x34, ClkRunCtrl=0x3C,
+ WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
+ BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
+ StatsCtrl=0x5C, StatsData=0x60,
+ RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
+ NS_Xcvr_Mgmt = 0x80, NS_MII_BMCR=0x80, NS_MII_BMSR=0x84,
+ NS_MII_Advert=0x90, NS_MIILinkPartner=0x94,
+};
+
+/* Bits in ChipCmd. */
+enum ChipCmdBits {
+ ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
+ RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
+};
+
+/* Bits in ChipConfig. */
+enum ChipConfigBits {
+ CfgLinkGood=0x80000000, CfgFDX=0x20000000,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
+ IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
+ IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
+ IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
+ StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
+ RxStatusOverrun=0x10000,
+ RxResetDone=0x1000000, TxResetDone=0x2000000,
+ IntrPCIErr=0x00f00000,
+ IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptErr=0x20, AcceptRunt=0x10,
+ AcceptBroadcast=0xC0000000,
+ AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
+ AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ u32 next_desc;
+ s32 cmd_status;
+ u32 buf_addr;
+ u32 software_use;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+ DescNoCRC=0x10000000,
+ DescPktOK=0x08000000, RxTooLong=0x00400000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc rx_ring[RX_RING_SIZE];
+ struct netdev_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ long in_interrupt; /* Word-long for SMP locks. */
+ int max_interrupt_work;
+ int intr_enable;
+ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
+ unsigned int rx_q_empty:1; /* Set out-of-skbuffs. */
+
+ struct netdev_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* Rx filter. */
+ u32 cur_rx_mode;
+ u16 rx_filter[32];
+ int multicast_filter_limit;
+ /* FIFO and PCI burst thresholds. */
+ int tx_config, rx_config;
+ /* MII transceiver section. */
+ u16 advertising; /* NWay media advertisement */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static int rx_ring_fill(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+/* Non-module (compiled-in) probe entry: register with the pci-scan
+   layer and announce the driver version on success. */
+int natsemi_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&natsemi_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+/* Per-device probe: allocate and initialize the net_device for one
+   DP8381x chip found at ioaddr/irq.  Reads the station address from
+   the EEPROM, allocates aligned private state, applies module options
+   (forced speed/duplex), and fills in the device method pointers.
+   Returns the net_device, or NULL on failure. */
+static void *natsemi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+ int prev_eedata;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ /* Perhaps NETIF_MSG_PROBE */
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ /* Work around the dropped serial bit. */
+ /* The MAC address is stored shifted across EEPROM words 6..9; each
+    byte is reassembled from the current word and the carry bit of
+    the previous word. */
+ prev_eedata = eeprom_read(ioaddr, 6);
+ for (i = 0; i < 3; i++) {
+ int eedata = eeprom_read(ioaddr, i + 7);
+ dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ dev->dev_addr[i*2+1] = eedata >> 7;
+ prev_eedata = eedata;
+ }
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(ChipReset, ioaddr + ChipCmd);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ /* NOTE(review): this early return leaks the init_etherdev()
+    registration -- acceptable only because allocation failure at
+    boot/probe time is considered unrecoverable here. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Round the private area up to a PRIV_ALIGN+1 byte boundary;
+    the unaligned original pointer is kept for kfree(). */
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* 0x10/0x20/0x100/0x200 set forced speed&duplex modes. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ writew(((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0), /* Full duplex? */
+ ioaddr + NS_MII_BMCR);
+ }
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ /* Override the PME enable from the EEPROM. */
+ writel(0x8000, ioaddr + ClkRunCtrl);
+
+ if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000) {
+ u32 chip_config = readl(ioaddr + ChipConfig);
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
+ "10%s %s duplex.\n",
+ dev->name, chip_config & 0x2000 ? "enabled, advertise"
+ : "disabled, force", chip_config & 0x4000 ? "0" : "",
+ chip_config & 0x8000 ? "full" : "half");
+ }
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Transceiver status 0x%4.4x partner %4.4x.\n",
+ dev->name, (int)readl(ioaddr + NS_MII_BMSR),
+ (int)readl(ioaddr + NS_MIILinkPartner));
+
+ return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
+ Update to the code in other drivers for 8/10 bit addresses.
+*/
+
+/* Delay between EEPROM clock transitions.
+ This "delay" forces out buffered PCI writes, which is sufficient to meet
+ the timing requirements of most EEPROMs.
+*/
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
+};
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the preamble. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+/* Bit-bang one 16-bit word from the serial (93c06/46-style) EEPROM at
+   6-bit address 'location'.  Clocks the read command out MSB-first,
+   then collects the 16 data bits; the result is accumulated with bit i
+   ORed in at position i.  Returns the word read. */
+static int eeprom_read(long addr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = addr + EECtrl;
+ int read_cmd = location | EE_ReadCmd;
+ writel(EE_Write0, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ /* Clock in the 16 data bits. */
+ for (i = 0; i < 16; i++) {
+ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_Write0, ee_addr);
+ writel(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+   The 83815 series has an internal, directly accessible transceiver.
+ We present the management registers as if they were MII connected. */
+
+/* Read "MII" register 'location' of PHY 'phy_id'.
+   The 83815's internal transceiver is presented as PHY 1 with its
+   management registers memory-mapped at NS_Xcvr_Mgmt, 4 bytes apart.
+   Any other phy_id returns 0xffff, the no-device value of a real MII bus. */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	if (phy_id == 1 && location < 32)
+		return readw(dev->base_addr + NS_Xcvr_Mgmt + (location<<2));
+	else
+		return 0xffff;
+}
+
+/* Write 'value' to "MII" register 'location' of PHY 'phy_id'.
+   Only the internal transceiver (phy_id 1, registers 0-31) exists;
+   writes to any other PHY id are silently ignored. */
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+					   int value)
+{
+	if (phy_id == 1 && location < 32)
+		writew(value, dev->base_addr + NS_Xcvr_Mgmt + (location<<2));
+}
+
+
+/* Bring the interface up: claim the (shared) IRQ, build the descriptor
+   rings, load the station address and Tx/Rx configuration, enable
+   interrupts, start the Rx/Tx engines and arm the link-monitor timer.
+   Returns 0 on success or -EAGAIN if the IRQ cannot be acquired. */
+static int netdev_open(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	/* We do not need to reset the '815 chip. */
+
+	MOD_INC_USE_COUNT;
+
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+			   dev->name, dev->irq);
+
+	init_ring(dev);
+
+	/* Point the chip at the (bus-addressed) descriptor rings. */
+	writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+	/* Load the station address into the Rx filter, 16 bits at a time. */
+	for (i = 0; i < 6; i += 2) {
+		writel(i, ioaddr + RxFilterAddr);
+		writel(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
+			   ioaddr + RxFilterData);
+	}
+
+	/* Initialize other registers. */
+	/* See the datasheet for this correction (chip rev 0x0203 errata). */
+	if (readl(ioaddr + ChipRevReg) == 0x0203) {
+		writew(0x0001, ioaddr + 0xCC);
+		writew(0x18C9, ioaddr + 0xE4);
+		writew(0x0000, ioaddr + 0xFC);
+		writew(0x5040, ioaddr + 0xF4);
+		writew(0x008C, ioaddr + 0xF8);
+	}
+
+	/* Configure the PCI bus bursts and FIFO thresholds. */
+	/* Configure for standard, in-spec Ethernet. */
+
+	if (readl(ioaddr + ChipConfig) & CfgFDX) {	/* Full duplex */
+		np->tx_config = 0xD0801002;
+		np->rx_config = 0x10000020;
+	} else {
+		np->tx_config = 0x10801002;
+		np->rx_config = 0x0020;
+	}
+	/* Accept oversized frames when a jumbo MTU is configured. */
+	if (dev->mtu > 1500)
+		np->rx_config |= 0x08000000;
+	writel(np->tx_config, ioaddr + TxConfig);
+	writel(np->rx_config, ioaddr + RxConfig);
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	np->in_interrupt = 0;
+
+	check_duplex(dev);
+	set_rx_mode(dev);
+	netif_start_tx_queue(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
+	writel(np->intr_enable, ioaddr + IntrMask);
+	writel(1, ioaddr + IntrEnable);
+
+	writel(RxOn | TxOn, ioaddr + ChipCmd);
+	writel(4, ioaddr + StatsCtrl); /* Clear Stats */
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
+			   dev->name, (int)readl(ioaddr + ChipCmd));
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer; /* timer handler */
+	add_timer(&np->timer);
+
+	return 0;
+}
+
+/* Re-read the negotiated duplex from ChipConfig and, when it changed,
+   update the cached Tx/Rx configuration words and push them to the
+   chip.  A user-forced duplex (np->duplex_lock) suppresses all of this. */
+static void check_duplex(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int duplex;
+
+	if (np->duplex_lock)
+		return;
+	/* Bit 29 of ChipConfig reflects full-duplex on this chip. */
+	duplex = readl(ioaddr + ChipConfig) & 0x20000000 ? 1 : 0;
+	if (np->full_duplex != duplex) {
+		np->full_duplex = duplex;
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
+				   " capability.\n", dev->name,
+				   duplex ? "full" : "half");
+		if (duplex) {
+			np->rx_config |= 0x10000000;
+			np->tx_config |= 0xC0000000;
+		} else {
+			np->rx_config &= ~0x10000000;
+			np->tx_config &= ~0xC0000000;
+		}
+		writel(np->tx_config, ioaddr + TxConfig);
+		writel(np->rx_config, ioaddr + RxConfig);
+	}
+}
+
+/* Periodic (10s) watchdog: kick the Rx refill via a software interrupt
+   when the Rx queue ran empty, detect a stalled Tx path and invoke
+   tx_timeout(), and re-check the link duplex.  Re-arms itself. */
+static void netdev_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int next_tick = 10*HZ;
+
+	if (np->msg_level & NETIF_MSG_TIMER)
+		printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
+			   dev->name, (int)readl(ioaddr + IntrStatus));
+	if (np->rx_q_empty) {
+		/* Trigger an interrupt to refill. */
+		writel(SoftIntr, ioaddr + ChipCmd);
+	}
+	/* This will either have a small false-trigger window or will not catch
+	   tbusy incorrectly set when the queue is empty. */
+	if (netif_queue_paused(dev) &&
+		np->cur_tx - np->dirty_tx > 1 &&
+		(jiffies - dev->trans_start) > TX_TIMEOUT) {
+		tx_timeout(dev);
+	}
+	check_duplex(dev);
+	np->timer.expires = jiffies + next_tick;
+	add_timer(&np->timer);
+}
+
+/* Handle a transmit watchdog expiry: log the stall (optionally dumping
+   both descriptor rings) and count a Tx error.
+   NOTE(review): the actual recovery steps below are placeholder
+   comments only -- the chip is never reset or restarted here. */
+static void tx_timeout(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+		   " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
+
+	if (np->msg_level & NETIF_MSG_TX_ERR) {
+		int i;
+		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
+		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" %4.4x", np->tx_ring[i].cmd_status);
+		printk("\n");
+	}
+
+	/* Reinitialize the hardware here. */
+	/* Stop and restart the chip's Tx processes . */
+
+	/* Trigger an immediate transmit demand. */
+
+	dev->trans_start = jiffies;
+	np->stats.tx_errors++;
+	return;
+}
+
+/* Refill the Rx ring buffers, returning non-zero if not full. */
+/* Refill the Rx ring buffers, returning non-zero if not full.
+   Walks dirty_rx up to cur_rx, allocating an skb for each empty slot
+   and re-arming the descriptor.  On allocation failure the slot is
+   left empty and 1 is returned so the caller can retry later. */
+static int rx_ring_fill(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned int entry;
+
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				return 1; /* Better luck next time. */
+			skb->dev = dev; /* Mark as being used by this device. */
+			np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
+		}
+		/* Hand the descriptor back to the chip with its buffer size. */
+		np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
+	}
+	return 0;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+/* Initialize the Rx and Tx rings, along with various 'dev' bits.
+   Links each ring circularly via next_desc, clears all skb slots,
+   then pre-loads the Rx ring with buffers through rx_ring_fill(). */
+static void init_ring(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
+
+	np->tx_full = 0;
+	np->cur_rx = np->cur_tx = 0;
+	np->dirty_rx = np->dirty_tx = 0;
+
+	/* MAX(PKT_BUF_SZ, dev->mtu + 8); */
+	/* I know you _want_ to change this without understanding it.  Don't. */
+	np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
+	np->rx_head_desc = &np->rx_ring[0];
+
+	/* Initialize all Rx descriptors. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+		np->rx_skbuff[i] = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = 0;
+		np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
+		np->tx_ring[i].cmd_status = 0;
+	}
+	np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+
+	/* Fill in the Rx buffers.
+	   Allocation failure just leaves a "negative" np->dirty_rx. */
+	np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
+	rx_ring_fill(dev);
+
+	return;
+}
+
+/* Queue one skb for transmission.  Fills the next Tx descriptor,
+   hands ownership to the chip, manages the queue-full hysteresis and
+   kicks the Tx engine.  Returns 0 on success, 1 if the queue is paused
+   (caller requeues the skb). */
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	unsigned int entry;
+
+	/* Block a timer-based transmit from overlapping.  This happens when
+	   packets are presumed lost, and we use this to check the Tx status. */
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			tx_timeout(dev);
+		return 1;
+	}
+
+	/* Note: Ordering is important here, set the field with the
+	   "ownership" bit last, and only then increment cur_tx.
+	   No spinlock is needed for either Tx or Rx.
+	*/
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+
+	np->tx_skbuff[entry] = skb;
+
+	np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
+	np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
+	np->cur_tx++;
+
+	/* For some architectures explicitly flushing np->tx_ring,sizeof(tx_ring)
+	   and skb->data,skb->len improves performance. */
+
+	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+		np->tx_full = 1;
+		/* Check for a just-cleared queue (race with the IRQ handler). */
+		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+			< TX_QUEUE_LEN - 4) {
+			np->tx_full = 0;
+			netif_unpause_tx_queue(dev);
+		} else
+			netif_stop_tx_queue(dev);
+	} else
+		netif_unpause_tx_queue(dev); /* Typical path */
+	/* Wake the potentially-idle transmit channel. */
+	writel(TxOn, dev->base_addr + ChipCmd);
+
+	dev->trans_start = jiffies;
+
+	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+			   dev->name, np->cur_tx, entry);
+	}
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread.  Loops until the status register reads clear,
+   bounded by np->max_interrupt_work iterations. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+	struct net_device *dev = (struct net_device *)dev_instance;
+	struct netdev_private *np;
+	long ioaddr;
+	int boguscnt;
+
+#ifndef final_version			/* Can never occur. */
+	if (dev == NULL) {
+		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+				"device.\n", irq);
+		return;
+	}
+#endif
+
+	ioaddr = dev->base_addr;
+	np = (struct netdev_private *)dev->priv;
+	boguscnt = np->max_interrupt_work;
+
+	do {
+		u32 intr_status = readl(ioaddr + IntrStatus);
+
+		/* 0xffffffff indicates a removed/unreadable card. */
+		if (intr_status == 0 || intr_status == 0xffffffff)
+			break;
+
+		/* Acknowledge all of the current interrupt sources ASAP.
+		   Nominally the read above accomplishes this, but...
+		   NOTE(review): mask literal 0x001ffff has 5 hex digits; the
+		   value equals 0x0001ffff but the spelling looks like a typo. */
+		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
+
+		if (np->msg_level & NETIF_MSG_INTR)
+			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
+				   dev->name, intr_status);
+
+		if (intr_status & (IntrRxDone | IntrRxIntr)) {
+			netdev_rx(dev);
+			np->rx_q_empty = rx_ring_fill(dev);
+		}
+
+		if (intr_status & (IntrRxIdle | IntrDrv)) {
+			unsigned int old_dirty_rx = np->dirty_rx;
+			if (rx_ring_fill(dev) == 0)
+				np->rx_q_empty = 0;
+			/* Restart Rx engine iff we did add a buffer. */
+			if (np->dirty_rx != old_dirty_rx)
+				writel(RxOn, dev->base_addr + ChipCmd);
+		}
+
+		/* Reap completed Tx descriptors and free their skbs. */
+		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+			int entry = np->dirty_tx % TX_RING_SIZE;
+			int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
+			if (tx_status & DescOwn)
+				break;
+			if (np->msg_level & NETIF_MSG_TX_DONE)
+				printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+					   dev->name, tx_status);
+			if (tx_status & 0x08000000) {
+				np->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+			} else {			/* Various Tx errors */
+				if (np->msg_level & NETIF_MSG_TX_ERR)
+					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+						   dev->name, tx_status);
+				if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
+				if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
+				if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
+				if (tx_status & 0x00200000) np->stats.tx_window_errors++;
+				np->stats.tx_errors++;
+			}
+			/* Free the original skb. */
+			dev_free_skb_irq(np->tx_skbuff[entry]);
+			np->tx_skbuff[entry] = 0;
+		}
+		/* Note the 4 slot hysteresis to mark the queue non-full. */
+		if (np->tx_full
+			&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+			/* The ring is no longer full, allow new TX entries. */
+			np->tx_full = 0;
+			netif_resume_tx_queue(dev);
+		}
+
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & IntrAbnormalSummary)
+			netdev_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			printk(KERN_WARNING "%s: Too much work at interrupt, "
+				   "status=0x%4.4x.\n",
+				   dev->name, intr_status);
+			np->restore_intr_enable = 1;
+			break;
+		}
+	} while (1);
+
+	if (np->msg_level & NETIF_MSG_INTR)
+		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+			   dev->name, (int)readl(ioaddr + IntrStatus));
+
+	return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation.
+   Walks the Rx ring while descriptors are chip-completed (status sign
+   bit, i.e. DescOwn, set), copying small packets into fresh skbs and
+   passing large ones up directly. */
+static int netdev_rx(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+
+	/* If the driver owns the next entry it's a new packet. Send it up. */
+	while (desc_status < 0) {			/* e.g. & DescOwn */
+		if (np->msg_level & NETIF_MSG_RX_STATUS)
+			printk(KERN_DEBUG "  In netdev_rx() entry %d status was %8.8x.\n",
+				   entry, desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
+			if (desc_status & DescMore) {
+				printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
+					   "multiple buffers, entry %#x status %x.\n",
+					   dev->name, np->cur_rx, desc_status);
+				np->stats.rx_length_errors++;
+			} else {
+				/* There was an error. */
+				if (np->msg_level & NETIF_MSG_RX_ERR)
+					printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
+						   desc_status);
+				np->stats.rx_errors++;
+				if (desc_status & 0x06000000) np->stats.rx_over_errors++;
+				if (desc_status & 0x00600000) np->stats.rx_length_errors++;
+				if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
+				if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
+			}
+		} else {
+			struct sk_buff *skb;
+			int pkt_len = (desc_status & 0x0fff) - 4; /* Omit CRC size. */
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+			if (pkt_len < np->rx_copybreak
+				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+				skb->dev = dev;
+				skb_reserve(skb, 2);	/* 16 byte align the IP header */
+#if defined(HAS_IP_COPYSUM)  ||  (LINUX_VERSION_CODE >= 0x20100)
+				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+				skb_put(skb, pkt_len);
+#else
+				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+					   pkt_len);
+#endif
+			} else {
+				/* Pass the ring buffer up directly; slot refilled later. */
+				skb_put(skb = np->rx_skbuff[entry], pkt_len);
+				np->rx_skbuff[entry] = NULL;
+			}
+			skb->protocol = eth_type_trans(skb, dev);
+			/* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
+			netif_rx(skb);
+			dev->last_rx = jiffies;
+			np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+			np->stats.rx_bytes += pkt_len;
+#endif
+		}
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
+		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+	}
+
+	/* Refill is now done in the main interrupt loop. */
+	return 0;
+}
+
+/* Handle the uncommon interrupt sources summarized in IntrAbnormalSummary:
+   link changes, statistics-counter overflow, Tx FIFO underrun (raises the
+   Tx threshold), wake-on-LAN events, Rx overflow and PCI faults. */
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (intr_status & LinkChange) {
+		int chip_config = readl(ioaddr + ChipConfig);
+		if (np->msg_level & NETIF_MSG_LINK)
+			printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+				   " %4.4x  partner %4.4x.\n", dev->name,
+				   (int)readw(ioaddr + NS_MII_Advert),
+				   (int)readw(ioaddr + NS_MIILinkPartner));
+		if (chip_config & CfgLinkGood)
+			netif_link_up(dev);
+		else
+			netif_link_down(dev);
+		check_duplex(dev);
+	}
+	if (intr_status & StatsMax) {
+		/* Fold the hardware counters into np->stats before they wrap. */
+		get_stats(dev);
+	}
+	if (intr_status & IntrTxUnderrun) {
+		/* Increase the Tx threshold, 32 byte units. */
+		if ((np->tx_config & 0x3f) < 62)
+			np->tx_config += 2;			/* +64 bytes */
+		writel(np->tx_config, ioaddr + TxConfig);
+	}
+	if (intr_status & WOLPkt) {
+		int wol_status = readl(ioaddr + WOLCmd);
+		printk(KERN_NOTICE "%s: Link wake-up event %8.8x",
+			   dev->name, wol_status);
+	}
+	if (intr_status & (RxStatusOverrun | IntrRxOverrun)) {
+		if (np->msg_level & NETIF_MSG_DRV)
+			printk(KERN_ERR "%s: Rx overflow! ns815 %8.8x.\n",
+				   dev->name, intr_status);
+		np->stats.rx_fifo_errors++;
+	}
+	if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
+						RxStatusOverrun|0xA7ff)) {
+		if (np->msg_level & NETIF_MSG_DRV)
+			printk(KERN_ERR "%s: Something Wicked happened! natsemi %8.8x.\n",
+				   dev->name, intr_status);
+	}
+	/* Hmmmmm, it's not clear how to recover from PCI faults. */
+	if (intr_status & IntrPCIErr) {
+		np->stats.tx_fifo_errors++;
+		np->stats.rx_fifo_errors++;
+	}
+}
+
+/* Fold the chip's clear-on-read error counters into the software
+   statistics and return the accumulated stats structure.  A read of
+   0xffffffff means the card is absent/unreadable, so skip the update. */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int crc_errs = readl(ioaddr + RxCRCErrs);
+
+	if (crc_errs != 0xffffffff) {
+		/* We need not lock this segment of code for SMP.
+		   There is no atomic-add vulnerability for most CPUs,
+		   and statistics are non-critical. */
+		/* The chip only reports frames it silently dropped. */
+		np->stats.rx_crc_errors	+= crc_errs;
+		np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+	}
+
+	return &np->stats;
+}
+
+/* The big-endian AUTODIN II ethernet CRC calculations.
+ See ns820.c for how to fill the table on new chips.
+ */
+/* Bitwise big-endian AUTODIN II CRC-32 over 'length' bytes of 'data';
+   used to compute the multicast hash filter index. */
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+	int crc = -1;				/* CRC register seeded with all ones. */
+
+	while(--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+			crc = (crc << 1) ^
+				((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+	}
+	return crc;
+}
+
+/* Program the chip's Rx filter from dev->flags and the multicast list:
+   promiscuous, all-multicast, or a 512-bit CRC hash filter built from
+   the multicast addresses and written word-by-word when changed. */
+static void set_rx_mode(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u8 mc_filter[64];			/* Multicast hash filter */
+	u32 rx_mode;
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
+			| AcceptMyPhys;
+	} else if ((dev->mc_count > np->multicast_filter_limit)
+			   ||  (dev->flags & IFF_ALLMULTI)) {
+		rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
+	} else {
+		struct dev_mc_list *mclist;
+		int i;
+		memset(mc_filter, 0, sizeof(mc_filter));
+		for (i = 0, mclist = dev->mc_list; mclist  &&  i < dev->mc_count;
+			 i++, mclist = mclist->next) {
+			int filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr);
+			set_bit(filterbit & 0x1ff, mc_filter);
+			if (np->msg_level & NETIF_MSG_RXFILTER)
+				printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
+					   "%2.2x:%2.2x:%2.2x  crc %8.8x bit %d.\n", dev->name,
+					   mclist->dmi_addr[0], mclist->dmi_addr[1],
+					   mclist->dmi_addr[2], mclist->dmi_addr[3],
+					   mclist->dmi_addr[4], mclist->dmi_addr[5],
+					   filterbit, filterbit & 0x1ff);
+		}
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		/* Write only the 16-bit filter words that changed.
+		   NOTE(review): the cache index 'i>>2' maps two successive byte
+		   pairs (i and i+2) onto the same rx_filter slot; 'i>>1' looks
+		   intended -- verify against np->rx_filter's declared size. */
+		for (i = 0; i < 64; i += 2) {
+			u16 filterword = (mc_filter[i+1]<<8) + mc_filter[i];
+			if (filterword != np->rx_filter[i>>2]) {
+				writel(0x200 + i, ioaddr + RxFilterAddr);
+				writel(filterword, ioaddr + RxFilterData);
+				np->rx_filter[i>>2] = filterword;
+			}
+		}
+	}
+	writel(rx_mode, ioaddr + RxFilterAddr);
+	np->cur_rx_mode = rx_mode;
+}
+
+/* Private ioctl handler: MII register read/write (both the old 0x89Fx
+   and the newer 0x894x SIOCxMIIxxx numbers) plus driver tunables via
+   SIOCG/SPARAMS.  Writes require CAP_NET_ADMIN. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	u16 *data = (u16 *)&rq->ifr_data;
+	u32 *data32 = (void *)&rq->ifr_data;
+
+	switch(cmd) {
+	case 0x8947: case 0x89F0:
+		/* SIOCGMIIPHY: Get the address of the PHY in use. */
+		data[0] = 1;
+		/* Fall Through */
+	case 0x8948: case 0x89F1:
+		/* SIOCGMIIREG: Read the specified MII register. */
+		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+		return 0;
+	case 0x8949: case 0x89F2:
+		/* SIOCSMIIREG: Write the specified MII register */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (data[0] == 1) {
+			u16 miireg = data[1] & 0x1f;
+			u16 value = data[2];
+			mdio_write(dev, 1, miireg, value);
+			/* Mirror writes to BMCR/advertising into driver state. */
+			switch (miireg) {
+			case 0:
+				/* Check for autonegotiation on or reset. */
+				np->duplex_lock = (value & 0x9000) ? 0 : 1;
+				if (np->duplex_lock)
+					np->full_duplex = (value & 0x0100) ? 1 : 0;
+				break;
+			case 4: np->advertising = value; break;
+			}
+		}
+		return 0;
+	case SIOCGPARAMS:
+		data32[0] = np->msg_level;
+		data32[1] = np->multicast_filter_limit;
+		data32[2] = np->max_interrupt_work;
+		data32[3] = np->rx_copybreak;
+		return 0;
+	case SIOCSPARAMS:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		np->msg_level = data32[0];
+		np->multicast_filter_limit = data32[1];
+		np->max_interrupt_work = data32[2];
+		np->rx_copybreak = data32[3];
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Shut the interface down: stop the queue and timer, mask interrupts,
+   freeze stats, halt Rx/Tx, release the IRQ and free every skb still
+   held by the descriptor rings.  Always returns 0. */
+static int netdev_close(struct net_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int i;
+
+	netif_stop_tx_queue(dev);
+
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
+			   "Int %2.2x.\n",
+			   dev->name, (int)readl(ioaddr + ChipCmd),
+			   (int)readl(ioaddr + IntrStatus));
+		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
+			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+	}
+
+	/* We don't want the timer to re-start anything. */
+	del_timer(&np->timer);
+
+	/* Disable interrupts using the mask. */
+	writel(0, ioaddr + IntrMask);
+	writel(0, ioaddr + IntrEnable);
+	writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
+
+	/* Stop the chip's Tx and Rx processes. */
+	writel(RxOff | TxOff, ioaddr + ChipCmd);
+
+	/* Snapshot the final hardware counters before the card goes idle. */
+	get_stats(dev);
+
+#ifdef __i386__
+	if (np->msg_level & NETIF_MSG_IFDOWN) {
+		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
+			   (int)virt_to_bus(np->tx_ring));
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" #%d desc. %8.8x %8.8x.\n",
+				   i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
+		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
+			   (int)virt_to_bus(np->rx_ring));
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
+				   i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
+		}
+	}
+#endif /* __i386__ debugging only */
+
+	free_irq(dev->irq, dev);
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].cmd_status = 0;
+		np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+			np->rx_skbuff[i]->free = 1;
+#endif
+			dev_free_skb(np->rx_skbuff[i]);
+		}
+		np->rx_skbuff[i] = 0;
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (np->tx_skbuff[i])
+			dev_free_skb(np->tx_skbuff[i]);
+		np->tx_skbuff[i] = 0;
+	}
+
+#if 0
+	writel(0x0200, ioaddr + ChipConfig); /* Power down Xcvr. */
+#endif
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+/* Power-management / hot-plug event callback.  SUSPEND quiesces the
+   chip, RESUME partially restarts it (open() state is not fully
+   replayed -- see comment), DETACH closes, unregisters and frees the
+   device, unlinking it from the driver's module list. */
+static int power_event(void *dev_instance, int event)
+{
+	struct net_device *dev = dev_instance;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+	switch(event) {
+	case DRV_ATTACH:
+		MOD_INC_USE_COUNT;
+		break;
+	case DRV_SUSPEND:
+		/* Disable interrupts, freeze stats, stop Tx and Rx. */
+		writel(0, ioaddr + IntrEnable);
+		writel(2, ioaddr + StatsCtrl);
+		writel(RxOff | TxOff, ioaddr + ChipCmd);
+		break;
+	case DRV_RESUME:
+		/* This is incomplete: the open() actions should be repeated. */
+		set_rx_mode(dev);
+		writel(np->intr_enable, ioaddr + IntrEnable);
+		writel(1, ioaddr + IntrEnable);
+		writel(RxOn | TxOn, ioaddr + ChipCmd);
+		break;
+	case DRV_DETACH: {
+		struct net_device **devp, **next;
+		if (dev->flags & IFF_UP) {
+			/* Some, but not all, kernel versions close automatically. */
+			dev_close(dev);
+			dev->flags &= ~(IFF_UP|IFF_RUNNING);
+		}
+		unregister_netdev(dev);
+		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+		/* Unlink this device from the singly-linked module list. */
+		for (devp = &root_net_dev; *devp; devp = next) {
+			next = &((struct netdev_private *)(*devp)->priv)->next_module;
+			if (*devp == dev) {
+				*devp = *next;
+				break;
+			}
+		}
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(dev);
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+	}
+
+	return 0;
+}
+
+
+#ifdef MODULE
+/* Module entry point: optionally print the version banner, then
+   register either the CardBus or the PCI probe framework. */
+int init_module(void)
+{
+	if (debug >= NETIF_MSG_DRV)	/* Emit version even if no cards detected. */
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+	register_driver(&etherdev_ops);
+	return 0;
+#else
+	return pci_drv_register(&natsemi_drv_id, NULL);
+#endif
+}
+
+/* Module exit point: unregister the probe framework, then walk the
+   device list freeing each registered card's mapping, private area
+   and net_device structure. */
+void cleanup_module(void)
+{
+	struct net_device *next_dev;
+
+#ifdef CARDBUS
+	unregister_driver(&etherdev_ops);
+#else
+	pci_drv_unregister(&natsemi_drv_id);
+#endif
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_net_dev) {
+		struct netdev_private *np = (void *)(root_net_dev->priv);
+		unregister_netdev(root_net_dev);
+		iounmap((char *)root_net_dev->base_addr);
+		next_dev = np->next_module;
+		if (np->priv_addr)
+			kfree(np->priv_addr);
+		kfree(root_net_dev);
+		root_net_dev = next_dev;
+	}
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` natsemi.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c natsemi.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c natsemi.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/ne.c b/linux/src/drivers/net/ne.c
new file mode 100644
index 0000000..ea2f929
--- /dev/null
+++ b/linux/src/drivers/net/ne.c
@@ -0,0 +1,812 @@
+/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver should work with many programmed-I/O 8390-based ethernet
+ boards. Currently it supports the NE1000, NE2000, many clones,
+ and some Cabletron products.
+
+ Changelog:
+
+ Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made
+ sanity checks and bad clone support optional.
+ Paul Gortmaker : new reset code, reset card after probe at boot.
+ Paul Gortmaker : multiple card support for module users.
+ Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c
+ Paul Gortmaker : Allow users with bad cards to avoid full probe.
+ Paul Gortmaker : PCI probe changes, more PCI cards supported.
+
+*/
+
+/* Routines for the NatSemi-based designs (NE[12]000). */
+
+static const char *version =
+ "ne.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
+#define SUPPORT_NE_BAD_CLONES
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+#if defined(HAVE_DEVLIST) || !defined(MODULE)
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int netcard_portlist[] =
+{ 0x300, 0x280, 0x320, 0x340, 0x360, 0};
+#endif /* defined(HAVE_DEVLIST) || !defined(MODULE) */
+
+#ifdef CONFIG_PCI
+/* Ack! People are making PCI ne2000 clones! Oh the horror, the horror... */
+/* Scanned by ne_probe_pci() via pcibios_find_device(); a zero vendor
+   field terminates the table. */
+static struct { unsigned short vendor, dev_id;}
+pci_clone_list[] = {
+ {PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8029},
+ {PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940},
+ {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_RL2000},
+ {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_ET32P2},
+ {PCI_VENDOR_ID_NETVIN, PCI_DEVICE_ID_NETVIN_NV5000SC},
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C926},
+ {0,}
+};
+#endif
+
+#ifdef SUPPORT_NE_BAD_CLONES
+/* A list of bad clones that we none-the-less recognize. */
+/* Matched in ne_probe1() against the first three station-address bytes;
+   a NULL name8 entry terminates the scan. */
+static struct { const char *name8, *name16; unsigned char SAprefix[4];}
+bad_clone_list[] = {
+ {"DE100", "DE200", {0x00, 0xDE, 0x01,}},
+ {"DE120", "DE220", {0x00, 0x80, 0xc8,}},
+ {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */
+ {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}},
+ {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */
+ {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */
+ {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */
+ {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */
+ {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */
+ {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */
+ {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
+ {"RealTek 8029", "RealTek 8029", {0x00, 0x3e, 0x4d}}, /* RealTek PCI cards */
+ {0,}
+};
+#endif
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
+static unsigned char pci_irq_line = 0;
+
+int ne_probe(struct device *dev);
+#ifdef CONFIG_PCI
+static int ne_probe_pci(struct device *dev);
+#endif
+static int ne_probe1(struct device *dev, int ioaddr);
+
+static int ne_open(struct device *dev);
+static int ne_close(struct device *dev);
+
+static void ne_reset_8390(struct device *dev);
+static void ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+/* Probe for various non-shared-memory ethercards.
+
+ NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+ buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+ the SAPROM, while other supposed NE2000 clones must be detected by their
+ SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+
+ We use the minimum memory size for some ethercard product lines, iff we can't
+ distinguish models. You can increase the packet buffer size by setting
+ PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
+ E1010 starts at 0x100 and ends at 0x2000.
+ E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+ E2010 starts at 0x100 and ends at 0x4000.
+ E2010-x starts at 0x100 and ends at 0xffff. */
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"ne", ne_probe1, NE_IO_EXTENT, netcard_portlist};
+#else
+
+/* Note that this probe only picks up one card at a time, even for multiple
+ PCI ne2k cards. Use "ether=0,0,eth1" if you have a second PCI ne2k card.
+ This keeps things consistent regardless of the bus type of the card. */
+
+/* Top-level probe.  Policy: an explicit user-supplied I/O base
+   (dev->base_addr > 0x1ff) is probed directly; a small non-zero base
+   means "do not probe at all"; otherwise PCI clones are scanned, and
+   finally (built-in kernels only) the ISA port list.  Returns 0 on
+   success or a positive errno-style code, per this driver's convention. */
+int ne_probe(struct device *dev)
+{
+#ifndef MODULE
+ int i;
+#endif /* MODULE */
+ int base_addr = dev ? dev->base_addr : 0;
+
+ /* First check any supplied i/o locations. User knows best. <cough> */
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ne_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+#ifdef CONFIG_PCI
+ /* Then look for any installed PCI clones */
+ if (pcibios_present() && (ne_probe_pci(dev) == 0))
+ return 0;
+#endif
+
+#ifndef MODULE
+ /* Last resort. The semi-risky ISA auto-probe. */
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, NE_IO_EXTENT))
+ continue;
+ if (ne_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+#endif
+
+ return ENODEV;
+}
+#endif
+
+#ifdef CONFIG_PCI
+/* Scan the PCI bus (via the BIOS32 pcibios_* interface) for known
+   NE2000 clones from pci_clone_list and probe the first one whose I/O
+   region is not already claimed.  Sets the file-scope pci_irq_line
+   before calling ne_probe1() (which uses it as a PCI hint) and clears
+   it again afterwards.  Returns 0 on success, negative on failure. */
+static int ne_probe_pci(struct device *dev)
+{
+ int i;
+
+ for (i = 0; pci_clone_list[i].vendor != 0; i++) {
+ unsigned char pci_bus, pci_device_fn;
+ unsigned int pci_ioaddr;
+ u16 pci_command, new_command;
+ int pci_index;
+
+ /* Look at up to eight cards of each vendor/device ID pair. */
+ for (pci_index = 0; pci_index < 8; pci_index++) {
+ if (pcibios_find_device (pci_clone_list[i].vendor,
+ pci_clone_list[i].dev_id, pci_index,
+ &pci_bus, &pci_device_fn) != 0)
+ break; /* No more of these type of cards */
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Strip the I/O address out of the returned value */
+ pci_ioaddr &= PCI_BASE_ADDRESS_IO_MASK;
+ /* Avoid already found cards from previous calls */
+ if (check_region(pci_ioaddr, NE_IO_EXTENT))
+ continue;
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ break; /* Beauty -- got a valid card. */
+ }
+ /* NOTE(review): a card whose BIOS assigned IRQ 0 is skipped here. */
+ if (pci_irq_line == 0) continue; /* Try next PCI ID */
+ printk("ne.c: PCI BIOS reports NE 2000 clone at i/o %#x, irq %d.\n",
+ pci_ioaddr, pci_irq_line);
+
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+
+ /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+ new_command = pci_command | PCI_COMMAND_IO;
+ if (pci_command != new_command) {
+ printk(KERN_INFO "  The PCI BIOS has not enabled this"
+ " NE2k clone!  Updating PCI command %4.4x->%4.4x.\n",
+ pci_command, new_command);
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, new_command);
+ }
+
+ if (ne_probe1(dev, pci_ioaddr) != 0) { /* Shouldn't happen. */
+ printk(KERN_ERR "ne.c: Probe of PCI card at %#x failed.\n", pci_ioaddr);
+ pci_irq_line = 0;
+ return -ENXIO;
+ }
+ pci_irq_line = 0;
+ return 0;
+ }
+ return -ENODEV;
+}
+#endif /* CONFIG_PCI */
+
+/* Probe a single I/O address for an NE1000/NE2000-compatible 8390.
+   On success fills in 'dev' (I/O base, IRQ, station address, 8390
+   callbacks) and returns 0; failures return a nonzero errno-style
+   value, matching the other probe paths in this file.  The file-scope
+   pci_irq_line, when set by ne_probe_pci(), marks the card as a PCI
+   clone (forced word-wide access, SA_SHIRQ interrupt). */
+static int ne_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[32];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+ int neX000, ctron, bad_card;
+ int reg0 = inb_p(ioaddr);
+ static unsigned version_printed = 0;
+
+ if (reg0 == 0xFF)
+ return ENODEV;
+
+ /* Do a preliminary verification that we have a 8390. */
+ { int regd;
+ outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb_p(ioaddr + 0x0d);
+ outb_p(0xff, ioaddr + 0x0d);
+ outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+ outb_p(reg0, ioaddr);
+ outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */
+ return ENODEV;
+ }
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk(KERN_ERR "ne.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("NE*000 ethercard probe at %#3x:", ioaddr);
+
+ /* A user with a poor card that fails to ack the reset, or that
+ does not have a valid 0x57,0x57 signature can still use this
+ without having to recompile. Specifying an i/o address along
+ with an otherwise unused dev->mem_end value of "0xBAD" will
+ cause the driver to skip these parts of the probe. */
+
+ bad_card = ((dev->base_addr != 0) && (dev->mem_end == 0xbad));
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ { unsigned long reset_start_time = jiffies;
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ /* Wait up to ~20ms (2*HZ/100) for the reset-complete ISR bit. */
+ while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ if (bad_card) {
+ printk(" (warning: no reset ack)");
+ break;
+ } else {
+ printk(" not found (no reset ack).\n");
+ return ENODEV;
+ }
+ }
+
+ outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+ SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+ if (SA_prom[i] != SA_prom[i+1])
+ wordlength = 1;
+ }
+
+ /* At this point, wordlength *only* tells us if the SA_prom is doubled
+ up or not because some broken PCI cards don't respect the byte-wide
+ request in program_seq above, and hence don't have doubled up values.
+ These broken cards would otherwise be detected as an ne1000. */
+
+ if (wordlength == 2)
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i]; /* Un-double: keep every other byte. */
+
+ if (pci_irq_line || ioaddr >= 0x400)
+ wordlength = 2; /* Catch broken PCI cards mentioned above. */
+
+ if (wordlength == 2) {
+ /* We must set the 8390 for word mode. */
+ outb_p(0x49, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
+ neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
+ ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
+
+ /* Set up the rest of the parameters. */
+ if (neX000 || bad_card) {
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+ } else if (ctron) {
+ /* NOTE(review): the Ctron name mapping (word-wide -> "Ctron-8")
+ looks inverted but matches the historical upstream driver;
+ left unchanged. */
+ name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
+ start_page = 0x01;
+ stop_page = (wordlength == 2) ? 0x40 : 0x20;
+ } else {
+#ifdef SUPPORT_NE_BAD_CLONES
+ /* Ack! Well, there might be a *bad* NE*000 clone there.
+ Check for total bogus addresses. */
+ for (i = 0; bad_clone_list[i].name8; i++) {
+ if (SA_prom[0] == bad_clone_list[i].SAprefix[0] &&
+ SA_prom[1] == bad_clone_list[i].SAprefix[1] &&
+ SA_prom[2] == bad_clone_list[i].SAprefix[2]) {
+ if (wordlength == 2) {
+ name = bad_clone_list[i].name16;
+ } else {
+ name = bad_clone_list[i].name8;
+ }
+ break;
+ }
+ }
+ if (bad_clone_list[i].name8 == NULL) {
+ printk(" not found (invalid signature %2.2x %2.2x).\n",
+ SA_prom[14], SA_prom[15]);
+ return ENXIO;
+ }
+#else
+ printk(" not found.\n");
+ return ENXIO;
+#endif
+
+ }
+
+ if (pci_irq_line)
+ dev->irq = pci_irq_line;
+
+ if (dev->irq < 2) {
+ /* Auto-detect the IRQ: briefly unmask and trigger one interrupt. */
+ autoirq_setup(0);
+ outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */
+ outb_p(0x00, ioaddr + EN0_RCNTLO);
+ outb_p(0x00, ioaddr + EN0_RCNTHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */
+ outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
+ dev->irq = autoirq_report(0);
+ if (ei_debug > 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ or don't know which one to set. */
+ dev->irq = 9;
+
+ if (! dev->irq) {
+ printk(" failed to detect IRQ line.\n");
+ return EAGAIN;
+ }
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share (with ISA cards) and the board will usually be enabled. */
+ {
+ int irqval = request_irq(dev->irq, ei_interrupt,
+ pci_irq_line ? SA_SHIRQ : 0, name, dev);
+ if (irqval) {
+ printk (" unable to get IRQ %d (irqval=%d).\n", dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+
+ dev->base_addr = ioaddr;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ /* Must pass the same dev_id given to request_irq() above: a NULL
+ dev_id would fail to release a shared (SA_SHIRQ) handler. */
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+
+ request_region(ioaddr, NE_IO_EXTENT, name);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ printk(" %2.2x", SA_prom[i]);
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ printk("\n%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, ioaddr, dev->irq);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = (wordlength == 2);
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+ dev->open = &ne_open;
+ dev->stop = &ne_close;
+ NS8390_init(dev, 0);
+ return 0;
+}
+
+/* Bring the interface up: delegate to the generic 8390 ei_open() and
+   pin the module while the device is in use. */
+static int
+ne_open(struct device *dev)
+{
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/* Shut the interface down via the generic 8390 ei_close() and drop the
+   module reference taken in ne_open(). */
+static int
+ne_close(struct device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+/* Per the NE_RESET definition above: a read starts the reset, writing
+   the value back clears it.  Clears the driver's txing/dmaing state. */
+static void
+ne_reset_8390(struct device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ /* Wait up to ~20ms (2*HZ/100) for the reset-complete ISR bit. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk("%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+
+ /* Mark remote DMA busy, program a read of sizeof(hdr) bytes starting
+ at the page boundary (RSARLO=0, RSARHI=ring_page), then pull the
+ header through the data port word- or byte-wide per ei_status.word16. */
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16)
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+/* Read 'count' bytes from the card's ring buffer at 'ring_offset' into
+   skb->data via a remote-DMA read.  Guards against re-entry with the
+   ei_status.dmaing flag and acks ENISR_RDC when done. */
+static void
+ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* Program the remote-DMA byte count and start address, then read. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01) { /* Odd trailing byte on a word-wide card. */
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk("%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Upload 'count' bytes from 'buf' into the card's transmit buffer at
+   'start_page' via a remote-DMA write, then wait (up to ~20ms) for the
+   remote-DMA-complete (RDC) interrupt bit before returning. */
+static void
+ne_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+ retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0x00, nic_base + EN0_RCNTHI);
+ outb_p(0x42, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+#endif
+
+ /* Clear any stale remote-DMA-complete bit before starting. */
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk("%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0) /* Retry the whole upload exactly once. */
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+/* Static device table: each slot's name points at a NAMELEN-byte slice
+   of namelist[] and gets its io/irq/bad values copied by init_module(). */
+static char namelist[NAMELEN * MAX_NE_CARDS] = { 0, };
+static struct device dev_ne[MAX_NE_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+/* Per-card load-time options: I/O base, IRQ, and the "bad card" value
+   passed through dev->mem_end (0xbad skips parts of the probe). */
+static int io[MAX_NE_CARDS] = { 0, };
+static int irq[MAX_NE_CARDS] = { 0, };
+static int bad[MAX_NE_CARDS] = { 0, };
+
+/* This is set up so that no autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
+
+/* Module load: register up to MAX_NE_CARDS devices from the io[]/irq[]
+   arrays.  The probe itself runs from register_netdev() via
+   dev->init = ne_probe.  Succeeds if at least one card registers;
+   returns -ENXIO if the very first slot fails. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct device *dev = &dev_ne[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = ne_probe;
+ dev->mem_end = bad[this_dev]; /* Probe-skipping sentinel (0xbad). */
+ if (register_netdev(dev) == 0) {
+ found++;
+ continue;
+ }
+ if (found != 0) /* Got at least one. */
+ return 0;
+ if (io[this_dev] != 0)
+ printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
+ else
+ printk(KERN_NOTICE "ne.c: No PCI cards found. Use \"io=0xNNN\" value(s) for ISA cards.\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/* Module unload: release every slot whose probe succeeded (a non-NULL
+   dev->priv marks a found card), freeing its priv area, IRQ, I/O
+   region, and net-device registration. */
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct device *dev = &dev_ne[this_dev];
+ if (dev->priv != NULL) {
+ /* NOTE(review): priv is freed before unregister_netdev();
+ looks safe for this kernel generation, but verify if ported. */
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_irq(dev->irq, dev);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c ne.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/ne2k-pci.c b/linux/src/drivers/net/ne2k-pci.c
new file mode 100644
index 0000000..2b2b1f4
--- /dev/null
+++ b/linux/src/drivers/net/ne2k-pci.c
@@ -0,0 +1,647 @@
+/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */
+/*
+ A Linux device driver for PCI NE2000 clones.
+
+ Authors and other copyright holders:
+ 1992-2002 by Donald Becker, NE2000 core and various modifications.
+ 1995-1998 by Paul Gortmaker, core modifications and PCI support.
+ Copyright 1993 assigned to the United States Government as represented
+ by the Director, National Security Agency.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Issues remaining:
+ People are making PCI ne2000 clones! Oh the horror, the horror...
+ Limited full-duplex support.
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"ne2k-pci.c:v1.05 6/13/2002 D. Becker/P. Gortmaker\n";
+static const char version2[] =
+" http://www.scyld.com/network/ne2k-pci.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS] = {0, };
+static int options[MAX_UNITS] = {0, };
+
+/* Force a non std. amount of memory. Units are 256 byte pages. */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#if LINUX_VERSION_CODE < 0x20200
+#define lock_8390_module()
+#define unlock_8390_module()
+#else
+#include <linux/init.h>
+#endif
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
+MODULE_DESCRIPTION("PCI NE2000 clone driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do #define LOAD_8390_BY_KERNELD to automatically load 8390 support. */
+#ifdef LOAD_8390_BY_KERNELD
+#include <linux/kerneld.h>
+#endif
+
+static void *ne2k_pci_probe1(struct pci_dev *pdev, void *dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt);
+/* Flags. We rename an existing ei_status field to store flags! */
+/* Thus only the low 8 bits are usable for non-init-time flags. */
+#define ne2k_flags reg0
+enum {
+ ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */
+ FORCE_FDX=0x20, /* User override. */
+ REALTEK_FDX=0x40, HOLTEK_FDX=0x80,
+ STOP_PG_0x60=0x100,
+};
+#define NE_IO_EXTENT 0x20
+#ifndef USE_MEMORY_OPS
+#define PCI_IOTYPE (PCI_USES_IO | PCI_ADDR0)
+#else
+#warning When using PCI memory mode the 8390 core must be compiled for memory
+#warning operations as well.
+#warning Not all PCI NE2000 clones support memory mode access.
+#define PCI_IOTYPE (PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"RealTek RTL-8029",{ 0x802910ec, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT,
+ REALTEK_FDX },
+ {"Winbond 89C940", { 0x09401050, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"Winbond w89c940", { 0x5a5a1050, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"KTI ET32P2", { 0x30008e2e, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"NetVin NV5000SC", { 0x50004a14, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"Via 86C926", { 0x09261106, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, ONLY_16BIT_IO},
+ {"SureCom NE34", { 0x0e3410bd, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"Holtek HT80232", { 0x005812c3, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, ONLY_16BIT_IO | HOLTEK_FDX},
+ {"Holtek HT80229", { 0x559812c3, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60},
+ {"Compex RL2000",
+ { 0x140111f6, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ /* A mutant board: Winbond chip with a RTL format EEPROM. */
+ {"Winbond w89c940 (misprogrammed type 0x1980)", { 0x19808c4a, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info ne2k_pci_drv_id = {
+ "ne2k-pci", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, ne2k_pci_probe1,
+};
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+int ne2k_pci_probe(struct net_device *dev);
+
+static int ne2k_pci_open(struct net_device *dev);
+static int ne2k_pci_close(struct net_device *dev);
+
+static void ne2k_pci_reset_8390(struct net_device *dev);
+static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne2k_pci_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+
+/* There is no room in the standard 8390 structure for extra info we need,
+ so we build a meta/outer-wrapper structure.. */
+struct ne2k_pci_card {
+ struct ne2k_pci_card *next;
+ struct net_device *dev;
+ struct pci_dev *pci_dev;
+};
+/* A list of all installed devices, for removing the driver module. */
+static struct ne2k_pci_card *ne2k_card_list = NULL;
+
+#ifdef LOAD_8390_BY_KERNELD
+static int (*Lethdev_init)(struct net_device *dev);
+static void (*LNS8390_init)(struct net_device *dev, int startp);
+static int (*Lei_open)(struct net_device *dev);
+static int (*Lei_close)(struct net_device *dev);
+static void (*Lei_interrupt)(int irq, void *dev_id, struct pt_regs *regs);
+#else
+#define Lethdev_init ethdev_init
+#define LNS8390_init NS8390_init
+#define Lei_open ei_open
+#define Lei_close ei_close
+#define Lei_interrupt ei_interrupt
+#endif
+
+#ifdef MODULE
+int init_module(void)
+{
+ int found_cnt;
+
+ if (debug) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ found_cnt = pci_drv_register(&ne2k_pci_drv_id, NULL);
+ if (found_cnt < 0) {
+ printk(KERN_NOTICE "ne2k-pci.c: No useable cards found, driver NOT installed.\n");
+ return -ENODEV;
+ }
+ lock_8390_module();
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ struct net_device *dev;
+ struct ne2k_pci_card *this_card;
+
+ pci_drv_unregister(&ne2k_pci_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (ne2k_card_list) {
+ dev = ne2k_card_list->dev;
+ unregister_netdev(dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ kfree(dev);
+ this_card = ne2k_card_list;
+ ne2k_card_list = ne2k_card_list->next;
+ kfree(this_card);
+ }
+
+#ifdef LOAD_8390_BY_KERNELD
+ release_module("8390", 0);
+#else
+ unlock_8390_module();
+#endif
+}
+
+#else
+
+int ne2k_pci_probe(struct net_device *dev)
+{
+ int found_cnt = pci_drv_register(&ne2k_pci_drv_id, NULL);
+ if (found_cnt >= 0 && debug)
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return found_cnt;
+}
+#endif /* MODULE */
+
+static void *ne2k_pci_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt)
+{
+ struct net_device *dev;
+ int i;
+ unsigned char SA_prom[32];
+ int start_page, stop_page;
+ int reg0 = inb(ioaddr);
+ int flags = pci_id_tbl[chip_idx].drv_flags;
+ struct ne2k_pci_card *ne2k_card;
+
+ if (reg0 == 0xFF)
+ return 0;
+
+ /* Do a preliminary verification that we have a 8390. */
+ {
+ int regd;
+ outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb(ioaddr + 0x0d);
+ outb(0xff, ioaddr + 0x0d);
+ outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb(ioaddr + EN0_COUNTER0) != 0) {
+ outb(reg0, ioaddr);
+ outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+ return 0;
+ }
+ }
+
+ dev = init_etherdev(init_dev, 0);
+
+ if (dev == NULL)
+ return 0;
+ ne2k_card = kmalloc(sizeof(struct ne2k_pci_card), GFP_KERNEL);
+ if (ne2k_card == NULL)
+ return 0;
+
+ ne2k_card->next = ne2k_card_list;
+ ne2k_card_list = ne2k_card;
+ ne2k_card->dev = dev;
+ ne2k_card->pci_dev = pdev;
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ /* This looks like a horrible timing loop, but it should never take
+ more than a few cycles.
+ */
+ while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ /* Limit wait: '2' avoids jiffy roll-over. */
+ if (jiffies - reset_start_time > 2) {
+ printk("ne2k-pci: Card failure (no reset ack).\n");
+ return 0;
+ }
+
+ outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+#if defined(LOAD_8390_BY_KERNELD)
+ /* We are now certain the 8390 module is required. */
+ if (request_module("8390")) {
+ printk("ne2k-pci: Failed to load the 8390 core module.\n");
+ return 0;
+ }
+ if ((Lethdev_init = (void*)get_module_symbol(0, "ethdev_init")) == 0 ||
+ (LNS8390_init = (void*)get_module_symbol(0, "NS8390_init")) == 0 ||
+ (Lei_open = (void*)get_module_symbol(0, "ei_open")) == 0 ||
+ (Lei_close = (void*)get_module_symbol(0, "ei_close")) == 0 ||
+ (Lei_interrupt = (void*)get_module_symbol(0, "ei_interrupt")) == 0 ) {
+ printk("ne2k-pci: Failed to resolve an 8390 symbol.\n");
+ release_module("8390", 0);
+ return 0;
+ }
+#endif
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x49, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+
+ /* Note: all PCI cards have at least 16 bit access, so we don't have
+ to check for 8 bit cards. Most cards permit 32 bit access. */
+
+ if (flags & ONLY_32BIT_IO) {
+ for (i = 0; i < 8; i++)
+ ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT));
+ } else
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+
+ /* We always set the 8390 registers for word mode. */
+ outb(0x49, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+
+ stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG;
+
+ /* Set up the rest of the parameters. */
+ dev->irq = irq;
+ dev->base_addr = ioaddr;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (Lethdev_init(dev)) {
+ printk ("%s: unable to get memory for dev->priv.\n", dev->name);
+ return 0;
+ }
+
+ request_region(ioaddr, NE_IO_EXTENT, dev->name);
+
+ printk("%s: %s found at %#lx, IRQ %d, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
+ for(i = 0; i < 6; i++) {
+ printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ ei_status.name = pci_id_tbl[chip_idx].name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+ ei_status.ne2k_flags = flags;
+ if (fnd_cnt < MAX_UNITS) {
+ if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) {
+ printk("%s: Full duplex set by user option.\n", dev->name);
+ ei_status.ne2k_flags |= FORCE_FDX;
+ }
+ }
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne2k_pci_reset_8390;
+ ei_status.block_input = &ne2k_pci_block_input;
+ ei_status.block_output = &ne2k_pci_block_output;
+ ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr;
+ dev->open = &ne2k_pci_open;
+ dev->stop = &ne2k_pci_close;
+ LNS8390_init(dev, 0);
+ return dev;
+}
+
+static int ne2k_pci_open(struct net_device *dev)
+{
+ MOD_INC_USE_COUNT;
+ if (request_irq(dev->irq, Lei_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+ /* Set full duplex for the chips that we know about. */
+ if (ei_status.ne2k_flags & FORCE_FDX) {
+ long ioaddr = dev->base_addr;
+ if (ei_status.ne2k_flags & REALTEK_FDX) {
+ outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */
+ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
+ } else if (ei_status.ne2k_flags & HOLTEK_FDX)
+ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
+ }
+ Lei_open(dev);
+ return 0;
+}
+
+static int ne2k_pci_close(struct net_device *dev)
+{
+ Lei_close(dev);
+ free_irq(dev->irq, dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+static void ne2k_pci_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
+ dev->name, jiffies);
+
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2) {
+ printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne2k_pci_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ long nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ (int)dev->interrupt);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb(0, nic_base + EN0_RCNTHI);
+ outb(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb(ring_page, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ } else {
+ *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
+ le16_to_cpus(&hdr->count);
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ long nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ (int)dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+ }
+ } else {
+ insl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
+ buf += count & ~3;
+ if (count & 2) {
+ *((u16 *) buf) = le16_to_cpu(inw(NE_BASE + NE_DATAPORT));
+ buf = (void *) buf + sizeof (u16);
+ }
+ if (count & 1)
+ *buf = inb(NE_BASE + NE_DATAPORT);
+ }
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void
+ne2k_pci_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+
+ /* On little-endian it's always safe to round the count up for
+ word writes. */
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ else
+ if (count & 0x01)
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ (int)dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb(0x42, nic_base + EN0_RCNTLO);
+ outb(0x00, nic_base + EN0_RCNTHI);
+ outb(0x42, nic_base + EN0_RSARLO);
+ outb(0x00, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+#endif
+ outb(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(0x00, nic_base + EN0_RSARLO);
+ outb(start_page, nic_base + EN0_RSARHI);
+ outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
+ buf += count & ~3;
+ if (count & 2) {
+ outw(cpu_to_le16(*((u16 *) buf)), NE_BASE + NE_DATAPORT);
+ buf = (void *) buf + sizeof (u16);
+ }
+ }
+ }
+
+ dma_start = jiffies;
+
+ while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne2k_pci_reset_8390(dev);
+ LNS8390_init(dev,1);
+ break;
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ne2k-pci.c -I/usr/src/linux/drivers/net/"
+ * alt-compile-command: "gcc -DMODULE -O6 -c ne2k-pci.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * version-control: t
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/net_init.c b/linux/src/drivers/net/net_init.c
new file mode 100644
index 0000000..3d4c42d
--- /dev/null
+++ b/linux/src/drivers/net/net_init.c
@@ -0,0 +1,439 @@
+/* netdrv_init.c: Initialization for network devices. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This file contains the initialization for the "pl14+" style ethernet
+ drivers. It should eventually replace most of drivers/net/Space.c.
+ It's primary advantage is that it's able to allocate low-memory buffers.
+ A secondary advantage is that the dangerous NE*000 netcards can reserve
+ their I/O port region before the SCSI probes start.
+
+ Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
+ ethdev_index[MAX_ETH_CARDS]
+ register_netdev() / unregister_netdev()
+
+ Modifications by Wolfgang Walter
+ Use dev_close cleanly so we always shut things down tidily.
+
+ Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
+
+ 14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function.
+
+ August 12, 1996 - Lawrence V. Stefani: Added fddi_change_mtu() and
+ fddi_setup() functions.
+ Sept. 10, 1996 - Lawrence V. Stefani: Increased hard_header_len to
+ include 3 pad bytes.
+*/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/malloc.h>
+#include <linux/if_ether.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/trdevice.h>
+#include <linux/if_arp.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+
+/* The network devices currently exist only in the socket namespace, so these
+ entries are unused. The only ones that make sense are
+ open start the ethercard
+ close stop the ethercard
+ ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
+ One can also imagine getting raw packets using
+ read & write
+ but this is probably better handled by a raw packet socket.
+
+ Given that almost all of these functions are handled in the current
+ socket-based scheme, putting ethercard devices in /dev/ seems pointless.
+
+ [Removed all support for /dev network devices. When someone adds
+ streams then by magic we get them, but otherwise they are un-needed
+ and a space waste]
+*/
+
+/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */
+#define MAX_ETH_CARDS 16 /* same as the number if irq's in irq2dev[] */
+static struct device *ethdev_index[MAX_ETH_CARDS];
+
+
+/* Fill in the fields of the device structure with ethernet-generic values.
+
+ If no device structure is passed, a new one is constructed, complete with
+ a SIZEOF_PRIVATE private data area.
+
+ If an empty string area is passed as dev->name, or a new structure is made,
+ a new name string is constructed. The passed string area should be 8 bytes
+ long.
+ */
+
+struct device *
+init_etherdev(struct device *dev, int sizeof_priv)
+{
+ int new_device = 0;
+ int i;
+
+ /* Use an existing correctly named device in Space.c:dev_base. */
+ if (dev == NULL) {
+ int alloc_size = sizeof(struct device) + sizeof("eth%d ")
+ + sizeof_priv + 3;
+ struct device *cur_dev;
+ char pname[8]; /* Putative name for the device. */
+
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(pname, "eth%d", i);
+ for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next)
+ if (strcmp(pname, cur_dev->name) == 0) {
+ dev = cur_dev;
+ dev->init = NULL;
+ sizeof_priv = (sizeof_priv + 3) & ~3;
+ dev->priv = sizeof_priv
+ ? kmalloc(sizeof_priv, GFP_KERNEL)
+ : NULL;
+ if (dev->priv) memset(dev->priv, 0, sizeof_priv);
+ goto found;
+ }
+ }
+
+ alloc_size &= ~3; /* Round to dword boundary. */
+
+ dev = (struct device *)kmalloc(alloc_size, GFP_KERNEL);
+ memset(dev, 0, alloc_size);
+ if (sizeof_priv)
+ dev->priv = (void *) (dev + 1);
+ dev->name = sizeof_priv + (char *)(dev + 1);
+ new_device = 1;
+ }
+
+ found: /* From the double loop above. */
+
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ ether_setup(dev); /* Hmmm, should this be called here? */
+
+ if (new_device) {
+ /* Append the device to the device queue. */
+ struct device **old_devp = &dev_base;
+ while ((*old_devp)->next)
+ old_devp = & (*old_devp)->next;
+ (*old_devp)->next = dev;
+ dev->next = 0;
+ }
+ return dev;
+}
+
+
+static int eth_mac_addr(struct device *dev, void *p)
+{
+ struct sockaddr *addr=p;
+ if(dev->start)
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ return 0;
+}
+
+static int eth_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef CONFIG_FDDI
+
+static int fddi_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
+ return(-EINVAL);
+ dev->mtu = new_mtu;
+ return(0);
+}
+
+#endif
+
+void ether_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ /* register boot-defined "eth" devices */
+ if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) {
+ i = simple_strtoul(dev->name + 3, NULL, 0);
+ if (ethdev_index[i] == NULL) {
+ ethdev_index[i] = dev;
+ }
+ else if (dev != ethdev_index[i]) {
+ /* Really shouldn't happen! */
+ printk("ether_setup: Ouch! Someone else took %s\n",
+ dev->name);
+ }
+ }
+
+ dev->change_mtu = eth_change_mtu;
+ dev->hard_header = eth_header;
+ dev->rebuild_header = eth_rebuild_header;
+ dev->set_mac_address = eth_mac_addr;
+ dev->header_cache_bind = eth_header_cache_bind;
+ dev->header_cache_update= eth_header_cache_update;
+
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = 1500; /* eth_mtu */
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 100; /* Ethernet wants good queues */
+
+ memset(dev->broadcast,0xFF, ETH_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#ifdef CONFIG_TR
+
+void tr_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->hard_header = tr_header;
+ dev->rebuild_header = tr_rebuild_header;
+
+ dev->type = ARPHRD_IEEE802;
+ dev->hard_header_len = TR_HLEN;
+ dev->mtu = 2000; /* bug in fragmenter...*/
+ dev->addr_len = TR_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on tr */
+
+ memset(dev->broadcast,0xFF, TR_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#endif
+
+#ifdef CONFIG_FDDI
+
+void fddi_setup(struct device *dev)
+ {
+ int i;
+
+ /*
+ * Fill in the fields of the device structure with FDDI-generic values.
+ * This should be in a common file instead of per-driver.
+ */
+ for (i=0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->change_mtu = fddi_change_mtu;
+ dev->hard_header = fddi_header;
+ dev->rebuild_header = fddi_rebuild_header;
+
+ dev->type = ARPHRD_FDDI;
+ dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
+ dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
+ dev->addr_len = FDDI_K_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on FDDI */
+
+ memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
+
+ /* New-style flags */
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+ return;
+ }
+
+#endif
+
+int ether_config(struct device *dev, struct ifmap *map)
+{
+ if (map->mem_start != (u_long)(-1))
+ dev->mem_start = map->mem_start;
+ if (map->mem_end != (u_long)(-1))
+ dev->mem_end = map->mem_end;
+ if (map->base_addr != (u_short)(-1))
+ dev->base_addr = map->base_addr;
+ if (map->irq != (u_char)(-1))
+ dev->irq = map->irq;
+ if (map->dma != (u_char)(-1))
+ dev->dma = map->dma;
+ if (map->port != (u_char)(-1))
+ dev->if_port = map->port;
+ return 0;
+}
+
+int register_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i=MAX_ETH_CARDS;
+
+ save_flags(flags);
+ cli();
+
+ if (dev && dev->init) {
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+/* printk("loading device '%s'...\n", dev->name);*/
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ sti(); /* device probes assume interrupts enabled */
+ if (dev->init(dev) != 0) {
+ if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL;
+ restore_flags(flags);
+ return -EIO;
+ }
+ cli();
+
+ /* Add device to end of chain */
+ if (dev_base) {
+ while (d->next)
+ d = d->next;
+ d->next = dev;
+ }
+ else
+ dev_base = dev;
+ dev->next = NULL;
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+void unregister_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i;
+
+ save_flags(flags);
+ cli();
+
+ if (dev == NULL)
+ {
+ printk("was NULL\n");
+ restore_flags(flags);
+ return;
+ }
+ /* else */
+ if (dev->start)
+ printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name);
+
+ /*
+ * must jump over main_device+aliases
+ * avoid alias devices unregistration so that only
+ * net_alias module manages them
+ */
+#ifdef CONFIG_NET_ALIAS
+ if (dev_base == dev)
+ dev_base = net_alias_nextdev(dev);
+ else
+ {
+ while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */
+ d = net_alias_nextdev(d);
+
+ if (d && (net_alias_nextdev(d) == dev))
+ {
+ /*
+ * Critical: Bypass by consider devices as blocks (maindev+aliases)
+ */
+ net_alias_nextdev_set(d, net_alias_nextdev(dev));
+ }
+#else
+ if (dev_base == dev)
+ dev_base = dev->next;
+ else
+ {
+ while (d && (d->next != dev))
+ d = d->next;
+
+ if (d && (d->next == dev))
+ {
+ d->next = dev->next;
+ }
+#endif
+ else
+ {
+ printk("unregister_netdev: '%s' not found\n", dev->name);
+ restore_flags(flags);
+ return;
+ }
+ }
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ {
+ if (ethdev_index[i] == dev)
+ {
+ ethdev_index[i] = NULL;
+ break;
+ }
+ }
+
+ restore_flags(flags);
+
+ /*
+ * You can i.e use a interfaces in a route though it is not up.
+ * We call close_dev (which is changed: it will down a device even if
+ * dev->flags==0 (but it will not call dev->stop if IFF_UP
+ * is not set).
+ * This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev),
+ * dev_mc_discard(dev), ....
+ */
+
+ dev_close(dev);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/ni52.c b/linux/src/drivers/net/ni52.c
new file mode 100644
index 0000000..6d486e9
--- /dev/null
+++ b/linux/src/drivers/net/ni52.c
@@ -0,0 +1,1387 @@
+/*
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * Alphacode 0.80 (96/02/19) for Linux 1.3.66 (or later)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (Michael.Hipp@student.uni-tuebingen.de)
+ * [feel free to mail ....]
+ *
+ * when using as module: (no autoprobing!)
+ * compile with: gcc -D__KERNEL__ -DMODULE -O2 -c ni52.c
+ * run with e.g: insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
+ *
+ * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES TO ME.
+ *
+ * If you find a bug, please report me:
+ * The kernel panic output and any kmsg from the ni52 driver
+ * the ni5210-driver-version and the linux-kernel version
+ * how many shared memory (memsize) on the netcard,
+ * bootprom: yes/no, base_addr, mem_start
+ * maybe the ni5210-card revision and the i82586 version
+ *
+ * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
+ * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,
+ * 0xd8000,0xcc000,0xce000,0xda000,0xdc000
+ *
+ * sources:
+ * skeleton.c from Donald Becker
+ *
+ * I have also done a look in the following sources: (mail me if you need them)
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's (fourth) i82586-driver for BSD
+ * (before getting an i82596 (yes 596 not 586) manual, the existing drivers helped
+ * me a lot to understand this tricky chip.)
+ *
+ * Known Problems:
+ * The internal sysbus seems to be slow. So we often lose packets because of
+ * overruns while receiving from a fast remote host.
+ * This can slow down TCP connections. Maybe the newer ni5210 cards are better.
+ * my experience is that if a machine sends with more than about 500-600K/s
+ * the fifo/sysbus overflows.
+ *
+ * IMPORTANT NOTE:
+ * On fast networks, it's a (very) good idea to have 16K shared memory. With
+ * 8K, we can store only 4 receive frames, so it can (easily) happen that a remote
+ * machine 'overruns' our system.
+ *
+ * Known i82586/card problems (I'm sure, there are many more!):
+ * Running the NOP-mode, the i82586 sometimes seems to forget to report
+ * every xmit-interrupt until we restart the CU.
+ * Another MAJOR bug is, that the RU sometimes seems to ignore the EL-Bit
+ * in the RBD-Struct which indicates an end of the RBD queue.
+ * Instead, the RU fetches another (randomly selected and
+ * usually used) RBD and begins to fill it. (Maybe, this happens only if
+ * the last buffer from the previous RFD fits exact into the queue and
+ * the next RFD can't fetch an initial RBD. Anyone knows more? )
+ *
+ * results from ftp performance tests with Linux 1.2.5
+ * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
+ * sending in NOP-mode: peak performance up to 530K/s (but better don't run this mode)
+ */
+
+/*
+ * 19.Feb.96: more Mcast changes, module support (MH)
+ *
+ * 18.Nov.95: Mcast changes (AC).
+ *
+ * 23.April.95: fixed(?) receiving problems by configuring a RFD more
+ * than the number of RBD's. Can maybe cause other problems.
+ * 18.April.95: Added MODULE support (MH)
+ * 17.April.95: MC related changes in init586() and set_multicast_list().
+ * removed use of 'jiffies' in init586() (MH)
+ *
+ * 19.Sep.94: Added Multicast support (not tested yet) (MH)
+ *
+ * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
+ * Now, every RFD has exact one RBD. (MH)
+ *
+ * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
+ *
+ * 19.Aug.94: changed request_irq() parameter (MH)
+ *
+ * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
+ *
+ * 19.July.94: lotsa cleanups .. (MH)
+ *
+ * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
+ *
+ * 4.July.94: patches for Linux 1.1.24 (MH)
+ *
+ * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
+ *
+ * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, too (MH)
+ *
+ * < 30.Sep.93: first versions
+ */
+
+/* Driver tunables (file-scope; presumably set at insmod time -- the
+ * parameter plumbing is not visible in this chunk). */
+static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */
+static int automatic_resume = 0; /* experimental .. better should be zero */
+static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
+static int fifo=0x8; /* don't change */
+
+/* #define REALLY_SLOW_IO */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni52.h"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 1 /* 8 Bit */
+
+#define ni_attn586() {outb(0,dev->base_addr+NI52_ATTENTION);}
+#define ni_reset586() {outb(0,dev->base_addr+NI52_RESET);}
+#define ni_disint() {outb(0,dev->base_addr+NI52_INTDIS);}
+#define ni_enaint() {outb(0,dev->base_addr+NI52_INTENA);}
+
+#define make32(ptr16) (p->memtop + (short) (ptr16) )
+#define make24(ptr32) ((char *) (ptr32) - p->base)
+#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode it's
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, better do not change these values: */
+
+#define RECV_BUFF_SIZE 1524 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
+
+/**************************************************************************/
+
+/* different DELAYs */
+#define DELAY(x) __delay((loops_per_sec>>5)*(x));
+#define DELAY_16(); { __delay( (loops_per_sec>>16)+1 ); }
+#define DELAY_18(); { __delay( (loops_per_sec>>18)+1 ); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_cuc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
+ if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
+
+#define WAIT_4_SCB_CMD_RUC() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_ruc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
+ if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
+
+#define WAIT_4_STAT_COMPL(addr) { int i; \
+ for(i=0;i<32767;i++) { \
+ if((addr)->cmd_status & STAT_COMPL) break; \
+ DELAY_16(); DELAY_16(); } }
+
+#define NI52_TOTAL_SIZE 16
+#define NI52_ADDR0 0x02
+#define NI52_ADDR1 0x07
+#define NI52_ADDR2 0x01
+
+static int ni52_probe1(struct device *dev,int ioaddr);
+static void ni52_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr);
+static int ni52_open(struct device *dev);
+static int ni52_close(struct device *dev);
+static int ni52_send_packet(struct sk_buff *,struct device *);
+static struct enet_statistics *ni52_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+#if 0
+static void ni52_dump(struct device *,void *);
+#endif
+
+/* helper-functions */
+static int init586(struct device *dev);
+static int check586(struct device *dev,char *where,unsigned size);
+static void alloc586(struct device *dev);
+static void startrecv586(struct device *dev);
+static void *alloc_rfa(struct device *dev,void *ptr);
+static void ni52_rcv_int(struct device *dev);
+static void ni52_xmt_int(struct device *dev);
+static void ni52_rnr_int(struct device *dev);
+
+/*
+ * Per-device private state, hung off dev->priv.  Pointers into the card's
+ * shared memory window are 'volatile' because the i82586 updates them
+ * asynchronously behind the CPU's back.
+ */
+struct priv
+{
+ struct enet_statistics stats; /* counters returned by ni52_get_stats */
+ unsigned long base; /* shmem end - 16M; base for make24() addressing */
+ char *memtop; /* one past the end of shared memory; base for make16/make32 */
+ int lock,reseted; /* xmit queue lock / chip-was-reset flag */
+ volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; /* receive frame descriptor ring */
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; /* transmit buffer descriptors */
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct nop_cmd_struct *nop_cmds[2]; /* two NOPs for single-buffer chaining */
+#else
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point,num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; /* raw packet data buffers in shmem */
+ volatile int xmit_count,xmit_last; /* next-free / oldest-in-flight xmit slot */
+};
+
+/**********************************************
+ * close device
+ */
+/**********************************************
+ * close device
+ *
+ * Releases the IRQ, hard-resets the i82586 (which stops the receiver),
+ * and marks the interface down.  Always returns 0.
+ */
+static int ni52_close(struct device *dev)
+{
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+
+ ni_reset586(); /* the hard way to stop the receiver */
+
+ dev->start = 0;
+ dev->tbusy = 0;
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+/**********************************************
+ * open device
+ *
+ * Re-initializes the chip (alloc586/init586), starts the receiver, then
+ * grabs the IRQ.  Returns 0 on success, -EAGAIN if the IRQ is taken
+ * (the chip is reset again in that case).
+ */
+static int ni52_open(struct device *dev)
+{
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+
+ if(request_irq(dev->irq, &ni52_interrupt,0,"ni5210",NULL))
+ {
+ ni_reset586();
+ return -EAGAIN;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ *
+ * Probes the candidate shared-memory window at 'where' (length 'size')
+ * by writing an SCP/ISCP there and watching whether the chip clears the
+ * ISCP 'busy' flag after a channel-attention.  Uses a throwaway priv on
+ * the stack so it can run before dev->priv exists.  Returns 1 if an
+ * i82586 answered, 0 otherwise.  NOTE: writes into the probed memory.
+ */
+static int check586(struct device *dev,char *where,unsigned size)
+{
+ struct priv pb;
+ struct priv *p = /* (struct priv *) dev->priv*/ &pb;
+ char *iscp_addrs[2];
+ int i;
+
+ p->base = (unsigned long) where + size - 0x01000000;
+ p->memtop = where + size;
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
+ if(((char *)p->scp)[i])
+ return 0;
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if(p->scp->sysbus != SYSBUSVAL)
+ return 0;
+
+ /* try the ISCP at both ends of the window */
+ iscp_addrs[0] = where;
+ iscp_addrs[1]= (char *) p->scp - sizeof(struct iscp_struct);
+
+ for(i=0;i<2;i++)
+ {
+ p->iscp = (struct iscp_struct *) iscp_addrs[i];
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ ni_reset586();
+ ni_attn586();
+ DELAY(1); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+ }
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by ni52_probe1 and open586.
+ */
+/******************************************************************
+ * set iscp at the right place, called by ni52_probe1 and open586.
+ *
+ * Lays out SCP/ISCP/SCB in the shared-memory window, points the chip at
+ * them and performs the init handshake (reset + channel attention, then
+ * wait for ISCP 'busy' to clear).  Prints a warning on handshake failure
+ * but does not return an error -- callers do not check.
+ */
+void alloc586(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ ni_reset586();
+ DELAY(1);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->scb = (struct scb_struct *) (dev->mem_start);
+ p->iscp = (struct iscp_struct *) ((char *)p->scp - sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp,0,sizeof(struct iscp_struct));
+ memset((char *) p->scp ,0,sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+
+ p->iscp->busy = 1;
+ ni_reset586();
+ ni_attn586();
+
+ DELAY(1);
+
+ if(p->iscp->busy)
+ printk("%s: Init-Problems (alloc).\n",dev->name);
+
+ p->reseted = 0;
+
+ memset((char *)p->scb,0,sizeof(struct scb_struct));
+}
+
+/**********************************************
+ * probe the ni5210-card
+ */
+/**********************************************
+ * probe the ni5210-card
+ *
+ * Entry point for device detection.  If dev->base_addr names a specific
+ * port (> 0x1ff) only that port is checked; base_addr in (0,0x1ff] means
+ * "don't probe".  Otherwise (non-module build only) a list of common
+ * ports -- and optionally, with FULL_IO_PROBE, the whole 0x200-0x3ff
+ * range -- is scanned for the two NI52 magic ID bytes.  Returns 0 on
+ * success, ENXIO/ENODEV otherwise (positive errno, 2.0-era convention).
+ */
+int ni52_probe(struct device *dev)
+{
+#ifndef MODULE
+ int *port;
+ static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
+#endif
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ if( (inb(base_addr+NI52_MAGIC1) == NI52_MAGICVAL1) &&
+ (inb(base_addr+NI52_MAGIC2) == NI52_MAGICVAL2))
+ return ni52_probe1(dev, base_addr);
+ else if (base_addr > 0) /* Don't probe at all. */
+ return ENXIO;
+
+#ifdef MODULE
+ printk("%s: no autoprobing allowed for modules.\n",dev->name);
+#else
+ for (port = ports; *port; port++) {
+ int ioaddr = *port;
+ if (check_region(ioaddr, NI52_TOTAL_SIZE))
+ continue;
+ if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
+ !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2))
+ continue;
+
+ dev->base_addr = ioaddr;
+ if (ni52_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+#ifdef FULL_IO_PROBE
+ /* exhaustive scan of the ISA I/O range, 8 bytes at a time */
+ for(dev->base_addr=0x200;dev->base_addr<0x400;dev->base_addr+=8)
+ {
+ int ioaddr = dev->base_addr;
+ if (check_region(ioaddr, NI52_TOTAL_SIZE))
+ continue;
+ if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
+ !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2))
+ continue;
+ if (ni52_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+#endif
+
+#endif
+
+ dev->base_addr = base_addr; /* restore caller's value on failure */
+ return ENODEV;
+}
+
+/**********************************************
+ * Second-stage probe at a known I/O address: verify the station address
+ * prefix, reserve the I/O region, locate/validate shared memory (fixed
+ * size for modules, auto-probed otherwise), allocate dev->priv, detect
+ * the IRQ if needed and wire up the device methods.  Returns 0 on
+ * success; ENODEV/-ENOMEM/1 on the various failures (mixed error
+ * conventions are original behavior).
+ * NOTE(review): error paths after the region request / kmalloc leak the
+ * I/O region and dev->priv -- acknowledged by the comment below.
+ */
+static int ni52_probe1(struct device *dev,int ioaddr)
+{
+ int i,size;
+
+ for(i=0;i<ETH_ALEN;i++)
+ dev->dev_addr[i] = inb(dev->base_addr+i);
+
+ if(dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
+ || dev->dev_addr[2] != NI52_ADDR2)
+ return ENODEV;
+
+ printk("%s: NI5210 found at %#3lx, ",dev->name,dev->base_addr);
+
+ request_region(ioaddr,NI52_TOTAL_SIZE,"ni5210");
+
+ /*
+ * check (or search) IO-Memory, 8K and 16K
+ */
+#ifdef MODULE
+ size = dev->mem_end - dev->mem_start;
+ if(size != 0x2000 && size != 0x4000)
+ {
+ printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n",dev->name,size);
+ return ENODEV;
+ }
+ if(!check586(dev,(char *) dev->mem_start,size))
+ {
+ printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size);
+ return ENODEV;
+ }
+#else
+ if(dev->mem_start != 0) /* no auto-mem-probe */
+ {
+ size = 0x4000; /* check for 16K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ size = 0x2000; /* check for 8K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memprobe, Can't find memory at 0x%lx!\n",dev->mem_start);
+ return ENODEV;
+ }
+ }
+ }
+ else
+ {
+ /* auto-probe the usual ISA shared-memory windows */
+ static long memaddrs[] = { 0xc8000,0xca000,0xcc000,0xce000,0xd0000,0xd2000,
+ 0xd4000,0xd6000,0xd8000,0xda000,0xdc000, 0 };
+ for(i=0;;i++)
+ {
+ if(!memaddrs[i]) {
+ printk("?memprobe, Can't find io-memory!\n");
+ return ENODEV;
+ }
+ dev->mem_start = memaddrs[i];
+ size = 0x2000; /* check for 8K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 8K-check */
+ break;
+ size = 0x4000; /* check for 16K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 16K-check */
+ break;
+ }
+ }
+ dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */
+#endif
+
+ dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
+ if(dev->priv == NULL)
+ {
+ printk("%s: Ooops .. can't allocate private driver memory.\n",dev->name);
+ return -ENOMEM;
+ }
+ /* warning: we don't free it on errors */
+ memset((char *) dev->priv,0,sizeof(struct priv));
+
+ ((struct priv *) (dev->priv))->memtop = (char *) dev->mem_start + size;
+ ((struct priv *) (dev->priv))->base = dev->mem_start + size - 0x01000000;
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if(size == 0x2000)
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8;
+ else
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16;
+
+ printk("Memaddr: 0x%lx, Memsize: %d, ",dev->mem_start,size);
+
+ if(dev->irq < 2)
+ {
+ /* let the chip raise an interrupt and see which line fires */
+ autoirq_setup(0);
+ ni_reset586();
+ ni_attn586();
+ if(!(dev->irq = autoirq_report(2)))
+ {
+ printk("?autoirq, Failed to detect IRQ line!\n");
+ return 1;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else {
+ if(dev->irq == 2)
+ dev->irq = 9; /* IRQ2 is cascaded to IRQ9 on AT machines */
+ printk("IRQ %d (assigned and not checked!).\n",dev->irq);
+ }
+
+ dev->open = &ni52_open;
+ dev->stop = &ni52_close;
+ dev->get_stats = &ni52_get_stats;
+ dev->hard_start_xmit = &ni52_send_packet;
+ dev->set_multicast_list = &set_multicast_list;
+
+ dev->if_port = 0;
+
+ ether_setup(dev);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 0;
+
+ return 0;
+}
+
+/**********************************************
+ * init the chip (ni52-interrupt should be disabled?!)
+ * needs a correct 'allocated' memory
+ */
+
+/*
+ * Program the freshly-allocated i82586: run the CONFIGURE, IA-SETUP and
+ * TDR action commands, optionally a multicast-setup command, then carve
+ * the remaining shared memory into NOP commands, the receive frame area
+ * (alloc_rfa) and the transmit command/buffer/TBD triples, and finally
+ * start the command unit.  Returns 0 on success, 1 on any failure
+ * (command did not complete OK, or shared memory too small).
+ * Interrupts from the chip should be disabled by the caller (see
+ * ni52_open); this function re-enables them at the end.
+ */
+static int init586(struct device *dev)
+{
+ void *ptr;
+ int i,result=0;
+ struct priv *p = (struct priv *) dev->priv;
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct dev_mc_list *dmi=dev->mc_list;
+ int num_addrs=dev->mc_count;
+
+ /* command area starts right behind the SCB; 'ptr' walks forward as
+ * each structure is placed (the action commands below reuse the same
+ * spot one after another) */
+ ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if(dev->flags & IFF_ALLMULTI) {
+ /* fall back to promiscuous mode if the MC list cannot fit in the
+ * space left before the ISCP */
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if(num_addrs > len) {
+ printk("%s: switching to promisc. mode\n",dev->name);
+ dev->flags|=IFF_PROMISC;
+ }
+ }
+ if(dev->flags&IFF_PROMISC)
+ {
+ cfg_cmd->promisc=1;
+ dev->flags|=IFF_PROMISC;
+ }
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+ p->scb->cmd_ruc = 0;
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(cfg_cmd);
+
+ if((cfg_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
+ {
+ printk("%s: configure command failed: %x\n",dev->name,cfg_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+ ias_cmd = (struct iasetup_cmd_struct *)ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(ias_cmd);
+
+ if((ias_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
+ printk("%s (ni52): individual address setup command failed: %04x\n",dev->name,ias_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+ * TDR, wire check .. e.g. no resistor e.t.c
+ */
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(tdr_cmd);
+
+ if(!(tdr_cmd->cmd_status & STAT_COMPL))
+ {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ }
+ else
+ {
+ DELAY_16(); /* wait for result */
+ result = tdr_cmd->status;
+
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ ni_attn586(); /* ack the interrupts */
+
+ /* TDR failures are reported but not fatal */
+ if(result & TDR_LNK_OK)
+ ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if(num_addrs && !(dev->flags & IFF_PROMISC) )
+ {
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = num_addrs * 6;
+
+ for(i=0;i<num_addrs;i++,dmi=dmi->next)
+ memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(mc_cmd);
+
+ if( (mc_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if((void *)ptr > (void *)p->iscp)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
+ p->xmit_cmds[i]->cmd_status = STAT_COMPL;
+ p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter'
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
+ p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_SUSPEND | CMD_INT;
+#endif
+
+ /*
+ * ack. interrupts
+ */
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ ni_attn586();
+ DELAY_16();
+
+ ni_enaint();
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for ni52_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
+/******************************************************
+ * This is a helper routine for ni52_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ *
+ * Builds a circular list of RFDs (num_recv_buffs+rfdadd of them, last
+ * one marked RFD_SUSP), then a circular list of RBDs each pointing at a
+ * RECV_BUFF_SIZE data buffer placed directly behind the descriptors.
+ * Returns the first free address past the data buffers.
+ */
+static void *alloc_rfa(struct device *dev,void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
+ p->rfd_first = rfd;
+
+ for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
+ rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
+ rfd[i].rbd_offset = 0xffff; /* no RBD attached yet */
+ }
+ rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
+
+ for(i=0;i<p->num_recv_buffs;i++)
+ {
+ rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
+ rbd[i].size = RECV_BUFF_SIZE;
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd); /* only the first RFD gets an RBD */
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+/**************************************************
+ * Interrupt Handler ...
+ *
+ * Looks the device up via irq2dev_map, then loops on the SCB status
+ * bits: acknowledges each set, dispatches to the receive (STAT_FR),
+ * receiver-not-ready (STAT_RNR) and transmit-complete (STAT_CX)
+ * handlers, and bails out (disabling chip interrupts) if the chip stops
+ * acknowledging commands.
+ */
+static void ni52_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr)
+{
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ unsigned short stat;
+ int cnt=0;
+ struct priv *p;
+
+ if (!dev) {
+ printk ("ni5210-interrupt: irq %d for unknown device.\n",irq);
+ return;
+ }
+ p = (struct priv *) dev->priv;
+
+ if(debuglevel > 1)
+ printk("I");
+
+ dev->interrupt = 1;
+
+ WAIT_4_SCB_CMD(); /* wait for last command */
+
+ while((stat=p->scb->cus & STAT_MASK))
+ {
+ p->scb->cmd_cuc = stat; /* ack the bits we are about to handle */
+ ni_attn586();
+
+ if(stat & STAT_FR) /* received a frame */
+ ni52_rcv_int(dev);
+
+ if(stat & STAT_RNR) /* RU went 'not ready' */
+ {
+ printk("(R)");
+ if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+ else
+ {
+ printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
+ ni52_rnr_int(dev);
+ }
+ }
+
+ if(stat & STAT_CX) /* command with I-bit set complete */
+ ni52_xmt_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if(stat & STAT_CNA) /* CU went 'not ready' */
+ {
+ if(dev->start)
+ printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
+ }
+#endif
+
+ if(debuglevel > 1)
+ printk("%d",cnt++);
+
+ WAIT_4_SCB_CMD(); /* wait for ack. (ni52_xmt_int can be faster than ack!!) */
+ if(p->scb->cmd_cuc) /* timed out? */
+ {
+ printk("%s: Acknowledge timed out.\n",dev->name);
+ ni_disint();
+ break;
+ }
+ }
+
+ if(debuglevel > 1)
+ printk("i");
+
+ dev->interrupt = 0;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+/*******************************************************
+ * receive-interrupt
+ *
+ * Walks the RFD ring while completed frames are present.  A good
+ * single-buffer frame (the common case -- every RFD has exactly one RBD,
+ * see the 'EL-Bug' note in the header) is copied into a fresh skb and
+ * handed to netif_rx(); oversized multi-RBD frames are drained and
+ * dropped.  Each consumed RFD is recycled: cleared, re-marked RFD_SUSP,
+ * and the previous ring tail's suspend mark removed.
+ */
+static void ni52_rcv_int(struct device *dev)
+{
+ int status,cnt=0;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("R");
+
+ for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
+ {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if(status & RFD_OK) /* frame received without error? */
+ {
+ if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */
+ {
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+ if(skb != NULL)
+ {
+ skb->dev = dev;
+ skb_reserve(skb,2); /* align IP header on 16-byte boundary */
+ skb_put(skb,totlen);
+ eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ p->stats.rx_packets++;
+ }
+ else
+ p->stats.rx_dropped++;
+ }
+ else
+ {
+ int rstat;
+ /* free all RBD's until RBD_LAST is set */
+ totlen = 0;
+ while(!((rstat=rbd->status) & RBD_LAST))
+ {
+ totlen += rstat & RBD_MASK;
+ if(!rstat)
+ {
+ printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
+ break;
+ }
+ rbd->status = 0;
+ rbd = (struct rbd_struct *) make32(rbd->next);
+ }
+ totlen += rstat & RBD_MASK;
+ rbd->status = 0;
+ printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
+ p->stats.rx_dropped++;
+ }
+ }
+ else /* frame !(ok), only with 'save-bad-frames' */
+ {
+ printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
+ p->stats.rx_errors++;
+ }
+ /* recycle this RFD: it becomes the new suspend point of the ring */
+ p->rfd_top->stat_high = 0;
+ p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
+ p->rfd_top->rbd_offset = 0xffff;
+ p->rfd_last->last = 0; /* delete RFD_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ p->scb->rfa_offset = make16(p->rfd_top);
+
+ if(debuglevel > 0)
+ printk("%d",cnt++);
+ }
+
+ if(automatic_resume)
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+
+#ifdef WAIT_4_BUSY
+ {
+ int i;
+ for(i=0;i<1024;i++)
+ {
+ if(p->rfd_top->status)
+ break;
+ DELAY_16();
+ if(i == 1023)
+ printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
+ }
+ }
+#endif
+
+#if 0
+ if(!at_least_one)
+ {
+ int i;
+ volatile struct rfd_struct *rfds=p->rfd_top;
+ volatile struct rbd_struct *rbds;
+ printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
+ for(i=0;i< (p->num_recv_buffs+4);i++)
+ {
+ rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
+ printk("%04x:%04x ",rfds->status,rbds->status);
+ rfds = (struct rfd_struct *) make32(rfds->next);
+ }
+ printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
+ printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
+ }
+ old_at_least = at_least_one;
+#endif
+
+ if(debuglevel > 0)
+ printk("r");
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ *
+ * Aborts the receive unit, rebuilds the whole receive frame area from
+ * scratch and restarts the RU.  Counts one rx_error per occurrence.
+ */
+static void ni52_rnr_int(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
+ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */
+
+ alloc_rfa(dev,(char *)p->rfd_first);
+/* maybe add a check here, before restarting the RU */
+ startrecv586(dev); /* restart RU */
+
+ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+/**********************************************************
+ * handle xmit - interrupt
+ *
+ * Inspects the status of the oldest in-flight transmit command,
+ * updates the statistics accordingly (collisions, carrier, CTS,
+ * underrun, max-collision), advances xmit_last when more than one
+ * transmit buffer is configured, then clears tbusy and kicks NET_BH
+ * so the network layer queues the next packet.
+ */
+static void ni52_xmt_int(struct device *dev)
+{
+ int status;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("X");
+
+ status = p->xmit_cmds[p->xmit_last]->cmd_status;
+ if(!(status & STAT_COMPL))
+ printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
+
+ if(status & STAT_OK)
+ {
+ p->stats.tx_packets++;
+ p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ }
+ else
+ {
+ p->stats.tx_errors++;
+ if(status & TCMD_LATECOLL) {
+ printk("%s: late collision detected.\n",dev->name);
+ p->stats.collisions++;
+ }
+ else if(status & TCMD_NOCARRIER) {
+ p->stats.tx_carrier_errors++;
+ printk("%s: no carrier detected.\n",dev->name);
+ }
+ else if(status & TCMD_LOSTCTS)
+ printk("%s: loss of CTS detected.\n",dev->name);
+ else if(status & TCMD_UNDERRUN) {
+ p->stats.tx_fifo_errors++;
+ printk("%s: DMA underrun detected.\n",dev->name);
+ }
+ else if(status & TCMD_MAXCOLL) {
+ printk("%s: Max. collisions exceeded.\n",dev->name);
+ p->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS > 1)
+ if( (++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+/***********************************************************
+ * (re)start the receiver
+ *
+ * Points the SCB at the first RFD and issues RUC_START; waits for the
+ * chip to accept any pending command first and for the start command
+ * to be accepted afterwards (the WAIT macros do time out internally
+ * despite the historical comment below).
+ */
+static void startrecv586(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd_ruc = RUC_START;
+ ni_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
+}
+
+/******************************************************
+ * send frame
+ */
+
+/*
+ * Queue one frame for transmission. Returns 0 when the skb has been
+ * consumed (sent or dropped), 1 when the caller should retry later.
+ */
+static int ni52_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int len,i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = (struct priv *) dev->priv;
+
+ /* Transmitter timeout handling: if tbusy has been set for >= 5
+  * jiffies, either restart the command unit on its NOP loop (if the
+  * CU still claims to be active) or fully close/reopen the card. */
+ if(dev->tbusy)
+ {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+
+#ifndef NO_NOPCOMMANDS
+ if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
+ {
+ dev->tbusy = 0;
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n",dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)p->xmit_cmds[0]->cmd_status,(int)p->nop_cmds[0]->cmd_status,(int)p->nop_cmds[1]->cmd_status,(int)p->nop_point);
+#endif
+ /* abort the CU, then restart it on the current NOP command */
+ p->scb->cmd_cuc = CUC_ABORT;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+ }
+ else
+#endif
+ {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+ printk("%s: command-stats: %04x %04x\n",dev->name,p->xmit_cmds[0]->cmd_status,p->xmit_cmds[1]->cmd_status);
+ printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+#endif
+ ni52_close(dev);
+ ni52_open(dev);
+ }
+ dev->trans_start = jiffies;
+ return 0;
+ }
+
+ /* NULL skb is the old-style "transmitter tickle" request */
+ if(skb == NULL)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+ /* the frame must fit into a single transmit buffer */
+ if(skb->len > XMIT_BUFF_SIZE)
+ {
+ printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %ld bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+ return 0;
+ }
+
+ /* NOTE(review): on this kernel generation set_bit() appears to be used
+  * as an atomic test-and-set returning the old bit value — confirm */
+ if (set_bit(0, (void*)&dev->tbusy)) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+#if(NUM_XMIT_BUFFS > 1)
+ else if(set_bit(0,(void *) &p->lock)) {
+ printk("%s: Queue was locked\n",dev->name);
+ return 1;
+ }
+#endif
+ else
+ {
+ /* copy the frame into the card buffer; pad short frames up to
+  * the ethernet minimum (ETH_ZLEN) */
+ memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+ if(p->scb->cus & CU_ACTIVE)
+ {
+ printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name);
+ printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,p->xmit_cmds[0]->cmd_status);
+ }
+#endif
+
+ /* single buffer, no NOP chaining: start (or resume) the CU on the
+  * transmit command directly; retry up to 16 times because the CU
+  * does not always start immediately */
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+ for(i=0;i<16;i++)
+ {
+ p->xmit_cmds[0]->cmd_status = 0;
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
+ p->scb->cmd_cuc = CUC_RESUME;
+ else
+ {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ }
+
+ ni_attn586();
+ dev->trans_start = jiffies;
+ if(!i)
+ dev_kfree_skb(skb,FREE_WRITE);
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
+ break;
+ if(p->xmit_cmds[0]->cmd_status)
+ break;
+ if(i==15)
+ printk("%s: Can't start transmit-command.\n",dev->name);
+ }
+# else
+ /* single buffer with NOP chaining: splice the transmit command
+  * into the two-entry NOP loop the CU is spinning on */
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ dev->trans_start = jiffies;
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb,FREE_WRITE);
+# endif
+#else
+ /* multiple transmit buffers: chain this buffer's command behind
+  * the NOP ring and advance the ring index */
+ p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
+ if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
+ next_nop = 0;
+
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ /* linkpointer of xmit-command already points to next nop cmd */
+ p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ dev->trans_start = jiffies;
+ p->xmit_count = next_nop;
+
+ /* with interrupts off: clear tbusy if a free ring slot remains,
+  * then drop the transmit-queue lock */
+ {
+ long flags;
+ save_flags(flags);
+ cli();
+ if(p->xmit_count != p->xmit_last)
+ dev->tbusy = 0;
+ p->lock = 0;
+ restore_flags(flags);
+ }
+ dev_kfree_skb(skb,FREE_WRITE);
+#endif
+ }
+ return 0;
+}
+
+/*******************************************
+ * Someone wanna have the statistics
+ */
+
+/*
+ * Return the interface statistics. The SCB error counters accumulate
+ * on-chip, so each one is read and then zeroed so the next call only
+ * folds in new events.
+ */
+static struct enet_statistics *ni52_get_stats(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ unsigned short crc,aln,rsc,ovrn;
+
+ crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
+ p->scb->crc_errs = 0;
+ aln = p->scb->aln_errs;
+ p->scb->aln_errs = 0;
+ rsc = p->scb->rsc_errs;
+ p->scb->rsc_errs = 0;
+ ovrn = p->scb->ovrn_errs;
+ p->scb->ovrn_errs = 0;
+
+ /* fold the deltas into the cumulative software counters */
+ p->stats.rx_crc_errors += crc;
+ p->stats.rx_fifo_errors += ovrn;
+ p->stats.rx_frame_errors += aln;
+ p->stats.rx_dropped += rsc;
+
+ return &p->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+/*
+ * Apply promiscuous/multicast mode changes. The driver does this by
+ * completely re-initialising and restarting the chip, so the interface
+ * must already be running.
+ */
+static void set_multicast_list(struct device *dev)
+{
+ if(!dev->start)
+ {
+ printk("%s: Can't apply promiscuous/multicastmode to a not running interface.\n",dev->name);
+ return;
+ }
+
+ /* mark the interface down while the chip is rebuilt */
+ dev->start = 0;
+
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+
+ dev->start = 1;
+}
+
+#ifdef MODULE
+/* template device for module use; ni52_probe fills in the rest */
+static struct device dev_ni52 = {
+ " ", /* "ni5210": device name inserted by net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 9, /* I/O address, IRQ */
+ 0, 0, 0, NULL, ni52_probe };
+
+/* set: io,irq,memstart,memend or set it when calling insmod */
+int irq=9; /* interrupt line (must be >= 2) */
+int io=0x300; /* I/O base address */
+long memstart=0; /* e.g 0xd0000 */
+long memend=0; /* e.g 0xd4000 */
+
+/*
+ * Module entry point. Autoprobing is refused for the module build:
+ * io, irq, memstart and memend must all be given on the insmod line.
+ */
+int init_module(void)
+{
+ if(io <= 0x0 || !memend || !memstart || irq < 2) {
+ printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
+ return -ENODEV;
+ }
+ /* copy the module parameters into the template device */
+ dev_ni52.irq = irq;
+ dev_ni52.base_addr = io;
+ dev_ni52.mem_end = memend;
+ dev_ni52.mem_start = memstart;
+ if (register_netdev(&dev_ni52) != 0)
+ return -EIO;
+ return 0;
+}
+
+/*
+ * Module exit point. Unregister the interface FIRST so the network
+ * layer can no longer reach the device, then release its resources.
+ * (The previous order freed dev->priv and the I/O region while the
+ * device was still registered, leaving a window in which a live,
+ * registered device pointed at freed memory.)
+ */
+void cleanup_module(void)
+{
+ unregister_netdev(&dev_ni52);
+ release_region(dev_ni52.base_addr, NI52_TOTAL_SIZE);
+ kfree(dev_ni52.priv);
+ dev_ni52.priv = NULL;
+}
+#endif /* MODULE */
+
+#if 0
+/*
+ * DUMP .. we expect a not running CMD unit and enough space
+ */
+/*
+ * Issue an i82586 DUMP command: the chip writes its internal registers
+ * into the memory directly following the command block at *ptr, which
+ * is then printed in hex. Expects the CU to be stoppable and enough
+ * space after the command block (see the comment above).
+ */
+void ni52_dump(struct device *dev,void *ptr)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
+ int i;
+
+ /* stop the command unit before rewriting the command list */
+ p->scb->cmd_cuc = CUC_ABORT;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+
+ /* build a single DUMP command; dump area follows the block */
+ dump_cmd->cmd_status = 0;
+ dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
+ dump_cmd->dump_offset = make16((dump_cmd + 1));
+ dump_cmd->cmd_link = 0xffff;
+
+ p->scb->cbl_offset = make16(dump_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_STAT_COMPL(dump_cmd);
+
+ if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't get dump information.\n",dev->name);
+
+ /* print 170 dump bytes, 24 per line */
+ for(i=0;i<170;i++) {
+ printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
+ if(i % 24 == 23)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+
+/*
+ * END: linux/drivers/net/ni52.c
+ */
+
+
diff --git a/linux/src/drivers/net/ni52.h b/linux/src/drivers/net/ni52.h
new file mode 100644
index 0000000..b3dfdd2
--- /dev/null
+++ b/linux/src/drivers/net/ni52.h
@@ -0,0 +1,310 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (mhipp@student.uni-tuebingen.de)
+ *
+ * I have done a look in the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+
+#define NI52_RESET 0 /* writing to this address, resets the i82586 */
+#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
+#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
+#define NI52_TDIS 2 /* Xmit disable */
+#define NI52_INTENA 5 /* Interrupt enable */
+#define NI52_INTDIS 4 /* Interrupt disable */
+#define NI52_MAGIC1 6 /* dunno exact function */
+#define NI52_MAGIC2 7 /* dunno exact function */
+
+#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
+#define NI52_MAGICVAL2 0x55
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+/* hardware layout — field order and sizes must not change */
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2; /* reserved, zero */
+ unsigned short zero_dum3; /* reserved, zero */
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+/* hardware layout — links the SCP to the SCB */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointeroffset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+/* hardware layout — the chip's central status/command mailbox */
+struct scb_struct
+{
+ unsigned char rus; /* receive unit status (RU_* masks) */
+ unsigned char cus; /* command unit status (CU_*/STAT_* masks) */
+ unsigned char cmd_ruc; /* command word: RU part */
+ unsigned char cmd_cuc; /* command word: CU part & ACK */
+ unsigned short cbl_offset; /* pointeroffset, command block list */
+ unsigned short rfa_offset; /* pointeroffset, receive frame area */
+ unsigned short crc_errs; /* CRC-Error counter */
+ unsigned short aln_errs; /* allignmenterror counter */
+ unsigned short rsc_errs; /* Resourceerror counter */
+ unsigned short ovrn_errs; /* OVerrunerror counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x07 /* mask for CU command */
+#define CUC_NOP 0x00 /* NOP-command */
+#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
+#define CUC_RESUME 0x02 /* resume after suspend */
+#define CUC_SUSPEND 0x03 /* Suspend CU */
+#define CUC_ABORT 0x04 /* abort command operation immediately */
+
+#define ACK_MASK 0xf0 /* mask for ACK command */
+#define ACK_CX 0x80 /* acknowledges STAT_CX */
+#define ACK_FR 0x40 /* ack. STAT_FR */
+#define ACK_CNA 0x20 /* ack. STAT_CNA */
+#define ACK_RNR 0x10 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf0 /* mask for cause of interrupt */
+#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x40 /* RU finished receiving a frame */
+#define STAT_CNA 0x20 /* CU left active state */
+#define STAT_RNR 0x10 /* RU left ready state */
+
+#define CU_STATUS 0x7 /* CU status, 0=idle */
+#define CU_SUSPEND 0x1 /* CU is suspended */
+#define CU_ACTIVE 0x2 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+/* hardware layout — one entry of the receive frame descriptor list */
+struct rfd_struct
+{
+ unsigned char stat_low; /* status word */
+ unsigned char stat_high; /* status word */
+ unsigned char rfd_sf; /* 82596 mode only */
+ unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
+ unsigned short next; /* linkoffset to next RFD */
+ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x80 /* last: last rfd in the list */
+#define RFD_SUSP 0x40 /* last: suspend RU after */
+#define RFD_COMPL 0x80
+#define RFD_OK 0x20
+#define RFD_BUSY 0x40
+#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */
+#define RFD_ERR_CRC 0x08 /* CRC error */
+#define RFD_ERR_ALGN 0x04 /* Alignment error */
+#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
+#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
+
+#define RFD_ERR_FTS 0x0080 /* Frame to short */
+#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
+#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
+#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
+#define RFD_COLLDET 0x0001 /* Detected collision during reception */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+/* hardware layout — describes one receive data buffer */
+struct rbd_struct
+{
+ unsigned short status; /* status word,number of used bytes in buff */
+ unsigned short next; /* pointeroffset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Statusvalues for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+/* hardware layout — common 3-word header shared by all action commands */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offsetpointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+/* hardware layout — CMD_IASETUP: program the station address */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status; /* command status word */
+ unsigned short cmd_cmd; /* CMD_IASETUP plus command bits */
+ unsigned short cmd_link; /* offset of next command */
+ unsigned char iaddr[6]; /* individual (station) ethernet address */
+};
+
+/*
+ * Configure command
+ */
+/* hardware layout — CMD_CONFIGURE parameter bytes; the packed bitfields
+ * per byte follow the i82586 datasheet (TODO confirm against datasheet) */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status; /* command status word */
+ unsigned short cmd_cmd; /* CMD_CONFIGURE plus command bits */
+ unsigned short cmd_link; /* offset of next command */
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+/* hardware layout — CMD_MCSETUP: load the multicast address list */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status; /* command status word */
+ unsigned short cmd_cmd; /* CMD_MCSETUP plus command bits */
+ unsigned short cmd_link; /* offset of next command */
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+ unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
+};
+
+/*
+ * DUMP command
+ */
+/* hardware layout — CMD_DUMP: dump chip registers to memory */
+struct dump_cmd_struct
+{
+ unsigned short cmd_status; /* command status word */
+ unsigned short cmd_cmd; /* CMD_DUMP plus command bits */
+ unsigned short cmd_link; /* offset of next command */
+ unsigned short dump_offset; /* pointeroffset to DUMP space */
+};
+
+/*
+ * transmit command
+ */
+/* hardware layout — CMD_XMIT: transmit one frame via a TBD chain */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status; /* status + TCMD_* error bits */
+ unsigned short cmd_cmd; /* CMD_XMIT plus command bits */
+ unsigned short cmd_link; /* offset of next command */
+ unsigned short tbd_offset; /* pointeroffset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+/* hardware layout — CMD_TDR: cable test (time domain reflectometer) */
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status; /* command status word */
+ unsigned short cmd_cmd; /* CMD_TDR plus command bits */
+ unsigned short cmd_link; /* offset of next command */
+ unsigned short status; /* TDR result (TDR_* bits + time field) */
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+/* hardware layout — describes one transmit data buffer */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointeroffset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+
+
+
+
diff --git a/linux/src/drivers/net/ni65.c b/linux/src/drivers/net/ni65.c
new file mode 100644
index 0000000..75e8914
--- /dev/null
+++ b/linux/src/drivers/net/ni65.c
@@ -0,0 +1,1228 @@
+/*
+ * ni6510 (am7990 'lance' chip) driver for Linux-net-3
+ * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
+ * copyrights (c) 1994,1995,1996 by M.Hipp
+ *
+ * This driver can handle the old ni6510 board and the newer ni6510
+ * EtherBlaster. (probably it also works with every full NE2100
+ * compatible card)
+ *
+ * To compile as module, type:
+ * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni65.c
+ * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers the Linux-kernel.
+ *
+ * comments/bugs/suggestions can be sent to:
+ * Michael Hipp
+ * email: Michael.Hipp@student.uni-tuebingen.de
+ *
+ * sources:
+ * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
+ * and from the original drivers by D.Becker
+ *
+ * known problems:
+ * - on some PCI boards (including my own) the card/board/ISA-bridge has
+ * problems with bus master DMA. This results in lotsa overruns.
+ * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
+ * the XMT and RCV_VIA_SKB option .. this reduces driver performance.
+ * Or just play with your BIOS options to optimize ISA-DMA access.
+ * Maybe you also wanna play with the LOW_PERFORAMCE and MID_PERFORMANCE
+ * defines -> please report me your experience then
+ * - Harald reported for ASUS SP3G mainboards, that you should use
+ * the 'optimal settings' from the user's manual on page 3-12!
+ *
+ * credits:
+ * thanx to Jason Sullivan for sending me a ni6510 card!
+ * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
+ *
+ * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
+ * average: FTP -> 8384421 bytes received in 8.5 seconds
+ * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
+ * peak: FTP -> 8384421 bytes received in 7.5 seconds
+ * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
+ */
+
+/*
+ * 96.Sept.29: virt_to_bus stuff added for new memory modell
+ * 96.April.29: Added Harald Koenig's Patches (MH)
+ * 96.April.13: enhanced error handling .. more tests (MH)
+ * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
+ * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
+ * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
+ * hopefully no more 16MB limit
+ *
+ * 95.Nov.18: multicast tweaked (AC).
+ *
+ * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
+ *
+ * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include "ni65.h"
+
+/*
+ * the current setting allows an acceptable performance
+ * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
+ * the header of this file
+ * 'invert' the defines for max. performance. This may cause DMA problems
+ * on some boards (e.g on my ASUS SP3G)
+ */
+#undef XMT_VIA_SKB
+#undef RCV_VIA_SKB
+#define RCV_PARANOIA_CHECK
+
+#define MID_PERFORMANCE
+
+#if defined( LOW_PERFORMANCE )
+ static int isa0=7,isa1=7,csr80=0x0c10;
+#elif defined( MID_PERFORMANCE )
+ static int isa0=5,isa1=5,csr80=0x2810;
+#else /* high performance */
+ static int isa0=4,isa1=4,csr80=0x0017;
+#endif
+
+/*
+ * a few card/vendor specific defines
+ */
+#define NI65_ID0 0x00
+#define NI65_ID1 0x55
+#define NI65_EB_ID0 0x52
+#define NI65_EB_ID1 0x44
+#define NE2100_ID0 0x57
+#define NE2100_ID1 0x57
+
+#define PORT p->cmdr_addr
+
+/*
+ * buffer configuration
+ */
+#if 1
+#define RMDNUM 16
+#define RMDNUMMASK 0x80000000
+#else
+#define RMDNUM 8
+#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
+#endif
+
+#if 0
+#define TMDNUM 1
+#define TMDNUMMASK 0x00000000
+#else
+#define TMDNUM 4
+#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
+#endif
+
+/* slightly oversized */
+#define R_BUF_SIZE 1544
+#define T_BUF_SIZE 1544
+
+/*
+ * lance register defines
+ */
+#define L_DATAREG 0x00
+#define L_ADDRREG 0x02
+#define L_RESET 0x04
+#define L_CONFIG 0x05
+#define L_BUSIF 0x06
+
+/*
+ * to access the lance/am7990-regs, you have to write
+ * reg-number into L_ADDRREG, then you can access it using L_DATAREG
+ */
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+#define INIT_RING_BEFORE_START 0x1
+#define FULL_RESET_ON_ERROR 0x2
+
+#if 0
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
+ outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
+ inw(PORT+L_DATAREG))
+#if 0
+#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#else
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+#else
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+
+static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
+
+/* table of supported boards; ni65_probe1 matches I/O regions against it */
+static struct card {
+ unsigned char id0,id1; /* expected ID bytes at id_offset */
+ short id_offset; /* offset of the ID bytes (<0 = no ID check) */
+ short total_size; /* size of the I/O region to claim */
+ short cmd_offset; /* offset of the lance register block */
+ short addr_offset; /* offset of the station address bytes */
+ unsigned char *vendor_id; /* 3-byte vendor prefix, or NULL = skip check */
+ char *cardname; /* human-readable name for messages */
+ unsigned char config; /* bit0: IRQ/DMA readable from card,
+ bit1: has CSR80/ISA timing registers */
+} cards[] = {
+ { NI65_ID0,NI65_ID1,0x0e,0x10,0x0,0x8,ni_vendor,"ni6510", 0x1 } ,
+ { NI65_EB_ID0,NI65_EB_ID1,0x0e,0x18,0x10,0x0,ni_vendor,"ni6510 EtherBlaster", 0x2 } ,
+ { NE2100_ID0,NE2100_ID1,0x0e,0x18,0x10,0x0,NULL,"generic NE2100", 0x0 }
+};
+#define NUM_CARDS 3
+
+/* per-device state; allocated 8-byte aligned in low DMA memory
+ * by ni65_alloc_buffer() */
+struct priv
+{
+ struct rmd rmdhead[RMDNUM]; /* receive descriptor ring */
+ struct tmd tmdhead[TMDNUM]; /* transmit descriptor ring */
+ struct init_block ib; /* lance initialisation block */
+ int rmdnum; /* receive ring index */
+ int tmdnum,tmdlast; /* xmit ring head/tail indices */
+#ifdef RCV_VIA_SKB
+ struct sk_buff *recv_skb[RMDNUM]; /* preallocated receive skbs */
+#else
+ void *recvbounce[RMDNUM]; /* receive bounce buffers (low 16MB) */
+#endif
+#ifdef XMT_VIA_SKB
+ struct sk_buff *tmd_skb[TMDNUM]; /* skbs currently queued for xmit */
+#endif
+ void *tmdbounce[TMDNUM]; /* xmit bounce buffers (low 16MB) */
+ int tmdbouncenum; /* index of next xmit bounce buffer */
+ int lock,xmit_queued; /* xmit path lock / packets-pending flag */
+ struct enet_statistics stats; /* interface statistics */
+ void *self; /* original (unaligned) kmalloc pointer, for kfree */
+ int cmdr_addr; /* I/O base of the lance register pair (see PORT) */
+ int cardno; /* index into cards[] */
+ int features; /* e.g. INIT_RING_BEFORE_START */
+};
+
+static int ni65_probe1(struct device *dev,int);
+static void ni65_interrupt(int irq, void * dev_id, struct pt_regs *regs);
+static void ni65_recv_intr(struct device *dev,int);
+static void ni65_xmit_intr(struct device *dev,int);
+static int ni65_open(struct device *dev);
+static int ni65_lance_reinit(struct device *dev);
+static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
+static int ni65_close(struct device *dev);
+static int ni65_alloc_buffer(struct device *dev);
+static void ni65_free_buffer(struct priv *p);
+static struct enet_statistics *ni65_get_stats(struct device *);
+static void set_multicast_list(struct device *dev);
+
+static int irqtab[] = { 9,12,15,5 }; /* irq config-translate */
+static int dmatab[] = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
+
+static int debuglevel = 1;
+
+/*
+ * set 'performance' registers .. we must STOP lance for that
+ */
+/*
+ * set 'performance' registers (FIFO watermarks and ISA bus timing).
+ * The lance must be STOPped for this; only cards with config bit 1
+ * set (the EtherBlaster entry in cards[]) have these registers.
+ */
+static void ni65_set_performance(struct priv *p)
+{
+ writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
+
+ if( !(cards[p->cardno].config & 0x02) )
+ return;
+
+ /* probe: if register 80 cannot be selected, bail out quietly */
+ outw(80,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) != 80)
+ return;
+
+ writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
+ outw(0,PORT+L_ADDRREG);
+ outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
+ outw(1,PORT+L_ADDRREG);
+ outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
+
+ outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
+}
+
+/*
+ * open interface (up)
+ */
+/*
+ * open interface (up): grab the IRQ, reinit the lance and mark the
+ * device running. Returns 0 on success, -EAGAIN otherwise.
+ */
+static int ni65_open(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ int irqval = request_irq(dev->irq, &ni65_interrupt,0,
+ cards[p->cardno].cardname,NULL);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name,dev->irq, irqval);
+ return -EAGAIN;
+ }
+ /* dev_id is NULL above: the handler locates the device through the
+  * global irq2dev_map instead (old 2.0-era driver convention) */
+ irq2dev_map[dev->irq] = dev;
+
+ if(ni65_lance_reinit(dev))
+ {
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+ else
+ {
+ /* chip would not come up: undo the IRQ registration */
+ irq2dev_map[dev->irq] = NULL;
+ free_irq(dev->irq,NULL);
+ dev->start = 0;
+ return -EAGAIN;
+ }
+}
+
+/*
+ * close interface (down)
+ */
+/*
+ * close interface (down): hard-reset the chip, drop any skbs still
+ * queued for transmission, and release the IRQ.
+ */
+static int ni65_close(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
+
+#ifdef XMT_VIA_SKB
+ /* free skbs that were handed to the transmitter but never acked */
+ {
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ {
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i],FREE_WRITE);
+ p->tmd_skb[i] = NULL;
+ }
+ }
+ }
+#endif
+ irq2dev_map[dev->irq] = NULL;
+ free_irq(dev->irq,NULL);
+ dev->tbusy = 1;
+ dev->start = 0;
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Probe The Card (not the lance-chip)
+ */
+#ifdef MODULE
+static
+#endif
+int ni65_probe(struct device *dev)
+{
+ static int ports[] = {0x360,0x300,0x320,0x340, 0};
+ int *addr;
+
+ /* An explicit base address above 0x1ff means: probe only there.
+  * A nonzero address at or below 0x1ff means: do not probe at all. */
+ if (dev->base_addr > 0x1ff)
+ return ni65_probe1(dev, dev->base_addr);
+ if (dev->base_addr > 0)
+ return -ENXIO;
+
+ /* otherwise walk the default port list until a card answers */
+ for (addr = ports; *addr; addr++)
+ if (ni65_probe1(dev, *addr) == 0)
+ return 0;
+
+ return -ENODEV;
+}
+
+/*
+ * this is the real card probe ..
+ */
+/*
+ * this is the real card probe: identify the board at ioaddr against
+ * the cards[] table, read the station address, allocate buffers,
+ * determine IRQ/DMA (from card, assigned, or autodetected) and wire
+ * up the device methods. Returns 0 on success, negative errno else.
+ */
+static int ni65_probe1(struct device *dev,int ioaddr)
+{
+ int i,j;
+ struct priv *p;
+
+ for(i=0;i<NUM_CARDS;i++) {
+ if(check_region(ioaddr, cards[i].total_size))
+ continue;
+ if(cards[i].id_offset >= 0) {
+ if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
+ inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
+ continue;
+ }
+ }
+ if(cards[i].vendor_id) {
+ for(j=0;j<3;j++)
+ if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
+ break;
+ /* BUGFIX: the old code used 'continue' in the inner loop,
+  * which only skipped to the next vendor byte — a vendor
+  * mismatch never rejected the entry. Reject it here. */
+ if(j < 3)
+ continue;
+ }
+ break;
+ }
+ if(i == NUM_CARDS)
+ return -ENODEV;
+
+ for(j=0;j<6;j++)
+ dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+
+ if( (j=ni65_alloc_buffer(dev)) < 0)
+ return j;
+ p = (struct priv *) dev->priv;
+ p->cmdr_addr = ioaddr + cards[i].cmd_offset;
+ p->cardno = i;
+
+ printk("%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
+
+ /* a freshly reset lance must report CSR0 == 0x4 (STOP) */
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (j=readreg(CSR0)) != 0x4) {
+ printk(KERN_ERR "can't RESET card: %04x\n",j);
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+
+ /* if register 88/89 is selectable, the chip reports a version and
+  * supports re-initialising the rings before a restart */
+ outw(88,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) == 88) {
+ unsigned long v;
+ v = inw(PORT+L_DATAREG);
+ v <<= 16;
+ outw(89,PORT+L_ADDRREG);
+ v |= inw(PORT+L_DATAREG);
+ printk("Version %#08lx, ",v);
+ p->features = INIT_RING_BEFORE_START;
+ }
+ else {
+ printk("ancient LANCE, ");
+ p->features = 0x0;
+ }
+
+ if(test_bit(0,&cards[i].config)) {
+ /* card exposes its jumpered IRQ/DMA in the config register */
+ dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
+ dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
+ printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
+ }
+ else {
+ if(dev->dma == 0) {
+ /* 'stuck test' from lance.c */
+ int dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) | (inb(DMA2_STAT_REG) & 0xf0);
+ /* NOTE: 'i' is reused as the DMA-table index below; the card
+  * index was already saved in p->cardno */
+ for(i=1;i<5;i++) {
+ int dma = dmatab[i];
+ if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
+ continue;
+ disable_dma(dma);
+ set_dma_mode(dma,DMA_MODE_CASCADE);
+ enable_dma(dma);
+ ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
+ disable_dma(dma);
+ free_dma(dma);
+ if(readreg(CSR0) & CSR0_IDON)
+ break;
+ }
+ if(i == 5) {
+ printk("Can't detect DMA channel!\n");
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+ dev->dma = dmatab[i];
+ printk("DMA %d (autodetected), ",dev->dma);
+ }
+ else
+ printk("DMA %d (assigned), ",dev->dma);
+
+ if(dev->irq < 2)
+ {
+ /* autodetect the IRQ by triggering an init-done interrupt */
+ ni65_init_lance(p,dev->dev_addr,0,0);
+ autoirq_setup(0);
+ writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
+
+ if(!(dev->irq = autoirq_report(2)))
+ {
+ printk("Failed to detect IRQ line!\n");
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else
+ printk("IRQ %d (assigned).\n",dev->irq);
+ }
+
+ if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
+ {
+ printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+
+ /*
+ * Grab the region so we can find another board.
+ */
+ request_region(ioaddr,cards[p->cardno].total_size,cards[p->cardno].cardname);
+
+ dev->base_addr = ioaddr;
+
+ dev->open = ni65_open;
+ dev->stop = ni65_close;
+ dev->hard_start_xmit = ni65_send_packet;
+ dev->get_stats = ni65_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+
+ ether_setup(dev);
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 0;
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * set lance register and trigger init
+ */
+/*
+ * set lance register and trigger init: fill in the init block (station
+ * address, multicast filter, mode, ring addresses), point CSR1/CSR2 at
+ * it, issue CSR0_INIT and poll for completion (IDON) or error (MERR).
+ */
+static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+{
+ int i;
+ u32 pib;
+
+ writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
+
+ for(i=0;i<6;i++)
+ p->ib.eaddr[i] = daddr[i];
+
+ for(i=0;i<8;i++)
+ p->ib.filter[i] = filter;
+ p->ib.mode = mode;
+
+ /* ring base addresses carry the ring-size code in the top bits */
+ p->ib.trp = (u32) virt_to_bus(p->tmdhead) | TMDNUMMASK;
+ p->ib.rrp = (u32) virt_to_bus(p->rmdhead) | RMDNUMMASK;
+ writereg(0,CSR3); /* busmaster/no word-swap */
+ pib = (u32) virt_to_bus(&p->ib);
+ writereg(pib & 0xffff,CSR1);
+ writereg(pib >> 16,CSR2);
+
+ writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
+
+ /* poll (up to 32 * 4ms) for init-done or memory error */
+ for(i=0;i<32;i++)
+ {
+ udelay(4000);
+ if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
+ break; /* init ok ? */
+ }
+}
+
+/*
+ * allocate memory area and check the 16MB border
+ */
+/*
+ * allocate memory area and check the 16MB border (ISA DMA cannot reach
+ * above 16MB). type!=0 allocates an skb (returned as the skb pointer,
+ * with 2+16 bytes headroom reserved); type==0 kmallocs a plain buffer.
+ * Returns NULL on failure or when the buffer lands above 16MB.
+ */
+static void *ni65_alloc_mem(struct device *dev,char *what,int size,int type)
+{
+ struct sk_buff *skb=NULL;
+ unsigned char *ptr;
+ void *ret;
+
+ if(type) {
+ ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
+ if(!skb) {
+ printk("%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2+16);
+ skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
+ ptr = skb->data;
+ }
+ else {
+ ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
+ if(!ret) {
+ printk("%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ }
+ /* reject buffers the lance's 24-bit bus address cannot reach */
+ if( (u32) virt_to_bus(ptr+size) > 0x1000000) {
+ printk("%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
+ if(type)
+ kfree_skb(skb,FREE_WRITE);
+ else
+ kfree(ptr);
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * allocate all memory structures .. send/recv buffers etc ...
+ */
+/*
+ * allocate all memory structures .. send/recv buffers etc ...
+ * Returns 0 on success, -ENOMEM on failure (with everything freed).
+ */
+static int ni65_alloc_buffer(struct device *dev)
+{
+ unsigned char *ptr;
+ struct priv *p;
+ int i;
+
+ /*
+ * we need 8-aligned memory ..
+ */
+ ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
+ if(!ptr)
+ return -ENOMEM;
+
+ /* round up to the next 8-byte boundary; keep the raw pointer in
+  * p->self so ni65_free_buffer can kfree it */
+ p = dev->priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
+ memset((char *) dev->priv,0,sizeof(struct priv));
+ p->self = ptr;
+
+ for(i=0;i<TMDNUM;i++)
+ {
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = NULL;
+#endif
+ p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
+ if(!p->tmdbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
+ if(!p->recv_skb[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#else
+ p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
+ if(!p->recvbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#endif
+ }
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * free buffers and private struct
+ */
+static void ni65_free_buffer(struct priv *p)
+{
+ int i;
+
+ if(!p)
+ return;
+
+ for(i=0;i<TMDNUM;i++) {
+ if(p->tmdbounce[i])
+ kfree(p->tmdbounce[i]);
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i])
+ dev_kfree_skb(p->tmd_skb[i],FREE_WRITE);
+#endif
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ if(p->recv_skb[i])
+ dev_kfree_skb(p->recv_skb[i],FREE_WRITE);
+#else
+ if(p->recvbounce[i])
+ kfree(p->recvbounce[i]);
+#endif
+ }
+ if(p->self)
+ kfree(p->self);
+}
+
+
+/*
+ * stop and (re)start lance .. e.g after an error
+ */
+static void ni65_stop_start(struct device *dev,struct priv *p)
+{
+ int csr0 = CSR0_INEA;
+
+ writedatareg(CSR0_STOP);
+
+ if(debuglevel > 1)
+ printk("ni65_stop_start\n");
+
+ if(p->features & INIT_RING_BEFORE_START) {
+ int i;
+#ifdef XMT_VIA_SKB
+ struct sk_buff *skb_save[TMDNUM];
+#endif
+ unsigned long buffer[TMDNUM];
+ short blen[TMDNUM];
+
+ if(p->xmit_queued) {
+ while(1) {
+ if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
+ break;
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ break;
+ }
+ }
+
+ for(i=0;i<TMDNUM;i++) {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ skb_save[i] = p->tmd_skb[i];
+#endif
+ buffer[i] = (u32) bus_to_virt(tmdp->u.buffer);
+ blen[i] = tmdp->blen;
+ tmdp->u.s.status = 0x0;
+ }
+
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + i;
+ rmdp->u.s.status = RCV_OWN;
+ }
+ p->tmdnum = p->xmit_queued = 0;
+ writedatareg(CSR0_STRT | csr0);
+
+ for(i=0;i<TMDNUM;i++) {
+ int num = (i + p->tmdlast) & (TMDNUM-1);
+ p->tmdhead[i].u.buffer = (u32) virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
+ p->tmdhead[i].blen = blen[num];
+ if(p->tmdhead[i].u.s.status & XMIT_OWN) {
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+ p->xmit_queued = 1;
+ writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
+ }
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = skb_save[num];
+#endif
+ }
+ p->rmdnum = p->tmdlast = 0;
+ if(!p->lock)
+ dev->tbusy = (p->tmdnum || !p->xmit_queued) ? 0 : 1;
+ dev->trans_start = jiffies;
+ }
+ else
+ writedatareg(CSR0_STRT | csr0);
+}
+
+/*
+ * init lance (write init-values .. init-buffers) (open-helper)
+ */
+static int ni65_lance_reinit(struct device *dev)
+{
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->lock = 0;
+ p->xmit_queued = 0;
+
+ disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
+ set_dma_mode(dev->dma,DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (i=readreg(CSR0) ) != 0x4)
+ {
+ printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
+ cards[p->cardno].cardname,(int) i);
+ disable_dma(dev->dma);
+ return 0;
+ }
+
+ p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
+ for(i=0;i<TMDNUM;i++)
+ {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i],FREE_WRITE);
+ p->tmd_skb[i] = NULL;
+ }
+#endif
+ tmdp->u.buffer = 0x0;
+ tmdp->u.s.status = XMIT_START | XMIT_END;
+ tmdp->blen = tmdp->status2 = 0;
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+ struct rmd *rmdp = p->rmdhead + i;
+#ifdef RCV_VIA_SKB
+ rmdp->u.buffer = (u32) virt_to_bus(p->recv_skb[i]->data);
+#else
+ rmdp->u.buffer = (u32) virt_to_bus(p->recvbounce[i]);
+#endif
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN;
+ }
+
+ if(dev->flags & IFF_PROMISC)
+ ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
+ else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ ni65_init_lance(p,dev->dev_addr,0xff,0x0);
+ else
+ ni65_init_lance(p,dev->dev_addr,0x00,0x00);
+
+ /*
+ * ni65_set_lance_mem() sets L_ADDRREG to CSR0
+ * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
+ */
+
+ if(inw(PORT+L_DATAREG) & CSR0_IDON) {
+ ni65_set_performance(p);
+ /* init OK: start lance , enable interrupts */
+ writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
+ return 1; /* ->OK */
+ }
+ printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ disable_dma(dev->dma);
+ return 0; /* ->Error */
+}
+
+/*
+ * interrupt handler
+ */
+static void ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+ int csr0;
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ struct priv *p;
+ int bcnt = 32;
+
+ if (dev == NULL) {
+ printk (KERN_ERR "ni65_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if(set_bit(0,(int *) &dev->interrupt)) {
+ printk("ni65: oops .. interrupt while proceeding interrupt\n");
+ return;
+ }
+ p = (struct priv *) dev->priv;
+
+ while(--bcnt) {
+ csr0 = inw(PORT+L_DATAREG);
+
+#if 0
+ writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
+#else
+ writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
+#endif
+
+ if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
+ break;
+
+ if(csr0 & CSR0_RINT) /* RECV-int? */
+ ni65_recv_intr(dev,csr0);
+ if(csr0 & CSR0_TINT) /* XMIT-int? */
+ ni65_xmit_intr(dev,csr0);
+
+ if(csr0 & CSR0_ERR)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+ if(debuglevel > 1)
+ printk("%s: general error: %04x.\n",dev->name,csr0);
+ if(csr0 & CSR0_BABL)
+ p->stats.tx_errors++;
+ if(csr0 & CSR0_MISS) {
+ int i;
+ for(i=0;i<RMDNUM;i++)
+ printk("%02x ",p->rmdhead[i].u.s.status);
+ printk("\n");
+ p->stats.rx_errors++;
+ }
+ if(csr0 & CSR0_MERR) {
+ if(debuglevel > 1)
+ printk("%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
+ ni65_stop_start(dev,p);
+ }
+ }
+ }
+
+#ifdef RCV_PARANOIA_CHECK
+{
+ int j;
+ for(j=0;j<RMDNUM;j++)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+ int i,k,num1,num2;
+ for(i=RMDNUM-1;i>0;i--) {
+ num2 = (p->rmdnum + i) & (RMDNUM-1);
+ if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
+ break;
+ }
+
+ if(i) {
+ for(k=0;k<RMDNUM;k++) {
+ num1 = (p->rmdnum + k) & (RMDNUM-1);
+ if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
+ break;
+ }
+ if(!k)
+ break;
+
+ if(debuglevel > 0)
+ {
+ char buf[256],*buf1;
+ int k;
+ buf1 = buf;
+ for(k=0;k<RMDNUM;k++) {
+ sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
+ buf1 += 3;
+ }
+ *buf1 = 0;
+ printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
+ }
+
+ p->rmdnum = num1;
+ ni65_recv_intr(dev,csr0);
+ if((p->rmdhead[num2].u.s.status & RCV_OWN))
+ break; /* ok, we are 'in sync' again */
+ }
+ else
+ break;
+ }
+}
+#endif
+
+ if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
+ printk("%s: RX or TX was offline -> restart\n",dev->name);
+ ni65_stop_start(dev,p);
+ }
+ else
+ writedatareg(CSR0_INEA);
+
+ dev->interrupt = 0;
+
+ return;
+}
+
+/*
+ * We have received an Xmit-Interrupt ..
+ * send a new packet if necessary
+ */
+static void ni65_xmit_intr(struct device *dev,int csr0)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ while(p->xmit_queued)
+ {
+ struct tmd *tmdp = p->tmdhead + p->tmdlast;
+ int tmdstat = tmdp->u.s.status;
+
+ if(tmdstat & XMIT_OWN)
+ break;
+
+ if(tmdstat & XMIT_ERR)
+ {
+#if 0
+ if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
+ printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
+#endif
+ /* checking some errors */
+ if(tmdp->status2 & XMIT_RTRY)
+ p->stats.tx_aborted_errors++;
+ if(tmdp->status2 & XMIT_LCAR)
+ p->stats.tx_carrier_errors++;
+ if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
+ /* this stops the xmitter */
+ p->stats.tx_fifo_errors++;
+ if(debuglevel > 0)
+ printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
+ if(p->features & INIT_RING_BEFORE_START) {
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
+ ni65_stop_start(dev,p);
+ break; /* no more Xmit processing .. */
+ }
+ else
+ ni65_stop_start(dev,p);
+ }
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
+ if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
+ p->stats.tx_errors++;
+ tmdp->status2 = 0;
+ }
+ else
+ p->stats.tx_packets++;
+
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[p->tmdlast]) {
+ dev_kfree_skb(p->tmd_skb[p->tmdlast],FREE_WRITE);
+ p->tmd_skb[p->tmdlast] = NULL;
+ }
+#endif
+
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ p->xmit_queued = 0;
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+}
+
+/*
+ * We have received a packet
+ */
+static void ni65_recv_intr(struct device *dev,int csr0)
+{
+ struct rmd *rmdp;
+ int rmdstat,len;
+ int cnt=0;
+ struct priv *p = (struct priv *) dev->priv;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
+ {
+ cnt++;
+ if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
+ {
+ if(!(rmdstat & RCV_ERR)) {
+ if(rmdstat & RCV_START)
+ {
+ p->stats.rx_length_errors++;
+ printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
+ }
+ }
+ else {
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
+ dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
+ if(rmdstat & RCV_FRAM)
+ p->stats.rx_frame_errors++;
+ if(rmdstat & RCV_OFLO)
+ p->stats.rx_over_errors++;
+ if(rmdstat & RCV_CRC)
+ p->stats.rx_crc_errors++;
+ if(rmdstat & RCV_BUF_ERR)
+ p->stats.rx_fifo_errors++;
+ }
+ if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
+ p->stats.rx_errors++;
+ }
+ else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
+ {
+#ifdef RCV_VIA_SKB
+ struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
+ if (skb)
+ skb_reserve(skb,16);
+#else
+ struct sk_buff *skb = dev_alloc_skb(len+2);
+#endif
+ if(skb)
+ {
+ skb_reserve(skb,2);
+ skb->dev = dev;
+#ifdef RCV_VIA_SKB
+ if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
+ skb_put(skb,len);
+ eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
+ }
+ else {
+ struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
+ skb_put(skb,R_BUF_SIZE);
+ p->recv_skb[p->rmdnum] = skb;
+ rmdp->u.buffer = (u32) virt_to_bus(skb->data);
+ skb = skb1;
+ skb_trim(skb,len);
+ }
+#else
+ skb_put(skb,len);
+ eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
+#endif
+ p->stats.rx_packets++;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+ else
+ {
+ printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
+ p->stats.rx_dropped++;
+ }
+ }
+ else {
+ printk(KERN_INFO "%s: received runt packet\n",dev->name);
+ p->stats.rx_errors++;
+ }
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN; /* change owner */
+ p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+}
+
+/*
+ * kick xmitter ..
+ */
+static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(dev->tbusy)
+ {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 50)
+ return 1;
+
+ printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
+{
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ printk("%02x ",p->tmdhead[i].u.s.status);
+ printk("\n");
+}
+ ni65_lance_reinit(dev);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ if(skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk(KERN_ERR "%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+ if (set_bit(0, (void*)&p->lock)) {
+ printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
+ return 1;
+ }
+
+ {
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ struct tmd *tmdp;
+ long flags;
+
+#ifdef XMT_VIA_SKB
+ if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
+#endif
+
+ memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
+ (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ save_flags(flags);
+ cli();
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
+ p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
+
+#ifdef XMT_VIA_SKB
+ }
+ else {
+ save_flags(flags);
+ cli();
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) virt_to_bus(skb->data);
+ p->tmd_skb[p->tmdnum] = skb;
+ }
+#endif
+ tmdp->blen = -len;
+
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
+ writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
+
+ p->xmit_queued = 1;
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+
+ dev->tbusy = (p->tmdnum == p->tmdlast) ? 1 : 0;
+ p->lock = 0;
+ dev->trans_start = jiffies;
+
+ restore_flags(flags);
+ }
+
+ return 0;
+}
+
+static struct enet_statistics *ni65_get_stats(struct device *dev)
+{
+
+#if 0
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
+ printk("%02x ",rmdp->u.s.status);
+ }
+ printk("\n");
+#endif
+
+ return &((struct priv *) dev->priv)->stats;
+}
+
+static void set_multicast_list(struct device *dev)
+{
+ if(!ni65_lance_reinit(dev))
+ printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
+ dev->tbusy = 0;
+}
+
+#ifdef MODULE
+static struct device dev_ni65 = {
+ " ", /* "ni6510": device name inserted by net_init.c */
+ 0, 0, 0, 0,
+ 0x360, 9, /* I/O address, IRQ */
+ 0, 0, 0, NULL, ni65_probe };
+
+/* set: io,irq,dma or set it when calling insmod */
+static int irq=0;
+static int io=0;
+static int dma=0;
+
+int init_module(void)
+{
+#if 0
+ if(io <= 0x0 || irq < 2) {
+ printk("ni65: Autoprobing not allowed for modules.\n");
+ printk("ni65: Set symbols 'io' 'irq' and 'dma'\n");
+ return -ENODEV;
+ }
+#endif
+ dev_ni65.irq = irq;
+ dev_ni65.dma = dma;
+ dev_ni65.base_addr = io;
+ if (register_netdev(&dev_ni65) != 0)
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ struct priv *p;
+ p = (struct priv *) dev_ni65.priv;
+ if(!p) {
+ printk("Ooops .. no privat struct\n");
+ return;
+ }
+ disable_dma(dev_ni65.dma);
+ free_dma(dev_ni65.dma);
+ release_region(dev_ni65.base_addr,cards[p->cardno].total_size);
+ ni65_free_buffer(p);
+ dev_ni65.priv = NULL;
+ unregister_netdev(&dev_ni65);
+}
+#endif /* MODULE */
+
+/*
+ * END of ni65.c
+ */
+
+
diff --git a/linux/src/drivers/net/ni65.h b/linux/src/drivers/net/ni65.h
new file mode 100644
index 0000000..6438095
--- /dev/null
+++ b/linux/src/drivers/net/ni65.h
@@ -0,0 +1,130 @@
+/* am7990 (lance) definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by
+ * same Gnu Public License that covers that work.
+ *
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources: (mail me or ask archie if you need them)
+ * crynwr-packet-driver
+ */
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ * (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define M_PROM 0x8000 /* Promiscuous Mode */
+#define M_INTL 0x0040 /* Internal Loopback */
+#define M_DRTY 0x0020 /* Disable Retry */
+#define M_COLL 0x0010 /* Force Collision */
+#define M_DTCR 0x0008 /* Disable Transmit CRC) */
+#define M_LOOP 0x0004 /* Loopback */
+#define M_DTX 0x0002 /* Disable the Transmitter */
+#define M_DRX 0x0001 /* Disable the Receiver */
+
+
+/*
+ * Receive message descriptor bit definitions.
+ */
+
+#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define RCV_ERR 0x40 /* Error Summary */
+#define RCV_FRAM 0x20 /* Framing Error */
+#define RCV_OFLO 0x10 /* Overflow Error */
+#define RCV_CRC 0x08 /* CRC Error */
+#define RCV_BUF_ERR 0x04 /* Buffer Error */
+#define RCV_START 0x02 /* Start of Packet */
+#define RCV_END 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor bit definitions.
+ */
+
+#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define XMIT_ERR 0x40 /* Error Summary */
+#define XMIT_RETRY 0x10 /* more the 1 retry needed to Xmit */
+#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
+#define XMIT_DEF 0x04 /* Deferred */
+#define XMIT_START 0x02 /* Start of Packet */
+#define XMIT_END 0x01 /* End of Packet */
+
+/*
+ * transmit status (2) (valid if XMIT_ERR == 1)
+ */
+
+#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
+#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
+#define XMIT_LCAR 0x0800 /* Loss of Carrier */
+#define XMIT_LCOL 0x1000 /* Late collision */
+#define XMIT_RESERV 0x2000 /* Reserved */
+#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
+#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
+
+struct init_block
+{
+ unsigned short mode;
+ unsigned char eaddr[6];
+ unsigned char filter[8];
+ /* bit 29-31: number of rmd's (power of 2) */
+ u32 rrp; /* receive ring pointer (align 8) */
+ /* bit 29-31: number of tmd's (power of 2) */
+ u32 trp; /* transmit ring pointer (align 8) */
+};
+
+struct rmd /* Receive Message Descriptor */
+{
+ union
+ {
+ volatile u32 buffer;
+ struct
+ {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile short blen;
+ volatile unsigned short mlen;
+};
+
+struct tmd
+{
+ union
+ {
+ volatile u32 buffer;
+ struct
+ {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile unsigned short blen;
+ volatile unsigned short status2;
+};
+
+
diff --git a/linux/src/drivers/net/ns820.c b/linux/src/drivers/net/ns820.c
new file mode 100644
index 0000000..968f3ac
--- /dev/null
+++ b/linux/src/drivers/net/ns820.c
@@ -0,0 +1,1547 @@
+/* ns820.c: A Linux Gigabit Ethernet driver for the NatSemi DP83820 series. */
+/*
+ Written/copyright 1999-2003 by Donald Becker.
+ Copyright 2002-2003 by Scyld Computing Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL. License for under other terms may be
+ available. Contact the original author for details.
+
+ The original author may be reached as becker@scyld.com, or at
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/natsemi.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"ns820.c:v1.03a 8/09/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/natsemi.html\n";
+/* Updated to recommendations in pci-skeleton v2.13. */
+
+/* Automatically extracted configuration info:
+probe-func: ns820_probe
+config-in: tristate 'National Semiconductor DP8382x series PCI Ethernet support' CONFIG_NATSEMI820
+
+c-help-name: National Semiconductor DP8382x series PCI Ethernet support
+c-help-symbol: CONFIG_NATSEMI820
+c-help: This driver is for the National Semiconductor DP83820 Gigabit Ethernet
+c-help: adapter series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/natsemi.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 2048 element hash table based on the Ethernet CRC.
+ Previous natsemi chips had unreliable multicast filter circuitry.
+ To work around an observed problem set this value to '0',
+ which will immediately switch to Rx-all-multicast.
+ */
+static int multicast_filter_limit = 100;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can only receive into aligned buffers, so architectures such
+ as the Alpha AXP might benefit from a copy-align.
+*/
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability, however setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+ Use 0x1000 or 0x2000 for gigabit.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ Understand the implications before changing these settings!
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, confuses the system network buffer limits,
+ and wastes memory.
+ Too-large receive rings waste memory and confound network buffer limits.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 64
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung.
+ Re-autonegotiation may take up to 3 seconds.
+ */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("National Semiconductor DP83820 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83820 10/100/1000
+Ethernet NIC. It is superficially similar to the 810 series "natsemi.c"
+driver, however the register layout, descriptor layout and element
+length of the new chip series is different.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be configured.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list, thus rings can be arbitrarily sized. Before changing the
+ring sizes you should understand the flow and cache effects of the
+full/available/empty hysteresis.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. On copies frames are put into the
+skbuff at an offset of "+2", 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+The NatSemi 820 series PCI gigabit chips are very common on low-cost NICs.
+The '821 appears to be the same as '820 chip, only with pins for the upper
+32 bits marked "N/C".
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+The NatSemi dp83820 datasheet is available: search www.natsemi.com
+
+IVc. Errata
+
+None characterised.
+
+*/
+
+
+
+static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int power_event(void *dev_instance, int event);
+enum chip_capability_flags {FDXActiveLow=1, InvertGbXcvrPwr=2, };
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ { "D-Link DGE-500T (DP83820)",
+ { 0x0022100B, 0xffffffff, 0x49001186, 0xffffffff, },
+ PCI_IOTYPE, 256, FDXActiveLow},
+ {"NatSemi DP83820", { 0x0022100B, 0xffffffff },
+ PCI_IOTYPE, 256, 0},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info ns820_drv_id = {
+ "ns820", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ ns820_probe1, power_event };
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. Please do not change these names without good reason.
+*/
+enum register_offsets {
+ ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
+ IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18, IntrHoldoff=0x1C,
+ TxRingPtr=0x20, TxRingPtrHi=0x24, TxConfig=0x28,
+ RxRingPtr=0x30, RxRingPtrHi=0x34, RxConfig=0x38,
+ WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
+ BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
+ StatsCtrl=0x5C, RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
+};
+
+/* Bits in ChipCmd. */
+enum ChipCmdBits {
+ ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
+ RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
+};
+
+/* Bits in ChipConfig. */
+enum ChipConfigBits {
+ CfgLinkGood=0x80000000, CfgFDX=0x10000000,
+ CfgXcrReset=0x0400, CfgXcrOff=0x0200,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
+ IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
+ IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
+ IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
+ StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
+ RxStatusOverrun=0x10000,
+ RxResetDone=0x00200000, TxResetDone=0x00400000,
+ IntrPCIErr=0x001E0000,
+ IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptErr=0x20, AcceptRunt=0x10,
+ AcceptBroadcast=0xC0000000,
+ AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
+ AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+#if ADDRLEN == 64
+ u64 next_desc;
+ u64 buf_addr;
+#endif
+ u32 next_desc;
+ u32 buf_addr;
+ s32 cmd_status;
+ u32 vlan_status;
+};
+
/* Bits in netdev_desc.cmd_status.  DescOwn is the sign bit of the s32
   cmd_status, so netdev_rx() can test descriptor completion/ownership with
   a simple "status < 0". */
enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000,
	DescPktOK=0x08000000, RxTooLong=0x00400000,
};
+
#define PRIV_ALIGN 15 /* Required alignment mask */
/* Per-device driver state, reached via dev->priv.  Allocated in
   ns820_probe1() with PRIV_ALIGN bytes of slack and rounded up so the
   descriptor rings at the front are 16-byte aligned; priv_addr keeps the
   original unaligned pointer for kfree(). */
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc rx_ring[RX_RING_SIZE];
	struct netdev_desc tx_ring[TX_RING_SIZE];
	struct net_device *next_module;		/* Link for devices of this type. */
	void *priv_addr;					/* Unaligned address for kfree */
	const char *product_name;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;		/* Software statistics, see get_stats(). */
	struct timer_list timer;			/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	int msg_level;						/* NETIF_MSG_* bitmask gating printk()s. */
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	long in_interrupt;					/* Word-long for SMP locks. */
	int max_interrupt_work;				/* Loop bound in intr_handler(). */
	int intr_enable;
	unsigned int restore_intr_enable:1;	/* Set if temporarily masked. */
	unsigned int rx_q_empty:1;			/* Set out-of-skbuffs. */

	struct netdev_desc *rx_head_desc;	/* Next Rx descriptor to examine. */
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	int rx_copybreak;					/* Copy-vs-in-place Rx size threshold. */

	unsigned int cur_tx, dirty_tx;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	/* These values keep track of the transceiver/media in use. */
	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;			/* Duplex forced; check_duplex() no-ops. */
	unsigned int medialock:1;			/* Do not sense media. */
	unsigned int default_port;			/* Last dev->if_port value. */
	/* Rx filter. */
	u32 cur_rx_mode;
	u32 rx_filter[16];
	int multicast_filter_limit;			/* Above this, accept all multicast. */
	/* FIFO and PCI burst thresholds. */
	int tx_config, rx_config;			/* Shadow copies of TxConfig/RxConfig. */
	/* MII transceiver section. */
	u16 advertising;					/* NWay media advertisement */
};
+
/* Forward declarations for the driver entry points and helpers below.
   (The netdev_error() prototype, previously declared twice, is now
   declared once.) */
static int eeprom_read(long ioaddr, int location);
static void mdio_sync(long mdio_addr);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static int rx_ring_fill(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
+
+
+
/* A list of our installed devices, for removing the driver module.
   Singly linked through netdev_private.next_module; new devices are
   pushed at the head in ns820_probe1(). */
static struct net_device *root_net_dev = NULL;
+
#ifndef MODULE
/* Non-modular (compiled-in) probe entry point: register this driver with
   the PCI scan code and announce the driver version on success.
   Returns 0 on success, -ENODEV if registration found no card. */
int ns820_probe(struct net_device *dev)
{
	if (pci_drv_register(&ns820_drv_id, dev) < 0)
		return -ENODEV;
	printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
	return 0;
}
#endif
+
/* Set up one probed card: create the net_device, read the station address
   from the EEPROM, reset the chip, allocate the aligned private area, and
   fill in the device methods and option-derived media settings.
   Returns the new net_device, or NULL on allocation failure. */
static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
						  long ioaddr, int irq, int chip_idx, int card_idx)
{
	struct net_device *dev;
	struct netdev_private *np;
	void *priv_mem;
	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	dev = init_etherdev(init_dev, 0);
	if (!dev)
		return NULL;

	/* Perhaps NETIF_MSG_PROBE */
	printk(KERN_INFO "%s: %s at 0x%lx, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);

	/* Station address: three 16-bit words read from EEPROM locations
	   12, 11, 10 (note the reversed "12 - i" order), byte-swapped from
	   little-endian as needed. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, 12 - i));
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	/* Reset the chip to erase previous misconfiguration. */
	writel(ChipReset, ioaddr + ChipCmd);
	/* Power up Xcvr. */
	writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);

	/* Make certain elements e.g. descriptor lists are aligned. */
	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
	/* Check for the very unlikely case of no memory.
	   NOTE(review): the already-registered etherdev is not unwound on
	   this path -- confirm whether that is acceptable here. */
	if (priv_mem == NULL)
		return NULL;

	dev->base_addr = ioaddr;
	dev->irq = irq;

	/* Round up to the next 16-byte boundary (PRIV_ALIGN is a mask);
	   priv_addr keeps the raw pointer for the eventual kfree(). */
	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
	memset(np, 0, sizeof(*np));
	np->priv_addr = priv_mem;

	/* Push onto the module-wide device list. */
	np->next_module = root_net_dev;
	root_net_dev = dev;

	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	np->msg_level = (1 << debug) - 1;
	np->rx_copybreak = rx_copybreak;
	np->max_interrupt_work = max_interrupt_work;
	np->multicast_filter_limit = multicast_filter_limit;

	/* A non-zero dev->mem_start overrides the module option. */
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x220)
			np->full_duplex = 1;
		np->default_port = option & 0x33ff;
		if (np->default_port & 0x330)
			np->medialock = 1;
	}
	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->full_duplex = 1;

	if (np->full_duplex) {
		if (np->msg_level & NETIF_MSG_PROBE)
			printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
				   " disabled.\n", dev->name);
		np->duplex_lock = 1;
	}

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &mii_ioctl;

	/* Allow forcing the media type.
	   NOTE(review): this largely repeats the option parsing above (which
	   used mask 0x33ff rather than 0x3ff) and additionally programs the
	   MII control register -- confirm both blocks are intended. */
	if (option > 0) {
		if (option & 0x220)
			np->full_duplex = 1;
		np->default_port = option & 0x3ff;
		if (np->default_port & 0x330) {
			np->medialock = 1;
			if (np->msg_level & NETIF_MSG_PROBE)
				printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
					   (option & 0x300 ? 100 : 10),
					   (np->full_duplex ? "full" : "half"));
			mdio_write(dev, 1, 0,
					   ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
					   (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
		}
	}

	return dev;
}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
+ Update to the code in other drivers for 8/10 bit addresses.
+*/
+
/* Delay between EEPROM clock transitions.
   This "delay" forces out buffered PCI writes, which is sufficient to meet
   the timing requirements of most EEPROMs.
*/
#define eeprom_delay(ee_addr) readl(ee_addr)

/* Bit positions in the EECtrl register used to bit-bang the serial
   EEPROM. */
enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
};
/* Chip-select plus a zero or one data bit, for clocking command bits out. */
#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the 01 preamble. */
enum EEPROM_Cmds {
	EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7,
};
+
+static int eeprom_read(long addr, int location)
+{
+ long eeprom_addr = addr + EECtrl;
+ int read_cmd = (EE_ReadCmd << 6) | location;
+ int retval = 0;
+ int i;
+
+ writel(EE_Write0, eeprom_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ writel(dataval | EE_ShiftClk, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ }
+ writel(EE_ChipSelect, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+
+ for (i = 15; i >= 0; i--) {
+ writel(EE_ChipSelect | EE_ShiftClk, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ retval |= (readl(eeprom_addr) & EE_DataOut) ? 1 << i : 0;
+ writel(EE_ChipSelect, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_Write0, eeprom_addr);
+ writel(0, eeprom_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write MII registers using software-generated serial MDIO
+ protocol. See the MII specifications or DP83840A data sheet for details.
+
+ The maximum data clock rate is 2.5 Mhz. To meet minimum timing we
+ must flush writes to the PCI bus with a PCI read. */
#define mdio_delay(mdio_addr) readl(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 0;

/* Bit positions in the EECtrl register used for bit-banged MDIO. */
enum mii_reg_bits {
	MDIO_ShiftClk=0x0040, MDIO_Data=0x0010, MDIO_EnbOutput=0x0020,
};
#define MDIO_EnbIn (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ writel(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
/* Read MII management register LOCATION from transceiver PHY_ID by
   bit-banging the serial MDIO frame through the EECtrl register.
   Returns the 16-bit register value. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long mdio_addr = dev->base_addr + EECtrl;
	/* 0xf6 supplies the frame start/opcode bits preceding the PHY and
	   register addresses -- presumably IEEE 802.3 clause 22 read framing;
	   verify against the MII specification. */
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writel(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		writel(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		writel(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((readl(mdio_addr) & MDIO_Data) ? 1 : 0);
		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Drop the trailing idle bit and keep the 16 data bits. */
	return (retval>>1) & 0xffff;
}
+
/* Write VALUE to MII management register LOCATION of transceiver PHY_ID
   by bit-banging the 32-bit serial MDIO write frame through EECtrl. */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	long mdio_addr = dev->base_addr + EECtrl;
	/* 0x5002<<16 supplies the start/opcode/turnaround bits around the
	   PHY and register address fields, with the data in the low 16 bits. */
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writel(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		writel(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		writel(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return;
}
+
/* dev->open handler: acquire the IRQ, power up the transceiver, set up the
   descriptor rings and Rx filter, program Tx/Rx configuration for the
   sensed duplex, enable interrupts, start the Rx/Tx engines and the media
   monitoring timer.  Returns 0 on success or -EAGAIN if the IRQ is busy. */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	u32 intr_status = readl(ioaddr + IntrStatus);

	/* We have not yet encountered a case where we need to reset the chip. */

	MOD_INC_USE_COUNT;

	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
		MOD_DEC_USE_COUNT;
		return -EAGAIN;
	}

	/* Power up Xcvr. */
	writel((~CfgXcrOff & readl(ioaddr + ChipConfig)) | 0x00400000,
		   ioaddr + ChipConfig);
	if (np->msg_level & NETIF_MSG_IFUP)
		printk(KERN_DEBUG "%s: netdev_open() irq %d intr_status %8.8x.\n",
			   dev->name, dev->irq, intr_status);

	init_ring(dev);

	/* Point the chip at the descriptor rings (upper 32 bits only matter
	   for 64-bit bus addressing). */
#if defined(ADDR_64BITS) && defined(__alpha__)
	writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtrHi);
	writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtrHi);
#else
	writel(0, ioaddr + RxRingPtrHi);
	writel(0, ioaddr + TxRingPtrHi);
#endif
	writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);

	/* Load the station address into the perfect-match filter registers,
	   16 bits at a time through RxFilterAddr/RxFilterData. */
	for (i = 0; i < 6; i += 2) {
		writel(i, ioaddr + RxFilterAddr);
		writel(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
			   ioaddr + RxFilterData);
	}

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	/* Configure for standard, in-spec Ethernet. */

	/* Pick full- or half-duplex Tx/Rx configuration, honoring the chip's
	   negotiated duplex (optionally active-low per drv_flags). */
	if (np->full_duplex ||
		((readl(ioaddr + ChipConfig) & CfgFDX) == 0) ^
		((np->drv_flags & FDXActiveLow) != 0)) {
		np->tx_config = 0xD0801002;
		np->rx_config = 0x10000020;
	} else {
		np->tx_config = 0x10801002;
		np->rx_config = 0x0020;
	}
	if (dev->mtu > 1500)
		np->rx_config |= 0x08000000;	/* Accept oversized frames. */
	writel(np->tx_config, ioaddr + TxConfig);
	writel(np->rx_config, ioaddr + RxConfig);
	if (np->msg_level & NETIF_MSG_IFUP)
		printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x.\n",
			   dev->name, (int)readl(ioaddr + TxConfig));

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	np->in_interrupt = 0;

	check_duplex(dev);
	set_rx_mode(dev);
	netif_start_tx_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
	writel(np->intr_enable, ioaddr + IntrMask);
	writel(1, ioaddr + IntrEnable);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(4, ioaddr + StatsCtrl); /* Clear Stats */

	if (np->msg_level & NETIF_MSG_IFUP)
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
			   dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer; /* timer handler */
	add_timer(&np->timer);

	return 0;
}
+
/* Track the negotiated duplex: read CfgFDX from ChipConfig and, when it
   differs from our cached state, rewrite the Tx/Rx configuration to match.
   Does nothing when duplex has been forced (duplex_lock). */
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int duplex;

	if (np->duplex_lock)
		return;
	duplex = readl(ioaddr + ChipConfig) & CfgFDX ? 1 : 0;
	if (np->full_duplex != duplex) {
		np->full_duplex = duplex;
		if (np->msg_level & NETIF_MSG_LINK)
			printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
				   " capability.\n", dev->name,
				   duplex ? "full" : "half");
		/* Toggle the full-duplex bits in the shadow Tx/Rx config. */
		if (duplex) {
			np->rx_config |= 0x10000000;
			np->tx_config |= 0xC0000000;
		} else {
			np->rx_config &= ~0x10000000;
			np->tx_config &= ~0xC0000000;
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
		if (np->msg_level & NETIF_MSG_LINK)
			printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x (%8.8x).\n",
				   dev->name, np->tx_config, (int)readl(ioaddr + TxConfig));
	}
}
+
/* Periodic (10 s) media monitor: kick a software interrupt if Rx refill
   ran out of skbuffs, detect a stalled Tx queue, re-check duplex, and
   re-arm itself. */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;

	if (np->msg_level & NETIF_MSG_TIMER)
		printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
			   dev->name, (int)readl(ioaddr + ChipConfig));
	if (np->rx_q_empty) {
		/* Trigger an interrupt to refill. */
		writel(SoftIntr, ioaddr + ChipCmd);
	}
	/* Declare a Tx timeout only when packets are pending and the queue
	   has been idle past TX_TIMEOUT. */
	if (netif_queue_paused(dev) &&
		np->cur_tx - np->dirty_tx > 1 &&
		(jiffies - dev->trans_start) > TX_TIMEOUT) {
		tx_timeout(dev);
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
+
/* Handle a Tx watchdog expiry: log the state (optionally dumping both
   rings) and bump the error count.  Note that actual hardware recovery is
   still a stub -- see the comments below. */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));

	if (np->msg_level & NETIF_MSG_TX_ERR) {
		int i;
		printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
		/* NOTE(review): Tx statuses are printed with %4.4x although
		   cmd_status is 32 bits wide (the Rx dump above uses %8.8x). */
		printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", np->tx_ring[i].cmd_status);
		printk("\n");
	}

	/* Perhaps we should reinitialize the hardware here. */
	dev->if_port = 0;
	/* Stop and restart the chip's Tx processes . */

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}
+
/* Refill the Rx ring buffers, returning non-zero if not full.
   Advances dirty_rx toward cur_rx, giving each consumed slot a fresh
   skbuff and re-arming its descriptor. */
static int rx_ring_fill(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned int entry;

	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				return 1;	/* Better luck next time. */
			skb->dev = dev;	/* Mark as being used by this device. */
			np->rx_ring[entry].buf_addr = virt_to_bus(skb->tail);
		}
		/* Re-arm the descriptor with the buffer size and per-packet
		   interrupt request. */
		np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
	}
	return 0;
}
+
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
   Links each ring circularly via next_desc, clears the skbuff tables,
   then populates the Rx buffers through rx_ring_fill(). */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->tx_full = 0;
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;

	/* MAX(PKT_BUF_SZ, dev->mtu + 8); */
	/* I know you _want_ to change this without understanding it. Don't. */
	np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
	np->rx_head_desc = &np->rx_ring[0];

	/* Initialize all Rx descriptors.  The last slot's next_desc is
	   fixed up after the loop to close the ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);

	/* Same circular wiring for the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i+1]);
		np->tx_ring[i].cmd_status = 0;
	}
	np->tx_ring[i-1].next_desc = virt_to_bus(&np->tx_ring[0]);

	/* Fill in the Rx buffers.
	   Allocation failure just leaves a "negative" np->dirty_rx. */
	np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
	rx_ring_fill(dev);

	return;
}
+
/* dev->hard_start_xmit handler: queue SKB on the next Tx descriptor, hand
   it to the chip with DescOwn, and manage the tx_full / queue-stop state.
   Returns 0 when the packet was queued, 1 to ask the stack to retry. */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned int entry;

	/* Block a timer-based transmit from overlapping. This happens when
	   packets are presumed lost, and we use this check the Tx status. */
	if (netif_pause_tx_queue(dev) != 0) {
		/* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx.
	   No spinlock is needed for either Tx or Rx.
	*/

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buf_addr = virt_to_bus(skb->data);
	np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
	np->cur_tx++;

	/* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */

	/* With TX_QUEUE_LEN-1 packets outstanding, tentatively declare the
	   queue full, then re-check dirty_tx (volatile re-read) in case the
	   interrupt handler just drained it. */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
		np->tx_full = 1;
		/* Check for a just-cleared queue. */
		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
			< TX_QUEUE_LEN - 4) {
			np->tx_full = 0;
			netif_unpause_tx_queue(dev);
		} else
			netif_stop_tx_queue(dev);
	} else
		netif_unpause_tx_queue(dev);	/* Typical path */
	/* Wake the potentially-idle transmit channel. */
	writel(TxOn, dev->base_addr + ChipCmd);

	dev->trans_start = jiffies;

	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}
+
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  It loops acknowledging sources until the status
   is clear or max_interrupt_work iterations have been spent, in which
   case it sets restore_intr_enable and bails out. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
				"device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	np = (struct netdev_private *)dev->priv;
	boguscnt = np->max_interrupt_work;

#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		dev->interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#endif

	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		if (np->msg_level & NETIF_MSG_INTR)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
				   dev->name, intr_status);

		/* 0xffffffff usually means the card is gone. */
		if (intr_status == 0 || intr_status == 0xffffffff)
			break;

		/* Acknowledge all of the current interrupt sources ASAP.
		   Nominally the read above accomplishes this, but...
		   NOTE(review): the ack mask 0x001ffff covers only 17 bits --
		   possibly intended to be 0x001fffff; confirm against the
		   status-register layout. */
		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (intr_status & (IntrRxDone | IntrRxIntr)) {
			netdev_rx(dev);
			np->rx_q_empty = rx_ring_fill(dev);
		}

		/* Rx went idle (or we poked SoftIntr/IntrDrv): try to refill
		   and restart the engine only if a buffer was actually added. */
		if (intr_status & (IntrRxIdle | IntrDrv)) {
			unsigned int old_dirty_rx = np->dirty_rx;
			if (rx_ring_fill(dev) == 0)
				np->rx_q_empty = 0;
			/* Restart Rx engine iff we did add a buffer. */
			if (np->dirty_rx != old_dirty_rx)
				writel(RxOn, dev->base_addr + ChipCmd);
		}

		/* Reap completed Tx descriptors, tallying stats and freeing
		   the queued skbuffs. */
		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			if (np->msg_level & NETIF_MSG_INTR)
				printk(KERN_DEBUG "%s: Tx entry %d @%p status %8.8x.\n",
					   dev->name, entry, &np->tx_ring[entry],
					   np->tx_ring[entry].cmd_status);
			if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
				break;	/* Chip still owns it; stop reaping. */
			if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
				if (np->msg_level & NETIF_MSG_TX_DONE)
					printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
						   dev->name, np->tx_ring[entry].cmd_status);
				np->stats.tx_packets++;
#if LINUX_VERSION_CODE > 0x20127
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
#endif
			} else {			/* Various Tx errors */
				int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
				if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
				if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
				if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
				if (tx_status & 0x00200000) np->stats.tx_window_errors++;
				if (np->msg_level & NETIF_MSG_TX_ERR)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						   dev->name, tx_status);
				np->stats.tx_errors++;
			}
			/* Free the original skb. */
			dev_free_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		/* Note the 4 slot hysteresis to mark the queue non-full. */
		if (np->tx_full
			&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, allow new TX entries. */
			np->tx_full = 0;
			netif_resume_tx_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			np->restore_intr_enable = 1;
			break;
		}
	} while (1);

	if (np->msg_level & NETIF_MSG_INTR)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));

#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
	clear_bit(0, (void*)&dev->interrupt);
#endif
	return;
}
+
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation.  Walks completed Rx
   descriptors (DescOwn, the sign bit, makes "status < 0" the completion
   test), passes good frames to the stack -- copying small ones below
   rx_copybreak, handing off the ring skbuff otherwise -- and tallies
   error statistics for the rest. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);

	/* If the driver owns the next entry it's a new packet. Send it up. */
	while (desc_status < 0) {			/* e.g. & DescOwn */
		if (np->msg_level & NETIF_MSG_RX_STATUS)
			printk(KERN_DEBUG "  In netdev_rx() entry %d status was %8.8x.\n",
				   entry, desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
			if (desc_status & DescMore) {
				/* Frame spilled over one buffer -- we do not handle
				   multi-descriptor frames. */
				printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
					   "multiple buffers, entry %#x status %x.\n",
					   dev->name, np->cur_rx, desc_status);
				np->stats.rx_length_errors++;
			} else {
				/* There was a error. */
				if (np->msg_level & NETIF_MSG_RX_ERR)
					printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
						   desc_status);
				np->stats.rx_errors++;
				if (desc_status & 0x06000000) np->stats.rx_over_errors++;
				if (desc_status & 0x00600000) np->stats.rx_length_errors++;
				if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
				if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			int pkt_len = (desc_status & 0x0fff) - 4;	/* Omit CRC size. */
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < np->rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				/* Hand the ring skbuff to the stack; the empty slot is
				   refilled later by rx_ring_fill(). */
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (np->msg_level & NETIF_MSG_PKTDATA)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			/* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
#if LINUX_VERSION_CODE > 0x20127
			np->stats.rx_bytes += pkt_len;
#endif
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}

	/* Refill is now done in the main interrupt loop. */
	return 0;
}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & LinkChange) {
+ int chip_config = readl(ioaddr + ChipConfig);
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ (int)readl(ioaddr + 0x90), (int)readl(ioaddr + 0x94));
+ if (chip_config & CfgLinkGood)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ /* Increase the Tx threshold, 32 byte units. */
+ if ((np->tx_config & 0x3f) < 62)
+ np->tx_config += 2; /* +64 bytes */
+ writel(np->tx_config, ioaddr + TxConfig);
+ }
+ if (intr_status & WOLPkt) {
+ int wol_status = readl(ioaddr + WOLCmd);
+ printk(KERN_NOTICE "%s: Link wake-up event %8.8x",
+ dev->name, wol_status);
+ }
+ if (intr_status & (RxStatusOverrun | IntrRxOverrun)) {
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Rx overflow! ns820 %8.8x.\n",
+ dev->name, intr_status);
+ np->stats.rx_fifo_errors++;
+ }
+ if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
+ RxStatusOverrun|0xA7ff)) {
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Something Wicked happened! ns820 %8.8x.\n",
+ dev->name, intr_status);
+ }
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr) {
+ np->stats.tx_fifo_errors++;
+ np->stats.rx_fifo_errors++;
+ }
+}
+
/* dev->get_stats handler: fold the chip's silently-dropped-frame counters
   (RxCRCErrs, RxMissed) into the software statistics and return them.
   A read of 0xffffffff means the registers are unavailable (presumably
   the card is absent), so the counters are left untouched. */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int crc_errs = readl(ioaddr + RxCRCErrs);

	if (crc_errs != 0xffffffff) {
		/* We need not lock this segment of code for SMP.
		   There is no atomic-add vulnerability for most CPUs,
		   and statistics are non-critical. */
		/* The chip only need report frame silently dropped. */
		np->stats.rx_crc_errors += crc_errs;
		np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
	}

	return &np->stats;
}
+
/* The little-endian AUTODIN II ethernet CRC calculations.
   A big-endian version is also available.
   This is slow but compact code. Do not use this routine for bulk data,
   use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c.
   Chips may use the upper or lower CRC bits, and may reverse and/or invert
   them. Select the endian-ness that results in minimal calculations.
*/
static unsigned const ethernet_polynomial_le = 0xedb88320U;

/* Compute the bit-reflected CRC-32 over LENGTH bytes at DATA, starting
   from 0xffffffff with no final inversion. */
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff;	/* Initial value. */
	int i;

	for (i = 0; i < length; i++) {
		unsigned int octet = data[i];
		int bit;

		/* Fold each byte in LSB-first, one bit per step. */
		for (bit = 0; bit < 8; bit++) {
			unsigned int mix = (crc ^ octet) & 1;
			crc >>= 1;
			if (mix)
				crc ^= ethernet_polynomial_le;
			octet >>= 1;
		}
	}
	return crc;
}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u8 mc_filter[64]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
+ | AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x7ff,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ for (i = 0; i < 64; i += 2) {
+ writel(rx_mode + 0x200 + i, ioaddr + RxFilterAddr);
+ writel((mc_filter[i+1]<<8) + mc_filter[i], ioaddr + RxFilterData);
+ }
+ }
+ writel(rx_mode, ioaddr + RxFilterAddr);
+ np->cur_rx_mode = rx_mode;
+}
+
/* dev->do_ioctl handler: MII register access (the 0x894x and older 0x89Fx
   command numbers) plus driver parameter get/set.  MII writes and
   parameter changes require CAP_NET_ADMIN. */
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	u16 *data = (u16 *)&rq->ifr_data;
	u32 *data32 = (void *)&rq->ifr_data;

	switch(cmd) {
	case 0x8947: case 0x89F0:
		/* SIOCGMIIPHY: Get the address of the PHY in use. */
		data[0] = 1;
		/* Fall Through */
	case 0x8948: case 0x89F1:
		/* SIOCGMIIREG: Read the specified MII register. */
		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
		return 0;
	case 0x8949: case 0x89F2:
		/* SIOCSMIIREG: Write the specified MII register */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* Mirror writes to PHY 1's control/advertising registers into
		   the driver's duplex and NWay state. */
		if (data[0] == 1) {
			u16 miireg = data[1] & 0x1f;
			u16 value = data[2];
			switch (miireg) {
			case 0:
				/* Check for autonegotiation on or reset. */
				np->duplex_lock = (value & 0x9000) ? 0 : 1;
				if (np->duplex_lock)
					np->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4: np->advertising = value; break;
			}
		}
		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		return 0;
	case SIOCGPARAMS:
		/* Export the current driver tunables. */
		data32[0] = np->msg_level;
		data32[1] = np->multicast_filter_limit;
		data32[2] = np->max_interrupt_work;
		data32[3] = np->rx_copybreak;
		return 0;
	case SIOCSPARAMS:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		np->msg_level = data32[0];
		np->multicast_filter_limit = data32[1];
		np->max_interrupt_work = data32[2];
		np->rx_copybreak = data32[3];
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
+
+/* Shut the interface down: stop the transmit queue, quiesce the chip,
+   release the IRQ and all ring buffers, and power down the transceiver.
+   The order matters: interrupts are masked and DMA stopped before any
+   buffer is freed.  Always returns 0. */
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
+ "Int %2.2x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd),
+ (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* We don't want the timer to re-start anything. */
+ del_timer(&np->timer);
+
+ /* Disable interrupts using the mask. */
+ writel(0, ioaddr + IntrMask);
+ writel(0, ioaddr + IntrEnable);
+ writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+
+ /* Snapshot the hardware counters while they are frozen. */
+ get_stats(dev);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %8.8x %8.8x.\n",
+ i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
+ i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue.  The descriptors are poisoned
+    with an invalid bus address so stray DMA is caught rather than
+    silently corrupting freed memory. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].cmd_status = 0;
+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ /* Power down Xcvr. */
+ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Power-management callback (see enum drv_pwr_action in pci-scan.h):
+   ATTACH pins the module, SUSPEND quiesces the chip, RESUME partially
+   restores it, and DETACH tears the device down and unlinks it from the
+   driver's device list.  Always returns 0. */
+static int power_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, freeze stats, stop Tx and Rx. */
+ writel(0, ioaddr + IntrEnable);
+ writel(2, ioaddr + StatsCtrl);
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the open() actions should be repeated. */
+ writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+ set_rx_mode(dev);
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ writel(1, ioaddr + IntrEnable);
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+ /* Unlink this device from the driver's module-global list. */
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ /* priv_addr is the unaligned allocation backing np; free it first. */
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Module entry point: print the version banner, then register either with
+   the CardBus framework or the shared PCI scan code (pci-scan.c). */
+int init_module(void)
+{
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+ register_driver(&etherdev_ops);
+ return 0;
+#else
+ return pci_drv_register(&ns820_drv_id, NULL);
+#endif
+}
+
+/* Module exit: unregister from the probe framework, then walk the list of
+   detected devices, unregistering and freeing each one. */
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&etherdev_ops);
+#else
+ pci_drv_unregister(&ns820_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+ iounmap((char *)root_net_dev->base_addr);
+ /* Grab the next pointer before freeing the current node. */
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` ns820.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ns820.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c ns820.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/pci-scan.c b/linux/src/drivers/net/pci-scan.c
new file mode 100644
index 0000000..ffb7b12
--- /dev/null
+++ b/linux/src/drivers/net/pci-scan.c
@@ -0,0 +1,659 @@
+/* pci-scan.c: Linux PCI network adapter support code. */
+/*
+ Originally written 1999-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License (GPL), incorporated herein by
+ reference. Drivers interacting with these functions are derivative
+ works and thus also must be licensed under the GPL and include an explicit
+ GPL notice.
+
+ This code provides common scan and activate functions for PCI network
+ interfaces.
+
+ The author may be reached as becker@scyld.com, or
+ Donald Becker
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Other contributers:
+*/
+static const char version[] =
+"pci-scan.c:v1.12 7/30/2003 Donald Becker <becker@scyld.com>"
+" http://www.scyld.com/linux/drivers.html\n";
+
+/* A few user-configurable values that may be modified when a module. */
+
+static int msg_level = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int min_pci_latency = 32;
+
+#if ! defined(__KERNEL__)
+#define __KERNEL__ 1
+#endif
+#if !defined(__OPTIMIZE__) && /* Mach glue, we think this is ok now: */ 0
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with the proper options, including "-O".
+#endif
+
+#if defined(MODULE) && ! defined(EXPORT_SYMTAB)
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#if LINUX_VERSION_CODE < 0x20500 && defined(MODVERSIONS)
+/* Another interface semantics screw-up. */
+#include <linux/module.h>
+#include <linux/modversions.h>
+#else
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20300
+/* Bogus change in the middle of a "stable" kernel series.
+ Also, in 2.4.7+ slab must come before interrupt.h to avoid breakage. */
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <asm/io.h>
+#include "pci-scan.h"
+#include "kern_compat.h"
+#if defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+#include <linux/apm_bios.h>
+#endif
+#ifdef CONFIG_PM
+/* New in 2.4 kernels, pointlessly incompatible with earlier APM. */
+#include <linux/pm.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#if (LINUX_VERSION_CODE < 0x20100)
+#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
+#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
+#define PCI_CAP_ID_PM 0x01 /* Power Management */
+#endif
+
+int (*register_hotswap_hook)(struct drv_id_info *did);
+void (*unregister_hotswap_hook)(struct drv_id_info *did);
+
+#if LINUX_VERSION_CODE > 0x20118 && defined(MODULE)
+MODULE_LICENSE("GPL");
+MODULE_PARM(msg_level, "i");
+MODULE_PARM(min_pci_latency, "i");
+MODULE_PARM_DESC(msg_level, "Enable additional status messages (0-7)");
+MODULE_PARM_DESC(min_pci_latency,
+ "Minimum value for the PCI Latency Timer settings");
+#if defined(EXPORT_SYMTAB)
+EXPORT_SYMBOL_NOVERS(pci_drv_register);
+EXPORT_SYMBOL_NOVERS(pci_drv_unregister);
+EXPORT_SYMBOL_NOVERS(acpi_wake);
+EXPORT_SYMBOL_NOVERS(acpi_set_pwr_state);
+EXPORT_SYMBOL_NOVERS(register_hotswap_hook);
+EXPORT_SYMBOL_NOVERS(unregister_hotswap_hook);
+#endif
+#endif
+
+/* List of registered drivers. */
+static struct drv_id_info *drv_list;
+/* List of detected PCI devices, for APM events. */
+static struct dev_info {
+ struct dev_info *next; /* Singly linked, newest entry first. */
+ void *dev; /* Opaque device handle returned by the driver's probe1(). */
+ struct drv_id_info *drv_id; /* Owning driver; used to reap entries on unregister. */
+ int flags;
+} *dev_list;
+
+/*
+ This code is not intended to support every configuration.
+ It is intended to minimize duplicated code by providing the functions
+ needed in almost every PCI driver.
+
+ The "no kitchen sink" policy:
+ Additional features and code will be added to this module only if more
+ than half of the drivers for common hardware would benefit from the feature.
+*/
+
+/*
+ Ideally we would detect and number all cards of a type (e.g. network) in
+ PCI slot order.
+ But that does not work with hot-swap card, CardBus cards and added drivers.
+ So instead we detect just the each chip table in slot order.
+
+ This routine takes a PCI ID table, scans the PCI bus, and calls the
+ associated attach/probe1 routine with the hardware already activated and
+ single I/O or memory address already mapped.
+
+ This routine will later be supplemented with CardBus and hot-swap PCI
+ support using the same table. Thus the pci_chip_tbl[] should not be
+ marked as __initdata.
+*/
+
+#if LINUX_VERSION_CODE >= 0x20200
+/* Grrrr.. complex abstaction layers with negative benefit. */
+/* Scan the PCI bus (2.2+ kernel interface) for devices matching DRV_ID's
+   zero-terminated pci_dev_tbl.  Each match is activated (ACPI wake, command
+   register, bus mastering, latency) and handed to drv_id->probe1() with the
+   BAR already mapped.  Returns 0 if any card was found or a hot-swap hook
+   registered, else -ENODEV.  INITIAL_DEVICE is passed to the first probe
+   only (a kernel-probe convention), then cleared. */
+int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
+{
+ int chip_idx, cards_found = 0;
+ struct pci_dev *pdev = NULL;
+ struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
+ struct drv_id_info *drv;
+ void *newdev;
+
+
+ /* Ignore a double-register attempt. */
+ for (drv = drv_list; drv; drv = drv->next)
+ if (drv == drv_id)
+ return -EBUSY;
+
+ while ((pdev = pci_find_class(drv_id->pci_class, pdev)) != 0) {
+ u32 pci_id, pci_subsys_id, pci_class_rev;
+ u16 pci_command, new_command;
+ int pci_flags;
+ long pciaddr; /* Bus address. */
+ long ioaddr; /* Mapped address for this processor. */
+
+ pci_read_config_dword(pdev, PCI_VENDOR_ID, &pci_id);
+ /* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
+ pci_read_config_dword(pdev, 0x2c, &pci_subsys_id);
+ pci_read_config_dword(pdev, PCI_REVISION_ID, &pci_class_rev);
+
+ if (msg_level > 3)
+ printk(KERN_DEBUG "PCI ID %8.8x subsystem ID is %8.8x.\n",
+ pci_id, pci_subsys_id);
+ /* Masked match against vendor/device, subsystem, and revision;
+    more specific table entries must precede catch-alls. */
+ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+ struct pci_id_info *chip = &pci_tbl[chip_idx];
+ if ((pci_id & chip->id.pci_mask) == chip->id.pci
+ && (pci_subsys_id&chip->id.subsystem_mask) == chip->id.subsystem
+ && (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
+ break;
+ }
+ if (pci_tbl[chip_idx].name == 0) /* Compiled out! */
+ continue;
+
+ pci_flags = pci_tbl[chip_idx].pci_flags;
+#if LINUX_VERSION_CODE >= 0x2030C
+ /* Wow. A oversized, hard-to-use abstraction. Bogus. */
+ pciaddr = pdev->resource[(pci_flags >> 4) & 7].start;
+#else
+ pciaddr = pdev->base_address[(pci_flags >> 4) & 7];
+#if defined(__alpha__) /* Really any machine with 64 bit addressing. */
+ if (pci_flags & PCI_ADDR_64BITS)
+ pciaddr |= ((long)pdev->base_address[((pci_flags>>4)&7)+ 1]) << 32;
+#endif
+#endif
+ if (msg_level > 2)
+ printk(KERN_INFO "Found %s at PCI address %#lx, mapped IRQ %d.\n",
+ pci_tbl[chip_idx].name, pciaddr, pdev->irq);
+
+ if ( ! (pci_flags & PCI_UNUSED_IRQ) &&
+ (pdev->irq == 0 || pdev->irq == 255)) {
+ if (pdev->bus->number == 32) /* Broken CardBus activation. */
+ printk(KERN_WARNING "Resources for CardBus device '%s' have"
+ " not been allocated.\n"
+ KERN_WARNING "Activation has been delayed.\n",
+ pci_tbl[chip_idx].name);
+ else
+ printk(KERN_WARNING "PCI device '%s' was not assigned an "
+ "IRQ.\n"
+ KERN_WARNING "It will not be activated.\n",
+ pci_tbl[chip_idx].name);
+ continue;
+ }
+ if ((pci_flags & PCI_BASE_ADDRESS_SPACE_IO)) {
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+ if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+ continue;
+ } else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size)) == 0) {
+ printk(KERN_INFO "Failed to map PCI address %#lx for device "
+ "'%s'.\n", pciaddr, pci_tbl[chip_idx].name);
+ continue;
+ }
+ if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
+ acpi_wake(pdev);
+ /* The low three pci_flags bits are PCI command-register enables. */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ new_command = pci_command | (pci_flags & 7);
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled the"
+ " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
+ pdev->bus->number, pdev->devfn, pci_command, new_command);
+ pci_write_config_word(pdev, PCI_COMMAND, new_command);
+ }
+
+ newdev = drv_id->probe1(pdev, initial_device,
+ ioaddr, pdev->irq, chip_idx, cards_found);
+ if (newdev == NULL)
+ continue;
+ initial_device = 0;
+ cards_found++;
+ if (pci_flags & PCI_COMMAND_MASTER) {
+ pci_set_master(pdev);
+ if ( ! (pci_flags & PCI_NO_MIN_LATENCY)) {
+ u8 pci_latency;
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < min_pci_latency) {
+ printk(KERN_INFO " PCI latency timer (CFLT) is "
+ "unreasonably low at %d. Setting to %d clocks.\n",
+ pci_latency, min_pci_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
+ min_pci_latency);
+ }
+ }
+ }
+ /* Record the device so PM events can be fanned out to it later. */
+ {
+ struct dev_info *devp =
+ kmalloc(sizeof(struct dev_info), GFP_KERNEL);
+ if (devp == 0)
+ continue;
+ devp->next = dev_list;
+ devp->dev = newdev;
+ devp->drv_id = drv_id;
+ dev_list = devp;
+ }
+ }
+
+ if (((drv_id->flags & PCI_HOTSWAP)
+ && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
+ || cards_found) {
+ MOD_INC_USE_COUNT;
+ drv_id->next = drv_list;
+ drv_list = drv_id;
+ return 0;
+ } else
+ return -ENODEV;
+}
+#else
+/* Pre-2.2 variant of pci_drv_register() using the pcibios_* BIOS32
+   interface.  Scans for devices matching DRV_ID's zero-terminated
+   pci_dev_tbl, activates each match and hands it to drv_id->probe1().
+   Returns 0 if any card was found or a hot-swap hook registered, else
+   -ENODEV.
+   Fixes vs. the original: cards_found was incremented (and
+   initial_device cleared) even when probe1() returned NULL, so a scan in
+   which every probe failed still reported success; both are now done only
+   on a successful probe, matching the 2.2+ variant above.  The final
+   "cards_found ? 0 : -ENODEV" was also dead logic (that branch is only
+   reachable with cards_found == 0) and is simplified. */
+int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
+{
+ int pci_index, cards_found = 0;
+ unsigned char pci_bus, pci_device_fn;
+ struct pci_dev *pdev;
+ struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
+ void *newdev;
+
+ if ( ! pcibios_present())
+ return -ENODEV;
+
+ for (pci_index = 0; pci_index < 0xff; pci_index++) {
+ u32 pci_id, subsys_id, pci_class_rev;
+ u16 pci_command, new_command;
+ int chip_idx, irq, pci_flags;
+ long pciaddr;
+ long ioaddr;
+ u32 pci_busaddr;
+ u8 pci_irq_line;
+
+ if (pcibios_find_class (drv_id->pci_class, pci_index,
+ &pci_bus, &pci_device_fn)
+ != PCIBIOS_SUCCESSFUL)
+ break;
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_VENDOR_ID, &pci_id);
+ /* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
+ pcibios_read_config_dword(pci_bus, pci_device_fn, 0x2c, &subsys_id);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_REVISION_ID, &pci_class_rev);
+
+ /* Masked match; more specific table entries precede catch-alls. */
+ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+ struct pci_id_info *chip = &pci_tbl[chip_idx];
+ if ((pci_id & chip->id.pci_mask) == chip->id.pci
+ && (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
+ && (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
+ break;
+ }
+ if (pci_tbl[chip_idx].name == 0) /* Compiled out! */
+ continue;
+
+ pci_flags = pci_tbl[chip_idx].pci_flags;
+ pdev = pci_find_slot(pci_bus, pci_device_fn);
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ irq = pci_irq_line;
+ /* ((pci_flags >> 2) & 0x1C) + 0x10 selects the BAR named in the
+    table's PCI_ADDRn flag. */
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ ((pci_flags >> 2) & 0x1C) + 0x10,
+ &pci_busaddr);
+ pciaddr = pci_busaddr;
+#if defined(__alpha__)
+ if (pci_flags & PCI_ADDR_64BITS) {
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ ((pci_flags >> 2) & 0x1C) + 0x14,
+ &pci_busaddr);
+ pciaddr |= ((long)pci_busaddr)<<32;
+ }
+#endif
+
+ if (msg_level > 2)
+ printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n",
+ pci_tbl[chip_idx].name, pciaddr, irq);
+
+ if ( ! (pci_flags & PCI_UNUSED_IRQ) &&
+ (irq == 0 || irq >= 16)) {
+ if (pci_bus == 32) /* Broken CardBus activation. */
+ printk(KERN_WARNING "Resources for CardBus device '%s' have"
+ " not been allocated.\n"
+ KERN_WARNING "It will not be activated.\n",
+ pci_tbl[chip_idx].name);
+ else
+ printk(KERN_WARNING "PCI device '%s' was not assigned an "
+ "IRQ.\n"
+ KERN_WARNING "It will not be activated.\n",
+ pci_tbl[chip_idx].name);
+ continue;
+ }
+
+ if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+ if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+ continue;
+ } else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size)) == 0) {
+ printk(KERN_INFO "Failed to map PCI address %#lx.\n",
+ pciaddr);
+ continue;
+ }
+
+ if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
+ acpi_wake(pdev);
+ /* The low three pci_flags bits are PCI command-register enables. */
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ new_command = pci_command | (pci_flags & 7);
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled the"
+ " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
+ pci_bus, pci_device_fn, pci_command, new_command);
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, new_command);
+ }
+
+ newdev = drv_id->probe1(pdev, initial_device,
+ ioaddr, irq, chip_idx, cards_found);
+
+ if (newdev && (pci_flags & PCI_COMMAND_MASTER) &&
+ ! (pci_flags & PCI_NO_MIN_LATENCY)) {
+ u8 pci_latency;
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < min_pci_latency) {
+ printk(KERN_INFO " PCI latency timer (CFLT) is "
+ "unreasonably low at %d. Setting to %d clocks.\n",
+ pci_latency, min_pci_latency);
+ pcibios_write_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, min_pci_latency);
+ }
+ }
+ if (newdev) {
+ /* Record the device so PM events can be fanned out later. */
+ struct dev_info *devp =
+ kmalloc(sizeof(struct dev_info), GFP_KERNEL);
+ if (devp) {
+ devp->next = dev_list;
+ devp->dev = newdev;
+ devp->drv_id = drv_id;
+ dev_list = devp;
+ }
+ /* Only a successful probe consumes initial_device and counts
+    as a found card (mirrors the 2.2+ code path). */
+ initial_device = 0;
+ cards_found++;
+ }
+ }
+
+ if (((drv_id->flags & PCI_HOTSWAP)
+ && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
+ || cards_found) {
+ MOD_INC_USE_COUNT;
+ drv_id->next = drv_list;
+ drv_list = drv_id;
+ return 0;
+ } else
+ return -ENODEV;
+}
+#endif
+
+/* Remove DRV_ID from the registered-driver list (dropping the module
+   reference it held) and free every device record it owned.  Safe to call
+   even if the driver was never successfully registered. */
+void pci_drv_unregister(struct drv_id_info *drv_id)
+{
+	struct drv_id_info **drv_link;
+	struct dev_info **dev_link;
+
+	if (unregister_hotswap_hook)
+		(*unregister_hotswap_hook)(drv_id);
+
+	/* Unlink the driver itself. */
+	for (drv_link = &drv_list; *drv_link; drv_link = &(*drv_link)->next) {
+		if (*drv_link != drv_id)
+			continue;
+		*drv_link = drv_id->next;
+		MOD_DEC_USE_COUNT;
+		break;
+	}
+
+	/* Reap all device records created for this driver. */
+	dev_link = &dev_list;
+	while (*dev_link) {
+		struct dev_info *entry = *dev_link;
+		if (entry->drv_id == drv_id) {
+			*dev_link = entry->next;
+			kfree(entry);
+		} else {
+			dev_link = &entry->next;
+		}
+	}
+}
+
+#if LINUX_VERSION_CODE < 0x20400
+/*
+ Search PCI configuration space for the specified capability registers.
+ Return the index, or 0 on failure.
+ The 2.4 kernel now includes this function.
+*/
+/* Walk the PCI capability list looking for FINDTYPE (e.g. PCI_CAP_ID_PM).
+   Returns the configuration-space offset of the matching capability, or 0
+   when the device has no capability list or the type is absent.
+   Fixes vs. the original: the redundant pre-loop assignment to cap_idx
+   (immediately overwritten by the for-initializer) is removed, and the
+   walk is bounded so a malformed, circular capability list cannot hang
+   the kernel (the stock kernel's implementation bounds it the same way). */
+int pci_find_capability(struct pci_dev *pdev, int findtype)
+{
+ u16 pci_status, cap_type;
+ u8 pci_cap_idx;
+ int cap_idx;
+ int ttl = 48; /* Max capabilities in 256-byte config space. */
+
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+ if ( ! (pci_status & PCI_STATUS_CAP_LIST))
+ return 0;
+ pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pci_cap_idx);
+ for (cap_idx = pci_cap_idx; cap_idx && --ttl >= 0;
+ cap_idx = (cap_type >> 8) & 0xff) {
+ pci_read_config_word(pdev, cap_idx, &cap_type);
+ if ((cap_type & 0xff) == findtype)
+ return cap_idx;
+ }
+ return 0;
+}
+#endif
+
+/* Change a device from D3 (sleep) to D0 (active).
+ Return the old power state.
+ This is more complicated than you might first expect since most cards
+ forget all PCI config info during the transition! */
+int acpi_wake(struct pci_dev *pdev)
+{
+ u32 base[5], romaddr;
+ u16 pci_command, pwr_command;
+ u8 pci_latency, pci_cacheline, irq;
+ int i, pwr_cmd_idx = pci_find_capability(pdev, PCI_CAP_ID_PM);
+
+ /* No power-management capability: nothing to wake. */
+ if (pwr_cmd_idx == 0)
+ return 0;
+ /* Offset 4 into the PM capability holds the power control/status word;
+    low two bits are the current power state (0 == D0/awake). */
+ pci_read_config_word(pdev, pwr_cmd_idx + 4, &pwr_command);
+ if ((pwr_command & 3) == 0)
+ return 0;
+ /* Save config registers BEFORE the state change: the transition out of
+    D3 typically clears them (see comment above). */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ for (i = 0; i < 5; i++)
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
+ &base[i]);
+ pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &romaddr);
+ pci_read_config_byte( pdev, PCI_LATENCY_TIMER, &pci_latency);
+ pci_read_config_byte( pdev, PCI_CACHE_LINE_SIZE, &pci_cacheline);
+ pci_read_config_byte( pdev, PCI_INTERRUPT_LINE, &irq);
+
+ /* Switch to D0, then restore everything we saved. */
+ pci_write_config_word(pdev, pwr_cmd_idx + 4, 0x0000);
+ for (i = 0; i < 5; i++)
+ if (base[i])
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
+ base[i]);
+ pci_write_config_dword(pdev, PCI_ROM_ADDRESS, romaddr);
+ pci_write_config_byte( pdev, PCI_INTERRUPT_LINE, irq);
+ pci_write_config_byte( pdev, PCI_CACHE_LINE_SIZE, pci_cacheline);
+ pci_write_config_byte( pdev, PCI_LATENCY_TIMER, pci_latency);
+ /* Re-enable I/O and memory decode along with the saved command bits. */
+ pci_write_config_word( pdev, PCI_COMMAND, pci_command | 5);
+ return pwr_command & 3;
+}
+
+/* Put PDEV into ACPI power state NEW_STATE, waking it first when it is
+   leaving D3 so lost config-space state is restored.  Returns the previous
+   power state, or 0 when the device has no power-management capability. */
+int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state new_state)
+{
+	int pm_offset = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	u16 pm_ctrl;
+
+	if (pm_offset == 0)
+		return 0;
+	pci_read_config_word(pdev, pm_offset + 4, &pm_ctrl);
+	/* Leaving D3 needs the full restore sequence in acpi_wake(). */
+	if (new_state != ACPI_D3 && (pm_ctrl & 3) == ACPI_D3)
+		acpi_wake(pdev);
+	pci_write_config_word(pdev, pm_offset + 4, (pm_ctrl & ~3) | new_state);
+	return pm_ctrl & 3;
+}
+
+#if defined(CONFIG_PM)
+/* 2.4-style power-management callback: fan each event out to every
+   registered device's pwr_event() handler.  A static flag suppresses
+   duplicate suspend and unmatched resume events.  Always returns 0.
+   Fix vs. the original: the debug printk dereferenced drv_list->name
+   without a NULL check, oopsing if a PM event arrived while no driver was
+   registered. */
+static int handle_pm_event(struct pm_dev *dev, int event, void *data)
+{
+ static int down = 0;
+ struct dev_info *devi;
+ int pwr_cmd = -1;
+
+ if (msg_level > 1)
+ printk(KERN_DEBUG "pci-scan: Handling power event %d for driver "
+ "list %s...\n",
+ event, drv_list ? drv_list->name : "(none)");
+ switch (event) {
+ case PM_SUSPEND:
+ if (down) {
+ printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
+ break;
+ }
+ down = 1;
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
+ break;
+ case PM_RESUME:
+ if (!down) {
+ printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
+ break;
+ }
+ for (devi = dev_list; devi; devi = devi->next) {
+ if (devi->drv_id->pwr_event) {
+ if (msg_level > 3)
+ printk(KERN_DEBUG "pci-scan: Calling resume for %s "
+ "device.\n", devi->drv_id->name);
+ devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
+ }
+ }
+ down = 0;
+ break;
+ /* These two map directly onto driver power commands, sent below. */
+ case PM_SET_WAKEUP: pwr_cmd = DRV_PWR_WakeOn; break;
+ case PM_EJECT: pwr_cmd = DRV_DETACH; break;
+ default:
+ printk(KERN_DEBUG "pci-scan: Unknown power management event %d.\n",
+ event);
+ }
+ if (pwr_cmd >= 0)
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, pwr_cmd);
+
+ return 0;
+}
+
+#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+/* Pre-2.4 APM callback: would fan suspend/resume events out to every
+   registered device's pwr_event() handler. */
+static int handle_apm_event(apm_event_t event)
+{
+ static int down = 0;
+ struct dev_info *devi;
+
+ if (msg_level > 1)
+ printk(KERN_DEBUG "pci-scan: Handling APM event %d for driver "
+ "list %s...\n",
+ event, drv_list->name);
+ /* NOTE(review): this unconditional return disables all of the APM
+    handling below -- everything from here to the end of the function is
+    unreachable.  Presumably deliberate in this port (cf. the "Mach glue"
+    comments elsewhere in this file), but confirm; if APM support is
+    truly unwanted the dead code could be removed outright. */
+ return 0;
+ switch (event) {
+ case APM_SYS_SUSPEND:
+ case APM_USER_SUSPEND:
+ if (down) {
+ printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
+ break;
+ }
+ down = 1;
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
+ break;
+ case APM_NORMAL_RESUME:
+ case APM_CRITICAL_RESUME:
+ if (!down) {
+ printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
+ break;
+ }
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
+ down = 0;
+ break;
+ }
+ return 0;
+}
+#endif /* CONFIG_APM */
+
+#ifdef MODULE
+/* Module entry point: announce the version, then hook whichever
+   power-management interface (2.4 pm, or APM on older kernels) the kernel
+   configuration provides.  Always succeeds. */
+int init_module(void)
+{
+ if (msg_level) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s", version);
+
+#if defined(CONFIG_PM)
+ pm_register(PM_PCI_DEV, 0, &handle_pm_event);
+#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+ apm_register_callback(&handle_apm_event);
+#endif
+ return 0;
+}
+/* Module exit: detach from power management and warn about device records
+   a client driver failed to release via pci_drv_unregister(). */
+void cleanup_module(void)
+{
+#if defined(CONFIG_PM)
+ pm_unregister_all(&handle_pm_event);
+#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+ apm_unregister_callback(&handle_apm_event);
+#endif
+ if (dev_list != NULL)
+ printk(KERN_WARNING "pci-scan: Unfreed device references.\n");
+ return;
+}
+#endif
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -DEXPORT_SYMTAB -Wall -Wstrict-prototypes -O6 -c pci-scan.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/pci-scan.h b/linux/src/drivers/net/pci-scan.h
new file mode 100644
index 0000000..649b34b
--- /dev/null
+++ b/linux/src/drivers/net/pci-scan.h
@@ -0,0 +1,90 @@
+#ifndef _PCI_SCAN_H
+#define _PCI_SCAN_H
+/*
+ version 1.02 $Version:$ $Date: 2006/01/22 15:54:41 $
+ Copyright 1999-2001 Donald Becker / Scyld Computing Corporation
+ This software is part of the Linux kernel. It may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+*/
+
+/*
+ These are the structures in the table that drives the PCI probe routines.
+ Note the matching code uses a bitmask: more specific table entries should
+ be placed before "catch-all" entries.
+
+ The table must be zero terminated.
+*/
+enum pci_id_flags_bits {
+ /* Set PCI command register bits before calling probe1(). */
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ /* Read and map the single following PCI BAR. */
+ PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
+ PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
+ PCI_UNUSED_IRQ=0x800,
+};
+
+/* One row of the probe table: a masked ID match plus activation flags.
+   The scan code matches vendor/device, subsystem and revision fields each
+   under its own mask, so catch-all rows must come last. */
+struct pci_id_info {
+ const char *name;
+ struct match_info {
+ int pci, pci_mask, subsystem, subsystem_mask;
+ int revision, revision_mask; /* Only 8 bits. */
+ } id;
+ enum pci_id_flags_bits pci_flags;
+ int io_size; /* Needed for I/O region check or ioremap(). */
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
+
+enum drv_id_flags {
+ PCI_HOTSWAP=1, /* Leave module loaded for Cardbus-like chips. */
+};
+/* Events passed to drv_id_info.pwr_event(). */
+enum drv_pwr_action {
+ DRV_NOOP, /* No action. */
+ DRV_ATTACH, /* The driver may expect power ops. */
+ DRV_SUSPEND, /* Machine suspending, next event RESUME or DETACH. */
+ DRV_RESUME, /* Resume from previous SUSPEND */
+ DRV_DETACH, /* Card will-be/is gone. Valid from SUSPEND! */
+ DRV_PWR_WakeOn, /* Put device in e.g. Wake-On-LAN mode. */
+ DRV_PWR_DOWN, /* Go to lowest power mode. */
+ DRV_PWR_UP, /* Go to normal power mode. */
+};
+
+/* Per-driver registration record passed to pci_drv_register(). */
+struct drv_id_info {
+ const char *name; /* Single-word driver name. */
+ int flags; /* enum drv_id_flags, e.g. PCI_HOTSWAP. */
+ int pci_class; /* Typically PCI_CLASS_NETWORK_ETHERNET<<8. */
+ struct pci_id_info *pci_dev_tbl; /* Zero-terminated match table. */
+ void *(*probe1)(struct pci_dev *pdev, void *dev_ptr,
+ long ioaddr, int irq, int table_idx, int fnd_cnt);
+ /* Optional, called for suspend, resume and detach. */
+ int (*pwr_event)(void *dev, int event);
+ /* Internal values. */
+ struct drv_id_info *next;
+ void *cb_ops;
+};
+
+/* PCI scan and activate.
+ Scan PCI-like hardware, calling probe1(..,dev,..) on devices that match.
+ Returns -ENODEV, a negative number, if no cards are found. */
+
+extern int pci_drv_register(struct drv_id_info *drv_id, void *initial_device);
+extern void pci_drv_unregister(struct drv_id_info *drv_id);
+
+
+/* ACPI routines.
+ Wake (change to ACPI D0 state) or set the ACPI power level of a sleeping
+ ACPI device. Returns the old power state. */
+
+int acpi_wake(struct pci_dev *pdev);
+enum acpi_pwr_state {ACPI_D0, ACPI_D1, ACPI_D2, ACPI_D3};
+int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state state);
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
+#endif
diff --git a/linux/src/drivers/net/pcnet32.c b/linux/src/drivers/net/pcnet32.c
new file mode 100644
index 0000000..da0e870
--- /dev/null
+++ b/linux/src/drivers/net/pcnet32.c
@@ -0,0 +1,970 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996,97 Thomas Bogendoerfer, 1993-1995,1998 Donald Becker
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * Derived from the lance driver written 1993-1995 by Donald Becker.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * This driver is for AMD PCnet-PCI based ethercards
+ */
+
+static const char *version = "pcnet32.c:v0.99B 4/4/98 DJBecker/TSBogend.\n";
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
+ * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
+ */
+#define PCNET_LOG_TX_BUFFERS 4
+#define PCNET_LOG_RX_BUFFERS 4
+
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Driver verbosity level. 0 = no messages, 7 = wordy death.
+ Modify here, or when loading as a module. */
+static int pcnet32_debug = 1;
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The difference
+ * from the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * History:
+ * v0.01: Initial version
+ * only tested on Alpha Noname Board
+ * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
+ * tested on a ASUS SP3G
+ * v0.10: fixed an odd problem with the 79C794 in a Compaq Deskpro XL
+ * looks like the 974 doesn't like stopping and restarting in a
+ * short period of time; now we do a reinit of the lance; the
+ * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
+ * and hangs the machine (thanks to Klaus Liedl for debugging)
+ * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
+ * made it standalone (no need for lance.c)
+ * v0.13: added additional PCI detecting for special PCI devices (Compaq)
+ * v0.14: stripped down additional PCI probe (thanks to David C Niemi
+ * and sveneric@xs4all.nl for testing this on their Compaq boxes)
+ * v0.15: added 79C965 (VLB) probe
+ * added interrupt sharing for PCI chips
+ * v0.16: fixed set_multicast_list on Alpha machines
+ * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
+ * v0.19: changed setting of autoselect bit
+ * v0.20: removed additional Compaq PCI probe; there is now a working one
+ * in arch/i386/bios32.c
+ * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
+ * v0.22: added printing of status to ring dump
+ * v0.23: changed enet_statistics to net_device_stats
+ * v0.99: Changes for 2.0.34 final release. -djb
+ */
+
+
+#ifndef __powerpc__
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#endif
+#if (LINUX_VERSION_CODE < 0x20123)
+//#define test_and_set_bit(val, addr) set_bit(val, addr)
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((PCNET_LOG_TX_BUFFERS) << 12)
+
+#define RX_RING_SIZE (1 << (PCNET_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((PCNET_LOG_RX_BUFFERS) << 4)
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+enum pcnet_offsets { PCNET32_DATA=0x10, PCNET32_ADDR=0x12, PCNET32_RESET=0x14,
+ PCNET32_BUS_IF=0x16,};
+#define PCNET32_TOTAL_SIZE 0x20
+
+/* The PCNET32 Rx and Tx ring descriptors. Multi-byte fields are
+   stored through the le*_to_cpu wrappers at the points of use, as
+   the chip reads them in little-endian order. */
+struct pcnet32_rx_head {
+ u32 base; /* Bus address of the receive buffer. */
+ s16 buf_length; /* Two's complement of the buffer size (set to -PKT_BUF_SZ). */
+ s16 status; /* Bit 15 = ownership: set while the chip owns the entry. */
+ u32 msg_length; /* Received frame length, written by the chip. */
+ u32 reserved;
+};
+
+struct pcnet32_tx_head {
+ u32 base; /* Bus address of the packet data. */
+ s16 length; /* Two's complement of the packet length. */
+ s16 status; /* Bit 15 = ownership, plus error summary bits. */
+ u32 misc; /* Error detail bits, written by the chip. */
+ u32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ u16 mode; /* MODE register image (0x8000 = promiscuous, see set_multicast_list). */
+ u16 tlen_rlen; /* Encoded ring sizes: TX_RING_LEN_BITS | RX_RING_LEN_BITS. */
+ u8 phys_addr[6]; /* Station (MAC) address. */
+ u16 reserved;
+ u32 filter[2]; /* 64-bit multicast filter (all-ones or all-zeros here). */
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring;
+ u32 tx_ring;
+};
+
+/* Per-device state. Allocated in pcnet32_probe1() with kmalloc and
+   rounded up to a 16-byte boundary, since the rings below must be
+   16-byte aligned and reachable by bus-master DMA. */
+struct pcnet32_private {
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries
+ in 32bit mode. */
+ struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
+ struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
+ struct pcnet32_init_block init_block;
+ const char *name; /* Chip name from chip_table[]. */
+ struct device *next_module; /* Chain of probed devices, for cleanup_module(). */
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ char tx_full; /* Nonzero while every Tx slot is occupied. */
+ unsigned long lock; /* Bit 0: xmit-path lock, taken with test_and_set_bit. */
+};
+
+/* Chip IDs as read from CSR88/89 in pcnet32_probe1(); the final
+   zero-ID entry is the catch-all used when no ID matches. */
+static struct pcnet_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x2420, "PCnet/PCI 79C970", 0},
+ {0x2430, "PCnet32", 0},
+ {0x2621, "PCnet/PCI II 79C970A", 0},
+ {0x2623, "PCnet/FAST 79C971", 0},
+ {0x2624, "PCnet/FAST+ 79C972", 0},
+ {0x0, "PCnet32 (unknown)", 0}, /* Sentinel: loop terminator / fallback name. */
+};
+
+/* Index of functions. */
+int pcnet32_probe(struct device *dev);
+static int pcnet32_probe1(struct device *dev, unsigned int ioaddr, unsigned char irq_line);
+static int pcnet32_open(struct device *dev);
+static void pcnet32_init_ring(struct device *dev);
+static int pcnet32_start_xmit(struct sk_buff *skb, struct device *dev);
+static int pcnet32_rx(struct device *dev);
+static void pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int pcnet32_close(struct device *dev);
+static struct enet_statistics *pcnet32_get_stats(struct device *dev);
+static void pcnet32_set_multicast_list(struct device *dev);
+
+
+/* A list of all installed PCnet32 devices, for removing the driver module. */
+static struct device *root_pcnet32_dev = NULL;
+
+/* Scan the PCI bus for AMD LANCE-family chips and set up each new one.
+   `dev' may be a template device reused for the first card found;
+   later cards allocate their own inside pcnet32_probe1().
+   Returns 0 if at least one card was attached, -ENODEV otherwise.
+   Fixes vs. original: the early return used positive ENODEV
+   (inconsistent with the -ENODEV exit), and cards_found was
+   incremented even when pcnet32_probe1() failed. */
+int pcnet32_probe (struct device *dev)
+{
+ static int pci_index = 0; /* Static, for multiple probe calls. */
+ int cards_found = 0;
+
+ if ( ! pcibios_present())
+ return -ENODEV;
+
+ for (;pci_index < 0xff; pci_index++) {
+ u8 irq_line;
+ u16 pci_command, new_command;
+ unsigned char pci_bus, pci_device_fn;
+ u32 pci_ioaddr;
+
+ if (pcibios_find_device (PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
+ pci_index, &pci_bus, &pci_device_fn)
+ != PCIBIOS_SUCCESSFUL)
+ break;
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+
+ /* Avoid already found cards from previous pcnet32_probe() calls */
+ if (check_region(pci_ioaddr, PCNET32_TOTAL_SIZE))
+ continue;
+
+ /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled the AMD Ethernet"
+ " device at %2x-%2x."
+ " Updating PCI command %4.4x->%4.4x.\n",
+ pci_bus, pci_device_fn, pci_command, new_command);
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, new_command);
+ }
+
+ if (pcnet32_probe1(dev, pci_ioaddr, irq_line) != 0) {
+ /* Should never happen. */
+ printk(KERN_ERR "pcnet32.c: Probe of PCI card at %#x failed.\n",
+ pci_ioaddr);
+ continue; /* Do not count a card that failed to set up. */
+ }
+ dev = 0; /* Template consumed; later cards allocate their own. */
+ cards_found++;
+ }
+
+ return cards_found ? 0 : -ENODEV;
+}
+
+
+/* Set up one chip at `ioaddr': verify the PROM signature bytes and
+   CSR88/89 chip ID, read the station address, allocate the 16-byte
+   aligned DMA-capable private area and the Rx buffers, program the
+   init-block address into CSR1/CSR2, and register the net device.
+   Returns 0 on success, -ENODEV if no supported chip responds, or
+   -ENOMEM on allocation failure.
+   Fixes vs. original: both kmalloc() results were used unchecked
+   (NULL dereference on OOM), and the device was linked into the
+   module list before the allocations had succeeded. */
+static int pcnet32_probe1(struct device *dev, unsigned int ioaddr, unsigned char irq_line)
+{
+ struct pcnet32_private *lp;
+ void *lp_mem;
+ int i;
+ const char *chipname;
+
+ /* check if there is really a pcnet chip on that ioaddr */
+ if ((inb(ioaddr + 14) != 0x57) || (inb(ioaddr + 15) != 0x57))
+ return -ENODEV;
+
+ inw(ioaddr+PCNET32_RESET); /* Reset the PCNET32 */
+
+ outw(0x0000, ioaddr+PCNET32_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+PCNET32_DATA) != 0x0004)
+ return -ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+PCNET32_ADDR);
+ if (inw(ioaddr+PCNET32_ADDR) != 88) {
+ /* should never happen */
+ return -ENODEV;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+PCNET32_DATA);
+ outw(89, ioaddr+PCNET32_ADDR);
+ chip_version |= inw(ioaddr+PCNET32_DATA) << 16;
+ if (pcnet32_debug > 2)
+ printk(" PCnet chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (i = 0; chip_table[i].id_number; i++)
+ if (chip_table[i].id_number == chip_version)
+ break;
+ chipname = chip_table[i].name;
+ }
+
+ dev = init_etherdev(dev, 0);
+
+ printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ printk("\n");
+
+ dev->base_addr = ioaddr;
+ request_region(ioaddr, PCNET32_TOTAL_SIZE, dev->name);
+
+ /* Data structures used by the PCnet32 are 16byte aligned and DMAble.
+ Over-allocate by 15 bytes and round the pointer up. */
+ lp_mem = kmalloc(sizeof(*lp)+15, GFP_DMA | GFP_KERNEL);
+ if (lp_mem == NULL) {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return -ENOMEM;
+ }
+ lp = (struct pcnet32_private *)(((unsigned long)lp_mem + 15) & ~15);
+
+ memset(lp, 0, sizeof(*lp));
+ dev->priv = lp;
+
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long) kmalloc(PKT_BUF_SZ*RX_RING_SIZE, GFP_DMA | GFP_KERNEL);
+ if (lp->rx_buffs == 0) {
+ dev->priv = NULL;
+ kfree(lp_mem);
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return -ENOMEM;
+ }
+
+ /* Only now that all allocations succeeded, link the device into
+ the list that cleanup_module() walks. */
+ lp->next_module = root_pcnet32_dev;
+ root_pcnet32_dev = dev;
+
+ lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+ lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (u32)le32_to_cpu(virt_to_bus(lp->rx_ring));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(virt_to_bus(lp->tx_ring));
+
+ /* switch pcnet32 to 32bit mode */
+ outw(0x0014, ioaddr+PCNET32_ADDR);
+ outw(0x0002, ioaddr+PCNET32_BUS_IF);
+
+ /* CSR1/CSR2: low and high halves of the init block bus address. */
+ outw(0x0001, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) & 0xffff, ioaddr+PCNET32_DATA);
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) >> 16, ioaddr+PCNET32_DATA);
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+
+ dev->irq = irq_line;
+
+ if (pcnet32_debug > 0)
+ printk("%s", version);
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->open = &pcnet32_open;
+ dev->hard_start_xmit = &pcnet32_start_xmit;
+ dev->stop = &pcnet32_close;
+ dev->get_stats = &pcnet32_get_stats;
+ dev->set_multicast_list = &pcnet32_set_multicast_list;
+
+ /* Fill in the generic fields of the device structure. */
+ ether_setup(dev);
+ return 0;
+}
+
+
+/* Bring the interface up: claim the (shared) IRQ, reset the chip
+   into 32bit mode, rebuild both rings and start the transmitter and
+   receiver. Returns 0 on success or -EAGAIN if no IRQ is assigned
+   or request_irq() fails. */
+static int
+pcnet32_open(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &pcnet32_interrupt, SA_SHIRQ,
+ dev->name, (void *)dev)) {
+ return -EAGAIN;
+ }
+ MOD_INC_USE_COUNT;
+
+ /* Reset the PCNET32 */
+ inw(ioaddr+PCNET32_RESET);
+
+ /* switch pcnet32 to 32bit mode */
+ outw(0x0014, ioaddr+PCNET32_ADDR);
+ outw(0x0002, ioaddr+PCNET32_BUS_IF);
+
+ /* Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ /* only touch autoselect bit */
+ outw(inw(ioaddr+PCNET32_BUS_IF) | 0x0002, ioaddr+PCNET32_BUS_IF);
+
+ if (pcnet32_debug > 1)
+ printk("%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq,
+ (u32) virt_to_bus(lp->tx_ring),
+ (u32) virt_to_bus(lp->rx_ring),
+ (u32) virt_to_bus(&lp->init_block));
+
+ /* check for ATLAS T1/E1 LAW card (matched by OUI 00:e0:75) */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && dev->dev_addr[2] == 0x75) {
+ /* select GPSI mode */
+ lp->init_block.mode = 0x0100;
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ outw(inw(ioaddr+PCNET32_BUS_IF) & ~2, ioaddr+PCNET32_BUS_IF);
+ /* switch full duplex on */
+ outw(0x0009, ioaddr+PCNET32_ADDR);
+ outw(inw(ioaddr+PCNET32_BUS_IF) | 1, ioaddr+PCNET32_BUS_IF);
+ } else
+ lp->init_block.mode = 0x0000;
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ pcnet32_init_ring(dev);
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ /* CSR1/CSR2: init block bus address, low then high half. */
+ outw(0x0001, ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) &0xffff, ioaddr+PCNET32_DATA);
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) >> 16, ioaddr+PCNET32_DATA);
+
+ /* CSR4 = 0x0915. NOTE(review): verify this bit pattern against the
+ 79C970 databook; it is written blind here. */
+ outw(0x0004, ioaddr+PCNET32_ADDR);
+ outw(0x0915, ioaddr+PCNET32_DATA);
+
+ /* CSR0 = INIT: make the chip fetch the init block. */
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ outw(0x0001, ioaddr+PCNET32_DATA);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ i = 0;
+ /* Bounded poll for the InitDone bit (0x0100) in CSR0. */
+ while (i++ < 100)
+ if (inw(ioaddr+PCNET32_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ /* CSR0 = 0x0042: start the chip with interrupts enabled. */
+ outw(0x0042, ioaddr+PCNET32_DATA);
+
+ if (pcnet32_debug > 2)
+ printk("%s: PCNET32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) virt_to_bus(&lp->init_block), inw(ioaddr+PCNET32_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+/* Release every sk_buff still queued on the Tx ring; the dropped
+   packets are left for the higher-level protocols to retransmit. */
+static void
+pcnet32_purge_tx_ring(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int entry;
+
+ for (entry = 0; entry < TX_RING_SIZE; entry++) {
+ struct sk_buff *skb = lp->tx_skbuff[entry];
+
+ if (skb == NULL)
+ continue;
+ lp->tx_skbuff[entry] = NULL;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+}
+
+
+/* Initialize the PCNET32 Rx and Tx rings and refresh the init block.
+   Rx descriptors get their buffer addresses and are handed to the
+   chip (ownership bit 0x8000 set); Tx descriptors are cleared so the
+   host owns them. Called from open and restart paths.
+   Fix vs. original: the tlen_rlen store lacked the le16_to_cpu
+   conversion used for the same field in pcnet32_probe1() -- a no-op
+   on little-endian, but wrong on ppc (which this file supports via
+   its __powerpc__ guard). */
+static void
+pcnet32_init_ring(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int i;
+
+ lp->lock = 0;
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_ring[i].base = (u32)le32_to_cpu(virt_to_bus((char *)lp->rx_buffs + i*PKT_BUF_SZ));
+ /* Buffer length is stored as a two's-complement negative. */
+ lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+ lp->rx_ring[i].status = le16_to_cpu(0x8000); /* Chip owns the entry. */
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_ring[i].base = 0;
+ lp->tx_ring[i].status = 0;
+ }
+
+ lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.rx_ring = (u32)le32_to_cpu(virt_to_bus(lp->rx_ring));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(virt_to_bus(lp->tx_ring));
+}
+
+/* Stop-and-go recovery: drop pending Tx packets, rebuild both rings,
+   re-run chip initialization, then write `csr0_bits' into CSR0 to
+   resume. `must_reinit' is accepted for interface compatibility but
+   unused, exactly as before. */
+static void
+pcnet32_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ unsigned int ioaddr = dev->base_addr;
+ int wait;
+
+ pcnet32_purge_tx_ring(dev);
+ pcnet32_init_ring(dev);
+
+ /* Select CSR0, issue INIT, then poll (bounded) for InitDone. */
+ outw(0x0000, ioaddr + PCNET32_ADDR);
+ outw(0x0001, ioaddr + PCNET32_DATA);
+ for (wait = 0; wait < 100; wait++)
+ if (inw(ioaddr + PCNET32_DATA) & 0x0100)
+ break;
+
+ outw(csr0_bits, ioaddr + PCNET32_DATA);
+}
+
+/* Queue one packet for transmission (dev->hard_start_xmit).
+   Returns 0 on success, 1 to make the network layer requeue and
+   retry later (2.0.x convention). A call while dev->tbusy has been
+   set for 20+ ticks is treated as a transmit timeout: the chip is
+   stopped, the rings are dumped (non-final builds) and everything
+   is restarted, dropping in-flight packets. */
+static int
+pcnet32_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 20)
+ return 1;
+ outw(0, ioaddr+PCNET32_ADDR);
+ printk("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw(ioaddr+PCNET32_DATA));
+ outw(0x0004, ioaddr+PCNET32_DATA); /* CSR0 = STOP. */
+ lp->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0 ; i < RX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length, (unsigned)lp->rx_ring[i].status);
+ for (i = 0 ; i < TX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc, (unsigned)lp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+ pcnet32_restart(dev, 0x0042, 1);
+
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+ if (pcnet32_debug > 3) {
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ printk("%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+PCNET32_DATA));
+ outw(0x0000, ioaddr+PCNET32_DATA);
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ /* Second lock guards the ring bookkeeping against the IRQ path. */
+ if (test_and_set_bit(0, (void*)&lp->lock) != 0) {
+ if (pcnet32_debug > 0)
+ printk("%s: tx queue lock!.\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return 1;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* Length is stored as a two's-complement negative. */
+ lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+
+ lp->tx_ring[entry].misc = 0x00000000;
+
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = (u32)le32_to_cpu(virt_to_bus(skb->data));
+ /* 0x8300 = ownership bit 0x8000 plus (presumably) the start/end-of-
+ packet bits -- this store hands the descriptor to the chip, so it
+ must come after base/length above. */
+ lp->tx_ring[entry].status = le16_to_cpu(0x8300);
+
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ outw(0x0048, ioaddr+PCNET32_DATA);
+
+ dev->trans_start = jiffies;
+
+ /* Drop the ring lock and decide tbusy with interrupts off, so the
+ IRQ handler's view of tx_full stays consistent. */
+ save_flags(flags);
+ cli();
+ lp->lock = 0;
+ if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
+ clear_bit(0, (void*)&dev->tbusy);
+ else
+ lp->tx_full = 1;
+ restore_flags(flags);
+
+ return 0;
+}
+
+/* The PCNET32 interrupt handler. Loops while any Rx/Tx/error summary
+   bit is pending in CSR0, bounded by max_interrupt_work iterations:
+   services receives, reaps completed Tx descriptors, logs errors, and
+   restarts the chip after fatal (FIFO/bus-master) failures. */
+static void
+pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)dev_id;
+ struct pcnet32_private *lp;
+ unsigned int csr0, ioaddr;
+ int boguscnt = max_interrupt_work;
+ int must_restart;
+
+ if (dev == NULL) {
+ printk ("pcnet32_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct pcnet32_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ /* Select CSR0; all reads/writes below go through the data port. */
+ outw(0x00, dev->base_addr + PCNET32_ADDR);
+ /* 0x8600: error summary plus Rx/Tx interrupt bits. */
+ while ((csr0 = inw(dev->base_addr + PCNET32_DATA)) & 0x8600
+ && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + PCNET32_DATA);
+
+ must_restart = 0;
+
+ if (pcnet32_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + PCNET32_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ pcnet32_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ /* Negative status => ownership bit still set. */
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+ /* There was an major error, log it. */
+ int err_status = le16_to_cpu(lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000) lp->stats.tx_window_errors++;
+ if (err_status & 0x40000000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x1800)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ clear_bit(0, (void*)&dev->tbusy);
+ mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * this happens when our receive ring is full. This
+ * shouldn't be a problem as we will see normal rx
+ * interrupts for the frames in the receive ring. But
+ * there are some PCI chipsets (I can reproduce this
+ * on SP3G with Intel saturn chipset) which have some-
+ * times problems and will fill up the receive ring
+ * with error descriptors. In this situation we don't
+ * get a rx interrupt, but a missed frame interrupt
+ * sooner or later. So we try to clean up our receive
+ * ring here.
+ */
+ pcnet32_rx(dev);
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + PCNET32_ADDR);
+ outw(0x0004, dev->base_addr + PCNET32_DATA);
+ pcnet32_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + PCNET32_ADDR);
+ outw(0x7940, dev->base_addr + PCNET32_DATA);
+
+ if (pcnet32_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + PCNET32_ADDR),
+ inw(dev->base_addr + PCNET32_DATA));
+
+ dev->interrupt = 0;
+ return;
+}
+
+/* Service the receive ring: for each descriptor the chip has handed
+   back (ownership bit clear), pass the frame up the stack or account
+   the error, then return the descriptor to the chip. Always returns 0. */
+static int
+pcnet32_rx(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+ /* High status byte: 0x03 is (presumably) start+end-of-packet,
+ i.e. a complete frame in a single buffer. */
+ int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net-2e.
+ The -4 strips the trailing 4-byte frame checksum. */
+ short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len < 60) {
+ printk("%s: Runt packet!\n",dev->name);
+ lp->stats.rx_errors++;
+ } else {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ /* Out of skbs: if nearly the whole ring is still
+ chip-owned, drop this frame and move on; either
+ way stop processing for now. */
+ for (i=0; i < RX_RING_SIZE; i++)
+ if ((short)le16_to_cpu(lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].status) < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)bus_to_virt(le32_to_cpu(lp->rx_ring[entry].base)),
+ pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+/* Take the interface down: mark it stopped, snapshot the missed-frame
+   counter, stop the chip and release the IRQ. Always returns 0. */
+static int
+pcnet32_close(struct device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+
+ dev->start = 0;
+ set_bit(0, (void*)&dev->tbusy);
+
+ /* CSR112 holds the missed-frame count. */
+ outw(112, ioaddr+PCNET32_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+PCNET32_DATA);
+
+ /* Switch back to CSR0 for the stop command below. */
+ outw(0, ioaddr+PCNET32_ADDR);
+
+ if (pcnet32_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+PCNET32_DATA));
+
+ /* We stop the PCNET32 here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+PCNET32_DATA);
+
+ free_irq(dev->irq, dev);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Return the accumulated statistics, first refreshing the missed-frame
+   count from CSR112. The register-address port is saved and restored
+   with interrupts disabled so a concurrent IRQ path is not confused. */
+static struct enet_statistics *pcnet32_get_stats(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ unsigned short prev_addr;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ prev_addr = inw(ioaddr + PCNET32_ADDR);
+ outw(112, ioaddr + PCNET32_ADDR); /* CSR112: missed frames. */
+ lp->stats.rx_missed_errors = inw(ioaddr + PCNET32_DATA);
+ outw(prev_addr, ioaddr + PCNET32_ADDR);
+ restore_flags(flags);
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   Promiscuous mode sets the 0x8000 bit in the init-block mode word;
+   otherwise the 64-bit hash filter is set to all-ones whenever any
+   multicast reception is wanted (no per-address hashing -- see the
+   FIXIT note). The chip is stopped and restarted to apply the new
+   init block, which drops any in-flight Tx packets.
+ */
+
+static void pcnet32_set_multicast_list(struct device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ lp->init_block.mode |= 0x8000;
+ } else {
+ int num_addrs=dev->mc_count;
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(lp->init_block.filter , (num_addrs == 0) ? 0 : -1, sizeof(lp->init_block.filter));
+ lp->init_block.mode &= ~0x8000;
+ }
+
+ outw(0, ioaddr+PCNET32_ADDR);
+ outw(0x0004, ioaddr+PCNET32_DATA); /* Temporarily stop the lance. */
+
+ pcnet32_restart(dev, 0x0042, 0); /* Resume normal operation */
+
+}
+
+
+#ifdef MODULE
+#if LINUX_VERSION_CODE > 0x20118
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("AMD PCnet/PCI ethernet driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+#endif
+
+/* An additional parameter that may be passed in... */
+static int debug = -1; /* -1 means "keep pcnet32_debug at its default". */
+
+/* Module entry point: apply the `debug' parameter and probe for
+   cards. Returns 0 on success or a negative errno from the probe. */
+int
+init_module(void)
+{
+ if (debug >= 0)
+ pcnet32_debug = debug;
+
+#ifdef CARDBUS
+ register_driver(&pcnet32_ops);
+ return 0;
+#else
+ return pcnet32_probe(NULL);
+#endif
+}
+
+/* Module exit: walk the list built in pcnet32_probe1(), unregister
+   each device and release its I/O region.
+   NOTE(review): dev->priv and lp->rx_buffs come from separate
+   kmallocs (with the priv pointer rounded up for alignment) and are
+   not freed here, only the device structure itself -- a small
+   one-shot leak at unload; confirm before relying on repeated
+   load/unload cycles. */
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&pcnet32_ops);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_pcnet32_dev) {
+ next_dev = ((struct pcnet32_private *)root_pcnet32_dev->priv)->next_module;
+ unregister_netdev(root_pcnet32_dev);
+ release_region(root_pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ kfree(root_pcnet32_dev);
+ root_pcnet32_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c pcnet32.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/rtl8139.c b/linux/src/drivers/net/rtl8139.c
new file mode 100644
index 0000000..e97c905
--- /dev/null
+++ b/linux/src/drivers/net/rtl8139.c
@@ -0,0 +1,1737 @@
+/* rtl8139.c: A RealTek RTL8129/8139 Fast Ethernet driver for Linux. */
+/*
+ Written and Copyright 1997-2003 by Donald Becker.
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139 PCI ethernet
+ chips.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/rtl8139.html
+
+ Twister-tuning table provided by Kinston <shangh@realtek.com.tw>.
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char versionA[] =
+"rtl8139.c:v1.23a 8/24/2003 Donald Becker, becker@scyld.com.\n";
+static const char versionB[] =
+" http://www.scyld.com/network/rtl8139.html\n";
+
+#ifndef USE_MEM_OPS
+/* Note: Register access width and timing restrictions apply in MMIO mode.
+ This updated driver should nominally work, but I/O mode is better tested. */
+#define USE_IO_OPS
+#endif
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. It
+ is efficient to update the hardware filter, but recalculating the table
+ for a long filter list is painful. */
+static int multicast_filter_limit = 32;
+
+/* Used to pass the full-duplex flag, etc. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Maximum size of the in-memory receive ring (smaller if no memory). */
+#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE 1536
+
+/* PCI Tuning Parameters
+ Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024. */
+#define RX_FIFO_THRESH 4 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 bytes */
+#define TX_DMA_BURST 4 /* Calculate as 16<<val. */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with full-sized Ethernet frames.
+ This is a cross-driver value that is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the RealTek RTL8129 series, the RealTek
+Fast Ethernet controllers for PCI and CardBus. This chip is used on many
+low-end boards, sometimes with custom chip labels.
+
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Rx Ring buffers
+
+The receive unit uses a single linear ring buffer rather than the more
+common (and more efficient) descriptor-based architecture. Incoming frames
+are sequentially stored into the Rx region, and the host copies them into
+skbuffs.
+
+Comment: While it is theoretically possible to process many frames in place,
+any delay in Rx processing would block the Rx ring and cause us to drop
+frames. It would be difficult to design a protocol stack where the data
+buffer could be recalled by the device driver.
+
+IIIb. Tx operation
+
+The RTL8129 uses a fixed set of four Tx descriptors in register space. Tx
+frames must be 32 bit aligned. Linux aligns the IP header on word
+boundaries, and 14 byte ethernet header means that almost all frames will
+need to be copied to an alignment buffer. The driver statically allocates
+the four alignment buffers at open() time.
+
+IVb. References
+
+http://www.realtek.com.tw/cn/cn.html
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+
+IVc. Errata
+
+*/
+
+
+static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int rtl_pwr_event(void *dev_instance, int event);
+
+enum chip_capability_flags {HAS_MII_XCVR=0x01, HAS_CHIP_XCVR=0x02,
+ HAS_LNK_CHNG=0x04, HAS_DESC=0x08};
+#ifdef USE_IO_OPS
+#define RTL8139_IOTYPE PCI_USES_MASTER|PCI_USES_IO |PCI_ADDR0
+#else
+#define RTL8139_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
+#endif
+#define RTL8129_CAPS HAS_MII_XCVR
+#define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
+#define RTL8139D_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG|HAS_DESC
+
+/* Note: Update the marked constant in _attach() if the RTL8139B entry moves.*/
+static struct pci_id_info pci_tbl[] = {
+ {"RealTek RTL8139C+, 64 bit high performance",
+ { 0x813910ec, 0xffffffff, 0,0, 0x20, 0xff},
+ RTL8139_IOTYPE, 0x80, RTL8139D_CAPS, },
+ {"RealTek RTL8139C Fast Ethernet",
+ { 0x813910ec, 0xffffffff, 0,0, 0x10, 0xff},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"RealTek RTL8129 Fast Ethernet", { 0x812910ec, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8129_CAPS, },
+ {"RealTek RTL8139 Fast Ethernet", { 0x813910ec, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"RealTek RTL8139B PCI/CardBus", { 0x813810ec, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"SMC1211TX EZCard 10/100 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Accton MPX5030 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"D-Link DFE-530TX+ (RealTek RTL8139C)",
+ { 0x13001186, 0xffffffff, 0x13011186, 0xffffffff,},
+ RTL8139_IOTYPE, 0x100, RTL8139_CAPS, },
+ {"D-Link DFE-538TX (RealTek RTL8139)", { 0x13001186, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"LevelOne FPC-0106Tx (RealTek RTL8139)", { 0x0106018a, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Compaq HNE-300 (RealTek RTL8139c)", { 0x8139021b, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Edimax EP-4103DL CardBus (RealTek RTL8139c)", { 0xab0613d1, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Siemens 1012v2 CardBus (RealTek RTL8139c)", { 0x101202ac, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info rtl8139_drv_id = {
+ "realtek", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
+ rtl8139_probe1, rtl_pwr_event };
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* The rest of these values should never change. */
+#define NUM_TX_DESC 4 /* Number of Tx descriptor registers. */
+
+/* Symbolic offsets to registers. */
+enum RTL8129_registers {
+ MAC0=0, /* Ethernet hardware address. */
+ MAR0=8, /* Multicast filter. */
+ TxStatus0=0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0=0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf=0x30, RxEarlyCnt=0x34, RxEarlyStatus=0x36,
+ ChipCmd=0x37, RxBufPtr=0x38, RxBufAddr=0x3A,
+ IntrMask=0x3C, IntrStatus=0x3E,
+ TxConfig=0x40, RxConfig=0x44,
+ Timer=0x48, /* A general-purpose counter. */
+ RxMissed=0x4C, /* 24 bits valid, write clears. */
+ Cfg9346=0x50, Config0=0x51, Config1=0x52,
+ FlashReg=0x54, GPPinData=0x58, GPPinDir=0x59, MII_SMI=0x5A, HltClk=0x5B,
+ MultiIntr=0x5C, TxSummary=0x60,
+ MII_BMCR=0x62, MII_BMSR=0x64, NWayAdvert=0x66, NWayLPAR=0x68,
+ NWayExpansion=0x6A,
+ /* Undocumented registers, but required for proper operation. */
+ FIFOTMS=0x70, /* FIFO Control and test. */
+ CSCR=0x74, /* Chip Status and Configuration Register. */
+ PARA78=0x78, PARA7c=0x7c, /* Magic transceiver parameter register. */
+};
+
+enum ChipCmdBits {
+ CmdReset=0x10, CmdRxEnb=0x08, CmdTxEnb=0x04, RxBufEmpty=0x01, };
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+ PCIErr=0x8000, PCSTimeout=0x4000,
+ RxFIFOOver=0x40, RxUnderrun=0x20, RxOverflow=0x10,
+ TxErr=0x08, TxOK=0x04, RxErr=0x02, RxOK=0x01,
+};
+enum TxStatusBits {
+ TxHostOwns=0x2000, TxUnderrun=0x4000, TxStatOK=0x8000,
+ TxOutOfWindow=0x20000000, TxAborted=0x40000000, TxCarrierLost=0x80000000,
+};
+enum RxStatusBits {
+ RxMulticast=0x8000, RxPhysical=0x4000, RxBroadcast=0x2000,
+ RxBadSymbol=0x0020, RxRunt=0x0010, RxTooLong=0x0008, RxCRCErr=0x0004,
+ RxBadAlign=0x0002, RxStatusOK=0x0001,
+};
+
+/* Twister tuning parameters from RealTek.
+ Completely undocumented, but required to tune bad links. */
+enum CSCRBits {
+ CSCR_LinkOKBit=0x0400, CSCR_LinkChangeBit=0x0800,
+ CSCR_LinkStatusBits=0x0f000, CSCR_LinkDownOffCmd=0x003c0,
+ CSCR_LinkDownCmd=0x0f3c0,
+};
+#define PARA78_default 0x78fa8388
+#define PARA7c_default 0xcb38de43 /* param[0][3] */
+#define PARA7c_xxx 0xcb38de43
+unsigned long param[4][4]={
+ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};
+
+#define PRIV_ALIGN 15 /* Desired alignment mask */
+struct rtl8129_private {
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ int msg_level;
+ int max_interrupt_work;
+
+ /* Receive state. */
+ unsigned char *rx_ring;
+ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
+ unsigned int rx_buf_len; /* Size (8K 16K 32K or 64KB) of the Rx ring */
+
+ /* Transmit state. */
+ unsigned int cur_tx, dirty_tx, tx_flag;
+ unsigned long tx_full; /* The Tx queue is full. */
+	/* The saved address of a sent-in-place packet/buffer, for dev_free_skb(). */
+ struct sk_buff* tx_skbuff[NUM_TX_DESC];
+ unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+
+ /* Receive filter state. */
+ unsigned int rx_config;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int cur_rx_mode;
+ int multicast_filter_limit;
+
+ /* Transceiver state. */
+ char phys[4]; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ char twistie, twist_row, twist_col; /* Twister tune state. */
+ u8 config1;
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int default_port; /* Last dev->if_port value. */
+};
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("RealTek RTL8129/8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+
+static int rtl8129_open(struct net_device *dev);
+static void rtl_hw_start(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location, int addr_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int val);
+static void rtl8129_timer(unsigned long data);
+static void rtl8129_tx_timeout(struct net_device *dev);
+static void rtl8129_init_ring(struct net_device *dev);
+static int rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int rtl8129_rx(struct net_device *dev);
+static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void rtl_error(struct net_device *dev, int status, int link_status);
+static int rtl8129_close(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *rtl8129_get_stats(struct net_device *dev);
+static inline u32 ether_crc(int length, unsigned char *data);
+static void set_rx_mode(struct net_device *dev);
+
+
+/* A list of all installed RTL8129 devices, for removing the driver module. */
+static struct net_device *root_rtl8129_dev = NULL;
+
+#ifndef MODULE
+int rtl8139_probe(struct net_device *dev)
+{
+ static int did_version = 0; /* Already printed version info. */
+
+ if (debug >= NETIF_MSG_DRV /* Emit version even if no cards detected. */
+ && did_version++ == 0)
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ return pci_drv_register(&rtl8139_drv_id, dev);
+}
+#endif
+
+static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int found_cnt)
+{
+ struct net_device *dev;
+ struct rtl8129_private *np;
+ void *priv_mem;
+ int i, option = found_cnt < MAX_UNITS ? options[found_cnt] : 0;
+ int config1;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
+ dev->name, pci_tbl[chip_idx].name, ioaddr, irq);
+
+ /* Bring the chip out of low-power mode. */
+ config1 = inb(ioaddr + Config1);
+ if (pci_tbl[chip_idx].drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
+ outb(config1 & ~0x03, ioaddr + Config1);
+
+ {
+ int addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((u16 *)(dev->dev_addr))[i] =
+ le16_to_cpu(read_eeprom(ioaddr, i+7, addr_len));
+ }
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_rtl8129_dev;
+ root_rtl8129_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ np->config1 = config1;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+ if (np->drv_flags & HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < sizeof(np->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
+ "advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceivers found! Assuming SYM "
+ "transceiver.\n",
+ dev->name);
+ np->phys[0] = 32;
+ }
+ } else
+ np->phys[0] = 32;
+
+ /* Put the chip into low-power mode. */
+ outb(0xC0, ioaddr + Cfg9346);
+ if (np->drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
+ outb(0x03, ioaddr + Config1);
+
+ outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ np->full_duplex = (option & 0x220) ? 1 : 0;
+ np->default_port = option & 0x330;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+
+ if (found_cnt < MAX_UNITS && full_duplex[found_cnt] > 0)
+ np->full_duplex = full_duplex[found_cnt];
+
+ if (np->full_duplex) {
+ printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+ /* Changing the MII-advertised media might prevent re-connection. */
+ np->duplex_lock = 1;
+ }
+ if (np->default_port) {
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (option & 0x220 ? "full" : "half"));
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ /* The rtl81x9-specific entries in the device structure. */
+ dev->open = &rtl8129_open;
+ dev->hard_start_xmit = &rtl8129_start_xmit;
+ dev->stop = &rtl8129_close;
+ dev->get_stats = &rtl8129_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ return dev;
+}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
+ */
+
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
+static int read_eeprom(long ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ long ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ outb(EE_ENB & ~EE_CS, ee_addr);
+ outb(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ }
+ outb(EE_ENB, ee_addr);
+ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ outb(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outb(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(~EE_CS, ee_addr);
+ return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol.
+   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay(mdio_addr) inb(mdio_addr)
+
+static char mii_2_8139_map[8] = {MII_BMCR, MII_BMSR, 0, 0, NWayAdvert,
+ NWayLPAR, NWayExpansion, 0 };
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync(long mdio_addr)
+{
+ int i;
+
+ for (i = 32; i >= 0; i--) {
+ outb(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long mdio_addr = dev->base_addr + MII_SMI;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int retval = 0;
+ int i;
+
+ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
+ return location < 8 && mii_2_8139_map[location] ?
+ inw(dev->base_addr + mii_2_8139_map[location]) : 0;
+ }
+ mdio_sync(mdio_addr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ outb(MDIO_DIR | dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((inb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ outb(MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ long mdio_addr = dev->base_addr + MII_SMI;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
+ long ioaddr = dev->base_addr;
+ if (location == 0) {
+ outb(0xC0, ioaddr + Cfg9346);
+ outw(value, ioaddr + MII_BMCR);
+ outb(0x00, ioaddr + Cfg9346);
+ } else if (location < 8 && mii_2_8139_map[location])
+ outw(value, ioaddr + mii_2_8139_map[location]);
+ return;
+ }
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ outb(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(dataval | MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+
+static int rtl8129_open(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int rx_buf_len_idx;
+
+ MOD_INC_USE_COUNT;
+
+ /* The Rx ring allocation size is 2^N + delta, which is worst-case for
+ the kernel binary-buddy allocation. We allocate the Tx bounce buffers
+ at the same time to use some of the otherwise wasted space.
+ The delta of +16 is required for dribble-over because the receiver does
+ not wrap when the packet terminates just beyond the end of the ring. */
+ rx_buf_len_idx = RX_BUF_LEN_IDX;
+ do {
+ tp->rx_buf_len = 8192 << rx_buf_len_idx;
+ tp->rx_ring = kmalloc(tp->rx_buf_len + 16 +
+ (TX_BUF_SIZE * NUM_TX_DESC), GFP_KERNEL);
+ } while (tp->rx_ring == NULL && --rx_buf_len_idx >= 0);
+
+ if (tp->rx_ring == NULL) {
+ if (debug > 0)
+ printk(KERN_ERR "%s: Couldn't allocate a %d byte receive ring.\n",
+ dev->name, tp->rx_buf_len);
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+ tp->tx_bufs = tp->rx_ring + tp->rx_buf_len + 16;
+
+ rtl8129_init_ring(dev);
+ tp->full_duplex = tp->duplex_lock;
+ tp->tx_flag = (TX_FIFO_THRESH<<11) & 0x003f0000;
+ tp->rx_config =
+ (RX_FIFO_THRESH << 13) | (rx_buf_len_idx << 11) | (RX_DMA_BURST<<8);
+
+ if (request_irq(dev->irq, &rtl8129_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ rtl_hw_start(dev);
+ netif_start_tx_queue(dev);
+
+ if (tp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG"%s: rtl8129_open() ioaddr %#lx IRQ %d"
+ " GP Pins %2.2x %s-duplex.\n",
+ dev->name, ioaddr, dev->irq, inb(ioaddr + GPPinData),
+ tp->full_duplex ? "full" : "half");
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&tp->timer);
+ tp->timer.expires = jiffies + 3*HZ;
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = &rtl8129_timer;
+ add_timer(&tp->timer);
+
+ return 0;
+}
+
+/* Start the hardware at open or resume. */
+static void rtl_hw_start(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Soft reset the chip. */
+ outb(CmdReset, ioaddr + ChipCmd);
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--)
+ if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
+ break;
+ /* Restore our idea of the MAC address. */
+ outb(0xC0, ioaddr + Cfg9346);
+ outl(cpu_to_le32(*(u32*)(dev->dev_addr + 0)), ioaddr + MAC0 + 0);
+ outl(cpu_to_le32(*(u32*)(dev->dev_addr + 4)), ioaddr + MAC0 + 4);
+
+ /* Hmmm, do these belong here? */
+ tp->cur_rx = 0;
+
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+ outl(tp->rx_config, ioaddr + RxConfig);
+	/* Check this value: the documentation contradicts itself.  Is the
+	   IFG correct with bit 28:27 zero, or with |0x03000000 ? */
+ outl((TX_DMA_BURST<<8), ioaddr + TxConfig);
+
+ /* This is check_duplex() */
+ if (tp->phys[0] >= 0 || (tp->drv_flags & HAS_MII_XCVR)) {
+ u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+ if (mii_reg5 == 0xffff)
+ ; /* Not there */
+ else if ((mii_reg5 & 0x0100) == 0x0100
+ || (mii_reg5 & 0x00C0) == 0x0040)
+ tp->full_duplex = 1;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: Setting %s%s-duplex based on"
+ " auto-negotiated partner ability %4.4x.\n", dev->name,
+ mii_reg5 == 0 ? "" :
+ (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
+ tp->full_duplex ? "full" : "half", mii_reg5);
+ }
+
+ if (tp->drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
+ outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+
+ outl(virt_to_bus(tp->rx_ring), ioaddr + RxBuf);
+ /* Start the chip's Tx and Rx process. */
+ outl(0, ioaddr + RxMissed);
+ set_rx_mode(dev);
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
+ | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
+
+}
+
+static void rtl8129_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+
+ if (! np->duplex_lock && mii_reg5 != 0xffff) {
+ int duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ printk(KERN_INFO "%s: Using %s-duplex based on MII #%d link"
+ " partner ability of %4.4x.\n", dev->name,
+ np->full_duplex ? "full" : "half", np->phys[0], mii_reg5);
+ if (np->drv_flags & HAS_MII_XCVR) {
+ outb(0xC0, ioaddr + Cfg9346);
+ outb(np->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+ }
+ }
+ }
+#if LINUX_VERSION_CODE < 0x20300
+ /* Check for bogusness. */
+ if (inw(ioaddr + IntrStatus) & (TxOK | RxOK)) {
+ int status = inw(ioaddr + IntrStatus); /* Double check */
+ if (status & (TxOK | RxOK) && ! dev->interrupt) {
+ printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
+ dev->name, status);
+ rtl8129_interrupt(dev->irq, dev, 0);
+ }
+ }
+ if (dev->tbusy && jiffies - dev->trans_start >= 2*TX_TIMEOUT)
+ rtl8129_tx_timeout(dev);
+#else
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ rtl8129_tx_timeout(dev);
+ }
+#endif
+
+#if defined(RTL_TUNE_TWISTER)
+ /* This is a complicated state machine to configure the "twister" for
+ impedance/echos based on the cable length.
+ All of this is magic and undocumented.
+ */
+ if (np->twistie) switch(np->twistie) {
+ case 1: {
+ if (inw(ioaddr + CSCR) & CSCR_LinkOKBit) {
+ /* We have link beat, let us tune the twister. */
+ outw(CSCR_LinkDownOffCmd, ioaddr + CSCR);
+ np->twistie = 2; /* Change to state 2. */
+ next_tick = HZ/10;
+ } else {
+ /* Just put in some reasonable defaults for when beat returns. */
+ outw(CSCR_LinkDownCmd, ioaddr + CSCR);
+ outl(0x20,ioaddr + FIFOTMS); /* Turn on cable test mode. */
+ outl(PARA78_default ,ioaddr + PARA78);
+ outl(PARA7c_default ,ioaddr + PARA7c);
+ np->twistie = 0; /* Bail from future actions. */
+ }
+ } break;
+ case 2: {
+ /* Read how long it took to hear the echo. */
+ int linkcase = inw(ioaddr + CSCR) & CSCR_LinkStatusBits;
+ if (linkcase == 0x7000) np->twist_row = 3;
+ else if (linkcase == 0x3000) np->twist_row = 2;
+ else if (linkcase == 0x1000) np->twist_row = 1;
+ else np->twist_row = 0;
+ np->twist_col = 0;
+ np->twistie = 3; /* Change to state 2. */
+ next_tick = HZ/10;
+ } break;
+ case 3: {
+ /* Put out four tuning parameters, one per 100msec. */
+ if (np->twist_col == 0) outw(0, ioaddr + FIFOTMS);
+ outl(param[(int)np->twist_row][(int)np->twist_col], ioaddr + PARA7c);
+ next_tick = HZ/10;
+ if (++np->twist_col >= 4) {
+ /* For short cables we are done.
+ For long cables (row == 3) check for mistune. */
+ np->twistie = (np->twist_row == 3) ? 4 : 0;
+ }
+ } break;
+ case 4: {
+ /* Special case for long cables: check for mistune. */
+ if ((inw(ioaddr + CSCR) & CSCR_LinkStatusBits) == 0x7000) {
+ np->twistie = 0;
+ break;
+ } else {
+ outl(0xfb38de03, ioaddr + PARA7c);
+ np->twistie = 5;
+ next_tick = HZ/10;
+ }
+ } break;
+ case 5: {
+ /* Retune for shorter cable (column 2). */
+ outl(0x20,ioaddr + FIFOTMS);
+ outl(PARA78_default, ioaddr + PARA78);
+ outl(PARA7c_default, ioaddr + PARA7c);
+ outl(0x00,ioaddr + FIFOTMS);
+ np->twist_row = 2;
+ np->twist_col = 0;
+ np->twistie = 3;
+ next_tick = HZ/10;
+ } break;
+ }
+#endif
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ if (np->drv_flags & HAS_MII_XCVR)
+ printk(KERN_DEBUG"%s: Media selection tick, GP pins %2.2x.\n",
+ dev->name, inb(ioaddr + GPPinData));
+ else
+ printk(KERN_DEBUG"%s: Media selection tick, Link partner %4.4x.\n",
+ dev->name, inw(ioaddr + NWayLPAR));
+ printk(KERN_DEBUG"%s: Other registers are IntMask %4.4x "
+ "IntStatus %4.4x RxStatus %4.4x.\n",
+ dev->name, inw(ioaddr + IntrMask), inw(ioaddr + IntrStatus),
+ (int)inl(ioaddr + RxEarlyStatus));
+ printk(KERN_DEBUG"%s: Chip config %2.2x %2.2x.\n",
+ dev->name, inb(ioaddr + Config0), inb(ioaddr + Config1));
+ }
+
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
/* Handle a transmit timeout: dump diagnostic chip/MII state, reset the
   software Tx ring (dropping any queued packets), and restart the chip.
   Called when a packet handed to the hardware has not completed within
   TX_TIMEOUT jiffies. */
static void rtl8129_tx_timeout(struct net_device *dev)
{
    struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int status = inw(ioaddr + IntrStatus);
    int mii_reg, i;

    /* Could be wrapped with if (tp->msg_level & NETIF_MSG_TX_ERR) */
    printk(KERN_ERR "%s: Transmit timeout, status %2.2x %4.4x "
           "media %2.2x.\n",
           dev->name, inb(ioaddr + ChipCmd), status, inb(ioaddr + GPPinData));

    /* Pending Tx/Rx events with no interrupt delivered suggests the IRQ
       line itself is blocked rather than a chip-level Tx hang. */
    if (status & (TxOK | RxOK)) {
        printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
               dev->name, status);
    }

    /* Disable interrupts by clearing the interrupt mask. */
    outw(0x0000, ioaddr + IntrMask);
    /* Emit info to figure out what went wrong. */
    printk(KERN_DEBUG "%s: Tx queue start entry %d dirty entry %d%s.\n",
           dev->name, tp->cur_tx, tp->dirty_tx, tp->tx_full ? ", full" : "");
    for (i = 0; i < NUM_TX_DESC; i++)
        printk(KERN_DEBUG "%s: Tx descriptor %d is %8.8x.%s\n",
               dev->name, i, (int)inl(ioaddr + TxStatus0 + i*4),
               i == tp->dirty_tx % NUM_TX_DESC ? " (queue head)" : "");
    /* Dump the first 8 MII management registers of the active transceiver. */
    printk(KERN_DEBUG "%s: MII #%d registers are:", dev->name, tp->phys[0]);
    for (mii_reg = 0; mii_reg < 8; mii_reg++)
        printk(" %4.4x", mdio_read(dev, tp->phys[0], mii_reg));
    printk(".\n");

    /* Stop a shared interrupt from scavenging while we are. */
    tp->dirty_tx = tp->cur_tx = 0;
    /* Dump the unsent Tx packets. */
    for (i = 0; i < NUM_TX_DESC; i++) {
        if (tp->tx_skbuff[i]) {
            dev_free_skb(tp->tx_skbuff[i]);
            tp->tx_skbuff[i] = 0;
            tp->stats.tx_dropped++;
        }
    }
    /* Full hardware restart re-enables interrupts and the Rx/Tx units. */
    rtl_hw_start(dev);
    netif_unpause_tx_queue(dev);
    tp->tx_full = 0;
    return;
}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+rtl8129_init_ring(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int i;
+
+ tp->tx_full = 0;
+ tp->dirty_tx = tp->cur_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ tp->tx_skbuff[i] = 0;
+ tp->tx_buf[i] = &tp->tx_bufs[i*TX_BUF_SIZE];
+ }
+}
+
/* Queue a packet for transmission.  Returns 0 on success, 1 if the Tx
   queue was busy (caller will requeue).  Packets whose data is not
   4-byte aligned are bounced through a driver-owned aligned buffer,
   since the chip requires dword-aligned Tx addresses. */
static int
rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int entry;

    if (netif_pause_tx_queue(dev) != 0) {
        /* This watchdog code is redundant with the media monitor timer. */
        if (jiffies - dev->trans_start > TX_TIMEOUT)
            rtl8129_tx_timeout(dev);
        return 1;
    }

    /* Calculate the next Tx descriptor entry. */
    entry = tp->cur_tx % NUM_TX_DESC;

    tp->tx_skbuff[entry] = skb;
    if ((long)skb->data & 3) {          /* Must use alignment buffer. */
        memcpy(tp->tx_buf[entry], skb->data, skb->len);
        outl(virt_to_bus(tp->tx_buf[entry]), ioaddr + TxAddr0 + entry*4);
    } else
        outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + entry*4);
    /* Note: the chip doesn't have auto-pad!
       Writing TxStatus starts the transmit; short frames are padded to
       ETH_ZLEN by sending that many bytes from the buffer. */
    outl(tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
         ioaddr + TxStatus0 + entry*4);

    /* There is a race condition here -- we might read dirty_tx, take an
       interrupt that clears the Tx queue, and only then set tx_full.
       So we do this in two phases. */
    if (++tp->cur_tx - tp->dirty_tx >= NUM_TX_DESC) {
        set_bit(0, &tp->tx_full);
        /* Re-check after setting the flag: if the ISR drained the ring in
           the window, undo the stop. */
        if (tp->cur_tx - (volatile unsigned int)tp->dirty_tx < NUM_TX_DESC) {
            clear_bit(0, &tp->tx_full);
            netif_unpause_tx_queue(dev);
        } else
            netif_stop_tx_queue(dev);
    } else
        netif_unpause_tx_queue(dev);

    dev->trans_start = jiffies;
    if (tp->msg_level & NETIF_MSG_TX_QUEUED)
        printk(KERN_DEBUG"%s: Queued Tx packet at %p size %d to slot %d.\n",
               dev->name, skb->data, (int)skb->len, entry);

    return 0;
}
+
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Loops until no interesting status bits remain or
   max_interrupt_work iterations have elapsed (runaway-IRQ guard). */
static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
    struct rtl8129_private *tp = np;
    int boguscnt = np->max_interrupt_work;
    long ioaddr = dev->base_addr;
    int link_changed = 0; /* Grrr, avoid bogus "uninitialized" warning */

#if defined(__i386__) && LINUX_VERSION_CODE < 0x20123
    /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
    if (test_and_set_bit(0, (void*)&dev->interrupt)) {
        printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
               dev->name);
        dev->interrupt = 0;     /* Avoid halting machine. */
        return;
    }
#endif

    do {
        int status = inw(ioaddr + IntrStatus);
        /* Acknowledge all of the current interrupt sources ASAP, but
           first get an additional status bit from CSCR: the link-change
           indication must be sampled before the ack clears RxUnderrun. */
        if (status & RxUnderrun)
            link_changed = inw(ioaddr+CSCR) & CSCR_LinkChangeBit;
        outw(status, ioaddr + IntrStatus);

        if (tp->msg_level & NETIF_MSG_INTR)
            printk(KERN_DEBUG"%s: interrupt  status=%#4.4x new intstat=%#4.4x.\n",
                   dev->name, status, inw(ioaddr + IntrStatus));

        if ((status & (PCIErr|PCSTimeout|RxUnderrun|RxOverflow|RxFIFOOver
                       |TxErr|TxOK|RxErr|RxOK)) == 0)
            break;

        if (status & (RxOK|RxUnderrun|RxOverflow|RxFIFOOver))/* Rx interrupt */
            rtl8129_rx(dev);

        if (status & (TxOK | TxErr)) {
            unsigned int dirty_tx = tp->dirty_tx;

            /* Scavenge completed Tx slots in order, oldest first. */
            while (tp->cur_tx - dirty_tx > 0) {
                int entry = dirty_tx % NUM_TX_DESC;
                int txstatus = inl(ioaddr + TxStatus0 + entry*4);

                if ( ! (txstatus & (TxStatOK | TxUnderrun | TxAborted)))
                    break;          /* It still hasn't been Txed */

                /* Note: TxCarrierLost is always asserted at 100mbps. */
                if (txstatus & (TxOutOfWindow | TxAborted)) {
                    /* There was an major error, log it. */
                    if (tp->msg_level & NETIF_MSG_TX_ERR)
                        printk(KERN_NOTICE"%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, txstatus);
                    tp->stats.tx_errors++;
                    if (txstatus&TxAborted) {
                        tp->stats.tx_aborted_errors++;
                        /* Restart the aborted transmitter. */
                        outl(TX_DMA_BURST << 8, ioaddr + TxConfig);
                    }
                    if (txstatus&TxCarrierLost) tp->stats.tx_carrier_errors++;
                    if (txstatus&TxOutOfWindow) tp->stats.tx_window_errors++;
#ifdef ETHER_STATS
                    if ((txstatus & 0x0f000000) == 0x0f000000)
                        tp->stats.collisions16++;
#endif
                } else {
                    if (tp->msg_level & NETIF_MSG_TX_DONE)
                        printk(KERN_DEBUG "%s: Transmit done, Tx status"
                               " %8.8x.\n", dev->name, txstatus);
                    if (txstatus & TxUnderrun) {
                        /* Add 64 to the Tx FIFO threshold. */
                        if (tp->tx_flag < 0x00300000)
                            tp->tx_flag += 0x00020000;
                        tp->stats.tx_fifo_errors++;
                    }
                    tp->stats.collisions += (txstatus >> 24) & 15;
#if LINUX_VERSION_CODE > 0x20119
                    tp->stats.tx_bytes += txstatus & 0x7ff;
#endif
                    tp->stats.tx_packets++;
                }

                /* Free the original skb. */
                dev_free_skb_irq(tp->tx_skbuff[entry]);
                tp->tx_skbuff[entry] = 0;
                if (test_bit(0, &tp->tx_full)) {
                    /* The ring is no longer full, clear tbusy. */
                    clear_bit(0, &tp->tx_full);
                    netif_resume_tx_queue(dev);
                }
                dirty_tx++;
            }

#ifndef final_version
            /* Sanity check: resync if the queue pointers ever diverge by
               more than the ring size. */
            if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
                printk(KERN_ERR"%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dev->name, dirty_tx, tp->cur_tx, (int)tp->tx_full);
                dirty_tx += NUM_TX_DESC;
            }
#endif
            tp->dirty_tx = dirty_tx;
        }

        /* Check uncommon events with one test. */
        if (status & (PCIErr|PCSTimeout |RxUnderrun|RxOverflow|RxFIFOOver
                      |TxErr|RxErr)) {
            if (status == 0xffff)           /* Missing chip! */
                break;
            rtl_error(dev, status, link_changed);
        }

        if (--boguscnt < 0) {
            printk(KERN_WARNING"%s: Too much work at interrupt, "
                   "IntrStatus=0x%4.4x.\n",
                   dev->name, status);
            /* Clear all interrupt sources. */
            outw(0xffff, ioaddr + IntrStatus);
            break;
        }
    } while (1);

    if (tp->msg_level & NETIF_MSG_INTR)
        printk(KERN_DEBUG"%s: exiting interrupt, intr_status=%#4.4x.\n",
               dev->name, inw(ioaddr + IntrStatus));

#if defined(__i386__) && LINUX_VERSION_CODE < 0x20123
    clear_bit(0, (void*)&dev->interrupt);
#endif
    return;
}
+
/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
   field alignments and semantics.
   Drain the chip's single circular Rx buffer: each frame is prefixed by a
   32-bit little-endian header (status in the low 16 bits, length including
   CRC in the high 16).  Good frames are copied into a fresh skb and handed
   to the stack; on error the receiver is reset per RealTek's advice. */
static int rtl8129_rx(struct net_device *dev)
{
    struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
    long ioaddr = dev->base_addr;
    unsigned char *rx_ring = tp->rx_ring;
    u16 cur_rx = tp->cur_rx;

    if (tp->msg_level & NETIF_MSG_RX_STATUS)
        printk(KERN_DEBUG"%s: In rtl8129_rx(), current %4.4x BufAddr %4.4x,"
               " free to %4.4x, Cmd %2.2x.\n",
               dev->name, cur_rx, inw(ioaddr + RxBufAddr),
               inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));

    while ((inb(ioaddr + ChipCmd) & RxBufEmpty) == 0) {
        int ring_offset = cur_rx % tp->rx_buf_len;
        u32 rx_status = le32_to_cpu(*(u32*)(rx_ring + ring_offset));
        int rx_size = rx_status >> 16;          /* Includes the CRC. */

        if (tp->msg_level & NETIF_MSG_RX_STATUS) {
            int i;
            printk(KERN_DEBUG"%s:  rtl8129_rx() status %4.4x, size %4.4x,"
                   " cur %4.4x.\n",
                   dev->name, rx_status, rx_size, cur_rx);
            printk(KERN_DEBUG"%s: Frame contents ", dev->name);
            for (i = 0; i < 70; i++)
                printk(" %2.2x", rx_ring[ring_offset + i]);
            printk(".\n");
        }
        if (rx_status & (RxBadSymbol|RxRunt|RxTooLong|RxCRCErr|RxBadAlign)) {
            if (tp->msg_level & NETIF_MSG_RX_ERR)
                printk(KERN_DEBUG"%s: Ethernet frame had errors,"
                       " status %8.8x.\n", dev->name, rx_status);
            if (rx_status == 0xffffffff) {
                printk(KERN_NOTICE"%s: Invalid receive status at ring "
                       "offset %4.4x\n", dev->name, ring_offset);
                rx_status = 0;
            }
            if (rx_status & RxTooLong) {
                if (tp->msg_level & NETIF_MSG_DRV)
                    printk(KERN_NOTICE"%s: Oversized Ethernet frame, status"
                           " %4.4x!\n",
                           dev->name, rx_status);
                /* A.C.: The chip hangs here.
                   This should never occur, which means that we are screwed
                   when it does.
                 */
            }
            tp->stats.rx_errors++;
            if (rx_status & (RxBadSymbol|RxBadAlign))
                tp->stats.rx_frame_errors++;
            if (rx_status & (RxRunt|RxTooLong)) tp->stats.rx_length_errors++;
            if (rx_status & RxCRCErr) tp->stats.rx_crc_errors++;
            /* Reset the receiver, based on RealTek recommendation. (Bug?) */
            tp->cur_rx = 0;
            outb(CmdTxEnb, ioaddr + ChipCmd);
            /* A.C.: Reset the multicast list. */
            set_rx_mode(dev);
            outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
        } else {
            /* Malloc up new buffer, compatible with net-2e. */
            /* Omit the four octet CRC from the length. */
            struct sk_buff *skb;
            int pkt_size = rx_size - 4;

            /* Allocate a common-sized skbuff if we are close. */
            skb = dev_alloc_skb(1400 < pkt_size && pkt_size < PKT_BUF_SZ-2 ?
                                PKT_BUF_SZ : pkt_size + 2);
            if (skb == NULL) {
                printk(KERN_WARNING"%s: Memory squeeze, deferring packet.\n",
                       dev->name);
                /* We should check that some rx space is free.
                   If not, free one and mark stats->rx_dropped++. */
                tp->stats.rx_dropped++;
                break;
            }
            skb->dev = dev;
            skb_reserve(skb, 2);        /* 16 byte align the IP fields. */
            if (ring_offset + rx_size > tp->rx_buf_len) {
                /* Frame wraps around the end of the ring buffer: copy the
                   tail piece, then the remainder from the ring start. */
                int semi_count = tp->rx_buf_len - ring_offset - 4;
                /* This could presumably use two calls to copy_and_sum()? */
                memcpy(skb_put(skb, semi_count), &rx_ring[ring_offset + 4],
                       semi_count);
                memcpy(skb_put(skb, pkt_size-semi_count), rx_ring,
                       pkt_size-semi_count);
                if (tp->msg_level & NETIF_MSG_PKTDATA) {
                    int i;
                    printk(KERN_DEBUG"%s:  Frame wrap @%d",
                           dev->name, semi_count);
                    for (i = 0; i < 16; i++)
                        printk(" %2.2x", rx_ring[i]);
                    printk(".\n");
                    memset(rx_ring, 0xcc, 16);
                }
            } else {
                eth_copy_and_sum(skb, &rx_ring[ring_offset + 4],
                                 pkt_size, 0);
                skb_put(skb, pkt_size);
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
#if LINUX_VERSION_CODE > 0x20119
            tp->stats.rx_bytes += pkt_size;
#endif
            tp->stats.rx_packets++;
        }

        /* Advance past header + frame + CRC, rounded up to a dword, then
           tell the chip how far we have read (minus a 16-byte guard). */
        cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
        outw(cur_rx - 16, ioaddr + RxBufPtr);
    }
    if (tp->msg_level & NETIF_MSG_RX_STATUS)
        printk(KERN_DEBUG"%s: Done rtl8129_rx(), current %4.4x BufAddr %4.4x,"
               " free to %4.4x, Cmd %2.2x.\n",
               dev->name, cur_rx, inw(ioaddr + RxBufAddr),
               inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
    tp->cur_rx = cur_rx;
    return 0;
}
+
/* Error and abnormal or uncommon events handlers.
   Called from the interrupt handler with the raw IntrStatus bits; the
   link_changed flag was sampled from CSCR before the status was acked. */
static void rtl_error(struct net_device *dev, int status, int link_changed)
{
    struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
    long ioaddr = dev->base_addr;

    if (tp->msg_level & NETIF_MSG_LINK)
        printk(KERN_NOTICE"%s: Abnormal interrupt, status %8.8x.\n",
               dev->name, status);

    /* Update the error count. */
    tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
    outl(0, ioaddr + RxMissed);

    if (status & RxUnderrun){
        /* This might actually be a link change event. */
        if ((tp->drv_flags & HAS_LNK_CHNG) && link_changed) {
            /* Really link-change on new chips. */
            int lpar = inw(ioaddr + NWayLPAR);
            int duplex = (lpar&0x0100) || (lpar & 0x01C0) == 0x0040
                || tp->duplex_lock;
            /* Do not use MII_BMSR as that clears sticky bit. */
            if (inw(ioaddr + GPPinData) & 0x0004) {
                netif_link_down(dev);
            } else
                netif_link_up(dev);
            if (tp->msg_level & NETIF_MSG_LINK)
                printk(KERN_DEBUG "%s: Link changed, link partner "
                       "%4.4x new duplex %d.\n",
                       dev->name, lpar, duplex);
            tp->full_duplex = duplex;
            /* Only count as errors with no link change. */
            status &= ~RxUnderrun;
        } else {
            /* Genuine Rx underrun: restart the receiver without a full
               hardware reset. */
            /* If this does not work, we will do rtl_hw_start(dev); */
            outb(CmdTxEnb, ioaddr + ChipCmd);
            set_rx_mode(dev);       /* Reset the multicast list. */
            outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);

            tp->stats.rx_errors++;
            tp->stats.rx_fifo_errors++;
        }
    }

    if (status & (RxOverflow | RxErr | RxFIFOOver)) tp->stats.rx_errors++;
    if (status & (PCSTimeout)) tp->stats.rx_length_errors++;
    if (status & RxFIFOOver) tp->stats.rx_fifo_errors++;
    if (status & RxOverflow) {
        tp->stats.rx_over_errors++;
        /* Resynchronize our read pointer with the chip's write pointer. */
        tp->cur_rx = inw(ioaddr + RxBufAddr) % tp->rx_buf_len;
        outw(tp->cur_rx - 16, ioaddr + RxBufPtr);
    }
    if (status & PCIErr) {
        u32 pci_cmd_status;
        pci_read_config_dword(tp->pci_dev, PCI_COMMAND, &pci_cmd_status);

        printk(KERN_ERR "%s: PCI Bus error %4.4x.\n",
               dev->name, pci_cmd_status);
    }
}
+
/* Bring the interface down: mask interrupts, stop the DMA engines,
   harvest final error counters, free ring resources, and put the chip
   into its low-power state.  Counterpart of the open routine. */
static int
rtl8129_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
    int i;

    netif_stop_tx_queue(dev);

    if (tp->msg_level & NETIF_MSG_IFDOWN)
        printk(KERN_DEBUG"%s: Shutting down ethercard, status was 0x%4.4x.\n",
               dev->name, inw(ioaddr + IntrStatus));

    /* Disable interrupts by clearing the interrupt mask. */
    outw(0x0000, ioaddr + IntrMask);

    /* Stop the chip's Tx and Rx DMA processes. */
    outb(0x00, ioaddr + ChipCmd);

    /* Update the error counts. */
    tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
    outl(0, ioaddr + RxMissed);

    del_timer(&tp->timer);

    free_irq(dev->irq, dev);

    /* Release any skbs still queued for transmit. */
    for (i = 0; i < NUM_TX_DESC; i++) {
        if (tp->tx_skbuff[i])
            dev_free_skb(tp->tx_skbuff[i]);
        tp->tx_skbuff[i] = 0;
    }
    kfree(tp->rx_ring);
    tp->rx_ring = 0;

    /* Green!  Put the chip in low-power mode. */
    outb(0xC0, ioaddr + Cfg9346);
    outb(tp->config1 | 0x03, ioaddr + Config1);
    outb('H', ioaddr + HltClk);     /* 'R' would leave the clock running. */

    MOD_DEC_USE_COUNT;

    return 0;
}
+
/*
  Handle user-level ioctl() calls.
  We must use two numeric constants as the key because some clueless person
  changed value for the symbolic name.
  Supported operations: the MII get-PHY/read-register/write-register trio
  (under both their old and new numeric values) plus the driver's private
  get/set-parameters calls.  Write operations require CAP_NET_ADMIN.
*/
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
    u16 *data = (u16 *)&rq->ifr_data;
    u32 *data32 = (void *)&rq->ifr_data;

    switch(cmd) {
    case 0x8947: case 0x89F0:
        /* SIOCGMIIPHY: Get the address of the PHY in use. */
        data[0] = np->phys[0] & 0x3f;
        /* Fall Through */
    case 0x8948: case 0x89F1:
        /* SIOCGMIIREG: Read the specified MII register. */
        data[3] = mdio_read(dev, data[0], data[1] & 0x1f);
        return 0;
    case 0x8949: case 0x89F2:
        /* SIOCSMIIREG: Write the specified MII register */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        /* Writes to our own PHY are shadowed into driver state so the
           media-selection logic tracks user changes. */
        if (data[0] == np->phys[0]) {
            u16 value = data[2];
            switch (data[1]) {
            case 0:
                /* Check for autonegotiation on or reset. */
                np->medialock = (value & 0x9000) ? 0 : 1;
                if (np->medialock)
                    np->full_duplex = (value & 0x0100) ? 1 : 0;
                break;
            case 4: np->advertising = value; break;
            }
        }
        mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
        return 0;
    case SIOCGPARAMS:
        data32[0] = np->msg_level;
        data32[1] = np->multicast_filter_limit;
        data32[2] = np->max_interrupt_work;
        data32[3] = 0;      /* No rx_copybreak, always copy. */
        return 0;
    case SIOCSPARAMS:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        np->msg_level = data32[0];
        np->multicast_filter_limit = data32[1];
        np->max_interrupt_work = data32[2];
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}
+
+static struct net_device_stats *
+rtl8129_get_stats(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (netif_running(dev)) {
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+ }
+
+ return &tp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This routine is not state sensitive and need not be SMP locked. */
+
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while (--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
+
/* Bits in RxConfig: which destination-address classes the receiver
   accepts.  OR-ed into RxConfig by set_rx_mode(). */
enum rx_mode_bits {
    AcceptErr=0x20, AcceptRunt=0x10, AcceptBroadcast=0x08,
    AcceptMulticast=0x04, AcceptMyPhys=0x02, AcceptAllPhys=0x01,
};
+
/* Set or clear the multicast filter for this adaptor from dev->flags and
   dev->mc_list, programming RxConfig and the 64-bit multicast hash
   registers (MAR0..7).  Safe to call while the chip is running. */
static void set_rx_mode(struct net_device *dev)
{
    struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
    long ioaddr = dev->base_addr;
    u32 mc_filter[2];       /* Multicast hash filter */
    int i, rx_mode;

    if (tp->msg_level & NETIF_MSG_RXFILTER)
        printk(KERN_DEBUG"%s:   set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
               dev->name, dev->flags, (int)inl(ioaddr + RxConfig));

    /* Note: do not reorder, GCC is clever about common statements. */
    if (dev->flags & IFF_PROMISC) {
        /* Unconditionally log net taps. */
        printk(KERN_NOTICE"%s: Promiscuous mode enabled.\n", dev->name);
        rx_mode = AcceptBroadcast|AcceptMulticast|AcceptMyPhys|AcceptAllPhys;
        mc_filter[1] = mc_filter[0] = 0xffffffff;
    } else if ((dev->mc_count > tp->multicast_filter_limit)
               || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to filter perfectly -- accept all multicasts. */
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        mc_filter[1] = mc_filter[0] = 0xffffffff;
    } else {
        /* Hash each multicast address's CRC top 6 bits into the filter. */
        struct dev_mc_list *mclist;
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        mc_filter[1] = mc_filter[0] = 0;
        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next)
            set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
    }
    /* We can safely update without stopping the chip. */
    outl(tp->rx_config | rx_mode, ioaddr + RxConfig);
    tp->mc_filter[0] = mc_filter[0];
    tp->mc_filter[1] = mc_filter[1];
    outl(mc_filter[0], ioaddr + MAR0 + 0);
    outl(mc_filter[1], ioaddr + MAR0 + 4);
    return;
}
+
+
/* Driver power-management / lifecycle event handler.
   DRV_SUSPEND quiesces the chip, DRV_RESUME restarts it, DRV_DETACH
   tears the device down completely (note: the list unlink happens
   before the kfree, which is the correct order). */
static int rtl_pwr_event(void *dev_instance, int event)
{
    struct net_device *dev = dev_instance;
    struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
    long ioaddr = dev->base_addr;

    if (np->msg_level & NETIF_MSG_LINK)
        printk("%s: Handling power event %d.\n", dev->name, event);
    switch(event) {
    case DRV_ATTACH:
        MOD_INC_USE_COUNT;
        break;
    case DRV_SUSPEND:
        netif_device_detach(dev);
        /* Disable interrupts, stop Tx and Rx. */
        outw(0x0000, ioaddr + IntrMask);
        outb(0x00, ioaddr + ChipCmd);
        /* Update the error counts. */
        np->stats.rx_missed_errors += inl(ioaddr + RxMissed);
        outl(0, ioaddr + RxMissed);
        break;
    case DRV_RESUME:
        netif_device_attach(dev);
        rtl_hw_start(dev);
        break;
    case DRV_DETACH: {
        struct net_device **devp, **next;
        if (dev->flags & IFF_UP) {
            dev_close(dev);
            dev->flags &= ~(IFF_UP|IFF_RUNNING);
        }
        unregister_netdev(dev);
        release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
#ifndef USE_IO_OPS
        iounmap((char *)dev->base_addr);
#endif
        /* Unlink this device from the module's singly-linked list. */
        for (devp = &root_rtl8129_dev; *devp; devp = next) {
            next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
            if (*devp == dev) {
                *devp = *next;
                break;
            }
        }
        if (np->priv_addr)
            kfree(np->priv_addr);
        kfree(dev);
        MOD_DEC_USE_COUNT;
        break;
    }
    }

    return 0;
}
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+static dev_node_t *rtl8139_attach(dev_locator_t *loc)
+{
+ struct net_device *dev;
+ u16 dev_id;
+ u32 pciaddr;
+ u8 bus, devfn, irq;
+ long hostaddr;
+ /* Note: the chip index should match the 8139B pci_tbl[] entry. */
+ int chip_idx = 2;
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ printk(KERN_DEBUG "rtl8139_attach(bus %d, function %d)\n", bus, devfn);
+#ifdef USE_IO_OPS
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &pciaddr);
+ hostaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &pciaddr);
+ hostaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size);
+#endif
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+ if (hostaddr == 0 || irq == 0) {
+ printk(KERN_ERR "The %s interface at %d/%d was not assigned an %s.\n"
+ KERN_ERR " It will not be activated.\n",
+ pci_tbl[chip_idx].name, bus, devfn,
+ hostaddr == 0 ? "address" : "IRQ");
+ return NULL;
+ }
+ dev = rtl8139_probe1(pci_find_slot(bus, devfn), NULL,
+ hostaddr, irq, chip_idx, 0);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+ node->major = node->minor = 0;
+ node->next = NULL;
+ MOD_INC_USE_COUNT;
+ return node;
+ }
+ return NULL;
+}
+
+static void rtl8139_detach(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "rtl8139_detach(%s)\n", node->dev_name);
+ for (devp = &root_rtl8129_dev; *devp; devp = next) {
+ next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ struct rtl8129_private *np =
+ (struct rtl8129_private *)(*devp)->priv;
+ unregister_netdev(*devp);
+ release_region((*devp)->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)(*devp)->base_addr);
+#endif
+ kfree(*devp);
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ *devp = *next;
+ kfree(node);
+ MOD_DEC_USE_COUNT;
+ }
+}
+
/* PCMCIA/CardBus driver operations: name, attach, suspend (unused),
   resume (unused), detach. */
struct driver_operations realtek_ops = {
    "realtek_cb",
    rtl8139_attach, /*rtl8139_suspend*/0, /*rtl8139_resume*/0, rtl8139_detach
};
+
+#endif /* Cardbus support */
+
+#ifdef MODULE
/* Module entry point: register as a CardBus driver or as a plain PCI
   driver depending on build configuration. */
int init_module(void)
{
    if (debug >= NETIF_MSG_DRV)  /* Emit version even if no cards detected. */
        printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
#ifdef CARDBUS
    register_driver(&realtek_ops);
    return 0;
#else
    return pci_drv_register(&rtl8139_drv_id, NULL);
#endif
}
+
/* Module exit: unregister the driver and tear down every device still
   on the module's list.  Note next_module is read before kfree so the
   list walk never touches freed memory. */
void cleanup_module(void)
{
    struct net_device *next_dev;

#ifdef CARDBUS
    unregister_driver(&realtek_ops);
#else
    pci_drv_unregister(&rtl8139_drv_id);
#endif

    while (root_rtl8129_dev) {
        struct rtl8129_private *np = (void *)(root_rtl8129_dev->priv);
        unregister_netdev(root_rtl8129_dev);
        release_region(root_rtl8129_dev->base_addr,
                       pci_tbl[np->chip_id].io_size);
#ifndef USE_IO_OPS
        iounmap((char *)(root_rtl8129_dev->base_addr));
#endif
        /* Save the link before freeing the device that contains it. */
        next_dev = np->next_module;
        if (np->priv_addr)
            kfree(np->priv_addr);
        kfree(root_rtl8129_dev);
        root_rtl8129_dev = next_dev;
    }
}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` rtl8139.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c -o realtek_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/seeq8005.c b/linux/src/drivers/net/seeq8005.c
new file mode 100644
index 0000000..4adebde
--- /dev/null
+++ b/linux/src/drivers/net/seeq8005.c
@@ -0,0 +1,760 @@
+/* seeq8005.c: A network driver for linux. */
+/*
+ Based on skeleton.c,
+ Written 1993-94 by Donald Becker.
+ See the skeleton.c file for further copyright information.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as hamish@zot.apana.org.au
+
+ This file is a network device driver for the SEEQ 8005 chipset and
+ the Linux operating system.
+
+*/
+
/* Driver identification string, printed once at probe time. */
static const char *version =
    "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
+
+/*
+ Sources:
+ SEEQ 8005 databook
+
+ Version history:
+ 1.00 Public release. cosmetic changes (no warnings now)
+ 0.68 Turning per- packet,interrupt debug messages off - testing for release.
+ 0.67 timing problems/bad buffer reads seem to be fixed now
+ 0.63 *!@$ protocol=eth_type_trans -- now packets flow
+ 0.56 Send working
+ 0.48 Receive working
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "seeq8005.h"
+
/* First, a few definitions that the brave might change. */
/* A zero-terminated list of I/O addresses to be probed. */
static unsigned int seeq8005_portlist[] =
   { 0x300, 0x320, 0x340, 0x360, 0};

/* use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static unsigned int net_debug = NET_DEBUG;

/* Information that need to be kept for each board. */
struct net_local {
    struct enet_statistics stats;
    /* Offset in packet memory where the next recv_pkt_header is expected. */
    unsigned short receive_ptr;
    long open_time;             /* Useless example local info. */
};

/* The station (ethernet) address prefix, used for IDing the board. */
#define SA_ADDR0 0x00
#define SA_ADDR1 0x80
#define SA_ADDR2 0x4b

/* Index to functions, as function prototypes. */

extern int seeq8005_probe(struct device *dev);

static int seeq8005_probe1(struct device *dev, int ioaddr);
static int seeq8005_open(struct device *dev);
static int seeq8005_send_packet(struct sk_buff *skb, struct device *dev);
static void seeq8005_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void seeq8005_rx(struct device *dev);
static int seeq8005_close(struct device *dev);
static struct enet_statistics *seeq8005_get_stats(struct device *dev);
static void set_multicast_list(struct device *dev);

/* Example routines you must write ;->. */
#define tx_done(dev)    (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
extern void hardware_send_packet(struct device *dev, char *buf, int length);
extern void seeq8005_init(struct device *dev, int startp);
inline void wait_for_buffer(struct device *dev);
+
+
/* Check for a network adaptor of this type, and return '0' iff one exists.
   If dev->base_addr == 0, probe all likely locations.
   If dev->base_addr == 1, always return failure.
   If dev->base_addr == 2, allocate space for the device and return success
   (detachable devices only).
   */
#ifdef HAVE_DEVLIST
/* Support for a alternate probe manager, which will eliminate the
   boilerplate below. */
struct netdev_entry seeq8005_drv =
{"seeq8005", seeq8005_probe1, SEEQ8005_IO_EXTENT, seeq8005_portlist};
#else
int
seeq8005_probe(struct device *dev)
{
    int i;
    int base_addr = dev ? dev->base_addr : 0;

    if (base_addr > 0x1ff)      /* Check a single specified location. */
        return seeq8005_probe1(dev, base_addr);
    else if (base_addr != 0)    /* Don't probe at all. */
        return ENXIO;

    /* Autoprobe: try each address in the list that is not already claimed. */
    for (i = 0; seeq8005_portlist[i]; i++) {
        int ioaddr = seeq8005_portlist[i];
        if (check_region(ioaddr, SEEQ8005_IO_EXTENT))
            continue;
        if (seeq8005_probe1(dev, ioaddr) == 0)
            return 0;
    }

    return ENODEV;
}
#endif
+
+/* This is the real probe routine. Linux has a history of friendly device
+ probes on the ISA bus. A good device probes avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+static int seeq8005_probe1(struct device *dev, int ioaddr)
+{
+ static unsigned version_printed = 0;
+ int i,j;
+ unsigned char SA_prom[32];
+ int old_cfg1;
+ int old_cfg2;
+ int old_stat;
+ int old_dmaar;
+ int old_rear;
+
+ if (net_debug>1)
+ printk("seeq8005: probing at 0x%x\n",ioaddr);
+
+ old_stat = inw(SEEQ_STATUS); /* read status register */
+ if (old_stat == 0xffff)
+ return ENODEV; /* assume that 0xffff == no device */
+ if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
+ if (net_debug>1) {
+ printk("seeq8005: reserved stat bits != 0x1800\n");
+ printk(" == 0x%04x\n",old_stat);
+ }
+ return ENODEV;
+ }
+
+ old_rear = inw(SEEQ_REA);
+ if (old_rear == 0xffff) {
+ outw(0,SEEQ_REA);
+ if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
+ return ENODEV;
+ }
+ } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
+ if (net_debug>1) {
+ printk("seeq8005: unused rear bits != 0xff00\n");
+ printk(" == 0x%04x\n",old_rear);
+ }
+ return ENODEV;
+ }
+
+ old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
+ old_cfg1 = inw(SEEQ_CFG1);
+ old_dmaar = inw(SEEQ_DMAAR);
+
+ if (net_debug>4) {
+ printk("seeq8005: stat = 0x%04x\n",old_stat);
+ printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
+ printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
+ printk("seeq8005: raer = 0x%04x\n",old_rear);
+ printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
+ }
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
+ outw( 0, SEEQ_DMAAR); /* set starting PROM address */
+ outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
+
+
+ j=0;
+ for(i=0; i <32; i++) {
+ j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
+ }
+
+#if 0
+ /* untested because I only have the one card */
+ if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
+ if (net_debug>1) { /* check this before deciding that we have a card */
+ printk("seeq8005: prom sum error\n");
+ }
+ outw( old_stat, SEEQ_STATUS);
+ outw( old_dmaar, SEEQ_DMAAR);
+ outw( old_cfg1, SEEQ_CFG1);
+ return ENODEV;
+ }
+#endif
+
+ outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
+ SLOW_DOWN_IO; /* have to wait 4us after a reset - should be fixed */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ if (net_debug) {
+ printk("seeq8005: prom sum = 0x%08x\n",j);
+ for(j=0; j<32; j+=16) {
+ printk("seeq8005: prom %02x: ",j);
+ for(i=0;i<16;i++) {
+ printk("%02x ",SA_prom[j|i]);
+ }
+ printk(" ");
+ for(i=0;i<16;i++) {
+ if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
+ printk("%c", SA_prom[j|i]);
+ } else {
+ printk(" ");
+ }
+ }
+ printk("\n");
+ }
+ }
+
+#if 0
+ /*
+ * testing the packet buffer memory doesn't work yet
+ * but all other buffer accesses do
+ * - fixing is not a priority
+ */
+ if (net_debug>1) { /* test packet buffer memory */
+ printk("seeq8005: testing packet buffer ... ");
+ outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0 , SEEQ_DMAAR);
+ for(i=0;i<32768;i++) {
+ outw(0x5a5a, SEEQ_BUFFER);
+ }
+ j=jiffies+HZ;
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && jiffies < j )
+ mb();
+ outw( 0 , SEEQ_DMAAR);
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && jiffies < j+HZ)
+ mb();
+ if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ j=0;
+ for(i=0;i<32768;i++) {
+ if (inw(SEEQ_BUFFER) != 0x5a5a)
+ j++;
+ }
+ if (j) {
+ printk("%i\n",j);
+ } else {
+ printk("ok.\n");
+ }
+ }
+#endif
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (net_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = SA_prom[i+6]);
+
+ if (dev->irq == 0xff)
+ ; /* Do nothing: a user-level program will set it. */
+ else if (dev->irq < 2) { /* "Auto-IRQ" */
+ autoirq_setup(0);
+
+ outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
+
+ dev->irq = autoirq_report(0);
+
+ if (net_debug >= 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ * or don't know which one to set.
+ */
+ dev->irq = 9;
+
+#if 0
+ {
+ int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", NULL);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+#endif
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ request_region(ioaddr, SEEQ8005_IO_EXTENT,"seeq8005");
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = seeq8005_open;
+ dev->stop = seeq8005_close;
+ dev->hard_start_xmit = seeq8005_send_packet;
+ dev->get_stats = seeq8005_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->flags &= ~IFF_MULTICAST;
+
+ return 0;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is non-reboot way to recover if something goes wrong.
+ */
+static int
+seeq8005_open(struct device *dev)
+{
+	struct net_local *lp = (struct net_local *)dev->priv;
+
+	{
+		/* Claim the interrupt line before touching the hardware. */
+		int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", NULL);
+		if (irqval) {
+			printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+				 dev->irq, irqval);
+			/* NOTE(review): positive EAGAIN (not -EAGAIN) — consistent
+			 * with the rest of this driver, but unusual by later
+			 * kernel conventions. */
+			return EAGAIN;
+		}
+	}
+	/* Let seeq8005_interrupt() map the IRQ number back to this device. */
+	irq2dev_map[dev->irq] = dev;
+
+	/* Reset the hardware here.  Don't forget to set the station address. */
+	seeq8005_init(dev, 1);
+
+	lp->open_time = jiffies;
+
+	/* Mark the interface up and ready to transmit. */
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+	return 0;
+}
+
+/* Queue one packet for transmission.  Returns 0 when the skb has been
+ * consumed, 1 to ask the upper layer to retry (transmitter busy and the
+ * timeout has not yet expired).  Restarts the chip on a transmit timeout.
+ */
+static int
+seeq8005_send_packet(struct sk_buff *skb, struct device *dev)
+{
+	int ioaddr = dev->base_addr;
+
+	if (dev->tbusy) {
+		/* If we get here, some higher level has decided we are broken.
+		   There should really be a "kick me" function call instead. */
+		int tickssofar = jiffies - dev->trans_start;
+		if (tickssofar < 5)
+			return 1;
+		printk("%s: transmit timed out, %s?\n", dev->name,
+			   tx_done(dev) ? "IRQ conflict" : "network cable problem");
+		/* Try to restart the adaptor. */
+		seeq8005_init(dev, 1);
+		dev->tbusy=0;
+		dev->trans_start = jiffies;
+	}
+
+	/* If some higher layer thinks we've missed an tx-done interrupt
+	   we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+	   itself. */
+	if (skb == NULL) {
+		dev_tint(dev);
+		return 0;
+	}
+
+	/* Block a timer-based transmit from overlapping.  This could better be
+	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+	if (set_bit(0, (void*)&dev->tbusy) != 0)
+		printk("%s: Transmitter access conflict.\n", dev->name);
+	else {
+		/* Pad short frames up to the ethernet minimum (ETH_ZLEN). */
+		short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+		unsigned char *buf = skb->data;
+
+		hardware_send_packet(dev, buf, length);
+		dev->trans_start = jiffies;
+	}
+	/* The skb is consumed on every path from here on. */
+	dev_kfree_skb (skb, FREE_WRITE);
+
+	/* You might need to clean up and record Tx statistics here. */
+
+	return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+seeq8005_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+	struct device *dev = (struct device *)(irq2dev_map[irq]);
+	struct net_local *lp;
+	int ioaddr, status, boguscount = 0;
+
+	/* Stray interrupt: the IRQ isn't mapped to any of our devices. */
+	if (dev == NULL) {
+		printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+		return;
+	}
+
+	if (dev->interrupt)
+		printk ("%s: Re-entering the interrupt handler.\n", dev->name);
+	dev->interrupt = 1;
+
+	ioaddr = dev->base_addr;
+	lp = (struct net_local *)dev->priv;
+
+	/* Service events until the chip is quiet, bounded by boguscount
+	 * so a stuck status line cannot wedge us in the handler. */
+	status = inw(SEEQ_STATUS);
+	do {
+		if (net_debug >2) {
+			printk("%s: int, status=0x%04x\n",dev->name,status);
+		}
+
+		if (status & SEEQSTAT_WINDOW_INT) {
+			/* Ack while preserving the current interrupt-enable bits. */
+			outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+			if (net_debug) {
+				printk("%s: window int!\n",dev->name);
+			}
+		}
+		if (status & SEEQSTAT_TX_INT) {
+			outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+			lp->stats.tx_packets++;
+			dev->tbusy = 0;
+			mark_bh(NET_BH);	/* Inform upper layers. */
+		}
+		if (status & SEEQSTAT_RX_INT) {
+			/* Got a packet(s). */
+			seeq8005_rx(dev);
+		}
+		status = inw(SEEQ_STATUS);
+	} while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
+
+	if(net_debug>2) {
+		printk("%s: eoi\n",dev->name);
+	}
+	dev->interrupt = 0;
+	return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+seeq8005_rx(struct device *dev)
+{
+	struct net_local *lp = (struct net_local *)dev->priv;
+	int boguscount = 10;	/* upper bound on frames handled per call */
+	int pkt_hdr;
+	int ioaddr = dev->base_addr;
+
+	/* Walk the on-chip receive ring: each buffer starts with a 16-bit
+	 * "next packet" address followed by a 16-bit header/status word. */
+	do {
+		int next_packet;
+		int pkt_len;
+		int i;
+		int status;
+
+		status = inw(SEEQ_STATUS);
+		outw( lp->receive_ptr, SEEQ_DMAAR);
+		outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+		wait_for_buffer(dev);
+		next_packet = ntohs(inw(SEEQ_BUFFER));
+		pkt_hdr = inw(SEEQ_BUFFER);
+
+		if (net_debug>2) {
+			printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
+		}
+
+		if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) {	/* Read all the frames? */
+			return;							/* Done for now */
+		}
+
+		/* Frame not fully received yet; come back on the next interrupt. */
+		if ((pkt_hdr & SEEQPKTS_DONE)==0)
+			break;
+
+		/* Length is the ring distance to the next frame, minus the
+		 * 4 header bytes; the first branch handles ring wrap-around. */
+		if (next_packet < lp->receive_ptr) {
+			pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
+		} else {
+			pkt_len = next_packet - lp->receive_ptr - 4;
+		}
+
+		/* The receive area starts at (DEFAULT_TEA+1)<<8; anything below
+		 * that means the ring pointers are trashed - reset the chip. */
+		if (next_packet < ((DEFAULT_TEA+1)<<8)) {	/* is the next_packet address sane? */
+			printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
+			seeq8005_init(dev,1);
+			return;
+		}
+
+		lp->receive_ptr = next_packet;
+
+		if (net_debug>2) {
+			printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
+		}
+
+		if (pkt_hdr & SEEQPKTS_ANY_ERROR) {	/* There was an error. */
+			lp->stats.rx_errors++;
+			if (pkt_hdr & SEEQPKTS_SHORT) lp->stats.rx_frame_errors++;
+			if (pkt_hdr & SEEQPKTS_DRIB) lp->stats.rx_frame_errors++;
+			if (pkt_hdr & SEEQPKTS_OVERSIZE) lp->stats.rx_over_errors++;
+			if (pkt_hdr & SEEQPKTS_CRC_ERR) lp->stats.rx_crc_errors++;
+			/* skip over this packet */
+			outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+			outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
+		} else {
+			/* Malloc up new buffer. */
+			struct sk_buff *skb;
+			unsigned char *buf;
+
+			skb = dev_alloc_skb(pkt_len);
+			if (skb == NULL) {
+				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+				lp->stats.rx_dropped++;
+				break;
+			}
+			skb->dev = dev;
+			skb_reserve(skb, 2);	/* align data on 16 byte */
+			buf = skb_put(skb,pkt_len);
+
+			/* Pull the frame out of the buffer window, a word at a time. */
+			insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
+
+			if (net_debug>2) {
+				char * p = buf;
+				printk("%s: recv ",dev->name);
+				for(i=0;i<14;i++) {
+					printk("%02x ",*(p++)&0xff);
+				}
+				printk("\n");
+			}
+
+			skb->protocol=eth_type_trans(skb,dev);
+			netif_rx(skb);
+			lp->stats.rx_packets++;
+		}
+	} while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
+
+	/* If any worth-while packets have been received, netif_rx()
+	   has done a mark_bh(NET_BH) for us and will work on them
+	   when we get to the bottom-half routine. */
+	return;
+}
+
+/* The inverse routine to net_open(). */
+static int
+seeq8005_close(struct device *dev)
+{
+	struct net_local *lp = (struct net_local *)dev->priv;
+	int ioaddr = dev->base_addr;
+
+	lp->open_time = 0;
+
+	/* Mark the interface down before quiescing the hardware. */
+	dev->tbusy = 1;
+	dev->start = 0;
+
+	/* Flush the Tx and disable Rx here. */
+	outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+	/* Release the IRQ only after the chip can no longer raise one. */
+	free_irq(dev->irq, NULL);
+
+	irq2dev_map[dev->irq] = 0;
+
+	/* Update the statistics here. */
+
+	return 0;
+
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+seeq8005_get_stats(struct device *dev)
+{
+	/* The counters live in the per-device private area. */
+	return &((struct net_local *)dev->priv)->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+	/* The 8005 can match up to six station addresses, but this driver
+	 * only ever programs one, so nothing is done here (yet?).  The
+	 * promiscuous-mode code below is kept for reference but disabled.
+	 */
+#if 0
+	int ioaddr = dev->base_addr;
+/*
+ * hmm, not even sure if my matching works _anyway_ - seem to be receiving
+ * _everything_ . . .
+ */
+
+	if (num_addrs) {			/* Enable promiscuous mode */
+		outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
+		dev->flags|=IFF_PROMISC;
+	} else {				/* Disable promiscuous mode, use normal mode */
+		outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
+	}
+#endif
+}
+
+/* Reset and fully (re)program the chip: station address, transmit end
+ * area, receive ring pointers, match mode, and interrupt enables.
+ * NOTE(review): the 'startp' argument is never used in this body -
+ * presumably a leftover from the skeleton driver; confirm with callers.
+ */
+void seeq8005_init(struct device *dev, int startp)
+{
+	struct net_local *lp = (struct net_local *)dev->priv;
+	int ioaddr = dev->base_addr;
+	int i;
+
+	outw(SEEQCFG2_RESET, SEEQ_CFG2);	/* reset device */
+	SLOW_DOWN_IO;	/* have to wait 4us after a reset - should be fixed */
+	SLOW_DOWN_IO;
+	SLOW_DOWN_IO;
+	SLOW_DOWN_IO;
+
+	outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+	outw( 0, SEEQ_DMAAR);		/* load start address into both low and high byte */
+/*	wait_for_buffer(dev); */	/* I think that you only need a wait for memory buffer */
+	outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+	for(i=0;i<6;i++) {	/* set Station address */
+		outb(dev->dev_addr[i], SEEQ_BUFFER);
+		SLOW_DOWN_IO;
+	}
+
+	outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1);	/* set xmit end area pointer to 16K */
+	outb( DEFAULT_TEA, SEEQ_BUFFER);	/* this gives us 16K of send buffer and 48K of recv buffer */
+
+	lp->receive_ptr = (DEFAULT_TEA+1)<<8;	/* so we can find our packet_header */
+	outw( lp->receive_ptr, SEEQ_RPR);	/* Receive Pointer Register is set to recv buffer memory */
+
+	outw( 0x00ff, SEEQ_REA);	/* Receive Area End */
+
+	/* Debug: read the station address back out of the chip. */
+	if (net_debug>4) {
+		printk("%s: SA0 = ",dev->name);
+
+		outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+		outw( 0, SEEQ_DMAAR);
+		outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+		for(i=0;i<6;i++) {
+			printk("%02x ",inb(SEEQ_BUFFER));
+		}
+		printk("\n");
+	}
+
+	/* Enable address match + broadcast reception, turn the receiver on,
+	 * and enable TX/RX interrupts. */
+	outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+	outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
+	outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
+
+	/* Debug: dump the main register file. */
+	if (net_debug>4) {
+		int old_cfg1;
+		old_cfg1 = inw(SEEQ_CFG1);
+		printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
+		printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
+		printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
+		printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
+		printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
+
+	}
+}
+
+
+/* Push one frame into the chip's transmit buffer and start the
+ * transmitter.  'length' must already include any minimum-size padding;
+ * the caller (seeq8005_send_packet) guarantees that.
+ */
+void hardware_send_packet(struct device * dev, char *buf, int length)
+{
+	int ioaddr = dev->base_addr;
+	int status = inw(SEEQ_STATUS);
+	int transmit_ptr = 0;	/* frames always start at buffer offset 0 */
+	int tmp;
+
+	if (net_debug>4) {
+		printk("%s: send 0x%04x\n",dev->name,length);
+	}
+
+	/* Set FIFO to writemode and set packet-buffer address */
+	outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+	outw( transmit_ptr, SEEQ_DMAAR);
+
+	/* output SEEQ Packet header barfage */
+	outw( htons(length + 4), SEEQ_BUFFER);
+	outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
+
+	/* blat the buffer */
+	outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
+	/* paranoia !! */
+	outw( 0, SEEQ_BUFFER);
+	outw( 0, SEEQ_BUFFER);
+
+	/* set address of start of transmit chain */
+	outw( transmit_ptr, SEEQ_TPR);
+
+	/* drain FIFO: poll until empty, with a one-second (HZ jiffies) timeout */
+	tmp = jiffies;
+	while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && (jiffies < tmp + HZ))
+		mb();
+
+	/* doit ! */
+	outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+
+}
+
+
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it.  The poll is
+ * bounded by a one-second (HZ jiffies) timeout; on timeout it simply
+ * returns without acknowledging anything.
+ */
+inline void wait_for_buffer(struct device * dev)
+{
+	int ioaddr = dev->base_addr;
+	int tmp;
+	int status;
+
+	tmp = jiffies + HZ;
+	while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && jiffies < tmp)
+		mb();
+
+	/* Ack the window interrupt, preserving the interrupt-enable bits. */
+	if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+		outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c skeleton.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/seeq8005.h b/linux/src/drivers/net/seeq8005.h
new file mode 100644
index 0000000..809ba6d
--- /dev/null
+++ b/linux/src/drivers/net/seeq8005.h
@@ -0,0 +1,156 @@
+/*
+ * defines, etc for the seeq8005
+ */
+
+/*
+ * This file is distributed under GPL.
+ *
+ * This style and layout of this file is also copied
+ * from many of the other linux network device drivers.
+ */
+
+/* The number of low I/O ports used by the ethercard. */
+#define SEEQ8005_IO_EXTENT 16
+
+#define SEEQ_B (ioaddr)
+
+#define SEEQ_CMD (SEEQ_B) /* Write only */
+#define SEEQ_STATUS (SEEQ_B) /* Read only */
+#define SEEQ_CFG1 (SEEQ_B + 2)
+#define SEEQ_CFG2 (SEEQ_B + 4)
+#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
+#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
+#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
+#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
+#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
+
+#define DEFAULT_TEA (0x3f)
+
+#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
+#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
+#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
+#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
+#define SEEQCMD_INT_MASK (0x000f)
+
+#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
+#define SEEQCMD_RX_INT_ACK (0x0020)
+#define SEEQCMD_TX_INT_ACK (0x0040)
+#define SEEQCMD_WINDOW_INT_ACK (0x0080)
+#define SEEQCMD_ACK_ALL (0x00f0)
+
+#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
+#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
+#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
+#define SEEQCMD_SET_DMA_OFF (0x0800)
+#define SEEQCMD_SET_RX_OFF (0x1000)
+#define SEEQCMD_SET_TX_OFF (0x2000)
+#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
+
+#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
+#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
+
+#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
+#define SEEQSTAT_RX_INT_EN (0x0002)
+#define SEEQSTAT_TX_INT_EN (0x0004)
+#define SEEQSTAT_WINDOW_INT_EN (0x0008)
+
+#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
+#define SEEQSTAT_RX_INT (0x0020)
+#define SEEQSTAT_TX_INT (0x0040)
+#define SEEQSTAT_WINDOW_INT (0x0080)
+#define SEEQSTAT_ANY_INT (0x00f0)
+
+#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
+#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
+#define SEEQSTAT_TX_ON (0x0400) /* TX running */
+
+#define SEEQSTAT_FIFO_FULL (0x2000)
+#define SEEQSTAT_FIFO_EMPTY (0x4000)
+#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
+
+#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */
+#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
+#define SEEQCFG1_BUFFER_MAC1 (0x0001)
+#define SEEQCFG1_BUFFER_MAC2 (0x0002)
+#define SEEQCFG1_BUFFER_MAC3 (0x0003)
+#define SEEQCFG1_BUFFER_MAC4 (0x0004)
+#define SEEQCFG1_BUFFER_MAC5 (0x0005)
+#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
+#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
+#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
+#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
+
+#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
+#define SEEQCFG1_DMA_CONT (0x0000)
+#define SEEQCFG1_DMA_800ns (0x0010)
+#define SEEQCFG1_DMA_1600ns (0x0020)
+#define SEEQCFG1_DMA_3200ns (0x0030)
+
+#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
+#define SEEQCFG1_DMA_LEN1 (0x0000)
+#define SEEQCFG1_DMA_LEN2 (0x0040)
+#define SEEQCFG1_DMA_LEN4 (0x0080)
+#define SEEQCFG1_DMA_LEN8 (0x00c0)
+
+#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
+#define SEEQCFG1_MAC0_EN (0x0100)
+#define SEEQCFG1_MAC1_EN (0x0200)
+#define SEEQCFG1_MAC2_EN (0x0400)
+#define SEEQCFG1_MAC3_EN (0x0800)
+#define SEEQCFG1_MAC4_EN (0x1000)
+#define SEEQCFG1_MAC5_EN (0x2000)
+
+#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
+#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
+#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
+#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
+#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
+
+#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
+
+#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
+#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
+
+#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
+#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
+#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
+
+#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
+#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
+#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
+#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
+#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */
+#define SEEQCFG2_LOOPBACK (0x0800)
+#define SEEQCFG2_CTRLO (0x1000)
+#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
+
+/* On-chip packet header layout (4 bytes preceding each frame in buffer
+ * memory).  NOTE(review): bit-field allocation order is implementation-
+ * defined in C; the SEEQPKTH_* mask macros below are what the driver
+ * actually uses to access these bits.
+ */
+struct seeq_pkt_hdr {
+	unsigned short	next;		/* address of next packet header */
+	unsigned char	babble_int:1,	/* enable int on >1514 byte packet */
+			coll_int:1,	/* enable int on collision */
+			coll_16_int:1,	/* enable int on >15 collision */
+			xmit_int:1,	/* enable int on success (or xmit with <15 collision) */
+			unused:1,
+			data_follows:1,	/* if not set, process this as a header and pointer only */
+			chain_cont:1,	/* if set, more headers in chain only cmd bit valid in recv header */
+			xmit_recv:1;	/* if set, a xmit packet, else a receive packet.*/
+	unsigned char status;
+};
+
+#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
+#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
+#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
+#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
+#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
+#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
+#define SEEQPKTH_XMIT (0x80)
+
+#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
+#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
+#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
+#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
+#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
+#define SEEQPKTS_DRIB (0x0400) /* recv only */
+#define SEEQPKTS_SHORT (0x0800) /* recv only */
+#define SEEQPKTS_DONE (0x8000)
+#define SEEQPKTS_ANY_ERROR (0x0f00)
diff --git a/linux/src/drivers/net/sis900.c b/linux/src/drivers/net/sis900.c
new file mode 100644
index 0000000..d9e5f63
--- /dev/null
+++ b/linux/src/drivers/net/sis900.c
@@ -0,0 +1,1803 @@
+/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
+ Copyright 1999 Silicon Integrated System Corporation
+ Revision: 1.06.11 Apr. 30 2002
+
+ Modified from the driver which is originally written by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License (GPL), incorporated herein by reference.
+ Drivers based on this skeleton fall under the GPL and must retain
+ the authorship (implicit copyright) notice.
+
+ References:
+ SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ preliminary Rev. 1.0 Jan. 14, 1998
+ SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ preliminary Rev. 1.0 Nov. 10, 1998
+ SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ preliminary Rev. 1.0 Jan. 18, 1998
+ http://www.sis.com.tw/support/databook.htm
+
+ Rev 1.06.11 Apr. 25 2002 Mufasa Yang (mufasa@sis.com.tw) added SiS962 support
+ Rev 1.06.10 Dec. 18 2001 Hui-Fen Hsu workaround for EDB & RTL8201 PHY
+ Rev 1.06.09 Sep. 28 2001 Hui-Fen Hsu update for 630ET & workaround for ICS1893 PHY
+ Rev 1.06.08 Mar. 2 2001 Hui-Fen Hsu (hfhsu@sis.com.tw) some bug fix & 635M/B support
+ Rev 1.06.07 Jan. 8 2001 Lei-Chun Chang added RTL8201 PHY support
+ Rev 1.06.06 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support
+   Rev 1.06.05 Aug. 22 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
+ Rev 1.06.03 Dec. 23 1999 Ollie Lho Third release
+ Rev 1.06.02 Nov. 23 1999 Ollie Lho bug in mac probing fixed
+ Rev 1.06.01 Nov. 16 1999 Ollie Lho CRC calculation provide by Joseph Zbiciak (im14u2c@primenet.com)
+ Rev 1.06 Nov. 4 1999 Ollie Lho (ollie@sis.com.tw) Second release
+ Rev 1.05.05 Oct. 29 1999 Ollie Lho (ollie@sis.com.tw) Single buffer Tx/Rx
+ Chin-Shan Li (lcs@sis.com.tw) Added AMD Am79c901 HomePNA PHY support
+ Rev 1.05 Aug. 7 1999 Jim Huang (cmhuang@sis.com.tw) Initial release
+*/
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/bios32.h>
+#include <linux/compatmac.h>
+
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <asm/types.h>
+#include "sis900.h"
+
+
+#if LINUX_VERSION_CODE < 0x20159
+#define dev_free_skb(skb) dev_kfree_skb (skb, FREE_WRITE);
+#else /* Grrr, incompatible changes should change the name. */
+#define dev_free_skb(skb) dev_kfree_skb(skb);
+#endif
+
+static const char *version =
+"sis900.c: modified v1.06.11 4/30/2002";
+
+static int max_interrupt_work = 20;
+static int multicast_filter_limit = 128;
+
+#define sis900_debug debug
+static int sis900_debug = 0;
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+
+enum pci_flags_bit {
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+struct mac_chip_info {
+ const char *name;
+ u16 vendor_id, device_id, flags;
+ int io_size;
+ struct device *(*probe) (struct mac_chip_info *mac, long ioaddr, int irq,
+ int pci_index, unsigned char pci_device_fn, unsigned char pci_bus, struct device * net_dev);
+};
+static struct device * sis900_mac_probe (struct mac_chip_info * mac, long ioaddr, int irq,
+ int pci_index, unsigned char pci_device_fn,
+ unsigned char pci_bus, struct device * net_dev);
+static struct mac_chip_info mac_chip_table[] = {
+ { "SiS 900 PCI Fast Ethernet", PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
+ PCI_COMMAND_IO|PCI_COMMAND_MASTER, SIS900_TOTAL_SIZE, sis900_mac_probe},
+ { "SiS 7016 PCI Fast Ethernet",PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
+ PCI_COMMAND_IO|PCI_COMMAND_MASTER, SIS900_TOTAL_SIZE, sis900_mac_probe},
+ {0,}, /* 0 terminatted list. */
+};
+
+static void sis900_read_mode(struct device *net_dev, int *speed, int *duplex);
+
+static struct mii_chip_info {
+ const char * name;
+ u16 phy_id0;
+ u16 phy_id1;
+ u8 phy_types;
+#define HOME 0x0001
+#define LAN 0x0002
+#define MIX 0x0003
+} mii_chip_table[] = {
+ { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
+ { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
+ { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
+ { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
+ { "ICS LAN PHY", 0x0015, 0xF440, LAN },
+ { "NS 83851 PHY", 0x2000, 0x5C20, MIX },
+ { "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
+ {0,},
+};
+
+struct mii_phy {
+ struct mii_phy * next;
+ int phy_addr;
+ u16 phy_id0;
+ u16 phy_id1;
+ u16 status;
+ u8 phy_types;
+};
+
+typedef struct _BufferDesc {
+ u32 link;
+ u32 cmdsts;
+ u32 bufptr;
+} BufferDesc;
+
+struct sis900_private {
+ struct device *next_module;
+ struct enet_statistics stats;
+
+ /* struct pci_dev * pci_dev;*/
+ unsigned char pci_bus;
+ unsigned char pci_device_fn;
+ int pci_index;
+
+ struct mac_chip_info * mac;
+ struct mii_phy * mii;
+ struct mii_phy * first_mii; /* record the first mii structure */
+ unsigned int cur_phy;
+
+ struct timer_list timer; /* Link status detection timer. */
+ u8 autong_complete; /* 1: auto-negotiate complete */
+
+ unsigned int cur_rx, dirty_rx; /* producer/comsumer pointers for Tx/Rx ring */
+ unsigned int cur_tx, dirty_tx;
+
+ /* The saved address of a sent/receive-in-place packet buffer */
+ struct sk_buff *tx_skbuff[NUM_TX_DESC];
+ struct sk_buff *rx_skbuff[NUM_RX_DESC];
+ BufferDesc tx_ring[NUM_TX_DESC];
+ BufferDesc rx_ring[NUM_RX_DESC];
+
+ unsigned int tx_full; /* The Tx queue is full. */
+ int LinkOn;
+};
+
+#ifdef MODULE
+#if LINUX_VERSION_CODE > 0x20115
+MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
+MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+#endif
+#endif
+
+static int sis900_open(struct device *net_dev);
+static int sis900_mii_probe (unsigned char pci_bus, unsigned char pci_device_fn, struct device * net_dev);
+static void sis900_init_rxfilter (struct device * net_dev);
+static u16 read_eeprom(long ioaddr, int location);
+static u16 mdio_read(struct device *net_dev, int phy_id, int location);
+static void mdio_write(struct device *net_dev, int phy_id, int location, int val);
+static void sis900_timer(unsigned long data);
+static void sis900_check_mode (struct device *net_dev, struct mii_phy *mii_phy);
+static void sis900_tx_timeout(struct device *net_dev);
+static void sis900_init_tx_ring(struct device *net_dev);
+static void sis900_init_rx_ring(struct device *net_dev);
+static int sis900_start_xmit(struct sk_buff *skb, struct device *net_dev);
+static int sis900_rx(struct device *net_dev);
+static void sis900_finish_xmit (struct device *net_dev);
+static void sis900_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int sis900_close(struct device *net_dev);
+static int mii_ioctl(struct device *net_dev, struct ifreq *rq, int cmd);
+static struct enet_statistics *sis900_get_stats(struct device *net_dev);
+static u16 sis900_compute_hashtable_index(u8 *addr, u8 revision);
+static void set_rx_mode(struct device *net_dev);
+static void sis900_reset(struct device *net_dev);
+static void sis630_set_eq(struct device *net_dev, u8 revision);
+static u16 sis900_default_phy(struct device * net_dev);
+static void sis900_set_capability( struct device *net_dev ,struct mii_phy *phy);
+static u16 sis900_reset_phy(struct device *net_dev, int phy_addr);
+static void sis900_auto_negotiate(struct device *net_dev, int phy_addr);
+static void sis900_set_mode (long ioaddr, int speed, int duplex);
+
+/* A list of all installed SiS900 devices, for removing the driver module. */
+static struct device *root_sis900_dev = NULL;
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+ {"sis900", sis900_probe, SIS900_TOTAL_SIZE, NULL};
+#endif
+
+/* walk through every ethernet PCI devices to see if some of them are matched with our card list*/
+int sis900_probe (struct device * net_dev)
+{
+	int found = 0;
+	int pci_index = 0;
+	unsigned char pci_bus, pci_device_fn;
+	long ioaddr;
+	int irq;
+
+	if (!pcibios_present())
+		return -ENODEV;
+
+	/* Enumerate every PCI ethernet-class device and match it against
+	 * mac_chip_table; 0xff bounds the scan. */
+	for (; pci_index < 0xff; pci_index++)
+	{
+		u16 vendor, device, pci_command;
+		struct mac_chip_info *mac;
+
+		if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
+					&pci_bus, &pci_device_fn) != PCIBIOS_SUCCESSFUL)
+			break;
+
+		pcibios_read_config_word(pci_bus, pci_device_fn, PCI_VENDOR_ID, &vendor);
+		pcibios_read_config_word(pci_bus, pci_device_fn, PCI_DEVICE_ID, &device);
+
+		for (mac = mac_chip_table; mac->vendor_id; mac++)
+		{
+			if (vendor == mac->vendor_id && device == mac->device_id) break;
+		}
+
+		/* pci_dev does not match any of our cards */
+		if (mac->vendor_id == 0)
+			continue;
+
+		{
+			u32 pci_ioaddr;
+			u8 pci_irq_line;
+
+			pcibios_read_config_byte(pci_bus, pci_device_fn,
+						 PCI_INTERRUPT_LINE, &pci_irq_line);
+			pcibios_read_config_dword(pci_bus, pci_device_fn,
+						  PCI_BASE_ADDRESS_0, &pci_ioaddr);
+			/* Strip the I/O-space indicator bits from BAR0. */
+			ioaddr = pci_ioaddr & ~3;
+			irq = pci_irq_line;
+
+			/* Skip devices whose I/O range is already claimed. */
+			if ((mac->flags & PCI_USES_IO) &&
+			    check_region (pci_ioaddr, mac->io_size))
+				continue;
+
+			pcibios_read_config_word(pci_bus, pci_device_fn,
+						 PCI_COMMAND, &pci_command);
+
+			{
+				u8 lat;
+
+				/* Bump a too-small latency timer so bus-master DMA works well. */
+				pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, &lat);
+				if (lat < 16) {
+					printk("PCI: Increasing latency timer of device %02x:%02x to 64\n",
+					       pci_bus, pci_device_fn);
+					pcibios_write_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, 64);
+				}
+			}
+			/* Hand off to the chip-specific probe (sis900_mac_probe). */
+			net_dev = mac->probe (mac, ioaddr, irq, pci_index, pci_device_fn, pci_bus, net_dev);
+			if (net_dev != NULL)
+			{
+				found++;
+			}
+			net_dev = NULL;
+		}
+	}
+	return found ? 0 : -ENODEV;
+
+}
+
+/* older SiS900 and friends, use EEPROM to store MAC address */
+/* Read the station address of an older SiS900 (and friends) from the
+ * on-board serial EEPROM.
+ *
+ * ioaddr:  I/O base of the chip.
+ * net_dev: device whose dev_addr[] is filled in (as three 16-bit words).
+ *
+ * Returns 1 on success, 0 if the EEPROM signature word is all-ones or
+ * all-zeros (missing or unresponsive EEPROM).
+ */
+static int
+sis900_get_mac_addr(long ioaddr, struct device *net_dev)
+{
+	u16 signature;
+	int i;
+
+	/* check to see if we have sane EEPROM */
+	signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
+	if (signature == 0xffff || signature == 0x0000) {
+		/* Fixed typo in the error message: "EERPOM" -> "EEPROM". */
+		printk (KERN_INFO "%s: Error EEPROM read %x\n",
+			net_dev->name, signature);
+		return 0;
+	}
+
+	/* get MAC address from EEPROM */
+	for (i = 0; i < 3; i++)
+		((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+	return 1;
+}
+
+/* SiS630E model, use APC CMOS RAM to store MAC address */
+/* SiS630E model, use APC CMOS RAM to store MAC address */
+static int sis630e_get_mac_addr(long ioaddr, int pci_index, struct device *net_dev)
+{
+	u8 reg;
+	int i;
+	u8 pci_bus, pci_dfn;
+	int not_found;
+
+	/* Locate the SiS ISA bridge (vendor 0x1039, device 0x0008) that
+	 * gates access to the APC CMOS RAM. */
+	not_found = pcibios_find_device(0x1039, 0x0008,
+					pci_index,
+					&pci_bus,
+					&pci_dfn);
+	if (not_found) {
+		printk("%s: Can not find ISA bridge\n", net_dev->name);
+		return 0;
+	}
+	/* Temporarily set bit 0x40 in bridge register 0x48 - presumably
+	 * this enables APC CMOS access (per the 630E datasheet; verify). */
+	pcibios_read_config_byte(pci_bus, pci_dfn, 0x48, &reg);
+	pcibios_write_config_byte(pci_bus, pci_dfn, 0x48, reg | 0x40);
+
+	/* Read the six address bytes from CMOS offsets 0x09..0x0e via the
+	 * standard RTC index/data ports 0x70/0x71. */
+	for (i = 0; i < 6; i++) {
+		outb(0x09 + i, 0x70);
+		((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+	}
+	/* Restore the bridge register to its previous state. */
+	pcibios_write_config_byte(pci_bus, pci_dfn, 0x48, reg & ~0x40);
+
+	return 1;
+}
+
+/* 635 model : set Mac reload bit and get mac address from rfdr */
+/* SiS635: pulse the RELOAD command so the chip latches the MAC into
+ the receive filter, then read it back word-by-word through the
+ rfcr/rfdr index/data pair.  Always returns 1. */
+static int sis635_get_mac_addr(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ /* NOTE(review): the saved rfcr value is OR'd into the command
+ register here; looks odd but matches the upstream driver -- confirm */
+ outl(rfcrSave | RELOAD, ioaddr + cr);
+ outl(0, ioaddr + cr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
+ }
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+
+ return 1;
+}
+
+
+/**
+ * sis962_get_mac_addr: - Get MAC address for SiS962 model
+ * @net_dev: the net device to get address for
+ *
+ * SiS962 model, use EEPROM to store MAC address. And EEPROM is shared by
+ * LAN and 1394. When access EEPROM, send EEREQ signal to hardware first
+ * and wait for EEGNT. If EEGNT is ON, EEPROM is permitted to be accessed
+ * by LAN, otherwise is not. After MAC address is read from EEPROM, send
+ * EEDONE signal to refuse EEPROM access by LAN.
+ * MAC address is read into @net_dev->dev_addr.
+ * Returns 1 on success, 0 when the grant never arrives (~2000 polls).
+ */
+
+static int sis962_get_mac_addr(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ long ee_addr = ioaddr + mear;
+ u32 waittime = 0;
+ int i;
+
+ /* request the shared EEPROM and poll for the grant bit */
+ outl(EEREQ, ee_addr);
+ while(waittime < 2000) {
+ if(inl(ee_addr) & EEGNT) {
+ /* get MAC address from EEPROM */
+ for (i = 0; i < 3; i++)
+ ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* hand the EEPROM back to the 1394 side */
+ outl(EEDONE, ee_addr);
+ return 1;
+ } else {
+ udelay(1);
+ waittime ++;
+ }
+ }
+ outl(EEDONE, ee_addr);
+ return 0;
+}
+
+/* sis900_mac_probe: finish setup of one detected SiS900/7016 NIC.
+ Registers the ether device, allocates and zeroes the driver private
+ state, reads the MAC address (method depends on chip revision),
+ probes the MII transceiver and installs the driver entry points.
+ Returns the net device on success, NULL on failure; on failure all
+ resources acquired here are released again. */
+struct device *
+sis900_mac_probe (struct mac_chip_info *mac, long ioaddr, int irq, int pci_index,
+ unsigned char pci_device_fn, unsigned char pci_bus, struct device * net_dev)
+{
+ struct sis900_private *sis_priv;
+ static int did_version = 0;
+
+ u8 revision;
+ int i, ret = 0;
+
+ if (did_version++ == 0)
+ printk(KERN_INFO "%s\n", version);
+
+ if ((net_dev = init_etherdev(net_dev, 0)) == NULL)
+ return NULL;
+
+ if ((net_dev->priv = kmalloc(sizeof(struct sis900_private), GFP_KERNEL)) == NULL) {
+ unregister_netdev(net_dev);
+ return NULL;
+ }
+
+ sis_priv = net_dev->priv;
+ memset(sis_priv, 0, sizeof(struct sis900_private));
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, mac->io_size, net_dev->name);
+ net_dev->base_addr = ioaddr;
+ net_dev->irq = irq;
+
+ sis_priv->mac = mac;
+ sis_priv->pci_bus = pci_bus;
+ sis_priv->pci_device_fn = pci_device_fn;
+ sis_priv->pci_index = pci_index;
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &revision);
+
+ /* pick the MAC address extraction method by chip revision */
+ if ( revision == SIS630E_900_REV )
+ ret = sis630e_get_mac_addr(ioaddr, pci_index, net_dev);
+ else if ((revision > 0x81) && (revision <= 0x90))
+ ret = sis635_get_mac_addr(net_dev);
+ else if (revision == SIS962_900_REV)
+ ret = sis962_get_mac_addr(net_dev);
+ else
+ ret = sis900_get_mac_addr(ioaddr, net_dev);
+
+ if (ret == 0) {
+ /* BUGFIX: also free the private data and the I/O region,
+ mirroring the mii-probe failure path below (this path used
+ to leak both) */
+ unregister_netdev(net_dev);
+ kfree(sis_priv);
+ release_region(ioaddr, mac->io_size);
+ return NULL;
+ }
+
+ /* print some information about our NIC */
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", net_dev->name, mac->name,
+ ioaddr, irq);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", (u8)net_dev->dev_addr[i]);
+ printk("%2.2x.\n", net_dev->dev_addr[i]);
+
+ /* 630ET : set the mii access mode as software-mode */
+ if (revision == SIS630ET_900_REV)
+ outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr);
+
+ /* probe for mii transceiver */
+ if (sis900_mii_probe(pci_bus, pci_device_fn, net_dev) == 0) {
+ unregister_netdev(net_dev);
+ kfree(sis_priv);
+ release_region(ioaddr, mac->io_size);
+ return NULL;
+ }
+
+ /* link into the driver-global list of probed devices */
+ sis_priv->next_module = root_sis900_dev;
+ root_sis900_dev = net_dev;
+
+ /* The SiS900-specific entries in the device structure. */
+ net_dev->open = &sis900_open;
+ net_dev->hard_start_xmit = &sis900_start_xmit;
+ net_dev->stop = &sis900_close;
+ net_dev->get_stats = &sis900_get_stats;
+ net_dev->set_multicast_list = &set_rx_mode;
+ net_dev->do_ioctl = &mii_ioctl;
+
+ return net_dev;
+}
+
+/* sis900_mii_probe: - Probe MII PHY for sis900 */
+/* Scan all 32 MII addresses, record every responding transceiver on
+ sis_priv->mii / first_mii, pick a default PHY, reset/work around
+ known PHYs, and record initial link state in sis_priv->LinkOn.
+ Returns 1 on success, 0 when no transceiver answers or allocation
+ fails.  NOTE(review): the -ETIME return below is non-zero, so a
+ link-poll timeout is treated as success by the caller -- confirm. */
+static int sis900_mii_probe (unsigned char pci_bus, unsigned char pci_device_fn, struct device * net_dev)
+{
+ struct sis900_private * sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 poll_bit = MII_STAT_LINK, status = 0;
+ unsigned int timeout = jiffies + 5 * HZ;
+ int phy_addr;
+ u8 revision;
+
+ sis_priv->mii = NULL;
+
+ /* search for total of 32 possible mii phy addresses */
+ for (phy_addr = 0; phy_addr < 32; phy_addr++) {
+ struct mii_phy * mii_phy = NULL;
+ u16 mii_status;
+ int i;
+
+ /* status register must be read twice to get current state */
+ for(i=0; i<2; i++)
+ mii_status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (mii_status == 0xffff || mii_status == 0x0000)
+ /* the mii is not accessible, try next one */
+ continue;
+
+ if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
+ printk(KERN_INFO "Cannot allocate mem for struct mii_phy\n");
+ return 0;
+ }
+
+ /* prepend this PHY to the list; first_mii tracks the head */
+ mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
+ mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
+ mii_phy->phy_addr = phy_addr;
+ mii_phy->status = mii_status;
+ mii_phy->next = sis_priv->mii;
+ sis_priv->mii = mii_phy;
+ sis_priv->first_mii = mii_phy;
+
+ /* identify the transceiver against the known-chip table */
+ for (i=0; mii_chip_table[i].phy_id1; i++)
+ if ( ( mii_phy->phy_id0 == mii_chip_table[i].phy_id0 ) &&
+ ( (mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1 )){
+
+ mii_phy->phy_types = mii_chip_table[i].phy_types;
+ if(mii_chip_table[i].phy_types == MIX)
+ mii_phy->phy_types =
+ (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX))?LAN:HOME;
+ printk(KERN_INFO "%s: %s transceiver found at address %d.\n",
+ net_dev->name, mii_chip_table[i].name, phy_addr);
+ break;
+ }
+
+ if( !mii_chip_table[i].phy_id1 )
+ printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n",
+ net_dev->name, phy_addr);
+ }
+
+ if (sis_priv->mii == NULL) {
+ printk(KERN_INFO "%s: No MII transceivers found!\n",
+ net_dev->name);
+ return 0;
+ }
+
+ /* Select Default PHY to put in sis_priv->mii & sis_priv->cur_phy */
+ sis_priv->mii = NULL;
+ sis900_default_phy( net_dev );
+
+ /* Reset PHY if default PHY is internal sis900 */
+ if( (sis_priv->mii->phy_id0 == 0x001D) &&
+ ( (sis_priv->mii->phy_id1&0xFFF0) == 0x8000) )
+ status = sis900_reset_phy( net_dev, sis_priv->cur_phy );
+
+ /* workaround for ICS1893 PHY */
+ if ((sis_priv->mii->phy_id0 == 0x0015) &&
+ ((sis_priv->mii->phy_id1&0xFFF0) == 0xF440))
+ mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);
+
+ /* after a reset, wait (up to the timeout) for the link bit to settle */
+ if( status & MII_STAT_LINK ){
+ while (poll_bit)
+ {
+ poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit);
+ if (jiffies >= timeout)
+ {
+ printk(KERN_WARNING "%s: reset phy and link down now\n", net_dev->name);
+ return -ETIME;
+ }
+ }
+ }
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &revision);
+ if (revision == SIS630E_900_REV) {
+ /* SiS 630E has some bugs on default value of PHY registers */
+ mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0);
+ //mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, 0x1000);
+ }
+
+ if (sis_priv->mii->status & MII_STAT_LINK)
+ sis_priv->LinkOn = TRUE;
+ else
+ sis_priv->LinkOn = FALSE;
+
+ return 1;
+}
+
+
+/* sis900_default_phy : Select one default PHY for sis900 mac */
+/* Walk the PHY list: prefer the first PHY with link up, else a HOME
+ type PHY, else the list head.  Every non-selected PHY is isolated
+ with autonegotiation enabled; the chosen one is un-isolated.
+ Returns the (twice-read) MII status of the selected PHY. */
+static u16 sis900_default_phy(struct device * net_dev)
+{
+ struct sis900_private * sis_priv = (struct sis900_private *)net_dev->priv;
+ struct mii_phy *phy = NULL, *phy_home = NULL, *default_phy = NULL;
+ u16 status;
+
+ for( phy=sis_priv->first_mii; phy; phy=phy->next ){
+ /* read status twice: the link bit is latched */
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ /* Link ON & no default PHY selected yet */
+ if ( (status & MII_STAT_LINK) && !(default_phy) )
+ default_phy = phy;
+ else{
+ /* isolate every PHY we are not going to use */
+ status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
+ mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
+ status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
+ if( phy->phy_types == HOME )
+ phy_home = phy;
+ }
+ }
+
+ if( (!default_phy) && phy_home )
+ default_phy = phy_home;
+ else if(!default_phy)
+ default_phy = sis_priv->first_mii;
+
+ if( sis_priv->mii != default_phy ){
+ sis_priv->mii = default_phy;
+ sis_priv->cur_phy = default_phy->phy_addr;
+ printk(KERN_INFO "%s: Using transceiver found at address %d as default\n", net_dev->name,sis_priv->cur_phy);
+ }
+
+ /* un-isolate the selected PHY */
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
+ status &= (~MII_CNTL_ISOLATE);
+
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ return status;
+}
+
+
+/* sis900_set_capability : set the media capability of network adapter */
+/* Build the autonegotiation advertisement word from the PHY's cached
+ capability bits and write it to the MII_ANADV register. */
+static void sis900_set_capability( struct device *net_dev , struct mii_phy *phy )
+{
+ u16 cap;
+ u16 status;
+
+ /* two reads to refresh the latched status bits */
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ cap = MII_NWAY_CSMA_CD |
+ ((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
+ ((phy->status & MII_STAT_CAN_TX) ? MII_NWAY_TX:0) |
+ ((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)|
+ ((phy->status & MII_STAT_CAN_T) ? MII_NWAY_T:0);
+
+ mdio_write( net_dev, phy->phy_addr, MII_ANADV, cap );
+}
+
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay() inl(ee_addr)
+
+/* Read Serial EEPROM through EEPROM Access Register, Note that location is
+ in word (16 bits) unit */
+/* Bit-bangs a 9-bit read command out over the mear register, then
+ clocks the 16 data bits back in MSB first.  Returns the word read. */
+static u16 read_eeprom(long ioaddr, int location)
+{
+ int i;
+ u16 retval = 0;
+ long ee_addr = ioaddr + mear;
+ u32 read_cmd = location | EEread;
+
+ outl(0, ee_addr);
+ eeprom_delay();
+ outl(EECS, ee_addr);
+ eeprom_delay();
+
+ /* Shift the read command (9) bits out. */
+ for (i = 8; i >= 0; i--) {
+ u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
+ outl(dataval, ee_addr);
+ eeprom_delay();
+ outl(dataval | EECLK, ee_addr);
+ eeprom_delay();
+ }
+ /* NOTE(review): lone outb among outl accesses -- confirm intended */
+ outb(EECS, ee_addr);
+ eeprom_delay();
+
+ /* read the 16-bits data in */
+ for (i = 16; i > 0; i--) {
+ outl(EECS, ee_addr);
+ eeprom_delay();
+ outl(EECS | EECLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(0, ee_addr);
+ eeprom_delay();
+// outl(EECLK, ee_addr);
+
+ return (retval);
+}
+
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol. Note that the command bits and data bits are
+ sent out separately */
+#define mdio_delay() inl(mdio_addr)
+
+/* Clock one idle cycle with MDIO held high (bus turnaround). */
+static void mdio_idle(long mdio_addr)
+{
+ outl(MDIO | MDDIR, mdio_addr);
+ mdio_delay();
+ outl(MDIO | MDDIR | MDC, mdio_addr);
+}
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(long mdio_addr)
+{
+ int i;
+
+ for (i = 31; i >= 0; i--) {
+ outl(MDDIR | MDIO, mdio_addr);
+ mdio_delay();
+ outl(MDDIR | MDIO | MDC, mdio_addr);
+ mdio_delay();
+ }
+ return;
+}
+
+/* Bit-bang one MII read: resync, shift the 16-bit read command out,
+ then clock the 16 data bits in.  Returns the register value. */
+static u16 mdio_read(struct device *net_dev, int phy_id, int location)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ u16 retval = 0;
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
+ /* shift the command bits out, MSB first */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+
+ /* Read the 16 data bits. */
+ for (i = 16; i > 0; i--) {
+ outl(0, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0);
+ outl(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+
+ return retval;
+}
+
+/* Bit-bang one MII write: resync, shift the 16-bit write command out,
+ then shift the 16-bit value out and clock two turnaround cycles. */
+static void mdio_write(struct device *net_dev, int phy_id, int location, int value)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
+ /* Shift the command bits out. */
+ /* NOTE(review): outb here but outl for the value bits below --
+ confirm the mix is intended */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outb(dataval, mdio_addr);
+ mdio_delay();
+ outb(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Shift the value bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay();
+ outb(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+
+ return;
+}
+
+/* Reset one PHY via the MII control register.  Returns the PHY status
+ as read (twice, to refresh latched bits) just before the reset. */
+static u16 sis900_reset_phy(struct device *net_dev, int phy_addr)
+{
+ int i = 0;
+ u16 status;
+
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
+
+ return status;
+}
+
+/* Open entry point: reset the chip, apply the 630 equalizer
+ workaround, claim the IRQ, set up rx filter and both descriptor
+ rings, enable interrupts and start the link-watch timer.
+ Returns 0 on success, -EAGAIN when the IRQ cannot be claimed. */
+static int
+sis900_open(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ u8 revision;
+
+ /* Soft reset the chip. */
+ sis900_reset(net_dev);
+
+ /* Equalizer workaround Rule */
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+
+ if (request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ, net_dev->name, net_dev)) {
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ sis900_init_rxfilter(net_dev);
+
+ sis900_init_tx_ring(net_dev);
+ sis900_init_rx_ring(net_dev);
+
+ set_rx_mode(net_dev);
+
+ net_dev->tbusy = 0;
+ net_dev->interrupt = 0;
+ net_dev->start = 1;
+
+ /* Workaround for EDB */
+ sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
+ outl(IE, ioaddr + ier);
+
+ sis900_check_mode(net_dev, sis_priv->mii);
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&sis_priv->timer);
+ sis_priv->timer.expires = jiffies + HZ;
+ sis_priv->timer.data = (unsigned long)net_dev;
+ sis_priv->timer.function = &sis900_timer;
+ add_timer(&sis_priv->timer);
+
+ return 0;
+}
+
+/* set receive filter address to our MAC address */
+/* Programs the three 16-bit words of the MAC address into the receive
+ filter through the rfcr index / rfdr data registers, with filtering
+ disabled during the update and re-enabled afterwards. */
+static void
+sis900_init_rxfilter (struct device * net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ u32 w;
+
+ w = (u32) *((u16 *)(net_dev->dev_addr)+i);
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ outl(w, ioaddr + rfdr);
+
+ if (sis900_debug > 2) {
+ printk(KERN_INFO "%s: Receive Filter Addrss[%d]=%x\n",
+ net_dev->name, i, inl(ioaddr + rfdr));
+ }
+ }
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+}
+
+/* Initialize the Tx ring. */
+/* Links NUM_TX_DESC descriptors into a circular list (bus addresses),
+ clears all buffers/status, resets the ring indices and loads the
+ first descriptor's address into the txdp register. */
+static void
+sis900_init_tx_ring(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->tx_full = 0;
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ sis_priv->tx_skbuff[i] = NULL;
+
+ sis_priv->tx_ring[i].link = (u32) virt_to_bus(&sis_priv->tx_ring[i+1]);
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ }
+ /* close the ring: last descriptor points back to the first */
+ sis_priv->tx_ring[i-1].link = (u32) virt_to_bus(&sis_priv->tx_ring[0]);
+
+ /* load Transmit Descriptor Register */
+ outl(virt_to_bus(&sis_priv->tx_ring[0]), ioaddr + txdp);
+ if (sis900_debug > 2)
+ printk(KERN_INFO "%s: TX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + txdp));
+}
+
+/* Initialize the Rx descriptor ring, pre-allocate receive buffers */
+/* Links NUM_RX_DESC descriptors into a circle, then pre-allocates one
+ RX_BUF_SIZE socket buffer per descriptor (stopping early on OOM and
+ recording the shortfall in dirty_rx) and loads rxdp. */
+static void
+sis900_init_rx_ring(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->cur_rx = 0;
+ sis_priv->dirty_rx = 0;
+
+ /* init RX descriptor */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ sis_priv->rx_skbuff[i] = NULL;
+
+ sis_priv->rx_ring[i].link = (u32) virt_to_bus(&sis_priv->rx_ring[i+1]);
+ sis_priv->rx_ring[i].cmdsts = 0;
+ sis_priv->rx_ring[i].bufptr = 0;
+ }
+ /* close the ring */
+ sis_priv->rx_ring[i-1].link = (u32) virt_to_bus(&sis_priv->rx_ring[0]);
+
+ /* allocate sock buffers */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for skbuff, this makes a "hole"
+ on the buffer ring, it is not clear how the
+ hardware will react to this kind of degenerated
+ buffer */
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[i] = skb;
+ sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[i].bufptr = virt_to_bus(skb->tail);
+ }
+ /* negative (as unsigned) when some buffers could not be allocated */
+ sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
+
+ /* load Receive Descriptor Register */
+ outl(virt_to_bus(&sis_priv->rx_ring[0]), ioaddr + rxdp);
+ if (sis900_debug > 2)
+ printk(KERN_INFO "%s: RX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + rxdp));
+}
+
+/**
+ * sis630_set_eq: - set phy equalizer value for 630 LAN
+ * @net_dev: the net device to set equalizer value
+ * @revision: 630 LAN revision number
+ *
+ * 630E equalizer workaround rule(Cyrus Huang 08/15)
+ * PHY register 14h(Test)
+ * Bit 14: 0 -- Automatically detect (default)
+ * 1 -- Manually set Equalizer filter
+ * Bit 13: 0 -- (Default)
+ * 1 -- Speed up convergence of equalizer setting
+ * Bit 9 : 0 -- (Default)
+ * 1 -- Disable Baseline Wander
+ * Bit 3~7 -- Equalizer filter setting
+ * Link ON: Set Bit 9, 13 to 1, Bit 14 to 0
+ * Then calculate equalizer value
+ * Then set equalizer value, and set Bit 14 to 1, Bit 9 to 0
+ * Link Off:Set Bit 13 to 1, Bit 14 to 0
+ * Calculate Equalizer value:
+ * When Link is ON and Bit 14 is 0, SIS900PHY will auto-detect proper equalizer value.
+ * When the equalizer is stable, this value is not a fixed value. It will be within
+ * a small range(eg. 7~9). Then we get a minimum and a maximum value(eg. min=7, max=9)
+ * 0 <= max <= 4 --> set equalizer to max
+ * 5 <= max <= 14 --> set equalizer to max+1 or set equalizer to max+2 if max == min
+ * max >= 15 --> set equalizer to max+5 or set equalizer to max+6 if max == min
+ */
+
+static void sis630_set_eq(struct device *net_dev, u8 revision)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 reg14h, eq_value, max_value=0, min_value=0;
+ u8 host_bridge_rev = 0;
+ int i, maxcount=10;
+ int not_found;
+ u8 pci_bus, pci_device_fn;
+
+ /* only the 630 family needs this workaround */
+ if ( !(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630A_900_REV || revision == SIS630ET_900_REV) )
+ return;
+ not_found = pcibios_find_device(SIS630_VENDOR_ID, SIS630_DEVICE_ID,
+ sis_priv->pci_index,
+ &pci_bus,
+ &pci_device_fn);
+ /* BUGFIX: read the host bridge revision only when the bridge WAS
+ found; the original tested the inverse, reading from a garbage
+ bus/devfn on failure and leaving host_bridge_rev uninitialized
+ on success (host_bridge_rev now also defaults to 0). */
+ if (!not_found)
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &host_bridge_rev);
+
+ if (sis_priv->LinkOn) {
+ /* link up: enable auto-detect, sample the equalizer 10 times */
+ reg14h=mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, (0x2200 | reg14h) & 0xBFFF);
+ for (i=0; i < maxcount; i++) {
+ eq_value=(0x00F8 & mdio_read(net_dev, sis_priv->cur_phy, MII_RESV)) >> 3;
+ if (i == 0)
+ max_value=min_value=eq_value;
+ max_value=(eq_value > max_value) ? eq_value : max_value;
+ min_value=(eq_value < min_value) ? eq_value : min_value;
+ }
+ /* 630E rule to determine the equalizer value */
+ if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630ET_900_REV) {
+ if (max_value < 5)
+ eq_value=max_value;
+ else if (max_value >= 5 && max_value < 15)
+ eq_value=(max_value == min_value) ? max_value+2 : max_value+1;
+ else if (max_value >= 15)
+ eq_value=(max_value == min_value) ? max_value+6 : max_value+5;
+ }
+ /* 630B0&B1 rule to determine the equalizer value */
+ if (revision == SIS630A_900_REV &&
+ (host_bridge_rev == SIS630B0 || host_bridge_rev == SIS630B1)) {
+ if (max_value == 0)
+ eq_value=3;
+ else
+ eq_value=(max_value+min_value+1)/2;
+ }
+ /* write equalizer value and setting */
+ reg14h=mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ reg14h=(reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8);
+ reg14h=(reg14h | 0x6000) & 0xFDFF;
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h);
+ }
+ else {
+ /* link down: just re-enable auto-detect */
+ reg14h=mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ if (revision == SIS630A_900_REV &&
+ (host_bridge_rev == SIS630B0 || host_bridge_rev == SIS630B1))
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, (reg14h | 0x2200) & 0xBFFF);
+ else
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, (reg14h | 0x2000) & 0xBFFF);
+ }
+ return;
+}
+
+
+/* on each timer ticks we check two things, Link Status (ON/OFF) and
+ Link Mode (10/100/Full/Half)
+*/
+static void sis900_timer(unsigned long data)
+{
+ struct device *net_dev = (struct device *)data;
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ struct mii_phy *mii_phy = sis_priv->mii;
+ static int next_tick = 5*HZ;
+ u16 status;
+ u8 revision;
+
+ /* autonegotiation still in progress: read out the result if ready,
+ then poll again in 1 second */
+ if(!sis_priv->autong_complete){
+ int speed, duplex = 0;
+
+ sis900_read_mode(net_dev, &speed, &duplex);
+ if(duplex){
+ sis900_set_mode(net_dev->base_addr, speed, duplex);
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+ }
+
+ sis_priv->timer.expires = jiffies + HZ;
+ add_timer(&sis_priv->timer);
+ return;
+ }
+
+ /* status read twice because the link bit is latched */
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ /* Link OFF -> ON */
+ if ( !sis_priv->LinkOn ) {
+LookForLink:
+ /* Search for new PHY */
+ status = sis900_default_phy( net_dev );
+ mii_phy = sis_priv->mii;
+
+ if( status & MII_STAT_LINK ){
+ sis900_check_mode(net_dev, mii_phy);
+ sis_priv->LinkOn = TRUE;
+ }
+ }
+ /* Link ON -> OFF */
+ else{
+ if( !(status & MII_STAT_LINK) ){
+ sis_priv->LinkOn = FALSE;
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+
+ /* Change mode issue */
+ if( (mii_phy->phy_id0 == 0x001D) &&
+ ( (mii_phy->phy_id1 & 0xFFF0) == 0x8000 ))
+ sis900_reset_phy( net_dev, sis_priv->cur_phy );
+
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+
+ /* immediately try to re-acquire a link */
+ goto LookForLink;
+ }
+ }
+
+ sis_priv->timer.expires = jiffies + next_tick;
+ add_timer(&sis_priv->timer);
+}
+
+/* Decide how to configure the MAC for the given PHY: LAN PHYs get
+ capability advertisement plus autonegotiation; HOME PHYs are forced
+ to HomePNA speed at half duplex with the EXD config bit set. */
+static void sis900_check_mode (struct device *net_dev, struct mii_phy *mii_phy)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int speed, duplex;
+
+ if( mii_phy->phy_types == LAN ){
+ outl( ~EXD & inl( ioaddr + cfg ), ioaddr + cfg);
+ sis900_set_capability(net_dev , mii_phy);
+ sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
+ }else{
+ outl(EXD | inl( ioaddr + cfg ), ioaddr + cfg);
+ speed = HW_SPEED_HOME;
+ duplex = FDX_CAPABLE_HALF_SELECTED;
+ sis900_set_mode(net_dev->base_addr, speed, duplex);
+ /* no negotiation needed for HOME mode */
+ sis_priv->autong_complete = 1;
+ }
+}
+
+/* Program the MAC tx/rx configuration registers for the given speed
+ and duplex: DMA burst size depends on EDB master mode, drain
+ thresholds on speed, and TxCSI/TxHBI/RxATX on full duplex. */
+static void sis900_set_mode (long ioaddr, int speed, int duplex)
+{
+ u32 tx_flags = 0, rx_flags = 0;
+
+ if( inl(ioaddr + cfg) & EDB_MASTER_EN ){
+ tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) | (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_64 << RxMXDMA_shift;
+ }
+ else{
+ tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) | (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_512 << RxMXDMA_shift;
+ }
+
+ if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS ) {
+ rx_flags |= (RxDRNT_10 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_10 << TxDRNT_shift);
+ }
+ else {
+ rx_flags |= (RxDRNT_100 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_100 << TxDRNT_shift);
+ }
+
+ if (duplex == FDX_CAPABLE_FULL_SELECTED) {
+ tx_flags |= (TxCSI | TxHBI);
+ rx_flags |= RxATX;
+ }
+
+ outl (tx_flags, ioaddr + txcfg);
+ outl (rx_flags, ioaddr + rxcfg);
+}
+
+
+/* (Re)start MII autonegotiation on the given PHY.  If the link is
+ down the attempt is abandoned and autong_complete is set so the
+ timer does not wait for a result. */
+static void sis900_auto_negotiate(struct device *net_dev, int phy_addr)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ int i = 0;
+ u32 status;
+
+ /* two reads to refresh the latched link bit */
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK)){
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+ sis_priv->autong_complete = 1;
+ sis_priv->LinkOn = FALSE;
+ return;
+ }
+
+ /* (Re)start AutoNegotiate */
+ mdio_write(net_dev, phy_addr, MII_CONTROL,
+ MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
+ sis_priv->autong_complete = 0;
+}
+
+
+/* Read the autonegotiation result: intersect our advertisement with
+ the link partner's, derive *speed / *duplex (defaults 10/half), mark
+ negotiation complete and log the result.  Leaves *speed / *duplex
+ untouched when the link is down. */
+static void sis900_read_mode(struct device *net_dev, int *speed, int *duplex)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ struct mii_phy *phy = sis_priv->mii;
+ int phy_addr = sis_priv->cur_phy;
+ u32 status;
+ u16 autoadv, autorec;
+ int i = 0;
+
+ /* two reads to refresh the latched link bit */
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK)) return;
+
+ /* AutoNegotiate completed */
+ autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
+ autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
+ status = autoadv & autorec;
+
+ *speed = HW_SPEED_10_MBPS;
+ *duplex = FDX_CAPABLE_HALF_SELECTED;
+
+ if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
+ *speed = HW_SPEED_100_MBPS;
+ if (status & ( MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+
+ sis_priv->autong_complete = 1;
+
+ /* Workaround for Realtek RTL8201 PHY issue */
+ if((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)){
+ if(mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+ if(mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
+ *speed = HW_SPEED_100_MBPS;
+ }
+
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
+ net_dev->name,
+ *speed == HW_SPEED_100_MBPS ?
+ "100mbps" : "10mbps",
+ *duplex == FDX_CAPABLE_FULL_SELECTED ?
+ "full" : "half");
+}
+
+
+/* Transmit watchdog: mask interrupts, drop every pending tx buffer
+ (counting them as tx_dropped), clear busy state, kick the transmit
+ state machine and re-enable interrupts. */
+static void sis900_tx_timeout(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n",
+ net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+
+ /* discard unsent packets, should this code section be protected by
+ cli(), sti() ?? */
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (sis_priv->tx_skbuff[i] != NULL) {
+ dev_free_skb(sis_priv->tx_skbuff[i]);
+ sis_priv->tx_skbuff[i] = 0;
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ sis_priv->stats.tx_dropped++;
+ }
+ }
+ net_dev->trans_start = jiffies;
+ net_dev->tbusy = sis_priv->tx_full = 0;
+
+ /* FIXME: Should we restart the transmission thread here ?? */
+ outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ return;
+}
+
+/* Queue one packet for transmission.  Returns 0 when queued, 1 when
+ the device is busy (possibly after firing the tx watchdog). */
+static int
+sis900_start_xmit(struct sk_buff *skb, struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry;
+
+ /* test tbusy to see if we have timeout situation then set it */
+ if (test_and_set_bit(0, (void*)&net_dev->tbusy) != 0) {
+ if (jiffies - net_dev->trans_start > TX_TIMEOUT)
+ sis900_tx_timeout(net_dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = sis_priv->cur_tx % NUM_TX_DESC;
+ sis_priv->tx_skbuff[entry] = skb;
+
+ /* set the transmit buffer descriptor and enable Transmit State Machine */
+ sis_priv->tx_ring[entry].bufptr = virt_to_bus(skb->data);
+ sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+
+ if (++sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC) {
+ /* Typical path, clear tbusy to indicate more
+ transmission is possible */
+ clear_bit(0, (void*)&net_dev->tbusy);
+ } else {
+ /* no more transmit descriptor available, tbusy remains set */
+ sis_priv->tx_full = 1;
+ }
+
+ net_dev->trans_start = jiffies;
+
+ /* NOTE(review): busy-wait delay loop with no documented rationale
+ ("GRUIIIIIK"); presumably a Mach-port timing workaround -- confirm */
+ {
+ int i;
+ for (i = 0; i < 100000; i++); /* GRUIIIIIK */
+ }
+
+ if (sis900_debug > 3)
+ printk(KERN_INFO "%s: Queued Tx packet at %p size %d "
+ "to slot %d.\n",
+ net_dev->name, skb->data, (int)skb->len, entry);
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+/* Loops reading the interrupt status register, dispatching to
+ sis900_rx / sis900_finish_xmit, until no interesting bit is set or
+ max_interrupt_work iterations have been spent. */
+static void sis900_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct device *net_dev = (struct device *)dev_instance;
+ int boguscnt = max_interrupt_work;
+ long ioaddr = net_dev->base_addr;
+ u32 status;
+
+#if defined(__i386__)
+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&net_dev->interrupt)) {
+ printk(KERN_INFO "%s: SMP simultaneous entry of "
+ "an interrupt handler.\n", net_dev->name);
+ net_dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#else
+ if (net_dev->interrupt) {
+ printk(KERN_INFO "%s: Re-entering the interrupt handler.\n",
+ net_dev->name);
+ return;
+ }
+ net_dev->interrupt = 1;
+#endif
+
+ do {
+ /* reading isr acknowledges (clears) the pending bits */
+ status = inl(ioaddr + isr);
+
+ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+ /* nothing interesting happened */
+ break;
+
+ /* why don't we break after Tx/Rx case ?? keyword: full-duplex */
+ if (status & (RxORN | RxERR | RxOK))
+ /* Rx interrupt */
+ sis900_rx(net_dev);
+
+ if (status & (TxURN | TxERR | TxIDLE))
+ /* Tx interrupt */
+ sis900_finish_xmit(net_dev);
+
+ /* something strange happened !!! */
+ if (status & HIBERR) {
+ printk(KERN_INFO "%s: Abnormal interrupt,"
+ "status %#8.8x.\n", net_dev->name, status);
+ break;
+ }
+ if (--boguscnt < 0) {
+ printk(KERN_INFO "%s: Too much work at interrupt, "
+ "interrupt status = %#8.8x.\n",
+ net_dev->name, status);
+ break;
+ }
+ } while (1);
+
+ if (sis900_debug > 4)
+ printk(KERN_INFO "%s: exiting interrupt, "
+ "interrupt status = 0x%#8.8x.\n",
+ net_dev->name, inl(ioaddr + isr));
+
+#if defined(__i386__)
+ clear_bit(0, (void*)&net_dev->interrupt);
+#else
+ net_dev->interrupt = 0;
+#endif
+ return;
+}
+
+/* Process receive interrupt events, put buffer to higher layer and refill buffer pool
+ Note: This fucntion is called by interrupt handler, don't do "too much" work here */
+static int sis900_rx(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
+ u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
+
+ if (sis900_debug > 4)
+ printk(KERN_INFO "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
+ "status:0x%8.8x\n",
+ sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
+
+ while (rx_status & OWN) {
+ unsigned int rx_size;
+
+ rx_size = (rx_status & DSIZE) - CRC_SIZE;
+
+ if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
+ /* corrupted packet received */
+ if (sis900_debug > 4)
+ printk(KERN_INFO "%s: Corrupted packet "
+ "received, buffer status = 0x%8.8x.\n",
+ net_dev->name, rx_status);
+ sis_priv->stats.rx_errors++;
+ if (rx_status & OVERRUN)
+ sis_priv->stats.rx_over_errors++;
+ if (rx_status & (TOOLONG|RUNT))
+ sis_priv->stats.rx_length_errors++;
+ if (rx_status & (RXISERR | FAERR))
+ sis_priv->stats.rx_frame_errors++;
+ if (rx_status & CRCERR)
+ sis_priv->stats.rx_crc_errors++;
+ /* reset buffer descriptor state */
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ } else {
+ struct sk_buff * skb;
+
+ /* This situation should never happen, but due to
+ some unknown bugs, it is possible that
+ we are working on NULL sk_buff :-( */
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ printk(KERN_INFO "%s: NULL pointer "
+ "encountered in Rx ring, skipping\n",
+ net_dev->name);
+ break;
+ }
+
+ /* give the socket buffer to upper layers */
+ skb = sis_priv->rx_skbuff[entry];
+ skb_put(skb, rx_size);
+ skb->protocol = eth_type_trans(skb, net_dev);
+ netif_rx(skb);
+
+ /* some network statistics */
+ if ((rx_status & BCAST) == MCAST)
+ sis_priv->stats.multicast++;
+ net_dev->last_rx = jiffies;
+ /* sis_priv->stats.rx_bytes += rx_size;*/
+ sis_priv->stats.rx_packets++;
+
+ /* refill the Rx buffer, what if there is not enough memory for
+ new socket buffer ?? */
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for skbuff, this makes a "hole"
+ on the buffer ring, it is not clear how the
+ hardware will react to this kind of degenerated
+ buffer */
+ printk(KERN_INFO "%s: Memory squeeze,"
+ "deferring packet.\n",
+ net_dev->name);
+ sis_priv->rx_skbuff[entry] = NULL;
+ /* reset buffer descriptor state */
+ sis_priv->rx_ring[entry].cmdsts = 0;
+ sis_priv->rx_ring[entry].bufptr = 0;
+ sis_priv->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr = virt_to_bus(skb->tail);
+ sis_priv->dirty_rx++;
+ }
+ sis_priv->cur_rx++;
+ entry = sis_priv->cur_rx % NUM_RX_DESC;
+ rx_status = sis_priv->rx_ring[entry].cmdsts;
+ } // while
+
+ /* refill the Rx buffer, what if the rate of refilling is slower than
+ consuming ?? */
+ for (;sis_priv->cur_rx - sis_priv->dirty_rx > 0; sis_priv->dirty_rx++) {
+ struct sk_buff *skb;
+
+ entry = sis_priv->dirty_rx % NUM_RX_DESC;
+
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for skbuff, this makes a "hole"
+ on the buffer ring, it is not clear how the
+ hardware will react to this kind of degenerated
+ buffer */
+ printk(KERN_INFO "%s: Memory squeeze,"
+ "deferring packet.\n",
+ net_dev->name);
+ sis_priv->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr = virt_to_bus(skb->tail);
+ }
+ }
+
+ /* re-enable the potentially idle receive state machine */
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr );
+
+ return 0;
+}
+
+/* finish up transmission of packets, check for error condition and free skbuff etc.
+ Note: This function is called by interrupt handler, don't do "too much" work here */
+static void sis900_finish_xmit (struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+
+ for (; sis_priv->dirty_tx < sis_priv->cur_tx; sis_priv->dirty_tx++) {
+ unsigned int entry;
+ u32 tx_status;
+
+ entry = sis_priv->dirty_tx % NUM_TX_DESC;
+ tx_status = sis_priv->tx_ring[entry].cmdsts;
+
+ if (tx_status & OWN) {
+ /* The packet is not transmitted yet (owned by hardware) !
+ Note: the interrupt is generated only when Tx Machine
+ is idle, so this is an almost impossible case */
+ break;
+ }
+
+ if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
+ /* packet unsuccessfully transmitted */
+ if (sis900_debug > 4)
+ printk(KERN_INFO "%s: Transmit "
+ "error, Tx status %8.8x.\n",
+ net_dev->name, tx_status);
+ sis_priv->stats.tx_errors++;
+ if (tx_status & UNDERRUN)
+ sis_priv->stats.tx_fifo_errors++;
+ if (tx_status & ABORT)
+ sis_priv->stats.tx_aborted_errors++;
+ if (tx_status & NOCARRIER)
+ sis_priv->stats.tx_carrier_errors++;
+ if (tx_status & OWCOLL)
+ sis_priv->stats.tx_window_errors++;
+ } else {
+ /* packet successfully transmitted */
+ sis_priv->stats.collisions += (tx_status & COLCNT) >> 16;
+ /* sis_priv->stats.tx_bytes += tx_status & DSIZE;*/
+ sis_priv->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ dev_free_skb(sis_priv->tx_skbuff[entry]);
+ sis_priv->tx_skbuff[entry] = NULL;
+ sis_priv->tx_ring[entry].bufptr = 0;
+ sis_priv->tx_ring[entry].cmdsts = 0;
+ }
+
+ if (sis_priv->tx_full && net_dev->tbusy &&
+ sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
+ /* The ring is no longer full, clear tbusy, tx_full and
+ schedule more transmission by marking NET_BH */
+ sis_priv->tx_full = 0;
+ clear_bit(0, (void *)&net_dev->tbusy);
+ mark_bh(NET_BH);
+ }
+}
+
+static int
+sis900_close(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ int i;
+
+ net_dev->start = 0;
+ net_dev->tbusy = 1;
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+ outl(0x0000, ioaddr + ier);
+
+ /* Stop the chip's Tx and Rx Status Machine */
+ outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+
+ del_timer(&sis_priv->timer);
+
+ free_irq(net_dev->irq, net_dev);
+
+ /* Free Tx and RX skbuff */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ if (sis_priv->rx_skbuff[i] != NULL)
+ dev_free_skb(sis_priv->rx_skbuff[i]);
+ sis_priv->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (sis_priv->tx_skbuff[i] != NULL)
+ dev_free_skb(sis_priv->tx_skbuff[i]);
+ sis_priv->tx_skbuff[i] = 0;
+ }
+
+ /* Green! Put the chip in low-power mode. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int mii_ioctl(struct device *net_dev, struct ifreq *rq, int cmd)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+ data[0] = sis_priv->mii->phy_addr;
+ /* Fall Through */
+ case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+ data[3] = mdio_read(net_dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+ if (!suser())
+ return -EPERM;
+ mdio_write(net_dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct enet_statistics *
+sis900_get_stats(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+
+ return &sis_priv->stats;
+}
+
+
+/* SiS 900 uses the most significant 7 bits to index a 128 bits multicast
+ * hash table, which makes this function a little bit different from other drivers
+ * SiS 900 B0 & 635 M/B uses the most significant 8 bits to index 256 bits
+ * multicast hash table.
+ */
+static u16 sis900_compute_hashtable_index(u8 *addr, u8 revision)
+{
+
+/* what is the correct value of the POLYNOMIAL ??
+ Donald Becker use 0x04C11DB7U
+ Joseph Zbiciak im14u2c@primenet.com gives me the
+ correct answer, thank you Joe !! */
+#define POLYNOMIAL 0x04C11DB7L
+ u32 crc = 0xffffffff, msb;
+ int i, j;
+ u32 byte;
+
+ for (i = 0; i < 6; i++) {
+ byte = *addr++;
+ for (j = 0; j < 8; j++) {
+ msb = crc >> 31;
+ crc <<= 1;
+ if (msb ^ (byte & 1)) {
+ crc ^= POLYNOMIAL;
+ }
+ byte >>= 1;
+ }
+ }
+
+ /* leave 8 or 7 most significant bits */
+ if((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
+ return ((int)(crc >> 24));
+ else
+ return ((int)(crc >> 25));
+}
+
+static void set_rx_mode(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private * sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */
+ int i, table_entries;
+ u32 rx_mode;
+ u8 revision;
+
+ /* 635 Hash Table entries = 256 bits (2^8) */
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ if((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
+ table_entries = 16;
+ else
+ table_entries = 8;
+
+ if (net_dev->flags & IFF_PROMISC) {
+ /* Accept any kinds of packets */
+ rx_mode = RFPromiscuous;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ (net_dev->flags & IFF_ALLMULTI)) {
+ /* too many multicast addresses or accept all multicast packets */
+ rx_mode = RFAAB | RFAAM;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else {
+ /* Accept Broadcast packets, destination addresses match our MAC address,
+ use Receive Filter to reject unwanted MCAST packets */
+ struct dev_mc_list *mclist;
+ rx_mode = RFAAB;
+ for (i = 0, mclist = net_dev->mc_list; mclist && i < net_dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(sis900_compute_hashtable_index(mclist->dmi_addr, revision),
+ mc_filter);
+ }
+
+ /* update Multicast Hash Table in Receive Filter */
+ for (i = 0; i < table_entries; i++) {
+ /* why plus 0x04 ??, That makes the correct value for hash table. */
+ outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr);
+ outl(mc_filter[i], ioaddr + rfdr);
+ }
+
+ outl(RFEN | rx_mode, ioaddr + rfcr);
+
+ /* sis900 is capable of looping back packets at MAC level for debugging purpose */
+ if (net_dev->flags & IFF_LOOPBACK) {
+ u32 cr_saved;
+ /* We must disable Tx/Rx before setting loopback mode */
+ cr_saved = inl(ioaddr + cr);
+ outl(cr_saved | TxDIS | RxDIS, ioaddr + cr);
+ /* enable loopback */
+ outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg);
+ outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg);
+ /* restore cr */
+ outl(cr_saved, ioaddr + cr);
+ }
+
+ return;
+}
+
+static void sis900_reset(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ int i = 0;
+ u8 revision;
+ u32 status = TxRCMP | RxRCMP;
+
+ outl(0, ioaddr + ier);
+ outl(0, ioaddr + imr);
+ outl(0, ioaddr + rfcr);
+
+ outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr);
+
+ /* Check that the chip has finished the reset. */
+ while (status && (i++ < 1000)) {
+ status ^= (inl(isr + ioaddr) & status);
+ }
+
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ if( (revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV) )
+ outl(PESEL | RND_CNT, ioaddr + cfg);
+ else
+ outl(PESEL, ioaddr + cfg);
+}
+
+#ifdef MODULE
+int init_module(void)
+{
+ return sis900_probe(NULL);
+}
+
+void
+cleanup_module(void)
+{
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_sis900_dev) {
+ struct sis900_private *sis_priv =
+ (struct sis900_private *)root_sis900_dev->priv;
+ struct device *next_dev = sis_priv->next_module;
+ struct mii_phy *phy = NULL;
+
+ while(sis_priv->first_mii){
+ phy = sis_priv->first_mii;
+ sis_priv->first_mii = phy->next;
+ kfree(phy);
+ }
+
+ unregister_netdev(root_sis900_dev);
+ release_region(root_sis900_dev->base_addr,
+ sis_priv->mac->io_size);
+ kfree(sis_priv);
+ kfree(root_sis900_dev);
+
+ root_sis900_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
diff --git a/linux/src/drivers/net/sis900.h b/linux/src/drivers/net/sis900.h
new file mode 100644
index 0000000..2153625
--- /dev/null
+++ b/linux/src/drivers/net/sis900.h
@@ -0,0 +1,284 @@
+/* sis900.h Definitions for SiS ethernet controllers including 7014/7016 and 900
+ * Copyright 1999 Silicon Integrated System Corporation
+ * References:
+ * SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ * preliminary Rev. 1.0 Jan. 14, 1998
+ * SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ * preliminary Rev. 1.0 Nov. 10, 1998
+ * SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ * preliminary Rev. 1.0 Jan. 18, 1998
+ * http://www.sis.com.tw/support/databook.htm
+ */
+
+/* MAC operational registers of SiS 7016 and SiS 900 ethernet controller */
+/* The I/O extent, SiS 900 needs 256 bytes of io address */
+#define SIS900_TOTAL_SIZE 0x100
+
+/* Symbolic offsets to registers. */
+enum sis900_registers {
+ cr=0x0, //Command Register
+ cfg=0x4, //Configuration Register
+ mear=0x8, //EEPROM Access Register
+ ptscr=0xc, //PCI Test Control Register
+ isr=0x10, //Interrupt Status Register
+ imr=0x14, //Interrupt Mask Register
+ ier=0x18, //Interrupt Enable Register
+ epar=0x18, //Enhanced PHY Access Register
+ txdp=0x20, //Transmit Descriptor Pointer Register
+ txcfg=0x24, //Transmit Configuration Register
+ rxdp=0x30, //Receive Descriptor Pointer Register
+ rxcfg=0x34, //Receive Configuration Register
+ flctrl=0x38, //Flow Control Register
+ rxlen=0x3c, //Receive Packet Length Register
+ rfcr=0x48, //Receive Filter Control Register
+ rfdr=0x4C, //Receive Filter Data Register
+ pmctrl=0xB0, //Power Management Control Register
+ pmer=0xB4 //Power Management Wake-up Event Register
+};
+
+/* Symbolic names for bits in various registers */
+enum sis900_command_register_bits {
+ RELOAD = 0x00000400, ACCESSMODE = 0x00000200,/* ET */
+ RESET = 0x00000100, SWI = 0x00000080, RxRESET = 0x00000020,
+ TxRESET = 0x00000010, RxDIS = 0x00000008, RxENA = 0x00000004,
+ TxDIS = 0x00000002, TxENA = 0x00000001
+};
+
+enum sis900_configuration_register_bits {
+ DESCRFMT = 0x00000100 /* 7016 specific */, REQALG = 0x00000080,
+ SB = 0x00000040, POW = 0x00000020, EXD = 0x00000010,
+ PESEL = 0x00000008, LPM = 0x00000004, BEM = 0x00000001,
+ /* 635 & 900B Specific */
+ RND_CNT = 0x00000400, FAIR_BACKOFF = 0x00000200,
+ EDB_MASTER_EN = 0x00002000
+};
+
+enum sis900_eeprom_access_reigster_bits {
+ MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */
+ EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002,
+ EEDI = 0x00000001
+};
+
+enum sis900_interrupt_register_bits {
+ WKEVT = 0x10000000, TxPAUSEEND = 0x08000000, TxPAUSE = 0x04000000,
+ TxRCMP = 0x02000000, RxRCMP = 0x01000000, DPERR = 0x00800000,
+ SSERR = 0x00400000, RMABT = 0x00200000, RTABT = 0x00100000,
+ RxSOVR = 0x00010000, HIBERR = 0x00008000, SWINT = 0x00001000,
+ MIBINT = 0x00000800, TxURN = 0x00000400, TxIDLE = 0x00000200,
+ TxERR = 0x00000100, TxDESC = 0x00000080, TxOK = 0x00000040,
+ RxORN = 0x00000020, RxIDLE = 0x00000010, RxEARLY = 0x00000008,
+ RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001
+};
+
+enum sis900_interrupt_enable_reigster_bits {
+ IE = 0x00000001
+};
+
+/* maximum dma burst for transmission and receive */
+#define MAX_DMA_RANGE 7 /* actually 0 means MAXIMUM !! */
+#define TxMXDMA_shift 20
+#define RxMXDMA_shift 20
+
+enum sis900_tx_rx_dma{
+ DMA_BURST_512 = 0, DMA_BURST_64 = 5
+};
+
+/* transmit FIFO thresholds */
+#define TX_FILL_THRESH 16 /* 1/4 FIFO size */
+#define TxFILLT_shift 8
+#define TxDRNT_shift 0
+#define TxDRNT_100 48 /* 3/4 FIFO size */
+#define TxDRNT_10 16 /* 1/2 FIFO size */
+
+enum sis900_transmit_config_register_bits {
+ TxCSI = 0x80000000, TxHBI = 0x40000000, TxMLB = 0x20000000,
+ TxATP = 0x10000000, TxIFG = 0x0C000000, TxFILLT = 0x00003F00,
+ TxDRNT = 0x0000003F
+};
+
+/* receive FIFO thresholds */
+#define RxDRNT_shift 1
+#define RxDRNT_100 16 /* 1/2 FIFO size */
+#define RxDRNT_10 24 /* 3/4 FIFO size */
+
+enum sis900_reveive_config_register_bits {
+ RxAEP = 0x80000000, RxARP = 0x40000000, RxATX = 0x10000000,
+ RxAJAB = 0x08000000, RxDRNT = 0x0000007F
+};
+
+#define RFAA_shift 28
+#define RFADDR_shift 16
+
+enum sis900_receive_filter_control_register_bits {
+ RFEN = 0x80000000, RFAAB = 0x40000000, RFAAM = 0x20000000,
+ RFAAP = 0x10000000, RFPromiscuous = (RFAAB|RFAAM|RFAAP)
+};
+
+enum sis900_reveive_filter_data_mask {
+ RFDAT = 0x0000FFFF
+};
+
+/* EEPROM Addresses */
+enum sis900_eeprom_address {
+ EEPROMSignature = 0x00, EEPROMVendorID = 0x02, EEPROMDeviceID = 0x03,
+ EEPROMMACAddr = 0x08, EEPROMChecksum = 0x0b
+};
+
+/* The EEPROM commands include the always-set leading bit. Refer to NM93Cxx datasheet */
+enum sis900_eeprom_command {
+ EEread = 0x0180, EEwrite = 0x0140, EEerase = 0x01C0,
+ EEwriteEnable = 0x0130, EEwriteDisable = 0x0100,
+ EEeraseAll = 0x0120, EEwriteAll = 0x0110,
+ EEaddrMask = 0x013F, EEcmdShift = 16
+};
+
+/* For SiS962, request the eeprom software access */
+enum sis962_eeprom_command {
+ EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100
+};
+
+/* Management Data I/O (mdio) frame */
+#define MIIread 0x6000
+#define MIIwrite 0x5002
+#define MIIpmdShift 7
+#define MIIregShift 2
+#define MIIcmdLen 16
+#define MIIcmdShift 16
+
+/* Buffer Descriptor Status*/
+enum sis900_buffer_status {
+ OWN = 0x80000000, MORE = 0x40000000, INTR = 0x20000000,
+ SUPCRC = 0x10000000, INCCRC = 0x10000000,
+ OK = 0x08000000, DSIZE = 0x00000FFF
+};
+/* Status for TX Buffers */
+enum sis900_tx_buffer_status {
+ ABORT = 0x04000000, UNDERRUN = 0x02000000, NOCARRIER = 0x01000000,
+ DEFERD = 0x00800000, EXCDEFER = 0x00400000, OWCOLL = 0x00200000,
+ EXCCOLL = 0x00100000, COLCNT = 0x000F0000
+};
+
+enum sis900_rx_bufer_status {
+ OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000,
+ MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
+ RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000,
+ FAERR = 0x00040000, LOOPBK = 0x00020000, RXCOL = 0x00010000
+};
+
+/* MII register offsets */
+enum mii_registers {
+ MII_CONTROL = 0x0000, MII_STATUS = 0x0001, MII_PHY_ID0 = 0x0002,
+ MII_PHY_ID1 = 0x0003, MII_ANADV = 0x0004, MII_ANLPAR = 0x0005,
+ MII_ANEXT = 0x0006
+};
+
+/* mii registers specific to SiS 900 */
+enum sis_mii_registers {
+ MII_CONFIG1 = 0x0010, MII_CONFIG2 = 0x0011, MII_STSOUT = 0x0012,
+ MII_MASK = 0x0013, MII_RESV = 0x0014
+};
+
+/* mii registers specific to ICS 1893 */
+enum ics_mii_registers {
+ MII_EXTCTRL = 0x0010, MII_QPDSTS = 0x0011, MII_10BTOP = 0x0012,
+ MII_EXTCTRL2 = 0x0013
+};
+
+/* mii registers specific to AMD 79C901 */
+enum amd_mii_registers {
+ MII_STATUS_SUMMARY = 0x0018
+};
+
+/* MII Control register bit definitions. */
+enum mii_control_register_bits {
+ MII_CNTL_FDX = 0x0100, MII_CNTL_RST_AUTO = 0x0200,
+ MII_CNTL_ISOLATE = 0x0400, MII_CNTL_PWRDWN = 0x0800,
+ MII_CNTL_AUTO = 0x1000, MII_CNTL_SPEED = 0x2000,
+ MII_CNTL_LPBK = 0x4000, MII_CNTL_RESET = 0x8000
+};
+
+/* MII Status register bit */
+enum mii_status_register_bits {
+ MII_STAT_EXT = 0x0001, MII_STAT_JAB = 0x0002,
+ MII_STAT_LINK = 0x0004, MII_STAT_CAN_AUTO = 0x0008,
+ MII_STAT_FAULT = 0x0010, MII_STAT_AUTO_DONE = 0x0020,
+ MII_STAT_CAN_T = 0x0800, MII_STAT_CAN_T_FDX = 0x1000,
+ MII_STAT_CAN_TX = 0x2000, MII_STAT_CAN_TX_FDX = 0x4000,
+ MII_STAT_CAN_T4 = 0x8000
+};
+
+#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */
+#define MII_ID1_MODEL 0x03F0 /* model number */
+#define MII_ID1_REV 0x000F /* model number */
+
+/* MII NWAY Register Bits ...
+ valid for the ANAR (Auto-Negotiation Advertisement) and
+ ANLPAR (Auto-Negotiation Link Partner) registers */
+enum mii_nway_register_bits {
+ MII_NWAY_NODE_SEL = 0x001f, MII_NWAY_CSMA_CD = 0x0001,
+ MII_NWAY_T = 0x0020, MII_NWAY_T_FDX = 0x0040,
+ MII_NWAY_TX = 0x0080, MII_NWAY_TX_FDX = 0x0100,
+ MII_NWAY_T4 = 0x0200, MII_NWAY_PAUSE = 0x0400,
+ MII_NWAY_RF = 0x2000, MII_NWAY_ACK = 0x4000,
+ MII_NWAY_NP = 0x8000
+};
+
+enum mii_stsout_register_bits {
+ MII_STSOUT_LINK_FAIL = 0x4000,
+ MII_STSOUT_SPD = 0x0080, MII_STSOUT_DPLX = 0x0040
+};
+
+enum mii_stsics_register_bits {
+ MII_STSICS_SPD = 0x8000, MII_STSICS_DPLX = 0x4000,
+ MII_STSICS_LINKSTS = 0x0001
+};
+
+enum mii_stssum_register_bits {
+ MII_STSSUM_LINK = 0x0008, MII_STSSUM_DPLX = 0x0004,
+ MII_STSSUM_AUTO = 0x0002, MII_STSSUM_SPD = 0x0001
+};
+
+enum sis900_revision_id {
+ SIS630A_900_REV = 0x80, SIS630E_900_REV = 0x81,
+ SIS630S_900_REV = 0x82, SIS630EA1_900_REV = 0x83,
+ SIS630ET_900_REV = 0x84, SIS635A_900_REV = 0x90,
+ SIS962_900_REV = 0X91, SIS900B_900_REV = 0x03
+};
+
+enum sis630_revision_id {
+ SIS630A0 = 0x00, SIS630A1 = 0x01,
+ SIS630B0 = 0x10, SIS630B1 = 0x11
+};
+
+#define FDX_CAPABLE_DUPLEX_UNKNOWN 0
+#define FDX_CAPABLE_HALF_SELECTED 1
+#define FDX_CAPABLE_FULL_SELECTED 2
+
+#define HW_SPEED_UNCONFIG 0
+#define HW_SPEED_HOME 1
+#define HW_SPEED_10_MBPS 10
+#define HW_SPEED_100_MBPS 100
+#define HW_SPEED_DEFAULT (HW_SPEED_100_MBPS)
+
+#define CRC_SIZE 4
+#define MAC_HEADER_SIZE 14
+
+#define TX_BUF_SIZE 1536
+#define RX_BUF_SIZE 1536
+
+#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */
+#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */
+
+#define TRUE 1
+#define FALSE 0
+
+/* PCI stuff, should be moved to pci.h */
+#define PCI_DEVICE_ID_SI_900 0x900
+#define PCI_DEVICE_ID_SI_7016 0x7016
+#define SIS630_VENDOR_ID 0x1039
+#define SIS630_DEVICE_ID 0x0630
+
+/* ioctl for accessing MII transceiver */
+#define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Get the PHY in use. */
+#define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read a PHY register. */
+#define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write a PHY register */
diff --git a/linux/src/drivers/net/sk_g16.c b/linux/src/drivers/net/sk_g16.c
new file mode 100644
index 0000000..13ebb3e
--- /dev/null
+++ b/linux/src/drivers/net/sk_g16.c
@@ -0,0 +1,2110 @@
+/*-
+ * Copyright (C) 1994 by PJD Weichmann & SWS Bern, Switzerland
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.c
+ *
+ * Version : $Revision: 1.1 $
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/26
+ * Last Updated : $Date: 1999/04/26 05:52:37 $
+ *
+ * Description : Schneider & Koch G16 Ethernet Device Driver for
+ * Linux Kernel >= 1.1.22
+ * Update History :
+ *
+-*/
+
+static const char *rcsid = "$Id: sk_g16.c,v 1.1 1999/04/26 05:52:37 tb Exp $";
+
+/*
+ * The Schneider & Koch (SK) G16 Network device driver is based
+ * on the 'ni6510' driver from Michael Hipp which can be found at
+ * ftp://sunsite.unc.edu/pub/Linux/system/Network/drivers/nidrivers.tar.gz
+ *
+ * Sources: 1) ni6510.c by M. Hipp
+ * 2) depca.c by D.C. Davies
+ * 3) skeleton.c by D. Becker
+ * 4) Am7990 Local Area Network Controller for Ethernet (LANCE),
+ * AMD, Pub. #05698, June 1989
+ *
+ * Many Thanks for helping me to get things working to:
+ *
+ * A. Cox (A.Cox@swansea.ac.uk)
+ * M. Hipp (mhipp@student.uni-tuebingen.de)
+ * R. Bolz (Schneider & Koch, Germany)
+ *
+ * See README.sk_g16 for details about limitations and bugs for the
+ * current version.
+ *
+ * To Do:
+ * - Support of SK_G8 and other SK Network Cards.
+ * - Autoset memory mapped RAM. Check for free memory and then
+ * configure RAM correctly.
+ * - SK_close should really set card in to initial state.
+ * - Test if IRQ 3 is not switched off. Use autoirq() functionality.
+ * (as in /drivers/net/skeleton.c)
+ * - Implement Multicast addressing. At minimum something like
+ * in depca.c.
+ * - Redo the statistics part.
+ * - Try to find out if the board is in 8 Bit or 16 Bit slot.
+ * If in 8 Bit mode don't use IRQ 11.
+ * - (Try to make it slightly faster.)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "sk_g16.h"
+
+/*
+ * Schneider & Koch Card Definitions
+ * =================================
+ */
+
+#define SK_NAME "SK_G16"
+
+/*
+ * SK_G16 Configuration
+ * --------------------
+ */
+
+/*
+ * Abbreviations
+ * -------------
+ *
+ * RAM - used for the 16KB shared memory
+ * Boot_ROM, ROM - are used for referencing the BootEPROM
+ *
+ * SK_BOOT_ROM and SK_ADDR are symbolic constants used to configure
+ * the behaviour of the driver and the SK_G16.
+ *
+ * ! See sk_g16.install on how to install and configure the driver !
+ *
+ * SK_BOOT_ROM defines if the Boot_ROM should be switched off or not.
+ *
+ * SK_ADDR defines the address where the RAM will be mapped into the real
+ * host memory.
+ * valid addresses are from 0xa0000 to 0xfc000 in 16Kbyte steps.
+ */
+
+#define SK_BOOT_ROM 1 /* 1=BootROM on 0=off */
+
+#define SK_ADDR 0xcc000
+
+/*
+ * In POS3 are bits A14-A19 of the address bus. These bits can be set
+ * to choose the RAM address. That's why we only can choose the RAM address
+ * in 16KB steps.
+ */
+
+#define POS_ADDR (rom_addr>>14) /* Do not change this line */
+
+/*
+ * SK_G16 I/O PORT's + IRQ's + Boot_ROM locations
+ * ----------------------------------------------
+ */
+
+/*
+ * As nearly every card has also SK_G16 a specified I/O Port region and
+ * only a few possible IRQ's.
+ * In the Installation Guide from Schneider & Koch is listed a possible
+ * Interrupt IRQ2. IRQ2 is always IRQ9 in boards with two cascaded interrupt
+ * controllers. So we use in SK_IRQS IRQ9.
+ */
+
+/* Don't touch any of the following #defines. */
+
+#define SK_IO_PORTS { 0x100, 0x180, 0x208, 0x220, 0x288, 0x320, 0x328, 0x390, 0 }
+
+#define SK_IRQS { 3, 5, 9, 11, 0 }
+
+#define SK_BOOT_ROM_LOCATIONS { 0xc0000, 0xc4000, 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xdc000, 0 }
+
+#define SK_BOOT_ROM_ID { 0x55, 0xaa, 0x10, 0x50, 0x06, 0x33 }
+
+/*
+ * SK_G16 POS REGISTERS
+ * --------------------
+ */
+
+/*
+ * SK_G16 has a Programmable Option Select (POS) Register.
+ * The POS is composed of 8 separate registers (POS0-7) which
+ * are I/O mapped on an address set by the W1 switch.
+ *
+ */
+
+#define SK_POS_SIZE 8 /* 8 I/O Ports are used by SK_G16 */
+
+#define SK_POS0 ioaddr /* Card-ID Low (R) */
+#define SK_POS1 ioaddr+1 /* Card-ID High (R) */
+#define SK_POS2 ioaddr+2 /* Card-Enable, Boot-ROM Disable (RW) */
+#define SK_POS3 ioaddr+3 /* Base address of RAM */
+#define SK_POS4 ioaddr+4 /* IRQ */
+
+/* POS5 - POS7 are unused */
+
+/*
+ * SK_G16 MAC PREFIX
+ * -----------------
+ */
+
+/*
+ * Schneider & Koch manufacturer code (00:00:5a, see SK_MAC0..SK_MAC2 below).
+ * This must be checked, that we are sure it is a SK card.
+ */
+
+#define SK_MAC0 0x00
+#define SK_MAC1 0x00
+#define SK_MAC2 0x5a
+
+/*
+ * SK_G16 ID
+ * ---------
+ */
+
+/*
+ * If POS0,POS1 contain the following ID, then we know
+ * at which I/O Port Address we are.
+ */
+
+#define SK_IDLOW 0xfd
+#define SK_IDHIGH 0x6a
+
+
+/*
+ * LANCE POS Bit definitions
+ * -------------------------
+ */
+
+#define SK_ROM_RAM_ON (POS2_CARD)
+#define SK_ROM_RAM_OFF (POS2_EPROM)
+#define SK_ROM_ON (inb(SK_POS2) & POS2_CARD)
+#define SK_ROM_OFF (inb(SK_POS2) | POS2_EPROM)
+#define SK_RAM_ON (inb(SK_POS2) | POS2_CARD)
+#define SK_RAM_OFF (inb(SK_POS2) & POS2_EPROM)
+
+#define POS2_CARD 0x0001 /* 1 = SK_G16 on 0 = off */
+#define POS2_EPROM 0x0002 /* 1 = Boot EPROM off 0 = on */
+
+/*
+ * SK_G16 Memory mapped Registers
+ * ------------------------------
+ *
+ */
+
+#define SK_IOREG (board->ioreg) /* LANCE data registers. */
+#define SK_PORT (board->port) /* Control, Status register */
+#define SK_IOCOM (board->iocom) /* I/O Command */
+
+/*
+ * SK_G16 Status/Control Register bits
+ * -----------------------------------
+ *
+ * (C) Controlreg (S) Statusreg
+ */
+
+/*
+ * Register transfer: 0 = no transfer
+ * 1 = transferring data between LANCE and I/O reg
+ */
+#define SK_IORUN 0x20
+
+/*
+ * LANCE interrupt: 0 = LANCE interrupt occurred
+ * 1 = no LANCE interrupt occurred
+ */
+#define SK_IRQ 0x10
+
+#define SK_RESET 0x08 /* Reset SK_CARD: 0 = RESET 1 = normal */
+#define SK_RW 0x02 /* 0 = write to 1 = read from */
+#define SK_ADR 0x01 /* 0 = REG DataPort 1 = RAP Reg addr port */
+
+
+#define SK_RREG SK_RW /* Transferdirection to read from lance */
+#define SK_WREG 0 /* Transferdirection to write to lance */
+#define SK_RAP SK_ADR /* Destination Register RAP */
+#define SK_RDATA 0 /* Destination Register REG DataPort */
+
+/*
+ * SK_G16 I/O Command
+ * ------------------
+ */
+
+/*
+ * Any bitcombination sets the internal I/O bit (transfer will start)
+ * when written to I/O Command
+ */
+
+#define SK_DOIO 0x80 /* Do Transfer */
+
+/*
+ * LANCE RAP (Register Address Port).
+ * ---------------------------------
+ */
+
+/*
+ * The LANCE internal registers are selected through the RAP.
+ * The Registers are:
+ *
+ * CSR0 - Status and Control flags
+ * CSR1 - Low order bits of initialize block (bits 15:00)
+ * CSR2 - High order bits of initialize block (bits 07:00, 15:08 are reserved)
+ * CSR3 - Allows redefinition of the Bus Master Interface.
+ * This register must be set to 0x0002, which means BSWAP = 0,
+ * ACON = 1, BCON = 0;
+ *
+ */
+
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+/*
+ * General Definitions
+ * ===================
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * We have 16KB RAM which can be accessed by the LANCE. In the
+ * memory are not only the buffers but also the ring descriptors and
+ * the initialize block.
+ * Don't change anything unless you really know what you do.
+ */
+
+#define LC_LOG_TX_BUFFERS 1 /* (2 == 2^^1) 2 Transmit buffers */
+#define LC_LOG_RX_BUFFERS 3 /* (8 == 2^^3) 8 Receive buffers */
+
+/* Descriptor ring sizes */
+
+#define TMDNUM (1 << (LC_LOG_TX_BUFFERS)) /* 2 Transmit descriptor rings */
+#define RMDNUM (1 << (LC_LOG_RX_BUFFERS)) /* 8 Receive Buffers */
+
+/* Define Mask for setting RMD, TMD length in the LANCE init_block */
+
+#define TMDNUMMASK (LC_LOG_TX_BUFFERS << 29)
+#define RMDNUMMASK (LC_LOG_RX_BUFFERS << 29)
+
+/*
+ * Data Buffer size is set to maximum packet length.
+ */
+
+#define PKT_BUF_SZ 1518
+
+/*
+ * The number of low I/O ports used by the ethercard.
+ */
+
+#define ETHERCARD_TOTAL_SIZE SK_POS_SIZE
+
+/*
+ * Portreserve is there to mark the Card I/O Port region as used.
+ * Check_region is to check if the region at ioaddr with the size "size"
+ * is free or not.
+ * Snarf_region allocates the I/O Port region.
+ */
+
+#ifndef HAVE_PORTRESERVE
+
+#define check_region(ioaddr, size) 0
+#define request_region(ioaddr, size,name) do ; while (0)
+
+#endif
+
+/*
+ * SK_DEBUG
+ *
+ * Here you can choose what level of debugging wanted.
+ *
+ * If SK_DEBUG and SK_DEBUG2 are undefined, then only the
+ * necessary messages will be printed.
+ *
+ * If SK_DEBUG is defined, there will be many debugging prints
+ * which can help to find some mistakes in configuration or even
+ * in the driver code.
+ *
+ * If SK_DEBUG2 is defined, many many messages will be printed
+ * which normally you don't need. I used this to check the interrupt
+ * routine.
+ *
+ * (If you define only SK_DEBUG2 then only the messages for
+ * checking interrupts will be printed!)
+ *
+ * Normal way of live is:
+ *
+ * For the whole thing get going let both symbolic constants
+ * undefined. If you face any problems and you know what's going
+ * on (you know something about the card and you can interpret some
+ * hex LANCE register output) then define SK_DEBUG
+ *
+ */
+
+#undef SK_DEBUG /* debugging */
+#undef SK_DEBUG2 /* debugging with more verbose report */
+
+#ifdef SK_DEBUG
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x) /**/
+#endif
+
+#ifdef SK_DEBUG2
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x) /**/
+#endif
+
+/*
+ * SK_G16 RAM
+ *
+ * The components are memory mapped and can be set in a region from
+ * 0x00000 through 0xfc000 in 16KB steps.
+ *
+ * The Network components are: dual ported RAM, Prom, I/O Reg, Status-,
+ * Controlregister and I/O Command.
+ *
+ * dual ported RAM: This is the only memory region which the LANCE chip
+ * has access to. From the Lance it is addressed from 0x0000 to
+ * 0x3fbf. The host accesses it normally.
+ *
+ * PROM: The PROM obtains the ETHERNET-MAC-Address. It is realised as a
+ * 8-Bit PROM, this means only the 16 even addresses are used of the
+ * 32 Byte Address region. Access to an odd address results in invalid
+ * data.
+ *
+ * LANCE I/O Reg: The I/O Reg is build of 4 single Registers, Low-Byte Write,
+ * Hi-Byte Write, Low-Byte Read, Hi-Byte Read.
+ * Transfer from or to the LANCE is always in 16Bit so Low and High
+ * registers are always relevant.
+ *
+ * The Data from the Readregister is not the data in the Writeregister!!
+ *
+ * Port: Status- and Controlregister.
+ * Two different registers which share the same address, Status is
+ * read-only, Control is write-only.
+ *
+ * I/O Command:
+ * Any bitcombination written in here starts the transmission between
+ * Host and LANCE.
+ */
+
+/*
+ * Host view of the memory mapped SK_G16 board components, declared in
+ * the order they appear in the board's address window (see the layout
+ * description above).
+ */
+typedef struct
+{
+ unsigned char ram[0x3fc0]; /* 16KB dual ported ram */
+ unsigned char rom[0x0020]; /* 32Byte PROM containing 6Byte MAC */
+ unsigned char res1[0x0010]; /* reserved */
+ unsigned volatile short ioreg;/* LANCE I/O Register */
+ unsigned volatile char port; /* Statusregister and Controlregister */
+ unsigned char iocom; /* I/O Command Register */
+} SK_RAM;
+
+/* struct */
+
+/*
+ * This is the structure for the dual ported ram. We
+ * have exactly 16 320 Bytes. In here there must be:
+ *
+ * - Initialize Block (starting at a word boundary)
+ * - Receive and Transmit Descriptor Rings (quadword boundary)
+ * - Data Buffers (arbitrary boundary)
+ *
+ * This is because LANCE has on SK_G16 only access to the dual ported
+ * RAM and nowhere else.
+ */
+
+struct SK_ram
+{
+ struct init_block ib; /* LANCE init block; first in RAM, so its LANCE address is 0x0000 */
+ struct tmd tmde[TMDNUM]; /* transmit descriptor ring */
+ struct rmd rmde[RMDNUM]; /* receive descriptor ring */
+ char tmdbuf[TMDNUM][PKT_BUF_SZ]; /* transmit packet buffers */
+ char rmdbuf[RMDNUM][PKT_BUF_SZ]; /* receive packet buffers */
+};
+
+/*
+ * Structure where all necessary information is for ring buffer
+ * management and statistics.
+ */
+
+struct priv
+{
+ struct SK_ram *ram; /* dual ported ram structure */
+ struct rmd *rmdhead; /* start of receive ring descriptors */
+ struct tmd *tmdhead; /* start of transmit ring descriptors */
+ int rmdnum; /* index of next receive descriptor checked by SK_rxintr() */
+ int tmdnum; /* actual transmit descriptor for transmitting data */
+ int tmdlast; /* last sent descriptor used for error handling, etc */
+ void *rmdbufs[RMDNUM]; /* pointer to the receive buffers */
+ void *tmdbufs[TMDNUM]; /* pointer to the transmit buffers */
+ struct enet_statistics stats; /* Device driver statistics */
+};
+
+/* global variable declaration */
+
+/* IRQ map used to reserve a IRQ (see SK_open()) */
+
+/* extern void *irq2dev_map[16]; */ /* Declared in <linux/ioport.h> */
+
+/* static variables */
+
+static SK_RAM *board; /* pointer to our memory mapped board components */
+
+/* Macros */
+
+
+/* Function Prototypes */
+
+/*
+ * Device Driver functions
+ * -----------------------
+ * See for short explanation of each function its definitions header.
+ */
+
+int SK_init(struct device *dev);
+static int SK_probe(struct device *dev, short ioaddr);
+
+static int SK_open(struct device *dev);
+static int SK_send_packet(struct sk_buff *skb, struct device *dev);
+static void SK_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static void SK_rxintr(struct device *dev);
+static void SK_txintr(struct device *dev);
+static int SK_close(struct device *dev);
+
+static struct enet_statistics *SK_get_stats(struct device *dev);
+
+unsigned int SK_rom_addr(void);
+
+static void set_multicast_list(struct device *dev);
+
+/*
+ * LANCE Functions
+ * ---------------
+ */
+
+static int SK_lance_init(struct device *dev, unsigned short mode);
+void SK_reset_board(void);
+void SK_set_RAP(int reg_number);
+int SK_read_reg(int reg_number);
+int SK_rread_reg(void);
+void SK_write_reg(int reg_number, int value);
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+void SK_print_pos(struct device *dev, char *text);
+void SK_print_dev(struct device *dev, char *text);
+void SK_print_ram(struct device *dev);
+
+
+/*-
+ * Function : SK_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Check for a SK_G16 network adaptor and initialize it.
+ * This function gets called by dev_init which initializes
+ * all Network devices.
+ *
+ * Parameters : I : struct device *dev - structure preconfigured
+ * from Space.c
+ * Return Value : 0 = Driver Found and initialized
+ * Errors : ENODEV - no device found
+ * ENXIO - not probed
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+/*
+ * Check for a network adaptor of this type, and return '0' if one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ * If dev->base_addr == 2, allocate space for the device and return success
+ * (detachable devices only).
+ */
+
+int SK_init(struct device *dev)
+{
+ int ioaddr = 0; /* I/O port address used for POS regs */
+ int *port, ports[] = SK_IO_PORTS; /* SK_G16 supported ports */
+
+ /* get preconfigured base_addr from dev which is done in Space.c */
+ int base_addr = dev->base_addr;
+
+ PRINTK(("%s: %s", SK_NAME, rcsid));
+ rcsid = NULL; /* We do not want to use this further */
+
+ if (base_addr > 0x0ff) /* Check a single specified address */
+ {
+ /*
+ * NOTE(review): the POS register macros are presumably relative
+ * to ioaddr (see the autoprobe loop below, which sets it before
+ * reading them), so point ioaddr at the caller-supplied address
+ * first -- confirm against the SK_POS* definitions.
+ */
+ ioaddr = base_addr;
+
+ /*
+ * Check if on specified address is a SK_G16.
+ * Both ID bytes must match, exactly as in the autoprobe loop
+ * below. (The previous code used ||, accepting the card when
+ * only one of the two ID bytes matched.)
+ */
+
+ if ( (inb(SK_POS0) == SK_IDLOW) &&
+ (inb(SK_POS1) == SK_IDHIGH) )
+ {
+ return SK_probe(dev, base_addr);
+ }
+
+ return ENODEV; /* Sorry, but on specified address NO SK_G16 */
+ }
+ else if (base_addr > 0) /* Don't probe at all */
+ {
+ return ENXIO;
+ }
+
+ /* Autoprobe base_addr */
+
+ for (port = &ports[0]; *port; port++)
+ {
+ ioaddr = *port; /* we need ioaddr for accessing POS regs */
+
+ /* Check if I/O Port region is used by another board */
+
+ if (check_region(ioaddr, ETHERCARD_TOTAL_SIZE))
+ {
+ continue; /* Try next Port address */
+ }
+
+ /* Check if at ioaddr is a SK_G16 */
+
+ if ( !(inb(SK_POS0) == SK_IDLOW) ||
+ !(inb(SK_POS1) == SK_IDHIGH) )
+ {
+ continue; /* Try next Port address */
+ }
+
+ dev->base_addr = ioaddr; /* Set I/O Port Address */
+
+ if (SK_probe(dev, ioaddr) == 0)
+ {
+ return 0; /* Card found and initialized */
+ }
+ }
+
+ dev->base_addr = base_addr; /* Write back original base_addr */
+
+ return ENODEV; /* Failed to find or init driver */
+
+} /* End of SK_init */
+
+
+/*-
+ * Function : SK_probe
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called by SK_init and
+ * does the main part of initialization.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : short ioaddr - I/O Port address where POS is.
+ * Return Value : 0 = Initialization done
+ * Errors : ENODEV - No SK_G16 found
+ * -1 - Configuration problem
+ * Globals : irq2dev_map - Which device uses which IRQ
+ * : board - pointer to SK_RAM
+ * Update History :
+ * YY/MM/DD uid Description
+ * 94/06/30 pwe SK_ADDR now checked and at the correct place
+-*/
+
+/*
+ * NOTE(review): declared static in the prototype near the top of the
+ * file; this definition omits the keyword but keeps internal linkage
+ * from that earlier declaration.
+ */
+int SK_probe(struct device *dev, short ioaddr)
+{
+ int i,j; /* Counters */
+ int sk_addr_flag = 0; /* SK ADDR correct? 1 - no, 0 - yes */
+ unsigned int rom_addr; /* used to store RAM address used for POS_ADDR */
+
+ struct priv *p; /* SK_G16 private structure */
+
+ /* SK_ADDR must be 16KB aligned and not below 0xa0000 */
+ if (SK_ADDR & 0x3fff || SK_ADDR < 0xa0000)
+ {
+
+ sk_addr_flag = 1;
+
+ /*
+ * Now here we could use a routine which searches for a free
+ * place in the ram and set SK_ADDR if found. TODO.
+ */
+ }
+
+ if (SK_BOOT_ROM) /* Shall we keep Boot_ROM on ? */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is set.\n", SK_NAME));
+
+ rom_addr = SK_rom_addr();
+
+ if (rom_addr == 0) /* No Boot_ROM found */
+ {
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR; /* assign predefined address */
+
+ PRINTK(("## %s: NO Bootrom found \n", SK_NAME));
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else if (rom_addr == SK_ADDR)
+ {
+ printk("%s: RAM + ROM are set to the same address %#08x\n"
+ " Check configuration. Now switching off Boot_ROM\n",
+ SK_NAME, rom_addr);
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off*/
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else
+ {
+ PRINTK(("## %s: Found ROM at %#08x\n", SK_NAME, rom_addr));
+ PRINTK(("## %s: Keeping Boot_ROM on\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_ROM_RAM_ON, SK_POS2); /* RAM on, BOOT_ROM on */
+ }
+ }
+ else /* Don't keep Boot_ROM */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is not set.\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_rom_addr(); /* Try to find a Boot_ROM */
+
+ /* IF we find a Boot_ROM disable it */
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+
+ /* We found a Boot_ROM and it's gone. Set RAM address on
+ * Boot_ROM address.
+ */
+
+ if (rom_addr)
+ {
+ printk("%s: We found Boot_ROM at %#08x. Now setting RAM on"
+ "that address\n", SK_NAME, rom_addr);
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM on Boot_ROM address */
+ }
+ else /* We did not find a Boot_ROM, use predefined SK_ADDR for ram */
+ {
+ /*
+ * NOTE(review): sk_addr_flag was already checked at the top of
+ * this branch (which returned -1), so this test can never be
+ * true here; kept for symmetry with the other paths.
+ */
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ }
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "POS registers after ROM, RAM config");
+#endif
+
+ /* From here on the board's shared memory window is accessed via 'board' */
+ board = (SK_RAM *) rom_addr;
+
+ /* Read in station address (only even PROM bytes are valid, hence j += 2) */
+ for (i = 0, j = 0; i < ETH_ALEN; i++, j+=2)
+ {
+ dev->dev_addr[i] = board->rom[j];
+ }
+
+ /* Check for manufacturer code */
+ if (!(dev->dev_addr[0] == SK_MAC0 &&
+ dev->dev_addr[1] == SK_MAC1 &&
+ dev->dev_addr[2] == SK_MAC2) )
+ {
+ PRINTK(("## %s: We did not find SK_G16 at RAM location.\n",
+ SK_NAME));
+ return ENODEV; /* NO SK_G16 found */
+ }
+
+ printk("%s: %s found at %#3x, HW addr: %#04x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ "Schneider & Koch Netcard",
+ (unsigned int) dev->base_addr,
+ dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ /* Allocate memory for private structure */
+ p = dev->priv = (void *) kmalloc(sizeof(struct priv), GFP_KERNEL);
+ if (p == NULL) {
+ printk("%s: ERROR - no memory for driver data!\n", dev->name);
+ return -ENOMEM;
+ }
+ memset((char *) dev->priv, 0, sizeof(struct priv)); /* clear memory */
+
+ /* Grab the I/O Port region */
+ request_region(ioaddr, ETHERCARD_TOTAL_SIZE,"sk_g16");
+
+ /* Assign our Device Driver functions */
+
+ dev->open = &SK_open;
+ dev->stop = &SK_close;
+ dev->hard_start_xmit = &SK_send_packet;
+ dev->get_stats = &SK_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+
+ /* Set the generic fields of the device structure */
+
+ ether_setup(dev);
+
+ /* No logical-address filtering is implemented (see set_multicast_list) */
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* Initialize private structure */
+
+ p->ram = (struct SK_ram *) rom_addr; /* Set dual ported RAM addr */
+ p->tmdhead = &(p->ram)->tmde[0]; /* Set TMD head */
+ p->rmdhead = &(p->ram)->rmde[0]; /* Set RMD head */
+
+ /* Initialize buffer pointers */
+
+ for (i = 0; i < TMDNUM; i++)
+ {
+ p->tmdbufs[i] = &(p->ram)->tmdbuf[i];
+ }
+
+ for (i = 0; i < RMDNUM; i++)
+ {
+ p->rmdbufs[i] = &(p->ram)->rmdbuf[i];
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "End of SK_probe");
+ SK_print_ram(dev);
+#endif
+
+ return 0; /* Initialization done */
+
+} /* End of SK_probe() */
+
+
+/*-
+ * Function : SK_open
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called sometimes after booting
+ * when ifconfig program is run.
+ *
+ * This function requests an IRQ, sets the correct
+ * IRQ in the card. Then calls SK_lance_init() to
+ * init and start the LANCE chip. Then if everything is
+ * ok returns with 0 (OK), which means SK_G16 is now
+ * opened and operational.
+ *
+ * (Called by dev_open() /net/inet/dev.c)
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : 0 - Device opened
+ * Errors : -EAGAIN - Open failed
+ * Globals : irq2dev_map - which device uses which irq
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_open(struct device *dev)
+{
+ int i = 0;
+ int irqval = 0;
+ int ioaddr = dev->base_addr;
+
+ int irqtab[] = SK_IRQS;
+
+ struct priv *p = (struct priv *)dev->priv;
+
+ PRINTK(("## %s: At beginning of SK_open(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev->irq == 0) /* Autoirq */
+ {
+ i = 0;
+
+ /*
+ * Check if one IRQ out of SK_IRQS is free and install
+ * interrupt handler.
+ * Most done by request_irq().
+ * irqval: 0 - interrupt handler installed for IRQ irqtab[i]
+ * -EBUSY - interrupt busy
+ * -EINVAL - irq > 15 or handler = NULL
+ *
+ * (the loop below relies on irqtab[] / SK_IRQS being
+ * zero-terminated)
+ */
+
+ do
+ {
+ irqval = request_irq(irqtab[i], &SK_interrupt, 0, "sk_g16", NULL);
+ i++;
+ } while (irqval && irqtab[i]);
+
+ if (irqval) /* We tried every possible IRQ but no success */
+ {
+ printk("%s: unable to get an IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ dev->irq = irqtab[--i];
+
+ outb(i<<2, SK_POS4); /* Set Card on probed IRQ */
+
+ }
+ else if (dev->irq == 2) /* IRQ2 is always IRQ9 */
+ {
+ if (request_irq(9, &SK_interrupt, 0, "sk_g16", NULL))
+ {
+ printk("%s: unable to get IRQ 9\n", dev->name);
+ return -EAGAIN;
+ }
+ dev->irq = 9;
+
+ /*
+ * Now we set card on IRQ2.
+ * This can be confusing, but remember that IRQ2 on the network
+ * card is in reality IRQ9
+ */
+ outb(0x08, SK_POS4); /* set card to IRQ2 */
+
+ }
+ else /* Check IRQ as defined in Space.c */
+ {
+ int i = 0; /* POS4 IRQ encoding; note: shadows the outer i */
+
+ /* check if IRQ free and valid. Then install Interrupt handler */
+
+ if (request_irq(dev->irq, &SK_interrupt, 0, "sk_g16", NULL))
+ {
+ printk("%s: unable to get selected IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ /* Map the preselected IRQ number to the board's POS4 encoding */
+ switch(dev->irq)
+ {
+ case 3: i = 0;
+ break;
+ case 5: i = 1;
+ break;
+ case 2: i = 2;
+ break;
+ case 11:i = 3;
+ break;
+ default:
+ printk("%s: Preselected IRQ %d is invalid for %s boards",
+ dev->name,
+ dev->irq,
+ SK_NAME);
+ return -EAGAIN;
+ }
+
+ outb(i<<2, SK_POS4); /* Set IRQ on card */
+ }
+
+ irq2dev_map[dev->irq] = dev; /* Set IRQ as used by us */
+
+ printk("%s: Schneider & Koch G16 at %#3x, IRQ %d, shared mem at %#08x\n",
+ dev->name, (unsigned int)dev->base_addr,
+ (int) dev->irq, (unsigned int) p->ram);
+
+ if (!(i = SK_lance_init(dev, 0))) /* LANCE init OK? */
+ {
+
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+#ifdef SK_DEBUG
+
+ /*
+ * This debug block tries to stop LANCE,
+ * reinit LANCE with transmitter and receiver disabled,
+ * then stop again and reinit with NORMAL_MODE
+ */
+
+ printk("## %s: After lance init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_DTX | MODE_DRX);
+ printk("## %s: Reinit with DTX + DRX off. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_NORMAL);
+ printk("## %s: LANCE back to normal mode. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_print_pos(dev, "POS regs before returning OK");
+
+#endif /* SK_DEBUG */
+
+ return 0; /* SK_open() is successful */
+ }
+ else /* LANCE init failed */
+ {
+
+ PRINTK(("## %s: LANCE init failed: CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ dev->start = 0; /* Device not ready */
+ return -EAGAIN;
+ }
+
+} /* End of SK_open() */
+
+
+/*-
+ * Function : SK_lance_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Reset LANCE chip, fill RMD, TMD structures with
+ * start values and Start LANCE.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : int mode - put LANCE into "mode" see data-sheet for
+ * more info.
+ * Return Value : 0 - Init done
+ * Errors : -1 - Init failed
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_lance_init(struct device *dev, unsigned short mode)
+{
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+ struct tmd *tmdp;
+ struct rmd *rmdp;
+
+ PRINTK(("## %s: At beginning of LANCE init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Reset LANCE */
+ SK_reset_board();
+
+ /* Initialize TMD's with start values */
+ p->tmdnum = 0; /* First descriptor for transmitting */
+ p->tmdlast = 0; /* First descriptor for reading stats */
+
+ for (i = 0; i < TMDNUM; i++) /* Init all TMD's */
+ {
+ tmdp = p->tmdhead + i;
+
+ tmdp->u.buffer = (unsigned long) p->tmdbufs[i]; /* assign buffer */
+
+ /* Mark TMD as start and end of packet */
+ tmdp->u.s.status = TX_STP | TX_ENP;
+ }
+
+
+ /* Initialize RMD's with start values */
+
+ p->rmdnum = 0; /* First RMD which will be used */
+
+ for (i = 0; i < RMDNUM; i++) /* Init all RMD's */
+ {
+ rmdp = p->rmdhead + i;
+
+
+ rmdp->u.buffer = (unsigned long) p->rmdbufs[i]; /* assign buffer */
+
+ /*
+ * LANCE must be owner at beginning so that he can fill in
+ * receiving packets, set status and release RMD
+ */
+
+ rmdp->u.s.status = RX_OWN;
+
+ rmdp->blen = -PKT_BUF_SZ; /* Buffer Size in a two's complement */
+
+ rmdp->mlen = 0; /* init message length */
+
+ }
+
+ /* Fill LANCE Initialize Block */
+
+ (p->ram)->ib.mode = mode; /* Set operation mode */
+
+ for (i = 0; i < ETH_ALEN; i++) /* Set physical address */
+ {
+ (p->ram)->ib.paddr[i] = dev->dev_addr[i];
+ }
+
+ for (i = 0; i < 8; i++) /* Set multicast, logical address */
+ {
+ (p->ram)->ib.laddr[i] = 0; /* We do not use logical addressing */
+ }
+
+ /* Set ring descriptor pointers and set number of descriptors */
+
+ (p->ram)->ib.rdrp = (int) p->rmdhead | RMDNUMMASK;
+ (p->ram)->ib.tdrp = (int) p->tmdhead | TMDNUMMASK;
+
+ /* Prepare LANCE Control and Status Registers */
+
+ cli(); /* interrupts off while CSR1-3 are programmed and INIT is issued */
+
+ SK_write_reg(CSR3, CSR3_ACON); /* Ale Control !!!THIS MUST BE SET!!!! */
+
+ /*
+ * LANCE addresses the RAM from 0x0000 to 0x3fbf and has no access to
+ * PC Memory locations.
+ *
+ * In structure SK_ram is defined that the first thing in ram
+ * is the initialization block. So his address is for LANCE always
+ * 0x0000
+ *
+ * CSR1 contains low order bits 15:0 of initialization block address
+ * CSR2 is built of:
+ * 7:0 High order bits 23:16 of initialization block address
+ * 15:8 reserved, must be 0
+ */
+
+ /* Set initialization block address (must be on word boundary) */
+ SK_write_reg(CSR1, 0); /* Set low order bits 15:0 */
+ SK_write_reg(CSR2, 0); /* Set high order bits 23:16 */
+
+
+ PRINTK(("## %s: After setting CSR1-3. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Initialize LANCE */
+
+ /*
+ * INIT = Initialize, when set, causes the LANCE to begin the
+ * initialization procedure and access the Init Block.
+ */
+
+ SK_write_reg(CSR0, CSR0_INIT);
+
+ sti();
+
+ /* Wait until LANCE finished initialization */
+
+ SK_set_RAP(CSR0); /* Register Address Pointer to CSR0 */
+
+ /* Busy-wait for IDON; bounded at 100 polls so we never hang here */
+ for (i = 0; (i < 100) && !(SK_rread_reg() & CSR0_IDON); i++)
+ ; /* Wait until init done or go ahead if problems (i>=100) */
+
+ if (i >= 100) /* Something is wrong ! */
+ {
+ printk("%s: can't init am7990, status: %04x "
+ "init_block: %#08x\n",
+ dev->name, (int) SK_read_reg(CSR0),
+ (unsigned int) &(p->ram)->ib);
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "LANCE INIT failed");
+ SK_print_dev(dev,"Device Structure:");
+#endif
+
+ return -1; /* LANCE init failed */
+ }
+
+ PRINTK(("## %s: init done after %d ticks\n", SK_NAME, i));
+
+ /* Clear Initialize done, enable Interrupts, start LANCE */
+
+ SK_write_reg(CSR0, CSR0_IDON | CSR0_INEA | CSR0_STRT);
+
+ PRINTK(("## %s: LANCE started. CSR0: %#06x\n", SK_NAME,
+ SK_read_reg(CSR0)));
+
+ return 0; /* LANCE is up and running */
+
+} /* End of SK_lance_init() */
+
+
+
+/*-
+ * Function : SK_send_packet
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : Writes an socket buffer into a transmit descriptor
+ * and starts transmission.
+ *
+ * Parameters : I : struct sk_buff *skb - packet to transfer
+ * I : struct device *dev - SK_G16 device structure
+ * Return Value : 0 - OK
+ * 1 - Could not transmit (dev_queue_xmit will queue it)
+ * and try to sent it later
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct tmd *tmdp;
+
+ if (dev->tbusy)
+ {
+ /* if Transmitter more than 150ms busy -> time_out */
+
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 15)
+ {
+ return 1; /* We have to try transmit later */
+ }
+
+ printk("%s: xmitter timed out, try to restart!\n", dev->name);
+
+ SK_lance_init(dev, MODE_NORMAL); /* Reinit LANCE */
+
+ dev->tbusy = 0; /* Clear Transmitter flag */
+
+ dev->trans_start = jiffies; /* Mark Start of transmission */
+
+ }
+
+ /*
+ * If some upper Layer thinks we missed a transmit done interrupt
+ * we are passed NULL.
+ * (dev_queue_xmit net/inet/dev.c
+ */
+
+ if (skb == NULL)
+ {
+ /*
+ * Dequeue packets from transmit queue and send them.
+ */
+ dev_tint(dev);
+
+ return 0;
+ }
+
+ PRINTK2(("## %s: SK_send_packet() called, CSR0 %#04x.\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ * This means check if we are already in.
+ */
+
+ if (set_bit(0, (void *) &dev->tbusy) != 0) /* dev->tbusy already set ? */
+ {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ }
+ else
+ {
+ /* Evaluate Packet length (pad short frames up to ETH_ZLEN) */
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ tmdp = p->tmdhead + p->tmdnum; /* Which descriptor for transmitting */
+
+ /* Fill in Transmit Message Descriptor */
+
+ /* Copy data into dual ported ram */
+
+ memcpy((char *) (tmdp->u.buffer & 0x00ffffff), (char *)skb->data,
+ skb->len);
+
+ tmdp->blen = -len; /* set length to transmit */
+
+ /*
+ * Packet start and end is always set because we use the maximum
+ * packet length as buffer length.
+ * Relinquish ownership to LANCE
+ */
+
+ tmdp->u.s.status = TX_OWN | TX_STP | TX_ENP;
+
+ /* Start Demand Transmission */
+ SK_write_reg(CSR0, CSR0_TDMD | CSR0_INEA);
+
+ dev->trans_start = jiffies; /* Mark start of transmission */
+
+ /* Set pointer to next transmit buffer */
+ p->tmdnum++;
+ p->tmdnum &= TMDNUM-1;
+
+ /* Do we own the next transmit buffer ? */
+ if (! ((p->tmdhead + p->tmdnum)->u.s.status & TX_OWN) )
+ {
+ /*
+ * We own next buffer and are ready to transmit, so
+ * clear busy flag
+ */
+ dev->tbusy = 0;
+ }
+ }
+ /*
+ * The skb is freed on both paths above, so on a transmitter
+ * access conflict the packet is dropped rather than requeued.
+ */
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+} /* End of SK_send_packet */
+
+
+/*-
+ * Function : SK_interrupt
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : SK_G16 interrupt handler which checks for LANCE
+ * Errors, handles transmit and receive interrupts
+ *
+ * Parameters : I : int irq, void *dev_id, struct pt_regs * regs -
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ int csr0;
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ struct priv *p;
+
+ /*
+ * Bail out before touching dev: the previous code fetched
+ * dev->priv first and only printed a warning on dev == NULL,
+ * so a spurious IRQ dereferenced a NULL pointer.
+ */
+ if (dev == NULL)
+ {
+ printk("SK_interrupt(): IRQ %d for unknown device.\n", irq);
+ return;
+ }
+
+ p = (struct priv *) dev->priv;
+
+ PRINTK2(("## %s: SK_interrupt(). status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev->interrupt)
+ {
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ }
+
+ csr0 = SK_read_reg(CSR0); /* store register for checking */
+
+ dev->interrupt = 1; /* We are handling an interrupt */
+
+ /*
+ * Acknowledge all of the current interrupt sources, disable
+ * Interrupts (INEA = 0)
+ */
+
+ SK_write_reg(CSR0, csr0 & CSR0_CLRALL);
+
+ if (csr0 & CSR0_ERR) /* LANCE Error */
+ {
+ printk("%s: error: %04x\n", dev->name, csr0);
+
+ if (csr0 & CSR0_MISS) /* No place to store packet ? */
+ {
+ p->stats.rx_dropped++;
+ }
+ }
+
+ if (csr0 & CSR0_RINT) /* Receive Interrupt (packet arrived) */
+ {
+ SK_rxintr(dev);
+ }
+
+ if (csr0 & CSR0_TINT) /* Transmit interrupt (packet sent) */
+ {
+ SK_txintr(dev);
+ }
+
+ SK_write_reg(CSR0, CSR0_INEA); /* Enable Interrupts */
+
+ dev->interrupt = 0; /* We are out */
+} /* End of SK_interrupt() */
+
+
+/*-
+ * Function : SK_txintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : After sending a packet we check status, update
+ * statistics and relinquish ownership of transmit
+ * descriptor ring.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_txintr(struct device *dev)
+{
+ int tmdstat;
+ struct tmd *tmdp;
+ struct priv *p = (struct priv *) dev->priv;
+
+
+ PRINTK2(("## %s: SK_txintr() status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ tmdp = p->tmdhead + p->tmdlast; /* Which buffer we sent at last ? */
+
+ /* Set next buffer */
+ p->tmdlast++;
+ p->tmdlast &= TMDNUM-1;
+
+ tmdstat = tmdp->u.s.status & 0xff00; /* filter out status bits 15:08 */
+
+ /*
+ * We check status of transmitted packet.
+ * see LANCE data-sheet for error explanation
+ */
+ if (tmdstat & TX_ERR) /* Error occurred */
+ {
+ printk("%s: TX error: %04x %04x\n", dev->name, (int) tmdstat,
+ (int) tmdp->status2);
+
+ if (tmdp->status2 & TX_TDR) /* TDR problems? */
+ {
+ printk("%s: tdr-problems \n", dev->name);
+ }
+
+ if (tmdp->status2 & TX_RTRY) /* Failed in 16 attempts to transmit ? */
+ p->stats.tx_aborted_errors++;
+ if (tmdp->status2 & TX_LCOL) /* Late collision ? */
+ p->stats.tx_window_errors++;
+ if (tmdp->status2 & TX_LCAR) /* Loss of Carrier ? */
+ p->stats.tx_carrier_errors++;
+ if (tmdp->status2 & TX_UFLO) /* Underflow error ? */
+ {
+ p->stats.tx_fifo_errors++;
+
+ /*
+ * If UFLO error occurs it will turn the transmitter off.
+ * So we must reinit LANCE
+ */
+
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+
+ p->stats.tx_errors++;
+
+ tmdp->status2 = 0; /* Clear error flags */
+ }
+ else if (tmdstat & TX_MORE) /* Collisions occurred ? */
+ {
+ /*
+ * Here I have a problem.
+ * I only know that there must be one or up to 15 collisions.
+ * That's why TX_MORE is set, because after 16 attempts TX_RTRY
+ * will be set which means couldn't send packet aborted transfer.
+ *
+ * First I did not have this in but then I thought at minimum
+ * we see that something was not ok.
+ * If anyone knows something better than this to handle this
+ * please report it. (see Email addresses in the README file)
+ */
+
+ p->stats.collisions++;
+ }
+ else /* Packet sent without any problems */
+ {
+ p->stats.tx_packets++;
+ }
+
+ /*
+ * We mark transmitter not busy anymore, because now we have a free
+ * transmit descriptor which can be filled by SK_send_packet and
+ * afterwards sent by the LANCE
+ */
+
+ dev->tbusy = 0;
+
+ /*
+ * mark_bh(NET_BH);
+ * This will cause net_bh() to run after this interrupt handler.
+ *
+ * The function which do handle slow IRQ parts is do_bottom_half()
+ * which runs at normal kernel priority, that means all interrupt are
+ * enabled. (see kernel/irq.c)
+ *
+ * net_bh does something like this:
+ * - check if already in net_bh
+ * - try to transmit something from the send queue
+ * - if something is in the receive queue send it up to higher
+ * levels if it is a known protocol
+ * - try to transmit something from the send queue
+ */
+
+ mark_bh(NET_BH);
+
+} /* End of SK_txintr() */
+
+
+/*-
+ * Function : SK_rxintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : Buffer sent, check for errors, relinquish ownership
+ * of the receive message descriptor.
+ *
+ * Parameters : I : SK_G16 device structure
+ * Return Value : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_rxintr(struct device *dev)
+{
+
+ struct rmd *rmdp;
+ int rmdstat;
+ struct priv *p = (struct priv *) dev->priv;
+
+ PRINTK2(("## %s: SK_rxintr(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ rmdp = p->rmdhead + p->rmdnum;
+
+ /* As long as we own the next entry, check status and send
+ * it up to higher layer
+ */
+
+ while (!( (rmdstat = rmdp->u.s.status) & RX_OWN))
+ {
+ /*
+ * Start and end of packet must be set, because we use
+ * the ethernet maximum packet length (1518) as buffer size.
+ *
+ * Because our buffers are at maximum OFLO and BUFF errors are
+ * not to be concerned (see Data sheet)
+ */
+
+ if ((rmdstat & (RX_STP | RX_ENP)) != (RX_STP | RX_ENP))
+ {
+ /* Start of a frame > 1518 Bytes ? */
+
+ if (rmdstat & RX_STP)
+ {
+ p->stats.rx_errors++; /* bad packet received */
+ p->stats.rx_length_errors++; /* packet too long */
+
+ printk("%s: packet too long\n", dev->name);
+ }
+
+ /*
+ * All other packets will be ignored until a new frame with
+ * start (RX_STP) set follows.
+ *
+ * What we do is just give descriptor free for new incoming
+ * packets.
+ */
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+
+ }
+ else if (rmdstat & RX_ERR) /* Receive Error ? */
+ {
+ printk("%s: RX error: %04x\n", dev->name, (int) rmdstat);
+
+ p->stats.rx_errors++;
+
+ if (rmdstat & RX_FRAM) p->stats.rx_frame_errors++;
+ if (rmdstat & RX_CRC) p->stats.rx_crc_errors++;
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+
+ }
+ else /* We have a packet which can be queued for the upper layers */
+ {
+
+ int len = (rmdp->mlen & 0x0fff); /* extract message length from receive buffer */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(len+2); /* allocate socket buffer */
+
+ if (skb == NULL) /* Could not get mem ? */
+ {
+
+ /*
+ * Couldn't allocate sk_buffer so we give descriptor back
+ * to Lance, update statistics and go ahead.
+ */
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+ printk("%s: Couldn't allocate sk_buff, deferring packet.\n",
+ dev->name);
+ p->stats.rx_dropped++;
+
+ break; /* Jump out */
+ }
+
+ /* Prepare sk_buff to queue for upper layers */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align IP header on 16 byte boundary */
+
+ /*
+ * Copy data out of our receive descriptor into sk_buff.
+ *
+ * (rmdp->u.buffer & 0x00ffffff) -> get address of buffer and
+ * ignore status fields)
+ */
+
+ memcpy(skb_put(skb,len), (unsigned char *) (rmdp->u.buffer & 0x00ffffff),
+ len);
+
+
+ /*
+ * Notify the upper protocol layers that there is another packet
+ * to handle
+ *
+ * netif_rx() always succeeds. see /net/inet/dev.c for more.
+ */
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb); /* queue packet and mark it for processing */
+
+ /*
+ * Packet is queued and marked for processing so we
+ * free our descriptor and update statistics
+ */
+
+ rmdp->u.s.status = RX_OWN;
+ p->stats.rx_packets++;
+
+
+ /*
+ * NOTE(review): rmdnum is only advanced on this success path;
+ * the error paths above hand the same descriptor back to the
+ * LANCE without advancing, so the next check re-examines it --
+ * verify this stays in step with the LANCE's own ring pointer.
+ */
+ p->rmdnum++;
+ p->rmdnum %= RMDNUM;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+ }
+} /* End of SK_rxintr() */
+
+
+/*-
+ * Function : SK_close
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : close gets called from dev_close() and should
+ * deinstall the card (free_irq, mem etc).
+ *
+ * Parameters : I : struct device *dev - our device structure
+ * Return Value : 0 - closed device driver
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+/* I have tried to set BOOT_ROM on and RAM off but then, after an 'ifconfig
+ * down', the system stops. So I don't reset the card to its initial state.
+ */
+
+static int SK_close(struct device *dev)
+{
+
+ PRINTK(("## %s: SK_close(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ dev->tbusy = 1; /* Transmitter busy */
+ dev->start = 0; /* Card down */
+
+ printk("%s: Shutting %s down CSR0 %#06x\n", dev->name, SK_NAME,
+ (int) SK_read_reg(CSR0));
+
+ SK_write_reg(CSR0, CSR0_STOP); /* STOP the LANCE */
+
+ free_irq(dev->irq, NULL); /* Free IRQ (registered with NULL dev_id in SK_open) */
+ irq2dev_map[dev->irq] = 0; /* Mark IRQ as unused */
+
+ return 0; /* always succeed */
+
+} /* End of SK_close() */
+
+
+/*-
+ * Function : SK_get_stats
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Return current status structure to upper layers.
+ * It is called by sprintf_stats (dev.c).
+ *
+ * Parameters : I : struct device *dev - our device structure
+ * Return Value : struct enet_statistics * - our current statistics
+ * Errors : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static struct enet_statistics *SK_get_stats(struct device *dev)
+{
+
+ struct priv *p = (struct priv *) dev->priv;
+
+ PRINTK(("## %s: SK_get_stats(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ return &p->stats; /* Return Device status */
+
+} /* End of SK_get_stats() */
+
+
+/*-
+ * Function : set_multicast_list
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function gets called when a program performs
+ * a SIOCSIFFLAGS call. Ifconfig does this if you call
+ * 'ifconfig [-]allmulti' which enables or disables the
+ * Promiscuous mode.
+ * Promiscuous mode is when the Network card accepts all
+ * packets, not only the packets which match our MAC
+ * Address. It is useful for writing a network monitor,
+ * but it is also a security problem. You have to remember
+ * that all information on the net is not encrypted.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device Structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+ * 95/10/18 ACox New multicast calling scheme
+-*/
+
+
+/* Set or clear the multicast filter for SK_G16.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+
+ if (dev->flags&IFF_PROMISC)
+ {
+ /* Reinitialize LANCE with MODE_PROM set */
+ SK_lance_init(dev, MODE_PROM);
+ }
+ else if (dev->mc_count==0 && !(dev->flags&IFF_ALLMULTI))
+ {
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+ else
+ {
+ /* Multicast with logical address filter on */
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+
+ /* Not implemented yet. */
+ }
+} /* End of set_multicast_list() */
+
+
+
+/*-
+ * Function : SK_rom_addr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/01
+ *
+ * Description : Try to find a Boot_ROM at all possible locations
+ *
+ * Parameters : None
+ * Return Value : Address where Boot_ROM is
+ * Errors : 0 - Did not find Boot_ROM
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+unsigned int SK_rom_addr(void)
+{
+ int i,j;
+ int rom_found = 0;
+ unsigned int rom_location[] = SK_BOOT_ROM_LOCATIONS;
+ unsigned char rom_id[] = SK_BOOT_ROM_ID;
+ unsigned char *test_byte;
+
+ /* Autodetect Boot_ROM */
+ PRINTK(("## %s: Autodetection of Boot_ROM\n", SK_NAME));
+
+ for (i = 0; (rom_location[i] != 0) && (rom_found == 0); i++)
+ {
+
+ PRINTK(("## Trying ROM location %#08x", rom_location[i]));
+
+ rom_found = 1;
+ for (j = 0; j < 6; j++)
+ {
+ test_byte = (unsigned char *) (rom_location[i]+j);
+ PRINTK((" %02x ", *test_byte));
+
+ if(!(*test_byte == rom_id[j]))
+ {
+ rom_found = 0;
+ }
+ }
+ PRINTK(("\n"));
+ }
+
+ if (rom_found == 1)
+ {
+ PRINTK(("## %s: Boot_ROM found at %#08x\n",
+ SK_NAME, rom_location[(i-1)]));
+
+ return (rom_location[--i]);
+ }
+ else
+ {
+ PRINTK(("%s: No Boot_ROM found\n", SK_NAME));
+ return 0;
+ }
+} /* End of SK_rom_addr() */
+
+
+
+/* LANCE access functions
+ *
+ * ! CSR1-3 can only be accessed when in CSR0 the STOP bit is set !
+ */
+
+
+/*-
+ * Function : SK_reset_board
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : This function resets SK_G16 and all components, but
+ * POS registers are not changed
+ *
+ * Parameters : None
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ *
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_reset_board(void)
+{
+ int i;
+
+ SK_PORT = 0x00; /* Reset active */
+ for (i = 0; i < 10 ; i++) /* Delay min 5ms */
+ ;
+ SK_PORT = SK_RESET; /* Set back to normal operation */
+
+} /* End of SK_reset_board() */
+
+
+/*-
+ * Function : SK_set_RAP
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set LANCE Register Address Port to register
+ * for later data transfer.
+ *
+ * Parameters : I : reg_number - which CSR to read/write from/to
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_set_RAP(int reg_number)
+{
+ SK_IOREG = reg_number;
+ SK_PORT = SK_RESET | SK_RAP | SK_WREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+} /* End of SK_set_RAP() */
+
+
+/*-
+ * Function : SK_read_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set RAP and read data from a LANCE CSR register
+ *
+ * Parameters : I : reg_number - which CSR to read from
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_read_reg(int reg_number)
+{
+ SK_set_RAP(reg_number);
+
+ SK_PORT = SK_RESET | SK_RDATA | SK_RREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+ return (SK_IOREG);
+
+} /* End of SK_read_reg() */
+
+
+/*-
+ * Function : SK_rread_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/28
+ *
+ * Description : Read data from preseted register.
+ * This function requires that you know which
+ * Register is actually set. Be aware that CSR1-3
+ * can only be accessed when in CSR0 STOP is set.
+ *
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_rread_reg(void)
+{
+ SK_PORT = SK_RESET | SK_RDATA | SK_RREG;
+
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+ return (SK_IOREG);
+
+} /* End of SK_rread_reg() */
+
+
+/*-
+ * Function : SK_write_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function sets the RAP then fills in the
+ * LANCE I/O Reg and starts Transfer to LANCE.
+ * It waits until transfer has ended which is max. 7 ms
+ * and then it returns.
+ *
+ * Parameters : I : reg_number - which CSR to write to
+ * I : value - what value to fill into register
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_write_reg(int reg_number, int value)
+{
+ SK_set_RAP(reg_number);
+
+ SK_IOREG = value;
+ SK_PORT = SK_RESET | SK_RDATA | SK_WREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+} /* End of SK_write_reg */
+
+
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+/*-
+ * Function : SK_print_pos
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function prints out the 4 POS (Programmable
+ * Option Select) Registers. Used mainly to debug operation.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : char * - Text which will be printed as title
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_pos(struct device *dev, char *text)
+{
+ int ioaddr = dev->base_addr;
+
+ unsigned char pos0 = inb(SK_POS0),
+ pos1 = inb(SK_POS1),
+ pos2 = inb(SK_POS2),
+ pos3 = inb(SK_POS3),
+ pos4 = inb(SK_POS4);
+
+
+ printk("## %s: %s.\n"
+ "## pos0=%#4x pos1=%#4x pos2=%#04x pos3=%#08x pos4=%#04x\n",
+ SK_NAME, text, pos0, pos1, pos2, (pos3<<14), pos4);
+
+} /* End of SK_print_pos() */
+
+
+
+/*-
+ * Function : SK_print_dev
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function simply prints out the important fields
+ * of the device structure.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : char *text - Title for printing
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_dev(struct device *dev, char *text)
+{
+ if (dev == NULL)
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## DEVICE == NULL\n");
+ }
+ else
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## Device Name: %s Base Address: %#06lx IRQ: %d\n",
+ dev->name, dev->base_addr, dev->irq);
+
+ printk("## FLAGS: start: %d tbusy: %ld int: %d\n",
+ dev->start, dev->tbusy, dev->interrupt);
+
+ printk("## next device: %#08x init function: %#08x\n",
+ (int) dev->next, (int) dev->init);
+ }
+
+} /* End of SK_print_dev() */
+
+
+
+/*-
+ * Function : SK_print_ram
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/02
+ *
+ * Description : This function is used to check how are things set up
+ * in the 16KB RAM. Also the pointers to the receive and
+ * transmit descriptor rings and rx and tx buffers locations.
+ * It contains a minor bug in printing, but has no effect to the values
+ * only newlines are not correct.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_ram(struct device *dev)
+{
+
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ printk("## %s: RAM Details.\n"
+ "## RAM at %#08x tmdhead: %#08x rmdhead: %#08x initblock: %#08x\n",
+ SK_NAME,
+ (unsigned int) p->ram,
+ (unsigned int) p->tmdhead,
+ (unsigned int) p->rmdhead,
+ (unsigned int) &(p->ram)->ib);
+
+ printk("## ");
+
+ for(i = 0; i < TMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("tmdbufs%d: %#08x ", (i+1), (int) p->tmdbufs[i]);
+ }
+ printk("## ");
+
+ for(i = 0; i < RMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("rmdbufs%d: %#08x ", (i+1), (int) p->rmdbufs[i]);
+ }
+ printk("\n");
+
+} /* End of SK_print_ram() */
+
diff --git a/linux/src/drivers/net/sk_g16.h b/linux/src/drivers/net/sk_g16.h
new file mode 100644
index 0000000..31ae19a
--- /dev/null
+++ b/linux/src/drivers/net/sk_g16.h
@@ -0,0 +1,164 @@
+/*-
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.h
+ * Version : $Revision: 1.1.4.1 $
+ *
+ * Author : M.Hipp (mhipp@student.uni-tuebingen.de)
+ * changes by : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : In here are all necessary definitions of
+ * the am7990 (LANCE) chip used for writing a
+ * network device driver which uses this chip
+ *
+-*/
+
+#ifndef SK_G16_H
+
+#define SK_G16_H
+
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ *
+ * (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+
+/*
+ * Control and Status Register 3 (CSR3) bit definitions
+ *
+ */
+
+#define CSR3_BSWAP 0x0004 /* Byte Swap (RW) */
+#define CSR3_ACON 0x0002 /* ALE Control (RW) */
+#define CSR3_BCON 0x0001 /* Byte Control (RW) */
+
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define MODE_PROM 0x8000 /* Promiscuous Mode */
+#define MODE_INTL 0x0040 /* Internal Loopback */
+#define MODE_DRTY 0x0020 /* Disable Retry */
+#define MODE_COLL 0x0010 /* Force Collision */
+#define MODE_DTCR 0x0008 /* Disable Transmit CRC) */
+#define MODE_LOOP 0x0004 /* Loopback */
+#define MODE_DTX 0x0002 /* Disable the Transmitter */
+#define MODE_DRX 0x0001 /* Disable the Receiver */
+
+#define MODE_NORMAL 0x0000 /* Normal operation mode */
+
+/*
+ * Receive message descriptor status bit definitions.
+ */
+
+#define RX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define RX_ERR 0x40 /* Error Summary */
+#define RX_FRAM 0x20 /* Framing Error */
+#define RX_OFLO 0x10 /* Overflow Error */
+#define RX_CRC 0x08 /* CRC Error */
+#define RX_BUFF 0x04 /* Buffer Error */
+#define RX_STP 0x02 /* Start of Packet */
+#define RX_ENP 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor status bit definitions.
+ */
+
+#define TX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define TX_ERR 0x40 /* Error Summary */
+#define TX_MORE 0x10 /* More the 1 retry needed to Xmit */
+#define TX_ONE 0x08 /* One retry needed to Xmit */
+#define TX_DEF 0x04 /* Deferred */
+#define TX_STP 0x02 /* Start of Packet */
+#define TX_ENP 0x01 /* End of Packet */
+
+/*
+ * Transmit status (2) (valid if TX_ERR == 1)
+ */
+
+#define TX_BUFF 0x8000 /* Buffering error (no ENP) */
+#define TX_UFLO 0x4000 /* Underflow (late memory) */
+#define TX_LCOL 0x1000 /* Late collision */
+#define TX_LCAR 0x0400 /* Loss of Carrier */
+#define TX_RTRY 0x0200 /* Failed after 16 retransmissions */
+#define TX_TDR 0x003f /* Time-domain-reflectometer-value */
+
+
+/*
+ * Structures used for Communication with the LANCE
+ */
+
+/* LANCE Initialize Block */
+
+struct init_block
+{
+ unsigned short mode; /* Mode Register */
+ unsigned char paddr[6]; /* Physical Address (MAC) */
+ unsigned char laddr[8]; /* Logical Filter Address (not used) */
+ unsigned int rdrp; /* Receive Descriptor Ring pointer */
+ unsigned int tdrp; /* Transmit Descriptor Ring pointer */
+};
+
+
+/* Receive Message Descriptor Entry */
+
+struct rmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ volatile short blen; /* Buffer Length (two's complement) */
+ unsigned short mlen; /* Message Byte Count */
+};
+
+
+/* Transmit Message Descriptor Entry */
+
+struct tmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ unsigned short blen; /* Buffer Length (two's complement) */
+ unsigned volatile short status2; /* Error Status Bits */
+};
+
+#endif /* End of SK_G16_H */
diff --git a/linux/src/drivers/net/smc-ultra.c b/linux/src/drivers/net/smc-ultra.c
new file mode 100644
index 0000000..f593aeb
--- /dev/null
+++ b/linux/src/drivers/net/smc-ultra.c
@@ -0,0 +1,496 @@
+/* smc-ultra.c: A SMC Ultra ethernet driver for linux. */
+/*
+ This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards.
+
+ Written 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver uses the cards in the 8390-compatible mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c. The code in this file is responsible for
+
+ ultra_probe() Detecting and initializing the card.
+ ultra_probe1()
+
+ ultra_open() The card-specific details of starting, stopping
+ ultra_reset_8390() and resetting the 8390 NIC core.
+ ultra_close()
+
+ ultra_block_input() Routines for reading and writing blocks of
+ ultra_block_output() packet buffer memory.
+ ultra_pio_input()
+ ultra_pio_output()
+
+ This driver enables the shared memory only when doing the actual data
+ transfers to avoid a bug in early version of the card that corrupted
+ data transferred by a AHA1542.
+
+ This driver now supports the programmed-I/O (PIO) data transfer mode of
+ the EtherEZ. It does not use the non-8390-compatible "Altego" mode.
+ That support (if available) is in smc-ez.c.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ Donald Becker : 4/17/96 PIO support, minor potential problems avoided.
+ Donald Becker : 6/6/96 correctly set auto-wrap bit.
+*/
+
+static const char *version =
+ "smc-ultra.c:v2.02 2/3/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ultra_portlist[] =
+{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0};
+
+int ultra_probe(struct device *dev);
+int ultra_probe1(struct device *dev, int ioaddr);
+
+static int ultra_open(struct device *dev);
+static void ultra_reset_8390(struct device *dev);
+static void ultra_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void ultra_pio_get_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_pio_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_pio_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int ultra_close_card(struct device *dev);
+
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define IOPD 0x02 /* I/O Pipe Data (16 bits), PIO operation. */
+#define IOPA 0x07 /* I/O Pipe Address for PIO operation. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+/* Probe for the Ultra. This looks like a 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+#ifdef HAVE_DEVLIST
+struct netdev_entry ultra_drv =
+{"ultra", ultra_probe1, NETCARD_IO_EXTENT, netcard_portlist};
+#else
+
+int ultra_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ultra_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; ultra_portlist[i]; i++) {
+ int ioaddr = ultra_portlist[i];
+ if (check_region(ioaddr, ULTRA_IO_EXTENT))
+ continue;
+ if (ultra_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+int ultra_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ const char *model_name;
+ unsigned char eeprom_irq = 0;
+ static unsigned version_printed = 0;
+ /* Values from various config regs. */
+ unsigned char num_pages, irqreg, addr, piomode;
+ unsigned char idreg = inb(ioaddr + 7);
+ unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xF0) != 0x20 /* SMC Ultra */
+ && (idreg & 0xF0) != 0x40) /* SMC EtherEZ */
+ return ENODEV;
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xFF)
+ return ENODEV;
+
+ if (dev == NULL)
+ dev = init_etherdev(0, 0);
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
+
+ printk("%s: %s at %#3x,", dev->name, model_name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enabled FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+ piomode = inb(ioaddr + 0x8);
+ addr = inb(ioaddr + 0xb);
+ irqreg = inb(ioaddr + 0xd);
+
+ /* Switch back to the station address register set so that the MS-DOS driver
+ can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq;
+
+ /* The IRQ bits are split. */
+ irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
+
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ return -EAGAIN;
+ }
+ dev->irq = irq;
+ eeprom_irq = 1;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (", no memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* OK, we are certain this is going to work. Setup the device. */
+ request_region(ioaddr, ULTRA_IO_EXTENT, model_name);
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
+
+ {
+ int addr_tbl[4] = {0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000};
+ short num_pages_tbl[4] = {0x20, 0x40, 0x80, 0xff};
+
+ dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
+ num_pages = num_pages_tbl[(addr >> 4) & 3];
+ }
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = num_pages;
+
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end
+ = dev->mem_start + (ei_status.stop_page - START_PG)*256;
+
+ if (piomode) {
+ printk(",%s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ ei_status.block_input = &ultra_pio_input;
+ ei_status.block_output = &ultra_pio_output;
+ ei_status.get_8390_hdr = &ultra_pio_get_hdr;
+ } else {
+ printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
+ dev->irq, dev->mem_start, dev->mem_end-1);
+ ei_status.block_input = &ultra_block_input;
+ ei_status.block_output = &ultra_block_output;
+ ei_status.get_8390_hdr = &ultra_get_8390_hdr;
+ }
+ ei_status.reset_8390 = &ultra_reset_8390;
+ dev->open = &ultra_open;
+ dev->stop = &ultra_close_card;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static int
+ultra_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40,
+ 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, };
+
+ if (request_irq(dev->irq, ei_interrupt, 0, ei_status.name, dev))
+ return -EAGAIN;
+
+ outb(0x00, ioaddr); /* Disable shared memory for safety. */
+ outb(0x80, ioaddr + 5);
+ /* Set the IRQ line. */
+ outb(inb(ioaddr + 4) | 0x80, ioaddr + 4);
+ outb((inb(ioaddr + 13) & ~0x4C) | irq2reg[dev->irq], ioaddr + 13);
+ outb(inb(ioaddr + 4) & 0x7f, ioaddr + 4);
+
+ if (ei_status.block_input == &ultra_pio_input) {
+ outb(0x11, ioaddr + 6); /* Enable interrupts and PIO. */
+ outb(0x01, ioaddr + 0x19); /* Enable ring read auto-wrap. */
+ } else
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+ultra_reset_8390(struct device *dev)
+{
+ int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA_RESET, cmd_port);
+ if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(0x00, cmd_port); /* Disable shared memory for safety. */
+ outb(0x80, cmd_port + 5);
+ if (ei_status.block_input == &ultra_pio_input)
+ outb(0x11, cmd_port + 6); /* Enable interrupts and PIO. */
+ else
+ outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ultra_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = dev->mem_start + ((ring_page - START_PG)<<8);
+
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void
+ultra_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + ring_offset - (START_PG<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+static void
+ultra_block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ unsigned long shmem = dev->mem_start + ((start_page - START_PG)<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ memcpy_toio(shmem, buf, count);
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+/* The identical operations for programmed I/O cards.
+ The PIO model is trivial to use: the 16 bit start address is written
+ byte-sequentially to IOPA, with no intervening I/O operations, and the
+ data is read or written to the IOPD data port.
+ The only potential complication is that the address register is shared
+ and must be always be rewritten between each read/write direction change.
+ This is no problem for us, as the 8390 code ensures that we are single
+ threaded. */
+static void ultra_pio_get_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_page, ioaddr + IOPA);
+ insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+static void ultra_pio_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ char *buf = skb->data;
+
+ /* For now set the address again, although it should already be correct. */
+ outb(ring_offset, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_offset >> 8, ioaddr + IOPA);
+ /* We know skbuffs are padded to at least word alignment. */
+ insw(ioaddr + IOPD, buf, (count+1)>>1);
+}
+
+static void ultra_pio_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(start_page, ioaddr + IOPA);
+ /* An extra odd byte is OK here as well. */
+ outsw(ioaddr + IOPD, buf, (count+1)>>1);
+}
+
+static int
+ultra_close_card(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+ irq2dev_map[dev->irq] = 0;
+
+ NS8390_init(dev, 0);
+
+ /* We should someday disable shared memory and change to 8-bit mode
+ "just in case"... */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_ULTRA_CARDS] = { 0, };
+static struct device dev_ultra[MAX_ULTRA_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_ULTRA_CARDS] = { 0, };
+static int irq[MAX_ULTRA_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = ultra_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: ultra_close_card() does free_irq + irq2dev */
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -Wall -O6 -I/usr/src/linux/net/inet -c smc-ultra.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/smc-ultra32.c b/linux/src/drivers/net/smc-ultra32.c
new file mode 100644
index 0000000..6cde4c2
--- /dev/null
+++ b/linux/src/drivers/net/smc-ultra32.c
@@ -0,0 +1,413 @@
+/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux.
+
+Sources:
+
+ This driver is based on (cloned from) the ISA SMC Ultra driver
+ written by Donald Becker. Modifications to support the EISA
+ version of the card by Paul Gortmaker and Leonard N. Zubkoff.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+Theory of Operation:
+
+ The SMC Ultra32C card uses the SMC 83c790 chip which is also
+ found on the ISA SMC Ultra cards. It has a shared memory mode of
+ operation that makes it similar to the ISA version of the card.
+ The main difference is that the EISA card has 32KB of RAM, but
+ only an 8KB window into that memory. The EISA card also can be
+ set for a bus-mastering mode of operation via the ECU, but that
+ is not (and probably will never be) supported by this driver.
+ The ECU should be run to enable shared memory and to disable the
+ bus-mastering feature for use with linux.
+
+ By programming the 8390 to use only 8KB RAM, the modifications
+ to the ISA driver can be limited to the probe and initialization
+ code. This allows easy integration of EISA support into the ISA
+ driver. However, the driver development kit from SMC provided the
+ register information for sliding the 8KB window, and hence the 8390
+ is programmed to use the full 32KB RAM.
+
+ Unfortunately this required code changes outside the probe/init
+ routines, and thus we decided to separate the EISA driver from
+ the ISA one. In this way, ISA users don't end up with a larger
+ driver due to the EISA code, and EISA users don't end up with a
+ larger driver due to the ISA EtherEZ PIO code. The driver is
+ similar to the 3c503/16 driver, in that the window must be set
+ back to the 1st 8KB of space for access to the two 8390 Tx slots.
+
+ In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to
+ be a limiting factor, since the EISA bus could get packets off
+ the card fast enough, but having the use of lots of RAM as Rx
+ space is extra insurance if interrupt latencies become excessive.
+
+*/
+
+static const char *version = "smc-ultra32.c: 06/97 v1.00\n";
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+int ultra32_probe(struct device *dev);
+int ultra32_probe1(struct device *dev, int ioaddr);
+static int ultra32_open(struct device *dev);
+static void ultra32_reset_8390(struct device *dev);
+static void ultra32_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra32_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra32_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int ultra32_close(struct device *dev);
+
+#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */
+#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA32_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+/*
+ * Defines that apply only to the Ultra32 EISA card. Note that
+ * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates
+ * into an EISA ID of 0x1080A34D
+ */
+#define ULTRA32_BASE 0xca0
+#define ULTRA32_ID 0x1080a34d
+#define ULTRA32_IDPORT (-0x20) /* 0xc80 */
+/* Config regs 1->7 from the EISA !SMC8010.CFG file. */
+#define ULTRA32_CFG1 0x04 /* 0xca4 */
+#define ULTRA32_CFG2 0x05 /* 0xca5 */
+#define ULTRA32_CFG3 (-0x18) /* 0xc88 */
+#define ULTRA32_CFG4 (-0x17) /* 0xc89 */
+#define ULTRA32_CFG5 (-0x16) /* 0xc8a */
+#define ULTRA32_CFG6 (-0x15) /* 0xc8b */
+#define ULTRA32_CFG7 0x0d /* 0xcad */
+
+
+/* Probe for the Ultra32. This looks like a 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+
+/*
+ * Scan the EISA slots (I/O bases 0x1ca0..0x8ca0, one per slot) for an
+ * SMC Ultra32 by matching the 32-bit EISA product ID at <slot>+0xc80.
+ * Returns 0 on the first card ultra32_probe1() initializes, otherwise
+ * ENODEV (positive, per this tree's probe-routine convention).
+ */
+int ultra32_probe(struct device *dev)
+{
+ const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
+ int ioaddr, edge, media;
+
+ if (!EISA_bus) return ENODEV;
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (ioaddr = 0x1000 + ULTRA32_BASE; ioaddr < 0x9000; ioaddr += 0x1000)
+ /* inb() first: a cheap test for an empty slot (floating bus
+ reads 0xff) before the full 32-bit ID comparison. */
+ if (check_region(ioaddr, ULTRA32_IO_EXTENT) == 0 &&
+ inb(ioaddr + ULTRA32_IDPORT) != 0xff &&
+ inl(ioaddr + ULTRA32_IDPORT) == ULTRA32_ID) {
+ /* CFG7 low bits select the media; CFG5 bit 3 the IRQ trigger. */
+ media = inb(ioaddr + ULTRA32_CFG7) & 0x03;
+ edge = inb(ioaddr + ULTRA32_CFG5) & 0x08;
+ printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n",
+ ioaddr >> 12, ifmap[media],
+ (edge ? "Edge Triggered" : "Level Sensitive"));
+ if (ultra32_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+ return ENODEV;
+}
+
+/*
+ * Initialize a card found at 'ioaddr'. Verifies the chip ID nibble
+ * and the station-address PROM checksum, reads the MAC address, picks
+ * an IRQ from the EISA config registers if the caller did not force
+ * one, then fills in the 8390 driver hooks and the shared-memory
+ * window layout. Returns 0 on success; ENODEV (positive, probe
+ * convention), -EAGAIN or -ENOMEM on failure.
+ */
+int ultra32_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ const char *model_name;
+ static unsigned version_printed = 0;
+ /* Values from various config regs. */
+ unsigned char idreg = inb(ioaddr + 7);
+ unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xf0) != 0x20) /* SMC Ultra */
+ return ENODEV;
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ /* The eight PROM bytes (station address plus checksum byte) must
+ sum to 0xff modulo 256. */
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xff)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("smc-ultra32.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ model_name = "SMC Ultra32";
+
+ printk("%s: %s at 0x%X,", dev->name, model_name, ioaddr);
+
+ /* Read the MAC address out of the PROM while printing it. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+
+ /* Reset RAM addr. */
+ outb(0x00, ioaddr + 0x0b);
+
+ /* Switch back to the station address register set so that the
+ MS-DOS driver can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) {
+ printk("\nsmc-ultra32: Card RAM is disabled! "
+ "Run EISA config utility.\n");
+ return ENODEV;
+ }
+ if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0)
+ printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. "
+ "Run EISA config utility.\n");
+
+ /* No IRQ forced by the caller: decode it from CFG5 via irqmap[]. */
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07];
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ return -EAGAIN;
+ }
+ dev->irq = irq;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (", no memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* OK, we are certain this is going to work. Setup the device. */
+ request_region(ioaddr, ULTRA32_IO_EXTENT, model_name);
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET;
+
+ /* Save RAM address in the unused reg0 to avoid excess inb's. */
+ ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc;
+
+ dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11);
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = 0;
+ ei_status.rx_start_page = TX_PAGES;
+ /* All Ultra32 cards have 32KB memory with an 8KB window. */
+ /* 32KB = 128 pages of 256 bytes each. */
+ ei_status.stop_page = 128;
+
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start + 0x1fff;
+
+ printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n",
+ dev->irq, dev->mem_start, dev->mem_end);
+ ei_status.block_input = &ultra32_block_input;
+ ei_status.block_output = &ultra32_block_output;
+ ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
+ ei_status.reset_8390 = &ultra32_reset_8390;
+ dev->open = &ultra32_open;
+ dev->stop = &ultra32_close;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+/*
+ * dev->open hook ('ifconfig up'): grab the IRQ, enable the card's
+ * shared memory and interrupt gates, raise the 8390 early-receive
+ * warning threshold so ERW interrupts never fire, then start the
+ * generic 8390 machinery. Returns -EAGAIN if the IRQ is unavailable.
+ */
+static int ultra32_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
+
+ if (request_irq(dev->irq, ei_interrupt, 0, ei_status.name, dev))
+ return -EAGAIN;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * dev->stop hook ('ifconfig down'): mark the interface stopped and
+ * busy, mask the card's interrupt gates, release the IRQ, and leave
+ * the 8390 re-initialized in its stopped state. Always returns 0.
+ */
+static int ultra32_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+ irq2dev_map[dev->irq] = 0;
+
+ /* Re-init with startp == 0: leave the 8390 quiescent. */
+ NS8390_init(dev, 0);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+ * 8390 core callback: hard-reset the ASIC, then restore the shared
+ * memory / interrupt configuration that ultra32_open() established.
+ */
+static void ultra32_reset_8390(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA32_RESET, ioaddr);
+ if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+/*
+ * 8390 core callback: copy the packet header for 'ring_page' out of
+ * shared memory into *hdr. The register at CFG3 selects which 8KB
+ * window of the 32KB card RAM is mapped: bits 5-6 of the page number
+ * pick the window, bits 0-4 the 256-byte page within it.
+ * NOTE(review): the readl() path assumes struct e8390_pkt_hdr is
+ * exactly 4 bytes and laid out to match a little-endian 32-bit load
+ * -- confirm against 8390.h before changing.
+ */
+static void ultra32_get_8390_hdr(struct device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ unsigned long hdr_start = dev->mem_start + ((ring_page & 0x1f) << 8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select correct 8KB Window. */
+ outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps, or in this case, when a
+ packet spans an 8KB boundary. Note that the current 8KB segment is
+ already set by the get_8390_hdr routine. */
+
+/*
+ * 8390 core callback: copy 'count' bytes starting at 'ring_offset'
+ * in card RAM into skb->data. The low 13 bits of ring_offset index
+ * into the currently mapped 8KB window (left correct by the earlier
+ * get_8390_hdr call). A copy that crosses a window boundary is
+ * split: the remainder comes either from the next window, or -- at
+ * the end of the 32KB ring -- from the first window again, starting
+ * at rmem_start (the Rx ring wrap point past the Tx pages).
+ */
+static void ultra32_block_input(struct device *dev,
+ int count,
+ struct sk_buff *skb,
+ int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + (ring_offset & 0x1fff);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) {
+ /* Copy crosses an 8KB boundary: take what remains of the
+ current window first. */
+ int semi_count = 8192 - (ring_offset & 0x1FFF);
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ if (ring_offset < 96*256) {
+ /* Select next 8KB Window. */
+ ring_offset += semi_count;
+ outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg);
+ memcpy_fromio(skb->data + semi_count, dev->mem_start, count);
+ } else {
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ }
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+}
+
+/*
+ * 8390 core callback: copy a Tx frame into card RAM at 'start_page'.
+ * The Tx slots live in the first 8KB of card RAM (tx_start_page is
+ * 0), so window 0 is mapped before the copy.
+ */
+static void ultra32_block_output(struct device *dev,
+ int count,
+ const unsigned char *buf,
+ int start_page)
+{
+ unsigned long xfer_start = dev->mem_start + (start_page<<8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+
+ memcpy_toio(xfer_start, buf, count);
+}
+
+#ifdef MODULE
+#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+/* Storage backing dev->name for each slot: NAMELEN bytes per card. */
+static char namelist[NAMELEN * MAX_ULTRA32_CARDS] = { 0, };
+/* Static net-device table; only slot 0 is spelled out, the remaining
+ entries are implicitly zero-initialized. */
+static struct device dev_ultra[MAX_ULTRA32_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+/*
+ * Module entry point: register every dev_ultra[] slot; the EISA scan
+ * (ultra32_probe) runs from register_netdev() via dev->init, so no
+ * io=/irq= parameters are needed here. Returns 0 if at least one
+ * card was found, -ENXIO otherwise.
+ */
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ /* Hand each device its private slice of namelist[]. */
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->init = ultra32_probe;
+ if (register_netdev(dev) != 0) {
+ if (found > 0) return 0; /* Got at least one. */
+ printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n");
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+/*
+ * Module exit: for every slot whose probe succeeded (dev->priv was
+ * allocated by ethdev_init in the probe), free the private data,
+ * release the I/O region and unregister the device. IRQ teardown
+ * happens in the close routine, not here.
+ */
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: ultra32_close_card() does free_irq + irq2dev */
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(ioaddr, ULTRA32_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/linux/src/drivers/net/smc9194.c b/linux/src/drivers/net/smc9194.c
new file mode 100644
index 0000000..e3d648d
--- /dev/null
+++ b/linux/src/drivers/net/smc9194.c
@@ -0,0 +1,1779 @@
+/*------------------------------------------------------------------------
+ . smc9194.c
+ . This is a driver for SMC's 9000 series of Ethernet cards.
+ .
+ . Copyright (C) 1996 by Erik Stahlman
+ . This software may be used and distributed according to the terms
+ . of the GNU Public License, incorporated herein by reference.
+ .
+ . "Features" of the SMC chip:
+ . 4608 byte packet memory. ( for the 91C92. Others have more )
+ . EEPROM for configuration
+ . AUI/TP selection ( mine has 10Base2/10BaseT select )
+ .
+ . Arguments:
+ . io = for the base address
+ . irq = for the IRQ
+ . ifport = 0 for autodetect, 1 for TP, 2 for AUI ( or 10base2 )
+ .
+ . author:
+ . Erik Stahlman ( erik@vt.edu )
+ .
+ . Hardware multicast code from Peter Cammaert ( pc@denkart.be )
+ .
+ . Sources:
+ . o SMC databook
+ . o skeleton.c by Donald Becker ( becker@cesdis.gsfc.nasa.gov )
+ . o ( a LOT of advice from Becker as well )
+ .
+ . History:
+ . 12/07/95 Erik Stahlman written, got receive/xmit handled
+ . 01/03/96 Erik Stahlman worked out some bugs, actually usable!!! :-)
+ . 01/06/96 Erik Stahlman cleaned up some, better testing, etc
+ . 01/29/96 Erik Stahlman fixed autoirq, added multicast
+ . 02/01/96 Erik Stahlman 1. disabled all interrupts in smc_reset
+ . 2. got rid of post-decrementing bug -- UGH.
+ . 02/13/96 Erik Stahlman Tried to fix autoirq failure. Added more
+ . descriptive error messages.
+ . 02/15/96 Erik Stahlman Fixed typo that caused detection failure
+ . 02/23/96 Erik Stahlman Modified it to fit into kernel tree
+ . Added support to change hardware address
+ . Cleared stats on opens
+ . 02/26/96 Erik Stahlman Trial support for Kernel 1.2.13
+ . Kludge for automatic IRQ detection
+ . 03/04/96 Erik Stahlman Fixed kernel 1.3.70 +
+ . Fixed bug reported by Gardner Buchanan in
+ . smc_enable, with outw instead of outb
+ . 03/06/96 Erik Stahlman Added hardware multicast from Peter Cammaert
+ ----------------------------------------------------------------------------*/
+
+static const char *version =
+ "smc9194.c:v0.12 03/06/96 by Erik Stahlman (erik@vt.edu)\n";
+
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "smc9194.h"
+/*------------------------------------------------------------------------
+ .
+ . Configuration options, for the experienced user to change.
+ .
+ -------------------------------------------------------------------------*/
+
+/*
+ . this is for kernels > 1.2.70
+*/
+#define REALLY_NEW_KERNEL
+#ifndef REALLY_NEW_KERNEL
+#define free_irq( x, y ) free_irq( x )
+#define request_irq( x, y, z, u, v ) request_irq( x, y, z, u )
+#endif
+
+/*
+ . Do you want to use this with old kernels.
+ . WARNING: this is not well tested.
+#define SUPPORT_OLD_KERNEL
+*/
+
+
+/*
+ . Do you want to use 32 bit xfers? This should work on all chips, as
+ . the chipset is designed to accommodate them.
+*/
+#define USE_32_BIT 1
+
+/*
+ .the SMC9194 can be at any of the following port addresses. To change,
+ .for a slightly different card, you can add it to the array. Keep in
+ .mind that the array must end in zero.
+*/
+static unsigned int smc_portlist[] =
+ { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0,
+ 0x300, 0x320, 0x340, 0x360, 0x380, 0x3A0, 0x3C0, 0x3E0, 0};
+
+/*
+ . Wait time for memory to be free. This probably shouldn't be
+ . tuned that much, as waiting for this means nothing else happens
+ . in the system
+*/
+#define MEMORY_WAIT_TIME 16
+
+/*
+ . DEBUGGING LEVELS
+ .
+ . 0 for normal operation
+ . 1 for slightly more details
+ . >2 for various levels of increasingly useless information
+ . 2 for interrupt tracking, status flags
+ . 3 for packet dumps, etc.
+*/
+#define SMC_DEBUG 0
+
+#if (SMC_DEBUG > 2 )
+#define PRINTK3(x) printk x
+#else
+#define PRINTK3(x)
+#endif
+
+#if SMC_DEBUG > 1
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x)
+#endif
+
+/* Bug fix: this used "#ifdef SMC_DEBUG", which is always true because
+ SMC_DEBUG is unconditionally #defined above (even when it is 0), so
+ PRINTK() was never compiled out at debug level 0. Test the value
+ instead, consistent with the PRINTK2/PRINTK3 conditionals. */
+#if SMC_DEBUG > 0
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x)
+#endif
+
+
+/* the older versions of the kernel cannot support autoprobing */
+#ifdef SUPPORT_OLD_KERNEL
+#define NO_AUTOPROBE
+#endif
+
+
+/*------------------------------------------------------------------------
+ .
+ . The internal workings of the driver. If you are changing anything
+ . here with the SMC stuff, you should have the datasheet and know
+ . what you are doing.
+ .
+ -------------------------------------------------------------------------*/
+#define CARDNAME "SMC9194"
+
+#ifdef SUPPORT_OLD_KERNEL
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/* store this information for the driver.. */
+struct smc_local {
+ /*
+ these are things that the kernel wants me to keep, so users
+ can find out semi-useless statistics of how well the card is
+ performing
+ */
+ struct enet_statistics stats;
+
+ /*
+ If I have to wait until memory is available to send
+ a packet, I will store the skbuff here, until I get the
+ desired memory. Then, I'll send it out and free it.
+ */
+ struct sk_buff * saved_skb;
+
+ /*
+ . This keeps track of how many packets that I have
+ . sent out. When an TX_EMPTY interrupt comes, I know
+ . that all of these have been sent.
+ */
+ int packets_waiting;
+};
+
+
+/*-----------------------------------------------------------------
+ .
+ . The driver can be entered at any of the following entry points.
+ .
+ .------------------------------------------------------------------ */
+
+/*
+ . This is called by register_netdev(). It is responsible for
+ . checking the portlist for the SMC9000 series chipset. If it finds
+ . one, then it will initialize the device, find the hardware information,
+ . and sets up the appropriate device parameters.
+ . NOTE: Interrupts are *OFF* when this procedure is called.
+ .
+ . NB:This shouldn't be static since it is referred to externally.
+*/
+int smc_init(struct device *dev);
+
+/*
+ . The kernel calls this function when someone wants to use the device,
+ . typically 'ifconfig ethX up'.
+*/
+static int smc_open(struct device *dev);
+
+/*
+ . This is called by the kernel to send a packet out into the net. it's
+ . responsible for doing a best-effort send, but if it's simply not possible
+ . to send it, the packet gets dropped.
+*/
+static int smc_send_packet(struct sk_buff *skb, struct device *dev);
+
+/*
+ . This is called by the kernel in response to 'ifconfig ethX down'. It
+ . is responsible for cleaning up everything that the open routine
+ . does, and maybe putting the card into a powerdown state.
+*/
+static int smc_close(struct device *dev);
+
+/*
+ . This routine allows the proc file system to query the driver's
+ . statistics.
+*/
+static struct enet_statistics * smc_query_statistics( struct device *dev);
+
+/*
+ . Finally, a call to set promiscuous mode ( for TCPDUMP and related
+ . programs ) and multicast modes.
+*/
+#ifdef SUPPORT_OLD_KERNEL
+static void smc_set_multicast_list(struct device *dev, int num_addrs,
+ void *addrs);
+#else
+static void smc_set_multicast_list(struct device *dev);
+#endif
+
+/*---------------------------------------------------------------
+ .
+ . Interrupt level calls..
+ .
+ ----------------------------------------------------------------*/
+
+/*
+ . Handles the actual interrupt
+*/
+#ifdef REALLY_NEW_KERNEL
+static void smc_interrupt(int irq, void *, struct pt_regs *regs);
+#else
+static void smc_interrupt(int irq, struct pt_regs *regs);
+#endif
+/*
+ . This is a separate procedure to handle the receipt of a packet, to
+ . leave the interrupt code looking slightly cleaner
+*/
+inline static void smc_rcv( struct device *dev );
+/*
+ . This handles a TX interrupt, which is only called when an error
+ . relating to a packet is sent.
+*/
+inline static void smc_tx( struct device * dev );
+
+/*
+ ------------------------------------------------------------
+ .
+ . Internal routines
+ .
+ ------------------------------------------------------------
+*/
+
+/*
+ . Test if a given location contains a chip, trying to cause as
+ . little damage as possible if it's not a SMC chip.
+*/
+static int smc_probe( int ioaddr );
+
+/*
+ . this routine initializes the cards hardware, prints out the configuration
+ . to the system log as well as the vanity message, and handles the setup
+ . of a device parameter.
+ . It will give an error if it can't initialize the card.
+*/
+static int smc_initcard( struct device *, int ioaddr );
+
+/*
+ . A rather simple routine to print out a packet for debugging purposes.
+*/
+#if SMC_DEBUG > 2
+static void print_packet( byte *, int );
+#endif
+
+#define tx_done(dev) 1
+
+/* this is called to actually send the packet to the chip */
+static void smc_hardware_send_packet( struct device * dev );
+
+/* Since I am not sure if I will have enough room in the chip's ram
+ . to store the packet, I call this routine, which either sends it
+ . now, or generates an interrupt when the card is ready for the
+ . packet */
+static int smc_wait_to_send_packet( struct sk_buff * skb, struct device *dev );
+
+/* this does a soft reset on the device */
+static void smc_reset( int ioaddr );
+
+/* Enable Interrupts, Receive, and Transmit */
+static void smc_enable( int ioaddr );
+
+/* this puts the device in an inactive state */
+static void smc_shutdown( int ioaddr );
+
+#ifndef NO_AUTOPROBE
+/* This routine will find the IRQ of the driver if one is not
+ . specified in the input to the device. */
+static int smc_findirq( int ioaddr );
+#endif
+
+/*
+ this routine will set the hardware multicast table to the specified
+ values given it by the higher level routines
+*/
+#ifndef SUPPORT_OLD_KERNEL
+static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * );
+static int crc32( char *, int );
+#endif
+
+#ifdef SUPPORT_OLD_KERNEL
+extern struct device *init_etherdev(struct device *dev, int sizeof_private,
+ unsigned long *mem_startp );
+#endif
+
+/*
+ . Function: smc_reset( int ioaddr )
+ . Purpose:
+ . This sets the SMC91xx chip to its normal state, hopefully from whatever
+ . mess that any other DOS driver has put it in.
+ .
+ . Maybe I should reset more registers to defaults in here? SOFTRESET should
+ . do that for me.
+ .
+ . Method:
+ . 1. send a SOFT RESET
+ . 2. wait for it to finish
+ . 3. enable autorelease mode
+ . 4. reset the memory management unit
+ . 5. clear all interrupts
+ .
+*/
+static void smc_reset( int ioaddr )
+{
+ /* This resets the registers mostly to defaults, but doesn't
+ affect EEPROM. That seems unnecessary */
+ SMC_SELECT_BANK( 0 );
+ outw( RCR_SOFTRESET, ioaddr + RCR );
+
+ /* this should pause enough for the chip to be happy */
+ SMC_DELAY( );
+
+ /* Set the transmit and receive configuration registers to
+ default values */
+ outw( RCR_CLEAR, ioaddr + RCR );
+ outw( TCR_CLEAR, ioaddr + TCR );
+
+ /* set the control register to automatically
+ release successfully transmitted packets, to make the best
+ use out of our limited memory */
+ SMC_SELECT_BANK( 1 );
+ outw( inw( ioaddr + CONTROL ) | CTL_AUTO_RELEASE , ioaddr + CONTROL );
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_RESET, ioaddr + MMU_CMD );
+
+ /* Note: It doesn't seem that waiting for the MMU busy is needed here,
+ but this is a place where future chipsets _COULD_ break. Be wary
+ of issuing another MMU command right after this */
+
+ /* Mask every interrupt source until smc_enable() turns them on. */
+ outb( 0, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_enable
+ . Purpose: let the chip talk to the outside world
+ . Method:
+ . 1. Enable the transmitter
+ . 2. Enable the receiver
+ . 3. Enable interrupts
+*/
+/* Bring the chip on-line: turn on transmit and receive, then unmask
+ the interrupt sources in SMC_INTERRUPT_MASK. Counterpart of
+ smc_shutdown(). */
+static void smc_enable( int ioaddr )
+{
+ SMC_SELECT_BANK( 0 );
+ /* see the header file for options in TCR/RCR NORMAL*/
+ outw( TCR_NORMAL, ioaddr + TCR );
+ outw( RCR_NORMAL, ioaddr + RCR );
+
+ /* now, enable interrupts */
+ SMC_SELECT_BANK( 2 );
+ outb( SMC_INTERRUPT_MASK, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_shutdown
+ . Purpose: closes down the SMC91xxx chip.
+ . Method:
+ . 1. zero the interrupt mask
+ . 2. clear the enable receive flag
+ . 3. clear the enable xmit flags
+ .
+ . TODO:
+ . (1) maybe utilize power down mode.
+ . Why not yet? Because while the chip will go into power down mode,
+ . the manual says that it will wake up in response to any I/O requests
+ . in the register space. Empirical results do not show this working.
+*/
+static void smc_shutdown( int ioaddr )
+{
+ /* no more interrupts for me */
+ SMC_SELECT_BANK( 2 );
+ outb( 0, ioaddr + INT_MASK );
+
+ /* and tell the card to stay away from that nasty outside world */
+ SMC_SELECT_BANK( 0 );
+ /* NOTE(review): smc_reset()/smc_enable() access RCR/TCR with outw;
+ the byte writes below only clear the low 8 bits -- presumably
+ enough to drop the RX/TX enable bits, but verify against the
+ 91C9x databook. */
+ outb( RCR_CLEAR, ioaddr + RCR );
+ outb( TCR_CLEAR, ioaddr + TCR );
+#if 0
+ /* finally, shut the chip down */
+ SMC_SELECT_BANK( 1 );
+ /* NOTE(review): dead code, and the call below is malformed (three
+ arguments passed to outw; a '|' was probably intended) -- fix
+ before ever enabling this block. */
+ outw( inw( ioaddr + CONTROL ), CTL_POWERDOWN, ioaddr + CONTROL );
+#endif
+}
+
+
+#ifndef SUPPORT_OLD_KERNEL
+/*
+ . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds )
+ . Purpose:
+ . This sets the internal hardware table to filter out unwanted multicast
+ . packets before they take up memory.
+ .
+ . The SMC chip uses a hash table where the high 6 bits of the CRC of
+ . address are the offset into the table. If that bit is 1, then the
+ . multicast packet is accepted. Otherwise, it's dropped silently.
+ .
+ . To use the 6 bits as an offset into the table, the high 3 bits are the
+ . number of the 8 bit register, while the low 3 bits are the bit within
+ . that register.
+ .
+ . This routine is based very heavily on the one provided by Peter Cammaert.
+*/
+
+
+/*
+ * Build the 64-bit multicast hash table from the 'count' addresses in
+ * the dev_mc_list chain and load it into the chip (bank 3 registers
+ * MULTICAST1..MULTICAST8). Each accepted address sets one bit whose
+ * index is derived from six bits of the address CRC; the invert3[]
+ * shuffles map that index onto the chip's bit ordering. Note: leaves
+ * register bank 3 selected on return.
+ */
+static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * addrs ) {
+ int i;
+ unsigned char multicast_table[ 8 ];
+ struct dev_mc_list * cur_addr;
+ /* table for flipping the order of 3 bits */
+ unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+
+ /* start with a table of all zeros: reject all */
+ memset( multicast_table, 0, sizeof( multicast_table ) );
+
+ cur_addr = addrs;
+ for ( i = 0; i < count ; i ++, cur_addr = cur_addr->next ) {
+ int position;
+
+ /* do we have a pointer here? */
+ if ( !cur_addr )
+ break;
+ /* make sure this is a multicast address - shouldn't this
+ be a given if we have it here ? */
+ if ( !( *cur_addr->dmi_addr & 1 ) )
+ continue;
+
+ /* only use the low order bits */
+ position = crc32( cur_addr->dmi_addr, 6 ) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert3[position&7]] |=
+ (1<<invert3[(position>>3)&7]);
+
+ }
+ /* now, the table can be loaded into the chipset */
+ SMC_SELECT_BANK( 3 );
+
+ for ( i = 0; i < 8 ; i++ ) {
+ outb( multicast_table[i], ioaddr + MULTICAST1 + i );
+ }
+}
+
+/*
+ Finds the CRC32 of a set of bytes.
+ Again, from Peter Cammaert's code.
+*/
+/*
+ * Bit-reflected CRC-32 (polynomial 0xedb88320, initial value
+ * 0xffffffff), processing each input byte least-significant bit
+ * first. The result is NOT complemented at the end -- the only
+ * caller (smc_setmulticast) just extracts hash bits from the raw
+ * register value. The unsigned long accumulator never exceeds 32
+ * bits (only right shifts), and is truncated to int on return.
+ */
+static int crc32( char * s, int length ) {
+ /* indices */
+ int perByte;
+ int perBit;
+ /* crc polynomial for Ethernet */
+ const unsigned long poly = 0xedb88320;
+ /* crc value - preinitialized to all 1's */
+ unsigned long crc_value = 0xffffffff;
+
+ for ( perByte = 0; perByte < length; perByte ++ ) {
+ unsigned char c;
+
+ c = *(s++);
+ for ( perBit = 0; perBit < 8; perBit++ ) {
+ /* classic reflected update: shift right, conditionally
+ XOR the polynomial when the outgoing bit differs from
+ the next input bit */
+ crc_value = (crc_value>>1)^
+ (((crc_value^c)&0x01)?poly:0);
+ c >>= 1;
+ }
+ }
+ return crc_value;
+}
+
+#endif
+
+
+/*
+ . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct device * )
+ . Purpose:
+ . Attempt to allocate memory for a packet, if chip-memory is not
+ . available, then tell the card to generate an interrupt when it
+ . is available.
+ .
+ . Algorithm:
+ .
+ . o if the saved_skb is not currently null, then drop this packet
+ . on the floor. This should never happen, because of TBUSY.
+ . o if the saved_skb is null, then replace it with the current packet,
+ . o See if I can send it now.
+ . o (NO): Enable interrupts and let the interrupt handler deal with it.
+ . o (YES):Send it now.
+*/
+/*
+ * Transmit path entry: stash the skb in lp->saved_skb and ask the
+ * chip's MMU for packet memory. If the allocation completes within
+ * MEMORY_WAIT_TIME polls the frame is pushed out immediately via
+ * smc_hardware_send_packet(); otherwise the ALLOC interrupt is
+ * enabled and the interrupt handler finishes the job later.
+ * Returns 0 normally, 1 only in the "called while busy" case.
+ */
+static int smc_wait_to_send_packet( struct sk_buff * skb, struct device * dev )
+{
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ word length;
+ unsigned short numPages;
+ word time_out;
+
+ if ( lp->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ lp->stats.tx_aborted_errors++;
+ printk(CARDNAME": Bad Craziness - sent packet while busy.\n" );
+ return 1;
+ }
+ lp->saved_skb = skb;
+
+ /* never request less than the ethernet minimum frame size */
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ /*
+ . the MMU wants the number of pages to be the number of 256 bytes
+ . 'pages', minus 1 ( since a packet can't ever have 0 pages :) )
+ */
+ numPages = length / 256;
+ /* NOTE(review): the 6 bytes of status/length/control overhead (the
+ "+6" used in smc_hardware_send_packet) are not counted here, so
+ a length just below a multiple of 256 may under-request by one
+ page -- later drivers compute ((length & 0xfffe) + 6) / 256;
+ verify against the databook before changing. */
+
+ if (numPages > 7 ) {
+ printk(CARDNAME": Far too big packet error. \n");
+ /* freeing the packet is a good thing here... but should
+ . any packets of this size get down here? */
+ dev_kfree_skb (skb, FREE_WRITE);
+ lp->saved_skb = NULL;
+ /* this IS an error, but, i don't want the skb saved */
+ return 0;
+ }
+ /* either way, a packet is waiting now */
+ lp->packets_waiting++;
+
+ /* now, try to allocate the memory */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_ALLOC | numPages, ioaddr + MMU_CMD );
+ /*
+ . Performance Hack
+ .
+ . wait a short amount of time.. if I can send a packet now, I send
+ . it now. Otherwise, I enable an interrupt and wait for one to be
+ . available.
+ .
+ . I could have handled this a slightly different way, by checking to
+ . see if any memory was available in the FREE MEMORY register. However,
+ . either way, I need to generate an allocation, and the allocation works
+ . no matter what, so I saw no point in checking free memory.
+ */
+ time_out = MEMORY_WAIT_TIME;
+ do {
+ word status;
+
+ status = inb( ioaddr + INTERRUPT );
+ if ( status & IM_ALLOC_INT ) {
+ /* acknowledge the interrupt */
+ outb( IM_ALLOC_INT, ioaddr + INTERRUPT );
+ break;
+ }
+ } while ( -- time_out );
+
+ if ( !time_out ) {
+ /* oh well, wait until the chip finds memory later */
+ SMC_ENABLE_INT( IM_ALLOC_INT );
+ PRINTK2((CARDNAME": memory allocation deferred. \n"));
+ /* it's deferred, but I'll handle it later */
+ return 0;
+ }
+ /* or YES! I can send the packet now.. */
+ smc_hardware_send_packet(dev);
+
+ return 0;
+}
+
+/*
+ . Function: smc_hardware_send_packet(struct device * )
+ . Purpose:
+ . This sends the actual packet to the SMC9xxx chip.
+ .
+ . Algorithm:
+ . First, see if a saved_skb is available.
+ . ( this should NOT be called if there is no 'saved_skb'
+ . Now, find the packet number that the chip allocated
+ . Point the data pointers at it in memory
+ . Set the length word in the chip's memory
+ . Dump the packet to chip memory
+ . Check if a last byte is needed ( odd length packet )
+ . if so, set the control flag right
+ . Tell the card to send it
+ . Enable the transmit interrupt, so I know if it failed
+ . Free the kernel data if I actually sent it.
+*/
+static void smc_hardware_send_packet( struct device * dev )
+{
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+ byte packet_no;
+ struct sk_buff * skb = lp->saved_skb;
+ word length;
+ unsigned short ioaddr;
+ byte * buf;
+
+ ioaddr = dev->base_addr;
+
+ if ( !skb ) {
+ PRINTK((CARDNAME": In XMIT with no packet to send \n"));
+ return;
+ }
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ buf = skb->data;
+
+ /* If I get here, I _know_ there is a packet slot waiting for me */
+ packet_no = inb( ioaddr + PNR_ARR + 1 );
+ if ( packet_no & 0x80 ) {
+ /* or isn't there? BAD CHIP! */
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n");
+ kfree(skb);
+ lp->saved_skb = NULL;
+ dev->tbusy = 0;
+ return;
+ }
+
+ /* we have a packet address, so tell the card to use it */
+ outb( packet_no, ioaddr + PNR_ARR );
+
+ /* point to the beginning of the packet */
+ outw( PTR_AUTOINC , ioaddr + POINTER );
+
+ PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+#if SMC_DEBUG > 2
+ print_packet( buf, length );
+#endif
+
+ /* send the packet length ( +6 for status, length and ctl byte )
+ and the status word ( set to zeros ) */
+#ifdef USE_32_BIT
+ outl( (length +6 ) << 16 , ioaddr + DATA_1 );
+#else
+ outw( 0, ioaddr + DATA_1 );
+ /* send the packet length ( +6 for status words, length, and ctl*/
+ outb( (length+6) & 0xFF,ioaddr + DATA_1 );
+ outb( (length+6) >> 8 , ioaddr + DATA_1 );
+#endif
+
+ /* send the actual data
+ . I _think_ it's faster to send the longs first, and then
+ . mop up by sending the last word. It depends heavily
+ . on alignment, at least on the 486. Maybe it would be
+ . a good idea to check which is optimal? But that could take
+ . almost as much time as is saved?
+ */
+#ifdef USE_32_BIT
+ if ( length & 0x2 ) {
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+ outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
+ }
+ else
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+#else
+ outsw(ioaddr + DATA_1 , buf, (length ) >> 1);
+#endif
+ /* Send the last byte, if there is one. */
+
+ if ( (length & 1) == 0 ) {
+ outw( 0, ioaddr + DATA_1 );
+ } else {
+ outb( buf[length -1 ], ioaddr + DATA_1 );
+ outb( 0x20, ioaddr + DATA_1);
+ }
+
+ /* enable the interrupts */
+ SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) );
+
+ /* and let the chipset deal with it */
+ outw( MC_ENQUEUE , ioaddr + MMU_CMD );
+
+ PRINTK2((CARDNAME": Sent packet of length %d \n",length));
+
+ lp->saved_skb = NULL;
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ dev->trans_start = jiffies;
+
+ /* we can send another packet */
+ dev->tbusy = 0;
+
+
+ return;
+}
+
+/*-------------------------------------------------------------------------
+ |
+ | smc_init( struct device * dev )
+ | Input parameters:
+ | dev->base_addr == 0, try to find all possible locations
+ | dev->base_addr == 1, return failure code
+ | dev->base_addr == 2, always allocate space, and return success
+ | dev->base_addr == <anything else> this is the address to check
+ |
+ | Output:
+ | 0 --> there is a device
+ | anything else, error
+ |
+ ---------------------------------------------------------------------------
+*/
+int smc_init(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ /* try a specific location */
+ if (base_addr > 0x1ff) {
+ int error;
+ error = smc_probe(base_addr);
+ if ( 0 == error ) {
+ return smc_initcard( dev, base_addr );
+ }
+ return error;
+ } else {
+ if ( 0 != base_addr ) {
+ return -ENXIO;
+ }
+ }
+
+ /* check every ethernet address */
+ for (i = 0; smc_portlist[i]; i++) {
+ int ioaddr = smc_portlist[i];
+
+ /* check if the area is available */
+ if (check_region( ioaddr , SMC_IO_EXTENT))
+ continue;
+
+ /* check this specific address */
+ if ( smc_probe( ioaddr ) == 0) {
+ return smc_initcard( dev, ioaddr );
+ }
+ }
+
+ /* couldn't find anything */
+ return -ENODEV;
+}
+
+#ifndef NO_AUTOPROBE
+/*----------------------------------------------------------------------
+ . smc_findirq
+ .
+ . This routine has a simple purpose -- make the SMC chip generate an
+ . interrupt, so an auto-detect routine can detect it, and find the IRQ,
+ ------------------------------------------------------------------------
+*/
/*
 * Trigger an allocation interrupt on the chip and let the kernel's
 * autoirq machinery report which IRQ line it arrived on.  Returns the
 * detected IRQ, or 0 on failure (autoirq_report's failure value);
 * the caller (smc_initcard) retries after a reset on failure.
 */
int smc_findirq( int ioaddr )
{
	int	timeout = 20;	/* bounded poll of the interrupt status */


	/* I have to do a STI() here, because this is called from
	   a routine that does an CLI during this process, making it
	   rather difficult to get interrupts for auto detection */
	sti();

	autoirq_setup( 0 );

	/*
	 * What I try to do here is trigger an ALLOC_INT. This is done
	 * by allocating a small chunk of memory, which will give an interrupt
	 * when done.
	 */


	SMC_SELECT_BANK(2);
	/* enable ALLOCation interrupts ONLY */
	outb( IM_ALLOC_INT, ioaddr + INT_MASK );

	/*
	 . Allocate 512 bytes of memory.  Note that the chip was just
	 . reset so all the memory is available
	 */
	outw( MC_ALLOC | 1, ioaddr + MMU_CMD );

	/*
	 . Wait until positive that the interrupt has been generated
	 */
	while ( timeout ) {
		byte	int_status;

		int_status = inb( ioaddr + INTERRUPT );

		if ( int_status & IM_ALLOC_INT )
			break;	/* got the interrupt */
		timeout--;
	}
	/* there is really nothing that I can do here if timeout fails,
	   as autoirq_report will return a 0 anyway, which is what I
	   want in this case.   Plus, the clean up is needed in both
	   cases.  */

	/* DELAY HERE!
	   On a fast machine, the status might change before the interrupt
	   is given to the processor.  This means that the interrupt was
	   never detected, and autoirq_report fails to report anything.
	   This should fix autoirq_* problems.
	*/
	SMC_DELAY();
	SMC_DELAY();

	/* and disable all interrupts again */
	outb( 0, ioaddr + INT_MASK );

	/* clear hardware interrupts again, because that's how it
	   was when I was called... */
	cli();

	/* and return what I found */
	return autoirq_report( 0 );
}
+#endif
+
+/*----------------------------------------------------------------------
+ . Function: smc_probe( int ioaddr )
+ .
+ . Purpose:
+ . Tests to see if a given ioaddr points to an SMC9xxx chip.
+ . Returns a 0 on success
+ .
+ . Algorithm:
+ . (1) see if the high byte of BANK_SELECT is 0x33
+ . (2) compare the ioaddr with the base register's address
+ . (3) see if I recognize the chip ID in the appropriate register
+ .
+ .---------------------------------------------------------------------
+ */
+
/*
 * Probe for an SMC9xxx chip at ioaddr.  Returns 0 if the three
 * identification checks below all pass, -ENODEV otherwise.
 * NOTE(review): this writes to the candidate port even before the
 * device is confirmed; presumed safe on ISA but worth keeping in mind.
 */
static int smc_probe( int ioaddr )
{
	unsigned int	bank;
	word	revision_register;
	word	base_address_register;

	/* First, see if the high byte is 0x33 */
	bank = inw( ioaddr + BANK_SELECT );
	if ( (bank & 0xFF00) != 0x3300 ) {
		return -ENODEV;
	}
	/* The above MIGHT indicate a device, but I need to write to further
	   test this. */
	outw( 0x0, ioaddr + BANK_SELECT );
	bank = inw( ioaddr + BANK_SELECT );
	/* the 0x33 signature must survive a bank-select write */
	if ( (bank & 0xFF00 ) != 0x3300 ) {
		return -ENODEV;
	}
	/* well, we've already written once, so hopefully another time won't
	   hurt.  This time, I need to switch the bank register to bank 1,
	   so I can access the base address register */
	SMC_SELECT_BANK(1);
	base_address_register = inw( ioaddr + BASE );
	/* the chip stores its own I/O base; bits are repacked here into
	   an address for comparison against where we actually found it */
	if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) ) {
		printk(CARDNAME ": IOADDR %x doesn't match configuration (%x)."
			"Probably not a SMC chip\n",
			ioaddr, base_address_register >> 3 & 0x3E0 );
		/* well, the base address register didn't match.  Must not have
		   been a SMC chip after all. */
		return -ENODEV;
	}

	/* check if the revision register is something that I recognize.
	   These might need to be added to later, as future revisions
	   could be added. */
	SMC_SELECT_BANK(3);
	revision_register = inw( ioaddr + REVISION );
	/* chip_ids[] maps the 4-bit chip-ID field to a name, NULL = unknown */
	if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
		/* I don't recognize this chip, so... */
		printk(CARDNAME ": IO %x: Unrecognized revision register:"
			" %x, Contact author. \n", ioaddr, revision_register );

		return -ENODEV;
	}

	/* at this point I'll assume that the chip is an SMC9xxx.
	   It might be prudent to check a listing of MAC addresses
	   against the hardware address, or do some other tests. */
	return 0;
}
+
+/*---------------------------------------------------------------
+ . Here I do typical initialization tasks.
+ .
+ . o Initialize the structure if needed
+ . o print out my vanity message if not done so already
+ . o print out what type of hardware is detected
+ . o print out the ethernet address
+ . o find the IRQ
+ . o set up my private data
+ . o configure the dev structure with my subroutines
+ . o actually GRAB the irq.
+ . o GRAB the region
+ .-----------------------------------------------------------------
+*/
/*
 * Initialize a probed card at ioaddr and wire it into the networking
 * core: read MAC address and memory size, pick TP/AUI, find the IRQ,
 * allocate private data, install the method pointers, and grab the
 * IRQ and I/O region.  Returns 0 on success or a negative errno.
 */
static int smc_initcard(struct device *dev, int ioaddr)
{
	int i;

	/* print the version banner only once across all cards */
	static unsigned version_printed = 0;

	/* registers */
	word	revision_register;
	word	configuration_register;
	word	memory_info_register;
	word	memory_cfg_register;

	const char *	version_string;
	const char *	if_string;
	int	memory;

	int	irqval;

	/* see if I need to initialize the ethernet card structure */
	if (dev == NULL) {
#ifdef SUPPORT_OLD_KERNEL
#ifndef MODULE
/* note: the old module interface does not support this call */
		dev = init_etherdev( 0, sizeof( struct smc_local ), 0 );
#endif
#else
		dev = init_etherdev(0, 0);
#endif
		if (dev == NULL)
			return -ENOMEM;
	}

	if (version_printed++ == 0)
		printk("%s", version);

	/* fill in some of the fields */
	dev->base_addr = ioaddr;

	/*
	 . Get the MAC address ( bank 1, regs 4 - 9 )
	*/
	SMC_SELECT_BANK( 1 );
	for ( i = 0; i < 6; i += 2 ) {
		word	address;

		/* each 16-bit register holds two bytes, low byte first */
		address = inw( ioaddr + ADDR0 + i );
		dev->dev_addr[ i + 1] = address >> 8;
		dev->dev_addr[ i ] = address & 0xFF;
	}

	/* get the memory information */

	SMC_SELECT_BANK( 0 );
	memory_info_register = inw( ioaddr + MIR );
	memory_cfg_register = inw( ioaddr + MCR );
	memory = ( memory_cfg_register >> 9 ) & 0x7; /* multiplier */
	memory *= 256 * ( memory_info_register & 0xFF );

	/*
	 Now, I want to find out more about the chip.  This is sort of
	 redundant, but it's cleaner to have it in both, rather than having
	 one VERY long probe procedure.
	*/
	SMC_SELECT_BANK(3);
	revision_register = inw( ioaddr + REVISION );
	version_string = chip_ids[ ( revision_register >> 4 ) & 0xF ];
	if ( !version_string ) {
		/* I shouldn't get here because this call was done before.... */
		return -ENODEV;
	}

	/* is it using AUI or 10BaseT ?  (if_port: 1 = TP, 2 = AUI,
	   judging from the CFG_AUI_SELECT test below) */
	if ( dev->if_port == 0 ) {
		SMC_SELECT_BANK(1);
		configuration_register = inw( ioaddr + CONFIG );
		if ( configuration_register & CFG_AUI_SELECT )
			dev->if_port = 2;
		else
			dev->if_port = 1;
	}
	if_string = interfaces[ dev->if_port - 1 ];

	/* now, reset the chip, and put it into a known state */
	smc_reset( ioaddr );

	/*
	 . If dev->irq is 0, then the device has to be banged on to see
	 . what the IRQ is.
	 .
	 . This banging doesn't always detect the IRQ, for unknown reasons.
	 . a workaround is to reset the chip and try again.
	 .
	 . Interestingly, the DOS packet driver *SETS* the IRQ on the card to
	 . be what is requested on the command line.   I don't do that, mostly
	 . because the card that I have uses a non-standard method of accessing
	 . the IRQs, and because this _should_ work in most configurations.
	 .
	 . Specifying an IRQ is done with the assumption that the user knows
	 . what (s)he is doing.  No checking is done!!!!
	 .
	*/
#ifndef NO_AUTOPROBE
	if ( dev->irq < 2 ) {
		int	trials;

		/* up to three attempts; reset the chip between failures */
		trials = 3;
		while ( trials-- ) {
			dev->irq = smc_findirq( ioaddr );
			if ( dev->irq )
				break;
			/* kick the card and try again */
			smc_reset( ioaddr );
		}
	}
	if (dev->irq == 0 ) {
		printk(CARDNAME": Couldn't autodetect your IRQ. Use irq=xx.\n");
		return -ENODEV;
	}
#else
	if (dev->irq == 0 ) {
		printk(CARDNAME
		": Autoprobing IRQs is not supported for old kernels.\n");
		return -ENODEV;
	}
#endif
	if (dev->irq == 2) {
		/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
		 * or don't know which one to set.
		 */
		dev->irq = 9;
	}

	/* now, print out the card info, in a short format.. */

	printk(CARDNAME ": %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ",
		version_string, revision_register & 0xF, ioaddr, dev->irq,
		if_string, memory );
	/*
	 . Print the Ethernet address
	*/
	printk("ADDR: ");
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i] );
	printk("%2.2x \n", dev->dev_addr[5] );


	/* Initialize the private structure. */
	if (dev->priv == NULL) {
		dev->priv = kmalloc(sizeof(struct smc_local), GFP_KERNEL);
		if (dev->priv == NULL)
			return -ENOMEM;
	}
	/* set the private data to zero by default */
	memset(dev->priv, 0, sizeof(struct smc_local));

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(dev);

	/* Grab the IRQ */
	irqval = request_irq(dev->irq, &smc_interrupt, 0, CARDNAME, NULL);
	if (irqval) {
		printk(CARDNAME": unable to get IRQ %d (irqval=%d).\n",
			dev->irq, irqval);
		return -EAGAIN;
	}
	irq2dev_map[dev->irq] = dev;

	/* Grab the region so that no one else tries to probe our ioports. */
	request_region(ioaddr, SMC_IO_EXTENT, CARDNAME);

	/* install the driver's entry points */
	dev->open = smc_open;
	dev->stop = smc_close;
	dev->hard_start_xmit = smc_send_packet;
	dev->get_stats = smc_query_statistics;
#ifdef HAVE_MULTICAST
	dev->set_multicast_list = &smc_set_multicast_list;
#endif

	return 0;
}
+
#if SMC_DEBUG > 2
/*
 * Debug helper: hex-dump a packet buffer, 16 bytes (8 word-pairs) per
 * line.  NOTE(review): the entire body is compiled out with "#if 0",
 * so this is currently a no-op even when SMC_DEBUG > 2.
 */
static void print_packet( byte * buf, int length )
{
#if 0
	int i;
	int remainder;
	int lines;

	printk("Packet of length %d \n", length );
	lines = length / 16;
	remainder = length % 16;

	/* full 16-byte lines */
	for ( i = 0; i < lines ; i ++ ) {
		int cur;

		for ( cur = 0; cur < 8; cur ++ ) {
			byte a, b;

			a = *(buf ++ );
			b = *(buf ++ );
			printk("%02x%02x ", a, b );
		}
		printk("\n");
	}
	/* trailing partial line (an odd final byte is not printed) */
	for ( i = 0; i < remainder/2 ; i++ ) {
		byte a, b;

		a = *(buf ++ );
		b = *(buf ++ );
		printk("%02x%02x ", a, b );
	}
	printk("\n");
#endif
}
#endif
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc ..
+ *
+ */
/*
 * Bring the interface up ('ifconfig ethX up'): reset and enable the
 * chip, select the TP or AUI transceiver, and program the (possibly
 * ioctl-changed) hardware address.  Always returns 0.
 */
static int smc_open(struct device *dev)
{
	int	ioaddr = dev->base_addr;

	int	i;	/* used to set hw ethernet address */

	/* clear out all the junk that was put here before... */
	memset(dev->priv, 0, sizeof(struct smc_local));

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
#ifdef MODULE
	MOD_INC_USE_COUNT;
#endif

	/* reset the hardware */

	smc_reset( ioaddr );
	smc_enable( ioaddr );

	/* Select which interface to use (1 = TP, 2 = AUI, per the
	   CFG_AUI_SELECT handling in smc_initcard) */

	SMC_SELECT_BANK( 1 );
	if ( dev->if_port == 1 ) {
		outw( inw( ioaddr + CONFIG ) & ~CFG_AUI_SELECT,
			ioaddr + CONFIG );
	}
	else if ( dev->if_port == 2 ) {
		outw( inw( ioaddr + CONFIG ) | CFG_AUI_SELECT,
			ioaddr + CONFIG );
	}

	/*
		According to Becker, I have to set the hardware address
		at this point, because the (l)user can set it with an
		ioctl.  Easily done...
	*/
	SMC_SELECT_BANK( 1 );
	for ( i = 0; i < 6; i += 2 ) {
		word	address;

		/* pack two bytes per 16-bit address register */
		address = dev->dev_addr[ i + 1 ] << 8 ;
		address |= dev->dev_addr[ i ];
		outw( address, ioaddr + ADDR0 + i );
	}
	return 0;
}
+
+/*--------------------------------------------------------
+ . Called by the kernel to send a packet out into the void
+ . of the net. This routine is largely based on
+ . skeleton.c, from Becker.
+ .--------------------------------------------------------
+*/
/*
 * hard_start_xmit entry point.  Handles the 2.0-era tbusy/timeout
 * protocol: recover a wedged transmitter, service a NULL-skb "kick",
 * then hand the packet to smc_wait_to_send_packet().  Returns 0 when
 * the packet was accepted (or dropped), 1 to ask the stack to retry.
 */
static int smc_send_packet(struct sk_buff *skb, struct device *dev)
{
	if (dev->tbusy) {
		/* If we get here, some higher level has decided we are broken.
		   There should really be a "kick me" function call instead. */
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 5)
			return 1;	/* too soon; let the stack retry */
		printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
			tx_done(dev) ? "IRQ conflict" :
			"network cable problem");
		/* "kick" the adaptor */
		smc_reset( dev->base_addr );
		smc_enable( dev->base_addr );

		dev->tbusy = 0;
		dev->trans_start = jiffies;
		/* clear anything saved */
		((struct smc_local *)dev->priv)->saved_skb = NULL;
	}

	/* If some higher layer thinks we've missed an tx-done interrupt
	   we are passed NULL. Caution: dev_tint() handles the cli()/sti()
	   itself. */
	if (skb == NULL) {
		dev_tint(dev);
		return 0;
	}

	/* Block a timer-based transmit from overlapping.  This could better be
	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
	if (set_bit(0, (void*)&dev->tbusy) != 0) {
		/* someone else owns the transmitter: drop this packet */
		printk(KERN_WARNING CARDNAME": Transmitter access conflict.\n");
		dev_kfree_skb (skb, FREE_WRITE);
	} else {
		/* Well, I want to send the packet.. but I don't know
		   if I can send it right now... */
		return smc_wait_to_send_packet( skb, dev );
	}
	return 0;
}
+
+/*--------------------------------------------------------------------
+ .
+ . This is the main routine of the driver, to handle the device when
+ . it needs some attention.
+ .
+ . So:
+ . first, save state of the chipset
+ . branch off into routines to handle each case, and acknowledge
+ . each to the interrupt register
+ . and finally restore state.
+ .
+ ---------------------------------------------------------------------*/
+#ifdef REALLY_NEW_KERNEL
+static void smc_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+#else
+static void smc_interrupt(int irq, struct pt_regs * regs)
+#endif
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int ioaddr = dev->base_addr;
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+
+ byte status;
+ word card_stats;
+ byte mask;
+ int timeout;
+ /* state registers */
+ word saved_bank;
+ word saved_pointer;
+
+
+
+ PRINTK3((CARDNAME": SMC interrupt started \n"));
+
+ if (dev == NULL) {
+ printk(KERN_WARNING CARDNAME": irq %d for unknown device.\n",
+ irq);
+ return;
+ }
+
+/* will Linux let this happen ?? If not, this costs some speed */
+ if ( dev->interrupt ) {
+ printk(KERN_WARNING CARDNAME": interrupt inside interrupt.\n");
+ return;
+ }
+
+ dev->interrupt = 1;
+
+ saved_bank = inw( ioaddr + BANK_SELECT );
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw( ioaddr + POINTER );
+
+ mask = inb( ioaddr + INT_MASK );
+ /* clear all interrupts */
+ outb( 0, ioaddr + INT_MASK );
+
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 4;
+
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
+ do {
+ /* read the status flag, and mask it */
+ status = inb( ioaddr + INTERRUPT ) & mask;
+ if (!status )
+ break;
+
+ PRINTK3((KERN_WARNING CARDNAME
+ ": Handling interrupt status %x \n", status ));
+
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ PRINTK2((KERN_WARNING CARDNAME
+ ": Receive Interrupt\n"));
+ smc_rcv(dev);
+ } else if (status & IM_TX_INT ) {
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX ERROR handled\n"));
+ smc_tx(dev);
+ outb(IM_TX_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_TX_EMPTY_INT ) {
+ /* update stats */
+ SMC_SELECT_BANK( 0 );
+ card_stats = inw( ioaddr + COUNTER );
+ /* single collisions */
+ lp->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ lp->stats.collisions += card_stats & 0xF;
+
+ /* these are for when linux supports these statistics */
+#if 0
+ card_stats >>= 4;
+ /* deferred */
+ card_stats >>= 4;
+ /* excess deferred */
+#endif
+ SMC_SELECT_BANK( 2 );
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX_BUFFER_EMPTY handled\n"));
+ outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT );
+ mask &= ~IM_TX_EMPTY_INT;
+ lp->stats.tx_packets += lp->packets_waiting;
+ lp->packets_waiting = 0;
+
+ } else if (status & IM_ALLOC_INT ) {
+ PRINTK2((KERN_DEBUG CARDNAME
+ ": Allocation interrupt \n"));
+ /* clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ smc_hardware_send_packet( dev );
+
+ /* enable xmit interrupts based on this */
+ mask |= ( IM_TX_EMPTY_INT | IM_TX_INT );
+
+ /* and let the card send more packets to me */
+ mark_bh( NET_BH );
+
+ PRINTK2((CARDNAME": Handoff done successfully.\n"));
+ } else if (status & IM_RX_OVRN_INT ) {
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_EPH_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
+ } else if (status & IM_ERCV_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
+ outb( IM_ERCV_INT, ioaddr + INTERRUPT );
+ }
+ } while ( timeout -- );
+
+
+ /* restore state register */
+ SMC_SELECT_BANK( 2 );
+ outb( mask, ioaddr + INT_MASK );
+
+ PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
+ outw( saved_pointer, ioaddr + POINTER );
+
+ SMC_SELECT_BANK( saved_bank );
+
+ dev->interrupt = 0;
+ PRINTK3((CARDNAME ": Interrupt done\n"));
+ return;
+}
+
+/*-------------------------------------------------------------
+ .
+ . smc_rcv - receive a packet from the card
+ .
+ . There is ( at least ) a packet waiting to be read from
+ . chip-memory.
+ .
+ . o Read the status
+ . o If an error, record it
+ . o otherwise, read in the packet
+ --------------------------------------------------------------
+*/
+static void smc_rcv(struct device *dev)
+{
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int packet_number;
+ word status;
+ word packet_length;
+
+ /* assume bank 2 */
+
+ packet_number = inw( ioaddr + FIFO_PORTS );
+
+ if ( packet_number & FP_RXEMPTY ) {
+ /* we got called , but nothing was on the FIFO */
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n"));
+ /* don't need to restore anything */
+ return;
+ }
+
+ /* start reading from the start of the packet */
+ outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER );
+
+ /* First two words are status and packet_length */
+ status = inw( ioaddr + DATA_1 );
+ packet_length = inw( ioaddr + DATA_1 );
+
+ packet_length &= 0x07ff; /* mask off top bits */
+
+ PRINTK2(("RCV: STATUS %4x LENGTH %4x\n", status, packet_length ));
+ /*
+ . the packet length contains 3 extra words :
+ . status, length, and a extra word with an odd byte .
+ */
+ packet_length -= 6;
+
+ if ( !(status & RS_ERRORS ) ){
+ /* do stuff to make a new packet */
+ struct sk_buff * skb;
+ byte * data;
+
+ /* read one extra byte */
+ if ( status & RS_ODDFRAME )
+ packet_length++;
+
+ /* set multicast stats */
+ if ( status & RS_MULTICAST )
+ lp->stats.multicast++;
+
+#ifdef SUPPORT_OLD_KERNEL
+ skb = alloc_skb( packet_length + 5, GFP_ATOMIC );
+#else
+ skb = dev_alloc_skb( packet_length + 5);
+#endif
+
+ if ( skb == NULL ) {
+ printk(KERN_NOTICE CARDNAME
+ ": Low memory, packet dropped.\n");
+ lp->stats.rx_dropped++;
+ }
+
+ /*
+ ! This should work without alignment, but it could be
+ ! in the worse case
+ */
+#ifndef SUPPORT_OLD_KERNEL
+ /* TODO: Should I use 32bit alignment here ? */
+ skb_reserve( skb, 2 ); /* 16 bit alignment */
+#endif
+
+ skb->dev = dev;
+#ifdef SUPPORT_OLD_KERNEL
+ skb->len = packet_length;
+ data = skb->data;
+#else
+ data = skb_put( skb, packet_length);
+#endif
+#ifdef USE_32_BIT
+ /* QUESTION: Like in the TX routine, do I want
+ to send the DWORDs or the bytes first, or some
+ mixture. A mixture might improve already slow PIO
+ performance */
+ PRINTK3((" Reading %d dwords (and %d bytes) \n",
+ packet_length >> 2, packet_length & 3 ));
+ insl(ioaddr + DATA_1 , data, packet_length >> 2 );
+ /* read the left over bytes */
+ insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
+ packet_length & 0x3 );
+#else
+ PRINTK3((" Reading %d words and %d byte(s) \n",
+ (packet_length >> 1 ), packet_length & 1 );
+ if ( packet_length & 1 )
+ *(data++) = inb( ioaddr + DATA_1 );
+ insw(ioaddr + DATA_1 , data, (packet_length + 1 ) >> 1);
+ if ( packet_length & 1 ) {
+ data += packet_length & ~1;
+ *((data++) = inb( ioaddr + DATA_1 );
+ }
+#endif
+#if SMC_DEBUG > 2
+ print_packet( data, packet_length );
+#endif
+
+#ifndef SUPPORT_OLD_KERNEL
+ skb->protocol = eth_type_trans(skb, dev );
+#endif
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ } else {
+ /* error ... */
+ lp->stats.rx_errors++;
+
+ if ( status & RS_ALGNERR ) lp->stats.rx_frame_errors++;
+ if ( status & (RS_TOOSHORT | RS_TOOLONG ) )
+ lp->stats.rx_length_errors++;
+ if ( status & RS_BADCRC) lp->stats.rx_crc_errors++;
+ }
+ /* error or good, tell the card to get rid of this packet */
+ outw( MC_RELEASE, ioaddr + MMU_CMD );
+
+
+ return;
+}
+
+
+/*************************************************************************
+ . smc_tx
+ .
+ . Purpose: Handle a transmit error message. This will only be called
+ . when an error, because of the AUTO_RELEASE mode.
+ .
+ . Algorithm:
+ . Save pointer and packet no
+ . Get the packet no from the top of the queue
+ . check if it's valid ( if not, is this an error??? )
+ . read the status word
+ . record the error
+ . ( resend? Not really, since we don't want old packets around )
+ . Restore saved values
+ ************************************************************************/
/*
 * Handle a transmit-error interrupt: read back the failed packet's
 * status word, record the error statistics, re-enable the transmitter
 * (the chip disables TX on some errors), and free the chip-side packet
 * memory.  Assumes bank 2 is selected on entry (caller: smc_interrupt).
 */
static void smc_tx( struct device * dev )
{
	int	ioaddr = dev->base_addr;
	struct smc_local *lp = (struct smc_local *)dev->priv;
	byte saved_packet;
	byte packet_no;
	word tx_status;


	/* assume bank 2 */

	/* save the current packet-number register; restored on exit */
	saved_packet = inb( ioaddr + PNR_ARR );
	packet_no = inw( ioaddr + FIFO_PORTS );
	packet_no &= 0x7F;

	/* select this as the packet to read from */
	outb( packet_no, ioaddr + PNR_ARR );

	/* read the first word from this packet */
	outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );

	tx_status = inw( ioaddr + DATA_1 );
	PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status ));

	lp->stats.tx_errors++;
	if ( tx_status & TS_LOSTCAR ) lp->stats.tx_carrier_errors++;
	if ( tx_status & TS_LATCOL ) {
		printk(KERN_DEBUG CARDNAME
			": Late collision occurred on last xmit.\n");
		lp->stats.tx_window_errors++;
	}
#if 0
	if ( tx_status & TS_16COL ) { ... }
#endif

	/* a success here is unexpected: AUTO_RELEASE mode should only
	   interrupt on errors (see the function header above) */
	if ( tx_status & TS_SUCCESS ) {
		printk(CARDNAME": Successful packet caused interrupt \n");
	}
	/* re-enable transmit */
	SMC_SELECT_BANK( 0 );
	outw( inw( ioaddr + TCR ) | TCR_ENABLE, ioaddr + TCR );

	/* kill the packet */
	SMC_SELECT_BANK( 2 );
	outw( MC_FREEPKT, ioaddr + MMU_CMD );

	/* one less packet waiting for me */
	lp->packets_waiting--;

	outb( saved_packet, ioaddr + PNR_ARR );
	return;
}
+
+/*----------------------------------------------------
+ . smc_close
+ .
+ . this makes the board clean up everything that it can
+ . and not talk to the outside world. Caused by
+ . an 'ifconfig ethX down'
+ .
+ -----------------------------------------------------*/
+static int smc_close(struct device *dev)
+{
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* clear everything */
+ smc_shutdown( dev->base_addr );
+
+ /* Update the statistics here. */
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+
+ return 0;
+}
+
+/*------------------------------------------------------------
+ . Get the current statistics.
+ . This may be called with the card open or closed.
+ .-------------------------------------------------------------*/
+static struct enet_statistics * smc_query_statistics(struct device *dev) {
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/*-----------------------------------------------------------
+ . smc_set_multicast_list
+ .
+ . This routine will, depending on the values passed to it,
+ . either make it accept multicast packets, go into
+ . promiscuous mode ( for TCPDUMP and cousins ) or accept
+ . a select set of multicast packets
+*/
+#ifdef SUPPORT_OLD_KERNEL
+static void smc_set_multicast_list( struct device * dev,
+ int num_addrs, void * addrs )
+#else
+static void smc_set_multicast_list(struct device *dev)
+#endif
+{
+ short ioaddr = dev->base_addr;
+
+ SMC_SELECT_BANK(0);
+#ifdef SUPPORT_OLD_KERNEL
+ if ( num_addrs < 0 )
+#else
+ if ( dev->flags & IFF_PROMISC )
+#endif
+ outw( inw(ioaddr + RCR ) | RCR_PROMISC, ioaddr + RCR );
+
+/* BUG? I never disable promiscuous mode if multicasting was turned on.
+ Now, I turn off promiscuous mode, but I don't do anything to multicasting
+ when promiscuous mode is turned on.
+*/
+
+ /* Here, I am setting this to accept all multicast packets.
+ I don't need to zero the multicast table, because the flag is
+ checked before the table is
+ */
+#ifdef SUPPORT_OLD_KERNEL
+ else if ( num_addrs > 20 ) /* arbitrary constant */
+#else
+ else if (dev->flags & IFF_ALLMULTI)
+#endif
+ outw( inw(ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR );
+
+ /* We just get all multicast packets even if we only want them
+ . from one source. This will be changed at some future
+ . point. */
+#ifdef SUPPORT_OLD_KERNEL
+ else if (num_addrs > 0 ) {
+/* the old kernel support will not have hardware multicast support. It would
+ involve more kludges, and make the multicast setting code even worse.
+ Instead, just use the ALMUL method. This is reasonable, considering that
+ it is seldom used
+*/
+ outw( inw( ioaddr + RCR ) & ~RCR_PROMISC, ioaddr + RCR );
+ outw( inw( ioadddr + RCR ) | RCR_ALMUL, ioadddr + RCR );
+ }
+#else
+ else if (dev->mc_count ) {
+ /* support hardware multicasting */
+
+ /* be sure I get rid of flags I might have set */
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+ /* NOTE: this has to set the bank, so make sure it is the
+ last thing called. The bank is set to zero at the top */
+ smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ }
+#endif
+ else {
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+
+ /*
+ since I'm disabling all multicast entirely, I need to
+ clear the multicast list
+ */
+ SMC_SELECT_BANK( 3 );
+ outw( 0, ioaddr + MULTICAST1 );
+ outw( 0, ioaddr + MULTICAST2 );
+ outw( 0, ioaddr + MULTICAST3 );
+ outw( 0, ioaddr + MULTICAST4 );
+ }
+}
+
+#ifdef MODULE
+
/* Template device used when built as a module; populated from the
   insmod parameters below in init_module(). */
static char devicename[9] = { 0, };
static struct device devSMC9194 = {
	devicename, /* device name is inserted by linux/drivers/net/net_init.c */
	0, 0, 0, 0,
	0, 0, /* I/O address, IRQ */
	0, 0, 0, NULL, smc_init };

/* insmod-settable parameters: I/O base, IRQ line, interface selector
   (same encoding as dev->if_port in smc_initcard). */
int io = 0;
int irq = 0;
int ifport = 0;
+
/*
 * Module entry point: copy the insmod parameters into the template
 * device and register it; the probe itself happens via smc_init when
 * the net core initializes the device.  Returns 0 on success or the
 * error from register_netdev().
 */
int init_module(void)
{
	int result;

	if (io == 0)
		printk(KERN_WARNING
		CARDNAME": You shouldn't use auto-probing with insmod!\n" );

	/* copy the parameters from insmod into the device structure */
	devSMC9194.base_addr = io;
	devSMC9194.irq = irq;
	devSMC9194.if_port = ifport;
	if ((result = register_netdev(&devSMC9194)) != 0)
		return result;

	return 0;
}
+
/*
 * Module exit: unregister the device and release everything that
 * smc_initcard() acquired -- the IRQ, the irq-to-device mapping, the
 * I/O region, and the private data block.
 */
void cleanup_module(void)
{
	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	unregister_netdev(&devSMC9194);

	free_irq(devSMC9194.irq, NULL );
	irq2dev_map[devSMC9194.irq] = NULL;
	release_region(devSMC9194.base_addr, SMC_IO_EXTENT);

	if (devSMC9194.priv)
		kfree_s(devSMC9194.priv, sizeof(struct smc_local));
}
+
+#endif /* MODULE */
+
diff --git a/linux/src/drivers/net/smc9194.h b/linux/src/drivers/net/smc9194.h
new file mode 100644
index 0000000..66f8b8c
--- /dev/null
+++ b/linux/src/drivers/net/smc9194.h
@@ -0,0 +1,240 @@
+/*------------------------------------------------------------------------
+ . smc9194.h
+ . Copyright (C) 1996 by Erik Stahlman
+ .
+ . This software may be used and distributed according to the terms
+ . of the GNU Public License, incorporated herein by reference.
+ .
+ . This file contains register information and access macros for
+ . the SMC91xxx chipset.
+ .
+ . Information contained in this file was obtained from the SMC91C94
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smc.com in the components division.
+ . ( this thanks to advice from Donald Becker ).
+ .
+ . Authors
+ . Erik Stahlman ( erik@vt.edu )
+ .
+ . History
+ . 01/06/96 Erik Stahlman moved definitions here from main .c file
+ . 01/19/96 Erik Stahlman polished this up some, and added better
+ . error handling
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC9194_H_
+#define _SMC9194_H_
+
+/* I want some simple types */
+
+typedef unsigned char byte;
+typedef unsigned short word;
+typedef unsigned long int dword;
+
+
+/* Because of bank switching, the SMC91xxx uses only 16 I/O ports */
+
+#define SMC_IO_EXTENT 16
+
+
+/*---------------------------------------------------------------
+ .
+ . A description of the SMC registers is probably in order here,
+ . although for details, the SMC datasheet is invaluable.
+ .
+ . Basically, the chip has 4 banks of registers ( 0 to 3 ), which
+ . are accessed by writing a number into the BANK_SELECT register
+ . ( I also use a SMC_SELECT_BANK macro for this ).
+ .
+ . The banks are configured so that for most purposes, bank 2 is all
+ . that is needed for simple run time tasks.
+ -----------------------------------------------------------------------*/
+
+/*
+ . Bank Select Register:
+ .
+ . yyyy yyyy 0000 00xx
+ . xx = bank number
+ . yyyy yyyy = 0x33, for identification purposes.
+*/
+#define BANK_SELECT 14
+
+/* BANK 0 */
+
+#define TCR 0 /* transmit control register */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_FDUPLX 0x0800 /* receive packets sent out */
+#define TCR_STP_SQET 0x1000 /* stop transmitting if Signal quality error */
+#define TCR_MON_CNS 0x0400 /* monitors the carrier status */
+#define TCR_PAD_ENABLE 0x0080 /* pads short packets to 64 bytes */
+
+#define TCR_CLEAR 0 /* do NOTHING */
+/* the normal settings for the TCR register : */
+/* QUESTION: do I want to enable padding of short packets ? */
+#define TCR_NORMAL TCR_ENABLE
+
+
+#define EPH_STATUS 2
+#define ES_LINK_OK 0x4000 /* is the link integrity ok ? */
+
+#define RCR 4
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+
+#define COUNTER 6
+#define MIR 8
+#define MCR 10
+/* 12 is reserved */
+
+/* BANK 1 */
+#define CONFIG 0
+#define CFG_AUI_SELECT 0x100
+#define BASE 2
+#define ADDR0 4
+#define ADDR1 6
+#define ADDR2 8
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_POWERDOWN 0x2000
+#define CTL_LE_ENABLE 0x80
+#define CTL_CR_ENABLE 0x40
+#define CTL_TE_ENABLE 0x0020
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_EPROM_ACCESS 0x0003 /* high if Eprom is being read */
+
+/* BANK 2 */
+#define MMU_CMD 0
+#define MC_BUSY 1 /* only readable bit in the register */
+#define MC_NOP 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_REMOVE 0x60 /* remove the current rx packet */
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+
+#define FP_RXEMPTY 0x8000
+#define FP_TXEMPTY 0x80
+
+#define POINTER 6
+#define PTR_READ 0x2000
+#define PTR_RCV 0x8000
+#define PTR_AUTOINC 0x4000
+#define PTR_AUTO_INC 0x0040
+
+#define DATA_1 8
+#define DATA_2 10
+#define INTERRUPT 12
+
+#define INT_MASK 13
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+#define IM_ERCV_INT 0x40 /* not on SMC9192 */
+
+/* BANK 3 */
+#define MULTICAST1 0
+#define MULTICAST2 2
+#define MULTICAST3 4
+#define MULTICAST4 6
+#define MGMT 8
+#define REVISION 10 /* ( hi: chip id low: rev # ) */
+
+
+/* this is NOT on SMC9192 */
+#define ERCV 12
+
+#define CHIP_9190 3
+#define CHIP_9194 4
+#define CHIP_9195 5
+#define CHIP_91100 7
+
+static const char * chip_ids[ 15 ] = {
+ NULL, NULL, NULL,
+ /* 3 */ "SMC91C90/91C92",
+ /* 4 */ "SMC91C94",
+ /* 5 */ "SMC91C95",
+ NULL,
+ /* 7 */ "SMC91C100",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL};
+
+/*
+ . Transmit status bits
+*/
+#define TS_SUCCESS 0x0001
+#define TS_LOSTCAR 0x0400
+#define TS_LATCOL 0x0200
+#define TS_16COL 0x0010
+
+/*
+ . Receive status bits
+*/
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+static const char * interfaces[ 2 ] = { "TP", "AUI" };
+
+/*-------------------------------------------------------------------------
+ . I define some macros to make it easier to do somewhat common
+ . or slightly complicated, repeated tasks.
+ --------------------------------------------------------------------------*/
+
+/* select a register bank, 0 to 3 */
+
+#define SMC_SELECT_BANK(x) { outw( x, ioaddr + BANK_SELECT ); }
+
+/* define a small delay for the reset */
+#define SMC_DELAY() { inw( ioaddr + RCR );\
+ inw( ioaddr + RCR );\
+ inw( ioaddr + RCR ); }
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask |= (x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
+
+/* this disables an interrupt from the interrupt mask register */
+
+#define SMC_DISABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask &= ~(x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
+
+/*----------------------------------------------------------------------
+ . Define the interrupts that I want to receive from the card
+ .
+ . I want:
+ . IM_EPH_INT, for nasty errors
+ . IM_RCV_INT, for happy received packets
+ . IM_RX_OVRN_INT, because I have to kick the receiver
+ --------------------------------------------------------------------------*/
+#define SMC_INTERRUPT_MASK (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT)
+
+#endif /* _SMC9194_H_ */
+
diff --git a/linux/src/drivers/net/starfire.c b/linux/src/drivers/net/starfire.c
new file mode 100644
index 0000000..b8702a0
--- /dev/null
+++ b/linux/src/drivers/net/starfire.c
@@ -0,0 +1,1535 @@
+/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
+/*
+ Written/Copyright 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/starfire.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"starfire.c:v1.09 7/22/2003 Copyright by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" Updates and info at http://www.scyld.com/network/starfire.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Used for tuning interrupt latency vs. overhead. */
+static int interrupt_mitigation = 0x0;
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Starfire has a 512 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' exist for driver interoperability,
+ however full_duplex[] should never be used in new configurations.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Automatically extracted configuration info:
+probe-func: starfire_probe
+config-in: tristate 'Adaptec DuraLAN ("starfire") series PCI Ethernet support' CONFIG_DURLAN
+
+c-help-name: Adaptec DuraLAN ("starfire") series PCI Ethernet support
+c-help-symbol: CONFIG_DURALAN
+c-help: This driver is for the Adaptec DuraLAN series, the 6915, 62022
+c-help: and 62044 boards.
+c-help: Design information, usage details and updates are available from
+c-help: http://www.scyld.com/network/starfire.html
+*/
+
+/* Operational parameters that are set at compile time. */
+
+/* The "native" ring sizes are either 256 or 2048.
+ However in some modes a descriptor may be marked to wrap the ring earlier.
+ The driver allocates a single page for each descriptor ring, constraining
+ the maximum size in an architecture-dependent way.
+*/
+#define RX_RING_SIZE 256
+#define TX_RING_SIZE 32
+/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
+#define DONE_Q_SIZE 1024
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability.
+ Compatibility defines are in kern_compat.h */
+
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message enable level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to set forced full duplex (deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Adaptec 6915 DuraLAN "Starfire" 64 bit PCI Ethernet
+adapter, and the multiport boards using the same chip.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
+ring sizes are set fixed by the hardware, but may optionally be wrapped
+earlier by the END bit in the descriptor.
+This driver uses that hardware queue size for the Rx ring, where a large
+number of entries has no ill effect beyond increases the potential backlog.
+The Tx ring is wrapped with the END bit, since a large hardware Tx queue
+disables the queue layer priority ordering and we have no mechanism to
+utilize the hardware two-level priority queue. When modifying the
+RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
+levels.
+
+IIIb/c. Transmit/Receive Structure
+
+See the Adaptec manual for the many possible structures, and options for
+each structure. There are far too many to document here.
+
+For transmit this driver uses type 1 transmit descriptors, and relies on
+automatic minimum-length padding. It does not use the completion queue
+consumer index, but instead checks for non-zero status entries.
+
+For receive this driver uses type 0 receive descriptors. The driver
+allocates full frame size skbuffs for the Rx ring buffers, so all frames
+should fit in a single descriptor. The driver does not use the completion
+queue consumer index, but instead checks for non-zero status entries.
+
+When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
+is allocated and the frame is copied to the new skbuff. When the incoming
+frame is larger, the skbuff is passed directly up the protocol stack.
+Buffers consumed this way are replaced by newly allocated skbuffs in a later
+phase of receive.
+
+A notable aspect of operation is that unaligned buffers are not permitted by
+the Starfire hardware. The IP header at offset 14 in an ethernet frame thus
+isn't longword aligned, which may cause problems on some machine
+e.g. Alphas. Copied frames are put into the skbuff at an offset of "+2",
+16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Adaptec Starfire manuals, available only from Adaptec.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
+
+
+
+static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int starfire_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags {CanHaveMII=1, };
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
+/* And maps in 0.5MB(!) -- no I/O mapping here! */
+#define MEM_ADDR_SZ 0x80000
+
+#if 0 && (defined(__x86_64) || defined(__alpha__))
+/* Enable 64 bit address modes. */
+#define STARFIRE_ADDR_64BITS 1
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Adaptec Starfire 6915", { 0x69159004, 0xffffffff, },
+ PCI_IOTYPE, MEM_ADDR_SZ, CanHaveMII},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info starfire_drv_id = {
+ "starfire", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ starfire_probe1, starfire_pwr_event };
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum register_offsets {
+ PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
+ IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
+ MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
+ TxDescCtrl=0x50090,
+ TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
+ TxRingHiAddr=0x5009C, /* 64 bit address extension. */
+ TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
+ TxThreshold=0x500B0,
+ CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
+ RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
+ CompletionQConsumerIdx=0x500C4,
+ RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
+ RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
+ TxMode=0x55000,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrNormalSummary=0x8000, IntrAbnormalSummary=0x02000000,
+ IntrRxDone=0x0300, IntrRxEmpty=0x10040, IntrRxPCIErr=0x80000,
+ IntrTxDone=0x4000, IntrTxEmpty=0x1000, IntrTxPCIErr=0x80000,
+ StatsMax=0x08000000, LinkChange=0xf0000000,
+ IntrTxDataLow=0x00040000,
+ IntrPCIPin=0x01,
+};
+
+/* Bits in the RxFilterMode register. */
+enum rx_mode_bits {
+ AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
+ AcceptMulticast=0x10, AcceptMyPhys=0xE040,
+};
+
+/* Misc. bits. Symbolic names so that may be searched for. */
+enum misc_bits {
+ ChipResetCmd=1, /* PCIDeviceConfig */
+ PCIIntEnb=0x00800000, /* PCIDeviceConfig */
+ TxEnable=0x0A, RxEnable=0x05, SoftIntr=0x100, /* GenCtrl */
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct starfire_rx_desc {
+ u32 rxaddr; /* Optionally 64 bits. */
+#if defined(STARFIRE_ADDR_64BITS)
+ u32 rxaddr_hi; /* Optionally 64 bits. */
+#endif
+};
+enum rx_desc_bits {
+ RxDescValid=1, RxDescEndRing=2,
+};
+
+/* Completion queue entry.
+ You must update the page allocation, init_ring and the shift count in rx()
+ if using a larger format. */
+struct rx_done_desc {
+ u32 status; /* Low 16 bits is length. */
+#ifdef full_rx_status
+ u32 status2;
+ u16 vlanid;
+ u16 csum; /* partial checksum */
+ u32 timestamp;
+#endif
+};
+enum rx_done_bits {
+ RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
+};
+
+/* Type 1 Tx descriptor. */
+struct starfire_tx_desc {
+ u32 status; /* Upper bits are status, lower 16 length. */
+ u32 addr;
+};
+enum tx_desc_bits {
+ TxDescID=0xB1010000, /* Also marks single fragment, add CRC. */
+ TxDescIntr=0x08000000, TxRingWrap=0x04000000,
+};
+struct tx_done_report {
+ u32 status; /* timestamp, index. */
+#if 0
+ u32 intrstatus; /* interrupt status */
+#endif
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct starfire_rx_desc *rx_ring;
+ struct starfire_tx_desc *tx_ring;
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of rx/tx-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ u8 pad0[100]; /* Impact padding */
+ /* Pointers to completion queues (full pages). Cache line pad.. */
+ struct rx_done_desc *rx_done_q __attribute__((aligned (L1_CACHE_BYTES)));
+ unsigned int rx_done;
+ struct tx_done_report *tx_done_q __attribute__((aligned (L1_CACHE_BYTES)));
+ unsigned int tx_done;
+
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int max_interrupt_work;
+ int intr_enable;
+ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
+ unsigned int polling:1; /* Erk, IRQ err. */
+
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1, /* Full-duplex operation requested. */
+ medialock:1, /* Xcvr set to fixed speed/duplex. */
+ rx_flowctrl:1,
+ tx_flowctrl:1; /* Use 802.3x flow control. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ u32 tx_mode;
+ u8 tx_threshold;
+ u32 cur_rx_mode;
+ u16 mc_filter[32];
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value);
+static int netdev_open(struct net_device *dev);
+static int change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int starfire_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&starfire_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ /* Serial EEPROM reads are hidden by the hardware. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->medialock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+ dev->change_mtu = &change_mtu;
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ /* Force the media type after detecting the transceiver. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ return dev;
+}
+
+
+/* Read the MII Management Data I/O (MDIO) interfaces. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
+ int result, boguscnt=1000;
+ /* ??? Should we add a busy-wait here? */
+ do
+ result = readl(mdio_addr);
+ while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
+ return result & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
+ writel(value, mdio_addr);
+ /* The busy-wait will occur before a read. */
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ /* We have no reports that indicate we need to reset the chip.
+ But to be on the safe side... */
+ /* Disable the Rx and Tx, and reset the chip. */
+ writel(0, ioaddr + GenCtrl);
+ writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+ /* Allocate the various queues, failing gracefully. */
+ if (np->tx_done_q == 0)
+ np->tx_done_q = (struct tx_done_report *)get_free_page(GFP_KERNEL);
+ if (np->rx_done_q == 0)
+ np->rx_done_q = (struct rx_done_desc *)get_free_page(GFP_KERNEL);
+ if (np->tx_ring == 0)
+ np->tx_ring = (struct starfire_tx_desc *)get_free_page(GFP_KERNEL);
+ if (np->rx_ring == 0)
+ np->rx_ring = (struct starfire_rx_desc *)get_free_page(GFP_KERNEL);
+ if (np->tx_done_q == 0 || np->rx_done_q == 0
+ || np->rx_ring == 0 || np->tx_ring == 0) {
+ /* Retain the pages to increase our chances next time. */
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+
+ init_ring(dev);
+ /* Set the size of the Rx buffers. */
+ writel((np->rx_buf_sz<<16) | 0xA000, ioaddr + RxDescQCtrl);
+
+ /* Set Tx descriptor to type 1 and padding to 0 bytes. */
+ writel(0x02000401, ioaddr + TxDescCtrl);
+
+#if defined(STARFIRE_ADDR_64BITS)
+ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxDescQHiAddr);
+ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingHiAddr);
+#else
+ writel(0, ioaddr + RxDescQHiAddr);
+ writel(0, ioaddr + TxRingHiAddr);
+ writel(0, ioaddr + CompletionHiAddr);
+#endif
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxDescQAddr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ writel(virt_to_bus(np->tx_done_q), ioaddr + TxCompletionAddr);
+ writel(virt_to_bus(np->rx_done_q), ioaddr + RxCompletionAddr);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
+
+ /* Fill both the unused Tx SA register and the Rx perfect filter. */
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
+ for (i = 0; i < 16; i++) {
+ u16 *eaddrs = (u16 *)dev->dev_addr;
+ long setup_frm = ioaddr + 0x56000 + i*16;
+ writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
+ writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
+ writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
+ }
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ np->tx_mode = 0; /* Initialized when TxMode set. */
+ np->tx_threshold = 4;
+ writel(np->tx_threshold, ioaddr + TxThreshold);
+ writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
+ set_rx_mode(dev);
+
+ np->advertising = mdio_read(dev, np->phys[0], 4);
+ check_duplex(dev);
+ netif_start_tx_queue(dev);
+
+ /* Set the interrupt mask and enable PCI interrupts. */
+ np->intr_enable = IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
+ IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
+ StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
+ | 0x0010;
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ writel(PCIIntEnb | readl(ioaddr + PCIDeviceConfig),
+ ioaddr + PCIDeviceConfig);
+
+ /* Enable the Rx and Tx units. */
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n",
+ dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+/* The starfire can handle frame sizes up to 64KB, but we arbitrarily
+ * limit the size.
+ */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 17268))
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_tx_mode;
+
+ new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0)
+ | (np->rx_flowctrl ? 0x0400:0);
+ if (np->medialock) {
+ if (np->full_duplex)
+ new_tx_mode |= 2;
+ } else {
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (duplex)
+ new_tx_mode |= 2;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+ " negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ }
+ }
+ if (new_tx_mode != np->tx_mode) {
+ np->tx_mode = new_tx_mode;
+ writel(np->tx_mode | 0x8000, ioaddr + TxMode);
+ writel(np->tx_mode, ioaddr + TxMode);
+ }
+}
+
+/* Check for duplex changes, but mostly check for failures. */
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int status = readl(ioaddr + IntrStatus);
+ static long last_msg = 0;
+
+ /* Normally we check only every few seconds. */
+ np->timer.expires = jiffies + 60*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
+ dev->name, status);
+ }
+
+ /* Check for a missing chip or failed interrupt line.
+ * The latter may be falsely triggered, so we check twice. */
+ if (status == 0xffffffff) {
+ if (jiffies - last_msg > 10*HZ) {
+ last_msg = jiffies;
+ printk(KERN_ERR "%s: The Starfire chip is missing!\n",
+ dev->name);
+ }
+ } else if (np->polling) {
+ if (status & IntrPCIPin) {
+ intr_handler(dev->irq, dev, 0);
+ if (jiffies - last_msg > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
+ dev->name, dev->irq);
+ last_msg = jiffies;
+ }
+ } else if (jiffies - last_msg > 10*HZ)
+ np->polling = 0;
+ np->timer.expires = jiffies + 2;
+ } else if (status & IntrPCIPin) {
+ int new_status = readl(ioaddr + IntrStatus);
+ /* Bogus hardware IRQ mapping: Fake an interrupt handler call. */
+ if (new_status & IntrPCIPin) {
+ printk(KERN_ERR "%s: IRQ %d is not raising an interrupt! "
+ "Status %8.8x/%8.8x. \n",
+ dev->name, dev->irq, status, new_status);
+ intr_handler(dev->irq, dev, 0);
+ np->timer.expires = jiffies + 2;
+ np->polling = 1;
+ }
+ } else if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ /* This will not catch tbusy incorrectly set when the queue is empty,
+ * but that state should never occur. */
+ tx_timeout(dev);
+ }
+
+ check_duplex(dev);
+
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__)
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", np->tx_ring[i].status);
+ printk("\n" KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].rxaddr);
+ printk("\n");
+ }
+#endif
+
+ /* If a specific problem is reported, reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes. */
+ writel(0, ioaddr + GenCtrl);
+ /* Enable the Rx and Tx units. */
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ :
+ (dev->mtu + 14 + 3) & ~3); /* Round to word. */
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ /* Grrr, we cannot offset to correctly align the IP header. */
+ np->rx_ring[i].rxaddr =
+ virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
+ }
+ writew(i - 1, dev->base_addr + RxDescQIdx);
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* Clear the remainder of the Rx buffer ring. */
+ for ( ; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = 0;
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);
+
+ /* Clear the completion rings. */
+ for (i = 0; i < DONE_Q_SIZE; i++) {
+ np->rx_done_q[i].status = 0;
+ np->tx_done_q[i].status = 0;
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ /* Add "| TxDescIntr" to generate Tx-done interrupts. */
+ np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
+#if 1
+ if (entry >= TX_RING_SIZE-1) { /* Wrap ring */
+ np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
+ entry = -1;
+ }
+#endif
+
+ /* On some architectures better performance results by explicitly
+ flushing cache lines: pci_flush_virt(skb->data, skb->len); */
+
+ np->cur_tx++;
+ /* Update the producer index. */
+ writel(++entry, dev->base_addr + TxProducerIdx);
+
+ /* cf. using TX_QUEUE_LEN instead of TX_RING_SIZE here. */
+ if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
+ np->tx_full = 1;
+ /* Check for the rare case of a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_RING_SIZE - 2) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Tx frame #%d slot %d %8.8x %8.8x.\n",
+ dev->name, np->cur_tx, entry,
+ np->tx_ring[entry].status, np->tx_ring[entry].addr);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+ "device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrClear);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0 || intr_status == 0xffffffff)
+ break;
+
+ if (intr_status & IntrRxDone)
+ netdev_rx(dev);
+
+ /* Scavenge the skbuff list based on the Tx-done queue.
+ There are redundant checks here that may be cleaned up
+ after the driver has proven to be reliable. */
+ {
+ int consumer = readl(ioaddr + TxConsumerIdx);
+ int tx_status;
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
+ dev->name, consumer);
+#if 0
+ if (np->tx_done >= 250 || np->tx_done == 0)
+ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
+ "%d is %8.8x.\n", dev->name,
+ np->tx_done, np->tx_done_q[np->tx_done].status,
+ (np->tx_done+1) & (DONE_Q_SIZE-1),
+ np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status);
+#endif
+ while ((tx_status = cpu_to_le32(np->tx_done_q[np->tx_done].status))
+ != 0) {
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
+ dev->name, np->tx_done, tx_status);
+ if ((tx_status & 0xe0000000) == 0xa0000000) {
+ np->stats.tx_packets++;
+ } else if ((tx_status & 0xe0000000) == 0x80000000) {
+ u16 entry = tx_status; /* Implicit truncate */
+ entry >>= 3;
+ /* Scavenge the descriptor. */
+ if (np->tx_skbuff[entry]) {
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ } else
+ printk(KERN_WARNING "%s: Null skbuff at entry %d!!!\n",
+ dev->name, entry);
+ np->tx_skbuff[entry] = 0;
+ np->dirty_tx++;
+ }
+ np->tx_done_q[np->tx_done].status = 0;
+ np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
+ }
+ writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
+ }
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & IntrAbnormalSummary)
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ writel(0x0021, ioaddr + IntrTimerCtrl);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ u32 desc_status;
+
+ if (np->rx_done_q == 0) {
+ printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n",
+ dev->name, np->rx_done, np->tx_done_q);
+ return 0;
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status of %d was %8.8x.\n",
+ np->rx_done, desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RxOK)) {
+ /* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & RxFIFOErr)
+ np->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+ u16 pkt_len = desc_status; /* Implicitly Truncate */
+ int entry = (desc_status >> 16) & 0x7ff;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (le32desc_to_virt(np->rx_ring[entry].rxaddr & ~3) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in netdev_rx: %p vs. %p / %p.\n",
+ dev->name,
+ le32desc_to_virt(np->rx_ring[entry].rxaddr),
+ skb->head, temp);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+#ifdef full_rx_status
+ if (np->rx_done_q[np->rx_done].status2 & cpu_to_le32(0x01000000))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ }
+ np->cur_rx++;
+ np->rx_done_q[np->rx_done].status = 0;
+ np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
+ }
+ writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ int entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].rxaddr =
+ virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
+ }
+ if (entry == RX_RING_SIZE - 1)
+ np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
+ /* We could defer this until later... */
+ writew(entry, dev->base_addr + RxDescQIdx);
+ }
+
+ if ((np->msg_level & NETIF_MSG_RX_STATUS)
+ || memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1))
+ printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x %d.\n",
+ np->rx_done, desc_status,
+ memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1));
+
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ if (intr_status & LinkChange) {
+ int phy_num = np->phys[0];
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, phy_num, 4),
+ mdio_read(dev, phy_num, 5));
+ /* Clear sticky bit. */
+ mdio_read(dev, phy_num, 1);
+ /* If link beat has returned... */
+ if (mdio_read(dev, phy_num, 1) & 0x0004)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ /* Came close to underrunning the Tx FIFO, increase threshold. */
+ if (intr_status & IntrTxDataLow)
+ writel(++np->tx_threshold, dev->base_addr + TxThreshold);
+ /* Ignore expected normal events, and handle abnormal events. */
+ if ((intr_status &
+ ~(IntrAbnormalSummary|LinkChange|StatsMax|IntrTxDataLow| 0xFF01))
+ && (np->msg_level & NETIF_MSG_DRV))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrTxPCIErr)
+ np->stats.tx_fifo_errors++;
+ if (intr_status & IntrRxPCIErr)
+ np->stats.rx_fifo_errors++;
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ /* This adapter architecture needs no SMP locks. */
+#if LINUX_VERSION_CODE > 0x20119
+ np->stats.tx_bytes = readl(ioaddr + 0x57010);
+ np->stats.rx_bytes = readl(ioaddr + 0x57044);
+#endif
+ np->stats.tx_packets = readl(ioaddr + 0x57000);
+ np->stats.tx_aborted_errors =
+ readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
+ np->stats.tx_window_errors = readl(ioaddr + 0x57018);
+ np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
+
+ /* The chip need only report frames silently dropped. */
+ np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
+ writew(0, ioaddr + RxDMAStatus);
+ np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
+ np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
+ np->stats.rx_length_errors = readl(ioaddr + 0x57058);
+ np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
+
+ return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+ A big-endian version is also available.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c.
+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = ~0; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 rx_mode;
+ struct dev_mc_list *mclist;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
+ } else if (dev->mc_count <= 15) {
+ /* Use the 16 element perfect filter. */
+ long filter_addr = ioaddr + 0x56000 + 1*16;
+ for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
+ i++, mclist = mclist->next) {
+ u16 *eaddrs = (u16 *)mclist->dmi_addr;
+ writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
+ }
+ while (i++ < 16) {
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 8;
+ }
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ } else {
+ /* Must use a multicast hash table. */
+ long filter_addr;
+ u16 mc_filter[32]; /* Multicast hash filter */
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
+ }
+ /* Clear the perfect filter list. */
+ filter_addr = ioaddr + 0x56000 + 1*16;
+ for (i = 1; i < 16; i++) {
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 8;
+ }
+ for (filter_addr=ioaddr + 0x56100, i=0; i < 32; filter_addr+= 16, i++){
+ np->mc_filter[i] = mc_filter[i];
+ writew(mc_filter[i], filter_addr);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ }
+ writel(rx_mode, ioaddr + RxFilterMode);
+}
+
+/*
+ Handle user-level ioctl() calls.
+ We must use two numeric constants as the key because some clueless person
+ changed the value for the symbolic name.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ check_duplex(dev);
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(0, ioaddr + GenCtrl);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].addr,
+ np->tx_done_q[i].status);
+ printk(KERN_DEBUG " Rx ring at %8.8x -> %p:\n",
+ (int)virt_to_bus(np->rx_ring), np->rx_done_q);
+ if (np->rx_done_q)
+ for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
+ printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
+ i, np->rx_ring[i].rxaddr, np->rx_done_q[i].status);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+static int starfire_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writel(0x0000, ioaddr + IntrEnable);
+ writel(0, ioaddr + GenCtrl);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: we must factor start_chip() out of open(). */
+ writel(np->tx_threshold, ioaddr + TxThreshold);
+ writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
+ set_rx_mode(dev);
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ if (pci_drv_register(&starfire_drv_id, NULL)) {
+ printk(KERN_INFO " No Starfire adapters detected, driver not loaded.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&starfire_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+ iounmap((char *)(root_net_dev->base_addr));
+ next_dev = np->next_module;
+ if (np->tx_done_q) free_page((long)np->tx_done_q);
+ if (np->rx_done_q) free_page((long)np->rx_done_q);
+ if (np->priv_addr) kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` starfire.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/sundance.c b/linux/src/drivers/net/sundance.c
new file mode 100644
index 0000000..3723164
--- /dev/null
+++ b/linux/src/drivers/net/sundance.c
@@ -0,0 +1,1556 @@
+/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
+/*
+ Written 1999-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/sundance.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"sundance.c:v1.11 2/4/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/sundance.html\n";
+/* Updated to recommendations in pci-skeleton v2.12. */
+
+/* Automatically extracted configuration info:
+probe-func: sundance_probe
+config-in: tristate 'Sundance ST201 "Alta" PCI Ethernet support' CONFIG_SUNDANCE
+c-help-name: Sundance ST201 "Alta" PCI Ethernet support
+c-help-symbol: CONFIG_SUNDANCE
+c-help: This driver is for the Sundance ST201 "Alta" and Kendin KS8723, as
+c-help: used on the D-Link DFE-550 and DFE-580.
+c-help: Design information, usage details and updates are available from
+c-help: http://www.scyld.com/network/sundance.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The sundance uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can receive into any byte alignment buffers, so word-oriented
+ archs do not need a copy-align of the IP header. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Ring sizes are a power of two only for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ There must be at least five Tx entries for the tx_full hysteresis, and
+ more than 31 requires modifying the Tx status handling error recovery.
+ Leave an inactive gap in the Tx ring for better cache behavior.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Large receive rings waste memory and impact buffer accounting.
+ The driver needs to protect against interrupt latency and the kernel
+ not reserving enough available memory.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to set forced full duplex (deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the Sundance Technologies "Alta" ST201 chip.
+The Kendin KS8723 is the same design with an integrated transceiver and
+new quirks.
+
+II. Board-specific settings
+
+This is an all-in-one chip, so there are no board-specific settings.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+Unaligned buffers are permitted by the Sundance hardware, so
+frames are received into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Sundance ST201 datasheet, preliminary version.
+The Kendin KS8723 datasheet, preliminary version.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
+
+
+
+/* Work-around for Kendin chip bugs. This will be reversed after tracking
+ down all of the chip access quirks in memory mode. */
+#ifndef USE_MEM_OPS
+#define USE_IO_OPS 1
+#endif
+
+static void *sundance_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int sundance_pwr_event(void *dev_instance, int event);
+
+enum chip_capability_flags {CanHaveMII=1, KendinPktDropBug=2, };
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"D-Link DFE-580TX (Kendin/Sundance ST201 Alta)",
+ {0x10021186, 0xffffffff, 0x10121186, 0xffffffff, 0x14, 0xff},
+ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
+ {"D-Link DFE-580TX (Sundance ST201)",
+ {0x10021186, 0xffffffff, 0x10121186, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
+ {"D-Link DFE-550FX 100baseFx (Sundance ST201)",
+ {0x10031186, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
+ {"OEM Sundance Technology ST201", {0x10021186, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII},
+ {"Sundance Technology Alta", {0x020113F0, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info sundance_drv_id = {
+ "sundance", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ sundance_probe1, sundance_pwr_event };
+
+/* This driver was written to use PCI memory space, however x86-oriented
+ hardware often uses I/O space accesses. */
+#ifdef USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum alta_offsets {
+ DMACtrl=0x00, TxListPtr=0x04, TxDMACtrl=0x08, TxDescPoll=0x0a,
+ RxDMAStatus=0x0c, RxListPtr=0x10, RxDMACtrl=0x14, RxDescPoll=0x16,
+ LEDCtrl=0x1a, ASICCtrl=0x30,
+ EEData=0x34, EECtrl=0x36, TxThreshold=0x3c,
+ FlashAddr=0x40, FlashData=0x44, WakeEvent=0x45, TxStatus=0x46,
+ DownCounter=0x48, IntrClear=0x4a, IntrEnable=0x4c, IntrStatus=0x4e,
+ MACCtrl0=0x50, MACCtrl1=0x52, StationAddr=0x54,
+ MaxFrameSize=0x5A, RxMode=0x5c, MIICtrl=0x5e,
+ MulticastFilter0=0x60, MulticastFilter1=0x64,
+ RxOctetsLow=0x68, RxOctetsHigh=0x6a, TxOctetsLow=0x6c, TxOctetsHigh=0x6e,
+ TxFramesOK=0x70, RxFramesOK=0x72, StatsCarrierError=0x74,
+ StatsLateColl=0x75, StatsMultiColl=0x76, StatsOneColl=0x77,
+ StatsTxDefer=0x78, RxMissed=0x79, StatsTxXSDefer=0x7a, StatsTxAbort=0x7b,
+ StatsBcastTx=0x7c, StatsBcastRx=0x7d, StatsMcastTx=0x7e, StatsMcastRx=0x7f,
+ /* Aliased and bogus values! */
+ RxStatus=0x0c,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
+ IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
+ IntrDrvRqst=0x0040,
+ StatsMax=0x0080, LinkChange=0x0100,
+ IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
+ AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
+};
+/* Bits in MACCtrl. */
+enum mac_ctrl0_bits {
+ EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
+ EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
+};
+enum mac_ctrl1_bits {
+ StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
+ TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
+ RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
+};
+
+/* The Rx and Tx buffer descriptors.
+ Using only 32 bit fields simplifies software endian correction.
+ This structure must be aligned, and should avoid spanning cache lines.
+*/
+struct netdev_desc {
+ u32 next_desc;
+ u32 status;
+ struct desc_frag { u32 addr, length; } frag[1];
+};
+
+/* Bits in netdev_desc.status */
+enum desc_status_bits {
+ DescOwn=0x8000, DescEndPacket=0x4000, DescEndRing=0x2000,
+ DescTxDMADone=0x10000,
+ LastFrag=0x80000000, DescIntrOnTx=0x8000, DescIntrOnDMADone=0x80000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc rx_ring[RX_RING_SIZE];
+ struct netdev_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int max_interrupt_work;
+
+ /* Note: Group variables for cache line effect. */
+ struct netdev_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ spinlock_t txlock; /* Group with Tx control cache line. */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* Multicast and receive mode. */
+ spinlock_t mcastlock; /* SMP lock multicast updates. */
+ u16 mcast_filter[4];
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ int link_status;
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+/* The station address location in the EEPROM. */
+#define EEPROM_SA_OFFSET 0x10
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id,
+ unsigned int location);
+static void mdio_write(struct net_device *dev, int phy_id,
+ unsigned int location, int value);
+static int netdev_open(struct net_device *dev);
+static void sundance_start(struct net_device *dev);
+static int change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+/* Non-modular probe entry: register this driver with the PCI scan code,
+   which will call sundance_probe1() for each matching board.
+   Returns 0 on success, -ENODEV when registration fails. */
+int sundance_probe(struct net_device *dev)
+{
+	int rc = pci_drv_register(&sundance_drv_id, dev);
+
+	if (rc < 0)
+		return -ENODEV;
+	/* Emit version even if no cards detected. */
+	if (debug >= NETIF_MSG_DRV)
+		printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+	return 0;
+}
+#endif
+
+/* Probe and initialize one recognized board.
+   Reads the station address from the EEPROM, allocates an aligned
+   netdev_private (the descriptor rings at its head must be aligned, see
+   PRIV_ALIGN), links the device onto root_net_dev, scans the MII bus,
+   applies any forced-media options, and finally resets the chip via
+   ASICCtrl to erase previous configuration.
+   Returns the initialized net_device, or NULL on allocation failure. */
+static void *sundance_probe1(struct pci_dev *pdev, void *init_dev,
+							 long ioaddr, int irq, int chip_idx, int card_idx)
+{
+	struct net_device *dev;
+	struct netdev_private *np;
+	void *priv_mem;		/* Unaligned allocation backing 'np'. */
+	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	/* Perhaps NETIF_MSG_PROBE */
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+	/* Station address: three little-endian 16 bit words in the EEPROM. */
+	for (i = 0; i < 3; i++)
+		((u16 *)dev->dev_addr)[i] =
+			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+	/* Make certain elements e.g. descriptor lists are aligned. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Check for the very unlikely case of no memory. */
+	if (priv_mem == NULL)
+		return NULL;
+
+	/* All failure checks before this point.
+	   We do a request_region() only to register /proc/ioports info. */
+#ifdef USE_IO_OPS
+	request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	/* Round up to the next (PRIV_ALIGN+1)-byte boundary inside priv_mem. */
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;
+
+	/* Prepend to the driver's global device list (used at module unload). */
+	np->next_module = root_net_dev;
+	root_net_dev = dev;
+
+	np->pci_dev = pdev;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+	np->msg_level = (1 << debug) - 1;
+	np->rx_copybreak = rx_copybreak;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
+
+	/* A nonzero dev->mem_start overrides the options[] module parameter. */
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+		np->full_duplex = 1;
+
+	if (np->full_duplex)
+		np->medialock = 1;
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &mii_ioctl;
+	dev->change_mtu = &change_mtu;
+
+	if (1) {
+		int phy, phy_idx = 0;
+		np->phys[0] = 1;		/* Default setting */
+		/* Force preambles while probing unknown transceivers. */
+		mii_preamble_required++;
+		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff && mii_status != 0x0000) {
+				np->phys[phy_idx++] = phy;
+				np->advertising = mdio_read(dev, phy, 4);
+				/* Status bit 0x0040 clear: this PHY needs a preamble. */
+				if ((mii_status & 0x0040) == 0)
+					mii_preamble_required++;
+				if (np->msg_level & NETIF_MSG_PROBE)
+					printk(KERN_INFO "%s: MII PHY found at address %d, status "
+						   "0x%4.4x advertising %4.4x.\n",
+						   dev->name, phy, mii_status, np->advertising);
+			}
+		}
+		mii_preamble_required--;
+		np->mii_cnt = phy_idx;
+		if (phy_idx == 0)
+			printk(KERN_INFO "%s: No MII transceiver found!, ASIC status %x\n",
+				   dev->name, (int)readl(ioaddr + ASICCtrl));
+	}
+
+	/* Allow forcing the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port & 0x330) {
+			np->medialock = 1;
+			if (np->msg_level & NETIF_MSG_PROBE)
+				printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+					   (option & 0x300 ? 100 : 10),
+					   (np->full_duplex ? "full" : "half"));
+			if (np->mii_cnt)
+				mdio_write(dev, np->phys[0], 0,
+						   ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+						   (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+		}
+	}
+
+	/* Reset the chip to erase previous misconfiguration. */
+	if (np->msg_level & NETIF_MSG_MISC)
+		printk("ASIC Control is %x.\n", (int)readl(ioaddr + ASICCtrl));
+	writel(0x007f0000 | readl(ioaddr + ASICCtrl), ioaddr + ASICCtrl);
+	if (np->msg_level & NETIF_MSG_MISC)
+		printk("ASIC Control is now %x.\n", (int)readl(ioaddr + ASICCtrl));
+
+	return dev;
+}
+
+
+
+/* Set a new MTU.  68 is the minimum legal value; the upper bound of
+   8191 is imposed by the chip (see RxDMAFrameLen note).  The interface
+   must be down, since Rx buffers are sized at open() time. */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+	int in_range = (new_mtu >= 68) && (new_mtu <= 8191); /* Limited by RxDMAFrameLen */
+
+	if (!in_range)
+		return -EINVAL;
+	if (netif_running(dev))
+		return -EBUSY;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
+/* Read one 16 bit word from the serial EEPROM.  The request is latched
+   into EECtrl; the chip clears bit 0x8000 when the result is available
+   in EEData.  Poll with a generous bound (typically ~190 iterations
+   suffice).  Returns 0 on timeout. */
+static int eeprom_read(long ioaddr, int location)
+{
+	int attempts;
+
+	writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
+	for (attempts = 2000; attempts > 0; attempts--) {
+		if (!(readw(ioaddr + EECtrl) & 0x8000))
+			return readw(ioaddr + EEData);
+	}
+	return 0;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+ The maximum data clock rate is 2.5 Mhz.
+ The timing is decoupled from the processor clock by flushing the write
+ from the CPU write buffer with a following read, and using PCI
+ transaction time. */
+#define mdio_in(mdio_addr) readb(mdio_addr)
+#define mdio_out(value, mdio_addr) writeb(value, mdio_addr)
+#define mdio_delay(mdio_addr) readb(mdio_addr)
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+/* Establish MDIO synchronization by clocking out 32 logic-one bits,
+   as required before each management frame on older transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		mdio_out(MDIO_WRITE1, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+}
+
+/* Read MII register 'location' of transceiver 'phy_id' by bit-banging
+   the MDIO lines through the MIICtrl register.  Returns the 16 bit
+   register value. */
+static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
+{
+	long mdio_addr = dev->base_addr + MIICtrl;
+	/* 16 bit command frame: start/opcode bits, PHY address, register. */
+	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	int i, retval = 0;
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Drop the trailing idle bit and mask to the 16 data bits. */
+	return (retval>>1) & 0xffff;
+}
+
+/* Write 'value' to MII register 'location' of transceiver 'phy_id' by
+   bit-banging the MDIO lines through the MIICtrl register. */
+static void mdio_write(struct net_device *dev, int phy_id,
+					   unsigned int location, int value)
+{
+	long mdio_addr = dev->base_addr + MIICtrl;
+	/* 32 bit command frame: start/opcode/turnaround bits merged with the
+	   PHY address, register number, and the 16 data bits. */
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+	int i;
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	return;
+}
+
+
+/* Bring the interface up: claim the (shared) IRQ, build the descriptor
+   rings, start the chip, enable the Tx queue, and arm the media
+   monitoring timer.  Returns 0 on success, -EAGAIN when the IRQ cannot
+   be obtained. */
+static int netdev_open(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	/* Pin the module for the lifetime of the open interface. */
+	MOD_INC_USE_COUNT;
+
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+			   dev->name, dev->irq);
+
+	init_ring(dev);
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	/* Start from the locked duplex setting; autoneg may update it later. */
+	np->full_duplex = np->duplex_lock;
+	np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+
+	sundance_start(dev);
+	netif_start_tx_queue(dev);
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
+			   "MAC Control %x, %4.4x %4.4x.\n",
+			   dev->name, (int)readl(ioaddr + RxStatus),
+			   (int)readw(ioaddr + TxStatus), (int)readl(ioaddr + MACCtrl0),
+			   (int)readw(ioaddr + MACCtrl1), (int)readw(ioaddr + MACCtrl0));
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer;				/* timer handler */
+	add_timer(&np->timer);
+
+	return 0;
+}
+
+/* Program the chip registers and start the Rx/Tx engines.
+   Called from netdev_open(); assumes init_ring() has already built the
+   descriptor rings.  The register write order below is deliberate. */
+static void sundance_start(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	/* No reports have indicated that we need to reset the chip. */
+
+	writel(virt_to_bus(&np->rx_ring[np->cur_rx % RX_RING_SIZE]),
+		   ioaddr + RxListPtr);
+	/* The Tx list pointer is written as packets are queued. */
+
+	/* Station address must be written as 16 bit words with the Kendin chip. */
+	for (i = 0; i < 6; i += 2)
+		writew((dev->dev_addr[i + 1] << 8) + dev->dev_addr[i],
+			   ioaddr + StationAddr + i);
+
+	/* NOTE(review): 0x20 appears to be the MII duplex status bit and
+	   0x120 sets EnbFullDuplex|EnbFlowCtrl -- confirm vs. datasheet. */
+	np->link_status = readb(ioaddr + MIICtrl) & 0xE0;
+	writew((np->full_duplex || (np->link_status & 0x20)) ? 0x120 : 0,
+		   ioaddr + MACCtrl0);
+	writew(dev->mtu + 14, ioaddr + MaxFrameSize);
+	/* Enable the large-frame path for oversized MTUs. */
+	if (dev->mtu > 2047)
+		writel(readl(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
+
+	set_rx_mode(dev);
+	writew(0, ioaddr + DownCounter);
+	/* Set the chip to poll every N*320nsec. */
+	writeb(100, ioaddr + RxDescPoll);
+	writeb(127, ioaddr + TxDescPoll);
+#if 0
+	/* Disabled work-around for the Kendin packet-drop erratum. */
+	if (np->drv_flags & KendinPktDropBug)
+		writeb(0x01, ioaddr + DebugCtrl1);
+#endif
+
+	/* Enable interrupts by setting the interrupt mask. */
+	writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
+		   | StatsMax | LinkChange, ioaddr + IntrEnable);
+	writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+}
+
+/* Re-check the autonegotiated duplex and update MACCtrl0 when the MII
+   partner's advertisement implies a change.  Does nothing when the
+   duplex is user-locked or the transceiver read fails (0xffff). */
+static void check_duplex(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int partner = mdio_read(dev, np->phys[0], 5);
+	int common = partner & np->advertising;
+	int want_full;
+
+	if (np->duplex_lock || partner == 0xffff)
+		return;
+	/* Full duplex: 100baseTx-FD, or 10baseT-FD with no 100baseTx ability. */
+	want_full = (common & 0x0100) || (common & 0x01C0) == 0x0040;
+	if (np->full_duplex == want_full)
+		return;
+	np->full_duplex = want_full;
+	if (np->msg_level & NETIF_MSG_LINK)
+		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+			   "negotiated capability %4.4x.\n", dev->name,
+			   want_full ? "full" : "half", np->phys[0], common);
+	writew(want_full ? 0x20 : 0, ioaddr + MACCtrl0);
+}
+
+/* Media monitoring timer, run every 10 seconds: optionally report chip
+   status, kick a transmitter that has been stuck past TX_TIMEOUT, and
+   re-check the negotiated duplex before rearming itself. */
+static void netdev_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	if (np->msg_level & NETIF_MSG_TIMER)
+		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
+			   "Tx %x Rx %x.\n",
+			   dev->name, (int)readw(ioaddr + IntrEnable),
+			   (int)readw(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
+	/* Note: This does not catch a 0 or 1 element stuck queue. */
+	if (netif_queue_paused(dev)
+		&& np->cur_tx - np->dirty_tx > 1
+		&& (jiffies - dev->trans_start) > TX_TIMEOUT)
+		tx_timeout(dev);
+	check_duplex(dev);
+	/* Rearm for the next interval. */
+	np->timer.expires = jiffies + 10*HZ;
+	add_timer(&np->timer);
+}
+
+/* Handle a transmit watchdog expiry: log the Tx status, dump the rings
+   when Tx-error messages are enabled, and re-write the interrupt mask.
+   The chip itself is not reset here (see the inline notes). */
+static void tx_timeout(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+
+	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x,"
+		   " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
+
+#ifdef __i386__
+	/* Ring dump casts pointers to 32 bit ints, hence the x86-only guard. */
+	if (np->msg_level & NETIF_MSG_TX_ERR) {
+		int i;
+		printk(KERN_DEBUG " Rx ring %8.8x: ", (int)np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++)
+			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+		printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)np->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(" %8.8x", np->tx_ring[i].status);
+		printk("\n");
+	}
+#endif
+
+	/* Perhaps we should reinitialize the hardware here. */
+	dev->if_port = 0;
+	/* Stop and restart the chip's Tx processes . */
+
+	/* Trigger an immediate transmit demand. */
+	writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
+		   | StatsMax | LinkChange, ioaddr + IntrEnable);
+
+	/* Reset the watchdog clock and count the error. */
+	dev->trans_start = jiffies;
+	np->stats.tx_errors++;
+	return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+/* Build empty Tx/Rx descriptor rings and pre-allocate the Rx buffers.
+   Allocation failure is tolerated: dirty_rx records the shortfall so
+   the Rx refill path can retry later. */
+static void init_ring(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int entry;
+
+	np->tx_full = 0;
+	np->cur_rx = np->cur_tx = 0;
+	np->dirty_rx = np->dirty_tx = 0;
+
+	/* Buffers must hold the MTU plus slack, but never less than the
+	   common allocation size. */
+	np->rx_buf_sz = dev->mtu + 20;
+	if (np->rx_buf_sz < PKT_BUF_SZ)
+		np->rx_buf_sz = PKT_BUF_SZ;
+	np->rx_head_desc = &np->rx_ring[0];
+
+	/* Chain each Rx descriptor to its successor and mark it empty. */
+	for (entry = 0; entry < RX_RING_SIZE; entry++) {
+		np->rx_ring[entry].next_desc = virt_to_le32desc(&np->rx_ring[entry+1]);
+		np->rx_ring[entry].status = 0;
+		np->rx_ring[entry].frag[0].length = 0;
+		np->rx_skbuff[entry] = 0;
+	}
+	/* Close the ring: the final descriptor points back to the first. */
+	np->rx_ring[RX_RING_SIZE-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
+	for (entry = 0; entry < RX_RING_SIZE; entry++) {
+		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		np->rx_skbuff[entry] = skb;
+		if (skb == NULL)
+			break;
+		skb->dev = dev;			/* Mark as being used by this device. */
+		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
+		np->rx_ring[entry].frag[0].addr = virt_to_le32desc(skb->tail);
+		np->rx_ring[entry].frag[0].length =
+			cpu_to_le32(np->rx_buf_sz | LastFrag);
+	}
+	np->dirty_rx = (unsigned int)(entry - RX_RING_SIZE);
+
+	for (entry = 0; entry < TX_RING_SIZE; entry++) {
+		np->tx_skbuff[entry] = 0;
+		np->tx_ring[entry].status = 0;
+	}
+}
+
+/* Queue one packet for transmission.  Ownership of 'skb' passes to the
+   driver; it is released after the descriptor completes.  Returns 0 on
+   success, 1 when the queue could not be paused (caller will requeue).
+   The statement ordering around the status/ownership word is critical. */
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	struct netdev_desc *txdesc;
+	unsigned entry;
+
+	/* Block a timer-based transmit from overlapping. */
+	if (netif_pause_tx_queue(dev) != 0) {
+		/* This watchdog code is redundant with the media monitor timer. */
+		if (jiffies - dev->trans_start > TX_TIMEOUT)
+			tx_timeout(dev);
+		return 1;
+	}
+
+	/* Note: Ordering is important here, set the field with the
+	   "ownership" bit last, and only then increment cur_tx. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+	np->tx_skbuff[entry] = skb;
+	txdesc = &np->tx_ring[entry];
+
+	txdesc->next_desc = 0;
+	/* Note: disable the interrupt generation here before releasing. */
+	txdesc->status =
+		cpu_to_le32((entry<<2) | DescIntrOnDMADone | DescIntrOnTx | 1);
+	txdesc->frag[0].addr = virt_to_le32desc(skb->data);
+	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
+	/* Append to the chip-visible chain only after the descriptor is built. */
+	if (np->last_tx)
+		np->last_tx->next_desc = virt_to_le32desc(txdesc);
+	np->last_tx = txdesc;
+	np->cur_tx++;
+
+	/* On some architectures: explicitly flush cache lines here. */
+
+	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+		np->tx_full = 1;
+		/* Check for a just-cleared queue. */
+		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+			< TX_QUEUE_LEN - 2) {
+			np->tx_full = 0;
+			netif_unpause_tx_queue(dev);
+		} else
+			netif_stop_tx_queue(dev);
+	} else
+		netif_unpause_tx_queue(dev);		/* Typical path */
+
+	/* Side effect: The read wakes the potentially-idle transmit channel. */
+	if (readl(dev->base_addr + TxListPtr) == 0)
+		writel(virt_to_bus(&np->tx_ring[entry]), dev->base_addr + TxListPtr);
+
+	dev->trans_start = jiffies;
+
+	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+		printk(KERN_DEBUG "%s: Transmit frame #%d len %ld queued in slot %u.\n",
+			   dev->name, np->cur_tx, skb->len, entry);
+	}
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread.  Loops until the status register is quiet or the
+ max_interrupt_work budget is exhausted; each pass acks the sources it
+ will service, then dispatches Rx, Tx-completion, and error work. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+ do {
+ int intr_status = readw(ioaddr + IntrStatus);
+ /* 0xffff means the card is gone (e.g. hot-unplug read-back). */
+ if ((intr_status & ~IntrRxDone) == 0 || intr_status == 0xffff)
+ break;
+
+ /* Acknowledge only the sources handled below. */
+ writew(intr_status & (IntrRxDMADone | IntrPCIErr |
+ IntrDrvRqst |IntrTxDone|IntrTxDMADone |
+ StatsMax | LinkChange),
+ ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status & IntrRxDMADone)
+ netdev_rx(dev);
+
+ /* Drain per-frame Tx status words; each read pops one entry. */
+ if (intr_status & IntrTxDone) {
+ int txboguscnt = 32;
+ int tx_status = readw(ioaddr + TxStatus);
+ while (tx_status & 0x80) {
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk("%s: Transmit status is %4.4x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x1e) {
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk("%s: Transmit error status %4.4x.\n",
+ dev->name, tx_status);
+ np->stats.tx_errors++;
+ if (tx_status & 0x10) np->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (tx_status & 0x08) np->stats.collisions16++;
+#else
+ if (tx_status & 0x08) np->stats.collisions++;
+#endif
+ if (tx_status & 0x04) np->stats.tx_fifo_errors++;
+ if (tx_status & 0x02) np->stats.tx_window_errors++;
+ /* This reset has not been verified!. */
+ if (tx_status & 0x10) { /* Reset the Tx. */
+ writel(0x001c0000 | readl(ioaddr + ASICCtrl),
+ ioaddr + ASICCtrl);
+#if 0 /* Do we need to reset the Tx pointer here? */
+ writel(virt_to_bus(&np->tx_ring[np->dirty_tx]),
+ dev->base_addr + TxListPtr);
+#endif
+ }
+ if (tx_status & 0x1e) /* Restart the Tx. */
+ writew(TxEnable, ioaddr + MACCtrl1);
+ }
+ /* Yup, this is a documentation bug. It cost me *hours*. */
+ writew(0, ioaddr + TxStatus);
+ if (--txboguscnt < 0)
+ break;
+ tx_status = readw(ioaddr + TxStatus);
+ }
+ }
+ /* Reclaim completed Tx descriptors and free their skbs. */
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if ( ! (np->tx_ring[entry].status & cpu_to_le32(DescTxDMADone)))
+ break;
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrDrvRqst | IntrPCIErr | LinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+
+ /* Budget exhausted: mask down to IntrDrvRqst and let the chip's
+    down-counter re-raise us shortly (see netdev_error()). */
+ if (--boguscnt < 0) {
+ int intr_clear = readw(ioaddr + IntrClear);
+ get_stats(dev);
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x / 0x%4.4x .. 0x%4.4x.\n",
+ dev->name, intr_status, intr_clear,
+ (int)readw(ioaddr + IntrClear));
+ /* Re-enable us in 3.2msec. */
+ writew(1000, ioaddr + DownCounter);
+ writew(IntrDrvRqst, ioaddr + IntrEnable);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readw(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation.  Walks completed Rx
+ descriptors, hands good frames to the stack (copying small ones into a
+ fresh minimal skb), then refills consumed ring slots. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (np->rx_head_desc->status & cpu_to_le32(DescOwn)) {
+ struct netdev_desc *desc = np->rx_head_desc;
+ u32 frame_status = le32_to_cpu(desc->status);
+ int pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if (frame_status & 0x001f4000) {
+ /* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ frame_status);
+ np->stats.rx_errors++;
+ /* NOTE(review): bit 0x00100000 is tested both here and again
+    below for the "Oversized" warning -- confirm against the
+    datasheet which bit is which. */
+ if (frame_status & 0x00100000) np->stats.rx_length_errors++;
+ if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
+ if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
+ if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
+ if (frame_status & 0x00100000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame,"
+ " status %8.8x.\n",
+ dev->name, frame_status);
+ }
+ } else {
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ } else {
+ /* Pass the ring skb up directly; the slot is refilled
+    below.  NOTE(review): if the earlier allocation for
+    this slot failed, rx_skbuff[entry] could be NULL here
+    -- verify the chip cannot complete a descriptor whose
+    buffer was never assigned. */
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ np->rx_ring[entry].frag[0].addr = virt_to_le32desc(skb->tail);
+ }
+ /* Perhaps we need not reset this field. */
+ np->rx_ring[entry].frag[0].length =
+ cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[entry].status = 0;
+ }
+
+ /* No need to restart Rx engine, it will poll. */
+ return 0;
+}
+
+/* Handle the uncommon interrupt sources: the "too much work" restart
+   request, link state changes, statistics overflow, and PCI errors. */
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ if (intr_status & IntrDrvRqst) {
+ /* Stop the down counter and turn interrupts back on. */
+ printk(KERN_WARNING "%s: Turning interrupts back on.\n", dev->name);
+ writew(0, ioaddr + DownCounter);
+ writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst |
+ IntrTxDone | StatsMax | LinkChange, ioaddr + IntrEnable);
+ }
+ if (intr_status & LinkChange) {
+ /* Bit 0x80 of MIICtrl tracks link-up; report transitions. */
+ int new_status = readb(ioaddr + MIICtrl) & 0xE0;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], 4),
+ mdio_read(dev, np->phys[0], 5));
+ if ((np->link_status ^ new_status) & 0x80) {
+ if (new_status & 0x80)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ }
+ np->link_status = new_status;
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* We must do a global reset of DMA to continue. */
+ }
+}
+
+/* Fold the chip's hardware statistics counters into np->stats and
+   return it.  Bails out early if the chip appears absent (all-ones
+   read-back from StationAddr). */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ if (readw(ioaddr + StationAddr) == 0xffff)
+ return &np->stats;
+
+ /* We do not spinlock statistics.
+ A window only exists if we have non-atomic adds, the error counts
+ are typically zero, and statistics are non-critical. */
+ np->stats.rx_missed_errors += readb(ioaddr + RxMissed);
+ np->stats.tx_packets += readw(ioaddr + TxFramesOK);
+ np->stats.rx_packets += readw(ioaddr + RxFramesOK);
+ np->stats.collisions += readb(ioaddr + StatsLateColl);
+ np->stats.collisions += readb(ioaddr + StatsMultiColl);
+ np->stats.collisions += readb(ioaddr + StatsOneColl);
+ /* Results discarded -- presumably these counters are read-to-clear
+    and must be drained to rearm StatsMax; confirm with the datasheet. */
+ readb(ioaddr + StatsCarrierError);
+ readb(ioaddr + StatsTxDefer);
+ for (i = StatsTxXSDefer; i <= StatsMcastRx; i++)
+ readb(ioaddr + i);
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
+ np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
+ np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
+ np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;
+#else
+ readw(ioaddr + TxOctetsLow);
+ readw(ioaddr + TxOctetsHigh);
+ readw(ioaddr + RxOctetsLow);
+ readw(ioaddr + RxOctetsHigh);
+#endif
+
+ return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+ A big-endian version is also available.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c.
+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+/* Bit-serial CRC-32 over 'length' bytes of 'data', LSB-first with the
+   reflected polynomial; the caller masks the result down to the bits
+   its chip's hash filter uses (see set_rx_mode). */
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = ~0; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ /* Process each octet one bit at a time, LSB first. */
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+/* Program the receive filter from dev->flags and the multicast list:
+   promiscuous, all-multicast, a 64-bit CRC hash of the multicast
+   addresses, or unicast+broadcast only. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 mc_filter[4]; /* Multicast hash filter */
+ u32 rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, ~0, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else if (dev->mc_count) {
+ struct dev_mc_list *mclist;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ /* Hash each address into the 64-bit filter (low 6 CRC bits).
+    NOTE(review): set_bit() operates on long-sized words while
+    mc_filter is u16[] -- verify the bit ordering on big-endian. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
+ } else {
+ /* No multicast at all: minimal filter, no hash table write. */
+ writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ return;
+ }
+ for (i = 0; i < 4; i++)
+ writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
+ writeb(rx_mode, ioaddr + RxMode);
+}
+
+/* Private ioctl handler: MII register access (both the old 0x89Fx and
+   the standard 0x894x SIOC*MII* numbers) plus driver tunables. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ /* Shadow writes to our own PHY so the driver's notion of
+    forced duplex tracks what userspace programmed. */
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ /* Export the driver tunables to userspace. */
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Power-management / driver-lifecycle event callback.  Returns 0 on
+   success, -1 for unknown events. */
+static int sundance_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writew(0x0000, ioaddr + IntrEnable);
+ writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+ break;
+ case DRV_RESUME:
+ sundance_start(dev);
+ break;
+ case DRV_DETACH: {
+ /* Tear the device down and unlink it from the driver's list. */
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ /* NOTE(review): cleanup_module() releases the region only when
+    USE_IO_OPS is defined, but this path releases it always --
+    verify which is correct for memory-mapped configurations. */
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ case DRV_PWR_WakeOn:
+ /* Arm wake-on-LAN event bit, then change power state below. */
+ writeb(readb(ioaddr + WakeEvent) | 2, ioaddr + WakeEvent);
+ /* Fall through. */
+ case DRV_PWR_DOWN:
+ case DRV_PWR_UP:
+ acpi_set_pwr_state(np->pci_dev, event==DRV_PWR_UP ? ACPI_D0:ACPI_D3);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Stop the interface: quiesce the chip, free the IRQ, and release all
+   ring buffers.  Counterpart of the open routine. */
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, (int)readw(ioaddr + TxStatus),
+ (int)readl(ioaddr + RxStatus), (int)readw(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writew(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
+ np->tx_ring[i].frag[0].length);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
+ np->rx_ring[i].frag[0].length);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ /* Free any skbs still pending in the Tx ring. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Module entry point: print the version banner (when debug permits)
+   and register with the driver's PCI scaffolding, which probes for
+   supported adapters. */
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&sundance_drv_id, NULL);
+}
+
+/* Module exit point: unregister from the PCI scaffolding, then walk the
+   driver's device list freeing every device and its resources. */
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&sundance_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)root_net_dev->base_addr);
+#endif
+ next_dev = np->next_module;
+ /* priv may have been over-allocated for alignment; priv_addr is
+    the original kmalloc pointer. */
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` sundance.o"
+ * compile-cmd1: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c sundance.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c sundance.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/tlan.c b/linux/src/drivers/net/tlan.c
new file mode 100644
index 0000000..fedc11f
--- /dev/null
+++ b/linux/src/drivers/net/tlan.c
@@ -0,0 +1,2863 @@
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.c
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1998 James Banks
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ ** This file is best viewed/edited with columns>=132.
+ *
+ ** Useful (if not required) reading:
+ *
+ * Texas Instruments, ThunderLAN Programmer's Guide,
+ * TI Literature Number SPWU013A
+ * available in PDF format from www.ti.com
+ * Level One, LXT901 and LXT970 Data Sheets
+ * available in PDF format from www.level1.com
+ * National Semiconductor, DP83840A Data Sheet
+ * available in PDF format from www.national.com
+ * Microchip Technology, 24C01A/02A/04A Data Sheet
+ * available in PDF format from www.microchip.com
+ *
+ ********************************************************************/
+
+
+#include <linux/module.h>
+
+#include "tlan.h"
+
+#include <linux/bios32.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+
+
+typedef u32 (TLanIntVectorFunc)( struct device *, u16 );
+
+
+#ifdef MODULE
+
+static struct device *TLanDevices = NULL;
+static int TLanDevicesInstalled = 0;
+
+#endif
+
+
+/* Module load-time options; copied into each adapter's private info by
+   init_module() / tlan_probe(). */
+static int debug = 0; /* Debug message level. */
+static int aui = 0; /* Nonzero presumably selects the AUI port -- confirm. */
+static int sa_int = 0; /* Stored in priv->sa_int; presumably an IRQ-flags option -- confirm. */
+static int bbuf = 0; /* NOTE(review): declared but unused in this chunk. */
+static int duplex = 0; /* 1 or 2 force a duplex mode, anything else autonegotiates. */
+static int speed = 0; /* 10 or 100 (Mbps) forces a speed, anything else autonegotiates. */
+static u8 *TLanPadBuffer; /* Zeroed pad for short Tx frames (TLAN_MIN_FRAME_SIZE bytes). */
+static char TLanSignature[] = "TLAN";
+static int TLanVersionMajor = 1;
+static int TLanVersionMinor = 0;
+
+
+/* Table of supported TLAN-based adapters: PCI vendor/device IDs, a
+   human-readable label, adapter feature flags, and a final per-family
+   byte (0x83 for Compaq, 0xF8 for Olicom -- meaning not visible here).
+   Terminated by an all-zero sentinel; TLan_PciProbe() walks this list. */
+static TLanAdapterEntry TLanAdapterList[] = {
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10,
+ "Compaq Netelligent 10 T PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100,
+ "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETFLEX_3P_INTEGRATED,
+ "Compaq Integrated NetFlex-3/P",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETFLEX_3P,
+ "Compaq NetFlex-3/P",
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETFLEX_3P_BNC,
+ "Compaq NetFlex-3/P",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100_PROLIANT,
+ "Compaq Netelligent Integrated 10/100 TX UTP",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100_DUAL,
+ "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_DESKPRO_4000_5233MMX,
+ "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_OLICOM,
+ PCI_DEVICE_ID_OLICOM_OC2183,
+ "Olicom OC-2183/2185",
+ TLAN_ADAPTER_USE_INTERN_10,
+ 0xF8
+ },
+ { PCI_VENDOR_ID_OLICOM,
+ PCI_DEVICE_ID_OLICOM_OC2325,
+ "Olicom OC-2325",
+ TLAN_ADAPTER_UNMANAGED_PHY,
+ 0xF8
+ },
+ { PCI_VENDOR_ID_OLICOM,
+ PCI_DEVICE_ID_OLICOM_OC2326,
+ "Olicom OC-2326",
+ TLAN_ADAPTER_USE_INTERN_10,
+ 0xF8
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
+ "Compaq Netelligent 10/100 TX UTP",
+ TLAN_ADAPTER_ACTIVITY_LED,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_T2,
+ "Compaq Netelligent 10 T/2 PCI UTP/Coax",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { 0,
+ 0,
+ NULL,
+ 0,
+ 0
+ } /* End of List */
+};
+
+
+static int TLan_PciProbe( u8 *, u8 *, u8 *, u8 *, u32 *, u32 * );
+static int TLan_Init( struct device * );
+static int TLan_Open(struct device *dev);
+static int TLan_StartTx(struct sk_buff *, struct device *);
+static void TLan_HandleInterrupt(int, void *, struct pt_regs *);
+static int TLan_Close(struct device *);
+static struct net_device_stats *TLan_GetStats( struct device * );
+static void TLan_SetMulticastList( struct device * );
+
+static u32 TLan_HandleInvalid( struct device *, u16 );
+static u32 TLan_HandleTxEOF( struct device *, u16 );
+static u32 TLan_HandleStatOverflow( struct device *, u16 );
+static u32 TLan_HandleRxEOF( struct device *, u16 );
+static u32 TLan_HandleDummy( struct device *, u16 );
+static u32 TLan_HandleTxEOC( struct device *, u16 );
+static u32 TLan_HandleStatusCheck( struct device *, u16 );
+static u32 TLan_HandleRxEOC( struct device *, u16 );
+
+static void TLan_Timer( unsigned long );
+
+static void TLan_ResetLists( struct device * );
+static void TLan_FreeLists( struct device * );
+static void TLan_PrintDio( u16 );
+static void TLan_PrintList( TLanList *, char *, int );
+static void TLan_ReadAndClearStats( struct device *, int );
+static void TLan_ResetAdapter( struct device * );
+static void TLan_FinishReset( struct device * );
+static void TLan_SetMac( struct device *, int areg, char *mac );
+
+static void TLan_PhyPrint( struct device * );
+static void TLan_PhyDetect( struct device * );
+static void TLan_PhyPowerDown( struct device * );
+static void TLan_PhyPowerUp( struct device * );
+static void TLan_PhyReset( struct device * );
+static void TLan_PhyStartLink( struct device * );
+static void TLan_PhyFinishAutoNeg( struct device * );
+/*
+static int TLan_PhyNop( struct device * );
+static int TLan_PhyInternalCheck( struct device * );
+static int TLan_PhyInternalService( struct device * );
+static int TLan_PhyDp83840aCheck( struct device * );
+*/
+
+static int TLan_MiiReadReg( struct device *, u16, u16, u16 * );
+static void TLan_MiiSendData( u16, u32, unsigned );
+static void TLan_MiiSync( u16 );
+static void TLan_MiiWriteReg( struct device *, u16, u16, u16 );
+
+static void TLan_EeSendStart( u16 );
+static int TLan_EeSendByte( u16, u8, int );
+static void TLan_EeReceiveByte( u16, u8 *, int );
+static int TLan_EeReadByte( struct device *, u8, u8 * );
+
+
+/* Interrupt dispatch table, TLAN_INT_NUMBER_OF_INTS entries; presumably
+   indexed by the chip's interrupt-type field in TLan_HandleInterrupt()
+   -- order must match the TLAN_INT_* numbering in tlan.h. */
+static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+ TLan_HandleInvalid,
+ TLan_HandleTxEOF,
+ TLan_HandleStatOverflow,
+ TLan_HandleRxEOF,
+ TLan_HandleDummy,
+ TLan_HandleTxEOC,
+ TLan_HandleStatusCheck,
+ TLan_HandleRxEOC
+};
+
+ /***************************************************************
+ * TLan_SetTimer
+ *
+ * Arms the driver's private timer to run TLan_Timer() after
+ * 'ticks' jiffies, recording the request type and time in the
+ * private info. If a timer is already pending (function set),
+ * the new request is silently dropped.
+ *
+ * BUG FIX: the original returned from inside the cli()/sti()
+ * critical section without calling sti(), leaving interrupts
+ * disabled whenever a timer was already pending.
+ **************************************************************/
+
+static inline void
+TLan_SetTimer( struct device *dev, u32 ticks, u32 type )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+
+ cli();
+ if ( priv->timer.function != NULL ) {
+ sti(); /* Re-enable interrupts before bailing out. */
+ return;
+ }
+ priv->timer.function = &TLan_Timer;
+ sti();
+
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + ticks;
+ priv->timerSetAt = jiffies;
+ priv->timerType = type;
+ add_timer( &priv->timer );
+
+} /* TLan_SetTimer */
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Primary Functions
+
+ These functions are more or less common to all Linux network drivers.
+
+******************************************************************************
+*****************************************************************************/
+
+
+#ifdef MODULE
+
+ /***************************************************************
+ * init_module
+ *
+ * Returns:
+ * 0 if module installed ok, non-zero if not.
+ * Parms:
+ * None
+ *
+ * This function begins the setup of the driver creating a
+ * pad buffer, finding all TLAN devices (matching
+ * TLanAdapterList entries), and creating and initializing a
+ * device structure for each adapter.
+ *
+ **************************************************************/
+
+/* Module entry point: allocate the shared pad buffer, probe for every
+   supported TLAN adapter, and register a device for each one found.
+   Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV when
+   no adapter was found. */
+extern int init_module(void)
+{
+ TLanPrivateInfo *priv;
+ u8 bus;
+ struct device *dev;
+ size_t dev_size;
+ u8 dfn;
+ u32 index;
+ int failed;
+ int found;
+ u32 io_base;
+ u8 irq;
+ u8 rev;
+
+ printk( "TLAN driver, v%d.%d, (C) 1997-8 Caldera, Inc.\n",
+ TLanVersionMajor,
+ TLanVersionMinor
+ );
+ /* Zeroed DMA-able buffer used to pad short Tx frames. */
+ TLanPadBuffer = (u8 *) kmalloc( TLAN_MIN_FRAME_SIZE,
+ ( GFP_KERNEL | GFP_DMA )
+ );
+ if ( TLanPadBuffer == NULL ) {
+ printk( "TLAN: Could not allocate memory for pad buffer.\n" );
+ return -ENOMEM;
+ }
+
+ memset( TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE );
+
+ /* The device struct and its private info share one allocation. */
+ dev_size = sizeof(struct device) + sizeof(TLanPrivateInfo);
+
+ while ( ( found = TLan_PciProbe( &bus, &dfn, &irq, &rev, &io_base, &index ) ) ) {
+ dev = (struct device *) kmalloc( dev_size, GFP_KERNEL );
+ if ( dev == NULL ) {
+ printk( "TLAN: Could not allocate memory for device.\n" );
+ continue;
+ }
+ memset( dev, 0, dev_size );
+
+ dev->priv = priv = ( (void *) dev ) + sizeof(struct device);
+ dev->name = priv->devName;
+ strcpy( priv->devName, " " );
+ dev->base_addr = io_base;
+ dev->irq = irq;
+ dev->init = TLan_Init;
+
+ /* Copy the load-time options, sanitizing duplex/speed. */
+ priv->adapter = &TLanAdapterList[index];
+ priv->adapterRev = rev;
+ priv->aui = aui;
+ if ( ( duplex != 1 ) && ( duplex != 2 ) ) {
+ duplex = 0;
+ }
+ priv->duplex = duplex;
+ if ( ( speed != 10 ) && ( speed != 100 ) ) {
+ speed = 0;
+ }
+ priv->speed = speed;
+ priv->sa_int = sa_int;
+ priv->debug = debug;
+
+ ether_setup( dev );
+
+ failed = register_netdev( dev );
+
+ if ( failed ) {
+ printk( "TLAN: Could not register device.\n" );
+ kfree( dev );
+ } else {
+ priv->nextDevice = TLanDevices;
+ TLanDevices = dev;
+ TLanDevicesInstalled++;
+ printk("TLAN: %s irq=%2d io=%04x, %s, Rev. %d\n",
+ dev->name,
+ (int) dev->irq,
+ (int) dev->base_addr,
+ priv->adapter->deviceLabel,
+ priv->adapterRev );
+ }
+ }
+
+ /* BUG FIX: the original tested 'TLanDevicesInstalled >= 0', which is
+ always true -- the -ENODEV branch was unreachable and the pad
+ buffer leaked when no adapter was found. */
+ if ( TLanDevicesInstalled == 0 ) {
+ kfree( TLanPadBuffer );
+ return -ENODEV;
+ }
+
+ return 0;
+
+} /* init_module */
+
+
+
+
+ /***************************************************************
+ * cleanup_module
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * None
+ *
+ * Goes through the TLanDevices list and frees the device
+ * structs and memory associated with each device (lists
+ * and buffers). It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
+
+/* Module exit point: unwind the TLanDevices list -- free each device's
+   DMA storage, release its I/O region, unregister it, and free the
+   combined device+private allocation -- then free the pad buffer. */
+extern void cleanup_module(void)
+{
+ struct device *dev;
+ TLanPrivateInfo *priv;
+
+ while ( TLanDevicesInstalled ) {
+ dev = TLanDevices;
+ priv = (TLanPrivateInfo *) dev->priv;
+ if ( priv->dmaStorage ) {
+ kfree( priv->dmaStorage );
+ }
+ release_region( dev->base_addr, 0x10 );
+ unregister_netdev( dev );
+ TLanDevices = priv->nextDevice;
+ kfree( dev ); /* priv lives inside this allocation too. */
+ TLanDevicesInstalled--;
+ }
+ kfree( TLanPadBuffer );
+
+} /* cleanup_module */
+
+
+#else /* MODULE */
+
+
+
+
+ /***************************************************************
+ * tlan_probe
+ *
+ * Returns:
+ * 0 on success, error code on error
+ * Parms:
+ * dev device struct to use if adapter is
+ * found.
+ *
+ * The name is lower case to fit in with all the rest of
+ * the netcard_probe names. This function looks for a/
+ * another TLan based adapter, setting it up with the
+ * provided device struct if one is found.
+ *
+ **************************************************************/
+
+/* Compiled-in probe entry (non-module build): find the next TLAN
+   adapter, allocate/initialize its private info, and set up 'dev'.
+   Media options are decoded from dev->mem_start bits (aui, sa_int,
+   duplex, speed) and the debug level from dev->mem_end. */
+extern int tlan_probe( struct device *dev )
+{
+ TLanPrivateInfo *priv;
+ static int pad_allocated = 0;
+ int found;
+ u8 bus, dfn, irq, rev;
+ u32 io_base, index;
+
+ found = TLan_PciProbe( &bus, &dfn, &irq, &rev, &io_base, &index );
+
+ if ( ! found ) {
+ return -ENODEV;
+ }
+
+ dev->priv = kmalloc( sizeof(TLanPrivateInfo), GFP_KERNEL );
+
+ if ( dev->priv == NULL ) {
+ printk( "TLAN: Could not allocate memory for device.\n" );
+ return -ENOMEM;
+ }
+
+ memset( dev->priv, 0, sizeof(TLanPrivateInfo) );
+
+ /* The pad buffer is shared by all adapters; allocate it once. */
+ if ( ! pad_allocated ) {
+ TLanPadBuffer = (u8 *) kmalloc( TLAN_MIN_FRAME_SIZE,
+// ( GFP_KERNEL | GFP_DMA )
+ ( GFP_KERNEL )
+ );
+ if ( TLanPadBuffer == NULL ) {
+ printk( "TLAN: Could not allocate memory for padding.\n" );
+ kfree( dev->priv );
+ return -ENOMEM;
+ } else {
+ pad_allocated = 1;
+ memset( TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE );
+ }
+ }
+
+ priv = (TLanPrivateInfo *) dev->priv;
+
+ dev->name = priv->devName;
+ strcpy( priv->devName, " " );
+
+ /* NOTE(review): init_etherdev() is passed a fresh priv size even
+    though dev->priv was just allocated above -- verify this does not
+    orphan the first allocation on this kernel version. */
+ dev = init_etherdev( dev, sizeof(TLanPrivateInfo) );
+
+ dev->base_addr = io_base;
+ dev->irq = irq;
+
+
+ /* Decode media/debug options from the boot-time mem_start/mem_end. */
+ priv->adapter = &TLanAdapterList[index];
+ priv->adapterRev = rev;
+ priv->aui = dev->mem_start & 0x01;
+ priv->duplex = ( ( dev->mem_start & 0x0C ) == 0x0C ) ? 0 : ( dev->mem_start & 0x0C ) >> 2;
+ priv->speed = ( ( dev->mem_start & 0x30 ) == 0x30 ) ? 0 : ( dev->mem_start & 0x30 ) >> 4;
+ if ( priv->speed == 0x1 ) {
+ priv->speed = TLAN_SPEED_10;
+ } else if ( priv->speed == 0x2 ) {
+ priv->speed = TLAN_SPEED_100;
+ }
+ priv->sa_int = dev->mem_start & 0x02;
+ priv->debug = dev->mem_end;
+
+
+ printk("TLAN %d.%d: %s irq=%2d io=%04x, %s, Rev. %d\n",
+ TLanVersionMajor,
+ TLanVersionMinor,
+ dev->name,
+ (int) irq,
+ io_base,
+ priv->adapter->deviceLabel,
+ priv->adapterRev );
+
+ TLan_Init( dev );
+
+ return 0;
+
+} /* tlan_probe */
+
+
+#endif /* MODULE */
+
+
+
+
+ /***************************************************************
+ * TLan_PciProbe
+ *
+ * Returns:
+ * 1 if another TLAN card was found, 0 if not.
+ * Parms:
+ * pci_bus The PCI bus the card was found
+ * on.
+ * pci_dfn The PCI whatever the card was
+ * found at.
+ * pci_irq The IRQ of the found adapter.
+ * pci_rev The revision of the adapter.
+ * pci_io_base The first IO port used by the
+ * adapter.
+ * dl_ix The index in the device list
+ * of the adapter.
+ *
+ * This function searches for an adapter with PCI vendor
+ * and device IDs matching those in the TLanAdapterList.
+ * The function 'remembers' the last device it found,
+ * and so finds a new device (if any more are to be found)
+ * each time the function is called. It then looks up
+ * pertinent PCI info and returns it to the caller.
+ *
+ **************************************************************/
+
+int TLan_PciProbe( u8 *pci_bus, u8 *pci_dfn, u8 *pci_irq, u8 *pci_rev, u32 *pci_io_base, u32 *dl_ix )
+{
+ /* Scan cursors persist across calls so each call returns the next
+  * matching adapter: dl_index walks TLanAdapterList, pci_index walks
+  * the instances of the current vendor/device id.
+  */
+ static int dl_index = 0;
+ static int pci_index = 0;
+
+ int not_found;
+ u8 pci_latency;
+ u16 pci_command;
+ int reg;
+
+
+ if ( ! pcibios_present() ) {
+ printk( "TLAN: PCI Bios not present.\n" );
+ return 0;
+ }
+
+ for (; TLanAdapterList[dl_index].vendorId != 0; dl_index++) {
+
+ not_found = pcibios_find_device(
+ TLanAdapterList[dl_index].vendorId,
+ TLanAdapterList[dl_index].deviceId,
+ pci_index,
+ pci_bus,
+ pci_dfn
+ );
+
+ if ( ! not_found ) {
+
+ TLAN_DBG(
+ TLAN_DEBUG_GNRL,
+ "TLAN: found: Vendor Id = 0x%hx, Device Id = 0x%hx\n",
+ TLanAdapterList[dl_index].vendorId,
+ TLanAdapterList[dl_index].deviceId
+ );
+
+ pcibios_read_config_byte ( *pci_bus, *pci_dfn, PCI_REVISION_ID, pci_rev);
+ pcibios_read_config_byte ( *pci_bus, *pci_dfn, PCI_INTERRUPT_LINE, pci_irq);
+ pcibios_read_config_word ( *pci_bus, *pci_dfn, PCI_COMMAND, &pci_command);
+ pcibios_read_config_dword( *pci_bus, *pci_dfn, PCI_BASE_ADDRESS_0, pci_io_base);
+ pcibios_read_config_byte ( *pci_bus, *pci_dfn, PCI_LATENCY_TIMER, &pci_latency);
+
+ if (pci_latency < 0x10) {
+ pcibios_write_config_byte( *pci_bus, *pci_dfn, PCI_LATENCY_TIMER, 0xff);
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: Setting latency timer to max.\n");
+ }
+
+ /* Walk the BARs looking for the first IO-space mapping. */
+ for ( reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg +=4 ) {
+ pcibios_read_config_dword( *pci_bus, *pci_dfn, reg, pci_io_base);
+ if ((pci_command & PCI_COMMAND_IO) && (*pci_io_base & 0x3)) {
+ *pci_io_base &= PCI_BASE_ADDRESS_IO_MASK;
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: IO mapping is available at %x.\n", *pci_io_base);
+ break;
+ } else {
+ *pci_io_base = 0;
+ }
+ }
+
+ if ( *pci_io_base == 0 )
+ printk("TLAN: IO mapping not available, ignoring device.\n");
+
+ if ( ! ( pci_command & PCI_COMMAND_MASTER ) ) {
+ pcibios_write_config_word ( *pci_bus, *pci_dfn, PCI_COMMAND, pci_command | PCI_COMMAND_MASTER );
+ printk( "TLAN: Activating PCI bus mastering for this device.\n" );
+ }
+
+ pci_index++;
+
+ /* NOTE(review): when a device is found but has no usable IO
+  * BAR, control falls through and the outer for advances
+  * dl_index, so remaining instances of the same vendor/device
+  * id appear to be skipped -- confirm this is intended.
+  */
+ if ( *pci_io_base ) {
+ *dl_ix = dl_index;
+ return 1;
+ }
+
+ } else {
+ pci_index = 0;
+ }
+ }
+
+ return 0;
+
+} /* TLan_PciProbe */
+
+
+
+
+ /***************************************************************
+ * TLan_Init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ * and assignes the device's methods.
+ *
+ **************************************************************/
+
+int TLan_Init( struct device *dev )
+{
+ int dma_size;
+ int err;
+ int i;
+ TLanPrivateInfo *priv;
+
+ priv = (TLanPrivateInfo *) dev->priv;
+
+ err = check_region( dev->base_addr, 0x10 );
+ if ( err ) {
+ printk( "TLAN: %s: Io port region 0x%lx size 0x%x in use.\n",
+ dev->name,
+ dev->base_addr,
+ 0x10 );
+ return -EIO;
+ }
+ request_region( dev->base_addr, 0x10, TLanSignature );
+
+ /* With bounce buffers each list also gets a full frame buffer. */
+ if ( bbuf ) {
+ dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
+ * ( sizeof(TLanList) + TLAN_MAX_FRAME_SIZE );
+ } else {
+ dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
+ * ( sizeof(TLanList) );
+ }
+
+ /* BUG FIX: allocate 7 spare bytes so the 8-byte align-up of
+  * rxList below cannot run past the end of the allocation.
+  */
+ priv->dmaStorage = kmalloc( dma_size + 7, GFP_KERNEL | GFP_DMA );
+ if ( priv->dmaStorage == NULL ) {
+ printk( "TLAN: Could not allocate lists and buffers for %s.\n",
+ dev->name );
+ /* BUG FIX: give back the IO region claimed above, or no
+  * later open/probe attempt could ever claim it again.
+  */
+ release_region( dev->base_addr, 0x10 );
+ return -ENOMEM;
+ }
+ memset( priv->dmaStorage, 0, dma_size + 7 );
+ /* Align the first list on an 8-byte boundary. */
+ priv->rxList = (TLanList *)
+ ( ( ( (u32) priv->dmaStorage ) + 7 ) & 0xFFFFFFF8 );
+ priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
+
+ if ( bbuf ) {
+ priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
+ priv->txBuffer = priv->rxBuffer
+ + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
+ }
+
+ /* Read the 6-byte station address out of the EEPROM. */
+ err = 0;
+ for ( i = 0; i < 6 ; i++ )
+ err |= TLan_EeReadByte( dev,
+ (u8) priv->adapter->addrOfs + i,
+ (u8 *) &dev->dev_addr[i] );
+ if ( err ) {
+ printk( "TLAN: %s: Error reading MAC from eeprom: %d\n",
+ dev->name,
+ err );
+ }
+
+ dev->addr_len = 6;
+
+ dev->open = &TLan_Open;
+ dev->hard_start_xmit = &TLan_StartTx;
+ dev->stop = &TLan_Close;
+ dev->get_stats = &TLan_GetStats;
+ dev->set_multicast_list = &TLan_SetMulticastList;
+
+
+ return 0;
+
+} /* TLan_Init */
+
+
+
+
+ /***************************************************************
+ * TLan_Open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
+
+int TLan_Open( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int err;
+
+ /* Cache the chip revision; the EOC interrupt handlers only do
+  * extra work on pre-3.0 (tlanRev < 0x30) silicon.
+  */
+ priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
+ if ( priv->sa_int ) {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: Using SA_INTERRUPT\n" );
+ err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ | SA_INTERRUPT, TLanSignature, dev );
+ } else {
+ err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ, TLanSignature, dev );
+ }
+ if ( err ) {
+ printk( "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq );
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ TLan_ResetLists( dev );
+ TLan_ReadAndClearStats( dev, TLAN_IGNORE );
+ TLan_ResetAdapter( dev );
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Opened. TLAN Chip Rev: %x\n", dev->name, priv->tlanRev );
+
+ return 0;
+
+} /* TLan_Open */
+
+
+
+
+ /***************************************************************
+ * TLan_StartTx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+int TLan_StartTx( struct sk_buff *skb, struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ TLanList *tail_list;
+ u8 *tail_buffer;
+ int pad;
+
+ /* Drop (but "succeed") while the PHY is not yet usable. */
+ if ( ! priv->phyOnline ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: %s PHY is not ready\n", dev->name );
+ dev_kfree_skb( skb, FREE_WRITE );
+ return 0;
+ }
+
+ tail_list = priv->txList + priv->txTail;
+
+ /* Tail list still owned by the adapter: ring is full. */
+ if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail );
+ dev->tbusy = 1;
+ priv->txBusyCount++;
+ return 1;
+ }
+
+ tail_list->forward = 0;
+
+ if ( bbuf ) {
+ tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
+ memcpy( tail_buffer, skb->data, skb->len );
+ } else {
+ tail_list->buffer[0].address = virt_to_bus( skb->data );
+ /* Buffer slot 9 stashes the skb pointer so TLan_HandleTxEOF
+  * can free it once the frame has gone out.
+  */
+ tail_list->buffer[9].address = (u32) skb;
+ }
+
+ /* Short frames are padded up to the minimum with TLanPadBuffer. */
+ pad = TLAN_MIN_FRAME_SIZE - skb->len;
+
+ if ( pad > 0 ) {
+ tail_list->frameSize = (u16) skb->len + pad;
+ tail_list->buffer[0].count = (u32) skb->len;
+ tail_list->buffer[1].count = TLAN_LAST_BUFFER | (u32) pad;
+ tail_list->buffer[1].address = virt_to_bus( TLanPadBuffer );
+ } else {
+ tail_list->frameSize = (u16) skb->len;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+ tail_list->buffer[1].count = 0;
+ tail_list->buffer[1].address = 0;
+ }
+
+ /* Interrupts are blocked while the list is handed to the chip so
+  * the Tx EOF handler cannot see a half-linked chain.
+  */
+ cli();
+ tail_list->cStat = TLAN_CSTAT_READY;
+ if ( ! priv->txInProgress ) {
+ priv->txInProgress = 1;
+ outw( 0x4, dev->base_addr + TLAN_HOST_INT );
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
+ outl( virt_to_bus( tail_list ), dev->base_addr + TLAN_CH_PARM );
+ outl( TLAN_HC_GO | TLAN_HC_ACK, dev->base_addr + TLAN_HOST_CMD );
+ } else {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail );
+ if ( priv->txTail == 0 ) {
+ ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = virt_to_bus( tail_list );
+ } else {
+ ( priv->txList + ( priv->txTail - 1 ) )->forward = virt_to_bus( tail_list );
+ }
+ }
+ sti();
+
+ CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+
+ /* Bounce-buffer mode copied the data above, so the skb is done. */
+ if ( bbuf ) {
+ dev_kfree_skb( skb, FREE_WRITE );
+ }
+
+ dev->trans_start = jiffies;
+ return 0;
+
+} /* TLan_StartTx */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleInterrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ * regs ???
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ * acknowledges the interrupt to the adapter (thus
+ * re-enabling adapter interrupts).
+ *
+ **************************************************************/
+
+void TLan_HandleInterrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ u32 ack;
+ struct device *dev;
+ u32 host_cmd;
+ u16 host_int;
+ int type;
+
+ dev = (struct device *) dev_id;
+
+ cli();
+ if ( dev->interrupt ) {
+ printk( "TLAN: Re-entering interrupt handler for %s: %d.\n" , dev->name, dev->interrupt );
+ }
+ dev->interrupt++;
+
+ /* Read HOST_INT and write the value back to deactivate the
+  * interrupt; the type field selects the subhandler.
+  */
+ host_int = inw( dev->base_addr + TLAN_HOST_INT );
+ outw( host_int, dev->base_addr + TLAN_HOST_INT );
+
+ type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
+
+ /* Dispatch through TLanIntVector; a nonzero return is folded into
+  * an ACK command, re-enabling interrupts on the adapter.
+  */
+ ack = TLanIntVector[type]( dev, host_int );
+
+ if ( ack ) {
+ host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
+ outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+ }
+
+ dev->interrupt--;
+ sti();
+
+} /* TLan_HandleInterrupt */
+
+
+
+
+ /***************************************************************
+ * TLan_Close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ * its time as needed, and frees the irq it is using.
+ *
+ **************************************************************/
+
+int TLan_Close(struct device *dev)
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ /* Record final statistics, then put the adapter into reset. */
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ /* A non-NULL timer.function marks a pending driver timer. */
+ if ( priv->timer.function != NULL )
+ del_timer( &priv->timer );
+ free_irq( dev->irq, dev );
+ TLan_FreeLists( dev );
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: Device %s closed.\n", dev->name );
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+
+} /* TLan_Close */
+
+
+
+
+ /***************************************************************
+ * TLan_GetStats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ * This function updates the devices statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
+
+struct net_device_stats *TLan_GetStats( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int ix;
+
+ /* Fold the adapter's on-chip counters into the software stats
+  * before handing the structure back.
+  */
+ /* Should only read stats if open ? */
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: %s EOC count = %d\n", dev->name, priv->rxEocCount );
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: %s Busy count = %d\n", dev->name, priv->txBusyCount );
+
+ /* Optional diagnostic dumps, selected by the global debug mask. */
+ if ( debug & TLAN_DEBUG_GNRL ) {
+ TLan_PrintDio( dev->base_addr );
+ TLan_PhyPrint( dev );
+ }
+ if ( debug & TLAN_DEBUG_LIST ) {
+ for ( ix = 0; ix < TLAN_NUM_RX_LISTS; ix++ )
+ TLan_PrintList( priv->rxList + ix, "RX", ix );
+ for ( ix = 0; ix < TLAN_NUM_TX_LISTS; ix++ )
+ TLan_PrintList( priv->txList + ix, "TX", ix );
+ }
+
+ /* priv already points at dev->priv; no need to re-cast it. */
+ return &priv->stats;
+
+} /* TLan_GetStats */
+
+
+
+
+ /***************************************************************
+ * TLan_SetMulticastList
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ * mode is activated. Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
+
+void TLan_SetMulticastList( struct device *dev )
+{
+ struct dev_mc_list *dmi = dev->mc_list;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ int i;
+ u32 offset;
+ u8 tmp;
+
+ /* Promiscuous: set the CAF (capture-all-frames) bit in NET_CMD. */
+ if ( dev->flags & IFF_PROMISC ) {
+ tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+ } else {
+ tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
+ if ( dev->flags & IFF_ALLMULTI ) {
+ /* All-multicast: clear AREG_1-3 and open the whole hash. */
+ for ( i = 0; i < 3; i++ )
+ TLan_SetMac( dev, i + 1, NULL );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+ } else {
+ /* First three multicast addresses get the exact-match
+  * registers AREG_1-3; the rest go into the 64-bit hash.
+  */
+ for ( i = 0; i < dev->mc_count; i++ ) {
+ if ( i < 3 ) {
+ TLan_SetMac( dev, i + 1, (char *) &dmi->dmi_addr );
+ } else {
+ offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
+ if ( offset < 32 )
+ hash1 |= ( 1 << offset );
+ else
+ hash2 |= ( 1 << ( offset - 32 ) );
+ }
+ dmi = dmi->next;
+ }
+ /* Clear any exact-match slots left unused. */
+ for ( ; i < 3; i++ )
+ TLan_SetMac( dev, i + 1, NULL );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+ }
+ }
+
+} /* TLan_SetMulticastList */
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Interrupt Vectors and Table
+
+ Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
+ Programmer's Guide" for more informations on handling interrupts
+ generated by TLAN based adapters.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_HandleInvalid
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles invalid interrupts. This should
+ * never happen unless some other adapter is trying to use
+ * the IRQ line assigned to the device.
+ *
+ **************************************************************/
+
+u32 TLan_HandleInvalid( struct device *dev, u16 host_int )
+{
+ /* Dead store; presumably here to quiet unused-parameter warnings
+  * -- there is nothing to service for this vector.
+  */
+ host_int = 0;
+ /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */
+ return 0;
+
+} /* TLan_HandleInvalid */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleTxEOF
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ * contents of a buffer. It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+u32 TLan_HandleTxEOF( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int eoc = 0;
+ TLanList *head_list;
+ u32 ack = 1;
+
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", priv->txHead, priv->txTail );
+ host_int = 0;
+ head_list = priv->txList + priv->txHead;
+
+ /* Without bounce buffers the skb pointer was stashed in buffer
+  * slot 9 by TLan_StartTx(); free it now that the frame is out.
+  */
+ if ( ! bbuf ) {
+ dev_kfree_skb( (struct sk_buff *) head_list->buffer[9].address, FREE_WRITE );
+ head_list->buffer[9].address = 0;
+ }
+
+ if ( head_list->cStat & TLAN_CSTAT_EOC )
+ eoc = 1;
+ if (!(head_list->cStat & TLAN_CSTAT_FRM_CMP)) {
+ printk( "TLAN: Received interrupt for uncompleted TX frame.\n" );
+ }
+
+#if LINUX_KERNEL_VERSION > 0x20100
+ /* BUG FIX: stats is a structure member (TLan_GetStats returns
+  * &priv->stats), so it must be accessed with '.', not '->'.
+  */
+ priv->stats.tx_bytes += head_list->frameSize;
+#endif
+
+ /* Return the list to the driver and advance the head. */
+ head_list->cStat = TLAN_CSTAT_UNUSED;
+ dev->tbusy = 0;
+ CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
+ if ( eoc ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail );
+ head_list = priv->txList + priv->txHead;
+ if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->txInProgress = 0;
+ }
+ }
+
+ /* Drive the activity LED and (re)arm the timer that turns it off. */
+ if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
+ if ( priv->timer.function == NULL ) {
+ TLan_SetTimer( dev, TLAN_TIMER_ACT_DELAY, TLAN_TIMER_ACTIVITY );
+ } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
+ priv->timerSetAt = jiffies;
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleTxEOF */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleStatOverflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
+
+u32 TLan_HandleStatOverflow( struct device *dev, u16 host_int )
+{
+ /* One or more on-chip statistics registers reached half capacity;
+  * harvest them all into the driver's counters and ack.
+  */
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ host_int = 0;
+
+ return 1;
+
+} /* TLan_HandleStatOverflow */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleRxEOF
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines the bounce buffer the frame has
+ * been loaded into, creates a new sk_buff big enough to
+ * hold the frame, and sends it to protocol stack. It
+ * then resets the used buffer and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+u32 TLan_HandleRxEOF( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 ack = 1;
+ int eoc = 0;
+ u8 *head_buffer;
+ TLanList *head_list;
+ struct sk_buff *skb;
+ TLanList *tail_list;
+ void *t;
+
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail );
+ host_int = 0;
+ head_list = priv->rxList + priv->rxHead;
+ tail_list = priv->rxList + priv->rxTail;
+
+ if ( head_list->cStat & TLAN_CSTAT_EOC ) {
+ eoc = 1;
+ }
+
+ if (!(head_list->cStat & TLAN_CSTAT_FRM_CMP)) {
+ printk( "TLAN: Received interrupt for uncompleted RX frame.\n" );
+ } else if ( bbuf ) {
+ /* Bounce-buffer mode: copy the frame out into a fresh skb. */
+ skb = dev_alloc_skb( head_list->frameSize + 7 );
+ if ( skb == NULL ) {
+ printk( "TLAN: Couldn't allocate memory for received data.\n" );
+ } else {
+ head_buffer = priv->rxBuffer + ( priv->rxHead * TLAN_MAX_FRAME_SIZE );
+ skb->dev = dev;
+ skb_reserve( skb, 2 );
+ t = (void *) skb_put( skb, head_list->frameSize );
+
+#if LINUX_KERNEL_VERSION > 0x20100
+ /* BUG FIX: stats is a structure member (TLan_GetStats
+  * returns &priv->stats); access it with '.', not '->'.
+  */
+ priv->stats.rx_bytes += head_list->frameSize;
+#endif
+
+ memcpy( t, head_buffer, head_list->frameSize );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ }
+ } else {
+ /* No bounce buffers: hand the filled skb (stashed in buffer
+  * slot 9 by TLan_ResetLists) to the stack, then hook a fresh
+  * one into the list.
+  */
+ skb = (struct sk_buff *) head_list->buffer[9].address;
+ head_list->buffer[9].address = 0;
+ skb_trim( skb, head_list->frameSize );
+
+#if LINUX_KERNEL_VERSION > 0x20100
+ /* BUG FIX: '.' not '->' (see above). */
+ priv->stats.rx_bytes += head_list->frameSize;
+#endif
+
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+
+ skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
+ if ( skb == NULL ) {
+ printk( "TLAN: Couldn't allocate memory for received data.\n" );
+ /* If this ever happened it would be a problem */
+ } else {
+ skb->dev = dev;
+ skb_reserve( skb, 2 );
+ t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
+ head_list->buffer[0].address = virt_to_bus( t );
+ head_list->buffer[9].address = (u32) skb;
+ }
+ }
+
+ /* Recycle the head list to the tail of the Rx chain. */
+ head_list->forward = 0;
+ head_list->frameSize = TLAN_MAX_FRAME_SIZE;
+ head_list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ tail_list->forward = virt_to_bus( head_list );
+
+ CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
+ CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
+
+ if ( eoc ) {
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail );
+ head_list = priv->rxList + priv->rxHead;
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rxEocCount++;
+ }
+
+ /* Drive the activity LED and (re)arm the timer that turns it off. */
+ if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
+ if ( priv->timer.function == NULL ) {
+ TLan_SetTimer( dev, TLAN_TIMER_ACT_DELAY, TLAN_TIMER_ACTIVITY );
+ } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
+ priv->timerSetAt = jiffies;
+ }
+ }
+
+ dev->last_rx = jiffies;
+
+ return ack;
+
+} /* TLan_HandleRxEOF */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleDummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
+
+u32 TLan_HandleDummy( struct device *dev, u16 host_int )
+{
+ /* A test (Req_Int) interrupt needs no service beyond an ack. */
+ printk( "TLAN: Test interrupt on %s.\n", dev->name );
+ host_int = 0;
+ return 1;
+
+} /* TLan_HandleDummy */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleTxEOC
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
+
+u32 TLan_HandleTxEOC( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ TLanList *head_list;
+ u32 ack = 1;
+
+ host_int = 0;
+ /* Only pre-3.0 chips deliver Tx EOC interrupts; later chips have
+  * them masked via INTDIS, so this is a no-op there.
+  */
+ if ( priv->tlanRev < 0x30 ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail );
+ head_list = priv->txList + priv->txHead;
+ /* Restart the channel if another list is ready to go. */
+ if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->txInProgress = 0;
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleTxEOC */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleStatusCheck
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
+
+u32 TLan_HandleStatusCheck( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 ack;
+ u32 error;
+ u8 net_sts;
+ u32 phy;
+ u16 tlphy_ctl;
+ u16 tlphy_sts;
+
+ ack = 1;
+ if ( host_int & TLAN_HI_IV_MASK ) {
+ /* Adapter check: log the error code and fully reset the
+  * adapter and its lists. Returning 0 skips the ack (the
+  * reset path takes over).
+  */
+ error = inl( dev->base_addr + TLAN_CH_PARM );
+ printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ TLan_FreeLists( dev );
+ TLan_ResetLists( dev );
+ TLan_ResetAdapter( dev );
+ dev->tbusy = 0;
+ ack = 0;
+ } else {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Status Check\n", dev->name );
+ phy = priv->phy[priv->phyNum];
+
+ /* Clear the network status register by writing it back. */
+ net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
+ if ( net_sts ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Net_Sts = %x\n", dev->name, (unsigned) net_sts );
+ }
+ if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
+ /* Toggle TLAN_TC_SWAPOL to track the PHY's reported
+  * polarity-OK status.
+  */
+ if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ }
+
+ if (debug) {
+ TLan_PhyPrint( dev );
+ }
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleStatusCheck */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleRxEOC
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * CSTAT member or a INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
+
+u32 TLan_HandleRxEOC( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ TLanList *head_list;
+ u32 ack = 1;
+
+ host_int = 0;
+ /* Only pre-3.0 chips deliver Rx EOC interrupts; restart the Rx
+  * channel at the current head list.
+  */
+ if ( priv->tlanRev < 0x30 ) {
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail );
+ head_list = priv->rxList + priv->rxHead;
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rxEocCount++;
+ }
+
+ return ack;
+
+} /* TLan_HandleRxEOC */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Timer Function
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_Timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ * delaying for autonegotionation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 10 jiffies produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+void TLan_Timer( unsigned long data )
+{
+ struct device *dev = (struct device *) data;
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 elapsed;
+
+ /* NULL function marks the timer slot free; the handlers below may
+  * re-arm it via TLan_SetTimer.
+  */
+ priv->timer.function = NULL;
+
+ switch ( priv->timerType ) {
+ case TLAN_TIMER_PHY_PDOWN:
+ TLan_PhyPowerDown( dev );
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ TLan_PhyPowerUp( dev );
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ TLan_PhyReset( dev );
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ TLan_PhyStartLink( dev );
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ TLan_PhyFinishAutoNeg( dev );
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ TLan_FinishReset( dev );
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ /* cli/sti guard the test-and-set of timer.function against
+  * the interrupt handlers, which also touch it.
+  */
+ cli();
+ if ( priv->timer.function == NULL ) {
+ elapsed = jiffies - priv->timerSetAt;
+ if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ } else {
+ /* Activity seen recently: re-arm to switch the LED
+  * off later.  NOTE(review): sti() runs again after
+  * the switch falls through -- redundant but appears
+  * harmless.
+  */
+ priv->timer.function = &TLan_Timer;
+ priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY;
+ sti();
+ add_timer( &priv->timer );
+ }
+ }
+ sti();
+ break;
+ default:
+ break;
+ }
+
+} /* TLan_Timer */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Adapter Related Routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_ResetLists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ * structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+void TLan_ResetLists( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int i;
+ TLanList *list;
+ struct sk_buff *skb;
+ void *t = NULL;
+
+ priv->txHead = 0;
+ priv->txTail = 0;
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
+ list = priv->txList + i;
+ list->cStat = TLAN_CSTAT_UNUSED;
+ if ( bbuf ) {
+ /* bbuf: presumably the driver's static "big buffer" option — each
+ * list gets a fixed slice of one contiguous buffer; TODO confirm. */
+ list->buffer[0].address = virt_to_bus( priv->txBuffer + ( i * TLAN_MAX_FRAME_SIZE ) );
+ } else {
+ list->buffer[0].address = 0;
+ }
+ list->buffer[2].count = 0;
+ list->buffer[2].address = 0;
+ }
+
+ priv->rxHead = 0;
+ priv->rxTail = TLAN_NUM_RX_LISTS - 1;
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
+ list = priv->rxList + i;
+ list->cStat = TLAN_CSTAT_READY;
+ list->frameSize = TLAN_MAX_FRAME_SIZE;
+ list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ if ( bbuf ) {
+ list->buffer[0].address = virt_to_bus( priv->rxBuffer + ( i * TLAN_MAX_FRAME_SIZE ) );
+ } else {
+ skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
+ if ( skb == NULL ) {
+ printk( "TLAN: Couldn't allocate memory for received data.\n" );
+ /* If this ever happened it would be a problem */
+ /* NOTE(review): t keeps its previous value (NULL on a first-pass
+ * failure) yet virt_to_bus( t ) is still stored below, so the
+ * list is marked READY with a bogus DMA address — confirm the
+ * channel is never started in that state. */
+ } else {
+ skb->dev = dev;
+ /* 2-byte pad so the 14-byte Ethernet header leaves the IP
+ * header word-aligned. */
+ skb_reserve( skb, 2 );
+ t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
+ }
+ list->buffer[0].address = virt_to_bus( t );
+ /* buffer[9] is apparently repurposed as a software slot to stash
+ * the skb pointer for TLan_FreeLists (u32 cast => 32-bit only). */
+ list->buffer[9].address = (u32) skb;
+ }
+ list->buffer[1].count = 0;
+ list->buffer[1].address = 0;
+ /* Chain the lists; the last one terminates the chain with 0. */
+ if ( i < TLAN_NUM_RX_LISTS - 1 )
+ list->forward = virt_to_bus( list + 1 );
+ else
+ list->forward = 0;
+ }
+
+} /* TLan_ResetLists */
+
+
+/* Release the sk_buffs stashed in buffer[9].address of the tx/rx lists
+ * (see TLan_ResetLists).  Only relevant when the static big buffers
+ * (bbuf) are not in use.
+ */
+void TLan_FreeLists( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int i;
+ TLanList *list;
+ struct sk_buff *skb;
+
+ if ( ! bbuf ) {
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
+ list = priv->txList + i;
+ skb = (struct sk_buff *) list->buffer[9].address;
+ if ( skb ) {
+ dev_kfree_skb( skb, FREE_WRITE );
+ list->buffer[9].address = 0;
+ }
+ }
+
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
+ list = priv->rxList + i;
+ skb = (struct sk_buff *) list->buffer[9].address;
+ if ( skb ) {
+ dev_kfree_skb( skb, FREE_READ );
+ list->buffer[9].address = 0;
+ }
+ }
+ }
+
+} /* TLan_FreeLists */
+
+
+
+
+ /***************************************************************
+ * TLan_PrintDio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
+
+void TLan_PrintDio( u16 io_base )
+{
+ u32 data0, data1;
+ int i;
+
+ printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", io_base );
+ printk( "TLAN: Off. +0 +4\n" );
+ /* Dump DIO space 0x00-0x48 inclusive, two 32-bit words per line. */
+ for ( i = 0; i < 0x4C; i+= 8 ) {
+ data0 = TLan_DioRead32( io_base, i );
+ data1 = TLan_DioRead32( io_base, i + 0x4 );
+ printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
+ }
+
+} /* TLan_PrintDio */
+
+
+
+
+ /***************************************************************
+ * TLan_PrintList
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the TLanList structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
+
+void TLan_PrintList( TLanList *list, char *type, int num)
+{
+ int i;
+
+ printk( "TLAN: %s List %d at 0x%08x\n", type, num, (u32) list );
+ printk( "TLAN: Forward = 0x%08x\n", list->forward );
+ printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
+ printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
+ /* Only the first two of the ten buffer entries are dumped; the full
+ * loop is intentionally kept below, commented out. */
+ /* for ( i = 0; i < 10; i++ ) { */
+ for ( i = 0; i < 2; i++ ) {
+ printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", i, list->buffer[i].count, list->buffer[i].address );
+ }
+
+} /* TLan_PrintList */
+
+
+
+
+ /***************************************************************
+ * TLan_ReadAndClearStats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to which to read stats.
+ * record Flag indicating whether to add
+ *
+ * This functions reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
+
+void TLan_ReadAndClearStats( struct device *dev, int record )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 tx_good, tx_under;
+ u32 rx_good, rx_over;
+ u32 def_tx, crc, code;
+ u32 multi_col, single_col;
+ u32 excess_col, late_col, loss;
+
+ /* Each statistics register is read one byte at a time through the DIO
+ * data window; the good-frame counters are 24 bits wide with a related
+ * error count packed into the top byte. */
+ outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ tx_good = inb( dev->base_addr + TLAN_DIO_DATA );
+ tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
+ tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ rx_good = inb( dev->base_addr + TLAN_DIO_DATA );
+ rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
+ rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
+ def_tx = inb( dev->base_addr + TLAN_DIO_DATA );
+ def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+ code = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ multi_col = inb( dev->base_addr + TLAN_DIO_DATA );
+ multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+ single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
+
+ outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
+ late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
+ loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+
+ /* record == TLAN_RECORD accumulates into the net stats; TLAN_IGNORE
+ * just discards the (now cleared) counters. */
+ if ( record ) {
+ priv->stats.rx_packets += rx_good;
+ priv->stats.rx_errors += rx_over + crc + code;
+ priv->stats.tx_packets += tx_good;
+ priv->stats.tx_errors += tx_under + loss;
+ priv->stats.collisions += multi_col + single_col + excess_col + late_col;
+
+ priv->stats.rx_over_errors += rx_over;
+ priv->stats.rx_crc_errors += crc;
+ priv->stats.rx_frame_errors += code;
+
+ priv->stats.tx_aborted_errors += tx_under;
+ priv->stats.tx_carrier_errors += loss;
+ }
+
+} /* TLan_ReadAndClearStats */
+
+
+
+
+ /***************************************************************
+ * TLan_Reset
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ * This function resets the adapter and it's physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
+
+void
+TLan_ResetAdapter( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int i;
+ u32 addr;
+ u32 data;
+ u8 data8;
+
+ priv->tlanFullDuplex = FALSE;
+/* 1. Assert reset bit. */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_AD_RST;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+ udelay(1000);
+
+/* 2. Turn off interrupts. ( Probably isn't necessary ) */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_INT_OFF;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+/* 3. Clear AREGs and HASHs. */
+
+ for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
+ TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
+ }
+
+/* 4. Setup NetConfig register. */
+
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+
+/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
+
+ outl( TLAN_HC_LD_TMR | 0x0, dev->base_addr + TLAN_HOST_CMD );
+ outl( TLAN_HC_LD_THR | 0x1, dev->base_addr + TLAN_HOST_CMD );
+
+/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
+
+ outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+ addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+ TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+
+/* 7. Setup the remaining registers. */
+
+ if ( priv->tlanRev >= 0x30 ) {
+ data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
+ TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+ }
+ TLan_PhyDetect( dev );
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
+ if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+ data |= TLAN_NET_CFG_BIT;
+ /* ACOMMIT values: 0x0a = AUI, 0x00 = full duplex, 0x08 = half
+ * duplex — presumably per the ThunderLAN Programmer's Guide;
+ * confirm against the register description. */
+ if ( priv->aui == 1 ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
+ } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
+ priv->tlanFullDuplex = TRUE;
+ } else {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+ }
+ }
+ /* phyNum == 0 means the internal PHY is in use; keep it enabled. */
+ if ( priv->phyNum == 0 ) {
+ data |= TLAN_NET_CFG_PHY_EN;
+ }
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ TLan_FinishReset( dev );
+ } else {
+ TLan_PhyPowerDown( dev );
+ }
+
+} /* TLan_ResetAdapter */
+
+
+
+
+/* Final stage of the adapter reset sequence: program the network command,
+ * mask, and max-rx registers, check PHY link status, and either start the
+ * receive channel or re-arm a retry timer.
+ */
+void
+TLan_FinishReset( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u8 data;
+ u32 phy;
+ u8 sio;
+ u16 status;
+ u16 tlphy_ctl;
+
+ phy = priv->phy[priv->phyNum];
+
+ data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
+ if ( priv->tlanFullDuplex ) {
+ data |= TLAN_NET_CMD_DUPLEX;
+ }
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+ data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
+ if ( priv->phyNum == 0 ) {
+ data |= TLAN_NET_MASK_MASK7;
+ }
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
+ TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, TLAN_MAX_FRAME_SIZE );
+
+ if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) {
+ /* No link management possible; pretend the link is up. */
+ status = MII_GS_LINK;
+ printk( "TLAN: %s: Link forced.\n", dev->name );
+ } else {
+ /* Read twice: link status is latched, so the first read clears a
+ * stale value — presumably; confirm against the MII spec. */
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ udelay( 1000 );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ if ( status & MII_GS_LINK ) {
+ printk( "TLAN: %s: Link active.\n", dev->name );
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ }
+ }
+
+ /* Internal PHY: enable its interrupt and the MII interrupt line. */
+ if ( priv->phyNum == 0 ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
+ tlphy_ctl |= TLAN_TC_INTEN;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
+ sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
+ sio |= TLAN_NET_SIO_MINTEN;
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
+ }
+
+ if ( status & MII_GS_LINK ) {
+ TLan_SetMac( dev, 0, dev->dev_addr );
+ priv->phyOnline = 1;
+ outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
+ if ( debug >= 1 ) {
+ outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
+ }
+ /* Point the chip at the rx list chain and start the receiver. */
+ outl( virt_to_bus( priv->rxList ), dev->base_addr + TLAN_CH_PARM );
+ outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+ } else {
+ printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name );
+ /* 1000 jiffies; the "10 secs" above assumes HZ=100 — confirm. */
+ TLan_SetTimer( dev, 1000, TLAN_TIMER_FINISH_RESET );
+ return;
+ }
+
+} /* TLan_FinishReset */
+
+
+
+
+ /***************************************************************
+ * TLan_SetMac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ * IE, it isn't in ascii.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
+
+void TLan_SetMac( struct device *dev, int areg, char *mac )
+{
+ int i;
+
+ /* Each AREG is 6 bytes wide; scale the register index to a byte offset. */
+ areg *= 6;
+
+ if ( mac != NULL ) {
+ for ( i = 0; i < 6; i++ )
+ TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, mac[i] );
+ } else {
+ /* NULL mac clears the register (see function header comment). */
+ for ( i = 0; i < 6; i++ )
+ TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, 0 );
+ }
+
+} /* TLan_SetMac */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver PHY Layer Routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+ /*********************************************************************
+ * TLan_PhyPrint
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers a PHY (aka tranceiver).
+ *
+ ********************************************************************/
+
+void TLan_PhyPrint( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 i, data0, data1, data2, data3, phy;
+
+ phy = priv->phy[priv->phyNum];
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
+ } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
+ printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
+ printk( "TLAN: Off. +0 +1 +2 +3 \n" );
+ /* Dump MII registers 0x00-0x1F, four per line. */
+ for ( i = 0; i < 0x20; i+= 4 ) {
+ printk( "TLAN: 0x%02x", i );
+ TLan_MiiReadReg( dev, phy, i, &data0 );
+ printk( " 0x%04hx", data0 );
+ TLan_MiiReadReg( dev, phy, i + 1, &data1 );
+ printk( " 0x%04hx", data1 );
+ TLan_MiiReadReg( dev, phy, i + 2, &data2 );
+ printk( " 0x%04hx", data2 );
+ TLan_MiiReadReg( dev, phy, i + 3, &data3 );
+ printk( " 0x%04hx\n", data3 );
+ }
+ } else {
+ printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
+ }
+
+} /* TLan_PhyPrint */
+
+
+
+
+ /*********************************************************************
+ * TLan_PhyDetect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ * for which the PHY needs determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+ * (eg, AUI/Thinnet). This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0) if it exists).
+ *
+ ********************************************************************/
+
+void TLan_PhyDetect( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 control;
+ u16 hi;
+ u16 lo;
+ u32 phy;
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ priv->phyNum = 0xFFFF;
+ return;
+ }
+
+ /* The internal PHY, when present, answers at TLAN_PHY_MAX_ADDR;
+ * 0xFFFF from the ID register means nothing responded. */
+ TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+
+ if ( hi != 0xFFFF ) {
+ priv->phy[0] = TLAN_PHY_MAX_ADDR;
+ } else {
+ priv->phy[0] = TLAN_PHY_NONE;
+ }
+
+ /* Scan the whole MII address range; the first responding address other
+ * than TLAN_PHY_MAX_ADDR is taken as the external PHY. */
+ priv->phy[1] = TLAN_PHY_NONE;
+ for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
+ if ( ( control != 0xFFFF ) || ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: PHY found at %02x %04x %04x %04x\n", phy, control, hi, lo );
+ if ( ( priv->phy[1] == TLAN_PHY_NONE ) && ( phy != TLAN_PHY_MAX_ADDR ) ) {
+ priv->phy[1] = phy;
+ }
+ }
+ }
+
+ /* Prefer the external PHY over the internal one. */
+ if ( priv->phy[1] != TLAN_PHY_NONE ) {
+ priv->phyNum = 1;
+ } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
+ priv->phyNum = 0;
+ } else {
+ /* NOTE(review): priv->phyNum is left unchanged here; presumably a
+ * caller checks for this condition — confirm. */
+ printk( "TLAN: Cannot initialize device, no PHY was found!\n" );
+ }
+
+} /* TLan_PhyDetect */
+
+
+
+
+/* Put the active PHY (and the external PHY too, when the internal one is
+ * active and the internal-10 fallback is not in use) into
+ * power-down/isolate mode, then schedule power-up via the driver timer.
+ */
+void TLan_PhyPowerDown( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 value;
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Powering down PHY(s).\n", dev->name );
+ value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
+ TLan_MiiSync( dev->base_addr );
+ TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
+ if ( ( priv->phyNum == 0 ) && ( priv->phy[1] != TLAN_PHY_NONE ) && ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
+ TLan_MiiSync( dev->base_addr );
+ TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+ }
+
+ /* Wait for 5 jiffies (50 ms) and powerup
+ * This is abitrary. It is intended to make sure the
+ * tranceiver settles.
+ */
+ TLan_SetTimer( dev, 5, TLAN_TIMER_PHY_PUP );
+
+} /* TLan_PhyPowerDown */
+
+
+
+
+/* Take the active PHY out of power-down (loopback kept on), then schedule
+ * the PHY reset stage via the driver timer.
+ */
+void TLan_PhyPowerUp( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 value;
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Powering up PHY.\n", dev->name );
+ TLan_MiiSync( dev->base_addr );
+ value = MII_GC_LOOPBK;
+ TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
+
+ /* Wait for 50 jiffies (500 ms) and reset the
+ * tranceiver. The TLAN docs say both 50 ms and
+ * 500 ms, so do the longer, just in case
+ */
+ TLan_SetTimer( dev, 50, TLAN_TIMER_PHY_RESET );
+
+} /* TLan_PhyPowerUp */
+
+
+
+
+/* Reset the active PHY and wait for the reset bit to self-clear, then
+ * schedule the link-start stage via the driver timer.
+ */
+void TLan_PhyReset( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 phy;
+ u16 value;
+
+ phy = priv->phy[priv->phyNum];
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Reseting PHY.\n", dev->name );
+ TLan_MiiSync( dev->base_addr );
+ value = MII_GC_LOOPBK | MII_GC_RESET;
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
+ /* NOTE(review): unbounded busy-wait — a PHY that never clears
+ * MII_GC_RESET would hang here; consider a bounded retry. */
+ while ( value & MII_GC_RESET ) {
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
+ }
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0 );
+
+ /* Wait for 50 jiffies (500 ms) and initialize.
+ * I don't remember why I wait this long.
+ */
+ TLan_SetTimer( dev, 50, TLAN_TIMER_PHY_START_LINK );
+
+} /* TLan_PhyReset */
+
+
+
+
+/* Try to bring the link up on the current PHY: start autonegotiation when
+ * the PHY supports it and no speed/duplex/AUI override is configured;
+ * otherwise force the configured mode, then schedule the next stage.
+ */
+void TLan_PhyStartLink( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 ability;
+ u16 control;
+ u16 data;
+ u16 phy;
+ u16 status;
+ u16 tctl;
+
+ phy = priv->phy[priv->phyNum];
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Trying to activate link.\n", dev->name );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ if ( ( status & MII_GS_AUTONEG ) &&
+ ( priv->duplex == TLAN_DUPLEX_DEFAULT ) &&
+ ( priv->speed == TLAN_SPEED_DEFAULT ) &&
+ ( ! priv->aui ) ) {
+ /* MII status bits 11-15 report the PHY's supported modes. */
+ ability = status >> 11;
+
+ /* NOTE(review): the enclosing if already requires speed and duplex
+ * to be the DEFAULT values, so the four masking branches below can
+ * never trigger — dead code as written. */
+ if ( priv->speed == TLAN_SPEED_10 ) {
+ ability &= 0x0003;
+ } else if ( priv->speed == TLAN_SPEED_100 ) {
+ ability &= 0x001C;
+ }
+
+ if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ ability &= 0x000A;
+ } else if ( priv->duplex == TLAN_DUPLEX_HALF ) {
+ ability &= 0x0005;
+ }
+
+ /* Advertise the abilities (shifted into AN-advertise bit positions,
+ * selector field = 1), then enable and restart autonegotiation. */
+ TLan_MiiWriteReg( dev, phy, MII_AN_ADV, ( ability << 5 ) | 1 );
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+
+ /* Wait for 400 jiffies (4 sec) for autonegotiation
+ * to complete. The max spec time is less than this
+ * but the card need additional time to start AN.
+ * .5 sec should be plenty extra.
+ */
+ printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
+ TLan_SetTimer( dev, 400, TLAN_TIMER_PHY_FINISH_AN );
+ return;
+ }
+
+ if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
+ /* AUI requires the internal PHY; switch to it and restart the
+ * power-down/up sequence on it. */
+ priv->phyNum = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
+ TLan_SetTimer( dev, 4, TLAN_TIMER_PHY_PDOWN );
+ return;
+ } else if ( priv->phyNum == 0 ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
+ if ( priv->aui ) {
+ tctl |= TLAN_TC_AUISEL;
+ } else {
+ tctl &= ~TLAN_TC_AUISEL;
+ /* Force the configured speed/duplex on the internal PHY. */
+ control = 0;
+ if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ control |= MII_GC_DUPLEX;
+ priv->tlanFullDuplex = TRUE;
+ }
+ if ( priv->speed == TLAN_SPEED_100 ) {
+ control |= MII_GC_SPEEDSEL;
+ }
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
+ }
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+ }
+
+ /* Wait for 100 jiffies (1 sec) to give the tranceiver time
+ * to establish link.
+ */
+ TLan_SetTimer( dev, 100, TLAN_TIMER_FINISH_RESET );
+
+} /* TLan_PhyStartLink */
+
+
+
+
+/* Check whether autonegotiation finished; if so, derive the negotiated
+ * duplex from the advertised/partner ability registers and schedule the
+ * final reset stage, otherwise re-arm the timer to wait longer.
+ */
+void TLan_PhyFinishAutoNeg( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 an_adv;
+ u16 an_lpa;
+ u16 data;
+ u16 mode;
+ u16 phy;
+ u16 status;
+
+ phy = priv->phy[priv->phyNum];
+
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+ /* Wait for 800 jiffies (8 sec) to give the process
+ * more time. Perhaps we should fail after a while.
+ */
+ printk( "TLAN: Giving autonegotiation more time.\n" );
+ TLan_SetTimer( dev, 800, TLAN_TIMER_PHY_FINISH_AN );
+ return;
+ }
+
+ printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
+ TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
+ TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+ /* Common abilities = advertised AND partner, ability bits 5-9.  Per the
+ * MII autoneg bit layout, 0x0100 = 100baseTX-FD and 0x0040 = 10baseT-FD
+ * (full duplex unless the link came up as 100baseTX half). */
+ mode = an_adv & an_lpa & 0x03E0;
+ if ( mode & 0x0100 ) {
+ priv->tlanFullDuplex = TRUE;
+ } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
+ priv->tlanFullDuplex = TRUE;
+ }
+
+ /* No 100Mb mode negotiated and the adapter prefers its internal 10Mb
+ * PHY: switch to it and restart the power-down/up sequence. */
+ if ( ( ! ( mode & 0x0180 ) ) && ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && ( priv->phyNum != 0 ) ) {
+ priv->phyNum = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
+ TLan_SetTimer( dev, 40, TLAN_TIMER_PHY_PDOWN );
+ return;
+ }
+
+ if ( priv->phyNum == 0 ) {
+ if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX );
+ printk( "TLAN: Starting internal PHY with DUPLEX\n" );
+ } else {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
+ printk( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ }
+ }
+
+ /* Wait for 10 jiffies (100 ms). No reason in partiticular.
+ */
+ TLan_SetTimer( dev, 10, TLAN_TIMER_FINISH_RESET );
+
+} /* TLan_PhyFinishAutoNeg */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver MII Routines
+
+ These routines are based on the information in Chap. 2 of the
+ "ThunderLAN Programmer's Guide", pp. 15-24.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_MiiReadReg
+ *
+ * Returns:
+ * 0 if ack received ok
+ * 1 otherwise.
+ *
+ * Parms:
+ * dev The device structure containing
+ * The io address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retreived.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retreive the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+int TLan_MiiReadReg( struct device *dev, u16 phy, u16 reg, u16 *val )
+{
+ u8 nack;
+ u16 sio, tmp;
+ u32 i;
+ int err;
+ int minten;
+
+ err = FALSE;
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* dev->interrupt doubles as a cli() nesting counter: only the
+ * outermost caller disables interrupts. */
+ if ( dev->interrupt == 0 )
+ cli();
+ dev->interrupt++;
+
+ TLan_MiiSync(dev->base_addr);
+
+ /* Suppress MII interrupts while bit-banging; restore afterwards. */
+ minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
+ if ( minten )
+ TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */
+ TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
+ TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+
+
+ TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */
+
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */
+
+ nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */
+ if (nack) { /* No ACK, so fake it */
+ /* Clock out 16 dummy bits so the PHY's frame state machine
+ * completes, then report 0xffff to the caller. */
+ for (i = 0; i < 16; i++) {
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ }
+ tmp = 0xffff;
+ err = TRUE;
+ } else { /* ACK, so read data */
+ /* Shift the 16 data bits in, MSB first. */
+ for (tmp = 0, i = 0x8000; i; i >>= 1) {
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
+ if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+ tmp |= i;
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ }
+ }
+
+
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+
+ if ( minten )
+ TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+
+ *val = tmp;
+
+ dev->interrupt--;
+ if ( dev->interrupt == 0 )
+ sti();
+
+ return err;
+
+} /* TLan_MiiReadReg */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiSendData
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * dev The address of the PHY to be queried.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends on sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
+
+void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+{
+ u16 sio;
+ u32 i;
+
+ if ( num_bits == 0 )
+ return;
+
+ outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+ TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+
+ /* Shift the bits out MSB first: data valid while the clock rises. */
+ for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
+ /* NOTE(review): the TLan_GetBit calls here discard their result —
+ * they appear to serve only as bus-timing delays; confirm. */
+ TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ if ( data & i )
+ TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+ else
+ TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ }
+
+} /* TLan_MiiSendData */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiSync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This functions syncs all PHYs in terms of the MII configuration
+ * bus.
+ *
+ **************************************************************/
+
+void TLan_MiiSync( u16 base_port )
+{
+ int i;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* Release the transmitter and issue 32 clock cycles so every PHY's
+ * MII management state machine is re-synchronized. */
+ TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
+ for ( i = 0; i < 32; i++ ) {
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ }
+
+} /* TLan_MiiSync */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiWriteReg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value from the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
+
+void TLan_MiiWriteReg( struct device *dev, u16 phy, u16 reg, u16 val )
+{
+ u16 sio;
+ int minten;
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* dev->interrupt doubles as a cli() nesting counter: only the
+ * outermost caller disables interrupts. */
+ if ( dev->interrupt == 0 )
+ cli();
+ dev->interrupt++;
+
+ TLan_MiiSync( dev->base_addr );
+
+ /* Suppress MII interrupts while bit-banging; restore afterwards. */
+ minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
+ if ( minten )
+ TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
+ TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+
+ /* NOTE(review): the "ACK" below is the write-frame turnaround pattern
+ * ( 10b ) of the MII management protocol, not a true acknowledge. */
+ TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */
+ TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */
+
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+
+ if ( minten )
+ TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+
+ dev->interrupt--;
+ if ( dev->interrupt == 0 )
+ sti();
+
+} /* TLan_MiiWriteReg */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Eeprom routines
+
+ The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+ EEPROM. These functions are based on information in Microchip's
+ data sheet. I don't know how well this functions will work with
+ other EEPROMs.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_EeSendStart
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+void TLan_EeSendStart( u16 io_base )
+{
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* I2C-style START condition: data falls while the clock is high. */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+
+} /* TLan_EeSendStart */
+
+
+
+
+ /***************************************************************
+ * TLan_EeSendByte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is sent after the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+{
+ int err;
+ u8 place;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* Assume clock is low, tx is enabled; */
+ /* Shift the byte out MSB first. */
+ for ( place = 0x80; place != 0; place >>= 1 ) {
+ if ( place & data )
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ else
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ }
+ /* Release the data line and clock in the acknowledge bit:
+ * 0 = acked, 1 = no ack (returned in err). */
+ TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+
+ if ( ( ! err ) && stop ) {
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ }
+
+ return ( err );
+
+} /* TLan_EeSendByte */
+
+
+
+
+ /***************************************************************
+ * TLan_EeReceiveByte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends and ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+{
+ u8 place;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+ *data = 0;
+
+ /* Assume clock is low, tx is enabled; */
+ /* Release the data line and shift the byte in, MSB first. */
+ TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
+ for ( place = 0x80; place; place >>= 1 ) {
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+ *data |= place;
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ }
+
+ /* Either acknowledge (more bytes wanted) or send no-ack plus an
+ * I2C-style STOP condition to end the transfer. */
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ if ( ! stop ) {
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ } else {
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ }
+
+} /* TLan_EeReceiveByte */
+
+
+
+
+ /***************************************************************
+ * TLan_EeReadByte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occured.
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from an byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+/* Read one byte from the serial EEPROM at ee_addr into *data.
+ *
+ * Returns 0 on success, or the 1-based stage (1-3) whose EEPROM byte was
+ * not acknowledged.  Interrupts are disabled around the bit-banged
+ * transaction using the dev->interrupt nesting counter; all exit paths
+ * go through the common cleanup so the counter and interrupt state are
+ * always restored (the error paths previously returned with interrupts
+ * still disabled and the counter incremented).
+ */
+int TLan_EeReadByte( struct device *dev, u8 ee_addr, u8 *data )
+{
+ int err;
+ int ret = 0;
+
+ if ( dev->interrupt == 0 )
+ cli();
+ dev->interrupt++;
+
+ TLan_EeSendStart( dev->base_addr );
+ err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); /* device select, write */
+ if (err) {
+ ret = 1;
+ goto done;
+ }
+ err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); /* byte address */
+ if (err) {
+ ret = 2;
+ goto done;
+ }
+ TLan_EeSendStart( dev->base_addr );
+ err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); /* device select, read */
+ if (err) {
+ ret = 3;
+ goto done;
+ }
+ TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+
+done:
+ dev->interrupt--;
+ if ( dev->interrupt == 0 )
+ sti();
+
+ return ret;
+
+} /* TLan_EeReadByte */
+
+
+
+
+
diff --git a/linux/src/drivers/net/tlan.h b/linux/src/drivers/net/tlan.h
new file mode 100644
index 0000000..a66e26c
--- /dev/null
+++ b/linux/src/drivers/net/tlan.h
@@ -0,0 +1,525 @@
+#ifndef TLAN_H
+#define TLAN_H
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.h
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ ** This file is best viewed/edited with tabstop=4, columns>=132
+ *
+ ********************************************************************/
+
+
+#include <asm/io.h>
+#include <asm/types.h>
+#include <linux/netdevice.h>
+
+#if LINUX_VERSION_CODE <= 0x20100
+#define net_device_stats enet_statistics
+#endif
+
+
+
+
+ /*****************************************************************
+ * TLan Definitions
+ *
+ ****************************************************************/
+
+#define FALSE 0
+#define TRUE 1
+
+#define TLAN_MIN_FRAME_SIZE 64
+#define TLAN_MAX_FRAME_SIZE 1600
+
+#define TLAN_NUM_RX_LISTS 4
+#define TLAN_NUM_TX_LISTS 8
+
+#define TLAN_IGNORE 0
+#define TLAN_RECORD 1
+
+#define TLAN_DBG(lvl, format, args...) if (debug&lvl) printk( format, ##args );
+#define TLAN_DEBUG_GNRL 0x0001
+#define TLAN_DEBUG_TX 0x0002
+#define TLAN_DEBUG_RX 0x0004
+#define TLAN_DEBUG_LIST 0x0008
+
+
+
+
+ /*****************************************************************
+ * Device Identification Definitions
+ *
+ ****************************************************************/
+
+#define PCI_DEVICE_ID_NETELLIGENT_10 0xAE34
+#define PCI_DEVICE_ID_NETELLIGENT_10_100 0xAE32
+#define PCI_DEVICE_ID_NETFLEX_3P_INTEGRATED 0xAE35
+#define PCI_DEVICE_ID_NETFLEX_3P 0xF130
+#define PCI_DEVICE_ID_NETFLEX_3P_BNC 0xF150
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_PROLIANT 0xAE43
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_DUAL 0xAE40
+#define PCI_DEVICE_ID_DESKPRO_4000_5233MMX 0xB011
+#define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030
+#ifndef PCI_DEVICE_ID_OLICOM_OC2183
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2325
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2326
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#endif
+
+typedef struct tlan_adapter_entry {
+ u16 vendorId;
+ u16 deviceId;
+ char *deviceLabel;
+ u32 flags;
+ u16 addrOfs;
+} TLanAdapterEntry;
+
+#define TLAN_ADAPTER_NONE 0x00000000
+#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
+#define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002
+#define TLAN_ADAPTER_USE_INTERN_10 0x00000004
+#define TLAN_ADAPTER_ACTIVITY_LED 0x00000008
+
+#define TLAN_SPEED_DEFAULT 0
+#define TLAN_SPEED_10 10
+#define TLAN_SPEED_100 100
+
+#define TLAN_DUPLEX_DEFAULT 0
+#define TLAN_DUPLEX_HALF 1
+#define TLAN_DUPLEX_FULL 2
+
+
+
+
+ /*****************************************************************
+ * Rx/Tx List Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_BUFFERS_PER_LIST 10
+#define TLAN_LAST_BUFFER 0x80000000
+#define TLAN_CSTAT_UNUSED 0x8000
+#define TLAN_CSTAT_FRM_CMP 0x4000
+#define TLAN_CSTAT_READY 0x3000
+#define TLAN_CSTAT_EOC 0x0800
+#define TLAN_CSTAT_RX_ERROR 0x0400
+#define TLAN_CSTAT_PASS_CRC 0x0200
+#define TLAN_CSTAT_DP_PR 0x0100
+
+
+typedef struct tlan_buffer_ref_tag {
+ u32 count;
+ u32 address;
+} TLanBufferRef;
+
+
+typedef struct tlan_list_tag {
+ u32 forward;
+ u16 cStat;
+ u16 frameSize;
+ TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST];
+} TLanList;
+
+
+typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
+
+
+
+
+ /*****************************************************************
+ * PHY definitions
+ *
+ ****************************************************************/
+
+#define TLAN_PHY_MAX_ADDR 0x1F
+#define TLAN_PHY_NONE 0x20
+
+
+
+
+ /*****************************************************************
+ * TLAN Private Information Structure
+ *
+ ****************************************************************/
+
+typedef struct tlan_private_tag {
+ struct device *nextDevice;
+ void *dmaStorage;
+ u8 *padBuffer;
+ TLanList *rxList;
+ u8 *rxBuffer;
+ u32 rxHead;
+ u32 rxTail;
+ u32 rxEocCount;
+ TLanList *txList;
+ u8 *txBuffer;
+ u32 txHead;
+ u32 txInProgress;
+ u32 txTail;
+ u32 txBusyCount;
+ u32 phyOnline;
+ u32 timerSetAt;
+ u32 timerType;
+ struct timer_list timer;
+ struct net_device_stats stats;
+ TLanAdapterEntry *adapter;
+ u32 adapterRev;
+ u32 aui;
+ u32 debug;
+ u32 duplex;
+ u32 phy[2];
+ u32 phyNum;
+ u32 sa_int;
+ u32 speed;
+ u8 tlanRev;
+ u8 tlanFullDuplex;
+ char devName[8];
+} TLanPrivateInfo;
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Timer Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_TIMER_LINK 1
+#define TLAN_TIMER_ACTIVITY 2
+#define TLAN_TIMER_PHY_PDOWN 3
+#define TLAN_TIMER_PHY_PUP 4
+#define TLAN_TIMER_PHY_RESET 5
+#define TLAN_TIMER_PHY_START_LINK 6
+#define TLAN_TIMER_PHY_FINISH_AN 7
+#define TLAN_TIMER_FINISH_RESET 8
+
+#define TLAN_TIMER_ACT_DELAY 10
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Eeprom Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_EEPROM_ACK 0
+#define TLAN_EEPROM_STOP 1
+
+
+
+
+ /*****************************************************************
+ * Host Register Offsets and Contents
+ *
+ ****************************************************************/
+
+#define TLAN_HOST_CMD 0x00
+#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_STOP 0x40000000
+#define TLAN_HC_ACK 0x20000000
+#define TLAN_HC_CS_MASK 0x1FE00000
+#define TLAN_HC_EOC 0x00100000
+#define TLAN_HC_RT 0x00080000
+#define TLAN_HC_NES 0x00040000
+#define TLAN_HC_AD_RST 0x00008000
+#define TLAN_HC_LD_TMR 0x00004000
+#define TLAN_HC_LD_THR 0x00002000
+#define TLAN_HC_REQ_INT 0x00001000
+#define TLAN_HC_INT_OFF 0x00000800
+#define TLAN_HC_INT_ON 0x00000400
+#define TLAN_HC_AC_MASK 0x000000FF
+#define TLAN_CH_PARM 0x04
+#define TLAN_DIO_ADR 0x08
+#define TLAN_DA_ADR_INC 0x8000
+#define TLAN_DA_RAM_ADR 0x4000
+#define TLAN_HOST_INT 0x0A
+#define TLAN_HI_IV_MASK 0x1FE0
+#define TLAN_HI_IT_MASK 0x001C
+#define TLAN_DIO_DATA 0x0C
+
+
+/* ThunderLAN Internal Register DIO Offsets */
+
+#define TLAN_NET_CMD 0x00
+#define TLAN_NET_CMD_NRESET 0x80
+#define TLAN_NET_CMD_NWRAP 0x40
+#define TLAN_NET_CMD_CSF 0x20
+#define TLAN_NET_CMD_CAF 0x10
+#define TLAN_NET_CMD_NOBRX 0x08
+#define TLAN_NET_CMD_DUPLEX 0x04
+#define TLAN_NET_CMD_TRFRAM 0x02
+#define TLAN_NET_CMD_TXPACE 0x01
+#define TLAN_NET_SIO 0x01
+#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_ECLOK 0x40
+#define TLAN_NET_SIO_ETXEN 0x20
+#define TLAN_NET_SIO_EDATA 0x10
+#define TLAN_NET_SIO_NMRST 0x08
+#define TLAN_NET_SIO_MCLK 0x04
+#define TLAN_NET_SIO_MTXEN 0x02
+#define TLAN_NET_SIO_MDATA 0x01
+#define TLAN_NET_STS 0x02
+#define TLAN_NET_STS_MIRQ 0x80
+#define TLAN_NET_STS_HBEAT 0x40
+#define TLAN_NET_STS_TXSTOP 0x20
+#define TLAN_NET_STS_RXSTOP 0x10
+#define TLAN_NET_STS_RSRVD 0x0F
+#define TLAN_NET_MASK 0x03
+#define TLAN_NET_MASK_MASK7 0x80
+#define TLAN_NET_MASK_MASK6 0x40
+#define TLAN_NET_MASK_MASK5 0x20
+#define TLAN_NET_MASK_MASK4 0x10
+#define TLAN_NET_MASK_RSRVD 0x0F
+#define TLAN_NET_CONFIG 0x04
+#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_TCLK 0x4000
+#define TLAN_NET_CFG_BIT 0x2000
+#define TLAN_NET_CFG_RXCRC 0x1000
+#define TLAN_NET_CFG_PEF 0x0800
+#define TLAN_NET_CFG_1FRAG 0x0400
+#define TLAN_NET_CFG_1CHAN 0x0200
+#define TLAN_NET_CFG_MTEST 0x0100
+#define TLAN_NET_CFG_PHY_EN 0x0080
+#define TLAN_NET_CFG_MSMASK 0x007F
+#define TLAN_MAN_TEST 0x06
+#define TLAN_DEF_VENDOR_ID 0x08
+#define TLAN_DEF_DEVICE_ID 0x0A
+#define TLAN_DEF_REVISION 0x0C
+#define TLAN_DEF_SUBCLASS 0x0D
+#define TLAN_DEF_MIN_LAT 0x0E
+#define TLAN_DEF_MAX_LAT 0x0F
+#define TLAN_AREG_0 0x10
+#define TLAN_AREG_1 0x16
+#define TLAN_AREG_2 0x1C
+#define TLAN_AREG_3 0x22
+#define TLAN_HASH_1 0x28
+#define TLAN_HASH_2 0x2C
+#define TLAN_GOOD_TX_FRMS 0x30
+#define TLAN_TX_UNDERUNS 0x33
+#define TLAN_GOOD_RX_FRMS 0x34
+#define TLAN_RX_OVERRUNS 0x37
+#define TLAN_DEFERRED_TX 0x38
+#define TLAN_CRC_ERRORS 0x3A
+#define TLAN_CODE_ERRORS 0x3B
+#define TLAN_MULTICOL_FRMS 0x3C
+#define TLAN_SINGLECOL_FRMS 0x3E
+#define TLAN_EXCESSCOL_FRMS 0x40
+#define TLAN_LATE_COLS 0x41
+#define TLAN_CARRIER_LOSS 0x42
+#define TLAN_ACOMMIT 0x43
+#define TLAN_LED_REG 0x44
+#define TLAN_LED_ACT 0x10
+#define TLAN_LED_LINK 0x01
+#define TLAN_BSIZE_REG 0x45
+#define TLAN_MAX_RX 0x46
+#define TLAN_INT_DIS 0x48
+#define TLAN_ID_TX_EOC 0x04
+#define TLAN_ID_RX_EOF 0x02
+#define TLAN_ID_RX_EOC 0x01
+
+
+
+/* ThunderLAN Interrupt Codes */
+
+#define TLAN_INT_NUMBER_OF_INTS 8
+
+#define TLAN_INT_NONE 0x0000
+#define TLAN_INT_TX_EOF 0x0001
+#define TLAN_INT_STAT_OVERFLOW 0x0002
+#define TLAN_INT_RX_EOF 0x0003
+#define TLAN_INT_DUMMY 0x0004
+#define TLAN_INT_TX_EOC 0x0005
+#define TLAN_INT_STATUS_CHECK 0x0006
+#define TLAN_INT_RX_EOC 0x0007
+
+
+
+/* ThunderLAN MII Registers */
+
+/* Generic MII/PHY Registers */
+
+#define MII_GEN_CTL 0x00
+#define MII_GC_RESET 0x8000
+#define MII_GC_LOOPBK 0x4000
+#define MII_GC_SPEEDSEL 0x2000
+#define MII_GC_AUTOENB 0x1000
+#define MII_GC_PDOWN 0x0800
+#define MII_GC_ISOLATE 0x0400
+#define MII_GC_AUTORSRT 0x0200
+#define MII_GC_DUPLEX 0x0100
+#define MII_GC_COLTEST 0x0080
+#define MII_GC_RESERVED 0x007F
+#define MII_GEN_STS 0x01
+#define MII_GS_100BT4 0x8000
+#define MII_GS_100BTXFD 0x4000
+#define MII_GS_100BTXHD 0x2000
+#define MII_GS_10BTFD 0x1000
+#define MII_GS_10BTHD 0x0800
+#define MII_GS_RESERVED 0x07C0
+#define MII_GS_AUTOCMPLT 0x0020
+#define MII_GS_RFLT 0x0010
+#define MII_GS_AUTONEG 0x0008
+#define MII_GS_LINK 0x0004
+#define MII_GS_JABBER 0x0002
+#define MII_GS_EXTCAP 0x0001
+#define MII_GEN_ID_HI 0x02
+#define MII_GEN_ID_LO 0x03
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
+#define MII_AN_ADV 0x04
+#define MII_AN_LPA 0x05
+#define MII_AN_EXP 0x06
+
+/* ThunderLAN Specific MII/PHY Registers */
+
+#define TLAN_TLPHY_ID 0x10
+#define TLAN_TLPHY_CTL 0x11
+#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_SWAPOL 0x4000
+#define TLAN_TC_AUISEL 0x2000
+#define TLAN_TC_SQEEN 0x1000
+#define TLAN_TC_MTEST 0x0800
+#define TLAN_TC_RESERVED 0x07F8
+#define TLAN_TC_NFEW 0x0004
+#define TLAN_TC_INTEN 0x0002
+#define TLAN_TC_TINT 0x0001
+#define TLAN_TLPHY_STS 0x12
+#define TLAN_TS_MINT 0x8000
+#define TLAN_TS_PHOK 0x4000
+#define TLAN_TS_POLOK 0x2000
+#define TLAN_TS_TPENERGY 0x1000
+#define TLAN_TS_RESERVED 0x0FFF
+
+
/* Circularly advance index `a` within [0, b).  Wrapped in do/while(0) so
 * the macro expands to a single statement and is safe inside unbraced
 * if/else bodies (the bare-`if` original had a dangling-else hazard);
 * arguments are parenthesised against operator-precedence surprises. */
#define CIRC_INC( a, b ) do { if ( ++(a) >= (b) ) (a) = 0; } while (0)
+
+/* Routines to access internal registers. */
+
+inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)));
+
+} /* TLan_DioRead8 */
+
+
+
+
+inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)));
+
+} /* TLan_DioRead16 */
+
+
+
+
+inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inl(base_addr + TLAN_DIO_DATA));
+
+} /* TLan_DioRead32 */
+
+
+
+
+inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
+
+}
+
+
+
+
+inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+
+
+
/* Write a 32-bit longword to an internal (DIO-mapped) TLAN register.
 * NOTE(review): the "+ (internal_addr & 0x2)" offset is inconsistent with
 * TLan_DioRead32, which accesses the data window with no offset.  For
 * 4-byte-aligned internal addresses the term is 0 and harmless — confirm
 * no caller passes a 2-aligned address before relying on it. */
inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
{
	/* Latch the internal register address, then write the data window. */
	outw(internal_addr, base_addr + TLAN_DIO_ADR);
	outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));

}
+
+
+
+#if 0
+inline void TLan_ClearBit(u8 bit, u16 port)
+{
+ outb_p(inb_p(port) & ~bit, port);
+}
+
+
+
+
+inline int TLan_GetBit(u8 bit, u16 port)
+{
+ return ((int) (inb_p(port) & bit));
+}
+
+
+
+
+inline void TLan_SetBit(u8 bit, u16 port)
+{
+ outb_p(inb_p(port) | bit, port);
+}
+#endif
+
/* Read-modify-write helpers for byte-wide I/O ports (active versions of
 * the #if 0 inline functions above).  The `bit` argument is parenthesised
 * so compound masks such as (A | B) expand correctly — the unparenthesised
 * original would mis-bind `~bit` for such arguments. */
#define TLan_ClearBit( bit, port )	outb_p(inb_p(port) & ~(bit), port)
#define TLan_GetBit( bit, port )	((int) (inb_p(port) & (bit)))
#define TLan_SetBit( bit, port )	outb_p(inb_p(port) | (bit), port)
+
+
+inline u32 xor( u32 a, u32 b )
+{
+ return ( ( a && ! b ) || ( ! a && b ) );
+}
/* Fold eight boolean values into one with logical XOR (parity). */
#define XOR8( a, b, c, d, e, f, g, h ) xor( (a), xor( (b), xor( (c), xor( (d), xor( (e), xor( (f), xor( (g), (h) ) ) ) ) ) ) )
/* Extract bit `bit` of byte array `a` (non-zero iff the bit is set).
 * Arguments parenthesised so expression arguments expand correctly. */
#define DA( a, bit ) ( ( (u8) (a)[(bit)/8] ) & ( (u8) ( 1 << (bit)%8 ) ) )
+
/* Compute a 6-bit hash of the 6-byte Ethernet address `a`.  Hash bit i
 * (0..5) is the XOR (parity) of address bits i, i+6, i+12, ..., i+42 —
 * i.e. of every 6th bit, via the DA bit-extract and XOR8 fold macros.
 * Presumably the result indexes the TLAN_HASH_1/TLAN_HASH_2 multicast
 * filter registers defined above — confirm against the callers. */
inline u32 TLan_HashFunc( u8 *a )
{
	u32 hash;

	hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30), DA(a,36), DA(a,42) );
	hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31), DA(a,37), DA(a,43) ) << 1;
	hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32), DA(a,38), DA(a,44) ) << 2;
	hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33), DA(a,39), DA(a,45) ) << 3;
	hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34), DA(a,40), DA(a,46) ) << 4;
	hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35), DA(a,41), DA(a,47) ) << 5;

	return hash;

}
+
+
+
+
+#endif
diff --git a/linux/src/drivers/net/tulip.c b/linux/src/drivers/net/tulip.c
new file mode 100644
index 0000000..2a20301
--- /dev/null
+++ b/linux/src/drivers/net/tulip.c
@@ -0,0 +1,3685 @@
+/* tulip.c: A DEC 21040 family ethernet driver for Linux. */
+/*
+ Written/copyright 1994-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Digital "Tulip" Ethernet adapter interface.
+ It should work with most DEC 21*4*-based chips/ethercards, as well as
+ with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and ASIX.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/tulip.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"tulip.c:v0.97 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/tulip.html\n";
+
+#define SMP_CHECK
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 2; /* Message enable: 0..31 = no..all messages. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 25;
+
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS] = {0, };
+static int options[MAX_UNITS] = {0, };
+static int mtu[MAX_UNITS] = {0, }; /* Jumbo MTU for interfaces. */
+
+/* The possible media types that can be set in options[] are: */
+#define MEDIA_MASK 31
+static const char * const medianame[32] = {
+ "10baseT", "10base2", "AUI", "100baseTx",
+ "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+ "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+ "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+ "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+ "","","","", "","","","", "","","","Transceiver reset",
+};
+
+/* Set if the PCI BIOS detects the chips on a multiport board backwards. */
+#ifdef REVERSE_PROBE_ORDER
+static int reverse_probe = 1;
+#else
+static int reverse_probe = 0;
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#ifdef __alpha__ /* Always copy to aligned IP headers. */
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+ Set the bus performance register.
+ Typical: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 No alignment 0x00000000 unlimited 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Warning: many older 486 systems are broken and require setting 0x00A04800
+ 8 longword cache alignment, 8 longword burst.
+ ToDo: Non-Intel setting could be better.
+*/
+
+#if defined(__alpha__) || defined(__x86_64) || defined(__ia64)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__) || defined(__sparc__)
+/* Do *not* rely on hardware endian correction for big-endian machines! */
+static int csr0 = 0x01A00000 | 0x8000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC.
+ This value does not apply to the 512 bit table chips.
+*/
+static int multicast_filter_limit = 32;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the descriptor ring sizes a power of two for efficiency.
+ The Tx queue length limits transmit packets to a portion of the available
+ ring entries. It should be at least one element less to allow multicast
+ filter setup frames to be queued. It must be at least four for hysteresis.
+ Making the Tx queue too long decreases the effectiveness of channel
+ bonding and packet priority.
+ Large receive rings waste memory and confound network buffer limits.
+ These values have been carefully studied: changing these might mask a
+ problem, it won't fix it.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+/* Preferred skbuff allocation size. */
+#define PKT_BUF_SZ 1536
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+ to support a pre-NWay full-duplex signaling mechanism using short frames.
+ No one knows what it should be, but if left at its default value some
+ 10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC 0x6969
+
+/* The include file section. We start by doing checks and fix-ups for
+ missing compile flags. */
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(reverse_probe, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(csr0, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+#ifdef MODULE_PARM_DESC
+MODULE_PARM_DESC(debug, "Tulip driver message level (0-31)");
+MODULE_PARM_DESC(options,
+ "Tulip: force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Tulip driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Tulip: non-zero to set forced full duplex.");
+MODULE_PARM_DESC(rx_copybreak,
+ "Tulip breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Tulip breakpoint for switching to Rx-all-multicast");
+MODULE_PARM_DESC(reverse_probe, "Search PCI devices in reverse order to work "
+ "around misordered multiport NICS.");
+MODULE_PARM_DESC(csr0, "Special setting for the CSR0 PCI bus parameter "
+ "register.");
+#endif
+
+/* This driver was originally written to use I/O space access, but now
+ uses memory space by default. Override this with -DUSE_IO_OPS. */
+#if (LINUX_VERSION_CODE < 0x20100) || ! defined(MODULE)
+#define USE_IO_OPS
+#endif
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the DECchip "Tulip", Digital's
+single-chip ethernet controllers for PCI. Supported members of the family
+are the 21040, 21041, 21140, 21140A, 21142, and 21143. Similar work-alike
+chips from Lite-On, Macronics, ASIX, Compex and other listed below are also
+supported.
+
+These chips are used on at least 140 unique PCI board designs. The great
+number of chips and board designs supported is the reason for the
+driver size and complexity. Almost all of the increased complexity is in the
+board configuration and media selection code. There is very little
+increase in the operational critical path length.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+Some boards have EEPROMs tables with default media entry. The factory default
+is usually "autoselect". This should only be overridden when using
+transceiver connections without link beat e.g. 10base2 or AUI, or (rarely!)
+for forcing full-duplex when used with old link partners that do not do
+autonegotiation.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Tulip can use either ring buffers or lists of Tx and Rx descriptors.
+This driver uses statically allocated rings of Rx and Tx descriptors, set at
+compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
+for the Rx ring buffers at open() time and passes the skb->data field to the
+Tulip as receive data buffers. When an incoming frame is less than
+RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
+copied to the new skbuff. When the incoming frame is larger, the skbuff is
+passed directly up the protocol stack and replaced by a newly allocated
+skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data. A subtle aspect of this
+choice is that the Tulip only receives into longword aligned buffers, thus
+the IP header at offset 14 is not longword aligned for further processing.
+Copied frames are put into the new skbuff at an offset of "+2", thus copying
+has the beneficial effect of aligning the IP header and preloading the
+cache.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it is queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'tp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can not be selectively turned off, so
+we cannot avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Duke Kamstra of SMC for long ago providing an EtherPower board.
+Greg LaPolla at Linksys provided PNIC and other Linksys boards.
+Znyx provided a four-port card for testing.
+
+IVb. References
+
+http://scyld.com/expert/NWay.html
+http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
+http://www.national.com/pf/DP/DP83840A.html
+http://www.asix.com.tw/pmac.htm
+http://www.admtek.com.tw/
+
+IVc. Errata
+
+The old DEC databooks were light on details.
+The 21040 databook claims that CSR13, CSR14, and CSR15 should each be the last
+register of the set CSR12-15 written. Hmmm, now how is that possible?
+
+The DEC SROM format is very badly designed and not precisely defined, leading to
+part of the media selection junkheap below. Some boards do not have EEPROM
+media tables and need to be patched up. Worse, other boards use the DEC
+design kit media table when it is not correct for their design.
+
+We cannot use MII interrupts because there is no defined GPIO pin to attach
+them. The MII transceiver status is polled using a kernel timer.
+
+*/
+
+static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int tulip_pwr_event(void *dev_instance, int event);
+
+#ifdef USE_IO_OPS
+#define TULIP_IOTYPE PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0
+#define TULIP_SIZE 0x80
+#define TULIP_SIZE1 0x100
+#else
+#define TULIP_IOTYPE PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1
+#define TULIP_SIZE 0x400 /* New PCI v2.1 recommends 4K min mem size. */
+#define TULIP_SIZE1 0x400 /* New PCI v2.1 recommends 4K min mem size. */
+#endif
+
+/* This must match tulip_tbl[]! Note 21142 == 21143. */
/* Chip-type indices used to look up per-chip capabilities; the numeric
 * order must stay in sync with tulip_tbl[].  DC21142 and DC21143 share
 * one entry (== 3). */
enum tulip_chips {
	DC21040=0, DC21041=1, DC21140=2, DC21142=3, DC21143=3,
	LC82C168, MX98713, MX98715, MX98725, AX88141, AX88140, PNIC2, COMET,
	COMPEX9881, I21145, XIRCOM, CONEXANT,
	/* These flags may be added to the chip type. */
	HAS_VLAN=0x100,
};
+
+static struct pci_id_info pci_id_tbl[] = {
+ { "Digital DC21040 Tulip", { 0x00021011, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21040 },
+ { "Digital DC21041 Tulip", { 0x00141011, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21041 },
+ { "Digital DS21140A Tulip", { 0x00091011, 0xffffffff, 0,0, 0x20,0xf0 },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Digital DS21140 Tulip", { 0x00091011, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Digital DS21143-xD Tulip", { 0x00191011, 0xffffffff, 0,0, 0x40,0xf0 },
+ TULIP_IOTYPE, TULIP_SIZE, DC21142 | HAS_VLAN },
+ { "Digital DS21143-xC Tulip", { 0x00191011, 0xffffffff, 0,0, 0x30,0xf0 },
+ TULIP_IOTYPE, TULIP_SIZE, DC21142 },
+ { "Digital DS21142 Tulip", { 0x00191011, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE, DC21142 },
+ { "Kingston KNE110tx (PNIC)",
+ { 0x000211AD, 0xffffffff, 0xf0022646, 0xffffffff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Linksys LNE100TX (82c168 PNIC)", /* w/SYM */
+ { 0x000211AD, 0xffffffff, 0xffff11ad, 0xffffffff, 17,0xff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Linksys LNE100TX (82c169 PNIC)", /* w/ MII */
+ { 0x000211AD, 0xffffffff, 0xf00311ad, 0xffffffff, 32,0xff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Lite-On 82c168 PNIC", { 0x000211AD, 0xffffffff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Macronix 98713 PMAC", { 0x051210d9, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98713 },
+ { "Macronix 98715 PMAC", { 0x053110d9, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98715 },
+ { "Macronix 98725 PMAC", { 0x053110d9, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98725 },
+ { "ASIX AX88141", { 0x1400125B, 0xffffffff, 0,0, 0x10, 0xf0 },
+ TULIP_IOTYPE, 128, AX88141 },
+ { "ASIX AX88140", { 0x1400125B, 0xffffffff },
+ TULIP_IOTYPE, 128, AX88140 },
+ { "Lite-On LC82C115 PNIC-II", { 0xc11511AD, 0xffffffff },
+ TULIP_IOTYPE, 256, PNIC2 },
+ { "ADMtek AN981 Comet", { 0x09811317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-P", { 0x09851317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C", { 0x19851317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "D-Link DFE-680TXD v1.0 (ADMtek Centaur-C)", { 0x15411186, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys v2)", { 0xab0213d1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys)", { 0xab0313d1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys)", { 0xab0813d1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys PCM200 v3)", { 0xab081737, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys PCM200 v3)", { 0xab091737, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "STMicro STE10/100 Comet", { 0x0981104a, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "STMicro STE10/100A Comet", { 0x2774104a, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Comet-II", { 0x95111317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Comet-II (9513)", { 0x95131317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "SMC1255TX (ADMtek Comet)",
+ { 0x12161113, 0xffffffff, 0x125510b8, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "Accton EN1217/EN2242 (ADMtek Comet)", { 0x12161113, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "SMC1255TX (ADMtek Comet-II)", { 0x125510b8, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Comet-II (model 1020)", { 0x1020111a, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "Allied Telesyn A120 (ADMtek Comet)", { 0xa1201259, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "Compex RL100-TX", { 0x988111F6, 0xffffffff },
+ TULIP_IOTYPE, 128, COMPEX9881 },
+ { "Intel 21145 Tulip", { 0x00398086, 0xffffffff },
+ TULIP_IOTYPE, 128, I21145 },
+ { "Xircom Tulip clone", { 0x0003115d, 0xffffffff },
+ TULIP_IOTYPE, 128, XIRCOM },
+ { "Davicom DM9102", { 0x91021282, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Davicom DM9100", { 0x91001282, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Macronix mxic-98715 (EN1217)", { 0x12171113, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98715 },
+ { "Conexant LANfinity", { 0x180314f1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, CONEXANT },
+ { "3Com 3cSOHO100B-TX (ADMtek Centaur)", { 0x930010b7, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { 0},
+};
+
/* Driver registration record: name, hot-swap capability flag, PCI class
 * to match, device-ID table, and the probe/power-event callbacks.
 * Presumably consumed by the pci-scan.h helper layer — confirm there. */
struct drv_id_info tulip_drv_id = {
	"tulip", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
	tulip_probe1, tulip_pwr_event };
+
+/* This table is used during operation for capabilities and media timer. */
+
+static void tulip_timer(unsigned long data);
+static void nway_timer(unsigned long data);
+static void mxic_timer(unsigned long data);
+static void pnic_timer(unsigned long data);
+static void comet_timer(unsigned long data);
+
+enum tbl_flag {
+ HAS_MII=1, HAS_MEDIA_TABLE=2, CSR12_IN_SROM=4, ALWAYS_CHECK_MII=8,
+ HAS_PWRDWN=0x10, MC_HASH_ONLY=0x20, /* Hash-only multicast filter. */
+ HAS_PNICNWAY=0x80, HAS_NWAY=0x40, /* Uses internal NWay xcvr. */
+ HAS_INTR_MITIGATION=0x100, IS_ASIX=0x200, HAS_8023X=0x400,
+ COMET_MAC_ADDR=0x0800,
+};
+
+/* Note: this table must match enum tulip_chips above. */
+static struct tulip_chip_table {
+ char *chip_name;
+ int io_size; /* Unused */
+ int valid_intrs; /* CSR7 interrupt enable settings */
+ int flags;
+ void (*media_timer)(unsigned long data);
+} tulip_tbl[] = {
+ { "Digital DC21040 Tulip", 128, 0x0001ebef, 0, tulip_timer },
+ { "Digital DC21041 Tulip", 128, 0x0001ebff,
+ HAS_MEDIA_TABLE | HAS_NWAY, tulip_timer },
+ { "Digital DS21140 Tulip", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, tulip_timer },
+ { "Digital DS21143 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY
+ | HAS_INTR_MITIGATION, nway_timer },
+ { "Lite-On 82c168 PNIC", 256, 0x0001ebef,
+ HAS_MII | HAS_PNICNWAY, pnic_timer },
+ { "Macronix 98713 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+ { "Macronix 98715 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+ { "Macronix 98725 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+ { "ASIX AX88140", 128, 0x0001fbff,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
+ { "ASIX AX88141", 128, 0x0001fbff,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
+ { "Lite-On PNIC-II", 256, 0x0801fbff,
+ HAS_MII | HAS_NWAY | HAS_8023X, nway_timer },
+ { "ADMtek Comet", 256, 0x0001abef,
+ HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+ { "Compex 9881 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+ { "Intel DS21145 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
+ nway_timer },
+ { "Xircom tulip work-alike", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
+ nway_timer },
+ { "Conexant LANfinity", 256, 0x0001ebef,
+ HAS_MII | HAS_PWRDWN, tulip_timer },
+ {0},
+};
+
+/* A full-duplex map for media types. */
+enum MediaIs {
+ MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
+ MediaIs100=16};
+static const char media_cap[32] =
+{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
+static u8 t21040_csr13[] = {2,0x0C,8,4, 4,0,0,0, 0,0,0,0, 4,0,0,0};
+
+/* 21041 transceiver register settings: 10-T, 10-2, AUI, 10-T, 10T-FD*/
+static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
+static u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+/* Offsets to the Command and Status Registers, "CSRs". All accesses
+ must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+ CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
+ CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
+ CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78 };
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+ TimerInt=0x800, TPLnkFail=0x1000, TPLnkPass=0x10,
+ NormalIntr=0x10000, AbnormalIntr=0x8000, PCIBusError=0x2000,
+ RxJabber=0x200, RxStopped=0x100, RxNoBuf=0x80, RxIntr=0x40,
+ TxFIFOUnderflow=0x20, TxJabber=0x08, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
+};
+
+/* The configuration bits in CSR6. */
+enum csr6_mode_bits {
+ TxOn=0x2000, RxOn=0x0002, FullDuplex=0x0200,
+ AcceptBroadcast=0x0100, AcceptAllMulticast=0x0080,
+ AcceptAllPhys=0x0040, AcceptRunt=0x0008,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2;
+};
+
+struct tulip_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2; /* We use only buffer 1. */
+};
+
+enum desc_status_bits {
+ DescOwned=0x80000000, RxDescFatalErr=0x8000, RxWholePkt=0x0300,
+};
+
+/* Ring-wrap flag in length field, use for last ring entry.
+ 0x01000000 means chain on buffer2 address,
+ 0x02000000 means use the ring start address in CSR2/3.
+ Note: Some work-alike chips do not function correctly in chained mode.
+ The ASIX chip works only in chained mode.
+ Thus we indicates ring mode, but always write the 'next' field for
+ chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
+
+#define EEPROM_SIZE 512 /* support 256*16 EEPROMs */
+
+struct medialeaf {
+ u8 type;
+ u8 media;
+ unsigned char *leafdata;
+};
+
+struct mediatable {
+ u16 defaultmedia;
+ u8 leafcount, csr12dir; /* General purpose pin directions. */
+ unsigned has_mii:1, has_nonmii:1, has_reset:6;
+ u32 csr15dir, csr15val; /* 21143 NWay setting. */
+ struct medialeaf mleaf[0];
+};
+
+struct mediainfo {
+ struct mediainfo *next;
+ int info_type;
+ int index;
+ unsigned char *info;
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct tulip_private {
+ struct tulip_rx_desc rx_ring[RX_RING_SIZE];
+ struct tulip_tx_desc tx_ring[TX_RING_SIZE];
+ /* The saved addresses of Rx/Tx-in-place packet buffers. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address of dev->priv for kfree */
+ /* Multicast filter control. */
+ u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int multicast_filter_limit;
+ struct pci_dev *pci_dev;
+ int chip_id, revision;
+ int flags;
+ int max_interrupt_work;
+ int msg_level;
+ unsigned int csr0, csr6; /* Current CSR0, CSR6 settings. */
+ /* Note: cache line pairing and isolation of Rx vs. Tx indicies. */
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+ unsigned int rx_dead:1; /* We have no Rx buffers. */
+
+ struct net_device_stats stats;
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ /* Media selection state. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int full_duplex_lock:1;
+ unsigned int fake_addr:1; /* Multiport board faked address. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Do not sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
+ struct timer_list timer; /* Media selection timer. */
+ void (*link_change)(struct net_device *dev, int csr5);
+ u16 lpar; /* 21143 Link partner ability. */
+ u16 sym_advertise, mii_advertise; /* NWay to-advertise. */
+ u16 advertising[4]; /* MII advertise, from SROM table. */
+ signed char phys[4], mii_cnt; /* MII device addresses. */
+ spinlock_t mii_lock;
+ struct mediatable *mtable;
+ int cur_index; /* Current media index. */
+ int saved_if_port;
+};
+
+static void start_link(struct net_device *dev);
+static void parse_eeprom(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location, int addr_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int tulip_open(struct net_device *dev);
+/* Chip-specific media selection (timer functions prototyped above). */
+static int check_duplex(struct net_device *dev);
+static void select_media(struct net_device *dev, int startup);
+static void init_media(struct net_device *dev);
+static void nway_lnk_change(struct net_device *dev, int csr5);
+static void nway_start(struct net_device *dev);
+static void pnic_lnk_change(struct net_device *dev, int csr5);
+static void pnic_do_nway(struct net_device *dev);
+
+static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_init_ring(struct net_device *dev);
+static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int tulip_rx(struct net_device *dev);
+static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int tulip_close(struct net_device *dev);
+static struct net_device_stats *tulip_get_stats(struct net_device *dev);
+#ifdef HAVE_PRIVATE_IOCTL
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* A list of all installed Tulip devices. */
+static struct net_device *root_tulip_dev = NULL;
+
+static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int pci_tbl_idx, int find_cnt)
+{
+ struct net_device *dev;
+ struct tulip_private *tp;
+ void *priv_mem;
+ /* See note below on the multiport cards. */
+ static unsigned char last_phys_addr[6] = {0x02, 'L', 'i', 'n', 'u', 'x'};
+ static int last_irq = 0;
+ static int multiport_cnt = 0; /* For four-port boards w/one EEPROM */
+ u8 chip_rev;
+ int i, chip_idx = pci_id_tbl[pci_tbl_idx].drv_flags & 0xff;
+ unsigned short sum;
+ u8 ee_data[EEPROM_SIZE];
+
+ /* Bring the 21041/21143 out of sleep mode.
+ Caution: Snooze mode does not work with some boards! */
+ if (tulip_tbl[chip_idx].flags & HAS_PWRDWN)
+ pci_write_config_dword(pdev, 0x40, 0x00000000);
+
+ if (inl(ioaddr + CSR5) == 0xffffffff) {
+ printk(KERN_ERR "The Tulip chip at %#lx is not functioning.\n", ioaddr);
+ return 0;
+ }
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ /* Make certain the data structures are quadword aligned. */
+ priv_mem = kmalloc(sizeof(*tp) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+ dev->priv = tp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(tp, 0, sizeof(*tp));
+ tp->mii_lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+ tp->priv_addr = priv_mem;
+
+ tp->next_module = root_tulip_dev;
+ root_tulip_dev = dev;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
+
+ printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+ dev->name, pci_id_tbl[pci_tbl_idx].name, chip_rev, ioaddr);
+
+ /* Stop the Tx and Rx processes. */
+ outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
+ /* Clear the missed-packet counter. */
+ inl(ioaddr + CSR8);
+
+ if (chip_idx == DC21041 && inl(ioaddr + CSR9) & 0x8000) {
+ printk(" 21040 compatible mode,");
+ chip_idx = DC21040;
+ }
+
+ /* The SROM/EEPROM interface varies dramatically. */
+ sum = 0;
+ if (chip_idx == DC21040) {
+ outl(0, ioaddr + CSR9); /* Reset the pointer with a dummy write. */
+ for (i = 0; i < 6; i++) {
+ int value, boguscnt = 100000;
+ do
+ value = inl(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ dev->dev_addr[i] = value;
+ sum += value & 0xff;
+ }
+ } else if (chip_idx == LC82C168) {
+ for (i = 0; i < 3; i++) {
+ int value, boguscnt = 100000;
+ outl(0x600 | i, ioaddr + 0x98);
+ do
+ value = inl(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
+ sum += value & 0xffff;
+ }
+ } else if (chip_idx == COMET) {
+ /* No need to read the EEPROM. */
+ put_unaligned(le32_to_cpu(inl(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
+ put_unaligned(le16_to_cpu(inl(ioaddr + 0xA8)),
+ (u16 *)(dev->dev_addr + 4));
+ for (i = 0; i < 6; i ++)
+ sum += dev->dev_addr[i];
+ } else {
+ /* A serial EEPROM interface, we read now and sort it out later. */
+ int sa_offset = 0;
+ int ee_addr_size = read_eeprom(ioaddr, 0xff, 8) & 0x40000 ? 8 : 6;
+ int eeprom_word_cnt = 1 << ee_addr_size;
+
+ for (i = 0; i < eeprom_word_cnt; i++)
+ ((u16 *)ee_data)[i] =
+ le16_to_cpu(read_eeprom(ioaddr, i, ee_addr_size));
+
+ /* DEC now has a specification (see Notes) but early board makers
+ just put the address in the first EEPROM locations. */
+ /* This does memcmp(eedata, eedata+16, 8) */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+ if (chip_idx == CONEXANT) {
+ /* Check that the tuple type and length is correct. */
+ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
+ sa_offset = 0x19A;
+ } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
+ ee_data[2] == 0) {
+ sa_offset = 2; /* Grrr, damn Matrox boards. */
+ multiport_cnt = 4;
+ }
+ for (i = 0; i < 6; i ++) {
+ dev->dev_addr[i] = ee_data[i + sa_offset];
+ sum += ee_data[i + sa_offset];
+ }
+ }
+ /* Lite-On boards have the address byte-swapped. */
+ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0)
+ && dev->dev_addr[1] == 0x00)
+ for (i = 0; i < 6; i+=2) {
+ char tmp = dev->dev_addr[i];
+ dev->dev_addr[i] = dev->dev_addr[i+1];
+ dev->dev_addr[i+1] = tmp;
+ }
+ /* On the Zynx 315 Etherarray and other multiport boards only the
+ first Tulip has an EEPROM.
+ The addresses of the subsequent ports are derived from the first.
+ Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+ that here as well. */
+ if (sum == 0 || sum == 6*0xff) {
+ printk(" EEPROM not present,");
+ for (i = 0; i < 5; i++)
+ dev->dev_addr[i] = last_phys_addr[i];
+ dev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(__i386__) /* Patch up x86 BIOS bug. */
+ if (last_irq)
+ irq = last_irq;
+#endif
+ }
+
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2X", i ? ':' : ' ', last_phys_addr[i] = dev->dev_addr[i]);
+ printk(", IRQ %d.\n", irq);
+ last_irq = irq;
+
+#ifdef USE_IO_OPS
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ tp->pci_dev = pdev;
+ tp->msg_level = (1 << debug) - 1;
+ tp->chip_id = chip_idx;
+ tp->revision = chip_rev;
+ tp->flags = tulip_tbl[chip_idx].flags
+ | (pci_id_tbl[pci_tbl_idx].drv_flags & 0xffffff00);
+ tp->rx_copybreak = rx_copybreak;
+ tp->max_interrupt_work = max_interrupt_work;
+ tp->multicast_filter_limit = multicast_filter_limit;
+ tp->csr0 = csr0;
+
+ /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles.
+ And the ASIX must have a burst limit or horrible things happen. */
+ if (chip_idx == DC21143 && chip_rev == 65)
+ tp->csr0 &= ~0x01000000;
+ else if (tp->flags & IS_ASIX)
+ tp->csr0 |= 0x2000;
+
+ /* We support a zillion ways to set the media type. */
+#ifdef TULIP_FULL_DUPLEX
+ tp->full_duplex = 1;
+ tp->full_duplex_lock = 1;
+#endif
+#ifdef TULIP_DEFAULT_MEDIA
+ tp->default_port = TULIP_DEFAULT_MEDIA;
+#endif
+#ifdef TULIP_NO_MEDIA_SWITCH
+ tp->medialock = 1;
+#endif
+
+ /* The lower four bits are the media type. */
+ if (find_cnt >= 0 && find_cnt < MAX_UNITS) {
+ if (options[find_cnt] & 0x1f)
+ tp->default_port = options[find_cnt] & 0x1f;
+ if ((options[find_cnt] & 0x200) || full_duplex[find_cnt] > 0)
+ tp->full_duplex = 1;
+ if (mtu[find_cnt] > 0)
+ dev->mtu = mtu[find_cnt];
+ }
+ if (dev->mem_start)
+ tp->default_port = dev->mem_start & 0x1f;
+ if (tp->default_port) {
+ printk(KERN_INFO "%s: Transceiver selection forced to %s.\n",
+ dev->name, medianame[tp->default_port & MEDIA_MASK]);
+ tp->medialock = 1;
+ if (media_cap[tp->default_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ }
+ if (tp->full_duplex)
+ tp->full_duplex_lock = 1;
+
+ if (media_cap[tp->default_port] & MediaIsMII) {
+ u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+ tp->mii_advertise = media2advert[tp->default_port - 9];
+ tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+ }
+
+ /* This is logically part of probe1(), but too complex to write inline. */
+ if (tp->flags & HAS_MEDIA_TABLE) {
+ memcpy(tp->eeprom, ee_data, sizeof(tp->eeprom));
+ parse_eeprom(dev);
+ }
+
+ /* The Tulip-specific entries in the device structure. */
+ dev->open = &tulip_open;
+ dev->hard_start_xmit = &tulip_start_xmit;
+ dev->stop = &tulip_close;
+ dev->get_stats = &tulip_get_stats;
+#ifdef HAVE_PRIVATE_IOCTL
+ dev->do_ioctl = &private_ioctl;
+#endif
+#ifdef HAVE_MULTICAST
+ dev->set_multicast_list = &set_rx_mode;
+#endif
+
+ if (tp->flags & HAS_NWAY)
+ tp->link_change = nway_lnk_change;
+ else if (tp->flags & HAS_PNICNWAY)
+ tp->link_change = pnic_lnk_change;
+ start_link(dev);
+ if (chip_idx == COMET) {
+ /* Set the Comet LED configuration. */
+ outl(0xf0000000, ioaddr + CSR9);
+ }
+
+ return dev;
+}
+
+/* Start the link, typically called at probe1() time but sometimes later with
+ multiport cards. */
+static void start_link(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ if ((tp->flags & ALWAYS_CHECK_MII) ||
+ (tp->mtable && tp->mtable->has_mii) ||
+ ( ! tp->mtable && (tp->flags & HAS_MII))) {
+ int phyn, phy_idx = 0;
+ if (tp->mtable && tp->mtable->has_mii) {
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == 11) {
+ tp->cur_index = i;
+ tp->saved_if_port = dev->if_port;
+ select_media(dev, 2);
+ dev->if_port = tp->saved_if_port;
+ break;
+ }
+ }
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later,
+ but takes much time. */
+ for (phyn = 1; phyn <= 32 && phy_idx < sizeof(tp->phys); phyn++) {
+ int phy = phyn & 0x1f;
+ int mii_status = mdio_read(dev, phy, 1);
+ if ((mii_status & 0x8301) == 0x8001 ||
+ ((mii_status & 0x8000) == 0 && (mii_status & 0x7800) != 0)) {
+ int mii_reg0 = mdio_read(dev, phy, 0);
+ int mii_advert = mdio_read(dev, phy, 4);
+ int to_advert;
+
+ if (tp->mii_advertise)
+ to_advert = tp->mii_advertise;
+ else if (tp->advertising[phy_idx])
+ to_advert = tp->advertising[phy_idx];
+ else /* Leave unchanged. */
+ tp->mii_advertise = to_advert = mii_advert;
+
+ tp->phys[phy_idx++] = phy;
+ printk(KERN_INFO "%s: MII transceiver #%d "
+ "config %4.4x status %4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_reg0, mii_status, mii_advert);
+ /* Fixup for DLink with miswired PHY. */
+ if (mii_advert != to_advert) {
+ printk(KERN_DEBUG "%s: Advertising %4.4x on PHY %d,"
+ " previously advertising %4.4x.\n",
+ dev->name, to_advert, phy, mii_advert);
+ mdio_write(dev, phy, 4, to_advert);
+ }
+ /* Enable autonegotiation: some boards default to off. */
+ mdio_write(dev, phy, 0, (mii_reg0 & ~0x3000) |
+ (tp->full_duplex ? 0x0100 : 0x0000) |
+ ((media_cap[tp->default_port] & MediaIs100) ?
+ 0x2000 : 0x1000));
+ }
+ }
+ tp->mii_cnt = phy_idx;
+ if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
+ printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
+ dev->name);
+ tp->phys[0] = 1;
+ }
+ }
+
+ /* Reset the xcvr interface and turn on heartbeat. */
+ switch (tp->chip_id) {
+ case DC21040:
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0x00000004, ioaddr + CSR13);
+ break;
+ case DC21041:
+ /* This is nway_start(). */
+ if (tp->sym_advertise == 0)
+ tp->sym_advertise = 0x0061;
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+ outl(inl(ioaddr + CSR6) | FullDuplex, ioaddr + CSR6);
+ outl(0x0000EF01, ioaddr + CSR13);
+ break;
+ case DC21140: default:
+ if (tp->mtable)
+ outl(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+ break;
+ case DC21142:
+ case PNIC2:
+ if (tp->mii_cnt || media_cap[dev->if_port] & MediaIsMII) {
+ outl(0x82020000, ioaddr + CSR6);
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ outl(0x820E0000, ioaddr + CSR6);
+ } else
+ nway_start(dev);
+ break;
+ case LC82C168:
+ if ( ! tp->mii_cnt) {
+ tp->nway = 1;
+ tp->nwayset = 0;
+ outl(0x00420000, ioaddr + CSR6);
+ outl(0x30, ioaddr + CSR12);
+ outl(0x0001F078, ioaddr + 0xB8);
+ outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ }
+ break;
+ case COMPEX9881:
+ outl(0x00000000, ioaddr + CSR6);
+ outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+ outl(0x00000001, ioaddr + CSR13);
+ break;
+ case MX98713: case MX98715: case MX98725:
+ outl(0x01a80000, ioaddr + CSR6);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00001000, ioaddr + CSR12);
+ break;
+ case COMET:
+ break;
+ }
+
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+}
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+ Search www.digital.com for "21X4 SROM" to get details.
+ This code is very complex, and will require changes to support
+ additional cards, so I will be verbose about what is going on.
+ */
+
+/* Known cards that have old-style EEPROMs.
+ Writing this table is described at
+ http://www.scyld.com/network/tulip-media.html
+*/
+static struct fixups {
+ char *name;
+ unsigned char addr0, addr1, addr2;
+ u16 newtable[32]; /* Max length below. */
+} eeprom_fixups[] = {
+ {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+ 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+ {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0903, 0x006D, /* 100baseTx */
+ 0x0905, 0x006D, /* 100baseTx-FD */ }},
+ {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+ 0x0107, 0x8021, /* 100baseFx */
+ 0x0108, 0x8021, /* 100baseFx-FD */
+ 0x0100, 0x009E, /* 10baseT */
+ 0x0104, 0x009E, /* 10baseT-FD */
+ 0x0103, 0x006D, /* 100baseTx */
+ 0x0105, 0x006D, /* 100baseTx-FD */ }},
+ {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+ 0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+ 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+ {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+ 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */
+ 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */
+ 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+ 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+ 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+ }},
+ {0, 0, 0, 0, {}}};
+
+static const char * block_name[] = {"21140 non-MII", "21140 MII PHY",
+ "21142 Serial PHY", "21142 MII PHY", "21143 SYM PHY", "21143 reset method"};
+
+#if defined(__i386__) /* AKA get_unaligned() */
+#define get_u16(ptr) (*(u16 *)(ptr))
+#else
+#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+#endif
+
+static void parse_eeprom(struct net_device *dev)
+{
+ /* The last media info list parsed, for multiport boards. */
+ static struct mediatable *last_mediatable = NULL;
+ static unsigned char *last_ee_data = NULL;
+ static int controller_index = 0;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ unsigned char *p, *ee_data = tp->eeprom;
+ int new_advertise = 0;
+ int i;
+
+ tp->mtable = 0;
+ /* Detect an old-style (SA only) EEPROM layout:
+ memcmp(eedata, eedata+16, 8). */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ break;
+ if (i >= 8) {
+ if (ee_data[0] == 0xff) {
+ if (last_mediatable) {
+ controller_index++;
+ printk(KERN_INFO "%s: Controller %d of multiport board.\n",
+ dev->name, controller_index);
+ tp->mtable = last_mediatable;
+ ee_data = last_ee_data;
+ goto subsequent_board;
+ } else
+ printk(KERN_INFO "%s: Missing EEPROM, this interface may "
+ "not work correctly!\n",
+ dev->name);
+ return;
+ }
+ /* Do a fix-up based on the vendor half of the station address. */
+ for (i = 0; eeprom_fixups[i].name; i++) {
+ if (dev->dev_addr[0] == eeprom_fixups[i].addr0
+ && dev->dev_addr[1] == eeprom_fixups[i].addr1
+ && dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+ if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
+ i++; /* An Accton EN1207, not an outlaw Maxtech. */
+ memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+ sizeof(eeprom_fixups[i].newtable));
+ printk(KERN_INFO "%s: Old format EEPROM on '%s' board.\n"
+ KERN_INFO "%s: Using substitute media control info.\n",
+ dev->name, eeprom_fixups[i].name, dev->name);
+ break;
+ }
+ }
+ if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+ printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+ "information.\n",
+ dev->name);
+ return;
+ }
+ }
+
+ controller_index = 0;
+ if (ee_data[19] > 1) {
+ struct net_device *prev_dev;
+ struct tulip_private *otp;
+ /* This is a multiport board. The probe order may be "backwards", so
+ we patch up already found devices. */
+ last_ee_data = ee_data;
+ for (prev_dev = tp->next_module; prev_dev; prev_dev = otp->next_module) {
+ otp = (struct tulip_private *)prev_dev->priv;
+ if (otp->eeprom[0] == 0xff && otp->mtable == 0) {
+ parse_eeprom(prev_dev);
+ start_link(prev_dev);
+ } else
+ break;
+ }
+ controller_index = 0;
+ }
+subsequent_board:
+
+ p = (void *)ee_data + ee_data[27 + controller_index*3];
+ if (ee_data[27] == 0) { /* No valid media table. */
+ } else if (tp->chip_id == DC21041) {
+ int media = get_u16(p);
+ int count = p[2];
+ p += 3;
+
+ printk(KERN_INFO "%s: 21041 Media table, default media %4.4x (%s).\n",
+ dev->name, media,
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ unsigned char media_block = *p++;
+ int media_code = media_block & MEDIA_MASK;
+ if (media_block & 0x40)
+ p += 6;
+ switch(media_code) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+ }
+ printk(KERN_INFO "%s: 21041 media #%d, %s.\n",
+ dev->name, media_code, medianame[media_code]);
+ }
+ } else {
+ unsigned char csr12dir = 0;
+ int count;
+ struct mediatable *mtable;
+ u16 media = get_u16(p);
+
+ p += 2;
+ if (tp->flags & CSR12_IN_SROM)
+ csr12dir = *p++;
+ count = *p++;
+ mtable = (struct mediatable *)
+ kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf),
+ GFP_KERNEL);
+ if (mtable == NULL)
+ return; /* Horrible, impossible failure. */
+ last_mediatable = tp->mtable = mtable;
+ mtable->defaultmedia = media;
+ mtable->leafcount = count;
+ mtable->csr12dir = csr12dir;
+ mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+ mtable->csr15dir = mtable->csr15val = 0;
+
+ printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ struct medialeaf *leaf = &mtable->mleaf[i];
+
+ if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+ leaf->type = 0;
+ leaf->media = p[0] & 0x3f;
+ leaf->leafdata = p;
+ if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */
+ mtable->has_mii = 1;
+ p += 4;
+ } else {
+ switch(leaf->type = p[1]) {
+ case 5:
+ mtable->has_reset = i + 1; /* Assure non-zero */
+ /* Fall through */
+ case 6:
+ leaf->media = 31;
+ break;
+ case 1: case 3:
+ mtable->has_mii = 1;
+ leaf->media = 11;
+ break;
+ case 2:
+ if ((p[2] & 0x3f) == 0) {
+ u32 base15 = (p[2] & 0x40) ? get_u16(p + 7) : 0x0008;
+ u16 *p1 = (u16 *)(p + (p[2] & 0x40 ? 9 : 3));
+ mtable->csr15dir = (get_unaligned(p1 + 0)<<16) + base15;
+ mtable->csr15val = (get_unaligned(p1 + 1)<<16) + base15;
+ }
+ /* Fall through. */
+ case 0: case 4:
+ mtable->has_nonmii = 1;
+ leaf->media = p[2] & MEDIA_MASK;
+ switch (leaf->media) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+ case 3: new_advertise |= 0x0080; break;
+ case 5: new_advertise |= 0x0100; break;
+ case 6: new_advertise |= 0x0200; break;
+ }
+ break;
+ default:
+ leaf->media = 19;
+ }
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ }
+ if ((tp->msg_level & NETIF_MSG_LINK) &&
+ leaf->media == 11) {
+ unsigned char *bp = leaf->leafdata;
+ printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
+ "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+ dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ }
+ if (tp->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
+ "by a %s (%d) block.\n",
+ dev->name, i, medianame[leaf->media], leaf->media,
+ leaf->type < 6 ? block_name[leaf->type] : "UNKNOWN",
+ leaf->type);
+ }
+ if (new_advertise)
+ tp->sym_advertise = new_advertise;
+ }
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */
+#define EE_CS 0x01 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x05
+#define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */
+#define EE_ENB (0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ Even at 33Mhz current PCI implementations do not overrun the EEPROM clock.
+ We add a bus turn-around to insure that this remains true. */
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the alway-set leading bit. */
+#define EE_READ_CMD (6)
+
+/* Note: this routine returns extra data bits for size detection. */
+static int read_eeprom(long ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ long ee_addr = ioaddr + CSR9;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ outl(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outl(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues or future 66Mhz PCI. */
+#define mdio_delay() inl(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+ MDIO protocol. It is just different enough from the EEPROM protocol
+ to not share code. The maxium data clock rate is 2.5 Mhz. */
+#define MDIO_SHIFT_CLK 0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN 0x40000
+#define MDIO_DATA_READ 0x80000
+
+static const unsigned char comet_miireg2offset[32] = {
+ 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0,
+ 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
+
+/* Read MII register LOCATION of transceiver PHY_ID on DEV.
+   Returns the 16-bit register value, or 0xffff for an invalid
+   location or an unmapped COMET pseudo-register.  COMET (phy 30)
+   and LC82C168 have direct register interfaces; all other chips
+   bit-bang the MDIO management frame on CSR9. */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+ /* MDIO read frame: start(01) + op(10) + phy addr + reg addr. */
+ int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+ int retval = 0;
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return 0xffff;
+
+ /* COMET pseudo-MII: registers live at fixed CSR offsets. */
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ return inl(ioaddr + comet_miireg2offset[location]);
+ return 0xffff;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ /* PNIC has an internal MII port at 0xA0; start the transaction
+    and poll until the busy bit (0x80000000) clears.  The inner i
+    deliberately shadows the outer loop counter as a poll bound. */
+ int i = 1000;
+ outl(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ while (--i > 0)
+ if ( ! ((retval = inl(ioaddr + 0xA0)) & 0x80000000))
+ break;
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return retval & 0xffff;
+ }
+
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ /* Drop the trailing idle bit; keep the 16 data bits. */
+ return (retval>>1) & 0xffff;
+}
+
+/* Write VAL to MII register LOCATION of transceiver PHY_ID on DEV.
+   Invalid locations are silently ignored.  Uses the memory-mapped
+   pseudo-MII registers on COMET (phy 30), the internal MII port at
+   0xA0 on LC82C168, and bit-banged MDIO on CSR9 otherwise. */
+static void mdio_write(struct net_device *dev, int phy_id, int location, int val)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+ /* MDIO write frame: start + op(01) + phy + reg + turnaround + data. */
+ int cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | (val & 0xffff);
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return;
+
+ /* COMET pseudo-MII: write straight to the mapped CSR, if any. */
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ outl(val, ioaddr + comet_miireg2offset[location]);
+ return;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ /* PNIC internal MII port: issue the command and poll the busy
+    bit with a bounded loop (inner i shadows the outer counter). */
+ int i = 1000;
+ outl(cmd, ioaddr + 0xA0);
+ do
+ if ( ! (inl(ioaddr + 0xA0) & 0x80000000))
+ break;
+ while (--i > 0);
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+ }
+
+ /* Establish sync by sending 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+}
+
+
+/* Bring the interface up: wake the chip from power-down, reset it,
+   acquire the (shared) IRQ, initialize the Rx/Tx rings and station
+   address registers, select the media, start Tx/Rx, enable
+   interrupts, and arm the chip-specific media timer.
+   Returns 0 on success, -EAGAIN if the IRQ cannot be acquired. */
+static int
+tulip_open(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 3*HZ;
+
+ /* Wake the chip from sleep/snooze mode. */
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0);
+
+ /* On some chip revs we must set the MII/SYM port before the reset!? */
+ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
+ outl(0x00040000, ioaddr + CSR6);
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ outl(0x00000001, ioaddr + CSR0);
+
+ MOD_INC_USE_COUNT;
+
+ /* This would be done after interrupts are initialized, but we do not want
+    to frob the transceiver only to fail later. */
+ if (request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)) {
+ /* IRQ unavailable: undo the module refcount taken above. */
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ /* Deassert reset.
+    Wait the specified 50 PCI cycles after a reset by initializing
+    Tx and Rx queues and the address filter list. */
+ outl(tp->csr0, ioaddr + CSR0);
+
+ if (tp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: tulip_open() irq %d.\n", dev->name, dev->irq);
+
+ tulip_init_ring(dev);
+
+ /* Chip-specific station-address setup (non-setup-frame chips). */
+ if (tp->chip_id == PNIC2) {
+ u32 addr_high = (dev->dev_addr[1]<<8) + (dev->dev_addr[0]<<0);
+ /* This address setting does not appear to impact chip operation?? */
+ outl((dev->dev_addr[5]<<8) + dev->dev_addr[4] +
+ (dev->dev_addr[3]<<24) + (dev->dev_addr[2]<<16),
+ ioaddr + 0xB0);
+ outl(addr_high + (addr_high<<16), ioaddr + 0xB8);
+ }
+ if (tp->flags & MC_HASH_ONLY) {
+ u32 addr_low = cpu_to_le32(get_unaligned((u32 *)dev->dev_addr));
+ u32 addr_high = cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)));
+ if (tp->flags & IS_ASIX) {
+ outl(0, ioaddr + CSR13);
+ outl(addr_low, ioaddr + CSR14);
+ outl(1, ioaddr + CSR13);
+ outl(addr_high, ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(addr_low, ioaddr + 0xA4);
+ outl(addr_high, ioaddr + 0xA8);
+ outl(0, ioaddr + 0xAC);
+ outl(0, ioaddr + 0xB0);
+ }
+ }
+
+ /* Point the chip at the descriptor rings. */
+ outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+ outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+
+ if ( ! tp->full_duplex_lock)
+ tp->full_duplex = 0;
+ init_media(dev);
+ if (media_cap[dev->if_port] & MediaIsMII)
+ check_duplex(dev);
+ set_rx_mode(dev);
+
+ /* Start the Tx to process setup frame. */
+ outl(tp->csr6, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn, ioaddr + CSR6);
+
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ outl(0, ioaddr + CSR2); /* Rx poll demand */
+
+ if (tp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done tulip_open(), CSR0 %8.8x, CSR5 %8.8x CSR6 "
+ "%8.8x.\n", dev->name, (int)inl(ioaddr + CSR0),
+ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6));
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+    to an alternate media type. */
+ init_timer(&tp->timer);
+ tp->timer.expires = jiffies + next_tick;
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+ add_timer(&tp->timer);
+
+ return 0;
+}
+
+/* Choose the initial media/if_port for DEV: honor a user-specified
+   port if the media table supports it, otherwise the table's default
+   media, otherwise start sensing from the last non-full-duplex table
+   entry.  Then apply chip-specific startup (NWay start or direct CSR
+   programming) and seed tp->csr6/cur_index/nwayset. */
+static void init_media(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ tp->saved_if_port = dev->if_port;
+ if (dev->if_port == 0)
+ dev->if_port = tp->default_port;
+
+ /* Allow selecting a default media. */
+ i = 0;
+ if (tp->mtable == NULL)
+ goto media_picked;
+ if (dev->if_port) {
+ /* MII ports map to media 11; port 12 maps back to 0. */
+ int looking_for = media_cap[dev->if_port] & MediaIsMII ? 11 :
+ (dev->if_port == 12 ? 0 : dev->if_port);
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using user-specified media %s.\n",
+ dev->name, medianame[dev->if_port]);
+ goto media_picked;
+ }
+ }
+ /* Bit 0x0800 clear means the table carries a usable default. */
+ if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+ int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+ dev->name, medianame[looking_for]);
+ goto media_picked;
+ }
+ }
+ /* Start sensing first non-full-duplex media. */
+ for (i = tp->mtable->leafcount - 1;
+ (media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+ ;
+media_picked:
+
+ tp->csr6 = 0;
+ tp->cur_index = i;
+ tp->nwayset = 0;
+
+ if (dev->if_port) {
+ if (tp->chip_id == DC21143 &&
+ (media_cap[dev->if_port] & MediaIsMII)) {
+ /* We must reset the media CSRs when we force-select MII mode. */
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ outl(0x0008, ioaddr + CSR15);
+ }
+ select_media(dev, 1);
+ return;
+ }
+ /* No forced port: chip-specific autoselection/startup. */
+ switch(tp->chip_id) {
+ case DC21041:
+ /* tp->nway = 1;*/
+ nway_start(dev);
+ break;
+ case DC21142:
+ if (tp->mii_cnt) {
+ select_media(dev, 1);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Using MII transceiver %d, status "
+ "%4.4x.\n",
+ dev->name, tp->phys[0], mdio_read(dev, tp->phys[0], 1));
+ outl(0x82020000, ioaddr + CSR6);
+ tp->csr6 = 0x820E0000;
+ dev->if_port = 11;
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ } else
+ nway_start(dev);
+ break;
+ case PNIC2:
+ nway_start(dev);
+ break;
+ case LC82C168:
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+ tp->csr6 = 0x814C0000 | (tp->full_duplex ? FullDuplex : 0);
+ outl(0x0001, ioaddr + CSR15);
+ } else if (inl(ioaddr + CSR5) & TPLnkPass)
+ pnic_do_nway(dev);
+ else {
+ /* Start with 10mbps to do autonegotiation. */
+ outl(0x32, ioaddr + CSR12);
+ tp->csr6 = 0x00420000;
+ outl(0x0001B078, ioaddr + 0xB8);
+ outl(0x0201B078, ioaddr + 0xB8);
+ }
+ break;
+ case MX98713: case COMPEX9881:
+ dev->if_port = 0;
+ tp->csr6 = 0x01880000 | (tp->full_duplex ? FullDuplex : 0);
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+ break;
+ case MX98715: case MX98725:
+ /* Provided by BOLO, Macronix - 12/10/1998. */
+ dev->if_port = 0;
+ tp->csr6 = 0x01a80000 | FullDuplex;
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+ outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
+ break;
+ case COMET: case CONEXANT:
+ /* Enable automatic Tx underrun recovery. */
+ outl(inl(ioaddr + 0x88) | 1, ioaddr + 0x88);
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ tp->csr6 = 0x00040000;
+ break;
+ case AX88140: case AX88141:
+ tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+ break;
+ default:
+ select_media(dev, 1);
+ }
+}
+
+/* Set up the transceiver control registers for the selected media type.
+ STARTUP indicates to reset the transceiver. It is set to '2' for
+ the initial card detection, and '1' during resume or open().
+*/
+static void select_media(struct net_device *dev, int startup)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ struct mediatable *mtable = tp->mtable;
+ u32 new_csr6;
+ int i;
+
+ /* Media-table-driven selection: program the transceiver according
+    to the leaf tp->cur_index points at. */
+ if (mtable) {
+ struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+ unsigned char *p = mleaf->leafdata;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media table type %d.\n",
+ dev->name, mleaf->type);
+ switch (mleaf->type) {
+ case 0: /* 21140 non-MII xcvr. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+ " with control setting %2.2x.\n",
+ dev->name, p[1]);
+ dev->if_port = p[0];
+ if (startup)
+ outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ outl(p[1], ioaddr + CSR12);
+ new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+ break;
+ case 2: case 4: {
+ /* 21142/21143 SIA (serial) transceiver leaf: CSR13-15 values
+    come from the table; write order below is significant. */
+ u16 setup[5];
+ u32 csr13val, csr14val, csr15dir, csr15val;
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ dev->if_port = p[0] & MEDIA_MASK;
+ if (media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset-1];
+ unsigned char *rst = rleaf->leafdata;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+ dev->name);
+ for (i = 0; i < rst[0]; i++)
+ outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+ "%4.4x/%4.4x.\n",
+ dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
+ csr13val = setup[0];
+ csr14val = setup[1];
+ csr15dir = (setup[3]<<16) | setup[2];
+ csr15val = (setup[4]<<16) | setup[2];
+ outl(0, ioaddr + CSR13);
+ outl(csr14val, ioaddr + CSR14);
+ outl(csr15dir, ioaddr + CSR15); /* Direction */
+ outl(csr15val, ioaddr + CSR15); /* Data */
+ outl(csr13val, ioaddr + CSR13);
+ } else {
+ csr13val = 1;
+ csr14val = 0x0003FFFF;
+ csr15dir = (setup[0]<<16) | 0x0008;
+ csr15val = (setup[1]<<16) | 0x0008;
+ if (dev->if_port <= 4)
+ csr14val = t21142_csr14[dev->if_port];
+ if (startup) {
+ outl(0, ioaddr + CSR13);
+ outl(csr14val, ioaddr + CSR14);
+ }
+ outl(csr15dir, ioaddr + CSR15); /* Direction */
+ outl(csr15val, ioaddr + CSR15); /* Data */
+ if (startup) outl(csr13val, ioaddr + CSR13);
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
+ dev->name, csr15dir, csr15val);
+ if (mleaf->type == 4)
+ new_csr6 = 0x820A0000 | ((setup[2] & 0x71) << 18);
+ else
+ new_csr6 = 0x82420000;
+ break;
+ }
+ case 1: case 3: {
+ /* MII transceiver leaf (type 1: 21140 GPIO sequence on CSR12,
+    type 3: 21142 16-bit sequence on CSR15). */
+ int phy_num = p[0];
+ int init_length = p[1];
+ u16 *misc_info;
+
+ dev->if_port = 11;
+ new_csr6 = 0x020E0000;
+ if (mleaf->type == 3) { /* 21142 */
+ u16 *init_sequence = (u16*)(p+2);
+ u16 *reset_sequence = &((u16*)(p+3))[init_length];
+ int reset_length = p[2 + init_length*2];
+ misc_info = reset_sequence + reset_length;
+ if (startup)
+ for (i = 0; i < reset_length; i++)
+ outl(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+ for (i = 0; i < init_length; i++)
+ outl(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+ } else {
+ u8 *init_sequence = p + 2;
+ u8 *reset_sequence = p + 3 + init_length;
+ int reset_length = p[2 + init_length];
+ misc_info = (u16*)(reset_sequence + reset_length);
+ if (startup) {
+ outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ for (i = 0; i < reset_length; i++)
+ outl(reset_sequence[i], ioaddr + CSR12);
+ }
+ for (i = 0; i < init_length; i++)
+ outl(init_sequence[i], ioaddr + CSR12);
+ }
+ tp->advertising[phy_num] = get_u16(&misc_info[1]) | 1;
+ /* During resume/open (startup < 2) also push the advertised
+    capabilities into MII register 4. */
+ if (startup < 2) {
+ if (tp->mii_advertise == 0)
+ tp->mii_advertise = tp->advertising[phy_num];
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
+ dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
+ }
+ break;
+ }
+ default:
+ printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
+ dev->name, mleaf->type);
+ new_csr6 = 0x020E0000;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ (int)inl(ioaddr + CSR12) & 0xff);
+ } else if (tp->chip_id == DC21041) {
+ int port = dev->if_port <= 4 ? dev->if_port : 0;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21041 using media %s, CSR12 is %4.4x.\n",
+ dev->name, medianame[port == 3 ? 12: port],
+ (int)inl(ioaddr + CSR12));
+ outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+ outl(t21041_csr14[port], ioaddr + CSR14);
+ outl(t21041_csr15[port], ioaddr + CSR15);
+ outl(t21041_csr13[port], ioaddr + CSR13);
+ new_csr6 = 0x80020000;
+ } else if (tp->chip_id == LC82C168) {
+ if (startup && ! tp->medialock)
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+ dev->name, (int)inl(ioaddr + 0xB8),
+ medianame[dev->if_port]);
+ if (tp->mii_cnt) {
+ new_csr6 = 0x810C0000;
+ outl(0x0001, ioaddr + CSR15);
+ outl(0x0201B07A, ioaddr + 0xB8);
+ } else if (startup) {
+ /* Start with 10mbps to do autonegotiation. */
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x0001B078, ioaddr + 0xB8);
+ outl(0x0201B078, ioaddr + 0xB8);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ outl(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ /* Trigger autonegotiation. */
+ outl(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+ } else {
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x1F078, ioaddr + 0xB8);
+ }
+ } else if (tp->chip_id == DC21040) { /* 21040 */
+ /* Turn on the xcvr interface. */
+ int csr12 = inl(ioaddr + CSR12);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21040 media type is %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ if (media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ new_csr6 = 0x20000;
+ /* Set the full duplux match frame. */
+ outl(FULL_DUPLEX_MAGIC, ioaddr + CSR11);
+ outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+ if (t21040_csr13[dev->if_port] & 8) {
+ outl(0x0705, ioaddr + CSR14);
+ outl(0x0006, ioaddr + CSR15);
+ } else {
+ outl(0xffff, ioaddr + CSR14);
+ outl(0x0000, ioaddr + CSR15);
+ }
+ outl(0x8f01 | t21040_csr13[dev->if_port], ioaddr + CSR13);
+ } else { /* Unknown chip type with no media table. */
+ if (tp->default_port == 0)
+ dev->if_port = tp->mii_cnt ? 11 : 3;
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ new_csr6 = 0x020E0000;
+ } else if (media_cap[dev->if_port] & MediaIsFx) {
+ new_csr6 = 0x02860000;
+ } else
+ new_csr6 = 0x038E0000;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: No media description table, assuming "
+ "%s transceiver, CSR12 %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ (int)inl(ioaddr + CSR12));
+ }
+
+ /* Merge the new mode bits with the kept low CSR6 bits (mask 0xfdff
+    drops bit 9, which is re-derived from tp->full_duplex —
+    NOTE(review): presumably FullDuplex == 0x0200; confirm against
+    the CSR6 bit definitions). */
+ tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) |
+ (tp->full_duplex ? FullDuplex : 0);
+ return;
+}
+
+/*
+ Check the MII negotiated duplex, and change the CSR6 setting if
+ required.
+ Return 0 if everything is OK.
+ Return < 0 if the transceiver is missing or has no link beat.
+ */
+static int check_duplex(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int mii_reg1, mii_reg5, negotiated, duplex;
+
+ /* Duplex forced by the user: nothing to negotiate. */
+ if (tp->full_duplex_lock)
+ return 0;
+ /* Register 5 is the link partner ability; AND with what we
+    advertise to get the negotiated capability set. */
+ mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+ negotiated = mii_reg5 & tp->mii_advertise;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO "%s: MII link partner %4.4x, negotiated %4.4x.\n",
+ dev->name, mii_reg5, negotiated);
+ /* All-ones means the transceiver did not respond. */
+ if (mii_reg5 == 0xffff)
+ return -2;
+ /* Status register 1 is read twice before declaring no link —
+    NOTE(review): presumably because the link bit latches failures,
+    per MII convention; confirm against the PHY data sheet. */
+ if ((mii_reg5 & 0x4000) == 0 && /* No negotiation. */
+ ((mii_reg1 = mdio_read(dev, tp->phys[0], 1)) & 0x0004) == 0) {
+ int new_reg1 = mdio_read(dev, tp->phys[0], 1);
+ if ((new_reg1 & 0x0004) == 0) {
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO "%s: No link beat on the MII interface,"
+ " status %4.4x.\n", dev->name, new_reg1);
+ return -1;
+ }
+ }
+ duplex = ((negotiated & 0x0300) == 0x0100
+ || (negotiated & 0x00C0) == 0x0040);
+ /* 100baseTx-FD or 10T-FD, but not 100-HD */
+ if (tp->full_duplex != duplex) {
+ tp->full_duplex = duplex;
+ if (negotiated & 0x0380) /* 100mbps. */
+ tp->csr6 &= ~0x00400000;
+ if (tp->full_duplex) tp->csr6 |= FullDuplex;
+ else tp->csr6 &= ~FullDuplex;
+ /* Restart Rx then Tx+Rx so the new CSR6 duplex bit takes. */
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII "
+ "#%d link partner capability of %4.4x.\n",
+ dev->name, tp->full_duplex ? "full" : "half",
+ tp->phys[0], mii_reg5);
+ return 1;
+ }
+ return 0;
+}
+
+/* Default periodic media-monitor timer (tp->timer callback).
+   Polls CSR12 link status and, per chip family, swaps ports or walks
+   the EEPROM media table when the link beat is absent; re-arms
+   itself with a chip- and state-dependent interval. */
+static void tulip_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 csr12 = inl(ioaddr + CSR12);
+ int next_tick = 2*HZ;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
+ " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name, medianame[dev->if_port], (int)inl(ioaddr + CSR5),
+ (int)inl(ioaddr + CSR6), csr12, (int)inl(ioaddr + CSR13),
+ (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
+
+ switch (tp->chip_id) {
+ case DC21040:
+ /* 21040: just toggle between ports 0 and 2 on network error. */
+ if (!tp->medialock && (csr12 & 0x0002)) { /* Network error */
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO "%s: No link beat found.\n",
+ dev->name);
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ select_media(dev, 0);
+ dev->trans_start = jiffies;
+ }
+ break;
+ case DC21041:
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: 21041 media tick CSR12 %8.8x.\n",
+ dev->name, csr12);
+ if (tp->medialock) break;
+ switch (dev->if_port) {
+ case 0: case 3: case 4:
+ if (csr12 & 0x0004) { /*LnkFail */
+ /* 10baseT is dead. Check for activity on alternate port. */
+ tp->mediasense = 1;
+ if (csr12 & 0x0200)
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: No 21041 10baseT link beat, Media "
+ "switched to %s.\n",
+ dev->name, medianame[dev->if_port]);
+ outl(0, ioaddr + CSR13); /* Reset */
+ outl(t21041_csr14[dev->if_port], ioaddr + CSR14);
+ outl(t21041_csr15[dev->if_port], ioaddr + CSR15);
+ outl(t21041_csr13[dev->if_port], ioaddr + CSR13);
+ next_tick = 10*HZ; /* 2.4 sec. */
+ } else
+ next_tick = 30*HZ;
+ break;
+ case 1: /* 10base2 */
+ case 2: /* AUI */
+ if (csr12 & 0x0100) {
+ next_tick = (30*HZ); /* 30 sec. */
+ tp->mediasense = 0;
+ } else if ((csr12 & 0x0004) == 0) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: 21041 media switched to 10baseT.\n",
+ dev->name);
+ dev->if_port = 0;
+ select_media(dev, 0);
+ next_tick = (24*HZ)/10; /* 2.4 sec. */
+ } else if (tp->mediasense || (csr12 & 0x0002)) {
+ dev->if_port = 3 - dev->if_port; /* Swap ports. */
+ select_media(dev, 0);
+ next_tick = 20*HZ;
+ } else {
+ next_tick = 20*HZ;
+ }
+ break;
+ }
+ break;
+ case DC21140: case DC21142: case MX98713: case COMPEX9881: default: {
+ struct medialeaf *mleaf;
+ unsigned char *p;
+ if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */
+ /* Not much that can be done.
+    Assume this a generic MII or SYM transceiver. */
+ next_tick = 60*HZ;
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
+ "CSR12 0x%2.2x.\n",
+ dev->name, (int)inl(ioaddr + CSR6), csr12 & 0xff);
+ break;
+ }
+ mleaf = &tp->mtable->mleaf[tp->cur_index];
+ p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: case 4: {
+ /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */
+ int offset = mleaf->type == 4 ? 5 : 2;
+ s8 bitnum = p[offset];
+ if (p[offset+1] & 0x80) {
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG"%s: Transceiver monitor tick "
+ "CSR12=%#2.2x, no media sense.\n",
+ dev->name, csr12);
+ if (mleaf->type == 4) {
+ if (mleaf->media == 3 && (csr12 & 0x02))
+ goto select_next_media;
+ }
+ break;
+ }
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
+ " bit %d is %d, expecting %d.\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
+ /* Check that the specified bit has the proper value. */
+ if ((bitnum < 0) !=
+ ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Link beat detected for %s.\n",
+ dev->name, medianame[mleaf->media & MEDIA_MASK]);
+ if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
+ goto actually_mii;
+ break;
+ }
+ if (tp->medialock)
+ break;
+ /* Walk the media table backwards, skipping full-duplex-only
+    entries, and try the next candidate. */
+ select_next_media:
+ if (--tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
+ if (media_cap[dev->if_port] & MediaIsFD)
+ goto select_next_media; /* Skip FD entries. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: No link beat on media %s,"
+ " trying transceiver type %s.\n",
+ dev->name, medianame[mleaf->media & MEDIA_MASK],
+ medianame[tp->mtable->mleaf[tp->cur_index].media]);
+ select_media(dev, 0);
+ /* Restart the transmit process. */
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ next_tick = (24*HZ)/10;
+ break;
+ }
+ case 1: case 3: /* 21140, 21142 MII */
+ actually_mii:
+ check_duplex(dev);
+ next_tick = 60*HZ;
+ break;
+ case 2: /* 21142 serial block has no link beat. */
+ default:
+ break;
+ }
+ }
+ break;
+ }
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+/* Handle internal NWay transceivers uniquely.
+ These exist on the 21041, 21143 (in SYM mode) and the PNIC2.
+ */
+/* Media timer for chips with internal NWay autonegotiation.
+   Leaves a negotiated or locked session alone; otherwise, when
+   negotiation has not completed (CSR12 bits 14:12 != 101), probes
+   10/100 ports directly and restarts the 3-second retry cycle.
+   Also detects and recovers a hung transmitter. */
+static void nway_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr12 = inl(ioaddr + CSR12);
+ int next_tick = 60*HZ;
+ int new_csr6 = 0;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO"%s: N-Way autonegotiation status %8.8x, %s.\n",
+ dev->name, csr12, medianame[dev->if_port]);
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ check_duplex(dev);
+ } else if (tp->nwayset) {
+ /* Do not screw up a negotiated session! */
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ } else if (tp->medialock) {
+ ;
+ } else if (dev->if_port == 3) {
+ if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
+ "trying NWay.\n", dev->name, csr12);
+ nway_start(dev);
+ next_tick = 3*HZ;
+ }
+ } else if ((csr12 & 0x7000) != 0x5000) {
+ /* Negotiation failed. Search media types. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
+ dev->name, csr12);
+ if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ new_csr6 = 0x82420000;
+ dev->if_port = 0;
+ outl(0, ioaddr + CSR13);
+ outl(0x0003FFFF, ioaddr + CSR14);
+ outw(t21142_csr15[dev->if_port], ioaddr + CSR15);
+ outl(t21142_csr13[dev->if_port], ioaddr + CSR13);
+ } else {
+ /* Select 100mbps port to check for link beat. */
+ new_csr6 = 0x83860000;
+ dev->if_port = 3;
+ outl(0, ioaddr + CSR13);
+ outl(0x0003FF7F, ioaddr + CSR14);
+ outw(8, ioaddr + CSR15);
+ outl(1, ioaddr + CSR13);
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
+ dev->name, medianame[dev->if_port]);
+ /* Only restart Tx/Rx if the mode bits actually changed. */
+ if (new_csr6 != (tp->csr6 & ~0x20D7)) {
+ tp->csr6 &= 0x20D7;
+ tp->csr6 |= new_csr6;
+ outl(0x0301, ioaddr + CSR12);
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ }
+ next_tick = 3*HZ;
+ }
+ /* Watchdog: if packets are pending and nothing was sent for
+    TX_TIMEOUT jiffies, force a Tx-timeout recovery. */
+ if (tp->cur_tx - tp->dirty_tx > 0 &&
+ jiffies - dev->trans_start > TX_TIMEOUT) {
+ printk(KERN_WARNING "%s: Tx hung, %d vs. %d.\n",
+ dev->name, tp->cur_tx, tp->dirty_tx);
+ tulip_tx_timeout(dev);
+ }
+
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+/* (Re)start internal NWay autonegotiation: advertise
+   tp->sym_advertise via CSR14, reset the negotiation state flags,
+   and trigger negotiation through CSR12 (trigger value is
+   chip-dependent).  On PNIC2 only tp->csr6 is prepared; no CSR
+   writes are done here. */
+static void nway_start(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ /* Fold the advertised capability bits into the CSR14 layout. */
+ int csr14 = ((tp->sym_advertise & 0x0780) << 9) |
+ ((tp->sym_advertise&0x0020)<<1) | 0xffbf;
+
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+ if (tp->chip_id == PNIC2) {
+ tp->csr6 = 0x01000000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+ return;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Restarting internal NWay autonegotiation, "
+ "%8.8x.\n", dev->name, csr14);
+ outl(0x0001, ioaddr + CSR13);
+ outl(csr14, ioaddr + CSR14);
+ tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0)
+ | (tp->csr6 & 0x20ff);
+ outl(tp->csr6, ioaddr + CSR6);
+ if (tp->mtable && tp->mtable->csr15dir) {
+ outl(tp->mtable->csr15dir, ioaddr + CSR15);
+ outl(tp->mtable->csr15val, ioaddr + CSR15);
+ } else if (tp->chip_id != PNIC2)
+ outw(0x0008, ioaddr + CSR15);
+ if (tp->chip_id == DC21041) /* Trigger NWAY. */
+ outl(0xEF01, ioaddr + CSR12);
+ else
+ outl(0x1301, ioaddr + CSR12);
+}
+
+/* Link-status interrupt handler for NWay-capable chips (21143/
+   PNIC2).  Decodes the negotiation result from CSR12 (the link
+   partner's ability word is in the high half), picks the agreed
+   media/duplex, and restarts NWay when the link drops. */
+static void nway_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr12 = inl(ioaddr + CSR12);
+
+ if (tp->chip_id == PNIC2) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: PNIC-2 link status changed, CSR5/12/14 %8.8x"
+ " %8.8x, %8.8x.\n",
+ dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
+ dev->if_port = 5;
+ tp->lpar = csr12 >> 16;
+ tp->nwayset = 1;
+ tp->csr6 = 0x01000000 | (tp->csr6 & 0xffff);
+ outl(tp->csr6, ioaddr + CSR6);
+ return;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
+ "%8.8x.\n", dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
+
+ /* If NWay finished and we have a negotiated partner capability. */
+ if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
+ int setup_done = 0;
+ int negotiated = tp->sym_advertise & (csr12 >> 16);
+ tp->lpar = csr12 >> 16;
+ tp->nwayset = 1;
+ /* Pick the highest common capability (100FD > 100HD > 10FD > 10HD). */
+ if (negotiated & 0x0100) dev->if_port = 5;
+ else if (negotiated & 0x0080) dev->if_port = 3;
+ else if (negotiated & 0x0040) dev->if_port = 4;
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ tp->nwayset = 0;
+ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+ tp->full_duplex = (media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
+
+ if (tp->msg_level & NETIF_MSG_LINK) {
+ if (tp->nwayset)
+ printk(KERN_INFO "%s: Switching to %s based on link "
+ "negotiation %4.4x & %4.4x = %4.4x.\n",
+ dev->name, medianame[dev->if_port], tp->sym_advertise,
+ tp->lpar, negotiated);
+ else
+ printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
+ " link beat status %4.4x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ }
+
+ /* Prefer the media table entry for the chosen port, if any. */
+ if (tp->mtable) {
+ int i;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == dev->if_port) {
+ tp->cur_index = i;
+ select_media(dev, 0);
+ setup_done = 1;
+ break;
+ }
+ }
+ if ( ! setup_done) {
+ tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000)
+ | (tp->csr6 & 0x20ff);
+ if (tp->full_duplex)
+ tp->csr6 |= FullDuplex;
+ outl(1, ioaddr + CSR13);
+ }
+#if 0 /* Restart should not be needed. */
+ outl(tp->csr6 | 0x0000, ioaddr + CSR6);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
+ dev->name, inl(ioaddr + CSR5));
+#endif
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
+ dev->name, tp->csr6, (int)inl(ioaddr + CSR6),
+ (int)inl(ioaddr + CSR12));
+ } else if ((tp->nwayset && (csr5 & 0x08000000)
+ && (dev->if_port == 3 || dev->if_port == 5)
+ && (csr12 & 2) == 2) ||
+ (tp->nway && (csr5 & (TPLnkFail)))) {
+ /* Link blew? Maybe restart NWay. */
+ del_timer(&tp->timer);
+ nway_start(dev);
+ tp->timer.expires = jiffies + 3*HZ;
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ if (tp->msg_level & NETIF_MSG_LINK) /* TIMER? */
+ printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+ if ((csr12 & 2) && ! tp->medialock) {
+ del_timer(&tp->timer);
+ nway_start(dev);
+ tp->timer.expires = jiffies + 3*HZ;
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 5)
+ outl(inl(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+ } else if (dev->if_port == 0 || dev->if_port == 4) {
+ if ((csr12 & 4) == 0)
+ printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
+ dev->name);
+ } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
+ dev->name);
+ dev->if_port = 0;
+ } else if (tp->nwayset) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
+ dev->name, medianame[dev->if_port], tp->csr6);
+ } else { /* 100mbps link beat good. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
+ dev->name);
+ dev->if_port = 3;
+ tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
+ outl(0x0003FF7F, ioaddr + CSR14);
+ outl(0x0301, ioaddr + CSR12);
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+}
+
+/* Media timer for MXIC (Macronix) chips: optionally log the SIA
+   status register, then re-arm the timer one minute out.  Installed
+   as the tp->timer callback; DATA is the net_device pointer. */
+static void mxic_timer(unsigned long data)
+{
+ struct net_device *netdev = (struct net_device *)data;
+ struct tulip_private *priv = (struct tulip_private *)netdev->priv;
+ long iobase = netdev->base_addr;
+
+ if (priv->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", netdev->name,
+ (int)inl(iobase + CSR12));
+
+ /* Nothing to adjust here; just schedule the next tick. */
+ priv->timer.expires = jiffies + 60*HZ;
+ add_timer(&priv->timer);
+}
+
+/* Finish PNIC (LC82C168) autonegotiation: decode the PHY status word
+   from register 0xB8, set if_port and duplex from the sensed media,
+   and restart Tx/Rx only if the resulting CSR6 value changed. */
+static void pnic_do_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 phy_reg = inl(ioaddr + 0xB8);
+ u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+ if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ /* Highest sensed capability wins: 100FD > 100HD > 10FD > 10HD. */
+ if (phy_reg & 0x20000000) dev->if_port = 5;
+ else if (phy_reg & 0x40000000) dev->if_port = 3;
+ else if (phy_reg & 0x10000000) dev->if_port = 4;
+ else if (phy_reg & 0x08000000) dev->if_port = 0;
+ tp->nwayset = 1;
+ /* Odd ports are the 100mbps media. */
+ new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000;
+ outl(0x32 | (dev->if_port & 1), ioaddr + CSR12);
+ if (dev->if_port & 1)
+ outl(0x1F868, ioaddr + 0xB8);
+ if (phy_reg & 0x30000000) {
+ tp->full_duplex = 1;
+ new_csr6 |= FullDuplex;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ outl(tp->csr6 | RxOn, ioaddr + CSR6); /* Restart Tx */
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ dev->trans_start = jiffies;
+ }
+ }
+}
+
+/* Link-change interrupt handler for PNIC chips (installed as
+   tp->link_change).  On link failure it re-enables the link-pass interrupt
+   and, unless NWay just ran, restarts autonegotiation; on link pass it
+   finishes NWay and flips the interrupt mask back to watch for failure. */
+static void pnic_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int phy_reg = inl(ioaddr + 0xB8);
+
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
+ dev->name, phy_reg, csr5);
+ if (inl(ioaddr + CSR5) & TPLnkFail) {
+ /* Swap interrupt enables: stop watching for fail, watch for pass. */
+ outl((inl(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
+ /* Avoid restarting NWay within 1s of a previous restart. */
+ if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) {
+ tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
+ outl(tp->csr6, ioaddr + CSR6);
+ outl(0x30, ioaddr + CSR12);
+ outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ dev->trans_start = jiffies;
+ }
+ } else if (inl(ioaddr + CSR5) & TPLnkPass) {
+ pnic_do_nway(dev);
+ outl((inl(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7);
+ }
+}
+/* Periodic media monitor for PNIC chips.  For MII media it just rechecks
+   duplex; otherwise it polls the PNIC PHY register (0xB8) and CSR5 to detect
+   remote fault, NWay completion, or link-beat failure, toggling between
+   10baseT and 100baseTx when the current medium has no link. */
+static void pnic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ /* External MII transceiver: poll faster while renegotiating. */
+ if (check_duplex(dev) > 0)
+ next_tick = 3*HZ;
+ } else {
+ int csr12 = inl(ioaddr + CSR12);
+ int new_csr6 = tp->csr6 & ~0x40C40200;
+ int phy_reg = inl(ioaddr + 0xB8);
+ int csr5 = inl(ioaddr + CSR5);
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
+ "CSR5 %8.8x.\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
+ if (phy_reg & 0x04000000) { /* Remote link fault */
+ /* Restart autonegotiation and recheck in one second. */
+ outl(0x0201F078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ tp->nwayset = 0;
+ } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ pnic_do_nway(dev);
+ next_tick = 60*HZ;
+ } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
+ "CSR5 %8.8x, PHY %3.3x.\n",
+ dev->name, medianame[dev->if_port], csr12,
+ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + 0xB8));
+ next_tick = 3*HZ;
+ if (tp->medialock) {
+ /* User locked the media: never switch automatically. */
+ } else if (tp->nwayset && (dev->if_port & 1)) {
+ next_tick = 1*HZ;
+ } else if (dev->if_port == 0) {
+ /* 10baseT has no link: try 100baseTx. */
+ dev->if_port = 3;
+ outl(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ outl(0x1F868, ioaddr + 0xB8);
+ } else {
+ /* 100baseTx has no link: fall back to 10baseT. */
+ dev->if_port = 0;
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x1F078, ioaddr + 0xB8);
+ }
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ outl(tp->csr6 | RxOn, ioaddr + CSR6); /* Restart Tx */
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ dev->trans_start = jiffies;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Changing PNIC configuration to %s "
+ "%s-duplex, CSR6 %8.8x.\n",
+ dev->name, medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half", new_csr6);
+ }
+ }
+ }
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+/* Periodic media monitor for Comet (ADMtek) chips: the transceiver is a
+   real MII PHY, so we only log link status / partner ability (MII registers
+   1 and 5) and recheck the duplex setting every 60 seconds. */
+static void comet_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int next_tick = 60*HZ;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
+ "%4.4x.\n",
+ dev->name, mdio_read(dev, tp->phys[0], 1),
+ mdio_read(dev, tp->phys[0], 5));
+ check_duplex(dev);
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+/* Transmit-timeout handler.  Logs the chip state, performs chip-specific
+   media recovery (switch port, restart NWay, or step through the media
+   table), then restarts the Tx engine and triggers an immediate transmit
+   demand.  Updates dev->trans_start and counts the event in tx_errors. */
+static void tulip_tx_timeout(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ /* Do nothing -- the media monitor should handle this. */
+ int mii_bmsr = mdio_read(dev, tp->phys[0], 1);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_WARNING "%s: Transmit timeout using MII device,"
+ " status %4.4x.\n",
+ dev->name, mii_bmsr);
+ if ( ! (mii_bmsr & 0x0004)) { /* No link beat present */
+ /* Link is down: just note the time and report it; no reset. */
+ dev->trans_start = jiffies;
+ netif_link_down(dev);
+ return;
+ }
+ } else switch (tp->chip_id) {
+ case DC21040:
+ /* Toggle between 10baseT (0) and AUI (2) if media is not locked. */
+ if ( !tp->medialock && inl(ioaddr + CSR12) & 0x0002) {
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ printk(KERN_INFO "%s: transmit timed out, switching to "
+ "%s.\n",
+ dev->name, medianame[dev->if_port]);
+ select_media(dev, 0);
+ }
+ dev->trans_start = jiffies;
+ return; /* Note: not break! */
+ case DC21041: {
+ int csr12 = inl(ioaddr + CSR12);
+
+ printk(KERN_WARNING "%s: 21041 transmit timed out, status %8.8x, "
+ "CSR12 %8.8x, CSR13 %8.8x, CSR14 %8.8x, resetting...\n",
+ dev->name, (int)inl(ioaddr + CSR5), csr12,
+ (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14));
+ tp->mediasense = 1;
+ if ( ! tp->medialock) {
+ /* Rotate through 10baseT / AUI / BNC based on link sense. */
+ if (dev->if_port == 1 || dev->if_port == 2)
+ dev->if_port = (csr12 & 0x0004) ? 2 - dev->if_port : 0;
+ else
+ dev->if_port = 1;
+ select_media(dev, 0);
+ }
+ break;
+ }
+ case DC21142:
+ if (tp->nwayset) {
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, "
+ "SIA %8.8x %8.8x %8.8x %8.8x, restarting NWay .\n",
+ dev->name, (int)inl(ioaddr + CSR5),
+ (int)inl(ioaddr + CSR12), (int)inl(ioaddr + CSR13),
+ (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
+ nway_start(dev);
+ break;
+ }
+ /* Fall through. */
+ case DC21140: case MX98713: case COMPEX9881:
+ printk(KERN_WARNING "%s: %s transmit timed out, status %8.8x, "
+ "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
+ dev->name, tulip_tbl[tp->chip_id].chip_name,
+ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12),
+ (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14),
+ (int)inl(ioaddr + CSR15));
+ if ( ! tp->medialock && tp->mtable) {
+ /* Step backwards through the media table, skipping
+    full-duplex entries (FD cannot be auto-sensed). */
+ do
+ --tp->cur_index;
+ while (tp->cur_index >= 0
+ && (media_cap[tp->mtable->mleaf[tp->cur_index].media]
+ & MediaIsFD));
+ if (tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ select_media(dev, 0);
+ printk(KERN_WARNING "%s: transmit timed out, switching to %s "
+ "media.\n", dev->name, medianame[dev->if_port]);
+ }
+ break;
+ case PNIC2:
+ printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
+ "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
+ dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6),
+ (int)inl(ioaddr + CSR7), (int)inl(ioaddr + CSR12));
+ break;
+ default:
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
+ "%8.8x, resetting...\n",
+ dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12));
+ }
+
+#if defined(way_too_many_messages) && defined(__i386__)
+ if (tp->msg_level & NETIF_MSG_TXERR) {
+ int i;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+ int j;
+ printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
+ "%2.2x %2.2x %2.2x.\n",
+ i, (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
+ /* Check the bound BEFORE reading buf[j]: the original order
+    (buf[j] != 0xee && j < 1600) read one byte past the limit
+    when no 0xee terminator was present in the buffer. */
+ for (j = 0; j < 1600 && buf[j] != 0xee; j++)
+ if (j < 100) printk(" %2.2x", buf[j]);
+ printk(" j=%d.\n", j);
+ }
+ printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
+ printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Stop and restart the Tx process.
+ The pwr_event approach of empty/init_rings() may be better... */
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+
+ dev->trans_start = jiffies;
+ tp->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx descriptor rings, along with various 'dev' bits.
+   Rx descriptors are chained via buffer2 with the last entry wrapping back
+   to the first; each Rx slot gets a freshly allocated skb.  Tx descriptors
+   are only chained and cleared -- they are filled in at transmit time. */
+static void tulip_init_ring(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+ tp->rx_dead = tp->tx_full = 0;
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
+ /* Buffer must hold the MTU plus 14-byte header and 4-byte CRC,
+    but never less than the default packet buffer size. */
+ tp->rx_buf_sz = dev->mtu + 18;
+ if (tp->rx_buf_sz < PKT_BUF_SZ)
+ tp->rx_buf_sz = PKT_BUF_SZ;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0x00000000;
+ tp->rx_ring[i].length = cpu_to_le32(tp->rx_buf_sz);
+ tp->rx_ring[i].buffer2 = virt_to_le32desc(&tp->rx_ring[i+1]);
+ tp->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ tp->rx_ring[i-1].length |= cpu_to_le32(DESC_RING_WRAP);
+ tp->rx_ring[i-1].buffer2 = virt_to_le32desc(&tp->rx_ring[0]);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* Note the receive buffer must be longword aligned.
+ dev_alloc_skb() provides 16 byte alignment. But do *not*
+ use skb_reserve() to align the IP header! */
+ struct sk_buff *skb = dev_alloc_skb(tp->rx_buf_sz);
+ tp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[i].status = cpu_to_le32(DescOwned);
+ tp->rx_ring[i].buffer1 = virt_to_le32desc(skb->tail);
+ }
+ /* Record the shortfall: if any allocation failed, dirty_rx wraps
+    so the refill loop in tulip_rx() will retry the missing slots. */
+ tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_skbuff[i] = 0;
+ tp->tx_ring[i].status = 0x00000000;
+ tp->tx_ring[i].buffer2 = virt_to_le32desc(&tp->tx_ring[i+1]);
+ }
+ tp->tx_ring[i-1].buffer2 = virt_to_le32desc(&tp->tx_ring[0]);
+}
+
+/* Queue one skb for transmission.  Fills the next Tx descriptor, selects
+   the interrupt policy from queue depth (interrupt only on every
+   TX_QUEUE_LEN/2-th packet and when the queue fills), hands the descriptor
+   to the chip, and pokes CSR1 for an immediate transmit poll.
+   Returns 0 on success, 1 if the queue was paused (caller requeues). */
+static int
+tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry, q_used_cnt;
+ u32 flag;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tulip_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the field
+ with the ownership bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+ q_used_cnt = tp->cur_tx - tp->dirty_tx;
+
+ tp->tx_skbuff[entry] = skb;
+ tp->tx_ring[entry].buffer1 = virt_to_le32desc(skb->data);
+
+ /* 0x60000000 = last/first segment, no completion interrupt;
+    0xe0000000 = same but request a Tx-done interrupt. */
+ if (q_used_cnt < TX_QUEUE_LEN/2) {/* Typical path */
+ flag = 0x60000000; /* No interrupt */
+ } else if (q_used_cnt == TX_QUEUE_LEN/2) {
+ flag = 0xe0000000; /* Tx-done intr. */
+ } else if (q_used_cnt < TX_QUEUE_LEN) {
+ flag = 0x60000000; /* No Tx-done intr. */
+ } else { /* Leave room for set_rx_mode() to fill entries. */
+ tp->tx_full = 1;
+ flag = 0xe0000000; /* Tx-done intr. */
+ }
+ /* The last ring slot always interrupts and carries the wrap bit. */
+ if (entry == TX_RING_SIZE-1)
+ flag = 0xe0000000 | DESC_RING_WRAP;
+
+ tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+ /* Ownership handoff to the chip must be the last descriptor write. */
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ tp->cur_tx++;
+ if ( ! tp->tx_full)
+ netif_unpause_tx_queue(dev);
+ else {
+ netif_stop_tx_queue(dev);
+ /* Check for a just-cleared queue race.
+ Note that this code path differs from other drivers because we
+ set the tx_full flag early. */
+ if ( ! tp->tx_full)
+ netif_resume_tx_queue(dev);
+ }
+
+ dev->trans_start = jiffies;
+ /* Trigger an immediate transmit demand. */
+ outl(0, dev->base_addr + CSR1);
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread.  It loops on CSR5 until no Normal/Abnormal summary
+ bit is set or the work budget is exhausted, acknowledging each source,
+ reaping completed Tx descriptors, refilling Rx, and recovering from
+ abnormal conditions (Tx underflow, stopped engines, PCI errors). */
+static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr5, work_budget = tp->max_interrupt_work;
+
+ do {
+ csr5 = inl(ioaddr + CSR5);
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+ break;
+
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ dev->name, csr5, (int)inl(dev->base_addr + CSR5));
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+ if (csr5 & (RxIntr | RxNoBuf))
+ work_budget -= tulip_rx(dev);
+
+ if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
+ unsigned int dirty_tx;
+
+ /* Reap every descriptor the chip has released back to us. */
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still has not been Txed */
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_skbuff[entry] == NULL)
+ continue;
+
+ if (status & 0x8000) {
+ /* There was a major error, log it. */
+ if (tp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.tx_errors++;
+ if (status & 0x4104) tp->stats.tx_aborted_errors++;
+ if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+ if (status & 0x0200) tp->stats.tx_window_errors++;
+ if (status & 0x0002) tp->stats.tx_fifo_errors++;
+ if ((status & 0x0080) && tp->full_duplex == 0)
+ tp->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+ if (status & 0x0100) tp->stats.collisions16++;
+#endif
+ } else {
+ if (tp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit complete, status "
+ "%8.8x.\n", dev->name, status);
+#ifdef ETHER_STATS
+ if (status & 0x0001) tp->stats.tx_deferred++;
+#endif
+#if LINUX_VERSION_CODE > 0x20127
+ tp->stats.tx_bytes += tp->tx_skbuff[entry]->len;
+#endif
+ /* Bits 3-6 of a good Tx status are the collision count. */
+ tp->stats.collisions += (status >> 3) & 15;
+ tp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dev_free_skb_irq(tp->tx_skbuff[entry]);
+ tp->tx_skbuff[entry] = 0;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (tp->tx_full && tp->cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ tp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ tp->dirty_tx = dirty_tx;
+ }
+
+ if (tp->rx_dead) {
+ /* Rx stalled earlier for lack of buffers: try to restart it. */
+ tulip_rx(dev);
+ if (tp->cur_rx - tp->dirty_rx < RX_RING_SIZE - 3) {
+ printk(KERN_ERR "%s: Restarted Rx at %d / %d.\n",
+ dev->name, tp->cur_rx, tp->dirty_rx);
+ outl(0, ioaddr + CSR2); /* Rx poll demand */
+ tp->rx_dead = 0;
+ }
+ }
+
+ /* Log errors. */
+ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
+ /* 0xffffffff means the card has been removed (e.g. CardBus). */
+ if (csr5 == 0xffffffff)
+ break;
+ if (csr5 & TxJabber) tp->stats.tx_errors++;
+ if (csr5 & PCIBusError) {
+ printk(KERN_ERR "%s: PCI Fatal Bus Error, %8.8x.\n",
+ dev->name, csr5);
+ }
+ if (csr5 & TxFIFOUnderflow) {
+ if ((tp->csr6 & 0xC000) != 0xC000)
+ tp->csr6 += 0x4000; /* Bump up the Tx threshold */
+ else
+ tp->csr6 |= 0x00200000; /* Store-n-forward. */
+ if (tp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_WARNING "%s: Tx threshold increased, "
+ "new CSR6 %x.\n", dev->name, tp->csr6);
+ }
+ if (csr5 & TxDied) {
+ /* This is normal when changing Tx modes. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_WARNING "%s: The transmitter stopped."
+ " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+ dev->name, csr5, (int)inl(ioaddr + CSR6), tp->csr6);
+ }
+ if (csr5 & (TxDied | TxFIFOUnderflow | PCIBusError)) {
+ /* Restart the transmit process. */
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+ if (csr5 & (RxStopped | RxNoBuf)) {
+ /* Missed a Rx frame or mode change. */
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+ if (tp->flags & COMET_MAC_ADDR) {
+ outl(tp->mc_filter[0], ioaddr + 0xAC);
+ outl(tp->mc_filter[1], ioaddr + 0xB0);
+ }
+ tulip_rx(dev);
+ if (csr5 & RxNoBuf)
+ tp->rx_dead = 1;
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+ if (csr5 & TimerInt) {
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+ dev->name, csr5);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ }
+ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
+ if (tp->link_change)
+ (tp->link_change)(dev, csr5);
+ }
+ /* Clear all error sources, included undocumented ones! */
+ outl(0x0800f7ba, ioaddr + CSR5);
+ }
+ if (--work_budget < 0) {
+ if (tp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_WARNING "%s: Too much work during an interrupt, "
+ "csr5=0x%8.8x.\n", dev->name, csr5);
+ /* Acknowledge all interrupt sources. */
+ outl(0x8001ffff, ioaddr + CSR5);
+ if (tp->flags & HAS_INTR_MITIGATION) {
+ /* Josip Loncaric at ICASE did extensive experimentation
+ to develop a good interrupt mitigation setting.*/
+ outl(0x8b240000, ioaddr + CSR11);
+ } else {
+ /* Mask all interrupting sources, set timer to re-enable. */
+ outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
+ ioaddr + CSR7);
+ outl(0x0012, ioaddr + CSR11);
+ }
+ break;
+ }
+ } while (1);
+
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+ dev->name, (int)inl(ioaddr + CSR5));
+
+ return;
+}
+
+/* Receive-side work: walk the Rx ring while the chip has released
+   descriptors to us, pass good packets up the stack (copying small ones
+   below rx_copybreak into a fresh skb), count errors, and finally refill
+   any empty ring slots.  Returns the amount of work done (used to charge
+   the interrupt handler's work budget). */
+static int tulip_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+ int work_done = 0;
+
+ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ dev->name, entry, status);
+ if (--rx_work_limit < 0)
+ break;
+ /* A good, complete frame has first+last descriptor bits set and
+    no error summary: (status & 0x38008300) == 0x0300. */
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ tp->stats.rx_length_errors++;
+ }
+ } else if (status & RxDescFatalErr) {
+ /* There was a fatal error. */
+ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) tp->stats.rx_length_errors++;
+ if (status & 0x0004) tp->stats.rx_frame_errors++;
+ if (status & 0x0002) tp->stats.rx_crc_errors++;
+ if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ }
+ } else {
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (pkt_len > 1518) {
+ printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+ dev->name, pkt_len, pkt_len);
+ pkt_len = 1518;
+ tp->stats.rx_length_errors++;
+ }
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < tp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if (LINUX_VERSION_CODE >= 0x20100)
+ eth_copy_and_sum(skb, tp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), tp->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ work_done++;
+ } else { /* Pass up the skb already on the Rx ring. */
+ /* The ring slot is left empty; the refill loop below
+    allocates a replacement skb for it. */
+ skb_put(skb = tp->rx_skbuff[entry], pkt_len);
+ tp->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ tp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ tp->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+ skb = tp->rx_skbuff[entry] = dev_alloc_skb(tp->rx_buf_sz);
+ if (skb == NULL) {
+ /* Only complain when the whole ring is empty. */
+ if (tp->cur_rx - tp->dirty_rx == RX_RING_SIZE)
+ printk(KERN_ERR "%s: No kernel memory to allocate "
+ "receive buffers.\n", dev->name);
+ break;
+ }
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[entry].buffer1 = virt_to_le32desc(skb->tail);
+ work_done++;
+ }
+ /* Return ownership of the descriptor to the chip. */
+ tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+ }
+
+ return work_done;
+}
+
+/* Release every skb attached to the Rx and Tx rings and poison the Rx
+   descriptors so the chip cannot DMA into freed memory.  Called from
+   tulip_close() and the suspend path; the chip must already be stopped. */
+static void empty_rings(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->rx_skbuff[i];
+ tp->rx_skbuff[i] = 0;
+ tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
+ tp->rx_ring[i].length = 0;
+ tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
+ if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+ skb->free = 1;
+#endif
+ dev_free_skb(skb);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (tp->tx_skbuff[i])
+ dev_free_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = 0;
+ }
+}
+
+/* Shut the interface down: mask interrupts, stop the Tx/Rx engines,
+   harvest the final missed-frame count, kill the media timer, release the
+   IRQ and ring buffers, and put the chip into low-power snooze mode.
+   Always returns 0. */
+static int tulip_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+ netif_stop_tx_queue(dev);
+
+ if (tp->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, (int)inl(ioaddr + CSR5));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x00000000, ioaddr + CSR7);
+ /* Stop the Tx and Rx processes. */
+ outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
+ /* 21040 -- Leave the card in 10baseT state. */
+ if (tp->chip_id == DC21040)
+ outl(0x00000004, ioaddr + CSR13);
+
+ /* 0xffffffff means the card is already gone (e.g. CardBus eject). */
+ if (inl(ioaddr + CSR6) != 0xffffffff)
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+ del_timer(&tp->timer);
+
+ free_irq(dev->irq, dev);
+
+ dev->if_port = tp->saved_if_port;
+
+ empty_rings(dev);
+ /* Leave the driver in snooze, not sleep, mode. */
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Return the device statistics, first folding the chip's missed-frame
+   counter (low 16 bits of CSR8) into rx_missed_errors.  The CSR8 read is
+   skipped when the interface is down or the card has been removed
+   (register reads back as 0xffffffff). */
+static struct net_device_stats *tulip_get_stats(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr8 = inl(ioaddr + CSR8);
+
+ if (netif_running(dev) && csr8 != 0xffffffff)
+ tp->stats.rx_missed_errors += (u16)csr8;
+
+ return &tp->stats;
+}
+
+#ifdef HAVE_PRIVATE_IOCTL
+/* Provide ioctl() calls to examine the MII xcvr state.
+ We emulate MII management registers for chips without a real MII.
+ The two numeric constants per case exist because some clueless person
+ changed the value for the symbolic name.
+ */
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+ unsigned int phy = tp->phys[0];
+ unsigned int regnum = data[1];
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ if (tp->mii_cnt)
+ data[0] = phy;
+ else if (tp->flags & HAS_NWAY)
+ data[0] = 32; /* Pseudo-PHY address for NWay-capable chips. */
+ else if (tp->chip_id == COMET)
+ data[0] = 1;
+ else
+ return -ENODEV;
+ /* Deliberate fall through: also read the requested register. */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ if (data[0] == 32 && (tp->flags & HAS_NWAY)) {
+ /* No real MII: synthesize register values from the SIA CSRs. */
+ int csr12 = inl(ioaddr + CSR12);
+ int csr14 = inl(ioaddr + CSR14);
+ switch (regnum) {
+ case 0:
+ if (((csr14<<5) & 0x1000) ||
+ (dev->if_port == 5 && tp->nwayset))
+ data[3] = 0x1000;
+ else
+ data[3] = (media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
+ | (media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
+ break;
+ case 1:
+ data[3] = 0x1848 + ((csr12&0x7000) == 0x5000 ? 0x20 : 0)
+ + ((csr12&0x06) == 6 ? 0 : 4);
+ if (tp->chip_id != DC21041)
+ data[3] |= 0x6048;
+ break;
+ case 4: {
+ /* Advertised value, bogus 10baseTx-FD value from CSR6. */
+ data[3] = ((inl(ioaddr + CSR6)>>3)&0x0040)+((csr14>>1)&0x20)+1;
+ if (tp->chip_id != DC21041)
+ data[3] |= ((csr14>>9)&0x03C0);
+ break;
+ }
+ case 5: data[3] = tp->lpar; break;
+ default: data[3] = 0; break;
+ }
+ } else {
+ data[3] = mdio_read(dev, data[0] & 0x1f, regnum);
+ }
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (regnum & ~0x1f)
+ return -EINVAL;
+ if (data[0] == phy) {
+ /* Track writes that affect our duplex / advertising state. */
+ u16 value = data[2];
+ switch (regnum) {
+ case 0: /* Check for autonegotiation on or reset. */
+ tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (tp->full_duplex_lock)
+ tp->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: tp->mii_advertise = data[2]; break;
+ }
+ }
+ if (data[0] == 32 && (tp->flags & HAS_NWAY)) {
+ u16 value = data[2];
+ if (regnum == 0) {
+ if ((value & 0x1200) == 0x1200)
+ nway_start(dev);
+ } else if (regnum == 4)
+ tp->sym_advertise = value;
+ } else {
+ mdio_write(dev, data[0] & 0x1f, regnum, data[2]);
+ }
+ return 0;
+ case SIOCGPARAMS:
+ /* Driver-private: report the tunable parameters. */
+ data32[0] = tp->msg_level;
+ data32[1] = tp->multicast_filter_limit;
+ data32[2] = tp->max_interrupt_work;
+ data32[3] = tp->rx_copybreak;
+ data32[4] = inl(ioaddr + CSR11);
+ return 0;
+ case SIOCSPARAMS:
+ /* Driver-private: set the tunable parameters (root only). */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ tp->msg_level = data32[0];
+ tp->multicast_filter_limit = data32[1];
+ tp->max_interrupt_work = data32[2];
+ tp->rx_copybreak = data32[3];
+ if (tp->flags & HAS_INTR_MITIGATION) {
+ u32 *d = (u32 *)&rq->ifr_data;
+ outl(data32[4], ioaddr + CSR11);
+ printk(KERN_NOTICE "%s: Set interrupt mitigate paramters %8.8x.\n",
+ dev->name, d[0]);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+#endif /* HAVE_PRIVATE_IOCTL */
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+/* Bit-serial little-endian CRC-32 over 'length' bytes of 'data',
+   starting from the all-ones preset.  Used for multicast hash filter
+   bit selection; too slow for bulk data (see comment above). */
+static inline u32 ether_crc_le(int length, unsigned char *data)
+{
+ u32 crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ /* Process one octet LSB-first, reflecting the CRC as we go. */
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+/* Bit-serial big-endian (non-reflected) CRC-32 over 'length' bytes,
+   all-ones preset.  The top 6 bits of the result select the hash
+   filter bit on genuine DEC Tulip chips. */
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1; /* All-ones initial value. */
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ /* Shift the CRC left, folding in one input bit per step. */
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
+
+/* Program the receive address filter.  Four strategies, in order:
+   promiscuous / all-multicast CSR6 bits, a 64-bit hash written directly to
+   chip registers (ASIX / Comet work-alikes with MC_HASH_ONLY), or a setup
+   frame queued on the Tx ring (perfect filter for <= 14 multicast
+   addresses, 512-bit hash table otherwise). */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr6 = inl(ioaddr + CSR6) & ~0x00D5;
+
+ /* Clear the filter-mode bits before selecting the new mode. */
+ tp->csr6 &= ~0x00D5;
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ } else if ((dev->mc_count > tp->multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else if (tp->flags & MC_HASH_ONLY) {
+ /* Some work-alikes have only a 64-entry hash filter table. */
+ /* Should verify correctness on big-endian/__powerpc__ */
+ struct dev_mc_list *mclist;
+ int i;
+ if (dev->mc_count > tp->multicast_filter_limit) {
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else {
+ u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
+ int filterbit;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ /* Comet uses the little-endian CRC directly; others use
+    the top 6 bits of the big-endian CRC. */
+ if (tp->flags & COMET_MAC_ADDR)
+ filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ else
+ filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit &= 0x3f;
+ set_bit(filterbit, mc_filter);
+ if (tp->msg_level & NETIF_MSG_RXFILTER)
+ printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
+ mclist->dmi_addr[0], mclist->dmi_addr[1],
+ mclist->dmi_addr[2], mclist->dmi_addr[3],
+ mclist->dmi_addr[4], mclist->dmi_addr[5],
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ }
+ if (mc_filter[0] == tp->mc_filter[0] &&
+ mc_filter[1] == tp->mc_filter[1])
+ ; /* No change. */
+ else if (tp->flags & IS_ASIX) {
+ /* ASIX exposes the hash via indexed CSR13/CSR14 writes. */
+ outl(2, ioaddr + CSR13);
+ outl(mc_filter[0], ioaddr + CSR14);
+ outl(3, ioaddr + CSR13);
+ outl(mc_filter[1], ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(mc_filter[0], ioaddr + 0xAC);
+ outl(mc_filter[1], ioaddr + 0xB0);
+ }
+ tp->mc_filter[0] = mc_filter[0];
+ tp->mc_filter[1] = mc_filter[1];
+ }
+ } else {
+ u16 *eaddrs, *setup_frm = tp->setup_frame;
+ struct dev_mc_list *mclist;
+ u32 tx_flags = 0x08000000 | 192;
+ int i;
+
+ /* Note that only the low-address shortword of setup_frame is valid!
+ The values are doubled for big-endian architectures. */
+ if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ u16 hash_table[32];
+ tx_flags = 0x08400000 | 192; /* Use hash filter. */
+ memset(hash_table, 0, sizeof(hash_table));
+ set_bit(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
+ hash_table);
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
+ }
+ setup_frm = &tp->setup_frame[13*6];
+ } else {
+ /* We have <= 14 addresses so we can use the wonderful
+ 16 address perfect filtering of the Tulip. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15-i)*12);
+ setup_frm = &tp->setup_frame[15*6];
+ }
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+ /* Now add this frame to the Tx list. */
+ if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+ /* Same setup recently queued, we need not add it. */
+ } else {
+ unsigned long flags;
+ unsigned int entry;
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+
+ if (entry != 0) {
+ /* Avoid a chip errata by prefixing a dummy entry. */
+ tp->tx_skbuff[entry] = 0;
+ tp->tx_ring[entry].length =
+ (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP):0;
+ tp->tx_ring[entry].buffer1 = 0;
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+ }
+
+ /* NULL skb marks this as a setup frame for the Tx reaper. */
+ tp->tx_skbuff[entry] = 0;
+ /* Put the setup frame on the Tx list. */
+ if (entry == TX_RING_SIZE-1)
+ tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
+ tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
+ tp->tx_ring[entry].buffer1 = virt_to_le32desc(tp->setup_frame);
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
+ netif_stop_tx_queue(dev);
+ tp->tx_full = 1;
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+ }
+ }
+ outl(csr6, ioaddr + CSR6);
+}
+
+
+static int tulip_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk("%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND: {
+ int csr6 = inl(ioaddr + CSR6);
+ /* Disable interrupts, stop the chip, gather stats. */
+ if (csr6 != 0xffffffff) {
+ int csr8 = inl(ioaddr + CSR8);
+ outl(0x00000000, ioaddr + CSR7);
+ outl(csr6 & ~TxOn & ~RxOn, ioaddr + CSR6);
+ tp->stats.rx_missed_errors += (unsigned short)csr8;
+ }
+ empty_rings(dev);
+ /* Put the 21143 into sleep mode. */
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40,0x80000000);
+ break;
+ }
+ case DRV_RESUME:
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0x0000);
+ outl(tp->csr0, ioaddr + CSR0);
+ tulip_init_ring(dev);
+ outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+ outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+ if (tp->mtable && tp->mtable->has_mii)
+ select_media(dev, 1);
+ tp->csr6 = 0x820E0000;
+ dev->if_port = 11;
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ } else if (! tp->medialock)
+ nway_start(dev);
+ else
+ select_media(dev, 1);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ outl(0, ioaddr + CSR2); /* Rx poll demand */
+ set_rx_mode(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ printk(KERN_ERR "%s: Tulip CardBus interface was detached while "
+ "still active.\n", dev->name);
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ if (tp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_DEBUG "%s: Unregistering device.\n", dev->name);
+ unregister_netdev(dev);
+#ifdef USE_IO_OPS
+ release_region(dev->base_addr, pci_id_tbl[tp->chip_id].io_size);
+#else
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (tp->priv_addr)
+ kfree(tp->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+static dev_node_t *tulip_attach(dev_locator_t *loc)
+{
+ struct net_device *dev;
+ long ioaddr;
+ struct pci_dev *pdev;
+ u8 bus, devfn, irq;
+ u32 dev_id;
+ u32 pciaddr;
+ int i, chip_id = 4; /* DC21143 */
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ printk(KERN_INFO "tulip_attach(bus %d, function %d)\n", bus, devfn);
+ pdev = pci_find_slot(bus, devfn);
+#ifdef USE_IO_OPS
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &pciaddr);
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &pciaddr);
+ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_id_tbl[DC21142].io_size);
+#endif
+ pci_read_config_dword(pdev, 0, &dev_id);
+ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &irq);
+ if (ioaddr == 0 || irq == 0) {
+ printk(KERN_ERR "The Tulip CardBus Ethernet interface at %d/%d was "
+ "not assigned an %s.\n"
+ KERN_ERR " It will not be activated.\n",
+ bus, devfn, ioaddr == 0 ? "address" : "IRQ");
+ return NULL;
+ }
+ for (i = 0; pci_id_tbl[i].id.pci; i++) {
+ if (pci_id_tbl[i].id.pci == (dev_id & pci_id_tbl[i].id.pci_mask)) {
+ chip_id = i; break;
+ }
+ }
+ dev = tulip_probe1(pdev, NULL, ioaddr, irq, chip_id, 0);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+ node->major = node->minor = 0;
+ node->next = NULL;
+ MOD_INC_USE_COUNT;
+ return node;
+ }
+ return NULL;
+}
+
+static void tulip_suspend(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_suspend(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) {
+ tulip_pwr_event(*devp, DRV_SUSPEND);
+ break;
+ }
+ }
+}
+
+static void tulip_resume(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_resume(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) {
+ tulip_pwr_event(*devp, DRV_RESUME);
+ break;
+ }
+ }
+}
+
+static void tulip_detach(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_detach(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ struct tulip_private *tp = (struct tulip_private *)(*devp)->priv;
+ unregister_netdev(*devp);
+#ifdef USE_IO_OPS
+ release_region((*devp)->base_addr, pci_id_tbl[DC21142].io_size);
+#else
+ iounmap((char *)(*devp)->base_addr);
+#endif
+ kfree(*devp);
+ if (tp->priv_addr)
+ kfree(tp->priv_addr);
+ *devp = *next;
+ kfree(node);
+ MOD_DEC_USE_COUNT;
+ }
+}
+
+struct driver_operations tulip_ops = {
+ "tulip_cb", tulip_attach, tulip_suspend, tulip_resume, tulip_detach
+};
+
+#endif /* Cardbus support */
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+ register_driver(&tulip_ops);
+ return 0;
+#else
+ return pci_drv_register(&tulip_drv_id, NULL);
+#endif
+ reverse_probe = 0; /* Not used. */
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&tulip_ops);
+#else
+ pci_drv_unregister(&tulip_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_tulip_dev) {
+ struct tulip_private *tp = (struct tulip_private*)root_tulip_dev->priv;
+ unregister_netdev(root_tulip_dev);
+#ifdef USE_IO_OPS
+ release_region(root_tulip_dev->base_addr,
+ pci_id_tbl[tp->chip_id].io_size);
+#else
+ iounmap((char *)root_tulip_dev->base_addr);
+#endif
+ next_dev = tp->next_module;
+ if (tp->priv_addr)
+ kfree(tp->priv_addr);
+ kfree(root_tulip_dev);
+ root_tulip_dev = next_dev;
+ }
+}
+#else
+int tulip_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&tulip_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+ reverse_probe = 0; /* Not used. */
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` tulip.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c -o tulip_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/via-rhine.c b/linux/src/drivers/net/via-rhine.c
new file mode 100644
index 0000000..4d7fceb
--- /dev/null
+++ b/linux/src/drivers/net/via-rhine.c
@@ -0,0 +1,1427 @@
+/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is designed for the VIA VT86c100A Rhine-II PCI Fast Ethernet
+ controller. It also works with the older 3043 Rhine-I chip.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/via-rhine.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"via-rhine.c:v1.16 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/via-rhine.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: via_rhine_probe
+config-in: tristate 'VIA "Rhine" vt86c100, vt3043, and vt3065 series PCI Ethernet support' CONFIG_VIA_RHINE
+
+c-help-name: VIA Rhine series PCI Ethernet support
+c-help-symbol: CONFIG_VIA_RHINE
+c-help: This driver is for the VIA Rhine (v3043) and Rhine-II
+c-help: (vt3065 AKA vt86c100) network adapter chip series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/via-rhine.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Rhine has a 64 element 8390-like hash table. */
+static const int multicast_filter_limit = 32;
+
+/* Operational parameters that are set at compile time. */
+
+/* Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed bus+endian portability operations. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+/* This driver was written to use PCI memory space, however most versions
+ of the Rhine only work correctly with I/O space accesses. */
+#if defined(VIA_USE_MEMORY)
+#warning Many adapters using the VIA Rhine chip are not configured to work
+#warning with PCI memory space accesses.
+#else
+#define USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex "
+ "(deprecated, use options[] instead).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
+controller.
+
+II. Board-specific settings
+
+Boards with this chip are functional only in a bus-master PCI slot.
+
+Many operational settings are loaded from the EEPROM to the Config word at
+offset 0x78. This driver assumes that they are correct.
+If this driver is compiled to use PCI memory space operations the EEPROM
+must be configured to enable memory ops.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver attempts to use a zero-copy receive and transmit scheme.
+
+Alas, all data buffers are required to start on a 32 bit boundary, so
+the driver must often copy transmit packets into bounce buffers.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in the last phase of netdev_rx().
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+Since the VIA chips are only able to transfer data to buffers on 32 bit
+boundaries, the the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. Copying these unaligned buffers
+has the beneficial effect of 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+This driver was originally written using a preliminary VT86C100A manual
+from
+ http://www.via.com.tw/
+The usual background material was used:
+ http://www.scyld.com/expert/100mbps.html
+ http://scyld.com/expert/NWay.html
+
+Additional information is now available, especially for the newer chips.
+ http://www.via.com.tw/en/Networking/DS6105LOM100.pdf
+
+IVc. Errata
+
+The VT86C100A manual is not reliable information.
+The 3043 chip does not handle unaligned transmit or receive buffers,
+resulting in significant performance degradation for bounce buffer
+copies on transmit and unaligned IP headers on receive.
+The chip does not pad to minimum transmit length.
+
+There is a bug with the transmit descriptor pointer handling when the
+chip encounters a transmit error.
+
+*/
+
+
+
+static void *via_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int via_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags {
+ CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4, HasV1TxStat=8,
+ ReqTxAlign=0x10, HasWOL=0x20, HasIPChecksum=0x40, HasVLAN=0x80,
+
+};
+
+#if defined(VIA_USE_MEMORY)
+#define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
+#define RHINE_I_IOSIZE 128
+#define RHINEII_IOSIZE 4096
+#else
+#define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
+#define RHINE_I_IOSIZE 128
+#define RHINEII_IOSIZE 256
+#endif
+
+static struct pci_id_info pci_tbl[] = {
+ { "VIA VT3043 Rhine", { 0x30431106, 0xffffffff,},
+ RHINE_IOTYPE, RHINE_I_IOSIZE, CanHaveMII | ReqTxAlign | HasV1TxStat },
+ { "VIA VT86C100A Rhine", { 0x61001106, 0xffffffff,},
+ RHINE_IOTYPE, RHINE_I_IOSIZE, CanHaveMII | ReqTxAlign | HasV1TxStat },
+ { "VIA VT6102 Rhine-II", { 0x30651106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
+ { "VIA VT6105LOM Rhine-III (3106)", { 0x31061106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
+ /* Duplicate entry, with 'M' features enabled. */
+ { "VIA VT6105M Rhine-III (3106)", { 0x31061106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII|HasWOL|HasIPChecksum|HasVLAN},
+ { "VIA VT6105M Rhine-III (3053 prototype)", { 0x30531106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info via_rhine_drv_id = {
+ "via-rhine", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
+ via_probe1, via_pwr_event
+};
+
+/* Offsets to the device registers.
+*/
+enum register_offsets {
+ StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+ IntrStatus=0x0C, IntrEnable=0x0E,
+ MulticastFilter0=0x10, MulticastFilter1=0x14,
+ RxRingPtr=0x18, TxRingPtr=0x1C,
+ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+ MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
+ Config=0x78, ConfigA=0x7A, RxMissed=0x7C, RxCRCErrs=0x7E,
+ StickyHW=0x83, WOLcrClr=0xA4, WOLcgClr=0xA7, PwrcsrClr=0xAC,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
+ IntrTxDone=0x0002, IntrTxAbort=0x0008, IntrTxUnderrun=0x0010,
+ IntrPCIErr=0x0040,
+ IntrStatsMax=0x0080, IntrRxEarly=0x0100, IntrMIIChange=0x0200,
+ IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+ IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+ IntrRxWakeUp=0x8000,
+ IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+ s32 rx_status;
+ u32 desc_length;
+ u32 addr;
+ u32 next_desc;
+};
+struct tx_desc {
+ s32 tx_status;
+ u32 desc_length;
+ u32 addr;
+ u32 next_desc;
+};
+
+/* Bits in *_desc.status */
+enum rx_status_bits {
+ RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F};
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndPacket=0x4000, DescIntr=0x1000,
+};
+
+/* Bits in rx.desc_length for extended status. */
+enum rx_info_bits {
+ RxTypeTag=0x00010000,
+ RxTypeUDP=0x00020000, RxTypeTCP=0x00040000, RxTypeIP=0x00080000,
+ RxTypeUTChksumOK=0x00100000, RxTypeIPChksumOK=0x00200000,
+ /* Summarized. */
+ RxTypeCsumMask=0x003E0000,
+ RxTypeUDPSumOK=0x003A0000, RxTypeTCPSumOK=0x003C0000,
+};
+
+/* Bits in ChipCmd. */
+enum chip_cmd_bits {
+ CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
+ CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
+ CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
+ CmdNoTxPoll=0x0800, CmdReset=0x8000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct rx_desc rx_ring[RX_RING_SIZE];
+ struct tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned char *tx_buf[TX_RING_SIZE]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ int msg_level;
+ int max_interrupt_work;
+ int intr_enable;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ u16 chip_cmd; /* Current setting for ChipCmd */
+ int multicast_filter_limit;
+ u32 mc_filter[2];
+ int rx_mode;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values are keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ u8 tx_thresh, rx_thresh;
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int via_rhine_probe(struct net_device *dev)
+{
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&via_rhine_drv_id, dev);
+}
+#endif
+
+static void *via_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_tbl[chip_idx].name, ioaddr);
+
+ /* We would prefer to directly read the EEPROM but access may be locked. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+ /* Reload the station address from the EEPROM. */
+ writeb(0x20, ioaddr + MACRegEEcsr);
+ /* Typically 2 cycles to reload. */
+ for (i = 0; i < 150; i++)
+ if (! (readb(ioaddr + MACRegEEcsr) & 0x20))
+ break;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+ printk(" (MISSING EEPROM ADDRESS)");
+ /* Fill a temp addr with the "locally administered" bit set. */
+ memcpy(dev->dev_addr, ">Linux", 6);
+ }
+ }
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Make certain the descriptor lists are cache-aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+#ifdef USE_IO_OPS
+ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ np->phys[0] = 1; /* Standard for this chip. */
+ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x Link %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising,
+ mdio_read(dev, phy, 5));
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ /* Allow forcing the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ return dev;
+}
+
+
+/* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+
+ /* Wait for a previous command to complete. */
+ while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+ ;
+ writeb(0x00, ioaddr + MIICmd);
+ writeb(phy_id, ioaddr + MIIPhyAddr);
+ writeb(regnum, ioaddr + MIIRegAddr);
+ writeb(0x40, ioaddr + MIICmd); /* Trigger read */
+ boguscnt = 1024;
+ while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
+ ;
+ return readw(ioaddr + MIIData);
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+
+ if (phy_id == np->phys[0]) {
+ switch (regnum) {
+ case 0: /* Is user forcing speed/duplex? */
+ if (value & 0x9000) /* Autonegotiation. */
+ np->duplex_lock = 0;
+ else
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ }
+ /* Wait for a previous command to complete. */
+ while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+ ;
+ writeb(0x00, ioaddr + MIICmd);
+ writeb(phy_id, ioaddr + MIIPhyAddr);
+ writeb(regnum, ioaddr + MIIRegAddr);
+ writew(value, ioaddr + MIIData);
+ writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Reset the chip. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+ writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
+ /* Configure the FIFO thresholds. */
+ writeb(0x20, ioaddr + TxConfig); /* Initial threshold 32 bytes */
+ np->tx_thresh = 0x20;
+ np->rx_thresh = 0x60; /* Written in set_rx_mode(). */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty |
+ IntrRxOverflow| IntrRxDropped| IntrTxDone | IntrTxAbort |
+ IntrTxUnderrun | IntrPCIErr | IntrStatsMax | IntrLinkChange |
+ IntrMIIChange;
+ /* Enable interrupts by setting the interrupt mask. */
+ writew(np->intr_enable, ioaddr + IntrEnable);
+
+ np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
+ if (np->duplex_lock)
+ np->chip_cmd |= CmdFDuplex;
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+
+ check_duplex(dev);
+ /* The LED outputs of various MII xcvrs should be configured. */
+ /* For NS or Mison phys, turn on bit 1 in register 0x17 */
+ /* For ESI phys, turn on bit 7 in register 0x17. */
+ mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
+ (np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status %4.4x "
+ "MII status: %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd),
+ mdio_read(dev, np->phys[0], 1));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 2;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ int duplex;
+
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], mii_reg5);
+ if (duplex)
+ np->chip_cmd |= CmdFDuplex;
+ else
+ np->chip_cmd &= ~CmdFDuplex;
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
+ dev->name, readw(ioaddr + IntrStatus));
+ }
+ if (netif_queue_paused(dev)
+ && np->cur_tx - np->dirty_tx > 1
+ && jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+
+ check_duplex(dev);
+
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, readw(ioaddr + IntrStatus),
+ mdio_read(dev, np->phys[0], 1));
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Restart the chip's Tx processes . */
+ writel(virt_to_bus(np->tx_ring + (np->dirty_tx % TX_RING_SIZE)),
+ ioaddr + TxRingPtr);
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+
+ /* Trigger an immediate transmit demand. */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ /* Use 1518/+18 if the CRC is transferred. */
+ np->rx_buf_sz = dev->mtu + 14;
+ if (np->rx_buf_sz < PKT_BUF_SZ)
+ np->rx_buf_sz = PKT_BUF_SZ;
+ np->rx_head_desc = &np->rx_ring[0];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rx_status = 0;
+ np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
+ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].tx_status = 0;
+ np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
+ np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
+ np->tx_buf[i] = 0; /* Allocated as/if needed. */
+ }
+ np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the descriptor word
+ with the "ownership" bit last. No SMP locking is needed if the
+ cur_tx is incremented after the descriptor is consistent. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ if ((np->drv_flags & ReqTxAlign) && ((long)skb->data & 3)) {
+ /* Must use alignment buffer. */
+ if (np->tx_buf[entry] == NULL &&
+ (np->tx_buf[entry] = kmalloc(PKT_BUF_SZ, GFP_KERNEL)) == NULL)
+ return 1;
+ memcpy(np->tx_buf[entry], skb->data, skb->len);
+ np->tx_ring[entry].addr = virt_to_le32desc(np->tx_buf[entry]);
+ } else
+ np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ /* Explicitly flush packet data cache lines here. */
+
+ np->tx_ring[entry].desc_length =
+ cpu_to_le32(0x00E08000 | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+
+ np->cur_tx++;
+
+ /* Explicitly flush descriptor cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_QUEUE_LEN - 2) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int boguscnt = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readw(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ writew(intr_status & 0xffff, ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
+ IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
+ netdev_rx(dev);
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ int txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
+ if (txstatus & DescOwn)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG " Tx scavenge %d status %4.4x.\n",
+ entry, txstatus);
+ if (txstatus & 0x8000) {
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, txstatus);
+ np->stats.tx_errors++;
+ if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
+ if (txstatus & 0x0200) np->stats.tx_window_errors++;
+ if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
+ if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
+ if (txstatus & 0x0002) np->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (txstatus & 0x0100) np->stats.collisions16++;
+#endif
+ /* Transmitter restarted in 'abnormal' handler. */
+ } else {
+#ifdef ETHER_STATS
+ if (txstatus & 0x0001) np->stats.tx_deferred++;
+#endif
+ if (np->drv_flags & HasV1TxStat)
+ np->stats.collisions += (txstatus >> 3) & 15;
+ else
+ np->stats.collisions += txstatus & 15;
+#if defined(NETSTATS_VER2)
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis in mark the queue non-full. */
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
+ IntrStatsMax | IntrTxAbort | IntrTxUnderrun))
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readw(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %8.8x.\n",
+ entry, np->rx_head_desc->rx_status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+ struct rx_desc *desc = np->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->rx_status);
+ int data_size = desc_status >> 16;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status is %4.4x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
+ if ((desc_status & RxWholePkt) != RxWholePkt) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, np->cur_rx, data_size, desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+ dev->name, np->rx_head_desc,
+ &np->rx_ring[np->cur_rx % RX_RING_SIZE]);
+ np->stats.rx_length_errors++;
+ } else if (desc_status & RxErr) {
+ /* There was a error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & 0x0030) np->stats.rx_length_errors++;
+ if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
+ if (desc_status & 0x0004) np->stats.rx_frame_errors++;
+ if (desc_status & 0x0002) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Length should omit the CRC */
+ int pkt_len = data_size - 4;
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ { /* Use hardware checksum info. */
+ int rxtype = le32_to_cpu(desc->desc_length);
+ int csum_bits = rxtype & RxTypeCsumMask;
+ if (csum_bits == RxTypeUDPSumOK ||
+ csum_bits == RxTypeTCPSumOK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+#if defined(NETSTATS_VER2)
+ np->stats.rx_bytes += pkt_len;
+#endif
+ np->stats.rx_packets++;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ }
+ np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+ }
+
+ /* Pre-emptively restart Rx engine. */
+ writew(CmdRxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & (IntrMIIChange | IntrLinkChange)) {
+ if (readb(ioaddr + MIIStatus) & 0x02) {
+ /* Link failed, restart autonegotiation. */
+ if (np->drv_flags & HasDavicomPhy)
+ mdio_write(dev, np->phys[0], 0, 0x3300);
+ netif_link_down(dev);
+ } else {
+ netif_link_up(dev);
+ check_duplex(dev);
+ }
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_ERR "%s: MII status changed: Autonegotiation "
+ "advertising %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], 4),
+ mdio_read(dev, np->phys[0], 5));
+ }
+ if (intr_status & IntrStatsMax) {
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ writel(0, ioaddr + RxMissed);
+ }
+ if (intr_status & IntrTxAbort) {
+ /* Stats counted in Tx-done handler, just restart Tx. */
+ writel(virt_to_bus(&np->tx_ring[np->dirty_tx % TX_RING_SIZE]),
+ ioaddr + TxRingPtr);
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ if (np->tx_thresh < 0xE0)
+ writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_INFO "%s: Transmitter underrun, increasing Tx "
+ "threshold setting to %2.2x.\n", dev->name, np->tx_thresh);
+ }
+ if ((intr_status & ~(IntrLinkChange | IntrMIIChange | IntrStatsMax |
+ IntrTxAbort|IntrTxAborted | IntrNormalSummary))
+ && (np->msg_level & NETIF_MSG_DRV)) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Recovery for other fault sources not known. */
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Nominally we should lock this segment of code for SMP, although
+ the vulnerability window is very small and statistics are
+ non-critical. */
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ writel(0, ioaddr + RxMissed);
+
+ return &np->stats;
+}
+
+/* The big-endian AUTODIN II ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ }
+ return crc;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = 0x1C;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ writel(0xffffffff, ioaddr + MulticastFilter0);
+ writel(0xffffffff, ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26,
+ mc_filter);
+ }
+ writel(mc_filter[0], ioaddr + MulticastFilter0);
+ writel(mc_filter[1], ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ }
+ writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Note: forced media tracking is done in mdio_write(). */
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd));
+
+ /* Switch to loopback mode to avoid hardware races. */
+ writeb(np->tx_thresh | 0x01, ioaddr + TxConfig);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writew(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ np->chip_cmd = CmdStop;
+ writew(CmdStop, ioaddr + ChipCmd);
+
+ del_timer(&np->timer);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rx_status = 0;
+ np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ if (np->tx_buf[i]) {
+ kfree(np->tx_buf[i]);
+ np->tx_buf[i] = 0;
+ }
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int via_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writew(0x0000, ioaddr + IntrEnable);
+ /* Stop the chip's Tx and Rx processes. */
+ writew(CmdStop, ioaddr + ChipCmd);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+ writew(np->intr_enable, ioaddr + IntrEnable);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&via_rhine_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&via_rhine_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr, pci_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)(root_net_dev->base_addr));
+#endif
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` via-rhine.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c via-rhine.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c via-rhine.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/wavelan.c b/linux/src/drivers/net/wavelan.c
new file mode 100644
index 0000000..dbe8815
--- /dev/null
+++ b/linux/src/drivers/net/wavelan.c
@@ -0,0 +1,4373 @@
+/*
+ * WaveLAN ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ * Original copyright follows (also see the end of this file).
+ * See wavelan.p.h for details.
+ */
+
+/*
+ * AT&T GIS (nee NCR) WaveLAN card:
+ * An Ethernet-like radio transceiver
+ * controlled by an Intel 82586 coprocessor.
+ */
+
+#include "wavelan.p.h" /* Private header */
+
+/************************* MISC SUBROUTINES **************************/
+/*
+ * Subroutines which won't fit in one of the following category
+ * (WaveLAN modem or i82586)
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper for disabling interrupts.
+ */
+static inline unsigned long
+wv_splhi(void)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ return(flags);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper for re-enabling interrupts.
+ */
+static inline void
+wv_splx(unsigned long flags)
+{
+ restore_flags(flags);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate irq number to PSA irq parameter
+ */
+static u_char
+wv_irq_to_psa(int irq)
+{
+ if(irq < 0 || irq >= NELS(irqvals))
+ return 0;
+
+ return irqvals[irq];
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate PSA irq parameter to irq number
+ */
+static int
+wv_psa_to_irq(u_char irqval)
+{
+ int irq;
+
+ for(irq = 0; irq < NELS(irqvals); irq++)
+ if(irqvals[irq] == irqval)
+ return irq;
+
+ return -1;
+}
+
+#ifdef STRUCT_CHECK
+/*------------------------------------------------------------------*/
+/*
+ * Sanity routine to verify the sizes of the various WaveLAN interface
+ * structures.
+ */
+static char *
+wv_struct_check(void)
+{
+#define SC(t,s,n) if (sizeof(t) != s) return(n);
+
+ SC(psa_t, PSA_SIZE, "psa_t");
+ SC(mmw_t, MMW_SIZE, "mmw_t");
+ SC(mmr_t, MMR_SIZE, "mmr_t");
+ SC(ha_t, HA_SIZE, "ha_t");
+
+#undef SC
+
+ return((char *) NULL);
+} /* wv_struct_check */
+#endif /* STRUCT_CHECK */
+
+/********************* HOST ADAPTER SUBROUTINES *********************/
+/*
+ * Useful subroutines to manage the WaveLAN ISA interface
+ *
+ * One major difference with the PCMCIA hardware (except the port mapping)
+ * is that we have to keep the state of the Host Control Register
+ * because of the interrupt enable & bus size flags.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read from card's Host Adaptor Status Register.
+ */
+static inline u_short
+hasr_read(u_long ioaddr)
+{
+ return(inw(HASR(ioaddr)));
+} /* hasr_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register.
+ */
+static inline void
+hacr_write(u_long ioaddr,
+ u_short hacr)
+{
+ outw(hacr, HACR(ioaddr));
+} /* hacr_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register. Include a delay for
+ * those times when it is needed.
+ */
+static inline void
+hacr_write_slow(u_long ioaddr,
+ u_short hacr)
+{
+ hacr_write(ioaddr, hacr);
+ /* delay might only be needed sometimes */
+ udelay(1000L);
+} /* hacr_write_slow */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the channel attention bit.
+ */
+static inline void
+set_chan_attn(u_long ioaddr,
+ u_short hacr)
+{
+ hacr_write(ioaddr, hacr | HACR_CA);
+} /* set_chan_attn */
+
+/*------------------------------------------------------------------*/
+/*
+ * Reset, and then set host adaptor into default mode.
+ */
+static inline void
+wv_hacr_reset(u_long ioaddr)
+{
+ hacr_write_slow(ioaddr, HACR_RESET);
+ hacr_write(ioaddr, HACR_DEFAULT);
+} /* wv_hacr_reset */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the i/o transfer over the ISA bus to 8 bits mode
+ */
+static inline void
+wv_16_off(u_long ioaddr,
+ u_short hacr)
+{
+ hacr &= ~HACR_16BITS;
+ hacr_write(ioaddr, hacr);
+} /* wv_16_off */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the i/o transfer over the ISA bus to 8 bits mode
+ */
+static inline void
+wv_16_on(u_long ioaddr,
+ u_short hacr)
+{
+ hacr |= HACR_16BITS;
+ hacr_write(ioaddr, hacr);
+} /* wv_16_on */
+
+/*------------------------------------------------------------------*/
+/*
+ * Disable interrupts on the WaveLAN hardware
+ */
+static inline void
+wv_ints_off(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_long x;
+
+ x = wv_splhi();
+
+ lp->hacr &= ~HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+
+ wv_splx(x);
+} /* wv_ints_off */
+
+/*------------------------------------------------------------------*/
+/*
+ * Enable interrupts on the WaveLAN hardware
+ */
+static inline void
+wv_ints_on(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_long x;
+
+ x = wv_splhi();
+
+ lp->hacr |= HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+
+ wv_splx(x);
+} /* wv_ints_on */
+
+/******************* MODEM MANAGEMENT SUBROUTINES *******************/
+/*
+ * Useful subroutines to manage the modem of the WaveLAN
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read the Parameter Storage Area from the WaveLAN card's memory
+ */
+/*
+ * Read bytes from the PSA.
+ */
+static void
+psa_read(u_long ioaddr,
+ u_short hacr,
+ int o, /* offset in PSA */
+ u_char * b, /* buffer to fill */
+ int n) /* size to read */
+{
+ wv_16_off(ioaddr, hacr);
+
+ while(n-- > 0)
+ {
+ outw(o, PIOR2(ioaddr));
+ o++;
+ *b++ = inb(PIOP2(ioaddr));
+ }
+
+ wv_16_on(ioaddr, hacr);
+} /* psa_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write the Paramter Storage Area to the WaveLAN card's memory
+ */
+static void
+psa_write(u_long ioaddr,
+ u_short hacr,
+ int o, /* Offset in psa */
+ u_char * b, /* Buffer in memory */
+ int n) /* Length of buffer */
+{
+ int count = 0;
+
+ wv_16_off(ioaddr, hacr);
+
+ while(n-- > 0)
+ {
+ outw(o, PIOR2(ioaddr));
+ o++;
+
+ outb(*b, PIOP2(ioaddr));
+ b++;
+
+ /* Wait for the memory to finish its write cycle */
+ count = 0;
+ while((count++ < 100) &&
+ (hasr_read(ioaddr) & HASR_PSA_BUSY))
+ udelay(1000);
+ }
+
+ wv_16_on(ioaddr, hacr);
+} /* psa_write */
+
+#ifdef PSA_CRC
+/*------------------------------------------------------------------*/
+/*
+ * Calculate the PSA CRC (not tested yet)
+ * As the WaveLAN drivers don't use the CRC, I won't use it either.
+ * Thanks to Valster, Nico <NVALSTER@wcnd.nl.lucent.com> for the code
+ * NOTE: By specifying a length including the CRC position the
+ * returned value should be zero. (i.e. a correct checksum in the PSA)
+ */
+static u_short
+psa_crc(u_short * psa, /* The PSA */
+ int size) /* Number of short for CRC */
+{
+ int byte_cnt; /* Loop on the PSA */
+ u_short crc_bytes = 0; /* Data in the PSA */
+ int bit_cnt; /* Loop on the bits of the short */
+
+ for(byte_cnt = 0; byte_cnt <= size; byte_cnt++ )
+ {
+ crc_bytes ^= psa[byte_cnt]; /* Its an xor */
+
+ for(bit_cnt = 1; bit_cnt < 9; bit_cnt++ )
+ {
+ if(crc_bytes & 0x0001)
+ crc_bytes = (crc_bytes >> 1) ^ 0xA001;
+ else
+ crc_bytes >>= 1 ;
+ }
+ }
+
+ return crc_bytes;
+} /* psa_crc */
+#endif /* PSA_CRC */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write 1 byte to the MMC.
+ */
+static inline void
+mmc_out(u_long ioaddr,
+ u_short o,
+ u_char d)
+{
+ /* Wait for MMC to go idle */
+ while(inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+
+ outw((u_short) (((u_short) d << 8) | (o << 1) | 1),
+ MMCR(ioaddr));
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to write bytes to the Modem Management Controller.
+ * We start by the end because it is the way it should be !
+ */
+static inline void
+mmc_write(u_long ioaddr,
+ u_char o,
+ u_char * b,
+ int n)
+{
+ o += n;
+ b += n;
+
+ while(n-- > 0 )
+ mmc_out(ioaddr, --o, *(--b));
+} /* mmc_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read 1 byte from the MMC.
+ * Optimised version for 1 byte, avoid using memory...
+ */
+static inline u_char
+mmc_in(u_long ioaddr,
+ u_short o)
+{
+ while(inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+ outw(o << 1, MMCR(ioaddr));
+
+ while(inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+ return (u_char) (inw(MMCR(ioaddr)) >> 8);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to read bytes from the Modem Management Controller.
+ * The implementation is complicated by a lack of address lines,
+ * which prevents decoding of the low-order bit.
+ * (code has just been moved in the above function)
+ * We start by the end because it is the way it should be !
+ */
+static inline void
+mmc_read(u_long ioaddr,
+ u_char o,
+ u_char * b,
+ int n)
+{
+ o += n;
+ b += n;
+
+ while(n-- > 0)
+ *(--b) = mmc_in(ioaddr, --o);
+} /* mmc_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the type of encryption available...
+ */
+static inline int
+mmc_encr(u_long ioaddr)	/* i/o port of the card */
+{
+  int algo;
+
+  /* The modem reports which encryption engine it carries. */
+  algo = mmc_in(ioaddr, mmroff(0, mmr_des_avail));
+
+  /* Return the algorithm code if recognised, 0 otherwise. */
+  if((algo == MMR_DES_AVAIL_DES) || (algo == MMR_DES_AVAIL_AES))
+    return algo;
+  return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wait for the frequency EEPROM to complete a command...
+ * I hope this one will be optimally inlined...
+ */
+static inline void
+fee_wait(u_long ioaddr,	/* i/o port of the card */
+	 int delay,	/* Base delay to wait for */
+	 int number)	/* Number of time to wait */
+{
+  int i;
+
+  /* Poll the EEPROM status register until its busy bit drops,
+   * giving up after `number' polls of `delay' us each (same
+   * read/udelay sequence as the original while loop). */
+  for(i = 0; i < number; i++)
+    {
+      if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) & MMR_FEE_STATUS_BUSY))
+	break;
+      udelay(delay);
+    }
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the frequency EEPROM (frequency select cards).
+ */
+static void
+fee_read(u_long ioaddr,	/* i/o port of the card */
+	 u_short o,	/* offset of the source area in the EEPROM */
+	 u_short * b,	/* data buffer */
+	 int n)	/* number of registers */
+{
+  b += n;		/* Position at the end of the area */
+
+  /* Write the address of the LAST word; the loop below fills the
+   * buffer backwards.  NOTE(review): the address is written only
+   * once, so the EEPROM presumably steps it down after each read
+   * -- confirm against the frequency-EEPROM datasheet. */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+  /* Loop on all buffer */
+  while(n-- > 0)
+    {
+      /* Write the read command */
+      mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_READ);
+
+      /* Wait until EEPROM is ready (should be quick!) */
+      fee_wait(ioaddr, 10, 100);
+
+      /* Read the value (16 bits, high byte then low byte) */
+      *--b = ((mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)) << 8) |
+	      mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
+    }
+}
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write bytes from the Frequency EEPROM (frequency select cards).
+ * This is a bit complicated, because the frequency EEPROM has to
+ * be unprotected and the write enabled.
+ * Jean II
+ */
+static void
+fee_write(u_long ioaddr,	/* i/o port of the card */
+	  u_short o,	/* destination offset in the EEPROM */
+	  u_short * b,	/* data buffer */
+	  int n)	/* number of registers */
+{
+  b += n;		/* Position at the end of the area */
+
+#ifdef EEPROM_IS_PROTECTED	/* disabled */
+#ifdef DOESNT_SEEM_TO_WORK	/* disabled */
+  /* Ask to read the protected register */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRREAD);
+
+  fee_wait(ioaddr, 10, 100);
+
+  /* Read the protected register */
+  printk("Protected 2 :  %02X-%02X\n",
+	 mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)),
+	 mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
+#endif	/* DOESNT_SEEM_TO_WORK */
+
+  /* Enable protected register */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PREN);
+
+  fee_wait(ioaddr, 10, 100);
+
+  /* Unprotect area */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n);
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+#ifdef DOESNT_SEEM_TO_WORK	/* disabled */
+  /* Or use : */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRCLEAR);
+#endif	/* DOESNT_SEEM_TO_WORK */
+
+  fee_wait(ioaddr, 10, 100);
+#endif	/* EEPROM_IS_PROTECTED */
+
+  /* Write enable */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WREN);
+
+  fee_wait(ioaddr, 10, 100);
+
+  /* Write the EEPROM address (of the last word; the loop below
+   * writes the buffer backwards, as fee_read() reads it) */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+  /* Loop on all buffer */
+  while(n-- > 0)
+    {
+      /* Write the value (16 bits, high byte then low byte) */
+      mmc_out(ioaddr, mmwoff(0, mmw_fee_data_h), (*--b) >> 8);
+      mmc_out(ioaddr, mmwoff(0, mmw_fee_data_l), *b & 0xFF);
+
+      /* Write the write command */
+      mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WRITE);
+
+      /* Wavelan doc says : wait at least 10 ms for EEBUSY = 0 */
+      udelay(10000);
+      fee_wait(ioaddr, 10, 100);
+    }
+
+  /* Write disable */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_DS);
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WDS);
+
+  fee_wait(ioaddr, 10, 100);
+
+#ifdef EEPROM_IS_PROTECTED	/* disabled */
+  /* Reprotect EEPROM */
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x00);
+  mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+
+  fee_wait(ioaddr, 10, 100);
+#endif	/* EEPROM_IS_PROTECTED */
+}
+#endif /* WIRELESS_EXT */
+
+/************************ I82586 SUBROUTINES *************************/
+/*
+ * Usefull subroutines to manage the Ethernet controler
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the on-board RAM.
+ * Why inlining this function make it fail ???
+ */
+static /*inline*/ void
+obram_read(u_long ioaddr,
+	   u_short o,
+	   u_char * b,
+	   int n)
+{
+  int nwords = (n + 1) >> 1;	/* byte count rounded up to 16-bit words */
+
+  /* Select the on-board RAM address, then stream the data in
+   * through the PIO data port. */
+  outw(o, PIOR1(ioaddr));
+  insw(PIOP1(ioaddr), (unsigned short *) b, nwords);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Write bytes to the on-board RAM.
+ */
+static inline void
+obram_write(u_long ioaddr,
+	    u_short o,
+	    u_char * b,
+	    int n)
+{
+  int nwords = (n + 1) >> 1;	/* byte count rounded up to 16-bit words */
+
+  /* Select the on-board RAM address, then stream the data out
+   * through the PIO data port. */
+  outw(o, PIOR1(ioaddr));
+  outsw(PIOP1(ioaddr), (unsigned short *) b, nwords);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Acknowledge the reading of the status issued by the i82586
+ */
+static void
+wv_ack(device *	dev)
+{
+  net_local *	lp = (net_local *)dev->priv;
+  u_long	ioaddr = dev->base_addr;
+  u_short	scb_cs;
+  int		i;
+
+  /* Fetch the interrupt bits of the SCB status word. */
+  obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
+	     (unsigned char *) &scb_cs, sizeof(scb_cs));
+  scb_cs &= SCB_ST_INT;
+
+  /* Nothing to acknowledge. */
+  if(scb_cs == 0)
+    return;
+
+  /* Echo the pending status bits back into the command word:
+   * that is how the i82586 acknowledge protocol works. */
+  obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+	      (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+  /* Kick the chip so it notices the new command. */
+  set_chan_attn(ioaddr, lp->hacr);
+
+  /* Busy-wait (up to ~10 ms) for the chip to consume the command
+   * word, which it clears when done. */
+  for(i = 1000; i > 0; i--)
+    {
+      obram_read(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+      if(scb_cs == 0)
+	break;
+
+      udelay(10);
+    }
+  udelay(100);
+
+#ifdef DEBUG_CONFIG_ERROR
+  if(i <= 0)
+    printk(KERN_INFO "%s: wv_ack(): board not accepting command.\n",
+	   dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Set channel attention bit and busy wait until command has
+ * completed, then acknowledge the command completion.
+ */
+static inline int
+wv_synchronous_cmd(device *	dev,
+		   const char *	str)	/* caller name, for error messages */
+{
+  net_local *	lp = (net_local *)dev->priv;
+  u_long	ioaddr = dev->base_addr;
+  u_short	scb_cmd;
+  ach_t		cb;
+  int		i;
+
+  /* Start the command unit at the current command block.
+   * NOTE(review): `SCB_CMD_CUC & SCB_CMD_CUC_GO' reads oddly --
+   * it only equals SCB_CMD_CUC_GO if GO lies entirely inside the
+   * CUC field mask; confirm against the register definitions. */
+  scb_cmd = SCB_CMD_CUC & SCB_CMD_CUC_GO;
+  obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+	      (unsigned char *) &scb_cmd, sizeof(scb_cmd));
+
+  /* Tell the chip to look at the SCB. */
+  set_chan_attn(ioaddr, lp->hacr);
+
+  /* Busy-wait (up to ~10 ms) for the command-complete bit. */
+  for (i = 1000; i > 0; i--)
+    {
+      obram_read(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+      if (cb.ac_status & AC_SFLD_C)
+	break;
+
+      udelay(10);
+    }
+  udelay(100);
+
+  /* Timed out, or completed without the OK bit -> failure. */
+  if(i <= 0 || !(cb.ac_status & AC_SFLD_OK))
+    {
+#ifdef DEBUG_CONFIG_ERROR
+      printk(KERN_INFO "%s: %s failed; status = 0x%x\n",
+	     dev->name, str, cb.ac_status);
+#endif
+#ifdef DEBUG_I82586_SHOW
+      wv_scb_show(ioaddr);
+#endif
+      return -1;
+    }
+
+  /* Ack the status */
+  wv_ack(dev);
+
+  return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Configuration commands completion interrupt.
+ * Check if done, and if ok...
+ */
+/*
+ * Check whether the chained configuration commands (configure,
+ * ia-setup, mc-setup) written at the tail of the current tx block
+ * have completed.  Returns 1 when the block may be reclaimed,
+ * 0 when the commands are still running.
+ */
+static inline int
+wv_config_complete(device *	dev,
+		   u_long	ioaddr,
+		   net_local *	lp)
+{
+  unsigned short	mcs_addr;
+  unsigned short	status;
+  int			ret;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+  printk(KERN_DEBUG "%s: ->wv_config_complete()\n", dev->name);
+#endif
+
+  /* The mc-setup action header is the last of the three chained
+   * commands in the block. */
+  mcs_addr = lp->tx_first_in_use + sizeof(ac_tx_t) + sizeof(ac_nop_t)
+    + sizeof(tbd_t) + sizeof(ac_cfg_t) + sizeof(ac_ias_t);
+
+  /* Read the status of the last command (set mc list) */
+  obram_read(ioaddr, acoff(mcs_addr, ac_status), (unsigned char *)&status, sizeof(status));
+
+  /* If not completed -> exit */
+  if((status & AC_SFLD_C) == 0)
+    ret = 0;		/* Not ready to be scrapped */
+  else
+    {
+#ifdef DEBUG_CONFIG_ERROR
+      unsigned short	cfg_addr;
+      unsigned short	ias_addr;
+
+      /* Check mc_config command.
+       * Note the explicit parentheses: `status & AC_SFLD_OK != 0'
+       * would parse as `status & (AC_SFLD_OK != 0)' == `status & 1'
+       * because `!=' binds tighter than `&'. */
+      if((status & AC_SFLD_OK) == 0)
+	printk(KERN_INFO "%s: wv_config_complete(): set_multicast_address failed; status = 0x%x\n",
+	       dev->name, status);
+
+      /* check ia-config command */
+      ias_addr = mcs_addr - sizeof(ac_ias_t);
+      obram_read(ioaddr, acoff(ias_addr, ac_status), (unsigned char *)&status, sizeof(status));
+      if((status & AC_SFLD_OK) == 0)
+	printk(KERN_INFO "%s: wv_config_complete(): set_MAC_address failed; status = 0x%x\n",
+	       dev->name, status);
+
+      /* Check config command */
+      cfg_addr = ias_addr - sizeof(ac_cfg_t);
+      obram_read(ioaddr, acoff(cfg_addr, ac_status), (unsigned char *)&status, sizeof(status));
+      if((status & AC_SFLD_OK) == 0)
+	printk(KERN_INFO "%s: wv_config_complete(): configure failed; status = 0x%x\n",
+	       dev->name, status);
+#endif	/* DEBUG_CONFIG_ERROR */
+
+      ret = 1;		/* Ready to be scrapped */
+    }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+  printk(KERN_DEBUG "%s: <-wv_config_complete() - %d\n", dev->name, ret);
+#endif
+  return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Command completion interrupt.
+ * Reclaim as many freed tx buffers as we can.
+ */
+static int
+wv_complete(device *	dev,
+	    u_long	ioaddr,
+	    net_local *	lp)
+{
+  int	nreaped = 0;	/* number of tx blocks reclaimed */
+
+#ifdef DEBUG_INTERRUPT_TRACE
+  printk(KERN_DEBUG "%s: ->wv_complete()\n", dev->name);
+#endif
+
+  /* Loop on all the transmit buffers */
+  while(lp->tx_first_in_use != I82586NULL)
+    {
+      unsigned short	tx_status;
+
+      /* Read the first transmit buffer */
+      obram_read(ioaddr, acoff(lp->tx_first_in_use, ac_status), (unsigned char *)&tx_status, sizeof(tx_status));
+
+      /* Hack for reconfiguration...
+       * NOTE(review): 0xFFFF apparently marks a block holding the
+       * configuration command chain rather than a real tx frame --
+       * confirm against wv_82586_config(), not visible here. */
+      if(tx_status == 0xFFFF)
+	if(!wv_config_complete(dev, ioaddr, lp))
+	  break;	/* Not completed */
+
+      /* If not completed -> exit */
+      if((tx_status & AC_SFLD_C) == 0)
+	break;
+
+      /* We now remove this buffer */
+      nreaped++;
+      --lp->tx_n_in_use;
+
+/*
+if (lp->tx_n_in_use > 0)
+ printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
+*/
+
+      /* Was it the last one ? */
+      if(lp->tx_n_in_use <= 0)
+	lp->tx_first_in_use = I82586NULL;
+      else
+	{
+	  /* Next one in the (circular) chain of tx blocks */
+	  lp->tx_first_in_use += TXBLOCKZ;
+	  if(lp->tx_first_in_use >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+	    lp->tx_first_in_use -= NTXBLOCKS * TXBLOCKZ;
+	}
+
+      /* Hack for reconfiguration: config blocks carry no tx
+       * statistics, so skip the accounting below. */
+      if(tx_status == 0xFFFF)
+	continue;
+
+      /* Now, check status of the finished command and update the
+       * interface statistics accordingly. */
+      if(tx_status & AC_SFLD_OK)
+	{
+	  int	ncollisions;
+
+	  lp->stats.tx_packets++;
+	  ncollisions = tx_status & AC_SFLD_MAXCOL;
+	  lp->stats.collisions += ncollisions;
+#ifdef DEBUG_INTERRUPT_INFO
+	  if(ncollisions > 0)
+	    printk(KERN_DEBUG "%s: wv_complete(): tx completed after %d collisions.\n",
+		   dev->name, ncollisions);
+#endif
+	}
+      else
+	{
+	  lp->stats.tx_errors++;
+#ifndef IGNORE_NORMAL_XMIT_ERRS
+	  if(tx_status & AC_SFLD_S10)
+	    {
+	      lp->stats.tx_carrier_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+	      printk(KERN_INFO "%s: wv_complete(): tx error: no CS.\n",
+		     dev->name);
+#endif
+	    }
+#endif	/* IGNORE_NORMAL_XMIT_ERRS */
+	  if(tx_status & AC_SFLD_S9)
+	    {
+	      lp->stats.tx_carrier_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+	      printk(KERN_INFO "%s: wv_complete(): tx error: lost CTS.\n",
+		     dev->name);
+#endif
+	    }
+	  if(tx_status & AC_SFLD_S8)
+	    {
+	      lp->stats.tx_fifo_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+	      printk(KERN_INFO "%s: wv_complete(): tx error: slow DMA.\n",
+		     dev->name);
+#endif
+	    }
+#ifndef IGNORE_NORMAL_XMIT_ERRS
+	  if(tx_status & AC_SFLD_S6)
+	    {
+	      lp->stats.tx_heartbeat_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+	      printk(KERN_INFO "%s: wv_complete(): tx error: heart beat.\n",
+		     dev->name);
+#endif
+	    }
+	  if(tx_status & AC_SFLD_S5)
+	    {
+	      lp->stats.tx_aborted_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+	      printk(KERN_INFO "%s: wv_complete(): tx error: too many collisions.\n",
+		     dev->name);
+#endif
+	    }
+#endif	/* IGNORE_NORMAL_XMIT_ERRS */
+	}
+
+#ifdef DEBUG_INTERRUPT_INFO
+      printk(KERN_DEBUG "%s: wv_complete(): tx completed, tx_status 0x%04x\n",
+	     dev->name, tx_status);
+#endif
+    }
+
+#ifdef DEBUG_INTERRUPT_INFO
+  if(nreaped > 1)
+    printk(KERN_DEBUG "%s: wv_complete(): reaped %d\n", dev->name, nreaped);
+#endif
+
+  /*
+   * Inform upper layers: now that at least one tx block is free
+   * again, the device may accept new packets.
+   */
+  if(lp->tx_n_in_use < NTXBLOCKS - 1)
+    {
+      dev->tbusy = 0;
+      mark_bh(NET_BH);
+    }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+  printk(KERN_DEBUG "%s: <-wv_complete()\n", dev->name);
+#endif
+  return nreaped;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Reconfigure the i82586, or at least ask for it...
+ * Because wv_82586_config use a transmission buffer, we must do it
+ * when we are sure that there is one left, so we do it now
+ * or in wavelan_packet_xmit() (I can't find any better place,
+ * wavelan_interrupt is not an option...), so you may experience
+ * some delay sometime...
+ */
+static inline void
+wv_82586_reconfig(device *	dev)
+{
+  net_local *	lp = (net_local *)dev->priv;
+
+  /* Check if we can do it now !
+   * NOTE(review): this relies on set_bit() returning the previous
+   * bit value (test-and-set semantics of old kernels) -- if the
+   * device was stopped or tbusy was already set, just record that
+   * a reconfiguration is pending. */
+  if(!(dev->start) || (set_bit(0, (void *)&dev->tbusy) != 0))
+    {
+      lp->reconfig_82586 = 1;
+#ifdef DEBUG_CONFIG_INFO
+      printk(KERN_DEBUG "%s: wv_82586_reconfig(): delayed (busy = %ld, start = %d)\n",
+	     dev->name, dev->tbusy, dev->start);
+#endif
+    }
+  else
+    wv_82586_config(dev);
+}
+
+/********************* DEBUG & INFO SUBROUTINES *********************/
+/*
+ * This routines are used in the code to show debug informations.
+ * Most of the time, it dump the content of hardware structures...
+ */
+
+#ifdef DEBUG_PSA_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted contents of the Parameter Storage Area.
+ */
+static void
+wv_psa_show(psa_t *	p)
+{
+  /* Dump every field of the Parameter Storage Area; the fields
+   * marked "unused"/"reserved" are only printed when
+   * DEBUG_SHOW_UNUSED is defined. */
+  printk(KERN_DEBUG "##### WaveLAN psa contents: #####\n");
+  printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
+	 p->psa_io_base_addr_1,
+	 p->psa_io_base_addr_2,
+	 p->psa_io_base_addr_3,
+	 p->psa_io_base_addr_4);
+  printk(KERN_DEBUG "psa_rem_boot_addr_1: 0x%02X %02X %02X\n",
+	 p->psa_rem_boot_addr_1,
+	 p->psa_rem_boot_addr_2,
+	 p->psa_rem_boot_addr_3);
+  printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
+  printk("psa_int_req_no: %d\n", p->psa_int_req_no);
+#ifdef DEBUG_SHOW_UNUSED
+  printk(KERN_DEBUG "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+	 p->psa_unused0[0],
+	 p->psa_unused0[1],
+	 p->psa_unused0[2],
+	 p->psa_unused0[3],
+	 p->psa_unused0[4],
+	 p->psa_unused0[5],
+	 p->psa_unused0[6]);
+#endif	/* DEBUG_SHOW_UNUSED */
+  printk(KERN_DEBUG "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+	 p->psa_univ_mac_addr[0],
+	 p->psa_univ_mac_addr[1],
+	 p->psa_univ_mac_addr[2],
+	 p->psa_univ_mac_addr[3],
+	 p->psa_univ_mac_addr[4],
+	 p->psa_univ_mac_addr[5]);
+  printk(KERN_DEBUG "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+	 p->psa_local_mac_addr[0],
+	 p->psa_local_mac_addr[1],
+	 p->psa_local_mac_addr[2],
+	 p->psa_local_mac_addr[3],
+	 p->psa_local_mac_addr[4],
+	 p->psa_local_mac_addr[5]);
+  printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel);
+  printk("psa_comp_number: %d, ", p->psa_comp_number);
+  printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set);
+  printk(KERN_DEBUG "psa_feature_select/decay_prm: 0x%02x, ",
+	 p->psa_feature_select);
+  printk("psa_subband/decay_update_prm: %d\n", p->psa_subband);
+  printk(KERN_DEBUG "psa_quality_thr: 0x%02x, ", p->psa_quality_thr);
+  printk("psa_mod_delay: 0x%02x\n", p->psa_mod_delay);
+  printk(KERN_DEBUG "psa_nwid: 0x%02x%02x, ", p->psa_nwid[0], p->psa_nwid[1]);
+  printk("psa_nwid_select: %d\n", p->psa_nwid_select);
+  printk(KERN_DEBUG "psa_encryption_select: %d, ", p->psa_encryption_select);
+  printk("psa_encryption_key[]: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+	 p->psa_encryption_key[0],
+	 p->psa_encryption_key[1],
+	 p->psa_encryption_key[2],
+	 p->psa_encryption_key[3],
+	 p->psa_encryption_key[4],
+	 p->psa_encryption_key[5],
+	 p->psa_encryption_key[6],
+	 p->psa_encryption_key[7]);
+  printk(KERN_DEBUG "psa_databus_width: %d\n", p->psa_databus_width);
+  printk(KERN_DEBUG "psa_call_code/auto_squelch: 0x%02x, ",
+	 p->psa_call_code[0]);
+  printk("psa_call_code[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+	 p->psa_call_code[0],
+	 p->psa_call_code[1],
+	 p->psa_call_code[2],
+	 p->psa_call_code[3],
+	 p->psa_call_code[4],
+	 p->psa_call_code[5],
+	 p->psa_call_code[6],
+	 p->psa_call_code[7]);
+#ifdef DEBUG_SHOW_UNUSED
+  printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+	 p->psa_reserved[0],
+	 p->psa_reserved[1],
+	 p->psa_reserved[2],
+	 p->psa_reserved[3]);
+#endif	/* DEBUG_SHOW_UNUSED */
+  printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
+  printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+  printk("psa_crc_status: 0x%02x\n", p->psa_crc_status);
+} /* wv_psa_show */
+#endif /* DEBUG_PSA_SHOW */
+
+#ifdef DEBUG_MMC_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the Modem Management Controller.
+ * This function need to be completed...
+ */
+static void
+wv_mmc_show(device *	dev)
+{
+  u_long	ioaddr = dev->base_addr;
+  /* lp is only referenced when WIRELESS_EXT is defined */
+  net_local *	lp = (net_local *)dev->priv;
+  mmr_t		m;
+
+  /* Basic check : the modem must be present and clocked */
+  if(hasr_read(ioaddr) & HASR_NO_CLK)
+    {
+      printk(KERN_WARNING "%s: wv_mmc_show: modem not connected\n",
+	     dev->name);
+      return;
+    }
+
+  /* Read the mmc, with the counters frozen so the snapshot is
+   * consistent */
+  mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+  mmc_read(ioaddr, 0, (u_char *)&m, sizeof(m));
+  mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+#ifdef WIRELESS_EXT	/* If wireless extension exist in the kernel */
+  /* Don't forget to update statistics */
+  lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+#endif	/* WIRELESS_EXT */
+
+  printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n");
+#ifdef DEBUG_SHOW_UNUSED
+  printk(KERN_DEBUG "mmc_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+	 m.mmr_unused0[0],
+	 m.mmr_unused0[1],
+	 m.mmr_unused0[2],
+	 m.mmr_unused0[3],
+	 m.mmr_unused0[4],
+	 m.mmr_unused0[5],
+	 m.mmr_unused0[6],
+	 m.mmr_unused0[7]);
+#endif	/* DEBUG_SHOW_UNUSED */
+  printk(KERN_DEBUG "Encryption algorythm: %02X - Status: %02X\n",
+	 m.mmr_des_avail, m.mmr_des_status);
+#ifdef DEBUG_SHOW_UNUSED
+  printk(KERN_DEBUG "mmc_unused1[]: %02X:%02X:%02X:%02X:%02X\n",
+	 m.mmr_unused1[0],
+	 m.mmr_unused1[1],
+	 m.mmr_unused1[2],
+	 m.mmr_unused1[3],
+	 m.mmr_unused1[4]);
+#endif	/* DEBUG_SHOW_UNUSED */
+  printk(KERN_DEBUG "dce_status: 0x%x [%s%s%s%s]\n",
+	 m.mmr_dce_status,
+	 (m.mmr_dce_status & MMR_DCE_STATUS_RX_BUSY) ? "energy detected,":"",
+	 (m.mmr_dce_status & MMR_DCE_STATUS_LOOPT_IND) ?
+	 "loop test indicated," : "",
+	 (m.mmr_dce_status & MMR_DCE_STATUS_TX_BUSY) ? "transmitter on," : "",
+	 (m.mmr_dce_status & MMR_DCE_STATUS_JBR_EXPIRED) ?
+	 "jabber timer expired," : "");
+  printk(KERN_DEBUG "Dsp ID: %02X\n",
+	 m.mmr_dsp_id);
+#ifdef DEBUG_SHOW_UNUSED
+  printk(KERN_DEBUG "mmc_unused2[]: %02X:%02X\n",
+	 m.mmr_unused2[0],
+	 m.mmr_unused2[1]);
+#endif	/* DEBUG_SHOW_UNUSED */
+  printk(KERN_DEBUG "# correct_nwid: %d, # wrong_nwid: %d\n",
+	 (m.mmr_correct_nwid_h << 8) | m.mmr_correct_nwid_l,
+	 (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l);
+  printk(KERN_DEBUG "thr_pre_set: 0x%x [current signal %s]\n",
+	 m.mmr_thr_pre_set & MMR_THR_PRE_SET,
+	 (m.mmr_thr_pre_set & MMR_THR_PRE_SET_CUR) ? "above" : "below");
+  printk(KERN_DEBUG "signal_lvl: %d [%s], ",
+	 m.mmr_signal_lvl & MMR_SIGNAL_LVL,
+	 (m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) ? "new msg" : "no new msg");
+  printk("silence_lvl: %d [%s], ", m.mmr_silence_lvl & MMR_SILENCE_LVL,
+	 (m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) ? "update done" : "no new update");
+  printk("sgnl_qual: 0x%x [%s]\n",
+	 m.mmr_sgnl_qual & MMR_SGNL_QUAL,
+	 (m.mmr_sgnl_qual & MMR_SGNL_QUAL_ANT) ? "Antenna 1" : "Antenna 0");
+#ifdef DEBUG_SHOW_UNUSED
+  printk(KERN_DEBUG "netw_id_l: %x\n", m.mmr_netw_id_l);
+#endif	/* DEBUG_SHOW_UNUSED */
+} /* wv_mmc_show */
+#endif /* DEBUG_MMC_SHOW */
+
+#ifdef DEBUG_I82586_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the last block of the i82586 memory
+ */
+static void
+wv_scb_show(u_long	ioaddr)
+{
+  scb_t		scb;
+
+  /* Snapshot the System Control Block from on-board RAM and
+   * decode its status/command fields symbolically. */
+  obram_read(ioaddr, OFFSET_SCB, (unsigned char *)&scb, sizeof(scb));
+
+  printk(KERN_DEBUG "##### WaveLAN system control block: #####\n");
+
+  printk(KERN_DEBUG "status: ");
+  printk("stat 0x%x[%s%s%s%s] ",
+	 (scb.scb_status & (SCB_ST_CX | SCB_ST_FR | SCB_ST_CNA | SCB_ST_RNR)) >> 12,
+	 (scb.scb_status & SCB_ST_CX) ? "cmd completion interrupt," : "",
+	 (scb.scb_status & SCB_ST_FR) ? "frame received," : "",
+	 (scb.scb_status & SCB_ST_CNA) ? "cmd unit not active," : "",
+	 (scb.scb_status & SCB_ST_RNR) ? "rcv unit not ready," : "");
+  printk("cus 0x%x[%s%s%s] ",
+	 (scb.scb_status & SCB_ST_CUS) >> 8,
+	 ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_IDLE) ? "idle" : "",
+	 ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_SUSP) ? "suspended" : "",
+	 ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_ACTV) ? "active" : "");
+  printk("rus 0x%x[%s%s%s%s]\n",
+	 (scb.scb_status & SCB_ST_RUS) >> 4,
+	 ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_IDLE) ? "idle" : "",
+	 ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_SUSP) ? "suspended" : "",
+	 ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_NRES) ? "no resources" : "",
+	 ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_RDY) ? "ready" : "");
+
+  printk(KERN_DEBUG "command: ");
+  printk("ack 0x%x[%s%s%s%s] ",
+	 (scb.scb_command & (SCB_CMD_ACK_CX | SCB_CMD_ACK_FR | SCB_CMD_ACK_CNA | SCB_CMD_ACK_RNR)) >> 12,
+	 (scb.scb_command & SCB_CMD_ACK_CX) ? "ack cmd completion," : "",
+	 (scb.scb_command & SCB_CMD_ACK_FR) ? "ack frame received," : "",
+	 (scb.scb_command & SCB_CMD_ACK_CNA) ? "ack CU not active," : "",
+	 (scb.scb_command & SCB_CMD_ACK_RNR) ? "ack RU not ready," : "");
+  printk("cuc 0x%x[%s%s%s%s%s] ",
+	 (scb.scb_command & SCB_CMD_CUC) >> 8,
+	 ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_NOP) ? "nop" : "",
+	 ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_GO) ? "start cbl_offset" : "",
+	 ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_RES) ? "resume execution" : "",
+	 ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_SUS) ? "suspend execution" : "",
+	 ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_ABT) ? "abort execution" : "");
+  printk("ruc 0x%x[%s%s%s%s%s]\n",
+	 (scb.scb_command & SCB_CMD_RUC) >> 4,
+	 ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_NOP) ? "nop" : "",
+	 ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_GO) ? "start rfa_offset" : "",
+	 ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_RES) ? "resume reception" : "",
+	 ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_SUS) ? "suspend reception" : "",
+	 ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_ABT) ? "abort reception" : "");
+
+  printk(KERN_DEBUG "cbl_offset 0x%x ", scb.scb_cbl_offset);
+  printk("rfa_offset 0x%x\n", scb.scb_rfa_offset);
+
+  /* Error counters maintained by the i82586 itself */
+  printk(KERN_DEBUG "crcerrs %d ", scb.scb_crcerrs);
+  printk("alnerrs %d ", scb.scb_alnerrs);
+  printk("rscerrs %d ", scb.scb_rscerrs);
+  printk("ovrnerrs %d\n", scb.scb_ovrnerrs);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the i82586's receive unit.
+ */
+static void
+wv_ru_show(device *	dev)
+{
+  /* net_local *lp = (net_local *) dev->priv; */
+
+  /* Placeholder: only prints the banner for now. */
+  printk(KERN_DEBUG "##### WaveLAN i82586 receiver unit status: #####\n");
+  printk(KERN_DEBUG "ru:");
+  /*
+   * Not implemented yet...
+   */
+  printk("\n");
+} /* wv_ru_show */
+
+/*------------------------------------------------------------------*/
+/*
+ * Display info about one control block of the i82586 memory
+ */
+static void
+wv_cu_show_one(device *		dev,
+	       net_local *	lp,	/* currently unused */
+	       int		i,	/* index of the block, display only */
+	       u_short		p)	/* on-board address of the block */
+{
+  u_long	ioaddr;
+  ac_tx_t	actx;
+
+  ioaddr = dev->base_addr;
+
+  printk("%d: 0x%x:", i, p);
+
+  /* Fetch the action header of this tx block and show its
+   * status and command words. */
+  obram_read(ioaddr, p, (unsigned char *)&actx, sizeof(actx));
+  printk(" status=0x%x,", actx.tx_h.ac_status);
+  printk(" command=0x%x,", actx.tx_h.ac_command);
+
+  /*
+  {
+    tbd_t	tbd;
+
+    obram_read(ioaddr, actx.tx_tbd_offset, (unsigned char *)&tbd, sizeof(tbd));
+    printk(" tbd_status=0x%x,", tbd.tbd_status);
+  }
+  */
+
+  printk("|");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Print status of the command unit of the i82586
+ */
+static void
+wv_cu_show(device *	dev)
+{
+  net_local *	lp = (net_local *)dev->priv;
+  unsigned int	i;
+  u_short	p;
+
+  printk(KERN_DEBUG "##### WaveLAN i82586 command unit status: #####\n");
+
+  printk(KERN_DEBUG);
+  /* Walk the circular chain of NTXBLOCKS tx blocks, starting at
+   * the oldest one still in use, and dump each action header. */
+  for(i = 0, p = lp->tx_first_in_use; i < NTXBLOCKS; i++)
+    {
+      wv_cu_show_one(dev, lp, i, p);
+
+      p += TXBLOCKZ;
+      if(p >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+	p -= NTXBLOCKS * TXBLOCKZ;
+    }
+  printk("\n");
+}
+#endif /* DEBUG_I82586_SHOW */
+
+#ifdef DEBUG_DEVICE_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN PCMCIA device driver.
+ */
+static void
+wv_dev_show(device *	dev)
+{
+  /* One-line dump of the generic device state flags. */
+  printk(KERN_DEBUG "dev:");
+  printk(" start=%d,", dev->start);
+  printk(" tbusy=%ld,", dev->tbusy);
+  printk(" interrupt=%d,", dev->interrupt);
+  printk(" trans_start=%ld,", dev->trans_start);
+  printk(" flags=0x%x,", dev->flags);
+  printk("\n");
+} /* wv_dev_show */
+
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN PCMCIA device driver's
+ * private information.
+ */
+static void
+wv_local_show(device *	dev)
+{
+  net_local *	lp = (net_local *)dev->priv;
+
+  /* One-line dump of the driver's private state. */
+  printk(KERN_DEBUG "local:");
+  printk(" tx_n_in_use=%d,", lp->tx_n_in_use);
+  printk(" hacr=0x%x,", lp->hacr);
+  printk(" rx_head=0x%x,", lp->rx_head);
+  printk(" rx_last=0x%x,", lp->rx_last);
+  printk(" tx_first_free=0x%x,", lp->tx_first_free);
+  printk(" tx_first_in_use=0x%x,", lp->tx_first_in_use);
+  printk("\n");
+} /* wv_local_show */
+#endif /* DEBUG_DEVICE_SHOW */
+
+#if defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO)
+/*------------------------------------------------------------------*/
+/*
+ * Dump packet header (and content if necessary) on the screen
+ */
+static inline void
+wv_packet_info(u_char *		p,		/* Packet to dump */
+	       int		length,	/* Length of the packet */
+	       char *		msg1,		/* Name of the device */
+	       msg2)
+#endif /* defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO) */
+
+/*------------------------------------------------------------------*/
+/*
+ * This is the information which is displayed by the driver at startup
+ * There is a lot of flag to configure it at your will...
+ */
+static inline void
+wv_init_info(device *	dev)
+{
+  /* NOTE(review): dev->base_addr is a long; storing it in a
+   * `short' truncates -- presumably safe for ISA port numbers,
+   * but worth confirming. */
+  short		ioaddr = dev->base_addr;
+  net_local *	lp = (net_local *)dev->priv;
+  psa_t		psa;
+  int		i;
+
+  /* Read the parameter storage area */
+  psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
+
+#ifdef DEBUG_PSA_SHOW
+  wv_psa_show(&psa);
+#endif
+#ifdef DEBUG_MMC_SHOW
+  wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82586_SHOW
+  wv_cu_show(dev);
+#endif
+
+#ifdef DEBUG_BASIC_SHOW
+  /* Now, let's go for the basic stuff: i/o base, MAC address,
+   * IRQ, network id and card variant/frequency. */
+  printk(KERN_NOTICE "%s: WaveLAN at %#x,", dev->name, ioaddr);
+  for(i = 0; i < WAVELAN_ADDR_SIZE; i++)
+    printk("%s%02X", (i == 0) ? " " : ":", dev->dev_addr[i]);
+  printk(", IRQ %d", dev->irq);
+
+  /* Print current network id */
+  if(psa.psa_nwid_select)
+    printk(", nwid 0x%02X-%02X", psa.psa_nwid[0], psa.psa_nwid[1]);
+  else
+    printk(", nwid off");
+
+  /* If 2.00 card (i.e. a frequency-select card whose EEPROM is
+   * neither downloading nor busy) */
+  if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+       (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+    {
+      unsigned short	freq;
+
+      /* Ask the EEPROM to read the frequency from the first area */
+      fee_read(ioaddr, 0x00 /* 1st area - frequency... */,
+	       &freq, 1);
+
+      /* Print frequency: the register holds an offset from
+       * 2400 MHz in units of 1/64 MHz (hence the shifts below) */
+      printk(", 2.00, %ld", (freq >> 6) + 2400L);
+
+      /* Hack !!! (odd half-MHz flag) */
+      if(freq & 0x20)
+	printk(".5");
+    }
+  else
+    {
+      /* Older card: derive variant and subband from the PSA */
+      printk(", PC");
+      switch(psa.psa_comp_number)
+	{
+	case PSA_COMP_PC_AT_915:
+	case PSA_COMP_PC_AT_2400:
+	  printk("-AT");
+	  break;
+	case PSA_COMP_PC_MC_915:
+	case PSA_COMP_PC_MC_2400:
+	  printk("-MC");
+	  break;
+	case PSA_COMP_PCMCIA_915:
+	  printk("MCIA");
+	  break;
+	default:
+	  printk("???");
+	}
+      printk(", ");
+      switch (psa.psa_subband)
+	{
+	case PSA_SUBBAND_915:
+	  printk("915");
+	  break;
+	case PSA_SUBBAND_2425:
+	  printk("2425");
+	  break;
+	case PSA_SUBBAND_2460:
+	  printk("2460");
+	  break;
+	case PSA_SUBBAND_2484:
+	  printk("2484");
+	  break;
+	case PSA_SUBBAND_2430_5:
+	  printk("2430.5");
+	  break;
+	default:
+	  printk("???");
+	}
+    }
+
+  printk(" MHz\n");
+#endif	/* DEBUG_BASIC_SHOW */
+
+#ifdef DEBUG_VERSION_SHOW
+  /* Print version information */
+  printk(KERN_NOTICE "%s", version);
+#endif
+} /* wv_init_info */
+
+/********************* IOCTL, STATS & RECONFIG *********************/
+/*
+ * We found here routines that are called by Linux on differents
+ * occasions after the configuration and not for transmitting data
+ * These may be called when the user use ifconfig, /proc/net/dev
+ * or wireless extensions
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the current ethernet statistics. This may be called with the
+ * card open or closed.
+ * Used when the user read /proc/net/dev
+ */
+static en_stats *
+wavelan_get_stats(device *	dev)
+{
+  net_local *	lp = (net_local *) dev->priv;
+
+#ifdef DEBUG_IOCTL_TRACE
+  printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
+#endif
+
+  /* Hand back the statistics block kept in the private data. */
+  return &lp->stats;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void
+wavelan_set_multicast_list(device *	dev)
+{
+  net_local *	lp = (net_local *) dev->priv;
+
+#ifdef DEBUG_IOCTL_TRACE
+  printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n", dev->name);
+#endif
+
+#ifdef DEBUG_IOCTL_INFO
+  printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
+	 dev->name, dev->flags, dev->mc_count);
+#endif
+
+  /* If we ask for promiscuous mode,
+   * or all multicast addresses (we don't have that !)
+   * or too much multicast addresses for the hardware filter */
+  if((dev->flags & IFF_PROMISC) ||
+     (dev->flags & IFF_ALLMULTI) ||
+     (dev->mc_count > I82586_MAX_MULTICAST_ADDRESSES))
+    {
+      /*
+       * Enable promiscuous mode: receive all packets.
+       */
+      if(!lp->promiscuous)
+	{
+	  lp->promiscuous = 1;
+	  lp->mc_count = 0;
+
+	  wv_82586_reconfig(dev);
+
+	  /* Tell the kernel that we are doing a really bad job... */
+	  dev->flags |= IFF_PROMISC;
+	}
+    }
+  else
+    /* If there is some multicast addresses to send */
+    if(dev->mc_list != (struct dev_mc_list *) NULL)
+      {
+	/*
+	 * Disable promiscuous mode, but receive all packets
+	 * in multicast list
+	 */
+#ifdef MULTICAST_AVOID
+	if(lp->promiscuous ||
+	   (dev->mc_count != lp->mc_count))
+#endif
+	  {
+	    lp->promiscuous = 0;
+	    lp->mc_count = dev->mc_count;
+
+	    wv_82586_reconfig(dev);
+	  }
+      }
+    else
+      {
+	/*
+	 * Switch to normal mode: disable promiscuous mode and
+	 * clear the multicast list.
+	 * Only reconfigure if the previous state differed: we were
+	 * promiscuous, or some multicast addresses were programmed.
+	 * (The original test was `lp->mc_count == 0', which skipped
+	 * the reconfiguration exactly when a stale multicast list
+	 * needed flushing, and reconfigured needlessly otherwise.)
+	 */
+	if(lp->promiscuous || lp->mc_count != 0)
+	  {
+	    lp->promiscuous = 0;
+	    lp->mc_count = 0;
+
+	    wv_82586_reconfig(dev);
+	  }
+      }
+#ifdef DEBUG_IOCTL_TRACE
+  printk(KERN_DEBUG "%s: <-wavelan_set_multicast_list()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This function doesn't exist...
+ */
+static int
+wavelan_set_mac_address(device *	dev,
+			void *		addr)
+{
+  struct sockaddr *	mac = (struct sockaddr *) addr;
+
+  /* Install the new address... */
+  memcpy(dev->dev_addr, mac->sa_data, WAVELAN_ADDR_SIZE);
+
+  /* ...and ask the hardware to start using it */
+  wv_82586_reconfig(dev);
+
+  return 0;
+}
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Frequency setting (for hardware able of it)
+ * It's a bit complicated and you don't really want to look into it...
+ * (called in wavelan_ioctl)
+ */
+/*
+ * Program a new operating frequency into the modem EEPROM and download
+ * it to the synthesizers. Returns 0 on success, -EINVAL if the requested
+ * frequency/channel is not in the card's authorized table, -EOPNOTSUPP
+ * if the EEPROM write could not be verified.
+ */
+static inline int
+wv_set_frequency(u_long ioaddr, /* i/o port of the card */
+ iw_freq * frequency)
+{
+ const int BAND_NUM = 10; /* Number of bands */
+ long freq = 0L; /* offset to 2.4 GHz in .5 MHz */
+#ifdef DEBUG_IOCTL_INFO
+ int i;
+#endif
+
+ /* Setting by frequency */
+ /* Theoretically, you may set any frequency between
+ * the two limits with a 0.5 MHz precision. In practice,
+ * I don't want you to have trouble with local
+ * regulations... */
+ if((frequency->e == 1) &&
+ (frequency->m >= (int) 2.412e8) && (frequency->m <= (int) 2.487e8))
+ {
+ freq = ((frequency->m / 10000) - 24000L) / 5;
+ }
+
+ /* Setting by channel (same as wfreqsel) */
+ /* Warning : each channel is 22MHz wide, so some of the channels
+ * will interfere... */
+ if((frequency->e == 0) &&
+ (frequency->m >= 0) && (frequency->m < BAND_NUM))
+ {
+ /* frequency in 1/4 of MHz (as read in the offset register) */
+ short bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8, 0xD0, 0xF0, 0xF8, 0x150 };
+
+ /* Get frequency offset */
+ freq = bands[frequency->m] >> 1;
+ }
+
+ /* Verify if the frequency is allowed */
+ if(freq != 0L)
+ {
+ u_short table[10]; /* Authorized frequency table */
+
+ /* Read the frequency table */
+ fee_read(ioaddr, 0x71 /* frequency table */,
+ table, 10);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "Frequency table :");
+ for(i = 0; i < 10; i++)
+ {
+ printk(" %04X",
+ table[i]);
+ }
+ printk("\n");
+#endif
+
+ /* Look in the table if the frequency is allowed */
+ if(!(table[9 - ((freq - 24) / 16)] &
+ (1 << ((freq - 24) % 16))))
+ return -EINVAL; /* not allowed */
+ }
+ else
+ return -EINVAL;
+
+ /* If we get a usable frequency */
+ if(freq != 0L)
+ {
+ unsigned short area[16];
+ unsigned short dac[2];
+ unsigned short area_verify[16];
+ unsigned short dac_verify[2];
+ /* Corresponding gain (in the power adjust value table)
+ * see AT&T WaveLAN Data Manual, REF 407-024689/E, page 3-8
+ * & WCIN062D.DOC, page 6.2.9 */
+ unsigned short power_limit[] = { 40, 80, 120, 160, 0 };
+ int power_band = 0; /* Selected band */
+ unsigned short power_adjust; /* Correct value */
+
+ /* Search for the gain */
+ power_band = 0;
+ while((freq > power_limit[power_band]) &&
+ (power_limit[++power_band] != 0))
+ ;
+
+ /* Read the first area */
+ fee_read(ioaddr, 0x00,
+ area, 16);
+
+ /* Read the DAC */
+ fee_read(ioaddr, 0x60,
+ dac, 2);
+
+ /* Read the new power adjust value */
+ fee_read(ioaddr, 0x6B - (power_band >> 1),
+ &power_adjust, 1);
+ if(power_band & 0x1)
+ power_adjust >>= 8;
+ else
+ power_adjust &= 0xFF;
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "WaveLAN EEPROM Area 1:");
+ for(i = 0; i < 16; i++)
+ {
+ printk(" %04X",
+ area[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
+ dac[0], dac[1]);
+#endif
+
+ /* Frequency offset (for info only) */
+ area[0] = ((freq << 5) & 0xFFE0) | (area[0] & 0x1F);
+
+ /* Receiver Principle main divider coefficient */
+ area[3] = (freq >> 1) + 2400L - 352L;
+ area[2] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
+
+ /* Transmitter Main divider coefficient */
+ area[13] = (freq >> 1) + 2400L;
+ /* Fix: preserve the other bits of area[12] (was 'area[2] & 0xFFEF',
+ * a copy-paste error from the receiver lines above). */
+ area[12] = ((freq & 0x1) << 4) | (area[12] & 0xFFEF);
+
+ /* Others part of the area are flags, bit streams or unused... */
+
+ /* Set the value in the DAC. */
+ dac[1] = ((power_adjust >> 1) & 0x7F) | (dac[1] & 0xFF80);
+ dac[0] = ((power_adjust & 0x1) << 4) | (dac[0] & 0xFFEF);
+
+ /* Write the first area. */
+ fee_write(ioaddr, 0x00,
+ area, 16);
+
+ /* Write the DAC. */
+ fee_write(ioaddr, 0x60,
+ dac, 2);
+
+ /* We now should verify here that the EEPROM writing was OK. */
+
+ /* Reread the first area. */
+ fee_read(ioaddr, 0x00,
+ area_verify, 16);
+
+ /* ReRead the DAC */
+ fee_read(ioaddr, 0x60,
+ dac_verify, 2);
+
+ /* Compare */
+ if(memcmp(area, area_verify, 16 * 2) ||
+ memcmp(dac, dac_verify, 2 * 2))
+ {
+#ifdef DEBUG_IOCTL_ERROR
+ printk(KERN_INFO "WaveLAN: wv_set_frequency: unable to write new frequency to EEPROM(?).\n");
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ /* We must download the frequency parameters to the
+ * synthesizers (from the EEPROM - area 1)
+ * Note: as the EEPROM is automatically decremented, we set the end
+ * if the area... */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x0F);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait until the download is finished */
+ fee_wait(ioaddr, 100, 100);
+
+ /* We must now download the power adjust value (gain) to
+ * the synthesizers (from the EEPROM - area 7 - DAC) */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x61);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait until the download is finished */
+ fee_wait(ioaddr, 100, 100);
+
+#ifdef DEBUG_IOCTL_INFO
+ /* Verification of what we have done... */
+
+ printk(KERN_DEBUG "WaveLAN EEPROM Area 1:");
+ for(i = 0; i < 16; i++)
+ {
+ printk(" %04X",
+ area_verify[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
+ dac_verify[0], dac_verify[1]);
+#endif
+
+ return 0;
+ }
+ else
+ return -EINVAL; /* Bah, never get there... */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Give the list of available frequencies
+ */
+static inline int
+wv_frequency_list(u_long ioaddr, /* i/o port of the card */
+ iw_freq * list, /* List of frequency to fill */
+ int max) /* Maximum number of frequencies */
+{
+ u_short allowed[10]; /* Bitmask of authorized frequencies */
+ long offset; /* Frequency offset being probed */
+ int count = 0; /* Number of entries written into list */
+
+ /* Fetch the table of authorized frequencies from the EEPROM. */
+ fee_read(ioaddr, 0x71 /* frequency table */,
+ allowed, 10);
+
+ /* Walk every possible offset and keep those the card may use. */
+ for(offset = 0; offset < 150; offset++)
+ {
+ /* Skip frequencies the hardware table forbids. */
+ if(!(allowed[9 - (offset / 16)] & (1 << (offset % 16))))
+ continue;
+
+ /* Convert the offset to a frequency and store it for the caller. */
+ list[count].m = (((offset + 24) * 5) + 24000L) * 10000;
+ list[count].e = 1;
+
+ /* Stop as soon as the caller's buffer is full. */
+ if(++count >= max)
+ break;
+ }
+
+ return(count);
+}
+
+#ifdef WIRELESS_SPY
+/*------------------------------------------------------------------*/
+/*
+ * Gather wireless spy statistics : for each packet, compare the source
+ * address with out list, and if match, get the stats...
+ * Sorry, but this function really need wireless extensions...
+ */
+static inline void
+wl_spy_gather(device * dev,
+ u_char * mac, /* MAC address */
+ u_char * stats) /* Statistics to gather */
+{
+ net_local * lp = (net_local *) dev->priv;
+ int n;
+
+ /* Scan the spy list for entries matching the source address. */
+ for(n = 0; n < lp->spy_number; n++)
+ {
+ if(memcmp(mac, lp->spy_address[n], WAVELAN_ADDR_SIZE) != 0)
+ continue;
+
+ /* Matching address: record the radio statistics for it. */
+ lp->spy_stat[n].qual = stats[2] & MMR_SGNL_QUAL;
+ lp->spy_stat[n].level = stats[0] & MMR_SIGNAL_LVL;
+ lp->spy_stat[n].noise = stats[1] & MMR_SILENCE_LVL;
+ lp->spy_stat[n].updated = 0x7;
+ }
+}
+#endif /* WIRELESS_SPY */
+
+#ifdef HISTOGRAM
+/*------------------------------------------------------------------*/
+/*
+ * This function calculates an histogram on the signal level.
+ * As the noise is quite constant, it's like doing it on the SNR.
+ * We have defined a set of interval (lp->his_range), and each time
+ * the level goes in that interval, we increment the count (lp->his_sum).
+ * With this histogram you may detect if one WaveLAN is really weak,
+ * or you may also calculate the mean and standard deviation of the level.
+ */
+static inline void
+wl_his_gather(device * dev,
+ u_char * stats) /* Statistics to gather */
+{
+ net_local * lp = (net_local *) dev->priv;
+ /* Signal level of this packet (stats[0] comes from the modem). */
+ u_char level = stats[0] & MMR_SIGNAL_LVL;
+ int i;
+
+ /* Find the correct interval */
+ /* NOTE(review): the post-increment inside the condition advances i
+  * even on the iteration where level < his_range[i], so the final
+  * bucket index is one past the first range bound exceeding level.
+  * Presumably his_range[i] is the upper bound of bucket i+1 - confirm
+  * against the his_range layout filled in by SIOCSIPHISTO. */
+ i = 0;
+ while((i < (lp->his_number - 1)) && (level >= lp->his_range[i++]))
+ ;
+
+ /* Increment interval counter */
+ (lp->his_sum[i])++;
+}
+#endif /* HISTOGRAM */
+
+/*------------------------------------------------------------------*/
+/*
+ * Perform ioctl : config & info stuff
+ * This is here that are treated the wireless extensions (iwconfig)
+ */
+/*
+ * Dispatch one ioctl on the device. Returns 0 on success or a negative
+ * errno. Interrupts are disabled for the whole duration (wv_splhi) and
+ * restored on the single exit path, so no case may 'return' directly:
+ * error paths must set 'ret' and 'break'.
+ */
+static int
+wavelan_ioctl(struct device * dev, /* device on which the ioctl is applied */
+ struct ifreq * rq, /* data passed */
+ int cmd) /* ioctl number */
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv; /* lp is not unused */
+ struct iwreq * wrq = (struct iwreq *) rq;
+ psa_t psa;
+ mm_t m;
+ unsigned long x;
+ int ret = 0;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_ioctl(cmd=0x%X)\n", dev->name, cmd);
+#endif
+
+ /* Disable interrupts & save flags */
+ x = wv_splhi();
+
+ /* Look what is the request */
+ switch(cmd)
+ {
+ /* --------------- WIRELESS EXTENSIONS --------------- */
+
+ case SIOCGIWNAME:
+ strcpy(wrq->u.name, "Wavelan");
+ break;
+
+ case SIOCSIWNWID:
+ /* Set NWID in WaveLAN */
+ if(wrq->u.nwid.on)
+ {
+ /* Set NWID in psa */
+ psa.psa_nwid[0] = (wrq->u.nwid.nwid & 0xFF00) >> 8;
+ psa.psa_nwid[1] = wrq->u.nwid.nwid & 0xFF;
+ psa.psa_nwid_select = 0x01;
+ psa_write(ioaddr, lp->hacr, (char *)psa.psa_nwid - (char *)&psa,
+ (unsigned char *)psa.psa_nwid, 3);
+
+ /* Set NWID in mmc */
+ m.w.mmw_netw_id_l = wrq->u.nwid.nwid & 0xFF;
+ m.w.mmw_netw_id_h = (wrq->u.nwid.nwid & 0xFF00) >> 8;
+ mmc_write(ioaddr, (char *)&m.w.mmw_netw_id_l - (char *)&m,
+ (unsigned char *)&m.w.mmw_netw_id_l, 2);
+ mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel), 0x00);
+ }
+ else
+ {
+ /* Disable nwid in the psa */
+ psa.psa_nwid_select = 0x00;
+ psa_write(ioaddr, lp->hacr,
+ (char *)&psa.psa_nwid_select - (char *)&psa,
+ (unsigned char *)&psa.psa_nwid_select, 1);
+
+ /* Disable nwid in the mmc (no filtering) */
+ mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel), MMW_LOOPT_SEL_DIS_NWID);
+ }
+ break;
+
+ case SIOCGIWNWID:
+ /* Read the NWID */
+ psa_read(ioaddr, lp->hacr, (char *)psa.psa_nwid - (char *)&psa,
+ (unsigned char *)psa.psa_nwid, 3);
+ wrq->u.nwid.nwid = (psa.psa_nwid[0] << 8) + psa.psa_nwid[1];
+ wrq->u.nwid.on = psa.psa_nwid_select;
+ break;
+
+ case SIOCSIWFREQ:
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable) */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ ret = wv_set_frequency(ioaddr, &(wrq->u.freq));
+ else
+ ret = -EOPNOTSUPP;
+ break;
+
+ case SIOCGIWFREQ:
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
+ * (does it work for everybody ??? - especially old cards...) */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ unsigned short freq;
+
+ /* Ask the EEPROM to read the frequency from the first area */
+ fee_read(ioaddr, 0x00 /* 1st area - frequency... */,
+ &freq, 1);
+ wrq->u.freq.m = ((freq >> 5) * 5 + 24000L) * 10000;
+ wrq->u.freq.e = 1;
+ }
+ else
+ {
+ int bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
+
+ psa_read(ioaddr, lp->hacr, (char *)&psa.psa_subband - (char *)&psa,
+ (unsigned char *)&psa.psa_subband, 1);
+
+ if(psa.psa_subband <= 4)
+ {
+ wrq->u.freq.m = bands[psa.psa_subband];
+ wrq->u.freq.e = (psa.psa_subband != 0);
+ }
+ else
+ ret = -EOPNOTSUPP;
+ }
+ break;
+
+ case SIOCSIWSENS:
+ /* Set the level threshold */
+ if(!suser())
+ {
+ /* Fix: was 'return -EPERM', which skipped wv_splx() below
+ * and left interrupts disabled. */
+ ret = -EPERM;
+ break;
+ }
+ psa.psa_thr_pre_set = wrq->u.sensitivity & 0x3F;
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_thr_pre_set - (char *)&psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ mmc_out(ioaddr, mmwoff(0, mmw_thr_pre_set), psa.psa_thr_pre_set);
+ break;
+
+ case SIOCGIWSENS:
+ /* Read the level threshold */
+ psa_read(ioaddr, lp->hacr, (char *)&psa.psa_thr_pre_set - (char *)&psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ wrq->u.sensitivity = psa.psa_thr_pre_set & 0x3F;
+ break;
+
+ case SIOCSIWENCODE:
+ /* Set encryption key */
+ if(!mmc_encr(ioaddr))
+ {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if(wrq->u.encoding.method)
+ { /* enable encryption */
+ int i;
+ long long key = wrq->u.encoding.code;
+
+ for(i = 7; i >= 0; i--)
+ {
+ psa.psa_encryption_key[i] = key & 0xFF;
+ key >>= 8;
+ }
+ psa.psa_encryption_select = 1;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select - (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 8+1);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_encr_enable),
+ MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE);
+ mmc_write(ioaddr, mmwoff(0, mmw_encr_key),
+ (unsigned char *) &psa.psa_encryption_key, 8);
+ }
+ else
+ { /* disable encryption */
+ psa.psa_encryption_select = 0;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select - (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 1);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_encr_enable), 0);
+ }
+ break;
+
+ case SIOCGIWENCODE:
+ /* Read the encryption key */
+ if(!mmc_encr(ioaddr))
+ {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ /* only super-user can see encryption key */
+ if(!suser())
+ {
+ ret = -EPERM;
+ break;
+ }
+ else
+ {
+ int i;
+ long long key = 0;
+
+ psa_read(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select - (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 1+8);
+ for(i = 0; i < 8; i++)
+ {
+ key <<= 8;
+ key += psa.psa_encryption_key[i];
+ }
+ wrq->u.encoding.code = key;
+
+ /* encryption is enabled */
+ if(psa.psa_encryption_select)
+ wrq->u.encoding.method = mmc_encr(ioaddr);
+ else
+ wrq->u.encoding.method = 0;
+ }
+ break;
+
+ case SIOCGIWRANGE:
+ /* basic checking */
+ if(wrq->u.data.pointer != (caddr_t) 0)
+ {
+ struct iw_range range;
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(struct iw_range));
+ if(ret)
+ break;
+
+ /* Set the length (useless : its constant...) */
+ wrq->u.data.length = sizeof(struct iw_range);
+
+ /* Set information in the range struct */
+ range.throughput = 1.6 * 1024 * 1024; /* don't argue on this ! */
+ range.min_nwid = 0x0000;
+ range.max_nwid = 0xFFFF;
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ range.num_channels = 10;
+ range.num_frequency = wv_frequency_list(ioaddr, range.freq,
+ IW_MAX_FREQUENCIES);
+ }
+ else
+ range.num_channels = range.num_frequency = 0;
+
+ range.sensitivity = 0x3F;
+ range.max_qual.qual = MMR_SGNL_QUAL;
+ range.max_qual.level = MMR_SIGNAL_LVL;
+ range.max_qual.noise = MMR_SILENCE_LVL;
+
+ /* Copy structure to the user buffer */
+ copy_to_user(wrq->u.data.pointer, &range,
+ sizeof(struct iw_range));
+ }
+ break;
+
+ case SIOCGIWPRIV:
+ /* Basic checking... */
+ if(wrq->u.data.pointer != (caddr_t) 0)
+ {
+ struct iw_priv_args priv[] =
+ { /* cmd, set_args, get_args, name */
+ { SIOCSIPQTHR, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setqualthr" },
+ { SIOCGIPQTHR, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getqualthr" },
+
+ { SIOCSIPHISTO, IW_PRIV_TYPE_BYTE | 16, 0, "sethisto" },
+ { SIOCGIPHISTO, 0, IW_PRIV_TYPE_INT | 16, "gethisto" },
+ };
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(priv));
+ if(ret)
+ break;
+
+ /* Set the number of ioctl available */
+ wrq->u.data.length = 4;
+
+ /* Copy structure to the user buffer */
+ copy_to_user(wrq->u.data.pointer, (u_char *) priv,
+ sizeof(priv));
+ }
+ break;
+
+#ifdef WIRELESS_SPY
+ case SIOCSIWSPY:
+ /* Set the spy list */
+
+ /* Check the number of addresses */
+ if(wrq->u.data.length > IW_MAX_SPY)
+ {
+ ret = -E2BIG;
+ break;
+ }
+ lp->spy_number = wrq->u.data.length;
+
+ /* If there is some addresses to copy */
+ if(lp->spy_number > 0)
+ {
+ struct sockaddr address[IW_MAX_SPY];
+ int i;
+
+ /* Verify where the user has set his addresses */
+ ret = verify_area(VERIFY_READ, wrq->u.data.pointer,
+ sizeof(struct sockaddr) * lp->spy_number);
+ if(ret)
+ break;
+ /* Copy addresses to the driver */
+ copy_from_user(address, wrq->u.data.pointer,
+ sizeof(struct sockaddr) * lp->spy_number);
+
+ /* Copy addresses to the lp structure */
+ for(i = 0; i < lp->spy_number; i++)
+ {
+ memcpy(lp->spy_address[i], address[i].sa_data,
+ WAVELAN_ADDR_SIZE);
+ }
+
+ /* Reset structure... */
+ memset(lp->spy_stat, 0x00, sizeof(iw_qual) * IW_MAX_SPY);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "SetSpy - Set of new addresses is :\n");
+ for(i = 0; i < wrq->u.data.length; i++)
+ printk(KERN_DEBUG "%02X:%02X:%02X:%02X:%02X:%02X \n",
+ lp->spy_address[i][0],
+ lp->spy_address[i][1],
+ lp->spy_address[i][2],
+ lp->spy_address[i][3],
+ lp->spy_address[i][4],
+ lp->spy_address[i][5]);
+#endif /* DEBUG_IOCTL_INFO */
+ }
+
+ break;
+
+ case SIOCGIWSPY:
+ /* Get the spy list and spy stats */
+
+ /* Set the number of addresses */
+ wrq->u.data.length = lp->spy_number;
+
+ /* If the user want to have the addresses back... */
+ if((lp->spy_number > 0) && (wrq->u.data.pointer != (caddr_t) 0))
+ {
+ struct sockaddr address[IW_MAX_SPY];
+ int i;
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ (sizeof(iw_qual) + sizeof(struct sockaddr))
+ * IW_MAX_SPY);
+ if(ret)
+ break;
+
+ /* Copy addresses from the lp structure */
+ for(i = 0; i < lp->spy_number; i++)
+ {
+ memcpy(address[i].sa_data, lp->spy_address[i],
+ WAVELAN_ADDR_SIZE);
+ address[i].sa_family = AF_UNIX;
+ }
+
+ /* Copy addresses to the user buffer */
+ copy_to_user(wrq->u.data.pointer, address,
+ sizeof(struct sockaddr) * lp->spy_number);
+
+ /* Copy stats to the user buffer (just after) */
+ copy_to_user(wrq->u.data.pointer +
+ (sizeof(struct sockaddr) * lp->spy_number),
+ lp->spy_stat, sizeof(iw_qual) * lp->spy_number);
+
+ /* Reset updated flags */
+ for(i = 0; i < lp->spy_number; i++)
+ lp->spy_stat[i].updated = 0x0;
+ } /* if(pointer != NULL) */
+
+ break;
+#endif /* WIRELESS_SPY */
+
+ /* ------------------ PRIVATE IOCTL ------------------ */
+
+ case SIOCSIPQTHR:
+ if(!suser())
+ {
+ /* Fix: was 'return -EPERM', which skipped wv_splx() below
+ * and left interrupts disabled. */
+ ret = -EPERM;
+ break;
+ }
+ psa.psa_quality_thr = *(wrq->u.name) & 0x0F;
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_quality_thr - (char *)&psa,
+ (unsigned char *)&psa.psa_quality_thr, 1);
+ mmc_out(ioaddr, mmwoff(0, mmw_quality_thr), psa.psa_quality_thr);
+ break;
+
+ case SIOCGIPQTHR:
+ psa_read(ioaddr, lp->hacr, (char *)&psa.psa_quality_thr - (char *)&psa,
+ (unsigned char *)&psa.psa_quality_thr, 1);
+ *(wrq->u.name) = psa.psa_quality_thr & 0x0F;
+ break;
+
+#ifdef HISTOGRAM
+ case SIOCSIPHISTO:
+ /* Verify if the user is root */
+ if(!suser())
+ {
+ /* Fix: was 'return -EPERM', which skipped wv_splx() below
+ * and left interrupts disabled. */
+ ret = -EPERM;
+ break;
+ }
+
+ /* Check the number of intervals */
+ if(wrq->u.data.length > 16)
+ {
+ ret = -E2BIG;
+ break;
+ }
+ lp->his_number = wrq->u.data.length;
+
+ /* If there is some addresses to copy */
+ if(lp->his_number > 0)
+ {
+ /* Verify where the user has set his addresses */
+ ret = verify_area(VERIFY_READ, wrq->u.data.pointer,
+ sizeof(char) * lp->his_number);
+ if(ret)
+ break;
+ /* Copy interval ranges to the driver */
+ copy_from_user(lp->his_range, wrq->u.data.pointer,
+ sizeof(char) * lp->his_number);
+
+ /* Reset structure... */
+ memset(lp->his_sum, 0x00, sizeof(long) * 16);
+ }
+ break;
+
+ case SIOCGIPHISTO:
+ /* Set the number of intervals */
+ wrq->u.data.length = lp->his_number;
+
+ /* Give back the distribution statistics */
+ if((lp->his_number > 0) && (wrq->u.data.pointer != (caddr_t) 0))
+ {
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(long) * 16);
+ if(ret)
+ break;
+
+ /* Copy data to the user buffer */
+ copy_to_user(wrq->u.data.pointer, lp->his_sum,
+ sizeof(long) * lp->his_number);
+ } /* if(pointer != NULL) */
+ break;
+#endif /* HISTOGRAM */
+
+ /* ------------------- OTHER IOCTL ------------------- */
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ /* Enable interrupts, restore flags */
+ wv_splx(x);
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_ioctl()\n", dev->name);
+#endif
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Get wireless statistics
+ * Called by /proc/net/wireless
+ */
+/*
+ * Gather wireless statistics from the modem and return a pointer to
+ * the per-device iw_stats structure (or NULL if the device has no
+ * private data). Interrupts are disabled while talking to the modem.
+ */
+static iw_stats *
+wavelan_get_wireless_stats(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *) dev->priv;
+ mmr_t m;
+ iw_stats * wstats;
+ unsigned long x;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_get_wireless_stats()\n", dev->name);
+#endif
+
+ /* Fix: check lp before disabling interrupts; the old code returned
+ * after wv_splhi() without calling wv_splx(), leaving interrupts
+ * disabled. */
+ if(lp == (net_local *) NULL)
+ return (iw_stats *) NULL;
+
+ /* Disable interrupts & save flags */
+ x = wv_splhi();
+
+ wstats = &lp->wstats;
+
+ /* Get data from the mmc */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+
+ mmc_read(ioaddr, mmroff(0, mmr_dce_status), &m.mmr_dce_status, 1);
+ mmc_read(ioaddr, mmroff(0, mmr_wrong_nwid_l), &m.mmr_wrong_nwid_l, 2);
+ mmc_read(ioaddr, mmroff(0, mmr_thr_pre_set), &m.mmr_thr_pre_set, 4);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+ /* Copy data to wireless stuff */
+ wstats->status = m.mmr_dce_status;
+ wstats->qual.qual = m.mmr_sgnl_qual & MMR_SGNL_QUAL;
+ wstats->qual.level = m.mmr_signal_lvl & MMR_SIGNAL_LVL;
+ wstats->qual.noise = m.mmr_silence_lvl & MMR_SILENCE_LVL;
+ wstats->qual.updated = (((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 7) |
+ ((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 6) |
+ ((m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) >> 5));
+ wstats->discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+ wstats->discard.code = 0L;
+ wstats->discard.misc = 0L;
+
+ /* Enable interrupts & restore flags */
+ wv_splx(x);
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_get_wireless_stats()\n", dev->name);
+#endif
+ return &lp->wstats;
+}
+#endif /* WIRELESS_EXT */
+
+/************************* PACKET RECEPTION *************************/
+/*
+ * This part deals with receiving the packets.
+ * The interrupt handler gets an interrupt when a packet has been
+ * successfully received and calls this part.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does the actual copying of data (including the Ethernet
+ * header structure) from the WaveLAN card to an sk_buff chain that
+ * will be passed up to the network interface layer. NOTE: we
+ * currently don't handle trailer protocols (neither does the rest of
+ * the network interface), so if that is needed, it will (at least in
+ * part) be added here. The contents of the receive ring buffer are
+ * copied to a message chain that is then passed to the kernel.
+ *
+ * Note: if any errors occur, the packet is "dropped on the floor"
+ * (called by wv_packet_rcv())
+ */
+/*
+ * Copy one received packet of 'sksize' bytes from on-board RAM at
+ * 'buf_off' into a freshly allocated sk_buff and hand it to the
+ * network stack. On allocation failure the packet is dropped and
+ * rx_dropped is incremented.
+ */
+static inline void
+wv_packet_read(device * dev,
+ u_short buf_off,
+ int sksize)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ struct sk_buff * skb;
+
+#ifdef DEBUG_RX_TRACE
+ /* Fix: the trace used 'fd_p', which is not a parameter of this
+ * function (compile error when DEBUG_RX_TRACE is defined). */
+ printk(KERN_DEBUG "%s: ->wv_packet_read(0x%X, %d)\n",
+ dev->name, buf_off, sksize);
+#endif
+
+ /* Allocate buffer for the data */
+ if((skb = dev_alloc_skb(sksize)) == (struct sk_buff *) NULL)
+ {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n",
+ dev->name, sksize);
+#endif
+ lp->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = dev;
+
+ /* Copy the packet to the buffer */
+ obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize);
+ skb->protocol=eth_type_trans(skb, dev);
+
+#ifdef DEBUG_RX_INFO
+ wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read");
+#endif /* DEBUG_RX_INFO */
+
+ /* Statistics gathering & stuff associated.
+ * It seems a bit messy with all the defines, but it's really simple... */
+#if defined(WIRELESS_SPY) || defined(HISTOGRAM)
+ if(
+#ifdef WIRELESS_SPY
+ (lp->spy_number > 0) ||
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ (lp->his_number > 0) ||
+#endif /* HISTOGRAM */
+ 0)
+ {
+ u_char stats[3]; /* signal level, noise level, signal quality */
+
+ /* read signal level, silence level and signal quality bytes */
+ /* Note: in the PCMCIA hardware, these are part of the frame. It seems
+ * that for the ISA hardware, it's nowhere to be found in the frame,
+ * so I'm obliged to do this (it has a side effect on /proc/net/wireless).
+ * Any ideas? */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+ mmc_read(ioaddr, mmroff(0, mmr_signal_lvl), stats, 3);
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+#ifdef DEBUG_RX_INFO
+ printk(KERN_DEBUG "%s: wv_packet_read(): Signal level %d/63, Silence level %d/63, signal quality %d/16\n",
+ dev->name, stats[0] & 0x3F, stats[1] & 0x3F, stats[2] & 0x0F);
+#endif
+
+ /* Spying stuff */
+#ifdef WIRELESS_SPY
+ wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats);
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ wl_his_gather(dev, stats);
+#endif /* HISTOGRAM */
+ }
+#endif /* defined(WIRELESS_SPY) || defined(HISTOGRAM) */
+
+ /*
+ * Hand the packet to the Network Module
+ */
+ netif_rx(skb);
+
+ lp->stats.rx_packets++;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Transfer as many packets as we can
+ * from the device RAM.
+ * Called by the interrupt handler.
+ */
+static inline void
+wv_receive(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv;
+ /* Number of frames consumed in this pass (debug accounting only). */
+ int nreaped = 0;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_receive()\n", dev->name);
+#endif
+
+ /* Loop on each received packet */
+ for(;;)
+ {
+ fd_t fd;
+ rbd_t rbd;
+ ushort pkt_len;
+
+ /* Fetch the frame descriptor at the current head of the RX ring. */
+ obram_read(ioaddr, lp->rx_head, (unsigned char *) &fd, sizeof(fd));
+
+ /* If the current frame is not complete, we have reached the end... */
+ if((fd.fd_status & FD_STATUS_C) != FD_STATUS_C)
+ break; /* This is how we exit the loop */
+
+ nreaped++;
+
+ /* Check if frame correctly received */
+ if((fd.fd_status & (FD_STATUS_B | FD_STATUS_OK)) !=
+ (FD_STATUS_B | FD_STATUS_OK))
+ {
+ /*
+ * Not sure about this one -- it does not seem
+ * to be an error so we will keep quiet about it.
+ */
+#ifndef IGNORE_NORMAL_XMIT_ERRS
+#ifdef DEBUG_RX_ERROR
+ if((fd.fd_status & FD_STATUS_B) != FD_STATUS_B)
+ printk(KERN_INFO "%s: wv_receive(): frame not consumed by RU.\n",
+ dev->name);
+#endif
+#endif /* IGNORE_NORMAL_XMIT_ERRS */
+
+#ifdef DEBUG_RX_ERROR
+ if((fd.fd_status & FD_STATUS_OK) != FD_STATUS_OK)
+ printk(KERN_INFO "%s: wv_receive(): frame not received successfully.\n",
+ dev->name);
+#endif
+ }
+
+ /* Were there problems in processing the frame? Let's check.
+ * Each S6..S11 status bit maps to one error counter below. */
+ if((fd.fd_status & (FD_STATUS_S6 | FD_STATUS_S7 | FD_STATUS_S8 |
+ FD_STATUS_S9 | FD_STATUS_S10 | FD_STATUS_S11))
+ != 0)
+ {
+ lp->stats.rx_errors++;
+
+#ifdef DEBUG_RX_ERROR
+ if((fd.fd_status & FD_STATUS_S6) != 0)
+ printk(KERN_INFO "%s: wv_receive(): no EOF flag.\n", dev->name);
+#endif
+
+ if((fd.fd_status & FD_STATUS_S7) != 0)
+ {
+ lp->stats.rx_length_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): frame too short.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S8) != 0)
+ {
+ lp->stats.rx_over_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): rx DMA overrun.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S9) != 0)
+ {
+ lp->stats.rx_fifo_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): ran out of resources.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S10) != 0)
+ {
+ lp->stats.rx_frame_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): alignment error.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S11) != 0)
+ {
+ lp->stats.rx_crc_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): CRC error.\n", dev->name);
+#endif
+ }
+ }
+
+ /* Does the frame contain a pointer to the data? Let's check. */
+ if(fd.fd_rbd_offset == I82586NULL)
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): frame has no data.\n", dev->name);
+#endif
+ else
+ {
+ /* Fetch the receive buffer descriptor attached to the frame. */
+ obram_read(ioaddr, fd.fd_rbd_offset,
+ (unsigned char *) &rbd, sizeof(rbd));
+
+#ifdef DEBUG_RX_ERROR
+ if((rbd.rbd_status & RBD_STATUS_EOF) != RBD_STATUS_EOF)
+ printk(KERN_INFO "%s: wv_receive(): missing EOF flag.\n",
+ dev->name);
+
+ if((rbd.rbd_status & RBD_STATUS_F) != RBD_STATUS_F)
+ printk(KERN_INFO "%s: wv_receive(): missing F flag.\n",
+ dev->name);
+#endif
+
+ /* Actual byte count is in the low bits of the RBD status. */
+ pkt_len = rbd.rbd_status & RBD_STATUS_ACNT;
+
+ /* Read the packet and transmit to Linux */
+ wv_packet_read(dev, rbd.rbd_bufl, pkt_len);
+ } /* if frame has data */
+
+ /* Recycle the consumed descriptor: clear its status, mark it as
+  * the new end of list (EL), then unmark the previous tail so the
+  * controller can keep filling the ring.  NOTE(review): assumes
+  * fd_link_offset points at the next descriptor in the ring -
+  * confirm against the ring setup code (not visible here). */
+ fd.fd_status = 0;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_status),
+ (unsigned char *) &fd.fd_status, sizeof(fd.fd_status));
+
+ fd.fd_command = FD_COMMAND_EL;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_command),
+ (unsigned char *) &fd.fd_command, sizeof(fd.fd_command));
+
+ fd.fd_command = 0;
+ obram_write(ioaddr, fdoff(lp->rx_last, fd_command),
+ (unsigned char *) &fd.fd_command, sizeof(fd.fd_command));
+
+ /* Advance: the consumed head becomes the new tail. */
+ lp->rx_last = lp->rx_head;
+ lp->rx_head = fd.fd_link_offset;
+ } /* for(;;) -> loop on all frames */
+
+#ifdef DEBUG_RX_INFO
+ if(nreaped > 1)
+ printk(KERN_DEBUG "%s: wv_receive(): reaped %d\n", dev->name, nreaped);
+#endif
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_receive()\n", dev->name);
+#endif
+}
+
+/*********************** PACKET TRANSMISSION ***********************/
+/*
+ * This part deals with sending packet through the WaveLAN
+ *
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine fills in the appropriate registers and memory
+ * locations on the WaveLAN card and starts the card off on
+ * the transmit.
+ *
+ * The principle :
+ * Each block contain a transmit command, a nop command,
+ * a transmit block descriptor and a buffer.
+ * The CU read the transmit block which point to the tbd,
+ * read the tbd and the content of the buffer.
+ * When it has finished with it, it goes to the next command
+ * which in our case is the nop. The nop point on itself,
+ * so the CU stop here.
+ * When we add the next block, we modify the previous nop
+ * to make it point on the new tx command.
+ * Simple, isn't it ?
+ *
+ * (called in wavelan_packet_xmit())
+ */
+static inline void
+wv_packet_write(device * dev,
+ void * buf, /* Packet data to transmit */
+ short length) /* Length of the data in bytes */
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ unsigned short txblock; /* Offset of the TX block we fill */
+ unsigned short txpred; /* Offset of the preceding TX block */
+ unsigned short tx_addr; /* Transmit command within the block */
+ unsigned short nop_addr; /* NOP command within the block */
+ unsigned short tbd_addr; /* Buffer descriptor within the block */
+ unsigned short buf_addr; /* Data buffer within the block */
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ int clen = length;
+ unsigned long x;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_write(%d)\n", dev->name, length);
+#endif
+
+ /* Check if we need some padding */
+ /* Pad short packets up to the Ethernet minimum frame size. */
+ if(clen < ETH_ZLEN)
+ clen = ETH_ZLEN;
+
+ x = wv_splhi();
+
+ /* Calculate addresses of next block and previous block */
+ /* The NTXBLOCKS blocks of TXBLOCKZ bytes form a circular region
+  * starting at OFFSET_CU; both the predecessor and the advanced
+  * free pointer wrap around modulo that region. */
+ txblock = lp->tx_first_free;
+ txpred = txblock - TXBLOCKZ;
+ if(txpred < OFFSET_CU)
+ txpred += NTXBLOCKS * TXBLOCKZ;
+ lp->tx_first_free += TXBLOCKZ;
+ if(lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
+
+/*
+if (lp->tx_n_in_use > 0)
+ printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
+*/
+
+ lp->tx_n_in_use++;
+
+ /* Calculate addresses of the differents part of the block */
+ /* Block layout: [tx command][nop command][tbd][data buffer]. */
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ /*
+ * Transmit command.
+ */
+ tx.tx_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
+ (unsigned char *) &tx.tx_h.ac_status,
+ sizeof(tx.tx_h.ac_status));
+
+ /*
+ * NOP command.
+ * The NOP links to itself so the CU parks here after sending,
+ * until a later block rewrites this link (see below).
+ */
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /*
+ * Transmit buffer descriptor
+ */
+ tbd.tbd_status = TBD_STATUS_EOF | (TBD_STATUS_ACNT & clen);
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *)&tbd, sizeof(tbd));
+
+ /*
+ * Data
+ */
+ obram_write(ioaddr, buf_addr, buf, clen);
+
+ /*
+ * Overwrite the predecessor NOP link
+ * so that it points to this txblock.
+ * This is what hands the new command to the (possibly parked) CU.
+ */
+ nop_addr = txpred + sizeof(tx);
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *)&nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = txblock;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* If watchdog not already active, activate it... */
+ /* NOTE(review): 'watchdog.prev == NULL' is the old-kernel idiom for
+  * "timer not queued" - presumably matched by a del_timer elsewhere;
+  * confirm against the interrupt/watchdog code. */
+ if(lp->watchdog.prev == (timer_list *) NULL)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+
+ if(lp->tx_first_in_use == I82586NULL)
+ lp->tx_first_in_use = txblock;
+
+ /* Keep accepting packets from the stack while blocks remain free. */
+ if(lp->tx_n_in_use < NTXBLOCKS - 1)
+ dev->tbusy = 0;
+
+ wv_splx(x);
+
+#ifdef DEBUG_TX_INFO
+ wv_packet_info((u_char *) buf, length, dev->name, "wv_packet_write");
+#endif /* DEBUG_TX_INFO */
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_write()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called when we want to send a packet (NET3 callback)
+ * In this routine, we check if the hardware is ready to accept
+ * the packet. We also prevent reentrance. Then, we call the function
+ * to send the packet...
+ */
+static int
+wavelan_packet_xmit(struct sk_buff * skb,
+ device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_packet_xmit(0x%X)\n", dev->name,
+ (unsigned) skb);
+#endif
+
+ /* This flag indicates that the hardware can't perform a transmission.
+ * Theoretically, NET3 checks it before sending a packet to the driver,
+ * but in fact it never does that and polls continuously.
+ * As the watchdog will abort overly long transmissions, we are quite safe.
+ */
+ if(dev->tbusy)
+ return 1;
+
+ /*
+ * If some higher layer thinks we've missed
+ * a tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+ if(skb == (struct sk_buff *)0)
+ {
+#ifdef DEBUG_TX_ERROR
+ printk(KERN_INFO "%s: wavelan_packet_xmit(): skb == NULL\n", dev->name);
+#endif
+ dev_tint(dev);
+ return 0;
+ }
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ * In other words, prevent reentering this routine.
+ * NOTE(review): this assumes set_bit() returns the previous bit value
+ * (true for x86 kernels of this era); test_and_set_bit() is the
+ * portable test-and-set primitive -- confirm.
+ */
+ if(set_bit(0, (void *)&dev->tbusy) != 0)
+#ifdef DEBUG_TX_ERROR
+ printk(KERN_INFO "%s: Transmitter access conflict.\n", dev->name);
+#endif
+ else
+ {
+ /* If somebody has asked to reconfigure the controller,
+ * we can do it now.
+ */
+ if(lp->reconfig_82586)
+ {
+ wv_82586_config(dev);
+ if(dev->tbusy)
+ return 1;
+ }
+
+#ifdef DEBUG_TX_ERROR
+ if(skb->next)
+ printk(KERN_INFO "skb has next\n");
+#endif
+
+ wv_packet_write(dev, skb->data, skb->len);
+ }
+
+ /* Free the skb in both the sent and the access-conflict cases. */
+ dev_kfree_skb(skb, FREE_WRITE);
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*********************** HARDWARE CONFIGURATION ***********************/
+/*
+ * This part does the real job of starting and configuring the hardware.
+ */
+
+/*--------------------------------------------------------------------*/
+/*
+ * Routine to initialize the Modem Management Controller.
+ * (called by wv_hw_reset())
+ */
+static inline int
+wv_mmc_init(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv;
+ psa_t psa;
+ mmw_t m;
+ int configured;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_mmc_init()\n", dev->name);
+#endif
+
+ /* Read the parameter storage area */
+ psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
+
+#ifdef USE_PSA_CONFIG
+ configured = psa.psa_conf_status & 1;
+#else
+ configured = 0;
+#endif
+
+ /* If the PSA is not configured, fill in sensible defaults */
+ if(!configured)
+ {
+ /* User will be able to configure NWID after (with iwconfig) */
+ psa.psa_nwid[0] = 0;
+ psa.psa_nwid[1] = 0;
+
+ /* no NWID checking since NWID is not set */
+ psa.psa_nwid_select = 0;
+
+ /* Disable encryption */
+ psa.psa_encryption_select = 0;
+
+ /* Set to standard values
+ * 0x04 for AT,
+ * 0x01 for MCA,
+ * 0x04 for PCMCIA and 2.00 card (AT&T 407-024689/E document)
+ */
+ if (psa.psa_comp_number & 1)
+ psa.psa_thr_pre_set = 0x01;
+ else
+ psa.psa_thr_pre_set = 0x04;
+ psa.psa_quality_thr = 0x03;
+
+ /* It is configured */
+ psa.psa_conf_status |= 1;
+
+#ifdef USE_PSA_CONFIG
+ /* Write the psa */
+ psa_write(ioaddr, lp->hacr, (char *)psa.psa_nwid - (char *)&psa,
+ (unsigned char *)psa.psa_nwid, 4);
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_thr_pre_set - (char *)&psa,
+ (unsigned char *)&psa.psa_thr_pre_set, 1);
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_quality_thr - (char *)&psa,
+ (unsigned char *)&psa.psa_quality_thr, 1);
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_conf_status - (char *)&psa,
+ (unsigned char *)&psa.psa_conf_status, 1);
+#endif
+ }
+
+ /* Zero the mmc structure */
+ memset(&m, 0x00, sizeof(m));
+
+ /* Copy PSA info to the mmc */
+ m.mmw_netw_id_l = psa.psa_nwid[1];
+ m.mmw_netw_id_h = psa.psa_nwid[0];
+
+ if(psa.psa_nwid_select & 1)
+ m.mmw_loopt_sel = 0x00;
+ else
+ m.mmw_loopt_sel = MMW_LOOPT_SEL_DIS_NWID;
+
+ memcpy(&m.mmw_encr_key, &psa.psa_encryption_key,
+ sizeof(m.mmw_encr_key));
+
+ if(psa.psa_encryption_select)
+ m.mmw_encr_enable = MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE;
+ else
+ m.mmw_encr_enable = 0;
+
+ m.mmw_thr_pre_set = psa.psa_thr_pre_set & 0x3F;
+ m.mmw_quality_thr = psa.psa_quality_thr & 0x0F;
+
+ /* Missing: encryption stuff... */
+
+ /*
+ * Set default modem control parameters.
+ * See NCR document 407-0024326 Rev. A.
+ */
+ m.mmw_jabber_enable = 0x01;
+ m.mmw_anten_sel = MMW_ANTEN_SEL_ALG_EN;
+ m.mmw_ifs = 0x20;
+ m.mmw_mod_delay = 0x04;
+ m.mmw_jam_time = 0x38;
+
+ /* NOTE(review): this overwrites the mmw_encr_enable value computed
+ * from the PSA above, so encryption is effectively always disabled
+ * -- confirm intended. */
+ m.mmw_encr_enable = 0;
+ m.mmw_des_io_invert = 0;
+ m.mmw_freeze = 0;
+ m.mmw_decay_prm = 0;
+ m.mmw_decay_updat_prm = 0;
+
+ /* Write all info to MMC */
+ mmc_write(ioaddr, 0, (u_char *)&m, sizeof(m));
+
+ /* The following code starts the modem of the 2.00 frequency
+ * selectable cards at power on. It's not strictly needed for the
+ * following boots.
+ * The original patch was by Joe Finney for the PCMCIA driver, but
+ * I've cleaned it up a bit and added documentation.
+ * Thanks to Loeke Brederveld from Lucent for the info.
+ */
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
+ * (does it work for everybody? -- especially old cards?) */
+ /* Note: WFREQSEL verifies that it is able to read a sensible
+ * frequency from EEPROM (address 0x00) and that MMR_FEE_STATUS_ID
+ * is 0xA (Xilinx version) or 0xB (Ariadne version).
+ * My test is more crude but does work. */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ /* We must download the frequency parameters to the
+ * synthesizers (from the EEPROM - area 1)
+ * Note : as the EEPROM is auto decremented, we set the end
+ * of the area... */
+ m.mmw_fee_addr = 0x0F;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(ioaddr, (char *)&m.mmw_fee_ctrl - (char *)&m,
+ (unsigned char *)&m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished */
+ fee_wait(ioaddr, 100, 100);
+
+#ifdef DEBUG_CONFIG_INFO
+ /* The frequency was in the last word downloaded. */
+ mmc_read(ioaddr, (char *)&m.mmw_fee_data_l - (char *)&m,
+ (unsigned char *)&m.mmw_fee_data_l, 2);
+
+ /* Print some info for the user. */
+ printk(KERN_DEBUG "%s: WaveLAN 2.00 recognised (frequency select) : Current frequency = %ld\n",
+ dev->name,
+ ((m.mmw_fee_data_h << 4) |
+ (m.mmw_fee_data_l >> 4)) * 5 / 2 + 24000L);
+#endif
+
+ /* We must now download the power adjust value (gain) to
+ * the synthesizers (from the EEPROM - area 7 - DAC) */
+ m.mmw_fee_addr = 0x61;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(ioaddr, (char *)&m.mmw_fee_ctrl - (char *)&m,
+ (unsigned char *)&m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished */
+ /* NOTE(review): unlike the first download above, no fee_wait()
+ * call actually follows this comment -- confirm intended. */
+ } /* if 2.00 card */
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_mmc_init()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Construct the fd and rbd structures.
+ * Start the receive unit.
+ * (called by wv_hw_reset())
+ */
+static inline int
+wv_ru_start(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_short scb_cs;
+ fd_t fd;
+ rbd_t rbd;
+ u_short rx;
+ u_short rx_next;
+ int i;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_ru_start()\n", dev->name);
+#endif
+
+ /* If the receive unit is already ready, there is nothing to do. */
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ if((scb_cs & SCB_ST_RUS) == SCB_ST_RUS_RDY)
+ return 0;
+
+ lp->rx_head = OFFSET_RU;
+
+ /* Build the circular list of frame descriptors in onboard RAM,
+ * each followed by its receive buffer descriptor and data buffer. */
+ for(i = 0, rx = lp->rx_head; i < NRXBLOCKS; i++, rx = rx_next)
+ {
+ /* The last block links back to the head and carries the EL bit. */
+ rx_next = (i == NRXBLOCKS - 1) ? lp->rx_head : rx + RXBLOCKZ;
+
+ fd.fd_status = 0;
+ fd.fd_command = (i == NRXBLOCKS - 1) ? FD_COMMAND_EL : 0;
+ fd.fd_link_offset = rx_next;
+ fd.fd_rbd_offset = rx + sizeof(fd);
+ obram_write(ioaddr, rx, (unsigned char *)&fd, sizeof(fd));
+
+ rbd.rbd_status = 0;
+ rbd.rbd_next_rbd_offset = I82586NULL;
+ rbd.rbd_bufl = rx + sizeof(fd) + sizeof(rbd);
+ rbd.rbd_bufh = 0;
+ rbd.rbd_el_size = RBD_EL | (RBD_SIZE & MAXDATAZ);
+ obram_write(ioaddr, rx + sizeof(fd),
+ (unsigned char *) &rbd, sizeof(rbd));
+
+ lp->rx_last = rx;
+ }
+
+ /* Tell the i82586 where the receive frame area starts. */
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_rfa_offset),
+ (unsigned char *) &lp->rx_head, sizeof(lp->rx_head));
+
+ /* Start the receive unit. */
+ scb_cs = SCB_CMD_RUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* Poll (up to ~10 ms) until the i82586 clears the command word. */
+ for(i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+
+ if(i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wavelan_ru_start(): board not accepting command.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_ru_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Initialise the transmit blocks.
+ * Start the command unit executing the NOP
+ * self-loop of the first transmit block.
+ *
+ * Here, we create the list of send buffers used to transmit packets
+ * between the PC and the command unit. For each buffer, we create a
+ * buffer descriptor (pointing on the buffer), a transmit command
+ * (pointing to the buffer descriptor) and a NOP command.
+ * The transmit command is linked to the NOP, and the NOP to itself.
+ * When we will have finished executing the transmit command, we will
+ * then loop on the NOP. By releasing the NOP link to a new command,
+ * we may send another buffer.
+ *
+ * (called by wv_hw_reset())
+ */
+static inline int
+wv_cu_start(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int i;
+ u_short txblock;
+ u_short first_nop;
+ u_short scb_cs;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_cu_start()\n", dev->name);
+#endif
+
+ lp->tx_first_free = OFFSET_CU;
+ lp->tx_first_in_use = I82586NULL;
+
+ /* Build each tx block in onboard RAM: a transmit command linked to
+ * a self-looping NOP, plus a buffer descriptor and its data buffer. */
+ for(i = 0, txblock = OFFSET_CU;
+ i < NTXBLOCKS;
+ i++, txblock += TXBLOCKZ)
+ {
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short buf_addr;
+
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ tx.tx_h.ac_status = 0;
+ tx.tx_h.ac_command = acmd_transmit | AC_CFLD_I;
+ tx.tx_h.ac_link = nop_addr;
+ tx.tx_tbd_offset = tbd_addr;
+ obram_write(ioaddr, tx_addr, (unsigned char *) &tx, sizeof(tx));
+
+ /* The NOP links to itself until a new command is chained in. */
+ nop.nop_h.ac_status = 0;
+ nop.nop_h.ac_command = acmd_nop;
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, nop_addr, (unsigned char *) &nop, sizeof(nop));
+
+ tbd.tbd_status = TBD_STATUS_EOF;
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *) &tbd, sizeof(tbd));
+ }
+
+ /* Point the command unit at the NOP of the last block and start it. */
+ first_nop = OFFSET_CU + (NTXBLOCKS - 1) * TXBLOCKZ + sizeof(ac_tx_t);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_cbl_offset),
+ (unsigned char *) &first_nop, sizeof(first_nop));
+
+ scb_cs = SCB_CMD_CUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* Poll (up to ~10 ms) until the i82586 clears the command word. */
+ for(i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+
+ if(i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wavelan_cu_start(): board not accepting command.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+ lp->tx_n_in_use = 0;
+ dev->tbusy = 0;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_cu_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a standard config of the WaveLAN controller (i82586).
+ *
+ * It initialises the scp, iscp and scb structures.
+ * The first two are just pointers to the next.
+ * The last one is used for basic configuration and for basic
+ * communication (interrupt status).
+ *
+ * (called by wv_hw_reset())
+ */
+static inline int
+wv_82586_start(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ scp_t scp; /* system configuration pointer */
+ iscp_t iscp; /* intermediate scp */
+ scb_t scb; /* system control block */
+ ach_t cb; /* Action command header */
+ u_char zeroes[512];
+ int i;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_start()\n", dev->name);
+#endif
+
+ /*
+ * Clear the onboard RAM (in 512-byte chunks).
+ */
+ memset(&zeroes[0], 0x00, sizeof(zeroes));
+ for(i = 0; i < I82586_MEMZ; i += sizeof(zeroes))
+ obram_write(ioaddr, i, &zeroes[0], sizeof(zeroes));
+
+ /*
+ * Construct the command unit structures:
+ * scp, iscp, scb, cb.
+ */
+ memset(&scp, 0x00, sizeof(scp));
+ scp.scp_sysbus = SCP_SY_16BBUS;
+ scp.scp_iscpl = OFFSET_ISCP;
+ obram_write(ioaddr, OFFSET_SCP, (unsigned char *)&scp, sizeof(scp));
+
+ memset(&iscp, 0x00, sizeof(iscp));
+ iscp.iscp_busy = 1;
+ iscp.iscp_offset = OFFSET_SCB;
+ obram_write(ioaddr, OFFSET_ISCP, (unsigned char *)&iscp, sizeof(iscp));
+
+ /* Our first command is to reset the i82586. */
+ memset(&scb, 0x00, sizeof(scb));
+ scb.scb_command = SCB_CMD_RESET;
+ scb.scb_cbl_offset = OFFSET_CU;
+ scb.scb_rfa_offset = OFFSET_RU;
+ obram_write(ioaddr, OFFSET_SCB, (unsigned char *)&scb, sizeof(scb));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* Wait for command to finish (chip clears iscp_busy). */
+ for(i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_ISCP, (unsigned char *) &iscp, sizeof(iscp));
+
+ if(iscp.iscp_busy == (unsigned short) 0)
+ break;
+
+ udelay(10);
+ }
+
+ if(i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_82586_start(): iscp_busy timeout.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+ /* Check command completion (expect "command done, CU not active"). */
+ for(i = 15; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_SCB, (unsigned char *) &scb, sizeof(scb));
+
+ if (scb.scb_status == (SCB_ST_CX | SCB_ST_CNA))
+ break;
+
+ udelay(10);
+ }
+
+ if (i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_82586_start(): status: expected 0x%02x, got 0x%02x.\n",
+ dev->name, SCB_ST_CX | SCB_ST_CNA, scb.scb_status);
+#endif
+ return -1;
+ }
+
+ wv_ack(dev);
+
+ /* Set the action command header: run the i82586 self-diagnose. */
+ memset(&cb, 0x00, sizeof(cb));
+ cb.ac_command = AC_CFLD_EL | (AC_CFLD_CMD & acmd_diagnose);
+ cb.ac_link = OFFSET_CU;
+ obram_write(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+
+ if(wv_synchronous_cmd(dev, "diag()") == -1)
+ return -1;
+
+ /* Read the diagnose result back and check the FAIL bit. */
+ obram_read(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+ if(cb.ac_status & AC_SFLD_FAIL)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_82586_start(): i82586 Self Test failed.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+#ifdef DEBUG_I82586_SHOW
+ wv_scb_show(ioaddr);
+#endif
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a standard configuration of the WaveLAN controller
+ * (i82586).
+ *
+ * This routine is a violent hack. We use the first free transmit block
+ * to make our configuration. In the buffer area, we create the three
+ * configuration commands (linked). We make the previous NOP point to
+ * the beginning of the buffer instead of the tx command. After, we go
+ * as usual to the NOP command.
+ * Note that only the last command (mc_set) will generate an interrupt.
+ *
+ * (called by wv_hw_reset(), wv_82586_reconfig())
+ */
+static void
+wv_82586_config(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ unsigned short txblock;
+ unsigned short txpred;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short cfg_addr;
+ unsigned short ias_addr;
+ unsigned short mcs_addr;
+ ac_tx_t tx;
+ ac_nop_t nop;
+ ac_cfg_t cfg; /* Configure action */
+ ac_ias_t ias; /* IA-setup action */
+ ac_mcs_t mcs; /* Multicast setup */
+ struct dev_mc_list * dmi;
+ unsigned long x;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name);
+#endif
+
+ /* Mask interrupts while we manipulate the tx ring state. */
+ x = wv_splhi();
+
+ /* Calculate addresses of next block and previous block */
+ txblock = lp->tx_first_free;
+ txpred = txblock - TXBLOCKZ;
+ if(txpred < OFFSET_CU)
+ txpred += NTXBLOCKS * TXBLOCKZ;
+ lp->tx_first_free += TXBLOCKZ;
+ if(lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
+
+ lp->tx_n_in_use++;
+
+ /* Calculate addresses of the different parts of the block. */
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ cfg_addr = tbd_addr + sizeof(tbd_t); /* beginning of the buffer */
+ ias_addr = cfg_addr + sizeof(cfg);
+ mcs_addr = ias_addr + sizeof(ias);
+
+ /*
+ * Transmit command
+ */
+ tx.tx_h.ac_status = 0xFFFF; /* Fake completion value */
+ obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
+ (unsigned char *) &tx.tx_h.ac_status,
+ sizeof(tx.tx_h.ac_status));
+
+ /*
+ * NOP command
+ */
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* Create a configure action */
+ memset(&cfg, 0x00, sizeof(cfg));
+
+#if 0
+ /*
+ * The default board configuration
+ */
+ cfg.fifolim_bytecnt = 0x080c;
+ cfg.addrlen_mode = 0x2600;
+ cfg.linprio_interframe = 0x7820; /* IFS=120, ACS=2 */
+ cfg.slot_time = 0xf00c; /* slottime=12 */
+ cfg.hardware = 0x0008; /* tx even without CD */
+ cfg.min_frame_len = 0x0040;
+#endif /* 0 */
+
+ /*
+ * For Linux we invert AC_CFG_ALOC(..) so as to conform
+ * to the way that net packets reach us from above.
+ * (See also ac_tx_t.)
+ */
+ cfg.cfg_byte_cnt = AC_CFG_BYTE_CNT(sizeof(ac_cfg_t) - sizeof(ach_t));
+ cfg.cfg_fifolim = AC_CFG_FIFOLIM(8);
+ cfg.cfg_byte8 = AC_CFG_SAV_BF(0) |
+ AC_CFG_SRDY(0);
+ cfg.cfg_byte9 = AC_CFG_ELPBCK(0) |
+ AC_CFG_ILPBCK(0) |
+ AC_CFG_PRELEN(AC_CFG_PLEN_2) |
+ AC_CFG_ALOC(1) |
+ AC_CFG_ADDRLEN(WAVELAN_ADDR_SIZE);
+ cfg.cfg_byte10 = AC_CFG_BOFMET(0) |
+ AC_CFG_ACR(0) |
+ AC_CFG_LINPRIO(0);
+ cfg.cfg_ifs = 32;
+ cfg.cfg_slotl = 0;
+ cfg.cfg_byte13 = AC_CFG_RETRYNUM(15) |
+ AC_CFG_SLTTMHI(2);
+ cfg.cfg_byte14 = AC_CFG_FLGPAD(0) |
+ AC_CFG_BTSTF(0) |
+ AC_CFG_CRC16(0) |
+ AC_CFG_NCRC(0) |
+ AC_CFG_TNCRS(1) |
+ AC_CFG_MANCH(0) |
+ AC_CFG_BCDIS(0) |
+ AC_CFG_PRM(lp->promiscuous);
+ cfg.cfg_byte15 = AC_CFG_ICDS(0) |
+ AC_CFG_CDTF(0) |
+ AC_CFG_ICSS(0) |
+ AC_CFG_CSTF(0);
+/*
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(64);
+*/
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(8);
+
+ /* Chain: configure -> ia-setup -> mc-setup -> NOP. */
+ cfg.cfg_h.ac_command = (AC_CFLD_CMD & acmd_configure);
+ cfg.cfg_h.ac_link = ias_addr;
+ obram_write(ioaddr, cfg_addr, (unsigned char *)&cfg, sizeof(cfg));
+
+ /* Setup the MAC address */
+ memset(&ias, 0x00, sizeof(ias));
+ ias.ias_h.ac_command = (AC_CFLD_CMD & acmd_ia_setup);
+ ias.ias_h.ac_link = mcs_addr;
+ memcpy(&ias.ias_addr[0], (unsigned char *)&dev->dev_addr[0], sizeof(ias.ias_addr));
+ obram_write(ioaddr, ias_addr, (unsigned char *)&ias, sizeof(ias));
+
+ /* Initialize adapter's ethernet multicast addresses
+ * (only the mc_setup command generates an interrupt: AC_CFLD_I). */
+ memset(&mcs, 0x00, sizeof(mcs));
+ mcs.mcs_h.ac_command = AC_CFLD_I | (AC_CFLD_CMD & acmd_mc_setup);
+ mcs.mcs_h.ac_link = nop_addr;
+ mcs.mcs_cnt = WAVELAN_ADDR_SIZE * lp->mc_count;
+ obram_write(ioaddr, mcs_addr, (unsigned char *)&mcs, sizeof(mcs));
+
+ /* If any address to set */
+ if(lp->mc_count)
+ {
+ /* NOTE(review): the addresses are pushed through the PIO port
+ * rather than obram_write() -- presumably PIOP1 continues from
+ * the mcs header just written; confirm against the host
+ * interface documentation. */
+ for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ outsw(PIOP1(ioaddr), (u_short *) dmi->dmi_addr,
+ WAVELAN_ADDR_SIZE >> 1);
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wv_82586_config(): set %d multicast addresses:\n",
+ dev->name, lp->mc_count);
+ for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ printk(KERN_DEBUG " %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
+ dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5] );
+#endif
+ }
+
+ /*
+ * Overwrite the predecessor NOP link
+ * so that it points to the configure action.
+ */
+ nop_addr = txpred + sizeof(tx);
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *)&nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = cfg_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* If watchdog not already active, activate it... */
+ if(lp->watchdog.prev == (timer_list *) NULL)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+
+ lp->reconfig_82586 = 0;
+
+ if(lp->tx_first_in_use == I82586NULL)
+ lp->tx_first_in_use = txblock;
+
+ if(lp->tx_n_in_use < NTXBLOCKS - 1)
+ dev->tbusy = 0;
+
+ wv_splx(x);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_config()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine, called by wavelan_close(), gracefully stops the
+ * WaveLAN controller (i82586).
+ */
+static inline void
+wv_82586_stop(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_short scb_cmd;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_stop()\n", dev->name);
+#endif
+
+ /* Suspend both command unit and receive unit
+ * (a single SCB command word carries both suspend requests). */
+ scb_cmd = (SCB_CMD_CUC & SCB_CMD_CUC_SUS) | (SCB_CMD_RUC & SCB_CMD_RUC_SUS);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *)&scb_cmd, sizeof(scb_cmd));
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* No more interrupts */
+ wv_ints_off(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_stop()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Totally reset the WaveLAN and restart it.
+ * Performs the following actions:
+ * 1. A power reset (reset DMA)
+ * 2. Initialize the radio modem (using wv_mmc_init)
+ * 3. Reset & Configure LAN controller (using wv_82586_start)
+ * 4. Start the LAN controller's command unit
+ * 5. Start the LAN controller's receive unit
+ */
+static int
+wv_hw_reset(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_hw_reset(dev=0x%x)\n", dev->name,
+ (unsigned int)dev);
+#endif
+
+ /* If watchdog was activated, kill it! */
+ if(lp->watchdog.prev != (timer_list *) NULL)
+ del_timer(&lp->watchdog);
+
+ /* Increase the number of resets done */
+ lp->nresets++;
+
+ wv_hacr_reset(ioaddr);
+ lp->hacr = HACR_DEFAULT;
+
+ /* Returns -1 as soon as any initialisation step fails. */
+ if((wv_mmc_init(dev) < 0) ||
+ (wv_82586_start(dev) < 0))
+ return -1;
+
+ /* Enable the card to send interrupts */
+ wv_ints_on(dev);
+
+ /* Start card functions
+ * (note: interrupts were enabled above, so a failure here returns
+ * with card interrupts still on). */
+ if((wv_ru_start(dev) < 0) ||
+ (wv_cu_start(dev) < 0))
+ return -1;
+
+ /* Finish configuration */
+ wv_82586_config(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_hw_reset()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Check if there is a WaveLAN at the specific base address.
+ * As a side effect, this reads the MAC address.
+ * (called in wavelan_probe() and init_module())
+ */
+static int
+wv_check_ioaddr(u_long ioaddr,
+ u_char * mac)
+{
+ int i; /* Loop counter */
+
+ /* Check if the base address if available
+ * (note: this function returns positive error codes --
+ * EADDRINUSE / ENODEV -- and 0 on success). */
+ if(check_region(ioaddr, sizeof(ha_t)))
+ return EADDRINUSE; /* ioaddr already used... */
+
+ /* Reset host interface */
+ wv_hacr_reset(ioaddr);
+
+ /* Read the MAC address from the parameter storage area */
+ psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_univ_mac_addr),
+ mac, 6);
+
+ /*
+ * Check the first three octets of the address for the manufacturer's code.
+ * Note: If this can't find your WaveLAN card, you've got a
+ * non-NCR/AT&T/Lucent ISA card. See wavelan.p.h for details on
+ * how to configure your card.
+ */
+ for(i = 0; i < (sizeof(MAC_ADDRESSES) / sizeof(char) / 3); i++)
+ if((mac[0] == MAC_ADDRESSES[i][0]) &&
+ (mac[1] == MAC_ADDRESSES[i][1]) &&
+ (mac[2] == MAC_ADDRESSES[i][2]))
+ return 0;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_WARNING "WaveLAN (0x%3X): your MAC address might be: %02X:%02X:%02X.\n",
+ ioaddr, mac[0], mac[1], mac[2]);
+#endif
+ return ENODEV;
+}
+
+/************************ INTERRUPT HANDLING ************************/
+
+/*
+ * This function is the interrupt handler for the WaveLAN card. This
+ * routine will be called whenever the card raises an interrupt.
+ */
+static void
+wavelan_interrupt(int irq,
+ void * dev_id,
+ struct pt_regs * regs)
+{
+ device * dev;
+ u_long ioaddr;
+ net_local * lp;
+ u_short hasr;
+ u_short status;
+ u_short ack_cmd;
+
+ /* Map the irq back to the device (dev_id is not used). */
+ if((dev = (device *) (irq2dev_map[irq])) == (device *) NULL)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_WARNING "wavelan_interrupt(): irq %d for unknown device.\n",
+ irq);
+#endif
+ return;
+ }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name);
+#endif
+
+ lp = (net_local *) dev->priv;
+ ioaddr = dev->base_addr;
+
+ /* Prevent reentrance. What should we do here? */
+#ifdef DEBUG_INTERRUPT_ERROR
+ if(dev->interrupt)
+ printk(KERN_INFO "%s: wavelan_interrupt(): Re-entering the interrupt handler.\n",
+ dev->name);
+#endif
+ dev->interrupt = 1;
+
+ if((hasr = hasr_read(ioaddr)) & HASR_MMC_INTR)
+ {
+ u_char dce_status;
+
+ /*
+ * Interrupt from the modem management controller.
+ * This will clear it -- ignored for now.
+ */
+ mmc_read(ioaddr, mmroff(0, mmr_dce_status), &dce_status, sizeof(dce_status));
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): unexpected mmc interrupt: status 0x%04x.\n",
+ dev->name, dce_status);
+#endif
+ }
+
+ if((hasr & HASR_82586_INTR) == 0)
+ {
+ dev->interrupt = 0;
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): interrupt not coming from i82586\n",
+ dev->name);
+#endif
+ return;
+ }
+
+ /* Read interrupt data. */
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
+ (unsigned char *) &status, sizeof(status));
+
+ /*
+ * Acknowledge the interrupt(s).
+ */
+ ack_cmd = status & SCB_ST_INT;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &ack_cmd, sizeof(ack_cmd));
+ set_chan_attn(ioaddr, lp->hacr);
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_interrupt(): status 0x%04x.\n",
+ dev->name, status);
+#endif
+
+ /* Command completed. */
+ if((status & SCB_ST_CX) == SCB_ST_CX)
+ {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_interrupt(): command completed.\n",
+ dev->name);
+#endif
+ wv_complete(dev, ioaddr, lp);
+
+ /* If watchdog was activated, kill it ! */
+ if(lp->watchdog.prev != (timer_list *) NULL)
+ del_timer(&lp->watchdog);
+ if(lp->tx_n_in_use > 0)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+ }
+
+ /* Frame received. */
+ if((status & SCB_ST_FR) == SCB_ST_FR)
+ {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_interrupt(): received packet.\n",
+ dev->name);
+#endif
+ wv_receive(dev);
+ }
+
+ /* Check the state of the command unit. */
+ if(((status & SCB_ST_CNA) == SCB_ST_CNA) ||
+ (((status & SCB_ST_CUS) != SCB_ST_CUS_ACTV) && dev->start))
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): CU inactive -- restarting\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+ /* Check the state of the receive unit.
+ * NOTE(review): both this check and the CU check above may trigger
+ * on the same status word, resetting the hardware twice in one
+ * interrupt. */
+ if(((status & SCB_ST_RNR) == SCB_ST_RNR) ||
+ (((status & SCB_ST_RUS) != SCB_ST_RUS_RDY) && dev->start))
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): RU not ready -- restarting\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+ dev->interrupt = 0;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_interrupt()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Watchdog: when we start a transmission, we set a timer in the
+ * kernel. If the transmission completes, this timer is disabled. If
+ * the timer expires, we try to unlock the hardware.
+ *
+ * Note: this watchdog doesn't work on the same principle as the
+ * watchdog in the previous version of the ISA driver. I made it this
+ * way because the overhead of add_timer() and del_timer() is nothing
+ * and because it avoids calling the watchdog, saving some CPU time.
+ */
+static void
+wavelan_watchdog(u_long a)
+{
+ device * dev;
+ net_local * lp;
+ u_long ioaddr;
+ unsigned long x;
+ unsigned int nreaped;
+
+ /* The timer argument is the device pointer. */
+ dev = (device *) a;
+ ioaddr = dev->base_addr;
+ lp = (net_local *) dev->priv;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_watchdog()\n", dev->name);
+#endif
+
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_watchdog: watchdog timer expired\n",
+ dev->name);
+#endif
+
+ x = wv_splhi();
+
+ /* NOTE(review): dev/ioaddr/lp were already derived from 'a' above;
+ * these three assignments are redundant repeats. */
+ dev = (device *) a;
+ ioaddr = dev->base_addr;
+ lp = (net_local *) dev->priv;
+
+ /* Nothing pending: nothing to unlock. */
+ if(lp->tx_n_in_use <= 0)
+ {
+ wv_splx(x);
+ return;
+ }
+
+ /* Try to reap completed tx blocks. */
+ nreaped = wv_complete(dev, ioaddr, lp);
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_watchdog(): %d reaped, %d remain.\n",
+ dev->name, nreaped, lp->tx_n_in_use);
+#endif
+
+#ifdef DEBUG_PSA_SHOW
+ {
+ psa_t psa;
+ /* NOTE(review): psa_read() is called elsewhere as
+ * psa_read(ioaddr, lp->hacr, ...); passing 'dev' here looks like
+ * stale debug code -- confirm before enabling DEBUG_PSA_SHOW. */
+ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
+ wv_psa_show(&psa);
+ }
+#endif
+#ifdef DEBUG_MMC_SHOW
+ wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82586_SHOW
+ wv_cu_show(dev);
+#endif
+
+ /* If no buffer has been freed */
+ if(nreaped == 0)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_watchdog(): cleanup failed, trying reset\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+ else
+ /* Reset watchdog for next transmission. */
+ if(lp->tx_n_in_use > 0)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+
+ wv_splx(x);
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_watchdog()\n", dev->name);
+#endif
+}
+
+/********************* CONFIGURATION CALLBACKS *********************/
+/*
+ * Here are the functions called by the Linux networking code (NET3)
+ * for initialization, configuration and deinstallations of the
+ * WaveLAN ISA hardware.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Configure and start up the WaveLAN ISA adaptor.
+ * Called by NET3 when it "opens" the device.
+ */
+static int
+wavelan_open(device * dev)
+{
+ u_long x;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+ /* Check irq */
+ if(dev->irq == 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wavelan_open(): no IRQ\n", dev->name);
+#endif
+ return -ENXIO;
+ }
+
+ if((irq2dev_map[dev->irq] != (device *) NULL) ||
+ /* This is always true, but avoid the false IRQ. */
+ ((irq2dev_map[dev->irq] = dev) == (device *) NULL) ||
+ (request_irq(dev->irq, &wavelan_interrupt, 0, "WaveLAN", NULL) != 0))
+ {
+ irq2dev_map[dev->irq] = (device *) NULL;
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wavelan_open(): invalid IRQ\n", dev->name);
+#endif
+ return -EAGAIN;
+ }
+
+ x = wv_splhi();
+ if(wv_hw_reset(dev) != -1)
+ {
+ dev->interrupt = 0;
+ dev->start = 1;
+ }
+ else
+ {
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = (device *) NULL;
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wavelan_open(): impossible to start the card\n",
+ dev->name);
+#endif
+ return -EAGAIN;
+ }
+ wv_splx(x);
+
+ MOD_INC_USE_COUNT;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_open()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Shut down the WaveLAN ISA card.
+ * Called by NET3 when it "closes" the device.
+ */
+static int
+wavelan_close(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+ /* Not do the job twice. */
+ if(dev->start == 0)
+ return 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* If watchdog was activated, kill it! */
+ if(lp->watchdog.prev != (timer_list *) NULL)
+ del_timer(&lp->watchdog);
+
+ /*
+ * Flush the Tx and disable Rx.
+ */
+ wv_82586_stop(dev);
+
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = (device *) NULL;
+
+ MOD_DEC_USE_COUNT;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_close()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Probe an I/O address, and if the WaveLAN is there configure the
+ * device structure
+ * (called by wavelan_probe() & via init_module())
+ */
+static int
+wavelan_config(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ u_char irq_mask;
+ int irq;
+ net_local * lp;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_config(dev=0x%x, ioaddr=0x%x)\n", dev->name,
+ (unsigned int)dev, ioaddr);
+#endif
+
+ /* Check irq arg on command line */
+ if(dev->irq != 0)
+ {
+ irq_mask = wv_irq_to_psa(dev->irq);
+
+ if(irq_mask == 0)
+ {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING "%s: wavelan_config(): invalid irq %d -- ignored.\n",
+ dev->name, dev->irq);
+#endif
+ dev->irq = 0;
+ }
+ else
+ {
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wavelan_config(): changing irq to %d\n",
+ dev->name, dev->irq);
+#endif
+ psa_write(ioaddr, HACR_DEFAULT,
+ psaoff(0, psa_int_req_no), &irq_mask, 1);
+ wv_hacr_reset(ioaddr);
+ }
+ }
+
+ psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_int_req_no), &irq_mask, 1);
+ if((irq = wv_psa_to_irq(irq_mask)) == -1)
+ {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO "%s: wavelan_config(): could not wavelan_map_irq(%d).\n",
+ dev->name, irq_mask);
+#endif
+ return EAGAIN;
+ }
+
+ dev->irq = irq;
+
+ request_region(ioaddr, sizeof(ha_t), "wavelan");
+
+ dev->mem_start = 0x0000;
+ dev->mem_end = 0x0000;
+ dev->if_port = 0;
+
+ /* Initialize device structures */
+ dev->priv = kmalloc(sizeof(net_local), GFP_KERNEL);
+ if(dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0x00, sizeof(net_local));
+ lp = (net_local *)dev->priv;
+
+ /* Back link to the device structure. */
+ lp->dev = dev;
+ /* Add the device at the beginning of the linked list. */
+ lp->next = wavelan_list;
+ wavelan_list = lp;
+
+ lp->hacr = HACR_DEFAULT;
+
+ lp->watchdog.function = wavelan_watchdog;
+ lp->watchdog.data = (unsigned long) dev;
+ lp->promiscuous = 0;
+ lp->mc_count = 0;
+
+ /*
+ * Fill in the fields of the device structure
+ * with Ethernet-generic values.
+ */
+ ether_setup(dev);
+
+ dev->open = wavelan_open;
+ dev->stop = wavelan_close;
+ dev->hard_start_xmit = wavelan_packet_xmit;
+ dev->get_stats = wavelan_get_stats;
+ dev->set_multicast_list = &wavelan_set_multicast_list;
+ dev->set_mac_address = &wavelan_set_mac_address;
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+ dev->do_ioctl = wavelan_ioctl;
+ dev->get_wireless_stats = wavelan_get_wireless_stats;
+#endif
+
+ dev->mtu = WAVELAN_MTU;
+
+ /* Display nice info */
+ wv_init_info(dev);
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_config()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Check for a network adaptor of this type. Return '0' iff one
+ * exists. (There seem to be different interpretations of
+ * the initial value of dev->base_addr.
+ * We follow the example in drivers/net/ne.c.)
+ * (called in "Space.c")
+ * As this function is called outside the wavelan module, it should be
+ * declared extern, but that seems to cause trouble...
+ */
+/* extern */ int
+wavelan_probe(device * dev)
+{
+ short base_addr;
+ mac_addr mac; /* MAC address (check WaveLAN existence) */
+ int i;
+ int r;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_probe(dev=0x%x (base_addr=0x%x))\n",
+ dev->name, (unsigned int)dev, (unsigned int)dev->base_addr);
+#endif
+
+#ifdef STRUCT_CHECK
+ if (wv_struct_check() != (char *) NULL)
+ {
+ printk(KERN_WARNING "%s: wavelan_probe(): structure/compiler botch: \"%s\"\n",
+ dev->name, wv_struct_check());
+ return ENODEV;
+ }
+#endif /* STRUCT_CHECK */
+
+ /* Check the value of the command line parameter for base address */
+ base_addr = dev->base_addr;
+
+ /* Don't probe at all. */
+ if(base_addr < 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wavelan_probe(): invalid base address\n",
+ dev->name);
+#endif
+ return ENXIO;
+ }
+
+ /* Check a single specified location. */
+ if(base_addr > 0x100)
+ {
+      /* Check if there is something at this base address */
+ if((r = wv_check_ioaddr(base_addr, mac)) == 0)
+ {
+ memcpy(dev->dev_addr, mac, 6); /* Copy MAC address */
+ r = wavelan_config(dev);
+ }
+
+#ifdef DEBUG_CONFIG_INFO
+ if(r != 0)
+ printk(KERN_DEBUG "%s: wavelan_probe(): no device at specified base address (0x%X) or address already in use\n",
+ dev->name, base_addr);
+#endif
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_probe()\n", dev->name);
+#endif
+ return r;
+ }
+
+ /* Scan all possible addresses of the WaveLAN hardware */
+ for(i = 0; i < NELS(iobase); i++)
+ {
+ /* Check whether there is something at this base address */
+ if(wv_check_ioaddr(iobase[i], mac) == 0)
+ {
+ dev->base_addr = iobase[i]; /* Copy base address. */
+ memcpy(dev->dev_addr, mac, 6); /* Copy MAC address. */
+ if(wavelan_config(dev) == 0)
+ {
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_probe()\n", dev->name);
+#endif
+ return 0;
+ }
+ }
+ }
+
+ /* We may have touch base_addr: another driver may not like it. */
+ dev->base_addr = base_addr;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wavelan_probe(): no device found\n",
+ dev->name);
+#endif
+
+ return ENODEV;
+}
+
+/****************************** MODULE ******************************/
+/*
+ * Module entry point: insertion & removal
+ */
+
+#ifdef MODULE
+/*------------------------------------------------------------------*/
+/*
+ * Insertion of the module.
+ * I'm now quite proud of the multi-device support.
+ */
+int
+init_module(void)
+{
+ mac_addr mac; /* MAC address (check WaveLAN existence) */
+ int ret = 0;
+ int i;
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "-> init_module()\n");
+#endif
+
+ /* If probing is asked */
+ if(io[0] == 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "WaveLAN init_module(): doing device probing (bad !)\n");
+ printk(KERN_WARNING "Specify base addresses while loading module to correct the problem\n");
+#endif
+
+ /* Copy the basic set of address to be probed. */
+ for(i = 0; i < NELS(iobase); i++)
+ io[i] = iobase[i];
+ }
+
+
+ /* Loop on all possible base addresses */
+ i = -1;
+ while((io[++i] != 0) && (i < NELS(io)))
+ {
+ /* Check if there is something at this base address. */
+ if(wv_check_ioaddr(io[i], mac) == 0)
+ {
+ device * dev;
+
+ /* Create device and set basics args */
+ dev = kmalloc(sizeof(struct device), GFP_KERNEL);
+ memset(dev, 0x00, sizeof(struct device));
+ dev->name = name[i];
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+ dev->init = &wavelan_config;
+ memcpy(dev->dev_addr, mac, 6); /* Copy MAC address */
+
+ /* Try to create the device */
+ if(register_netdev(dev) != 0)
+ {
+ /* DeAllocate everything */
+ /* Note : if dev->priv is mallocated, there is no way to fail */
+ kfree_s(dev, sizeof(struct device));
+ ret = -EIO;
+ }
+ } /* if there is something at the address */
+ } /* Loop on all addresses. */
+
+#ifdef DEBUG_CONFIG_ERRORS
+ if(wavelan_list == (net_local *) NULL)
+ printk(KERN_WARNING "WaveLAN init_module(): no device found\n");
+#endif
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "<- init_module()\n");
+#endif
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Removal of the module
+ */
+void
+cleanup_module(void)
+{
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "-> cleanup_module()\n");
+#endif
+
+ /* Loop on all devices and release them. */
+ while(wavelan_list != (net_local *) NULL)
+ {
+ device * dev = wavelan_list->dev;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: cleanup_module(): removing device at 0x%x\n",
+ dev->name, (unsigned int) dev);
+#endif
+
+ /* Release the ioport-region. */
+ release_region(dev->base_addr, sizeof(ha_t));
+
+ /* Definitely remove the device. */
+ unregister_netdev(dev);
+
+ /* Unlink the device. */
+ wavelan_list = wavelan_list->next;
+
+ /* Free pieces. */
+ kfree_s(dev->priv, sizeof(struct net_local));
+ kfree_s(dev, sizeof(struct device));
+ }
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "<- cleanup_module()\n");
+#endif
+}
+#endif /* MODULE */
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@cesdis.gsfc.nasa.gov),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Jean Tourrilhes (jt@hplb.hpl.hp.com),
+ * Girish Welling (welling@paul.rutgers.edu),
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (iialan@iiit.swan.ac.uk),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Please send bug reports, updates, comments to:
+ *
+ * Bruce Janson Email: bruce@cs.usyd.edu.au
+ * Basser Department of Computer Science Phone: +61-2-9351-3423
+ * University of Sydney, N.S.W., 2006, AUSTRALIA Fax: +61-2-9351-3838
+ */
diff --git a/linux/src/drivers/net/wavelan.h b/linux/src/drivers/net/wavelan.h
new file mode 100644
index 0000000..2e92c79
--- /dev/null
+++ b/linux/src/drivers/net/wavelan.h
@@ -0,0 +1,346 @@
+/*
+ * Wavelan ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ * Original copyright follows. See wavelan.p.h for details.
+ *
+ * This file contains the declarations for the Wavelan hardware. Note that
+ * the Wavelan ISA includes an i82586 controller (see definitions in
+ * file i82586.h).
+ *
+ * The main difference between the ISA hardware and the pcmcia one is
+ * the Ethernet Controller (i82586 instead of i82593).
+ * The i82586 allows multiple transmit buffers. The PSA needs to be accessed
+ * through the host interface.
+ */
+
+#ifndef _WAVELAN_H
+#define _WAVELAN_H
+
+/* The detection of the wavelan card is made by reading the MAC
+ * address from the card and checking it. If you have a non-AT&T
+ * product (OEM, like DEC RoamAbout, or Digital Ocean, Epson, ...),
+ * you might need to modify this part to accommodate your hardware...
+ */
+const char MAC_ADDRESSES[][3] =
+{
+ { 0x08, 0x00, 0x0E }, /* AT&T Wavelan (standard) & DEC RoamAbout */
+ { 0x08, 0x00, 0x6A }, /* AT&T Wavelan (alternate) */
+ /* Add your card here and send me the patch ! */
+};
+
+#define WAVELAN_ADDR_SIZE 6 /* Size of a MAC address */
+
+#define WAVELAN_MTU 1500 /* Maximum size of WaveLAN packet */
+
+#define MAXDATAZ (WAVELAN_ADDR_SIZE + WAVELAN_ADDR_SIZE + 2 + WAVELAN_MTU)
+
+/*************************** PC INTERFACE ****************************/
+
+/*
+ * Host Adaptor structure.
+ * (base is board port address).
+ */
+typedef union hacs_u hacs_u;
+union hacs_u
+{
+ unsigned short hu_command; /* Command register */
+#define HACR_RESET 0x0001 /* Reset board */
+#define HACR_CA 0x0002 /* Set Channel Attention for 82586 */
+#define HACR_16BITS 0x0004 /* 16 bits operation (0 => 8bits) */
+#define HACR_OUT0 0x0008 /* General purpose output pin 0 */
+ /* not used - must be 1 */
+#define HACR_OUT1 0x0010 /* General purpose output pin 1 */
+ /* not used - must be 1 */
+#define HACR_82586_INT_ENABLE 0x0020 /* Enable 82586 interrupts */
+#define HACR_MMC_INT_ENABLE 0x0040 /* Enable MMC interrupts */
+#define HACR_INTR_CLR_ENABLE 0x0080 /* Enable interrupt status read/clear */
+ unsigned short hu_status; /* Status Register */
+#define HASR_82586_INTR 0x0001 /* Interrupt request from 82586 */
+#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */
+#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */
+#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */
+};
+
+typedef struct ha_t ha_t;
+struct ha_t
+{
+ hacs_u ha_cs; /* Command and status registers */
+#define ha_command ha_cs.hu_command
+#define ha_status ha_cs.hu_status
+ unsigned short ha_mmcr; /* Modem Management Ctrl Register */
+ unsigned short ha_pior0; /* Program I/O Address Register Port 0 */
+ unsigned short ha_piop0; /* Program I/O Port 0 */
+ unsigned short ha_pior1; /* Program I/O Address Register Port 1 */
+ unsigned short ha_piop1; /* Program I/O Port 1 */
+ unsigned short ha_pior2; /* Program I/O Address Register Port 2 */
+ unsigned short ha_piop2; /* Program I/O Port 2 */
+};
+
+#define HA_SIZE 16
+
+#define hoff(p,f) (unsigned short)((void *)(&((ha_t *)((void *)0 + (p)))->f) - (void *)0)
+#define HACR(p) hoff(p, ha_command)
+#define HASR(p) hoff(p, ha_status)
+#define MMCR(p) hoff(p, ha_mmcr)
+#define PIOR0(p) hoff(p, ha_pior0)
+#define PIOP0(p) hoff(p, ha_piop0)
+#define PIOR1(p) hoff(p, ha_pior1)
+#define PIOP1(p) hoff(p, ha_piop1)
+#define PIOR2(p) hoff(p, ha_pior2)
+#define PIOP2(p) hoff(p, ha_piop2)
+
+/*
+ * Program I/O Mode Register values.
+ */
+#define STATIC_PIO 0 /* Mode 1: static mode */
+ /* RAM access ??? */
+#define AUTOINCR_PIO 1 /* Mode 2: auto increment mode */
+ /* RAM access ??? */
+#define AUTODECR_PIO 2 /* Mode 3: auto decrement mode */
+ /* RAM access ??? */
+#define PARAM_ACCESS_PIO 3 /* Mode 4: LAN parameter access mode */
+ /* Parameter access. */
+#define PIO_MASK 3 /* register mask */
+#define PIOM(cmd,piono) ((u_short)cmd << 10 << (piono * 2))
+
+#define HACR_DEFAULT (HACR_OUT0 | HACR_OUT1 | HACR_16BITS | PIOM(STATIC_PIO, 0) | PIOM(AUTOINCR_PIO, 1) | PIOM(PARAM_ACCESS_PIO, 2))
+#define HACR_INTRON (HACR_82586_INT_ENABLE | HACR_MMC_INT_ENABLE | HACR_INTR_CLR_ENABLE)
+
+/************************** MEMORY LAYOUT **************************/
+
+/*
+ * Onboard 64k RAM layout.
+ * (Offsets from 0x0000.)
+ */
+#define OFFSET_RU 0x0000 /* 75 % memory */
+#define OFFSET_CU 0xC000 /* 25 % memory */
+#define OFFSET_SCB (OFFSET_ISCP - sizeof(scb_t))
+#define OFFSET_ISCP (OFFSET_SCP - sizeof(iscp_t))
+#define OFFSET_SCP I82586_SCP_ADDR
+
+#define RXBLOCKZ (sizeof(fd_t) + sizeof(rbd_t) + MAXDATAZ)
+#define TXBLOCKZ (sizeof(ac_tx_t) + sizeof(ac_nop_t) + sizeof(tbd_t) + MAXDATAZ)
+
+#define NRXBLOCKS ((OFFSET_CU - OFFSET_RU) / RXBLOCKZ)
+#define NTXBLOCKS ((OFFSET_SCB - OFFSET_CU) / TXBLOCKZ)
+
+/********************** PARAMETER STORAGE AREA **********************/
+
+/*
+ * Parameter Storage Area (PSA).
+ */
+typedef struct psa_t psa_t;
+struct psa_t
+{
+ unsigned char psa_io_base_addr_1; /* [0x00] Base address 1 ??? */
+ unsigned char psa_io_base_addr_2; /* [0x01] Base address 2 */
+ unsigned char psa_io_base_addr_3; /* [0x02] Base address 3 */
+ unsigned char psa_io_base_addr_4; /* [0x03] Base address 4 */
+ unsigned char psa_rem_boot_addr_1; /* [0x04] Remote Boot Address 1 */
+ unsigned char psa_rem_boot_addr_2; /* [0x05] Remote Boot Address 2 */
+ unsigned char psa_rem_boot_addr_3; /* [0x06] Remote Boot Address 3 */
+ unsigned char psa_holi_params; /* [0x07] HOst Lan Interface (HOLI) Parameters */
+ unsigned char psa_int_req_no; /* [0x08] Interrupt Request Line */
+ unsigned char psa_unused0[7]; /* [0x09-0x0F] unused */
+
+ unsigned char psa_univ_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x10-0x15] Universal (factory) MAC Address */
+ unsigned char psa_local_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x16-1B] Local MAC Address */
+ unsigned char psa_univ_local_sel; /* [0x1C] Universal Local Selection */
+#define PSA_UNIVERSAL 0 /* Universal (factory) */
+#define PSA_LOCAL 1 /* Local */
+  unsigned char psa_comp_number;    /* [0x1D] Compatibility Number: */
+#define PSA_COMP_PC_AT_915 0 /* PC-AT 915 MHz */
+#define PSA_COMP_PC_MC_915 1 /* PC-MC 915 MHz */
+#define PSA_COMP_PC_AT_2400 2 /* PC-AT 2.4 GHz */
+#define PSA_COMP_PC_MC_2400 3 /* PC-MC 2.4 GHz */
+#define PSA_COMP_PCMCIA_915 4 /* PCMCIA 915 MHz or 2.0 */
+ unsigned char psa_thr_pre_set; /* [0x1E] Modem Threshold Preset */
+ unsigned char psa_feature_select; /* [0x1F] Call code required (1=on) */
+#define PSA_FEATURE_CALL_CODE 0x01 /* Call code required (Japan) */
+ unsigned char psa_subband; /* [0x20] Subband */
+#define PSA_SUBBAND_915 0 /* 915 MHz or 2.0 */
+#define PSA_SUBBAND_2425 1 /* 2425 MHz */
+#define PSA_SUBBAND_2460 2 /* 2460 MHz */
+#define PSA_SUBBAND_2484 3 /* 2484 MHz */
+#define PSA_SUBBAND_2430_5 4 /* 2430.5 MHz */
+ unsigned char psa_quality_thr; /* [0x21] Modem Quality Threshold */
+ unsigned char psa_mod_delay; /* [0x22] Modem Delay ??? (reserved) */
+ unsigned char psa_nwid[2]; /* [0x23-0x24] Network ID */
+ unsigned char psa_nwid_select; /* [0x25] Network ID Select On Off */
+ unsigned char psa_encryption_select; /* [0x26] Encryption On Off */
+ unsigned char psa_encryption_key[8]; /* [0x27-0x2E] Encryption Key */
+ unsigned char psa_databus_width; /* [0x2F] AT bus width select 8/16 */
+ unsigned char psa_call_code[8]; /* [0x30-0x37] (Japan) Call Code */
+ unsigned char psa_nwid_prefix[2]; /* [0x38-0x39] Roaming domain */
+ unsigned char psa_reserved[2]; /* [0x3A-0x3B] Reserved - fixed 00 */
+ unsigned char psa_conf_status; /* [0x3C] Conf Status, bit 0=1:config*/
+ unsigned char psa_crc[2]; /* [0x3D] CRC-16 over PSA */
+ unsigned char psa_crc_status; /* [0x3F] CRC Valid Flag */
+};
+
+#define PSA_SIZE 64
+
+/* Calculate offset of a field in the above structure
+ * Warning : only even addresses are used */
+#define psaoff(p,f) ((unsigned short) ((void *)(&((psa_t *) ((void *) NULL + (p)))->f) - (void *) NULL))
+
+/******************** MODEM MANAGEMENT INTERFACE ********************/
+
+/*
+ * Modem Management Controller (MMC) write structure.
+ */
+typedef struct mmw_t mmw_t;
+struct mmw_t
+{
+ unsigned char mmw_encr_key[8]; /* encryption key */
+ unsigned char mmw_encr_enable; /* enable/disable encryption */
+#define MMW_ENCR_ENABLE_MODE 0x02 /* Mode of security option */
+#define MMW_ENCR_ENABLE_EN 0x01 /* Enable security option */
+ unsigned char mmw_unused0[1]; /* unused */
+ unsigned char mmw_des_io_invert; /* Encryption option */
+#define MMW_DES_IO_INVERT_RES 0x0F /* Reserved */
+#define MMW_DES_IO_INVERT_CTRL 0xF0 /* Control ??? (set to 0) */
+ unsigned char mmw_unused1[5]; /* unused */
+ unsigned char mmw_loopt_sel; /* looptest selection */
+#define MMW_LOOPT_SEL_DIS_NWID 0x40 /* disable NWID filtering */
+#define MMW_LOOPT_SEL_INT 0x20 /* activate Attention Request */
+#define MMW_LOOPT_SEL_LS 0x10 /* looptest w/o collision avoidance */
+#define MMW_LOOPT_SEL_LT3A 0x08 /* looptest 3a */
+#define MMW_LOOPT_SEL_LT3B 0x04 /* looptest 3b */
+#define MMW_LOOPT_SEL_LT3C 0x02 /* looptest 3c */
+#define MMW_LOOPT_SEL_LT3D 0x01 /* looptest 3d */
+ unsigned char mmw_jabber_enable; /* jabber timer enable */
+ /* Abort transmissions > 200 ms */
+  unsigned char mmw_freeze;   /* freeze / unfreeze signal level */
+ /* 0 : signal level & qual updated for every new message, 1 : frozen */
+ unsigned char mmw_anten_sel; /* antenna selection */
+#define MMW_ANTEN_SEL_SEL 0x01 /* direct antenna selection */
+#define MMW_ANTEN_SEL_ALG_EN 0x02 /* antenna selection algo. enable */
+ unsigned char mmw_ifs; /* inter frame spacing */
+ /* min time between transmission in bit periods (.5 us) - bit 0 ignored */
+ unsigned char mmw_mod_delay; /* modem delay (synchro) */
+ unsigned char mmw_jam_time; /* jamming time (after collision) */
+ unsigned char mmw_unused2[1]; /* unused */
+ unsigned char mmw_thr_pre_set; /* level threshold preset */
+ /* Discard all packet with signal < this value (4) */
+ unsigned char mmw_decay_prm; /* decay parameters */
+  unsigned char mmw_decay_updat_prm;  /* decay update parameters */
+ unsigned char mmw_quality_thr; /* quality (z-quotient) threshold */
+ /* Discard all packet with quality < this value (3) */
+ unsigned char mmw_netw_id_l; /* NWID low order byte */
+ unsigned char mmw_netw_id_h; /* NWID high order byte */
+ /* Network ID or Domain : create virtual net on the air */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmw_mode_select; /* for analog tests (set to 0) */
+ unsigned char mmw_unused3[1]; /* unused */
+ unsigned char mmw_fee_ctrl; /* frequency eeprom control */
+#define MMW_FEE_CTRL_PRE 0x10 /* Enable protected instructions */
+#define MMW_FEE_CTRL_DWLD 0x08 /* Download eeprom to mmc */
+#define MMW_FEE_CTRL_CMD 0x07 /* EEprom commands : */
+#define MMW_FEE_CTRL_READ 0x06 /* Read */
+#define MMW_FEE_CTRL_WREN 0x04 /* Write enable */
+#define MMW_FEE_CTRL_WRITE 0x05 /* Write data to address */
+#define MMW_FEE_CTRL_WRALL 0x04 /* Write data to all addresses */
+#define MMW_FEE_CTRL_WDS 0x04 /* Write disable */
+#define MMW_FEE_CTRL_PRREAD 0x16 /* Read addr from protect register */
+#define MMW_FEE_CTRL_PREN 0x14 /* Protect register enable */
+#define MMW_FEE_CTRL_PRCLEAR 0x17 /* Unprotect all registers */
+#define MMW_FEE_CTRL_PRWRITE 0x15 /* Write addr in protect register */
+#define MMW_FEE_CTRL_PRDS 0x14 /* Protect register disable */
+ /* Never issue this command (PRDS) : it's irreversible !!! */
+
+ unsigned char mmw_fee_addr; /* EEprom address */
+#define MMW_FEE_ADDR_CHANNEL 0xF0 /* Select the channel */
+#define MMW_FEE_ADDR_OFFSET 0x0F /* Offset in channel data */
+#define MMW_FEE_ADDR_EN 0xC0 /* FEE_CTRL enable operations */
+#define MMW_FEE_ADDR_DS 0x00 /* FEE_CTRL disable operations */
+#define MMW_FEE_ADDR_ALL 0x40 /* FEE_CTRL all operations */
+#define MMW_FEE_ADDR_CLEAR 0xFF /* FEE_CTRL clear operations */
+
+ unsigned char mmw_fee_data_l; /* Write data to EEprom */
+ unsigned char mmw_fee_data_h; /* high octet */
+ unsigned char mmw_ext_ant; /* Setting for external antenna */
+#define MMW_EXT_ANT_EXTANT 0x01 /* Select external antenna */
+#define MMW_EXT_ANT_POL 0x02 /* Polarity of the antenna */
+#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
+#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
+#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
+};
+
+#define MMW_SIZE 37
+
+#define mmwoff(p,f) (unsigned short)((void *)(&((mmw_t *)((void *)0 + (p)))->f) - (void *)0)
+
+/*
+ * Modem Management Controller (MMC) read structure.
+ */
+typedef struct mmr_t mmr_t;
+struct mmr_t
+{
+ unsigned char mmr_unused0[8]; /* unused */
+ unsigned char mmr_des_status; /* encryption status */
+ unsigned char mmr_des_avail; /* encryption available (0x55 read) */
+#define MMR_DES_AVAIL_DES 0x55 /* DES available */
+#define MMR_DES_AVAIL_AES 0x33 /* AES (AT&T) available */
+ unsigned char mmr_des_io_invert; /* des I/O invert register */
+ unsigned char mmr_unused1[5]; /* unused */
+ unsigned char mmr_dce_status; /* DCE status */
+#define MMR_DCE_STATUS_RX_BUSY 0x01 /* receiver busy */
+#define MMR_DCE_STATUS_LOOPT_IND 0x02 /* loop test indicated */
+#define MMR_DCE_STATUS_TX_BUSY 0x04 /* transmitter on */
+#define MMR_DCE_STATUS_JBR_EXPIRED 0x08 /* jabber timer expired */
+ unsigned char mmr_dsp_id; /* DSP id (AA = Daedalus rev A) */
+ unsigned char mmr_unused2[2]; /* unused */
+ unsigned char mmr_correct_nwid_l; /* # of correct NWID's rxd (low) */
+ unsigned char mmr_correct_nwid_h; /* # of correct NWID's rxd (high) */
+ /* Warning : Read high order octet first !!! */
+ unsigned char mmr_wrong_nwid_l; /* # of wrong NWID's rxd (low) */
+ unsigned char mmr_wrong_nwid_h; /* # of wrong NWID's rxd (high) */
+ unsigned char mmr_thr_pre_set; /* level threshold preset */
+#define MMR_THR_PRE_SET 0x3F /* level threshold preset */
+#define MMR_THR_PRE_SET_CUR 0x80 /* Current signal above it */
+ unsigned char mmr_signal_lvl; /* signal level */
+#define MMR_SIGNAL_LVL 0x3F /* signal level */
+#define MMR_SIGNAL_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_silence_lvl; /* silence level (noise) */
+#define MMR_SILENCE_LVL 0x3F /* silence level */
+#define MMR_SILENCE_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_sgnl_qual; /* signal quality */
+#define MMR_SGNL_QUAL 0x0F /* signal quality */
+#define MMR_SGNL_QUAL_ANT 0x80 /* current antenna used */
+ unsigned char mmr_netw_id_l; /* NWID low order byte ??? */
+ unsigned char mmr_unused3[3]; /* unused */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmr_fee_status; /* Status of frequency eeprom */
+#define MMR_FEE_STATUS_ID 0xF0 /* Modem revision id */
+#define MMR_FEE_STATUS_DWLD 0x08 /* Download in progress */
+#define MMR_FEE_STATUS_BUSY 0x04 /* EEprom busy */
+ unsigned char mmr_unused4[1]; /* unused */
+ unsigned char mmr_fee_data_l; /* Read data from eeprom (low) */
+ unsigned char mmr_fee_data_h; /* Read data from eeprom (high) */
+};
+
+#define MMR_SIZE 36
+
+#define mmroff(p,f) (unsigned short)((void *)(&((mmr_t *)((void *)0 + (p)))->f) - (void *)0)
+
+/* Make the two above structures one */
+typedef union mm_t
+{
+ struct mmw_t w; /* Write to the mmc */
+ struct mmr_t r; /* Read from the mmc */
+} mm_t;
+
+#endif /* _WAVELAN_H */
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/linux/src/drivers/net/wavelan.p.h b/linux/src/drivers/net/wavelan.p.h
new file mode 100644
index 0000000..3a6124e
--- /dev/null
+++ b/linux/src/drivers/net/wavelan.p.h
@@ -0,0 +1,635 @@
+/*
+ * Wavelan ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ *
+ * This file contain all definition and declarations necessary for the
+ * wavelan isa driver. This file is a private header, so it should
+ * be included only on wavelan.c !!!
+ */
+
+#ifndef WAVELAN_P_H
+#define WAVELAN_P_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * This driver provide a Linux interface to the Wavelan ISA hardware
+ * The Wavelan is a product of Lucent ("http://wavelan.netland.nl/").
+ * This division was formerly part of NCR and then AT&T.
+ * Wavelan are also distributed by DEC (RoamAbout), Digital Ocean and
+ * Aironet (Arlan). If you have one of those product, you will need to
+ * make some changes below...
+ *
+ * This driver is still a beta software. A lot of bugs have been corrected,
+ * a lot of functionalities are implemented, the whole appear pretty stable,
+ * but there is still some area of improvement (encryption, performance...).
+ *
+ * To know how to use this driver, read the NET3 HOWTO.
+ * If you want to exploit the many other fonctionalities, look comments
+ * in the code...
+ *
+ * This driver is the result of the effort of many peoples (see below).
+ */
+
+/* ------------------------ SPECIFIC NOTES ------------------------ */
+/*
+ * wavelan.o is darn too big
+ * -------------------------
+ * That's true ! There is a very simple way to reduce the driver
+ * object by 33% (yes !). Comment out the following line :
+ * #include <linux/wireless.h>
+ *
+ * MAC address and hardware detection :
+ * ----------------------------------
+ *	The detection code of the wavelan checks that the first 3
+ * octets of the MAC address fit the company code. This type of
+ * detection work well for AT&T cards (because the AT&T code is
+ * hardcoded in wavelan.h), but of course will fail for other
+ * manufacturer.
+ *
+ * If you are sure that your card is derived from the wavelan,
+ * here is the way to configure it :
+ * 1) Get your MAC address
+ * a) With your card utilities (wfreqsel, instconf, ...)
+ * b) With the driver :
+ * o compile the kernel with DEBUG_CONFIG_INFO enabled
+ * o Boot and look the card messages
+ * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan.h)
+ * 3) Compile & verify
+ * 4) Send me the MAC code - I will include it in the next version...
+ *
+ * "CU Inactive" message at boot up :
+ * -----------------------------------
+ *	It seems that there are some weird timing problems with the
+ *	Intel microcontroller. In fact, this message is triggered by a
+ * bad reading of the on board ram the first time we read the
+ * control block. If you ignore this message, all is ok (but in
+ * fact, currently, it reset the wavelan hardware).
+ *
+ * To get rid of that problem, there is two solution. The first
+ * is to add a dummy read of the scb at the end of
+ * wv_82586_config. The second is to add the timers
+ * wv_synchronous_cmd and wv_ack (the udelay just after the
+ * waiting loops - seem that the controler is not totally ready
+ * when it say it is !).
+ *
+ * In the current code, I use the second solution (to be
+ * consistent with the original solution of Bruce Janson).
+ */
+
+/* --------------------- WIRELESS EXTENSIONS --------------------- */
+/*
+ * This driver is the first one to support "wireless extensions".
+ * This set of extensions provide you some way to control the wireless
+ * caracteristics of the hardware in a standard way and support for
+ * applications for taking advantage of it (like Mobile IP).
+ *
+ * You will need to enable the CONFIG_NET_RADIO define in the kernel
+ * configuration to enable the wireless extensions (this is the one
+ * giving access to the radio network device choice).
+ *
+ * It might also be a good idea as well to fetch the wireless tools to
+ * configure the device and play a bit.
+ */
+
+/* ---------------------------- FILES ---------------------------- */
+/*
+ * wavelan.c : The actual code for the driver - C functions
+ *
+ * wavelan.p.h : Private header : local types / vars for the driver
+ *
+ * wavelan.h : Description of the hardware interface & structs
+ *
+ * i82586.h : Description of the Ethernet controller
+ */
+
+/* --------------------------- HISTORY --------------------------- */
+/*
+ * (Made with information in drivers headers. It may not be accurate,
+ * and I garantee nothing except my best effort...)
+ *
+ * The history of the Wavelan drivers is as complicated as history of
+ * the Wavelan itself (NCR -> AT&T -> Lucent).
+ *
+ * All started with Anders Klemets <klemets@paul.rutgers.edu>,
+ * writting a Wavelan ISA driver for the MACH microkernel. Girish
+ * Welling <welling@paul.rutgers.edu> had also worked on it.
+ * Keith Moore modify this for the Pcmcia hardware.
+ *
+ * Robert Morris <rtm@das.harvard.edu> port these two drivers to BSDI
+ * and add specific Pcmcia support (there is currently no equivalent
+ * of the PCMCIA package under BSD...).
+ *
+ * Jim Binkley <jrb@cs.pdx.edu> port both BSDI drivers to freeBSD.
+ *
+ * Bruce Janson <bruce@cs.usyd.edu.au> port the BSDI ISA driver to Linux.
+ *
+ * Anthony D. Joseph <adj@lcs.mit.edu> started modify Bruce driver
+ * (with help of the BSDI PCMCIA driver) for PCMCIA.
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished is work.
+ * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to start
+ * correctly 2.00 cards (2.4 GHz with frequency selection).
+ * David Hinds <dhinds@hyper.stanford.edu> integrated the whole in his
+ * Pcmcia package (+ bug corrections).
+ *
+ * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
+ * patchs to the Pcmcia driver. After, I added code in the ISA driver
+ * for Wireless Extensions and full support of frequency selection
+ * cards. Then, I've done the same to the Pcmcia driver + some
+ * reorganisation. Finally, I came back to the ISA driver to
+ * upgrade it at the same level as the Pcmcia one and reorganise
+ * the code
+ * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
+ * much needed informations on the Wavelan hardware.
+ */
+
+/* The original copyrights and litteratures mention others names and
+ * credits. I don't know what there part in this development was...
+ */
+
+/* By the way : for the copyright & legal stuff :
+ * Almost everybody wrote code under GNU or BSD license (or alike),
+ * and want that their original copyright remain somewhere in the
+ * code (for myself, I go with the GPL).
+ * Nobody want to take responsibility for anything, except the fame...
+ */
+
+/* --------------------------- CREDITS --------------------------- */
+/*
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@cesdis.gsfc.nasa.gov),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Brent Elphick <belphick@uwaterloo.ca>,
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Jean Tourrilhes (jt@hplb.hpl.hp.com),
+ * Girish Welling (welling@paul.rutgers.edu),
+ * Clark Woodworth <clark@hiway1.exit109.com>
+ * Yongguang Zhang <ygz@isl.hrl.hac.com>...
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (iialan@iiit.swan.ac.uk),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Stanislav Sinyagin <stas@isf.ru>
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Additional Credits:
+ *
+ * My developpement has been done under Linux 2.0.x (Debian 1.1) with
+ * an HP Vectra XP/60.
+ *
+ */
+
+/* ------------------------- IMPROVEMENTS ------------------------- */
+/*
+ * I proudly present :
+ *
+ * Changes mades in first pre-release :
+ * ----------------------------------
+ * - Reorganisation of the code, function name change
+ * - Creation of private header (wavelan.p.h)
+ * - Reorganised debug messages
+ * - More comments, history, ...
+ * - mmc_init : configure the PSA if not done
+ * - mmc_init : correct default value of level threshold for pcmcia
+ * - mmc_init : 2.00 detection better code for 2.00 init
+ * - better info at startup
+ * - irq setting (note : this setting is permanent...)
+ * - Watchdog : change strategy (+ solve module removal problems)
+ * - add wireless extensions (ioctl & get_wireless_stats)
+ * get/set nwid/frequency on fly, info for /proc/net/wireless
+ * - More wireless extension : SETSPY and GETSPY
+ * - Make wireless extensions optional
+ * - Private ioctl to set/get quality & level threshold, histogram
+ * - Remove /proc/net/wavelan
+ * - Supress useless stuff from lp (net_local)
+ * - kernel 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
+ * - Add message level (debug stuff in /var/adm/debug & errors not
+ * displayed at console and still in /var/adm/messages)
+ * - multi device support
+ * - Start fixing the probe (init code)
+ * - More inlines
+ * - man page
+ * - Lot of others minor details & cleanups
+ *
+ * Changes made in second pre-release :
+ * ----------------------------------
+ * - Cleanup init code (probe & module init)
+ * - Better multi device support (module)
+ * - name assignement (module)
+ *
+ * Changes made in third pre-release :
+ * ---------------------------------
+ * - Be more conservative on timers
+ * - Preliminary support for multicast (I still lack some details...)
+ *
+ * Changes made in fourth pre-release :
+ * ----------------------------------
+ * - multicast (revisited and finished)
+ * - Avoid reset in set_multicast_list (a really big hack)
+ * if somebody could apply this code for other i82586 based driver...
+ * - Share on board memory 75% RU / 25% CU (instead of 50/50)
+ *
+ * Changes made for release in 2.1.15 :
+ * ----------------------------------
+ * - Change the detection code for multi manufacturer code support
+ *
+ * Changes made for release in 2.1.17 :
+ * ----------------------------------
+ * - Update to wireless extensions changes
+ * - Silly bug in card initial configuration (psa_conf_status)
+ *
+ * Changes made for release in 2.1.27 & 2.0.30 :
+ * -------------------------------------------
+ * - Small bug in debug code (probably not the last one...)
+ * - Remove extern kerword for wavelan_probe()
+ * - Level threshold is now a standard wireless extension (version 4 !)
+ *
+ * Changes made for release in 2.1.36 :
+ * ----------------------------------
+ * - Encryption setting from Brent Elphick (thanks a lot !)
+ * - 'ioaddr' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
+ *
+ * Wishes & dreams :
+ * ---------------
+ * - Roaming
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+#include <linux/timer.h>
+
+#include <linux/wireless.h> /* Wireless extensions */
+
+/* Wavelan declarations */
+#include "i82586.h"
+#include "wavelan.h"
+
+/****************************** DEBUG ******************************/
+
+#undef DEBUG_MODULE_TRACE /* Module insertion/removal */
+#undef DEBUG_CALLBACK_TRACE /* Calls made by Linux */
+#undef DEBUG_INTERRUPT_TRACE /* Calls to handler */
+#undef DEBUG_INTERRUPT_INFO /* type of interrupt & so on */
+#define DEBUG_INTERRUPT_ERROR /* problems */
+#undef DEBUG_CONFIG_TRACE /* Trace the config functions */
+#undef DEBUG_CONFIG_INFO /* What's going on... */
+#define DEBUG_CONFIG_ERRORS /* Errors on configuration */
+#undef DEBUG_TX_TRACE /* Transmission calls */
+#undef DEBUG_TX_INFO /* Header of the transmited packet */
+#define DEBUG_TX_ERROR /* unexpected conditions */
+#undef DEBUG_RX_TRACE /* Transmission calls */
+#undef DEBUG_RX_INFO /* Header of the transmited packet */
+#define DEBUG_RX_ERROR /* unexpected conditions */
+#undef DEBUG_PACKET_DUMP 16 /* Dump packet on the screen */
+#undef DEBUG_IOCTL_TRACE /* Misc call by Linux */
+#undef DEBUG_IOCTL_INFO /* Various debug info */
+#define DEBUG_IOCTL_ERROR /* What's going wrong */
+#define DEBUG_BASIC_SHOW /* Show basic startup info */
+#undef DEBUG_VERSION_SHOW /* Print version info */
+#undef DEBUG_PSA_SHOW /* Dump psa to screen */
+#undef DEBUG_MMC_SHOW /* Dump mmc to screen */
+#undef DEBUG_SHOW_UNUSED /* Show also unused fields */
+#undef DEBUG_I82586_SHOW /* Show i82586 status */
+#undef DEBUG_DEVICE_SHOW /* Show device parameters */
+
+/* Options : */
+#define USE_PSA_CONFIG /* Use info from the PSA */
+#define IGNORE_NORMAL_XMIT_ERRS /* Don't bother with normal conditions */
+#undef STRUCT_CHECK /* Verify padding of structures */
+#undef PSA_CRC /* Check CRC in PSA */
+#undef OLDIES /* Old code (to redo) */
+#undef RECORD_SNR /* To redo */
+#undef EEPROM_IS_PROTECTED /* Doesn't seem to be necessary */
+#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+/* Warning : these stuff will slow down the driver... */
+#define WIRELESS_SPY /* Enable spying addresses */
+#undef HISTOGRAM /* Enable histogram of sig level... */
+#endif
+
+/************************ CONSTANTS & MACROS ************************/
+
+#ifdef DEBUG_VERSION_SHOW
+static const char *version = "wavelan.c : v16 (wireless extensions) 17/4/97\n";
+#endif
+
+/* Watchdog temporisation */
+#define WATCHDOG_JIFFIES 32 /* TODO: express in HZ. */
+
+/* Macro to get the number of elements in an array */
+#define NELS(a) (sizeof(a) / sizeof(a[0]))
+
+/* ------------------------ PRIVATE IOCTL ------------------------ */
+
+#define SIOCSIPQTHR SIOCDEVPRIVATE /* Set quality threshold */
+#define SIOCGIPQTHR SIOCDEVPRIVATE + 1 /* Get quality threshold */
+#define SIOCSIPLTHR SIOCDEVPRIVATE + 2 /* Set level threshold */
+#define SIOCGIPLTHR SIOCDEVPRIVATE + 3 /* Get level threshold */
+
+#define SIOCSIPHISTO SIOCDEVPRIVATE + 6 /* Set histogram ranges */
+#define SIOCGIPHISTO SIOCDEVPRIVATE + 7 /* Get histogram values */
+
+/* ----------------------- VERSION SUPPORT ----------------------- */
+
+/* This ugly patch is needed to cope with old version of the kernel */
+#ifndef copy_from_user
+#define copy_from_user memcpy_fromfs
+#define copy_to_user memcpy_tofs
+#endif
+
+/****************************** TYPES ******************************/
+
+/* Shortcuts */
+typedef struct device device;
+typedef struct enet_statistics en_stats;
+typedef struct iw_statistics iw_stats;
+typedef struct iw_quality iw_qual;
+typedef struct iw_freq iw_freq;
+typedef struct net_local net_local;
+typedef struct timer_list timer_list;
+
+/* Basic types */
+typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
+
+/*
+ * Static specific data for the interface.
+ *
+ * For each network interface, Linux keep data in two structure. "device"
+ * keep the generic data (same format for everybody) and "net_local" keep
+ * the additional specific data.
+ * Note that some of this specific data is in fact generic (en_stats, for
+ * example).
+ */
+struct net_local
+{
+  net_local *	next;		/* Linked list of the devices */
+  device *	dev;		/* Reverse link... */
+  en_stats	stats;		/* Ethernet interface statistics */
+  int		nresets;	/* Number of hw resets */
+  u_char	reconfig_82586;	/* Need to reconfigure the controler */
+  u_char	promiscuous;	/* Promiscuous mode */
+  int		mc_count;	/* Number of multicast addresses */
+  timer_list	watchdog;	/* To avoid blocking state */
+  u_short	hacr;		/* Current host interface state */
+
+  /* i82586 on-board buffer bookkeeping.  NOTE(review): the rx_*/tx_*
+   * fields look like ring offsets into the controller's on-board RAM
+   * and tx_n_in_use like a count of pending Tx blocks -- the exact
+   * semantics live in wavelan.c, verify there. */
+  int		tx_n_in_use;
+  u_short	rx_head;
+  u_short	rx_last;
+  u_short	tx_first_free;
+  u_short	tx_first_in_use;
+
+#ifdef WIRELESS_EXT
+  iw_stats	wstats;		/* Wireless specific stats */
+#endif
+
+#ifdef WIRELESS_SPY
+  int		spy_number;		/* Number of addresses to spy */
+  mac_addr	spy_address[IW_MAX_SPY];	/* The addresses to spy */
+  iw_qual	spy_stat[IW_MAX_SPY];	/* Statistics gathered */
+#endif	/* WIRELESS_SPY */
+#ifdef HISTOGRAM
+  int		his_number;		/* Number of intervals */
+  u_char	his_range[16];		/* Boundaries of interval ]n-1; n] */
+  u_long	his_sum[16];		/* Sum in interval */
+#endif	/* HISTOGRAM */
+};
+
+/**************************** PROTOTYPES ****************************/
+
+/* ----------------------- MISC SUBROUTINES ------------------------ */
+static inline unsigned long /* flags */
+ wv_splhi(void); /* Disable interrupts */
+static inline void
+ wv_splx(unsigned long); /* ReEnable interrupts : flags */
+static u_char
+ wv_irq_to_psa(int);
+static int
+ wv_psa_to_irq(u_char);
+/* ------------------- HOST ADAPTER SUBROUTINES ------------------- */
+static inline u_short /* data */
+ hasr_read(u_long); /* Read the host interface : base address */
+static inline void
+ hacr_write(u_long, /* Write to host interface : base address */
+ u_short), /* data */
+ hacr_write_slow(u_long,
+ u_short),
+ set_chan_attn(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_hacr_reset(u_long), /* ioaddr */
+ wv_16_off(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_16_on(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_ints_off(device *),
+ wv_ints_on(device *);
+/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
+static void
+ psa_read(u_long, /* Read the Parameter Storage Area */
+ u_short, /* hacr */
+ int, /* offset in PSA */
+ u_char *, /* buffer to fill */
+ int), /* size to read */
+ psa_write(u_long, /* Write to the PSA */
+ u_short, /* hacr */
+ int, /* Offset in psa */
+ u_char *, /* Buffer in memory */
+ int); /* Length of buffer */
+static inline void
+ mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */
+ u_short,
+ u_char),
+ mmc_write(u_long, /* Write n bytes to the MMC */
+ u_char,
+ u_char *,
+ int);
+static inline u_char /* Read 1 byte from the MMC */
+ mmc_in(u_long,
+ u_short);
+static inline void
+ mmc_read(u_long, /* Read n bytes from the MMC */
+ u_char,
+ u_char *,
+ int),
+ fee_wait(u_long, /* Wait for frequency EEprom : base address */
+ int, /* Base delay to wait for */
+ int); /* Number of time to wait */
+static void
+ fee_read(u_long, /* Read the frequency EEprom : base address */
+ u_short, /* destination offset */
+ u_short *, /* data buffer */
+ int); /* number of registers */
+/* ---------------------- I82586 SUBROUTINES ----------------------- */
+static /*inline*/ void
+ obram_read(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static inline void
+ obram_write(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static void
+ wv_ack(device *);
+static inline int
+ wv_synchronous_cmd(device *,
+ const char *),
+ wv_config_complete(device *,
+ u_long,
+ net_local *);
+static int
+ wv_complete(device *,
+ u_long,
+ net_local *);
+static inline void
+ wv_82586_reconfig(device *);
+/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
+#ifdef DEBUG_I82586_SHOW
+static void
+ wv_scb_show(unsigned short);
+#endif
+static inline void
+ wv_init_info(device *); /* display startup info */
+/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
+static en_stats *
+ wavelan_get_stats(device *); /* Give stats /proc/net/dev */
+static void
+ wavelan_set_multicast_list(device *);
+/* ----------------------- PACKET RECEPTION ----------------------- */
+static inline void
+ wv_packet_read(device *, /* Read a packet from a frame */
+ u_short,
+ int),
+ wv_receive(device *); /* Read all packets waiting */
+/* --------------------- PACKET TRANSMISSION --------------------- */
+static inline void
+ wv_packet_write(device *, /* Write a packet to the Tx buffer */
+ void *,
+ short);
+static int
+ wavelan_packet_xmit(struct sk_buff *, /* Send a packet */
+ device *);
+/* -------------------- HARDWARE CONFIGURATION -------------------- */
+static inline int
+ wv_mmc_init(device *), /* Initialize the modem */
+ wv_ru_start(device *), /* Start the i82586 receiver unit */
+ wv_cu_start(device *), /* Start the i82586 command unit */
+ wv_82586_start(device *); /* Start the i82586 */
+static void
+ wv_82586_config(device *); /* Configure the i82586 */
+static inline void
+ wv_82586_stop(device *);
+static int
+ wv_hw_reset(device *), /* Reset the wavelan hardware */
+ wv_check_ioaddr(u_long, /* ioaddr */
+ u_char *); /* mac address (read) */
+/* ---------------------- INTERRUPT HANDLING ---------------------- */
+static void
+ wavelan_interrupt(int, /* Interrupt handler */
+ void *,
+ struct pt_regs *);
+static void
+ wavelan_watchdog(u_long); /* Transmission watchdog */
+/* ------------------- CONFIGURATION CALLBACKS ------------------- */
+static int
+ wavelan_open(device *), /* Open the device */
+ wavelan_close(device *), /* Close the device */
+ wavelan_config(device *); /* Configure one device */
+extern int
+ wavelan_probe(device *); /* See Space.c */
+
+/**************************** VARIABLES ****************************/
+
+/*
+ * Root of the linked list of wavelan devices (one net_local per card).
+ * Used to verify that we don't configure two drivers on the same base
+ * address, and to walk all devices for cleanup when removing the module.
+ */
+static net_local *	wavelan_list	= (net_local *) NULL;
+
+/*
+ * This table is used to translate the psa value to irq number
+ * and vice versa...
+ * Index = IRQ line number (0-15); value = the single-bit encoding the
+ * card stores in the PSA.  A value of 0 apparently marks an IRQ the
+ * hardware cannot use -- see wv_irq_to_psa()/wv_psa_to_irq() above.
+ */
+static u_char	irqvals[]	=
+{
+	0,    0,    0,    0x01,
+	0x02, 0x04, 0,    0x08,
+	0,    0,    0x10, 0x20,
+	0x40, 0,    0,    0x80,
+};
+
+/*
+ * Table of the available i/o address (base address) for wavelan.
+ * Tried in order by the probe code; see the note below on why most
+ * historical candidates are compiled out.
+ */
+static unsigned short	iobase[]	=
+{
+#if	0
+  /* Leave out 0x3C0 for now -- seems to clash with some video
+   * controllers.
+   * Leave out the others too -- we will always use 0x390 and leave
+   * 0x300 for the Ethernet device.
+   * Jean II : 0x3E0 is really fine as well...
+   */
+  0x300, 0x390, 0x3E0, 0x3C0
+#endif	/* 0 */
+  0x390, 0x3E0
+};
+
+#ifdef	MODULE
+/* Name of the devices (memory allocation) -- supports up to 4 cards. */
+static char	devname[4][IFNAMSIZ] = { "", "", "", "" };
+
+/* Parameters set by insmod: one io/irq/name slot per card. */
+static int	io[4]	= { 0, 0, 0, 0 };
+static int	irq[4]	= { 0, 0, 0, 0 };
+static char *	name[4]	= { devname[0], devname[1], devname[2], devname[3] };
+#endif	/* MODULE */
+
+#endif /* WAVELAN_P_H */
diff --git a/linux/src/drivers/net/wd.c b/linux/src/drivers/net/wd.c
new file mode 100644
index 0000000..dd87902
--- /dev/null
+++ b/linux/src/drivers/net/wd.c
@@ -0,0 +1,513 @@
+/* wd.c: A WD80x3 ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for WD8003 and WD8013 "compatible" ethercards.
+
+ Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users, support
+ for non-standard memory sizes.
+
+
+*/
+
+static const char *version =
+ "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed, in this order. */
+static unsigned int wd_portlist[] =
+{0x300, 0x280, 0x380, 0x240, 0};
+
+int wd_probe(struct device *dev);
+int wd_probe1(struct device *dev, int ioaddr);
+
+static int wd_open(struct device *dev);
+static void wd_reset_8390(struct device *dev);
+static void wd_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void wd_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void wd_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int wd_close_card(struct device *dev);
+
+
+#define WD_START_PG 0x00 /* First page of TX buffer */
+#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
+#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define WD_CMDREG 0 /* Offset to ASIC command register. */
+#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */
+#define WD_MEMENB 0x40 /* Enable the shared memory. */
+#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */
+#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */
+#define NIC16 0x40 /* Enable 16 bit access from the 8390. */
+#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */
+#define WD_IO_EXTENT 32
+
+
+/* Probe for the WD8003 and WD8013. These cards have the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following. A Soundblaster can have the same checksum as an WDethercard,
+ so we have an extra exclusionary check for it.
+
+ The wd_probe1() routine initializes the card and fills the
+ station address field. */
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry wd_drv =
+{"wd", wd_probe1, WD_IO_EXTENT, wd_portlist};
+#else
+
+/*
+ * Top-level probe: either check the single user-specified base address,
+ * or walk wd_portlist[] and try each free I/O region in turn.
+ * Returns 0 on success, ENXIO/ENODEV on failure (positive errno style,
+ * as used elsewhere in this driver).
+ */
+int wd_probe(struct device *dev)
+{
+	int i;
+	int base_addr = dev ? dev->base_addr : 0;
+
+	if (base_addr > 0x1ff)		/* Check a single specified location. */
+		return wd_probe1(dev, base_addr);
+	else if (base_addr != 0)	/* Don't probe at all. */
+		return ENXIO;
+
+	/* Autoprobe: skip I/O regions already claimed by another driver. */
+	for (i = 0; wd_portlist[i]; i++) {
+		int ioaddr = wd_portlist[i];
+		if (check_region(ioaddr, WD_IO_EXTENT))
+			continue;
+		if (wd_probe1(dev, ioaddr) == 0)
+			return 0;
+	}
+
+	return ENODEV;
+}
+#endif
+
+int wd_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ int ancient = 0; /* An old card without config registers. */
+ int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ const char *model_name;
+ static unsigned version_printed = 0;
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */
+ || inb(ioaddr + 9) == 0xff
+ || (checksum & 0xff) != 0xFF)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("wd.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ /* Check for semi-valid mem_start/end values if supplied. */
+ if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
+ printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ dev->mem_start = 0;
+ dev->mem_end = 0;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: WD80x3 at %#3x, ", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* The following PureData probe code was contributed by
+ Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
+ configuration differently from others so we have to check for them.
+ This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card.
+ */
+ if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') {
+ unsigned char reg5 = inb(ioaddr+5);
+
+ switch (inb(ioaddr+2)) {
+ case 0x03: word16 = 0; model_name = "PDI8023-8"; break;
+ case 0x05: word16 = 0; model_name = "PDUC8023"; break;
+ case 0x0a: word16 = 1; model_name = "PDI8023-16"; break;
+ /* Either 0x01 (dumb) or they've released a new version. */
+ default: word16 = 0; model_name = "PDI8023"; break;
+ }
+ dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12;
+ dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1;
+ } else { /* End of PureData probe */
+ /* This method of checking for a 16-bit board is borrowed from the
+ we.c driver. A simpler method is just to look in ASIC reg. 0x03.
+ I'm comparing the two method in alpha test to make certain they
+ return the same result. */
+ /* Check for the old 8 bit board - it has register 0/8 aliasing.
+ Do NOT check i>=6 here -- it hangs the old 8003 boards! */
+ for (i = 0; i < 6; i++)
+ if (inb(ioaddr+i) != inb(ioaddr+8+i))
+ break;
+ if (i >= 6) {
+ ancient = 1;
+ model_name = "WD8003-old";
+ word16 = 0;
+ } else {
+ int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */
+ outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */
+ if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */
+ && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */
+ int asic_reg5 = inb(ioaddr+WD_CMDREG5);
+ /* Magic to set ASIC to word-wide mode. */
+ outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5);
+ outb(tmp, ioaddr+1);
+ model_name = "WD8013";
+ word16 = 1; /* We have a 16bit board here! */
+ } else {
+ model_name = "WD8003";
+ word16 = 0;
+ }
+ outb(tmp, ioaddr+1); /* Restore original reg1 value. */
+ }
+#ifndef final_version
+ if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
+ printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+#endif
+ }
+
+#if defined(WD_SHMEM) && WD_SHMEM > 0x80000
+ /* Allow a compile-time override. */
+ dev->mem_start = WD_SHMEM;
+#else
+ if (dev->mem_start == 0) {
+ /* Sanity and old 8003 check */
+ int reg0 = inb(ioaddr);
+ if (reg0 == 0xff || reg0 == 0) {
+ /* Future plan: this could check a few likely locations first. */
+ dev->mem_start = 0xd0000;
+ printk(" assigning address %#lx", dev->mem_start);
+ } else {
+ int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
+ /* Some boards don't have the register 5 -- it returns 0xff. */
+ if (high_addr_bits == 0x1f || word16 == 0)
+ high_addr_bits = 0x01;
+ dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19);
+ }
+ }
+#endif
+
+ /* The 8390 isn't at the base address -- the ASIC regs are there! */
+ dev->base_addr = ioaddr+WD_NIC_OFFSET;
+
+ if (dev->irq < 2) {
+ int irqmap[] = {9,3,5,7,10,11,15,4};
+ int reg1 = inb(ioaddr+1);
+ int reg4 = inb(ioaddr+4);
+ if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
+ short nic_addr = ioaddr+WD_NIC_OFFSET;
+
+ /* We have an old-style ethercard that doesn't report its IRQ
+ line. Do autoirq to find the IRQ line. Note that this IS NOT
+ a reliable way to trigger an interrupt. */
+ outb_p(E8390_NODMA + E8390_STOP, nic_addr);
+ outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */
+ autoirq_setup(0);
+ outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */
+ outb_p(0x00, nic_addr + EN0_RCNTLO);
+ outb_p(0x00, nic_addr + EN0_RCNTHI);
+ outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */
+ dev->irq = autoirq_report(2);
+ outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
+
+ if (ei_debug > 2)
+ printk(" autoirq is %d", dev->irq);
+ if (dev->irq < 2)
+ dev->irq = word16 ? 10 : 5;
+ } else
+ dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)];
+ } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ if (request_irq(dev->irq, ei_interrupt, 0, model_name, NULL)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ free_irq(dev->irq, NULL);
+ return -ENOMEM;
+ }
+
+	/* OK, we are certain this is going to work.  Set up the device. */
+ request_region(ioaddr, WD_IO_EXTENT, model_name);
+
+ ei_status.name = model_name;
+ ei_status.word16 = word16;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+
+ /* Don't map in the shared memory until the board is actually opened. */
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+
+ /* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */
+ if (dev->mem_end != 0) {
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ } else {
+ ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG;
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256;
+ }
+ dev->rmem_end = dev->mem_end;
+
+ printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+
+ ei_status.reset_8390 = &wd_reset_8390;
+ ei_status.block_input = &wd_block_input;
+ ei_status.block_output = &wd_block_output;
+ ei_status.get_8390_hdr = &wd_get_8390_hdr;
+ dev->open = &wd_open;
+ dev->stop = &wd_close_card;
+ NS8390_init(dev, 0);
+
+#if 1
+ /* Enable interrupt generation on softconfig cards -- M.U */
+ /* .. but possibly potentially unsafe - Donald */
+ if (inb(ioaddr+14) & 0x20)
+ outb(inb(ioaddr+4)|0x80, ioaddr+4);
+#endif
+
+ return 0;
+}
+
+/* Open the interface.  Computes and latches the two ASIC mapping values
+   (reg0 = shared-memory address bits 13..18 plus the memory-enable bit,
+   reg5 = address bits 19..23 plus 16-bit NIC mode), writes them to the
+   board, then starts the generic 8390 core via ei_open().  Register 0 is
+   written last to remain compatible with very old boards.  Always
+   returns 0. */
+static int
+wd_open(struct device *dev)
+{
+	/* The ASIC command registers sit at the probe base, below the 8390. */
+	int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+	/* Map in the shared memory. Always set register 0 last to remain
+	   compatible with very old boards. */
+	ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB;
+	ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16;
+
+	/* Only 16-bit (WD8013-class) boards have command register 5. */
+	if (ei_status.word16)
+		outb(ei_status.reg5, ioaddr+WD_CMDREG5);
+	outb(ei_status.reg0, ioaddr); /* WD_CMDREG */
+
+	ei_open(dev);
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+/* Hard-reset the board (installed as ei_status.reset_8390).  Writes the
+   WD_RESET bit to the ASIC command register, clears the in-progress Tx
+   flag, then rewrites the shared-memory mapping registers since the
+   reset may have disturbed them. */
+static void
+wd_reset_8390(struct device *dev)
+{
+	int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+	outb(WD_RESET, wd_cmd_port);
+	if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+	ei_status.txing = 0;
+
+	/* Set up the ASIC registers, just in case something changed them. */
+	outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port);
+	if (ei_status.word16)
+		outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
+
+	if (ei_debug > 1) printk("reset done\n");
+	return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+/* Fetch the 4-byte 8390 receive-packet header for 'ring_page' from the
+   board's shared memory (installed as ei_status.get_8390_hdr).  On
+   16-bit boards this enables 16-bit host access first; the matching
+   disable happens after the body copy in wd_block_input(). */
+static void
+wd_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+	unsigned long hdr_start = dev->mem_start + ((ring_page - WD_START_PG)<<8);
+
+	/* We'll always get a 4 byte header read followed by a packet read, so
+	   we enable 16 bit mode before the header, and disable after the body. */
+	if (ei_status.word16)
+		outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+
+#ifdef notdef
+	/* Officially this is what we are doing, but the readl() is faster */
+	memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+	/* Single 32-bit read of the whole header.  NOTE(review): assumes
+	   struct e8390_pkt_hdr is exactly 4 bytes with no padding and that
+	   the cast access is safe on this platform -- confirm. */
+	((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, and trivial
+ on the Western digital card where there is no choice of how to do it.
+   The only complications are that the ring buffer wraps, and that we
+   need to switch between 8- and 16-bit modes. */
+
+/* Copy 'count' bytes of packet body from the shared-memory receive ring
+   into skb->data (installed as ei_status.block_input).  Handles the
+   ring-buffer wrap at dev->rmem_end with a two-part copy; the unwrapped
+   case uses the combined copy-and-checksum helper.  Finally drops the
+   16-bit host-access mode that wd_get_8390_hdr() turned on. */
+static void
+wd_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+	unsigned long xfer_start = dev->mem_start + ring_offset - (WD_START_PG<<8);
+
+	if (xfer_start + count > dev->rmem_end) {
+		/* We must wrap the input move. */
+		int semi_count = dev->rmem_end - xfer_start;
+		memcpy_fromio(skb->data, xfer_start, semi_count);
+		count -= semi_count;
+		/* Remainder continues from the bottom of the ring. */
+		memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+	} else {
+		/* Packet is in one chunk -- we can copy + cksum. */
+		eth_io_copy_and_sum(skb, xfer_start, count, 0);
+	}
+
+	/* Turn off 16 bit access so that reboot works. ISA brain-damage */
+	if (ei_status.word16)
+		outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+}
+
+/* Copy 'count' bytes of an outgoing packet into the transmit area of the
+   board's shared memory (installed as ei_status.block_output).  On
+   16-bit boards the 16-bit host-access mode is enabled only for the
+   duration of the copy, again so that a warm reboot works. */
+static void
+wd_block_output(struct device *dev, int count, const unsigned char *buf,
+				int start_page)
+{
+	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+	long shmem = dev->mem_start + ((start_page - WD_START_PG)<<8);
+
+	if (ei_status.word16) {
+		/* Turn on and off 16 bit access so that reboot works. */
+		outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+		memcpy_toio(shmem, buf, count);
+		outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+	} else
+		memcpy_toio(shmem, buf, count);
+}
+
+
+/* Close the interface (dev->stop).  Stops the 8390 core, drops back to
+   8-bit shared-memory mode on 16-bit boards, and clears the
+   memory-enable bit so the window disappears from the ISA address
+   space -- both needed for a clean warm reboot.  Always returns 0. */
+static int
+wd_close_card(struct device *dev)
+{
+	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+	if (ei_debug > 1)
+		printk("%s: Shutting down ethercard.\n", dev->name);
+	ei_close(dev);
+
+	/* Change from 16-bit to 8-bit shared memory so reboot works. */
+	if (ei_status.word16)
+		outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 );
+
+	/* And disable the shared memory. */
+	outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg);
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_WD_CARDS 4 /* Max number of wd cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_WD_CARDS] = { 0, };
+static struct device dev_wd[MAX_WD_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_WD_CARDS] = { 0, };
+static int irq[MAX_WD_CARDS] = { 0, };
+static int mem[MAX_WD_CARDS] = { 0, };
+static int mem_end[MAX_WD_CARDS] = { 0, }; /* for non std. mem size */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+/* Module entry point: fill in up to MAX_WD_CARDS device structures from
+   the io[]/irq[]/mem[]/mem_end[] module parameters and register each.
+   Only the first device may autoprobe (io == 0); registration failures
+   after at least one success are tolerated.  Returns 0 on success,
+   -ENXIO if no card at all was found. */
+int
+init_module(void)
+{
+	int this_dev, found = 0;
+
+	for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+		struct device *dev = &dev_wd[this_dev];
+		/* Each unit gets its own NAMELEN-byte slice of namelist[]. */
+		dev->name = namelist+(NAMELEN*this_dev);
+		dev->irq = irq[this_dev];
+		dev->base_addr = io[this_dev];
+		dev->mem_start = mem[this_dev];
+		dev->mem_end = mem_end[this_dev];
+		dev->init = wd_probe;
+		if (io[this_dev] == 0) {
+			if (this_dev != 0) break; /* only autoprobe 1st one */
+			printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n");
+		}
+		/* register_netdev() runs dev->init (wd_probe) and fails if no
+		   board answers. */
+		if (register_netdev(dev) != 0) {
+			printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]);
+			if (found != 0) return 0;	/* Got at least one. */
+			return -ENXIO;
+		}
+		found++;
+	}
+
+	return 0;
+}
+
+/* Module exit: for every unit that probed successfully (dev->priv set),
+   release the private data, IRQ, irq2dev_map slot and I/O region, and
+   unregister the device.
+   NOTE(review): dev->priv is freed before unregister_netdev(); the
+   usual ordering is unregister first -- confirm nothing touches priv
+   during unregistration on this kernel. */
+void
+cleanup_module(void)
+{
+	int this_dev;
+
+	for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+		struct device *dev = &dev_wd[this_dev];
+		if (dev->priv != NULL) {
+			int ioaddr = dev->base_addr - WD_NIC_OFFSET;
+			kfree(dev->priv);
+			dev->priv = NULL;
+			free_irq(dev->irq, NULL);
+			irq2dev_map[dev->irq] = NULL;
+			release_region(ioaddr, WD_IO_EXTENT);
+			unregister_netdev(dev);
+		}
+	}
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c wd.c"
+ * version-control: t
+ * tab-width: 4
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/winbond-840.c b/linux/src/drivers/net/winbond-840.c
new file mode 100644
index 0000000..556d8ad
--- /dev/null
+++ b/linux/src/drivers/net/winbond-840.c
@@ -0,0 +1,1558 @@
+/* winbond-840.c: A Linux network device driver for the Winbond W89c840. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/drivers.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+
+	Do not remove the copyright information.
+ Do not change the version information unless an improvement has been made.
+ Merely removing my name, as Compex has done in the past, does not count
+ as an improvement.
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"winbond-840.c:v1.10 7/22/2003 Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/drivers.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: winbond840_probe
+config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
+
+c-help-name: Winbond W89c840 PCI Ethernet support
+c-help-symbol: CONFIG_WINBOND_840
+c-help: The winbond-840.c driver is for the Winbond W89c840 chip.
+c-help: This chip is named TX9882 on the Compex RL100-ATX board.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The '840 uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability, however setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+   The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, confuses the system network buffer limits,
+ and wastes memory.
+ Larger receive rings merely waste memory.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 32
+
+/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
+ To avoid overflowing we don't queue again until we have room for a
+ full-size packet.
+ */
+#define TX_FIFO_SIZE (2048)
+#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung.
+ Re-autonegotiation may take up to 3 seconds.
+ */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword cache alignment, 8 longword burst.
+ 586: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 <not allowed> 0000 align to cache 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+#define TX_DESC_SIZE 16
+#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
+static int csr0 = 0x00100000 | 0xE000 | TX_DESC_SIZE;
+#elif defined(__alpha__) || defined(__x86_64) || defined(__ia64)
+static int csr0 = 0xE000 | TX_DESC_SIZE;
+#elif defined(__i386__)
+static int csr0 = 0xE000 | TX_DESC_SIZE;
+#else
+static int csr0 = 0xE000 | TX_DESC_SIZE;
+#warning Processor architecture unknown!
+#endif
+
+
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Winbond w89c840 chip.
+
+II. Board-specific settings
+
+None.
+
+III. Driver operation
+
+This chip is very similar to the Digital 21*4* "Tulip" family. The first
+twelve registers and the descriptor format are nearly identical. Read a
+Tulip manual for operational details.
+
+A significant difference is that the multicast filter and station address are
+stored in registers rather than loaded through a pseudo-transmit packet.
+
+Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
+full-sized packet we must use both data buffers in a descriptor. Thus the
+driver uses ring mode where descriptors are implicitly sequential in memory,
+rather than using the second descriptor address as a chain pointer to
+subsequent descriptors.
+
+IV. Notes
+
+If you are going to almost clone a Tulip, why not go all the way and avoid
+the need for a new driver?
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+http://www.winbond.com.tw/
+
+IVc. Errata
+
+A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
+correctly detect a full FIFO, and queuing more than 2048 bytes may result in
+silent data corruption.
+
+*/
+
+
+
+/*
+ PCI probe table.
+*/
+static void *w840_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int winbond_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags {
+ CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
+#ifdef USE_IO_OPS
+#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
+#else
+#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Winbond W89c840", /* Sometime a Level-One switch card. */
+ { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ {"Winbond W89c840", { 0x08401050, 0xffffffff, },
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info winbond840_drv_id = {
+ "winbond-840", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ w840_probe1, winbond_pwr_event };
+
+/* This driver was written to use PCI memory space, however some x86 systems
+ work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
+ accesses instead of memory space. */
+
+#ifdef USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the Command and Status Registers, "CSRs".
+ While similar to the Tulip, these registers are longword aligned.
+ Note: It's not useful to define symbolic names for every register bit in
+ the device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+*/
+enum w840_offsets {
+ PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
+ RxRingPtr=0x0C, TxRingPtr=0x10,
+ IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
+ RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
+ CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
+ MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
+ CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
+};
+
+/* Bits in the interrupt status/enable registers. */
+/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+ NormalIntr=0x10000, AbnormalIntr=0x8000,
+ IntrPCIErr=0x2000, TimerInt=0x800,
+ IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
+ TxFIFOUnderflow=0x20, RxErrIntr=0x10,
+ TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
+};
+
+/* Bits in the NetworkConfig register. */
+enum rx_mode_bits {
+ TxOn=0x2000, RxOn=0x0002, FullDuplex=0x0200,
+ AcceptErr=0x80, AcceptRunt=0x40, /* Not used */
+ AcceptBroadcast=0x20, AcceptMulticast=0x10, AcceptAllPhys=0x08,
+};
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
+ MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
+};
+
+/* The Tulip-like Rx and Tx buffer descriptors. */
+struct w840_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 next_desc;
+};
+
+struct w840_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2; /* We use only buffer 1. */
+ char pad[TX_DESC_SIZE - 16];
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
+ DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
+ DescIntr=0x80000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct w840_rx_desc rx_ring[RX_RING_SIZE];
+ struct w840_tx_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int csr0, csr6;
+ unsigned int polling; /* Switched to polling mode. */
+ int max_interrupt_work;
+
+ struct w840_rx_desc *rx_head_desc;
+ unsigned int rx_ring_size;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int tx_ring_size;
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_q_bytes, tx_unq_bytes;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ /* These values track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* Rx filter. */
+ u32 cur_rx_mode;
+ u32 rx_filter[2];
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static inline unsigned ether_crc(int length, unsigned char *data);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+/* Probe/attach one W89c840 instance.  Called with the BAR address and
+   IRQ already resolved by the pci-scan core.  Registers an ether device,
+   reads the station address from the serial EEPROM, resets the chip,
+   allocates the 16-byte-aligned private structure (descriptor rings
+   first, hence the alignment), links the device into root_net_dev,
+   installs the driver entry points and scans for MII transceivers.
+   Media can be forced through options[]/full_duplex[] or dev->mem_start.
+   Returns the net_device on success, NULL on failure.
+   NOTE(review): on kmalloc failure the device obtained from
+   init_etherdev() is neither unregistered nor freed -- possible leak on
+   an already very unlikely path; confirm against the pci-scan core. */
+static void *w840_probe1(struct pci_dev *pdev, void *init_dev,
+			 long ioaddr, int irq, int chip_idx, int card_idx)
+{
+	struct net_device *dev;
+	struct netdev_private *np;
+	void *priv_mem;
+	int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+	dev = init_etherdev(init_dev, 0);
+	if (!dev)
+		return NULL;
+
+	/* Same message either way; only the bus/slot accessors differ by
+	   kernel version. */
+#if LINUX_VERSION_CODE < 0x20155
+	printk(KERN_INFO "%s: %s at 0x%lx, %2.2x:%2.2x",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+		   pci_bus_number(pdev), pci_devfn(pdev)>>3);
+#else
+	printk(KERN_INFO "%s: %s at 0x%lx, %2.2x:%2.2x",
+		   dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+		   pdev->bus->number, pdev->devfn>>3);
+#endif
+
+	/* Station address: three little-endian 16-bit EEPROM words.
+	   Warning: validate for big-endian machines. */
+	for (i = 0; i < 3; i++)
+		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
+
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+	/* Over-allocate by PRIV_ALIGN so we can align the rings below. */
+	priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+	/* Out of memory is very unlikely. */
+	if (priv_mem == NULL)
+		return NULL;
+
+#ifdef USE_IO_OPS
+	/* NOTE(review): return value of request_region() is ignored. */
+	request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+	/* Reset the chip to erase previous misconfiguration.
+	   No hold time required! */
+	writel(0x00000001, ioaddr + PCIBusCfg);
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	/* The descriptor lists must be aligned.  Round the raw allocation
+	   up to the next PRIV_ALIGN+1 boundary; keep the raw pointer for
+	   kfree() in priv_addr. */
+	dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+	memset(np, 0, sizeof(*np));
+	np->priv_addr = priv_mem;
+
+	/* Push onto the driver's global device list (module unload walks it). */
+	np->next_module = root_net_dev;
+	root_net_dev = dev;
+
+	np->pci_dev = pdev;
+	np->chip_id = chip_idx;
+	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+	np->msg_level = (1 << debug) - 1;
+	np->rx_copybreak = rx_copybreak;
+	np->max_interrupt_work = max_interrupt_work;
+	np->multicast_filter_limit = multicast_filter_limit;
+	np->tx_ring_size = TX_RING_SIZE;
+	np->rx_ring_size = RX_RING_SIZE;
+
+	/* dev->mem_start doubles as a per-device media-option override. */
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	if ((card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+		|| (np->drv_flags & AlwaysFDX))
+		np->full_duplex = 1;
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &mii_ioctl;
+
+	/* Scan MII addresses 1..31 for up to four responding transceivers;
+	   0x0000/0xffff reads mean "nothing there". */
+	if (np->drv_flags & CanHaveMII) {
+		int phy, phy_idx = 0;
+		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff && mii_status != 0x0000) {
+				np->phys[phy_idx++] = phy;
+				np->advertising = mdio_read(dev, phy, 4);
+				printk(KERN_INFO "%s: MII PHY found at address %d, status "
+					   "0x%4.4x advertising %4.4x.\n",
+					   dev->name, phy, mii_status, np->advertising);
+			}
+		}
+		np->mii_cnt = phy_idx;
+		if (phy_idx == 0) {
+			printk(KERN_WARNING "%s: MII PHY not found -- this device may "
+				   "not operate correctly.\n"
+				   KERN_WARNING "%s: If this is a switch card, explicitly "
+				   "force full duplex on this interface.\n",
+				   dev->name, dev->name);
+			if (np->drv_flags & FDXOnNoMII) {
+				printk(KERN_INFO "%s: Assuming a switch card, forcing full "
+					   "duplex.\n", dev->name);
+				np->full_duplex = np->duplex_lock = 1;
+			}
+		}
+	}
+	/* Allow forcing the media type. */
+	if (np->full_duplex) {
+		printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+			   " disabled.\n", dev->name);
+		np->duplex_lock = 1;
+	}
+	if (option > 0) {
+		if (option & 0x220)
+			np->full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port & 0x330) {
+			np->medialock = 1;
+			printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+				   (option & 0x300 ? 100 : 10),
+				   (np->full_duplex ? "full" : "half"));
+			if (np->mii_cnt)
+				mdio_write(dev, np->phys[0], 0,
+						   ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+						   (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+		}
+	}
+
+	return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The Winbond NIC uses serial bit streams generated by the host processor. */
+
+/* Delay between EEPROM clock transitions.
+ This "delay" is to force out buffered PCI writes. */
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
+ EE_ChipSelect=0x801, EE_DataIn=0x08,
+};
+
+/* The EEPROM commands always start with 01.. preamble bits.
+ Commands are prepended to the variable-length address. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+/* Read one 16-bit word from the serial EEPROM at 'location'.
+   Bit-bangs the read command (11 bits: preamble + opcode + address) out
+   through EECtrl, then clocks in 16 data bits MSB first.  Each write is
+   followed by a flushing read (eeprom_delay) to pace the transitions.
+   Returns the word read. */
+static int eeprom_read(long addr, int location)
+{
+	int i;
+	int retval = 0;
+	long ee_addr = addr + EECtrl;
+	int read_cmd = location | EE_ReadCmd;
+
+	writel(EE_ChipSelect, ee_addr);
+	/* Shift the read command bits out. */
+	for (i = 10; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+		writel(dataval, ee_addr);
+		eeprom_delay(ee_addr);
+		/* Raise the clock with the data bit held stable. */
+		writel(dataval | EE_ShiftClk, ee_addr);
+		eeprom_delay(ee_addr);
+	}
+	writel(EE_ChipSelect, ee_addr);
+	eeprom_delay(ee_addr);
+
+	/* Clock in the 16 result bits, MSB first. */
+	for (i = 16; i > 0; i--) {
+		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+		eeprom_delay(ee_addr);
+		retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
+		writel(EE_ChipSelect, ee_addr);
+		eeprom_delay(ee_addr);
+	}
+
+	/* Terminate the EEPROM access. */
+	writel(0, ee_addr);
+	return retval;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+ The maximum data clock rate is 2.5 Mhz.
+ The timing is decoupled from the processor clock by flushing the write
+ from the CPU write buffer with a following read, and using PCI
+ transaction time. */
+#define mdio_in(mdio_addr) readl(mdio_addr)
+#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
+#define mdio_delay(mdio_addr) readl(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 1;
+
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers: clock out 32 consecutive one bits on the
+   MDIO line before a management frame. */
+static void mdio_sync(long mdio_addr)
+{
+	int bits = 32;
+
+	/* Establish sync by sending at least 32 logic ones. */
+	while (--bits >= 0) {
+		mdio_out(MDIO_WRITE1, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+}
+
+/* Read a 16-bit MII management register by bit-banging the MDIO frame
+   through MIICtrl.  The 16-bit command word encodes start/read opcode,
+   PHY address and register number; 20 clocks are then used to read the
+   two turnaround bits, the 16 data bits, and idle.  Returns the 16-bit
+   register value. */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	long mdio_addr = dev->base_addr + MIICtrl;
+	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	int i, retval = 0;
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 20; i > 0; i--) {
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_DataIn) ? 1 : 0);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Discard the trailing idle bit and mask to 16 data bits. */
+	return (retval>>1) & 0xffff;
+}
+
+/* Write 'value' to a 16-bit MII management register by bit-banging the
+   full 32-bit MDIO write frame (start/opcode/PHY/register/turnaround
+   prefix 0x5002 plus the data) through MIICtrl, then release the bus
+   with two idle clocks. */
+static void mdio_write(struct net_device *dev, int phy_id, int reg, int value)
+{
+	long mdio_addr = dev->base_addr + MIICtrl;
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (reg<<18) | value;
+	int i;
+
+	if (mii_preamble_required)
+		mdio_sync(mdio_addr);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+		mdio_out(dataval, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		mdio_out(MDIO_EnbIn, mdio_addr);
+		mdio_delay(mdio_addr);
+		mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+		mdio_delay(mdio_addr);
+	}
+	return;
+}
+
+
+/* Open the interface (dev->open).  Resets the chip, grabs the (shared)
+   IRQ, builds the Rx/Tx descriptor rings, programs the ring base
+   addresses and station address, applies the cached PCI bus/duplex
+   configuration, starts the queue, enables interrupts and arms the
+   media-monitoring timer.  Returns 0 on success, -EAGAIN if the IRQ
+   cannot be obtained (module use count is balanced on that path). */
+static int netdev_open(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+
+	MOD_INC_USE_COUNT;
+
+	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+		MOD_DEC_USE_COUNT;
+		return -EAGAIN;
+	}
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
+			   dev->name, dev->irq);
+
+	init_ring(dev);
+
+	/* Hand the chip the physical addresses of both descriptor rings. */
+	writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+	writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+	for (i = 0; i < 6; i++)
+		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+	/* Initialize other registers. */
+	np->csr0 = csr0;
+	writel(np->csr0, ioaddr + PCIBusCfg);
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	writel(0, ioaddr + RxStartDemand);
+	/* Base operating mode; bit 0x200 is full duplex. */
+	np->csr6 = np->full_duplex ? 0x20022202 : 0x20022002;
+	check_duplex(dev);
+	set_rx_mode(dev);
+
+	netif_start_tx_queue(dev);
+
+	/* Clear and Enable interrupts by setting the interrupt mask.
+	   See enum intr_status_bits above for bit guide.
+	   We omit: TimerInt, IntrRxDied, IntrTxStopped
+	*/
+	writel(0x1A0F5, ioaddr + IntrStatus);
+	writel(0x1A0F5, ioaddr + IntrEnable);
+
+	if (np->msg_level & NETIF_MSG_IFUP)
+		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+	/* Set the timer to check for link beat. */
+	init_timer(&np->timer);
+	np->timer.expires = jiffies + 3*HZ;
+	np->timer.data = (unsigned long)dev;
+	np->timer.function = &netdev_timer; /* timer handler */
+	add_timer(&np->timer);
+
+	return 0;
+}
+
+/* Read the MII link-partner ability register (reg 5) and, unless the user
+   has locked the duplex setting, update np->full_duplex and the duplex bit
+   in csr6 to match the negotiated result.  The caller is responsible for
+   writing csr6 back to the chip if it changed. */
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ int duplex;
+
+ /* 0xffff means no PHY responded (bus reads as all-ones). */
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
+ /* Full duplex if 100-FD (0x0100) was agreed, or 10-FD (0x0040) is the
+    only 10/100 mode bit set. */
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+ "negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ /* 0x200 is presumably the full-duplex bit of csr6 -- TODO confirm
+    against the chip's NetworkConfig register definition. */
+ np->csr6 &= ~0x200;
+ np->csr6 |= duplex ? 0x200 : 0;
+ }
+}
+
+/* Periodic (normally 10s) housekeeping timer: detects blocked interrupts
+   and falls back to polling the handler, watches for a stalled Tx queue,
+   and re-checks the negotiated duplex, rewriting csr6 if it changed. */
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+ int old_csr6 = np->csr6;
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+ "config %8.8x.\n",
+ dev->name, intr_status, (int)readl(ioaddr + NetworkConfig));
+ /* Check for blocked interrupts.  If status bits are pending while we
+    are in polling mode, run the handler ourselves and re-arm quickly;
+    np->polling doubles as an age counter to leave polling mode. */
+ if (np->polling) {
+ if (intr_status & 0x1ffff) {
+ intr_handler(dev->irq, dev, 0);
+ next_tick = 1;
+ np->polling = 1;
+ } else if (++np->polling > 10*HZ)
+ np->polling = 0;
+ else
+ next_tick = 2;
+ } else if ((intr_status & 0x1ffff)) {
+ np->polling = 1;
+ }
+
+ /* Tx watchdog: packets queued but none completed for TX_TIMEOUT. */
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ tx_timeout(dev);
+ }
+ check_duplex(dev);
+ if (np->csr6 != old_csr6) {
+ /* Stop Tx/Rx, then restart with the new configuration. */
+ writel(np->csr6 & ~0x0002, ioaddr + NetworkConfig);
+ writel(np->csr6 | 0x2002, ioaddr + NetworkConfig);
+ }
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+/* Handle a transmit watchdog timeout: log chip status (and ring contents
+   when Tx-error messages are enabled), kick the Tx engine with a poll
+   demand, and count the error.  No full hardware reset is attempted. */
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+
+/* Skipped on Alpha, presumably because the descriptor dump does raw
+   32-bit accesses that are problematic there -- TODO confirm. */
+#ifndef __alpha__
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < np->rx_ring_size; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < np->tx_ring_size; i++)
+ printk(" %8.8x", np->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Perhaps we should reinitialize the hardware here. Just trigger a
+ Tx demand for now. */
+ writel(0, ioaddr + TxStartDemand);
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes . */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits.
+   Allocates one skbuff per Rx descriptor; allocation failure is tolerated
+   (remaining slots stay empty and are retried later by netdev_rx). */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_tx = np->dirty_tx = 0;
+ np->tx_q_bytes = np->tx_unq_bytes = 0;
+
+ np->cur_rx = np->dirty_rx = 0;
+ np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors, chained via bus addresses. */
+ for (i = 0; i < np->rx_ring_size; i++) {
+ np->rx_ring[i].length = np->rx_buf_sz;
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].length |= DescEndRing;
+ np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < np->rx_ring_size; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
+ np->rx_ring[i].status = DescOwn | DescIntr;
+ }
+ /* dirty_rx trails cur_rx by the number of filled buffers; if some
+    allocations failed this goes "negative" (large unsigned) so the
+    refill loop in netdev_rx will top the ring up. */
+ np->dirty_rx = (unsigned int)(i - np->rx_ring_size);
+
+ for (i = 0; i < np->tx_ring_size; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+/* Queue SKB for transmission.  Returns 0 on success, 1 if the queue is
+   busy (the packet is then requeued by the network layer).  Three
+   compile-time descriptor strategies exist; only `one_buffer` is active.
+   Also works around a chip FIFO bug by stopping the queue when the
+   in-flight byte count approaches the Tx FIFO size. */
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % np->tx_ring_size;
+
+ np->tx_skbuff[entry] = skb;
+ np->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
+
+/* Select the descriptor strategy: whole packet in one buffer (default),
+   split at BPT bytes into two descriptors, or FIFO-aware splitting. */
+#define one_buffer
+#define BPT 1022
+#if defined(one_buffer)
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ if (entry >= np->tx_ring_size-1) /* Wrap ring */
+ np->tx_ring[entry].length |= DescIntr | DescEndRing;
+ np->tx_ring[entry].status = DescOwn;
+ np->cur_tx++;
+#elif defined(two_buffer)
+ if (skb->len > BPT) {
+ unsigned int entry1 = ++np->cur_tx % np->tx_ring_size;
+ np->tx_ring[entry].length = DescStartPkt | BPT;
+ np->tx_ring[entry1].length = DescEndPkt | (skb->len - BPT);
+ np->tx_ring[entry1].buffer1 = virt_to_bus((skb->data) + BPT);
+ /* Second descriptor must be owned by the chip before the first. */
+ np->tx_ring[entry1].status = DescOwn;
+ np->tx_ring[entry].status = DescOwn;
+ if (entry >= np->tx_ring_size-1)
+ np->tx_ring[entry].length |= DescIntr|DescEndRing;
+ else if (entry1 >= np->tx_ring_size-1)
+ np->tx_ring[entry1].length |= DescIntr|DescEndRing;
+ np->cur_tx++;
+ } else {
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ if (entry >= np->tx_ring_size-1) /* Wrap ring */
+ np->tx_ring[entry].length |= DescIntr | DescEndRing;
+ np->tx_ring[entry].status = DescOwn;
+ np->cur_tx++;
+ }
+#elif defined(split_buffer)
+ {
+ /* Work around the Tx-FIFO-full bug by splitting our transmit packet
+ into two pieces, the first which may be loaded without overflowing
+ the FIFO, and the second which contains the remainder of the
+ packet. When we get a Tx-done interrupt that frees enough room
+ in the FIFO we mark the remainder of the packet as loadable.
+
+ This has the problem that the Tx descriptors are written both
+ here and in the interrupt handler.
+ */
+
+ int buf1size = TX_FIFO_SIZE - (np->tx_q_bytes - np->tx_unq_bytes);
+ int buf2size = skb->len - buf1size;
+
+ if (buf2size <= 0) { /* We fit into one descriptor. */
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ } else { /* We must use two descriptors. */
+ unsigned int entry2;
+ np->tx_ring[entry].length = DescIntr | DescStartPkt | buf1size;
+ if (entry >= np->tx_ring_size-1) { /* Wrap ring */
+ np->tx_ring[entry].length |= DescEndRing;
+ entry2 = 0;
+ } else
+ entry2 = entry + 1;
+ /* NOTE(review): cur_tx is incremented both here and after the
+    if/else below, i.e. twice in the two-descriptor case; this
+    branch is not compiled (one_buffer is defined) -- verify
+    before ever enabling split_buffer. */
+ np->cur_tx++;
+ np->tx_ring[entry2].buffer1 =
+ virt_to_bus(skb->data + buf1size);
+ np->tx_ring[entry2].length = DescEndPkt | buf2size;
+ if (entry2 >= np->tx_ring_size-1) /* Wrap ring */
+ np->tx_ring[entry2].length |= DescEndRing;
+ }
+ np->tx_ring[entry].status = DescOwn;
+ np->cur_tx++;
+ }
+#endif
+ np->tx_q_bytes += skb->len;
+ /* Any write is a Tx poll demand. */
+ writel(0, dev->base_addr + TxStartDemand);
+
+ /* Work around horrible bug in the chip by marking the queue as full
+ when we do not have FIFO room for a maximum sized packet. */
+ if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN) {
+ np->tx_full = 1;
+ netif_stop_tx_queue(dev);
+ } else if ((np->drv_flags & HasBrokenTx)
+ && np->tx_q_bytes - np->tx_unq_bytes > TX_BUG_FIFO_LIMIT) {
+ np->tx_full = 1;
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread.  Loops until no interesting status bits remain or
+ max_interrupt_work events have been handled, in which case it masks
+ everything except a short GP timer interrupt that re-enables them. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int work_limit = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ writel(intr_status & 0x0001ffff, ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ /* 0xffffffff means the card is gone (all-ones bus read). */
+ if ((intr_status & (NormalIntr|AbnormalIntr)) == 0
+ || intr_status == 0xffffffff)
+ break;
+
+ if (intr_status & (IntrRxDone | RxNoBuf))
+ netdev_rx(dev);
+
+ /* Reap completed Tx descriptors and free their skbuffs. */
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % np->tx_ring_size;
+ int tx_status = np->tx_ring[entry].status;
+
+ /* Negative status: sign bit still set, chip still owns it. */
+ if (tx_status < 0)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x8000) { /* There was an error, log it. */
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ np->stats.tx_errors++;
+ if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
+ if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
+ if (tx_status & 0x0200) np->stats.tx_window_errors++;
+ if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
+ if ((tx_status & 0x0080) && np->full_duplex == 0)
+ np->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+ if (tx_status & 0x0100) np->stats.collisions16++;
+#endif
+ } else {
+#ifdef ETHER_STATS
+ if (tx_status & 0x0001) np->stats.tx_deferred++;
+#endif
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ np->stats.collisions += (tx_status >> 3) & 15;
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ np->tx_unq_bytes += np->tx_skbuff[entry]->len;
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ if (np->tx_full &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4
+ && np->tx_q_bytes - np->tx_unq_bytes < TX_BUG_FIFO_LIMIT) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
+ TimerInt | IntrTxStopped))
+ netdev_error(dev, intr_status);
+
+ if (--work_limit < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n", dev->name, intr_status);
+ /* Set the timer to re-enable the other interrupts after
+ 10*82usec ticks. */
+ writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
+ writel(10, ioaddr + GPTimer);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation.  Walks the Rx ring passing
+ completed packets up the stack (copying small ones into a fresh skbuff
+ when below rx_copybreak), then refills empty ring slots. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % np->rx_ring_size;
+ int work_limit = np->dirty_rx + np->rx_ring_size - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (--work_limit >= 0) {
+ struct w840_rx_desc *desc = np->rx_head_desc;
+ s32 status = desc->status;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ status);
+ /* Negative status: DescOwn (sign bit) still set, chip owns it. */
+ if (status < 0)
+ break;
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers of a multi-buffer frame. */
+ if ((status & 0xffff) != 0x7fff) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x status %4.4x!\n",
+ dev->name, np->cur_rx, status);
+ np->stats.rx_length_errors++;
+ }
+ } else if (status & 0x8000) {
+ /* There was a fatal error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ np->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) np->stats.rx_length_errors++;
+ if (status & 0x004C) np->stats.rx_frame_errors++;
+ if (status & 0x0002) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ int pkt_len = ((status >> 16) & 0x7ff) - 4;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " status %x.\n", pkt_len, status);
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ /* Call copy + cksum if available. */
+#if HAS_IP_COPYSUM
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ /* Hand the ring's own skbuff up; the slot is refilled in
+    the loop below. */
+ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (bus_to_virt(desc->buffer1) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in netdev_rx: %p vs. %p / %p.\n",
+ dev->name, bus_to_virt(desc->buffer1),
+ skb->head, temp);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % np->rx_ring_size;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % np->rx_ring_size;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
+ }
+ /* Hand the descriptor back to the chip. */
+ np->rx_ring[entry].status = DescOwn;
+ }
+
+ return 0;
+}
+
+/* Handle abnormal-interrupt events: bump the Tx FIFO threshold on
+   underflow, count missed Rx frames, and re-enable interrupts after the
+   GP-timer throttle set by intr_handler expires. */
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ if (np->msg_level & NETIF_MSG_MISC)
+ printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
+ dev->name, intr_status);
+ /* All-ones status: card has been removed. */
+ if (intr_status == 0xffffffff)
+ return;
+ if (intr_status & TxFIFOUnderflow) {
+ np->csr6 += 0x4000; /* Bump up the Tx threshold */
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Tx underflow, increasing threshold to "
+ "%8.8x.\n", dev->name, np->csr6);
+ writel(np->csr6, ioaddr + NetworkConfig);
+ }
+ if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
+ np->stats.rx_errors++;
+ }
+ if (intr_status & TimerInt) {
+ /* Re-enable other interrupts. */
+ writel(0x1A0F5, ioaddr + IntrEnable);
+ }
+ /* Reading RxMissed clears the chip's counter; fold it into stats. */
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ writel(0, ioaddr + RxStartDemand);
+}
+
+/* Return the interface statistics, first folding in the chip's
+   missed-frame counter (only safe to read while the device is up). */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ /* The chip only need report frame silently dropped. */
+ if (netif_running(dev))
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+
+ return &np->stats;
+}
+
+/* Bit-serial Ethernet CRC-32 (polynomial 0x04c11db7) over LENGTH bytes of
+   DATA, LSB of each octet first, initial value all-ones.  Used only to
+   compute multicast hash-filter indices, so speed is irrelevant. */
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ /* (crc < 0) tests the CRC's top bit via the sign bit. */
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ }
+ return crc;
+}
+
+/* Program the Rx filter from dev->flags and the multicast list: full
+   promiscuous, accept-all-multicast (hash filter all-ones), or a 64-bit
+   CRC-based multicast hash.  Writes the filter registers and csr6. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, ~0, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ /* Top 6 CRC bits (xor 0x3F reverses the bit order into the
+    filter index) select one of 64 hash-filter bits. */
+ set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast;
+ }
+ writel(mc_filter[0], ioaddr + MulticastFilter0);
+ writel(mc_filter[1], ioaddr + MulticastFilter1);
+ /* Replace the Rx-mode bits of csr6 and write it back. */
+ np->csr6 &= ~0x00F8;
+ np->csr6 |= rx_mode;
+ writel(np->csr6, ioaddr + NetworkConfig);
+}
+
+/* Private ioctl handler: MII PHY access (get PHY address, read/write MII
+   registers, tracking duplex/advertising side effects) plus driver tuning
+   parameter get/set (SIOCGPARAMS/SIOCSPARAMS).  Write operations require
+   CAP_NET_ADMIN.  Returns 0 or a negative errno. */
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ /* The numeric cases are the legacy private-ioctl aliases of the
+    SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG values. */
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ /* Mirror writes to our PHY's control/advertising registers
+    so the driver's duplex state stays consistent. */
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+/* Release every skbuff still attached to the Rx and Tx rings and zero the
+   slots.  Called on close and on power-management suspend/detach; the
+   chip must already be stopped so it no longer DMAs into the buffers. */
+static void empty_rings(struct net_device *dev)
+{
+ struct netdev_private *np = (void *)dev->priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < np->rx_ring_size; i++) {
+ np->rx_ring[i].status = 0;
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ /* Pre-2.1 kernels need the free flag set explicitly. */
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < np->tx_ring_size; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+}
+
+/* Close the interface: stop the queue, mask interrupts, halt the chip's
+   Tx/Rx engines, cancel the link timer, gather final missed-frame stats,
+   release the IRQ and ring buffers, and drop the module refcount. */
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
+ "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
+ (int)readl(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(np->csr6 &= ~0x20FA, ioaddr + NetworkConfig);
+
+ del_timer(&np->timer);
+ /* Skip the stats read if the card has been removed (reads all-ones). */
+ if (readl(ioaddr + NetworkConfig) != 0xffffffff)
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ int i;
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < np->tx_ring_size; i++)
+ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk(KERN_DEBUG "\n" KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < np->rx_ring_size; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+ empty_rings(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Power-management / hot-plug event callback.  Handles module refcounting
+   on attach, chip quiesce and buffer release on suspend, full ring and
+   register re-initialization on resume, and complete teardown (including
+   unlinking from the driver's device list) on detach. */
+static int winbond_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND: {
+ int csr6 = readl(ioaddr + NetworkConfig);
+ /* Disable interrupts, stop the chip, gather stats.
+    0xffffffff means the card is already gone. */
+ if (csr6 != 0xffffffff) {
+ int csr8 = readl(ioaddr + RxMissed);
+ writel(0x00000000, ioaddr + IntrEnable);
+ writel(csr6 & ~TxOn & ~RxOn, ioaddr + NetworkConfig);
+ np->stats.rx_missed_errors += (unsigned short)csr8;
+ }
+ empty_rings(dev);
+ break;
+ }
+ case DRV_RESUME:
+ /* Redo the netdev_open() hardware setup from saved state. */
+ writel(np->csr0, ioaddr + PCIBusCfg);
+ init_ring(dev);
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+ writel(0x1A0F5, ioaddr + IntrStatus);
+ writel(0x1A0F5, ioaddr + IntrEnable);
+ writel(np->csr6 | TxOn | RxOn, ioaddr + NetworkConfig);
+ writel(0, ioaddr + RxStartDemand); /* Rx poll demand */
+ set_rx_mode(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ printk(KERN_ERR "%s: Winbond-840 NIC removed while still "
+ "active.\n", dev->name);
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ /* NOTE(review): release_region is unconditional here but iounmap
+    only happens when !USE_IO_OPS, while cleanup_module() does the
+    opposite pairing -- verify resource handling is consistent. */
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ /* Unlink this device from the driver's singly-linked list. */
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Module entry point: print the version banner (when debug is high
+   enough) and register the PCI driver so devices are probed. */
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&winbond840_drv_id, NULL);
+}
+
+/* Module exit point: unregister the PCI driver, then walk the device
+   list unregistering each interface and freeing its I/O region or
+   mapping, private area, and net_device structure. */
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&winbond840_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)(root_net_dev->base_addr));
+#endif
+ /* Save the link before freeing the node that holds it. */
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+#else
+/* Non-modular probe entry: register the PCI driver for the kernel's
+   built-in probe sequence; -ENODEV if no device was found. */
+int winbond840_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&winbond840_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` winbond-840.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c winbond-840.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/yellowfin.c b/linux/src/drivers/net/yellowfin.c
new file mode 100644
index 0000000..9d7ace8
--- /dev/null
+++ b/linux/src/drivers/net/yellowfin.c
@@ -0,0 +1,1482 @@
+/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
+/*
+ Written 1997-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
+ It also supports the Symbios Logic version of the same chip core.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/yellowfin.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"yellowfin.c:v1.10 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/yellowfin.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 64;
+
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+/* System-wide count of bogus-rx frames. */
+static int bogus_rx = 0;
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#elif YF_NEW /* A future perfect board :->. */
+static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
+static int fifo_cfg = 0x0028;
+#else
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ No media types are currently defined. These options exist only for
+ compatibility with other drivers.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Do ugly workaround for GX server chipset errata. */
+static int gx_fix = 0;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, confuses the system network buffer limits,
+ and wastes memory.
+ Too-large receive rings waste memory and confound network buffer limits.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
+#define RX_RING_SIZE 64
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/unaligned.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(gx_fix, "i");
+MODULE_PARM_DESC(debug, "Driver message level enable (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(deprecated).");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+MODULE_PARM_DESC(gx_fix, "Set to work around old GX chipset errata");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Yellowfin" Gigabit
+Ethernet adapter. The only PCA currently supported is the G-NIC 64-bit
+PCI card.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
+This is a descriptor list scheme similar to that used by the EEPro100 and
+Tulip. This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Yellowfin as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data.
+
+IIIC. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'yp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
+Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
+and an AlphaStation to verify the Alpha port!
+
+IVb. References
+
+Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
+Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
+ Data Manual v3.0
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+
+IVc. Errata
+
+See Packet Engines confidential appendix (prototype chips only).
+*/
+
+
+
+static void *yellowfin_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt);
+enum capability_flags {
+ HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
+ HasMACAddrBug=32, /* Only on early revs. */
+};
+/* The PCI I/O space extent. */
+#define YELLOWFIN_SIZE 0x100
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
+ PCI_IOTYPE, YELLOWFIN_SIZE,
+ FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug},
+ {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
+ PCI_IOTYPE, YELLOWFIN_SIZE, HasMII },
+ {0,},
+};
+
+struct drv_id_info yellowfin_drv_id = {
+ "yellowfin", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ yellowfin_probe1, };
+
+/* Offsets to the Yellowfin registers. Various sizes and alignments. */
+enum yellowfin_offsets {
+ TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
+ TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
+ RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
+ RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
+ EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
+ ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
+ Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+ RxDepth=0xB8, FlowCtrl=0xBC,
+ AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
+ EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
+ EEFeature=0xF5,
+};
+
+/* The Yellowfin Rx and Tx buffer descriptors.
+ Elements are written as 32 bit for endian portability. */
+struct yellowfin_desc {
+ u32 dbdma_cmd;
+ u32 addr;
+ u32 branch_addr;
+ u32 result_status;
+};
+
+struct tx_status_words {
+#if defined(__powerpc__)
+ u16 tx_errs;
+ u16 tx_cnt;
+ u16 paused;
+ u16 total_tx_cnt;
+#else /* Little endian chips. */
+ u16 tx_cnt;
+ u16 tx_errs;
+ u16 total_tx_cnt;
+ u16 paused;
+#endif
+};
+
+/* Bits in yellowfin_desc.cmd */
+enum desc_cmd_bits {
+ CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
+ CMD_NOP=0x60000000, CMD_STOP=0x70000000,
+ BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
+ BRANCH_IFTRUE=0x040000,
+};
+
+/* Bits in yellowfin_desc.status */
+enum desc_status_bits { RX_EOP=0x0040, };
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
+ IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
+ IntrEarlyRx=0x100, IntrWakeup=0x200, };
+
+#define PRIV_ALIGN 31 /* Required alignment mask */
+/* Per-device driver state. Must be aligned to PRIV_ALIGN+1 (32) bytes
+ because the descriptor rings are placed first; the probe routine
+ over-allocates and keeps the raw pointer in priv_addr for kfree(). */
+struct yellowfin_private {
+ /* Descriptor rings first for alignment.
+ Tx requires a second descriptor for status. */
+ struct yellowfin_desc rx_ring[RX_RING_SIZE];
+ struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct tx_status_words tx_status[TX_RING_SIZE];
+ struct timer_list timer; /* Media selection timer. */
+ struct net_device_stats stats;
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ int msg_level; /* NETIF_MSG_* bitmask derived from 'debug'. */
+ int chip_id, drv_flags; /* Index into pci_id_tbl and its drv_flags. */
+ struct pci_dev *pci_dev;
+ long in_interrupt; /* Re-entry guard for the interrupt handler. */
+ int max_interrupt_work;
+
+ struct yellowfin_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ struct tx_status_words *tx_tail_desc;
+ unsigned int cur_tx, dirty_tx;
+ int tx_threshold;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+ /* Rx multicast filter. */
+ u16 mc_filter[4];
+ int rx_mode;
+ int multicast_filter_limit;
+};
+
+static int read_eeprom(long ioaddr, int location);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static void mdio_write(long ioaddr, int phy_id, int location, int value);
+#ifdef HAVE_PRIVATE_IOCTL
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static int yellowfin_open(struct net_device *dev);
+static void yellowfin_timer(unsigned long data);
+static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int yellowfin_rx(struct net_device *dev);
+static void yellowfin_error(struct net_device *dev, int intr_status);
+static int yellowfin_close(struct net_device *dev);
+static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* A list of installed Yellowfin devices, for removing the driver module. */
+static struct net_device *root_yellowfin_dev = NULL;
+
+#ifndef MODULE
+/* Non-modular (compiled-in) probe entry point: register with the
+ pci-scan layer; print the version banner only if a card was found. */
+int yellowfin_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&yellowfin_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+/* Per-chip probe, called by the pci-scan layer for each matching device.
+ Allocates and initializes the net_device and the 32-byte-aligned
+ private area, reads the station address (from chip registers on the
+ gigabit part, from the EEPROM on the Symbios part), resets the chip,
+ installs the driver entry points, and scans for MII transceivers.
+ Returns the new net_device, or NULL on failure. */
+static void *yellowfin_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt)
+{
+ struct net_device *dev;
+ struct yellowfin_private *np;
+ void *priv_mem;
+ int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ int drv_flags = pci_id_tbl[chip_idx].drv_flags;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, (int)inl(ioaddr + ChipRev),
+ ioaddr);
+
+ if (drv_flags & IsGigabit)
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
+ else {
+ /* Some boards keep the address at EEPROM offset 0x100. */
+ int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ }
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory.
+ NOTE(review): 'dev' from init_etherdev() is not freed on this
+ path — confirm whether the leak is acceptable here. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Round the private area up to the next 32-byte boundary. */
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ /* Push onto the driver's singly linked device list. */
+ np->next_module = root_yellowfin_dev;
+ root_yellowfin_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex)
+ np->duplex_lock = 1;
+
+ /* The Yellowfin-specific entries in the device structure.
+ NOTE(review): do_ioctl is set unconditionally, but mii_ioctl is
+ only declared under HAVE_PRIVATE_IOCTL above — confirm that
+ kern_compat.h always defines it. */
+ dev->open = &yellowfin_open;
+ dev->hard_start_xmit = &yellowfin_start_xmit;
+ dev->stop = &yellowfin_close;
+ dev->get_stats = &yellowfin_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ if (np->drv_flags & HasMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(ioaddr, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(ioaddr, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ return dev;
+}
+
+/* Read one byte from the serial EEPROM at 'location'. Issues a read
+ command, then busy-waits (bounded by bogus_cnt) for the busy bit
+ (0x80) in EEStatus to clear before fetching the data byte. */
+static int read_eeprom(long ioaddr, int location)
+{
+ int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
+
+ outb(location, ioaddr + EEAddr);
+ outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
+ while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
+ ;
+ return inb(ioaddr + EERead);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+/* Read one 16-bit MII register. Busy-waits (bounded) on the MII_Status
+ busy bit; on timeout it still returns whatever MII_Rd_Data holds. */
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+ int i;
+
+ outw((phy_id<<8) + location, ioaddr + MII_Addr);
+ outw(1, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((inw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return inw(ioaddr + MII_Rd_Data);
+}
+
+/* Write one 16-bit MII register, then busy-wait (bounded) for the
+ controller to finish the command. */
+static void mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ int i;
+
+ outw((phy_id<<8) + location, ioaddr + MII_Addr);
+ outw(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((inw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return;
+}
+
+
+/* Bring the interface up: reset the chip, claim the (shared) IRQ,
+ build the Rx/Tx descriptor rings, program the station address and the
+ DMA condition-select registers, start both DMA channels, and arm the
+ media-monitor timer. Returns 0 on success or -EAGAIN if the IRQ
+ cannot be acquired. */
+static int yellowfin_open(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name,
+ dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (yp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ yellowfin_init_ring(dev);
+
+ /* Hand the ring base addresses to the two DMA channels. */
+ outl(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
+ outl(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);
+
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + StnAddr + i);
+
+ /* Set up various condition 'select' registers.
+ There are no options here. */
+ outl(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
+ outl(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
+ outl(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
+ outl(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
+ outl(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
+ outl(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
+
+ /* Initialize the other registers: with so many of them, this will
+ eventually be converted to an offset/value list. */
+ outl(dma_ctrl, ioaddr + DMACtrl);
+ outw(fifo_cfg, ioaddr + FIFOcfg);
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ outl(0x0030FFFF, ioaddr + FlowCtrl);
+
+ yp->tx_threshold = 32;
+ outl(yp->tx_threshold, ioaddr + TxThreshold);
+
+ if (dev->if_port == 0)
+ dev->if_port = yp->default_port;
+
+ yp->in_interrupt = 0;
+
+ /* Setting the Rx mode will start the Rx process. */
+ if (yp->drv_flags & IsGigabit) {
+ /* We are always in full-duplex mode with gigabit! */
+ yp->full_duplex = 1;
+ outw(0x01CF, ioaddr + Cnfg);
+ } else {
+ outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
+ outw(0x1018, ioaddr + FrameGap1);
+ outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+ }
+ yp->rx_mode = 0;
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outw(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
+ outw(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+ outl(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
+ outl(0x80008000, ioaddr + TxCtrl);
+
+ if (yp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
+ dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&yp->timer);
+ yp->timer.expires = jiffies + 3*HZ;
+ yp->timer.data = (unsigned long)dev;
+ yp->timer.function = &yellowfin_timer; /* timer handler */
+ add_timer(&yp->timer);
+
+ return 0;
+}
+
+/* Periodic media-monitor timer. Detects a stalled transmitter (and
+ invokes the timeout recovery), and on MII boards re-reads the link
+ partner ability to switch the duplex setting. Re-arms itself: every
+ 60s while link is up, every 3s while link is down. */
+static void yellowfin_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+ if (yp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+ /* Transmitter watchdog: queued work, paused queue, and no
+ progress for TX_TIMEOUT jiffies. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT
+ && yp->cur_tx - yp->dirty_tx > 1
+ && netif_queue_paused(dev))
+ yellowfin_tx_timeout(dev);
+
+ if (yp->mii_cnt) {
+ int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
+ int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
+ int negotiated = mii_reg5 & yp->advertising;
+ if (yp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
+ "link partner capability %4.4x.\n",
+ dev->name, yp->phys[0], mii_reg1, mii_reg5);
+
+ /* 0x0100: 100baseTx-FD, 0x0040: 10baseT-FD negotiated. */
+ if ( ! yp->duplex_lock &&
+ ((negotiated & 0x0300) == 0x0100
+ || (negotiated & 0x00C0) == 0x0040)) {
+ yp->full_duplex = 1;
+ }
+ outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+
+ /* Poll faster while the link (BMSR bit 2) is down. */
+ if (mii_reg1 & 0x0004)
+ next_tick = 60*HZ;
+ else
+ next_tick = 3*HZ;
+ }
+
+ yp->timer.expires = jiffies + next_tick;
+ add_timer(&yp->timer);
+}
+
+/* Transmitter hang recovery. Logs chip and (optionally) ring state,
+ pokes the Tx DMA channel awake, and unpauses the queue if there is
+ room. Does not yet fully reinitialize the chip (see comment below). */
+static void yellowfin_tx_timeout(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
+ "status %4.4x, Rx status %4.4x, resetting...\n",
+ dev->name, yp->cur_tx, yp->dirty_tx,
+ (int)inl(ioaddr + TxStatus), (int)inl(ioaddr + RxStatus));
+
+ /* Note: these should be KERN_DEBUG. */
+ if (yp->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", yp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", yp->rx_ring[i].result_status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", yp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
+ yp->tx_ring[i].result_status);
+ printk("\n");
+ }
+
+ /* If the hardware is found to hang regularly, we will update the code
+ to reinitialize the chip here. */
+ dev->if_port = 0;
+
+ /* Wake the potentially-idle transmit channel. */
+ outl(0x10001000, dev->base_addr + TxCtrl);
+ if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
+ netif_unpause_tx_queue(dev);
+
+ /* Reset the watchdog base and count the event. */
+ dev->trans_start = jiffies;
+ yp->stats.tx_errors++;
+ return;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void yellowfin_init_ring(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int i;
+
+ yp->tx_full = 0;
+ yp->cur_rx = yp->cur_tx = 0;
+ yp->dirty_tx = 0;
+
+ yp->rx_buf_sz = dev->mtu + 18 + 15;
+ /* Match other driver's allocation size when possible. */
+ if (yp->rx_buf_sz < PKT_BUF_SZ)
+ yp->rx_buf_sz = PKT_BUF_SZ;
+ yp->rx_head_desc = &yp->rx_ring[0];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
+ yp->rx_ring[i].branch_addr = virt_to_le32desc(&yp->rx_ring[i+1]);
+ }
+ /* Mark the last entry as wrapping the ring. */
+ yp->rx_ring[i-1].branch_addr = virt_to_le32desc(&yp->rx_ring[0]);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ yp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ yp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ }
+ yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+#define NO_TXSTATS
+#ifdef NO_TXSTATS
+ /* In this mode the Tx ring needs only a single descriptor. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ yp->tx_skbuff[i] = 0;
+ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ }
+ /* Wrap ring */
+ yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
+#else
+ /* Tx ring needs a pair of descriptors, the second for the status. */
+ for (i = 0; i < TX_RING_SIZE*2; i++) {
+ yp->tx_skbuff[i/2] = 0;
+ /* Branch on Tx error. */
+ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ i++;
+ if (yp->flags & FullTxStatus) {
+ yp->tx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | sizeof(yp->tx_status[i]));
+ yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
+ yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2]);
+ } else { /* Symbios chips write only tx_errs word. */
+ yp->tx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
+ yp->tx_ring[i].request_cnt = 2;
+ yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2].tx_errs);
+ }
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ }
+ /* Wrap ring */
+ yp->tx_ring[--i].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
+#endif
+ yp->tx_tail_desc = &yp->tx_status[0];
+ return;
+}
+
+/* Queue one packet for transmission. Fills the next Tx descriptor,
+ rewrites the CMD_STOP terminator so the hardware chains into the new
+ entry last (ownership handoff), wakes the Tx DMA channel, and manages
+ the tx_full / queue-stop state. Returns 0 on success, 1 if the queue
+ was busy (pre-2.3.x path only). */
+static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ unsigned entry;
+
+#if LINUX_VERSION_CODE < 0x20323
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ yellowfin_tx_timeout(dev);
+ return 1;
+ }
+#endif
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = yp->cur_tx % TX_RING_SIZE;
+
+ yp->tx_skbuff[entry] = skb;
+
+ if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
+ int cacheline_end = (virt_to_bus(skb->data) + skb->len) % 32;
+ /* Fix GX chipset errata.
+ NOTE(review): this grows skb->len without checking the skb's
+ tailroom — assumes allocators leave enough slack; confirm. */
+ if (cacheline_end > 24 || cacheline_end == 0)
+ skb->len += 32 - cacheline_end + 1;
+ }
+#ifdef NO_TXSTATS
+ yp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ yp->tx_ring[entry].result_status = 0;
+ if (entry >= TX_RING_SIZE-1) {
+ /* New stop command. */
+ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len);
+ } else {
+ yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[entry].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
+ }
+ yp->cur_tx++;
+#else
+ yp->tx_ring[entry<<1].request_cnt = skb->len;
+ yp->tx_ring[entry<<1].addr = virt_to_le32desc(skb->data);
+ /* The input_last (status-write) command is constant, but we must rewrite
+ the subsequent 'stop' command. */
+
+ yp->cur_tx++;
+ {
+ unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
+ yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ }
+ /* Final step -- overwrite the old 'stop' command. */
+
+ yp->tx_ring[entry<<1].dbdma_cmd =
+ cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
+ CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
+#endif
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ outl(0x10001000, dev->base_addr + TxCtrl);
+
+ if (yp->cur_tx - yp->dirty_tx >= TX_QUEUE_SIZE) {
+ netif_stop_tx_queue(dev);
+ yp->tx_full = 1;
+ /* Re-check after stopping: the interrupt handler may have
+ advanced dirty_tx concurrently (hence the volatile read);
+ if so, undo the stop to avoid a stalled queue. */
+ if (yp->cur_tx - (volatile int)yp->dirty_tx < TX_QUEUE_SIZE) {
+ netif_unpause_tx_queue(dev);
+ yp->tx_full = 0;
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ dev->trans_start = jiffies;
+
+ if (yp->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
+ dev->name, yp->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct yellowfin_private *yp;
+ long ioaddr;
+ int boguscnt = max_interrupt_work; /* Cap on status-loop passes per IRQ. */
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ yp = (struct yellowfin_private *)dev->priv;
+ if (test_and_set_bit(0, (void*)&yp->in_interrupt)) { /* Guard against re-entry on this device. */
+ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+ return;
+ }
+
+ do {
+ u16 intr_status = inw(ioaddr + IntrClear); /* NOTE(review): reading IntrClear presumably acks the sources -- confirm vs. databook. */
+
+ if (yp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & (IntrRxDone | IntrEarlyRx)) {
+ yellowfin_rx(dev);
+ outl(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
+ }
+
+#ifdef NO_TXSTATS
+ for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) { /* Reclaim completed Tx slots. */
+ int entry = yp->dirty_tx % TX_RING_SIZE;
+ if (yp->tx_ring[entry].result_status == 0)
+ break;
+ yp->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
+#endif
+ /* Free the original skb. */
+ dev_free_skb_irq(yp->tx_skbuff[entry]);
+ yp->tx_skbuff[entry] = 0;
+ }
+ if (yp->tx_full
+ && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+#else
+ if (intr_status & IntrTxDone
+ || yp->tx_tail_desc->tx_errs) {
+ unsigned dirty_tx = yp->dirty_tx;
+
+ for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ /* Todo: optimize this. */
+ int entry = dirty_tx % TX_RING_SIZE;
+ u16 tx_errs = yp->tx_status[entry].tx_errs;
+
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
+ "%4.4x %4.4x %4.4x %4.4x.\n",
+ dev->name, entry,
+ yp->tx_status[entry].tx_cnt,
+ yp->tx_status[entry].tx_errs,
+ yp->tx_status[entry].total_tx_cnt,
+ yp->tx_status[entry].paused);
+#endif
+ if (tx_errs == 0)
+ break; /* It still hasn't been Txed */
+ if (tx_errs & 0xF810) {
+ /* There was a major error, log it. */
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+#endif
+ yp->stats.tx_errors++;
+ if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
+ if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
+ if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
+ if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (tx_errs & 0x1000) yp->stats.collisions16++;
+#endif
+ } else {
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+#endif
+#ifdef ETHER_STATS
+ if (tx_errs & 0x0400) yp->stats.tx_deferred++;
+#endif
+#if LINUX_VERSION_CODE > 0x20127
+ yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
+#endif
+ yp->stats.collisions += tx_errs & 15; /* Low nibble is the collision count. */
+ yp->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(yp->tx_skbuff[entry]);
+ yp->tx_skbuff[entry] = 0;
+ /* Mark status as empty. */
+ yp->tx_status[entry].tx_errs = 0;
+ }
+
+#ifndef final_version
+ if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
+ dirty_tx += TX_RING_SIZE; /* Force the pointer back into range. */
+ }
+#endif
+
+ if (yp->tx_full
+ && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ yp->dirty_tx = dirty_tx;
+ yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
+ }
+#endif
+
+ /* Log errors and other uncommon events. */
+ if (intr_status & 0x2ee) /* Abnormal error summary. */
+ yellowfin_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (yp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+ clear_bit(0, (void*)&yp->in_interrupt);
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int yellowfin_rx(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int entry = yp->cur_rx % RX_RING_SIZE;
+ int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx; /* At most one full ring per call. */
+
+ if (yp->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
+ entry, yp->rx_ring[entry].result_status);
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
+ entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
+ yp->rx_ring[entry].result_status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (yp->rx_head_desc->result_status) {
+ struct yellowfin_desc *desc = yp->rx_head_desc;
+ u16 desc_status = le32_to_cpu(desc->result_status) >> 16;
+ int data_size =
+ (le32_to_cpu(desc->dbdma_cmd) - le32_to_cpu(desc->result_status))
+ & 0xffff;
+ u8 *buf_addr = le32desc_to_virt(desc->addr);
+ s16 frame_status = get_unaligned((s16*)&(buf_addr[data_size - 2])); /* Trailing 2-byte status -- checked below. */
+
+ if (yp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RX_EOP)) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
+ " status %4.4x!\n", dev->name, desc_status);
+ yp->stats.rx_length_errors++;
+ } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
+ /* There was an error. */
+ if (yp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
+ frame_status);
+ yp->stats.rx_errors++;
+ if (frame_status & 0x0060) yp->stats.rx_length_errors++;
+ if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
+ if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
+ if (frame_status < 0) yp->stats.rx_dropped++;
+ } else if ( !(yp->drv_flags & IsGigabit) &&
+ ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) { /* Non-gigabit parts report status as two bytes. */
+ u8 status1 = buf_addr[data_size-2];
+ u8 status2 = buf_addr[data_size-1];
+ yp->stats.rx_errors++;
+ if (status1 & 0xC0) yp->stats.rx_length_errors++;
+ if (status2 & 0x03) yp->stats.rx_frame_errors++;
+ if (status2 & 0x04) yp->stats.rx_crc_errors++;
+ if (status2 & 0x80) yp->stats.rx_dropped++;
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ } else if ((yp->flags & HasMACAddrBug) &&
+ memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
+ dev->dev_addr, 6) != 0
+ && memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
+ "\377\377\377\377\377\377", 6) != 0) {
+ if (bogus_rx++ == 0)
+ printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x.\n",
+ dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
+ buf_addr[3], buf_addr[4], buf_addr[5]);
+#endif
+ } else {
+ struct sk_buff *skb;
+ int pkt_len = data_size -
+ (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]); /* Chip-dependent trailer length. */
+ /* To verify: Yellowfin Length should omit the CRC! */
+
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+#endif
+ /* Check if the packet is long enough to just pass up the skbuff
+ without copying to a properly sized skbuff. */
+ if (pkt_len > yp->rx_copybreak) {
+ char *temp = skb_put(skb = yp->rx_skbuff[entry], pkt_len); /* Hand the ring skb up; refill loop replaces it. */
+ yp->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (le32desc_to_virt(yp->rx_ring[entry].addr) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in yellowfin_rx: %p vs. %p / %p.\n",
+ dev->name,
+ le32desc_to_virt(yp->rx_ring[entry].addr),
+ skb->head, temp);
+#endif
+ } else {
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL)
+ break;
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM
+ eth_copy_and_sum(skb, yp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), yp->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ yp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ yp->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++yp->cur_rx) % RX_RING_SIZE;
+ yp->rx_head_desc = &yp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
+ entry = yp->dirty_rx % RX_RING_SIZE;
+ if (yp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ yp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ yp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ }
+ yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
+ if (entry != 0)
+ yp->rx_ring[entry - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); /* Re-arm the previous slot. */
+ else
+ yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
+ | yp->rx_buf_sz); /* Last slot branches back to the ring start. */
+ }
+
+ return 0;
+}
+
+static void yellowfin_error(struct net_device *dev, int intr_status)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* The recovery policy is unclear; we only account the fault in the stats. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ yp->stats.tx_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ yp->stats.rx_errors++;
+}
+
+static int yellowfin_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev); /* No more transmits while shutting down. */
+
+ if (yp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, inw(ioaddr + TxStatus),
+ inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outw(0x0000, ioaddr + IntrEnb);
+
+ /* Stop the chip's Tx and Rx processes. */
+ outl(0x80000000, ioaddr + RxCtrl);
+ outl(0x80000000, ioaddr + TxCtrl);
+
+ del_timer(&yp->timer); /* Cancel the driver timer. */
+
+#if defined(__i386__)
+ if (yp->msg_level & NETIF_MSG_IFDOWN) { /* Dump both rings for debugging. */
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(yp->tx_ring));
+ for (i = 0; i < TX_RING_SIZE*2; i++)
+ printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
+ inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
+ i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
+ yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
+ printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n",
+ i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
+ yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
+
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(yp->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
+ inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
+ i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
+ yp->rx_ring[i].result_status);
+ if (yp->msg_level & NETIF_MSG_PKTDATA) {
+ if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
+ int j;
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x",
+ get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
+ printk("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (yp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ yp->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(yp->rx_skbuff[i]);
+ }
+ yp->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) { /* Release any skbs still queued for Tx. */
+ if (yp->tx_skbuff[i])
+ dev_free_skb(yp->tx_skbuff[i]);
+ yp->tx_skbuff[i] = 0;
+ }
+
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ if (yp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
+ dev->name, bogus_rx);
+ }
+#endif
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ return &yp->stats; /* Counters are maintained by the interrupt/Rx paths. */
+}
+
+/* Set or clear the multicast filter for this adaptor. */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) { /* Fold in each octet, LSB first. */
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le; /* Reflected AUTODIN-II polynomial. */
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ u16 hash_table[4] = {0, 0, 0, 0}; /* 64-bit multicast hash filter. */
+ int mc_change = 0;
+ int new_rx_mode, i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ new_rx_mode = 0x000F; /* AddrMode value for promiscuous. */
+ } else if (dev->mc_count > yp->multicast_filter_limit
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well, or accept all multicasts. */
+ new_rx_mode = 0x000B;
+ } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ struct dev_mc_list *mclist;
+
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ /* Due to a bug in the early chip versions, multiple filter
+ slots must be set for each address. */
+ if (yp->drv_flags & HasMulticastBug) {
+ set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ }
+ set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ }
+ if (memcmp(hash_table, yp->mc_filter, sizeof hash_table) != 0) /* Only reprogram if the filter changed. */
+ mc_change = 1;
+ new_rx_mode = 0x0003;
+ } else { /* Normal, unicast/broadcast-only mode. */
+ new_rx_mode = 0x0001;
+ }
+
+ /* Stop the Rx process to change any value. */
+ if (yp->rx_mode != new_rx_mode || mc_change) {
+ long ioaddr = dev->base_addr;
+ u16 cfg_value = inw(ioaddr + Cnfg);
+
+ outw(cfg_value & ~0x1000, ioaddr + Cnfg); /* Clear bit 12 to halt Rx while reconfiguring. */
+
+ yp->rx_mode = new_rx_mode;
+ outw(new_rx_mode, ioaddr + AddrMode);
+ memcpy(yp->mc_filter, hash_table, sizeof hash_table);
+ /* Copy the hash table to the chip. */
+ for (i = 0; i < 4; i++)
+ outw(hash_table[i], ioaddr + HashTbl + i*2);
+
+ /* Restart the Rx process. */
+ outw(cfg_value | 0x1000, ioaddr + Cnfg);
+ }
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct yellowfin_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data; /* MII words: [0]=phy id, [1]=reg, [2]=value in, [3]=value out. */
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0: /* Numeric equivalents of the SIOC*MII ioctls. */
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) { /* Track writes to our own PHY's control registers. */
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS: /* Driver-private: read tuning parameters. */
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS: /* Driver-private: set tuning parameters. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&yellowfin_drv_id, NULL); /* Register with the PCI driver scaffold. */
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&yellowfin_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_yellowfin_dev) { /* Walk the per-device list, releasing each one. */
+ struct yellowfin_private *np = (void *)(root_yellowfin_dev->priv);
+ unregister_netdev(root_yellowfin_dev);
+#ifdef USE_IO_OPS
+ release_region(root_yellowfin_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)root_yellowfin_dev->base_addr);
+#endif
+ next_dev = np->next_module; /* Save link before freeing the device. */
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_yellowfin_dev);
+ root_yellowfin_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` yellowfin.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/znet.c b/linux/src/drivers/net/znet.c
new file mode 100644
index 0000000..a9996fd
--- /dev/null
+++ b/linux/src/drivers/net/znet.c
@@ -0,0 +1,746 @@
+/* znet.c: A Zenith Z-Note ethernet driver for linux. */
+
+static const char *version = "znet.c:v1.02 9/23/94 becker@cesdis.gsfc.nasa.gov\n";
+
+/*
+ Written by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov.
+ This driver is based on the Linux skeleton driver. The copyright of the
+ skeleton driver is held by the United States Government, as represented
+ by DIRNSA, and it is released under the GPL.
+
+ Thanks to Mike Hollick for alpha testing and suggestions.
+
+ References:
+ The Crynwr packet driver.
+
+ "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
+ Intel Microcommunications Databook, Vol. 1, 1990.
+ As usual with Intel, the documentation is incomplete and inaccurate.
+ I had to read the Crynwr packet driver to figure out how to actually
+ use the i82593, and guess at what register bits matched the loosely
+ related i82586.
+
+ Theory of Operation
+
+ The i82593 used in the Zenith Z-Note series operates using two(!) slave
+ DMA channels, one interrupt, and one 8-bit I/O port.
+
+ While there are several ways to configure the '593 DMA system, I chose
+ the one that seemed commensurate with the highest system performance in the face
+ of moderate interrupt latency: Both DMA channels are configured as
+ recirculating ring buffers, with one channel (#0) dedicated to Rx and
+ the other channel (#1) to Tx and configuration. (Note that this is
+ different than the Crynwr driver, where the Tx DMA channel is initialized
+ before each operation. That approach simplifies operation and Tx error
+ recovery, but requires additional I/O in normal operation and precludes
+ transmit buffer chaining.)
+
+ Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides
+ a reasonable ring size for Rx, while simplifying DMA buffer allocation --
+ DMA buffers must not cross a 128K boundary. (In truth the size selection
+ was influenced by my lack of '593 documentation. I thus was constrained
+ to use the Crynwr '593 initialization table, which sets the Rx ring size
+ to 8K.)
+
+ Despite my usual low opinion about Intel-designed parts, I must admit
+ that the bulk data handling of the i82593 is a good design for
+ an integrated system, like a laptop, where using two slave DMA channels
+ doesn't pose a problem. I still take issue with using only a single I/O
+ port. In the same controlled environment there are essentially no
+ limitations on I/O space, and using multiple locations would eliminate
+ the need for multiple operations when looking at status registers,
+ setting the Rx ring boundary, or switching to promiscuous mode.
+
+ I also question Zenith's selection of the '593: one of the advertised
+ advantages of earlier Intel parts was that if you figured out the magic
+ initialization incantation you could use the same part on many different
+ network types. Zenith's use of the "FriendlyNet" (sic) connector rather
+ than an on-board transceiver leads me to believe that they were planning
+ to take advantage of this. But, uhmmm, the '593 omits all but ethernet
+ functionality from the serial subsystem.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#ifndef ZNET_DEBUG
+#define ZNET_DEBUG 1
+#endif
+static unsigned int znet_debug = ZNET_DEBUG;
+
+/* The DMA modes we need aren't in <dma.h>. */
+#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
+#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
+#define DMA_BUF_SIZE 8192
+#define RX_BUF_SIZE 8192
+#define TX_BUF_SIZE 8192
+
+/* Commands to the i82593 channel 0. */
+#define CMD0_CHNL_0 0x00
+#define CMD0_CHNL_1 0x10 /* Switch to channel 1. */
+#define CMD0_NOP (CMD0_CHNL_0)
+#define CMD0_PORT_1 CMD0_CHNL_1
+#define CMD1_PORT_0 1
+#define CMD0_IA_SETUP 1
+#define CMD0_CONFIGURE 2
+#define CMD0_MULTICAST_LIST 3
+#define CMD0_TRANSMIT 4
+#define CMD0_DUMP 6
+#define CMD0_DIAGNOSE 7
+#define CMD0_Rx_ENABLE 8
+#define CMD0_Rx_DISABLE 10
+#define CMD0_Rx_STOP 11
+#define CMD0_RETRANSMIT 12
+#define CMD0_ABORT 13
+#define CMD0_RESET 14
+
+#define CMD0_ACK 0x80
+
+#define CMD0_STAT0 (0 << 5)
+#define CMD0_STAT1 (1 << 5)
+#define CMD0_STAT2 (2 << 5)
+#define CMD0_STAT3 (3 << 5)
+
+#define net_local znet_private
+struct znet_private { /* Per-device state; a single static instance (zn) exists. */
+ int rx_dma, tx_dma; /* Slave DMA channel numbers, from the NETIDBLK. */
+ struct enet_statistics stats;
+ /* The starting, current, and end pointers for the packet buffers. */
+ ushort *rx_start, *rx_cur, *rx_end;
+ ushort *tx_start, *tx_cur, *tx_end;
+ ushort tx_buf_len; /* Tx buffer length, in words. */
+};
+
+/* Only one can be built-in;-> */
+static struct znet_private zn;
+static ushort dma_buffer1[DMA_BUF_SIZE/2];
+static ushort dma_buffer2[DMA_BUF_SIZE/2];
+static ushort dma_buffer3[DMA_BUF_SIZE/2 + 8];
+
+/* The configuration block. What an undocumented nightmare. The first
+ set of values are those suggested (without explanation) for ethernet
+ in the Intel 82586 databook. The rest appear to be completely undocumented,
+ except for cryptic notes in the Crynwr packet driver. This driver uses
+ the Crynwr values verbatim. */
+
+static unsigned char i593_init[] = {
+ 0xAA, /* 0: 16-byte input & 80-byte output FIFO. */
+ /* threshold, 96-byte FIFO, 82593 mode. */
+ 0x88, /* 1: Continuous w/interrupts, 128-clock DMA.*/
+ 0x2E, /* 2: 8-byte preamble, NO address insertion, */
+ /* 6-byte Ethernet address, loopback off.*/
+ 0x00, /* 3: Default priorities & backoff methods. */
+ 0x60, /* 4: 96-bit interframe spacing. */
+ 0x00, /* 5: 512-bit slot time (low-order). */
+ 0xF2, /* 6: Slot time (high-order), 15 COLL retries. */
+ 0x00, /* 7: Promisc-off, broadcast-on, default CRC. */
+ 0x00, /* 8: Default carrier-sense, collision-detect. */
+ 0x40, /* 9: 64-byte minimum frame length. */
+ 0x5F, /* A: Type/length checks OFF, no CRC input,
+ "jabber" termination, etc. */
+ 0x00, /* B: Full-duplex disabled. */
+ 0x3F, /* C: Default multicast addresses & backoff. */
+ 0x07, /* D: Default IFS retriggering. */
+ 0x31, /* E: Internal retransmit, drop "runt" packets,
+ synchr. DRQ deassertion, 6 status bytes. */
+ 0x22, /* F: Receive ring-buffer size (8K),
+ receive-stop register enable. */
+};
+
+struct netidblk { /* BIOS-resident configuration block, located by znet_probe(). */
+ char magic[8]; /* The magic number (string) "NETIDBLK" */
+ unsigned char netid[8]; /* The physical station address */
+ char nettype, globalopt;
+ char vendor[8]; /* The machine vendor and product name. */
+ char product[8];
+ char irq1, irq2; /* Interrupts, only one is currently used. */
+ char dma1, dma2;
+ short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
+ short iobase1, iosize1;
+ short iobase2, iosize2; /* Second iobase unused. */
+ char driver_options; /* Misc. bits */
+ char pad;
+};
+
+int znet_probe(struct device *dev);
+static int znet_open(struct device *dev);
+static int znet_send_packet(struct sk_buff *skb, struct device *dev);
+static void znet_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void znet_rx(struct device *dev);
+static int znet_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static void hardware_init(struct device *dev);
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
+
+#ifdef notdef
+static struct sigaction znet_sigaction = { &znet_interrupt, 0, 0, NULL, };
+#endif
+
+
+/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
+ BIOS area. We just scan for the signature, and pull the vital parameters
+ out of the structure. */
+
+int znet_probe(struct device *dev)
+{
+ int i;
+ struct netidblk *netinfo;
+ char *p;
+
+ /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
+ for(p = (char *)0xf0000; p < (char *)0x100000; p++)
+ if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0)
+ break;
+
+ if (p >= (char *)0x100000) { /* Signature not found anywhere in the BIOS region. */
+ if (znet_debug > 1)
+ printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
+ return ENODEV;
+ }
+ netinfo = (struct netidblk *)p;
+ dev->base_addr = netinfo->iobase1;
+ dev->irq = netinfo->irq1;
+
+ printk(KERN_INFO "%s: ZNET at %#3lx,", dev->name, dev->base_addr);
+
+ /* The station address is in the "netidblk" at 0x0f0000. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = netinfo->netid[i]);
+
+ printk(", using IRQ %d DMA %d and %d.\n", dev->irq, netinfo->dma1,
+ netinfo->dma2);
+
+ if (znet_debug > 1) {
+ printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
+ dev->name, netinfo->vendor,
+ netinfo->irq1, netinfo->irq2,
+ netinfo->dma1, netinfo->dma2);
+ printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
+ dev->name, netinfo->iobase1, netinfo->iosize1,
+ netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
+ }
+
+ if (znet_debug > 0)
+ printk("%s%s", KERN_INFO, version);
+
+ dev->priv = (void *) &zn; /* Single static instance; only one ZNet can exist. */
+ zn.rx_dma = netinfo->dma1;
+ zn.tx_dma = netinfo->dma2;
+
+ /* These should never fail. You can't add devices to a sealed box! */
+ if (request_irq(dev->irq, &znet_interrupt, 0, "ZNet", NULL)
+ || request_dma(zn.rx_dma,"ZNet rx")
+ || request_dma(zn.tx_dma,"ZNet tx")) {
+ printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
+ return EBUSY;
+ }
+ irq2dev_map[dev->irq] = dev; /* Needed so znet_interrupt() can find the device. */
+
+ /* Allocate buffer memory. We can cross a 128K boundary, so we
+ must be careful about the allocation. It's easiest to waste 8K. */
+ if (dma_page_eq(dma_buffer1, &dma_buffer1[RX_BUF_SIZE/2-1]))
+ zn.rx_start = dma_buffer1;
+ else
+ zn.rx_start = dma_buffer2;
+
+ if (dma_page_eq(dma_buffer3, &dma_buffer3[RX_BUF_SIZE/2-1]))
+ zn.tx_start = dma_buffer3;
+ else
+ zn.tx_start = dma_buffer2;
+ zn.rx_end = zn.rx_start + RX_BUF_SIZE/2;
+ zn.tx_buf_len = TX_BUF_SIZE/2;
+ zn.tx_end = zn.tx_start + zn.tx_buf_len;
+
+ /* The ZNET-specific entries in the device structure. */
+ dev->open = &znet_open;
+ dev->hard_start_xmit = &znet_send_packet;
+ dev->stop = &znet_close;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the 'dev' with ethernet-generic values. */
+ ether_setup(dev);
+
+ return 0;
+}
+
+
+static int znet_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 2)
+ printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
+
+ /* Turn on the 82501 SIA, using zenith-specific magic. */
+ outb(0x10, 0xe6); /* Select LAN control register */
+ outb(inb(0xe7) | 0x84, 0xe7); /* Turn on LAN power (bit 2). */
+ /* According to the Crynwr driver we should wait 50 msec. for the
+ LAN clock to stabilize. My experiments indicates that the '593 can
+ be initialized immediately. The delay is probably needed for the
+ DC-to-DC converter to come up to full voltage, and for the oscillator
+ to be spot-on at 20Mhz before transmitting.
+ Until this proves to be a problem we rely on the higher layers for the
+ delay and save allocating a timer entry. */
+
+ /* This follows the packet driver's lead, and checks for success. */
+ if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
+ printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
+ dev->name);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ hardware_init(dev); /* Program the '593 and start both DMA channels. */
+ dev->start = 1; /* Mark the interface as up. */
+
+ return 0;
+}
+
+static int znet_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: ZNet_send_packet(%ld).\n", dev->name, dev->tbusy);
+
+ /* Transmitter timeout, likely just recovery after suspending the machine. */
+ if (dev->tbusy) {
+ ushort event, tx_status, rx_offset, state;
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ outb(CMD0_STAT0, ioaddr); event = inb(ioaddr); /* Select each status port, then read it. */
+ outb(CMD0_STAT1, ioaddr); tx_status = inw(ioaddr);
+ outb(CMD0_STAT2, ioaddr); rx_offset = inw(ioaddr);
+ outb(CMD0_STAT3, ioaddr); state = inb(ioaddr);
+ printk(KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
+ " resetting.\n", dev->name, event, tx_status, rx_offset, state);
+ if (tx_status == 0x0400)
+ printk(KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
+ dev->name);
+ outb(CMD0_RESET, ioaddr);
+ hardware_init(dev);
+ }
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Check that the part hasn't reset itself, probably from suspend. */
+ outb(CMD0_STAT0, ioaddr);
+ if (inw(ioaddr) == 0x0010
+ && inw(ioaddr) == 0x0000
+ && inw(ioaddr) == 0x0010)
+ hardware_init(dev);
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; /* Pad short frames to the minimum. */
+ unsigned char *buf = (void *)skb->data;
+ ushort *tx_link = zn.tx_cur - 1; /* Link word of the previous frame, patched below. */
+ ushort rnd_len = (length + 1)>>1; /* Frame length in words, rounded up. */
+
+ {
+ short dma_port = ((zn.tx_dma&3)<<2) + IO_DMA2_BASE;
+ unsigned addr = inb(dma_port); /* Read the controller's current address, low byte first. */
+ addr |= inb(dma_port) << 8;
+ addr <<= 1; /* Word address to byte address. */
+ if (((int)zn.tx_cur & 0x1ffff) != addr)
+ printk(KERN_WARNING "Address mismatch at Tx: %#x vs %#x.\n",
+ (int)zn.tx_cur & 0xffff, addr);
+ zn.tx_cur = (ushort *)(((int)zn.tx_cur & 0xfe0000) | addr); /* Resync our pointer with the DMA engine. */
+ }
+
+ if (zn.tx_cur >= zn.tx_end)
+ zn.tx_cur = zn.tx_start;
+ *zn.tx_cur++ = length;
+ if (zn.tx_cur + rnd_len + 1 > zn.tx_end) { /* Frame wraps: copy in two pieces. */
+ int semi_cnt = (zn.tx_end - zn.tx_cur)<<1; /* Cvrt to byte cnt. */
+ memcpy(zn.tx_cur, buf, semi_cnt);
+ rnd_len -= semi_cnt>>1;
+ memcpy(zn.tx_start, buf + semi_cnt, length - semi_cnt);
+ zn.tx_cur = zn.tx_start + rnd_len;
+ } else {
+ memcpy(zn.tx_cur, buf, skb->len);
+ zn.tx_cur += rnd_len;
+ }
+ *zn.tx_cur++ = 0; /* Terminating link word, patched by the next transmit. */
+ cli(); {
+ *tx_link = CMD0_TRANSMIT + CMD0_CHNL_1;
+ /* Is this always safe to do? */
+ outb(CMD0_TRANSMIT + CMD0_CHNL_1,ioaddr);
+ } sti();
+
+ dev->trans_start = jiffies;
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
+ }
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+}
+
+/* The ZNET interrupt handler. */
+static void znet_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = irq2dev_map[irq]; /* Mapping installed by znet_probe(). */
+ int ioaddr;
+ int boguscnt = 20; /* Bound the status-polling loop. */
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "znet_interrupt(): IRQ %d for unknown device.\n", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+ ioaddr = dev->base_addr;
+
+ outb(CMD0_STAT0, ioaddr); /* Select the event-status port. */
+ do {
+ ushort status = inb(ioaddr);
+ if (znet_debug > 5) {
+ ushort result, rx_ptr, running;
+ outb(CMD0_STAT1, ioaddr);
+ result = inw(ioaddr);
+ outb(CMD0_STAT2, ioaddr);
+ rx_ptr = inw(ioaddr);
+ outb(CMD0_STAT3, ioaddr);
+ running = inb(ioaddr);
+ printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
+ dev->name, status, result, rx_ptr, running, boguscnt);
+ }
+ if ((status & 0x80) == 0) /* No interrupt pending. */
+ break;
+
+ if ((status & 0x0F) == 4) { /* Transmit done. */
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int tx_status;
+ outb(CMD0_STAT1, ioaddr);
+ tx_status = inw(ioaddr);
+ /* It's undocumented, but tx_status seems to match the i82586. */
+ if (tx_status & 0x2000) {
+ lp->stats.tx_packets++;
+ lp->stats.collisions += tx_status & 0xf; /* Low nibble is the collision count. */
+ } else {
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ /* ...and the catch-all. */
+ if ((tx_status | 0x0760) != 0x0760)
+ lp->stats.tx_errors++;
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+
+ if ((status & 0x40)
+ || (status & 0x0f) == 11) { /* Rx event or Rx-stopped state. */
+ znet_rx(dev);
+ }
+ /* Clear the interrupts we've handled. */
+ outb(CMD0_ACK,ioaddr);
+ } while (boguscnt--);
+
+ dev->interrupt = 0;
+ return;
+}
+
+/* Drain completed frames from the receive DMA ring and hand them to the
+   network stack.  The chip writes each frame followed by a trailer
+   (status, backward link, byte count).  Pass 1 walks the backward links
+   and rewrites them in place as forward links (the Crynwr-driver trick);
+   pass 2 follows the forward links so packets are delivered in arrival
+   order. */
+static void znet_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 1;
+ short next_frame_end_offset = 0; /* Offset of next frame start. */
+ short *cur_frame_end;
+ short cur_frame_end_offset;
+
+ outb(CMD0_STAT2, ioaddr); /* Read the chip's current Rx write offset. */
+ cur_frame_end_offset = inw(ioaddr);
+
+ /* Write offset equals our read offset: spurious Rx interrupt. */
+ if (cur_frame_end_offset == zn.rx_cur - zn.rx_start) {
+ printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
+ dev->name, cur_frame_end_offset);
+ return;
+ }
+
+ /* Use same method as the Crynwr driver: construct a forward list in
+ the same area of the backwards links we now have. This allows us to
+ pass packets to the upper layers in the order they were received --
+ important for fast-path sequential operations. */
+ while (zn.rx_start + cur_frame_end_offset != zn.rx_cur
+ && ++boguscount < 5) {
+ unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
+ int count, status;
+
+ if (cur_frame_end_offset < 4) {
+ /* Oh no, we have a special case: the frame trailer wraps around
+ the end of the ring buffer. We've saved space at the end of
+ the ring buffer for just this problem. */
+ memcpy(zn.rx_end, zn.rx_start, 8);
+ cur_frame_end_offset += (RX_BUF_SIZE/2);
+ }
+ cur_frame_end = zn.rx_start + cur_frame_end_offset - 4;
+
+ /* Each 16-bit trailer field is spread across two ring words, one
+ byte in each; reassemble status and count from the low bytes. */
+ lo_status = *cur_frame_end++;
+ hi_status = *cur_frame_end++;
+ status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
+ lo_cnt = *cur_frame_end++;
+ hi_cnt = *cur_frame_end++;
+ count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
+ " count %#x status %04x.\n",
+ cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
+ count, status);
+ /* Overwrite the trailer in place: status / forward link / count. */
+ cur_frame_end[-4] = status;
+ cur_frame_end[-3] = next_frame_end_offset;
+ cur_frame_end[-2] = count;
+ next_frame_end_offset = cur_frame_end_offset;
+ cur_frame_end_offset -= ((count + 1)>>1) + 3; /* Step back over this frame. */
+ if (cur_frame_end_offset < 0)
+ cur_frame_end_offset += RX_BUF_SIZE/2;
+ };
+
+ /* Now step forward through the list. */
+ do {
+ ushort *this_rfp_ptr = zn.rx_start + next_frame_end_offset;
+ int status = this_rfp_ptr[-4];
+ int pkt_len = this_rfp_ptr[-2];
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
+ " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
+ this_rfp_ptr[-3]<<1);
+ /* Once again we must assume that the i82586 docs apply. */
+ if ( ! (status & 0x2000)) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x0800) lp->stats.rx_crc_errors++;
+ if (status & 0x0400) lp->stats.rx_frame_errors++;
+ if (status & 0x0200) lp->stats.rx_over_errors++; /* Wrong. */
+ if (status & 0x0100) lp->stats.rx_fifo_errors++;
+ if (status & 0x0080) lp->stats.rx_length_errors++;
+ } else if (pkt_len > 1536) {
+ lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ if (znet_debug)
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ if (&zn.rx_cur[(pkt_len+1)>>1] > zn.rx_end) {
+ /* Frame wraps past the end of the ring: copy in two pieces. */
+ int semi_cnt = (zn.rx_end - zn.rx_cur)<<1;
+ memcpy(skb_put(skb,semi_cnt), zn.rx_cur, semi_cnt);
+ memcpy(skb_put(skb,pkt_len-semi_cnt), zn.rx_start,
+ pkt_len - semi_cnt);
+ } else {
+ memcpy(skb_put(skb,pkt_len), zn.rx_cur, pkt_len);
+ if (znet_debug > 6) {
+ unsigned int *packet = (unsigned int *) skb->data;
+ printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
+ packet[1], packet[2], packet[3]);
+ }
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ zn.rx_cur = this_rfp_ptr;
+ if (zn.rx_cur >= zn.rx_end)
+ zn.rx_cur -= RX_BUF_SIZE/2;
+ /* Presumably advances the chip's write limit past the consumed
+ frame -- see update_stop_hit(); confirm against i82593 docs. */
+ update_stop_hit(ioaddr, (zn.rx_cur - zn.rx_start)<<1);
+ next_frame_end_offset = this_rfp_ptr[-3];
+ if (next_frame_end_offset == 0) /* Read all the frames? */
+ break; /* Done for now */
+ this_rfp_ptr = zn.rx_start + next_frame_end_offset;
+ } while (--boguscount);
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(INET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ return;
+}
+
+/* The inverse routine to znet_open(): stop the chip, release both DMA
+   channels and the IRQ, and power down the transceiver. */
+static int znet_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(CMD0_RESET, ioaddr); /* Stop the chip with a reset. */
+
+ disable_dma(zn.rx_dma);
+ disable_dma(zn.tx_dma);
+
+ free_irq(dev->irq, NULL);
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ /* Turn off transceiver power. */
+ outb(0x10, 0xe6); /* Select LAN control register */
+ outb(inb(0xe7) & ~0x84, 0xe7); /* Clear bit 2 (and 7) to turn LAN power off. */
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+   closed.  The counters themselves are maintained by the interrupt
+   handler and znet_rx(). */
+static struct enet_statistics *net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   As a side effect this routine must also initialize the device parameters.
+   This is taken advantage of in open().
+
+   N.B. that we change i593_init[] in place. This (properly) makes the
+   mode change persistent, but must be changed if this code is moved to
+   a multiple adaptor environment.
+ */
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ /* Patch the relevant bits of the configuration block in place. */
+ if (dev->flags&IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ i593_init[7] &= ~3; i593_init[7] |= 1;
+ i593_init[13] &= ~8; i593_init[13] |= 8;
+ } else if (dev->mc_list || (dev->flags&IFF_ALLMULTI)) {
+ /* Enable accept-all-multicast mode */
+ i593_init[7] &= ~3; i593_init[7] |= 0;
+ i593_init[13] &= ~8; i593_init[13] |= 8;
+ } else { /* Enable normal mode. */
+ i593_init[7] &= ~3; i593_init[7] |= 0;
+ i593_init[13] &= ~8; i593_init[13] |= 0;
+ }
+ /* Queue the updated configuration block in the shared Tx ring
+ (length word, then the block) and issue the configure command. */
+ *zn.tx_cur++ = sizeof(i593_init);
+ memcpy(zn.tx_cur, i593_init, sizeof(i593_init));
+ zn.tx_cur += sizeof(i593_init)/2;
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+#ifdef not_tested
+ if (num_addrs > 0) {
+ int addrs_len = 6*num_addrs;
+ *zn.tx_cur++ = addrs_len;
+ memcpy(zn.tx_cur, addrs, addrs_len);
+ outb(CMD0_MULTICAST_LIST+CMD0_CHNL_1, ioaddr);
+ zn.tx_cur += addrs_len>>1;
+ }
+#endif
+}
+
+/* Debug helper: read the Tx DMA channel's current address from the DMA
+   controller and print it together with the remaining transfer count. */
+void show_dma(void)
+{
+ short dma_port = ((zn.tx_dma&3)<<2) + IO_DMA2_BASE;
+ unsigned addr = inb(dma_port); /* Low byte first... */
+ addr |= inb(dma_port) << 8; /* ...then high byte via the same port. */
+ printk("Addr: %04x cnt:%3x...", addr<<1, get_dma_residue(zn.tx_dma));
+}
+
+/* Initialize the hardware. We have to do this when the board is open()ed
+   or when we come out of suspend mode.  Resets the chip, programs both
+   ISA DMA channels, then queues configure and address-setup commands
+   through the Tx ring before enabling the receiver. */
+static void hardware_init(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ zn.rx_cur = zn.rx_start;
+ zn.tx_cur = zn.tx_start;
+
+ /* Reset the chip, and start it up. */
+ outb(CMD0_RESET, ioaddr);
+
+ cli(); { /* Protect against a DMA flip-flop */
+ disable_dma(zn.rx_dma); /* reset by an interrupting task. */
+ clear_dma_ff(zn.rx_dma);
+ set_dma_mode(zn.rx_dma, DMA_RX_MODE);
+ set_dma_addr(zn.rx_dma, (unsigned int) zn.rx_start);
+ set_dma_count(zn.rx_dma, RX_BUF_SIZE);
+ enable_dma(zn.rx_dma);
+ /* Now set up the Tx channel. */
+ disable_dma(zn.tx_dma);
+ clear_dma_ff(zn.tx_dma);
+ set_dma_mode(zn.tx_dma, DMA_TX_MODE);
+ set_dma_addr(zn.tx_dma, (unsigned int) zn.tx_start);
+ set_dma_count(zn.tx_dma, zn.tx_buf_len<<1);
+ enable_dma(zn.tx_dma);
+ } sti();
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Initializing the i82593, tx buf %p... ", dev->name,
+ zn.tx_start);
+ /* Do an empty configure command, just like the Crynwr driver. This
+ resets the chip to its default values. */
+ *zn.tx_cur++ = 0;
+ *zn.tx_cur++ = 0;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+ /* Queue the real configuration block (length word, then the data). */
+ *zn.tx_cur++ = sizeof(i593_init);
+ memcpy(zn.tx_cur, i593_init, sizeof(i593_init));
+ zn.tx_cur += sizeof(i593_init)/2;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+ /* Queue the 6-byte station address (6 bytes == 3 ring words). */
+ *zn.tx_cur++ = 6;
+ memcpy(zn.tx_cur, dev->dev_addr, 6);
+ zn.tx_cur += 3;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_IA_SETUP + CMD0_CHNL_1, ioaddr);
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+
+ update_stop_hit(ioaddr, 8192);
+ if (znet_debug > 1) printk("enabling Rx.\n");
+ outb(CMD0_Rx_ENABLE+CMD0_CHNL_0, ioaddr);
+ dev->tbusy = 0;
+}
+
+/* Program the receiver's "stop hit" value from an Rx-buffer byte offset.
+   NOTE(review): the >>6 scaling and the 0x80 flag are taken on faith
+   from the driver's own usage -- confirm against the i82593 datasheet. */
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
+{
+ outb(CMD0_PORT_1, ioaddr); /* Switch to port 1 to write the value... */
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
+ (rx_stop_offset >> 6) | 0x80);
+ outb((rx_stop_offset >> 6) | 0x80, ioaddr);
+ outb(CMD1_PORT_0, ioaddr); /* ...then switch back to port 0. */
+}
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c znet.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/pci/pci.c b/linux/src/drivers/pci/pci.c
new file mode 100644
index 0000000..cf7dd80
--- /dev/null
+++ b/linux/src/drivers/pci/pci.c
@@ -0,0 +1,1322 @@
+/*
+ * drivers/pci/pci.c
+ *
+ * PCI services that are built on top of the BIOS32 service.
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt, Frederic Potter,
+ * David Mosberger-Tang
+ *
+ * Apr 12, 1998 : Fixed handling of alien header types. [mj]
+ */
+
+#include <linux/config.h>
+#include <linux/ptrace.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+
+struct pci_bus pci_root;
+struct pci_dev *pci_devices = 0;
+
+
+/*
+ * The bridge_id field is an offset of an item into the array
+ * BRIDGE_MAPPING_TYPE. 0xff indicates that the device is not a PCI
+ * bridge, or that we don't know for the moment how to configure it.
+ * I'm trying to do my best so that the kernel stays small. Different
+ * chipset can have same optimization structure. i486 and pentium
+ * chipsets from the same manufacturer usually have the same
+ * structure.
+ */
+#define DEVICE(vid,did,name) \
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name), 0xff}
+
+#define BRIDGE(vid,did,name,bridge) \
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name), (bridge)}
+
+/*
+ * Sorted in ascending order by vendor and device.
+ * Use binary search for lookup. If you add a device make sure
+ * it is sequential by both vendor and device id.
+ */
+struct pci_dev_info dev_info[] = {
+ DEVICE( COMPAQ, COMPAQ_1280, "QVision 1280/p"),
+ DEVICE( COMPAQ, COMPAQ_SMART2P, "Smart-2/P RAID Controller"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100,"Netelligent 10/100"),
+ DEVICE( COMPAQ, COMPAQ_NETEL10, "Netelligent 10"),
+ DEVICE( COMPAQ, COMPAQ_NETFLEX3I,"NetFlex 3"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100D,"Netelligent 10/100 Dual"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100PI,"Netelligent 10/100 ProLiant"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100I,"Netelligent 10/100 Integrated"),
+ DEVICE( COMPAQ, COMPAQ_THUNDER, "ThunderLAN"),
+ DEVICE( COMPAQ, COMPAQ_NETFLEX3B,"NetFlex 3 BNC"),
+ DEVICE( NCR, NCR_53C810, "53c810"),
+ DEVICE( NCR, NCR_53C820, "53c820"),
+ DEVICE( NCR, NCR_53C825, "53c825"),
+ DEVICE( NCR, NCR_53C815, "53c815"),
+ DEVICE( NCR, NCR_53C860, "53c860"),
+ DEVICE( NCR, NCR_53C896, "53c896"),
+ DEVICE( NCR, NCR_53C895, "53c895"),
+ DEVICE( NCR, NCR_53C885, "53c885"),
+ DEVICE( NCR, NCR_53C875, "53c875"),
+ DEVICE( NCR, NCR_53C875J, "53c875J"),
+ DEVICE( ATI, ATI_68800, "68800AX"),
+ DEVICE( ATI, ATI_215CT222, "215CT222"),
+ DEVICE( ATI, ATI_210888CX, "210888CX"),
+ DEVICE( ATI, ATI_215GB, "Mach64 GB"),
+ DEVICE( ATI, ATI_215GD, "Mach64 GD (Rage Pro)"),
+ DEVICE( ATI, ATI_215GI, "Mach64 GI (Rage Pro)"),
+ DEVICE( ATI, ATI_215GP, "Mach64 GP (Rage Pro)"),
+ DEVICE( ATI, ATI_215GQ, "Mach64 GQ (Rage Pro)"),
+ DEVICE( ATI, ATI_215GT, "Mach64 GT (Rage II)"),
+ DEVICE( ATI, ATI_215GTB, "Mach64 GT (Rage II)"),
+ DEVICE( ATI, ATI_210888GX, "210888GX"),
+ DEVICE( ATI, ATI_215LG, "Mach64 LG (3D Rage LT)"),
+ DEVICE( ATI, ATI_264LT, "Mach64 LT"),
+ DEVICE( ATI, ATI_264VT, "Mach64 VT"),
+ DEVICE( VLSI, VLSI_82C592, "82C592-FC1"),
+ DEVICE( VLSI, VLSI_82C593, "82C593-FC1"),
+ DEVICE( VLSI, VLSI_82C594, "82C594-AFC2"),
+ DEVICE( VLSI, VLSI_82C597, "82C597-AFC2"),
+ DEVICE( VLSI, VLSI_82C541, "82C541 Lynx"),
+ DEVICE( VLSI, VLSI_82C543, "82C543 Lynx ISA"),
+ DEVICE( VLSI, VLSI_82C532, "82C532"),
+ DEVICE( VLSI, VLSI_82C534, "82C534"),
+ DEVICE( VLSI, VLSI_82C535, "82C535"),
+ DEVICE( VLSI, VLSI_82C147, "82C147"),
+ DEVICE( VLSI, VLSI_VAS96011, "VAS96011 (Golden Gate II)"),
+ DEVICE( ADL, ADL_2301, "2301"),
+ DEVICE( NS, NS_87415, "87415"),
+ DEVICE( NS, NS_87410, "87410"),
+ DEVICE( TSENG, TSENG_W32P_2, "ET4000W32P"),
+ DEVICE( TSENG, TSENG_W32P_b, "ET4000W32P rev B"),
+ DEVICE( TSENG, TSENG_W32P_c, "ET4000W32P rev C"),
+ DEVICE( TSENG, TSENG_W32P_d, "ET4000W32P rev D"),
+ DEVICE( TSENG, TSENG_ET6000, "ET6000"),
+ DEVICE( WEITEK, WEITEK_P9000, "P9000"),
+ DEVICE( WEITEK, WEITEK_P9100, "P9100"),
+ BRIDGE( DEC, DEC_BRD, "DC21050", 0x00),
+ DEVICE( DEC, DEC_TULIP, "DC21040"),
+ DEVICE( DEC, DEC_TGA, "DC21030 (TGA)"),
+ DEVICE( DEC, DEC_TULIP_FAST, "DC21140"),
+ DEVICE( DEC, DEC_TGA2, "TGA2"),
+ DEVICE( DEC, DEC_FDDI, "DEFPA"),
+ DEVICE( DEC, DEC_TULIP_PLUS, "DC21041"),
+ DEVICE( DEC, DEC_21142, "DC21142"),
+ DEVICE( DEC, DEC_21052, "DC21052"),
+ DEVICE( DEC, DEC_21150, "DC21150"),
+ DEVICE( DEC, DEC_21152, "DC21152"),
+ DEVICE( CIRRUS, CIRRUS_7548, "GD 7548"),
+ DEVICE( CIRRUS, CIRRUS_5430, "GD 5430"),
+ DEVICE( CIRRUS, CIRRUS_5434_4, "GD 5434"),
+ DEVICE( CIRRUS, CIRRUS_5434_8, "GD 5434"),
+ DEVICE( CIRRUS, CIRRUS_5436, "GD 5436"),
+ DEVICE( CIRRUS, CIRRUS_5446, "GD 5446"),
+ DEVICE( CIRRUS, CIRRUS_5480, "GD 5480"),
+ DEVICE( CIRRUS, CIRRUS_5464, "GD 5464"),
+ DEVICE( CIRRUS, CIRRUS_5465, "GD 5465"),
+ DEVICE( CIRRUS, CIRRUS_6729, "CL 6729"),
+ DEVICE( CIRRUS, CIRRUS_6832, "PD 6832"),
+ DEVICE( CIRRUS, CIRRUS_7542, "CL 7542"),
+ DEVICE( CIRRUS, CIRRUS_7543, "CL 7543"),
+ DEVICE( CIRRUS, CIRRUS_7541, "CL 7541"),
+ DEVICE( IBM, IBM_FIRE_CORAL, "Fire Coral"),
+ DEVICE( IBM, IBM_TR, "Token Ring"),
+ DEVICE( IBM, IBM_82G2675, "82G2675"),
+ DEVICE( IBM, IBM_MCA, "MicroChannel"),
+ DEVICE( IBM, IBM_82351, "82351"),
+ DEVICE( IBM, IBM_SERVERAID, "ServeRAID"),
+ DEVICE( IBM, IBM_TR_WAKE, "Wake On LAN Token Ring"),
+ DEVICE( IBM, IBM_3780IDSP, "MWave DSP"),
+ DEVICE( WD, WD_7197, "WD 7197"),
+ DEVICE( AMD, AMD_LANCE, "79C970"),
+ DEVICE( AMD, AMD_SCSI, "53C974"),
+ DEVICE( TRIDENT, TRIDENT_9397, "Cyber9397"),
+ DEVICE( TRIDENT, TRIDENT_9420, "TG 9420"),
+ DEVICE( TRIDENT, TRIDENT_9440, "TG 9440"),
+ DEVICE( TRIDENT, TRIDENT_9660, "TG 9660 / Cyber9385"),
+ DEVICE( TRIDENT, TRIDENT_9750, "Image 975"),
+ DEVICE( AI, AI_M1435, "M1435"),
+ DEVICE( MATROX, MATROX_MGA_2, "Atlas PX2085"),
+ DEVICE( MATROX, MATROX_MIL, "Millennium"),
+ DEVICE( MATROX, MATROX_MYS, "Mystique"),
+ DEVICE( MATROX, MATROX_MIL_2, "Millennium II"),
+ DEVICE( MATROX, MATROX_MIL_2_AGP,"Millennium II AGP"),
+ DEVICE( MATROX, MATROX_MGA_IMP, "MGA Impression"),
+ DEVICE( CT, CT_65545, "65545"),
+ DEVICE( CT, CT_65548, "65548"),
+ DEVICE( CT, CT_65550, "65550"),
+ DEVICE( CT, CT_65554, "65554"),
+ DEVICE( CT, CT_65555, "65555"),
+ DEVICE( MIRO, MIRO_36050, "ZR36050"),
+ DEVICE( NEC, NEC_PCX2, "PowerVR PCX2"),
+ DEVICE( FD, FD_36C70, "TMC-18C30"),
+ DEVICE( SI, SI_5591_AGP, "5591/5592 AGP"),
+ DEVICE( SI, SI_6202, "6202"),
+ DEVICE( SI, SI_503, "85C503"),
+ DEVICE( SI, SI_ACPI, "ACPI"),
+ DEVICE( SI, SI_5597_VGA, "5597/5598 VGA"),
+ DEVICE( SI, SI_6205, "6205"),
+ DEVICE( SI, SI_501, "85C501"),
+ DEVICE( SI, SI_496, "85C496"),
+ DEVICE( SI, SI_601, "85C601"),
+ DEVICE( SI, SI_5107, "5107"),
+ DEVICE( SI, SI_5511, "85C5511"),
+ DEVICE( SI, SI_5513, "85C5513"),
+ DEVICE( SI, SI_5571, "5571"),
+ DEVICE( SI, SI_5591, "5591/5592 Host"),
+ DEVICE( SI, SI_5597, "5597/5598 Host"),
+ DEVICE( SI, SI_7001, "7001 USB"),
+ DEVICE( HP, HP_J2585A, "J2585A"),
+ DEVICE( HP, HP_J2585B, "J2585B (Lassen)"),
+ DEVICE( PCTECH, PCTECH_RZ1000, "RZ1000 (buggy)"),
+ DEVICE( PCTECH, PCTECH_RZ1001, "RZ1001 (buggy?)"),
+ DEVICE( PCTECH, PCTECH_SAMURAI_0,"Samurai 0"),
+ DEVICE( PCTECH, PCTECH_SAMURAI_1,"Samurai 1"),
+ DEVICE( PCTECH, PCTECH_SAMURAI_IDE,"Samurai IDE"),
+ DEVICE( DPT, DPT, "SmartCache/Raid"),
+ DEVICE( OPTI, OPTI_92C178, "92C178"),
+ DEVICE( OPTI, OPTI_82C557, "82C557 Viper-M"),
+ DEVICE( OPTI, OPTI_82C558, "82C558 Viper-M ISA+IDE"),
+ DEVICE( OPTI, OPTI_82C621, "82C621"),
+ DEVICE( OPTI, OPTI_82C700, "82C700"),
+ DEVICE( OPTI, OPTI_82C701, "82C701 FireStar Plus"),
+ DEVICE( OPTI, OPTI_82C814, "82C814 Firebridge 1"),
+ DEVICE( OPTI, OPTI_82C822, "82C822"),
+ DEVICE( OPTI, OPTI_82C825, "82C825 Firebridge 2"),
+ DEVICE( SGS, SGS_2000, "STG 2000X"),
+ DEVICE( SGS, SGS_1764, "STG 1764X"),
+ DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER_NC, "MultiMaster NC"),
+ DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER, "MultiMaster"),
+ DEVICE( BUSLOGIC, BUSLOGIC_FLASHPOINT, "FlashPoint"),
+ DEVICE( TI, TI_TVP4010, "TVP4010 Permedia"),
+ DEVICE( TI, TI_TVP4020, "TVP4020 Permedia 2"),
+ DEVICE( TI, TI_PCI1130, "PCI1130"),
+ DEVICE( TI, TI_PCI1131, "PCI1131"),
+ DEVICE( TI, TI_PCI1250, "PCI1250"),
+ DEVICE( OAK, OAK_OTI107, "OTI107"),
+ DEVICE( WINBOND2, WINBOND2_89C940,"NE2000-PCI"),
+ DEVICE( MOTOROLA, MOTOROLA_MPC105,"MPC105 Eagle"),
+ DEVICE( MOTOROLA, MOTOROLA_MPC106,"MPC106 Grackle"),
+ DEVICE( MOTOROLA, MOTOROLA_RAVEN, "Raven"),
+ DEVICE( PROMISE, PROMISE_20246, "IDE UltraDMA/33"),
+ DEVICE( PROMISE, PROMISE_5300, "DC5030"),
+ DEVICE( N9, N9_I128, "Imagine 128"),
+ DEVICE( N9, N9_I128_2, "Imagine 128v2"),
+ DEVICE( UMC, UMC_UM8673F, "UM8673F"),
+ BRIDGE( UMC, UMC_UM8891A, "UM8891A", 0x01),
+ DEVICE( UMC, UMC_UM8886BF, "UM8886BF"),
+ DEVICE( UMC, UMC_UM8886A, "UM8886A"),
+ BRIDGE( UMC, UMC_UM8881F, "UM8881F", 0x02),
+ DEVICE( UMC, UMC_UM8886F, "UM8886F"),
+ DEVICE( UMC, UMC_UM9017F, "UM9017F"),
+ DEVICE( UMC, UMC_UM8886N, "UM8886N"),
+ DEVICE( UMC, UMC_UM8891N, "UM8891N"),
+ DEVICE( X, X_AGX016, "ITT AGX016"),
+ DEVICE( PICOP, PICOP_PT86C52X, "PT86C52x Vesuvius"),
+ DEVICE( PICOP, PICOP_PT80C524, "PT80C524 Nile"),
+ DEVICE( APPLE, APPLE_BANDIT, "Bandit"),
+ DEVICE( APPLE, APPLE_GC, "Grand Central"),
+ DEVICE( APPLE, APPLE_HYDRA, "Hydra"),
+ DEVICE( NEXGEN, NEXGEN_82C501, "82C501"),
+ DEVICE( QLOGIC, QLOGIC_ISP1020, "ISP1020"),
+ DEVICE( QLOGIC, QLOGIC_ISP1022, "ISP1022"),
+ DEVICE( CYRIX, CYRIX_5510, "5510"),
+ DEVICE( CYRIX, CYRIX_PCI_MASTER,"PCI Master"),
+ DEVICE( CYRIX, CYRIX_5520, "5520"),
+ DEVICE( CYRIX, CYRIX_5530_LEGACY,"5530 Kahlua Legacy"),
+ DEVICE( CYRIX, CYRIX_5530_SMI, "5530 Kahlua SMI"),
+ DEVICE( CYRIX, CYRIX_5530_IDE, "5530 Kahlua IDE"),
+ DEVICE( CYRIX, CYRIX_5530_AUDIO,"5530 Kahlua Audio"),
+ DEVICE( CYRIX, CYRIX_5530_VIDEO,"5530 Kahlua Video"),
+ DEVICE( LEADTEK, LEADTEK_805, "S3 805"),
+ DEVICE( CONTAQ, CONTAQ_82C599, "82C599"),
+ DEVICE( CONTAQ, CONTAQ_82C693, "82C693"),
+ DEVICE( OLICOM, OLICOM_OC3136, "OC-3136/3137"),
+ DEVICE( OLICOM, OLICOM_OC2315, "OC-2315"),
+ DEVICE( OLICOM, OLICOM_OC2325, "OC-2325"),
+ DEVICE( OLICOM, OLICOM_OC2183, "OC-2183/2185"),
+ DEVICE( OLICOM, OLICOM_OC2326, "OC-2326"),
+ DEVICE( OLICOM, OLICOM_OC6151, "OC-6151/6152"),
+ DEVICE( SUN, SUN_EBUS, "EBUS"),
+ DEVICE( SUN, SUN_HAPPYMEAL, "Happy Meal Ethernet"),
+ DEVICE( SUN, SUN_SIMBA, "Advanced PCI Bridge"),
+ DEVICE( SUN, SUN_PBM, "PCI Bus Module"),
+ DEVICE( SUN, SUN_SABRE, "Ultra IIi PCI"),
+ DEVICE( CMD, CMD_640, "640 (buggy)"),
+ DEVICE( CMD, CMD_643, "643"),
+ DEVICE( CMD, CMD_646, "646"),
+ DEVICE( CMD, CMD_670, "670"),
+ DEVICE( VISION, VISION_QD8500, "QD-8500"),
+ DEVICE( VISION, VISION_QD8580, "QD-8580"),
+ DEVICE( BROOKTREE, BROOKTREE_848, "Bt848"),
+ DEVICE( BROOKTREE, BROOKTREE_849A, "Bt849"),
+ DEVICE( BROOKTREE, BROOKTREE_8474, "Bt8474"),
+ DEVICE( SIERRA, SIERRA_STB, "STB Horizon 64"),
+ DEVICE( ACC, ACC_2056, "2056"),
+ DEVICE( WINBOND, WINBOND_83769, "W83769F"),
+ DEVICE( WINBOND, WINBOND_82C105, "SL82C105"),
+ DEVICE( WINBOND, WINBOND_83C553, "W83C553"),
+ DEVICE( DATABOOK, DATABOOK_87144, "DB87144"),
+ DEVICE( PLX, PLX_SPCOM200, "SPCom 200 PCI serial I/O"),
+ DEVICE( PLX, PLX_9050, "PLX9050 PCI <-> IOBus Bridge"),
+ DEVICE( PLX, PLX_9080, "PCI9080 I2O"),
+ DEVICE( MADGE, MADGE_MK2, "Smart 16/4 BM Mk2 Ringnode"),
+ DEVICE( 3COM, 3COM_3C339, "3C339 TokenRing"),
+ DEVICE( 3COM, 3COM_3C590, "3C590 10bT"),
+ DEVICE( 3COM, 3COM_3C595TX, "3C595 100bTX"),
+ DEVICE( 3COM, 3COM_3C595T4, "3C595 100bT4"),
+ DEVICE( 3COM, 3COM_3C595MII, "3C595 100b-MII"),
+ DEVICE( 3COM, 3COM_3C900TPO, "3C900 10bTPO"),
+ DEVICE( 3COM, 3COM_3C900COMBO,"3C900 10b Combo"),
+ DEVICE( 3COM, 3COM_3C905TX, "3C905 100bTX"),
+ DEVICE( 3COM, 3COM_3C905T4, "3C905 100bT4"),
+ DEVICE( 3COM, 3COM_3C905B_TX, "3C905B 100bTX"),
+ DEVICE( SMC, SMC_EPIC100, "9432 TX"),
+ DEVICE( AL, AL_M1445, "M1445"),
+ DEVICE( AL, AL_M1449, "M1449"),
+ DEVICE( AL, AL_M1451, "M1451"),
+ DEVICE( AL, AL_M1461, "M1461"),
+ DEVICE( AL, AL_M1489, "M1489"),
+ DEVICE( AL, AL_M1511, "M1511"),
+ DEVICE( AL, AL_M1513, "M1513"),
+ DEVICE( AL, AL_M1521, "M1521"),
+ DEVICE( AL, AL_M1523, "M1523"),
+ DEVICE( AL, AL_M1531, "M1531 Aladdin IV"),
+ DEVICE( AL, AL_M1533, "M1533 Aladdin IV"),
+ DEVICE( AL, AL_M3307, "M3307 MPEG-1 decoder"),
+ DEVICE( AL, AL_M4803, "M4803"),
+ DEVICE( AL, AL_M5219, "M5219"),
+ DEVICE( AL, AL_M5229, "M5229 TXpro"),
+ DEVICE( AL, AL_M5237, "M5237 USB"),
+ DEVICE( SURECOM, SURECOM_NE34, "NE-34PCI LAN"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2070, "Magicgraph NM2070"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128V, "MagicGraph 128V"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128ZV, "MagicGraph 128ZV"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2160, "MagicGraph NM2160"),
+ DEVICE( ASP, ASP_ABP940, "ABP940"),
+ DEVICE( ASP, ASP_ABP940U, "ABP940U"),
+ DEVICE( ASP, ASP_ABP940UW, "ABP940UW"),
+ DEVICE( MACRONIX, MACRONIX_MX98713,"MX98713"),
+ DEVICE( MACRONIX, MACRONIX_MX987x5,"MX98715 / MX98725"),
+ DEVICE( CERN, CERN_SPSB_PMC, "STAR/RD24 SCI-PCI (PMC)"),
+ DEVICE( CERN, CERN_SPSB_PCI, "STAR/RD24 SCI-PCI (PMC)"),
+ DEVICE( CERN, CERN_HIPPI_DST, "HIPPI destination"),
+ DEVICE( CERN, CERN_HIPPI_SRC, "HIPPI source"),
+ DEVICE( IMS, IMS_8849, "8849"),
+ DEVICE( TEKRAM2, TEKRAM2_690c, "DC690c"),
+ DEVICE( TUNDRA, TUNDRA_CA91C042,"CA91C042 Universe"),
+ DEVICE( AMCC, AMCC_MYRINET, "Myrinet PCI (M2-PCI-32)"),
+ DEVICE( AMCC, AMCC_S5933, "S5933"),
+ DEVICE( AMCC, AMCC_S5933_HEPC3,"S5933 Traquair HEPC3"),
+ DEVICE( INTERG, INTERG_1680, "IGA-1680"),
+ DEVICE( INTERG, INTERG_1682, "IGA-1682"),
+ DEVICE( REALTEK, REALTEK_8029, "8029"),
+ DEVICE( REALTEK, REALTEK_8129, "8129"),
+ DEVICE( REALTEK, REALTEK_8139, "8139"),
+ DEVICE( TRUEVISION, TRUEVISION_T1000,"TARGA 1000"),
+ DEVICE( INIT, INIT_320P, "320 P"),
+ DEVICE( INIT, INIT_360P, "360 P"),
+ DEVICE( VIA, VIA_82C505, "VT 82C505"),
+ DEVICE( VIA, VIA_82C561, "VT 82C561"),
+ DEVICE( VIA, VIA_82C586_1, "VT 82C586 Apollo IDE"),
+ DEVICE( VIA, VIA_82C576, "VT 82C576 3V"),
+ DEVICE( VIA, VIA_82C585, "VT 82C585 Apollo VP1/VPX"),
+ DEVICE( VIA, VIA_82C586_0, "VT 82C586 Apollo ISA"),
+ DEVICE( VIA, VIA_82C595, "VT 82C595 Apollo VP2"),
+ DEVICE( VIA, VIA_82C597_0, "VT 82C597 Apollo VP3"),
+ DEVICE( VIA, VIA_82C926, "VT 82C926 Amazon"),
+ DEVICE( VIA, VIA_82C416, "VT 82C416MV"),
+ DEVICE( VIA, VIA_82C595_97, "VT 82C595 Apollo VP2/97"),
+ DEVICE( VIA, VIA_82C586_2, "VT 82C586 Apollo USB"),
+ DEVICE( VIA, VIA_82C586_3, "VT 82C586B Apollo ACPI"),
+ DEVICE( VIA, VIA_86C100A, "VT 86C100A"),
+ DEVICE( VIA, VIA_82C597_1, "VT 82C597 Apollo VP3 AGP"),
+ DEVICE( VORTEX, VORTEX_GDT60x0, "GDT 60x0"),
+ DEVICE( VORTEX, VORTEX_GDT6000B,"GDT 6000b"),
+ DEVICE( VORTEX, VORTEX_GDT6x10, "GDT 6110/6510"),
+ DEVICE( VORTEX, VORTEX_GDT6x20, "GDT 6120/6520"),
+ DEVICE( VORTEX, VORTEX_GDT6530, "GDT 6530"),
+ DEVICE( VORTEX, VORTEX_GDT6550, "GDT 6550"),
+ DEVICE( VORTEX, VORTEX_GDT6x17, "GDT 6117/6517"),
+ DEVICE( VORTEX, VORTEX_GDT6x27, "GDT 6127/6527"),
+ DEVICE( VORTEX, VORTEX_GDT6537, "GDT 6537"),
+ DEVICE( VORTEX, VORTEX_GDT6557, "GDT 6557"),
+ DEVICE( VORTEX, VORTEX_GDT6x15, "GDT 6115/6515"),
+ DEVICE( VORTEX, VORTEX_GDT6x25, "GDT 6125/6525"),
+ DEVICE( VORTEX, VORTEX_GDT6535, "GDT 6535"),
+ DEVICE( VORTEX, VORTEX_GDT6555, "GDT 6555"),
+ DEVICE( VORTEX, VORTEX_GDT6x17RP,"GDT 6117RP/6517RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x27RP,"GDT 6127RP/6527RP"),
+ DEVICE( VORTEX, VORTEX_GDT6537RP,"GDT 6537RP"),
+ DEVICE( VORTEX, VORTEX_GDT6557RP,"GDT 6557RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x11RP,"GDT 6111RP/6511RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x21RP,"GDT 6121RP/6521RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x17RP1,"GDT 6117RP1/6517RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x27RP1,"GDT 6127RP1/6527RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6537RP1,"GDT 6537RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6557RP1,"GDT 6557RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x11RP1,"GDT 6111RP1/6511RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x21RP1,"GDT 6121RP1/6521RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x17RP2,"GDT 6117RP2/6517RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6x27RP2,"GDT 6127RP2/6527RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6537RP2,"GDT 6537RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6557RP2,"GDT 6557RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6x11RP2,"GDT 6111RP2/6511RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6x21RP2,"GDT 6121RP2/6521RP2"),
+ DEVICE( EF, EF_ATM_FPGA, "155P-MF1 (FPGA)"),
+ DEVICE( EF, EF_ATM_ASIC, "155P-MF1 (ASIC)"),
+ DEVICE( FORE, FORE_PCA200PC, "PCA-200PC"),
+ DEVICE( FORE, FORE_PCA200E, "PCA-200E"),
+ DEVICE( IMAGINGTECH, IMAGINGTECH_ICPCI, "MVC IC-PCI"),
+ DEVICE( PHILIPS, PHILIPS_SAA7146,"SAA7146"),
+ DEVICE( CYCLONE, CYCLONE_SDK, "SDK"),
+ DEVICE( ALLIANCE, ALLIANCE_PROMOTIO, "Promotion-6410"),
+ DEVICE( ALLIANCE, ALLIANCE_PROVIDEO, "Provideo"),
+ DEVICE( ALLIANCE, ALLIANCE_AT24, "AT24"),
+ DEVICE( ALLIANCE, ALLIANCE_AT3D, "AT3D"),
+ DEVICE( VMIC, VMIC_VME, "VMIVME-7587"),
+ DEVICE( DIGI, DIGI_EPC, "AccelPort EPC"),
+ DEVICE( DIGI, DIGI_RIGHTSWITCH, "RightSwitch SE-6"),
+ DEVICE( DIGI, DIGI_XEM, "AccelPort Xem"),
+ DEVICE( DIGI, DIGI_XR, "AccelPort Xr"),
+ DEVICE( DIGI, DIGI_CX, "AccelPort C/X"),
+ DEVICE( DIGI, DIGI_XRJ, "AccelPort Xr/J"),
+ DEVICE( DIGI, DIGI_EPCJ, "AccelPort EPC/J"),
+ DEVICE( DIGI, DIGI_XR_920, "AccelPort Xr 920"),
+ DEVICE( MUTECH, MUTECH_MV1000, "MV-1000"),
+ DEVICE( RENDITION, RENDITION_VERITE,"Verite 1000"),
+ DEVICE( RENDITION, RENDITION_VERITE2100,"Verite 2100"),
+ DEVICE( TOSHIBA, TOSHIBA_601, "Laptop"),
+ DEVICE( TOSHIBA, TOSHIBA_TOPIC95,"ToPIC95"),
+ DEVICE( TOSHIBA, TOSHIBA_TOPIC97,"ToPIC97"),
+ DEVICE( RICOH, RICOH_RL5C466, "RL5C466"),
+ DEVICE( ARTOP, ARTOP_ATP850UF, "ATP850UF"),
+ DEVICE( ZEITNET, ZEITNET_1221, "1221"),
+ DEVICE( ZEITNET, ZEITNET_1225, "1225"),
+ DEVICE( OMEGA, OMEGA_82C092G, "82C092G"),
+ DEVICE( LITEON, LITEON_LNE100TX,"LNE100TX"),
+ DEVICE( NP, NP_PCI_FDDI, "NP-PCI"),
+ DEVICE( ATT, ATT_L56XMF, "L56xMF"),
+ DEVICE( SPECIALIX, SPECIALIX_IO8, "IO8+/PCI"),
+ DEVICE( SPECIALIX, SPECIALIX_XIO, "XIO/SIO host"),
+ DEVICE( SPECIALIX, SPECIALIX_RIO, "RIO host"),
+ DEVICE( AURAVISION, AURAVISION_VXP524,"VXP524"),
+ DEVICE( IKON, IKON_10115, "10115 Greensheet"),
+ DEVICE( IKON, IKON_10117, "10117 Greensheet"),
+ DEVICE( ZORAN, ZORAN_36057, "ZR36057"),
+ DEVICE( ZORAN, ZORAN_36120, "ZR36120"),
+ DEVICE( KINETIC, KINETIC_2915, "2915 CAMAC"),
+ DEVICE( COMPEX, COMPEX_ENET100VG4, "Readylink ENET100-VG4"),
+ DEVICE( COMPEX, COMPEX_RL2000, "ReadyLink 2000"),
+ DEVICE( RP, RP8OCTA, "RocketPort 8 Oct"),
+ DEVICE( RP, RP8INTF, "RocketPort 8 Intf"),
+ DEVICE( RP, RP16INTF, "RocketPort 16 Intf"),
+ DEVICE( RP, RP32INTF, "RocketPort 32 Intf"),
+ DEVICE( CYCLADES, CYCLOM_Y_Lo, "Cyclom-Y below 1Mbyte"),
+ DEVICE( CYCLADES, CYCLOM_Y_Hi, "Cyclom-Y above 1Mbyte"),
+ DEVICE( CYCLADES, CYCLOM_Z_Lo, "Cyclom-Z below 1Mbyte"),
+ DEVICE( CYCLADES, CYCLOM_Z_Hi, "Cyclom-Z above 1Mbyte"),
+ DEVICE( ESSENTIAL, ESSENTIAL_ROADRUNNER,"Roadrunner serial HIPPI"),
+ DEVICE( O2, O2_6832, "6832"),
+ DEVICE( 3DFX, 3DFX_VOODOO, "Voodoo"),
+ DEVICE( 3DFX, 3DFX_VOODOO2, "Voodoo2"),
+ DEVICE( SIGMADES, SIGMADES_6425, "REALmagic64/GX"),
+ DEVICE( STALLION, STALLION_ECHPCI832,"EasyConnection 8/32"),
+ DEVICE( STALLION, STALLION_ECHPCI864,"EasyConnection 8/64"),
+ DEVICE( STALLION, STALLION_EIOPCI,"EasyIO"),
+ DEVICE( OPTIBASE, OPTIBASE_FORGE, "MPEG Forge"),
+ DEVICE( OPTIBASE, OPTIBASE_FUSION,"MPEG Fusion"),
+ DEVICE( OPTIBASE, OPTIBASE_VPLEX, "VideoPlex"),
+ DEVICE( OPTIBASE, OPTIBASE_VPLEXCC,"VideoPlex CC"),
+ DEVICE( OPTIBASE, OPTIBASE_VQUEST,"VideoQuest"),
+ DEVICE( ASIX, ASIX_88140, "88140"),
+ DEVICE( SATSAGEM, SATSAGEM_PCR2101,"PCR2101 DVB receiver"),
+ DEVICE( SATSAGEM, SATSAGEM_TELSATTURBO,"Telsat Turbo DVB"),
+ DEVICE( ENSONIQ, ENSONIQ_AUDIOPCI,"AudioPCI"),
+ DEVICE( PICTUREL, PICTUREL_PCIVST,"PCIVST"),
+ DEVICE( NVIDIA_SGS, NVIDIA_SGS_RIVA128, "Riva 128"),
+ DEVICE( CBOARDS, CBOARDS_DAS1602_16,"DAS1602/16"),
+ DEVICE( SYMPHONY, SYMPHONY_101, "82C101"),
+ DEVICE( TEKRAM, TEKRAM_DC290, "DC-290"),
+ DEVICE( 3DLABS, 3DLABS_300SX, "GLINT 300SX"),
+ DEVICE( 3DLABS, 3DLABS_500TX, "GLINT 500TX"),
+ DEVICE( 3DLABS, 3DLABS_DELTA, "GLINT Delta"),
+ DEVICE( 3DLABS, 3DLABS_PERMEDIA,"PERMEDIA"),
+ DEVICE( 3DLABS, 3DLABS_MX, "GLINT MX"),
+ DEVICE( AVANCE, AVANCE_ALG2064, "ALG2064i"),
+ DEVICE( AVANCE, AVANCE_2302, "ALG-2302"),
+ DEVICE( NETVIN, NETVIN_NV5000SC,"NV5000"),
+ DEVICE( S3, S3_PLATO_PXS, "PLATO/PX (system)"),
+ DEVICE( S3, S3_ViRGE, "ViRGE"),
+ DEVICE( S3, S3_TRIO, "Trio32/Trio64"),
+ DEVICE( S3, S3_AURORA64VP, "Aurora64V+"),
+ DEVICE( S3, S3_TRIO64UVP, "Trio64UV+"),
+ DEVICE( S3, S3_ViRGE_VX, "ViRGE/VX"),
+ DEVICE( S3, S3_868, "Vision 868"),
+ DEVICE( S3, S3_928, "Vision 928-P"),
+ DEVICE( S3, S3_864_1, "Vision 864-P"),
+ DEVICE( S3, S3_864_2, "Vision 864-P"),
+ DEVICE( S3, S3_964_1, "Vision 964-P"),
+ DEVICE( S3, S3_964_2, "Vision 964-P"),
+ DEVICE( S3, S3_968, "Vision 968"),
+ DEVICE( S3, S3_TRIO64V2, "Trio64V2/DX or /GX"),
+ DEVICE( S3, S3_PLATO_PXG, "PLATO/PX (graphics)"),
+ DEVICE( S3, S3_ViRGE_DXGX, "ViRGE/DX or /GX"),
+ DEVICE( S3, S3_ViRGE_GX2, "ViRGE/GX2"),
+ DEVICE( S3, S3_ViRGE_MX, "ViRGE/MX"),
+ DEVICE( S3, S3_ViRGE_MXP, "ViRGE/MX+"),
+ DEVICE( S3, S3_ViRGE_MXPMV, "ViRGE/MX+MV"),
+ DEVICE( S3, S3_SONICVIBES, "SonicVibes"),
+ DEVICE( INTEL, INTEL_82375, "82375EB"),
+ BRIDGE( INTEL, INTEL_82424, "82424ZX Saturn", 0x00),
+ DEVICE( INTEL, INTEL_82378, "82378IB"),
+ DEVICE( INTEL, INTEL_82430, "82430ZX Aries"),
+ BRIDGE( INTEL, INTEL_82434, "82434LX Mercury/Neptune", 0x00),
+ DEVICE( INTEL, INTEL_82092AA_0,"82092AA PCMCIA bridge"),
+ DEVICE( INTEL, INTEL_82092AA_1,"82092AA EIDE"),
+ DEVICE( INTEL, INTEL_7116, "SAA7116"),
+ DEVICE( INTEL, INTEL_82596, "82596"),
+ DEVICE( INTEL, INTEL_82865, "82865"),
+ DEVICE( INTEL, INTEL_82557, "82557"),
+ DEVICE( INTEL, INTEL_82437, "82437"),
+ DEVICE( INTEL, INTEL_82371_0, "82371 Triton PIIX"),
+ DEVICE( INTEL, INTEL_82371_1, "82371 Triton PIIX"),
+ DEVICE( INTEL, INTEL_82371MX, "430MX - 82371MX MPIIX"),
+ DEVICE( INTEL, INTEL_82437MX, "430MX - 82437MX MTSC"),
+ DEVICE( INTEL, INTEL_82441, "82441FX Natoma"),
+ DEVICE( INTEL, INTEL_82380FB, "82380FB Mobile"),
+ DEVICE( INTEL, INTEL_82439, "82439HX Triton II"),
+ DEVICE( INTEL, INTEL_82371SB_0,"82371SB PIIX3 ISA"),
+ DEVICE( INTEL, INTEL_82371SB_1,"82371SB PIIX3 IDE"),
+ DEVICE( INTEL, INTEL_82371SB_2,"82371SB PIIX3 USB"),
+ DEVICE( INTEL, INTEL_82437VX, "82437VX Triton II"),
+ DEVICE( INTEL, INTEL_82439TX, "82439TX"),
+ DEVICE( INTEL, INTEL_82371AB_0,"82371AB PIIX4 ISA"),
+ DEVICE( INTEL, INTEL_82371AB, "82371AB PIIX4 IDE"),
+ DEVICE( INTEL, INTEL_82371AB_2,"82371AB PIIX4 USB"),
+ DEVICE( INTEL, INTEL_82371AB_3,"82371AB PIIX4 ACPI"),
+ DEVICE( INTEL, INTEL_82443LX_0,"440LX - 82443LX PAC Host"),
+ DEVICE( INTEL, INTEL_82443LX_1,"440LX - 82443LX PAC AGP"),
+ DEVICE( INTEL, INTEL_82443BX_0,"440BX - 82443BX Host"),
+ DEVICE( INTEL, INTEL_82443BX_1,"440BX - 82443BX AGP"),
+ DEVICE( INTEL, INTEL_82443BX_2,"440BX - 82443BX Host (no AGP)"),
+ DEVICE( INTEL, INTEL_82443GX_0,"440GX - 82443GX Host"),
+ DEVICE( INTEL, INTEL_82443GX_1,"440GX - 82443GX AGP"),
+ DEVICE( INTEL, INTEL_82443GX_2,"440GX - 82443GX Host (no AGP)"),
+ DEVICE( INTEL, INTEL_P6, "Orion P6"),
+ DEVICE( INTEL, INTEL_82450GX, "82450GX Orion P6"),
+ DEVICE( KTI, KTI_ET32P2, "ET32P2"),
+ DEVICE( ADAPTEC, ADAPTEC_7810, "AIC-7810 RAID"),
+ DEVICE( ADAPTEC, ADAPTEC_7850, "AIC-7850"),
+ DEVICE( ADAPTEC, ADAPTEC_7855, "AIC-7855"),
+ DEVICE( ADAPTEC, ADAPTEC_5800, "AIC-5800"),
+ DEVICE( ADAPTEC, ADAPTEC_7860, "AIC-7860"),
+ DEVICE( ADAPTEC, ADAPTEC_7861, "AIC-7861"),
+ DEVICE( ADAPTEC, ADAPTEC_7870, "AIC-7870"),
+ DEVICE( ADAPTEC, ADAPTEC_7871, "AIC-7871"),
+ DEVICE( ADAPTEC, ADAPTEC_7872, "AIC-7872"),
+ DEVICE( ADAPTEC, ADAPTEC_7873, "AIC-7873"),
+ DEVICE( ADAPTEC, ADAPTEC_7874, "AIC-7874"),
+ DEVICE( ADAPTEC, ADAPTEC_7895, "AIC-7895U"),
+ DEVICE( ADAPTEC, ADAPTEC_7880, "AIC-7880U"),
+ DEVICE( ADAPTEC, ADAPTEC_7881, "AIC-7881U"),
+ DEVICE( ADAPTEC, ADAPTEC_7882, "AIC-7882U"),
+ DEVICE( ADAPTEC, ADAPTEC_7883, "AIC-7883U"),
+ DEVICE( ADAPTEC, ADAPTEC_7884, "AIC-7884U"),
+ DEVICE( ADAPTEC, ADAPTEC_1030, "ABA-1030 DVB receiver"),
+ DEVICE( ADAPTEC2, ADAPTEC2_2940U2, "AHA-2940U2"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7890, "AIC-7890/1"),
+ DEVICE( ADAPTEC2, ADAPTEC2_3940U2, "AHA-3940U2"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7896, "AIC-7896/7"),
+ DEVICE( ATRONICS, ATRONICS_2015, "IDE-2015PL"),
+ DEVICE( TIGERJET, TIGERJET_300, "Tiger300 ISDN"),
+ DEVICE( ARK, ARK_STING, "Stingray"),
+ DEVICE( ARK, ARK_STINGARK, "Stingray ARK 2000PV"),
+ DEVICE( ARK, ARK_2000MT, "2000MT")
+};
+
+
+#ifdef CONFIG_PCI_OPTIMIZE
+
+/*
+ * An item of this structure has the following meaning:
+ * for each optimization, the register address, the mask
+ * and value to write to turn it on.
+ * There are 5 optimizations for the moment:
+ * Cache L2 write back (better than write through)
+ * Posted Write for CPU to PCI enable
+ * Posted Write for CPU to MEMORY enable
+ * Posted Write for PCI to MEMORY enable
+ * PCI Burst enable
+ *
+ * Half of the BIOSes I've met don't allow you to turn these on, and you
+ * can gain more than 15% on graphic accesses using those
+ * optimizations...
+ */
+struct optimization_type {
+	const char	*type;	/* human-readable optimization name */
+	const char	*off;	/* label printed when the bits are clear */
+	const char	*on;	/* label printed when the bits are set */
+} bridge_optimization[] = {
+	{"Cache L2", "write through", "write back"},
+	{"CPU-PCI posted write", "off", "on"},
+	{"CPU-Memory posted write", "off", "on"},
+	{"PCI-Memory posted write", "off", "on"},
+	{"PCI burst", "off", "on"}
+};
+
+/* Number of rows in bridge_optimization[] (and per-chipset group size
+ * in bridge_mapping[] below). */
+#define NUM_OPTIMIZATIONS \
+	(sizeof(bridge_optimization) / sizeof(bridge_optimization[0]))
+
+/*
+ * Per-chipset register recipes for the optimizations above.  The table
+ * is organized in groups of NUM_OPTIMIZATIONS consecutive entries, one
+ * group per bridge type; burst_bridge() indexes it as
+ * bridge_mapping[bridge_type * NUM_OPTIMIZATIONS + i].
+ * An entry with addr == 0 means "not supported on this chipset".
+ */
+struct bridge_mapping_type {
+	unsigned char	addr;	/* config space address */
+	unsigned char	mask;	/* bits controlling this optimization */
+	unsigned char	value;	/* bit pattern meaning "on" */
+} bridge_mapping[] = {
+	/*
+	 * Intel Neptune/Mercury/Saturn:
+	 *	If the internal cache is write back,
+	 *	the L2 cache must be write through!
+	 *	I've to check out how to control that
+	 *	for the moment, we won't touch the cache
+	 */
+	{0x0	,0x02	,0x02	},
+	{0x53	,0x02	,0x02	},
+	{0x53	,0x01	,0x01	},
+	{0x54	,0x01	,0x01	},
+	{0x54	,0x02	,0x02	},
+
+	/*
+	 * UMC 8891A Pentium chipset:
+	 *	Why did you think UMC was cheaper ??
+	 */
+	{0x50	,0x10	,0x00	},
+	{0x51	,0x40	,0x40	},
+	{0x0	,0x0	,0x0	},
+	{0x0	,0x0	,0x0	},
+	{0x0	,0x0	,0x0	},
+
+	/*
+	 * UMC UM8881F
+	 *	This is a dummy entry for my tests.
+	 *	I have this chipset and no docs....
+	 */
+	{0x0	,0x1	,0x1	},
+	{0x0	,0x2	,0x0	},
+	{0x0	,0x0	,0x0	},
+	{0x0	,0x0	,0x0	},
+	{0x0	,0x0	,0x0	}
+};
+
+#endif /* CONFIG_PCI_OPTIMIZE */
+
+
+/*
+ * Look up a (vendor, device) ID pair in the dev_info[] table.
+ * Returns a pointer to the matching entry, or 0 if the pair is unknown.
+ *
+ * device_info[] is sorted so we can use binary search.
+ * NOTE(review): correctness depends on dev_info[] really being sorted
+ * by (vendor, device) in ascending order -- verify against the table,
+ * which is maintained by hand above.
+ */
+struct pci_dev_info *pci_lookup_dev(unsigned int vendor, unsigned int dev)
+{
+	int min = 0,
+	    max = sizeof(dev_info)/sizeof(dev_info[0]) - 1;
+
+	for ( ; ; )
+	{
+		int i = (min + max) >> 1;
+		long order;
+
+		/* compare vendor first, then device, in one signed value */
+		order = dev_info[i].vendor - (long) vendor;
+		if (!order)
+			order = dev_info[i].device - (long) dev;
+
+		if (order < 0)
+		{
+			min = i + 1;
+			if ( min > max )
+				return 0;
+			continue;
+		}
+
+		if (order > 0)
+		{
+			max = i - 1;
+			if ( min > max )
+				return 0;
+			continue;
+		}
+
+		return & dev_info[ i ];
+	}
+}
+
+/*
+ * Translate a PCI class code into a human-readable string.
+ * CLASS is the 24-bit class/subclass/prog-if value; only the upper 16
+ * bits (base class + subclass) are used (class >> 8).  Returns a
+ * static string; unknown codes yield "Unknown class".
+ */
+const char *pci_strclass (unsigned int class)
+{
+	switch (class >> 8) {
+	      case PCI_CLASS_NOT_DEFINED:		return "Non-VGA device";
+	      case PCI_CLASS_NOT_DEFINED_VGA:	return "VGA compatible device";
+
+	      case PCI_CLASS_STORAGE_SCSI:	return "SCSI storage controller";
+	      case PCI_CLASS_STORAGE_IDE:	return "IDE interface";
+	      case PCI_CLASS_STORAGE_FLOPPY:	return "Floppy disk controller";
+	      case PCI_CLASS_STORAGE_IPI:	return "IPI bus controller";
+	      case PCI_CLASS_STORAGE_RAID:	return "RAID bus controller";
+	      case PCI_CLASS_STORAGE_OTHER:	return "Unknown mass storage controller";
+
+	      case PCI_CLASS_NETWORK_ETHERNET:	return "Ethernet controller";
+	      case PCI_CLASS_NETWORK_TOKEN_RING:	return "Token ring network controller";
+	      case PCI_CLASS_NETWORK_FDDI:	return "FDDI network controller";
+	      case PCI_CLASS_NETWORK_ATM:	return "ATM network controller";
+	      case PCI_CLASS_NETWORK_OTHER:	return "Network controller";
+
+	      case PCI_CLASS_DISPLAY_VGA:	return "VGA compatible controller";
+	      case PCI_CLASS_DISPLAY_XGA:	return "XGA compatible controller";
+	      case PCI_CLASS_DISPLAY_OTHER:	return "Display controller";
+
+	      case PCI_CLASS_MULTIMEDIA_VIDEO:	return "Multimedia video controller";
+	      case PCI_CLASS_MULTIMEDIA_AUDIO:	return "Multimedia audio controller";
+	      case PCI_CLASS_MULTIMEDIA_OTHER:	return "Multimedia controller";
+
+	      case PCI_CLASS_MEMORY_RAM:	return "RAM memory";
+	      case PCI_CLASS_MEMORY_FLASH:	return "FLASH memory";
+	      case PCI_CLASS_MEMORY_OTHER:	return "Memory";
+
+	      case PCI_CLASS_BRIDGE_HOST:	return "Host bridge";
+	      case PCI_CLASS_BRIDGE_ISA:	return "ISA bridge";
+	      case PCI_CLASS_BRIDGE_EISA:	return "EISA bridge";
+	      case PCI_CLASS_BRIDGE_MC:		return "MicroChannel bridge";
+	      case PCI_CLASS_BRIDGE_PCI:	return "PCI bridge";
+	      case PCI_CLASS_BRIDGE_PCMCIA:	return "PCMCIA bridge";
+	      case PCI_CLASS_BRIDGE_NUBUS:	return "NuBus bridge";
+	      case PCI_CLASS_BRIDGE_CARDBUS:	return "CardBus bridge";
+	      case PCI_CLASS_BRIDGE_OTHER:	return "Bridge";
+
+	      case PCI_CLASS_COMMUNICATION_SERIAL:	return "Serial controller";
+	      case PCI_CLASS_COMMUNICATION_PARALLEL:	return "Parallel controller";
+	      case PCI_CLASS_COMMUNICATION_OTHER:	return "Communication controller";
+
+	      case PCI_CLASS_SYSTEM_PIC:	return "PIC";
+	      case PCI_CLASS_SYSTEM_DMA:	return "DMA controller";
+	      case PCI_CLASS_SYSTEM_TIMER:	return "Timer";
+	      case PCI_CLASS_SYSTEM_RTC:	return "RTC";
+	      case PCI_CLASS_SYSTEM_OTHER:	return "System peripheral";
+
+	      case PCI_CLASS_INPUT_KEYBOARD:	return "Keyboard controller";
+	      case PCI_CLASS_INPUT_PEN:		return "Digitizer Pen";
+	      case PCI_CLASS_INPUT_MOUSE:	return "Mouse controller";
+	      case PCI_CLASS_INPUT_OTHER:	return "Input device controller";
+
+	      case PCI_CLASS_DOCKING_GENERIC:	return "Generic Docking Station";
+	      case PCI_CLASS_DOCKING_OTHER:	return "Docking Station";
+
+	      case PCI_CLASS_PROCESSOR_386:	return "386";
+	      case PCI_CLASS_PROCESSOR_486:	return "486";
+	      case PCI_CLASS_PROCESSOR_PENTIUM:	return "Pentium";
+	      case PCI_CLASS_PROCESSOR_ALPHA:	return "Alpha";
+	      case PCI_CLASS_PROCESSOR_POWERPC:	return "Power PC";
+	      case PCI_CLASS_PROCESSOR_CO:	return "Co-processor";
+
+	      case PCI_CLASS_SERIAL_FIREWIRE:	return "FireWire (IEEE 1394)";
+	      case PCI_CLASS_SERIAL_ACCESS:	return "ACCESS Bus";
+	      case PCI_CLASS_SERIAL_SSA:	return "SSA";
+	      case PCI_CLASS_SERIAL_USB:	return "USB Controller";
+	      case PCI_CLASS_SERIAL_FIBER:	return "Fiber Channel";
+
+	      default:				return "Unknown class";
+	}
+}
+
+
+/*
+ * Translate a 16-bit PCI vendor ID into a human-readable vendor name.
+ * Returns a static string; unknown IDs yield "Unknown vendor".
+ */
+const char *pci_strvendor(unsigned int vendor)
+{
+	switch (vendor) {
+	      case PCI_VENDOR_ID_COMPAQ:	return "Compaq";
+	      case PCI_VENDOR_ID_NCR:		return "NCR";
+	      case PCI_VENDOR_ID_ATI:		return "ATI";
+	      case PCI_VENDOR_ID_VLSI:		return "VLSI";
+	      case PCI_VENDOR_ID_ADL:		return "Advance Logic";
+	      case PCI_VENDOR_ID_NS:		return "NS";
+	      case PCI_VENDOR_ID_TSENG:		return "Tseng'Lab";
+	      case PCI_VENDOR_ID_WEITEK:	return "Weitek";
+	      case PCI_VENDOR_ID_DEC:		return "DEC";
+	      case PCI_VENDOR_ID_CIRRUS:	return "Cirrus Logic";
+	      case PCI_VENDOR_ID_IBM:		return "IBM";
+	      case PCI_VENDOR_ID_WD:		return "Western Digital";
+	      case PCI_VENDOR_ID_AMD:		return "AMD";
+	      case PCI_VENDOR_ID_TRIDENT:	return "Trident";
+	      case PCI_VENDOR_ID_AI:		return "Acer Incorporated";
+	      case PCI_VENDOR_ID_MATROX:	return "Matrox";
+	      case PCI_VENDOR_ID_CT:		return "Chips & Technologies";
+	      case PCI_VENDOR_ID_MIRO:		return "Miro";
+	      case PCI_VENDOR_ID_NEC:		return "NEC";
+	      case PCI_VENDOR_ID_FD:		return "Future Domain";
+	      case PCI_VENDOR_ID_SI:		return "Silicon Integrated Systems";
+	      case PCI_VENDOR_ID_HP:		return "Hewlett Packard";
+	      case PCI_VENDOR_ID_PCTECH:	return "PCTECH";
+	      case PCI_VENDOR_ID_DPT:		return "DPT";
+	      case PCI_VENDOR_ID_OPTI:		return "OPTi";
+	      case PCI_VENDOR_ID_SGS:		return "SGS Thomson";
+	      case PCI_VENDOR_ID_BUSLOGIC:	return "BusLogic";
+	      case PCI_VENDOR_ID_TI:		return "Texas Instruments";
+	      case PCI_VENDOR_ID_OAK:		return "OAK";
+	      case PCI_VENDOR_ID_WINBOND2:	return "Winbond";
+	      case PCI_VENDOR_ID_MOTOROLA:	return "Motorola";
+	      case PCI_VENDOR_ID_PROMISE:	return "Promise Technology";
+	      case PCI_VENDOR_ID_APPLE:		return "Apple";
+	      case PCI_VENDOR_ID_N9:		return "Number Nine";
+	      case PCI_VENDOR_ID_UMC:		return "UMC";
+	      case PCI_VENDOR_ID_X:		return "X TECHNOLOGY";
+	      case PCI_VENDOR_ID_NEXGEN:	return "Nexgen";
+	      case PCI_VENDOR_ID_QLOGIC:	return "Q Logic";
+	      case PCI_VENDOR_ID_LEADTEK:	return "Leadtek Research";
+	      case PCI_VENDOR_ID_CONTAQ:	return "Contaq";
+	      case PCI_VENDOR_ID_FOREX:		return "Forex";
+	      case PCI_VENDOR_ID_OLICOM:	return "Olicom";
+	      case PCI_VENDOR_ID_CMD:		return "CMD";
+	      case PCI_VENDOR_ID_VISION:	return "Vision";
+	      case PCI_VENDOR_ID_BROOKTREE:	return "Brooktree";
+	      case PCI_VENDOR_ID_SIERRA:	return "Sierra";
+	      case PCI_VENDOR_ID_ACC:		return "ACC MICROELECTRONICS";
+	      case PCI_VENDOR_ID_WINBOND:	return "Winbond";
+	      case PCI_VENDOR_ID_DATABOOK:	return "Databook";
+	      case PCI_VENDOR_ID_3COM:		return "3Com";
+	      case PCI_VENDOR_ID_SMC:		return "SMC";
+	      case PCI_VENDOR_ID_AL:		return "Acer Labs";
+	      case PCI_VENDOR_ID_MITSUBISHI:	return "Mitsubishi";
+	      case PCI_VENDOR_ID_NEOMAGIC:	return "Neomagic";
+	      case PCI_VENDOR_ID_ASP:		return "Advanced System Products";
+	      case PCI_VENDOR_ID_CERN:		return "CERN";
+	      case PCI_VENDOR_ID_IMS:		return "IMS";
+	      case PCI_VENDOR_ID_TEKRAM2:	return "Tekram";
+	      case PCI_VENDOR_ID_TUNDRA:	return "Tundra";
+	      case PCI_VENDOR_ID_AMCC:		return "AMCC";
+	      case PCI_VENDOR_ID_INTERG:	return "Intergraphics";
+	      case PCI_VENDOR_ID_REALTEK:	return "Realtek";
+	      case PCI_VENDOR_ID_TRUEVISION:	return "Truevision";
+	      case PCI_VENDOR_ID_INIT:		return "Initio Corp";
+	      case PCI_VENDOR_ID_VIA:		return "VIA Technologies";
+	      case PCI_VENDOR_ID_VORTEX:	return "VORTEX";
+	      case PCI_VENDOR_ID_EF:		return "Efficient Networks";
+	      case PCI_VENDOR_ID_FORE:		return "Fore Systems";
+	      case PCI_VENDOR_ID_IMAGINGTECH:	return "Imaging Technology";
+	      case PCI_VENDOR_ID_PHILIPS:	return "Philips";
+	      case PCI_VENDOR_ID_PLX:		return "PLX";
+	      case PCI_VENDOR_ID_ALLIANCE:	return "Alliance";
+	      case PCI_VENDOR_ID_VMIC:		return "VMIC";
+	      case PCI_VENDOR_ID_DIGI:		return "Digi Intl.";
+	      case PCI_VENDOR_ID_MUTECH:	return "Mutech";
+	      case PCI_VENDOR_ID_RENDITION:	return "Rendition";
+	      case PCI_VENDOR_ID_TOSHIBA:	return "Toshiba";
+	      case PCI_VENDOR_ID_RICOH:		return "Ricoh";
+	      case PCI_VENDOR_ID_ZEITNET:	return "ZeitNet";
+	      case PCI_VENDOR_ID_OMEGA:		return "Omega Micro";
+	      case PCI_VENDOR_ID_NP:		return "Network Peripherals";
+	      case PCI_VENDOR_ID_SPECIALIX:	return "Specialix";
+	      case PCI_VENDOR_ID_IKON:		return "Ikon";
+	      case PCI_VENDOR_ID_ZORAN:		return "Zoran";
+	      case PCI_VENDOR_ID_COMPEX:	return "Compex";
+	      case PCI_VENDOR_ID_RP:		return "Comtrol";
+	      case PCI_VENDOR_ID_CYCLADES:	return "Cyclades";
+	      case PCI_VENDOR_ID_3DFX:		return "3Dfx";
+	      case PCI_VENDOR_ID_SIGMADES:	return "Sigma Designs";
+	      case PCI_VENDOR_ID_OPTIBASE:	return "Optibase";
+	      case PCI_VENDOR_ID_NVIDIA_SGS:	return "NVidia/SGS Thomson";
+	      case PCI_VENDOR_ID_ENSONIQ:	return "Ensoniq";
+	      case PCI_VENDOR_ID_SYMPHONY:	return "Symphony";
+	      case PCI_VENDOR_ID_TEKRAM:	return "Tekram";
+	      case PCI_VENDOR_ID_3DLABS:	return "3Dlabs";
+	      case PCI_VENDOR_ID_AVANCE:	return "Avance";
+	      case PCI_VENDOR_ID_NETVIN:	return "NetVin";
+	      case PCI_VENDOR_ID_S3:		return "S3 Inc.";
+	      case PCI_VENDOR_ID_INTEL:		return "Intel";
+	      case PCI_VENDOR_ID_KTI:		return "KTI";
+	      case PCI_VENDOR_ID_ADAPTEC:	return "Adaptec";
+	      case PCI_VENDOR_ID_ADAPTEC2:	return "Adaptec";
+	      case PCI_VENDOR_ID_ATRONICS:	return "Atronics";
+	      case PCI_VENDOR_ID_ARK:		return "ARK Logic";
+	      case PCI_VENDOR_ID_ASIX:		return "ASIX";
+	      case PCI_VENDOR_ID_LITEON:	return "Lite-on";
+	      default:				return "Unknown vendor";
+	}
+}
+
+
+/*
+ * Translate a (vendor, device) ID pair into the device's name, as
+ * recorded in dev_info[].  Returns a static string; pairs not in the
+ * table yield "Unknown device".
+ */
+const char *pci_strdev(unsigned int vendor, unsigned int device)
+{
+	struct pci_dev_info *entry = pci_lookup_dev(vendor, device);
+
+	if (!entry)
+		return "Unknown device";
+	return entry->name;
+}
+
+
+
+/*
+ * Turn on/off PCI bridge optimization. This should allow benchmarking.
+ *
+ * BUS/DEVFN address the bridge in config space; POS selects the
+ * chipset's group of rows in bridge_mapping[] (it is multiplied by
+ * NUM_OPTIMIZATIONS below); TURN_ON non-zero enables the
+ * optimizations, zero disables them.  Compiles to a no-op unless
+ * CONFIG_PCI_OPTIMIZE is set.
+ */
+static void burst_bridge(unsigned char bus, unsigned char devfn,
+			 unsigned char pos, int turn_on)
+{
+#ifdef CONFIG_PCI_OPTIMIZE
+	struct bridge_mapping_type *bmap;
+	unsigned char val;
+	int i;
+
+	/* index of this chipset's first row in bridge_mapping[] */
+	pos *= NUM_OPTIMIZATIONS;
+	printk("PCI bridge optimization.\n");
+	for (i = 0; i < NUM_OPTIMIZATIONS; i++) {
+		printk("    %s: ", bridge_optimization[i].type);
+		bmap = &bridge_mapping[pos + i];
+		if (!bmap->addr) {
+			/* addr == 0 marks an optimization this chipset lacks */
+			printk("Not supported.");
+		} else {
+			pcibios_read_config_byte(bus, devfn, bmap->addr, &val);
+			if ((val & bmap->mask) == bmap->value) {
+				/* currently ON */
+				printk("%s.", bridge_optimization[i].on);
+				if (!turn_on) {
+					/* set all mask bits, then subtract the
+					 * "on" pattern to flip to "off" */
+					pcibios_write_config_byte(bus, devfn,
+								  bmap->addr,
+								  (val | bmap->mask)
+								  - bmap->value);
+					printk("Changed!  Now %s.", bridge_optimization[i].off);
+				}
+			} else {
+				/* currently OFF */
+				printk("%s.", bridge_optimization[i].off);
+				if (turn_on) {
+					/* clear the mask bits, then add the
+					 * "on" pattern */
+					pcibios_write_config_byte(bus, devfn,
+								  bmap->addr,
+								  (val & (0xff - bmap->mask))
+								  + bmap->value);
+					printk("Changed!  Now %s.", bridge_optimization[i].on);
+				}
+			}
+		}
+		printk("\n");
+	}
+#endif /* CONFIG_PCI_OPTIMIZE */
+}
+
+
+/*
+ * Convert some of the configuration space registers of the device at
+ * address (bus,devfn) into a string (possibly several lines each).
+ * The configuration string is stored starting at buf[len].  Returns
+ * the number of bytes written, or -1 if the string would exceed the
+ * size of the buffer (SIZE).
+ */
+static int sprint_dev_config(struct pci_dev *dev, char *buf, int size)
+{
+	unsigned long base;
+	unsigned int l, class_rev, bus, devfn, last_reg;
+	unsigned short vendor, device, status;
+	unsigned char bist, latency, min_gnt, max_lat, hdr_type;
+	int reg, len = 0;
+	const char *str;
+
+	bus    = dev->bus->number;
+	devfn  = dev->devfn;
+
+	pcibios_read_config_byte (bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+	pcibios_read_config_dword(bus, devfn, PCI_CLASS_REVISION, &class_rev);
+	pcibios_read_config_word (bus, devfn, PCI_VENDOR_ID, &vendor);
+	pcibios_read_config_word (bus, devfn, PCI_DEVICE_ID, &device);
+	pcibios_read_config_word (bus, devfn, PCI_STATUS, &status);
+	pcibios_read_config_byte (bus, devfn, PCI_BIST, &bist);
+	pcibios_read_config_byte (bus, devfn, PCI_LATENCY_TIMER, &latency);
+	pcibios_read_config_byte (bus, devfn, PCI_MIN_GNT, &min_gnt);
+	pcibios_read_config_byte (bus, devfn, PCI_MAX_LAT, &max_lat);
+	if (len + 80 > size) {
+		return -1;
+	}
+	len += sprintf(buf + len, "  Bus %2d, device %3d, function %2d:\n",
+		       bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+	if (len + 80 > size) {
+		return -1;
+	}
+	len += sprintf(buf + len, "    %s: %s %s (rev %d).\n      ",
+		       pci_strclass(class_rev >> 8), pci_strvendor(vendor),
+		       pci_strdev(vendor, device), class_rev & 0xff);
+
+	if (!pci_lookup_dev(vendor, device)) {
+		len += sprintf(buf + len,
+			       "Vendor id=%x. Device id=%x.\n      ",
+			       vendor, device);
+	}
+
+	/*
+	 * Decode the DEVSEL timing.  The fourth (reserved) encoding of the
+	 * two DEVSEL status bits used to leave str as a null pointer, which
+	 * was then passed to strlen()/sprintf() -- undefined behavior.
+	 * Default to an empty string instead; output is unchanged for all
+	 * defined encodings.
+	 */
+	str = "";
+	switch (status & PCI_STATUS_DEVSEL_MASK) {
+	      case PCI_STATUS_DEVSEL_FAST:   str = "Fast devsel.  "; break;
+	      case PCI_STATUS_DEVSEL_MEDIUM: str = "Medium devsel.  "; break;
+	      case PCI_STATUS_DEVSEL_SLOW:   str = "Slow devsel.  "; break;
+	}
+	if (len + strlen(str) > size) {
+		return -1;
+	}
+	/* "%s" rather than passing str as the format string: str never
+	 * contains '%' today, but a variable format is a latent bug
+	 * (CERT FIO30-C). */
+	len += sprintf(buf + len, "%s", str);
+
+	if (status & PCI_STATUS_FAST_BACK) {
+#		define fast_b2b_capable	"Fast back-to-back capable.  "
+		if (len + strlen(fast_b2b_capable) > size) {
+			return -1;
+		}
+		len += sprintf(buf + len, fast_b2b_capable);
+#		undef fast_b2b_capable
+	}
+
+	if (bist & PCI_BIST_CAPABLE) {
+#		define BIST_capable	"BIST capable.  "
+		if (len + strlen(BIST_capable) > size) {
+			return -1;
+		}
+		len += sprintf(buf + len, BIST_capable);
+#		undef BIST_capable
+	}
+
+	if (dev->irq) {
+		if (len + 40 > size) {
+			return -1;
+		}
+		len += sprintf(buf + len, "IRQ %d.  ", dev->irq);
+	}
+
+	if (dev->master) {
+		if (len + 80 > size) {
+			return -1;
+		}
+		len += sprintf(buf + len, "Master Capable.  ");
+		if (latency)
+		  len += sprintf(buf + len, "Latency=%d.  ", latency);
+		else
+		  len += sprintf(buf + len, "No bursts.  ");
+		if (min_gnt)
+		  len += sprintf(buf + len, "Min Gnt=%d.", min_gnt);
+		if (max_lat)
+		  len += sprintf(buf + len, "Max Lat=%d.", max_lat);
+	}
+
+	/* header type selects how many base address registers exist */
+	switch (hdr_type & 0x7f) {
+	      case 0:
+		last_reg = PCI_BASE_ADDRESS_5;
+		break;
+	      case 1:
+		last_reg = PCI_BASE_ADDRESS_1;
+		break;
+	      default:
+		last_reg = 0;
+	}
+	for (reg = PCI_BASE_ADDRESS_0; reg <= last_reg; reg += 4) {
+		if (len + 40 > size) {
+			return -1;
+		}
+		pcibios_read_config_dword(bus, devfn, reg, &l);
+		base = l;
+		if (!base) {
+			continue;
+		}
+
+		if (base & PCI_BASE_ADDRESS_SPACE_IO) {
+			len += sprintf(buf + len,
+				       "\n      I/O at 0x%lx.",
+				       base & PCI_BASE_ADDRESS_IO_MASK);
+		} else {
+			const char *pref, *type = "unknown";
+
+			if (base & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+				pref = "P";
+			} else {
+				pref = "Non-p";
+			}
+			switch (base & PCI_BASE_ADDRESS_MEM_TYPE_MASK) {
+			      case PCI_BASE_ADDRESS_MEM_TYPE_32:
+				type = "32 bit"; break;
+			      case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+				type = "20 bit"; break;
+			      case PCI_BASE_ADDRESS_MEM_TYPE_64:
+				type = "64 bit";
+				/* read top 32 bit address of base addr: */
+				reg += 4;
+				pcibios_read_config_dword(bus, devfn, reg, &l);
+				/* NOTE(review): base is unsigned long; on a
+				 * 32-bit build this shift truncates the upper
+				 * half of a 64-bit BAR -- confirm intent. */
+				base |= ((u64) l) << 32;
+				break;
+			}
+			len += sprintf(buf + len,
+				       "\n      %srefetchable %s memory at "
+				       "0x%lx.", pref, type,
+				       base & PCI_BASE_ADDRESS_MEM_MASK);
+		}
+	}
+
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+
+
+/*
+ * Return list of PCI devices as a character string for /proc/pci.
+ * BUF is a buffer that is PAGE_SIZE bytes long.  Returns the number
+ * of bytes written; if a device description would overflow the page,
+ * output is truncated and a warning message is appended instead.
+ */
+int get_pci_list(char *buf)
+{
+	int nprinted, len, size;
+	struct pci_dev *dev;
+#	define MSG "\nwarning: page-size limit reached!\n"
+
+	/* reserve space for the truncation warning message: */
+	size = PAGE_SIZE - (strlen(MSG) + 1);
+	len = sprintf(buf, "PCI devices found:\n");
+
+	for (dev = pci_devices; dev; dev = dev->next) {
+		nprinted = sprint_dev_config(dev, buf + len, size - len);
+		if (nprinted < 0) {
+			/* out of room: append the warning and stop */
+			return len + sprintf(buf + len, MSG);
+		}
+		len += nprinted;
+	}
+	return len;
+}
+
+
+/*
+ * pci_malloc() returns initialized (zeroed) memory of size SIZE.  Can
+ * be used only while pci_init() is active: it is a simple bump
+ * allocator that carves memory off the boot-time pool at *MEM_STARTP
+ * and advances the pointer, rounded up to pointer alignment.  The
+ * memory is never freed.
+ */
+static void *pci_malloc(long size, unsigned long *mem_startp)
+{
+	void *mem;
+
+#ifdef DEBUG
+	/* was "%p" with an unsigned long argument -- a format/argument
+	 * type mismatch (undefined behavior); print it as hex instead */
+	printk("...pci_malloc(size=%ld,mem=0x%lx)", size, *mem_startp);
+#endif
+	mem = (void*) *mem_startp;
+	/* advance by size rounded up to a multiple of sizeof(void*) */
+	*mem_startp += (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
+	memset(mem, 0, size);
+	return mem;
+}
+
+
+/*
+ * Probe every devfn on BUS, allocate a pci_dev for each device found
+ * (from the pci_malloc() pool at MEM_STARTP), link it into the global
+ * pci_devices list and the bus's device list, and recurse depth-first
+ * into any PCI-PCI bridges, assigning secondary/subordinate bus
+ * numbers where the BIOS has not already done so.  Returns the
+ * highest bus number found at or below BUS.
+ */
+static unsigned int scan_bus(struct pci_bus *bus, unsigned long *mem_startp)
+{
+	unsigned int devfn, l, max;
+	unsigned char cmd, tmp, hdr_type, ht, is_multi = 0;
+	struct pci_dev_info *info;
+	struct pci_dev *dev;
+	struct pci_bus *child;
+
+#ifdef DEBUG
+	/* NOTE(review): "%p" is given an unsigned long (*mem_startp), a
+	 * printf format/argument mismatch -- should be 0x%lx.  Left as-is
+	 * in this documentation pass. */
+	printk("...scan_bus(busno=%d,mem=%p)\n", bus->number, *mem_startp);
+#endif
+
+	max = bus->secondary;
+	for (devfn = 0; devfn < 0xff; ++devfn) {
+		if (PCI_FUNC(devfn) && !is_multi) {
+			/* not a multi-function device */
+			continue;
+		}
+		pcibios_read_config_byte(bus->number, devfn, PCI_HEADER_TYPE, &hdr_type);
+		/* bit 7 of header type on function 0 flags multi-function */
+		if (!PCI_FUNC(devfn))
+			is_multi = hdr_type & 0x80;
+
+		pcibios_read_config_dword(bus->number, devfn, PCI_VENDOR_ID, &l);
+		/* some broken boards return 0 if a slot is empty: */
+		if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
+			continue;
+
+		dev = pci_malloc(sizeof(*dev), mem_startp);
+		dev->bus = bus;
+		dev->devfn  = devfn;
+		/* one dword read gives vendor (low 16) and device (high 16) */
+		dev->vendor = l & 0xffff;
+		dev->device = (l >> 16) & 0xffff;
+
+		/*
+		 * Check to see if we know about this device and report
+		 * a message at boot time.  This is the only way to
+		 * learn about new hardware...
+		 */
+		info = pci_lookup_dev(dev->vendor, dev->device);
+		if (!info) {
+#if 0
+			printk("Warning : Unknown PCI device (%x:%x).  Please read include/linux/pci.h\n",
+				dev->vendor, dev->device);
+#endif
+		} else {
+			/* Some BIOS' are lazy.  Let's do their job: */
+			if (info->bridge_type != 0xff) {
+				burst_bridge(bus->number, devfn,
+					     info->bridge_type, 1);
+			}
+		}
+
+		/* non-destructively determine if device can be a master:
+		 * try to set the MASTER bit, read it back, then restore
+		 * the original command register */
+		pcibios_read_config_byte(bus->number, devfn, PCI_COMMAND,
+					 &cmd);
+		pcibios_write_config_byte(bus->number, devfn, PCI_COMMAND,
+					  cmd | PCI_COMMAND_MASTER);
+		pcibios_read_config_byte(bus->number, devfn, PCI_COMMAND,
+					 &tmp);
+		dev->master = ((tmp & PCI_COMMAND_MASTER) != 0);
+		pcibios_write_config_byte(bus->number, devfn, PCI_COMMAND,
+					  cmd);
+
+		/* read irq level (may be changed during pcibios_fixup()): */
+		pcibios_read_config_byte(bus->number, devfn,
+					 PCI_INTERRUPT_LINE, &dev->irq);
+
+		/* check to see if this device is a PCI-PCI bridge: */
+		pcibios_read_config_dword(bus->number, devfn,
+					  PCI_CLASS_REVISION, &l);
+		l = l >> 8;			/* upper 3 bytes */
+		dev->class = l;
+
+		/*
+		 * Check if the header type is known and consistent with
+		 * device type. PCI-to-PCI Bridges should have hdr_type 1,
+		 * CardBus Bridges 2, all other devices 0.
+		 */
+		switch (dev->class >> 8) {
+		      case PCI_CLASS_BRIDGE_PCI:
+			ht = 1;
+			break;
+		      case PCI_CLASS_BRIDGE_CARDBUS:
+			ht = 2;
+			break;
+		      default:
+			ht = 0;
+		}
+		if (ht != (hdr_type & 0x7f)) {
+			/* inconsistent device: skip it entirely */
+			printk(KERN_WARNING "PCI: %02x:%02x [%04x/%04x/%06x] has unknown header type %02x, ignoring.\n",
+			       bus->number, dev->devfn, dev->vendor, dev->device, dev->class, hdr_type);
+			continue;
+		}
+
+		/*
+		 * Put it into the simple chain of all PCI devices.
+		 * It is used to find devices once everything is set up.
+		 */
+		dev->next = pci_devices;
+		pci_devices = dev;
+
+		/*
+		 * Now insert it into the list of devices held
+		 * by the parent bus.
+		 */
+		dev->sibling = bus->devices;
+		bus->devices = dev;
+
+		if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI) {
+			unsigned int buses;
+			unsigned short cr;
+
+			/*
+			 * Insert it into the tree of buses.
+			 */
+			child = pci_malloc(sizeof(*child), mem_startp);
+			child->next = bus->children;
+			bus->children = child;
+			child->self = dev;
+			child->parent = bus;
+
+			/*
+			 * Set up the primary, secondary and subordinate
+			 * bus numbers.
+			 */
+			child->number = child->secondary = ++max;
+			child->primary = bus->secondary;
+			child->subordinate = 0xff;
+			/*
+			 * Clear all status bits and turn off memory,
+			 * I/O and master enables.
+			 */
+			pcibios_read_config_word(bus->number, devfn,
+						 PCI_COMMAND, &cr);
+			pcibios_write_config_word(bus->number, devfn,
+						  PCI_COMMAND, 0x0000);
+			pcibios_write_config_word(bus->number, devfn,
+						  PCI_STATUS, 0xffff);
+			/*
+			 * Read the existing primary/secondary/subordinate bus
+			 * number configuration to determine if the PCI bridge
+			 * has already been configured by the system.  If so,
+			 * do not modify the configuration, merely note it.
+			 */
+			pcibios_read_config_dword(bus->number, devfn, 0x18,
+						  &buses);
+			if ((buses & 0xFFFFFF) != 0)
+			  {
+			    /* BIOS already assigned bus numbers: record them */
+			    child->primary = buses & 0xFF;
+			    child->secondary = (buses >> 8) & 0xFF;
+			    child->subordinate = (buses >> 16) & 0xFF;
+			    child->number = child->secondary;
+			    max = scan_bus(child, mem_startp);
+			  }
+			else
+			  {
+			    /*
+			     * Configure the bus numbers for this bridge:
+			     */
+			    buses &= 0xff000000;
+			    buses |=
+			      (((unsigned int)(child->primary)     <<  0) |
+			       ((unsigned int)(child->secondary)   <<  8) |
+			       ((unsigned int)(child->subordinate) << 16));
+			    pcibios_write_config_dword(bus->number, devfn, 0x18,
+						       buses);
+			    /*
+			     * Now we can scan all subordinate buses:
+			     */
+			    max = scan_bus(child, mem_startp);
+			    /*
+			     * Set the subordinate bus number to its real
+			     * value:
+			     */
+			    child->subordinate = max;
+			    buses = (buses & 0xff00ffff)
+			      | ((unsigned int)(child->subordinate) << 16);
+			    pcibios_write_config_dword(bus->number, devfn, 0x18,
+						       buses);
+			  }
+			/* restore the command register saved above */
+			pcibios_write_config_word(bus->number, devfn,
+						  PCI_COMMAND, cr);
+		}
+	}
+	/*
+	 * We've scanned the bus and so we know all about what's on
+	 * the other side of any bridges that may be on this bus plus
+	 * any devices.
+	 *
+	 * Return how far we've got finding sub-buses.
+	 */
+	return max;
+}
+
+
+/*
+ * Initialize the PCI subsystem: set up the BIOS32 interface, scan the
+ * whole bus tree starting at pci_root, and let the BIOS apply
+ * platform fixups.  MEM_START/MEM_END delimit the boot-time memory
+ * pool; returns the new (advanced) mem_start after all allocations.
+ */
+unsigned long pci_init (unsigned long mem_start, unsigned long mem_end)
+{
+	mem_start = pcibios_init(mem_start, mem_end);
+
+	if (!pcibios_present()) {
+		/* no PCI BIOS: leave pci_devices/pci_root untouched */
+		printk("pci_init: no BIOS32 detected\n");
+		return mem_start;
+	}
+
+	printk("Probing PCI hardware.\n");
+
+	memset(&pci_root, 0, sizeof(pci_root));
+	pci_root.subordinate = scan_bus(&pci_root, &mem_start);
+
+	/* give BIOS a chance to apply platform specific fixes: */
+	mem_start = pcibios_fixup(mem_start, mem_end);
+
+#ifdef DEBUG
+	{
+		/* dump the freshly built device list into the still-free
+		 * part of the pool and print it */
+		int len = get_pci_list((char*)mem_start);
+		if (len) {
+			((char *) mem_start)[len] = '\0';
+			printk("%s\n", (char *) mem_start);
+		}
+	}
+#endif
+	return mem_start;
+}
diff --git a/linux/src/drivers/scsi/53c7,8xx.h b/linux/src/drivers/scsi/53c7,8xx.h
new file mode 100644
index 0000000..1a6680f
--- /dev/null
+++ b/linux/src/drivers/scsi/53c7,8xx.h
@@ -0,0 +1,1584 @@
+/*
+ * NCR 53c{7,8}0x0 driver, header file
+ *
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@PoohSticks.ORG
+ * +1 (303) 786-7975
+ *
+ * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+ *
+ * PRE-ALPHA
+ *
+ * For more information, please consult
+ *
+ * NCR 53C700/53C700-66
+ * SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR 53C810
+ * PCI-SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * +1 (719) 578-3400
+ *
+ * Toll free literature number
+ * +1 (800) 334-5454
+ *
+ */
+
+#ifndef NCR53c7x0_H
+#define NCR53c7x0_H
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else
+#define LINUX_1_2
+#endif
+#endif
+
+/*
+ * Prevent name space pollution in hosts.c, and only provide the
+ * define we need to get the NCR53c7x0 driver into the host template
+ * array.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+#include <scsi/scsicam.h>
+
+extern int NCR53c7xx_abort(Scsi_Cmnd *);
+extern int NCR53c7xx_detect(Scsi_Host_Template *tpnt);
+extern int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+extern int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
+#ifdef MODULE
+extern int NCR53c7xx_release(struct Scsi_Host *);
+#else
+#define NCR53c7xx_release NULL
+#endif
+
+#ifdef LINUX_1_2
+#define NCR53c7xx {NULL, NULL, "NCR53c{7,8}xx (rel 17)", NCR53c7xx_detect,\
+ NULL, /* info */ NULL, /* command, deprecated */ NULL, \
+ NCR53c7xx_queue_command, NCR53c7xx_abort, NCR53c7xx_reset, \
+ NULL /* slave attach */, scsicam_bios_param, /* can queue */ 24, \
+ /* id */ 7, 127 /* old SG_ALL */, /* cmd per lun */ 3, \
+ /* present */ 0, /* unchecked isa dma */ 0, DISABLE_CLUSTERING}
+#else
+#define NCR53c7xx {NULL, NULL, NULL, NULL, \
+ "NCR53c{7,8}xx (rel 17)", NCR53c7xx_detect,\
+ NULL, /* info */ NULL, /* command, deprecated */ NULL, \
+ NCR53c7xx_queue_command, NCR53c7xx_abort, NCR53c7xx_reset, \
+ NULL /* slave attach */, scsicam_bios_param, /* can queue */ 24, \
+ /* id */ 7, 127 /* old SG_ALL */, /* cmd per lun */ 3, \
+ /* present */ 0, /* unchecked isa dma */ 0, DISABLE_CLUSTERING}
+#endif
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#ifndef HOSTS_C
+#ifdef LINUX_1_2
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are trivial on the 1:1 Linux/i386 mapping (but if we ever
+ * make the kernel segment mapped at 0, we need to do translation
+ * on the i386 as well)
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ return (unsigned long) address;
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return (void *) address;
+}
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#define readl(addr) (*(volatile unsigned int *) (addr))
+
+#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
+
+#define mb()
+
+#endif /* def LINUX_1_2 */
+
+/* Register addresses, ordered numerically */
+
+/* SCSI control 0 rw, default = 0xc0 */
+#define SCNTL0_REG 0x00
+#define SCNTL0_ARB1 0x80 /* 0 0 = simple arbitration */
+#define SCNTL0_ARB2 0x40 /* 1 1 = full arbitration */
+#define SCNTL0_STRT 0x20 /* Start Sequence */
+#define SCNTL0_WATN 0x10 /* Select with ATN */
+#define SCNTL0_EPC 0x08 /* Enable parity checking */
+/* Bit 2 is reserved on 800 series chips */
+#define SCNTL0_EPG_700 0x04 /* Enable parity generation */
+#define SCNTL0_AAP 0x02 /* ATN/ on parity error */
+#define SCNTL0_TRG 0x01 /* Target mode */
+
+/* SCSI control 1 rw, default = 0x00 */
+
+#define SCNTL1_REG 0x01
+#define SCNTL1_EXC 0x80 /* Extra Clock Cycle of Data setup */
+#define SCNTL1_ADB 0x40 /* contents of SODL on bus */
+#define SCNTL1_ESR_700 0x20 /* Enable SIOP response to selection
+ and reselection */
+#define SCNTL1_DHP_800 0x20 /* Disable halt on parity error or ATN
+ target mode only */
+#define SCNTL1_CON 0x10 /* Connected */
+#define SCNTL1_RST 0x08 /* SCSI RST/ */
+#define SCNTL1_AESP 0x04 /* Force bad parity */
+#define SCNTL1_SND_700 0x02 /* Start SCSI send */
+#define SCNTL1_IARB_800 0x02 /* Immediate Arbitration, start
+ arbitration immediately after
+ busfree is detected */
+#define SCNTL1_RCV_700 0x01 /* Start SCSI receive */
+#define SCNTL1_SST_800 0x01 /* Start SCSI transfer */
+
+/* SCSI control 2 rw, */
+
+#define SCNTL2_REG_800 0x02
+#define SCNTL2_800_SDU 0x80 /* SCSI disconnect unexpected */
+
+/* SCSI control 3 rw */
+
+#define SCNTL3_REG_800 0x03
+#define SCNTL3_800_SCF_SHIFT 4
+#define SCNTL3_800_SCF_MASK 0x70
+#define SCNTL3_800_SCF2 0x40 /* Synchronous divisor */
+#define SCNTL3_800_SCF1 0x20 /* 0x00 = SCLK/3 */
+#define SCNTL3_800_SCF0 0x10 /* 0x10 = SCLK/1 */
+ /* 0x20 = SCLK/1.5
+ 0x30 = SCLK/2
+ 0x40 = SCLK/3 */
+
+#define SCNTL3_800_CCF_SHIFT 0
+#define SCNTL3_800_CCF_MASK 0x07
+#define SCNTL3_800_CCF2 0x04 /* 0x00 50.01 to 66 */
+#define SCNTL3_800_CCF1 0x02 /* 0x01 16.67 to 25 */
+#define SCNTL3_800_CCF0 0x01 /* 0x02 25.01 - 37.5
+ 0x03 37.51 - 50
+ 0x04 50.01 - 66 */
+
+/*
+ * SCSI destination ID rw - the appropriate bit is set for the selected
+ * target ID. This is written by the SCSI SCRIPTS processor.
+ * default = 0x00
+ */
+#define SDID_REG_700 0x02
+#define SDID_REG_800 0x06
+
+#define GP_REG_800 0x07 /* General purpose IO */
+#define GP_800_IO1 0x02
+#define GP_800_IO2 0x01
+
+
+/* SCSI interrupt enable rw, default = 0x00 */
+#define SIEN_REG_700 0x03
+#define SIEN0_REG_800 0x40
+#define SIEN_MA 0x80 /* Phase mismatch (ini) or ATN (tgt) */
+#define SIEN_FC 0x40 /* Function complete */
+#define SIEN_700_STO 0x20 /* Selection or reselection timeout */
+#define SIEN_800_SEL 0x20 /* Selected */
+#define SIEN_700_SEL 0x10 /* Selected or reselected */
+#define SIEN_800_RESEL 0x10 /* Reselected */
+#define SIEN_SGE 0x08 /* SCSI gross error */
+#define SIEN_UDC 0x04 /* Unexpected disconnect */
+#define SIEN_RST 0x02 /* SCSI RST/ received */
+#define SIEN_PAR 0x01 /* Parity error */
+
+/*
+ * SCSI chip ID rw
+ * NCR53c700 :
+ * When arbitrating, the highest bit is used, when reselection or selection
+ * occurs, the chip responds to all IDs for which a bit is set.
+ * default = 0x00
+ * NCR53c810 :
+ * Uses bit mapping
+ */
+#define SCID_REG 0x04
+/* Bit 7 is reserved on 800 series chips */
+#define SCID_800_RRE 0x40 /* Enable response to reselection */
+#define SCID_800_SRE 0x20 /* Enable response to selection */
+/* Bits four and three are reserved on 800 series chips */
+#define SCID_800_ENC_MASK 0x07 /* Encoded SCSI ID */
+
+/* SCSI transfer rw, default = 0x00 */
+#define SXFER_REG 0x05
+#define SXFER_DHP 0x80 /* Disable halt on parity */
+
+#define SXFER_TP2 0x40 /* Transfer period msb */
+#define SXFER_TP1 0x20
+#define SXFER_TP0 0x10 /* lsb */
+#define SXFER_TP_MASK 0x70
+/* FIXME : SXFER_TP_SHIFT == 5 is right for '8xx chips */
+#define SXFER_TP_SHIFT 5
+#define SXFER_TP_4 0x00 /* Divisors */
+#define SXFER_TP_5 0x10<<1
+#define SXFER_TP_6 0x20<<1
+#define SXFER_TP_7 0x30<<1
+#define SXFER_TP_8 0x40<<1
+#define SXFER_TP_9 0x50<<1
+#define SXFER_TP_10 0x60<<1
+#define SXFER_TP_11 0x70<<1
+
+#define SXFER_MO3 0x08 /* Max offset msb */
+#define SXFER_MO2 0x04
+#define SXFER_MO1 0x02
+#define SXFER_MO0 0x01 /* lsb */
+#define SXFER_MO_MASK 0x0f
+#define SXFER_MO_SHIFT 0
+
+/*
+ * SCSI output data latch rw
+ * The contents of this register are driven onto the SCSI bus when
+ * the Assert Data Bus bit of the SCNTL1 register is set and
+ * the CD, IO, and MSG bits of the SOCL register match the SCSI phase
+ */
+#define SODL_REG_700 0x06
+#define SODL_REG_800 0x54
+
+
+/*
+ * SCSI output control latch rw, default = 0
+ * Note that when the chip is being manually programmed as an initiator,
+ * the MSG, CD, and IO bits must be set correctly for the phase the target
+ * is driving the bus in. Otherwise no data transfer will occur due to
+ * phase mismatch.
+ */
+
+#define SBCL_REG 0x0b
+#define SBCL_REQ 0x80 /* REQ */
+#define SBCL_ACK 0x40 /* ACK */
+#define SBCL_BSY 0x20 /* BSY */
+#define SBCL_SEL 0x10 /* SEL */
+#define SBCL_ATN 0x08 /* ATN */
+#define SBCL_MSG 0x04 /* MSG */
+#define SBCL_CD 0x02 /* C/D */
+#define SBCL_IO 0x01 /* I/O */
+#define SBCL_PHASE_CMDOUT SBCL_CD
+#define SBCL_PHASE_DATAIN SBCL_IO
+#define SBCL_PHASE_DATAOUT 0
+#define SBCL_PHASE_MSGIN (SBCL_CD|SBCL_IO|SBCL_MSG)
+#define SBCL_PHASE_MSGOUT (SBCL_CD|SBCL_MSG)
+#define SBCL_PHASE_STATIN (SBCL_CD|SBCL_IO)
+#define SBCL_PHASE_MASK (SBCL_CD|SBCL_IO|SBCL_MSG)
+
+/*
+ * SCSI first byte received latch ro
+ * This register contains the first byte received during a block MOVE
+ * SCSI SCRIPTS instruction, including
+ *
+ * Initiator mode Target mode
+ * Message in Command
+ * Status Message out
+ * Data in Data out
+ *
+ * It also contains the selecting or reselecting device's ID and our
+ * ID.
+ *
+ * Note that this is the register the various IF conditionals can
+ * operate on.
+ */
+#define SFBR_REG 0x08
+
+/*
+ * SCSI input data latch ro
+ * In initiator mode, data is latched into this register on the rising
+ * edge of REQ/. In target mode, data is latched on the rising edge of
+ * ACK/
+ */
+#define SIDL_REG_700 0x09
+#define SIDL_REG_800 0x50
+
+/*
+ * SCSI bus data lines ro
+ * This register reflects the instantaneous status of the SCSI data
+ * lines. Note that SCNTL0 must be set to disable parity checking,
+ * otherwise reading this register will latch new parity.
+ */
+#define SBDL_REG_700 0x0a
+#define SBDL_REG_800 0x58
+
+#define SSID_REG_800 0x0a
+#define SSID_800_VAL 0x80 /* Exactly two bits asserted at sel */
+#define SSID_800_ENCID_MASK 0x07 /* Device which performed operation */
+
+
+/*
+ * SCSI bus control lines rw,
+ * instantaneous readout of control lines
+ */
+#define SOCL_REG 0x0b
+#define SOCL_REQ 0x80 /* REQ ro */
+#define SOCL_ACK 0x40 /* ACK ro */
+#define SOCL_BSY 0x20 /* BSY ro */
+#define SOCL_SEL 0x10 /* SEL ro */
+#define SOCL_ATN 0x08 /* ATN ro */
+#define SOCL_MSG 0x04 /* MSG ro */
+#define SOCL_CD 0x02 /* C/D ro */
+#define SOCL_IO 0x01 /* I/O ro */
+/*
+ * Synchronous SCSI Clock Control bits
+ * 0 - set by DCNTL
+ * 1 - SCLK / 1.0
+ * 2 - SCLK / 1.5
+ * 3 - SCLK / 2.0
+ */
+#define SBCL_SSCF1 0x02 /* wo, -66 only */
+#define SBCL_SSCF0 0x01 /* wo, -66 only */
+#define SBCL_SSCF_MASK 0x03
+
+/*
+ * XXX note : when reading the DSTAT and STAT registers to clear interrupts,
+ * insure that 10 clocks elapse between the two
+ */
+/* DMA status ro */
+#define DSTAT_REG 0x0c
+#define DSTAT_DFE 0x80 /* DMA FIFO empty */
+#define DSTAT_800_MDPE 0x40 /* Master Data Parity Error */
+#define DSTAT_800_BF 0x20 /* Bus Fault */
+#define DSTAT_ABRT 0x10 /* Aborted - set on error */
+#define DSTAT_SSI 0x08 /* SCRIPTS single step interrupt */
+#define DSTAT_SIR 0x04 /* SCRIPTS interrupt received -
+ set when INT instruction is
+ executed */
+#define DSTAT_WTD 0x02 /* Watchdog timeout detected */
+#define DSTAT_OPC 0x01 /* Illegal instruction */
+#define DSTAT_800_IID 0x01 /* Same thing, different name */
+
+
+/* NCR53c800 moves this stuff into SIST0 */
+#define SSTAT0_REG 0x0d /* SCSI status 0 ro */
+#define SIST0_REG_800 0x42
+#define SSTAT0_MA 0x80 /* ini : phase mismatch,
+ * tgt : ATN/ asserted
+ */
+#define SSTAT0_CMP 0x40 /* function complete */
+#define SSTAT0_700_STO 0x20 /* Selection or reselection timeout */
+#define SIST0_800_SEL 0x20 /* Selected */
+#define SSTAT0_700_SEL 0x10 /* Selected or reselected */
+#define SIST0_800_RSL 0x10 /* Reselected */
+#define SSTAT0_SGE 0x08 /* SCSI gross error */
+#define SSTAT0_UDC 0x04 /* Unexpected disconnect */
+#define SSTAT0_RST 0x02 /* SCSI RST/ received */
+#define SSTAT0_PAR 0x01 /* Parity error */
+
+/* And uses SSTAT0 for what was SSTAT1 */
+
+#define SSTAT1_REG 0x0e /* SCSI status 1 ro */
+#define SSTAT1_ILF 0x80 /* SIDL full */
+#define SSTAT1_ORF 0x40 /* SODR full */
+#define SSTAT1_OLF 0x20 /* SODL full */
+#define SSTAT1_AIP 0x10 /* Arbitration in progress */
+#define SSTAT1_LOA 0x08 /* Lost arbitration */
+#define SSTAT1_WOA 0x04 /* Won arbitration */
+#define SSTAT1_RST 0x02 /* Instant readout of RST/ */
+#define SSTAT1_SDP 0x01 /* Instant readout of SDP/ */
+
+#define SSTAT2_REG 0x0f /* SCSI status 2 ro */
+#define SSTAT2_FF3 0x80 /* number of bytes in synchronous */
+#define SSTAT2_FF2 0x40 /* data FIFO */
+#define SSTAT2_FF1 0x20
+#define SSTAT2_FF0 0x10
+#define SSTAT2_FF_MASK 0xf0
+#define SSTAT2_FF_SHIFT 4
+
+/*
+ * Latched signals, latched on the leading edge of REQ/ for initiators,
+ * ACK/ for targets.
+ */
+#define SSTAT2_SDP 0x08 /* SDP */
+#define SSTAT2_MSG 0x04 /* MSG */
+#define SSTAT2_CD 0x02 /* C/D */
+#define SSTAT2_IO 0x01 /* I/O */
+#define SSTAT2_PHASE_CMDOUT SSTAT2_CD
+#define SSTAT2_PHASE_DATAIN SSTAT2_IO
+#define SSTAT2_PHASE_DATAOUT 0
+#define SSTAT2_PHASE_MSGIN (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
+#define SSTAT2_PHASE_MSGOUT (SSTAT2_CD|SSTAT2_MSG)
+#define SSTAT2_PHASE_STATIN (SSTAT2_CD|SSTAT2_IO)
+#define SSTAT2_PHASE_MASK (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
+
+
+/* NCR53c700-66 only */
+#define SCRATCHA_REG_00 0x10 /* through 0x13 Scratch A rw */
+/* NCR53c710 and higher */
+#define DSA_REG 0x10 /* DATA structure address */
+
+#define CTEST0_REG_700 0x14 /* Chip test 0 ro */
+#define CTEST0_REG_800 0x18 /* Chip test 0 rw, general purpose */
+/* 0x80 - 0x04 are reserved */
+#define CTEST0_700_RTRG 0x02 /* Real target mode */
+#define CTEST0_700_DDIR 0x01 /* Data direction, 1 =
+ * SCSI bus to host, 0 =
+ * host to SCSI.
+ */
+
+#define CTEST1_REG_700 0x15 /* Chip test 1 ro */
+#define CTEST1_REG_800 0x19 /* Chip test 1 ro */
+#define CTEST1_FMT3 0x80 /* Identify which byte lanes are empty */
+#define CTEST1_FMT2 0x40 /* in the DMA FIFO */
+#define CTEST1_FMT1 0x20
+#define CTEST1_FMT0 0x10
+
+#define CTEST1_FFL3 0x08 /* Identify which bytes lanes are full */
+#define CTEST1_FFL2 0x04 /* in the DMA FIFO */
+#define CTEST1_FFL1 0x02
+#define CTEST1_FFL0 0x01
+
+#define CTEST2_REG_700 0x16 /* Chip test 2 ro */
+#define CTEST2_REG_800 0x1a /* Chip test 2 ro */
+
+#define CTEST2_800_DDIR 0x80 /* 1 = SCSI->host */
+#define CTEST2_800_SIGP 0x40 /* A copy of SIGP in ISTAT.
+ Reading this register clears */
+#define CTEST2_800_CIO 0x20 /* Configured as IO */
+#define CTEST2_800_CM 0x10 /* Configured as memory */
+
+/* 0x80 - 0x40 are reserved on 700 series chips */
+#define CTEST2_700_SOFF 0x20 /* SCSI Offset Compare,
+ * As an initiator, this bit is
+ * one when the synchronous offset
+ * is zero, as a target this bit
+ * is one when the synchronous
+ * offset is at the maximum
+ * defined in SXFER
+ */
+#define CTEST2_700_SFP 0x10 /* SCSI FIFO parity bit,
+ * reading CTEST3 unloads a byte
+ * from the FIFO and sets this
+ */
+#define CTEST2_700_DFP 0x08 /* DMA FIFO parity bit,
+ * reading CTEST6 unloads a byte
+ * from the FIFO and sets this
+ */
+#define CTEST2_TEOP 0x04 /* SCSI true end of process,
+ * indicates a totally finished
+ * transfer
+ */
+#define CTEST2_DREQ 0x02 /* Data request signal */
+/* 0x01 is reserved on 700 series chips */
+#define CTEST2_800_DACK 0x01
+
+/*
+ * Chip test 3 ro
+ * Unloads the bottom byte of the eight deep SCSI synchronous FIFO,
+ * check SSTAT2 FIFO full bits to determine size. Note that a GROSS
+ * error results if a read is attempted on this register. Also note
+ * that 16 and 32 bit reads of this register will cause corruption.
+ */
+#define CTEST3_REG_700 0x17
+/* Chip test 3 rw */
+#define CTEST3_REG_800 0x1b
+#define CTEST3_800_V3 0x80 /* Chip revision */
+#define CTEST3_800_V2 0x40
+#define CTEST3_800_V1 0x20
+#define CTEST3_800_V0 0x10
+#define CTEST3_800_FLF 0x08 /* Flush DMA FIFO */
+#define CTEST3_800_CLF 0x04 /* Clear DMA FIFO */
+#define CTEST3_800_FM 0x02 /* Fetch mode pin */
+/* bit 0 is reserved on 800 series chips */
+
+#define CTEST4_REG_700 0x18 /* Chip test 4 rw */
+#define CTEST4_REG_800 0x21 /* Chip test 4 rw */
+/* 0x80 is reserved on 700 series chips */
+#define CTEST4_800_BDIS 0x80 /* Burst mode disable */
+#define CTEST4_ZMOD 0x40 /* High impedance mode */
+#define CTEST4_SZM 0x20 /* SCSI bus high impedance */
+#define CTEST4_700_SLBE 0x10 /* SCSI loopback enabled */
+#define CTEST4_800_SRTM 0x10 /* Shadow Register Test Mode */
+#define CTEST4_700_SFWR 0x08 /* SCSI FIFO write enable,
+ * redirects writes from SODL
+ * to the SCSI FIFO.
+ */
+#define CTEST4_800_MPEE 0x08 /* Enable parity checking
+ during master cycles on PCI
+ bus */
+
+/*
+ * These bits send the contents of the CTEST6 register to the appropriate
+ * byte lane of the 32 bit DMA FIFO. Normal operation is zero, otherwise
+ * the high bit means the low two bits select the byte lane.
+ */
+#define CTEST4_FBL2 0x04
+#define CTEST4_FBL1 0x02
+#define CTEST4_FBL0 0x01
+#define CTEST4_FBL_MASK 0x07
+#define CTEST4_FBL_0 0x04 /* Select DMA FIFO byte lane 0 */
+#define CTEST4_FBL_1 0x05 /* Select DMA FIFO byte lane 1 */
+#define CTEST4_FBL_2 0x06 /* Select DMA FIFO byte lane 2 */
+#define CTEST4_FBL_3 0x07 /* Select DMA FIFO byte lane 3 */
+#define CTEST4_800_SAVE (CTEST4_800_BDIS)
+
+
+#define CTEST5_REG_700 0x19 /* Chip test 5 rw */
+#define CTEST5_REG_800 0x22 /* Chip test 5 rw */
+/*
+ * Clock Address Incrementor. When set, it increments the
+ * DNAD register to the next bus size boundary. It automatically
+ * resets itself when the operation is complete.
+ */
+#define CTEST5_ADCK 0x80
+/*
+ * Clock Byte Counter. When set, it decrements the DBC register to
+ * the next bus size boundary.
+ */
+#define CTEST5_BBCK 0x40
+/*
+ * Reset SCSI Offset. Setting this bit to 1 clears the current offset
+ * pointer in the SCSI synchronous offset counter (SSTAT). This bit
+ * is set to 1 if a SCSI Gross Error Condition occurs. The offset should
+ * be cleared when a synchronous transfer fails. When written, it is
+ * automatically cleared after the SCSI synchronous offset counter is
+ * reset.
+ */
+/* Bit 5 is reserved on 800 series chips */
+#define CTEST5_700_ROFF 0x20
+/*
+ * Master Control for Set or Reset pulses. When 1, causes the low
+ * four bits of register to set when set, 0 causes the low bits to
+ * clear when set.
+ */
+#define CTEST5_MASR 0x10
+#define CTEST5_DDIR 0x08 /* DMA direction */
+/*
+ * Bits 2-0 are reserved on 800 series chips
+ */
+#define CTEST5_700_EOP 0x04 /* End of process */
+#define CTEST5_700_DREQ 0x02 /* Data request */
+#define CTEST5_700_DACK 0x01 /* Data acknowledge */
+
+/*
+ * Chip test 6 rw - writing to this register writes to the byte
+ * lane in the DMA FIFO as determined by the FBL bits in the CTEST4
+ * register.
+ */
+#define CTEST6_REG_700 0x1a
+#define CTEST6_REG_800 0x23
+
+#define CTEST7_REG 0x1b /* Chip test 7 rw */
+/* 0x80 - 0x40 are reserved on NCR53c700 and NCR53c700-66 chips */
+#define CTEST7_10_CDIS 0x80 /* Cache burst disable */
+#define CTEST7_10_SC1 0x40 /* Snoop control bits */
+#define CTEST7_10_SC0 0x20
+#define CTEST7_10_SC_MASK 0x60
+/* 0x20 is reserved on the NCR53c700 */
+#define CTEST7_0060_FM 0x20 /* Fetch mode */
+#define CTEST7_STD 0x10 /* Selection timeout disable */
+#define CTEST7_DFP 0x08 /* DMA FIFO parity bit for CTEST6 */
+#define CTEST7_EVP 0x04 /* 1 = host bus even parity, 0 = odd */
+#define CTEST7_10_TT1 0x02 /* Transfer type */
+#define CTEST7_00_DC 0x02 /* Set to drive DC low during instruction
+ fetch */
+#define CTEST7_DIFF 0x01 /* Differential mode */
+
+#define CTEST7_SAVE ( CTEST7_EVP | CTEST7_DIFF )
+
+
+#define TEMP_REG 0x1c /* through 0x1f Temporary stack rw */
+
+#define DFIFO_REG 0x20 /* DMA FIFO rw */
+/*
+ * 0x80 is reserved on the NCR53c710, the CLF and FLF bits have been
+ * moved into the CTEST8 register.
+ */
+#define DFIFO_00_FLF 0x80 /* Flush DMA FIFO to memory */
+#define DFIFO_00_CLF 0x40 /* Clear DMA and SCSI FIFOs */
+#define DFIFO_BO6 0x40
+#define DFIFO_BO5 0x20
+#define DFIFO_BO4 0x10
+#define DFIFO_BO3 0x08
+#define DFIFO_BO2 0x04
+#define DFIFO_BO1 0x02
+#define DFIFO_BO0 0x01
+#define DFIFO_10_BO_MASK 0x7f /* 7 bit counter */
+#define DFIFO_00_BO_MASK 0x3f /* 6 bit counter */
+
+/*
+ * Interrupt status rw
+ * Note that this is the only register which can be read while SCSI
+ * SCRIPTS are being executed.
+ */
+#define ISTAT_REG_700 0x21
+#define ISTAT_REG_800 0x14
+#define ISTAT_ABRT 0x80 /* Software abort, write
+ * 1 to abort, wait for interrupt. */
+/* 0x40 and 0x20 are reserved on NCR53c700 and NCR53c700-66 chips */
+#define ISTAT_10_SRST 0x40 /* software reset */
+#define ISTAT_10_SIGP 0x20 /* signal script */
+/* 0x10 is reserved on NCR53c700 series chips */
+#define ISTAT_800_SEM 0x10 /* semaphore */
+#define ISTAT_CON 0x08 /* 1 when connected */
+#define ISTAT_800_INTF 0x04 /* Interrupt on the fly */
+#define ISTAT_700_PRE 0x04 /* Pointer register empty.
+ * Set to 1 when DSPS and DSP
+ * registers are empty in pipeline
+ * mode, always set otherwise.
+ */
+#define ISTAT_SIP 0x02 /* SCSI interrupt pending from
+ * SCSI portion of SIOP see
+ * SSTAT0
+ */
+#define ISTAT_DIP 0x01 /* DMA interrupt pending
+ * see DSTAT
+ */
+
+/* NCR53c700-66 and NCR53c710 only */
+#define CTEST8_REG 0x22 /* Chip test 8 rw */
+#define CTEST8_0066_EAS 0x80 /* Enable alternate SCSI clock,
+ * ie read from SCLK/ rather than CLK/
+ */
+#define CTEST8_0066_EFM 0x40 /* Enable fetch and master outputs */
+#define CTEST8_0066_GRP 0x20 /* Generate Receive Parity for
+ * pass through. This insures that
+ * bad parity won't reach the host
+ * bus.
+ */
+#define CTEST8_0066_TE 0x10 /* TolerANT enable. Enable
+ * active negation, should only
+ * be used for slow SCSI
+ * non-differential.
+ */
+#define CTEST8_0066_HSC 0x08 /* Halt SCSI clock */
+#define CTEST8_0066_SRA 0x04 /* Shorten REQ/ACK filtering,
+ * must be set for fast SCSI-II
+ * speeds.
+ */
+#define CTEST8_0066_DAS 0x02 /* Disable automatic target/initiator
+ * switching.
+ */
+#define CTEST8_0066_LDE 0x01 /* Last disconnect enable.
+ * The status of pending
+ * disconnect is maintained by
+ * the core, eliminating
+ * the possibility of missing a
+ * selection or reselection
+ * while waiting to fetch a
+ * WAIT DISCONNECT opcode.
+ */
+
+#define CTEST8_10_V3 0x80 /* Chip revision */
+#define CTEST8_10_V2 0x40
+#define CTEST8_10_V1 0x20
+#define CTEST8_10_V0 0x10
+#define CTEST8_10_V_MASK 0xf0
+#define CTEST8_10_FLF 0x08 /* Flush FIFOs */
+#define CTEST8_10_CLF 0x04 /* Clear FIFOs */
+#define CTEST8_10_FM 0x02 /* Fetch pin mode */
+#define CTEST8_10_SM 0x01 /* Snoop pin mode */
+
+
+/*
+ * The CTEST9 register may be used to differentiate between a
+ * NCR53c700 and a NCR53c710.
+ *
+ * Write 0xff to this register.
+ * Read it.
+ * If the contents are 0xff, it is a NCR53c700
+ * If the contents are 0x00, it is a NCR53c700-66 first revision
+ * If the contents are some other value, it is some other NCR53c700-66
+ */
+#define CTEST9_REG_00 0x23 /* Chip test 9 ro */
+#define LCRC_REG_10 0x23
+
+/*
+ * 0x24 through 0x27 are the DMA byte counter register. Instructions
+ * write their high 8 bits into the DCMD register, the low 24 bits into
+ * the DBC register.
+ *
+ * Function is dependent on the command type being executed.
+ */
+
+
+#define DBC_REG 0x24
+/*
+ * For Block Move Instructions, DBC is a 24 bit quantity representing
+ * the number of bytes to transfer.
+ * For Transfer Control Instructions, DBC is bit fielded as follows :
+ */
+/* Bits 20 - 23 should be clear */
+#define DBC_TCI_TRUE (1 << 19) /* Jump when true */
+#define DBC_TCI_COMPARE_DATA (1 << 18) /* Compare data */
+#define DBC_TCI_COMPARE_PHASE (1 << 17) /* Compare phase with DCMD field */
+#define DBC_TCI_WAIT_FOR_VALID (1 << 16) /* Wait for REQ */
+/* Bits 8 - 15 are reserved on some implementations ? */
+#define DBC_TCI_MASK_MASK 0xff00 /* Mask for data compare */
+#define DBC_TCI_MASK_SHIFT 8
+#define DBC_TCI_DATA_MASK 0xff /* Data to be compared */
+#define DBC_TCI_DATA_SHIFT 0
+
+#define DBC_RWRI_IMMEDIATE_MASK 0xff00 /* Immediate data */
+#define DBC_RWRI_IMMEDIATE_SHIFT 8 /* Amount to shift */
+#define DBC_RWRI_ADDRESS_MASK 0x3f0000 /* Register address */
+#define DBC_RWRI_ADDRESS_SHIFT 16
+
+
+/*
+ * DMA command r/w
+ */
+#define DCMD_REG 0x27
+#define DCMD_TYPE_MASK 0xc0 /* Masks off type */
+#define DCMD_TYPE_BMI 0x00 /* Indicates a Block Move instruction */
+#define DCMD_BMI_IO 0x01 /* I/O, CD, and MSG bits selecting */
+#define DCMD_BMI_CD 0x02 /* the phase for the block MOVE */
+#define DCMD_BMI_MSG 0x04 /* instruction */
+
+#define DCMD_BMI_OP_MASK 0x18 /* mask for opcode */
+#define DCMD_BMI_OP_MOVE_T 0x00 /* MOVE */
+#define DCMD_BMI_OP_MOVE_I 0x08 /* MOVE Initiator */
+
+#define DCMD_BMI_INDIRECT 0x20 /* Indirect addressing */
+
+#define DCMD_TYPE_TCI 0x80 /* Indicates a Transfer Control
+ instruction */
+#define DCMD_TCI_IO 0x01 /* I/O, CD, and MSG bits selecting */
+#define DCMD_TCI_CD 0x02 /* the phase for the block MOVE */
+#define DCMD_TCI_MSG 0x04 /* instruction */
+#define DCMD_TCI_OP_MASK 0x38 /* mask for opcode */
+#define DCMD_TCI_OP_JUMP 0x00 /* JUMP */
+#define DCMD_TCI_OP_CALL 0x08 /* CALL */
+#define DCMD_TCI_OP_RETURN 0x10 /* RETURN */
+#define DCMD_TCI_OP_INT 0x18 /* INT */
+
+#define DCMD_TYPE_RWRI 0x40 /* Indicates I/O or register Read/Write
+ instruction */
+#define DCMD_RWRI_OPC_MASK 0x38 /* Opcode mask */
+#define DCMD_RWRI_OPC_WRITE 0x28 /* Write SFBR to register */
+#define DCMD_RWRI_OPC_READ 0x30 /* Read register to SFBR */
+#define DCMD_RWRI_OPC_MODIFY 0x38 /* Modify in place */
+
+#define DCMD_RWRI_OP_MASK 0x07
+#define DCMD_RWRI_OP_MOVE 0x00
+#define DCMD_RWRI_OP_SHL 0x01
+#define DCMD_RWRI_OP_OR 0x02
+#define DCMD_RWRI_OP_XOR 0x03
+#define DCMD_RWRI_OP_AND 0x04
+#define DCMD_RWRI_OP_SHR 0x05
+#define DCMD_RWRI_OP_ADD 0x06
+#define DCMD_RWRI_OP_ADDC 0x07
+
+#define DCMD_TYPE_MMI 0xc0 /* Indicates a Memory Move instruction
+ (three words) */
+
+
+#define DNAD_REG 0x28 /* through 0x2b DMA next address for
+ data */
+#define DSP_REG 0x2c /* through 0x2f DMA SCRIPTS pointer rw */
+#define DSPS_REG 0x30 /* through 0x33 DMA SCRIPTS pointer
+ save rw */
+#define DMODE_REG_00 0x34 /* DMA mode rw */
+#define DMODE_00_BL1 0x80 /* Burst length bits */
+#define DMODE_00_BL0 0x40
+#define DMODE_BL_MASK 0xc0
+/* Burst lengths (800) */
+#define DMODE_BL_2 0x00 /* 2 transfer */
+#define DMODE_BL_4 0x40 /* 4 transfers */
+#define DMODE_BL_8 0x80 /* 8 transfers */
+#define DMODE_BL_16 0xc0 /* 16 transfers */
+
+#define DMODE_700_BW16 0x20 /* Host buswidth = 16 */
+#define DMODE_700_286 0x10 /* 286 mode */
+#define DMODE_700_IOM 0x08 /* Transfer to IO port */
+#define DMODE_700_FAM 0x04 /* Fixed address mode */
+#define DMODE_700_PIPE 0x02 /* Pipeline mode disables
+ * automatic fetch / exec
+ */
+#define DMODE_MAN 0x01 /* Manual start mode,
+ * requires a 1 to be written
+ * to the start DMA bit in the DCNTL
+ * register to run scripts
+ */
+
+#define DMODE_700_SAVE ( DMODE_BL_MASK | DMODE_700_BW16 | DMODE_700_286 )
+
+/* NCR53c800 series only */
+#define SCRATCHA_REG_800 0x34 /* through 0x37 Scratch A rw */
+/* NCR53c710 only */
+#define SCRATCB_REG_10 0x34 /* through 0x37 scratch B rw */
+
+#define DMODE_REG_10 0x38 /* DMA mode rw, NCR53c710 and newer */
+#define DMODE_800_SIOM 0x20 /* Source IO = 1 */
+#define DMODE_800_DIOM 0x10 /* Destination IO = 1 */
+#define DMODE_800_ERL 0x08 /* Enable Read Line */
+
+/* 35-38 are reserved on 700 and 700-66 series chips */
+#define DIEN_REG 0x39 /* DMA interrupt enable rw */
+/* 0x80, 0x40, and 0x20 are reserved on 700-series chips */
+#define DIEN_800_MDPE 0x40 /* Master data parity error */
+#define DIEN_800_BF 0x20 /* BUS fault */
+#define DIEN_ABRT 0x10 /* Enable aborted interrupt */
+#define DIEN_SSI 0x08 /* Enable single step interrupt */
+#define DIEN_SIR 0x04 /* Enable SCRIPTS INT command
+ * interrupt
+ */
+/* 0x02 is reserved on 800 series chips */
+#define DIEN_700_WTD 0x02 /* Enable watchdog timeout interrupt */
+#define DIEN_700_OPC 0x01 /* Enable illegal instruction
+ * interrupt
+ */
+#define DIEN_800_IID 0x01 /* Same meaning, different name */
+
+/*
+ * DMA watchdog timer rw
+ * set in 16 CLK input periods.
+ */
+#define DWT_REG 0x3a
+
+/* DMA control rw */
+#define DCNTL_REG 0x3b
+#define DCNTL_700_CF1 0x80 /* Clock divisor bits */
+#define DCNTL_700_CF0 0x40
+#define DCNTL_700_CF_MASK 0xc0
+/* Clock divisors Divisor SCLK range (MHZ) */
+#define DCNTL_700_CF_2 0x00 /* 2.0 37.51-50.00 */
+#define DCNTL_700_CF_1_5 0x40 /* 1.5 25.01-37.50 */
+#define DCNTL_700_CF_1 0x80 /* 1.0 16.67-25.00 */
+#define DCNTL_700_CF_3 0xc0 /* 3.0 50.01-66.67 (53c700-66) */
+
+#define DCNTL_700_S16 0x20 /* Load scripts 16 bits at a time */
+#define DCNTL_SSM 0x10 /* Single step mode */
+#define DCNTL_700_LLM 0x08 /* Low level mode, can only be set
+ * after selection */
+#define DCNTL_800_IRQM 0x08 /* Totem pole IRQ pin */
+#define DCNTL_STD 0x04 /* Start DMA / SCRIPTS */
+/* 0x02 is reserved */
+#define DCNTL_00_RST 0x01 /* Software reset, resets everything
+ * but 286 mode bit in DMODE. On the
+ * NCR53c710, this bit moved to CTEST8
+ */
+#define DCNTL_10_COM 0x01 /* 700 software compatibility mode */
+
+#define DCNTL_700_SAVE ( DCNTL_700_CF_MASK | DCNTL_700_S16)
+
+
+/* NCR53c700-66 only */
+#define SCRATCHB_REG_00 0x3c /* through 0x3f scratch b rw */
+#define SCRATCHB_REG_800 0x5c /* through 0x5f scratch b rw */
+/* NCR53c710 only */
+#define ADDER_REG_10 0x3c /* Adder, NCR53c710 only */
+
+#define SIEN1_REG_800 0x41
+#define SIEN1_800_STO 0x04 /* selection/reselection timeout */
+#define SIEN1_800_GEN 0x02 /* general purpose timer */
+#define SIEN1_800_HTH 0x01 /* handshake to handshake */
+
+#define SIST1_REG_800 0x43
+#define SIST1_800_STO 0x04 /* selection/reselection timeout */
+#define SIST1_800_GEN 0x02 /* general purpose timer */
+#define SIST1_800_HTH 0x01 /* handshake to handshake */
+
+#define SLPAR_REG_800 0x44 /* Parity */
+
+#define MACNTL_REG_800 0x46 /* Memory access control */
+#define MACNTL_800_TYP3 0x80
+#define MACNTL_800_TYP2 0x40
+#define MACNTL_800_TYP1 0x20
+#define MACNTL_800_TYP0 0x10
+#define MACNTL_800_DWR 0x08
+#define MACNTL_800_DRD 0x04
+#define MACNTL_800_PSCPT 0x02
+#define MACNTL_800_SCPTS 0x01
+
+#define GPCNTL_REG_800 0x47 /* General Purpose Pin Control */
+
+/* Timeouts are expressed such that 0=off, 1=100us, doubling after that */
+#define STIME0_REG_800 0x48 /* SCSI Timer Register 0 */
+#define STIME0_800_HTH_MASK 0xf0 /* Handshake to Handshake timeout */
+#define STIME0_800_HTH_SHIFT 4
+#define STIME0_800_SEL_MASK 0x0f /* Selection timeout */
+#define STIME0_800_SEL_SHIFT 0
+
+#define STIME1_REG_800 0x49
+#define STIME1_800_GEN_MASK 0x0f /* General purpose timer */
+
+#define RESPID_REG_800 0x4a /* Response ID, bit fielded. 8
+ bits on narrow chips, 16 on WIDE */
+
+#define STEST0_REG_800 0x4c
+#define STEST0_800_SLT 0x08 /* Selection response logic test */
+#define STEST0_800_ART 0x04 /* Arbitration priority encoder test */
+#define STEST0_800_SOZ 0x02 /* Synchronous offset zero */
+#define STEST0_800_SOM 0x01 /* Synchronous offset maximum */
+
+#define STEST1_REG_800 0x4d
+#define STEST1_800_SCLK 0x80 /* Disable SCSI clock */
+
+#define STEST2_REG_800 0x4e
+#define STEST2_800_SCE 0x80 /* Enable SOCL/SODL */
+#define STEST2_800_ROF 0x40 /* Reset SCSI sync offset */
+#define STEST2_800_SLB 0x10 /* Enable SCSI loopback mode */
+#define STEST2_800_SZM 0x08 /* SCSI high impedance mode */
+#define STEST2_800_EXT 0x02 /* Extend REQ/ACK filter 30 to 60ns */
+#define STEST2_800_LOW 0x01 /* SCSI low level mode */
+
+#define STEST3_REG_800 0x4f
+#define STEST3_800_TE 0x80 /* Enable active negation */
+#define STEST3_800_STR 0x40 /* SCSI FIFO test read */
+#define STEST3_800_HSC 0x20 /* Halt SCSI clock */
+#define STEST3_800_DSI 0x10 /* Disable single initiator response */
+#define STEST3_800_TTM 0x04 /* Time test mode */
+#define STEST3_800_CSF 0x02 /* Clear SCSI FIFO */
+#define STEST3_800_STW 0x01 /* SCSI FIFO test write */
+
+#define OPTION_PARITY 0x1 /* Enable parity checking */
+#define OPTION_TAGGED_QUEUE 0x2 /* Enable SCSI-II tagged queuing */
+#define OPTION_700 0x8 /* Always run NCR53c700 scripts */
+#define OPTION_INTFLY 0x10 /* Use INTFLY interrupts */
+#define OPTION_DEBUG_INTR 0x20 /* Debug interrupts */
+#define OPTION_DEBUG_INIT_ONLY 0x40 /* Run initialization code and
+ simple test code, return
+ DID_NO_CONNECT if any SCSI
+ commands are attempted. */
+#define OPTION_DEBUG_READ_ONLY 0x80 /* Return DID_ERROR if any
+ SCSI write is attempted */
+#define OPTION_DEBUG_TRACE 0x100 /* Animated trace mode, print
+ each address and instruction
+ executed to debug buffer. */
+#define OPTION_DEBUG_SINGLE 0x200 /* stop after executing one
+ instruction */
+#define OPTION_SYNCHRONOUS 0x400 /* Enable sync SCSI. */
+#define OPTION_MEMORY_MAPPED 0x800 /* NCR registers have valid
+ memory mapping */
+#define OPTION_IO_MAPPED 0x1000 /* NCR registers have valid
+ I/O mapping */
+#define OPTION_DEBUG_PROBE_ONLY 0x2000 /* Probe only, don't even init */
+#define OPTION_DEBUG_TESTS_ONLY 0x4000 /* Probe, init, run selected tests */
+#define OPTION_DEBUG_TEST0 0x08000 /* Run test 0 */
+#define OPTION_DEBUG_TEST1 0x10000 /* Run test 1 */
+#define OPTION_DEBUG_TEST2 0x20000 /* Run test 2 */
+#define OPTION_DEBUG_DUMP 0x40000 /* Dump commands */
+#define OPTION_DEBUG_TARGET_LIMIT 0x80000 /* Only talk to target+luns specified */
+#define OPTION_DEBUG_NCOMMANDS_LIMIT 0x100000 /* Limit the number of commands */
+#define OPTION_DEBUG_SCRIPT 0x200000 /* Print when checkpoints are passed */
+#define OPTION_DEBUG_FIXUP 0x400000 /* print fixup values */
+#define OPTION_DEBUG_DSA 0x800000
+#define OPTION_DEBUG_CORRUPTION 0x1000000 /* Detect script corruption */
+#define OPTION_DEBUG_SDTR 0x2000000 /* Debug SDTR problem */
+#define OPTION_DEBUG_MISMATCH 0x4000000 /* Debug phase mismatches */
+#define OPTION_DISCONNECT 0x8000000 /* Allow disconnect */
+#define OPTION_DEBUG_DISCONNECT 0x10000000
+#define OPTION_ALWAYS_SYNCHRONOUS 0x20000000 /* Negotiate sync. transfers
+ on power up */
+#define OPTION_DEBUG_QUEUES 0x80000000
+#define OPTION_DEBUG_ALLOCATION 0x100000000LL
+#define OPTION_DEBUG_SYNCHRONOUS 0x200000000LL /* Sanity check SXFER and
+ SCNTL3 registers */
+#define OPTION_NO_ASYNC 0x400000000LL /* Don't automagically send
+ SDTR for async transfers when
+ we haven't been told to do
+ a synchronous transfer. */
+#define OPTION_NO_PRINT_RACE 0x800000000LL /* Don't print message when
+ the reselect/WAIT DISCONNECT
+ race condition hits */
+#if !defined(PERM_OPTIONS)
+#define PERM_OPTIONS 0
+#endif
+
+struct NCR53c7x0_synchronous {
+ u32 select_indirect; /* Value used for indirect selection */
+ u32 script[8]; /* Size ?? Script used when target is
+ reselected */
+ unsigned char synchronous_want[5]; /* Per target desired SDTR */
+/*
+ * Set_synchronous programs these, select_indirect and current settings after
+ * int_debug_should show a match.
+ */
+ unsigned char sxfer_sanity, scntl3_sanity;
+};
+
+#define CMD_FLAG_SDTR 1 /* Initiating synchronous
+ transfer negotiation */
+#define CMD_FLAG_WDTR 2 /* Initiating wide transfer
+ negotiation */
+#define CMD_FLAG_DID_SDTR 4 /* did SDTR */
+#define CMD_FLAG_DID_WDTR 8 /* did WDTR */
+
+struct NCR53c7x0_table_indirect {
+ u32 count;
+ void *address;
+};
+
+enum ncr_event {
+ EVENT_NONE = 0,
+/*
+ * Order is IMPORTANT, since these must correspond to the event interrupts
+ * in 53c7,8xx.scr
+ */
+
+ EVENT_ISSUE_QUEUE = 0x5000000, /* Command was added to issue queue */
+ EVENT_START_QUEUE, /* Command moved to start queue */
+ EVENT_SELECT, /* Command completed selection */
+ EVENT_DISCONNECT, /* Command disconnected */
+ EVENT_RESELECT, /* Command reselected */
+ EVENT_COMPLETE, /* Command completed */
+ EVENT_IDLE,
+ EVENT_SELECT_FAILED,
+ EVENT_BEFORE_SELECT,
+ EVENT_RESELECT_FAILED
+};
+
+struct NCR53c7x0_event {
+ enum ncr_event event; /* What type of event */
+ unsigned char target;
+ unsigned char lun;
+ struct timeval time;
+ u32 *dsa; /* What's in the DSA register now (virt) */
+/*
+ * A few things from that SCSI pid so we know what happened after
+ * the Scsi_Cmnd structure in question may have disappeared.
+ */
+ unsigned long pid; /* The SCSI PID which caused this
+ event */
+ unsigned char cmnd[12];
+};
+
+/*
+ * Things in the NCR53c7x0_cmd structure are split into two parts :
+ *
+ * 1. A fixed portion, for things which are not accessed directly by static NCR
+ * code (ie, are referenced only by the Linux side of the driver,
+ * or only by dynamically generated code).
+ *
+ * 2. The DSA portion, for things which are accessed directly by static NCR
+ * code.
+ *
+ * This is a little ugly, but it
+ * 1. Avoids conflicts between the NCR code's picture of the structure, and
+ * Linux code's idea of what it looks like.
+ *
+ * 2. Minimizes the pain in the Linux side of the code needed
+ * to calculate real dsa locations for things, etc.
+ *
+ */
+
+struct NCR53c7x0_cmd {
+ void *real; /* Real, unaligned address for
+ free function */
+ void (* free)(void *, int); /* Command to deallocate; NULL
+ for structures allocated with
+ scsi_register, etc. */
+ Scsi_Cmnd *cmd; /* Associated Scsi_Cmnd
+ structure, Scsi_Cmnd points
+ at NCR53c7x0_cmd using
+ host_scribble structure */
+
+ int size; /* scsi_malloc'd size of this
+ structure */
+
+ int flags; /* CMD_* flags */
+
+/*
+ * SDTR and WIDE messages are an either/or affair
+ * in this message, since we will go into message out and send
+ * _the whole mess_ without dropping out of message out to
+ * let the target go into message in after sending the first
+ * message.
+ */
+
+ unsigned char select[11]; /* Select message, includes
+ IDENTIFY
+ (optional) QUEUE TAG
+ (optional) SDTR or WDTR
+ */
+
+
+ volatile struct NCR53c7x0_cmd *next; /* Linux maintained lists (free,
+ running, eventually finished */
+
+
+ u32 *data_transfer_start; /* Start of data transfer routines */
+	u32 *data_transfer_end;		/* Address after end of data transfer
+ routines */
+/*
+ * The following three fields were moved from the DSA proper to here
+ * since only dynamically generated NCR code refers to them, meaning
+ * we don't need dsa_* absolutes, and it is simpler to let the
+ * host code refer to them directly.
+ */
+
+/*
+ * HARD CODED : residual and saved_residual need to agree with the sizes
+ * used in NCR53c7,8xx.scr.
+ *
+ * FIXME: we want to consider the case where we have odd-length
+ * scatter/gather buffers and a WIDE transfer, in which case
+ * we'll need to use the CHAIN MOVE instruction. Ick.
+ */
+ u32 residual[6]; /* Residual data transfer which
+ allows pointer code to work
+ right.
+
+ [0-1] : Conditional call to
+ appropriate other transfer
+ routine.
+ [2-3] : Residual block transfer
+ instruction.
+ [4-5] : Jump to instruction
+ after splice.
+ */
+ u32 saved_residual[6]; /* Copy of old residual, so we
+ can get another partial
+ transfer and still recover
+ */
+
+ u32 saved_data_pointer; /* Saved data pointer */
+
+ u32 dsa_next_addr; /* _Address_ of dsa_next field
+ in this dsa for RISCy
+ style constant. */
+
+ u32 dsa_addr; /* Address of dsa; RISCy style
+ constant */
+
+ u32 dsa[0]; /* Variable length (depending
+ on host type, number of scatter /
+ gather buffers, etc). */
+};
+
+struct NCR53c7x0_break {
+ u32 *address, old_instruction[2];
+ struct NCR53c7x0_break *next;
+ unsigned char old_size; /* Size of old instruction */
+};
+
+/* Indicates that the NCR is not executing code */
+#define STATE_HALTED 0
+/*
+ * Indicates that the NCR is executing the wait for select / reselect
+ * script. Only used when running NCR53c700 compatible scripts, only
+ * state during which an ABORT is _not_ considered an error condition.
+ */
+#define STATE_WAITING 1
+/* Indicates that the NCR is executing other code. */
+#define STATE_RUNNING 2
+/*
+ * Indicates that the NCR was being aborted.
+ */
+#define STATE_ABORTING 3
+/* Indicates that the NCR was successfully aborted. */
+#define STATE_ABORTED 4
+/* Indicates that the NCR has been disabled due to a fatal error */
+#define STATE_DISABLED 5
+
+/*
+ * Where knowledge of SCSI SCRIPT(tm) specified values are needed
+ * in an interrupt handler, an interrupt handler exists for each
+ * different SCSI script so we don't have name space problems.
+ *
+ * Return values of these handlers are as follows :
+ */
+#define SPECIFIC_INT_NOTHING 0 /* don't even restart */
+#define SPECIFIC_INT_RESTART 1 /* restart at the next instruction */
+#define SPECIFIC_INT_ABORT 2 /* recoverable error, abort cmd */
+#define SPECIFIC_INT_PANIC 3 /* unrecoverable error, panic */
+#define SPECIFIC_INT_DONE 4 /* normal command completion */
+#define SPECIFIC_INT_BREAK 5 /* break point encountered */
+
+struct NCR53c7x0_hostdata {
+ int size; /* Size of entire Scsi_Host
+ structure */
+ int board; /* set to board type, useful if
+ we have host specific things,
+ ie, a general purpose I/O
+ bit is being used to enable
+ termination, etc. */
+
+ int chip; /* set to chip type; 700-66 is
+ 700-66, rest are last three
+ digits of part number */
+ /*
+ * PCI bus, device, function, only for NCR53c8x0 chips.
+ * pci_valid indicates that the PCI configuration information
+ * is valid, and we can twiddle MAX_LAT, etc. as recommended
+ * for maximum performance in the NCR documentation.
+ */
+ unsigned char pci_bus, pci_device_fn;
+ unsigned pci_valid:1;
+
+ u32 *dsp; /* dsp to restart with after
+ all stacked interrupts are
+ handled. */
+
+ unsigned dsp_changed:1; /* Has dsp changed within this
+ set of stacked interrupts ? */
+
+ unsigned char dstat; /* Most recent value of dstat */
+ unsigned dstat_valid:1;
+
+ unsigned expecting_iid:1; /* Expect IID interrupt */
+ unsigned expecting_sto:1; /* Expect STO interrupt */
+
+ /*
+ * The code stays cleaner if we use variables with function
+ * pointers and offsets that are unique for the different
+ * scripts rather than having a slew of switch(hostdata->chip)
+ * statements.
+ *
+ * It also means that the #defines from the SCSI SCRIPTS(tm)
+ * don't have to be visible outside of the script-specific
+ * instructions, preventing name space pollution.
+ */
+
+ void (* init_fixup)(struct Scsi_Host *host);
+ void (* init_save_regs)(struct Scsi_Host *host);
+ void (* dsa_fixup)(struct NCR53c7x0_cmd *cmd);
+ void (* soft_reset)(struct Scsi_Host *host);
+ int (* run_tests)(struct Scsi_Host *host);
+
+ /*
+ * Called when DSTAT_SIR is set, indicating an interrupt generated
+ * by the INT instruction, where values are unique for each SCSI
+ * script. Should return one of the SPEC_* values.
+ */
+
+ int (* dstat_sir_intr)(struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
+
+ int dsa_len; /* Size of DSA structure */
+
+ /*
+ * Location of DSA fields for the SCSI SCRIPT corresponding to this
+ * chip.
+ */
+
+ s32 dsa_start;
+ s32 dsa_end;
+ s32 dsa_next;
+ s32 dsa_prev;
+ s32 dsa_cmnd;
+ s32 dsa_select;
+ s32 dsa_msgout;
+ s32 dsa_cmdout;
+ s32 dsa_dataout;
+ s32 dsa_datain;
+ s32 dsa_msgin;
+ s32 dsa_msgout_other;
+ s32 dsa_write_sync;
+ s32 dsa_write_resume;
+ s32 dsa_check_reselect;
+ s32 dsa_status;
+ s32 dsa_saved_pointer;
+ s32 dsa_jump_dest;
+
+ /*
+ * Important entry points that generic fixup code needs
+ * to know about, fixed up.
+ */
+
+ s32 E_accept_message;
+ s32 E_command_complete;
+ s32 E_data_transfer;
+ s32 E_dsa_code_template;
+ s32 E_dsa_code_template_end;
+ s32 E_end_data_transfer;
+ s32 E_msg_in;
+ s32 E_initiator_abort;
+ s32 E_other_transfer;
+ s32 E_other_in;
+ s32 E_other_out;
+ s32 E_target_abort;
+ s32 E_debug_break;
+ s32 E_reject_message;
+ s32 E_respond_message;
+ s32 E_select;
+ s32 E_select_msgout;
+ s32 E_test_0;
+ s32 E_test_1;
+ s32 E_test_2;
+ s32 E_test_3;
+ s32 E_dsa_zero;
+ s32 E_cmdout_cmdout;
+ s32 E_wait_reselect;
+ s32 E_dsa_code_begin;
+
+ long long options; /* Bitfielded set of options enabled */
+ volatile u32 test_completed; /* Test completed */
+ int test_running; /* Test currently running */
+ s32 test_source;
+ volatile s32 test_dest;
+
+ volatile int state; /* state of driver, only used for
+ OPTION_700 */
+
+ unsigned char dmode; /*
+ * set to the address of the DMODE
+ * register for this chip.
+ */
+ unsigned char istat; /*
+ * set to the address of the ISTAT
+ * register for this chip.
+ */
+
+ int scsi_clock; /*
+ * SCSI clock in HZ. 0 may be used
+ * for unknown, although this will
+ * disable synchronous negotiation.
+ */
+
+ volatile int intrs; /* Number of interrupts */
+ volatile int resets; /* Number of SCSI resets */
+ unsigned char saved_dmode;
+ unsigned char saved_ctest4;
+ unsigned char saved_ctest7;
+ unsigned char saved_dcntl;
+ unsigned char saved_scntl3;
+
+ unsigned char this_id_mask;
+
+ /* Debugger information */
+ struct NCR53c7x0_break *breakpoints, /* Linked list of all break points */
+ *breakpoint_current; /* Current breakpoint being stepped
+ through, NULL if we are running
+ normally. */
+#ifdef NCR_DEBUG
+ int debug_size; /* Size of debug buffer */
+ volatile int debug_count; /* Current data count */
+ volatile char *debug_buf; /* Output ring buffer */
+ volatile char *debug_write; /* Current write pointer */
+ volatile char *debug_read; /* Current read pointer */
+#endif /* def NCR_DEBUG */
+
+ /* XXX - primitive debugging junk, remove when working ? */
+ int debug_print_limit; /* Number of commands to print
+ out exhaustive debugging
+ information for if
+ OPTION_DEBUG_DUMP is set */
+
+ unsigned char debug_lun_limit[16]; /* If OPTION_DEBUG_TARGET_LIMIT
+ set, puke if commands are sent
+ to other target/lun combinations */
+
+ int debug_count_limit; /* Number of commands to execute
+ before puking to limit debugging
+ output */
+
+
+ volatile unsigned idle:1; /* set to 1 if idle */
+
+ /*
+ * Table of synchronous+wide transfer parameters set on a per-target
+ * basis.
+ */
+
+ volatile struct NCR53c7x0_synchronous sync[16];
+
+ volatile Scsi_Cmnd *issue_queue;
+ /* waiting to be issued by
+ Linux driver */
+ volatile struct NCR53c7x0_cmd *running_list;
+ /* commands running, maintained
+ by Linux driver */
+
+ volatile struct NCR53c7x0_cmd *current; /* currently connected
+ nexus, ONLY valid for
+ NCR53c700/NCR53c700-66
+ */
+
+ volatile struct NCR53c7x0_cmd *spare; /* pointer to spare,
+ allocated at probe time,
+ which we can use for
+ initialization */
+ volatile struct NCR53c7x0_cmd *free;
+ int max_cmd_size; /* Maximum size of NCR53c7x0_cmd
+ based on number of
+ scatter/gather segments, etc.
+ */
+ volatile int num_cmds; /* Number of commands
+ allocated */
+ volatile int extra_allocate;
+ volatile unsigned char cmd_allocated[16]; /* Have we allocated commands
+ for this target yet? If not,
+ do so ASAP */
+ volatile unsigned char busy[16][8]; /* number of commands
+ executing on each target
+ */
+ /*
+ * Eventually, I'll switch to a coroutine for calling
+ * cmd->done(cmd), etc. so that we can overlap interrupt
+ * processing with this code for maximum performance.
+ */
+
+ volatile struct NCR53c7x0_cmd *finished_queue;
+
+
+ /* Shared variables between SCRIPT and host driver */
+ volatile u32 *schedule; /* Array of JUMPs to dsa_begin
+ routines of various DSAs.
+ When not in use, replace
+ with jump to next slot */
+
+
+ volatile unsigned char msg_buf[16]; /* buffer for messages
+ other than the command
+ complete message */
+
+ /* Per-target default synchronous and WIDE messages */
+ volatile unsigned char synchronous_want[16][5];
+ volatile unsigned char wide_want[16][4];
+
+ /* Bit fielded set of targets we want to speak synchronously with */
+ volatile u16 initiate_sdtr;
+ /* Bit fielded set of targets we want to speak wide with */
+ volatile u16 initiate_wdtr;
+ /* Bit fielded list of targets we've talked to. */
+ volatile u16 talked_to;
+
+ /* Array of bit-fielded lun lists that we need to request_sense */
+ volatile unsigned char request_sense[16];
+
+ u32 addr_reconnect_dsa_head; /* RISCy style constant,
+ address of following */
+ volatile u32 reconnect_dsa_head;
+ /* Data identifying nexus we are trying to match during reselection */
+ volatile unsigned char reselected_identify; /* IDENTIFY message */
+ volatile unsigned char reselected_tag; /* second byte of queue tag
+ message or 0 */
+ /* These were static variables before we moved them */
+
+ s32 NCR53c7xx_zero;
+ s32 NCR53c7xx_sink;
+ u32 NOP_insn;
+ char NCR53c7xx_msg_reject;
+ char NCR53c7xx_msg_abort;
+ char NCR53c7xx_msg_nop;
+
+ volatile int event_size, event_index;
+ volatile struct NCR53c7x0_event *events;
+
+ /* If we need to generate code to kill off the currently connected
+ command, this is where we do it. Should have a BMI instruction
+ to source or sink the current data, followed by a JUMP
+ to abort_connected */
+
+ u32 *abort_script;
+
+ int script_count; /* Size of script in words */
+ u32 script[0]; /* Relocated SCSI script */
+
+};
+
+#define IRQ_NONE 255
+#define DMA_NONE 255
+#define IRQ_AUTO 254
+#define DMA_AUTO 254
+
+#define BOARD_GENERIC 0
+
+#define NCR53c7x0_insn_size(insn) \
+ (((insn) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI ? 3 : 2)
+
+
+#define NCR53c7x0_local_declare() \
+ volatile unsigned char *NCR53c7x0_address_memory; \
+ unsigned int NCR53c7x0_address_io; \
+ int NCR53c7x0_memory_mapped
+
+#define NCR53c7x0_local_setup(host) \
+ NCR53c7x0_address_memory = (void *) (host)->base; \
+ NCR53c7x0_address_io = (unsigned int) (host)->io_port; \
+ NCR53c7x0_memory_mapped = ((struct NCR53c7x0_hostdata *) \
+ host->hostdata)-> options & OPTION_MEMORY_MAPPED
+
+#define NCR53c7x0_read8(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int)readb(NCR53c7x0_address_memory + (address)) : \
+ inb(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_read16(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int)readw(NCR53c7x0_address_memory + (address)) : \
+ inw(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_read32(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int) readl(NCR53c7x0_address_memory + (address)) : \
+ inl(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write8(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writeb((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outb((value), NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write16(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writew((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outw((value), NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write32(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writel((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outl((value), NCR53c7x0_address_io + (address)))
+
+/* Patch arbitrary 32 bit words in the script */
+#define patch_abs_32(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) { \
+ (script)[A_##symbol##_used[i] - (offset)] += (value); \
+ if (hostdata->options & OPTION_DEBUG_FIXUP) \
+ printk("scsi%d : %s reference %d at 0x%x in %s is now 0x%x\n",\
+ host->host_no, #symbol, i, A_##symbol##_used[i] - \
+ (int)(offset), #script, (script)[A_##symbol##_used[i] - \
+ (offset)]); \
+ }
+
+/* Patch read/write instruction immediate field */
+#define patch_abs_rwri_data(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) \
+ (script)[A_##symbol##_used[i] - (offset)] = \
+ ((script)[A_##symbol##_used[i] - (offset)] & \
+ ~DBC_RWRI_IMMEDIATE_MASK) | \
+ (((value) << DBC_RWRI_IMMEDIATE_SHIFT) & \
+ DBC_RWRI_IMMEDIATE_MASK)
+
+/* Patch transfer control instruction data field */
+#define patch_abs_tci_data(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) \
+ (script)[A_##symbol##_used[i] - (offset)] = \
+ ((script)[A_##symbol##_used[i] - (offset)] & \
+ ~DBC_TCI_DATA_MASK) | \
+ (((value) << DBC_TCI_DATA_SHIFT) & \
+ DBC_TCI_DATA_MASK)
+
+/* Patch field in dsa structure (assignment should be +=?) */
+#define patch_dsa_32(dsa, symbol, word, value) \
+ { \
+ (dsa)[(hostdata->symbol - hostdata->dsa_start) / sizeof(u32) \
+ + (word)] = (value); \
+ if (hostdata->options & OPTION_DEBUG_DSA) \
+ printk("scsi : dsa %s symbol %s(%d) word %d now 0x%x\n", \
+ #dsa, #symbol, hostdata->symbol, \
+ (word), (u32) (value)); \
+ }
+
+/* Paranoid people could use panic() here. */
+#define FATAL(host) shutdown((host));
+
+#endif /* NCR53c7x0_C */
+#endif /* NCR53c7x0_H */
diff --git a/linux/src/drivers/scsi/53c78xx.c b/linux/src/drivers/scsi/53c78xx.c
new file mode 100644
index 0000000..e6a66ff
--- /dev/null
+++ b/linux/src/drivers/scsi/53c78xx.c
@@ -0,0 +1,6401 @@
+/*
+ * PERM_OPTIONS are driver options which will be enabled for all NCR boards
+ * in the system at driver initialization time.
+ *
+ * Don't THINK about touching these in PERM_OPTIONS :
+ * OPTION_IO_MAPPED
+ * Memory mapped IO does not work under i86 Linux.
+ *
+ * OPTION_DEBUG_TEST1
+ * Test 1 does bus mastering and interrupt tests, which will help weed
+ * out brain damaged main boards.
+ *
+ * These are development kernel changes. Code for them included in this
+ * driver release may or may not work. If you turn them on, you should be
+ * running the latest copy of the development sources from
+ *
+ * ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/53c7,8xx
+ *
+ * and be subscribed to the ncr53c810@colorado.edu mailing list. To
+ * subscribe, send mail to majordomo@colorado.edu with
+ *
+ * subscribe ncr53c810
+ *
+ * in the text.
+ *
+ *
+ * OPTION_NO_ASYNC
+ * Don't negotiate for asynchronous transfers on the first command
+ * when OPTION_ALWAYS_SYNCHRONOUS is set. Useful for dain bramaged
+ * devices which do something bad rather than sending a MESSAGE
+ * REJECT back to us like they should if they can't cope.
+ *
+ * OPTION_SYNCHRONOUS
+ * Enable support for synchronous transfers. Target negotiated
+ * synchronous transfers will be responded to. To initiate
+ * a synchronous transfer request, call
+ *
+ * request_synchronous (hostno, target)
+ *
+ * from within KGDB.
+ *
+ * OPTION_ALWAYS_SYNCHRONOUS
+ * Negotiate for synchronous transfers with every target after
+ * driver initialization or a SCSI bus reset. This is a bit dangerous,
+ * since there are some dain bramaged SCSI devices which will accept
+ * SDTR messages but keep talking asynchronously.
+ *
+ * OPTION_DISCONNECT
+ * Enable support for disconnect/reconnect. To change the
+ * default setting on a given host adapter, call
+ *
+ * request_disconnect (hostno, allow)
+ *
+ * where allow is non-zero to allow, 0 to disallow.
+ *
+ * If you really want to run 10MHz FAST SCSI-II transfers, you should
+ * know that the NCR driver currently ignores parity information. Most
+ * systems do 5MHz SCSI fine. I've seen a lot that have problems faster
+ * than 8MHz. To play it safe, we only request 5MHz transfers.
+ *
+ * If you'd rather get 10MHz transfers, edit sdtr_message and change
+ * the fourth byte from 50 to 25.
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SCSI_NCR53C7xx_sync
+#ifdef CONFIG_SCSI_NCR53C7xx_DISCONNECT
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|OPTION_DISCONNECT|\
+ OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS)
+#else
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|\
+ OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS)
+#endif
+#else
+#ifdef CONFIG_SCSI_NCR53C7xx_DISCONNECT
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|OPTION_DISCONNECT|\
+ OPTION_SYNCHRONOUS)
+#else
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|OPTION_SYNCHRONOUS)
+#endif
+#endif
+
+/*
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@PoohSticks.ORG
+ * +1 (303) 786-7975
+ *
+ * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+ *
+ * For more information, please consult
+ *
+ * NCR53C810
+ * SCSI I/O Processor
+ * Programmer's Guide
+ *
+ * NCR 53C810
+ * PCI-SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR 53C810/53C820
+ * PCI-SCSI I/O Processor Design In Guide
+ *
+ * For literature on Symbios Logic Inc. formerly NCR, SCSI,
+ * and Communication products please call (800) 334-5454 or
+ * (719) 536-3300.
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ */
+
+/*
+ * Design issues :
+ * The cumulative latency needed to propagate a read/write request
+ * through the file system, buffer cache, driver stacks, SCSI host, and
+ * SCSI device is ultimately the limiting factor in throughput once we
+ * have a sufficiently fast host adapter.
+ *
+ * So, to maximize performance we want to keep the ratio of latency to data
+ * transfer time to a minimum by
+ * 1. Minimizing the total number of commands sent (typical command latency
+ * including drive and bus mastering host overhead is as high as 4.5ms)
+ * to transfer a given amount of data.
+ *
+ * This is accomplished by placing no arbitrary limit on the number
+ * of scatter/gather buffers supported, since we can transfer 1K
+ * per scatter/gather buffer without Eric's cluster patches,
+ * 4K with.
+ *
+ * 2. Minimizing the number of fatal interrupts serviced, since
+ * fatal interrupts halt the SCSI I/O processor. Basically,
+ * this means offloading the practical maximum amount of processing
+ * to the SCSI chip.
+ *
+ * On the NCR53c810/820/720, this is accomplished by using
+ * interrupt-on-the-fly signals when commands complete,
+ * and only handling fatal errors and SDTR / WDTR messages
+ * in the host code.
+ *
+ * On the NCR53c710, interrupts are generated as on the NCR53c8x0,
+ * only the lack of a interrupt-on-the-fly facility complicates
+ * things. Also, SCSI ID registers and commands are
+ * bit fielded rather than binary encoded.
+ *
+ * On the NCR53c700 and NCR53c700-66, operations that are done via
+ * indirect, table mode on the more advanced chips must be
+ * replaced by calls through a jump table which
+ * acts as a surrogate for the DSA. Unfortunately, this
+ * will mean that we must service an interrupt for each
+ * disconnect/reconnect.
+ *
+ * 3. Eliminating latency by pipelining operations at the different levels.
+ *
+ * This driver allows a configurable number of commands to be enqueued
+ * for each target/lun combination (experimentally, I have discovered
+ * that two seems to work best) and will ultimately allow for
+ * SCSI-II tagged queuing.
+ *
+ *
+ * Architecture :
+ * This driver is built around a Linux queue of commands waiting to
+ * be executed, and a shared Linux/NCR array of commands to start. Commands
+ * are transferred to the array by the run_process_issue_queue() function
+ * which is called whenever a command completes.
+ *
+ * As commands are completed, the interrupt routine is triggered,
+ * looks for commands in the linked list of completed commands with
+ * valid status, removes these commands from a list of running commands,
+ * calls the done routine, and flags their target/luns as not busy.
+ *
+ * Due to limitations in the intelligence of the NCR chips, certain
+ * concessions are made. In many cases, it is easier to dynamically
+ * generate/fix-up code rather than calculate on the NCR at run time.
+ * So, code is generated or fixed up for
+ *
+ * - Handling data transfers, using a variable number of MOVE instructions
+ * interspersed with CALL MSG_IN, WHEN MSGIN instructions.
+ *
+ * The DATAIN and DATAOUT routines are separate, so that an incorrect
+ * direction can be trapped, and space isn't wasted.
+ *
+ * It may turn out that we're better off using some sort
+ * of table indirect instruction in a loop with a variable
+ * sized table on the NCR53c710 and newer chips.
+ *
+ * - Checking for reselection (NCR53c710 and better)
+ *
+ * - Handling the details of SCSI context switches (NCR53c710 and better),
+ * such as reprogramming appropriate synchronous parameters,
+ * removing the dsa structure from the NCR's queue of outstanding
+ * commands, etc.
+ *
+ */
+
+/*
+ * Accommodate differences between stock 1.2.x and 1.3.x asm-i386/types.h
+ * so lusers can drop in 53c7,8xx.* and get something which compiles
+ * without warnings.
+ */
+
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else
+#define LINUX_1_2
+#endif
+#endif
+
+#ifdef LINUX_1_2
+#define u32 bogus_u32
+#define s32 bogus_s32
+#include <asm/types.h>
+#undef u32
+#undef s32
+typedef __signed__ int s32;
+typedef unsigned int u32;
+#endif /* def LINUX_1_2 */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#ifdef LINUX_1_2
+#include "../block/blk.h"
+#else
+#include <linux/blk.h>
+#endif
+#undef current
+
+#include "scsi.h"
+#include "hosts.h"
+#include "53c7,8xx.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+#include <linux/stddef.h>
+
+#ifndef LINUX_1_2
+struct proc_dir_entry proc_scsi_ncr53c7xx = {
+ PROC_SCSI_NCR53C7xx, 9, "ncr53c7xx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#endif
+
+static int check_address (unsigned long addr, int size);
+static void dump_events (struct Scsi_Host *host, int count);
+static Scsi_Cmnd * return_outstanding_commands (struct Scsi_Host *host,
+ int free, int issue);
+static void hard_reset (struct Scsi_Host *host);
+static void ncr_scsi_reset (struct Scsi_Host *host);
+static void print_lots (struct Scsi_Host *host);
+static void set_synchronous (struct Scsi_Host *host, int target, int sxfer,
+ int scntl3, int now_connected);
+static int datapath_residual (struct Scsi_Host *host);
+static const char * sbcl_to_phase (int sbcl);
+static void print_progress (Scsi_Cmnd *cmd);
+static void print_queues (struct Scsi_Host *host);
+static void process_issue_queue (unsigned long flags);
+static int shutdown (struct Scsi_Host *host);
+static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int result);
+static int disable (struct Scsi_Host *host);
+static int NCR53c8xx_run_tests (struct Scsi_Host *host);
+static int NCR53c8xx_script_len;
+static int NCR53c8xx_dsa_len;
+static void NCR53c7x0_intr(int irq, void *dev_id, struct pt_regs * regs);
+static int ncr_halt (struct Scsi_Host *host);
+static void intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd
+ *cmd);
+static void intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
+static void print_dsa (struct Scsi_Host *host, u32 *dsa,
+ const char *prefix);
+static int print_insn (struct Scsi_Host *host, const u32 *insn,
+ const char *prefix, int kernel);
+
+static void NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd);
+static void NCR53c8x0_init_fixup (struct Scsi_Host *host);
+static int NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd);
+static void NCR53c8x0_soft_reset (struct Scsi_Host *host);
+
+/* INSMOD variables */
+/* Options ORed into every host's options word at normal_init() time. */
+static long long perm_options = PERM_OPTIONS;
+/* 14 = .5s; 15 is max; decreasing divides by two. */
+static int selection_timeout = 14;
+/* Size of event list (per host adapter); 0 disables event tracking. */
+static int track_events = 0;
+
+static struct Scsi_Host *first_host = NULL; /* Head of list of NCR boards */
+static Scsi_Host_Template *the_template = NULL;
+
+/*
+ * KNOWN BUGS :
+ * - There is some sort of conflict when the PPP driver is compiled with
+ * support for 16 channels?
+ *
+ * - On systems which predate the 1.3.x initialization order change,
+ * the NCR driver will cause Cannot get free page messages to appear.
+ * These are harmless, but I don't know of an easy way to avoid them.
+ *
+ * - With OPTION_DISCONNECT, on two systems under unknown circumstances,
+ * we get a PHASE MISMATCH with DSA set to zero (suggests that this
+ * is occurring somewhere in the reselection code) where
+ * DSP=some value DCMD|DBC=same value.
+ *
+ * Closer inspection suggests that we may be trying to execute
+ * some portion of the DSA?
+ * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
+ * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
+ * scsi0 : no current command : unexpected phase MSGIN.
+ * DSP=0x1c46cc, DCMD|DBC=0x1c46ac, DSA=0x0
+ * DSPS=0x0, TEMP=0x1c3e70, DMODE=0x80
+ * scsi0 : DSP->
+ * 001c46cc : 0x001c46cc 0x00000000
+ * 001c46d4 : 0x001c5ea0 0x000011f8
+ *
+ * Changed the print code in the phase_mismatch handler so
+ * that we call print_lots to try to diagnose this.
+ *
+ */
+
+/*
+ * Possible future direction of architecture for max performance :
+ *
+ * We're using a single start array for the NCR chip. This is
+ * sub-optimal, because we cannot add a command which would conflict with
+ * an executing command to this start queue, and therefore must insert the
+ * next command for a given I/T/L combination after the first has completed;
+ * incurring our interrupt latency between SCSI commands.
+ *
+ * To allow further pipelining of the NCR and host CPU operation, we want
+ * to set things up so that immediately on termination of a command destined
+ * for a given LUN, we get that LUN busy again.
+ *
+ * To do this, we need to add a 32 bit pointer to which is jumped to
+ * on completion of a command. If no new command is available, this
+ * would point to the usual DSA issue queue select routine.
+ *
+ * If one were, it would point to a per-NCR53c7x0_cmd select routine
+ * which starts execution immediately, inserting the command at the head
+ * of the start queue if the NCR chip is selected or reselected.
+ *
+ * We would change so that we keep a list of outstanding commands
+ * for each unit, rather than a single running_list. We'd insert
+ * a new command into the right running list; if the NCR didn't
+ * have something running for that yet, we'd put it in the
+ * start queue as well. Some magic needs to happen to handle the
+ * race condition between the first command terminating before the
+ * new one is written.
+ *
+ * Potential for profiling :
+ * Call do_gettimeofday(struct timeval *tv) to get 800ns resolution.
+ */
+
+
+/*
+ * TODO :
+ * 1. To support WIDE transfers, not much needs to happen. We
+ * should do CHMOVE instructions instead of MOVEs when
+ * we have scatter/gather segments of uneven length. When
+ * we do this, we need to handle the case where we disconnect
+ * between segments.
+ *
+ * 2. Currently, when Icky things happen we do a FATAL(). Instead,
+ * we want to do an integrity check on the parts of the NCR hostdata
+ * structure which were initialized at boot time; FATAL() if that
+ * fails, and otherwise try to recover. Keep track of how many
+ * times this has happened within a single SCSI command; if it
+ * gets excessive, then FATAL().
+ *
+ * 3. Parity checking is currently disabled, and a few things should
+ * happen here now that we support synchronous SCSI transfers :
+ * 1. On soft-reset, we should set the EPC (Enable Parity Checking)
+ * and AAP (Assert SATN/ on parity error) bits in SCNTL0.
+ *
+ * 2. We should enable the parity interrupt in the SIEN0 register.
+ *
+ * 3. intr_phase_mismatch() needs to believe that message out is
+ * always an "acceptable" phase to have a mismatch in. If
+ * the old phase was MSG_IN, we should send a MESSAGE PARITY
+ * error. If the old phase was something else, we should send
+ * a INITIATOR_DETECTED_ERROR message. Note that this could
+ * cause a RESTORE POINTERS message; so we should handle that
+ * correctly first. Instead, we should probably do an
+ * initiator_abort.
+ *
+ * 4. MPEE bit of CTEST4 should be set so we get interrupted if
+ * we detect an error.
+ *
+ *
+ * 5. The initial code has been tested on the NCR53c810. I don't
+ * have access to NCR53c700, 700-66 (Forex boards), NCR53c710
+ * (NCR Pentium systems), NCR53c720, NCR53c820, or NCR53c825 boards to
+ * finish development on those platforms.
+ *
+ * NCR53c820/825/720 - need to add wide transfer support, including WDTR
+ * negotiation, programming of wide transfer capabilities
+ * on reselection and table indirect selection.
+ *
+ * NCR53c710 - need to add fatal interrupt or GEN code for
+ * command completion signaling. Need to modify all
+ * SDID, SCID, etc. registers, and table indirect select code
+ * since these use bit fielded (ie 1<<target) instead of
+ * binary encoded target ids. Need to accommodate
+ * different register mappings, probably scan through
+ * the SCRIPT code and change the non SFBR register operand
+ * of all MOVE instructions.
+ *
+ * NCR53c700/700-66 - need to add code to refix addresses on
+ * every nexus change, eliminate all table indirect code,
+ * very messy.
+ *
+ * 6. The NCR53c7x0 series is very popular on other platforms that
+ * could be running Linux - ie, some high performance AMIGA SCSI
+ * boards use it.
+ *
+ * So, I should include #ifdef'd code so that it is
+ * compatible with these systems.
+ *
+ * Specifically, the little Endian assumptions I made in my
+ * bit fields need to change, and if the NCR doesn't see memory
+ * the right way, we need to provide options to reverse words
+ * when the scripts are relocated.
+ *
+ * 7. Use vremap() to access memory mapped boards.
+ */
+
+/*
+ * Allow for simultaneous existence of multiple SCSI scripts so we
+ * can have a single driver binary for all of the family.
+ *
+ * - one for NCR53c700 and NCR53c700-66 chips (not yet supported)
+ * - one for rest (only the NCR53c810, 815, 820, and 825 are currently
+ * supported)
+ *
+ * So that we only need two SCSI scripts, we need to modify things so
+ * that we fixup register accesses in READ/WRITE instructions, and
+ * we'll also have to accommodate the bit vs. binary encoding of IDs
+ * with the 7xx chips.
+ */
+
+/*
+ * Use pci_chips_ids to translate in both directions between PCI device ID
+ * and chip numbers.
+ */
+
+/*
+ * Table mapping PCI device IDs to NCR chip numbers; searched in both
+ * directions (ID -> chip and chip -> ID).
+ */
+static struct {
+ unsigned short pci_device_id; /* PCI_DEVICE_ID_NCR_* value */
+ int chip; /* chip number, e.g. 810, 825 */
+/*
+ * The revision field of the PCI_CLASS_REVISION register is compared
+ * against each of these fields if the field is not -1. If it
+ * is less than min_revision or larger than max_revision, a warning
+ * message is printed.
+ */
+ int max_revision;
+ int min_revision;
+} pci_chip_ids[] = {
+ {PCI_DEVICE_ID_NCR_53C810, 810, 2, 1},
+ {PCI_DEVICE_ID_NCR_53C815, 815, 3, 2},
+ {PCI_DEVICE_ID_NCR_53C820, 820, -1, -1},
+ {PCI_DEVICE_ID_NCR_53C825, 825, -1, -1}
+};
+
+/* Number of entries in the pci_chip_ids table. */
+#define NPCI_CHIP_IDS (sizeof (pci_chip_ids) / sizeof(pci_chip_ids[0]))
+
+/* Round adr up to the alignment of type; the mask arithmetic assumes
+ sizeof(type) is a power of two. */
+#define ROUNDUP(adr,type) \
+ ((void *) (((long) (adr) + sizeof(type) - 1) & ~(sizeof(type) - 1)))
+
+/*
+ * Forced detection and autoprobe code for various hardware. Currently,
+ * entry points for these are not included in init/main.c because if the
+ * PCI BIOS code isn't working right, you're not going to be able to use
+ * the hardware anyways; this way we force users to solve their
+ * problems rather than forcing detection and blaming us when it
+ * does not work.
+ */
+
+/*
+ * Command-line override table, filled in by internal_setup(). At most
+ * four boards can be overridden; only the first commandline_current
+ * entries are valid (see OVERRIDE_LIMIT below).
+ */
+static struct override {
+ int chip; /* 700, 70066, 710, 720, 810, 820 */
+ int board; /* Any special board level gunk */
+ unsigned pci:1; /* set when the pci variant of the union is valid */
+ union {
+ struct {
+ int base; /* Memory address - indicates memory mapped regs */
+ int io_port;/* I/O port address - indicates I/O mapped regs */
+ int irq; /* IRQ line */
+ int dma; /* DMA channel - often none */
+ } normal;
+ struct {
+ int bus;
+ int device;
+ int function;
+ } pci;
+ } data;
+ long long options;
+} overrides [4] = {{0,},};
+static int commandline_current = 0; /* next free slot in overrides[] */
+static int no_overrides = 0;
+
+#if 0
+#define OVERRIDE_LIMIT (sizeof(overrides) / sizeof(struct override))
+#else
+#define OVERRIDE_LIMIT commandline_current
+#endif
+
+/*
+ * Function: issue_to_cmd
+ *
+ * Purpose: convert jump instruction in issue array to NCR53c7x0_cmd
+ * structure pointer.
+ *
+ * Inputs : issue - pointer to start of NOP or JUMP instruction
+ * in issue array.
+ *
+ * Returns: pointer to command on success; 0 if opcode is NOP.
+ */
+
+/* Convert a JUMP entry in the issue array back into the NCR53c7x0_cmd that
+ owns it; returns NULL when the slot holds the NOP instruction instead. */
+static inline struct NCR53c7x0_cmd *
+issue_to_cmd (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+ u32 *issue)
+{
+ return (issue[0] != hostdata->NOP_insn) ?
+ /*
+ * If the IF TRUE bit is set, it's a JUMP instruction. The
+ * operand is a bus pointer to the dsa_begin routine for this DSA. The
+ * dsa field of the NCR53c7x0_cmd structure starts with the
+ * DSA code template. By converting to a virtual address,
+ * subtracting the code template size, and offset of the
+ * dsa field, we end up with a pointer to the start of the
+ * structure (alternatively, we could use the
+ * dsa_cmnd field, an anachronism from when we weren't
+ * sure what the relationship between the NCR structures
+ * and host structures were going to be.
+ */
+ (struct NCR53c7x0_cmd *) ((char *) bus_to_virt (issue[1]) -
+ (hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template) -
+ offsetof(struct NCR53c7x0_cmd, dsa))
+ /* If the IF TRUE bit is not set, it's a NOP */
+ : NULL;
+}
+
+
+/*
+ * Function : static internal_setup(int board, int chip, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : board - currently, unsupported. chip - 700, 70066, 710, 720
+ * 810, 815, 820, 825, although currently only the NCR53c810 is
+ * supported.
+ *
+ */
+
+/* Record one LILO command-line override in overrides[]. ints[0] holds the
+ number of integers that follow (LILO convention); missing trailing values
+ default to DMA_NONE / 0. Excess overrides beyond OVERRIDE_LIMIT are
+ reported and dropped. */
+static void
+internal_setup(int board, int chip, char *str, int *ints) {
+ unsigned char pci; /* Specifies a PCI override, with bus, device,
+ function */
+
+ pci = (str && !strcmp (str, "pci")) ? 1 : 0;
+
+/*
+ * Override syntaxes are as follows :
+ * ncr53c700,ncr53c700-66,ncr53c710,ncr53c720=mem,io,irq,dma
+ * ncr53c810,ncr53c820,ncr53c825=mem,io,irq or pci,bus,device,function
+ */
+
+ if (commandline_current < OVERRIDE_LIMIT) {
+ overrides[commandline_current].pci = pci ? 1 : 0;
+ if (!pci) {
+ overrides[commandline_current].data.normal.base = ints[1];
+ overrides[commandline_current].data.normal.io_port = ints[2];
+ overrides[commandline_current].data.normal.irq = ints[3];
+ overrides[commandline_current].data.normal.dma = (ints[0] >= 4) ?
+ ints[4] : DMA_NONE;
+ /* FIXME: options is now a long long */
+ overrides[commandline_current].options = (ints[0] >= 5) ?
+ ints[5] : 0;
+ } else {
+ overrides[commandline_current].data.pci.bus = ints[1];
+ overrides[commandline_current].data.pci.device = ints[2];
+ overrides[commandline_current].data.pci.function = ints[3];
+ /* FIXME: options is now a long long */
+ overrides[commandline_current].options = (ints[0] >= 4) ?
+ ints[4] : 0;
+ }
+ overrides[commandline_current].board = board;
+ overrides[commandline_current].chip = chip;
+ ++commandline_current;
+ ++no_overrides;
+ } else {
+ printk ("53c7,7x0.c:internal_setup() : too many overrides\n");
+ }
+}
+
+/*
+ * XXX - we might want to implement a single override function
+ * with a chip type field, revamp the command line configuration,
+ * etc.
+ */
+
+/* Generate the per-chip ncr53cNNN_setup() entry points expected by the
+ kernel command-line parser; each simply forwards to internal_setup()
+ with the chip number baked in. */
+#define setup_wrapper(x) \
+void ncr53c##x##_setup (char *str, int *ints) { \
+ internal_setup (BOARD_GENERIC, x, str, ints); \
+}
+
+setup_wrapper(700)
+setup_wrapper(70066)
+setup_wrapper(710)
+setup_wrapper(720)
+setup_wrapper(810)
+setup_wrapper(815)
+setup_wrapper(820)
+setup_wrapper(825)
+
+/*
+ * FIXME: we should junk these, in favor of synchronous_want and
+ * wide_want in the NCR53c7x0_hostdata structure.
+ */
+
+/* Template for "preferred" synchronous transfer parameters. */
+
+static const unsigned char sdtr_message[] = {
+#ifdef CONFIG_SCSI_NCR53C7xx_FAST
+ /* 25 * 4ns = 100ns transfer period (fast SCSI) */
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 25 /* *4ns */, 8 /* off */
+#else
+ /* 50 * 4ns = 200ns transfer period */
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 50 /* *4ns */, 8 /* off */
+#endif
+};
+
+/* Template to request asynchronous transfers */
+
+static const unsigned char async_message[] = {
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 0, 0 /* asynchronous */
+};
+
+/* Template for "preferred" WIDE transfer parameters */
+
+static const unsigned char wdtr_message[] = {
+ EXTENDED_MESSAGE, 2 /* length */, EXTENDED_WDTR, 1 /* 2^1 bytes */
+};
+
+/*
+ * Function : struct Scsi_Host *find_host (int host)
+ *
+ * Purpose : KGDB support function which translates a host number
+ * to a host structure.
+ *
+ * Inputs : host - number of SCSI host
+ *
+ * Returns : NULL on failure, pointer to host structure on success.
+ */
+
+/* Walk the global host list looking for the given host number; reject hosts
+ that exist but do not belong to this driver's template. Returns the host
+ structure, or NULL (with a KERN_ALERT message) on failure. */
+static struct Scsi_Host *
+find_host (int host) {
+ struct Scsi_Host *h;
+ for (h = first_host; h && h->host_no != host; h = h->next);
+ if (!h) {
+ printk (KERN_ALERT "scsi%d not found\n", host);
+ return NULL;
+ } else if (h->hostt != the_template) {
+ printk (KERN_ALERT "scsi%d is not a NCR board\n", host);
+ return NULL;
+ }
+ return h;
+}
+
+/*
+ * Function : request_synchronous (int host, int target)
+ *
+ * Purpose : KGDB interface which will allow us to negotiate for
+ * synchronous transfers. This will be replaced with a more
+ * integrated function; perhaps a new entry in the scsi_host
+ * structure, accessible via an ioctl() or perhaps /proc/scsi.
+ *
+ * Inputs : host - number of SCSI host; target - number of target.
+ *
+ * Returns : 0 when negotiation has been setup for next SCSI command,
+ * -1 on failure.
+ */
+
+/* Flag target on host for SDTR negotiation on its next command. Validates
+ the target number (negative, host's own ID, or above max_id are rejected)
+ and sets the target's bit in hostdata->initiate_sdtr under cli()
+ protection. Returns 0 on success, -1 on failure. */
+static int
+request_synchronous (int host, int target) {
+ struct Scsi_Host *h;
+ struct NCR53c7x0_hostdata *hostdata;
+ unsigned long flags;
+ if (target < 0) {
+ printk (KERN_ALERT "target %d is bogus\n", target);
+ return -1;
+ }
+ if (!(h = find_host (host)))
+ return -1;
+ else if (h->this_id == target) {
+ printk (KERN_ALERT "target %d is host ID\n", target);
+ return -1;
+ }
+#ifndef LINUX_1_2
+ else if (target > h->max_id) {
+ printk (KERN_ALERT "target %d exceeds maximum of %d\n", target,
+ h->max_id);
+ return -1;
+ }
+#endif
+ hostdata = (struct NCR53c7x0_hostdata *)h->hostdata;
+
+ save_flags(flags);
+ cli();
+ /* Already negotiating for this target: nothing to do. */
+ if (hostdata->initiate_sdtr & (1 << target)) {
+ restore_flags(flags);
+ printk (KERN_ALERT "target %d already doing SDTR\n", target);
+ return -1;
+ }
+ hostdata->initiate_sdtr |= (1 << target);
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Function : request_disconnect (int host, int on_or_off)
+ *
+ * Purpose : KGDB support function, tells us to allow or disallow
+ * disconnections.
+ *
+ * Inputs : host - number of SCSI host; on_or_off - non-zero to allow,
+ * zero to disallow.
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+/* Set or clear OPTION_DISCONNECT in the host's options word, allowing or
+ disallowing target disconnects. Returns 0 on success, -1 when the host
+ number cannot be resolved. */
+static int
+request_disconnect (int host, int on_or_off) {
+ struct Scsi_Host *h;
+ struct NCR53c7x0_hostdata *hostdata;
+ if (!(h = find_host (host)))
+ return -1;
+ hostdata = (struct NCR53c7x0_hostdata *) h->hostdata;
+ if (on_or_off)
+ hostdata->options |= OPTION_DISCONNECT;
+ else
+ hostdata->options &= ~OPTION_DISCONNECT;
+ return 0;
+}
+
+/*
+ * Function : static void NCR53c7x0_driver_init (struct Scsi_Host *host)
+ *
+ * Purpose : Initialize internal structures, as required on startup, or
+ * after a SCSI bus reset.
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ */
+
+static void
+NCR53c7x0_driver_init (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i, j;
+ u32 *current;
+ /* Clear per-target/per-LUN state (16 targets x 8 LUNs) and drop any
+ negotiated synchronous parameters back to asynchronous. */
+ for (i = 0; i < 16; ++i) {
+ hostdata->request_sense[i] = 0;
+ for (j = 0; j < 8; ++j)
+ hostdata->busy[i][j] = 0;
+ set_synchronous (host, i, /* sxfer */ 0, hostdata->saved_scntl3, 0);
+ }
+ hostdata->issue_queue = NULL;
+ hostdata->running_list = hostdata->finished_queue =
+ hostdata->current = NULL;
+ /* Fill the schedule array with two-word NOP slots (can_queue of them);
+ 0xdeadbeef marks the unused operand word. */
+ for (i = 0, current = (u32 *) hostdata->schedule;
+ i < host->can_queue; ++i, current += 2) {
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ }
+ /* Terminate the schedule with a JUMP back to the wait_reselect entry
+ of the SCSI SCRIPT. */
+ current[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
+ current[1] = (u32) virt_to_bus (hostdata->script) +
+ hostdata->E_wait_reselect;
+ hostdata->reconnect_dsa_head = 0;
+ hostdata->addr_reconnect_dsa_head = (u32)
+ virt_to_bus((void *) &(hostdata->reconnect_dsa_head));
+ hostdata->expecting_iid = 0;
+ hostdata->expecting_sto = 0;
+ if (hostdata->options & OPTION_ALWAYS_SYNCHRONOUS)
+ hostdata->initiate_sdtr = 0xffff;
+ else
+ hostdata->initiate_sdtr = 0;
+ hostdata->talked_to = 0;
+ hostdata->idle = 1;
+}
+
+/*
+ * Function : static int ccf_to_clock (int ccf)
+ *
+ * Purpose : Return the largest SCSI clock allowable for a given
+ * clock conversion factor, allowing us to do synchronous periods
+ * when we don't know what the SCSI clock is by taking at least
+ * as long as the device says we can.
+ *
+ * Inputs : ccf
+ *
+ * Returns : clock on success, -1 on failure.
+ */
+
+/* Map a clock conversion factor to the largest SCSI clock (in Hz) it
+ permits; ccf 0 is treated like 4 (divide by 3.0). Returns -1 for an
+ unknown factor. Inverse of clock_to_ccf() below. */
+static int
+ccf_to_clock (int ccf) {
+ switch (ccf) {
+ case 1: return 25000000; /* Divide by 1.0 */
+ case 2: return 37500000; /* Divide by 1.5 */
+ case 3: return 50000000; /* Divide by 2.0 */
+ case 0: /* Divide by 3.0 */
+ case 4: return 66000000;
+ default: return -1;
+ }
+}
+
+/*
+ * Function : static int clock_to_ccf (int clock)
+ *
+ * Purpose : Return the clock conversion factor for a given SCSI clock.
+ *
+ * Inputs : clock - SCSI clock expressed in Hz.
+ *
+ * Returns : ccf on success, -1 on failure.
+ */
+
+/* Map a SCSI clock (Hz) to its clock conversion factor; clocks below
+ 16.67MHz or at/above 66MHz are out of range and yield -1. */
+static int
+clock_to_ccf (int clock) {
+ if (clock < 16666666)
+ return -1;
+ if (clock < 25000000)
+ return 1; /* Divide by 1.0 */
+ else if (clock < 37500000)
+ return 2; /* Divide by 1.5 */
+ else if (clock < 50000000)
+ return 3; /* Divide by 2.0 */
+ else if (clock < 66000000)
+ return 4; /* Divide by 3.0 */
+ else
+ return -1;
+}
+
+/*
+ * Function : static int NCR53c7x0_init (struct Scsi_Host *host)
+ *
+ * Purpose : initialize the internal structures for a given SCSI host
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ * Preconditions : when this function is called, the chip_type
+ * field of the hostdata structure MUST have been set.
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int
+NCR53c7x0_init (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ int i, ccf, expected_ccf;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct Scsi_Host *search;
+ /*
+ * There are some things which we need to know about in order to provide
+ * a semblance of support. Print 'em if they aren't what we expect,
+ * otherwise don't add to the noise.
+ *
+ * -1 means we don't know what to expect.
+ */
+ int expected_id = -1;
+ int expected_clock = -1;
+ int uninitialized = 0;
+ /*
+ * FIXME : this is only on Intel boxes. On other platforms, this
+ * will differ.
+ */
+ int expected_mapping = OPTION_IO_MAPPED;
+ NCR53c7x0_local_setup(host);
+
+ /* Install the chip-family specific function pointers; only the 8xx
+ family is handled here, anything else detaches. */
+ switch (hostdata->chip) {
+ case 820:
+ case 825:
+#ifdef notyet
+ host->max_id = 15;
+#endif
+ /* Fall through */
+ case 810:
+ case 815:
+ hostdata->dstat_sir_intr = NCR53c8x0_dstat_sir_intr;
+ hostdata->init_save_regs = NULL;
+ hostdata->dsa_fixup = NCR53c8xx_dsa_fixup;
+ hostdata->init_fixup = NCR53c8x0_init_fixup;
+ hostdata->soft_reset = NCR53c8x0_soft_reset;
+ hostdata->run_tests = NCR53c8xx_run_tests;
+/* Is the SCSI clock ever anything else on these chips? */
+ expected_clock = hostdata->scsi_clock = 40000000;
+ expected_id = 7;
+ break;
+ default:
+ printk ("scsi%d : chip type of %d is not supported yet, detaching.\n",
+ host->host_no, hostdata->chip);
+ scsi_unregister (host);
+ return -1;
+ }
+
+ /* Assign constants accessed by NCR */
+ hostdata->NCR53c7xx_zero = 0;
+ hostdata->NCR53c7xx_msg_reject = MESSAGE_REJECT;
+ hostdata->NCR53c7xx_msg_abort = ABORT;
+ hostdata->NCR53c7xx_msg_nop = NOP;
+ hostdata->NOP_insn = (DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24;
+
+ if (expected_mapping == -1 ||
+ (hostdata->options & (OPTION_MEMORY_MAPPED)) !=
+ (expected_mapping & OPTION_MEMORY_MAPPED))
+ printk ("scsi%d : using %s mapped access\n", host->host_no,
+ (hostdata->options & OPTION_MEMORY_MAPPED) ? "memory" :
+ "io");
+
+ hostdata->dmode = (hostdata->chip == 700 || hostdata->chip == 70066) ?
+ DMODE_REG_00 : DMODE_REG_10;
+ hostdata->istat = ((hostdata->chip / 100) == 8) ?
+ ISTAT_REG_800 : ISTAT_REG_700;
+
+/* Only the ISTAT register is readable when the NCR is running, so make
+ sure it's halted. */
+ ncr_halt(host);
+
+/*
+ * XXX - the NCR53c700 uses bitfielded registers for SCID, SDID, etc,
+ * as does the 710 with one bit per SCSI ID. Conversely, the NCR
+ * uses a normal, 3 bit binary representation of these values.
+ *
+ * Get the rest of the NCR documentation, and FIND OUT where the change
+ * was.
+ */
+#if 0
+ tmp = hostdata->this_id_mask = NCR53c7x0_read8(SCID_REG);
+ for (host->this_id = 0; tmp != 1; tmp >>=1, ++host->this_id);
+#else
+ host->this_id = NCR53c7x0_read8(SCID_REG) & 15;
+ if (host->this_id == 0)
+ host->this_id = 7; /* sanitize hostid---0 doesn't make sense */
+ hostdata->this_id_mask = 1 << host->this_id;
+#endif
+
+/*
+ * Note : we should never encounter a board setup for ID0. So,
+ * if we see ID0, assume that it was uninitialized and set it
+ * to the industry standard 7.
+ */
+ /* NOTE(review): the #else branch above already forces an ID of 0 to 7,
+ so this block appears unreachable as compiled -- confirm before
+ relying on the "uninitialized" flag being set here. */
+ if (!host->this_id) {
+ printk("scsi%d : initiator ID was %d, changing to 7\n",
+ host->host_no, host->this_id);
+ host->this_id = 7;
+ hostdata->this_id_mask = 1 << 7;
+ uninitialized = 1;
+ };
+
+ if (expected_id == -1 || host->this_id != expected_id)
+ printk("scsi%d : using initiator ID %d\n", host->host_no,
+ host->this_id);
+
+ /*
+ * Save important registers to allow a soft reset.
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ /*
+ * CTEST4 controls burst mode disable.
+ */
+ hostdata->saved_ctest4 = NCR53c7x0_read8(CTEST4_REG_800) &
+ CTEST4_800_SAVE;
+ } else {
+ /*
+ * CTEST7 controls cache snooping, burst mode, and support for
+ * external differential drivers.
+ */
+ hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
+ }
+
+ /*
+ * On NCR53c700 series chips, DCNTL controls the SCSI clock divisor,
+ * on 800 series chips, it allows for a totem-pole IRQ driver.
+ */
+
+ hostdata->saved_dcntl = NCR53c7x0_read8(DCNTL_REG);
+
+ /*
+ * DCNTL_800_IRQM controls whether we are using an open drain
+ * driver (reset) or totem pole driver (set). In all cases,
+ * it's level active. I suppose this is an issue when we're trying to
+ * wire-or the same PCI INTx line?
+ */
+ if ((hostdata->chip / 100) == 8)
+ hostdata->saved_dcntl &= ~DCNTL_800_IRQM;
+
+ /*
+ * DMODE controls DMA burst length, and on 700 series chips,
+ * 286 mode and bus width
+ */
+ hostdata->saved_dmode = NCR53c7x0_read8(hostdata->dmode);
+
+ /*
+ * Now that burst length and enabled/disabled status is known,
+ * clue the user in on it.
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ if (hostdata->saved_ctest4 & CTEST4_800_BDIS) {
+ printk ("scsi%d : burst mode disabled\n", host->host_no);
+ } else {
+ switch (hostdata->saved_dmode & DMODE_BL_MASK) {
+ case DMODE_BL_2: i = 2; break;
+ case DMODE_BL_4: i = 4; break;
+ case DMODE_BL_8: i = 8; break;
+ case DMODE_BL_16: i = 16; break;
+ default: i = 0;
+ }
+ printk ("scsi%d : burst length %d\n", host->host_no, i);
+ }
+ }
+
+ /*
+ * On NCR53c810 and NCR53c820 chips, SCNTL3 contains the synchronous
+ * and normal clock conversion factors.
+ */
+ if (hostdata->chip / 100 == 8) {
+ expected_ccf = clock_to_ccf (expected_clock);
+ hostdata->saved_scntl3 = NCR53c7x0_read8(SCNTL3_REG_800);
+ ccf = hostdata->saved_scntl3 & SCNTL3_800_CCF_MASK;
+ /* A zero CCF looks uninitialized; substitute the expected one. */
+ if (expected_ccf != -1 && ccf != expected_ccf && !ccf) {
+ hostdata->saved_scntl3 = (hostdata->saved_scntl3 &
+ ~SCNTL3_800_CCF_MASK) | expected_ccf;
+ if (!uninitialized) {
+ printk ("scsi%d : reset ccf to %d from %d\n",
+ host->host_no, expected_ccf, ccf);
+ uninitialized = 1;
+ }
+ }
+ } else
+ ccf = 0;
+
+ /*
+ * If we don't have a SCSI clock programmed, pick one on the upper
+ * bound of that allowed by NCR so that our transfers err on the
+ * slow side, since transfer period must be >= the agreed
+ * upon period.
+ */
+
+ if ((!hostdata->scsi_clock) && (hostdata->scsi_clock = ccf_to_clock (ccf))
+ == -1) {
+ printk ("scsi%d : clock conversion factor %d unknown.\n"
+ " synchronous transfers disabled\n",
+ host->host_no, ccf);
+ hostdata->options &= ~OPTION_SYNCHRONOUS;
+ hostdata->scsi_clock = 0;
+ }
+
+ if (expected_clock == -1 || hostdata->scsi_clock != expected_clock)
+ printk ("scsi%d : using %dMHz SCSI clock\n", host->host_no,
+ hostdata->scsi_clock / 1000000);
+
+ for (i = 0; i < 16; ++i)
+ hostdata->cmd_allocated[i] = 0;
+
+ if (hostdata->init_save_regs)
+ hostdata->init_save_regs (host);
+ if (hostdata->init_fixup)
+ hostdata->init_fixup (host);
+
+ if (!the_template) {
+ the_template = host->hostt;
+ first_host = host;
+ }
+
+ /*
+ * Linux SCSI drivers have always been plagued with initialization
+ * problems - some didn't work with the BIOS disabled since they expected
+ * initialization from it, some didn't work when the networking code
+ * was enabled and registers got scrambled, etc.
+ *
+ * To avoid problems like this, in the future, we will do a soft
+ * reset on the SCSI chip, taking it back to a sane state.
+ */
+
+ hostdata->soft_reset (host);
+
+#if 1
+ hostdata->debug_count_limit = -1;
+#else
+ hostdata->debug_count_limit = 1;
+#endif
+ hostdata->intrs = -1;
+ hostdata->resets = -1;
+ memcpy ((void *) hostdata->synchronous_want, (void *) sdtr_message,
+ sizeof (hostdata->synchronous_want));
+
+ NCR53c7x0_driver_init (host);
+
+ /*
+ * Set up an interrupt handler if we aren't already sharing an IRQ
+ * with another board.
+ */
+
+ for (search = first_host; search && !(search->hostt == the_template &&
+ search->irq == host->irq && search != host); search=search->next);
+
+ if (!search) {
+ if (request_irq(host->irq, NCR53c7x0_intr, SA_INTERRUPT, "53c7,8xx", NULL)) {
+ printk("scsi%d : IRQ%d not free, detaching\n"
+ " You have either a configuration problem, or a\n"
+ " broken BIOS. You may wish to manually assign\n"
+ " an interrupt to the NCR board rather than using\n"
+ " an automatic setting.\n",
+ host->host_no, host->irq);
+ scsi_unregister (host);
+ return -1;
+ }
+ } else {
+ printk("scsi%d : using interrupt handler previously installed for scsi%d\n",
+ host->host_no, search->host_no);
+ }
+
+
+ if ((hostdata->run_tests && hostdata->run_tests(host) == -1) ||
+ (hostdata->options & OPTION_DEBUG_TESTS_ONLY)) {
+ /* XXX Should disable interrupts, etc. here */
+ scsi_unregister (host);
+ return -1;
+ } else {
+ if (host->io_port) {
+ host->n_io_port = 128;
+ request_region (host->io_port, host->n_io_port, "ncr53c7,8xx");
+ }
+ }
+
+ if (NCR53c7x0_read8 (SBCL_REG) & SBCL_BSY) {
+ printk ("scsi%d : bus wedge, doing SCSI reset\n", host->host_no);
+ hard_reset (host);
+ }
+ return 0;
+}
+
+/*
+ * Function : static int normal_init(Scsi_Host_Template *tpnt, int board,
+ * int chip, u32 base, int io_port, int irq, int dma, int pcivalid,
+ * unsigned char pci_bus, unsigned char pci_device_fn,
+ * long long options);
+ *
+ * Purpose : initializes a NCR53c7,8x0 based on base addresses,
+ * IRQ, and DMA channel.
+ *
+ * Useful where a new NCR chip is backwards compatible with
+ * a supported chip, but the DEVICE ID has changed so it
+ * doesn't show up when the autoprobe does a pcibios_find_device.
+ *
+ * Inputs : tpnt - Template for this SCSI adapter, board - board level
+ * product, chip - 810, 820, or 825, bus - PCI bus, device_fn -
+ * device and function encoding as used by PCI BIOS calls.
+ *
+ * Returns : 0 on success, -1 on failure.
+ *
+ */
+
+static int
+normal_init (Scsi_Host_Template *tpnt, int board, int chip,
+ u32 base, int io_port, int irq, int dma, int pci_valid,
+ unsigned char pci_bus, unsigned char pci_device_fn, long long options) {
+ struct Scsi_Host *instance;
+ struct NCR53c7x0_hostdata *hostdata;
+ char chip_str[80];
+ int script_len = 0, dsa_len = 0, size = 0, max_cmd_size = 0,
+ schedule_size = 0, ok = 0;
+ void *tmp;
+
+ options |= perm_options;
+
+ switch (chip) {
+ case 825:
+ case 820:
+ case 815:
+ case 810:
+ schedule_size = (tpnt->can_queue + 1) * 8 /* JUMP instruction size */;
+ script_len = NCR53c8xx_script_len;
+ dsa_len = NCR53c8xx_dsa_len;
+ options |= OPTION_INTFLY;
+ sprintf (chip_str, "NCR53c%d", chip);
+ break;
+ default:
+ printk("scsi-ncr53c7,8xx : unsupported SCSI chip %d\n", chip);
+ return -1;
+ }
+
+ printk("scsi-ncr53c7,8xx : %s at memory 0x%x, io 0x%x, irq %d",
+ chip_str, (unsigned) base, io_port, irq);
+ if (dma == DMA_NONE)
+ printk("\n");
+ else
+ printk(", dma %d\n", dma);
+
+ if ((chip / 100 == 8) && !pci_valid)
+ printk ("scsi-ncr53c7,8xx : for better reliability and performance, please use the\n"
+ " PCI override instead.\n"
+ " Syntax : ncr53c8{10,15,20,25}=pci,<bus>,<device>,<function>\n"
+ " <bus> and <device> are usually 0.\n");
+
+ if (options & OPTION_DEBUG_PROBE_ONLY) {
+ printk ("scsi-ncr53c7,8xx : probe only enabled, aborting initialization\n");
+ return -1;
+ }
+
+ max_cmd_size = sizeof(struct NCR53c7x0_cmd) + dsa_len +
+ /* Size of dynamic part of command structure : */
+ 2 * /* Worst case : we don't know if we need DATA IN or DATA out */
+ ( 2 * /* Current instructions per scatter/gather segment */
+ tpnt->sg_tablesize +
+ 3 /* Current startup / termination required per phase */
+ ) *
+ 8 /* Each instruction is eight bytes */;
+
+ /* Allocate fixed part of hostdata, dynamic part to hold appropriate
+ SCSI SCRIPT(tm) plus a single, maximum-sized NCR53c7x0_cmd structure.
+
+ We need a NCR53c7x0_cmd structure for scan_scsis() when we are
+ not loaded as a module, and when we're loaded as a module, we
+ can't use a non-dynamically allocated structure because modules
+ are vmalloc()'d, which can allow structures to cross page
+ boundaries and breaks our physical/virtual address assumptions
+ for DMA.
+
+ So, we stick it past the end of our hostdata structure.
+
+ ASSUMPTION :
+ Regardless of how many simultaneous SCSI commands we allow,
+ the probe code only executes a _single_ instruction at a time,
+ so we only need one here, and don't need to allocate NCR53c7x0_cmd
+ structures for each target until we are no longer in scan_scsis
+ and kmalloc() has become functional (memory_init() happens
+ after all device driver initialization).
+ */
+
+ size = sizeof(struct NCR53c7x0_hostdata) + script_len +
+ /* Note that alignment will be guaranteed, since we put the command
+ allocated at probe time after the fixed-up SCSI script, which
+ consists of 32 bit words, aligned on a 32 bit boundary. But
+ on a 64bit machine we need 8 byte alignment for hostdata->free, so
+ we add in another 4 bytes to take care of potential misalignment
+ */
+ (sizeof(void *) - sizeof(u32)) + max_cmd_size + schedule_size;
+
+ instance = scsi_register (tpnt, size);
+ if (!instance)
+ return -1;
+
+ /* FIXME : if we ever support an ISA NCR53c7xx based board, we
+ need to check if the chip is running in a 16 bit mode, and if so
+ unregister it if it is past the 16M (0x1000000) mark */
+
+ hostdata = (struct NCR53c7x0_hostdata *)
+ instance->hostdata;
+ hostdata->size = size;
+ hostdata->script_count = script_len / sizeof(u32);
+ hostdata = (struct NCR53c7x0_hostdata *) instance->hostdata;
+ hostdata->board = board;
+ hostdata->chip = chip;
+ if ((hostdata->pci_valid = pci_valid)) {
+ hostdata->pci_bus = pci_bus;
+ hostdata->pci_device_fn = pci_device_fn;
+ }
+
+ /*
+ * Being memory mapped is more desirable, since
+ *
+ * - Memory accesses may be faster.
+ *
+ * - The destination and source address spaces are the same for
+ * all instructions, meaning we don't have to twiddle dmode or
+ * any other registers.
+ *
+ * So, we try for memory mapped, and if we don't get it,
+ * we go for port mapped, and that failing we tell the user
+ * it can't work.
+ */
+
+ if (base) {
+ instance->base = (unsigned char *) (unsigned long) base;
+ /* Check for forced I/O mapping */
+ if (!(options & OPTION_IO_MAPPED)) {
+ options |= OPTION_MEMORY_MAPPED;
+ ok = 1;
+ }
+ } else {
+ options &= ~OPTION_MEMORY_MAPPED;
+ }
+
+ if (io_port) {
+ instance->io_port = io_port;
+ options |= OPTION_IO_MAPPED;
+ ok = 1;
+ } else {
+ options &= ~OPTION_IO_MAPPED;
+ }
+
+ if (!ok) {
+ printk ("scsi%d : not initializing, no I/O or memory mapping known \n",
+ instance->host_no);
+ scsi_unregister (instance);
+ return -1;
+ }
+ instance->irq = irq;
+ instance->dma_channel = dma;
+
+ hostdata->options = options;
+ hostdata->dsa_len = dsa_len;
+ hostdata->max_cmd_size = max_cmd_size;
+ hostdata->num_cmds = 1;
+ /* Initialize single command */
+ tmp = (hostdata->script + hostdata->script_count);
+ hostdata->free = ROUNDUP(tmp, void *);
+ hostdata->free->real = tmp;
+ hostdata->free->size = max_cmd_size;
+ hostdata->free->free = NULL;
+ hostdata->free->next = NULL;
+ hostdata->extra_allocate = 0;
+
+ /* Allocate command start code space */
+ hostdata->schedule = (chip == 700 || chip == 70066) ?
+ NULL : (u32 *) ((char *)hostdata->free + max_cmd_size);
+
+/*
+ * For diagnostic purposes, we don't really care how fast things blaze.
+ * For profiling, we want to access the 800ns resolution system clock,
+ * using a 'C' call on the host processor.
+ *
+ * Therefore, there's no need for the NCR chip to directly manipulate
+ * this data, and we should put it wherever is most convenient for
+ * Linux.
+ */
+ if (track_events)
+ hostdata->events = (struct NCR53c7x0_event *) (track_events ?
+ vmalloc (sizeof (struct NCR53c7x0_event) * track_events) : NULL);
+ else
+ hostdata->events = NULL;
+
+ if (hostdata->events) {
+ memset ((void *) hostdata->events, 0, sizeof(struct NCR53c7x0_event) *
+ track_events);
+ hostdata->event_size = track_events;
+ hostdata->event_index = 0;
+ } else
+ hostdata->event_size = 0;
+
+ return NCR53c7x0_init(instance);
+}
+
+
+/*
+ * Function : static int ncr_pci_init(Scsi_Host_Template *tpnt, int board,
+ * int chip, int bus, int device_fn, long long options)
+ *
+ * Purpose : initializes a NCR53c800 family based on the PCI
+ * bus, device, and function location of it. Allows
+ * reprogramming of latency timer and determining addresses
+ * and whether bus mastering, etc. are OK.
+ *
+ * Useful where a new NCR chip is backwards compatible with
+ * a supported chip, but the DEVICE ID has changed so it
+ * doesn't show up when the autoprobe does a pcibios_find_device.
+ *
+ * Inputs : tpnt - Template for this SCSI adapter, board - board level
+ * product, chip - 810, 820, or 825, bus - PCI bus, device_fn -
+ * device and function encoding as used by PCI BIOS calls.
+ *
+ * Returns : 0 on success, -1 on failure.
+ *
+ */
+
+/* Probe and initialize one NCR53c8xx-family chip at the given PCI
+   bus/device/function, reading its configuration space via the PCI BIOS
+   and delegating the actual setup to normal_init(). Returns 0 on
+   success, -1 on any failure. */
+static int
+ncr_pci_init (Scsi_Host_Template *tpnt, int board, int chip,
+ unsigned char bus, unsigned char device_fn, long long options) {
+ unsigned short vendor_id, device_id, command;
+#ifdef LINUX_1_2
+ unsigned long
+#else
+ unsigned int
+#endif
+ base, io_port;
+ unsigned char irq, revision;
+ /* NOTE(review): expected_chip is assigned in the table scan below but
+    never read afterwards - looks like a candidate for removal. */
+ int error, expected_chip;
+ int expected_id = -1, max_revision = -1, min_revision = -1;
+ int i;
+
+ printk("scsi-ncr53c7,8xx : at PCI bus %d, device %d, function %d\n",
+ bus, (int) (device_fn & 0xf8) >> 3,
+ (int) device_fn & 7);
+
+ if (!pcibios_present()) {
+ printk("scsi-ncr53c7,8xx : not initializing due to lack of PCI BIOS,\n"
+ " try using memory, port, irq override instead.\n");
+ return -1;
+ }
+
+ /* Pull everything we need out of PCI configuration space in one
+    short-circuited chain; any single read failure aborts the probe. */
+ if ((error = pcibios_read_config_word (bus, device_fn, PCI_VENDOR_ID,
+ &vendor_id)) ||
+ (error = pcibios_read_config_word (bus, device_fn, PCI_DEVICE_ID,
+ &device_id)) ||
+ (error = pcibios_read_config_word (bus, device_fn, PCI_COMMAND,
+ &command)) ||
+ (error = pcibios_read_config_dword (bus, device_fn,
+ PCI_BASE_ADDRESS_0, &io_port)) ||
+ (error = pcibios_read_config_dword (bus, device_fn,
+ PCI_BASE_ADDRESS_1, &base)) ||
+ (error = pcibios_read_config_byte (bus, device_fn, PCI_CLASS_REVISION,
+ &revision)) ||
+ (error = pcibios_read_config_byte (bus, device_fn, PCI_INTERRUPT_LINE,
+ &irq))) {
+ printk ("scsi-ncr53c7,8xx : error %s not initializing due to error reading configuration space\n"
+ " perhaps you specified an incorrect PCI bus, device, or function.\n"
+ , pcibios_strerror(error));
+ return -1;
+ }
+
+ /* If any one ever clones the NCR chips, this will have to change */
+
+ if (vendor_id != PCI_VENDOR_ID_NCR) {
+ printk ("scsi-ncr53c7,8xx : not initializing, 0x%04x is not NCR vendor ID\n",
+ (int) vendor_id);
+ return -1;
+ }
+
+
+ /*
+ * Bit 0 is the address space indicator and must be one for I/O
+ * space mappings, bit 1 is reserved, discard them after checking
+ * that they have the correct value of 1.
+ */
+
+ if (command & PCI_COMMAND_IO) {
+ if ((io_port & 3) != 1) {
+ printk ("scsi-ncr53c7,8xx : disabling I/O mapping since base address 0 (0x%x)\n"
+ " bits 0..1 indicate a non-IO mapping\n",
+ (unsigned) io_port);
+ io_port = 0;
+ } else
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ } else {
+ io_port = 0;
+ }
+
+ if (command & PCI_COMMAND_MEMORY) {
+ if ((base & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ printk("scsi-ncr53c7,8xx : disabling memory mapping since base address 1\n"
+ " contains a non-memory mapping\n");
+ base = 0;
+ } else
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ } else {
+ base = 0;
+ }
+
+ /* We need at least one working mapping to talk to the chip at all. */
+ if (!io_port && !base) {
+ printk ("scsi-ncr53c7,8xx : not initializing, both I/O and memory mappings disabled\n");
+ return -1;
+ }
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ printk ("scsi-ncr53c7,8xx : not initializing, BUS MASTERING was disabled\n");
+ return -1;
+ }
+
+ /* Cross-check the PCI device id against our chip table in both
+    directions : find revision limits for the id we actually read,
+    and the id we would expect for the chip the caller asked for. */
+ for (i = 0; i < NPCI_CHIP_IDS; ++i) {
+ if (device_id == pci_chip_ids[i].pci_device_id) {
+ max_revision = pci_chip_ids[i].max_revision;
+ min_revision = pci_chip_ids[i].min_revision;
+ expected_chip = pci_chip_ids[i].chip;
+ }
+ if (chip == pci_chip_ids[i].chip)
+ expected_id = pci_chip_ids[i].pci_device_id;
+ }
+
+ if (chip && device_id != expected_id)
+ printk ("scsi-ncr53c7,8xx : warning : device id of 0x%04x doesn't\n"
+ " match expected 0x%04x\n",
+ (unsigned int) device_id, (unsigned int) expected_id );
+
+ if (max_revision != -1 && revision > max_revision)
+ printk ("scsi-ncr53c7,8xx : warning : revision of %d is greater than %d.\n",
+ (int) revision, max_revision);
+ else if (min_revision != -1 && revision < min_revision)
+ printk ("scsi-ncr53c7,8xx : warning : revision of %d is less than %d.\n",
+ (int) revision, min_revision);
+
+ if (io_port && check_region (io_port, 128)) {
+ printk ("scsi-ncr53c7,8xx : IO region 0x%x to 0x%x is in use\n",
+ (unsigned) io_port, (unsigned) io_port + 127);
+ return -1;
+ }
+
+ return normal_init (tpnt, board, chip, (int) base, io_port,
+ (int) irq, DMA_NONE, 1, bus, device_fn, options);
+}
+
+
+/*
+ * Function : int NCR53c7xx_detect(Scsi_Host_Template *tpnt)
+ *
+ * Purpose : detects and initializes NCR53c7,8x0 SCSI chips
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter
+ *
+ * Returns : number of host adapters detected
+ *
+ */
+
+/* Top-level detection entry point : first honors any user/compile-time
+   overrides, then (if a PCI BIOS is present) autoprobes every known
+   NCR device id. Returns the number of host adapters initialized. */
+int
+NCR53c7xx_detect(Scsi_Host_Template *tpnt) {
+ int i;
+ int current_override;
+ int count; /* Number of boards detected */
+ unsigned char pci_bus, pci_device_fn;
+ static short pci_index=0; /* Device index to PCI BIOS calls */
+
+#ifndef LINUX_1_2
+ tpnt->proc_dir = &proc_scsi_ncr53c7xx;
+#endif
+
+ /* Overrides come first : each entry is either a PCI-style override
+    (bus/device/function) or a "normal" one (base/io/irq/dma). The
+    init functions return 0 on success, hence the negations. */
+ for (current_override = count = 0; current_override < OVERRIDE_LIMIT;
+ ++current_override) {
+ if (overrides[current_override].pci ?
+ !ncr_pci_init (tpnt, overrides[current_override].board,
+ overrides[current_override].chip,
+ (unsigned char) overrides[current_override].data.pci.bus,
+ (((overrides[current_override].data.pci.device
+ << 3) & 0xf8)|(overrides[current_override].data.pci.function &
+ 7)), overrides[current_override].options):
+ !normal_init (tpnt, overrides[current_override].board,
+ overrides[current_override].chip,
+ overrides[current_override].data.normal.base,
+ overrides[current_override].data.normal.io_port,
+ overrides[current_override].data.normal.irq,
+ overrides[current_override].data.normal.dma,
+ 0 /* PCI data invalid */, 0 /* PCI bus place holder */,
+ 0 /* PCI device_function place holder */,
+ overrides[current_override].options)) {
+ ++count;
+ }
+ }
+
+ /* Autoprobe : walk every chip id we know about, and every instance
+    of that id the PCI BIOS can find. */
+ if (pcibios_present()) {
+ for (i = 0; i < NPCI_CHIP_IDS; ++i)
+ for (pci_index = 0;
+ !pcibios_find_device (PCI_VENDOR_ID_NCR,
+ pci_chip_ids[i].pci_device_id, pci_index, &pci_bus,
+ &pci_device_fn);
+ ++pci_index)
+ if (!ncr_pci_init (tpnt, BOARD_GENERIC, pci_chip_ids[i].chip,
+ pci_bus, pci_device_fn, /* no options */ 0))
+ ++count;
+ }
+ return count;
+}
+
+/* NCR53c810 and NCR53c820 script handling code */
+
+/* 53c8xx_d.h is the assembled SCSI SCRIPTS output : SCRIPT[], the
+   LABELPATCHES/EXTERNAL_PATCHES relocation tables, and the A_* / Ent_*
+   symbol values used throughout the fixup code below. */
+#include "53c8xx_d.h"
+#ifdef A_int_debug_sync
+#define DEBUG_SYNC_INTR A_int_debug_sync
+#endif
+/* Size in bytes of the assembled script, and of the per-command DSA
+   area (data fields plus the relocated dsa code template). */
+static int NCR53c8xx_script_len = sizeof (SCRIPT);
+static int NCR53c8xx_dsa_len = A_dsa_end + Ent_dsa_zero - Ent_dsa_code_template;
+
+/*
+ * Function : static void NCR53c8x0_init_fixup (struct Scsi_Host *host)
+ *
+ * Purpose : copy and fixup the SCSI SCRIPTS(tm) code for this device.
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ */
+
+/* Copy the assembled SCSI SCRIPTS into this host's buffer and relocate
+   every label, external reference, and boot-time constant so the NCR
+   chip can execute it from its bus address. Must run before the chip
+   is started. */
+static void
+NCR53c8x0_init_fixup (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned char tmp;
+ int i, ncr_to_memory, memory_to_ncr;
+ u32 base;
+ NCR53c7x0_local_setup(host);
+
+
+ /* XXX - NOTE : this code MUST be made endian aware */
+ /* Copy code into buffer that was allocated at detection time. */
+ memcpy ((void *) hostdata->script, (void *) SCRIPT,
+ sizeof(SCRIPT));
+ /* Fixup labels */
+ for (i = 0; i < PATCHES; ++i)
+ hostdata->script[LABELPATCHES[i]] +=
+ virt_to_bus(hostdata->script);
+ /* Fixup addresses of constants that used to be EXTERNAL */
+
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_abort,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_abort)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_reject,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_reject)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_zero,
+ virt_to_bus(&(hostdata->NCR53c7xx_zero)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_sink,
+ virt_to_bus(&(hostdata->NCR53c7xx_sink)));
+ patch_abs_32 (hostdata->script, 0, NOP_insn,
+ virt_to_bus(&(hostdata->NOP_insn)));
+ patch_abs_32 (hostdata->script, 0, schedule,
+ virt_to_bus((void *) hostdata->schedule));
+
+ /* Fixup references to external variables: */
+ for (i = 0; i < EXTERNAL_PATCHES_LEN; ++i)
+ hostdata->script[EXTERNAL_PATCHES[i].offset] +=
+ virt_to_bus(EXTERNAL_PATCHES[i].address);
+
+ /*
+ * Fixup absolutes set at boot-time.
+ *
+ * All non-code absolute variables suffixed with "dsa_" and "int_"
+ * are constants, and need no fixup provided the assembler has done
+ * it for us (I don't know what the "real" NCR assembler does in
+ * this case, my assembler does the right magic).
+ */
+
+ patch_abs_rwri_data (hostdata->script, 0, dsa_save_data_pointer,
+ Ent_dsa_code_save_data_pointer - Ent_dsa_zero);
+ patch_abs_rwri_data (hostdata->script, 0, dsa_restore_pointers,
+ Ent_dsa_code_restore_pointers - Ent_dsa_zero);
+ patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
+ Ent_dsa_code_check_reselect - Ent_dsa_zero);
+
+ /*
+ * Just for the hell of it, preserve the settings of
+ * Burst Length and Enable Read Line bits from the DMODE
+ * register. Make sure SCRIPTS start automagically.
+ */
+
+ tmp = NCR53c7x0_read8(DMODE_REG_10);
+ tmp &= (DMODE_800_ERL | DMODE_BL_MASK);
+
+ /* For I/O mapped operation the script must flip the source/
+    destination I/O-mode bits in DMODE around register moves; for
+    memory mapped operation both directions use the same dmode. */
+ if (!(hostdata->options & OPTION_MEMORY_MAPPED)) {
+ base = (u32) host->io_port;
+ memory_to_ncr = tmp|DMODE_800_DIOM;
+ ncr_to_memory = tmp|DMODE_800_SIOM;
+ } else {
+ base = virt_to_bus(host->base);
+ memory_to_ncr = ncr_to_memory = tmp;
+ }
+
+ patch_abs_32 (hostdata->script, 0, addr_scratch, base + SCRATCHA_REG_800);
+ patch_abs_32 (hostdata->script, 0, addr_temp, base + TEMP_REG);
+
+ /*
+ * I needed some variables in the script to be accessible to
+ * both the NCR chip and the host processor. For these variables,
+ * I made the arbitrary decision to store them directly in the
+ * hostdata structure rather than in the RELATIVE area of the
+ * SCRIPTS.
+ */
+
+
+ patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_memory, tmp);
+ patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_ncr, memory_to_ncr);
+ patch_abs_rwri_data (hostdata->script, 0, dmode_ncr_to_memory, ncr_to_memory);
+
+ patch_abs_32 (hostdata->script, 0, msg_buf,
+ virt_to_bus((void *)&(hostdata->msg_buf)));
+ patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
+ virt_to_bus((void *)&(hostdata->reconnect_dsa_head)));
+ patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
+ virt_to_bus((void *)&(hostdata->addr_reconnect_dsa_head)));
+ patch_abs_32 (hostdata->script, 0, reselected_identify,
+ virt_to_bus((void *)&(hostdata->reselected_identify)));
+/* reselected_tag is currently unused */
+#if 0
+ patch_abs_32 (hostdata->script, 0, reselected_tag,
+ virt_to_bus((void *)&(hostdata->reselected_tag)));
+#endif
+
+ patch_abs_32 (hostdata->script, 0, test_dest,
+ virt_to_bus((void*)&hostdata->test_dest));
+ patch_abs_32 (hostdata->script, 0, test_src,
+ virt_to_bus(&hostdata->test_source));
+
+ patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
+ (unsigned char)(Ent_dsa_code_check_reselect - Ent_dsa_zero));
+
+/* These are for event logging; the ncr_event enum contains the
+ actual interrupt numbers. */
+#ifdef A_int_EVENT_SELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT, (u32) EVENT_SELECT);
+#endif
+#ifdef A_int_EVENT_DISCONNECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_DISCONNECT, (u32) EVENT_DISCONNECT);
+#endif
+#ifdef A_int_EVENT_RESELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT, (u32) EVENT_RESELECT);
+#endif
+#ifdef A_int_EVENT_COMPLETE
+ patch_abs_32 (hostdata->script, 0, int_EVENT_COMPLETE, (u32) EVENT_COMPLETE);
+#endif
+#ifdef A_int_EVENT_IDLE
+ patch_abs_32 (hostdata->script, 0, int_EVENT_IDLE, (u32) EVENT_IDLE);
+#endif
+#ifdef A_int_EVENT_SELECT_FAILED
+ patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT_FAILED,
+ (u32) EVENT_SELECT_FAILED);
+#endif
+#ifdef A_int_EVENT_BEFORE_SELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_BEFORE_SELECT,
+ (u32) EVENT_BEFORE_SELECT);
+#endif
+#ifdef A_int_EVENT_RESELECT_FAILED
+ patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT_FAILED,
+ (u32) EVENT_RESELECT_FAILED);
+#endif
+
+ /*
+ * Make sure the NCR and Linux code agree on the location of
+ * certain fields.
+ */
+
+ hostdata->E_accept_message = Ent_accept_message;
+ hostdata->E_command_complete = Ent_command_complete;
+ hostdata->E_cmdout_cmdout = Ent_cmdout_cmdout;
+ hostdata->E_data_transfer = Ent_data_transfer;
+ hostdata->E_debug_break = Ent_debug_break;
+ hostdata->E_dsa_code_template = Ent_dsa_code_template;
+ hostdata->E_dsa_code_template_end = Ent_dsa_code_template_end;
+ hostdata->E_end_data_transfer = Ent_end_data_transfer;
+ hostdata->E_initiator_abort = Ent_initiator_abort;
+ hostdata->E_msg_in = Ent_msg_in;
+ hostdata->E_other_transfer = Ent_other_transfer;
+ hostdata->E_other_in = Ent_other_in;
+ hostdata->E_other_out = Ent_other_out;
+ hostdata->E_reject_message = Ent_reject_message;
+ hostdata->E_respond_message = Ent_respond_message;
+ hostdata->E_select = Ent_select;
+ hostdata->E_select_msgout = Ent_select_msgout;
+ hostdata->E_target_abort = Ent_target_abort;
+#ifdef Ent_test_0
+ hostdata->E_test_0 = Ent_test_0;
+#endif
+ hostdata->E_test_1 = Ent_test_1;
+ hostdata->E_test_2 = Ent_test_2;
+#ifdef Ent_test_3
+ hostdata->E_test_3 = Ent_test_3;
+#endif
+ hostdata->E_wait_reselect = Ent_wait_reselect;
+ hostdata->E_dsa_code_begin = Ent_dsa_code_begin;
+
+ hostdata->dsa_cmdout = A_dsa_cmdout;
+ hostdata->dsa_cmnd = A_dsa_cmnd;
+ hostdata->dsa_datain = A_dsa_datain;
+ hostdata->dsa_dataout = A_dsa_dataout;
+ hostdata->dsa_end = A_dsa_end;
+ hostdata->dsa_msgin = A_dsa_msgin;
+ hostdata->dsa_msgout = A_dsa_msgout;
+ hostdata->dsa_msgout_other = A_dsa_msgout_other;
+ hostdata->dsa_next = A_dsa_next;
+ hostdata->dsa_select = A_dsa_select;
+ hostdata->dsa_start = Ent_dsa_code_template - Ent_dsa_zero;
+ hostdata->dsa_status = A_dsa_status;
+ hostdata->dsa_jump_dest = Ent_dsa_code_fix_jump - Ent_dsa_zero +
+ 8 /* destination operand */;
+
+ /* sanity check */
+ if (A_dsa_fields_start != Ent_dsa_code_template_end -
+ Ent_dsa_zero)
+ printk("scsi%d : NCR dsa_fields start is %d not %d\n",
+ host->host_no, A_dsa_fields_start, Ent_dsa_code_template_end -
+ Ent_dsa_zero);
+
+ printk("scsi%d : NCR code relocated to 0x%lx (virt 0x%p)\n", host->host_no,
+ virt_to_bus(hostdata->script), hostdata->script);
+}
+
+/*
+ * Function : static int NCR53c8xx_run_tests (struct Scsi_Host *host)
+ *
+ * Purpose : run various verification tests on the NCR chip,
+ * including interrupt generation, and proper bus mastering
+ * operation.
+ *
+ * Inputs : host - a properly initialized Scsi_Host structure
+ *
+ * Preconditions : the NCR chip must be in a halted state.
+ *
+ * Returns : 0 if all tests were successful, -1 on error.
+ *
+ */
+
+/* Run the optional driver self-tests (interrupt generation / bus
+   mastering via test 1, and INQUIRY selection via test 2). The chip
+   must be halted on entry; returns 0 on success, -1 on failure. */
+static int
+NCR53c8xx_run_tests (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long timeout;
+ u32 start;
+ int failed, i;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ /* The NCR chip _must_ be idle to run the test scripts */
+
+ save_flags(flags);
+ cli();
+ if (!hostdata->idle) {
+ printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+
+ /*
+ * Check for functional interrupts, this could work as an
+ * autoprobe routine.
+ */
+
+ if ((hostdata->options & OPTION_DEBUG_TEST1) &&
+ hostdata->state != STATE_DISABLED) {
+ hostdata->idle = 0;
+ hostdata->test_running = 1;
+ hostdata->test_completed = -1;
+ hostdata->test_dest = 0;
+ hostdata->test_source = 0xdeadbeef;
+ start = virt_to_bus (hostdata->script) + hostdata->E_test_1;
+ hostdata->state = STATE_RUNNING;
+ printk ("scsi%d : test 1", host->host_no);
+ NCR53c7x0_write32 (DSP_REG, start);
+ printk (" started\n");
+ /* Interrupts must be enabled for the busy-wait below : the test
+    script completes by raising an interrupt which the handler
+    turns into hostdata->test_completed. */
+ sti();
+
+ /*
+ * This is currently a .5 second timeout, since (in theory) no slow
+ * board will take that long. In practice, we've seen one
+ * pentium which occasionally fails with this, but works with
+ * 10 times as much?
+ */
+
+ timeout = jiffies + 5 * HZ / 10;
+ while ((hostdata->test_completed == -1) && jiffies < timeout)
+ barrier();
+
+ failed = 1;
+ if (hostdata->test_completed == -1)
+ printk ("scsi%d : driver test 1 timed out%s\n",host->host_no ,
+ (hostdata->test_dest == 0xdeadbeef) ?
+ " due to lost interrupt.\n"
+ " Please verify that the correct IRQ is being used for your board,\n"
+ " and that the motherboard IRQ jumpering matches the PCI setup on\n"
+ " PCI systems.\n"
+ " If you are using a NCR53c810 board in a PCI system, you should\n"
+ " also verify that the board is jumpered to use PCI INTA, since\n"
+ " most PCI motherboards lack support for INTB, INTC, and INTD.\n"
+ : "");
+ else if (hostdata->test_completed != 1)
+ printk ("scsi%d : test 1 bad interrupt value (%d)\n",
+ host->host_no, hostdata->test_completed);
+ else
+ failed = (hostdata->test_dest != 0xdeadbeef);
+
+ /* test_dest should have been DMAed full of the source pattern;
+    anything else points at a cache coherency problem. */
+ if (hostdata->test_dest != 0xdeadbeef) {
+ printk ("scsi%d : driver test 1 read 0x%x instead of 0xdeadbeef indicating a\n"
+ " probable cache invalidation problem. Please configure caching\n"
+ " as write-through or disabled\n",
+ host->host_no, hostdata->test_dest);
+ }
+
+ if (failed) {
+ printk ("scsi%d : DSP = 0x%p (script at 0x%p, start at 0x%x)\n",
+ host->host_no, bus_to_virt(NCR53c7x0_read32(DSP_REG)),
+ hostdata->script, start);
+ printk ("scsi%d : DSPS = 0x%x\n", host->host_no,
+ NCR53c7x0_read32(DSPS_REG));
+ restore_flags(flags);
+ return -1;
+ }
+ hostdata->test_running = 0;
+ }
+
+ if ((hostdata->options & OPTION_DEBUG_TEST2) &&
+ hostdata->state != STATE_DISABLED) {
+ u32 dsa[48];
+ unsigned char identify = IDENTIFY(0, 0);
+ unsigned char cmd[6];
+ unsigned char data[36];
+ unsigned char status = 0xff;
+ unsigned char msg = 0xff;
+
+ cmd[0] = INQUIRY;
+ cmd[1] = cmd[2] = cmd[3] = cmd[5] = 0;
+ cmd[4] = sizeof(data);
+
+ /* Hand-built DSA : count/address pairs for the identify msgout,
+    command, data-in, status, and msgin phases of the INQUIRY. */
+ dsa[2] = 1;
+ dsa[3] = virt_to_bus(&identify);
+ dsa[4] = 6;
+ dsa[5] = virt_to_bus(&cmd);
+ dsa[6] = sizeof(data);
+ dsa[7] = virt_to_bus(&data);
+ dsa[8] = 1;
+ dsa[9] = virt_to_bus(&status);
+ dsa[10] = 1;
+ dsa[11] = virt_to_bus(&msg);
+
+ /* Try the first three target ids on the bus. */
+ for (i = 0; i < 3; ++i) {
+ cli();
+ if (!hostdata->idle) {
+ printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+
+ /* SCNTL3 SDID */
+ dsa[0] = (0x33 << 24) | (i << 16) ;
+ hostdata->idle = 0;
+ hostdata->test_running = 2;
+ hostdata->test_completed = -1;
+ start = virt_to_bus(hostdata->script) + hostdata->E_test_2;
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSA_REG, virt_to_bus(dsa));
+ NCR53c7x0_write32 (DSP_REG, start);
+ sti();
+
+ timeout = jiffies + 5 * HZ; /* arbitrary */
+ while ((hostdata->test_completed == -1) && jiffies < timeout)
+ barrier();
+ NCR53c7x0_write32 (DSA_REG, 0);
+
+ if (hostdata->test_completed == 2) {
+ data[35] = 0;
+ printk ("scsi%d : test 2 INQUIRY to target %d, lun 0 : %s\n",
+ host->host_no, i, data + 8);
+ printk ("scsi%d : status ", host->host_no);
+ print_status (status);
+ printk ("\nscsi%d : message ", host->host_no);
+ print_msg (&msg);
+ printk ("\n");
+ } else if (hostdata->test_completed == 3) {
+ printk("scsi%d : test 2 no connection with target %d\n",
+ host->host_no, i);
+ if (!hostdata->idle) {
+ printk("scsi%d : not idle\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+ } else if (hostdata->test_completed == -1) {
+ printk ("scsi%d : test 2 timed out\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+ hostdata->test_running = 0;
+ }
+ }
+
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Function : static void NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : copy the NCR53c8xx dsa structure into cmd's dsa buffer,
+ * performing all necessary relocation.
+ *
+ * Inputs : cmd, a NCR53c7x0_cmd structure with a dsa area large
+ * enough to hold the NCR53c8xx dsa.
+ */
+
+/* Copy the NCR53c8xx dsa code template into cmd's dsa buffer and patch
+   in every per-command value (lun, target, sync script address, pointer
+   save/restore addresses). cmd->dsa must be large enough to hold the
+   whole NCR53c8xx dsa (NCR53c8xx_dsa_len bytes).
+
+   Review note : the original declared an `int i;` local that was never
+   used anywhere in the function; it has been removed. */
+static void
+NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd) {
+ Scsi_Cmnd *c = cmd->cmd;
+ struct Scsi_Host *host = c->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+
+ /* Start from the master copy of the dsa code template that
+    NCR53c8x0_init_fixup() relocated into hostdata->script. */
+ memcpy (cmd->dsa, hostdata->script + (hostdata->E_dsa_code_template / 4),
+ hostdata->E_dsa_code_template_end - hostdata->E_dsa_code_template);
+
+ /*
+ * Note : within the NCR 'C' code, dsa points to the _start_
+ * of the DSA structure, and _not_ the offset of dsa_zero within
+ * that structure used to facilitate shorter signed offsets
+ * for the 8 bit ALU.
+ *
+ * The implications of this are that
+ *
+ * - 32 bit A_dsa_* absolute values require an additional
+ * dsa_zero added to their value to be correct, since they are
+ * relative to dsa_zero which is in essentially a separate
+ * space from the code symbols.
+ *
+ * - All other symbols require no special treatment.
+ */
+
+ patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_lun, c->lun);
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_next, virt_to_bus(&cmd->dsa_next_addr));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_next, virt_to_bus(cmd->dsa) + Ent_dsa_zero -
+ Ent_dsa_code_template + A_dsa_next);
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_sync, virt_to_bus((void *)hostdata->sync[c->target].script));
+ patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_target, c->target);
+ /* XXX - new pointer stuff */
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_saved_pointer, virt_to_bus(&cmd->saved_data_pointer));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_saved_residual, virt_to_bus(&cmd->saved_residual));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_residual, virt_to_bus(&cmd->residual));
+
+ /* XXX - new start stuff */
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_dsa_value, virt_to_bus(&cmd->dsa_addr));
+
+}
+
+/*
+ * Function : run_process_issue_queue (void)
+ *
+ * Purpose : insure that the coroutine is running and will process our
+ * request. process_issue_queue_running is checked/set here (in an
+ * inline function) rather than in process_issue_queue itself to reduce
+ * the chances of stack overflow.
+ *
+ */
+
+/* Non-zero while process_issue_queue() is executing; checked/set here
+   (with interrupts off) so the coroutine is never entered recursively. */
+static volatile int process_issue_queue_running = 0;
+
+/* Ensure the issue-queue coroutine is running so it will pick up our
+   request; if it already is, this is a no-op. The flag test-and-set
+   must happen under cli() to avoid a race with the interrupt path. */
+static __inline__ void
+run_process_issue_queue(void) {
+ unsigned long flags;
+ save_flags (flags);
+ cli();
+ if (!process_issue_queue_running) {
+ process_issue_queue_running = 1;
+ process_issue_queue(flags);
+ /*
+ * process_issue_queue_running is cleared in process_issue_queue
+ * once it can't do more work, and process_issue_queue exits with
+ * interrupts disabled.
+ */
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Function : static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int
+ * result)
+ *
+ * Purpose : mark SCSI command as finished, OR'ing the host portion
+ * of the result word into the result field of the corresponding
+ * Scsi_Cmnd structure, and removing it from the internal queues.
+ *
+ * Inputs : cmd - command, result - entire result field
+ *
+ * Preconditions : the NCR chip should be in a halted state when
+ * abnormal_finished is run, since it modifies structures which
+ * the NCR expects to have exclusive access to.
+ */
+
+/* Abnormally complete a command : remove it from the NCR issue array,
+   the NCR reconnect (DSA) list, and the host running list, return its
+   NCR53c7x0_cmd to the free list, then complete the Scsi_Cmnd with the
+   given result. The chip must be halted, since the NCR otherwise owns
+   these structures. */
+static void
+abnormal_finished (struct NCR53c7x0_cmd *cmd, int result) {
+ Scsi_Cmnd *c = cmd->cmd;
+ struct Scsi_Host *host = c->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ int left, found;
+ volatile struct NCR53c7x0_cmd * linux_search;
+ volatile struct NCR53c7x0_cmd * volatile *linux_prev;
+ /* NOTE(review): the local `current` shadows the kernel's global
+    per-task `current` pointer within this function - confirm that is
+    intentional before reusing the name elsewhere. */
+ volatile u32 *ncr_prev, *current, ncr_search;
+
+#if 0
+ printk ("scsi%d: abnormal finished\n", host->host_no);
+#endif
+
+ save_flags(flags);
+ cli();
+ found = 0;
+ /*
+ * Traverse the NCR issue array until we find a match or run out
+ * of instructions. Instructions in the NCR issue array are
+ * either JUMP or NOP instructions, which are 2 words in length.
+ */
+
+
+ for (found = 0, left = host->can_queue, current = hostdata->schedule;
+ left > 0; --left, current += 2)
+ {
+ if (issue_to_cmd (host, hostdata, (u32 *) current) == cmd)
+ {
+ /* Overwrite the JUMP with a NOP so the NCR skips this slot;
+    0xdeadbeef marks the dead operand for debugging. */
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ ++found;
+ break;
+ }
+ }
+
+ /*
+ * Traverse the NCR reconnect list of DSA structures until we find
+ * a pointer to this dsa or have found too many command structures.
+ * We let prev point at the next field of the previous element or
+ * head of the list, so we don't do anything different for removing
+ * the head element.
+ */
+
+ for (left = host->can_queue,
+ ncr_search = hostdata->reconnect_dsa_head,
+ ncr_prev = &hostdata->reconnect_dsa_head;
+ left >= 0 && ncr_search &&
+ ((char*)bus_to_virt(ncr_search) + hostdata->dsa_start)
+ != (char *) cmd->dsa;
+ ncr_prev = (u32*) ((char*)bus_to_virt(ncr_search) +
+ hostdata->dsa_next), ncr_search = *ncr_prev, --left);
+
+ if (left < 0)
+ printk("scsi%d: loop detected in ncr reconnect list\n",
+ host->host_no);
+ else if (ncr_search)
+ if (found)
+ printk("scsi%d: scsi %ld in ncr issue array and reconnect lists\n",
+ host->host_no, c->pid);
+ else {
+ /* Unlink this dsa from the reconnect list. */
+ volatile u32 * next = (u32 *)
+ ((char *)bus_to_virt(ncr_search) + hostdata->dsa_next);
+ *ncr_prev = *next;
+/* If we're at the tail end of the issue queue, update that pointer too. */
+ found = 1;
+ }
+
+ /*
+ * Traverse the host running list until we find this command or discover
+ * we have too many elements, pointing linux_prev at the next field of the
+ * linux_previous element or head of the list, search at this element.
+ */
+
+ for (left = host->can_queue, linux_search = hostdata->running_list,
+ linux_prev = &hostdata->running_list;
+ left >= 0 && linux_search && linux_search != cmd;
+ linux_prev = &(linux_search->next),
+ linux_search = linux_search->next, --left);
+
+ if (left < 0)
+ printk ("scsi%d: loop detected in host running list for scsi pid %ld\n",
+ host->host_no, c->pid);
+ else if (linux_search) {
+ *linux_prev = linux_search->next;
+ --hostdata->busy[c->target][c->lun];
+ }
+
+ /* Return the NCR command structure to the free list */
+ cmd->next = hostdata->free;
+ hostdata->free = cmd;
+ c->host_scribble = NULL;
+
+ /* And return */
+ c->result = result;
+ c->scsi_done(c);
+
+ restore_flags(flags);
+ run_process_issue_queue();
+}
+
+/*
+ * Function : static void intr_break (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handler for breakpoint interrupts from a SCSI script
+ *
+ * Inputs : host - pointer to this host adapter's structure,
+ * cmd - pointer to the command (if any) dsa was pointing
+ * to.
+ *
+ */
+
+/* Handle an INT int_debug_break interrupt from the SCSI script :
+   locate the registered breakpoint for the current DSP and put the
+   chip in manual start mode. */
+static void
+intr_break (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_break *bp;
+#if 0
+ Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
+#endif
+ u32 *dsp;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ /*
+ * Find the break point corresponding to this address, and
+ * dump the appropriate debugging information to standard
+ * output.
+ */
+ save_flags(flags);
+ cli();
+ dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ for (bp = hostdata->breakpoints; bp && bp->address != dsp;
+ bp = bp->next);
+ if (!bp)
+ panic("scsi%d : break point interrupt from %p with no breakpoint!",
+ host->host_no, dsp);
+
+ /*
+ * Configure the NCR chip for manual start mode, so that we can
+ * point the DSP register at the instruction that follows the
+ * INT int_debug_break instruction.
+ */
+
+ NCR53c7x0_write8 (hostdata->dmode,
+ NCR53c7x0_read8(hostdata->dmode)|DMODE_MAN);
+
+ /*
+ * And update the DSP register, using the size of the old
+ * instruction in bytes.
+ */
+
+ /* NOTE(review): despite the comment above, no DSP write is performed
+    here - the implementation appears unfinished, or the restart is
+    expected to happen elsewhere. Confirm before relying on it. */
+ restore_flags(flags);
+}
+/*
+ * Function : static void print_synchronous (const char *prefix,
+ * const unsigned char *msg)
+ *
+ * Purpose : print a pretty, user and machine parsable representation
+ * of a SDTR message, including the "real" parameters, data
+ * clock so we can tell transfer rate at a glance.
+ *
+ * Inputs ; prefix - text to prepend, msg - SDTR message (5 bytes)
+ */
+
+/* Pretty-print an SDTR (synchronous data transfer request) message :
+   msg[3] is the transfer period factor (period = msg[3] * 4 ns) and
+   msg[4] the REQ/ACK offset; a zero offset means asynchronous.
+
+   Fix : the original computed 1000000000 / (msg[3] * 4) unconditionally
+   when msg[4] was non-zero, so a malformed message from a target with a
+   zero period factor caused a divide by zero. We now reject that case
+   explicitly. */
+static void
+print_synchronous (const char *prefix, const unsigned char *msg) {
+ if (msg[4]) {
+ int period = msg[3] * 4; /* transfer period in ns */
+ int Hz, integer, fraction;
+ if (!period) {
+ /* Guard against divide by zero on a bogus SDTR message. */
+ printk ("%sbogus synchronous SCSI (period 0ns, offset %d)\n",
+ prefix, (int) msg[4]);
+ return;
+ }
+ Hz = 1000000000 / period;
+ integer = Hz / 1000000;
+ fraction = (Hz - (integer * 1000000)) / 10000;
+ printk ("%speriod %dns offset %d %d.%02dMHz %s SCSI%s\n",
+ prefix, period, (int) msg[4], integer, fraction,
+ ((period < 200) ? "FAST" : "synchronous"),
+ ((period < 200) ? "-II" : ""));
+ } else
+ printk ("%sasynchronous SCSI\n", prefix);
+}
+
+/*
+ * Function : static void set_synchronous (struct Scsi_Host *host,
+ * int target, int sxfer, int scntl3, int now_connected)
+ *
+ * Purpose : reprogram transfers between the selected SCSI initiator and
+ * target with the given register values; in the indirect
+ * select operand, reselection script, and chip registers.
+ *
+ * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
+ * sxfer and scntl3 - NCR registers. now_connected - if non-zero,
+ * we should reprogram the registers now too.
+ */
+
+/* Reprogram synchronous transfer parameters for one target : store the
+   sxfer/scntl3 values in the per-target select-indirect operand and
+   reselection script, and (if now_connected) write them to the chip
+   registers immediately. */
+static void
+set_synchronous (struct Scsi_Host *host, int target, int sxfer, int scntl3,
+ int now_connected) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *script;
+ NCR53c7x0_local_setup(host);
+
+ /* These are eight bit registers */
+ sxfer &= 0xff;
+ scntl3 &= 0xff;
+
+ hostdata->sync[target].sxfer_sanity = sxfer;
+ hostdata->sync[target].scntl3_sanity = scntl3;
+
+/*
+ * HARD CODED : synchronous script is EIGHT words long. This
+ * must agree with 53c7.8xx.h
+ */
+
+ if ((hostdata->chip != 700) && (hostdata->chip != 70066)) {
+ hostdata->sync[target].select_indirect = (scntl3 << 24) |
+ (target << 16) | (sxfer << 8);
+
+ script = (u32 *) hostdata->sync[target].script;
+
+ /* XXX - add NCR53c7x0 code to reprogram SCF bits if we want to */
+ /* Emit the per-target reselection script : each instruction is a
+    two-word (opcode, operand) pair. 800-family chips get an extra
+    move to set SCNTL3 first. */
+ if ((hostdata->chip / 100) == 8) {
+ script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
+ DCMD_RWRI_OP_MOVE) << 24) |
+ (SCNTL3_REG_800 << 16) | (scntl3 << 8);
+ script[1] = 0;
+ script += 2;
+ }
+
+ script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
+ DCMD_RWRI_OP_MOVE) << 24) |
+ (SXFER_REG << 16) | (sxfer << 8);
+ script[1] = 0;
+ script += 2;
+
+#ifdef DEBUG_SYNC_INTR
+ if (hostdata->options & OPTION_DEBUG_DISCONNECT) {
+ script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_INT) << 24) | DBC_TCI_TRUE;
+ script[1] = DEBUG_SYNC_INTR;
+ script += 2;
+ }
+#endif
+
+ /* Terminate the script with an unconditional RETURN. */
+ script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_RETURN) << 24) | DBC_TCI_TRUE;
+ script[1] = 0;
+ script += 2;
+ }
+
+ if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
+ printk ("scsi%d : target %d sync parameters are sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, target, sxfer, scntl3);
+
+ if (now_connected) {
+ if ((hostdata->chip / 100) == 8)
+ NCR53c7x0_write8(SCNTL3_REG_800, scntl3);
+ NCR53c7x0_write8(SXFER_REG, sxfer);
+ }
+}
+
+
+/*
 * Function : static void asynchronous (struct Scsi_Host *host, int target)
+ *
+ * Purpose : reprogram between the selected SCSI Host adapter and target
+ * (assumed to be currently connected) for asynchronous transfers.
+ *
+ * Inputs : host - SCSI host structure, target - numeric target ID.
+ *
+ * Preconditions : the NCR chip should be in one of the halted states
+ */
+
static void
asynchronous (struct Scsi_Host *host, int target) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata;
    NCR53c7x0_local_setup(host);
    /* A zero synchronous offset forces asynchronous transfers; keep the
       saved clock-conversion value and program the chip immediately
       (final argument 1 = currently connected). */
    set_synchronous (host, target, /* no offset */ 0, hostdata->saved_scntl3,
	1);
    printk ("scsi%d : setting target %d to asynchronous SCSI\n",
	host->host_no, target);
}
+
+/*
+ * XXX - do we want to go out of our way (ie, add extra code to selection
+ * in the NCR53c710/NCR53c720 script) to reprogram the synchronous
+ * conversion bits, or can we be content in just setting the
+ * sxfer bits?
+ */
+
+/* Table for NCR53c8xx synchronous values */
/*
 * Each entry maps a total clock divisor (ratio of SCSI clock to
 * synchronous transfer clock, scaled by 10 so fractional ratios can be
 * represented) to the SCF (synchronous clock conversion factor) and TP
 * (transfer period) register fields.  Entries are sorted by ascending
 * divisor so synchronous() can select the first entry >= the optimum.
 */
static const struct {
    int div;		/* Total clock divisor * 10 */
    unsigned char scf;	/* synchronous clock conversion factor */
    unsigned char tp;	/* 4 + tp = xferp divisor */
} syncs[] = {
/*	div	scf	tp	div	scf	tp	div	scf	tp */
    {	40,	1,	0}, {	50,	1,	1}, {	60,	1,	2},
    {	70,	1,	3}, {	75,	2,	1}, {	80,	1,	4},
    {	90,	1,	5}, {	100,	1,	6}, {	105,	2,	3},
    {	110,	1,	7}, {	120,	2,	4}, {	135,	2,	5},
    {	140,	3,	3}, {	150,	2,	6}, {	160,	3,	4},
    {	165,	2,	7}, {	180,	3,	5}, {	200,	3,	6},
    {	210,	4,	3}, {	220,	3,	7}, {	240,	4,	4},
    {	270,	4,	5}, {	300,	4,	6}, {	330,	4,	7}
};
+
+/*
+ * Function : static void synchronous (struct Scsi_Host *host, int target,
+ * char *msg)
+ *
+ * Purpose : reprogram transfers between the selected SCSI initiator and
+ * target for synchronous SCSI transfers such that the synchronous
+ * offset is less than that requested and period at least as long
+ * as that requested. Also modify *msg such that it contains
+ * an appropriate response.
+ *
+ * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
+ * msg - synchronous transfer request.
+ */
+
+
static void
synchronous (struct Scsi_Host *host, int target, char *msg) {
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata;
    int desire, divisor, i, limit;
    unsigned char scntl3, sxfer;
/* The diagnostic message fits on one line, even with max. width integers */
    char buf[80];

/* Desired transfer clock in Hz */
    /* NOTE(review): msg is char *; msg[3] (period / 4ns) is assumed to be
       a small non-negative value here -- confirm callers pass validated
       SDTR data, since a zero period would divide by zero. */
    desire = 1000000000L / (msg[3] * 4);
/* Scale the available SCSI clock by 10 so we get tenths */
    divisor = (hostdata->scsi_clock * 10) / desire;

/* NCR chips can handle at most an offset of 8 */
    if (msg[4] > 8)
	msg[4] = 8;

    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk("scsi%d : optimal synchronous divisor of %d.%01d\n",
	    host->host_no, divisor / 10, divisor % 10);

    /* Pick the first tabulated divisor at least as large as the optimum,
       so the resulting period is never shorter than requested; clamp to
       the last (slowest) entry. */
    limit = (sizeof(syncs) / sizeof(syncs[0]) -1);
    for (i = 0; (i < limit) && (divisor > syncs[i].div); ++i);

    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk("scsi%d : selected synchronous divisor of %d.%01d\n",
	    host->host_no, syncs[i].div / 10, syncs[i].div % 10);

    /* Write the actually-achieved period back into the SDTR response. */
    msg[3] = ((1000000000L / hostdata->scsi_clock) * syncs[i].div / 10 / 4);

    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk("scsi%d : selected synchronous period of %dns\n", host->host_no,
	    msg[3] * 4);

    /* Only the 8xx family has the SCF clock-conversion field in SCNTL3. */
    scntl3 = (hostdata->chip / 100 == 8) ? ((hostdata->saved_scntl3 &
	~SCNTL3_800_SCF_MASK) | (syncs[i].scf << SCNTL3_800_SCF_SHIFT)) : 0;
    sxfer = (msg[4] << SXFER_MO_SHIFT) | ((syncs[i].tp) << SXFER_TP_SHIFT);
    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk ("scsi%d : sxfer=0x%x scntl3=0x%x\n",
	    host->host_no, (int) sxfer, (int) scntl3);
    set_synchronous (host, target, sxfer, scntl3, 1);
    sprintf (buf, "scsi%d : setting target %d to ", host->host_no, target);
    print_synchronous (buf, msg);
}
+
+/*
+ * Function : static int NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handler for INT generated instructions for the
+ * NCR53c810/820 SCSI SCRIPT
+ *
+ * Inputs : host - pointer to this host adapter's structure,
+ * cmd - pointer to the command (if any) dsa was pointing
+ * to.
+ *
+ */
+
+static int
+NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ int print;
+ Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 dsps,*dsp; /* Argument of the INT instruction */
+ NCR53c7x0_local_setup(host);
+ dsps = NCR53c7x0_read32(DSPS_REG);
+ dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : DSPS = 0x%x\n", host->host_no, dsps);
+
+ switch (dsps) {
+ case A_int_msg_1:
+ print = 1;
+ switch (hostdata->msg_buf[0]) {
+ /*
+ * Unless we've initiated synchronous negotiation, I don't
+ * think that this should happen.
+ */
+ case MESSAGE_REJECT:
+ hostdata->dsp = hostdata->script + hostdata->E_accept_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ if (cmd && (cmd->flags & CMD_FLAG_SDTR)) {
+ printk ("scsi%d : target %d rejected SDTR\n", host->host_no,
+ c->target);
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ asynchronous (host, c->target);
+ print = 0;
+ }
+ break;
+ case INITIATE_RECOVERY:
+ printk ("scsi%d : extended contingent allegiance not supported yet, rejecting\n",
+ host->host_no);
+ /* Fall through to default */
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ break;
+ default:
+ printk ("scsi%d : unsupported message, rejecting\n",
+ host->host_no);
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ }
+ if (print) {
+ printk ("scsi%d : received message", host->host_no);
+ if (c)
+ printk (" from target %d lun %d ", c->target, c->lun);
+ print_msg ((unsigned char *) hostdata->msg_buf);
+ printk("\n");
+ }
+
+ return SPECIFIC_INT_NOTHING;
+
+
+ case A_int_msg_sdtr:
+/*
+ * At this point, hostdata->msg_buf contains
+ * 0 EXTENDED MESSAGE
+ * 1 length
+ * 2 SDTR
+ * 3 period * 4ns
+ * 4 offset
+ */
+
+ if (cmd) {
+ char buf[80];
+ sprintf (buf, "scsi%d : target %d %s ", host->host_no, c->target,
+ (cmd->flags & CMD_FLAG_SDTR) ? "accepting" : "requesting");
+ print_synchronous (buf, (unsigned char *) hostdata->msg_buf);
+
+ /*
+ * Initiator initiated, won't happen unless synchronous
+ * transfers are enabled. If we get a SDTR message in
+ * response to our SDTR, we should program our parameters
+ * such that
+ * offset <= requested offset
+ * period >= requested period
+ */
+ if (cmd->flags & CMD_FLAG_SDTR) {
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ if (hostdata->msg_buf[4])
+ synchronous (host, c->target, (unsigned char *)
+ hostdata->msg_buf);
+ else
+ asynchronous (host, c->target);
+ hostdata->dsp = hostdata->script + hostdata->E_accept_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ } else {
+ if (hostdata->options & OPTION_SYNCHRONOUS) {
+ cmd->flags |= CMD_FLAG_DID_SDTR;
+ synchronous (host, c->target, (unsigned char *)
+ hostdata->msg_buf);
+ } else {
+ hostdata->msg_buf[4] = 0; /* 0 offset = async */
+ asynchronous (host, c->target);
+ }
+ patch_dsa_32 (cmd->dsa, dsa_msgout_other, 0, 5);
+ patch_dsa_32 (cmd->dsa, dsa_msgout_other, 1, (u32)
+ virt_to_bus ((void *)&hostdata->msg_buf));
+ hostdata->dsp = hostdata->script +
+ hostdata->E_respond_message / sizeof(u32);
+ hostdata->dsp_changed = 1;
+ }
+ return SPECIFIC_INT_NOTHING;
+ }
+ /* Fall through to abort if we couldn't find a cmd, and
+ therefore a dsa structure to twiddle */
+ case A_int_msg_wdtr:
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_err_unexpected_phase:
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : unexpected phase\n", host->host_no);
+ return SPECIFIC_INT_ABORT;
+ case A_int_err_selected:
+ printk ("scsi%d : selected by target %d\n", host->host_no,
+ (int) NCR53c7x0_read8(SDID_REG_800) &7);
+ hostdata->dsp = hostdata->script + hostdata->E_target_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_err_unexpected_reselect:
+ printk ("scsi%d : unexpected reselect by target %d lun %d\n",
+ host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & 7,
+ hostdata->reselected_identify & 7);
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+/*
+ * Since contingent allegiance conditions are cleared by the next
+ * command issued to a target, we must issue a REQUEST SENSE
+ * command after receiving a CHECK CONDITION status, before
+ * another command is issued.
+ *
+ * Since this NCR53c7x0_cmd will be freed after use, we don't
+ * care if we step on the various fields, so modify a few things.
+ */
+ case A_int_err_check_condition:
+#if 0
+ if (hostdata->options & OPTION_DEBUG_INTR)
+#endif
+ printk ("scsi%d : CHECK CONDITION\n", host->host_no);
+ if (!c) {
+ printk("scsi%d : CHECK CONDITION with no SCSI command\n",
+ host->host_no);
+ return SPECIFIC_INT_PANIC;
+ }
+
+ /*
+ * FIXME : this uses the normal one-byte selection message.
+ * We may want to renegotiate for synchronous & WIDE transfers
+ * since these could be the crux of our problem.
+ *
+ hostdata->NOP_insn* FIXME : once SCSI-II tagged queuing is implemented, we'll
+ * have to set this up so that the rest of the DSA
+ * agrees with this being an untagged queue'd command.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_msgout, 0, 1);
+
+ /*
+ * Modify the table indirect for COMMAND OUT phase, since
+ * Request Sense is a six byte command.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_cmdout, 0, 6);
+
+ c->cmnd[0] = REQUEST_SENSE;
+ c->cmnd[1] &= 0xe0; /* Zero all but LUN */
+ c->cmnd[2] = 0;
+ c->cmnd[3] = 0;
+ c->cmnd[4] = sizeof(c->sense_buffer);
+ c->cmnd[5] = 0;
+
+ /*
+ * Disable dataout phase, and program datain to transfer to the
+ * sense buffer, and add a jump to other_transfer after the
+ * command so overflow/underrun conditions are detected.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_dataout, 0,
+ virt_to_bus(hostdata->script) + hostdata->E_other_transfer);
+ patch_dsa_32 (cmd->dsa, dsa_datain, 0,
+ virt_to_bus(cmd->data_transfer_start));
+ cmd->data_transfer_start[0] = (((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
+ DCMD_BMI_IO)) << 24) | sizeof(c->sense_buffer);
+ cmd->data_transfer_start[1] = (u32) virt_to_bus(c->sense_buffer);
+
+ cmd->data_transfer_start[2] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP)
+ << 24) | DBC_TCI_TRUE;
+ cmd->data_transfer_start[3] = (u32) virt_to_bus(hostdata->script) +
+ hostdata->E_other_transfer;
+
+ /*
+ * Currently, this command is flagged as completed, ie
+ * it has valid status and message data. Reflag it as
+ * incomplete. Q - need to do something so that original
+ * status, etc are used.
+ */
+
+ cmd->cmd->result = 0xffff;
+
+ /*
+ * Restart command as a REQUEST SENSE.
+ */
+ hostdata->dsp = (u32 *) hostdata->script + hostdata->E_select /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_debug_break:
+ return SPECIFIC_INT_BREAK;
+ case A_int_norm_aborted:
+ hostdata->dsp = (u32 *) hostdata->schedule;
+ hostdata->dsp_changed = 1;
+ if (cmd)
+ abnormal_finished (cmd, DID_ERROR << 16);
+ return SPECIFIC_INT_NOTHING;
+ case A_int_test_1:
+ case A_int_test_2:
+ hostdata->idle = 1;
+ hostdata->test_completed = (dsps - A_int_test_1) / 0x00010000 + 1;
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk("scsi%d : test%d complete\n", host->host_no,
+ hostdata->test_completed);
+ return SPECIFIC_INT_NOTHING;
+#ifdef A_int_debug_reselected_ok
+ case A_int_debug_reselected_ok:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ u32 *dsa;
+ dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
+
+ printk("scsi%d : reselected_ok (DSA = 0x%x (virt 0x%p)\n",
+ host->host_no, NCR53c7x0_read32(DSA_REG), dsa);
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt(cmd->saved_data_pointer));
+ print_insn (host, hostdata->script + Ent_reselected_ok /
+ sizeof(u32), "", 1);
+ printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, NCR53c7x0_read8(SXFER_REG),
+ NCR53c7x0_read8(SCNTL3_REG_800));
+ if (c) {
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script, "", 1);
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script + 2, "", 1);
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_reselect_check
+ case A_int_debug_reselect_check:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ u32 *dsa;
+#if 0
+ u32 *code;
+#endif
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
+ printk("scsi%d : reselected_check_next (DSA = 0x%lx (virt 0x%p))\n",
+ host->host_no, virt_to_bus(dsa), dsa);
+ if (dsa) {
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer));
+#if 0
+ printk("scsi%d : template code :\n", host->host_no);
+ for (code = dsa + (Ent_dsa_code_check_reselect - Ent_dsa_zero)
+ / sizeof(u32); code < (dsa + Ent_dsa_zero / sizeof(u32));
+ code += print_insn (host, code, "", 1));
+#endif
+ }
+ print_insn (host, hostdata->script + Ent_reselected_ok /
+ sizeof(u32), "", 1);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_dsa_schedule
+ case A_int_debug_dsa_schedule:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ u32 *dsa;
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
+ printk("scsi%d : dsa_schedule (old DSA = 0x%lx (virt 0x%p))\n",
+ host->host_no, virt_to_bus(dsa), dsa);
+ if (dsa)
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n"
+ " (temp was 0x%x (virt 0x%p))\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer),
+ NCR53c7x0_read32 (TEMP_REG),
+ bus_to_virt (NCR53c7x0_read32(TEMP_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_scheduled
+ case A_int_debug_scheduled:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : new I/O 0x%x (virt 0x%p) scheduled\n",
+ host->host_no, NCR53c7x0_read32(DSA_REG),
+ bus_to_virt(NCR53c7x0_read32(DSA_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_idle
+ case A_int_debug_idle:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : idle\n", host->host_no);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_cmd
+ case A_int_debug_cmd:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : command sent\n");
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_dsa_loaded
+ case A_int_debug_dsa_loaded:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : DSA loaded with 0x%x (virt 0x%p)\n", host->host_no,
+ NCR53c7x0_read32(DSA_REG),
+ bus_to_virt(NCR53c7x0_read32(DSA_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_reselected
+ case A_int_debug_reselected:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk("scsi%d : reselected by target %d lun %d\n",
+ host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & ~0x80,
+ (int) hostdata->reselected_identify & 7);
+ print_queues(host);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_disconnect_msg
+ case A_int_debug_disconnect_msg:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ if (c)
+ printk("scsi%d : target %d lun %d disconnecting\n",
+ host->host_no, c->target, c->lun);
+ else
+ printk("scsi%d : unknown target disconnecting\n",
+ host->host_no);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_disconnected
+ case A_int_debug_disconnected:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk ("scsi%d : disconnected, new queues are\n",
+ host->host_no);
+ print_queues(host);
+#if 0
+ printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, NCR53c7x0_read8(SXFER_REG),
+ NCR53c7x0_read8(SCNTL3_REG_800));
+#endif
+ if (c) {
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script, "", 1);
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script + 2, "", 1);
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_panic
+ case A_int_debug_panic:
+ printk("scsi%d : int_debug_panic received\n", host->host_no);
+ print_lots (host);
+ return SPECIFIC_INT_PANIC;
+#endif
+#ifdef A_int_debug_saved
+ case A_int_debug_saved:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk ("scsi%d : saved data pointer 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer));
+ print_progress (c);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_restored
+ case A_int_debug_restored:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ if (cmd) {
+ int size;
+ printk ("scsi%d : restored data pointer 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer, bus_to_virt (
+ cmd->saved_data_pointer));
+ size = print_insn (host, (u32 *)
+ bus_to_virt(cmd->saved_data_pointer), "", 1);
+ size = print_insn (host, (u32 *)
+ bus_to_virt(cmd->saved_data_pointer) + size, "", 1);
+ print_progress (c);
+ }
+#if 0
+ printk ("scsi%d : datapath residual %d\n",
+ host->host_no, datapath_residual (host)) ;
+#endif
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_sync
+ case A_int_debug_sync:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
+ unsigned char sxfer = NCR53c7x0_read8 (SXFER_REG),
+ scntl3 = NCR53c7x0_read8 (SCNTL3_REG_800);
+ if (c) {
+ if (sxfer != hostdata->sync[c->target].sxfer_sanity ||
+ scntl3 != hostdata->sync[c->target].scntl3_sanity) {
+ printk ("scsi%d : sync sanity check failed sxfer=0x%x, scntl3=0x%x",
+ host->host_no, sxfer, scntl3);
+ NCR53c7x0_write8 (SXFER_REG, sxfer);
+ NCR53c7x0_write8 (SCNTL3_REG_800, scntl3);
+ }
+ } else
+ printk ("scsi%d : unknown command sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, (int) sxfer, (int) scntl3);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_datain
+ case A_int_debug_datain:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
+ int size;
+ printk ("scsi%d : In do_datain (%s) sxfer=0x%x, scntl3=0x%x\n"
+ " datapath residual=%d\n",
+ host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
+ (int) NCR53c7x0_read8(SXFER_REG),
+ (int) NCR53c7x0_read8(SCNTL3_REG_800),
+ datapath_residual (host)) ;
+ print_insn (host, dsp, "", 1);
+ size = print_insn (host, (u32 *) bus_to_virt(dsp[1]), "", 1);
+ print_insn (host, (u32 *) bus_to_virt(dsp[1]) + size, "", 1);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+/*
+ * FIXME : for 7xx support, we need to read SDID_REG_700 and handle
+ * the comparison as bitfielded, not binary.
+ */
+#ifdef A_int_debug_check_dsa
+ case A_int_debug_check_dsa:
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
+ int sdid = NCR53c7x0_read8 (SDID_REG_800) & 15;
+ char *where = dsp - NCR53c7x0_insn_size(NCR53c7x0_read8
+ (DCMD_REG)) == hostdata->script +
+ Ent_select_check_dsa / sizeof(u32) ?
+ "selection" : "reselection";
+ if (c && sdid != c->target) {
+ printk ("scsi%d : SDID target %d != DSA target %d at %s\n",
+ host->host_no, sdid, c->target, where);
+ print_lots(host);
+ dump_events (host, 20);
+ return SPECIFIC_INT_PANIC;
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+ default:
+ if ((dsps & 0xff000000) == 0x03000000) {
+ printk ("scsi%d : misc debug interrupt 0x%x\n",
+ host->host_no, dsps);
+ return SPECIFIC_INT_RESTART;
+ } else if ((dsps & 0xff000000) == 0x05000000) {
+ if (hostdata->events) {
+ struct NCR53c7x0_event *event;
+ ++hostdata->event_index;
+ if (hostdata->event_index >= hostdata->event_size)
+ hostdata->event_index = 0;
+ event = (struct NCR53c7x0_event *) hostdata->events +
+ hostdata->event_index;
+ event->event = (enum ncr_event) dsps;
+ event->dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ /* FIXME : this needs to change for the '7xx family */
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON)
+ event->target = NCR53c7x0_read8(SSID_REG_800);
+ else
+ event->target = 255;
+
+ if (event->event == EVENT_RESELECT)
+ event->lun = hostdata->reselected_identify & 0xf;
+ else if (c)
+ event->lun = c->lun;
+ else
+ event->lun = 255;
+ do_gettimeofday(&(event->time));
+ if (c) {
+ event->pid = c->pid;
+ memcpy ((void *) event->cmnd, (void *) c->cmnd,
+ sizeof (event->cmnd));
+ } else {
+ event->pid = -1;
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+ }
+
+ printk ("scsi%d : unknown user interrupt 0x%x\n",
+ host->host_no, (unsigned) dsps);
+ return SPECIFIC_INT_PANIC;
+ }
+}
+
+/*
 * XXX - the stock NCR assembler won't output the scriptu.h file,
 * which #undef's all of the #define'd CPP symbols from the script.h
 * file; without it you will have problems if you use multiple
 * scripts with the same symbol names.
 *
 * If you insist on using NCR's assembler, you could generate
 * scriptu.h from script.h using something like
 *
 * grep '#define' script.h | \
 * sed 's/#define[ 	][ 	]*\([_a-zA-Z][_a-zA-Z0-9]*\).*$/#undef \1/' \
 * > scriptu.h
+ */
+
+#include "53c8xx_u.h"
+
+/* XXX - add alternate script handling code here */
+
+
+#ifdef NCR_DEBUG
+/*
+ * Debugging without a debugger is no fun. So, I've provided
+ * a debugging interface in the NCR53c7x0 driver. To avoid
+ * kernel cruft, there's just enough here to act as an interface
+ * to a user level debugger (aka, GDB).
+ *
+ *
+ * The following restrictions apply to debugger commands :
+ * 1. The command must be terminated by a newline.
+ * 2. Command length must be less than 80 bytes including the
+ * newline.
+ * 3. The entire command must be written with one system call.
+ */
+
/*
 * Help text for the debugger interface.  Note : this must be declared
 * as a char array; the original declared a single char and initialized
 * it with a string literal, which is a constraint violation.
 */
static const char debugger_help[] =
"bc <addr> 			- clear breakpoint\n"
"bl				- list breakpoints\n"
"bs <addr>			- set breakpoint\n"
"g				- start\n"
"h				- halt\n"
"?				- this message\n"
"i				- info\n"
"mp <addr> <size> 		- print memory\n"
"ms <addr> <size> <value>	- store memory\n"
"rp <num> <size>		- print register\n"
"rs <num> <size> <value> 	- store register\n"
"s				- single step\n"
"tb				- begin trace \n"
"te				- end trace\n";
+
+/*
+ * Whenever we change a break point, we should probably
+ * set the NCR up so that it is in a single step mode.
+ */
+
+static int debugger_fn_bc (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ instance->hostdata;
+ struct NCR53c7x0_break *bp, **prev;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ for (bp = (struct NCR53c7x0_break *) instance->breakpoints,
+ prev = (struct NCR53c7x0_break **) &instance->breakpoints;
+ bp; prev = (struct NCR53c7x0_break **) &(bp->next),
+ bp = (struct NCR53c7x0_break *) bp->next);
+
+ if (!bp) {
+ restore_flags(flags);
+ return -EIO;
+ }
+
+ /*
+ * XXX - we need to insure that the processor is halted
+ * here in order to prevent a race condition.
+ */
+
+ memcpy ((void *) bp->addr, (void *) bp->old, sizeof(bp->old));
+ if (prev)
+ *prev = bp->next;
+
+ restore_flags(flags);
+ return 0;
+}
+
+
+static int
+debugger_fn_bl (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_break *bp;
+ char buf[80];
+ size_t len;
+ unsigned long flags;
+ /*
+ * XXX - we need to insure that the processor is halted
+ * here in order to prevent a race condition. So, if the
+ * processor isn't halted, print an error message and continue.
+ */
+
+ sprintf (buf, "scsi%d : bp : warning : processor not halted\b",
+ host->host_no);
+ debugger_kernel_write (host, buf, strlen(buf));
+
+ save_flags(flags);
+ cli();
+ for (bp = (struct NCR53c7x0_break *) host->breakpoints;
+ bp; bp = (struct NCR53c7x0_break *) bp->next); {
+ sprintf (buf, "scsi%d : bp : success : at %08x, replaces %08x %08x",
+ bp->addr, bp->old[0], bp->old[1]);
+ len = strlen(buf);
+ if ((bp->old[0] & (DCMD_TYPE_MASK << 24)) ==
+ (DCMD_TYPE_MMI << 24)) {
+ sprintf(buf + len, "%08x\n", * (u32 *) bp->addr);
+ } else {
+ sprintf(buf + len, "\n");
+ }
+ len = strlen(buf);
+ debugger_kernel_write (host, buf, len);
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+static int
+debugger_fn_bs (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_break *bp;
+ char buf[80];
+ size_t len;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+
+ if (hostdata->state != STATE_HALTED) {
+ sprintf (buf, "scsi%d : bs : failure : NCR not halted\n", host->host_no);
+ debugger_kernel_write (host, buf, strlen(buf));
+ restore_flags(flags);
+ return -1;
+ }
+
+ if (!(bp = kmalloc (sizeof (struct NCR53c7x0_break)))) {
+ printk ("scsi%d : kmalloc(%d) of breakpoint structure failed, try again\n",
+ host->host_no, sizeof(struct NCR53c7x0_break));
+ restore_flags(flags);
+ return -1;
+ }
+
+ bp->address = (u32 *) args[0];
+ memcpy ((void *) bp->old_instruction, (void *) bp->address, 8);
+ bp->old_size = (((bp->old_instruction[0] >> 24) & DCMD_TYPE_MASK) ==
+ DCMD_TYPE_MMI ? 3 : 2;
+ bp->next = hostdata->breakpoints;
+ hostdata->breakpoints = bp->next;
+ memcpy ((void *) bp->address, (void *) hostdata->E_debug_break, 8);
+
+ restore_flags(flags);
+ return 0;
+}
+
+#define TOKEN(name,nargs) {#name, nargs, debugger_fn_##name}
+static const struct debugger_token {
+ char *name;
+ int numargs;
+ int (*fn)(struct debugger_token *token, u32 args[]);
+} debugger_tokens[] = {
+ TOKEN(bc,1), TOKEN(bl,0), TOKEN(bs,1), TOKEN(g,0), TOKEN(halt,0),
+ {DT_help, "?", 0} , TOKEN(h,0), TOKEN(i,0), TOKEN(mp,2),
+ TOKEN(ms,3), TOKEN(rp,2), TOKEN(rs,2), TOKEN(s,0), TOKEN(tb,0), TOKEN(te,0)
+};
+
+#define NDT sizeof(debugger_tokens / sizeof(struct debugger_token))
+
+static struct Scsi_Host * inode_to_host (struct inode *inode) {
+ int dev;
+ struct Scsi_Host *tmp;
+ for (dev = MINOR(inode->rdev), host = first_host;
+ (host->hostt == the_template); --dev, host = host->next)
+ if (!dev) return host;
+ return NULL;
+}
+
+
+static int
+debugger_user_write (struct inode *inode,struct file *filp,
+ char *buf,int count) {
+ struct Scsi_Host *host; /* This SCSI host */
+ struct NCR53c7x0_hostadata *hostdata;
+ char input_buf[80], /* Kernel space copy of buf */
+ *ptr; /* Pointer to argument list */
+ u32 args[3]; /* Arguments */
+ int i, j, error, len;
+
+ if (!(host = inode_to_host(inode)))
+ return -ENXIO;
+
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+
+ if (error = verify_area(VERIFY_READ,buf,count))
+ return error;
+
+ if (count > 80)
+ return -EIO;
+
+ memcpy_from_fs(input_buf, buf, count);
+
+ if (input_buf[count - 1] != '\n')
+ return -EIO;
+
+ input_buf[count - 1]=0;
+
+ for (i = 0; i < NDT; ++i) {
+ len = strlen (debugger_tokens[i].name);
+ if (!strncmp(input_buf, debugger_tokens[i].name, len))
+ break;
+ };
+
+ if (i == NDT)
+ return -EIO;
+
+ for (ptr = input_buf + len, j = 0; j < debugger_tokens[i].nargs && *ptr;) {
+ if (*ptr == ' ' || *ptr == '\t') {
+ ++ptr;
+ } else if (isdigit(*ptr)) {
+ args[j++] = simple_strtoul (ptr, &ptr, 0);
+ } else {
+ return -EIO;
+ }
+ }
+
+ if (j != debugger_tokens[i].nargs)
+ return -EIO;
+
+ return count;
+}
+
+static int
+debugger_user_read (struct inode *inode,struct file *filp,
+ char *buf,int count) {
+ struct Scsi_Host *instance;
+
+}
+
+static int
+debugger_kernel_write (struct Scsi_Host *host, char *buf, size_t
+ buflen) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int copy, left;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ while (buflen) {
+ left = (hostdata->debug_buf + hostdata->debug_size - 1) -
+ hostdata->debug_write;
+ copy = (buflen <= left) ? buflen : left;
+ memcpy (hostdata->debug_write, buf, copy);
+ buf += copy;
+ buflen -= copy;
+ hostdata->debug_count += copy;
+ if ((hostdata->debug_write += copy) ==
+ (hostdata->debug_buf + hostdata->debug_size))
+ hosdata->debug_write = hostdata->debug_buf;
+ }
+ restore_flags(flags);
+}
+
+#endif /* def NCRDEBUG */
+
+/*
+ * Function : static void NCR538xx_soft_reset (struct Scsi_Host *host)
+ *
+ * Purpose : perform a soft reset of the NCR53c8xx chip
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ * Preconditions : NCR53c7x0_init must have been called for this
+ * host.
+ *
+ */
+
static void
NCR53c8x0_soft_reset (struct Scsi_Host *host) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata;
    NCR53c7x0_local_setup(host);


    /*
     * Do a soft reset of the chip so that everything is
     * reinitialized to the power-on state.
     *
     * Basically follow the procedure outlined in the NCR53c700
     * data manual under Chapter Six, How to Use, Steps Necessary to
     * Start SCRIPTS, with the exception of actually starting the
     * script and setting up the synchronous transfer gunk.
     */

    /* Pulse the software-reset bit, then restore manual-start mode
       cleared in the saved DMODE value. */
    NCR53c7x0_write8(ISTAT_REG_800, ISTAT_10_SRST);
    NCR53c7x0_write8(ISTAT_REG_800, 0);
    NCR53c7x0_write8(hostdata->dmode, hostdata->saved_dmode & ~DMODE_MAN);


    /*
     * Respond to reselection by targets and use our _initiator_ SCSI ID
     * for arbitration. If notyet, also respond to SCSI selection.
     *
     * XXX - Note : we must reprogram this when reselecting as
     * 	a target.
     */

#ifdef notyet
    NCR53c7x0_write8(SCID_REG, (host->this_id & 7)|SCID_800_RRE|SCID_800_SRE);
#else
    NCR53c7x0_write8(SCID_REG, (host->this_id & 7)|SCID_800_RRE);
#endif
    NCR53c7x0_write8(RESPID_REG_800, hostdata->this_id_mask);

    /*
     * Use a maximum (1.6) second handshake to handshake timeout,
     * and SCSI recommended .5s selection timeout.
     */

    /*
     * The new gcc won't recognize preprocessing directives
     * within macro args.
     */
#if 0
    NCR53c7x0_write8(STIME0_REG_800,
    	((selection_timeout << STIME0_800_SEL_SHIFT) & STIME0_800_SEL_MASK)
	| ((15 << STIME0_800_HTH_SHIFT) & STIME0_800_HTH_MASK));
#else
/* Disable HTH interrupt */
    NCR53c7x0_write8(STIME0_REG_800,
    	((selection_timeout << STIME0_800_SEL_SHIFT) & STIME0_800_SEL_MASK));
#endif


    /*
     * Enable active negation for happy synchronous transfers.
     */

    NCR53c7x0_write8(STEST3_REG_800, STEST3_800_TE);

    /*
     * Enable all interrupts, except parity which we only want when
     * the user requests it.
     */

    NCR53c7x0_write8(DIEN_REG, DIEN_800_MDPE | DIEN_800_BF |
		DIEN_ABRT | DIEN_SSI | DIEN_SIR | DIEN_800_IID);


    NCR53c7x0_write8(SIEN0_REG_800, ((hostdata->options & OPTION_PARITY) ?
	    SIEN_PAR : 0) | SIEN_RST | SIEN_UDC | SIEN_SGE | SIEN_MA);
    NCR53c7x0_write8(SIEN1_REG_800, SIEN1_800_STO | SIEN1_800_HTH);

    /*
     * Use saved clock frequency divisor and scripts loaded in 16 bit
     * mode flags from the saved dcntl.
     */

    NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl);
    NCR53c7x0_write8(CTEST4_REG_800, hostdata->saved_ctest4);

    /* Enable active negation */
    /* NOTE(review): this repeats the STEST3 write above; harmless, but
       one of the two is redundant -- confirm before removing either. */
    NCR53c7x0_write8(STEST3_REG_800, STEST3_800_TE);
}
+
+/*
+ * Function static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Return the first free NCR53c7x0_cmd structure (which are
+ * 	reused in a LIFO manner to minimize cache thrashing).
+ *
+ * Side effects : If we haven't yet scheduled allocation of NCR53c7x0_cmd
+ *	structures for this device, do so.  Attempt to complete all scheduled
+ *	allocations using kmalloc(), putting NCR53c7x0_cmd structures on
+ *	the free list.  Teach programmers not to drink and hack.
+ *
+ * Inputs : cmd - SCSI command
+ *
+ * Returns : NCR53c7x0_cmd structure allocated on behalf of cmd;
+ *	NULL on failure.
+ */
+
+static struct NCR53c7x0_cmd *
+allocate_cmd (Scsi_Cmnd *cmd) {
+    struct Scsi_Host *host = cmd->host;
+    struct NCR53c7x0_hostdata *hostdata =
+        (struct NCR53c7x0_hostdata *) host->hostdata;
+    void *real;			/* Real address */
+    int size;			/* Size of *tmp */
+    struct NCR53c7x0_cmd *tmp;
+    unsigned long flags;
+
+    if (hostdata->options & OPTION_DEBUG_ALLOCATION)
+        printk ("scsi%d : num_cmds = %d, can_queue = %d\n"
+                " target = %d, lun = %d, %s\n",
+            host->host_no, hostdata->num_cmds, host->can_queue,
+            cmd->target, cmd->lun, (hostdata->cmd_allocated[cmd->target] &
+                (1 << cmd->lun)) ? "already allocated" : "not allocated");
+
+/*
+ * If we have not yet reserved commands for this I_T_L nexus, and
+ * the device exists (as indicated by permanent Scsi_Cmnd structures
+ * being allocated under 1.3.x, or being outside of scan_scsis in
+ * 1.2.x), do so now.
+ */
+    if (!(hostdata->cmd_allocated[cmd->target] & (1 << cmd->lun)) &&
+#ifdef LINUX_1_2
+        !in_scan_scsis
+#else
+        cmd->device && cmd->device->has_cmdblocks
+#endif
+        ) {
+        /* Reserve cmd_per_lun more slots, but never exceed can_queue. */
+        if ((hostdata->extra_allocate + hostdata->num_cmds) < host->can_queue)
+            hostdata->extra_allocate += host->cmd_per_lun;
+        hostdata->cmd_allocated[cmd->target] |= (1 << cmd->lun);
+    }
+
+    /* Attempt to satisfy all outstanding allocation requests now;
+       a kmalloc failure just stops early and we retry on a later call. */
+    for (; hostdata->extra_allocate > 0 ; --hostdata->extra_allocate,
+        ++hostdata->num_cmds) {
+        /* historically, kmalloc has returned unaligned addresses; pad so we
+           have enough room to ROUNDUP */
+        size = hostdata->max_cmd_size + sizeof (void *);
+/* FIXME: for ISA bus '7xx chips, we need to or GFP_DMA in here */
+        real = kmalloc (size, GFP_ATOMIC);
+        if (!real) {
+            if (hostdata->options & OPTION_DEBUG_ALLOCATION)
+                printk ("scsi%d : kmalloc(%d) failed\n",
+                    host->host_no, size);
+            break;
+        }
+        /* Keep the unaligned address/size so the structure can be freed
+           later through tmp->free(tmp->real, tmp->size). */
+        tmp = ROUNDUP(real, void *);
+        tmp->real = real;
+        tmp->size = size;
+#ifdef LINUX_1_2
+        tmp->free = ((void (*)(void *, int)) kfree_s);
+#else
+        tmp->free = ((void (*)(void *, int)) kfree);
+#endif
+        save_flags (flags);
+        cli();
+        tmp->next = hostdata->free;
+        hostdata->free = tmp;
+        restore_flags (flags);
+    }
+    /* Pop the head of the free list with interrupts off (LIFO reuse). */
+    save_flags(flags);
+    cli();
+    tmp = (struct NCR53c7x0_cmd *) hostdata->free;
+    if (tmp) {
+        hostdata->free = tmp->next;
+    }
+    restore_flags(flags);
+    if (!tmp)
+        printk ("scsi%d : can't allocate command for target %d lun %d\n",
+            host->host_no, cmd->target, cmd->lun);
+    return tmp;
+}
+
+/*
+ * Function static struct NCR53c7x0_cmd *create_cmd (Scsi_Cmnd *cmd)
+ *
+ *
+ * Purpose : allocate a NCR53c7x0_cmd structure, initialize it based on the
+ * 	Scsi_Cmnd structure passed in cmd, including dsa and Linux field
+ * 	initialization, and dsa code relocation.
+ *
+ * Inputs : cmd - SCSI command
+ *
+ * Returns : NCR53c7x0_cmd structure corresponding to cmd,
+ *	NULL on failure.
+ */
+
+static struct NCR53c7x0_cmd *
+create_cmd (Scsi_Cmnd *cmd) {
+    NCR53c7x0_local_declare();
+    struct Scsi_Host *host = cmd->host;
+    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+        host->hostdata;
+    struct NCR53c7x0_cmd *tmp; 	/* NCR53c7x0_cmd structure for this command */
+    int datain,  		/* Number of instructions per phase */
+        dataout;
+    int data_transfer_instructions, /* Count of dynamic instructions */
+        i;			/* Counter */
+    u32 *cmd_datain,		/* Address of datain/dataout code */
+        *cmd_dataout;		/* Incremented as we assemble */
+#ifdef notyet
+    unsigned char *msgptr;	/* Current byte in select message */
+    int msglen;			/* Length of whole select message */
+#endif
+    unsigned long flags;
+    NCR53c7x0_local_setup(cmd->host);
+
+    if (!(tmp = allocate_cmd (cmd)))
+        return NULL;
+
+
+    /*
+     * Decide whether we need to generate commands for DATA IN,
+     * DATA OUT, neither, or both based on the SCSI command
+     */
+
+    switch (cmd->cmnd[0]) {
+    /* These commands do DATA IN */
+    case INQUIRY:
+    case MODE_SENSE:
+    case READ_6:
+    case READ_10:
+    case READ_CAPACITY:
+    case REQUEST_SENSE:
+        /* Two instructions (CALL + MOVE) per scatter/gather segment,
+           plus three extra slots of headroom. */
+        datain = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+        dataout = 0;
+        break;
+    /* These commands do DATA OUT */
+    case MODE_SELECT:
+    case WRITE_6:
+    case WRITE_10:
+    case START_STOP: /* also SCAN, which may do DATA OUT */
+#if 0
+        printk("scsi%d : command is ", host->host_no);
+        print_command(cmd->cmnd);
+#endif
+#if 0
+        printk ("scsi%d : %d scatter/gather segments\n", host->host_no,
+            cmd->use_sg);
+#endif
+        datain = 0;
+        dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+#if 0
+        hostdata->options |= OPTION_DEBUG_INTR;
+#endif
+        break;
+    /*
+     * These commands do no data transfer, we should force an
+     * interrupt if a data phase is attempted on them.
+     */
+    case TEST_UNIT_READY:
+        datain = dataout = 0;
+        break;
+    /*
+     * We don't know about these commands, so generate code to handle
+     * both DATA IN and DATA OUT phases.
+     */
+    default:
+        datain = dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+    }
+
+    /*
+     * New code : so that active pointers work correctly regardless
+     * of where the saved data pointer is at, we want to immediately
+     * enter the dynamic code after selection, and on a non-data
+     * phase perform a CALL to the non-data phase handler, with
+     * returns back to this address.
+     *
+     * If a phase mismatch is encountered in the middle of a
+     * Block MOVE instruction, we want to _leave_ that instruction
+     * unchanged as the current case is, modify a temporary buffer,
+     * and point the active pointer (TEMP) at that.
+     *
+     * Furthermore, we want to implement a saved data pointer,
+     * set by the SAVE_DATA_POINTERs message.
+     *
+     * So, the data transfer segments will change to
+     *		CALL data_transfer, WHEN NOT data phase
+     *		MOVE x, x, WHEN data phase
+     *		( repeat )
+     *		JUMP other_transfer
+     */
+
+    data_transfer_instructions = datain + dataout;
+
+    /*
+     * When we perform a request sense, we overwrite various things,
+     * including the data transfer code.  Make sure we have enough
+     * space to do that.
+     */
+
+    if (data_transfer_instructions < 2)
+        data_transfer_instructions = 2;
+
+
+    /*
+     * The saved data pointer is set up so that a RESTORE POINTERS message
+     * will start the data transfer over at the beginning.
+     */
+
+    tmp->saved_data_pointer = virt_to_bus (hostdata->script) +
+        hostdata->E_data_transfer;
+
+    /*
+     * Initialize Linux specific fields.
+     */
+
+    tmp->cmd = cmd;
+    tmp->next = NULL;
+    tmp->flags = 0;
+    tmp->dsa_next_addr = virt_to_bus(tmp->dsa) + hostdata->dsa_next -
+        hostdata->dsa_start;
+    tmp->dsa_addr = virt_to_bus(tmp->dsa) - hostdata->dsa_start;
+
+    /*
+     * Calculate addresses of dynamic code to fill in DSA
+     */
+
+    /* Dynamic data-transfer instructions live immediately after the
+       fixed DSA template; each instruction is two 32-bit words. */
+    tmp->data_transfer_start = tmp->dsa + (hostdata->dsa_end -
+        hostdata->dsa_start) / sizeof(u32);
+    tmp->data_transfer_end = tmp->data_transfer_start +
+        2 * data_transfer_instructions;
+
+    cmd_datain = datain ? tmp->data_transfer_start : NULL;
+    cmd_dataout = dataout ? (datain ? cmd_datain + 2 * datain : tmp->
+        data_transfer_start) : NULL;
+
+    /*
+     * Fill in the NCR53c7x0_cmd structure as follows
+     * dsa, with fixed up DSA code
+     * datain code
+     * dataout code
+     */
+
+    /* Copy template code into dsa and perform all necessary fixups */
+    if (hostdata->dsa_fixup)
+        hostdata->dsa_fixup(tmp);
+
+    patch_dsa_32(tmp->dsa, dsa_next, 0, 0);
+    patch_dsa_32(tmp->dsa, dsa_cmnd, 0, virt_to_bus(cmd));
+
+    /* Cross-check that the cached select_indirect word still matches the
+       per-target sanity copies before letting the chip use it. */
+    if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
+        if (hostdata->sync[cmd->target].select_indirect !=
+            ((hostdata->sync[cmd->target].scntl3_sanity << 24) |
+            (cmd->target << 16) |
+            (hostdata->sync[cmd->target].sxfer_sanity << 8))) {
+            printk ("scsi%d : sanity check failed select_indirect=0x%x\n",
+                host->host_no, hostdata->sync[cmd->target].select_indirect);
+            FATAL(host);
+
+        }
+
+    patch_dsa_32(tmp->dsa, dsa_select, 0, hostdata->sync[cmd->target].
+        select_indirect);
+    /*
+     * Right now, we'll do the WIDE and SYNCHRONOUS negotiations on
+     * different commands; although it should be trivial to do them
+     * both at the same time.
+     */
+    if (hostdata->initiate_wdtr & (1 << cmd->target)) {
+        memcpy ((void *) (tmp->select + 1), (void *) wdtr_message,
+            sizeof(wdtr_message));
+        patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(wdtr_message));
+        save_flags(flags);
+        cli();
+        hostdata->initiate_wdtr &= ~(1 << cmd->target);
+        restore_flags(flags);
+    } else if (hostdata->initiate_sdtr & (1 << cmd->target)) {
+        memcpy ((void *) (tmp->select + 1), (void *) sdtr_message,
+            sizeof(sdtr_message));
+        patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(sdtr_message));
+        tmp->flags |= CMD_FLAG_SDTR;
+        save_flags(flags);
+        cli();
+        hostdata->initiate_sdtr &= ~(1 << cmd->target);
+        restore_flags(flags);
+
+    }
+#if 1
+    /* First contact with a target we haven't talked to: negotiate
+       asynchronous transfers unless the user disabled that. */
+    else if (!(hostdata->talked_to & (1 << cmd->target)) &&
+            !(hostdata->options & OPTION_NO_ASYNC)) {
+        memcpy ((void *) (tmp->select + 1), (void *) async_message,
+            sizeof(async_message));
+        patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(async_message));
+        tmp->flags |= CMD_FLAG_SDTR;
+    }
+#endif
+    else
+        patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1);
+    hostdata->talked_to |= (1 << cmd->target);
+    tmp->select[0] = (hostdata->options & OPTION_DISCONNECT) ?
+        IDENTIFY (1, cmd->lun) : IDENTIFY (0, cmd->lun);
+    patch_dsa_32(tmp->dsa, dsa_msgout, 1, virt_to_bus(tmp->select));
+    patch_dsa_32(tmp->dsa, dsa_cmdout, 0, cmd->cmd_len);
+    patch_dsa_32(tmp->dsa, dsa_cmdout, 1, virt_to_bus(cmd->cmnd));
+    /* Phases without generated code fall through to other_transfer. */
+    patch_dsa_32(tmp->dsa, dsa_dataout, 0, cmd_dataout ?
+            virt_to_bus (cmd_dataout)
+        : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
+    patch_dsa_32(tmp->dsa, dsa_datain, 0, cmd_datain ?
+            virt_to_bus (cmd_datain)
+        : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
+    /*
+     * XXX - need to make endian aware, should use separate variables
+     * for both status and message bytes.
+     */
+    patch_dsa_32(tmp->dsa, dsa_msgin, 0, 1);
+/*
+ * FIXME : these only works for little endian.  We probably want to
+ * provide message and status fields in the NCR53c7x0_cmd
+ * structure, and assign them to cmd->result when we're done.
+ */
+    patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&cmd->result) + 1);
+    patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
+    patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&cmd->result));
+    patch_dsa_32(tmp->dsa, dsa_msgout_other, 0, 1);
+    patch_dsa_32(tmp->dsa, dsa_msgout_other, 1,
+        virt_to_bus(&(hostdata->NCR53c7xx_msg_nop)));
+
+    /*
+     * Generate code for zero or more of the DATA IN, DATA OUT phases
+     * in the format
+     *
+     * CALL data_transfer, WHEN NOT phase
+     * MOVE first buffer length, first buffer address, WHEN phase
+     * ...
+     * MOVE last buffer length, last buffer address, WHEN phase
+     * JUMP other_transfer
+     */
+
+/*
+ * See if we're getting to data transfer by generating an unconditional
+ * interrupt.
+ */
+#if 0
+    if (datain) {
+        cmd_datain[0] = 0x98080000;
+        cmd_datain[1] = 0x03ffd00d;
+        cmd_datain += 2;
+    }
+#endif
+
+/*
+ * XXX - I'm undecided whether all of this nonsense is faster
+ * in the long run, or whether I should just go and implement a loop
+ * on the NCR chip using table indirect mode?
+ *
+ * In any case, this is how it _must_ be done for 53c700/700-66 chips,
+ * so this stays even when we come up with something better.
+ *
+ * When we're limited to 1 simultaneous command, no overlapping processing,
+ * we're seeing 630K/sec, with 7% CPU usage on a slow Syquest 45M
+ * drive.
+ *
+ * Not bad, not good. We'll see.
+ */
+
+    /* One loop iteration per scatter/gather segment (or exactly one
+       iteration for a non-SG request buffer); each iteration emits two
+       two-word instructions per active phase. */
+    for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
+        cmd_dataout += 4, ++i) {
+        u32 buf = cmd->use_sg ?
+            virt_to_bus(((struct scatterlist *)cmd->buffer)[i].address) :
+            virt_to_bus(cmd->request_buffer);
+        u32 count = cmd->use_sg ?
+            ((struct scatterlist *)cmd->buffer)[i].length :
+            cmd->request_bufflen;
+
+        if (datain) {
+            /* CALL other_in, WHEN NOT DATA_IN */
+            cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
+                DCMD_TCI_IO) << 24) |
+                DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+            cmd_datain[1] = virt_to_bus (hostdata->script) +
+                hostdata->E_other_in;
+            /* MOVE count, buf, WHEN DATA_IN */
+            cmd_datain[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO)
+                << 24) | count;
+            cmd_datain[3] = buf;
+#if 0
+            print_insn (host, cmd_datain, "dynamic ", 1);
+            print_insn (host, cmd_datain + 2, "dynamic ", 1);
+#endif
+        }
+        if (dataout) {
+            /* CALL other_out, WHEN NOT DATA_OUT */
+            cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL) << 24) |
+                DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+            cmd_dataout[1] = virt_to_bus(hostdata->script) +
+                hostdata->E_other_out;
+            /* MOVE count, buf, WHEN DATA_OUT */
+            cmd_dataout[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I) << 24)
+                | count;
+            cmd_dataout[3] = buf;
+#if 0
+            print_insn (host, cmd_dataout, "dynamic ", 1);
+            print_insn (host, cmd_dataout + 2, "dynamic ", 1);
+#endif
+        }
+    }
+
+    /*
+     * Install JUMP instructions after the data transfer routines to return
+     * control to the do_other_transfer routines.
+     */
+
+
+    if (datain) {
+        cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
+            DBC_TCI_TRUE;
+        cmd_datain[1] = virt_to_bus(hostdata->script) +
+            hostdata->E_other_transfer;
+#if 0
+        print_insn (host, cmd_datain, "dynamic jump ", 1);
+#endif
+        cmd_datain += 2;
+    }
+#if 0
+    if (datain) {
+        cmd_datain[0] = 0x98080000;
+        cmd_datain[1] = 0x03ffdeed;
+        cmd_datain += 2;
+    }
+#endif
+    if (dataout) {
+        cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
+            DBC_TCI_TRUE;
+        cmd_dataout[1] = virt_to_bus(hostdata->script) +
+            hostdata->E_other_transfer;
+#if 0
+        print_insn (host, cmd_dataout, "dynamic jump ", 1);
+#endif
+        cmd_dataout += 2;
+    }
+    return tmp;
+}
+
+/*
+ * Function : int NCR53c7xx_queue_command (Scsi_Cmnd *cmd,
+ *      void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose :  enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ *      a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ * Side effects :
+ *      cmd is added to the per instance driver issue_queue, with major
+ *      twiddling done to the host specific fields of cmd.  If the
+ *      process_issue_queue coroutine isn't running, it is restarted.
+ *
+ * NOTE : we use the host_scribble field of the Scsi_Cmnd structure to
+ *	hold our own data, and pervert the ptr field of the SCp field
+ *	to create a linked list.
+ */
+
+int
+NCR53c7xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) {
+    struct Scsi_Host *host = cmd->host;
+    struct NCR53c7x0_hostdata *hostdata =
+        (struct NCR53c7x0_hostdata *) host->hostdata;
+    unsigned long flags;
+    Scsi_Cmnd *tmp;
+
+    cmd->scsi_done = done;
+    cmd->host_scribble = NULL;
+    cmd->SCp.ptr = NULL;
+    cmd->SCp.buffer = NULL;
+
+    save_flags(flags);
+    cli();
+    /* Reject commands to invalid or disabled targets; the result code
+       is set here and the command completed from process_issue_queue. */
+    if ((hostdata->options & (OPTION_DEBUG_INIT_ONLY|OPTION_DEBUG_PROBE_ONLY))
+        || ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
+            !(hostdata->debug_lun_limit[cmd->target] & (1 << cmd->lun)))
+#ifdef LINUX_1_2
+        || cmd->target > 7
+#else
+        || cmd->target > host->max_id
+#endif
+        || cmd->target == host->this_id
+        || hostdata->state == STATE_DISABLED) {
+        printk("scsi%d : disabled or bad target %d lun %d\n", host->host_no,
+            cmd->target, cmd->lun);
+        cmd->result = (DID_BAD_TARGET << 16);
+    } else if ((hostdata->options & OPTION_DEBUG_NCOMMANDS_LIMIT) &&
+        (hostdata->debug_count_limit == 0)) {
+        printk("scsi%d : maximum commands exceeded\n", host->host_no);
+        cmd->result = (DID_BAD_TARGET << 16);
+    } else if (hostdata->options & OPTION_DEBUG_READ_ONLY) {
+        switch (cmd->cmnd[0]) {
+        case WRITE_6:
+        case WRITE_10:
+            printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
+                host->host_no);
+            cmd->result = (DID_BAD_TARGET << 16);
+        }
+    } else {
+        /* NOTE(review): debug_count_limit is consumed by the
+         * OPTION_DEBUG_NCOMMANDS_LIMIT test above, yet decremented only
+         * when OPTION_DEBUG_TARGET_LIMIT is set -- this looks like it
+         * should test OPTION_DEBUG_NCOMMANDS_LIMIT; confirm before
+         * changing. */
+        if ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
+            hostdata->debug_count_limit != -1)
+            --hostdata->debug_count_limit;
+        restore_flags (flags);
+        cmd->result = 0xffff;	/* The NCR will overwrite message
+                                   and status with valid data */
+        cmd->host_scribble = (unsigned char *) (tmp = create_cmd (cmd));
+    }
+    cli();
+    /*
+     * REQUEST SENSE commands are inserted at the head of the queue
+     * so that we do not clear the contingent allegiance condition
+     * they may be looking at.
+     */
+
+    if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+        cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
+        hostdata->issue_queue = cmd;
+    } else {
+        /* Walk to the tail of the singly linked issue queue and append. */
+        for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->SCp.ptr;
+            tmp = (Scsi_Cmnd *) tmp->SCp.ptr);
+        tmp->SCp.ptr = (unsigned char *) cmd;
+    }
+    restore_flags (flags);
+    run_process_issue_queue();
+    return 0;
+}
+
+/*
+ * Function : void to_schedule_list (struct Scsi_Host *host,
+ * 	struct NCR53c7x0_hostdata * hostdata, Scsi_Cmnd *cmd)
+ *
+ * Purpose : takes a SCSI command which was just removed from the
+ *	issue queue, and deals with it by inserting it in the first
+ *	free slot in the schedule list or by terminating it immediately.
+ *
+ * Inputs :
+ *	host - SCSI host adapter; hostdata - hostdata structure for
+ *	this adapter; cmd - a pointer to the command; should have
+ *	the host_scribble field initialized to point to a valid
+ *	NCR53c7x0_cmd structure.
+ *
+ * Side effects :
+ *      cmd is added to the per instance schedule list, with minor
+ *      twiddling done to the host specific fields of cmd.
+ *
+ */
+
+static __inline__ void
+to_schedule_list (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+    struct NCR53c7x0_cmd *cmd) {
+    NCR53c7x0_local_declare();
+    Scsi_Cmnd *tmp = cmd->cmd;
+    unsigned long flags;
+    /* dsa start is negative, so subtraction is used */
+    volatile u32 *current;	/* Slot being examined in the schedule list */
+
+    int i;
+    NCR53c7x0_local_setup(host);
+#if 0
+    printk("scsi%d : new dsa is 0x%lx (virt 0x%p)\n", host->host_no,
+        virt_to_bus(dsa), dsa);
+#endif
+
+    save_flags(flags);
+    cli();
+
+    /*
+     * Work around race condition : if an interrupt fired and we
+     * got disabled forget about this command.
+     */
+
+    if (hostdata->state == STATE_DISABLED) {
+        printk("scsi%d : driver disabled\n", host->host_no);
+        tmp->result = (DID_BAD_TARGET << 16);
+        /* Return the NCR53c7x0_cmd to the free list before completing. */
+        cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
+        hostdata->free = cmd;
+        tmp->scsi_done(tmp);
+        restore_flags (flags);
+        return;
+    }
+
+    /* Scan the schedule list for the first slot still holding a NOP;
+       each slot is a two-word JUMP/NOP instruction. */
+    for (i = host->can_queue, current = hostdata->schedule;
+        i > 0 && current[0] != hostdata->NOP_insn;
+        --i, current += 2 /* JUMP instructions are two words */);
+
+    if (i > 0) {
+        ++hostdata->busy[tmp->target][tmp->lun];
+        cmd->next = hostdata->running_list;
+        hostdata->running_list = cmd;
+
+        /* Restore this instruction to a NOP once the command starts */
+        cmd->dsa [(hostdata->dsa_jump_dest - hostdata->dsa_start) /
+            sizeof(u32)] = (u32) virt_to_bus ((void *)current);
+        /* Replace the current jump operand.  The operand is written
+           before the opcode so the chip never sees a JUMP with a stale
+           destination. */
+        current[1] =
+            virt_to_bus ((void *) cmd->dsa) + hostdata->E_dsa_code_begin -
+            hostdata->E_dsa_code_template;
+        /* Replace the NOP instruction with a JUMP */
+        current[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) |
+            DBC_TCI_TRUE;
+    } else {
+        printk ("scsi%d: no free slot\n", host->host_no);
+        disable(host);
+        tmp->result = (DID_ERROR << 16);
+        cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
+        hostdata->free = cmd;
+        tmp->scsi_done(tmp);
+        restore_flags (flags);
+        return;
+    }
+
+    /*
+     * If the NCR chip is in an idle state, start it running the scheduler
+     * immediately.  Otherwise, signal the chip to jump to schedule as
+     * soon as it is idle.
+     */
+    if (hostdata->idle) {
+        hostdata->idle = 0;
+        hostdata->state = STATE_RUNNING;
+        NCR53c7x0_write32 (DSP_REG, virt_to_bus ((void *)hostdata->schedule));
+    } else {
+        NCR53c7x0_write8(hostdata->istat, ISTAT_10_SIGP);
+    }
+
+    restore_flags(flags);
+}
+
+/*
+ * Function : busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata
+ *	*hostdata, Scsi_Cmnd *cmd)
+ *
+ * Purpose : predicate - decide if we can pass the given SCSI command
+ *	on to the device in question or not.
+ *
+ * Returns : non-zero (the count of outstanding commands for this
+ *	target/lun) when we're busy, 0 when we aren't.
+ */
+
+static __inline__ int
+busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+    Scsi_Cmnd *cmd) {
+    /* FIXME : in the future, this needs to accommodate SCSI-II tagged
+       queuing, and we may be able to play with fairness here a bit.
+     */
+    int outstanding = hostdata->busy[cmd->target][cmd->lun];
+    return outstanding;
+}
+
+/*
+ * Function : process_issue_queue (void)
+ *
+ * Purpose : transfer commands from the issue queue to NCR start queue
+ *	of each NCR53c7/8xx in the system, avoiding kernel stack
+ *	overflows when the scsi_done() function is invoked recursively.
+ *
+ * NOTE : process_issue_queue exits with interrupts *disabled*, so the
+ *	caller must reenable them if it desires.
+ *
+ * NOTE : process_issue_queue should be called from both
+ *	NCR53c7x0_queue_command() and from the interrupt handler
+ *	after command completion in case NCR53c7x0_queue_command()
+ * 	isn't invoked again but we've freed up resources that are
+ *	needed.
+ */
+
+static void
+process_issue_queue (unsigned long flags) {
+    Scsi_Cmnd *tmp, *prev;
+    struct Scsi_Host *host;
+    struct NCR53c7x0_hostdata *hostdata;
+    int done;			/* No host had work this pass */
+
+    /*
+     * We run (with interrupts disabled) until we're sure that none of
+     * the host adapters have anything that can be done, at which point
+     * we set process_issue_queue_running to 0 and exit.
+     *
+     * Interrupts are enabled before doing various other internal
+     * instructions, after we've decided that we need to run through
+     * the loop again.
+     *
+     */
+
+    do {
+        cli(); /* Freeze request queues */
+        done = 1;
+        /* Consider only hosts driven by this driver. */
+        for (host = first_host; host && host->hostt == the_template;
+            host = host->next) {
+            hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+            cli();
+            if (hostdata->issue_queue) {
+                if (hostdata->state == STATE_DISABLED) {
+                    /* Adapter disabled: fail the head command and
+                       recycle its NCR53c7x0_cmd structure. */
+                    tmp = (Scsi_Cmnd *) hostdata->issue_queue;
+                    hostdata->issue_queue = (Scsi_Cmnd *) tmp->SCp.ptr;
+                    tmp->result = (DID_BAD_TARGET << 16);
+                    if (tmp->host_scribble) {
+                        ((struct NCR53c7x0_cmd *)tmp->host_scribble)->next =
+                            hostdata->free;
+                        hostdata->free =
+                            (struct NCR53c7x0_cmd *)tmp->host_scribble;
+                        tmp->host_scribble = NULL;
+                    }
+                    tmp->scsi_done (tmp);
+                    done = 0;
+                } else
+                    /* Find the first queued command whose target/lun is
+                       not busy (or which has no NCR command and must be
+                       completed with its stored error result). */
+                    for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
+                        prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
+                        tmp->SCp.ptr)
+                        if (!tmp->host_scribble ||
+                            !busyp (host, hostdata, tmp)) {
+                            /* Unlink tmp from the issue queue. */
+                            if (prev)
+                                prev->SCp.ptr = tmp->SCp.ptr;
+                            else
+                                hostdata->issue_queue = (Scsi_Cmnd *)
+                                    tmp->SCp.ptr;
+                            tmp->SCp.ptr = NULL;
+                            if (tmp->host_scribble) {
+                                if (hostdata->options & OPTION_DEBUG_QUEUES)
+                                    printk ("scsi%d : moving command for target %d lun %d to start list\n",
+                                        host->host_no, tmp->target, tmp->lun);
+
+
+                                to_schedule_list (host, hostdata,
+                                    (struct NCR53c7x0_cmd *)
+                                    tmp->host_scribble);
+                            } else {
+                                /* 0xff status/message means the NCR never
+                                   completed this command -- it should not
+                                   be here without an NCR command block. */
+                                if (((tmp->result & 0xff) == 0xff) ||
+                                    ((tmp->result & 0xff00) == 0xff00)) {
+                                    printk ("scsi%d : danger Will Robinson!\n",
+                                        host->host_no);
+                                    tmp->result = DID_ERROR << 16;
+                                    disable (host);
+                                }
+                                tmp->scsi_done(tmp);
+                            }
+                            done = 0;
+                        } /* if target/lun is not busy */
+            } /* if hostdata->issue_queue */
+            if (!done)
+                restore_flags (flags);
+        } /* for host */
+    } while (!done);
+    process_issue_queue_running = 0;
+}
+
+/*
+ * Function : static void intr_scsi (struct Scsi_Host *host,
+ * 	struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle all SCSI interrupts, indicated by the setting
+ * 	of the SIP bit in the ISTAT register.
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * 	may be NULL.
+ */
+
+static void
+intr_scsi (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+    NCR53c7x0_local_declare();
+    struct NCR53c7x0_hostdata *hostdata =
+        (struct NCR53c7x0_hostdata *) host->hostdata;
+    unsigned char sstat0_sist0, sist1,	/* Registers */
+        fatal;				/* Did a fatal interrupt
+                                           occur ? */
+
+    int is_8xx_chip;
+    NCR53c7x0_local_setup(host);
+
+    fatal = 0;
+
+    /* Reading the status registers also clears the latched interrupt
+       conditions; 8xx chips use SIST0/SIST1, 7xx chips use SSTAT0. */
+    is_8xx_chip = ((unsigned) (hostdata->chip - 800)) < 100;
+    if (is_8xx_chip) {
+        sstat0_sist0 = NCR53c7x0_read8(SIST0_REG_800);
+        udelay(1);
+        sist1 = NCR53c7x0_read8(SIST1_REG_800);
+    } else {
+        sstat0_sist0 = NCR53c7x0_read8(SSTAT0_REG);
+        sist1 = 0;
+    }
+
+    if (hostdata->options & OPTION_DEBUG_INTR)
+        printk ("scsi%d : SIST0 0x%0x, SIST1 0x%0x\n", host->host_no,
+            sstat0_sist0, sist1);
+
+    /* 250ms selection timeout */
+    if ((is_8xx_chip && (sist1 & SIST1_800_STO)) ||
+        (!is_8xx_chip && (sstat0_sist0 & SSTAT0_700_STO))) {
+        fatal = 1;
+        if (hostdata->options & OPTION_DEBUG_INTR) {
+            printk ("scsi%d : Selection Timeout\n", host->host_no);
+            if (cmd) {
+                printk("scsi%d : target %d, lun %d, command ",
+                    host->host_no, cmd->cmd->target, cmd->cmd->lun);
+                print_command (cmd->cmd->cmnd);
+                printk("scsi%d : dsp = 0x%x (virt 0x%p)\n", host->host_no,
+                    NCR53c7x0_read32(DSP_REG),
+                    bus_to_virt(NCR53c7x0_read32(DSP_REG)));
+            } else {
+                printk("scsi%d : no command\n", host->host_no);
+            }
+        }
+/*
+ * XXX - question : how do we want to handle the Illegal Instruction
+ * 	interrupt, which may occur before or after the Selection Timeout
+ * 	interrupt?
+ */
+
+        if (1) {
+            hostdata->idle = 1;
+            hostdata->expecting_sto = 0;
+
+            /* A timeout during the self-test aborts the test;
+               otherwise the command failed to select - bad target. */
+            if (hostdata->test_running) {
+                hostdata->test_running = 0;
+                hostdata->test_completed = 3;
+            } else if (cmd) {
+                abnormal_finished(cmd, DID_BAD_TARGET << 16);
+            }
+#if 0
+            hostdata->intrs = 0;
+#endif
+        }
+    }
+
+/*
+ * FIXME : in theory, we can also get a UDC when a STO occurs.
+ */
+    /* Unexpected disconnect */
+    if (sstat0_sist0 & SSTAT0_UDC) {
+        fatal = 1;
+        if (cmd) {
+            printk("scsi%d : target %d lun %d unexpected disconnect\n",
+                host->host_no, cmd->cmd->target, cmd->cmd->lun);
+            print_lots (host);
+            abnormal_finished(cmd, DID_ERROR << 16);
+        } else
+            printk("scsi%d : unexpected disconnect (no command)\n",
+                host->host_no);
+
+        /* Resume the SCRIPTS processor at the scheduler. */
+        hostdata->dsp = (u32 *) hostdata->schedule;
+        hostdata->dsp_changed = 1;
+    }
+
+    /* SCSI PARITY error */
+    if (sstat0_sist0 & SSTAT0_PAR) {
+        fatal = 1;
+        if (cmd && cmd->cmd) {
+            printk("scsi%d : target %d lun %d parity error.\n",
+                host->host_no, cmd->cmd->target, cmd->cmd->lun);
+            abnormal_finished (cmd, DID_PARITY << 16);
+        } else
+            printk("scsi%d : parity error\n", host->host_no);
+        /* Should send message out, parity error */
+
+        /* XXX - Reduce synchronous transfer rate! */
+        hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+            sizeof(u32);
+        hostdata->dsp_changed = 1;
+    }
+
+    /* SCSI GROSS error */
+    if (sstat0_sist0 & SSTAT0_SGE) {
+        fatal = 1;
+        printk("scsi%d : gross error\n", host->host_no);
+        /* Reset SCSI offset */
+        if ((hostdata->chip / 100) == 8) {
+            NCR53c7x0_write8 (STEST2_REG_800, STEST2_800_ROF);
+        }
+
+        /*
+         * A SCSI gross error may occur when we have
+         *
+         * - A synchronous offset which causes the SCSI FIFO to be overwritten.
+         *
+         * - A REQ which causes the maximum synchronous offset programmed in
+         *   the SXFER register to be exceeded.
+         *
+         * - A phase change with an outstanding synchronous offset.
+         *
+         * - Residual data in the synchronous data FIFO, with a transfer
+         *   other than a synchronous receive is started.
+         */
+
+
+        /* XXX Should deduce synchronous transfer rate! */
+        hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+            sizeof(u32);
+        hostdata->dsp_changed = 1;
+    }
+
+    /* Phase mismatch */
+    if (sstat0_sist0 & SSTAT0_MA) {
+        fatal = 1;
+        if (hostdata->options & OPTION_DEBUG_INTR)
+            printk ("scsi%d : SSTAT0_MA\n", host->host_no);
+        intr_phase_mismatch (host, cmd);
+    }
+
+#if 0
+    if (sstat0_sist0 & SIST0_800_RSL)
+        printk ("scsi%d : Oh no Mr. Bill!\n", host->host_no);
+#endif
+
+/*
+ * If a fatal SCSI interrupt occurs, we must insure that the DMA and
+ * SCSI FIFOs were flushed.
+ */
+
+    if (fatal) {
+        if (!hostdata->dstat_valid) {
+            hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+            hostdata->dstat_valid = 1;
+        }
+
+/* XXX - code check for 700/800 chips */
+        if (!(hostdata->dstat & DSTAT_DFE)) {
+            printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
+            /* Flush (write out) or clear (discard) the DMA FIFO
+               depending on the direction of the interrupted transfer. */
+            if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+                printk ("scsi%d: Flushing DMA FIFO\n",
+                    host->host_no);
+                NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+                while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+                    DSTAT_DFE));
+            } else {
+                NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+                while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+            }
+            hostdata->dstat |= DSTAT_DFE;
+        }
+    }
+}
+
+/*
+ * Function : static void NCR53c7x0_intr (int irq, void *dev_id, struct pt_regs * regs)
+ *
+ * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
+ * the same IRQ line.
+ *
+ * Inputs : Since we're using the SA_INTERRUPT interrupt handler
+ * semantics, irq indicates the interrupt which invoked
+ * this handler.
+ */
+
+static void
+NCR53c7x0_intr (int irq, void *dev_id, struct pt_regs * regs) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host; /* Host we are looking at */
+ unsigned char istat; /* Values of interrupt regs */
+ struct NCR53c7x0_hostdata *hostdata; /* host->hostdata */
+ struct NCR53c7x0_cmd *cmd, /* command which halted */
+ **cmd_prev_ptr;
+ u32 *dsa; /* DSA */
+ int done = 1; /* Indicates when handler
+ should terminate */
+ int interrupted = 0; /* This HA generated
+ an interrupt */
+ int have_intfly; /* Don't print warning
+ messages when we stack
+ INTFLYs */
+ unsigned long flags;
+
+#ifdef NCR_DEBUG
+ char buf[80]; /* Debugging sprintf buffer */
+ size_t buflen; /* Length of same */
+#endif
+
+ /*
+ * Outer loop : repeat the scan over all hosts sharing this IRQ
+ * until one complete pass finds no interrupt work (done stays 1).
+ */
+ do {
+ done = 1;
+ for (host = first_host; host; host = host->next)
+ if (host->hostt == the_template && host->irq == irq) {
+ NCR53c7x0_local_setup(host);
+
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+ hostdata->dsp_changed = 0;
+ interrupted = 0;
+ have_intfly = 0;
+
+ /*
+ * Inner loop : keep servicing this host while it keeps
+ * asserting interrupt conditions (INTFLY, SIP, or DIP).
+ */
+ do {
+ int is_8xx_chip;
+
+ hostdata->dstat_valid = 0;
+ interrupted = 0;
+ /*
+ * Only read istat once, since reading it again will unstack
+ * interrupts?
+ */
+ istat = NCR53c7x0_read8(hostdata->istat);
+
+ /*
+ * INTFLY interrupts are used by the NCR53c720, NCR53c810,
+ * and NCR53c820 to signify completion of a command. Since
+ * the SCSI processor continues running, we can't just look
+ * at the contents of the DSA register and continue running.
+ */
+/* XXX - this is too big, offends my sense of aesthetics, and should
+ move to intr_intfly() */
+ is_8xx_chip = ((unsigned) (hostdata->chip - 800)) < 100;
+ if ((hostdata->options & OPTION_INTFLY) &&
+ (is_8xx_chip && (istat & ISTAT_800_INTF))) {
+ char search_found = 0; /* Got at least one ? */
+ done = 0;
+ interrupted = 1;
+
+ /*
+ * Clear the INTF bit by writing a one.
+ * This reset operation is self-clearing.
+ */
+ NCR53c7x0_write8(hostdata->istat, istat|ISTAT_800_INTF);
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : INTFLY\n", host->host_no);
+
+ /*
+ * Traverse our list of running commands, and look
+ * for those with valid (non-0xff ff) status and message
+ * bytes encoded in the result which signify command
+ * completion.
+ */
+
+
+ /* Interrupts off while we walk and edit running_list. */
+ save_flags(flags);
+ cli();
+restart:
+ for (cmd_prev_ptr = (struct NCR53c7x0_cmd **)
+ &(hostdata->running_list), cmd =
+ (struct NCR53c7x0_cmd *) hostdata->running_list; cmd ;
+ cmd_prev_ptr = (struct NCR53c7x0_cmd **) &(cmd->next),
+ cmd = (struct NCR53c7x0_cmd *) cmd->next) {
+ Scsi_Cmnd *tmp;
+
+ if (!cmd) {
+ printk("scsi%d : very weird.\n", host->host_no);
+ break;
+ }
+
+ if (!(tmp = cmd->cmd)) {
+ printk("scsi%d : weird. NCR53c7x0_cmd has no Scsi_Cmnd\n",
+ host->host_no);
+ continue;
+ }
+#if 0
+ printk ("scsi%d : looking at result of 0x%x\n",
+ host->host_no, cmd->cmd->result);
+#endif
+
+ /* 0xff in either byte means "not yet completed". */
+ if (((tmp->result & 0xff) == 0xff) ||
+ ((tmp->result & 0xff00) == 0xff00))
+ continue;
+
+ search_found = 1;
+
+ /* Important - remove from list _before_ done is called */
+ if (cmd_prev_ptr)
+ *cmd_prev_ptr = (struct NCR53c7x0_cmd *) cmd->next;
+
+ --hostdata->busy[tmp->target][tmp->lun];
+ cmd->next = hostdata->free;
+ hostdata->free = cmd;
+
+ tmp->host_scribble = NULL;
+
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+ printk ("scsi%d : command complete : pid %lu, id %d,lun %d result 0x%x ",
+ host->host_no, tmp->pid, tmp->target, tmp->lun, tmp->result);
+ print_command (tmp->cmnd);
+ }
+
+#if 0
+ hostdata->options &= ~OPTION_DEBUG_INTR;
+#endif
+ tmp->scsi_done(tmp);
+ /* scsi_done may have changed the list; rescan from head. */
+ goto restart;
+
+ }
+ restore_flags(flags);
+
+ /*
+ * I think that we're stacking INTFLY interrupts; taking care of
+ * all the finished commands on the first one, and then getting
+ * worried when we see the next one. The magic with have_intfly
+ * should tell if this is the case..
+ */
+
+ if (!search_found && !have_intfly) {
+ printk ("scsi%d : WARNING : INTFLY with no completed commands.\n",
+ host->host_no);
+ } else if (!have_intfly) {
+ have_intfly = 1;
+ run_process_issue_queue();
+ }
+ }
+
+ /* SIP/DIP : the SCSI processor has halted on a SCSI or DMA event. */
+ if (istat & (ISTAT_SIP|ISTAT_DIP)) {
+ done = 0;
+ interrupted = 1;
+ hostdata->state = STATE_HALTED;
+
+ if (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK)
+ printk ("scsi%d : SCSI FIFO not empty\n",
+ host->host_no);
+
+ /*
+ * NCR53c700 and NCR53c700-66 change the current SCSI
+ * process, hostdata->current, in the Linux driver so
+ * cmd = hostdata->current.
+ *
+ * With other chips, we must look through the commands
+ * executing and find the command structure which
+ * corresponds to the DSA register.
+ */
+
+ if (hostdata->options & OPTION_700) {
+ cmd = (struct NCR53c7x0_cmd *) hostdata->current;
+ } else {
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ for (cmd = (struct NCR53c7x0_cmd *)
+ hostdata->running_list; cmd &&
+ (dsa + (hostdata->dsa_start / sizeof(u32))) !=
+ cmd->dsa;
+ cmd = (struct NCR53c7x0_cmd *)(cmd->next));
+ }
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+ if (cmd) {
+ printk("scsi%d : interrupt for pid %lu, id %d, lun %d ",
+ host->host_no, cmd->cmd->pid, (int) cmd->cmd->target,
+ (int) cmd->cmd->lun);
+ print_command (cmd->cmd->cmnd);
+ } else {
+ printk("scsi%d : no active command\n", host->host_no);
+ }
+ }
+
+ if (istat & ISTAT_SIP) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ISTAT_SIP\n", host->host_no);
+ intr_scsi (host, cmd);
+ }
+
+ if (istat & ISTAT_DIP) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ISTAT_DIP\n", host->host_no);
+ intr_dma (host, cmd);
+ }
+
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+ /* XXX - code check for 700/800 chips */
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ /* Busy-wait until the chip reports the DMA FIFO empty. */
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else
+ {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+ }
+ } while (interrupted);
+
+
+
+ if (hostdata->intrs != -1)
+ hostdata->intrs++;
+#if 0
+ if (hostdata->intrs > 40) {
+ printk("scsi%d : too many interrupts, halting", host->host_no);
+ disable(host);
+ }
+#endif
+
+ /*
+ * Restart the SCSI processor : if no handler patched DSP,
+ * resume at the address the chip halted at.
+ */
+ if (!hostdata->idle && hostdata->state == STATE_HALTED) {
+ if (!hostdata->dsp_changed) {
+ hostdata->dsp = (u32 *)
+ bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ }
+
+#if 0
+ printk("scsi%d : new dsp is 0x%lx (virt 0x%p)\n",
+ host->host_no, virt_to_bus(hostdata->dsp), hostdata->dsp);
+#endif
+
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSP_REG, virt_to_bus(hostdata->dsp));
+ }
+ }
+ } while (!done);
+}
+
+
+/*
+ * Function : static int abort_connected (struct Scsi_Host *host)
+ *
+ * Purpose : Assuming that the NCR SCSI processor is currently
+ * halted, break the currently established nexus. Clean
+ * up of the NCR53c7x0_cmd and Scsi_Cmnd structures should
+ * be done on receipt of the abort interrupt.
+ *
+ * Inputs : host - SCSI host
+ *
+ */
+
+static int
+abort_connected (struct Scsi_Host *host) {
+#ifdef NEW_ABORT
+ NCR53c7x0_local_declare();
+#endif
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+/* FIXME : this probably should change for production kernels; at the
+ least, counter should move to a per-host structure. */
+ static int counter = 5;
+#ifdef NEW_ABORT
+ int sstat, phase, offset;
+ u32 *script;
+ NCR53c7x0_local_setup(host);
+#endif
+
+ /* Give up (and disable the host) after too many abort attempts. */
+ if (--counter <= 0) {
+ disable(host);
+ return 0;
+ }
+
+ printk ("scsi%d : DANGER : abort_connected() called \n",
+ host->host_no);
+
+#ifdef NEW_ABORT
+
+/*
+ * New strategy : Rather than using a generic abort routine,
+ * we'll specifically try to source or sink the appropriate
+ * amount of data for the phase we're currently in (taking into
+ * account the current synchronous offset)
+ */
+
+ /*
+ * Read the SCSI FIFO / phase status register ('8xx chips call it
+ * SSTAT1, others SSTAT2). The original line had an unbalanced
+ * parenthesis, called the nonexistent NCR53c8x0_read8(), and used
+ * an undeclared 'chip' variable; fixed to match the idiom used
+ * elsewhere in this driver (e.g. datapath_residual()).
+ */
+ sstat = NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ? SSTAT1_REG : SSTAT2_REG);
+ offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
+ phase = sstat & SSTAT2_PHASE_MASK;
+
+/*
+ * SET ATN
+ * MOVE source_or_sink, WHEN CURRENT PHASE
+ * < repeat for each outstanding byte >
+ * JUMP send_abort_message
+ */
+
+ /* FIXME : kmalloc() result is not checked before use. */
+ script = hostdata->abort_script = kmalloc (
+ 8 /* instruction size */ * (
+ 1 /* set ATN */ +
+ (!offset ? 1 : offset) /* One transfer per outstanding byte */ +
+ 1 /* send abort message */),
+ GFP_ATOMIC);
+
+
+#else /* def NEW_ABORT */
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+#endif /* def NEW_ABORT */
+ hostdata->dsp_changed = 1;
+
+/* XXX - need to flag the command as aborted after the abort_connected
+ code runs
+ */
+ return 0;
+}
+
+/*
+ * Function : static int datapath_residual (struct Scsi_Host *host)
+ *
+ * Purpose : return residual data count of what's in the chip.
+ *
+ * Inputs : host - SCSI host
+ */
+
+static int
+datapath_residual (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int count, synchronous, sstat;
+ NCR53c7x0_local_setup(host);
+ /* COMPAT : the 700 and 700-66 need to use DFIFO_00_BO_MASK */
+ /*
+ * Bytes still in the DMA FIFO = (DFIFO byte offset - low bits of the
+ * DBC byte count), reduced modulo the byte-offset mask.
+ */
+ count = ((NCR53c7x0_read8 (DFIFO_REG) & DFIFO_10_BO_MASK) -
+ (NCR53c7x0_read32 (DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
+ /* Nonzero synchronous offset => synchronous transfer in progress. */
+ synchronous = NCR53c7x0_read8 (SXFER_REG) & SXFER_MO_MASK;
+ /* COMPAT : DDIR is elsewhere on non-'8xx chips. */
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ /* Receive */
+ /* Synchronous : add whatever sits in the SCSI FIFO as well. */
+ if (synchronous)
+ count += (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
+ else
+ /* Asynchronous : at most one byte in the input latch. */
+ if (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT0_REG : SSTAT1_REG) & SSTAT1_ILF)
+ ++count;
+ } else {
+ /* Send */
+ sstat = ((hostdata->chip / 100) == 8) ? NCR53c7x0_read8 (SSTAT0_REG) :
+ NCR53c7x0_read8 (SSTAT1_REG);
+ /* Output latch full, plus (synchronous only) output register full. */
+ if (sstat & SSTAT1_OLF)
+ ++count;
+ if (synchronous && (sstat & SSTAT1_ORF))
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : static const char * sbcl_to_phase (int sbcl)
+ *
+ * Purpose : Convert SBCL register to user-parsable phase representation
+ *
+ * Inputs : sbcl - value of sbcl register
+ */
+
+
+static const char *
+sbcl_to_phase (int sbcl) {
+    /*
+     * Translate the phase bits of an SBCL register value into a
+     * printable name; anything unrecognized maps to "unknown".
+     */
+    const char *name;
+
+    switch (sbcl & SBCL_PHASE_MASK) {
+    case SBCL_PHASE_DATAIN:  name = "DATAIN";   break;
+    case SBCL_PHASE_DATAOUT: name = "DATAOUT";  break;
+    case SBCL_PHASE_MSGIN:   name = "MSGIN";    break;
+    case SBCL_PHASE_MSGOUT:  name = "MSGOUT";   break;
+    case SBCL_PHASE_CMDOUT:  name = "CMDOUT";   break;
+    case SBCL_PHASE_STATIN:  name = "STATUSIN"; break;
+    default:                 name = "unknown";  break;
+    }
+    return name;
+}
+
+/*
+ * Function : static const char * sstat2_to_phase (int sstat)
+ *
+ * Purpose : Convert SSTAT2 register to user-parsable phase representation
+ *
+ * Inputs : sstat - value of sstat register
+ */
+
+
+static const char *
+sstat2_to_phase (int sstat) {
+    /*
+     * Translate the phase bits of an SSTAT2 register value into a
+     * printable name; anything unrecognized maps to "unknown".
+     */
+    int phase = sstat & SSTAT2_PHASE_MASK;
+
+    if (phase == SSTAT2_PHASE_DATAIN)
+	return "DATAIN";
+    if (phase == SSTAT2_PHASE_DATAOUT)
+	return "DATAOUT";
+    if (phase == SSTAT2_PHASE_MSGIN)
+	return "MSGIN";
+    if (phase == SSTAT2_PHASE_MSGOUT)
+	return "MSGOUT";
+    if (phase == SSTAT2_PHASE_CMDOUT)
+	return "CMDOUT";
+    if (phase == SSTAT2_PHASE_STATIN)
+	return "STATUSIN";
+    return "unknown";
+}
+
+/*
+ * Function : static void intr_phase_mismatch (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handle phase mismatch interrupts
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ *
+ * Side effects : The abort_connected() routine is called or the NCR chip
+ * is restarted, jumping to the command_complete entry point, or
+ * patching the address and transfer count of the current instruction
+ * and calling the msg_in entry point as appropriate.
+ */
+
+static void
+intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ u32 dbc_dcmd, *dsp, *dsp_next;
+ unsigned char dcmd, sbcl;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int residual;
+ enum {ACTION_ABORT, ACTION_ABORT_PRINT, ACTION_CONTINUE} action =
+ ACTION_ABORT_PRINT;
+ const char *where = NULL;
+ NCR53c7x0_local_setup(host);
+
+ /*
+ * Corrective action is based on where in the SCSI SCRIPT(tm) the error
+ * occurred, as well as which SCSI phase we are currently in.
+ */
+ dsp_next = bus_to_virt(NCR53c7x0_read32(DSP_REG));
+
+ /*
+ * Fetch the current instruction, and remove the operands for easier
+ * interpretation.
+ */
+ dbc_dcmd = NCR53c7x0_read32(DBC_REG);
+ dcmd = (dbc_dcmd & 0xff000000) >> 24;
+ /*
+ * Like other processors, the NCR adjusts the instruction pointer before
+ * instruction decode. Set the DSP address back to what it should
+ * be for this instruction based on its size (2 or 3 32 bit words).
+ */
+ dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
+
+
+ /*
+ * Read new SCSI phase from the SBCL lines. Since all of our code uses
+ * a WHEN conditional instead of an IF conditional, we don't need to
+ * wait for a new REQ.
+ */
+ sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;
+
+ if (!cmd) {
+ action = ACTION_ABORT_PRINT;
+ where = "no current command";
+ /*
+ * The way my SCSI SCRIPTS(tm) are architected, recoverable phase
+ * mismatches should only occur where we're doing a multi-byte
+ * BMI instruction. Specifically, this means
+ *
+ * - select messages (a SCSI-I target may ignore additional messages
+ * after the IDENTIFY; any target may reject a SDTR or WDTR)
+ *
+ * - command out (targets may send a message to signal an error
+ * condition, or go into STATUSIN after they've decided
+ * they don't like the command.
+ *
+ * - reply_message (targets may reject a multi-byte message in the
+ * middle)
+ *
+ * - data transfer routines (command completion with buffer space
+ * left, disconnect message, or error message)
+ */
+ } else if (((dsp >= cmd->data_transfer_start &&
+ dsp < cmd->data_transfer_end)) || dsp == (cmd->residual + 2)) {
+ /* Mismatch inside the dynamic data-transfer code (or a re-entry
+ into the residual buffer built on a previous mismatch). */
+ if ((dcmd & (DCMD_TYPE_MASK|DCMD_BMI_OP_MASK|DCMD_BMI_INDIRECT|
+ DCMD_BMI_MSG|DCMD_BMI_CD)) == (DCMD_TYPE_BMI|
+ DCMD_BMI_OP_MOVE_I)) {
+ residual = datapath_residual (host);
+ if (hostdata->options & OPTION_DEBUG_DISCONNECT)
+ printk ("scsi%d : handling residual transfer (+ %d bytes from DMA FIFO)\n",
+ host->host_no, residual);
+
+ /*
+ * The first instruction is a CALL to the alternate handler for
+ * this data transfer phase, so we can do calls to
+ * munge_msg_restart as we would if control were passed
+ * from normal dynamic code.
+ */
+ if (dsp != cmd->residual + 2) {
+ cmd->residual[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
+ ((dcmd & DCMD_BMI_IO) ? DCMD_TCI_IO : 0)) << 24) |
+ DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+ cmd->residual[1] = virt_to_bus(hostdata->script)
+ + ((dcmd & DCMD_BMI_IO)
+ ? hostdata->E_other_in : hostdata->E_other_out);
+ }
+
+ /*
+ * The second instruction is the data transfer block
+ * move instruction, reflecting the pointer and count at the
+ * time of the phase mismatch.
+ */
+ cmd->residual[2] = dbc_dcmd + residual;
+ cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;
+
+ /*
+ * The third and final instruction is a jump to the instruction
+ * which follows the instruction which had to be 'split'
+ */
+ if (dsp != cmd->residual + 2) {
+ cmd->residual[4] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP)
+ << 24) | DBC_TCI_TRUE;
+ cmd->residual[5] = virt_to_bus(dsp_next);
+ }
+
+ /*
+ * For the sake of simplicity, transfer control to the
+ * conditional CALL at the start of the residual buffer.
+ */
+ hostdata->dsp = cmd->residual;
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ } else {
+ where = "non-BMI dynamic DSA code";
+ action = ACTION_ABORT_PRINT;
+ }
+ } else if (dsp == (hostdata->script + hostdata->E_select_msgout / 4)) {
+ /* Release ATN */
+ NCR53c7x0_write8 (SOCL_REG, 0);
+ switch (sbcl) {
+ /*
+ * Some devices (SQ555 come to mind) grab the IDENTIFY message
+ * sent on selection, and decide to go into COMMAND OUT phase
+ * rather than accepting the rest of the messages or rejecting
+ * them. Handle these devices gracefully.
+ */
+ case SBCL_PHASE_CMDOUT:
+ hostdata->dsp = dsp + 2 /* two _words_ */;
+ hostdata->dsp_changed = 1;
+ printk ("scsi%d : target %d ignored SDTR and went into COMMAND OUT\n",
+ host->host_no, cmd->cmd->target);
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ action = ACTION_CONTINUE;
+ break;
+ case SBCL_PHASE_MSGIN:
+ hostdata->dsp = hostdata->script + hostdata->E_msg_in /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ break;
+ default:
+ where="select message out";
+ action = ACTION_ABORT_PRINT;
+ }
+ /*
+ * Some SCSI devices will interpret a command as they read the bytes
+ * off the SCSI bus, and may decide that the command is Bogus before
+ * they've read the entire command off the bus.
+ */
+ } else if (dsp == hostdata->script + hostdata->E_cmdout_cmdout / sizeof
+ (u32)) {
+ hostdata->dsp = hostdata->script + hostdata->E_data_transfer /
+ sizeof (u32);
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ /* FIXME : we need to handle message reject, etc. within msg_respond. */
+#ifdef notyet
+ } else if (dsp == hostdata->script + hostdata->E_reply_message) {
+ switch (sbcl) {
+ /* Any other phase mismatches abort the currently executing command. */
+#endif
+ } else {
+ where = "unknown location";
+ action = ACTION_ABORT_PRINT;
+ }
+
+ /* Flush DMA FIFO */
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ /* FIXME : what about stacked DMA interrupts? */
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+
+ /* Carry out whichever corrective action was chosen above. */
+ switch (action) {
+ case ACTION_ABORT_PRINT:
+ printk("scsi%d : %s : unexpected phase %s.\n",
+ host->host_no, where ? where : "unknown location",
+ sbcl_to_phase(sbcl));
+ print_lots (host);
+ /* Fall through to ACTION_ABORT */
+ case ACTION_ABORT:
+ abort_connected (host);
+ break;
+ case ACTION_CONTINUE:
+ break;
+ }
+
+#if 0
+ if (hostdata->dsp_changed) {
+ printk("scsi%d: new dsp 0x%p\n", host->host_no, hostdata->dsp);
+ print_insn (host, hostdata->dsp, "", 1);
+ }
+#endif
+
+}
+
+/*
+ * Function : static void intr_bf (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle BUS FAULT interrupts
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_bf (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *dsp,
+ *next_dsp, /* Current dsp */
+ *dsa,
+ dbc_dcmd; /* DCMD (high eight bits) + DBC */
+ unsigned short pci_status;
+ int tmp;
+ unsigned long flags;
+ char *reason = NULL;
+ /* Default behavior is for a silent error, with a retry until we've
+ exhausted retries. */
+ enum {MAYBE, ALWAYS, NEVER} retry = MAYBE;
+ int report = 0;
+ NCR53c7x0_local_setup(host);
+
+ /* Recover the faulting instruction address (DSP points past it). */
+ dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
+ next_dsp = bus_to_virt (NCR53c7x0_read32(DSP_REG));
+ dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
+/* FIXME - check chip type */
+ dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
+
+ /*
+ * Bus faults can be caused by either a Bad Address or
+ * Target Abort. We should check the Received Target Abort
+ * bit of the PCI status register and Master Abort Bit.
+ *
+ * - Master Abort bit indicates that no device claimed
+ * the address with DEVSEL within five clocks
+ *
+ * - Target Abort bit indicates that a target claimed it,
+ * but changed its mind once it saw the byte enables.
+ *
+ */
+
+ /* Only the PCI-based '8xx chips can interrogate PCI status. */
+ if ((hostdata->chip / 100) == 8) {
+ save_flags (flags);
+ cli();
+ tmp = pcibios_read_config_word (hostdata->pci_bus,
+ hostdata->pci_device_fn, PCI_STATUS, &pci_status);
+ restore_flags (flags);
+ if (tmp == PCIBIOS_SUCCESSFUL) {
+ if (pci_status & PCI_STATUS_REC_TARGET_ABORT) {
+ reason = "PCI target abort";
+ pci_status &= ~PCI_STATUS_REC_TARGET_ABORT;
+ } else if (pci_status & PCI_STATUS_REC_MASTER_ABORT) {
+ reason = "No device asserted PCI DEVSEL within five bus clocks";
+ pci_status &= ~PCI_STATUS_REC_MASTER_ABORT;
+ } else if (pci_status & PCI_STATUS_PARITY) {
+ report = 1;
+ pci_status &= ~PCI_STATUS_PARITY;
+ }
+ } else {
+ printk ("scsi%d : couldn't read status register : %s\n",
+ host->host_no, pcibios_strerror (tmp));
+ retry = NEVER;
+ }
+ }
+
+ /* Until retry logic exists, always report the fault... */
+#ifndef notyet
+ report = 1;
+#endif
+ if (report && reason) {
+ printk(KERN_ALERT "scsi%d : BUS FAULT reason = %s\n",
+ host->host_no, reason ? reason : "unknown");
+ print_lots (host);
+ }
+
+ /* ...and never retry, i.e. treat every bus fault as fatal. */
+#ifndef notyet
+ retry = NEVER;
+#endif
+
+ /*
+ * TODO : we should attempt to recover from any spurious bus
+ * faults. After X retries, we should figure that things are
+ * sufficiently wedged, and call NCR53c7xx_reset.
+ *
+ * This code should only get executed once we've decided that we
+ * cannot retry.
+ */
+
+ if (retry == NEVER) {
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
+ FATAL (host);
+ }
+}
+
+/*
+ * Function : static void intr_dma (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle all DMA interrupts, indicated by the setting
+ * of the DIP bit in the ISTAT register.
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned char dstat; /* DSTAT */
+ u32 *dsp,
+ *next_dsp, /* Current dsp */
+ *dsa,
+ dbc_dcmd; /* DCMD (high eight bits) + DBC */
+ int tmp;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ /* Use the cached DSTAT if a prior handler already read it. */
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+ dstat = hostdata->dstat;
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk("scsi%d : DSTAT=0x%x\n", host->host_no, (int) dstat);
+
+ /* Recover the faulting instruction address (DSP points past it). */
+ dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
+ next_dsp = bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
+/* XXX - check chip type */
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+
+ /*
+ * DSTAT_ABRT is the aborted interrupt. This is set whenever the
+ * SCSI chip is aborted.
+ *
+ * With NCR53c700 and NCR53c700-66 style chips, we should only
+ * get this when the chip is currently running the accept
+ * reselect/select code and we have set the abort bit in the
+ * ISTAT register.
+ *
+ */
+
+ if (dstat & DSTAT_ABRT) {
+#if 0
+ /* XXX - add code here to deal with normal abort */
+ if ((hostdata->options & OPTION_700) && (hostdata->state ==
+ STATE_ABORTING)) {
+ } else
+#endif
+ {
+ printk(KERN_ALERT "scsi%d : unexpected abort interrupt at\n"
+ " ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "s ", 1);
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_SSI is the single step interrupt. Should be generated
+ * whenever we have single stepped or are tracing.
+ */
+
+ if (dstat & DSTAT_SSI) {
+ if (hostdata->options & OPTION_DEBUG_TRACE) {
+ } else if (hostdata->options & OPTION_DEBUG_SINGLE) {
+ print_insn (host, dsp, "s ", 0);
+ save_flags(flags);
+ cli();
+/* XXX - should we do this, or can we get away with writing dsp? */
+
+ /* Clear single-step mode and tell the chip to start. */
+ NCR53c7x0_write8 (DCNTL_REG, (NCR53c7x0_read8(DCNTL_REG) &
+ ~DCNTL_SSM) | DCNTL_STD);
+ restore_flags(flags);
+ } else {
+ printk(KERN_ALERT "scsi%d : unexpected single step interrupt at\n"
+ " ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_IID / DSTAT_OPC (same bit, same meaning, only the name
+ * is different) is generated whenever an illegal instruction is
+ * encountered.
+ *
+ * XXX - we may want to emulate INTFLY here, so we can use
+ * the same SCSI SCRIPT (tm) for NCR53c710 through NCR53c810
+ * chips.
+ */
+
+ if (dstat & DSTAT_OPC) {
+ /*
+ * Ascertain if this IID interrupt occurred before or after a STO
+ * interrupt. Since the interrupt handling code now leaves
+ * DSP unmodified until _after_ all stacked interrupts have been
+ * processed, reading the DSP returns the original DSP register.
+ * This means that if dsp lies between the select code, and
+ * message out following the selection code (where the IID interrupt
+ * would have to have occurred by due to the implicit wait for REQ),
+ * we have an IID interrupt resulting from a STO condition and
+ * can ignore it.
+ */
+
+ if (((dsp >= (hostdata->script + hostdata->E_select / sizeof(u32))) &&
+ (dsp <= (hostdata->script + hostdata->E_select_msgout /
+ sizeof(u32) + 8))) || (hostdata->test_running == 2)) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ignoring DSTAT_IID for SSTAT_STO\n",
+ host->host_no);
+ if (hostdata->expecting_iid) {
+ hostdata->expecting_iid = 0;
+ hostdata->idle = 1;
+ if (hostdata->test_running == 2) {
+ hostdata->test_running = 0;
+ hostdata->test_completed = 3;
+ } else if (cmd)
+ abnormal_finished (cmd, DID_BAD_TARGET << 16);
+ } else {
+ hostdata->expecting_sto = 1;
+ }
+ /*
+ * We can't guarantee we'll be able to execute the WAIT DISCONNECT
+ * instruction within the 3.4us of bus free and arbitration delay
+ * that a target can RESELECT in and assert REQ after we've dropped
+ * ACK. If this happens, we'll get an illegal instruction interrupt.
+ * Doing away with the WAIT DISCONNECT instructions broke everything,
+ * so instead I'll settle for moving one WAIT DISCONNECT a few
+ * instructions closer to the CLEAR ACK before it to minimize the
+ * chances of this happening, and handle it if it occurs anyway.
+ *
+ * Simply continue with what we were doing, and control should
+ * be transferred to the schedule routine which will ultimately
+ * pass control onto the reselection or selection (not yet)
+ * code.
+ */
+ } else if (dbc_dcmd == 0x48000000 && (NCR53c7x0_read8 (SBCL_REG) &
+ SBCL_REQ)) {
+ /* 0x48000000 is the WAIT DISCONNECT opcode word. Print the
+ race warning only once per host. */
+ if (!(hostdata->options & OPTION_NO_PRINT_RACE))
+ {
+ printk("scsi%d: REQ before WAIT DISCONNECT IID\n",
+ host->host_no);
+ hostdata->options |= OPTION_NO_PRINT_RACE;
+ }
+ } else {
+ printk(KERN_ALERT "scsi%d : illegal instruction\n", host->host_no);
+ print_lots (host);
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG with ALL\n"
+ " boot messages and diagnostic output\n");
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_BF are bus fault errors
+ */
+
+ if (dstat & DSTAT_800_BF) {
+ intr_bf (host, cmd);
+ }
+
+
+ /*
+ * DSTAT_SIR interrupts are generated by the execution of
+ * the INT instruction. Since the exact values available
+ * are determined entirely by the SCSI script running,
+ * and are local to a particular script, a unique handler
+ * is called for each script.
+ */
+
+ if (dstat & DSTAT_SIR) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : DSTAT_SIR\n", host->host_no);
+ switch ((tmp = hostdata->dstat_sir_intr (host, cmd))) {
+ case SPECIFIC_INT_NOTHING:
+ case SPECIFIC_INT_RESTART:
+ break;
+ case SPECIFIC_INT_ABORT:
+ abort_connected(host);
+ break;
+ case SPECIFIC_INT_PANIC:
+ printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT " dstat_sir_intr() returned SPECIFIC_INT_PANIC\n");
+ FATAL (host);
+ break;
+ case SPECIFIC_INT_BREAK:
+ intr_break (host, cmd);
+ break;
+ default:
+ printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT" dstat_sir_intr() returned unknown value %d\n",
+ tmp);
+ FATAL (host);
+ }
+ }
+
+ /* Master Data Parity Error is an '8xx (PCI) only condition. */
+ if ((hostdata->chip / 100) == 8 && (dstat & DSTAT_800_MDPE)) {
+ printk(KERN_ALERT "scsi%d : Master Data Parity Error\n",
+ host->host_no);
+ FATAL (host);
+ }
+}
+
+/*
+ * Function : static int print_insn (struct Scsi_Host *host,
+ * u32 *insn, int kernel)
+ *
+ * Purpose : print numeric representation of the instruction pointed
+ * to by insn to the debugging or kernel message buffer
+ * as appropriate.
+ *
+ * If desired, a user level program can interpret this
+ * information.
+ *
+ * Inputs : host, insn - host, pointer to instruction, prefix -
+ * string to prepend, kernel - use printk instead of debugging buffer.
+ *
+ * Returns : size, in u32s, of instruction printed.
+ */
+
+/*
+ * FIXME: should change kernel parameter so that it takes an ENUM
+ * specifying severity - either KERN_ALERT or KERN_PANIC so
+ * all panic messages are output with the same severity.
+ */
+
+static int
+print_insn (struct Scsi_Host *host, const u32 *insn,
+ const char *prefix, int kernel) {
+ /* NOTE : buf and tmp belong to the same declaration; the blank
+ lines below are historical formatting, not separate statements. */
+ char buf[160], /* Temporary buffer and pointer. ICKY
+ arbitrary length. */
+
+
+ *tmp;
+ unsigned char dcmd; /* dcmd register for *insn */
+ int size;
+
+ /*
+ * Check to see if the instruction pointer is not bogus before
+ * indirecting through it; avoiding red-zone at start of
+ * memory.
+ *
+ * FIXME: icky magic needs to happen here on non-intel boxes which
+ * don't have kernel memory mapped in like this. Might be reasonable
+ * to use vverify()?
+ */
+
+ /* A memory-to-memory (MMI) instruction is 3 words, so probe one
+ word further in that case. */
+ if (MAP_NR(insn) < 1 || MAP_NR(insn + 8) > MAP_NR(high_memory) ||
+ ((((dcmd = (insn[0] >> 24) & 0xff) & DCMD_TYPE_MMI) == DCMD_TYPE_MMI) &&
+ MAP_NR(insn + 12) > MAP_NR(high_memory))) {
+ size = 0;
+ sprintf (buf, "%s%p: address out of range\n",
+ prefix, insn);
+ } else {
+/*
+ * FIXME : (void *) cast in virt_to_bus should be unnecessary, because
+ * it should take const void * as argument.
+ */
+ sprintf(buf, "%s0x%lx (virt 0x%p) : 0x%08x 0x%08x (virt 0x%p)",
+ (prefix ? prefix : ""), virt_to_bus((void *) insn), insn,
+ insn[0], insn[1], bus_to_virt (insn[1]));
+ tmp = buf + strlen(buf);
+ /* Append the third word for 3-word (MMI) instructions. */
+ if ((dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_MMI) {
+ sprintf (tmp, " 0x%08x (virt 0x%p)\n", insn[2],
+ bus_to_virt(insn[2]));
+ size = 3;
+ } else {
+ sprintf (tmp, "\n");
+ size = 2;
+ }
+ }
+
+ /* kernel != 0 : printk; otherwise route to the debugger buffer
+ when NCR_DEBUG is compiled in (silently dropped when not). */
+ if (kernel)
+ printk ("%s", buf);
+#ifdef NCR_DEBUG
+ else {
+ size_t len = strlen(buf);
+ debugger_kernel_write(host, buf, len);
+ }
+#endif
+ return size;
+}
+
+/*
+ * Function : static const char *ncr_state (int state)
+ *
+ * Purpose : convert state (probably from hostdata->state) to a string
+ *
+ * Inputs : state
+ *
+ * Returns : char * representation of state, "unknown" on error.
+ */
+
+static const char *
+ncr_state (int state) {
+    /* Name the driver state (hostdata->state); "unknown" otherwise. */
+    if (state == STATE_HALTED)
+	return "halted";
+    if (state == STATE_WAITING)
+	return "waiting";
+    if (state == STATE_RUNNING)
+	return "running";
+    if (state == STATE_ABORTING)
+	return "aborting";
+    if (state == STATE_DISABLED)
+	return "disabled";
+    return "unknown";
+}
+
+/*
+ * Function : int NCR53c7xx_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Abort an errant SCSI command, doing all necessary
+ * cleanup of the issue_queue, running_list, shared Linux/NCR
+ * dsa issue and reconnect queues.
+ *
+ * Inputs : cmd - command to abort, code - entire result field
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+int
+NCR53c7xx_abort (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata = host ? (struct NCR53c7x0_hostdata *)
+ host->hostdata : NULL;
+ unsigned long flags;
+ struct NCR53c7x0_cmd *curr, **prev;
+ Scsi_Cmnd *me, **last;
+#if 0
+ static long cache_pid = -1;
+#endif
+
+
+ if (!host) {
+ printk ("Bogus SCSI command pid %ld; no host structure\n",
+ cmd->pid);
+ return SCSI_ABORT_ERROR;
+ } else if (!hostdata) {
+ printk ("Bogus SCSI host %d; no hostdata\n", host->host_no);
+ return SCSI_ABORT_ERROR;
+ }
+ NCR53c7x0_local_setup(host);
+
+/*
+ * CHECK : I don't think that reading ISTAT will unstack any interrupts,
+ * since we need to write the INTF bit to clear it, and SCSI/DMA
+ * interrupts don't clear until we read SSTAT/SIST and DSTAT registers.
+ *
+ * See that this is the case.
+ *
+ * I suspect that several of our failures may be coming from a new fatal
+ * interrupt (possibly due to a phase mismatch) happening after we've left
+ * the interrupt handler, but before the PIC has had the interrupt condition
+ * cleared.
+ */
+
+ if (NCR53c7x0_read8(hostdata->istat) &
+ (ISTAT_DIP|ISTAT_SIP|
+ (hostdata->chip / 100 == 8 ? ISTAT_800_INTF : 0))) {
+ printk ("scsi%d : dropped interrupt for command %ld\n", host->host_no,
+ cmd->pid);
+ NCR53c7x0_intr (host->irq, NULL, NULL);
+ return SCSI_ABORT_BUSY;
+ }
+
+ save_flags(flags);
+ cli();
+#if 0
+ if (cache_pid == cmd->pid)
+ panic ("scsi%d : bloody fetus %d\n", host->host_no, cmd->pid);
+ else
+ cache_pid = cmd->pid;
+#endif
+
+
+/*
+ * The command could be hiding in the issue_queue. This would be very
+ * nice, as commands can't be moved from the high level driver's issue queue
+ * into the shared queue until an interrupt routine is serviced, and this
+ * moving is atomic.
+ *
+ * If this is the case, we don't have to worry about anything - we simply
+ * pull the command out of the old queue, and call it aborted.
+ */
+
+ for (me = (Scsi_Cmnd *) hostdata->issue_queue,
+ last = (Scsi_Cmnd **) &(hostdata->issue_queue);
+ me && me != cmd; last = (Scsi_Cmnd **)&(me->SCp.ptr),
+ me = (Scsi_Cmnd *)me->SCp.ptr);
+
+ if (me) {
+ *last = (Scsi_Cmnd *) me->SCp.ptr;
+ if (me->host_scribble) {
+ ((struct NCR53c7x0_cmd *)me->host_scribble)->next = hostdata->free;
+ hostdata->free = (struct NCR53c7x0_cmd *) me->host_scribble;
+ me->host_scribble = NULL;
+ }
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+ printk ("scsi%d : found command %ld in Linux issue queue\n",
+ host->host_no, me->pid);
+ restore_flags(flags);
+ run_process_issue_queue();
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * That failing, the command could be in our list of already executing
+ * commands. If this is the case, drastic measures are called for.
+ */
+
+ for (curr = (struct NCR53c7x0_cmd *) hostdata->running_list,
+ prev = (struct NCR53c7x0_cmd **) &(hostdata->running_list);
+ curr && curr->cmd != cmd; prev = (struct NCR53c7x0_cmd **)
+ &(curr->next), curr = (struct NCR53c7x0_cmd *) curr->next);
+
+ if (curr) {
+ if ((cmd->result & 0xff) != 0xff && (cmd->result & 0xff00) != 0xff00) {
+ if (prev)
+ *prev = (struct NCR53c7x0_cmd *) curr->next;
+ curr->next = (struct NCR53c7x0_cmd *) hostdata->free;
+ cmd->host_scribble = NULL;
+ hostdata->free = curr;
+ cmd->scsi_done(cmd);
+ printk ("scsi%d : found finished command %ld in running list\n",
+ host->host_no, cmd->pid);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ } else {
+ printk ("scsi%d : DANGER : command running, can not abort.\n",
+ cmd->host->host_no);
+ restore_flags(flags);
+ return SCSI_ABORT_BUSY;
+ }
+ }
+
+/*
+ * And if we couldn't find it in any of our queues, it must have been
+ * a dropped interrupt.
+ */
+
+ curr = (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ if (curr) {
+ curr->next = hostdata->free;
+ hostdata->free = curr;
+ cmd->host_scribble = NULL;
+ }
+
+ if (((cmd->result & 0xff00) == 0xff00) ||
+ ((cmd->result & 0xff) == 0xff)) {
+ printk ("scsi%d : did this command ever run?\n", host->host_no);
+ cmd->result = DID_ABORT << 16;
+ } else {
+ printk ("scsi%d : probably lost INTFLY, normal completion\n",
+ host->host_no);
+/*
+ * FIXME : We need to add an additional flag which indicates if a
+ * command was ever counted as BUSY, so if we end up here we can
+ * decrement the busy count if and only if it is necessary.
+ */
+ --hostdata->busy[cmd->target][cmd->lun];
+ }
+ restore_flags(flags);
+ cmd->scsi_done(cmd);
+
+/*
+ * We need to run process_issue_queue since termination of this command
+ * may allow another queued command to execute first?
+ */
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+/*
+ * Function : int NCR53c7xx_reset (Scsi_Cmnd *cmd)
+ *
+ * Purpose : perform a hard reset of the SCSI bus and NCR
+ * chip.
+ *
+ * Inputs : cmd - command which caused the SCSI RESET
+ *
+ * Returns : 0 on success.
+ */
+
+int
+NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ int found = 0;
+ struct NCR53c7x0_cmd * c;
+ Scsi_Cmnd *tmp;
+ /*
+ * When we call scsi_done(), it's going to wake up anything sleeping on the
+ * resources which were in use by the aborted commands, and we'll start to
+ * get new commands.
+ *
+ * We can't let this happen until after we've re-initialized the driver
+ * structures, and can't reinitialize those structures until after we've
+ * dealt with their contents.
+ *
+ * So, we need to find all of the commands which were running, stick
+ * them on a linked list of completed commands (we'll use the host_scribble
+ * pointer), do our reinitialization, and then call the done function for
+ * each command.
+ */
+ Scsi_Cmnd *nuke_list = NULL;
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+
+ NCR53c7x0_local_setup(host);
+ save_flags(flags);
+ cli();
+ /* Stop the SCRIPTS processor so registers and driver queues are stable. */
+ ncr_halt (host);
+ /* Dump diagnostic state (registers, queues, event log) before the reset
+ destroys it. */
+ print_lots (host);
+ dump_events (host, 30);
+ ncr_scsi_reset (host);
+ /*
+ * Pull every command below the issue-queue level onto nuke_list (their
+ * NCR53c7x0_cmd structures are returned to the free list), and check
+ * whether the command that triggered this reset is among them.
+ */
+ for (tmp = nuke_list = return_outstanding_commands (host, 1 /* free */,
+ 0 /* issue */ ); tmp; tmp = (Scsi_Cmnd *) tmp->SCp.buffer)
+ if (tmp == cmd) {
+ found = 1;
+ break;
+ }
+
+ /*
+ * If we didn't find the command which caused this reset in our running
+ * list, then we've lost it. See that it terminates normally anyway.
+ */
+ if (!found) {
+ c = (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ if (c) {
+ cmd->host_scribble = NULL;
+ c->next = hostdata->free;
+ hostdata->free = c;
+ } else
+ printk ("scsi%d: lost command %ld\n", host->host_no, cmd->pid);
+ /* Chain cmd onto nuke_list so it is completed with DID_RESET below. */
+ cmd->SCp.buffer = (struct scatterlist *) nuke_list;
+ nuke_list = cmd;
+ }
+
+ /* Reinitialize driver structures and the chip before completing any
+ command, per the ordering constraint documented above. */
+ NCR53c7x0_driver_init (host);
+ hostdata->soft_reset (host);
+ if (hostdata->resets == 0)
+ disable(host);
+ else if (hostdata->resets != -1)
+ --hostdata->resets;
+ /*
+ * NOTE(review): sti() here re-enables interrupts even though flags are
+ * only restored after the completion loop below -- presumably so that
+ * scsi_done() handlers run with interrupts on; confirm this is intended.
+ */
+ sti();
+ for (; nuke_list; nuke_list = tmp) {
+ tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
+ nuke_list->result = DID_RESET << 16;
+ nuke_list->scsi_done (nuke_list);
+ }
+ restore_flags(flags);
+ return SCSI_RESET_SUCCESS;
+}
+
+/*
+ * The NCR SDMS bios follows Annex A of the SCSI-CAM draft, and
+ * therefore shares the scsicam_bios_param function.
+ */
+
+/*
+ * Function : int insn_to_offset (Scsi_Cmnd *cmd, u32 *insn)
+ *
+ * Purpose : convert instructions stored at NCR pointer into data
+ * pointer offset.
+ *
+ * Inputs : cmd - SCSI command; insn - pointer to instruction. Either current
+ * DSP, or saved data pointer.
+ *
+ * Returns : offset on success, -1 on failure.
+ */
+
+
+static int
+insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) cmd->host->hostdata;
+ struct NCR53c7x0_cmd *ncmd =
+ (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ int offset = 0, buffers;
+ struct scatterlist *segment;
+ char *ptr;
+ int found = 0;
+
+/*
+ * With the current code implementation, if the insn is inside dynamically
+ * generated code, the data pointer will be the instruction preceding
+ * the next transfer segment.
+ */
+
+ /* Only trust ncmd if the pointer itself is sane, and only interpret insn
+ if it lies inside this command's generated transfer code or its
+ residual-transfer instruction buffer. */
+ if (!check_address ((unsigned long) ncmd, sizeof (struct NCR53c7x0_cmd)) &&
+ ((insn >= ncmd->data_transfer_start &&
+ insn < ncmd->data_transfer_end) ||
+ (insn >= ncmd->residual &&
+ insn < (ncmd->residual +
+ sizeof(ncmd->residual))))) {
+ /* assumes insn[3] holds the bus address of the data buffer operand of
+ the generated instruction -- TODO confirm against script generator */
+ ptr = bus_to_virt(insn[3]);
+
+ if ((buffers = cmd->use_sg)) {
+ /* Scatter-gather: walk the segment list accumulating lengths until
+ the segment containing ptr is found; empty loop body (the printk
+ is compiled out). */
+ for (offset = 0,
+ segment = (struct scatterlist *) cmd->buffer;
+ buffers && !((found = ((ptr >= segment->address) &&
+ (ptr < (segment->address + segment->length)))));
+ --buffers, offset += segment->length, ++segment)
+#if 0
+ printk("scsi%d: comparing 0x%p to 0x%p\n",
+ cmd->host->host_no, saved, segment->address);
+#else
+ ;
+#endif
+ /* Add the offset within the matched segment; if nothing matched,
+ found == 0 and the bogus offset is discarded by the return below. */
+ offset += ptr - segment->address;
+ } else {
+ /* Single linear buffer: offset is a direct pointer difference. */
+ found = 1;
+ offset = ptr - (char *) (cmd->request_buffer);
+ }
+ } else if ((insn >= hostdata->script +
+ hostdata->E_data_transfer / sizeof(u32)) &&
+ (insn <= hostdata->script +
+ hostdata->E_end_data_transfer / sizeof(u32))) {
+ /* Pointer is in the static script's data-transfer section: no data
+ has moved yet, so the offset is zero. */
+ found = 1;
+ offset = 0;
+ }
+ return found ? offset : -1;
+}
+
+
+
+/*
+ * Function : void print_progress (Scsi_Cmnd *cmd)
+ *
+ * Purpose : print the current location of the saved data pointer
+ *
+ * Inputs : cmd - command we are interested in
+ *
+ */
+
+static void
+print_progress (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_cmd *ncmd =
+ (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ int offset, i;
+ char *where;
+ u32 *ptr;
+ NCR53c7x0_local_setup (cmd->host);
+ /* Two passes: i == 0 reports the saved data pointer, i == 1 the active
+ one reconstructed from the chip's DSP register. */
+ for (i = 0; i < 2; ++i) {
+ /* Skip the pass entirely if ncmd looks like a wild pointer. */
+ if (check_address ((unsigned long) ncmd,
+ sizeof (struct NCR53c7x0_cmd)) == -1)
+ continue;
+ if (!i) {
+ where = "saved";
+ ptr = bus_to_virt(ncmd->saved_data_pointer);
+ } else {
+ where = "active";
+ /* DSP points past the instruction being executed; back up by one
+ instruction (size derived from the DCMD opcode byte). */
+ ptr = bus_to_virt (NCR53c7x0_read32 (DSP_REG) -
+ NCR53c7x0_insn_size (NCR53c7x0_read8 (DCMD_REG)) *
+ sizeof(u32));
+ }
+ offset = insn_to_offset (cmd, ptr);
+
+ if (offset != -1)
+ printk ("scsi%d : %s data pointer at offset %d\n",
+ cmd->host->host_no, where, offset);
+ else {
+ int size;
+ /* Could not map the pointer to a buffer offset; disassemble the
+ two instructions at the saved data pointer as a diagnostic aid. */
+ printk ("scsi%d : can't determine %s data pointer offset\n",
+ cmd->host->host_no, where);
+ if (ncmd) {
+ size = print_insn (cmd->host,
+ bus_to_virt(ncmd->saved_data_pointer), "", 1);
+ print_insn (cmd->host,
+ bus_to_virt(ncmd->saved_data_pointer) + size * sizeof(u32),
+ "", 1);
+ }
+ }
+ }
+}
+
+
+/*
+ * Function : void print_dsa (struct Scsi_Host *host, u32 *dsa,
+ *	const char *prefix)
+ *
+ * Purpose : print a decoded dump of one DSA (Data Structure Address)
+ *	block - the per-command structure the SCRIPTS code works from -
+ *	including its msgout buffer, select data, command pointer and
+ *	next link, plus per-target sync parameters and data progress.
+ *
+ * Inputs : host - adapter; dsa - virtual address of the DSA;
+ *	prefix - optional string prepended to the first line (may be NULL).
+ */
+static void
+print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i, len;
+ char *ptr;
+ Scsi_Cmnd *cmd;
+
+ /* Refuse to dereference a DSA pointer that would fault the kernel. */
+ if (check_address ((unsigned long) dsa, hostdata->dsa_end -
+ hostdata->dsa_start) == -1) {
+ printk("scsi%d : bad dsa virt 0x%p\n", host->host_no, dsa);
+ return;
+ }
+ printk("%sscsi%d : dsa at phys 0x%lx (virt 0x%p)\n"
+ " + %d : dsa_msgout length = %u, data = 0x%x (virt 0x%p)\n" ,
+ prefix ? prefix : "",
+ host->host_no, virt_to_bus (dsa), dsa, hostdata->dsa_msgout,
+ dsa[hostdata->dsa_msgout / sizeof(u32)],
+ dsa[hostdata->dsa_msgout / sizeof(u32) + 1],
+ bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]));
+
+ /*
+ * Only print messages if they're sane in length so we don't
+ * blow the kernel printk buffer on something which won't buy us
+ * anything.
+ */
+
+ if (dsa[hostdata->dsa_msgout / sizeof(u32)] <
+ sizeof (hostdata->free->select))
+ /* Decode each message in the msgout buffer; print_msg returns the
+ number of bytes it consumed, 0 meaning it could not decode. */
+ for (i = dsa[hostdata->dsa_msgout / sizeof(u32)],
+ ptr = bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]);
+ i > 0 && !check_address ((unsigned long) ptr, 1);
+ ptr += len, i -= len) {
+ printk(" ");
+ len = print_msg (ptr);
+ printk("\n");
+ if (!len)
+ break;
+ }
+
+ printk(" + %d : select_indirect = 0x%x\n",
+ hostdata->dsa_select, dsa[hostdata->dsa_select / sizeof(u32)]);
+ cmd = (Scsi_Cmnd *) bus_to_virt(dsa[hostdata->dsa_cmnd / sizeof(u32)]);
+ printk(" + %d : dsa_cmnd = 0x%x ", hostdata->dsa_cmnd,
+ (u32) virt_to_bus(cmd));
+ if (cmd) {
+ printk(" result = 0x%x, target = %d, lun = %d, cmd = ",
+ cmd->result, cmd->target, cmd->lun);
+ print_command(cmd->cmnd);
+ } else
+ printk("\n");
+ printk(" + %d : dsa_next = 0x%x\n", hostdata->dsa_next,
+ dsa[hostdata->dsa_next / sizeof(u32)]);
+ if (cmd) {
+ /* Per-target synchronous transfer sanity values and the patched
+ per-target script fragment, then the command's data progress. */
+ printk("scsi%d target %d : sxfer_sanity = 0x%x, scntl3_sanity = 0x%x\n"
+ " script : ",
+ host->host_no, cmd->target,
+ hostdata->sync[cmd->target].sxfer_sanity,
+ hostdata->sync[cmd->target].scntl3_sanity);
+ for (i = 0; i < (sizeof(hostdata->sync[cmd->target].script) / 4); ++i)
+ printk ("0x%x ", hostdata->sync[cmd->target].script[i]);
+ printk ("\n");
+ print_progress (cmd);
+ }
+}
+/*
+ * Function : void print_queues (Scsi_Host *host)
+ *
+ * Purpose : print the contents of the NCR issue and reconnect queues
+ *
+ * Inputs : host - SCSI host we are interested in
+ *
+ */
+
+static void
+print_queues (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *dsa, *next_dsa;
+ volatile u32 *current;
+ int left;
+ Scsi_Cmnd *cmd, *next_cmd;
+ unsigned long flags;
+
+ printk ("scsi%d : issue queue\n", host->host_no);
+
+ /* Walk the Linux-level issue queue; `left` bounds the walk at can_queue
+ entries so a corrupted (circular) list cannot hang us. */
+ for (left = host->can_queue, cmd = (Scsi_Cmnd *) hostdata->issue_queue;
+ left >= 0 && cmd;
+ cmd = next_cmd) {
+ next_cmd = (Scsi_Cmnd *) cmd->SCp.ptr;
+ /* Interrupts are disabled per-entry so each entry is printed from a
+ consistent snapshot without keeping them off for the whole dump. */
+ save_flags(flags);
+ cli();
+ if (cmd->host_scribble) {
+ if (check_address ((unsigned long) (cmd->host_scribble),
+ sizeof (cmd->host_scribble)) == -1)
+ printk ("scsi%d: scsi pid %ld bad pointer to NCR53c7x0_cmd\n",
+ host->host_no, cmd->pid);
+ /* print_dsa does sanity check on address, no need to check */
+ else
+ print_dsa (host, ((struct NCR53c7x0_cmd *) cmd->host_scribble)
+ -> dsa, "");
+ } else
+ printk ("scsi%d : scsi pid %ld for target %d lun %d has no NCR53c7x0_cmd\n",
+ host->host_no, cmd->pid, cmd->target, cmd->lun);
+ restore_flags(flags);
+ }
+
+ if (left <= 0) {
+ printk ("scsi%d : loop detected in issue queue\n",
+ host->host_no);
+ }
+
+ /*
+ * Traverse the NCR reconnect and start DSA structures, printing out
+ * each element until we hit the end or detect a loop. Currently,
+ * the reconnect structure is a linked list; and the start structure
+ * is an array. Eventually, the reconnect structure will become a
+ * list as well, since this simplifies the code.
+ */
+
+ printk ("scsi%d : schedule dsa array :\n", host->host_no);
+ /* The schedule array holds two u32s per slot; a slot whose first word
+ is the NOP instruction is empty. */
+ for (left = host->can_queue, current = hostdata->schedule;
+ left > 0; current += 2, --left)
+ if (current[0] != hostdata->NOP_insn)
+/* FIXME : convert pointer to dsa_begin to pointer to dsa. */
+ print_dsa (host, bus_to_virt (current[1] -
+ (hostdata->E_dsa_code_begin -
+ hostdata->E_dsa_code_template)), "");
+ printk ("scsi%d : end schedule dsa array\n", host->host_no);
+
+ printk ("scsi%d : reconnect_dsa_head :\n", host->host_no);
+
+ /* Walk the DSA reconnect list, again bounded by can_queue entries. */
+ for (left = host->can_queue,
+ dsa = bus_to_virt (hostdata->reconnect_dsa_head);
+ left >= 0 && dsa;
+ dsa = next_dsa) {
+ save_flags (flags);
+ cli();
+ if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1) {
+ printk ("scsi%d: bad DSA pointer 0x%p", host->host_no,
+ dsa);
+ next_dsa = NULL;
+ }
+ else
+ {
+ next_dsa = bus_to_virt(dsa[hostdata->dsa_next / sizeof(u32)]);
+ print_dsa (host, dsa, "");
+ }
+ restore_flags(flags);
+ }
+ printk ("scsi%d : end reconnect_dsa_head\n", host->host_no);
+ if (left < 0)
+ printk("scsi%d: possible loop in ncr reconnect list\n",
+ host->host_no);
+}
+
+/*
+ * Function : void print_lots (struct Scsi_Host *host)
+ *
+ * Purpose : master diagnostic dump - decode and print the chip's DMA and
+ *	SCSI registers, disassemble the instructions around the current
+ *	DSP, dump the connected command's DSA, and print all queues.
+ *	Intended to be called with the SCRIPTS processor halted.
+ */
+static void
+print_lots (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ u32 *dsp_next, *dsp, *dsa, dbc_dcmd;
+ unsigned char dcmd, sbcl;
+ int i, size;
+ NCR53c7x0_local_setup(host);
+
+ if ((dsp_next = bus_to_virt(NCR53c7x0_read32 (DSP_REG)))) {
+ /* DBC register: high byte is the DCMD opcode, low 24 bits the byte
+ count; DSP points one instruction past the one being executed. */
+ dbc_dcmd = NCR53c7x0_read32(DBC_REG);
+ dcmd = (dbc_dcmd & 0xff000000) >> 24;
+ dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ sbcl = NCR53c7x0_read8 (SBCL_REG);
+
+
+ /* 8xx-family chips keep phase/FIFO status in SSTAT1 where the older
+ parts use SSTAT2, hence the chip / 100 == 8 selection below. */
+ printk ("scsi%d : DCMD|DBC=0x%x, DNAD=0x%x (virt 0x%p)\n"
+ " DSA=0x%lx (virt 0x%p)\n"
+ " DSPS=0x%x, TEMP=0x%x (virt 0x%p), DMODE=0x%x\n"
+ " SXFER=0x%x, SCNTL3=0x%x\n"
+ " %s%s%sphase=%s, %d bytes in SCSI FIFO\n"
+ " STEST0=0x%x\n",
+ host->host_no, dbc_dcmd, NCR53c7x0_read32(DNAD_REG),
+ bus_to_virt(NCR53c7x0_read32(DNAD_REG)),
+ virt_to_bus(dsa), dsa,
+ NCR53c7x0_read32(DSPS_REG), NCR53c7x0_read32(TEMP_REG),
+ bus_to_virt (NCR53c7x0_read32(TEMP_REG)),
+ (int) NCR53c7x0_read8(hostdata->dmode),
+ (int) NCR53c7x0_read8(SXFER_REG),
+ (int) NCR53c7x0_read8(SCNTL3_REG_800),
+ (sbcl & SBCL_BSY) ? "BSY " : "",
+ (sbcl & SBCL_SEL) ? "SEL " : "",
+ (sbcl & SBCL_REQ) ? "REQ " : "",
+ sstat2_to_phase(NCR53c7x0_read8 (((hostdata->chip / 100) == 8) ?
+ SSTAT1_REG : SSTAT2_REG)),
+ (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT,
+ NCR53c7x0_read8 (STEST0_REG_800));
+ printk ("scsi%d : DSP 0x%lx (virt 0x%p) ->\n", host->host_no,
+ virt_to_bus(dsp), dsp);
+ /* Disassemble the current instruction plus the next five. */
+ for (i = 6; i > 0; --i, dsp += size)
+ size = print_insn (host, dsp, "", 1);
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
+ /* Bus is connected: show who we are talking to and its DSA. */
+ printk ("scsi%d : connected (SDID=0x%x, SSID=0x%x)\n",
+ host->host_no, NCR53c7x0_read8 (SDID_REG_800),
+ NCR53c7x0_read8 (SSID_REG_800));
+ print_dsa (host, dsa, "");
+ }
+
+#if 1
+ print_queues (host);
+#endif
+ }
+}
+
+/*
+ * Function : static int shutdown (struct Scsi_Host *host)
+ *
+ * Purpose : does a clean (we hope) shutdown of the NCR SCSI
+ * chip. Use prior to dumping core, unloading the NCR driver,
+ *
+ * Returns : 0 on success
+ */
+static int
+shutdown (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ NCR53c7x0_local_setup(host);
+ save_flags (flags);
+ cli();
+/* Get in a state where we can reset the SCSI bus */
+ ncr_halt (host);
+ ncr_scsi_reset (host);
+ /* Chip-specific soft reset, then mark the host disabled so no further
+ commands are accepted. */
+ hostdata->soft_reset(host);
+
+ disable (host);
+ restore_flags (flags);
+ return 0;
+}
+
+/*
+ * Function : void ncr_scsi_reset (struct Scsi_Host *host)
+ *
+ * Purpose : reset the SCSI bus.
+ */
+
+static void
+ncr_scsi_reset (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ int sien = 0;
+ NCR53c7x0_local_setup(host);
+ save_flags (flags);
+ cli();
+ /* On 8xx-family chips, mask the SCSI-reset interrupt first so our own
+ RST assertion does not raise a spurious interrupt; restored below. */
+ if ((hostdata->chip / 100) == 8) {
+ sien = NCR53c7x0_read8(SIEN0_REG_800);
+ NCR53c7x0_write8(SIEN0_REG_800, sien & ~SIEN_RST);
+ }
+ /* Assert RST on the bus, hold it for the minimum time, then release. */
+ NCR53c7x0_write8(SCNTL1_REG, SCNTL1_RST);
+ udelay(25); /* Minimum amount of time to assert RST */
+ NCR53c7x0_write8(SCNTL1_REG, 0);
+ if ((hostdata->chip / 100) == 8) {
+ NCR53c7x0_write8(SIEN0_REG_800, sien);
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Function : void hard_reset (struct Scsi_Host *host)
+ *
+ * Purpose : reset the SCSI bus, then reinitialize the driver's data
+ *	structures and soft-reset the chip so the host starts from a
+ *	known-clean state. Performed with interrupts disabled.
+ */
+
+static void
+hard_reset (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ save_flags (flags);
+ cli();
+ ncr_scsi_reset(host);
+ NCR53c7x0_driver_init (host);
+ /* soft_reset is a chip-family hook; may be unset during early init. */
+ if (hostdata->soft_reset)
+ hostdata->soft_reset (host);
+ restore_flags(flags);
+}
+
+
+/*
+ * Function : Scsi_Cmnd *return_outstanding_commands (struct Scsi_Host *host,
+ * int free, int issue)
+ *
+ * Purpose : return a linked list (using the SCp.buffer field as next,
+ * so we don't perturb hostdata. We don't use a field of the
+ * NCR53c7x0_cmd structure since we may not have allocated one
+ * for the command causing the reset.) of Scsi_Cmnd structures that
+ * had propagated below the Linux issue queue level. If free is set,
+ * free the NCR53c7x0_cmd structures which are associated with
+ * the Scsi_Cmnd structures, and clean up any internal
+ * NCR lists that the commands were on. If issue is set,
+ * also return commands in the issue queue.
+ *
+ * Returns : linked list of commands
+ *
+ * NOTE : the caller should insure that the NCR chip is halted
+ * if the free flag is set.
+ */
+
+static Scsi_Cmnd *
+return_outstanding_commands (struct Scsi_Host *host, int free, int issue) {
+    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+        host->hostdata;
+    struct NCR53c7x0_cmd *c, *next_c;
+    int i;
+    u32 *current;
+    Scsi_Cmnd *list = NULL, *tmp;
+
+    /*
+     * Walk the running list, chaining each command onto the result list
+     * through its SCp.buffer field.  A non-NULL SCp.buffer on entry means
+     * we have already visited this command, i.e. the list is circular.
+     *
+     * BUG FIX: the original code had an else branch here that executed an
+     * unconditional break on the very first element, making the collection
+     * code below unreachable - running commands were never returned, so a
+     * bus reset leaked them.  The successor pointer is also saved before
+     * c->next is relinked onto the free list, since relinking first would
+     * make the loop walk the free list instead.
+     */
+    for (c = (struct NCR53c7x0_cmd *) hostdata->running_list; c;
+            c = next_c) {
+        next_c = (struct NCR53c7x0_cmd *) c->next;
+        if (c->cmd->SCp.buffer) {
+            printk ("scsi%d : loop detected in running list!\n", host->host_no);
+            break;
+        }
+        c->cmd->SCp.buffer = (struct scatterlist *) list;
+        list = c->cmd;
+        if (free) {
+            c->next = hostdata->free;
+            hostdata->free = c;
+        }
+    }
+
+    if (free) {
+        /* Empty every slot of the start (schedule) array: a NOP instruction
+           plus a poison value in the jump-address word. */
+        for (i = 0, current = (u32 *) hostdata->schedule;
+                i < host->can_queue; ++i, current += 2) {
+            current[0] = hostdata->NOP_insn;
+            current[1] = 0xdeadbeef;
+        }
+        hostdata->current = NULL;
+    }
+
+    if (issue) {
+        /* Also collect commands still waiting in the Linux issue queue,
+           with the same SCp.buffer-based loop detection. */
+        for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; tmp = tmp->next) {
+            if (tmp->SCp.buffer) {
+                printk ("scsi%d : loop detected in issue queue!\n",
+                    host->host_no);
+                break;
+            }
+            tmp->SCp.buffer = (struct scatterlist *) list;
+            list = tmp;
+        }
+        if (free)
+            hostdata->issue_queue = NULL;
+
+    }
+    return list;
+}
+
+/*
+ * Function : static int disable (struct Scsi_Host *host)
+ *
+ * Purpose : disables the given NCR host, causing all commands
+ * to return a driver error. Call this so we can unload the
+ * module during development and try again. Eventually,
+ * we should be able to find clean workarounds for these
+ * problems.
+ *
+ * Inputs : host - hostadapter to twiddle
+ *
+ * Returns : 0 on success.
+ */
+
+static int
+disable (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ Scsi_Cmnd *nuke_list, *tmp;
+ save_flags(flags);
+ cli();
+ /* Halt the SCRIPTS processor if it is still running, collect every
+ outstanding command (running and issue queues), reset the chip, and
+ mark the host disabled so no new commands are accepted. */
+ if (hostdata->state != STATE_HALTED)
+ ncr_halt (host);
+ nuke_list = return_outstanding_commands (host, 1 /* free */, 1 /* issue */);
+ hard_reset (host);
+ hostdata->state = STATE_DISABLED;
+ restore_flags(flags);
+ printk ("scsi%d : nuking commands\n", host->host_no);
+ /* Complete every collected command with a driver error so their owners
+ are woken up rather than left waiting forever. */
+ for (; nuke_list; nuke_list = tmp) {
+ tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
+ nuke_list->result = DID_ERROR << 16;
+ nuke_list->scsi_done(nuke_list);
+ }
+ printk ("scsi%d : done. \n", host->host_no);
+ printk (KERN_ALERT "scsi%d : disabled. Unload and reload\n",
+ host->host_no);
+ return 0;
+}
+
+/*
+ * Function : static int ncr_halt (struct Scsi_Host *host)
+ *
+ * Purpose : halts the SCSI SCRIPTS(tm) processor on the NCR chip
+ *
+ * Inputs : host - SCSI chip to halt
+ *
+ * Returns : 0 on success
+ */
+
+static int
+ncr_halt (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ unsigned char istat, tmp;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int stage;
+ NCR53c7x0_local_setup(host);
+
+ save_flags(flags);
+ cli();
+ /* Stage 0 : eat all interrupts
+ Stage 1 : set ABORT
+ Stage 2 : eat all but abort interrupts
+ Stage 3 : eat all interrupts
+ */
+ for (stage = 0;;) {
+ if (stage == 1) {
+ /* Ask the SCRIPTS processor to abort; it acknowledges with a
+ DSTAT_ABRT DMA interrupt which we catch in stage 2. */
+ NCR53c7x0_write8(hostdata->istat, ISTAT_ABRT);
+ ++stage;
+ }
+ istat = NCR53c7x0_read8 (hostdata->istat);
+ if (istat & ISTAT_SIP) {
+ /* Drain a pending SCSI interrupt; reading the status registers
+ clears the condition. 8xx chips use SIST0/SIST1, older parts
+ use SSTAT0. */
+ if ((hostdata->chip / 100) == 8) {
+ tmp = NCR53c7x0_read8(SIST0_REG_800);
+ udelay(1);
+ tmp = NCR53c7x0_read8(SIST1_REG_800);
+ } else {
+ tmp = NCR53c7x0_read8(SSTAT0_REG);
+ }
+ } else if (istat & ISTAT_DIP) {
+ /* Drain a pending DMA interrupt; DSTAT_ABRT here is the abort
+ acknowledgement we are waiting for in stage 2. */
+ tmp = NCR53c7x0_read8(DSTAT_REG);
+ if (stage == 2) {
+ if (tmp & DSTAT_ABRT) {
+ NCR53c7x0_write8(hostdata->istat, 0);
+ ++stage;
+ } else {
+ /* NOTE(review): disable() in turn calls ncr_halt() when the
+ chip is not yet marked halted - looks like a potential
+ recursion if the abort never completes; confirm. */
+ printk(KERN_ALERT "scsi%d : could not halt NCR chip\n",
+ host->host_no);
+ disable (host);
+ }
+ }
+ }
+ /* No interrupts pending: advance out of stage 0, or finish once the
+ abort has been acknowledged (stage 3). The else below binds to the
+ inner if, which is the intended grouping. */
+ if (!(istat & (ISTAT_SIP|ISTAT_DIP)))
+ if (stage == 0)
+ ++stage;
+ else if (stage == 3)
+ break;
+ }
+ hostdata->state = STATE_HALTED;
+ restore_flags(flags);
+#if 0
+ print_lots (host);
+#endif
+ return 0;
+}
+
+/*
+ * Function: event_name (int event)
+ *
+ * Purpose: map event enum into user-readable strings.
+ */
+
+static const char *
+event_name (int event) {
+    /* Table-driven mapping; first matching entry wins, which is equivalent
+       to the case labels of a switch since the values are distinct. */
+    static const struct {
+        int ev;
+        const char *name;
+    } event_names[] = {
+        { EVENT_NONE,            "none" },
+        { EVENT_ISSUE_QUEUE,     "to issue queue" },
+        { EVENT_START_QUEUE,     "to start queue" },
+        { EVENT_SELECT,          "selected" },
+        { EVENT_DISCONNECT,      "disconnected" },
+        { EVENT_RESELECT,        "reselected" },
+        { EVENT_COMPLETE,        "completed" },
+        { EVENT_IDLE,            "idle" },
+        { EVENT_SELECT_FAILED,   "select failed" },
+        { EVENT_BEFORE_SELECT,   "before select" },
+        { EVENT_RESELECT_FAILED, "reselect failed" },
+    };
+    int i;
+
+    for (i = 0; i < (int) (sizeof event_names / sizeof event_names[0]); ++i)
+        if (event_names[i].ev == event)
+            return event_names[i].name;
+    return "unknown";
+}
+
+/*
+ * Function : void dump_events (struct Scsi_Host *host, count)
+ *
+ * Purpose : print last count events which have occurred.
+ */
+static void
+dump_events (struct Scsi_Host *host, int count) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_event event;
+ int i;
+ unsigned long flags;
+ /* The event log is optional; nothing to do if it was never allocated. */
+ if (hostdata->events) {
+ if (count > hostdata->event_size)
+ count = hostdata->event_size;
+ /* Walk backwards through the circular buffer starting at the most
+ recent entry (event_index), wrapping at the start; the printed
+ `count` therefore descends towards 1 for the oldest event shown. */
+ for (i = hostdata->event_index; count > 0;
+ i = (i ? i - 1 : hostdata->event_size -1), --count) {
+ save_flags(flags);
+/*
+ * By copying the event we're currently examining with interrupts
+ * disabled, we can do multiple printk(), etc. operations and
+ * still be guaranteed that they're happening on the same
+ * event structure.
+ */
+ cli();
+#if 0
+ event = hostdata->events[i];
+#else
+ memcpy ((void *) &event, (void *) &(hostdata->events[i]),
+ sizeof(event));
+#endif
+
+ restore_flags(flags);
+ printk ("scsi%d : %s event %d at %ld secs %ld usecs target %d lun %d\n",
+ host->host_no, event_name (event.event), count,
+ (long) event.time.tv_sec, (long) event.time.tv_usec,
+ event.target, event.lun);
+ if (event.dsa)
+ printk (" event for dsa 0x%lx (virt 0x%p)\n",
+ virt_to_bus(event.dsa), event.dsa);
+ /* pid == -1 marks events not associated with any command. */
+ if (event.pid != -1) {
+ printk (" event for pid %ld ", event.pid);
+ print_command (event.cmnd);
+ }
+ }
+ }
+}
+
+/*
+ * Function: check_address
+ *
+ * Purpose: Check to see if a possibly corrupt pointer will fault the
+ * kernel.
+ *
+ * Inputs: addr - address; size - size of area
+ *
+ * Returns: 0 if area is OK, -1 on error.
+ *
+ * NOTES: should be implemented in terms of vverify on kernels
+ * that have it.
+ */
+
+static int
+check_address (unsigned long addr, int size) {
+    /* Reject the null/first page outright. */
+    if (MAP_NR (addr) < 1)
+        return -1;
+    /* Reject areas that extend past the top of physical memory. */
+    if (MAP_NR (addr + size) > MAP_NR (high_memory))
+        return -1;
+    return 0;
+}
+
+#ifdef MODULE
+/*
+ * Function : int NCR53c7x0_release (struct Scsi_Host *host)
+ *
+ * Purpose : module-unload teardown. Shuts the chip down, releases the
+ *	IRQ (only when no other registered host of this template shares
+ *	it), DMA channel and I/O region, frees every NCR53c7x0_cmd on the
+ *	free list via each command's own allocator-provided free hook,
+ *	and releases the event log.
+ *
+ * Returns : 1 (unconditionally; leaked command structures are reported
+ *	but do not change the return value).
+ */
+int
+NCR53c7x0_release(struct Scsi_Host *host) {
+    struct NCR53c7x0_hostdata *hostdata =
+        (struct NCR53c7x0_hostdata *) host->hostdata;
+    struct NCR53c7x0_cmd *cmd, *tmp;
+    shutdown (host);
+    if (host->irq != IRQ_NONE)
+    {
+        /*
+         * Only free the IRQ when this is the last host of our template
+         * still registered on it; several boards may share one line.
+         * (Renamed from `tmp` - it shadowed the NCR53c7x0_cmd pointer
+         * above, which -Wshadow rightly flags.)
+         */
+        int irq_count;
+        struct Scsi_Host *probe;
+        for (irq_count = 0, probe = first_host; probe; probe = probe->next)
+            if (probe->hostt == the_template && probe->irq == host->irq)
+                ++irq_count;
+        if (irq_count == 1)
+            free_irq(host->irq, NULL);
+    }
+    if (host->dma_channel != DMA_NONE)
+        free_dma(host->dma_channel);
+    if (host->io_port)
+        release_region(host->io_port, host->n_io_port);
+
+    for (cmd = (struct NCR53c7x0_cmd *) hostdata->free; cmd; cmd = tmp,
+        --hostdata->num_cmds) {
+        tmp = (struct NCR53c7x0_cmd *) cmd->next;
+        /*
+         * If we're going to loop, try to stop it to get a more accurate
+         * count of the leaked commands.
+         */
+        cmd->next = NULL;
+        if (cmd->free)
+            cmd->free ((void *) cmd->real, cmd->size);
+    }
+    /* Any commands still counted were never returned to the free list. */
+    if (hostdata->num_cmds)
+        printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
+            host->host_no, hostdata->num_cmds);
+    if (hostdata->events)
+        vfree ((void *)hostdata->events);
+    return 1;
+}
+Scsi_Host_Template driver_template = NCR53c7xx;
+#include "scsi_module.c"
+#endif /* def MODULE */
diff --git a/linux/src/drivers/scsi/53c8xx_d.h b/linux/src/drivers/scsi/53c8xx_d.h
new file mode 100644
index 0000000..b586340
--- /dev/null
+++ b/linux/src/drivers/scsi/53c8xx_d.h
@@ -0,0 +1,2677 @@
+u32 SCRIPT[] = {
+/*
+
+
+; NCR 53c810 driver, main script
+; Sponsored by
+; iX Multiuser Multitasking Magazine
+; hm@ix.de
+;
+; Copyright 1993, 1994, 1995 Drew Eckhardt
+; Visionary Computing
+; (Unix and Linux consulting and custom programming)
+; drew@PoohSticks.ORG
+; +1 (303) 786-7975
+;
+; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+;
+; PRE-ALPHA
+;
+; For more information, please consult
+;
+; NCR 53C810
+; PCI-SCSI I/O Processor
+; Data Manual
+;
+; NCR 53C710
+; SCSI I/O Processor
+; Programmers Guide
+;
+; NCR Microelectronics
+; 1635 Aeroplaza Drive
+; Colorado Springs, CO 80916
+; 1+ (719) 578-3400
+;
+; Toll free literature number
+; +1 (800) 334-5454
+;
+; IMPORTANT : This code is self modifying due to the limitations of
+; the NCR53c7,8xx series chips. Persons debugging this code with
+; the remote debugger should take this into account, and NOT set
+; breakpoints in modified instructions.
+;
+; Design:
+; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
+; microcontroller using a simple instruction set.
+;
+; So, to minimize the effects of interrupt latency, and to maximize
+; throughput, this driver offloads the practical maximum amount
+; of processing to the SCSI chip while still maintaining a common
+; structure.
+;
+; Where tradeoffs were needed between efficiency on the older
+; chips and the newer NCR53c800 series, the NCR53c800 series
+; was chosen.
+;
+; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
+; automate SCSI transfers without host processor intervention, this
+; isn't the case with the NCR53c710 and newer chips which allow
+;
+; - reads and writes to the internal registers from within the SCSI
+; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
+; state so that multiple threads of execution are possible, and also
+; provide an ALU for loop control, etc.
+;
+; - table indirect addressing for some instructions. This allows
+; pointers to be located relative to the DSA ((Data Structure
+; Address) register.
+;
+; These features make it possible to implement a mailbox style interface,
+; where the same piece of code is run to handle I/O for multiple threads
+; at once minimizing our need to relocate code. Since the NCR53c700/
+; NCR53c800 series have a unique combination of features, making a
+; a standard ingoing/outgoing mailbox system, costly, I've modified it.
+;
+; - Mailboxes are a mixture of code and data. This lets us greatly
+; simplify the NCR53c810 code and do things that would otherwise
+; not be possible.
+;
+; The saved data pointer is now implemented as follows :
+;
+; Control flow has been architected such that if control reaches
+; munge_save_data_pointer, on a restore pointers message or
+; reconnection, a jump to the address formerly in the TEMP register
+; will allow the SCSI command to resume execution.
+;
+
+;
+; Note : the DSA structures must be aligned on 32 bit boundaries,
+; since the source and destination of MOVE MEMORY instructions
+; must share the same alignment and this is the alignment of the
+; NCR registers.
+;
+
+ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
+ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
+ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
+ ; for current dsa
+ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
+ ; sync routine
+ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
+ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
+ ; saved data pointer
+ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
+ ; current residual code
+ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
+ ; saved residual code
+ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
+ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
+ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
+
+;
+; Once a device has initiated reselection, we need to compare it
+; against the singly linked list of commands which have disconnected
+; and are pending reselection. These commands are maintained in
+; an unordered singly linked list of DSA structures, through the
+; DSA pointers at their 'centers' headed by the reconnect_dsa_head
+; pointer.
+;
+; To avoid complications in removing commands from the list,
+; I minimize the amount of expensive (at eight operations per
+; addition @ 500-600ns each) pointer operations which must
+; be done in the NCR driver by precomputing them on the
+; host processor during dsa structure generation.
+;
+; The fixed-up per DSA code knows how to recognize the nexus
+; associated with the corresponding SCSI command, and modifies
+; the source and destination pointers for the MOVE MEMORY
+; instruction which is executed when reselected_ok is called
+; to remove the command from the list. Similarly, DSA is
+; loaded with the address of the next DSA structure and
+; reselected_check_next is called if a failure occurs.
+;
+; Perhaps more concisely, the net effect of the mess is
+;
+; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
+; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
+; src = &dsa->next;
+; if (target_id == dsa->id && target_lun == dsa->lun) {
+; *dest = *src;
+; break;
+; }
+; }
+;
+; if (!dsa)
+; error (int_err_unexpected_reselect);
+; else
+; longjmp (dsa->jump_resume, 0);
+;
+;
+
+
+; Define DSA structure used for mailboxes
+ENTRY dsa_code_template
+dsa_code_template:
+ENTRY dsa_code_begin
+dsa_code_begin:
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000000 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
+
+at 0x00000002 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000005 : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x00000007 : */ 0x88080000,0x00000980,
+/*
+ CALL select
+
+at 0x00000009 : */ 0x88080000,0x000001fc,
+/*
+; Handle the phase mismatch which may have resulted from the
+; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
+; may or may not be necessary, and we should update script_asm.pl
+; to handle multiple pieces.
+ CLEAR ATN
+
+at 0x0000000b : */ 0x60000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000000d : */ 0x60000040,0x00000000,
+/*
+
+; Replace second operand with address of JUMP instruction dest operand
+; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
+ENTRY dsa_code_fix_jump
+dsa_code_fix_jump:
+ MOVE MEMORY 4, NOP_insn, 0
+
+at 0x0000000f : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ JUMP select_done
+
+at 0x00000012 : */ 0x80080000,0x00000224,
+/*
+
+; wrong_dsa loads the DSA register with the value of the dsa_next
+; field.
+;
+wrong_dsa:
+; Patch the MOVE MEMORY INSTRUCTION such that
+; the destination address is the address of the OLD
+; next pointer.
+;
+ MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok + 8
+
+at 0x00000014 : */ 0xc0000004,0x00000000,0x00000758,
+/*
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000017 : */ 0x78380000,0x00000000,
+/*
+;
+; Move the _contents_ of the next pointer into the DSA register as
+; the next I_T_L or I_T_L_Q tupple to check against the established
+; nexus.
+;
+ MOVE MEMORY 4, dsa_temp_next, addr_scratch
+
+at 0x00000019 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x0000001c : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x0000001e : */ 0x88080000,0x00000980,
+/*
+ JUMP reselected_check_next
+
+at 0x00000020 : */ 0x80080000,0x000006a4,
+/*
+
+ABSOLUTE dsa_save_data_pointer = 0
+ENTRY dsa_code_save_data_pointer
+dsa_code_save_data_pointer:
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000022 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_temp, dsa_temp_addr_saved_pointer
+
+at 0x00000024 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000027 : */ 0x78380000,0x00000000,
+/*
+; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
+ MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
+
+at 0x00000029 : */ 0xc0000018,0x00000000,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000002c : */ 0x60000040,0x00000000,
+/*
+
+
+
+ RETURN
+
+at 0x0000002e : */ 0x90080000,0x00000000,
+/*
+ABSOLUTE dsa_restore_pointers = 0
+ENTRY dsa_code_restore_pointers
+dsa_code_restore_pointers:
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000030 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, dsa_temp_addr_saved_pointer, addr_temp
+
+at 0x00000032 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000035 : */ 0x78380000,0x00000000,
+/*
+; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
+ MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
+
+at 0x00000037 : */ 0xc0000018,0x00000000,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000003a : */ 0x60000040,0x00000000,
+/*
+
+
+
+ RETURN
+
+at 0x0000003c : */ 0x90080000,0x00000000,
+/*
+
+ABSOLUTE dsa_check_reselect = 0
+; dsa_check_reselect determines whether or not the current target and
+; lun match the current DSA
+ENTRY dsa_code_check_reselect
+dsa_code_check_reselect:
+ MOVE SSID TO SFBR ; SSID contains 3 bit target ID
+
+at 0x0000003e : */ 0x720a0000,0x00000000,
+/*
+; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
+ JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0xf8
+
+at 0x00000040 : */ 0x8084f800,0x00ffff48,
+/*
+;
+; Hack - move to scratch first, since SFBR is not writeable
+; via the CPU and hence a MOVE MEMORY instruction.
+;
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000042 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 1, reselected_identify, addr_scratch
+
+at 0x00000044 : */ 0xc0000001,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000047 : */ 0x78380000,0x00000000,
+/*
+ MOVE SCRATCH0 TO SFBR
+
+at 0x00000049 : */ 0x72340000,0x00000000,
+/*
+; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
+ JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
+
+at 0x0000004b : */ 0x8084f800,0x00ffff1c,
+/*
+; Patch the MOVE MEMORY INSTRUCTION such that
+; the source address is the address of this dsa's
+; next pointer.
+ MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok + 4
+
+at 0x0000004d : */ 0xc0000004,0x00000000,0x00000754,
+/*
+ CALL reselected_ok
+
+at 0x00000050 : */ 0x88080000,0x00000750,
+/*
+ CALL dsa_temp_sync
+
+at 0x00000052 : */ 0x88080000,0x00000000,
+/*
+; Release ACK on the IDENTIFY message _after_ we've set the synchronous
+; transfer parameters!
+ CLEAR ACK
+
+at 0x00000054 : */ 0x60000040,0x00000000,
+/*
+; Implicitly restore pointers on reselection, so a RETURN
+; will transfer control back to the right spot.
+ CALL REL (dsa_code_restore_pointers)
+
+at 0x00000056 : */ 0x88880000,0x00ffff60,
+/*
+ RETURN
+
+at 0x00000058 : */ 0x90080000,0x00000000,
+/*
+ENTRY dsa_zero
+dsa_zero:
+ENTRY dsa_code_template_end
+dsa_code_template_end:
+
+; Perform sanity check for dsa_fields_start == dsa_code_template_end -
+; dsa_zero, puke.
+
+ABSOLUTE dsa_fields_start = 0 ; Sanity marker
+ ; pad 48 bytes (fix this RSN)
+ABSOLUTE dsa_next = 48 ; len 4 Next DSA
+ ; del 4 Previous DSA address
+ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
+ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
+ ; table indirect select
+ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
+ ; select message
+ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
+ ; command
+ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
+ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
+ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
+ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
+ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
+ ; (Synchronous transfer negotiation, etc).
+ABSOLUTE dsa_end = 112
+
+ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
+ ; terminated by a call to JUMP wait_reselect
+
+; Linked lists of DSA structures
+ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
+ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
+ ; address of reconnect_dsa_head
+
+; These select the source and destination of a MOVE MEMORY instruction
+ABSOLUTE dmode_memory_to_memory = 0x0
+ABSOLUTE dmode_memory_to_ncr = 0x0
+ABSOLUTE dmode_ncr_to_memory = 0x0
+
+ABSOLUTE addr_scratch = 0x0
+ABSOLUTE addr_temp = 0x0
+
+
+; Interrupts -
+; MSB indicates type
+; 0 handle error condition
+; 1 handle message
+; 2 handle normal condition
+; 3 debugging interrupt
+; 4 testing interrupt
+; Next byte indicates specific error
+
+; XXX not yet implemented, I'm not sure if I want to -
+; Next byte indicates the routine the error occurred in
+; The LSB indicates the specific place the error occurred
+
+ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
+ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
+ABSOLUTE int_err_unexpected_reselect = 0x00020000
+ABSOLUTE int_err_check_condition = 0x00030000
+ABSOLUTE int_err_no_phase = 0x00040000
+ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
+ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
+ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
+ ; received
+
+ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
+ ; registers.
+ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
+ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
+ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
+ABSOLUTE int_norm_aborted =0x02040000 ; Aborted *dsa
+ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
+ABSOLUTE int_debug_break = 0x03000000 ; Break point
+
+ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
+
+
+ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
+ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
+ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
+
+
+; These should start with 0x05000000, with low bits incrementing for
+; each one.
+
+
+
+ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
+ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
+ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
+ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
+ABSOLUTE NOP_insn = 0 ; NOP instruction
+
+; Pointer to message, potentially multi-byte
+ABSOLUTE msg_buf = 0
+
+; Pointer to holding area for reselection information
+ABSOLUTE reselected_identify = 0
+ABSOLUTE reselected_tag = 0
+
+; Request sense command pointer, it's a 6 byte command, should
+; be constant for all commands since we always want 16 bytes of
+; sense and we don't need to change any fields as we did under
+; SCSI-I when we actually cared about the LUN field.
+;EXTERNAL NCR53c7xx_sense ; Request sense command
+
+
+; dsa_schedule
+; PURPOSE : after a DISCONNECT message has been received, and pointers
+; saved, insert the current DSA structure at the head of the
+; disconnected queue and fall through to the scheduler.
+;
+; CALLS : OK
+;
+; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
+; of disconnected commands
+;
+; MODIFIES : SCRATCH, reconnect_dsa_head
+;
+; EXITS : always passes control to schedule
+
+ENTRY dsa_schedule
+dsa_schedule:
+
+
+
+
+;
+; Calculate the address of the next pointer within the DSA
+; structure of the command that is currently disconnecting
+;
+ CALL dsa_to_scratch
+
+at 0x0000005a : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_next TO SCRATCH0
+
+at 0x0000005c : */ 0x7e343000,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x0000005e : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x00000060 : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x00000062 : */ 0x7f370000,0x00000000,
+/*
+
+; Point the next field of this DSA structure at the current disconnected
+; list
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000064 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
+
+at 0x00000066 : */ 0xc0000004,0x00000000,0x000001b4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000069 : */ 0x78380000,0x00000000,
+/*
+dsa_schedule_insert:
+ MOVE MEMORY 4, reconnect_dsa_head, 0
+
+at 0x0000006b : */ 0xc0000004,0x00000000,0x00000000,
+/*
+
+; And update the head pointer.
+ CALL dsa_to_scratch
+
+at 0x0000006e : */ 0x88080000,0x00000938,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000070 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
+
+at 0x00000072 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000075 : */ 0x78380000,0x00000000,
+/*
+
+
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000077 : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000079 : */ 0x60000040,0x00000000,
+/*
+
+ WAIT DISCONNECT
+
+at 0x0000007b : */ 0x48000000,0x00000000,
+/*
+
+
+
+
+
+
+ JUMP schedule
+
+at 0x0000007d : */ 0x80080000,0x00000000,
+/*
+
+
+;
+; select
+;
+; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
+; On success, the current DSA structure is removed from the issue
+; queue. Usually, this is entered as a fall-through from schedule,
+; although the contingent allegiance handling code will write
+; the select entry address to the DSP to restart a command as a
+; REQUEST SENSE. A message is sent (usually IDENTIFY, although
+; additional SDTR or WDTR messages may be sent). COMMAND OUT
+; is handled.
+;
+; INPUTS : DSA - SCSI command, issue_dsa_head
+;
+; CALLS : NOT OK
+;
+; MODIFIES : SCRATCH, issue_dsa_head
+;
+; EXITS : on reselection or selection, go to select_failed
+; otherwise, RETURN so control is passed back to
+; dsa_begin.
+;
+
+ENTRY select
+select:
+
+
+
+
+
+
+
+
+
+
+
+
+ CLEAR TARGET
+
+at 0x0000007f : */ 0x60000200,0x00000000,
+/*
+
+; XXX
+;
+; In effect, SELECTION operations are backgrounded, with execution
+; continuing until code which waits for REQ or a fatal interrupt is
+; encountered.
+;
+; So, for more performance, we could overlap the code which removes
+; the command from the NCRs issue queue with the selection, but
+; at this point I don't want to deal with the error recovery.
+;
+
+
+ SELECT ATN FROM dsa_select, select_failed
+
+at 0x00000081 : */ 0x4300003c,0x000007a4,
+/*
+ JUMP select_msgout, WHEN MSG_OUT
+
+at 0x00000083 : */ 0x860b0000,0x00000214,
+/*
+ENTRY select_msgout
+select_msgout:
+ MOVE FROM dsa_msgout, WHEN MSG_OUT
+
+at 0x00000085 : */ 0x1e000000,0x00000040,
+/*
+
+
+
+
+
+
+
+
+
+
+ RETURN
+
+at 0x00000087 : */ 0x90080000,0x00000000,
+/*
+
+;
+; select_done
+;
+; PURPOSE: continue on to normal data transfer; called as the exit
+; point from dsa_begin.
+;
+; INPUTS: dsa
+;
+; CALLS: OK
+;
+;
+
+select_done:
+
+
+
+
+
+
+
+; After a successful selection, we should get either a CMD phase or
+; some transfer request negotiation message.
+
+ JUMP cmdout, WHEN CMD
+
+at 0x00000089 : */ 0x820b0000,0x00000244,
+/*
+ INT int_err_unexpected_phase, WHEN NOT MSG_IN
+
+at 0x0000008b : */ 0x9f030000,0x00000000,
+/*
+
+select_msg_in:
+ CALL msg_in, WHEN MSG_IN
+
+at 0x0000008d : */ 0x8f0b0000,0x00000404,
+/*
+ JUMP select_msg_in, WHEN MSG_IN
+
+at 0x0000008f : */ 0x870b0000,0x00000234,
+/*
+
+cmdout:
+ INT int_err_unexpected_phase, WHEN NOT CMD
+
+at 0x00000091 : */ 0x9a030000,0x00000000,
+/*
+
+
+
+ENTRY cmdout_cmdout
+cmdout_cmdout:
+
+ MOVE FROM dsa_cmdout, WHEN CMD
+
+at 0x00000093 : */ 0x1a000000,0x00000048,
+/*
+
+
+
+
+;
+; data_transfer
+; other_out
+; other_in
+; other_transfer
+;
+; PURPOSE : handle the main data transfer for a SCSI command in
+; several parts. In the first part, data_transfer, DATA_IN
+; and DATA_OUT phases are allowed, with the user provided
+; code (usually dynamically generated based on the scatter/gather
+; list associated with a SCSI command) called to handle these
+; phases.
+;
+; After control has passed to one of the user provided
+; DATA_IN or DATA_OUT routines, back calls are made to
+; other_transfer_in or other_transfer_out to handle non-DATA IN
+; and DATA OUT phases respectively, with the state of the active
+; data pointer being preserved in TEMP.
+;
+; On completion, the user code passes control to other_transfer
+; which causes DATA_IN and DATA_OUT to result in unexpected_phase
+; interrupts so that data overruns may be trapped.
+;
+; INPUTS : DSA - SCSI command
+;
+; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
+; other_transfer
+;
+; MODIFIES : SCRATCH
+;
+; EXITS : if STATUS IN is detected, signifying command completion,
+; the NCR jumps to command_complete. If MSG IN occurs, a
+; CALL is made to msg_in. Otherwise, other_transfer runs in
+; an infinite loop.
+;
+
+ENTRY data_transfer
+data_transfer:
+ JUMP cmdout_cmdout, WHEN CMD
+
+at 0x00000095 : */ 0x820b0000,0x0000024c,
+/*
+ CALL msg_in, WHEN MSG_IN
+
+at 0x00000097 : */ 0x8f0b0000,0x00000404,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x00000099 : */ 0x9e0b0000,0x00000000,
+/*
+ JUMP do_dataout, WHEN DATA_OUT
+
+at 0x0000009b : */ 0x800b0000,0x0000028c,
+/*
+ JUMP do_datain, WHEN DATA_IN
+
+at 0x0000009d : */ 0x810b0000,0x000002e4,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x0000009f : */ 0x830b0000,0x0000060c,
+/*
+ JUMP data_transfer
+
+at 0x000000a1 : */ 0x80080000,0x00000254,
+/*
+ENTRY end_data_transfer
+end_data_transfer:
+
+;
+; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
+; should be fixed up whenever the nexus changes so it can point to the
+; correct routine for that command.
+;
+
+
+; Nasty jump to dsa->dataout
+do_dataout:
+ CALL dsa_to_scratch
+
+at 0x000000a3 : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
+
+at 0x000000a5 : */ 0x7e345000,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x000000a7 : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x000000a9 : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x000000ab : */ 0x7f370000,0x00000000,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000000ad : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
+
+at 0x000000af : */ 0xc0000004,0x00000000,0x000002d4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000000b2 : */ 0x78380000,0x00000000,
+/*
+dataout_to_jump:
+ MOVE MEMORY 4, 0, dataout_jump + 4
+
+at 0x000000b4 : */ 0xc0000004,0x00000000,0x000002e0,
+/*
+dataout_jump:
+ JUMP 0
+
+at 0x000000b7 : */ 0x80080000,0x00000000,
+/*
+
+; Nasty jump to dsa->dsain
+do_datain:
+ CALL dsa_to_scratch
+
+at 0x000000b9 : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_datain TO SCRATCH0
+
+at 0x000000bb : */ 0x7e345400,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x000000bd : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x000000bf : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x000000c1 : */ 0x7f370000,0x00000000,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000000c3 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
+
+at 0x000000c5 : */ 0xc0000004,0x00000000,0x0000032c,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000000c8 : */ 0x78380000,0x00000000,
+/*
+ENTRY datain_to_jump
+datain_to_jump:
+ MOVE MEMORY 4, 0, datain_jump + 4
+
+at 0x000000ca : */ 0xc0000004,0x00000000,0x00000338,
+/*
+
+
+
+datain_jump:
+ JUMP 0
+
+at 0x000000cd : */ 0x80080000,0x00000000,
+/*
+
+
+
+; Note that other_out and other_in loop until a non-data phase
+; is discovered, so we only execute return statements when we
+; can go on to the next data phase block move statement.
+
+ENTRY other_out
+other_out:
+
+
+
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000cf : */ 0x9a0b0000,0x00000000,
+/*
+ JUMP msg_in_restart, WHEN MSG_IN
+
+at 0x000000d1 : */ 0x870b0000,0x000003e4,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000d3 : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_IN
+
+at 0x000000d5 : */ 0x990b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000d7 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_out, WHEN NOT DATA_OUT
+
+at 0x000000d9 : */ 0x80030000,0x0000033c,
+/*
+ RETURN
+
+at 0x000000db : */ 0x90080000,0x00000000,
+/*
+
+ENTRY other_in
+other_in:
+
+
+
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000dd : */ 0x9a0b0000,0x00000000,
+/*
+ JUMP msg_in_restart, WHEN MSG_IN
+
+at 0x000000df : */ 0x870b0000,0x000003e4,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000e1 : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_OUT
+
+at 0x000000e3 : */ 0x980b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000e5 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_in, WHEN NOT DATA_IN
+
+at 0x000000e7 : */ 0x81030000,0x00000374,
+/*
+ RETURN
+
+at 0x000000e9 : */ 0x90080000,0x00000000,
+/*
+
+
+ENTRY other_transfer
+other_transfer:
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000eb : */ 0x9a0b0000,0x00000000,
+/*
+ CALL msg_in, WHEN MSG_IN
+
+at 0x000000ed : */ 0x8f0b0000,0x00000404,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000ef : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_OUT
+
+at 0x000000f1 : */ 0x980b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_IN
+
+at 0x000000f3 : */ 0x990b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000f5 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_transfer
+
+at 0x000000f7 : */ 0x80080000,0x000003ac,
+/*
+
+;
+; msg_in_restart
+; msg_in
+; munge_msg
+;
+; PURPOSE : process messages from a target. msg_in is called when the
+; caller hasn't read the first byte of the message. munge_message
+; is called when the caller has read the first byte of the message,
+; and left it in SFBR. msg_in_restart is called when the caller
+; hasn't read the first byte of the message, and wishes RETURN
+; to transfer control back to the address of the conditional
+; CALL instruction rather than to the instruction after it.
+;
+; Various int_* interrupts are generated when the host system
+; needs to intervene, as is the case with SDTR, WDTR, and
+; INITIATE RECOVERY messages.
+;
+; When the host system handles one of these interrupts,
+; it can respond by reentering at reject_message,
+; which rejects the message and returns control to
+; the caller of msg_in or munge_msg, accept_message
+; which clears ACK and returns control, or reply_message
+; which sends the message pointed to by the DSA
+; msgout_other table indirect field.
+;
+; DISCONNECT messages are handled by moving the command
+; to the reconnect_dsa_queue.
+;
+; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
+; only)
+;
+; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
+;
+; MODIFIES : SCRATCH, DSA on DISCONNECT
+;
+; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
+; and normal return from message handlers running under
+; Linux, control is returned to the caller. Receipt
+; of DISCONNECT messages pass control to dsa_schedule.
+;
+ENTRY msg_in_restart
+msg_in_restart:
+; XXX - hackish
+;
+; Since it's easier to debug changes to the statically
+; compiled code, rather than the dynamically generated
+; stuff, such as
+;
+; MOVE x, y, WHEN data_phase
+; CALL other_z, WHEN NOT data_phase
+; MOVE x, y, WHEN data_phase
+;
+; I'd like to have certain routines (notably the message handler)
+; restart on the conditional call rather than the next instruction.
+;
+; So, subtract 8 from the return address
+
+ MOVE TEMP0 + 0xf8 TO TEMP0
+
+at 0x000000f9 : */ 0x7e1cf800,0x00000000,
+/*
+ MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
+
+at 0x000000fb : */ 0x7f1dff00,0x00000000,
+/*
+ MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
+
+at 0x000000fd : */ 0x7f1eff00,0x00000000,
+/*
+ MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
+
+at 0x000000ff : */ 0x7f1fff00,0x00000000,
+/*
+
+ENTRY msg_in
+msg_in:
+ MOVE 1, msg_buf, WHEN MSG_IN
+
+at 0x00000101 : */ 0x0f000001,0x00000000,
+/*
+
+munge_msg:
+ JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
+
+at 0x00000103 : */ 0x800c0001,0x00000524,
+/*
+ JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
+
+at 0x00000105 : */ 0x800cdf20,0x0000044c,
+/*
+;
+; XXX - I've seen a handful of broken SCSI devices which fail to issue
+; a SAVE POINTERS message before disconnecting in the middle of
+; a transfer, assuming that the DATA POINTER will be implicitly
+; restored.
+;
+; Historically, I've often done an implicit save when the DISCONNECT
+; message is processed. We may want to consider having the option of
+; doing that here.
+;
+ JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
+
+at 0x00000107 : */ 0x800c0002,0x00000454,
+/*
+ JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
+
+at 0x00000109 : */ 0x800c0003,0x000004b8,
+/*
+ JUMP munge_disconnect, IF 0x04 ; DISCONNECT
+
+at 0x0000010b : */ 0x800c0004,0x0000051c,
+/*
+ INT int_msg_1, IF 0x07 ; MESSAGE REJECT
+
+at 0x0000010d : */ 0x980c0007,0x01020000,
+/*
+ INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
+
+at 0x0000010f : */ 0x980c000f,0x01020000,
+/*
+
+
+
+ JUMP reject_message
+
+at 0x00000111 : */ 0x80080000,0x000005b4,
+/*
+
+munge_2:
+ JUMP reject_message
+
+at 0x00000113 : */ 0x80080000,0x000005b4,
+/*
+;
+; The SCSI standard allows targets to recover from transient
+; error conditions by backing up the data pointer with a
+; RESTORE POINTERS message.
+;
+; So, we must save and restore the _residual_ code as well as
+; the current instruction pointer. Because of this messiness,
+; it is simpler to put dynamic code in the dsa for this and to
+; just do a simple jump down there.
+;
+
+munge_save_data_pointer:
+ MOVE DSA0 + dsa_save_data_pointer TO SFBR
+
+at 0x00000115 : */ 0x76100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000117 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 + 0xff TO SFBR WITH CARRY
+
+at 0x00000119 : */ 0x7711ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x0000011b : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 + 0xff TO SFBR WITH CARRY
+
+at 0x0000011d : */ 0x7712ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x0000011f : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 + 0xff TO SFBR WITH CARRY
+
+at 0x00000121 : */ 0x7713ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x00000123 : */ 0x6a370000,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000125 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
+
+at 0x00000127 : */ 0xc0000004,0x00000000,0x000004b4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x0000012a : */ 0x78380000,0x00000000,
+/*
+jump_dsa_save:
+ JUMP 0
+
+at 0x0000012c : */ 0x80080000,0x00000000,
+/*
+
+munge_restore_pointers:
+ MOVE DSA0 + dsa_restore_pointers TO SFBR
+
+at 0x0000012e : */ 0x76100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000130 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 + 0xff TO SFBR WITH CARRY
+
+at 0x00000132 : */ 0x7711ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x00000134 : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 + 0xff TO SFBR WITH CARRY
+
+at 0x00000136 : */ 0x7712ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x00000138 : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 + 0xff TO SFBR WITH CARRY
+
+at 0x0000013a : */ 0x7713ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x0000013c : */ 0x6a370000,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x0000013e : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
+
+at 0x00000140 : */ 0xc0000004,0x00000000,0x00000518,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000143 : */ 0x78380000,0x00000000,
+/*
+jump_dsa_restore:
+ JUMP 0
+
+at 0x00000145 : */ 0x80080000,0x00000000,
+/*
+
+
+munge_disconnect:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ JUMP dsa_schedule
+
+at 0x00000147 : */ 0x80080000,0x00000168,
+/*
+
+
+
+
+
+munge_extended:
+ CLEAR ACK
+
+at 0x00000149 : */ 0x60000040,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN NOT MSG_IN
+
+at 0x0000014b : */ 0x9f030000,0x00000000,
+/*
+ MOVE 1, msg_buf + 1, WHEN MSG_IN
+
+at 0x0000014d : */ 0x0f000001,0x00000001,
+/*
+ JUMP munge_extended_2, IF 0x02
+
+at 0x0000014f : */ 0x800c0002,0x00000554,
+/*
+ JUMP munge_extended_3, IF 0x03
+
+at 0x00000151 : */ 0x800c0003,0x00000584,
+/*
+ JUMP reject_message
+
+at 0x00000153 : */ 0x80080000,0x000005b4,
+/*
+
+munge_extended_2:
+ CLEAR ACK
+
+at 0x00000155 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 2, WHEN MSG_IN
+
+at 0x00000157 : */ 0x0f000001,0x00000002,
+/*
+ JUMP reject_message, IF NOT 0x02 ; Must be WDTR
+
+at 0x00000159 : */ 0x80040002,0x000005b4,
+/*
+ CLEAR ACK
+
+at 0x0000015b : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 3, WHEN MSG_IN
+
+at 0x0000015d : */ 0x0f000001,0x00000003,
+/*
+ INT int_msg_wdtr
+
+at 0x0000015f : */ 0x98080000,0x01000000,
+/*
+
+munge_extended_3:
+ CLEAR ACK
+
+at 0x00000161 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 2, WHEN MSG_IN
+
+at 0x00000163 : */ 0x0f000001,0x00000002,
+/*
+ JUMP reject_message, IF NOT 0x01 ; Must be SDTR
+
+at 0x00000165 : */ 0x80040001,0x000005b4,
+/*
+ CLEAR ACK
+
+at 0x00000167 : */ 0x60000040,0x00000000,
+/*
+ MOVE 2, msg_buf + 3, WHEN MSG_IN
+
+at 0x00000169 : */ 0x0f000002,0x00000003,
+/*
+ INT int_msg_sdtr
+
+at 0x0000016b : */ 0x98080000,0x01010000,
+/*
+
+ENTRY reject_message
+reject_message:
+ SET ATN
+
+at 0x0000016d : */ 0x58000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000016f : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
+
+at 0x00000171 : */ 0x0e000001,0x00000000,
+/*
+ RETURN
+
+at 0x00000173 : */ 0x90080000,0x00000000,
+/*
+
+ENTRY accept_message
+accept_message:
+ CLEAR ATN
+
+at 0x00000175 : */ 0x60000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000177 : */ 0x60000040,0x00000000,
+/*
+ RETURN
+
+at 0x00000179 : */ 0x90080000,0x00000000,
+/*
+
+ENTRY respond_message
+respond_message:
+ SET ATN
+
+at 0x0000017b : */ 0x58000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000017d : */ 0x60000040,0x00000000,
+/*
+ MOVE FROM dsa_msgout_other, WHEN MSG_OUT
+
+at 0x0000017f : */ 0x1e000000,0x00000068,
+/*
+ RETURN
+
+at 0x00000181 : */ 0x90080000,0x00000000,
+/*
+
+;
+; command_complete
+;
+; PURPOSE : handle command termination when STATUS IN is detected by reading
+; a status byte followed by a command termination message.
+;
+; Normal termination results in an INTFLY instruction, and
+; the host system can pick out which command terminated by
+; examining the MESSAGE and STATUS buffers of all currently
+; executing commands;
+;
+; Abnormal (CHECK_CONDITION) termination results in an
+; int_err_check_condition interrupt so that a REQUEST SENSE
+; command can be issued out-of-order so that no other command
+; clears the contingent allegiance condition.
+;
+;
+; INPUTS : DSA - command
+;
+; CALLS : OK
+;
+; EXITS : On successful termination, control is passed to schedule.
+; On abnormal termination, the user will usually modify the
+; DSA fields and corresponding buffers and return control
+; to select.
+;
+
+ENTRY command_complete
+command_complete:
+ MOVE FROM dsa_status, WHEN STATUS
+
+at 0x00000183 : */ 0x1b000000,0x00000060,
+/*
+
+ MOVE SFBR TO SCRATCH0 ; Save status
+
+at 0x00000185 : */ 0x6a340000,0x00000000,
+/*
+
+ENTRY command_complete_msgin
+command_complete_msgin:
+ MOVE FROM dsa_msgin, WHEN MSG_IN
+
+at 0x00000187 : */ 0x1f000000,0x00000058,
+/*
+; Indicate that we should be expecting a disconnect
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000189 : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000018b : */ 0x60000040,0x00000000,
+/*
+
+ WAIT DISCONNECT
+
+at 0x0000018d : */ 0x48000000,0x00000000,
+/*
+
+;
+; The SCSI specification states that when a UNIT ATTENTION condition
+; is pending, as indicated by a CHECK CONDITION status message,
+; the target shall revert to asynchronous transfers. Since
+; synchronous transfers parameters are maintained on a per INITIATOR/TARGET
+; basis, and returning control to our scheduler could work on a command
+; running on another lun on that target using the old parameters, we must
+; interrupt the host processor to get them changed, or change them ourselves.
+;
+; Once SCSI-II tagged queueing is implemented, things will be even more
+; hairy, since contingent allegiance conditions exist on a per-target/lun
+; basis, and issuing a new command with a different tag would clear it.
+; In these cases, we must interrupt the host processor to get a request
+; added to the HEAD of the queue with the request sense command, or we
+; must automatically issue the request sense command.
+
+
+
+
+
+ INTFLY
+
+at 0x0000018f : */ 0x98180000,0x00000000,
+/*
+
+
+
+
+
+ JUMP schedule
+
+at 0x00000191 : */ 0x80080000,0x00000000,
+/*
+command_failed:
+ INT int_err_check_condition
+
+at 0x00000193 : */ 0x98080000,0x00030000,
+/*
+
+
+
+
+;
+; wait_reselect
+;
+; PURPOSE : This is essentially the idle routine, where control lands
+; when there are no new processes to schedule. wait_reselect
+; waits for reselection, selection, and new commands.
+;
+; When a successful reselection occurs, with the aid
+; of fixed up code in each DSA, wait_reselect walks the
+; reconnect_dsa_queue, asking each dsa if the target ID
+; and LUN match its.
+;
+; If a match is found, a call is made back to reselected_ok,
+; which through the miracles of self modifying code, extracts
+; the found DSA from the reconnect_dsa_queue and then
+; returns control to the DSAs thread of execution.
+;
+; INPUTS : NONE
+;
+; CALLS : OK
+;
+; MODIFIES : DSA,
+;
+; EXITS : On successful reselection, control is returned to the
+; DSA which called reselected_ok. If the WAIT RESELECT
+; was interrupted by a new commands arrival signaled by
+; SIG_P, control is passed to schedule. If the NCR is
+; selected, the host system is interrupted with an
+; int_err_selected which is usually responded to by
+; setting DSP to the target_abort address.
+
+ENTRY wait_reselect
+wait_reselect:
+
+
+
+
+
+
+ WAIT RESELECT wait_reselect_failed
+
+at 0x00000195 : */ 0x50000000,0x0000076c,
+/*
+
+reselected:
+
+
+
+ CLEAR TARGET
+
+at 0x00000197 : */ 0x60000200,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000199 : */ 0x78380000,0x00000000,
+/*
+ ; Read all data needed to reestablish the nexus -
+ MOVE 1, reselected_identify, WHEN MSG_IN
+
+at 0x0000019b : */ 0x0f000001,0x00000000,
+/*
+ ; We used to CLEAR ACK here.
+
+
+
+
+
+ ; Point DSA at the current head of the disconnected queue.
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x0000019d : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
+
+at 0x0000019f : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000001a2 : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x000001a4 : */ 0x88080000,0x00000980,
+/*
+
+ ; Fix the update-next pointer so that the reconnect_dsa_head
+ ; pointer is the one that will be updated if this DSA is a hit
+ ; and we remove it from the queue.
+
+ MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok + 8
+
+at 0x000001a6 : */ 0xc0000004,0x00000000,0x00000758,
+/*
+
+ENTRY reselected_check_next
+reselected_check_next:
+
+
+
+ ; Check for a NULL pointer.
+ MOVE DSA0 TO SFBR
+
+at 0x000001a9 : */ 0x72100000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001ab : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x000001ad : */ 0x72110000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001af : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x000001b1 : */ 0x72120000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001b3 : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x000001b5 : */ 0x72130000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001b7 : */ 0x80040000,0x000006ec,
+/*
+ INT int_err_unexpected_reselect
+
+at 0x000001b9 : */ 0x98080000,0x00020000,
+/*
+
+reselected_not_end:
+ ;
+ ; XXX the ALU is only eight bits wide, and the assembler
+ ; wont do the dirt work for us. As long as dsa_check_reselect
+ ; is negative, we need to sign extend with 1 bits to the full
+ ; 32 bit width of the address.
+ ;
+ ; A potential work around would be to have a known alignment
+ ; of the DSA structure such that the base address plus
+ ; dsa_check_reselect doesn't require carrying from bytes
+ ; higher than the LSB.
+ ;
+
+ MOVE DSA0 TO SFBR
+
+at 0x000001bb : */ 0x72100000,0x00000000,
+/*
+ MOVE SFBR + dsa_check_reselect TO SCRATCH0
+
+at 0x000001bd : */ 0x6e340000,0x00000000,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x000001bf : */ 0x72110000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
+
+at 0x000001c1 : */ 0x6f35ff00,0x00000000,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x000001c3 : */ 0x72120000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
+
+at 0x000001c5 : */ 0x6f36ff00,0x00000000,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x000001c7 : */ 0x72130000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
+
+at 0x000001c9 : */ 0x6f37ff00,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000001cb : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, reselected_check + 4
+
+at 0x000001cd : */ 0xc0000004,0x00000000,0x0000074c,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000001d0 : */ 0x78380000,0x00000000,
+/*
+reselected_check:
+ JUMP 0
+
+at 0x000001d2 : */ 0x80080000,0x00000000,
+/*
+
+
+;
+;
+ENTRY reselected_ok
+reselected_ok:
+ MOVE MEMORY 4, 0, 0 ; Patched : first word
+
+at 0x000001d4 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ ; is address of
+ ; successful dsa_next
+ ; Second word is last
+ ; unsuccessful dsa_next,
+ ; starting with
+ ; dsa_reconnect_head
+ ; We used to CLEAR ACK here.
+
+
+
+
+
+
+ RETURN ; Return control to where
+
+at 0x000001d7 : */ 0x90080000,0x00000000,
+/*
+
+
+
+
+selected:
+ INT int_err_selected;
+
+at 0x000001d9 : */ 0x98080000,0x00010000,
+/*
+
+;
+; A select or reselect failure can be caused by one of two conditions :
+; 1. SIG_P was set. This will be the case if the user has written
+; a new value to a previously NULL head of the issue queue.
+;
+; 2. The NCR53c810 was selected or reselected by another device.
+;
+; 3. The bus was already busy since we were selected or reselected
+; before starting the command.
+
+wait_reselect_failed:
+
+
+
+; Check selected bit.
+ MOVE SIST0 & 0x20 TO SFBR
+
+at 0x000001db : */ 0x74422000,0x00000000,
+/*
+ JUMP selected, IF 0x20
+
+at 0x000001dd : */ 0x800c0020,0x00000764,
+/*
+; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
+ MOVE CTEST2 & 0x40 TO SFBR
+
+at 0x000001df : */ 0x741a4000,0x00000000,
+/*
+ JUMP schedule, IF 0x40
+
+at 0x000001e1 : */ 0x800c0040,0x00000000,
+/*
+; Check connected bit.
+; FIXME: this needs to change if we support target mode
+ MOVE ISTAT & 0x08 TO SFBR
+
+at 0x000001e3 : */ 0x74140800,0x00000000,
+/*
+ JUMP reselected, IF 0x08
+
+at 0x000001e5 : */ 0x800c0008,0x0000065c,
+/*
+; FIXME : Something bogus happened, and we shouldn't fail silently.
+
+
+
+ INT int_debug_panic
+
+at 0x000001e7 : */ 0x98080000,0x030b0000,
+/*
+
+
+
+select_failed:
+
+
+
+; Otherwise, mask the selected and reselected bits off SIST0
+ MOVE SIST0 & 0x30 TO SFBR
+
+at 0x000001e9 : */ 0x74423000,0x00000000,
+/*
+ JUMP selected, IF 0x20
+
+at 0x000001eb : */ 0x800c0020,0x00000764,
+/*
+ JUMP reselected, IF 0x10
+
+at 0x000001ed : */ 0x800c0010,0x0000065c,
+/*
+; If SIGP is set, the user just gave us another command, and
+; we should restart or return to the scheduler.
+; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
+ MOVE CTEST2 & 0x40 TO SFBR
+
+at 0x000001ef : */ 0x741a4000,0x00000000,
+/*
+ JUMP select, IF 0x40
+
+at 0x000001f1 : */ 0x800c0040,0x000001fc,
+/*
+; Check connected bit.
+; FIXME: this needs to change if we support target mode
+; FIXME: is this really necessary?
+ MOVE ISTAT & 0x08 TO SFBR
+
+at 0x000001f3 : */ 0x74140800,0x00000000,
+/*
+ JUMP reselected, IF 0x08
+
+at 0x000001f5 : */ 0x800c0008,0x0000065c,
+/*
+; FIXME : Something bogus happened, and we shouldn't fail silently.
+
+
+
+ INT int_debug_panic
+
+at 0x000001f7 : */ 0x98080000,0x030b0000,
+/*
+
+
+;
+; test_1
+; test_2
+;
+; PURPOSE : run some verification tests on the NCR. test_1
+; copies test_src to test_dest and interrupts the host
+; processor, testing for cache coherency and interrupt
+; problems in the processes.
+;
+; test_2 runs a command with offsets relative to the
+; DSA on entry, and is useful for miscellaneous experimentation.
+;
+
+; Verify that interrupts are working correctly and that we don't
+; have a cache invalidation problem.
+
+ABSOLUTE test_src = 0, test_dest = 0
+ENTRY test_1
+test_1:
+ MOVE MEMORY 4, test_src, test_dest
+
+at 0x000001f9 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ INT int_test_1
+
+at 0x000001fc : */ 0x98080000,0x04000000,
+/*
+
+;
+; Run arbitrary commands, with test code establishing a DSA
+;
+
+ENTRY test_2
+test_2:
+ CLEAR TARGET
+
+at 0x000001fe : */ 0x60000200,0x00000000,
+/*
+ SELECT ATN FROM 0, test_2_fail
+
+at 0x00000200 : */ 0x43000000,0x00000850,
+/*
+ JUMP test_2_msgout, WHEN MSG_OUT
+
+at 0x00000202 : */ 0x860b0000,0x00000810,
+/*
+ENTRY test_2_msgout
+test_2_msgout:
+ MOVE FROM 8, WHEN MSG_OUT
+
+at 0x00000204 : */ 0x1e000000,0x00000008,
+/*
+ MOVE FROM 16, WHEN CMD
+
+at 0x00000206 : */ 0x1a000000,0x00000010,
+/*
+ MOVE FROM 24, WHEN DATA_IN
+
+at 0x00000208 : */ 0x19000000,0x00000018,
+/*
+ MOVE FROM 32, WHEN STATUS
+
+at 0x0000020a : */ 0x1b000000,0x00000020,
+/*
+ MOVE FROM 40, WHEN MSG_IN
+
+at 0x0000020c : */ 0x1f000000,0x00000028,
+/*
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x0000020e : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000210 : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x00000212 : */ 0x48000000,0x00000000,
+/*
+test_2_fail:
+ INT int_test_2
+
+at 0x00000214 : */ 0x98080000,0x04010000,
+/*
+
+ENTRY debug_break
+debug_break:
+ INT int_debug_break
+
+at 0x00000216 : */ 0x98080000,0x03000000,
+/*
+
+;
+; initiator_abort
+; target_abort
+;
+; PURPOSE : Abort the currently established nexus from with initiator
+; or target mode.
+;
+;
+
+ENTRY target_abort
+target_abort:
+ SET TARGET
+
+at 0x00000218 : */ 0x58000200,0x00000000,
+/*
+ DISCONNECT
+
+at 0x0000021a : */ 0x48000000,0x00000000,
+/*
+ CLEAR TARGET
+
+at 0x0000021c : */ 0x60000200,0x00000000,
+/*
+ JUMP schedule
+
+at 0x0000021e : */ 0x80080000,0x00000000,
+/*
+
+ENTRY initiator_abort
+initiator_abort:
+ SET ATN
+
+at 0x00000220 : */ 0x58000008,0x00000000,
+/*
+;
+; The SCSI-I specification says that targets may go into MSG out at
+; their leisure upon receipt of the ATN single. On all versions of the
+; specification, we can't change phases until REQ transitions true->false,
+; so we need to sink/source one byte of data to allow the transition.
+;
+; For the sake of safety, we'll only source one byte of data in all
+; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
+; arbitrary number of bytes.
+ JUMP spew_cmd, WHEN CMD
+
+at 0x00000222 : */ 0x820b0000,0x000008b8,
+/*
+ JUMP eat_msgin, WHEN MSG_IN
+
+at 0x00000224 : */ 0x870b0000,0x000008c8,
+/*
+ JUMP eat_datain, WHEN DATA_IN
+
+at 0x00000226 : */ 0x810b0000,0x000008f8,
+/*
+ JUMP eat_status, WHEN STATUS
+
+at 0x00000228 : */ 0x830b0000,0x000008e0,
+/*
+ JUMP spew_dataout, WHEN DATA_OUT
+
+at 0x0000022a : */ 0x800b0000,0x00000910,
+/*
+ JUMP sated
+
+at 0x0000022c : */ 0x80080000,0x00000918,
+/*
+spew_cmd:
+ MOVE 1, NCR53c7xx_zero, WHEN CMD
+
+at 0x0000022e : */ 0x0a000001,0x00000000,
+/*
+ JUMP sated
+
+at 0x00000230 : */ 0x80080000,0x00000918,
+/*
+eat_msgin:
+ MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
+
+at 0x00000232 : */ 0x0f000001,0x00000000,
+/*
+ JUMP eat_msgin, WHEN MSG_IN
+
+at 0x00000234 : */ 0x870b0000,0x000008c8,
+/*
+ JUMP sated
+
+at 0x00000236 : */ 0x80080000,0x00000918,
+/*
+eat_status:
+ MOVE 1, NCR53c7xx_sink, WHEN STATUS
+
+at 0x00000238 : */ 0x0b000001,0x00000000,
+/*
+ JUMP eat_status, WHEN STATUS
+
+at 0x0000023a : */ 0x830b0000,0x000008e0,
+/*
+ JUMP sated
+
+at 0x0000023c : */ 0x80080000,0x00000918,
+/*
+eat_datain:
+ MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
+
+at 0x0000023e : */ 0x09000001,0x00000000,
+/*
+ JUMP eat_datain, WHEN DATA_IN
+
+at 0x00000240 : */ 0x810b0000,0x000008f8,
+/*
+ JUMP sated
+
+at 0x00000242 : */ 0x80080000,0x00000918,
+/*
+spew_dataout:
+ MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
+
+at 0x00000244 : */ 0x08000001,0x00000000,
+/*
+sated:
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000246 : */ 0x7c027f00,0x00000000,
+/*
+ MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
+
+at 0x00000248 : */ 0x0e000001,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x0000024a : */ 0x48000000,0x00000000,
+/*
+ INT int_norm_aborted
+
+at 0x0000024c : */ 0x98080000,0x02040000,
+/*
+
+;
+; dsa_to_scratch
+; scratch_to_dsa
+;
+; PURPOSE :
+; The NCR chips cannot do a move memory instruction with the DSA register
+; as the source or destination. So, we provide a couple of subroutines
+; that let us switch between the DSA register and scratch register.
+;
+; Memory moves to/from the DSPS register also don't work, but we
+; don't use them.
+;
+;
+
+
+dsa_to_scratch:
+ MOVE DSA0 TO SFBR
+
+at 0x0000024e : */ 0x72100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000250 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x00000252 : */ 0x72110000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x00000254 : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x00000256 : */ 0x72120000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x00000258 : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x0000025a : */ 0x72130000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x0000025c : */ 0x6a370000,0x00000000,
+/*
+ RETURN
+
+at 0x0000025e : */ 0x90080000,0x00000000,
+/*
+
+scratch_to_dsa:
+ MOVE SCRATCH0 TO SFBR
+
+at 0x00000260 : */ 0x72340000,0x00000000,
+/*
+ MOVE SFBR TO DSA0
+
+at 0x00000262 : */ 0x6a100000,0x00000000,
+/*
+ MOVE SCRATCH1 TO SFBR
+
+at 0x00000264 : */ 0x72350000,0x00000000,
+/*
+ MOVE SFBR TO DSA1
+
+at 0x00000266 : */ 0x6a110000,0x00000000,
+/*
+ MOVE SCRATCH2 TO SFBR
+
+at 0x00000268 : */ 0x72360000,0x00000000,
+/*
+ MOVE SFBR TO DSA2
+
+at 0x0000026a : */ 0x6a120000,0x00000000,
+/*
+ MOVE SCRATCH3 TO SFBR
+
+at 0x0000026c : */ 0x72370000,0x00000000,
+/*
+ MOVE SFBR TO DSA3
+
+at 0x0000026e : */ 0x6a130000,0x00000000,
+/*
+ RETURN
+
+at 0x00000270 : */ 0x90080000,0x00000000,
+};
+
+#define A_NCR53c7xx_msg_abort 0x00000000
+u32 A_NCR53c7xx_msg_abort_used[] = {
+ 0x00000249,
+};
+
+#define A_NCR53c7xx_msg_reject 0x00000000
+u32 A_NCR53c7xx_msg_reject_used[] = {
+ 0x00000172,
+};
+
+#define A_NCR53c7xx_sink 0x00000000
+u32 A_NCR53c7xx_sink_used[] = {
+ 0x00000233,
+ 0x00000239,
+ 0x0000023f,
+};
+
+#define A_NCR53c7xx_zero 0x00000000
+u32 A_NCR53c7xx_zero_used[] = {
+ 0x0000022f,
+ 0x00000245,
+};
+
+#define A_NOP_insn 0x00000000
+u32 A_NOP_insn_used[] = {
+ 0x00000010,
+};
+
+#define A_addr_reconnect_dsa_head 0x00000000
+u32 A_addr_reconnect_dsa_head_used[] = {
+ 0x000001a7,
+};
+
+#define A_addr_scratch 0x00000000
+u32 A_addr_scratch_used[] = {
+ 0x00000004,
+ 0x0000001b,
+ 0x00000046,
+ 0x00000067,
+ 0x00000073,
+ 0x000000b0,
+ 0x000000c6,
+ 0x00000128,
+ 0x00000141,
+ 0x000001a1,
+ 0x000001ce,
+};
+
+#define A_addr_temp 0x00000000
+u32 A_addr_temp_used[] = {
+ 0x00000025,
+ 0x00000034,
+};
+
+#define A_dmode_memory_to_memory 0x00000000
+u32 A_dmode_memory_to_memory_used[] = {
+ 0x00000005,
+ 0x0000001c,
+ 0x00000027,
+ 0x00000035,
+ 0x00000047,
+ 0x00000069,
+ 0x00000075,
+ 0x000000b2,
+ 0x000000c8,
+ 0x0000012a,
+ 0x00000143,
+ 0x00000199,
+ 0x000001a2,
+ 0x000001d0,
+};
+
+#define A_dmode_memory_to_ncr 0x00000000
+u32 A_dmode_memory_to_ncr_used[] = {
+ 0x00000000,
+ 0x00000017,
+ 0x00000030,
+ 0x00000042,
+ 0x0000019d,
+};
+
+#define A_dmode_ncr_to_memory 0x00000000
+u32 A_dmode_ncr_to_memory_used[] = {
+ 0x00000022,
+ 0x00000064,
+ 0x00000070,
+ 0x000000ad,
+ 0x000000c3,
+ 0x00000125,
+ 0x0000013e,
+ 0x000001cb,
+};
+
+#define A_dsa_check_reselect 0x00000000
+u32 A_dsa_check_reselect_used[] = {
+ 0x000001bd,
+};
+
+#define A_dsa_cmdout 0x00000048
+u32 A_dsa_cmdout_used[] = {
+ 0x00000094,
+};
+
+#define A_dsa_cmnd 0x00000038
+u32 A_dsa_cmnd_used[] = {
+};
+
+#define A_dsa_datain 0x00000054
+u32 A_dsa_datain_used[] = {
+ 0x000000bb,
+};
+
+#define A_dsa_dataout 0x00000050
+u32 A_dsa_dataout_used[] = {
+ 0x000000a5,
+};
+
+#define A_dsa_end 0x00000070
+u32 A_dsa_end_used[] = {
+};
+
+#define A_dsa_fields_start 0x00000000
+u32 A_dsa_fields_start_used[] = {
+};
+
+#define A_dsa_msgin 0x00000058
+u32 A_dsa_msgin_used[] = {
+ 0x00000188,
+};
+
+#define A_dsa_msgout 0x00000040
+u32 A_dsa_msgout_used[] = {
+ 0x00000086,
+};
+
+#define A_dsa_msgout_other 0x00000068
+u32 A_dsa_msgout_other_used[] = {
+ 0x00000180,
+};
+
+#define A_dsa_next 0x00000030
+u32 A_dsa_next_used[] = {
+ 0x0000005c,
+};
+
+#define A_dsa_restore_pointers 0x00000000
+u32 A_dsa_restore_pointers_used[] = {
+ 0x0000012e,
+};
+
+#define A_dsa_save_data_pointer 0x00000000
+u32 A_dsa_save_data_pointer_used[] = {
+ 0x00000115,
+};
+
+#define A_dsa_select 0x0000003c
+u32 A_dsa_select_used[] = {
+ 0x00000081,
+};
+
+#define A_dsa_status 0x00000060
+u32 A_dsa_status_used[] = {
+ 0x00000184,
+};
+
+#define A_dsa_temp_addr_array_value 0x00000000
+u32 A_dsa_temp_addr_array_value_used[] = {
+};
+
+#define A_dsa_temp_addr_dsa_value 0x00000000
+u32 A_dsa_temp_addr_dsa_value_used[] = {
+ 0x00000003,
+};
+
+#define A_dsa_temp_addr_new_value 0x00000000
+u32 A_dsa_temp_addr_new_value_used[] = {
+};
+
+#define A_dsa_temp_addr_next 0x00000000
+u32 A_dsa_temp_addr_next_used[] = {
+ 0x00000015,
+ 0x0000004e,
+};
+
+#define A_dsa_temp_addr_residual 0x00000000
+u32 A_dsa_temp_addr_residual_used[] = {
+ 0x0000002a,
+ 0x00000039,
+};
+
+#define A_dsa_temp_addr_saved_pointer 0x00000000
+u32 A_dsa_temp_addr_saved_pointer_used[] = {
+ 0x00000026,
+ 0x00000033,
+};
+
+#define A_dsa_temp_addr_saved_residual 0x00000000
+u32 A_dsa_temp_addr_saved_residual_used[] = {
+ 0x0000002b,
+ 0x00000038,
+};
+
+#define A_dsa_temp_lun 0x00000000
+u32 A_dsa_temp_lun_used[] = {
+ 0x0000004b,
+};
+
+#define A_dsa_temp_next 0x00000000
+u32 A_dsa_temp_next_used[] = {
+ 0x0000001a,
+};
+
+#define A_dsa_temp_sync 0x00000000
+u32 A_dsa_temp_sync_used[] = {
+ 0x00000053,
+};
+
+#define A_dsa_temp_target 0x00000000
+u32 A_dsa_temp_target_used[] = {
+ 0x00000040,
+};
+
+#define A_int_debug_break 0x03000000
+u32 A_int_debug_break_used[] = {
+ 0x00000217,
+};
+
+#define A_int_debug_panic 0x030b0000
+u32 A_int_debug_panic_used[] = {
+ 0x000001e8,
+ 0x000001f8,
+};
+
+#define A_int_err_check_condition 0x00030000
+u32 A_int_err_check_condition_used[] = {
+ 0x00000194,
+};
+
+#define A_int_err_no_phase 0x00040000
+u32 A_int_err_no_phase_used[] = {
+};
+
+#define A_int_err_selected 0x00010000
+u32 A_int_err_selected_used[] = {
+ 0x000001da,
+};
+
+#define A_int_err_unexpected_phase 0x00000000
+u32 A_int_err_unexpected_phase_used[] = {
+ 0x0000008c,
+ 0x00000092,
+ 0x0000009a,
+ 0x000000d0,
+ 0x000000d4,
+ 0x000000d6,
+ 0x000000de,
+ 0x000000e2,
+ 0x000000e4,
+ 0x000000ec,
+ 0x000000f0,
+ 0x000000f2,
+ 0x000000f4,
+ 0x0000014c,
+};
+
+#define A_int_err_unexpected_reselect 0x00020000
+u32 A_int_err_unexpected_reselect_used[] = {
+ 0x000001ba,
+};
+
+#define A_int_msg_1 0x01020000
+u32 A_int_msg_1_used[] = {
+ 0x0000010e,
+ 0x00000110,
+};
+
+#define A_int_msg_sdtr 0x01010000
+u32 A_int_msg_sdtr_used[] = {
+ 0x0000016c,
+};
+
+#define A_int_msg_wdtr 0x01000000
+u32 A_int_msg_wdtr_used[] = {
+ 0x00000160,
+};
+
+#define A_int_norm_aborted 0x02040000
+u32 A_int_norm_aborted_used[] = {
+ 0x0000024d,
+};
+
+#define A_int_norm_command_complete 0x02020000
+u32 A_int_norm_command_complete_used[] = {
+};
+
+#define A_int_norm_disconnected 0x02030000
+u32 A_int_norm_disconnected_used[] = {
+};
+
+#define A_int_norm_reselect_complete 0x02010000
+u32 A_int_norm_reselect_complete_used[] = {
+};
+
+#define A_int_norm_reset 0x02050000
+u32 A_int_norm_reset_used[] = {
+};
+
+#define A_int_norm_select_complete 0x02000000
+u32 A_int_norm_select_complete_used[] = {
+};
+
+#define A_int_test_1 0x04000000
+u32 A_int_test_1_used[] = {
+ 0x000001fd,
+};
+
+#define A_int_test_2 0x04010000
+u32 A_int_test_2_used[] = {
+ 0x00000215,
+};
+
+#define A_int_test_3 0x04020000
+u32 A_int_test_3_used[] = {
+};
+
+#define A_msg_buf 0x00000000
+u32 A_msg_buf_used[] = {
+ 0x00000102,
+ 0x0000014e,
+ 0x00000158,
+ 0x0000015e,
+ 0x00000164,
+ 0x0000016a,
+};
+
+#define A_reconnect_dsa_head 0x00000000
+u32 A_reconnect_dsa_head_used[] = {
+ 0x0000006c,
+ 0x00000074,
+ 0x000001a0,
+};
+
+#define A_reselected_identify 0x00000000
+u32 A_reselected_identify_used[] = {
+ 0x00000045,
+ 0x0000019c,
+};
+
+#define A_reselected_tag 0x00000000
+u32 A_reselected_tag_used[] = {
+};
+
+#define A_schedule 0x00000000
+u32 A_schedule_used[] = {
+ 0x0000007e,
+ 0x00000192,
+ 0x000001e2,
+ 0x0000021f,
+};
+
+#define A_test_dest 0x00000000
+u32 A_test_dest_used[] = {
+ 0x000001fb,
+};
+
+#define A_test_src 0x00000000
+u32 A_test_src_used[] = {
+ 0x000001fa,
+};
+
+#define Ent_accept_message 0x000005d4
+#define Ent_cmdout_cmdout 0x0000024c
+#define Ent_command_complete 0x0000060c
+#define Ent_command_complete_msgin 0x0000061c
+#define Ent_data_transfer 0x00000254
+#define Ent_datain_to_jump 0x00000328
+#define Ent_debug_break 0x00000858
+#define Ent_dsa_code_begin 0x00000000
+#define Ent_dsa_code_check_reselect 0x000000f8
+#define Ent_dsa_code_fix_jump 0x0000003c
+#define Ent_dsa_code_restore_pointers 0x000000c0
+#define Ent_dsa_code_save_data_pointer 0x00000088
+#define Ent_dsa_code_template 0x00000000
+#define Ent_dsa_code_template_end 0x00000168
+#define Ent_dsa_schedule 0x00000168
+#define Ent_dsa_zero 0x00000168
+#define Ent_end_data_transfer 0x0000028c
+#define Ent_initiator_abort 0x00000880
+#define Ent_msg_in 0x00000404
+#define Ent_msg_in_restart 0x000003e4
+#define Ent_other_in 0x00000374
+#define Ent_other_out 0x0000033c
+#define Ent_other_transfer 0x000003ac
+#define Ent_reject_message 0x000005b4
+#define Ent_reselected_check_next 0x000006a4
+#define Ent_reselected_ok 0x00000750
+#define Ent_respond_message 0x000005ec
+#define Ent_select 0x000001fc
+#define Ent_select_msgout 0x00000214
+#define Ent_target_abort 0x00000860
+#define Ent_test_1 0x000007e4
+#define Ent_test_2 0x000007f8
+#define Ent_test_2_msgout 0x00000810
+#define Ent_wait_reselect 0x00000654
+u32 LABELPATCHES[] = {
+ 0x00000008,
+ 0x0000000a,
+ 0x00000013,
+ 0x00000016,
+ 0x0000001f,
+ 0x00000021,
+ 0x0000004f,
+ 0x00000051,
+ 0x0000005b,
+ 0x00000068,
+ 0x0000006f,
+ 0x00000082,
+ 0x00000084,
+ 0x0000008a,
+ 0x0000008e,
+ 0x00000090,
+ 0x00000096,
+ 0x00000098,
+ 0x0000009c,
+ 0x0000009e,
+ 0x000000a0,
+ 0x000000a2,
+ 0x000000a4,
+ 0x000000b1,
+ 0x000000b6,
+ 0x000000ba,
+ 0x000000c7,
+ 0x000000cc,
+ 0x000000d2,
+ 0x000000d8,
+ 0x000000da,
+ 0x000000e0,
+ 0x000000e6,
+ 0x000000e8,
+ 0x000000ee,
+ 0x000000f6,
+ 0x000000f8,
+ 0x00000104,
+ 0x00000106,
+ 0x00000108,
+ 0x0000010a,
+ 0x0000010c,
+ 0x00000112,
+ 0x00000114,
+ 0x00000129,
+ 0x00000142,
+ 0x00000148,
+ 0x00000150,
+ 0x00000152,
+ 0x00000154,
+ 0x0000015a,
+ 0x00000166,
+ 0x00000196,
+ 0x000001a5,
+ 0x000001a8,
+ 0x000001ac,
+ 0x000001b0,
+ 0x000001b4,
+ 0x000001b8,
+ 0x000001cf,
+ 0x000001de,
+ 0x000001e6,
+ 0x000001ec,
+ 0x000001ee,
+ 0x000001f2,
+ 0x000001f6,
+ 0x00000201,
+ 0x00000203,
+ 0x00000223,
+ 0x00000225,
+ 0x00000227,
+ 0x00000229,
+ 0x0000022b,
+ 0x0000022d,
+ 0x00000231,
+ 0x00000235,
+ 0x00000237,
+ 0x0000023b,
+ 0x0000023d,
+ 0x00000241,
+ 0x00000243,
+};
+
+struct {
+ u32 offset;
+ void *address;
+} EXTERNAL_PATCHES[] = {
+};
+
+u32 INSTRUCTIONS = 301;
+u32 PATCHES = 81;
+u32 EXTERNAL_PATCHES_LEN = 0;
diff --git a/linux/src/drivers/scsi/53c8xx_u.h b/linux/src/drivers/scsi/53c8xx_u.h
new file mode 100644
index 0000000..c3d486f
--- /dev/null
+++ b/linux/src/drivers/scsi/53c8xx_u.h
@@ -0,0 +1,97 @@
+#undef A_NCR53c7xx_msg_abort
+#undef A_NCR53c7xx_msg_reject
+#undef A_NCR53c7xx_sink
+#undef A_NCR53c7xx_zero
+#undef A_NOP_insn
+#undef A_addr_reconnect_dsa_head
+#undef A_addr_scratch
+#undef A_addr_temp
+#undef A_dmode_memory_to_memory
+#undef A_dmode_memory_to_ncr
+#undef A_dmode_ncr_to_memory
+#undef A_dsa_check_reselect
+#undef A_dsa_cmdout
+#undef A_dsa_cmnd
+#undef A_dsa_datain
+#undef A_dsa_dataout
+#undef A_dsa_end
+#undef A_dsa_fields_start
+#undef A_dsa_msgin
+#undef A_dsa_msgout
+#undef A_dsa_msgout_other
+#undef A_dsa_next
+#undef A_dsa_restore_pointers
+#undef A_dsa_save_data_pointer
+#undef A_dsa_select
+#undef A_dsa_status
+#undef A_dsa_temp_addr_array_value
+#undef A_dsa_temp_addr_dsa_value
+#undef A_dsa_temp_addr_new_value
+#undef A_dsa_temp_addr_next
+#undef A_dsa_temp_addr_residual
+#undef A_dsa_temp_addr_saved_pointer
+#undef A_dsa_temp_addr_saved_residual
+#undef A_dsa_temp_lun
+#undef A_dsa_temp_next
+#undef A_dsa_temp_sync
+#undef A_dsa_temp_target
+#undef A_int_debug_break
+#undef A_int_debug_panic
+#undef A_int_err_check_condition
+#undef A_int_err_no_phase
+#undef A_int_err_selected
+#undef A_int_err_unexpected_phase
+#undef A_int_err_unexpected_reselect
+#undef A_int_msg_1
+#undef A_int_msg_sdtr
+#undef A_int_msg_wdtr
+#undef A_int_norm_aborted
+#undef A_int_norm_command_complete
+#undef A_int_norm_disconnected
+#undef A_int_norm_reselect_complete
+#undef A_int_norm_reset
+#undef A_int_norm_select_complete
+#undef A_int_test_1
+#undef A_int_test_2
+#undef A_int_test_3
+#undef A_msg_buf
+#undef A_reconnect_dsa_head
+#undef A_reselected_identify
+#undef A_reselected_tag
+#undef A_schedule
+#undef A_test_dest
+#undef A_test_src
+#undef Ent_accept_message
+#undef Ent_cmdout_cmdout
+#undef Ent_command_complete
+#undef Ent_command_complete_msgin
+#undef Ent_data_transfer
+#undef Ent_datain_to_jump
+#undef Ent_debug_break
+#undef Ent_dsa_code_begin
+#undef Ent_dsa_code_check_reselect
+#undef Ent_dsa_code_fix_jump
+#undef Ent_dsa_code_restore_pointers
+#undef Ent_dsa_code_save_data_pointer
+#undef Ent_dsa_code_template
+#undef Ent_dsa_code_template_end
+#undef Ent_dsa_schedule
+#undef Ent_dsa_zero
+#undef Ent_end_data_transfer
+#undef Ent_initiator_abort
+#undef Ent_msg_in
+#undef Ent_msg_in_restart
+#undef Ent_other_in
+#undef Ent_other_out
+#undef Ent_other_transfer
+#undef Ent_reject_message
+#undef Ent_reselected_check_next
+#undef Ent_reselected_ok
+#undef Ent_respond_message
+#undef Ent_select
+#undef Ent_select_msgout
+#undef Ent_target_abort
+#undef Ent_test_1
+#undef Ent_test_2
+#undef Ent_test_2_msgout
+#undef Ent_wait_reselect
diff --git a/linux/src/drivers/scsi/AM53C974.c b/linux/src/drivers/scsi/AM53C974.c
new file mode 100644
index 0000000..da139ce
--- /dev/null
+++ b/linux/src/drivers/scsi/AM53C974.c
@@ -0,0 +1,2270 @@
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/blk.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "AM53C974.h"
+#include "constants.h"
+#include "sd.h"
+
+/* AM53/79C974 (PCscsi) driver release 0.5
+ *
+ * The architecture and much of the code of this device
+ * driver was originally developed by Drew Eckhardt for
+ * the NCR5380. The following copyrights apply:
+ * For the architecture and all pieces of code which can also be found
+ * in the NCR5380 device driver:
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * The AM53C974_nobios_detect code was originally developed by
+ * Robin Cutshaw (robin@xfree86.org) and is used here in a
+ * slightly modified form.
+ *
+ * For the remaining code:
+ * Copyright 1994, D. Frieauff
+ * EMail: fri@rsx42sun0.dofn.de
+ * Phone: x49-7545-8-2256 , x49-7541-42305
+ */
+
+#ifdef AM53C974_DEBUG
+ #define DEB(x) x
+ #ifdef AM53C974_DEBUG_KEYWAIT
+ #define KEYWAIT() AM53C974_keywait()
+ #else
+ #define KEYWAIT()
+ #endif
+ #ifdef AM53C974_DEBUG_INIT
+ #define DEB_INIT(x) x
+ #else
+ #define DEB_INIT(x)
+ #endif
+ #ifdef AM53C974_DEBUG_MSG
+ #define DEB_MSG(x) x
+ #else
+ #define DEB_MSG(x)
+ #endif
+ #ifdef AM53C974_DEB_RESEL
+ #define DEB_RESEL(x) x
+ #else
+ #define DEB_RESEL(x)
+ #endif
+ #ifdef AM53C974_DEBUG_QUEUE
+ #define DEB_QUEUE(x) x
+ #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
+ #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
+ #else
+ #define DEB_QUEUE(x)
+ #define LIST(x,y)
+ #define REMOVE(w,x,y,z)
+ #endif
+ #ifdef AM53C974_DEBUG_INFO
+ #define DEB_INFO(x) x
+ #else
+ #define DEB_INFO(x)
+ #endif
+ #ifdef AM53C974_DEBUG_LINKED
+ #define DEB_LINKED(x) x
+ #else
+ #define DEB_LINKED(x)
+ #endif
+ #ifdef AM53C974_DEBUG_INTR
+ #define DEB_INTR(x) x
+ #else
+ #define DEB_INTR(x)
+ #endif
+#else
+ #define DEB_INIT(x)
+ #define DEB(x)
+ #define DEB_QUEUE(x)
+ #define LIST(x,y)
+ #define REMOVE(w,x,y,z)
+ #define DEB_INFO(x)
+ #define DEB_LINKED(x)
+ #define DEB_INTR(x)
+ #define DEB_MSG(x)
+ #define DEB_RESEL(x)
+ #define KEYWAIT()
+#endif
+ #ifdef AM53C974_DEBUG_ABORT
+ #define DEB_ABORT(x) x
+ #else
+ #define DEB_ABORT(x)
+ #endif
+
+#ifdef VERBOSE_AM53C974_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define INSIDE(x,l,h) ( ((x) >= (l)) && ((x) <= (h)) )
+
+#ifdef AM53C974_DEBUG
+static void AM53C974_print_pci(struct Scsi_Host *instance);
+static void AM53C974_print_phase(struct Scsi_Host *instance);
+static void AM53C974_print_queues(struct Scsi_Host *instance);
+#endif /* AM53C974_DEBUG */
+static void AM53C974_print(struct Scsi_Host *instance);
+static void AM53C974_keywait(void);
+static int AM53C974_bios_detect(Scsi_Host_Template *tpnt);
+static int AM53C974_nobios_detect(Scsi_Host_Template *tpnt);
+static int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config);
+static void AM53C974_config_after_reset(struct Scsi_Host *instance);
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd);
+static __inline__ void run_main(void);
+static void AM53C974_main (void);
+static void AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs);
+static void AM53C974_intr_disconnect(struct Scsi_Host *instance);
+static int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg);
+static __inline__ void AM53C974_set_async(struct Scsi_Host *instance, int target);
+static __inline__ void AM53C974_set_sync(struct Scsi_Host *instance, int target);
+static void AM53C974_information_transfer(struct Scsi_Host *instance,
+ unsigned char statreg, unsigned char isreg,
+ unsigned char instreg, unsigned char cfifo,
+ unsigned char dmastatus);
+static int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd, unsigned char msg);
+static void AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag);
+static void AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg);
+static __inline__ void AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+ unsigned long length, char *data);
+static void AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+ unsigned char statreg);
+static void AM53C974_intr_bus_reset(struct Scsi_Host *instance);
+
+static struct Scsi_Host *first_instance = NULL;
+static Scsi_Host_Template *the_template = NULL;
+static struct Scsi_Host *first_host = NULL; /* Head of list of AMD boards */
+static volatile int main_running = 0;
+static int commandline_current = 0;
+override_t overrides[7] = { {-1, 0, 0, 0}, }; /* LILO overrides */
+
+struct proc_dir_entry proc_scsi_am53c974 = {
+ PROC_SCSI_AM53C974, 8, "am53c974",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#ifdef AM53C974_DEBUG
+static int deb_stop = 1;
+
+/**************************************************************************
+ * Function : void AM53C974_print_pci(struct Scsi_Host *instance)
+ *
+ * Purpose : dump the PCI registers for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_pci(struct Scsi_Host *instance)
+{
+int i;
+unsigned short vendor_id, device_id, command, status, scratch[8];
+unsigned long class_revision, base;
+unsigned char irq, cache_line_size, latency_timer, header_type;
+
+AM53C974_PCIREG_OPEN();
+
+for (i = 0; i < 8; i++) *(scratch + i) = AM53C974_PCIREG_READ_WORD(instance, PCI_SCRATCH_REG_0 + 2*i);
+vendor_id = AM53C974_PCIREG_READ_WORD(instance, PCI_VENDOR_ID);
+device_id = AM53C974_PCIREG_READ_WORD(instance, PCI_DEVICE_ID);
+command = AM53C974_PCIREG_READ_WORD(instance, PCI_COMMAND);
+status = AM53C974_PCIREG_READ_WORD(instance, PCI_STATUS);
+class_revision = AM53C974_PCIREG_READ_DWORD(instance, PCI_CLASS_REVISION);
+cache_line_size = AM53C974_PCIREG_READ_BYTE(instance, PCI_CACHE_LINE_SIZE);
+latency_timer = AM53C974_PCIREG_READ_BYTE(instance, PCI_LATENCY_TIMER);
+header_type = AM53C974_PCIREG_READ_BYTE(instance, PCI_HEADER_TYPE);
+base = AM53C974_PCIREG_READ_DWORD(instance, PCI_BASE_ADDRESS_0);
+irq = AM53C974_PCIREG_READ_BYTE(instance, PCI_INTERRUPT_LINE);
+
+AM53C974_PCIREG_CLOSE();
+
+
+printk("------------- start of PCI register dump -------------\n");
+printk("PCI_VENDOR_ID: 0x%x\n", vendor_id);
+printk("PCI_DEVICE_ID: 0x%x\n", device_id);
+printk("PCI_COMMAND: 0x%x\n", command);
+printk("PCI_STATUS: 0x%x\n", status);
+printk("PCI_CLASS_REVISION: 0x%lx\n", class_revision);
+printk("PCI_CACHE_LINE_SIZE: 0x%x\n", cache_line_size);
+printk("PCI_LATENCY_TIMER: 0x%x\n", latency_timer);
+printk("PCI_HEADER_TYPE: 0x%x\n", header_type);
+printk("PCI_BASE_ADDRESS_0: 0x%lx\n", base);
+printk("PCI_INTERRUPT_LINE: %d\n", irq);
+for (i = 0; i < 8; i++) printk("PCI_SCRATCH_%d: 0x%x\n", i, scratch[i]);
+printk("------------- end of PCI register dump -------------\n\n");
+}
+
+static struct {
+ unsigned char value;
+ char *name;
+} phases[] = {
+{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
+{PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
+{PHASE_RES_0, "RESERVED 0"}, {PHASE_RES_1, "RESERVED 1"}};
+
+/**************************************************************************
+ * Function : void AM53C974_print_phase(struct Scsi_Host *instance)
+ *
+ * Purpose : print the current SCSI phase for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_phase(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned char statreg, latched;
+int i;
+AM53C974_setio(instance);
+
+latched = (AM53C974_read_8(CNTLREG2)) & CNTLREG2_ENF;
+statreg = AM53C974_read_8(STATREG);
+for (i = 0; (phases[i].value != PHASE_RES_1) &&
+ (phases[i].value != (statreg & STATREG_PHASE)); ++i);
+if (latched)
+ printk("scsi%d : phase %s, latched at end of last command\n", instance->host_no, phases[i].name);
+ else
+ printk("scsi%d : phase %s, real time\n", instance->host_no, phases[i].name);
+}
+
+/**************************************************************************
+ * Function : void AM53C974_print_queues(struct Scsi_Host *instance)
+ *
+ * Purpose : print commands in the various queues
+ *
+ * Inputs : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_queues(struct Scsi_Host *instance)
+{
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *ptr;
+
+printk("AM53C974: coroutine is%s running.\n", main_running ? "" : "n't");
+
+cli();
+
+if (!hostdata->connected) {
+ printk ("scsi%d: no currently connected command\n", instance->host_no); }
+ else {
+ print_Scsi_Cmnd ((Scsi_Cmnd *)hostdata->connected); }
+if (!hostdata->sel_cmd) {
+ printk ("scsi%d: no currently arbitrating command\n", instance->host_no); }
+ else {
+ print_Scsi_Cmnd ((Scsi_Cmnd *)hostdata->sel_cmd); }
+
+printk ("scsi%d: issue_queue ", instance->host_no);
+if (!hostdata->issue_queue)
+ printk("empty\n");
+ else {
+ printk(":\n");
+ for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *)ptr->host_scribble)
+ print_Scsi_Cmnd (ptr); }
+
+printk ("scsi%d: disconnected_queue ", instance->host_no);
+if (!hostdata->disconnected_queue)
+ printk("empty\n");
+ else {
+ printk(":\n");
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *)ptr->host_scribble)
+ print_Scsi_Cmnd (ptr); }
+
+sti();
+}
+
+#endif /* AM53C974_DEBUG */
+
+/**************************************************************************
+ * Function : void AM53C974_print(struct Scsi_Host *instance)
+ *
+ * Purpose : dump the chip registers for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned long ctcreg, dmastc, dmaspa, dmawbc, dmawac;
+unsigned char cmdreg, statreg, isreg, cfireg, cntlreg[4], dmacmd, dmastatus;
+AM53C974_setio(instance);
+
+cli();
+ctcreg = AM53C974_read_8(CTCHREG) << 16;
+ctcreg |= AM53C974_read_8(CTCMREG) << 8;
+ctcreg |= AM53C974_read_8(CTCLREG);
+cmdreg = AM53C974_read_8(CMDREG);
+statreg = AM53C974_read_8(STATREG);
+isreg = AM53C974_read_8(ISREG);
+cfireg = AM53C974_read_8(CFIREG);
+cntlreg[0] = AM53C974_read_8(CNTLREG1);
+cntlreg[1] = AM53C974_read_8(CNTLREG2);
+cntlreg[2] = AM53C974_read_8(CNTLREG3);
+cntlreg[3] = AM53C974_read_8(CNTLREG4);
+dmacmd = AM53C974_read_8(DMACMD);
+dmastc = AM53C974_read_32(DMASTC);
+dmaspa = AM53C974_read_32(DMASPA);
+dmawbc = AM53C974_read_32(DMAWBC);
+dmawac = AM53C974_read_32(DMAWAC);
+dmastatus = AM53C974_read_8(DMASTATUS);
+sti();
+
+printk("AM53C974 register dump:\n");
+printk("IO base: 0x%04lx; CTCREG: 0x%04lx; CMDREG: 0x%02x; STATREG: 0x%02x; ISREG: 0x%02x\n",
+ io_port, ctcreg, cmdreg, statreg, isreg);
+printk("CFIREG: 0x%02x; CNTLREG1-4: 0x%02x; 0x%02x; 0x%02x; 0x%02x\n",
+ cfireg, cntlreg[0], cntlreg[1], cntlreg[2], cntlreg[3]);
+printk("DMACMD: 0x%02x; DMASTC: 0x%04lx; DMASPA: 0x%04lx\n", dmacmd, dmastc, dmaspa);
+printk("DMAWBC: 0x%04lx; DMAWAC: 0x%04lx; DMASTATUS: 0x%02x\n", dmawbc, dmawac, dmastatus);
+printk("---------------------------------------------------------\n");
+}
+
+/**************************************************************************
+* Function : void AM53C974_keywait(void)
+*
+* Purpose : wait until a key is pressed; if it was the 'r' key leave singlestep mode;
+* this function is used for debugging only
+*
+* Input : none
+**************************************************************************/
+static void AM53C974_keywait(void)
+{
+#ifdef AM53C974_DEBUG
+int key;
+
+if (!deb_stop) return;
+#endif
+
+cli();
+while ((inb_p(0x64) & 0x01) != 0x01) ;
+#ifdef AM53C974_DEBUG
+key = inb(0x60);
+if (key == 0x93) deb_stop = 0; /* don't stop if 'r' was pressed */
+#endif
+sti();
+}
+
+/**************************************************************************
+* Function : AM53C974_setup(char *str, int *ints)
+*
+* Purpose : LILO command line initialization of the overrides array.
+*
+* Inputs : str - unused, ints - array of integer parameters with ints[0]
+* equal to the number of ints.
+*
+* NOTE : this function needs to be declared as an external function
+* in init/main.c and included there in the bootsetups list
+***************************************************************************/
+void AM53C974_setup(char *str, int *ints)
+{
+if (ints[0] < 4)
+ printk("AM53C974_setup: wrong number of parameters;\n correct syntax is: AM53C974=host-scsi-id, target-scsi-id, max-rate, max-offset\n");
+ else {
+ if (commandline_current < (sizeof(overrides) / sizeof(override_t))) {
+ if ((ints[1] < 0) || (ints[1] > 7) ||
+ (ints[2] < 0) || (ints[2] > 7) ||
+ (ints[1] == ints[2]) ||
+ (ints[3] < (DEF_CLK / MAX_PERIOD)) || (ints[3] > (DEF_CLK / MIN_PERIOD)) ||
+ (ints[4] < 0) || (ints[4] > MAX_OFFSET))
+ printk("AM53C974_setup: illegal parameter\n");
+ else {
+ overrides[commandline_current].host_scsi_id = ints[1];
+ overrides[commandline_current].target_scsi_id = ints[2];
+ overrides[commandline_current].max_rate = ints[3];
+ overrides[commandline_current].max_offset = ints[4];
+ commandline_current++; }
+ }
+ else
+ printk("AM53C974_setup: too many overrides\n");
+ }
+}
+
+#if defined (CONFIG_PCI)
+/**************************************************************************
+* Function : int AM53C974_bios_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips with PCI Bios
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+**************************************************************************/
+int AM53C974_bios_detect(Scsi_Host_Template *tpnt)
+{
+int count = 0; /* number of boards detected */
+int pci_index;
+pci_config_t pci_config;
+
+for (pci_index = 0; pci_index <= 16; ++pci_index) {
+ unsigned char pci_bus, pci_device_fn;
+ if (pcibios_find_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI, pci_index, &pci_bus, &pci_device_fn) != 0)
+ break;
+
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_VENDOR_ID, &pci_config._vendor);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_DEVICE_ID, &pci_config._device);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_COMMAND, &pci_config._command);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_STATUS, &pci_config._status);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &pci_config._class_revision);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CACHE_LINE_SIZE, &pci_config._cache_line_size);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, &pci_config._latency_timer);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_HEADER_TYPE, &pci_config._header_type);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_BIST, &pci_config._bist);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_0, &pci_config._base0);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_1, &pci_config._base1);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_2, &pci_config._base2);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_3, &pci_config._base3);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_4, &pci_config._base4);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_5, &pci_config._base5);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_ROM_ADDRESS, &pci_config._baserom);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_INTERRUPT_LINE, &pci_config._int_line);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_INTERRUPT_PIN, &pci_config._int_pin);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_MIN_GNT, &pci_config._min_gnt);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_MAX_LAT, &pci_config._max_lat);
+ pci_config._pcibus = 0xFFFFFFFF;
+ pci_config._cardnum = 0xFFFFFFFF;
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ (from Mark Stockton <marks@schooner.sys.hou.compaq.com>) */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("PCI Master Bit has not been set. Setting...\n");
+ pcibios_write_config_word(pci_bus, pci_device_fn, PCI_COMMAND, pci_config._command); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+return (count);
+}
+#endif
+
+/**************************************************************************
+* Function : int AM53C974_nobios_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips using PCI config 2
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+*
+* NOTE : This code assumes the controller on PCI bus 0.
+*
+* Origin: Robin Cutshaw (robin@xfree86.org)
+**************************************************************************/
+int AM53C974_nobios_detect(Scsi_Host_Template *tpnt)
+{
+int count = 0; /* number of boards detected */
+pci_config_t pci_config;
+
+/* first try PCI config method 1 */
+for (pci_config._pcibus = 0; pci_config._pcibus < 0x10; pci_config._pcibus++) {
+ for (pci_config._cardnum = 0; pci_config._cardnum < 0x20; pci_config._cardnum++) {
+ unsigned long config_cmd;
+ config_cmd = 0x80000000 | (pci_config._pcibus<<16) | (pci_config._cardnum<<11);
+
+ outl(config_cmd, 0xCF8); /* ioreg 0 */
+ pci_config._device_vendor = inl(0xCFC);
+
+ if ((pci_config._vendor == PCI_VENDOR_ID_AMD) && (pci_config._device == PCI_DEVICE_ID_AMD_SCSI)) {
+ outl(config_cmd | PCI_COMMAND, 0xCF8); pci_config._status_command = inl(0xCFC);
+ outl(config_cmd | PCI_CLASS_REVISION, 0xCF8); pci_config._class_revision = inl(0xCFC);
+ outl(config_cmd | PCI_CACHE_LINE_SIZE, 0xCF8); pci_config._bist_header_latency_cache = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_0, 0xCF8); pci_config._base0 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_1, 0xCF8); pci_config._base1 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_2, 0xCF8); pci_config._base2 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_3, 0xCF8); pci_config._base3 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_4, 0xCF8); pci_config._base4 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_5, 0xCF8); pci_config._base5 = inl(0xCFC);
+ outl(config_cmd | PCI_ROM_ADDRESS, 0xCF8); pci_config._baserom = inl(0xCFC);
+ outl(config_cmd | PCI_INTERRUPT_LINE, 0xCF8); pci_config._max_min_ipin_iline = inl(0xCFC);
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ From Mark Stockton <marks@schooner.sys.hou.compaq.com> */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("Config 1; PCI Master Bit has not been set. Setting...\n");
+ outl(config_cmd | PCI_COMMAND, 0xCF8); outw(pci_config._command, 0xCFC); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+ }
+ }
+outb(0, 0xCF8); /* is this really necessary? */
+
+/* try PCI config method 2, if no device was detected by method 1 */
+if (!count) {
+ AM53C974_PCIREG_OPEN();
+
+ pci_config._pcibus = 0xFFFFFFFF;
+ pci_config._cardnum = 0xFFFFFFFF;
+
+ for (pci_config._ioaddr = 0xC000; pci_config._ioaddr < 0xD000; pci_config._ioaddr += 0x0100) {
+ pci_config._device_vendor = inl(pci_config._ioaddr);
+
+ if ((pci_config._vendor == PCI_VENDOR_ID_AMD) && (pci_config._device == PCI_DEVICE_ID_AMD_SCSI)) {
+ pci_config._status_command = inl(pci_config._ioaddr + PCI_COMMAND);
+ pci_config._class_revision = inl(pci_config._ioaddr + PCI_CLASS_REVISION);
+ pci_config._bist_header_latency_cache = inl(pci_config._ioaddr + PCI_CACHE_LINE_SIZE);
+ pci_config._base0 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_0);
+ pci_config._base1 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_1);
+ pci_config._base2 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_2);
+ pci_config._base3 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_3);
+ pci_config._base4 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_4);
+ pci_config._base5 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_5);
+ pci_config._baserom = inl(pci_config._ioaddr + PCI_ROM_ADDRESS);
+ pci_config._max_min_ipin_iline = inl(pci_config._ioaddr + PCI_INTERRUPT_LINE);
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ From Mark Stockton <marks@schooner.sys.hou.compaq.com> */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("Config 2; PCI Master Bit has not been set. Setting...\n");
+ outw(pci_config._command, pci_config._ioaddr + PCI_COMMAND); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+ }
+ AM53C974_PCIREG_CLOSE();
+ }
+
+return(count);
+}
+
+/**************************************************************************
+* Function : int AM53C974_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+**************************************************************************/
+int AM53C974_detect(Scsi_Host_Template *tpnt)
+{
+int count; /* number of boards detected */
+
+tpnt->proc_dir = &proc_scsi_am53c974;
+
+#if defined (CONFIG_PCI)
+if (pcibios_present())
+ count = AM53C974_bios_detect(tpnt);
+ else
+#endif
+count = AM53C974_nobios_detect(tpnt);
+return (count);
+}
+
+/**************************************************************************
+* Function : int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config)
+*
+* Purpose : initializes instance and corresponding AM53/79C974 chip.
+*
+* Inputs : tpnt - template, pci_config - PCI configuration,
+*
+* Returns : 1 on success, 0 on failure.
+*
+* NOTE: If no override for the controller's SCSI id is given and AM53C974_SCSI_ID
+* is not defined we assume that the SCSI address of this controller is correctly
+* set up by the BIOS (as reflected by contents of register CNTLREG1).
+* This is the only BIOS assistance we need.
+**************************************************************************/
+static int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config)
+{
+AM53C974_local_declare();
+int i, j;
+struct Scsi_Host *instance, *search;
+struct AM53C974_hostdata *hostdata;
+
+#ifdef AM53C974_OPTION_DEBUG_PROBE_ONLY
+ printk ("AM53C974: probe only enabled, aborting initialization\n");
+ return 0;
+#endif
+
+instance = scsi_register(tpnt, sizeof(struct AM53C974_hostdata));
+hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+instance->base = NULL;
+instance->io_port = pci_config._base0 & (pci_config._base0 & 0x1 ?
+ 0xFFFFFFFC : 0xFFFFFFF0);
+instance->irq = pci_config._int_line;
+instance->dma_channel = -1;
+AM53C974_setio(instance);
+
+#ifdef AM53C974_SCSI_ID
+instance->this_id = AM53C974_SCSI_ID;
+AM53C974_write_8(CNTLREG1, instance->this_id & CNTLREG1_SID);
+#else
+instance->this_id = AM53C974_read_8(CNTLREG1) & CNTLREG1_SID;
+if (instance->this_id != 7)
+ printk("scsi%d: WARNING: unusual hostadapter SCSI id %d; please verify!\n",
+ instance->host_no, instance->this_id);
+#endif
+
+for (i = 0; i < sizeof(hostdata->msgout); i++) {
+ hostdata->msgout[i] = NOP;
+ hostdata->last_message[i] = NOP; }
+for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_per[i] = DEF_STP;
+ hostdata->sync_off[i] = 0;
+ hostdata->sync_neg[i] = 0;
+ hostdata->sync_en[i] = DEFAULT_SYNC_NEGOTIATION_ENABLED;
+ hostdata->max_rate[i] = DEFAULT_RATE;
+ hostdata->max_offset[i] = DEFAULT_SYNC_OFFSET; }
+
+/* overwrite defaults by LILO overrides */
+for (i = 0; i < commandline_current; i++) {
+ if (overrides[i].host_scsi_id == instance->this_id) {
+ j = overrides[i].target_scsi_id;
+ hostdata->sync_en[j] = 1;
+ hostdata->max_rate[j] = overrides[i].max_rate;
+ hostdata->max_offset[j] = overrides[i].max_offset;
+ }
+ }
+
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->issue_queue = NULL;
+hostdata->disconnected_queue = NULL;
+hostdata->in_reset = 0;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+
+/* Set up an interrupt handler if we aren't already sharing an IRQ with another board */
+for (search = first_host;
+ search && ( ((the_template != NULL) && (search->hostt != the_template)) ||
+ (search->irq != instance->irq) || (search == instance) );
+ search = search->next);
+if (!search) {
+ if (request_irq(instance->irq, AM53C974_intr, SA_INTERRUPT, "AM53C974", NULL)) {
+ printk("scsi%d: IRQ%d not free, detaching\n", instance->host_no, instance->irq);
+ scsi_unregister(instance);
+ return 0; }
+ }
+ else {
+ printk("scsi%d: using interrupt handler previously installed for scsi%d\n",
+ instance->host_no, search->host_no); }
+
+if (!the_template) {
+ the_template = instance->hostt;
+ first_instance = instance; }
+
+/* do hard reset */
+AM53C974_write_8(CMDREG, CMDREG_RDEV); /* reset device */
+udelay(5);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+AM53C974_write_8(CNTLREG1, CNTLREG1_DISR | instance->this_id);
+AM53C974_write_8(CMDREG, CMDREG_RBUS); /* reset SCSI bus */
+udelay(10);
+AM53C974_config_after_reset(instance);
+udelay(500000);
+return(1);
+}
+
+/*********************************************************************
+* Function : AM53C974_config_after_reset(struct Scsi_Host *instance) *
+* *
+* Purpose : initializes chip registers after reset *
+* *
+* Inputs : instance - which AM53C974 *
+* *
+* Returns : nothing *
+**********************************************************************/
+static void AM53C974_config_after_reset(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+AM53C974_setio(instance);
+
+/* clear SCSI FIFO */
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+
+/* configure device */
+AM53C974_write_8(STIMREG, DEF_SCSI_TIMEOUT);
+AM53C974_write_8(STPREG, DEF_STP & STPREG_STP);
+AM53C974_write_8(SOFREG, (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+AM53C974_write_8(CLKFREG, DEF_CLKF & CLKFREG_MASK);
+AM53C974_write_8(CNTLREG1, (DEF_ETM<<7) | CNTLREG1_DISR | (DEF_PERE<<4) | instance->this_id);
+AM53C974_write_8(CNTLREG2, (DEF_ENF<<6));
+AM53C974_write_8(CNTLREG3, (DEF_ADIDCHK<<7) | (DEF_FASTSCSI<<4) | (DEF_FASTCLK<<3));
+AM53C974_write_8(CNTLREG4, (DEF_GLITCH<<6) | (DEF_PWD<<5) | (DEF_RAE<<3) | (DEF_RADE<<2) | CNTLREG4_RES);
+}
+
+/***********************************************************************
+* Function : const char *AM53C974_info(struct Scsi_Host *instance) *
+* *
+* Purpose : return device driver information *
+* *
+* Inputs : instance - which AM53C974 *
+* *
+* Returns : info string *
+************************************************************************/
+const char *AM53C974_info(struct Scsi_Host *instance)
+{
+static char info[100];
+
+sprintf(info, "AM53/79C974 PCscsi driver rev. %d.%d; host I/O address: 0x%x; irq: %d\n",
+ AM53C974_DRIVER_REVISION_MAJOR, AM53C974_DRIVER_REVISION_MINOR,
+ instance->io_port, instance->irq);
+return (info);
+}
+
+/**************************************************************************
+* Function : int AM53C974_command (Scsi_Cmnd *SCpnt) *
+* *
+* Purpose : the unqueued SCSI command function, replaced by the *
+* AM53C974_queue_command function *
+* *
+* Inputs : SCpnt - pointer to command structure *
+* *
+* Returns :status, see hosts.h for details *
+***************************************************************************/
+int AM53C974_command(Scsi_Cmnd *SCpnt)
+{
+DEB(printk("AM53C974_command called\n"));
+return 0;
+}
+
+/**************************************************************************
+* Function : void initialize_SCp(Scsi_Cmnd *cmd) *
+* *
+* Purpose : initialize the saved data pointers for cmd to point to the *
+* start of the buffer. *
+* *
+* Inputs : cmd - Scsi_Cmnd structure to have pointers reset. *
+* *
+* Returns : nothing *
+**************************************************************************/
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
+{
+if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *)cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length; }
+ else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen; }
+}
+
+/**************************************************************************
+* Function : run_main(void) *
+* *
+* Purpose : ensure that the coroutine is running and will process our   *
+* request. main_running is checked/set here (in an inline *
+* function rather than in AM53C974_main itself to reduce the *
+* chances of stack overflow. *
+* *
+* *
+* Inputs : none *
+* *
+* Returns : nothing *
+**************************************************************************/
+static __inline__ void run_main(void)
+{
+cli();
+if (!main_running) {
+ /* main_running is cleared in AM53C974_main once it can't do
+ more work, and AM53C974_main exits with interrupts disabled. */
+ main_running = 1;
+ AM53C974_main();
+ sti(); }
+ else
+ sti();
+}
+
+/**************************************************************************
+* Function : int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+*
+* Purpose : writes SCSI command into AM53C974 FIFO
+*
+* Inputs : cmd - SCSI command, done - function called on completion, with
+* a pointer to the command descriptor.
+*
+* Returns : status, see hosts.h for details
+*
+* Side effects :
+* cmd is added to the per instance issue_queue, with minor
+* twiddling done to the host specific fields of cmd. If the
+* main coroutine is not running, it is restarted.
+**************************************************************************/
+int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *tmp;
+
+/* There is deliberately no matching sti() in this function: run_main()
+   at the bottom re-enables interrupts on both of its paths. */
+cli();
+DEB_QUEUE(printk(SEPARATOR_LINE));
+DEB_QUEUE(printk("scsi%d: AM53C974_queue_command called\n", instance->host_no));
+DEB_QUEUE(printk("cmd=%02x target=%02x lun=%02x bufflen=%d use_sg = %02x\n",
+ cmd->cmnd[0], cmd->target, cmd->lun, cmd->request_bufflen, cmd->use_sg));
+
+/* We use the host_scribble field as a pointer to the next command in a queue */
+cmd->host_scribble = NULL;
+cmd->scsi_done = done;
+cmd->result = 0;
+cmd->device->disconnect = 0;
+
+/* Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists. */
+if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ /* head insertion: empty queue or REQUEST SENSE */
+ LIST(cmd, hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = cmd; }
+ else {
+ /* tail insertion: walk the singly-linked host_scribble chain to its end */
+ for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; tmp->host_scribble;
+ tmp = (Scsi_Cmnd *)tmp->host_scribble);
+ LIST(cmd, tmp);
+ tmp->host_scribble = (unsigned char *)cmd; }
+
+DEB_QUEUE(printk("scsi%d : command added to %s of queue\n", instance->host_no,
+ (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
+
+/* Run the coroutine if it isn't already running. */
+run_main();
+return 0;
+}
+
+/**************************************************************************
+ * Function : AM53C974_main (void)
+ *
+ * Purpose : AM53C974_main is a coroutine that runs as long as more work can
+ * be done on the AM53C974 host adapters in a system. Both
+ * AM53C974_queue_command() and AM53C974_intr() will try to start it
+ * in case it is not running.
+ *
+ * NOTE : AM53C974_main exits with interrupts *disabled*, the caller should
+ * reenable them. This prevents reentrancy and kernel stack overflow.
+ **************************************************************************/
+static void AM53C974_main(void)
+{
+AM53C974_local_declare();
+Scsi_Cmnd *tmp, *prev;
+struct Scsi_Host *instance;
+struct AM53C974_hostdata *hostdata;
+int done;
+
+/* We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set main_running to 0 and exit. */
+
+/* NOTE(review): nothing in this excerpt ever clears `done`, so the loop
+   body executes exactly once per invocation — confirm against the full
+   driver source whether that is intended. */
+do {
+ cli(); /* Freeze request queues */
+ done = 1;
+ for (instance = first_instance; instance && instance->hostt == the_template;
+ instance = instance->next) {
+ hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+ AM53C974_setio(instance);
+ /* start to select target if we are not connected and not in the
+ selection process */
+ if (!hostdata->connected && !hostdata->sel_cmd) {
+ /* Search through the issue_queue for a command destined for a target
+ that is not busy. */
+ for (tmp = (Scsi_Cmnd *)hostdata->issue_queue, prev = NULL; tmp;
+ prev = tmp, tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ /* When we find one, remove it from the issue queue. */
+ if (!(hostdata->busy[tmp->target] & (1 << tmp->lun))) {
+ if (prev) {
+ REMOVE(prev, (Scsi_Cmnd *)(prev->host_scribble), tmp,
+ (Scsi_Cmnd *)(tmp->host_scribble));
+ prev->host_scribble = tmp->host_scribble; }
+ else {
+ REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble);
+ hostdata->issue_queue = (Scsi_Cmnd *)tmp->host_scribble; }
+ tmp->host_scribble = NULL;
+
+ /* go into selection mode, disable reselection and wait for
+ SO interrupt which will continue with the selection process */
+ hostdata->selecting = 1;
+ hostdata->sel_cmd = tmp;
+ AM53C974_write_8(CMDREG, CMDREG_DSR);
+ break;
+ } /* if target/lun is not busy */
+
+ } /* for */
+ } /* if (!hostdata->connected) */
+ else {
+ DEB(printk("main: connected; cmd = 0x%lx, sel_cmd = 0x%lx\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd));
+ }
+ } /* for instance */
+ } while (!done);
+/* Exit with interrupts still disabled; callers (run_main) do the sti(). */
+main_running = 0;
+}
+
+/************************************************************************
+* Function : AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs) *
+* *
+* Purpose : interrupt handler *
+* *
+* Inputs : irq - interrupt line, regs - ? *
+* *
+* Returns : nothing *
+************************************************************************/
+static void AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+AM53C974_local_declare();
+struct Scsi_Host *instance;
+struct AM53C974_hostdata *hostdata;
+unsigned char cmdreg, dmastatus, statreg, isreg, instreg, cfifo;
+
+/* find AM53C974 hostadapter responsible for this interrupt */
+for (instance = first_instance; instance; instance = instance->next)
+ if ((instance->irq == irq) && (instance->hostt == the_template)) goto FOUND;
+sti();
+return;
+
+/* found; now decode and process */
+FOUND:
+hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+dmastatus = AM53C974_read_8(DMASTATUS);
+
+DEB_INTR(printk(SEPARATOR_LINE));
+DEB_INTR(printk("AM53C974 interrupt; dmastatus=0x%02x\n", dmastatus));
+KEYWAIT();
+
+/*** DMA related interrupts ***/
+if (hostdata->connected && (dmastatus & (DMASTATUS_ERROR | DMASTATUS_PWDN |
+ DMASTATUS_ABORT))) {
+ /* DMA error or POWERDOWN */
+ printk("scsi%d: DMA error or powerdown; dmastatus: 0x%02x\n",
+ instance->host_no, dmastatus);
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ panic("scsi%d: cannot recover\n", instance->host_no); }
+
+if (hostdata->connected && (dmastatus & DMASTATUS_DONE)) {
+ /* DMA transfer done */
+ unsigned long residual;
+ cli();
+ if (!(AM53C974_read_8(DMACMD) & DMACMD_DIR)) {
+ /* write direction: poll the transfer counter + FIFO fill level until
+ the transfer has drained or a SCSI interrupt is pending */
+ do {
+ dmastatus = AM53C974_read_8(DMASTATUS);
+ residual = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+ residual += AM53C974_read_8(CFIREG) & CFIREG_CF;
+ } while (!(dmastatus & DMASTATUS_SCSIINT) && residual);
+ /* re-read the final residual after the loop terminates */
+ residual = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+ residual += AM53C974_read_8(CFIREG) & CFIREG_CF;
+ }
+ else
+ residual = 0;
+ /* advance the scatter/gather pointer past what was transferred */
+ hostdata->connected->SCp.ptr += hostdata->connected->SCp.this_residual - residual;
+ hostdata->connected->SCp.this_residual = residual;
+
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+
+ /* if service request missed before, process it now (ugly) */
+ if (hostdata->dma_busy) {
+ hostdata->dma_busy = 0;
+ cmdreg = AM53C974_read_8(CMDREG);
+ statreg = AM53C974_read_8(STATREG);
+ isreg = AM53C974_read_8(ISREG);
+ instreg = AM53C974_read_8(INSTREG);
+ cfifo = AM53C974_cfifo();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo,
+ dmastatus); }
+ sti();
+ }
+
+if (!(dmastatus & DMASTATUS_SCSIINT)) {
+ sti();
+ return; }
+
+/*** SCSI related interrupts ***/
+/* NOTE(review): reading INSTREG appears to acknowledge the chip interrupt
+   (this driver comments a later INSTREG read with "clear int") — confirm
+   with the Am53C974 datasheet. Read order of these registers matters. */
+cmdreg = AM53C974_read_8(CMDREG);
+statreg = AM53C974_read_8(STATREG);
+isreg = AM53C974_read_8(ISREG);
+instreg = AM53C974_read_8(INSTREG);
+cfifo = AM53C974_cfifo();
+
+DEB_INTR(printk("scsi%d: statreg: 0x%02x; isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ instance->host_no, statreg, isreg, instreg, cfifo));
+
+if (statreg & STATREG_PE) {
+ /* parity error */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d : PARITY error\n", instance->host_no);
+ if (hostdata->connected) hostdata->sync_off[hostdata->connected->target] = 0; /* setup asynchronous transfer */
+ hostdata->aborted = 1; }
+
+if (statreg & STATREG_IOE) {
+ /* illegal operation error */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d : ILLEGAL OPERATION error\n", instance->host_no);
+ printk("cmdreg: 0x%02x; dmacmd: 0x%02x; statreg: 0x%02x; \n"
+ "isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ cmdreg, AM53C974_read_8(DMACMD), statreg, isreg, instreg, cfifo); }
+if (hostdata->in_reset && (instreg & INSTREG_SRST)) {
+ /* RESET INTERRUPT */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ DEB(printk("Bus reset interrupt received\n"));
+ AM53C974_intr_bus_reset(instance);
+ cli();
+ /* fail whichever command was in flight (connected takes precedence) */
+ if (hostdata->connected) {
+ hostdata->connected->result = DID_RESET << 16;
+ hostdata->connected->scsi_done((Scsi_Cmnd *)hostdata->connected);
+ hostdata->connected = NULL; }
+ else {
+ if (hostdata->sel_cmd) {
+ hostdata->sel_cmd->result = DID_RESET << 16;
+ hostdata->sel_cmd->scsi_done((Scsi_Cmnd *)hostdata->sel_cmd);
+ hostdata->sel_cmd = NULL; }
+ }
+ sti();
+ if (hostdata->in_reset == 1) goto EXIT;
+ else return;
+ }
+
+if (instreg & INSTREG_ICMD) {
+ /* INVALID COMMAND INTERRUPT */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d: Invalid command interrupt\n", instance->host_no);
+ printk("cmdreg: 0x%02x; dmacmd: 0x%02x; statreg: 0x%02x; dmastatus: 0x%02x; \n"
+ "isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ cmdreg, AM53C974_read_8(DMACMD), statreg, dmastatus, isreg, instreg, cfifo);
+ panic("scsi%d: cannot recover\n", instance->host_no); }
+
+if (instreg & INSTREG_DIS) {
+ /* DISCONNECT INTERRUPT */
+ DEB_INTR(printk("Disconnect interrupt received; "));
+ cli();
+ AM53C974_intr_disconnect(instance);
+ sti();
+ goto EXIT; }
+
+if (instreg & INSTREG_RESEL) {
+ /* RESELECTION INTERRUPT */
+ DEB_INTR(printk("Reselection interrupt received\n"));
+ cli();
+ AM53C974_intr_reselect(instance, statreg);
+ sti();
+ goto EXIT; }
+
+if (instreg & INSTREG_SO) {
+ /* SUCCESSFUL OPERATION: either the DSR issued by the main coroutine
+ completed (start the actual selection now), or a selection finished. */
+ DEB_INTR(printk("Successful operation interrupt received\n"));
+ if (hostdata->selecting) {
+ DEB_INTR(printk("DSR completed, starting select\n"));
+ cli();
+ AM53C974_select(instance, (Scsi_Cmnd *)hostdata->sel_cmd,
+ (hostdata->sel_cmd->cmnd[0] == REQUEST_SENSE) ?
+ TAG_NONE : TAG_NEXT);
+ hostdata->selecting = 0;
+ AM53C974_set_sync(instance, hostdata->sel_cmd->target);
+ sti();
+ return; }
+
+ if (hostdata->sel_cmd != NULL) {
+ if ( ((isreg & ISREG_IS) != ISREG_OK_NO_STOP) &&
+ ((isreg & ISREG_IS) != ISREG_OK_STOP) ) {
+ /* UNSUCCESSFUL SELECTION: requeue the command at the head of
+ the issue queue for a later retry */
+ DEB_INTR(printk("unsuccessful selection\n"));
+ cli();
+ hostdata->dma_busy = 0;
+ LIST(hostdata->sel_cmd, hostdata->issue_queue);
+ hostdata->sel_cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0;
+ sti();
+ goto EXIT; }
+ else {
+ /* SUCCESSFUL SELECTION */
+ DEB(printk("successful selection; cmd=0x%02lx\n", (long)hostdata->sel_cmd));
+ cli();
+ hostdata->dma_busy = 0;
+ hostdata->disconnecting = 0;
+ hostdata->connected = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0;
+#ifdef SCSI2
+ if (!hostdata->connected->device->tagged_queue)
+#endif
+ hostdata->busy[hostdata->connected->target] |= (1 << hostdata->connected->lun);
+ /* very strange -- use_sg is sometimes nonzero for request sense commands !! */
+ if ((hostdata->connected->cmnd[0] == REQUEST_SENSE) && hostdata->connected->use_sg) {
+ DEB(printk("scsi%d: REQUEST_SENSE command with nonzero use_sg\n", instance->host_no));
+ KEYWAIT();
+ hostdata->connected->use_sg = 0; }
+ initialize_SCp((Scsi_Cmnd *)hostdata->connected);
+ hostdata->connected->SCp.phase = PHASE_CMDOUT;
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti();
+ return; }
+ }
+ else {
+ cli();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti();
+ return; }
+ }
+
+if (instreg & INSTREG_SR) {
+ DEB_INTR(printk("Service request interrupt received, "));
+ if (hostdata->connected) {
+ DEB_INTR(printk("calling information_transfer\n"));
+ cli();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti(); }
+ else {
+ printk("scsi%d: weird: service request when no command connected\n", instance->host_no);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO); } /* clear FIFO */
+ return;
+ }
+
+EXIT:
+ DEB_INTR(printk("intr: starting main\n"));
+ run_main();
+ DEB_INTR(printk("end of intr\n"));
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_disconnect(struct Scsi_Host *instance)
+*
+* Purpose : manage target disconnection
+*
+* Inputs : instance -- which AM53C974
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_intr_disconnect(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *cmd;
+AM53C974_setio(instance);
+
+if (hostdata->sel_cmd != NULL) {
+ /* normal selection timeout, typical for nonexisting targets */
+ cmd = (Scsi_Cmnd *)hostdata->sel_cmd;
+ DEB_INTR(printk("bad target\n"));
+ cmd->result = DID_BAD_TARGET << 16;
+ goto EXIT_FINISHED; }
+
+if (!hostdata->connected) {
+ /* can happen if controller was reset, a device tried to reconnect,
+ failed and disconnects now */
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+ return; }
+
+if (hostdata->disconnecting) {
+ /* target sent disconnect message, so we are prepared */
+ cmd = (Scsi_Cmnd *)hostdata->connected;
+ AM53C974_set_async(instance, cmd->target);
+ DEB_INTR(printk("scsi%d : disc. from cmnd %d for ta %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ if (cmd->device->disconnect) {
+ /* target wants to reselect later */
+ DEB_INTR(printk("ok, re-enabling selection\n"));
+ LIST(cmd,hostdata->disconnected_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->disconnected_queue;
+ hostdata->disconnected_queue = cmd;
+ DEB_QUEUE(printk("scsi%d : command for target %d lun %d this %d was moved from connected to"
+ " the disconnected_queue\n", instance->host_no, cmd->target,
+ cmd->lun, hostdata->disconnected_queue->SCp.this_residual));
+ DEB_QUEUE(AM53C974_print_queues(instance));
+ goto EXIT_UNFINISHED; }
+ else {
+ /* target does not want to reselect later, we are really finished */
+#ifdef AM53C974_DEBUG
+ if (cmd->cmnd[0] == REQUEST_SENSE) {
+ int i;
+ printk("Request sense data dump:\n");
+ for (i = 0; i < cmd->request_bufflen; i++) {
+ printk("%02x ", *((char *)(cmd->request_buffer) + i));
+ if (i && !(i % 16)) printk("\n"); }
+ printk("\n"); }
+#endif
+ goto EXIT_FINISHED; } /* !cmd->device->disconnect */
+ } /* if (hostdata->disconnecting) */
+
+/* no disconnect message received; unexpected disconnection */
+/* NOTE(review): hostdata->connected was verified non-NULL above, so this
+   `if (cmd)` guard is always true here; if it were ever false, execution
+   would fall straight into EXIT_FINISHED, which dereferences cmd. */
+cmd = (Scsi_Cmnd *)hostdata->connected;
+if (cmd) {
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ AM53C974_set_async(instance, cmd->target);
+ printk("scsi%d: Unexpected disconnect; phase: %d; target: %d; this_residual: %d; buffers_residual: %d; message: %d\n",
+ instance->host_no, cmd->SCp.phase, cmd->target, cmd->SCp.this_residual, cmd->SCp.buffers_residual,
+ cmd->SCp.Message);
+ printk("cmdreg: 0x%02x; statreg: 0x%02x; isreg: 0x%02x; cfifo: 0x%02x\n",
+ AM53C974_read_8(CMDREG), AM53C974_read_8(STATREG), AM53C974_read_8(ISREG),
+ AM53C974_read_8(CFIREG) & CFIREG_CF);
+
+ if ((hostdata->last_message[0] == EXTENDED_MESSAGE) &&
+ (hostdata->last_message[2] == EXTENDED_SDTR)) {
+ /* sync. negotiation was aborted, setup asynchronous transfer with target */
+ hostdata->sync_off[cmd->target] = 0; }
+ if (hostdata->aborted || hostdata->msgout[0] == ABORT)
+ cmd->result = DID_ABORT << 16;
+ else
+ cmd->result = DID_ERROR << 16;
+ goto EXIT_FINISHED; }
+
+/* command is complete: reset all per-command state, free the target/lun
+   slot and hand the command back to the midlayer */
+EXIT_FINISHED:
+hostdata->aborted = 0;
+hostdata->msgout[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+DEB(printk("disconnect; issue_queue: 0x%lx, disconnected_queue: 0x%lx\n",
+ (long)hostdata->issue_queue, (long)hostdata->disconnected_queue));
+cmd->scsi_done(cmd);
+
+/* NOTE(review): selecting was just cleared above, so this condition is
+   always true; cmd->target is also read after scsi_done() has returned
+   the command — verify the midlayer does not recycle it before this. */
+if (!hostdata->selecting) {
+ AM53C974_set_async(instance, cmd->target);
+ AM53C974_write_8(CMDREG, CMDREG_ESR); } /* allow reselect */
+return;
+
+/* command disconnected but not finished: it lives on in the
+   disconnected_queue until the target reselects us */
+EXIT_UNFINISHED:
+hostdata->msgout[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+DEB(printk("disconnect; issue_queue: 0x%lx, disconnected_queue: 0x%lx\n",
+ (long)hostdata->issue_queue, (long)hostdata->disconnected_queue));
+if (!hostdata->selecting) {
+ AM53C974_set_async(instance, cmd->target);
+ AM53C974_write_8(CMDREG, CMDREG_ESR); } /* allow reselect */
+return;
+}
+
+/**************************************************************************
+* Function : int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg)
+*
+* Purpose : setup message string for sync. negotiation
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+* msg -- input message string
+*
+* Returns : 0 if parameters accepted or 1 if not accepted
+*
+* Side effects: hostdata is changed
+*
+* Note: we assume here that fastclk is enabled
+**************************************************************************/
+static int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+int period, offset, i, rate, rate_rem;
+AM53C974_setio(instance);
+
+/* msg is an SDTR extended message: msg[3] = requested transfer period
+   (presumably in units of 4 ns per SCSI-2 SDTR — TODO confirm),
+   msg[4] = requested sync offset. Convert the period into chip clock
+   ticks, rounding via the +1000 before the /2000 division. */
+period = (DEF_CLK * msg[3] * 8 + 1000) / 2000;
+/* clamp the period to the controller's supported range; if clamped, the
+   reply message (msgout[3]) carries our counter-proposal */
+if (period < MIN_PERIOD) {
+ period = MIN_PERIOD;
+ hostdata->msgout[3] = period / 4; }
+ else
+ if (period > MAX_PERIOD) {
+ period = MAX_PERIOD;
+ hostdata->msgout[3] = period / 4; }
+ else
+ hostdata->msgout[3] = msg[3];
+offset = msg[4];
+if (offset > MAX_OFFSET) offset = MAX_OFFSET;
+hostdata->msgout[4] = offset;
+hostdata->sync_per[target] = period;
+hostdata->sync_off[target] = offset;
+/* copy the SDTR header (EXTENDED_MESSAGE, length, code) into the reply */
+for (i = 0; i < 3; i++) hostdata->msgout[i] = msg[i];
+/* 1 = we had to modify the proposed parameters (not accepted as-is) */
+if ((hostdata->msgout[3] != msg[3]) || (msg[4] != offset)) return(1);
+
+/* report the negotiated rate in MHz with one decimal digit */
+rate = DEF_CLK / period;
+rate_rem = 10 * (DEF_CLK - period * rate) / period;
+
+if (offset)
+ printk("\ntarget %d: rate=%d.%d Mhz, synchronous, sync offset=%d bytes\n",
+ target, rate, rate_rem, offset);
+ else
+ printk("\ntarget %d: rate=%d.%d Mhz, asynchronous\n", target, rate, rate_rem);
+
+return(0);
+}
+
+/**************************************************************************
+* Function : AM53C974_set_async(struct Scsi_Host *instance, int target)
+*
+* Purpose : put controller into async. mode
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_set_async(struct Scsi_Host *instance, int target)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+/* keep the target's step period but program a zero sync offset
+   (compare AM53C974_set_sync, which ORs in sync_off[target]) --
+   a zero offset selects asynchronous transfers */
+AM53C974_write_8(STPREG, hostdata->sync_per[target]);
+AM53C974_write_8(SOFREG, (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+}
+
+/**************************************************************************
+* Function : AM53C974_set_sync(struct Scsi_Host *instance, int target)
+*
+* Purpose : put controller into sync. mode
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_set_sync(struct Scsi_Host *instance, int target)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+/* program the negotiated step period and sync offset for this target;
+   sync_off[target] == 0 degenerates to asynchronous mode */
+AM53C974_write_8(STPREG, hostdata->sync_per[target]);
+AM53C974_write_8(SOFREG, (SOFREG_SO & hostdata->sync_off[target]) |
+ (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+}
+
+/***********************************************************************
+* Function : AM53C974_information_transfer(struct Scsi_Host *instance, *
+* unsigned char statreg, unsigned char isreg, *
+* unsigned char instreg, unsigned char cfifo, *
+* unsigned char dmastatus) *
+* *
+* Purpose : handle phase changes *
+* *
+* Inputs : instance - which AM53C974 *
+* statreg - status register *
+* isreg - internal state register *
+* instreg - interrupt status register *
+* cfifo - number of bytes in FIFO *
+* dmastatus - dma status register *
+* *
+* Returns : nothing *
+************************************************************************/
+static void AM53C974_information_transfer(struct Scsi_Host *instance,
+ unsigned char statreg, unsigned char isreg,
+ unsigned char instreg, unsigned char cfifo,
+ unsigned char dmastatus)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *cmd = (Scsi_Cmnd *)hostdata->connected;
+int ret, i, len, residual=-1;
+AM53C974_setio(instance);
+
+DEB_INFO(printk(SEPARATOR_LINE));
+/* dispatch on the current bus phase; each arm records the phase it
+   handled in cmd->SCp.phase so the NEXT call can see what we came from */
+switch (statreg & STATREG_PHASE) { /* scsi phase */
+ case PHASE_DATAOUT:
+ DEB_INFO(printk("Dataout phase; cmd=0x%lx, sel_cmd=0x%lx, this_residual=%d, buffers_residual=%d\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+ cmd->SCp.phase = PHASE_DATAOUT;
+ goto PHASE_DATA_IO;
+
+ case PHASE_DATAIN:
+ DEB_INFO(printk("Datain phase; cmd=0x%lx, sel_cmd=0x%lx, this_residual=%d, buffers_residual=%d\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+ cmd->SCp.phase = PHASE_DATAIN;
+ PHASE_DATA_IO:
+ /* shared data-in/data-out handling */
+ if (hostdata->aborted) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ return; }
+ /* current scatter/gather segment exhausted: advance to the next one */
+ if ((!cmd->SCp.this_residual) && cmd->SCp.buffers_residual) {
+ cmd->SCp.buffer++;
+ cmd->SCp.buffers_residual--;
+ cmd->SCp.ptr = (unsigned char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length; }
+ if (cmd->SCp.this_residual) {
+ /* if the DMA engine is still busy, flag it so the DMA-done
+ interrupt can pick up this missed service request */
+ if (!(AM53C974_read_8(DMACMD) & DMACMD_START)) {
+ hostdata->dma_busy = 0;
+ AM53C974_transfer_dma(instance, statreg & STATREG_IO,
+ (unsigned long)cmd->SCp.this_residual,
+ cmd->SCp.ptr); }
+ else
+ hostdata->dma_busy = 1;
+ }
+ return;
+
+ case PHASE_MSGIN:
+ DEB_INFO(printk("Message-In phase; cmd=0x%lx, sel_cmd=0x%lx\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd));
+ AM53C974_set_async(instance, cmd->target);
+ if (cmd->SCp.phase == PHASE_DATAIN)
+ AM53C974_dma_blast(instance, dmastatus, statreg);
+ /* coming out of a data-out: account the bytes still in counter+FIFO */
+ if ((cmd->SCp.phase == PHASE_DATAOUT) && (AM53C974_read_8(DMACMD) & DMACMD_START)) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ residual = cfifo + (AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16));
+ cmd->SCp.ptr += cmd->SCp.this_residual - residual;
+ cmd->SCp.this_residual = residual;
+ if (cfifo) { AM53C974_write_8(CMDREG, CMDREG_CFIFO); cfifo = 0; }
+ }
+ if (cmd->SCp.phase == PHASE_STATIN) {
+ /* after ICCS the chip delivers status + message bytes in the FIFO;
+ busy-wait until both are there (FFREG reads pop the FIFO) */
+ while ((AM53C974_read_8(CFIREG) & CFIREG_CF) < 2) ;
+ cmd->SCp.Status = AM53C974_read_8(FFREG);
+ cmd->SCp.Message = AM53C974_read_8(FFREG);
+ DEB_INFO(printk("Message-In phase; status=0x%02x, message=0x%02x\n",
+ cmd->SCp.Status, cmd->SCp.Message));
+ ret = AM53C974_message(instance, cmd, cmd->SCp.Message); }
+ else {
+ if (!cfifo) {
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ cmd->SCp.Message = AM53C974_read_8(FFREG);
+ }
+ ret = AM53C974_message(instance, cmd, cmd->SCp.Message);
+ }
+ cmd->SCp.phase = PHASE_MSGIN;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+ case PHASE_MSGOUT:
+ DEB_INFO(printk("Message-Out phase; cfifo=%d; msgout[0]=0x%02x\n",
+ AM53C974_read_8(CFIREG) & CFIREG_CF, hostdata->msgout[0]));
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ AM53C974_set_async(instance, cmd->target);
+ /* remember what we sent so MESSAGE_REJECT handling can inspect it */
+ for (i = 0; i < sizeof(hostdata->last_message); i++)
+ hostdata->last_message[i] = hostdata->msgout[i];
+ /* determine message length from the first byte: one-byte codes,
+ two-byte codes, or extended messages carrying their own length */
+ if ((hostdata->msgout[0] == 0) || INSIDE(hostdata->msgout[0], 0x02, 0x1F) ||
+ INSIDE(hostdata->msgout[0], 0x80, 0xFF))
+ len = 1;
+ else {
+ if (hostdata->msgout[0] == EXTENDED_MESSAGE) {
+#ifdef AM53C974_DEBUG_INFO
+ printk("Extended message dump:\n");
+ for (i = 0; i < hostdata->msgout[1] + 2; i++) {
+ printk("%02x ", hostdata->msgout[i]);
+ if (i && !(i % 16)) printk("\n"); }
+ printk("\n");
+#endif
+ len = hostdata->msgout[1] + 2; }
+ else
+ len = 2;
+ }
+ for (i = 0; i < len; i++) AM53C974_write_8(FFREG, hostdata->msgout[i]);
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ cmd->SCp.phase = PHASE_MSGOUT;
+ hostdata->msgout[0] = NOP;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+
+ case PHASE_CMDOUT:
+ DEB_INFO(printk("Command-Out phase\n"));
+ AM53C974_set_async(instance, cmd->target);
+ for (i = 0; i < cmd->cmd_len; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ cmd->SCp.phase = PHASE_CMDOUT;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+
+ case PHASE_STATIN:
+ DEB_INFO(printk("Status phase\n"));
+ if (cmd->SCp.phase == PHASE_DATAIN)
+ AM53C974_dma_blast(instance, dmastatus, statreg);
+ AM53C974_set_async(instance, cmd->target);
+ if (cmd->SCp.phase == PHASE_DATAOUT) {
+ unsigned long residual;
+
+ if (AM53C974_read_8(DMACMD) & DMACMD_START) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ residual = cfifo + (AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16));
+ cmd->SCp.ptr += cmd->SCp.this_residual - residual;
+ cmd->SCp.this_residual = residual; }
+ if (cfifo) { AM53C974_write_8(CMDREG, CMDREG_CFIFO); cfifo = 0; }
+ }
+ cmd->SCp.phase = PHASE_STATIN;
+ AM53C974_write_8(CMDREG, CMDREG_ICCS); /* command complete */
+ break;
+
+ case PHASE_RES_0:
+ case PHASE_RES_1:
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ DEB_INFO(printk("Reserved phase\n"));
+ break;
+ }
+KEYWAIT();
+}
+
+/******************************************************************************
+* Function : int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+* unsigned char msg)
+*
+* Purpose : handle SCSI messages
+*
+* Inputs : instance -- which AM53C974
+* cmd -- SCSI command the message belongs to
+* msg -- message id byte
+*
+* Returns : 1 on success, 0 on failure.
+**************************************************************************/
+static int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ unsigned char msg)
+{
+AM53C974_local_declare();
+static unsigned char extended_msg[10];
+unsigned char statreg;
+int len, ret = 0;
+unsigned char *p;
+#ifdef AM53C974_DEBUG_MSG
+int j;
+#endif
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+DEB_MSG(printk(SEPARATOR_LINE));
+
+/* Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands. */
+switch (msg) {
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by releasing ACK */
+ DEB_LINKED(printk("scsi%d : target %d lun %d linked command complete.\n",
+ instance->host_no, cmd->target, cmd->lun));
+ /* Sanity check : A linked command should only terminate with
+ * one of these messages if there are more linked commands available. */
+ if (!cmd->next_link) {
+ /* FIX(review): added the missing comma after the format string;
+ the original did not compile with LINKED defined. */
+ printk("scsi%d : target %d lun %d linked command complete, no next_link\n",
+ instance->host_no, cmd->target, cmd->lun);
+ hostdata->aborted = 1;
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break; }
+ if (hostdata->aborted) {
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of LINKED_CMD_COMPLETE or"
+ "LINKED_FLG_CMD_COMPLETE message\n", cmd->cmnd[0]));
+ AM53C974_write_8(CMDREG, CMDREG_SATN); }
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ DEB_LINKED(printk("scsi%d : target %d lun %d linked request done, calling scsi_done().\n",
+ instance->host_no, cmd->target, cmd->lun));
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+
+#endif /* def LINKED */
+
+ case ABORT:
+ case COMMAND_COMPLETE:
+ DEB_MSG(printk("scsi%d: command complete message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ hostdata->disconnecting = 1;
+ cmd->device->disconnect = 0;
+
+ /* I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ * If it was a REQUEST SENSE command, we need some way
+ * to differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the result
+ * code unchanged.
+ *
+ * The non-obvious place is where the REQUEST SENSE failed */
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ if (hostdata->aborted) {
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of ABORT or"
+ "COMMAND_COMPLETE message\n", cmd->cmnd[0]));
+ break; }
+ /* CHECK CONDITION: rewrite the command in place into a REQUEST
+ SENSE and requeue it at the head of the issue queue */
+ if ((cmd->cmnd[0] != REQUEST_SENSE) && (cmd->SCp.Status == CHECK_CONDITION)) {
+ DEB_MSG(printk("scsi%d : performing request sense\n", instance->host_no));
+ cmd->cmnd[0] = REQUEST_SENSE;
+ cmd->cmnd[1] &= 0xe0;
+ cmd->cmnd[2] = 0;
+ cmd->cmnd[3] = 0;
+ cmd->cmnd[4] = sizeof(cmd->sense_buffer);
+ cmd->cmnd[5] = 0;
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->sense_buffer;
+ cmd->SCp.this_residual = sizeof(cmd->sense_buffer);
+ LIST(cmd,hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *)cmd;
+ DEB_MSG(printk("scsi%d : REQUEST SENSE added to head of issue queue\n",instance->host_no));
+ }
+
+ /* Accept message by clearing ACK */
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case MESSAGE_REJECT:
+ DEB_MSG(printk("scsi%d: reject message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ /* react based on what we last sent (saved in last_message[]) */
+ switch (hostdata->last_message[0]) {
+ case EXTENDED_MESSAGE:
+ if (hostdata->last_message[2] == EXTENDED_SDTR) {
+ /* sync. negotiation was rejected, setup asynchronous transfer with target */
+ printk("\ntarget %d: rate=%d Mhz, asynchronous (sync. negotiation rejected)\n",
+ cmd->target, DEF_CLK / DEF_STP);
+ hostdata->sync_off[cmd->target] = 0;
+ hostdata->sync_per[cmd->target] = DEF_STP; }
+ break;
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ /* target refuses tagged queueing: fall back to one command
+ per target/lun */
+ cmd->device->tagged_queue = 0;
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+ break;
+ default:
+ break;
+ }
+ if (hostdata->aborted) AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case DISCONNECT:
+ DEB_MSG(printk("scsi%d: disconnect message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ cmd->device->disconnect = 1;
+ hostdata->disconnecting = 1;
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* Accept message by clearing ACK */
+ break;
+
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ DEB_MSG(printk("scsi%d: save/restore pointers message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ /* The SCSI data pointer is *IMPLICITLY* saved on a disconnect
+ * operation, in violation of the SCSI spec so we can safely
+ * ignore SAVE/RESTORE pointers calls.
+ *
+ * Unfortunately, some disks violate the SCSI spec and
+ * don't issue the required SAVE_POINTERS message before
+ * disconnecting, and we have to break spec to remain
+ * compatible. */
+ if (hostdata->aborted) {
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of SAVE/REST. POINTERS message\n",
+ cmd->cmnd[0]));
+ AM53C974_write_8(CMDREG, CMDREG_SATN); }
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case EXTENDED_MESSAGE:
+ DEB_MSG(printk("scsi%d: extended message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ /* Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ */
+ /* BEWARE!! THIS CODE IS EXTREMELY UGLY */
+ extended_msg[0] = EXTENDED_MESSAGE;
+ AM53C974_read_8(INSTREG) ; /* clear int */
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int();
+ /* get length */
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int();
+ extended_msg[1] = len = AM53C974_read_8(FFREG); /* get length */
+ p = extended_msg+2;
+ /* read the remaining (len) bytes.
+ FIX(review): len is a target-supplied byte and may exceed the
+ 8 remaining bytes of extended_msg[]; drain every byte off the
+ bus but store only what fits, to avoid a buffer overflow. */
+ while (len) {
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ if (len > 1) {
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int(); }
+ if (p < extended_msg + sizeof(extended_msg)) {
+ *p = AM53C974_read_8(FFREG);
+ p++; }
+ else
+ (void)AM53C974_read_8(FFREG); /* discard excess byte */
+ len--; }
+
+#ifdef AM53C974_DEBUG_MSG
+ printk("scsi%d: received extended message: ", instance->host_no);
+ for (j = 0; j < extended_msg[1] + 2; j++) {
+ printk("0x%02x ", extended_msg[j]);
+ if (j && !(j % 16)) printk("\n"); }
+ printk("\n");
+#endif
+
+ /* check message */
+ if (extended_msg[2] == EXTENDED_SDTR)
+ ret = AM53C974_sync_neg(instance, cmd->target, extended_msg);
+ if (ret || hostdata->aborted) AM53C974_write_8(CMDREG, CMDREG_SATN);
+
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ default:
+ printk("scsi%d: unknown message 0x%02x received\n",instance->host_no, msg);
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ /* reject message */
+ hostdata->msgout[0] = MESSAGE_REJECT;
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ return(0);
+ break;
+
+ } /* switch (msg) */
+KEYWAIT();
+return(1);
+}
+
+/**************************************************************************
+* Function : AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
+*
+* Purpose : try to establish nexus for the command;
+* start sync negotiation via start stop and transfer the command in
+* cmdout phase in case of an inquiry or req. sense command with no
+* sync. neg. performed yet
+*
+* Inputs : instance -- which AM53C974
+* cmd -- command which requires the selection
+* tag -- tagged queueing
+*
+* Returns : nothing
+*
+* Note: this function initializes the selection process, which is continued
+* in the interrupt handler
+**************************************************************************/
+static void AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned char cfifo, tmp[3];
+unsigned int i, len, cmd_size = COMMAND_SIZE(cmd->cmnd[0]);
+AM53C974_setio(instance);
+
+/* residual bytes left in the chip FIFO would corrupt the new command; flush them */
+cfifo = AM53C974_cfifo();
+if (cfifo) {
+ printk("scsi%d: select error; %d residual bytes in FIFO\n", instance->host_no, cfifo);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO); /* clear FIFO */
+ }
+
+/* first message byte: IDENTIFY for this LUN, disconnect privilege granted */
+tmp[0] = IDENTIFY(1, cmd->lun);
+
+#ifdef SCSI2
+if (cmd->device->tagged_queue && (tag != TAG_NONE)) {
+ tmp[1] = SIMPLE_QUEUE_TAG;
+ if (tag == TAG_NEXT) {
+ /* 0 is TAG_NONE, used to imply no tag for this command */
+ if (cmd->device->current_tag == 0) cmd->device->current_tag = 1;
+ cmd->tag = cmd->device->current_tag;
+ cmd->device->current_tag++; }
+ else
+ cmd->tag = (unsigned char)tag;
+ tmp[2] = cmd->tag;
+ hostdata->last_message[0] = SIMPLE_QUEUE_TAG;
+ len = 3;
+ AM53C974_write_8(FFREG, tmp[0]);
+ AM53C974_write_8(FFREG, tmp[1]);
+ AM53C974_write_8(FFREG, tmp[2]);
+ }
+ else
+#endif /* def SCSI2 */
+ {
+ len = 1;
+ AM53C974_write_8(FFREG, tmp[0]);
+ cmd->tag = 0; }
+
+/* in case of an inquiry or req. sense command with no sync. neg performed yet, we start
+ sync negotiation via start stops and transfer the command in cmdout phase */
+if (((cmd->cmnd[0] == INQUIRY) || (cmd->cmnd[0] == REQUEST_SENSE)) &&
+ !(hostdata->sync_neg[cmd->target]) && hostdata->sync_en[cmd->target]) {
+ hostdata->sync_neg[cmd->target] = 1;
+ hostdata->msgout[0] = EXTENDED_MESSAGE;
+ hostdata->msgout[1] = 3; /* SDTR payload: code byte + period + offset */
+ hostdata->msgout[2] = EXTENDED_SDTR;
+ /* transfer period in 4 ns units: (1000 / rate_MHz) / 4 == 250 / rate_MHz */
+ hostdata->msgout[3] = 250 / (int)hostdata->max_rate[cmd->target];
+ hostdata->msgout[4] = hostdata->max_offset[cmd->target];
+ len += 5; } /* len is now neither 1 nor 3, forcing the stop-steps path below */
+
+AM53C974_write_8(SDIDREG, SDIREG_MASK & cmd->target); /* setup dest. id */
+AM53C974_write_8(STIMREG, DEF_SCSI_TIMEOUT); /* setup timeout reg */
+switch (len) {
+ case 1:
+ for (i = 0; i < cmd_size; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_SAS); /* select with ATN, 1 msg byte */
+ hostdata->msgout[0] = NOP;
+ break;
+ case 3:
+ for (i = 0; i < cmd_size; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_SA3S); /* select with ATN, 3 msg bytes */
+ hostdata->msgout[0] = NOP;
+ break;
+ default:
+ AM53C974_write_8(CMDREG, CMDREG_SASS); /* select with ATN, stop steps; continue in message out phase */
+ break;
+ }
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg)
+*
+* Purpose : handle reselection
+*
+* Inputs : instance -- which AM53C974
+* statreg -- status register
+*
+* Returns : nothing
+*
+* side effects: manipulates hostdata
+**************************************************************************/
+static void AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned char cfifo, msg[3], lun, t, target = 0;
+#ifdef SCSI2
+ unsigned char tag;
+#endif
+Scsi_Cmnd *tmp = NULL, *prev;
+AM53C974_setio(instance);
+
+cfifo = AM53C974_cfifo();
+
+if (hostdata->selecting) {
+ /* caught reselect interrupt in selection process;
+ put selecting command back into the issue queue and continue with the
+ reselecting command */
+ DEB_RESEL(printk("AM53C974_intr_reselect: in selection process\n"));
+ LIST(hostdata->sel_cmd, hostdata->issue_queue);
+ hostdata->sel_cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0; }
+
+/* 2 bytes must be in the FIFO now: the ID bit-mask byte and the IDENTIFY message */
+if (cfifo != 2) {
+ printk("scsi %d: error: %d bytes in fifo, 2 expected\n", instance->host_no, cfifo);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+/* determine target which reselected; the FIFO byte is a bit mask holding
+ our own id plus the reselecting target's id */
+t = AM53C974_read_8(FFREG);
+if (!(t & (1 << instance->this_id))) {
+ printk("scsi %d: error: invalid host id\n", instance->host_no);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+t ^= (1 << instance->this_id); /* strip our bit; the single remaining bit is the target */
+target = 0; while (t != 1) { t >>= 1; target++; }
+DEB_RESEL(printk("scsi %d: reselect; target: %d\n", instance->host_no, target));
+
+if (hostdata->aborted) goto EXIT_ABORT;
+
+if ((statreg & STATREG_PHASE) != PHASE_MSGIN) {
+ printk("scsi %d: error: upon reselection interrupt not in MSGIN\n", instance->host_no);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+/* second FIFO byte must be an IDENTIFY message (bit 7 set); its low bits carry the LUN */
+msg[0] = AM53C974_read_8(FFREG);
+if (!(msg[0] & 0x80)) {
+ printk("scsi%d: error: expecting IDENTIFY message, got ", instance->host_no);
+ print_msg(msg);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+lun = (msg[0] & 0x07);
+
+/* We need to add code for SCSI-II to track which devices have
+ * I_T_L_Q nexuses established, and which have simple I_T_L
+ * nexuses so we can chose to do additional data transfer. */
+#ifdef SCSI2
+#error "SCSI-II tagged queueing is not supported yet"
+#endif
+
+/* Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue. */
+for (tmp = (Scsi_Cmnd *)hostdata->disconnected_queue, prev = NULL;
+ tmp; prev = tmp, tmp = (Scsi_Cmnd *)tmp->host_scribble)
+ if ((target == tmp->target) && (lun == tmp->lun)
+#ifdef SCSI2
+ && (tag == tmp->tag)
+#endif
+ ) {
+ if (prev) {
+ REMOVE(prev, (Scsi_Cmnd *)(prev->host_scribble), tmp,
+ (Scsi_Cmnd *)(tmp->host_scribble));
+ prev->host_scribble = tmp->host_scribble; }
+ else {
+ REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble);
+ hostdata->disconnected_queue = (Scsi_Cmnd *)tmp->host_scribble; }
+ tmp->host_scribble = NULL;
+ hostdata->connected = tmp; /* this command becomes the active nexus */
+ break; }
+
+if (!tmp) {
+#ifdef SCSI2
+ printk("scsi%d: warning : target %d lun %d tag %d not in disconnect_queue.\n",
+ instance->host_no, target, lun, tag);
+#else
+ printk("scsi%d: warning : target %d lun %d not in disconnect_queue.\n",
+ instance->host_no, target, lun);
+#endif
+ /* Since we have an established nexus that we can't do anything with, we must abort it. */
+ hostdata->aborted = 1;
+ DEB(AM53C974_keywait());
+ goto EXIT_ABORT; }
+ else
+ goto EXIT_OK;
+
+EXIT_ABORT:
+/* set ATN and accept the message, pushing the target towards MSGOUT so the
+ abort/reject in msgout[] can be delivered */
+AM53C974_write_8(CMDREG, CMDREG_SATN);
+AM53C974_write_8(CMDREG, CMDREG_MA);
+return;
+
+EXIT_OK:
+DEB_RESEL(printk("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+ instance->host_no, target, tmp->lun, tmp->tag));
+AM53C974_set_sync(instance, target); /* restore this target's sync transfer parameters */
+AM53C974_write_8(SDIDREG, SDIREG_MASK & target); /* setup dest. id */
+AM53C974_write_8(CMDREG, CMDREG_MA);
+hostdata->dma_busy = 0;
+hostdata->connected->SCp.phase = PHASE_CMDOUT;
+}
+
+/**************************************************************************
+* Function : AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+* unsigned long length, char *data)
+*
+* Purpose : setup DMA transfer
+*
+* Inputs : instance -- which AM53C974
+* dir -- direction flag, 0: write to device, read from memory;
+* 1: read from device, write to memory
+* length -- number of bytes to transfer to from buffer
+* data -- pointer to data buffer
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+ unsigned long length, char *data)
+{
+AM53C974_local_declare();
+AM53C974_setio(instance);
+
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+AM53C974_write_8(DMACMD, (dir << 7) | DMACMD_INTE_D); /* idle command */
+/* program the SCSI core's 24-bit start transfer count, one byte at a time */
+AM53C974_write_8(STCLREG, (unsigned char)(length & 0xff));
+AM53C974_write_8(STCMREG, (unsigned char)((length & 0xff00) >> 8));
+AM53C974_write_8(STCHREG, (unsigned char)((length & 0xff0000) >> 16));
+/* mirror the count into the DMA engine and point it at the (bus-address) buffer */
+AM53C974_write_32(DMASTC, length & 0xffffff);
+AM53C974_write_32(DMASPA, virt_to_bus(data));
+AM53C974_write_8(CMDREG, CMDREG_IT | CMDREG_DMA); /* information transfer in DMA mode */
+AM53C974_write_8(DMACMD, (dir << 7) | DMACMD_INTE_D | DMACMD_START); /* go */
+}
+
+/**************************************************************************
+* Function : AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+* unsigned char statreg)
+*
+* Purpose : cleanup DMA transfer
+*
+* Inputs : instance -- which AM53C974
+* dmastatus -- dma status register
+* statreg -- status register
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+ unsigned char statreg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned long ctcreg;
+int dir = statreg & STATREG_IO;
+int cfifo, pio, i = 0;
+AM53C974_setio(instance);
+
+/* poll up to 50000 times for the SCSI FIFO to drain; if it never does,
+ fall back to PIO draining further below */
+do {
+ cfifo = AM53C974_cfifo();
+ i++;
+ } while (cfifo && (i < 50000));
+pio = (i == 50000) ? 1: 0;
+
+/* transfer count reached zero: nothing residual to recover */
+if (statreg & STATREG_CTZ) { AM53C974_write_8(DMACMD, DMACMD_IDLE); return; }
+
+/* DMA engine reports completion: likewise nothing left to do */
+if (dmastatus & DMASTATUS_DONE) { AM53C974_write_8(DMACMD, DMACMD_IDLE); return; }
+
+/* BLAST flushes the DMA FIFO to memory; busy-wait (unbounded) for completion */
+AM53C974_write_8(DMACMD, ((dir << 7) & DMACMD_DIR) | DMACMD_BLAST);
+while(!(AM53C974_read_8(DMASTATUS) & DMASTATUS_BCMPLT)) ;
+AM53C974_write_8(DMACMD, DMACMD_IDLE);
+
+if (pio) {
+ /* transfer residual bytes via PIO, continuing at the DMA working address */
+ unsigned char *wac = (unsigned char *)AM53C974_read_32(DMAWAC);
+ printk("pio mode, residual=%d\n", AM53C974_read_8(CFIREG) & CFIREG_CF);
+ while (AM53C974_read_8(CFIREG) & CFIREG_CF) *(wac++) = AM53C974_read_8(FFREG);
+ }
+
+/* reassemble the 24-bit current transfer count (bytes NOT transferred) */
+ctcreg = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+
+/* advance the scatter pointer by what actually moved; keep the residual count */
+hostdata->connected->SCp.ptr += hostdata->connected->SCp.this_residual - ctcreg;
+hostdata->connected->SCp.this_residual = ctcreg;
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_bus_reset(struct Scsi_Host *instance)
+*
+* Purpose : handle bus reset interrupt
+*
+* Inputs : instance -- which AM53C974
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_intr_bus_reset(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+AM53C974_setio(instance);
+
+/* drop any stale FIFO contents, then leave the sequencer idle */
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+
+/* read-modify-write control register one: mask further SCSI-reset interrupts */
+AM53C974_write_8(CNTLREG1, AM53C974_read_8(CNTLREG1) | CNTLREG1_DISR);
+}
+
+/**************************************************************************
+* Function : int AM53C974_abort(Scsi_Cmnd *cmd)
+*
+* Purpose : abort a command
+*
+* Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
+* host byte of the result field to, if zero DID_ABORTED is
+* used.
+*
+* Returns : 0 - success, -1 on failure.
+ **************************************************************************/
+int AM53C974_abort(Scsi_Cmnd *cmd)
+{
+AM53C974_local_declare();
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *tmp, **prev;
+
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+/* interrupts stay disabled for the whole queue walk; every exit path below
+ re-enables them with sti() before returning */
+cli();
+AM53C974_setio(instance);
+
+DEB_ABORT(printk(SEPARATOR_LINE));
+DEB_ABORT(printk("scsi%d : AM53C974_abort called -- trouble starts!!\n", instance->host_no));
+DEB_ABORT(AM53C974_print(instance));
+DEB_ABORT(AM53C974_keywait());
+
+/* Case 1 : If the command is the currently executing command,
+ we'll set the aborted flag and return control so that the
+ information transfer routine can exit cleanly. */
+if ((hostdata->connected == cmd) || (hostdata->sel_cmd == cmd)) {
+ DEB_ABORT(printk("scsi%d: aborting connected command\n", instance->host_no));
+ hostdata->aborted = 1;
+ hostdata->msgout[0] = ABORT;
+ sti();
+ return(SCSI_ABORT_PENDING); }
+
+/* Case 2 : If the command hasn't been issued yet,
+ we simply remove it from the issue queue. */
+for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue),
+ tmp = (Scsi_Cmnd *)hostdata->issue_queue; tmp;
+ prev = (Scsi_Cmnd **)&(tmp->host_scribble),
+ tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ if (cmd == tmp) {
+ DEB_ABORT(printk("scsi%d : abort removed command from issue queue.\n", instance->host_no));
+ REMOVE(5, *prev, tmp, tmp->host_scribble);
+ /* unlink: predecessor's link now skips the aborted command */
+ (*prev) = (Scsi_Cmnd *)tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ sti();
+ tmp->done(tmp); /* complete the command back to the mid-layer */
+ return(SCSI_ABORT_SUCCESS); }
+#ifdef AM53C974_DEBUG_ABORT
+ else {
+ if (prev == (Scsi_Cmnd **)tmp)
+ printk("scsi%d : LOOP\n", instance->host_no);
+ }
+#endif
+ }
+
+/* Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail. */
+if (hostdata->connected || hostdata->sel_cmd) {
+ DEB_ABORT(printk("scsi%d : abort failed, other command connected.\n", instance->host_no));
+ sti();
+ return(SCSI_ABORT_NOT_RUNNING); }
+
+/* Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message. */
+for (tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; tmp;
+ tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ if (cmd == tmp) {
+ DEB_ABORT(printk("scsi%d: aborting disconnected command\n", instance->host_no));
+ hostdata->aborted = 1;
+ hostdata->msgout[0] = ABORT;
+ hostdata->selecting = 1;
+ hostdata->sel_cmd = tmp;
+ /* NOTE(review): CMDREG_DSR disables selection/reselection; presumably the
+ interrupt handler finishes delivering the ABORT message — verify */
+ AM53C974_write_8(CMDREG, CMDREG_DSR);
+ sti();
+ return(SCSI_ABORT_PENDING); }
+ }
+
+/* Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke. */
+DEB_ABORT(printk("scsi%d : abort failed, command not found.\n", instance->host_no));
+sti();
+return(SCSI_ABORT_NOT_RUNNING);
+}
+
+/**************************************************************************
+* Function : int AM53C974_reset(Scsi_Cmnd *cmd)
+*
+* Purpose : reset the SCSI controller and bus
+*
+* Inputs : cmd -- which command within the command block was responsible for the reset
+*
+* Returns : status (SCSI_ABORT_SUCCESS)
+**************************************************************************/
+int AM53C974_reset(Scsi_Cmnd *cmd, unsigned int flags)
+{
+AM53C974_local_declare();
+int i;
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+cli();
+DEB(printk("AM53C974_reset called; "));
+
+printk("AM53C974_reset called\n");
+AM53C974_print(instance);
+AM53C974_keywait();
+
+/* do hard reset */
+AM53C974_write_8(CMDREG, CMDREG_RDEV);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+hostdata->msgout[0] = NOP;
+/* forget all per-target negotiation state; targets are back to async after reset */
+for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_per[i] = DEF_STP;
+ hostdata->sync_off[i] = 0;
+ hostdata->sync_neg[i] = 0; }
+/* drop every queued/connected command reference; the mid-layer will retry them */
+hostdata->last_message[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->issue_queue = NULL;
+hostdata->disconnected_queue = NULL;
+hostdata->in_reset = 0;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+
+/* reset bus */
+AM53C974_write_8(CNTLREG1, CNTLREG1_DISR | instance->this_id); /* disable interrupt upon SCSI RESET */
+AM53C974_write_8(CMDREG, CMDREG_RBUS); /* reset SCSI bus */
+udelay(40); /* allow the reset pulse to settle before reprogramming the chip */
+AM53C974_config_after_reset(instance);
+
+sti();
+cmd->result = DID_RESET << 16;
+cmd->scsi_done(cmd); /* complete the command that triggered the reset */
+return SCSI_ABORT_SUCCESS;
+}
+
+
+/*
+ * AM53C974_release()
+ *
+ * Release resources allocated for a single AM53C974 adapter.
+ */
+int AM53C974_release(struct Scsi_Host *shp)
+{
+	free_irq(shp->irq, NULL);	/* give back the interrupt line */
+	scsi_unregister(shp);		/* detach the host from the SCSI mid-layer */
+	return 0;			/* release never fails */
+}
+
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AM53C974;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/AM53C974.h b/linux/src/drivers/scsi/AM53C974.h
new file mode 100644
index 0000000..d94db92
--- /dev/null
+++ b/linux/src/drivers/scsi/AM53C974.h
@@ -0,0 +1,409 @@
+/* AM53/79C974 (PCscsi) driver release 0.5
+ *
+ * The architecture and much of the code of this device
+ * driver was originally developed by Drew Eckhardt for
+ * the NCR5380. The following copyrights apply:
+ * For the architecture and all parts similar to the NCR5380:
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * The AM53C974_nobios_detect code was originally developed by
+ * Robin Cutshaw (robin@xfree86.org) and is used here in a
+ * modified form.
+ *
+ * For the other parts:
+ * Copyright 1994, D. Frieauff
+ * EMail: fri@rsx42sun0.dofn.de
+ * Phone: x49-7545-8-2256 , x49-7541-42305
+ */
+
+#ifndef AM53C974_H
+#define AM53C974_H
+
+#include <scsi/scsicam.h>
+
+/***************************************************************************************
+* Default setting of the controller's SCSI id. Edit and uncomment this only if your *
+* BIOS does not correctly initialize the controller's SCSI id. *
+* If you don't get a warning during boot, it is correctly initialized. *
+****************************************************************************************/
+/* #define AM53C974_SCSI_ID 7 */
+
+/***************************************************************************************
+* Default settings for sync. negotiation enable, transfer rate and sync. offset. *
+* These settings can be replaced by LILO overrides (append) with the following syntax: *
+* AM53C974=host-scsi-id, target-scsi-id, max-rate, max-offset *
+* Sync. negotiation is disabled by default and will be enabled for those targets which *
+* are specified in the LILO override *
+****************************************************************************************/
+#define DEFAULT_SYNC_NEGOTIATION_ENABLED 0 /* 0 or 1 */
+#define DEFAULT_RATE 5 /* MHz, min: 3; max: 10 */
+#define DEFAULT_SYNC_OFFSET 0 /* bytes, min: 0; max: 15; use 0 for async. mode */
+
+
+/* --------------------- don't edit below here --------------------- */
+
+#define AM53C974_DRIVER_REVISION_MAJOR 0
+#define AM53C974_DRIVER_REVISION_MINOR 5
+#define SEPARATOR_LINE \
+"--------------------------------------------------------------------------\n"
+
+/* debug control */
+/* #define AM53C974_DEBUG */
+/* #define AM53C974_DEBUG_MSG */
+/* #define AM53C974_DEBUG_KEYWAIT */
+/* #define AM53C974_DEBUG_INIT */
+/* #define AM53C974_DEBUG_QUEUE */
+/* #define AM53C974_DEBUG_INFO */
+/* #define AM53C974_DEBUG_LINKED */
+/* #define VERBOSE_AM53C974_DEBUG */
+/* #define AM53C974_DEBUG_INTR */
+/* #define AM53C974_DEB_RESEL */
+#define AM53C974_DEBUG_ABORT
+/* #define AM53C974_OPTION_DEBUG_PROBE_ONLY */
+
+/* special options/constants */
+#define DEF_CLK 40 /* chip clock freq. in MHz */
+#define MIN_PERIOD 4 /* for negotiation: min. number of clocks per cycle */
+#define MAX_PERIOD 13 /* for negotiation: max. number of clocks per cycle */
+#define MAX_OFFSET 15 /* for negotiation: max. offset (0=async) */
+
+#define DEF_SCSI_TIMEOUT 245 /* STIMREG value, 40 Mhz */
+#define DEF_STP 8 /* STPREG value assuming 5.0 MB/sec, FASTCLK, FASTSCSI */
+#define DEF_SOF_RAD 0 /* REQ/ACK deassertion delay */
+#define DEF_SOF_RAA 0 /* REQ/ACK assertion delay */
+#define DEF_ETM 0 /* CNTLREG1, ext. timing mode */
+#define DEF_PERE 1 /* CNTLREG1, parity error reporting */
+#define DEF_CLKF 0 /* CLKFREG, 0=40 Mhz */
+#define DEF_ENF 1 /* CNTLREG2, enable features */
+#define DEF_ADIDCHK 0 /* CNTLREG3, additional ID check */
+#define DEF_FASTSCSI 1 /* CNTLREG3, fast SCSI */
+#define DEF_FASTCLK 1 /* CNTLREG3, fast clocking, 5 MB/sec at 40MHz chip clk */
+#define DEF_GLITCH 1 /* CNTLREG4, glitch eater, 0=12ns, 1=35ns, 2=25ns, 3=off */
+#define DEF_PWD 0 /* CNTLREG4, reduced power feature */
+#define DEF_RAE 0 /* CNTLREG4, RAE active negation on REQ, ACK only */
+#define DEF_RADE 1 /* 1CNTLREG4, active negation on REQ, ACK and data */
+
+/*** PCI block ***/
+/* standard registers are defined in <linux/pci.h> */
+#ifndef PCI_VENDOR_ID_AMD
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#endif
+#define PCI_BASE_MASK 0xFFFFFFE0
+#define PCI_COMMAND_PERREN 0x40
+#define PCI_SCRATCH_REG_0 0x40 /* 16 bits */
+#define PCI_SCRATCH_REG_1 0x42 /* 16 bits */
+#define PCI_SCRATCH_REG_2 0x44 /* 16 bits */
+#define PCI_SCRATCH_REG_3 0x46 /* 16 bits */
+#define PCI_SCRATCH_REG_4 0x48 /* 16 bits */
+#define PCI_SCRATCH_REG_5 0x4A /* 16 bits */
+#define PCI_SCRATCH_REG_6 0x4C /* 16 bits */
+#define PCI_SCRATCH_REG_7 0x4E /* 16 bits */
+
+/*** SCSI block ***/
+#define CTCLREG 0x00 /* r current transf. count, low byte */
+#define CTCMREG 0x04 /* r current transf. count, middle byte */
+#define CTCHREG 0x38 /* r current transf. count, high byte */
+#define STCLREG 0x00 /* w start transf. count, low byte */
+#define STCMREG 0x04 /* w start transf. count, middle byte */
+#define STCHREG 0x38 /* w start transf. count, high byte */
+#define FFREG 0x08 /* rw SCSI FIFO reg. */
+#define STIMREG 0x14 /* w SCSI timeout reg. */
+
+#define SDIDREG 0x10 /* w SCSI destination ID reg. */
+#define SDIREG_MASK 0x07 /* mask */
+
+#define STPREG 0x18 /* w synchronous transf. period reg. */
+#define STPREG_STP 0x1F /* synchr. transfer period */
+
+#define CLKFREG 0x24 /* w clock factor reg. */
+#define CLKFREG_MASK 0x07 /* mask */
+
+#define CMDREG 0x0C /* rw SCSI command reg. */
+#define CMDREG_DMA 0x80 /* set DMA mode (set together with opcodes below) */
+#define CMDREG_IT 0x10 /* information transfer */
+#define CMDREG_ICCS 0x11 /* initiator command complete steps */
+#define CMDREG_MA 0x12 /* message accepted */
+#define CMDREG_TPB 0x98 /* transfer pad bytes, DMA mode only */
+#define CMDREG_SATN 0x1A /* set ATN */
+#define CMDREG_RATN 0x1B /* reset ATN */
+#define CMDREG_SOAS 0x41 /* select without ATN steps */
+#define CMDREG_SAS 0x42 /* select with ATN steps (1 msg byte) */
+#define CMDREG_SASS 0x43 /* select with ATN and stop steps */
+#define CMDREG_ESR 0x44 /* enable selection/reselection */
+#define CMDREG_DSR 0x45 /* disable selection/reselection */
+#define CMDREG_SA3S 0x46 /* select with ATN 3 steps (3 msg bytes) */
+#define CMDREG_NOP 0x00 /* no operation */
+#define CMDREG_CFIFO 0x01 /* clear FIFO */
+#define CMDREG_RDEV 0x02 /* reset device */
+#define CMDREG_RBUS 0x03 /* reset SCSI bus */
+
+#define STATREG 0x10 /* r SCSI status reg. */
+#define STATREG_INT 0x80 /* SCSI interrupt condition detected */
+#define STATREG_IOE 0x40 /* SCSI illegal operation error detected */
+#define STATREG_PE 0x20 /* SCSI parity error detected */
+#define STATREG_CTZ 0x10 /* CTC reg decremented to zero */
+#define STATREG_MSG 0x04 /* SCSI MSG phase (latched?) */
+#define STATREG_CD 0x02 /* SCSI C/D phase (latched?) */
+#define STATREG_IO 0x01 /* SCSI I/O phase (latched?) */
+#define STATREG_PHASE 0x07 /* SCSI phase mask */
+
+#define INSTREG 0x14 /* r interrupt status reg. */
+#define INSTREG_SRST 0x80 /* SCSI reset detected */
+#define INSTREG_ICMD 0x40 /* SCSI invalid command detected */
+#define INSTREG_DIS 0x20 /* target disconnected or sel/resel timeout*/
+#define INSTREG_SR 0x10 /* device on bus has service request */
+#define INSTREG_SO 0x08 /* successful operation */
+#define INSTREG_RESEL 0x04 /* device reselected as initiator */
+
+#define ISREG 0x18 /* r internal state reg. */
+#define ISREG_SOF 0x08 /* synchronous offset flag (act. low) */
+#define ISREG_IS 0x07 /* status of intermediate op. */
+#define ISREG_OK_NO_STOP 0x04 /* selection successful */
+#define ISREG_OK_STOP 0x01 /* selection successful */
+
+#define CFIREG 0x1C /* r current FIFO/internal state reg. */
+#define CFIREG_IS 0xE0 /* status of intermediate op. */
+#define CFIREG_CF 0x1F /* number of bytes in SCSI FIFO */
+
+#define SOFREG 0x1C /* w synchr. offset reg. */
+#define SOFREG_RAD 0xC0 /* REQ/ACK deassertion delay (sync.) */
+#define SOFREG_RAA 0x30 /* REQ/ACK assertion delay (sync.) */
+#define SOFREG_SO 0x0F /* synch. offset (sync.) */
+
+#define CNTLREG1 0x20 /* rw control register one */
+#define CNTLREG1_ETM 0x80 /* set extended timing mode */
+#define CNTLREG1_DISR 0x40 /* disable interrupt on SCSI reset */
+#define CNTLREG1_PERE 0x10 /* enable parity error reporting */
+#define CNTLREG1_SID 0x07 /* host adapter SCSI ID */
+
+#define CNTLREG2 0x2C /* rw control register two */
+#define CNTLREG2_ENF 0x40 /* enable features */
+
+#define CNTLREG3 0x30 /* rw control register three */
+#define CNTLREG3_ADIDCHK 0x80 /* additional ID check */
+#define CNTLREG3_FASTSCSI 0x10 /* fast SCSI */
+#define CNTLREG3_FASTCLK 0x08 /* fast SCSI clocking */
+
+#define CNTLREG4 0x34 /* rw control register four */
+#define CNTLREG4_GLITCH 0xC0 /* glitch eater */
+#define CNTLREG4_PWD 0x20 /* reduced power feature */
+#define CNTLREG4_RAE 0x08 /* write only, active negot. ctrl. */
+#define CNTLREG4_RADE 0x04 /* active negot. ctrl. */
+#define CNTLREG4_RES 0x10 /* reserved bit, must be 1 */
+
+/*** DMA block ***/
+#define DMACMD 0x40 /* rw command */
+#define DMACMD_DIR 0x80 /* transfer direction (1=read from device) */
+#define DMACMD_INTE_D 0x40 /* DMA transfer interrupt enable */
+#define DMACMD_INTE_P 0x20 /* page transfer interrupt enable */
+#define DMACMD_MDL 0x10 /* map to memory descriptor list */
+#define DMACMD_DIAG 0x04 /* diagnostics, set to 0 */
+#define DMACMD_IDLE 0x00 /* idle cmd */
+#define DMACMD_BLAST 0x01 /* flush FIFO to memory */
+#define DMACMD_ABORT 0x02 /* terminate DMA */
+#define DMACMD_START 0x03 /* start DMA */
+
+#define DMASTATUS 0x54 /* r status register */
+#define DMASTATUS_BCMPLT 0x20 /* BLAST complete */
+#define DMASTATUS_SCSIINT 0x10 /* SCSI interrupt pending */
+#define DMASTATUS_DONE 0x08 /* DMA transfer terminated */
+#define DMASTATUS_ABORT 0x04 /* DMA transfer aborted */
+#define DMASTATUS_ERROR 0x02 /* DMA transfer error */
+#define DMASTATUS_PWDN 0x02 /* power down indicator; NOTE(review): shares 0x02 with DMASTATUS_ERROR — datasheet suggests this should be 0x01, verify before use */
+
+#define DMASTC 0x44 /* rw starting transfer count */
+#define DMASPA 0x48 /* rw starting physical address */
+#define DMAWBC 0x4C /* r working byte counter */
+#define DMAWAC 0x50 /* r working address counter */
+#define DMASMDLA 0x58 /* rw starting MDL address */
+#define DMAWMAC 0x5C /* r working MDL counter */
+
+/*** SCSI phases ***/
+#define PHASE_MSGIN 0x07
+#define PHASE_MSGOUT 0x06
+#define PHASE_RES_1 0x05
+#define PHASE_RES_0 0x04
+#define PHASE_STATIN 0x03
+#define PHASE_CMDOUT 0x02
+#define PHASE_DATAIN 0x01
+#define PHASE_DATAOUT 0x00
+
+/* Per-adapter driver state, stored in the Scsi_Host's hostdata area.
+ * Queue pointers and flags are touched from interrupt context, hence volatile. */
+struct AM53C974_hostdata {
+ volatile unsigned in_reset:1; /* flag, says bus reset pending */
+ volatile unsigned aborted:1; /* flag, says aborted */
+ volatile unsigned selecting:1; /* selection started, but not yet finished */
+ volatile unsigned disconnecting: 1; /* disconnection started, but not yet finished */
+ volatile unsigned dma_busy:1; /* dma busy when service request for info transfer received */
+ volatile unsigned char msgout[10]; /* message to output in MSGOUT_PHASE */
+ volatile unsigned char last_message[10]; /* last message OUT */
+ volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */
+ volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */
+ volatile Scsi_Cmnd *sel_cmd; /* command for selection */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile unsigned char busy[8]; /* index = target, bit = lun */
+ unsigned char sync_per[8]; /* synchronous transfer period (in effect) */
+ unsigned char sync_off[8]; /* synchronous offset (in effect) */
+ unsigned char sync_neg[8]; /* sync. negotiation performed (in effect) */
+ unsigned char sync_en[8]; /* sync. negotiation enabled for this target (setup) */
+ unsigned char max_rate[8]; /* max. transfer rate (setup) */
+ unsigned char max_offset[8]; /* max. sync. offset (setup), only valid if corresponding sync_en is nonzero */
+ };
+
+#define AM53C974 { \
+ NULL, /* pointer to next in list */ \
+ NULL, /* long * usage_count */ \
+ NULL, /* struct proc_dir_entry *proc_dir */ \
+ NULL, /* int (*proc_info)(char *, char **, off_t, int, int, int); */ \
+ "AM53C974", /* name */ \
+ AM53C974_detect, /* int (* detect)(struct SHT *) */ \
+ NULL, /* int (*release)(struct Scsi_Host *) */ \
+ AM53C974_info, /* const char *(* info)(struct Scsi_Host *) */ \
+ AM53C974_command, /* int (* command)(Scsi_Cmnd *) */ \
+ AM53C974_queue_command, /* int (* queuecommand)(Scsi_Cmnd *, \
+ void (*done)(Scsi_Cmnd *)) */ \
+ AM53C974_abort, /* int (* abort)(Scsi_Cmnd *) */ \
+ AM53C974_reset, /* int (* reset)(Scsi_Cmnd *) */ \
+ NULL, /* int (* slave_attach)(int, int) */ \
+ scsicam_bios_param, /* int (* bios_param)(Disk *, int, int[]) */ \
+ 12, /* can_queue */ \
+ -1, /* this_id */ \
+ SG_ALL, /* sg_tablesize */ \
+ 1, /* cmd_per_lun */ \
+ 0, /* present, i.e. how many adapters of this kind */ \
+ 0, /* unchecked_isa_dma */ \
+ DISABLE_CLUSTERING /* use_clustering */ \
+ }
+
+void AM53C974_setup(char *str, int *ints);
+int AM53C974_detect(Scsi_Host_Template *tpnt);
+int AM53C974_biosparm(Disk *disk, int dev, int *info_array);
+const char *AM53C974_info(struct Scsi_Host *);
+int AM53C974_command(Scsi_Cmnd *SCpnt);
+int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+int AM53C974_abort(Scsi_Cmnd *cmd);
+int AM53C974_reset (Scsi_Cmnd *cmd, unsigned int flags);
+
+#define AM53C974_local_declare() unsigned long io_port
+#define AM53C974_setio(instance) io_port = instance->io_port
+#define AM53C974_read_8(addr) inb(io_port + (addr))
+#define AM53C974_write_8(addr,x) outb((x), io_port + (addr))
+#define AM53C974_read_16(addr) inw(io_port + (addr))
+#define AM53C974_write_16(addr,x) outw((x), io_port + (addr))
+#define AM53C974_read_32(addr) inl(io_port + (addr))
+#define AM53C974_write_32(addr,x) outl((x), io_port + (addr))
+
+#define AM53C974_poll_int() { do { statreg = AM53C974_read_8(STATREG); } \
+ while (!(statreg & STATREG_INT)) ; \
+ AM53C974_read_8(INSTREG) ; } /* clear int */
+#define AM53C974_cfifo() (AM53C974_read_8(CFIREG) & CFIREG_CF)
+
+/* These are "special" values for the tag parameter passed to AM53C974_select. */
+#define TAG_NEXT -1 /* Use next free tag */
+#define TAG_NONE -2 /* Establish I_T_L nexus instead of I_T_L_Q
+ * even on SCSI-II devices */
+
+/************ LILO overrides *************/
+/* One "AM53C974=..." kernel command line (LILO append) override entry;
+ * see the syntax note near the top of this header. */
+typedef struct _override_t {
+ int host_scsi_id; /* SCSI id of the bus controller */
+ int target_scsi_id; /* SCSI id of target */
+ int max_rate; /* max. transfer rate */
+ int max_offset; /* max. sync. offset, 0 = asynchronous */
+ } override_t;
+
+/************ PCI stuff *************/
+#define AM53C974_PCIREG_OPEN() outb(0xF1, 0xCF8); outb(0, 0xCFA)
+#define AM53C974_PCIREG_CLOSE() outb(0, 0xCF8)
+#define AM53C974_PCIREG_READ_BYTE(instance,a) ( inb((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_READ_WORD(instance,a) ( inw((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_READ_DWORD(instance,a) ( inl((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_BYTE(instance,x,a) ( outb((x), (a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_WORD(instance,x,a) ( outw((x), (a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_DWORD(instance,x,a) ( outl((x), (a) + (instance)->io_port) )
+
+/* In-memory image of a PCI type-0 configuration space header, as used by the
+ * driver's BIOS-less probe code. Each 32-bit register is a union of the raw
+ * dword and its byte/word sub-fields; the underscore-prefixed #defines are
+ * shorthand accessors for the nested members. */
+typedef struct _pci_config_t {
+ /* start of official PCI config space header */
+ union {
+ unsigned int device_vendor;
+ struct {
+ unsigned short vendor;
+ unsigned short device;
+ } dv;
+ } dv_id;
+#define _device_vendor dv_id.device_vendor
+#define _vendor dv_id.dv.vendor
+#define _device dv_id.dv.device
+ union {
+ unsigned int status_command;
+ struct {
+ unsigned short command;
+ unsigned short status;
+ } sc;
+ } stat_cmd;
+#define _status_command stat_cmd.status_command
+#define _command stat_cmd.sc.command
+#define _status stat_cmd.sc.status
+ union {
+ unsigned int class_revision;
+ struct {
+ unsigned char rev_id;
+ unsigned char prog_if;
+ unsigned char sub_class;
+ unsigned char base_class;
+ } cr;
+ } class_rev;
+#define _class_revision class_rev.class_revision
+#define _rev_id class_rev.cr.rev_id
+#define _prog_if class_rev.cr.prog_if
+#define _sub_class class_rev.cr.sub_class
+#define _base_class class_rev.cr.base_class
+ union {
+ unsigned int bist_header_latency_cache;
+ struct {
+ unsigned char cache_line_size;
+ unsigned char latency_timer;
+ unsigned char header_type;
+ unsigned char bist;
+ } bhlc;
+ } bhlc;
+#define _bist_header_latency_cache bhlc.bist_header_latency_cache
+#define _cache_line_size bhlc.bhlc.cache_line_size
+#define _latency_timer bhlc.bhlc.latency_timer
+#define _header_type bhlc.bhlc.header_type
+#define _bist bhlc.bhlc.bist
+ /* six base address registers (BARs) */
+ unsigned int _base0;
+ unsigned int _base1;
+ unsigned int _base2;
+ unsigned int _base3;
+ unsigned int _base4;
+ unsigned int _base5;
+ unsigned int rsvd1;
+ unsigned int rsvd2;
+ unsigned int _baserom; /* expansion ROM base address */
+ unsigned int rsvd3;
+ unsigned int rsvd4;
+ union {
+ unsigned int max_min_ipin_iline;
+ struct {
+ unsigned char int_line;
+ unsigned char int_pin;
+ unsigned char min_gnt;
+ unsigned char max_lat;
+ } mmii;
+ } mmii;
+#define _max_min_ipin_iline mmii.max_min_ipin_iline
+#define _int_line mmii.mmii.int_line
+#define _int_pin mmii.mmii.int_pin
+#define _min_gnt mmii.mmii.min_gnt
+#define _max_lat mmii.mmii.max_lat
+ /* end of official PCI config space header */
+ unsigned short _ioaddr; /* config type 1 - private I/O addr */
+ unsigned int _pcibus; /* config type 2 - private bus id */
+ unsigned int _cardnum; /* config type 2 - private card number */
+} pci_config_t;
+
+#endif /* AM53C974_H */
diff --git a/linux/src/drivers/scsi/BusLogic.c b/linux/src/drivers/scsi/BusLogic.c
new file mode 100644
index 0000000..3c52e15
--- /dev/null
+++ b/linux/src/drivers/scsi/BusLogic.c
@@ -0,0 +1,5003 @@
+/*
+
+ Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters
+
+ Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ This program is free software; you may redistribute and/or modify it under
+ the terms of the GNU General Public License Version 2 as published by the
+ Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for complete details.
+
+ The author respectfully requests that any modifications to this software be
+ sent directly to him for evaluation and testing.
+
+ Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose
+ advice has been invaluable, to David Gentzel, for writing the original Linux
+ BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site.
+
+ Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB
+ Manager available as freely redistributable source code.
+
+*/
+
+
+#define BusLogic_DriverVersion "2.0.15"
+#define BusLogic_DriverDate "17 August 1998"
+
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include "BusLogic.h"
+#include "FlashPoint.c"
+
+
+/*
+ BusLogic_DriverOptionsCount is a count of the number of BusLogic Driver
+ Options specifications provided via the Linux Kernel Command Line or via
+ the Loadable Kernel Module Installation Facility.
+*/
+
+static int
+ BusLogic_DriverOptionsCount = 0;
+
+
+/*
+ BusLogic_DriverOptions is an array of Driver Options structures representing
+ BusLogic Driver Options specifications provided via the Linux Kernel Command
+ Line or via the Loadable Kernel Module Installation Facility.
+*/
+
+static BusLogic_DriverOptions_T
+ BusLogic_DriverOptions[BusLogic_MaxHostAdapters]; /* one slot per possible adapter */
+
+
+/*
+ BusLogic_Options can be assigned a string by the Loadable Kernel Module
+ Installation Facility to be parsed for BusLogic Driver Options
+ specifications.
+*/
+
+static char
+ *BusLogic_Options = NULL;
+
+
+/*
+ BusLogic_ProbeOptions is a set of Probe Options to be applied across
+ all BusLogic Host Adapters.
+*/
+
+static BusLogic_ProbeOptions_T
+ BusLogic_ProbeOptions = { 0 }; /* all probe-limiting flags default to off */
+
+
+/*
+ BusLogic_GlobalOptions is a set of Global Options to be applied across
+ all BusLogic Host Adapters.
+*/
+
+static BusLogic_GlobalOptions_T
+ BusLogic_GlobalOptions = { 0 }; /* tracing options default to off */
+
+
+/*
+ BusLogic_FirstRegisteredHostAdapter and BusLogic_LastRegisteredHostAdapter
+ are pointers to the first and last registered BusLogic Host Adapters.
+*/
+
+static BusLogic_HostAdapter_T
+ *BusLogic_FirstRegisteredHostAdapter = NULL, /* head of singly linked adapter list */
+ *BusLogic_LastRegisteredHostAdapter = NULL; /* tail, enables O(1) append */
+
+
+/*
+ BusLogic_ProbeInfoCount is the number of entries in BusLogic_ProbeInfoList.
+*/
+
+static int
+ BusLogic_ProbeInfoCount = 0;
+
+
+/*
+ BusLogic_ProbeInfoList is the list of I/O Addresses and Bus Probe Information
+ to be checked for potential BusLogic Host Adapters. It is initialized by
+ interrogating the PCI Configuration Space on PCI machines as well as from the
+ list of standard BusLogic I/O Addresses.
+*/
+
+static BusLogic_ProbeInfo_T
+ *BusLogic_ProbeInfoList = NULL; /* allocated elsewhere; sized for BusLogic_MaxHostAdapters entries — TODO confirm allocation site */
+
+
+/*
+ BusLogic_CommandFailureReason holds a string identifying the reason why a
+ call to BusLogic_Command failed. It is only non-NULL when BusLogic_Command
+ returns a failure code.
+*/
+
+static char
+ *BusLogic_CommandFailureReason; /* points to string literals only; never freed */
+
+
+/*
+ BusLogic_ProcDirectoryEntry is the BusLogic /proc/scsi directory entry.
+*/
+
+PROC_DirectoryEntry_T
+ BusLogic_ProcDirectoryEntry =
+ { PROC_SCSI_BUSLOGIC, 8, "BusLogic", S_IFDIR | S_IRUGO | S_IXUGO, 2 }; /* 8 == strlen("BusLogic") */
+
+
+/*
+ BusLogic_AnnounceDriver announces the Driver Version and Date, Author's
+ Name, Copyright Notice, and Electronic Mail Address.
+*/
+
+static void BusLogic_AnnounceDriver(BusLogic_HostAdapter_T *HostAdapter) /* logs version/date and copyright banner for this adapter */
+{
+ BusLogic_Announce("***** BusLogic SCSI Driver Version "
+ BusLogic_DriverVersion " of "
+ BusLogic_DriverDate " *****\n", HostAdapter); /* version/date are adjacent-literal concatenation */
+ BusLogic_Announce("Copyright 1995-1998 by Leonard N. Zubkoff "
+ "<lnz@dandelion.com>\n", HostAdapter);
+}
+
+
+/*
+ BusLogic_DriverInfo returns the Host Adapter Name to identify this SCSI
+ Driver and Host Adapter.
+*/
+
+const char *BusLogic_DriverInfo(SCSI_Host_T *Host)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Host->hostdata; /* hostdata holds the driver-private adapter structure */
+ return HostAdapter->FullModelName; /* string owned by the adapter; caller must not free */
+}
+
+
+/*
+ BusLogic_RegisterHostAdapter adds Host Adapter to the list of registered
+ BusLogic Host Adapters.
+*/
+
+static void BusLogic_RegisterHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ HostAdapter->Next = NULL; /* new adapter always becomes the list tail */
+ if (BusLogic_FirstRegisteredHostAdapter == NULL)
+ { /* empty list: node is both head and tail */
+ BusLogic_FirstRegisteredHostAdapter = HostAdapter;
+ BusLogic_LastRegisteredHostAdapter = HostAdapter;
+ }
+ else
+ { /* O(1) append via the tail pointer */
+ BusLogic_LastRegisteredHostAdapter->Next = HostAdapter;
+ BusLogic_LastRegisteredHostAdapter = HostAdapter;
+ }
+}
+
+
+/*
+ BusLogic_UnregisterHostAdapter removes Host Adapter from the list of
+ registered BusLogic Host Adapters.
+*/
+
+static void BusLogic_UnregisterHostAdapter(BusLogic_HostAdapter_T *HostAdapter) /* unlink HostAdapter from the registered-adapter list */
+{
+ if (HostAdapter == BusLogic_FirstRegisteredHostAdapter)
+ { /* removing the head */
+ BusLogic_FirstRegisteredHostAdapter =
+ BusLogic_FirstRegisteredHostAdapter->Next;
+ if (HostAdapter == BusLogic_LastRegisteredHostAdapter)
+ BusLogic_LastRegisteredHostAdapter = NULL; /* list is now empty */
+ }
+ else
+ { /* linear search for the predecessor */
+ BusLogic_HostAdapter_T *PreviousHostAdapter =
+ BusLogic_FirstRegisteredHostAdapter;
+ while (PreviousHostAdapter != NULL &&
+ PreviousHostAdapter->Next != HostAdapter)
+ PreviousHostAdapter = PreviousHostAdapter->Next;
+ if (PreviousHostAdapter != NULL) /* unlink; BUGFIX: also retarget the tail pointer so it never dangles at the removed adapter */
+ { PreviousHostAdapter->Next = HostAdapter->Next; if (HostAdapter == BusLogic_LastRegisteredHostAdapter) BusLogic_LastRegisteredHostAdapter = PreviousHostAdapter; }
+ }
+ HostAdapter->Next = NULL;
+}
+
+
+/*
+ BusLogic_InitializeCCBs initializes a group of Command Control Blocks (CCBs)
+ for Host Adapter from the BlockSize bytes located at BlockPointer. The newly
+ created CCBs are added to Host Adapter's free list.
+*/
+
+static void BusLogic_InitializeCCBs(BusLogic_HostAdapter_T *HostAdapter,
+ void *BlockPointer, int BlockSize)
+{
+ BusLogic_CCB_T *CCB = (BusLogic_CCB_T *) BlockPointer;
+ memset(BlockPointer, 0, BlockSize); /* zero the whole allocation group up front */
+ CCB->AllocationGroupHead = true; /* only the first CCB is marked; it is the pointer later passed to kfree */
+ while ((BlockSize -= sizeof(BusLogic_CCB_T)) >= 0) /* carve as many whole CCBs as fit */
+ {
+ CCB->Status = BusLogic_CCB_Free;
+ CCB->HostAdapter = HostAdapter;
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ { /* FlashPoint CCBs need SCCB-Manager callback/base-address fields */
+ CCB->CallbackFunction = BusLogic_QueueCompletedCCB;
+ CCB->BaseAddress = HostAdapter->FlashPointInfo.BaseAddress;
+ }
+ CCB->Next = HostAdapter->Free_CCBs; /* push onto free list */
+ CCB->NextAll = HostAdapter->All_CCBs; /* and onto the all-CCBs list used for teardown */
+ HostAdapter->Free_CCBs = CCB;
+ HostAdapter->All_CCBs = CCB;
+ HostAdapter->AllocatedCCBs++;
+ CCB++;
+ }
+}
+
+
+/*
+ BusLogic_CreateInitialCCBs allocates the initial CCBs for Host Adapter.
+*/
+
+static boolean BusLogic_CreateInitialCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
+ int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(BusLogic_CCB_T); /* one kmalloc per group of CCBs */
+ while (HostAdapter->AllocatedCCBs < HostAdapter->InitialCCBs)
+ {
+ void *BlockPointer = kmalloc(BlockSize,
+ (HostAdapter->BounceBuffersRequired
+ ? GFP_ATOMIC | GFP_DMA
+ : GFP_ATOMIC)); /* DMA-capable memory when bounce buffers are required */
+ if (BlockPointer == NULL)
+ { /* initial CCBs are mandatory: failure detaches the adapter */
+ BusLogic_Error("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
+ HostAdapter);
+ return false;
+ }
+ BusLogic_InitializeCCBs(HostAdapter, BlockPointer, BlockSize);
+ }
+ return true;
+}
+
+
+/*
+ BusLogic_DestroyCCBs deallocates the CCBs for Host Adapter.
+*/
+
+static void BusLogic_DestroyCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_CCB_T *NextCCB = HostAdapter->All_CCBs, *CCB;
+ HostAdapter->All_CCBs = NULL;
+ HostAdapter->Free_CCBs = NULL;
+ while ((CCB = NextCCB) != NULL)
+ {
+ NextCCB = CCB->NextAll; /* read the link before the group containing CCB may be freed */
+ if (CCB->AllocationGroupHead)
+ kfree(CCB); /* only group heads were returned by kmalloc; members are interior pointers */
+ }
+}
+
+
+/*
+ BusLogic_CreateAdditionalCCBs allocates Additional CCBs for Host Adapter. If
+ allocation fails and there are no remaining CCBs available, the Driver Queue
+ Depth is decreased to a known safe value to avoid potential deadlocks when
+ multiple host adapters share the same IRQ Channel.
+*/
+
+static void BusLogic_CreateAdditionalCCBs(BusLogic_HostAdapter_T *HostAdapter,
+ int AdditionalCCBs,
+ boolean SuccessMessageP)
+{
+ int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(BusLogic_CCB_T);
+ int PreviouslyAllocated = HostAdapter->AllocatedCCBs;
+ if (AdditionalCCBs <= 0) return;
+ while (HostAdapter->AllocatedCCBs - PreviouslyAllocated < AdditionalCCBs) /* may overshoot slightly: whole groups only */
+ {
+ void *BlockPointer = kmalloc(BlockSize,
+ (HostAdapter->BounceBuffersRequired
+ ? GFP_ATOMIC | GFP_DMA
+ : GFP_ATOMIC));
+ if (BlockPointer == NULL) break; /* best effort: partial growth is acceptable here */
+ BusLogic_InitializeCCBs(HostAdapter, BlockPointer, BlockSize);
+ }
+ if (HostAdapter->AllocatedCCBs > PreviouslyAllocated)
+ {
+ if (SuccessMessageP)
+ BusLogic_Notice("Allocated %d additional CCBs (total now %d)\n",
+ HostAdapter,
+ HostAdapter->AllocatedCCBs - PreviouslyAllocated,
+ HostAdapter->AllocatedCCBs);
+ return;
+ }
+ BusLogic_Notice("Failed to allocate additional CCBs\n", HostAdapter);
+ if (HostAdapter->DriverQueueDepth >
+ HostAdapter->AllocatedCCBs - HostAdapter->TargetDeviceCount)
+ { /* nothing allocated at all: shrink the queue depth to a safe value (see header comment) */
+ HostAdapter->DriverQueueDepth =
+ HostAdapter->AllocatedCCBs - HostAdapter->TargetDeviceCount;
+ HostAdapter->SCSI_Host->can_queue = HostAdapter->DriverQueueDepth; /* propagate to the SCSI mid layer */
+ }
+}
+
+
+/*
+ BusLogic_AllocateCCB allocates a CCB from Host Adapter's free list,
+ allocating more memory from the Kernel if necessary. The Host Adapter's
+ Lock should already have been acquired by the caller.
+*/
+
+static BusLogic_CCB_T *BusLogic_AllocateCCB(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ static unsigned long SerialNumber = 0; /* shared across adapters; safe because the caller holds the Host Adapter Lock */
+ BusLogic_CCB_T *CCB;
+ CCB = HostAdapter->Free_CCBs;
+ if (CCB != NULL)
+ { /* fast path: pop the free list */
+ CCB->SerialNumber = ++SerialNumber;
+ HostAdapter->Free_CCBs = CCB->Next;
+ CCB->Next = NULL;
+ if (HostAdapter->Free_CCBs == NULL) /* just took the last one: grow proactively */
+ BusLogic_CreateAdditionalCCBs(HostAdapter,
+ HostAdapter->IncrementalCCBs,
+ true);
+ return CCB;
+ }
+ BusLogic_CreateAdditionalCCBs(HostAdapter,
+ HostAdapter->IncrementalCCBs,
+ true); /* slow path: attempt to grow, then retry the free list */
+ CCB = HostAdapter->Free_CCBs;
+ if (CCB == NULL) return NULL; /* allocation failed: caller must cope with NULL */
+ CCB->SerialNumber = ++SerialNumber;
+ HostAdapter->Free_CCBs = CCB->Next;
+ CCB->Next = NULL;
+ return CCB;
+}
+
+
+/*
+ BusLogic_DeallocateCCB deallocates a CCB, returning it to the Host Adapter's
+ free list. The Host Adapter's Lock should already have been acquired by the
+ caller.
+*/
+
+static void BusLogic_DeallocateCCB(BusLogic_CCB_T *CCB)
+{
+ BusLogic_HostAdapter_T *HostAdapter = CCB->HostAdapter; /* CCBs record their owning adapter */
+ CCB->Command = NULL; /* drop the SCSI command reference */
+ CCB->Status = BusLogic_CCB_Free;
+ CCB->Next = HostAdapter->Free_CCBs; /* push back onto the free list; memory is never returned here */
+ HostAdapter->Free_CCBs = CCB;
+}
+
+
+/*
+ BusLogic_Command sends the command OperationCode to HostAdapter, optionally
+ providing ParameterLength bytes of ParameterData and receiving at most
+ ReplyLength bytes of ReplyData; any excess reply data is received but
+ discarded.
+
+ On success, this function returns the number of reply bytes read from
+ the Host Adapter (including any discarded data); on failure, it returns
+ -1 if the command was invalid, or -2 if a timeout occurred.
+
+ BusLogic_Command is called exclusively during host adapter detection and
+ initialization, so performance and latency are not critical, and exclusive
+ access to the Host Adapter hardware is assumed. Once the host adapter and
+ driver are initialized, the only Host Adapter command that is issued is the
+ single byte Execute Mailbox Command operation code, which does not require
+ waiting for the Host Adapter Ready bit to be set in the Status Register.
+*/
+
+static int BusLogic_Command(BusLogic_HostAdapter_T *HostAdapter,
+ BusLogic_OperationCode_T OperationCode,
+ void *ParameterData,
+ int ParameterLength,
+ void *ReplyData,
+ int ReplyLength)
+{
+ unsigned char *ParameterPointer = (unsigned char *) ParameterData;
+ unsigned char *ReplyPointer = (unsigned char *) ReplyData;
+ BusLogic_StatusRegister_T StatusRegister;
+ BusLogic_InterruptRegister_T InterruptRegister;
+ ProcessorFlags_T ProcessorFlags = 0; /* initialized: restore_flags at Done runs even when save_flags was skipped — TODO confirm 0 is a harmless flags value on all targets */
+ int ReplyBytes = 0, Result;
+ long TimeoutCounter;
+ /*
+ Clear out the Reply Data if provided.
+ */
+ if (ReplyLength > 0)
+ memset(ReplyData, 0, ReplyLength);
+ /*
+ If the IRQ Channel has not yet been acquired, then interrupts must be
+ disabled while issuing host adapter commands since a Command Complete
+ interrupt could occur if the IRQ Channel was previously enabled by another
+ BusLogic Host Adapter or another driver sharing the same IRQ Channel.
+ */
+ if (!HostAdapter->IRQ_ChannelAcquired)
+ {
+ save_flags(ProcessorFlags);
+ cli(); /* interrupts stay off until the Done label */
+ }
+ /*
+ Wait for the Host Adapter Ready bit to be set and the Command/Parameter
+ Register Busy bit to be reset in the Status Register.
+ */
+ TimeoutCounter = 10000; /* ~1 second at 100 us per poll */
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.HostAdapterReady &&
+ !StatusRegister.Bits.CommandParameterRegisterBusy)
+ break;
+ udelay(100);
+ }
+ if (TimeoutCounter < 0)
+ {
+ BusLogic_CommandFailureReason = "Timeout waiting for Host Adapter Ready";
+ Result = -2; /* -2 == timeout (see header comment) */
+ goto Done;
+ }
+ /*
+ Write the OperationCode to the Command/Parameter Register.
+ */
+ HostAdapter->HostAdapterCommandCompleted = false; /* may be set by the interrupt handler once the IRQ is acquired */
+ BusLogic_WriteCommandParameterRegister(HostAdapter, OperationCode);
+ /*
+ Write any additional Parameter Bytes.
+ */
+ TimeoutCounter = 10000;
+ while (ParameterLength > 0 && --TimeoutCounter >= 0)
+ {
+ /*
+ Wait 100 microseconds to give the Host Adapter enough time to determine
+ whether the last value written to the Command/Parameter Register was
+ valid or not. If the Command Complete bit is set in the Interrupt
+ Register, then the Command Invalid bit in the Status Register will be
+ reset if the Operation Code or Parameter was valid and the command
+ has completed, or set if the Operation Code or Parameter was invalid.
+ If the Data In Register Ready bit is set in the Status Register, then
+ the Operation Code was valid, and data is waiting to be read back
+ from the Host Adapter. Otherwise, wait for the Command/Parameter
+ Register Busy bit in the Status Register to be reset.
+ */
+ udelay(100);
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (InterruptRegister.Bits.CommandComplete) break;
+ if (HostAdapter->HostAdapterCommandCompleted) break;
+ if (StatusRegister.Bits.DataInRegisterReady) break;
+ if (StatusRegister.Bits.CommandParameterRegisterBusy) continue; /* adapter not ready for the next byte yet */
+ BusLogic_WriteCommandParameterRegister(HostAdapter, *ParameterPointer++);
+ ParameterLength--;
+ }
+ if (TimeoutCounter < 0)
+ {
+ BusLogic_CommandFailureReason =
+ "Timeout waiting for Parameter Acceptance";
+ Result = -2;
+ goto Done;
+ }
+ /*
+ The Modify I/O Address command does not cause a Command Complete Interrupt.
+ */
+ if (OperationCode == BusLogic_ModifyIOAddress)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.CommandInvalid)
+ {
+ BusLogic_CommandFailureReason = "Modify I/O Address Invalid";
+ Result = -1; /* -1 == invalid command (see header comment) */
+ goto Done;
+ }
+ if (BusLogic_GlobalOptions.TraceConfiguration)
+ BusLogic_Notice("BusLogic_Command(%02X) Status = %02X: "
+ "(Modify I/O Address)\n", HostAdapter,
+ OperationCode, StatusRegister.All);
+ Result = 0; /* success, no reply bytes for this command */
+ goto Done;
+ }
+ /*
+ Select an appropriate timeout value for awaiting command completion.
+ */
+ switch (OperationCode)
+ {
+ case BusLogic_InquireInstalledDevicesID0to7:
+ case BusLogic_InquireInstalledDevicesID8to15:
+ case BusLogic_InquireTargetDevices:
+ /* Approximately 60 seconds. */
+ TimeoutCounter = 60*10000;
+ break;
+ default:
+ /* Approximately 1 second. */
+ TimeoutCounter = 10000;
+ break;
+ }
+ /*
+ Receive any Reply Bytes, waiting for either the Command Complete bit to
+ be set in the Interrupt Register, or for the Interrupt Handler to set the
+ Host Adapter Command Completed bit in the Host Adapter structure.
+ */
+ while (--TimeoutCounter >= 0)
+ {
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (InterruptRegister.Bits.CommandComplete) break;
+ if (HostAdapter->HostAdapterCommandCompleted) break;
+ if (StatusRegister.Bits.DataInRegisterReady)
+ {
+ if (++ReplyBytes <= ReplyLength)
+ *ReplyPointer++ = BusLogic_ReadDataInRegister(HostAdapter);
+ else BusLogic_ReadDataInRegister(HostAdapter); /* drain excess reply data but keep counting it */
+ }
+ if (OperationCode == BusLogic_FetchHostAdapterLocalRAM &&
+ StatusRegister.Bits.HostAdapterReady) break; /* this command signals completion via Ready, not Command Complete */
+ udelay(100);
+ }
+ if (TimeoutCounter < 0)
+ {
+ BusLogic_CommandFailureReason = "Timeout waiting for Command Complete";
+ Result = -2;
+ goto Done;
+ }
+ /*
+ Clear any pending Command Complete Interrupt.
+ */
+ BusLogic_InterruptReset(HostAdapter);
+ /*
+ Provide tracing information if requested.
+ */
+ if (BusLogic_GlobalOptions.TraceConfiguration)
+ {
+ int i;
+ BusLogic_Notice("BusLogic_Command(%02X) Status = %02X: %2d ==> %2d:",
+ HostAdapter, OperationCode,
+ StatusRegister.All, ReplyLength, ReplyBytes);
+ if (ReplyLength > ReplyBytes) ReplyLength = ReplyBytes; /* only dump bytes actually received */
+ for (i = 0; i < ReplyLength; i++)
+ BusLogic_Notice(" %02X", HostAdapter,
+ ((unsigned char *) ReplyData)[i]);
+ BusLogic_Notice("\n", HostAdapter);
+ }
+ /*
+ Process Command Invalid conditions.
+ */
+ if (StatusRegister.Bits.CommandInvalid)
+ {
+ /*
+ Some early BusLogic Host Adapters may not recover properly from
+ a Command Invalid condition, so if this appears to be the case,
+ a Soft Reset is issued to the Host Adapter. Potentially invalid
+ commands are never attempted after Mailbox Initialization is
+ performed, so there should be no Host Adapter state lost by a
+ Soft Reset in response to a Command Invalid condition.
+ */
+ udelay(1000);
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.CommandInvalid ||
+ StatusRegister.Bits.Reserved ||
+ StatusRegister.Bits.DataInRegisterReady ||
+ StatusRegister.Bits.CommandParameterRegisterBusy ||
+ !StatusRegister.Bits.HostAdapterReady ||
+ !StatusRegister.Bits.InitializationRequired ||
+ StatusRegister.Bits.DiagnosticActive ||
+ StatusRegister.Bits.DiagnosticFailure)
+ { /* status is not the expected post-invalid idle pattern: force recovery */
+ BusLogic_SoftReset(HostAdapter);
+ udelay(1000);
+ }
+ BusLogic_CommandFailureReason = "Command Invalid";
+ Result = -1;
+ goto Done;
+ }
+ /*
+ Handle Excess Parameters Supplied conditions.
+ */
+ if (ParameterLength > 0)
+ { /* adapter stopped consuming bytes before we ran out: caller supplied too many */
+ BusLogic_CommandFailureReason = "Excess Parameters Supplied";
+ Result = -1;
+ goto Done;
+ }
+ /*
+ Indicate the command completed successfully.
+ */
+ BusLogic_CommandFailureReason = NULL;
+ Result = ReplyBytes; /* success: total reply bytes, including any discarded */
+ /*
+ Restore the interrupt status if necessary and return.
+ */
+Done:
+ if (!HostAdapter->IRQ_ChannelAcquired)
+ restore_flags(ProcessorFlags); /* matches the save_flags/cli pair above */
+ return Result;
+}
+
+
+/*
+ BusLogic_AppendProbeAddressISA appends a single ISA I/O Address to the list
+ of I/O Address and Bus Probe Information to be checked for potential BusLogic
+ Host Adapters.
+*/
+
+static void BusLogic_AppendProbeAddressISA(BusLogic_IO_Address_T IO_Address)
+{
+ BusLogic_ProbeInfo_T *ProbeInfo;
+ if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters) return; /* probe list full: silently drop */
+ ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
+ ProbeInfo->HostAdapterType = BusLogic_MultiMaster; /* ISA adapters are always MultiMaster */
+ ProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
+ ProbeInfo->IO_Address = IO_Address;
+}
+
+
+/*
+ BusLogic_InitializeProbeInfoListISA initializes the list of I/O Address and
+ Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
+ only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
+*/
+
+static void BusLogic_InitializeProbeInfoListISA(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ /*
+ If BusLogic Driver Options specifications requested that ISA Bus Probes
+ be inhibited, do not proceed further.
+ */
+ if (BusLogic_ProbeOptions.NoProbeISA) return;
+ /*
+ Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
+ */
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe330
+ : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0) /* LimitedProbe: honor per-address flags; otherwise probe any unclaimed region */
+ BusLogic_AppendProbeAddressISA(0x330);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe334
+ : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x334);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe230
+ : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x230);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe234
+ : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x234);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe130
+ : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x130);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe134
+ : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x134); /* addresses listed in BIOS recognition order */
+}
+
+
+#ifdef CONFIG_PCI
+
+
+/*
+ BusLogic_SortProbeInfo sorts a section of BusLogic_ProbeInfoList in order
+ of increasing PCI Bus and Device Number.
+*/
+
+static void BusLogic_SortProbeInfo(BusLogic_ProbeInfo_T *ProbeInfoList,
+ int ProbeInfoCount)
+{
+ int LastInterchange = ProbeInfoCount-1, Bound, j; /* bubble sort with shrinking bound; list is small */
+ while (LastInterchange > 0)
+ {
+ Bound = LastInterchange; /* elements past the last swap are already in order */
+ LastInterchange = 0;
+ for (j = 0; j < Bound; j++)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo1 = &ProbeInfoList[j];
+ BusLogic_ProbeInfo_T *ProbeInfo2 = &ProbeInfoList[j+1];
+ if (ProbeInfo1->Bus > ProbeInfo2->Bus ||
+ (ProbeInfo1->Bus == ProbeInfo2->Bus &&
+ (ProbeInfo1->Device > ProbeInfo2->Device))) /* key: (Bus, Device) ascending */
+ {
+ BusLogic_ProbeInfo_T TempProbeInfo;
+ memcpy(&TempProbeInfo, ProbeInfo1, sizeof(BusLogic_ProbeInfo_T));
+ memcpy(ProbeInfo1, ProbeInfo2, sizeof(BusLogic_ProbeInfo_T));
+ memcpy(ProbeInfo2, &TempProbeInfo, sizeof(BusLogic_ProbeInfo_T));
+ LastInterchange = j;
+ }
+ }
+ }
+}
+
+
+/*
+ BusLogic_InitializeMultiMasterProbeInfo initializes the list of I/O Address
+ and Bus Probe Information to be checked for potential BusLogic MultiMaster
+ SCSI Host Adapters by interrogating the PCI Configuration Space on PCI
+ machines as well as from the list of standard BusLogic MultiMaster ISA
+ I/O Addresses. It returns the number of PCI MultiMaster Host Adapters found.
+*/
+
+static int BusLogic_InitializeMultiMasterProbeInfo(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ BusLogic_ProbeInfo_T *PrimaryProbeInfo =
+ &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount]; /* slot reserved below for the Primary adapter */
+ int NonPrimaryPCIMultiMasterIndex = BusLogic_ProbeInfoCount + 1;
+ int NonPrimaryPCIMultiMasterCount = 0, PCIMultiMasterCount = 0;
+ boolean ForceBusDeviceScanningOrder = false;
+ boolean ForceBusDeviceScanningOrderChecked = false;
+ boolean StandardAddressSeen[6]; /* indexed by the adapter's ISACompatibleIOPort code (0..5) */
+ unsigned char Bus, DeviceFunction;
+ unsigned int BaseAddress0, BaseAddress1;
+ unsigned char IRQ_Channel;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned short Index = 0;
+ int i;
+ if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters) return 0;
+ BusLogic_ProbeInfoCount++; /* consume the reserved Primary slot unconditionally */
+ for (i = 0; i < 6; i++)
+ StandardAddressSeen[i] = false;
+ /*
+ Iterate over the MultiMaster PCI Host Adapters. For each enumerated host
+ adapter, determine whether its ISA Compatible I/O Port is enabled and if
+ so, whether it is assigned the Primary I/O Address. A host adapter that is
+ assigned the Primary I/O Address will always be the preferred boot device.
+ The MultiMaster BIOS will first recognize a host adapter at the Primary I/O
+ Address, then any other PCI host adapters, and finally any host adapters
+ located at the remaining standard ISA I/O Addresses. When a PCI host
+ adapter is found with its ISA Compatible I/O Port enabled, a command is
+ issued to disable the ISA Compatible I/O Port, and it is noted that the
+ particular standard ISA I/O Address need not be probed.
+ */
+ PrimaryProbeInfo->IO_Address = 0; /* 0 == no Primary PCI adapter found yet */
+ while (pcibios_find_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
+ Index++, &Bus, &DeviceFunction) == 0)
+ if (pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_0, &BaseAddress0) == 0 &&
+ pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_1, &BaseAddress1) == 0 &&
+ pcibios_read_config_byte(Bus, DeviceFunction,
+ PCI_INTERRUPT_LINE, &IRQ_Channel) == 0)
+ {
+ BusLogic_HostAdapter_T *HostAdapter = PrototypeHostAdapter; /* scratch adapter used only to issue commands */
+ BusLogic_PCIHostAdapterInformation_T PCIHostAdapterInformation;
+ BusLogic_ModifyIOAddressRequest_T ModifyIOAddressRequest;
+ unsigned char Device = DeviceFunction >> 3; /* upper 5 bits = device number, lower 3 = function */
+ IO_Address = BaseAddress0 & PCI_BASE_ADDRESS_IO_MASK;
+ PCI_Address = BaseAddress1 & PCI_BASE_ADDRESS_MEM_MASK;
+ if ((BaseAddress0 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_IO) /* sanity-check BAR0 is an I/O BAR */
+ {
+ BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for "
+ "MultiMaster Host Adapter\n", NULL, BaseAddress0);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if ((BaseAddress1 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_MEMORY) /* sanity-check BAR1 is a memory BAR */
+ {
+ BusLogic_Error("BusLogic: Base Address1 0x%X not Memory for "
+ "MultiMaster Host Adapter\n", NULL, BaseAddress1);
+ BusLogic_Error("at PCI Bus %d Device %d PCI Address 0x%X\n",
+ NULL, Bus, Device, PCI_Address);
+ continue;
+ }
+ if (IRQ_Channel == 0 || IRQ_Channel >= NR_IRQS)
+ {
+ BusLogic_Error("BusLogic: IRQ Channel %d illegal for "
+ "MultiMaster Host Adapter\n", NULL, IRQ_Channel);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if (BusLogic_GlobalOptions.TraceProbe)
+ {
+ BusLogic_Notice("BusLogic: PCI MultiMaster Host Adapter "
+ "detected at\n", NULL);
+ BusLogic_Notice("BusLogic: PCI Bus %d Device %d I/O Address "
+ "0x%X PCI Address 0x%X\n", NULL,
+ Bus, Device, IO_Address, PCI_Address);
+ }
+ /*
+ Issue the Inquire PCI Host Adapter Information command to determine
+ the ISA Compatible I/O Port. If the ISA Compatible I/O Port is
+ known and enabled, note that the particular Standard ISA I/O
+ Address should not be probed.
+ */
+ HostAdapter->IO_Address = IO_Address;
+ BusLogic_InterruptReset(HostAdapter); /* clear any stale interrupt before talking to the board */
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_InquirePCIHostAdapterInformation,
+ NULL, 0, &PCIHostAdapterInformation,
+ sizeof(PCIHostAdapterInformation))
+ == sizeof(PCIHostAdapterInformation))
+ {
+ if (PCIHostAdapterInformation.ISACompatibleIOPort < 6) /* codes >= 6 mean disabled/unknown */
+ StandardAddressSeen[PCIHostAdapterInformation
+ .ISACompatibleIOPort] = true;
+ }
+ else PCIHostAdapterInformation.ISACompatibleIOPort =
+ BusLogic_IO_Disable; /* treat a failed inquiry as "port disabled" */
+ /*
+ Issue the Modify I/O Address command to disable the ISA Compatible
+ I/O Port.
+ */
+ ModifyIOAddressRequest = BusLogic_IO_Disable;
+ BusLogic_Command(HostAdapter, BusLogic_ModifyIOAddress,
+ &ModifyIOAddressRequest,
+ sizeof(ModifyIOAddressRequest), NULL, 0);
+ /*
+ For the first MultiMaster Host Adapter enumerated, issue the Fetch
+ Host Adapter Local RAM command to read byte 45 of the AutoSCSI area,
+ for the setting of the "Use Bus And Device # For PCI Scanning Seq."
+ option. Issue the Inquire Board ID command since this option is
+ only valid for the BT-948/958/958D.
+ */
+ if (!ForceBusDeviceScanningOrderChecked)
+ {
+ BusLogic_FetchHostAdapterLocalRAMRequest_T
+ FetchHostAdapterLocalRAMRequest;
+ BusLogic_AutoSCSIByte45_T AutoSCSIByte45;
+ BusLogic_BoardID_T BoardID;
+ FetchHostAdapterLocalRAMRequest.ByteOffset =
+ BusLogic_AutoSCSI_BaseOffset + 45;
+ FetchHostAdapterLocalRAMRequest.ByteCount =
+ sizeof(AutoSCSIByte45);
+ BusLogic_Command(HostAdapter,
+ BusLogic_FetchHostAdapterLocalRAM,
+ &FetchHostAdapterLocalRAMRequest,
+ sizeof(FetchHostAdapterLocalRAMRequest),
+ &AutoSCSIByte45, sizeof(AutoSCSIByte45));
+ BusLogic_Command(HostAdapter, BusLogic_InquireBoardID,
+ NULL, 0, &BoardID, sizeof(BoardID));
+ if (BoardID.FirmwareVersion1stDigit == '5') /* firmware 5.x identifies BT-948/958/958D */
+ ForceBusDeviceScanningOrder =
+ AutoSCSIByte45.ForceBusDeviceScanningOrder;
+ ForceBusDeviceScanningOrderChecked = true;
+ }
+ /*
+ Determine whether this MultiMaster Host Adapter has its ISA
+ Compatible I/O Port enabled and is assigned the Primary I/O Address.
+ If it does, then it is the Primary MultiMaster Host Adapter and must
+ be recognized first. If it does not, then it is added to the list
+ for probing after any Primary MultiMaster Host Adapter is probed.
+ */
+ if (PCIHostAdapterInformation.ISACompatibleIOPort == BusLogic_IO_330)
+ { /* 0x330 is the Primary I/O Address: fill the reserved first slot */
+ PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
+ PrimaryProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ PrimaryProbeInfo->IO_Address = IO_Address;
+ PrimaryProbeInfo->PCI_Address = PCI_Address;
+ PrimaryProbeInfo->Bus = Bus;
+ PrimaryProbeInfo->Device = Device;
+ PrimaryProbeInfo->IRQ_Channel = IRQ_Channel;
+ PCIMultiMasterCount++;
+ }
+ else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo =
+ &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
+ ProbeInfo->HostAdapterType = BusLogic_MultiMaster;
+ ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ ProbeInfo->IO_Address = IO_Address;
+ ProbeInfo->PCI_Address = PCI_Address;
+ ProbeInfo->Bus = Bus;
+ ProbeInfo->Device = Device;
+ ProbeInfo->IRQ_Channel = IRQ_Channel;
+ NonPrimaryPCIMultiMasterCount++;
+ PCIMultiMasterCount++;
+ }
+ else BusLogic_Warning("BusLogic: Too many Host Adapters "
+ "detected\n", NULL);
+ }
+ /*
+ If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." option is ON
+ for the first enumerated MultiMaster Host Adapter, and if that host adapter
+ is a BT-948/958/958D, then the MultiMaster BIOS will recognize MultiMaster
+ Host Adapters in the order of increasing PCI Bus and Device Number. In
+ that case, sort the probe information into the same order the BIOS uses.
+ If this option is OFF, then the MultiMaster BIOS will recognize MultiMaster
+ Host Adapters in the order they are enumerated by the PCI BIOS, and hence
+ no sorting is necessary.
+ */
+ if (ForceBusDeviceScanningOrder)
+ BusLogic_SortProbeInfo(&BusLogic_ProbeInfoList[
+ NonPrimaryPCIMultiMasterIndex],
+ NonPrimaryPCIMultiMasterCount); /* only the non-Primary PCI entries are sorted */
+ /*
+ If no PCI MultiMaster Host Adapter is assigned the Primary I/O Address,
+ then the Primary I/O Address must be probed explicitly before any PCI
+ host adapters are probed.
+ */
+ if (!BusLogic_ProbeOptions.NoProbeISA)
+ if (PrimaryProbeInfo->IO_Address == 0 &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe330
+ : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0))
+ { /* reuse the reserved slot for an ISA probe at the Primary address */
+ PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
+ PrimaryProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
+ PrimaryProbeInfo->IO_Address = 0x330;
+ }
+ /*
+ Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
+ omitting the Primary I/O Address which has already been handled.
+ */
+ if (!BusLogic_ProbeOptions.NoProbeISA)
+ {
+ if (!StandardAddressSeen[1] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe334
+ : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x334);
+ if (!StandardAddressSeen[2] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe230
+ : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x230);
+ if (!StandardAddressSeen[3] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe234
+ : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x234);
+ if (!StandardAddressSeen[4] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe130
+ : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x130);
+ if (!StandardAddressSeen[5] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe134
+ : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x134);
+ }
+ /*
+ Iterate over the older non-compliant MultiMaster PCI Host Adapters,
+ noting the PCI bus location and assigned IRQ Channel.
+ */
+ Index = 0;
+ while (pcibios_find_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
+ Index++, &Bus, &DeviceFunction) == 0)
+ if (pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_0, &BaseAddress0) == 0 &&
+ pcibios_read_config_byte(Bus, DeviceFunction,
+ PCI_INTERRUPT_LINE, &IRQ_Channel) == 0)
+ {
+ unsigned char Device = DeviceFunction >> 3;
+ IO_Address = BaseAddress0 & PCI_BASE_ADDRESS_IO_MASK;
+ if (IO_Address == 0 || IRQ_Channel == 0 || IRQ_Channel >= NR_IRQS)
+ continue;
+ for (i = 0; i < BusLogic_ProbeInfoCount; i++)
+ { /* upgrade a matching ISA probe entry to PCI, in place */
+ BusLogic_ProbeInfo_T *ProbeInfo = &BusLogic_ProbeInfoList[i];
+ if (ProbeInfo->IO_Address == IO_Address &&
+ ProbeInfo->HostAdapterType == BusLogic_MultiMaster)
+ {
+ ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ ProbeInfo->PCI_Address = 0; /* non-compliant boards expose no memory BAR */
+ ProbeInfo->Bus = Bus;
+ ProbeInfo->Device = Device;
+ ProbeInfo->IRQ_Channel = IRQ_Channel;
+ break;
+ }
+ }
+ }
+ return PCIMultiMasterCount;
+}
+
+
+/*
+ BusLogic_InitializeFlashPointProbeInfo initializes the list of I/O Address
+ and Bus Probe Information to be checked for potential BusLogic FlashPoint
+ Host Adapters by interrogating the PCI Configuration Space. It returns the
+ number of FlashPoint Host Adapters found.
+*/
+
+static int BusLogic_InitializeFlashPointProbeInfo(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ int FlashPointIndex = BusLogic_ProbeInfoCount, FlashPointCount = 0;
+ unsigned char Bus, DeviceFunction;
+ unsigned int BaseAddress0, BaseAddress1;
+ unsigned char IRQ_Channel;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned short Index = 0;
+ /* Note: PrototypeHostAdapter is not referenced in this function; the
+ probe relies solely on PCI Configuration Space. */
+ /*
+ Interrogate PCI Configuration Space for any FlashPoint Host Adapters.
+ */
+ while (pcibios_find_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT,
+ Index++, &Bus, &DeviceFunction) == 0)
+ if (pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_0, &BaseAddress0) == 0 &&
+ pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_1, &BaseAddress1) == 0 &&
+ pcibios_read_config_byte(Bus, DeviceFunction,
+ PCI_INTERRUPT_LINE, &IRQ_Channel) == 0)
+ {
+ /* The PCI Device Number occupies bits 7:3 of the Device/Function
+ byte. */
+ unsigned char Device = DeviceFunction >> 3;
+ IO_Address = BaseAddress0 & PCI_BASE_ADDRESS_IO_MASK;
+ PCI_Address = BaseAddress1 & PCI_BASE_ADDRESS_MEM_MASK;
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+ /* FlashPoint support is compiled in: validate that Base Address 0
+ is an I/O space BAR, Base Address 1 is a Memory space BAR, and
+ the IRQ Channel is usable before recording a probe entry. */
+ if ((BaseAddress0 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_IO)
+ {
+ BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for "
+ "FlashPoint Host Adapter\n", NULL, BaseAddress0);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if ((BaseAddress1 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_MEMORY)
+ {
+ BusLogic_Error("BusLogic: Base Address1 0x%X not Memory for "
+ "FlashPoint Host Adapter\n", NULL, BaseAddress1);
+ BusLogic_Error("at PCI Bus %d Device %d PCI Address 0x%X\n",
+ NULL, Bus, Device, PCI_Address);
+ continue;
+ }
+ if (IRQ_Channel == 0 || IRQ_Channel >= NR_IRQS)
+ {
+ BusLogic_Error("BusLogic: IRQ Channel %d illegal for "
+ "FlashPoint Host Adapter\n", NULL, IRQ_Channel);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if (BusLogic_GlobalOptions.TraceProbe)
+ {
+ BusLogic_Notice("BusLogic: FlashPoint Host Adapter "
+ "detected at\n", NULL);
+ BusLogic_Notice("BusLogic: PCI Bus %d Device %d I/O Address "
+ "0x%X PCI Address 0x%X\n", NULL,
+ Bus, Device, IO_Address, PCI_Address);
+ }
+ if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo =
+ &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
+ ProbeInfo->HostAdapterType = BusLogic_FlashPoint;
+ ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ ProbeInfo->IO_Address = IO_Address;
+ ProbeInfo->PCI_Address = PCI_Address;
+ ProbeInfo->Bus = Bus;
+ ProbeInfo->Device = Device;
+ ProbeInfo->IRQ_Channel = IRQ_Channel;
+ FlashPointCount++;
+ }
+ else BusLogic_Warning("BusLogic: Too many Host Adapters "
+ "detected\n", NULL);
+#else
+ /* FlashPoint support was configured out: report the adapter but
+ do not record it for probing. */
+ BusLogic_Error("BusLogic: FlashPoint Host Adapter detected at "
+ "PCI Bus %d Device %d\n", NULL, Bus, Device);
+ BusLogic_Error("BusLogic: I/O Address 0x%X PCI Address 0x%X, "
+ "but FlashPoint\n", NULL, IO_Address, PCI_Address);
+ BusLogic_Error("BusLogic: support was omitted in this kernel "
+ "configuration.\n", NULL);
+#endif
+ }
+ /*
+ The FlashPoint BIOS will scan for FlashPoint Host Adapters in the order of
+ increasing PCI Bus and Device Number, so sort the probe information into
+ the same order the BIOS uses.
+ */
+ BusLogic_SortProbeInfo(&BusLogic_ProbeInfoList[FlashPointIndex],
+ FlashPointCount);
+ return FlashPointCount;
+}
+
+
+/*
+ BusLogic_InitializeProbeInfoList initializes the list of I/O Address and Bus
+ Probe Information to be checked for potential BusLogic SCSI Host Adapters by
+ interrogating the PCI Configuration Space on PCI machines as well as from the
+ list of standard BusLogic MultiMaster ISA I/O Addresses. By default, if both
+ FlashPoint and PCI MultiMaster Host Adapters are present, this driver will
+ probe for FlashPoint Host Adapters first unless the BIOS primary disk is
+ controlled by the first PCI MultiMaster Host Adapter, in which case
+ MultiMaster Host Adapters will be probed first. The BusLogic Driver Options
+ specifications "MultiMasterFirst" and "FlashPointFirst" can be used to force
+ a particular probe order.
+*/
+
+static void BusLogic_InitializeProbeInfoList(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ /*
+ If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
+ Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
+ */
+ if (!BusLogic_ProbeOptions.NoProbePCI && pcibios_present())
+ {
+ if (BusLogic_ProbeOptions.MultiMasterFirst)
+ {
+ BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter);
+ BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter);
+ }
+ else if (BusLogic_ProbeOptions.FlashPointFirst)
+ {
+ BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter);
+ BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter);
+ }
+ else
+ {
+ /* Default order: FlashPoint entries first, then MultiMaster. */
+ int FlashPointCount =
+ BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter);
+ int PCIMultiMasterCount =
+ BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter);
+ if (FlashPointCount > 0 && PCIMultiMasterCount > 0)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo =
+ &BusLogic_ProbeInfoList[FlashPointCount];
+ BusLogic_HostAdapter_T *HostAdapter = PrototypeHostAdapter;
+ BusLogic_FetchHostAdapterLocalRAMRequest_T
+ FetchHostAdapterLocalRAMRequest;
+ BusLogic_BIOSDriveMapByte_T Drive0MapByte;
+ /* Advance past any non-PCI entries to reach the first PCI
+ MultiMaster probe entry. */
+ while (ProbeInfo->HostAdapterBusType != BusLogic_PCI_Bus)
+ ProbeInfo++;
+ HostAdapter->IO_Address = ProbeInfo->IO_Address;
+ /* Read the BIOS Drive 0 Map Byte from that adapter's
+ Local RAM. */
+ FetchHostAdapterLocalRAMRequest.ByteOffset =
+ BusLogic_BIOS_BaseOffset + BusLogic_BIOS_DriveMapOffset + 0;
+ FetchHostAdapterLocalRAMRequest.ByteCount =
+ sizeof(Drive0MapByte);
+ BusLogic_Command(HostAdapter,
+ BusLogic_FetchHostAdapterLocalRAM,
+ &FetchHostAdapterLocalRAMRequest,
+ sizeof(FetchHostAdapterLocalRAMRequest),
+ &Drive0MapByte, sizeof(Drive0MapByte));
+ /*
+ If the Map Byte for BIOS Drive 0 indicates that BIOS Drive 0
+ is controlled by this PCI MultiMaster Host Adapter, then
+ reverse the probe order so that MultiMaster Host Adapters are
+ probed before FlashPoint Host Adapters.
+ */
+ if (Drive0MapByte.DiskGeometry !=
+ BusLogic_BIOS_Disk_Not_Installed)
+ {
+ /* Exchange the two groups via a temporary copy of the
+ entire probe list. */
+ BusLogic_ProbeInfo_T
+ SavedProbeInfo[BusLogic_MaxHostAdapters];
+ int MultiMasterCount =
+ BusLogic_ProbeInfoCount - FlashPointCount;
+ memcpy(SavedProbeInfo,
+ BusLogic_ProbeInfoList,
+ BusLogic_ProbeInfoCount
+ * sizeof(BusLogic_ProbeInfo_T));
+ memcpy(&BusLogic_ProbeInfoList[0],
+ &SavedProbeInfo[FlashPointCount],
+ MultiMasterCount * sizeof(BusLogic_ProbeInfo_T));
+ memcpy(&BusLogic_ProbeInfoList[MultiMasterCount],
+ &SavedProbeInfo[0],
+ FlashPointCount * sizeof(BusLogic_ProbeInfo_T));
+ }
+ }
+ }
+ }
+ else BusLogic_InitializeProbeInfoListISA(PrototypeHostAdapter);
+}
+
+
+#endif /* CONFIG_PCI */
+
+
+/*
+ BusLogic_Failure prints a standardized error message, and then returns false.
+*/
+
+static boolean BusLogic_Failure(BusLogic_HostAdapter_T *HostAdapter,
+ char *ErrorMessage)
+{
+ BusLogic_AnnounceDriver(HostAdapter);
+ /* Identify the failing adapter by PCI bus location when available,
+ otherwise by its I/O Address alone. */
+ if (HostAdapter->HostAdapterBusType == BusLogic_PCI_Bus)
+ {
+ BusLogic_Error("While configuring BusLogic PCI Host Adapter at\n",
+ HostAdapter);
+ BusLogic_Error("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n",
+ HostAdapter, HostAdapter->Bus, HostAdapter->Device,
+ HostAdapter->IO_Address, HostAdapter->PCI_Address);
+ }
+ else BusLogic_Error("While configuring BusLogic Host Adapter at "
+ "I/O Address 0x%X:\n", HostAdapter,
+ HostAdapter->IO_Address);
+ BusLogic_Error("%s FAILED - DETACHING\n", HostAdapter, ErrorMessage);
+ /* Append any additional detail recorded for the failed command
+ (presumably set by BusLogic_Command -- verify against its definition). */
+ if (BusLogic_CommandFailureReason != NULL)
+ BusLogic_Error("ADDITIONAL FAILURE INFO - %s\n", HostAdapter,
+ BusLogic_CommandFailureReason);
+ return false;
+}
+
+
+/*
+ BusLogic_ProbeHostAdapter probes for a BusLogic Host Adapter.
+*/
+
+static boolean BusLogic_ProbeHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_StatusRegister_T StatusRegister;
+ BusLogic_InterruptRegister_T InterruptRegister;
+ BusLogic_GeometryRegister_T GeometryRegister;
+ /*
+ FlashPoint Host Adapters are Probed by the FlashPoint SCCB Manager.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ FlashPoint_Info_T *FlashPointInfo = &HostAdapter->FlashPointInfo;
+ FlashPointInfo->BaseAddress =
+ (BusLogic_Base_Address_T) HostAdapter->IO_Address;
+ FlashPointInfo->IRQ_Channel = HostAdapter->IRQ_Channel;
+ FlashPointInfo->Present = false;
+ /* The SCCB Manager must both return success and confirm presence. */
+ if (!(FlashPoint_ProbeHostAdapter(FlashPointInfo) == 0 &&
+ FlashPointInfo->Present))
+ {
+ BusLogic_Error("BusLogic: FlashPoint Host Adapter detected at "
+ "PCI Bus %d Device %d\n", HostAdapter,
+ HostAdapter->Bus, HostAdapter->Device);
+ BusLogic_Error("BusLogic: I/O Address 0x%X PCI Address 0x%X, "
+ "but FlashPoint\n", HostAdapter,
+ HostAdapter->IO_Address, HostAdapter->PCI_Address);
+ BusLogic_Error("BusLogic: Probe Function failed to validate it.\n",
+ HostAdapter);
+ return false;
+ }
+ if (BusLogic_GlobalOptions.TraceProbe)
+ BusLogic_Notice("BusLogic_Probe(0x%X): FlashPoint Found\n",
+ HostAdapter, HostAdapter->IO_Address);
+ /*
+ Indicate the Host Adapter Probe completed successfully.
+ */
+ return true;
+ }
+ /*
+ Read the Status, Interrupt, and Geometry Registers to test if there are I/O
+ ports that respond, and to check the values to determine if they are from a
+ BusLogic Host Adapter. A nonexistent I/O port will return 0xFF, in which
+ case there is definitely no BusLogic Host Adapter at this base I/O Address.
+ The test here is a subset of that used by the BusLogic Host Adapter BIOS.
+ */
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ GeometryRegister.All = BusLogic_ReadGeometryRegister(HostAdapter);
+ if (BusLogic_GlobalOptions.TraceProbe)
+ BusLogic_Notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, "
+ "Geometry 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All,
+ InterruptRegister.All, GeometryRegister.All);
+ /* An all-ones (0xFF) read from a nonexistent port sets the Reserved
+ bits and therefore fails these tests. */
+ if (StatusRegister.All == 0 ||
+ StatusRegister.Bits.DiagnosticActive ||
+ StatusRegister.Bits.CommandParameterRegisterBusy ||
+ StatusRegister.Bits.Reserved ||
+ StatusRegister.Bits.CommandInvalid ||
+ InterruptRegister.Bits.Reserved != 0)
+ return false;
+ /*
+ Check the undocumented Geometry Register to test if there is an I/O port
+ that responded. Adaptec Host Adapters do not implement the Geometry
+ Register, so this test helps serve to avoid incorrectly recognizing an
+ Adaptec 1542A or 1542B as a BusLogic. Unfortunately, the Adaptec 1542C
+ series does respond to the Geometry Register I/O port, but it will be
+ rejected later when the Inquire Extended Setup Information command is
+ issued in BusLogic_CheckHostAdapter. The AMI FastDisk Host Adapter is a
+ BusLogic clone that implements the same interface as earlier BusLogic
+ Host Adapters, including the undocumented commands, and is therefore
+ supported by this driver. However, the AMI FastDisk always returns 0x00
+ upon reading the Geometry Register, so the extended translation option
+ should always be left disabled on the AMI FastDisk.
+ */
+ if (GeometryRegister.All == 0xFF) return false;
+ /*
+ Indicate the Host Adapter Probe completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_HardwareResetHostAdapter issues a Hardware Reset to the Host Adapter
+ and waits for Host Adapter Diagnostics to complete. If HardReset is true, a
+ Hard Reset is performed which also initiates a SCSI Bus Reset. Otherwise, a
+ Soft Reset is performed which only resets the Host Adapter without forcing a
+ SCSI Bus Reset.
+*/
+
+static boolean BusLogic_HardwareResetHostAdapter(BusLogic_HostAdapter_T
+ *HostAdapter,
+ boolean HardReset)
+{
+ BusLogic_StatusRegister_T StatusRegister;
+ int TimeoutCounter;
+ /*
+ FlashPoint Host Adapters are Hard Reset by the FlashPoint SCCB Manager.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ FlashPoint_Info_T *FlashPointInfo = &HostAdapter->FlashPointInfo;
+ FlashPointInfo->HostSoftReset = !HardReset;
+ FlashPointInfo->ReportDataUnderrun = true;
+ HostAdapter->CardHandle =
+ FlashPoint_HardwareResetHostAdapter(FlashPointInfo);
+ if (HostAdapter->CardHandle == FlashPoint_BadCardHandle) return false;
+ /*
+ Indicate the Host Adapter Hard Reset completed successfully.
+ */
+ return true;
+ }
+ /*
+ Issue a Hard Reset or Soft Reset Command to the Host Adapter. The Host
+ Adapter should respond by setting Diagnostic Active in the Status Register.
+ */
+ if (HardReset)
+ BusLogic_HardReset(HostAdapter);
+ else BusLogic_SoftReset(HostAdapter);
+ /*
+ Wait until Diagnostic Active is set in the Status Register.
+ */
+ /* 50000 iterations of 100 microseconds: a timeout of roughly 5 seconds. */
+ TimeoutCounter = 5*10000;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.DiagnosticActive) break;
+ udelay(100);
+ }
+ if (BusLogic_GlobalOptions.TraceHardwareReset)
+ BusLogic_Notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, "
+ "Status 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All);
+ if (TimeoutCounter < 0) return false;
+ /*
+ Wait 100 microseconds to allow completion of any initial diagnostic
+ activity which might leave the contents of the Status Register
+ unpredictable.
+ */
+ udelay(100);
+ /*
+ Wait until Diagnostic Active is reset in the Status Register.
+ */
+ /* 100000 iterations of 100 microseconds: roughly 10 seconds. */
+ TimeoutCounter = 10*10000;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (!StatusRegister.Bits.DiagnosticActive) break;
+ udelay(100);
+ }
+ if (BusLogic_GlobalOptions.TraceHardwareReset)
+ BusLogic_Notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, "
+ "Status 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All);
+ if (TimeoutCounter < 0) return false;
+ /*
+ Wait until at least one of the Diagnostic Failure, Host Adapter Ready,
+ or Data In Register Ready bits is set in the Status Register.
+ */
+ /* 10000 iterations of 100 microseconds: roughly 1 second. */
+ TimeoutCounter = 10000;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.DiagnosticFailure ||
+ StatusRegister.Bits.HostAdapterReady ||
+ StatusRegister.Bits.DataInRegisterReady)
+ break;
+ udelay(100);
+ }
+ if (BusLogic_GlobalOptions.TraceHardwareReset)
+ BusLogic_Notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, "
+ "Status 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All);
+ if (TimeoutCounter < 0) return false;
+ /*
+ If Diagnostic Failure is set or Host Adapter Ready is reset, then an
+ error occurred during the Host Adapter diagnostics. If Data In Register
+ Ready is set, then there is an Error Code available.
+ */
+ if (StatusRegister.Bits.DiagnosticFailure ||
+ !StatusRegister.Bits.HostAdapterReady)
+ {
+ BusLogic_CommandFailureReason = NULL;
+ BusLogic_Failure(HostAdapter, "HARD RESET DIAGNOSTICS");
+ BusLogic_Error("HOST ADAPTER STATUS REGISTER = %02X\n",
+ HostAdapter, StatusRegister.All);
+ if (StatusRegister.Bits.DataInRegisterReady)
+ {
+ unsigned char ErrorCode = BusLogic_ReadDataInRegister(HostAdapter);
+ BusLogic_Error("HOST ADAPTER ERROR CODE = %d\n",
+ HostAdapter, ErrorCode);
+ }
+ return false;
+ }
+ /*
+ Indicate the Host Adapter Hard Reset completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_CheckHostAdapter checks to be sure this really is a BusLogic
+ Host Adapter.
+*/
+
+static boolean BusLogic_CheckHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_ExtendedSetupInformation_T ExtendedSetupInformation;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ boolean Result = true;
+ /*
+ FlashPoint Host Adapters do not require this protection.
+ */
+ /* They were already validated in BusLogic_ProbeHostAdapter by the
+ SCCB Manager's Probe Function. */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter)) return true;
+ /*
+ Issue the Inquire Extended Setup Information command. Only genuine
+ BusLogic Host Adapters and true clones support this command. Adaptec 1542C
+ series Host Adapters that respond to the Geometry Register I/O port will
+ fail this command.
+ */
+ /* A short or failed reply (anything other than a full reply structure)
+ marks the adapter as not a BusLogic MultiMaster. */
+ RequestedReplyLength = sizeof(ExtendedSetupInformation);
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_InquireExtendedSetupInformation,
+ &RequestedReplyLength,
+ sizeof(RequestedReplyLength),
+ &ExtendedSetupInformation,
+ sizeof(ExtendedSetupInformation))
+ != sizeof(ExtendedSetupInformation))
+ Result = false;
+ /*
+ Provide tracing information if requested and return.
+ */
+ if (BusLogic_GlobalOptions.TraceProbe)
+ BusLogic_Notice("BusLogic_Check(0x%X): MultiMaster %s\n", HostAdapter,
+ HostAdapter->IO_Address, (Result ? "Found" : "Not Found"));
+ return Result;
+}
+
+
+/*
+ BusLogic_ReadHostAdapterConfiguration reads the Configuration Information
+ from Host Adapter and initializes the Host Adapter structure.
+*/
+
+static boolean BusLogic_ReadHostAdapterConfiguration(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ BusLogic_BoardID_T BoardID;
+ BusLogic_Configuration_T Configuration;
+ BusLogic_SetupInformation_T SetupInformation;
+ BusLogic_ExtendedSetupInformation_T ExtendedSetupInformation;
+ BusLogic_HostAdapterModelNumber_T HostAdapterModelNumber;
+ BusLogic_FirmwareVersion3rdDigit_T FirmwareVersion3rdDigit;
+ BusLogic_FirmwareVersionLetter_T FirmwareVersionLetter;
+ BusLogic_PCIHostAdapterInformation_T PCIHostAdapterInformation;
+ BusLogic_FetchHostAdapterLocalRAMRequest_T FetchHostAdapterLocalRAMRequest;
+ BusLogic_AutoSCSIData_T AutoSCSIData;
+ BusLogic_GeometryRegister_T GeometryRegister;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ unsigned char *TargetPointer, Character;
+ int TargetID, i;
+ /*
+ Configuration Information for FlashPoint Host Adapters is provided in the
+ FlashPoint_Info structure by the FlashPoint SCCB Manager's Probe Function.
+ Initialize fields in the Host Adapter structure from the FlashPoint_Info
+ structure.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ FlashPoint_Info_T *FlashPointInfo = &HostAdapter->FlashPointInfo;
+ TargetPointer = HostAdapter->ModelName;
+ *TargetPointer++ = 'B';
+ *TargetPointer++ = 'T';
+ *TargetPointer++ = '-';
+ for (i = 0; i < sizeof(FlashPointInfo->ModelNumber); i++)
+ *TargetPointer++ = FlashPointInfo->ModelNumber[i];
+ *TargetPointer++ = '\0';
+ strcpy(HostAdapter->FirmwareVersion, FlashPoint_FirmwareVersion);
+ HostAdapter->SCSI_ID = FlashPointInfo->SCSI_ID;
+ HostAdapter->ExtendedTranslationEnabled =
+ FlashPointInfo->ExtendedTranslationEnabled;
+ HostAdapter->ParityCheckingEnabled =
+ FlashPointInfo->ParityCheckingEnabled;
+ HostAdapter->BusResetEnabled = !FlashPointInfo->HostSoftReset;
+ HostAdapter->LevelSensitiveInterrupt = true;
+ HostAdapter->HostWideSCSI = FlashPointInfo->HostWideSCSI;
+ HostAdapter->HostDifferentialSCSI = false;
+ HostAdapter->HostSupportsSCAM = true;
+ HostAdapter->HostUltraSCSI = true;
+ HostAdapter->ExtendedLUNSupport = true;
+ HostAdapter->TerminationInfoValid = true;
+ HostAdapter->LowByteTerminated = FlashPointInfo->LowByteTerminated;
+ HostAdapter->HighByteTerminated = FlashPointInfo->HighByteTerminated;
+ HostAdapter->SCAM_Enabled = FlashPointInfo->SCAM_Enabled;
+ HostAdapter->SCAM_Level2 = FlashPointInfo->SCAM_Level2;
+ HostAdapter->DriverScatterGatherLimit = BusLogic_ScatterGatherLimit;
+ HostAdapter->MaxTargetDevices = (HostAdapter->HostWideSCSI ? 16 : 8);
+ HostAdapter->MaxLogicalUnits = 32;
+ HostAdapter->InitialCCBs = 4 * BusLogic_CCB_AllocationGroupSize;
+ HostAdapter->IncrementalCCBs = BusLogic_CCB_AllocationGroupSize;
+ HostAdapter->DriverQueueDepth = 255;
+ HostAdapter->HostAdapterQueueDepth = HostAdapter->DriverQueueDepth;
+ HostAdapter->SynchronousPermitted = FlashPointInfo->SynchronousPermitted;
+ HostAdapter->FastPermitted = FlashPointInfo->FastPermitted;
+ HostAdapter->UltraPermitted = FlashPointInfo->UltraPermitted;
+ HostAdapter->WidePermitted = FlashPointInfo->WidePermitted;
+ HostAdapter->DisconnectPermitted = FlashPointInfo->DisconnectPermitted;
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ goto Common;
+ }
+ /*
+ Issue the Inquire Board ID command.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireBoardID, NULL, 0,
+ &BoardID, sizeof(BoardID)) != sizeof(BoardID))
+ return BusLogic_Failure(HostAdapter, "INQUIRE BOARD ID");
+ /*
+ Issue the Inquire Configuration command.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireConfiguration, NULL, 0,
+ &Configuration, sizeof(Configuration))
+ != sizeof(Configuration))
+ return BusLogic_Failure(HostAdapter, "INQUIRE CONFIGURATION");
+ /*
+ Issue the Inquire Setup Information command.
+ */
+ RequestedReplyLength = sizeof(SetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &SetupInformation, sizeof(SetupInformation))
+ != sizeof(SetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION");
+ /*
+ Issue the Inquire Extended Setup Information command.
+ */
+ RequestedReplyLength = sizeof(ExtendedSetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireExtendedSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &ExtendedSetupInformation,
+ sizeof(ExtendedSetupInformation))
+ != sizeof(ExtendedSetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE EXTENDED SETUP INFORMATION");
+ /*
+ Issue the Inquire Firmware Version 3rd Digit command.
+ */
+ FirmwareVersion3rdDigit = '\0';
+ if (BoardID.FirmwareVersion1stDigit > '0')
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersion3rdDigit,
+ NULL, 0, &FirmwareVersion3rdDigit,
+ sizeof(FirmwareVersion3rdDigit))
+ != sizeof(FirmwareVersion3rdDigit))
+ return BusLogic_Failure(HostAdapter, "INQUIRE FIRMWARE 3RD DIGIT");
+ /*
+ Issue the Inquire Host Adapter Model Number command.
+ */
+ if (ExtendedSetupInformation.BusType == 'A' &&
+ BoardID.FirmwareVersion1stDigit == '2')
+ /* BusLogic BT-542B ISA 2.xx */
+ strcpy(HostAdapterModelNumber, "542B");
+ else if (ExtendedSetupInformation.BusType == 'E' &&
+ BoardID.FirmwareVersion1stDigit == '2' &&
+ (BoardID.FirmwareVersion2ndDigit <= '1' ||
+ (BoardID.FirmwareVersion2ndDigit == '2' &&
+ FirmwareVersion3rdDigit == '0')))
+ /* BusLogic BT-742A EISA 2.1x or 2.20 */
+ strcpy(HostAdapterModelNumber, "742A");
+ else if (ExtendedSetupInformation.BusType == 'E' &&
+ BoardID.FirmwareVersion1stDigit == '0')
+ /* AMI FastDisk EISA Series 441 0.x */
+ strcpy(HostAdapterModelNumber, "747A");
+ else
+ {
+ RequestedReplyLength = sizeof(HostAdapterModelNumber);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireHostAdapterModelNumber,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &HostAdapterModelNumber,
+ sizeof(HostAdapterModelNumber))
+ != sizeof(HostAdapterModelNumber))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE HOST ADAPTER MODEL NUMBER");
+ }
+ /*
+ BusLogic MultiMaster Host Adapters can be identified by their model number
+ and the major version number of their firmware as follows:
+
+ 5.xx BusLogic "W" Series Host Adapters:
+ BT-948/958/958D
+ 4.xx BusLogic "C" Series Host Adapters:
+ BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
+ 3.xx BusLogic "S" Series Host Adapters:
+ BT-747S/747D/757S/757D/445S/545S/542D
+ BT-542B/742A (revision H)
+ 2.xx BusLogic "A" Series Host Adapters:
+ BT-542B/742A (revision G and below)
+ 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
+ */
+ /*
+ Save the Model Name and Host Adapter Name in the Host Adapter structure.
+ */
+ TargetPointer = HostAdapter->ModelName;
+ *TargetPointer++ = 'B';
+ *TargetPointer++ = 'T';
+ *TargetPointer++ = '-';
+ for (i = 0; i < sizeof(HostAdapterModelNumber); i++)
+ {
+ Character = HostAdapterModelNumber[i];
+ if (Character == ' ' || Character == '\0') break;
+ *TargetPointer++ = Character;
+ }
+ *TargetPointer++ = '\0';
+ /*
+ Save the Firmware Version in the Host Adapter structure.
+ */
+ TargetPointer = HostAdapter->FirmwareVersion;
+ *TargetPointer++ = BoardID.FirmwareVersion1stDigit;
+ *TargetPointer++ = '.';
+ *TargetPointer++ = BoardID.FirmwareVersion2ndDigit;
+ if (FirmwareVersion3rdDigit != ' ' && FirmwareVersion3rdDigit != '\0')
+ *TargetPointer++ = FirmwareVersion3rdDigit;
+ *TargetPointer = '\0';
+ /*
+ Issue the Inquire Firmware Version Letter command.
+ */
+ if (strcmp(HostAdapter->FirmwareVersion, "3.3") >= 0)
+ {
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersionLetter,
+ NULL, 0, &FirmwareVersionLetter,
+ sizeof(FirmwareVersionLetter))
+ != sizeof(FirmwareVersionLetter))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE FIRMWARE VERSION LETTER");
+ if (FirmwareVersionLetter != ' ' && FirmwareVersionLetter != '\0')
+ *TargetPointer++ = FirmwareVersionLetter;
+ *TargetPointer = '\0';
+ }
+ /*
+ Save the Host Adapter SCSI ID in the Host Adapter structure.
+ */
+ HostAdapter->SCSI_ID = Configuration.HostAdapterID;
+ /*
+ Determine the Bus Type and save it in the Host Adapter structure, determine
+ and save the IRQ Channel if necessary, and determine and save the DMA
+ Channel for ISA Host Adapters.
+ */
+ HostAdapter->HostAdapterBusType =
+ BusLogic_HostAdapterBusTypes[HostAdapter->ModelName[3] - '4'];
+ if (HostAdapter->IRQ_Channel == 0)
+ {
+ if (Configuration.IRQ_Channel9)
+ HostAdapter->IRQ_Channel = 9;
+ else if (Configuration.IRQ_Channel10)
+ HostAdapter->IRQ_Channel = 10;
+ else if (Configuration.IRQ_Channel11)
+ HostAdapter->IRQ_Channel = 11;
+ else if (Configuration.IRQ_Channel12)
+ HostAdapter->IRQ_Channel = 12;
+ else if (Configuration.IRQ_Channel14)
+ HostAdapter->IRQ_Channel = 14;
+ else if (Configuration.IRQ_Channel15)
+ HostAdapter->IRQ_Channel = 15;
+ }
+ if (HostAdapter->HostAdapterBusType == BusLogic_ISA_Bus)
+ {
+ if (Configuration.DMA_Channel5)
+ HostAdapter->DMA_Channel = 5;
+ else if (Configuration.DMA_Channel6)
+ HostAdapter->DMA_Channel = 6;
+ else if (Configuration.DMA_Channel7)
+ HostAdapter->DMA_Channel = 7;
+ }
+ /*
+ Determine whether Extended Translation is enabled and save it in
+ the Host Adapter structure.
+ */
+ GeometryRegister.All = BusLogic_ReadGeometryRegister(HostAdapter);
+ HostAdapter->ExtendedTranslationEnabled =
+ GeometryRegister.Bits.ExtendedTranslationEnabled;
+ /*
+ Save the Scatter Gather Limits, Level Sensitive Interrupt flag, Wide
+ SCSI flag, Differential SCSI flag, SCAM Supported flag, and
+ Ultra SCSI flag in the Host Adapter structure.
+ */
+ HostAdapter->HostAdapterScatterGatherLimit =
+ ExtendedSetupInformation.ScatterGatherLimit;
+ HostAdapter->DriverScatterGatherLimit =
+ HostAdapter->HostAdapterScatterGatherLimit;
+ if (HostAdapter->HostAdapterScatterGatherLimit > BusLogic_ScatterGatherLimit)
+ HostAdapter->DriverScatterGatherLimit = BusLogic_ScatterGatherLimit;
+ if (ExtendedSetupInformation.Misc.LevelSensitiveInterrupt)
+ HostAdapter->LevelSensitiveInterrupt = true;
+ HostAdapter->HostWideSCSI = ExtendedSetupInformation.HostWideSCSI;
+ HostAdapter->HostDifferentialSCSI =
+ ExtendedSetupInformation.HostDifferentialSCSI;
+ HostAdapter->HostSupportsSCAM = ExtendedSetupInformation.HostSupportsSCAM;
+ HostAdapter->HostUltraSCSI = ExtendedSetupInformation.HostUltraSCSI;
+ /*
+ Determine whether Extended LUN Format CCBs are supported and save the
+ information in the Host Adapter structure.
+ */
+ if (HostAdapter->FirmwareVersion[0] == '5' ||
+ (HostAdapter->FirmwareVersion[0] == '4' && HostAdapter->HostWideSCSI))
+ HostAdapter->ExtendedLUNSupport = true;
+ /*
+ Issue the Inquire PCI Host Adapter Information command to read the
+ Termination Information from "W" series MultiMaster Host Adapters.
+ */
+ if (HostAdapter->FirmwareVersion[0] == '5')
+ {
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_InquirePCIHostAdapterInformation,
+ NULL, 0, &PCIHostAdapterInformation,
+ sizeof(PCIHostAdapterInformation))
+ != sizeof(PCIHostAdapterInformation))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE PCI HOST ADAPTER INFORMATION");
+ /*
+ Save the Termination Information in the Host Adapter structure.
+ */
+ if (PCIHostAdapterInformation.GenericInfoValid)
+ {
+ HostAdapter->TerminationInfoValid = true;
+ HostAdapter->LowByteTerminated =
+ PCIHostAdapterInformation.LowByteTerminated;
+ HostAdapter->HighByteTerminated =
+ PCIHostAdapterInformation.HighByteTerminated;
+ }
+ }
+ /*
+ Issue the Fetch Host Adapter Local RAM command to read the AutoSCSI data
+ from "W" and "C" series MultiMaster Host Adapters.
+ */
+ if (HostAdapter->FirmwareVersion[0] >= '4')
+ {
+ FetchHostAdapterLocalRAMRequest.ByteOffset =
+ BusLogic_AutoSCSI_BaseOffset;
+ FetchHostAdapterLocalRAMRequest.ByteCount = sizeof(AutoSCSIData);
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_FetchHostAdapterLocalRAM,
+ &FetchHostAdapterLocalRAMRequest,
+ sizeof(FetchHostAdapterLocalRAMRequest),
+ &AutoSCSIData, sizeof(AutoSCSIData))
+ != sizeof(AutoSCSIData))
+ return BusLogic_Failure(HostAdapter, "FETCH HOST ADAPTER LOCAL RAM");
+ /*
+ Save the Parity Checking Enabled, Bus Reset Enabled, and Termination
+ Information in the Host Adapter structure.
+ */
+ HostAdapter->ParityCheckingEnabled = AutoSCSIData.ParityCheckingEnabled;
+ HostAdapter->BusResetEnabled = AutoSCSIData.BusResetEnabled;
+ if (HostAdapter->FirmwareVersion[0] == '4')
+ {
+ HostAdapter->TerminationInfoValid = true;
+ HostAdapter->LowByteTerminated = AutoSCSIData.LowByteTerminated;
+ HostAdapter->HighByteTerminated = AutoSCSIData.HighByteTerminated;
+ }
+ /*
+ Save the Wide Permitted, Fast Permitted, Synchronous Permitted,
+ Disconnect Permitted, Ultra Permitted, and SCAM Information in the
+ Host Adapter structure.
+ */
+ HostAdapter->WidePermitted = AutoSCSIData.WidePermitted;
+ HostAdapter->FastPermitted = AutoSCSIData.FastPermitted;
+ HostAdapter->SynchronousPermitted =
+ AutoSCSIData.SynchronousPermitted;
+ HostAdapter->DisconnectPermitted =
+ AutoSCSIData.DisconnectPermitted;
+ if (HostAdapter->HostUltraSCSI)
+ HostAdapter->UltraPermitted = AutoSCSIData.UltraPermitted;
+ if (HostAdapter->HostSupportsSCAM)
+ {
+ HostAdapter->SCAM_Enabled = AutoSCSIData.SCAM_Enabled;
+ HostAdapter->SCAM_Level2 = AutoSCSIData.SCAM_Level2;
+ }
+ }
+ /*
+ Initialize fields in the Host Adapter structure for "S" and "A" series
+ MultiMaster Host Adapters.
+ */
+ if (HostAdapter->FirmwareVersion[0] < '4')
+ {
+ if (SetupInformation.SynchronousInitiationEnabled)
+ {
+ HostAdapter->SynchronousPermitted = 0xFF;
+ if (HostAdapter->HostAdapterBusType == BusLogic_EISA_Bus)
+ {
+ if (ExtendedSetupInformation.Misc.FastOnEISA)
+ HostAdapter->FastPermitted = 0xFF;
+ if (strcmp(HostAdapter->ModelName, "BT-757") == 0)
+ HostAdapter->WidePermitted = 0xFF;
+ }
+ }
+ HostAdapter->DisconnectPermitted = 0xFF;
+ HostAdapter->ParityCheckingEnabled =
+ SetupInformation.ParityCheckingEnabled;
+ HostAdapter->BusResetEnabled = true;
+ }
+ /*
+ Determine the maximum number of Target IDs and Logical Units supported by
+ this driver for Wide and Narrow Host Adapters.
+ */
+ HostAdapter->MaxTargetDevices = (HostAdapter->HostWideSCSI ? 16 : 8);
+ HostAdapter->MaxLogicalUnits = (HostAdapter->ExtendedLUNSupport ? 32 : 8);
+ /*
+ Select appropriate values for the Mailbox Count, Driver Queue Depth,
+ Initial CCBs, and Incremental CCBs variables based on whether or not Strict
+ Round Robin Mode is supported. If Strict Round Robin Mode is supported,
+ then there is no performance degradation in using the maximum possible
+ number of Outgoing and Incoming Mailboxes and allowing the Tagged and
+ Untagged Queue Depths to determine the actual utilization. If Strict Round
+ Robin Mode is not supported, then the Host Adapter must scan all the
+ Outgoing Mailboxes whenever an Outgoing Mailbox entry is made, which can
+ cause a substantial performance penalty. The host adapters actually have
+ room to store the following number of CCBs internally; that is, they can
+ internally queue and manage this many active commands on the SCSI bus
+ simultaneously. Performance measurements demonstrate that the Driver Queue
+ Depth should be set to the Mailbox Count, rather than the Host Adapter
+ Queue Depth (internal CCB capacity), as it is more efficient to have the
+ queued commands waiting in Outgoing Mailboxes if necessary than to block
+ the process in the higher levels of the SCSI Subsystem.
+
+ 192 BT-948/958/958D
+ 100 BT-946C/956C/956CD/747C/757C/757CD/445C
+ 50 BT-545C/540CF
+ 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
+ */
+ if (HostAdapter->FirmwareVersion[0] == '5')
+ HostAdapter->HostAdapterQueueDepth = 192;
+ else if (HostAdapter->FirmwareVersion[0] == '4')
+ HostAdapter->HostAdapterQueueDepth =
+ (HostAdapter->HostAdapterBusType != BusLogic_ISA_Bus ? 100 : 50);
+ else HostAdapter->HostAdapterQueueDepth = 30;
+ if (strcmp(HostAdapter->FirmwareVersion, "3.31") >= 0)
+ {
+ HostAdapter->StrictRoundRobinModeSupport = true;
+ HostAdapter->MailboxCount = BusLogic_MaxMailboxes;
+ }
+ else
+ {
+ HostAdapter->StrictRoundRobinModeSupport = false;
+ HostAdapter->MailboxCount = 32;
+ }
+ HostAdapter->DriverQueueDepth = HostAdapter->MailboxCount;
+ HostAdapter->InitialCCBs = 4 * BusLogic_CCB_AllocationGroupSize;
+ HostAdapter->IncrementalCCBs = BusLogic_CCB_AllocationGroupSize;
+ /*
+ Tagged Queuing support is available and operates properly on all "W" series
+ MultiMaster Host Adapters, on "C" series MultiMaster Host Adapters with
+ firmware version 4.22 and above, and on "S" series MultiMaster Host
+ Adapters with firmware version 3.35 and above.
+ */
+ HostAdapter->TaggedQueuingPermitted = 0;
+ switch (HostAdapter->FirmwareVersion[0])
+ {
+ case '5':
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ break;
+ case '4':
+ if (strcmp(HostAdapter->FirmwareVersion, "4.22") >= 0)
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ break;
+ case '3':
+ if (strcmp(HostAdapter->FirmwareVersion, "3.35") >= 0)
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ break;
+ }
+ /*
+ Determine the Host Adapter BIOS Address if the BIOS is enabled and
+ save it in the Host Adapter structure. The BIOS is disabled if the
+ BIOS_Address is 0.
+ */
+ HostAdapter->BIOS_Address = ExtendedSetupInformation.BIOS_Address << 12;
+ /*
+ ISA Host Adapters require Bounce Buffers if there is more than 16MB memory.
+ */
+ if (HostAdapter->HostAdapterBusType == BusLogic_ISA_Bus &&
+ (void *) high_memory > (void *) MAX_DMA_ADDRESS)
+ HostAdapter->BounceBuffersRequired = true;
+ /*
+ BusLogic BT-445S Host Adapters prior to board revision E have a hardware
+ bug whereby when the BIOS is enabled, transfers to/from the same address
+ range the BIOS occupies modulo 16MB are handled incorrectly. Only properly
+ functioning BT-445S Host Adapters have firmware version 3.37, so require
+ that ISA Bounce Buffers be used for the buggy BT-445S models if there is
+ more than 16MB memory.
+ */
+ if (HostAdapter->BIOS_Address > 0 &&
+ strcmp(HostAdapter->ModelName, "BT-445S") == 0 &&
+ strcmp(HostAdapter->FirmwareVersion, "3.37") < 0 &&
+ (void *) high_memory > (void *) MAX_DMA_ADDRESS)
+ HostAdapter->BounceBuffersRequired = true;
+ /*
+ Initialize parameters common to MultiMaster and FlashPoint Host Adapters.
+ */
+Common:
+ /*
+ Initialize the Host Adapter Full Model Name from the Model Name.
+ */
+ strcpy(HostAdapter->FullModelName, "BusLogic ");
+ strcat(HostAdapter->FullModelName, HostAdapter->ModelName);
+ /*
+ Select an appropriate value for the Tagged Queue Depth either from a
+ BusLogic Driver Options specification, or based on whether this Host
+ Adapter requires that ISA Bounce Buffers be used. The Tagged Queue Depth
+ is left at 0 for automatic determination in BusLogic_SelectQueueDepths.
+ Initialize the Untagged Queue Depth.
+ */
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ {
+ unsigned char QueueDepth = 0;
+ if (HostAdapter->DriverOptions != NULL &&
+ HostAdapter->DriverOptions->QueueDepth[TargetID] > 0)
+ QueueDepth = HostAdapter->DriverOptions->QueueDepth[TargetID];
+ else if (HostAdapter->BounceBuffersRequired)
+ QueueDepth = BusLogic_TaggedQueueDepthBB;
+ HostAdapter->QueueDepth[TargetID] = QueueDepth;
+ }
+ if (HostAdapter->BounceBuffersRequired)
+ HostAdapter->UntaggedQueueDepth = BusLogic_UntaggedQueueDepthBB;
+ else HostAdapter->UntaggedQueueDepth = BusLogic_UntaggedQueueDepth;
+ if (HostAdapter->DriverOptions != NULL)
+ HostAdapter->CommonQueueDepth =
+ HostAdapter->DriverOptions->CommonQueueDepth;
+ if (HostAdapter->CommonQueueDepth > 0 &&
+ HostAdapter->CommonQueueDepth < HostAdapter->UntaggedQueueDepth)
+ HostAdapter->UntaggedQueueDepth = HostAdapter->CommonQueueDepth;
+ /*
+ Tagged Queuing is only allowed if Disconnect/Reconnect is permitted.
+ Therefore, mask the Tagged Queuing Permitted Default bits with the
+ Disconnect/Reconnect Permitted bits.
+ */
+ HostAdapter->TaggedQueuingPermitted &= HostAdapter->DisconnectPermitted;
+ /*
+ Combine the default Tagged Queuing Permitted bits with any BusLogic Driver
+ Options Tagged Queuing specification.
+ */
+ if (HostAdapter->DriverOptions != NULL)
+ HostAdapter->TaggedQueuingPermitted =
+ (HostAdapter->DriverOptions->TaggedQueuingPermitted &
+ HostAdapter->DriverOptions->TaggedQueuingPermittedMask) |
+ (HostAdapter->TaggedQueuingPermitted &
+ ~HostAdapter->DriverOptions->TaggedQueuingPermittedMask);
+ /*
+ Select appropriate values for the Error Recovery Strategy array
+ either from a BusLogic Driver Options specification, or using
+ BusLogic_ErrorRecovery_Default.
+ */
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ if (HostAdapter->DriverOptions != NULL)
+ HostAdapter->ErrorRecoveryStrategy[TargetID] =
+ HostAdapter->DriverOptions->ErrorRecoveryStrategy[TargetID];
+ else HostAdapter->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ /*
+ Select an appropriate value for Bus Settle Time either from a BusLogic
+ Driver Options specification, or from BusLogic_DefaultBusSettleTime.
+ */
+ if (HostAdapter->DriverOptions != NULL &&
+ HostAdapter->DriverOptions->BusSettleTime > 0)
+ HostAdapter->BusSettleTime = HostAdapter->DriverOptions->BusSettleTime;
+ else HostAdapter->BusSettleTime = BusLogic_DefaultBusSettleTime;
+ /*
+ Indicate reading the Host Adapter Configuration completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_ReportHostAdapterConfiguration reports the configuration of
+ Host Adapter.
+*/
+
+static boolean BusLogic_ReportHostAdapterConfiguration(BusLogic_HostAdapter_T
+                                                       *HostAdapter)
+{
+  /*
+    Prints the complete configuration of Host Adapter via BusLogic_Info.
+    NOTE: throughout this driver, BusLogic_Info takes the Host Adapter as the
+    argument immediately after the format string; the format arguments follow.
+    Always returns true.
+  */
+  unsigned short AllTargetsMask = (1 << HostAdapter->MaxTargetDevices) - 1;
+  unsigned short SynchronousPermitted, FastPermitted;
+  unsigned short UltraPermitted, WidePermitted;
+  unsigned short DisconnectPermitted, TaggedQueuingPermitted;
+  boolean CommonSynchronousNegotiation, CommonTaggedQueueDepth;
+  boolean CommonErrorRecovery;
+  /*
+    Per-target summary strings: one character per Target ID, with '#' marking
+    the Host Adapter's own SCSI ID and a terminating '\0'. The *Message
+    pointers either point at these strings or are redirected to a common
+    literal ("Enabled"/"Disabled"/etc.) when all targets agree.
+  */
+  char SynchronousString[BusLogic_MaxTargetDevices+1];
+  char WideString[BusLogic_MaxTargetDevices+1];
+  char DisconnectString[BusLogic_MaxTargetDevices+1];
+  char TaggedQueuingString[BusLogic_MaxTargetDevices+1];
+  char ErrorRecoveryString[BusLogic_MaxTargetDevices+1];
+  char *SynchronousMessage = SynchronousString;
+  char *WideMessage = WideString;
+  char *DisconnectMessage = DisconnectString;
+  char *TaggedQueuingMessage = TaggedQueuingString;
+  char *ErrorRecoveryMessage = ErrorRecoveryString;
+  int TargetID;
+  /* Model, bus type, and SCSI feature set. */
+  BusLogic_Info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n",
+                HostAdapter, HostAdapter->ModelName,
+                BusLogic_HostAdapterBusNames[HostAdapter->HostAdapterBusType],
+                (HostAdapter->HostWideSCSI ? " Wide" : ""),
+                (HostAdapter->HostDifferentialSCSI ? " Differential" : ""),
+                (HostAdapter->HostUltraSCSI ? " Ultra" : ""));
+  BusLogic_Info(" Firmware Version: %s, I/O Address: 0x%X, "
+                "IRQ Channel: %d/%s\n", HostAdapter,
+                HostAdapter->FirmwareVersion,
+                HostAdapter->IO_Address, HostAdapter->IRQ_Channel,
+                (HostAdapter->LevelSensitiveInterrupt ? "Level" : "Edge"));
+  /* Bus-specific resources: DMA/BIOS for non-PCI, bus/device/address for PCI. */
+  if (HostAdapter->HostAdapterBusType != BusLogic_PCI_Bus)
+    {
+      BusLogic_Info(" DMA Channel: ", HostAdapter);
+      if (HostAdapter->DMA_Channel > 0)
+        BusLogic_Info("%d, ", HostAdapter, HostAdapter->DMA_Channel);
+      else BusLogic_Info("None, ", HostAdapter);
+      if (HostAdapter->BIOS_Address > 0)
+        BusLogic_Info("BIOS Address: 0x%X, ", HostAdapter,
+                      HostAdapter->BIOS_Address);
+      else BusLogic_Info("BIOS Address: None, ", HostAdapter);
+    }
+  else
+    {
+      BusLogic_Info(" PCI Bus: %d, Device: %d, Address: ",
+                    HostAdapter, HostAdapter->Bus, HostAdapter->Device);
+      if (HostAdapter->PCI_Address > 0)
+        BusLogic_Info("0x%X, ", HostAdapter, HostAdapter->PCI_Address);
+      else BusLogic_Info("Unassigned, ", HostAdapter);
+    }
+  BusLogic_Info("Host Adapter SCSI ID: %d\n", HostAdapter,
+                HostAdapter->SCSI_ID);
+  BusLogic_Info(" Parity Checking: %s, Extended Translation: %s\n",
+                HostAdapter,
+                (HostAdapter->ParityCheckingEnabled
+                 ? "Enabled" : "Disabled"),
+                (HostAdapter->ExtendedTranslationEnabled
+                 ? "Enabled" : "Disabled"));
+  /* The adapter's own SCSI ID is excluded from every per-target mask. */
+  AllTargetsMask &= ~(1 << HostAdapter->SCSI_ID);
+  SynchronousPermitted = HostAdapter->SynchronousPermitted & AllTargetsMask;
+  FastPermitted = HostAdapter->FastPermitted & AllTargetsMask;
+  UltraPermitted = HostAdapter->UltraPermitted & AllTargetsMask;
+  /*
+    Adapters with per-target synchronous control (MultiMaster firmware 4.x+,
+    EISA MultiMaster, or FlashPoint) get either a common label (Disabled/
+    Slow/Fast/Ultra) when all targets agree, or a per-target string using
+    'N' (none), 'S' (slow), 'F' (fast), 'U' (ultra). Older adapters only
+    report Enabled/Disabled.
+  */
+  if ((BusLogic_MultiMasterHostAdapterP(HostAdapter) &&
+       (HostAdapter->FirmwareVersion[0] >= '4' ||
+        HostAdapter->HostAdapterBusType == BusLogic_EISA_Bus)) ||
+      BusLogic_FlashPointHostAdapterP(HostAdapter))
+    {
+      CommonSynchronousNegotiation = false;
+      if (SynchronousPermitted == 0)
+        {
+          SynchronousMessage = "Disabled";
+          CommonSynchronousNegotiation = true;
+        }
+      else if (SynchronousPermitted == AllTargetsMask)
+        {
+          if (FastPermitted == 0)
+            {
+              SynchronousMessage = "Slow";
+              CommonSynchronousNegotiation = true;
+            }
+          else if (FastPermitted == AllTargetsMask)
+            {
+              if (UltraPermitted == 0)
+                {
+                  SynchronousMessage = "Fast";
+                  CommonSynchronousNegotiation = true;
+                }
+              else if (UltraPermitted == AllTargetsMask)
+                {
+                  SynchronousMessage = "Ultra";
+                  CommonSynchronousNegotiation = true;
+                }
+            }
+        }
+      if (!CommonSynchronousNegotiation)
+        {
+          for (TargetID = 0;
+               TargetID < HostAdapter->MaxTargetDevices;
+               TargetID++)
+            SynchronousString[TargetID] =
+              ((!(SynchronousPermitted & (1 << TargetID))) ? 'N' :
+               (!(FastPermitted & (1 << TargetID)) ? 'S' :
+                (!(UltraPermitted & (1 << TargetID)) ? 'F' : 'U')));
+          SynchronousString[HostAdapter->SCSI_ID] = '#';
+          SynchronousString[HostAdapter->MaxTargetDevices] = '\0';
+        }
+    }
+  else SynchronousMessage =
+         (SynchronousPermitted == 0 ? "Disabled" : "Enabled");
+  /* Wide negotiation: common label, or per-target 'Y'/'N' string. */
+  WidePermitted = HostAdapter->WidePermitted & AllTargetsMask;
+  if (WidePermitted == 0)
+    WideMessage = "Disabled";
+  else if (WidePermitted == AllTargetsMask)
+    WideMessage = "Enabled";
+  else
+    {
+      for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+        WideString[TargetID] =
+          ((WidePermitted & (1 << TargetID)) ? 'Y' : 'N');
+      WideString[HostAdapter->SCSI_ID] = '#';
+      WideString[HostAdapter->MaxTargetDevices] = '\0';
+    }
+  /* Disconnect/Reconnect: common label, or per-target 'Y'/'N' string. */
+  DisconnectPermitted = HostAdapter->DisconnectPermitted & AllTargetsMask;
+  if (DisconnectPermitted == 0)
+    DisconnectMessage = "Disabled";
+  else if (DisconnectPermitted == AllTargetsMask)
+    DisconnectMessage = "Enabled";
+  else
+    {
+      for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+        DisconnectString[TargetID] =
+          ((DisconnectPermitted & (1 << TargetID)) ? 'Y' : 'N');
+      DisconnectString[HostAdapter->SCSI_ID] = '#';
+      DisconnectString[HostAdapter->MaxTargetDevices] = '\0';
+    }
+  /* Tagged Queuing: common label, or per-target 'Y'/'N' string. */
+  TaggedQueuingPermitted =
+    HostAdapter->TaggedQueuingPermitted & AllTargetsMask;
+  if (TaggedQueuingPermitted == 0)
+    TaggedQueuingMessage = "Disabled";
+  else if (TaggedQueuingPermitted == AllTargetsMask)
+    TaggedQueuingMessage = "Enabled";
+  else
+    {
+      for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+        TaggedQueuingString[TargetID] =
+          ((TaggedQueuingPermitted & (1 << TargetID)) ? 'Y' : 'N');
+      TaggedQueuingString[HostAdapter->SCSI_ID] = '#';
+      TaggedQueuingString[HostAdapter->MaxTargetDevices] = '\0';
+    }
+  BusLogic_Info(" Synchronous Negotiation: %s, Wide Negotiation: %s\n",
+                HostAdapter, SynchronousMessage, WideMessage);
+  BusLogic_Info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n",
+                HostAdapter, DisconnectMessage, TaggedQueuingMessage);
+  /* Queue/mailbox geometry; the mailbox line applies only to MultiMaster. */
+  if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+    {
+      BusLogic_Info(" Scatter/Gather Limit: %d of %d segments, "
+                    "Mailboxes: %d\n", HostAdapter,
+                    HostAdapter->DriverScatterGatherLimit,
+                    HostAdapter->HostAdapterScatterGatherLimit,
+                    HostAdapter->MailboxCount);
+      BusLogic_Info(" Driver Queue Depth: %d, "
+                    "Host Adapter Queue Depth: %d\n",
+                    HostAdapter, HostAdapter->DriverQueueDepth,
+                    HostAdapter->HostAdapterQueueDepth);
+    }
+  else BusLogic_Info(" Driver Queue Depth: %d, "
+                     "Scatter/Gather Limit: %d segments\n",
+                     HostAdapter, HostAdapter->DriverQueueDepth,
+                     HostAdapter->DriverScatterGatherLimit);
+  BusLogic_Info(" Tagged Queue Depth: ", HostAdapter);
+  /* "Automatic" means depth 0 everywhere (chosen later); "Individual" means
+     the per-target depths differ. */
+  CommonTaggedQueueDepth = true;
+  for (TargetID = 1; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+    if (HostAdapter->QueueDepth[TargetID] != HostAdapter->QueueDepth[0])
+      {
+        CommonTaggedQueueDepth = false;
+        break;
+      }
+  if (CommonTaggedQueueDepth)
+    {
+      if (HostAdapter->QueueDepth[0] > 0)
+        BusLogic_Info("%d", HostAdapter, HostAdapter->QueueDepth[0]);
+      else BusLogic_Info("Automatic", HostAdapter);
+    }
+  else BusLogic_Info("Individual", HostAdapter);
+  BusLogic_Info(", Untagged Queue Depth: %d\n", HostAdapter,
+                HostAdapter->UntaggedQueueDepth);
+  /* Error recovery: common strategy name, or one letter per target. */
+  CommonErrorRecovery = true;
+  for (TargetID = 1; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+    if (HostAdapter->ErrorRecoveryStrategy[TargetID] !=
+        HostAdapter->ErrorRecoveryStrategy[0])
+      {
+        CommonErrorRecovery = false;
+        break;
+      }
+  if (CommonErrorRecovery)
+    ErrorRecoveryMessage =
+      BusLogic_ErrorRecoveryStrategyNames[
+        HostAdapter->ErrorRecoveryStrategy[0]];
+  else
+    {
+      for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+        ErrorRecoveryString[TargetID] =
+          BusLogic_ErrorRecoveryStrategyLetters[
+            HostAdapter->ErrorRecoveryStrategy[TargetID]];
+      ErrorRecoveryString[HostAdapter->SCSI_ID] = '#';
+      ErrorRecoveryString[HostAdapter->MaxTargetDevices] = '\0';
+    }
+  BusLogic_Info(" Error Recovery Strategy: %s, SCSI Bus Reset: %s\n",
+                HostAdapter, ErrorRecoveryMessage,
+                (HostAdapter->BusResetEnabled ? "Enabled" : "Disabled"));
+  /* Termination/SCAM are only reported when valid data was read earlier. */
+  if (HostAdapter->TerminationInfoValid)
+    {
+      if (HostAdapter->HostWideSCSI)
+        BusLogic_Info(" SCSI Bus Termination: %s", HostAdapter,
+                      (HostAdapter->LowByteTerminated
+                       ? (HostAdapter->HighByteTerminated
+                          ? "Both Enabled" : "Low Enabled")
+                       : (HostAdapter->HighByteTerminated
+                          ? "High Enabled" : "Both Disabled")));
+      else BusLogic_Info(" SCSI Bus Termination: %s", HostAdapter,
+                         (HostAdapter->LowByteTerminated ?
+                          "Enabled" : "Disabled"));
+      if (HostAdapter->HostSupportsSCAM)
+        BusLogic_Info(", SCAM: %s", HostAdapter,
+                      (HostAdapter->SCAM_Enabled
+                       ? (HostAdapter->SCAM_Level2
+                          ? "Enabled, Level 2" : "Enabled, Level 1")
+                       : "Disabled"));
+      BusLogic_Info("\n", HostAdapter);
+    }
+  /*
+    Indicate reporting the Host Adapter configuration completed successfully.
+  */
+  return true;
+}
+
+
+/*
+ BusLogic_AcquireResources acquires the system resources necessary to use
+ Host Adapter.
+*/
+
+static boolean BusLogic_AcquireResources(BusLogic_HostAdapter_T *HostAdapter)
+{
+  /*
+    Acquire the IRQ channel (shared) and, when assigned, the DMA channel
+    (exclusive) for Host Adapter. Returns true on success; on any failure an
+    error is logged and false is returned so the caller can detach. The
+    *_Acquired flags record what must later be released by
+    BusLogic_ReleaseResources.
+  */
+  int IRQ_Channel = HostAdapter->IRQ_Channel;
+  int DMA_Channel = HostAdapter->DMA_Channel;
+  /* A valid interrupt assignment is mandatory. */
+  if (IRQ_Channel == 0)
+    {
+      BusLogic_Error("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n",
+                     HostAdapter);
+      return false;
+    }
+  /* Register a shared handler on the IRQ channel. */
+  if (request_irq(IRQ_Channel, BusLogic_InterruptHandler,
+                  SA_INTERRUPT | SA_SHIRQ,
+                  HostAdapter->FullModelName, HostAdapter) < 0)
+    {
+      BusLogic_Error("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n",
+                     HostAdapter, IRQ_Channel);
+      return false;
+    }
+  HostAdapter->IRQ_ChannelAcquired = true;
+  /* Claim the DMA channel exclusively when one is assigned. */
+  if (DMA_Channel > 0)
+    {
+      if (request_dma(DMA_Channel, HostAdapter->FullModelName) < 0)
+        {
+          BusLogic_Error("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n",
+                         HostAdapter, DMA_Channel);
+          return false;
+        }
+      set_dma_mode(DMA_Channel, DMA_MODE_CASCADE);
+      enable_dma(DMA_Channel);
+      HostAdapter->DMA_ChannelAcquired = true;
+    }
+  /* All required system resources are now held. */
+  return true;
+}
+
+
+/*
+ BusLogic_ReleaseResources releases any system resources previously acquired
+ by BusLogic_AcquireResources.
+*/
+
+static void BusLogic_ReleaseResources(BusLogic_HostAdapter_T *HostAdapter)
+{
+  /*
+    Release whatever BusLogic_AcquireResources obtained: the shared IRQ
+    channel and/or the exclusive DMA channel. Each release is gated on the
+    corresponding *_Acquired flag, so this is safe to call after a partial
+    acquisition.
+  */
+  if (HostAdapter->IRQ_ChannelAcquired)
+    {
+      free_irq(HostAdapter->IRQ_Channel, HostAdapter);
+    }
+  if (HostAdapter->DMA_ChannelAcquired)
+    {
+      free_dma(HostAdapter->DMA_Channel);
+    }
+}
+
+
+/*
+ BusLogic_InitializeHostAdapter initializes Host Adapter. This is the only
+ function called during SCSI Host Adapter detection which modifies the state
+ of the Host Adapter from its initial power on or hard reset state.
+*/
+
+static boolean BusLogic_InitializeHostAdapter(BusLogic_HostAdapter_T
+                                              *HostAdapter)
+{
+  /*
+    Brings the Host Adapter from its power-on/hard-reset state into
+    operation: resets per-target bookkeeping, lays out and registers the
+    Outgoing/Incoming Mailboxes (MultiMaster only), and enables optional
+    firmware features. Returns true on success, false (via
+    BusLogic_Failure) if any adapter command fails. NOTE: BusLogic_Command
+    returning a negative value indicates command failure.
+  */
+  BusLogic_ExtendedMailboxRequest_T ExtendedMailboxRequest;
+  BusLogic_RoundRobinModeRequest_T RoundRobinModeRequest;
+  BusLogic_SetCCBFormatRequest_T SetCCBFormatRequest;
+  int TargetID;
+  /*
+    Initialize the pointers to the first and last CCBs that are queued for
+    completion processing.
+  */
+  HostAdapter->FirstCompletedCCB = NULL;
+  HostAdapter->LastCompletedCCB = NULL;
+  /*
+    Initialize the Bus Device Reset Pending CCB, Tagged Queuing Active,
+    Command Successful Flag, Active Commands, and Commands Since Reset
+    for each Target Device.
+  */
+  for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+    {
+      HostAdapter->BusDeviceResetPendingCCB[TargetID] = NULL;
+      HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false;
+      HostAdapter->TargetFlags[TargetID].CommandSuccessfulFlag = false;
+      HostAdapter->ActiveCommands[TargetID] = 0;
+      HostAdapter->CommandsSinceReset[TargetID] = 0;
+    }
+  /*
+    FlashPoint Host Adapters do not use Outgoing and Incoming Mailboxes,
+    so skip straight to the success announcement.
+  */
+  if (BusLogic_FlashPointHostAdapterP(HostAdapter)) goto Done;
+  /*
+    Initialize the Outgoing and Incoming Mailbox pointers. Both rings live
+    in MailboxSpace: MailboxCount Outgoing Mailboxes followed immediately
+    by MailboxCount Incoming Mailboxes.
+  */
+  HostAdapter->FirstOutgoingMailbox =
+    (BusLogic_OutgoingMailbox_T *) HostAdapter->MailboxSpace;
+  HostAdapter->LastOutgoingMailbox =
+    HostAdapter->FirstOutgoingMailbox + HostAdapter->MailboxCount - 1;
+  HostAdapter->NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox;
+  HostAdapter->FirstIncomingMailbox =
+    (BusLogic_IncomingMailbox_T *) (HostAdapter->LastOutgoingMailbox + 1);
+  HostAdapter->LastIncomingMailbox =
+    HostAdapter->FirstIncomingMailbox + HostAdapter->MailboxCount - 1;
+  HostAdapter->NextIncomingMailbox = HostAdapter->FirstIncomingMailbox;
+  /*
+    Initialize the Outgoing and Incoming Mailbox structures (zero-fill).
+  */
+  memset(HostAdapter->FirstOutgoingMailbox, 0,
+         HostAdapter->MailboxCount * sizeof(BusLogic_OutgoingMailbox_T));
+  memset(HostAdapter->FirstIncomingMailbox, 0,
+         HostAdapter->MailboxCount * sizeof(BusLogic_IncomingMailbox_T));
+  /*
+    Initialize the Host Adapter's Pointer to the Outgoing/Incoming Mailboxes.
+    The adapter is given the bus address of the Outgoing ring.
+  */
+  ExtendedMailboxRequest.MailboxCount = HostAdapter->MailboxCount;
+  ExtendedMailboxRequest.BaseMailboxAddress =
+    Virtual_to_Bus(HostAdapter->FirstOutgoingMailbox);
+  if (BusLogic_Command(HostAdapter, BusLogic_InitializeExtendedMailbox,
+                       &ExtendedMailboxRequest,
+                       sizeof(ExtendedMailboxRequest), NULL, 0) < 0)
+    return BusLogic_Failure(HostAdapter, "MAILBOX INITIALIZATION");
+  /*
+    Enable Strict Round Robin Mode if supported by the Host Adapter. In
+    Strict Round Robin Mode, the Host Adapter only looks at the next Outgoing
+    Mailbox for each new command, rather than scanning through all the
+    Outgoing Mailboxes to find any that have new commands in them. Strict
+    Round Robin Mode is significantly more efficient.
+  */
+  if (HostAdapter->StrictRoundRobinModeSupport)
+    {
+      RoundRobinModeRequest = BusLogic_StrictRoundRobinMode;
+      if (BusLogic_Command(HostAdapter, BusLogic_EnableStrictRoundRobinMode,
+                           &RoundRobinModeRequest,
+                           sizeof(RoundRobinModeRequest), NULL, 0) < 0)
+        return BusLogic_Failure(HostAdapter, "ENABLE STRICT ROUND ROBIN MODE");
+    }
+  /*
+    For Host Adapters that support Extended LUN Format CCBs, issue the Set CCB
+    Format command to allow 32 Logical Units per Target Device.
+  */
+  if (HostAdapter->ExtendedLUNSupport)
+    {
+      SetCCBFormatRequest = BusLogic_ExtendedLUNFormatCCB;
+      if (BusLogic_Command(HostAdapter, BusLogic_SetCCBFormat,
+                           &SetCCBFormatRequest, sizeof(SetCCBFormatRequest),
+                           NULL, 0) < 0)
+        return BusLogic_Failure(HostAdapter, "SET CCB FORMAT");
+    }
+  /*
+    Announce Successful Initialization. On re-initialization (after the
+    first time) the message is logged as a warning instead of info.
+  */
+Done:
+  if (!HostAdapter->HostAdapterInitialized)
+    {
+      BusLogic_Info("*** %s Initialized Successfully ***\n",
+                    HostAdapter, HostAdapter->FullModelName);
+      BusLogic_Info("\n", HostAdapter);
+    }
+  else BusLogic_Warning("*** %s Initialized Successfully ***\n",
+                        HostAdapter, HostAdapter->FullModelName);
+  HostAdapter->HostAdapterInitialized = true;
+  /*
+    Indicate the Host Adapter Initialization completed successfully.
+  */
+  return true;
+}
+
+
+/*
+ BusLogic_TargetDeviceInquiry inquires about the Target Devices accessible
+ through Host Adapter.
+*/
+
+static boolean BusLogic_TargetDeviceInquiry(BusLogic_HostAdapter_T
+                                            *HostAdapter)
+{
+  /*
+    Probes which Target Devices exist on the SCSI bus and records their
+    synchronous transfer parameters in the Host Adapter structure. Returns
+    true on success (including the no-op FlashPoint and inhibited cases),
+    false via BusLogic_Failure when an adapter command fails.
+  */
+  BusLogic_InstalledDevices_T InstalledDevices;
+  BusLogic_InstalledDevices8_T InstalledDevicesID0to7;
+  BusLogic_SetupInformation_T SetupInformation;
+  BusLogic_SynchronousPeriod_T SynchronousPeriod;
+  BusLogic_RequestedReplyLength_T RequestedReplyLength;
+  int TargetID;
+  /*
+    Wait a few seconds between the Host Adapter Hard Reset which initiates
+    a SCSI Bus Reset and issuing any SCSI Commands. Some SCSI devices get
+    confused if they receive SCSI Commands too soon after a SCSI Bus Reset.
+  */
+  BusLogic_Delay(HostAdapter->BusSettleTime);
+  /*
+    FlashPoint Host Adapters do not provide for Target Device Inquiry.
+  */
+  if (BusLogic_FlashPointHostAdapterP(HostAdapter)) return true;
+  /*
+    Inhibit the Target Device Inquiry if requested via Driver Options.
+  */
+  if (HostAdapter->DriverOptions != NULL &&
+      HostAdapter->DriverOptions->LocalOptions.InhibitTargetInquiry)
+    return true;
+  /*
+    Issue the Inquire Target Devices command for host adapters with firmware
+    version 4.25 or later, or the Inquire Installed Devices ID 0 to 7 command
+    for older host adapters. This is necessary to force Synchronous Transfer
+    Negotiation so that the Inquire Setup Information and Inquire Synchronous
+    Period commands will return valid data. The Inquire Target Devices command
+    is preferable to Inquire Installed Devices ID 0 to 7 since it only probes
+    Logical Unit 0 of each Target Device.
+  */
+  if (strcmp(HostAdapter->FirmwareVersion, "4.25") >= 0)
+    {
+      /* Reply is a bitmask: bit N set means Target ID N exists. */
+      if (BusLogic_Command(HostAdapter, BusLogic_InquireTargetDevices, NULL, 0,
+                           &InstalledDevices, sizeof(InstalledDevices))
+          != sizeof(InstalledDevices))
+        return BusLogic_Failure(HostAdapter, "INQUIRE TARGET DEVICES");
+      for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+        HostAdapter->TargetFlags[TargetID].TargetExists =
+          (InstalledDevices & (1 << TargetID) ? true : false);
+    }
+  else
+    {
+      /* Reply is one entry per Target ID 0-7; nonzero means it exists. */
+      if (BusLogic_Command(HostAdapter, BusLogic_InquireInstalledDevicesID0to7,
+                           NULL, 0, &InstalledDevicesID0to7,
+                           sizeof(InstalledDevicesID0to7))
+          != sizeof(InstalledDevicesID0to7))
+        return BusLogic_Failure(HostAdapter,
+                                "INQUIRE INSTALLED DEVICES ID 0 TO 7");
+      for (TargetID = 0; TargetID < 8; TargetID++)
+        HostAdapter->TargetFlags[TargetID].TargetExists =
+          (InstalledDevicesID0to7[TargetID] != 0 ? true : false);
+    }
+  /*
+    Issue the Inquire Setup Information command and record the negotiated
+    synchronous offset for each target (IDs 8-15 come from a separate array).
+  */
+  RequestedReplyLength = sizeof(SetupInformation);
+  if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation,
+                       &RequestedReplyLength, sizeof(RequestedReplyLength),
+                       &SetupInformation, sizeof(SetupInformation))
+      != sizeof(SetupInformation))
+    return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION");
+  for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+    HostAdapter->SynchronousOffset[TargetID] =
+      (TargetID < 8
+       ? SetupInformation.SynchronousValuesID0to7[TargetID].Offset
+       : SetupInformation.SynchronousValuesID8to15[TargetID-8].Offset);
+  /* Wide-transfer-active bits are only reported by firmware 5.06L+. */
+  if (strcmp(HostAdapter->FirmwareVersion, "5.06L") >= 0)
+    for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+      HostAdapter->TargetFlags[TargetID].WideTransfersActive =
+        (TargetID < 8
+         ? (SetupInformation.WideTransfersActiveID0to7 & (1 << TargetID)
+            ? true : false)
+         : (SetupInformation.WideTransfersActiveID8to15 & (1 << (TargetID-8))
+            ? true : false));
+  /*
+    Issue the Inquire Synchronous Period command for firmware 3.x+; older
+    firmware has no such command, so derive the period from the Setup
+    Information transfer period as 20 + 5 * TransferPeriod
+    (units per the firmware interface -- presumably nanoseconds; confirm
+    against the MultiMaster firmware documentation).
+  */
+  if (HostAdapter->FirmwareVersion[0] >= '3')
+    {
+      RequestedReplyLength = sizeof(SynchronousPeriod);
+      if (BusLogic_Command(HostAdapter, BusLogic_InquireSynchronousPeriod,
+                           &RequestedReplyLength, sizeof(RequestedReplyLength),
+                           &SynchronousPeriod, sizeof(SynchronousPeriod))
+          != sizeof(SynchronousPeriod))
+        return BusLogic_Failure(HostAdapter, "INQUIRE SYNCHRONOUS PERIOD");
+      for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+        HostAdapter->SynchronousPeriod[TargetID] = SynchronousPeriod[TargetID];
+    }
+  else
+    for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+      if (SetupInformation.SynchronousValuesID0to7[TargetID].Offset > 0)
+        HostAdapter->SynchronousPeriod[TargetID] =
+          20 + 5 * SetupInformation.SynchronousValuesID0to7[TargetID]
+                   .TransferPeriod;
+  /*
+    Indicate the Target Device Inquiry completed successfully.
+  */
+  return true;
+}
+
+
+/*
+ BusLogic_ReportTargetDeviceInfo reports about the Target Devices accessible
+ through Host Adapter.
+*/
+
+static void BusLogic_ReportTargetDeviceInfo(BusLogic_HostAdapter_T
+                                            *HostAdapter)
+{
+  /*
+    Logs one line per newly discovered Target Device (queue depth plus
+    synchronous/wide transfer rate), marking each as reported so repeated
+    calls do not duplicate output.
+  */
+  int TargetID;
+  /*
+    Inhibit the Target Device Inquiry and Reporting if requested.
+  */
+  if (BusLogic_MultiMasterHostAdapterP(HostAdapter) &&
+      HostAdapter->DriverOptions != NULL &&
+      HostAdapter->DriverOptions->LocalOptions.InhibitTargetInquiry)
+    return;
+  /*
+    Report on the Target Devices found.
+  */
+  for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+    {
+      BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+      if (TargetFlags->TargetExists && !TargetFlags->TargetInfoReported)
+        {
+          int SynchronousTransferRate = 0;
+          /* FlashPoint adapters report per-target parameters on demand. */
+          if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+            {
+              boolean WideTransfersActive;
+              FlashPoint_InquireTargetInfo(
+                HostAdapter->CardHandle, TargetID,
+                &HostAdapter->SynchronousPeriod[TargetID],
+                &HostAdapter->SynchronousOffset[TargetID],
+                &WideTransfersActive);
+              TargetFlags->WideTransfersActive = WideTransfersActive;
+            }
+          /* Pre-5.06L MultiMaster firmware never reported the wide-active
+             bits, so infer them from support and permission. */
+          else if (TargetFlags->WideTransfersSupported &&
+                   (HostAdapter->WidePermitted & (1 << TargetID)) &&
+                   strcmp(HostAdapter->FirmwareVersion, "5.06L") < 0)
+            TargetFlags->WideTransfersActive = true;
+          /* Rate is 100000 / SynchronousPeriod, doubled for wide transfers;
+             the division by 10 or 100 below yields MB/sec for display
+             (exact unit of the stored period follows the firmware interface
+             -- see BusLogic_TargetDeviceInquiry). */
+          if (HostAdapter->SynchronousPeriod[TargetID] > 0)
+            SynchronousTransferRate =
+              100000 / HostAdapter->SynchronousPeriod[TargetID];
+          if (TargetFlags->WideTransfersActive)
+            SynchronousTransferRate <<= 1;
+          /* Fast rates get one decimal place, slower rates get two. */
+          if (SynchronousTransferRate >= 9950)
+            {
+              SynchronousTransferRate = (SynchronousTransferRate + 50) / 100;
+              BusLogic_Info("Target %d: Queue Depth %d, %sSynchronous at "
+                            "%d.%01d MB/sec, offset %d\n",
+                            HostAdapter, TargetID,
+                            HostAdapter->QueueDepth[TargetID],
+                            (TargetFlags->WideTransfersActive ? "Wide " : ""),
+                            SynchronousTransferRate / 10,
+                            SynchronousTransferRate % 10,
+                            HostAdapter->SynchronousOffset[TargetID]);
+            }
+          else if (SynchronousTransferRate > 0)
+            {
+              SynchronousTransferRate = (SynchronousTransferRate + 5) / 10;
+              BusLogic_Info("Target %d: Queue Depth %d, %sSynchronous at "
+                            "%d.%02d MB/sec, offset %d\n",
+                            HostAdapter, TargetID,
+                            HostAdapter->QueueDepth[TargetID],
+                            (TargetFlags->WideTransfersActive ? "Wide " : ""),
+                            SynchronousTransferRate / 100,
+                            SynchronousTransferRate % 100,
+                            HostAdapter->SynchronousOffset[TargetID]);
+            }
+          else BusLogic_Info("Target %d: Queue Depth %d, Asynchronous\n",
+                             HostAdapter, TargetID,
+                             HostAdapter->QueueDepth[TargetID]);
+          TargetFlags->TargetInfoReported = true;
+        }
+    }
+}
+
+
+/*
+ BusLogic_InitializeHostStructure initializes the fields in the SCSI Host
+ structure. The base, io_port, n_io_ports, irq, and dma_channel fields in the
+ SCSI Host structure are intentionally left uninitialized, as this driver
+ handles acquisition and release of these resources explicitly, as well as
+ ensuring exclusive access to the Host Adapter hardware and data structures
+ through explicit acquisition and release of the Host Adapter's Lock.
+*/
+
+static void BusLogic_InitializeHostStructure(BusLogic_HostAdapter_T
+                                             *HostAdapter,
+                                             SCSI_Host_T *Host)
+{
+  /*
+    Copies the probed Host Adapter capabilities into the SCSI Host
+    structure. The base, io_port, n_io_ports, irq, and dma_channel fields
+    are deliberately left alone: this driver manages those resources itself.
+  */
+  /* Identification: single channel, keyed by the I/O address. */
+  Host->max_channel = 0;
+  Host->unique_id = HostAdapter->IO_Address;
+  /* Addressable SCSI topology. */
+  Host->this_id = HostAdapter->SCSI_ID;
+  Host->max_id = HostAdapter->MaxTargetDevices;
+  Host->max_lun = HostAdapter->MaxLogicalUnits;
+  /* Queuing and data transfer capabilities. */
+  Host->can_queue = HostAdapter->DriverQueueDepth;
+  Host->cmd_per_lun = HostAdapter->UntaggedQueueDepth;
+  Host->sg_tablesize = HostAdapter->DriverScatterGatherLimit;
+  Host->unchecked_isa_dma = HostAdapter->BounceBuffersRequired;
+}
+
+
+/*
+ BusLogic_SelectQueueDepths selects Queue Depths for each Target Device based
+ on the Host Adapter's Total Queue Depth and the number, type, speed, and
+ capabilities of the Target Devices. When called for the last Host Adapter,
+ it reports on the Target Device Information for all BusLogic Host Adapters
+ since all the Target Devices have now been probed.
+*/
+
+static void BusLogic_SelectQueueDepths(SCSI_Host_T *Host,
+                                       SCSI_Device_T *DeviceList)
+{
+  /*
+    Distributes the Host Adapter's queue capacity across the Target Devices:
+    explicitly configured depths are honored, untagged devices are clamped to
+    the untagged depth, and the remaining capacity is shared evenly among
+    tagged devices whose depth was left at 0 ("automatic"). Finally ensures
+    enough CCBs are allocated, and -- once called for the last registered
+    Host Adapter -- reports Target Device information for all adapters.
+  */
+  BusLogic_HostAdapter_T *HostAdapter =
+    (BusLogic_HostAdapter_T *) Host->hostdata;
+  int TaggedDeviceCount = 0, AutomaticTaggedDeviceCount = 0;
+  int UntaggedDeviceCount = 0, AutomaticTaggedQueueDepth = 0;
+  int AllocatedQueueDepth = 0;
+  SCSI_Device_T *Device;
+  int TargetID;
+  /* First pass: classify each existing target and total the depths that
+     are already fixed (everything except automatic tagged devices). */
+  for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+    if (HostAdapter->TargetFlags[TargetID].TargetExists)
+      {
+        int QueueDepth = HostAdapter->QueueDepth[TargetID];
+        if (HostAdapter->TargetFlags[TargetID].TaggedQueuingSupported &&
+            (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)))
+          {
+            TaggedDeviceCount++;
+            if (QueueDepth == 0) AutomaticTaggedDeviceCount++;
+          }
+        else
+          {
+            UntaggedDeviceCount++;
+            /* Untagged devices may not exceed the untagged depth; 0 means
+               unset, so assign it too. */
+            if (QueueDepth == 0 ||
+                QueueDepth > HostAdapter->UntaggedQueueDepth)
+              {
+                QueueDepth = HostAdapter->UntaggedQueueDepth;
+                HostAdapter->QueueDepth[TargetID] = QueueDepth;
+              }
+          }
+        AllocatedQueueDepth += QueueDepth;
+        /* A depth of 1 makes tagged queuing pointless; revoke it. */
+        if (QueueDepth == 1)
+          HostAdapter->TaggedQueuingPermitted &= ~(1 << TargetID);
+      }
+  HostAdapter->TargetDeviceCount = TaggedDeviceCount + UntaggedDeviceCount;
+  /* Second pass: share the leftover adapter capacity equally among the
+     automatic tagged devices, clamped to the configured min/max. */
+  if (AutomaticTaggedDeviceCount > 0)
+    {
+      AutomaticTaggedQueueDepth =
+        (HostAdapter->HostAdapterQueueDepth - AllocatedQueueDepth)
+        / AutomaticTaggedDeviceCount;
+      if (AutomaticTaggedQueueDepth > BusLogic_MaxAutomaticTaggedQueueDepth)
+        AutomaticTaggedQueueDepth = BusLogic_MaxAutomaticTaggedQueueDepth;
+      if (AutomaticTaggedQueueDepth < BusLogic_MinAutomaticTaggedQueueDepth)
+        AutomaticTaggedQueueDepth = BusLogic_MinAutomaticTaggedQueueDepth;
+      for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+        if (HostAdapter->TargetFlags[TargetID].TargetExists &&
+            HostAdapter->QueueDepth[TargetID] == 0)
+          {
+            AllocatedQueueDepth += AutomaticTaggedQueueDepth;
+            HostAdapter->QueueDepth[TargetID] = AutomaticTaggedQueueDepth;
+          }
+    }
+  /* Propagate the chosen depths to the SCSI devices on this host. */
+  for (Device = DeviceList; Device != NULL; Device = Device->next)
+    if (Device->host == Host)
+      Device->queue_depth = HostAdapter->QueueDepth[Device->id];
+  /* Allocate an extra CCB for each Target Device for a Bus Device Reset. */
+  AllocatedQueueDepth += HostAdapter->TargetDeviceCount;
+  if (AllocatedQueueDepth > HostAdapter->DriverQueueDepth)
+    AllocatedQueueDepth = HostAdapter->DriverQueueDepth;
+  BusLogic_CreateAdditionalCCBs(HostAdapter,
+                                AllocatedQueueDepth
+                                - HostAdapter->AllocatedCCBs,
+                                false);
+  /* Once every adapter has selected depths, report all target info. */
+  if (HostAdapter == BusLogic_LastRegisteredHostAdapter)
+    for (HostAdapter = BusLogic_FirstRegisteredHostAdapter;
+         HostAdapter != NULL;
+         HostAdapter = HostAdapter->Next)
+      BusLogic_ReportTargetDeviceInfo(HostAdapter);
+}
+
+
+/*
+ BusLogic_DetectHostAdapter probes for BusLogic Host Adapters at the standard
+ I/O Addresses where they may be located, initializing, registering, and
+ reporting the configuration of each BusLogic Host Adapter it finds. It
+ returns the number of BusLogic Host Adapters successfully initialized and
+ registered.
+*/
+
+int BusLogic_DetectHostAdapter(SCSI_Host_Template_T *HostTemplate)
+{
+ int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex;
+ BusLogic_HostAdapter_T *PrototypeHostAdapter;
+ if (BusLogic_ProbeOptions.NoProbe) return 0;
+ /* The Probe Info List records every candidate I/O Address; it is
+ freed again before this function returns. */
+ BusLogic_ProbeInfoList = (BusLogic_ProbeInfo_T *)
+ kmalloc(BusLogic_MaxHostAdapters * sizeof(BusLogic_ProbeInfo_T),
+ GFP_ATOMIC);
+ if (BusLogic_ProbeInfoList == NULL)
+ {
+ BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
+ return 0;
+ }
+ memset(BusLogic_ProbeInfoList, 0,
+ BusLogic_MaxHostAdapters * sizeof(BusLogic_ProbeInfoT));
+ PrototypeHostAdapter = (BusLogic_HostAdapter_T *)
+ kmalloc(sizeof(BusLogic_HostAdapter_T), GFP_ATOMIC);
+ if (PrototypeHostAdapter == NULL)
+ {
+ kfree(BusLogic_ProbeInfoList);
+ BusLogic_Error("BusLogic: Unable to allocate Prototype "
+ "Host Adapter\n", NULL);
+ return 0;
+ }
+ memset(PrototypeHostAdapter, 0, sizeof(BusLogic_HostAdapter_T));
+ if (BusLogic_Options != NULL)
+ BusLogic_ParseDriverOptions(BusLogic_Options);
+ BusLogic_InitializeProbeInfoList(PrototypeHostAdapter);
+ for (ProbeIndex = 0; ProbeIndex < BusLogic_ProbeInfoCount; ProbeIndex++)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo = &BusLogic_ProbeInfoList[ProbeIndex];
+ /* The prototype is reused as scratch space for each probe; it is
+ copied into the registered Host's hostdata on success. */
+ BusLogic_HostAdapter_T *HostAdapter = PrototypeHostAdapter;
+ SCSI_Host_T *Host;
+ if (ProbeInfo->IO_Address == 0) continue;
+ memset(HostAdapter, 0, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter->HostAdapterType = ProbeInfo->HostAdapterType;
+ HostAdapter->HostAdapterBusType = ProbeInfo->HostAdapterBusType;
+ HostAdapter->IO_Address = ProbeInfo->IO_Address;
+ HostAdapter->PCI_Address = ProbeInfo->PCI_Address;
+ HostAdapter->Bus = ProbeInfo->Bus;
+ HostAdapter->Device = ProbeInfo->Device;
+ HostAdapter->IRQ_Channel = ProbeInfo->IRQ_Channel;
+ HostAdapter->AddressCount =
+ BusLogic_HostAdapterAddressCount[HostAdapter->HostAdapterType];
+ /*
+ Probe the Host Adapter. If unsuccessful, abort further initialization.
+ */
+ if (!BusLogic_ProbeHostAdapter(HostAdapter)) continue;
+ /*
+ Hard Reset the Host Adapter. If unsuccessful, abort further
+ initialization.
+ */
+ if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true)) continue;
+ /*
+ Check the Host Adapter. If unsuccessful, abort further initialization.
+ */
+ if (!BusLogic_CheckHostAdapter(HostAdapter)) continue;
+ /*
+ Initialize the Driver Options field if provided.
+ */
+ if (DriverOptionsIndex < BusLogic_DriverOptionsCount)
+ HostAdapter->DriverOptions =
+ &BusLogic_DriverOptions[DriverOptionsIndex++];
+ /*
+ Announce the Driver Version and Date, Author's Name, Copyright Notice,
+ and Electronic Mail Address.
+ */
+ BusLogic_AnnounceDriver(HostAdapter);
+ /*
+ Register usage of the I/O Address range. From this point onward, any
+ failure will be assumed to be due to a problem with the Host Adapter,
+ rather than due to having mistakenly identified this port as belonging
+ to a BusLogic Host Adapter. The I/O Address range will not be
+ released, thereby preventing it from being incorrectly identified as
+ any other type of Host Adapter.
+ */
+ request_region(HostAdapter->IO_Address, HostAdapter->AddressCount,
+ "BusLogic");
+ /*
+ Register the SCSI Host structure.
+ NOTE(review): the return of scsi_register is used without a NULL
+ check — confirm it cannot fail in this kernel version.
+ */
+ Host = scsi_register(HostTemplate, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter = (BusLogic_HostAdapter_T *) Host->hostdata;
+ memcpy(HostAdapter, PrototypeHostAdapter, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter->SCSI_Host = Host;
+ HostAdapter->HostNumber = Host->host_no;
+ Host->select_queue_depths = BusLogic_SelectQueueDepths;
+ /*
+ Add Host Adapter to the end of the list of registered BusLogic
+ Host Adapters.
+ */
+ BusLogic_RegisterHostAdapter(HostAdapter);
+ /*
+ Read the Host Adapter Configuration, Configure the Host Adapter,
+ Acquire the System Resources necessary to use the Host Adapter, then
+ Create the Initial CCBs, Initialize the Host Adapter, and finally
+ perform Target Device Inquiry.
+ */
+ if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
+ BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
+ BusLogic_AcquireResources(HostAdapter) &&
+ BusLogic_CreateInitialCCBs(HostAdapter) &&
+ BusLogic_InitializeHostAdapter(HostAdapter) &&
+ BusLogic_TargetDeviceInquiry(HostAdapter))
+ {
+ /*
+ Initialization has been completed successfully. Release and
+ re-register usage of the I/O Address range so that the Model
+ Name of the Host Adapter will appear, and initialize the SCSI
+ Host structure.
+ */
+ release_region(HostAdapter->IO_Address,
+ HostAdapter->AddressCount);
+ request_region(HostAdapter->IO_Address,
+ HostAdapter->AddressCount,
+ HostAdapter->FullModelName);
+ BusLogic_InitializeHostStructure(HostAdapter, Host);
+ BusLogicHostAdapterCount++;
+ }
+ else
+ {
+ /*
+ An error occurred during Host Adapter Configuration Querying, Host
+ Adapter Configuration, Resource Acquisition, CCB Creation, Host
+ Adapter Initialization, or Target Device Inquiry, so remove Host
+ Adapter from the list of registered BusLogic Host Adapters, destroy
+ the CCBs, Release the System Resources, and Unregister the SCSI
+ Host.
+ */
+ BusLogic_DestroyCCBs(HostAdapter);
+ BusLogic_ReleaseResources(HostAdapter);
+ BusLogic_UnregisterHostAdapter(HostAdapter);
+ scsi_unregister(Host);
+ }
+ }
+ kfree(PrototypeHostAdapter);
+ kfree(BusLogic_ProbeInfoList);
+ BusLogic_ProbeInfoList = NULL;
+ return BusLogicHostAdapterCount;
+}
+
+
+/*
+ BusLogic_ReleaseHostAdapter releases all resources previously acquired to
+ support a specific Host Adapter, including the I/O Address range, and
+ unregisters the BusLogic Host Adapter.
+*/
+
+int BusLogic_ReleaseHostAdapter(SCSI_Host_T *Host)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Host->hostdata;
+ /*
+ FlashPoint Host Adapters must first be released by the FlashPoint
+ SCCB Manager.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ FlashPoint_ReleaseHostAdapter(HostAdapter->CardHandle);
+ /*
+ Destroy the CCBs and release any system resources acquired to
+ support Host Adapter.
+ */
+ BusLogic_DestroyCCBs(HostAdapter);
+ BusLogic_ReleaseResources(HostAdapter);
+ /*
+ Release usage of the I/O Address range.
+ */
+ release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
+ /*
+ Remove Host Adapter from the list of registered BusLogic Host Adapters.
+ */
+ BusLogic_UnregisterHostAdapter(HostAdapter);
+ /* Always reports success to the SCSI Subsystem. */
+ return 0;
+}
+
+
+/*
+ BusLogic_QueueCompletedCCB queues CCB for completion processing.
+*/
+
+static void BusLogic_QueueCompletedCCB(BusLogic_CCB_T *CCB)
+{
+ BusLogic_HostAdapter_T *HostAdapter = CCB->HostAdapter;
+ /* Mark the CCB completed and append it to the tail of the Host
+ Adapter's First/Last completion list (FIFO order). Callers are
+ expected to hold the Host Adapter's Lock. */
+ CCB->Status = BusLogic_CCB_Completed;
+ CCB->Next = NULL;
+ if (HostAdapter->FirstCompletedCCB == NULL)
+ {
+ HostAdapter->FirstCompletedCCB = CCB;
+ HostAdapter->LastCompletedCCB = CCB;
+ }
+ else
+ {
+ HostAdapter->LastCompletedCCB->Next = CCB;
+ HostAdapter->LastCompletedCCB = CCB;
+ }
+ /* The command is no longer outstanding on its Target Device. */
+ HostAdapter->ActiveCommands[CCB->TargetID]--;
+}
+
+
+/*
+ BusLogic_ComputeResultCode computes a SCSI Subsystem Result Code from
+ the Host Adapter Status and Target Device Status.
+*/
+
+static int BusLogic_ComputeResultCode(BusLogic_HostAdapter_T *HostAdapter,
+ BusLogic_HostAdapterStatus_T
+ HostAdapterStatus,
+ BusLogic_TargetDeviceStatus_T
+ TargetDeviceStatus)
+{
+ int HostStatus;
+ /* Map the adapter-specific Host Adapter Status onto the SCSI
+ Subsystem's DID_* host status codes. */
+ switch (HostAdapterStatus)
+ {
+ case BusLogic_CommandCompletedNormally:
+ case BusLogic_LinkedCommandCompleted:
+ case BusLogic_LinkedCommandCompletedWithFlag:
+ HostStatus = DID_OK;
+ break;
+ case BusLogic_SCSISelectionTimeout:
+ HostStatus = DID_TIME_OUT;
+ break;
+ case BusLogic_InvalidOutgoingMailboxActionCode:
+ case BusLogic_InvalidCommandOperationCode:
+ case BusLogic_InvalidCommandParameter:
+ BusLogic_Warning("BusLogic Driver Protocol Error 0x%02X\n",
+ HostAdapter, HostAdapterStatus);
+ /* Intentional fall through: protocol errors are logged above and
+ then reported as DID_ERROR along with the cases below. */
+ case BusLogic_DataUnderRun:
+ case BusLogic_DataOverRun:
+ case BusLogic_UnexpectedBusFree:
+ case BusLogic_LinkedCCBhasInvalidLUN:
+ case BusLogic_AutoRequestSenseFailed:
+ case BusLogic_TaggedQueuingMessageRejected:
+ case BusLogic_UnsupportedMessageReceived:
+ case BusLogic_HostAdapterHardwareFailed:
+ case BusLogic_TargetDeviceReconnectedImproperly:
+ case BusLogic_AbortQueueGenerated:
+ case BusLogic_HostAdapterSoftwareError:
+ case BusLogic_HostAdapterHardwareTimeoutError:
+ case BusLogic_SCSIParityErrorDetected:
+ HostStatus = DID_ERROR;
+ break;
+ case BusLogic_InvalidBusPhaseRequested:
+ case BusLogic_TargetFailedResponseToATN:
+ case BusLogic_HostAdapterAssertedRST:
+ case BusLogic_OtherDeviceAssertedRST:
+ case BusLogic_HostAdapterAssertedBusDeviceReset:
+ HostStatus = DID_RESET;
+ break;
+ default:
+ BusLogic_Warning("Unknown Host Adapter Status 0x%02X\n",
+ HostAdapter, HostAdapterStatus);
+ HostStatus = DID_ERROR;
+ break;
+ }
+ /* SCSI result format: host status in bits 16-23, target device
+ status in the low byte. */
+ return (HostStatus << 16) | TargetDeviceStatus;
+}
+
+
+/*
+ BusLogic_ScanIncomingMailboxes scans the Incoming Mailboxes saving any
+ Incoming Mailbox entries for completion processing.
+*/
+
+static void BusLogic_ScanIncomingMailboxes(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /*
+ Scan through the Incoming Mailboxes in Strict Round Robin fashion, saving
+ any completed CCBs for further processing. It is essential that for each
+ CCB and SCSI Command issued, command completion processing is performed
+ exactly once. Therefore, only Incoming Mailboxes with completion code
+ Command Completed Without Error, Command Completed With Error, or Command
+ Aborted At Host Request are saved for completion processing. When an
+ Incoming Mailbox has a completion code of Aborted Command Not Found, the
+ CCB had already completed or been aborted before the current Abort request
+ was processed, and so completion processing has already occurred and no
+ further action should be taken.
+ */
+ BusLogic_IncomingMailbox_T *NextIncomingMailbox =
+ HostAdapter->NextIncomingMailbox;
+ BusLogic_CompletionCode_T CompletionCode;
+ /* Walk forward until a free mailbox is found; the adapter fills
+ mailboxes in the same round-robin order. */
+ while ((CompletionCode = NextIncomingMailbox->CompletionCode) !=
+ BusLogic_IncomingMailboxFree)
+ {
+ /* The mailbox carries a bus address; translate it back to the
+ driver's virtual CCB pointer. */
+ BusLogic_CCB_T *CCB = (BusLogic_CCB_T *)
+ Bus_to_Virtual(NextIncomingMailbox->CCB);
+ if (CompletionCode != BusLogic_AbortedCommandNotFound)
+ {
+ if (CCB->Status == BusLogic_CCB_Active ||
+ CCB->Status == BusLogic_CCB_Reset)
+ {
+ /*
+ Save the Completion Code for this CCB and queue the CCB
+ for completion processing.
+ */
+ CCB->CompletionCode = CompletionCode;
+ BusLogic_QueueCompletedCCB(CCB);
+ }
+ else
+ {
+ /*
+ If a CCB ever appears in an Incoming Mailbox and is not marked
+ as status Active or Reset, then there is most likely a bug in
+ the Host Adapter firmware.
+ */
+ BusLogic_Warning("Illegal CCB #%ld status %d in "
+ "Incoming Mailbox\n", HostAdapter,
+ CCB->SerialNumber, CCB->Status);
+ }
+ }
+ /* Hand the mailbox slot back to the adapter and advance around
+ the ring, wrapping at the last mailbox. */
+ NextIncomingMailbox->CompletionCode = BusLogic_IncomingMailboxFree;
+ if (++NextIncomingMailbox > HostAdapter->LastIncomingMailbox)
+ NextIncomingMailbox = HostAdapter->FirstIncomingMailbox;
+ }
+ HostAdapter->NextIncomingMailbox = NextIncomingMailbox;
+}
+
+
+/*
+ BusLogic_ProcessCompletedCCBs iterates over the completed CCBs for Host
+ Adapter setting the SCSI Command Result Codes, deallocating the CCBs, and
+ calling the SCSI Subsystem Completion Routines. The Host Adapter's Lock
+ should already have been acquired by the caller.
+*/
+
+static void BusLogic_ProcessCompletedCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /* Reentrancy guard: completion routines may queue new commands which
+ can re-enter this function; only the outermost call drains the list. */
+ if (HostAdapter->ProcessCompletedCCBsActive) return;
+ HostAdapter->ProcessCompletedCCBsActive = true;
+ while (HostAdapter->FirstCompletedCCB != NULL)
+ {
+ /* Dequeue the head of the completion FIFO before processing, so
+ the list stays consistent if this CCB's handling re-enters. */
+ BusLogic_CCB_T *CCB = HostAdapter->FirstCompletedCCB;
+ SCSI_Command_T *Command = CCB->Command;
+ HostAdapter->FirstCompletedCCB = CCB->Next;
+ if (HostAdapter->FirstCompletedCCB == NULL)
+ HostAdapter->LastCompletedCCB = NULL;
+ /*
+ Process the Completed CCB.
+ */
+ if (CCB->Opcode == BusLogic_BusDeviceReset)
+ {
+ int TargetID = CCB->TargetID;
+ BusLogic_Warning("Bus Device Reset CCB #%ld to Target "
+ "%d Completed\n", HostAdapter,
+ CCB->SerialNumber, TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].BusDeviceResetsCompleted);
+ /* The reset clears any Tagged Queuing state on the target. */
+ HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false;
+ HostAdapter->CommandsSinceReset[TargetID] = 0;
+ HostAdapter->LastResetCompleted[TargetID] = jiffies;
+ /*
+ Place CCB back on the Host Adapter's free list.
+ */
+ BusLogic_DeallocateCCB(CCB);
+ /*
+ Bus Device Reset CCBs have the Command field non-NULL only when a
+ Bus Device Reset was requested for a Command that did not have a
+ currently active CCB in the Host Adapter (i.e., a Synchronous
+ Bus Device Reset), and hence would not have its Completion Routine
+ called otherwise.
+ */
+ while (Command != NULL)
+ {
+ SCSI_Command_T *NextCommand = Command->reset_chain;
+ Command->reset_chain = NULL;
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ Command = NextCommand;
+ }
+ /*
+ Iterate over the CCBs for this Host Adapter performing completion
+ processing for any CCBs marked as Reset for this Target.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Reset && CCB->TargetID == TargetID)
+ {
+ Command = CCB->Command;
+ BusLogic_DeallocateCCB(CCB);
+ HostAdapter->ActiveCommands[TargetID]--;
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ }
+ HostAdapter->BusDeviceResetPendingCCB[TargetID] = NULL;
+ }
+ else
+ {
+ /*
+ Translate the Completion Code, Host Adapter Status, and Target
+ Device Status into a SCSI Subsystem Result Code.
+ */
+ switch (CCB->CompletionCode)
+ {
+ case BusLogic_IncomingMailboxFree:
+ case BusLogic_AbortedCommandNotFound:
+ case BusLogic_InvalidCCB:
+ /* These codes should never reach the completion queue (see
+ BusLogic_ScanIncomingMailboxes); log and fall out with the
+ Command's result left unset. */
+ BusLogic_Warning("CCB #%ld to Target %d Impossible State\n",
+ HostAdapter, CCB->SerialNumber, CCB->TargetID);
+ break;
+ case BusLogic_CommandCompletedWithoutError:
+ HostAdapter->TargetStatistics[CCB->TargetID]
+ .CommandsCompleted++;
+ HostAdapter->TargetFlags[CCB->TargetID]
+ .CommandSuccessfulFlag = true;
+ Command->result = DID_OK << 16;
+ break;
+ case BusLogic_CommandAbortedAtHostRequest:
+ BusLogic_Warning("CCB #%ld to Target %d Aborted\n",
+ HostAdapter, CCB->SerialNumber, CCB->TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[CCB->TargetID]
+ .CommandAbortsCompleted);
+ Command->result = DID_ABORT << 16;
+ break;
+ case BusLogic_CommandCompletedWithError:
+ Command->result =
+ BusLogic_ComputeResultCode(HostAdapter,
+ CCB->HostAdapterStatus,
+ CCB->TargetDeviceStatus);
+ /* Selection timeouts are not counted as completed commands. */
+ if (CCB->HostAdapterStatus != BusLogic_SCSISelectionTimeout)
+ {
+ HostAdapter->TargetStatistics[CCB->TargetID]
+ .CommandsCompleted++;
+ if (BusLogic_GlobalOptions.TraceErrors)
+ {
+ int i;
+ BusLogic_Notice("CCB #%ld Target %d: Result %X Host "
+ "Adapter Status %02X "
+ "Target Status %02X\n",
+ HostAdapter, CCB->SerialNumber,
+ CCB->TargetID, Command->result,
+ CCB->HostAdapterStatus,
+ CCB->TargetDeviceStatus);
+ BusLogic_Notice("CDB ", HostAdapter);
+ for (i = 0; i < CCB->CDB_Length; i++)
+ BusLogic_Notice(" %02X", HostAdapter, CCB->CDB[i]);
+ BusLogic_Notice("\n", HostAdapter);
+ BusLogic_Notice("Sense ", HostAdapter);
+ for (i = 0; i < CCB->SenseDataLength; i++)
+ BusLogic_Notice(" %02X", HostAdapter,
+ Command->sense_buffer[i]);
+ BusLogic_Notice("\n", HostAdapter);
+ }
+ }
+ break;
+ }
+ /*
+ When an INQUIRY command completes normally, save the
+ CmdQue (Tagged Queuing Supported) and WBus16 (16 Bit
+ Wide Data Transfers Supported) bits.
+ */
+ if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 &&
+ CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally)
+ {
+ BusLogic_TargetFlags_T *TargetFlags =
+ &HostAdapter->TargetFlags[CCB->TargetID];
+ SCSI_Inquiry_T *InquiryResult =
+ (SCSI_Inquiry_T *) Command->request_buffer;
+ TargetFlags->TargetExists = true;
+ TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue;
+ TargetFlags->WideTransfersSupported = InquiryResult->WBus16;
+ }
+ /*
+ Place CCB back on the Host Adapter's free list.
+ */
+ BusLogic_DeallocateCCB(CCB);
+ /*
+ Call the SCSI Command Completion Routine.
+ */
+ Command->scsi_done(Command);
+ }
+ }
+ HostAdapter->ProcessCompletedCCBsActive = false;
+}
+
+
+/*
+ BusLogic_InterruptHandler handles hardware interrupts from BusLogic Host
+ Adapters.
+*/
+
+static void BusLogic_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier,
+ Registers_T *InterruptRegisters)
+{
+ /* DeviceIdentifier was registered as the Host Adapter pointer when
+ the IRQ was requested. */
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) DeviceIdentifier;
+ ProcessorFlags_T ProcessorFlags;
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLockIH(HostAdapter, &ProcessorFlags);
+ /*
+ Handle Interrupts appropriately for each Host Adapter type.
+ */
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ BusLogic_InterruptRegister_T InterruptRegister;
+ /*
+ Read the Host Adapter Interrupt Register.
+ */
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ if (InterruptRegister.Bits.InterruptValid)
+ {
+ /*
+ Acknowledge the interrupt and reset the Host Adapter
+ Interrupt Register.
+ */
+ BusLogic_InterruptReset(HostAdapter);
+ /*
+ Process valid External SCSI Bus Reset and Incoming Mailbox
+ Loaded Interrupts. Command Complete Interrupts are noted,
+ and Outgoing Mailbox Available Interrupts are ignored, as
+ they are never enabled.
+ */
+ if (InterruptRegister.Bits.ExternalBusReset)
+ HostAdapter->HostAdapterExternalReset = true;
+ else if (InterruptRegister.Bits.IncomingMailboxLoaded)
+ BusLogic_ScanIncomingMailboxes(HostAdapter);
+ else if (InterruptRegister.Bits.CommandComplete)
+ HostAdapter->HostAdapterCommandCompleted = true;
+ }
+ }
+ else
+ {
+ /*
+ Check if there is a pending interrupt for this Host Adapter.
+ */
+ if (FlashPoint_InterruptPending(HostAdapter->CardHandle))
+ switch (FlashPoint_HandleInterrupt(HostAdapter->CardHandle))
+ {
+ case FlashPoint_NormalInterrupt:
+ break;
+ case FlashPoint_ExternalBusReset:
+ HostAdapter->HostAdapterExternalReset = true;
+ break;
+ case FlashPoint_InternalError:
+ BusLogic_Warning("Internal FlashPoint Error detected"
+ " - Resetting Host Adapter\n", HostAdapter);
+ HostAdapter->HostAdapterInternalError = true;
+ break;
+ }
+ }
+ /*
+ Process any completed CCBs.
+ */
+ if (HostAdapter->FirstCompletedCCB != NULL)
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ /*
+ Reset the Host Adapter if requested.
+ */
+ if (HostAdapter->HostAdapterExternalReset ||
+ HostAdapter->HostAdapterInternalError)
+ {
+ BusLogic_ResetHostAdapter(HostAdapter, NULL, 0);
+ /* Clear both request flags after the reset so a stale flag
+ cannot trigger a second reset on the next interrupt. */
+ HostAdapter->HostAdapterExternalReset = false;
+ HostAdapter->HostAdapterInternalError = false;
+ scsi_mark_host_reset(HostAdapter->SCSI_Host);
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+ BusLogic_ReleaseHostAdapterLockIH(HostAdapter, &ProcessorFlags);
+}
+
+
+/*
+ BusLogic_WriteOutgoingMailbox places CCB and Action Code into an Outgoing
+ Mailbox for execution by Host Adapter. The Host Adapter's Lock should
+ already have been acquired by the caller.
+*/
+
+static boolean BusLogic_WriteOutgoingMailbox(BusLogic_HostAdapter_T
+ *HostAdapter,
+ BusLogic_ActionCode_T ActionCode,
+ BusLogic_CCB_T *CCB)
+{
+ BusLogic_OutgoingMailbox_T *NextOutgoingMailbox;
+ NextOutgoingMailbox = HostAdapter->NextOutgoingMailbox;
+ /* Returns true on success; false when the next mailbox slot is still
+ owned by the Host Adapter (ring full). */
+ if (NextOutgoingMailbox->ActionCode == BusLogic_OutgoingMailboxFree)
+ {
+ CCB->Status = BusLogic_CCB_Active;
+ /*
+ The CCB field must be written before the Action Code field since
+ the Host Adapter is operating asynchronously and the locking code
+ does not protect against simultaneous access by the Host Adapter.
+ */
+ NextOutgoingMailbox->CCB = Virtual_to_Bus(CCB);
+ NextOutgoingMailbox->ActionCode = ActionCode;
+ BusLogic_StartMailboxCommand(HostAdapter);
+ /* Advance around the mailbox ring, wrapping at the last slot. */
+ if (++NextOutgoingMailbox > HostAdapter->LastOutgoingMailbox)
+ NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox;
+ HostAdapter->NextOutgoingMailbox = NextOutgoingMailbox;
+ /* Only Start Command actions count as active/attempted commands;
+ Abort actions do not. */
+ if (ActionCode == BusLogic_MailboxStartCommand)
+ {
+ HostAdapter->ActiveCommands[CCB->TargetID]++;
+ if (CCB->Opcode != BusLogic_BusDeviceReset)
+ HostAdapter->TargetStatistics[CCB->TargetID].CommandsAttempted++;
+ }
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ BusLogic_QueueCommand creates a CCB for Command and places it into an
+ Outgoing Mailbox for execution by the associated Host Adapter.
+*/
+
+int BusLogic_QueueCommand(SCSI_Command_T *Command,
+ void (*CompletionRoutine)(SCSI_Command_T *))
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ BusLogic_TargetFlags_T *TargetFlags =
+ &HostAdapter->TargetFlags[Command->target];
+ BusLogic_TargetStatistics_T *TargetStatistics =
+ HostAdapter->TargetStatistics;
+ unsigned char *CDB = Command->cmnd;
+ int CDB_Length = Command->cmd_len;
+ int TargetID = Command->target;
+ int LogicalUnit = Command->lun;
+ void *BufferPointer = Command->request_buffer;
+ int BufferLength = Command->request_bufflen;
+ /* use_sg == 0 means a single contiguous buffer; otherwise
+ request_buffer points to a scatter list of use_sg segments. */
+ int SegmentCount = Command->use_sg;
+ ProcessorFlags_T ProcessorFlags;
+ BusLogic_CCB_T *CCB;
+ /*
+ SCSI REQUEST_SENSE commands will be executed automatically by the Host
+ Adapter for any errors, so they should not be executed explicitly unless
+ the Sense Data is zero indicating that no error occurred.
+ */
+ if (CDB[0] == REQUEST_SENSE && Command->sense_buffer[0] != 0)
+ {
+ Command->result = DID_OK << 16;
+ CompletionRoutine(Command);
+ return 0;
+ }
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ Allocate a CCB from the Host Adapter's free list. In the unlikely event
+ that there are none available and memory allocation fails, wait 1 second
+ and try again. If that fails, the Host Adapter is probably hung so signal
+ an error as a Host Adapter Hard Reset should be initiated soon.
+ */
+ CCB = BusLogic_AllocateCCB(HostAdapter);
+ if (CCB == NULL)
+ {
+ BusLogic_Delay(1);
+ CCB = BusLogic_AllocateCCB(HostAdapter);
+ if (CCB == NULL)
+ {
+ Command->result = DID_ERROR << 16;
+ CompletionRoutine(Command);
+ goto Done;
+ }
+ }
+ /*
+ Initialize the fields in the BusLogic Command Control Block (CCB).
+ */
+ if (SegmentCount == 0)
+ {
+ /* Single buffer transfer: point the CCB directly at the data. */
+ CCB->Opcode = BusLogic_InitiatorCCB;
+ CCB->DataLength = BufferLength;
+ CCB->DataPointer = Virtual_to_Bus(BufferPointer);
+ }
+ else
+ {
+ /* Scatter/Gather transfer: copy the SCSI Subsystem's scatter list
+ into the CCB's segment list; DataLength is the list size. */
+ SCSI_ScatterList_T *ScatterList = (SCSI_ScatterList_T *) BufferPointer;
+ int Segment;
+ CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather;
+ CCB->DataLength = SegmentCount * sizeof(BusLogic_ScatterGatherSegment_T);
+ /* MultiMaster adapters take a bus address; FlashPoint adapters take
+ a 32-bit virtual address for the segment list. */
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ CCB->DataPointer = Virtual_to_Bus(CCB->ScatterGatherList);
+ else CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList);
+ for (Segment = 0; Segment < SegmentCount; Segment++)
+ {
+ CCB->ScatterGatherList[Segment].SegmentByteCount =
+ ScatterList[Segment].length;
+ CCB->ScatterGatherList[Segment].SegmentDataPointer =
+ Virtual_to_Bus(ScatterList[Segment].address);
+ }
+ }
+ /* Select the data direction from the CDB opcode and maintain the
+ per-target read/write statistics for reads and writes. */
+ switch (CDB[0])
+ {
+ case READ_6:
+ case READ_10:
+ CCB->DataDirection = BusLogic_DataInLengthChecked;
+ TargetStatistics[TargetID].ReadCommands++;
+ BusLogic_IncrementByteCounter(
+ &TargetStatistics[TargetID].TotalBytesRead, BufferLength);
+ BusLogic_IncrementSizeBucket(
+ TargetStatistics[TargetID].ReadCommandSizeBuckets, BufferLength);
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ CCB->DataDirection = BusLogic_DataOutLengthChecked;
+ TargetStatistics[TargetID].WriteCommands++;
+ BusLogic_IncrementByteCounter(
+ &TargetStatistics[TargetID].TotalBytesWritten, BufferLength);
+ BusLogic_IncrementSizeBucket(
+ TargetStatistics[TargetID].WriteCommandSizeBuckets, BufferLength);
+ break;
+ default:
+ CCB->DataDirection = BusLogic_UncheckedDataTransfer;
+ break;
+ }
+ CCB->CDB_Length = CDB_Length;
+ CCB->SenseDataLength = sizeof(Command->sense_buffer);
+ CCB->HostAdapterStatus = 0;
+ CCB->TargetDeviceStatus = 0;
+ CCB->TargetID = TargetID;
+ CCB->LogicalUnit = LogicalUnit;
+ CCB->TagEnable = false;
+ CCB->LegacyTagEnable = false;
+ /*
+ BusLogic recommends that after a Reset the first couple of commands that
+ are sent to a Target Device be sent in a non Tagged Queue fashion so that
+ the Host Adapter and Target Device can establish Synchronous and Wide
+ Transfer before Queue Tag messages can interfere with the Synchronous and
+ Wide Negotiation messages. By waiting to enable Tagged Queuing until after
+ the first BusLogic_MaxTaggedQueueDepth commands have been queued, it is
+ assured that after a Reset any pending commands are requeued before Tagged
+ Queuing is enabled and that the Tagged Queuing message will not occur while
+ the partition table is being printed. In addition, some devices do not
+ properly handle the transition from non-tagged to tagged commands, so it is
+ necessary to wait until there are no pending commands for a target device
+ before queuing tagged commands.
+ */
+ if (HostAdapter->CommandsSinceReset[TargetID]++ >=
+ BusLogic_MaxTaggedQueueDepth &&
+ !TargetFlags->TaggedQueuingActive &&
+ HostAdapter->ActiveCommands[TargetID] == 0 &&
+ TargetFlags->TaggedQueuingSupported &&
+ (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)))
+ {
+ TargetFlags->TaggedQueuingActive = true;
+ BusLogic_Notice("Tagged Queuing now active for Target %d\n",
+ HostAdapter, TargetID);
+ }
+ if (TargetFlags->TaggedQueuingActive)
+ {
+ BusLogic_QueueTag_T QueueTag = BusLogic_SimpleQueueTag;
+ /*
+ When using Tagged Queuing with Simple Queue Tags, it appears that disk
+ drive controllers do not guarantee that a queued command will not
+ remain in a disconnected state indefinitely if commands that read or
+ write nearer the head position continue to arrive without interruption.
+ Therefore, for each Target Device this driver keeps track of the last
+ time either the queue was empty or an Ordered Queue Tag was issued. If
+ more than 4 seconds (one fifth of the 20 second disk timeout) have
+ elapsed since this last sequence point, this command will be issued
+ with an Ordered Queue Tag rather than a Simple Queue Tag, which forces
+ the Target Device to complete all previously queued commands before
+ this command may be executed.
+ */
+ if (HostAdapter->ActiveCommands[TargetID] == 0)
+ HostAdapter->LastSequencePoint[TargetID] = jiffies;
+ else if (jiffies - HostAdapter->LastSequencePoint[TargetID] > 4*HZ)
+ {
+ HostAdapter->LastSequencePoint[TargetID] = jiffies;
+ QueueTag = BusLogic_OrderedQueueTag;
+ }
+ /* Extended-LUN firmware uses the new tag fields; older firmware
+ uses the legacy tag fields of the CCB. */
+ if (HostAdapter->ExtendedLUNSupport)
+ {
+ CCB->TagEnable = true;
+ CCB->QueueTag = QueueTag;
+ }
+ else
+ {
+ CCB->LegacyTagEnable = true;
+ CCB->LegacyQueueTag = QueueTag;
+ }
+ }
+ memcpy(CCB->CDB, CDB, CDB_Length);
+ CCB->SenseDataPointer = Virtual_to_Bus(&Command->sense_buffer);
+ CCB->Command = Command;
+ Command->scsi_done = CompletionRoutine;
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ Place the CCB in an Outgoing Mailbox. The higher levels of the SCSI
+ Subsystem should not attempt to queue more commands than can be placed
+ in Outgoing Mailboxes, so there should always be one free. In the
+ unlikely event that there are none available, wait 1 second and try
+ again. If that fails, the Host Adapter is probably hung so signal an
+ error as a Host Adapter Hard Reset should be initiated soon.
+ */
+ if (!BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxStartCommand, CCB))
+ {
+ BusLogic_Warning("Unable to write Outgoing Mailbox - "
+ "Pausing for 1 second\n", HostAdapter);
+ BusLogic_Delay(1);
+ if (!BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxStartCommand, CCB))
+ {
+ BusLogic_Warning("Still unable to write Outgoing Mailbox - "
+ "Host Adapter Dead?\n", HostAdapter);
+ BusLogic_DeallocateCCB(CCB);
+ Command->result = DID_ERROR << 16;
+ Command->scsi_done(Command);
+ }
+ }
+ }
+ else
+ {
+ /*
+ Call the FlashPoint SCCB Manager to start execution of the CCB.
+ */
+ CCB->Status = BusLogic_CCB_Active;
+ HostAdapter->ActiveCommands[TargetID]++;
+ TargetStatistics[TargetID].CommandsAttempted++;
+ FlashPoint_StartCCB(HostAdapter->CardHandle, CCB);
+ /*
+ The Command may have already completed and BusLogic_QueueCompletedCCB
+ been called, or it may still be pending.
+ */
+ if (CCB->Status == BusLogic_CCB_Completed)
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+Done:
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return 0;
+}
+
+
+/*
+ BusLogic_AbortCommand aborts Command if possible.
+*/
+
+int BusLogic_AbortCommand(SCSI_Command_T *Command)
+{
+ /*
+ Returns one of the SCSI_ABORT_* status codes expected by the mid-level
+ SCSI driver. Note that BusLogic_Warning passes HostAdapter as a fixed
+ argument after the format string; only the remaining arguments fill
+ the format conversions.
+ */
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ int TargetID = Command->target;
+ ProcessorFlags_T ProcessorFlags;
+ BusLogic_CCB_T *CCB;
+ int Result;
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].CommandAbortsRequested);
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ If this Command has already completed, then no Abort is necessary.
+ A serial_number differing from serial_number_at_timeout means the
+ command finished after the timeout was recorded.
+ */
+ if (Command->serial_number != Command->serial_number_at_timeout)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "Already Completed\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_NOT_RUNNING;
+ goto Done;
+ }
+ /*
+ Attempt to find an Active CCB for this Command. If no Active CCB for this
+ Command is found, then no Abort is necessary. This is a linear scan of
+ every CCB ever allocated for this Host Adapter.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Command == Command) break;
+ if (CCB == NULL)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "No CCB Found\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "CCB Completed\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Reset)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "CCB Reset\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_PENDING;
+ goto Done;
+ }
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ Attempt to Abort this CCB. MultiMaster Firmware versions prior to 5.xx
+ do not generate Abort Tag messages, but only generate the non-tagged
+ Abort message. Since non-tagged commands are not sent by the Host
+ Adapter until the queue of outstanding tagged commands has completed,
+ and the Abort message is treated as a non-tagged command, it is
+ effectively impossible to abort commands when Tagged Queuing is active.
+ Firmware version 5.xx does generate Abort Tag messages, so it is
+ possible to abort commands when Tagged Queuing is active.
+ */
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingActive &&
+ HostAdapter->FirmwareVersion[0] < '5')
+ {
+ BusLogic_Warning("Unable to Abort CCB #%ld to Target %d - "
+ "Abort Tag Not Supported\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ Result = SCSI_ABORT_SNOOZE;
+ }
+ else if (BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxAbortCommand, CCB))
+ {
+ BusLogic_Warning("Aborting CCB #%ld to Target %d\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].CommandAbortsAttempted);
+ Result = SCSI_ABORT_PENDING;
+ }
+ else
+ {
+ BusLogic_Warning("Unable to Abort CCB #%ld to Target %d - "
+ "No Outgoing Mailboxes\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ Result = SCSI_ABORT_BUSY;
+ }
+ }
+ else
+ {
+ /*
+ Call the FlashPoint SCCB Manager to abort execution of the CCB.
+ */
+ BusLogic_Warning("Aborting CCB #%ld to Target %d\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].CommandAbortsAttempted);
+ FlashPoint_AbortCCB(HostAdapter->CardHandle, CCB);
+ /*
+ The Abort may have already been completed and
+ BusLogic_QueueCompletedCCB been called, or it
+ may still be pending.
+ */
+ Result = SCSI_ABORT_PENDING;
+ if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ Result = SCSI_ABORT_SUCCESS;
+ }
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+Done:
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return Result;
+}
+
+
+/*
+ BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all
+ currently executing SCSI Commands as having been Reset.
+*/
+
+static int BusLogic_ResetHostAdapter(BusLogic_HostAdapter_T *HostAdapter,
+ SCSI_Command_T *Command,
+ unsigned int ResetFlags)
+{
+ /*
+ Command may be NULL when the reset is triggered by an External SCSI Bus
+ Reset or a Host Adapter Internal Error (see the NULL check further
+ below). ResetFlags carries the SCSI_RESET_* flags from the mid-level
+ SCSI driver; the return value is a SCSI_RESET_* status code.
+ NOTE(review): if Command were NULL with neither HostAdapterExternalReset
+ nor HostAdapterInternalError set, the error counter update in the final
+ else branch would dereference NULL - presumably the callers guarantee
+ this cannot happen; verify against the call sites.
+ */
+ ProcessorFlags_T ProcessorFlags;
+ BusLogic_CCB_T *CCB;
+ int TargetID, Result;
+ boolean HardReset;
+ if (HostAdapter->HostAdapterExternalReset)
+ {
+ BusLogic_IncrementErrorCounter(&HostAdapter->ExternalHostAdapterResets);
+ /* The bus was already reset externally, so no hard reset is needed. */
+ HardReset = false;
+ }
+ else if (HostAdapter->HostAdapterInternalError)
+ {
+ BusLogic_IncrementErrorCounter(&HostAdapter->HostAdapterInternalErrors);
+ HardReset = true;
+ }
+ else
+ {
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[Command->target]
+ .HostAdapterResetsRequested);
+ HardReset = true;
+ }
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ If this is an Asynchronous Reset and this Command has already completed,
+ then no Reset is necessary.
+ */
+ if (ResetFlags & SCSI_RESET_ASYNCHRONOUS)
+ {
+ TargetID = Command->target;
+ if (Command->serial_number != Command->serial_number_at_timeout)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Already Completed or Reset\n",
+ HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Command == Command) break;
+ if (CCB == NULL)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "No CCB Found\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "CCB Completed\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Reset &&
+ HostAdapter->BusDeviceResetPendingCCB[TargetID] == NULL)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Reset Pending\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_PENDING;
+ goto Done;
+ }
+ }
+ if (Command == NULL)
+ {
+ if (HostAdapter->HostAdapterInternalError)
+ BusLogic_Warning("Resetting %s due to Host Adapter Internal Error\n",
+ HostAdapter, HostAdapter->FullModelName);
+ else BusLogic_Warning("Resetting %s due to External SCSI Bus Reset\n",
+ HostAdapter, HostAdapter->FullModelName);
+ }
+ else
+ {
+ BusLogic_Warning("Resetting %s due to Target %d\n", HostAdapter,
+ HostAdapter->FullModelName, Command->target);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[Command->target]
+ .HostAdapterResetsAttempted);
+ }
+ /*
+ Attempt to Reset and Reinitialize the Host Adapter. HardReset
+ presumably selects whether the SCSI bus is also reset - confirm in
+ BusLogic_HardwareResetHostAdapter.
+ */
+ if (!(BusLogic_HardwareResetHostAdapter(HostAdapter, HardReset) &&
+ BusLogic_InitializeHostAdapter(HostAdapter)))
+ {
+ BusLogic_Error("Resetting %s Failed\n", HostAdapter,
+ HostAdapter->FullModelName);
+ Result = SCSI_RESET_ERROR;
+ goto Done;
+ }
+ if (Command != NULL)
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[Command->target]
+ .HostAdapterResetsCompleted);
+ /*
+ Mark all currently executing CCBs as having been Reset.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Active)
+ CCB->Status = BusLogic_CCB_Reset;
+ /*
+ Wait a few seconds between the Host Adapter Hard Reset which initiates
+ a SCSI Bus Reset and issuing any SCSI Commands. Some SCSI devices get
+ confused if they receive SCSI Commands too soon after a SCSI Bus Reset.
+ Note that a timer interrupt may occur here, but all active CCBs have
+ already been marked Reset and so a reentrant call will return Pending.
+ */
+ if (HardReset)
+ BusLogic_Delay(HostAdapter->BusSettleTime);
+ /*
+ If this is a Synchronous Reset, perform completion processing for
+ the Command being Reset: the mid-level driver expects its completion
+ routine to be invoked with a DID_RESET result.
+ */
+ if (ResetFlags & SCSI_RESET_SYNCHRONOUS)
+ {
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ }
+ /*
+ Perform completion processing for all CCBs marked as Reset. Each CCB's
+ Command may head a reset_chain of commands accumulated while a Bus
+ Device Reset was pending; complete every command on the chain.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Reset)
+ {
+ Command = CCB->Command;
+ BusLogic_DeallocateCCB(CCB);
+ while (Command != NULL)
+ {
+ SCSI_Command_T *NextCommand = Command->reset_chain;
+ Command->reset_chain = NULL;
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ Command = NextCommand;
+ }
+ }
+ /* Record the reset time for every target so later resets can be rate
+ limited against these timestamps. */
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ HostAdapter->LastResetAttempted[TargetID] = jiffies;
+ HostAdapter->LastResetCompleted[TargetID] = jiffies;
+ }
+ Result = SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET;
+ /*
+ Release exclusive access to Host Adapter.
+ */
+Done:
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return Result;
+}
+
+
+/*
+ BusLogic_SendBusDeviceReset sends a Bus Device Reset to the Target
+ Device associated with Command.
+*/
+
+static int BusLogic_SendBusDeviceReset(BusLogic_HostAdapter_T *HostAdapter,
+ SCSI_Command_T *Command,
+ unsigned int ResetFlags)
+{
+ int TargetID = Command->target;
+ BusLogic_CCB_T *CCB, *XCCB;
+ ProcessorFlags_T ProcessorFlags;
+ /*
+ Result stays -1 whenever a Bus Device Reset cannot be issued; the Done
+ path below then escalates to a full Host Adapter Hard Reset and SCSI
+ Bus Reset via BusLogic_ResetHostAdapter.
+ */
+ int Result = -1;
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].BusDeviceResetsRequested);
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ If this is an Asynchronous Reset and this Command has already completed,
+ then no Reset is necessary.
+ */
+ if (ResetFlags & SCSI_RESET_ASYNCHRONOUS)
+ {
+ if (Command->serial_number != Command->serial_number_at_timeout)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Already Completed\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Command == Command) break;
+ if (CCB == NULL)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "No CCB Found\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "CCB Completed\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Reset)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Reset Pending\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_PENDING;
+ goto Done;
+ }
+ else if (HostAdapter->BusDeviceResetPendingCCB[TargetID] != NULL)
+ {
+ BusLogic_Warning("Bus Device Reset already pending to Target %d\n",
+ HostAdapter, TargetID);
+ /* Result is still -1 here, so the Done path falls back to a full
+ Host Adapter Reset. */
+ goto Done;
+ }
+ }
+ /*
+ If this is a Synchronous Reset and a Bus Device Reset is already pending
+ for this Target Device, do not send a second one. Add this Command to
+ the list of Commands for which completion processing must be performed
+ when the Bus Device Reset CCB completes.
+ */
+ if (ResetFlags & SCSI_RESET_SYNCHRONOUS)
+ if ((CCB = HostAdapter->BusDeviceResetPendingCCB[TargetID]) != NULL)
+ {
+ Command->reset_chain = CCB->Command;
+ CCB->Command = Command;
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Reset Pending\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_PENDING;
+ goto Done;
+ }
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ MultiMaster Firmware versions prior to 5.xx treat a Bus Device Reset as
+ a non-tagged command. Since non-tagged commands are not sent by the
+ Host Adapter until the queue of outstanding tagged commands has
+ completed, it is effectively impossible to send a Bus Device Reset
+ while there are tagged commands outstanding. Therefore, in that case a
+ full Host Adapter Hard Reset and SCSI Bus Reset must be done.
+ */
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingActive &&
+ HostAdapter->ActiveCommands[TargetID] > 0 &&
+ HostAdapter->FirmwareVersion[0] < '5')
+ goto Done;
+ }
+ /*
+ Allocate a CCB from the Host Adapter's free list. In the unlikely event
+ that there are none available and memory allocation fails, attempt a full
+ Host Adapter Hard Reset and SCSI Bus Reset.
+ */
+ CCB = BusLogic_AllocateCCB(HostAdapter);
+ if (CCB == NULL) goto Done;
+ BusLogic_Warning("Sending Bus Device Reset CCB #%ld to Target %d\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ CCB->Opcode = BusLogic_BusDeviceReset;
+ CCB->TargetID = TargetID;
+ /*
+ For Synchronous Resets, arrange for the interrupt handler to perform
+ completion processing for the Command being Reset.
+ */
+ if (ResetFlags & SCSI_RESET_SYNCHRONOUS)
+ {
+ Command->reset_chain = NULL;
+ CCB->Command = Command;
+ }
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ Attempt to write an Outgoing Mailbox with the Bus Device Reset CCB.
+ If sending a Bus Device Reset is impossible, attempt a full Host
+ Adapter Hard Reset and SCSI Bus Reset.
+ */
+ if (!(BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxStartCommand, CCB)))
+ {
+ BusLogic_Warning("Unable to write Outgoing Mailbox for "
+ "Bus Device Reset\n", HostAdapter);
+ BusLogic_DeallocateCCB(CCB);
+ goto Done;
+ }
+ }
+ else
+ {
+ /*
+ Call the FlashPoint SCCB Manager to start execution of the CCB.
+ */
+ CCB->Status = BusLogic_CCB_Active;
+ HostAdapter->ActiveCommands[TargetID]++;
+ FlashPoint_StartCCB(HostAdapter->CardHandle, CCB);
+ }
+ /*
+ If there is a currently executing CCB in the Host Adapter for this Command
+ (i.e. this is an Asynchronous Reset), then an Incoming Mailbox entry may be
+ made with a completion code of BusLogic_HostAdapterAssertedBusDeviceReset.
+ If there is no active CCB for this Command (i.e. this is a Synchronous
+ Reset), then the Bus Device Reset CCB's Command field will have been set
+ to the Command so that the interrupt for the completion of the Bus Device
+ Reset can call the Completion Routine for the Command. On successful
+ execution of a Bus Device Reset, older firmware versions did return the
+ pending CCBs with the appropriate completion code, but more recent firmware
+ versions only return the Bus Device Reset CCB itself. This driver handles
+ both cases by marking all the currently executing CCBs to this Target
+ Device as Reset. When the Bus Device Reset CCB is processed by the
+ interrupt handler, any remaining CCBs marked as Reset will have completion
+ processing performed.
+ */
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].BusDeviceResetsAttempted);
+ HostAdapter->BusDeviceResetPendingCCB[TargetID] = CCB;
+ HostAdapter->LastResetAttempted[TargetID] = jiffies;
+ for (XCCB = HostAdapter->All_CCBs; XCCB != NULL; XCCB = XCCB->NextAll)
+ if (XCCB->Status == BusLogic_CCB_Active && XCCB->TargetID == TargetID)
+ XCCB->Status = BusLogic_CCB_Reset;
+ /*
+ FlashPoint Host Adapters may have already completed the Bus Device
+ Reset and BusLogic_QueueCompletedCCB been called, or it may still be
+ pending.
+ */
+ Result = SCSI_RESET_PENDING;
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ Result = SCSI_RESET_SUCCESS;
+ }
+ /*
+ If a Bus Device Reset was not possible for some reason, force a full
+ Host Adapter Hard Reset and SCSI Bus Reset.
+ */
+Done:
+ if (Result < 0)
+ Result = BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ /*
+ Release exclusive access to Host Adapter.
+ */
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return Result;
+}
+
+
+/*
+ BusLogic_ResetCommand takes appropriate action to reset Command.
+*/
+
+int BusLogic_ResetCommand(SCSI_Command_T *Command, unsigned int ResetFlags)
+{
+ /*
+ Chooses an error recovery action (Bus Device Reset or full Host
+ Adapter Reset) based on the per-target ErrorRecoveryStrategy and the
+ mid-level driver's ResetFlags, returning a SCSI_RESET_* status code.
+ */
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ int TargetID = Command->target;
+ BusLogic_ErrorRecoveryStrategy_T
+ ErrorRecoveryStrategy = HostAdapter->ErrorRecoveryStrategy[TargetID];
+ /*
+ Disable Tagged Queuing if it is active for this Target Device and if
+ it has been less than 10 minutes since the last reset occurred, or since
+ the system was initialized if no prior resets have occurred.
+ */
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingActive &&
+ jiffies - HostAdapter->LastResetCompleted[TargetID] < 10*60*HZ)
+ {
+ HostAdapter->TaggedQueuingPermitted &= ~(1 << TargetID);
+ HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false;
+ BusLogic_Warning("Tagged Queuing now disabled for Target %d\n",
+ HostAdapter, TargetID);
+ }
+ switch (ErrorRecoveryStrategy)
+ {
+ case BusLogic_ErrorRecovery_Default:
+ if (ResetFlags & SCSI_RESET_SUGGEST_HOST_RESET)
+ return BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ else if (ResetFlags & SCSI_RESET_SUGGEST_BUS_RESET)
+ return BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ /* Fall through to Bus Device Reset case. */
+ case BusLogic_ErrorRecovery_BusDeviceReset:
+ /*
+ The Bus Device Reset Error Recovery Strategy only graduates to a Hard
+ Reset when no commands have completed successfully since the last Bus
+ Device Reset and it has been at least 100 milliseconds. This prevents
+ a sequence of commands that all timeout together from immediately
+ forcing a Hard Reset before the Bus Device Reset has had a chance to
+ clear the error condition.
+ */
+ if (HostAdapter->TargetFlags[TargetID].CommandSuccessfulFlag ||
+ jiffies - HostAdapter->LastResetAttempted[TargetID] < HZ/10)
+ {
+ HostAdapter->TargetFlags[TargetID].CommandSuccessfulFlag = false;
+ return BusLogic_SendBusDeviceReset(HostAdapter, Command, ResetFlags);
+ }
+ /* Fall through to Hard Reset case. */
+ case BusLogic_ErrorRecovery_HardReset:
+ return BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ case BusLogic_ErrorRecovery_None:
+ BusLogic_Warning("Error Recovery for Target %d Suppressed\n",
+ HostAdapter, TargetID);
+ break;
+ }
+ return SCSI_RESET_PUNT;
+}
+
+
+/*
+ BusLogic_BIOSDiskParameters returns the Heads/Sectors/Cylinders BIOS Disk
+ Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and
+ the appropriate number of cylinders so as not to exceed drive capacity. In
+ order for disks equal to or larger than 1 GB to be addressable by the BIOS
+ without exceeding the BIOS limitation of 1024 cylinders, Extended Translation
+ may be enabled in AutoSCSI on FlashPoint Host Adapters and on "W" and "C"
+ series MultiMaster Host Adapters, or by a dip switch setting on "S" and "A"
+ series MultiMaster Host Adapters. With Extended Translation enabled, drives
+ between 1 GB inclusive and 2 GB exclusive are given a disk geometry of 128
+ heads and 32 sectors, and drives above 2 GB inclusive are given a disk
+ geometry of 255 heads and 63 sectors. However, if the BIOS detects that the
+ Extended Translation setting does not match the geometry in the partition
+ table, then the translation inferred from the partition table will be used by
+ the BIOS, and a warning may be displayed.
+*/
+
+int BusLogic_BIOSDiskParameters(SCSI_Disk_T *Disk, KernelDevice_T Device,
+ int *Parameters)
+{
+ /*
+ Fills Parameters (viewed as a BIOS_DiskParameters_T: Heads, Sectors,
+ Cylinders) with a BIOS-compatible geometry for Disk. Always returns 0.
+ */
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Disk->device->host->hostdata;
+ BIOS_DiskParameters_T *DiskParameters = (BIOS_DiskParameters_T *) Parameters;
+ struct buffer_head *BufferHead;
+ /* Disk->capacity is in 512-byte sectors; pick the Extended Translation
+ geometry tier when it is enabled and the drive is 1 GB or larger. */
+ if (HostAdapter->ExtendedTranslationEnabled &&
+ Disk->capacity >= 2*1024*1024 /* 1 GB in 512 byte sectors */)
+ {
+ if (Disk->capacity >= 4*1024*1024 /* 2 GB in 512 byte sectors */)
+ {
+ DiskParameters->Heads = 255;
+ DiskParameters->Sectors = 63;
+ }
+ else
+ {
+ DiskParameters->Heads = 128;
+ DiskParameters->Sectors = 32;
+ }
+ }
+ else
+ {
+ DiskParameters->Heads = 64;
+ DiskParameters->Sectors = 32;
+ }
+ DiskParameters->Cylinders =
+ Disk->capacity / (DiskParameters->Heads * DiskParameters->Sectors);
+ /*
+ Attempt to read the first 1024 bytes from the disk device. Masking the
+ low four (partition) bits of the minor number addresses the whole disk
+ so block 0 is the boot sector.
+ */
+ BufferHead = bread(MKDEV(MAJOR(Device), MINOR(Device) & ~0x0F), 0, 1024);
+ if (BufferHead == NULL) return 0;
+ /*
+ If the boot sector partition table flag is valid, search for a partition
+ table entry whose end_head matches one of the standard BusLogic geometry
+ translations (64/32, 128/32, or 255/63). 0xAA55 at offset 0x1FE is the
+ standard boot sector signature; the partition table starts at 0x1BE.
+ */
+ if (*(unsigned short *) (BufferHead->b_data + 0x1FE) == 0xAA55)
+ {
+ PartitionTable_T *FirstPartitionEntry =
+ (PartitionTable_T *) (BufferHead->b_data + 0x1BE);
+ PartitionTable_T *PartitionEntry = FirstPartitionEntry;
+ int SavedCylinders = DiskParameters->Cylinders, PartitionNumber;
+ unsigned char PartitionEntryEndHead, PartitionEntryEndSector;
+ for (PartitionNumber = 0; PartitionNumber < 4; PartitionNumber++)
+ {
+ PartitionEntryEndHead = PartitionEntry->end_head;
+ /* The low 6 bits of end_sector hold the sector number. */
+ PartitionEntryEndSector = PartitionEntry->end_sector & 0x3F;
+ if (PartitionEntryEndHead == 64-1)
+ {
+ DiskParameters->Heads = 64;
+ DiskParameters->Sectors = 32;
+ break;
+ }
+ else if (PartitionEntryEndHead == 128-1)
+ {
+ DiskParameters->Heads = 128;
+ DiskParameters->Sectors = 32;
+ break;
+ }
+ else if (PartitionEntryEndHead == 255-1)
+ {
+ DiskParameters->Heads = 255;
+ DiskParameters->Sectors = 63;
+ break;
+ }
+ PartitionEntry++;
+ }
+ /* No entry matched a known translation; fall back to the first entry
+ for the compatibility warning below. */
+ if (PartitionNumber == 4)
+ {
+ PartitionEntryEndHead = FirstPartitionEntry->end_head;
+ PartitionEntryEndSector = FirstPartitionEntry->end_sector & 0x3F;
+ }
+ DiskParameters->Cylinders =
+ Disk->capacity / (DiskParameters->Heads * DiskParameters->Sectors);
+ if (PartitionNumber < 4 &&
+ PartitionEntryEndSector == DiskParameters->Sectors)
+ {
+ if (DiskParameters->Cylinders != SavedCylinders)
+ BusLogic_Warning("Adopting Geometry %d/%d from Partition Table\n",
+ HostAdapter,
+ DiskParameters->Heads, DiskParameters->Sectors);
+ }
+ else if (PartitionEntryEndHead > 0 || PartitionEntryEndSector > 0)
+ {
+ BusLogic_Warning("Warning: Partition Table appears to "
+ "have Geometry %d/%d which is\n", HostAdapter,
+ PartitionEntryEndHead + 1,
+ PartitionEntryEndSector);
+ BusLogic_Warning("not compatible with current BusLogic "
+ "Host Adapter Geometry %d/%d\n", HostAdapter,
+ DiskParameters->Heads, DiskParameters->Sectors);
+ }
+ }
+ brelse(BufferHead);
+ return 0;
+}
+
+
+/*
+ BusLogic_ProcDirectoryInfo implements /proc/scsi/BusLogic/<N>.
+*/
+
+int BusLogic_ProcDirectoryInfo(char *ProcBuffer, char **StartPointer,
+ off_t Offset, int BytesAvailable,
+ int HostNumber, int WriteFlag)
+{
+ /*
+ Implements the /proc/scsi/BusLogic/<N> read and write handlers.
+ Reads format statistics into the per-adapter MessageBuffer and return
+ the window [Offset, Offset+BytesAvailable) of it; writes reset the
+ statistics counters. Returns the number of bytes made available.
+ */
+ BusLogic_HostAdapter_T *HostAdapter;
+ BusLogic_TargetStatistics_T *TargetStatistics;
+ int TargetID, Length;
+ char *Buffer;
+ /* Locate the registered Host Adapter with the requested HostNumber. */
+ for (HostAdapter = BusLogic_FirstRegisteredHostAdapter;
+ HostAdapter != NULL;
+ HostAdapter = HostAdapter->Next)
+ if (HostAdapter->HostNumber == HostNumber) break;
+ if (HostAdapter == NULL)
+ {
+ BusLogic_Error("Cannot find Host Adapter for SCSI Host %d\n",
+ NULL, HostNumber);
+ return 0;
+ }
+ TargetStatistics = HostAdapter->TargetStatistics;
+ /* Any write to the /proc file clears the accumulated statistics. */
+ if (WriteFlag)
+ {
+ HostAdapter->ExternalHostAdapterResets = 0;
+ HostAdapter->HostAdapterInternalErrors = 0;
+ memset(TargetStatistics, 0,
+ BusLogic_MaxTargetDevices * sizeof(BusLogic_TargetStatistics_T));
+ return 0;
+ }
+ /* Append the report after the announcement text already present in the
+ MessageBuffer (MessageBufferLength bytes). */
+ Buffer = HostAdapter->MessageBuffer;
+ Length = HostAdapter->MessageBufferLength;
+ Length += sprintf(&Buffer[Length], "\n\
+Current Driver Queue Depth: %d\n\
+Currently Allocated CCBs: %d\n",
+ HostAdapter->DriverQueueDepth,
+ HostAdapter->AllocatedCCBs);
+ Length += sprintf(&Buffer[Length], "\n\n\
+ DATA TRANSFER STATISTICS\n\
+\n\
+Target Tagged Queuing Queue Depth Active Attempted Completed\n\
+====== ============== =========== ====== ========= =========\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length], " %2d %s", TargetID,
+ (TargetFlags->TaggedQueuingSupported
+ ? (TargetFlags->TaggedQueuingActive
+ ? " Active"
+ : (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)
+ ? " Permitted" : " Disabled"))
+ : "Not Supported"));
+ Length += sprintf(&Buffer[Length],
+ " %3d %3u %9u %9u\n",
+ HostAdapter->QueueDepth[TargetID],
+ HostAdapter->ActiveCommands[TargetID],
+ TargetStatistics[TargetID].CommandsAttempted,
+ TargetStatistics[TargetID].CommandsCompleted);
+ }
+ Length += sprintf(&Buffer[Length], "\n\
+Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\
+====== ============= ============== =================== ===================\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length], " %2d %9u %9u", TargetID,
+ TargetStatistics[TargetID].ReadCommands,
+ TargetStatistics[TargetID].WriteCommands);
+ /* Byte totals are kept as Billions + Units pairs; print the Billions
+ part only when nonzero, zero-padding Units to nine digits. */
+ if (TargetStatistics[TargetID].TotalBytesRead.Billions > 0)
+ Length +=
+ sprintf(&Buffer[Length], " %9u%09u",
+ TargetStatistics[TargetID].TotalBytesRead.Billions,
+ TargetStatistics[TargetID].TotalBytesRead.Units);
+ else
+ Length +=
+ sprintf(&Buffer[Length], " %9u",
+ TargetStatistics[TargetID].TotalBytesRead.Units);
+ if (TargetStatistics[TargetID].TotalBytesWritten.Billions > 0)
+ Length +=
+ sprintf(&Buffer[Length], " %9u%09u\n",
+ TargetStatistics[TargetID].TotalBytesWritten.Billions,
+ TargetStatistics[TargetID].TotalBytesWritten.Units);
+ else
+ Length +=
+ sprintf(&Buffer[Length], " %9u\n",
+ TargetStatistics[TargetID].TotalBytesWritten.Units);
+ }
+ Length += sprintf(&Buffer[Length], "\n\
+Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
+====== ======= ========= ========= ========= ========= =========\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Read %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[0],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[1],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[2],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[3],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[4]);
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Write %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[0],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[1],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[2],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[3],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[4]);
+ }
+ Length += sprintf(&Buffer[Length], "\n\
+Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
+====== ======= ========= ========= ========= ========= =========\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Read %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[5],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[6],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[7],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[8],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[9]);
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Write %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[5],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[6],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[7],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[8],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[9]);
+ }
+ Length += sprintf(&Buffer[Length], "\n\n\
+ ERROR RECOVERY STATISTICS\n\
+\n\
+ Command Aborts Bus Device Resets Host Adapter Resets\n\
+Target Requested Completed Requested Completed Requested Completed\n\
+ ID \\\\\\\\ Attempted //// \\\\\\\\ Attempted //// \\\\\\\\ Attempted ////\n\
+====== ===== ===== ===== ===== ===== ===== ===== ===== =====\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length], "\
+ %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", TargetID,
+ TargetStatistics[TargetID].CommandAbortsRequested,
+ TargetStatistics[TargetID].CommandAbortsAttempted,
+ TargetStatistics[TargetID].CommandAbortsCompleted,
+ TargetStatistics[TargetID].BusDeviceResetsRequested,
+ TargetStatistics[TargetID].BusDeviceResetsAttempted,
+ TargetStatistics[TargetID].BusDeviceResetsCompleted,
+ TargetStatistics[TargetID].HostAdapterResetsRequested,
+ TargetStatistics[TargetID].HostAdapterResetsAttempted,
+ TargetStatistics[TargetID].HostAdapterResetsCompleted);
+ }
+ Length += sprintf(&Buffer[Length], "\nExternal Host Adapter Resets: %d\n",
+ HostAdapter->ExternalHostAdapterResets);
+ Length += sprintf(&Buffer[Length], "Host Adapter Internal Errors: %d\n",
+ HostAdapter->HostAdapterInternalErrors);
+ /* The sprintf calls above are unbounded; this check only reports an
+ overflow after the fact. */
+ if (Length >= BusLogic_MessageBufferSize)
+ BusLogic_Error("Message Buffer length %d exceeds size %d\n",
+ HostAdapter, Length, BusLogic_MessageBufferSize);
+ /* Return the requested window of the buffer to the /proc layer. */
+ if ((Length -= Offset) <= 0) return 0;
+ if (Length >= BytesAvailable) Length = BytesAvailable;
+ *StartPointer = &HostAdapter->MessageBuffer[Offset];
+ return Length;
+}
+
+
+/*
+ BusLogic_Message prints Driver Messages.
+*/
+
+static void BusLogic_Message(BusLogic_MessageLevel_T MessageLevel,
+ char *Format,
+ BusLogic_HostAdapter_T *HostAdapter,
+ ...)
+{
+ /*
+ Formats and emits a driver message at the given MessageLevel. The
+ static Buffer and BeginningOfLine state make this function
+ non-reentrant; presumably callers serialize message output - verify.
+ vsprintf is unbounded, so Format plus its arguments must expand to
+ fewer than BusLogic_LineBufferSize bytes. The strcpy calls below
+ likewise assume MessageBuffer has room for the appended text.
+ */
+ static char Buffer[BusLogic_LineBufferSize];
+ static boolean BeginningOfLine = true;
+ va_list Arguments;
+ int Length = 0;
+ va_start(Arguments, HostAdapter);
+ Length = vsprintf(Buffer, Format, Arguments);
+ va_end(Arguments);
+ if (MessageLevel == BusLogic_AnnounceLevel)
+ {
+ /* Announce messages accumulate in the per-adapter MessageBuffer, but
+ only the first two lines are echoed to the console. */
+ static int AnnouncementLines = 0;
+ strcpy(&HostAdapter->MessageBuffer[HostAdapter->MessageBufferLength],
+ Buffer);
+ HostAdapter->MessageBufferLength += Length;
+ if (++AnnouncementLines <= 2)
+ printk("%sscsi: %s", BusLogic_MessageLevelMap[MessageLevel], Buffer);
+ }
+ else if (MessageLevel == BusLogic_InfoLevel)
+ {
+ /* Info messages are both saved in the MessageBuffer and printed,
+ prefixing the host number only at the start of a line. */
+ strcpy(&HostAdapter->MessageBuffer[HostAdapter->MessageBufferLength],
+ Buffer);
+ HostAdapter->MessageBufferLength += Length;
+ if (BeginningOfLine)
+ {
+ if (Buffer[0] != '\n' || Length > 1)
+ printk("%sscsi%d: %s", BusLogic_MessageLevelMap[MessageLevel],
+ HostAdapter->HostNumber, Buffer);
+ }
+ else printk("%s", Buffer);
+ }
+ else
+ {
+ /* Notice/Warning/Error messages go straight to the console; the host
+ number prefix is used only once the adapter is initialized. */
+ if (BeginningOfLine)
+ {
+ if (HostAdapter != NULL && HostAdapter->HostAdapterInitialized)
+ printk("%sscsi%d: %s", BusLogic_MessageLevelMap[MessageLevel],
+ HostAdapter->HostNumber, Buffer);
+ else printk("%s%s", BusLogic_MessageLevelMap[MessageLevel], Buffer);
+ }
+ else printk("%s", Buffer);
+ }
+ /* Remember whether the next message starts a fresh line. */
+ BeginningOfLine = (Buffer[Length-1] == '\n');
+}
+
+
+/*
+ BusLogic_ParseKeyword parses an individual option keyword. It returns true
+ and updates the pointer if the keyword is recognized and false otherwise.
+*/
+
+static boolean BusLogic_ParseKeyword(char **StringPointer, char *Keyword)
+{
+ /*
+ Case-insensitively matches Keyword against the start of *StringPointer.
+ On a full match, advances *StringPointer past the keyword and returns
+ true; otherwise returns false and leaves *StringPointer unchanged.
+ */
+ char *Pointer = *StringPointer;
+ while (*Keyword != '\0')
+ {
+ char StringChar = *Pointer++;
+ char KeywordChar = *Keyword++;
+ /*
+ Fold both characters to lower case. The correct fold offset is
+ 'a' - 'A' (32); the previous code used 'a' - 'Z' (7), which mapped
+ 'A' to 'H' and so broke matching whenever the option string and the
+ keyword differed in case (e.g. "NOPROBE" vs "NoProbe").
+ */
+ if (StringChar >= 'A' && StringChar <= 'Z')
+ StringChar += 'a' - 'A';
+ if (KeywordChar >= 'A' && KeywordChar <= 'Z')
+ KeywordChar += 'a' - 'A';
+ if (StringChar != KeywordChar) return false;
+ }
+ *StringPointer = Pointer;
+ return true;
+}
+
+
+/*
+ BusLogic_ParseDriverOptions handles processing of BusLogic Driver Options
+ specifications.
+
+ BusLogic Driver Options may be specified either via the Linux Kernel Command
+ Line or via the Loadable Kernel Module Installation Facility. Driver Options
+ for multiple host adapters may be specified either by separating the option
+ strings by a semicolon, or by specifying multiple "BusLogic=" strings on the
+ command line. Individual option specifications for a single host adapter are
+ separated by commas. The Probing and Debugging Options apply to all host
+ adapters whereas the remaining options apply individually only to the
+ selected host adapter.
+
+ The BusLogic Driver Probing Options comprise the following:
+
+ IO:<integer>
+
+ The "IO:" option specifies an ISA I/O Address to be probed for a non-PCI
+ MultiMaster Host Adapter. If neither "IO:" nor "NoProbeISA" options are
+ specified, then the standard list of BusLogic MultiMaster ISA I/O Addresses
+ will be probed (0x330, 0x334, 0x230, 0x234, 0x130, and 0x134). Multiple
+ "IO:" options may be specified to precisely determine the I/O Addresses to
+ be probed, but the probe order will always follow the standard list.
+
+ NoProbe
+
+ The "NoProbe" option disables all probing and therefore no BusLogic Host
+ Adapters will be detected.
+
+ NoProbeISA
+
+ The "NoProbeISA" option disables probing of the standard BusLogic ISA I/O
+ Addresses and therefore only PCI MultiMaster and FlashPoint Host Adapters
+ will be detected.
+
+ NoProbePCI
+
+ The "NoProbePCI" option disables the interrogation of PCI Configuration
+ Space and therefore only ISA Multimaster Host Adapters will be detected, as
+ well as PCI Multimaster Host Adapters that have their ISA Compatible I/O
+ Port set to "Primary" or "Alternate".
+
+ NoSortPCI
+
+ The "NoSortPCI" option forces PCI MultiMaster Host Adapters to be
+ enumerated in the order provided by the PCI BIOS, ignoring any setting of
+ the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." option.
+
+ MultiMasterFirst
+
+ The "MultiMasterFirst" option forces MultiMaster Host Adapters to be probed
+ before FlashPoint Host Adapters. By default, if both FlashPoint and PCI
+ MultiMaster Host Adapters are present, this driver will probe for
+ FlashPoint Host Adapters first unless the BIOS primary disk is controlled
+ by the first PCI MultiMaster Host Adapter, in which case MultiMaster Host
+ Adapters will be probed first.
+
+ FlashPointFirst
+
+ The "FlashPointFirst" option forces FlashPoint Host Adapters to be probed
+ before MultiMaster Host Adapters.
+
+ The BusLogic Driver Tagged Queuing Options allow for explicitly specifying
+ the Queue Depth and whether Tagged Queuing is permitted for each Target
+ Device (assuming that the Target Device supports Tagged Queuing). The Queue
+ Depth is the number of SCSI Commands that are allowed to be concurrently
+ presented for execution (either to the Host Adapter or Target Device). Note
+ that explicitly enabling Tagged Queuing may lead to problems; the option to
+ enable or disable Tagged Queuing is provided primarily to allow disabling
+ Tagged Queuing on Target Devices that do not implement it correctly. The
+ following options are available:
+
+ QueueDepth:<integer>
+
+ The "QueueDepth:" or "QD:" option specifies the Queue Depth to use for all
+ Target Devices that support Tagged Queuing, as well as the maximum Queue
+ Depth for devices that do not support Tagged Queuing. If no Queue Depth
+ option is provided, the Queue Depth will be determined automatically based
+ on the Host Adapter's Total Queue Depth and the number, type, speed, and
+ capabilities of the detected Target Devices. For Host Adapters that
+ require ISA Bounce Buffers, the Queue Depth is automatically set by default
+ to BusLogic_TaggedQueueDepthBB or BusLogic_UntaggedQueueDepthBB to avoid
+ excessive preallocation of DMA Bounce Buffer memory. Target Devices that
+ do not support Tagged Queuing always have their Queue Depth set to
+ BusLogic_UntaggedQueueDepth or BusLogic_UntaggedQueueDepthBB, unless a
+ lower Queue Depth option is provided. A Queue Depth of 1 automatically
+ disables Tagged Queuing.
+
+ QueueDepth:[<integer>,<integer>...]
+
+ The "QueueDepth:[...]" or "QD:[...]" option specifies the Queue Depth
+ individually for each Target Device. If an <integer> is omitted, the
+ associated Target Device will have its Queue Depth selected automatically.
+
+ TaggedQueuing:Default
+
+ The "TaggedQueuing:Default" or "TQ:Default" option permits Tagged Queuing
+ based on the firmware version of the BusLogic Host Adapter and based on
+ whether the Queue Depth allows queuing multiple commands.
+
+ TaggedQueuing:Enable
+
+ The "TaggedQueuing:Enable" or "TQ:Enable" option enables Tagged Queuing for
+ all Target Devices on this Host Adapter, overriding any limitation that
+ would otherwise be imposed based on the Host Adapter firmware version.
+
+ TaggedQueuing:Disable
+
+ The "TaggedQueuing:Disable" or "TQ:Disable" option disables Tagged Queuing
+ for all Target Devices on this Host Adapter.
+
+ TaggedQueuing:<Target-Spec>
+
+ The "TaggedQueuing:<Target-Spec>" or "TQ:<Target-Spec>" option controls
+ Tagged Queuing individually for each Target Device. <Target-Spec> is a
+ sequence of "Y", "N", and "X" characters. "Y" enables Tagged Queuing, "N"
+ disables Tagged Queuing, and "X" accepts the default based on the firmware
+ version. The first character refers to Target Device 0, the second to
+ Target Device 1, and so on; if the sequence of "Y", "N", and "X" characters
+ does not cover all the Target Devices, unspecified characters are assumed
+ to be "X".
+
+ The BusLogic Driver Error Recovery Option allows for explicitly specifying
+ the Error Recovery action to be performed when BusLogic_ResetCommand is
+ called due to a SCSI Command failing to complete successfully. The following
+ options are available:
+
+ ErrorRecovery:Default
+
+ The "ErrorRecovery:Default" or "ER:Default" option selects between the Hard
+ Reset and Bus Device Reset options based on the recommendation of the SCSI
+ Subsystem.
+
+ ErrorRecovery:HardReset
+
+ The "ErrorRecovery:HardReset" or "ER:HardReset" option will initiate a Host
+ Adapter Hard Reset which also causes a SCSI Bus Reset.
+
+ ErrorRecovery:BusDeviceReset
+
+ The "ErrorRecovery:BusDeviceReset" or "ER:BusDeviceReset" option will send
+ a Bus Device Reset message to the individual Target Device causing the
+ error. If Error Recovery is again initiated for this Target Device and no
+ SCSI Command to this Target Device has completed successfully since the Bus
+ Device Reset message was sent, then a Hard Reset will be attempted.
+
+ ErrorRecovery:None
+
+ The "ErrorRecovery:None" or "ER:None" option suppresses Error Recovery.
+ This option should only be selected if a SCSI Bus Reset or Bus Device Reset
+ will cause the Target Device or a critical operation to suffer a complete
+ and unrecoverable failure.
+
+ ErrorRecovery:<Target-Spec>
+
+ The "ErrorRecovery:<Target-Spec>" or "ER:<Target-Spec>" option controls
+ Error Recovery individually for each Target Device. <Target-Spec> is a
+ sequence of "D", "H", "B", and "N" characters. "D" selects Default, "H"
+ selects Hard Reset, "B" selects Bus Device Reset, and "N" selects None.
+ The first character refers to Target Device 0, the second to Target Device
+ 1, and so on; if the sequence of "D", "H", "B", and "N" characters does not
+ cover all the possible Target Devices, unspecified characters are assumed
+ to be "D".
+
+ The BusLogic Driver Miscellaneous Options comprise the following:
+
+ BusSettleTime:<seconds>
+
+ The "BusSettleTime:" or "BST:" option specifies the Bus Settle Time in
+ seconds. The Bus Settle Time is the amount of time to wait between a Host
+ Adapter Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI
+ Commands. If unspecified, it defaults to BusLogic_DefaultBusSettleTime.
+
+ InhibitTargetInquiry
+
+ The "InhibitTargetInquiry" option inhibits the execution of an Inquire
+ Target Devices or Inquire Installed Devices command on MultiMaster Host
+ Adapters. This may be necessary with some older Target Devices that do not
+ respond correctly when Logical Units above 0 are addressed.
+
+ The BusLogic Driver Debugging Options comprise the following:
+
+ TraceProbe
+
+ The "TraceProbe" option enables tracing of Host Adapter Probing.
+
+ TraceHardwareReset
+
+ The "TraceHardwareReset" option enables tracing of Host Adapter Hardware
+ Reset.
+
+ TraceConfiguration
+
+ The "TraceConfiguration" option enables tracing of Host Adapter
+ Configuration.
+
+ TraceErrors
+
+ The "TraceErrors" option enables tracing of SCSI Commands that return an
+ error from the Target Device. The CDB and Sense Data will be printed for
+ each SCSI Command that fails.
+
+ Debug
+
+ The "Debug" option enables all debugging options.
+
+ The following examples demonstrate setting the Queue Depth for Target Devices
+ 1 and 2 on the first host adapter to 7 and 15, the Queue Depth for all Target
+ Devices on the second host adapter to 31, and the Bus Settle Time on the
+ second host adapter to 30 seconds.
+
+ Linux Kernel Command Line:
+
+ linux BusLogic=QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30
+
+ LILO Linux Boot Loader (in /etc/lilo.conf):
+
+ append = "BusLogic=QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30"
+
+ INSMOD Loadable Kernel Module Installation Facility:
+
+ insmod BusLogic.o \
+ 'BusLogic_Options="QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30"'
+
+ NOTE: Module Utilities 2.1.71 or later is required for correct parsing
+ of driver options containing commas.
+
+*/
+
+static void BusLogic_ParseDriverOptions(char *OptionsString)
+{
+ while (true)
+ {
+ BusLogic_DriverOptions_T *DriverOptions =
+ &BusLogic_DriverOptions[BusLogic_DriverOptionsCount++];
+ int TargetID;
+ memset(DriverOptions, 0, sizeof(BusLogic_DriverOptions_T));
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ while (*OptionsString != '\0' && *OptionsString != ';')
+ {
+ /* Probing Options. */
+ if (BusLogic_ParseKeyword(&OptionsString, "IO:"))
+ {
+ BusLogic_IO_Address_T IO_Address =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ BusLogic_ProbeOptions.LimitedProbeISA = true;
+ switch (IO_Address)
+ {
+ case 0x330:
+ BusLogic_ProbeOptions.Probe330 = true;
+ break;
+ case 0x334:
+ BusLogic_ProbeOptions.Probe334 = true;
+ break;
+ case 0x230:
+ BusLogic_ProbeOptions.Probe230 = true;
+ break;
+ case 0x234:
+ BusLogic_ProbeOptions.Probe234 = true;
+ break;
+ case 0x130:
+ BusLogic_ProbeOptions.Probe130 = true;
+ break;
+ case 0x134:
+ BusLogic_ProbeOptions.Probe134 = true;
+ break;
+ default:
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal I/O Address 0x%X)\n",
+ NULL, IO_Address);
+ return;
+ }
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoProbeISA"))
+ BusLogic_ProbeOptions.NoProbeISA = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoProbePCI"))
+ BusLogic_ProbeOptions.NoProbePCI = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoProbe"))
+ BusLogic_ProbeOptions.NoProbe = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoSortPCI"))
+ BusLogic_ProbeOptions.NoSortPCI = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "MultiMasterFirst"))
+ BusLogic_ProbeOptions.MultiMasterFirst = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "FlashPointFirst"))
+ BusLogic_ProbeOptions.FlashPointFirst = true;
+ /* Tagged Queuing Options. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "QueueDepth:[") ||
+ BusLogic_ParseKeyword(&OptionsString, "QD:["))
+ {
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ {
+ unsigned short QueueDepth =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ if (QueueDepth > BusLogic_MaxTaggedQueueDepth)
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal Queue Depth %d)\n",
+ NULL, QueueDepth);
+ return;
+ }
+ DriverOptions->QueueDepth[TargetID] = QueueDepth;
+ if (*OptionsString == ',')
+ OptionsString++;
+ else if (*OptionsString == ']')
+ break;
+ else
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(',' or ']' expected at '%s')\n",
+ NULL, OptionsString);
+ return;
+ }
+ }
+ if (*OptionsString != ']')
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(']' expected at '%s')\n",
+ NULL, OptionsString);
+ return;
+ }
+ else OptionsString++;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "QueueDepth:") ||
+ BusLogic_ParseKeyword(&OptionsString, "QD:"))
+ {
+ unsigned short QueueDepth =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ if (QueueDepth == 0 || QueueDepth > BusLogic_MaxTaggedQueueDepth)
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal Queue Depth %d)\n",
+ NULL, QueueDepth);
+ return;
+ }
+ DriverOptions->CommonQueueDepth = QueueDepth;
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->QueueDepth[TargetID] = QueueDepth;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "TaggedQueuing:") ||
+ BusLogic_ParseKeyword(&OptionsString, "TQ:"))
+ {
+ if (BusLogic_ParseKeyword(&OptionsString, "Default"))
+ {
+ DriverOptions->TaggedQueuingPermitted = 0x0000;
+ DriverOptions->TaggedQueuingPermittedMask = 0x0000;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "Enable"))
+ {
+ DriverOptions->TaggedQueuingPermitted = 0xFFFF;
+ DriverOptions->TaggedQueuingPermittedMask = 0xFFFF;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "Disable"))
+ {
+ DriverOptions->TaggedQueuingPermitted = 0x0000;
+ DriverOptions->TaggedQueuingPermittedMask = 0xFFFF;
+ }
+ else
+ {
+ unsigned short TargetBit;
+ for (TargetID = 0, TargetBit = 1;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++, TargetBit <<= 1)
+ switch (*OptionsString++)
+ {
+ case 'Y':
+ DriverOptions->TaggedQueuingPermitted |= TargetBit;
+ DriverOptions->TaggedQueuingPermittedMask |= TargetBit;
+ break;
+ case 'N':
+ DriverOptions->TaggedQueuingPermitted &= ~TargetBit;
+ DriverOptions->TaggedQueuingPermittedMask |= TargetBit;
+ break;
+ case 'X':
+ break;
+ default:
+ OptionsString--;
+ TargetID = BusLogic_MaxTargetDevices;
+ break;
+ }
+ }
+ }
+ /* Error Recovery Option. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "ErrorRecovery:") ||
+ BusLogic_ParseKeyword(&OptionsString, "ER:"))
+ {
+ if (BusLogic_ParseKeyword(&OptionsString, "Default"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ else if (BusLogic_ParseKeyword(&OptionsString, "HardReset"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_HardReset;
+ else if (BusLogic_ParseKeyword(&OptionsString, "BusDeviceReset"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_BusDeviceReset;
+ else if (BusLogic_ParseKeyword(&OptionsString, "None"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_None;
+ else
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ switch (*OptionsString++)
+ {
+ case 'D':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ break;
+ case 'H':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_HardReset;
+ break;
+ case 'B':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_BusDeviceReset;
+ break;
+ case 'N':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_None;
+ break;
+ default:
+ OptionsString--;
+ TargetID = BusLogic_MaxTargetDevices;
+ break;
+ }
+ }
+ /* Miscellaneous Options. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "BusSettleTime:") ||
+ BusLogic_ParseKeyword(&OptionsString, "BST:"))
+ {
+ unsigned short BusSettleTime =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ if (BusSettleTime > 5 * 60)
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal Bus Settle Time %d)\n",
+ NULL, BusSettleTime);
+ return;
+ }
+ DriverOptions->BusSettleTime = BusSettleTime;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString,
+ "InhibitTargetInquiry"))
+ DriverOptions->LocalOptions.InhibitTargetInquiry = true;
+ /* Debugging Options. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceProbe"))
+ BusLogic_GlobalOptions.TraceProbe = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceHardwareReset"))
+ BusLogic_GlobalOptions.TraceHardwareReset = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceConfiguration"))
+ BusLogic_GlobalOptions.TraceConfiguration = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceErrors"))
+ BusLogic_GlobalOptions.TraceErrors = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "Debug"))
+ {
+ BusLogic_GlobalOptions.TraceProbe = true;
+ BusLogic_GlobalOptions.TraceHardwareReset = true;
+ BusLogic_GlobalOptions.TraceConfiguration = true;
+ BusLogic_GlobalOptions.TraceErrors = true;
+ }
+ if (*OptionsString == ',')
+ OptionsString++;
+ else if (*OptionsString != ';' && *OptionsString != '\0')
+ {
+ BusLogic_Error("BusLogic: Unexpected Driver Option '%s' "
+ "ignored\n", NULL, OptionsString);
+ *OptionsString = '\0';
+ }
+ }
+ if (!(BusLogic_DriverOptionsCount == 0 ||
+ BusLogic_ProbeInfoCount == 0 ||
+ BusLogic_DriverOptionsCount == BusLogic_ProbeInfoCount))
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(all or no I/O Addresses must be specified)\n", NULL);
+ return;
+ }
+ /*
+ Tagged Queuing is disabled when the Queue Depth is 1 since queuing
+ multiple commands is not possible.
+ */
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ if (DriverOptions->QueueDepth[TargetID] == 1)
+ {
+ unsigned short TargetBit = 1 << TargetID;
+ DriverOptions->TaggedQueuingPermitted &= ~TargetBit;
+ DriverOptions->TaggedQueuingPermittedMask |= TargetBit;
+ }
+ if (*OptionsString == ';') OptionsString++;
+ if (*OptionsString == '\0') return;
+ }
+}
+
+
+/*
+ BusLogic_Setup handles processing of Kernel Command Line Arguments.
+*/
+
+void BusLogic_Setup(char *CommandLineString, int *CommandLineIntegers)
+{
+ if (CommandLineIntegers[0] != 0)
+ {
+ BusLogic_Error("BusLogic: Obsolete Command Line Entry "
+ "Format Ignored\n", NULL);
+ return;
+ }
+ if (CommandLineString == NULL || *CommandLineString == '\0') return;
+ BusLogic_ParseDriverOptions(CommandLineString);
+}
+
+
+/*
+ Include Module support if requested.
+*/
+
+#ifdef MODULE
+
+SCSI_Host_Template_T driver_template = BUSLOGIC;
+
+#include "scsi_module.c"
+
+#endif
diff --git a/linux/src/drivers/scsi/BusLogic.h b/linux/src/drivers/scsi/BusLogic.h
new file mode 100644
index 0000000..f60ee07
--- /dev/null
+++ b/linux/src/drivers/scsi/BusLogic.h
@@ -0,0 +1,1775 @@
+/*
+
+ Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters
+
+ Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ This program is free software; you may redistribute and/or modify it under
+ the terms of the GNU General Public License Version 2 as published by the
+ Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for complete details.
+
+ The author respectfully requests that any modifications to this software be
+ sent directly to him for evaluation and testing.
+
+ Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose
+ advice has been invaluable, to David Gentzel, for writing the original Linux
+ BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site.
+
+ Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB
+ Manager available as freely redistributable source code.
+
+*/
+
+
+#include <linux/config.h>
+
+
+/*
+ Define types for some of the structures that interface with the rest
+ of the Linux Kernel and SCSI Subsystem.
+*/
+
+typedef kdev_t KernelDevice_T;
+typedef struct proc_dir_entry PROC_DirectoryEntry_T;
+typedef unsigned long ProcessorFlags_T;
+typedef struct pt_regs Registers_T;
+typedef struct partition PartitionTable_T;
+typedef Scsi_Host_Template SCSI_Host_Template_T;
+typedef struct Scsi_Host SCSI_Host_T;
+typedef struct scsi_device SCSI_Device_T;
+typedef struct scsi_disk SCSI_Disk_T;
+typedef struct scsi_cmnd SCSI_Command_T;
+typedef struct scatterlist SCSI_ScatterList_T;
+
+
+/*
+ Define prototypes for the BusLogic Driver Interface Functions.
+*/
+
+extern PROC_DirectoryEntry_T BusLogic_ProcDirectoryEntry;
+extern const char *BusLogic_DriverInfo(SCSI_Host_T *);
+extern int BusLogic_DetectHostAdapter(SCSI_Host_Template_T *);
+extern int BusLogic_ReleaseHostAdapter(SCSI_Host_T *);
+extern int BusLogic_QueueCommand(SCSI_Command_T *,
+ void (*CompletionRoutine)(SCSI_Command_T *));
+extern int BusLogic_AbortCommand(SCSI_Command_T *);
+extern int BusLogic_ResetCommand(SCSI_Command_T *, unsigned int);
+extern int BusLogic_BIOSDiskParameters(SCSI_Disk_T *, KernelDevice_T, int *);
+extern int BusLogic_ProcDirectoryInfo(char *, char **, off_t, int, int, int);
+
+
+/*
+ Define the BusLogic SCSI Host Template structure.
+*/
+
+#define BUSLOGIC \
+ { proc_dir: &BusLogic_ProcDirectoryEntry, /* ProcFS Directory Entry */ \
+ proc_info: BusLogic_ProcDirectoryInfo, /* ProcFS Info Function */ \
+ name: "BusLogic", /* Driver Name */ \
+ detect: BusLogic_DetectHostAdapter, /* Detect Host Adapter */ \
+ release: BusLogic_ReleaseHostAdapter, /* Release Host Adapter */ \
+ info: BusLogic_DriverInfo, /* Driver Info Function */ \
+ queuecommand: BusLogic_QueueCommand, /* Queue Command Function */ \
+ abort: BusLogic_AbortCommand, /* Abort Command Function */ \
+ reset: BusLogic_ResetCommand, /* Reset Command Function */ \
+ bios_param: BusLogic_BIOSDiskParameters, /* BIOS Disk Parameters */ \
+ unchecked_isa_dma: 1, /* Default Initial Value */ \
+ use_clustering: ENABLE_CLUSTERING } /* Enable Clustering */
+
+
+/*
+ BusLogic_DriverVersion protects the private portion of this file.
+*/
+
+#ifdef BusLogic_DriverVersion
+
+
+/*
+ FlashPoint support is only available for the Intel x86 Architecture with
+ CONFIG_PCI set.
+*/
+
+#ifndef __i386__
+#undef CONFIG_SCSI_OMIT_FLASHPOINT
+#define CONFIG_SCSI_OMIT_FLASHPOINT
+#endif
+
+#ifndef CONFIG_PCI
+#undef CONFIG_SCSI_OMIT_FLASHPOINT
+#define CONFIG_SCSI_OMIT_FLASHPOINT
+#define BusLogic_InitializeProbeInfoListISA \
+ BusLogic_InitializeProbeInfoList
+#endif
+
+
+/*
+ Define the maximum number of BusLogic Host Adapters supported by this driver.
+*/
+
+#define BusLogic_MaxHostAdapters 16
+
+
+/*
+ Define the maximum number of Target Devices supported by this driver.
+*/
+
+#define BusLogic_MaxTargetDevices 16
+
+
+/*
+ Define the maximum number of Scatter/Gather Segments used by this driver.
+ For optimal performance, it is important that this limit be at least as
+ large as the largest single request generated by the I/O Subsystem.
+*/
+
+#define BusLogic_ScatterGatherLimit 128
+
+
+/*
+ Define the maximum, maximum automatic, minimum automatic, and default Queue
+ Depth to allow for Target Devices depending on whether or not they support
+ Tagged Queuing and whether or not ISA Bounce Buffers are required.
+*/
+
+#define BusLogic_MaxTaggedQueueDepth 64
+#define BusLogic_MaxAutomaticTaggedQueueDepth 28
+#define BusLogic_MinAutomaticTaggedQueueDepth 7
+#define BusLogic_TaggedQueueDepthBB 3
+#define BusLogic_UntaggedQueueDepth 3
+#define BusLogic_UntaggedQueueDepthBB 2
+
+
+/*
+ Define the default amount of time in seconds to wait between a Host Adapter
+ Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI commands.
+ Some SCSI devices get confused if they receive SCSI commands too soon after
+ a SCSI Bus Reset.
+*/
+
+#define BusLogic_DefaultBusSettleTime 2
+
+
+/*
+ Define the maximum number of Mailboxes that should be used for MultiMaster
+ Host Adapters. This number is chosen to be larger than the maximum Host
+ Adapter Queue Depth and small enough so that the Host Adapter structure
+ does not cross an allocation block size boundary.
+*/
+
+#define BusLogic_MaxMailboxes 211
+
+
+/*
+ Define the number of CCBs that should be allocated as a group to optimize
+ Kernel memory allocation.
+*/
+
+#define BusLogic_CCB_AllocationGroupSize 7
+
+
+/*
+ Define the Host Adapter Line and Message Buffer Sizes.
+*/
+
+#define BusLogic_LineBufferSize 100
+#define BusLogic_MessageBufferSize 9700
+
+
+/*
+ Define the Driver Message Levels.
+*/
+
+typedef enum BusLogic_MessageLevel
+{
+ BusLogic_AnnounceLevel = 0,
+ BusLogic_InfoLevel = 1,
+ BusLogic_NoticeLevel = 2,
+ BusLogic_WarningLevel = 3,
+ BusLogic_ErrorLevel = 4
+}
+BusLogic_MessageLevel_T;
+
+static char
+ *BusLogic_MessageLevelMap[] =
+ { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING, KERN_ERR };
+
+
+/*
+ Define Driver Message macros.
+*/
+
+#define BusLogic_Announce(Format, Arguments...) \
+ BusLogic_Message(BusLogic_AnnounceLevel, Format, ##Arguments)
+
+#define BusLogic_Info(Format, Arguments...) \
+ BusLogic_Message(BusLogic_InfoLevel, Format, ##Arguments)
+
+#define BusLogic_Notice(Format, Arguments...) \
+ BusLogic_Message(BusLogic_NoticeLevel, Format, ##Arguments)
+
+#define BusLogic_Warning(Format, Arguments...) \
+ BusLogic_Message(BusLogic_WarningLevel, Format, ##Arguments)
+
+#define BusLogic_Error(Format, Arguments...) \
+ BusLogic_Message(BusLogic_ErrorLevel, Format, ##Arguments)
+
+
+/*
+ Define the types of BusLogic Host Adapters that are supported and the number
+ of I/O Addresses required by each type.
+*/
+
+typedef enum
+{
+ BusLogic_MultiMaster = 1,
+ BusLogic_FlashPoint = 2
+}
+__attribute__ ((packed))
+BusLogic_HostAdapterType_T;
+
+#define BusLogic_MultiMasterAddressCount 4
+#define BusLogic_FlashPointAddressCount 256
+
+static int
+ BusLogic_HostAdapterAddressCount[3] =
+ { 0, BusLogic_MultiMasterAddressCount, BusLogic_FlashPointAddressCount };
+
+
+/*
+ Define macros for testing the Host Adapter Type.
+*/
+
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+
+#define BusLogic_MultiMasterHostAdapterP(HostAdapter) \
+ (HostAdapter->HostAdapterType == BusLogic_MultiMaster)
+
+#define BusLogic_FlashPointHostAdapterP(HostAdapter) \
+ (HostAdapter->HostAdapterType == BusLogic_FlashPoint)
+
+#else
+
+#define BusLogic_MultiMasterHostAdapterP(HostAdapter) \
+ (true)
+
+#define BusLogic_FlashPointHostAdapterP(HostAdapter) \
+ (false)
+
+#endif
+
+
+/*
+ Define the possible Host Adapter Bus Types.
+*/
+
+typedef enum
+{
+ BusLogic_Unknown_Bus = 0,
+ BusLogic_ISA_Bus = 1,
+ BusLogic_EISA_Bus = 2,
+ BusLogic_PCI_Bus = 3,
+ BusLogic_VESA_Bus = 4,
+ BusLogic_MCA_Bus = 5
+}
+__attribute__ ((packed))
+BusLogic_HostAdapterBusType_T;
+
+static char
+ *BusLogic_HostAdapterBusNames[] =
+ { "Unknown", "ISA", "EISA", "PCI", "VESA", "MCA" };
+
+static BusLogic_HostAdapterBusType_T
+ BusLogic_HostAdapterBusTypes[] =
+ { BusLogic_VESA_Bus, /* BT-4xx */
+ BusLogic_ISA_Bus, /* BT-5xx */
+ BusLogic_MCA_Bus, /* BT-6xx */
+ BusLogic_EISA_Bus, /* BT-7xx */
+ BusLogic_Unknown_Bus, /* BT-8xx */
+ BusLogic_PCI_Bus }; /* BT-9xx */
+
+
+/*
+ Define the possible Host Adapter BIOS Disk Geometry Translations.
+*/
+
+typedef enum BusLogic_BIOS_DiskGeometryTranslation
+{
+ BusLogic_BIOS_Disk_Not_Installed = 0,
+ BusLogic_BIOS_Disk_Installed_64x32 = 1,
+ BusLogic_BIOS_Disk_Installed_128x32 = 2,
+ BusLogic_BIOS_Disk_Installed_255x63 = 3
+}
+__attribute__ ((packed))
+BusLogic_BIOS_DiskGeometryTranslation_T;
+
+
+/*
+ Define a Boolean data type.
+*/
+
+typedef enum { false, true } __attribute__ ((packed)) boolean;
+
+
+/*
+ Define a 32 bit I/O Address data type.
+*/
+
+typedef unsigned int BusLogic_IO_Address_T;
+
+
+/*
+ Define a 32 bit PCI Bus Address data type.
+*/
+
+typedef unsigned int BusLogic_PCI_Address_T;
+
+
+/*
+ Define a 32 bit Base Address data type.
+*/
+
+typedef unsigned int BusLogic_Base_Address_T;
+
+
+/*
+ Define a 32 bit Bus Address data type.
+*/
+
+typedef unsigned int BusLogic_BusAddress_T;
+
+
+/*
+ Define a 32 bit Byte Count data type.
+*/
+
+typedef unsigned int BusLogic_ByteCount_T;
+
+
+/*
+ Define a 10^18 Statistics Byte Counter data type.
+*/
+
+typedef struct BusLogic_ByteCounter
+{
+ unsigned int Units;
+ unsigned int Billions;
+}
+BusLogic_ByteCounter_T;
+
+
+/*
+ Define the structure for I/O Address and Bus Probing Information.
+*/
+
+typedef struct BusLogic_ProbeInfo
+{
+ BusLogic_HostAdapterType_T HostAdapterType;
+ BusLogic_HostAdapterBusType_T HostAdapterBusType;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned char Bus;
+ unsigned char Device;
+ unsigned char IRQ_Channel;
+}
+BusLogic_ProbeInfo_T;
+
+
+/*
+ Define the Probe Options.
+*/
+
+typedef struct BusLogic_ProbeOptions
+{
+ boolean NoProbe:1; /* Bit 0 */
+ boolean NoProbeISA:1; /* Bit 1 */
+ boolean NoProbePCI:1; /* Bit 2 */
+ boolean NoSortPCI:1; /* Bit 3 */
+ boolean MultiMasterFirst:1; /* Bit 4 */
+ boolean FlashPointFirst:1; /* Bit 5 */
+ boolean LimitedProbeISA:1; /* Bit 6 */
+ boolean Probe330:1; /* Bit 7 */
+ boolean Probe334:1; /* Bit 8 */
+ boolean Probe230:1; /* Bit 9 */
+ boolean Probe234:1; /* Bit 10 */
+ boolean Probe130:1; /* Bit 11 */
+ boolean Probe134:1; /* Bit 12 */
+}
+BusLogic_ProbeOptions_T;
+
+
+/*
+ Define the Global Options.
+*/
+
+typedef struct BusLogic_GlobalOptions
+{
+ boolean TraceProbe:1; /* Bit 0 */
+ boolean TraceHardwareReset:1; /* Bit 1 */
+ boolean TraceConfiguration:1; /* Bit 2 */
+ boolean TraceErrors:1; /* Bit 3 */
+}
+BusLogic_GlobalOptions_T;
+
+
+/*
+ Define the Local Options.
+*/
+
+typedef struct BusLogic_LocalOptions
+{
+ boolean InhibitTargetInquiry:1; /* Bit 0 */
+}
+BusLogic_LocalOptions_T;
+
+
+/*
+ Define the Error Recovery Strategy Options.
+*/
+
+typedef enum
+{
+ BusLogic_ErrorRecovery_Default = 0,
+ BusLogic_ErrorRecovery_BusDeviceReset = 1,
+ BusLogic_ErrorRecovery_HardReset = 2,
+ BusLogic_ErrorRecovery_None = 3
+}
+__attribute__ ((packed))
+BusLogic_ErrorRecoveryStrategy_T;
+
+static char
+ *BusLogic_ErrorRecoveryStrategyNames[] =
+ { "Default", "Bus Device Reset", "Hard Reset", "None" },
+ BusLogic_ErrorRecoveryStrategyLetters[] =
+ { 'D', 'B', 'H', 'N' };
+
+
+/*
+ Define the BusLogic SCSI Host Adapter I/O Register Offsets.
+*/
+
+#define BusLogic_ControlRegisterOffset 0 /* WO register */
+#define BusLogic_StatusRegisterOffset 0 /* RO register */
+#define BusLogic_CommandParameterRegisterOffset 1 /* WO register */
+#define BusLogic_DataInRegisterOffset 1 /* RO register */
+#define BusLogic_InterruptRegisterOffset 2 /* RO register */
+#define BusLogic_GeometryRegisterOffset 3 /* RO register */
+
+
+/*
+ Define the structure of the write-only Control Register.
+*/
+
+typedef union BusLogic_ControlRegister
+{
+ unsigned char All;
+ struct {
+ unsigned char :4; /* Bits 0-3 */
+ boolean SCSIBusReset:1; /* Bit 4 */
+ boolean InterruptReset:1; /* Bit 5 */
+ boolean SoftReset:1; /* Bit 6 */
+ boolean HardReset:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_ControlRegister_T;
+
+
+/*
+ Define the structure of the read-only Status Register.
+*/
+
+typedef union BusLogic_StatusRegister
+{
+ unsigned char All;
+ struct {
+ boolean CommandInvalid:1; /* Bit 0 */
+ boolean Reserved:1; /* Bit 1 */
+ boolean DataInRegisterReady:1; /* Bit 2 */
+ boolean CommandParameterRegisterBusy:1; /* Bit 3 */
+ boolean HostAdapterReady:1; /* Bit 4 */
+ boolean InitializationRequired:1; /* Bit 5 */
+ boolean DiagnosticFailure:1; /* Bit 6 */
+ boolean DiagnosticActive:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_StatusRegister_T;
+
+
+/*
+ Define the structure of the read-only Interrupt Register.
+*/
+
+typedef union BusLogic_InterruptRegister
+{
+ unsigned char All;
+ struct {
+ boolean IncomingMailboxLoaded:1; /* Bit 0 */
+ boolean OutgoingMailboxAvailable:1; /* Bit 1 */
+ boolean CommandComplete:1; /* Bit 2 */
+ boolean ExternalBusReset:1; /* Bit 3 */
+ unsigned char Reserved:3; /* Bits 4-6 */
+ boolean InterruptValid:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_InterruptRegister_T;
+
+
+/*
+ Define the structure of the read-only Geometry Register.
+*/
+
+typedef union BusLogic_GeometryRegister
+{
+ unsigned char All;
+ struct {
+ BusLogic_BIOS_DiskGeometryTranslation_T Drive0Geometry:2; /* Bits 0-1 */
+ BusLogic_BIOS_DiskGeometryTranslation_T Drive1Geometry:2; /* Bits 2-3 */
+ unsigned char :3; /* Bits 4-6 */
+ boolean ExtendedTranslationEnabled:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_GeometryRegister_T;
+
+
+/*
+ Define the BusLogic SCSI Host Adapter Command Register Operation Codes.
+*/
+
+/* Opcodes written to the Command/Parameter Register (see
+ BusLogic_WriteCommandParameterRegister below). */
+typedef enum
+{
+ BusLogic_TestCommandCompleteInterrupt = 0x00,
+ BusLogic_InitializeMailbox = 0x01,
+ BusLogic_ExecuteMailboxCommand = 0x02,
+ BusLogic_ExecuteBIOSCommand = 0x03,
+ BusLogic_InquireBoardID = 0x04,
+ BusLogic_EnableOutgoingMailboxAvailableInt = 0x05,
+ BusLogic_SetSCSISelectionTimeout = 0x06,
+ BusLogic_SetPreemptTimeOnBus = 0x07,
+ BusLogic_SetTimeOffBus = 0x08,
+ BusLogic_SetBusTransferRate = 0x09,
+ BusLogic_InquireInstalledDevicesID0to7 = 0x0A,
+ BusLogic_InquireConfiguration = 0x0B,
+ BusLogic_EnableTargetMode = 0x0C,
+ BusLogic_InquireSetupInformation = 0x0D,
+ BusLogic_WriteAdapterLocalRAM = 0x1A,
+ BusLogic_ReadAdapterLocalRAM = 0x1B,
+ BusLogic_WriteBusMasterChipFIFO = 0x1C,
+ BusLogic_ReadBusMasterChipFIFO = 0x1D,
+ BusLogic_EchoCommandData = 0x1F,
+ BusLogic_HostAdapterDiagnostic = 0x20,
+ BusLogic_SetAdapterOptions = 0x21,
+ BusLogic_InquireInstalledDevicesID8to15 = 0x23,
+ BusLogic_InquireTargetDevices = 0x24,
+ BusLogic_DisableHostAdapterInterrupt = 0x25,
+ BusLogic_InitializeExtendedMailbox = 0x81,
+ BusLogic_ExecuteSCSICommand = 0x83,
+ BusLogic_InquireFirmwareVersion3rdDigit = 0x84,
+ BusLogic_InquireFirmwareVersionLetter = 0x85,
+ BusLogic_InquirePCIHostAdapterInformation = 0x86,
+ BusLogic_InquireHostAdapterModelNumber = 0x8B,
+ BusLogic_InquireSynchronousPeriod = 0x8C,
+ BusLogic_InquireExtendedSetupInformation = 0x8D,
+ BusLogic_EnableStrictRoundRobinMode = 0x8F,
+ BusLogic_StoreHostAdapterLocalRAM = 0x90,
+ BusLogic_FetchHostAdapterLocalRAM = 0x91,
+ BusLogic_StoreLocalDataInEEPROM = 0x92,
+ BusLogic_UploadAutoSCSICode = 0x94,
+ BusLogic_ModifyIOAddress = 0x95,
+ BusLogic_SetCCBFormat = 0x96,
+ BusLogic_WriteInquiryBuffer = 0x9A,
+ BusLogic_ReadInquiryBuffer = 0x9B,
+ BusLogic_FlashROMUploadDownload = 0xA7,
+ BusLogic_ReadSCAMData = 0xA8,
+ BusLogic_WriteSCAMData = 0xA9
+}
+BusLogic_OperationCode_T;
+
+
+/*
+ Define the Inquire Board ID reply structure.
+*/
+
+/* 4-byte reply to the Inquire Board ID (0x04) command. */
+typedef struct BusLogic_BoardID
+{
+ unsigned char BoardType; /* Byte 0 */
+ unsigned char CustomFeatures; /* Byte 1 */
+ unsigned char FirmwareVersion1stDigit; /* Byte 2 */
+ unsigned char FirmwareVersion2ndDigit; /* Byte 3 */
+}
+BusLogic_BoardID_T;
+
+
+/*
+ Define the Inquire Installed Devices ID 0 to 7 and Inquire Installed
+ Devices ID 8 to 15 reply type. For each Target Device, a byte is returned
+ where bit 0 set indicates that Logical Unit 0 exists, bit 1 set indicates
+ that Logical Unit 1 exists, and so on.
+*/
+
+typedef unsigned char BusLogic_InstalledDevices8_T[8];
+
+
+/*
+ Define the Inquire Target Devices reply type. Inquire Target Devices only
+ tests Logical Unit 0 of each Target Device unlike the Inquire Installed
+ Devices commands which test Logical Units 0 - 7. Two bytes are returned,
+ where byte 0 bit 0 set indicates that Target Device 0 exists, and so on.
+*/
+
+typedef unsigned short BusLogic_InstalledDevices_T;
+
+
+/*
+ Define the Inquire Configuration reply structure.
+*/
+
+/* 3-byte reply to the Inquire Configuration (0x0B) command. */
+typedef struct BusLogic_Configuration
+{
+ unsigned char :5; /* Byte 0 Bits 0-4 */
+ boolean DMA_Channel5:1; /* Byte 0 Bit 5 */
+ boolean DMA_Channel6:1; /* Byte 0 Bit 6 */
+ boolean DMA_Channel7:1; /* Byte 0 Bit 7 */
+ boolean IRQ_Channel9:1; /* Byte 1 Bit 0 */
+ boolean IRQ_Channel10:1; /* Byte 1 Bit 1 */
+ boolean IRQ_Channel11:1; /* Byte 1 Bit 2 */
+ boolean IRQ_Channel12:1; /* Byte 1 Bit 3 */
+ unsigned char :1; /* Byte 1 Bit 4 */
+ boolean IRQ_Channel14:1; /* Byte 1 Bit 5 */
+ boolean IRQ_Channel15:1; /* Byte 1 Bit 6 */
+ unsigned char :1; /* Byte 1 Bit 7 */
+ unsigned char HostAdapterID:4; /* Byte 2 Bits 0-3 */
+ unsigned char :4; /* Byte 2 Bits 4-7 */
+}
+BusLogic_Configuration_T;
+
+
+/*
+ Define the Inquire Setup Information reply structure.
+*/
+
+/* One per-target synchronous negotiation byte, plus 8-entry and
+ MaxTargetDevices-entry array types built from it. */
+typedef struct BusLogic_SynchronousValue
+{
+ unsigned char Offset:4; /* Bits 0-3 */
+ unsigned char TransferPeriod:3; /* Bits 4-6 */
+ boolean Synchronous:1; /* Bit 7 */
+}
+BusLogic_SynchronousValue_T;
+
+typedef BusLogic_SynchronousValue_T
+ BusLogic_SynchronousValues8_T[8];
+
+typedef BusLogic_SynchronousValue_T
+ BusLogic_SynchronousValues_T[BusLogic_MaxTargetDevices];
+
+/* Reply to the Inquire Setup Information (0x0D) command. */
+typedef struct BusLogic_SetupInformation
+{
+ boolean SynchronousInitiationEnabled:1; /* Byte 0 Bit 0 */
+ boolean ParityCheckingEnabled:1; /* Byte 0 Bit 1 */
+ unsigned char :6; /* Byte 0 Bits 2-7 */
+ unsigned char BusTransferRate; /* Byte 1 */
+ unsigned char PreemptTimeOnBus; /* Byte 2 */
+ unsigned char TimeOffBus; /* Byte 3 */
+ unsigned char MailboxCount; /* Byte 4 */
+ unsigned char MailboxAddress[3]; /* Bytes 5-7 */
+ BusLogic_SynchronousValues8_T SynchronousValuesID0to7; /* Bytes 8-15 */
+ unsigned char DisconnectPermittedID0to7; /* Byte 16 */
+ unsigned char Signature; /* Byte 17 */
+ unsigned char CharacterD; /* Byte 18 */
+ unsigned char HostBusType; /* Byte 19 */
+ unsigned char WideTransfersPermittedID0to7; /* Byte 20 */
+ unsigned char WideTransfersActiveID0to7; /* Byte 21 */
+ BusLogic_SynchronousValues8_T SynchronousValuesID8to15; /* Bytes 22-29 */
+ unsigned char DisconnectPermittedID8to15; /* Byte 30 */
+ unsigned char :8; /* Byte 31 */
+ unsigned char WideTransfersPermittedID8to15; /* Byte 32 */
+ unsigned char WideTransfersActiveID8to15; /* Byte 33 */
+}
+BusLogic_SetupInformation_T;
+
+
+/*
+ Define the Initialize Extended Mailbox request structure.
+*/
+
+/* Parameter block for Initialize Extended Mailbox (0x81); packed so the
+ 32-bit address immediately follows the count byte (5 bytes total). */
+typedef struct BusLogic_ExtendedMailboxRequest
+{
+ unsigned char MailboxCount; /* Byte 0 */
+ BusLogic_BusAddress_T BaseMailboxAddress; /* Bytes 1-4 */
+}
+__attribute__ ((packed))
+BusLogic_ExtendedMailboxRequest_T;
+
+
+/*
+ Define the Inquire Firmware Version 3rd Digit (0x84) reply type.
+*/
+
+typedef unsigned char BusLogic_FirmwareVersion3rdDigit_T;
+
+
+/*
+ Define the Inquire Firmware Version Letter (0x85) reply type.
+*/
+
+typedef unsigned char BusLogic_FirmwareVersionLetter_T;
+
+
+/*
+ Define the Inquire PCI Host Adapter Information reply type. The ISA
+ Compatible I/O Port values are defined here and are also used with
+ the Modify I/O Address command.
+*/
+
+/* packed forces this enum into a single byte (GCC extension), matching the
+ one-byte field it occupies in the PCI Host Adapter Information reply. */
+typedef enum BusLogic_ISACompatibleIOPort
+{
+ BusLogic_IO_330 = 0,
+ BusLogic_IO_334 = 1,
+ BusLogic_IO_230 = 2,
+ BusLogic_IO_234 = 3,
+ BusLogic_IO_130 = 4,
+ BusLogic_IO_134 = 5,
+ BusLogic_IO_Disable = 6,
+ BusLogic_IO_Disable2 = 7
+}
+__attribute__ ((packed))
+BusLogic_ISACompatibleIOPort_T;
+
+/* 4-byte reply to Inquire PCI Host Adapter Information (0x86). */
+typedef struct BusLogic_PCIHostAdapterInformation
+{
+ BusLogic_ISACompatibleIOPort_T ISACompatibleIOPort; /* Byte 0 */
+ unsigned char PCIAssignedIRQChannel; /* Byte 1 */
+ boolean LowByteTerminated:1; /* Byte 2 Bit 0 */
+ boolean HighByteTerminated:1; /* Byte 2 Bit 1 */
+ unsigned char :2; /* Byte 2 Bits 2-3 */
+ boolean JP1:1; /* Byte 2 Bit 4 */
+ boolean JP2:1; /* Byte 2 Bit 5 */
+ boolean JP3:1; /* Byte 2 Bit 6 */
+ boolean GenericInfoValid:1; /* Byte 2 Bit 7 */
+ unsigned char :8; /* Byte 3 */
+}
+BusLogic_PCIHostAdapterInformation_T;
+
+
+/*
+ Define the Inquire Host Adapter Model Number (0x8B) reply type.
+*/
+
+typedef unsigned char BusLogic_HostAdapterModelNumber_T[5];
+
+
+/*
+ Define the Inquire Synchronous Period (0x8C) reply type. For each Target
+ Device, a byte is returned which represents the Synchronous Transfer Period
+ in units of 10 nanoseconds.
+*/
+
+typedef unsigned char BusLogic_SynchronousPeriod_T[BusLogic_MaxTargetDevices];
+
+
+/*
+ Define the Inquire Extended Setup Information reply structure.
+*/
+
+/* Reply to Inquire Extended Setup Information (0x8D); packed so the
+ multi-byte fields land at the documented byte offsets. */
+typedef struct BusLogic_ExtendedSetupInformation
+{
+ unsigned char BusType; /* Byte 0 */
+ unsigned char BIOS_Address; /* Byte 1 */
+ unsigned short ScatterGatherLimit; /* Bytes 2-3 */
+ unsigned char MailboxCount; /* Byte 4 */
+ BusLogic_BusAddress_T BaseMailboxAddress; /* Bytes 5-8 */
+ struct { unsigned char :2; /* Byte 9 Bits 0-1 */
+ boolean FastOnEISA:1; /* Byte 9 Bit 2 */
+ unsigned char :3; /* Byte 9 Bits 3-5 */
+ boolean LevelSensitiveInterrupt:1; /* Byte 9 Bit 6 */
+ unsigned char :1; } Misc; /* Byte 9 Bit 7 */
+ unsigned char FirmwareRevision[3]; /* Bytes 10-12 */
+ boolean HostWideSCSI:1; /* Byte 13 Bit 0 */
+ boolean HostDifferentialSCSI:1; /* Byte 13 Bit 1 */
+ boolean HostSupportsSCAM:1; /* Byte 13 Bit 2 */
+ boolean HostUltraSCSI:1; /* Byte 13 Bit 3 */
+ boolean HostSmartTermination:1; /* Byte 13 Bit 4 */
+ unsigned char :3; /* Byte 13 Bits 5-7 */
+}
+__attribute__ ((packed))
+BusLogic_ExtendedSetupInformation_T;
+
+
+/*
+ Define the Enable Strict Round Robin Mode request type.
+*/
+
+/* One-byte (packed) parameter for Enable Strict Round Robin Mode (0x8F). */
+typedef enum BusLogic_RoundRobinModeRequest
+{
+ BusLogic_AggressiveRoundRobinMode = 0,
+ BusLogic_StrictRoundRobinMode = 1
+}
+__attribute__ ((packed))
+BusLogic_RoundRobinModeRequest_T;
+
+
+/*
+ Define the Fetch Host Adapter Local RAM request type.
+*/
+
+/* Base offsets into Host Adapter Local RAM for the BIOS and AutoSCSI areas,
+ used as ByteOffset in the request below. */
+#define BusLogic_BIOS_BaseOffset 0
+#define BusLogic_AutoSCSI_BaseOffset 64
+
+/* 2-byte parameter block for Fetch Host Adapter Local RAM (0x91). */
+typedef struct BusLogic_FetchHostAdapterLocalRAMRequest
+{
+ unsigned char ByteOffset; /* Byte 0 */
+ unsigned char ByteCount; /* Byte 1 */
+}
+BusLogic_FetchHostAdapterLocalRAMRequest_T;
+
+
+/*
+ Define the Host Adapter Local RAM AutoSCSI structure.
+*/
+
+/* 64-byte AutoSCSI configuration area in Host Adapter Local RAM (starts at
+ BusLogic_AutoSCSI_BaseOffset); packed to preserve the exact byte layout. */
+typedef struct BusLogic_AutoSCSIData
+{
+ unsigned char InternalFactorySignature[2]; /* Bytes 0-1 */
+ unsigned char InformationByteCount; /* Byte 2 */
+ unsigned char HostAdapterType[6]; /* Bytes 3-8 */
+ unsigned char :8; /* Byte 9 */
+ boolean FloppyEnabled:1; /* Byte 10 Bit 0 */
+ boolean FloppySecondary:1; /* Byte 10 Bit 1 */
+ boolean LevelSensitiveInterrupt:1; /* Byte 10 Bit 2 */
+ unsigned char :2; /* Byte 10 Bits 3-4 */
+ unsigned char SystemRAMAreaForBIOS:3; /* Byte 10 Bits 5-7 */
+ unsigned char DMA_Channel:7; /* Byte 11 Bits 0-6 */
+ boolean DMA_AutoConfiguration:1; /* Byte 11 Bit 7 */
+ unsigned char IRQ_Channel:7; /* Byte 12 Bits 0-6 */
+ boolean IRQ_AutoConfiguration:1; /* Byte 12 Bit 7 */
+ unsigned char DMA_TransferRate; /* Byte 13 */
+ unsigned char SCSI_ID; /* Byte 14 */
+ boolean LowByteTerminated:1; /* Byte 15 Bit 0 */
+ boolean ParityCheckingEnabled:1; /* Byte 15 Bit 1 */
+ boolean HighByteTerminated:1; /* Byte 15 Bit 2 */
+ boolean NoisyCablingEnvironment:1; /* Byte 15 Bit 3 */
+ boolean FastSynchronousNegotiation:1; /* Byte 15 Bit 4 */
+ boolean BusResetEnabled:1; /* Byte 15 Bit 5 */
+ boolean :1; /* Byte 15 Bit 6 */
+ boolean ActiveNegationEnabled:1; /* Byte 15 Bit 7 */
+ unsigned char BusOnDelay; /* Byte 16 */
+ unsigned char BusOffDelay; /* Byte 17 */
+ boolean HostAdapterBIOSEnabled:1; /* Byte 18 Bit 0 */
+ boolean BIOSRedirectionOfINT19Enabled:1; /* Byte 18 Bit 1 */
+ boolean ExtendedTranslationEnabled:1; /* Byte 18 Bit 2 */
+ boolean MapRemovableAsFixedEnabled:1; /* Byte 18 Bit 3 */
+ boolean :1; /* Byte 18 Bit 4 */
+ boolean BIOSSupportsMoreThan2DrivesEnabled:1; /* Byte 18 Bit 5 */
+ boolean BIOSInterruptModeEnabled:1; /* Byte 18 Bit 6 */
+ boolean FlopticalSupportEnabled:1; /* Byte 18 Bit 7 */
+ unsigned short DeviceEnabled; /* Bytes 19-20 */
+ unsigned short WidePermitted; /* Bytes 21-22 */
+ unsigned short FastPermitted; /* Bytes 23-24 */
+ unsigned short SynchronousPermitted; /* Bytes 25-26 */
+ unsigned short DisconnectPermitted; /* Bytes 27-28 */
+ unsigned short SendStartUnitCommand; /* Bytes 29-30 */
+ unsigned short IgnoreInBIOSScan; /* Bytes 31-32 */
+ unsigned char PCIInterruptPin:2; /* Byte 33 Bits 0-1 */
+ unsigned char HostAdapterIOPortAddress:2; /* Byte 33 Bits 2-3 */
+ boolean StrictRoundRobinModeEnabled:1; /* Byte 33 Bit 4 */
+ boolean VESABusSpeedGreaterThan33MHz:1; /* Byte 33 Bit 5 */
+ boolean VESABurstWriteEnabled:1; /* Byte 33 Bit 6 */
+ boolean VESABurstReadEnabled:1; /* Byte 33 Bit 7 */
+ unsigned short UltraPermitted; /* Bytes 34-35 */
+ unsigned int :32; /* Bytes 36-39 */
+ unsigned char :8; /* Byte 40 */
+ unsigned char AutoSCSIMaximumLUN; /* Byte 41 */
+ boolean :1; /* Byte 42 Bit 0 */
+ boolean SCAM_Dominant:1; /* Byte 42 Bit 1 */
+ boolean SCAM_Enabled:1; /* Byte 42 Bit 2 */
+ boolean SCAM_Level2:1; /* Byte 42 Bit 3 */
+ unsigned char :4; /* Byte 42 Bits 4-7 */
+ boolean INT13ExtensionEnabled:1; /* Byte 43 Bit 0 */
+ boolean :1; /* Byte 43 Bit 1 */
+ boolean CDROMBootEnabled:1; /* Byte 43 Bit 2 */
+ unsigned char :5; /* Byte 43 Bits 3-7 */
+ unsigned char BootTargetID:4; /* Byte 44 Bits 0-3 */
+ unsigned char BootChannel:4; /* Byte 44 Bits 4-7 */
+ unsigned char ForceBusDeviceScanningOrder:1; /* Byte 45 Bit 0 */
+ unsigned char :7; /* Byte 45 Bits 1-7 */
+ unsigned short NonTaggedToAlternateLUNPermitted; /* Bytes 46-47 */
+ unsigned short RenegotiateSyncAfterCheckCondition; /* Bytes 48-49 */
+ unsigned char Reserved[10]; /* Bytes 50-59 */
+ unsigned char ManufacturingDiagnostic[2]; /* Bytes 60-61 */
+ unsigned short Checksum; /* Bytes 62-63 */
+}
+__attribute__ ((packed))
+BusLogic_AutoSCSIData_T;
+
+
+/*
+ Define the Host Adapter Local RAM Auto SCSI Byte 45 structure.
+*/
+
+/* Stand-alone view of AutoSCSI byte 45 (mirrors the fields above). */
+typedef struct BusLogic_AutoSCSIByte45
+{
+ unsigned char ForceBusDeviceScanningOrder:1; /* Bit 0 */
+ unsigned char :7; /* Bits 1-7 */
+}
+BusLogic_AutoSCSIByte45_T;
+
+
+/*
+ Define the Host Adapter Local RAM BIOS Drive Map Byte structure.
+*/
+
+/* Offset of the drive map bytes within the BIOS Local RAM area. */
+#define BusLogic_BIOS_DriveMapOffset 17
+
+/* One BIOS drive map byte; TargetID is split across bits 5-7 and bit 0. */
+typedef struct BusLogic_BIOSDriveMapByte
+{
+ unsigned char TargetIDBit3:1; /* Bit 0 */
+ unsigned char :2; /* Bits 1-2 */
+ BusLogic_BIOS_DiskGeometryTranslation_T DiskGeometry:2; /* Bits 3-4 */
+ unsigned char TargetID:3; /* Bits 5-7 */
+}
+BusLogic_BIOSDriveMapByte_T;
+
+
+/*
+ Define the Modify I/O Address (0x95) request type. On PCI Host Adapters,
+ the Modify I/O Address command allows modification of the ISA compatible
+ I/O Address that the Host Adapter responds to; it does not affect the PCI
+ compliant I/O Address assigned at system initialization.
+*/
+
+typedef BusLogic_ISACompatibleIOPort_T BusLogic_ModifyIOAddressRequest_T;
+
+
+/*
+ Define the Set CCB Format request type. Extended LUN Format CCBs are
+ necessary to support more than 8 Logical Units per Target Device.
+*/
+
+/* One-byte (packed) parameter for Set CCB Format (0x96). */
+typedef enum BusLogic_SetCCBFormatRequest
+{
+ BusLogic_LegacyLUNFormatCCB = 0,
+ BusLogic_ExtendedLUNFormatCCB = 1
+}
+__attribute__ ((packed))
+BusLogic_SetCCBFormatRequest_T;
+
+
+/*
+ Define the Requested Reply Length type used by the Inquire Setup Information,
+ Inquire Host Adapter Model Number, Inquire Synchronous Period, and Inquire
+ Extended Setup Information commands (a single length byte).
+*/
+
+typedef unsigned char BusLogic_RequestedReplyLength_T;
+
+
+/*
+ Define the Outgoing Mailbox Action Codes.
+*/
+
+/* Packed to one byte: stored in OutgoingMailbox byte 7 (ActionCode). */
+typedef enum
+{
+ BusLogic_OutgoingMailboxFree = 0x00,
+ BusLogic_MailboxStartCommand = 0x01,
+ BusLogic_MailboxAbortCommand = 0x02
+}
+__attribute__ ((packed))
+BusLogic_ActionCode_T;
+
+
+/*
+ Define the Incoming Mailbox Completion Codes. The MultiMaster Firmware
+ only uses codes 0 - 4. The FlashPoint SCCB Manager has no mailboxes, so
+ completion codes are stored in the CCB; it only uses codes 1, 2, 4, and 5.
+*/
+
+/* Packed to one byte: stored in IncomingMailbox byte 7 or in the CCB. */
+typedef enum
+{
+ BusLogic_IncomingMailboxFree = 0x00,
+ BusLogic_CommandCompletedWithoutError = 0x01,
+ BusLogic_CommandAbortedAtHostRequest = 0x02,
+ BusLogic_AbortedCommandNotFound = 0x03,
+ BusLogic_CommandCompletedWithError = 0x04,
+ BusLogic_InvalidCCB = 0x05
+}
+__attribute__ ((packed))
+BusLogic_CompletionCode_T;
+
+
+/*
+ Define the Command Control Block (CCB) Opcodes.
+*/
+
+/* Packed to one byte: CCB byte 0 (Opcode). */
+typedef enum
+{
+ BusLogic_InitiatorCCB = 0x00,
+ BusLogic_TargetCCB = 0x01,
+ BusLogic_InitiatorCCB_ScatterGather = 0x02,
+ BusLogic_InitiatorCCB_ResidualDataLength = 0x03,
+ BusLogic_InitiatorCCB_ScatterGatherResidual = 0x04,
+ BusLogic_BusDeviceReset = 0x81
+}
+__attribute__ ((packed))
+BusLogic_CCB_Opcode_T;
+
+
+/*
+ Define the CCB Data Direction Codes.
+*/
+
+/* Not packed: used as a 2-bit field within CCB byte 1 (DataDirection). */
+typedef enum
+{
+ BusLogic_UncheckedDataTransfer = 0,
+ BusLogic_DataInLengthChecked = 1,
+ BusLogic_DataOutLengthChecked = 2,
+ BusLogic_NoDataTransfer = 3
+}
+BusLogic_DataDirection_T;
+
+
+/*
+ Define the Host Adapter Status Codes. The MultiMaster Firmware does not
+ return status code 0x0C; it uses 0x12 for both overruns and underruns.
+*/
+
+/* Packed to one byte: CCB byte 14 / IncomingMailbox byte 4. */
+typedef enum
+{
+ BusLogic_CommandCompletedNormally = 0x00,
+ BusLogic_LinkedCommandCompleted = 0x0A,
+ BusLogic_LinkedCommandCompletedWithFlag = 0x0B,
+ BusLogic_DataUnderRun = 0x0C,
+ BusLogic_SCSISelectionTimeout = 0x11,
+ BusLogic_DataOverRun = 0x12,
+ BusLogic_UnexpectedBusFree = 0x13,
+ BusLogic_InvalidBusPhaseRequested = 0x14,
+ BusLogic_InvalidOutgoingMailboxActionCode = 0x15,
+ BusLogic_InvalidCommandOperationCode = 0x16,
+ BusLogic_LinkedCCBhasInvalidLUN = 0x17,
+ BusLogic_InvalidCommandParameter = 0x1A,
+ BusLogic_AutoRequestSenseFailed = 0x1B,
+ BusLogic_TaggedQueuingMessageRejected = 0x1C,
+ BusLogic_UnsupportedMessageReceived = 0x1D,
+ BusLogic_HostAdapterHardwareFailed = 0x20,
+ BusLogic_TargetFailedResponseToATN = 0x21,
+ BusLogic_HostAdapterAssertedRST = 0x22,
+ BusLogic_OtherDeviceAssertedRST = 0x23,
+ BusLogic_TargetDeviceReconnectedImproperly = 0x24,
+ BusLogic_HostAdapterAssertedBusDeviceReset = 0x25,
+ BusLogic_AbortQueueGenerated = 0x26,
+ BusLogic_HostAdapterSoftwareError = 0x27,
+ BusLogic_HostAdapterHardwareTimeoutError = 0x30,
+ BusLogic_SCSIParityErrorDetected = 0x34
+}
+__attribute__ ((packed))
+BusLogic_HostAdapterStatus_T;
+
+
+/*
+ Define the SCSI Target Device Status Codes.
+*/
+
+/* Packed to one byte: CCB byte 15 / IncomingMailbox byte 5. */
+typedef enum
+{
+ BusLogic_OperationGood = 0x00,
+ BusLogic_CheckCondition = 0x02,
+ BusLogic_DeviceBusy = 0x08
+}
+__attribute__ ((packed))
+BusLogic_TargetDeviceStatus_T;
+
+
+/*
+ Define the Queue Tag Codes.
+*/
+
+/* Not packed: used as a 2-bit field within CCB bytes 1 and 17. */
+typedef enum
+{
+ BusLogic_SimpleQueueTag = 0,
+ BusLogic_HeadOfQueueTag = 1,
+ BusLogic_OrderedQueueTag = 2,
+ BusLogic_ReservedQT = 3
+}
+BusLogic_QueueTag_T;
+
+
+/*
+ Define the SCSI Command Descriptor Block (CDB).
+*/
+
+/* Maximum CDB length supported by the CCB (bytes 18-29 hold the CDB). */
+#define BusLogic_CDB_MaxLength 12
+
+typedef unsigned char SCSI_CDB_T[BusLogic_CDB_MaxLength];
+
+
+/*
+ Define the Scatter/Gather Segment structure required by the MultiMaster
+ Firmware Interface and the FlashPoint SCCB Manager.
+*/
+
+/* 8-byte scatter/gather list entry: byte count followed by bus address. */
+typedef struct BusLogic_ScatterGatherSegment
+{
+ BusLogic_ByteCount_T SegmentByteCount; /* Bytes 0-3 */
+ BusLogic_BusAddress_T SegmentDataPointer; /* Bytes 4-7 */
+}
+BusLogic_ScatterGatherSegment_T;
+
+
+/*
+ Define the Driver CCB Status Codes.
+*/
+
+/* Driver-internal CCB lifecycle state (not part of the firmware layout). */
+typedef enum
+{
+ BusLogic_CCB_Free = 0,
+ BusLogic_CCB_Active = 1,
+ BusLogic_CCB_Completed = 2,
+ BusLogic_CCB_Reset = 3
+}
+__attribute__ ((packed))
+BusLogic_CCB_Status_T;
+
+
+/*
+ Define the 32 Bit Mode Command Control Block (CCB) structure. The first 40
+ bytes are defined by and common to both the MultiMaster Firmware and the
+ FlashPoint SCCB Manager. The next 60 bytes are defined by the FlashPoint
+ SCCB Manager. The remaining components are defined by the Linux BusLogic
+ Driver. Extended LUN Format CCBs differ from Legacy LUN Format 32 Bit Mode
+ CCBs only in having the TagEnable and QueueTag fields moved from byte 17 to
+ byte 1, and the Logical Unit field in byte 17 expanded to 6 bits. In theory,
+ Extended LUN Format CCBs can support up to 64 Logical Units, but in practice
+ many devices will respond improperly to Logical Units between 32 and 63, and
+ the SCSI-2 specification defines Bit 5 as LUNTAR. Extended LUN Format CCBs
+ are used by recent versions of the MultiMaster Firmware, as well as by the
+ FlashPoint SCCB Manager; the FlashPoint SCCB Manager only supports 32 Logical
+ Units. Since 64 Logical Units are unlikely to be needed in practice, and
+ since they are problematic for the above reasons, and since limiting them to
+ 5 bits simplifies the CCB structure definition, this driver only supports
+ 32 Logical Units per Target Device.
+*/
+
+typedef struct BusLogic_CCB
+{
+ /*
+ MultiMaster Firmware and FlashPoint SCCB Manager Common Portion
+ (bytes 0-39; layout is fixed by the hardware interface).
+ */
+ BusLogic_CCB_Opcode_T Opcode; /* Byte 0 */
+ unsigned char :3; /* Byte 1 Bits 0-2 */
+ BusLogic_DataDirection_T DataDirection:2; /* Byte 1 Bits 3-4 */
+ boolean TagEnable:1; /* Byte 1 Bit 5 */
+ BusLogic_QueueTag_T QueueTag:2; /* Byte 1 Bits 6-7 */
+ unsigned char CDB_Length; /* Byte 2 */
+ unsigned char SenseDataLength; /* Byte 3 */
+ BusLogic_ByteCount_T DataLength; /* Bytes 4-7 */
+ BusLogic_BusAddress_T DataPointer; /* Bytes 8-11 */
+ unsigned char :8; /* Byte 12 */
+ unsigned char :8; /* Byte 13 */
+ BusLogic_HostAdapterStatus_T HostAdapterStatus; /* Byte 14 */
+ BusLogic_TargetDeviceStatus_T TargetDeviceStatus; /* Byte 15 */
+ unsigned char TargetID; /* Byte 16 */
+ unsigned char LogicalUnit:5; /* Byte 17 Bits 0-4 */
+ boolean LegacyTagEnable:1; /* Byte 17 Bit 5 */
+ BusLogic_QueueTag_T LegacyQueueTag:2; /* Byte 17 Bits 6-7 */
+ SCSI_CDB_T CDB; /* Bytes 18-29 */
+ unsigned char :8; /* Byte 30 */
+ unsigned char :8; /* Byte 31 */
+ unsigned int :32; /* Bytes 32-35 */
+ BusLogic_BusAddress_T SenseDataPointer; /* Bytes 36-39 */
+ /*
+ FlashPoint SCCB Manager Defined Portion.
+ */
+ void (*CallbackFunction)(struct BusLogic_CCB *); /* Bytes 40-43 */
+ BusLogic_Base_Address_T BaseAddress; /* Bytes 44-47 */
+ BusLogic_CompletionCode_T CompletionCode; /* Byte 48 */
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+ unsigned char :8; /* Byte 49 */
+ unsigned short OS_Flags; /* Bytes 50-51 */
+ unsigned char Private[48]; /* Bytes 52-99 */
+#endif
+ /*
+ BusLogic Linux Driver Defined Portion (host memory only; never seen
+ by the adapter).
+ */
+ boolean AllocationGroupHead;
+ BusLogic_CCB_Status_T Status;
+ unsigned long SerialNumber;
+ SCSI_Command_T *Command;
+ struct BusLogic_HostAdapter *HostAdapter;
+ struct BusLogic_CCB *Next;
+ struct BusLogic_CCB *NextAll;
+ BusLogic_ScatterGatherSegment_T
+ ScatterGatherList[BusLogic_ScatterGatherLimit];
+}
+BusLogic_CCB_T;
+
+
+/*
+ Define the 32 Bit Mode Outgoing Mailbox structure.
+*/
+
+/* 8-byte outgoing mailbox: CCB bus address, padding, then action code. */
+typedef struct BusLogic_OutgoingMailbox
+{
+ BusLogic_BusAddress_T CCB; /* Bytes 0-3 */
+ unsigned int :24; /* Bytes 4-6 */
+ BusLogic_ActionCode_T ActionCode; /* Byte 7 */
+}
+BusLogic_OutgoingMailbox_T;
+
+
+/*
+ Define the 32 Bit Mode Incoming Mailbox structure.
+*/
+
+/* 8-byte incoming mailbox: CCB bus address, statuses, completion code. */
+typedef struct BusLogic_IncomingMailbox
+{
+ BusLogic_BusAddress_T CCB; /* Bytes 0-3 */
+ BusLogic_HostAdapterStatus_T HostAdapterStatus; /* Byte 4 */
+ BusLogic_TargetDeviceStatus_T TargetDeviceStatus; /* Byte 5 */
+ unsigned char :8; /* Byte 6 */
+ BusLogic_CompletionCode_T CompletionCode; /* Byte 7 */
+}
+BusLogic_IncomingMailbox_T;
+
+
+/*
+ Define the BusLogic Driver Options structure.
+*/
+
+/* Per-adapter driver option overrides (host memory only). */
+typedef struct BusLogic_DriverOptions
+{
+ unsigned short TaggedQueuingPermitted;
+ unsigned short TaggedQueuingPermittedMask;
+ unsigned short BusSettleTime;
+ BusLogic_LocalOptions_T LocalOptions;
+ unsigned char CommonQueueDepth;
+ unsigned char QueueDepth[BusLogic_MaxTargetDevices];
+ BusLogic_ErrorRecoveryStrategy_T
+ ErrorRecoveryStrategy[BusLogic_MaxTargetDevices];
+}
+BusLogic_DriverOptions_T;
+
+
+/*
+ Define the Host Adapter Target Flags structure.
+*/
+
+/* Per-target state flags maintained by the driver. */
+typedef struct BusLogic_TargetFlags
+{
+ boolean TargetExists:1;
+ boolean TaggedQueuingSupported:1;
+ boolean WideTransfersSupported:1;
+ boolean TaggedQueuingActive:1;
+ boolean WideTransfersActive:1;
+ boolean CommandSuccessfulFlag:1;
+ boolean TargetInfoReported:1;
+}
+BusLogic_TargetFlags_T;
+
+
+/*
+ Define the Host Adapter Target Statistics structure.
+*/
+
+/* Number of histogram buckets for command transfer sizes. */
+#define BusLogic_SizeBuckets 10
+
+typedef unsigned int BusLogic_CommandSizeBuckets_T[BusLogic_SizeBuckets];
+
+/* Per-target command/reset counters maintained by the driver. */
+typedef struct BusLogic_TargetStatistics
+{
+ unsigned int CommandsAttempted;
+ unsigned int CommandsCompleted;
+ unsigned int ReadCommands;
+ unsigned int WriteCommands;
+ BusLogic_ByteCounter_T TotalBytesRead;
+ BusLogic_ByteCounter_T TotalBytesWritten;
+ BusLogic_CommandSizeBuckets_T ReadCommandSizeBuckets;
+ BusLogic_CommandSizeBuckets_T WriteCommandSizeBuckets;
+ unsigned short CommandAbortsRequested;
+ unsigned short CommandAbortsAttempted;
+ unsigned short CommandAbortsCompleted;
+ unsigned short BusDeviceResetsRequested;
+ unsigned short BusDeviceResetsAttempted;
+ unsigned short BusDeviceResetsCompleted;
+ unsigned short HostAdapterResetsRequested;
+ unsigned short HostAdapterResetsAttempted;
+ unsigned short HostAdapterResetsCompleted;
+}
+BusLogic_TargetStatistics_T;
+
+
+/*
+ Define the FlashPoint Card Handle data type.
+*/
+
+/* Sentinel returned by the SCCB Manager when no card handle is valid. */
+#define FlashPoint_BadCardHandle 0xFFFFFFFF
+
+typedef unsigned int FlashPoint_CardHandle_T;
+
+
+/*
+ Define the FlashPoint Information structure. This structure is defined
+ by the FlashPoint SCCB Manager.
+*/
+
+/* 64-byte information block exchanged with the FlashPoint SCCB Manager. */
+typedef struct FlashPoint_Info
+{
+ BusLogic_Base_Address_T BaseAddress; /* Bytes 0-3 */
+ boolean Present; /* Byte 4 */
+ unsigned char IRQ_Channel; /* Byte 5 */
+ unsigned char SCSI_ID; /* Byte 6 */
+ unsigned char SCSI_LUN; /* Byte 7 */
+ unsigned short FirmwareRevision; /* Bytes 8-9 */
+ unsigned short SynchronousPermitted; /* Bytes 10-11 */
+ unsigned short FastPermitted; /* Bytes 12-13 */
+ unsigned short UltraPermitted; /* Bytes 14-15 */
+ unsigned short DisconnectPermitted; /* Bytes 16-17 */
+ unsigned short WidePermitted; /* Bytes 18-19 */
+ boolean ParityCheckingEnabled:1; /* Byte 20 Bit 0 */
+ boolean HostWideSCSI:1; /* Byte 20 Bit 1 */
+ boolean HostSoftReset:1; /* Byte 20 Bit 2 */
+ boolean ExtendedTranslationEnabled:1; /* Byte 20 Bit 3 */
+ boolean LowByteTerminated:1; /* Byte 20 Bit 4 */
+ boolean HighByteTerminated:1; /* Byte 20 Bit 5 */
+ boolean ReportDataUnderrun:1; /* Byte 20 Bit 6 */
+ boolean SCAM_Enabled:1; /* Byte 20 Bit 7 */
+ boolean SCAM_Level2:1; /* Byte 21 Bit 0 */
+ unsigned char :7; /* Byte 21 Bits 1-7 */
+ unsigned char Family; /* Byte 22 */
+ unsigned char BusType; /* Byte 23 */
+ unsigned char ModelNumber[3]; /* Bytes 24-26 */
+ unsigned char RelativeCardNumber; /* Byte 27 */
+ unsigned char Reserved[4]; /* Bytes 28-31 */
+ unsigned int OS_Reserved; /* Bytes 32-35 */
+ unsigned char TranslationInfo[4]; /* Bytes 36-39 */
+ unsigned int Reserved2[5]; /* Bytes 40-59 */
+ unsigned int SecondaryRange; /* Bytes 60-63 */
+}
+FlashPoint_Info_T;
+
+
+/*
+ Define the BusLogic Driver Host Adapter structure.
+*/
+
+/* Driver-private per-adapter state (host memory only; the adapter sees
+ only the MailboxSpace contents and individual CCBs via bus addresses). */
+typedef struct BusLogic_HostAdapter
+{
+ /* Identity and probed hardware resources. */
+ SCSI_Host_T *SCSI_Host;
+ BusLogic_HostAdapterType_T HostAdapterType;
+ BusLogic_HostAdapterBusType_T HostAdapterBusType;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned short AddressCount;
+ unsigned char HostNumber;
+ unsigned char ModelName[9];
+ unsigned char FirmwareVersion[6];
+ unsigned char FullModelName[18];
+ unsigned char Bus;
+ unsigned char Device;
+ unsigned char IRQ_Channel;
+ unsigned char DMA_Channel;
+ unsigned char SCSI_ID;
+ /* Capability and state flags. */
+ boolean IRQ_ChannelAcquired:1;
+ boolean DMA_ChannelAcquired:1;
+ boolean ExtendedTranslationEnabled:1;
+ boolean ParityCheckingEnabled:1;
+ boolean BusResetEnabled:1;
+ boolean LevelSensitiveInterrupt:1;
+ boolean HostWideSCSI:1;
+ boolean HostDifferentialSCSI:1;
+ boolean HostSupportsSCAM:1;
+ boolean HostUltraSCSI:1;
+ boolean ExtendedLUNSupport:1;
+ boolean TerminationInfoValid:1;
+ boolean LowByteTerminated:1;
+ boolean HighByteTerminated:1;
+ boolean BounceBuffersRequired:1;
+ boolean StrictRoundRobinModeSupport:1;
+ boolean SCAM_Enabled:1;
+ boolean SCAM_Level2:1;
+ boolean HostAdapterInitialized:1;
+ boolean HostAdapterExternalReset:1;
+ boolean HostAdapterInternalError:1;
+ boolean ProcessCompletedCCBsActive;
+ volatile boolean HostAdapterCommandCompleted;
+ /* Limits, queue depths, and per-command-type permissions. */
+ unsigned short HostAdapterScatterGatherLimit;
+ unsigned short DriverScatterGatherLimit;
+ unsigned short MaxTargetDevices;
+ unsigned short MaxLogicalUnits;
+ unsigned short MailboxCount;
+ unsigned short InitialCCBs;
+ unsigned short IncrementalCCBs;
+ unsigned short AllocatedCCBs;
+ unsigned short DriverQueueDepth;
+ unsigned short HostAdapterQueueDepth;
+ unsigned short UntaggedQueueDepth;
+ unsigned short CommonQueueDepth;
+ unsigned short BusSettleTime;
+ unsigned short SynchronousPermitted;
+ unsigned short FastPermitted;
+ unsigned short UltraPermitted;
+ unsigned short WidePermitted;
+ unsigned short DisconnectPermitted;
+ unsigned short TaggedQueuingPermitted;
+ unsigned short ExternalHostAdapterResets;
+ unsigned short HostAdapterInternalErrors;
+ unsigned short TargetDeviceCount;
+ unsigned short MessageBufferLength;
+ BusLogic_BusAddress_T BIOS_Address;
+ BusLogic_DriverOptions_T *DriverOptions;
+ FlashPoint_Info_T FlashPointInfo;
+ FlashPoint_CardHandle_T CardHandle;
+ /* CCB pools and per-target bookkeeping. */
+ struct BusLogic_HostAdapter *Next;
+ BusLogic_CCB_T *All_CCBs;
+ BusLogic_CCB_T *Free_CCBs;
+ BusLogic_CCB_T *FirstCompletedCCB;
+ BusLogic_CCB_T *LastCompletedCCB;
+ BusLogic_CCB_T *BusDeviceResetPendingCCB[BusLogic_MaxTargetDevices];
+ BusLogic_ErrorRecoveryStrategy_T
+ ErrorRecoveryStrategy[BusLogic_MaxTargetDevices];
+ BusLogic_TargetFlags_T TargetFlags[BusLogic_MaxTargetDevices];
+ unsigned char QueueDepth[BusLogic_MaxTargetDevices];
+ unsigned char SynchronousPeriod[BusLogic_MaxTargetDevices];
+ unsigned char SynchronousOffset[BusLogic_MaxTargetDevices];
+ unsigned char ActiveCommands[BusLogic_MaxTargetDevices];
+ unsigned int CommandsSinceReset[BusLogic_MaxTargetDevices];
+ unsigned long LastSequencePoint[BusLogic_MaxTargetDevices];
+ unsigned long LastResetAttempted[BusLogic_MaxTargetDevices];
+ unsigned long LastResetCompleted[BusLogic_MaxTargetDevices];
+ /* Mailbox rings; NextOutgoing/NextIncoming are the scan cursors. */
+ BusLogic_OutgoingMailbox_T *FirstOutgoingMailbox;
+ BusLogic_OutgoingMailbox_T *LastOutgoingMailbox;
+ BusLogic_OutgoingMailbox_T *NextOutgoingMailbox;
+ BusLogic_IncomingMailbox_T *FirstIncomingMailbox;
+ BusLogic_IncomingMailbox_T *LastIncomingMailbox;
+ BusLogic_IncomingMailbox_T *NextIncomingMailbox;
+ BusLogic_TargetStatistics_T TargetStatistics[BusLogic_MaxTargetDevices];
+ unsigned char MailboxSpace[BusLogic_MaxMailboxes
+ * (sizeof(BusLogic_OutgoingMailbox_T)
+ + sizeof(BusLogic_IncomingMailbox_T))];
+ char MessageBuffer[BusLogic_MessageBufferSize];
+}
+BusLogic_HostAdapter_T;
+
+
+/*
+ Define a structure for the BIOS Disk Parameters.
+*/
+
+/* BIOS-style disk geometry (heads/sectors/cylinders). */
+typedef struct BIOS_DiskParameters
+{
+ int Heads;
+ int Sectors;
+ int Cylinders;
+}
+BIOS_DiskParameters_T;
+
+
+/*
+ Define a structure for the SCSI Inquiry command results.
+*/
+
+/* 36-byte SCSI INQUIRY command response data format. */
+typedef struct SCSI_Inquiry
+{
+ unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
+ unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
+ unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */
+ boolean RMB:1; /* Byte 1 Bit 7 */
+ unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */
+ unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */
+ unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */
+ unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */
+ unsigned char :2; /* Byte 3 Bits 4-5 */
+ boolean TrmIOP:1; /* Byte 3 Bit 6 */
+ boolean AENC:1; /* Byte 3 Bit 7 */
+ unsigned char AdditionalLength; /* Byte 4 */
+ unsigned char :8; /* Byte 5 */
+ unsigned char :8; /* Byte 6 */
+ boolean SftRe:1; /* Byte 7 Bit 0 */
+ boolean CmdQue:1; /* Byte 7 Bit 1 */
+ boolean :1; /* Byte 7 Bit 2 */
+ boolean Linked:1; /* Byte 7 Bit 3 */
+ boolean Sync:1; /* Byte 7 Bit 4 */
+ boolean WBus16:1; /* Byte 7 Bit 5 */
+ boolean WBus32:1; /* Byte 7 Bit 6 */
+ boolean RelAdr:1; /* Byte 7 Bit 7 */
+ unsigned char VendorIdentification[8]; /* Bytes 8-15 */
+ unsigned char ProductIdentification[16]; /* Bytes 16-31 */
+ unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */
+}
+SCSI_Inquiry_T;
+
+
+/*
+ BusLogic_AcquireHostAdapterLock acquires exclusive access to Host Adapter.
+*/
+
+static inline
+void BusLogic_AcquireHostAdapterLock(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+ /* Save the current interrupt state, then disable interrupts; paired
+ with BusLogic_ReleaseHostAdapterLock. HostAdapter is unused here. */
+ save_flags(*ProcessorFlags);
+ cli();
+}
+
+
+/*
+ BusLogic_ReleaseHostAdapterLock releases exclusive access to Host Adapter.
+*/
+
+static inline
+void BusLogic_ReleaseHostAdapterLock(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+ /* Restore the interrupt state saved by the matching Acquire. */
+ restore_flags(*ProcessorFlags);
+}
+
+
+/*
+ BusLogic_AcquireHostAdapterLockIH acquires exclusive access to Host Adapter,
+ but is only called from the interrupt handler when interrupts are disabled.
+*/
+
+static inline
+void BusLogic_AcquireHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+ /* Intentionally empty: interrupts are already disabled in the
+ interrupt handler, so no additional locking is required. */
+}
+
+
+/*
+ BusLogic_ReleaseHostAdapterLockIH releases exclusive access to Host Adapter,
+ but is only called from the interrupt handler when interrupts are disabled.
+*/
+
+static inline
+void BusLogic_ReleaseHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+ /* Intentionally empty: counterpart of the no-op AcquireIH above. */
+}
+
+
+/*
+ Define functions to provide an abstraction for reading and writing the
+ Host Adapter I/O Registers.
+*/
+
+static inline
+void BusLogic_SCSIBusReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /* Pulse only the SCSI Bus Reset bit in the Control Register. */
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.SCSIBusReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+void BusLogic_InterruptReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /* Acknowledge/clear a pending interrupt via the Control Register. */
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.InterruptReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+void BusLogic_SoftReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /* Issue a Soft Reset through the Control Register. */
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.SoftReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+void BusLogic_HardReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /* Issue a Hard Reset through the Control Register. */
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.HardReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadStatusRegister(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /* Read the raw Status Register byte. */
+ return inb(HostAdapter->IO_Address + BusLogic_StatusRegisterOffset);
+}
+
+static inline
+void BusLogic_WriteCommandParameterRegister(BusLogic_HostAdapter_T
+ *HostAdapter,
+ unsigned char Value)
+{
+ outb(Value,
+ HostAdapter->IO_Address + BusLogic_CommandParameterRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadDataInRegister(BusLogic_HostAdapter_T *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_DataInRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadInterruptRegister(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_InterruptRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadGeometryRegister(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_GeometryRegisterOffset);
+}
+
+
+/*
+ BusLogic_StartMailboxCommand issues an Execute Mailbox Command, which
+ notifies the Host Adapter that an entry has been made in an Outgoing
+ Mailbox.
+*/
+
+static inline
+void BusLogic_StartMailboxCommand(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_WriteCommandParameterRegister(HostAdapter,
+ BusLogic_ExecuteMailboxCommand);
+}
+
+
+/*
+ BusLogic_Delay waits for Seconds to elapse.
+*/
+
+static inline void BusLogic_Delay(int Seconds)
+{
+ int Milliseconds = 1000 * Seconds;
+ unsigned long ProcessorFlags;
+ save_flags(ProcessorFlags);
+ sti();
+ while (--Milliseconds >= 0) udelay(1000);
+ restore_flags(ProcessorFlags);
+}
+
+
+/*
+ Virtual_to_Bus and Bus_to_Virtual map between Kernel Virtual Addresses
+ and PCI/VLB/EISA/ISA Bus Addresses.
+*/
+
+static inline BusLogic_BusAddress_T Virtual_to_Bus(void *VirtualAddress)
+{
+ return (BusLogic_BusAddress_T) virt_to_bus(VirtualAddress);
+}
+
+static inline void *Bus_to_Virtual(BusLogic_BusAddress_T BusAddress)
+{
+ return (void *) bus_to_virt(BusAddress);
+}
+
+
+/*
+ Virtual_to_32Bit_Virtual maps between Kernel Virtual Addresses and
+ 32 bit Kernel Virtual Addresses. This avoids compilation warnings
+ on 64 bit architectures.
+*/
+
+static inline
+BusLogic_BusAddress_T Virtual_to_32Bit_Virtual(void *VirtualAddress)
+{
+ return (BusLogic_BusAddress_T) (unsigned long) VirtualAddress;
+}
+
+
+/*
+ BusLogic_IncrementErrorCounter increments Error Counter by 1, stopping at
+ 65535 rather than wrapping around to 0.
+*/
+
+static inline void BusLogic_IncrementErrorCounter(unsigned short *ErrorCounter)
+{
+ if (*ErrorCounter < 65535) (*ErrorCounter)++;
+}
+
+
+/*
+ BusLogic_IncrementByteCounter increments Byte Counter by Amount.
+*/
+
+static inline void BusLogic_IncrementByteCounter(BusLogic_ByteCounter_T
+ *ByteCounter,
+ unsigned int Amount)
+{
+ ByteCounter->Units += Amount;
+ if (ByteCounter->Units > 999999999)
+ {
+ ByteCounter->Units -= 1000000000;
+ ByteCounter->Billions++;
+ }
+}
+
+
+/*
+ BusLogic_IncrementSizeBucket increments the Bucket for Amount.
+*/
+
+static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T
+ CommandSizeBuckets,
+ unsigned int Amount)
+{
+ int Index = 0;
+ if (Amount < 8*1024)
+ {
+ if (Amount < 2*1024)
+ Index = (Amount < 1*1024 ? 0 : 1);
+ else Index = (Amount < 4*1024 ? 2 : 3);
+ }
+ else if (Amount < 128*1024)
+ {
+ if (Amount < 32*1024)
+ Index = (Amount < 16*1024 ? 4 : 5);
+ else Index = (Amount < 64*1024 ? 6 : 7);
+ }
+ else Index = (Amount < 256*1024 ? 8 : 9);
+ CommandSizeBuckets[Index]++;
+}
+
+
+/*
+ Define the version number of the FlashPoint Firmware (SCCB Manager).
+*/
+
+#define FlashPoint_FirmwareVersion "5.02"
+
+
+/*
+ Define the possible return values from FlashPoint_HandleInterrupt.
+*/
+
+#define FlashPoint_NormalInterrupt 0x00
+#define FlashPoint_InternalError 0xFE
+#define FlashPoint_ExternalBusReset 0xFF
+
+
+/*
+ Define prototypes for the forward referenced BusLogic Driver
+ Internal Functions.
+*/
+
+static void BusLogic_QueueCompletedCCB(BusLogic_CCB_T *);
+static void BusLogic_InterruptHandler(int, void *, Registers_T *);
+static int BusLogic_ResetHostAdapter(BusLogic_HostAdapter_T *,
+ SCSI_Command_T *, unsigned int);
+static void BusLogic_Message(BusLogic_MessageLevel_T, char *,
+ BusLogic_HostAdapter_T *, ...);
+static void BusLogic_ParseDriverOptions(char *);
+
+
+#endif /* BusLogic_DriverVersion */
diff --git a/linux/src/drivers/scsi/FlashPoint.c b/linux/src/drivers/scsi/FlashPoint.c
new file mode 100644
index 0000000..8d2f102
--- /dev/null
+++ b/linux/src/drivers/scsi/FlashPoint.c
@@ -0,0 +1,12156 @@
+/*
+
+ FlashPoint.c -- FlashPoint SCCB Manager for Linux
+
+ This file contains the FlashPoint SCCB Manager from BusLogic's FlashPoint
+ Driver Developer's Kit, with minor modifications by Leonard N. Zubkoff for
+ Linux compatibility. It was provided by BusLogic in the form of 16 separate
+ source files, which would have unnecessarily cluttered the scsi directory, so
+ the individual files have been combined into this single file.
+
+ Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+
+ This file is available under both the GNU General Public License
+ and a BSD-style copyright; see LICENSE.FlashPoint for details.
+
+*/
+
+
+#include <linux/config.h>
+
+
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+
+
+#define UNIX
+#define FW_TYPE _SCCB_MGR_
+#define MAX_CARDS 8
+#undef BUSTYPE_PCI
+
+
+#define OS_InPortByte(port) inb(port)
+#define OS_InPortWord(port) inw(port)
+#define OS_InPortLong(port) inl(port)
+#define OS_OutPortByte(port, value) outb(value, port)
+#define OS_OutPortWord(port, value) outw(value, port)
+#define OS_OutPortLong(port, value) outl(value, port)
+#define OS_Lock(x)
+#define OS_UnLock(x)
+
+
+/*
+ Define name replacements for compatibility with the Linux BusLogic Driver.
+*/
+
+#define SccbMgr_sense_adapter FlashPoint_ProbeHostAdapter
+#define SccbMgr_config_adapter FlashPoint_HardwareResetHostAdapter
+#define SccbMgr_unload_card FlashPoint_ReleaseHostAdapter
+#define SccbMgr_start_sccb FlashPoint_StartCCB
+#define SccbMgr_abort_sccb FlashPoint_AbortCCB
+#define SccbMgr_my_int FlashPoint_InterruptPending
+#define SccbMgr_isr FlashPoint_HandleInterrupt
+
+
+/*
+ Define name replacements to avoid kernel namespace pollution.
+*/
+
+#define BL_Card FPT_BL_Card
+#define BusMasterInit FPT_BusMasterInit
+#define CalcCrc16 FPT_CalcCrc16
+#define CalcLrc FPT_CalcLrc
+#define ChkIfChipInitialized FPT_ChkIfChipInitialized
+#define DiagBusMaster FPT_DiagBusMaster
+#define DiagEEPROM FPT_DiagEEPROM
+#define DiagXbow FPT_DiagXbow
+#define GetTarLun FPT_GetTarLun
+#define RNVRamData FPT_RNVRamData
+#define RdStack FPT_RdStack
+#define SccbMgrTableInitAll FPT_SccbMgrTableInitAll
+#define SccbMgrTableInitCard FPT_SccbMgrTableInitCard
+#define SccbMgrTableInitTarget FPT_SccbMgrTableInitTarget
+#define SccbMgr_bad_isr FPT_SccbMgr_bad_isr
+#define SccbMgr_scsi_reset FPT_SccbMgr_scsi_reset
+#define SccbMgr_timer_expired FPT_SccbMgr_timer_expired
+#define SendMsg FPT_SendMsg
+#define Wait FPT_Wait
+#define Wait1Second FPT_Wait1Second
+#define WrStack FPT_WrStack
+#define XbowInit FPT_XbowInit
+#define autoCmdCmplt FPT_autoCmdCmplt
+#define autoLoadDefaultMap FPT_autoLoadDefaultMap
+#define busMstrDataXferStart FPT_busMstrDataXferStart
+#define busMstrSGDataXferStart FPT_busMstrSGDataXferStart
+#define busMstrTimeOut FPT_busMstrTimeOut
+#define dataXferProcessor FPT_dataXferProcessor
+#define default_intena FPT_default_intena
+#define hostDataXferAbort FPT_hostDataXferAbort
+#define hostDataXferRestart FPT_hostDataXferRestart
+#define inisci FPT_inisci
+#define mbCards FPT_mbCards
+#define nvRamInfo FPT_nvRamInfo
+#define phaseBusFree FPT_phaseBusFree
+#define phaseChkFifo FPT_phaseChkFifo
+#define phaseCommand FPT_phaseCommand
+#define phaseDataIn FPT_phaseDataIn
+#define phaseDataOut FPT_phaseDataOut
+#define phaseDecode FPT_phaseDecode
+#define phaseIllegal FPT_phaseIllegal
+#define phaseMsgIn FPT_phaseMsgIn
+#define phaseMsgOut FPT_phaseMsgOut
+#define phaseStatus FPT_phaseStatus
+#define queueAddSccb FPT_queueAddSccb
+#define queueCmdComplete FPT_queueCmdComplete
+#define queueDisconnect FPT_queueDisconnect
+#define queueFindSccb FPT_queueFindSccb
+#define queueFlushSccb FPT_queueFlushSccb
+#define queueFlushTargSccb FPT_queueFlushTargSccb
+#define queueSearchSelect FPT_queueSearchSelect
+#define queueSelectFail FPT_queueSelectFail
+#define s_PhaseTbl FPT_s_PhaseTbl
+#define scamHAString FPT_scamHAString
+#define scamInfo FPT_scamInfo
+#define scarb FPT_scarb
+#define scasid FPT_scasid
+#define scbusf FPT_scbusf
+#define sccbMgrTbl FPT_sccbMgrTbl
+#define schkdd FPT_schkdd
+#define scini FPT_scini
+#define sciso FPT_sciso
+#define scmachid FPT_scmachid
+#define scsavdi FPT_scsavdi
+#define scsel FPT_scsel
+#define scsell FPT_scsell
+#define scsendi FPT_scsendi
+#define scvalq FPT_scvalq
+#define scwirod FPT_scwirod
+#define scwiros FPT_scwiros
+#define scwtsel FPT_scwtsel
+#define scxferc FPT_scxferc
+#define sdecm FPT_sdecm
+#define sfm FPT_sfm
+#define shandem FPT_shandem
+#define sinits FPT_sinits
+#define sisyncn FPT_sisyncn
+#define sisyncr FPT_sisyncr
+#define siwidn FPT_siwidn
+#define siwidr FPT_siwidr
+#define sres FPT_sres
+#define sresb FPT_sresb
+#define ssel FPT_ssel
+#define ssenss FPT_ssenss
+#define sssyncv FPT_sssyncv
+#define stsyncn FPT_stsyncn
+#define stwidn FPT_stwidn
+#define sxfrp FPT_sxfrp
+#define utilEERead FPT_utilEERead
+#define utilEEReadOrg FPT_utilEEReadOrg
+#define utilEESendCmdAddr FPT_utilEESendCmdAddr
+#define utilEEWrite FPT_utilEEWrite
+#define utilEEWriteOnOff FPT_utilEEWriteOnOff
+#define utilUpdateResidual FPT_utilUpdateResidual
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: globals.h $
+ *
+ * Description: Common shared global defines.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+#ifndef __GLOBALS_H__
+#define __GLOBALS_H__
+
+#define _UCB_MGR_ 1
+#define _SCCB_MGR_ 2
+
+/*#include <osflags.h>*/
+
+#define MAX_CDBLEN 12
+
+#define SCAM_LEV_2 1
+
+#define CRCMASK 0xA001
+
+/* In your osflags.h file, please ENSURE that only ONE OS FLAG
+ is on at a time !!! Also, please make sure you turn set the
+ variable FW_TYPE to either _UCB_MGR_ or _SCCB_MGR_ !!! */
+
+#if defined(DOS) || defined(WIN95_16) || defined(OS2) || defined(OTHER_16)
+ #define COMPILER_16_BIT 1
+#elif defined(NETWARE) || defined(NT) || defined(WIN95_32) || defined(UNIX) || defined(OTHER_32) || defined(SOLARIS_REAL_MODE)
+ #define COMPILER_32_BIT 1
+#endif
+
+
+#define BL_VENDOR_ID 0x104B
+#define FP_DEVICE_ID 0x8130
+#define MM_DEVICE_ID 0x1040
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef TRUE
+#define TRUE (!(FALSE))
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define FAILURE 0xFFFFFFFFL
+
+
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned int UINT;
+typedef unsigned long ULONG;
+typedef unsigned char * PUCHAR;
+typedef unsigned short* PUSHORT;
+typedef unsigned long * PULONG;
+typedef void * PVOID;
+
+
+#if defined(COMPILER_16_BIT)
+typedef unsigned char far * uchar_ptr;
+typedef unsigned short far * ushort_ptr;
+typedef unsigned long far * ulong_ptr;
+#endif /* 16_BIT_COMPILER */
+
+#if defined(COMPILER_32_BIT)
+typedef unsigned char * uchar_ptr;
+typedef unsigned short * ushort_ptr;
+typedef unsigned long * ulong_ptr;
+#endif /* 32_BIT_COMPILER */
+
+
+/* NEW TYPE DEFINITIONS (shared with Mylex North)
+
+** Use following type defines to avoid confusion in 16 and 32-bit
+** environments. Avoid using 'int' as it denotes 16 bits in 16-bit
+** environment and 32 in 32-bit environments.
+
+*/
+
+#define s08bits char
+#define s16bits short
+#define s32bits long
+
+#define u08bits unsigned s08bits
+#define u16bits unsigned s16bits
+#define u32bits unsigned s32bits
+
+#if defined(COMPILER_16_BIT)
+
+typedef u08bits far * pu08bits;
+typedef u16bits far * pu16bits;
+typedef u32bits far * pu32bits;
+
+#endif /* COMPILER_16_BIT */
+
+#if defined(COMPILER_32_BIT)
+
+typedef u08bits * pu08bits;
+typedef u16bits * pu16bits;
+typedef u32bits * pu32bits;
+
+#endif /* COMPILER_32_BIT */
+
+
+#define BIT(x) ((UCHAR)(1<<(x))) /* single-bit mask in bit position x */
+#define BITW(x) ((USHORT)(1<<(x))) /* single-bit mask in bit position x */
+
+
+
+#if defined(DOS)
+/*#include <dos.h>*/
+ #undef inportb /* undefine for Borland Lib */
+ #undef inport /* they may have define I/O function in LIB */
+ #undef outportb
+ #undef outport
+
+ #define OS_InPortByte(ioport) inportb(ioport)
+ #define OS_InPortWord(ioport) inport(ioport)
+ #define OS_InPortLong(ioport) inportq(ioport, val)
+ #define OS_OutPortByte(ioport, val) outportb(ioport, val)
+ #define OS_OutPortWord(ioport, val) outport(ioport, val)
+ #define OS_OutPortLong(ioport) outportq(ioport, val)
+#endif /* DOS */
+
+#if defined(NETWARE) || defined(OTHER_32) || defined(OTHER_16)
+ extern u08bits OS_InPortByte(u32bits ioport);
+ extern u16bits OS_InPortWord(u32bits ioport);
+ extern u32bits OS_InPortLong(u32bits ioport);
+
+ extern OS_InPortByteBuffer(u32bits ioport, pu08bits buffer, u32bits count);
+ extern OS_InPortWordBuffer(u32bits ioport, pu16bits buffer, u32bits count);
+ extern OS_OutPortByte(u32bits ioport, u08bits val);
+ extern OS_OutPortWord(u32bits ioport, u16bits val);
+ extern OS_OutPortLong(u32bits ioport, u32bits val);
+ extern OS_OutPortByteBuffer(u32bits ioport, pu08bits buffer, u32bits count);
+ extern OS_OutPortWordBuffer(u32bits ioport, pu16bits buffer, u32bits count);
+#endif /* NETWARE || OTHER_32 || OTHER_16 */
+
+#if defined (NT) || defined(WIN95_32) || defined(WIN95_16)
+ #if defined(NT)
+
+ extern __declspec(dllimport) u08bits ScsiPortReadPortUchar(pu08bits ioport);
+ extern __declspec(dllimport) u16bits ScsiPortReadPortUshort(pu16bits ioport);
+ extern __declspec(dllimport) u32bits ScsiPortReadPortUlong(pu32bits ioport);
+ extern __declspec(dllimport) void ScsiPortWritePortUchar(pu08bits ioport, u08bits val);
+ extern __declspec(dllimport) void ScsiPortWritePortUshort(pu16bits port, u16bits val);
+ extern __declspec(dllimport) void ScsiPortWritePortUlong(pu32bits port, u32bits val);
+
+ #else
+
+ extern u08bits ScsiPortReadPortUchar(pu08bits ioport);
+ extern u16bits ScsiPortReadPortUshort(pu16bits ioport);
+ extern u32bits ScsiPortReadPortUlong(pu32bits ioport);
+ extern void ScsiPortWritePortUchar(pu08bits ioport, u08bits val);
+ extern void ScsiPortWritePortUshort(pu16bits port, u16bits val);
+ extern void ScsiPortWritePortUlong(pu32bits port, u32bits val);
+ #endif
+
+
+ #define OS_InPortByte(ioport) ScsiPortReadPortUchar((pu08bits) ioport)
+ #define OS_InPortWord(ioport) ScsiPortReadPortUshort((pu16bits) ioport)
+ #define OS_InPortLong(ioport) ScsiPortReadPortUlong((pu32bits) ioport)
+
+ #define OS_OutPortByte(ioport, val) ScsiPortWritePortUchar((pu08bits) ioport, (u08bits) val)
+ #define OS_OutPortWord(ioport, val) ScsiPortWritePortUshort((pu16bits) ioport, (u16bits) val)
+ #define OS_OutPortLong(ioport, val) ScsiPortWritePortUlong((pu32bits) ioport, (u32bits) val)
+ #define OS_OutPortByteBuffer(ioport, buffer, count) \
+ ScsiPortWritePortBufferUchar((pu08bits)&port, (pu08bits) buffer, (u32bits) count)
+ #define OS_OutPortWordBuffer(ioport, buffer, count) \
+ ScsiPortWritePortBufferUshort((pu16bits)&port, (pu16bits) buffer, (u32bits) count)
+
+ #define OS_Lock(x)
+ #define OS_UnLock(x)
+#endif /* NT || WIN95_32 || WIN95_16 */
+
+#if defined (UNIX) && !defined(OS_InPortByte)
+ #define OS_InPortByte(ioport) inb((u16bits)ioport)
+ #define OS_InPortWord(ioport) inw((u16bits)ioport)
+ #define OS_InPortLong(ioport) inl((u16bits)ioport)
+ #define OS_OutPortByte(ioport,val) outb((u16bits)ioport, (u08bits)val)
+ #define OS_OutPortWord(ioport,val) outw((u16bits)ioport, (u16bits)val)
+ #define OS_OutPortLong(ioport,val) outl((u16bits)ioport, (u32bits)val)
+
+ #define OS_Lock(x)
+ #define OS_UnLock(x)
+#endif /* UNIX */
+
+
+#if defined(OS2)
+ extern u08bits inb(u32bits ioport);
+ extern u16bits inw(u32bits ioport);
+ extern void outb(u32bits ioport, u08bits val);
+ extern void outw(u32bits ioport, u16bits val);
+
+ #define OS_InPortByte(ioport) inb(ioport)
+ #define OS_InPortWord(ioport) inw(ioport)
+ #define OS_OutPortByte(ioport, val) outb(ioport, val)
+ #define OS_OutPortWord(ioport, val) outw(ioport, val)
+ extern u32bits OS_InPortLong(u32bits ioport);
+ extern void OS_OutPortLong(u32bits ioport, u32bits val);
+
+ #define OS_Lock(x)
+ #define OS_UnLock(x)
+#endif /* OS2 */
+
+#if defined(SOLARIS_REAL_MODE)
+
+extern unsigned char inb(unsigned long ioport);
+extern unsigned short inw(unsigned long ioport);
+
+#define OS_InPortByte(ioport) inb(ioport)
+#define OS_InPortWord(ioport) inw(ioport)
+
+extern void OS_OutPortByte(unsigned long ioport, unsigned char val);
+extern void OS_OutPortWord(unsigned long ioport, unsigned short val);
+extern unsigned long OS_InPortLong(unsigned long ioport);
+extern void OS_OutPortLong(unsigned long ioport, unsigned long val);
+
+#define OS_Lock(x)
+#define OS_UnLock(x)
+
+#endif /* SOLARIS_REAL_MODE */
+
+#endif /* __GLOBALS_H__ */
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: sccbmgr.h $
+ *
+ * Description: Common shared SCCB Interface defines and SCCB
+ * Manager specifics defines.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __SCCB_H__
+#define __SCCB_H__
+
+/*#include <osflags.h>*/
+/*#include <globals.h>*/
+
+#if defined(BUGBUG)
+#define debug_size 32
+#endif
+
+#if defined(DOS)
+
+ typedef struct _SCCB near *PSCCB;
+ #if (FW_TYPE == _SCCB_MGR_)
+ typedef void (*CALL_BK_FN)(PSCCB);
+ #endif
+
+#elif defined(OS2)
+
+ typedef struct _SCCB far *PSCCB;
+ #if (FW_TYPE == _SCCB_MGR_)
+ typedef void (far *CALL_BK_FN)(PSCCB);
+ #endif
+
+#else
+
+ typedef struct _SCCB *PSCCB;
+ #if (FW_TYPE == _SCCB_MGR_)
+ typedef void (*CALL_BK_FN)(PSCCB);
+ #endif
+
+#endif
+
+
+typedef struct SCCBMgr_info {
+ ULONG si_baseaddr;
+ UCHAR si_present;
+ UCHAR si_intvect;
+ UCHAR si_id;
+ UCHAR si_lun;
+ USHORT si_fw_revision;
+ USHORT si_per_targ_init_sync;
+ USHORT si_per_targ_fast_nego;
+ USHORT si_per_targ_ultra_nego;
+ USHORT si_per_targ_no_disc;
+ USHORT si_per_targ_wide_nego;
+ USHORT si_flags;
+ UCHAR si_card_family;
+ UCHAR si_bustype;
+ UCHAR si_card_model[3];
+ UCHAR si_relative_cardnum;
+ UCHAR si_reserved[4];
+ ULONG si_OS_reserved;
+ UCHAR si_XlatInfo[4];
+ ULONG si_reserved2[5];
+ ULONG si_secondary_range;
+} SCCBMGR_INFO;
+
+#if defined(DOS)
+ typedef SCCBMGR_INFO * PSCCBMGR_INFO;
+#else
+ #if defined (COMPILER_16_BIT)
+ typedef SCCBMGR_INFO far * PSCCBMGR_INFO;
+ #else
+ typedef SCCBMGR_INFO * PSCCBMGR_INFO;
+ #endif
+#endif // defined(DOS)
+
+
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+ #define SCSI_PARITY_ENA 0x0001
+ #define LOW_BYTE_TERM 0x0010
+ #define HIGH_BYTE_TERM 0x0020
+ #define BUSTYPE_PCI 0x3
+#endif
+
+#define SUPPORT_16TAR_32LUN 0x0002
+#define SOFT_RESET 0x0004
+#define EXTENDED_TRANSLATION 0x0008
+#define POST_ALL_UNDERRRUNS 0x0040
+#define FLAG_SCAM_ENABLED 0x0080
+#define FLAG_SCAM_LEVEL2 0x0100
+
+
+
+
+#define HARPOON_FAMILY 0x02
+
+
+#define ISA_BUS_CARD 0x01
+#define EISA_BUS_CARD 0x02
+#define PCI_BUS_CARD 0x03
+#define VESA_BUS_CARD 0x04
+
+/* SCCB struc used for both SCCB and UCB manager compiles!
+ * The UCB Manager treats the SCCB as it's 'native hardware structure'
+ */
+
+
+#pragma pack(1)
+typedef struct _SCCB {
+ UCHAR OperationCode;
+ UCHAR ControlByte;
+ UCHAR CdbLength;
+ UCHAR RequestSenseLength;
+ ULONG DataLength;
+ ULONG DataPointer;
+ UCHAR CcbRes[2];
+ UCHAR HostStatus;
+ UCHAR TargetStatus;
+ UCHAR TargID;
+ UCHAR Lun;
+ UCHAR Cdb[12];
+ UCHAR CcbRes1;
+ UCHAR Reserved1;
+ ULONG Reserved2;
+ ULONG SensePointer;
+
+
+ CALL_BK_FN SccbCallback; /* VOID (*SccbCallback)(); */
+ ULONG SccbIOPort; /* Identifies board base port */
+ UCHAR SccbStatus;
+ UCHAR SCCBRes2;
+ USHORT SccbOSFlags;
+
+
+ ULONG Sccb_XferCnt; /* actual transfer count */
+ ULONG Sccb_ATC;
+ ULONG SccbVirtDataPtr; /* virtual addr for OS/2 */
+ ULONG Sccb_res1;
+ USHORT Sccb_MGRFlags;
+ USHORT Sccb_sgseg;
+ UCHAR Sccb_scsimsg; /* identify msg for selection */
+ UCHAR Sccb_tag;
+ UCHAR Sccb_scsistat;
+ UCHAR Sccb_idmsg; /* image of last msg in */
+ PSCCB Sccb_forwardlink;
+ PSCCB Sccb_backlink;
+ ULONG Sccb_savedATC;
+ UCHAR Save_Cdb[6];
+ UCHAR Save_CdbLen;
+ UCHAR Sccb_XferState;
+ ULONG Sccb_SGoffset;
+#if (FW_TYPE == _UCB_MGR_)
+ PUCB Sccb_ucb_ptr;
+#endif
+ } SCCB;
+
+#define SCCB_SIZE sizeof(SCCB)
+
+#pragma pack()
+
+
+
+#define SCSI_INITIATOR_COMMAND 0x00
+#define TARGET_MODE_COMMAND 0x01
+#define SCATTER_GATHER_COMMAND 0x02
+#define RESIDUAL_COMMAND 0x03
+#define RESIDUAL_SG_COMMAND 0x04
+#define RESET_COMMAND 0x81
+
+
+#define F_USE_CMD_Q 0x20 /*Inidcates TAGGED command. */
+#define TAG_TYPE_MASK 0xC0 /*Type of tag msg to send. */
+#define TAG_Q_MASK 0xE0
+#define SCCB_DATA_XFER_OUT 0x10 /* Write */
+#define SCCB_DATA_XFER_IN 0x08 /* Read */
+
+
+#define FOURTEEN_BYTES 0x00 /* Request Sense Buffer size */
+#define NO_AUTO_REQUEST_SENSE 0x01 /* No Request Sense Buffer */
+
+
+#define BUS_FREE_ST 0
+#define SELECT_ST 1
+#define SELECT_BDR_ST 2 /* Select w\ Bus Device Reset */
+#define SELECT_SN_ST 3 /* Select w\ Sync Nego */
+#define SELECT_WN_ST 4 /* Select w\ Wide Data Nego */
+#define SELECT_Q_ST 5 /* Select w\ Tagged Q'ing */
+#define COMMAND_ST 6
+#define DATA_OUT_ST 7
+#define DATA_IN_ST 8
+#define DISCONNECT_ST 9
+#define STATUS_ST 10
+#define ABORT_ST 11
+#define MESSAGE_ST 12
+
+
+#define F_HOST_XFER_DIR 0x01
+#define F_ALL_XFERRED 0x02
+#define F_SG_XFER 0x04
+#define F_AUTO_SENSE 0x08
+#define F_ODD_BALL_CNT 0x10
+#define F_NO_DATA_YET 0x80
+
+
+#define F_STATUSLOADED 0x01
+#define F_MSGLOADED 0x02
+#define F_DEV_SELECTED 0x04
+
+
+#define SCCB_COMPLETE 0x00 /* SCCB completed without error */
+#define SCCB_DATA_UNDER_RUN 0x0C
+#define SCCB_SELECTION_TIMEOUT 0x11 /* Set SCSI selection timed out */
+#define SCCB_DATA_OVER_RUN 0x12
+#define SCCB_UNEXPECTED_BUS_FREE 0x13 /* Target dropped SCSI BSY */
+#define SCCB_PHASE_SEQUENCE_FAIL 0x14 /* Target bus phase sequence failure */
+
+#define SCCB_INVALID_OP_CODE 0x16 /* SCCB invalid operation code */
+#define SCCB_INVALID_SCCB 0x1A /* Invalid SCCB - bad parameter */
+#define SCCB_GROSS_FW_ERR 0x27 /* Major problem! */
+#define SCCB_BM_ERR 0x30 /* BusMaster error. */
+#define SCCB_PARITY_ERR 0x34 /* SCSI parity error */
+
+
+
+#if (FW_TYPE==_UCB_MGR_)
+ #define HBA_AUTO_SENSE_FAIL 0x1B
+ #define HBA_TQ_REJECTED 0x1C
+ #define HBA_UNSUPORTED_MSG 0x1D
+ #define HBA_HW_ERROR 0x20
+ #define HBA_ATN_NOT_RESPONDED 0x21
+ #define HBA_SCSI_RESET_BY_ADAPTER 0x22
+ #define HBA_SCSI_RESET_BY_TARGET 0x23
+ #define HBA_WRONG_CONNECTION 0x24
+ #define HBA_BUS_DEVICE_RESET 0x25
+ #define HBA_ABORT_QUEUE 0x26
+
+#else // these are not defined in BUDI/UCB
+
+ #define SCCB_INVALID_DIRECTION 0x18 /* Invalid target direction */
+ #define SCCB_DUPLICATE_SCCB 0x19 /* Duplicate SCCB */
+ #define SCCB_SCSI_RST 0x35 /* SCSI RESET detected. */
+
+#endif // (FW_TYPE==_UCB_MGR_)
+
+
+#define SCCB_IN_PROCESS 0x00
+#define SCCB_SUCCESS 0x01
+#define SCCB_ABORT 0x02
+#define SCCB_NOT_FOUND 0x03
+#define SCCB_ERROR 0x04
+#define SCCB_INVALID 0x05
+
+#define SCCB_SIZE sizeof(SCCB)
+
+
+
+
+#if (FW_TYPE == _UCB_MGR_)
+ void SccbMgr_start_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb);
+ s32bits SccbMgr_abort_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb);
+ u08bits SccbMgr_my_int(CARD_HANDLE pCurrCard);
+ s32bits SccbMgr_isr(CARD_HANDLE pCurrCard);
+ void SccbMgr_scsi_reset(CARD_HANDLE pCurrCard);
+ void SccbMgr_timer_expired(CARD_HANDLE pCurrCard);
+ void SccbMgr_unload_card(CARD_HANDLE pCurrCard);
+ void SccbMgr_restore_foreign_state(CARD_HANDLE pCurrCard);
+ void SccbMgr_restore_native_state(CARD_HANDLE pCurrCard);
+ void SccbMgr_save_foreign_state(PADAPTER_INFO pAdapterInfo);
+
+#endif
+
+
+#if (FW_TYPE == _SCCB_MGR_)
+
+ #if defined (DOS)
+ int SccbMgr_sense_adapter(PSCCBMGR_INFO pCardInfo);
+ USHORT SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo);
+ void SccbMgr_start_sccb(USHORT pCurrCard, PSCCB p_SCCB);
+ int SccbMgr_abort_sccb(USHORT pCurrCard, PSCCB p_SCCB);
+ UCHAR SccbMgr_my_int(USHORT pCurrCard);
+ int SccbMgr_isr(USHORT pCurrCard);
+ void SccbMgr_scsi_reset(USHORT pCurrCard);
+ void SccbMgr_timer_expired(USHORT pCurrCard);
+ USHORT SccbMgr_status(USHORT pCurrCard);
+ void SccbMgr_unload_card(USHORT pCurrCard);
+
+ #else //non-DOS
+
+ int SccbMgr_sense_adapter(PSCCBMGR_INFO pCardInfo);
+ ULONG SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo);
+ void SccbMgr_start_sccb(ULONG pCurrCard, PSCCB p_SCCB);
+ int SccbMgr_abort_sccb(ULONG pCurrCard, PSCCB p_SCCB);
+ UCHAR SccbMgr_my_int(ULONG pCurrCard);
+ int SccbMgr_isr(ULONG pCurrCard);
+ void SccbMgr_scsi_reset(ULONG pCurrCard);
+ void SccbMgr_enable_int(ULONG pCurrCard);
+ void SccbMgr_disable_int(ULONG pCurrCard);
+ void SccbMgr_timer_expired(ULONG pCurrCard);
+ void SccbMgr_unload_card(ULONG pCurrCard);
+
+ #endif
+#endif // (FW_TYPE == _SCCB_MGR_)
+
+#endif /* __SCCB_H__ */
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: blx30.h $
+ *
+ * Description: This module contains SCCB/UCB Manager implementation
+ * specific stuff.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+
+#ifndef __blx30_H__
+#define __blx30_H__
+
+/*#include <globals.h>*/
+
+#define ORION_FW_REV 3110
+
+
+
+
+#define HARP_REVD 1
+
+
+#if defined(DOS)
+#define QUEUE_DEPTH 8+1 /*1 for Normal disconnect 0 for Q'ing. */
+#else
+#define QUEUE_DEPTH 254+1 /*1 for Normal disconnect 32 for Q'ing. */
+#endif // defined(DOS)
+
+#define MAX_MB_CARDS 4 /* Max. no of cards suppoerted on Mother Board */
+
+#define WIDE_SCSI 1
+
+#if defined(WIDE_SCSI)
+ #if defined(DOS)
+ #define MAX_SCSI_TAR 16
+ #define MAX_LUN 8
+ #define LUN_MASK 0x07
+ #else
+ #define MAX_SCSI_TAR 16
+ #define MAX_LUN 32
+ #define LUN_MASK 0x1f
+
+ #endif
+#else
+ #define MAX_SCSI_TAR 8
+ #define MAX_LUN 8
+ #define LUN_MASK 0x07
+#endif
+
+#if defined(HARP_REVA)
+#define SG_BUF_CNT 15 /*Number of prefetched elements. */
+#else
+#define SG_BUF_CNT 16 /*Number of prefetched elements. */
+#endif
+
+#define SG_ELEMENT_SIZE 8 /*Eight byte per element. */
+#define SG_LOCAL_MASK 0x00000000L
+#define SG_ELEMENT_MASK 0xFFFFFFFFL
+
+
+#if (FW_TYPE == _UCB_MGR_)
+ #define OPC_DECODE_NORMAL 0x0f7f
+#endif // _UCB_MGR_
+
+
+
+#if defined(DOS)
+
+/*#include <dos.h>*/
+ #define RD_HARPOON(ioport) (OS_InPortByte(ioport))
+ #define RDW_HARPOON(ioport) (OS_InPortWord(ioport))
+ #define WR_HARPOON(ioport,val) (OS_OutPortByte(ioport,val))
+ #define WRW_HARPOON(ioport,val) (OS_OutPortWord(ioport,val))
+
+ #define RD_HARP32(port,offset,data) asm{db 66h; \
+ push ax; \
+ mov dx,port; \
+ add dx, offset; \
+ db 66h; \
+ in ax,dx; \
+ db 66h; \
+ mov word ptr data,ax;\
+ db 66h; \
+ pop ax}
+
+ #define WR_HARP32(port,offset,data) asm{db 66h; \
+ push ax; \
+ mov dx,port; \
+ add dx, offset; \
+ db 66h; \
+ mov ax,word ptr data;\
+ db 66h; \
+ out dx,ax; \
+ db 66h; \
+ pop ax}
+#endif /* DOS */
+
+#if defined(NETWARE) || defined(OTHER_32) || defined(OTHER_16)
+ #define RD_HARPOON(ioport) OS_InPortByte((unsigned long)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((unsigned long)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong(ioport + offset))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((ioport + offset), data)
+#endif /* NETWARE || OTHER_32 || OTHER_16 */
+
+#if defined(NT) || defined(WIN95_32) || defined(WIN95_16)
+ #define RD_HARPOON(ioport) OS_InPortByte((ULONG)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((ULONG)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((ULONG)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((ULONG)(ioport + offset), data)
+#endif /* NT || WIN95_32 || WIN95_16 */
+
+#if defined (UNIX)
+ #define RD_HARPOON(ioport) OS_InPortByte((u32bits)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((u32bits)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((u32bits)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((u32bits)ioport,(u08bits) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((u32bits)ioport,(u16bits)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((u32bits)(ioport + offset), data)
+#endif /* UNIX */
+
+#if defined(OS2)
+ #define RD_HARPOON(ioport) OS_InPortByte((unsigned long)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((unsigned long)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((ULONG)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong(((ULONG)(ioport + offset)), data)
+#endif /* OS2 */
+
+#if defined(SOLARIS_REAL_MODE)
+
+ #define RD_HARPOON(ioport) OS_InPortByte((unsigned long)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((unsigned long)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((ULONG)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((ULONG)(ioport + offset), (ULONG)data)
+
+#endif /* SOLARIS_REAL_MODE */
+
+#endif /* __BLX30_H__ */
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: target.h $
+ *
+ * Description: Definitions for Target related structures
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __TARGET__
+#define __TARGET__
+
+/*#include <globals.h>*/
+/*#include <blx30.h>*/
+
+
+#define TAR_SYNC_MASK (BIT(7)+BIT(6))
+#define SYNC_UNKNOWN 0x00
+#define SYNC_TRYING BIT(6)
+#define SYNC_SUPPORTED (BIT(7)+BIT(6))
+
+#define TAR_WIDE_MASK (BIT(5)+BIT(4))
+#define WIDE_DISABLED 0x00
+#define WIDE_ENABLED BIT(4)
+#define WIDE_NEGOCIATED BIT(5)
+
+#define TAR_TAG_Q_MASK (BIT(3)+BIT(2))
+#define TAG_Q_UNKNOWN 0x00
+#define TAG_Q_TRYING BIT(2)
+#define TAG_Q_REJECT BIT(3)
+#define TAG_Q_SUPPORTED (BIT(3)+BIT(2))
+
+#define TAR_ALLOW_DISC BIT(0)
+
+
+#define EE_SYNC_MASK (BIT(0)+BIT(1))
+#define EE_SYNC_ASYNC 0x00
+#define EE_SYNC_5MB BIT(0)
+#define EE_SYNC_10MB BIT(1)
+#define EE_SYNC_20MB (BIT(0)+BIT(1))
+
+#define EE_ALLOW_DISC BIT(6)
+#define EE_WIDE_SCSI BIT(7)
+
+
+#if defined(DOS)
+ typedef struct SCCBMgr_tar_info near *PSCCBMgr_tar_info;
+
+#elif defined(OS2)
+ typedef struct SCCBMgr_tar_info far *PSCCBMgr_tar_info;
+
+#else
+ typedef struct SCCBMgr_tar_info *PSCCBMgr_tar_info;
+
+#endif
+
+
+typedef struct SCCBMgr_tar_info {
+
+ PSCCB TarSelQ_Head; /* head of this target's selection queue */
+ PSCCB TarSelQ_Tail; /* tail of this target's selection queue */
+ UCHAR TarLUN_CA; /*Contingent Allegiance */
+ UCHAR TarTagQ_Cnt; /* count of outstanding tagged commands */
+ UCHAR TarSelQ_Cnt; /* count of SCCBs queued for selection */
+ UCHAR TarStatus; /* negotiation state - presumably the TAR_*_MASK bits above; verify */
+ UCHAR TarEEValue; /* EEPROM-derived settings - presumably the EE_* bits above; verify */
+ UCHAR TarSyncCtrl;
+ UCHAR TarReserved[2]; /* for alignment */
+ UCHAR LunDiscQ_Idx[MAX_LUN]; /* per-LUN index (NOTE: looks like an index into discQ_Tbl - confirm) */
+ UCHAR TarLUNBusy[MAX_LUN]; /* per-LUN busy flags */
+} SCCBMGR_TAR_INFO;
+
+typedef struct NVRAMInfo {
+ UCHAR niModel; /* Model No. of card */
+ UCHAR niCardNo; /* Card no. */
+#if defined(DOS)
+ USHORT niBaseAddr; /* Port Address of card */
+#else
+ ULONG niBaseAddr; /* Port Address of card */
+#endif
+ UCHAR niSysConf; /* Adapter Configuration byte - Byte 16 of eeprom map */
+ UCHAR niScsiConf; /* SCSI Configuration byte - Byte 17 of eeprom map */
+ UCHAR niScamConf; /* SCAM Configuration byte - Byte 20 of eeprom map */
+ UCHAR niAdapId; /* Host Adapter ID - Byte 24 of eeprom map */
+ UCHAR niSyncTbl[MAX_SCSI_TAR / 2]; /* Sync/Wide byte of targets */
+ UCHAR niScamTbl[MAX_SCSI_TAR][4]; /* Compressed Scam name string of Targets */
+}NVRAMINFO;
+
+#if defined(DOS)
+typedef NVRAMINFO near *PNVRamInfo;
+#elif defined (OS2)
+typedef NVRAMINFO far *PNVRamInfo;
+#else
+typedef NVRAMINFO *PNVRamInfo;
+#endif
+
+#define MODEL_LT 1
+#define MODEL_DL 2
+#define MODEL_LW 3
+#define MODEL_DW 4
+
+
+typedef struct SCCBcard {
+ PSCCB currentSCCB; /* SCCB currently being processed on this card */
+#if (FW_TYPE==_SCCB_MGR_)
+ PSCCBMGR_INFO cardInfo;
+#else
+ PADAPTER_INFO cardInfo;
+#endif
+
+#if defined(DOS)
+ USHORT ioPort;
+#else
+ ULONG ioPort;
+#endif
+
+ USHORT cmdCounter;
+ UCHAR discQCount; /* number of entries in use in discQ_Tbl */
+ UCHAR tagQ_Lst;
+ UCHAR cardIndex;
+ UCHAR scanIndex;
+ UCHAR globalFlags; /* card state flags - presumably the F_* bits defined below; verify */
+ UCHAR ourId; /* host adapter's own SCSI ID */
+ PNVRamInfo pNvRamInfo; /* cached NVRAM/EEPROM settings for this card */
+ PSCCB discQ_Tbl[QUEUE_DEPTH]; /* lookup table of disconnected SCCBs (NOTE: indexed per tag - confirm) */
+
+}SCCBCARD;
+
+#if defined(DOS)
+typedef struct SCCBcard near *PSCCBcard;
+#elif defined (OS2)
+typedef struct SCCBcard far *PSCCBcard;
+#else
+typedef struct SCCBcard *PSCCBcard;
+#endif
+
+
+#define F_TAG_STARTED 0x01
+#define F_CONLUN_IO 0x02
+#define F_DO_RENEGO 0x04
+#define F_NO_FILTER 0x08
+#define F_GREEN_PC 0x10
+#define F_HOST_XFER_ACT 0x20
+#define F_NEW_SCCB_CMD 0x40
+#define F_UPDATE_EEPROM 0x80
+
+
+#define ID_STRING_LENGTH 32
+#define TYPE_CODE0 0x63 /*Level2 Mstr (bits 7-6), */
+
+#define TYPE_CODE1 00 /*No ID yet */
+
+#define SLV_TYPE_CODE0 0xA3 /*Priority Bit set (bits 7-6), */
+
+#define ASSIGN_ID 0x00
+#define SET_P_FLAG 0x01
+#define CFG_CMPLT 0x03
+#define DOM_MSTR 0x0F
+#define SYNC_PTRN 0x1F
+
+#define ID_0_7 0x18
+#define ID_8_F 0x11
+#define ID_10_17 0x12
+#define ID_18_1F 0x0B
+#define MISC_CODE 0x14
+#define CLR_P_FLAG 0x18
+#define LOCATE_ON 0x12
+#define LOCATE_OFF 0x0B
+
+#define LVL_1_MST 0x00
+#define LVL_2_MST 0x40
+#define DOM_LVL_2 0xC0
+
+
+#define INIT_SELTD 0x01
+#define LEVEL2_TAR 0x02
+
+
+enum scam_id_st { ID0,ID1,ID2,ID3,ID4,ID5,ID6,ID7,ID8,ID9,ID10,ID11,ID12, /* SCAM ID-assignment states: one per assignable ID, plus bookkeeping states */
+ ID13,ID14,ID15,ID_UNUSED,ID_UNASSIGNED,ID_ASSIGNED,LEGACY,
+ CLR_PRIORITY,NO_ID_AVAIL };
+
+typedef struct SCCBscam_info {
+
+ UCHAR id_string[ID_STRING_LENGTH]; /* SCAM ID string for this device */
+ enum scam_id_st state; /* current SCAM ID-assignment state */
+
+} SCCBSCAM_INFO, *PSCCBSCAM_INFO;
+
+#endif
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: scsi2.h $
+ *
+ * Description: Register definitions for HARPOON ASIC.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __SCSI_H__
+#define __SCSI_H__
+
+
+
+#define SCSI_TEST_UNIT_READY 0x00
+#define SCSI_REZERO_UNIT 0x01
+#define SCSI_REQUEST_SENSE 0x03
+#define SCSI_FORMAT_UNIT 0x04
+#define SCSI_REASSIGN 0x07
+#define SCSI_READ 0x08
+#define SCSI_WRITE 0x0A
+#define SCSI_SEEK 0x0B
+#define SCSI_INQUIRY 0x12
+#define SCSI_MODE_SELECT 0x15
+#define SCSI_RESERVE_UNIT 0x16
+#define SCSI_RELEASE_UNIT 0x17
+#define SCSI_MODE_SENSE 0x1A
+#define SCSI_START_STOP_UNIT 0x1B
+#define SCSI_SEND_DIAGNOSTIC 0x1D
+#define SCSI_READ_CAPACITY 0x25
+#define SCSI_READ_EXTENDED 0x28
+#define SCSI_WRITE_EXTENDED 0x2A
+#define SCSI_SEEK_EXTENDED 0x2B
+#define SCSI_WRITE_AND_VERIFY 0x2E
+#define SCSI_VERIFY 0x2F
+#define SCSI_READ_DEFECT_DATA 0x37
+#define SCSI_WRITE_BUFFER 0x3B
+#define SCSI_READ_BUFFER 0x3C
+#define SCSI_RECV_DIAGNOSTIC 0x1C
+#define SCSI_READ_LONG 0x3E
+#define SCSI_WRITE_LONG 0x3F
+#define SCSI_LAST_SCSI_CMND SCSI_WRITE_LONG
+#define SCSI_INVALID_CMND 0xFF
+
+
+
+#define SSGOOD 0x00
+#define SSCHECK 0x02
+#define SSCOND_MET 0x04
+#define SSBUSY 0x08
+#define SSRESERVATION_CONFLICT 0x18
+#define SSCMD_TERM 0x22
+#define SSQ_FULL 0x28
+
+
+#define SKNO_SEN 0x00
+#define SKRECOV_ERR 0x01
+#define SKNOT_RDY 0x02
+#define SKMED_ERR 0x03
+#define SKHW_ERR 0x04
+#define SKILL_REQ 0x05
+#define SKUNIT_ATTN 0x06
+#define SKDATA_PROTECT 0x07
+#define SKBLNK_CHK 0x08
+#define SKCPY_ABORT 0x0A
+#define SKABORT_CMD 0x0B
+#define SKEQUAL 0x0C
+#define SKVOL_OVF 0x0D
+#define SKMIS_CMP 0x0E
+
+
+#define SMCMD_COMP 0x00
+#define SMEXT 0x01
+#define SMSAVE_DATA_PTR 0x02
+#define SMREST_DATA_PTR 0x03
+#define SMDISC 0x04
+#define SMINIT_DETEC_ERR 0x05
+#define SMABORT 0x06
+#define SMREJECT 0x07
+#define SMNO_OP 0x08
+#define SMPARITY 0x09
+#define SMDEV_RESET 0x0C
+#define SMABORT_TAG 0x0D
+#define SMINIT_RECOVERY 0x0F
+#define SMREL_RECOVERY 0x10
+
+#define SMIDENT 0x80
+#define DISC_PRIV 0x40
+
+
+#define SMSYNC 0x01
+#define SM10MBS 0x19 /* 100ns */
+#define SM5MBS 0x32 /* 200ns */
+#define SMOFFSET 0x0F /* Max offset value */
+#define SMWDTR 0x03
+#define SM8BIT 0x00
+#define SM16BIT 0x01
+#define SM32BIT 0x02
+#define SMIGNORWR 0x23 /* Ignore Wide Residue */
+
+
+#define ARBITRATION_DELAY 0x01 /* 2.4us using a 40Mhz clock */
+#define BUS_SETTLE_DELAY 0x01 /* 400ns */
+#define BUS_CLEAR_DELAY 0x01 /* 800ns */
+
+
+
+#define SPHASE_TO 0x0A /* 10 second timeout waiting for */
+#define SCMD_TO 0x0F /* Overall command timeout */
+
+
+
+#define SIX_BYTE_CMD 0x06
+#define TEN_BYTE_CMD 0x0A
+#define TWELVE_BYTE_CMD 0x0C
+
+#define ASYNC 0x00
+#define PERI25NS 0x06 /* 25/4ns to next clock for xbow. */
+#define SYNC10MBS 0x19
+#define SYNC5MBS 0x32
+#define MAX_OFFSET 0x0F /* Max byte offset for Sync Xfers */
+
+#endif
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: eeprom.h $
+ *
+ * Description: Definitions for EEPROM related structures
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __EEPROM__
+#define __EEPROM__
+
+/*#include <globals.h>*/
+
+#define EEPROM_WD_CNT 256
+
+#define EEPROM_CHECK_SUM 0
+#define FW_SIGNATURE 2
+#define MODEL_NUMB_0 4
+#define MODEL_NUMB_1 5
+#define MODEL_NUMB_2 6
+#define MODEL_NUMB_3 7
+#define MODEL_NUMB_4 8
+#define MODEL_NUMB_5 9
+#define IO_BASE_ADDR 10
+#define IRQ_NUMBER 12
+#define PCI_INT_PIN 13
+#define BUS_DELAY 14 /*On time in byte 14 off delay in 15 */
+#define SYSTEM_CONFIG 16
+#define SCSI_CONFIG 17
+#define BIOS_CONFIG 18
+#define SPIN_UP_DELAY 19
+#define SCAM_CONFIG 20
+#define ADAPTER_SCSI_ID 24
+
+
+#define IGNORE_B_SCAN 32
+#define SEND_START_ENA 34
+#define DEVICE_ENABLE 36
+
+#define SYNC_RATE_TBL 38
+#define SYNC_RATE_TBL01 38
+#define SYNC_RATE_TBL23 40
+#define SYNC_RATE_TBL45 42
+#define SYNC_RATE_TBL67 44
+#define SYNC_RATE_TBL89 46
+#define SYNC_RATE_TBLab 48
+#define SYNC_RATE_TBLcd 50
+#define SYNC_RATE_TBLef 52
+
+
+
+#define EE_SCAMBASE 256
+
+
+
+ #define DOM_MASTER (BIT(0) + BIT(1))
+ #define SCAM_ENABLED BIT(2)
+ #define SCAM_LEVEL2 BIT(3)
+
+
+ #define RENEGO_ENA BITW(10)
+ #define CONNIO_ENA BITW(11)
+ #define GREEN_PC_ENA BITW(12)
+
+
+ #define AUTO_RATE_00 00
+ #define AUTO_RATE_05 01
+ #define AUTO_RATE_10 02
+ #define AUTO_RATE_20 03
+
+ #define WIDE_NEGO_BIT BIT(7)
+ #define DISC_ENABLE_BIT BIT(6)
+
+
+#endif
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: harpoon.h $
+ *
+ * Description: Register definitions for HARPOON ASIC.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+
+/*#include <globals.h>*/
+
+#ifndef __HARPOON__
+#define __HARPOON__
+
+
+ #define hp_vendor_id_0 0x00 /* LSB */
+ #define ORION_VEND_0 0x4B
+
+ #define hp_vendor_id_1 0x01 /* MSB */
+ #define ORION_VEND_1 0x10
+
+ #define hp_device_id_0 0x02 /* LSB */
+ #define ORION_DEV_0 0x30
+
+ #define hp_device_id_1 0x03 /* MSB */
+ #define ORION_DEV_1 0x81
+
+ /* Sub Vendor ID and Sub Device ID only available in
+ Harpoon Version 2 and higher */
+
+ #define hp_sub_vendor_id_0 0x04 /* LSB */
+ #define hp_sub_vendor_id_1 0x05 /* MSB */
+ #define hp_sub_device_id_0 0x06 /* LSB */
+ #define hp_sub_device_id_1 0x07 /* MSB */
+
+
+ #define hp_dual_addr_lo 0x08
+ #define hp_dual_addr_lmi 0x09
+ #define hp_dual_addr_hmi 0x0A
+ #define hp_dual_addr_hi 0x0B
+
+ #define hp_semaphore 0x0C
+ #define SCCB_MGR_ACTIVE BIT(0)
+ #define TICKLE_ME BIT(1)
+ #define SCCB_MGR_PRESENT BIT(3)
+ #define BIOS_IN_USE BIT(4)
+
+ #define hp_user_defined_D 0x0D
+
+ #define hp_reserved_E 0x0E
+
+ #define hp_sys_ctrl 0x0F
+
+ #define STOP_CLK BIT(0) /*Turn off BusMaster Clock */
+ #define DRVR_RST BIT(1) /*Firmware Reset to 80C15 chip */
+ #define HALT_MACH BIT(3) /*Halt State Machine */
+ #define HARD_ABORT BIT(4) /*Hard Abort */
+ #define DIAG_MODE BIT(5) /*Diagnostic Mode */
+
+ #define BM_ABORT_TMOUT 0x50 /*Halt State machine time out */
+
+ #define hp_sys_cfg 0x10
+
+ #define DONT_RST_FIFO BIT(7) /*Don't reset FIFO */
+
+
+ #define hp_host_ctrl0 0x11
+
+ #define DUAL_ADDR_MODE BIT(0) /*Enable 64-bit addresses */
+ #define IO_MEM_SPACE BIT(1) /*I/O Memory Space */
+ #define RESOURCE_LOCK BIT(2) /*Enable Resource Lock */
+ #define IGNOR_ACCESS_ERR BIT(3) /*Ignore Access Error */
+ #define HOST_INT_EDGE BIT(4) /*Host interrupt level/edge mode sel */
+ #define SIX_CLOCKS BIT(5) /*6 Clocks between Strobe */
+ #define DMA_EVEN_PARITY BIT(6) /*Enable DMA Even Parity */
+
+/*
+ #define BURST_MODE BIT(0)
+*/
+
+ #define hp_reserved_12 0x12
+
+ #define hp_host_blk_cnt 0x13
+
+ #define XFER_BLK1 0x00 /* 0 0 0 1 byte per block*/
+ #define XFER_BLK2 0x01 /* 0 0 1 2 byte per block*/
+ #define XFER_BLK4 0x02 /* 0 1 0 4 byte per block*/
+ #define XFER_BLK8 0x03 /* 0 1 1 8 byte per block*/
+ #define XFER_BLK16 0x04 /* 1 0 0 16 byte per block*/
+ #define XFER_BLK32 0x05 /* 1 0 1 32 byte per block*/
+ #define XFER_BLK64 0x06 /* 1 1 0 64 byte per block*/
+
+ #define BM_THRESHOLD 0x40 /* PCI mode can only xfer 16 bytes*/
+
+
+ #define hp_reserved_14 0x14
+ #define hp_reserved_15 0x15
+ #define hp_reserved_16 0x16
+
+ #define hp_int_mask 0x17
+
+ #define INT_CMD_COMPL BIT(0) /* DMA command complete */
+ #define INT_EXT_STATUS BIT(1) /* Extended Status Set */
+ #define INT_SCSI BIT(2) /* Scsi block interrupt */
+ #define INT_FIFO_RDY BIT(4) /* FIFO data ready */
+
+
+ #define hp_xfer_cnt_lo 0x18
+ #define hp_xfer_cnt_mi 0x19
+ #define hp_xfer_cnt_hi 0x1A
+ #define hp_xfer_cmd 0x1B
+
+ #define XFER_HOST_DMA 0x00 /* 0 0 0 Transfer Host -> DMA */
+ #define XFER_DMA_HOST 0x01 /* 0 0 1 Transfer DMA -> Host */
+ #define XFER_HOST_MPU 0x02 /* 0 1 0 Transfer Host -> MPU */
+ #define XFER_MPU_HOST 0x03 /* 0 1 1 Transfer MPU -> Host */
+ #define XFER_DMA_MPU 0x04 /* 1 0 0 Transfer DMA -> MPU */
+ #define XFER_MPU_DMA 0x05 /* 1 0 1 Transfer MPU -> DMA */
+ #define SET_SEMAPHORE 0x06 /* 1 1 0 Set Semaphore */
+ #define XFER_NOP 0x07 /* 1 1 1 Transfer NOP */
+ #define XFER_MB_MPU 0x06 /* 1 1 0 Transfer MB -> MPU */
+ #define XFER_MB_DMA 0x07 /* 1 1 1 Transfer MB -> DMA */
+
+
+ #define XFER_HOST_AUTO 0x00 /* 0 0 Auto Transfer Size */
+ #define XFER_HOST_8BIT 0x08 /* 0 1 8 BIT Transfer Size */
+ #define XFER_HOST_16BIT 0x10 /* 1 0 16 BIT Transfer Size */
+ #define XFER_HOST_32BIT 0x18 /* 1 1 32 BIT Transfer Size */
+
+ #define XFER_DMA_8BIT 0x20 /* 0 1 8 BIT Transfer Size */
+ #define XFER_DMA_16BIT 0x40 /* 1 0 16 BIT Transfer Size */
+
+ #define DISABLE_INT BIT(7) /*Do not interrupt at end of cmd. */
+
+ #define HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_8BIT))
+ #define HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_8BIT))
+ #define WIDE_HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_16BIT))
+ #define WIDE_HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_16BIT))
+
+ #define hp_host_addr_lo 0x1C
+ #define hp_host_addr_lmi 0x1D
+ #define hp_host_addr_hmi 0x1E
+ #define hp_host_addr_hi 0x1F
+
+ #define hp_pio_data 0x20
+ #define hp_reserved_21 0x21
+ #define hp_ee_ctrl 0x22
+
+ #define EXT_ARB_ACK BIT(7)
+ #define SCSI_TERM_ENA_H BIT(6) /* SCSI high byte terminator */
+ #define SEE_MS BIT(5)
+ #define SEE_CS BIT(3)
+ #define SEE_CLK BIT(2)
+ #define SEE_DO BIT(1)
+ #define SEE_DI BIT(0)
+
+ #define EE_READ 0x06
+ #define EE_WRITE 0x05
+ #define EWEN 0x04
+ #define EWEN_ADDR 0x03C0
+ #define EWDS 0x04
+ #define EWDS_ADDR 0x0000
+
+ #define hp_brdctl 0x23
+
+ #define DAT_7 BIT(7)
+ #define DAT_6 BIT(6)
+ #define DAT_5 BIT(5)
+ #define BRD_STB BIT(4)
+ #define BRD_CS BIT(3)
+ #define BRD_WR BIT(2)
+
+ #define hp_reserved_24 0x24
+ #define hp_reserved_25 0x25
+
+
+
+
+ #define hp_bm_ctrl 0x26
+
+ #define SCSI_TERM_ENA_L BIT(0) /*Enable/Disable external terminators */
+ #define FLUSH_XFER_CNTR BIT(1) /*Flush transfer counter */
+ #define BM_XFER_MIN_8 BIT(2) /*Enable bus master transfer of 9 */
+ #define BIOS_ENA BIT(3) /*Enable BIOS/FLASH Enable */
+ #define FORCE1_XFER BIT(5) /*Always xfer one byte in byte mode */
+ #define FAST_SINGLE BIT(6) /*?? */
+
+ #define BMCTRL_DEFAULT (FORCE1_XFER|FAST_SINGLE|SCSI_TERM_ENA_L)
+
+ #define hp_reserved_27 0x27
+
+ #define hp_sg_addr 0x28
+ #define hp_page_ctrl 0x29
+
+ #define SCATTER_EN BIT(0)
+ #define SGRAM_ARAM BIT(1)
+ #define BIOS_SHADOW BIT(2)
+ #define G_INT_DISABLE BIT(3) /* Enable/Disable all Interrupts */
+ #define NARROW_SCSI_CARD BIT(4) /* NARROW/WIDE SCSI config pin */
+
+ #define hp_reserved_2A 0x2A
+ #define hp_pci_cmd_cfg 0x2B
+
+ #define IO_SPACE_ENA BIT(0) /*enable I/O space */
+ #define MEM_SPACE_ENA BIT(1) /*enable memory space */
+ #define BUS_MSTR_ENA BIT(2) /*enable bus master operation */
+ #define MEM_WI_ENA BIT(4) /*enable Write and Invalidate */
+ #define PAR_ERR_RESP BIT(6) /*enable parity error response. */
+
+ #define hp_reserved_2C 0x2C
+
+ #define hp_pci_stat_cfg 0x2D
+
+ #define DATA_PARITY_ERR BIT(0)
+ #define REC_TARGET_ABORT BIT(4) /*received Target abort */
+ #define REC_MASTER_ABORT BIT(5) /*received Master abort */
+ #define SIG_SYSTEM_ERR BIT(6)
+ #define DETECTED_PAR_ERR BIT(7)
+
+ #define hp_reserved_2E 0x2E
+
+ #define hp_sys_status 0x2F
+
+ #define SLV_DATA_RDY BIT(0) /*Slave data ready */
+ #define XFER_CNT_ZERO BIT(1) /*Transfer counter = 0 */
+ #define BM_FIFO_EMPTY BIT(2) /*FIFO empty */
+ #define BM_FIFO_FULL BIT(3) /*FIFO full */
+ #define HOST_OP_DONE BIT(4) /*host operation done */
+ #define DMA_OP_DONE BIT(5) /*DMA operation done */
+ #define SLV_OP_DONE BIT(6) /*Slave operation done */
+ #define PWR_ON_FLAG BIT(7) /*Power on flag */
+
+ #define hp_reserved_30 0x30
+
+ #define hp_host_status0 0x31
+
+ #define HOST_TERM BIT(5) /*Host Terminal Count */
+ #define HOST_TRSHLD BIT(6) /*Host Threshold */
+ #define CONNECTED_2_HOST BIT(7) /*Connected to Host */
+
+ #define hp_reserved_32 0x32
+
+ #define hp_rev_num 0x33
+
+ #define REV_A_CONST 0x0E
+ #define REV_B_CONST 0x0E
+
+ #define hp_stack_data 0x34
+ #define hp_stack_addr 0x35
+
+ #define hp_ext_status 0x36
+
+ #define BM_FORCE_OFF BIT(0) /*Bus Master is forced to get off */
+ #define PCI_TGT_ABORT BIT(0) /*PCI bus master transaction aborted */
+ #define PCI_DEV_TMOUT BIT(1) /*PCI Device Time out */
+ #define FIFO_TC_NOT_ZERO BIT(2) /*FIFO or transfer counter not zero */
+ #define CHIP_RST_OCCUR BIT(3) /*Chip reset occurs */
+ #define CMD_ABORTED BIT(4) /*Command aborted */
+ #define BM_PARITY_ERR BIT(5) /*parity error on data received */
+ #define PIO_OVERRUN BIT(6) /*Slave data overrun */
+ #define BM_CMD_BUSY BIT(7) /*Bus master transfer command busy */
+ #define BAD_EXT_STATUS (BM_FORCE_OFF | PCI_DEV_TMOUT | CMD_ABORTED | \
+ BM_PARITY_ERR | PIO_OVERRUN)
+
+ #define hp_int_status 0x37
+
+ #define BM_CMD_CMPL BIT(0) /*Bus Master command complete */
+ #define EXT_STATUS_ON BIT(1) /*Extended status is valid */
+ #define SCSI_INTERRUPT BIT(2) /*Global indication of a SCSI int. */
+ #define BM_FIFO_RDY BIT(4)
+ #define INT_ASSERTED BIT(5) /* */
+ #define SRAM_BUSY BIT(6) /*Scatter/Gather RAM busy */
+ #define CMD_REG_BUSY BIT(7)
+
+
+ #define hp_fifo_cnt 0x38
+ #define hp_curr_host_cnt 0x39
+ #define hp_reserved_3A 0x3A
+ #define hp_fifo_in_addr 0x3B
+
+ #define hp_fifo_out_addr 0x3C
+ #define hp_reserved_3D 0x3D
+ #define hp_reserved_3E 0x3E
+ #define hp_reserved_3F 0x3F
+
+
+
+ extern USHORT default_intena;
+
+ #define hp_intena 0x40
+
+ #define RESET BITW(7)
+ #define PROG_HLT BITW(6)
+ #define PARITY BITW(5)
+ #define FIFO BITW(4)
+ #define SEL BITW(3)
+ #define SCAM_SEL BITW(2)
+ #define RSEL BITW(1)
+ #define TIMEOUT BITW(0)
+ #define BUS_FREE BITW(15)
+ #define XFER_CNT_0 BITW(14)
+ #define PHASE BITW(13)
+ #define IUNKWN BITW(12)
+ #define ICMD_COMP BITW(11)
+ #define ITICKLE BITW(10)
+ #define IDO_STRT BITW(9)
+ #define ITAR_DISC BITW(8)
+ #define AUTO_INT (BITW(12)+BITW(11)+BITW(10)+BITW(9)+BITW(8))
+ #define CLR_ALL_INT 0xFFFF
+ #define CLR_ALL_INT_1 0xFF00
+
+ #define hp_intstat 0x42
+
+ #define hp_scsisig 0x44
+
+ #define SCSI_SEL BIT(7)
+ #define SCSI_BSY BIT(6)
+ #define SCSI_REQ BIT(5)
+ #define SCSI_ACK BIT(4)
+ #define SCSI_ATN BIT(3)
+ #define SCSI_CD BIT(2)
+ #define SCSI_MSG BIT(1)
+ #define SCSI_IOBIT BIT(0)
+
+ #define S_SCSI_PHZ (BIT(2)+BIT(1)+BIT(0))
+ #define S_CMD_PH (BIT(2) )
+ #define S_MSGO_PH (BIT(2)+BIT(1) )
+ #define S_STAT_PH (BIT(2) +BIT(0))
+ #define S_MSGI_PH (BIT(2)+BIT(1)+BIT(0))
+ #define S_DATAI_PH ( BIT(0))
+ #define S_DATAO_PH 0x00
+ #define S_ILL_PH ( BIT(1) )
+
+ #define hp_scsictrl_0 0x45
+
+ #define NO_ARB BIT(7)
+ #define SEL_TAR BIT(6)
+ #define ENA_ATN BIT(4)
+ #define ENA_RESEL BIT(2)
+ #define SCSI_RST BIT(1)
+ #define ENA_SCAM_SEL BIT(0)
+
+
+
+ #define hp_portctrl_0 0x46
+
+ #define SCSI_PORT BIT(7)
+ #define SCSI_INBIT BIT(6)
+ #define DMA_PORT BIT(5)
+ #define DMA_RD BIT(4)
+ #define HOST_PORT BIT(3)
+ #define HOST_WRT BIT(2)
+ #define SCSI_BUS_EN BIT(1)
+ #define START_TO BIT(0)
+
+ #define hp_scsireset 0x47
+
+ #define SCSI_TAR BIT(7)
+ #define SCSI_INI BIT(6)
+ #define SCAM_EN BIT(5)
+ #define ACK_HOLD BIT(4)
+ #define DMA_RESET BIT(3)
+ #define HPSCSI_RESET BIT(2)
+ #define PROG_RESET BIT(1)
+ #define FIFO_CLR BIT(0)
+
+ #define hp_xfercnt_0 0x48
+ #define hp_xfercnt_1 0x49
+ #define hp_xfercnt_2 0x4A
+ #define hp_xfercnt_3 0x4B
+
+ #define hp_fifodata_0 0x4C
+ #define hp_fifodata_1 0x4D
+ #define hp_addstat 0x4E
+
+ #define SCAM_TIMER BIT(7)
+ #define AUTO_RUNNING BIT(6)
+ #define FAST_SYNC BIT(5)
+ #define SCSI_MODE8 BIT(3)
+ #define SCSI_PAR_ERR BIT(0)
+
+ #define hp_prgmcnt_0 0x4F
+
+ #define AUTO_PC_MASK 0x3F
+
+ #define hp_selfid_0 0x50
+ #define hp_selfid_1 0x51
+ #define hp_arb_id 0x52
+
+ #define ARB_ID (BIT(3) + BIT(2) + BIT(1) + BIT(0))
+
+ #define hp_select_id 0x53
+
+ #define RESEL_ID (BIT(7) + BIT(6) + BIT(5) + BIT(4))
+ #define SELECT_ID (BIT(3) + BIT(2) + BIT(1) + BIT(0))
+
+ #define hp_synctarg_base 0x54
+ #define hp_synctarg_12 0x54
+ #define hp_synctarg_13 0x55
+ #define hp_synctarg_14 0x56
+ #define hp_synctarg_15 0x57
+
+ #define hp_synctarg_8 0x58
+ #define hp_synctarg_9 0x59
+ #define hp_synctarg_10 0x5A
+ #define hp_synctarg_11 0x5B
+
+ #define hp_synctarg_4 0x5C
+ #define hp_synctarg_5 0x5D
+ #define hp_synctarg_6 0x5E
+ #define hp_synctarg_7 0x5F
+
+ #define hp_synctarg_0 0x60
+ #define hp_synctarg_1 0x61
+ #define hp_synctarg_2 0x62
+ #define hp_synctarg_3 0x63
+
+ #define RATE_20MB 0x00
+ #define RATE_10MB ( BIT(5))
+ #define RATE_6_6MB ( BIT(6) )
+ #define RATE_5MB ( BIT(6)+BIT(5))
+ #define RATE_4MB (BIT(7) )
+ #define RATE_3_33MB (BIT(7) +BIT(5))
+ #define RATE_2_85MB (BIT(7)+BIT(6) )
+ #define RATE_2_5MB (BIT(7)+BIT(5)+BIT(6))
+ #define NEXT_CLK BIT(5)
+ #define SLOWEST_SYNC (BIT(7)+BIT(6)+BIT(5))
+ #define NARROW_SCSI BIT(4)
+ #define SYNC_OFFSET (BIT(3) + BIT(2) + BIT(1) + BIT(0))
+ #define DEFAULT_ASYNC 0x00
+ #define DEFAULT_OFFSET 0x0F
+
+ #define hp_autostart_0 0x64
+ #define hp_autostart_1 0x65
+ #define hp_autostart_2 0x66
+ #define hp_autostart_3 0x67
+
+
+
+ #define DISABLE 0x00
+ #define AUTO_IMMED BIT(5)
+ #define SELECT BIT(6)
+ #define RESELECT (BIT(6)+BIT(5))
+ #define BUSFREE BIT(7)
+ #define XFER_0 (BIT(7)+BIT(5))
+ #define END_DATA (BIT(7)+BIT(6))
+ #define MSG_PHZ (BIT(7)+BIT(6)+BIT(5))
+
+ #define hp_gp_reg_0 0x68
+ #define hp_gp_reg_1 0x69
+ #define hp_gp_reg_2 0x6A
+ #define hp_gp_reg_3 0x6B
+
+ #define hp_seltimeout 0x6C
+
+
+ #define TO_2ms 0x54 /* 2.0503ms */
+ #define TO_4ms 0x67 /* 3.9959ms */
+
+ #define TO_5ms 0x03 /* 4.9152ms */
+ #define TO_10ms 0x07 /* 11.xxxms */
+ #define TO_250ms 0x99 /* 250.68ms */
+ #define TO_290ms 0xB1 /* 289.99ms */
+ #define TO_350ms 0xD6 /* 350.62ms */
+ #define TO_417ms 0xFF /* 417.79ms */
+
+ #define hp_clkctrl_0 0x6D
+
+ #define PWR_DWN BIT(6)
+ #define ACTdeassert BIT(4)
+ #define ATNonErr BIT(3)
+ #define CLK_30MHZ BIT(1)
+ #define CLK_40MHZ (BIT(1) + BIT(0))
+ #define CLK_50MHZ BIT(2)
+
+ #define CLKCTRL_DEFAULT (ACTdeassert | CLK_40MHZ)
+
+ #define hp_fiforead 0x6E
+ #define hp_fifowrite 0x6F
+
+ #define hp_offsetctr 0x70
+ #define hp_xferstat 0x71
+
+ #define FIFO_FULL BIT(7)
+ #define FIFO_EMPTY BIT(6)
+ #define FIFO_MASK 0x3F /* Mask for the FIFO count value. */
+ #define FIFO_LEN 0x20
+
+ #define hp_portctrl_1 0x72
+
+ #define EVEN_HOST_P BIT(5)
+ #define INVT_SCSI BIT(4)
+ #define CHK_SCSI_P BIT(3)
+ #define HOST_MODE8 BIT(0)
+ #define HOST_MODE16 0x00
+
+ #define hp_xfer_pad 0x73
+
+ #define ID_UNLOCK BIT(3)
+ #define XFER_PAD BIT(2)
+
+ #define hp_scsidata_0 0x74
+ #define hp_scsidata_1 0x75
+ #define hp_timer_0 0x76
+ #define hp_timer_1 0x77
+
+ #define hp_reserved_78 0x78
+ #define hp_reserved_79 0x79
+ #define hp_reserved_7A 0x7A
+ #define hp_reserved_7B 0x7B
+
+ #define hp_reserved_7C 0x7C
+ #define hp_reserved_7D 0x7D
+ #define hp_reserved_7E 0x7E
+ #define hp_reserved_7F 0x7F
+
+ #define hp_aramBase 0x80
+ #define BIOS_DATA_OFFSET 0x60
+ #define BIOS_RELATIVE_CARD 0x64
+
+
+
+
+ #define AUTO_LEN 0x80
+ #define AR0 0x00
+ #define AR1 BITW(8)
+ #define AR2 BITW(9)
+ #define AR3 (BITW(9) + BITW(8))
+ #define SDATA BITW(10)
+
+ #define NOP_OP 0x00 /* Nop command */
+
+ #define CRD_OP BITW(11) /* Cmp Reg. w/ Data */
+
+ #define CRR_OP BITW(12) /* Cmp Reg. w. Reg. */
+
+ #define CBE_OP (BITW(14)+BITW(12)+BITW(11)) /* Cmp SCSI cmd class & Branch EQ */
+
+ #define CBN_OP (BITW(14)+BITW(13)) /* Cmp SCSI cmd class & Branch NOT EQ */
+
+ #define CPE_OP (BITW(14)+BITW(11)) /* Cmp SCSI phs & Branch EQ */
+
+ #define CPN_OP (BITW(14)+BITW(12)) /* Cmp SCSI phs & Branch NOT EQ */
+
+
+ #define ADATA_OUT 0x00
+ #define ADATA_IN BITW(8)
+ #define ACOMMAND BITW(10)
+ #define ASTATUS (BITW(10)+BITW(8))
+ #define AMSG_OUT (BITW(10)+BITW(9))
+ #define AMSG_IN (BITW(10)+BITW(9)+BITW(8))
+ #define AILLEGAL (BITW(9)+BITW(8))
+
+
+ #define BRH_OP BITW(13) /* Branch */
+
+
+ #define ALWAYS 0x00
+ #define EQUAL BITW(8)
+ #define NOT_EQ BITW(9)
+
+ #define TCB_OP (BITW(13)+BITW(11)) /* Test condition & branch */
+
+
+ #define ATN_SET BITW(8)
+ #define ATN_RESET BITW(9)
+ #define XFER_CNT (BITW(9)+BITW(8))
+ #define FIFO_0 BITW(10)
+ #define FIFO_NOT0 (BITW(10)+BITW(8))
+ #define T_USE_SYNC0 (BITW(10)+BITW(9))
+
+
+ #define MPM_OP BITW(15) /* Match phase and move data */
+
+ #define MDR_OP (BITW(12)+BITW(11)) /* Move data to Reg. */
+
+ #define MRR_OP BITW(14) /* Move DReg. to Reg. */
+
+
+ #define S_IDREG (BIT(2)+BIT(1)+BIT(0))
+
+
+ #define D_AR0 0x00
+ #define D_AR1 BIT(0)
+ #define D_AR2 BIT(1)
+ #define D_AR3 (BIT(1) + BIT(0))
+ #define D_SDATA BIT(2)
+ #define D_BUCKET (BIT(2) + BIT(1) + BIT(0))
+
+
+ #define ADR_OP (BITW(13)+BITW(12)) /* Logical AND Reg. w. Data */
+
+ #define ADS_OP (BITW(14)+BITW(13)+BITW(12))
+
+ #define ODR_OP (BITW(13)+BITW(12)+BITW(11))
+
+ #define ODS_OP (BITW(14)+BITW(13)+BITW(12)+BITW(11))
+
+ #define STR_OP (BITW(15)+BITW(14)) /* Store to A_Reg. */
+
+ #define AINT_ENA1 0x00
+ #define AINT_STAT1 BITW(8)
+ #define ASCSI_SIG BITW(9)
+ #define ASCSI_CNTL (BITW(9)+BITW(8))
+ #define APORT_CNTL BITW(10)
+ #define ARST_CNTL (BITW(10)+BITW(8))
+ #define AXFERCNT0 (BITW(10)+BITW(9))
+ #define AXFERCNT1 (BITW(10)+BITW(9)+BITW(8))
+ #define AXFERCNT2 BITW(11)
+ #define AFIFO_DATA (BITW(11)+BITW(8))
+ #define ASCSISELID (BITW(11)+BITW(9))
+ #define ASCSISYNC0 (BITW(11)+BITW(9)+BITW(8))
+
+
+ #define RAT_OP (BITW(14)+BITW(13)+BITW(11))
+
+ #define SSI_OP (BITW(15)+BITW(11))
+
+
+ #define SSI_ITAR_DISC (ITAR_DISC >> 8)
+ #define SSI_IDO_STRT (IDO_STRT >> 8)
+ #define SSI_IDI_STRT (IDO_STRT >> 8)
+
+ #define SSI_ICMD_COMP (ICMD_COMP >> 8)
+ #define SSI_ITICKLE (ITICKLE >> 8)
+
+ #define SSI_IUNKWN (IUNKWN >> 8)
+ #define SSI_INO_CC (IUNKWN >> 8)
+ #define SSI_IRFAIL (IUNKWN >> 8)
+
+
+ #define NP 0x10 /*Next Phase */
+ #define NTCMD 0x02 /*Non- Tagged Command start */
+ #define CMDPZ 0x04 /*Command phase */
+ #define DINT 0x12 /*Data Out/In interrupt */
+ #define DI 0x13 /*Data Out */
+ #define MI 0x14 /*Message In */
+ #define DC 0x19 /*Disconnect Message */
+ #define ST 0x1D /*Status Phase */
+ #define UNKNWN 0x24 /*Unknown bus action */
+ #define CC 0x25 /*Command Completion failure */
+ #define TICK 0x26 /*New target reselected us. */
+ #define RFAIL 0x27 /*Reselection failed */
+ #define SELCHK 0x28 /*Select & Check SCSI ID latch reg */
+
+
+ #define ID_MSG_STRT hp_aramBase + 0x00
+ #define NON_TAG_ID_MSG hp_aramBase + 0x06
+ #define CMD_STRT hp_aramBase + 0x08
+ #define SYNC_MSGS hp_aramBase + 0x08
+
+
+
+
+
+ #define TAG_STRT 0x00
+ #define SELECTION_START 0x00
+ #define DISCONNECT_START 0x10/2
+ #define END_DATA_START 0x14/2
+ #define NONTAG_STRT 0x02/2
+ #define CMD_ONLY_STRT CMDPZ/2
+ #define TICKLE_STRT TICK/2
+ #define SELCHK_STRT SELCHK/2
+
+
+
+
+#define mEEPROM_CLK_DELAY(port) (RD_HARPOON(port+hp_intstat_1))
+
+#define mWAIT_10MS(port) (RD_HARPOON(port+hp_intstat_1))
+
+
+#define CLR_XFER_CNT(port) (WR_HARPOON(port+hp_xfercnt_0, 0x00))
+
+#define SET_XFER_CNT(port, data) (WR_HARP32(port,hp_xfercnt_0,data))
+
+#define GET_XFER_CNT(port, xfercnt) {RD_HARP32(port,hp_xfercnt_0,xfercnt); xfercnt &= 0xFFFFFF;}
+/* #define GET_XFER_CNT(port, xfercnt) (xfercnt = RD_HARPOON(port+hp_xfercnt_2), \
+ xfercnt <<= 16,\
+ xfercnt |= RDW_HARPOON((USHORT)(port+hp_xfercnt_0)))
+ */
+#if defined(DOS)
+#define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((USHORT)(port+hp_host_addr_lo), (USHORT)(addr & 0x0000FFFFL)),\
+ addr >>= 16,\
+ WRW_HARPOON((USHORT)(port+hp_host_addr_hmi), (USHORT)(addr & 0x0000FFFFL)),\
+ WR_HARP32(port,hp_xfercnt_0,count),\
+ WRW_HARPOON((USHORT)(port+hp_xfer_cnt_lo), (USHORT)(count & 0x0000FFFFL)),\
+ count >>= 16,\
+ WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF)))
+#else
+#define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((port+hp_host_addr_lo), (USHORT)(addr & 0x0000FFFFL)),\
+ addr >>= 16,\
+ WRW_HARPOON((port+hp_host_addr_hmi), (USHORT)(addr & 0x0000FFFFL)),\
+ WR_HARP32(port,hp_xfercnt_0,count),\
+ WRW_HARPOON((port+hp_xfer_cnt_lo), (USHORT)(count & 0x0000FFFFL)),\
+ count >>= 16,\
+ WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF)))
+#endif
+
+#define ACCEPT_MSG(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, S_ILL_PH);}
+
+
+#define ACCEPT_MSG_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));}
+
+#define ACCEPT_STAT(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, S_ILL_PH);}
+
+#define ACCEPT_STAT_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));}
+
+#define DISABLE_AUTO(port) (WR_HARPOON(port+hp_scsireset, PROG_RESET),\
+ WR_HARPOON(port+hp_scsireset, 0x00))
+
+#define ARAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) | SGRAM_ARAM)))
+
+#define SGRAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~SGRAM_ARAM)))
+
+#define MDISABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE)))
+
+#define MENABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE)))
+
+
+
+#endif
+
+
+#if (FW_TYPE==_UCB_MGR_)
+void ReadNVRam(PSCCBcard pCurrCard,PUCB p_ucb);
+void WriteNVRam(PSCCBcard pCurrCard,PUCB p_ucb);
+void UpdateCheckSum(u32bits baseport);
+#endif // (FW_TYPE==_UCB_MGR_)
+
+#if defined(DOS)
+UCHAR sfm(USHORT port, PSCCB pcurrSCCB);
+void scsiStartAuto(USHORT port);
+UCHAR sisyncn(USHORT port, UCHAR p_card, UCHAR syncFlag);
+void ssel(USHORT port, UCHAR p_card);
+void sres(USHORT port, UCHAR p_card, PSCCBcard pCurrCard);
+void sdecm(UCHAR message, USHORT port, UCHAR p_card);
+void shandem(USHORT port, UCHAR p_card,PSCCB pCurrSCCB);
+void stsyncn(USHORT port, UCHAR p_card);
+void sisyncr(USHORT port,UCHAR sync_pulse, UCHAR offset);
+void sssyncv(USHORT p_port, UCHAR p_id, UCHAR p_sync_value, PSCCBMgr_tar_info currTar_Info);
+void sresb(USHORT port, UCHAR p_card);
+void sxfrp(USHORT p_port, UCHAR p_card);
+void schkdd(USHORT port, UCHAR p_card);
+UCHAR RdStack(USHORT port, UCHAR index);
+void WrStack(USHORT portBase, UCHAR index, UCHAR data);
+UCHAR ChkIfChipInitialized(USHORT ioPort);
+
+#if defined(V302)
+UCHAR GetTarLun(USHORT port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tag, PUCHAR lun);
+#endif
+
+void SendMsg(USHORT port, UCHAR message);
+void queueFlushTargSccb(UCHAR p_card, UCHAR thisTarg, UCHAR error_code);
+UCHAR scsellDOS(USHORT p_port, UCHAR targ_id);
+#else
+UCHAR sfm(ULONG port, PSCCB pcurrSCCB);
+void scsiStartAuto(ULONG port);
+UCHAR sisyncn(ULONG port, UCHAR p_card, UCHAR syncFlag);
+void ssel(ULONG port, UCHAR p_card);
+void sres(ULONG port, UCHAR p_card, PSCCBcard pCurrCard);
+void sdecm(UCHAR message, ULONG port, UCHAR p_card);
+void shandem(ULONG port, UCHAR p_card,PSCCB pCurrSCCB);
+void stsyncn(ULONG port, UCHAR p_card);
+void sisyncr(ULONG port,UCHAR sync_pulse, UCHAR offset);
+void sssyncv(ULONG p_port, UCHAR p_id, UCHAR p_sync_value, PSCCBMgr_tar_info currTar_Info);
+void sresb(ULONG port, UCHAR p_card);
+void sxfrp(ULONG p_port, UCHAR p_card);
+void schkdd(ULONG port, UCHAR p_card);
+UCHAR RdStack(ULONG port, UCHAR index);
+void WrStack(ULONG portBase, UCHAR index, UCHAR data);
+UCHAR ChkIfChipInitialized(ULONG ioPort);
+
+#if defined(V302)
+UCHAR GetTarLun(ULONG port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tar, PUCHAR lun);
+#endif
+
+void SendMsg(ULONG port, UCHAR message);
+void queueFlushTargSccb(UCHAR p_card, UCHAR thisTarg, UCHAR error_code);
+#endif
+
+void ssenss(PSCCBcard pCurrCard);
+void sinits(PSCCB p_sccb, UCHAR p_card);
+void RNVRamData(PNVRamInfo pNvRamInfo);
+
+#if defined(WIDE_SCSI)
+ #if defined(DOS)
+ UCHAR siwidn(USHORT port, UCHAR p_card);
+ void stwidn(USHORT port, UCHAR p_card);
+ void siwidr(USHORT port, UCHAR width);
+ #else
+ UCHAR siwidn(ULONG port, UCHAR p_card);
+ void stwidn(ULONG port, UCHAR p_card);
+ void siwidr(ULONG port, UCHAR width);
+ #endif
+#endif
+
+
+void queueSelectFail(PSCCBcard pCurrCard, UCHAR p_card);
+void queueDisconnect(PSCCB p_SCCB, UCHAR p_card);
+void queueCmdComplete(PSCCBcard pCurrCard, PSCCB p_SCCB, UCHAR p_card);
+void queueSearchSelect(PSCCBcard pCurrCard, UCHAR p_card);
+void queueFlushSccb(UCHAR p_card, UCHAR error_code);
+void queueAddSccb(PSCCB p_SCCB, UCHAR card);
+UCHAR queueFindSccb(PSCCB p_SCCB, UCHAR p_card);
+void utilUpdateResidual(PSCCB p_SCCB);
+USHORT CalcCrc16(UCHAR buffer[]);
+UCHAR CalcLrc(UCHAR buffer[]);
+
+
+#if defined(DOS)
+void Wait1Second(USHORT p_port);
+void Wait(USHORT p_port, UCHAR p_delay);
+void utilEEWriteOnOff(USHORT p_port,UCHAR p_mode);
+void utilEEWrite(USHORT p_port, USHORT ee_data, USHORT ee_addr);
+USHORT utilEERead(USHORT p_port, USHORT ee_addr);
+USHORT utilEEReadOrg(USHORT p_port, USHORT ee_addr);
+void utilEESendCmdAddr(USHORT p_port, UCHAR ee_cmd, USHORT ee_addr);
+#else
+void Wait1Second(ULONG p_port);
+void Wait(ULONG p_port, UCHAR p_delay);
+void utilEEWriteOnOff(ULONG p_port,UCHAR p_mode);
+void utilEEWrite(ULONG p_port, USHORT ee_data, USHORT ee_addr);
+USHORT utilEERead(ULONG p_port, USHORT ee_addr);
+USHORT utilEEReadOrg(ULONG p_port, USHORT ee_addr);
+void utilEESendCmdAddr(ULONG p_port, UCHAR ee_cmd, USHORT ee_addr);
+#endif
+
+
+
+#if defined(OS2)
+ void far phaseDataOut(ULONG port, UCHAR p_card);
+ void far phaseDataIn(ULONG port, UCHAR p_card);
+ void far phaseCommand(ULONG port, UCHAR p_card);
+ void far phaseStatus(ULONG port, UCHAR p_card);
+ void far phaseMsgOut(ULONG port, UCHAR p_card);
+ void far phaseMsgIn(ULONG port, UCHAR p_card);
+ void far phaseIllegal(ULONG port, UCHAR p_card);
+#else
+ #if defined(DOS)
+ void phaseDataOut(USHORT port, UCHAR p_card);
+ void phaseDataIn(USHORT port, UCHAR p_card);
+ void phaseCommand(USHORT port, UCHAR p_card);
+ void phaseStatus(USHORT port, UCHAR p_card);
+ void phaseMsgOut(USHORT port, UCHAR p_card);
+ void phaseMsgIn(USHORT port, UCHAR p_card);
+ void phaseIllegal(USHORT port, UCHAR p_card);
+ #else
+ void phaseDataOut(ULONG port, UCHAR p_card);
+ void phaseDataIn(ULONG port, UCHAR p_card);
+ void phaseCommand(ULONG port, UCHAR p_card);
+ void phaseStatus(ULONG port, UCHAR p_card);
+ void phaseMsgOut(ULONG port, UCHAR p_card);
+ void phaseMsgIn(ULONG port, UCHAR p_card);
+ void phaseIllegal(ULONG port, UCHAR p_card);
+ #endif
+#endif
+
+#if defined(DOS)
+void phaseDecode(USHORT port, UCHAR p_card);
+void phaseChkFifo(USHORT port, UCHAR p_card);
+void phaseBusFree(USHORT p_port, UCHAR p_card);
+#else
+void phaseDecode(ULONG port, UCHAR p_card);
+void phaseChkFifo(ULONG port, UCHAR p_card);
+void phaseBusFree(ULONG p_port, UCHAR p_card);
+#endif
+
+
+
+
+#if defined(DOS)
+void XbowInit(USHORT port, UCHAR scamFlg);
+void BusMasterInit(USHORT p_port);
+int DiagXbow(USHORT port);
+int DiagBusMaster(USHORT port);
+void DiagEEPROM(USHORT p_port);
+#else
+void XbowInit(ULONG port, UCHAR scamFlg);
+void BusMasterInit(ULONG p_port);
+int DiagXbow(ULONG port);
+int DiagBusMaster(ULONG port);
+void DiagEEPROM(ULONG p_port);
+#endif
+
+
+
+
+#if defined(DOS)
+void busMstrAbort(USHORT port);
+UCHAR busMstrTimeOut(USHORT port);
+void dataXferProcessor(USHORT port, PSCCBcard pCurrCard);
+void busMstrSGDataXferStart(USHORT port, PSCCB pCurrSCCB);
+void busMstrDataXferStart(USHORT port, PSCCB pCurrSCCB);
+void hostDataXferAbort(USHORT port, UCHAR p_card, PSCCB pCurrSCCB);
+#else
+void busMstrAbort(ULONG port);
+UCHAR busMstrTimeOut(ULONG port);
+void dataXferProcessor(ULONG port, PSCCBcard pCurrCard);
+void busMstrSGDataXferStart(ULONG port, PSCCB pCurrSCCB);
+void busMstrDataXferStart(ULONG port, PSCCB pCurrSCCB);
+void hostDataXferAbort(ULONG port, UCHAR p_card, PSCCB pCurrSCCB);
+#endif
+void hostDataXferRestart(PSCCB currSCCB);
+
+
+#if defined (DOS)
+UCHAR SccbMgr_bad_isr(USHORT p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int);
+#else
+UCHAR SccbMgr_bad_isr(ULONG p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int);
+
+#endif
+
+void SccbMgrTableInitAll(void);
+void SccbMgrTableInitCard(PSCCBcard pCurrCard, UCHAR p_card);
+void SccbMgrTableInitTarget(UCHAR p_card, UCHAR target);
+
+
+
+void scini(UCHAR p_card, UCHAR p_our_id, UCHAR p_power_up);
+
+#if defined(DOS)
+int scarb(USHORT p_port, UCHAR p_sel_type);
+void scbusf(USHORT p_port);
+void scsel(USHORT p_port);
+void scasid(UCHAR p_card, USHORT p_port);
+UCHAR scxferc(USHORT p_port, UCHAR p_data);
+UCHAR scsendi(USHORT p_port, UCHAR p_id_string[]);
+UCHAR sciso(USHORT p_port, UCHAR p_id_string[]);
+void scwirod(USHORT p_port, UCHAR p_data_bit);
+void scwiros(USHORT p_port, UCHAR p_data_bit);
+UCHAR scvalq(UCHAR p_quintet);
+UCHAR scsell(USHORT p_port, UCHAR targ_id);
+void scwtsel(USHORT p_port);
+void inisci(UCHAR p_card, USHORT p_port, UCHAR p_our_id);
+void scsavdi(UCHAR p_card, USHORT p_port);
+#else
+int scarb(ULONG p_port, UCHAR p_sel_type);
+void scbusf(ULONG p_port);
+void scsel(ULONG p_port);
+void scasid(UCHAR p_card, ULONG p_port);
+UCHAR scxferc(ULONG p_port, UCHAR p_data);
+UCHAR scsendi(ULONG p_port, UCHAR p_id_string[]);
+UCHAR sciso(ULONG p_port, UCHAR p_id_string[]);
+void scwirod(ULONG p_port, UCHAR p_data_bit);
+void scwiros(ULONG p_port, UCHAR p_data_bit);
+UCHAR scvalq(UCHAR p_quintet);
+UCHAR scsell(ULONG p_port, UCHAR targ_id);
+void scwtsel(ULONG p_port);
+void inisci(UCHAR p_card, ULONG p_port, UCHAR p_our_id);
+void scsavdi(UCHAR p_card, ULONG p_port);
+#endif
+UCHAR scmachid(UCHAR p_card, UCHAR p_id_string[]);
+
+
+#if defined(DOS)
+void autoCmdCmplt(USHORT p_port, UCHAR p_card);
+void autoLoadDefaultMap(USHORT p_port);
+#else
+void autoCmdCmplt(ULONG p_port, UCHAR p_card);
+void autoLoadDefaultMap(ULONG p_port);
+#endif
+
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+ void OS_start_timer(unsigned long ioport, unsigned long timeout);
+ void OS_stop_timer(unsigned long ioport, unsigned long timeout);
+ void OS_disable_int(unsigned char intvec);
+ void OS_enable_int(unsigned char intvec);
+ void OS_delay(unsigned long count);
+ int OS_VirtToPhys(u32bits CardHandle, u32bits *physaddr, u32bits *virtaddr);
+ #if !(defined(UNIX) || defined(OS2) || defined(SOLARIS_REAL_MODE))
+ void OS_Lock(PSCCBMGR_INFO pCardInfo);
+ void OS_UnLock(PSCCBMGR_INFO pCardInfo);
+   #endif /* !(defined(UNIX) || defined(OS2) || defined(SOLARIS_REAL_MODE)) */
+
+#endif
+
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+
+
+#if defined(OS2)
+ extern void (far *s_PhaseTbl[8]) (ULONG, UCHAR);
+#else
+ #if defined(DOS)
+ extern void (*s_PhaseTbl[8]) (USHORT, UCHAR);
+ #else
+ extern void (*s_PhaseTbl[8]) (ULONG, UCHAR);
+ #endif
+#endif
+
+extern SCCBSCAM_INFO scamInfo[MAX_SCSI_TAR];
+extern NVRAMINFO nvRamInfo[MAX_MB_CARDS];
+#if defined(DOS) || defined(OS2)
+extern UCHAR temp_id_string[ID_STRING_LENGTH];
+#endif
+extern UCHAR scamHAString[];
+
+
+extern UCHAR mbCards;
+#if defined(BUGBUG)
+extern UCHAR debug_int[MAX_CARDS][debug_size];
+extern UCHAR debug_index[MAX_CARDS];
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+
+#if (FW_TYPE==_SCCB_MGR_)
+#if defined(DOS)
+ extern UCHAR first_time;
+#endif
+#endif /* (FW_TYPE==_SCCB_MGR_) */
+
+#if (FW_TYPE==_UCB_MGR_)
+#if defined(DOS)
+ extern u08bits first_time;
+#endif
+#endif /* (FW_TYPE==_UCB_MGR_) */
+
+#if defined(BUGBUG)
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+
+extern unsigned int SccbGlobalFlags;
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: sccb.c $
+ *
+ * Description: Functions relating to handling of the SCCB interface
+ * between the device driver and the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+ /*#include <budioctl.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <eeprom.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+#define mOS_Lock(card) OS_Lock((PSCCBMGR_INFO)(((PSCCBcard)card)->cardInfo))
+#define mOS_UnLock(card) OS_UnLock((PSCCBMGR_INFO)(((PSCCBcard)card)->cardInfo))
+#else /* FW_TYPE==_UCB_MGR_ */
+#define mOS_Lock(card) OS_Lock((u32bits)(((PSCCBcard)card)->ioPort))
+#define mOS_UnLock(card) OS_UnLock((u32bits)(((PSCCBcard)card)->ioPort))
+#endif
+
+
+/*
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern SCCBCARD BL_Card[MAX_CARDS];
+
+extern NVRAMINFO nvRamInfo[MAX_MB_CARDS];
+extern UCHAR mbCards;
+
+#if defined (OS2)
+ extern void (far *s_PhaseTbl[8]) (ULONG, UCHAR);
+#else
+ #if defined(DOS)
+ extern void (*s_PhaseTbl[8]) (USHORT, UCHAR);
+ #else
+ extern void (*s_PhaseTbl[8]) (ULONG, UCHAR);
+ #endif
+#endif
+
+
+#if defined(BUGBUG)
+extern UCHAR debug_int[MAX_CARDS][debug_size];
+extern UCHAR debug_index[MAX_CARDS];
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+*/
+
+#if (FW_TYPE==_SCCB_MGR_)
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_sense_adapter
+ *
+ * Description: Setup and/or Search for cards and return info to caller.
+ *
+ *---------------------------------------------------------------------*/
+
+/* Probe the Harpoon (FlashPoint) chip at pCardInfo->si_baseaddr and fill
+   *pCardInfo with the adapter's configuration: SCSI ID, per-target
+   sync/disconnect/wide/fast/ultra bit maps, termination state, card model
+   and BIOS translation info.  Also performs one-time manager-table setup
+   and NVRAM bookkeeping.  Returns 0 on success, or (int)FAILURE when no
+   compatible chip responds at this address (or the NVRAM table is full). */
+int SccbMgr_sense_adapter(PSCCBMGR_INFO pCardInfo)
+{
+#if defined(DOS)
+#else
+   static UCHAR first_time = 1;
+#endif
+
+   UCHAR i,j,id,ScamFlg;
+   USHORT temp,temp2,temp3,temp4,temp5,temp6;
+#if defined(DOS)
+   USHORT ioport;
+#else
+   ULONG ioport;
+#endif
+   PNVRamInfo pCurrNvRam;
+
+#if defined(DOS)
+   ioport = (USHORT)pCardInfo->si_baseaddr;
+#else
+   ioport = pCardInfo->si_baseaddr;
+#endif
+
+
+   /* Reject anything that does not answer with the Orion vendor/device ID. */
+   if (RD_HARPOON(ioport+hp_vendor_id_0) != ORION_VEND_0)
+      return((int)FAILURE);
+
+   if ((RD_HARPOON(ioport+hp_vendor_id_1) != ORION_VEND_1))
+      return((int)FAILURE);
+
+   if ((RD_HARPOON(ioport+hp_device_id_0) != ORION_DEV_0))
+      return((int)FAILURE);
+
+   if ((RD_HARPOON(ioport+hp_device_id_1) != ORION_DEV_1))
+      return((int)FAILURE);
+
+
+   if (RD_HARPOON(ioport+hp_rev_num) != 0x0f){
+
+/* For a newer Harpoon, check the sub_device ID LSB: bits 0-3 must all be
+   ZERO to be compatible with the current version of the SCCBMgr; otherwise
+   skip this Harpoon device. */
+
+      if (RD_HARPOON(ioport+hp_sub_device_id_0) & 0x0f)
+         return((int)FAILURE);
+      }
+
+   /* One-time global initialization of the manager tables. */
+   if (first_time)
+   {
+      SccbMgrTableInitAll();
+      first_time = 0;
+      mbCards = 0;
+   }
+
+   /* A non-zero stack byte 0 apparently marks a chip that already went
+      through initialization (presumably by the BIOS) — NOTE(review):
+      confirm RdStack(.,0) semantics.  If initialized, adopt its NVRAM
+      image; otherwise reset and re-program the chip. */
+   if(RdStack(ioport, 0) != 0x00) {
+      if(ChkIfChipInitialized(ioport) == FALSE)
+      {
+         pCurrNvRam = NULL;
+         WR_HARPOON(ioport+hp_semaphore, 0x00);
+         XbowInit(ioport, 0);             /*Must Init the SCSI before attempting */
+         DiagEEPROM(ioport);
+      }
+      else
+      {
+         if(mbCards < MAX_MB_CARDS) {
+            pCurrNvRam = &nvRamInfo[mbCards];
+            mbCards++;
+            pCurrNvRam->niBaseAddr = ioport;
+            RNVRamData(pCurrNvRam);
+         }else
+            return((int) FAILURE);
+      }
+   }else
+      pCurrNvRam = NULL;
+#if defined (NO_BIOS_OPTION)
+   pCurrNvRam = NULL;
+   XbowInit(ioport, 0);             /*Must Init the SCSI before attempting */
+   DiagEEPROM(ioport);
+#endif  /* No BIOS Option */
+
+   WR_HARPOON(ioport+hp_clkctrl_0, CLKCTRL_DEFAULT);
+   WR_HARPOON(ioport+hp_sys_ctrl, 0x00);
+
+   /* Adapter SCSI ID: prefer the NVRAM image, else read the EEPROM. */
+   if(pCurrNvRam)
+      pCardInfo->si_id = pCurrNvRam->niAdapId;
+   else
+      pCardInfo->si_id = (UCHAR)(utilEERead(ioport, (ADAPTER_SCSI_ID/2)) &
+         (UCHAR)0x0FF);
+
+   pCardInfo->si_lun = 0x00;
+   pCardInfo->si_fw_revision = ORION_FW_REV;
+   temp2 = 0x0000;
+   temp3 = 0x0000;
+   temp4 = 0x0000;
+   temp5 = 0x0000;
+   temp6 = 0x0000;
+
+   /* Decode the 8-word sync-rate table into per-target bit maps.  Each
+      pass handles two targets (one byte each of temp); bits are shifted
+      in LSB-first via the ">>= 1 then |= 0x8000" pattern, so after 16
+      iterations bit n of each tempN word corresponds to target n. */
+   for (id = 0; id < (16/2); id++) {
+
+      if(pCurrNvRam){
+         temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+         temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+            (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+      }else
+         temp = utilEERead(ioport, (USHORT)((SYNC_RATE_TBL/2)+id));
+
+      for (i = 0; i < 2; temp >>=8,i++) {
+
+         temp2 >>= 1;
+         temp3 >>= 1;
+         temp4 >>= 1;
+         temp5 >>= 1;
+         temp6 >>= 1;
+         /* Each faster rate implies the slower ones: 20MT/s sets the
+            ultra, fast and sync maps; 10MT/s sets fast and sync; etc. */
+         switch (temp & 0x3)
+         {
+            case AUTO_RATE_20:   /* Synchronous, 20 mega-transfers/second */
+               temp6 |= 0x8000;  /* Fall through */
+            case AUTO_RATE_10:   /* Synchronous, 10 mega-transfers/second */
+               temp5 |= 0x8000;  /* Fall through */
+            case AUTO_RATE_05:   /* Synchronous, 5 mega-transfers/second */
+               temp2 |= 0x8000;  /* Fall through */
+            case AUTO_RATE_00:   /* Asynchronous */
+               break;
+         }
+
+         if (temp & DISC_ENABLE_BIT)
+            temp3 |= 0x8000;
+
+         if (temp & WIDE_NEGO_BIT)
+            temp4 |= 0x8000;
+
+      }
+   }
+
+   pCardInfo->si_per_targ_init_sync = temp2;
+   pCardInfo->si_per_targ_no_disc = temp3;
+   pCardInfo->si_per_targ_wide_nego = temp4;
+   pCardInfo->si_per_targ_fast_nego = temp5;
+   pCardInfo->si_per_targ_ultra_nego = temp6;
+
+   if(pCurrNvRam)
+      i = pCurrNvRam->niSysConf;
+   else
+      i = (UCHAR)(utilEERead(ioport, (SYSTEM_CONFIG/2)));
+
+   if(pCurrNvRam)
+      ScamFlg = pCurrNvRam->niScamConf;
+   else
+      ScamFlg = (UCHAR) utilEERead(ioport, SCAM_CONFIG/2);
+
+   /* Translate the system-config byte (i) and SCAM config into si_flags. */
+   pCardInfo->si_flags = 0x0000;
+
+   if (i & 0x01)
+      pCardInfo->si_flags |= SCSI_PARITY_ENA;
+
+   if (!(i & 0x02))
+      pCardInfo->si_flags |= SOFT_RESET;
+
+   if (i & 0x10)
+      pCardInfo->si_flags |= EXTENDED_TRANSLATION;
+
+   if (ScamFlg & SCAM_ENABLED)
+      pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
+
+   if (ScamFlg & SCAM_LEVEL2)
+      pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
+
+   /* Program low (bit 0x04) / high (bit 0x08) byte termination enables. */
+   j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+   if (i & 0x04) {
+      j |= SCSI_TERM_ENA_L;
+   }
+   WR_HARPOON(ioport+hp_bm_ctrl, j );
+
+   j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+   if (i & 0x08) {
+      j |= SCSI_TERM_ENA_H;
+   }
+   WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+   if (!(RD_HARPOON(ioport+hp_page_ctrl) & NARROW_SCSI_CARD))
+
+      pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
+
+   pCardInfo->si_card_family = HARPOON_FAMILY;
+   pCardInfo->si_bustype = BUSTYPE_PCI;
+
+   /* Derive the 3-character model number from NVRAM or the EEPROM. */
+   if(pCurrNvRam){
+      pCardInfo->si_card_model[0] = '9';
+      switch(pCurrNvRam->niModel & 0x0f){
+         case MODEL_LT:
+            pCardInfo->si_card_model[1] = '3';
+            pCardInfo->si_card_model[2] = '0';
+            break;
+         case MODEL_LW:
+            pCardInfo->si_card_model[1] = '5';
+            pCardInfo->si_card_model[2] = '0';
+            break;
+         case MODEL_DL:
+            pCardInfo->si_card_model[1] = '3';
+            pCardInfo->si_card_model[2] = '2';
+            break;
+         case MODEL_DW:
+            pCardInfo->si_card_model[1] = '5';
+            pCardInfo->si_card_model[2] = '2';
+            break;
+      }
+   }else{
+      temp = utilEERead(ioport, (MODEL_NUMB_0/2));
+      pCardInfo->si_card_model[0] = (UCHAR)(temp >> 8);
+      temp = utilEERead(ioport, (MODEL_NUMB_2/2));
+
+      pCardInfo->si_card_model[1] = (UCHAR)(temp & 0x00FF);
+      pCardInfo->si_card_model[2] = (UCHAR)(temp >> 8);
+   }
+
+   /* Sense the physical termination state; the method depends on the
+      model.  hp_ee_ctrl BIT(7) appears to be the sense/data-in line;
+      the final branch looks like it bit-bangs 8 bits in by toggling
+      hp_xfer_pad BIT(4) as a clock — NOTE(review): confirm against the
+      Harpoon register documentation. */
+   if (pCardInfo->si_card_model[1] == '3')
+     {
+       if (RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7))
+         pCardInfo->si_flags |= LOW_BYTE_TERM;
+     }
+   else if (pCardInfo->si_card_model[2] == '0')
+     {
+       temp = RD_HARPOON(ioport+hp_xfer_pad);
+       WR_HARPOON(ioport+hp_xfer_pad, (temp & ~BIT(4)));
+       if (RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7))
+         pCardInfo->si_flags |= LOW_BYTE_TERM;
+       WR_HARPOON(ioport+hp_xfer_pad, (temp | BIT(4)));
+       if (RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7))
+         pCardInfo->si_flags |= HIGH_BYTE_TERM;
+       WR_HARPOON(ioport+hp_xfer_pad, temp);
+     }
+   else
+     {
+       temp = RD_HARPOON(ioport+hp_ee_ctrl);
+       temp2 = RD_HARPOON(ioport+hp_xfer_pad);
+       WR_HARPOON(ioport+hp_ee_ctrl, (temp | SEE_CS));
+       WR_HARPOON(ioport+hp_xfer_pad, (temp2 | BIT(4)));
+       temp3 = 0;
+       for (i = 0; i < 8; i++)
+         {
+           temp3 <<= 1;
+           if (!(RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7)))
+             temp3 |= 1;
+           WR_HARPOON(ioport+hp_xfer_pad, (temp2 & ~BIT(4)));
+           WR_HARPOON(ioport+hp_xfer_pad, (temp2 | BIT(4)));
+         }
+       WR_HARPOON(ioport+hp_ee_ctrl, temp);
+       WR_HARPOON(ioport+hp_xfer_pad, temp2);
+       if (!(temp3 & BIT(7)))
+         pCardInfo->si_flags |= LOW_BYTE_TERM;
+       if (!(temp3 & BIT(6)))
+         pCardInfo->si_flags |= HIGH_BYTE_TERM;
+     }
+
+
+   /* The BIOS leaves translation data and the relative card number in
+      ARAM; switch the window in, copy them out, then switch back. */
+   ARAM_ACCESS(ioport);
+
+   for ( i = 0; i < 4; i++ ) {
+
+      pCardInfo->si_XlatInfo[i] =
+         RD_HARPOON(ioport+hp_aramBase+BIOS_DATA_OFFSET+i);
+   }
+
+   /* return with -1 if no sort, else return with
+      logical card number sorted by BIOS (zero-based) */
+
+   pCardInfo->si_relative_cardnum =
+      (UCHAR)(RD_HARPOON(ioport+hp_aramBase+BIOS_RELATIVE_CARD)-1);
+
+   SGRAM_ACCESS(ioport);
+
+   /* Install the SCSI bus-phase dispatch handlers (indexed by phase). */
+   s_PhaseTbl[0] = phaseDataOut;
+   s_PhaseTbl[1] = phaseDataIn;
+   s_PhaseTbl[2] = phaseIllegal;
+   s_PhaseTbl[3] = phaseIllegal;
+   s_PhaseTbl[4] = phaseCommand;
+   s_PhaseTbl[5] = phaseStatus;
+   s_PhaseTbl[6] = phaseMsgOut;
+   s_PhaseTbl[7] = phaseMsgIn;
+
+   pCardInfo->si_present = 0x01;
+
+#if defined(BUGBUG)
+
+
+   /* Debug build: clear the per-card trace buffers. */
+   for (i = 0; i < MAX_CARDS; i++) {
+
+      for (id=0; id<debug_size; id++)
+         debug_int[i][id] = (UCHAR)0x00;
+      debug_index[i] = 0;
+   }
+
+#endif
+
+   return(0);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_config_adapter
+ *
+ * Description: Setup adapter for normal operation (hard reset).
+ *
+ *---------------------------------------------------------------------*/
+
+/* Configure the adapter described by pCardInfo for normal operation
+   (hard reset): claim or create a BL_Card[] slot for this I/O port,
+   reset the bus-master and Xbow logic, program the adapter's SCSI ID
+   and termination, optionally reset the SCSI bus, set global behavior
+   flags, and load the per-target sync/wide tables.  Returns the card
+   handle (the PSCCBcard cast to the port-sized integer) or FAILURE if
+   all MAX_CARDS slots are in use. */
+#if defined(DOS)
+USHORT SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo)
+#else
+ULONG SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo)
+#endif
+{
+   PSCCBcard CurrCard = NULL;
+   PNVRamInfo pCurrNvRam;
+   UCHAR i,j,thisCard, ScamFlg;
+   USHORT temp,sync_bit_map,id;
+#if defined(DOS)
+   USHORT ioport;
+#else
+   ULONG ioport;
+#endif
+
+#if defined(DOS)
+   ioport = (USHORT)pCardInfo->si_baseaddr;
+#else
+   ioport = pCardInfo->si_baseaddr;
+#endif
+
+   /* Find an existing BL_Card[] entry for this port, or claim the first
+      free one (ioPort == 0).  The loop runs one past the last index so
+      the thisCard == MAX_CARDS test can report "table full". */
+   for(thisCard =0; thisCard <= MAX_CARDS; thisCard++) {
+
+      if (thisCard == MAX_CARDS) {
+
+         return(FAILURE);
+      }
+
+      if (BL_Card[thisCard].ioPort == ioport) {
+
+         /* Re-configure of a known card: re-init its tables only.
+            NOTE(review): cardIndex/cardInfo are not refreshed here —
+            presumably still valid from the first configuration. */
+         CurrCard = &BL_Card[thisCard];
+         SccbMgrTableInitCard(CurrCard,thisCard);
+         break;
+      }
+
+      else if (BL_Card[thisCard].ioPort == 0x00) {
+
+         BL_Card[thisCard].ioPort = ioport;
+         CurrCard = &BL_Card[thisCard];
+
+         /* Attach the NVRAM image recorded by sense_adapter, if any. */
+         if(mbCards)
+            for(i = 0; i < mbCards; i++){
+               if(CurrCard->ioPort == nvRamInfo[i].niBaseAddr)
+                  CurrCard->pNvRamInfo = &nvRamInfo[i];
+            }
+         SccbMgrTableInitCard(CurrCard,thisCard);
+         CurrCard->cardIndex = thisCard;
+         CurrCard->cardInfo = pCardInfo;
+
+         break;
+      }
+   }
+
+   pCurrNvRam = CurrCard->pNvRamInfo;
+
+   if(pCurrNvRam){
+      ScamFlg = pCurrNvRam->niScamConf;
+   }
+   else{
+      ScamFlg = (UCHAR) utilEERead(ioport, SCAM_CONFIG/2);
+   }
+
+
+   BusMasterInit(ioport);
+   XbowInit(ioport, ScamFlg);
+
+#if defined (NO_BIOS_OPTION)
+
+
+   /* Without a BIOS we must self-test the Xbow and bus-master logic. */
+   if (DiagXbow(ioport)) return(FAILURE);
+   if (DiagBusMaster(ioport)) return(FAILURE);
+
+#endif  /* No BIOS Option */
+
+   autoLoadDefaultMap(ioport);
+
+
+   /* Compute id = 1 << si_id (the self-ID bit mask). */
+   for (i = 0,id = 0x01; i != pCardInfo->si_id; i++,id <<= 1){}
+
+   WR_HARPOON(ioport+hp_selfid_0, id);
+   WR_HARPOON(ioport+hp_selfid_1, 0x00);
+   WR_HARPOON(ioport+hp_arb_id, pCardInfo->si_id);
+   CurrCard->ourId = pCardInfo->si_id;
+
+   /* Apply the flags gathered by SccbMgr_sense_adapter. */
+   i = (UCHAR) pCardInfo->si_flags;
+   if (i & SCSI_PARITY_ENA)
+       WR_HARPOON(ioport+hp_portctrl_1,(HOST_MODE8 | CHK_SCSI_P));
+
+   j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+   if (i & LOW_BYTE_TERM)
+      j |= SCSI_TERM_ENA_L;
+   WR_HARPOON(ioport+hp_bm_ctrl, j);
+
+   j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+   if (i & HIGH_BYTE_TERM)
+      j |= SCSI_TERM_ENA_H;
+   WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+
+   /* Unless a soft reset was requested, reset the SCSI bus and run the
+      SCAM initialization sequence. */
+   if (!(pCardInfo->si_flags & SOFT_RESET)) {
+
+      sresb(ioport,thisCard);
+
+      scini(thisCard, pCardInfo->si_id, 0);
+   }
+
+
+
+   if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
+      CurrCard->globalFlags |= F_NO_FILTER;
+
+   if(pCurrNvRam){
+      if(pCurrNvRam->niSysConf & 0x10)
+         CurrCard->globalFlags |= F_GREEN_PC;
+   }
+   else{
+      if (utilEERead(ioport, (SYSTEM_CONFIG/2)) & GREEN_PC_ENA)
+         CurrCard->globalFlags |= F_GREEN_PC;
+   }
+
+   /* Set global flag to indicate Re-Negotiation to be done on all
+      check conditions */
+   if(pCurrNvRam){
+      if(pCurrNvRam->niScsiConf & 0x04)
+         CurrCard->globalFlags |= F_DO_RENEGO;
+   }
+   else{
+      if (utilEERead(ioport, (SCSI_CONFIG/2)) & RENEGO_ENA)
+         CurrCard->globalFlags |= F_DO_RENEGO;
+   }
+
+   if(pCurrNvRam){
+      if(pCurrNvRam->niScsiConf & 0x08)
+         CurrCard->globalFlags |= F_CONLUN_IO;
+   }
+   else{
+      if (utilEERead(ioport, (SCSI_CONFIG/2)) & CONNIO_ENA)
+         CurrCard->globalFlags |= F_CONLUN_IO;
+   }
+
+
+   /* Per-target disconnect permission, from the sense-time bit map. */
+   temp = pCardInfo->si_per_targ_no_disc;
+
+   for (i = 0,id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) {
+
+      if (temp & id)
+         sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC;
+   }
+
+   /* Load per-target sync/wide EEPROM values; sync_bit_map walks one
+      target per inner iteration (two targets per table word). */
+   sync_bit_map = 0x0001;
+
+   for (id = 0; id < (MAX_SCSI_TAR/2); id++) {
+
+      if(pCurrNvRam){
+         temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+         temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+            (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+      }else
+         temp = utilEERead(ioport, (USHORT)((SYNC_RATE_TBL/2)+id));
+
+      for (i = 0; i < 2; temp >>=8,i++) {
+
+         if (pCardInfo->si_per_targ_init_sync & sync_bit_map) {
+
+            sccbMgrTbl[thisCard][id*2+i].TarEEValue = (UCHAR)temp;
+         }
+
+         else {
+            /* No host-initiated sync for this target: mark sync as
+               already settled and strip the rate bits. */
+            sccbMgrTbl[thisCard][id*2+i].TarStatus |= SYNC_SUPPORTED;
+            sccbMgrTbl[thisCard][id*2+i].TarEEValue =
+               (UCHAR)(temp & ~EE_SYNC_MASK);
+         }
+
+#if defined(WIDE_SCSI)
+/*         if ((pCardInfo->si_per_targ_wide_nego & sync_bit_map) ||
+            (id*2+i >= 8)){
+*/
+         if (pCardInfo->si_per_targ_wide_nego & sync_bit_map){
+
+            sccbMgrTbl[thisCard][id*2+i].TarEEValue |= EE_WIDE_SCSI;
+
+         }
+
+         else { /* NARROW SCSI */
+            sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+         }
+
+#else
+         sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+#endif
+
+
+         sync_bit_map <<= 1;
+
+
+
+      }
+   }
+
+   /* Advertise to the BIOS/firmware that the SCCB manager is loaded. */
+   WR_HARPOON((ioport+hp_semaphore),
+      (UCHAR)(RD_HARPOON((ioport+hp_semaphore)) | SCCB_MGR_PRESENT));
+
+#if defined(DOS)
+   return((USHORT)CurrCard);
+#else
+   return((ULONG)CurrCard);
+#endif
+}
+
+#else /* end (FW_TYPE==_SCCB_MGR_) */
+
+
+
+/* Publish this manager's UCB entry points and capability description
+   into *pMgrInfo.  Pure table fill — no hardware access — and always
+   succeeds, returning 0. */
+STATIC s16bits FP_PresenceCheck(PMGR_INFO pMgrInfo)
+{
+   PMGR_ENTRYPNTS entry = &pMgrInfo->mi_Functions;
+
+   /* Mandatory entry points. */
+   entry->UCBMgr_probe_adapter = probe_adapter;
+   entry->UCBMgr_init_adapter = init_adapter;
+   entry->UCBMgr_start_UCB = SccbMgr_start_sccb;
+   entry->UCBMgr_build_UCB = build_UCB;
+   entry->UCBMgr_abort_UCB = SccbMgr_abort_sccb;
+   entry->UCBMgr_my_int = SccbMgr_my_int;
+   entry->UCBMgr_isr = SccbMgr_isr;
+   entry->UCBMgr_scsi_reset = SccbMgr_scsi_reset;
+   entry->UCBMgr_timer_expired = SccbMgr_timer_expired;
+#ifndef NO_IOCTLS
+   /* IOCTL-only entry points. */
+   entry->UCBMgr_unload_card = SccbMgr_unload_card;
+   entry->UCBMgr_save_foreign_state = SccbMgr_save_foreign_state;
+   entry->UCBMgr_restore_foreign_state = SccbMgr_restore_foreign_state;
+   entry->UCBMgr_restore_native_state = SccbMgr_restore_native_state;
+#endif /*NO_IOCTLS*/
+
+   /* Capability description for the BUDI layer. */
+   pMgrInfo->mi_SGListFormat = 0x01;
+   pMgrInfo->mi_DataPtrFormat = 0x01;
+   /* NOTE(review): the u16bits cast truncates 0xffffffff to 0xffff,
+      i.e. "effectively unlimited" — presumably intentional; confirm. */
+   pMgrInfo->mi_MaxSGElements = (u16bits) 0xffffffff;
+   pMgrInfo->mi_MgrPrivateLen = sizeof(SCCB);
+   pMgrInfo->mi_PCIVendorID = BL_VENDOR_ID;
+   pMgrInfo->mi_PCIDeviceID = FP_DEVICE_ID;
+   pMgrInfo->mi_MgrAttributes = ATTR_IO_MAPPED +
+                                ATTR_PHYSICAL_ADDRESS +
+                                ATTR_VIRTUAL_ADDRESS +
+                                ATTR_OVERLAPPED_IO_IOCTLS_OK;
+   pMgrInfo->mi_IoRangeLen = 256;
+   return 0;
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: probe_adapter
+ *
+ * Description: Setup and/or Search for cards and return info to caller.
+ *
+ *---------------------------------------------------------------------*/
+/* UCB-manager probe: detect a Harpoon chip at pAdapterInfo->ai_baseaddr
+   and fill *pAdapterInfo (IDs, per-target maps, termination, capability
+   attributes, model/vendor/family strings, BIOS translation data).
+   Returns 0 on success; 1-5 identify which chip-ID check failed.
+   NOTE(review): the NVRAM-table-full path returns (int)FAILURE instead
+   of a small positive code — inconsistent with the other error returns;
+   confirm callers treat any non-zero value as failure. */
+STATIC s32bits probe_adapter(PADAPTER_INFO pAdapterInfo)
+{
+   u16bits temp,temp2,temp3,temp4;
+   u08bits i,j,id;
+
+#if defined(DOS)
+#else
+   static u08bits first_time = 1;
+#endif
+   BASE_PORT ioport;
+   PNVRamInfo pCurrNvRam;
+
+   ioport = (BASE_PORT)pAdapterInfo->ai_baseaddr;
+
+
+
+   /* Verify the Orion vendor/device IDs; each check returns a distinct
+      failure code so the caller can tell which register mismatched. */
+   if (RD_HARPOON(ioport+hp_vendor_id_0) != ORION_VEND_0)
+      return(1);
+
+   if ((RD_HARPOON(ioport+hp_vendor_id_1) != ORION_VEND_1))
+      return(2);
+
+   if ((RD_HARPOON(ioport+hp_device_id_0) != ORION_DEV_0))
+      return(3);
+
+   if ((RD_HARPOON(ioport+hp_device_id_1) != ORION_DEV_1))
+      return(4);
+
+
+   if (RD_HARPOON(ioport+hp_rev_num) != 0x0f){
+
+
+/* For a newer Harpoon, check the sub_device ID LSB: bits 0-3 must all be
+   ZERO to be compatible with the current version of the SCCBMgr; otherwise
+   skip this Harpoon device. */
+
+      if (RD_HARPOON(ioport+hp_sub_device_id_0) & 0x0f)
+         return(5);
+      }
+
+   /* One-time global initialization of the manager tables. */
+   if (first_time) {
+
+      SccbMgrTableInitAll();
+      first_time = 0;
+      mbCards = 0;
+   }
+
+   /* A non-zero stack byte 0 apparently marks a previously initialized
+      chip (NOTE(review): confirm RdStack(.,0) semantics); adopt its
+      NVRAM image if so, otherwise reset and re-program the chip. */
+   if(RdStack(ioport, 0) != 0x00) {
+      if(ChkIfChipInitialized(ioport) == FALSE)
+      {
+         pCurrNvRam = NULL;
+         WR_HARPOON(ioport+hp_semaphore, 0x00);
+         XbowInit(ioport, 0);             /*Must Init the SCSI before attempting */
+         DiagEEPROM(ioport);
+      }
+      else
+      {
+         if(mbCards < MAX_MB_CARDS) {
+            pCurrNvRam = &nvRamInfo[mbCards];
+            mbCards++;
+            pCurrNvRam->niBaseAddr = ioport;
+            RNVRamData(pCurrNvRam);
+         }else
+            return((int) FAILURE);
+      }
+   }else
+      pCurrNvRam = NULL;
+
+#if defined (NO_BIOS_OPTION)
+   pCurrNvRam = NULL;
+   XbowInit(ioport, 0);             /*Must Init the SCSI before attempting */
+   DiagEEPROM(ioport);
+#endif  /* No BIOS Option */
+
+   WR_HARPOON(ioport+hp_clkctrl_0, CLKCTRL_DEFAULT);
+   WR_HARPOON(ioport+hp_sys_ctrl, 0x00);
+
+   /* Adapter SCSI ID from NVRAM, else from the EEPROM. */
+   if(pCurrNvRam)
+      pAdapterInfo->ai_id = pCurrNvRam->niAdapId;
+   else
+      pAdapterInfo->ai_id = (u08bits)(utilEERead(ioport, (ADAPTER_SCSI_ID/2)) &
+         (u08bits)0x0FF);
+
+   pAdapterInfo->ai_lun = 0x00;
+   pAdapterInfo->ai_fw_revision[0] = '3';
+   pAdapterInfo->ai_fw_revision[1] = '1';
+   pAdapterInfo->ai_fw_revision[2] = '1';
+   pAdapterInfo->ai_fw_revision[3] = ' ';
+   pAdapterInfo->ai_NumChannels = 1;
+
+   temp2 = 0x0000;
+   temp3 = 0x0000;
+   temp4 = 0x0000;
+
+   /* Decode the sync-rate table into per-target sync/disconnect/wide
+      bit maps; bits are shifted in LSB-first so that bit n of each
+      tempN word ends up corresponding to target n. */
+   for (id = 0; id < (16/2); id++) {
+
+      if(pCurrNvRam){
+         temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+         temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+            (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+      }else
+         temp = utilEERead(ioport, (u16bits)((SYNC_RATE_TBL/2)+id));
+
+      for (i = 0; i < 2; temp >>=8,i++) {
+
+         if ((temp & 0x03) != AUTO_RATE_00) {
+
+            temp2 >>= 0x01;
+            temp2 |= 0x8000;
+         }
+
+         else {
+            temp2 >>= 0x01;
+         }
+
+         if (temp & DISC_ENABLE_BIT) {
+
+            temp3 >>= 0x01;
+            temp3 |= 0x8000;
+         }
+
+         else {
+            temp3 >>= 0x01;
+         }
+
+         if (temp & WIDE_NEGO_BIT) {
+
+            temp4 >>= 0x01;
+            temp4 |= 0x8000;
+         }
+
+         else {
+            temp4 >>= 0x01;
+         }
+
+      }
+   }
+
+   pAdapterInfo->ai_per_targ_init_sync = temp2;
+   pAdapterInfo->ai_per_targ_no_disc = temp3;
+   pAdapterInfo->ai_per_targ_wide_nego = temp4;
+   if(pCurrNvRam)
+      i = pCurrNvRam->niSysConf;
+   else
+      i = (u08bits)(utilEERead(ioport, (SYSTEM_CONFIG/2)));
+
+   /*
+   ** interrupts always level-triggered for FlashPoint
+   */
+   pAdapterInfo->ai_stateinfo |= LEVEL_TRIG;
+
+   if (i & 0x01)
+      pAdapterInfo->ai_stateinfo |= SCSI_PARITY_ENA;
+
+   if (i & 0x02)   /* SCSI Bus reset in AutoSCSI Set ? */
+   {
+      if(pCurrNvRam)
+      {
+         j = pCurrNvRam->niScamConf;
+      }
+      else
+      {
+         j = (u08bits) utilEERead(ioport, SCAM_CONFIG/2);
+      }
+      if(j & SCAM_ENABLED)
+      {
+         if(j & SCAM_LEVEL2)
+         {
+            pAdapterInfo->ai_stateinfo |= SCAM2_ENA;
+         }
+         else
+         {
+            pAdapterInfo->ai_stateinfo |= SCAM1_ENA;
+         }
+      }
+   }
+   /* Program low (bit 0x04) / high (bit 0x08) byte termination enables
+      and mirror the state into ai_stateinfo. */
+   j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+   if (i & 0x04) {
+      j |= SCSI_TERM_ENA_L;
+      pAdapterInfo->ai_stateinfo |= LOW_BYTE_TERM_ENA;
+   }
+   WR_HARPOON(ioport+hp_bm_ctrl, j );
+
+   j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+   if (i & 0x08) {
+      j |= SCSI_TERM_ENA_H;
+      pAdapterInfo->ai_stateinfo |= HIGH_BYTE_TERM_ENA;
+   }
+   WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+   if(RD_HARPOON(ioport + hp_page_ctrl) & BIOS_SHADOW)
+   {
+      pAdapterInfo->ai_FlashRomSize = 64 * 1024;   /* 64k ROM */
+   }
+   else
+   {
+      pAdapterInfo->ai_FlashRomSize = 32 * 1024;   /* 32k ROM */
+   }
+
+   /* Wide cards support 16 targets / 32 LUNs; narrow cards 8 / 8. */
+   pAdapterInfo->ai_stateinfo |= (FAST20_ENA | TAG_QUEUE_ENA);
+   if (!(RD_HARPOON(ioport+hp_page_ctrl) & NARROW_SCSI_CARD))
+   {
+      pAdapterInfo->ai_attributes |= (WIDE_CAPABLE | FAST20_CAPABLE
+                     | SCAM2_CAPABLE
+                     | TAG_QUEUE_CAPABLE
+                     | SUPRESS_UNDERRRUNS_CAPABLE
+                     | SCSI_PARITY_CAPABLE);
+      pAdapterInfo->ai_MaxTarg = 16;
+      pAdapterInfo->ai_MaxLun = 32;
+   }
+   else
+   {
+      pAdapterInfo->ai_attributes |= (FAST20_CAPABLE | SCAM2_CAPABLE
+                     | TAG_QUEUE_CAPABLE
+                     | SUPRESS_UNDERRRUNS_CAPABLE
+                     | SCSI_PARITY_CAPABLE);
+      pAdapterInfo->ai_MaxTarg = 8;
+      pAdapterInfo->ai_MaxLun = 8;
+   }
+
+   pAdapterInfo->ai_product_family = HARPOON_FAMILY;
+   pAdapterInfo->ai_HBAbustype = BUSTYPE_PCI;
+
+   for (i=0;i<CARD_MODEL_NAMELEN;i++)
+   {
+      pAdapterInfo->ai_card_model[i]=' '; /* initialize the ai_card_model */
+   }
+
+   /* Derive the 3-character model number from NVRAM or the EEPROM. */
+   if(pCurrNvRam){
+      pAdapterInfo->ai_card_model[0] = '9';
+      switch(pCurrNvRam->niModel & 0x0f){
+         case MODEL_LT:
+            pAdapterInfo->ai_card_model[1] = '3';
+            pAdapterInfo->ai_card_model[2] = '0';
+            break;
+         case MODEL_LW:
+            pAdapterInfo->ai_card_model[1] = '5';
+            pAdapterInfo->ai_card_model[2] = '0';
+            break;
+         case MODEL_DL:
+            pAdapterInfo->ai_card_model[1] = '3';
+            pAdapterInfo->ai_card_model[2] = '2';
+            break;
+         case MODEL_DW:
+            pAdapterInfo->ai_card_model[1] = '5';
+            pAdapterInfo->ai_card_model[2] = '2';
+            break;
+      }
+   }else{
+      temp = utilEERead(ioport, (MODEL_NUMB_0/2));
+      pAdapterInfo->ai_card_model[0] = (u08bits)(temp >> 8);
+      temp = utilEERead(ioport, (MODEL_NUMB_2/2));
+
+      pAdapterInfo->ai_card_model[1] = (u08bits)(temp & 0x00FF);
+      pAdapterInfo->ai_card_model[2] = (u08bits)(temp >> 8);
+   }
+
+
+
+   pAdapterInfo->ai_FiberProductType = 0;
+
+   pAdapterInfo->ai_secondary_range = 0;
+
+   for (i=0;i<WORLD_WIDE_NAMELEN;i++)
+   {
+      pAdapterInfo->ai_worldwidename[i]='\0';
+   }
+
+   /* Fixed vendor string: "BUSLOGIC". */
+   for (i=0;i<VENDOR_NAMELEN;i++)
+   {
+      pAdapterInfo->ai_vendorstring[i]='\0';
+   }
+   pAdapterInfo->ai_vendorstring[0]='B';
+   pAdapterInfo->ai_vendorstring[1]='U';
+   pAdapterInfo->ai_vendorstring[2]='S';
+   pAdapterInfo->ai_vendorstring[3]='L';
+   pAdapterInfo->ai_vendorstring[4]='O';
+   pAdapterInfo->ai_vendorstring[5]='G';
+   pAdapterInfo->ai_vendorstring[6]='I';
+   pAdapterInfo->ai_vendorstring[7]='C';
+
+   /* Fixed family string: "FLASHPOINT". */
+   for (i=0;i<FAMILY_NAMELEN;i++)
+   {
+      pAdapterInfo->ai_AdapterFamilyString[i]='\0';
+   }
+   pAdapterInfo->ai_AdapterFamilyString[0]='F';
+   pAdapterInfo->ai_AdapterFamilyString[1]='L';
+   pAdapterInfo->ai_AdapterFamilyString[2]='A';
+   pAdapterInfo->ai_AdapterFamilyString[3]='S';
+   pAdapterInfo->ai_AdapterFamilyString[4]='H';
+   pAdapterInfo->ai_AdapterFamilyString[5]='P';
+   pAdapterInfo->ai_AdapterFamilyString[6]='O';
+   pAdapterInfo->ai_AdapterFamilyString[7]='I';
+   pAdapterInfo->ai_AdapterFamilyString[8]='N';
+   pAdapterInfo->ai_AdapterFamilyString[9]='T';
+
+   /* The BIOS leaves translation data and the relative card number in
+      ARAM; switch the window in, copy them out, then switch back. */
+   ARAM_ACCESS(ioport);
+
+   for ( i = 0; i < 4; i++ ) {
+
+      pAdapterInfo->ai_XlatInfo[i] =
+         RD_HARPOON(ioport+hp_aramBase+BIOS_DATA_OFFSET+i);
+   }
+
+   /* return with -1 if no sort, else return with
+      logical card number sorted by BIOS (zero-based) */
+
+
+   pAdapterInfo->ai_relative_cardnum =
+      (u08bits)(RD_HARPOON(ioport+hp_aramBase+BIOS_RELATIVE_CARD)-1);
+
+   SGRAM_ACCESS(ioport);
+
+   /* Install the SCSI bus-phase dispatch handlers (indexed by phase). */
+   s_PhaseTbl[0] = phaseDataOut;
+   s_PhaseTbl[1] = phaseDataIn;
+   s_PhaseTbl[2] = phaseIllegal;
+   s_PhaseTbl[3] = phaseIllegal;
+   s_PhaseTbl[4] = phaseCommand;
+   s_PhaseTbl[5] = phaseStatus;
+   s_PhaseTbl[6] = phaseMsgOut;
+   s_PhaseTbl[7] = phaseMsgIn;
+
+   pAdapterInfo->ai_present = 0x01;
+
+#if defined(BUGBUG)
+
+
+   /* Debug build: clear the per-card trace buffers. */
+   for (i = 0; i < MAX_CARDS; i++) {
+
+      for (id=0; id<debug_size; id++)
+         debug_int[i][id] = (u08bits)0x00;
+      debug_index[i] = 0;
+   }
+
+#endif
+
+   return(0);
+}
+
+
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: init_adapter, exported to BUDI via UCBMgr_init_adapter entry
+ *
+ *
+ * Description: Setup adapter for normal operation (hard reset).
+ *
+ *---------------------------------------------------------------------*/
+STATIC CARD_HANDLE init_adapter(PADAPTER_INFO pCardInfo)
+{
+   PSCCBcard CurrCard;
+   PNVRamInfo pCurrNvRam;
+   u08bits i,j,thisCard, ScamFlg;
+   u16bits temp,sync_bit_map,id;
+   BASE_PORT ioport;
+
+   ioport = (BASE_PORT)pCardInfo->ai_baseaddr;
+
+   /* Locate (or claim) the BL_Card[] slot whose ioPort matches this
+      adapter; reaching MAX_CARDS means no slot is free -> FAILURE. */
+   for(thisCard =0; thisCard <= MAX_CARDS; thisCard++) {
+
+      if (thisCard == MAX_CARDS) {
+
+         return(FAILURE);
+      }
+
+      if (BL_Card[thisCard].ioPort == ioport) {
+
+         CurrCard = &BL_Card[thisCard];
+         SccbMgrTableInitCard(CurrCard,thisCard);
+         break;
+      }
+
+      else if (BL_Card[thisCard].ioPort == 0x00) {
+
+         BL_Card[thisCard].ioPort = ioport;
+         CurrCard = &BL_Card[thisCard];
+
+         /* Attach any NVRAM image previously harvested for this base
+            address (mbCards counts the images in nvRamInfo[]). */
+         if(mbCards)
+            for(i = 0; i < mbCards; i++){
+               if(CurrCard->ioPort == nvRamInfo[i].niBaseAddr)
+                  CurrCard->pNvRamInfo = &nvRamInfo[i];
+            }
+         SccbMgrTableInitCard(CurrCard,thisCard);
+         CurrCard->cardIndex = thisCard;
+         CurrCard->cardInfo = pCardInfo;
+
+         break;
+      }
+   }
+
+   pCurrNvRam = CurrCard->pNvRamInfo;
+
+   /* SCAM configuration comes from the cached NVRAM image when one is
+      attached, otherwise straight from the EEPROM. */
+   if(pCurrNvRam){
+      ScamFlg = pCurrNvRam->niScamConf;
+   }
+   else{
+      ScamFlg = (UCHAR) utilEERead(ioport, SCAM_CONFIG/2);
+   }
+
+
+   BusMasterInit(ioport);
+   XbowInit(ioport, ScamFlg);
+
+#if defined (NO_BIOS_OPTION)
+
+   /* No BIOS has exercised the chip, so run the diagnostics here. */
+   if (DiagXbow(ioport)) return(FAILURE);
+   if (DiagBusMaster(ioport)) return(FAILURE);
+
+#endif /* No BIOS Option */
+
+   autoLoadDefaultMap(ioport);
+
+   /* Convert our SCSI id (ai_id) into a one-hot bit mask in 'id'. */
+   for (i = 0,id = 0x01; i != pCardInfo->ai_id; i++,id <<= 1){}
+
+   WR_HARPOON(ioport+hp_selfid_0, id);
+   WR_HARPOON(ioport+hp_selfid_1, 0x00);
+   WR_HARPOON(ioport+hp_arb_id, pCardInfo->ai_id);
+   CurrCard->ourId = (unsigned char) pCardInfo->ai_id;
+
+   /* ai_stateinfo carries the parity / termination enable bits. */
+   i = (u08bits) pCardInfo->ai_stateinfo;
+   if (i & SCSI_PARITY_ENA)
+      WR_HARPOON(ioport+hp_portctrl_1,(HOST_MODE8 | CHK_SCSI_P));
+
+   j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+   if (i & LOW_BYTE_TERM_ENA)
+      j |= SCSI_TERM_ENA_L;
+   WR_HARPOON(ioport+hp_bm_ctrl, j);
+
+   j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+   if (i & HIGH_BYTE_TERM_ENA)
+      j |= SCSI_TERM_ENA_H;
+   WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+
+   if (!(pCardInfo->ai_stateinfo & NO_RESET_IN_INIT)) {
+
+      /* Reset the SCSI bus, then run SCAM initialization. */
+      sresb(ioport,thisCard);
+
+      scini(thisCard, (u08bits) pCardInfo->ai_id, 0);
+   }
+
+
+
+   if (pCardInfo->ai_stateinfo & SUPRESS_UNDERRRUNS_ENA)
+      CurrCard->globalFlags |= F_NO_FILTER;
+
+   if(pCurrNvRam){
+      if(pCurrNvRam->niSysConf & 0x10)
+         CurrCard->globalFlags |= F_GREEN_PC;
+   }
+   else{
+      if (utilEERead(ioport, (SYSTEM_CONFIG/2)) & GREEN_PC_ENA)
+         CurrCard->globalFlags |= F_GREEN_PC;
+   }
+
+   /* Set global flag to indicate Re-Negotiation to be done on all
+      check condition */
+   if(pCurrNvRam){
+      if(pCurrNvRam->niScsiConf & 0x04)
+         CurrCard->globalFlags |= F_DO_RENEGO;
+   }
+   else{
+      if (utilEERead(ioport, (SCSI_CONFIG/2)) & RENEGO_ENA)
+         CurrCard->globalFlags |= F_DO_RENEGO;
+   }
+
+   if(pCurrNvRam){
+      if(pCurrNvRam->niScsiConf & 0x08)
+         CurrCard->globalFlags |= F_CONLUN_IO;
+   }
+   else{
+      if (utilEERead(ioport, (SCSI_CONFIG/2)) & CONNIO_ENA)
+         CurrCard->globalFlags |= F_CONLUN_IO;
+   }
+
+   /* Per-target "allow disconnect" permission bit mask. */
+   temp = pCardInfo->ai_per_targ_no_disc;
+
+   for (i = 0,id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) {
+
+      if (temp & id)
+         sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC;
+   }
+
+   sync_bit_map = 0x0001;
+
+   /* Each NVRAM/EEPROM word carries sync settings for two targets;
+      unpack one byte per target into sccbMgrTbl. */
+   for (id = 0; id < (MAX_SCSI_TAR/2); id++){
+
+      if(pCurrNvRam){
+         temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+         temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+                (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+      }else
+         temp = utilEERead(ioport, (u16bits)((SYNC_RATE_TBL/2)+id));
+
+      for (i = 0; i < 2; temp >>=8,i++){
+
+         if (pCardInfo->ai_per_targ_init_sync & sync_bit_map){
+
+            sccbMgrTbl[thisCard][id*2+i].TarEEValue = (u08bits)temp;
+         }
+
+         else {
+            sccbMgrTbl[thisCard][id*2+i].TarStatus |= SYNC_SUPPORTED;
+            sccbMgrTbl[thisCard][id*2+i].TarEEValue =
+               (u08bits)(temp & ~EE_SYNC_MASK);
+         }
+
+#if defined(WIDE_SCSI)
+/*         if ((pCardInfo->ai_per_targ_wide_nego & sync_bit_map) ||
+            (id*2+i >= 8)){
+*/
+         if (pCardInfo->ai_per_targ_wide_nego & sync_bit_map){
+
+            sccbMgrTbl[thisCard][id*2+i].TarEEValue |= EE_WIDE_SCSI;
+
+         }
+
+         else { /* NARROW SCSI */
+            sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+         }
+
+#else
+         sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+#endif
+
+
+         sync_bit_map <<= 1;
+      }
+   }
+
+
+   pCardInfo->ai_SGListFormat=0x01;
+   pCardInfo->ai_DataPtrFormat=0x01;
+   pCardInfo->ai_AEN_mask &= SCSI_RESET_COMPLETE;
+
+   /* Advertise our presence to the BIOS via the semaphore register. */
+   WR_HARPOON((ioport+hp_semaphore),
+      (u08bits)(RD_HARPOON((ioport+hp_semaphore)) | SCCB_MGR_PRESENT));
+
+   /* NOTE(review): the CARD_HANDLE is the PSCCBcard pointer squeezed
+      into a u32bits — assumes 32-bit pointers; verify on 64-bit. */
+   return((u32bits)CurrCard);
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: build_ucb, exported to BUDI via UCBMgr_build_ucb entry
+ *
+ * Description: Prepare the firmware portion of the UCB. Do not start
+ *              the command here: resources are not yet guaranteed, so
+ *              nothing derived from state that may still change can be
+ *              manipulated.
+ *
+ *---------------------------------------------------------------------*/
+void build_UCB(CARD_HANDLE pCurrCard, PUCB p_ucb)
+{
+
+   u08bits thisCard;
+   u08bits i,j;
+
+   PSCCB p_sccb;
+
+
+   thisCard = ((PSCCBcard) pCurrCard)->cardIndex;
+
+   /* The SCCB for this request lives in the UCB's manager-private
+      area; cross-link the two structures. */
+   p_sccb=(PSCCB)p_ucb->UCB_MgrPrivatePtr;
+
+
+   p_sccb->Sccb_ucb_ptr=p_ucb;
+
+   /* Translate the UCB opcode flag combination into an SCCB opcode. */
+   switch (p_ucb->UCB_opcode & (OPC_DEVICE_RESET+OPC_XFER_SG+OPC_CHK_RESIDUAL))
+   {
+      case OPC_DEVICE_RESET:
+         p_sccb->OperationCode=RESET_COMMAND;
+         break;
+      case OPC_XFER_SG:
+         p_sccb->OperationCode=SCATTER_GATHER_COMMAND;
+         break;
+      case OPC_XFER_SG+OPC_CHK_RESIDUAL:
+         p_sccb->OperationCode=RESIDUAL_SG_COMMAND;
+         break;
+      case OPC_CHK_RESIDUAL:
+
+         p_sccb->OperationCode=RESIDUAL_COMMAND;
+         break;
+      default:
+         p_sccb->OperationCode=SCSI_INITIATOR_COMMAND;
+         break;
+   }
+
+   /* Tagged queueing: the queue-type bits move into the control byte. */
+   if (p_ucb->UCB_opcode & OPC_TQ_ENABLE)
+   {
+      p_sccb->ControlByte = (u08bits)((p_ucb->UCB_opcode & OPC_TQ_MASK)>>2) | F_USE_CMD_Q;
+   }
+   else
+   {
+      p_sccb->ControlByte = 0;
+   }
+
+
+   p_sccb->CdbLength = (u08bits)p_ucb->UCB_cdblen;
+
+   if (p_ucb->UCB_opcode & OPC_NO_AUTO_SENSE)
+   {
+      p_sccb->RequestSenseLength = 0;
+   }
+   else
+   {
+      p_sccb->RequestSenseLength = (unsigned char) p_ucb->UCB_senselen;
+   }
+
+   /* Scatter/gather: DataLength becomes the SG list size in bytes
+      (8 bytes per element), and the virtual list pointer is used. */
+   if (p_ucb->UCB_opcode & OPC_XFER_SG)
+   {
+      p_sccb->DataPointer=p_ucb->UCB_virt_dataptr;
+      p_sccb->DataLength = (((u32bits)p_ucb->UCB_NumSgElements)<<3);
+   }
+   else
+   {
+      p_sccb->DataPointer=p_ucb->UCB_phys_dataptr;
+      p_sccb->DataLength=p_ucb->UCB_datalen;
+   };
+
+   p_sccb->HostStatus=0;
+   p_sccb->TargetStatus=0;
+   p_sccb->TargID=(unsigned char)p_ucb->UCB_targid;
+   p_sccb->Lun=(unsigned char) p_ucb->UCB_lun;
+   p_sccb->SccbIOPort=((PSCCBcard)pCurrCard)->ioPort;
+
+   /* Copy the CDB bytes into the SCCB. */
+   j=p_ucb->UCB_cdblen;
+   for (i=0;i<j;i++)
+   {
+      p_sccb->Cdb[i] = p_ucb->UCB_cdb[i];
+   }
+
+   p_sccb->SensePointer=p_ucb->UCB_phys_senseptr;
+
+   sinits(p_sccb,thisCard);
+
+}
+#ifndef NO_IOCTLS
+
+/*---------------------------------------------------------------------
+ *
+ * Function: GetDevSyncRate
+ *
+ *---------------------------------------------------------------------*/
+STATIC int GetDevSyncRate(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+   struct _SYNC_RATE_INFO * pSyncStr;
+   PSCCBMgr_tar_info currTar_Info;
+   BASE_PORT ioport;
+   u08bits scsiID, j;
+
+#if (FW_TYPE != _SCCB_MGR_)
+   /* Reject target ids beyond what this adapter supports. */
+   if( p_ucb->UCB_targid >= pCurrCard->cardInfo->ai_MaxTarg )
+   {
+      return(1);
+   }
+#endif
+
+   ioport = pCurrCard->ioPort;
+   pSyncStr = (struct _SYNC_RATE_INFO *) p_ucb->UCB_virt_dataptr;
+   scsiID = (u08bits) p_ucb->UCB_targid;
+   currTar_Info = &sccbMgrTbl[pCurrCard->cardIndex][scsiID];
+   j = currTar_Info->TarSyncCtrl;
+
+   /* Requested rate from the EEPROM sync code.  Values appear to be
+      tenths of MB/s, doubled when the bus is not narrow. */
+   switch (currTar_Info->TarEEValue & EE_SYNC_MASK)
+   {
+   case EE_SYNC_ASYNC:
+      pSyncStr->RequestMegaXferRate = 0x00;
+      break;
+   case EE_SYNC_5MB:
+      pSyncStr->RequestMegaXferRate = (j & NARROW_SCSI) ? 50 : 100;
+      break;
+   case EE_SYNC_10MB:
+      pSyncStr->RequestMegaXferRate = (j & NARROW_SCSI) ? 100 : 200;
+      break;
+   case EE_SYNC_20MB:
+      pSyncStr->RequestMegaXferRate = (j & NARROW_SCSI) ? 200 : 400;
+      break;
+   }
+
+   /* Actual negotiated rate: bits 7-5 of TarSyncCtrl select the
+      divisor; divisor 0 with offset bits 2-0 all clear means async. */
+   switch ((j >> 5) & 0x07)
+   {
+   case 0x00:
+      if((j & 0x07) == 0x00)
+      {
+         pSyncStr->ActualMegaXferRate = 0x00; /* Async Mode */
+      }
+      else
+      {
+         pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 200 : 400;
+      }
+      break;
+   case 0x01:
+      pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 100 : 200;
+      break;
+   case 0x02:
+      pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 66 : 122;
+      break;
+   case 0x03:
+      pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 50 : 100;
+      break;
+   case 0x04:
+      pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 40 : 80;
+      break;
+   case 0x05:
+      pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 33 : 66;
+      break;
+   case 0x06:
+      pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 28 : 56;
+      break;
+   case 0x07:
+      pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 25 : 50;
+      break;
+   }
+   /* Low nibble of TarSyncCtrl holds the negotiated REQ/ACK offset. */
+   pSyncStr->NegotiatedOffset = j & 0x0f;
+
+   return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SetDevSyncRate
+ *
+ *---------------------------------------------------------------------*/
+/* Set the synchronous transfer rate for one target: classify the
+ * caller-requested rate into an EEPROM sync code, update the cached
+ * per-target value, and rewrite the target's byte in the EEPROM sync
+ * table (followed by a checksum update).  Returns 0 on success, 1 on
+ * an out-of-range target id. */
+STATIC int SetDevSyncRate(PSCCBcard pCurrCard, PUCB p_ucb)
+{
+   struct _SYNC_RATE_INFO * pSyncStr;
+   PSCCBMgr_tar_info currTar_Info;
+   BASE_PORT ioPort;
+   u08bits scsiID, i, j, syncVal;
+   u16bits syncOffset, actualXferRate;
+   union {
+      u08bits tempb[2];
+      u16bits tempw;
+   }temp2;
+
+#if (FW_TYPE != _SCCB_MGR_)
+   if( p_ucb->UCB_targid >= pCurrCard->cardInfo->ai_MaxTarg )
+   {
+      return(1);
+   }
+#endif
+
+   ioPort = pCurrCard->ioPort;
+   pSyncStr = (struct _SYNC_RATE_INFO *) p_ucb->UCB_virt_dataptr;
+   scsiID = (u08bits) p_ucb->UCB_targid;
+   currTar_Info = &sccbMgrTbl[pCurrCard->cardIndex][scsiID];
+
+   /* Briefly unlock the id registers to read this target's sync
+      control byte, then restore the pad register. */
+   i = RD_HARPOON(ioPort+hp_xfer_pad); /* Save current value */
+   WR_HARPOON(ioPort+hp_xfer_pad, (i | ID_UNLOCK));
+   WR_HARPOON(ioPort+hp_select_id, ((scsiID << 4) | scsiID));
+   j = RD_HARPOON(ioPort+hp_synctarg_0);
+   WR_HARPOON(ioPort+hp_xfer_pad, i); /* restore value */
+
+   /* Rates are in tenths of MB/s on a narrow bus; a wide bus doubles
+      the byte rate for the same clock. */
+   actualXferRate = pSyncStr->ActualMegaXferRate;
+   if(!(j & NARROW_SCSI))
+   {
+      actualXferRate <<= 1;
+   }
+   /* Classify into an EEPROM sync code.  This must be an else-if
+      chain: the original separate ifs let the trailing else clobber
+      the async (0x00) and 20MB (0x0200) cases with EE_SYNC_5MB. */
+   if(actualXferRate == 0x00)
+   {
+      syncVal = EE_SYNC_ASYNC; /* Async Mode */
+   }
+   else if(actualXferRate == 0x0200)
+   {
+      syncVal = EE_SYNC_20MB; /* 20/40 MB Mode */
+   }
+   else if(actualXferRate > 0x0050 && actualXferRate < 0x0200 )
+   {
+      syncVal = EE_SYNC_10MB; /* 10/20 MB Mode */
+   }
+   else
+   {
+      syncVal = EE_SYNC_5MB; /* 5/10 MB Mode */
+   }
+
+   /* Nothing to do when the stored sync code already matches.  (The
+      original used logical '&&' and compared EE_SYNC_MASK == syncVal
+      due to precedence; bitwise mask-and-compare is intended.) */
+   if((currTar_Info->TarEEValue & EE_SYNC_MASK) == syncVal)
+      return(0);
+   /* Replace only the sync bits: clear with ~EE_SYNC_MASK (the
+      original's logical '!' destroyed the other bits). */
+   currTar_Info->TarEEValue = (currTar_Info->TarEEValue & ~EE_SYNC_MASK)
+      | syncVal;
+   syncOffset = (SYNC_RATE_TBL + scsiID) / 2;
+   temp2.tempw = utilEERead(ioPort, syncOffset);
+   /* NOTE(review): tempb[0]/tempb[1] selection assumes a little-endian
+      layout of the EEPROM word — confirm on big-endian hosts. */
+   if(scsiID & 0x01)
+   {
+      temp2.tempb[0] = (temp2.tempb[0] & ~EE_SYNC_MASK) | syncVal;
+   }
+   else
+   {
+      temp2.tempb[1] = (temp2.tempb[1] & ~EE_SYNC_MASK) | syncVal;
+   }
+   utilEEWriteOnOff(ioPort, 1);
+   utilEEWrite(ioPort, temp2.tempw, syncOffset);
+   utilEEWriteOnOff(ioPort, 0);
+   UpdateCheckSum(ioPort);
+
+   return(0);
+}
+/*---------------------------------------------------------------------
+ *
+ * Function: GetDevWideMode
+ *
+ *---------------------------------------------------------------------*/
+int GetDevWideMode(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+   u08bits *pData;
+
+   /* Report whether wide SCSI is enabled for the addressed target:
+      write 1 or 0 into the caller's data buffer.  Always succeeds. */
+   pData = (u08bits *)p_ucb->UCB_virt_dataptr;
+   *pData = (sccbMgrTbl[pCurrCard->cardIndex][p_ucb->UCB_targid].TarEEValue
+               & EE_WIDE_SCSI) ? 1 : 0;
+
+   return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SetDevWideMode
+ *
+ *---------------------------------------------------------------------*/
+/* Enable or disable wide SCSI for one target according to *pData
+ * (non-zero = wide), updating the cached per-target EE value and the
+ * target's byte in the EEPROM sync table, then recompute the EEPROM
+ * checksum.  Returns 0 on success, 1 when the card is not wide-capable
+ * or the target id is out of range. */
+int SetDevWideMode(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+   u08bits *pData;
+   PSCCBMgr_tar_info currTar_Info;
+   BASE_PORT ioPort;
+   u08bits scsiID, scsiWideMode;
+   u16bits syncOffset;
+   union {
+      u08bits tempb[2];
+      u16bits tempw;
+   }temp2;
+
+#if (FW_TYPE != _SCCB_MGR_)
+   if( !(pCurrCard->cardInfo->ai_attributes & WIDE_CAPABLE) )
+   {
+      return(1);
+   }
+
+   if( p_ucb->UCB_targid >= pCurrCard->cardInfo->ai_MaxTarg )
+   {
+      return(1);
+   }
+#endif
+
+   ioPort = pCurrCard->ioPort;
+   pData = (u08bits *)p_ucb->UCB_virt_dataptr;
+   scsiID = (u08bits) p_ucb->UCB_targid;
+   currTar_Info = &sccbMgrTbl[pCurrCard->cardIndex][scsiID];
+
+   /* Early out when the requested mode already matches the stored one. */
+   if(*pData)
+   {
+      if(currTar_Info->TarEEValue & EE_WIDE_SCSI)
+      {
+         return(0);
+      }
+      else
+      {
+         scsiWideMode = EE_WIDE_SCSI;
+      }
+   }
+   else
+   {
+      if(!(currTar_Info->TarEEValue & EE_WIDE_SCSI))
+      {
+         return(0);
+      }
+      else
+      {
+         scsiWideMode = 0;
+      }
+   }
+   /* Replace only the wide bit: clear with ~EE_WIDE_SCSI (the original
+      used logical '!', which destroyed every other bit in the value). */
+   currTar_Info->TarEEValue = (currTar_Info->TarEEValue & ~EE_WIDE_SCSI)
+      | scsiWideMode;
+
+   syncOffset = (SYNC_RATE_TBL + scsiID) / 2;
+   temp2.tempw = utilEERead(ioPort, syncOffset);
+   /* NOTE(review): tempb[0]/tempb[1] selection assumes a little-endian
+      layout of the EEPROM word — confirm on big-endian hosts. */
+   if(scsiID & 0x01)
+   {
+      temp2.tempb[0] = (temp2.tempb[0] & ~EE_WIDE_SCSI) | scsiWideMode;
+   }
+   else
+   {
+      temp2.tempb[1] = (temp2.tempb[1] & ~EE_WIDE_SCSI) | scsiWideMode;
+   }
+   utilEEWriteOnOff(ioPort, 1);
+   utilEEWrite(ioPort, temp2.tempw, syncOffset);
+   utilEEWriteOnOff(ioPort, 0);
+   UpdateCheckSum(ioPort);
+
+   return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: ReadNVRam
+ *
+ *---------------------------------------------------------------------*/
+void ReadNVRam(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+   u08bits *pdata;
+   u16bits i,numwrds,numbytes,offset,temp;
+   u08bits OneMore = FALSE;
+#if defined(DOS)
+   u16bits ioport;
+#else
+   u32bits ioport;
+#endif
+
+   /* Copy UCB_datalen bytes of EEPROM content, starting at byte
+      offset UCB_IOCTLParams[0], into the caller's data buffer. */
+   numbytes = (u16bits) p_ucb->UCB_datalen;
+   ioport = pCurrCard->ioPort;
+   pdata = (u08bits *) p_ucb->UCB_virt_dataptr;
+   offset = (u16bits) (p_ucb->UCB_IOCTLParams[0]);
+
+   /* The EEPROM is word-addressed: an odd starting offset needs a
+      whole-word read from which only the second byte is kept. */
+   if (offset & 0x1)
+   {
+      *((u16bits*) pdata) = utilEERead(ioport,(u16bits)((offset - 1) / 2)); /* 16 bit read */
+      *pdata = *(pdata + 1);
+      ++offset;
+      ++pdata;
+      --numbytes;
+   }
+
+   numwrds = numbytes / 2;
+   if (numbytes & 1)
+      OneMore = TRUE;
+
+   /* Bulk copy, one word at a time. */
+   for (i = 0; i < numwrds; i++)
+   {
+      *((u16bits*) pdata) = utilEERead(ioport,(u16bits)(offset / 2));
+      pdata += 2;
+      offset += 2;
+   }
+   /* Trailing odd byte: keep only the low byte of the final word. */
+   if (OneMore)
+   {
+      --pdata;
+      -- offset;
+      temp = utilEERead(ioport,(u16bits)(offset / 2));
+      *pdata = (u08bits) (temp);
+   }
+
+} /* end proc ReadNVRam */
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: WriteNVRam
+ *
+ *---------------------------------------------------------------------*/
+void WriteNVRam(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+   u08bits *pdata;
+   u16bits i,numwrds,numbytes,offset, eeprom_end;
+   u08bits OneMore = FALSE;
+   union {
+      u08bits tempb[2];
+      u16bits tempw;
+   } temp2;
+
+#if defined(DOS)
+   u16bits ioport;
+#else
+   u32bits ioport;
+#endif
+
+   /* Write UCB_datalen bytes from the caller's buffer into the EEPROM
+      starting at byte offset UCB_IOCTLParams[0], clamped to the part
+      size, then update the EEPROM checksum. */
+   numbytes = (u16bits) p_ucb->UCB_datalen;
+   ioport = pCurrCard->ioPort;
+   pdata = (u08bits *) p_ucb->UCB_virt_dataptr;
+   offset = (u16bits) (p_ucb->UCB_IOCTLParams[0]);
+
+   /* Narrow cards carry a 512-byte EEPROM, wide cards 768 bytes. */
+   if (RD_HARPOON(ioport+hp_page_ctrl) & NARROW_SCSI_CARD)
+      eeprom_end = 512;
+   else
+      eeprom_end = 768;
+
+   /* NOTE(review): rejects offset > eeprom_end but allows
+      offset == eeprom_end (clamped to a zero-length write below) —
+      confirm that boundary is intended. */
+   if(offset > eeprom_end)
+      return;
+
+   if((offset + numbytes) > eeprom_end)
+      numbytes = eeprom_end - offset;
+
+   utilEEWriteOnOff(ioport,1); /* Enable write access to the EEPROM */
+
+   /* Odd starting offset: read-modify-write the word that contains
+      the first byte (the EEPROM is word-addressed).  tempb[] indexing
+      assumes little-endian word layout — TODO confirm. */
+   if (offset & 0x1)
+   {
+      temp2.tempw = utilEERead(ioport,(u16bits)((offset - 1) / 2)); /* 16 bit read */
+      temp2.tempb[1] = *pdata;
+      utilEEWrite(ioport, temp2.tempw, (u16bits)((offset -1) / 2));
+      *pdata = *(pdata + 1);
+      ++offset;
+      ++pdata;
+      --numbytes;
+   }
+
+   numwrds = numbytes / 2;
+   if (numbytes & 1)
+      OneMore = TRUE;
+
+   /* Bulk write, one word at a time. */
+   for (i = 0; i < numwrds; i++)
+   {
+      utilEEWrite(ioport, *((pu16bits)pdata),(u16bits)(offset / 2));
+      pdata += 2;
+      offset += 2;
+   }
+   /* Trailing odd byte: read-modify-write the final word. */
+   if (OneMore)
+   {
+
+      temp2.tempw = utilEERead(ioport,(u16bits)(offset / 2));
+      temp2.tempb[0] = *pdata;
+      utilEEWrite(ioport, temp2.tempw, (u16bits)(offset / 2));
+   }
+   utilEEWriteOnOff(ioport,0); /* Turn off write access */
+   UpdateCheckSum((u32bits)ioport);
+
+} /* end proc WriteNVRam */
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: UpdateCheckSum
+ *
+ * Description: Update Check Sum in EEPROM
+ *
+ *---------------------------------------------------------------------*/
+
+
+void UpdateCheckSum(u32bits baseport)
+{
+ USHORT i,sum_data, eeprom_end;
+
+ sum_data = 0x0000;
+
+
+ if (RD_HARPOON(baseport+hp_page_ctrl) & NARROW_SCSI_CARD)
+ eeprom_end = 512;
+ else
+ eeprom_end = 768;
+
+ for (i = 1; i < eeprom_end/2; i++)
+ {
+ sum_data += utilEERead(baseport, i);
+ }
+
+ utilEEWriteOnOff(baseport,1); /* Enable write access to the EEPROM */
+
+ utilEEWrite(baseport, sum_data, EEPROM_CHECK_SUM/2);
+ utilEEWriteOnOff(baseport,0); /* Turn off write access */
+}
+
+void SccbMgr_save_foreign_state(PADAPTER_INFO pAdapterInfo)
+{
+   /* Intentionally empty: no foreign (BIOS) state needs saving here. */
+}
+
+
+void SccbMgr_restore_foreign_state(CARD_HANDLE pCurrCard)
+{
+   /* Intentionally empty: no foreign (BIOS) state needs restoring. */
+}
+
+void SccbMgr_restore_native_state(CARD_HANDLE pCurrCard)
+{
+   /* Intentionally empty: no native state needs restoring. */
+}
+
+#endif /* NO_IOCTLS */
+
+#endif /* (FW_TYPE==_UCB_MGR_) */
+
+#ifndef NO_IOCTLS
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_unload_card(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+void SccbMgr_unload_card(USHORT pCurrCard)
+#else
+void SccbMgr_unload_card(ULONG pCurrCard)
+#endif
+#endif
+{
+   UCHAR i;
+#if defined(DOS)
+   USHORT portBase;
+   USHORT regOffset;
+#else
+   ULONG portBase;
+   ULONG regOffset;
+#endif
+   ULONG scamData;
+#if defined(OS2)
+   ULONG far *pScamTbl;
+#else
+   ULONG *pScamTbl;
+#endif
+   PNVRamInfo pCurrNvRam;
+
+   pCurrNvRam = ((PSCCBcard)pCurrCard)->pNvRamInfo;
+
+   /* Write the cached NVRAM image back onto the chip stack and the
+      SCAM table back into ARAM so the BIOS finds consistent state
+      after we unload; without an image just clear stack slot 0. */
+   if(pCurrNvRam){
+      WrStack(pCurrNvRam->niBaseAddr, 0, pCurrNvRam->niModel);
+      WrStack(pCurrNvRam->niBaseAddr, 1, pCurrNvRam->niSysConf);
+      WrStack(pCurrNvRam->niBaseAddr, 2, pCurrNvRam->niScsiConf);
+      WrStack(pCurrNvRam->niBaseAddr, 3, pCurrNvRam->niScamConf);
+      WrStack(pCurrNvRam->niBaseAddr, 4, pCurrNvRam->niAdapId);
+
+      for(i = 0; i < MAX_SCSI_TAR / 2; i++)
+         WrStack(pCurrNvRam->niBaseAddr, (UCHAR)(i+5), pCurrNvRam->niSyncTbl[i]);
+
+      portBase = pCurrNvRam->niBaseAddr;
+
+      /* One 4-byte SCAM entry per target, based at ARAM offset 64. */
+      for(i = 0; i < MAX_SCSI_TAR; i++){
+         regOffset = hp_aramBase + 64 + i*4;
+#if defined(OS2)
+         pScamTbl = (ULONG far *) &pCurrNvRam->niScamTbl[i];
+#else
+         pScamTbl = (ULONG *) &pCurrNvRam->niScamTbl[i];
+#endif
+         scamData = *pScamTbl;
+         WR_HARP32(portBase, regOffset, scamData);
+      }
+
+   }else{
+      WrStack(((PSCCBcard)pCurrCard)->ioPort, 0, 0);
+   }
+}
+#endif /* NO_IOCTLS */
+
+
+void RNVRamData(PNVRamInfo pNvRamInfo)
+{
+   UCHAR i;
+#if defined(DOS)
+   USHORT portBase;
+   USHORT regOffset;
+#else
+   ULONG portBase;
+   ULONG regOffset;
+#endif
+   ULONG scamData;
+#if defined (OS2)
+   ULONG far *pScamTbl;
+#else
+   ULONG *pScamTbl;
+#endif
+
+   /* Harvest the NVRAM image from the chip stack: slots 0-4 hold
+      model/system/SCSI/SCAM config and adapter id, slots 5.. the
+      per-target-pair sync table. */
+   pNvRamInfo->niModel = RdStack(pNvRamInfo->niBaseAddr, 0);
+   pNvRamInfo->niSysConf = RdStack(pNvRamInfo->niBaseAddr, 1);
+   pNvRamInfo->niScsiConf = RdStack(pNvRamInfo->niBaseAddr, 2);
+   pNvRamInfo->niScamConf = RdStack(pNvRamInfo->niBaseAddr, 3);
+   pNvRamInfo->niAdapId = RdStack(pNvRamInfo->niBaseAddr, 4);
+
+   for(i = 0; i < MAX_SCSI_TAR / 2; i++)
+      pNvRamInfo->niSyncTbl[i] = RdStack(pNvRamInfo->niBaseAddr, (UCHAR)(i+5));
+
+   portBase = pNvRamInfo->niBaseAddr;
+
+   /* One 4-byte SCAM entry per target, based at ARAM offset 64. */
+   for(i = 0; i < MAX_SCSI_TAR; i++){
+      regOffset = hp_aramBase + 64 + i*4;
+      RD_HARP32(portBase, regOffset, scamData);
+#if defined(OS2)
+      pScamTbl = (ULONG far *) &pNvRamInfo->niScamTbl[i];
+#else
+      pScamTbl = (ULONG *) &pNvRamInfo->niScamTbl[i];
+#endif
+      *pScamTbl = scamData;
+   }
+
+}
+
+#if defined(DOS)
+UCHAR RdStack(USHORT portBase, UCHAR index)
+#else
+UCHAR RdStack(ULONG portBase, UCHAR index)
+#endif
+{
+   /* Read one byte from the chip's on-board stack RAM at 'index'. */
+   WR_HARPOON(portBase + hp_stack_addr, index);
+   return(RD_HARPOON(portBase + hp_stack_data));
+}
+
+#if defined(DOS)
+void WrStack(USHORT portBase, UCHAR index, UCHAR data)
+#else
+void WrStack(ULONG portBase, UCHAR index, UCHAR data)
+#endif
+{
+   /* Write one byte into the chip's on-board stack RAM at 'index'. */
+   WR_HARPOON(portBase + hp_stack_addr, index);
+   WR_HARPOON(portBase + hp_stack_data, data);
+}
+
+
+#if (FW_TYPE==_UCB_MGR_)
+u08bits ChkIfChipInitialized(BASE_PORT ioPort)
+#else
+#if defined(DOS)
+UCHAR ChkIfChipInitialized(USHORT ioPort)
+#else
+UCHAR ChkIfChipInitialized(ULONG ioPort)
+#endif
+#endif
+{
+   /* Heuristic: the chip counts as already initialized when the
+      arbitration id matches the id saved in stack slot 4, the clock
+      control register holds its default bits, and the selection
+      timeout is one of the two values this driver programs. */
+   if((RD_HARPOON(ioPort + hp_arb_id) & 0x0f) != RdStack(ioPort, 4))
+      return(FALSE);
+   if((RD_HARPOON(ioPort + hp_clkctrl_0) & CLKCTRL_DEFAULT)
+      != CLKCTRL_DEFAULT)
+      return(FALSE);
+   if((RD_HARPOON(ioPort + hp_seltimeout) == TO_250ms) ||
+      (RD_HARPOON(ioPort + hp_seltimeout) == TO_290ms))
+      return(TRUE);
+   return(FALSE);
+
+}
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_start_sccb
+ *
+ * Description: Start a command pointed to by p_Sccb. When the
+ * command is completed it will be returned via the
+ * callback function.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_start_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb)
+#else
+#if defined(DOS)
+void SccbMgr_start_sccb(USHORT pCurrCard, PSCCB p_Sccb)
+#else
+void SccbMgr_start_sccb(ULONG pCurrCard, PSCCB p_Sccb)
+#endif
+#endif
+{
+#if defined(DOS)
+   USHORT ioport;
+#else
+   ULONG ioport;
+#endif
+   UCHAR thisCard, lun;
+   PSCCB pSaveSccb;
+   CALL_BK_FN callback;
+
+#if (FW_TYPE==_UCB_MGR_)
+   PSCCB p_Sccb;
+#endif
+
+   mOS_Lock((PSCCBcard)pCurrCard);
+   thisCard = ((PSCCBcard) pCurrCard)->cardIndex;
+   ioport = ((PSCCBcard) pCurrCard)->ioPort;
+
+#if (FW_TYPE==_UCB_MGR_)
+   p_Sccb = (PSCCB)p_ucb->UCB_MgrPrivatePtr;
+#endif
+
+   /* Reject out-of-range target/LUN up front and complete the request
+      with an error through the caller's callback. */
+   if((p_Sccb->TargID > MAX_SCSI_TAR) || (p_Sccb->Lun > MAX_LUN))
+   {
+
+#if (FW_TYPE==_UCB_MGR_)
+      p_ucb->UCB_hbastat = SCCB_COMPLETE;
+      p_ucb->UCB_status=SCCB_ERROR;
+      callback = (CALL_BK_FN)p_ucb->UCB_callback;
+      if (callback)
+         callback(p_ucb);
+#endif
+
+#if (FW_TYPE==_SCCB_MGR_)
+      p_Sccb->HostStatus = SCCB_COMPLETE;
+      p_Sccb->SccbStatus = SCCB_ERROR;
+      callback = (CALL_BK_FN)p_Sccb->SccbCallback;
+      if (callback)
+         callback(p_Sccb);
+#endif
+
+      mOS_UnLock((PSCCBcard)pCurrCard);
+      return;
+   }
+
+#if (FW_TYPE==_SCCB_MGR_)
+   sinits(p_Sccb,thisCard);
+#endif
+
+
+#if (FW_TYPE==_UCB_MGR_)
+#ifndef NO_IOCTLS
+
+   /* IOCTL requests are handled synchronously here (each arm
+      completes the UCB via its callback and returns), except for
+      SEND_SCSI_PASSTHRU which falls through to normal dispatch. */
+   if (p_ucb->UCB_opcode & OPC_IOCTL)
+   {
+
+      switch (p_ucb->UCB_IOCTLCommand)
+      {
+         case READ_NVRAM:
+            ReadNVRam((PSCCBcard)pCurrCard,p_ucb);
+            p_ucb->UCB_status=UCB_SUCCESS;
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+
+         case WRITE_NVRAM:
+            WriteNVRam((PSCCBcard)pCurrCard,p_ucb);
+            p_ucb->UCB_status=UCB_SUCCESS;
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+
+         case SEND_SCSI_PASSTHRU:
+#if (FW_TYPE != _SCCB_MGR_)
+            if( p_ucb->UCB_targid >=
+                ((PSCCBcard)pCurrCard)->cardInfo->ai_MaxTarg )
+            {
+               p_ucb->UCB_status = UCB_ERROR;
+               p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+               callback = (CALL_BK_FN)p_ucb->UCB_callback;
+               if (callback)
+                  callback(p_ucb);
+               mOS_UnLock((PSCCBcard)pCurrCard);
+               return;
+            }
+#endif
+            break;
+
+         case HARD_RESET:
+            /* Not implemented via this path. */
+            p_ucb->UCB_status = UCB_INVALID;
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+         case GET_DEVICE_SYNCRATE:
+            if( !GetDevSyncRate((PSCCBcard)pCurrCard,p_ucb) )
+            {
+               p_ucb->UCB_status = UCB_SUCCESS;
+            }
+            else
+            {
+               p_ucb->UCB_status = UCB_ERROR;
+               p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+            }
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+         case SET_DEVICE_SYNCRATE:
+            if( !SetDevSyncRate((PSCCBcard)pCurrCard,p_ucb) )
+            {
+               p_ucb->UCB_status = UCB_SUCCESS;
+            }
+            else
+            {
+               p_ucb->UCB_status = UCB_ERROR;
+               p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+            }
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+         case GET_WIDE_MODE:
+            if( !GetDevWideMode((PSCCBcard)pCurrCard,p_ucb) )
+            {
+               p_ucb->UCB_status = UCB_SUCCESS;
+            }
+            else
+            {
+               p_ucb->UCB_status = UCB_ERROR;
+               p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+            }
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+         case SET_WIDE_MODE:
+            if( !SetDevWideMode((PSCCBcard)pCurrCard,p_ucb) )
+            {
+               p_ucb->UCB_status = UCB_SUCCESS;
+            }
+            else
+            {
+               p_ucb->UCB_status = UCB_ERROR;
+               p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+            }
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+         default:
+            p_ucb->UCB_status=UCB_INVALID;
+            callback = (CALL_BK_FN)p_ucb->UCB_callback;
+            if (callback)
+               callback(p_ucb);
+            mOS_UnLock((PSCCBcard)pCurrCard);
+            return;
+      }
+   }
+#endif /* NO_IOCTLS */
+#endif /* (FW_TYPE==_UCB_MGR_) */
+
+
+   /* First active command: flag the manager busy in the semaphore and,
+      on Green PC cards, bring the clock back to its default. */
+   if (!((PSCCBcard) pCurrCard)->cmdCounter)
+   {
+      WR_HARPOON(ioport+hp_semaphore, (RD_HARPOON(ioport+hp_semaphore)
+         | SCCB_MGR_ACTIVE));
+
+      if (((PSCCBcard) pCurrCard)->globalFlags & F_GREEN_PC)
+      {
+         WR_HARPOON(ioport+hp_clkctrl_0, CLKCTRL_DEFAULT);
+         WR_HARPOON(ioport+hp_sys_ctrl, 0x00);
+      }
+   }
+
+   ((PSCCBcard)pCurrCard)->cmdCounter++;
+
+   /* BIOS owns the chip right now: ask it to tickle us later and just
+      queue the command (RESET goes through the select-fail path). */
+   if (RD_HARPOON(ioport+hp_semaphore) & BIOS_IN_USE) {
+
+      WR_HARPOON(ioport+hp_semaphore, (RD_HARPOON(ioport+hp_semaphore)
+         | TICKLE_ME));
+      if(p_Sccb->OperationCode == RESET_COMMAND)
+      {
+         pSaveSccb = ((PSCCBcard) pCurrCard)->currentSCCB;
+         ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+         queueSelectFail(&BL_Card[thisCard], thisCard);
+         ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSccb;
+      }
+      else
+      {
+         queueAddSccb(p_Sccb,thisCard);
+      }
+   }
+
+   /* Interrupts globally disabled at the chip: cannot select now,
+      queue the command instead. */
+   else if ((RD_HARPOON(ioport+hp_page_ctrl) & G_INT_DISABLE)) {
+
+      if(p_Sccb->OperationCode == RESET_COMMAND)
+      {
+         pSaveSccb = ((PSCCBcard) pCurrCard)->currentSCCB;
+         ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+         queueSelectFail(&BL_Card[thisCard], thisCard);
+         ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSccb;
+      }
+      else
+      {
+         queueAddSccb(p_Sccb,thisCard);
+      }
+   }
+
+   else {
+
+      MDISABLE_INT(ioport);
+
+      /* With concurrent-LUN I/O and no tag queueing in progress, the
+         busy check is per-LUN; otherwise LUN 0 stands for the target. */
+      if((((PSCCBcard) pCurrCard)->globalFlags & F_CONLUN_IO) &&
+         ((sccbMgrTbl[thisCard][p_Sccb->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+         lun = p_Sccb->Lun;
+      else
+         lun = 0;
+      /* Idle card and idle target/LUN: start selection immediately. */
+      if ((((PSCCBcard) pCurrCard)->currentSCCB == NULL) &&
+         (sccbMgrTbl[thisCard][p_Sccb->TargID].TarSelQ_Cnt == 0) &&
+         (sccbMgrTbl[thisCard][p_Sccb->TargID].TarLUNBusy[lun]
+         == FALSE)) {
+
+            ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+            mOS_UnLock((PSCCBcard)pCurrCard);
+#if defined(DOS)
+            ssel((USHORT)p_Sccb->SccbIOPort,thisCard);
+#else
+            ssel(p_Sccb->SccbIOPort,thisCard);
+#endif
+            mOS_Lock((PSCCBcard)pCurrCard);
+      }
+
+      else {
+
+         if(p_Sccb->OperationCode == RESET_COMMAND)
+         {
+            pSaveSccb = ((PSCCBcard) pCurrCard)->currentSCCB;
+            ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+            queueSelectFail(&BL_Card[thisCard], thisCard);
+            ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSccb;
+         }
+         else
+         {
+            queueAddSccb(p_Sccb,thisCard);
+         }
+      }
+
+
+      MENABLE_INT(ioport);
+   }
+
+   mOS_UnLock((PSCCBcard)pCurrCard);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_abort_sccb
+ *
+ * Description: Abort the command pointed to by p_Sccb. When the
+ * command is completed it will be returned via the
+ * callback function.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+s32bits SccbMgr_abort_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb)
+#else
+#if defined(DOS)
+int SccbMgr_abort_sccb(USHORT pCurrCard, PSCCB p_Sccb)
+#else
+int SccbMgr_abort_sccb(ULONG pCurrCard, PSCCB p_Sccb)
+#endif
+#endif
+
+{
+#if defined(DOS)
+   USHORT ioport;
+#else
+   ULONG ioport;
+#endif
+
+   UCHAR thisCard;
+   CALL_BK_FN callback;
+   UCHAR TID;
+   PSCCB pSaveSCCB;
+   PSCCBMgr_tar_info currTar_Info;
+
+
+#if (FW_TYPE==_UCB_MGR_)
+   PSCCB p_Sccb;
+   p_Sccb=(PSCCB)p_ucb->UCB_MgrPrivatePtr;
+#endif
+
+   ioport = ((PSCCBcard) pCurrCard)->ioPort;
+
+   thisCard = ((PSCCBcard)pCurrCard)->cardIndex;
+
+   mOS_Lock((PSCCBcard)pCurrCard);
+
+   /* With chip interrupts disabled the queues cannot be touched:
+      fall through to the -1 (not aborted) return. */
+   if (RD_HARPOON(ioport+hp_page_ctrl) & G_INT_DISABLE)
+   {
+      mOS_UnLock((PSCCBcard)pCurrCard);
+   }
+
+   else
+   {
+
+      /* Easiest case: the SCCB is still waiting in the select queue;
+         pull it out and complete it immediately with SCCB_ABORT. */
+      if (queueFindSccb(p_Sccb,thisCard))
+      {
+
+         mOS_UnLock((PSCCBcard)pCurrCard);
+
+         ((PSCCBcard)pCurrCard)->cmdCounter--;
+
+         if (!((PSCCBcard)pCurrCard)->cmdCounter)
+            WR_HARPOON(ioport+hp_semaphore,(RD_HARPOON(ioport+hp_semaphore)
+               & (UCHAR)(~(SCCB_MGR_ACTIVE | TICKLE_ME)) ));
+
+#if (FW_TYPE==_SCCB_MGR_)
+         p_Sccb->SccbStatus = SCCB_ABORT;
+         callback = p_Sccb->SccbCallback;
+         callback(p_Sccb);
+#else
+         p_ucb->UCB_status=SCCB_ABORT;
+         callback = (CALL_BK_FN)p_ucb->UCB_callback;
+         callback(p_ucb);
+#endif
+
+         return(0);
+      }
+
+      else
+      {
+         mOS_UnLock((PSCCBcard)pCurrCard);
+
+         /* Currently executing command: just mark it aborted and let
+            the normal completion path finish it. */
+         if (((PSCCBcard)pCurrCard)->currentSCCB == p_Sccb)
+         {
+            p_Sccb->SccbStatus = SCCB_ABORT;
+            return(0);
+
+         }
+
+         else
+         {
+
+            TID = p_Sccb->TargID;
+
+            /* Tagged command already disconnected: arrange to select
+               the target and send an ABORT TAG message. */
+            if(p_Sccb->Sccb_tag)
+            {
+               MDISABLE_INT(ioport);
+               if (((PSCCBcard) pCurrCard)->discQ_Tbl[p_Sccb->Sccb_tag]==p_Sccb)
+               {
+                  p_Sccb->SccbStatus = SCCB_ABORT;
+                  p_Sccb->Sccb_scsistat = ABORT_ST;
+#if (FW_TYPE==_UCB_MGR_)
+                  p_ucb->UCB_status=SCCB_ABORT;
+#endif
+                  p_Sccb->Sccb_scsimsg = SMABORT_TAG;
+
+                  if(((PSCCBcard) pCurrCard)->currentSCCB == NULL)
+                  {
+                     ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+                     ssel(ioport, thisCard);
+                  }
+                  else
+                  {
+                     pSaveSCCB = ((PSCCBcard) pCurrCard)->currentSCCB;
+                     ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+                     queueSelectFail((PSCCBcard) pCurrCard, thisCard);
+                     ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSCCB;
+                  }
+               }
+               MENABLE_INT(ioport);
+               return(0);
+            }
+            else
+            {
+               currTar_Info = &sccbMgrTbl[thisCard][p_Sccb->TargID];
+
+               /* Untagged disconnected command: mark its per-LUN
+                  disconnect-queue entry aborted. */
+               if(BL_Card[thisCard].discQ_Tbl[currTar_Info->LunDiscQ_Idx[p_Sccb->Lun]]
+                  == p_Sccb)
+               {
+                  p_Sccb->SccbStatus = SCCB_ABORT;
+                  return(0);
+               }
+            }
+         }
+      }
+   }
+   return(-1);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_my_int
+ *
+ * Description: Do a quick check to determine if there is a pending
+ * interrupt for this card and disable the IRQ Pin if so.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+u08bits SccbMgr_my_int(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+UCHAR SccbMgr_my_int(USHORT pCurrCard)
+#else
+UCHAR SccbMgr_my_int(ULONG pCurrCard)
+#endif
+#endif
+{
+#if defined(DOS)
+   USHORT ioport;
+#else
+   ULONG ioport;
+#endif
+
+   ioport = ((PSCCBcard)pCurrCard)->ioPort;
+
+   /* TRUE when this card has an interrupt asserted; under DOS the IRQ
+      pin is also masked here until the ISR services it. */
+   if (RD_HARPOON(ioport+hp_int_status) & INT_ASSERTED)
+   {
+
+#if defined(DOS)
+      MDISABLE_INT(ioport);
+#endif
+
+      return(TRUE);
+   }
+
+   else
+
+      return(FALSE);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_isr
+ *
+ * Description: This is our entry point when an interrupt is generated
+ * by the card and the upper level driver passes it on to
+ * us.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+s32bits SccbMgr_isr(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+int SccbMgr_isr(USHORT pCurrCard)
+#else
+int SccbMgr_isr(ULONG pCurrCard)
+#endif
+#endif
+{
+ PSCCB currSCCB;
+ UCHAR thisCard,result,bm_status, bm_int_st;
+ USHORT hp_int;
+ UCHAR i, target;
+#if defined(DOS)
+ USHORT ioport;
+#else
+ ULONG ioport;
+#endif
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+
+ thisCard = ((PSCCBcard)pCurrCard)->cardIndex;
+ ioport = ((PSCCBcard)pCurrCard)->ioPort;
+
+ MDISABLE_INT(ioport);
+
+#if defined(BUGBUG)
+ WR_HARPOON(ioport+hp_user_defined_D, RD_HARPOON(ioport+hp_int_status));
+#endif
+
+ if ((bm_int_st=RD_HARPOON(ioport+hp_int_status)) & EXT_STATUS_ON)
+ bm_status = RD_HARPOON(ioport+hp_ext_status) & (UCHAR)BAD_EXT_STATUS;
+ else
+ bm_status = 0;
+
+ WR_HARPOON(ioport+hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
+
+ mOS_UnLock((PSCCBcard)pCurrCard);
+
+ while ((hp_int = RDW_HARPOON((ioport+hp_intstat)) & default_intena) |
+ bm_status)
+ {
+
+ currSCCB = ((PSCCBcard)pCurrCard)->currentSCCB;
+
+#if defined(BUGBUG)
+ Debug_Load(thisCard,(UCHAR) 0XFF);
+ Debug_Load(thisCard,bm_int_st);
+
+ Debug_Load(thisCard,hp_int_0);
+ Debug_Load(thisCard,hp_int_1);
+#endif
+
+
+ if (hp_int & (FIFO | TIMEOUT | RESET | SCAM_SEL) || bm_status) {
+ result = SccbMgr_bad_isr(ioport,thisCard,((PSCCBcard)pCurrCard),hp_int);
+ WRW_HARPOON((ioport+hp_intstat), (FIFO | TIMEOUT | RESET | SCAM_SEL));
+ bm_status = 0;
+
+ if (result) {
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+ MENABLE_INT(ioport);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return(result);
+ }
+ }
+
+
+ else if (hp_int & ICMD_COMP) {
+
+ if ( !(hp_int & BUS_FREE) ) {
+ /* Wait for the BusFree before starting a new command. We
+ must also check for being reselected since the BusFree
+ may not show up if another device reselects us in 1.5us or
+ less. SRR Wednesday, 3/8/1995.
+ */
+ while (!(RDW_HARPOON((ioport+hp_intstat)) & (BUS_FREE | RSEL))) ;
+ }
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT)
+
+ phaseChkFifo(ioport, thisCard);
+
+/* WRW_HARPOON((ioport+hp_intstat),
+ (BUS_FREE | ICMD_COMP | ITAR_DISC | XFER_CNT_0));
+ */
+
+ WRW_HARPOON((ioport+hp_intstat), CLR_ALL_INT_1);
+
+ autoCmdCmplt(ioport,thisCard);
+
+ }
+
+
+ else if (hp_int & ITAR_DISC)
+ {
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT) {
+
+ phaseChkFifo(ioport, thisCard);
+
+ }
+
+ if (RD_HARPOON(ioport+hp_gp_reg_1) == SMSAVE_DATA_PTR) {
+
+ WR_HARPOON(ioport+hp_gp_reg_1, 0x00);
+ currSCCB->Sccb_XferState |= F_NO_DATA_YET;
+
+ currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC;
+ }
+
+ currSCCB->Sccb_scsistat = DISCONNECT_ST;
+ queueDisconnect(currSCCB,thisCard);
+
+ /* Wait for the BusFree before starting a new command. We
+ must also check for being reselected since the BusFree
+ may not show up if another device reselects us in 1.5us or
+ less. SRR Wednesday, 3/8/1995.
+ */
+ while (!(RDW_HARPOON((ioport+hp_intstat)) & (BUS_FREE | RSEL)) &&
+ !((RDW_HARPOON((ioport+hp_intstat)) & PHASE) &&
+ RD_HARPOON((ioport+hp_scsisig)) ==
+ (SCSI_BSY | SCSI_REQ | SCSI_CD | SCSI_MSG | SCSI_IOBIT))) ;
+
+ /*
+ The additional loop exit condition above detects a timing problem
+ with the revision D/E harpoon chips. The caller should reset the
+ host adapter to recover when 0xFE is returned.
+ */
+ if (!(RDW_HARPOON((ioport+hp_intstat)) & (BUS_FREE | RSEL)))
+ {
+ mOS_Lock((PSCCBcard)pCurrCard);
+ MENABLE_INT(ioport);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return 0xFE;
+ }
+
+ WRW_HARPOON((ioport+hp_intstat), (BUS_FREE | ITAR_DISC));
+
+
+ ((PSCCBcard)pCurrCard)->globalFlags |= F_NEW_SCCB_CMD;
+
+ }
+
+
+ else if (hp_int & RSEL) {
+
+ WRW_HARPOON((ioport+hp_intstat), (PROG_HLT | RSEL | PHASE | BUS_FREE));
+
+ if (RDW_HARPOON((ioport+hp_intstat)) & ITAR_DISC)
+ {
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT)
+ {
+ phaseChkFifo(ioport, thisCard);
+ }
+
+ if (RD_HARPOON(ioport+hp_gp_reg_1) == SMSAVE_DATA_PTR)
+ {
+ WR_HARPOON(ioport+hp_gp_reg_1, 0x00);
+ currSCCB->Sccb_XferState |= F_NO_DATA_YET;
+ currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC;
+ }
+
+ WRW_HARPOON((ioport+hp_intstat), (BUS_FREE | ITAR_DISC));
+ currSCCB->Sccb_scsistat = DISCONNECT_ST;
+ queueDisconnect(currSCCB,thisCard);
+ }
+
+ sres(ioport,thisCard,((PSCCBcard)pCurrCard));
+ phaseDecode(ioport,thisCard);
+
+ }
+
+
+ else if ((hp_int & IDO_STRT) && (!(hp_int & BUS_FREE)))
+ {
+
+ WRW_HARPOON((ioport+hp_intstat), (IDO_STRT | XFER_CNT_0));
+ phaseDecode(ioport,thisCard);
+
+ }
+
+
+ else if ( (hp_int & IUNKWN) || (hp_int & PROG_HLT) )
+ {
+ WRW_HARPOON((ioport+hp_intstat), (PHASE | IUNKWN | PROG_HLT));
+ if ((RD_HARPOON(ioport+hp_prgmcnt_0) & (UCHAR)0x3f)< (UCHAR)SELCHK)
+ {
+ phaseDecode(ioport,thisCard);
+ }
+ else
+ {
+ /* Harpoon problem some SCSI target device respond to selection
+ with short BUSY pulse (<400ns) this will make the Harpoon is not able
+ to latch the correct Target ID into reg. x53.
+ The work around require to correct this reg. But when write to this
+ reg. (0x53) also increment the FIFO write addr reg (0x6f), thus we
+ need to read this reg first then restore it later. After update to 0x53 */
+
+ i = (UCHAR)(RD_HARPOON(ioport+hp_fifowrite));
+ target = (UCHAR)(RD_HARPOON(ioport+hp_gp_reg_3));
+ WR_HARPOON(ioport+hp_xfer_pad, (UCHAR) ID_UNLOCK);
+ WR_HARPOON(ioport+hp_select_id, (UCHAR)(target | target<<4));
+ WR_HARPOON(ioport+hp_xfer_pad, (UCHAR) 0x00);
+ WR_HARPOON(ioport+hp_fifowrite, i);
+ WR_HARPOON(ioport+hp_autostart_3, (AUTO_IMMED+TAG_STRT));
+ }
+ }
+
+ else if (hp_int & XFER_CNT_0) {
+
+ WRW_HARPOON((ioport+hp_intstat), XFER_CNT_0);
+
+ schkdd(ioport,thisCard);
+
+ }
+
+
+ else if (hp_int & BUS_FREE) {
+
+ WRW_HARPOON((ioport+hp_intstat), BUS_FREE);
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT) {
+
+ hostDataXferAbort(ioport,thisCard,currSCCB);
+ }
+
+ phaseBusFree(ioport,thisCard);
+ }
+
+
+ else if (hp_int & ITICKLE) {
+
+ WRW_HARPOON((ioport+hp_intstat), ITICKLE);
+ ((PSCCBcard)pCurrCard)->globalFlags |= F_NEW_SCCB_CMD;
+ }
+
+
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_NEW_SCCB_CMD) {
+
+
+ ((PSCCBcard)pCurrCard)->globalFlags &= ~F_NEW_SCCB_CMD;
+
+
+ if (((PSCCBcard)pCurrCard)->currentSCCB == NULL) {
+
+ queueSearchSelect(((PSCCBcard)pCurrCard),thisCard);
+ }
+
+ if (((PSCCBcard)pCurrCard)->currentSCCB != NULL) {
+ ((PSCCBcard)pCurrCard)->globalFlags &= ~F_NEW_SCCB_CMD;
+ ssel(ioport,thisCard);
+ }
+
+ break;
+
+ }
+
+ } /*end while */
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+ MENABLE_INT(ioport);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+
+ return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_bad_isr
+ *
+ * Description: Some type of interrupt has occurred which is slightly
+ * out of the ordinary. We will now decode it fully, in
+ * this routine. This is broken up in an attempt to save
+ * processing time.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+UCHAR SccbMgr_bad_isr(USHORT p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int)
+#else
+UCHAR SccbMgr_bad_isr(ULONG p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int)
+#endif
+{
+#if defined(HARP_REVX)
+ ULONG timer;
+#endif
+UCHAR temp, ScamFlg;
+PSCCBMgr_tar_info currTar_Info;
+PNVRamInfo pCurrNvRam;
+
+
+ if (RD_HARPOON(p_port+hp_ext_status) &
+ (BM_FORCE_OFF | PCI_DEV_TMOUT | BM_PARITY_ERR | PIO_OVERRUN) )
+ {
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+ {
+
+ hostDataXferAbort(p_port,p_card, pCurrCard->currentSCCB);
+ }
+
+ if (RD_HARPOON(p_port+hp_pci_stat_cfg) & REC_MASTER_ABORT)
+
+ {
+ WR_HARPOON(p_port+hp_pci_stat_cfg,
+ (RD_HARPOON(p_port+hp_pci_stat_cfg) & ~REC_MASTER_ABORT));
+
+ WR_HARPOON(p_port+hp_host_blk_cnt, 0x00);
+
+ }
+
+ if (pCurrCard->currentSCCB != NULL)
+ {
+
+ if (!pCurrCard->currentSCCB->HostStatus)
+ pCurrCard->currentSCCB->HostStatus = SCCB_BM_ERR;
+
+ sxfrp(p_port,p_card);
+
+ temp = (UCHAR)(RD_HARPOON(p_port+hp_ee_ctrl) &
+ (EXT_ARB_ACK | SCSI_TERM_ENA_H));
+ WR_HARPOON(p_port+hp_ee_ctrl, ((UCHAR)temp | SEE_MS | SEE_CS));
+ WR_HARPOON(p_port+hp_ee_ctrl, temp);
+
+ if (!(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)))
+ {
+ phaseDecode(p_port,p_card);
+ }
+ }
+ }
+
+
+ else if (p_int & RESET)
+ {
+
+ WR_HARPOON(p_port+hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(p_port+hp_sys_ctrl, 0x00);
+ if (pCurrCard->currentSCCB != NULL) {
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+
+ hostDataXferAbort(p_port,p_card, pCurrCard->currentSCCB);
+ }
+
+
+ DISABLE_AUTO(p_port);
+
+ sresb(p_port,p_card);
+
+ while(RD_HARPOON(p_port+hp_scsictrl_0) & SCSI_RST) {}
+
+ pCurrNvRam = pCurrCard->pNvRamInfo;
+ if(pCurrNvRam){
+ ScamFlg = pCurrNvRam->niScamConf;
+ }
+ else{
+ ScamFlg = (UCHAR) utilEERead(p_port, SCAM_CONFIG/2);
+ }
+
+ XbowInit(p_port, ScamFlg);
+
+ scini(p_card, pCurrCard->ourId, 0);
+
+ return(0xFF);
+ }
+
+
+ else if (p_int & FIFO) {
+
+ WRW_HARPOON((p_port+hp_intstat), FIFO);
+
+#if defined(HARP_REVX)
+
+ for (timer=0x00FFFFFFL; timer != 0x00000000L; timer--) {
+
+ if (RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY)
+ break;
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & BUS_FREE)
+ break;
+ }
+
+
+ if ( (RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY) &&
+ (RD_HARPOON(p_port+hp_fiforead) !=
+ RD_HARPOON(p_port+hp_fifowrite)) &&
+ (RD_HARPOON(p_port+hp_xfercnt_0))
+ )
+
+ WR_HARPOON((p_port+hp_xferstat), 0x01);
+
+/* else
+ */
+/* sxfrp(p_port,p_card);
+ */
+#else
+ if (pCurrCard->currentSCCB != NULL)
+ sxfrp(p_port,p_card);
+#endif
+ }
+
+ else if (p_int & TIMEOUT)
+ {
+
+ DISABLE_AUTO(p_port);
+
+ WRW_HARPOON((p_port+hp_intstat),
+ (PROG_HLT | TIMEOUT | SEL |BUS_FREE | PHASE | IUNKWN));
+
+ pCurrCard->currentSCCB->HostStatus = SCCB_SELECTION_TIMEOUT;
+
+
+ currTar_Info = &sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
+ if((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ currTar_Info->TarLUNBusy[pCurrCard->currentSCCB->Lun] = FALSE;
+ else
+ currTar_Info->TarLUNBusy[0] = FALSE;
+
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK)
+ {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+ sssyncv(p_port, pCurrCard->currentSCCB->TargID, NARROW_SCSI,currTar_Info);
+
+ queueCmdComplete(pCurrCard, pCurrCard->currentSCCB, p_card);
+
+ }
+
+#if defined(SCAM_LEV_2)
+
+ else if (p_int & SCAM_SEL)
+ {
+
+ scarb(p_port,LEVEL2_TAR);
+ scsel(p_port);
+ scasid(p_card, p_port);
+
+ scbusf(p_port);
+
+ WRW_HARPOON((p_port+hp_intstat), SCAM_SEL);
+ }
+#endif
+
+ return(0x00);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_scsi_reset
+ *
+ * Description: A SCSI bus reset will be generated and all outstanding
+ * Sccbs will be returned via the callback.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_scsi_reset(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+void SccbMgr_scsi_reset(USHORT pCurrCard)
+#else
+void SccbMgr_scsi_reset(ULONG pCurrCard)
+#endif
+#endif
+{
+ UCHAR thisCard;
+
+ thisCard = ((PSCCBcard)pCurrCard)->cardIndex;
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+
+ /* Leave green-PC (power-save) mode before touching the bus. */
+ if (((PSCCBcard) pCurrCard)->globalFlags & F_GREEN_PC)
+ {
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_sys_ctrl, 0x00);
+ }
+
+ /* Generate the SCSI bus reset; outstanding SCCBs complete via callback. */
+ sresb(((PSCCBcard)pCurrCard)->ioPort,thisCard);
+
+ /* If a bus-master command is still busy, shut down scatter/gather,
+    time out the bus master and re-arm the interrupt mask. */
+ if (RD_HARPOON(((PSCCBcard)pCurrCard)->ioPort+hp_ext_status) & BM_CMD_BUSY)
+ {
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_page_ctrl,
+ (RD_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_page_ctrl)
+ & ~SCATTER_EN));
+
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_sg_addr,0x00);
+
+ ((PSCCBcard) pCurrCard)->globalFlags &= ~F_HOST_XFER_ACT;
+ busMstrTimeOut(((PSCCBcard) pCurrCard)->ioPort);
+
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_int_mask,
+ (INT_CMD_COMPL | SCSI_INTERRUPT));
+ }
+
+/*
+ if (utilEERead(((PSCCBcard)pCurrCard)->ioPort, (SCAM_CONFIG/2))
+ & SCAM_ENABLED)
+*/
+ /* Rescan the bus (SCAM initialization) after the reset. */
+ scini(thisCard, ((PSCCBcard)pCurrCard)->ourId, 0);
+
+#if (FW_TYPE==_UCB_MGR_)
+ /* Notify the UCB manager via the async-event callback (event 0x01). */
+ ((PSCCBcard)pCurrCard)->cardInfo->ai_AEN_routine(0x01,pCurrCard,0,0,0,0);
+#endif
+
+ mOS_UnLock((PSCCBcard)pCurrCard);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_timer_expired
+ *
+ * Description: This function allows the driver to kill its own job
+ *              when it has not yet completed and has caused a timeout
+ *              to occur. That timeout causes the upper level driver
+ *              to call this function.
+ *
+ *---------------------------------------------------------------------*/
+
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_timer_expired(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+void SccbMgr_timer_expired(USHORT pCurrCard)
+#else
+void SccbMgr_timer_expired(ULONG pCurrCard)
+#endif
+#endif
+{
+ /* Intentionally empty stub: no timeout recovery is implemented here. */
+}
+
+#if defined(DOS)
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_status
+ *
+ * Description: This function returns the number of outstanding SCCB's.
+ * This is specific to the DOS environment, which needs this
+ * to help it keep protected and real mode commands straight.
+ *
+ *---------------------------------------------------------------------*/
+
+USHORT SccbMgr_status(USHORT pCurrCard)
+{
+ /* In the DOS build pCurrCard is a card index, not a pointer. */
+ return(BL_Card[pCurrCard].cmdCounter);
+}
+#endif
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInitAll
+ *
+ * Description: Initialize all Sccb manager data structures.
+ *
+ *---------------------------------------------------------------------*/
+
+/* Initialize every card slot in BL_Card and mark it unused
+   (no I/O port, no card info, index 0xFF). */
+void SccbMgrTableInitAll()
+{
+ UCHAR thisCard;
+
+ for (thisCard = 0; thisCard < MAX_CARDS; thisCard++)
+ {
+ SccbMgrTableInitCard(&BL_Card[thisCard],thisCard);
+
+ BL_Card[thisCard].ioPort = 0x00;
+ BL_Card[thisCard].cardInfo = NULL;
+ BL_Card[thisCard].cardIndex = 0xFF;
+ BL_Card[thisCard].ourId = 0x00;
+ BL_Card[thisCard].pNvRamInfo = NULL;
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInitCard
+ *
+ * Description: Initialize the per-card Sccb manager data structures.
+ *
+ *---------------------------------------------------------------------*/
+
+/* Reset the per-card disconnect queue, all target tables, and the
+   card's bookkeeping fields.
+   NOTE(review): mixes pCurrCard with BL_Card[p_card] — assumes
+   pCurrCard == &BL_Card[p_card]; confirm at call sites. */
+void SccbMgrTableInitCard(PSCCBcard pCurrCard, UCHAR p_card)
+{
+ UCHAR scsiID, qtag;
+
+ /* Empty the disconnect-queue table. */
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++)
+ {
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ }
+
+ /* Clear and (re)initialize every target's state table. */
+ for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++)
+ {
+ sccbMgrTbl[p_card][scsiID].TarStatus = 0;
+ sccbMgrTbl[p_card][scsiID].TarEEValue = 0;
+ SccbMgrTableInitTarget(p_card, scsiID);
+ }
+
+ pCurrCard->scanIndex = 0x00;
+ pCurrCard->currentSCCB = NULL;
+ pCurrCard->globalFlags = 0x00;
+ pCurrCard->cmdCounter = 0x00;
+ pCurrCard->tagQ_Lst = 0x01;
+ pCurrCard->discQCount = 0;
+
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInitTarget
+ *
+ * Description: Initialize the per-target Sccb manager data structures.
+ *
+ *---------------------------------------------------------------------*/
+
+/* Reset one target's state: selection queue, tag queue counters,
+   per-LUN busy flags, and remove any of its SCCBs still parked in the
+   card's disconnect queue. */
+void SccbMgrTableInitTarget(UCHAR p_card, UCHAR target)
+{
+
+ UCHAR lun, qtag;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currTar_Info = &sccbMgrTbl[p_card][target];
+
+ currTar_Info->TarSelQ_Cnt = 0;
+ currTar_Info->TarSyncCtrl = 0;
+
+ currTar_Info->TarSelQ_Head = NULL;
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarTagQ_Cnt = 0;
+ currTar_Info->TarLUN_CA = FALSE;
+
+
+ for (lun = 0; lun < MAX_LUN; lun++)
+ {
+ currTar_Info->TarLUNBusy[lun] = FALSE;
+ currTar_Info->LunDiscQ_Idx[lun] = 0;
+ }
+
+ /* Drop any disconnect-queue entries belonging to this target and
+    keep the card's queue count consistent. */
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++)
+ {
+ if(BL_Card[p_card].discQ_Tbl[qtag] != NULL)
+ {
+ if(BL_Card[p_card].discQ_Tbl[qtag]->TargID == target)
+ {
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ BL_Card[p_card].discQCount--;
+ }
+ }
+ }
+}
+
+#if defined(BUGBUG)
+
+/*****************************************************************
+ * Save the current byte in the debug array
+ *****************************************************************/
+
+
+/* Append one byte to the per-card circular debug trace buffer. */
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data)
+{
+ debug_int[p_card][debug_index[p_card]] = p_bug_data;
+ debug_index[p_card]++;
+
+ /* Wrap the write index at the end of the buffer. */
+ if (debug_index[p_card] == debug_size)
+
+ debug_index[p_card] = 0;
+}
+
+#endif
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: sccb_dat.c $
+ *
+ * Description: Functions relating to handling of the SCCB interface
+ * between the device driver and the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <harpoon.h>*/
+
+/*
+** IMPORTANT NOTE!!!
+**
+** You MUST preassign all data to a valid value or zero. This is
+** required due to the MS compiler bug under OS/2 and Solaris Real-Mode
+** driver environment.
+*/
+
+
+/* Per-target state, indexed by [card][SCSI target ID]. */
+SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR] = { { { 0 } } };
+/* Per-adapter (card) state. */
+SCCBCARD BL_Card[MAX_CARDS] = { { 0 } };
+/* SCAM protocol state, one entry per target. */
+SCCBSCAM_INFO scamInfo[MAX_SCSI_TAR] = { { { 0 } } };
+/* NVRAM contents for motherboard-based cards. */
+NVRAMINFO nvRamInfo[MAX_MB_CARDS] = { { 0 } };
+
+
+/* SCSI bus-phase dispatch table (8 entries, one per phase);
+   far pointers are required under OS/2. */
+#if defined(OS2)
+void (far *s_PhaseTbl[8]) (ULONG, UCHAR) = { 0 };
+UCHAR temp_id_string[ID_STRING_LENGTH] = { 0 };
+#elif defined(SOLARIS_REAL_MODE) || defined(__STDC__)
+void (*s_PhaseTbl[8]) (ULONG, UCHAR) = { 0 };
+#else
+void (*s_PhaseTbl[8]) ();
+#endif
+
+#if defined(DOS)
+UCHAR first_time = 0;
+#endif
+
+/* Count of motherboard-based cards found. */
+UCHAR mbCards = 0;
+/* SCAM host-adapter identification string: two header bytes followed
+   by "BUSLOGIC BT-930" padded with spaces (0x20). */
+UCHAR scamHAString[] = {0x63, 0x07, 'B', 'U', 'S', 'L', 'O', 'G', 'I', 'C', \
+ ' ', 'B', 'T', '-', '9', '3', '0', \
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, \
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20};
+
+/* Default interrupt-enable mask; set up elsewhere during card init. */
+USHORT default_intena = 0;
+
+#if defined(BUGBUG)
+/* Circular debug trace buffers used by Debug_Load(). */
+UCHAR debug_int[MAX_CARDS][debug_size] = { 0 };
+UCHAR debug_index[MAX_CARDS] = { 0 };
+UCHAR reserved_1[3] = { 0 };
+#endif
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: scsi.c $
+ *
+ * Description: Functions for handling SCSI bus functions such as
+ * selection/reselection, sync negotiation, message-in
+ * decoding.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <eeprom.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+#if defined(BUGBUG)
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sfm
+ *
+ * Description: Read in a message byte from the SCSI bus, and check
+ * for a parity error.
+ *
+ *---------------------------------------------------------------------*/
+
+/* Read one message byte from the SCSI bus during MESSAGE IN phase.
+   Returns the byte, or 0x00 on REQ timeout / parity error; on a parity
+   error it flags SMPARITY in the SCCB and drains the bad message with
+   ATN asserted so the target will retry. */
+#if defined(DOS)
+UCHAR sfm(USHORT port, PSCCB pCurrSCCB)
+#else
+UCHAR sfm(ULONG port, PSCCB pCurrSCCB)
+#endif
+{
+ UCHAR message;
+ USHORT TimeOutLoop;
+
+ /* Bounded wait for the target to assert REQ. */
+ TimeOutLoop = 0;
+ while( (!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+ (TimeOutLoop++ < 20000) ){}
+
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+ message = RD_HARPOON(port+hp_scsidata_0);
+
+ WR_HARPOON(port+hp_scsisig, SCSI_ACK + S_MSGI_PH);
+
+
+ if (TimeOutLoop > 20000)
+ message = 0x00; /* force message byte = 0 if Time Out on Req */
+
+ /* Parity error on the message byte: record SMPARITY, reset the FIFO
+    pointers, and consume the remainder of the message with ATN set. */
+ if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+ (RD_HARPOON(port+hp_addstat) & SCSI_PAR_ERR))
+ {
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ WR_HARPOON(port+hp_xferstat, 0);
+ WR_HARPOON(port+hp_fiforead, 0);
+ WR_HARPOON(port+hp_fifowrite, 0);
+ if (pCurrSCCB != NULL)
+ {
+ pCurrSCCB->Sccb_scsimsg = SMPARITY;
+ }
+ message = 0x00;
+ do
+ {
+ ACCEPT_MSG_ATN(port);
+ TimeOutLoop = 0;
+ while( (!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+ (TimeOutLoop++ < 20000) ){}
+ if (TimeOutLoop > 20000)
+ {
+ WRW_HARPOON((port+hp_intstat), PARITY);
+ return(message);
+ }
+ /* Stop draining once the target leaves MESSAGE IN phase. */
+ if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) != S_MSGI_PH)
+ {
+ WRW_HARPOON((port+hp_intstat), PARITY);
+ return(message);
+ }
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+ /* Discard the byte; only the handshake matters here. */
+ RD_HARPOON(port+hp_scsidata_0);
+
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+ }while(1);
+
+ }
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ WR_HARPOON(port+hp_xferstat, 0);
+ WR_HARPOON(port+hp_fiforead, 0);
+ WR_HARPOON(port+hp_fifowrite, 0);
+ return(message);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: ssel
+ *
+ * Description: Load up automation and select target device.
+ *
+ *---------------------------------------------------------------------*/
+
+/* Load the Harpoon automation engine and select the target for the
+   card's current SCCB: picks a disconnect-queue slot / queue tag,
+   programs the identify (and optional tag / negotiation) messages and
+   CDB into automation RAM, then starts selection. */
+#if defined(DOS)
+void ssel(USHORT port, UCHAR p_card)
+#else
+void ssel(ULONG port, UCHAR p_card)
+#endif
+{
+
+#if defined(DOS)
+ UCHAR auto_loaded, i, target, *theCCB;
+#elif defined(OS2)
+ UCHAR auto_loaded, i, target;
+ UCHAR far *theCCB;
+#else
+ UCHAR auto_loaded, i, target, *theCCB;
+#endif
+
+#if defined(DOS)
+ USHORT cdb_reg;
+#else
+ ULONG cdb_reg;
+#endif
+ PSCCBcard CurrCard;
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+ UCHAR lastTag, lun;
+
+ CurrCard = &BL_Card[p_card];
+ currSCCB = CurrCard->currentSCCB;
+ target = currSCCB->TargID;
+ currTar_Info = &sccbMgrTbl[p_card][target];
+ lastTag = CurrCard->tagQ_Lst;
+
+ /* Map automation RAM into the I/O window while we program it. */
+ ARAM_ACCESS(port);
+
+
+ /* Target rejected tagged queuing earlier: fall back to untagged. */
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+
+ if(((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+
+ lun = currSCCB->Lun;
+ else
+ lun = 0;
+
+
+#if defined(DOS)
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+
+#else
+
+ /* Decide whether this selection may proceed; a contingent-allegiance
+    condition or a non-empty tag queue for an untagged command forces a
+    requeue via queueSelectFail(). */
+ if (CurrCard->globalFlags & F_TAG_STARTED)
+ {
+ if (!(currSCCB->ControlByte & F_USE_CMD_Q))
+ {
+ if ((currTar_Info->TarLUN_CA == FALSE)
+ && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
+ == TAG_Q_TRYING))
+ {
+
+ if (currTar_Info->TarTagQ_Cnt !=0)
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ else {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+ } /*End non-tagged */
+
+ else {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+ } /*!Use cmd Q Tagged */
+
+ else {
+ if (currTar_Info->TarLUN_CA == TRUE)
+ {
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+
+ } /*else use cmd Q tagged */
+
+ } /*if glob tagged started */
+
+ else {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+#endif /* DOS */
+
+
+
+ /* Untagged path: reserve a disconnect-queue slot for this command;
+    bail out (requeue) when the queue is full. */
+ if((((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ || (!(currSCCB->ControlByte & F_USE_CMD_Q))))
+ {
+ if(CurrCard->discQCount >= QUEUE_DEPTH)
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+ /* Scan for a free slot starting after the last tag used;
+    slot 0 is never used (tags start at 1). */
+ for (i = 1; i < QUEUE_DEPTH; i++)
+ {
+ if (++lastTag >= QUEUE_DEPTH) lastTag = 1;
+ if (CurrCard->discQ_Tbl[lastTag] == NULL)
+ {
+ CurrCard->tagQ_Lst = lastTag;
+ currTar_Info->LunDiscQ_Idx[lun] = lastTag;
+ CurrCard->discQ_Tbl[lastTag] = currSCCB;
+ CurrCard->discQCount++;
+ break;
+ }
+ }
+ if(i == QUEUE_DEPTH)
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+ }
+
+
+
+ auto_loaded = FALSE;
+
+ WR_HARPOON(port+hp_select_id, target);
+ WR_HARPOON(port+hp_gp_reg_3, target); /* Use by new automation logic */
+
+ /* Bus-device-reset request: send IDENTIFY (without disconnect
+    privilege) + device-reset message and clear all negotiation state. */
+ if (currSCCB->OperationCode == RESET_COMMAND) {
+ WRW_HARPOON((port+ID_MSG_STRT), (MPM_OP+AMSG_OUT+
+ (currSCCB->Sccb_idmsg & ~DISC_PRIV)));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+NP);
+
+ currSCCB->Sccb_scsimsg = SMDEV_RESET;
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+ auto_loaded = TRUE;
+ currSCCB->Sccb_scsistat = SELECT_BDR_ST;
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK)
+ {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+#if defined(WIDE_SCSI)
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+#endif
+
+ sssyncv(port, target, NARROW_SCSI,currTar_Info);
+ SccbMgrTableInitTarget(p_card, target);
+
+ }
+
+ /* Abort-tag request: identify, then send the queue-tag message pair. */
+ else if(currSCCB->Sccb_scsistat == ABORT_ST)
+ {
+ WRW_HARPOON((port+ID_MSG_STRT), (MPM_OP+AMSG_OUT+
+ (currSCCB->Sccb_idmsg & ~DISC_PRIV)));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+CMDPZ);
+
+ /* 0x20 | (tag-type bits) forms the two-byte queue-tag message. */
+ WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+
+ (((UCHAR)(currSCCB->ControlByte & TAG_TYPE_MASK)
+ >> 6) | (UCHAR)0x20)));
+ WRW_HARPOON((port+SYNC_MSGS+2),
+ (MPM_OP+AMSG_OUT+currSCCB->Sccb_tag));
+ WRW_HARPOON((port+SYNC_MSGS+4), (BRH_OP+ALWAYS+NP ));
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+ auto_loaded = TRUE;
+
+ }
+
+#if defined(WIDE_SCSI)
+
+
+ /* Wide negotiation not done yet: let siwidn() load the automation. */
+ else if (!(currTar_Info->TarStatus & WIDE_NEGOCIATED)) {
+ auto_loaded = siwidn(port,p_card);
+ currSCCB->Sccb_scsistat = SELECT_WN_ST;
+ }
+
+#endif
+
+
+ /* Sync negotiation not settled yet: let sisyncn() load the automation. */
+ else if (!((currTar_Info->TarStatus & TAR_SYNC_MASK)
+ == SYNC_SUPPORTED)) {
+ auto_loaded = sisyncn(port,p_card, FALSE);
+ currSCCB->Sccb_scsistat = SELECT_SN_ST;
+ }
+
+
+ /* Normal command path: program identify (+tag) messages and the CDB. */
+ if (!auto_loaded)
+ {
+
+#if !defined(DOS)
+ if (currSCCB->ControlByte & F_USE_CMD_Q)
+ {
+
+ CurrCard->globalFlags |= F_TAG_STARTED;
+
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
+ == TAG_Q_REJECT)
+ {
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+
+ /* Fix up the start instruction with a jump to
+ Non-Tag-CMD handling */
+ WRW_HARPOON((port+ID_MSG_STRT),BRH_OP+ALWAYS+NTCMD);
+
+ WRW_HARPOON((port+NON_TAG_ID_MSG),
+ (MPM_OP+AMSG_OUT+currSCCB->Sccb_idmsg));
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+
+ /* Setup our STATE so we know what happened when
+ the wheels fall off. */
+ currSCCB->Sccb_scsistat = SELECT_ST;
+
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+ else
+ {
+ WRW_HARPOON((port+ID_MSG_STRT), (MPM_OP+AMSG_OUT+currSCCB->Sccb_idmsg));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2), (MPM_OP+AMSG_OUT+
+ (((UCHAR)(currSCCB->ControlByte & TAG_TYPE_MASK)
+ >> 6) | (UCHAR)0x20)));
+
+ /* Allocate a free queue tag and bind the SCCB to it. */
+ for (i = 1; i < QUEUE_DEPTH; i++)
+ {
+ if (++lastTag >= QUEUE_DEPTH) lastTag = 1;
+ if (CurrCard->discQ_Tbl[lastTag] == NULL)
+ {
+ WRW_HARPOON((port+ID_MSG_STRT+6),
+ (MPM_OP+AMSG_OUT+lastTag));
+ CurrCard->tagQ_Lst = lastTag;
+ currSCCB->Sccb_tag = lastTag;
+ CurrCard->discQ_Tbl[lastTag] = currSCCB;
+ CurrCard->discQCount++;
+ break;
+ }
+ }
+
+
+ /* No free tag: requeue the command. */
+ if ( i == QUEUE_DEPTH )
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ currSCCB->Sccb_scsistat = SELECT_Q_ST;
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+ }
+ }
+
+ else
+ {
+#endif /* !DOS */
+
+ /* Untagged command: jump straight to the non-tag handler. */
+ WRW_HARPOON((port+ID_MSG_STRT),BRH_OP+ALWAYS+NTCMD);
+
+ WRW_HARPOON((port+NON_TAG_ID_MSG),
+ (MPM_OP+AMSG_OUT+currSCCB->Sccb_idmsg));
+
+ currSCCB->Sccb_scsistat = SELECT_ST;
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+#if !defined(DOS)
+ }
+#endif
+
+
+#if defined(OS2)
+ theCCB = (UCHAR far *)&currSCCB->Cdb[0];
+#else
+ theCCB = (UCHAR *)&currSCCB->Cdb[0];
+#endif
+
+ /* Copy the CDB into automation RAM, one byte per 16-bit opcode slot. */
+ cdb_reg = port + CMD_STRT;
+
+ for (i=0; i < currSCCB->CdbLength; i++)
+ {
+ WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + *theCCB));
+ cdb_reg +=2;
+ theCCB++;
+ }
+
+ /* Terminate the command sequence unless the CDB fills all 12 slots. */
+ if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
+ WRW_HARPOON(cdb_reg, (BRH_OP+ALWAYS+ NP));
+
+ } /* auto_loaded */
+
+#if defined(WIDE_SCSI)
+ WRW_HARPOON((port+hp_fiforead), (USHORT) 0x00);
+ WR_HARPOON(port+hp_xferstat, 0x00);
+#endif
+
+ WRW_HARPOON((port+hp_intstat), (PROG_HLT | TIMEOUT | SEL | BUS_FREE));
+
+ WR_HARPOON(port+hp_portctrl_0,(SCSI_PORT));
+
+
+ /* Start selection, or — if the device is already selected — kick the
+    preloaded automation immediately. */
+ if (!(currSCCB->Sccb_MGRFlags & F_DEV_SELECTED))
+ {
+ WR_HARPOON(port+hp_scsictrl_0, (SEL_TAR | ENA_ATN | ENA_RESEL | ENA_SCAM_SEL));
+ }
+ else
+ {
+
+/* auto_loaded = (RD_HARPOON(port+hp_autostart_3) & (UCHAR)0x1F);
+ auto_loaded |= AUTO_IMMED; */
+ auto_loaded = AUTO_IMMED;
+
+ DISABLE_AUTO(port);
+
+ WR_HARPOON(port+hp_autostart_3, auto_loaded);
+ }
+
+ /* Restore scatter/gather RAM mapping before returning. */
+ SGRAM_ACCESS(port);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sres
+ *
+ * Description: Hookup the correct CCB and handle the incoming messages.
+ *
+ *---------------------------------------------------------------------*/
+
+/* Handle reselection by a target: requeue any selection that was in
+   progress, read the IDENTIFY (and optional queue-tag) message to find
+   which disconnected SCCB to resume, and hook it up as currentSCCB. */
+#if defined(DOS)
+void sres(USHORT port, UCHAR p_card, PSCCBcard pCurrCard)
+#else
+void sres(ULONG port, UCHAR p_card, PSCCBcard pCurrCard)
+#endif
+{
+
+#if defined(V302)
+#ifdef DOS
+ UCHAR our_target,message, msgRetryCount;
+ extern UCHAR lun, tag;
+#else
+ UCHAR our_target,message,lun,tag, msgRetryCount;
+#endif
+
+#else /* V302 */
+ UCHAR our_target, message, lun = 0, tag, msgRetryCount;
+#endif /* V302 */
+
+
+ PSCCBMgr_tar_info currTar_Info;
+ PSCCB currSCCB;
+
+
+
+
+ /* We were reselected while trying to select someone else: undo that
+    selection's state (negotiation flags, LUN busy, disconnect-queue
+    slot) and requeue the interrupted SCCB. */
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ currTar_Info = &sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
+ DISABLE_AUTO(port);
+
+
+ WR_HARPOON((port+hp_scsictrl_0),(ENA_RESEL | ENA_SCAM_SEL));
+
+
+ currSCCB = pCurrCard->currentSCCB;
+ /* An interrupted wide/sync negotiation must be restarted. */
+ if(currSCCB->Sccb_scsistat == SELECT_WN_ST)
+ {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+ }
+ if(currSCCB->Sccb_scsistat == SELECT_SN_ST)
+ {
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+ }
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ currTar_Info->TarLUNBusy[currSCCB->Lun] = FALSE;
+ if(currSCCB->Sccb_scsistat != ABORT_ST)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[currSCCB->Lun]]
+ = NULL;
+ }
+ }
+ else
+ {
+ currTar_Info->TarLUNBusy[0] = FALSE;
+ if(currSCCB->Sccb_tag)
+ {
+ if(currSCCB->Sccb_scsistat != ABORT_ST)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ }
+ }else
+ {
+ if(currSCCB->Sccb_scsistat != ABORT_ST)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ }
+
+ queueSelectFail(&BL_Card[p_card],p_card);
+ }
+
+#if defined(WIDE_SCSI)
+ WRW_HARPOON((port+hp_fiforead), (USHORT) 0x00);
+#endif
+
+
+ /* The reselecting target's ID sits in the high nibble of select_id. */
+ our_target = (UCHAR)(RD_HARPOON(port+hp_select_id) >> 4);
+ currTar_Info = &sccbMgrTbl[p_card][our_target];
+
+
+ /* Read the identify/tag messages; on garbage, retry once with a
+    parity-error message, then give up with a device reset. */
+ msgRetryCount = 0;
+ do
+ {
+
+#if defined(V302)
+
+ message = GetTarLun(port, p_card, our_target, pCurrCard, &tag, &lun);
+
+#else /* V302 */
+
+ currTar_Info = &sccbMgrTbl[p_card][our_target];
+ tag = 0;
+
+
+ /* Wait for REQ; if BSY drops first the reselection evaporated. */
+ while(!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ))
+ {
+ if (! (RD_HARPOON(port+hp_scsisig) & SCSI_BSY))
+ {
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ return;
+ }
+ }
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH)
+ {
+
+ /* First message must be IDENTIFY (0x80 | LUN). */
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+
+ if (message <= (0x80 | LUN_MASK))
+ {
+ lun = message & (UCHAR)LUN_MASK;
+
+#if !defined(DOS)
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING)
+ {
+ if (currTar_Info->TarTagQ_Cnt != 0)
+ {
+
+ /* Tagged target without contingent allegiance: the
+    next two message bytes are the queue-tag message. */
+ if (!(currTar_Info->TarLUN_CA))
+ {
+ ACCEPT_MSG(port); /*Release the ACK for ID msg. */
+
+
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+ ACCEPT_MSG(port);
+ }
+
+ else
+ message = FALSE;
+
+ if(message != FALSE)
+ {
+ tag = sfm(port,pCurrCard->currentSCCB);
+
+ if (!(tag))
+ message = FALSE;
+ }
+
+ } /*C.A. exists! */
+
+ } /*End Q cnt != 0 */
+
+ } /*End Tag cmds supported! */
+#endif /* !DOS */
+
+ } /*End valid ID message. */
+
+ else
+ {
+
+ /* Not an IDENTIFY: reject by keeping ATN asserted. */
+ ACCEPT_MSG_ATN(port);
+ }
+
+ } /* End good id message. */
+
+ else
+ {
+
+ message = FALSE;
+ }
+ }
+ else
+ {
+ /* Not in MESSAGE IN phase: assert ATN and wait for the bus
+    to settle, then bail out. */
+ ACCEPT_MSG_ATN(port);
+
+ while (!(RDW_HARPOON((port+hp_intstat)) & (PHASE | RESET)) &&
+ !(RD_HARPOON(port+hp_scsisig) & SCSI_REQ) &&
+ (RD_HARPOON(port+hp_scsisig) & SCSI_BSY)) ;
+
+ return;
+ }
+
+#endif /* V302 */
+
+ if(message == FALSE)
+ {
+ msgRetryCount++;
+ if(msgRetryCount == 1)
+ {
+ SendMsg(port, SMPARITY);
+ }
+ else
+ {
+ /* Second failure: reset the device and wipe its
+    negotiation state and queued commands. */
+ SendMsg(port, SMDEV_RESET);
+
+ sssyncv(port, our_target, NARROW_SCSI,currTar_Info);
+
+ if (sccbMgrTbl[p_card][our_target].TarEEValue & EE_SYNC_MASK)
+ {
+
+ sccbMgrTbl[p_card][our_target].TarStatus &= ~TAR_SYNC_MASK;
+
+ }
+
+ if (sccbMgrTbl[p_card][our_target].TarEEValue & EE_WIDE_SCSI)
+ {
+
+ sccbMgrTbl[p_card][our_target].TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+
+ queueFlushTargSccb(p_card, our_target, SCCB_COMPLETE);
+ SccbMgrTableInitTarget(p_card,our_target);
+ return;
+ }
+ }
+ }while(message == FALSE);
+
+
+
+ /* Locate the disconnected SCCB this reselection belongs to, via
+    per-LUN index or queue tag, and accept or reject the message. */
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[lun]];
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ ACCEPT_MSG(port);
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+ }
+ }
+ else
+ {
+ currTar_Info->TarLUNBusy[0] = TRUE;
+
+
+ if (tag)
+ {
+ if (pCurrCard->discQ_Tbl[tag] != NULL)
+ {
+ pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[tag];
+ currTar_Info->TarTagQ_Cnt--;
+ ACCEPT_MSG(port);
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+ }
+ }else
+ {
+ pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]];
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ ACCEPT_MSG(port);
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+ }
+ }
+ }
+
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ if(pCurrCard->currentSCCB->Sccb_scsistat == ABORT_ST)
+ {
+ /* During Abort Tag command, the target could have got re-selected
+ and completed the command. Check the select Q and remove the CCB
+ if it is in the Select Q */
+ queueFindSccb(pCurrCard->currentSCCB, p_card);
+ }
+ }
+
+
+ /* Let the bus settle into the next phase before returning. */
+ while (!(RDW_HARPOON((port+hp_intstat)) & (PHASE | RESET)) &&
+ !(RD_HARPOON(port+hp_scsisig) & SCSI_REQ) &&
+ (RD_HARPOON(port+hp_scsisig) & SCSI_BSY)) ;
+}
+
+#if defined(V302)
+
+/* V302 helper for sres(): read the IDENTIFY (and optional queue-tag)
+   message from a reselecting target, returning the LUN and tag through
+   the out parameters. Returns TRUE on success or a benign bus drop,
+   FALSE when the message exchange failed and must be retried. */
+#if defined(DOS)
+UCHAR GetTarLun(USHORT port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tag, PUCHAR lun)
+#else
+UCHAR GetTarLun(ULONG port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tag, PUCHAR lun)
+#endif
+{
+ UCHAR message;
+ PSCCBMgr_tar_info currTar_Info;
+
+
+ currTar_Info = &sccbMgrTbl[p_card][our_target];
+ *tag = 0;
+
+
+ /* Wait for REQ; if BSY drops first the reselection evaporated. */
+ while(!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ))
+ {
+ if (! (RD_HARPOON(port+hp_scsisig) & SCSI_BSY))
+ {
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ return(TRUE);
+ }
+ }
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH)
+ {
+
+ /* First message must be IDENTIFY (0x80 | LUN). */
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+
+ if (message <= (0x80 | LUN_MASK))
+ {
+ *lun = message & (UCHAR)LUN_MASK;
+
+#if !defined(DOS)
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING)
+ {
+ if (currTar_Info->TarTagQ_Cnt != 0)
+ {
+
+ /* Tagged target without contingent allegiance: the
+    next two message bytes are the queue-tag message. */
+ if (!(currTar_Info->TarLUN_CA))
+ {
+ ACCEPT_MSG(port); /*Release the ACK for ID msg. */
+
+
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+ ACCEPT_MSG(port);
+ }
+
+ else
+ return(FALSE);
+
+ *tag = sfm(port,pCurrCard->currentSCCB);
+
+ if (!(*tag)) return(FALSE);
+
+ } /*C.A. exists! */
+
+ } /*End Q cnt != 0 */
+
+ } /*End Tag cmds supported! */
+#endif /* !DOS */
+
+ } /*End valid ID message. */
+
+ else
+ {
+
+ /* Not an IDENTIFY: reject by keeping ATN asserted. */
+ ACCEPT_MSG_ATN(port);
+ }
+
+ } /* End good id message. */
+
+ else
+ {
+
+ return(FALSE);
+ }
+ }
+ else
+ {
+ /* Not in MESSAGE IN phase: flag attention and report success so the
+    caller proceeds without a tag/LUN update. */
+ ACCEPT_MSG_ATN(port);
+ return(TRUE);
+ }
+ return(TRUE);
+}
+
+#endif /* V302 */
+
+/* SendMsg: hand-drive one message byte to the target during MESSAGE OUT
+   phase with the automation bypassed.  For ABORT / DEVICE RESET /
+   ABORT TAG messages it then waits for the expected BUS FREE. */
+#if defined(DOS)
+void SendMsg(USHORT port, UCHAR message)
+#else
+void SendMsg(ULONG port, UCHAR message)
+#endif
+{
+   /* Wait for REQ; if BSY drops first the connection is gone. */
+   while(!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ))
+      {
+      if (! (RD_HARPOON(port+hp_scsisig) & SCSI_BSY))
+	 {
+
+	 WRW_HARPOON((port+hp_intstat), PHASE);
+	 return;
+	 }
+      }
+
+   WRW_HARPOON((port+hp_intstat), PHASE);
+   if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) == S_MSGO_PH)
+      {
+      WRW_HARPOON((port+hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
+
+
+      WR_HARPOON(port+hp_portctrl_0, SCSI_BUS_EN);
+
+      WR_HARPOON(port+hp_scsidata_0,message);
+
+      WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+      ACCEPT_MSG(port);
+
+      WR_HARPOON(port+hp_portctrl_0, 0x00);
+
+      /* These messages cause the target to release the bus; spin until
+         BUS FREE (or a phase change) and acknowledge the BUS FREE int. */
+      if ((message == SMABORT) || (message == SMDEV_RESET) ||
+          (message == SMABORT_TAG) )
+         {
+         while(!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | PHASE))) {}
+
+         if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE)
+            {
+            WRW_HARPOON((port+hp_intstat), BUS_FREE);
+            }
+         }
+      }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sdecm
+ *
+ * Description: Determine the proper response to the message from the
+ *		target device.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sdecm(UCHAR message, USHORT port, UCHAR p_card)
+#else
+void sdecm(UCHAR message, ULONG port, UCHAR p_card)
+#endif
+{
+	PSCCB	 currSCCB;
+	PSCCBcard CurrCard;
+	PSCCBMgr_tar_info currTar_Info;
+
+	CurrCard = &BL_Card[p_card];
+	currSCCB = CurrCard->currentSCCB;
+
+	currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+	/* RESTORE POINTERS: rewind the data pointer to the last save point. */
+	if (message == SMREST_DATA_PTR)
+	{
+		if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET))
+		{
+			currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;
+
+			hostDataXferRestart(currSCCB);
+		}
+
+		ACCEPT_MSG(port);
+		WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+	}
+
+	else if (message == SMCMD_COMP)
+	{
+
+
+		/* COMMAND COMPLETE while still in the select queue means the
+		   target rejected tagged queuing for this command. */
+		if (currSCCB->Sccb_scsistat == SELECT_Q_ST)
+		{
+			currTar_Info->TarStatus &= ~(UCHAR)TAR_TAG_Q_MASK;
+			currTar_Info->TarStatus |= (UCHAR)TAG_Q_REJECT;
+		}
+
+		ACCEPT_MSG(port);
+
+	}
+
+	else if ((message == SMNO_OP) || (message >= SMIDENT)
+		|| (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY))
+	{
+
+		ACCEPT_MSG(port);
+		WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+	}
+
+	/* MESSAGE REJECT: the target refused a sync/wide/tag negotiation;
+	   record what was rejected so we stop offering it. */
+	else if (message == SMREJECT)
+	{
+
+		if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
+			(currSCCB->Sccb_scsistat == SELECT_WN_ST) ||
+			((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING ) ||
+			((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING ) )
+
+		{
+			WRW_HARPOON((port+hp_intstat), BUS_FREE);
+
+			ACCEPT_MSG(port);
+
+
+			while ((!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+				(!(RDW_HARPOON((port+hp_intstat)) & BUS_FREE))) {}
+
+			if(currSCCB->Lun == 0x00)
+			{
+				if ((currSCCB->Sccb_scsistat == SELECT_SN_ST))
+				{
+
+					currTar_Info->TarStatus |= (UCHAR)SYNC_SUPPORTED;
+
+					currTar_Info->TarEEValue &= ~EE_SYNC_MASK;
+				}
+
+#if defined(WIDE_SCSI)
+				else if ((currSCCB->Sccb_scsistat == SELECT_WN_ST))
+				{
+
+
+					currTar_Info->TarStatus = (currTar_Info->TarStatus &
+													~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+					currTar_Info->TarEEValue &= ~EE_WIDE_SCSI;
+
+				}
+#endif
+
+				else if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING )
+				{
+					currTar_Info->TarStatus = (currTar_Info->TarStatus &
+													~(UCHAR)TAR_TAG_Q_MASK) | TAG_Q_REJECT;
+
+
+					currSCCB->ControlByte &= ~F_USE_CMD_Q;
+					CurrCard->discQCount--;
+					CurrCard->discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+					currSCCB->Sccb_tag = 0x00;
+
+				}
+			}
+
+			if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE)
+			{
+
+
+				if(currSCCB->Lun == 0x00)
+				{
+					WRW_HARPOON((port+hp_intstat), BUS_FREE);
+					CurrCard->globalFlags |= F_NEW_SCCB_CMD;
+				}
+			}
+
+			else
+			{
+
+				if((CurrCard->globalFlags & F_CONLUN_IO) &&
+					((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+					currTar_Info->TarLUNBusy[currSCCB->Lun] = TRUE;
+				else
+					currTar_Info->TarLUNBusy[0] = TRUE;
+
+
+				currSCCB->ControlByte &= ~(UCHAR)F_USE_CMD_Q;
+
+				WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+
+			}
+		}
+
+		else
+		{
+			ACCEPT_MSG(port);
+
+			while ((!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+				(!(RDW_HARPOON((port+hp_intstat)) & BUS_FREE))) {}
+	
+			if (!(RDW_HARPOON((port+hp_intstat)) & BUS_FREE))
+			{
+				WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+			}
+		}
+	}
+
+	/* EXTENDED MESSAGE: hand off to shandem (sync/wide negotiation). */
+	else if (message == SMEXT)
+	{
+
+		ACCEPT_MSG(port);
+		shandem(port,p_card,currSCCB);
+	}
+
+	else if (message == SMIGNORWR)
+	{
+
+		ACCEPT_MSG(port);          /* ACK the RESIDUE MSG */
+
+		message = sfm(port,currSCCB);
+
+		if(currSCCB->Sccb_scsimsg != SMPARITY)
+			ACCEPT_MSG(port);
+		WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+	}
+
+
+	/* Anything else is unexpected: flag the SCCB and reject the message. */
+	else
+	{
+
+		currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+		currSCCB->Sccb_scsimsg = SMREJECT;
+
+		ACCEPT_MSG_ATN(port);
+		WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+	}
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: shandem
+ *
+ * Description: Decide what to do with the extended message.
+ *		Reads the length and message-code bytes; SDTR (length 3)
+ *		and WDTR (length 2) are dispatched to stsyncn/stwidn, any
+ *		other extended message is rejected.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void shandem(USHORT port, UCHAR p_card, PSCCB pCurrSCCB)
+#else
+void shandem(ULONG port, UCHAR p_card, PSCCB pCurrSCCB)
+#endif
+{
+	UCHAR length,message;
+
+	length = sfm(port,pCurrSCCB);
+	if (length)
+	{
+
+		ACCEPT_MSG(port);
+		message = sfm(port,pCurrSCCB);
+		if (message)
+		{
+
+			if (message == SMSYNC)
+			{
+
+				/* SDTR must carry exactly 3 parameter bytes. */
+				if (length == 0x03)
+				{
+
+					ACCEPT_MSG(port);
+					stsyncn(port,p_card);
+				}
+				else
+				{
+
+					pCurrSCCB->Sccb_scsimsg = SMREJECT;
+					ACCEPT_MSG_ATN(port);
+				}
+			}
+#if defined(WIDE_SCSI)
+			else if (message == SMWDTR)
+			{
+
+				/* WDTR must carry exactly 2 parameter bytes. */
+				if (length == 0x02)
+				{
+
+					ACCEPT_MSG(port);
+					stwidn(port,p_card);
+				}
+				else
+				{
+
+					pCurrSCCB->Sccb_scsimsg = SMREJECT;
+					ACCEPT_MSG_ATN(port);
+
+					WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+				}
+			}
+#endif
+			else
+			{
+
+				pCurrSCCB->Sccb_scsimsg = SMREJECT;
+				ACCEPT_MSG_ATN(port);
+
+				WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+			}
+		}
+		else
+		{
+			if(pCurrSCCB->Sccb_scsimsg != SMPARITY)
+				ACCEPT_MSG(port);
+			WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+		}
+	}else
+	{
+			if(pCurrSCCB->Sccb_scsimsg == SMPARITY)
+				WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+	}
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sisyncn
+ *
+ * Description: Initiate a synchronous transfer negotiation (SDTR
+ *		extended message) with the target, loading the message
+ *		into automation RAM.  If a negotiation was already in
+ *		progress, mark sync as handled instead.  (NOTE(review):
+ *		the previous header text described sfm, not this routine.)
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR sisyncn(USHORT port, UCHAR p_card, UCHAR syncFlag)
+#else
+UCHAR sisyncn(ULONG port, UCHAR p_card, UCHAR syncFlag)
+#endif
+{
+   PSCCB currSCCB;
+   PSCCBMgr_tar_info currTar_Info;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+   currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+   if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)) {
+
+
+      WRW_HARPOON((port+ID_MSG_STRT),
+                 (MPM_OP+AMSG_OUT+(currSCCB->Sccb_idmsg & ~(UCHAR)DISC_PRIV)));
+
+      WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+CMDPZ);
+
+      /* Build the SDTR message: EXTENDED, length 3, SDTR code, period. */
+      WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+      WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x03  ));
+      WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMSYNC));
+
+
+      /* Transfer-period factor by EEPROM sync setting (units of 4 ns). */
+      if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
+
+	 WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 12));
+
+      else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB)
+
+	 WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 25));
+
+      else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB)
+
+	 WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 50));
+
+      else
+	 WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 00));
+
+
+      WRW_HARPOON((port+SYNC_MSGS+8),  (RAT_OP                ));
+      WRW_HARPOON((port+SYNC_MSGS+10), (MPM_OP+AMSG_OUT+DEFAULT_OFFSET));
+      WRW_HARPOON((port+SYNC_MSGS+12), (BRH_OP+ALWAYS+NP      ));
+
+
+      if(syncFlag == FALSE)
+      {
+	 WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+         currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+            ~(UCHAR)TAR_SYNC_MASK) | (UCHAR)SYNC_TRYING);
+      }
+      else
+      {
+	 WR_HARPOON(port+hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT));
+      }
+
+
+      return(TRUE);
+      }
+
+   else {
+
+      currTar_Info->TarStatus |=	 (UCHAR)SYNC_SUPPORTED;
+      currTar_Info->TarEEValue &= ~EE_SYNC_MASK;
+      return(FALSE);
+      }
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: stsyncn
+ *
+ * Description: The target has sent us a Sync Nego message so handle it
+ *		as necessary.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void stsyncn(USHORT port, UCHAR p_card)
+#else
+void stsyncn(ULONG port, UCHAR p_card)
+#endif
+{
+   UCHAR sync_msg,offset,sync_reg,our_sync_msg;
+   PSCCB currSCCB;
+   PSCCBMgr_tar_info currTar_Info;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+   currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+   /* Read the target's transfer period; bail out on a parity error. */
+   sync_msg = sfm(port,currSCCB);
+
+   if((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY))
+   {
+      WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+      return;
+   }
+
+   ACCEPT_MSG(port);
+
+
+   /* Read the target's REQ/ACK offset; bail out on a parity error. */
+   offset = sfm(port,currSCCB);
+
+   if((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY))
+   {
+      WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+      return;
+   }
+
+   if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
+
+      our_sync_msg = 12;              /* Setup our Message to 20mb/s */
+
+   else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB)
+
+      our_sync_msg = 25;              /* Setup our Message to 10mb/s */
+
+   else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB)
+
+      our_sync_msg = 50;              /* Setup our Message to 5mb/s */
+   else
+
+      our_sync_msg = 0;               /* Message = Async */
+
+   /* Never go faster (smaller period factor) than we are configured for. */
+   if (sync_msg < our_sync_msg) {
+      sync_msg = our_sync_msg;    /*if faster, then set to max. */
+      }
+
+   if (offset == ASYNC)
+      sync_msg = ASYNC;
+
+   if (offset > MAX_OFFSET)
+      offset = MAX_OFFSET;
+
+   /* Map the agreed period factor onto the Harpoon sync-rate field. */
+   sync_reg = 0x00;
+
+   if (sync_msg > 12)
+
+      sync_reg = 0x20;        /* Use 10MB/s */
+
+   if (sync_msg > 25)
+
+      sync_reg = 0x40;        /* Use 6.6MB/s */
+
+   if (sync_msg > 38)
+
+      sync_reg = 0x60;        /* Use 5MB/s */
+
+   if (sync_msg > 50)
+
+      sync_reg = 0x80;        /* Use 4MB/s */
+
+   if (sync_msg > 62)
+
+      sync_reg = 0xA0;        /* Use 3.33MB/s */
+
+   if (sync_msg > 75)
+
+      sync_reg = 0xC0;        /* Use 2.85MB/s */
+
+   if (sync_msg > 87)
+
+      sync_reg = 0xE0;        /* Use 2.5MB/s */
+
+   if (sync_msg > 100) {
+
+      sync_reg = 0x00;        /* Use ASYNC */
+      offset = 0x00;
+      }
+
+
+#if defined(WIDE_SCSI)
+   if (currTar_Info->TarStatus & WIDE_ENABLED)
+
+      sync_reg |= offset;
+
+   else
+
+      sync_reg |= (offset | NARROW_SCSI);
+
+#else
+   sync_reg |= (offset | NARROW_SCSI);
+#endif
+
+   sssyncv(port,currSCCB->TargID,sync_reg,currTar_Info);
+
+
+   if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
+
+
+      /* We initiated the negotiation: target's answer completes it. */
+      ACCEPT_MSG(port);
+
+      currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+         ~(UCHAR)TAR_SYNC_MASK) | (UCHAR)SYNC_SUPPORTED);
+
+      WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+      }
+
+   else {
+
+
+      /* Target initiated: send our SDTR response. */
+      ACCEPT_MSG_ATN(port);
+
+      sisyncr(port,sync_msg,offset);
+
+      currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+         ~(UCHAR)TAR_SYNC_MASK) | (UCHAR)SYNC_SUPPORTED);
+      }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sisyncr
+ *
+ * Description: Answer the targets sync message: load our SDTR reply
+ *		(period/offset) into automation RAM and run it, then wait
+ *		for BUS FREE or automation-complete.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sisyncr(USHORT port,UCHAR sync_pulse, UCHAR offset)
+#else
+void sisyncr(ULONG port,UCHAR sync_pulse, UCHAR offset)
+#endif
+{
+   ARAM_ACCESS(port);
+   WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+   WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x03  ));
+   WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMSYNC));
+   WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+sync_pulse));
+   WRW_HARPOON((port+SYNC_MSGS+8),  (RAT_OP                ));
+   WRW_HARPOON((port+SYNC_MSGS+10), (MPM_OP+AMSG_OUT+offset));
+   WRW_HARPOON((port+SYNC_MSGS+12), (BRH_OP+ALWAYS+NP      ));
+   SGRAM_ACCESS(port);
+
+   WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+   WRW_HARPOON((port+hp_intstat), CLR_ALL_INT_1);
+
+   WR_HARPOON(port+hp_autostart_3, (AUTO_IMMED+CMD_ONLY_STRT));
+
+   while (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | AUTO_INT))) {}
+}
+
+
+
+#if defined(WIDE_SCSI)
+
+/*---------------------------------------------------------------------
+ *
+ * Function: siwidn
+ *
+ * Description: Initiate a wide (16-bit) transfer negotiation (WDTR
+ *		extended message) with the target via automation RAM.
+ *		If wide was already negotiated, mark it done instead.
+ *		(NOTE(review): the previous header text described sfm,
+ *		not this routine.)
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR siwidn(USHORT port, UCHAR p_card)
+#else
+UCHAR siwidn(ULONG port, UCHAR p_card)
+#endif
+{
+   PSCCB currSCCB;
+   PSCCBMgr_tar_info currTar_Info;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+   currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+   if (!((currTar_Info->TarStatus & TAR_WIDE_MASK) == WIDE_NEGOCIATED)) {
+
+
+      WRW_HARPOON((port+ID_MSG_STRT),
+                 (MPM_OP+AMSG_OUT+(currSCCB->Sccb_idmsg & ~(UCHAR)DISC_PRIV)));
+
+      WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+CMDPZ);
+
+      /* Build the WDTR message: EXTENDED, length 2, WDTR code, 16-bit. */
+      WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+      WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x02  ));
+      WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMWDTR));
+      WRW_HARPOON((port+SYNC_MSGS+6), (RAT_OP                ));
+      WRW_HARPOON((port+SYNC_MSGS+8), (MPM_OP+AMSG_OUT+ SM16BIT));
+      WRW_HARPOON((port+SYNC_MSGS+10),(BRH_OP+ALWAYS+NP      ));
+
+      WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+
+
+      currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+         ~(UCHAR)TAR_WIDE_MASK) | (UCHAR)WIDE_ENABLED);
+
+      return(TRUE);
+      }
+
+   else {
+
+      currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+               ~(UCHAR)TAR_WIDE_MASK) | WIDE_NEGOCIATED);
+
+      currTar_Info->TarEEValue &= ~EE_WIDE_SCSI;
+      return(FALSE);
+      }
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: stwidn
+ *
+ * Description: The target has sent us a Wide Nego message so handle it
+ *		as necessary.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void stwidn(USHORT port, UCHAR p_card)
+#else
+void stwidn(ULONG port, UCHAR p_card)
+#endif
+{
+   UCHAR width;
+   PSCCB currSCCB;
+   PSCCBMgr_tar_info currTar_Info;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+   currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+   /* Read the target's requested width; bail out on a parity error. */
+   width = sfm(port,currSCCB);
+
+   if((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY))
+   {
+      WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+      return;
+   }
+
+
+   /* Force narrow if the EEPROM disallows wide for this target. */
+   if (!(currTar_Info->TarEEValue & EE_WIDE_SCSI))
+      width = 0;
+
+   /* Reuse 'width' as the sync-register bit: 0 = wide, NARROW_SCSI = narrow. */
+   if (width) {
+      currTar_Info->TarStatus |= WIDE_ENABLED;
+      width = 0;
+      }
+   else {
+      width = NARROW_SCSI;
+      currTar_Info->TarStatus &= ~WIDE_ENABLED;
+      }
+
+
+   sssyncv(port,currSCCB->TargID,width,currTar_Info);
+
+
+   if (currSCCB->Sccb_scsistat == SELECT_WN_ST)
+	{
+
+
+
+      currTar_Info->TarStatus |=	 WIDE_NEGOCIATED;
+
+	  /* Wide settled: follow up with sync negotiation if still pending. */
+	  if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_SUPPORTED))
+	  {
+	      ACCEPT_MSG_ATN(port);
+		  ARAM_ACCESS(port);
+	     	sisyncn(port,p_card, TRUE);
+	      currSCCB->Sccb_scsistat = SELECT_SN_ST;
+		  SGRAM_ACCESS(port);
+	  }
+	  else
+	  {
+	      ACCEPT_MSG(port);
+    	  WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+	  }
+   }
+
+   else {
+
+
+      /* Target initiated: answer with our supported width. */
+      ACCEPT_MSG_ATN(port);
+
+      if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+      	 width = SM16BIT;
+      else
+	 width = SM8BIT;
+
+      siwidr(port,width);
+
+      currTar_Info->TarStatus |= (WIDE_NEGOCIATED | WIDE_ENABLED);
+      }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: siwidr
+ *
+ * Description: Answer the targets Wide nego message: load our WDTR
+ *		reply (requested width) into automation RAM, run it, and
+ *		wait for BUS FREE or automation-complete.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void siwidr(USHORT port, UCHAR width)
+#else
+void siwidr(ULONG port, UCHAR width)
+#endif
+{
+   ARAM_ACCESS(port);
+   WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+   WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x02  ));
+   WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMWDTR));
+   WRW_HARPOON((port+SYNC_MSGS+6), (RAT_OP                ));
+   WRW_HARPOON((port+SYNC_MSGS+8),(MPM_OP+AMSG_OUT+width));
+   WRW_HARPOON((port+SYNC_MSGS+10),(BRH_OP+ALWAYS+NP      ));
+   SGRAM_ACCESS(port);
+
+   WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+   WRW_HARPOON((port+hp_intstat), CLR_ALL_INT_1);
+
+   WR_HARPOON(port+hp_autostart_3, (AUTO_IMMED+CMD_ONLY_STRT));
+
+   while (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | AUTO_INT))) {}
+}
+
+#endif
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sssyncv
+ *
+ * Description: Write the desired value to the Sync Register for the
+ *		ID specified.  The switch maps a SCSI ID to its (non-
+ *		linear) hp_synctarg_* register offset.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sssyncv(USHORT p_port, UCHAR p_id, UCHAR p_sync_value,PSCCBMgr_tar_info currTar_Info)
+#else
+void sssyncv(ULONG p_port, UCHAR p_id, UCHAR p_sync_value,PSCCBMgr_tar_info currTar_Info)
+#endif
+{
+   UCHAR index;
+
+   index = p_id;
+
+   switch (index) {
+
+      case 0:
+	 index = 12;             /* hp_synctarg_0 */
+	 break;
+      case 1:
+	 index = 13;             /* hp_synctarg_1 */
+	 break;
+      case 2:
+	 index = 14;             /* hp_synctarg_2 */
+	 break;
+      case 3:
+	 index = 15;             /* hp_synctarg_3 */
+	 break;
+      case 4:
+	 index = 8;              /* hp_synctarg_4 */
+	 break;
+      case 5:
+	 index = 9;              /* hp_synctarg_5 */
+	 break;
+      case 6:
+	 index = 10;             /* hp_synctarg_6 */
+	 break;
+      case 7:
+	 index = 11;             /* hp_synctarg_7 */
+	 break;
+      case 8:
+	 index = 4;              /* hp_synctarg_8 */
+	 break;
+      case 9:
+	 index = 5;              /* hp_synctarg_9 */
+	 break;
+      case 10:
+	 index = 6;              /* hp_synctarg_10 */
+	 break;
+      case 11:
+	 index = 7;              /* hp_synctarg_11 */
+	 break;
+      case 12:
+	 index = 0;              /* hp_synctarg_12 */
+	 break;
+      case 13:
+	 index = 1;              /* hp_synctarg_13 */
+	 break;
+      case 14:
+	 index = 2;              /* hp_synctarg_14 */
+	 break;
+      case 15:
+	 index = 3;              /* hp_synctarg_15 */
+	 /* NOTE(review): no break here — harmless since this is the
+	    final case, but a break would guard future additions. */
+
+      }
+
+   WR_HARPOON(p_port+hp_synctarg_base+index, p_sync_value);
+
+   currTar_Info->TarSyncCtrl = p_sync_value;
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sresb
+ *
+ * Description: Reset the desired card's SCSI bus, then clear all
+ *		per-target sync/wide state and card bookkeeping so every
+ *		command restarts from scratch.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sresb(USHORT port, UCHAR p_card)
+#else
+void sresb(ULONG port, UCHAR p_card)
+#endif
+{
+   UCHAR scsiID, i;
+
+   PSCCBMgr_tar_info currTar_Info;
+
+   WR_HARPOON(port+hp_page_ctrl,
+      (RD_HARPOON(port+hp_page_ctrl) | G_INT_DISABLE));
+   WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+   WR_HARPOON(port+hp_scsictrl_0, SCSI_RST);
+
+   /* Borrow the select-timeout timer to hold RST for ~5 ms; 'scsiID'
+      temporarily stores the saved timeout value. */
+   scsiID = RD_HARPOON(port+hp_seltimeout);
+   WR_HARPOON(port+hp_seltimeout,TO_5ms);
+   WRW_HARPOON((port+hp_intstat), TIMEOUT);
+
+   WR_HARPOON(port+hp_portctrl_0,(SCSI_PORT | START_TO));
+
+   while (!(RDW_HARPOON((port+hp_intstat)) & TIMEOUT)) {}
+
+   WR_HARPOON(port+hp_seltimeout,scsiID);
+
+   WR_HARPOON(port+hp_scsictrl_0, ENA_SCAM_SEL);
+
+   Wait(port, TO_5ms);
+
+   WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+   WR_HARPOON(port+hp_int_mask, (RD_HARPOON(port+hp_int_mask) | 0x00));
+
+   /* Drop every target back to narrow/async and reinit its table entry. */
+   for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++)
+      {
+      currTar_Info = &sccbMgrTbl[p_card][scsiID];
+
+      if (currTar_Info->TarEEValue & EE_SYNC_MASK)
+	 {
+	 currTar_Info->TarSyncCtrl = 0;
+	 currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+	 }
+
+      if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+	 {
+	 currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+	 }
+
+      sssyncv(port, scsiID, NARROW_SCSI,currTar_Info);
+
+      SccbMgrTableInitTarget(p_card, scsiID);
+      }
+
+   BL_Card[p_card].scanIndex = 0x00;
+   BL_Card[p_card].currentSCCB = NULL;
+   BL_Card[p_card].globalFlags &= ~(F_TAG_STARTED | F_HOST_XFER_ACT 
+									| F_NEW_SCCB_CMD);
+   BL_Card[p_card].cmdCounter  = 0x00;
+	BL_Card[p_card].discQCount = 0x00;
+   BL_Card[p_card].tagQ_Lst = 0x01; 
+
+	for(i = 0; i < QUEUE_DEPTH; i++)
+		BL_Card[p_card].discQ_Tbl[i] = NULL;
+
+   WR_HARPOON(port+hp_page_ctrl,
+      (RD_HARPOON(port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: ssenss
+ *
+ * Description: Setup for the Auto Sense command.  Saves the original
+ *		CDB, then rewrites the SCCB into a 6-byte REQUEST SENSE
+ *		with disconnect privilege and queuing disabled.
+ *
+ *---------------------------------------------------------------------*/
+void ssenss(PSCCBcard pCurrCard)
+{
+   UCHAR i;
+   PSCCB currSCCB;
+
+   currSCCB = pCurrCard->currentSCCB;
+
+
+   /* Preserve the original CDB so it can be restored after the sense. */
+   currSCCB->Save_CdbLen = currSCCB->CdbLength;
+
+   for (i = 0; i < 6; i++) {
+
+      currSCCB->Save_Cdb[i] = currSCCB->Cdb[i];
+      }
+
+   currSCCB->CdbLength = SIX_BYTE_CMD;
+   currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
+   currSCCB->Cdb[1] = currSCCB->Cdb[1] & (UCHAR)0xE0; /*Keep LUN. */
+   currSCCB->Cdb[2] = 0x00;
+   currSCCB->Cdb[3] = 0x00;
+   currSCCB->Cdb[4] = currSCCB->RequestSenseLength;
+   currSCCB->Cdb[5] = 0x00;
+
+   currSCCB->Sccb_XferCnt = (unsigned long)currSCCB->RequestSenseLength;
+
+   currSCCB->Sccb_ATC = 0x00;
+
+   currSCCB->Sccb_XferState |= F_AUTO_SENSE;
+
+   currSCCB->Sccb_XferState &= ~F_SG_XFER;
+
+   /* No disconnect privilege during the sense fetch. */
+   currSCCB->Sccb_idmsg = currSCCB->Sccb_idmsg & ~(UCHAR)DISC_PRIV;
+
+   currSCCB->ControlByte = 0x00;
+
+   currSCCB->Sccb_MGRFlags &= F_STATUSLOADED;
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sxfrp
+ *
+ * Description: Transfer data into the bit bucket until the device
+ *		decides to switch phase.  Reads are discarded; writes
+ *		are padded with 0xFA fill bytes.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void sxfrp(USHORT p_port, UCHAR p_card)
+#else
+void sxfrp(ULONG p_port, UCHAR p_card)
+#endif
+{
+   UCHAR curr_phz;
+
+
+   DISABLE_AUTO(p_port);
+
+   if (BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) {
+
+      hostDataXferAbort(p_port,p_card,BL_Card[p_card].currentSCCB);
+
+      }
+
+   /* If the Automation handled the end of the transfer then do not
+      match the phase or we will get out of sync with the ISR.       */
+
+   if (RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | XFER_CNT_0 | AUTO_INT))
+      return;
+
+   WR_HARPOON(p_port+hp_xfercnt_0, 0x00);
+
+   curr_phz = RD_HARPOON(p_port+hp_scsisig) & (UCHAR)S_SCSI_PHZ;
+
+   WRW_HARPOON((p_port+hp_intstat), XFER_CNT_0);
+
+
+   WR_HARPOON(p_port+hp_scsisig, curr_phz);
+
+   /* Pad/drain until the phase changes or the bus resets/frees. */
+   while ( !(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)) &&
+      (curr_phz == (RD_HARPOON(p_port+hp_scsisig) & (UCHAR)S_SCSI_PHZ)) )
+      {
+      if (curr_phz & (UCHAR)SCSI_IOBIT)
+         {
+         WR_HARPOON(p_port+hp_portctrl_0, (SCSI_PORT | HOST_PORT | SCSI_INBIT));
+
+         if (!(RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY))
+            {
+            RD_HARPOON(p_port+hp_fifodata_0);
+            }
+         }
+      else
+         {
+         WR_HARPOON(p_port+hp_portctrl_0, (SCSI_PORT | HOST_PORT | HOST_WRT));
+   	   if (RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY)
+            {
+            WR_HARPOON(p_port+hp_fifodata_0,0xFA);
+            }
+         }
+      } /* End of While loop for padding data I/O phase */
+
+   while ( !(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)))
+      {
+      if (RD_HARPOON(p_port+hp_scsisig) & SCSI_REQ)
+	 break;
+      }
+
+   /* Drain anything left in the FIFO. */
+   WR_HARPOON(p_port+hp_portctrl_0, (SCSI_PORT | HOST_PORT | SCSI_INBIT));
+   while (!(RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY))
+      {
+      RD_HARPOON(p_port+hp_fifodata_0);
+      }
+
+   if ( !(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)))
+      {
+      WR_HARPOON(p_port+hp_autostart_0, (AUTO_IMMED+DISCONNECT_START));
+      while (!(RDW_HARPOON((p_port+hp_intstat)) & AUTO_INT)) {}
+
+      if (RDW_HARPOON((p_port+hp_intstat)) & (ICMD_COMP | ITAR_DISC))
+	while (!(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RSEL))) ;
+      }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: schkdd
+ *
+ * Description: Make sure data has been flushed from both FIFOs and abort
+ *		the operations if necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void schkdd(USHORT port, UCHAR p_card)
+#else
+void schkdd(ULONG port, UCHAR p_card)
+#endif
+{
+   USHORT TimeOutLoop;
+	UCHAR sPhase;
+
+   PSCCB currSCCB;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+
+   /* Only data phases need flush/accounting work. */
+   if ((currSCCB->Sccb_scsistat != DATA_OUT_ST) &&
+       (currSCCB->Sccb_scsistat != DATA_IN_ST)) {
+      return;
+      }
+
+
+
+   /* An odd-byte-count transfer leaves one residual byte to account for. */
+   if (currSCCB->Sccb_XferState & F_ODD_BALL_CNT)
+      {
+
+      currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt-1);
+
+      currSCCB->Sccb_XferCnt = 1;
+
+      currSCCB->Sccb_XferState &= ~F_ODD_BALL_CNT;
+      WRW_HARPOON((port+hp_fiforead), (USHORT) 0x00);
+      WR_HARPOON(port+hp_xferstat, 0x00);
+      }
+
+   else
+      {
+
+      currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;
+
+      currSCCB->Sccb_XferCnt = 0;
+      }
+
+   if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+      (currSCCB->HostStatus == SCCB_COMPLETE)) {
+
+      currSCCB->HostStatus = SCCB_PARITY_ERR;
+      WRW_HARPOON((port+hp_intstat), PARITY);
+      }
+
+
+   hostDataXferAbort(port,p_card,currSCCB);
+
+
+   while (RD_HARPOON(port+hp_scsisig) & SCSI_ACK) {}
+
+   /* Wait (bounded) for the FIFO to show data, REQ, or a bus event. */
+   TimeOutLoop = 0;
+
+   while(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)
+      {
+      if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE) {
+	 return;
+	 }
+      if (RD_HARPOON(port+hp_offsetctr) & (UCHAR)0x1F) {
+	 break;
+	 }
+      if (RDW_HARPOON((port+hp_intstat)) & RESET) {
+	 return;
+	 }
+      if ((RD_HARPOON(port+hp_scsisig) & SCSI_REQ) || (TimeOutLoop++>0x3000) )
+	 break;
+      }
+
+   sPhase = RD_HARPOON(port+hp_scsisig) & (SCSI_BSY | S_SCSI_PHZ);
+   if ((!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)) ||
+      (RD_HARPOON(port+hp_offsetctr) & (UCHAR)0x1F) ||
+      (sPhase == (SCSI_BSY | S_DATAO_PH)) ||
+      (sPhase == (SCSI_BSY | S_DATAI_PH)))
+      {
+
+      /* The target still wants a data phase: resume or pad it out. */
+      WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+      if (!(currSCCB->Sccb_XferState & F_ALL_XFERRED))
+		{
+			if (currSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+	 			phaseDataIn(port,p_card);
+			}
+
+			else {
+	       phaseDataOut(port,p_card);
+			}
+		}
+		else
+      	{
+	 		sxfrp(port,p_card);
+	 		if (!(RDW_HARPOON((port+hp_intstat)) &
+		      (BUS_FREE | ICMD_COMP | ITAR_DISC | RESET)))
+    	  {
+   	 		WRW_HARPOON((port+hp_intstat), AUTO_INT);
+	    		phaseDecode(port,p_card);
+	 		}
+      }
+
+   }
+
+   else {
+      WR_HARPOON(port+hp_portctrl_0, 0x00);
+      }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sinits
+ *
+ * Description: Setup SCCB manager fields in this SCCB.  Validates the
+ *		target/LUN, initializes transfer bookkeeping, decides
+ *		tagged-queuing use, and builds the IDENTIFY message.
+ *
+ *---------------------------------------------------------------------*/
+
+void sinits(PSCCB p_sccb, UCHAR p_card)
+{
+   PSCCBMgr_tar_info currTar_Info;
+
+	if((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN))
+	{
+		return;
+	}
+   currTar_Info = &sccbMgrTbl[p_card][p_sccb->TargID];
+
+   p_sccb->Sccb_XferState = 0x00;
+   p_sccb->Sccb_XferCnt = p_sccb->DataLength;
+
+   if ((p_sccb->OperationCode == SCATTER_GATHER_COMMAND) ||
+      (p_sccb->OperationCode == RESIDUAL_SG_COMMAND)) {
+
+      p_sccb->Sccb_SGoffset = 0;
+      p_sccb->Sccb_XferState = F_SG_XFER;
+      p_sccb->Sccb_XferCnt = 0x00;
+      }
+
+   if (p_sccb->DataLength == 0x00)
+
+      p_sccb->Sccb_XferState |= F_ALL_XFERRED;
+
+   if (p_sccb->ControlByte & F_USE_CMD_Q)
+      {
+      /* Once a target has rejected queuing we never try again. */
+      if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
+         p_sccb->ControlByte &= ~F_USE_CMD_Q;
+
+      else
+	      currTar_Info->TarStatus |= TAG_Q_TRYING;
+      }
+
+/*      For !single SCSI device in system  & device allow Disconnect
+	or command is tag_q type then send Cmd with Disconnect Enable
+	else send Cmd with Disconnect Disable */
+
+/*
+   if (((!(BL_Card[p_card].globalFlags & F_SINGLE_DEVICE)) &&
+      (currTar_Info->TarStatus & TAR_ALLOW_DISC)) ||
+      (currTar_Info->TarStatus & TAG_Q_TRYING)) {
+*/
+   if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
+      (currTar_Info->TarStatus & TAG_Q_TRYING)) {
+      p_sccb->Sccb_idmsg = (UCHAR)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
+      }
+
+   else {
+
+      p_sccb->Sccb_idmsg = (UCHAR)SMIDENT | p_sccb->Lun;
+      }
+
+   p_sccb->HostStatus = 0x00;
+   p_sccb->TargetStatus = 0x00;
+   p_sccb->Sccb_tag = 0x00;
+   p_sccb->Sccb_MGRFlags = 0x00;
+   p_sccb->Sccb_sgseg = 0x00;
+   p_sccb->Sccb_ATC = 0x00;
+   p_sccb->Sccb_savedATC = 0x00;
+/*
+   p_sccb->SccbVirtDataPtr    = 0x00;
+   p_sccb->Sccb_forwardlink   = NULL;
+   p_sccb->Sccb_backlink      = NULL;
+ */
+   p_sccb->Sccb_scsistat = BUS_FREE_ST;
+   p_sccb->SccbStatus = SCCB_IN_PROCESS;
+   p_sccb->Sccb_scsimsg = SMNO_OP;
+
+}
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: phase.c $
+ *
+ *   Description: Functions to initially handle the SCSI bus phase when
+ * the target asserts request (and the automation is not
+ * enabled to handle the situation).
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+
+#if defined(OS2)
+ extern void (far *s_PhaseTbl[8]) (ULONG, UCHAR);
+#else
+ #if defined(DOS)
+ extern void (*s_PhaseTbl[8]) (USHORT, UCHAR);
+ #else
+ extern void (*s_PhaseTbl[8]) (ULONG, UCHAR);
+ #endif
+#endif
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Decode
+ *
+ * Description: Determine the phase and call the appropriate function.
+ *		The three SCSI phase signal bits index s_PhaseTbl.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void phaseDecode(USHORT p_port, UCHAR p_card)
+#else
+void phaseDecode(ULONG p_port, UCHAR p_card)
+#endif
+{
+   unsigned char phase_ref;
+#if defined(OS2)
+   void (far *phase) (ULONG, UCHAR);
+#else
+   #if defined(DOS)
+      void (*phase) (USHORT, UCHAR);
+   #else
+      void (*phase) (ULONG, UCHAR);
+   #endif
+#endif
+
+
+   DISABLE_AUTO(p_port);
+
+   phase_ref = (UCHAR) (RD_HARPOON(p_port+hp_scsisig) & S_SCSI_PHZ);
+
+   phase = s_PhaseTbl[phase_ref];
+
+   (*phase)(p_port, p_card);           /* Call the correct phase func */
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data Out Phase
+ *
+ * Description: Start up both the BusMaster and Xbow.  If the transfer
+ *		count is already zero the target asked for more data
+ *		than we have: flag an overrun and pad the phase out.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseDataOut(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseDataOut(USHORT port, UCHAR p_card)
+#else
+void phaseDataOut(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+
+   PSCCB currSCCB;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+   if (currSCCB == NULL)
+      {
+      return;  /* Exit if No SCCB record */
+      }
+
+   currSCCB->Sccb_scsistat = DATA_OUT_ST;
+   currSCCB->Sccb_XferState &= ~(F_HOST_XFER_DIR | F_NO_DATA_YET);
+
+   WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+   WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+
+   WR_HARPOON(port+hp_autostart_0, (END_DATA+END_DATA_START));
+
+   dataXferProcessor(port, &BL_Card[p_card]);
+
+#if defined(NOBUGBUG)
+   if (RDW_HARPOON((port+hp_intstat)) & XFER_CNT_0)
+      WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+
+#endif
+
+
+   if (currSCCB->Sccb_XferCnt == 0) {
+
+
+      if ((currSCCB->ControlByte & SCCB_DATA_XFER_OUT) &&
+	 (currSCCB->HostStatus == SCCB_COMPLETE))
+	 currSCCB->HostStatus = SCCB_DATA_OVER_RUN;
+
+      sxfrp(port,p_card);
+      if (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | RESET)))
+	 phaseDecode(port,p_card);
+      }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data In Phase
+ *
+ * Description: Startup the BusMaster and the XBOW.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseDataIn(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseDataIn(USHORT port, UCHAR p_card)
+#else
+void phaseDataIn(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+
+   PSCCB currSCCB;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   if (currSCCB == NULL)
+      {
+      return;  /* Exit if No SCCB record */
+      }
+
+
+   /* Mark the command as in DATA IN; set xfer-direction (set = in)
+      and clear "no data moved yet". */
+   currSCCB->Sccb_scsistat = DATA_IN_ST;
+   currSCCB->Sccb_XferState |= F_HOST_XFER_DIR;
+   currSCCB->Sccb_XferState &= ~F_NO_DATA_YET;
+
+   WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+   WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+
+   WR_HARPOON(port+hp_autostart_0, (END_DATA+END_DATA_START));
+
+   /* Start up the BusMaster and the XBOW for this transfer. */
+   dataXferProcessor(port, &BL_Card[p_card]);
+
+   /* Target switched to data-in but we expect no more data: flag an
+      over-run and flush the phase by hand. */
+   if (currSCCB->Sccb_XferCnt == 0) {
+
+
+      if ((currSCCB->ControlByte & SCCB_DATA_XFER_IN) &&
+         (currSCCB->HostStatus == SCCB_COMPLETE))
+         currSCCB->HostStatus = SCCB_DATA_OVER_RUN;
+
+      sxfrp(port,p_card);
+      if (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | RESET)))
+         phaseDecode(port,p_card);
+
+   }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Command Phase
+ *
+ * Description: Load the CDB into the automation and start it up.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseCommand(ULONG p_port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseCommand(USHORT p_port, UCHAR p_card)
+#else
+void phaseCommand(ULONG p_port, UCHAR p_card)
+#endif
+#endif
+{
+   PSCCB currSCCB;
+#if defined(DOS)
+   USHORT cdb_reg;
+#else
+   ULONG cdb_reg;
+#endif
+   UCHAR i;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   /* Guard against a COMMAND phase with no active command, the same way
+      the other phase handlers (phaseDataOut/phaseDataIn/phaseMsgOut) do;
+      without this the dereferences below would fault on a NULL SCCB. */
+   if (currSCCB == NULL)
+      {
+      return;  /* Exit if No SCCB record */
+      }
+
+   /* A RESET_COMMAND carries no real CDB; load a zeroed 6-byte CDB and
+      pre-set the host status so the command fails back to the caller. */
+   if (currSCCB->OperationCode == RESET_COMMAND) {
+
+      currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+      currSCCB->CdbLength = SIX_BYTE_CMD;
+   }
+
+   WR_HARPOON(p_port+hp_scsisig, 0x00);
+
+   /* Switch the page register so we can write the automation RAM. */
+   ARAM_ACCESS(p_port);
+
+
+   cdb_reg = p_port + CMD_STRT;
+
+   /* Load each CDB byte into the automation as an MPM command-phase op
+      (one 16-bit automation word per CDB byte). */
+   for (i=0; i < currSCCB->CdbLength; i++) {
+
+      if (currSCCB->OperationCode == RESET_COMMAND)
+
+         WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + 0x00));
+
+      else
+         WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + currSCCB->Cdb[i]));
+      cdb_reg +=2;
+   }
+
+   /* For CDBs shorter than the 12-byte maximum, terminate the command
+      map with an unconditional branch to the next-phase handler. */
+   if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
+      WRW_HARPOON(cdb_reg, (BRH_OP+ALWAYS+ NP));
+
+   WR_HARPOON(p_port+hp_portctrl_0,(SCSI_PORT));
+
+   currSCCB->Sccb_scsistat = COMMAND_ST;
+
+   /* Fire the automation immediately in command-only mode, then return
+      the page register to scatter/gather RAM. */
+   WR_HARPOON(p_port+hp_autostart_3, (AUTO_IMMED | CMD_ONLY_STRT));
+   SGRAM_ACCESS(p_port);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Status phase
+ *
+ * Description: Bring in the status and command complete message bytes
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseStatus(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseStatus(USHORT port, UCHAR p_card)
+#else
+void phaseStatus(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+   /* Start-up the automation to finish off this command and let the
+      isr handle the interrupt for command complete when it comes in.
+      We could wait here for the interrupt to be generated?
+    */
+
+   /* Drop any manually-driven SCSI signals, then restart the
+      automation; it will collect the status byte and the command
+      complete message on its own. */
+   WR_HARPOON(port+hp_scsisig, 0x00);
+
+   WR_HARPOON(port+hp_autostart_0, (AUTO_IMMED+END_DATA_START));
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Message Out
+ *
+ * Description: Send out our message (if we have one) and handle whatever
+ *              else is involved.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseMsgOut(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseMsgOut(USHORT port, UCHAR p_card)
+#else
+void phaseMsgOut(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+   UCHAR message,scsiID;
+   PSCCB currSCCB;
+   PSCCBMgr_tar_info currTar_Info;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   if (currSCCB != NULL) {
+
+      message = currSCCB->Sccb_scsimsg;
+      scsiID = currSCCB->TargID;
+
+      /* Device reset: drop back to async/narrow for this target,
+         flush its queue and re-initialize its per-target table. */
+      if (message == SMDEV_RESET)
+      {
+
+
+         currTar_Info = &sccbMgrTbl[p_card][scsiID];
+         currTar_Info->TarSyncCtrl = 0;
+         sssyncv(port, scsiID, NARROW_SCSI,currTar_Info);
+
+         /* If EEPROM says the target supports sync/wide, clear the
+            negotiated state so it is renegotiated from scratch. */
+         if (sccbMgrTbl[p_card][scsiID].TarEEValue & EE_SYNC_MASK)
+         {
+
+            sccbMgrTbl[p_card][scsiID].TarStatus &= ~TAR_SYNC_MASK;
+
+         }
+
+         if (sccbMgrTbl[p_card][scsiID].TarEEValue & EE_WIDE_SCSI)
+         {
+
+            sccbMgrTbl[p_card][scsiID].TarStatus &= ~TAR_WIDE_MASK;
+         }
+
+
+         queueFlushSccb(p_card,SCCB_COMPLETE);
+         SccbMgrTableInitTarget(p_card,scsiID);
+      }
+      else if (currSCCB->Sccb_scsistat == ABORT_ST)
+      {
+         /* Aborting this command: retire its disconnect-queue slot. */
+         currSCCB->HostStatus = SCCB_COMPLETE;
+         if(BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] != NULL)
+         {
+            BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+            sccbMgrTbl[p_card][scsiID].TarTagQ_Cnt--;
+         }
+
+      }
+
+      else if (currSCCB->Sccb_scsistat < COMMAND_ST)
+      {
+
+
+         /* Still in selection: a no-op message means retry the
+            selection rather than sending anything on the bus. */
+         if(message == SMNO_OP)
+         {
+            currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;
+
+            ssel(port,p_card);
+            return;
+         }
+      }
+      else
+      {
+
+
+         if (message == SMABORT)
+
+            queueFlushSccb(p_card,SCCB_COMPLETE);
+      }
+
+   }
+   else
+   {
+      /* No active SCCB: the only sensible message is ABORT. */
+      message = SMABORT;
+   }
+
+   WRW_HARPOON((port+hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
+
+
+   /* Drive the message byte onto the bus by hand and strobe ACK. */
+   WR_HARPOON(port+hp_portctrl_0, SCSI_BUS_EN);
+
+   WR_HARPOON(port+hp_scsidata_0,message);
+
+   WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+   ACCEPT_MSG(port);
+
+   WR_HARPOON(port+hp_portctrl_0, 0x00);
+
+   /* For terminating messages, wait for the target to go bus free
+      (or change phase) and complete/fail the command accordingly. */
+   if ((message == SMABORT) || (message == SMDEV_RESET) ||
+      (message == SMABORT_TAG) )
+   {
+
+      while(!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | PHASE))) {}
+
+      if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE)
+      {
+         WRW_HARPOON((port+hp_intstat), BUS_FREE);
+
+         if (currSCCB != NULL)
+         {
+
+            /* Free the LUN-busy slot: per-LUN when concurrent-LUN I/O
+               is on and the target is not tag queueing, else LUN 0. */
+            if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+               ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+               sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+            else
+               sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+            queueCmdComplete(&BL_Card[p_card],currSCCB, p_card);
+         }
+
+         else
+         {
+            BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+         }
+      }
+
+      else
+      {
+
+         sxfrp(port,p_card);
+      }
+   }
+
+   else
+   {
+
+      /* NOTE(review): if currSCCB were NULL, message was forced to
+         SMABORT above, so this branch only runs with a valid SCCB. */
+      if(message == SMPARITY)
+      {
+         currSCCB->Sccb_scsimsg = SMNO_OP;
+         WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+      }
+      else
+      {
+         sxfrp(port,p_card);
+      }
+   }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Message In phase
+ *
+ * Description: Bring in the message and determine what to do with it.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseMsgIn(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseMsgIn(USHORT port, UCHAR p_card)
+#else
+void phaseMsgIn(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+   UCHAR message;
+   PSCCB currSCCB;
+
+   /* NOTE(review): unlike the data-phase handlers, currSCCB is not
+      checked for NULL here; the sfm()/sdecm() path below dereferences
+      it — presumably a message-in interrupt implies an active command.
+      Verify against the callers before relying on that. */
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   /* If a host transfer is still in flight, drain the FIFOs first. */
+   if (BL_Card[p_card].globalFlags & F_HOST_XFER_ACT)
+   {
+
+      phaseChkFifo(port, p_card);
+   }
+
+   /* Peek the message byte straight off the bus (no ACK yet). */
+   message = RD_HARPOON(port+hp_scsidata_0);
+   if ((message == SMDISC) || (message == SMSAVE_DATA_PTR))
+   {
+
+      /* Disconnect / save-data-pointers: let the automation finish. */
+      WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+END_DATA_START));
+
+   }
+
+   else
+   {
+
+      /* Fetch the message with handshaking and decode it. */
+      message = sfm(port,currSCCB);
+      if (message)
+      {
+
+
+         sdecm(message,port,p_card);
+
+      }
+      else
+      {
+         if(currSCCB->Sccb_scsimsg != SMPARITY)
+            ACCEPT_MSG(port);
+         WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+      }
+   }
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Illegal phase
+ *
+ * Description: Target switched to some illegal phase, so all we can do
+ * is report an error back to the host (if that is possible)
+ * and send an ABORT message to the misbehaving target.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseIllegal(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseIllegal(USHORT port, UCHAR p_card)
+#else
+void phaseIllegal(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+   PSCCB currSCCB;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   /* Write the signal register back to itself — latches the current
+      phase so the chip tracks the (illegal) bus state. */
+   WR_HARPOON(port+hp_scsisig, RD_HARPOON(port+hp_scsisig));
+   if (currSCCB != NULL) {
+
+      /* Fail the command and queue an ABORT message for the target. */
+      currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+      currSCCB->Sccb_scsistat = ABORT_ST;
+      currSCCB->Sccb_scsimsg = SMABORT;
+   }
+
+   /* Assert ATN so the target enters message-out for the abort. */
+   ACCEPT_MSG_ATN(port);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Check FIFO
+ *
+ * Description: Make sure data has been flushed from both FIFOs and abort
+ * the operations if necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void phaseChkFifo(USHORT port, UCHAR p_card)
+#else
+void phaseChkFifo(ULONG port, UCHAR p_card)
+#endif
+{
+   ULONG xfercnt;
+   PSCCB currSCCB;
+
+   /* NOTE(review): currSCCB is dereferenced without a NULL check;
+      callers appear to invoke this only while a transfer is active. */
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   if (currSCCB->Sccb_scsistat == DATA_IN_ST)
+   {
+
+      /* Wait for the BusMaster to drain the FIFO or go idle. */
+      while((!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)) &&
+         (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY)) {}
+
+
+      /* FIFO still not empty with the BusMaster idle: the transfer
+         stalled — account the bytes moved so far, record any parity
+         error, abort the host side and restart the data processor. */
+      if (!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY))
+      {
+         currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;
+
+         currSCCB->Sccb_XferCnt = 0;
+
+         if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+            (currSCCB->HostStatus == SCCB_COMPLETE))
+         {
+            currSCCB->HostStatus = SCCB_PARITY_ERR;
+            WRW_HARPOON((port+hp_intstat), PARITY);
+         }
+
+         hostDataXferAbort(port,p_card,currSCCB);
+
+         dataXferProcessor(port, &BL_Card[p_card]);
+
+         while((!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)) &&
+            (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY)) {}
+
+      }
+   }  /*End Data In specific code. */
+
+
+
+   /* Read the 24-bit residual transfer count into xfercnt (the DOS
+      build reads the three count registers by hand, high byte first). */
+#if defined(DOS)
+   asm { mov dx,port;
+      add dx,hp_xfercnt_2;
+      in al,dx;
+      dec dx;
+      xor ah,ah;
+      mov word ptr xfercnt+2,ax;
+      in al,dx;
+      dec dx;
+      mov ah,al;
+      in al,dx;
+      mov word ptr xfercnt,ax;
+      }
+#else
+   GET_XFER_CNT(port,xfercnt);
+#endif
+
+
+   WR_HARPOON(port+hp_xfercnt_0, 0x00);
+
+
+   WR_HARPOON(port+hp_portctrl_0, 0x00);
+
+   /* ATC accumulates bytes actually moved; XferCnt keeps the residue. */
+   currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - xfercnt);
+
+   currSCCB->Sccb_XferCnt = xfercnt;
+
+   if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+      (currSCCB->HostStatus == SCCB_COMPLETE)) {
+
+      currSCCB->HostStatus = SCCB_PARITY_ERR;
+      WRW_HARPOON((port+hp_intstat), PARITY);
+   }
+
+
+   hostDataXferAbort(port,p_card,currSCCB);
+
+
+   /* Reset both FIFO pointers and the transfer status, then clear the
+      pending transfer-count-zero interrupt. */
+   WR_HARPOON(port+hp_fifowrite, 0x00);
+   WR_HARPOON(port+hp_fiforead, 0x00);
+   WR_HARPOON(port+hp_xferstat, 0x00);
+
+   WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Bus Free
+ *
+ * Description: We just went bus free so figure out if it was
+ * because of command complete or from a disconnect.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void phaseBusFree(USHORT port, UCHAR p_card)
+#else
+void phaseBusFree(ULONG port, UCHAR p_card)
+#endif
+{
+   PSCCB currSCCB;
+
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   if (currSCCB != NULL)
+   {
+
+      DISABLE_AUTO(port);
+
+
+      /* RESET command: bus free is its normal completion — complete
+         the SCCB and go look for the next selectable command. */
+      if (currSCCB->OperationCode == RESET_COMMAND)
+      {
+
+         if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+            ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+         else
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+         queueCmdComplete(&BL_Card[p_card], currSCCB, p_card);
+
+         queueSearchSelect(&BL_Card[p_card],p_card);
+
+      }
+
+      /* Bus free during sync negotiation: target dropped off — treat
+         sync as settled (unsupported) and stop offering it. */
+      else if(currSCCB->Sccb_scsistat == SELECT_SN_ST)
+      {
+         sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
+            (UCHAR)SYNC_SUPPORTED;
+         sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_SYNC_MASK;
+      }
+
+      /* Bus free during wide negotiation: mark wide negotiated-off. */
+      else if(currSCCB->Sccb_scsistat == SELECT_WN_ST)
+      {
+         sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
+            (sccbMgrTbl[p_card][currSCCB->TargID].
+            TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+         sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_WIDE_SCSI;
+      }
+
+#if !defined(DOS)
+      else if(currSCCB->Sccb_scsistat == SELECT_Q_ST)
+      {
+         /* Make sure this is not a phony BUS_FREE.  If we were
+            reselected or if BUSY is NOT on then this is a
+            valid BUS FREE.  SRR Wednesday, 5/10/1995.  */
+
+         if ((!(RD_HARPOON(port+hp_scsisig) & SCSI_BSY)) ||
+            (RDW_HARPOON((port+hp_intstat)) & RSEL))
+         {
+            /* Target rejected tagged queuing: remember that. */
+            sccbMgrTbl[p_card][currSCCB->TargID].TarStatus &= ~TAR_TAG_Q_MASK;
+            sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |= TAG_Q_REJECT;
+         }
+
+         else
+         {
+            return;
+         }
+      }
+#endif
+
+      else
+      {
+
+         /* Unexpected bus free: fail the command back to the host. */
+         currSCCB->Sccb_scsistat = BUS_FREE_ST;
+
+         if (!currSCCB->HostStatus)
+         {
+            currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+         }
+
+         if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+            ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+         else
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+         queueCmdComplete(&BL_Card[p_card], currSCCB, p_card);
+         return;
+      }
+
+
+      /* Negotiation paths fall through here: retry the command. */
+      BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+   }  /*end if !=null */
+}
+
+
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: automate.c $
+ *
+ * Description: Functions relating to programming the automation of
+ * the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern SCCBCARD BL_Card[MAX_CARDS];
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Auto Load Default Map
+ *
+ * Description: Load the Automation RAM with the default map values.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void autoLoadDefaultMap(USHORT p_port)
+#else
+void autoLoadDefaultMap(ULONG p_port)
+#endif
+{
+#if defined(DOS)
+   USHORT map_addr;
+#else
+   ULONG map_addr;
+#endif
+
+   /* Program the Harpoon automation RAM, word by word, with the default
+      instruction map: identify/tag message out, CDB out, then the
+      data/message-in/status state machine.  Each WRW_HARPOON writes one
+      16-bit automation instruction and map_addr advances by 2. */
+   ARAM_ACCESS(p_port);
+   map_addr = p_port + hp_aramBase;
+
+   WRW_HARPOON(map_addr, (MPM_OP+AMSG_OUT+ 0xC0));  /*ID MESSAGE */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+AMSG_OUT+ 0x20));  /*SIMPLE TAG QUEUEING MSG */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, RAT_OP);                   /*RESET ATTENTION */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+AMSG_OUT+ 0x00));  /*TAG ID MSG */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 0 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 1 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 2 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 3 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 4 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 5 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 6 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 7 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 8 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 9 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 10 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00));  /*CDB BYTE 11 */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CPE_OP+ADATA_OUT+ DINT)); /*JUMP IF DATA OUT */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (TCB_OP+FIFO_0+ DI));      /*JUMP IF NO DATA IN FIFO */
+   map_addr +=2;                                    /*This means ASYNC DATA IN */
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_IDO_STRT));   /*STOP AND INTERRUPT */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CPE_OP+ADATA_IN+DINT));   /*JUMP IF NOT DATA IN PHZ */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CPN_OP+AMSG_IN+ ST));     /*IF NOT MSG IN CHECK 4 DATA IN */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CRD_OP+SDATA+ 0x02));     /*SAVE DATA PTR MSG? */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (BRH_OP+NOT_EQ+ DC));      /*GO CHECK FOR DISCONNECT MSG */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_AR1));    /*SAVE DATA PTRS MSG */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CPN_OP+AMSG_IN+ ST));     /*IF NOT MSG IN CHECK DATA IN */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CRD_OP+SDATA+ 0x04));     /*DISCONNECT MSG? */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (BRH_OP+NOT_EQ+ UNKNWN));  /*UNKNOWN MSG */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_BUCKET)); /*XFER DISCONNECT MSG */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_ITAR_DISC));  /*STOP AND INTERRUPT */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CPN_OP+ASTATUS+ UNKNWN)); /*JUMP IF NOT STATUS PHZ. */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_AR0));    /*GET STATUS BYTE */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CPN_OP+AMSG_IN+ CC));     /*ERROR IF NOT MSG IN PHZ */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (CRD_OP+SDATA+ 0x00));     /*CHECK FOR CMD COMPLETE MSG. */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (BRH_OP+NOT_EQ+ CC));      /*ERROR IF NOT CMD COMPLETE MSG. */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_BUCKET)); /*GET CMD COMPLETE MSG */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_ICMD_COMP));  /*END OF COMMAND */
+   map_addr +=2;
+
+   /* Error/exception entry points of the map. */
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_IUNKWN));  /*RECEIVED UNKNOWN MSG BYTE */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_INO_CC));  /*NO COMMAND COMPLETE AFTER STATUS */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_ITICKLE)); /*BIOS Tickled the Mgr */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_IRFAIL));  /*EXPECTED ID/TAG MESSAGES AND */
+   map_addr +=2;                             /* DIDN'T GET ONE */
+   WRW_HARPOON(map_addr, (CRR_OP+AR3+ S_IDREG)); /* comp SCSI SEL ID & AR3*/
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (BRH_OP+EQUAL+ 0x00));    /*SEL ID OK then Conti. */
+   map_addr +=2;
+   WRW_HARPOON(map_addr, (SSI_OP+ SSI_INO_CC));  /*NO COMMAND COMPLETE AFTER STATUS */
+
+
+
+   /* Return the page register to scatter/gather RAM access. */
+   SGRAM_ACCESS(p_port);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Auto Command Complete
+ *
+ * Description: Post command back to host and find another command
+ * to execute.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void autoCmdCmplt(USHORT p_port, UCHAR p_card)
+#else
+void autoCmdCmplt(ULONG p_port, UCHAR p_card)
+#endif
+{
+   PSCCB currSCCB;
+   UCHAR status_byte;
+
+   /* NOTE(review): currSCCB is dereferenced without a NULL check —
+      the automation only raises command-complete for an active SCCB. */
+   currSCCB = BL_Card[p_card].currentSCCB;
+
+   /* The automation left the target's status byte in gp_reg_0. */
+   status_byte = RD_HARPOON(p_port+hp_gp_reg_0);
+
+   sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = FALSE;
+
+   if (status_byte != SSGOOD) {
+
+      /* Queue full: mark the LUN busy, give back the disconnect-queue
+         slot, and requeue the command for a later retry. */
+      if (status_byte == SSQ_FULL) {
+
+
+         if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+            ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+         {
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+            if(BL_Card[p_card].discQCount != 0)
+               BL_Card[p_card].discQCount--;
+            BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+         }
+         else
+         {
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+            if(currSCCB->Sccb_tag)
+            {
+               if(BL_Card[p_card].discQCount != 0)
+                  BL_Card[p_card].discQCount--;
+               BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+            }else
+            {
+               if(BL_Card[p_card].discQCount != 0)
+                  BL_Card[p_card].discQCount--;
+               BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+            }
+         }
+
+         currSCCB->Sccb_MGRFlags |= F_STATUSLOADED;
+
+         queueSelectFail(&BL_Card[p_card],p_card);
+
+         return;
+      }
+
+      /* Bad status while negotiating sync: record sync as settled,
+         drop the EEPROM sync offer, and retry the command. */
+      if(currSCCB->Sccb_scsistat == SELECT_SN_ST)
+      {
+         sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
+            (UCHAR)SYNC_SUPPORTED;
+
+         sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_SYNC_MASK;
+         BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+         if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+            ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+         {
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+            if(BL_Card[p_card].discQCount != 0)
+               BL_Card[p_card].discQCount--;
+            BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+         }
+         else
+         {
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+            if(currSCCB->Sccb_tag)
+            {
+               if(BL_Card[p_card].discQCount != 0)
+                  BL_Card[p_card].discQCount--;
+               BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+            }else
+            {
+               if(BL_Card[p_card].discQCount != 0)
+                  BL_Card[p_card].discQCount--;
+               BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+            }
+         }
+         return;
+
+      }
+
+      /* Bad status while negotiating wide: same treatment for wide. */
+      if(currSCCB->Sccb_scsistat == SELECT_WN_ST)
+      {
+
+         sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
+            (sccbMgrTbl[p_card][currSCCB->TargID].
+            TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+         sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_WIDE_SCSI;
+         BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+         if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+            ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+         {
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+            if(BL_Card[p_card].discQCount != 0)
+               BL_Card[p_card].discQCount--;
+            BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+         }
+         else
+         {
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+            if(currSCCB->Sccb_tag)
+            {
+               if(BL_Card[p_card].discQCount != 0)
+                  BL_Card[p_card].discQCount--;
+               BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+            }else
+            {
+               if(BL_Card[p_card].discQCount != 0)
+                  BL_Card[p_card].discQCount--;
+               BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+            }
+         }
+         return;
+
+      }
+
+      /* CHECK CONDITION with renegotiation enabled: clear negotiated
+         sync/wide state so both get renegotiated. */
+      if (status_byte == SSCHECK)
+      {
+         if(BL_Card[p_card].globalFlags & F_DO_RENEGO)
+         {
+            if (sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue & EE_SYNC_MASK)
+            {
+               sccbMgrTbl[p_card][currSCCB->TargID].TarStatus &= ~TAR_SYNC_MASK;
+            }
+            if (sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue & EE_WIDE_SCSI)
+            {
+               sccbMgrTbl[p_card][currSCCB->TargID].TarStatus &= ~TAR_WIDE_MASK;
+            }
+         }
+      }
+
+      /* Not already in auto-sense: record the error, and on CHECK
+         CONDITION kick off an automatic REQUEST SENSE if allowed. */
+      if (!(currSCCB->Sccb_XferState & F_AUTO_SENSE)) {
+
+         currSCCB->SccbStatus = SCCB_ERROR;
+         currSCCB->TargetStatus = status_byte;
+
+         if (status_byte == SSCHECK) {
+
+            sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA
+               = TRUE;
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+            if (currSCCB->RequestSenseLength != NO_AUTO_REQUEST_SENSE) {
+
+               if (currSCCB->RequestSenseLength == 0)
+                  currSCCB->RequestSenseLength = 14;
+
+               ssenss(&BL_Card[p_card]);
+               BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+               if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+                  ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+               {
+                  sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+                  if(BL_Card[p_card].discQCount != 0)
+                     BL_Card[p_card].discQCount--;
+                  BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+               }
+               else
+               {
+                  sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+                  if(currSCCB->Sccb_tag)
+                  {
+                     if(BL_Card[p_card].discQCount != 0)
+                        BL_Card[p_card].discQCount--;
+                     BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+                  }else
+                  {
+                     if(BL_Card[p_card].discQCount != 0)
+                        BL_Card[p_card].discQCount--;
+                     BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+                  }
+               }
+               return;
+            }
+#else
+            if ((!(currSCCB->Sccb_ucb_ptr->UCB_opcode & OPC_NO_AUTO_SENSE)) &&
+               (currSCCB->RequestSenseLength))
+            {
+               ssenss(&BL_Card[p_card]);
+               BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+               if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+                  ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+               {
+                  sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+                  if(BL_Card[p_card].discQCount != 0)
+                     BL_Card[p_card].discQCount--;
+                  BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+               }
+               else
+               {
+                  sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+                  if(currSCCB->Sccb_tag)
+                  {
+                     if(BL_Card[p_card].discQCount != 0)
+                        BL_Card[p_card].discQCount--;
+                     BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+                  }else
+                  {
+                     if(BL_Card[p_card].discQCount != 0)
+                        BL_Card[p_card].discQCount--;
+                     BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+                  }
+               }
+               return;
+            }
+
+#endif
+         }
+      }
+   }
+
+
+   /* Normal completion path: free the LUN-busy slot and post the
+      command back to the host. */
+   if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+      ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+      sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+   else
+      sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+
+   queueCmdComplete(&BL_Card[p_card], currSCCB, p_card);
+}
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: busmstr.c $
+ *
+ * Description: Functions to start, stop, and abort BusMaster operations.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+*/
+
+#define SHORT_WAIT 0x0000000F
+#define LONG_WAIT 0x0000FFFFL
+
+#if defined(BUGBUG)
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data Transfer Processor
+ *
+ * Description: This routine performs two tasks.
+ * (1) Start data transfer by calling HOST_DATA_XFER_START
+ * function. Once data transfer is started, (2) Depends
+ * on the type of data transfer mode Scatter/Gather mode
+ * or NON Scatter/Gather mode. In NON Scatter/Gather mode,
+ * this routine checks Sccb_MGRFlag (F_HOST_XFER_ACT bit) for
+ * data transfer done. In Scatter/Gather mode, this routine
+ * checks bus master command complete and dual rank busy
+ * bit to keep chaining SC transfer command. Similarly,
+ * in Scatter/Gather mode, it checks Sccb_MGRFlag
+ * (F_HOST_XFER_ACT bit) for data transfer done.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void dataXferProcessor(USHORT port, PSCCBcard pCurrCard)
+#else
+void dataXferProcessor(ULONG port, PSCCBcard pCurrCard)
+#endif
+{
+   PSCCB currSCCB;
+
+   currSCCB = pCurrCard->currentSCCB;
+
+   if (currSCCB->Sccb_XferState & F_SG_XFER)
+   {
+      /* Scatter/gather: if a transfer was already active we are being
+         re-entered to chain the next batch of SG_BUF_CNT elements —
+         advance the segment index and reset the intra-segment offset. */
+      if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+
+      {
+         currSCCB->Sccb_sgseg += (UCHAR)SG_BUF_CNT;
+         currSCCB->Sccb_SGoffset = 0x00;
+      }
+      pCurrCard->globalFlags |= F_HOST_XFER_ACT;
+
+      busMstrSGDataXferStart(port, currSCCB);
+   }
+
+   else
+   {
+      /* Non-SG: start the single BusMaster transfer only once. */
+      if (!(pCurrCard->globalFlags & F_HOST_XFER_ACT))
+      {
+         pCurrCard->globalFlags |= F_HOST_XFER_ACT;
+
+         busMstrDataXferStart(port, currSCCB);
+      }
+   }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Scatter Gather Data Transfer Start
+ *
+ * Description:
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void busMstrSGDataXferStart(USHORT p_port, PSCCB pcurrSCCB)
+#else
+void busMstrSGDataXferStart(ULONG p_port, PSCCB pcurrSCCB)
+#endif
+{
+   ULONG count,addr,tmpSGCnt;
+   UINT sg_index;
+   UCHAR sg_count, i;
+#if defined(DOS)
+   USHORT reg_offset;
+#else
+   ULONG reg_offset;
+#endif
+
+
+   /* Put the BusMaster command (read vs write, from the host's point
+      of view) in the top byte of each SG count word. */
+   if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+      count =  ((ULONG) HOST_RD_CMD)<<24;
+   }
+
+   else {
+      count =  ((ULONG) HOST_WRT_CMD)<<24;
+   }
+
+   sg_count = 0;
+   tmpSGCnt = 0;
+   sg_index = pcurrSCCB->Sccb_sgseg;
+   reg_offset = hp_aramBase;
+
+
+   /* Disable scatter mode and select ARAM while we load the list. */
+   i = (UCHAR) (RD_HARPOON(p_port+hp_page_ctrl) & ~(SGRAM_ARAM|SCATTER_EN));
+
+
+   WR_HARPOON(p_port+hp_page_ctrl, i);
+
+   /* Copy up to SG_BUF_CNT (count,addr) pairs from the caller's SG
+      list (DataPointer) into the automation RAM.  The list layout is
+      assumed to be ULONG pairs: [len, addr] per element — TODO confirm
+      against the SCCB build code. */
+   while ((sg_count < (UCHAR)SG_BUF_CNT) &&
+      ((ULONG)(sg_index * (UINT)SG_ELEMENT_SIZE) < pcurrSCCB->DataLength) ) {
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+      tmpSGCnt += *(((ULONG far *)pcurrSCCB->DataPointer)+
+         (sg_index * 2));
+
+      count |= *(((ULONG far *)pcurrSCCB->DataPointer)+
+         (sg_index * 2));
+
+      addr = *(((ULONG far *)pcurrSCCB->DataPointer)+
+         ((sg_index * 2) + 1));
+
+#else
+      tmpSGCnt += *(((ULONG *)pcurrSCCB->DataPointer)+
+         (sg_index * 2));
+
+      count |= *(((ULONG *)pcurrSCCB->DataPointer)+
+         (sg_index * 2));
+
+      addr = *(((ULONG *)pcurrSCCB->DataPointer)+
+         ((sg_index * 2) + 1));
+#endif
+
+
+      /* Resuming mid-element: bias the address forward by the bytes
+         already transferred and shrink the count to the remainder. */
+      if ((!sg_count) && (pcurrSCCB->Sccb_SGoffset)) {
+
+         addr += ((count & 0x00FFFFFFL) - pcurrSCCB->Sccb_SGoffset);
+         count = (count & 0xFF000000L) | pcurrSCCB->Sccb_SGoffset;
+
+         tmpSGCnt = count & 0x00FFFFFFL;
+      }
+
+      WR_HARP32(p_port,reg_offset,addr);
+      reg_offset +=4;
+
+      WR_HARP32(p_port,reg_offset,count);
+      reg_offset +=4;
+
+      count &= 0xFF000000L;
+      sg_index++;
+      sg_count++;
+
+   }  /*End While */
+
+   pcurrSCCB->Sccb_XferCnt = tmpSGCnt;
+
+   WR_HARPOON(p_port+hp_sg_addr,(sg_count<<4));
+
+   if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+      WR_HARP32(p_port,hp_xfercnt_0,tmpSGCnt);
+
+
+      WR_HARPOON(p_port+hp_portctrl_0,(DMA_PORT | SCSI_PORT | SCSI_INBIT));
+      WR_HARPOON(p_port+hp_scsisig, S_DATAI_PH);
+   }
+
+   else {
+
+
+      /* Wide bus with an odd total byte count: remember the odd byte
+         and program an even count for the hardware. */
+      if ((!(RD_HARPOON(p_port+hp_synctarg_0) & NARROW_SCSI)) &&
+         (tmpSGCnt & 0x000000001))
+      {
+
+         pcurrSCCB->Sccb_XferState |= F_ODD_BALL_CNT;
+         tmpSGCnt--;
+      }
+
+
+      WR_HARP32(p_port,hp_xfercnt_0,tmpSGCnt);
+
+      WR_HARPOON(p_port+hp_portctrl_0,(SCSI_PORT | DMA_PORT | DMA_RD));
+      WR_HARPOON(p_port+hp_scsisig, S_DATAO_PH);
+   }
+
+
+   /* Re-enable scatter mode with the page bits we saved earlier. */
+   WR_HARPOON(p_port+hp_page_ctrl, (UCHAR) (i | SCATTER_EN));
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Data Transfer Start
+ *
+ * Description:
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void busMstrDataXferStart(USHORT p_port, PSCCB pcurrSCCB)
+#else
+void busMstrDataXferStart(ULONG p_port, PSCCB pcurrSCCB)
+#endif
+{
+   ULONG addr,count;
+
+   if (!(pcurrSCCB->Sccb_XferState & F_AUTO_SENSE)) {
+
+      count = pcurrSCCB->Sccb_XferCnt;
+
+      /* Resume from the accumulated-transfer-count offset.
+         NOTE(review): casting DataPointer to ULONG assumes a 32-bit
+         flat physical address — verify on 64-bit builds. */
+      addr = (ULONG) pcurrSCCB->DataPointer + pcurrSCCB->Sccb_ATC;
+   }
+
+   else {
+      /* Auto request sense: transfer into the sense buffer instead. */
+      addr = pcurrSCCB->SensePointer;
+      count = pcurrSCCB->RequestSenseLength;
+
+   }
+
+   /* Program the 24-bit count and 32-bit address registers (the DOS
+      build does the port writes by hand, low byte first). */
+#if defined(DOS)
+   asm { mov dx,p_port;
+      mov ax,word ptr count;
+      add dx,hp_xfer_cnt_lo;
+      out dx,al;
+      inc dx;
+      xchg ah,al
+      out dx,al;
+      inc dx;
+      mov ax,word ptr count+2;
+      out dx,al;
+      inc dx;
+      inc dx;
+      mov ax,word ptr addr;
+      out dx,al;
+      inc dx;
+      xchg ah,al
+      out dx,al;
+      inc dx;
+      mov ax,word ptr addr+2;
+      out dx,al;
+      inc dx;
+      xchg ah,al
+      out dx,al;
+      }
+
+   WR_HARP32(p_port,hp_xfercnt_0,count);
+
+#else
+   HP_SETUP_ADDR_CNT(p_port,addr,count);
+#endif
+
+
+   /* Point the port at DMA+SCSI, assert the data phase matching the
+      transfer direction, then issue the BusMaster transfer command. */
+   if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+      WR_HARPOON(p_port+hp_portctrl_0,(DMA_PORT | SCSI_PORT | SCSI_INBIT));
+      WR_HARPOON(p_port+hp_scsisig, S_DATAI_PH);
+
+      WR_HARPOON(p_port+hp_xfer_cmd,
+         (XFER_DMA_HOST | XFER_HOST_AUTO | XFER_DMA_8BIT));
+   }
+
+   else {
+
+      WR_HARPOON(p_port+hp_portctrl_0,(SCSI_PORT | DMA_PORT | DMA_RD));
+      WR_HARPOON(p_port+hp_scsisig, S_DATAO_PH);
+
+      WR_HARPOON(p_port+hp_xfer_cmd,
+         (XFER_HOST_DMA | XFER_HOST_AUTO | XFER_DMA_8BIT));
+
+   }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Timeout Handler
+ *
+ * Description: This function is called after a bus master command busy time
+ *              out is detected.  This routine issues a halt state machine
+ *              request with a software time out for command busy.  If command
+ *              busy is still asserted at the end of the time out, it issues
+ *              a hard abort with another software time out.  If the hard
+ *              abort's command busy also times out, it just gives up.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+UCHAR busMstrTimeOut(USHORT p_port)
+#else
+UCHAR busMstrTimeOut(ULONG p_port)
+#endif
+{
+   ULONG timeout;
+
+   timeout = LONG_WAIT;
+
+   /* First try a (softer) halt of the state machine and spin until the
+      chip reports the command aborted or the software timeout expires. */
+   WR_HARPOON(p_port+hp_sys_ctrl, HALT_MACH);
+
+   while ((!(RD_HARPOON(p_port+hp_ext_status) & CMD_ABORTED)) && timeout--) {}
+
+
+
+   /* Still busy: escalate to a hard abort with a fresh timeout. */
+   if (RD_HARPOON(p_port+hp_ext_status) & BM_CMD_BUSY) {
+      WR_HARPOON(p_port+hp_sys_ctrl, HARD_ABORT);
+
+      timeout = LONG_WAIT;
+      while ((RD_HARPOON(p_port+hp_ext_status) & BM_CMD_BUSY) && timeout--) {}
+      }
+
+   RD_HARPOON(p_port+hp_int_status);           /*Clear command complete */
+
+   /* Returns TRUE when the bus master is STILL busy (i.e. the abort
+      failed and we gave up), FALSE on success. */
+   if (RD_HARPOON(p_port+hp_ext_status) & BM_CMD_BUSY) {
+      return(TRUE);
+      }
+
+   else {
+      return(FALSE);
+      }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Host Data Transfer Abort
+ *
+ * Description: Abort any in progress transfer.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void hostDataXferAbort(USHORT port, UCHAR p_card, PSCCB pCurrSCCB)
+#else
+void hostDataXferAbort(ULONG port, UCHAR p_card, PSCCB pCurrSCCB)
+#endif
+{
+
+   ULONG timeout;
+   ULONG remain_cnt;
+   UINT sg_ptr;
+
+   /* Abort any in-progress bus-master transfer for this SCCB, flushing
+      FIFOs, recovering scatter/gather position, and recording a
+      SCCB_BM_ERR host status when the hardware misbehaves. */
+   BL_Card[p_card].globalFlags &= ~F_HOST_XFER_ACT;
+
+   /* Case 1: an auto-sense transfer was in flight. */
+   if (pCurrSCCB->Sccb_XferState & F_AUTO_SENSE) {
+
+
+      if (!(RD_HARPOON(port+hp_int_status) & INT_CMD_COMPL)) {
+
+         /* Flush the transfer counter, then wait for command-busy to drop. */
+         WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) | FLUSH_XFER_CNTR));
+         timeout = LONG_WAIT;
+
+         while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) && timeout--) {}
+
+         WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) & ~FLUSH_XFER_CNTR));
+
+         if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+            if (busMstrTimeOut(port)) {
+
+               if (pCurrSCCB->HostStatus == 0x00)
+
+                  pCurrSCCB->HostStatus = SCCB_BM_ERR;
+
+               }
+
+            if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS)
+
+               if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS)
+
+                  if (pCurrSCCB->HostStatus == 0x00)
+
+                     {
+                     pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+                     WR_HARPOON(port+hp_dual_addr_lo,
+                                RD_HARPOON(port+hp_ext_status));
+#endif
+                     }
+            }
+         }
+      }
+
+   /* Case 2: a normal data transfer with bytes still outstanding. */
+   else if (pCurrSCCB->Sccb_XferCnt) {
+
+      if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
+
+
+          WR_HARPOON(port+hp_page_ctrl, (RD_HARPOON(port+hp_page_ctrl) &
+            ~SCATTER_EN));
+
+         WR_HARPOON(port+hp_sg_addr,0x00);
+
+         /* Walk backward from the last loaded SG element, subtracting
+            element lengths from the residual until the element containing
+            the abort point is found.  NOTE(review): the SG list appears to
+            be laid out as (count, address) ULONG pairs — hence the
+            (sg_ptr * 2) stride — confirm against the list builder. */
+         sg_ptr = pCurrSCCB->Sccb_sgseg + SG_BUF_CNT;
+
+         if (sg_ptr > (UINT)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE)) {
+
+            sg_ptr = (UINT)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE);
+            }
+
+         remain_cnt = pCurrSCCB->Sccb_XferCnt;
+
+         /* Values >= 0x01000000 do not fit the 24-bit counter and flag
+            that the walk-back overshot (firmware error). */
+         while (remain_cnt < 0x01000000L) {
+
+            sg_ptr--;
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+            if (remain_cnt > (ULONG)(*(((ULONG far *)pCurrSCCB->
+               DataPointer) + (sg_ptr * 2)))) {
+
+               remain_cnt -= (ULONG)(*(((ULONG far *)pCurrSCCB->
+                  DataPointer) + (sg_ptr * 2)));
+               }
+
+#else
+            if (remain_cnt > (ULONG)(*(((ULONG *)pCurrSCCB->
+               DataPointer) + (sg_ptr * 2)))) {
+
+               remain_cnt -= (ULONG)(*(((ULONG *)pCurrSCCB->
+                  DataPointer) + (sg_ptr * 2)));
+               }
+#endif
+
+            /* This `else` pairs with whichever `if` the preprocessor kept. */
+            else {
+
+               break;
+               }
+            }
+
+
+
+         if (remain_cnt < 0x01000000L) {
+
+
+            pCurrSCCB->Sccb_SGoffset = remain_cnt;
+
+            pCurrSCCB->Sccb_sgseg = (USHORT)sg_ptr;
+
+
+            if ((ULONG)(sg_ptr * SG_ELEMENT_SIZE) == pCurrSCCB->DataLength
+                && (remain_cnt == 0))
+
+               pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+            }
+
+         else {
+
+
+            if (pCurrSCCB->HostStatus == 0x00) {
+
+               pCurrSCCB->HostStatus = SCCB_GROSS_FW_ERR;
+               }
+            }
+         }
+
+
+      /* Data-out (write) direction: just make sure the bus master quiesces. */
+      if (!(pCurrSCCB->Sccb_XferState & F_HOST_XFER_DIR)) {
+
+
+         if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+            busMstrTimeOut(port);
+            }
+
+         else {
+
+            if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS) {
+
+               if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS) {
+
+                  if (pCurrSCCB->HostStatus == 0x00) {
+
+                     pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+                     WR_HARPOON(port+hp_dual_addr_lo,
+                                RD_HARPOON(port+hp_ext_status));
+#endif
+                     }
+                  }
+               }
+
+            }
+         }
+
+      /* Data-in (read) direction: let the FIFO drain below the threshold,
+         then flush and, if still busy, force a hard abort. */
+      else {
+
+
+         if ((RD_HARPOON(port+hp_fifo_cnt)) >= BM_THRESHOLD) {
+
+            timeout = SHORT_WAIT;
+
+            while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) &&
+                   ((RD_HARPOON(port+hp_fifo_cnt)) >= BM_THRESHOLD) &&
+                   timeout--) {}
+            }
+
+         if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+            WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) |
+               FLUSH_XFER_CNTR));
+
+            timeout = LONG_WAIT;
+
+            while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) &&
+                   timeout--) {}
+
+            WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) &
+               ~FLUSH_XFER_CNTR));
+
+
+            if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+               if (pCurrSCCB->HostStatus == 0x00) {
+
+                  pCurrSCCB->HostStatus = SCCB_BM_ERR;
+                  }
+
+               busMstrTimeOut(port);
+               }
+            }
+
+         if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS) {
+
+            if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS) {
+
+               if (pCurrSCCB->HostStatus == 0x00) {
+
+                  pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+                  WR_HARPOON(port+hp_dual_addr_lo,
+                             RD_HARPOON(port+hp_ext_status));
+#endif
+                  }
+               }
+            }
+         }
+
+      }
+
+   /* Case 3: no residual count — the transfer completed (or never ran);
+      quiesce the bus master and mark completion state. */
+   else {
+
+
+      if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+         timeout = LONG_WAIT;
+
+         while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) && timeout--) {}
+
+         if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+            if (pCurrSCCB->HostStatus == 0x00) {
+
+               pCurrSCCB->HostStatus = SCCB_BM_ERR;
+               }
+
+            busMstrTimeOut(port);
+            }
+         }
+
+
+      if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS) {
+
+         if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS) {
+
+            if (pCurrSCCB->HostStatus == 0x00) {
+
+               pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+               WR_HARPOON(port+hp_dual_addr_lo,
+                          RD_HARPOON(port+hp_ext_status));
+#endif
+               }
+            }
+
+         }
+
+      if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
+
+         WR_HARPOON(port+hp_page_ctrl, (RD_HARPOON(port+hp_page_ctrl) &
+                    ~SCATTER_EN));
+
+         WR_HARPOON(port+hp_sg_addr,0x00);
+
+         /* Advance to the next batch of SG elements; clamp and mark
+            F_ALL_XFERRED once the whole list has been consumed. */
+         pCurrSCCB->Sccb_sgseg += SG_BUF_CNT;
+
+         pCurrSCCB->Sccb_SGoffset = 0x00;
+
+
+         if ((ULONG)(pCurrSCCB->Sccb_sgseg * SG_ELEMENT_SIZE) >=
+              pCurrSCCB->DataLength) {
+
+            pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+
+            pCurrSCCB->Sccb_sgseg = (USHORT)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE);
+
+            }
+         }
+
+      else {
+
+         if (!(pCurrSCCB->Sccb_XferState & F_AUTO_SENSE))
+
+            pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+         }
+      }
+
+   /* Re-arm the interrupt mask for command-complete and SCSI interrupts. */
+   WR_HARPOON(port+hp_int_mask,(INT_CMD_COMPL | SCSI_INTERRUPT));
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Host Data Transfer Restart
+ *
+ * Description: Reset the available count due to a restore data
+ * pointers message.
+ *
+ *---------------------------------------------------------------------*/
+void hostDataXferRestart(PSCCB currSCCB)
+{
+   ULONG data_count;
+   UINT sg_index;
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+   ULONG far *sg_ptr;
+#else
+   ULONG *sg_ptr;
+#endif
+
+   /* Recompute the transfer position after a SCSI "restore data pointers"
+      message, using the accumulated transfer count (Sccb_ATC). */
+   if (currSCCB->Sccb_XferState & F_SG_XFER) {
+
+      currSCCB->Sccb_XferCnt = 0;
+
+      sg_index = 0xffff;         /*Index by long words into sg list. */
+      data_count = 0;            /*Running count of SG xfer counts. */
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+      sg_ptr = (ULONG far *)currSCCB->DataPointer;
+#else
+      sg_ptr = (ULONG *)currSCCB->DataPointer;
+#endif
+
+      /* Accumulate SG element counts (pairs of ULONGs, hence the *2
+         stride) until we pass the restart point.  sg_index starts at
+         0xffff so the first increment wraps it to 0. */
+      while (data_count < currSCCB->Sccb_ATC) {
+
+         sg_index++;
+         data_count += *(sg_ptr+(sg_index * 2));
+         }
+
+      if (data_count == currSCCB->Sccb_ATC) {
+
+         /* Restart point falls exactly on an element boundary. */
+         currSCCB->Sccb_SGoffset = 0;
+         sg_index++;
+         }
+
+      else {
+         /* Partial element: remember how far into it we are. */
+         currSCCB->Sccb_SGoffset = data_count - currSCCB->Sccb_ATC;
+         }
+
+      currSCCB->Sccb_sgseg = (USHORT)sg_index;
+      }
+
+   else {
+      /* Flat (non-SG) buffer: the residual is simply total minus ATC. */
+      currSCCB->Sccb_XferCnt = currSCCB->DataLength - currSCCB->Sccb_ATC;
+      }
+}
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: scam.c $
+ *
+ * Description: Functions relating to handling of the SCAM selection
+ * and the determination of the SCSI IDs to be assigned
+ *                  to all prospective SCSI targets.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <eeprom.h>*/
+/*#include <harpoon.h>*/
+
+
+
+/*
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBSCAM_INFO scamInfo[MAX_SCSI_TAR];
+extern NVRAMINFO nvRamInfo[MAX_MB_CARDS];
+#if defined(DOS) || defined(OS2)
+extern UCHAR temp_id_string[ID_STRING_LENGTH];
+#endif
+extern UCHAR scamHAString[];
+*/
+/*---------------------------------------------------------------------
+ *
+ * Function: scini
+ *
+ * Description: Setup all data structures necessary for SCAM selection.
+ *
+ *---------------------------------------------------------------------*/
+
+void scini(UCHAR p_card, UCHAR p_our_id, UCHAR p_power_up)
+{
+
+#if defined(SCAM_LEV_2)
+   UCHAR loser,assigned_id;
+#endif
+#if defined(DOS)
+
+   USHORT p_port;
+#else
+   ULONG p_port;
+#endif
+
+   UCHAR i,k,ScamFlg ;
+   PSCCBcard currCard;
+   PNVRamInfo pCurrNvRam;
+
+   /* Drive the whole SCAM initialization sequence for one card:
+      arbitrate for the bus, run dominant-master isolation (level 2),
+      mark legacy devices, and assign IDs to SCAM devices. */
+   currCard = &BL_Card[p_card];
+   p_port = currCard->ioPort;
+   pCurrNvRam = currCard->pNvRamInfo;
+
+
+   /* SCAM and system configuration come from NVRAM when present,
+      otherwise from the on-board EEPROM. */
+   if(pCurrNvRam){
+      ScamFlg = pCurrNvRam->niScamConf;
+      i = pCurrNvRam->niSysConf;
+   }
+   else{
+      ScamFlg = (UCHAR) utilEERead(p_port, SCAM_CONFIG/2);
+      i = (UCHAR)(utilEERead(p_port, (SYSTEM_CONFIG/2)));
+   }
+   if(!(i & 0x02)) /* check if reset bus in AutoSCSI parameter set */
+      return;
+
+   inisci(p_card,p_port, p_our_id);
+
+   /* Force to wait 1 sec after SCSI bus reset. Some SCAM device FW
+      too slow to return to SCAM selection */
+
+   /* if (p_power_up)
+         Wait1Second(p_port);
+      else
+         Wait(p_port, TO_250ms); */
+
+   Wait1Second(p_port);
+
+#if defined(SCAM_LEV_2)
+
+   /* Level-2 SCAM: contend for dominant-master via the isolation
+      protocol; loser == 0 means we won dominance. */
+   if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2))
+      {
+      while (!(scarb(p_port,INIT_SELTD))) {}
+
+      scsel(p_port);
+
+      do {
+         scxferc(p_port,SYNC_PTRN);
+         scxferc(p_port,DOM_MSTR);
+         loser = scsendi(p_port,&scamInfo[p_our_id].id_string[0]);
+         } while ( loser == 0xFF );
+
+      scbusf(p_port);
+
+      if ((p_power_up) && (!loser))
+         {
+         sresb(p_port,p_card);
+         Wait(p_port, TO_250ms);
+
+         while (!(scarb(p_port,INIT_SELTD))) {}
+
+         scsel(p_port);
+
+         do {
+            scxferc(p_port, SYNC_PTRN);
+            scxferc(p_port, DOM_MSTR);
+            loser = scsendi(p_port,&scamInfo[p_our_id].
+                            id_string[0]);
+            } while ( loser == 0xFF );
+
+         scbusf(p_port);
+         }
+      }
+
+   else
+      {
+      loser = FALSE;
+      }
+
+
+   if (!loser)
+      {
+
+#endif /* SCAM_LEV_2 */
+
+      /* We are the dominant master: probe every unassigned/unused ID
+         for legacy (non-SCAM) devices, then run ID assignment. */
+      scamInfo[p_our_id].state = ID_ASSIGNED;
+
+
+      if (ScamFlg & SCAM_ENABLED)
+         {
+
+         for (i=0; i < MAX_SCSI_TAR; i++)
+            {
+            if ((scamInfo[i].state == ID_UNASSIGNED) ||
+                (scamInfo[i].state == ID_UNUSED))
+               {
+               if (scsell(p_port,i))
+                  {
+                  scamInfo[i].state = LEGACY;
+                  if ((scamInfo[i].id_string[0] != 0xFF) ||
+                      (scamInfo[i].id_string[1] != 0xFA))
+                     {
+
+                     /* 0xFF,0xFA marks a legacy device in the table. */
+                     scamInfo[i].id_string[0] = 0xFF;
+                     scamInfo[i].id_string[1] = 0xFA;
+                     if(pCurrNvRam == NULL)
+                        currCard->globalFlags |= F_UPDATE_EEPROM;
+                     }
+                  }
+               }
+            }
+
+         sresb(p_port,p_card);
+         Wait1Second(p_port);
+         while (!(scarb(p_port,INIT_SELTD))) {}
+         scsel(p_port);
+         scasid(p_card, p_port);
+         }
+
+#if defined(SCAM_LEV_2)
+
+      }
+
+   /* We lost dominance: act as a SCAM slave and wait for the dominant
+      master to assign our ID (or to set our priority flag). */
+   else if ((loser) && (ScamFlg & SCAM_ENABLED))
+      {
+      scamInfo[p_our_id].id_string[0] = SLV_TYPE_CODE0;
+      assigned_id = FALSE;
+      scwtsel(p_port);
+
+      do {
+         while (scxferc(p_port,0x00) != SYNC_PTRN) {}
+
+         i = scxferc(p_port,0x00);
+         if (i == ASSIGN_ID)
+            {
+            if (!(scsendi(p_port,&scamInfo[p_our_id].id_string[0])))
+               {
+               i = scxferc(p_port,0x00);
+               if (scvalq(i))
+                  {
+                  k = scxferc(p_port,0x00);
+
+                  if (scvalq(k))
+                     {
+                     /* Two valid quintets encode our new 6-bit ID. */
+                     currCard->ourId =
+                        ((UCHAR)(i<<3)+(k & (UCHAR)7)) & (UCHAR) 0x3F;
+                     inisci(p_card, p_port, p_our_id);
+                     scamInfo[currCard->ourId].state = ID_ASSIGNED;
+                     scamInfo[currCard->ourId].id_string[0]
+                        = SLV_TYPE_CODE0;
+                     assigned_id = TRUE;
+                     }
+                  }
+               }
+            }
+
+         else if (i == SET_P_FLAG)
+            {
+               if (!(scsendi(p_port,
+                        &scamInfo[p_our_id].id_string[0])))
+                        scamInfo[p_our_id].id_string[0] |= 0x80;
+            }
+         }while (!assigned_id);
+
+      while (scxferc(p_port,0x00) != CFG_CMPLT) {}
+      }
+
+#endif   /* SCAM_LEV_2 */
+   if (ScamFlg & SCAM_ENABLED)
+      {
+      scbusf(p_port);
+      if (currCard->globalFlags & F_UPDATE_EEPROM)
+         {
+         scsavdi(p_card, p_port);
+         currCard->globalFlags &= ~F_UPDATE_EEPROM;
+         }
+      }
+
+
+#if defined(DOS)
+   for (i=0; i < MAX_SCSI_TAR; i++)
+      {
+      if (((ScamFlg & SCAM_ENABLED) && (scamInfo[i].state == LEGACY))
+         || (i != p_our_id))
+         {
+         scsellDOS(p_port,i);
+         }
+      }
+#endif
+
+/*
+   for (i=0,k=0; i < MAX_SCSI_TAR; i++)
+      {
+      if ((scamInfo[i].state == ID_ASSIGNED) ||
+         (scamInfo[i].state == LEGACY))
+         k++;
+      }
+
+   if (k==2)
+      currCard->globalFlags |= F_SINGLE_DEVICE;
+   else
+      currCard->globalFlags &= ~F_SINGLE_DEVICE;
+*/
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scarb
+ *
+ * Description: Gain control of the bus and wait SCAM select time (250ms)
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+int scarb(USHORT p_port, UCHAR p_sel_type)
+#else
+int scarb(ULONG p_port, UCHAR p_sel_type)
+#endif
+{
+   /* Arbitrate for the SCSI bus prior to a SCAM selection; returns TRUE
+      once we own the bus (after the 250 ms SCAM select wait), FALSE when
+      another initiator beat us and the caller should retry. */
+   if (p_sel_type == INIT_SELTD)
+      {
+
+      /* Wait for bus free (neither SEL nor BSY asserted). */
+      while (RD_HARPOON(p_port+hp_scsisig) & (SCSI_SEL | SCSI_BSY)) {}
+
+
+      if (RD_HARPOON(p_port+hp_scsisig) & SCSI_SEL)
+         return(FALSE);
+
+      if (RD_HARPOON(p_port+hp_scsidata_0) != 00)
+         return(FALSE);
+
+      /* Assert BSY, then re-check SEL to detect a lost arbitration. */
+      WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) | SCSI_BSY));
+
+      if (RD_HARPOON(p_port+hp_scsisig) & SCSI_SEL) {
+
+         WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) &
+                                        ~SCSI_BSY));
+         return(FALSE);
+         }
+
+
+      WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) | SCSI_SEL));
+
+      if (RD_HARPOON(p_port+hp_scsidata_0) != 00) {
+
+         WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) &
+                                        ~(SCSI_BSY | SCSI_SEL)));
+         return(FALSE);
+         }
+      }
+
+
+   /* Won arbitration: enable SCAM mode, clear the data bus, assert MSG,
+      release BSY, and hold for the 250 ms SCAM selection period. */
+   WR_HARPOON(p_port+hp_clkctrl_0, (RD_HARPOON(p_port+hp_clkctrl_0)
+                                    & ~ACTdeassert));
+   WR_HARPOON(p_port+hp_scsireset, SCAM_EN);
+   WR_HARPOON(p_port+hp_scsidata_0, 0x00);
+#if defined(WIDE_SCSI)
+   WR_HARPOON(p_port+hp_scsidata_1, 0x00);
+#endif
+   WR_HARPOON(p_port+hp_portctrl_0, SCSI_BUS_EN);
+
+   WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) | SCSI_MSG));
+
+   WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig)
+                                  & ~SCSI_BSY));
+
+   Wait(p_port,TO_250ms);
+
+   return(TRUE);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scbusf
+ *
+ * Description: Release the SCSI bus and disable SCAM selection.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scbusf(USHORT p_port)
+#else
+void scbusf(ULONG p_port)
+#endif
+{
+   /* Release the SCSI bus after SCAM activity: drop all signals, leave
+      SCAM mode, and clear the stale interrupt status bits — all done
+      with global interrupts masked. */
+   WR_HARPOON(p_port+hp_page_ctrl,
+              (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE));
+
+
+   WR_HARPOON(p_port+hp_scsidata_0, 0x00);
+
+   WR_HARPOON(p_port+hp_portctrl_0, (RD_HARPOON(p_port+hp_portctrl_0)
+                                     & ~SCSI_BUS_EN));
+
+   WR_HARPOON(p_port+hp_scsisig, 0x00);
+
+
+   WR_HARPOON(p_port+hp_scsireset, (RD_HARPOON(p_port+hp_scsireset)
+                                    & ~SCAM_EN));
+
+   WR_HARPOON(p_port+hp_clkctrl_0, (RD_HARPOON(p_port+hp_clkctrl_0)
+                                    | ACTdeassert));
+
+#if defined(SCAM_LEV_2)
+   WRW_HARPOON((p_port+hp_intstat), (BUS_FREE | AUTO_INT | SCAM_SEL));
+#else
+   WRW_HARPOON((p_port+hp_intstat), (BUS_FREE | AUTO_INT));
+#endif
+
+   WR_HARPOON(p_port+hp_page_ctrl,
+              (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scasid
+ *
+ * Description: Assign an ID to all the SCAM devices.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scasid(UCHAR p_card, USHORT p_port)
+#else
+void scasid(UCHAR p_card, ULONG p_port)
+#endif
+{
+#if defined(DOS) || defined(OS2)
+   /* Use an externally defined buffer in global data space instead of
+      stack space.  Under WIN/95 DOS, TINY mode does not work: the OS does
+      not initialize SS equal to DS, so an array allocated on the stack
+      would not be accessed correctly. */
+#else
+   UCHAR temp_id_string[ID_STRING_LENGTH];
+#endif
+
+   UCHAR i,k,scam_id;
+   UCHAR crcBytes[3];
+   PNVRamInfo pCurrNvRam;
+   ushort_ptr pCrcBytes;
+
+   /* As the dominant master, repeatedly isolate one SCAM device at a
+      time (ASSIGN_ID cycles) and hand each a free SCSI ID until the
+      isolation stage returns no further responders. */
+   pCurrNvRam = BL_Card[p_card].pNvRamInfo;
+
+   i=FALSE;
+
+   while (!i)
+      {
+
+      for (k=0; k < ID_STRING_LENGTH; k++)
+         {
+         temp_id_string[k] = (UCHAR) 0x00;
+         }
+
+      scxferc(p_port,SYNC_PTRN);
+      scxferc(p_port,ASSIGN_ID);
+
+      if (!(sciso(p_port,&temp_id_string[0])))
+         {
+         if(pCurrNvRam){
+            /* Compress the ID string: keep byte 0, then store a 16-bit
+               CRC and an LRC of the full string in bytes 1..3. */
+            pCrcBytes = (ushort_ptr)&crcBytes[0];
+            *pCrcBytes = CalcCrc16(&temp_id_string[0]);
+            crcBytes[2] = CalcLrc(&temp_id_string[0]);
+            temp_id_string[1] = crcBytes[2];
+            temp_id_string[2] = crcBytes[0];
+            temp_id_string[3] = crcBytes[1];
+            for(k = 4; k < ID_STRING_LENGTH; k++)
+               temp_id_string[k] = (UCHAR) 0x00;
+         }
+         i = scmachid(p_card,temp_id_string);
+
+         if (i == CLR_PRIORITY)
+            {
+            scxferc(p_port,MISC_CODE);
+            scxferc(p_port,CLR_P_FLAG);
+            i = FALSE;                          /*Not the last ID yet. */
+            }
+
+         else if (i != NO_ID_AVAIL)
+            {
+            if (i < 8 )
+               scxferc(p_port,ID_0_7);
+            else
+               scxferc(p_port,ID_8_F);
+
+            scam_id = (i & (UCHAR) 0x07);
+
+
+            for (k=1; k < 0x08; k <<= 1)
+               if (!( k & i ))
+                  scam_id += 0x08;        /*Count number of zeros in DB0-3. */
+
+            scxferc(p_port,scam_id);
+
+            i = FALSE;                          /*Not the last ID yet. */
+            }
+         }
+
+      else
+         {
+         /* sciso() reported nobody answered: assignment is complete. */
+         i = TRUE;
+         }
+
+      }  /*End while */
+
+   scxferc(p_port,SYNC_PTRN);
+   scxferc(p_port,CFG_CMPLT);
+}
+
+
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsel
+ *
+ * Description: Select all the SCAM devices.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scsel(USHORT p_port)
+#else
+void scsel(ULONG p_port)
+#endif
+{
+
+   /* Perform the SCAM selection handshake on the control signals,
+      asserting DB7/DB6 and then walking SEL/BSY/IO/CD through the
+      sequence the SCAM protocol requires.  Signal order matters. */
+   WR_HARPOON(p_port+hp_scsisig, SCSI_SEL);
+   scwiros(p_port, SCSI_MSG);            /* Wait for MSG to deassert. */
+
+   WR_HARPOON(p_port+hp_scsisig, (SCSI_SEL | SCSI_BSY));
+
+
+   WR_HARPOON(p_port+hp_scsisig, (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+   WR_HARPOON(p_port+hp_scsidata_0, (UCHAR)(RD_HARPOON(p_port+hp_scsidata_0) |
+                                    (UCHAR)(BIT(7)+BIT(6))));
+
+
+   WR_HARPOON(p_port+hp_scsisig, (SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+   scwiros(p_port, SCSI_SEL);            /* Wait for SEL to deassert. */
+
+   WR_HARPOON(p_port+hp_scsidata_0, (UCHAR)(RD_HARPOON(p_port+hp_scsidata_0) &
+                                    ~(UCHAR)BIT(6)));
+   scwirod(p_port, BIT(6));              /* Wait for DB6 to be released. */
+
+   WR_HARPOON(p_port+hp_scsisig, (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scxferc
+ *
+ * Description: Handshake the p_data (DB4-0) across the bus.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR scxferc(USHORT p_port, UCHAR p_data)
+#else
+UCHAR scxferc(ULONG p_port, UCHAR p_data)
+#endif
+{
+   UCHAR curr_data, ret_data;
+
+   /* SCAM five-bit transfer cycle: drive p_data on DB4-0 while toggling
+      the DB7/DB6/DB5 handshake lines, and capture the wired-OR value the
+      other devices drive back.  Returns the received quintet (DB4-0). */
+   curr_data = p_data | BIT(7) | BIT(5); /*Start with DB7 & DB5 asserted. */
+
+   WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+   curr_data &= ~BIT(7);
+
+   WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+   scwirod(p_port,BIT(7));              /*Wait for DB7 to be released. */
+   while (!(RD_HARPOON(p_port+hp_scsidata_0) & BIT(5)));
+
+   ret_data = (RD_HARPOON(p_port+hp_scsidata_0) & (UCHAR) 0x1F);
+
+   curr_data |= BIT(6);
+
+   WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+   curr_data &= ~BIT(5);
+
+   WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+   scwirod(p_port,BIT(5));              /*Wait for DB5 to be released. */
+
+   curr_data &= ~(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0)); /*Release data bits */
+   curr_data |= BIT(7);
+
+   WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+   curr_data &= ~BIT(6);
+
+   WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+   scwirod(p_port,BIT(6));              /*Wait for DB6 to be released. */
+
+   return(ret_data);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsendi
+ *
+ * Description: Transfer our Identification string to determine if we
+ * will be the dominant master.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR scsendi(USHORT p_port, UCHAR p_id_string[])
+#else
+UCHAR scsendi(ULONG p_port, UCHAR p_id_string[])
+#endif
+{
+   UCHAR ret_data,byte_cnt,bit_cnt,defer;
+
+   /* Bit-serial isolation: send our ID string one bit per transfer
+      cycle.  Return 0x00 if we won dominance, 0x01 if we lost (deferred
+      out), 0xFF on a protocol error (caller retries). */
+   defer = FALSE;
+
+   for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
+
+      for (bit_cnt = 0x80; bit_cnt != 0 ; bit_cnt >>= 1) {
+
+         if (defer)
+            /* Already lost this round: just clock the bus quietly. */
+            ret_data = scxferc(p_port,00);
+
+         else if (p_id_string[byte_cnt] & bit_cnt)
+
+            ret_data = scxferc(p_port,02);     /* send a one bit  */
+
+         else {
+
+            ret_data = scxferc(p_port,01);     /* send a zero bit */
+            if (ret_data & 02)
+               defer = TRUE;   /* someone sent a one: they outrank us */
+            }
+
+         if ((ret_data & 0x1C) == 0x10)
+            return(0x00);  /*End of isolation stage, we won! */
+
+         if (ret_data & 0x1C)
+            return(0xFF);
+
+         if ((defer) && (!(ret_data & 0x1F)))
+            return(0x01);  /*End of isolation stage, we lost. */
+
+         } /*bit loop */
+
+      } /*byte loop */
+
+   if (defer)
+      return(0x01);    /*We lost */
+   else
+      return(0);       /*We WON! Yeeessss! */
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sciso
+ *
+ * Description: Transfer the Identification string.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR sciso(USHORT p_port, UCHAR p_id_string[])
+#else
+UCHAR sciso(ULONG p_port, UCHAR p_id_string[])
+#endif
+{
+   UCHAR ret_data,the_data,byte_cnt,bit_cnt;
+
+   /* Receive a responding device's ID string, one bit per SCAM transfer
+      cycle, into p_id_string.  Returns 0 on a full string, 0x00 when
+      the bus goes quiet after at least one byte (short string), and
+      0xFF when nobody responds or a protocol error occurs. */
+   the_data = 0;
+
+   for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
+
+      for (bit_cnt = 0; bit_cnt < 8; bit_cnt++) {
+
+         ret_data = scxferc(p_port,0);
+
+         if (ret_data & 0xFC)
+            return(0xFF);         /* unexpected bits above the data pair */
+
+         else {
+
+            the_data <<= 1;
+            if (ret_data & BIT(1)) {
+               the_data |= 1;
+               }
+            }
+
+         if ((ret_data & 0x1F) == 0)
+           {
+/*
+            if(bit_cnt != 0 || bit_cnt != 8)
+            {
+               byte_cnt = 0;
+               bit_cnt = 0;
+               scxferc(p_port, SYNC_PTRN);
+               scxferc(p_port, ASSIGN_ID);
+               continue;
+            }
+*/
+            /* Bus went quiet: success if we already captured a byte. */
+            if (byte_cnt)
+               return(0x00);
+            else
+               return(0xFF);
+           }
+
+         } /*bit loop */
+
+      p_id_string[byte_cnt] = the_data;
+
+      } /*byte loop */
+
+   return(0);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scwirod
+ *
+ * Description: Sample the SCSI data bus making sure the signal has been
+ * deasserted for the correct number of consecutive samples.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scwirod(USHORT p_port, UCHAR p_data_bit)
+#else
+void scwirod(ULONG p_port, UCHAR p_data_bit)
+#endif
+{
+   UCHAR i;
+
+   /* Spin until the given data-bus bit reads deasserted for MAX_SCSI_TAR
+      consecutive samples (the counter resets whenever the wired-OR line
+      is seen asserted again).  No timeout — relies on the bus settling. */
+   i = 0;
+   while ( i < MAX_SCSI_TAR ) {
+
+      if (RD_HARPOON(p_port+hp_scsidata_0) & p_data_bit)
+
+         i = 0;
+
+      else
+
+         i++;
+
+      }
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scwiros
+ *
+ * Description: Sample the SCSI Signal lines making sure the signal has been
+ * deasserted for the correct number of consecutive samples.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scwiros(USHORT p_port, UCHAR p_data_bit)
+#else
+void scwiros(ULONG p_port, UCHAR p_data_bit)
+#endif
+{
+   UCHAR i;
+
+   /* Same consecutive-sample debounce as scwirod(), but on the SCSI
+      signal register (hp_scsisig) rather than the data bus. */
+   i = 0;
+   while ( i < MAX_SCSI_TAR ) {
+
+      if (RD_HARPOON(p_port+hp_scsisig) & p_data_bit)
+
+         i = 0;
+
+      else
+
+         i++;
+
+      }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scvalq
+ *
+ * Description: Make sure we received a valid data byte.
+ *
+ *---------------------------------------------------------------------*/
+
+UCHAR scvalq(UCHAR p_quintet)
+{
+   UCHAR count;
+
+   /* Validate a received SCAM quintet.  NOTE(review): for each zero in
+      bits 0-2, 0x80 is subtracted (an unsigned wrap), which together with
+      the 0x18 mask below implements the parity/validity rule for quintet
+      encoding — confirm against the SCAM quintet table. */
+   for (count=1; count < 0x08; count<<=1) {
+      if (!(p_quintet & count))
+         p_quintet -= 0x80;
+      }
+
+   if (p_quintet & 0x18)
+      return(FALSE);
+
+   else
+      return(TRUE);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsell
+ *
+ * Description: Select the specified device ID using a selection timeout
+ * less than 4ms. If somebody responds then it is a legacy
+ * drive and this ID must be marked as such.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR scsell(USHORT p_port, UCHAR targ_id)
+#else
+UCHAR scsell(ULONG p_port, UCHAR targ_id)
+#endif
+{
+#if defined(DOS)
+   USHORT i;
+#else
+   ULONG i;
+#endif
+
+   /* Probe targ_id with a short (4 ms) selection timeout: a response
+      means a legacy (non-SCAM) device lives at that ID.  Returns TRUE
+      for a legacy device, FALSE on selection timeout. */
+   WR_HARPOON(p_port+hp_page_ctrl,
+              (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE));
+
+   ARAM_ACCESS(p_port);
+
+   /* Temporarily switch the selection timeout to the SCAM 4 ms value. */
+   WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) | SCAM_TIMER));
+   WR_HARPOON(p_port+hp_seltimeout,TO_4ms);
+
+
+   /* Load a minimal auto-sequence: six no-op command slots then halt. */
+   for (i = p_port+CMD_STRT; i < p_port+CMD_STRT+12; i+=2) {
+      WRW_HARPOON(i, (MPM_OP+ACOMMAND));
+      }
+   WRW_HARPOON(i, (BRH_OP+ALWAYS+ NP));
+
+   WRW_HARPOON((p_port+hp_intstat),
+               (RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT));
+
+   WR_HARPOON(p_port+hp_select_id, targ_id);
+
+   WR_HARPOON(p_port+hp_portctrl_0, SCSI_PORT);
+   WR_HARPOON(p_port+hp_autostart_3, (SELECT | CMD_ONLY_STRT));
+   WR_HARPOON(p_port+hp_scsictrl_0, (SEL_TAR | ENA_RESEL));
+
+
+   while (!(RDW_HARPOON((p_port+hp_intstat)) &
+            (RESET | PROG_HLT | TIMEOUT | AUTO_INT))) {}
+
+   if (RDW_HARPOON((p_port+hp_intstat)) & RESET)
+      Wait(p_port, TO_250ms);
+
+   DISABLE_AUTO(p_port);
+
+   /* Restore the normal 290 ms selection timeout. */
+   WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) & ~SCAM_TIMER));
+   WR_HARPOON(p_port+hp_seltimeout,TO_290ms);
+
+   SGRAM_ACCESS(p_port);
+
+   if (RDW_HARPOON((p_port+hp_intstat)) & (RESET | TIMEOUT) ) {
+
+      WRW_HARPOON((p_port+hp_intstat),
+                  (RESET | TIMEOUT | SEL | BUS_FREE | PHASE));
+
+      WR_HARPOON(p_port+hp_page_ctrl,
+                 (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+      return(FALSE);  /*No legacy device */
+      }
+
+   else {
+
+      /* Somebody answered: reject any REQs until the target drops off. */
+      while(!(RDW_HARPOON((p_port+hp_intstat)) & BUS_FREE)) {
+			if (RD_HARPOON(p_port+hp_scsisig) & SCSI_REQ)
+			{
+				WR_HARPOON(p_port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+      			ACCEPT_MSG(p_port);
+			}
+		}
+
+      WRW_HARPOON((p_port+hp_intstat), CLR_ALL_INT_1);
+
+      WR_HARPOON(p_port+hp_page_ctrl,
+                 (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+      return(TRUE);  /*Found one of them oldies! */
+      }
+}
+
+#if defined(DOS)
+/*---------------------------------------------------------------------
+ *
+ * Function: scsell for DOS
+ *
+ * Description: Select the specified device ID using a selection timeout
+ *              less than 2ms.  This was specially required to solve
+ *              the problem with Plextor 12X CD-ROM drive. This drive
+ *              was responding the Selection at the end of 4ms and
+ *              hanging the system.
+ *
+ *---------------------------------------------------------------------*/
+
+UCHAR scsellDOS(USHORT p_port, UCHAR targ_id)
+{
+   USHORT i;
+
+   /* DOS-only variant of scsell() that uses a 2 ms selection timeout
+      instead of 4 ms (Plextor 12X workaround); the register sequence is
+      otherwise identical to scsell() above. */
+   WR_HARPOON(p_port+hp_page_ctrl,
+              (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE));
+
+   ARAM_ACCESS(p_port);
+
+   WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) | SCAM_TIMER));
+   WR_HARPOON(p_port+hp_seltimeout,TO_2ms);
+
+
+   for (i = p_port+CMD_STRT; i < p_port+CMD_STRT+12; i+=2) {
+      WRW_HARPOON(i, (MPM_OP+ACOMMAND));
+      }
+   WRW_HARPOON(i, (BRH_OP+ALWAYS+ NP));
+
+   WRW_HARPOON((p_port+hp_intstat),
+               (RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT));
+
+   WR_HARPOON(p_port+hp_select_id, targ_id);
+
+   WR_HARPOON(p_port+hp_portctrl_0, SCSI_PORT);
+   WR_HARPOON(p_port+hp_autostart_3, (SELECT | CMD_ONLY_STRT));
+   WR_HARPOON(p_port+hp_scsictrl_0, (SEL_TAR | ENA_RESEL));
+
+
+   while (!(RDW_HARPOON((p_port+hp_intstat)) &
+            (RESET | PROG_HLT | TIMEOUT | AUTO_INT))) {}
+
+   if (RDW_HARPOON((p_port+hp_intstat)) & RESET)
+      Wait(p_port, TO_250ms);
+
+   DISABLE_AUTO(p_port);
+
+   WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) & ~SCAM_TIMER));
+   WR_HARPOON(p_port+hp_seltimeout,TO_290ms);
+
+   SGRAM_ACCESS(p_port);
+
+   if (RDW_HARPOON((p_port+hp_intstat)) & (RESET | TIMEOUT) ) {
+
+      WRW_HARPOON((p_port+hp_intstat),
+                  (RESET | TIMEOUT | SEL | BUS_FREE | PHASE));
+
+      WR_HARPOON(p_port+hp_page_ctrl,
+                 (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+      return(FALSE);  /*No legacy device */
+      }
+
+   else {
+
+      while(!(RDW_HARPOON((p_port+hp_intstat)) & BUS_FREE)) {
+			if (RD_HARPOON(p_port+hp_scsisig) & SCSI_REQ)
+			{
+				WR_HARPOON(p_port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+      			ACCEPT_MSG(p_port);
+			}
+		}
+
+      WRW_HARPOON((p_port+hp_intstat), CLR_ALL_INT_1);
+
+      WR_HARPOON(p_port+hp_page_ctrl,
+                 (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+      return(TRUE);  /*Found one of them oldies! */
+      }
+}
+#endif  /* DOS */
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scwtsel
+ *
+ * Description: Wait to be selected by another SCAM initiator.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scwtsel(USHORT p_port)
+#else
+void scwtsel(ULONG p_port)
+#endif
+{
+   /* Busy-wait until the chip flags a SCAM selection (no timeout). */
+   while(!(RDW_HARPOON((p_port+hp_intstat)) & SCAM_SEL)) {}
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: inisci
+ *
+ * Description: Setup the data Structure with the info from the EEPROM.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void inisci(UCHAR p_card, USHORT p_port, UCHAR p_our_id)
+#else
+void inisci(UCHAR p_card, ULONG p_port, UCHAR p_our_id)
+#endif
+{
+   UCHAR i,k,max_id;
+   USHORT ee_data;
+   PNVRamInfo pCurrNvRam;
+
+   /* Populate the scamInfo[] table from NVRAM (4 stored bytes per ID,
+      rest zero-filled) or from the EEPROM (full strings, read as 16-bit
+      words), then install our own host-adapter ID string. */
+   pCurrNvRam = BL_Card[p_card].pNvRamInfo;
+
+   /* Narrow cards only track 8 IDs; wide cards track 16. */
+   if (RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD)
+      max_id = 0x08;
+
+   else
+      max_id = 0x10;
+
+   if(pCurrNvRam){
+      for(i = 0; i < max_id; i++){
+
+         for(k = 0; k < 4; k++)
+            scamInfo[i].id_string[k] = pCurrNvRam->niScamTbl[i][k];
+         for(k = 4; k < ID_STRING_LENGTH; k++)
+            scamInfo[i].id_string[k] = (UCHAR) 0x00;
+
+         if(scamInfo[i].id_string[0] == 0x00)
+            scamInfo[i].state = ID_UNUSED;  /*Default to unused ID. */
+         else
+            scamInfo[i].state = ID_UNASSIGNED;  /*Default to unassigned ID. */
+
+      }
+   }else {
+      for (i=0; i < max_id; i++)
+         {
+         for (k=0; k < ID_STRING_LENGTH; k+=2)
+            {
+            ee_data = utilEERead(p_port, (USHORT)((EE_SCAMBASE/2) +
+               (USHORT) (i*((USHORT)ID_STRING_LENGTH/2)) + (USHORT)(k/2)));
+            scamInfo[i].id_string[k] = (UCHAR) ee_data;
+            ee_data >>= 8;
+            scamInfo[i].id_string[k+1] = (UCHAR) ee_data;
+            }
+
+         if ((scamInfo[i].id_string[0] == 0x00) ||
+             (scamInfo[i].id_string[0] == 0xFF))
+
+            scamInfo[i].state = ID_UNUSED;   /*Default to unused ID. */
+
+         else
+            scamInfo[i].state = ID_UNASSIGNED;  /*Default to unassigned ID. */
+
+         }
+      }
+   for(k = 0; k < ID_STRING_LENGTH; k++)
+      scamInfo[p_our_id].id_string[k] = scamHAString[k];
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scmachid
+ *
+ * Description: Match the Device ID string with our values stored in
+ * the EEPROM.
+ *
+ *---------------------------------------------------------------------*/
+
+UCHAR scmachid(UCHAR p_card, UCHAR p_id_string[])
+{
+
+   UCHAR i,k,match;
+
+
+   /* Match an isolated device's ID string against scamInfo[] and pick a
+      SCSI ID for it.  Returns the assigned ID, CLR_PRIORITY when the
+      device's priority flag must be cleared first, or NO_ID_AVAIL. */
+
+   /* Pass 1: exact match against an already-known string. */
+   for (i=0; i < MAX_SCSI_TAR; i++) {
+
+#if !defined(SCAM_LEV_2)
+      if (scamInfo[i].state == ID_UNASSIGNED)
+         {
+#endif
+         match = TRUE;
+
+         for (k=0; k < ID_STRING_LENGTH; k++)
+            {
+            if (p_id_string[k] != scamInfo[i].id_string[k])
+               match = FALSE;
+            }
+
+         if (match)
+            {
+            scamInfo[i].state = ID_ASSIGNED;
+            return(i);
+            }
+
+#if !defined(SCAM_LEV_2)
+         }
+#endif
+
+      }
+
+
+
+   /* Determine the search window (8 or 16 IDs — bit 5 of byte 0 appears
+      to flag an 8-bit-only device) and the preferred starting ID taken
+      from the type/priority bits in the ID string. */
+   if (p_id_string[0] & BIT(5))
+      i = 8;
+   else
+      i = MAX_SCSI_TAR;
+
+   if (((p_id_string[0] & 0x06) == 0x02) || ((p_id_string[0] & 0x06) == 0x04))
+      match = p_id_string[1] & (UCHAR) 0x1F;
+   else
+      match = 7;
+
+   /* Pass 2: scan downward (wrapping) for an UNUSED slot. */
+   while (i > 0)
+      {
+      i--;
+
+      if (scamInfo[match].state == ID_UNUSED)
+         {
+         for (k=0; k < ID_STRING_LENGTH; k++)
+            {
+            scamInfo[match].id_string[k] = p_id_string[k];
+            }
+
+         scamInfo[match].state = ID_ASSIGNED;
+
+         if(BL_Card[p_card].pNvRamInfo == NULL)
+            BL_Card[p_card].globalFlags |= F_UPDATE_EEPROM;
+         return(match);
+
+         }
+
+
+      match--;
+
+      if (match == 0xFF)
+         {
+         if (p_id_string[0] & BIT(5))
+            match = 7;
+         else
+            match = MAX_SCSI_TAR-1;
+         }
+      }
+
+
+
+   /* No unused slot: if the device holds the priority flag, ask the
+      caller to clear it and retry before stealing an unassigned slot. */
+   if (p_id_string[0] & BIT(7))
+      {
+      return(CLR_PRIORITY);
+      }
+
+
+   /* Pass 3: same downward scan, but accept UNASSIGNED slots and mark
+      the stored string's priority bit. */
+   if (p_id_string[0] & BIT(5))
+      i = 8;
+   else
+      i = MAX_SCSI_TAR;
+
+   if (((p_id_string[0] & 0x06) == 0x02) || ((p_id_string[0] & 0x06) == 0x04))
+      match = p_id_string[1] & (UCHAR) 0x1F;
+   else
+      match = 7;
+
+   while (i > 0)
+      {
+
+      i--;
+
+      if (scamInfo[match].state == ID_UNASSIGNED)
+         {
+         for (k=0; k < ID_STRING_LENGTH; k++)
+            {
+            scamInfo[match].id_string[k] = p_id_string[k];
+            }
+
+         scamInfo[match].id_string[0] |= BIT(7);
+         scamInfo[match].state = ID_ASSIGNED;
+         if(BL_Card[p_card].pNvRamInfo == NULL)
+            BL_Card[p_card].globalFlags |= F_UPDATE_EEPROM;
+         return(match);
+
+         }
+
+
+      match--;
+
+      if (match == 0xFF)
+         {
+         if (p_id_string[0] & BIT(5))
+            match = 7;
+         else
+            match = MAX_SCSI_TAR-1;
+         }
+      }
+
+   return(NO_ID_AVAIL);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsavdi
+ *
+ * Description: Save off the device SCAM ID strings.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scsavdi(UCHAR p_card, USHORT p_port)
+#else
+void scsavdi(UCHAR p_card, ULONG p_port)
+#endif
+{
+ UCHAR i,k,max_id;
+ USHORT ee_data,sum_data;
+
+
+ sum_data = 0x0000;
+
+ /* Re-sum the non-SCAM EEPROM words (word 0, the checksum slot
+ itself, is skipped) so the checksum written below covers the
+ whole device. */
+ for (i = 1; i < EE_SCAMBASE/2; i++)
+ {
+ sum_data += utilEERead(p_port, i);
+ }
+
+
+ utilEEWriteOnOff(p_port,1); /* Enable write access to the EEPROM */
+
+ /* Narrow cards store 8 ID strings, wide cards 16. */
+ if (RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_id = 0x08;
+
+ else
+ max_id = 0x10;
+
+ for (i=0; i < max_id; i++)
+ {
+
+ /* Pack two string bytes per EEPROM word, low byte first,
+ folding each word into the running checksum. */
+ for (k=0; k < ID_STRING_LENGTH; k+=2)
+ {
+ ee_data = scamInfo[i].id_string[k+1];
+ ee_data <<= 8;
+ ee_data |= scamInfo[i].id_string[k];
+ sum_data += ee_data;
+ utilEEWrite(p_port, ee_data, (USHORT)((EE_SCAMBASE/2) +
+ (USHORT)(i*((USHORT)ID_STRING_LENGTH/2)) + (USHORT)(k/2)));
+ }
+ }
+
+
+ utilEEWrite(p_port, sum_data, EEPROM_CHECK_SUM/2);
+ utilEEWriteOnOff(p_port,0); /* Turn off write access */
+}
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: diagnose.c $
+ *
+ * Description: Diagnostic functions for testing the integrity of
+ * the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <eeprom.h>*/
+/*#include <harpoon.h>*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: XbowInit
+ *
+ * Description: Setup the Xbow for normal operation.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void XbowInit(USHORT port, UCHAR ScamFlg)
+#else
+void XbowInit(ULONG port, UCHAR ScamFlg)
+#endif
+{
+UCHAR i;
+
+ /* Save page control and mask global interrupts while we reset. */
+ i = RD_HARPOON(port+hp_page_ctrl);
+ WR_HARPOON(port+hp_page_ctrl, (UCHAR) (i | G_INT_DISABLE));
+
+ WR_HARPOON(port+hp_scsireset,0x00);
+ WR_HARPOON(port+hp_portctrl_1,HOST_MODE8);
+
+ /* Pulse the internal reset lines, then bring the SCSI core up. */
+ WR_HARPOON(port+hp_scsireset,(DMA_RESET | HPSCSI_RESET | PROG_RESET | \
+ FIFO_CLR));
+
+ WR_HARPOON(port+hp_scsireset,SCSI_INI);
+
+ WR_HARPOON(port+hp_clkctrl_0,CLKCTRL_DEFAULT);
+
+ WR_HARPOON(port+hp_scsisig,0x00); /* Clear any signals we might */
+ WR_HARPOON(port+hp_scsictrl_0,ENA_SCAM_SEL);
+
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+ /* Build the default interrupt-enable mask; SCAM selection
+ interrupts are only enabled for level-2 SCAM builds. */
+#if defined(SCAM_LEV_2)
+ default_intena = RESET | RSEL | PROG_HLT | TIMEOUT |
+ BUS_FREE | XFER_CNT_0 | AUTO_INT;
+
+ if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2))
+ default_intena |= SCAM_SEL;
+
+#else
+ default_intena = RESET | RSEL | PROG_HLT | TIMEOUT |
+ BUS_FREE | XFER_CNT_0 | AUTO_INT;
+#endif
+ WRW_HARPOON((port+hp_intena), default_intena);
+
+ WR_HARPOON(port+hp_seltimeout,TO_290ms);
+
+ /* Turn on SCSI_MODE8 for narrow cards to fix the
+ strapping issue with the DUAL CHANNEL card */
+ if (RD_HARPOON(port+hp_page_ctrl) & NARROW_SCSI_CARD)
+ WR_HARPOON(port+hp_addstat,SCSI_MODE8);
+
+#if defined(NO_BIOS_OPTION)
+
+ /* Without a BIOS, force every target to async narrow transfers. */
+ WR_HARPOON(port+hp_synctarg_0,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_1,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_2,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_3,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_4,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_5,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_6,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_7,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_8,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_9,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_10,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_11,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_12,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_13,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_14,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_15,NARROW_SCSI);
+
+#endif
+ /* Restore the saved page control (re-enables global interrupts
+ if they were enabled on entry). */
+ WR_HARPOON(port+hp_page_ctrl, i);
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMasterInit
+ *
+ * Description: Initialize the BusMaster for normal operations.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void BusMasterInit(USHORT p_port)
+#else
+void BusMasterInit(ULONG p_port)
+#endif
+{
+
+
+ /* Pulse the bus-master reset, then release it. */
+ WR_HARPOON(p_port+hp_sys_ctrl, DRVR_RST);
+ WR_HARPOON(p_port+hp_sys_ctrl, 0x00);
+
+ WR_HARPOON(p_port+hp_host_blk_cnt, XFER_BLK64);
+
+
+ WR_HARPOON(p_port+hp_bm_ctrl, (BMCTRL_DEFAULT));
+
+ WR_HARPOON(p_port+hp_ee_ctrl, (SCSI_TERM_ENA_H));
+
+
+#if defined(NT)
+
+ /* NT builds run the chip in I/O space only. */
+ WR_HARPOON(p_port+hp_pci_cmd_cfg, (RD_HARPOON(p_port+hp_pci_cmd_cfg)
+ & ~MEM_SPACE_ENA));
+
+#endif
+
+ RD_HARPOON(p_port+hp_int_status); /*Clear interrupts. */
+ WR_HARPOON(p_port+hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
+ WR_HARPOON(p_port+hp_page_ctrl, (RD_HARPOON(p_port+hp_page_ctrl) &
+ ~SCATTER_EN));
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: DiagXbow
+ *
+ * Description: Test Xbow integrity. Non-zero return indicates an error.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+int DiagXbow(USHORT port)
+#else
+int DiagXbow(ULONG port)
+#endif
+{
+ unsigned char fifo_cnt,loop_cnt;
+
+ /* Walking-bit and fixed test patterns for the FIFO exercise;
+ fifodata[4] doubles as the walking bit for the self-ID test. */
+ unsigned char fifodata[5];
+ fifodata[0] = 0x00;
+ fifodata[1] = 0xFF;
+ fifodata[2] = 0x55;
+ fifodata[3] = 0xAA;
+ fifodata[4] = 0x00;
+
+
+ /* Run with all interrupts masked; a short selection timeout is
+ started so the chip is busy while we poke registers. */
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+ WRW_HARPOON((port+hp_intena), 0x0000);
+
+ WR_HARPOON(port+hp_seltimeout,TO_5ms);
+
+ WR_HARPOON(port+hp_portctrl_0,START_TO);
+
+
+ /* Walk a single bit through both self-ID registers and read it
+ back; any mismatch fails the diagnostic. */
+ for(fifodata[4] = 0x01; fifodata[4] != (UCHAR) 0; fifodata[4] = fifodata[4] << 1) {
+
+ WR_HARPOON(port+hp_selfid_0,fifodata[4]);
+ WR_HARPOON(port+hp_selfid_1,fifodata[4]);
+
+ if ((RD_HARPOON(port+hp_selfid_0) != fifodata[4]) ||
+ (RD_HARPOON(port+hp_selfid_1) != fifodata[4]))
+ return(1);
+ }
+
+
+ /* Fill the FIFO with each test pattern, then drain and verify. */
+ for(loop_cnt = 0; loop_cnt < 4; loop_cnt++) {
+
+ WR_HARPOON(port+hp_portctrl_0,(HOST_PORT | HOST_WRT | START_TO));
+
+
+ for (fifo_cnt = 0; fifo_cnt < FIFO_LEN; fifo_cnt++) {
+
+ WR_HARPOON(port+hp_fifodata_0, fifodata[loop_cnt]);
+ }
+
+
+ if (!(RD_HARPOON(port+hp_xferstat) & FIFO_FULL))
+ return(1);
+
+
+ WR_HARPOON(port+hp_portctrl_0,(HOST_PORT | START_TO));
+
+ for (fifo_cnt = 0; fifo_cnt < FIFO_LEN; fifo_cnt++) {
+
+ if (RD_HARPOON(port+hp_fifodata_0) != fifodata[loop_cnt])
+ return(1);
+ }
+
+
+ if (!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY))
+ return(1);
+ }
+
+
+ /* Let the 5ms selection timeout expire before restoring state. */
+ while(!(RDW_HARPOON((port+hp_intstat)) & TIMEOUT)) {}
+
+
+ WR_HARPOON(port+hp_seltimeout,TO_290ms);
+
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+ WRW_HARPOON((port+hp_intena), default_intena);
+
+ return(0);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: DiagBusMaster
+ *
+ * Description: Test BusMaster integrity. Non-zero return indicates an
+ * error.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+int DiagBusMaster(USHORT port)
+#else
+int DiagBusMaster(ULONG port)
+#endif
+{
+ UCHAR testdata;
+
+ /* Walk a single bit through every transfer-count and host-address
+ register and verify each one reads back; any stuck bit fails. */
+ for(testdata = (UCHAR) 1; testdata != (UCHAR)0; testdata = testdata << 1) {
+
+ WR_HARPOON(port+hp_xfer_cnt_lo,testdata);
+ WR_HARPOON(port+hp_xfer_cnt_mi,testdata);
+ WR_HARPOON(port+hp_xfer_cnt_hi,testdata);
+ WR_HARPOON(port+hp_host_addr_lo,testdata);
+ WR_HARPOON(port+hp_host_addr_lmi,testdata);
+ WR_HARPOON(port+hp_host_addr_hmi,testdata);
+ WR_HARPOON(port+hp_host_addr_hi,testdata);
+
+ if ((RD_HARPOON(port+hp_xfer_cnt_lo) != testdata) ||
+ (RD_HARPOON(port+hp_xfer_cnt_mi) != testdata) ||
+ (RD_HARPOON(port+hp_xfer_cnt_hi) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_lo) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_lmi) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_hmi) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_hi) != testdata))
+
+ return(1);
+ }
+ RD_HARPOON(port+hp_int_status); /*Clear interrupts. */
+ return(0);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: DiagEEPROM
+ *
+ * Description: Verify checksum and 'Key' and initialize the EEPROM if
+ * necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void DiagEEPROM(USHORT p_port)
+#else
+void DiagEEPROM(ULONG p_port)
+#endif
+
+{
+ USHORT index,temp,max_wd_cnt;
+
+ /* Wide cards carry a double-size EEPROM image. */
+ if (RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_wd_cnt = EEPROM_WD_CNT;
+ else
+ max_wd_cnt = EEPROM_WD_CNT * 2;
+
+ temp = utilEERead(p_port, FW_SIGNATURE/2);
+
+ /* 0x4641 is the firmware signature word ("AF" little-endian);
+ if it and the word-sum checksum match, the EEPROM is good. */
+ if (temp == 0x4641) {
+
+ for (index = 2; index < max_wd_cnt; index++) {
+
+ temp += utilEERead(p_port, index);
+
+ }
+
+ if (temp == utilEERead(p_port, EEPROM_CHECK_SUM/2)) {
+
+ return; /*EEPROM is Okay so return now! */
+ }
+ }
+
+
+ /* Bad or blank EEPROM: erase it and program factory defaults,
+ accumulating the checksum in temp as each word is written. */
+ utilEEWriteOnOff(p_port,(UCHAR)1);
+
+ for (index = 0; index < max_wd_cnt; index++) {
+
+ utilEEWrite(p_port, 0x0000, index);
+ }
+
+ temp = 0;
+
+ utilEEWrite(p_port, 0x4641, FW_SIGNATURE/2);
+ temp += 0x4641;
+ utilEEWrite(p_port, 0x3920, MODEL_NUMB_0/2);
+ temp += 0x3920;
+ utilEEWrite(p_port, 0x3033, MODEL_NUMB_2/2);
+ temp += 0x3033;
+ utilEEWrite(p_port, 0x2020, MODEL_NUMB_4/2);
+ temp += 0x2020;
+ utilEEWrite(p_port, 0x70D3, SYSTEM_CONFIG/2);
+ temp += 0x70D3;
+ utilEEWrite(p_port, 0x0010, BIOS_CONFIG/2);
+ temp += 0x0010;
+ utilEEWrite(p_port, 0x0003, SCAM_CONFIG/2);
+ temp += 0x0003;
+ utilEEWrite(p_port, 0x0007, ADAPTER_SCSI_ID/2);
+ temp += 0x0007;
+
+ utilEEWrite(p_port, 0x0000, IGNORE_B_SCAN/2);
+ temp += 0x0000;
+ utilEEWrite(p_port, 0x0000, SEND_START_ENA/2);
+ temp += 0x0000;
+ utilEEWrite(p_port, 0x0000, DEVICE_ENABLE/2);
+ temp += 0x0000;
+
+ /* Default sync-rate table: 0x42 per target nibble-pair. */
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL01/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL23/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL45/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL67/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL89/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLab/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLcd/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLef/2);
+ temp += 0x4242;
+
+
+ /* ASCII product-ID words spell "FlashPoint LT  ". */
+ utilEEWrite(p_port, 0x6C46, 64/2); /*PRODUCT ID */
+ temp += 0x6C46;
+ utilEEWrite(p_port, 0x7361, 66/2); /* FlashPoint LT */
+ temp += 0x7361;
+ utilEEWrite(p_port, 0x5068, 68/2);
+ temp += 0x5068;
+ utilEEWrite(p_port, 0x696F, 70/2);
+ temp += 0x696F;
+ utilEEWrite(p_port, 0x746E, 72/2);
+ temp += 0x746E;
+ utilEEWrite(p_port, 0x4C20, 74/2);
+ temp += 0x4C20;
+ utilEEWrite(p_port, 0x2054, 76/2);
+ temp += 0x2054;
+ utilEEWrite(p_port, 0x2020, 78/2);
+ temp += 0x2020;
+
+ /* Default SCAM entry for the adapter itself at ID 7. */
+ index = ((EE_SCAMBASE/2)+(7*16));
+ utilEEWrite(p_port, (0x0700+TYPE_CODE0), index);
+ temp += (0x0700+TYPE_CODE0);
+ index++;
+ utilEEWrite(p_port, 0x5542, index); /*Vendor ID code */
+ temp += 0x5542; /* BUSLOGIC */
+ index++;
+ utilEEWrite(p_port, 0x4C53, index);
+ temp += 0x4C53;
+ index++;
+ utilEEWrite(p_port, 0x474F, index);
+ temp += 0x474F;
+ index++;
+ utilEEWrite(p_port, 0x4349, index);
+ temp += 0x4349;
+ index++;
+ utilEEWrite(p_port, 0x5442, index); /*Vendor unique code */
+ temp += 0x5442; /* BT- 930 */
+ index++;
+ utilEEWrite(p_port, 0x202D, index);
+ temp += 0x202D;
+ index++;
+ utilEEWrite(p_port, 0x3339, index);
+ temp += 0x3339;
+ index++; /*Serial # */
+ utilEEWrite(p_port, 0x2030, index); /* 01234567 */
+ temp += 0x2030;
+ index++;
+ utilEEWrite(p_port, 0x5453, index);
+ temp += 0x5453;
+ index++;
+ utilEEWrite(p_port, 0x5645, index);
+ temp += 0x5645;
+ index++;
+ utilEEWrite(p_port, 0x2045, index);
+ temp += 0x2045;
+ index++;
+ utilEEWrite(p_port, 0x202F, index);
+ temp += 0x202F;
+ index++;
+ utilEEWrite(p_port, 0x4F4A, index);
+ temp += 0x4F4A;
+ index++;
+ utilEEWrite(p_port, 0x204E, index);
+ temp += 0x204E;
+ index++;
+ utilEEWrite(p_port, 0x3539, index);
+ temp += 0x3539;
+
+
+
+ /* Seal the image with the accumulated checksum. */
+ utilEEWrite(p_port, temp, EEPROM_CHECK_SUM/2);
+
+ utilEEWriteOnOff(p_port,(UCHAR)0);
+
+}
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: utility.c $
+ *
+ * Description: Utility functions relating to queueing and EEPROM
+ * manipulation and any other garbage functions.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern unsigned int SccbGlobalFlags;
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Search Select
+ *
+ * Description: Try to find a new command to execute.
+ *
+ *---------------------------------------------------------------------*/
+
+/* queueSearchSelect: round-robin scan of the per-target selection
+ * queues starting at pCurrCard->scanIndex; the first runnable SCCB
+ * found is dequeued into pCurrCard->currentSCCB and F_NEW_SCCB_CMD is
+ * raised.  With F_CONLUN_IO (and no tagged queueing on the target)
+ * the scan also searches for the first non-busy LUN with a queued
+ * command. */
+void queueSearchSelect(PSCCBcard pCurrCard, UCHAR p_card)
+{
+ UCHAR scan_ptr, lun;
+ PSCCBMgr_tar_info currTar_Info;
+ PSCCB pOldSccb;
+
+ scan_ptr = pCurrCard->scanIndex;
+ do
+ {
+ currTar_Info = &sccbMgrTbl[p_card][scan_ptr];
+ if((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ {
+ if (currTar_Info->TarSelQ_Cnt != 0)
+ {
+
+ /* Advance the round-robin pointer before searching. */
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ scan_ptr = 0;
+
+ for(lun=0; lun < MAX_LUN; lun++)
+ {
+ if(currTar_Info->TarLUNBusy[lun] == FALSE)
+ {
+
+ /* Walk the queue for an SCCB addressed to this
+ LUN; pOldSccb trails as its predecessor. */
+ pCurrCard->currentSCCB = currTar_Info->TarSelQ_Head;
+ pOldSccb = NULL;
+
+ while((pCurrCard->currentSCCB != NULL) &&
+ (lun != pCurrCard->currentSCCB->Lun))
+ {
+ pOldSccb = pCurrCard->currentSCCB;
+ pCurrCard->currentSCCB = (PSCCB)(pCurrCard->currentSCCB)->
+ Sccb_forwardlink;
+ }
+ if(pCurrCard->currentSCCB == NULL)
+ continue;
+ if(pOldSccb != NULL)
+ {
+ /* Unlink from mid-queue.  NOTE(review): the
+ second assignment writes the predecessor's
+ backlink (with its own address) instead of
+ fixing the successor's backlink -- looks
+ wrong for a doubly-linked list; confirm
+ against the upstream FlashPoint driver. */
+ pOldSccb->Sccb_forwardlink = (PSCCB)(pCurrCard->currentSCCB)->
+ Sccb_forwardlink;
+ pOldSccb->Sccb_backlink = (PSCCB)(pCurrCard->currentSCCB)->
+ Sccb_backlink;
+ currTar_Info->TarSelQ_Cnt--;
+ }
+ else
+ {
+ /* Unlink from the head of the queue. */
+ currTar_Info->TarSelQ_Head = (PSCCB)(pCurrCard->currentSCCB)->Sccb_forwardlink;
+
+ if (currTar_Info->TarSelQ_Head == NULL)
+ {
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarSelQ_Cnt = 0;
+ }
+ else
+ {
+ currTar_Info->TarSelQ_Cnt--;
+ currTar_Info->TarSelQ_Head->Sccb_backlink = (PSCCB)NULL;
+ }
+ }
+ /* Storing scan_ptr here also terminates the outer
+ do/while (its condition compares against it). */
+ pCurrCard->scanIndex = scan_ptr;
+
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+
+ break;
+ }
+ }
+ }
+
+ else
+ {
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR) {
+ scan_ptr = 0;
+ }
+ }
+
+ }
+ else
+ {
+ /* Non-LUN-qualified path: take the queue head if the
+ target's LUN 0 is idle. */
+ if ((currTar_Info->TarSelQ_Cnt != 0) &&
+ (currTar_Info->TarLUNBusy[0] == FALSE))
+ {
+
+ pCurrCard->currentSCCB = currTar_Info->TarSelQ_Head;
+
+ currTar_Info->TarSelQ_Head = (PSCCB)(pCurrCard->currentSCCB)->Sccb_forwardlink;
+
+ if (currTar_Info->TarSelQ_Head == NULL)
+ {
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarSelQ_Cnt = 0;
+ }
+ else
+ {
+ currTar_Info->TarSelQ_Cnt--;
+ currTar_Info->TarSelQ_Head->Sccb_backlink = (PSCCB)NULL;
+ }
+
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ scan_ptr = 0;
+
+ pCurrCard->scanIndex = scan_ptr;
+
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+
+ break;
+ }
+
+ else
+ {
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ {
+ scan_ptr = 0;
+ }
+ }
+ }
+ } while (scan_ptr != pCurrCard->scanIndex);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Select Fail
+ *
+ * Description: Add the current SCCB to the head of the Queue.
+ *
+ *---------------------------------------------------------------------*/
+
+/* queueSelectFail: selection failed -- push the card's current SCCB
+ * back onto the HEAD of its target's selection queue so it is retried
+ * first, then clear currentSCCB. */
+void queueSelectFail(PSCCBcard pCurrCard, UCHAR p_card)
+{
+ PSCCB pSccb;
+ PSCCBMgr_tar_info pTarInfo;
+
+ pSccb = pCurrCard->currentSCCB;
+ if (pSccb == NULL)
+ return;
+
+ pTarInfo = &sccbMgrTbl[p_card][(UCHAR)pSccb->TargID];
+
+ /* Link in front of the present queue head. */
+ pSccb->Sccb_backlink = (PSCCB)NULL;
+ pSccb->Sccb_forwardlink = pTarInfo->TarSelQ_Head;
+
+ if (pTarInfo->TarSelQ_Cnt == 0)
+ pTarInfo->TarSelQ_Tail = pSccb; /* queue was empty */
+ else
+ pTarInfo->TarSelQ_Head->Sccb_backlink = pSccb;
+
+ pTarInfo->TarSelQ_Head = pSccb;
+ pCurrCard->currentSCCB = NULL;
+ pTarInfo->TarSelQ_Cnt++;
+}
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Command Complete
+ *
+ * Description: Call the callback function with the current SCCB.
+ *
+ *---------------------------------------------------------------------*/
+
+/* queueCmdComplete: finish an SCCB -- filter spurious under-run
+ * reports, set the final status, update counters and the disconnect
+ * table, then invoke the caller's completion callback.  The whole
+ * body is selected at compile time: UCB-manager firmware uses the
+ * first branch, SCCB-manager firmware the #else branch. */
+void queueCmdComplete(PSCCBcard pCurrCard, PSCCB p_sccb, UCHAR p_card)
+{
+
+#if (FW_TYPE==_UCB_MGR_)
+
+ u08bits SCSIcmd;
+ CALL_BK_FN callback;
+ PSCCBMgr_tar_info currTar_Info;
+
+ PUCB p_ucb;
+ p_ucb=p_sccb->Sccb_ucb_ptr;
+
+ SCSIcmd = p_sccb->Cdb[0];
+
+
+ if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED))
+ {
+
+ /* Report an under-run only for commands where a short
+ transfer is meaningful (or when filtering is disabled). */
+ if ((p_ucb->UCB_opcode & OPC_CHK_UNDER_OVER_RUN) &&
+ (p_sccb->HostStatus == SCCB_COMPLETE) &&
+ (p_sccb->TargetStatus != SSCHECK))
+
+ if ((SCSIcmd == SCSI_READ) ||
+ (SCSIcmd == SCSI_WRITE) ||
+ (SCSIcmd == SCSI_READ_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
+ (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ (pCurrCard->globalFlags & F_NO_FILTER)
+ )
+ p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
+ }
+
+ p_ucb->UCB_status=SCCB_SUCCESS;
+
+ /* Intentional assignments inside the condition: both status
+ bytes are copied out and tested for non-zero.  NOTE(review):
+ the || short-circuits, so UCB_scsistat is NOT assigned when
+ UCB_hbastat is non-zero -- confirm this is intended. */
+ if ((p_ucb->UCB_hbastat=p_sccb->HostStatus) || (p_ucb->UCB_scsistat=p_sccb->TargetStatus))
+ {
+ p_ucb->UCB_status=SCCB_ERROR;
+ }
+
+ if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) ||
+ (p_sccb->OperationCode == RESIDUAL_COMMAND))
+ {
+
+ utilUpdateResidual(p_sccb);
+
+ p_ucb->UCB_datalen=p_sccb->DataLength;
+ }
+
+ pCurrCard->cmdCounter--;
+ if (!pCurrCard->cmdCounter)
+ {
+
+ /* Last outstanding command: optionally power down the clock
+ and drop the manager-active semaphore bit. */
+ if (pCurrCard->globalFlags & F_GREEN_PC)
+ {
+ WR_HARPOON(pCurrCard->ioPort+hp_clkctrl_0,(PWR_DWN | CLKCTRL_DEFAULT));
+ WR_HARPOON(pCurrCard->ioPort+hp_sys_ctrl, STOP_CLK);
+ }
+
+ WR_HARPOON(pCurrCard->ioPort+hp_semaphore,
+ (RD_HARPOON(pCurrCard->ioPort+hp_semaphore) & ~SCCB_MGR_ACTIVE));
+ }
+
+ /* Clear this SCCB's slot in the disconnect table; which slot it
+ occupies depends on the queueing mode used (see queueDisconnect). */
+ if(pCurrCard->discQCount != 0)
+ {
+ currTar_Info = &sccbMgrTbl[p_card][p_sccb->TargID];
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[p_sccb->Lun]] = NULL;
+ }
+ else
+ {
+ if(p_sccb->Sccb_tag)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL;
+ }else
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+
+ }
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ callback(p_ucb);
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ pCurrCard->currentSCCB = NULL;
+}
+
+
+
+
+#else
+
+ UCHAR i, SCSIcmd;
+ CALL_BK_FN callback;
+ PSCCBMgr_tar_info currTar_Info;
+
+ SCSIcmd = p_sccb->Cdb[0];
+
+
+ if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED)) {
+
+ /* Same under-run filter as the UCB branch, keyed off the
+ SCCB control byte instead of a UCB opcode. */
+ if ((p_sccb->ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN)) &&
+ (p_sccb->HostStatus == SCCB_COMPLETE) &&
+ (p_sccb->TargetStatus != SSCHECK))
+
+ if ((SCSIcmd == SCSI_READ) ||
+ (SCSIcmd == SCSI_WRITE) ||
+ (SCSIcmd == SCSI_READ_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
+ (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ (pCurrCard->globalFlags & F_NO_FILTER)
+ )
+ p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
+ }
+
+
+ if(p_sccb->SccbStatus == SCCB_IN_PROCESS)
+ {
+ if (p_sccb->HostStatus || p_sccb->TargetStatus)
+ p_sccb->SccbStatus = SCCB_ERROR;
+ else
+ p_sccb->SccbStatus = SCCB_SUCCESS;
+ }
+
+ /* Restore the original CDB if it was swapped out for an
+ auto-request-sense. */
+ if (p_sccb->Sccb_XferState & F_AUTO_SENSE) {
+
+ p_sccb->CdbLength = p_sccb->Save_CdbLen;
+ for (i=0; i < 6; i++) {
+ p_sccb->Cdb[i] = p_sccb->Save_Cdb[i];
+ }
+ }
+
+ if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) ||
+ (p_sccb->OperationCode == RESIDUAL_COMMAND)) {
+
+ utilUpdateResidual(p_sccb);
+ }
+
+ pCurrCard->cmdCounter--;
+ if (!pCurrCard->cmdCounter) {
+
+ if (pCurrCard->globalFlags & F_GREEN_PC) {
+ WR_HARPOON(pCurrCard->ioPort+hp_clkctrl_0,(PWR_DWN | CLKCTRL_DEFAULT));
+ WR_HARPOON(pCurrCard->ioPort+hp_sys_ctrl, STOP_CLK);
+ }
+
+ WR_HARPOON(pCurrCard->ioPort+hp_semaphore,
+ (RD_HARPOON(pCurrCard->ioPort+hp_semaphore) & ~SCCB_MGR_ACTIVE));
+
+ }
+
+ if(pCurrCard->discQCount != 0)
+ {
+ currTar_Info = &sccbMgrTbl[p_card][p_sccb->TargID];
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[p_sccb->Lun]] = NULL;
+ }
+ else
+ {
+ if(p_sccb->Sccb_tag)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL;
+ }else
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+
+ }
+
+ callback = (CALL_BK_FN)p_sccb->SccbCallback;
+ callback(p_sccb);
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ pCurrCard->currentSCCB = NULL;
+}
+#endif /* ( if FW_TYPE==...) */
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Disconnect
+ *
+ * Description: Add SCCB to our disconnect array.
+ *
+ *---------------------------------------------------------------------*/
+/* queueDisconnect: a target disconnected -- park its SCCB in the
+ * card's disconnect table.  The slot used depends on the queueing
+ * mode: per-LUN slot, the SCSI tag itself, or the target's LUN-0
+ * slot for untagged commands. */
+void queueDisconnect(PSCCB p_sccb, UCHAR p_card)
+{
+ PSCCBcard pCard;
+ PSCCBMgr_tar_info pTarInfo;
+
+ pCard = &BL_Card[p_card];
+ pTarInfo = &sccbMgrTbl[p_card][p_sccb->TargID];
+
+ if ((pCard->globalFlags & F_CONLUN_IO) &&
+ ((pTarInfo->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ {
+ /* LUN-qualified I/O: one table slot per (target, LUN). */
+ pCard->discQ_Tbl[pTarInfo->LunDiscQ_Idx[p_sccb->Lun]] = p_sccb;
+ }
+ else if (p_sccb->Sccb_tag)
+ {
+ /* Tagged command: the tag indexes the table directly. */
+ pCard->discQ_Tbl[p_sccb->Sccb_tag] = p_sccb;
+ pTarInfo->TarLUNBusy[0] = FALSE;
+ pTarInfo->TarTagQ_Cnt++;
+ }
+ else
+ {
+ /* Untagged command: use the target's LUN-0 slot. */
+ pCard->discQ_Tbl[pTarInfo->LunDiscQ_Idx[0]] = p_sccb;
+ }
+
+ pCard->currentSCCB = NULL;
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Flush SCCB
+ *
+ * Description: Flush all SCCB's back to the host driver for this target.
+ *
+ *---------------------------------------------------------------------*/
+
+void queueFlushSccb(UCHAR p_card, UCHAR error_code)
+{
+ UCHAR qtag,thisTarg;
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+ if(currSCCB != NULL)
+ {
+ thisTarg = (UCHAR)currSCCB->TargID;
+ currTar_Info = &sccbMgrTbl[p_card][thisTarg];
+
+ for (qtag=0; qtag<QUEUE_DEPTH; qtag++) {
+
+ if (BL_Card[p_card].discQ_Tbl[qtag] &&
+ (BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg))
+ {
+
+ BL_Card[p_card].discQ_Tbl[qtag]->HostStatus = (UCHAR)error_code;
+
+ queueCmdComplete(&BL_Card[p_card],BL_Card[p_card].discQ_Tbl[qtag], p_card);
+
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ currTar_Info->TarTagQ_Cnt--;
+
+ }
+ }
+ }
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Flush Target SCCB
+ *
+ * Description: Flush all SCCB's back to the host driver for this target.
+ *
+ *---------------------------------------------------------------------*/
+
+void queueFlushTargSccb(UCHAR p_card, UCHAR thisTarg, UCHAR error_code)
+{
+ UCHAR qtag;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currTar_Info = &sccbMgrTbl[p_card][thisTarg];
+
+ for (qtag=0; qtag<QUEUE_DEPTH; qtag++) {
+
+ if (BL_Card[p_card].discQ_Tbl[qtag] &&
+ (BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg))
+ {
+
+ BL_Card[p_card].discQ_Tbl[qtag]->HostStatus = (UCHAR)error_code;
+
+ queueCmdComplete(&BL_Card[p_card],BL_Card[p_card].discQ_Tbl[qtag], p_card);
+
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ currTar_Info->TarTagQ_Cnt--;
+
+ }
+ }
+
+}
+
+
+
+
+
+/* queueAddSccb: append an SCCB to the TAIL of its target's
+ * doubly-linked selection queue. */
+void queueAddSccb(PSCCB p_SCCB, UCHAR p_card)
+{
+ PSCCBMgr_tar_info pTarInfo;
+
+ pTarInfo = &sccbMgrTbl[p_card][p_SCCB->TargID];
+
+ /* The new node always becomes the tail. */
+ p_SCCB->Sccb_forwardlink = NULL;
+ p_SCCB->Sccb_backlink = pTarInfo->TarSelQ_Tail;
+
+ if (pTarInfo->TarSelQ_Cnt == 0)
+ pTarInfo->TarSelQ_Head = p_SCCB; /* queue was empty */
+ else
+ pTarInfo->TarSelQ_Tail->Sccb_forwardlink = p_SCCB;
+
+ pTarInfo->TarSelQ_Tail = p_SCCB;
+ pTarInfo->TarSelQ_Cnt++;
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Find SCCB
+ *
+ * Description: Search the target select Queue for this SCCB, and
+ * remove it if found.
+ *
+ *---------------------------------------------------------------------*/
+
+/* queueFindSccb: search the target's selection queue for p_SCCB and,
+ * if present, unlink it.  Returns TRUE when found and removed,
+ * FALSE otherwise. */
+UCHAR queueFindSccb(PSCCB p_SCCB, UCHAR p_card)
+{
+ PSCCB node;
+ PSCCBMgr_tar_info pTarInfo;
+
+ pTarInfo = &sccbMgrTbl[p_card][p_SCCB->TargID];
+
+ for (node = pTarInfo->TarSelQ_Head; node != NULL;
+ node = node->Sccb_forwardlink)
+ {
+ if (node != p_SCCB)
+ continue;
+
+ /* Fix up head/tail if the node is at either end... */
+ if (pTarInfo->TarSelQ_Head == node)
+ pTarInfo->TarSelQ_Head = node->Sccb_forwardlink;
+
+ if (pTarInfo->TarSelQ_Tail == node)
+ pTarInfo->TarSelQ_Tail = node->Sccb_backlink;
+
+ /* ...then splice its neighbours together. */
+ if (node->Sccb_forwardlink != NULL)
+ node->Sccb_forwardlink->Sccb_backlink = node->Sccb_backlink;
+
+ if (node->Sccb_backlink != NULL)
+ node->Sccb_backlink->Sccb_forwardlink = node->Sccb_forwardlink;
+
+ pTarInfo->TarSelQ_Cnt--;
+
+ return(TRUE);
+ }
+
+ return(FALSE);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Utility Update Residual Count
+ *
+ * Description: Update the XferCnt to the remaining byte count.
+ * If we transferred all the data then just write zero.
+ * If Non-SG transfer then report Total Cnt - Actual Transfer
+ * Cnt. For SG transfers add the count fields of all
+ * remaining SG elements, as well as any partial remaining
+ * element.
+ *
+ *---------------------------------------------------------------------*/
+
+/* utilUpdateResidual: rewrite p_SCCB->DataLength as the residual
+ * (untransferred) byte count: 0 when everything moved, the sum of the
+ * remaining S/G element counts (plus any partial element) for S/G
+ * transfers, or total minus actual for flat transfers. */
+void utilUpdateResidual(PSCCB p_SCCB)
+{
+ ULONG partial_cnt;
+ UINT sg_index;
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ ULONG far *sg_ptr;
+#else
+ ULONG *sg_ptr;
+#endif
+
+ if (p_SCCB->Sccb_XferState & F_ALL_XFERRED) {
+
+ p_SCCB->DataLength = 0x0000;
+ }
+
+ else if (p_SCCB->Sccb_XferState & F_SG_XFER) {
+
+ partial_cnt = 0x0000;
+
+ sg_index = p_SCCB->Sccb_sgseg;
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ sg_ptr = (ULONG far *)p_SCCB->DataPointer;
+#else
+ sg_ptr = (ULONG *)p_SCCB->DataPointer;
+#endif
+
+ /* A non-zero offset means the current element was partially
+ transferred; count its remainder and move past it. */
+ if (p_SCCB->Sccb_SGoffset) {
+
+ partial_cnt = p_SCCB->Sccb_SGoffset;
+ sg_index++;
+ }
+
+ /* Each S/G element is two ULONGs (count, address); sum the
+ count fields of all untouched elements. */
+ while ( ((ULONG)sg_index * (ULONG)SG_ELEMENT_SIZE) <
+ p_SCCB->DataLength ) {
+
+ partial_cnt += *(sg_ptr+(sg_index * 2));
+ sg_index++;
+ }
+
+ p_SCCB->DataLength = partial_cnt;
+ }
+
+ else {
+
+ p_SCCB->DataLength -= p_SCCB->Sccb_ATC;
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Wait 1 Second
+ *
+ * Description: Wait for 1 second.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void Wait1Second(USHORT p_port)
+#else
+void Wait1Second(ULONG p_port)
+#endif
+{
+ UCHAR i;
+
+ /* Four 250ms hardware waits, bailing out early on a SCSI bus
+ reset or an incoming SCAM selection. */
+ for(i=0; i < 4; i++) {
+
+ Wait(p_port, TO_250ms);
+
+ if ((RD_HARPOON(p_port+hp_scsictrl_0) & SCSI_RST))
+ break;
+
+ if((RDW_HARPOON((p_port+hp_intstat)) & SCAM_SEL))
+ break;
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Wait
+ *
+ * Description: Wait the desired delay.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void Wait(USHORT p_port, UCHAR p_delay)
+#else
+void Wait(ULONG p_port, UCHAR p_delay)
+#endif
+{
+ UCHAR old_timer;
+ UCHAR green_flag;
+
+ /* Delay by abusing the selection-timeout timer: save the current
+ timeout and clock settings, program p_delay, and poll for the
+ TIMEOUT interrupt (masked so it never reaches the handler). */
+ old_timer = RD_HARPOON(p_port+hp_seltimeout);
+
+ green_flag=RD_HARPOON(p_port+hp_clkctrl_0);
+ WR_HARPOON(p_port+hp_clkctrl_0, CLKCTRL_DEFAULT);
+
+ WR_HARPOON(p_port+hp_seltimeout,p_delay);
+ WRW_HARPOON((p_port+hp_intstat), TIMEOUT);
+ WRW_HARPOON((p_port+hp_intena), (default_intena & ~TIMEOUT));
+
+
+ WR_HARPOON(p_port+hp_portctrl_0,
+ (RD_HARPOON(p_port+hp_portctrl_0) | START_TO));
+
+ /* Poll until the timer fires; abort early on a SCSI bus reset
+ or an incoming SCAM selection. */
+ while (!(RDW_HARPOON((p_port+hp_intstat)) & TIMEOUT)) {
+
+ if ((RD_HARPOON(p_port+hp_scsictrl_0) & SCSI_RST))
+ break;
+
+ if ((RDW_HARPOON((p_port+hp_intstat)) & SCAM_SEL))
+ break;
+ }
+
+ /* Stop the timer and restore every register we touched. */
+ WR_HARPOON(p_port+hp_portctrl_0,
+ (RD_HARPOON(p_port+hp_portctrl_0) & ~START_TO));
+
+ WRW_HARPOON((p_port+hp_intstat), TIMEOUT);
+ WRW_HARPOON((p_port+hp_intena), default_intena);
+
+ WR_HARPOON(p_port+hp_clkctrl_0,green_flag);
+
+ WR_HARPOON(p_port+hp_seltimeout,old_timer);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Enable/Disable Write to EEPROM
+ *
+ * Description: The EEPROM must first be enabled for writes
+ * A total of 9 clocks are needed.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void utilEEWriteOnOff(USHORT p_port,UCHAR p_mode)
+#else
+void utilEEWriteOnOff(ULONG p_port,UCHAR p_mode)
+#endif
+{
+ UCHAR ee_value;
+
+ /* Preserve the non-EEPROM control bits while we toggle lines. */
+ ee_value = (UCHAR)(RD_HARPOON(p_port+hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H));
+
+ /* Clock out the Microwire EWEN (write-enable) or EWDS
+ (write-disable) command. */
+ if (p_mode)
+
+ utilEESendCmdAddr(p_port, EWEN, EWEN_ADDR);
+
+ else
+
+
+ utilEESendCmdAddr(p_port, EWDS, EWDS_ADDR);
+
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value); /*Turn off Master Select */
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Write EEPROM
+ *
+ * Description: Write a word to the EEPROM at the specified
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void utilEEWrite(USHORT p_port, USHORT ee_data, USHORT ee_addr)
+#else
+void utilEEWrite(ULONG p_port, USHORT ee_data, USHORT ee_addr)
+#endif
+{
+
+ UCHAR ee_value;
+ USHORT i;
+
+ /* Keep the unrelated control bits; assert master select + chip
+ select for the serial EEPROM. */
+ ee_value = (UCHAR)((RD_HARPOON(p_port+hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H))|
+ (SEE_MS | SEE_CS));
+
+
+
+ utilEESendCmdAddr(p_port, EE_WRITE, ee_addr);
+
+
+ ee_value |= (SEE_MS + SEE_CS);
+
+ /* Bit-bang the 16 data bits MSB-first: set DO, then pulse CLK
+ (each register write is doubled, presumably for timing --
+ TODO confirm against the EEPROM timing requirements). */
+ for(i = 0x8000; i != 0; i>>=1) {
+
+ if (i & ee_data)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ }
+ /* Drop CS and give the part time to commit the write. */
+ ee_value &= (EXT_ARB_ACK | SCSI_TERM_ENA_H);
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS));
+
+ Wait(p_port, TO_10ms);
+
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS | SEE_CS)); /* Set CS to EEPROM */
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS)); /* Turn off CS */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value); /* Turn off Master Select */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Read EEPROM
+ *
+ * Description: Read a word from the EEPROM at the desired
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+USHORT utilEERead(USHORT p_port, USHORT ee_addr)
+#else
+USHORT utilEERead(ULONG p_port, USHORT ee_addr)
+#endif
+{
+ USHORT i, ee_data1, ee_data2;
+
+ i = 0;
+ ee_data1 = utilEEReadOrg(p_port, ee_addr);
+ do
+ {
+ ee_data2 = utilEEReadOrg(p_port, ee_addr);
+
+ if(ee_data1 == ee_data2)
+ return(ee_data1);
+
+ ee_data1 = ee_data2;
+ i++;
+
+ }while(i < 4);
+
+ return(ee_data1);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Read EEPROM Original
+ *
+ * Description: Read a word from the EEPROM at the desired
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+USHORT utilEEReadOrg(USHORT p_port, USHORT ee_addr)
+#else
+USHORT utilEEReadOrg(ULONG p_port, USHORT ee_addr)
+#endif
+{
+
+ UCHAR ee_value;
+ USHORT i, ee_data;
+
+ ee_value = (UCHAR)((RD_HARPOON(p_port+hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H))|
+ (SEE_MS | SEE_CS));
+
+
+ utilEESendCmdAddr(p_port, EE_READ, ee_addr);
+
+
+ ee_value |= (SEE_MS + SEE_CS);
+ ee_data = 0;
+
+ for(i = 1; i <= 16; i++) {
+
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+ ee_data <<= 1;
+
+ if (RD_HARPOON(p_port+hp_ee_ctrl) & SEE_DI)
+ ee_data |= 1;
+ }
+
+ ee_value &= ~(SEE_MS + SEE_CS);
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value); /*Turn off Master Select */
+
+ return(ee_data);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Send EE command and Address to the EEPROM
+ *
+ * Description: Transfers the correct command and sends the address
+ * to the eeprom.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void utilEESendCmdAddr(USHORT p_port, UCHAR ee_cmd, USHORT ee_addr)
+#else
+void utilEESendCmdAddr(ULONG p_port, UCHAR ee_cmd, USHORT ee_addr)
+#endif
+{
+ UCHAR ee_value;
+ UCHAR narrow_flg;
+
+ USHORT i;
+
+
+ narrow_flg= (UCHAR)(RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD);
+
+
+ ee_value = SEE_MS;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+ ee_value |= SEE_CS; /* Set CS to EEPROM */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+
+ for(i = 0x04; i != 0; i>>=1) {
+
+ if (i & ee_cmd)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ }
+
+
+ if (narrow_flg)
+ i = 0x0080;
+
+ else
+ i = 0x0200;
+
+
+ while (i != 0) {
+
+ if (i & ee_addr)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+ i >>= 1;
+ }
+}
+
+USHORT CalcCrc16(UCHAR buffer[])
+{
+ USHORT crc=0;
+ int i,j;
+ USHORT ch;
+ for (i=0; i < ID_STRING_LENGTH; i++)
+ {
+ ch = (USHORT) buffer[i];
+ for(j=0; j < 8; j++)
+ {
+ if ((crc ^ ch) & 1)
+ crc = (crc >> 1) ^ CRCMASK;
+ else
+ crc >>= 1;
+ ch >>= 1;
+ }
+ }
+ return(crc);
+}
+
+UCHAR CalcLrc(UCHAR buffer[])
+{
+ int i;
+ UCHAR lrc;
+ lrc = 0;
+ for(i = 0; i < ID_STRING_LENGTH; i++)
+ lrc ^= buffer[i];
+ return(lrc);
+}
+
+
+
+/*
+ The following inline definitions avoid type conflicts.
+*/
+
+static inline unsigned char
+FlashPoint__ProbeHostAdapter(FlashPoint_Info_T *FlashPointInfo)
+{
+ return FlashPoint_ProbeHostAdapter((PSCCBMGR_INFO) FlashPointInfo);
+}
+
+
+static inline FlashPoint_CardHandle_T
+FlashPoint__HardwareResetHostAdapter(FlashPoint_Info_T *FlashPointInfo)
+{
+ return FlashPoint_HardwareResetHostAdapter((PSCCBMGR_INFO) FlashPointInfo);
+}
+
+static inline void
+FlashPoint__ReleaseHostAdapter(FlashPoint_CardHandle_T CardHandle)
+{
+ FlashPoint_ReleaseHostAdapter(CardHandle);
+}
+
+
+static inline void
+FlashPoint__StartCCB(FlashPoint_CardHandle_T CardHandle, BusLogic_CCB_T *CCB)
+{
+ FlashPoint_StartCCB(CardHandle, (PSCCB) CCB);
+}
+
+
+static inline void
+FlashPoint__AbortCCB(FlashPoint_CardHandle_T CardHandle, BusLogic_CCB_T *CCB)
+{
+ FlashPoint_AbortCCB(CardHandle, (PSCCB) CCB);
+}
+
+
+static inline boolean
+FlashPoint__InterruptPending(FlashPoint_CardHandle_T CardHandle)
+{
+ return FlashPoint_InterruptPending(CardHandle);
+}
+
+
+static inline int
+FlashPoint__HandleInterrupt(FlashPoint_CardHandle_T CardHandle)
+{
+ return FlashPoint_HandleInterrupt(CardHandle);
+}
+
+
+#define FlashPoint_ProbeHostAdapter FlashPoint__ProbeHostAdapter
+#define FlashPoint_HardwareResetHostAdapter FlashPoint__HardwareResetHostAdapter
+#define FlashPoint_ReleaseHostAdapter FlashPoint__ReleaseHostAdapter
+#define FlashPoint_StartCCB FlashPoint__StartCCB
+#define FlashPoint_AbortCCB FlashPoint__AbortCCB
+#define FlashPoint_InterruptPending FlashPoint__InterruptPending
+#define FlashPoint_HandleInterrupt FlashPoint__HandleInterrupt
+
+
+/*
+ FlashPoint_InquireTargetInfo returns the Synchronous Period, Synchronous
+ Offset, and Wide Transfers Active information for TargetID on CardHandle.
+*/
+
+void FlashPoint_InquireTargetInfo(FlashPoint_CardHandle_T CardHandle,
+ int TargetID,
+ unsigned char *SynchronousPeriod,
+ unsigned char *SynchronousOffset,
+ unsigned char *WideTransfersActive)
+{
+ SCCBMGR_TAR_INFO *TargetInfo =
+ &sccbMgrTbl[((SCCBCARD *)CardHandle)->cardIndex][TargetID];
+ if ((TargetInfo->TarSyncCtrl & SYNC_OFFSET) > 0)
+ {
+ *SynchronousPeriod = 5 * ((TargetInfo->TarSyncCtrl >> 5) + 1);
+ *SynchronousOffset = TargetInfo->TarSyncCtrl & SYNC_OFFSET;
+ }
+ else
+ {
+ *SynchronousPeriod = 0;
+ *SynchronousOffset = 0;
+ }
+ *WideTransfersActive = (TargetInfo->TarSyncCtrl & NARROW_SCSI ? 0 : 1);
+}
+
+
+#else /* CONFIG_SCSI_OMIT_FLASHPOINT */
+
+
+/*
+ Define prototypes for the FlashPoint SCCB Manager Functions.
+*/
+
+extern unsigned char FlashPoint_ProbeHostAdapter(FlashPoint_Info_T *);
+extern FlashPoint_CardHandle_T
+ FlashPoint_HardwareResetHostAdapter(FlashPoint_Info_T *);
+extern void FlashPoint_StartCCB(FlashPoint_CardHandle_T, BusLogic_CCB_T *);
+extern int FlashPoint_AbortCCB(FlashPoint_CardHandle_T, BusLogic_CCB_T *);
+extern boolean FlashPoint_InterruptPending(FlashPoint_CardHandle_T);
+extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T);
+extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T);
+extern void FlashPoint_InquireTargetInfo(FlashPoint_CardHandle_T,
+ int, unsigned char *,
+ unsigned char *, unsigned char *);
+
+
+#endif /* CONFIG_SCSI_OMIT_FLASHPOINT */
diff --git a/linux/src/drivers/scsi/NCR5380.c b/linux/src/drivers/scsi/NCR5380.c
new file mode 100644
index 0000000..4f085e9
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR5380.c
@@ -0,0 +1,3246 @@
+#ifndef NDEBUG
+#define NDEBUG (NDEBUG_RESTART_SELECT | NDEBUG_ABORT)
+#endif
+/*
+ * NCR 5380 generic driver routines. These should make it *trivial*
+ * to implement 5380 SCSI drivers under Linux with a non-trantor
+ * architecture.
+ *
+ * Note that these routines also work with NR53c400 family chips.
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * DISTRIBUTION RELEASE 6.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Revision 1.7 1996/3/2 Ray Van Tassle (rayvt@comm.mot.com)
+ * added proc_info
+ * added support needed for DTC 3180/3280
+ * fixed a couple of bugs
+ *
+
+ * Revision 1.5 1994/01/19 09:14:57 drew
+ * Fixed udelay() hack that was being used on DATAOUT phases
+ * instead of a proper wait for the final handshake.
+ *
+ * Revision 1.4 1994/01/19 06:44:25 drew
+ * *** empty log message ***
+ *
+ * Revision 1.3 1994/01/19 05:24:40 drew
+ * Added support for TCR LAST_BYTE_SENT bit.
+ *
+ * Revision 1.2 1994/01/15 06:14:11 drew
+ * REAL DMA support, bug fixes.
+ *
+ * Revision 1.1 1994/01/15 06:00:54 drew
+ * Initial revision
+ *
+ */
+
+/*
+ * Further development / testing that should be done :
+ * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete
+ * code so that everything does the same thing that's done at the
+ * end of a pseudo-DMA read operation.
+ *
+ * 2. Fix REAL_DMA (interrupt driven, polled works fine) -
+ * basically, transfer size needs to be reduced by one
+ * and the last byte read as is done with PSEUDO_DMA.
+ *
+ * 3. Test USLEEP code
+ *
+ * 4. Test SCSI-II tagged queueing (I have no devices which support
+ * tagged queueing)
+ *
+ * 5. Test linked command handling code after Eric is ready with
+ * the high level code.
+ */
+
+#if (NDEBUG & NDEBUG_LISTS)
+#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
+#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
+#else
+#define LIST(x,y)
+#define REMOVE(w,x,y,z)
+#endif
+
+#ifndef notyet
+#undef LINKED
+#undef USLEEP
+#undef REAL_DMA
+#endif
+
+#ifdef REAL_DMA_POLL
+#undef READ_OVERRUNS
+#define READ_OVERRUNS
+#endif
+
+/*
+ * Design
+ * Issues :
+ *
+ * The other Linux SCSI drivers were written when Linux was Intel PC-only,
+ * and specifically for each board rather than each chip. This makes their
+ * adaptation to platforms like the Mac (Some of which use NCR5380's)
+ * more difficult than it has to be.
+ *
+ * Also, many of the SCSI drivers were written before the command queuing
+ * routines were implemented, meaning their implementations of queued
+ * commands were hacked on rather than designed in from the start.
+ *
+ * When I designed the Linux SCSI drivers I figured that
+ * while having two different SCSI boards in a system might be useful
+ * for debugging things, two of the same type wouldn't be used.
+ * Well, I was wrong and a number of users have mailed me about running
+ * multiple high-performance SCSI boards in a server.
+ *
+ * Finally, when I get questions from users, I have no idea what
+ * revision of my driver they are running.
+ *
+ * This driver attempts to address these problems :
+ * This is a generic 5380 driver. To use it on a different platform,
+ * one simply writes appropriate system specific macros (ie, data
+ * transfer - some PC's will use the I/O bus, 68K's must use
+ * memory mapped) and drops this file in their 'C' wrapper.
+ *
+ * As far as command queueing, two queues are maintained for
+ * each 5380 in the system - commands that haven't been issued yet,
+ * and commands that are currently executing. This means that an
+ * unlimited number of commands may be queued, letting
+ * more commands propagate from the higher driver levels giving higher
+ * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
+ * allowing multiple commands to propagate all the way to a SCSI-II device
+ * while a command is already executing.
+ *
+ * To solve the multiple-boards-in-the-same-system problem,
+ * there is a separate instance structure for each instance
+ * of a 5380 in the system. So, multiple NCR5380 drivers will
+ * be able to coexist with appropriate changes to the high level
+ * SCSI code.
+ *
+ * A NCR5380_PUBLIC_REVISION macro is provided, with the release
+ * number (updated for each public release) printed by the
+ * NCR5380_print_options command, which should be called from the
+ * wrapper detect function, so that I know what release of the driver
+ * users are using.
+ *
+ * Issues specific to the NCR5380 :
+ *
+ * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
+ * piece of hardware that requires you to sit in a loop polling for
+ * the REQ signal as long as you are connected. Some devices are
+ * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
+ * while doing long seek operations.
+ *
+ * The workaround for this is to keep track of devices that have
+ * disconnected. If the device hasn't disconnected, for commands that
+ * should disconnect, we do something like
+ *
+ * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
+ *
+ * Some tweaking of N and M needs to be done. An algorithm based
+ * on "time to data" would give the best results as long as short time
+ * to datas (ie, on the same track) were considered, however these
+ * broken devices are the exception rather than the rule and I'd rather
+ * spend my time optimizing for the normal case.
+ *
+ * Architecture :
+ *
+ * At the heart of the design is a coroutine, NCR5380_main,
+ * which is started when not running by the interrupt handler,
+ * timer, and queue command function. It attempts to establish
+ * I_T_L or I_T_L_Q nexuses by removing the commands from the
+ * issue queue and calling NCR5380_select() if a nexus
+ * is not established.
+ *
+ * Once a nexus is established, the NCR5380_information_transfer()
+ * phase goes through the various phases as instructed by the target.
+ * if the target goes into MSG IN and sends a DISCONNECT message,
+ * the command structure is placed into the per instance disconnected
+ * queue, and NCR5380_main tries to find more work. If USLEEP
+ * was defined, and the target is idle for too long, the system
+ * will try to sleep.
+ *
+ * If a command has disconnected, eventually an interrupt will trigger,
+ * calling NCR5380_intr() which will in turn call NCR5380_reselect
+ * to reestablish a nexus. This will run main if necessary.
+ *
+ * On command termination, the done function will be called as
+ * appropriate.
+ *
+ * SCSI pointers are maintained in the SCp field of SCSI command
+ * structures, being initialized after the command is connected
+ * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
+ * Note that in violation of the standard, an implicit SAVE POINTERS operation
+ * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
+ */
+
+/*
+ * Using this file :
+ * This file is a skeleton Linux SCSI driver for the NCR 5380 series
+ * of chips. To use it, you write architecture-specific functions
+ * and macros and include this file in your driver.
+ *
+ * These macros control options :
+ * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
+ * defined.
+ *
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
+ * transceivers.
+ *
+ * DONT_USE_INTR - if defined, never use interrupts, even if we probe or
+ * override-configure an IRQ.
+ *
+ * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
+ * bytes at a time. Since interrupts are disabled by default during
+ * these transfers, we might need this to give reasonable interrupt
+ * service time if the transfer size gets too large.
+ *
+ * LINKED - if defined, linked commands are supported.
+ *
+ * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
+ *
+ * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
+ *
+ * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
+ * rely on phase mismatch and EOP interrupts to determine end
+ * of phase.
+ *
+ * SCSI2 - if defined, SCSI-2 tagged queuing is used where possible
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
+ * only really want to use this if you're having a problem with
+ * dropped characters during high speed communications, and even
+ * then, you're going to be better off twiddling with transfersize
+ * in the high level code.
+ *
+ * USLEEP - if defined, on devices that aren't disconnecting from the
+ * bus, we will go to sleep so that the CPU can get real work done
+ * when we run a command that won't complete immediately.
+ *
+ * Note that if USLEEP is defined, NCR5380_TIMER *must* also be
+ * defined.
+ *
+ * Defaults for these will be provided if USLEEP is defined, although
+ * the user may want to adjust these to allocate CPU resources to
+ * the SCSI driver or "real" code.
+ *
+ * USLEEP_SLEEP - amount of time, in jiffies, to sleep
+ *
+ * USLEEP_POLL - amount of time, in jiffies, to poll
+ *
+ * These macros MUST be defined :
+ * NCR5380_local_declare() - declare any local variables needed for your
+ * transfer routines.
+ *
+ * NCR5380_setup(instance) - initialize any local variables needed from a given
+ * instance of the host adapter for NCR5380_{read,write,pread,pwrite}
+ *
+ * NCR5380_read(register) - read from the specified register
+ *
+ * NCR5380_write(register, value) - write to the specific register
+ *
+ * NCR5380_implementation_fields - additional fields needed for this
+ * specific implementation of the NCR5380
+ *
+ * Either real DMA *or* pseudo DMA may be implemented
+ * REAL functions :
+ * NCR5380_REAL_DMA should be defined if real DMA is to be used.
+ * Note that the DMA setup functions should return the number of bytes
+ * that they were able to program the controller for.
+ *
+ * Also note that generic i386/PC versions of these macros are
+ * available as NCR5380_i386_dma_write_setup,
+ * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
+ *
+ * NCR5380_dma_write_setup(instance, src, count) - initialize
+ * NCR5380_dma_read_setup(instance, dst, count) - initialize
+ * NCR5380_dma_residual(instance); - residual count
+ *
+ * PSEUDO functions :
+ * NCR5380_pwrite(instance, src, count)
+ * NCR5380_pread(instance, dst, count);
+ *
+ * If nothing specific to this implementation needs doing (ie, with external
+ * hardware), you must also define
+ *
+ * NCR5380_queue_command
+ * NCR5380_reset
+ * NCR5380_abort
+ * NCR5380_proc_info
+ *
+ * to be the global entry points into the specific driver, ie
+ * #define NCR5380_queue_command t128_queue_command.
+ *
+ * If this is not done, the routines will be defined as static functions
+ * with the NCR5380* names and the user must provide a globally
+ * accessible wrapper function.
+ *
+ * The generic driver is initialized by calling NCR5380_init(instance),
+ * after setting the appropriate host specific fields and ID. If the
+ * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
+ * possible) function may be used. Before the specific driver initialization
+ * code finishes, NCR5380_print_options should be called.
+ */
+
+static int do_abort (struct Scsi_Host *host);
+static void do_reset (struct Scsi_Host *host);
+static struct Scsi_Host *first_instance = NULL;
+static Scsi_Host_Template *the_template = NULL;
+
+/*
+ * Function : void initialize_SCp(Scsi_Cmnd *cmd)
+ *
+ * Purpose : initialize the saved data pointers for cmd to point to the
+ * start of the buffer.
+ *
+ * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
+ */
+
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd) {
+ /*
+ * Initialize the Scsi Pointer field so that all of the commands in the
+ * various queues are valid.
+ */
+
+ if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *) cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *) cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ }
+}
+
+#include <linux/delay.h>
+
+#ifdef NDEBUG
+static struct {
+ unsigned char mask;
+ const char * name;}
+signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
+ { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
+ { SR_SEL, "SEL" }, {0, NULL}},
+basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}},
+icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
+ {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
+ {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
+ {0, NULL}},
+mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
+ {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
+ "MODE PARITY INTR"}, {MR_MONITOR_BSY, "MODE MONITOR BSY"},
+ {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
+ {0, NULL}};
+
+/*
+ * Function : void NCR5380_print(struct Scsi_Host *instance)
+ *
+ * Purpose : print the SCSI bus signals for debugging purposes
+ *
+ * Input : instance - which NCR5380
+ */
+
+static void NCR5380_print(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ unsigned char status, data, basr, mr, icr, i;
+ NCR5380_setup(instance);
+ cli();
+ data = NCR5380_read(CURRENT_SCSI_DATA_REG);
+ status = NCR5380_read(STATUS_REG);
+ mr = NCR5380_read(MODE_REG);
+ icr = NCR5380_read(INITIATOR_COMMAND_REG);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ sti();
+ printk("STATUS_REG: %02x ", status);
+ for (i = 0; signals[i].mask ; ++i)
+ if (status & signals[i].mask)
+ printk(",%s", signals[i].name);
+ printk("\nBASR: %02x ", basr);
+ for (i = 0; basrs[i].mask ; ++i)
+ if (basr & basrs[i].mask)
+ printk(",%s", basrs[i].name);
+ printk("\nICR: %02x ", icr);
+ for (i = 0; icrs[i].mask; ++i)
+ if (icr & icrs[i].mask)
+ printk(",%s", icrs[i].name);
+ printk("\nMODE: %02x ", mr);
+ for (i = 0; mrs[i].mask; ++i)
+ if (mr & mrs[i].mask)
+ printk(",%s", mrs[i].name);
+ printk("\n");
+}
+
+static struct {
+ unsigned char value;
+ const char *name;
+} phases[] = {
+{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
+{PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
+{PHASE_UNKNOWN, "UNKNOWN"}};
+
+/*
+ * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
+ *
+ * Purpose : print the current SCSI phase for debugging purposes
+ *
+ * Input : instance - which NCR5380
+ */
+
+static void NCR5380_print_phase(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ NCR5380_setup(instance);
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ printk("scsi%d : REQ not asserted, phase unknown.\n",
+ instance->host_no);
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i);
+ printk("scsi%d : phase %s\n", instance->host_no, phases[i].name);
+ }
+}
+#endif
+
+/*
+ * We need to have our coroutine active given these constraints :
+ * 1. The mutex flag, main_running, can only be set when the main
+ * routine can actually process data, otherwise SCSI commands
+ * will never get issued.
+ *
+ * 2. NCR5380_main() shouldn't be called before it has exited, because
+ * other drivers have had kernel stack overflows in similar
+ * situations.
+ *
+ * 3. We don't want to inline NCR5380_main() because of space concerns,
+ * even though it is only called in two places.
+ *
+ * So, the solution is to set the mutex in an inline wrapper for the
+ * main coroutine, and have the main coroutine exit with interrupts
+ * disabled after the final search through the queues so that no race
+ * conditions are possible.
+ */
+
+static volatile int main_running = 0;
+
+/*
+ * Function : run_main(void)
+ *
+ * Purpose : ensure that the coroutine is running and will process our
+ * request. main_running is checked/set here (in an inline function)
+ * rather than in NCR5380_main itself to reduce the chances of stack
+ * overflow.
+ *
+ */
+
+static __inline__ void run_main(void) {
+ cli();
+ if (!main_running) {
+ main_running = 1;
+ NCR5380_main();
+ /*
+ * main_running is cleared in NCR5380_main once it can't do
+ * more work, and NCR5380_main exits with interrupts disabled.
+ */
+ sti();
+ } else
+ sti();
+}
+
+#ifdef USLEEP
+#ifndef NCR5380_TIMER
+#error "NCR5380_TIMER must be defined so that this type of NCR5380 driver gets a unique timer."
+#endif
+
+/*
+ * These need tweaking, and would probably work best as per-device
+ * flags initialized differently for disk, tape, cd, etc devices.
+ * People with broken devices are free to experiment as to what gives
+ * the best results for them.
+ *
+ * USLEEP_SLEEP should be a minimum seek time.
+ *
+ * USLEEP_POLL should be a maximum rotational latency.
+ */
+#ifndef USLEEP_SLEEP
+/* 20 ms (reasonable hard disk speed) */
+#define USLEEP_SLEEP (20*HZ/1000)
+#endif
+/* 300 RPM (floppy speed) */
+#ifndef USLEEP_POLL
+#define USLEEP_POLL (200*HZ/1000)
+#endif
+
+static struct Scsi_Host * expires_first = NULL;
+
+/*
+ * Function : int should_disconnect (unsigned char cmd)
+ *
+ * Purpose : decide whether a command would normally disconnect or
+ * not, since if it won't disconnect we should go to sleep.
+ *
+ * Input : cmd - opcode of SCSI command
+ *
+ * Returns : DISCONNECT_LONG if we should disconnect for a really long
+ * time (ie always, sleep, look for REQ active, sleep),
+ * DISCONNECT_TIME_TO_DATA if we would only disconnect for a normal
+ * time-to-data delay, DISCONNECT_NONE if this command would return
+ * immediately.
+ *
+ * Future sleep algorithms based on time to data can exploit
+ * something like this so they can differentiate between "normal"
+ * (ie, read, write, seek) and unusual commands (ie, * format).
+ *
+ * Note : We don't deal with commands that handle an immediate disconnect,
+ *
+ */
+
+static int should_disconnect (unsigned char cmd) {
+ switch (cmd) {
+ case READ_6:
+ case WRITE_6:
+ case SEEK_6:
+ case READ_10:
+ case WRITE_10:
+ case SEEK_10:
+ return DISCONNECT_TIME_TO_DATA;
+ case FORMAT_UNIT:
+ case SEARCH_HIGH:
+ case SEARCH_LOW:
+ case SEARCH_EQUAL:
+ return DISCONNECT_LONG;
+ default:
+ return DISCONNECT_NONE;
+ }
+}
+
+/*
+ * Assumes instance->time_expires has been set in higher level code.
+ */
+
+static int NCR5380_set_timer (struct Scsi_Host *instance) {
+ struct Scsi_Host *tmp, **prev;
+
+ cli();
+ if (((struct NCR5380_hostdata *) (instance->host_data))->next_timer) {
+ sti();
+ return -1;
+ }
+
+ for (prev = &expires_first, tmp = expires_first; tmp;
+ prev = &(((struct NCR5380_hostdata *) tmp->host_data)->next_timer),
+ tmp = ((struct NCR5380_hostdata *) tmp->host_data)->next_timer)
+ if (instance->time_expires < tmp->time_expires)
+ break;
+
+ instance->next_timer = tmp;
+ *prev = instance;
+ timer_table[NCR5380_TIMER].expires = expires_first->time_expires;
+ timer_active |= 1 << NCR5380_TIMER;
+ sti();
+ return 0;
+}
+
+/* Doing something about unwanted reentrancy here might be useful */
+void NCR5380_timer_fn(void) {
+ struct Scsi_Host *instance;
+ cli();
+ for (; expires_first && expires_first->time_expires >= jiffies; ) {
+ instance = ((NCR5380_hostdata *) expires_first->host_data)->
+ expires_next;
+ ((NCR5380_hostdata *) expires_first->host_data)->expires_next =
+ NULL;
+ ((NCR5380_hostdata *) expires_first->host_data)->time_expires =
+ 0;
+ expires_first = instance;
+ }
+
+ if (expires_first) {
+ timer_table[NCR5380_TIMER].expires = ((NCR5380_hostdata *)
+ expires_first->host_data)->time_expires;
+ timer_active |= (1 << NCR5380_TIMER);
+ } else {
+ timer_table[NCR5380_TIMER].expires = 0;
+ timer_active &= ~(1 << MCR5380_TIMER);
+ }
+ sti();
+
+ run_main();
+}
+#endif /* def USLEEP */
+
+static void NCR5380_all_init (void) {
+ static int done = 0;
+ if (!done) {
+#if (NDEBUG & NDEBUG_INIT)
+ printk("scsi : NCR5380_all_init()\n");
+#endif
+ done = 1;
+#ifdef USLEEP
+ timer_table[NCR5380_TIMER].expires = 0;
+ timer_table[NCR5380_TIMER].fn = NCR5380_timer_fn;
+#endif
+ }
+}
+
+#ifdef AUTOPROBE_IRQ
+/*
+ * Function : int NCR5380_probe_irq (struct Scsi_Host *instance, int possible)
+ *
+ * Purpose : autoprobe for the IRQ line used by the NCR5380.
+ *
+ * Inputs : instance - pointer to this instance of the NCR5380 driver,
+ * possible - bitmask of permissible interrupts.
+ *
+ * Returns : number of the IRQ selected, IRQ_NONE if no interrupt fired.
+ *
+ * XXX no effort is made to deal with spurious interrupts.
+ */
+
+
+static int probe_irq;
+static void probe_intr (int irq, void *dev_id, struct pt_regs * regs) {
+ probe_irq = irq;
+};
+
+static int NCR5380_probe_irq (struct Scsi_Host *instance, int possible) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned long timeout;
+ int trying_irqs, i, mask;
+ NCR5380_setup(instance);
+
+ for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+ if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe", NULL)
+ == 0))
+ trying_irqs |= mask;
+
+ timeout = jiffies + 250*HZ/1000;
+ probe_irq = IRQ_NONE;
+
+/*
+ * An interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
+ * to zero.
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA |
+ ICR_ASSERT_SEL);
+
+ while (probe_irq == IRQ_NONE && jiffies < timeout)
+ barrier();
+
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ for (i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+ if (trying_irqs & mask)
+ free_irq(i, NULL);
+
+ return probe_irq;
+}
+#endif /* AUTOPROBE_IRQ */
+
+/*
+ * Function : void NCR5380_print_options (struct Scsi_Host *instance)
+ *
+ * Purpose : called by probe code indicating the NCR5380 driver
+ * options that were selected.
+ *
+ * Inputs : instance, pointer to this instance.  Used only to check
+ * whether the board runs in NCR53C400 mode (FLAG_NCR53C400).
+ */
+
+static void NCR5380_print_options (struct Scsi_Host *instance) {
+    /* The option list is assembled at compile time via string-literal
+     * concatenation across the #ifdef blocks below. */
+    printk(" generic options"
+#ifdef AUTOPROBE_IRQ
+    " AUTOPROBE_IRQ"
+#endif
+#ifdef AUTOSENSE
+    " AUTOSENSE"
+#endif
+#ifdef DIFFERENTIAL
+    " DIFFERENTIAL"
+#endif
+#ifdef REAL_DMA
+    " REAL DMA"
+#endif
+#ifdef REAL_DMA_POLL
+    " REAL DMA POLL"
+#endif
+#ifdef PARITY
+    " PARITY"
+#endif
+#ifdef PSEUDO_DMA
+    " PSEUDO DMA"
+#endif
+#ifdef SCSI2
+    " SCSI-2"
+#endif
+#ifdef UNSAFE
+    " UNSAFE "
+#endif
+    );
+#ifdef USLEEP
+    printk(" USLEEP, USLEEP_POLL=%d USLEEP_SLEEP=%d", USLEEP_POLL, USLEEP_SLEEP);
+#endif
+    printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
+    if (((struct NCR5380_hostdata *)instance->hostdata)->flags & FLAG_NCR53C400) {
+        printk(" ncr53c400 release=%d", NCR53C400_PUBLIC_RELEASE);
+    }
+}
+
+/*
+ * Function : void NCR5380_print_status (struct Scsi_Host *instance)
+ *
+ * Purpose : print commands in the various queues, called from
+ * NCR5380_abort and NCR5380_debug to aid debugging.
+ *
+ * Inputs : instance, pointer to this instance.
+ */
+
+static void NCR5380_print_status (struct Scsi_Host *instance) {
+    static char pr_bfr[512];
+    char *start;
+    int len;
+
+    printk("NCR5380 : coroutine is%s running.\n",
+        main_running ? "" : "n't");
+
+#ifdef NDEBUG
+    NCR5380_print (instance);
+    NCR5380_print_phase (instance);
+#endif
+
+    /*
+     * NCR5380_proc_info() may return up to 'length' bytes; pass one
+     * less than the buffer size so the NUL terminator written below
+     * cannot land one past the end of pr_bfr (the original passed
+     * sizeof(pr_bfr), making pr_bfr[len] an out-of-bounds write when
+     * the proc output filled the buffer).
+     */
+    len = NCR5380_proc_info(pr_bfr, &start, 0, sizeof(pr_bfr) - 1,
+        instance->host_no, 0);
+    pr_bfr[len] = 0;
+    printk("\n%s\n", pr_bfr);
+}
+
+/******************************************/
+/*
+ * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
+ *
+ * *buffer: I/O buffer
+ * **start: if inout == FALSE pointer into buffer where user read should start
+ * offset: current offset
+ * length: length of buffer
+ * hostno: Scsi_Host host_no
+ * inout: TRUE - user is writing; FALSE - user is reading
+ *
+ * Return the number of bytes read from or written to the file.
+ */
+
+#undef SPRINTF
+#define SPRINTF(args...) do { if(pos < buffer + length-80) pos += sprintf(pos, ## args); } while(0)
+static
+char *lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length);
+static
+char *lprint_command (unsigned char *cmd, char *pos, char *buffer, int len);
+static
+char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
+
+#ifndef NCR5380_proc_info
+static
+#endif
+/*
+ * Implements the /proc/scsi entry point described above: locates the
+ * instance matching hostno, then dumps release numbers, board
+ * addresses and the per-instance command queues into *buffer.
+ * Returns bytes produced (capped at length), -ESRCH if no such host,
+ * or -ENOSYS for writes.
+ */
+int NCR5380_proc_info (
+    char *buffer, char **start,off_t offset,
+    int length,int hostno,int inout)
+{
+    char *pos = buffer;
+    struct Scsi_Host *instance;
+    struct NCR5380_hostdata *hostdata;
+    Scsi_Cmnd *ptr;
+
+    /* Find the host instance this proc node belongs to. */
+    for (instance = first_instance; instance &&
+        instance->host_no != hostno; instance=instance->next)
+        ;
+    if (!instance)
+        return(-ESRCH);
+    hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+
+    if (inout) { /* Has data been written to the file ? */
+        /* Writing anything merely resets the highwater statistics. */
+#ifdef DTC_PUBLIC_RELEASE
+        dtc_wmaxi = dtc_maxi = 0;
+#endif
+#ifdef PAS16_PUBLIC_RELEASE
+        pas_wmaxi = pas_maxi = 0;
+#endif
+        return(-ENOSYS); /* Currently this is a no-op */
+    }
+    SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE);
+    if (((struct NCR5380_hostdata *)instance->hostdata)->flags & FLAG_NCR53C400)
+        SPRINTF("ncr53c400 release=%d. ", NCR53C400_PUBLIC_RELEASE);
+#ifdef DTC_PUBLIC_RELEASE
+    SPRINTF("DTC 3180/3280 release %d", DTC_PUBLIC_RELEASE);
+#endif
+#ifdef T128_PUBLIC_RELEASE
+    SPRINTF("T128 release %d", T128_PUBLIC_RELEASE);
+#endif
+#ifdef GENERIC_NCR5380_PUBLIC_RELEASE
+    SPRINTF("Generic5380 release %d", GENERIC_NCR5380_PUBLIC_RELEASE);
+#endif
+#ifdef PAS16_PUBLIC_RELEASE
+SPRINTF("PAS16 release=%d", PAS16_PUBLIC_RELEASE);
+#endif
+
+    SPRINTF("\nBase Addr: 0x%05lX ", (long)instance->base);
+    SPRINTF("io_port: %04x ", (int)instance->io_port);
+    if (instance->irq == IRQ_NONE)
+        SPRINTF("IRQ: None.\n");
+    else
+        SPRINTF("IRQ: %d.\n", instance->irq);
+
+#ifdef DTC_PUBLIC_RELEASE
+    SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n",
+        dtc_wmaxi, dtc_maxi);
+#endif
+#ifdef PAS16_PUBLIC_RELEASE
+    SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n",
+        pas_wmaxi, pas_maxi);
+#endif
+    /* Interrupts off while walking the command queues so the
+     * singly-linked lists cannot change underneath us. */
+    cli();
+    SPRINTF("NCR5380 : coroutine is%s running.\n", main_running ? "" : "n't");
+    if (!hostdata->connected)
+        SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
+    else
+        pos = lprint_Scsi_Cmnd ((Scsi_Cmnd *) hostdata->connected,
+            pos, buffer, length);
+    SPRINTF("scsi%d: issue_queue\n", instance->host_no);
+    for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr;
+        ptr = (Scsi_Cmnd *) ptr->host_scribble)
+        pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
+
+    SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
+    for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+        ptr = (Scsi_Cmnd *) ptr->host_scribble)
+        pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
+
+    sti();
+    *start=buffer;
+    /* Standard proc_info return convention: 0 if offset is past the
+     * generated data, otherwise the bytes available after offset,
+     * capped at length. */
+    if (pos - buffer < offset)
+        return 0;
+    else if (pos - buffer - offset < length)
+        return pos - buffer - offset;
+    return length;
+}
+
+/* Append a one-line description of cmd (host, target, lun) and its
+ * CDB to the proc buffer; returns the updated write position. */
+static
+char *lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length) {
+    SPRINTF("scsi%d : destination target %d, lun %d\n",
+        cmd->host->host_no, cmd->target, cmd->lun);
+    SPRINTF(" command = ");
+    return lprint_command (cmd->cmnd, pos, buffer, length);
+}
+
+/* Append the opcode plus the remaining CDB bytes (hex) of command to
+ * the proc buffer; returns the updated write position. */
+static
+char *lprint_command (unsigned char *command,
+    char *pos, char *buffer, int length) {
+    int byte, cdb_len;
+
+    pos = lprint_opcode(command[0], pos, buffer, length);
+    cdb_len = COMMAND_SIZE(command[0]);
+    for (byte = 1; byte < cdb_len; ++byte)
+        SPRINTF("%02x ", command[byte]);
+    SPRINTF("\n");
+    return pos;
+}
+
+/* Append the SCSI opcode in decimal and hex to the proc buffer and
+ * return the updated write position. */
+static
+char *lprint_opcode(int opcode, char *pos, char *buffer, int length) {
+    SPRINTF("%2d (0x%02x)", opcode, opcode);
+    return pos;
+}
+
+
+/*
+ * Function : void NCR5380_init (struct Scsi_Host *instance, flags)
+ *
+ * Purpose : initializes *instance and corresponding 5380 chip,
+ * with flags OR'd into the initial flags value.
+ *
+ * Inputs : instance - instantiation of the 5380 driver.
+ *
+ * Notes : I assume that the host, hostno, and id bits have been
+ * set correctly. I don't care about the irq and other fields.
+ *
+ */
+
+static void NCR5380_init (struct Scsi_Host *instance, int flags) {
+    NCR5380_local_declare();
+    int i, pass;
+    unsigned long timeout;
+    struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+        instance->hostdata;
+
+    /*
+     * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+     * the base address.
+     */
+
+#ifdef NCR53C400
+    if (flags & FLAG_NCR53C400)
+        instance->NCR5380_instance_name += NCR53C400_address_adjust;
+#endif
+
+    NCR5380_setup(instance);
+
+    NCR5380_all_init();
+
+    hostdata->aborted = 0;
+    hostdata->id_mask = 1 << instance->this_id;
+    /* Collect the set of IDs that outrank ours, for arbitration. */
+    for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
+        if (i > hostdata->id_mask)
+            hostdata->id_higher_mask |= i;
+    for (i = 0; i < 8; ++i)
+        hostdata->busy[i] = 0;
+#ifdef REAL_DMA
+    hostdata->dmalen = 0;
+#endif
+    hostdata->targets_present = 0;
+    hostdata->connected = NULL;
+    hostdata->issue_queue = NULL;
+    hostdata->disconnected_queue = NULL;
+#ifdef NCR5380_STATS
+    for (i = 0; i < 8; ++i) {
+        hostdata->time_read[i] = 0;
+        hostdata->time_write[i] = 0;
+        hostdata->bytes_read[i] = 0;
+        hostdata->bytes_write[i] = 0;
+    }
+    hostdata->timebase = 0;
+    hostdata->pendingw = 0;
+    hostdata->pendingr = 0;
+#endif
+
+    /* The CHECK code seems to break the 53C400. Will check it later maybe */
+    if (flags & FLAG_NCR53C400)
+        hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags;
+    else
+        hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT | flags;
+
+    if (!the_template) {
+        the_template = instance->hostt;
+        first_instance = instance;
+    }
+
+
+#ifdef USLEEP
+    hostdata->time_expires = 0;
+    hostdata->next_timer = NULL;
+#endif
+
+#ifndef AUTOSENSE
+    /* Fixed: the original line had an unbalanced closing parenthesis
+     * ("... || instance->can_queue > 1))"), which broke compilation
+     * whenever AUTOSENSE was not defined. */
+    if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
+        printk("scsi%d : WARNING : support for multiple outstanding commands enabled\n"
+               " without AUTOSENSE option, contingent allegiance conditions may\n"
+               " be incorrectly cleared.\n", instance->host_no);
+#endif /* def AUTOSENSE */
+
+    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+    NCR5380_write(MODE_REG, MR_BASE);
+    NCR5380_write(TARGET_COMMAND_REG, 0);
+    NCR5380_write(SELECT_ENABLE_REG, 0);
+
+#ifdef NCR53C400
+    if (hostdata->flags & FLAG_NCR53C400) {
+        NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+    }
+#endif
+
+    /*
+     * Detect and correct bus wedge problems.
+     *
+     * If the system crashed, it may have crashed in a state
+     * where a SCSI command was still executing, and the
+     * SCSI bus is not in a BUS FREE STATE.
+     *
+     * If this is the case, we'll try to abort the currently
+     * established nexus which we know nothing about, and that
+     * failing, do a hard reset of the SCSI bus
+     */
+
+    for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) &&
+        pass <= 6 ; ++pass) {
+        switch (pass) {
+        case 1:
+        case 3:
+        case 5:
+            printk("scsi%d: SCSI bus busy, waiting up to five seconds\n",
+                instance->host_no);
+            timeout = jiffies + 5*HZ;
+            while (jiffies < timeout && (NCR5380_read(STATUS_REG) & SR_BSY));
+            break;
+        case 2:
+            printk("scsi%d: bus busy, attempting abort\n",
+                instance->host_no);
+            do_abort (instance);
+            break;
+        case 4:
+            printk("scsi%d: bus busy, attempting reset\n",
+                instance->host_no);
+            do_reset (instance);
+            break;
+        case 6:
+            printk("scsi%d: bus locked solid or invalid override\n",
+                instance->host_no);
+            break;
+        }
+    }
+}
+
+/*
+ * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
+ * void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose : enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ * a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ * Side effects :
+ * cmd is added to the per instance issue_queue, with minor
+ * twiddling done to the host specific fields of cmd. If the
+ * main coroutine is not running, it is restarted.
+ *
+ */
+
+/* Only make static if a wrapper function is used */
+#ifndef NCR5380_queue_command
+static
+#endif
+int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) {
+    struct Scsi_Host *instance = cmd->host;
+    struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+        instance->hostdata;
+    Scsi_Cmnd *tmp;
+
+#if (NDEBUG & NDEBUG_NO_WRITE)
+    /* Debug build: refuse WRITE commands outright. */
+    switch (cmd->cmnd[0]) {
+    case WRITE_6:
+    case WRITE_10:
+        printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
+            instance->host_no);
+        cmd->result = (DID_ERROR << 16);
+        done(cmd);
+        return 0;
+    }
+#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
+
+#ifdef NCR5380_STATS
+# if 0
+    if (!hostdata->connected && !hostdata->issue_queue &&
+        !hostdata->disconnected_queue) {
+        hostdata->timebase = jiffies;
+    }
+# endif
+# ifdef NCR5380_STAT_LIMIT
+    if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
+# endif
+    /* Timing trick: subtract elapsed time now; the matching += in
+     * collect_stats() at completion leaves the net duration in
+     * time_read/time_write. */
+    switch (cmd->cmnd[0])
+    {
+        case WRITE:
+        case WRITE_6:
+        case WRITE_10:
+            hostdata->time_write[cmd->target] -= (jiffies - hostdata->timebase);
+            hostdata->bytes_write[cmd->target] += cmd->request_bufflen;
+            hostdata->pendingw++;
+            break;
+        case READ:
+        case READ_6:
+        case READ_10:
+            hostdata->time_read[cmd->target] -= (jiffies - hostdata->timebase);
+            hostdata->bytes_read[cmd->target] += cmd->request_bufflen;
+            hostdata->pendingr++;
+            break;
+    }
+#endif
+
+    /*
+     * We use the host_scribble field as a pointer to the next command
+     * in a queue
+     */
+
+    cmd->host_scribble = NULL;
+    cmd->scsi_done = done;
+
+    cmd->result = 0;
+
+
+    /*
+     * Insert the cmd into the issue queue. Note that REQUEST SENSE
+     * commands are added to the head of the queue since any command will
+     * clear the contingent allegiance condition that exists and the
+     * sense data is only guaranteed to be valid while the condition exists.
+     */
+
+    /* NOTE(review): interrupts are disabled here and this function
+     * returns without an explicit sti(); the NCR5380_main() contract
+     * says it exits with interrupts disabled and the caller re-enables
+     * -- confirm callers of this entry point do so. */
+    cli();
+    if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+        LIST(cmd, hostdata->issue_queue);
+        cmd->host_scribble = (unsigned char *) hostdata->issue_queue;
+        hostdata->issue_queue = cmd;
+    } else {
+        /* Walk to the tail of the singly-linked issue queue. */
+        for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->host_scribble;
+            tmp = (Scsi_Cmnd *) tmp->host_scribble);
+        LIST(cmd, tmp);
+        tmp->host_scribble = (unsigned char *) cmd;
+    }
+#if (NDEBUG & NDEBUG_QUEUES)
+    printk("scsi%d : command added to %s of queue\n", instance->host_no,
+        (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
+#endif
+
+/* Run the coroutine if it isn't already running. */
+    run_main();
+    return 0;
+}
+
+/*
+ * Function : NCR5380_main (void)
+ *
+ * Purpose : NCR5380_main is a coroutine that runs as long as more work can
+ * be done on the NCR5380 host adapters in a system. Both
+ * NCR5380_queue_command() and NCR5380_intr() will try to start it
+ * in case it is not running.
+ *
+ * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
+ * reenable them. This prevents reentrancy and kernel stack overflow.
+ */
+
+static void NCR5380_main (void) {
+    Scsi_Cmnd *tmp, *prev;
+    struct Scsi_Host *instance;
+    struct NCR5380_hostdata *hostdata;
+    int done;
+
+    /*
+     * We run (with interrupts disabled) until we're sure that none of
+     * the host adapters have anything that can be done, at which point
+     * we set main_running to 0 and exit.
+     *
+     * Interrupts are enabled before doing various other internal
+     * instructions, after we've decided that we need to run through
+     * the loop again.
+     *
+     * this should prevent any race conditions.
+     */
+
+    do {
+        cli(); /* Freeze request queues */
+        done = 1;
+        for (instance = first_instance; instance &&
+            instance->hostt == the_template; instance=instance->next) {
+            hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+            cli();
+            if (!hostdata->connected) {
+#if (NDEBUG & NDEBUG_MAIN)
+                printk("scsi%d : not connected\n", instance->host_no);
+#endif
+                /*
+                 * Search through the issue_queue for a command destined
+                 * for a target that's not busy.
+                 */
+#if (NDEBUG & NDEBUG_LISTS)
+                /* Debug sweep: detect a cycle in the issue queue. */
+                for (tmp= (Scsi_Cmnd *) hostdata->issue_queue, prev=NULL; tmp && (tmp != prev); prev=tmp, tmp=(Scsi_Cmnd*)tmp->host_scribble)
+                    ;
+                /*printk("%p ", tmp);*/
+                if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
+#endif
+                for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
+                    prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
+                    tmp->host_scribble) {
+
+#if (NDEBUG & NDEBUG_LISTS)
+                    if (prev != tmp)
+                        printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun);
+#endif
+                    /* When we find one, remove it from the issue queue. */
+                    if (!(hostdata->busy[tmp->target] & (1 << tmp->lun))) {
+                        if (prev) {
+                            REMOVE(prev,prev->host_scribble,tmp,tmp->host_scribble);
+                            prev->host_scribble = tmp->host_scribble;
+                        } else {
+                            REMOVE(-1,hostdata->issue_queue,tmp,tmp->host_scribble);
+                            hostdata->issue_queue = (Scsi_Cmnd *) tmp->host_scribble;
+                        }
+                        tmp->host_scribble = NULL;
+
+                        /* reenable interrupts after finding one */
+                        sti();
+
+                        /*
+                         * Attempt to establish an I_T_L nexus here.
+                         * On success, instance->hostdata->connected is set.
+                         * On failure, we must add the command back to the
+                         * issue queue so we can keep trying.
+                         */
+#if (NDEBUG & (NDEBUG_MAIN | NDEBUG_QUEUES))
+                        printk("scsi%d : main() : command for target %d lun %d removed from issue_queue\n",
+                            instance->host_no, tmp->target, tmp->lun);
+#endif
+
+                        /*
+                         * A successful selection is defined as one that
+                         * leaves us with the command connected and
+                         * in hostdata->connected, OR has terminated the
+                         * command.
+                         *
+                         * With successful commands, we fall through
+                         * and see if we can do an information transfer,
+                         * with failures we will restart.
+                         */
+
+                        if (!NCR5380_select(instance, tmp,
+                            /*
+                             * REQUEST SENSE commands are issued without tagged
+                             * queueing, even on SCSI-II devices because the
+                             * contingent allegiance condition exists for the
+                             * entire unit.
+                             */
+                            (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
+                            TAG_NEXT)) {
+                            /* Selection succeeded (or terminated the
+                             * command); stop scanning the issue queue. */
+                            break;
+                        } else {
+                            cli();
+                            LIST(tmp, hostdata->issue_queue);
+                            tmp->host_scribble = (unsigned char *)
+                                hostdata->issue_queue;
+                            hostdata->issue_queue = tmp;
+                            done = 0;
+                            sti();
+#if (NDEBUG & (NDEBUG_MAIN | NDEBUG_QUEUES))
+                            printk("scsi%d : main(): select() failed, returned to issue_queue\n",
+                                instance->host_no);
+#endif
+                        }
+                    } /* if target/lun is not busy */
+                } /* for */
+            } /* if (!hostdata->connected) */
+
+            if (hostdata->connected
+#ifdef REAL_DMA
+                && !hostdata->dmalen
+#endif
+#ifdef USLEEP
+                && (!hostdata->time_expires || hostdata->time_expires >= jiffies)
+#endif
+                ) {
+                sti();
+#if (NDEBUG & NDEBUG_MAIN)
+                printk("scsi%d : main() : performing information transfer\n",
+                    instance->host_no);
+#endif
+                NCR5380_information_transfer(instance);
+#if (NDEBUG & NDEBUG_MAIN)
+                printk("scsi%d : main() : done set false\n", instance->host_no);
+#endif
+                done = 0;
+            } else
+                /* No connected command ready on this instance: leave the
+                 * instance loop; the do/while exits if done is still set. */
+                break;
+        } /* for instance */
+    } while (!done);
+    main_running = 0;
+}
+
+#ifndef DONT_USE_INTR
+/*
+ * Function : void NCR5380_intr (int irq)
+ *
+ * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
+ * from the disconnected queue, and restarting NCR5380_main()
+ * as required.
+ *
+ * Inputs : int irq, irq that caused this interrupt.
+ *
+ */
+
+static void NCR5380_intr (int irq, void *dev_id, struct pt_regs * regs) {
+    NCR5380_local_declare();
+    struct Scsi_Host *instance;
+    int done;
+    unsigned char basr;
+#if (NDEBUG & NDEBUG_INTR)
+    printk("scsi : NCR5380 irq %d triggered\n", irq);
+#endif
+    do {
+        done = 1;
+        for (instance = first_instance; instance && (instance->hostt ==
+            the_template); instance = instance->next)
+            if (instance->irq == irq) {
+
+                /* Look for pending interrupts */
+                NCR5380_setup(instance);
+                basr = NCR5380_read(BUS_AND_STATUS_REG);
+                /* XXX dispatch to appropriate routine if found and done=0 */
+                if (basr & BASR_IRQ) {
+#if (NDEBUG & NDEBUG_INTR)
+                    NCR5380_print(instance);
+#endif
+                    if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
+                        (SR_SEL | SR_IO)) {
+                        done = 0;
+                        sti();
+#if (NDEBUG & NDEBUG_INTR)
+                        printk("scsi%d : SEL interrupt\n", instance->host_no);
+#endif
+                        NCR5380_reselect(instance);
+                        (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+                    } else if (basr & BASR_PARITY_ERROR) {
+#if (NDEBUG & NDEBUG_INTR)
+                        printk("scsi%d : PARITY interrupt\n", instance->host_no);
+#endif
+                        (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+                    } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
+#if (NDEBUG & NDEBUG_INTR)
+                        printk("scsi%d : RESET interrupt\n", instance->host_no);
+#endif
+                        (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+                    } else {
+/*
+ * XXX the rest of the interrupt conditions should *only* occur during a
+ * DMA transfer, which I haven't gotten around to fixing yet.
+ */
+
+#if defined(REAL_DMA)
+                    /*
+                     * We should only get PHASE MISMATCH and EOP interrupts
+                     * if we have DMA enabled, so do a sanity check based on
+                     * the current setting of the MODE register.
+                     */
+
+                    if ((NCR5380_read(MODE_REG) & MR_DMA) && ((basr &
+                        BASR_END_DMA_TRANSFER) ||
+                        !(basr & BASR_PHASE_MATCH))) {
+                        /* Fixed: hostdata was referenced here without ever
+                         * being declared in this function; fetch it from
+                         * the instance so this branch compiles. */
+                        struct NCR5380_hostdata *hostdata =
+                            (struct NCR5380_hostdata *)instance->hostdata;
+                        /* Fixed: the original declared 'transfered' but
+                         * used 'transferred'; one spelling throughout. */
+                        int transferred;
+
+                        if (!hostdata->connected)
+                            /* Fixed: 'instance->hostno' -> 'host_no'. */
+                            panic("scsi%d : received end of DMA interrupt with no connected cmd\n",
+                                instance->host_no);
+
+                        transferred = (hostdata->dmalen - NCR5380_dma_residual(instance));
+                        hostdata->connected->SCp.this_residual -= transferred;
+                        hostdata->connected->SCp.ptr += transferred;
+                        hostdata->dmalen = 0;
+
+                        (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#if NCR_TIMEOUT
+                        {
+                            unsigned long timeout = jiffies + NCR_TIMEOUT;
+
+                            while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK
+                                && jiffies < timeout)
+                                ;
+                            if (jiffies >= timeout)
+                                /* Fixed: 'host' was undeclared here; use
+                                 * the instance we are iterating over. */
+                                printk("scsi%d: timeout at NCR5380.c:%d\n",
+                                    instance->host_no, __LINE__);
+                        }
+#else /* NCR_TIMEOUT */
+                        while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+#endif
+
+                        NCR5380_write(MODE_REG, MR_BASE);
+                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+                    }
+#else
+#if (NDEBUG & NDEBUG_INTR)
+                    printk("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
+#endif
+                    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#endif
+                    }
+                } /* if BASR_IRQ */
+                if (!done)
+                    run_main();
+            } /* if (instance->irq == irq) */
+    } while (!done);
+}
+#endif
+
+#ifdef NCR5380_STATS
+/*
+ * Fold the elapsed time of a completing read/write command into the
+ * per-target statistics and decrement the pending-command counter.
+ * A matching -= was applied at queue time, so the += here leaves the
+ * net elapsed time in time_read/time_write.
+ */
+static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd* cmd)
+{
+# ifdef NCR5380_STAT_LIMIT
+    /* Skip transfers below the statistics threshold. */
+    if (cmd->request_bufflen <= NCR5380_STAT_LIMIT)
+        return;
+# endif
+    if (cmd->cmnd[0] == WRITE || cmd->cmnd[0] == WRITE_6 ||
+        cmd->cmnd[0] == WRITE_10) {
+        hostdata->time_write[cmd->target] += (jiffies - hostdata->timebase);
+        /*hostdata->bytes_write[cmd->target] += cmd->request_bufflen;*/
+        hostdata->pendingw--;
+    } else if (cmd->cmnd[0] == READ || cmd->cmnd[0] == READ_6 ||
+        cmd->cmnd[0] == READ_10) {
+        hostdata->time_read[cmd->target] += (jiffies - hostdata->timebase);
+        /*hostdata->bytes_read[cmd->target] += cmd->request_bufflen;*/
+        hostdata->pendingr--;
+    }
+}
+#endif
+
+/*
+ * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ * int tag);
+ *
+ * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
+ * including ARBITRATION, SELECTION, and initial message out for
+ * IDENTIFY and queue messages.
+ *
+ * Inputs : instance - instantiation of the 5380 driver on which this
+ * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
+ * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
+ * the command that is presently connected.
+ *
+ * Returns : -1 if selection could not execute for some reason,
+ * 0 if selection succeeded or failed because the target
+ * did not respond.
+ *
+ * Side effects :
+ * If bus busy, arbitration failed, etc, NCR5380_select() will exit
+ * with registers as they should have been on entry - ie
+ * SELECT_ENABLE will be set appropriately, the NCR5380
+ * will cease to drive any SCSI bus signals.
+ *
+ * If successful : I_T_L or I_T_L_Q nexus will be established,
+ * instance->connected will be set to cmd.
+ * SELECT interrupt will be disabled.
+ *
+ * If failed (no target) : cmd->scsi_done() will be called, and the
+ * cmd->result host byte set to DID_BAD_TARGET.
+ */
+
+static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+    int tag) {
+    NCR5380_local_declare();
+    struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata*)
+        instance->hostdata;
+    unsigned char tmp[3], phase;
+    unsigned char *data;
+    int len;
+    unsigned long timeout;
+    NCR5380_setup(instance);
+
+    hostdata->restart_select = 0;
+#if defined (NDEBUG) && (NDEBUG & NDEBUG_ARBITRATION)
+    NCR5380_print(instance);
+    printk("scsi%d : starting arbitration, id = %d\n", instance->host_no,
+        instance->this_id);
+#endif
+    cli();
+
+    /*
+     * Set the phase bits to 0, otherwise the NCR5380 won't drive the
+     * data bus during SELECTION.
+     */
+
+    NCR5380_write(TARGET_COMMAND_REG, 0);
+
+
+    /*
+     * Start arbitration.
+     */
+
+    NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+    NCR5380_write(MODE_REG, MR_ARBITRATE);
+
+    sti();
+
+    /* Wait for arbitration logic to complete */
+#if NCR_TIMEOUT
+    {
+        unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
+
+        while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
+            && jiffies < timeout)
+            ;
+        if (jiffies >= timeout)
+        {
+            printk("scsi: arbitration timeout at %d\n", __LINE__);
+            NCR5380_write(MODE_REG, MR_BASE);
+            NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+            return -1;
+        }
+    }
+#else /* NCR_TIMEOUT */
+    while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS));
+#endif
+
+#if (NDEBUG & NDEBUG_ARBITRATION)
+    printk("scsi%d : arbitration complete\n", instance->host_no);
+/* Avoid GCC 2.4.5 asm needs to many reloads error */
+    __asm__("nop");
+#endif
+
+    /*
+     * The arbitration delay is 2.2us, but this is a minimum and there is
+     * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
+     * the integral nature of udelay().
+     *
+     */
+
+    udelay(3);
+
+    /* Check for lost arbitration */
+    if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
+        (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
+        (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
+        NCR5380_write(MODE_REG, MR_BASE);
+#if (NDEBUG & NDEBUG_ARBITRATION)
+        printk("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n",
+            instance->host_no);
+#endif
+        return -1;
+    }
+
+
+
+    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
+
+    if (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) {
+        NCR5380_write(MODE_REG, MR_BASE);
+        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+#if (NDEBUG & NDEBUG_ARBITRATION)
+        printk("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n",
+            instance->host_no);
+#endif
+        return -1;
+    }
+
+    /*
+     * Again, bus clear + bus settle time is 1.2us, however, this is
+     * a minimum so we'll udelay ceil(1.2)
+     */
+
+    udelay(2);
+
+#if (NDEBUG & NDEBUG_ARBITRATION)
+    printk("scsi%d : won arbitration\n", instance->host_no);
+#endif
+
+
+    /*
+     * Now that we have won arbitration, start Selection process, asserting
+     * the host and target ID's on the SCSI bus.
+     */
+
+    NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->target)));
+
+    /*
+     * Raise ATN while SEL is true before BSY goes false from arbitration,
+     * since this is the only way to guarantee that we'll get a MESSAGE OUT
+     * phase immediately after selection.
+     */
+
+    NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
+        ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
+    NCR5380_write(MODE_REG, MR_BASE);
+
+    /*
+     * Reselect interrupts must be turned off prior to the dropping of BSY,
+     * otherwise we will trigger an interrupt.
+     */
+    NCR5380_write(SELECT_ENABLE_REG, 0);
+
+    /*
+     * The initiator shall then wait at least two deskew delays and release
+     * the BSY signal.
+     */
+    udelay(1);        /* wingel -- wait two bus deskew delay >2*45ns */
+
+    /* Reset BSY */
+    NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
+        ICR_ASSERT_ATN | ICR_ASSERT_SEL));
+
+    /*
+     * Something weird happens when we cease to drive BSY - looks
+     * like the board/chip is letting us do another read before the
+     * appropriate propagation delay has expired, and we're confusing
+     * a BSY signal from ourselves as the target's response to SELECTION.
+     *
+     * A small delay (the 'C++' frontend breaks the pipeline with an
+     * unnecessary jump, making it work on my 386-33/Trantor T128, the
+     * tighter 'C' code breaks and requires this) solves the problem -
+     * the 1 us delay is arbitrary, and only used because this delay will
+     * be the same on other platforms and since it works here, it should
+     * work there.
+     *
+     * wingel suggests that this could be due to failing to wait
+     * one deskew delay.
+     */
+
+    udelay(1);
+
+#if (NDEBUG & NDEBUG_SELECTION)
+    printk("scsi%d : selecting target %d\n", instance->host_no, cmd->target);
+#endif
+
+    /*
+     * The SCSI specification calls for a 250 ms timeout for the actual
+     * selection.
+     */
+
+    timeout = jiffies + 250*HZ/1000;
+
+    /*
+     * XXX very interesting - we're seeing a bounce where the BSY we
+     * asserted is being reflected / still asserted (propagation delay?)
+     * and it's detecting as true. Sigh.
+     */
+
+    while ((jiffies < timeout) && !(NCR5380_read(STATUS_REG) &
+        (SR_BSY | SR_IO)));
+
+    if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
+        (SR_SEL | SR_IO)) {
+        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+        NCR5380_reselect(instance);
+        printk ("scsi%d : reselection after won arbitration?\n",
+            instance->host_no);
+        NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+        return -1;
+    }
+
+    /*
+     * No less than two deskew delays after the initiator detects the
+     * BSY signal is true, it shall release the SEL signal and may
+     * change the DATA BUS. -wingel
+     */
+
+    udelay(1);
+
+    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+    if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
+        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+        if (hostdata->targets_present & (1 << cmd->target)) {
+            printk("scsi%d : weirdness\n", instance->host_no);
+            if (hostdata->restart_select)
+                printk("\trestart select\n");
+#ifdef NDEBUG
+            NCR5380_print (instance);
+#endif
+            NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+            return -1;
+        }
+        cmd->result = DID_BAD_TARGET << 16;
+#ifdef NCR5380_STATS
+        collect_stats(hostdata, cmd);
+#endif
+        cmd->scsi_done(cmd);
+        NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+#if (NDEBUG & NDEBUG_SELECTION)
+        printk("scsi%d : target did not respond within 250ms\n",
+            instance->host_no);
+#endif
+        NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+        return 0;
+    }
+
+    hostdata->targets_present |= (1 << cmd->target);
+
+    /*
+     * Since we followed the SCSI spec, and raised ATN while SEL
+     * was true but before BSY was false during selection, the information
+     * transfer phase should be a MESSAGE OUT phase so that we can send the
+     * IDENTIFY message.
+     *
+     * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
+     * message (2 bytes) with a tag ID that we increment with every command
+     * until it wraps back to 0.
+     *
+     * XXX - it turns out that there are some broken SCSI-II devices,
+     * which claim to support tagged queuing but fail when more than
+     * some number of commands are issued at once.
+     */
+
+    /* Wait for start of REQ/ACK handshake */
+#ifdef NCR_TIMEOUT
+    {
+        unsigned long timeout = jiffies + NCR_TIMEOUT;
+
+        while (!(NCR5380_read(STATUS_REG) & SR_REQ) && jiffies < timeout);
+
+        if (jiffies >= timeout) {
+            /* Fixed: the format string has two conversions but the
+             * original passed only __LINE__, so printk consumed a
+             * garbage argument for the first %d. */
+            printk("scsi%d: timeout at NCR5380.c:%d\n",
+                instance->host_no, __LINE__);
+            NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+            return -1;
+        }
+    }
+#else /* NCR_TIMEOUT */
+    while (!(NCR5380_read(STATUS_REG) & SR_REQ));
+#endif /* def NCR_TIMEOUT */
+
+#if (NDEBUG & NDEBUG_SELECTION)
+    printk("scsi%d : target %d selected, going into MESSAGE OUT phase.\n",
+        instance->host_no, cmd->target);
+#endif
+    tmp[0] = IDENTIFY(((instance->irq == IRQ_NONE) ? 0 : 1), cmd->lun);
+#ifdef SCSI2
+    if (cmd->device->tagged_queue && (tag != TAG_NONE)) {
+        tmp[1] = SIMPLE_QUEUE_TAG;
+        if (tag == TAG_NEXT) {
+            /* 0 is TAG_NONE, used to imply no tag for this command */
+            if (cmd->device->current_tag == 0)
+                cmd->device->current_tag = 1;
+
+            cmd->tag = cmd->device->current_tag;
+            cmd->device->current_tag++;
+        } else
+            cmd->tag = (unsigned char) tag;
+
+        tmp[2] = cmd->tag;
+        hostdata->last_message = SIMPLE_QUEUE_TAG;
+        len = 3;
+    } else
+#endif /* def SCSI2 */
+    {
+        len = 1;
+        cmd->tag=0;
+    }
+
+    /* Send message(s) */
+    data = tmp;
+    phase = PHASE_MSGOUT;
+    NCR5380_transfer_pio(instance, &phase, &len, &data);
+#if (NDEBUG & NDEBUG_SELECTION)
+    printk("scsi%d : nexus established.\n", instance->host_no);
+#endif
+    /* XXX need to handle errors here */
+    hostdata->connected = cmd;
+#ifdef SCSI2
+    if (!cmd->device->tagged_queue)
+#endif
+        hostdata->busy[cmd->target] |= (1 << cmd->lun);
+
+    initialize_SCp(cmd);
+
+
+    return 0;
+}
+
+/*
+ * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using polled I/O
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * maximum number of bytes, 0 if all bytes or transfered or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ * XXX Note : handling for bus free may be useful.
+ */
+
+/*
+ * Note : this code is not as quick as it could be, however it
+ * IS 100% reliable, and for the actual data transfer where speed
+ * counts, we will always do a pseudo DMA or DMA transfer.
+ */
+
+/*
+ * Polled REQ/ACK handshake loop: moves up to *count bytes between **data
+ * and the SCSI bus in the phase named by *phase, one byte per handshake.
+ * *phase, *count and *data are updated in place; returns 0 if every byte
+ * was transferred (or we exited still in the same phase), -1 if the target
+ * changed phase early.
+ */
+static int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ register unsigned char p = *phase, tmp;
+ register int c = *count;
+ register unsigned char *d = *data;
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_PIO)
+ if (!(p & SR_IO))
+ printk("scsi%d : pio write %d bytes\n", instance->host_no, c);
+ else
+ printk("scsi%d : pio read %d bytes\n", instance->host_no, c);
+#endif
+
+ /*
+ * The NCR5380 chip will only drive the SCSI bus when the
+ * phase specified in the appropriate bits of the TARGET COMMAND
+ * REGISTER match the STATUS REGISTER
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+ do {
+ /*
+ * Wait for assertion of REQ, after which the phase bits will be
+ * valid
+ */
+ /* NOTE(review): unbounded busy-wait - hangs if the target never
+ * asserts REQ. */
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
+
+#if (NDEBUG & NDEBUG_HANDSHAKE)
+ printk("scsi%d : REQ detected\n", instance->host_no);
+#endif
+
+ /* Check for phase mismatch */
+ if ((tmp & PHASE_MASK) != p) {
+#if (NDEBUG & NDEBUG_PIO)
+ printk("scsi%d : phase mismatch\n", instance->host_no);
+ NCR5380_print_phase(instance);
+#endif
+ break;
+ }
+
+ /* Do actual transfer from SCSI bus to / from memory */
+ if (!(p & SR_IO))
+ NCR5380_write(OUTPUT_DATA_REG, *d);
+ else
+ *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
+
+ ++d;
+
+ /*
+ * The SCSI standard suggests that in MSGOUT phase, the initiator
+ * should drop ATN on the last byte of the message phase
+ * after REQ has been asserted for the handshake but before
+ * the initiator raises ACK.
+ */
+
+ if (!(p & SR_IO)) {
+ if (!((p & SR_MSG) && c > 1)) {
+ /* Last (or only) byte of a non-message write: ACK without ATN */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA);
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ACK);
+ } else {
+ /* More MSGOUT bytes follow: keep ATN asserted through ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN);
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
+ }
+ } else {
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+ }
+
+ /* Wait for the target to drop REQ, completing this byte's
+ * handshake (again with no timeout). */
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+
+#if (NDEBUG & NDEBUG_HANDSHAKE)
+ printk("scsi%d : req false, handshake complete\n", instance->host_no);
+#endif
+
+/*
+ * We have several special cases to consider during REQ/ACK handshaking :
+ * 1. We were in MSGOUT phase, and we are on the last byte of the
+ * message. ATN must be dropped as ACK is dropped.
+ *
+ * 2. We are in a MSGIN phase, and we are on the last byte of the
+ * message. We must exit with ACK asserted, so that the calling
+ * code may raise ATN before dropping ACK to reject the message.
+ *
+ * 3. ACK and ATN are clear and the target may proceed as normal.
+ */
+ if (!(p == PHASE_MSGIN && c == 1)) {
+ if (p == PHASE_MSGOUT && c > 1)
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ else
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+ } while (--c);
+
+#if (NDEBUG & NDEBUG_PIO)
+ printk("scsi%d : residual %d\n", instance->host_no, c);
+#endif
+
+ *count = c;
+ *data = d;
+ tmp = NCR5380_read(STATUS_REG);
+ /* Report the bus phase only while REQ is asserted; otherwise the
+ * phase bits are not valid. */
+ if (tmp & SR_REQ)
+ *phase = tmp & PHASE_MASK;
+ else
+ *phase = PHASE_UNKNOWN;
+
+ if (!c || (*phase == p))
+ return 0;
+ else
+ return -1;
+}
+
+/*
+ * Hard-reset the SCSI bus: with interrupts off, match the TCR to the
+ * current bus phase, pulse RST for 25 us, then release it.
+ */
+static void do_reset (struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ NCR5380_setup(host);
+
+ cli();
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ /* Assert RST; the 25 us delay satisfies the SCSI reset hold time
+ * (presumably - confirm against the spec) before releasing the bus. */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
+ udelay(25);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ sti();
+}
+
+/*
+ * Function : do_abort (Scsi_Host *host)
+ *
+ * Purpose : abort the currently established nexus. Should only be
+ * called from a routine which can tolerate busy-waiting, since the
+ * handshake below polls the bus with no timeout.
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int do_abort (struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ unsigned char tmp, *msgptr, phase;
+ int len;
+ NCR5380_setup(host);
+
+
+ /* Request message out phase */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ /*
+ * Wait for the target to indicate a valid phase by asserting
+ * REQ. Once this happens, we'll have either a MSGOUT phase
+ * and can immediately send the ABORT message, or we'll have some
+ * other phase and will have to source/sink data.
+ *
+ * We really don't care what value was on the bus or what value
+ * the target sees, so we just handshake.
+ */
+
+ /* NOTE(review): unbounded busy-wait for REQ - no timeout. */
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
+ /* Handshake one byte in whatever phase the target is in,
+ * keeping ATN asserted so it will switch to MSGOUT next. */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ }
+
+ /* Send the single-byte ABORT message via PIO. */
+ tmp = ABORT;
+ msgptr = &tmp;
+ len = 1;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio (host, &phase, &len, &msgptr);
+
+ /*
+ * If we got here, and the command completed successfully,
+ * we're about to go into bus free state.
+ */
+
+ /* len is the residual: 0 means the ABORT byte went out. */
+ return len ? -1 : 0;
+}
+
+#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
+/*
+ * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using either real
+ * or pseudo DMA.
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * maximum number of bytes, 0 if all bytes are transferred or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ */
+
+
+/*
+ * Transfer *count bytes in phase *phase using real DMA, polled real DMA,
+ * or pseudo DMA depending on compile-time configuration; updates *phase,
+ * *count and *data in place. Returns -1 on an immediate phase mismatch,
+ * otherwise 0 (or, in the pseudo-DMA path, the pread/pwrite result).
+ */
+static int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ register int c = *count;
+ register unsigned char p = *phase;
+ register unsigned char *d = *data;
+ unsigned char tmp;
+ int foo;
+#if defined(REAL_DMA_POLL)
+ int cnt, toPIO;
+ unsigned char saved_data = 0, overrun = 0, residue;
+#endif
+
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+
+ NCR5380_setup(instance);
+
+ /* Bail out immediately if the bus is not in the expected phase. */
+ if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
+ *phase = tmp;
+ return -1;
+ }
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+#ifdef READ_OVERRUNS
+ /* DMA N-2 bytes on reads; the last 1-2 bytes are finished by PIO
+ * below to work around the 5380 read-overrun (see long comment). */
+ if (p & SR_IO) {
+ c -= 2;
+ }
+#endif
+#if (NDEBUG & NDEBUG_DMA)
+ printk("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n",
+ instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" :
+ "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
+#endif
+ hostdata->dma_len = (p & SR_IO) ?
+ NCR5380_dma_read_setup(instance, d, c) :
+ NCR5380_dma_write_setup(instance, d, c);
+#endif
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+#ifdef REAL_DMA
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
+#elif defined(REAL_DMA_POLL)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#else
+ /*
+ * Note : on my sample board, watch-dog timeouts occurred when interrupts
+ * were not disabled for the duration of a single DMA transfer, from
+ * before the setting of DMA mode to after transfer of the last byte.
+ */
+
+#if defined(PSEUDO_DMA) && !defined(UNSAFE)
+ cli();
+#endif
+ /* KLL May need eop and parity in 53c400 */
+ if (hostdata->flags & FLAG_NCR53C400)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_PAR_CHECK
+ | MR_ENABLE_PAR_INTR | MR_ENABLE_EOP_INTR | MR_DMA_MODE
+ | MR_MONITOR_BSY);
+ else
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#endif /* def REAL_DMA */
+
+#if (NDEBUG & NDEBUG_DMA) & 0
+ printk("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
+#endif
+
+/*
+ * FOO stuff. For some UNAPPARENT reason, I'm getting
+ * watchdog timers fired on bootup for NO APPARENT REASON, meaning it's
+ * probably a timing problem.
+ *
+ * Since this is the only place I have back-to-back writes, perhaps this
+ * is the problem?
+ */
+
+ if (p & SR_IO) {
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
+ } else {
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(START_DMA_SEND_REG, 0);
+#ifndef FOO
+ udelay(1);
+#endif
+ }
+
+#if defined(REAL_DMA_POLL)
+ /* Poll until the DMA completes, the phase changes, or BSY is lost. */
+ do {
+ tmp = NCR5380_read(BUS_AND_STATUS_REG);
+ } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR |
+ BASR_END_DMA_TRANSFER)));
+
+/*
+ At this point, either we've completed DMA, or we have a phase mismatch,
+ or we've unexpectedly lost BUSY (which is a real error).
+
+ For write DMAs, we want to wait until the last byte has been
+ transferred out over the bus before we turn off DMA mode. Alas, there
+ seems to be no terribly good way of doing this on a 5380 under all
+ conditions. For non-scatter-gather operations, we can wait until REQ
+ and ACK both go false, or until a phase mismatch occurs. Gather-writes
+ are nastier, since the device will be expecting more data than we
+ are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
+ could test LAST BIT SENT to assure transfer (I imagine this is precisely
+ why this signal was added to the newer chips) but on the older 538[01]
+ this signal does not exist. The workaround for this lack is a watchdog;
+ we bail out of the wait-loop after a modest amount of wait-time if
+ the usual exit conditions are not met. Not a terribly clean or
+ correct solution :-%
+
+ Reads are equally tricky due to a nasty characteristic of the NCR5380.
+ If the chip is in DMA mode for an READ, it will respond to a target's
+ REQ by latching the SCSI data into the INPUT DATA register and asserting
+ ACK, even if it has _already_ been notified by the DMA controller that
+ the current DMA transfer has completed! If the NCR5380 is then taken
+ out of DMA mode, this already-acknowledged byte is lost.
+
+ This is not a problem for "one DMA transfer per command" reads, because
+ the situation will never arise... either all of the data is DMA'ed
+ properly, or the target switches to MESSAGE IN phase to signal a
+ disconnection (either operation bringing the DMA to a clean halt).
+ However, in order to handle scatter-reads, we must work around the
+ problem. The chosen fix is to DMA N-2 bytes, then check for the
+ condition before taking the NCR5380 out of DMA mode. One or two extra
+ bytes are transferred via PIO as necessary to fill out the original
+ request.
+*/
+
+ if (p & SR_IO) {
+#ifdef READ_OVERRUNS
+ udelay(10);
+ if (((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH|BASR_ACK)) ==
+ (BASR_PHASE_MATCH | BASR_ACK))) {
+ /* NOTE(review): INPUT_DATA_REGISTER here vs INPUT_DATA_REG in the
+ * pseudo-DMA path below - confirm both names are defined. */
+ saved_data = NCR5380_read(INPUT_DATA_REGISTER);
+ overrun = 1;
+ }
+#endif
+ } else {
+ /* Write watchdog: give up after a bounded number of polls. */
+ int limit = 100;
+ while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) ||
+ (NCR5380_read(STATUS_REG) & SR_REQ)) {
+ if (!(tmp & BASR_PHASE_MATCH)) break;
+ if (--limit < 0) break;
+ }
+ }
+
+
+#if (NDEBUG & NDEBUG_DMA)
+ printk("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n",
+ instance->host_no, tmp, NCR5380_read(STATUS_REG));
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /* Account for whatever the DMA engine did not transfer. */
+ residue = NCR5380_dma_residual(instance);
+ c -= residue;
+ *count -= c;
+ *data += c;
+ *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
+
+#ifdef READ_OVERRUNS
+ if (*phase == p && (p & SR_IO) && residue == 0) {
+ if (overrun) {
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Got an input overrun, using saved byte\n");
+#endif
+ **data = saved_data;
+ *data += 1;
+ *count -= 1;
+ cnt = toPIO = 1;
+ } else {
+ printk("No overrun??\n");
+ cnt = toPIO = 2;
+ }
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Doing %d-byte PIO to 0x%X\n", cnt, *data);
+#endif
+ /* Finish the 1-2 withheld bytes by PIO (see comment above). */
+ NCR5380_transfer_pio(instance, phase, &cnt, data);
+ *count -= toPIO - cnt;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n",
+ *data, *count, *(*data+*count-1), *(*data+*count));
+#endif
+ return 0;
+
+#elif defined(REAL_DMA)
+ return 0;
+#else /* defined(REAL_DMA_POLL) */
+ if (p & SR_IO) {
+#ifdef DMA_WORKS_RIGHT
+ foo = NCR5380_pread(instance, d, c);
+#else
+ int diff = 1;
+ if (hostdata->flags & FLAG_NCR53C400) {
+ diff=0;
+ }
+
+ if (!(foo = NCR5380_pread(instance, d, c - diff))) {
+ /*
+ * We can't disable DMA mode after successfully transferring
+ * what we plan to be the last byte, since that would open up
+ * a race condition where if the target asserted REQ before
+ * we got the DMA mode reset, the NCR5380 would have latched
+ * an additional byte into the INPUT DATA register and we'd
+ * have dropped it.
+ *
+ * The workaround was to transfer one fewer bytes than we
+ * intended to with the pseudo-DMA read function, wait for
+ * the chip to latch the last byte, read it, and then disable
+ * pseudo-DMA mode.
+ *
+ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+ * REQ is deasserted when ACK is asserted, and not reasserted
+ * until ACK goes false. Since the NCR5380 won't lower ACK
+ * until DACK is asserted, which won't happen unless we twiddle
+ * the DMA port or we take the NCR5380 out of DMA mode, we
+ * can guarantee that we won't handshake another extra
+ * byte.
+ */
+
+ if (!(hostdata->flags & FLAG_NCR53C400)) {
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ));
+ /* Wait for clean handshake */
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ d[c - 1] = NCR5380_read(INPUT_DATA_REG);
+ }
+ }
+#endif
+ } else {
+#ifdef DMA_WORKS_RIGHT
+ foo = NCR5380_pwrite(instance, d, c);
+#else
+ int timeout;
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("About to pwrite %d bytes\n", c);
+#endif
+ if (!(foo = NCR5380_pwrite(instance, d, c))) {
+ /*
+ * Wait for the last byte to be sent. If REQ is being asserted for
+ * the byte we're interested, we'll ACK it and it will go false.
+ */
+ if (!(hostdata->flags & FLAG_HAS_LAST_BYTE_SENT)) {
+ timeout = 20000;
+#if 1
+#if 1
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) &
+ BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) &
+ BASR_PHASE_MATCH));
+#else
+ if (NCR5380_read(STATUS_REG) & SR_REQ) {
+ for (; timeout &&
+ !(NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+ --timeout);
+ for (; timeout && (NCR5380_read(STATUS_REG) & SR_REQ);
+ --timeout);
+ }
+#endif
+
+
+#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
+ if (!timeout)
+ printk("scsi%d : timed out on last byte\n",
+ instance->host_no);
+#endif
+
+
+ /* Probe once whether this chip supports LAST BYTE SENT and
+ * remember the answer in hostdata->flags. */
+ if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
+ hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
+ if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
+ hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
+#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
+ printk("scsi%d : last bit sent works\n",
+ instance->host_no);
+#endif
+ }
+ }
+ } else {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("Waiting for LASTBYTE\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("Got LASTBYTE\n");
+#endif
+ }
+#else
+ udelay (5);
+#endif
+ }
+#endif
+ }
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Checking for IRQ\n");
+#endif
+ if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got it, reading reset interrupt reg\n");
+#endif
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+ printk("53C400w: IRQ NOT THERE!\n");
+ }
+ }
+
+ *data = d + c;
+ *count = 0;
+ *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
+#if 0
+ NCR5380_print_phase(instance);
+#endif
+#if defined(PSEUDO_DMA) && !defined(UNSAFE)
+ sti();
+#endif /* defined(PSEUDO_DMA) && !defined(UNSAFE) */
+ return foo;
+#endif /* defined(REAL_DMA_POLL) */
+}
+#endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */
+
+/*
+ * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
+ *
+ * Purpose : run through the various SCSI phases and do as the target
+ * directs us to. Operates on the currently connected command,
+ * instance->connected.
+ *
+ * Inputs : instance, instance for which we are doing commands
+ *
+ * Side effects : SCSI things happen, the disconnected queue will be
+ * modified if a command disconnects, *instance->connected will
+ * change.
+ *
+ * XXX Note : we need to watch for bus free or a reset condition here
+ * to recover from an unexpected bus free condition.
+ */
+
+/*
+ * Main information-transfer state machine for the connected command
+ * (hostdata->connected): loops reading the bus phase and dispatching to
+ * DATA IN/OUT, MSGIN, MSGOUT, CMDOUT and STATIN handlers until the
+ * command completes, disconnects, or is aborted.
+ */
+static void NCR5380_information_transfer (struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned char msgout = NOP;
+ int sink = 0; /* when set, sink target bytes until MSGOUT is reached */
+ int len;
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ int transfersize;
+#endif
+ unsigned char *data;
+ unsigned char phase, tmp, extended_msg[10], old_phase=0xff;
+ Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected;
+ NCR5380_setup(instance);
+
+ while (1) {
+ tmp = NCR5380_read(STATUS_REG);
+ /* We only have a valid SCSI phase when REQ is asserted */
+ if (tmp & SR_REQ) {
+ phase = (tmp & PHASE_MASK);
+ if (phase != old_phase) {
+ old_phase = phase;
+#if (NDEBUG & NDEBUG_INFORMATION)
+ NCR5380_print_phase(instance);
+#endif
+ }
+
+ /* While sinking, ACK (and discard) bytes with ATN held until
+ * the target finally enters MSGOUT. */
+ if (sink && (phase != PHASE_MSGOUT)) {
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 0;
+ continue;
+ }
+
+ switch (phase) {
+ case PHASE_DATAIN:
+ case PHASE_DATAOUT:
+#if (NDEBUG & NDEBUG_NO_DATAOUT)
+ printk("scsi%d : NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n",
+ instance->host_no);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->done(cmd);
+ return;
+#endif
+ /*
+ * If there is no room left in the current buffer in the
+ * scatter-gather list, move onto the next one.
+ */
+
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+#if (NDEBUG & NDEBUG_INFORMATION)
+ printk("scsi%d : %d bytes and %d buffers left\n",
+ instance->host_no, cmd->SCp.this_residual,
+ cmd->SCp.buffers_residual);
+#endif
+ }
+
+ /*
+ * The preferred transfer method is going to be
+ * PSEUDO-DMA for systems that are strictly PIO,
+ * since we can let the hardware do the handshaking.
+ *
+ * For this to work, we need to know the transfersize
+ * ahead of time, since the pseudo-DMA code will sit
+ * in an unconditional loop.
+ */
+
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ /* KLL
+ * PSEUDO_DMA is defined here. If this is the g_NCR5380
+ * driver then it will always be defined, so the
+ * FLAG_NO_PSEUDO_DMA is used to inhibit PDMA in the base
+ * NCR5380 case. I think this is a fairly clean solution.
+ * We supplement these 2 if's with the flag.
+ */
+#ifdef NCR5380_dma_xfer_len
+ if (!cmd->device->borken &&
+ !(hostdata->flags & FLAG_NO_PSEUDO_DMA) &&
+ (transfersize = NCR5380_dma_xfer_len(instance, cmd)) != 0) {
+#else
+ transfersize = cmd->transfersize;
+
+#ifdef LIMIT_TRANSFERSIZE /* If we have problems with interrupt service */
+ if( transfersize > 512 )
+ transfersize = 512;
+#endif /* LIMIT_TRANSFERSIZE */
+
+ if (!cmd->device->borken && transfersize &&
+ !(hostdata->flags & FLAG_NO_PSEUDO_DMA) &&
+ cmd->SCp.this_residual && !(cmd->SCp.this_residual %
+ transfersize)) {
+ /* Limit transfers to 32K, for xx400 & xx406
+ * pseudoDMA that transfers in 128 bytes blocks. */
+ if (transfersize > 32*1024)
+ transfersize = 32*1024;
+#endif
+ len = transfersize;
+ if (NCR5380_transfer_dma(instance, &phase,
+ &len, (unsigned char **) &cmd->SCp.ptr)) {
+ /*
+ * If the watchdog timer fires, all future accesses to this
+ * device will use the polled-IO.
+ */
+ printk("scsi%d : switching target %d lun %d to slow handshake\n",
+ instance->host_no, cmd->target, cmd->lun);
+ cmd->device->borken = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->done(cmd);
+ /* XXX - need to source or sink data here, as appropriate */
+ } else
+ cmd->SCp.this_residual -= transfersize - len;
+ } else
+#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
+ NCR5380_transfer_pio(instance, &phase,
+ (int *) &cmd->SCp.this_residual, (unsigned char **)
+ &cmd->SCp.ptr);
+ break;
+ case PHASE_MSGIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Message = tmp;
+
+ switch (tmp) {
+ /*
+ * Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ *
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands.
+ */
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+#if (NDEBUG & NDEBUG_LINKED)
+ printk("scsi%d : target %d lun %d linked command complete.\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+ /*
+ * Sanity check : A linked command should only terminate with
+ * one of these messages if there are more linked commands
+ * available.
+ */
+
+ if (!cmd->next_link) {
+ /* FIX: comma after the format string was missing, which
+ * made this printk a syntax error. */
+ printk("scsi%d : target %d lun %d linked command complete, no next_link\n",
+ instance->host_no, cmd->target, cmd->lun);
+ sink = 1;
+ do_abort (instance);
+ return;
+ }
+
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+#if (NDEBUG & NDEBUG_LINKED)
+ printk("scsi%d : target %d lun %d linked request done, calling scsi_done().\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+#ifdef NCR5380_STATS
+ collect_stats(hostdata, cmd);
+#endif
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+#endif /* def LINKED */
+ case ABORT:
+ case COMMAND_COMPLETE:
+ /* Accept message by clearing ACK */
+ sink = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ hostdata->connected = NULL;
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command for target %d, lun %d completed\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+
+ /*
+ * I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ *
+ * If it was a REQUEST SENSE command, we need some way
+ * to differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the result
+ * code unchanged.
+ *
+ * The non-obvious place is where the REQUEST SENSE failed
+ */
+
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+
+#ifdef AUTOSENSE
+ if ((cmd->cmnd[0] != REQUEST_SENSE) &&
+ (cmd->SCp.Status == CHECK_CONDITION)) {
+#if (NDEBUG & NDEBUG_AUTOSENSE)
+ printk("scsi%d : performing request sense\n",
+ instance->host_no);
+#endif
+ /* Rewrite the command in place as a REQUEST SENSE and
+ * requeue it at the head of the issue queue. */
+ cmd->cmnd[0] = REQUEST_SENSE;
+ cmd->cmnd[1] &= 0xe0;
+ cmd->cmnd[2] = 0;
+ cmd->cmnd[3] = 0;
+ cmd->cmnd[4] = sizeof(cmd->sense_buffer);
+ cmd->cmnd[5] = 0;
+
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *) cmd->sense_buffer;
+ cmd->SCp.this_residual = sizeof(cmd->sense_buffer);
+
+ cli();
+ LIST(cmd,hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *) cmd;
+ sti();
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : REQUEST SENSE added to head of issue queue\n",instance->host_no);
+#endif
+ } else {
+#endif /* def AUTOSENSE */
+#ifdef NCR5380_STATS
+ collect_stats(hostdata, cmd);
+#endif
+ cmd->scsi_done(cmd);
+ }
+
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+ return;
+ case MESSAGE_REJECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ switch (hostdata->last_message) {
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ cmd->device->tagged_queue = 0;
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+ break;
+ default:
+ break;
+ }
+ /* XXX(review): falls through into the DISCONNECT handling
+ * below, as in the historical driver; a MESSAGE REJECT does
+ * not imply the target is disconnecting - verify this is
+ * intentional (later drivers added a break here). */
+ case DISCONNECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ cmd->device->disconnect = 1;
+ cli();
+ LIST(cmd,hostdata->disconnected_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->disconnected_queue;
+ hostdata->connected = NULL;
+ hostdata->disconnected_queue = cmd;
+ sti();
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command for target %d lun %d was moved from connected to"
+ " the disconnected_queue\n", instance->host_no,
+ cmd->target, cmd->lun);
+#endif
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /* Wait for bus free to avoid nasty timeouts */
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+#if 0
+ NCR5380_print_status(instance);
+#endif
+ return;
+ /*
+ * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
+ * operation, in violation of the SCSI spec so we can safely
+ * ignore SAVE/RESTORE pointers calls.
+ *
+ * Unfortunately, some disks violate the SCSI spec and
+ * don't issue the required SAVE_POINTERS message before
+ * disconnecting, and we have to break spec to remain
+ * compatible.
+ */
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ break;
+ case EXTENDED_MESSAGE:
+/*
+ * Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't
+ * include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ *
+ * Start the extended message buffer with the EXTENDED_MESSAGE
+ * byte, since print_msg() wants the whole thing.
+ */
+ extended_msg[0] = EXTENDED_MESSAGE;
+ /* Accept first byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : receiving extended message\n",
+ instance->host_no);
+#endif
+
+ len = 2;
+ data = extended_msg + 1;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : length=%d, code=0x%02x\n",
+ instance->host_no, (int) extended_msg[1],
+ (int) extended_msg[2]);
+#endif
+
+ if (!len && extended_msg[1] <=
+ (sizeof (extended_msg) - 1)) {
+ /* Accept third byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ len = extended_msg[1] - 1;
+ data = extended_msg + 3;
+ phase = PHASE_MSGIN;
+
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : message received, residual %d\n",
+ instance->host_no, len);
+#endif
+
+ switch (extended_msg[2]) {
+ case EXTENDED_SDTR:
+ case EXTENDED_WDTR:
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ tmp = 0;
+ }
+ } else if (len) {
+ printk("scsi%d: error receiving extended message\n",
+ instance->host_no);
+ tmp = 0;
+ } else {
+ printk("scsi%d: extended message code %02x length %d is too long\n",
+ instance->host_no, extended_msg[2], extended_msg[1]);
+ tmp = 0;
+ }
+ /* Fall through to reject message */
+
+ /*
+ * If we get something weird that we aren't expecting,
+ * reject it.
+ */
+ default:
+ if (!tmp) {
+ printk("scsi%d: rejecting message ", instance->host_no);
+ print_msg (extended_msg);
+ printk("\n");
+ } else if (tmp != EXTENDED_MESSAGE)
+ printk("scsi%d: rejecting unknown message %02x from target %d, lun %d\n",
+ instance->host_no, tmp, cmd->target, cmd->lun);
+ else
+ /* FIX: arguments now match the format string - code is
+ * extended_msg[2], length is extended_msg[1] (the old
+ * code passed [1] and [0], printing length as the code
+ * and the EXTENDED_MESSAGE byte as the length). */
+ printk("scsi%d: rejecting unknown extended message code %02x, length %d from target %d, lun %d\n",
+ instance->host_no, extended_msg[2], extended_msg[1], cmd->target, cmd->lun);
+
+ msgout = MESSAGE_REJECT;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ break;
+ } /* switch (tmp) */
+ break;
+ case PHASE_MSGOUT:
+ len = 1;
+ data = &msgout;
+ hostdata->last_message = msgout;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ if (msgout == ABORT) {
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->connected = NULL;
+ cmd->result = DID_ERROR << 16;
+#ifdef NCR5380_STATS
+ collect_stats(hostdata, cmd);
+#endif
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return;
+ }
+ msgout = NOP;
+ break;
+ case PHASE_CMDOUT:
+ len = cmd->cmd_len;
+ data = cmd->cmnd;
+ /*
+ * XXX for performance reasons, on machines with a
+ * PSEUDO-DMA architecture we should probably
+ * use the dma transfer function.
+ */
+ NCR5380_transfer_pio(instance, &phase, &len,
+ &data);
+#ifdef USLEEP
+ if (!disconnect && should_disconnect(cmd->cmnd[0])) {
+ hostdata->time_expires = jiffies + USLEEP_SLEEP;
+#if (NDEBUG & NDEBUG_USLEEP)
+ printk("scsi%d : issued command, sleeping until %ul\n", instance->host_no,
+ hostdata->time_expires);
+#endif
+ NCR5380_set_timer (instance);
+ return;
+ }
+#endif /* def USLEEP */
+ break;
+ case PHASE_STATIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Status = tmp;
+ break;
+ default:
+ printk("scsi%d : unknown phase\n", instance->host_no);
+#ifdef NDEBUG
+ NCR5380_print(instance);
+#endif
+ } /* switch(phase) */
+ } /* if (tmp & SR_REQ) */
+#ifdef USLEEP
+ else {
+ if (!disconnect && hostdata->time_expires && jiffies >
+ hostdata->time_expires) {
+ hostdata->time_expires = jiffies + USLEEP_SLEEP;
+#if (NDEBUG & NDEBUG_USLEEP)
+ printk("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no,
+ hostdata->time_expires);
+#endif
+ NCR5380_set_timer (instance);
+ return;
+ }
+ }
+#endif
+ } /* while (1) */
+}
+
+/*
+ * Function : void NCR5380_reselect (struct Scsi_Host *instance)
+ *
+ * Purpose : does reselection, initializing the instance->connected
+ * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q
+ * nexus has been reestablished,
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ */
+
+
+static void NCR5380_reselect (struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned char target_mask;
+ unsigned char lun, phase;
+ int len;
+#ifdef SCSI2
+ /* NOTE(review): tag is never assigned before use below; the #error
+ * under SCSI2 prevents this path from compiling anyway. */
+ unsigned char tag;
+#endif
+ unsigned char msg[3];
+ unsigned char *data;
+ Scsi_Cmnd *tmp = NULL, *prev;
+ int abort = 0;
+ NCR5380_setup(instance);
+
+ /*
+ * Disable arbitration, etc. since the host adapter obviously
+ * lost, and tell an interrupted NCR5380_select() to restart.
+ */
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ hostdata->restart_select = 1;
+
+ /* The data bus holds our ID plus the reselecting target's ID;
+ * mask ours off to get the target's bit. */
+ target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
+
+#if (NDEBUG & NDEBUG_RESELECTION)
+ printk("scsi%d : reselect\n", instance->host_no);
+#endif
+
+ /*
+ * At this point, we have detected that our SCSI ID is on the bus,
+ * SEL is true and BSY was false for at least one bus settle delay
+ * (400 ns).
+ *
+ * We must assert BSY ourselves, until the target drops the SEL
+ * signal.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
+
+ /* NOTE(review): unbounded busy-waits here and below - no timeout. */
+ while (NCR5380_read(STATUS_REG) & SR_SEL);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * Wait for target to go into MSGIN.
+ */
+
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ));
+
+ /* Read the (single-byte) IDENTIFY message via PIO. */
+ len = 1;
+ data = msg;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+
+ if (!(msg[0] & 0x80)) {
+ printk("scsi%d : expecting IDENTIFY message, got ",
+ instance->host_no);
+ print_msg(msg);
+ abort = 1;
+ } else {
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ /* Low three bits of IDENTIFY carry the LUN. */
+ lun = (msg[0] & 0x07);
+
+ /*
+ * We need to add code for SCSI-II to track which devices have
+ * I_T_L_Q nexuses established, and which have simple I_T_L
+ * nexuses so we can chose to do additional data transfer.
+ */
+
+#ifdef SCSI2
+#error "SCSI-II tagged queueing is not supported yet"
+#endif
+
+ /*
+ * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue.
+ */
+
+
+ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL;
+ tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
+ if ((target_mask == (1 << tmp->target)) && (lun == tmp->lun)
+#ifdef SCSI2
+ && (tag == tmp->tag)
+#endif
+) {
+ /* Unlink the matched command from the singly-linked
+ * disconnected queue (threaded through host_scribble). */
+ if (prev) {
+ REMOVE(prev,prev->host_scribble,tmp,tmp->host_scribble);
+ prev->host_scribble = tmp->host_scribble;
+ } else {
+ REMOVE(-1,hostdata->disconnected_queue,tmp,tmp->host_scribble);
+ hostdata->disconnected_queue = (Scsi_Cmnd *) tmp->host_scribble;
+ }
+ tmp->host_scribble = NULL;
+ break;
+ }
+
+ if (!tmp) {
+#ifdef SCSI2
+ printk("scsi%d : warning : target bitmask %02x lun %d tag %d not in disconnect_queue.\n",
+ instance->host_no, target_mask, lun, tag);
+#else
+ printk("scsi%d : warning : target bitmask %02x lun %d not in disconnect_queue.\n",
+ instance->host_no, target_mask, lun);
+#endif
+ /*
+ * Since we have an established nexus that we can't do anything with,
+ * we must abort it.
+ */
+ abort = 1;
+ }
+ }
+
+ if (abort) {
+ do_abort (instance);
+ } else {
+ /* Nexus reestablished: the found command becomes the connected one. */
+ hostdata->connected = tmp;
+#if (NDEBUG & NDEBUG_RESELECTION)
+ printk("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n",
+ instance->host_no, tmp->target, tmp->lun, tmp->tag);
+#endif
+ }
+}
+
+/*
+ * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
+ *
+ * Purpose : called by interrupt handler when DMA finishes or a phase
+ * mismatch occurs (which would finish the DMA transfer).
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ * Returns : pointer to the Scsi_Cmnd structure for which the I_T_L
+ * nexus has been reestablished, on failure NULL is returned.
+ */
+
+#ifdef REAL_DMA
+/*
+ * NCR5380_dma_complete - finish bookkeeping for a real-DMA transfer.
+ *
+ * Fixes relative to the original body (which could not compile when
+ * REAL_DMA was defined):
+ *  - parameter declared as struct Scsi_Host *, the type documented in
+ *    the function comment above and the only one for which
+ *    NCR5380_setup() and the hostdata extraction make sense
+ *    ("NCR5380_instance" is declared nowhere);
+ *  - the hostdata cast's closing parenthesis was misplaced:
+ *    "(struct NCR5380_hostdata * instance->hostdata)";
+ *  - the residual is computed against hostdata->dma_len, the field the
+ *    driver actually maintains (declared under REAL_DMA in NCR5380.h;
+ *    struct Scsi_Host has no "dmalen" member).
+ */
+static void NCR5380_dma_complete (struct Scsi_Host *instance) {
+    NCR5380_local_declare();
+    struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+	instance->hostdata;
+    int transferred;
+    NCR5380_setup(instance);
+
+    /*
+     * XXX this might not be right.
+     *
+     * Wait for final byte to transfer, ie wait for ACK to go false.
+     *
+     * We should use the Last Byte Sent bit, unfortunately this is
+     * not available on the 5380/5381 (only the various CMOS chips)
+     */
+
+    while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+
+    NCR5380_write(MODE_REG, MR_BASE);
+    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+    /*
+     * The only places we should see a phase mismatch and have to send
+     * data from the same set of pointers will be the data transfer
+     * phases.  So, residual, requested length are only important here.
+     */
+
+    if (!(hostdata->connected->SCp.phase & SR_CD)) {
+	transferred = hostdata->dma_len - NCR5380_dma_residual();
+	hostdata->connected->SCp.this_residual -= transferred;
+	hostdata->connected->SCp.ptr += transferred;
+    }
+}
+#endif /* def REAL_DMA */
+
+/*
+ * Function : int NCR5380_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : abort a command
+ *
+ * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
+ * host byte of the result field to, if zero DID_ABORTED is
+ * used.
+ *
+ * Returns : 0 - success, -1 on failure.
+ *
+ * XXX - there is no way to abort the command that is currently
+ * connected, you have to wait for it to complete. If this is
+ * a problem, we could implement longjmp() / setjmp(), setjmp()
+ * called where the loop started in NCR5380_main().
+ */
+
+#ifndef NCR5380_abort
+static
+#endif
+int NCR5380_abort (Scsi_Cmnd *cmd) {
+    NCR5380_local_declare();
+    struct Scsi_Host *instance = cmd->host;
+    struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+	instance->hostdata;
+    Scsi_Cmnd *tmp, **prev;
+
+    /*
+     * BUG FIX: the banner, command dump and controller status were
+     * printed twice in immediate succession (copy/paste duplication).
+     * The second copy has been removed; all other statements are
+     * unchanged.
+     */
+    printk("scsi%d : aborting command\n", instance->host_no);
+    print_Scsi_Cmnd (cmd);
+
+    NCR5380_print_status (instance);
+
+    cli();
+    NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_ABORT)
+    printk("scsi%d : abort called\n", instance->host_no);
+    printk(" basr 0x%X, sr 0x%X\n",
+	NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
+#endif
+
+#if 0
+/*
+ * Case 1 : If the command is the currently executing command,
+ * we'll set the aborted flag and return control so that
+ * information transfer routine can exit cleanly.
+ */
+
+    if (hostdata->connected == cmd) {
+#if (NDEBUG & NDEBUG_ABORT)
+	printk("scsi%d : aborting connected command\n", instance->host_no);
+#endif
+	hostdata->aborted = 1;
+/*
+ * We should perform BSY checking, and make sure we haven't slipped
+ * into BUS FREE.
+ */
+
+	NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN);
+/*
+ * Since we can't change phases until we've completed the current
+ * handshake, we have to source or sink a byte of data if the current
+ * phase is not MSGOUT.
+ */
+
+/*
+ * Return control to the executing NCR drive so we can clear the
+ * aborted flag and get back into our main loop.
+ */
+
+	return 0;
+    }
+#endif
+
+/*
+ * Case 2 : If the command hasn't been issued yet, we simply remove it
+ * from the issue queue.
+ */
+#if (NDEBUG & NDEBUG_ABORT)
+    /* KLL */
+    printk("scsi%d : abort going into loop.\n", instance->host_no);
+#endif
+    for (prev = (Scsi_Cmnd **) &(hostdata->issue_queue),
+	tmp = (Scsi_Cmnd *) hostdata->issue_queue;
+	tmp; prev = (Scsi_Cmnd **) &(tmp->host_scribble), tmp =
+	(Scsi_Cmnd *) tmp->host_scribble)
+	if (cmd == tmp) {
+	    REMOVE(5,*prev,tmp,tmp->host_scribble);
+	    (*prev) = (Scsi_Cmnd *) tmp->host_scribble;
+	    tmp->host_scribble = NULL;
+	    tmp->result = DID_ABORT << 16;
+	    sti();
+#if (NDEBUG & NDEBUG_ABORT)
+	    printk("scsi%d : abort removed command from issue queue.\n",
+		instance->host_no);
+#endif
+	    tmp->done(tmp);
+	    return SCSI_ABORT_SUCCESS;
+	}
+#if (NDEBUG & NDEBUG_ABORT)
+    /* KLL */
+    else if (prev == tmp) printk("scsi%d : LOOP\n", instance->host_no);
+#endif
+
+/*
+ * Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code.  So,
+ * we fail.
+ */
+
+    if (hostdata->connected) {
+	sti();
+#if (NDEBUG & NDEBUG_ABORT)
+	printk("scsi%d : abort failed, command connected.\n", instance->host_no);
+#endif
+	return SCSI_ABORT_NOT_RUNNING;
+    }
+
+/*
+ * Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message.
+ *
+ * This case is especially ugly. In order to reestablish the nexus, we
+ * need to call NCR5380_select().  The easiest way to implement this
+ * function was to abort if the bus was busy, and let the interrupt
+ * handler triggered on the SEL for reselect take care of lost arbitrations
+ * where necessary, meaning interrupts need to be enabled.
+ *
+ * When interrupts are enabled, the queues may change - so we
+ * can't remove it from the disconnected queue before selecting it
+ * because that could cause a failure in hashing the nexus if that
+ * device reselected.
+ *
+ * Since the queues may change, we can't use the pointers from when we
+ * first locate it.
+ *
+ * So, we must first locate the command, and if NCR5380_select()
+ * succeeds, then issue the abort, relocate the command and remove
+ * it from the disconnected queue.
+ */
+
+    for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp;
+	tmp = (Scsi_Cmnd *) tmp->host_scribble)
+	if (cmd == tmp) {
+	    sti();
+#if (NDEBUG & NDEBUG_ABORT)
+	    printk("scsi%d : aborting disconnected command.\n", instance->host_no);
+#endif
+
+	    if (NCR5380_select (instance, cmd, (int) cmd->tag))
+		return SCSI_ABORT_BUSY;
+
+#if (NDEBUG & NDEBUG_ABORT)
+	    printk("scsi%d : nexus reestablished.\n", instance->host_no);
+#endif
+
+	    do_abort (instance);
+
+	    cli();
+	    for (prev = (Scsi_Cmnd **) &(hostdata->disconnected_queue),
+		tmp = (Scsi_Cmnd *) hostdata->disconnected_queue;
+		tmp; prev = (Scsi_Cmnd **) &(tmp->host_scribble), tmp =
+		(Scsi_Cmnd *) tmp->host_scribble)
+		if (cmd == tmp) {
+		    REMOVE(5,*prev,tmp,tmp->host_scribble);
+		    *prev = (Scsi_Cmnd *) tmp->host_scribble;
+		    tmp->host_scribble = NULL;
+		    tmp->result = DID_ABORT << 16;
+		    sti();
+		    tmp->done(tmp);
+		    return SCSI_ABORT_SUCCESS;
+		}
+	}
+
+/*
+ * Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+    sti();
+    printk("scsi%d : warning : SCSI command probably completed successfully\n"
+	" before abortion\n", instance->host_no);
+    return SCSI_ABORT_NOT_RUNNING;
+}
+
+
+/*
+ * Function : int NCR5380_reset (Scsi_Cmnd *cmd, unsigned int reset_flags)
+ *
+ * Purpose : reset the SCSI bus.
+ *
+ * Returns : SCSI_RESET_WAKEUP
+ *
+ */
+
+#ifndef NCR5380_reset
+static
+#endif
+int NCR5380_reset (Scsi_Cmnd *cmd, unsigned int dummy) {
+    /*
+     * Hard-reset the SCSI bus attached to the command's host adapter.
+     * The reset_flags argument ("dummy") is ignored; the midlayer is
+     * always told to wake the command up via SCSI_RESET_WAKEUP.
+     */
+    NCR5380_local_declare();
+    struct Scsi_Host *host = cmd->host;
+
+    NCR5380_setup(host);
+    NCR5380_print_status (host);
+    do_reset (host);
+
+    return SCSI_RESET_WAKEUP;
+}
+
diff --git a/linux/src/drivers/scsi/NCR5380.h b/linux/src/drivers/scsi/NCR5380.h
new file mode 100644
index 0000000..c2a7519
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR5380.h
@@ -0,0 +1,369 @@
+/*
+ * NCR 5380 defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * DISTRIBUTION RELEASE 7
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef NCR5380_H
+#define NCR5380_H
+
+#define NCR5380_PUBLIC_RELEASE 7
+#define NCR53C400_PUBLIC_RELEASE 2
+
+#define NDEBUG_ARBITRATION 0x1
+#define NDEBUG_AUTOSENSE 0x2
+#define NDEBUG_DMA 0x4
+#define NDEBUG_HANDSHAKE 0x8
+#define NDEBUG_INFORMATION 0x10
+#define NDEBUG_INIT 0x20
+#define NDEBUG_INTR 0x40
+#define NDEBUG_LINKED 0x80
+#define NDEBUG_MAIN 0x100
+#define NDEBUG_NO_DATAOUT 0x200
+#define NDEBUG_NO_WRITE 0x400
+#define NDEBUG_PIO 0x800
+#define NDEBUG_PSEUDO_DMA 0x1000
+#define NDEBUG_QUEUES 0x2000
+#define NDEBUG_RESELECTION 0x4000
+#define NDEBUG_SELECTION 0x8000
+#define NDEBUG_USLEEP 0x10000
+#define NDEBUG_LAST_BYTE_SENT 0x20000
+#define NDEBUG_RESTART_SELECT 0x40000
+#define NDEBUG_EXTENDED 0x80000
+#define NDEBUG_C400_PREAD 0x100000
+#define NDEBUG_C400_PWRITE 0x200000
+#define NDEBUG_LISTS 0x400000
+
+/*
+ * The contents of the OUTPUT DATA register are asserted on the bus when
+ * either arbitration is occurring or the phase-indicating signals (
+ * IO, CD, MSG) in the TARGET COMMAND register and the ASSERT DATA
+ * bit in the INITIATOR COMMAND register is set.
+ */
+
+#define OUTPUT_DATA_REG 0 /* wo DATA lines on SCSI bus */
+#define CURRENT_SCSI_DATA_REG 0 /* ro same */
+
+#define INITIATOR_COMMAND_REG 1 /* rw */
+#define ICR_ASSERT_RST 0x80 /* rw Set to assert RST */
+#define ICR_ARBITRATION_PROGRESS 0x40 /* ro Indicates arbitration complete */
+#define ICR_TRI_STATE 0x40 /* wo Set to tri-state drivers */
+#define ICR_ARBITRATION_LOST 0x20 /* ro Indicates arbitration lost */
+#define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */
+#define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */
+#define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */
+#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
+#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */
+#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */
+
+#ifdef DIFFERENTIAL
+#define ICR_BASE ICR_DIFF_ENABLE
+#else
+#define ICR_BASE 0
+#endif
+
+#define MODE_REG 2
+/*
+ * Note : BLOCK_DMA code will keep DRQ asserted for the duration of the
+ * transfer, causing the chip to hog the bus. You probably don't want
+ * this.
+ */
+#define MR_BLOCK_DMA_MODE 0x80 /* rw block mode DMA */
+#define MR_TARGET 0x40 /* rw target mode */
+#define MR_ENABLE_PAR_CHECK 0x20 /* rw enable parity checking */
+#define MR_ENABLE_PAR_INTR 0x10 /* rw enable bad parity interrupt */
+#define MR_ENABLE_EOP_INTR 0x08 /* rw enable eop interrupt */
+#define MR_MONITOR_BSY 0x04 /* rw enable int on unexpected bsy fail */
+#define MR_DMA_MODE 0x02 /* rw DMA / pseudo DMA mode */
+#define MR_ARBITRATE 0x01 /* rw start arbitration */
+
+#ifdef PARITY
+#define MR_BASE MR_ENABLE_PAR_CHECK
+#else
+#define MR_BASE 0
+#endif
+
+#define TARGET_COMMAND_REG 3
+#define TCR_LAST_BYTE_SENT 0x80 /* ro DMA done */
+#define TCR_ASSERT_REQ 0x08 /* tgt rw assert REQ */
+#define TCR_ASSERT_MSG 0x04 /* tgt rw assert MSG */
+#define TCR_ASSERT_CD 0x02 /* tgt rw assert CD */
+#define TCR_ASSERT_IO 0x01 /* tgt rw assert IO */
+
+#define STATUS_REG 4 /* ro */
+/*
+ * Note : a set bit indicates an active signal, driven by us or another
+ * device.
+ */
+#define SR_RST 0x80
+#define SR_BSY 0x40
+#define SR_REQ 0x20
+#define SR_MSG 0x10
+#define SR_CD 0x08
+#define SR_IO 0x04
+#define SR_SEL 0x02
+#define SR_DBP 0x01
+
+/*
+ * Setting a bit in this register will cause an interrupt to be generated when
+ * BSY is false and SEL true and this bit is asserted on the bus.
+ */
+#define SELECT_ENABLE_REG 4 /* wo */
+
+#define BUS_AND_STATUS_REG 5 /* ro */
+#define BASR_END_DMA_TRANSFER 0x80 /* ro set on end of transfer */
+#define BASR_DRQ 0x40 /* ro mirror of DRQ pin */
+#define BASR_PARITY_ERROR 0x20 /* ro parity error detected */
+#define BASR_IRQ 0x10 /* ro mirror of IRQ pin */
+#define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */
+#define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */
+#define BASR_ATN 0x02 /* ro BUS status */
+#define BASR_ACK 0x01 /* ro BUS status */
+
+/* Write any value to this register to start a DMA send */
+#define START_DMA_SEND_REG 5 /* wo */
+
+/*
+ * Used in DMA transfer mode, data is latched from the SCSI bus on
+ * the falling edge of REQ (ini) or ACK (tgt)
+ */
+#define INPUT_DATA_REG 6 /* ro */
+
+/* Write any value to this register to start a DMA receive */
+#define START_DMA_TARGET_RECEIVE_REG 6 /* wo */
+
+/* Read this register to clear interrupt conditions */
+#define RESET_PARITY_INTERRUPT_REG 7 /* ro */
+
+/* Write any value to this register to start an ini mode DMA receive */
+#define START_DMA_INITIATOR_RECEIVE_REG 7 /* wo */
+
+#define C400_CONTROL_STATUS_REG NCR53C400_register_offset-8 /* rw */
+
+#define CSR_RESET 0x80 /* wo Resets 53c400 */
+#define CSR_53C80_REG 0x80 /* ro 5380 registers busy */
+#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
+#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
+#define CSR_53C80_INTR 0x10 /* rw Enable 53c80 interrupts */
+#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
+#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Is Host buffer ready */
+#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer read */
+#define CSR_GATED_53C80_IRQ 0x01 /* ro Last block xferred */
+
+#if 0
+#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR
+#else
+#define CSR_BASE CSR_53C80_INTR
+#endif
+
+/* Number of 128-byte blocks to be transferred */
+#define C400_BLOCK_COUNTER_REG NCR53C400_register_offset-7 /* rw */
+
+/* Resume transfer after disconnect */
+#define C400_RESUME_TRANSFER_REG NCR53C400_register_offset-6 /* wo */
+
+/* Access to host buffer stack */
+#define C400_HOST_BUFFER NCR53C400_register_offset-4 /* rw */
+
+
+/* Note : PHASE_* macros are based on the values of the STATUS register */
+#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
+
+#define PHASE_DATAOUT 0
+#define PHASE_DATAIN SR_IO
+#define PHASE_CMDOUT SR_CD
+#define PHASE_STATIN (SR_CD | SR_IO)
+#define PHASE_MSGOUT (SR_MSG | SR_CD)
+#define PHASE_MSGIN (SR_MSG | SR_CD | SR_IO)
+#define PHASE_UNKNOWN 0xff
+
+/*
+ * Convert status register phase to something we can use to set phase in
+ * the target register so we can get phase mismatch interrupts on DMA
+ * transfers.
+ */
+
+#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
+
+/*
+ * The internal should_disconnect() function returns these based on the
+ * expected length of a disconnect if a device supports disconnect/
+ * reconnect.
+ */
+
+#define DISCONNECT_NONE 0
+#define DISCONNECT_TIME_TO_DATA 1
+#define DISCONNECT_LONG 2
+
+/*
+ * These are "special" values for the tag parameter passed to NCR5380_select.
+ */
+
+#define TAG_NEXT -1 /* Use next free tag */
+#define TAG_NONE -2 /*
+ * Establish I_T_L nexus instead of I_T_L_Q
+ * even on SCSI-II devices.
+ */
+
+/*
+ * These are "special" values for the irq and dma_channel fields of the
+ * Scsi_Host structure
+ */
+
+#define IRQ_NONE 255
+#define DMA_NONE 255
+#define IRQ_AUTO 254
+#define DMA_AUTO 254
+
+#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */
+#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */
+#define FLAG_NCR53C400 4 /* NCR53c400 */
+#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */
+
+#ifndef ASM
+/*
+ * Per-adapter driver state, hung off struct Scsi_Host::hostdata.
+ * The issue_queue and disconnected_queue are singly linked lists
+ * threaded through Scsi_Cmnd::host_scribble.
+ */
+struct NCR5380_hostdata {
+    NCR5380_implementation_fields;		/* implementation specific */
+    unsigned char id_mask, id_higher_mask;	/* 1 << id, all bits greater */
+    unsigned char targets_present;		/* targets we have connected
+						   to, so we can call a select
+						   failure a retryable condition */
+    volatile unsigned char busy[8];		/* index = target, bit = lun */
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+    volatile int dma_len;			/* requested length of DMA */
+#endif
+    volatile unsigned char last_message;	/* last message OUT */
+    volatile Scsi_Cmnd *connected;		/* currently connected command */
+    volatile Scsi_Cmnd *issue_queue;		/* waiting to be issued */
+    volatile Scsi_Cmnd *disconnected_queue;	/* waiting for reconnect */
+    volatile int restart_select;		/* we have disconnected,
+						   used to restart
+						   NCR5380_select() */
+    volatile unsigned aborted:1;		/* flag, says aborted */
+    int flags;					/* FLAG_* bits, see above */
+#ifdef USLEEP
+    unsigned long time_expires;			/* in jiffies, set prior to sleeping */
+    struct Scsi_Host *next_timer;
+#endif
+#ifdef NCR5380_STATS
+    unsigned timebase;				/* Base for time calcs */
+    long time_read[8];				/* time to do reads */
+    long time_write[8];				/* time to do writes */
+    unsigned long bytes_read[8];		/* bytes read */
+    unsigned long bytes_write[8];		/* bytes written */
+    unsigned pendingr;
+    unsigned pendingw;
+#endif
+};
+
+#ifdef __KERNEL__
+static struct Scsi_Host *first_instance; /* linked list of 5380's */
+
+#if defined(AUTOPROBE_IRQ)
+static int NCR5380_probe_irq (struct Scsi_Host *instance, int possible);
+#endif
+static void NCR5380_init (struct Scsi_Host *instance, int flags);
+static void NCR5380_information_transfer (struct Scsi_Host *instance);
+#ifndef DONT_USE_INTR
+static void NCR5380_intr (int irq, void *dev_id, struct pt_regs * regs);
+#endif
+static void NCR5380_main (void);
+static void NCR5380_print_options (struct Scsi_Host *instance);
+static void NCR5380_print_phase (struct Scsi_Host *instance);
+static void NCR5380_print (struct Scsi_Host *instance);
+#ifndef NCR5380_abort
+static
+#endif
+int NCR5380_abort (Scsi_Cmnd *cmd);
+#ifndef NCR5380_reset
+static
+#endif
+int NCR5380_reset (Scsi_Cmnd *cmd, unsigned int reset_flags);
+#ifndef NCR5380_queue_command
+static
+#endif
+int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+
+
+static void NCR5380_reselect (struct Scsi_Host *instance);
+static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag);
+#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
+static int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data);
+#endif
+static int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data);
+
+#if (defined(REAL_DMA) || defined(REAL_DMA_POLL))
+
+#if defined(i386) || defined(__alpha__)
+
+/*
+ * Program the PC (8237-style) ISA DMA controller for a SCSI transfer and
+ * return the number of bytes actually scheduled, which may be less than
+ * requested: count is clamped to the channel's limit (64K for channels
+ * 0-3, 128K for the 16-bit channels) and to the end of the physical
+ * 64K/128K page containing bus_addr, since the controller cannot cross
+ * such a boundary.  mode is DMA_MODE_READ or DMA_MODE_WRITE.  Panics on
+ * odd address or length.  Interrupts are disabled while the channel
+ * registers are reprogrammed.
+ */
+static __inline__ int NCR5380_pc_dma_setup (struct Scsi_Host *instance,
+	unsigned char *ptr, unsigned int count, unsigned char mode) {
+    unsigned limit;
+    unsigned long bus_addr = virt_to_bus(ptr);
+
+    if (instance->dma_channel <=3) {
+	if (count > 65536)
+	    count = 65536;
+	limit = 65536 - (bus_addr & 0xFFFF);
+    } else {
+	if (count > 65536 * 2)
+	    count = 65536 * 2;
+	limit = 65536* 2 - (bus_addr & 0x1FFFF);
+    }
+
+    /* Never cross a DMA page boundary. */
+    if (count > limit) count = limit;
+
+    if ((count & 1) || (bus_addr & 1))
+	panic ("scsi%d : attempted unaligned DMA transfer\n", instance->host_no);
+    cli();
+    disable_dma(instance->dma_channel);
+    clear_dma_ff(instance->dma_channel);	/* reset byte/word flip-flop */
+    set_dma_addr(instance->dma_channel, bus_addr);
+    set_dma_count(instance->dma_channel, count);
+    set_dma_mode(instance->dma_channel, mode);
+    enable_dma(instance->dma_channel);
+    sti();
+    return count;
+}
+
+/* Convenience wrapper: schedule a host-memory -> SCSI (write) DMA. */
+static __inline__ int NCR5380_pc_dma_write_setup (struct Scsi_Host *instance,
+						  unsigned char *src,
+						  unsigned int count)
+{
+    const unsigned char direction = DMA_MODE_WRITE;
+
+    return NCR5380_pc_dma_setup (instance, src, count, direction);
+}
+
+/* Convenience wrapper: schedule a SCSI -> host-memory (read) DMA. */
+static __inline__ int NCR5380_pc_dma_read_setup (struct Scsi_Host *instance,
+						 unsigned char *src,
+						 unsigned int count)
+{
+    const unsigned char direction = DMA_MODE_READ;
+
+    return NCR5380_pc_dma_setup (instance, src, count, direction);
+}
+
+/*
+ * Return the number of bytes not yet transferred on this instance's DMA
+ * channel.  The flip-flop must be cleared before reading the residue so
+ * the two successive 8-bit port reads start with the low byte;
+ * interrupts are disabled so the pair of reads is not interleaved.
+ */
+static __inline__ int NCR5380_pc_dma_residual (struct Scsi_Host *instance) {
+    register int tmp;
+    cli();
+    clear_dma_ff(instance->dma_channel);
+    tmp = get_dma_residue(instance->dma_channel);
+    sti();
+    return tmp;
+}
+#endif /* defined(i386) || defined(__alpha__) */
+#endif /* defined(REAL_DMA) */
+#endif /* __KERNEL__ */
+#endif /* ndef ASM */
+#endif /* NCR5380_H */
diff --git a/linux/src/drivers/scsi/NCR53c406a.c b/linux/src/drivers/scsi/NCR53c406a.c
new file mode 100644
index 0000000..7745f5a
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR53c406a.c
@@ -0,0 +1,1079 @@
+/*
+ * NCR53c406.c
+ * Low-level SCSI driver for NCR53c406a chip.
+ * Copyright (C) 1994, 1995, 1996 Normunds Saumanis (normunds@fi.ibm.com)
+ *
+ * LILO command line usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]
+ * Specify IRQ = 0 for non-interrupt driven mode.
+ * FASTPIO = 1 for fast pio mode, 0 for slow mode.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#define NCR53C406A_DEBUG 0
+#define VERBOSE_NCR53C406A_DEBUG 0
+
+/* Set this to 1 for PIO mode (recommended) or to 0 for DMA mode */
+#define USE_PIO 1
+
+#define USE_BIOS 0
+/* #define BIOS_ADDR 0xD8000 */ /* define this if autoprobe fails */
+/* #define PORT_BASE 0x330 */ /* define this if autoprobe fails */
+/* #define IRQ_LEV 0 */ /* define this if autoprobe fails */
+#define DMA_CHAN 5 /* this is ignored if DMA is disabled */
+
+/* Set this to 0 if you encounter kernel lockups while transferring
+ * data in PIO mode */
+#define USE_FAST_PIO 1
+
+/* ============= End of user configurable parameters ============= */
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "NCR53c406a.h"
+
+/* ============================================================= */
+
+#define WATCHDOG 5000000
+
+#define SYNC_MODE 0 /* Synchronous transfer mode */
+
+#if DEBUG
+#undef NCR53C406A_DEBUG
+#define NCR53C406A_DEBUG 1
+#endif
+
+#if USE_PIO
+#define USE_DMA 0
+#else
+#define USE_DMA 1
+#endif
+
+/* Default configuration */
+#define C1_IMG 0x07 /* ID=7 */
+#define C2_IMG 0x48 /* FE SCSI2 */
+#if USE_DMA
+#define C3_IMG 0x21 /* CDB TE */
+#else
+#define C3_IMG 0x20 /* CDB */
+#endif
+#define C4_IMG 0x04 /* ANE */
+#define C5_IMG 0xb6 /* AA PI SIE POL */
+
+#define REG0 (outb(C4_IMG, CONFIG4))
+#define REG1 (outb(C5_IMG, CONFIG5))
+
+#if NCR53C406A_DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#if VERBOSE_NCR53C406A_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define LOAD_DMA_COUNT(count) \
+ outb(count & 0xff, TC_LSB); \
+ outb((count >> 8) & 0xff, TC_MSB); \
+ outb((count >> 16) & 0xff, TC_HIGH);
+
+/* Chip commands */
+#define DMA_OP 0x80
+
+#define SCSI_NOP 0x00
+#define FLUSH_FIFO 0x01
+#define CHIP_RESET 0x02
+#define SCSI_RESET 0x03
+#define RESELECT 0x40
+#define SELECT_NO_ATN 0x41
+#define SELECT_ATN 0x42
+#define SELECT_ATN_STOP 0x43
+#define ENABLE_SEL 0x44
+#define DISABLE_SEL 0x45
+#define SELECT_ATN3 0x46
+#define RESELECT3 0x47
+#define TRANSFER_INFO 0x10
+#define INIT_CMD_COMPLETE 0x11
+#define MSG_ACCEPT 0x12
+#define TRANSFER_PAD 0x18
+#define SET_ATN 0x1a
+#define RESET_ATN 0x1b
+#define SEND_MSG 0x20
+#define SEND_STATUS 0x21
+#define SEND_DATA 0x22
+#define DISCONN_SEQ 0x23
+#define TERMINATE_SEQ 0x24
+#define TARG_CMD_COMPLETE 0x25
+#define DISCONN 0x27
+#define RECV_MSG 0x28
+#define RECV_CMD 0x29
+#define RECV_DATA 0x2a
+#define RECV_CMD_SEQ 0x2b
+#define TARGET_ABORT_DMA 0x04
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+
+#if NCR53C406A_DEBUG
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+/*----------------------------------------------------------------*/
+
+enum Phase {
+ idle,
+ data_out,
+ data_in,
+ command_ph,
+ status_ph,
+ message_out,
+ message_in
+};
+
+/* Static function prototypes */
+static void NCR53c406a_intr(int, void *, struct pt_regs *);
+static void internal_done(Scsi_Cmnd *);
+static void wait_intr(void);
+static void chip_init(void);
+static void calc_port_addr(void);
+#ifndef IRQ_LEV
+static int irq_probe(void);
+#endif
+
+/* ================================================================= */
+
+#if USE_BIOS
+static void *bios_base = (void *)0;
+#endif
+
+#if PORT_BASE
+static int port_base = PORT_BASE;
+#else
+static int port_base = 0;
+#endif
+
+#if IRQ_LEV
+static int irq_level = IRQ_LEV;
+#else
+static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized'*/
+#endif
+
+#if USE_DMA
+static int dma_chan = 0;
+#endif
+
+#if USE_PIO
+static int fast_pio = USE_FAST_PIO;
+#endif
+
+static Scsi_Cmnd *current_SC = NULL;
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+static char info_msg[256];
+
+struct proc_dir_entry proc_scsi_NCR53c406a = {
+ PROC_SCSI_NCR53C406A, 7, "NCR53c406a",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+/* ================================================================= */
+
+/* possible BIOS locations */
+#if USE_BIOS
+static void *addresses[] = {
+ (void *)0xd8000,
+ (void *)0xc8000
+};
+#define ADDRESS_COUNT (sizeof( addresses ) / sizeof( unsigned ))
+#endif /* USE_BIOS */
+
+/* possible i/o port addresses */
+static unsigned short ports[] = { 0x230, 0x330 };
+#define PORT_COUNT (sizeof( ports ) / sizeof( unsigned short ))
+
+/* possible interrupt channels */
+static unsigned short intrs[] = { 10, 11, 12, 15 };
+#define INTR_COUNT (sizeof( intrs ) / sizeof( unsigned short ))
+
+/* signatures for NCR 53c406a based controllers */
+#if USE_BIOS
+struct signature {
+ char *signature;
+ int sig_offset;
+ int sig_length;
+} signatures[] = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ { "Copyright (C) Acculogic, Inc.\r\n2.8M Diskette Extension Bios ver 4.04.03 03/01/1993", 61, 82 },
+};
+#define SIGNATURE_COUNT (sizeof( signatures ) / sizeof( struct signature ))
+#endif /* USE_BIOS */
+
+/* ============================================================ */
+
+/* Control Register Set 0 */
+static int TC_LSB; /* transfer counter lsb */
+static int TC_MSB; /* transfer counter msb */
+static int SCSI_FIFO; /* scsi fifo register */
+static int CMD_REG; /* command register */
+static int STAT_REG; /* status register */
+static int DEST_ID; /* selection/reselection bus id */
+static int INT_REG; /* interrupt status register */
+static int SRTIMOUT; /* select/reselect timeout reg */
+static int SEQ_REG; /* sequence step register */
+static int SYNCPRD; /* synchronous transfer period */
+static int FIFO_FLAGS; /* indicates # of bytes in fifo */
+static int SYNCOFF; /* synchronous offset register */
+static int CONFIG1; /* configuration register */
+static int CLKCONV; /* clock conversion reg */
+/*static int TESTREG;*/ /* test mode register */
+static int CONFIG2; /* Configuration 2 Register */
+static int CONFIG3; /* Configuration 3 Register */
+static int CONFIG4; /* Configuration 4 Register */
+static int TC_HIGH; /* Transfer Counter High */
+/*static int FIFO_BOTTOM;*/ /* Reserve FIFO byte register */
+
+/* Control Register Set 1 */
+/*static int JUMPER_SENSE;*/ /* Jumper sense port reg (r/w) */
+/*static int SRAM_PTR;*/ /* SRAM address pointer reg (r/w) */
+/*static int SRAM_DATA;*/ /* SRAM data register (r/w) */
+static int PIO_FIFO; /* PIO FIFO registers (r/w) */
+/*static int PIO_FIFO1;*/ /* */
+/*static int PIO_FIFO2;*/ /* */
+/*static int PIO_FIFO3;*/ /* */
+static int PIO_STATUS; /* PIO status (r/w) */
+/*static int ATA_CMD;*/ /* ATA command/status reg (r/w) */
+/*static int ATA_ERR;*/ /* ATA features/error register (r/w)*/
+static int PIO_FLAG; /* PIO flag interrupt enable (r/w) */
+static int CONFIG5; /* Configuration 5 register (r/w) */
+/*static int SIGNATURE;*/ /* Signature Register (r) */
+/*static int CONFIG6;*/ /* Configuration 6 register (r) */
+
+/* ============================================================== */
+
+#if USE_DMA
+/*
+ * Program ISA DMA channel dma_chan for a transfer of up to count bytes
+ * at ptr and return the byte count actually programmed.  count is
+ * clamped to the channel maximum (64K for channels 0-3, 128K otherwise)
+ * and to the end of the physical DMA page the buffer starts in, because
+ * the controller cannot cross a 64K/128K boundary.  Panics on odd
+ * address or length.
+ *
+ * NOTE(review): ptr is handed to set_dma_addr() directly, with no
+ * virt_to_bus() translation -- valid only where virtual and bus
+ * addresses coincide; confirm for this port.
+ */
+static __inline__ int
+NCR53c406a_dma_setup (unsigned char *ptr,
+		      unsigned int count,
+		      unsigned char mode) {
+    unsigned limit;
+    unsigned long flags = 0;
+
+    VDEB(printk("dma: before count=%d ", count));
+    if (dma_chan <=3) {
+	if (count > 65536)
+	    count = 65536;
+	limit = 65536 - (((unsigned) ptr) & 0xFFFF);
+    } else {
+	if (count > (65536<<1))
+	    count = (65536<<1);
+	limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
+    }
+
+    /* Never cross a DMA page boundary. */
+    if (count > limit) count = limit;
+
+    VDEB(printk("after count=%d\n", count));
+    if ((count & 1) || (((unsigned) ptr) & 1))
+	panic ("NCR53c406a: attempted unaligned DMA transfer\n");
+
+    save_flags(flags);
+    cli();
+    disable_dma(dma_chan);
+    clear_dma_ff(dma_chan);	/* reset the address/count flip-flop */
+    set_dma_addr(dma_chan, (long) ptr);
+    set_dma_count(dma_chan, count);
+    set_dma_mode(dma_chan, mode);
+    enable_dma(dma_chan);
+    restore_flags(flags);
+
+    return count;
+}
+
+/* Schedule a memory -> device (write) DMA of count bytes from src. */
+static __inline__ int
+NCR53c406a_dma_write(unsigned char *src, unsigned int count)
+{
+    unsigned char *buffer = src;
+
+    return NCR53c406a_dma_setup(buffer, count, DMA_MODE_WRITE);
+}
+
+/* Schedule a device -> memory (read) DMA of count bytes into src. */
+static __inline__ int
+NCR53c406a_dma_read(unsigned char *src, unsigned int count)
+{
+    unsigned char *buffer = src;
+
+    return NCR53c406a_dma_setup(buffer, count, DMA_MODE_READ);
+}
+
+/*
+ * Return the number of bytes remaining on the current DMA transfer.
+ * The flip-flop is cleared first so the two successive 8-bit reads of
+ * the residue register pair start with the low byte; interrupts are
+ * blocked around the sequence so the reads are not interleaved.
+ */
+static __inline__ int
+NCR53c406a_dma_residual (void) {
+    register int tmp;
+    unsigned long flags = 0;
+    save_flags(flags);
+    cli();
+    clear_dma_ff(dma_chan);
+    tmp = get_dma_residue(dma_chan);
+    restore_flags(flags);
+
+    return tmp;
+}
+#endif /* USE_DMA */
+
+#if USE_PIO
+/*
+ * Drain up to reqlen bytes from the chip's PIO FIFO into request.
+ * Returns 0 on completion AND on error (PIO_STATUS bit 7 set, or FIFO
+ * empty with an interrupt pending) -- the caller cannot tell the two
+ * apart from the return value alone.
+ *
+ * Bits 1-4 of PIO_STATUS encode FIFO occupancy; the decoded byte
+ * counts (1/42/84/128) below mirror the original driver.
+ * NOTE(review): confirm the encoding against the 53c406a data sheet.
+ */
+static __inline__ int NCR53c406a_pio_read(unsigned char *request,
+					  unsigned int reqlen)
+{
+    int i;
+    int len;	/* current scsi fifo size */
+    unsigned long flags = 0;
+
+    REG1;	/* select the register bank containing the PIO FIFO */
+    while (reqlen) {
+	i = inb(PIO_STATUS);
+	/* VDEB(printk("pio_status=%x\n", i)); */
+	if (i & 0x80)	/* error flag */
+	    return 0;
+
+	switch( i & 0x1e ) {
+	default:
+	case 0x10:
+	    len=0; break;	/* FIFO empty */
+	case 0x0:
+	    len=1; break;
+	case 0x8:
+	    len=42; break;
+	case 0xc:
+	    len=84; break;
+	case 0xe:
+	    len=128; break;	/* FIFO full */
+	}
+
+	if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */
+	    return 0;
+	}
+
+	if (len) {
+	    if( len > reqlen )
+		len = reqlen;
+
+	    save_flags(flags);
+	    cli();
+	    if( fast_pio && len > 3 ) {
+		/* 32-bit string reads; any 1-3 trailing odd bytes are
+		 * left in the FIFO for a later pass of the loop. */
+		insl(PIO_FIFO,request,len>>2);
+		request += len & 0xfc;
+		reqlen -= len & 0xfc;
+	    }
+	    else {
+		while(len--) {
+		    *request++ = inb(PIO_FIFO);
+		    reqlen--;
+		}
+	    }
+	    restore_flags(flags);
+	}
+    }
+    return 0;
+}
+
+/*
+ * Feed up to REQLEN bytes from REQUEST into the chip's PIO FIFO.
+ * Free FIFO space is decoded from the PIO status register; data is
+ * moved 32 bits at a time when fast_pio is set, byte-wise otherwise.
+ * Returns 0 in all cases; a PIO error (status bit 7) or the FIFO
+ * interrupt bit (0x40) ends the transfer early.
+ */
+static __inline__ int NCR53c406a_pio_write(unsigned char *request,
+ unsigned int reqlen)
+{
+ int i = 0;
+ int len; /* current scsi fifo size */
+ unsigned long flags = 0;
+
+ REG1;
+ while (reqlen && !(i&0x40)) {
+ i = inb(PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80) /* error */
+ return 0;
+
+ /* Decode free FIFO space from status bits 1-4. */
+ switch( i & 0x1e ) {
+ case 0x10:
+ len=128; break;
+ case 0x0:
+ len=84; break;
+ case 0x8:
+ len=42; break;
+ case 0xc:
+ len=1; break;
+ default:
+ case 0xe:
+ len=0; break;
+ }
+
+ if (len) {
+ if( len > reqlen )
+ len = reqlen;
+
+ save_flags(flags);
+ cli();
+ if( fast_pio && len > 3 ) {
+ /* 32-bit string output; move the 4-byte-aligned part only */
+ outsl(PIO_FIFO,request,len>>2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ }
+ else {
+ while(len--) {
+ outb(*request++, PIO_FIFO);
+ reqlen--;
+ }
+ }
+ restore_flags(flags);
+ }
+ }
+ return 0;
+}
+#endif /* USE_PIO */
+
+/*
+ * Detect and initialise a NCR53c406a host adapter.
+ * Optionally scans for a BIOS signature (USE_BIOS), locates or
+ * verifies the 16-byte I/O port range, initialises the chip, then
+ * claims the IRQ (and DMA channel when USE_DMA) and registers the
+ * host with the SCSI midlayer.
+ * Returns the number of adapters found (0 or 1).
+ */
+int
+NCR53c406a_detect(Scsi_Host_Template * tpnt){
+ struct Scsi_Host *shpnt;
+#ifndef PORT_BASE
+ int i;
+#endif
+
+#if USE_BIOS
+ int ii, jj;
+ bios_base = 0;
+ /* look for a valid signature */
+ for( ii=0; ii < ADDRESS_COUNT && !bios_base; ii++)
+ for( jj=0; (jj < SIGNATURE_COUNT) && !bios_base; jj++)
+ if(!memcmp((void *) addresses[ii]+signatures[jj].sig_offset,
+ (void *) signatures[jj].signature,
+ (int) signatures[jj].sig_length))
+ bios_base=addresses[ii];
+
+ if(!bios_base){
+ printk("NCR53c406a: BIOS signature not found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a BIOS found at %X\n", (unsigned int) bios_base););
+#endif /* USE_BIOS */
+
+#ifdef PORT_BASE
+ if (check_region(port_base, 0x10)) /* ports already snatched */
+ port_base = 0;
+
+#else /* autodetect */
+ if (port_base) { /* LILO override */
+ if (check_region(port_base, 0x10))
+ port_base = 0;
+ }
+ else {
+ for(i=0; i<PORT_COUNT && !port_base; i++){
+ if(check_region(ports[i], 0x10)){
+ DEB(printk("NCR53c406a: port %x in use\n", ports[i]));
+ }
+ else {
+ VDEB(printk("NCR53c406a: port %x available\n", ports[i]));
+ outb(C5_IMG, ports[i] + 0x0d); /* reg set 1 */
+ /* The signature register alternates on successive reads;
+ the XOR of two consecutive reads must be 7. */
+ if( (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) & 0xf8) == 0x58 ) {
+ VDEB(printk("NCR53c406a: Sig register valid\n"));
+ VDEB(printk("port_base=%x\n", port_base));
+ port_base = ports[i];
+ }
+ }
+ }
+ }
+#endif /* PORT_BASE */
+
+ if(!port_base){ /* no ports found */
+ printk("NCR53c406a: no available ports found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a detected\n"));
+
+ calc_port_addr();
+ chip_init();
+
+#ifndef IRQ_LEV
+ if (irq_level < 0) { /* LILO override if >= 0*/
+ irq_level = -1; // XXX No probing irq_probe();
+ if (irq_level < 0) { /* Trouble */
+ printk("NCR53c406a: IRQ problem, irq_level=%d, giving up\n", irq_level);
+ return 0;
+ }
+ }
+#endif
+
+ DEB(printk("NCR53c406a: using port_base %x\n", port_base));
+ request_region(port_base, 0x10, "NCR53c406a");
+
+ if(irq_level > 0) {
+ if(request_irq(irq_level, NCR53c406a_intr, 0, "NCR53c406a", NULL)){
+ printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level);
+ release_region(port_base, 0x10); /* don't leak the I/O region */
+ return 0;
+ }
+ tpnt->can_queue = 1;
+ DEB(printk("NCR53c406a: allocated IRQ %d\n", irq_level));
+ }
+ else if (irq_level == 0) {
+ tpnt->can_queue = 0;
+ DEB(printk("NCR53c406a: No interrupts detected\n"));
+#if USE_DMA
+ printk("NCR53c406a: No interrupts found and DMA mode defined. Giving up.\n");
+ release_region(port_base, 0x10); /* don't leak the I/O region */
+ return 0;
+#endif /* USE_DMA */
+ }
+ else {
+ DEB(printk("NCR53c406a: Shouldn't get here!\n"));
+ release_region(port_base, 0x10); /* don't leak the I/O region */
+ return 0;
+ }
+
+#if USE_DMA
+ dma_chan = DMA_CHAN;
+ if(request_dma(dma_chan, "NCR53c406a") != 0){
+ printk("NCR53c406a: unable to allocate DMA channel %d\n", dma_chan);
+ if (irq_level > 0) /* undo earlier claims on failure */
+ free_irq(irq_level, NULL);
+ release_region(port_base, 0x10);
+ return 0;
+ }
+
+ DEB(printk("Allocated DMA channel %d\n", dma_chan));
+#endif /* USE_DMA */
+
+ tpnt->present = 1;
+ tpnt->proc_dir = &proc_scsi_NCR53c406a;
+
+ shpnt = scsi_register(tpnt, 0);
+ shpnt->irq = irq_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+#if USE_DMA
+ shpnt->dma = dma_chan;
+#endif
+
+#if USE_DMA
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, DMA channel %d.",
+ port_base, irq_level, dma_chan);
+#else
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, %s PIO mode.",
+ port_base, irq_level, fast_pio ? "fast" : "slow");
+#endif
+
+ return (tpnt->present);
+}
+
+/* called from init/main.c */
+/*
+ * Parse the "ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]" LILO option.
+ * The port is validated against the known-ports table and the IRQ
+ * against the supported-interrupts table before being accepted.
+ */
+void NCR53c406a_setup(char *str, int *ints)
+{
+ static size_t setup_idx = 0;
+ size_t i;
+
+ DEB(printk("NCR53c406a: Setup called\n"););
+
+ /* NOTE(review): setup_idx is never incremented, so this guard can
+ never fire -- confirm the intended repeat-call behaviour. */
+ if (setup_idx >= PORT_COUNT - 1) {
+ printk("NCR53c406a: Setup called too many times. Bad LILO params?\n");
+ return;
+ }
+ if (ints[0] < 1 || ints[0] > 3) {
+ printk("NCR53c406a: Malformed command line\n");
+ printk("NCR53c406a: Usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]\n");
+ return;
+ }
+ for (i = 0; i < PORT_COUNT && !port_base; i++)
+ if (ports[i] == ints[1]) {
+ port_base = ints[1];
+ DEB(printk("NCR53c406a: Specified port_base 0x%X\n", port_base);)
+ }
+ if (!port_base) {
+ printk("NCR53c406a: Invalid PORTBASE 0x%X specified\n", ints[1]);
+ return;
+ }
+
+ if (ints[0] > 1) {
+ if (ints[2] == 0) {
+ irq_level = 0;
+ DEB(printk("NCR53c406a: Specified irq %d\n", irq_level);)
+ }
+ else
+ for (i = 0; i < INTR_COUNT && irq_level < 0; i++)
+ if (intrs[i] == ints[2]) {
+ irq_level = ints[2];
+ /* Fixed: this debug line used to print port_base. */
+ DEB(printk("NCR53c406a: Specified irq %d\n", irq_level);)
+ }
+ if (irq_level < 0)
+ printk("NCR53c406a: Invalid IRQ %d specified\n", ints[2]);
+ }
+
+ if (ints[0] > 2)
+ fast_pio = ints[3];
+
+ DEB(printk("NCR53c406a: port_base=0x%X, irq=%d, fast_pio=%d\n",
+ port_base, irq_level, fast_pio);)
+}
+
+/* Return the human-readable adapter description assembled by
+ NCR53c406a_detect() into info_msg. */
+const char*
+NCR53c406a_info(struct Scsi_Host *SChost){
+ DEB(printk("NCR53c406a_info called\n"));
+ return info_msg;
+}
+
+/* Completion callback for the polled NCR53c406a_command() path:
+ record the SCSI result and signal that the command finished. */
+static void internal_done(Scsi_Cmnd *SCpnt) {
+ internal_done_errcode = SCpnt->result;
+ internal_done_flag++;
+}
+
+
+/*
+ * Poll the status register until the chip posts a (pseudo-)interrupt
+ * or WATCHDOG jiffies elapse, then dispatch to the interrupt handler.
+ * Used when the driver runs without a hardware IRQ.
+ * Fixed: the deadline was held in a signed int, truncating the
+ * unsigned long jiffies counter; use unsigned long instead.
+ */
+static void wait_intr(void) {
+ unsigned long timeout = jiffies + WATCHDOG;
+
+ while(timeout > jiffies && !(inb(STAT_REG)&0xe0)) /* wait for a pseudo-interrupt */
+ barrier();
+
+ if (timeout <= jiffies) { /* Timed out */
+ rtrc(0);
+ current_SC->result = DID_TIME_OUT << 16;
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ NCR53c406a_intr(0, NULL, NULL);
+}
+
+/*
+ * Synchronous entry point: queue the command and busy-wait for the
+ * internal_done() callback to fire. Without an IRQ the wait loop
+ * polls the chip through wait_intr() instead of spinning idle.
+ * Returns the SCSI result code recorded by the completion callback.
+ */
+int NCR53c406a_command(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_command called\n"));
+ NCR53c406a_queue(SCpnt, internal_done);
+ if(irq_level)
+ while (!internal_done_flag);
+ else /* interrupts not supported */
+ while (!internal_done_flag)
+ wait_intr();
+
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+
+/*
+ * Queue one command to the chip: load the CDB into the SCSI FIFO and
+ * issue a select-without-ATN. Completion is driven asynchronously by
+ * NCR53c406a_intr(), which invokes DONE when the command finishes.
+ * Always returns 0 (the command was accepted).
+ */
+int
+NCR53c406a_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)){
+ int i;
+ unsigned long flags = 0;
+
+ VDEB(printk("NCR53c406a_queue called\n"));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
+ SCpnt->cmnd[0],
+ SCpnt->cmd_len,
+ SCpnt->target,
+ SCpnt->lun,
+ SCpnt->request_bufflen));
+
+#if 0
+ VDEB(for(i=0; i<SCpnt->cmd_len; i++)
+ printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
+ VDEB(printk("\n"));
+#endif
+
+ /* Single-command driver: the in-flight command lives in a global. */
+ current_SC = SCpnt;
+ current_SC->scsi_done = done;
+ current_SC->SCp.phase = command_ph;
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+
+ /* Program the chip with interrupts off; the outb order matters. */
+ save_flags(flags);
+ cli();
+ REG0;
+ outb(SCpnt->target, DEST_ID); /* set destination */
+ outb(FLUSH_FIFO, CMD_REG); /* reset the fifos */
+
+ for(i=0; i<SCpnt->cmd_len; i++){
+ outb(SCpnt->cmnd[i], SCSI_FIFO);
+ }
+ outb(SELECT_NO_ATN, CMD_REG);
+ restore_flags(flags);
+
+ rtrc(1);
+ return 0;
+}
+
+/*
+ * Abort handler: this driver has no mechanism to abort an in-flight
+ * command, so ask the midlayer to wait and retry (SCSI_ABORT_SNOOZE).
+ */
+int
+NCR53c406a_abort(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_abort called\n"));
+ return SCSI_ABORT_SNOOZE; /* Don't know how to abort */
+}
+
+/*
+ * Hard-reset the chip and the SCSI bus, then reprogram the chip via
+ * chip_init(). Returns SCSI_RESET_PENDING when an interrupt will
+ * report completion, SCSI_RESET_WAKEUP in polled (no-IRQ) mode.
+ */
+int
+NCR53c406a_reset(Scsi_Cmnd *SCpnt, unsigned int flags){
+ DEB(printk("NCR53c406a_reset called\n"));
+ outb(C4_IMG, CONFIG4); /* Select reg set 0 */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG); /* required after reset */
+ outb(SCSI_RESET, CMD_REG);
+ chip_init();
+
+ rtrc(2);
+ if (irq_level)
+ return SCSI_RESET_PENDING; /* should get an interrupt */
+ else
+ return SCSI_RESET_WAKEUP; /* won't get any interrupts */
+}
+
+/*
+ * Compute a BIOS-compatible geometry (heads/sectors/cylinders) from
+ * the disk capacity: 64/32 mapping normally, switching to extended
+ * 255/63 translation when the cylinder count would exceed 1024.
+ * Always returns 0.
+ */
+int
+NCR53c406a_biosparm(Scsi_Disk *disk, kdev_t dev, int* info_array){
+ int size = disk->capacity;
+
+ DEB(printk("NCR53c406a_biosparm called\n"));
+
+ if ((size >> 11) > 1024) { /* big disk */
+ info_array[0] = 255; /* heads */
+ info_array[1] = 63; /* sectors */
+ info_array[2] = size / (255*63); /* cylinders */
+ } else {
+ info_array[0] = 64; /* heads */
+ info_array[1] = 32; /* sectors */
+ info_array[2] = size >> 11; /* cylinders */
+ }
+ return 0;
+}
+
+/*
+ * Interrupt / pseudo-interrupt handler: snapshot the chip status and
+ * interrupt registers, handle reset/parity/gross-error conditions,
+ * then advance the current command through the SCSI bus phases
+ * (data in/out, status, message in/out), completing it via
+ * scsi_done() when the target disconnects.
+ */
+static void
+NCR53c406a_intr(int unused, void *dev_id, struct pt_regs *regs){
+ DEB(unsigned char fifo_size;)
+ DEB(unsigned char seq_reg;)
+ unsigned char status, int_reg;
+ unsigned long flags = 0;
+#if USE_PIO
+ unsigned char pio_status;
+ struct scatterlist *sglist;
+ unsigned int sgcount;
+#endif
+
+ VDEB(printk("NCR53c406a_intr called\n"));
+
+ /* Latch all chip state atomically before acting on it. */
+ save_flags(flags);
+ cli();
+#if USE_PIO
+ REG1;
+ pio_status = inb(PIO_STATUS);
+#endif
+ REG0;
+ status = inb(STAT_REG);
+ DEB(seq_reg = inb(SEQ_REG));
+ int_reg = inb(INT_REG);
+ DEB(fifo_size = inb(FIFO_FLAGS) & 0x1f);
+ restore_flags(flags);
+
+#if NCR53C406A_DEBUG
+ printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x",
+ status, seq_reg, int_reg, fifo_size);
+#if (USE_DMA)
+ printk("\n");
+#else
+ printk(", pio=%02x\n", pio_status);
+#endif /* USE_DMA */
+#endif /* NCR53C406A_DEBUG */
+
+ if(int_reg & 0x80){ /* SCSI reset intr */
+ rtrc(3);
+ DEB(printk("NCR53c406a: reset intr received\n"));
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_RESET << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+#if USE_PIO
+ if(pio_status & 0x80) {
+ printk("NCR53C406A: Warning: PIO error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+#endif /* USE_PIO */
+
+ if(status & 0x20) { /* Parity error */
+ printk("NCR53c406a: Warning: parity error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_PARITY << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(status & 0x40) { /* Gross error */
+ printk("NCR53c406a: Warning: gross error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(int_reg & 0x20){ /* Disconnect */
+ DEB(printk("NCR53c406a: disconnect intr received\n"));
+ if(current_SC->SCp.phase != message_in){ /* Unexpected disconnect */
+ current_SC->result = DID_NO_CONNECT << 16;
+ }
+ else{ /* Command complete, return status and message */
+ current_SC->result = (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ }
+
+ rtrc(0);
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done( current_SC );
+ return;
+ }
+
+ switch(status & 0x07){ /* scsi phase */
+ case 0x00: /* DATA-OUT */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(5);
+ current_SC->SCp.phase = data_out;
+ VDEB(printk("NCR53c406a: Data-Out phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif /* USE_DMA */
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_write(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif /* USE_PIO */
+ }
+ break;
+
+ case 0x01: /* DATA-IN */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(6);
+ current_SC->SCp.phase = data_in;
+ VDEB(printk("NCR53c406a: Data-In phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif /* USE_DMA */
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* Use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_read(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif /* USE_PIO */
+ }
+ break;
+
+ case 0x02: /* COMMAND */
+ current_SC->SCp.phase = command_ph;
+ printk("NCR53c406a: Warning: Unknown interrupt occurred in command phase!\n");
+ break;
+
+ case 0x03: /* STATUS */
+ rtrc(7);
+ current_SC->SCp.phase = status_ph;
+ VDEB(printk("NCR53c406a: Status phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ outb(INIT_CMD_COMPLETE, CMD_REG);
+ break;
+
+ case 0x04: /* Reserved */
+ case 0x05: /* Reserved */
+ printk("NCR53c406a: WARNING: Reserved phase!!!\n");
+ break;
+
+ case 0x06: /* MESSAGE-OUT */
+ DEB(printk("NCR53c406a: Message-Out phase\n"));
+ current_SC->SCp.phase = message_out;
+ outb(SET_ATN, CMD_REG); /* Reject the message */
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+
+ case 0x07: /* MESSAGE-IN */
+ rtrc(4);
+ VDEB(printk("NCR53c406a: Message-In phase\n"));
+ current_SC->SCp.phase = message_in;
+
+ current_SC->SCp.Status = inb(SCSI_FIFO);
+ current_SC->SCp.Message = inb(SCSI_FIFO);
+
+ VDEB(printk("SCSI FIFO size=%d\n", inb(FIFO_FLAGS) & 0x1f));
+ DEB(printk("Status = %02x Message = %02x\n",
+ current_SC->SCp.Status, current_SC->SCp.Message));
+
+ if(current_SC->SCp.Message == SAVE_POINTERS ||
+ current_SC->SCp.Message == DISCONNECT) {
+ outb(SET_ATN, CMD_REG); /* Reject message */
+ DEB(printk("Discarding SAVE_POINTERS message\n"));
+ }
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+ }
+}
+
+#ifndef IRQ_LEV
+/*
+ * Probe for the IRQ line the chip is strapped to: issue an invalid
+ * command (which raises an interrupt) and let the kernel's
+ * probe_irq_on()/probe_irq_off() pair identify the line, then reset
+ * the chip. Returns the IRQ number, or -1 on timeout.
+ * Fixed: the deadline was a signed int, truncating jiffies; use
+ * unsigned long.
+ */
+static int irq_probe(void)
+{
+ int irqs, irq;
+ unsigned long timeout;
+
+ inb(INT_REG); /* clear the interrupt register */
+ sti();
+ irqs = probe_irq_on();
+
+ /* Invalid command will cause an interrupt */
+ REG0;
+ outb(0xff, CMD_REG);
+
+ /* Wait for the interrupt to occur */
+ timeout = jiffies + WATCHDOG;
+ while(timeout > jiffies && !(inb(STAT_REG) & 0x80))
+ barrier();
+ if (timeout <= jiffies) { /* Timed out, must be hardware trouble */
+ probe_irq_off(irqs);
+ return -1;
+ }
+
+ irq = probe_irq_off(irqs);
+
+ /* Kick the chip */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG);
+ chip_init();
+
+ return irq;
+}
+#endif /* IRQ_LEV */
+
+/*
+ * Program the chip's configuration and timing registers to their
+ * operating values: data-path mode (DMA vs PIO), the four CONFIG
+ * images, clock conversion, selection timeout and synchronous
+ * transfer parameters.
+ */
+static void chip_init()
+{
+ REG1;
+#if USE_DMA
+ outb(0x00, PIO_STATUS); /* select the DMA data path */
+#else /* USE_PIO */
+ outb(0x01, PIO_STATUS); /* select the PIO data path */
+#endif
+ outb(0x00, PIO_FLAG);
+
+ outb(C4_IMG, CONFIG4); /* REG0; */
+ outb(C3_IMG, CONFIG3);
+ outb(C2_IMG, CONFIG2);
+ outb(C1_IMG, CONFIG1);
+
+ outb(0x05, CLKCONV); /* clock conversion factor */
+ outb(0x9C, SRTIMOUT); /* Selection timeout */
+ outb(0x05, SYNCPRD); /* Synchronous transfer period */
+ outb(SYNC_MODE, SYNCOFF); /* synchronous mode */
+}
+
+/*
+ * Derive every register's I/O address from port_base. Names sharing
+ * an offset are distinct chip registers selected by access direction
+ * or the active register set; commented-out entries are registers
+ * this driver does not use.
+ */
+void calc_port_addr()
+{
+ /* Control Register Set 0 */
+ TC_LSB = (port_base+0x00);
+ TC_MSB = (port_base+0x01);
+ SCSI_FIFO = (port_base+0x02);
+ CMD_REG = (port_base+0x03);
+ STAT_REG = (port_base+0x04);
+ DEST_ID = (port_base+0x04);
+ INT_REG = (port_base+0x05);
+ SRTIMOUT = (port_base+0x05);
+ SEQ_REG = (port_base+0x06);
+ SYNCPRD = (port_base+0x06);
+ FIFO_FLAGS = (port_base+0x07);
+ SYNCOFF = (port_base+0x07);
+ CONFIG1 = (port_base+0x08);
+ CLKCONV = (port_base+0x09);
+ /* TESTREG = (port_base+0x0A); */
+ CONFIG2 = (port_base+0x0B);
+ CONFIG3 = (port_base+0x0C);
+ CONFIG4 = (port_base+0x0D);
+ TC_HIGH = (port_base+0x0E);
+ /* FIFO_BOTTOM = (port_base+0x0F); */
+
+ /* Control Register Set 1 */
+ /* JUMPER_SENSE = (port_base+0x00);*/
+ /* SRAM_PTR = (port_base+0x01);*/
+ /* SRAM_DATA = (port_base+0x02);*/
+ PIO_FIFO = (port_base+0x04);
+ /* PIO_FIFO1 = (port_base+0x05);*/
+ /* PIO_FIFO2 = (port_base+0x06);*/
+ /* PIO_FIFO3 = (port_base+0x07);*/
+ PIO_STATUS = (port_base+0x08);
+ /* ATA_CMD = (port_base+0x09);*/
+ /* ATA_ERR = (port_base+0x0A);*/
+ PIO_FLAG = (port_base+0x0B);
+ CONFIG5 = (port_base+0x0D);
+ /* SIGNATURE = (port_base+0x0E);*/
+ /* CONFIG6 = (port_base+0x0F);*/
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+/* Instantiate the host template and pull in the generic loadable-module
+ glue that registers/unregisters the driver. */
+Scsi_Host_Template driver_template = NCR53c406a;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/NCR53c406a.h b/linux/src/drivers/scsi/NCR53c406a.h
new file mode 100644
index 0000000..88e45e5
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR53c406a.h
@@ -0,0 +1,83 @@
+#ifndef _NCR53C406A_H
+#define _NCR53C406A_H
+
+/*
+ * NCR53c406a.h
+ *
+ * Copyright (C) 1994 Normunds Saumanis (normunds@rx.tech.swh.lv)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* NOTE: scatter-gather support only works in PIO mode.
+ * Use SG_NONE if DMA mode is enabled!
+ */
+/* Scsi_Host_Template initializer for this driver; each slot is
+ labelled with the midlayer field it fills. */
+#define NCR53c406a { \
+ NULL /* next */, \
+ NULL /* usage count */, \
+ &proc_scsi_NCR53c406a /* proc_dir */, \
+ NULL /* proc_info */, \
+ "NCR53c406a" /* name */, \
+ NCR53c406a_detect /* detect */, \
+ NULL /* release */, \
+ NCR53c406a_info /* info */, \
+ NCR53c406a_command /* command */, \
+ NCR53c406a_queue /* queuecommand */, \
+ NCR53c406a_abort /* abort */, \
+ NCR53c406a_reset /* reset */, \
+ NULL /* slave_attach */, \
+ NCR53c406a_biosparm /* biosparm */, \
+ 1 /* can_queue */, \
+ 7 /* SCSI ID of the chip */, \
+ 32 /*SG_ALL*/ /*SG_NONE*/, \
+ 1 /* commands per lun */, \
+ 0 /* number of boards in system */, \
+ 1 /* unchecked_isa_dma */, \
+ ENABLE_CLUSTERING \
+}
+
+extern struct proc_dir_entry proc_scsi_NCR53c406a;
+
+/* Driver entry points; definitions live in NCR53c406a.c. */
+int NCR53c406a_detect(Scsi_Host_Template *);
+const char* NCR53c406a_info(struct Scsi_Host *);
+
+int NCR53c406a_command(Scsi_Cmnd *);
+int NCR53c406a_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int NCR53c406a_abort(Scsi_Cmnd *);
+int NCR53c406a_reset(Scsi_Cmnd *, unsigned int);
+/* NOTE(review): the .c definition spells this parameter type
+ Scsi_Disk -- confirm the Disk typedef is visible to all includers. */
+int NCR53c406a_biosparm(Disk *, kdev_t, int []);
+
+#endif /* _NCR53C406A_H */
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/advansys.c b/linux/src/drivers/scsi/advansys.c
new file mode 100644
index 0000000..7aea67c
--- /dev/null
+++ b/linux/src/drivers/scsi/advansys.c
@@ -0,0 +1,15554 @@
+/* $Id: advansys.c,v 1.1.4.1 2005/06/02 18:52:38 ams Exp $ */
+#define ASC_VERSION "3.1E" /* AdvanSys Driver Version */
+
+/*
+ * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
+ *
+ * Copyright (c) 1995-1998 Advanced System Products, Inc.
+ * All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ * There is an AdvanSys Linux WWW page at:
+ * http://www.advansys.com/linux.html
+ *
+ * The latest version of the AdvanSys driver is available at:
+ * ftp://ftp.advansys.com/pub/linux/linux.tgz
+ *
+ * Please send questions, comments, bug reports to:
+ * bobf@advansys.com (Bob Frey)
+ */
+
+/*
+
+ Documentation for the AdvanSys Driver
+
+ A. Linux Kernel Testing
+ B. Adapters Supported by this Driver
+ C. Linux v1.2.X - Directions for Adding the AdvanSys Driver
+ D. Linux v1.3.1 - v1.3.57 - Directions for Adding the AdvanSys Driver
+ E. Linux v1.3.58 and Newer - Upgrading the AdvanSys Driver
+ F. Source Comments
+ G. Driver Compile Time Options and Debugging
+ H. Driver LILO Option
+ I. Release History
+ J. Known Problems or Issues
+ K. Credits
+ L. AdvanSys Contact Information
+
+ A. Linux Kernel Testing
+
+ This driver has been tested in the following Linux kernels: v1.2.13,
+ v1.3.57, v2.0.33, v2.1.77. These kernel versions are major releases
+ of Linux or the latest Linux kernel versions available when this version
+ of the driver was released. The driver should also work in earlier
+ versions of the Linux kernel. Beginning with v1.3.58 the AdvanSys driver
+ is included with all Linux kernels. Please refer to sections C, D, and
+ E for instructions on adding or upgrading the AdvanSys driver.
+
+ B. Adapters Supported by this Driver
+
+ AdvanSys (Advanced System Products, Inc.) manufactures the following
+ RISC-based, Bus-Mastering, Fast (10 Mhz) and Ultra (20 Mhz) Narrow
+ (8-bit transfer) SCSI Host Adapters for the ISA, EISA, VL, and PCI
+ buses and RISC-based, Bus-Mastering, Ultra (20 Mhz) Wide (16-bit
+ transfer) SCSI Host Adapters for the PCI bus.
+
+ The CDB counts below indicate the number of SCSI CDB (Command
+ Descriptor Block) requests that can be stored in the RISC chip
+ cache and board LRAM. A CDB is a single SCSI command. The driver
+ detect routine will display the number of CDBs available for each
+ adapter detected. The number of CDBs used by the driver can be
+ lowered in the BIOS by changing the 'Host Queue Size' adapter setting.
+
+ Connectivity Products:
+ ABP510/5150 - Bus-Master ISA (240 CDB) (Footnote 1)
+ ABP5140 - Bus-Master ISA PnP (16 CDB) (Footnote 1, 3)
+ ABP5142 - Bus-Master ISA PnP with floppy (16 CDB) (Footnote 4)
+ ABP920 - Bus-Master PCI (16 CDB)
+ ABP930 - Bus-Master PCI (16 CDB) (Footnote 5)
+ ABP930U - Bus-Master PCI Ultra (16 CDB)
+ ABP930UA - Bus-Master PCI Ultra (16 CDB)
+ ABP960 - Bus-Master PCI MAC/PC (16 CDB) (Footnote 2)
+ ABP960U - Bus-Master PCI MAC/PC Ultra (16 CDB) (Footnote 2)
+
+ Single Channel Products:
+ ABP542 - Bus-Master ISA with floppy (240 CDB)
+ ABP742 - Bus-Master EISA (240 CDB)
+ ABP842 - Bus-Master VL (240 CDB)
+ ABP940 - Bus-Master PCI (240 CDB)
+ ABP940U - Bus-Master PCI Ultra (240 CDB)
+ ABP970 - Bus-Master PCI MAC/PC (240 CDB)
+ ABP970U - Bus-Master PCI MAC/PC Ultra (240 CDB)
+ ABP940UW - Bus-Master PCI Ultra-Wide (240 CDB)
+
+ Multi Channel Products:
+ ABP752 - Dual Channel Bus-Master EISA (240 CDB Per Channel)
+ ABP852 - Dual Channel Bus-Master VL (240 CDB Per Channel)
+ ABP950 - Dual Channel Bus-Master PCI (240 CDB Per Channel)
+ ABP980 - Four Channel Bus-Master PCI (240 CDB Per Channel)
+ ABP980U - Four Channel Bus-Master PCI Ultra (240 CDB Per Channel)
+
+ Footnotes:
+ 1. This board has been shipped by HP with the 4020i CD-R drive.
+ The board has no BIOS so it cannot control a boot device, but
+ it can control any secondary SCSI device.
+ 2. This board has been sold by Iomega as a Jaz Jet PCI adapter.
+ 3. This board has been sold by SIIG as the i540 SpeedMaster.
+ 4. This board has been sold by SIIG as the i542 SpeedMaster.
+ 5. This board has been sold by SIIG as the Fast SCSI Pro PCI.
+
+ C. Linux v1.2.X - Directions for Adding the AdvanSys Driver
+
+ These directions apply to v1.2.13. For versions that follow v1.2.13,
+ but precede v1.3.57 some of the changes for Linux v1.3.X listed
+ below may need to be modified or included. A patch is available
+ for v1.2.13 from the AdvanSys WWW and FTP sites.
+
+ There are two source files: advansys.h and advansys.c. Copy
+ both of these files to the directory /usr/src/linux/drivers/scsi.
+
+ 1. Add the following line to /usr/src/linux/arch/i386/config.in
+ after "comment 'SCSI low-level drivers'":
+
+ bool 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS y
+
+ 2. Add the following lines to /usr/src/linux/drivers/scsi/hosts.c
+ after "#include "hosts.h"":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ #include "advansys.h"
+ #endif
+
+ and after "static Scsi_Host_Template builtin_scsi_hosts[] =":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+ #endif
+
+ 3. Add the following lines to /usr/src/linux/drivers/scsi/Makefile:
+
+ ifdef CONFIG_SCSI_ADVANSYS
+ SCSI_SRCS := $(SCSI_SRCS) advansys.c
+ SCSI_OBJS := $(SCSI_OBJS) advansys.o
+ else
+ SCSI_MODULE_OBJS := $(SCSI_MODULE_OBJS) advansys.o
+ endif
+
+ 4. (Optional) If you would like to enable the LILO command line
+ and /etc/lilo.conf 'advansys' option, make the following changes.
+ This option can be used to disable I/O port scanning or to limit
+ I/O port scanning to specific addresses. Refer to the 'Driver
+ LILO Option' section below. Add the following lines to
+ /usr/src/linux/init/main.c in the prototype section:
+
+ extern void advansys_setup(char *str, int *ints);
+
+ and add the following lines to the bootsetups[] array.
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+ #endif
+
+ 5. If you have the HP 4020i CD-R driver and Linux v1.2.X you should
+ add a fix to the CD-ROM target driver. This fix will allow
+ you to mount CDs with the iso9660 file system. Linux v1.3.X
+ already has this fix. In the file /usr/src/linux/drivers/scsi/sr.c
+ and function get_sectorsize() after the line:
+
+ if(scsi_CDs[i].sector_size == 0) scsi_CDs[i].sector_size = 2048;
+
+ add the following line:
+
+ if(scsi_CDs[i].sector_size == 2340) scsi_CDs[i].sector_size = 2048;
+
+ 6. In the directory /usr/src/linux run 'make config' to configure
+ the AdvanSys driver, then run 'make vmlinux' or 'make zlilo' to
+ make the kernel. If the AdvanSys driver is not configured, then
+ a loadable module can be built by running 'make modules' and
+ 'make modules_install'. Use 'insmod' and 'rmmod' to install
+ and remove advansys.o.
+
+ D. Linux v1.3.1 - v1.3.57 - Directions for Adding the AdvanSys Driver
+
+ These directions apply to v1.3.57. For versions that precede v1.3.57
+ some of these changes may need to be modified or eliminated. A patch
+ is available for v1.3.57 from the AdvanSys WWW and FTP sites.
+ Beginning with v1.3.58 this driver is included with the Linux
+ distribution eliminating the need for making any changes.
+
+ There are two source files: advansys.h and advansys.c. Copy
+ both of these files to the directory /usr/src/linux/drivers/scsi.
+
+ 1. Add the following line to /usr/src/linux/drivers/scsi/Config.in
+ after "comment 'SCSI low-level drivers'":
+
+ dep_tristate 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS $CONFIG_SCSI
+
+ 2. Add the following lines to /usr/src/linux/drivers/scsi/hosts.c
+ after "#include "hosts.h"":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ #include "advansys.h"
+ #endif
+
+ and after "static Scsi_Host_Template builtin_scsi_hosts[] =":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+ #endif
+
+ 3. Add the following lines to /usr/src/linux/drivers/scsi/Makefile:
+
+ ifeq ($(CONFIG_SCSI_ADVANSYS),y)
+ L_OBJS += advansys.o
+ else
+ ifeq ($(CONFIG_SCSI_ADVANSYS),m)
+ M_OBJS += advansys.o
+ endif
+ endif
+
+ 4. Add the following line to /usr/src/linux/include/linux/proc_fs.h
+ in the enum scsi_directory_inos array:
+
+ PROC_SCSI_ADVANSYS,
+
+ 5. (Optional) If you would like to enable the LILO command line
+ and /etc/lilo.conf 'advansys' option, make the following changes.
+ This option can be used to disable I/O port scanning or to limit
+ I/O port scanning to specific addresses. Refer to the 'Driver
+ LILO Option' section below. Add the following lines to
+ /usr/src/linux/init/main.c in the prototype section:
+
+ extern void advansys_setup(char *str, int *ints);
+
+ and add the following lines to the bootsetups[] array.
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+ #endif
+
+ 6. In the directory /usr/src/linux run 'make config' to configure
+ the AdvanSys driver, then run 'make vmlinux' or 'make zlilo' to
+ make the kernel. If the AdvanSys driver is not configured, then
+ a loadable module can be built by running 'make modules' and
+ 'make modules_install'. Use 'insmod' and 'rmmod' to install
+ and remove advansys.o.
+
+ E. Linux v1.3.58 and Newer - Upgrading the AdvanSys Driver
+
+ To upgrade the AdvanSys driver in a Linux v1.3.58 and newer
+ kernel, first check the version of the current driver. The
+ version is defined by the manifest constant ASC_VERSION at
+ the beginning of advansys.c. The new driver should have a
+ ASC_VERSION value greater than the current version. To install
+ the new driver rename advansys.c and advansys.h in the Linux
+ kernel source tree drivers/scsi directory to different names
+ or save them to a different directory in case you want to revert
+ to the old version of the driver. After the old driver is saved
+ copy the new advansys.c and advansys.h to drivers/scsi, rebuild
+ the kernel, and install the new kernel. No other changes are needed.
+
+ F. Source Comments
+
+ 1. Use tab stops set to 4 for the source files. For vi use 'se tabstops=4'.
+
+ 2. This driver should be maintained in multiple files. But to make
+ it easier to include with Linux and to follow Linux conventions,
+ the whole driver is maintained in the source files advansys.h and
+ advansys.c. In this file logical sections of the driver begin with
+ a comment that contains '---'. The following are the logical sections
+ of the driver below.
+
+ --- Linux Version
+ --- Linux Include Files
+ --- Driver Options
+ --- Debugging Header
+ --- Asc Library Constants and Macros
+ --- Adv Library Constants and Macros
+ --- Driver Constants and Macros
+ --- Driver Structures
+ --- Driver Data
+ --- Driver Function Prototypes
+ --- Linux 'Scsi_Host_Template' and advansys_setup() Functions
+ --- Loadable Driver Support
+ --- Miscellaneous Driver Functions
+ --- Functions Required by the Asc Library
+ --- Functions Required by the Adv Library
+ --- Tracing and Debugging Functions
+ --- Asc Library Functions
+ --- Adv Library Functions
+
+ 3. The string 'XXX' is used to flag code that needs to be re-written
+ or that contains a problem that needs to be addressed.
+
+ 4. I have stripped comments from and reformatted the source for the
+ Asc Library and Adv Library to reduce the size of this file. This
+ source can be found under the following headings. The Asc Library
+ is used to support Narrow Boards. The Adv Library is used to
+ support Wide Boards.
+
+ --- Asc Library Constants and Macros
+ --- Adv Library Constants and Macros
+ --- Asc Library Functions
+ --- Adv Library Functions
+
+ G. Driver Compile Time Options and Debugging
+
+ In this source file the following constants can be defined. They are
+ defined in the source below. Both of these options are enabled by
+ default.
+
+ 1. ADVANSYS_ASSERT - Enable driver assertions (Def: Enabled)
+
+ Enabling this option adds assertion logic statements to the
+ driver. If an assertion fails a message will be displayed to
+ the console, but the system will continue to operate. Any
+ assertions encountered should be reported to the person
+ responsible for the driver. Assertion statements may proactively
+ detect problems with the driver and facilitate fixing these
+ problems. Enabling assertions will add a small overhead to the
+ execution of the driver.
+
+ 2. ADVANSYS_DEBUG - Enable driver debugging (Def: Disabled)
+
+ Enabling this option adds tracing functions to the driver and
+ the ability to set a driver tracing level at boot time. This
+ option will also export symbols not required outside the driver to
+ the kernel name space. This option is very useful for debugging
+ the driver, but it will add to the size of the driver execution
+ image and add overhead to the execution of the driver.
+
+ The amount of debugging output can be controlled with the global
+ variable 'asc_dbglvl'. The higher the number the more output. By
+ default the debug level is 0.
+
+ If the driver is loaded at boot time and the LILO Driver Option
+ is included in the system, the debug level can be changed by
+ specifying a 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port. The
+ first three hex digits of the pseudo I/O Port must be set to
+ 'deb' and the fourth hex digit specifies the debug level: 0 - F.
+ The following command line will look for an adapter at 0x330
+ and set the debug level to 2.
+
+ linux advansys=0x330,0,0,0,0xdeb2
+
+ If the driver is built as a loadable module this variable can be
+ defined when the driver is loaded. The following insmod command
+ will set the debug level to one.
+
+ insmod advansys.o asc_dbglvl=1
+
+ Debugging Message Levels:
+ 0: Errors Only
+ 1: High-Level Tracing
+ 2-N: Verbose Tracing
+
+ I don't know the approved way for turning on printk()s to the
+ console. Here's a program I use to do this. Debug output is
+ logged in /var/adm/messages.
+
+ main()
+ {
+ syscall(103, 7, 0, 0);
+ }
+
+ I found that increasing LOG_BUF_LEN to 40960 in kernel/printk.c
+ prevents most level 1 debug messages from being lost.
+
+ 3. ADVANSYS_STATS - Enable statistics (Def: Enabled >= v1.3.0)
+
+ Enabling this option adds statistics collection and display
+ through /proc to the driver. The information is useful for
+ monitoring driver and device performance. It will add to the
+ size of the driver execution image and add minor overhead to
+ the execution of the driver.
+
+ Statistics are maintained on a per adapter basis. Driver entry
+ point call counts and transfer size counts are maintained.
+ Statistics are only available for kernels greater than or equal
+ to v1.3.0 with the CONFIG_PROC_FS (/proc) file system configured.
+
+ AdvanSys SCSI adapter files have the following path name format:
+
+ /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
+
+ This information can be displayed with cat. For example:
+
+ cat /proc/scsi/advansys/0
+
+ When ADVANSYS_STATS is not defined the AdvanSys /proc files only
+ contain adapter and device configuration information.
+
+ H. Driver LILO Option
+
+ If init/main.c is modified as described in the 'Directions for Adding
+ the AdvanSys Driver to Linux' section (B.4.) above, the driver will
+ recognize the 'advansys' LILO command line and /etc/lilo.conf option.
+ This option can be used to either disable I/O port scanning or to limit
+ scanning to 1 - 4 I/O ports. Regardless of the option setting EISA and
+ PCI boards will still be searched for and detected. This option only
+ affects searching for ISA and VL boards.
+
+ Examples:
+ 1. Eliminate I/O port scanning:
+ boot: linux advansys=
+ or
+ boot: linux advansys=0x0
+ 2. Limit I/O port scanning to one I/O port:
+ boot: linux advansys=0x110
+ 3. Limit I/O port scanning to four I/O ports:
+ boot: linux advansys=0x110,0x210,0x230,0x330
+
+ For a loadable module the same effect can be achieved by setting
+ the 'asc_iopflag' variable and 'asc_ioport' array when loading
+ the driver, e.g.
+
+ insmod advansys.o asc_iopflag=1 asc_ioport=0x110,0x330
+
+ If ADVANSYS_DEBUG is defined a 5th (ASC_NUM_IOPORT_PROBE + 1)
+ I/O Port may be added to specify the driver debug level. Refer to
+ the 'Driver Compile Time Options and Debugging' section above for
+ more information.
+
+ I. Release History
+
+ BETA-1.0 (12/23/95):
+ First Release
+
+ BETA-1.1 (12/28/95):
+ 1. Prevent advansys_detect() from being called twice.
+ 2. Add LILO 0xdeb[0-f] option to set 'asc_dbglvl'.
+
+ 1.2 (1/12/96):
+ 1. Prevent re-entrancy in the interrupt handler which
+ resulted in the driver hanging Linux.
+ 2. Fix problem that prevented ABP-940 cards from being
+ recognized on some PCI motherboards.
+ 3. Add support for the ABP-5140 PnP ISA card.
+ 4. Fix check condition return status.
+ 5. Add conditionally compiled code for Linux v1.3.X.
+
+ 1.3 (2/23/96):
+ 1. Fix problem in advansys_biosparam() that resulted in the
+ wrong drive geometry being returned for drives > 1GB with
+ extended translation enabled.
+ 2. Add additional tracing during device initialization.
+ 3. Change code that only applies to ISA PnP adapter.
+ 4. Eliminate 'make dep' warning.
+ 5. Try to fix problem with handling resets by increasing their
+ timeout value.
+
+ 1.4 (5/8/96):
+ 1. Change definitions to eliminate conflicts with other subsystems.
+ 2. Add versioning code for the shared interrupt changes.
+ 3. Eliminate problem in asc_rmqueue() with iterating after removing
+ a request.
+ 4. Remove reset request loop problem from the "Known Problems or
+ Issues" section. This problem was isolated and fixed in the
+ mid-level SCSI driver.
+
+ 1.5 (8/8/96):
+ 1. Add support for ABP-940U (PCI Ultra) adapter.
+ 2. Add support for IRQ sharing by setting the SA_SHIRQ flag for
+ request_irq and supplying a dev_id pointer to both request_irq()
+ and free_irq().
+ 3. In AscSearchIOPortAddr11() restore a call to check_region() which
+ should be used before I/O port probing.
+ 4. Fix bug in asc_prt_hex() which resulted in displaying
+ the wrong data.
+ 5. Incorporate miscellaneous Asc Library bug fixes and new microcode.
+ 6. Change driver versioning to be specific to each Linux sub-level.
+ 7. Change statistics gathering to be per adapter instead of global
+ to the driver.
+ 8. Add more information and statistics to the adapter /proc file:
+ /proc/scsi/advansys[0...].
+ 9. Remove 'cmd_per_lun' from the "Known Problems or Issues" list.
+ This problem has been addressed with the SCSI mid-level changes
+ made in v1.3.89. The advansys_select_queue_depths() function
+ was added for the v1.3.89 changes.
+
+ 1.6 (9/10/96):
+ 1. Incorporate miscellaneous Asc Library bug fixes and new microcode.
+
+ 1.7 (9/25/96):
+ 1. Enable clustering and optimize the setting of the maximum number
+ of scatter gather elements for any particular board. Clustering
+ increases CPU utilization, but results in a relatively larger
+ increase in I/O throughput.
+ 2. Improve the performance of the request queuing functions by
+ adding a last pointer to the queue structure.
+ 3. Correct problems with reset and abort request handling that
+ could have hung or crashed Linux.
+ 4. Add more information to the adapter /proc file:
+ /proc/scsi/advansys[0...].
+ 5. Remove the request timeout issue from the driver issues list.
+ 6. Miscellaneous documentation additions and changes.
+
+ 1.8 (10/4/96):
+ 1. Make changes to handle the new v2.1.0 kernel memory mapping
+ in which a kernel virtual address may not be equivalent to its
+ bus or DMA memory address.
+ 2. Change abort and reset request handling to make it yet even
+ more robust.
+ 3. Try to mitigate request starvation by sending ordered requests
+ to heavily loaded, tag queuing enabled devices.
+ 4. Maintain statistics on request response time.
+ 5. Add request response time statistics and other information to
+ the adapter /proc file: /proc/scsi/advansys[0...].
+
+ 1.9 (10/21/96):
+ 1. Add conditionally compiled code (ASC_QUEUE_FLOW_CONTROL) to
+ make use of mid-level SCSI driver device queue depth flow
+ control mechanism. This will eliminate aborts caused by a
+ device being unable to keep up with requests and eliminate
+ repeat busy or QUEUE FULL status returned by a device.
+ 2. Incorporate miscellaneous Asc Library bug fixes.
+ 3. To allow the driver to work in kernels with broken module
+ support set 'cmd_per_lun' if the driver is compiled as a
+ module. This change affects kernels v1.3.89 to present.
+ 4. Remove PCI BIOS address from the driver banner. The PCI BIOS
+ is relocated by the motherboard BIOS and its new address can
+ not be determined by the driver.
+ 5. Add mid-level SCSI queue depth information to the adapter
+ /proc file: /proc/scsi/advansys[0...].
+
+ 2.0 (11/14/96):
+ 1. Change allocation of global structures used for device
+ initialization to guarantee they are in DMA-able memory.
+ Previously when the driver was loaded as a module these
+ structures might not have been in DMA-able memory, causing
+ device initialization to fail.
+
+ 2.1 (12/30/96):
+ 1. In advansys_reset(), if the request is a synchronous reset
+ request, even if the request serial number has changed, then
+ complete the request.
+ 2. Add Asc Library bug fixes including new microcode.
+ 3. Clear inquiry buffer before using it.
+ 4. Correct ifdef typo.
+
+ 2.2 (1/15/97):
+ 1. Add Asc Library bug fixes including new microcode.
+ 2. Add synchronous data transfer rate information to the
+ adapter /proc file: /proc/scsi/advansys[0...].
+ 3. Change ADVANSYS_DEBUG to be disabled by default. This
+ will reduce the size of the driver image, eliminate execution
+ overhead, and remove unneeded symbols from the kernel symbol
+ space that were previously added by the driver.
+ 4. Add new compile-time option ADVANSYS_ASSERT for assertion
+ code that used to be defined within ADVANSYS_DEBUG. This
+ option is enabled by default.
+
+ 2.8 (5/26/97):
+ 1. Change version number to 2.8 to synchronize the Linux driver
+ version numbering with other AdvanSys drivers.
+ 2. Reformat source files without tabs to present the same view
+ of the file to everyone regardless of the editor tab setting
+ being used.
+ 3. Add Asc Library bug fixes.
+
+ 3.1A (1/8/98):
+ 1. Change version number to 3.1 to indicate that support for
+ Ultra-Wide adapters (ABP-940UW) is included in this release.
+ 2. Add Asc Library (Narrow Board) bug fixes.
+ 3. Report an underrun condition with the host status byte set
+ to DID_UNDERRUN. Currently DID_UNDERRUN is defined to 0 which
+ causes the underrun condition to be ignored. When Linux defines
+ its own DID_UNDERRUN the constant defined in this file can be
+ removed.
+ 4. Add patch to AscWaitTixISRDone().
+ 5. Add support for up to 16 different AdvanSys host adapter SCSI
+ channels in one system. This allows four cards with four channels
+ to be used in one system.
+
+ 3.1B (1/9/98):
+ 1. Handle that PCI register base addresses are not always page
+ aligned even though ioremap() requires that the address argument
+ be page aligned.
+
+ 3.1C (1/10/98):
+ 1. Update latest BIOS version checked for from the /proc file.
+ 2. Don't set microcode SDTR variable at initialization. Instead
+ wait until device capabilities have been detected from an Inquiry
+ command.
+
+ 3.1D (1/21/98):
+ 1. Improve performance when the driver is compiled as module by
+ allowing up to 64 scatter-gather elements instead of 8.
+
+ 3.1E (5/1/98):
+ 1. Set time delay in AscWaitTixISRDone() to 1000 ms.
+ 2. Include SMP locking changes.
+ 3. For v2.1.93 and newer kernels use CONFIG_PCI and new PCI BIOS
+ access functions.
+ 4. Update board serial number printing.
+ 5. Try allocating an IRQ both with and without the SA_INTERRUPT
+ flag set to allow IRQ sharing with drivers that do not set
+ the SA_INTERRUPT flag. Also display a more descriptive error
+ message if request_irq() fails.
+ 6. Update to latest Asc and Adv Libraries.
+
+ J. Known Problems or Issues
+
+ 1. Remove conditional constants (ASC_QUEUE_FLOW_CONTROL) around
+ the queue depth flow control code when mid-level SCSI changes
+ are included in Linux.
+
+ K. Credits
+
+ Nathan Hartwell <mage@cdc3.cdc.net> provided the directions and
+ basis for the Linux v1.3.X changes which were included in the
+ 1.2 release.
+
+ Thomas E Zerucha <zerucha@shell.portal.com> pointed out a bug
+ in advansys_biosparam() which was fixed in the 1.3 release.
+
+ Erik Ratcliffe <erik@caldera.com> has done testing of the
+ AdvanSys driver in the Caldera releases.
+
+ Rik van Riel <H.H.vanRiel@fys.ruu.nl> provided a patch to
+ AscWaitTixISRDone() which he found necessary to make the
+ driver work with a SCSI-1 disk.
+
+ Mark Moran <mmoran@mmoran.com> has helped test Ultra-Wide
+ support in the 3.1A driver.
+
+ L. AdvanSys Contact Information
+
+ Mail: Advanced System Products, Inc.
+ 1150 Ringwood Court
+ San Jose, CA 95131
+ Operator: 1-408-383-9400
+ FAX: 1-408-383-9612
+ Tech Support: 1-800-525-7440/1-408-467-2930
+ BBS: 1-408-383-9540 (14400,N,8,1)
+ Interactive FAX: 1-408-383-9753
+ Customer Direct Sales: 1-800-525-7443/1-408-383-5777
+ Tech Support E-Mail: support@advansys.com
+ FTP Site: ftp.advansys.com (login: anonymous)
+ Web Site: http://www.advansys.com
+
+*/
+
+
+/*
+ * --- Linux Version
+ */
+
+/* Convert Linux Version, Patch-level, Sub-level to LINUX_VERSION_CODE. */
+#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif /* LINUX_VERSION_CODE */
+
+
+/*
+ * --- Linux Include Files
+ */
+
+#include <linux/config.h>
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+#ifdef MODULE
+#include <linux/module.h>
+#endif /* MODULE */
+#endif /* version >= v1.3.0 */
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+#include <linux/proc_fs.h>
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(2,1,23)
+#include <linux/init.h>
+#endif /* version >= v2.1.23 */
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#include "../block/blk.h"
+#else /* version >= v1.3.0 */
+#include <linux/blk.h>
+#include <linux/stat.h>
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(2,1,95)
+#include <asm/spinlock.h>
+#endif /* version >= 2.1.95 */
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include "advansys.h"
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(2,1,93)
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#endif /* CONFIG_PCI */
+#else /* version < v2.1.93 */
+/*
+ * For earlier than v2.1.93 the driver has its own PCI configuration.
+ * If PCI is not needed in a kernel before v2.1.93 this define can be
+ * turned off to make the driver object smaller.
+ */
+#define ASC_CONFIG_PCI
+#endif /* version < v2.1.93 */
+
+/*
+ * If Linux eventually defines a DID_UNDERRUN, the constant here can be
+ * removed. The current value of zero for DID_UNDERRUN results in underrun
+ * conditions being ignored.
+ */
+#define DID_UNDERRUN 0
+
+
+/*
+ * --- Driver Options
+ */
+
+/* Enable driver assertions. */
+#define ADVANSYS_ASSERT
+
+/* Enable driver tracing. */
+/* #define ADVANSYS_DEBUG */
+
+/*
+ * Because of no /proc to display them, statistics are disabled
+ * for versions prior to v1.3.0.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#undef ADVANSYS_STATS /* Disable statistics */
+#else /* version >= v1.3.0 */
+#define ADVANSYS_STATS /* Enable statistics. */
+#endif /* version >= v1.3.0 */
+
+
+/*
+ * --- Debugging Header
+ */
+
+#ifdef ADVANSYS_DEBUG
+#define STATIC
+#else /* ADVANSYS_DEBUG */
+#define STATIC static
+#endif /* ADVANSYS_DEBUG */
+
+
+/*
+ * --- Asc Library Constants and Macros
+ */
+
+#define ASC_LIB_VERSION_MAJOR 1
+#define ASC_LIB_VERSION_MINOR 22
+#define ASC_LIB_SERIAL_NUMBER 113
+
+typedef unsigned char uchar;
+
+#ifndef NULL
+#define NULL (0)
+#endif
+#ifndef TRUE
+#define TRUE (1)
+#endif
+#ifndef FALSE
+#define FALSE (0)
+#endif
+#define REG register
+#define rchar REG __s8
+#define rshort REG __s16
+#define rint REG __s32
+#define rlong REG __s32
+#define ruchar REG __u8
+#define rushort REG __u16
+#define ruint REG __u32
+#define rulong REG __u32
+#define NULLPTR (void *)0
+#define FNULLPTR (void *)0UL
+#define EOF (-1)
+#define EOS '\0'
+#define ERR (-1)
+#define UB_ERR (uchar)(0xFF)
+#define UW_ERR (uint)(0xFFFF)
+#define UL_ERR (ulong)(0xFFFFFFFFUL)
+#define iseven_word(val) ((((uint)val) & (uint)0x0001) == 0)
+#define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0)
+#define toeven_word(val) (((uint)val) & (uint)0xFFFE)
+#define biton(val, bits) (((uint)(val >> bits) & (uint)0x0001) != 0)
+#define bitoff(val, bits) (((uint)(val >> bits) & (uint)0x0001) == 0)
+#define lbiton(val, bits) (((ulong)(val >> bits) & (ulong)0x00000001UL) != 0)
+#define lbitoff(val, bits) (((ulong)(val >> bits) & (ulong)0x00000001UL) == 0)
+#define absh(val) ((val) < 0 ? -(val) : (val))
+#define swapbyte(ch) ((((ch) << 4) | ((ch) >> 4)))
+#ifndef GBYTE
+#define GBYTE (0x40000000UL)
+#endif
+#ifndef MBYTE
+#define MBYTE (0x100000UL)
+#endif
+#ifndef KBYTE
+#define KBYTE (0x400)
+#endif
+#define HI_BYTE(x) (*((__u8 *)(&x)+1))
+#define LO_BYTE(x) (*((__u8 *)&x))
+#define HI_WORD(x) (*((__u16 *)(&x)+1))
+#define LO_WORD(x) (*((__u16 *)&x))
+#ifndef MAKEWORD
+#define MAKEWORD(lo, hi) ((__u16) (((__u16) lo) | ((__u16) hi << 8)))
+#endif
+#ifndef MAKELONG
+#define MAKELONG(lo, hi) ((__u32) (((__u32) lo) | ((__u32) hi << 16)))
+#endif
+#define SwapWords(dWord) ((__u32) ((dWord >> 16) | (dWord << 16)))
+#define SwapBytes(word) ((__u16) ((word >> 8) | (word << 8)))
+#define BigToLittle(dWord) ((__u32) (SwapWords(MAKELONG(SwapBytes(LO_WORD(dWord)), SwapBytes(HI_WORD(dWord))))))
+#define LittleToBig(dWord) BigToLittle(dWord)
+#define AscPCIConfigVendorIDRegister 0x0000
+#define AscPCIConfigDeviceIDRegister 0x0002
+#define AscPCIConfigCommandRegister 0x0004
+#define AscPCIConfigStatusRegister 0x0006
+#define AscPCIConfigRevisionIDRegister 0x0008
+#define AscPCIConfigCacheSize 0x000C
+#define AscPCIConfigLatencyTimer 0x000D
+#define AscPCIIOBaseRegister 0x0010
+#define AscPCICmdRegBits_IOMemBusMaster 0x0007
+#define ASC_PCI_ID2BUS(id) ((id) & 0xFF)
+#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F)
+#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7)
+#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF))
+#define ASC_PCI_VENDORID 0x10CD
+#define ASC_PCI_DEVICEID_1200A 0x1100
+#define ASC_PCI_DEVICEID_1200B 0x1200
+#define ASC_PCI_DEVICEID_ULTRA 0x1300
+#define ASC_PCI_REVISION_3150 0x02
+#define ASC_PCI_REVISION_3050 0x03
+
+#define ASC_DVCLIB_CALL_DONE (1)
+#define ASC_DVCLIB_CALL_FAILED (0)
+#define ASC_DVCLIB_CALL_ERROR (-1)
+
+#define PortAddr unsigned short /* port address size */
+#define Ptr2Func ulong
+#define inp(port) inb(port)
+#define inpw(port) inw(port)
+#define inpl(port) inl(port)
+#define outp(port, byte) outb((byte), (port))
+#define outpw(port, word) outw((word), (port))
+#define outpl(port, long) outl((long), (port))
+#define ASC_MAX_SG_QUEUE 7
+#define ASC_MAX_SG_LIST SG_ALL
+
+#define ASC_CS_TYPE unsigned short
+#ifndef asc_ptr_type
+#define asc_ptr_type
+#endif
+
+#ifndef ASC_GET_PTR2FUNC
+#define ASC_GET_PTR2FUNC(fun) (Ptr2Func)(fun)
+#endif
+#define FLIP_BYTE_NIBBLE(x) (((x<<4)& 0xFF) | (x>>4))
+#define ASC_IS_ISA (0x0001)
+#define ASC_IS_ISAPNP (0x0081)
+#define ASC_IS_EISA (0x0002)
+#define ASC_IS_PCI (0x0004)
+#define ASC_IS_PCI_ULTRA (0x0104)
+#define ASC_IS_PCMCIA (0x0008)
+#define ASC_IS_MCA (0x0020)
+#define ASC_IS_VL (0x0040)
+#define ASC_ISA_PNP_PORT_ADDR (0x279)
+#define ASC_ISA_PNP_PORT_WRITE (ASC_ISA_PNP_PORT_ADDR+0x800)
+#define ASC_IS_WIDESCSI_16 (0x0100)
+#define ASC_IS_WIDESCSI_32 (0x0200)
+#define ASC_IS_BIG_ENDIAN (0x8000)
+#define ASC_CHIP_MIN_VER_VL (0x01)
+#define ASC_CHIP_MAX_VER_VL (0x07)
+#define ASC_CHIP_MIN_VER_PCI (0x09)
+#define ASC_CHIP_MAX_VER_PCI (0x0F)
+#define ASC_CHIP_VER_PCI_BIT (0x08)
+#define ASC_CHIP_MIN_VER_ISA (0x11)
+#define ASC_CHIP_MIN_VER_ISA_PNP (0x21)
+#define ASC_CHIP_MAX_VER_ISA (0x27)
+#define ASC_CHIP_VER_ISA_BIT (0x30)
+#define ASC_CHIP_VER_ISAPNP_BIT (0x20)
+#define ASC_CHIP_VER_ASYN_BUG (0x21)
+#define ASC_CHIP_VER_PCI 0x08
+#define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02)
+#define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03)
+#define ASC_CHIP_MIN_VER_EISA (0x41)
+#define ASC_CHIP_MAX_VER_EISA (0x47)
+#define ASC_CHIP_VER_EISA_BIT (0x40)
+#define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3)
+#define ASC_MAX_LIB_SUPPORTED_ISA_CHIP_VER 0x21
+#define ASC_MAX_LIB_SUPPORTED_PCI_CHIP_VER 0x0A
+#define ASC_MAX_VL_DMA_ADDR (0x07FFFFFFL)
+#define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL)
+#define ASC_MAX_PCI_DMA_ADDR (0xFFFFFFFFL)
+#define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL)
+#define ASC_MAX_ISA_DMA_ADDR (0x00FFFFFFL)
+#define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL)
+#define ASC_MAX_EISA_DMA_ADDR (0x07FFFFFFL)
+#define ASC_MAX_EISA_DMA_COUNT (0x07FFFFFFL)
+#ifndef inpw_noswap
+#define inpw_noswap(port) inpw(port)
+#endif
+#ifndef outpw_noswap
+#define outpw_noswap(port, data) outpw(port, data)
+#endif
+#define ASC_SCSI_ID_BITS 3
+#define ASC_SCSI_TIX_TYPE uchar
+#define ASC_ALL_DEVICE_BIT_SET 0xFF
+#ifdef ASC_WIDESCSI_16
+#undef ASC_SCSI_ID_BITS
+#define ASC_SCSI_ID_BITS 4
+#define ASC_ALL_DEVICE_BIT_SET 0xFFFF
+#endif
+#ifdef ASC_WIDESCSI_32
+#undef ASC_SCSI_ID_BITS
+#define ASC_SCSI_ID_BITS 5
+#define ASC_ALL_DEVICE_BIT_SET 0xFFFFFFFFL
+#endif
+#if ASC_SCSI_ID_BITS == 3
+#define ASC_SCSI_BIT_ID_TYPE uchar
+#define ASC_MAX_TID 7
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFF
+#elif ASC_SCSI_ID_BITS == 4
+#define ASC_SCSI_BIT_ID_TYPE ushort
+#define ASC_MAX_TID 15
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFFFF
+#elif ASC_SCSI_ID_BITS == 5
+#define ASC_SCSI_BIT_ID_TYPE ulong
+#define ASC_MAX_TID 31
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFFFFFFFF
+#else
+#error ASC_SCSI_ID_BITS definition is wrong
+#endif
+#define ASC_MAX_SENSE_LEN 32
+#define ASC_MIN_SENSE_LEN 14
+#define ASC_MAX_CDB_LEN 12
+#define ASC_SCSI_RESET_HOLD_TIME_US 60
+#define SCSICMD_TestUnitReady 0x00
+#define SCSICMD_Rewind 0x01
+#define SCSICMD_Rezero 0x01
+#define SCSICMD_RequestSense 0x03
+#define SCSICMD_Format 0x04
+#define SCSICMD_FormatUnit 0x04
+#define SCSICMD_Read6 0x08
+#define SCSICMD_Write6 0x0A
+#define SCSICMD_Seek6 0x0B
+#define SCSICMD_Inquiry 0x12
+#define SCSICMD_Verify6 0x13
+#define SCSICMD_ModeSelect6 0x15
+#define SCSICMD_ModeSense6 0x1A
+#define SCSICMD_StartStopUnit 0x1B
+#define SCSICMD_LoadUnloadTape 0x1B
+#define SCSICMD_ReadCapacity 0x25
+#define SCSICMD_Read10 0x28
+#define SCSICMD_Write10 0x2A
+#define SCSICMD_Seek10 0x2B
+#define SCSICMD_Erase10 0x2C
+#define SCSICMD_WriteAndVerify10 0x2E
+#define SCSICMD_Verify10 0x2F
+#define SCSICMD_WriteBuffer 0x3B
+#define SCSICMD_ReadBuffer 0x3C
+#define SCSICMD_ReadLong 0x3E
+#define SCSICMD_WriteLong 0x3F
+#define SCSICMD_ReadTOC 0x43
+#define SCSICMD_ReadHeader 0x44
+#define SCSICMD_ModeSelect10 0x55
+#define SCSICMD_ModeSense10 0x5A
+#define SCSI_TYPE_DASD 0x00
+#define SCSI_TYPE_SASD 0x01
+#define SCSI_TYPE_PRN 0x02
+#define SCSI_TYPE_PROC 0x03
+#define SCSI_TYPE_WORM 0x04
+#define SCSI_TYPE_CDROM 0x05
+#define SCSI_TYPE_SCANNER 0x06
+#define SCSI_TYPE_OPTMEM 0x07
+#define SCSI_TYPE_MED_CHG 0x08
+#define SCSI_TYPE_COMM 0x09
+#define SCSI_TYPE_UNKNOWN 0x1F
+#define SCSI_TYPE_NO_DVC 0xFF
+#define ASC_SCSIDIR_NOCHK 0x00
+#define ASC_SCSIDIR_T2H 0x08
+#define ASC_SCSIDIR_H2T 0x10
+#define ASC_SCSIDIR_NODATA 0x18
+#define SCSI_SENKEY_NO_SENSE 0x00
+#define SCSI_SENKEY_UNDEFINED 0x01
+#define SCSI_SENKEY_NOT_READY 0x02
+#define SCSI_SENKEY_MEDIUM_ERR 0x03
+#define SCSI_SENKEY_HW_ERR 0x04
+#define SCSI_SENKEY_ILLEGAL 0x05
+#define SCSI_SENKEY_ATTENTION 0x06
+#define SCSI_SENKEY_PROTECTED 0x07
+#define SCSI_SENKEY_BLANK 0x08
+#define SCSI_SENKEY_V_UNIQUE 0x09
+#define SCSI_SENKEY_CPY_ABORT 0x0A
+#define SCSI_SENKEY_ABORT 0x0B
+#define SCSI_SENKEY_EQUAL 0x0C
+#define SCSI_SENKEY_VOL_OVERFLOW 0x0D
+#define SCSI_SENKEY_MISCOMP 0x0E
+#define SCSI_SENKEY_RESERVED 0x0F
+#define SCSI_ASC_NOMEDIA 0x3A
+#define ASC_SRB_HOST(x) ((uchar)((uchar)(x) >> 4))
+#define ASC_SRB_TID(x) ((uchar)((uchar)(x) & (uchar)0x0F))
+#define ASC_SRB_LUN(x) ((uchar)((uint)(x) >> 13))
+#define PUT_CDB1(x) ((uchar)((uint)(x) >> 8))
+#define SS_GOOD 0x00
+#define SS_CHK_CONDITION 0x02
+#define SS_CONDITION_MET 0x04
+#define SS_TARGET_BUSY 0x08
+#define SS_INTERMID 0x10
+#define SS_INTERMID_COND_MET 0x14
+#define SS_RSERV_CONFLICT 0x18
+#define SS_CMD_TERMINATED 0x22
+#define SS_QUEUE_FULL 0x28
+#define MS_CMD_DONE 0x00
+#define MS_EXTEND 0x01
+#define MS_SDTR_LEN 0x03
+#define MS_SDTR_CODE 0x01
+#define MS_WDTR_LEN 0x02
+#define MS_WDTR_CODE 0x03
+#define MS_MDP_LEN 0x05
+#define MS_MDP_CODE 0x00
+#define M1_SAVE_DATA_PTR 0x02
+#define M1_RESTORE_PTRS 0x03
+#define M1_DISCONNECT 0x04
+#define M1_INIT_DETECTED_ERR 0x05
+#define M1_ABORT 0x06
+#define M1_MSG_REJECT 0x07
+#define M1_NO_OP 0x08
+#define M1_MSG_PARITY_ERR 0x09
+#define M1_LINK_CMD_DONE 0x0A
+#define M1_LINK_CMD_DONE_WFLAG 0x0B
+#define M1_BUS_DVC_RESET 0x0C
+#define M1_ABORT_TAG 0x0D
+#define M1_CLR_QUEUE 0x0E
+#define M1_INIT_RECOVERY 0x0F
+#define M1_RELEASE_RECOVERY 0x10
+#define M1_KILL_IO_PROC 0x11
+#define M2_QTAG_MSG_SIMPLE 0x20
+#define M2_QTAG_MSG_HEAD 0x21
+#define M2_QTAG_MSG_ORDERED 0x22
+#define M2_IGNORE_WIDE_RESIDUE 0x23
+
+typedef struct {
+ uchar peri_dvc_type:5;
+ uchar peri_qualifier:3;
+} ASC_SCSI_INQ0;
+
+typedef struct {
+ uchar dvc_type_modifier:7;
+ uchar rmb:1;
+} ASC_SCSI_INQ1;
+
+typedef struct {
+ uchar ansi_apr_ver:3;
+ uchar ecma_ver:3;
+ uchar iso_ver:2;
+} ASC_SCSI_INQ2;
+
+typedef struct {
+ uchar rsp_data_fmt:4;
+ uchar res:2;
+ uchar TemIOP:1;
+ uchar aenc:1;
+} ASC_SCSI_INQ3;
+
+typedef struct {
+ uchar StfRe:1;
+ uchar CmdQue:1;
+ uchar Reserved:1;
+ uchar Linked:1;
+ uchar Sync:1;
+ uchar WBus16:1;
+ uchar WBus32:1;
+ uchar RelAdr:1;
+} ASC_SCSI_INQ7;
+
+typedef struct {
+ ASC_SCSI_INQ0 byte0;
+ ASC_SCSI_INQ1 byte1;
+ ASC_SCSI_INQ2 byte2;
+ ASC_SCSI_INQ3 byte3;
+ uchar add_len;
+ uchar res1;
+ uchar res2;
+ ASC_SCSI_INQ7 byte7;
+ uchar vendor_id[8];
+ uchar product_id[16];
+ uchar product_rev_level[4];
+} ASC_SCSI_INQUIRY;
+
+typedef struct asc_req_sense {
+ uchar err_code:7;
+ uchar info_valid:1;
+ uchar segment_no;
+ uchar sense_key:4;
+ uchar reserved_bit:1;
+ uchar sense_ILI:1;
+ uchar sense_EOM:1;
+ uchar file_mark:1;
+ uchar info1[4];
+ uchar add_sense_len;
+ uchar cmd_sp_info[4];
+ uchar asc;
+ uchar ascq;
+ uchar fruc;
+ uchar sks_byte0:7;
+ uchar sks_valid:1;
+ uchar sks_bytes[2];
+ uchar notused[2];
+ uchar ex_sense_code;
+ uchar info2[4];
+} ASC_REQ_SENSE;
+
+#define ASC_SG_LIST_PER_Q 7
+#define QS_FREE 0x00
+#define QS_READY 0x01
+#define QS_DISC1 0x02
+#define QS_DISC2 0x04
+#define QS_BUSY 0x08
+#define QS_ABORTED 0x40
+#define QS_DONE 0x80
+#define QC_NO_CALLBACK 0x01
+#define QC_SG_SWAP_QUEUE 0x02
+#define QC_SG_HEAD 0x04
+#define QC_DATA_IN 0x08
+#define QC_DATA_OUT 0x10
+#define QC_URGENT 0x20
+#define QC_MSG_OUT 0x40
+#define QC_REQ_SENSE 0x80
+#define QCSG_SG_XFER_LIST 0x02
+#define QCSG_SG_XFER_MORE 0x04
+#define QCSG_SG_XFER_END 0x08
+#define QD_IN_PROGRESS 0x00
+#define QD_NO_ERROR 0x01
+#define QD_ABORTED_BY_HOST 0x02
+#define QD_WITH_ERROR 0x04
+#define QD_INVALID_REQUEST 0x80
+#define QD_INVALID_HOST_NUM 0x81
+#define QD_INVALID_DEVICE 0x82
+#define QD_ERR_INTERNAL 0xFF
+#define QHSTA_NO_ERROR 0x00
+#define QHSTA_M_SEL_TIMEOUT 0x11
+#define QHSTA_M_DATA_OVER_RUN 0x12
+#define QHSTA_M_DATA_UNDER_RUN 0x12
+#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
+#define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14
+#define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21
+#define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22
+#define QHSTA_D_HOST_ABORT_FAILED 0x23
+#define QHSTA_D_EXE_SCSI_Q_FAILED 0x24
+#define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25
+#define QHSTA_D_ASPI_NO_BUF_POOL 0x26
+#define QHSTA_M_WTM_TIMEOUT 0x41
+#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42
+#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43
+#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44
+#define QHSTA_M_TARGET_STATUS_BUSY 0x45
+#define QHSTA_M_BAD_TAG_CODE 0x46
+#define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47
+#define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48
+#define QHSTA_D_LRAM_CMP_ERROR 0x81
+#define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1
+#define ASC_FLAG_SCSIQ_REQ 0x01
+#define ASC_FLAG_BIOS_SCSIQ_REQ 0x02
+#define ASC_FLAG_BIOS_ASYNC_IO 0x04
+#define ASC_FLAG_SRB_LINEAR_ADDR 0x08
+#define ASC_FLAG_WIN16 0x10
+#define ASC_FLAG_WIN32 0x20
+#define ASC_FLAG_ISA_OVER_16MB 0x40
+#define ASC_FLAG_DOS_VM_CALLBACK 0x80
+#define ASC_TAG_FLAG_EXTRA_BYTES 0x10
+#define ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04
+#define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08
+#define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40
+#define ASC_SCSIQ_CPY_BEG 4
+#define ASC_SCSIQ_SGHD_CPY_BEG 2
+#define ASC_SCSIQ_B_FWD 0
+#define ASC_SCSIQ_B_BWD 1
+#define ASC_SCSIQ_B_STATUS 2
+#define ASC_SCSIQ_B_QNO 3
+#define ASC_SCSIQ_B_CNTL 4
+#define ASC_SCSIQ_B_SG_QUEUE_CNT 5
+#define ASC_SCSIQ_D_DATA_ADDR 8
+#define ASC_SCSIQ_D_DATA_CNT 12
+#define ASC_SCSIQ_B_SENSE_LEN 20
+#define ASC_SCSIQ_DONE_INFO_BEG 22
+#define ASC_SCSIQ_D_SRBPTR 22
+#define ASC_SCSIQ_B_TARGET_IX 26
+#define ASC_SCSIQ_B_CDB_LEN 28
+#define ASC_SCSIQ_B_TAG_CODE 29
+#define ASC_SCSIQ_W_VM_ID 30
+#define ASC_SCSIQ_DONE_STATUS 32
+#define ASC_SCSIQ_HOST_STATUS 33
+#define ASC_SCSIQ_SCSI_STATUS 34
+#define ASC_SCSIQ_CDB_BEG 36
+#define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56
+#define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60
+#define ASC_SCSIQ_B_SG_WK_QP 49
+#define ASC_SCSIQ_B_SG_WK_IX 50
+#define ASC_SCSIQ_W_REQ_COUNT 52
+#define ASC_SCSIQ_B_LIST_CNT 6
+#define ASC_SCSIQ_B_CUR_LIST_CNT 7
+#define ASC_SGQ_B_SG_CNTL 4
+#define ASC_SGQ_B_SG_HEAD_QP 5
+#define ASC_SGQ_B_SG_LIST_CNT 6
+#define ASC_SGQ_B_SG_CUR_LIST_CNT 7
+#define ASC_SGQ_LIST_BEG 8
+#define ASC_DEF_SCSI1_QNG 4
+#define ASC_MAX_SCSI1_QNG 4
+#define ASC_DEF_SCSI2_QNG 16
+#define ASC_MAX_SCSI2_QNG 32
+#define ASC_TAG_CODE_MASK 0x23
+#define ASC_STOP_REQ_RISC_STOP 0x01
+#define ASC_STOP_ACK_RISC_STOP 0x03
+#define ASC_STOP_CLEAN_UP_BUSY_Q 0x10
+#define ASC_STOP_CLEAN_UP_DISC_Q 0x20
+#define ASC_STOP_HOST_REQ_RISC_HALT 0x40
+#define ASC_TIDLUN_TO_IX(tid, lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<<ASC_SCSI_ID_BITS))
+#define ASC_TID_TO_TARGET_ID(tid) (ASC_SCSI_BIT_ID_TYPE)(0x01 << (tid))
+#define ASC_TIX_TO_TARGET_ID(tix) (0x01 << ((tix) & ASC_MAX_TID))
+#define ASC_TIX_TO_TID(tix) ((tix) & ASC_MAX_TID)
+#define ASC_TID_TO_TIX(tid) ((tid) & ASC_MAX_TID)
+#define ASC_TIX_TO_LUN(tix) (((tix) >> ASC_SCSI_ID_BITS) & ASC_MAX_LUN)
+#define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6))
+
+/*
+ * Per-request SCSI queue descriptors.
+ *
+ * NOTE(review): these structures appear to mirror layouts the RISC
+ * microcode reads out of adapter local RAM (they are copied with the
+ * DvcPutScsiQ/DvcGetQinfo word-copy helpers declared below), so the
+ * field order and sizes look firmware-defined - do not reorder or
+ * repack them; confirm against the ASC library documentation.
+ *
+ * NOTE(review): the struct tags "asc_scisq_1"/"asc_scisq_2" transpose
+ * "scsiq"; kept as-is since the tags may be referenced elsewhere.
+ */
+typedef struct asc_scisq_1 {
+ uchar status;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_queue_cnt;
+ uchar target_id;
+ uchar target_lun;
+ ulong data_addr;
+ ulong data_cnt;
+ ulong sense_addr;
+ uchar sense_len;
+ uchar extra_bytes;
+} ASC_SCSIQ_1;
+
+typedef struct asc_scisq_2 {
+ ulong srb_ptr;
+ uchar target_ix;
+ uchar flag;
+ uchar cdb_len;
+ uchar tag_code;
+ ushort vm_id;
+} ASC_SCSIQ_2;
+
+/* Completion status bytes for a finished request. */
+typedef struct asc_scsiq_3 {
+ uchar done_stat;
+ uchar host_stat;
+ uchar scsi_stat;
+ uchar scsi_msg;
+} ASC_SCSIQ_3;
+
+typedef struct asc_scsiq_4 {
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar y_first_sg_list_qp;
+ uchar y_working_sg_qp;
+ uchar y_working_sg_ix;
+ uchar y_res;
+ ushort x_req_count;
+ ushort x_reconnect_rtn;
+ ulong x_saved_data_addr;
+ ulong x_saved_data_cnt;
+} ASC_SCSIQ_4;
+
+/* Completion information handed back to the ISR callback (see
+ * ASC_ISR_CALLBACK below). */
+typedef struct asc_q_done_info {
+ ASC_SCSIQ_2 d2;
+ ASC_SCSIQ_3 d3;
+ uchar q_status;
+ uchar q_no;
+ uchar cntl;
+ uchar sense_len;
+ uchar extra_bytes;
+ uchar res;
+ ulong remain_bytes;
+} ASC_QDONE_INFO;
+
+/* One scatter-gather element: physical address + byte count. */
+typedef struct asc_sg_list {
+ ulong addr;
+ ulong bytes;
+} ASC_SG_LIST;
+
+typedef struct asc_sg_head {
+ ushort entry_cnt;
+ ushort queue_cnt;
+ ushort entry_to_copy;
+ ushort res;
+ ASC_SG_LIST sg_list[ASC_MAX_SG_LIST];
+} ASC_SG_HEAD;
+
+#define ASC_MIN_SG_LIST 2
+
+/* Same header as ASC_SG_HEAD but with the minimum-sized element array. */
+typedef struct asc_min_sg_head {
+ ushort entry_cnt;
+ ushort queue_cnt;
+ ushort entry_to_copy;
+ ushort res;
+ ASC_SG_LIST sg_list[ASC_MIN_SG_LIST];
+} ASC_MIN_SG_HEAD;
+
+/* Queue control flags.  NOTE(review): "COALEASE" is a misspelling of
+ * "coalesce"; kept because the name may be used elsewhere. */
+#define QCX_SORT (0x0001)
+#define QCX_COALEASE (0x0002)
+
+/* A SCSI request as submitted by the host driver. */
+typedef struct asc_scsi_q {
+ ASC_SCSIQ_1 q1;
+ ASC_SCSIQ_2 q2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+} ASC_SCSI_Q;
+
+/* A request with inline CDB and sense buffers appended.
+ * NOTE(review): ASC_SCSI_REQ_Q and ASC_SCSI_BIOS_REQ_Q are declared
+ * field-for-field identical here - presumably kept separate to match
+ * the vendor library's layout; confirm before merging them. */
+typedef struct asc_scsi_req_q {
+ ASC_SCSIQ_1 r1;
+ ASC_SCSIQ_2 r2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+ uchar *sense_ptr;
+ ASC_SCSIQ_3 r3;
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar sense[ASC_MIN_SENSE_LEN];
+} ASC_SCSI_REQ_Q;
+
+typedef struct asc_scsi_bios_req_q {
+ ASC_SCSIQ_1 r1;
+ ASC_SCSIQ_2 r2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+ uchar *sense_ptr;
+ ASC_SCSIQ_3 r3;
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar sense[ASC_MIN_SENSE_LEN];
+} ASC_SCSI_BIOS_REQ_Q;
+
+/* Image of one queue block as kept by the RISC: forward/backward queue
+ * links followed by the four request sections. */
+typedef struct asc_risc_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SCSIQ_1 i1;
+ ASC_SCSIQ_2 i2;
+ ASC_SCSIQ_3 i3;
+ ASC_SCSIQ_4 i4;
+} ASC_RISC_Q;
+
+typedef struct asc_sg_list_q {
+ uchar seq_no;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_head_qp;
+ uchar sg_list_cnt;
+ uchar sg_cur_list_cnt;
+} ASC_SG_LIST_Q;
+
+/* Scatter-gather continuation queue block: linked like ASC_RISC_Q and
+ * carrying up to 7 ASC_SG_LIST elements. */
+typedef struct asc_risc_sg_list_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SG_LIST_Q sg;
+ ASC_SG_LIST sg_list[7];
+} ASC_RISC_SG_LIST_Q;
+
+/* Busy-wait iteration limits used when submitting I/O. */
+#define ASC_EXE_SCSI_IO_MAX_IDLE_LOOP 0x1000000UL
+#define ASC_EXE_SCSI_IO_MAX_WAIT_LOOP 1024
+/* ASC library error codes (see AscSetLibErrorCode prototype below). */
+#define ASCQ_ERR_NO_ERROR 0
+#define ASCQ_ERR_IO_NOT_FOUND 1
+#define ASCQ_ERR_LOCAL_MEM 2
+#define ASCQ_ERR_CHKSUM 3
+#define ASCQ_ERR_START_CHIP 4
+#define ASCQ_ERR_INT_TARGET_ID 5
+#define ASCQ_ERR_INT_LOCAL_MEM 6
+#define ASCQ_ERR_HALT_RISC 7
+#define ASCQ_ERR_GET_ASPI_ENTRY 8
+#define ASCQ_ERR_CLOSE_ASPI 9
+#define ASCQ_ERR_HOST_INQUIRY 0x0A
+#define ASCQ_ERR_SAVED_SRB_BAD 0x0B
+#define ASCQ_ERR_QCNTL_SG_LIST 0x0C
+#define ASCQ_ERR_Q_STATUS 0x0D
+#define ASCQ_ERR_WR_SCSIQ 0x0E
+#define ASCQ_ERR_PC_ADDR 0x0F
+#define ASCQ_ERR_SYN_OFFSET 0x10
+#define ASCQ_ERR_SYN_XFER_TIME 0x11
+#define ASCQ_ERR_LOCK_DMA 0x12
+#define ASCQ_ERR_UNLOCK_DMA 0x13
+#define ASCQ_ERR_VDS_CHK_INSTALL 0x14
+#define ASCQ_ERR_MICRO_CODE_HALT 0x15
+#define ASCQ_ERR_SET_LRAM_ADDR 0x16
+#define ASCQ_ERR_CUR_QNG 0x17
+#define ASCQ_ERR_SG_Q_LINKS 0x18
+#define ASCQ_ERR_SCSIQ_PTR 0x19
+#define ASCQ_ERR_ISR_RE_ENTRY 0x1A
+#define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B
+#define ASCQ_ERR_ISR_ON_CRITICAL 0x1C
+#define ASCQ_ERR_SG_LIST_ODD_ADDRESS 0x1D
+#define ASCQ_ERR_XFER_ADDRESS_TOO_BIG 0x1E
+#define ASCQ_ERR_SCSIQ_NULL_PTR 0x1F
+#define ASCQ_ERR_SCSIQ_BAD_NEXT_PTR 0x20
+#define ASCQ_ERR_GET_NUM_OF_FREE_Q 0x21
+#define ASCQ_ERR_SEND_SCSI_Q 0x22
+#define ASCQ_ERR_HOST_REQ_RISC_HALT 0x23
+#define ASCQ_ERR_RESET_SDTR 0x24
+/* NOTE(review): single-bit values - presumably OR-able, non-fatal
+ * warning flags returned from configuration; confirm at call sites. */
+#define ASC_WARN_NO_ERROR 0x0000
+#define ASC_WARN_IO_PORT_ROTATE 0x0001
+#define ASC_WARN_EEPROM_CHKSUM 0x0002
+#define ASC_WARN_IRQ_MODIFIED 0x0004
+#define ASC_WARN_AUTO_CONFIG 0x0008
+#define ASC_WARN_CMD_QNG_CONFLICT 0x0010
+#define ASC_WARN_EEPROM_RECOVER 0x0020
+#define ASC_WARN_CFG_MSW_RECOVER 0x0040
+#define ASC_WARN_SET_PCI_CONFIG_SPACE 0x0080
+/* Initialization ("internal") error flags, single-bit values. */
+#define ASC_IERR_WRITE_EEPROM 0x0001
+#define ASC_IERR_MCODE_CHKSUM 0x0002
+#define ASC_IERR_SET_PC_ADDR 0x0004
+#define ASC_IERR_START_STOP_CHIP 0x0008
+#define ASC_IERR_IRQ_NO 0x0010
+#define ASC_IERR_SET_IRQ_NO 0x0020
+#define ASC_IERR_CHIP_VERSION 0x0040
+#define ASC_IERR_SET_SCSI_ID 0x0080
+#define ASC_IERR_GET_PHY_ADDR 0x0100
+#define ASC_IERR_BAD_SIGNATURE 0x0200
+#define ASC_IERR_NO_BUS_TYPE 0x0400
+#define ASC_IERR_SCAM 0x0800
+#define ASC_IERR_SET_SDTR 0x1000
+#define ASC_IERR_RW_LRAM 0x8000
+/* IRQ range and per-device/total queue-depth limits. */
+#define ASC_DEF_IRQ_NO 10
+#define ASC_MAX_IRQ_NO 15
+#define ASC_MIN_IRQ_NO 10
+#define ASC_MIN_REMAIN_Q (0x02)
+#define ASC_DEF_MAX_TOTAL_QNG (0xF0)
+#define ASC_MIN_TAG_Q_PER_DVC (0x04)
+#define ASC_DEF_TAG_Q_PER_DVC (0x04)
+#define ASC_MIN_FREE_Q ASC_MIN_REMAIN_Q
+#define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q))
+#define ASC_MAX_TOTAL_QNG 240
+#define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16
+#define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8
+#define ASC_MAX_PCI_INRAM_TOTAL_QNG 20
+#define ASC_MAX_INRAM_TAG_QNG 16
+/* ISA/EISA I/O port probing: table size, search stride and the fixed
+ * candidate base addresses. */
+#define ASC_IOADR_TABLE_MAX_IX 11
+#define ASC_IOADR_GAP 0x10
+#define ASC_SEARCH_IOP_GAP 0x10
+#define ASC_MIN_IOP_ADDR (PortAddr)0x0100
+#define ASC_MAX_IOP_ADDR (PortAddr)0x3F0
+#define ASC_IOADR_1 (PortAddr)0x0110
+#define ASC_IOADR_2 (PortAddr)0x0130
+#define ASC_IOADR_3 (PortAddr)0x0150
+#define ASC_IOADR_4 (PortAddr)0x0190
+#define ASC_IOADR_5 (PortAddr)0x0210
+#define ASC_IOADR_6 (PortAddr)0x0230
+#define ASC_IOADR_7 (PortAddr)0x0250
+#define ASC_IOADR_8 (PortAddr)0x0330
+#define ASC_IOADR_DEF ASC_IOADR_8
+#define ASC_LIB_SCSIQ_WK_SP 256
+/* Synchronous (SDTR) transfer parameters and the per-index period
+ * tables; the _NS_ values are presumably periods in nanoseconds. */
+#define ASC_MAX_SYN_XFER_NO 16
+#define ASC_SYN_MAX_OFFSET 0x0F
+#define ASC_DEF_SDTR_OFFSET 0x0F
+#define ASC_DEF_SDTR_INDEX 0x00
+#define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02
+#define SYN_XFER_NS_0 25
+#define SYN_XFER_NS_1 30
+#define SYN_XFER_NS_2 35
+#define SYN_XFER_NS_3 40
+#define SYN_XFER_NS_4 50
+#define SYN_XFER_NS_5 60
+#define SYN_XFER_NS_6 70
+#define SYN_XFER_NS_7 85
+#define SYN_ULTRA_XFER_NS_0 12
+#define SYN_ULTRA_XFER_NS_1 19
+#define SYN_ULTRA_XFER_NS_2 25
+#define SYN_ULTRA_XFER_NS_3 32
+#define SYN_ULTRA_XFER_NS_4 38
+#define SYN_ULTRA_XFER_NS_5 44
+#define SYN_ULTRA_XFER_NS_6 50
+#define SYN_ULTRA_XFER_NS_7 57
+#define SYN_ULTRA_XFER_NS_8 63
+#define SYN_ULTRA_XFER_NS_9 69
+#define SYN_ULTRA_XFER_NS_10 75
+#define SYN_ULTRA_XFER_NS_11 82
+#define SYN_ULTRA_XFER_NS_12 88
+#define SYN_ULTRA_XFER_NS_13 94
+#define SYN_ULTRA_XFER_NS_14 100
+#define SYN_ULTRA_XFER_NS_15 107
+
+/* SCSI extended message buffer: header bytes plus a union holding the
+ * SDTR, WDTR or modify-data-pointer (MDP) message payload. */
+typedef struct ext_msg {
+ uchar msg_type;
+ uchar msg_len;
+ uchar msg_req;
+ union {
+ struct {
+ uchar sdtr_xfer_period;
+ uchar sdtr_req_ack_offset;
+ } sdtr;
+ struct {
+ uchar wdtr_width;
+ } wdtr;
+ struct {
+ uchar mdp_b3;
+ uchar mdp_b2;
+ uchar mdp_b1;
+ uchar mdp_b0;
+ } mdp;
+ } u_ext_msg;
+ uchar res;
+} EXT_MSG;
+
+/*
+ * Shorthand accessors for EXT_MSG union members, e.g. msg.xfer_period
+ * expands to msg.u_ext_msg.sdtr.sdtr_xfer_period.
+ */
+#define xfer_period u_ext_msg.sdtr.sdtr_period
+#define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset
+#define wdtr_width u_ext_msg.wdtr.wdtr_width
+/*
+ * BUG FIX: the mdp_bX fields live inside the union's 'mdp' struct, so
+ * the expansion must go through '.mdp.'; the original expansions
+ * (u_ext_msg.mdp_bX) named a non-existent direct union member and
+ * could not compile at any use site.  (The macro name recurring inside
+ * its own replacement list is not re-expanded, so the trailing field
+ * name is safe.)
+ */
+#define mdp_b3 u_ext_msg.mdp.mdp_b3
+#define mdp_b2 u_ext_msg.mdp.mdp_b2
+#define mdp_b1 u_ext_msg.mdp.mdp_b1
+#define mdp_b0 u_ext_msg.mdp.mdp_b0
+
+/* Per-adapter configuration, mostly filled from EEPROM/chip probing.
+ * The ASC_SCSI_BIT_ID_TYPE fields are per-target bit masks (one bit
+ * per target id, see ASC_TID_TO_TARGET_ID above). */
+typedef struct asc_dvc_cfg {
+ ASC_SCSI_BIT_ID_TYPE can_tagged_qng;
+ ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled;
+ ASC_SCSI_BIT_ID_TYPE disc_enable;
+ ASC_SCSI_BIT_ID_TYPE sdtr_enable;
+ uchar chip_scsi_id:4;
+ uchar isa_dma_speed:4;
+ uchar isa_dma_channel;
+ uchar chip_version;
+ ushort pci_device_id;
+ ushort lib_serial_no;
+ ushort lib_version;
+ ushort mcode_date;
+ ushort mcode_version;
+ uchar max_tag_qng[ASC_MAX_TID + 1];
+ uchar *overrun_buf;
+ uchar sdtr_period_offset[ASC_MAX_TID + 1];
+ ushort pci_slot_info;
+ uchar adapter_info[6];
+} ASC_DVC_CFG;
+
+#define ASC_DEF_DVC_CNTL 0xFFFF
+#define ASC_DEF_CHIP_SCSI_ID 7
+#define ASC_DEF_ISA_DMA_SPEED 4
+/* Initialization progress flags tracked in ASC_DVC_VAR.init_state. */
+#define ASC_INIT_STATE_NULL 0x0000
+#define ASC_INIT_STATE_BEG_GET_CFG 0x0001
+#define ASC_INIT_STATE_END_GET_CFG 0x0002
+#define ASC_INIT_STATE_BEG_SET_CFG 0x0004
+#define ASC_INIT_STATE_END_SET_CFG 0x0008
+#define ASC_INIT_STATE_BEG_LOAD_MC 0x0010
+#define ASC_INIT_STATE_END_LOAD_MC 0x0020
+#define ASC_INIT_STATE_BEG_INQUIRY 0x0040
+#define ASC_INIT_STATE_END_INQUIRY 0x0080
+#define ASC_INIT_RESET_SCSI_DONE 0x0100
+#define ASC_INIT_STATE_WITHOUT_EEP 0x8000
+/* PCI revision ids and workaround flags for known chip bugs. */
+#define ASC_PCI_DEVICE_ID_REV_A 0x1100
+#define ASC_PCI_DEVICE_ID_REV_B 0x1200
+#define ASC_BUG_FIX_IF_NOT_DWB 0x0001
+#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002
+#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
+#define ASC_MIN_TAGGED_CMD 7
+#define ASC_MAX_SCSI_RESET_WAIT 30
+
+/* Per-adapter runtime state for the ASC library.  Per-target arrays are
+ * indexed by target id (0..ASC_MAX_TID); bit-mask fields use one bit
+ * per target id. */
+typedef struct asc_dvc_var {
+ PortAddr iop_base;
+ ushort err_code;
+ ushort dvc_cntl;
+ ushort bug_fix_cntl;
+ ushort bus_type;
+ Ptr2Func isr_callback;
+ Ptr2Func exe_callback;
+ ASC_SCSI_BIT_ID_TYPE init_sdtr;
+ ASC_SCSI_BIT_ID_TYPE sdtr_done;
+ ASC_SCSI_BIT_ID_TYPE use_tagged_qng;
+ ASC_SCSI_BIT_ID_TYPE unit_not_ready;
+ ASC_SCSI_BIT_ID_TYPE queue_full_or_busy;
+ ASC_SCSI_BIT_ID_TYPE start_motor;
+ uchar scsi_reset_wait;
+ uchar chip_no;
+ char is_in_int;
+ uchar max_total_qng;
+ uchar cur_total_qng;
+ uchar in_critical_cnt;
+ uchar irq_no;
+ uchar last_q_shortage;
+ ushort init_state;
+ uchar cur_dvc_qng[ASC_MAX_TID + 1];
+ uchar max_dvc_qng[ASC_MAX_TID + 1];
+ ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1];
+ ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1];
+ uchar sdtr_period_tbl[ASC_MAX_SYN_XFER_NO];
+ ASC_DVC_CFG *cfg;
+ Ptr2Func saved_ptr2func;
+ ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always;
+ char redo_scam;
+ ushort res2;
+ uchar dos_int13_table[ASC_MAX_TID + 1];
+ ulong max_dma_count;
+ ASC_SCSI_BIT_ID_TYPE no_scam;
+ ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer;
+ uchar max_sdtr_index;
+ uchar host_init_sdtr_index;
+ ulong drv_ptr;
+ ulong uc_break;
+ ulong res7;
+ ulong res8;
+} ASC_DVC_VAR;
+
+/* Typed signatures for the isr_callback/exe_callback function pointers
+ * stored above as generic Ptr2Func. */
+typedef int (* ASC_ISR_CALLBACK) (ASC_DVC_VAR asc_ptr_type *, ASC_QDONE_INFO *);
+typedef int (* ASC_EXE_CALLBACK) (ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q *);
+
+/* Inquiry device-type and read-capacity caches, per target/LUN. */
+typedef struct asc_dvc_inq_info {
+ uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_DVC_INQ_INFO;
+
+typedef struct asc_cap_info {
+ ulong lba;
+ ulong blk_size;
+} ASC_CAP_INFO;
+
+typedef struct asc_cap_info_array {
+ ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_CAP_INFO_ARRAY;
+
+/* Microcode control and driver control (dvc_cntl) option bits. */
+#define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001
+#define ASC_MCNTL_NULL_TARGET (ushort)0x0002
+#define ASC_CNTL_INITIATOR (ushort)0x0001
+#define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002
+#define ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004
+#define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008
+#define ASC_CNTL_NO_SCAM (ushort)0x0010
+#define ASC_CNTL_INT_MULTI_Q (ushort)0x0080
+#define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040
+#define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100
+#define ASC_CNTL_RESET_SCSI (ushort)0x0200
+#define ASC_CNTL_INIT_INQUIRY (ushort)0x0400
+#define ASC_CNTL_INIT_VERBOSE (ushort)0x0800
+#define ASC_CNTL_SCSI_PARITY (ushort)0x1000
+#define ASC_CNTL_BURST_MODE (ushort)0x2000
+#define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000
+/* EEPROM word-address layout (VL-bus vs. other buses) and retry limits. */
+#define ASC_EEP_DVC_CFG_BEG_VL 2
+#define ASC_EEP_MAX_DVC_ADDR_VL 15
+#define ASC_EEP_DVC_CFG_BEG 32
+#define ASC_EEP_MAX_DVC_ADDR 45
+#define ASC_EEP_DEFINED_WORDS 10
+#define ASC_EEP_MAX_ADDR 63
+#define ASC_EEP_RES_WORDS 0
+#define ASC_EEP_MAX_RETRY 20
+#define ASC_MAX_INIT_BUSY_RETRY 8
+#define ASC_EEP_ISA_PNP_WSIZE 16
+
+/* In-memory image of the narrow-adapter EEPROM configuration.
+ * NOTE(review): layout presumably matches the on-EEPROM word layout
+ * (read/written by AscGetEEPConfig/AscSetEEPConfig below) - do not
+ * reorder fields; 'chksum' covers the preceding words. */
+typedef struct asceep_config {
+ ushort cfg_lsw;
+ ushort cfg_msw;
+ uchar init_sdtr;
+ uchar disc_enable;
+ uchar use_cmd_qng;
+ uchar start_motor;
+ uchar max_total_qng;
+ uchar max_tag_qng;
+ uchar bios_scan;
+ uchar power_up_wait;
+ uchar no_scam;
+ uchar chip_scsi_id:4;
+ uchar isa_dma_speed:4;
+ uchar dos_int13_table[ASC_MAX_TID + 1];
+ uchar adapter_info[6];
+ ushort cntl;
+ ushort chksum;
+} ASCEEP_CONFIG;
+
+/* PCI configuration (cfg_lsw) bits. */
+#define ASC_PCI_CFG_LSW_SCSI_PARITY 0x0800
+#define ASC_PCI_CFG_LSW_BURST_MODE 0x0080
+#define ASC_PCI_CFG_LSW_INTR_ABLE 0x0020
+
+/* EEPROM command-register opcodes and overrun buffer size. */
+#define ASC_EEP_CMD_READ 0x80
+#define ASC_EEP_CMD_WRITE 0x40
+#define ASC_EEP_CMD_WRITE_ABLE 0x30
+#define ASC_EEP_CMD_WRITE_DISABLE 0x00
+#define ASC_OVERRUN_BSIZE 0x00000048UL
+#define ASC_CTRL_BREAK_ONCE 0x0001
+#define ASC_CTRL_BREAK_STAY_IDLE 0x0002
+/*
+ * ASCV_*: offsets of microcode variables in the adapter's local RAM,
+ * accessed through the AscReadLramByte/Word/DWord helpers below.
+ * Suffix convention: _B = byte, _W = word, _D = dword.
+ */
+#define ASCV_MSGOUT_BEG 0x0000
+#define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3)
+#define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4)
+#define ASCV_BREAK_SAVED_CODE (ushort)0x0006
+#define ASCV_MSGIN_BEG (ASCV_MSGOUT_BEG+8)
+#define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3)
+#define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4)
+#define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8)
+#define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8)
+#define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020
+#define ASCV_BREAK_ADDR (ushort)0x0028
+#define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A
+#define ASCV_BREAK_CONTROL (ushort)0x002C
+#define ASCV_BREAK_HIT_COUNT (ushort)0x002E
+
+#define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030
+#define ASCV_MCODE_CHKSUM_W (ushort)0x0032
+#define ASCV_MCODE_SIZE_W (ushort)0x0034
+#define ASCV_STOP_CODE_B (ushort)0x0036
+#define ASCV_DVC_ERR_CODE_B (ushort)0x0037
+#define ASCV_OVERRUN_PADDR_D (ushort)0x0038
+#define ASCV_OVERRUN_BSIZE_D (ushort)0x003C
+#define ASCV_HALTCODE_W (ushort)0x0040
+#define ASCV_CHKSUM_W (ushort)0x0042
+#define ASCV_MC_DATE_W (ushort)0x0044
+#define ASCV_MC_VER_W (ushort)0x0046
+#define ASCV_NEXTRDY_B (ushort)0x0048
+#define ASCV_DONENEXT_B (ushort)0x0049
+#define ASCV_USE_TAGGED_QNG_B (ushort)0x004A
+#define ASCV_SCSIBUSY_B (ushort)0x004B
+#define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C
+#define ASCV_CURCDB_B (ushort)0x004D
+#define ASCV_RCLUN_B (ushort)0x004E
+#define ASCV_BUSY_QHEAD_B (ushort)0x004F
+#define ASCV_DISC1_QHEAD_B (ushort)0x0050
+#define ASCV_DISC_ENABLE_B (ushort)0x0052
+#define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053
+#define ASCV_HOSTSCSI_ID_B (ushort)0x0055
+#define ASCV_MCODE_CNTL_B (ushort)0x0056
+#define ASCV_NULL_TARGET_B (ushort)0x0057
+#define ASCV_FREE_Q_HEAD_W (ushort)0x0058
+#define ASCV_DONE_Q_TAIL_W (ushort)0x005A
+#define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1)
+#define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1)
+#define ASCV_HOST_FLAG_B (ushort)0x005D
+#define ASCV_TOTAL_READY_Q_B (ushort)0x0064
+#define ASCV_VER_SERIAL_B (ushort)0x0065
+#define ASCV_HALTCODE_SAVED_W (ushort)0x0066
+#define ASCV_WTM_FLAG_B (ushort)0x0068
+#define ASCV_RISC_FLAG_B (ushort)0x006A
+#define ASCV_REQ_SG_LIST_QP (ushort)0x006B
+/* Host/RISC handshake flag bits kept in the LRAM variables above. */
+#define ASC_HOST_FLAG_IN_ISR 0x01
+#define ASC_HOST_FLAG_ACK_INT 0x02
+#define ASC_RISC_FLAG_GEN_INT 0x01
+#define ASC_RISC_FLAG_REQ_SG_LIST 0x02
+/* IOP_*: chip register offsets from the adapter's I/O base port.
+ * NOTE(review): several offsets are reused for different registers
+ * (e.g. 0x0D is both IOP_REG_IFC and IOP_EXTRA_CONTROL) - presumably
+ * bank-dependent (see CC_BANK_ONE below); confirm. */
+#define IOP_CTRL (0x0F)
+#define IOP_STATUS (0x0E)
+#define IOP_INT_ACK IOP_STATUS
+#define IOP_REG_IFC (0x0D)
+#define IOP_SYN_OFFSET (0x0B)
+#define IOP_EXTRA_CONTROL (0x0D)
+#define IOP_REG_PC (0x0C)
+#define IOP_RAM_ADDR (0x0A)
+#define IOP_RAM_DATA (0x08)
+#define IOP_EEP_DATA (0x06)
+#define IOP_EEP_CMD (0x07)
+#define IOP_VERSION (0x03)
+#define IOP_CONFIG_HIGH (0x04)
+#define IOP_CONFIG_LOW (0x02)
+#define IOP_SIG_BYTE (0x01)
+#define IOP_SIG_WORD (0x00)
+#define IOP_REG_DC1 (0x0E)
+#define IOP_REG_DC0 (0x0C)
+#define IOP_REG_SB (0x0B)
+#define IOP_REG_DA1 (0x0A)
+#define IOP_REG_DA0 (0x08)
+#define IOP_REG_SC (0x09)
+#define IOP_DMA_SPEED (0x07)
+#define IOP_REG_FLAG (0x07)
+#define IOP_FIFO_H (0x06)
+#define IOP_FIFO_L (0x04)
+#define IOP_REG_ID (0x05)
+#define IOP_REG_QP (0x03)
+#define IOP_REG_IH (0x02)
+#define IOP_REG_IX (0x01)
+#define IOP_REG_AX (0x00)
+/* IFC register bits. */
+#define IFC_REG_LOCK (0x00)
+#define IFC_REG_UNLOCK (0x09)
+#define IFC_WR_EN_FILTER (0x10)
+#define IFC_RD_NO_EEPROM (0x10)
+#define IFC_SLEW_RATE (0x20)
+#define IFC_ACT_NEG (0x40)
+#define IFC_INP_FILTER (0x80)
+#define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK)
+/* SCSI bus signal bits (SC register). */
+#define SC_SEL (uchar)(0x80)
+#define SC_BSY (uchar)(0x40)
+#define SC_ACK (uchar)(0x20)
+#define SC_REQ (uchar)(0x10)
+#define SC_ATN (uchar)(0x08)
+#define SC_IO (uchar)(0x04)
+#define SC_CD (uchar)(0x02)
+#define SC_MSG (uchar)(0x01)
+#define SEC_SCSI_CTL (uchar)(0x80)
+#define SEC_ACTIVE_NEGATE (uchar)(0x40)
+#define SEC_SLEW_RATE (uchar)(0x20)
+#define SEC_ENABLE_FILTER (uchar)(0x10)
+/* Microcode halt reason codes (stored in ASCV_HALTCODE_W). */
+#define ASC_HALT_EXTMSG_IN (ushort)0x8000
+#define ASC_HALT_CHK_CONDITION (ushort)0x8100
+#define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200
+#define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300
+#define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400
+#define ASC_HALT_SDTR_REJECTED (ushort)0x4000
+/* Local-RAM layout: code/data sections and the 64-byte queue blocks. */
+#define ASC_MAX_QNO 0xF8
+#define ASC_DATA_SEC_BEG (ushort)0x0080
+#define ASC_DATA_SEC_END (ushort)0x0080
+#define ASC_CODE_SEC_BEG (ushort)0x0080
+#define ASC_CODE_SEC_END (ushort)0x0080
+#define ASC_QADR_BEG (0x4000)
+#define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64)
+#define ASC_QADR_END (ushort)0x7FFF
+#define ASC_QLAST_ADR (ushort)0x7FC0
+#define ASC_QBLK_SIZE 0x40
+#define ASC_BIOS_DATA_QBEG 0xF8
+#define ASC_MIN_ACTIVE_QNO 0x01
+#define ASC_QLINK_END 0xFF
+#define ASC_EEPROM_WORDS 0x10
+#define ASC_MAX_MGS_LEN 0x10
+/* Adapter BIOS ROM addressing. */
+#define ASC_BIOS_ADDR_DEF 0xDC00
+#define ASC_BIOS_SIZE 0x3800
+#define ASC_BIOS_RAM_OFF 0x3800
+#define ASC_BIOS_RAM_SIZE 0x800
+#define ASC_BIOS_MIN_ADDR 0xC000
+#define ASC_BIOS_MAX_ADDR 0xEC00
+#define ASC_BIOS_BANK_SIZE 0x0400
+#define ASC_MCODE_START_ADDR 0x0080
+/* Chip configuration register bits. */
+#define ASC_CFG0_HOST_INT_ON 0x0020
+#define ASC_CFG0_BIOS_ON 0x0040
+#define ASC_CFG0_VERA_BURST_ON 0x0080
+#define ASC_CFG0_SCSI_PARITY_ON 0x0800
+#define ASC_CFG1_SCSI_TARGET_ON 0x0080
+#define ASC_CFG1_LRAM_8BITS_ON 0x0800
+#define ASC_CFG_MSW_CLR_MASK 0x3080
+/* Chip status word bits (read via AscGetChipStatus below). */
+#define CSW_TEST1 (ASC_CS_TYPE)0x8000
+#define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000
+#define CSW_RESERVED1 (ASC_CS_TYPE)0x2000
+#define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000
+#define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800
+#define CSW_TEST2 (ASC_CS_TYPE)0x0400
+#define CSW_TEST3 (ASC_CS_TYPE)0x0200
+#define CSW_RESERVED2 (ASC_CS_TYPE)0x0100
+#define CSW_DMA_DONE (ASC_CS_TYPE)0x0080
+#define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040
+#define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020
+#define CSW_HALTED (ASC_CS_TYPE)0x0010
+#define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008
+#define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004
+#define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002
+#define CSW_INT_PENDING (ASC_CS_TYPE)0x0001
+/* Interrupt control word bits (written to the status port).
+ * NOTE(review): CIW_CLR_SCSI_RESET_INT and CIW_IRQ_ACT share value
+ * 0x1000 - looks historic/intentional, but confirm before relying on
+ * them being distinct. */
+#define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000
+#define CIW_INT_ACK (ASC_CS_TYPE)0x0100
+#define CIW_TEST1 (ASC_CS_TYPE)0x0200
+#define CIW_TEST2 (ASC_CS_TYPE)0x0400
+#define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800
+#define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000
+/* Chip control register bits (written via AscSetChipControl below). */
+#define CC_CHIP_RESET (uchar)0x80
+#define CC_SCSI_RESET (uchar)0x40
+#define CC_HALT (uchar)0x20
+#define CC_SINGLE_STEP (uchar)0x10
+#define CC_DMA_ABLE (uchar)0x08
+#define CC_TEST (uchar)0x04
+#define CC_BANK_ONE (uchar)0x02
+#define CC_DIAG (uchar)0x01
+/* Chip signature values and EISA probing constants/product ids. */
+#define ASC_1000_ID0W 0x04C1
+#define ASC_1000_ID0W_FIX 0x00C1
+#define ASC_1000_ID1B 0x25
+#define ASC_EISA_BIG_IOP_GAP (0x1C30-0x0C50)
+#define ASC_EISA_SMALL_IOP_GAP (0x0020)
+#define ASC_EISA_MIN_IOP_ADDR (0x0C30)
+#define ASC_EISA_MAX_IOP_ADDR (0xFC50)
+#define ASC_EISA_REV_IOP_MASK (0x0C83)
+#define ASC_EISA_PID_IOP_MASK (0x0C80)
+#define ASC_EISA_CFG_IOP_MASK (0x0C86)
+#define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000)
+#define ASC_EISA_ID_740 0x01745004UL
+#define ASC_EISA_ID_750 0x01755004UL
+/* RISC instruction words (presumably poked into LRAM for breakpoints
+ * and halts - confirm against the microcode listing). */
+#define INS_HALTINT (ushort)0x6281
+#define INS_HALT (ushort)0x6280
+#define INS_SINT (ushort)0x6200
+#define INS_RFLAG_WTM (ushort)0x7380
+/* Word sizes of the microcode save area (see ASC_MC_SAVED below). */
+#define ASC_MC_SAVE_CODE_WSIZE 0x500
+#define ASC_MC_SAVE_DATA_WSIZE 0x40
+
+/* Host-side save buffer for the microcode's data and code words. */
+typedef struct asc_mc_saved {
+ ushort data[ASC_MC_SAVE_DATA_WSIZE];
+ ushort code[ASC_MC_SAVE_CODE_WSIZE];
+} ASC_MC_SAVED;
+
+/*
+ * Convenience accessors for the microcode LRAM variables (see the
+ * ASCV_* offsets above) and for the chip's I/O port registers.
+ */
+#define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B)
+#define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val)
+#define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W)
+#define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W)
+#define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val)
+#define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val)
+#define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B)
+#define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B)
+#define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val)
+#define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val)
+/*
+ * BUG FIX: the four SDTR accessors below originally ended in a stray
+ * ' ;', which made the Get variants (which yield a value) unusable in
+ * an expression and broke brace-less if/else around the Put variants.
+ * The trailing semicolons are dropped and 'id' is parenthesized like
+ * every other macro argument in this file.
+ */
+#define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)(id)), (data))
+#define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)(id)))
+#define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)(id)), data)
+#define AscGetMCodeInitSDTRAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)(id)))
+/* NOTE(review): relies on a variable named 'asc_dvc' in scope at the
+ * call site - kept as-is to preserve existing callers. */
+#define AscSynIndexToPeriod(index) (uchar)(asc_dvc->sdtr_period_tbl[ (index) ])
+#define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE)
+#define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD)
+#define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION)
+#define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW)
+#define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH)
+#define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data)
+#define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data)
+#define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD)
+#define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data)
+#define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA)
+#define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data)
+#define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR))
+#define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr)
+#define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA)
+#define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data)
+#define AscGetChipLramDataNoSwap(port) (ushort)inpw_noswap((port)+IOP_RAM_DATA)
+#define AscSetChipLramDataNoSwap(port, data) outpw_noswap((port)+IOP_RAM_DATA, data)
+#define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC)
+#define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data)
+#define AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS)
+#define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val)
+#define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL)
+#define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val)
+#define AscGetChipSyn(port) (uchar)inp((port)+IOP_SYN_OFFSET)
+#define AscSetChipSyn(port, data) outp((port)+IOP_SYN_OFFSET, data)
+#define AscSetPCAddr(port, data) outpw((port)+IOP_REG_PC, data)
+#define AscGetPCAddr(port) (ushort)inpw((port)+IOP_REG_PC)
+#define AscIsIntPending(port) (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH))
+#define AscGetChipScsiID(port) ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID)
+#define AscGetExtraControl(port) (uchar)inp((port)+IOP_EXTRA_CONTROL)
+#define AscSetExtraControl(port, data) outp((port)+IOP_EXTRA_CONTROL, data)
+/*
+ * RISC chip register read/write helpers: each macro maps a named chip
+ * register to an inp/outp (byte) or inpw/outpw (word) access at the
+ * register's offset from the adapter's I/O base port.
+ */
+#define AscReadChipAX(port) (ushort)inpw((port)+IOP_REG_AX)
+#define AscWriteChipAX(port, data) outpw((port)+IOP_REG_AX, data)
+#define AscReadChipIX(port) (uchar)inp((port)+IOP_REG_IX)
+#define AscWriteChipIX(port, data) outp((port)+IOP_REG_IX, data)
+#define AscReadChipIH(port) (ushort)inpw((port)+IOP_REG_IH)
+#define AscWriteChipIH(port, data) outpw((port)+IOP_REG_IH, data)
+#define AscReadChipQP(port) (uchar)inp((port)+IOP_REG_QP)
+#define AscWriteChipQP(port, data) outp((port)+IOP_REG_QP, data)
+/*
+ * BUG FIX: the FIFO offsets are defined above as IOP_FIFO_L/IOP_FIFO_H;
+ * the original macros referenced undefined names IOP_REG_FIFO_L/_H and
+ * could not compile at any use site.
+ */
+#define AscReadChipFIFO_L(port) (ushort)inpw((port)+IOP_FIFO_L)
+#define AscWriteChipFIFO_L(port, data) outpw((port)+IOP_FIFO_L, data)
+#define AscReadChipFIFO_H(port) (ushort)inpw((port)+IOP_FIFO_H)
+#define AscWriteChipFIFO_H(port, data) outpw((port)+IOP_FIFO_H, data)
+#define AscReadChipDmaSpeed(port) (uchar)inp((port)+IOP_DMA_SPEED)
+#define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data)
+/*
+ * BUG FIX: the DA/DC write macros used 'data' in their expansion
+ * without declaring it as a parameter, so they only compiled if a
+ * variable named 'data' happened to be in scope at the call site.
+ * Declare it explicitly, matching every other write macro here.
+ */
+#define AscReadChipDA0(port) (ushort)inpw((port)+IOP_REG_DA0)
+#define AscWriteChipDA0(port, data) outpw((port)+IOP_REG_DA0, data)
+#define AscReadChipDA1(port) (ushort)inpw((port)+IOP_REG_DA1)
+#define AscWriteChipDA1(port, data) outpw((port)+IOP_REG_DA1, data)
+#define AscReadChipDC0(port) (ushort)inpw((port)+IOP_REG_DC0)
+#define AscWriteChipDC0(port, data) outpw((port)+IOP_REG_DC0, data)
+#define AscReadChipDC1(port) (ushort)inpw((port)+IOP_REG_DC1)
+#define AscWriteChipDC1(port, data) outpw((port)+IOP_REG_DC1, data)
+#define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID)
+#define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data)
+
+/*
+ * Forward declarations for the ASC library internals.  STATIC is
+ * presumably a macro expanding to 'static' and 'asc_ptr_type',
+ * 'rint'/'ruchar' are library typedefs - both are declared above this
+ * chunk; confirm there.  The LINUX_VERSION_CODE guards keep prototypes
+ * for functions that only exist on newer kernels out of older builds.
+ */
+STATIC int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg);
+STATIC int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg);
+STATIC void AscWaitEEPRead(void);
+STATIC void AscWaitEEPWrite(void);
+STATIC ushort AscReadEEPWord(PortAddr, uchar);
+STATIC ushort AscWriteEEPWord(PortAddr, uchar, ushort);
+STATIC ushort AscGetEEPConfig(PortAddr, ASCEEP_CONFIG *, ushort);
+STATIC int AscSetEEPConfigOnce(PortAddr, ASCEEP_CONFIG *, ushort);
+STATIC int AscSetEEPConfig(PortAddr, ASCEEP_CONFIG *, ushort);
+STATIC int AscStartChip(PortAddr);
+STATIC int AscStopChip(PortAddr);
+STATIC void AscSetChipIH(PortAddr, ushort);
+STATIC int AscIsChipHalted(PortAddr);
+STATIC void AscAckInterrupt(PortAddr);
+STATIC void AscDisableInterrupt(PortAddr);
+STATIC void AscEnableInterrupt(PortAddr);
+STATIC void AscSetBank(PortAddr, uchar);
+STATIC int AscResetChipAndScsiBus(ASC_DVC_VAR *);
+STATIC ushort AscGetIsaDmaChannel(PortAddr);
+STATIC ushort AscSetIsaDmaChannel(PortAddr, ushort);
+STATIC uchar AscSetIsaDmaSpeed(PortAddr, uchar);
+STATIC uchar AscGetIsaDmaSpeed(PortAddr);
+STATIC uchar AscReadLramByte(PortAddr, ushort);
+STATIC ushort AscReadLramWord(PortAddr, ushort);
+STATIC ulong AscReadLramDWord(PortAddr, ushort);
+STATIC void AscWriteLramWord(PortAddr, ushort, ushort);
+STATIC void AscWriteLramDWord(PortAddr, ushort, ulong);
+STATIC void AscWriteLramByte(PortAddr, ushort, uchar);
+STATIC ulong AscMemSumLramWord(PortAddr, ushort, rint);
+STATIC void AscMemWordSetLram(PortAddr, ushort, ushort, rint);
+STATIC void AscMemWordCopyToLram(PortAddr, ushort, ushort *, int);
+STATIC void AscMemDWordCopyToLram(PortAddr, ushort, ulong *, int);
+STATIC void AscMemWordCopyFromLram(PortAddr, ushort, ushort *, int);
+STATIC ushort AscInitAscDvcVar(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitFromEEP(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitFromAscDvcVar(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitMicroCodeVar(ASC_DVC_VAR asc_ptr_type * asc_dvc);
+STATIC int AscTestExternalLram(ASC_DVC_VAR asc_ptr_type *);
+STATIC uchar AscMsgOutSDTR(ASC_DVC_VAR asc_ptr_type *, uchar, uchar);
+STATIC uchar AscCalSDTRData(ASC_DVC_VAR asc_ptr_type *, uchar, uchar);
+STATIC void AscSetChipSDTR(PortAddr, uchar, uchar);
+STATIC uchar AscGetSynPeriodIndex(ASC_DVC_VAR asc_ptr_type *, ruchar);
+STATIC uchar AscAllocFreeQueue(PortAddr, uchar);
+STATIC uchar AscAllocMultipleFreeQueue(PortAddr, uchar, uchar);
+STATIC int AscRiscHaltedAbortSRB(ASC_DVC_VAR asc_ptr_type *, ulong);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int AscRiscHaltedAbortTIX(ASC_DVC_VAR asc_ptr_type *, uchar);
+#endif /* version >= v1.3.89 */
+STATIC int AscHostReqRiscHalt(PortAddr);
+STATIC int AscStopQueueExe(PortAddr);
+STATIC int AscStartQueueExe(PortAddr);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int AscCleanUpDiscQueue(PortAddr);
+#endif /* version >= v1.3.89 */
+STATIC int AscCleanUpBusyQueue(PortAddr);
+STATIC int AscWaitTixISRDone(ASC_DVC_VAR asc_ptr_type *, uchar);
+STATIC int AscWaitISRDone(ASC_DVC_VAR asc_ptr_type *);
+STATIC ulong AscGetOnePhyAddr(ASC_DVC_VAR asc_ptr_type *, uchar *,
+ ulong);
+STATIC int AscSendScsiQueue(ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_Q * scsiq,
+ uchar n_q_required);
+STATIC int AscPutReadyQueue(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_Q *, uchar);
+STATIC int AscPutReadySgListQueue(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_Q *, uchar);
+STATIC int AscSetChipSynRegAtID(PortAddr, uchar, uchar);
+STATIC int AscSetRunChipSynRegAtID(PortAddr, uchar, uchar);
+STATIC ushort AscInitLram(ASC_DVC_VAR asc_ptr_type *);
+STATIC int AscReInitLram(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitQLinkVar(ASC_DVC_VAR asc_ptr_type *);
+STATIC int AscSetLibErrorCode(ASC_DVC_VAR asc_ptr_type *, ushort);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int _AscWaitQDone(PortAddr, ASC_SCSI_Q *);
+#endif /* version >= v1.3.89 */
+STATIC int AscIsrChipHalted(ASC_DVC_VAR asc_ptr_type *);
+STATIC uchar _AscCopyLramScsiDoneQ(PortAddr, ushort,
+ ASC_QDONE_INFO *, ulong);
+STATIC int AscIsrQDone(ASC_DVC_VAR asc_ptr_type *);
+STATIC int AscCompareString(uchar *, uchar *, int);
+STATIC ushort AscGetEisaChipCfg(PortAddr);
+STATIC ulong AscGetEisaProductID(PortAddr);
+STATIC PortAddr AscSearchIOPortAddrEISA(PortAddr);
+STATIC uchar AscGetChipScsiCtrl(PortAddr);
+STATIC uchar AscSetChipScsiID(PortAddr, uchar);
+STATIC uchar AscGetChipVersion(PortAddr, ushort);
+STATIC ushort AscGetChipBusType(PortAddr);
+STATIC ulong AscLoadMicroCode(PortAddr, ushort, ushort *, ushort);
+STATIC int AscFindSignature(PortAddr);
+STATIC PortAddr AscSearchIOPortAddr11(PortAddr);
+STATIC void AscToggleIRQAct(PortAddr);
+STATIC void AscSetISAPNPWaitForKey(void);
+STATIC uchar AscGetChipIRQ(PortAddr, ushort);
+STATIC uchar AscSetChipIRQ(PortAddr, uchar, ushort);
+STATIC ushort AscGetChipBiosAddress(PortAddr, ushort);
+STATIC long DvcEnterCritical(void);
+STATIC void DvcLeaveCritical(long);
+STATIC void DvcInPortWords(PortAddr, ushort *, int);
+STATIC void DvcOutPortWords(PortAddr, ushort *, int);
+STATIC void DvcOutPortDWords(PortAddr, ulong *, int);
+STATIC uchar DvcReadPCIConfigByte(ASC_DVC_VAR asc_ptr_type *, ushort);
+STATIC void DvcWritePCIConfigByte(ASC_DVC_VAR asc_ptr_type *,
+ ushort, uchar);
+/* NOTE(review): duplicate declaration of AscGetChipBiosAddress (also
+ * declared above) - harmless in C, but could be dropped. */
+STATIC ushort AscGetChipBiosAddress(PortAddr, ushort);
+STATIC void DvcSleepMilliSecond(ulong);
+STATIC void DvcDelayNanoSecond(ASC_DVC_VAR asc_ptr_type *, ulong);
+STATIC ulong DvcGetSGList(ASC_DVC_VAR asc_ptr_type *, uchar *,
+ ulong, ASC_SG_HEAD *);
+STATIC void DvcPutScsiQ(PortAddr, ushort, ushort *, int);
+STATIC void DvcGetQinfo(PortAddr, ushort, ushort *, int);
+STATIC PortAddr AscSearchIOPortAddr(PortAddr, ushort);
+STATIC ushort AscInitGetConfig(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitSetConfig(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitAsc1000Driver(ASC_DVC_VAR asc_ptr_type *);
+STATIC void AscAsyncFix(ASC_DVC_VAR asc_ptr_type *, uchar,
+ ASC_SCSI_INQUIRY *);
+STATIC int AscTagQueuingSafe(ASC_SCSI_INQUIRY *);
+STATIC void AscInquiryHandling(ASC_DVC_VAR asc_ptr_type *,
+ uchar, ASC_SCSI_INQUIRY *);
+STATIC int AscExeScsiQueue(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q *);
+STATIC int AscISR(ASC_DVC_VAR asc_ptr_type *);
+STATIC uint AscGetNumOfFreeQueue(ASC_DVC_VAR asc_ptr_type *, uchar,
+ uchar);
+STATIC int AscSgListToQueue(int);
+STATIC int AscAbortSRB(ASC_DVC_VAR asc_ptr_type *, ulong);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int AscResetDevice(ASC_DVC_VAR asc_ptr_type *, uchar);
+#endif /* version >= v1.3.89 */
+STATIC int AscResetSB(ASC_DVC_VAR asc_ptr_type *);
+STATIC void AscEnableIsaDma(uchar);
+STATIC ulong AscGetMaxDmaCount(ushort);
+
+
+/*
+ * --- Adv Library Constants and Macros
+ */
+
+#define ADV_LIB_VERSION_MAJOR 3
+#define ADV_LIB_VERSION_MINOR 45
+
+/* d_os_dep.h */
+#define ADV_OS_LINUX
+
+/*
+ * Define Adv Library required special types.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#define AdvPortAddr unsigned short /* I/O Port address size */
+#else /* version >= v1,3,0 */
+#define AdvPortAddr unsigned long /* Virtual memory address size */
+#endif /* version >= v1,3,0 */
+
+/*
+ * Define Adv Library required memory access macros.
+ * (readb/writeb et al. take value-before-address in this kernel era.)
+ */
+#define ADV_MEM_READB(addr) readb(addr)
+#define ADV_MEM_READW(addr) readw(addr)
+#define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr)
+#define ADV_MEM_WRITEW(addr, word) writew(word, addr)
+
+/*
+ * The I/O memory mapping function names changed in 2.1.X.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif /* version < v2.1.0 */
+
+/*
+ * Define total number of simultaneous maximum element scatter-gather
+ * requests, i.e. ADV_TOT_SG_LIST * ADV_MAX_SG_LIST is the total number
+ * of simultaneous scatter-gather elements supported per wide adapter.
+ */
+#define ADV_TOT_SG_LIST 64
+
+/*
+ * Define Adv Library required per request scatter-gather element limit.
+ */
+#define ADV_MAX_SG_LIST 64
+
+/*
+ * Scatter-Gather Definitions per request.
+ *
+ * Because SG block memory is allocated in virtual memory but is
+ * referenced by the microcode as physical memory, we need to do
+ * calculations to insure there will be enough physically contiguous
+ * memory to support ADV_MAX_SG_LIST SG entries.
+ */
+
+/* Number of SG blocks needed. */
+#define ADV_NUM_SG_BLOCK \
+ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK)
+
+/* Total contiguous memory needed for SG blocks. */
+#define ADV_SG_TOTAL_MEM_SIZE \
+ (sizeof(ADV_SG_BLOCK) * ADV_NUM_SG_BLOCK)
+
+#define ASC_PAGE_SIZE PAGE_SIZE
+
+/*
+ * Number of page crossings possible for the total contiguous virtual memory
+ * needed for SG blocks.
+ *
+ * We need to allocate this many additional SG blocks in virtual memory to
+ * insure there will be space for ADV_NUM_SG_BLOCK physically contiguous
+ * scatter-gather blocks.
+ */
+#define ADV_NUM_PAGE_CROSSING \
+ ((ADV_SG_TOTAL_MEM_SIZE + (ASC_PAGE_SIZE - 1))/ASC_PAGE_SIZE)
+
+/*
+ * Define Adv Library Assertion Macro.
+ */
+
+#define ADV_ASSERT(a) ASC_ASSERT(a)
+
+/* a_condor.h */
+#define ADV_PCI_VENDOR_ID 0x10CD
+#define ADV_PCI_DEVICE_ID_REV_A 0x2300
+
+/* Wide-adapter EEPROM word-address layout. */
+#define ASC_EEP_DVC_CFG_BEGIN (0x00)
+#define ASC_EEP_DVC_CFG_END (0x15)
+#define ASC_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */
+#define ASC_EEP_MAX_WORD_ADDR (0x1E)
+
+#define ASC_EEP_DELAY_MS 100
+
+/*
+ * EEPROM bits reference by the RISC after initialization.
+ */
+#define ADV_EEPROM_BIG_ENDIAN 0x8000 /* EEPROM Bit 15 */
+#define ADV_EEPROM_BIOS_ENABLE 0x4000 /* EEPROM Bit 14 */
+#define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */
+
+/*
+ * EEPROM configuration format
+ *
+ * Field naming convention:
+ *
+ * *_enable indicates the field enables or disables the feature. The
+ * value is never reset.
+ *
+ * *_able indicates both whether a feature should be enabled or disabled
+ * and whether a device is capable of the feature. At initialization
+ * this field may be set, but later if a device is found to be incapable
+ * of the feature, the field is cleared.
+ *
+ * Default values are maintained in a_init.c in the structure
+ * Default_EEPROM_Config.
+ */
+typedef struct adveep_config
+{
+ /* Word Offset, Description */
+ /* NOTE(review): field order appears to mirror the EEPROM word layout */
+ /* given in the per-field comments; confirm before reordering fields. */
+
+ ushort cfg_lsw; /* 00 power up initialization */
+ /* bit 13 set - Term Polarity Control */
+ /* bit 14 set - BIOS Enable */
+ /* bit 15 set - Big Endian Mode */
+ ushort cfg_msw; /* 01 unused */
+ ushort disc_enable; /* 02 disconnect enable */
+ ushort wdtr_able; /* 03 Wide DTR able */
+ ushort sdtr_able; /* 04 Synchronous DTR able */
+ ushort start_motor; /* 05 send start up motor */
+ ushort tagqng_able; /* 06 tag queuing able */
+ ushort bios_scan; /* 07 BIOS device control */
+ ushort scam_tolerant; /* 08 no scam */
+
+ uchar adapter_scsi_id; /* 09 Host Adapter ID */
+ uchar bios_boot_delay; /* power up wait */
+
+ uchar scsi_reset_delay; /* 10 reset delay */
+ uchar bios_id_lun; /* first boot device scsi id & lun */
+ /* high nibble is lun */
+ /* low nibble is scsi id */
+
+ uchar termination; /* 11 0 - automatic */
+ /* 1 - low off / high off */
+ /* 2 - low off / high on */
+ /* 3 - low on / high on */
+ /* There is no low on / high off */
+
+ uchar reserved1; /* reserved byte (not used) */
+
+ ushort bios_ctrl; /* 12 BIOS control bits */
+ /* bit 0 set: BIOS don't act as initiator. */
+ /* bit 1 set: BIOS > 1 GB support */
+ /* bit 2 set: BIOS > 2 Disk Support */
+ /* bit 3 set: BIOS don't support removables */
+ /* bit 4 set: BIOS support bootable CD */
+ /* bit 5 set: */
+ /* bit 6 set: BIOS support multiple LUNs */
+ /* bit 7 set: BIOS display of message */
+ /* bit 8 set: */
+ /* bit 9 set: Reset SCSI bus during init. */
+ /* bit 10 set: */
+ /* bit 11 set: No verbose initialization. */
+ /* bit 12 set: SCSI parity enabled */
+ /* bit 13 set: */
+ /* bit 14 set: */
+ /* bit 15 set: */
+ ushort ultra_able; /* 13 ULTRA speed able */
+ ushort reserved2; /* 14 reserved */
+ uchar max_host_qng; /* 15 maximum host queuing */
+ uchar max_dvc_qng; /* maximum per device queuing */
+ ushort dvc_cntl; /* 16 control bit for driver */
+ ushort bug_fix; /* 17 control bit for bug fix */
+ ushort serial_number_word1; /* 18 Board serial number word 1 */
+ ushort serial_number_word2; /* 19 Board serial number word 2 */
+ ushort serial_number_word3; /* 20 Board serial number word 3 */
+ ushort check_sum; /* 21 EEP check sum */
+ uchar oem_name[16]; /* 22 OEM name */
+ ushort dvc_err_code; /* 30 last device driver error code */
+ ushort adv_err_code; /* 31 last uc and Adv Lib error code */
+ ushort adv_err_addr; /* 32 last uc error address */
+ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */
+ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */
+ ushort saved_adv_err_addr; /* 35 saved last uc error address */
+ ushort num_of_err; /* 36 number of error */
+} ADVEEP_CONFIG;
+
+/*
+ * EEPROM Commands
+ */
+#define ASC_EEP_CMD_DONE 0x0200
+#define ASC_EEP_CMD_DONE_ERR 0x0001
+
+/* cfg_word */
+#define EEP_CFG_WORD_BIG_ENDIAN 0x8000
+
+/* bios_ctrl */
+#define BIOS_CTRL_BIOS 0x0001
+#define BIOS_CTRL_EXTENDED_XLAT 0x0002
+#define BIOS_CTRL_GT_2_DISK 0x0004
+#define BIOS_CTRL_BIOS_REMOVABLE 0x0008
+#define BIOS_CTRL_BOOTABLE_CD 0x0010
+#define BIOS_CTRL_MULTIPLE_LUN 0x0040
+#define BIOS_CTRL_DISPLAY_MSG 0x0080
+#define BIOS_CTRL_NO_SCAM 0x0100
+#define BIOS_CTRL_RESET_SCSI_BUS 0x0200
+#define BIOS_CTRL_INIT_VERBOSE 0x0800
+#define BIOS_CTRL_SCSI_PARITY 0x1000
+
+/*
+ * ASC 3550 Internal Memory Size - 8KB
+ */
+#define ADV_CONDOR_MEMSIZE 0x2000 /* 8 KB Internal Memory */
+
+/*
+ * ASC 3550 I/O Length - 64 bytes
+ */
+#define ADV_CONDOR_IOLEN 0x40 /* I/O Port Range in bytes */
+
+/*
+ * Byte I/O register address from base of 'iop_base'.
+ */
+#define IOPB_INTR_STATUS_REG 0x00
+#define IOPB_CHIP_ID_1 0x01
+#define IOPB_INTR_ENABLES 0x02
+#define IOPB_CHIP_TYPE_REV 0x03
+#define IOPB_RES_ADDR_4 0x04
+#define IOPB_RES_ADDR_5 0x05
+#define IOPB_RAM_DATA 0x06
+#define IOPB_RES_ADDR_7 0x07
+#define IOPB_FLAG_REG 0x08
+#define IOPB_RES_ADDR_9 0x09
+#define IOPB_RISC_CSR 0x0A
+#define IOPB_RES_ADDR_B 0x0B
+#define IOPB_RES_ADDR_C 0x0C
+#define IOPB_RES_ADDR_D 0x0D
+#define IOPB_RES_ADDR_E 0x0E
+#define IOPB_RES_ADDR_F 0x0F
+#define IOPB_MEM_CFG 0x10
+#define IOPB_RES_ADDR_11 0x11
+#define IOPB_RES_ADDR_12 0x12
+#define IOPB_RES_ADDR_13 0x13
+#define IOPB_FLASH_PAGE 0x14
+#define IOPB_RES_ADDR_15 0x15
+#define IOPB_RES_ADDR_16 0x16
+#define IOPB_RES_ADDR_17 0x17
+#define IOPB_FLASH_DATA 0x18
+#define IOPB_RES_ADDR_19 0x19
+#define IOPB_RES_ADDR_1A 0x1A
+#define IOPB_RES_ADDR_1B 0x1B
+#define IOPB_RES_ADDR_1C 0x1C
+#define IOPB_RES_ADDR_1D 0x1D
+#define IOPB_RES_ADDR_1E 0x1E
+#define IOPB_RES_ADDR_1F 0x1F
+#define IOPB_DMA_CFG0 0x20
+#define IOPB_DMA_CFG1 0x21
+#define IOPB_TICKLE 0x22
+#define IOPB_DMA_REG_WR 0x23
+#define IOPB_SDMA_STATUS 0x24
+#define IOPB_SCSI_BYTE_CNT 0x25
+#define IOPB_HOST_BYTE_CNT 0x26
+#define IOPB_BYTE_LEFT_TO_XFER 0x27
+#define IOPB_BYTE_TO_XFER_0 0x28
+#define IOPB_BYTE_TO_XFER_1 0x29
+#define IOPB_BYTE_TO_XFER_2 0x2A
+#define IOPB_BYTE_TO_XFER_3 0x2B
+#define IOPB_ACC_GRP 0x2C
+#define IOPB_RES_ADDR_2D 0x2D
+#define IOPB_DEV_ID 0x2E
+#define IOPB_RES_ADDR_2F 0x2F
+#define IOPB_SCSI_DATA 0x30
+#define IOPB_RES_ADDR_31 0x31
+#define IOPB_RES_ADDR_32 0x32
+#define IOPB_SCSI_DATA_HSHK 0x33
+#define IOPB_SCSI_CTRL 0x34
+#define IOPB_RES_ADDR_35 0x35
+#define IOPB_RES_ADDR_36 0x36
+#define IOPB_RES_ADDR_37 0x37
+#define IOPB_RES_ADDR_38 0x38
+#define IOPB_RES_ADDR_39 0x39
+#define IOPB_RES_ADDR_3A 0x3A
+#define IOPB_RES_ADDR_3B 0x3B
+#define IOPB_RFIFO_CNT 0x3C
+#define IOPB_RES_ADDR_3D 0x3D
+#define IOPB_RES_ADDR_3E 0x3E
+#define IOPB_RES_ADDR_3F 0x3F
+
+/*
+ * Word I/O register address from base of 'iop_base'.
+ */
+#define IOPW_CHIP_ID_0 0x00 /* CID0 */
+#define IOPW_CTRL_REG 0x02 /* CC */
+#define IOPW_RAM_ADDR 0x04 /* LA */
+#define IOPW_RAM_DATA 0x06 /* LD */
+#define IOPW_RES_ADDR_08 0x08
+#define IOPW_RISC_CSR 0x0A /* CSR */
+#define IOPW_SCSI_CFG0 0x0C /* CFG0 */
+#define IOPW_SCSI_CFG1 0x0E /* CFG1 */
+#define IOPW_RES_ADDR_10 0x10
+#define IOPW_SEL_MASK 0x12 /* SM */
+#define IOPW_RES_ADDR_14 0x14
+#define IOPW_FLASH_ADDR 0x16 /* FA */
+#define IOPW_RES_ADDR_18 0x18
+#define IOPW_EE_CMD 0x1A /* EC */
+#define IOPW_EE_DATA 0x1C /* ED */
+#define IOPW_SFIFO_CNT 0x1E /* SFC */
+#define IOPW_RES_ADDR_20 0x20
+#define IOPW_Q_BASE 0x22 /* QB */
+#define IOPW_QP 0x24 /* QP */
+#define IOPW_IX 0x26 /* IX */
+#define IOPW_SP 0x28 /* SP */
+#define IOPW_PC 0x2A /* PC */
+#define IOPW_RES_ADDR_2C 0x2C
+#define IOPW_RES_ADDR_2E 0x2E
+#define IOPW_SCSI_DATA 0x30 /* SD */
+#define IOPW_SCSI_DATA_HSHK 0x32 /* SDH */
+#define IOPW_SCSI_CTRL 0x34 /* SC */
+#define IOPW_HSHK_CFG 0x36 /* HCFG */
+#define IOPW_SXFR_STATUS 0x36 /* SXS */
+#define IOPW_SXFR_CNTL 0x38 /* SXL */
+#define IOPW_SXFR_CNTH 0x3A /* SXH */
+#define IOPW_RES_ADDR_3C 0x3C
+#define IOPW_RFIFO_DATA 0x3E /* RFD */
+
+/*
+ * Doubleword I/O register address from base of 'iop_base'.
+ */
+#define IOPDW_RES_ADDR_0 0x00
+#define IOPDW_RAM_DATA 0x04
+#define IOPDW_RES_ADDR_8 0x08
+#define IOPDW_RES_ADDR_C 0x0C
+#define IOPDW_RES_ADDR_10 0x10
+#define IOPDW_RES_ADDR_14 0x14
+#define IOPDW_RES_ADDR_18 0x18
+#define IOPDW_RES_ADDR_1C 0x1C
+#define IOPDW_SDMA_ADDR0 0x20
+#define IOPDW_SDMA_ADDR1 0x24
+#define IOPDW_SDMA_COUNT 0x28
+#define IOPDW_SDMA_ERROR 0x2C
+#define IOPDW_RDMA_ADDR0 0x30
+#define IOPDW_RDMA_ADDR1 0x34
+#define IOPDW_RDMA_COUNT 0x38
+#define IOPDW_RDMA_ERROR 0x3C
+
+#define ADV_CHIP_ID_BYTE 0x25
+#define ADV_CHIP_ID_WORD 0x04C1
+
+#define ADV_SC_SCSI_BUS_RESET 0x2000
+
+#define ADV_INTR_ENABLE_HOST_INTR 0x01
+#define ADV_INTR_ENABLE_SEL_INTR 0x02
+#define ADV_INTR_ENABLE_DPR_INTR 0x04
+#define ADV_INTR_ENABLE_RTA_INTR 0x08
+#define ADV_INTR_ENABLE_RMA_INTR 0x10
+#define ADV_INTR_ENABLE_RST_INTR 0x20
+#define ADV_INTR_ENABLE_DPE_INTR 0x40
+#define ADV_INTR_ENABLE_GLOBAL_INTR 0x80
+
+#define ADV_INTR_STATUS_INTRA 0x01
+#define ADV_INTR_STATUS_INTRB 0x02
+#define ADV_INTR_STATUS_INTRC 0x04
+
+#define ADV_RISC_CSR_STOP (0x0000)
+#define ADV_RISC_TEST_COND (0x2000)
+#define ADV_RISC_CSR_RUN (0x4000)
+#define ADV_RISC_CSR_SINGLE_STEP (0x8000)
+
+#define ADV_CTRL_REG_HOST_INTR 0x0100
+#define ADV_CTRL_REG_SEL_INTR 0x0200
+#define ADV_CTRL_REG_DPR_INTR 0x0400
+#define ADV_CTRL_REG_RTA_INTR 0x0800
+#define ADV_CTRL_REG_RMA_INTR 0x1000
+#define ADV_CTRL_REG_RES_BIT14 0x2000
+#define ADV_CTRL_REG_DPE_INTR 0x4000
+#define ADV_CTRL_REG_POWER_DONE 0x8000
+#define ADV_CTRL_REG_ANY_INTR 0xFF00
+
+#define ADV_CTRL_REG_CMD_RESET 0x00C6
+#define ADV_CTRL_REG_CMD_WR_IO_REG 0x00C5
+#define ADV_CTRL_REG_CMD_RD_IO_REG 0x00C4
+#define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE 0x00C3
+#define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE 0x00C2
+
+#define ADV_SCSI_CTRL_RSTOUT 0x2000
+
+/*
+ * Evaluate to non-zero if the chip at 'port' has an interrupt pending,
+ * i.e. the HOST_INTR bit is set in the IOPW_CTRL_REG register.
+ */
+#define AdvIsIntPending(port) \
+ (AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR)
+
+/*
+ * SCSI_CFG0 Register bit definitions
+ */
+#define TIMER_MODEAB 0xC000 /* Watchdog, Second, and Select. Timer Ctrl. */
+#define PARITY_EN 0x2000 /* Enable SCSI Parity Error detection */
+#define EVEN_PARITY 0x1000 /* Select Even Parity */
+#define WD_LONG 0x0800 /* Watchdog Interval, 1: 57 min, 0: 13 sec */
+#define QUEUE_128 0x0400 /* Queue Size, 1: 128 byte, 0: 64 byte */
+#define PRIM_MODE 0x0100 /* Primitive SCSI mode */
+#define SCAM_EN 0x0080 /* Enable SCAM selection */
+#define SEL_TMO_LONG 0x0040 /* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */
+#define CFRM_ID 0x0020 /* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */
+#define OUR_ID_EN 0x0010 /* Enable OUR_ID bits */
+#define OUR_ID 0x000F /* SCSI ID */
+
+/*
+ * SCSI_CFG1 Register bit definitions
+ */
+#define BIG_ENDIAN 0x8000 /* Enable Big Endian Mode MIO:15, EEP:15 */
+#define TERM_POL 0x2000 /* Terminator Polarity Ctrl. MIO:13, EEP:13 */
+#define SLEW_RATE 0x1000 /* SCSI output buffer slew rate */
+#define FILTER_SEL 0x0C00 /* Filter Period Selection */
+#define FLTR_DISABLE 0x0000 /* Input Filtering Disabled */
+#define FLTR_11_TO_20NS 0x0800 /* Input Filtering 11ns to 20ns */
+#define FLTR_21_TO_39NS 0x0C00 /* Input Filtering 21ns to 39ns */
+#define ACTIVE_DBL 0x0200 /* Disable Active Negation */
+#define DIFF_MODE 0x0100 /* SCSI differential Mode (Read-Only) */
+#define DIFF_SENSE 0x0080 /* 1: No SE cables, 0: SE cable (Read-Only) */
+#define TERM_CTL_SEL 0x0040 /* Enable TERM_CTL_H and TERM_CTL_L */
+#define TERM_CTL 0x0030 /* External SCSI Termination Bits */
+#define TERM_CTL_H 0x0020 /* Enable External SCSI Upper Termination */
+#define TERM_CTL_L 0x0010 /* Enable External SCSI Lower Termination */
+#define CABLE_DETECT 0x000F /* External SCSI Cable Connection Status */
+
+#define CABLE_ILLEGAL_A 0x7
+ /* x 0 0 0 | on on | Illegal (all 3 connectors are used) */
+
+#define CABLE_ILLEGAL_B 0xB
+ /* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */
+
+/*
+ The following table details the SCSI_CFG1 Termination Polarity,
+ Termination Control and Cable Detect bits.
+
+ Cable Detect | Termination
+ Bit 3 2 1 0 | 5 4 | Notes
+ _____________|________|____________________
+ 1 1 1 0 | on on | Internal wide only
+ 1 1 0 1 | on on | Internal narrow only
+ 1 0 1 1 | on on | External narrow only
+ 0 x 1 1 | on on | External wide only
+ 1 1 0 0 | on off| Internal wide and internal narrow
+ 1 0 1 0 | on off| Internal wide and external narrow
+ 0 x 1 0 | off off| Internal wide and external wide
+ 1 0 0 1 | on off| Internal narrow and external narrow
+ 0 x 0 1 | on off| Internal narrow and external wide
+ 1 1 1 1 | on on | No devices are attached
+ x 0 0 0 | on on | Illegal (all 3 connectors are used)
+ 0 x 0 0 | on on | Illegal (all 3 connectors are used)
+
+ x means don't-care (either '0' or '1')
+
+ If term_pol (bit 13) is '0' (active-low terminator enable), then:
+ 'on' is '0' and 'off' is '1'.
+
+ If term_pol bit is '1' (meaning active-hi terminator enable), then:
+ 'on' is '1' and 'off' is '0'.
+ */
+
+/*
+ * MEM_CFG Register bit definitions
+ */
+#define BIOS_EN 0x40 /* BIOS Enable MIO:14,EEP:14 */
+#define FAST_EE_CLK 0x20 /* Diagnostic Bit */
+#define RAM_SZ 0x1C /* Specify size of RAM to RISC */
+#define RAM_SZ_2KB 0x00 /* 2 KB */
+#define RAM_SZ_4KB 0x04 /* 4 KB */
+#define RAM_SZ_8KB 0x08 /* 8 KB */
+#define RAM_SZ_16KB 0x0C /* 16 KB */
+#define RAM_SZ_32KB 0x10 /* 32 KB */
+#define RAM_SZ_64KB 0x14 /* 64 KB */
+
+/*
+ * DMA_CFG0 Register bit definitions
+ *
+ * This register is only accessible to the host.
+ */
+#define BC_THRESH_ENB 0x80 /* PCI DMA Start Conditions */
+#define FIFO_THRESH 0x70 /* PCI DMA FIFO Threshold */
+#define FIFO_THRESH_16B 0x00 /* 16 bytes */
+#define FIFO_THRESH_32B 0x20 /* 32 bytes */
+#define FIFO_THRESH_48B 0x30 /* 48 bytes */
+#define FIFO_THRESH_64B 0x40 /* 64 bytes */
+#define FIFO_THRESH_80B 0x50 /* 80 bytes (default) */
+#define FIFO_THRESH_96B 0x60 /* 96 bytes */
+#define FIFO_THRESH_112B 0x70 /* 112 bytes */
+#define START_CTL 0x0C /* DMA start conditions */
+#define START_CTL_TH 0x00 /* Wait threshold level (default) */
+#define START_CTL_ID 0x04 /* Wait SDMA/SBUS idle */
+#define START_CTL_THID 0x08 /* Wait threshold and SDMA/SBUS idle */
+#define START_CTL_EMFU 0x0C /* Wait SDMA FIFO empty/full */
+#define READ_CMD 0x03 /* Memory Read Method */
+#define READ_CMD_MR 0x00 /* Memory Read */
+#define READ_CMD_MRL 0x02 /* Memory Read Long */
+#define READ_CMD_MRM 0x03 /* Memory Read Multiple (default) */
+
+/* a_advlib.h */
+
+/*
+ * Adv Library Status Definitions
+ */
+#define ADV_TRUE 1
+#define ADV_FALSE 0
+#define ADV_NOERROR 1
+#define ADV_SUCCESS 1
+#define ADV_BUSY 0
+#define ADV_ERROR (-1)
+
+
+/*
+ * ASC_DVC_VAR 'warn_code' values
+ */
+#define ASC_WARN_EEPROM_CHKSUM 0x0002 /* EEP check sum error */
+#define ASC_WARN_EEPROM_TERMINATION 0x0004 /* EEP termination bad field */
+#define ASC_WARN_SET_PCI_CONFIG_SPACE 0x0080 /* PCI config space set error */
+#define ASC_WARN_ERROR 0xFFFF /* ADV_ERROR return */
+
+#define ADV_MAX_TID 15 /* max. target identifier */
+#define ADV_MAX_LUN 7 /* max. logical unit number */
+
+
+/*
+ * AscInitGetConfig() and AscInitAsc1000Driver() Definitions
+ *
+ * Error code values are set in ASC_DVC_VAR 'err_code'.
+ */
+#define ASC_IERR_WRITE_EEPROM 0x0001 /* write EEPROM error */
+#define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */
+#define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */
+#define ASC_IERR_CHIP_VERSION 0x0040 /* wrong chip version */
+#define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */
+#define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */
+#define ASC_IERR_ILLEGAL_CONNECTION 0x0400 /* Illegal cable connection */
+#define ASC_IERR_SINGLE_END_DEVICE 0x0800 /* Single-end used w/differential */
+#define ASC_IERR_REVERSED_CABLE 0x1000 /* Narrow flat cable reversed */
+#define ASC_IERR_RW_LRAM 0x8000 /* read/write local RAM error */
+
+/*
+ * Fixed locations of microcode operating variables.
+ */
+#define ASC_MC_CODE_BEGIN_ADDR 0x0028 /* microcode start address */
+#define ASC_MC_CODE_END_ADDR 0x002A /* microcode end address */
+#define ASC_MC_CODE_CHK_SUM 0x002C /* microcode code checksum */
+#define ASC_MC_STACK_BEGIN 0x002E /* microcode stack begin */
+#define ASC_MC_STACK_END 0x0030 /* microcode stack end */
+#define ASC_MC_VERSION_DATE 0x0038 /* microcode version */
+#define ASC_MC_VERSION_NUM 0x003A /* microcode number */
+#define ASCV_VER_SERIAL_W 0x003C /* used in dos_init */
+#define ASC_MC_BIOSMEM 0x0040 /* BIOS RISC Memory Start */
+#define ASC_MC_BIOSLEN 0x0050 /* BIOS RISC Memory Length */
+#define ASC_MC_HALTCODE 0x0094 /* microcode halt code */
+#define ASC_MC_CALLERPC 0x0096 /* microcode halt caller PC */
+#define ASC_MC_ADAPTER_SCSI_ID 0x0098 /* one ID byte + reserved */
+#define ASC_MC_ULTRA_ABLE 0x009C
+#define ASC_MC_SDTR_ABLE 0x009E
+#define ASC_MC_TAGQNG_ABLE 0x00A0
+#define ASC_MC_DISC_ENABLE 0x00A2
+#define ASC_MC_IDLE_CMD 0x00A6
+#define ASC_MC_IDLE_PARA_STAT 0x00A8
+#define ASC_MC_DEFAULT_SCSI_CFG0 0x00AC
+#define ASC_MC_DEFAULT_SCSI_CFG1 0x00AE
+#define ASC_MC_DEFAULT_MEM_CFG 0x00B0
+#define ASC_MC_DEFAULT_SEL_MASK 0x00B2
+#define ASC_MC_RISC_NEXT_READY 0x00B4
+#define ASC_MC_RISC_NEXT_DONE 0x00B5
+#define ASC_MC_SDTR_DONE 0x00B6
+#define ASC_MC_NUMBER_OF_QUEUED_CMD 0x00C0
+#define ASC_MC_NUMBER_OF_MAX_CMD 0x00D0
+#define ASC_MC_DEVICE_HSHK_CFG_TABLE 0x0100
+#define ASC_MC_WDTR_ABLE 0x0120 /* Wide Transfer TID bitmask. */
+#define ASC_MC_CONTROL_FLAG 0x0122 /* Microcode control flag. */
+#define ASC_MC_WDTR_DONE 0x0124
+#define ASC_MC_HOST_NEXT_READY 0x0128 /* Host Next Ready RQL Entry. */
+#define ASC_MC_HOST_NEXT_DONE 0x0129 /* Host Next Done RQL Entry. */
+
+/*
+ * BIOS LRAM variable absolute offsets.
+ */
+#define BIOS_CODESEG 0x54
+#define BIOS_CODELEN 0x56
+#define BIOS_SIGNATURE 0x58
+#define BIOS_VERSION 0x5A
+
+/*
+ * Microcode Control Flags
+ *
+ * Flags set by the Adv Library in RISC variable 'control_flag' (0x122)
+ * and handled by the microcode.
+ */
+#define CONTROL_FLAG_IGNORE_PERR 0x0001 /* Ignore DMA Parity Errors */
+
+/*
+ * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format
+ */
+#define HSHK_CFG_WIDE_XFR 0x8000
+#define HSHK_CFG_RATE 0x0F00
+#define HSHK_CFG_OFFSET 0x001F
+
+/*
+ * LRAM RISC Queue Lists (LRAM addresses 0x1200 - 0x19FF)
+ *
+ * Each of the 255 Adv Library/Microcode RISC queue lists or mailboxes
+ * starting at LRAM address 0x1200 is 8 bytes and has the following
+ * structure. Only 253 of these are actually used for command queues.
+ */
+
+#define ASC_MC_RISC_Q_LIST_BASE 0x1200
+#define ASC_MC_RISC_Q_LIST_SIZE 0x0008
+#define ASC_MC_RISC_Q_TOTAL_CNT 0x00FF /* Num. queue slots in LRAM. */
+#define ASC_MC_RISC_Q_FIRST 0x0001
+#define ASC_MC_RISC_Q_LAST 0x00FF
+
+#define ASC_DEF_MAX_HOST_QNG 0xFD /* Max. number of host commands (253) */
+#define ASC_DEF_MIN_HOST_QNG 0x10 /* Min. number of host commands (16) */
+#define ASC_DEF_MAX_DVC_QNG 0x3F /* Max. number commands per device (63) */
+#define ASC_DEF_MIN_DVC_QNG 0x04 /* Min. number commands per device (4) */
+
+/* RISC Queue List structure - 8 bytes */
+#define RQL_FWD 0 /* forward pointer (1 byte) */
+#define RQL_BWD 1 /* backward pointer (1 byte) */
+#define RQL_STATE 2 /* state byte - free, ready, done, aborted (1 byte) */
+#define RQL_TID 3 /* request target id (1 byte) */
+#define RQL_PHYADDR 4 /* request physical pointer (4 bytes) */
+
+/* RISC Queue List state values */
+#define ASC_MC_QS_FREE 0x00
+#define ASC_MC_QS_READY 0x01
+#define ASC_MC_QS_DONE 0x40
+#define ASC_MC_QS_ABORTED 0x80
+
+/* RISC Queue List pointer values */
+#define ASC_MC_NULL_Q 0x00 /* NULL_Q == 0 */
+#define ASC_MC_BIOS_Q 0xFF /* BIOS_Q = 255 */
+
+/* ASC_SCSI_REQ_Q 'cntl' field values */
+#define ASC_MC_QC_START_MOTOR 0x02 /* Issue start motor. */
+#define ASC_MC_QC_NO_OVERRUN 0x04 /* Don't report overrun. */
+#define ASC_MC_QC_FIRST_DMA 0x08 /* Internal microcode flag. */
+#define ASC_MC_QC_ABORTED 0x10 /* Request aborted by host. */
+#define ASC_MC_QC_REQ_SENSE 0x20 /* Auto-Request Sense. */
+#define ASC_MC_QC_DOS_REQ 0x80 /* Request issued by DOS. */
+
+
+/*
+ * ASC_SCSI_REQ_Q 'a_flag' definitions
+ *
+ * The Adv Library should limit use to the lower nibble (4 bits) of
+ * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag.
+ */
+#define ADV_POLL_REQUEST 0x01 /* poll for request completion */
+#define ADV_SCSIQ_DONE 0x02 /* request done */
+
+/*
+ * Adapter temporary configuration structure
+ *
+ * This structure can be discarded after initialization. Don't add
+ * fields here needed after initialization.
+ * (Pointed to by the ADV_DVC_VAR 'cfg' field.)
+ *
+ * Field naming convention:
+ *
+ * *_enable indicates the field enables or disables a feature. The
+ * value of the field is never reset.
+ */
+typedef struct adv_dvc_cfg {
+ ushort disc_enable; /* enable disconnection */
+ uchar chip_version; /* chip version */
+ uchar termination; /* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */
+ ushort pci_device_id; /* PCI device code number */
+ ushort lib_version; /* Adv Library version number */
+ ushort control_flag; /* Microcode Control Flag */
+ ushort mcode_date; /* Microcode date */
+ ushort mcode_version; /* Microcode version */
+ ushort pci_slot_info; /* high byte device/function number */
+ /* bits 7-3 device num., bits 2-0 function num. */
+ /* low byte bus num. */
+ ushort bios_boot_wait; /* BIOS boot time delay */
+ ushort serial1; /* EEPROM serial number word 1 */
+ ushort serial2; /* EEPROM serial number word 2 */
+ ushort serial3; /* EEPROM serial number word 3 */
+} ADV_DVC_CFG;
+
+/*
+ * Adapter operation variable structure.
+ *
+ * One structure is required per host adapter.
+ *
+ * Field naming convention:
+ *
+ * *_able indicates both whether a feature should be enabled or disabled
+ * and whether a device is capable of the feature. At initialization
+ * this field may be set, but later if a device is found to be incapable
+ * of the feature, the field is cleared.
+ */
+typedef struct adv_dvc_var {
+ AdvPortAddr iop_base; /* I/O port address */
+ ushort err_code; /* fatal error code */
+ ushort bios_ctrl; /* BIOS control word, EEPROM word 12 */
+ Ptr2Func isr_callback; /* pointer to function, called in AdvISR() */
+ Ptr2Func sbreset_callback; /* pointer to function, called in AdvISR() */
+ ushort wdtr_able; /* try WDTR for a device */
+ ushort sdtr_able; /* try SDTR for a device */
+ ushort ultra_able; /* try SDTR Ultra speed for a device */
+ ushort tagqng_able; /* try tagged queuing with a device */
+ uchar max_dvc_qng; /* maximum number of tagged commands per device */
+ ushort start_motor; /* start motor command allowed */
+ uchar scsi_reset_wait; /* delay in seconds after scsi bus reset */
+ uchar chip_no; /* should be assigned by caller */
+ uchar max_host_qng; /* maximum number of Q'ed command allowed */
+ uchar cur_host_qng; /* total number of queue command */
+ uchar irq_no; /* IRQ number */
+ ushort no_scam; /* scam_tolerant of EEPROM */
+ ushort idle_cmd_done; /* microcode idle command done set by AdvISR() */
+ ulong drv_ptr; /* driver pointer to private structure */
+ uchar chip_scsi_id; /* chip SCSI target ID */
+ /*
+ * Note: The following fields will not be used after initialization. The
+ * driver may discard the buffer after initialization is done.
+ */
+ ADV_DVC_CFG *cfg; /* temporary configuration structure */
+} ADV_DVC_VAR;
+
+/* Number of scatter-gather elements held in each ADV_SG_BLOCK. */
+#define NO_OF_SG_PER_BLOCK 15
+
+/*
+ * Scatter-gather block. Blocks are chained through 'sg_ptr'; each block
+ * carries up to NO_OF_SG_PER_BLOCK address/count element pairs.
+ */
+typedef struct asc_sg_block {
+ uchar reserved1;
+ uchar reserved2;
+ uchar first_entry_no; /* starting entry number */
+ uchar last_entry_no; /* last entry number */
+ struct asc_sg_block *sg_ptr; /* links to the next sg block */
+ struct {
+ ulong sg_addr; /* SG element address */
+ ulong sg_count; /* SG element count */
+ } sg_list[NO_OF_SG_PER_BLOCK];
+} ADV_SG_BLOCK;
+
+/*
+ * ASC_SCSI_REQ_Q - microcode request structure
+ *
+ * All fields in this structure up to byte 60 are used by the microcode.
+ * The microcode makes assumptions about the size and ordering of fields
+ * in this structure. Do not change the structure definition here without
+ * coordinating the change with the microcode.
+ */
+typedef struct adv_scsi_req_q {
+ uchar cntl; /* Ucode flags and state (ASC_MC_QC_*). */
+ uchar sg_entry_cnt; /* SG element count. Zero for no SG. */
+ uchar target_id; /* Device target identifier. */
+ uchar target_lun; /* Device target logical unit number. */
+ ulong data_addr; /* Data buffer physical address. */
+ ulong data_cnt; /* Data count. Ucode sets to residual. */
+ ulong sense_addr; /* Sense buffer physical address. */
+ ulong srb_ptr; /* Driver request pointer. */
+ /* NOTE(review): addresses/pointers are carried in 'ulong' fields -- */
+ /* assumes sizeof(ulong) >= sizeof(void *); confirm for 64-bit hosts. */
+ uchar a_flag; /* Adv Library flag field. */
+ uchar sense_len; /* Auto-sense length. Ucode sets to residual. */
+ uchar cdb_len; /* SCSI CDB length. */
+ uchar tag_code; /* SCSI-2 Tag Queue Code: 00, 20-22. */
+ uchar done_status; /* Completion status. */
+ uchar scsi_status; /* SCSI status byte. */
+ uchar host_status; /* Ucode host status. */
+ uchar ux_sg_ix; /* Ucode working SG variable. */
+ uchar cdb[12]; /* SCSI command block. */
+ ulong sg_real_addr; /* SG list physical address. */
+ struct adv_scsi_req_q *free_scsiq_link;
+ ulong ux_wk_data_cnt; /* Saved data count at disconnection. */
+ struct adv_scsi_req_q *scsiq_ptr;
+ ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */
+ /*
+ * End of microcode structure - 60 bytes. The rest of the structure
+ * is used by the Adv Library and ignored by the microcode.
+ */
+ ulong vsense_addr; /* Sense buffer virtual address. */
+ ulong vdata_addr; /* Data buffer virtual address. */
+ uchar orig_sense_len; /* Original length of sense buffer. */
+} ADV_SCSI_REQ_Q; /* BIOS - 70 bytes, DOS - 76 bytes, W95, WNT - 69 bytes */
+
+/*
+ * Microcode idle loop commands
+ */
+#define IDLE_CMD_COMPLETED 0
+#define IDLE_CMD_STOP_CHIP 0x0001
+#define IDLE_CMD_STOP_CHIP_SEND_INT 0x0002
+#define IDLE_CMD_SEND_INT 0x0004
+#define IDLE_CMD_ABORT 0x0008
+#define IDLE_CMD_DEVICE_RESET 0x0010
+#define IDLE_CMD_SCSI_RESET 0x0020
+
+/*
+ * AdvSendIdleCmd() flag definitions.
+ */
+#define ADV_NOWAIT 0x01
+
+/*
+ * Wait loop time out values.
+ */
+#define SCSI_WAIT_10_SEC 10 /* 10 seconds */
+#define SCSI_MS_PER_SEC 1000 /* milliseconds per second */
+
+/*
+ * Device drivers must define the following functions.
+ * (OS-dependent services; the library only declares them here.)
+ */
+STATIC long DvcEnterCritical(void);
+STATIC void DvcLeaveCritical(long);
+STATIC void DvcSleepMilliSecond(ulong);
+STATIC uchar DvcAdvReadPCIConfigByte(ADV_DVC_VAR *, ushort);
+STATIC void DvcAdvWritePCIConfigByte(ADV_DVC_VAR *, ushort, uchar);
+STATIC ulong DvcGetPhyAddr(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *,
+ uchar *, long *, int);
+STATIC void DvcDelayMicroSecond(ADV_DVC_VAR *, ushort);
+
+/*
+ * Adv Library functions available to drivers.
+ */
+STATIC int AdvExeScsiQueue(ADV_DVC_VAR *,
+ ADV_SCSI_REQ_Q *);
+STATIC int AdvISR(ADV_DVC_VAR *);
+STATIC int AdvInitGetConfig(ADV_DVC_VAR *);
+STATIC int AdvInitAsc3550Driver(ADV_DVC_VAR *);
+STATIC int AdvResetSB(ADV_DVC_VAR *);
+
+/*
+ * Internal Adv Library functions.
+ * (Declared STATIC, so they must be defined in this translation unit.)
+ */
+STATIC int AdvSendIdleCmd(ADV_DVC_VAR *, ushort, ulong, int);
+STATIC void AdvResetChip(ADV_DVC_VAR *);
+STATIC int AdvSendScsiCmd(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+STATIC void AdvInquiryHandling(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+STATIC int AdvInitFromEEP(ADV_DVC_VAR *);
+STATIC ushort AdvGetEEPConfig(AdvPortAddr, ADVEEP_CONFIG *);
+STATIC void AdvSetEEPConfig(AdvPortAddr, ADVEEP_CONFIG *);
+STATIC void AdvWaitEEPCmd(AdvPortAddr);
+STATIC ushort AdvReadEEPWord(AdvPortAddr, int);
+STATIC void AdvResetSCSIBus(ADV_DVC_VAR *);
+
+/*
+ * PCI Bus Definitions
+ */
+#define AscPCICmdRegBits_BusMastering 0x0007
+#define AscPCICmdRegBits_ParErrRespCtrl 0x0040
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+
+/*
+ * Register and LRAM access wrappers.  Pre-v1.3.0 kernels use port I/O
+ * (inp/outp/inpw/outpw); v1.3.0 and later use the memory-mapped
+ * ADV_MEM_* accessors defined above.
+ *
+ * NOTE(review): the write macros are comma expressions, and most of
+ * these macros evaluate 'iop_base' (and 'addr') more than once -- do
+ * not pass arguments with side effects.
+ */
+
+/* Read byte from a register. */
+#define AdvReadByteRegister(iop_base, reg_off) \
+ (inp((iop_base) + (reg_off)))
+
+/* Write byte to a register. */
+#define AdvWriteByteRegister(iop_base, reg_off, byte) \
+ (outp((iop_base) + (reg_off), (byte)))
+
+/* Read word (2 bytes) from a register. */
+#define AdvReadWordRegister(iop_base, reg_off) \
+ (inpw((iop_base) + (reg_off)))
+
+/* Write word (2 bytes) to a register. */
+#define AdvWriteWordRegister(iop_base, reg_off, word) \
+ (outpw((iop_base) + (reg_off), (word)))
+
+/* Read byte from LRAM. */
+#define AdvReadByteLram(iop_base, addr, byte) \
+do { \
+ outpw((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (byte) = inp((iop_base) + IOPB_RAM_DATA); \
+} while (0)
+
+/* Write byte to LRAM. */
+#define AdvWriteByteLram(iop_base, addr, byte) \
+ (outpw((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ outp((iop_base) + IOPB_RAM_DATA, (byte)))
+
+/* Read word (2 bytes) from LRAM. */
+#define AdvReadWordLram(iop_base, addr, word) \
+do { \
+ outpw((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (word) = inpw((iop_base) + IOPW_RAM_DATA); \
+} while (0)
+
+/* Write word (2 bytes) to LRAM. */
+#define AdvWriteWordLram(iop_base, addr, word) \
+ (outpw((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ outpw((iop_base) + IOPW_RAM_DATA, (word)))
+
+/* Write double word (4 bytes) to LRAM */
+/* Because of unspecified C language ordering don't use auto-increment. */
+#define AdvWriteDWordLram(iop_base, addr, dword) \
+ ((outpw((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ outpw((iop_base) + IOPW_RAM_DATA, (ushort) ((dword) & 0xFFFF))), \
+ (outpw((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \
+ outpw((iop_base) + IOPW_RAM_DATA, (ushort) ((dword >> 16) & 0xFFFF))))
+
+/* Read word (2 bytes) from LRAM assuming that the address is already set. */
+#define AdvReadWordAutoIncLram(iop_base) \
+ (inpw((iop_base) + IOPW_RAM_DATA))
+
+/* Write word (2 bytes) to LRAM assuming that the address is already set. */
+#define AdvWriteWordAutoIncLram(iop_base, word) \
+ (outpw((iop_base) + IOPW_RAM_DATA, (word)))
+
+#else /* version >= v1.3.0 */
+
+/* Read byte from a register. */
+#define AdvReadByteRegister(iop_base, reg_off) \
+ (ADV_MEM_READB((iop_base) + (reg_off)))
+
+/* Write byte to a register. */
+#define AdvWriteByteRegister(iop_base, reg_off, byte) \
+ (ADV_MEM_WRITEB((iop_base) + (reg_off), (byte)))
+
+/* Read word (2 bytes) from a register. */
+#define AdvReadWordRegister(iop_base, reg_off) \
+ (ADV_MEM_READW((iop_base) + (reg_off)))
+
+/* Write word (2 bytes) to a register. */
+#define AdvWriteWordRegister(iop_base, reg_off, word) \
+ (ADV_MEM_WRITEW((iop_base) + (reg_off), (word)))
+
+/* Read byte from LRAM. */
+#define AdvReadByteLram(iop_base, addr, byte) \
+do { \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \
+} while (0)
+
+/* Write byte to LRAM. */
+#define AdvWriteByteLram(iop_base, addr, byte) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte)))
+
+/* Read word (2 bytes) from LRAM. */
+#define AdvReadWordLram(iop_base, addr, word) \
+do { \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (word) = ADV_MEM_READW((iop_base) + IOPW_RAM_DATA); \
+} while (0)
+
+/* Write word (2 bytes) to LRAM. */
+#define AdvWriteWordLram(iop_base, addr, word) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))
+
+/* Write double word (4 bytes) to LRAM */
+/* Because of unspecified C language ordering don't use auto-increment. */
+#define AdvWriteDWordLram(iop_base, addr, dword) \
+ ((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
+ (ushort) ((dword) & 0xFFFF))), \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
+ (ushort) ((dword >> 16) & 0xFFFF))))
+
+/* Read word (2 bytes) from LRAM assuming that the address is already set. */
+#define AdvReadWordAutoIncLram(iop_base) \
+ (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA))
+
+/* Write word (2 bytes) to LRAM assuming that the address is already set. */
+#define AdvWriteWordAutoIncLram(iop_base, word) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))
+
+#endif /* version >= v1.3.0 */
+
+/*
+ * Define macro to check for Condor signature.
+ *
+ * Evaluate to ADV_TRUE if a Condor chip is found at the specified port
+ * address 'iop_base'. Otherwise evaluate to ADV_FALSE.
+ */
+#define AdvFindSignature(iop_base) \
+ (((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \
+ ADV_CHIP_ID_BYTE) && \
+ (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \
+ ADV_CHIP_ID_WORD)) ? ADV_TRUE : ADV_FALSE)
+
+/*
+ * Define macro to return the version number of the chip at 'iop_base'.
+ *
+ * The second parameter 'bus_type' is currently unused.
+ */
+#define AdvGetChipVersion(iop_base, bus_type) \
+ AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV)
+
+/*
+ * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must
+ * match the ASC_SCSI_REQ_Q 'srb_ptr' field.
+ *
+ * If the request has not yet been sent to the device it will simply be
+ * aborted from RISC memory. If the request is disconnected it will be
+ * aborted on reselection by sending an Abort Message to the target ID.
+ *
+ * Return value:
+ * ADV_TRUE(1) - Queue was successfully aborted.
+ * ADV_FALSE(0) - Queue was not found on the active queue list.
+ */
+#define AdvAbortSRB(asc_dvc, srb_ptr) \
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
+ (ulong) (srb_ptr), 0)
+
+/*
+ * Send a Bus Device Reset Message to the specified target ID.
+ *
+ * All outstanding commands will be purged if sending the
+ * Bus Device Reset Message is successful.
+ *
+ * Return Value:
+ * ADV_TRUE(1) - All requests on the target are purged.
+ * ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests
+ * are not purged.
+ */
+#define AdvResetDevice(asc_dvc, target_id) \
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
+ (ulong) (target_id), 0)
+
+/*
+ * SCSI Wide Type definition.
+ */
+#define ADV_SCSI_BIT_ID_TYPE ushort
+
+/*
+ * AdvInitScsiTarget() 'cntl_flag' options.
+ */
+#define ADV_SCAN_LUN 0x01
+#define ADV_CAPINFO_NOLUN 0x02
+
+/*
+ * Convert target id to target id bit mask.
+ */
+#define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID))
+
+/*
+ * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values.
+ */
+
+#define QD_NO_STATUS 0x00 /* Request not completed yet. */
+#define QD_NO_ERROR 0x01
+#define QD_ABORTED_BY_HOST 0x02
+#define QD_WITH_ERROR 0x04
+
+#define QHSTA_NO_ERROR 0x00
+#define QHSTA_M_SEL_TIMEOUT 0x11
+#define QHSTA_M_DATA_OVER_RUN 0x12
+#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
+#define QHSTA_M_QUEUE_ABORTED 0x15
+#define QHSTA_M_SXFR_SDMA_ERR 0x16 /* SXFR_STATUS SCSI DMA Error */
+#define QHSTA_M_SXFR_SXFR_PERR 0x17 /* SXFR_STATUS SCSI Bus Parity Error */
+#define QHSTA_M_RDMA_PERR 0x18 /* RISC PCI DMA parity error */
+#define QHSTA_M_SXFR_OFF_UFLW 0x19 /* SXFR_STATUS Offset Underflow */
+#define QHSTA_M_SXFR_OFF_OFLW 0x20 /* SXFR_STATUS Offset Overflow */
+#define QHSTA_M_SXFR_WD_TMO 0x21 /* SXFR_STATUS Watchdog Timeout */
+#define QHSTA_M_SXFR_DESELECTED 0x22 /* SXFR_STATUS Deselected */
+/* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */
+#define QHSTA_M_SXFR_XFR_OFLW 0x12 /* SXFR_STATUS Transfer Overflow */
+#define QHSTA_M_SXFR_XFR_PH_ERR 0x24 /* SXFR_STATUS Transfer Phase Error */
+#define QHSTA_M_SXFR_UNKNOWN_ERROR 0x25 /* SXFR_STATUS Unknown Error */
+#define QHSTA_M_WTM_TIMEOUT 0x41
+#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42
+#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43
+#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44
+#define QHSTA_M_INVALID_DEVICE 0x45 /* Bad target ID */
+
+typedef int (* ADV_ISR_CALLBACK)
+ (ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+
+typedef int (* ADV_SBRESET_CALLBACK)
+ (ADV_DVC_VAR *);
+
+/*
+ * Default EEPROM Configuration structure defined in a_init.c.
+ */
+STATIC ADVEEP_CONFIG Default_EEPROM_Config;
+
+/*
+ * DvcGetPhyAddr() flag arguments
+ */
+#define ADV_IS_SCSIQ_FLAG 0x01 /* 'addr' is ASC_SCSI_REQ_Q pointer */
+#define ADV_ASCGETSGLIST_VADDR 0x02 /* 'addr' is AscGetSGList() virtual addr */
+#define ADV_IS_SENSE_FLAG 0x04 /* 'addr' is sense virtual pointer */
+#define ADV_IS_DATA_FLAG 0x08 /* 'addr' is data virtual pointer */
+#define ADV_IS_SGLIST_FLAG 0x10 /* 'addr' is sglist virtual pointer */
+
+/* Return the address that is aligned at the next doubleword >= to 'addr'. */
+#define ADV_DWALIGN(addr) (((ulong) (addr) + 0x3) & ~0x3)
+
+/*
+ * Total contiguous memory needed for driver SG blocks.
+ *
+ * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum
+ * number of scatter-gather elements the driver supports in a
+ * single request.
+ */
+
+#ifndef ADV_MAX_SG_LIST
+Forced Error: Driver must define ADV_MAX_SG_LIST.
+#endif /* ADV_MAX_SG_LIST */
+
+#define ADV_SG_LIST_MAX_BYTE_SIZE \
+ (sizeof(ADV_SG_BLOCK) * \
+ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK))
+
+/*
+ * A driver may optionally define the assertion macro ADV_ASSERT() in
+ * its d_os_dep.h file. If the macro has not already been defined,
+ * then define the macro to a no-op.
+ */
+#ifndef ADV_ASSERT
+#define ADV_ASSERT(a)
+#endif /* ADV_ASSERT */
+
+
+/*
+ * --- Driver Constants and Macros
+ */
+
+#define ASC_NUM_BOARD_SUPPORTED 16
+#define ASC_NUM_IOPORT_PROBE 4
+#define ASC_NUM_BUS 4
+
+/* Reference Scsi_Host hostdata */
+#define ASC_BOARDP(host) ((asc_board_t *) &((host)->hostdata))
+
+/* asc_board_t flags */
+#define ASC_HOST_IN_RESET 0x01
+#define ASC_HOST_IN_ABORT 0x02
+#define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */
+#define ASC_SELECT_QUEUE_DEPTHS 0x08
+
+#define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0)
+#define ASC_WIDE_BOARD(boardp) ((boardp)->flags & ASC_IS_WIDE_BOARD)
+
+#define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */
+
+/*
+ * If the Linux kernel version supports freeing initialization code
+ * and data after loading, define macros for this purpose. These macros
+ * are not used when the driver is built as a module, cf. linux/init.h.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,23)
+#define ASC_INITFUNC(func) func
+#define ASC_INITDATA
+#define ASC_INIT
+#else /* version >= v2.1.23 */
+#define ASC_INITFUNC(func) __initfunc(func)
+#define ASC_INITDATA __initdata
+#define ASC_INIT __init
+#endif /* version >= v2.1.23 */
+
+#define ASC_INFO_SIZE 128 /* advansys_info() line size */
+
+/* /proc/scsi/advansys/[0...] related definitions */
+#define ASC_PRTBUF_SIZE 2048
+#define ASC_PRTLINE_SIZE 160
+
+#define ASC_PRT_NEXT() \
+ if (cp) { \
+ totlen += len; \
+ leftlen -= len; \
+ if (leftlen == 0) { \
+ return totlen; \
+ } \
+ cp += len; \
+ }
+
+#define ASC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+/* Asc Library return codes */
+#define ASC_TRUE 1
+#define ASC_FALSE 0
+#define ASC_NOERROR 1
+#define ASC_BUSY 0
+#define ASC_ERROR (-1)
+
+/* Scsi_Cmnd function return codes */
+#define STATUS_BYTE(byte) (byte)
+#define MSG_BYTE(byte) ((byte) << 8)
+#define HOST_BYTE(byte) ((byte) << 16)
+#define DRIVER_BYTE(byte) ((byte) << 24)
+
+/*
+ * The following definitions and macros are OS independent interfaces to
+ * the queue functions:
+ * REQ - SCSI request structure
+ * REQP - pointer to SCSI request structure
+ * REQPTID(reqp) - reqp's target id
+ * REQPNEXT(reqp) - reqp's next pointer
+ * REQPNEXTP(reqp) - pointer to reqp's next pointer
+ * REQPTIME(reqp) - reqp's time stamp value
+ * REQTIMESTAMP() - system time stamp value
+ */
+typedef Scsi_Cmnd REQ, *REQP;
+#define REQPNEXT(reqp) ((reqp)->host_scribble)
+#define REQPNEXTP(reqp) ((REQP *) &((reqp)->host_scribble))
+#define REQPTID(reqp) ((reqp)->target)
+#define REQPTIME(reqp) ((reqp)->SCp.this_residual)
+#define REQTIMESTAMP() (jiffies)
+
+#define REQTIMESTAT(function, ascq, reqp, tid) \
+{ \
+ /*
+ * If the request time stamp is greater than the system time stamp, then \
+ * the system time stamp may have wrapped. Set the request time to zero.\
+ */ \
+ if (REQPTIME(reqp) <= REQTIMESTAMP()) { \
+ REQPTIME(reqp) = REQTIMESTAMP() - REQPTIME(reqp); \
+ } else { \
+ /* Indicate an error occurred with the assertion. */ \
+ ASC_ASSERT(REQPTIME(reqp) <= REQTIMESTAMP()); \
+ REQPTIME(reqp) = 0; \
+ } \
+ /* Handle first minimum time case without external initialization. */ \
+ if (((ascq)->q_tot_cnt[tid] == 1) || \
+ (REQPTIME(reqp) < (ascq)->q_min_tim[tid])) { \
+ (ascq)->q_min_tim[tid] = REQPTIME(reqp); \
+ ASC_DBG3(1, "%s: new q_min_tim[%d] %u\n", \
+ (function), (tid), (ascq)->q_min_tim[tid]); \
+ } \
+ if (REQPTIME(reqp) > (ascq)->q_max_tim[tid]) { \
+ (ascq)->q_max_tim[tid] = REQPTIME(reqp); \
+ ASC_DBG3(1, "%s: new q_max_tim[%d] %u\n", \
+ (function), tid, (ascq)->q_max_tim[tid]); \
+ } \
+ (ascq)->q_tot_tim[tid] += REQPTIME(reqp); \
+ /* Reset the time stamp field. */ \
+ REQPTIME(reqp) = 0; \
+}
+
+/* asc_enqueue() flags */
+#define ASC_FRONT 1
+#define ASC_BACK 2
+
+/* asc_dequeue_list() argument */
+#define ASC_TID_ALL (-1)
+
+/* Return non-zero, if the queue is empty. */
+#define ASC_QUEUE_EMPTY(ascq) ((ascq)->q_tidmask == 0)
+
+/* PCI configuration declarations */
+
+#define PCI_BASE_CLASS_PREDEFINED 0x00
+#define PCI_BASE_CLASS_MASS_STORAGE 0x01
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_BASE_CLASS_MEMORY_CONTROLLER 0x05
+#define PCI_BASE_CLASS_BRIDGE_DEVICE 0x06
+
+/* MASS STORAGE */
+#define PCI_SUB_CLASS_SCSI_CONTROLLER 0x00
+#define PCI_SUB_CLASS_IDE_CONTROLLER 0x01
+#define PCI_SUB_CLASS_FLOPPY_DISK_CONTROLLER 0x02
+#define PCI_SUB_CLASS_IPI_BUS_CONTROLLER 0x03
+#define PCI_SUB_CLASS_OTHER_MASS_CONTROLLER 0x80
+
+/* NETWORK CONTROLLER */
+#define PCI_SUB_CLASS_ETHERNET_CONTROLLER 0x00
+#define PCI_SUB_CLASS_TOKEN_RING_CONTROLLER 0x01
+#define PCI_SUB_CLASS_FDDI_CONTROLLER 0x02
+#define PCI_SUB_CLASS_OTHER_NETWORK_CONTROLLER 0x80
+
+/* DISPLAY CONTROLLER */
+#define PCI_SUB_CLASS_VGA_CONTROLLER 0x00
+#define PCI_SUB_CLASS_XGA_CONTROLLER 0x01
+#define PCI_SUB_CLASS_OTHER_DISPLAY_CONTROLLER 0x80
+
+/* MULTIMEDIA CONTROLLER */
+#define PCI_SUB_CLASS_VIDEO_DEVICE 0x00
+#define PCI_SUB_CLASS_AUDIO_DEVICE 0x01
+#define PCI_SUB_CLASS_OTHER_MULTIMEDIA_DEVICE 0x80
+
+/* MEMORY CONTROLLER */
+#define PCI_SUB_CLASS_RAM_CONTROLLER 0x00
+#define PCI_SUB_CLASS_FLASH_CONTROLLER 0x01
+#define PCI_SUB_CLASS_OTHER_MEMORY_CONTROLLER 0x80
+
+/* BRIDGE CONTROLLER */
+#define PCI_SUB_CLASS_HOST_BRIDGE_CONTROLLER 0x00
+#define PCI_SUB_CLASS_ISA_BRIDGE_CONTROLLER 0x01
+#define PCI_SUB_CLASS_EISA_BRIDGE_CONTROLLER 0x02
+#define PCI_SUB_CLASS_MC_BRIDGE_CONTROLLER 0x03
+#define PCI_SUB_CLASS_PCI_TO_PCI_BRIDGE_CONTROLLER 0x04
+#define PCI_SUB_CLASS_PCMCIA_BRIDGE_CONTROLLER 0x05
+#define PCI_SUB_CLASS_OTHER_BRIDGE_CONTROLLER 0x80
+
+#define PCI_MAX_SLOT 0x1F
+#define PCI_MAX_BUS 0xFF
+#define PCI_IOADDRESS_MASK 0xFFFE
+#define ASC_PCI_VENDORID 0x10CD
+#define ASC_PCI_DEVICE_ID_CNT 4 /* PCI Device ID count. */
+#define ASC_PCI_DEVICE_ID_1100 0x1100
+#define ASC_PCI_DEVICE_ID_1200 0x1200
+#define ASC_PCI_DEVICE_ID_1300 0x1300
+#define ASC_PCI_DEVICE_ID_2300 0x2300
+
+/* PCI IO Port Addresses to generate special cycle */
+
+#define PCI_CONFIG_ADDRESS_MECH1 0x0CF8
+#define PCI_CONFIG_DATA_MECH1 0x0CFC
+
+#define PCI_CONFIG_FORWARD_REGISTER 0x0CFA /* 0=type 0; 1=type 1; */
+
+#define PCI_CONFIG_BUS_NUMBER_MASK 0x00FF0000
+#define PCI_CONFIG_DEVICE_FUNCTION_MASK 0x0000FF00
+#define PCI_CONFIG_REGISTER_NUMBER_MASK 0x000000F8
+
+#define PCI_DEVICE_FOUND 0x0000
+#define PCI_DEVICE_NOT_FOUND 0xffff
+
+#define SUBCLASS_OFFSET 0x0A
+#define CLASSCODE_OFFSET 0x0B
+#define VENDORID_OFFSET 0x00
+#define DEVICEID_OFFSET 0x02
+
+#ifndef ADVANSYS_STATS
+#define ASC_STATS(shp, counter)
+#define ASC_STATS_ADD(shp, counter, count)
+#else /* ADVANSYS_STATS */
+#define ASC_STATS(shp, counter) \
+ (ASC_BOARDP(shp)->asc_stats.counter++)
+
+#define ASC_STATS_ADD(shp, counter, count) \
+ (ASC_BOARDP(shp)->asc_stats.counter += (count))
+#endif /* ADVANSYS_STATS */
+
+#define ASC_CEILING(val, unit) (((val) + ((unit) - 1))/(unit))
+
+/* If the result wraps when calculating tenths, return 0. */
+#define ASC_TENTHS(num, den) \
+ (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \
+ 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den)))))
+
+/*
+ * Display a message to the console.
+ */
+#define ASC_PRINT(s) \
+ { \
+ printk("advansys: "); \
+ printk(s); \
+ }
+
+#define ASC_PRINT1(s, a1) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1)); \
+ }
+
+#define ASC_PRINT2(s, a1, a2) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2)); \
+ }
+
+#define ASC_PRINT3(s, a1, a2, a3) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2), (a3)); \
+ }
+
+#define ASC_PRINT4(s, a1, a2, a3, a4) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2), (a3), (a4)); \
+ }
+
+
+#ifndef ADVANSYS_DEBUG
+
+#define ASC_DBG(lvl, s)
+#define ASC_DBG1(lvl, s, a1)
+#define ASC_DBG2(lvl, s, a1, a2)
+#define ASC_DBG3(lvl, s, a1, a2, a3)
+#define ASC_DBG4(lvl, s, a1, a2, a3, a4)
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s)
+#define ASC_DBG_PRT_SCSI_CMND(lvl, s)
+#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone)
+#define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_HEX(lvl, name, start, length)
+#define ASC_DBG_PRT_CDB(lvl, cdb, len)
+#define ASC_DBG_PRT_SENSE(lvl, sense, len)
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len)
+
+#else /* ADVANSYS_DEBUG */
+
+/*
+ * Debugging Message Levels:
+ * 0: Errors Only
+ * 1: High-Level Tracing
+ * 2-N: Verbose Tracing
+ */
+
+#define ASC_DBG(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk(s); \
+ } \
+ }
+
+#define ASC_DBG1(lvl, s, a1) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1)); \
+ } \
+ }
+
+#define ASC_DBG2(lvl, s, a1, a2) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2)); \
+ } \
+ }
+
+#define ASC_DBG3(lvl, s, a1, a2, a3) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2), (a3)); \
+ } \
+ }
+
+#define ASC_DBG4(lvl, s, a1, a2, a3, a4) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2), (a3), (a4)); \
+ } \
+ }
+
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_scsi_host(s); \
+ } \
+ }
+
+#define ASC_DBG_PRT_SCSI_CMND(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_scsi_cmnd(s); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_asc_scsi_q(scsiqp); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_asc_qdone_info(qdone); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_adv_scsi_req_q(scsiqp); \
+ } \
+ }
+
+#define ASC_DBG_PRT_HEX(lvl, name, start, length) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_hex((name), (start), (length)); \
+ } \
+ }
+
+#define ASC_DBG_PRT_CDB(lvl, cdb, len) \
+ ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len));
+
+#define ASC_DBG_PRT_SENSE(lvl, sense, len) \
+ ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len));
+
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \
+ ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len));
+#endif /* ADVANSYS_DEBUG */
+
+#ifndef ADVANSYS_ASSERT
+#define ASC_ASSERT(a)
+#else /* ADVANSYS_ASSERT */
+
+#define ASC_ASSERT(a) \
+ { \
+ if (!(a)) { \
+ printk("ASC_ASSERT() Failure: file %s, line %d\n", \
+ __FILE__, __LINE__); \
+ } \
+ }
+
+#endif /* ADVANSYS_ASSERT */
+
+
+/*
+ * --- Driver Structures
+ */
+
+#ifdef ADVANSYS_STATS
+
+/* Per board statistics structure */
+struct asc_stats {
+ /* Driver Entrypoint Statistics */
+ ulong command; /* # calls to advansys_command() */
+ ulong queuecommand; /* # calls to advansys_queuecommand() */
+ ulong abort; /* # calls to advansys_abort() */
+ ulong reset; /* # calls to advansys_reset() */
+ ulong biosparam; /* # calls to advansys_biosparam() */
+ ulong interrupt; /* # advansys_interrupt() calls */
+ ulong callback; /* # calls to asc/adv_isr_callback() */
+ ulong done; /* # calls to request's scsi_done function */
+ ulong build_error; /* # asc/adv_build_req() ASC_ERROR returns. */
+ ulong adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */
+ ulong adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */
+ /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */
+ ulong exe_noerror; /* # ASC_NOERROR returns. */
+ ulong exe_busy; /* # ASC_BUSY returns. */
+ ulong exe_error; /* # ASC_ERROR returns. */
+ ulong exe_unknown; /* # unknown returns. */
+ /* Data Transfer Statistics */
+ ulong cont_cnt; /* # non-scatter-gather I/O requests received */
+ ulong cont_xfer; /* # contiguous transfer units of 512 bytes */
+ ulong sg_cnt; /* # scatter-gather I/O requests received */
+ ulong sg_elem; /* # scatter-gather elements */
+ ulong sg_xfer; /* # scatter-gather transfer units of 512 bytes */
+};
+#endif /* ADVANSYS_STATS */
+
+/*
+ * Request queuing structure
+ */
+typedef struct asc_queue {
+ ADV_SCSI_BIT_ID_TYPE q_tidmask; /* one bit set per target ID with queued requests */
+ REQP q_first[ADV_MAX_TID+1]; /* first queued request */
+ REQP q_last[ADV_MAX_TID+1]; /* last queued request */
+#ifdef ADVANSYS_STATS
+ short q_cur_cnt[ADV_MAX_TID+1]; /* current queue count */
+ short q_max_cnt[ADV_MAX_TID+1]; /* maximum queue count */
+ ulong q_tot_cnt[ADV_MAX_TID+1]; /* total enqueue count */
+ ulong q_tot_tim[ADV_MAX_TID+1]; /* total time queued */
+ ushort q_max_tim[ADV_MAX_TID+1]; /* maximum time queued */
+ ushort q_min_tim[ADV_MAX_TID+1]; /* minimum time queued */
+#endif /* ADVANSYS_STATS */
+} asc_queue_t;
+
+/*
+ * Adv Library Request Structures
+ *
+ * The following two structures are used to process Wide Board requests.
+ * One structure is needed for each command received from the Mid-Level SCSI
+ * driver.
+ *
+ * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library
+ * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the
+ * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the
+ * Mid-Level SCSI request structure.
+ *
+ * The adv_sgblk_t structure is used to handle requests that include
+ * scatter-gather elements.
+ */
+typedef struct adv_sgblk {
+ ADV_SG_BLOCK sg_block[ADV_NUM_SG_BLOCK + ADV_NUM_PAGE_CROSSING]; /* SG blocks, incl. extras for page crossings */
+ uchar align2[4]; /* Sgblock structure padding. */
+ struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */
+} adv_sgblk_t;
+
+typedef struct adv_req {
+ ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */
+ uchar align1[4]; /* Request structure padding. */
+ Scsi_Cmnd *cmndp; /* Mid-Level SCSI command pointer. */
+ adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */
+ struct adv_req *next_reqp; /* Next request structure in list. */
+} adv_req_t;
+
+/*
+ * Structure allocated for each board.
+ *
+ * This structure is allocated by scsi_register() at the end
+ * of the 'Scsi_Host' structure starting at the 'hostdata'
+ * field. It is guaranteed to be allocated from DMA-able memory.
+ */
+typedef struct asc_board {
+ int id; /* Board Id */
+ uint flags; /* Board flags (ASC_HOST_IN_RESET, ASC_IS_WIDE_BOARD, ...) */
+ union {
+ ASC_DVC_VAR asc_dvc_var; /* Narrow board */
+ ADV_DVC_VAR adv_dvc_var; /* Wide board */
+ } dvc_var;
+ union {
+ ASC_DVC_CFG asc_dvc_cfg; /* Narrow board */
+ ADV_DVC_CFG adv_dvc_cfg; /* Wide board */
+ } dvc_cfg;
+ asc_queue_t active; /* Active command queue */
+ asc_queue_t waiting; /* Waiting command queue */
+ asc_queue_t done; /* Done command queue */
+ ADV_SCSI_BIT_ID_TYPE init_tidmask; /* Target init./valid mask */
+ Scsi_Device *device[ADV_MAX_TID+1]; /* Mid-Level Scsi Device */
+ ushort reqcnt[ADV_MAX_TID+1]; /* Starvation request count */
+#if ASC_QUEUE_FLOW_CONTROL
+ ushort nerrcnt[ADV_MAX_TID+1]; /* No error request count */
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ ADV_SCSI_BIT_ID_TYPE queue_full; /* Queue full mask */
+ ushort queue_full_cnt[ADV_MAX_TID+1]; /* Queue full count */
+ union {
+ ASCEEP_CONFIG asc_eep; /* Narrow EEPROM config. */
+ ADVEEP_CONFIG adv_eep; /* Wide EEPROM config. */
+ } eep_config;
+ ulong last_reset; /* Saved last reset time */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ /* /proc/scsi/advansys/[0...] */
+ char *prtbuf; /* /proc statistics print buffer, ASC_PRTBUF_SIZE bytes */
+#endif /* version >= v1.3.0 */
+#ifdef ADVANSYS_STATS
+ struct asc_stats asc_stats; /* Board statistics */
+#endif /* ADVANSYS_STATS */
+ /*
+ * The following fields are used only for Narrow Boards.
+ */
+ /* The following three structures must be in DMA-able memory. */
+ ASC_SCSI_REQ_Q scsireqq;
+ ASC_CAP_INFO cap_info;
+ ASC_SCSI_INQUIRY inquiry;
+ uchar sdtr_data[ASC_MAX_TID+1]; /* SDTR information */
+ /*
+ * The following fields are used only for Wide Boards.
+ */
+ void *ioremap_addr; /* I/O Memory remap address. */
+ ushort ioport; /* I/O Port address. */
+ adv_req_t *orig_reqp; /* adv_req_t memory block. */
+ adv_req_t *adv_reqp; /* Request structures. */
+ adv_sgblk_t *orig_sgblkp; /* adv_sgblk_t memory block. */
+ adv_sgblk_t *adv_sgblkp; /* Scatter-gather structures. */
+ ushort bios_signature; /* BIOS Signature. */
+ ushort bios_version; /* BIOS Version. */
+ ushort bios_codeseg; /* BIOS Code Segment. */
+ ushort bios_codelen; /* BIOS Code Segment Length. */
+} asc_board_t;
+
+/*
+ * PCI configuration structures
+ */
+typedef struct _PCI_DATA_
+{
+ uchar type; /* presumably config access mechanism (1 or 2) — TODO confirm */
+ uchar bus; /* PCI bus number */
+ uchar slot; /* PCI device (slot) number */
+ uchar func; /* PCI function number */
+ uchar offset; /* byte offset into the device's config space */
+} PCI_DATA;
+
+typedef struct _PCI_DEVICE_
+{
+ ushort vendorID; /* vendor ID to search for */
+ ushort deviceID; /* device ID to search for */
+ ushort slotNumber; /* current slot being scanned */
+ ushort slotFound; /* slot at which the device was found */
+ uchar busNumber; /* current bus being scanned */
+ uchar maxBusNumber; /* highest bus number to scan */
+ uchar devFunc; /* device/function encoding */
+ ushort startSlot; /* first slot to scan */
+ ushort endSlot; /* last slot to scan */
+ uchar bridge; /* presumably non-zero for a PCI bridge — TODO confirm */
+ uchar type; /* presumably config access mechanism type — TODO confirm */
+} PCI_DEVICE;
+
+typedef struct _PCI_CONFIG_SPACE_
+{
+ ushort vendorID;
+ ushort deviceID;
+ ushort command;
+ ushort status;
+ uchar revision;
+ uchar classCode[3]; /* prog. interface, sub-class, base class */
+ uchar cacheSize;
+ uchar latencyTimer;
+ uchar headerType;
+ uchar bist; /* built-in self test */
+ ulong baseAddress[6]; /* base address registers (BAR0-BAR5) */
+ ushort reserved[4];
+ ulong optionRomAddr; /* expansion ROM base address */
+ ushort reserved2[4];
+ uchar irqLine;
+ uchar irqPin;
+ uchar minGnt;
+ uchar maxLatency;
+} PCI_CONFIG_SPACE;
+
+
+/*
+ * --- Driver Data
+ */
+
+/* Note: All driver global data should be initialized. */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+struct proc_dir_entry proc_scsi_advansys =
+{
+ PROC_SCSI_ADVANSYS, /* unsigned short low_ino */
+ 8, /* unsigned short namelen */
+ "advansys", /* const char *name */
+ S_IFDIR | S_IRUGO | S_IXUGO, /* mode_t mode */
+ 2 /* nlink_t nlink */
+};
+#endif /* version >= v1.3.0 */
+
+/* Number of boards detected in system. */
+STATIC int asc_board_count = 0;
+STATIC struct Scsi_Host *asc_host[ASC_NUM_BOARD_SUPPORTED] = { 0 };
+
+/* Overrun buffer shared between all boards. */
+STATIC uchar overrun_buf[ASC_OVERRUN_BSIZE] = { 0 };
+
+/*
+ * Global structures required to issue a command.
+ */
+STATIC ASC_SCSI_Q asc_scsi_q = { { 0 } };
+STATIC ASC_SG_HEAD asc_sg_head = { 0 };
+
+/* List of supported bus types. */
+STATIC ushort asc_bus[ASC_NUM_BUS] ASC_INITDATA = {
+ ASC_IS_ISA,
+ ASC_IS_VL,
+ ASC_IS_EISA,
+ ASC_IS_PCI,
+};
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+STATIC int pci_scan_method ASC_INITDATA = -1;
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+
+/*
+ * Used with the LILO 'advansys' option to eliminate or
+ * limit I/O port probing at boot time, cf. advansys_setup().
+ */
+STATIC int asc_iopflag = ASC_FALSE;
+STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 };
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+/*
+ * In kernels earlier than v1.3.0, kmalloc() does not work
+ * during driver initialization. Therefore statically declare
+ * 16 elements of each structure. v1.3.0 kernels will probably
+ * not need any more than this number.
+ */
+uchar adv_req_buf[16 * sizeof(adv_req_t)] = { 0 };
+uchar adv_sgblk_buf[16 * sizeof(adv_sgblk_t)] = { 0 };
+#endif /* version < v1.3.0 */
+
+#ifdef ADVANSYS_DEBUG
+STATIC char *
+asc_bus_name[ASC_NUM_BUS] = {
+ "ASC_IS_ISA",
+ "ASC_IS_VL",
+ "ASC_IS_EISA",
+ "ASC_IS_PCI",
+};
+
+STATIC int asc_dbglvl = 0;
+#endif /* ADVANSYS_DEBUG */
+
+/* Declaration for Asc Library internal data referenced by driver. */
+STATIC PortAddr _asc_def_iop_base[];
+
+
+/*
+ * --- Driver Function Prototypes
+ *
+ * advansys.h contains function prototypes for functions global to Linux.
+ */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+STATIC int asc_proc_copy(off_t, off_t, char *, int , char *, int);
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+STATIC void advansys_interrupt(int, struct pt_regs *);
+#else /* version >= v1.3.70 */
+STATIC void advansys_interrupt(int, void *, struct pt_regs *);
+#endif /* version >= v1.3.70 */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC void advansys_select_queue_depths(struct Scsi_Host *,
+ Scsi_Device *);
+#endif /* version >= v1.3.89 */
+STATIC void advansys_command_done(Scsi_Cmnd *);
+STATIC void asc_scsi_done_list(Scsi_Cmnd *);
+STATIC int asc_execute_scsi_cmnd(Scsi_Cmnd *);
+STATIC int asc_build_req(asc_board_t *, Scsi_Cmnd *);
+STATIC int adv_build_req(asc_board_t *, Scsi_Cmnd *, ADV_SCSI_REQ_Q **);
+STATIC int adv_get_sglist(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *, Scsi_Cmnd *);
+STATIC void asc_isr_callback(ASC_DVC_VAR *, ASC_QDONE_INFO *);
+STATIC void adv_isr_callback(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+STATIC int asc_srch_pci_dev(PCI_DEVICE *);
+STATIC uchar asc_scan_method(void);
+STATIC int asc_pci_find_dev(PCI_DEVICE *);
+STATIC void asc_get_pci_cfg(PCI_DEVICE *, PCI_CONFIG_SPACE *);
+STATIC ushort asc_get_cfg_word(PCI_DATA *);
+STATIC uchar asc_get_cfg_byte(PCI_DATA *);
+STATIC void asc_put_cfg_byte(PCI_DATA *, uchar);
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+STATIC void asc_enqueue(asc_queue_t *, REQP, int);
+STATIC REQP asc_dequeue(asc_queue_t *, int);
+STATIC REQP asc_dequeue_list(asc_queue_t *, REQP *, int);
+STATIC int asc_rmqueue(asc_queue_t *, REQP);
+STATIC int asc_isqueued(asc_queue_t *, REQP);
+STATIC void asc_execute_queue(asc_queue_t *);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+STATIC int asc_prt_board_devices(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_adv_bios(struct Scsi_Host *, char *, int);
+STATIC int asc_get_eeprom_string(ushort *serialnum, uchar *cp);
+STATIC int asc_prt_asc_board_eeprom(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_adv_board_eeprom(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_driver_conf(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_asc_board_info(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_adv_board_info(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_line(char *, int, char *fmt, ...);
+#endif /* version >= v1.3.0 */
+
+/* Declaration for Asc Library internal functions reference by driver. */
+STATIC int AscFindSignature(PortAddr);
+STATIC ushort AscGetEEPConfig(PortAddr, ASCEEP_CONFIG *, ushort);
+
+#ifdef ADVANSYS_STATS
+STATIC int asc_prt_board_stats(struct Scsi_Host *, char *, int);
+#endif /* ADVANSYS_STATS */
+
+#ifdef ADVANSYS_DEBUG
+STATIC void asc_prt_scsi_host(struct Scsi_Host *);
+STATIC void asc_prt_scsi_cmnd(Scsi_Cmnd *);
+STATIC void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *);
+STATIC void asc_prt_asc_dvc_var(ASC_DVC_VAR *);
+STATIC void asc_prt_asc_scsi_q(ASC_SCSI_Q *);
+STATIC void asc_prt_asc_qdone_info(ASC_QDONE_INFO *);
+STATIC void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *);
+STATIC void asc_prt_adv_dvc_var(ADV_DVC_VAR *);
+STATIC void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *);
+STATIC void asc_prt_adv_sgblock(int, ADV_SG_BLOCK *);
+STATIC void asc_prt_hex(char *f, uchar *, int);
+#endif /* ADVANSYS_DEBUG */
+
+#ifdef ADVANSYS_ASSERT
+STATIC int interrupts_enabled(void);
+#endif /* ADVANSYS_ASSERT */
+
+
+/*
+ * --- Linux 'Scsi_Host_Template' and advansys_setup() Functions
+ */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+/*
+ * advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
+ *
+ * *buffer: I/O buffer
+ * **start: if inout == FALSE pointer into buffer where user read should start
+ * offset: current offset into a /proc/scsi/advansys/[0...] file
+ * length: length of buffer
+ * hostno: Scsi_Host host_no
+ * inout: TRUE - user is writing; FALSE - user is reading
+ *
+ * Return the number of bytes read from or written to a
+ * /proc/scsi/advansys/[0...] file.
+ *
+ * Note: This function uses the per board buffer 'prtbuf' which is
+ * allocated when the board is initialized in advansys_detect(). The
+ * buffer is ASC_PRTBUF_SIZE bytes. The function asc_proc_copy() is
+ * used to write to the buffer. The way asc_proc_copy() is written
+ * if 'prtbuf' is too small it will not be overwritten. Instead the
+ * user just won't get all the available statistics.
+ */
+int
+advansys_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ int i;
+ char *cp;
+ int cplen;
+ int cnt;
+ int totcnt;
+ int leftlen;
+ char *curbuf;
+ off_t advoffset;
+ Scsi_Device *scd;
+
+ ASC_DBG(1, "advansys_proc_info: begin\n");
+
+ /*
+ * User write not supported.
+ */
+ if (inout == TRUE) {
+ return(-ENOSYS);
+ }
+
+ /*
+ * User read of /proc/scsi/advansys/[0...] file.
+ */
+
+ /* Find the specified board. */
+ for (i = 0; i < asc_board_count; i++) {
+ if (asc_host[i]->host_no == hostno) {
+ break;
+ }
+ }
+ if (i == asc_board_count) {
+ return(-ENOENT);
+ }
+
+ shp = asc_host[i];
+ boardp = ASC_BOARDP(shp);
+
+ /* Copy read data starting at the beginning of the buffer. */
+ *start = buffer;
+ curbuf = buffer;
+ advoffset = 0;
+ totcnt = 0;
+ leftlen = length;
+
+ /*
+ * Get board configuration information.
+ *
+ * advansys_info() returns the board string from its own static buffer.
+ */
+ cp = (char *) advansys_info(shp);
+ strcat(cp, "\n");
+ cplen = strlen(cp);
+ /* Copy board information. */
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ /*
+ * Display Wide Board BIOS Information.
+ */
+ if (ASC_WIDE_BOARD(boardp)) {
+ cp = boardp->prtbuf;
+ cplen = asc_prt_adv_bios(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+ }
+
+ /*
+ * Display driver information for each device attached to the board.
+ */
+ cp = boardp->prtbuf;
+ cplen = asc_prt_board_devices(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ /*
+ * Display target driver information for each device attached
+ * to the board.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,75)
+ for (scd = scsi_devices; scd; scd = scd->next)
+#else /* version >= v2.1.75 */
+ for (scd = shp->host_queue; scd; scd = scd->next)
+#endif /* version >= v2.1.75 */
+ {
+ if (scd->host == shp) {
+ cp = boardp->prtbuf;
+ /*
+ * Note: If proc_print_scsidevice() writes more than
+ * ASC_PRTBUF_SIZE bytes, it will overrun 'prtbuf'.
+ */
+ proc_print_scsidevice(scd, cp, &cplen, 0);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+ }
+ }
+
+ /*
+ * Display EEPROM configuration for the board.
+ */
+ cp = boardp->prtbuf;
+ if (ASC_NARROW_BOARD(boardp)) {
+ cplen = asc_prt_asc_board_eeprom(shp, cp, ASC_PRTBUF_SIZE);
+ } else {
+ cplen = asc_prt_adv_board_eeprom(shp, cp, ASC_PRTBUF_SIZE);
+ }
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ /*
+ * Display driver configuration and information for the board.
+ */
+ cp = boardp->prtbuf;
+ cplen = asc_prt_driver_conf(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+#ifdef ADVANSYS_STATS
+ /*
+ * Display driver statistics for the board.
+ */
+ cp = boardp->prtbuf;
+ cplen = asc_prt_board_stats(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+#endif /* ADVANSYS_STATS */
+
+ /*
+ * Display Asc Library dynamic configuration information
+ * for the board.
+ */
+ cp = boardp->prtbuf;
+ if (ASC_NARROW_BOARD(boardp)) {
+ cplen = asc_prt_asc_board_info(shp, cp, ASC_PRTBUF_SIZE);
+ } else {
+ cplen = asc_prt_adv_board_info(shp, cp, ASC_PRTBUF_SIZE);
+ }
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+
+ return totcnt;
+}
+#endif /* version >= v1.3.0 */
+
+/*
+ * advansys_detect()
+ *
+ * Detect function for AdvanSys adapters.
+ *
+ * Argument is a pointer to the host driver's scsi_hosts entry.
+ *
+ * Return number of adapters found.
+ *
+ * Side effect: each detected board is registered with scsi_register()
+ * and a pointer to its Scsi_Host is saved in the 'asc_host' array;
+ * 'asc_board_count' is the count of boards successfully initialized.
+ *
+ * Note: Because this function is called during system initialization
+ * it must not call SCSI mid-level functions including scsi_malloc()
+ * and scsi_free().
+ */
+ASC_INITFUNC(
+int
+advansys_detect(Scsi_Host_Template *tpnt)
+)
+{
+ static int detect_called = ASC_FALSE;
+ int iop;
+ int bus;
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp = NULL;
+ ADV_DVC_VAR *adv_dvc_varp = NULL;
+ int ioport = 0;
+ int share_irq = FALSE;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DEVICE pciDevice;
+ PCI_CONFIG_SPACE pciConfig;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ unsigned long pci_memory_address;
+#endif /* version >= v1,3,0 */
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ struct pci_dev *pci_devp = NULL;
+ int pci_device_id_cnt = 0;
+ unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = {
+ ASC_PCI_DEVICE_ID_1100,
+ ASC_PCI_DEVICE_ID_1200,
+ ASC_PCI_DEVICE_ID_1300,
+ ASC_PCI_DEVICE_ID_2300
+ };
+ unsigned long pci_memory_address;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+ int warn_code, err_code;
+ int ret;
+
+ /* Guard: only the first call from the mid-level performs detection. */
+ if (detect_called == ASC_FALSE) {
+ detect_called = ASC_TRUE;
+ } else {
+ printk("AdvanSys SCSI: advansys_detect() multiple calls ignored\n");
+ return 0;
+ }
+
+ ASC_DBG(1, "advansys_detect: begin\n");
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ tpnt->proc_dir = &proc_scsi_advansys;
+#endif /* version >= v1.3.0 */
+
+ asc_board_count = 0;
+
+ /*
+ * If I/O port probing has been modified, then verify and
+ * clean-up the 'asc_ioport' list.
+ */
+ if (asc_iopflag == ASC_TRUE) {
+ for (ioport = 0; ioport < ASC_NUM_IOPORT_PROBE; ioport++) {
+ ASC_DBG2(1, "advansys_detect: asc_ioport[%d] %x\n",
+ ioport, asc_ioport[ioport]);
+ if (asc_ioport[ioport] != 0) {
+ /* A user-specified port must be in the driver's table. */
+ for (iop = 0; iop < ASC_IOADR_TABLE_MAX_IX; iop++) {
+ if (_asc_def_iop_base[iop] == asc_ioport[ioport]) {
+ break;
+ }
+ }
+ if (iop == ASC_IOADR_TABLE_MAX_IX) {
+ printk(
+"AdvanSys SCSI: specified I/O Port 0x%X is invalid\n",
+ asc_ioport[ioport]);
+ asc_ioport[ioport] = 0;
+ }
+ }
+ }
+ ioport = 0;
+ }
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ memset(&pciDevice, 0, sizeof(PCI_DEVICE));
+ memset(&pciConfig, 0, sizeof(PCI_CONFIG_SPACE));
+ pciDevice.maxBusNumber = PCI_MAX_BUS;
+ pciDevice.endSlot = PCI_MAX_SLOT;
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+
+ /* Probe every supported bus type in turn, registering each board found. */
+ for (bus = 0; bus < ASC_NUM_BUS; bus++) {
+
+ ASC_DBG2(1, "advansys_detect: bus search type %d (%s)\n",
+ bus, asc_bus_name[bus]);
+ iop = 0;
+
+ while (asc_board_count < ASC_NUM_BOARD_SUPPORTED) {
+
+ ASC_DBG1(2, "advansys_detect: asc_board_count %d\n",
+ asc_board_count);
+
+ /* Locate the next adapter's I/O port (iop == 0 means none). */
+ switch (asc_bus[bus]) {
+ case ASC_IS_ISA:
+ case ASC_IS_VL:
+ if (asc_iopflag == ASC_FALSE) {
+ iop = AscSearchIOPortAddr(iop, asc_bus[bus]);
+ } else {
+ /*
+ * ISA and VL I/O port scanning has either been
+ * eliminated or limited to selected ports on
+ * the LILO command line, /etc/lilo.conf, or
+ * by setting variables when the module was loaded.
+ */
+ ASC_DBG(1, "advansys_detect: I/O port scanning modified\n");
+ ioport_try_again:
+ iop = 0;
+ for (; ioport < ASC_NUM_IOPORT_PROBE; ioport++) {
+ if ((iop = asc_ioport[ioport]) != 0) {
+ break;
+ }
+ }
+ if (iop) {
+ ASC_DBG1(1, "advansys_detect: probing I/O port %x...\n",
+ iop);
+ if (check_region(iop, ASC_IOADR_GAP) != 0) {
+ printk(
+"AdvanSys SCSI: specified I/O Port 0x%X is busy\n", iop);
+ /* Don't try this I/O port twice. */
+ asc_ioport[ioport] = 0;
+ goto ioport_try_again;
+ } else if (AscFindSignature(iop) == ASC_FALSE) {
+ printk(
+"AdvanSys SCSI: specified I/O Port 0x%X has no adapter\n", iop);
+ /* Don't try this I/O port twice. */
+ asc_ioport[ioport] = 0;
+ goto ioport_try_again;
+ } else {
+ /*
+ * If this isn't an ISA board, then it must be
+ * a VL board. If an ISA board is currently
+ * being looked for, then try for another ISA
+ * board in 'asc_ioport'.
+ */
+ if (asc_bus[bus] == ASC_IS_ISA &&
+ (AscGetChipVersion(iop, ASC_IS_ISA) &
+ ASC_CHIP_VER_ISA_BIT) == 0) {
+ /*
+ * Don't clear 'asc_ioport[ioport]'. Try
+ * this board again for VL. Increment
+ * 'ioport' past this board.
+ */
+ ioport++;
+ goto ioport_try_again;
+ }
+ }
+ /*
+ * This board appears good, don't try the I/O port
+ * again by clearing its value. Increment 'ioport'
+ * for the next iteration.
+ */
+ asc_ioport[ioport++] = 0;
+ }
+ }
+ break;
+
+ case ASC_IS_EISA:
+ iop = AscSearchIOPortAddr(iop, asc_bus[bus]);
+ break;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ case ASC_IS_PCI:
+ if (asc_srch_pci_dev(&pciDevice) != PCI_DEVICE_FOUND) {
+ iop = 0;
+ } else {
+ ASC_DBG2(2,
+ "advansys_detect: slotFound %d, busNumber %d\n",
+ pciDevice.slotFound, pciDevice.busNumber);
+ asc_get_pci_cfg(&pciDevice, &pciConfig);
+ iop = pciConfig.baseAddress[0] & PCI_IOADDRESS_MASK;
+ ASC_DBG2(1,
+ "advansys_detect: vendorID %X, deviceID %X\n",
+ pciConfig.vendorID, pciConfig.deviceID);
+ ASC_DBG2(2, "advansys_detect: iop %X, irqLine %d\n",
+ iop, pciConfig.irqLine);
+ }
+ break;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ case ASC_IS_PCI:
+ /* Walk the supported PCI device id list with pci_find_device(). */
+ while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) {
+ if ((pci_devp = pci_find_device(ASC_PCI_VENDORID,
+ pci_device_id[pci_device_id_cnt], pci_devp)) == NULL) {
+ pci_device_id_cnt++;
+ } else {
+ break;
+ }
+ }
+ if (pci_devp == NULL) {
+ iop = 0;
+ } else {
+ ASC_DBG2(2,
+ "advansys_detect: devfn %d, bus number %d\n",
+ pci_devp->devfn, pci_devp->bus->number);
+ iop = pci_devp->base_address[0] & PCI_IOADDRESS_MASK;
+ ASC_DBG2(1,
+ "advansys_detect: vendorID %X, deviceID %X\n",
+ pci_devp->vendor, pci_devp->device);
+ ASC_DBG2(2, "advansys_detect: iop %X, irqLine %d\n",
+ iop, pci_devp->irq);
+ }
+ break;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+
+ default:
+ ASC_PRINT1("advansys_detect: unknown bus type: %d\n",
+ asc_bus[bus]);
+ break;
+ }
+ ASC_DBG1(1, "advansys_detect: iop %x\n", iop);
+
+ /*
+ * Adapter not found, try next bus type.
+ */
+ if (iop == 0) {
+ break;
+ }
+
+ /*
+ * Adapter found.
+ *
+ * Register the adapter, get its configuration, and
+ * initialize it.
+ */
+ ASC_DBG(2, "advansys_detect: scsi_register()\n");
+ /* NOTE(review): scsi_register() return is not checked for NULL
+ * before use below — confirm it cannot fail in this kernel. */
+ shp = scsi_register(tpnt, sizeof(asc_board_t));
+
+ /* Save a pointer to the Scsi_host of each board found. */
+ asc_host[asc_board_count++] = shp;
+
+ /* Initialize private per board data */
+ boardp = ASC_BOARDP(shp);
+ memset(boardp, 0, sizeof(asc_board_t));
+ boardp->id = asc_board_count - 1;
+
+ /*
+ * Handle both narrow and wide boards.
+ *
+ * If a Wide board was detected, set the board structure
+ * wide board flag. Set-up the board structure based on
+ * the board type.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ if (asc_bus[bus] == ASC_IS_PCI &&
+ pciConfig.deviceID == ASC_PCI_DEVICE_ID_2300) {
+ boardp->flags |= ASC_IS_WIDE_BOARD;
+ }
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ if (asc_bus[bus] == ASC_IS_PCI &&
+ pci_devp->device == ASC_PCI_DEVICE_ID_2300) {
+ boardp->flags |= ASC_IS_WIDE_BOARD;
+ }
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DBG(1, "advansys_detect: narrow board\n");
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ asc_dvc_varp->bus_type = asc_bus[bus];
+ asc_dvc_varp->drv_ptr = (ulong) boardp;
+ asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg;
+ asc_dvc_varp->cfg->overrun_buf = &overrun_buf[0];
+ asc_dvc_varp->iop_base = iop;
+ asc_dvc_varp->isr_callback = (Ptr2Func) asc_isr_callback;
+ } else {
+ ASC_DBG(1, "advansys_detect: wide board\n");
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ adv_dvc_varp->drv_ptr = (ulong) boardp;
+ adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg;
+ adv_dvc_varp->isr_callback = (Ptr2Func) adv_isr_callback;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+ adv_dvc_varp->iop_base = iop;
+#else /* version >= v1,3,0 */
+ /*
+ * Map the board's registers into virtual memory for
+ * PCI slave access. Only memory accesses are used to
+ * access the board's registers.
+ *
+ * Note: The PCI register base address is not always
+ * page aligned, but the address passed to ioremap()
+ * must be page aligned. It is guaranteed that the
+ * PCI register base address will not cross a page
+ * boundary.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ pci_memory_address = pciConfig.baseAddress[1];
+ if ((boardp->ioremap_addr =
+ ioremap(pci_memory_address & PAGE_MASK,
+ PAGE_SIZE)) == 0) {
+ ASC_PRINT3(
+"advansys_detect: board %d: ioremap(%lx, %d) returned NULL\n",
+ boardp->id, pci_memory_address, ADV_CONDOR_IOLEN);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ adv_dvc_varp->iop_base = (AdvPortAddr)
+ (boardp->ioremap_addr +
+ (pci_memory_address - (pci_memory_address & PAGE_MASK)));
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ pci_memory_address = pci_devp->base_address[1];
+ if ((boardp->ioremap_addr =
+ ioremap(pci_memory_address & PAGE_MASK,
+ PAGE_SIZE)) == 0) {
+ ASC_PRINT3(
+"advansys_detect: board %d: ioremap(%lx, %d) returned NULL\n",
+ boardp->id, pci_memory_address, ADV_CONDOR_IOLEN);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ adv_dvc_varp->iop_base = (AdvPortAddr)
+ (boardp->ioremap_addr +
+ (pci_memory_address - (pci_memory_address & PAGE_MASK)));
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+#endif /* version >= v1,3,0 */
+
+ /*
+ * Even though it isn't used to access the board in
+ * kernels greater than or equal to v1.3.0, save
+ * the I/O Port address so that it can be reported and
+ * displayed.
+ */
+ boardp->ioport = iop;
+ }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ /*
+ * Allocate buffer for printing information from
+ * /proc/scsi/advansys/[0...].
+ */
+ if ((boardp->prtbuf =
+ kmalloc(ASC_PRTBUF_SIZE, GFP_ATOMIC)) == NULL) {
+ ASC_PRINT3(
+"advansys_detect: board %d: kmalloc(%d, %d) returned NULL\n",
+ boardp->id, ASC_PRTBUF_SIZE, GFP_ATOMIC);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+#endif /* version >= v1.3.0 */
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Set the board bus type and PCI IRQ before
+ * calling AscInitGetConfig().
+ */
+ switch (asc_dvc_varp->bus_type) {
+ case ASC_IS_ISA:
+ shp->unchecked_isa_dma = TRUE;
+ share_irq = FALSE;
+ break;
+ case ASC_IS_VL:
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = FALSE;
+ break;
+ case ASC_IS_EISA:
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+ break;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ case ASC_IS_PCI:
+ shp->irq = asc_dvc_varp->irq_no = pciConfig.irqLine;
+ asc_dvc_varp->cfg->pci_device_id = pciConfig.deviceID;
+ asc_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pciDevice.busNumber,
+ pciDevice.slotFound,
+ pciDevice.devFunc);
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+ break;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ case ASC_IS_PCI:
+ shp->irq = asc_dvc_varp->irq_no = pci_devp->irq;
+ asc_dvc_varp->cfg->pci_device_id = pci_devp->device;
+ asc_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pci_devp->bus->number,
+ PCI_SLOT(pci_devp->devfn),
+ PCI_FUNC(pci_devp->devfn));
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+ break;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+ default:
+ ASC_PRINT2(
+"advansys_detect: board %d: unknown adapter type: %d\n",
+ boardp->id, asc_dvc_varp->bus_type);
+ shp->unchecked_isa_dma = TRUE;
+ share_irq = FALSE;
+ break;
+ }
+ } else {
+ /*
+ * For Wide boards set PCI information before calling
+ * AdvInitGetConfig().
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ shp->irq = adv_dvc_varp->irq_no = pciConfig.irqLine;
+ adv_dvc_varp->cfg->pci_device_id = pciConfig.deviceID;
+ adv_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pciDevice.busNumber,
+ pciDevice.slotFound,
+ pciDevice.devFunc);
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ shp->irq = adv_dvc_varp->irq_no = pci_devp->irq;
+ adv_dvc_varp->cfg->pci_device_id = pci_devp->device;
+ adv_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pci_devp->bus->number,
+ PCI_SLOT(pci_devp->devfn),
+ PCI_FUNC(pci_devp->devfn));
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+ }
+
+ /*
+ * Read the board configuration.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * NOTE: AscInitGetConfig() may change the board's
+ * bus_type value. The asc_bus[bus] value should no
+ * longer be used. If the bus_type field must be
+ * referenced only use the bit-wise AND operator "&".
+ */
+ ASC_DBG(2, "advansys_detect: AscInitGetConfig()\n");
+ switch(ret = AscInitGetConfig(asc_dvc_varp)) {
+ case 0: /* No error */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: I/O port address modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_AUTO_CONFIG:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: I/O port increment switch enabled\n",
+ boardp->id);
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: EEPROM checksum error\n",
+ boardp->id);
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: IRQ modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: tag queuing enabled w/o disconnects\n",
+ boardp->id);
+ break;
+ default:
+ ASC_PRINT2(
+"AscInitGetConfig: board %d: unknown warning: %x\n",
+ boardp->id, ret);
+ break;
+ }
+ if ((err_code = asc_dvc_varp->err_code) != 0) {
+ ASC_PRINT3(
+"AscInitGetConfig: board %d error: init_state %x, err_code %x\n",
+ boardp->id, asc_dvc_varp->init_state,
+ asc_dvc_varp->err_code);
+ }
+ } else {
+ ASC_DBG(2, "advansys_detect: AdvInitGetConfig()\n");
+ if ((ret = AdvInitGetConfig(adv_dvc_varp)) != 0) {
+ ASC_PRINT2("AdvInitGetConfig: board %d: warning: %x\n",
+ boardp->id, ret);
+ }
+ if ((err_code = adv_dvc_varp->err_code) != 0) {
+ ASC_PRINT2(
+"AdvInitGetConfig: board %d error: err_code %x\n",
+ boardp->id, adv_dvc_varp->err_code);
+ }
+ }
+
+ /* Undo this board's registration if *InitGetConfig() failed. */
+ if (err_code != 0) {
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Save the EEPROM configuration so that it can be displayed
+ * from /proc/scsi/advansys/[0...].
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+
+ ASCEEP_CONFIG *ep;
+
+ /*
+ * Set the adapter's target id bit in the 'init_tidmask' field.
+ */
+ boardp->init_tidmask |=
+ ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id);
+
+ /*
+ * Save EEPROM settings for the board.
+ */
+ ep = &boardp->eep_config.asc_eep;
+
+ ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable;
+ ep->disc_enable = asc_dvc_varp->cfg->disc_enable;
+ ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled;
+ ep->isa_dma_speed = asc_dvc_varp->cfg->isa_dma_speed;
+ ep->start_motor = asc_dvc_varp->start_motor;
+ ep->cntl = asc_dvc_varp->dvc_cntl;
+ ep->no_scam = asc_dvc_varp->no_scam;
+ ep->max_total_qng = asc_dvc_varp->max_total_qng;
+ ep->chip_scsi_id = asc_dvc_varp->cfg->chip_scsi_id;
+ /* 'max_tag_qng' is set to the same value for every device. */
+ ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0];
+ ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0];
+ ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1];
+ ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2];
+ ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3];
+ ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4];
+ ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5];
+
+ /*
+ * Modify board configuration.
+ */
+ ASC_DBG(2, "advansys_detect: AscInitSetConfig()\n");
+ switch (ret = AscInitSetConfig(asc_dvc_varp)) {
+ case 0: /* No error. */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: I/O port address modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_AUTO_CONFIG:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: I/O port increment switch enabled\n",
+ boardp->id);
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: EEPROM checksum error\n",
+ boardp->id);
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: IRQ modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: tag queuing w/o disconnects\n",
+ boardp->id);
+ break;
+ default:
+ ASC_PRINT2(
+"AscInitSetConfig: board %d: unknown warning: %x\n",
+ boardp->id, ret);
+ break;
+ }
+ if (asc_dvc_varp->err_code != 0) {
+ ASC_PRINT3(
+"AscInitSetConfig: board %d error: init_state %x, err_code %x\n",
+ boardp->id, asc_dvc_varp->init_state,
+ asc_dvc_varp->err_code);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Finish initializing the 'Scsi_Host' structure.
+ */
+ /* AscInitSetConfig() will set the IRQ for non-PCI boards. */
+ if ((asc_dvc_varp->bus_type & ASC_IS_PCI) == 0) {
+ shp->irq = asc_dvc_varp->irq_no;
+ }
+ } else {
+
+ ADVEEP_CONFIG *ep;
+
+ /*
+ * Save Wide EEP Configuration Information.
+ */
+ ep = &boardp->eep_config.adv_eep;
+
+ ep->adapter_scsi_id = adv_dvc_varp->chip_scsi_id;
+ ep->max_host_qng = adv_dvc_varp->max_host_qng;
+ ep->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
+ ep->termination = adv_dvc_varp->cfg->termination;
+ ep->disc_enable = adv_dvc_varp->cfg->disc_enable;
+ ep->bios_ctrl = adv_dvc_varp->bios_ctrl;
+ ep->wdtr_able = adv_dvc_varp->wdtr_able;
+ ep->sdtr_able = adv_dvc_varp->sdtr_able;
+ ep->ultra_able = adv_dvc_varp->ultra_able;
+ ep->tagqng_able = adv_dvc_varp->tagqng_able;
+ ep->start_motor = adv_dvc_varp->start_motor;
+ ep->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait;
+ ep->bios_boot_delay = adv_dvc_varp->cfg->bios_boot_wait;
+ ep->serial_number_word1 = adv_dvc_varp->cfg->serial1;
+ ep->serial_number_word2 = adv_dvc_varp->cfg->serial2;
+ ep->serial_number_word3 = adv_dvc_varp->cfg->serial3;
+
+ /*
+ * Set the adapter's target id bit in the 'init_tidmask' field.
+ */
+ boardp->init_tidmask |=
+ ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id);
+
+ /*
+ * Finish initializing the 'Scsi_Host' structure.
+ */
+ shp->irq = adv_dvc_varp->irq_no;
+ }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ /*
+ * Channels are numbered beginning with 0. For AdvanSys One host
+ * structure supports one channel. Multi-channel boards have a
+ * separate host structure for each channel.
+ */
+ shp->max_channel = 0;
+#endif /* version >= v1.3.89 */
+ if (ASC_NARROW_BOARD(boardp)) {
+ shp->max_id = ASC_MAX_TID + 1;
+ shp->max_lun = ASC_MAX_LUN + 1;
+
+ shp->io_port = asc_dvc_varp->iop_base;
+ shp->n_io_port = ASC_IOADR_GAP;
+ shp->this_id = asc_dvc_varp->cfg->chip_scsi_id;
+
+ /* Set maximum number of queues the adapter can handle. */
+ shp->can_queue = asc_dvc_varp->max_total_qng;
+ } else {
+ shp->max_id = ADV_MAX_TID + 1;
+ shp->max_lun = ADV_MAX_LUN + 1;
+
+ /*
+ * Save the I/O Port address and length even though
+ * in v1.3.0 and greater kernels the region is not used
+ * by a Wide board. Instead the board is accessed with
+ * Memory Mapped I/O.
+ */
+ shp->io_port = iop;
+ shp->n_io_port = ADV_CONDOR_IOLEN;
+
+ shp->this_id = adv_dvc_varp->chip_scsi_id;
+
+ /* Set maximum number of queues the adapter can handle. */
+ shp->can_queue = adv_dvc_varp->max_host_qng;
+ }
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+ /*
+ * In old kernels without tag queuing support and with memory
+ * allocation problems set a conservative 'cmd_per_lun' value.
+ */
+#ifdef MODULE
+ shp->cmd_per_lun = 1;
+#else /* MODULE */
+ shp->cmd_per_lun = 4;
+#endif /* MODULE */
+ ASC_DBG1(1, "advansys_detect: cmd_per_lun: %d\n", shp->cmd_per_lun);
+#else /* version >= v1.3.89 */
+ /*
+ * Following v1.3.89, 'cmd_per_lun' is no longer needed
+ * and should be set to zero.
+ *
+ * But because of a bug introduced in v1.3.89 if the driver is
+ * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level
+ * SCSI function 'allocate_device' will panic. To allow the driver
+ * to work as a module in these kernels set 'cmd_per_lun' to 1.
+ */
+#ifdef MODULE
+ shp->cmd_per_lun = 1;
+#else /* MODULE */
+ shp->cmd_per_lun = 0;
+#endif /* MODULE */
+ /*
+ * Use the host 'select_queue_depths' function to determine
+ * the number of commands to queue per device.
+ */
+ shp->select_queue_depths = advansys_select_queue_depths;
+#endif /* version >= v1.3.89 */
+
+ /*
+ * Set the maximum number of scatter-gather elements the
+ * adapter can handle.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Allow two commands with 'sg_tablesize' scatter-gather
+ * elements to be executed simultaneously. This value is
+ * the theoretical hardware limit. It may be decreased
+ * below.
+ */
+ shp->sg_tablesize =
+ (((asc_dvc_varp->max_total_qng - 2) / 2) *
+ ASC_SG_LIST_PER_Q) + 1;
+ } else {
+ shp->sg_tablesize = ADV_MAX_SG_LIST;
+ }
+
+#ifdef MODULE
+ /*
+ * If the driver is compiled as a module, set a limit on the
+ * 'sg_tablesize' value to prevent memory allocation failures.
+ * Memory allocation errors are more likely to occur at module
+ * load time, than at driver initialization time.
+ */
+ if (shp->sg_tablesize > 64) {
+ shp->sg_tablesize = 64;
+ }
+#endif /* MODULE */
+
+ /*
+ * The value of 'sg_tablesize' can not exceed the SCSI
+ * mid-level driver definition of SG_ALL. SG_ALL also
+ * must not be exceeded, because it is used to define the
+ * size of the scatter-gather table in 'struct asc_sg_head'.
+ */
+ if (shp->sg_tablesize > SG_ALL) {
+ shp->sg_tablesize = SG_ALL;
+ }
+
+ ASC_DBG1(1, "advansys_detect: sg_tablesize: %d\n",
+ shp->sg_tablesize);
+
+ /* BIOS start address. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ shp->base = (char *) ((ulong) AscGetChipBiosAddress(
+ asc_dvc_varp->iop_base,
+ asc_dvc_varp->bus_type));
+ } else {
+ /*
+ * Fill-in BIOS board variables. The Wide BIOS saves
+ * information in LRAM that is used by the driver.
+ */
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_SIGNATURE,
+ boardp->bios_signature);
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_VERSION,
+ boardp->bios_version);
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODESEG,
+ boardp->bios_codeseg);
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODELEN,
+ boardp->bios_codelen);
+
+ ASC_DBG2(1,
+ "advansys_detect: bios_signature %x, bios_version %x\n",
+ boardp->bios_signature, boardp->bios_version);
+
+ ASC_DBG2(1,
+ "advansys_detect: bios_codeseg %x, bios_codelen %x\n",
+ boardp->bios_codeseg, boardp->bios_codelen);
+
+ /*
+ * If the BIOS saved a valid signature, then fill in
+ * the BIOS code segment base address.
+ */
+ if (boardp->bios_signature == 0x55AA) {
+ /*
+ * Convert x86 realmode code segment to a linear
+ * address by shifting left 4.
+ */
+ shp->base = (uchar *) (boardp->bios_codeseg << 4);
+ } else {
+ shp->base = 0;
+ }
+ }
+
+ /*
+ * Register Board Resources - I/O Port, DMA, IRQ
+ */
+
+ /* Register I/O port range. */
+ ASC_DBG(2, "advansys_detect: request_region()\n");
+ request_region(shp->io_port, shp->n_io_port, "advansys");
+
+ /* Register DMA Channel for Narrow boards. */
+ shp->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ /* Register DMA channel for ISA bus. */
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ shp->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
+ if ((ret =
+ request_dma(shp->dma_channel, "advansys")) != 0) {
+ ASC_PRINT3(
+"advansys_detect: board %d: request_dma() %d failed %d\n",
+ boardp->id, shp->dma_channel, ret);
+ release_region(shp->io_port, shp->n_io_port);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ AscEnableIsaDma(shp->dma_channel);
+ }
+ }
+
+ /* Register IRQ Number. */
+ ASC_DBG1(2, "advansys_detect: request_irq() %d\n", shp->irq);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+ if ((ret = request_irq(shp->irq, advansys_interrupt,
+ SA_INTERRUPT, "advansys")) != 0)
+#else /* version >= v1.3.70 */
+ /*
+ * If request_irq() fails with the SA_INTERRUPT flag set,
+ * then try again without the SA_INTERRUPT flag set. This
+ * allows IRQ sharing to work even with other drivers that
+ * do not set the SA_INTERRUPT flag.
+ *
+ * If SA_INTERRUPT is not set, then interrupts are enabled
+ * before the driver interrupt function is called.
+ */
+ if (((ret = request_irq(shp->irq, advansys_interrupt,
+ SA_INTERRUPT | (share_irq == TRUE ? SA_SHIRQ : 0),
+ "advansys", boardp)) != 0) &&
+ ((ret = request_irq(shp->irq, advansys_interrupt,
+ (share_irq == TRUE ? SA_SHIRQ : 0),
+ "advansys", boardp)) != 0))
+#endif /* version >= v1.3.70 */
+ {
+ if (ret == -EBUSY) {
+ ASC_PRINT2(
+"advansys_detect: board %d: request_irq(): IRQ %d already in use.\n",
+ boardp->id, shp->irq);
+ } else if (ret == -EINVAL) {
+ ASC_PRINT2(
+"advansys_detect: board %d: request_irq(): IRQ %d not valid.\n",
+ boardp->id, shp->irq);
+ } else {
+ ASC_PRINT3(
+"advansys_detect: board %d: request_irq(): IRQ %d failed with %d\n",
+ boardp->id, shp->irq, ret);
+ }
+ release_region(shp->io_port, shp->n_io_port);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ iounmap(boardp->ioremap_addr);
+#endif /* version >= v1,3,0 */
+ if (shp->dma_channel != NO_ISA_DMA) {
+ free_dma(shp->dma_channel);
+ }
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Initialize board RISC chip and enable interrupts.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DBG(2, "advansys_detect: AscInitAsc1000Driver()\n");
+ warn_code = AscInitAsc1000Driver(asc_dvc_varp);
+ err_code = asc_dvc_varp->err_code;
+
+ if (warn_code || err_code) {
+ ASC_PRINT4(
+"AscInitAsc1000Driver: board %d: error: init_state %x, warn %x error %x\n",
+ boardp->id, asc_dvc_varp->init_state,
+ warn_code, err_code);
+ }
+ } else {
+ int req_cnt;
+ adv_req_t *reqp = NULL;
+ int sg_cnt;
+ adv_sgblk_t *sgp = NULL;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+ req_cnt = sizeof(adv_req_buf)/sizeof(adv_req_t);
+ sg_cnt = sizeof(adv_sgblk_buf)/sizeof(adv_sgblk_t);
+ reqp = (adv_req_t *) &adv_req_buf[0];
+ sgp = (adv_sgblk_t *) &adv_sgblk_buf[0];
+#else /* version >= v1.3.0 */
+ /*
+ * Allocate up to 'max_host_qng' request structures for
+ * the Wide board. Retry with progressively smaller counts
+ * until kmalloc() succeeds.
+ */
+ for (req_cnt = adv_dvc_varp->max_host_qng;
+ req_cnt > 0; req_cnt--) {
+
+ reqp = (adv_req_t *)
+ kmalloc(sizeof(adv_req_t) * req_cnt, GFP_ATOMIC);
+
+ ASC_DBG3(1,
+ "advansys_detect: reqp %x, req_cnt %d, bytes %d\n",
+ (unsigned) reqp, req_cnt, sizeof(adv_req_t) * req_cnt);
+
+ if (reqp != NULL) {
+ break;
+ }
+ }
+
+ /*
+ * Allocate up to ADV_TOT_SG_LIST request structures for
+ * the Wide board.
+ */
+ for (sg_cnt = ADV_TOT_SG_LIST; sg_cnt > 0; sg_cnt--) {
+
+ sgp = (adv_sgblk_t *)
+ kmalloc(sizeof(adv_sgblk_t) * sg_cnt, GFP_ATOMIC);
+
+ ASC_DBG3(1,
+ "advansys_detect: sgp %x, sg_cnt %d, bytes %d\n",
+ (unsigned) sgp, sg_cnt, sizeof(adv_sgblk_t) * sg_cnt);
+
+ if (sgp != NULL) {
+ break;
+ }
+ }
+#endif /* version >= v1.3.0 */
+
+ /*
+ * If no request structures or scatter-gather structures could
+ * be allocated, then return an error. Otherwise continue with
+ * initialization.
+ */
+ if (reqp == NULL) {
+ ASC_PRINT1(
+"advansys_detect: board %d: error: failed to kmalloc() adv_req_t buffer.\n",
+ boardp->id);
+ err_code = ADV_ERROR;
+ } else if (sgp == NULL) {
+ kfree(reqp);
+ ASC_PRINT1(
+"advansys_detect: board %d: error: failed to kmalloc() adv_sgblk_t buffer.\n",
+ boardp->id);
+ err_code = ADV_ERROR;
+ } else {
+
+ /*
+ * Save original pointer for kfree() in case the
+ * driver is built as a module and can be unloaded.
+ */
+ boardp->orig_reqp = reqp;
+
+ /*
+ * Point 'adv_reqp' to the request structures and
+ * link them together.
+ */
+ req_cnt--;
+ reqp[req_cnt].next_reqp = NULL;
+ for (; req_cnt > 0; req_cnt--) {
+ reqp[req_cnt - 1].next_reqp = &reqp[req_cnt];
+ }
+ boardp->adv_reqp = &reqp[0];
+
+ /*
+ * Save original pointer for kfree() in case the
+ * driver is built as a module and can be unloaded.
+ */
+ boardp->orig_sgblkp = sgp;
+
+ /*
+ * Point 'adv_sgblkp' to the request structures and
+ * link them together.
+ */
+ sg_cnt--;
+ sgp[sg_cnt].next_sgblkp = NULL;
+ for (; sg_cnt > 0; sg_cnt--) {
+ sgp[sg_cnt - 1].next_sgblkp = &sgp[sg_cnt];
+ }
+ boardp->adv_sgblkp = &sgp[0];
+
+ ASC_DBG(2, "advansys_detect: AdvInitAsc3550Driver()\n");
+ warn_code = AdvInitAsc3550Driver(adv_dvc_varp);
+ err_code = adv_dvc_varp->err_code;
+
+ if (warn_code || err_code) {
+ ASC_PRINT3(
+"AdvInitAsc3550Driver: board %d: error: warn %x, error %x\n",
+ boardp->id, warn_code, adv_dvc_varp->err_code);
+ }
+ }
+ }
+
+ /* Chip init failed: release everything acquired above, in
+ * reverse order, and drop this board from the host list. */
+ if (err_code != 0) {
+ release_region(shp->io_port, shp->n_io_port);
+ if (ASC_WIDE_BOARD(boardp)) {
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ iounmap(boardp->ioremap_addr);
+#endif /* version >= v1,3,0 */
+ if (boardp->orig_reqp) {
+ kfree(boardp->orig_reqp);
+ boardp->orig_reqp = boardp->adv_reqp = NULL;
+ }
+ if (boardp->orig_sgblkp) {
+ kfree(boardp->orig_sgblkp);
+ boardp->orig_sgblkp = boardp->adv_sgblkp = NULL;
+ }
+ }
+ if (shp->dma_channel != NO_ISA_DMA) {
+ free_dma(shp->dma_channel);
+ }
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+ free_irq(shp->irq);
+#else /* version >= v1.3.70 */
+ free_irq(shp->irq, boardp);
+#endif /* version >= v1.3.70 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ ASC_DBG_PRT_SCSI_HOST(2, shp);
+ }
+ }
+ ASC_DBG1(1, "advansys_detect: done: asc_board_count %d\n", asc_board_count);
+ return asc_board_count;
+}
+
+/*
+ * advansys_release()
+ *
+ * Release resources allocated for a single AdvanSys adapter.
+ *
+ * Undoes the allocations made at detect time for this host: the IRQ,
+ * the ISA DMA channel (if one was claimed), the I/O port region, the
+ * wide-board memory mapping and request/scatter-gather pools, and the
+ * /proc print buffer. The host is then unregistered with the SCSI
+ * mid-level.
+ *
+ * Always returns 0.
+ */
+int
+advansys_release(struct Scsi_Host *shp)
+{
+ asc_board_t *boardp;
+
+ ASC_DBG(1, "advansys_release: begin\n");
+ boardp = ASC_BOARDP(shp);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+ free_irq(shp->irq);
+#else /* version >= v1.3.70 */
+ /* v1.3.70 and later pass the dev_id that was given to request_irq(). */
+ free_irq(shp->irq, boardp);
+#endif /* version >= v1.3.70 */
+ if (shp->dma_channel != NO_ISA_DMA) {
+ ASC_DBG(1, "advansys_release: free_dma()\n");
+ free_dma(shp->dma_channel);
+ }
+ release_region(shp->io_port, shp->n_io_port);
+ if (ASC_WIDE_BOARD(boardp)) {
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ iounmap(boardp->ioremap_addr);
+#endif /* version >= v1,3,0 */
+ /*
+ * Free the request and scatter-gather pools through the
+ * 'orig_*' pointers saved at allocation time; the 'adv_*'
+ * free-list heads may no longer point at the start of the
+ * original allocation.
+ */
+ if (boardp->orig_reqp) {
+ kfree(boardp->orig_reqp);
+ boardp->orig_reqp = boardp->adv_reqp = NULL;
+ }
+ if (boardp->orig_sgblkp) {
+ kfree(boardp->orig_sgblkp);
+ boardp->orig_sgblkp = boardp->adv_sgblkp = NULL;
+ }
+ }
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ ASC_ASSERT(boardp->prtbuf != NULL);
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ ASC_DBG(1, "advansys_release: end\n");
+ return 0;
+}
+
+/*
+ * advansys_info()
+ *
+ * Return suitable for printing on the console with the argument
+ * adapter's configuration information.
+ *
+ * The string is built in a static buffer, so the caller must consume
+ * the result before the next call for any adapter.
+ *
+ * Note: The information line should not exceed ASC_INFO_SIZE bytes,
+ * otherwise the static 'info' array will be overrun.
+ */
+const char *
+advansys_info(struct Scsi_Host *shp)
+{
+ static char info[ASC_INFO_SIZE];
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ char *busname;
+
+ boardp = ASC_BOARDP(shp);
+ if (ASC_NARROW_BOARD(boardp)) {
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ ASC_DBG(1, "advansys_info: begin\n");
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) == ASC_IS_ISAPNP) {
+ busname = "ISA PnP";
+ } else {
+ busname = "ISA";
+ }
+ sprintf(info,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,92)
+"AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %X/%X, IRQ %u, DMA %u",
+#else /* version >= v2.1.92 */
+"AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %lX/%X, IRQ %u, DMA %u",
+#endif /* version >= v2.1.92 */
+ ASC_VERSION, busname, asc_dvc_varp->max_total_qng,
+ (unsigned) shp->base,
+ shp->io_port, shp->n_io_port - 1,
+ shp->irq, shp->dma_channel);
+ } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
+ if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
+ == ASC_IS_PCI_ULTRA) {
+ busname = "PCI Ultra";
+ } else {
+ busname = "PCI";
+ }
+ sprintf(info,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,92)
+ "AdvanSys SCSI %s: %s %u CDB: IO %X/%X, IRQ %u",
+#else /* version >= v2.1.92 */
+ "AdvanSys SCSI %s: %s %u CDB: IO %lX/%X, IRQ %u",
+#endif /* version >= v2.1.92 */
+ ASC_VERSION, busname, asc_dvc_varp->max_total_qng,
+ shp->io_port, shp->n_io_port - 1, shp->irq);
+ } else {
+ if (asc_dvc_varp->bus_type & ASC_IS_VL) {
+ busname = "VL";
+ } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
+ busname = "EISA";
+ } else {
+ busname = "?";
+ ASC_PRINT2(
+ "advansys_info: board %d: unknown bus type %d\n",
+ boardp->id, asc_dvc_varp->bus_type);
+ }
+ sprintf(info,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,92)
+ "AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %X/%X, IRQ %u",
+#else /* version >= v2.1.92 */
+ "AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %lX/%X, IRQ %u",
+#endif /* version >= v2.1.92 */
+ ASC_VERSION, busname, asc_dvc_varp->max_total_qng,
+ /*
+ * Fix: the arguments were previously passed as
+ * 'shp->io_port - 1, shp->n_io_port', which printed a
+ * wrong I/O range. The "IO %X/%X" fields are the base
+ * port and the span (n_io_port - 1), matching the ISA
+ * and PCI branches above.
+ */
+ (unsigned) shp->base, shp->io_port,
+ shp->n_io_port - 1, shp->irq);
+ }
+ } else {
+ /*
+ * Wide Adapter Information
+ *
+ * Memory-mapped I/O is used instead of I/O space to access
+ * the adapter, but display the I/O Port range. The Memory
+ * I/O address is displayed through the driver /proc file.
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ if (boardp->bios_signature == 0x55AA) {
+ sprintf(info,
+"AdvanSys SCSI %s: PCI Ultra-Wide: BIOS %X/%X, IO %X/%X, IRQ %u",
+ ASC_VERSION,
+ boardp->bios_codeseg << 4,
+ boardp->bios_codelen > 0 ?
+ (boardp->bios_codelen << 9) - 1 : 0,
+ (unsigned) boardp->ioport, ADV_CONDOR_IOLEN - 1,
+ shp->irq);
+ } else {
+ sprintf(info,
+"AdvanSys SCSI %s: PCI Ultra-Wide: IO %X/%X, IRQ %u",
+ ASC_VERSION,
+ (unsigned) boardp->ioport,
+ (ADV_CONDOR_IOLEN - 1),
+ shp->irq);
+ }
+ }
+ ASC_ASSERT(strlen(info) < ASC_INFO_SIZE);
+ ASC_DBG(1, "advansys_info: end\n");
+ return info;
+}
+
+/*
+ * advansys_command() - polled I/O entrypoint.
+ *
+ * Issue the command through advansys_queuecommand() and busy-wait
+ * until the private completion callback flips scp->SCp.Status to a
+ * non-zero value, then hand back the command's result field.
+ *
+ * Note: This is an old interface that is no longer used by the SCSI
+ * mid-level driver. The new interface, advansys_queuecommand(),
+ * currently handles all requests.
+ */
+int
+advansys_command(Scsi_Cmnd *scp)
+{
+ ASC_DBG1(1, "advansys_command: scp %x\n", (unsigned) scp);
+ ASC_STATS(scp->host, command);
+ /* A zero status marks the command as still in flight. */
+ scp->SCp.Status = 0;
+ advansys_queuecommand(scp, advansys_command_done);
+ /* Spin until advansys_command_done() reports completion. */
+ for (;;) {
+ if (scp->SCp.Status != 0) {
+ break;
+ }
+ }
+ ASC_DBG1(1, "advansys_command: result %x\n", scp->result);
+ return scp->result;
+}
+
+/*
+ * advansys_queuecommand() - interrupt-driven I/O entrypoint.
+ *
+ * 'scp' is the command to execute; 'done' is the mid-level completion
+ * callback to invoke when the command finishes.
+ *
+ * This function always returns 0. Command return status is saved
+ * in the 'scp' result field.
+ */
+int
+advansys_queuecommand(Scsi_Cmnd *scp, void (*done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ long flags;
+ Scsi_Cmnd *done_scp;
+
+ shp = scp->host;
+ boardp = ASC_BOARDP(shp);
+ ASC_STATS(shp, queuecommand);
+
+ /*
+ * Disable interrupts to preserve request ordering and provide
+ * mutually exclusive access to global structures used to initiate
+ * a request.
+ */
+ save_flags(flags);
+ cli();
+
+ /*
+ * Block new commands while handling a reset or abort request.
+ */
+ if (boardp->flags & (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) {
+ if (boardp->flags & ASC_HOST_IN_RESET) {
+ ASC_DBG1(1,
+ "advansys_queuecommand: scp %x blocked for reset request\n",
+ (unsigned) scp);
+ scp->result = HOST_BYTE(DID_RESET);
+ } else {
+ ASC_DBG1(1,
+ "advansys_queuecommand: scp %x blocked for abort request\n",
+ (unsigned) scp);
+ scp->result = HOST_BYTE(DID_ABORT);
+ }
+
+ /*
+ * Add blocked requests to the board's 'done' queue. The queued
+ * requests will be completed at the end of the abort or reset
+ * handling.
+ */
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ restore_flags(flags);
+ return 0;
+ }
+
+ /*
+ * Attempt to execute any waiting commands for the board.
+ */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1,
+ "advansys_queuecommand: before asc_execute_queue() waiting\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+
+ /*
+ * Save the function pointer to Linux mid-level 'done' function
+ * and attempt to execute the command.
+ *
+ * If ASC_ERROR is returned the request has been added to the
+ * board's 'active' queue and will be completed by the interrupt
+ * handler.
+ *
+ * If ASC_BUSY is returned add the request to the board's per
+ * target waiting list.
+ *
+ * If an error occurred, the request will have been placed on the
+ * board's 'done' queue and must be completed before returning.
+ */
+ scp->scsi_done = done;
+ switch (asc_execute_scsi_cmnd(scp)) {
+ case ASC_NOERROR:
+ break;
+ case ASC_BUSY:
+ asc_enqueue(&boardp->waiting, scp, ASC_BACK);
+ break;
+ case ASC_ERROR:
+ default:
+ /* Complete any requests queued to 'done' by the failed attempt. */
+ done_scp = asc_dequeue_list(&boardp->done, NULL, ASC_TID_ALL);
+ /* Interrupts could be enabled here. */
+ asc_scsi_done_list(done_scp);
+ break;
+ }
+
+ /* Re-enable interrupts, if they were enabled on entry. */
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * advansys_abort()
+ *
+ * Abort the command specified by 'scp'.
+ *
+ * Returns one of the mid-level abort codes assigned below:
+ * SCSI_ABORT_SUCCESS, SCSI_ABORT_PENDING, SCSI_ABORT_NOT_RUNNING,
+ * or SCSI_ABORT_ERROR.
+ *
+ * Interrupts are disabled for most of this function; for narrow
+ * boards they are briefly re-enabled around AscAbortSRB().
+ */
+int
+advansys_abort(Scsi_Cmnd *scp)
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ long flags;
+ int do_scsi_done;
+ int scp_found;
+ Scsi_Cmnd *done_scp = NULL;
+ int ret;
+
+ /* Save current flags and disable interrupts. */
+ save_flags(flags);
+ cli();
+
+ ASC_DBG1(1, "advansys_abort: scp %x\n", (unsigned) scp);
+
+#ifdef ADVANSYS_STATS
+ if (scp->host != NULL) {
+ ASC_STATS(scp->host, abort);
+ }
+#endif /* ADVANSYS_STATS */
+
+#ifdef ADVANSYS_ASSERT
+ /* Initialize to ASC_ERROR so the asserts at the end catch any
+ * path that fails to set these. */
+ do_scsi_done = ASC_ERROR;
+ scp_found = ASC_ERROR;
+ ret = ASC_ERROR;
+#endif /* ADVANSYS_ASSERT */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if (scp->serial_number != scp->serial_number_at_timeout) {
+ ASC_PRINT1(
+"advansys_abort: timeout serial number changed for request %x\n",
+ (unsigned) scp);
+ do_scsi_done = ASC_FALSE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_ABORT_NOT_RUNNING;
+ } else
+#endif /* version >= v1.3.89 */
+ if ((shp = scp->host) == NULL) {
+ scp->result = HOST_BYTE(DID_ERROR);
+ do_scsi_done = ASC_TRUE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_ABORT_ERROR;
+ } else if ((boardp = ASC_BOARDP(shp))->flags &
+ (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) {
+ ASC_PRINT2(
+"advansys_abort: board %d: Nested host reset or abort, flags 0x%x\n",
+ boardp->id, boardp->flags);
+ do_scsi_done = ASC_TRUE;
+ if ((asc_rmqueue(&boardp->active, scp) == ASC_TRUE) ||
+ (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE)) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_ABORT_ERROR;
+ } else {
+ /* Set abort flag to avoid nested reset or abort requests. */
+ boardp->flags |= ASC_HOST_IN_ABORT;
+
+ do_scsi_done = ASC_TRUE;
+ if (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE) {
+ /*
+ * If asc_rmqueue() found the command on the waiting
+ * queue, it had not been sent to the device. After
+ * the queue is removed, no other handling is required.
+ */
+ ASC_DBG1(1, "advansys_abort: scp %x found on waiting queue\n",
+ (unsigned) scp);
+ scp_found = ASC_TRUE;
+ scp->result = HOST_BYTE(DID_ABORT);
+ ret = SCSI_ABORT_SUCCESS;
+ } else if (asc_isqueued(&boardp->active, scp) == ASC_TRUE) {
+ /*
+ * If asc_isqueued() found the command on the active
+ * queue, it has been sent to the device. The command
+ * will be returned through the interrupt handler after
+ * it has been aborted.
+ */
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Narrow Board
+ */
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ scp->result = HOST_BYTE(DID_ABORT);
+
+ sti(); /* Enable interrupts for AscAbortSRB(). */
+ ASC_DBG1(1, "advansys_abort: before AscAbortSRB(), scp %x\n",
+ (unsigned) scp);
+ switch (AscAbortSRB(asc_dvc_varp, (ulong) scp)) {
+ case ASC_TRUE:
+ /* asc_isr_callback() will be called */
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() TRUE\n");
+ ret = SCSI_ABORT_PENDING;
+ break;
+ case ASC_FALSE:
+ /* Request has apparently already completed. */
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() FALSE\n");
+ ret = SCSI_ABORT_NOT_RUNNING;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() ERROR\n");
+ ret = SCSI_ABORT_ERROR;
+ break;
+ }
+ cli();
+ } else {
+ /*
+ * Wide Board
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ scp->result = HOST_BYTE(DID_ABORT);
+
+ ASC_DBG1(1, "advansys_abort: before AdvAbortSRB(), scp %x\n",
+ (unsigned) scp);
+ switch (AdvAbortSRB(adv_dvc_varp, (ulong) scp)) {
+ case ASC_TRUE:
+ /* asc_isr_callback() will be called */
+ ASC_DBG(1, "advansys_abort: AdvAbortSRB() TRUE\n");
+ ret = SCSI_ABORT_PENDING;
+ break;
+ case ASC_FALSE:
+ /* Request has apparently already completed. */
+ ASC_DBG(1, "advansys_abort: AdvAbortSRB() FALSE\n");
+ ret = SCSI_ABORT_NOT_RUNNING;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_abort: AdvAbortSRB() ERROR\n");
+ ret = SCSI_ABORT_ERROR;
+ break;
+ }
+ /*
+ * Ensure all requests completed by the microcode have
+ * been processed by calling AdvISR().
+ */
+ (void) AdvISR(adv_dvc_varp);
+ }
+
+ /*
+ * The request will either still be on the active queue
+ * or have been added to the board's done queue.
+ */
+ if (asc_rmqueue(&boardp->active, scp) == ASC_TRUE) {
+ scp->result = HOST_BYTE(DID_ABORT);
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = asc_rmqueue(&boardp->done, scp);
+ ASC_ASSERT(scp_found == ASC_TRUE);
+ }
+
+ } else {
+ /*
+ * The command was not found on the active or waiting queues.
+ */
+ do_scsi_done = ASC_TRUE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_ABORT_NOT_RUNNING;
+ }
+
+ /* Clear abort flag. */
+ boardp->flags &= ~ASC_HOST_IN_ABORT;
+
+ /*
+ * Because the ASC_HOST_IN_ABORT flag causes both
+ * 'advansys_interrupt' and 'asc_isr_callback' to
+ * queue requests to the board's 'done' queue and
+ * prevents waiting commands from being executed,
+ * these queued requests must be handled here.
+ */
+ done_scp = asc_dequeue_list(&boardp->done, NULL, ASC_TID_ALL);
+
+ /*
+ * Start any waiting commands for the board.
+ */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1, "advansys_interrupt: before asc_execute_queue()\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+ }
+
+ /* Interrupts could be enabled here. */
+
+ /*
+ * Complete the request to be aborted, unless it has been
+ * restarted as detected above, even if it was not found on
+ * the device active or waiting queues.
+ */
+ ASC_ASSERT(do_scsi_done != ASC_ERROR);
+ ASC_ASSERT(scp_found != ASC_ERROR);
+ if (do_scsi_done == ASC_TRUE) {
+ if (scp->scsi_done == NULL) {
+ ASC_PRINT1(
+"advansys_abort: aborted request scsi_done() is NULL, %x\n",
+ (unsigned) scp);
+ } else {
+ if (scp_found == ASC_FALSE) {
+ ASC_PRINT1(
+"advansys_abort: abort request not active or waiting, completing anyway %x\n",
+ (unsigned) scp);
+ }
+ ASC_STATS(scp->host, done);
+ scp->scsi_done(scp);
+ }
+ }
+
+ /*
+ * It is possible for the request done function to re-enable
+ * interrupts without confusing the driver. But here interrupts
+ * aren't enabled until all requests have been completed.
+ */
+ if (done_scp != NULL) {
+ asc_scsi_done_list(done_scp);
+ }
+
+ ASC_DBG1(1, "advansys_abort: ret %d\n", ret);
+
+ /* Re-enable interrupts, if they were enabled on entry. */
+ restore_flags(flags);
+
+ ASC_ASSERT(ret != ASC_ERROR);
+ return ret;
+}
+
+/*
+ * advansys_reset()
+ *
+ * Reset the device associated with the command 'scp'.
+ *
+ * Returns one of the mid-level reset codes assigned below:
+ * SCSI_RESET_SUCCESS, SCSI_RESET_NOT_RUNNING, or SCSI_RESET_ERROR.
+ *
+ * A reset attempted within 10 seconds of the previous one is
+ * rejected (see the 'last_reset' check below). Interrupts are
+ * disabled for most of this function; for narrow boards they are
+ * briefly re-enabled around AscResetSB()/AscResetDevice().
+ */
+int
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+advansys_reset(Scsi_Cmnd *scp)
+#else /* version >= v1.3.89 */
+advansys_reset(Scsi_Cmnd *scp, unsigned int reset_flags)
+#endif /* version >= v1.3.89 */
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ long flags;
+ Scsi_Cmnd *done_scp = NULL, *last_scp = NULL;
+ Scsi_Cmnd *tscp, *new_last_scp;
+ int do_scsi_done;
+ int scp_found;
+ int status;
+ int target;
+ int ret;
+ int device_reset = ASC_FALSE;
+
+ /* Save current flags and disable interrupts. */
+ save_flags(flags);
+ cli();
+
+ ASC_DBG1(1, "advansys_reset: %x\n", (unsigned) scp);
+
+#ifdef ADVANSYS_STATS
+ if (scp->host != NULL) {
+ ASC_STATS(scp->host, reset);
+ }
+#endif /* ADVANSYS_STATS */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if ((reset_flags & SCSI_RESET_ASYNCHRONOUS) &&
+ (scp->serial_number != scp->serial_number_at_timeout)) {
+ ASC_PRINT1(
+"advansys_reset: timeout serial number changed for request %x\n",
+ (unsigned) scp);
+ do_scsi_done = ASC_FALSE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_RESET_NOT_RUNNING;
+ } else
+#endif /* version >= v1.3.89 */
+ if ((shp = scp->host) == NULL) {
+ scp->result = HOST_BYTE(DID_ERROR);
+ do_scsi_done = ASC_TRUE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_RESET_ERROR;
+ } else if ((boardp = ASC_BOARDP(shp))->flags &
+ (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) {
+ ASC_PRINT2(
+"advansys_reset: board %d: Nested host reset or abort, flags 0x%x\n",
+ boardp->id, boardp->flags);
+ do_scsi_done = ASC_TRUE;
+ if ((asc_rmqueue(&boardp->active, scp) == ASC_TRUE) ||
+ (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE)) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_RESET_ERROR;
+ } else if (jiffies >= boardp->last_reset &&
+ jiffies < (boardp->last_reset + (10 * HZ))) {
+ /*
+ * Don't allow a reset to be attempted within 10 seconds
+ * of the last reset.
+ *
+ * If 'jiffies' wrapping occurs, the reset request will go
+ * through, because a wrapped 'jiffies' would not pass the
+ * test above.
+ */
+ ASC_DBG(1,
+ "advansys_reset: reset within 10 sec of last reset ignored\n");
+ do_scsi_done = ASC_TRUE;
+ if ((asc_rmqueue(&boardp->active, scp) == ASC_TRUE) ||
+ (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE)) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_RESET_ERROR;
+ } else {
+ do_scsi_done = ASC_TRUE;
+
+ /* Set reset flag to avoid nested reset or abort requests. */
+ boardp->flags |= ASC_HOST_IN_RESET;
+
+ /*
+ * If the request is on the target waiting or active queue
+ * or the board done queue, then remove it and note that it
+ * was found.
+ */
+ if (asc_rmqueue(&boardp->active, scp) == ASC_TRUE) {
+ ASC_DBG(1, "advansys_reset: active scp_found = TRUE\n");
+ scp_found = ASC_TRUE;
+ } else if (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE) {
+ ASC_DBG(1, "advansys_reset: waiting scp_found = TRUE\n");
+ scp_found = ASC_TRUE;
+ } else if (asc_rmqueue(&boardp->done, scp) == ASC_TRUE) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Narrow Board
+ *
+ * If the suggest reset bus flags are set, then reset the bus.
+ * Otherwise only reset the device.
+ */
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if (reset_flags &
+ (SCSI_RESET_SUGGEST_BUS_RESET |
+ SCSI_RESET_SUGGEST_HOST_RESET)) {
+#endif /* version >= v1.3.89 */
+
+ /*
+ * Reset the target's SCSI bus.
+ */
+ ASC_DBG(1, "advansys_reset: before AscResetSB()\n");
+ sti(); /* Enable interrupts for AscResetSB(). */
+ status = AscResetSB(asc_dvc_varp);
+ cli();
+ switch (status) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AscResetSB() success\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_reset: AscResetSB() failed\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ } else {
+ /*
+ * Reset the specified device. If the device reset fails,
+ * then reset the SCSI bus.
+ */
+
+ ASC_DBG1(1,
+ "advansys_reset: before AscResetDevice(), target %d\n",
+ scp->target);
+ sti(); /* Enable interrupts for AscResetDevice(). */
+ status = AscResetDevice(asc_dvc_varp, scp->target);
+ cli();
+
+ switch (status) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AscResetDevice() success\n");
+ device_reset = ASC_TRUE;
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1,
+"advansys_reset: AscResetDevice() failed; Calling AscResetSB()\n");
+ sti(); /* Enable interrupts for AscResetSB(). */
+ status = AscResetSB(asc_dvc_varp);
+ cli();
+ switch (status) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AscResetSB() TRUE\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_reset: AscResetSB() ERROR\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+ break;
+ }
+ }
+#endif /* version >= v1.3.89 */
+ } else {
+ /*
+ * Wide Board
+ *
+ * If the suggest reset bus flags are set, then reset the bus.
+ * Otherwise only reset the device.
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if (reset_flags &
+ (SCSI_RESET_SUGGEST_BUS_RESET |
+ SCSI_RESET_SUGGEST_HOST_RESET)) {
+#endif /* version >= v1.3.89 */
+
+ /*
+ * Reset the target's SCSI bus.
+ */
+ ASC_DBG(1, "advansys_reset: before AdvResetSB()\n");
+ switch (AdvResetSB(adv_dvc_varp)) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() success\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_FALSE:
+ default:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() failed\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+ /*
+ * Ensure all requests completed by the microcode have
+ * been processed by calling AdvISR().
+ */
+ (void) AdvISR(adv_dvc_varp);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ } else {
+ /*
+ * Reset the specified device. If the device reset fails,
+ * then reset the SCSI bus.
+ */
+
+ ASC_DBG1(1,
+ "advansys_reset: before AdvResetDevice(), target %d\n",
+ scp->target);
+
+ switch (AdvResetDevice(adv_dvc_varp, scp->target)) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AdvResetDevice() success\n");
+ device_reset = ASC_TRUE;
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_FALSE:
+ default:
+ ASC_DBG(1,
+"advansys_reset: AdvResetDevice() failed; Calling AdvResetSB()\n");
+
+ switch (AdvResetSB(adv_dvc_varp)) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() TRUE\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_FALSE:
+ default:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() ERROR\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+ break;
+ }
+ /*
+ * Ensure all requests completed by the microcode have
+ * been processed by calling AdvISR().
+ */
+ (void) AdvISR(adv_dvc_varp);
+ }
+#endif /* version >= v1.3.89 */
+ }
+
+ /*
+ * Because the ASC_HOST_IN_RESET flag causes both
+ * 'advansys_interrupt' and 'asc_isr_callback' to
+ * queue requests to the board's 'done' queue and
+ * prevents waiting commands from being executed,
+ * these queued requests must be handled here.
+ */
+ done_scp = asc_dequeue_list(&boardp->done, &last_scp,
+ ASC_TID_ALL);
+
+ /*
+ * If a device reset was performed dequeue all waiting
+ * and active requests for the device and set the request
+ * status to DID_RESET.
+ *
+ * If a SCSI bus reset was performed dequeue all waiting
+ * and active requests for all devices and set the request
+ * status to DID_RESET.
+ */
+ if (device_reset == ASC_TRUE) {
+ target = scp->target;
+ } else {
+ target = ASC_TID_ALL;
+ }
+
+ /*
+ * Add active requests to 'done_scp' and set the request status
+ * to DID_RESET.
+ */
+ if (done_scp == NULL) {
+ done_scp = asc_dequeue_list(&boardp->active, &last_scp, target);
+ for (tscp = done_scp; tscp; tscp = (REQP) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ } else {
+ ASC_ASSERT(last_scp != NULL);
+ REQPNEXT(last_scp) =
+ (unsigned char *) asc_dequeue_list(&boardp->active,
+ &new_last_scp, target);
+ if (new_last_scp != (Scsi_Cmnd *) NULL) {
+ ASC_ASSERT((REQP) REQPNEXT(last_scp) != NULL);
+ for (tscp = (Scsi_Cmnd *) REQPNEXT(last_scp);
+ tscp;
+ tscp = (Scsi_Cmnd *) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ last_scp = new_last_scp;
+ }
+ }
+
+ /*
+ * Add waiting requests to 'done_scp' and set the request status
+ * to DID_RESET.
+ */
+ if (done_scp == NULL) {
+ done_scp = asc_dequeue_list(&boardp->waiting, &last_scp, target);
+ for (tscp = done_scp; tscp; tscp = (REQP) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ } else {
+ ASC_ASSERT(last_scp != NULL);
+ REQPNEXT(last_scp) =
+ (unsigned char *) asc_dequeue_list(&boardp->waiting,
+ &new_last_scp, target);
+ if (new_last_scp != NULL) {
+ ASC_ASSERT((REQP) REQPNEXT(last_scp) != NULL);
+ for (tscp = (REQP) REQPNEXT(last_scp);
+ tscp;
+ tscp = (REQP) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ last_scp = new_last_scp;
+ }
+ }
+
+ /* Save the time of the most recently completed reset. */
+ boardp->last_reset = jiffies;
+
+ /* Clear reset flag. */
+ boardp->flags &= ~ASC_HOST_IN_RESET;
+
+ /*
+ * Start any waiting commands for the board.
+ */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1, "advansys_interrupt: before asc_execute_queue()\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+ /*
+ * NOTE(review): this unconditionally overwrites the 'ret'
+ * value computed by the reset calls above (including any
+ * SCSI_RESET_ERROR) with SCSI_RESET_SUCCESS -- confirm this
+ * is intentional.
+ */
+ ret = SCSI_RESET_SUCCESS;
+ }
+
+ /* Interrupts could be enabled here. */
+
+ ASC_ASSERT(do_scsi_done != ASC_ERROR);
+ ASC_ASSERT(scp_found != ASC_ERROR);
+ if (do_scsi_done == ASC_TRUE) {
+ if (scp->scsi_done == NULL) {
+ ASC_PRINT1(
+"advansys_reset: reset request scsi_done() is NULL, %x\n",
+ (unsigned) scp);
+ } else {
+ if (scp_found == ASC_FALSE) {
+ ASC_PRINT1(
+"advansys_reset: reset request not active or waiting, completing anyway %x\n",
+ (unsigned) scp);
+ }
+ ASC_STATS(scp->host, done);
+ scp->scsi_done(scp);
+ }
+ }
+
+ /*
+ * It is possible for the request done function to re-enable
+ * interrupts without confusing the driver. But here interrupts
+ * aren't enabled until requests have been completed.
+ */
+ if (done_scp != NULL) {
+ asc_scsi_done_list(done_scp);
+ }
+
+ ASC_DBG1(1, "advansys_reset: ret %d\n", ret);
+
+ /* Re-enable interrupts, if they were enabled on entry. */
+ restore_flags(flags);
+
+ ASC_ASSERT(ret != ASC_ERROR);
+ return ret;
+}
+
+/*
+ * advansys_biosparam()
+ *
+ * Translate disk drive geometry if the "BIOS greater than 1 GB"
+ * support is enabled for a drive.
+ *
+ * ip (information pointer) is an int array with the following definition:
+ * ip[0]: heads
+ * ip[1]: sectors
+ * ip[2]: cylinders
+ */
+int
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+advansys_biosparam(Disk *dp, int dep, int ip[])
+#else /* version >= v1.3.0 */
+advansys_biosparam(Disk *dp, kdev_t dep, int ip[])
+#endif /* version >= v1.3.0 */
+{
+ asc_board_t *boardp;
+ int extended;
+
+ ASC_DBG(1, "advansys_biosparam: begin\n");
+ ASC_STATS(dp->device->host, biosparam);
+ boardp = ASC_BOARDP(dp->device->host);
+
+ /*
+ * Check whether the board's ">1 GB" translation option is
+ * enabled; the option lives in a different field for narrow
+ * and wide boards.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ extended = (boardp->dvc_var.asc_dvc_var.dvc_cntl &
+ ASC_CNTL_BIOS_GT_1GB) != 0;
+ } else {
+ extended = (boardp->dvc_var.adv_dvc_var.bios_ctrl &
+ BIOS_CTRL_EXTENDED_XLAT) != 0;
+ }
+
+ /*
+ * Extended translation (255 heads, 63 sectors) applies only when
+ * the option is enabled and the drive is larger than 1 GB
+ * (0x200000 512-byte blocks); otherwise use 64 heads, 32 sectors.
+ */
+ if (extended && dp->capacity > 0x200000) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+ ip[2] = dp->capacity / (ip[0] * ip[1]);
+ ASC_DBG(1, "advansys_biosparam: end\n");
+ return 0;
+}
+
+/*
+ * advansys_setup()
+ *
+ * This function is called from init/main.c at boot time.
+ * It it passed LILO parameters that can be set from the
+ * LILO command line or in /etc/lilo.conf.
+ *
+ * It is used by the AdvanSys driver to either disable I/O
+ * port scanning or to limit scanning to 1 - 4 I/O ports.
+ * Regardless of the option setting EISA and PCI boards
+ * will still be searched for and detected. This option
+ * only affects searching for ISA and VL boards.
+ *
+ * If ADVANSYS_DEBUG is defined the driver debug level may
+ * be set using the 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port.
+ *
+ * Examples:
+ * 1. Eliminate I/O port scanning:
+ * boot: linux advansys=
+ * or
+ * boot: linux advansys=0x0
+ * 2. Limit I/O port scanning to one I/O port:
+ * boot: linux advansys=0x110
+ * 3. Limit I/O port scanning to four I/O ports:
+ * boot: linux advansys=0x110,0x210,0x230,0x330
+ * 4. If ADVANSYS_DEBUG, limit I/O port scanning to four I/O ports and
+ * set the driver debug level to 2.
+ * boot: linux advansys=0x110,0x210,0x230,0x330,0xdeb2
+ *
+ * ints[0] - number of arguments
+ * ints[1] - first argument
+ * ints[2] - second argument
+ * ...
+ */
+ASC_INITFUNC(
+void
+advansys_setup(char *str, int *ints)
+)
+{
+ int i;
+
+ /* The option may be given only once; ignore repeats. */
+ if (asc_iopflag == ASC_TRUE) {
+ printk("AdvanSys SCSI: 'advansys' LILO option may appear only once\n");
+ return;
+ }
+
+ asc_iopflag = ASC_TRUE;
+
+ if (ints[0] > ASC_NUM_IOPORT_PROBE) {
+#ifdef ADVANSYS_DEBUG
+ /* An extra argument of the form 0xdebN sets the debug level. */
+ if ((ints[0] == ASC_NUM_IOPORT_PROBE + 1) &&
+ (ints[ASC_NUM_IOPORT_PROBE + 1] >> 4 == 0xdeb)) {
+ asc_dbglvl = ints[ASC_NUM_IOPORT_PROBE + 1] & 0xf;
+ } else {
+#endif /* ADVANSYS_DEBUG */
+ printk("AdvanSys SCSI: only %d I/O ports accepted\n",
+ ASC_NUM_IOPORT_PROBE);
+#ifdef ADVANSYS_DEBUG
+ }
+#endif /* ADVANSYS_DEBUG */
+ }
+
+#ifdef ADVANSYS_DEBUG
+ ASC_DBG1(1, "advansys_setup: ints[0] %d\n", ints[0]);
+ /*
+ * Fix: dump all ints[0] arguments; the loop previously stopped
+ * at 'i < ints[0]' and skipped the last argument.
+ */
+ for (i = 1; i <= ints[0]; i++) {
+ ASC_DBG2(1, " ints[%d] %x", i, ints[i]);
+ }
+ ASC_DBG(1, "\n");
+#endif /* ADVANSYS_DEBUG */
+
+ /* Record the I/O ports (arguments are ints[1..ints[0]]). */
+ for (i = 1; i <= ints[0] && i <= ASC_NUM_IOPORT_PROBE; i++) {
+ asc_ioport[i-1] = ints[i];
+ ASC_DBG2(1, "advansys_setup: asc_ioport[%d] %x\n",
+ i - 1, asc_ioport[i-1]);
+ }
+}
+
+
+/*
+ * --- Loadable Driver Support
+ */
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = ADVANSYS;
+# include "scsi_module.c"
+#endif /* MODULE */
+
+
+/*
+ * --- Miscellaneous Driver Functions
+ */
+
+/*
+ * First-level interrupt handler.
+ *
+ * For versions > v1.3.70, 'dev_id' is a pointer to the interrupting
+ * adapter's asc_board_t. Because all boards are currently checked
+ * for interrupts on each interrupt, 'dev_id' is not referenced. 'dev_id'
+ * could be used to identify an interrupt passed to the AdvanSys driver,
+ * which is for a device sharing an interrupt with an AdvanSys adapter.
+ *
+ * The handler polls every registered board, dispatches to the
+ * narrow (AscISR) or wide (AdvISR) second-level ISR, restarts
+ * waiting commands, and finally completes all finished requests
+ * collected on the boards' 'done' queues.
+ */
+STATIC void
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+advansys_interrupt(int irq, struct pt_regs *regs)
+#else /* version >= v1.3.70 */
+advansys_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+#endif /* version >= v1.3.70 */
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,95)
+ long flags;
+#else /* version >= v2.1.95 */
+ unsigned long flags;
+#endif /* version >= v2.1.95 */
+ int i;
+ asc_board_t *boardp;
+ Scsi_Cmnd *done_scp = NULL, *last_scp = NULL;
+ Scsi_Cmnd *new_last_scp;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,95)
+ /* Disable interrupts, if they aren't already disabled. */
+ save_flags(flags);
+ cli();
+#else /* version >= v2.1.95 */
+ /*
+ * Disable interrupts, if they aren't already disabled and acquire
+ * the I/O spinlock.
+ */
+ spin_lock_irqsave(&io_request_lock, flags);
+#endif /* version >= v2.1.95 */
+
+ ASC_DBG(1, "advansys_interrupt: begin\n");
+
+ /*
+ * Check for interrupts on all boards.
+ * AscISR() will call asc_isr_callback().
+ */
+ for (i = 0; i < asc_board_count; i++) {
+ boardp = ASC_BOARDP(asc_host[i]);
+ ASC_DBG2(2, "advansys_interrupt: i %d, boardp %lx\n",
+ i, (ulong) boardp)
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Narrow Board
+ */
+ if (AscIsIntPending(asc_host[i]->io_port)) {
+ ASC_STATS(asc_host[i], interrupt);
+ ASC_DBG(1, "advansys_interrupt: before AscISR()\n");
+ AscISR(&boardp->dvc_var.asc_dvc_var);
+ }
+ } else {
+ /*
+ * Wide Board
+ */
+ ASC_DBG(1, "advansys_interrupt: before AdvISR()\n");
+ if (AdvISR(&boardp->dvc_var.adv_dvc_var)) {
+ ASC_STATS(asc_host[i], interrupt);
+ }
+ }
+
+ /*
+ * Start waiting requests and create a list of completed requests.
+ *
+ * If a reset or abort request is being performed for the board,
+ * the reset or abort handler will complete pending requests after
+ * it has completed.
+ */
+ if ((boardp->flags & (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) == 0) {
+ ASC_DBG2(1, "advansys_interrupt: done_scp %lx, last_scp %lx\n",
+ (ulong) done_scp, (ulong) last_scp);
+
+ /* Start any waiting commands for the board. */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1, "advansys_interrupt: before asc_execute_queue()\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+
+ /*
+ * Add to the list of requests that must be completed.
+ *
+ * 'done_scp' will always be NULL on the first iteration
+ * of this loop. 'last_scp' is set at the same time as
+ * 'done_scp'.
+ */
+ if (done_scp == NULL) {
+ done_scp = asc_dequeue_list(&boardp->done, &last_scp,
+ ASC_TID_ALL);
+ } else {
+ ASC_ASSERT(last_scp != NULL);
+ REQPNEXT(last_scp) =
+ (unsigned char *) asc_dequeue_list(&boardp->done,
+ &new_last_scp,
+ ASC_TID_ALL);
+ if (new_last_scp != NULL) {
+ ASC_ASSERT(REQPNEXT(last_scp) != NULL);
+ last_scp = new_last_scp;
+ }
+ }
+ }
+ }
+
+ /* Interrupts could be enabled here. */
+
+ /*
+ * It is possible for the request done function to re-enable
+ * interrupts without confusing the driver. But here the
+ * original flags aren't restored until all requests have been
+ * completed.
+ */
+ asc_scsi_done_list(done_scp);
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,95)
+ /*
+ * Restore the original flags which will enable interrupts
+ * if and only if they were enabled on entry.
+ */
+ restore_flags(flags);
+#else /* version >= v2.1.95 */
+ /*
+ * Release the I/O spinlock and restore the original flags
+ * which will enable interrupts if and only if they were
+ * enabled on entry.
+ */
+ spin_unlock_irqrestore(&io_request_lock, flags);
+#endif /* version >= v2.1.95 */
+
+ ASC_DBG(1, "advansys_interrupt: end\n");
+ return;
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+/*
+ * Set the initial/maximum queue depth for every device attached
+ * to the specified host adapter, and remember each device pointer
+ * on the board structure.
+ */
+STATIC void
+advansys_select_queue_depths(struct Scsi_Host *shp, Scsi_Device *devicelist)
+{
+    asc_board_t *boardp = ASC_BOARDP(shp);
+    Scsi_Device *dp;
+
+    boardp->flags |= ASC_SELECT_QUEUE_DEPTHS;
+    for (dp = devicelist; dp != NULL; dp = dp->next) {
+        /* The list covers all hosts; only handle devices on 'shp'. */
+        if (dp->host == shp) {
+            /*
+             * Save a pointer to the device and set its queue depth
+             * from the narrow or wide library limit.
+             */
+            boardp->device[dp->id] = dp;
+            if (ASC_NARROW_BOARD(boardp)) {
+                dp->queue_depth =
+                    boardp->dvc_var.asc_dvc_var.max_dvc_qng[dp->id];
+            } else {
+                dp->queue_depth =
+                    boardp->dvc_var.adv_dvc_var.max_dvc_qng;
+            }
+            ASC_DBG3(1, "advansys_select_queue_depths: shp %x, id %d, depth %d\n",
+                (unsigned) shp, dp->id, dp->queue_depth);
+        }
+    }
+}
+#endif /* version >= v1.3.89 */
+
+/*
+ * Function used only with polled I/O requests that are initiated by
+ * advansys_command().
+ */
+STATIC void
+advansys_command_done(Scsi_Cmnd *scp)
+{
+ ASC_DBG1(1, "advansys_command_done: scp %x\n", (unsigned) scp);
+ /*
+ * Mark the request complete; the polling initiator (presumably
+ * advansys_command()) watches SCp.Status for this flag — confirm
+ * against the caller, which is outside this view.
+ */
+ scp->SCp.Status = 1;
+}
+
+/*
+ * Complete every request on the singly linked list headed by 'scp'
+ * by calling each request's scsi_done() function.
+ *
+ * Interrupts can be enabled on entry.
+ */
+STATIC void
+asc_scsi_done_list(Scsi_Cmnd *scp)
+{
+    Scsi_Cmnd *next;
+
+    ASC_DBG(2, "asc_scsi_done_list: begin\n");
+    for (; scp != NULL; scp = next) {
+        ASC_DBG1(3, "asc_scsi_done_list: scp %x\n", (unsigned) scp);
+        /* Unlink the request before handing it to the done function. */
+        next = (REQP) REQPNEXT(scp);
+        REQPNEXT(scp) = NULL;
+        ASC_STATS(scp->host, done);
+        ASC_ASSERT(scp->scsi_done != NULL);
+        scp->scsi_done(scp);
+    }
+    ASC_DBG(2, "asc_scsi_done_list: done\n");
+}
+
+/*
+ * Execute a single 'Scsi_Cmnd'.
+ *
+ * The function 'done' is called when the request has been completed.
+ *
+ * Scsi_Cmnd:
+ *
+ * host - board controlling device
+ * device - device to send command
+ * target - target of device
+ * lun - lun of device
+ * cmd_len - length of SCSI CDB
+ * cmnd - buffer for SCSI 8, 10, or 12 byte CDB
+ * use_sg - if non-zero indicates scatter-gather request with use_sg elements
+ *
+ * if (use_sg == 0) {
+ * request_buffer - buffer address for request
+ * request_bufflen - length of request buffer
+ * } else {
+ * request_buffer - pointer to scatterlist structure
+ * }
+ *
+ * sense_buffer - sense command buffer
+ *
+ * result (4 bytes of an int):
+ * Byte Meaning
+ * 0 SCSI Status Byte Code
+ * 1 SCSI One Byte Message Code
+ * 2 Host Error Code
+ * 3 Mid-Level Error Code
+ *
+ * host driver fields:
+ * SCp - Scsi_Pointer used for command processing status
+ * scsi_done - used to save caller's done function
+ * host_scribble - used for pointer to another Scsi_Cmnd
+ *
+ * If this function returns ASC_NOERROR or ASC_ERROR the request
+ * has been enqueued on the board's 'done' queue and must be
+ * completed by the caller.
+ *
+ * If ASC_BUSY is returned the request must be enqueued by the
+ * caller and re-tried later.
+ *
+ * NOTE(review): the wording above is imprecise — in the code below an
+ * ASC_NOERROR result leaves the request on the board's 'active' queue;
+ * only the error paths enqueue it on the 'done' queue. Confirm against
+ * the callers before relying on the header text.
+ */
+STATIC int
+asc_execute_scsi_cmnd(Scsi_Cmnd *scp)
+{
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ ADV_SCSI_REQ_Q *adv_scsiqp;
+ Scsi_Device *device;
+ int ret;
+
+ /* Must run with interrupts disabled (global request state is used). */
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_DBG2(1, "asc_execute_scsi_cmnd: scp %x, done %x\n",
+ (unsigned) scp, (unsigned) scp->scsi_done);
+
+ boardp = ASC_BOARDP(scp->host);
+ /* 'device' may be NULL; the flow-control code below checks it. */
+ device = boardp->device[scp->target];
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Build and execute Narrow Board request.
+ */
+
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+
+ /*
+ * Build Asc Library request structure using the
+ * global structures 'asc_scsi_req' and 'asc_sg_head'.
+ *
+ * asc_build_req() can not return ASC_BUSY.
+ */
+ if (asc_build_req(boardp, scp) == ASC_ERROR) {
+ ASC_STATS(scp->host, build_error);
+ return ASC_ERROR;
+ }
+
+ /*
+ * Execute the command. If there is no error, add the command
+ * to the active queue.
+ */
+ switch (ret = AscExeScsiQueue(asc_dvc_varp, &asc_scsi_q)) {
+ case ASC_NOERROR:
+ ASC_STATS(scp->host, exe_noerror);
+ /*
+ * Increment monotonically increasing per device successful
+ * request counter. Wrapping doesn't matter.
+ */
+ boardp->reqcnt[scp->target]++;
+
+#if ASC_QUEUE_FLOW_CONTROL
+ /*
+ * Conditionally increment the device queue depth.
+ *
+ * If no error occurred and there have been 100 consecutive
+ * successful requests and the current queue depth is less
+ * than the maximum queue depth, then increment the current
+ * queue depth.
+ */
+ if (boardp->nerrcnt[scp->target]++ > 100) {
+ boardp->nerrcnt[scp->target] = 0;
+ if (device != NULL &&
+ (device->queue_curr_depth < device->queue_depth) &&
+ (!(boardp->queue_full &
+ ADV_TID_TO_TIDMASK(scp->target)) ||
+ (boardp->queue_full_cnt[scp->target] >
+ device->queue_curr_depth))) {
+ device->queue_curr_depth++;
+ }
+ }
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ asc_enqueue(&boardp->active, scp, ASC_BACK);
+ ASC_DBG(1,
+ "asc_execute_scsi_cmnd: AscExeScsiQueue(), ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ /* Caller must enqueue request and retry later. */
+ ASC_STATS(scp->host, exe_busy);
+#if ASC_QUEUE_FLOW_CONTROL
+ /*
+ * Clear consecutive no error counter and if possible decrement
+ * queue depth.
+ */
+ boardp->nerrcnt[scp->target] = 0;
+ if (device != NULL && device->queue_curr_depth > 1) {
+ device->queue_curr_depth--;
+ }
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ break;
+ case ASC_ERROR:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AscExeScsiQueue() ASC_ERROR, err_code %x\n",
+ boardp->id, asc_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_error);
+#if ASC_QUEUE_FLOW_CONTROL
+ /* Clear consecutive no error counter. */
+ boardp->nerrcnt[scp->target] = 0;
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ default:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AscExeScsiQueue() unknown, err_code %x\n",
+ boardp->id, asc_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_unknown);
+#if ASC_QUEUE_FLOW_CONTROL
+ /* Clear consecutive no error counter. */
+ boardp->nerrcnt[scp->target] = 0;
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ }
+ } else {
+ /*
+ * Build and execute Wide Board request.
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+
+ /*
+ * Build and get a pointer to an Adv Library request structure.
+ *
+ * If the request is successfully built then send it below,
+ * otherwise return with an error.
+ */
+ switch (adv_build_req(boardp, scp, &adv_scsiqp)) {
+ case ASC_NOERROR:
+ ASC_DBG(3, "asc_execute_scsi_cmnd: adv_build_req ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ ASC_DBG(1, "asc_execute_scsi_cmnd: adv_build_req ASC_BUSY\n");
+ return ASC_BUSY;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "asc_execute_scsi_cmnd: adv_build_req ASC_ERROR\n");
+ ASC_STATS(scp->host, build_error);
+ return ASC_ERROR;
+ }
+
+ /*
+ * Execute the command. If there is no error, add the command
+ * to the active queue.
+ */
+ switch (ret = AdvExeScsiQueue(adv_dvc_varp, adv_scsiqp)) {
+ case ASC_NOERROR:
+ ASC_STATS(scp->host, exe_noerror);
+ /*
+ * Increment monotonically increasing per device successful
+ * request counter. Wrapping doesn't matter.
+ */
+ boardp->reqcnt[scp->target]++;
+ asc_enqueue(&boardp->active, scp, ASC_BACK);
+ ASC_DBG(1,
+ "asc_execute_scsi_cmnd: AdvExeScsiQueue(), ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ /* Caller must enqueue request and retry later. */
+ ASC_STATS(scp->host, exe_busy);
+ break;
+ case ASC_ERROR:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AdvExeScsiQueue() ASC_ERROR, err_code %x\n",
+ boardp->id, adv_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_error);
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ default:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AdvExeScsiQueue() unknown, err_code %x\n",
+ boardp->id, adv_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_unknown);
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ }
+ }
+
+ ASC_DBG(1, "asc_execute_scsi_cmnd: end\n");
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ /* 'ret' is the library execute status from one of the two paths. */
+ return ret;
+}
+
+/*
+ * Build a request structure for the Asc Library (Narrow Board).
+ *
+ * The global structures 'asc_scsi_q' and 'asc_sg_head' are
+ * used to build the request.
+ *
+ * If an error occurs, then return ASC_ERROR.
+ *
+ * NOTE(review): because the file-scope 'asc_scsi_q'/'asc_sg_head'
+ * buffers are shared, callers must serialize calls — the caller
+ * asserts interrupts are disabled; confirm that is the only
+ * serialization mechanism.
+ */
+STATIC int
+asc_build_req(asc_board_t *boardp, Scsi_Cmnd *scp)
+{
+ /*
+ * Mutually exclusive access is required to 'asc_scsi_q' and
+ * 'asc_sg_head' until after the request is started.
+ */
+ memset(&asc_scsi_q, 0, sizeof(ASC_SCSI_Q));
+
+ /*
+ * Point the ASC_SCSI_Q to the 'Scsi_Cmnd'.
+ */
+ asc_scsi_q.q2.srb_ptr = (ulong) scp;
+
+ /*
+ * Build the ASC_SCSI_Q request.
+ */
+ /* Defensively clamp an overlong CDB to the library maximum. */
+ ASC_ASSERT(scp->cmd_len <= ASC_MAX_CDB_LEN);
+ if (scp->cmd_len > ASC_MAX_CDB_LEN) {
+ scp->cmd_len = ASC_MAX_CDB_LEN;
+ }
+ asc_scsi_q.cdbptr = &scp->cmnd[0];
+ asc_scsi_q.q2.cdb_len = scp->cmd_len;
+ asc_scsi_q.q1.target_id = ASC_TID_TO_TARGET_ID(scp->target);
+ asc_scsi_q.q1.target_lun = scp->lun;
+ asc_scsi_q.q2.target_ix = ASC_TIDLUN_TO_IX(scp->target, scp->lun);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_scsi_q.q1.sense_addr = (ulong) &scp->sense_buffer[0];
+#else /* version >= v2.0.0 */
+ asc_scsi_q.q1.sense_addr = virt_to_bus(&scp->sense_buffer[0]);
+#endif /* version >= v2.0.0 */
+ asc_scsi_q.q1.sense_len = sizeof(scp->sense_buffer);
+
+ /*
+ * If there are any outstanding requests for the current target,
+ * then every 255th request send an ORDERED request. This heuristic
+ * tries to retain the benefit of request sorting while preventing
+ * request starvation. 255 is the max number of tags or pending commands
+ * a device may have outstanding.
+ *
+ * The request count is incremented below for every successfully
+ * started request.
+ *
+ */
+ if ((boardp->dvc_var.asc_dvc_var.cur_dvc_qng[scp->target] > 0) &&
+ (boardp->reqcnt[scp->target] % 255) == 0) {
+ asc_scsi_q.q2.tag_code = M2_QTAG_MSG_ORDERED;
+ } else {
+ asc_scsi_q.q2.tag_code = M2_QTAG_MSG_SIMPLE;
+ }
+
+ /*
+ * Build ASC_SCSI_Q for a contiguous buffer or a scatter-gather
+ * buffer command.
+ */
+ if (scp->use_sg == 0) {
+ /*
+ * CDB request of single contiguous buffer.
+ */
+ ASC_STATS(scp->host, cont_cnt);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_scsi_q.q1.data_addr = (ulong) scp->request_buffer;
+#else /* version >= v2.0.0 */
+ asc_scsi_q.q1.data_addr = virt_to_bus(scp->request_buffer);
+#endif /* version >= v2.0.0 */
+ asc_scsi_q.q1.data_cnt = scp->request_bufflen;
+ /* Transfer statistics are kept in 512-byte sector units. */
+ ASC_STATS_ADD(scp->host, cont_xfer,
+ ASC_CEILING(scp->request_bufflen, 512));
+ asc_scsi_q.q1.sg_queue_cnt = 0;
+ asc_scsi_q.sg_head = NULL;
+ } else {
+ /*
+ * CDB scatter-gather request list.
+ */
+ int sgcnt;
+ struct scatterlist *slp;
+
+ if (scp->use_sg > scp->host->sg_tablesize) {
+ ASC_PRINT3(
+"asc_build_req: board %d: use_sg %d > sg_tablesize %d\n",
+ boardp->id, scp->use_sg, scp->host->sg_tablesize);
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ return ASC_ERROR;
+ }
+
+ ASC_STATS(scp->host, sg_cnt);
+
+ /*
+ * Use global ASC_SG_HEAD structure and set the ASC_SCSI_Q
+ * structure to point to it.
+ */
+ memset(&asc_sg_head, 0, sizeof(ASC_SG_HEAD));
+
+ asc_scsi_q.q1.cntl |= QC_SG_HEAD;
+ asc_scsi_q.sg_head = &asc_sg_head;
+ asc_scsi_q.q1.data_cnt = 0;
+ asc_scsi_q.q1.data_addr = 0;
+ asc_sg_head.entry_cnt = asc_scsi_q.q1.sg_queue_cnt = scp->use_sg;
+ ASC_STATS_ADD(scp->host, sg_elem, asc_sg_head.entry_cnt);
+
+ /*
+ * Convert scatter-gather list into ASC_SG_HEAD list.
+ */
+ slp = (struct scatterlist *) scp->request_buffer;
+ for (sgcnt = 0; sgcnt < scp->use_sg; sgcnt++, slp++) {
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_sg_head.sg_list[sgcnt].addr = (ulong) slp->address;
+#else /* version >= v2.0.0 */
+ asc_sg_head.sg_list[sgcnt].addr = virt_to_bus(slp->address);
+#endif /* version >= v2.0.0 */
+ asc_sg_head.sg_list[sgcnt].bytes = slp->length;
+ ASC_STATS_ADD(scp->host, sg_xfer, ASC_CEILING(slp->length, 512));
+ }
+ }
+
+ ASC_DBG_PRT_ASC_SCSI_Q(2, &asc_scsi_q);
+ ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
+
+ return ASC_NOERROR;
+}
+
+/*
+ * Build a request structure for the Adv Library (Wide Board).
+ *
+ * boardp - board the request is issued through
+ * scp - SCSI mid-level request to translate
+ * adv_scsiqpp - output: set to the built ADV_SCSI_REQ_Q on success
+ *
+ * If an adv_req_t can not be allocated to issue the request,
+ * then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
+ * On success return ASC_NOERROR and set '*adv_scsiqpp'.
+ */
+STATIC int
+adv_build_req(asc_board_t *boardp, Scsi_Cmnd *scp,
+    ADV_SCSI_REQ_Q **adv_scsiqpp)
+{
+    adv_req_t *reqp;
+    ADV_SCSI_REQ_Q *scsiqp;
+    int i;
+
+    /*
+     * Allocate an adv_req_t structure from the board free list to
+     * execute the command.
+     */
+    if (boardp->adv_reqp == NULL) {
+        ASC_DBG(1, "adv_build_req: no free adv_req_t\n");
+        ASC_STATS(scp->host, adv_build_noreq);
+        return ASC_BUSY;
+    } else {
+        reqp = boardp->adv_reqp;
+        boardp->adv_reqp = reqp->next_reqp;
+        reqp->next_reqp = NULL;
+    }
+
+    /*
+     * Get 4-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers.
+     */
+    scsiqp = (ADV_SCSI_REQ_Q *) ADV_DWALIGN(&reqp->scsi_req_q);
+    memset(scsiqp, 0, sizeof(ADV_SCSI_REQ_Q));
+
+    /*
+     * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure.
+     */
+    scsiqp->srb_ptr = (ulong) reqp;
+
+    /*
+     * Set the adv_req_t 'cmndp' to point to the Scsi_Cmnd structure.
+     */
+    reqp->cmndp = scp;
+
+    /*
+     * Build the ADV_SCSI_REQ_Q request.
+     */
+
+    /*
+     * Set CDB length and copy it to the request structure.
+     * Defensively clamp an overlong CDB to the library maximum.
+     */
+    ASC_ASSERT(scp->cmd_len <= ASC_MAX_CDB_LEN);
+    if (scp->cmd_len > ASC_MAX_CDB_LEN) {
+        scp->cmd_len = ASC_MAX_CDB_LEN;
+    }
+    scsiqp->cdb_len = scp->cmd_len;
+    for (i = 0; i < scp->cmd_len; i++) {
+        scsiqp->cdb[i] = scp->cmnd[i];
+    }
+
+    scsiqp->target_id = scp->target;
+    scsiqp->target_lun = scp->lun;
+
+    scsiqp->vsense_addr = (ulong) &scp->sense_buffer[0];
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+    scsiqp->sense_addr = (ulong) &scp->sense_buffer[0];
+#else /* version >= v2.0.0 */
+    scsiqp->sense_addr = virt_to_bus(&scp->sense_buffer[0]);
+#endif /* version >= v2.0.0 */
+    scsiqp->sense_len = sizeof(scp->sense_buffer);
+
+    /*
+     * Build ADV_SCSI_REQ_Q for a contiguous buffer or a scatter-gather
+     * buffer command.
+     */
+    scsiqp->data_cnt = scp->request_bufflen;
+    scsiqp->vdata_addr = (ulong) scp->request_buffer;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+    scsiqp->data_addr = (ulong) scp->request_buffer;
+#else /* version >= v2.0.0 */
+    scsiqp->data_addr = virt_to_bus(scp->request_buffer);
+#endif /* version >= v2.0.0 */
+
+    if (scp->use_sg == 0) {
+        /*
+         * CDB request of single contiguous buffer.
+         */
+        reqp->sgblkp = NULL;
+        scsiqp->sg_list_ptr = NULL;
+        ASC_STATS(scp->host, cont_cnt);
+        ASC_STATS_ADD(scp->host, cont_xfer,
+            ASC_CEILING(scp->request_bufflen, 512));
+    } else {
+        /*
+         * CDB scatter-gather request list.
+         */
+        if (scp->use_sg > ADV_MAX_SG_LIST) {
+            /*
+             * Fix: the message reports the ADV_MAX_SG_LIST limit, so
+             * pass ADV_MAX_SG_LIST (the original passed sg_tablesize,
+             * which is not the value named in the format string).
+             */
+            ASC_PRINT3(
+"adv_build_req: board %d: use_sg %d > ADV_MAX_SG_LIST %d\n",
+                boardp->id, scp->use_sg, ADV_MAX_SG_LIST);
+            scp->result = HOST_BYTE(DID_ERROR);
+            asc_enqueue(&boardp->done, scp, ASC_BACK);
+
+            /*
+             * Free the 'adv_req_t' structure by adding it back to the
+             * board free list.
+             */
+            reqp->next_reqp = boardp->adv_reqp;
+            boardp->adv_reqp = reqp;
+
+            return ASC_ERROR;
+        }
+
+        /*
+         * Allocate an 'adv_sgblk_t' structure from the board to
+         * execute the command.
+         */
+        if (boardp->adv_sgblkp == NULL) {
+            ASC_DBG(1, "adv_build_req: no free adv_sgblk_t\n");
+            ASC_STATS(scp->host, adv_build_nosg);
+            /*
+             * Free the 'adv_req_t' structure by adding it back to the
+             * board free list.
+             */
+            reqp->next_reqp = boardp->adv_reqp;
+            boardp->adv_reqp = reqp;
+            return ASC_BUSY;
+        } else {
+            reqp->sgblkp = boardp->adv_sgblkp;
+            boardp->adv_sgblkp = reqp->sgblkp->next_sgblkp;
+            reqp->sgblkp->next_sgblkp = NULL;
+        }
+
+        /*
+         * Build scatter-gather list.
+         */
+        scsiqp->sg_list_ptr = (ADV_SG_BLOCK *)
+            ADV_DWALIGN(&reqp->sgblkp->sg_block[0]);
+
+        memset(scsiqp->sg_list_ptr, 0, sizeof(ADV_SG_BLOCK) *
+            (ADV_NUM_SG_BLOCK + ADV_NUM_PAGE_CROSSING));
+
+        if (adv_get_sglist(&boardp->dvc_var.adv_dvc_var, scsiqp, scp) ==
+            ADV_ERROR) {
+
+            /*
+             * Free the adv_sgblk_t structure, if any, by adding it back
+             * to the board free list.
+             */
+            ASC_ASSERT(reqp->sgblkp != NULL);
+            reqp->sgblkp->next_sgblkp = boardp->adv_sgblkp;
+            boardp->adv_sgblkp = reqp->sgblkp;
+
+            /*
+             * Free the adv_req_t structure by adding it back to the
+             * board free list.
+             */
+            reqp->next_reqp = boardp->adv_reqp;
+            boardp->adv_reqp = reqp;
+
+            /*
+             * NOTE(review): this path returns ADV_ERROR while the
+             * header documents ASC_ERROR — presumably the same value;
+             * confirm before changing either.
+             */
+            return ADV_ERROR;
+        }
+
+        ASC_STATS(scp->host, sg_cnt);
+        ASC_STATS_ADD(scp->host, sg_elem, scp->use_sg);
+    }
+
+    ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
+    ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
+
+    *adv_scsiqpp = scsiqp;
+
+    return ASC_NOERROR;
+}
+
+/*
+ * Build scatter-gather list for Adv Library (Wide Board).
+ *
+ * adv_dvc_varp - Adv Library device state (unused here beyond the call)
+ * scsiqp - request whose 'sg_list_ptr' points at the SG block storage
+ * scp - mid-level request supplying the scatterlist and element count
+ *
+ * Return:
+ * ADV_SUCCESS(1) - SG List successfully created
+ * ADV_ERROR(-1) - SG List creation failed
+ */
+STATIC int
+adv_get_sglist(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp,
+ Scsi_Cmnd *scp)
+{
+ ADV_SG_BLOCK *sg_block; /* virtual address of a SG */
+ ulong sg_block_next_addr; /* block and its next */
+ ulong sg_block_physical_addr;
+ int sg_block_index, i; /* how many SG entries */
+ struct scatterlist *slp;
+ int sg_elem_cnt;
+
+ slp = (struct scatterlist *) scp->request_buffer;
+ sg_elem_cnt = scp->use_sg;
+
+ sg_block = scsiqp->sg_list_ptr;
+ sg_block_next_addr = (ulong) sg_block; /* allow math operation */
+ sg_block_physical_addr =
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ (ulong) scsiqp->sg_list_ptr;
+#else /* version >= v2.0.0 */
+ virt_to_bus(scsiqp->sg_list_ptr);
+#endif /* version >= v2.0.0 */
+ ADV_ASSERT(ADV_DWALIGN(sg_block_physical_addr) ==
+ sg_block_physical_addr);
+ scsiqp->sg_real_addr = sg_block_physical_addr;
+
+ /*
+ * Fill SG blocks of NO_OF_SG_PER_BLOCK entries each, chaining the
+ * blocks through 'sg_ptr' (a bus/physical address, per the
+ * virt_to_bus conversion above). The virtual and physical cursors
+ * advance in lock step by sizeof(ADV_SG_BLOCK), which assumes the
+ * blocks are contiguous in memory.
+ */
+ sg_block_index = 0;
+ do
+ {
+ sg_block->first_entry_no = sg_block_index;
+ for (i = 0; i < NO_OF_SG_PER_BLOCK; i++)
+ {
+ sg_block->sg_list[i].sg_addr =
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ (ulong) slp->address;
+#else /* version >= v2.0.0 */
+ virt_to_bus(slp->address);
+#endif /* version >= v2.0.0 */
+ sg_block->sg_list[i].sg_count = slp->length;
+ ASC_STATS_ADD(scp->host, sg_xfer, ASC_CEILING(slp->length, 512));
+
+ if (--sg_elem_cnt == 0)
+ { /* last entry, get out */
+ scsiqp->sg_entry_cnt = sg_block_index + i + 1;
+ sg_block->last_entry_no = sg_block_index + i;
+ sg_block->sg_ptr = 0L; /* next link = NULL */
+ return ADV_SUCCESS;
+ }
+ slp++;
+ }
+ sg_block_next_addr += sizeof(ADV_SG_BLOCK);
+ sg_block_physical_addr += sizeof(ADV_SG_BLOCK);
+ ADV_ASSERT(ADV_DWALIGN(sg_block_physical_addr) ==
+ sg_block_physical_addr);
+
+ sg_block_index += NO_OF_SG_PER_BLOCK;
+ sg_block->sg_ptr = (ADV_SG_BLOCK *) sg_block_physical_addr;
+ sg_block->last_entry_no = sg_block_index - 1;
+ sg_block = (ADV_SG_BLOCK *) sg_block_next_addr; /* virtual addr */
+ }
+ while (1);
+ /* NOTREACHED: the loop only exits via the return above. */
+}
+
+/*
+ * asc_isr_callback() - Second Level Interrupt Handler called by AscISR().
+ *
+ * Interrupt callback function for the Narrow SCSI Asc Library.
+ *
+ * asc_dvc_varp - Asc Library device state for the interrupting board
+ * qdonep - completion information for the finished request
+ *
+ * Runs with interrupts disabled. The completed request is removed
+ * from the board's 'active' queue, its 'result' is filled in from
+ * the library status, and it is placed on the board's 'done' queue;
+ * the request's done function is invoked later from
+ * advansys_interrupt().
+ */
+STATIC void
+asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
+{
+    asc_board_t *boardp;
+    Scsi_Cmnd *scp;
+    struct Scsi_Host *shp;
+    int underrun = ASC_FALSE;
+    int i;
+
+    ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+    ASC_DBG2(1, "asc_isr_callback: asc_dvc_varp %x, qdonep %x\n",
+        (unsigned) asc_dvc_varp, (unsigned) qdonep);
+    ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep);
+
+    /*
+     * Get the Scsi_Cmnd structure and Scsi_Host structure for the
+     * command that has been completed.
+     */
+    scp = (Scsi_Cmnd *) qdonep->d2.srb_ptr;
+    ASC_DBG1(1, "asc_isr_callback: scp %x\n", (unsigned) scp);
+
+    if (scp == NULL) {
+        ASC_PRINT("asc_isr_callback: scp is NULL\n");
+        return;
+    }
+    ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
+
+    /*
+     * If the request's host pointer is not valid, display a
+     * message and return.
+     */
+    shp = scp->host;
+    for (i = 0; i < asc_board_count; i++) {
+        if (asc_host[i] == shp) {
+            break;
+        }
+    }
+    if (i == asc_board_count) {
+        ASC_PRINT2("asc_isr_callback: scp %x has bad host pointer, host %x\n",
+            (unsigned) scp, (unsigned) shp);
+        return;
+    }
+
+    ASC_STATS(shp, callback);
+    ASC_DBG1(1, "asc_isr_callback: shp %x\n", (unsigned) shp);
+
+    /*
+     * If the request isn't found on the active queue, it may
+     * have been removed to handle a reset or abort request.
+     * Display a message and return.
+     */
+    boardp = ASC_BOARDP(shp);
+    ASC_ASSERT(asc_dvc_varp == &boardp->dvc_var.asc_dvc_var);
+    if (asc_rmqueue(&boardp->active, scp) == ASC_FALSE) {
+        ASC_PRINT2("asc_isr_callback: board %d: scp %x not on active queue\n",
+            boardp->id, (unsigned) scp);
+        return;
+    }
+
+    /*
+     * Check for an underrun condition.
+     *
+     * Fix: the original expression ended in
+     * 'remain_bytes <= request_bufflen != 0', which parses as
+     * '(remain_bytes <= request_bufflen) != 0'. The trailing '!= 0'
+     * was redundant and misleading, so it has been removed; the
+     * truth value is unchanged.
+     */
+    if (scp->request_bufflen != 0 && qdonep->remain_bytes != 0 &&
+        qdonep->remain_bytes <= scp->request_bufflen) {
+        ASC_DBG1(1, "asc_isr_callback: underrun condition %u bytes\n",
+            (unsigned) qdonep->remain_bytes);
+        underrun = ASC_TRUE;
+    }
+
+    /*
+     * 'qdonep' contains the command's ending status.
+     */
+    switch (qdonep->d3.done_stat) {
+    case QD_NO_ERROR:
+        ASC_DBG(2, "asc_isr_callback: QD_NO_ERROR\n");
+        switch (qdonep->d3.host_stat) {
+        case QHSTA_NO_ERROR:
+            scp->result = 0;
+            break;
+        default:
+            /* QHSTA error occurred */
+            scp->result = HOST_BYTE(DID_ERROR);
+            break;
+        }
+
+        /*
+         * If an INQUIRY command completed successfully, then call
+         * the AscInquiryHandling() function to set-up the device.
+         */
+        if (scp->cmnd[0] == SCSICMD_Inquiry && scp->lun == 0 &&
+            (scp->request_bufflen - qdonep->remain_bytes) >= 8)
+        {
+            AscInquiryHandling(asc_dvc_varp, scp->target & 0x7,
+                (ASC_SCSI_INQUIRY *) scp->request_buffer);
+        }
+
+        /*
+         * If there was an underrun without any other error,
+         * set DID_UNDERRUN to indicate the underrun error.
+         *
+         * Note: There is no way yet to indicate the number
+         * of underrun bytes.
+         */
+        if (scp->result == 0 && underrun == ASC_TRUE) {
+            scp->result = HOST_BYTE(DID_UNDERRUN);
+        }
+        break;
+
+    case QD_WITH_ERROR:
+        ASC_DBG(2, "asc_isr_callback: QD_WITH_ERROR\n");
+        switch (qdonep->d3.host_stat) {
+        case QHSTA_NO_ERROR:
+            if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
+                ASC_DBG(2, "asc_isr_callback: SS_CHK_CONDITION\n");
+                ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
+                    sizeof(scp->sense_buffer));
+                /*
+                 * Note: The 'status_byte()' macro used by target drivers
+                 * defined in scsi.h shifts the status byte returned by
+                 * host drivers right by 1 bit. This is why target drivers
+                 * also use right shifted status byte definitions. For
+                 * instance target drivers use CHECK_CONDITION, defined to
+                 * 0x1, instead of the SCSI defined check condition value
+                 * of 0x2. Host drivers are supposed to return the status
+                 * byte as it is defined by SCSI.
+                 */
+                scp->result = DRIVER_BYTE(DRIVER_SENSE) |
+                    STATUS_BYTE(qdonep->d3.scsi_stat);
+            } else {
+                scp->result = STATUS_BYTE(qdonep->d3.scsi_stat);
+            }
+            break;
+
+        default:
+            /* QHSTA error occurred */
+            ASC_DBG1(1, "asc_isr_callback: host_stat %x\n",
+                qdonep->d3.host_stat);
+            scp->result = HOST_BYTE(DID_BAD_TARGET);
+            break;
+        }
+        break;
+
+    case QD_ABORTED_BY_HOST:
+        ASC_DBG(1, "asc_isr_callback: QD_ABORTED_BY_HOST\n");
+        scp->result = HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3.scsi_msg) |
+            STATUS_BYTE(qdonep->d3.scsi_stat);
+        break;
+
+    default:
+        ASC_DBG1(1, "asc_isr_callback: done_stat %x\n", qdonep->d3.done_stat);
+        scp->result = HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.scsi_msg) |
+            STATUS_BYTE(qdonep->d3.scsi_stat);
+        break;
+    }
+
+    /*
+     * If the 'init_tidmask' bit isn't already set for the target and the
+     * current request finished normally, then set the bit for the target
+     * to indicate that a device is present.
+     */
+    if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->target)) == 0 &&
+        qdonep->d3.done_stat == QD_NO_ERROR &&
+        qdonep->d3.host_stat == QHSTA_NO_ERROR) {
+        boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->target);
+    }
+
+    /*
+     * Because interrupts may be enabled by the 'Scsi_Cmnd' done
+     * function, add the command to the end of the board's done queue.
+     * The done function for the command will be called from
+     * advansys_interrupt().
+     */
+    asc_enqueue(&boardp->done, scp, ASC_BACK);
+
+    return;
+}
+
+/*
+ * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR().
+ *
+ * Callback function for the Wide SCSI Adv Library.
+ *
+ * adv_dvc_varp - Adv Library device state for the interrupting board
+ * scsiqp - completed ADV_SCSI_REQ_Q, whose 'srb_ptr' points back to
+ * the driver's adv_req_t tracking structure
+ */
+STATIC void
+adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
+{
+ asc_board_t *boardp;
+ adv_req_t *reqp;
+ Scsi_Cmnd *scp;
+ struct Scsi_Host *shp;
+ int underrun = ASC_FALSE;
+ int i;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_DBG2(1, "adv_isr_callback: adv_dvc_varp %x, scsiqp %x\n",
+ (unsigned) adv_dvc_varp, (unsigned) scsiqp);
+ ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
+
+ /*
+ * Get the adv_req_t structure for the command that has been
+ * completed. The adv_req_t structure actually contains the
+ * completed ADV_SCSI_REQ_Q structure.
+ */
+ reqp = (adv_req_t *) scsiqp->srb_ptr;
+ ASC_DBG1(1, "adv_isr_callback: reqp %x\n", (unsigned) reqp);
+ if (reqp == NULL) {
+ ASC_PRINT("adv_isr_callback: reqp is NULL\n");
+ return;
+ }
+
+ /*
+ * Get the Scsi_Cmnd structure and Scsi_Host structure for the
+ * command that has been completed.
+ *
+ * Note: The adv_req_t request structure and adv_sgblk_t structure,
+ * if any, are dropped, because a board structure pointer can not be
+ * determined.
+ */
+ scp = reqp->cmndp;
+ ASC_DBG1(1, "adv_isr_callback: scp %x\n", (unsigned) scp);
+ if (scp == NULL) {
+ ASC_PRINT("adv_isr_callback: scp is NULL; adv_req_t dropped.\n");
+ return;
+ }
+ ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
+
+ /*
+ * If the request's host pointer is not valid, display a message
+ * and return.
+ */
+ shp = scp->host;
+ for (i = 0; i < asc_board_count; i++) {
+ if (asc_host[i] == shp) {
+ break;
+ }
+ }
+ /*
+ * Note: If the host structure is not found, the adv_req_t request
+ * structure and adv_sgblk_t structure, if any, is dropped.
+ */
+ if (i == asc_board_count) {
+ ASC_PRINT2("adv_isr_callback: scp %x has bad host pointer, host %x\n",
+ (unsigned) scp, (unsigned) shp);
+ return;
+ }
+
+ ASC_STATS(shp, callback);
+ ASC_DBG1(1, "adv_isr_callback: shp %x\n", (unsigned) shp);
+
+ /*
+ * If the request isn't found on the active queue, it may have been
+ * removed to handle a reset or abort request. Display a message and
+ * return.
+ *
+ * Note: Because the structure may still be in use don't attempt
+ * to free the adv_req_t and adv_sgblk_t, if any, structures.
+ */
+ boardp = ASC_BOARDP(shp);
+ ASC_ASSERT(adv_dvc_varp == &boardp->dvc_var.adv_dvc_var);
+ if (asc_rmqueue(&boardp->active, scp) == ASC_FALSE) {
+ ASC_PRINT2("adv_isr_callback: board %d: scp %x not on active queue\n",
+ boardp->id, (unsigned) scp);
+ return;
+ }
+
+ /*
+ * Check for an underrun condition.
+ *
+ * NOTE(review): 'data_cnt' here is treated as the residual byte
+ * count remaining after completion — confirm against the Adv
+ * Library's ADV_SCSI_REQ_Q completion semantics.
+ */
+ if (scp->request_bufflen != 0 && scsiqp->data_cnt != 0) {
+ ASC_DBG1(1, "adv_isr_callback: underrun condition %lu bytes\n",
+ scsiqp->data_cnt);
+ underrun = ASC_TRUE;
+ }
+
+ /*
+ * 'done_status' contains the command's ending status.
+ */
+ switch (scsiqp->done_status) {
+ case QD_NO_ERROR:
+ ASC_DBG(2, "adv_isr_callback: QD_NO_ERROR\n");
+ switch (scsiqp->host_status) {
+ case QHSTA_NO_ERROR:
+ scp->result = 0;
+ break;
+ default:
+ /* QHSTA error occurred. */
+ ASC_DBG1(2, "adv_isr_callback: host_status %x\n",
+ scsiqp->host_status);
+ scp->result = HOST_BYTE(DID_ERROR);
+ break;
+ }
+ /*
+ * If there was an underrun without any other error,
+ * set DID_UNDERRUN to indicate the underrun error.
+ *
+ * Note: There is no way yet to indicate the number
+ * of underrun bytes.
+ */
+ if (scp->result == 0 && underrun == ASC_TRUE) {
+ scp->result = HOST_BYTE(DID_UNDERRUN);
+ }
+ break;
+
+ case QD_WITH_ERROR:
+ ASC_DBG(2, "adv_isr_callback: QD_WITH_ERROR\n");
+ switch (scsiqp->host_status) {
+ case QHSTA_NO_ERROR:
+ if (scsiqp->scsi_status == SS_CHK_CONDITION) {
+ ASC_DBG(2, "adv_isr_callback: SS_CHK_CONDITION\n");
+ ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
+ sizeof(scp->sense_buffer));
+ /*
+ * Note: The 'status_byte()' macro used by target drivers
+ * defined in scsi.h shifts the status byte returned by
+ * host drivers right by 1 bit. This is why target drivers
+ * also use right shifted status byte definitions. For
+ * instance target drivers use CHECK_CONDITION, defined to
+ * 0x1, instead of the SCSI defined check condition value
+ * of 0x2. Host drivers are supposed to return the status
+ * byte as it is defined by SCSI.
+ */
+ scp->result = DRIVER_BYTE(DRIVER_SENSE) |
+ STATUS_BYTE(scsiqp->scsi_status);
+ } else {
+ scp->result = STATUS_BYTE(scsiqp->scsi_status);
+ }
+ break;
+
+ default:
+ /* Some other QHSTA error occurred. */
+ ASC_DBG1(1, "adv_isr_callback: host_status %x\n",
+ scsiqp->host_status);
+ scp->result = HOST_BYTE(DID_BAD_TARGET);
+ break;
+ }
+ break;
+
+ case QD_ABORTED_BY_HOST:
+ ASC_DBG(1, "adv_isr_callback: QD_ABORTED_BY_HOST\n");
+ scp->result = HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
+ break;
+
+ default:
+ ASC_DBG1(1, "adv_isr_callback: done_status %x\n", scsiqp->done_status);
+ scp->result = HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
+ break;
+ }
+
+ /*
+ * If the 'init_tidmask' bit isn't already set for the target and the
+ * current request finished normally, then set the bit for the target
+ * to indicate that a device is present.
+ */
+ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->target)) == 0 &&
+ scsiqp->done_status == QD_NO_ERROR &&
+ scsiqp->host_status == QHSTA_NO_ERROR) {
+ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->target);
+ }
+
+ /*
+ * Because interrupts may be enabled by the 'Scsi_Cmnd' done
+ * function, add the command to the end of the board's done queue.
+ * The done function for the command will be called from
+ * advansys_interrupt().
+ */
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+
+ /*
+ * Free the adv_sgblk_t structure, if any, by adding it back
+ * to the board free list.
+ */
+ if (reqp->sgblkp != NULL) {
+ reqp->sgblkp->next_sgblkp = boardp->adv_sgblkp;
+ boardp->adv_sgblkp = reqp->sgblkp;
+ }
+
+ /*
+ * Free the adv_req_t structure used with the command by adding
+ * it back to the board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+
+ ASC_DBG(1, "adv_isr_callback: done\n");
+
+ return;
+}
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+/*
+ * Search for an AdvanSys PCI device in the PCI configuration space.
+ *
+ * On success the device's slot bookkeeping is advanced so the next
+ * call resumes past the slot just found; otherwise, if a bridge lies
+ * beyond the current bus, the scan moves to the next bus and retries
+ * recursively.
+ */
+ASC_INITFUNC(
+STATIC int
+asc_srch_pci_dev(PCI_DEVICE *pciDevice)
+)
+{
+    int ret;
+
+    ASC_DBG(2, "asc_srch_pci_dev: begin\n");
+
+    /* Determine and cache the configuration access method once. */
+    if (pci_scan_method == -1) {
+        pci_scan_method = asc_scan_method();
+    }
+    pciDevice->type = pci_scan_method;
+    ASC_DBG1(2, "asc_srch_pci_dev: type %d\n", pciDevice->type);
+
+    ret = asc_pci_find_dev(pciDevice);
+    ASC_DBG1(2, "asc_srch_pci_dev: asc_pci_find_dev() return %d\n", ret);
+    if (ret == PCI_DEVICE_FOUND) {
+        /* Resume any subsequent search just past the slot found. */
+        pciDevice->slotNumber = pciDevice->slotFound + 1;
+        pciDevice->startSlot = pciDevice->slotFound + 1;
+    } else if (pciDevice->bridge > pciDevice->busNumber) {
+        /* Nothing on this bus; step past the bridge and rescan. */
+        ASC_DBG2(2, "asc_srch_pci_dev: bridge %x, busNumber %x\n",
+            pciDevice->bridge, pciDevice->busNumber);
+        pciDevice->busNumber++;
+        pciDevice->slotNumber = 0;
+        pciDevice->startSlot = 0;
+        pciDevice->endSlot = 0x0f;
+        ret = asc_srch_pci_dev(pciDevice);
+        ASC_DBG1(2, "asc_srch_pci_dev: recursive call return %d\n", ret);
+    }
+
+    ASC_DBG1(2, "asc_srch_pci_dev: return %d\n", ret);
+    return ret;
+}
+
+/*
+ * Determine the PCI configuration space access method to be used.
+ */
+ASC_INITFUNC(
+STATIC uchar
+asc_scan_method(void)
+)
+{
+ ushort data;
+ PCI_DATA pciData;
+ uchar type;
+ uchar slot;
+
+ ASC_DBG(2, "asc_scan_method: begin\n");
+ memset(&pciData, 0, sizeof(pciData));
+ /*
+ * Probe configuration mechanisms 1 and 2 in turn: read a word from
+ * each slot and use the first mechanism that returns something other
+ * than all-ones (no device) or all-zeroes.
+ */
+ for (type = 1; type < 3; type++) {
+ pciData.type = type;
+ for (slot = 0; slot < PCI_MAX_SLOT; slot++) {
+ pciData.slot = slot;
+ data = asc_get_cfg_word(&pciData);
+ if ((data != 0xFFFF) && (data != 0x0000)) {
+ ASC_DBG2(4, "asc_scan_method: data %x, type %d\n", data, type);
+ return (type);
+ }
+ }
+ }
+ /* Neither mechanism responded; the loop leaves type == 3. */
+ ASC_DBG1(4, "asc_scan_method: type %d\n", type);
+ return (type);
+}
+
+/*
+ * Check for an AdvanSys PCI device in 'pciDevice'.
+ *
+ * Return PCI_DEVICE_FOUND if found, otherwise return PCI_DEVICE_NOT_FOUND.
+ */
+ASC_INITFUNC(
+STATIC int
+asc_pci_find_dev(PCI_DEVICE *pciDevice)
+)
+{
+ PCI_DATA pciData;
+ ushort vendorid, deviceid;
+ uchar classcode, subclass;
+ uchar lslot;
+
+ ASC_DBG(3, "asc_pci_find_dev: begin\n");
+ pciData.type = pciDevice->type;
+ pciData.bus = pciDevice->busNumber;
+ pciData.func = pciDevice->devFunc;
+ lslot = pciDevice->startSlot;
+ /* Scan slots [startSlot, endSlot) on the current bus. */
+ for (; lslot < pciDevice->endSlot; lslot++) {
+ pciData.slot = lslot;
+ pciData.offset = VENDORID_OFFSET;
+ vendorid = asc_get_cfg_word(&pciData);
+ ASC_DBG1(3, "asc_pci_find_dev: vendorid %x\n", vendorid);
+ if (vendorid != 0xffff) {
+ pciData.offset = DEVICEID_OFFSET;
+ deviceid = asc_get_cfg_word(&pciData);
+ ASC_DBG1(3, "asc_pci_find_dev: deviceid %x\n", deviceid);
+ if ((vendorid == ASC_PCI_VENDORID) &&
+ ((deviceid == ASC_PCI_DEVICE_ID_1100) ||
+ (deviceid == ASC_PCI_DEVICE_ID_1200) ||
+ (deviceid == ASC_PCI_DEVICE_ID_1300) ||
+ (deviceid == ASC_PCI_DEVICE_ID_2300))) {
+ pciDevice->slotFound = lslot;
+ ASC_DBG(3, "asc_pci_find_dev: PCI_DEVICE_FOUND\n");
+ return PCI_DEVICE_FOUND;
+ } else {
+ /*
+ * Not an AdvanSys device: count PCI-to-PCI bridges so
+ * the caller can continue the search on subordinate
+ * buses.
+ *
+ * NOTE(review): the bitwise '&' tests match any class or
+ * subclass with these bits set rather than doing an exact
+ * compare -- confirm against the PCI class code values.
+ */
+ pciData.offset = SUBCLASS_OFFSET;
+ subclass = asc_get_cfg_byte(&pciData);
+ pciData.offset = CLASSCODE_OFFSET;
+ classcode = asc_get_cfg_byte(&pciData);
+ if ((classcode & PCI_BASE_CLASS_BRIDGE_DEVICE) &&
+ (subclass & PCI_SUB_CLASS_PCI_TO_PCI_BRIDGE_CONTROLLER)) {
+ pciDevice->bridge++;
+ }
+ ASC_DBG2(3, "asc_pci_find_dev: subclass %x, classcode %x\n",
+ subclass, classcode);
+ }
+ }
+ }
+ return PCI_DEVICE_NOT_FOUND;
+}
+
+/*
+ * Read PCI configuration data into 'pciConfig'.
+ */
+ASC_INITFUNC(
+STATIC void
+asc_get_pci_cfg(PCI_DEVICE *pciDevice, PCI_CONFIG_SPACE *pciConfig)
+)
+{
+ PCI_DATA pciData;
+ uchar counter;
+ uchar *localConfig;
+
+ ASC_DBG1(4, "asc_get_pci_cfg: slotFound %d\n ",
+ pciDevice->slotFound);
+
+ pciData.type = pciDevice->type;
+ pciData.bus = pciDevice->busNumber;
+ pciData.slot = pciDevice->slotFound;
+ pciData.func = pciDevice->devFunc;
+ localConfig = (uchar *) pciConfig;
+
+ /*
+ * Copy the device's entire configuration space into 'pciConfig',
+ * one byte at a time.
+ */
+ for (counter = 0; counter < sizeof(PCI_CONFIG_SPACE); counter++) {
+ pciData.offset = counter;
+ *localConfig = asc_get_cfg_byte(&pciData);
+ ASC_DBG1(4, "asc_get_pci_cfg: byte %x\n", *localConfig);
+ localConfig++;
+ }
+ ASC_DBG1(4, "asc_get_pci_cfg: counter %d\n", counter);
+}
+
+/*
+ * Read a word (16 bits) from the PCI configuration space.
+ *
+ * The configuration mechanism is checked for the correct access method.
+ */
+ASC_INITFUNC(
+STATIC ushort
+asc_get_cfg_word(PCI_DATA *pciData)
+)
+{
+ ushort tmp;
+ ulong address;
+ ulong lbus = pciData->bus;
+ ulong lslot = pciData->slot;
+ ulong lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ulong t1CF8, t1CFC;
+
+ ASC_DBG4(4, "asc_get_cfg_word: type %d, bus %lu, slot %lu, func %lu\n",
+ pciData->type, lbus, lslot, lfunc);
+
+ /*
+ * Check type of configuration mechanism.
+ */
+ if (pciData->type == 2) {
+ /*
+ * Save registers to be restored later.
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * Write the bus and enable registers.
+ */
+ /* set for type 1 cycle, if needed */
+ outp(0xCFA, pciData->bus);
+ /* set the function number */
+ outp(0xCF8, 0x10 | (pciData->func << 1)) ;
+
+ /*
+ * Read the configuration space type 2 locations.
+ */
+ tmp = (ushort) inpw(0xC000 | ((pciData->slot << 8) + pciData->offset));
+
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ outp(0xCF8, t2CF8); /* restore config space enable register */
+ } else {
+ /*
+ * Type 1 or 3 configuration mechanism.
+ *
+ * Save the CONFIG_ADDRESS and CONFIG_DATA register values.
+ */
+ t1CF8 = inpl(0xCF8);
+ t1CFC = inpl(0xCFC);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>,
+ * func = <10:8>, reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) |
+ (lfunc << 8) | (pciData->offset & 0xFC) | 0x80000000L);
+
+ /*
+ * Write out the address to CONFIG_ADDRESS.
+ */
+ outpl(0xCF8, address);
+
+ /*
+ * Read in word from CONFIG_DATA, shifting the 32-bit value to
+ * select the word lane indicated by bit 1 of the offset.
+ */
+ tmp = (ushort) ((inpl(0xCFC) >>
+ ((pciData->offset & 2) * 8)) & 0xFFFF);
+
+ /*
+ * Restore registers.
+ */
+ outpl(0xCF8, t1CF8);
+ outpl(0xCFC, t1CFC);
+ }
+ ASC_DBG1(4, "asc_get_cfg_word: config data: %x\n", tmp);
+ return tmp;
+}
+
+/*
+ * Reads a byte from the PCI configuration space.
+ *
+ * The configuration mechanism is checked for the correct access method.
+ */
+ASC_INITFUNC(
+STATIC uchar
+asc_get_cfg_byte(PCI_DATA *pciData)
+)
+{
+ uchar tmp;
+ ulong address;
+ ulong lbus = pciData->bus, lslot = pciData->slot, lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ulong t1CF8, t1CFC;
+
+ ASC_DBG1(4, "asc_get_cfg_byte: type: %d\n", pciData->type);
+
+ /*
+ * Check type of configuration mechanism.
+ */
+ if (pciData->type == 2) {
+ /*
+ * Save registers to be restored later.
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * Write the bus and enable registers.
+ */
+ /* set for type 1 cycle, if needed */
+ outp(0xCFA, pciData->bus);
+ /* set the function number */
+ outp(0xCF8, 0x10 | (pciData->func << 1));
+
+ /*
+ * Read configuration space type 2 locations.
+ */
+ tmp = inp(0xC000 | ((pciData->slot << 8) + pciData->offset));
+
+ /*
+ * Restore registers.
+ */
+ outp(0xCF8, t2CF8); /* restore the enable register */
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ } else {
+ /*
+ * Type 1 or 3 configuration mechanism.
+ *
+ * Save CONFIG_ADDRESS and CONFIG_DATA register values.
+ */
+ t1CF8 = inpl(0xCF8);
+ t1CFC = inpl(0xCFC);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>, func = <10:8>,
+ * reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) |
+ (lfunc << 8) | (pciData->offset & 0xFC) | 0x80000000L);
+
+ /*
+ * Write out address to CONFIG_ADDRESS.
+ */
+ outpl(0xCF8, address);
+
+ /*
+ * Read in the byte from CONFIG_DATA, shifting the 32-bit value
+ * to select the byte lane indicated by the low offset bits.
+ */
+ tmp = (uchar) ((inpl(0xCFC) >> ((pciData->offset & 3) * 8)) & 0xFF);
+
+ /*
+ * Restore registers.
+ */
+ outpl(0xCF8, t1CF8);
+ outpl(0xCFC, t1CFC);
+ }
+ ASC_DBG1(4, "asc_get_cfg_byte: config data: %x\n", tmp);
+ return tmp;
+}
+
+/*
+ * Write a byte to the PCI configuration space.
+ */
+ASC_INITFUNC(
+STATIC void
+asc_put_cfg_byte(PCI_DATA *pciData, uchar byte_data)
+)
+{
+ ulong tmpl;
+ ulong address;
+ ulong lbus = pciData->bus, lslot = pciData->slot, lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ulong t1CF8, t1CFC;
+
+ ASC_DBG2(4, "asc_put_cfg_byte: type: %d, byte_data %x\n",
+ pciData->type, byte_data);
+
+ /*
+ * Check type of configuration mechanism.
+ */
+ if (pciData->type == 2) {
+
+ /*
+ * Save registers to be restored later.
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * Write bus and enable registers.
+ */
+ outp(0xCFA, pciData->bus);
+
+ /*
+ * Set the function number.
+ */
+ outp(0xCF8, 0x10 | (pciData->func << 1));
+
+ /*
+ * Write the configuration space type 2 locations.
+ */
+ outp(0xC000 | ((pciData->slot << 8) + pciData->offset), byte_data);
+
+ /*
+ * Restore registers.
+ */
+ outp(0xCF8, t2CF8); /* restore the enable register */
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ } else {
+
+ /*
+ * Type 1 or 3 configuration mechanism.
+ *
+ * Save the CONFIG_ADDRESS and CONFIG_DATA register values.
+ */
+ t1CF8 = inpl(0xCF8);
+ t1CFC = inpl(0xCFC);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>, func = <10:8>,
+ * reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) | (lfunc << 8) |
+ (pciData->offset & 0xFC) | 0x80000000L);
+ /*
+ * Write out address to CONFIG_ADDRESS.
+ */
+ outpl(0xCF8, address);
+
+ /*
+ * Write the double word back to CONFIG_DATA, replacing only the
+ * target byte lane and preserving the bytes that are not written.
+ */
+ tmpl = inpl(0xCFC) & ~(0xFF << ((pciData->offset & 3) * 8));
+ outpl(0xCFC, tmpl | (byte_data << ((pciData->offset & 3) * 8)));
+
+ /*
+ * Restore registers.
+ */
+ outpl(0xCF8, t1CF8);
+ outpl(0xCFC, t1CFC);
+ }
+ ASC_DBG(4, "asc_put_cfg_byte: end\n");
+}
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+
+/*
+ * Add a 'REQP' to the end of specified queue. Set 'tidmask'
+ * to indicate a command is queued for the device.
+ *
+ * 'flag' may be either ASC_FRONT or ASC_BACK.
+ *
+ * 'REQPNEXT(reqp)' returns reqp's next pointer.
+ */
+STATIC void
+asc_enqueue(asc_queue_t *ascq, REQP reqp, int flag)
+{
+ int tid;
+
+ ASC_DBG3(3, "asc_enqueue: ascq %x, reqp %x, flag %d\n",
+ (unsigned) ascq, (unsigned) reqp, flag);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(reqp != NULL);
+ ASC_ASSERT(flag == ASC_FRONT || flag == ASC_BACK);
+ tid = REQPTID(reqp);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+ /* Links are stored through REQPNEXT() as 'unsigned char *', hence
+ the casts below. */
+ if (flag == ASC_FRONT) {
+ REQPNEXT(reqp) = (unsigned char *) ascq->q_first[tid];
+ ascq->q_first[tid] = reqp;
+ /* If the queue was empty, set the last pointer. */
+ if (ascq->q_last[tid] == NULL) {
+ ascq->q_last[tid] = reqp;
+ }
+ } else { /* ASC_BACK */
+ if (ascq->q_last[tid] != NULL) {
+ REQPNEXT(ascq->q_last[tid]) = (unsigned char *) reqp;
+ }
+ ascq->q_last[tid] = reqp;
+ REQPNEXT(reqp) = NULL;
+ /* If the queue was empty, set the first pointer. */
+ if (ascq->q_first[tid] == NULL) {
+ ascq->q_first[tid] = reqp;
+ }
+ }
+ /* The queue has at least one entry, set its bit. */
+ ascq->q_tidmask |= ADV_TID_TO_TIDMASK(tid);
+#ifdef ADVANSYS_STATS
+ /* Maintain request queue statistics. */
+ ascq->q_tot_cnt[tid]++;
+ ascq->q_cur_cnt[tid]++;
+ if (ascq->q_cur_cnt[tid] > ascq->q_max_cnt[tid]) {
+ ascq->q_max_cnt[tid] = ascq->q_cur_cnt[tid];
+ ASC_DBG2(2, "asc_enqueue: new q_max_cnt[%d] %d\n",
+ tid, ascq->q_max_cnt[tid]);
+ }
+ REQPTIME(reqp) = REQTIMESTAMP();
+#endif /* ADVANSYS_STATS */
+ ASC_DBG1(3, "asc_enqueue: reqp %x\n", (unsigned) reqp);
+ return;
+}
+
+/*
+ * Return first queued 'REQP' on the specified queue for
+ * the specified target device. Clear the 'tidmask' bit for
+ * the device if no more commands are left queued for it.
+ *
+ * 'REQPNEXT(reqp)' returns reqp's next pointer.
+ */
+STATIC REQP
+asc_dequeue(asc_queue_t *ascq, int tid)
+{
+ REQP reqp;
+
+ ASC_DBG2(3, "asc_dequeue: ascq %x, tid %d\n", (unsigned) ascq, tid);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+ /* NULL is returned when no request is queued for 'tid'. */
+ if ((reqp = ascq->q_first[tid]) != NULL) {
+ ASC_ASSERT(ascq->q_tidmask & ADV_TID_TO_TIDMASK(tid));
+ ascq->q_first[tid] = (REQP) REQPNEXT(reqp);
+ /* If the queue is empty, clear its bit and the last pointer. */
+ if (ascq->q_first[tid] == NULL) {
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(tid);
+ ASC_ASSERT(ascq->q_last[tid] == reqp);
+ ascq->q_last[tid] = NULL;
+ }
+#ifdef ADVANSYS_STATS
+ /* Maintain request queue statistics. */
+ ascq->q_cur_cnt[tid]--;
+ ASC_ASSERT(ascq->q_cur_cnt[tid] >= 0);
+ REQTIMESTAT("asc_dequeue", ascq, reqp, tid);
+#endif /* ADVANSYS_STATS */
+ }
+ ASC_DBG1(3, "asc_dequeue: reqp %x\n", (unsigned) reqp);
+ return reqp;
+}
+
+/*
+ * Return a pointer to a singly linked list of all the requests queued
+ * for 'tid' on the 'asc_queue_t' pointed to by 'ascq'.
+ *
+ * If 'lastpp' is not NULL, '*lastpp' will be set to point to the
+ * the last request returned in the singly linked list.
+ *
+ * 'tid' should either be a valid target id or if it is ASC_TID_ALL,
+ * then all queued requests are concatenated into one list and
+ * returned.
+ *
+ * Note: If 'lastpp' is used to append a new list to the end of
+ * an old list, only change the old list last pointer if '*lastpp'
+ * (or the function return value) is not NULL, i.e. use a temporary
+ * variable for 'lastpp' and check its value after the function return
+ * before assigning it to the list last pointer.
+ *
+ * Unfortunately collecting queuing time statistics adds overhead to
+ * the function that isn't inherent to the function's algorithm.
+ */
+STATIC REQP
+asc_dequeue_list(asc_queue_t *ascq, REQP *lastpp, int tid)
+{
+ REQP firstp, lastp;
+ int i;
+
+ ASC_DBG2(3, "asc_dequeue_list: ascq %x, tid %d\n", (unsigned) ascq, tid);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT((tid == ASC_TID_ALL) || (tid >= 0 && tid <= ADV_MAX_TID));
+
+ /*
+ * If 'tid' is not ASC_TID_ALL, return requests only for
+ * the specified 'tid'. If 'tid' is ASC_TID_ALL, return all
+ * requests for all tids.
+ */
+ if (tid != ASC_TID_ALL) {
+ /* Return all requests for the specified 'tid'. */
+ if ((ascq->q_tidmask & ADV_TID_TO_TIDMASK(tid)) == 0) {
+ /* List is empty; Set first and last return pointers to NULL. */
+ firstp = lastp = NULL;
+ } else {
+ /* Detach the whole per-target list and clear its mask bit. */
+ firstp = ascq->q_first[tid];
+ lastp = ascq->q_last[tid];
+ ascq->q_first[tid] = ascq->q_last[tid] = NULL;
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(tid);
+#ifdef ADVANSYS_STATS
+ {
+ REQP reqp;
+ ascq->q_cur_cnt[tid] = 0;
+ for (reqp = firstp; reqp; reqp = (REQP) REQPNEXT(reqp)) {
+ REQTIMESTAT("asc_dequeue_list", ascq, reqp, tid);
+ }
+ }
+#endif /* ADVANSYS_STATS */
+ }
+ } else {
+ /* Return all requests for all tids. */
+ firstp = lastp = NULL;
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if (ascq->q_tidmask & ADV_TID_TO_TIDMASK(i)) {
+ if (firstp == NULL) {
+ firstp = ascq->q_first[i];
+ lastp = ascq->q_last[i];
+ } else {
+ /* Concatenate this target's list onto the result. */
+ ASC_ASSERT(lastp != NULL);
+ REQPNEXT(lastp) = (unsigned char *) ascq->q_first[i];
+ lastp = ascq->q_last[i];
+ }
+ ascq->q_first[i] = ascq->q_last[i] = NULL;
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(i);
+#ifdef ADVANSYS_STATS
+ ascq->q_cur_cnt[i] = 0;
+#endif /* ADVANSYS_STATS */
+ }
+ }
+#ifdef ADVANSYS_STATS
+ {
+ REQP reqp;
+ for (reqp = firstp; reqp; reqp = (REQP) REQPNEXT(reqp)) {
+ REQTIMESTAT("asc_dequeue_list", ascq, reqp, reqp->target);
+ }
+ }
+#endif /* ADVANSYS_STATS */
+ }
+ if (lastpp) {
+ *lastpp = lastp;
+ }
+ ASC_DBG1(3, "asc_dequeue_list: firstp %x\n", (unsigned) firstp);
+ return firstp;
+}
+
+/*
+ * Remove the specified 'REQP' from the specified queue for
+ * the specified target device. Clear the 'tidmask' bit for the
+ * device if no more commands are left queued for it.
+ *
+ * 'REQPNEXT(reqp)' returns reqp's next pointer.
+ *
+ * Return ASC_TRUE if the command was found and removed,
+ * otherwise return ASC_FALSE.
+ */
+STATIC int
+asc_rmqueue(asc_queue_t *ascq, REQP reqp)
+{
+ REQP currp, prevp;
+ int tid;
+ int ret = ASC_FALSE;
+
+ ASC_DBG2(3, "asc_rmqueue: ascq %x, reqp %x\n",
+ (unsigned) ascq, (unsigned) reqp);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(reqp != NULL);
+
+ tid = REQPTID(reqp);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+
+ /*
+ * Handle the common case of 'reqp' being the first
+ * entry on the queue.
+ */
+ if (reqp == ascq->q_first[tid]) {
+ ret = ASC_TRUE;
+ ascq->q_first[tid] = (REQP) REQPNEXT(reqp);
+ /* If the queue is now empty, clear its bit and the last pointer. */
+ if (ascq->q_first[tid] == NULL) {
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(tid);
+ ASC_ASSERT(ascq->q_last[tid] == reqp);
+ ascq->q_last[tid] = NULL;
+ }
+ } else if (ascq->q_first[tid] != NULL) {
+ ASC_ASSERT(ascq->q_last[tid] != NULL);
+ /*
+ * Because the case of 'reqp' being the first entry has been
+ * handled above and it is known the queue is not empty, if
+ * 'reqp' is found on the queue it is guaranteed the queue will
+ * not become empty and that 'q_first[tid]' will not be changed.
+ *
+ * Set 'prevp' to the first entry, 'currp' to the second entry,
+ * and search for 'reqp'.
+ */
+ for (prevp = ascq->q_first[tid], currp = (REQP) REQPNEXT(prevp);
+ currp; prevp = currp, currp = (REQP) REQPNEXT(currp)) {
+ if (currp == reqp) {
+ ret = ASC_TRUE;
+ REQPNEXT(prevp) = REQPNEXT(currp);
+ REQPNEXT(reqp) = NULL;
+ if (ascq->q_last[tid] == reqp) {
+ ascq->q_last[tid] = prevp;
+ }
+ break;
+ }
+ }
+ }
+ /* ret remains ASC_FALSE when 'reqp' was not found on the queue. */
+#ifdef ADVANSYS_STATS
+ /* Maintain request queue statistics. */
+ if (ret == ASC_TRUE) {
+ ascq->q_cur_cnt[tid]--;
+ REQTIMESTAT("asc_rmqueue", ascq, reqp, tid);
+ }
+ ASC_ASSERT(ascq->q_cur_cnt[tid] >= 0);
+#endif /* ADVANSYS_STATS */
+ ASC_DBG2(3, "asc_rmqueue: reqp %x, ret %d\n", (unsigned) reqp, ret);
+ return ret;
+}
+
+/*
+ * If the specified 'REQP' is queued on the specified queue for
+ * the specified target device, return ASC_TRUE.
+ */
+STATIC int
+asc_isqueued(asc_queue_t *ascq, REQP reqp)
+{
+ REQP treqp;
+ int tid;
+ int ret = ASC_FALSE;
+
+ ASC_DBG2(3, "asc_isqueued: ascq %x, reqp %x\n",
+ (unsigned) ascq, (unsigned) reqp);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(reqp != NULL);
+
+ tid = REQPTID(reqp);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+
+ /* Walk the target's singly linked list looking for 'reqp'. */
+ for (treqp = ascq->q_first[tid]; treqp; treqp = (REQP) REQPNEXT(treqp)) {
+ ASC_ASSERT(ascq->q_tidmask & ADV_TID_TO_TIDMASK(tid));
+ if (treqp == reqp) {
+ ret = ASC_TRUE;
+ break;
+ }
+ }
+ ASC_DBG1(3, "asc_isqueued: ret %x\n", ret);
+ return ret;
+}
+
+/*
+ * Execute as many queued requests as possible for the specified queue.
+ *
+ * Calls asc_execute_scsi_cmnd() to execute a REQP/Scsi_Cmnd.
+ */
+STATIC void
+asc_execute_queue(asc_queue_t *ascq)
+{
+ ADV_SCSI_BIT_ID_TYPE scan_tidmask;
+ REQP reqp;
+ int i;
+
+ ASC_DBG1(1, "asc_execute_queue: ascq %x\n", (unsigned) ascq);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ /*
+ * Execute queued commands for devices attached to
+ * the current board in round-robin fashion.
+ *
+ * A target is dropped from 'scan_tidmask' when its queue empties
+ * or when it returns ASC_BUSY; the loop ends when no targets are
+ * left to service.
+ */
+ scan_tidmask = ascq->q_tidmask;
+ do {
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if (scan_tidmask & ADV_TID_TO_TIDMASK(i)) {
+ if ((reqp = asc_dequeue(ascq, i)) == NULL) {
+ scan_tidmask &= ~ADV_TID_TO_TIDMASK(i);
+ } else if (asc_execute_scsi_cmnd((Scsi_Cmnd *) reqp)
+ == ASC_BUSY) {
+ scan_tidmask &= ~ADV_TID_TO_TIDMASK(i);
+ /* Put the request back at front of the list. */
+ asc_enqueue(ascq, reqp, ASC_FRONT);
+ }
+ }
+ }
+ } while (scan_tidmask);
+ return;
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+/*
+ * asc_prt_board_devices()
+ *
+ * Print driver information for devices attached to the board.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_board_devices(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ int chip_scsi_id;
+ int i;
+
+ boardp = ASC_BOARDP(shp);
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nDevice Information for AdvanSys SCSI Host %d:\n", shp->host_no);
+ /* NOTE(review): ASC_PRT_NEXT() presumably advances 'cp' and updates
+ 'leftlen'/'totlen' from 'len' after each asc_prt_line() -- confirm
+ against the macro definition. */
+ ASC_PRT_NEXT();
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+ } else {
+ chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+ }
+
+ /* List each target id whose bit is set in the board's init mask. */
+ len = asc_prt_line(cp, leftlen, "Target IDs Detected:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) {
+ len = asc_prt_line(cp, leftlen, " %X,", i);
+ ASC_PRT_NEXT();
+ }
+ }
+ len = asc_prt_line(cp, leftlen, " (%X=Host Adapter)\n", chip_scsi_id);
+ ASC_PRT_NEXT();
+
+ return totlen;
+}
+
+/*
+ * Display Wide Board BIOS Information.
+ */
+STATIC int
+asc_prt_adv_bios(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ int upgrade = ASC_FALSE;
+ ushort major, minor, letter;
+
+ boardp = ASC_BOARDP(shp);
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen, "\nROM BIOS Version: ");
+ ASC_PRT_NEXT();
+
+ /*
+ * If the BIOS did not save a valid signature (0x55AA), no version
+ * information is available and the BIOS must be pre-3.1.
+ */
+ if (boardp->bios_signature != 0x55AA) {
+ len = asc_prt_line(cp, leftlen, "Pre-3.1\n");
+ ASC_PRT_NEXT();
+ upgrade = ASC_TRUE;
+ } else {
+ major = (boardp->bios_version >> 12) & 0xF;
+ minor = (boardp->bios_version >> 8) & 0xF;
+ letter = (boardp->bios_version & 0xFF);
+
+ /* 'letter' is 0-based ('A' == 0); out-of-range prints '?'. */
+ len = asc_prt_line(cp, leftlen, "%d.%d%c\n",
+ major, minor, letter >= 26 ? '?' : letter + 'A');
+ ASC_PRT_NEXT();
+
+ /* Current available ROM BIOS release is 3.1C. */
+ if (major < 3 || (major <= 3 && minor < 1) ||
+ (major <= 3 && minor <= 1 && letter < ('C'- 'A'))) {
+ upgrade = ASC_TRUE;
+ }
+ }
+ if (upgrade == ASC_TRUE) {
+ len = asc_prt_line(cp, leftlen,
+"Newer version of ROM BIOS available: ftp://ftp.advansys.com/pub\n");
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * Add serial number to information bar if signature AAh
+ * is found at bits 15-9 (7 bits) of word 1.
+ *
+ * Serial Number consists of 12 alpha-numeric digits.
+ *
+ * 1 - Product type (A,B,C,D..) Word0: 15-13 (3 bits)
+ * 2 - MFG Location (A,B,C,D..) Word0: 12-10 (3 bits)
+ * 3-4 - Product ID (0-99) Word0: 9-0 (10 bits)
+ * 5 - Product revision (A-J) Word0: " "
+ *
+ * Signature Word1: 15-9 (7 bits)
+ * 6 - Year (0-9) Word1: 8-6 (3 bits) & Word2: 15 (1 bit)
+ * 7-8 - Week of the year (1-52) Word1: 5-0 (6 bits)
+ *
+ * 9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits)
+ *
+ * Note 1: Only production cards will have a serial number.
+ *
+ * Note 2: Signature is most significant 7 bits (0xFE).
+ *
+ * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE.
+ */
+STATIC int
+asc_get_eeprom_string(ushort *serialnum, uchar *cp)
+{
+ ushort w, num;
+
+ /* No 0xAA signature in bits 15-9 of word 1: no serial number. */
+ if ((serialnum[1] & 0xFE00) != ((ushort) 0xAA << 8)) {
+ return ASC_FALSE;
+ } else {
+ /*
+ * First word - 6 digits.
+ */
+ w = serialnum[0];
+
+ /* Product type - 1st digit. */
+ if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') {
+ /* Product type is P=Prototype ('H' + 8 == 'P'). */
+ *cp += 0x8;
+ }
+ cp++;
+
+ /* Manufacturing location - 2nd digit. */
+ *cp++ = 'A' + ((w & 0x1C00) >> 10);
+
+ /* Product ID - 3rd, 4th digits. */
+ num = w & 0x3FF;
+ *cp++ = '0' + (num / 100);
+ num %= 100;
+ *cp++ = '0' + (num / 10);
+
+ /* Product revision - 5th digit. */
+ *cp++ = 'A' + (num % 10);
+
+ /*
+ * Second word
+ */
+ w = serialnum[1];
+
+ /*
+ * Year - 6th digit.
+ *
+ * If bit 15 of third word is set, then the
+ * last digit of the year is greater than 7.
+ */
+ if (serialnum[2] & 0x8000) {
+ *cp++ = '8' + ((w & 0x1C0) >> 6);
+ } else {
+ *cp++ = '0' + ((w & 0x1C0) >> 6);
+ }
+
+ /* Week of year - 7th, 8th digits. */
+ num = w & 0x003F;
+ *cp++ = '0' + num / 10;
+ num %= 10;
+ *cp++ = '0' + num;
+
+ /*
+ * Third word
+ */
+ w = serialnum[2] & 0x7FFF;
+
+ /* Serial number - 9th digit. */
+ *cp++ = 'A' + (w / 1000);
+
+ /* 10th, 11th, 12th digits. */
+ num = w % 1000;
+ *cp++ = '0' + num / 100;
+ num %= 100;
+ *cp++ = '0' + num / 10;
+ num %= 10;
+ *cp++ = '0' + num;
+
+ *cp = '\0'; /* Null Terminate the string. */
+ return ASC_TRUE;
+ }
+}
+
+/*
+ * asc_prt_asc_board_eeprom()
+ *
+ * Print board EEPROM configuration.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_asc_board_eeprom(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ int leftlen;
+ int totlen;
+ int len;
+ ASCEEP_CONFIG *ep;
+ int i;
+ /* Maps the EEPROM 'isa_dma_speed' code (0-7) to MB/s. */
+ int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
+ uchar serialstr[13];
+
+ boardp = ASC_BOARDP(shp);
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ ep = &boardp->eep_config.asc_eep;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nEEPROM Settings for AdvanSys SCSI Host %d:\n", shp->host_no);
+ ASC_PRT_NEXT();
+
+ if (asc_get_eeprom_string((ushort *) &ep->adapter_info[0], serialstr) ==
+ ASC_TRUE) {
+ len = asc_prt_line(cp, leftlen, " Serial Number: %s\n", serialstr);
+ ASC_PRT_NEXT();
+ } else {
+ /* NOTE(review): 0xBB in adapter_info[5] apparently marks an
+ EEPROM-less adapter running on defaults -- confirm. */
+ if (ep->adapter_info[5] == 0xBB) {
+ len = asc_prt_line(cp, leftlen,
+ " Default Settings Used for EEPROM-less Adapter.\n");
+ ASC_PRT_NEXT();
+ } else {
+ len = asc_prt_line(cp, leftlen,
+ " Serial Number Signature Not Present.\n");
+ ASC_PRT_NEXT();
+ }
+ }
+
+ len = asc_prt_line(cp, leftlen,
+" Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep->chip_scsi_id, ep->max_total_qng, ep->max_tag_qng);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" cntl %x, no_scam %x\n",
+ ep->cntl, ep->no_scam);
+ ASC_PRT_NEXT();
+
+ /* Per-target rows: one 'Y'/'N' column per target id. */
+ len = asc_prt_line(cp, leftlen,
+" Target ID: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %d", i);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Disconnects: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Command Queuing: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Start Motor: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Transfer:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ len = asc_prt_line(cp, leftlen,
+" Host ISA DMA speed: %d MB/S\n",
+ isa_dma_speed[ep->isa_dma_speed]);
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * asc_prt_adv_board_eeprom()
+ *
+ * Print board EEPROM configuration.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_adv_board_eeprom(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ int leftlen;
+ int totlen;
+ int len;
+ int i;
+ char *termstr;
+ uchar serialstr[13];
+ ADVEEP_CONFIG *ep;
+
+ boardp = ASC_BOARDP(shp);
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ ep = &boardp->eep_config.adv_eep;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nEEPROM Settings for AdvanSys SCSI Host %d:\n", shp->host_no);
+ ASC_PRT_NEXT();
+
+ if (asc_get_eeprom_string(&ep->serial_number_word1, serialstr) ==
+ ASC_TRUE) {
+ len = asc_prt_line(cp, leftlen, " Serial Number: %s\n", serialstr);
+ ASC_PRT_NEXT();
+ } else {
+ len = asc_prt_line(cp, leftlen,
+ " Serial Number Signature Not Present.\n");
+ ASC_PRT_NEXT();
+ }
+
+ len = asc_prt_line(cp, leftlen,
+" Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep->adapter_scsi_id, ep->max_host_qng, ep->max_dvc_qng);
+ ASC_PRT_NEXT();
+
+ /* Translate the EEPROM termination code; 0 and any unknown value
+ are both reported as "Automatic". */
+ switch (ep->termination) {
+ case 1:
+ termstr = "Low Off/High Off";
+ break;
+ case 2:
+ termstr = "Low Off/High On";
+ break;
+ case 3:
+ termstr = "Low On/High On";
+ break;
+ default:
+ case 0:
+ termstr = "Automatic";
+ break;
+ }
+
+ len = asc_prt_line(cp, leftlen,
+" termination: %u (%s), bios_ctrl: %x\n",
+ ep->termination, termstr, ep->bios_ctrl);
+ ASC_PRT_NEXT();
+
+ /* Per-target rows: one 'Y'/'N' column per target id. */
+ len = asc_prt_line(cp, leftlen,
+" Target ID: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %X", i);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Disconnects: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Command Queuing: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Start Motor: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Transfer:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Ultra Transfer: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->ultra_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Wide Transfer: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ return totlen;
+}
+
+/*
+ * asc_prt_driver_conf()
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_driver_conf(struct Scsi_Host *shp, char *cp, int cplen)
+{
+    asc_board_t *boardp;
+    int leftlen;
+    int totlen;
+    int len;
+    int chip_scsi_id;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89) || ASC_QUEUE_FLOW_CONTROL
+    /*
+     * 'i' is used both by the v1.3.89+ per-target queue_depth display
+     * and by the ASC_QUEUE_FLOW_CONTROL statistics below; the old guard
+     * left it undeclared when only flow control was enabled.
+     */
+    int i;
+#endif /* version >= v1.3.89 || ASC_QUEUE_FLOW_CONTROL */
+
+    boardp = ASC_BOARDP(shp);
+
+    /* ASC_PRT_NEXT() advances 'cp' and the totlen/leftlen counters. */
+    leftlen = cplen;
+    totlen = len = 0;
+
+    len = asc_prt_line(cp, leftlen,
+"\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
+        shp->host_no);
+    ASC_PRT_NEXT();
+
+    len = asc_prt_line(cp, leftlen,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+" host_busy %u, last_reset %u, max_id %u, max_lun %u\n",
+        shp->host_busy, shp->last_reset, shp->max_id, shp->max_lun);
+#else /* version >= v1.3.89 */
+" host_busy %u, last_reset %u, max_id %u, max_lun %u, max_channel %u\n",
+        shp->host_busy, shp->last_reset, shp->max_id, shp->max_lun,
+        shp->max_channel);
+#endif /* version >= v1.3.89 */
+    ASC_PRT_NEXT();
+
+    len = asc_prt_line(cp, leftlen,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,57)
+" can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
+        shp->can_queue, shp->this_id, shp->sg_tablesize, shp->cmd_per_lun);
+#else /* version >= v1.3.57 */
+" unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
+        shp->unique_id, shp->can_queue, shp->this_id, shp->sg_tablesize,
+        shp->cmd_per_lun);
+#endif /* version >= v1.3.57 */
+    ASC_PRT_NEXT();
+
+    len = asc_prt_line(cp, leftlen,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,57)
+" unchecked_isa_dma %d, loaded_as_module %d\n",
+        shp->unchecked_isa_dma, shp->loaded_as_module);
+#else /* version >= v1.3.57 */
+" unchecked_isa_dma %d, use_clustering %d, loaded_as_module %d\n",
+        shp->unchecked_isa_dma, shp->use_clustering, shp->loaded_as_module);
+#endif /* version >= v1.3.57 */
+    ASC_PRT_NEXT();
+
+    len = asc_prt_line(cp, leftlen, " flags %x, last_reset %x, jiffies %x\n",
+        boardp->flags, boardp->last_reset, jiffies);
+    ASC_PRT_NEXT();
+
+    /*
+     * The adapter's own SCSI id lives in different structures for
+     * narrow (Asc) and wide (Adv) boards.
+     */
+    if (ASC_NARROW_BOARD(boardp)) {
+        chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+    } else {
+        chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+    }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+    if (boardp->flags & ASC_SELECT_QUEUE_DEPTHS) {
+        len = asc_prt_line(cp, leftlen, " queue_depth:");
+        ASC_PRT_NEXT();
+        for (i = 0; i <= ADV_MAX_TID; i++) {
+            /* Skip the adapter itself and targets absent at init. */
+            if ((chip_scsi_id == i) ||
+                ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+                continue;
+            }
+            if (boardp->device[i] == NULL) {
+                continue;
+            }
+            len = asc_prt_line(cp, leftlen, " %X:%d",
+                i, boardp->device[i]->queue_depth);
+            ASC_PRT_NEXT();
+        }
+        len = asc_prt_line(cp, leftlen, "\n");
+        ASC_PRT_NEXT();
+    }
+#endif /* version >= v1.3.89 */
+
+#if ASC_QUEUE_FLOW_CONTROL
+    if (ASC_NARROW_BOARD(boardp)) {
+        len = asc_prt_line(cp, leftlen, " queue_curr_depth:");
+        ASC_PRT_NEXT();
+        /* Use ASC_MAX_TID for Narrow Board. */
+        for (i = 0; i <= ASC_MAX_TID; i++) {
+            /*
+             * Compare against the chip_scsi_id computed above; the board
+             * is known to be narrow here. (The old code read the id
+             * through a stale 'boardp->asc_dvc_cfg' member path that
+             * matches no other reference in this file.)
+             */
+            if ((chip_scsi_id == i) ||
+                ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+                continue;
+            }
+            if (boardp->device[i] == NULL) {
+                continue;
+            }
+            len = asc_prt_line(cp, leftlen, " %d:%d",
+                i, boardp->device[i]->queue_curr_depth);
+            ASC_PRT_NEXT();
+        }
+        len = asc_prt_line(cp, leftlen, "\n");
+        ASC_PRT_NEXT();
+
+        len = asc_prt_line(cp, leftlen, " queue_count:");
+        ASC_PRT_NEXT();
+        /* Use ASC_MAX_TID for Narrow Board. */
+        for (i = 0; i <= ASC_MAX_TID; i++) {
+            if ((chip_scsi_id == i) ||
+                ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+                continue;
+            }
+            if (boardp->device[i] == NULL) {
+                continue;
+            }
+            len = asc_prt_line(cp, leftlen, " %d:%d",
+                i, boardp->device[i]->queue_count);
+            ASC_PRT_NEXT();
+        }
+        len = asc_prt_line(cp, leftlen, "\n");
+        ASC_PRT_NEXT();
+    }
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+
+    return totlen;
+}
+
+/*
+ * asc_prt_asc_board_info()
+ *
+ * Print dynamic board configuration information.
+ *
+ * Used for narrow (Asc library) boards; reads state from the board's
+ * dvc_var.asc_dvc_var / dvc_cfg.asc_dvc_cfg union members.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_asc_board_info(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ ASC_DVC_VAR *v;
+ ASC_DVC_CFG *c;
+ int i;
+
+ boardp = ASC_BOARDP(shp);
+ v = &boardp->dvc_var.asc_dvc_var;
+ c = &boardp->dvc_cfg.asc_dvc_cfg;
+
+ /* NOTE(review): ASC_PRT_NEXT() is assumed to advance 'cp' and fold
+  * 'len' into 'totlen'/'leftlen' -- macro defined elsewhere in this
+  * file; confirm. */
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shp->host_no);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" chip_version %u, lib_version %x, lib_serial_no %u, mcode_date %x\n",
+ c->chip_version, c->lib_version, c->lib_serial_no, c->mcode_date);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" mcode_version %x, err_code %u\n",
+ c->mcode_version, v->err_code);
+ ASC_PRT_NEXT();
+
+ /* Current number of commands waiting for the host. */
+ len = asc_prt_line(cp, leftlen,
+" Total Command Pending: %d\n", v->cur_total_qng);
+ ASC_PRT_NEXT();
+
+ /* Each per-target loop below skips the adapter's own SCSI id and
+  * any target not present in init_tidmask.
+  * NOTE(review): ADV_TID_TO_TIDMASK is used even for this narrow
+  * chip; presumed to share the Asc bit layout -- confirm. */
+ len = asc_prt_line(cp, leftlen,
+" Command Queuing:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%c",
+ i, (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Current number of commands waiting for a device. */
+ len = asc_prt_line(cp, leftlen,
+" Command Queue Pending:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%u", i, v->cur_dvc_qng[i]);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Current limit on number of commands that can be sent to a device. */
+ len = asc_prt_line(cp, leftlen,
+" Command Queue Limit:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%u", i, v->max_dvc_qng[i]);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Indicate whether the device has returned queue full status. */
+ len = asc_prt_line(cp, leftlen,
+" Command Queue Full:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) {
+ len = asc_prt_line(cp, leftlen, " %d:Y-%d",
+ i, boardp->queue_full_cnt[i]);
+ } else {
+ len = asc_prt_line(cp, leftlen, " %d:N", i);
+ }
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Transfer:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%c",
+ i, (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Per-target SDTR detail: only targets that completed negotiation. */
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ uchar syn_period_ix;
+
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
+ continue;
+ }
+ /* High nibble of sdtr_data[i] indexes the period table;
+  * masking keeps the index within the table size. */
+ syn_period_ix = (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index - 1);
+ len = asc_prt_line(cp, leftlen, " %d:", i);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+ " Transfer Period Factor: %d (%d.%d Mhz),",
+ v->sdtr_period_tbl[syn_period_ix],
+ 250 / v->sdtr_period_tbl[syn_period_ix],
+ ASC_TENTHS(250, v->sdtr_period_tbl[syn_period_ix]));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d\n",
+ boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET);
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * asc_prt_adv_board_info()
+ *
+ * Print dynamic board configuration information.
+ *
+ * Used for wide (Adv library) boards; most per-target state is read
+ * live from the chip's local RAM via the AdvRead*Lram() accessors.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_adv_board_info(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ int i;
+ ADV_DVC_VAR *v;
+ ADV_DVC_CFG *c;
+ AdvPortAddr iop_base;
+ ushort chip_scsi_id;
+ ushort lramword;
+ uchar lrambyte;
+ ushort sdtr_able;
+ ushort period;
+
+ boardp = ASC_BOARDP(shp);
+ v = &boardp->dvc_var.adv_dvc_var;
+ c = &boardp->dvc_cfg.adv_dvc_cfg;
+ iop_base = v->iop_base;
+ chip_scsi_id = v->chip_scsi_id;
+
+ /* NOTE(review): ASC_PRT_NEXT() is assumed to advance 'cp' and fold
+  * 'len' into 'totlen'/'leftlen' -- macro defined elsewhere; confirm. */
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shp->host_no);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" iop_base %lx, cable_detect: %X, err_code %u, idle_cmd_done %u\n",
+ v->iop_base,
+ AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1) & CABLE_DETECT,
+ v->err_code, v->idle_cmd_done);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" chip_version %u, lib_version %x, mcode_date %x, mcode_version %x\n",
+ c->chip_version, c->lib_version, c->mcode_date, c->mcode_version);
+ ASC_PRT_NEXT();
+
+ /* AdvReadWordLram() stores the word read into its last argument.
+  * Each per-target loop below skips the adapter's own SCSI id and
+  * targets absent from init_tidmask. */
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, lramword);
+ len = asc_prt_line(cp, leftlen,
+" Queuing Enabled:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:%c",
+ i, (lramword & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Queue Limit:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i, lrambyte);
+
+ len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Command Pending:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i, lrambyte);
+
+ len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, lramword);
+ len = asc_prt_line(cp, leftlen,
+" Wide Enabled:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:%c",
+ i, (lramword & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Transfer Bit Width:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ /* Bit 15 of the per-target handshake word selects 16-bit wide. */
+ AdvReadWordLram(iop_base, ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
+ lramword);
+ len = asc_prt_line(cp, leftlen, " %X:%d",
+ i, (lramword & 0x8000) ? 16 : 8);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Enabled:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:%c",
+ i, (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Per-target SDTR detail for targets with sync enabled and a
+  * non-zero handshake value (wide bit masked off). */
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+
+ AdvReadWordLram(iop_base, ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
+ lramword);
+ lramword &= ~0x8000;
+
+ if ((chip_scsi_id == i) ||
+ ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0) ||
+ (lramword == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:", i);
+ ASC_PRT_NEXT();
+
+ /* NOTE(review): converts the handshake word's high byte into a
+  * SCSI transfer period factor -- confirm against the Adv
+  * library's handshake table encoding. */
+ period = (((lramword >> 8) * 25) + 50)/4;
+
+ len = asc_prt_line(cp, leftlen,
+ " Transfer Period Factor: %d (%d.%d Mhz),",
+ period, 250/period, ASC_TENTHS(250, period));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d\n",
+ lramword & 0x1F);
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * asc_proc_copy()
+ *
+ * Copy proc information to a read buffer taking into account the current
+ * read offset in the file and the remaining space in the read buffer.
+ *
+ * 'advoffset' is the file offset at which the section in 'cp' begins;
+ * 'offset' is the user's current read offset. Returns the number of
+ * bytes copied into 'curbuf' (0 if the section lies entirely before
+ * 'offset').
+ */
+STATIC int
+asc_proc_copy(off_t advoffset, off_t offset, char *curbuf, int leftlen,
+ char *cp, int cplen)
+{
+ int cnt = 0;
+
+ ASC_DBG3(2, "asc_proc_copy: offset %d, advoffset %d, cplen %d\n",
+ (unsigned) offset, (unsigned) advoffset, cplen);
+ if (offset <= advoffset) {
+ /* Read offset below current offset, copy everything. */
+ cnt = ASC_MIN(cplen, leftlen);
+ ASC_DBG3(2, "asc_proc_copy: curbuf %x, cp %x, cnt %d\n",
+ (unsigned) curbuf, (unsigned) cp, cnt);
+ memcpy(curbuf, cp, cnt);
+ } else if (offset < advoffset + cplen) {
+ /* Read offset within current range, partial copy. */
+ cnt = (advoffset + cplen) - offset;
+ cp = (cp + cplen) - cnt;
+ cnt = ASC_MIN(cnt, leftlen);
+ ASC_DBG3(2, "asc_proc_copy: curbuf %x, cp %x, cnt %d\n",
+ (unsigned) curbuf, (unsigned) cp, cnt);
+ memcpy(curbuf, cp, cnt);
+ }
+ return cnt;
+}
+
+/*
+ * asc_prt_line()
+ *
+ * If 'cp' is NULL print to the console, otherwise print to a buffer.
+ *
+ * Return 0 if printing to the console, otherwise return the number of
+ * bytes written to the buffer.
+ *
+ * Note: If any single line is greater than ASC_PRTLINE_SIZE bytes the stack
+ * will be corrupted. 's[]' is defined to be ASC_PRTLINE_SIZE bytes.
+ *
+ * NOTE(review): vsprintf() is unbounded, so the ASC_ASSERT() below only
+ * detects an overrun after the stack is already damaged; vsnprintf()
+ * would prevent it, if available on the oldest kernels supported here.
+ */
+STATIC int
+asc_prt_line(char *buf, int buflen, char *fmt, ...)
+{
+ va_list args;
+ int ret;
+ char s[ASC_PRTLINE_SIZE];
+
+ va_start(args, fmt);
+ ret = vsprintf(s, fmt, args);
+ ASC_ASSERT(ret < ASC_PRTLINE_SIZE);
+ if (buf == NULL) {
+ (void) printk("%s", s);
+ ret = 0;
+ } else {
+ /* Truncate to the space the caller has left in the buffer. */
+ ret = ASC_MIN(buflen, ret);
+ memcpy(buf, s, ret);
+ }
+ va_end(args);
+ return ret;
+}
+#endif /* version >= v1.3.0 */
+
+
+/*
+ * --- Functions Required by the Asc Library
+ */
+
+/*
+ * Delay for 'n' milliseconds. Don't use the 'jiffies'
+ * global variable which is incremented once every 5 ms
+ * from a timer interrupt, because this function may be
+ * called when interrupts are disabled.
+ */
+STATIC void
+DvcSleepMilliSecond(ulong n)
+{
+ ulong i;
+
+ ASC_DBG1(4, "DvcSleepMilliSecond: %lu\n", n);
+ /* Busy-wait in 1 ms steps; udelay() does not depend on interrupts. */
+ for (i = 0; i < n; i++) {
+ udelay(1000);
+ }
+}
+
+STATIC long
+DvcEnterCritical(void)
+{
+ long flags;
+
+ /* Save the CPU interrupt state and disable interrupts. The caller
+  * passes the returned flags to DvcLeaveCritical() to restore them. */
+ save_flags(flags);
+ cli();
+ return flags;
+}
+
+STATIC void
+DvcLeaveCritical(long flags)
+{
+ /* Restore the interrupt state captured by DvcEnterCritical(). */
+ restore_flags(flags);
+}
+
+STATIC ulong
+DvcGetSGList(ASC_DVC_VAR *asc_dvc_sg, uchar *buf_addr, ulong buf_len,
+ ASC_SG_HEAD *asc_sg_head_ptr)
+{
+ /*
+  * Build a single-entry scatter-gather list covering the contiguous
+  * buffer 'buf_addr'/'buf_len'. Returns the byte count placed in the
+  * lone entry. 'asc_dvc_sg' is unused here.
+  */
+ ulong buf_size;
+
+ buf_size = buf_len;
+ asc_sg_head_ptr->entry_cnt = 1;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_sg_head_ptr->sg_list[0].addr = (ulong) buf_addr;
+#else /* version >= v2.0.0 */
+ /* v2.0.0+ distinguishes virtual addresses from bus addresses. */
+ asc_sg_head_ptr->sg_list[0].addr = virt_to_bus(buf_addr);
+#endif /* version >= v2.0.0 */
+ asc_sg_head_ptr->sg_list[0].bytes = buf_size;
+ return buf_size;
+}
+
+/*
+ * void
+ * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, ushort *outbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Output an ASC_SCSI_Q structure to the chip
+ */
+STATIC void
+DvcPutScsiQ(PortAddr iop_base, ushort s_addr, ushort *outbuf, int words)
+{
+ int i;
+
+ ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", (uchar *) outbuf, 2 * words);
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++, outbuf++) {
+ /* NOTE(review): words 2 and 10 are deliberately not written to
+  * LRAM -- presumably chip-owned slots of the queue structure;
+  * confirm against the Asc library's queue layout. */
+ if (i == 2 || i == 10) {
+ continue;
+ }
+ AscSetChipLramDataNoSwap(iop_base, *outbuf);
+ }
+}
+
+/*
+ * void
+ * DvcGetQinfo(PortAddr iop_base, ushort s_addr, ushort *inbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Input an ASC_QDONE_INFO structure from the chip
+ */
+STATIC void
+DvcGetQinfo(PortAddr iop_base, ushort s_addr, ushort *inbuf, int words)
+{
+ int i;
+ ushort *buf_start = inbuf; /* keep the buffer start for the debug dump */
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++, inbuf++) {
+ /* Word 5 is skipped: that slot is not read from LRAM. */
+ if (i == 5) {
+ continue;
+ }
+ *inbuf = AscGetChipLramDataNoSwap(iop_base);
+ }
+ /*
+  * Dump what was just read. The old code passed the post-loop 'inbuf',
+  * which points one past the end of the buffer, so the hex dump showed
+  * 2 * words bytes of unrelated memory instead of the data read.
+  */
+ ASC_DBG_PRT_HEX(2, "DvcGetQinfo", (uchar *) buf_start, 2 * words);
+}
+
+/*
+ * void DvcOutPortWords(ushort iop_base, ushort *outbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * output a buffer to an i/o port address
+ */
+STATIC void
+DvcOutPortWords(ushort iop_base, ushort *outbuf, int words)
+{
+ int i;
+
+ /* Write 'words' 16-bit values to the same port, in buffer order. */
+ for (i = 0; i < words; i++, outbuf++)
+ outpw(iop_base, *outbuf);
+}
+
+/*
+ * void DvcInPortWords(ushort iop_base, ushort *inbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * input a buffer from an i/o port address
+ */
+STATIC void
+DvcInPortWords(ushort iop_base, ushort *inbuf, int words)
+{
+ int i;
+
+ /* Read 'words' 16-bit values from the same port into the buffer. */
+ for (i = 0; i < words; i++, inbuf++)
+ *inbuf = inpw(iop_base);
+}
+
+/*
+ * void DvcOutPortDWords(PortAddr port, ulong *pdw, int dwords)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Write a buffer of 32-bit integers to an I/O port address as a
+ * sequence of 16-bit writes, low word first within each entry.
+ */
+STATIC void
+DvcOutPortDWords(PortAddr port, ulong *pdw, int dwords)
+{
+ ushort *word_ptr = (ushort *) pdw;
+ int word_cnt = dwords << 1; /* each 32-bit entry is two 16-bit writes */
+
+ while (word_cnt-- > 0) {
+ outpw(port, *word_ptr);
+ word_ptr++;
+ }
+}
+
+/*
+ * Read a PCI configuration byte.
+ *
+ * Pre-v2.1.93 kernels go through the driver's own PCI scan support
+ * (asc_get_cfg_byte); later kernels use the kernel's pcibios service.
+ * Returns 0 when no PCI support is compiled in.
+ */
+ASC_INITFUNC(
+STATIC uchar
+DvcReadPCIConfigByte(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ ushort offset)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ /* Decode the bus/slot/function packed into pci_slot_info. */
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ return asc_get_cfg_byte(&pciData);
+#else /* ASC_CONFIG_PCI */
+ return 0;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ uchar byte_data;
+ pcibios_read_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, &byte_data);
+ return byte_data;
+#else /* CONFIG_PCI */
+ return 0;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * Write a PCI configuration byte.
+ *
+ * Pre-v2.1.93 kernels go through the driver's own PCI scan support
+ * (asc_put_cfg_byte); later kernels use the kernel's pcibios service.
+ * The write is silently dropped when no PCI support is compiled in.
+ */
+ASC_INITFUNC(
+STATIC void
+DvcWritePCIConfigByte(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ ushort offset,
+ uchar byte_data)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ /* Decode the bus/slot/function packed into pci_slot_info. */
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ asc_put_cfg_byte(&pciData, byte_data);
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ pcibios_write_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, byte_data);
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * Return the BIOS address of the adapter at the specified
+ * I/O port and with the specified bus type.
+ *
+ * Returns 0 for PCI boards, whose BIOS is relocated by the motherboard
+ * BIOS and therefore cannot be located from here.
+ */
+ASC_INITFUNC(
+STATIC ushort
+AscGetChipBiosAddress(
+ PortAddr iop_base,
+ ushort bus_type
+)
+)
+{
+ ushort cfg_lsw ;
+ ushort bios_addr ;
+
+ /*
+ * The PCI BIOS is re-located by the motherboard BIOS. Because
+ * of this the driver can not determine where a PCI BIOS is
+ * loaded and executes.
+ */
+ if (bus_type & ASC_IS_PCI)
+ {
+ return(0);
+ }
+
+ if((bus_type & ASC_IS_EISA) != 0)
+ {
+ /* EISA: the low 4 config bits select the BIOS bank. */
+ cfg_lsw = AscGetEisaChipCfg(iop_base) ;
+ cfg_lsw &= 0x000F ;
+ bios_addr = (ushort)(ASC_BIOS_MIN_ADDR +
+ (cfg_lsw * ASC_BIOS_BANK_SIZE)) ;
+ return(bios_addr) ;
+ }/* if */
+
+ cfg_lsw = AscGetChipCfgLsw(iop_base) ;
+
+ /*
+ * ISA PnP uses the top bit as the 32K BIOS flag
+ */
+ if (bus_type == ASC_IS_ISAPNP)
+ {
+ cfg_lsw &= 0x7FFF;
+ }/* if */
+
+ /* ISA/VL: the top 4 config bits select the BIOS bank. */
+ bios_addr = (ushort)(((cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE) +
+ ASC_BIOS_MIN_ADDR) ;
+ return(bios_addr) ;
+}
+
+
+/*
+ * --- Functions Required by the Adv Library
+ */
+
+/*
+ * DvcGetPhyAddr()
+ *
+ * Return the physical address of 'vaddr' and set '*lenp' to the
+ * number of physically contiguous bytes that follow 'vaddr'.
+ * 'flag' indicates the type of structure whose physical address
+ * is being translated.
+ *
+ * Note: Because Linux currently doesn't page the kernel and all
+ * kernel buffers are physically contiguous, leave '*lenp' unchanged.
+ */
+ulong
+DvcGetPhyAddr(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq,
+ uchar *vaddr, long *lenp, int flag)
+{
+ ulong paddr;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ paddr = (ulong) vaddr;
+#else /* version >= v2.0.0 */
+ /* v2.0.0+ distinguishes virtual addresses from bus addresses. */
+ paddr = virt_to_bus(vaddr);
+#endif /* version >= v2.0.0 */
+
+ ASC_DBG4(4,
+ "DvcGetPhyAddr: vaddr 0x%lx, lenp 0x%lx *lenp %lu, paddr 0x%lx\n",
+ (ulong) vaddr, (ulong) lenp, (ulong) *((ulong *) lenp), paddr);
+
+ return paddr;
+}
+
+/*
+ * Read a PCI configuration byte.
+ *
+ * Adv (wide board) variant of DvcReadPCIConfigByte(): pre-v2.1.93
+ * kernels use the driver's own PCI scan support, later kernels the
+ * pcibios service. Returns 0 when no PCI support is compiled in.
+ */
+ASC_INITFUNC(
+STATIC uchar
+DvcAdvReadPCIConfigByte(
+ ADV_DVC_VAR *asc_dvc,
+ ushort offset)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ /* Decode the bus/slot/function packed into pci_slot_info. */
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ return asc_get_cfg_byte(&pciData);
+#else /* ASC_CONFIG_PCI */
+ return 0;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ uchar byte_data;
+ pcibios_read_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, &byte_data);
+ return byte_data;
+#else /* CONFIG_PCI */
+ return 0;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * Write a PCI configuration byte.
+ *
+ * Adv (wide board) variant of DvcWritePCIConfigByte(). The write is
+ * silently dropped when no PCI support is compiled in.
+ */
+ASC_INITFUNC(
+STATIC void
+DvcAdvWritePCIConfigByte(
+ ADV_DVC_VAR *asc_dvc,
+ ushort offset,
+ uchar byte_data)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ /* Decode the bus/slot/function packed into pci_slot_info. */
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ asc_put_cfg_byte(&pciData, byte_data);
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ pcibios_write_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, byte_data);
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * --- Tracing and Debugging Functions
+ */
+
+#ifdef ADVANSYS_STATS
+/*
+ * asc_prt_board_stats()
+ *
+ * Print the per-board driver statistics kept in boardp->asc_stats and
+ * the active/waiting request queue counters.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_board_stats(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ int leftlen;
+ int totlen;
+ int len;
+ struct asc_stats *s;
+ int i;
+ ushort chip_scsi_id;
+ asc_board_t *boardp;
+ asc_queue_t *active;
+ asc_queue_t *waiting;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ boardp = ASC_BOARDP(shp);
+ s = &boardp->asc_stats;
+
+ len = asc_prt_line(cp, leftlen,
+"\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", shp->host_no);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" command %lu, queuecommand %lu, abort %lu, reset %lu, biosparam %lu\n",
+ s->command, s->queuecommand, s->abort, s->reset, s->biosparam);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" interrupt %lu, callback %lu, done %lu\n",
+ s->interrupt, s->callback, s->done);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" exe_noerror %lu, exe_busy %lu, exe_error %lu, exe_unknown %lu\n",
+ s->exe_noerror, s->exe_busy, s->exe_error, s->exe_unknown);
+ ASC_PRT_NEXT();
+
+ /* Wide boards track two extra request-build failure counters. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ len = asc_prt_line(cp, leftlen,
+" build_error %lu\n",
+ s->build_error);
+ } else {
+ len = asc_prt_line(cp, leftlen,
+" build_error %lu, build_noreq %lu, build_nosg %lu\n",
+ s->build_error, s->adv_build_noreq, s->adv_build_nosg);
+ }
+ ASC_PRT_NEXT();
+
+ /*
+ * Display data transfer statistics.
+ *
+ * NOTE(review): the transfer counters appear to be in 512-byte
+ * units -- the '/2' converts to KB, and ASC_TENTHS presumably
+ * yields the first decimal digit of the quotient. Confirm against
+ * the macro/counter definitions elsewhere in this file.
+ */
+ if (s->cont_cnt > 0) {
+ len = asc_prt_line(cp, leftlen, " cont_cnt %lu, ", s->cont_cnt);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "cont_xfer %lu.%01lu kb ",
+ s->cont_xfer/2,
+ ASC_TENTHS(s->cont_xfer, 2));
+ ASC_PRT_NEXT();
+
+ /* Contiguous transfer average size */
+ len = asc_prt_line(cp, leftlen, "avg_xfer %lu.%01lu kb\n",
+ (s->cont_xfer/2)/s->cont_cnt,
+ ASC_TENTHS((s->cont_xfer/2), s->cont_cnt));
+ ASC_PRT_NEXT();
+ }
+
+ if (s->sg_cnt > 0) {
+
+ len = asc_prt_line(cp, leftlen, " sg_cnt %lu, sg_elem %lu, ",
+ s->sg_cnt, s->sg_elem);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "sg_xfer %lu.%01lu kb\n",
+ s->sg_xfer/2,
+ ASC_TENTHS(s->sg_xfer, 2));
+ ASC_PRT_NEXT();
+
+ /* Scatter gather transfer statistics */
+ len = asc_prt_line(cp, leftlen, " avg_num_elem %lu.%01lu, ",
+ s->sg_elem/s->sg_cnt,
+ ASC_TENTHS(s->sg_elem, s->sg_cnt));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "avg_elem_size %lu.%01lu kb, ",
+ (s->sg_xfer/2)/s->sg_elem,
+ ASC_TENTHS((s->sg_xfer/2), s->sg_elem));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "avg_xfer_size %lu.%01lu kb\n",
+ (s->sg_xfer/2)/s->sg_cnt,
+ ASC_TENTHS((s->sg_xfer/2), s->sg_cnt));
+ ASC_PRT_NEXT();
+ }
+
+ /*
+ * Display request queuing statistics.
+ */
+ len = asc_prt_line(cp, leftlen,
+" Active and Waiting Request Queues (Time Unit: %d HZ):\n", HZ);
+ ASC_PRT_NEXT();
+
+ active = &ASC_BOARDP(shp)->active;
+ waiting = &ASC_BOARDP(shp)->waiting;
+
+ /* The adapter's own SCSI id lives in different structures for
+  * narrow (Asc) and wide (Adv) boards. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+ } else {
+ chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+ }
+
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+
+ /* Skip the adapter itself and targets absent at init. */
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ if (active->q_tot_cnt[i] > 0 || waiting->q_tot_cnt[i] > 0) {
+ len = asc_prt_line(cp, leftlen, " target %d\n", i);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" active: cnt [cur %d, max %d, tot %u], time [min %d, max %d, avg %lu.%01lu]\n",
+ active->q_cur_cnt[i], active->q_max_cnt[i],
+ active->q_tot_cnt[i],
+ active->q_min_tim[i], active->q_max_tim[i],
+ (active->q_tot_cnt[i] == 0) ? 0 :
+ (active->q_tot_tim[i]/active->q_tot_cnt[i]),
+ (active->q_tot_cnt[i] == 0) ? 0 :
+ ASC_TENTHS(active->q_tot_tim[i], active->q_tot_cnt[i]));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" waiting: cnt [cur %d, max %d, tot %u], time [min %u, max %u, avg %lu.%01lu]\n",
+ waiting->q_cur_cnt[i], waiting->q_max_cnt[i],
+ waiting->q_tot_cnt[i],
+ waiting->q_min_tim[i], waiting->q_max_tim[i],
+ (waiting->q_tot_cnt[i] == 0) ? 0 :
+ (waiting->q_tot_tim[i]/waiting->q_tot_cnt[i]),
+ (waiting->q_tot_cnt[i] == 0) ? 0 :
+ ASC_TENTHS(waiting->q_tot_tim[i], waiting->q_tot_cnt[i]));
+ ASC_PRT_NEXT();
+ }
+ }
+
+ return totlen;
+}
+#endif /* ADVANSYS_STATS */
+
+#ifdef ADVANSYS_DEBUG
+/*
+ * asc_prt_scsi_host()
+ *
+ * Debug dump of a struct Scsi_Host, followed by the board's Asc or Adv
+ * library state depending on the board type.
+ *
+ * NOTE(review): pointers are printed via (unsigned) casts, which
+ * truncate on 64-bit targets; debug-only output.
+ */
+STATIC void
+asc_prt_scsi_host(struct Scsi_Host *s)
+{
+ asc_board_t *boardp;
+
+ boardp = ASC_BOARDP(s);
+
+ printk("Scsi_Host at addr %x\n", (unsigned) s);
+ printk(
+" next %x, extra_bytes %u, host_busy %u, host_no %d, last_reset %d,\n",
+ (unsigned) s->next, s->extra_bytes, s->host_busy, s->host_no,
+ (unsigned) s->last_reset);
+
+ printk(
+" host_wait %x, host_queue %x, hostt %x, block %x,\n",
+ (unsigned) s->host_wait, (unsigned) s->host_queue,
+ (unsigned) s->hostt, (unsigned) s->block);
+
+ printk(
+" wish_block %d, base %x, io_port %d, n_io_port %d, irq %d, dma_channel %d,\n",
+ s->wish_block, (unsigned) s->base, s->io_port, s->n_io_port,
+ s->irq, s->dma_channel);
+
+ printk(
+" this_id %d, can_queue %d,\n", s->this_id, s->can_queue);
+
+ printk(
+" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d, loaded_as_module %d\n",
+ s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma,
+ s->loaded_as_module);
+
+ /* Dump the library state matching the board type. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ asc_prt_asc_dvc_var(&ASC_BOARDP(s)->dvc_var.asc_dvc_var);
+ asc_prt_asc_dvc_cfg(&ASC_BOARDP(s)->dvc_cfg.asc_dvc_cfg);
+ } else {
+ asc_prt_adv_dvc_var(&ASC_BOARDP(s)->dvc_var.adv_dvc_var);
+ asc_prt_adv_dvc_cfg(&ASC_BOARDP(s)->dvc_cfg.adv_dvc_cfg);
+ }
+}
+
+/*
+ * asc_prt_scsi_cmnd()
+ *
+ * Debug dump of a Scsi_Cmnd, including a hex dump of its CDB.
+ * Field sets vary with the kernel version.
+ */
+STATIC void
+asc_prt_scsi_cmnd(Scsi_Cmnd *s)
+{
+ printk("Scsi_Cmnd at addr %x\n", (unsigned) s);
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+ printk(
+" host %x, device %x, target %u, lun %u\n",
+ (unsigned) s->host, (unsigned) s->device, s->target, s->lun);
+#else /* version >= v1.3.0 */
+ printk(
+" host %x, device %x, target %u, lun %u, channel %u,\n",
+ (unsigned) s->host, (unsigned) s->device, s->target, s->lun,
+ s->channel);
+#endif /* version >= v1.3.0 */
+
+ asc_prt_hex(" CDB", s->cmnd, s->cmd_len);
+
+ printk(
+" use_sg %u, sglist_len %u, abort_reason %x\n",
+ s->use_sg, s->sglist_len, s->abort_reason);
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+ printk(
+" retries %d, allowed %d\n",
+ s->retries, s->allowed);
+#else /* version >= v1.3.89 */
+ printk(
+" serial_number %x, serial_number_at_timeout %x, retries %d, allowed %d\n",
+ (unsigned) s->serial_number, (unsigned) s->serial_number_at_timeout,
+ s->retries, s->allowed);
+#endif /* version >= v1.3.89 */
+
+ printk(
+" timeout_per_command %d, timeout_total %d, timeout %d\n",
+ s->timeout_per_command, s->timeout_total, s->timeout);
+
+ printk(
+" internal_timeout %u, flags %u, this_count %d\n",
+ s->internal_timeout, s->flags, s->this_count);
+
+ printk(
+" scsi_done %x, done %x, host_scribble %x, result %x\n",
+ (unsigned) s->scsi_done, (unsigned) s->done,
+ (unsigned) s->host_scribble, s->result);
+
+ printk(
+" tag %u, pid %u\n",
+ (unsigned) s->tag, (unsigned) s->pid);
+}
+
+/*
+ * asc_prt_asc_dvc_var()
+ *
+ * Debug dump of a narrow-board ASC_DVC_VAR structure.
+ */
+STATIC void
+asc_prt_asc_dvc_var(ASC_DVC_VAR *h)
+{
+ printk("ASC_DVC_VAR at addr %x\n", (unsigned) h);
+
+ printk(
+" iop_base %x, err_code %x, dvc_cntl %x, bug_fix_cntl %d,\n",
+ h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl);
+
+ printk(
+" bus_type %d, isr_callback %x, exe_callback %x, init_sdtr %x,\n",
+ h->bus_type, (unsigned) h->isr_callback, (unsigned) h->exe_callback,
+ (unsigned) h->init_sdtr);
+
+ printk(
+" sdtr_done %x, use_tagged_qng %x, unit_not_ready %x, chip_no %x,\n",
+ (unsigned) h->sdtr_done, (unsigned) h->use_tagged_qng,
+ (unsigned) h->unit_not_ready, (unsigned) h->chip_no);
+
+ printk(
+" queue_full_or_busy %x, start_motor %x, scsi_reset_wait %x, irq_no %x,\n",
+ (unsigned) h->queue_full_or_busy, (unsigned) h->start_motor,
+ (unsigned) h->scsi_reset_wait, (unsigned) h->irq_no);
+
+ printk(
+" is_in_int %x, max_total_qng %x, cur_total_qng %x, in_critical_cnt %x,\n",
+ (unsigned) h->is_in_int, (unsigned) h->max_total_qng,
+ (unsigned) h->cur_total_qng, (unsigned) h->in_critical_cnt);
+
+ printk(
+" last_q_shortage %x, init_state %x, no_scam %x, pci_fix_asyn_xfer %x,\n",
+ (unsigned) h->last_q_shortage, (unsigned) h->init_state,
+ (unsigned) h->no_scam, (unsigned) h->pci_fix_asyn_xfer);
+
+ printk(
+" cfg %x, saved_ptr2func %x\n",
+ (unsigned) h->cfg, (unsigned) h->saved_ptr2func);
+}
+
+/*
+ * asc_prt_asc_dvc_cfg()
+ *
+ * Debug dump of a narrow-board ASC_DVC_CFG structure.
+ */
+STATIC void
+asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h)
+{
+ printk("ASC_DVC_CFG at addr %x\n", (unsigned) h);
+
+ printk(
+" can_tagged_qng %x, cmd_qng_enabled %x, disc_enable %x, sdtr_enable %x,\n",
+ h->can_tagged_qng, h->cmd_qng_enabled, h->disc_enable,
+ h->sdtr_enable);
+
+ printk(
+" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, chip_version %d,\n",
+ h->chip_scsi_id, h->isa_dma_speed, h->isa_dma_channel,
+ h->chip_version);
+
+ printk(
+" pci_device_id %d, lib_serial_no %x, lib_version %x, mcode_date %x,\n",
+ h->pci_device_id, h->lib_serial_no, h->lib_version, h->mcode_date);
+
+ printk(
+" mcode_version %d, overrun_buf %x\n",
+ h->mcode_version, (unsigned) h->overrun_buf);
+}
+
+/*
+ * asc_prt_asc_scsi_q()
+ *
+ * Debug dump of an ASC_SCSI_Q request, including its scatter-gather
+ * list when one is attached.
+ */
+STATIC void
+asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
+{
+ ASC_SG_HEAD *sgp;
+ int i;
+
+ printk("ASC_SCSI_Q at addr %x\n", (unsigned) q);
+
+ printk(
+" target_ix %u, target_lun %u, srb_ptr %x, tag_code %u,\n",
+ q->q2.target_ix, q->q1.target_lun,
+ (unsigned) q->q2.srb_ptr, q->q2.tag_code);
+
+ printk(
+" data_addr %x, data_cnt %lu, sense_addr %x, sense_len %u,\n",
+ (unsigned) q->q1.data_addr, q->q1.data_cnt,
+ (unsigned) q->q1.sense_addr, q->q1.sense_len);
+
+ printk(
+" cdbptr %x, cdb_len %u, sg_head %x, sg_queue_cnt %u\n",
+ (unsigned) q->cdbptr, q->q2.cdb_len,
+ (unsigned) q->sg_head, q->q1.sg_queue_cnt);
+
+ /* Dump each scatter-gather entry, if a list is attached. */
+ if (q->sg_head) {
+ sgp = q->sg_head;
+ printk("ASC_SG_HEAD at addr %x\n", (unsigned) sgp);
+ printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, sgp->queue_cnt);
+ for (i = 0; i < sgp->entry_cnt; i++) {
+ printk(" [%u]: addr %x, bytes %lu\n",
+ i, (unsigned) sgp->sg_list[i].addr, sgp->sg_list[i].bytes);
+ }
+
+ }
+}
+
/*
 * asc_prt_asc_qdone_info()
 *
 * Debug helper: dump an ASC_QDONE_INFO structure (completion status of
 * a finished request) to the console.
 */
STATIC void
asc_prt_asc_qdone_info(ASC_QDONE_INFO *q)
{
    printk("ASC_QDONE_INFO at addr %x\n", (unsigned) q);
    printk(
" srb_ptr %x, target_ix %u, cdb_len %u, tag_code %u, done_stat %x\n",
        (unsigned) q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len,
        q->d2.tag_code, q->d3.done_stat);
    printk(
" host_stat %x, scsi_stat %x, scsi_msg %x\n",
        q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg);
}
+
+/*
+ * asc_prt_adv_dvc_var()
+ *
+ * Display an ADV_DVC_VAR structure.
+ */
+STATIC void
+asc_prt_adv_dvc_var(ADV_DVC_VAR *h)
+{
+ printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong) h);
+
+ printk(
+" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n",
+ (ulong) h->iop_base, h->err_code, (unsigned) h->ultra_able);
+
+ printk(
+" isr_callback 0x%x, sdtr_able 0x%x, wdtr_able 0x%x\n",
+ (unsigned) h->isr_callback, (unsigned) h->wdtr_able,
+ (unsigned) h->sdtr_able);
+
+ printk(
+" start_motor 0x%x, scsi_reset_wait 0x%x, irq_no 0x%x,\n",
+ (unsigned) h->start_motor,
+ (unsigned) h->scsi_reset_wait, (unsigned) h->irq_no);
+
+ printk(
+" max_host_qng 0x%x, cur_host_qng 0x%x, max_dvc_qng 0x%x\n",
+ (unsigned) h->max_host_qng, (unsigned) h->cur_host_qng,
+ (unsigned) h->max_dvc_qng);
+
+ printk(
+" no_scam 0x%x, tagqng_able 0x%x, chip_scsi_id 0x%x, cfg 0x%lx\n",
+ (unsigned) h->no_scam, (unsigned) h->tagqng_able,
+ (unsigned) h->chip_scsi_id, (ulong) h->cfg);
+
+}
+
/*
 * asc_prt_adv_dvc_cfg()
 *
 * Display an ADV_DVC_CFG structure.
 *
 * Debug helper: dumps wide-board device configuration fields to the
 * console with printk(); no side effects other than console output.
 */
STATIC void
asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h)
{
    printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong) h);

    printk(
" disc_enable 0x%x, termination 0x%x\n",
        h->disc_enable, h->termination);

    printk(
" chip_version 0x%x, mcode_date 0x%x\n",
        h->chip_version, h->mcode_date);

    printk(
" mcode_version 0x%x, pci_device_id 0x%x, lib_version 0x%x\n",
        h->mcode_version, h->pci_device_id, h->lib_version);

    printk(
" control_flag 0x%x, pci_slot_info 0x%x\n",
        h->control_flag, h->pci_slot_info);
}
+
/*
 * asc_prt_adv_scsi_req_q()
 *
 * Display an ADV_SCSI_REQ_Q structure.
 *
 * Debug helper: dumps the request fields, then walks and dumps every
 * ADV_SG_BLOCK attached to the request. Note the unusual loop below:
 * 'sg_ptr' alternates between a physical address (used only for the
 * NULL end-of-chain test) and a virtual address (used for the actual
 * dump) on each iteration.
 */
STATIC void
asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
{
    int i;
    struct asc_sg_block *sg_ptr;

    printk("ADV_SCSI_REQ_Q at addr %x\n", (unsigned) q);

    printk(
" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n",
        q->target_id, q->target_lun, q->srb_ptr, q->a_flag);

    printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n",
        q->cntl, q->data_addr, q->vdata_addr);

    printk(
" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n",
        q->data_cnt, q->sense_addr, q->sense_len);

    printk(
" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n",
        q->cdb_len, q->done_status, q->host_status, q->scsi_status);

    printk(
" vsense_addr 0x%lx, scsiq_ptr 0x%lx, ux_wk_data_cnt %lu\n",
        (ulong) q->vsense_addr, (ulong) q->scsiq_ptr,
        (ulong) q->ux_wk_data_cnt);

    printk(
" sg_list_ptr 0x%lx, sg_real_addr 0x%lx, sg_entry_cnt %u\n",
        (ulong) q->sg_list_ptr, (ulong) q->sg_real_addr, q->sg_entry_cnt);

    printk(
" ux_sg_ix %u, orig_sense_len %u\n",
        q->ux_sg_ix, q->orig_sense_len);

    /* Display the request's ADV_SG_BLOCK structures. */
    for (sg_ptr = q->sg_list_ptr, i = 0; sg_ptr != NULL;
         sg_ptr = sg_ptr->sg_ptr, i++) {
        /*
         * 'sg_ptr' is a physical address. Convert it to a virtual
         * address by indexing 'i' into the virtual address array
         * 'sg_list_ptr'.
         *
         * At the end of the each iteration of the loop 'sg_ptr' is
         * converted back into a physical address by setting 'sg_ptr'
         * to the next pointer 'sg_ptr->sg_ptr'.
         *
         * NOTE(review): this assumes the sg blocks are laid out
         * contiguously so that block i of the physical chain is
         * element i of the virtual array -- presumably guaranteed
         * by the allocator; confirm against the request builder.
         */
        sg_ptr = &(((ADV_SG_BLOCK *) (q->sg_list_ptr))[i]);
        asc_prt_adv_sgblock(i, sg_ptr);
    }
}
+
+/*
+ * asc_prt_adv_sgblock()
+ *
+ * Display an ADV_SG_BLOCK structure.
+ */
+STATIC void
+asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b)
+{
+ int i, s;
+
+ /* Calculate starting entry number for the current block. */
+ s = sgblockno * NO_OF_SG_PER_BLOCK;
+
+ printk(" ADV_SG_BLOCK at addr 0x%lx (sgblockno %lu)\n",
+ (ulong) b, (ulong) sgblockno);
+ printk(
+" first_entry_no %lu, last_entry_no %lu, sg_ptr 0x%lx\n",
+ (ulong) b->first_entry_no, (ulong) b->last_entry_no, (ulong) b->sg_ptr);
+ ASC_ASSERT(b->first_entry_no - s >= 0);
+ ASC_ASSERT(b->last_entry_no - s >= 0);
+ ASC_ASSERT(b->last_entry_no - s <= NO_OF_SG_PER_BLOCK);
+ ASC_ASSERT(b->first_entry_no - s <= NO_OF_SG_PER_BLOCK);
+ ASC_ASSERT(b->first_entry_no - s <= NO_OF_SG_PER_BLOCK);
+ ASC_ASSERT(b->first_entry_no - s <= b->last_entry_no - s);
+ for (i = b->first_entry_no - s; i <= b->last_entry_no - s; i++) {
+ printk(" [%lu]: sg_addr 0x%lx, sg_count 0x%lx\n",
+ (ulong) i, (ulong) b->sg_list[i].sg_addr,
+ (ulong) b->sg_list[i].sg_count);
+ }
+}
+
+/*
+ * asc_prt_hex()
+ *
+ * Print hexadecimal output in 4 byte groupings 32 bytes
+ * or 8 double-words per line.
+ */
+STATIC void
+asc_prt_hex(char *f, uchar *s, int l)
+{
+ int i;
+ int j;
+ int k;
+ int m;
+
+ printk("%s: (%d bytes)\n", f, l);
+
+ for (i = 0; i < l; i += 32) {
+
+ /* Display a maximum of 8 double-words per line. */
+ if ((k = (l - i) / 4) >= 8) {
+ k = 8;
+ m = 0;
+ } else {
+ m = (l - i) % 4 ;
+ }
+
+ for (j = 0; j < k; j++) {
+ printk(" %2.2X%2.2X%2.2X%2.2X",
+ (unsigned) s[i+(j*4)], (unsigned) s[i+(j*4)+1],
+ (unsigned) s[i+(j*4)+2], (unsigned) s[i+(j*4)+3]);
+ }
+
+ switch (m) {
+ case 0:
+ default:
+ break;
+ case 1:
+ printk(" %2.2X",
+ (unsigned) s[i+(j*4)]);
+ break;
+ case 2:
+ printk(" %2.2X%2.2X",
+ (unsigned) s[i+(j*4)],
+ (unsigned) s[i+(j*4)+1]);
+ break;
+ case 3:
+ printk(" %2.2X%2.2X%2.2X",
+ (unsigned) s[i+(j*4)+1],
+ (unsigned) s[i+(j*4)+2],
+ (unsigned) s[i+(j*4)+3]);
+ break;
+ }
+
+ printk("\n");
+ }
+}
+#endif /* ADVANSYS_DEBUG */
+
+#ifdef ADVANSYS_ASSERT
+/*
+ * interrupts_enabled()
+ *
+ * Return 1 if interrupts are enabled, otherwise return 0.
+ */
+STATIC int
+interrupts_enabled(void)
+{
+ long flags;
+
+ save_flags(flags);
+ if (flags & 0x0200) {
+ return ASC_TRUE;
+ } else {
+ return ASC_FALSE;
+ }
+}
+#endif /* ADVANSYS_ASSERT */
+
+
+/*
+ * --- Asc Library Functions
+ */
+
+ASC_INITFUNC(
+STATIC ushort
+AscGetEisaChipCfg(
+ PortAddr iop_base
+)
+)
+{
+ PortAddr eisa_cfg_iop;
+
+ eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) (ASC_EISA_CFG_IOP_MASK);
+ return (inpw(eisa_cfg_iop));
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscSetChipScsiID(
+ PortAddr iop_base,
+ uchar new_host_id
+)
+)
+{
+ ushort cfg_lsw;
+
+ if (AscGetChipScsiID(iop_base) == new_host_id) {
+ return (new_host_id);
+ }
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ cfg_lsw &= 0xF8FF;
+ cfg_lsw |= (ushort) ((new_host_id & ASC_MAX_TID) << 8);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetChipScsiID(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscGetChipScsiCtrl(
+ PortAddr iop_base
+)
+)
+{
+ uchar sc;
+
+ AscSetBank(iop_base, 1);
+ sc = inp(iop_base + IOP_REG_SC);
+ AscSetBank(iop_base, 0);
+ return (sc);
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscGetChipVersion(
+ PortAddr iop_base,
+ ushort bus_type
+)
+)
+{
+ if ((bus_type & ASC_IS_EISA) != 0) {
+ PortAddr eisa_iop;
+ uchar revision;
+ eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) ASC_EISA_REV_IOP_MASK;
+ revision = inp(eisa_iop);
+ return ((uchar) ((ASC_CHIP_MIN_VER_EISA - 1) + revision));
+ }
+ return (AscGetChipVerNo(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscGetChipBusType(
+ PortAddr iop_base
+)
+)
+{
+ ushort chip_ver;
+
+ chip_ver = AscGetChipVerNo(iop_base);
+ if (
+ (chip_ver >= ASC_CHIP_MIN_VER_VL)
+ && (chip_ver <= ASC_CHIP_MAX_VER_VL)
+) {
+ if (
+ ((iop_base & 0x0C30) == 0x0C30)
+ || ((iop_base & 0x0C50) == 0x0C50)
+) {
+ return (ASC_IS_EISA);
+ }
+ return (ASC_IS_VL);
+ }
+ if ((chip_ver >= ASC_CHIP_MIN_VER_ISA) &&
+ (chip_ver <= ASC_CHIP_MAX_VER_ISA)) {
+ if (chip_ver >= ASC_CHIP_MIN_VER_ISA_PNP) {
+ return (ASC_IS_ISAPNP);
+ }
+ return (ASC_IS_ISA);
+ } else if ((chip_ver >= ASC_CHIP_MIN_VER_PCI) &&
+ (chip_ver <= ASC_CHIP_MAX_VER_PCI)) {
+ return (ASC_IS_PCI);
+ }
+ return (0);
+}
+
/*
 * Load the microcode image 'mcode_buf' ('mcode_size' bytes) into chip
 * local RAM starting at word address 's_addr', record its size and
 * code-section checksum in the LRAM variables the microcode expects,
 * and return the checksum of the full loaded image.
 */
ASC_INITFUNC(
STATIC ulong
AscLoadMicroCode(
    PortAddr iop_base,
    ushort s_addr,
    ushort *mcode_buf,
    ushort mcode_size
)
)
{
    ulong chksum;
    ushort mcode_word_size;
    ushort mcode_chksum;

    /* LRAM is word-addressed; sizes below are in 16-bit words. */
    mcode_word_size = (ushort) (mcode_size >> 1);
    /* Clear the target region, then copy the image in. */
    AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size);
    AscMemWordCopyToLram(iop_base, s_addr, mcode_buf, mcode_word_size);
    /* Checksum of the whole image, returned to the caller. */
    chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size);
    /*
     * Checksum of the code section only, stored in LRAM for the
     * microcode's own integrity check.
     */
    mcode_chksum = (ushort) AscMemSumLramWord(iop_base,
        (ushort) ASC_CODE_SEC_BEG,
        (ushort) ((mcode_size - s_addr - (ushort) ASC_CODE_SEC_BEG) / 2));
    AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum);
    AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size);
    return (chksum);
}
+
+ASC_INITFUNC(
+STATIC int
+AscFindSignature(
+ PortAddr iop_base
+)
+)
+{
+ ushort sig_word;
+
+ if (AscGetChipSignatureByte(iop_base) == (uchar) ASC_1000_ID1B) {
+ sig_word = AscGetChipSignatureWord(iop_base);
+ if ((sig_word == (ushort) ASC_1000_ID0W) ||
+ (sig_word == (ushort) ASC_1000_ID0W_FIX)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
/* Non-zero once the ISA PNP "Wait For Key" sequence has been issued. */
STATIC uchar _isa_pnp_inited ASC_INITDATA = 0;
/*
 * Candidate I/O port base addresses probed by AscSearchIOPortAddr11(),
 * presumably in ascending order (TODO: confirm the ASC_IOADR_* values
 * preserve the ordering the search loop relies on).
 */
STATIC PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] ASC_INITDATA =
{
    0x100, ASC_IOADR_1, 0x120, ASC_IOADR_2, 0x140, ASC_IOADR_3, ASC_IOADR_4,
    ASC_IOADR_5, ASC_IOADR_6, ASC_IOADR_7, ASC_IOADR_8
};
+
+ASC_INITFUNC(
+STATIC PortAddr
+AscSearchIOPortAddr(
+ PortAddr iop_beg,
+ ushort bus_type
+)
+)
+{
+ if (bus_type & ASC_IS_VL) {
+ while ((iop_beg = AscSearchIOPortAddr11(iop_beg)) != 0) {
+ if (AscGetChipVersion(iop_beg, bus_type) <= ASC_CHIP_MAX_VER_VL) {
+ return (iop_beg);
+ }
+ }
+ return (0);
+ }
+ if (bus_type & ASC_IS_ISA) {
+ if (_isa_pnp_inited == 0) {
+ AscSetISAPNPWaitForKey();
+ _isa_pnp_inited++;
+ }
+ while ((iop_beg = AscSearchIOPortAddr11(iop_beg)) != 0) {
+ if ((AscGetChipVersion(iop_beg, bus_type) & ASC_CHIP_VER_ISA_BIT) != 0) {
+ return (iop_beg);
+ }
+ }
+ return (0);
+ }
+ if (bus_type & ASC_IS_EISA) {
+ if ((iop_beg = AscSearchIOPortAddrEISA(iop_beg)) != 0) {
+ return (iop_beg);
+ }
+ return (0);
+ }
+ return (0);
+}
+
+ASC_INITFUNC(
+STATIC PortAddr
+AscSearchIOPortAddr11(
+ PortAddr s_addr
+)
+)
+{
+ int i;
+ PortAddr iop_base;
+
+ for (i = 0; i < ASC_IOADR_TABLE_MAX_IX; i++) {
+ if (_asc_def_iop_base[i] > s_addr) {
+ break;
+ }
+ }
+ for (; i < ASC_IOADR_TABLE_MAX_IX; i++) {
+ iop_base = _asc_def_iop_base[i];
+ if (check_region(iop_base, ASC_IOADR_GAP) != 0) {
+ ASC_DBG1(1,
+ "AscSearchIOPortAddr11: check_region() failed I/O port %x\n",
+ iop_base);
+ continue;
+ }
+ ASC_DBG1(1, "AscSearchIOPortAddr11: probing I/O port %x\n", iop_base);
+ if (AscFindSignature(iop_base)) {
+ return (iop_base);
+ }
+ }
+ return (0);
+}
+
+ASC_INITFUNC(
+STATIC void
+AscToggleIRQAct(
+ PortAddr iop_base
+)
+)
+{
+ AscSetChipStatus(iop_base, CIW_IRQ_ACT);
+ AscSetChipStatus(iop_base, 0);
+ return;
+}
+
+ASC_INITFUNC(
+STATIC void
+AscSetISAPNPWaitForKey(
+ void)
+)
+{
+ outp(ASC_ISA_PNP_PORT_ADDR, 0x02);
+ outp(ASC_ISA_PNP_PORT_WRITE, 0x02);
+ return;
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscGetChipIRQ(
+ PortAddr iop_base,
+ ushort bus_type
+)
+)
+{
+ ushort cfg_lsw;
+ uchar chip_irq;
+
+ if ((bus_type & ASC_IS_EISA) != 0) {
+ cfg_lsw = AscGetEisaChipCfg(iop_base);
+ chip_irq = (uchar) (((cfg_lsw >> 8) & 0x07) + 10);
+ if ((chip_irq == 13) || (chip_irq > 15)) {
+ return (0);
+ }
+ return (chip_irq);
+ }
+ if ((bus_type & ASC_IS_VL) != 0) {
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ chip_irq = (uchar) (((cfg_lsw >> 2) & 0x07));
+ if ((chip_irq == 0) ||
+ (chip_irq == 4) ||
+ (chip_irq == 7)) {
+ return (0);
+ }
+ return ((uchar) (chip_irq + (ASC_MIN_IRQ_NO - 1)));
+ }
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ chip_irq = (uchar) (((cfg_lsw >> 2) & 0x03));
+ if (chip_irq == 3)
+ chip_irq += (uchar) 2;
+ return ((uchar) (chip_irq + ASC_MIN_IRQ_NO));
+}
+
/*
 * Program the chip's IRQ line for VL or ISA boards and return the IRQ
 * read back with AscGetChipIRQ(). Returns 0 for bus types this routine
 * cannot program (e.g. EISA, PCI). The VL path performs a two-phase
 * write, each latched with AscToggleIRQAct(); the write order matters.
 */
ASC_INITFUNC(
STATIC uchar
AscSetChipIRQ(
    PortAddr iop_base,
    uchar irq_no,
    ushort bus_type
)
)
{
    ushort cfg_lsw;

    if ((bus_type & ASC_IS_VL) != 0) {
        /* Map the system IRQ number to the chip's 3-bit code;
         * out-of-range requests are programmed as "no IRQ" (0). */
        if (irq_no != 0) {
            if ((irq_no < ASC_MIN_IRQ_NO) || (irq_no > ASC_MAX_IRQ_NO)) {
                irq_no = 0;
            } else {
                irq_no -= (uchar) ((ASC_MIN_IRQ_NO - 1));
            }
        }
        /* Phase 1: clear the IRQ field and set the enable bit. */
        cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFE3);
        cfg_lsw |= (ushort) 0x0010;
        AscSetChipCfgLsw(iop_base, cfg_lsw);
        AscToggleIRQAct(iop_base);
        /* Phase 2: write the IRQ code itself. */
        cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFE0);
        cfg_lsw |= (ushort) ((irq_no & 0x07) << 2);
        AscSetChipCfgLsw(iop_base, cfg_lsw);
        AscToggleIRQAct(iop_base);
        return (AscGetChipIRQ(iop_base, bus_type));
    }
    if ((bus_type & (ASC_IS_ISA)) != 0) {
        /* ISA: 2-bit IRQ code; IRQ 15 shares code 3 with IRQ 13's
         * slot (see the inverse mapping in AscGetChipIRQ()). */
        if (irq_no == 15)
            irq_no -= (uchar) 2;
        irq_no -= (uchar) ASC_MIN_IRQ_NO;
        cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFF3);
        cfg_lsw |= (ushort) ((irq_no & 0x03) << 2);
        AscSetChipCfgLsw(iop_base, cfg_lsw);
        return (AscGetChipIRQ(iop_base, bus_type));
    }
    return (0);
}
+
+ASC_INITFUNC(
+STATIC void
+AscEnableIsaDma(
+ uchar dma_channel
+)
+)
+{
+ if (dma_channel < 4) {
+ outp(0x000B, (ushort) (0xC0 | dma_channel));
+ outp(0x000A, dma_channel);
+ } else if (dma_channel < 8) {
+ outp(0x00D6, (ushort) (0xC0 | (dma_channel - 4)));
+ outp(0x00D4, (ushort) (dma_channel - 4));
+ }
+ return;
+}
+
/*
 * Handle a microcode halt: read the halt code from LRAM, service the
 * condition (SDTR negotiation, check condition, queue full, PCI async
 * fix enable/disable), then clear ASCV_HALTCODE_W so the microcode can
 * resume. Always returns 0.
 */
STATIC int
AscIsrChipHalted(
    REG ASC_DVC_VAR asc_ptr_type * asc_dvc
)
{
    EXT_MSG ext_msg;
    EXT_MSG out_msg;
    ushort halt_q_addr;
    int sdtr_accept;
    ushort int_halt_code;
    ASC_SCSI_BIT_ID_TYPE scsi_busy;
    ASC_SCSI_BIT_ID_TYPE target_id;
    PortAddr iop_base;
    uchar tag_code;
    uchar q_status;
    uchar halt_qp;
    uchar sdtr_data;
    uchar target_ix;
    uchar q_cntl, tid_no;
    uchar cur_dvc_qng;
    uchar asyn_sdtr;
    uchar scsi_status;
    asc_board_t *boardp;

    ASC_ASSERT(asc_dvc->drv_ptr != 0);
    boardp = (asc_board_t *) asc_dvc->drv_ptr;

    iop_base = asc_dvc->iop_base;
    int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W);

    /* Locate the queue the microcode halted on and its target. */
    halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B);
    halt_q_addr = ASC_QNO_TO_QADDR(halt_qp);
    target_ix = AscReadLramByte(iop_base,
        (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TARGET_IX));
    q_cntl = AscReadLramByte(iop_base,
        (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL));
    tid_no = ASC_TIX_TO_TID(target_ix);
    target_id = (uchar) ASC_TID_TO_TARGET_ID(tid_no);
    /* Async transfer value used when synchronous transfer is off. */
    if (asc_dvc->pci_fix_asyn_xfer & target_id) {

        asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB;
    } else {
        asyn_sdtr = 0;
    }
    if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) {
        /* Turn the PCI async-transfer workaround off for this target. */
        if (asc_dvc->pci_fix_asyn_xfer & target_id) {
            AscSetChipSDTR(iop_base, 0, tid_no);
            boardp->sdtr_data[tid_no] = 0;
        }
        AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
        return (0);
    } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) {
        /* Turn the PCI async-transfer workaround back on. */
        if (asc_dvc->pci_fix_asyn_xfer & target_id) {
            AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
            boardp->sdtr_data[tid_no] = asyn_sdtr;
        }
        AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
        return (0);
    } else if (int_halt_code == ASC_HALT_EXTMSG_IN) {
        /* Target sent an extended message; copy it out of LRAM. */
        AscMemWordCopyFromLram(iop_base,
            ASCV_MSGIN_BEG,
            (ushort *) & ext_msg,
            (ushort) (sizeof (EXT_MSG) >> 1));

        if (ext_msg.msg_type == MS_EXTEND &&
            ext_msg.msg_req == MS_SDTR_CODE &&
            ext_msg.msg_len == MS_SDTR_LEN) {
            /* SDTR message: clamp offset/period to what we support. */
            sdtr_accept = TRUE;
            if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) {

                sdtr_accept = FALSE;
                ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET;
            }
            if ((ext_msg.xfer_period <
                 asc_dvc->sdtr_period_tbl[asc_dvc->host_init_sdtr_index]) ||
                (ext_msg.xfer_period >
                 asc_dvc->sdtr_period_tbl[asc_dvc->max_sdtr_index])) {
                sdtr_accept = FALSE;
                ext_msg.xfer_period =
                    asc_dvc->sdtr_period_tbl[asc_dvc->host_init_sdtr_index];
            }
            if (sdtr_accept) {
                sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
                    ext_msg.req_ack_offset);
                if ((sdtr_data == 0xFF)) {
                    /* Unusable combination: fall back to async. */
                    q_cntl |= QC_MSG_OUT;
                    asc_dvc->init_sdtr &= ~target_id;
                    asc_dvc->sdtr_done &= ~target_id;
                    AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
                    boardp->sdtr_data[tid_no] = asyn_sdtr;
                }
            }
            if (ext_msg.req_ack_offset == 0) {
                /* Offset 0 means asynchronous transfers. */
                q_cntl &= ~QC_MSG_OUT;
                asc_dvc->init_sdtr &= ~target_id;
                asc_dvc->sdtr_done &= ~target_id;
                AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
            } else {
                if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
                    /* Target accepted our SDTR terms as-is. */
                    q_cntl &= ~QC_MSG_OUT;
                    asc_dvc->sdtr_done |= target_id;
                    asc_dvc->init_sdtr |= target_id;
                    asc_dvc->pci_fix_asyn_xfer &= ~target_id;
                    sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
                        ext_msg.req_ack_offset);
                    AscSetChipSDTR(iop_base, sdtr_data, tid_no);
                    boardp->sdtr_data[tid_no] = sdtr_data;
                } else {
                    /* Counter-propose our (possibly clamped) terms. */
                    q_cntl |= QC_MSG_OUT;
                    AscMsgOutSDTR(asc_dvc,
                        ext_msg.xfer_period,
                        ext_msg.req_ack_offset);
                    asc_dvc->pci_fix_asyn_xfer &= ~target_id;
                    sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
                        ext_msg.req_ack_offset);
                    AscSetChipSDTR(iop_base, sdtr_data, tid_no);
                    boardp->sdtr_data[tid_no] = sdtr_data;
                    asc_dvc->sdtr_done |= target_id;
                    asc_dvc->init_sdtr |= target_id;
                }
            }

            AscWriteLramByte(iop_base,
                (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
                q_cntl);
            AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
            return (0);
        } else if (ext_msg.msg_type == MS_EXTEND &&
                   ext_msg.msg_req == MS_WDTR_CODE &&
                   ext_msg.msg_len == MS_WDTR_LEN) {
            /* WDTR message: respond with 8-bit (narrow) width. */
            ext_msg.wdtr_width = 0;
            AscMemWordCopyToLram(iop_base,
                ASCV_MSGOUT_BEG,
                (ushort *) & ext_msg,
                (ushort) (sizeof (EXT_MSG) >> 1));
            q_cntl |= QC_MSG_OUT;
            AscWriteLramByte(iop_base,
                (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
                q_cntl);
            AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
            return (0);
        } else {
            /* Any other extended message is rejected. */
            ext_msg.msg_type = M1_MSG_REJECT;
            AscMemWordCopyToLram(iop_base,
                ASCV_MSGOUT_BEG,
                (ushort *) & ext_msg,
                (ushort) (sizeof (EXT_MSG) >> 1));
            q_cntl |= QC_MSG_OUT;
            AscWriteLramByte(iop_base,
                (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
                q_cntl);
            AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
            return (0);
        }
    } else if (int_halt_code == ASC_HALT_CHK_CONDITION) {
        /* Target returned CHECK CONDITION: set up a request sense. */
        q_cntl |= QC_REQ_SENSE;

        /* Renegotiate SDTR before the sense command if it was active. */
        if ((asc_dvc->init_sdtr & target_id) != 0) {

            asc_dvc->sdtr_done &= ~target_id;

            sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
            q_cntl |= QC_MSG_OUT;
            AscMsgOutSDTR(asc_dvc,
                asc_dvc->sdtr_period_tbl[(sdtr_data >> 4) &
                    (uchar) (asc_dvc->max_sdtr_index - 1)],
                (uchar) (sdtr_data & (uchar) ASC_SYN_MAX_OFFSET));
        }

        AscWriteLramByte(iop_base,
            (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
            q_cntl);

        /* Clear tag bits; disable disconnect for workaround targets. */
        tag_code = AscReadLramByte(iop_base,
            (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE));
        tag_code &= 0xDC;
        if (
            (asc_dvc->pci_fix_asyn_xfer & target_id)
            && !(asc_dvc->pci_fix_asyn_xfer_always & target_id)
) {

            tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT
                | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);

        }
        AscWriteLramByte(iop_base,
            (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE),
            tag_code);

        /* Re-queue the request so the sense command runs. */
        q_status = AscReadLramByte(iop_base,
            (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS));
        q_status |= (QS_READY | QS_BUSY);
        AscWriteLramByte(iop_base,
            (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS),
            q_status);

        /* Allow the microcode to select the target again. */
        scsi_busy = AscReadLramByte(iop_base,
            (ushort) ASCV_SCSIBUSY_B);
        scsi_busy &= ~target_id;
        AscWriteLramByte(iop_base, (ushort) ASCV_SCSIBUSY_B, scsi_busy);

        AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
        return (0);
    } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) {
        /* Target rejected our SDTR: revert to async for it. */
        AscMemWordCopyFromLram(iop_base,
            ASCV_MSGOUT_BEG,
            (ushort *) & out_msg,
            (ushort) (sizeof (EXT_MSG) >> 1));

        if ((out_msg.msg_type == MS_EXTEND) &&
            (out_msg.msg_len == MS_SDTR_LEN) &&
            (out_msg.msg_req == MS_SDTR_CODE)) {

            asc_dvc->init_sdtr &= ~target_id;
            asc_dvc->sdtr_done &= ~target_id;
            AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
            boardp->sdtr_data[tid_no] = asyn_sdtr;
        }
        q_cntl &= ~QC_MSG_OUT;
        AscWriteLramByte(iop_base,
            (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
            q_cntl);
        AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
        return (0);
    } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) {
        /* Target reported BUSY or QUEUE FULL. */
        scsi_status = AscReadLramByte(iop_base,
            (ushort) ((ushort) halt_q_addr + (ushort) ASC_SCSIQ_SCSI_STATUS));
        cur_dvc_qng = AscReadLramByte(iop_base,
            (ushort) ((ushort) ASC_QADR_BEG + (ushort) target_ix));
        if ((cur_dvc_qng > 0) &&
            (asc_dvc->cur_dvc_qng[tid_no] > 0)) {

            /* Stop issuing new commands to the target for now. */
            scsi_busy = AscReadLramByte(iop_base,
                (ushort) ASCV_SCSIBUSY_B);
            scsi_busy |= target_id;
            AscWriteLramByte(iop_base,
                (ushort) ASCV_SCSIBUSY_B, scsi_busy);
            asc_dvc->queue_full_or_busy |= target_id;

            if (scsi_status == SS_QUEUE_FULL) {
                if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) {
                    cur_dvc_qng -= 1;
                    asc_dvc->max_dvc_qng[tid_no] = cur_dvc_qng;

                    AscWriteLramByte(iop_base,
                        (ushort) ((ushort) ASCV_MAX_DVC_QNG_BEG +
                            (ushort) tid_no),
                        cur_dvc_qng);

                    /*
                     * Set the device queue depth to the number of
                     * active requests when the QUEUE FULL condition
                     * was encountered.
                     */
                    boardp->queue_full |= target_id;
                    boardp->queue_full_cnt[tid_no] = cur_dvc_qng;
#if ASC_QUEUE_FLOW_CONTROL
                    if (boardp->device[tid_no] != NULL &&
                        boardp->device[tid_no]->queue_curr_depth >
                        cur_dvc_qng) {
                        boardp->device[tid_no]->queue_curr_depth =
                            cur_dvc_qng;
                    }
#endif /* ASC_QUEUE_FLOW_CONTROL */
                }
            }
        }
        AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
        return (0);
    }
    return (0);
}
+
/*
 * Copy the completion information of the queue at LRAM address
 * 'q_addr' into 'scsiq', masking the remaining transfer count with
 * 'max_dma_count'. Returns the scatter-gather queue count packed in
 * the high byte of the queue's control word.
 */
STATIC uchar
_AscCopyLramScsiDoneQ(
    PortAddr iop_base,
    ushort q_addr,
    REG ASC_QDONE_INFO * scsiq,
    ulong max_dma_count
)
{
    ushort _val;
    uchar sg_queue_cnt;

    /* Bulk-copy the ASC_SCSIQ_2 + ASC_SCSIQ_3 done-info region. */
    DvcGetQinfo(iop_base,
        (ushort) (q_addr + (ushort) ASC_SCSIQ_DONE_INFO_BEG),
        (ushort *) scsiq,
        (ushort) ((sizeof (ASC_SCSIQ_2) + sizeof (ASC_SCSIQ_3)) / 2));
    /* Each LRAM word below packs two byte-wide fields. */
    _val = AscReadLramWord(iop_base,
        (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS));
    scsiq->q_status = (uchar) _val;
    scsiq->q_no = (uchar) (_val >> 8);
    _val = AscReadLramWord(iop_base,
        (ushort) (q_addr + (ushort) ASC_SCSIQ_B_CNTL));
    scsiq->cntl = (uchar) _val;
    sg_queue_cnt = (uchar) (_val >> 8);
    _val = AscReadLramWord(iop_base,
        (ushort) (q_addr + (ushort) ASC_SCSIQ_B_SENSE_LEN));
    scsiq->sense_len = (uchar) _val;
    scsiq->extra_bytes = (uchar) (_val >> 8);
    scsiq->remain_bytes = AscReadLramWord(iop_base,
        (ushort) (q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_CNT));
    /* Clamp to the board's addressable DMA range. */
    scsiq->remain_bytes &= max_dma_count;
    return (sg_queue_cnt);
}
+
/*
 * Service one entry from the microcode's done queue.
 *
 * Return values:
 *   0    - done queue is empty.
 *   1    - a request completed and its callback (if any) was invoked.
 *   0x11 - entry consumed but discarded (no srb_ptr, or aborted).
 *   0x80 - fatal queue error; library error code has been set.
 */
STATIC int
AscIsrQDone(
    REG ASC_DVC_VAR asc_ptr_type * asc_dvc
)
{
    uchar next_qp;
    uchar n_q_used;
    uchar sg_list_qp;
    uchar sg_queue_cnt;
    uchar q_cnt;
    uchar done_q_tail;
    uchar tid_no;
    ASC_SCSI_BIT_ID_TYPE scsi_busy;
    ASC_SCSI_BIT_ID_TYPE target_id;
    PortAddr iop_base;
    ushort q_addr;
    ushort sg_q_addr;
    uchar cur_target_qng;
    ASC_QDONE_INFO scsiq_buf;
    REG ASC_QDONE_INFO *scsiq;
    int false_overrun;
    ASC_ISR_CALLBACK asc_isr_callback;

    iop_base = asc_dvc->iop_base;
    asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
    n_q_used = 1;
    scsiq = (ASC_QDONE_INFO *) & scsiq_buf;
    /* Follow the done-queue tail's forward link to the next entry. */
    done_q_tail = (uchar) AscGetVarDoneQTail(iop_base);
    q_addr = ASC_QNO_TO_QADDR(done_q_tail);
    next_qp = AscReadLramByte(iop_base,
        (ushort) (q_addr + (ushort) ASC_SCSIQ_B_FWD));
    if (next_qp != ASC_QLINK_END) {
        AscPutVarDoneQTail(iop_base, next_qp);
        q_addr = ASC_QNO_TO_QADDR(next_qp);
        sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq,
            asc_dvc->max_dma_count);
        /* Mark the queue entry free for reuse by the microcode. */
        AscWriteLramByte(iop_base,
            (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
            (uchar) (scsiq->q_status & (uchar) ~ (QS_READY | QS_ABORTED)));
        tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix);
        target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix);
        if ((scsiq->cntl & QC_SG_HEAD) != 0) {
            /* Free each chained scatter-gather queue as well. */
            sg_q_addr = q_addr;
            sg_list_qp = next_qp;
            for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) {
                sg_list_qp = AscReadLramByte(iop_base,
                    (ushort) (sg_q_addr + (ushort) ASC_SCSIQ_B_FWD));
                sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp);
                if (sg_list_qp == ASC_QLINK_END) {
                    /* Chain shorter than its count: corrupted links. */
                    AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SG_Q_LINKS);
                    scsiq->d3.done_stat = QD_WITH_ERROR;
                    scsiq->d3.host_stat = QHSTA_D_QDONE_SG_LIST_CORRUPTED;
                    goto FATAL_ERR_QDONE;
                }
                AscWriteLramByte(iop_base,
                    (ushort) (sg_q_addr + (ushort) ASC_SCSIQ_B_STATUS),
                    QS_FREE);
            }
            n_q_used = sg_queue_cnt + 1;
            AscPutVarDoneQTail(iop_base, sg_list_qp);
        }
        if (asc_dvc->queue_full_or_busy & target_id) {
            /* Target was throttled; resume once below its limit. */
            cur_target_qng = AscReadLramByte(iop_base,
                (ushort) ((ushort) ASC_QADR_BEG + (ushort) scsiq->d2.target_ix));
            if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) {
                scsi_busy = AscReadLramByte(iop_base,
                    (ushort) ASCV_SCSIBUSY_B);
                scsi_busy &= ~target_id;
                AscWriteLramByte(iop_base,
                    (ushort) ASCV_SCSIBUSY_B, scsi_busy);
                asc_dvc->queue_full_or_busy &= ~target_id;
            }
        }
        /* Account for the freed queue entries. */
        if (asc_dvc->cur_total_qng >= n_q_used) {
            asc_dvc->cur_total_qng -= n_q_used;
            if (asc_dvc->cur_dvc_qng[tid_no] != 0) {
                asc_dvc->cur_dvc_qng[tid_no]--;
            }
        } else {
            AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG);
            scsiq->d3.done_stat = QD_WITH_ERROR;
            goto FATAL_ERR_QDONE;
        }
        if ((scsiq->d2.srb_ptr == 0UL) ||
            ((scsiq->q_status & QS_ABORTED) != 0)) {
            /* No owner or aborted: consume silently. */
            return (0x11);
        } else if (scsiq->q_status == QS_DONE) {
            false_overrun = FALSE;
            if (scsiq->extra_bytes != 0) {
                scsiq->remain_bytes += (ulong) scsiq->extra_bytes;
            }
            if (scsiq->d3.done_stat == QD_WITH_ERROR) {
                if (scsiq->d3.host_stat == QHSTA_M_DATA_OVER_RUN) {
                    /* An overrun with no data phase is not an error. */
                    if ((scsiq->cntl & (QC_DATA_IN | QC_DATA_OUT)) == 0) {
                        scsiq->d3.done_stat = QD_NO_ERROR;
                        scsiq->d3.host_stat = QHSTA_NO_ERROR;
                    } else if (false_overrun) {
                        scsiq->d3.done_stat = QD_NO_ERROR;
                        scsiq->d3.host_stat = QHSTA_NO_ERROR;
                    }
                } else if (scsiq->d3.host_stat ==
                           QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) {
                    /* Bus hung: pulse SCSI reset and clear the chip. */
                    AscStopChip(iop_base);
                    AscSetChipControl(iop_base,
                        (uchar) (CC_SCSI_RESET | CC_HALT));
                    DvcDelayNanoSecond(asc_dvc, 60000);
                    AscSetChipControl(iop_base, CC_HALT);
                    AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
                    AscSetChipStatus(iop_base, 0);
                    AscSetChipControl(iop_base, 0);
                }
            }
            if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
                (*asc_isr_callback) (asc_dvc, scsiq);
            } else {
                /* Internal START STOP UNIT: update motor bookkeeping. */
                if ((AscReadLramByte(iop_base,
                     (ushort) (q_addr + (ushort) ASC_SCSIQ_CDB_BEG)) ==
                     SCSICMD_StartStopUnit)) {
                    asc_dvc->unit_not_ready &= ~target_id;
                    if (scsiq->d3.done_stat != QD_NO_ERROR) {
                        asc_dvc->start_motor &= ~target_id;
                    }
                }
            }
            return (1);
        } else {
            AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS);
          FATAL_ERR_QDONE:
            if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
                (*asc_isr_callback) (asc_dvc, scsiq);
            }
            return (0x80);
        }
    }
    return (0);
}
+
/*
 * Narrow-board interrupt service routine. Guards against re-entry and
 * against running inside a library critical section, handles a latched
 * SCSI bus reset, acknowledges the chip interrupt, services a halted
 * microcode and/or the done queue, then restores the chip's LRAM
 * address and control register.
 *
 * Returns TRUE if an interrupt was pending, FALSE if not, or ERR on
 * re-entry, critical-section violation, or a fatal queue error.
 */
STATIC int
AscISR(
    REG ASC_DVC_VAR asc_ptr_type * asc_dvc
)
{
    ASC_CS_TYPE chipstat;
    PortAddr iop_base;
    ushort saved_ram_addr;
    uchar ctrl_reg;
    uchar saved_ctrl_reg;
    int int_pending;
    int status;
    uchar host_flag;

    iop_base = asc_dvc->iop_base;
    int_pending = FALSE;
    /* Refuse to run before the microcode is loaded and a callback set. */
    if (((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0)
        || (asc_dvc->isr_callback == 0)
) {
        return (ERR);
    }
    if (asc_dvc->in_critical_cnt != 0) {
        AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
        return (ERR);
    }
    if (asc_dvc->is_in_int) {
        AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
        return (ERR);
    }
    asc_dvc->is_in_int = TRUE;
    /* Save the control register, masking bits we must not restore. */
    ctrl_reg = AscGetChipControl(iop_base);
    saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
        CC_SINGLE_STEP | CC_DIAG | CC_TEST));
    chipstat = AscGetChipStatus(iop_base);
    if (chipstat & CSW_SCSI_RESET_LATCH) {
        /* A SCSI bus reset was latched; VL/EISA boards handle this
         * elsewhere, so only clear it here for other bus types. */
        if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) {
            int_pending = TRUE;
            asc_dvc->sdtr_done = 0;
            saved_ctrl_reg &= (uchar) (~CC_HALT);
            /* Wait for the reset to deassert before clearing it. */
            while (AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) ;
            AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT));
            AscSetChipControl(iop_base, CC_HALT);
            AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
            AscSetChipStatus(iop_base, 0);
            chipstat = AscGetChipStatus(iop_base);
        }
    }
    /* Preserve the LRAM address register across our LRAM accesses,
     * and tell the microcode the host is inside the ISR. */
    saved_ram_addr = AscGetChipLramAddr(iop_base);
    host_flag = AscReadLramByte(iop_base,
        ASCV_HOST_FLAG_B) & (uchar) (~ASC_HOST_FLAG_IN_ISR);
    AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
        (uchar) (host_flag | (uchar) ASC_HOST_FLAG_IN_ISR));
    if ((chipstat & CSW_INT_PENDING)
        || (int_pending)
) {
        AscAckInterrupt(iop_base);
        int_pending = TRUE;
        if ((chipstat & CSW_HALTED) &&
            (ctrl_reg & CC_SINGLE_STEP)) {
            /* Microcode halted deliberately: service the halt code. */
            if (AscIsrChipHalted(asc_dvc) == ERR) {
                goto ISR_REPORT_QDONE_FATAL_ERROR;
            } else {
                saved_ctrl_reg &= (uchar) (~CC_HALT);
            }
        } else {
          ISR_REPORT_QDONE_FATAL_ERROR:
            /* Drain the done queue; stop on completion or error. */
            if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
                while (((status = AscIsrQDone(asc_dvc)) & 0x01) != 0) {
                }
            } else {
                do {
                    if ((status = AscIsrQDone(asc_dvc)) == 1) {
                        break;
                    }
                } while (status == 0x11);
            }
            if ((status & 0x80) != 0)
                int_pending = ERR;
        }
    }
    /* Restore chip state in reverse order of modification. */
    AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
    AscSetChipLramAddr(iop_base, saved_ram_addr);
    AscSetChipControl(iop_base, saved_ctrl_reg);
    asc_dvc->is_in_int = FALSE;
    return (int_pending);
}
+
+STATIC uchar _asc_mcode_buf[] ASC_INITDATA =
+{
+ 0x01, 0x03, 0x01, 0x19, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x91, 0x10, 0x0A, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0x80, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x23, 0x00, 0x24, 0x00, 0x00, 0x00, 0x07, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x88, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x73, 0x48, 0x04, 0x36, 0x00, 0x00, 0xA2, 0xC2, 0x00, 0x80, 0x73, 0x03, 0x23, 0x36, 0x40,
+ 0xB6, 0x00, 0x36, 0x00, 0x05, 0xD6, 0x0C, 0xD2, 0x12, 0xDA, 0x00, 0xA2, 0xC2, 0x00, 0x92, 0x80,
+ 0x1E, 0x98, 0x50, 0x00, 0xF5, 0x00, 0x48, 0x98, 0xDF, 0x23, 0x36, 0x60, 0xB6, 0x00, 0x92, 0x80,
+ 0x4F, 0x00, 0xF5, 0x00, 0x48, 0x98, 0xEF, 0x23, 0x36, 0x60, 0xB6, 0x00, 0x92, 0x80, 0x80, 0x62,
+ 0x92, 0x80, 0x00, 0x46, 0x17, 0xEE, 0x13, 0xEA, 0x02, 0x01, 0x09, 0xD8, 0xCD, 0x04, 0x4D, 0x00,
+ 0x00, 0xA3, 0xD6, 0x00, 0xA6, 0x97, 0x7F, 0x23, 0x04, 0x61, 0x84, 0x01, 0xE6, 0x84, 0xD2, 0xC1,
+ 0x80, 0x73, 0xCD, 0x04, 0x4D, 0x00, 0x00, 0xA3, 0xE2, 0x01, 0xA6, 0x97, 0xCE, 0x81, 0x00, 0x33,
+ 0x02, 0x00, 0xC0, 0x88, 0x80, 0x73, 0x80, 0x77, 0x00, 0x01, 0x01, 0xA1, 0x02, 0x01, 0x4F, 0x00,
+ 0x84, 0x97, 0x07, 0xA6, 0x0C, 0x01, 0x00, 0x33, 0x03, 0x00, 0xC0, 0x88, 0x03, 0x03, 0x03, 0xDE,
+ 0x00, 0x33, 0x05, 0x00, 0xC0, 0x88, 0xCE, 0x00, 0x69, 0x60, 0xCE, 0x00, 0x02, 0x03, 0x4A, 0x60,
+ 0x00, 0xA2, 0x80, 0x01, 0x80, 0x63, 0x07, 0xA6, 0x2C, 0x01, 0x80, 0x81, 0x03, 0x03, 0x80, 0x63,
+ 0xE2, 0x00, 0x07, 0xA6, 0x3C, 0x01, 0x00, 0x33, 0x04, 0x00, 0xC0, 0x88, 0x03, 0x07, 0x02, 0x01,
+ 0x04, 0xCA, 0x0D, 0x23, 0x68, 0x98, 0x4D, 0x04, 0x04, 0x85, 0x05, 0xD8, 0x0D, 0x23, 0x68, 0x98,
+ 0xCD, 0x04, 0x15, 0x23, 0xF6, 0x88, 0xFB, 0x23, 0x02, 0x61, 0x82, 0x01, 0x80, 0x63, 0x02, 0x03,
+ 0x06, 0xA3, 0x6A, 0x01, 0x00, 0x33, 0x0A, 0x00, 0xC0, 0x88, 0x4E, 0x00, 0x07, 0xA3, 0x76, 0x01,
+ 0x00, 0x33, 0x0B, 0x00, 0xC0, 0x88, 0xCD, 0x04, 0x36, 0x2D, 0x00, 0x33, 0x1A, 0x00, 0xC0, 0x88,
+ 0x50, 0x04, 0x90, 0x81, 0x06, 0xAB, 0x8A, 0x01, 0x90, 0x81, 0x4E, 0x00, 0x07, 0xA3, 0x9A, 0x01,
+ 0x50, 0x00, 0x00, 0xA3, 0x44, 0x01, 0x00, 0x05, 0x84, 0x81, 0x46, 0x97, 0x02, 0x01, 0x05, 0xC6,
+ 0x04, 0x23, 0xA0, 0x01, 0x15, 0x23, 0xA1, 0x01, 0xC6, 0x81, 0xFD, 0x23, 0x02, 0x61, 0x82, 0x01,
+ 0x0A, 0xDA, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA0, 0xBC, 0x01, 0x80, 0x63, 0xCD, 0x04, 0x36, 0x2D,
+ 0x00, 0x33, 0x1B, 0x00, 0xC0, 0x88, 0x06, 0x23, 0x68, 0x98, 0xCD, 0x04, 0xE6, 0x84, 0x06, 0x01,
+ 0x00, 0xA2, 0xDC, 0x01, 0x57, 0x60, 0x00, 0xA0, 0xE2, 0x01, 0xE6, 0x84, 0x80, 0x23, 0xA0, 0x01,
+ 0xE6, 0x84, 0x80, 0x73, 0x4B, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x08, 0x02, 0x04, 0x01, 0x0C, 0xDE,
+ 0x02, 0x01, 0x03, 0xCC, 0x4F, 0x00, 0x84, 0x97, 0x04, 0x82, 0x08, 0x23, 0x02, 0x41, 0x82, 0x01,
+ 0x4F, 0x00, 0x62, 0x97, 0x48, 0x04, 0x84, 0x80, 0xF0, 0x97, 0x00, 0x46, 0x56, 0x00, 0x03, 0xC0,
+ 0x01, 0x23, 0xE8, 0x00, 0x81, 0x73, 0x06, 0x29, 0x03, 0x42, 0x06, 0xE2, 0x03, 0xEE, 0x67, 0xEB,
+ 0x11, 0x23, 0xF6, 0x88, 0x04, 0x98, 0xF4, 0x80, 0x80, 0x73, 0x80, 0x77, 0x07, 0xA4, 0x32, 0x02,
+ 0x7C, 0x95, 0x06, 0xA6, 0x3C, 0x02, 0x03, 0xA6, 0x4C, 0x04, 0xC0, 0x88, 0x04, 0x01, 0x03, 0xD8,
+ 0xB2, 0x98, 0x6A, 0x96, 0x4E, 0x82, 0xFE, 0x95, 0x80, 0x67, 0x83, 0x03, 0x80, 0x63, 0xB6, 0x2D,
+ 0x02, 0xA6, 0x78, 0x02, 0x07, 0xA6, 0x66, 0x02, 0x06, 0xA6, 0x6A, 0x02, 0x03, 0xA6, 0x6E, 0x02,
+ 0x00, 0x33, 0x10, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0x50, 0x82, 0x60, 0x96, 0x50, 0x82, 0x04, 0x23,
+ 0xA0, 0x01, 0x14, 0x23, 0xA1, 0x01, 0x3C, 0x84, 0x04, 0x01, 0x0C, 0xDC, 0xE0, 0x23, 0x25, 0x61,
+ 0xEF, 0x00, 0x14, 0x01, 0x4F, 0x04, 0xA8, 0x01, 0x6F, 0x00, 0xA5, 0x01, 0x03, 0x23, 0xA4, 0x01,
+ 0x06, 0x23, 0x9C, 0x01, 0x24, 0x2B, 0x1C, 0x01, 0x02, 0xA6, 0xB6, 0x02, 0x07, 0xA6, 0x66, 0x02,
+ 0x06, 0xA6, 0x6A, 0x02, 0x03, 0xA6, 0x20, 0x04, 0x01, 0xA6, 0xC0, 0x02, 0x00, 0xA6, 0xC0, 0x02,
+ 0x00, 0x33, 0x12, 0x00, 0xC0, 0x88, 0x00, 0x0E, 0x80, 0x63, 0x00, 0x43, 0x00, 0xA0, 0x98, 0x02,
+ 0x4D, 0x04, 0x04, 0x01, 0x0B, 0xDC, 0xE7, 0x23, 0x04, 0x61, 0x84, 0x01, 0x10, 0x31, 0x12, 0x35,
+ 0x14, 0x01, 0xEC, 0x00, 0x6C, 0x38, 0x00, 0x3F, 0x00, 0x00, 0xF6, 0x82, 0x18, 0x23, 0x04, 0x61,
+ 0x18, 0xA0, 0xEE, 0x02, 0x04, 0x01, 0x9C, 0xC8, 0x00, 0x33, 0x1F, 0x00, 0xC0, 0x88, 0x08, 0x31,
+ 0x0A, 0x35, 0x0C, 0x39, 0x0E, 0x3D, 0x7E, 0x98, 0xB6, 0x2D, 0x01, 0xA6, 0x20, 0x03, 0x00, 0xA6,
+ 0x20, 0x03, 0x07, 0xA6, 0x18, 0x03, 0x06, 0xA6, 0x1C, 0x03, 0x03, 0xA6, 0x20, 0x04, 0x02, 0xA6,
+ 0x78, 0x02, 0x00, 0x33, 0x33, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0xFA, 0x82, 0x60, 0x96, 0xFA, 0x82,
+ 0x82, 0x98, 0x80, 0x42, 0x7E, 0x98, 0x60, 0xE4, 0x04, 0x01, 0x29, 0xC8, 0x31, 0x05, 0x07, 0x01,
+ 0x00, 0xA2, 0x60, 0x03, 0x00, 0x43, 0x87, 0x01, 0x05, 0x05, 0x86, 0x98, 0x7E, 0x98, 0x00, 0xA6,
+ 0x22, 0x03, 0x07, 0xA6, 0x58, 0x03, 0x03, 0xA6, 0x3C, 0x04, 0x06, 0xA6, 0x5C, 0x03, 0x01, 0xA6,
+ 0x22, 0x03, 0x00, 0x33, 0x25, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0x3E, 0x83, 0x60, 0x96, 0x3E, 0x83,
+ 0x04, 0x01, 0x0C, 0xCE, 0x03, 0xC8, 0x00, 0x33, 0x42, 0x00, 0xC0, 0x88, 0x00, 0x01, 0x05, 0x05,
+ 0xFF, 0xA2, 0x7E, 0x03, 0xB1, 0x01, 0x08, 0x23, 0xB2, 0x01, 0x3A, 0x83, 0x05, 0x05, 0x15, 0x01,
+ 0x00, 0xA2, 0x9E, 0x03, 0xEC, 0x00, 0x6E, 0x00, 0x95, 0x01, 0x6C, 0x38, 0x00, 0x3F, 0x00, 0x00,
+ 0x01, 0xA6, 0x9A, 0x03, 0x00, 0xA6, 0x9A, 0x03, 0x12, 0x84, 0x80, 0x42, 0x7E, 0x98, 0x01, 0xA6,
+ 0xA8, 0x03, 0x00, 0xA6, 0xC0, 0x03, 0x12, 0x84, 0xA6, 0x98, 0x80, 0x42, 0x01, 0xA6, 0xA8, 0x03,
+ 0x07, 0xA6, 0xB6, 0x03, 0xD8, 0x83, 0x7C, 0x95, 0xAC, 0x83, 0x00, 0x33, 0x2F, 0x00, 0xC0, 0x88,
+ 0xA6, 0x98, 0x80, 0x42, 0x00, 0xA6, 0xC0, 0x03, 0x07, 0xA6, 0xCE, 0x03, 0xD8, 0x83, 0x7C, 0x95,
+ 0xC4, 0x83, 0x00, 0x33, 0x26, 0x00, 0xC0, 0x88, 0x38, 0x2B, 0x80, 0x32, 0x80, 0x36, 0x04, 0x23,
+ 0xA0, 0x01, 0x12, 0x23, 0xA1, 0x01, 0x12, 0x84, 0x06, 0xF0, 0x06, 0xA4, 0xF6, 0x03, 0x80, 0x6B,
+ 0x05, 0x23, 0x83, 0x03, 0x80, 0x63, 0x03, 0xA6, 0x10, 0x04, 0x07, 0xA6, 0x08, 0x04, 0x06, 0xA6,
+ 0x0C, 0x04, 0x00, 0x33, 0x17, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0xF6, 0x83, 0x60, 0x96, 0xF6, 0x83,
+ 0x20, 0x84, 0x06, 0xF0, 0x06, 0xA4, 0x20, 0x04, 0x80, 0x6B, 0x05, 0x23, 0x83, 0x03, 0x80, 0x63,
+ 0xB6, 0x2D, 0x03, 0xA6, 0x3C, 0x04, 0x07, 0xA6, 0x34, 0x04, 0x06, 0xA6, 0x38, 0x04, 0x00, 0x33,
+ 0x30, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0x20, 0x84, 0x60, 0x96, 0x20, 0x84, 0x1D, 0x01, 0x06, 0xCC,
+ 0x00, 0x33, 0x00, 0x84, 0xC0, 0x20, 0x00, 0x23, 0xEA, 0x00, 0x81, 0x62, 0xA2, 0x0D, 0x80, 0x63,
+ 0x07, 0xA6, 0x5A, 0x04, 0x00, 0x33, 0x18, 0x00, 0xC0, 0x88, 0x03, 0x03, 0x80, 0x63, 0xA3, 0x01,
+ 0x07, 0xA4, 0x64, 0x04, 0x23, 0x01, 0x00, 0xA2, 0x86, 0x04, 0x0A, 0xA0, 0x76, 0x04, 0xE0, 0x00,
+ 0x00, 0x33, 0x1D, 0x00, 0xC0, 0x88, 0x0B, 0xA0, 0x82, 0x04, 0xE0, 0x00, 0x00, 0x33, 0x1E, 0x00,
+ 0xC0, 0x88, 0x42, 0x23, 0xF6, 0x88, 0x00, 0x23, 0x22, 0xA3, 0xE6, 0x04, 0x08, 0x23, 0x22, 0xA3,
+ 0xA2, 0x04, 0x28, 0x23, 0x22, 0xA3, 0xAE, 0x04, 0x02, 0x23, 0x22, 0xA3, 0xC4, 0x04, 0x42, 0x23,
+ 0xF6, 0x88, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA0, 0xAE, 0x04, 0x45, 0x23, 0xF6, 0x88, 0x04, 0x98,
+ 0x00, 0xA2, 0xC0, 0x04, 0xB2, 0x98, 0x00, 0x33, 0x00, 0x82, 0xC0, 0x20, 0x81, 0x62, 0xF0, 0x81,
+ 0x47, 0x23, 0xF6, 0x88, 0x04, 0x01, 0x0B, 0xDE, 0x04, 0x98, 0xB2, 0x98, 0x00, 0x33, 0x00, 0x81,
+ 0xC0, 0x20, 0x81, 0x62, 0x14, 0x01, 0x00, 0xA0, 0x08, 0x02, 0x43, 0x23, 0xF6, 0x88, 0x04, 0x23,
+ 0xA0, 0x01, 0x44, 0x23, 0xA1, 0x01, 0x80, 0x73, 0x4D, 0x00, 0x03, 0xA3, 0xF4, 0x04, 0x00, 0x33,
+ 0x27, 0x00, 0xC0, 0x88, 0x04, 0x01, 0x04, 0xDC, 0x02, 0x23, 0xA2, 0x01, 0x04, 0x23, 0xA0, 0x01,
+ 0x04, 0x98, 0x26, 0x95, 0x4B, 0x00, 0xF6, 0x00, 0x4F, 0x04, 0x4F, 0x00, 0x00, 0xA3, 0x22, 0x05,
+ 0x00, 0x05, 0x76, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x1C, 0x05, 0x0A, 0x85, 0x46, 0x97, 0xCD, 0x04,
+ 0x24, 0x85, 0x48, 0x04, 0x84, 0x80, 0x02, 0x01, 0x03, 0xDA, 0x80, 0x23, 0x82, 0x01, 0x34, 0x85,
+ 0x02, 0x23, 0xA0, 0x01, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x40, 0x05, 0x1D, 0x01, 0x04, 0xD6,
+ 0xFF, 0x23, 0x86, 0x41, 0x4B, 0x60, 0xCB, 0x00, 0xFF, 0x23, 0x80, 0x01, 0x49, 0x00, 0x81, 0x01,
+ 0x04, 0x01, 0x02, 0xC8, 0x30, 0x01, 0x80, 0x01, 0xF7, 0x04, 0x03, 0x01, 0x49, 0x04, 0x80, 0x01,
+ 0xC9, 0x00, 0x00, 0x05, 0x00, 0x01, 0xFF, 0xA0, 0x60, 0x05, 0x77, 0x04, 0x01, 0x23, 0xEA, 0x00,
+ 0x5D, 0x00, 0xFE, 0xC7, 0x00, 0x62, 0x00, 0x23, 0xEA, 0x00, 0x00, 0x63, 0x07, 0xA4, 0xF8, 0x05,
+ 0x03, 0x03, 0x02, 0xA0, 0x8E, 0x05, 0xF4, 0x85, 0x00, 0x33, 0x2D, 0x00, 0xC0, 0x88, 0x04, 0xA0,
+ 0xB8, 0x05, 0x80, 0x63, 0x00, 0x23, 0xDF, 0x00, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA2, 0xA4, 0x05,
+ 0x1D, 0x01, 0x06, 0xD6, 0x02, 0x23, 0x02, 0x41, 0x82, 0x01, 0x50, 0x00, 0x62, 0x97, 0x04, 0x85,
+ 0x04, 0x23, 0x02, 0x41, 0x82, 0x01, 0x04, 0x85, 0x08, 0xA0, 0xBE, 0x05, 0xF4, 0x85, 0x03, 0xA0,
+ 0xC4, 0x05, 0xF4, 0x85, 0x01, 0xA0, 0xCE, 0x05, 0x88, 0x00, 0x80, 0x63, 0xCC, 0x86, 0x07, 0xA0,
+ 0xEE, 0x05, 0x5F, 0x00, 0x00, 0x2B, 0xDF, 0x08, 0x00, 0xA2, 0xE6, 0x05, 0x80, 0x67, 0x80, 0x63,
+ 0x01, 0xA2, 0x7A, 0x06, 0x7C, 0x85, 0x06, 0x23, 0x68, 0x98, 0x48, 0x23, 0xF6, 0x88, 0x07, 0x23,
+ 0x80, 0x00, 0x06, 0x87, 0x80, 0x63, 0x7C, 0x85, 0x00, 0x23, 0xDF, 0x00, 0x00, 0x63, 0x4A, 0x00,
+ 0x06, 0x61, 0x00, 0xA2, 0x36, 0x06, 0x1D, 0x01, 0x16, 0xD4, 0xC0, 0x23, 0x07, 0x41, 0x83, 0x03,
+ 0x80, 0x63, 0x06, 0xA6, 0x1C, 0x06, 0x00, 0x33, 0x37, 0x00, 0xC0, 0x88, 0x1D, 0x01, 0x01, 0xD6,
+ 0x20, 0x23, 0x63, 0x60, 0x83, 0x03, 0x80, 0x63, 0x02, 0x23, 0xDF, 0x00, 0x07, 0xA6, 0x7C, 0x05,
+ 0xEF, 0x04, 0x6F, 0x00, 0x00, 0x63, 0x4B, 0x00, 0x06, 0x41, 0xCB, 0x00, 0x52, 0x00, 0x06, 0x61,
+ 0x00, 0xA2, 0x4E, 0x06, 0x1D, 0x01, 0x03, 0xCA, 0xC0, 0x23, 0x07, 0x41, 0x00, 0x63, 0x1D, 0x01,
+ 0x04, 0xCC, 0x00, 0x33, 0x00, 0x83, 0xC0, 0x20, 0x81, 0x62, 0x80, 0x23, 0x07, 0x41, 0x00, 0x63,
+ 0x80, 0x67, 0x08, 0x23, 0x83, 0x03, 0x80, 0x63, 0x00, 0x63, 0x01, 0x23, 0xDF, 0x00, 0x06, 0xA6,
+ 0x84, 0x06, 0x07, 0xA6, 0x7C, 0x05, 0x80, 0x67, 0x80, 0x63, 0x00, 0x33, 0x00, 0x40, 0xC0, 0x20,
+ 0x81, 0x62, 0x00, 0x63, 0x00, 0x00, 0xFE, 0x95, 0x83, 0x03, 0x80, 0x63, 0x06, 0xA6, 0x94, 0x06,
+ 0x07, 0xA6, 0x7C, 0x05, 0x00, 0x00, 0x01, 0xA0, 0x14, 0x07, 0x00, 0x2B, 0x40, 0x0E, 0x80, 0x63,
+ 0x01, 0x00, 0x06, 0xA6, 0xAA, 0x06, 0x07, 0xA6, 0x7C, 0x05, 0x40, 0x0E, 0x80, 0x63, 0x00, 0x43,
+ 0x00, 0xA0, 0xA2, 0x06, 0x06, 0xA6, 0xBC, 0x06, 0x07, 0xA6, 0x7C, 0x05, 0x80, 0x67, 0x40, 0x0E,
+ 0x80, 0x63, 0x07, 0xA6, 0x7C, 0x05, 0x00, 0x23, 0xDF, 0x00, 0x00, 0x63, 0x07, 0xA6, 0xD6, 0x06,
+ 0x00, 0x33, 0x2A, 0x00, 0xC0, 0x88, 0x03, 0x03, 0x80, 0x63, 0x89, 0x00, 0x0A, 0x2B, 0x07, 0xA6,
+ 0xE8, 0x06, 0x00, 0x33, 0x29, 0x00, 0xC0, 0x88, 0x00, 0x43, 0x00, 0xA2, 0xF4, 0x06, 0xC0, 0x0E,
+ 0x80, 0x63, 0xDE, 0x86, 0xC0, 0x0E, 0x00, 0x33, 0x00, 0x80, 0xC0, 0x20, 0x81, 0x62, 0x04, 0x01,
+ 0x02, 0xDA, 0x80, 0x63, 0x7C, 0x85, 0x80, 0x7B, 0x80, 0x63, 0x06, 0xA6, 0x8C, 0x06, 0x00, 0x33,
+ 0x2C, 0x00, 0xC0, 0x88, 0x0C, 0xA2, 0x2E, 0x07, 0xFE, 0x95, 0x83, 0x03, 0x80, 0x63, 0x06, 0xA6,
+ 0x2C, 0x07, 0x07, 0xA6, 0x7C, 0x05, 0x00, 0x33, 0x3D, 0x00, 0xC0, 0x88, 0x00, 0x00, 0x80, 0x67,
+ 0x83, 0x03, 0x80, 0x63, 0x0C, 0xA0, 0x44, 0x07, 0x07, 0xA6, 0x7C, 0x05, 0xBF, 0x23, 0x04, 0x61,
+ 0x84, 0x01, 0xE6, 0x84, 0x00, 0x63, 0xF0, 0x04, 0x01, 0x01, 0xF1, 0x00, 0x00, 0x01, 0xF2, 0x00,
+ 0x01, 0x05, 0x80, 0x01, 0x72, 0x04, 0x71, 0x00, 0x81, 0x01, 0x70, 0x04, 0x80, 0x05, 0x81, 0x05,
+ 0x00, 0x63, 0xF0, 0x04, 0xF2, 0x00, 0x72, 0x04, 0x01, 0x01, 0xF1, 0x00, 0x70, 0x00, 0x81, 0x01,
+ 0x70, 0x04, 0x71, 0x00, 0x81, 0x01, 0x72, 0x00, 0x80, 0x01, 0x71, 0x04, 0x70, 0x00, 0x80, 0x01,
+ 0x70, 0x04, 0x00, 0x63, 0xF0, 0x04, 0xF2, 0x00, 0x72, 0x04, 0x00, 0x01, 0xF1, 0x00, 0x70, 0x00,
+ 0x80, 0x01, 0x70, 0x04, 0x71, 0x00, 0x80, 0x01, 0x72, 0x00, 0x81, 0x01, 0x71, 0x04, 0x70, 0x00,
+ 0x81, 0x01, 0x70, 0x04, 0x00, 0x63, 0x00, 0x23, 0xB3, 0x01, 0x83, 0x05, 0xA3, 0x01, 0xA2, 0x01,
+ 0xA1, 0x01, 0x01, 0x23, 0xA0, 0x01, 0x00, 0x01, 0xC8, 0x00, 0x03, 0xA1, 0xC4, 0x07, 0x00, 0x33,
+ 0x07, 0x00, 0xC0, 0x88, 0x80, 0x05, 0x81, 0x05, 0x04, 0x01, 0x11, 0xC8, 0x48, 0x00, 0xB0, 0x01,
+ 0xB1, 0x01, 0x08, 0x23, 0xB2, 0x01, 0x05, 0x01, 0x48, 0x04, 0x00, 0x43, 0x00, 0xA2, 0xE4, 0x07,
+ 0x00, 0x05, 0xDA, 0x87, 0x00, 0x01, 0xC8, 0x00, 0xFF, 0x23, 0x80, 0x01, 0x05, 0x05, 0x00, 0x63,
+ 0xF7, 0x04, 0x1A, 0x09, 0xF6, 0x08, 0x6E, 0x04, 0x00, 0x02, 0x80, 0x43, 0x76, 0x08, 0x80, 0x02,
+ 0x77, 0x04, 0x00, 0x63, 0xF7, 0x04, 0x1A, 0x09, 0xF6, 0x08, 0x6E, 0x04, 0x00, 0x02, 0x00, 0xA0,
+ 0x14, 0x08, 0x16, 0x88, 0x00, 0x43, 0x76, 0x08, 0x80, 0x02, 0x77, 0x04, 0x00, 0x63, 0xF3, 0x04,
+ 0x00, 0x23, 0xF4, 0x00, 0x74, 0x00, 0x80, 0x43, 0xF4, 0x00, 0xCF, 0x40, 0x00, 0xA2, 0x44, 0x08,
+ 0x74, 0x04, 0x02, 0x01, 0xF7, 0xC9, 0xF6, 0xD9, 0x00, 0x01, 0x01, 0xA1, 0x24, 0x08, 0x04, 0x98,
+ 0x26, 0x95, 0x24, 0x88, 0x73, 0x04, 0x00, 0x63, 0xF3, 0x04, 0x75, 0x04, 0x5A, 0x88, 0x02, 0x01,
+ 0x04, 0xD8, 0x46, 0x97, 0x04, 0x98, 0x26, 0x95, 0x4A, 0x88, 0x75, 0x00, 0x00, 0xA3, 0x64, 0x08,
+ 0x00, 0x05, 0x4E, 0x88, 0x73, 0x04, 0x00, 0x63, 0x80, 0x7B, 0x80, 0x63, 0x06, 0xA6, 0x76, 0x08,
+ 0x00, 0x33, 0x3E, 0x00, 0xC0, 0x88, 0x80, 0x67, 0x83, 0x03, 0x80, 0x63, 0x00, 0x63, 0x38, 0x2B,
+ 0x9C, 0x88, 0x38, 0x2B, 0x92, 0x88, 0x32, 0x09, 0x31, 0x05, 0x92, 0x98, 0x05, 0x05, 0xB2, 0x09,
+ 0x00, 0x63, 0x00, 0x32, 0x00, 0x36, 0x00, 0x3A, 0x00, 0x3E, 0x00, 0x63, 0x80, 0x32, 0x80, 0x36,
+ 0x80, 0x3A, 0x80, 0x3E, 0x00, 0x63, 0x38, 0x2B, 0x40, 0x32, 0x40, 0x36, 0x40, 0x3A, 0x40, 0x3E,
+ 0x00, 0x63, 0x5A, 0x20, 0xC9, 0x40, 0x00, 0xA0, 0xB2, 0x08, 0x5D, 0x00, 0xFE, 0xC3, 0x00, 0x63,
+ 0x80, 0x73, 0xE6, 0x20, 0x02, 0x23, 0xE8, 0x00, 0x82, 0x73, 0xFF, 0xFD, 0x80, 0x73, 0x13, 0x23,
+ 0xF6, 0x88, 0x66, 0x20, 0xC0, 0x20, 0x04, 0x23, 0xA0, 0x01, 0xA1, 0x23, 0xA1, 0x01, 0x81, 0x62,
+ 0xE0, 0x88, 0x80, 0x73, 0x80, 0x77, 0x68, 0x00, 0x00, 0xA2, 0x80, 0x00, 0x03, 0xC2, 0xF1, 0xC7,
+ 0x41, 0x23, 0xF6, 0x88, 0x11, 0x23, 0xA1, 0x01, 0x04, 0x23, 0xA0, 0x01, 0xE6, 0x84,
+};
+
+/* Size in bytes of the microcode image above, and its expected checksum
+ * (presumably verified after download into board LRAM — confirm against
+ * the microcode load routine). */
+STATIC ushort _asc_mcode_size ASC_INITDATA = sizeof(_asc_mcode_buf);
+STATIC ulong _asc_mcode_chksum ASC_INITDATA = 0x012B5442UL;
+
+/*
+ * SCSI commands for which the "synchronous offset one" workaround is
+ * disabled (scanned by AscExeScsiQueue() when deciding whether to set
+ * disable_syn_offset_one_fix).  The table is padded with 0xFF entries;
+ * the scan stops at the first 0xFF.
+ */
+#define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16
+STATIC uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] =
+{
+ SCSICMD_Inquiry,
+ SCSICMD_RequestSense,
+ SCSICMD_ReadCapacity,
+ SCSICMD_ReadTOC,
+ SCSICMD_ModeSelect6,
+ SCSICMD_ModeSense6,
+ SCSICMD_ModeSelect10,
+ SCSICMD_ModeSense10,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF
+};
+
+/*
+ * AscExeScsiQueue() - hand a SCSI request (ASC_SCSI_Q) to the microcode.
+ *
+ * Returns 1 if the request was accepted and queued, 0 if there were not
+ * enough free microcode queues (the caller may retry later), or ERR on
+ * failure: NULL request, a library error already latched in err_code,
+ * critical-section re-entry, or a bad scatter-gather list.
+ *
+ * The queueing work runs between DvcEnterCritical() and
+ * DvcLeaveCritical(); asc_dvc->in_critical_cnt is used to detect
+ * re-entry.  Every exit path taken after in_critical_cnt is incremented
+ * must decrement it and leave the critical section.
+ */
+STATIC int
+AscExeScsiQueue(
+    REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+    REG ASC_SCSI_Q * scsiq
+)
+{
+    PortAddr iop_base;
+    int last_int_level;
+    int sta;
+    int n_q_required;
+    int disable_syn_offset_one_fix;
+    int i;
+    ulong addr;
+    ASC_EXE_CALLBACK asc_exe_callback;
+    ushort sg_entry_cnt = 0;
+    ushort sg_entry_cnt_minus_one = 0;
+    uchar target_ix;
+    uchar tid_no;
+    uchar sdtr_data;
+    uchar extra_bytes;
+    uchar scsi_cmd;
+    uchar disable_cmd;
+    ASC_SG_HEAD *sg_head;
+    ulong data_cnt;
+
+    iop_base = asc_dvc->iop_base;
+    asc_exe_callback = (ASC_EXE_CALLBACK) asc_dvc->exe_callback;
+    if (asc_dvc->err_code != 0)
+        return (ERR);
+    /*
+     * BUG FIX: validate the request pointer before dereferencing it.
+     * The original read scsiq->sg_head before this check, defeating the
+     * NULL test entirely.
+     */
+    if (scsiq == (ASC_SCSI_Q *) 0L) {
+        AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SCSIQ_NULL_PTR);
+        return (ERR);
+    }
+    sg_head = scsiq->sg_head;
+    scsiq->q1.q_no = 0;
+    if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
+        scsiq->q1.extra_bytes = 0;
+    }
+    sta = 0;
+    target_ix = scsiq->q2.target_ix;
+    tid_no = ASC_TIX_TO_TID(target_ix);
+    n_q_required = 1;
+    if (scsiq->cdbptr[0] == SCSICMD_RequestSense) {
+        /* For REQUEST SENSE on a target with SDTR negotiated, clear the
+         * sdtr_done bit and queue an SDTR message-out with the saved
+         * parameters (presumably to renegotiate sync transfer around the
+         * contingent-allegiance condition — original driver behavior). */
+        if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) {
+            asc_dvc->sdtr_done &= ~scsiq->q1.target_id;
+            sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+            AscMsgOutSDTR(asc_dvc,
+                          asc_dvc->sdtr_period_tbl[(sdtr_data >> 4) &
+                          (uchar) (asc_dvc->max_sdtr_index - 1)],
+                          (uchar) (sdtr_data & (uchar) ASC_SYN_MAX_OFFSET));
+            scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
+        }
+    }
+    last_int_level = DvcEnterCritical();
+    if (asc_dvc->in_critical_cnt != 0) {
+        DvcLeaveCritical(last_int_level);
+        AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
+        return (ERR);
+    }
+    asc_dvc->in_critical_cnt++;
+    if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
+        if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
+            asc_dvc->in_critical_cnt--;
+            DvcLeaveCritical(last_int_level);
+            return (ERR);
+        }
+        if (sg_entry_cnt > ASC_MAX_SG_LIST) {
+            /*
+             * BUG FIX: the original returned ERR here without dropping
+             * in_critical_cnt or leaving the critical section, wedging
+             * the driver after one oversized SG list.  Mirror the
+             * zero-entry error path above.
+             */
+            asc_dvc->in_critical_cnt--;
+            DvcLeaveCritical(last_int_level);
+            return (ERR);
+        }
+        if (sg_entry_cnt == 1) {
+            /* Single-entry SG list: collapse into a plain request. */
+            scsiq->q1.data_addr = sg_head->sg_list[0].addr;
+            scsiq->q1.data_cnt = sg_head->sg_list[0].bytes;
+            scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
+        }
+        sg_entry_cnt_minus_one = sg_entry_cnt - 1;
+    }
+    scsi_cmd = scsiq->cdbptr[0];
+    disable_syn_offset_one_fix = FALSE;
+    if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
+        !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
+        /* Decide whether to suppress the PCI async-transfer workaround:
+         * suppressed for transfers under 512 bytes and for commands in
+         * _syn_offset_one_disable_cmd[]. */
+        if (scsiq->q1.cntl & QC_SG_HEAD) {
+            data_cnt = 0;
+            for (i = 0; i < sg_entry_cnt; i++) {
+                data_cnt += sg_head->sg_list[i].bytes;
+            }
+        } else {
+            data_cnt = scsiq->q1.data_cnt;
+        }
+        if (data_cnt != 0UL) {
+            if (data_cnt < 512UL) {
+                disable_syn_offset_one_fix = TRUE;
+            } else {
+                for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST; i++) {
+                    disable_cmd = _syn_offset_one_disable_cmd[i];
+                    if (disable_cmd == 0xFF) {
+                        break;
+                    }
+                    if (scsi_cmd == disable_cmd) {
+                        disable_syn_offset_one_fix = TRUE;
+                        break;
+                    }
+                }
+            }
+        }
+    }
+    if (disable_syn_offset_one_fix) {
+        scsiq->q2.tag_code &= ~M2_QTAG_MSG_SIMPLE;
+        scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
+                               ASC_TAG_FLAG_DISABLE_DISCONNECT);
+    } else {
+        scsiq->q2.tag_code &= 0x23;
+    }
+    if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
+        if (asc_dvc->bug_fix_cntl) {
+            if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
+                /* Chip bug workaround for reads ending on a non-dword
+                 * boundary: shave the trailing 1-3 bytes off the last SG
+                 * entry and remember them in q1.extra_bytes. */
+                if ((scsi_cmd == SCSICMD_Read6) ||
+                    (scsi_cmd == SCSICMD_Read10)) {
+                    addr = sg_head->sg_list[sg_entry_cnt_minus_one].addr +
+                        sg_head->sg_list[sg_entry_cnt_minus_one].bytes;
+                    extra_bytes = (uchar) ((ushort) addr & 0x0003);
+                    if ((extra_bytes != 0) &&
+                        ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES)
+                         == 0)) {
+                        scsiq->q2.tag_code |= ASC_TAG_FLAG_EXTRA_BYTES;
+                        scsiq->q1.extra_bytes = extra_bytes;
+                        sg_head->sg_list[sg_entry_cnt_minus_one].bytes -=
+                            (ulong) extra_bytes;
+                    }
+                }
+            }
+        }
+        sg_head->entry_to_copy = sg_head->entry_cnt;
+        n_q_required = AscSgListToQueue(sg_entry_cnt);
+        if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
+             (uint) n_q_required) || ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+            if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
+                                        n_q_required)) == 1) {
+                asc_dvc->in_critical_cnt--;
+                if (asc_exe_callback != 0) {
+                    (*asc_exe_callback) (asc_dvc, scsiq);
+                }
+                DvcLeaveCritical(last_int_level);
+                return (sta);
+            }
+        }
+    } else {
+        if (asc_dvc->bug_fix_cntl) {
+            if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
+                /* Same non-dword-boundary workaround for the non-SG
+                 * case, applied only to 512-byte-multiple transfers. */
+                if ((scsi_cmd == SCSICMD_Read6) ||
+                    (scsi_cmd == SCSICMD_Read10)) {
+                    addr = scsiq->q1.data_addr + scsiq->q1.data_cnt;
+                    extra_bytes = (uchar) ((ushort) addr & 0x0003);
+                    if ((extra_bytes != 0) &&
+                        ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES)
+                         == 0)) {
+                        if (((ushort) scsiq->q1.data_cnt & 0x01FF) == 0) {
+                            scsiq->q2.tag_code |= ASC_TAG_FLAG_EXTRA_BYTES;
+                            scsiq->q1.data_cnt -= (ulong) extra_bytes;
+                            scsiq->q1.extra_bytes = extra_bytes;
+                        }
+                    }
+                }
+            }
+        }
+        n_q_required = 1;
+        if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) ||
+            ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+            if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
+                                        n_q_required)) == 1) {
+                asc_dvc->in_critical_cnt--;
+                if (asc_exe_callback != 0) {
+                    (*asc_exe_callback) (asc_dvc, scsiq);
+                }
+                DvcLeaveCritical(last_int_level);
+                return (sta);
+            }
+        }
+    }
+    asc_dvc->in_critical_cnt--;
+    DvcLeaveCritical(last_int_level);
+    return (sta);
+}
+
+/*
+ * AscSendScsiQueue() - copy a ready request into free microcode queue(s).
+ *
+ * 'n_q_required' is the number of microcode queue blocks the request
+ * needs (more than one only for scatter-gather requests).  On success
+ * the free-queue head in LRAM is advanced past the allocated chain and
+ * the total and per-target queue counts are bumped.  Returns the status
+ * from AscPutReadyQueue()/AscPutReadySgListQueue(), or 0 if no free
+ * queue chain of the required length was available (or n_q_required
+ * was 0).
+ */
+STATIC int
+AscSendScsiQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ REG ASC_SCSI_Q * scsiq,
+ uchar n_q_required
+)
+{
+ PortAddr iop_base;
+ uchar free_q_head;
+ uchar next_qp;
+ uchar tid_no;
+ uchar target_ix;
+ int sta;
+
+ iop_base = asc_dvc->iop_base;
+ target_ix = scsiq->q2.target_ix;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ sta = 0;
+ free_q_head = (uchar) AscGetVarFreeQHead(iop_base);
+ if (n_q_required > 1) {
+ /* Scatter-gather: allocate a linked chain of n_q_required blocks
+ * starting at the current free head. */
+ if ((next_qp = AscAllocMultipleFreeQueue(iop_base,
+ free_q_head, (uchar) (n_q_required)))
+ != (uchar) ASC_QLINK_END) {
+ asc_dvc->last_q_shortage = 0;
+ scsiq->sg_head->queue_cnt = n_q_required - 1;
+ scsiq->q1.q_no = free_q_head;
+ /* Only advance the free head and counters once the request has
+ * actually been written into LRAM. */
+ if ((sta = AscPutReadySgListQueue(asc_dvc, scsiq,
+ free_q_head)) == 1) {
+ AscPutVarFreeQHead(iop_base, next_qp);
+ asc_dvc->cur_total_qng += (uchar) (n_q_required);
+ asc_dvc->cur_dvc_qng[tid_no]++;
+ }
+ return (sta);
+ }
+ } else if (n_q_required == 1) {
+ /* Simple request: one queue block from the free list. */
+ if ((next_qp = AscAllocFreeQueue(iop_base,
+ free_q_head)) != ASC_QLINK_END) {
+ scsiq->q1.q_no = free_q_head;
+ if ((sta = AscPutReadyQueue(asc_dvc, scsiq,
+ free_q_head)) == 1) {
+ AscPutVarFreeQHead(iop_base, next_qp);
+ asc_dvc->cur_total_qng++;
+ asc_dvc->cur_dvc_qng[tid_no]++;
+ }
+ return (sta);
+ }
+ }
+ return (sta);
+}
+
+/*
+ * AscSgListToQueue() - number of microcode queue blocks needed to hold a
+ * scatter-gather request with 'sg_list' entries.
+ *
+ * The first entry rides in the head queue block (see
+ * AscPutReadySgListQueue()); each further block carries up to
+ * ASC_SG_LIST_PER_Q entries, so the result is
+ * 1 + ceil((sg_list - 1) / ASC_SG_LIST_PER_Q).
+ */
+STATIC int
+AscSgListToQueue(
+    int sg_list
+)
+{
+    int full_qs;
+    int leftover;
+
+    full_qs = (sg_list - 1) / ASC_SG_LIST_PER_Q;
+    leftover = (sg_list - 1) % ASC_SG_LIST_PER_Q;
+    if (leftover != 0) {
+        full_qs++;
+    }
+    return (full_qs + 1);
+}
+
+
+/*
+ * AscGetNumOfFreeQueue() - number of microcode queue blocks currently
+ * available to satisfy a request of 'n_qs' blocks for target 'target_ix'.
+ *
+ * Returns 0 if the target is flagged not-ready or queue-full/busy, if
+ * the per-target queue limit is reached, or if fewer than n_qs blocks
+ * remain after reserving ASC_MIN_FREE_Q.  A failed multi-block request
+ * records its size in last_q_shortage so single-block requests keep that
+ * many blocks in reserve until the shortage is satisfied.
+ */
+STATIC uint
+AscGetNumOfFreeQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix,
+ uchar n_qs
+)
+{
+ uint cur_used_qs;
+ uint cur_free_qs;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ uchar tid_no;
+
+ target_id = ASC_TIX_TO_TARGET_ID(target_ix);
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ if ((asc_dvc->unit_not_ready & target_id) ||
+ (asc_dvc->queue_full_or_busy & target_id)) {
+ return (0);
+ }
+ /* Single-block requests also count the outstanding multi-block
+ * shortage as "used", keeping blocks in reserve for it. */
+ if (n_qs == 1) {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) asc_dvc->last_q_shortage +
+ (uint) ASC_MIN_FREE_Q;
+ } else {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) ASC_MIN_FREE_Q;
+ }
+ if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) {
+ cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs;
+ if (asc_dvc->cur_dvc_qng[tid_no] >=
+ asc_dvc->max_dvc_qng[tid_no]) {
+ return (0);
+ }
+ return (cur_free_qs);
+ }
+ /* Not enough room: remember the size of a feasible multi-block
+ * request so future single-block requests leave space for it. */
+ if (n_qs > 1) {
+ if ((n_qs > asc_dvc->last_q_shortage) && (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) {
+ asc_dvc->last_q_shortage = n_qs;
+ }
+ }
+ return (0);
+}
+
+/*
+ * AscPutReadyQueue() - write one request into LRAM queue block 'q_no'
+ * and mark it ready for the microcode.  Always returns 1.
+ *
+ * If SDTR was negotiated for the target but not yet completed, an SDTR
+ * message-out is staged first and QC_MSG_OUT is set on the request.
+ * The CDB and the q1/q2 control words are copied into the queue block
+ * before the status byte is written; the QS_READY status write is last
+ * so the microcode never sees a partially-filled queue.
+ */
+STATIC int
+AscPutReadyQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ REG ASC_SCSI_Q * scsiq,
+ uchar q_no
+)
+{
+ ushort q_addr;
+ uchar tid_no;
+ uchar sdtr_data;
+ uchar syn_period_ix;
+ uchar syn_offset;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
+ ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) {
+ /* SDTR negotiated but not completed: stage the message-out. */
+ tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix);
+ sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+ syn_period_ix = (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1);
+ syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
+ AscMsgOutSDTR(asc_dvc,
+ asc_dvc->sdtr_period_tbl[syn_period_ix],
+ syn_offset);
+ scsiq->q1.cntl |= QC_MSG_OUT;
+ }
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
+ scsiq->q2.tag_code &= ~M2_QTAG_MSG_SIMPLE;
+ }
+ scsiq->q1.status = QS_FREE;
+ AscMemWordCopyToLram(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CDB_BEG),
+ (ushort *) scsiq->cdbptr,
+ (ushort) ((ushort) scsiq->q2.cdb_len >> 1));
+ DvcPutScsiQ(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CPY_BEG),
+ (ushort *) & scsiq->q1.cntl,
+ (ushort) ((((sizeof (ASC_SCSIQ_1) + sizeof (ASC_SCSIQ_2)) / 2) - 1)));
+ /* Write the status word last: q_no in the high byte, QS_READY low. */
+ AscWriteLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ (ushort) (((ushort) scsiq->q1.q_no << 8) | (ushort) QS_READY));
+ return (1);
+}
+
+/*
+ * AscPutReadySgListQueue() - write a scatter-gather request into LRAM.
+ *
+ * The first SG entry is promoted into q1.data_addr/data_cnt of the head
+ * queue block; the remaining entries are distributed across the chained
+ * queue blocks allocated by AscSendScsiQueue(), each headed by an
+ * ASC_SG_LIST_Q descriptor.  The head block itself is then queued via
+ * AscPutReadyQueue().  q1.data_addr/data_cnt are saved on entry and
+ * restored before returning, so the caller's request is unmodified.
+ * Returns the status from AscPutReadyQueue().
+ */
+STATIC int
+AscPutReadySgListQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ REG ASC_SCSI_Q * scsiq,
+ uchar q_no
+)
+{
+ int sta;
+ int i;
+ ASC_SG_HEAD *sg_head;
+ ASC_SG_LIST_Q scsi_sg_q;
+ ulong saved_data_addr;
+ ulong saved_data_cnt;
+ PortAddr iop_base;
+ ushort sg_list_dwords;
+ ushort sg_index;
+ ushort sg_entry_cnt;
+ ushort q_addr;
+ uchar next_qp;
+
+ iop_base = asc_dvc->iop_base;
+ sg_head = scsiq->sg_head;
+ saved_data_addr = scsiq->q1.data_addr;
+ saved_data_cnt = scsiq->q1.data_cnt;
+ /* First SG entry rides in the head queue block. */
+ scsiq->q1.data_addr = sg_head->sg_list[0].addr;
+ scsiq->q1.data_cnt = sg_head->sg_list[0].bytes;
+ sg_entry_cnt = sg_head->entry_cnt - 1;
+ if (sg_entry_cnt != 0) {
+ scsiq->q1.cntl |= QC_SG_HEAD;
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ sg_index = 1;
+ scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
+ scsi_sg_q.sg_head_qp = q_no;
+ scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
+ for (i = 0; i < sg_head->queue_cnt; i++) {
+ scsi_sg_q.seq_no = i + 1;
+ if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
+ /* Full SG queue block; more entries still remain. */
+ sg_list_dwords = (uchar) (ASC_SG_LIST_PER_Q * 2);
+ sg_entry_cnt -= ASC_SG_LIST_PER_Q;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q;
+ scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q;
+ } else {
+ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1;
+ scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q - 1;
+ }
+ } else {
+ /* Last SG queue block: mark the end of the transfer list. */
+ scsi_sg_q.cntl |= QCSG_SG_XFER_END;
+ sg_list_dwords = sg_entry_cnt << 1;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt;
+ scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt;
+ } else {
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
+ scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1;
+ }
+ sg_entry_cnt = 0;
+ }
+ /* Follow the forward link to the next queue block in the chain. */
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_FWD));
+ scsi_sg_q.q_no = next_qp;
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+ AscMemWordCopyToLram(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_SGHD_CPY_BEG),
+ (ushort *) & scsi_sg_q,
+ (ushort) (sizeof (ASC_SG_LIST_Q) >> 1));
+ AscMemDWordCopyToLram(iop_base,
+ (ushort) (q_addr + ASC_SGQ_LIST_BEG),
+ (ulong *) & sg_head->sg_list[sg_index],
+ (ushort) sg_list_dwords);
+ sg_index += ASC_SG_LIST_PER_Q;
+ }
+ } else {
+ /* Only one SG entry: handled entirely by the head block. */
+ scsiq->q1.cntl &= ~QC_SG_HEAD;
+ }
+ sta = AscPutReadyQueue(asc_dvc, scsiq, q_no);
+ scsiq->q1.data_addr = saved_data_addr;
+ scsiq->q1.data_cnt = saved_data_cnt;
+ return (sta);
+}
+
+/*
+ * AscAbortSRB() - abort the request identified by 'srb_ptr'.
+ *
+ * Marks all targets not-ready (blocking new queueing), waits for the
+ * ISR to finish, halts queue execution, and asks the halted RISC to
+ * abort the SRB.  Returns 1 if the abort succeeded, 0 if the SRB was
+ * not found while halted, or ERR if the queue could not be stopped.
+ * The previous unit_not_ready mask is restored on exit.
+ */
+STATIC int
+AscAbortSRB(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ulong srb_ptr
+)
+{
+ int sta;
+ ASC_SCSI_BIT_ID_TYPE saved_unit_not_ready;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ sta = ERR;
+ saved_unit_not_ready = asc_dvc->unit_not_ready;
+ asc_dvc->unit_not_ready = 0xFF;
+ AscWaitISRDone(asc_dvc);
+ if (AscStopQueueExe(iop_base) == 1) {
+ if (AscRiscHaltedAbortSRB(asc_dvc, srb_ptr) == 1) {
+ sta = 1;
+ AscCleanUpBusyQueue(iop_base);
+ AscStartQueueExe(iop_base);
+ } else {
+ sta = 0;
+ AscStartQueueExe(iop_base);
+ }
+ }
+ asc_dvc->unit_not_ready = saved_unit_not_ready;
+ return (sta);
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+/*
+ * AscResetDevice() - reset a single target device.
+ *
+ * Aborts all queued requests for the target, then sends it a
+ * M1_BUS_DVC_RESET message via a synthetic zeroed request queued with
+ * QC_MSG_OUT | QC_URGENT, waiting up to AscExeScsiQueue()/_AscWaitQDone()
+ * for completion.  Returns TRUE on success, 0 if the reset request could
+ * not be queued, or ERR if the queue could not be stopped / requests
+ * could not be aborted.  unit_not_ready is restored on exit.
+ */
+STATIC int
+AscResetDevice(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ PortAddr iop_base;
+ int sta;
+ uchar tid_no;
+
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ int i;
+ ASC_SCSI_REQ_Q scsiq_buf;
+ ASC_SCSI_REQ_Q *scsiq;
+ uchar *buf;
+ ASC_SCSI_BIT_ID_TYPE saved_unit_not_ready;
+ iop_base = asc_dvc->iop_base;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ target_id = ASC_TID_TO_TARGET_ID(tid_no);
+ saved_unit_not_ready = asc_dvc->unit_not_ready;
+ asc_dvc->unit_not_ready = target_id;
+ sta = ERR;
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ if (AscStopQueueExe(iop_base) == 1) {
+ if (AscRiscHaltedAbortTIX(asc_dvc, target_ix) == 1) {
+ AscCleanUpBusyQueue(iop_base);
+ AscStartQueueExe(iop_base);
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ sta = TRUE;
+ /* Build a zeroed on-stack request used only to carry the
+ * device-reset message out to the target. */
+ scsiq = (ASC_SCSI_REQ_Q *) & scsiq_buf;
+ buf = (uchar *) & scsiq_buf;
+ for (i = 0; i < sizeof (ASC_SCSI_REQ_Q); i++) {
+ *buf++ = 0x00;
+ }
+ scsiq->r1.status = (uchar) QS_READY;
+ scsiq->r2.cdb_len = 6;
+ scsiq->r2.tag_code = M2_QTAG_MSG_SIMPLE;
+ scsiq->r1.target_id = target_id;
+ scsiq->r2.target_ix = ASC_TIDLUN_TO_IX(tid_no, 0);
+ scsiq->cdbptr = (uchar *) scsiq->cdb;
+ scsiq->r1.cntl = QC_NO_CALLBACK | QC_MSG_OUT | QC_URGENT;
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_MSGOUT_BEG,
+ M1_BUS_DVC_RESET);
+ asc_dvc->unit_not_ready &= ~target_id;
+ asc_dvc->sdtr_done |= target_id;
+ if (AscExeScsiQueue(asc_dvc, (ASC_SCSI_Q *) scsiq)
+ == 1) {
+ asc_dvc->unit_not_ready = target_id;
+ DvcSleepMilliSecond(1000);
+ _AscWaitQDone(iop_base, (ASC_SCSI_Q *) scsiq);
+ if (AscStopQueueExe(iop_base) == 1) {
+ AscCleanUpDiscQueue(iop_base);
+ AscStartQueueExe(iop_base);
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+ AscSetRunChipSynRegAtID(iop_base, tid_no,
+ ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ }
+ } else {
+ sta = 0;
+ }
+ /* Force SDTR renegotiation with the freshly reset device. */
+ asc_dvc->sdtr_done &= ~target_id;
+ } else {
+ sta = ERR;
+ AscStartQueueExe(iop_base);
+ }
+ }
+ asc_dvc->unit_not_ready = saved_unit_not_ready;
+ return (sta);
+}
+#endif /* version >= v1.3.89 */
+
+/*
+ * AscResetSB() - reset the SCSI bus and reinitialize the adapter.
+ *
+ * Resets the chip and bus, waits scsi_reset_wait seconds, rebuilds the
+ * LRAM queue structures, clears per-target queue counts, reapplies the
+ * PCI async-transfer fixup registers, and restarts the microcode.
+ * Returns TRUE, or ERR if the program counter could not be set to the
+ * microcode start address or the chip failed to start.
+ */
+STATIC int
+AscResetSB(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int sta;
+ int i;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ asc_dvc->unit_not_ready = 0xFF;
+ sta = TRUE;
+ AscWaitISRDone(asc_dvc);
+ AscStopQueueExe(iop_base);
+ asc_dvc->sdtr_done = 0;
+ AscResetChipAndScsiBus(asc_dvc);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ AscReInitLram(asc_dvc);
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cur_dvc_qng[i] = 0;
+ if (asc_dvc->pci_fix_asyn_xfer & (ASC_SCSI_BIT_ID_TYPE) (0x01 << i)) {
+ AscSetChipSynRegAtID(iop_base, i, ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ }
+ asc_dvc->err_code = 0;
+ /* Restart the microcode from its entry point and verify the PC took. */
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ sta = ERR;
+ }
+ if (AscStartChip(iop_base) == 0) {
+ sta = ERR;
+ }
+ AscStartQueueExe(iop_base);
+ asc_dvc->unit_not_ready = 0;
+ asc_dvc->queue_full_or_busy = 0;
+ return (sta);
+}
+
+/*
+ * AscSetRunChipSynRegAtID() - program the synchronous transfer register
+ * for target 'tid_no' while the chip is running.
+ *
+ * Requests a RISC halt via AscHostReqRiscHalt(); if granted, programs
+ * the register with AscSetChipSynRegAtID() and restarts the chip.
+ * Returns the status from AscSetChipSynRegAtID(), or FALSE if the halt
+ * request failed.
+ */
+STATIC int
+AscSetRunChipSynRegAtID(
+    PortAddr iop_base,
+    uchar tid_no,
+    uchar sdtr_data
+)
+{
+    int sta = FALSE;
+
+    if (AscHostReqRiscHalt(iop_base)) {
+        sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
+        AscStartChip(iop_base);
+    }
+    return (sta);
+}
+
+/*
+ * AscSetChipSynRegAtID() - program the per-target synchronous transfer
+ * register by temporarily switching the chip's device ID to 'id'.
+ *
+ * The current device ID is read from bank 1 and converted from a bit
+ * mask to a bit number, the chip ID is switched to 'id', the syn
+ * register is written in bank 0 and read back for verification, and the
+ * original ID is restored.  Returns TRUE on success, FALSE if the ID
+ * switch or the register readback failed.
+ *
+ * NOTE(review): if the value read from AscReadChipDvcID() matches no
+ * single bit, the search loop leaves i == ASC_MAX_TID + 1 and that value
+ * is written back as the "original" ID — presumably cannot happen on
+ * real hardware; confirm.
+ */
+STATIC int
+AscSetChipSynRegAtID(
+ PortAddr iop_base,
+ uchar id,
+ uchar sdtr_data
+)
+{
+ ASC_SCSI_BIT_ID_TYPE org_id;
+ int i;
+ int sta = TRUE;
+
+ AscSetBank(iop_base, 1);
+ org_id = AscReadChipDvcID(iop_base);
+ /* Convert the ID bit mask into a bit number. */
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if (org_id == (0x01 << i))
+ break;
+ }
+ org_id = i;
+ AscWriteChipDvcID(iop_base, id);
+ if (AscReadChipDvcID(iop_base) == (0x01 << id)) {
+ AscSetBank(iop_base, 0);
+ AscSetChipSyn(iop_base, sdtr_data);
+ if (AscGetChipSyn(iop_base) != sdtr_data) {
+ sta = FALSE;
+ }
+ } else {
+ sta = FALSE;
+ }
+ /* Restore the original device ID before returning. */
+ AscSetBank(iop_base, 1);
+ AscWriteChipDvcID(iop_base, org_id);
+ AscSetBank(iop_base, 0);
+ return (sta);
+}
+
+/*
+ * AscReInitLram() - rebuild the LRAM queue blocks and reset the queue
+ * link variables after a chip/bus reset.  Always returns 0.
+ */
+STATIC int
+AscReInitLram(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ AscInitLram(asc_dvc);
+ AscInitQLinkVar(asc_dvc);
+ return (0);
+}
+
+/*
+ * AscInitLram() - initialize the queue blocks in board local RAM.
+ *
+ * Zeroes the queue area, then links queue blocks 1..max_total_qng into
+ * a doubly linked list via their FWD/BWD bytes (the first block's BWD
+ * and the last block's FWD wrap/terminate), and finally initializes the
+ * three extra blocks past max_total_qng with FWD = BWD = QNO = block
+ * number.  Returns the accumulated warning code (always 0 here).
+ */
+STATIC ushort
+AscInitLram(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ uchar i;
+ ushort s_addr;
+ PortAddr iop_base;
+ ushort warn_code;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ /* Zero the whole queue area: (max_total_qng + 3) 64-byte blocks. */
+ AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
+ (ushort) (((int) (asc_dvc->max_total_qng + 2 + 1) * 64) >> 1)
+);
+ /* First active queue block: BWD wraps to the last block. */
+ i = ASC_MIN_ACTIVE_QNO;
+ s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE;
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) (i + 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (asc_dvc->max_total_qng));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) i);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+ /* Middle blocks: plain doubly linked chain. */
+ for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) {
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) (i + 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (i - 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) i);
+ }
+ /* Last block: FWD terminates the chain. */
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) ASC_QLINK_END);
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (asc_dvc->max_total_qng - 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) asc_dvc->max_total_qng);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+ /* Extra blocks beyond max_total_qng link only to themselves. */
+ for (; i <= (uchar) (asc_dvc->max_total_qng + 3);
+ i++, s_addr += ASC_QBLK_SIZE) {
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_FWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_BWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_QNO), i);
+ }
+ return (warn_code);
+}
+
+/*
+ * AscInitQLinkVar() - reset the LRAM queue-link control variables shared
+ * between the host driver and the microcode.
+ *
+ * Sets the free/done queue heads and tails for both the RISC and the
+ * host, positions the busy and disconnect queue heads past
+ * max_total_qng, clears the error/halt/stop/busy flag bytes, and zeroes
+ * the first 32 words of the queue address area.  Always returns 0.
+ */
+STATIC ushort
+AscInitQLinkVar(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ PortAddr iop_base;
+ int i;
+ ushort lram_addr;
+
+ iop_base = asc_dvc->iop_base;
+ AscPutRiscVarFreeQHead(iop_base, 1);
+ AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+ AscPutVarFreeQHead(iop_base, 1);
+ AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+ AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B,
+ (uchar) ((int) asc_dvc->max_total_qng + 1));
+ AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B,
+ (uchar) ((int) asc_dvc->max_total_qng + 2));
+ AscWriteLramByte(iop_base, (ushort) ASCV_TOTAL_READY_Q_B,
+ asc_dvc->max_total_qng);
+ AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
+ AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0);
+ AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0);
+ AscPutQDoneInProgress(iop_base, 0);
+ lram_addr = ASC_QADR_BEG;
+ for (i = 0; i < 32; i++, lram_addr += 2) {
+ AscWriteLramWord(iop_base, lram_addr, 0);
+ }
+ return (0);
+}
+
+/*
+ * AscSetLibErrorCode() - latch the first library error code.
+ *
+ * Only the first error is recorded: if err_code is already non-zero the
+ * new code is discarded.  A recorded code is mirrored into LRAM at
+ * ASCV_ASCDVC_ERR_CODE_W so the microcode side can see it.  Always
+ * returns the code passed in.
+ */
+STATIC int
+AscSetLibErrorCode(
+    REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+    ushort err_code
+)
+{
+    if (asc_dvc->err_code != 0) {
+        return (err_code);
+    }
+    asc_dvc->err_code = err_code;
+    AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W,
+                     err_code);
+    return (err_code);
+}
+
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+/*
+ * _AscWaitQDone() - poll until the request in 'scsiq' leaves the READY
+ * state, or time out.
+ *
+ * Polls the queue status byte every 100 ms, up to ~3 seconds.  Returns
+ * 1 when QS_READY clears, 0 on timeout.
+ *
+ * NOTE(review): the initial "while (q_no == 0)" spin has no timeout and
+ * no delay — if q_no is never assigned this busy-waits forever;
+ * presumably the ISR always fills it in, confirm.
+ */
+STATIC int
+_AscWaitQDone(
+ PortAddr iop_base,
+ REG ASC_SCSI_Q * scsiq
+)
+{
+ ushort q_addr;
+ uchar q_status;
+ int count = 0;
+
+ while (scsiq->q1.q_no == 0) ;
+ q_addr = ASC_QNO_TO_QADDR(scsiq->q1.q_no);
+ do {
+ q_status = AscReadLramByte(iop_base, q_addr + ASC_SCSIQ_B_STATUS);
+ DvcSleepMilliSecond(100L);
+ if (count++ > 30) {
+ return (0);
+ }
+ } while ((q_status & QS_READY) != 0);
+ return (1);
+}
+#endif /* version >= v1.3.89 */
+
+/*
+ * AscMsgOutSDTR() - stage an extended SDTR message in LRAM at
+ * ASCV_MSGOUT_BEG for the microcode to send.
+ *
+ * If sdtr_period maps to a valid index in the period table, the message
+ * carries (period, offset) and the encoded byte
+ * (period_index << 4) | offset is returned.  Otherwise the message is
+ * rewritten with offset 0 (asynchronous) and 0 is returned.
+ */
+STATIC uchar
+AscMsgOutSDTR(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar sdtr_period,
+ uchar sdtr_offset
+)
+{
+ EXT_MSG sdtr_buf;
+ uchar sdtr_period_index;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ sdtr_buf.msg_type = MS_EXTEND;
+ sdtr_buf.msg_len = MS_SDTR_LEN;
+ sdtr_buf.msg_req = MS_SDTR_CODE;
+ sdtr_buf.xfer_period = sdtr_period;
+ sdtr_offset &= ASC_SYN_MAX_OFFSET;
+ sdtr_buf.req_ack_offset = sdtr_offset;
+ if ((sdtr_period_index =
+ AscGetSynPeriodIndex(asc_dvc, sdtr_period)) <=
+ asc_dvc->max_sdtr_index) {
+ AscMemWordCopyToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort *) & sdtr_buf,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+ return ((sdtr_period_index << 4) | sdtr_offset);
+ } else {
+ /* Period out of table range: fall back to asynchronous (offset 0). */
+ sdtr_buf.req_ack_offset = 0;
+ AscMemWordCopyToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort *) & sdtr_buf,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+ return (0);
+ }
+}
+
+/*
+ * Pack an SDTR period/offset pair into the single-byte form used by the
+ * chip: (period_index << 4) | (offset & ASC_SYN_MAX_OFFSET).
+ * Returns 0xFF when the period does not map into the period table.
+ */
+STATIC uchar
+AscCalSDTRData(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar sdtr_period,
+ uchar syn_offset
+)
+{
+ uchar byte;
+ uchar sdtr_period_ix;
+
+ sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+ if (
+ (sdtr_period_ix > asc_dvc->max_sdtr_index)
+) {
+ return (0xFF);
+ }
+ byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET);
+ return (byte);
+}
+
+/*
+ * Apply a packed SDTR byte for one target: program the chip's synchronous
+ * register for that target ID and record it in the microcode's
+ * SDTR-done-at-ID table.
+ */
+STATIC void
+AscSetChipSDTR(
+ PortAddr iop_base,
+ uchar sdtr_data,
+ uchar tid_no
+)
+{
+ AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
+ AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data);
+ return;
+}
+
+/*
+ * Map a synchronous transfer period (syn_time) to an index into the
+ * device's sdtr_period_tbl.  Scans from host_init_sdtr_index upward and
+ * returns the first index whose table period is >= syn_time; returns
+ * max_index when syn_time falls in the last slot, and max_index + 1
+ * (out of range) when syn_time is slower than the table maximum.
+ */
+STATIC uchar
+AscGetSynPeriodIndex(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ruchar syn_time
+)
+{
+ ruchar *period_table;
+ int max_index;
+ int min_index;
+ int i;
+
+ period_table = asc_dvc->sdtr_period_tbl;
+ max_index = (int) asc_dvc->max_sdtr_index;
+ min_index = (int)asc_dvc->host_init_sdtr_index ;
+ if ((syn_time <= period_table[max_index])) {
+ for (i = min_index; i < (max_index - 1); i++) {
+ if (syn_time <= period_table[i]) {
+ return ((uchar) i);
+ }
+ }
+ return ((uchar) max_index);
+ } else {
+ return ((uchar) (max_index + 1));
+ }
+}
+
+/*
+ * Allocate one queue from the LRAM free-queue list.  If the queue at
+ * free_q_head is not busy (QS_READY clear) and has a valid forward link,
+ * return the next queue number; otherwise return ASC_QLINK_END.
+ */
+STATIC uchar
+AscAllocFreeQueue(
+ PortAddr iop_base,
+ uchar free_q_head
+)
+{
+ ushort q_addr;
+ uchar next_qp;
+ uchar q_status;
+
+ q_addr = ASC_QNO_TO_QADDR(free_q_head);
+ q_status = (uchar) AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_STATUS));
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_FWD));
+ if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END)) {
+ return (next_qp);
+ }
+ return (ASC_QLINK_END);
+}
+
+/*
+ * Allocate n_free_q consecutive queues by walking the free list from
+ * free_q_head.  Returns the head of the allocated chain, or
+ * ASC_QLINK_END if the list runs out before n_free_q queues are found.
+ */
+STATIC uchar
+AscAllocMultipleFreeQueue(
+ PortAddr iop_base,
+ uchar free_q_head,
+ uchar n_free_q
+)
+{
+ uchar i;
+
+ for (i = 0; i < n_free_q; i++) {
+ if ((free_q_head = AscAllocFreeQueue(iop_base, free_q_head))
+ == ASC_QLINK_END) {
+ return (ASC_QLINK_END);
+ }
+ }
+ return (free_q_head);
+}
+
+/*
+ * With the RISC halted, scan all active queues in LRAM for the request
+ * whose SRB pointer matches srb_ptr and abort it: mark it QS_ABORTED /
+ * QD_ABORTED_BY_HOST, clear its SRB pointer in LRAM, write back the
+ * status, and invoke the ISR callback for the aborted request.
+ * Returns 1 if the request was found and aborted, 0 otherwise.
+ *
+ * Fix: the early "found" return previously left the critical section
+ * held (DvcEnterCritical without a matching DvcLeaveCritical), unlike
+ * the sibling AscRiscHaltedAbortTIX which balances the pair.  The exit
+ * now releases the critical section on both paths.
+ */
+STATIC int
+AscRiscHaltedAbortSRB(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ulong srb_ptr
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ uchar q_no;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO *scsiq;
+ ASC_ISR_CALLBACK asc_isr_callback;
+ int last_int_level;
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ last_int_level = DvcEnterCritical();
+ scsiq = (ASC_QDONE_INFO *) & scsiq_buf;
+ for (q_no = ASC_MIN_ACTIVE_QNO; q_no <= asc_dvc->max_total_qng;
+ q_no++) {
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ scsiq->d2.srb_ptr = AscReadLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR));
+ if (scsiq->d2.srb_ptr == srb_ptr) {
+ _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count);
+ if (((scsiq->q_status & QS_READY) != 0)
+ && ((scsiq->q_status & QS_ABORTED) == 0)
+ && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)) {
+ scsiq->q_status |= QS_ABORTED;
+ scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR),
+ 0L);
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ scsiq->q_status);
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ /* Release the critical section before the "found" exit. */
+ DvcLeaveCritical(last_int_level);
+ return (1);
+ }
+ }
+ }
+ DvcLeaveCritical(last_int_level);
+ return (0);
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+/*
+ * With the RISC halted, abort every active, not-yet-aborted, non-SG
+ * request whose target index matches target_ix: mark each QS_ABORTED /
+ * QD_ABORTED_BY_HOST, clear its SRB pointer in LRAM, write back the
+ * status, and invoke the ISR callback.  Always returns 1.
+ */
+STATIC int
+AscRiscHaltedAbortTIX(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ uchar q_no;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO *scsiq;
+ ASC_ISR_CALLBACK asc_isr_callback;
+ int last_int_level;
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ last_int_level = DvcEnterCritical();
+ scsiq = (ASC_QDONE_INFO *) & scsiq_buf;
+ for (q_no = ASC_MIN_ACTIVE_QNO; q_no <= asc_dvc->max_total_qng;
+ q_no++) {
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count);
+ if (((scsiq->q_status & QS_READY) != 0) &&
+ ((scsiq->q_status & QS_ABORTED) == 0) &&
+ ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)) {
+ if (scsiq->d2.target_ix == target_ix) {
+ scsiq->q_status |= QS_ABORTED;
+ scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR),
+ 0L);
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ scsiq->q_status);
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ }
+ }
+ }
+ DvcLeaveCritical(last_int_level);
+ return (1);
+}
+#endif /* version >= v1.3.89 */
+
+/*
+ * Ask the RISC to halt by writing a halt request into the LRAM stop-code
+ * byte, then poll (up to ~2 s: 20 x 100 ms) for the chip to report
+ * halted.  The previous stop code is restored before returning.
+ * Returns 1 if the chip is (or becomes) halted, 0 on timeout.
+ */
+STATIC int
+AscHostReqRiscHalt(
+ PortAddr iop_base
+)
+{
+ int count = 0;
+ int sta = 0;
+ uchar saved_stop_code;
+
+ if (AscIsChipHalted(iop_base))
+ return (1);
+ saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP
+);
+ do {
+ if (AscIsChipHalted(iop_base)) {
+ sta = 1;
+ break;
+ }
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
+ return (sta);
+}
+
+/*
+ * Request the microcode to stop queue execution by setting
+ * ASC_STOP_REQ_RISC_STOP in the LRAM stop-code byte, then poll (up to
+ * ~2 s) for the ASC_STOP_ACK_RISC_STOP acknowledgement bit.
+ * Returns 1 once acknowledged, 0 if a stop was already pending or on
+ * timeout.
+ */
+STATIC int
+AscStopQueueExe(
+ PortAddr iop_base
+)
+{
+ int count = 0;
+
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_REQ_RISC_STOP);
+ do {
+ if (
+ AscReadLramByte(iop_base, ASCV_STOP_CODE_B) &
+ ASC_STOP_ACK_RISC_STOP) {
+ return (1);
+ }
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (0);
+}
+
+/*
+ * Resume queue execution by clearing the LRAM stop-code byte (if set).
+ * Always returns 1.
+ */
+STATIC int
+AscStartQueueExe(
+ PortAddr iop_base
+)
+{
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
+ }
+ return (1);
+}
+
+/*
+ * Ask the microcode to clean up the busy queue: write
+ * ASC_STOP_CLEAN_UP_BUSY_Q into the stop-code byte (only when a stop is
+ * already pending) and poll (up to ~2 s) for the microcode to clear the
+ * bit.  Always returns 1, even on timeout.
+ */
+STATIC int
+AscCleanUpBusyQueue(
+ PortAddr iop_base
+)
+{
+ int count;
+ uchar stop_code;
+
+ count = 0;
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_CLEAN_UP_BUSY_Q);
+ do {
+ stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ if ((stop_code & ASC_STOP_CLEAN_UP_BUSY_Q) == 0)
+ break;
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (1);
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+/*
+ * Same protocol as AscCleanUpBusyQueue, but requests cleanup of the
+ * disconnected queue (ASC_STOP_CLEAN_UP_DISC_Q).  Always returns 1.
+ */
+STATIC int
+AscCleanUpDiscQueue(
+ PortAddr iop_base
+)
+{
+ int count;
+ uchar stop_code;
+
+ count = 0;
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_CLEAN_UP_DISC_Q);
+ do {
+ stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ if ((stop_code & ASC_STOP_CLEAN_UP_DISC_Q) == 0)
+ break;
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (1);
+}
+#endif /* version >= v1.3.89 */
+
+/*
+ * Wait (up to 10 x 1 s) for the per-target outstanding command count of
+ * the target in target_ix to drain to zero.  Always returns 1.
+ * NOTE(review): the second break fires when the count is UNCHANGED after
+ * the 1 s sleep — this looks like a "no progress, give up" bailout rather
+ * than a completion check; confirm against the vendor library intent.
+ */
+STATIC int
+AscWaitTixISRDone(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ uchar cur_req;
+ uchar tid_no;
+ int i = 0;
+
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ while (i++ < 10) {
+ if ((cur_req = asc_dvc->cur_dvc_qng[tid_no]) == 0) {
+ break;
+ }
+ DvcSleepMilliSecond(1000L);
+ if (asc_dvc->cur_dvc_qng[tid_no] == cur_req) {
+ break;
+ }
+ }
+ return (1);
+}
+
+/*
+ * Wait for outstanding ISR activity on every target (0..ASC_MAX_TID) by
+ * calling AscWaitTixISRDone per target.  Always returns 1.
+ */
+STATIC int
+AscWaitISRDone(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int tid;
+
+ for (tid = 0; tid <= ASC_MAX_TID; tid++) {
+ AscWaitTixISRDone(asc_dvc, ASC_TID_TO_TIX(tid));
+ }
+ return (1);
+}
+
+/*
+ * Translate a virtual buffer to a single physical address via the OS
+ * scatter-gather service.  Returns the physical address only when the
+ * buffer maps to exactly one contiguous SG entry covering buf_size;
+ * returns 0L if the mapping fails or spans multiple entries.
+ */
+STATIC ulong
+AscGetOnePhyAddr(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar * buf_addr,
+ ulong buf_size
+)
+{
+ ASC_MIN_SG_HEAD sg_head;
+
+ sg_head.entry_cnt = ASC_MIN_SG_LIST;
+ if (DvcGetSGList(asc_dvc, (uchar *) buf_addr,
+ buf_size, (ASC_SG_HEAD *) & sg_head) != buf_size) {
+ return (0L);
+ }
+ if (sg_head.entry_cnt > 1) {
+ return (0L);
+ }
+ return (sg_head.sg_list[0].addr);
+}
+
+/* Busy-wait for micro_sec microseconds (thin wrapper over udelay()). */
+STATIC void
+DvcDelayMicroSecond(ADV_DVC_VAR *asc_dvc, ushort micro_sec)
+{
+ udelay(micro_sec);
+}
+
+/*
+ * Busy-wait for nano_sec nanoseconds, rounded UP to the next whole
+ * microsecond for udelay().
+ */
+STATIC void
+DvcDelayNanoSecond(ASC_DVC_VAR asc_ptr_type * asc_dvc, ulong nano_sec)
+{
+ udelay((nano_sec + 999)/1000);
+}
+
+ASC_INITFUNC(
+STATIC ulong
+AscGetEisaProductID(
+ PortAddr iop_base
+)
+)
+{
+ PortAddr eisa_iop;
+ ushort product_id_high, product_id_low;
+ ulong product_id;
+
+ /*
+ * Read the 32-bit EISA product ID from the slot's ID registers:
+ * low word at the PID port, high word 2 bytes above it.
+ */
+ eisa_iop = ASC_GET_EISA_SLOT(iop_base) | ASC_EISA_PID_IOP_MASK;
+ product_id_low = inpw(eisa_iop);
+ product_id_high = inpw(eisa_iop + 2);
+ product_id = ((ulong) product_id_high << 16) | (ulong) product_id_low;
+ return (product_id);
+}
+
+ASC_INITFUNC(
+STATIC PortAddr
+AscSearchIOPortAddrEISA(
+ PortAddr iop_base
+)
+)
+{
+ ulong eisa_product_id;
+
+ /*
+ * Scan EISA slot I/O ranges for an AdvanSys 740/750 board, starting
+ * after iop_base (or from the minimum address when iop_base is 0).
+ * Slot stride alternates between a big and small gap depending on
+ * the 0x0050 address bits.  Returns the next matching port address
+ * with a verified chip signature, or 0 when the scan is exhausted.
+ */
+ if (iop_base == 0) {
+ iop_base = ASC_EISA_MIN_IOP_ADDR;
+ } else {
+ if (iop_base == ASC_EISA_MAX_IOP_ADDR)
+ return (0);
+ if ((iop_base & 0x0050) == 0x0050) {
+ iop_base += ASC_EISA_BIG_IOP_GAP;
+ } else {
+ iop_base += ASC_EISA_SMALL_IOP_GAP;
+ }
+ }
+ while (iop_base <= ASC_EISA_MAX_IOP_ADDR) {
+ eisa_product_id = AscGetEisaProductID(iop_base);
+ if ((eisa_product_id == ASC_EISA_ID_740) ||
+ (eisa_product_id == ASC_EISA_ID_750)) {
+ if (AscFindSignature(iop_base)) {
+ /* Dummy read; presumably clears a latched state — confirm. */
+ inpw(iop_base + 4);
+ return (iop_base);
+ }
+ }
+ if (iop_base == ASC_EISA_MAX_IOP_ADDR)
+ return (0);
+ if ((iop_base & 0x0050) == 0x0050) {
+ iop_base += ASC_EISA_BIG_IOP_GAP;
+ } else {
+ iop_base += ASC_EISA_SMALL_IOP_GAP;
+ }
+ }
+ return (0);
+}
+
+/*
+ * Start the chip by clearing the control register.  Returns 1 if the
+ * chip left the halted state, 0 if it still reports CSW_HALTED.
+ */
+STATIC int
+AscStartChip(
+ PortAddr iop_base
+)
+{
+ AscSetChipControl(iop_base, 0);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ return (0);
+ }
+ return (1);
+}
+
+/*
+ * Halt the chip: clear step/test/diag bits, assert CC_HALT, and issue
+ * HALT / RFLAG_WTM instructions.  Returns 1 if the chip reports halted,
+ * 0 otherwise.
+ */
+STATIC int
+AscStopChip(
+ PortAddr iop_base
+)
+{
+ uchar cc_val;
+
+ cc_val = AscGetChipControl(iop_base) & (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG));
+ AscSetChipControl(iop_base, (uchar) (cc_val | CC_HALT));
+ AscSetChipIH(iop_base, INS_HALT);
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
+ return (0);
+ }
+ return (1);
+}
+
+/*
+ * Return 1 only when both the status register (CSW_HALTED) and the
+ * control register (CC_HALT) agree that the chip is halted.
+ */
+STATIC int
+AscIsChipHalted(
+ PortAddr iop_base
+)
+{
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Write an instruction code to the chip's IH register, which lives in
+ * register bank 1; bank 0 is restored afterwards.
+ */
+STATIC void
+AscSetChipIH(
+ PortAddr iop_base,
+ ushort ins_code
+)
+{
+ AscSetBank(iop_base, 1);
+ AscWriteChipIH(iop_base, ins_code);
+ AscSetBank(iop_base, 0);
+ return;
+}
+
+/*
+ * Acknowledge a chip interrupt.  First waits (bounded spin) for the
+ * microcode to drop ASC_RISC_FLAG_GEN_INT, then sets the host ACK flag
+ * in LRAM, writes CIW_INT_ACK (retrying up to 4 times while
+ * CSW_INT_PENDING persists), and finally restores the host flag byte.
+ */
+STATIC void
+AscAckInterrupt(
+ PortAddr iop_base
+)
+{
+ uchar host_flag;
+ uchar risc_flag;
+ ushort loop;
+
+ loop = 0;
+ do {
+ risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B);
+ if (loop++ > 0x7FFF) {
+ break;
+ }
+ } while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0);
+ host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT);
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
+ (uchar) (host_flag | ASC_HOST_FLAG_ACK_INT));
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ loop = 0;
+ while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) {
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ if (loop++ > 3) {
+ break;
+ }
+ }
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
+ return;
+}
+
+/* Mask host interrupts by clearing ASC_CFG0_HOST_INT_ON in config LSW. */
+STATIC void
+AscDisableInterrupt(
+ PortAddr iop_base
+)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON));
+ return;
+}
+
+/* Unmask host interrupts by setting ASC_CFG0_HOST_INT_ON in config LSW. */
+STATIC void
+AscEnableInterrupt(
+ PortAddr iop_base
+)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON);
+ return;
+}
+
+
+
+/*
+ * Select a chip register bank (0, 1, or 2) via the control register.
+ * Bank 1 = CC_BANK_ONE; bank 2 = CC_DIAG | CC_BANK_ONE; anything else
+ * selects bank 0.  Step/test/diag/reset bits are cleared first so the
+ * write only changes bank selection.
+ */
+STATIC void
+AscSetBank(
+ PortAddr iop_base,
+ uchar bank
+)
+{
+ uchar val;
+
+ val = AscGetChipControl(iop_base) &
+ (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET | CC_CHIP_RESET));
+ if (bank == 1) {
+ val |= CC_BANK_ONE;
+ } else if (bank == 2) {
+ val |= CC_DIAG | CC_BANK_ONE;
+ } else {
+ val &= ~CC_BANK_ONE;
+ }
+ AscSetChipControl(iop_base, val);
+ return;
+}
+
+/*
+ * Hard-reset the chip and the SCSI bus: stop the chip, assert chip +
+ * SCSI reset with halt, hold ~60 us, issue RFLAG_WTM/HALT instructions,
+ * release SCSI reset, settle 200 ms, then clear the reset interrupt and
+ * status.  Returns AscIsChipHalted() as the success indicator.
+ * NOTE(review): the initial wait for CSW_SCSI_RESET_ACTIVE to clear has
+ * no timeout.
+ */
+STATIC int
+AscResetChipAndScsiBus(
+ ASC_DVC_VAR *asc_dvc
+)
+{
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ while (AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) ;
+ AscStopChip(iop_base);
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT);
+ DvcDelayNanoSecond(asc_dvc, 60000);
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ AscSetChipIH(iop_base, INS_HALT);
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT);
+ AscSetChipControl(iop_base, CC_HALT);
+ DvcSleepMilliSecond(200);
+ AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
+ AscSetChipStatus(iop_base, 0);
+ return (AscIsChipHalted(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC ulong
+AscGetMaxDmaCount(
+ ushort bus_type
+)
+)
+{
+ /* Maximum DMA transfer size depends on the host bus: ISA < VL/EISA < PCI. */
+ if (bus_type & ASC_IS_ISA)
+ return (ASC_MAX_ISA_DMA_COUNT);
+ else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
+ return (ASC_MAX_VL_DMA_COUNT);
+ return (ASC_MAX_PCI_DMA_COUNT);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscGetIsaDmaChannel(
+ PortAddr iop_base
+)
+)
+{
+ ushort channel;
+
+ /*
+ * Decode the ISA DMA channel from config LSW bits 0-1:
+ * 0x03 -> no channel (0), 0x00 -> channel 7, else channel value + 4.
+ */
+ channel = AscGetChipCfgLsw(iop_base) & 0x0003;
+ if (channel == 0x03)
+ return (0);
+ else if (channel == 0x00)
+ return (7);
+ return (channel + 4);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscSetIsaDmaChannel(
+ PortAddr iop_base,
+ ushort dma_channel
+)
+)
+{
+ ushort cfg_lsw;
+ uchar value;
+
+ /*
+ * Program an ISA DMA channel (valid range 5..7) into config LSW
+ * bits 0-1 (channel 7 encodes as 0x00).  Returns the channel read
+ * back via AscGetIsaDmaChannel(), or 0 for an invalid request.
+ */
+ if ((dma_channel >= 5) && (dma_channel <= 7)) {
+ if (dma_channel == 7)
+ value = 0x00;
+ else
+ value = dma_channel - 4;
+ cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
+ cfg_lsw |= value;
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetIsaDmaChannel(iop_base));
+ }
+ return (0);
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscSetIsaDmaSpeed(
+ PortAddr iop_base,
+ uchar speed_value
+)
+)
+{
+ /*
+ * Program the 3-bit ISA DMA speed value (register lives in bank 1);
+ * returns the value read back for verification.
+ */
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 1);
+ AscWriteChipDmaSpeed(iop_base, speed_value);
+ AscSetBank(iop_base, 0);
+ return (AscGetIsaDmaSpeed(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscGetIsaDmaSpeed(
+ PortAddr iop_base
+)
+)
+{
+ uchar speed_value;
+
+ /* Read the 3-bit ISA DMA speed from the bank-1 register. */
+ AscSetBank(iop_base, 1);
+ speed_value = AscReadChipDmaSpeed(iop_base);
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 0);
+ return (speed_value);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscReadPCIConfigWord(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ ushort pci_config_offset)
+)
+{
+ uchar lsb, msb;
+
+ /* Compose a 16-bit PCI config value from two little-endian byte reads. */
+ lsb = DvcReadPCIConfigByte(asc_dvc, pci_config_offset);
+ msb = DvcReadPCIConfigByte(asc_dvc, pci_config_offset + 1);
+ return ((ushort) ((msb << 8) | lsb));
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitGetConfig(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ushort warn_code;
+ PortAddr iop_base;
+ ushort PCIDeviceID;
+ ushort PCIVendorID;
+ uchar PCIRevisionID;
+ uchar prevCmdRegBits;
+
+ /*
+ * First phase of board init: sanitize PCI config space (vendor ID
+ * check, enable I/O/memory/bus-master, fix up latency timer per
+ * device ID), then verify the chip signature and read the device
+ * variables and EEPROM configuration.  Returns accumulated
+ * ASC_WARN_* bits, or UW_ERR if an error is already latched.
+ * NOTE(review): PCIRevisionID is read but not otherwise used here.
+ */
+ warn_code = 0;
+ iop_base = asc_dvc->iop_base;
+ asc_dvc->init_state = ASC_INIT_STATE_BEG_GET_CFG;
+ if (asc_dvc->err_code != 0) {
+ return (UW_ERR);
+ }
+ if (asc_dvc->bus_type == ASC_IS_PCI) {
+ PCIVendorID = AscReadPCIConfigWord(asc_dvc,
+ AscPCIConfigVendorIDRegister);
+
+ PCIDeviceID = AscReadPCIConfigWord(asc_dvc,
+ AscPCIConfigDeviceIDRegister);
+
+ PCIRevisionID = DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigRevisionIDRegister);
+
+ if (PCIVendorID != ASC_PCI_VENDORID) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ /* Ensure I/O, memory, and bus-master bits are set in the
+ * PCI command register; warn if the write does not stick. */
+ prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister);
+
+ if ((prevCmdRegBits & AscPCICmdRegBits_IOMemBusMaster) !=
+ AscPCICmdRegBits_IOMemBusMaster) {
+ DvcWritePCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister,
+ (prevCmdRegBits |
+ AscPCICmdRegBits_IOMemBusMaster));
+
+ if ((DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister)
+ & AscPCICmdRegBits_IOMemBusMaster)
+ != AscPCICmdRegBits_IOMemBusMaster) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+ /* 1200A/B boards need latency timer 0; Ultra needs >= 0x20. */
+ if ((PCIDeviceID == ASC_PCI_DEVICEID_1200A) ||
+ (PCIDeviceID == ASC_PCI_DEVICEID_1200B)) {
+ DvcWritePCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer, 0x00);
+ if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer)
+ != 0x00) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ } else if (PCIDeviceID == ASC_PCI_DEVICEID_ULTRA) {
+ if (DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer) < 0x20) {
+ DvcWritePCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer, 0x20);
+
+ if (DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer) < 0x20) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+ }
+ }
+
+ if (AscFindSignature(iop_base)) {
+ warn_code |= AscInitAscDvcVar(asc_dvc);
+ warn_code |= AscInitFromEEP(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG;
+ if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) {
+ asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
+ }
+ } else {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ }
+ return(warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitSetConfig(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ushort warn_code = 0;
+
+ /*
+ * Second phase of board init: re-verify the chip signature and push
+ * the device variables back into hardware registers.  Returns
+ * accumulated ASC_WARN_* bits, or UW_ERR if an error is latched.
+ */
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG;
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (AscFindSignature(asc_dvc->iop_base)) {
+ warn_code |= AscInitFromAscDvcVar(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
+ } else {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ }
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitFromAscDvcVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ PortAddr iop_base;
+ ushort cfg_msw;
+ ushort warn_code;
+ ushort pci_device_id;
+
+ /*
+ * Program hardware from the in-memory device variables: recover the
+ * config MSW, reconcile tagged queuing with disconnect enables, set
+ * the IRQ (ISA/VL), apply per-bus bug-fix flags, set the SCSI ID,
+ * and (ISA) program DMA channel and speed.  Returns ASC_WARN_* bits;
+ * hardware set failures are latched into err_code.
+ */
+ iop_base = asc_dvc->iop_base;
+ pci_device_id = asc_dvc->cfg->pci_device_id;
+ warn_code = 0;
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= (~(ASC_CFG_MSW_CLR_MASK));
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ /* Command queuing requires disconnect; drop disconnects that
+ * conflict and warn. */
+ if ((asc_dvc->cfg->cmd_qng_enabled & asc_dvc->cfg->disc_enable) !=
+ asc_dvc->cfg->cmd_qng_enabled) {
+ asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+ }
+ if ((asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_VL)) != 0) {
+ if (AscSetChipIRQ(iop_base, asc_dvc->irq_no, asc_dvc->bus_type)
+ != asc_dvc->irq_no) {
+ asc_dvc->err_code |= ASC_IERR_SET_IRQ_NO;
+ }
+ }
+ if (asc_dvc->bus_type & ASC_IS_PCI) {
+ cfg_msw &= 0xFFC0;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) {
+ } else {
+ /* Rev A/B PCI boards need both bug-fix workarounds. */
+ if ((pci_device_id == ASC_PCI_DEVICE_ID_REV_A) ||
+ (pci_device_id == ASC_PCI_DEVICE_ID_REV_B)) {
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB;
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
+ }
+ }
+ } else if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
+ if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
+ == ASC_CHIP_VER_ASYN_BUG) {
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
+ }
+ }
+ if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
+ asc_dvc->cfg->chip_scsi_id) {
+ asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
+ }
+ if (asc_dvc->bus_type & ASC_IS_ISA) {
+ AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
+ AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
+ }
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitAsc1000Driver(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ushort warn_code;
+ PortAddr iop_base;
+ extern ushort _asc_mcode_size;
+ extern ulong _asc_mcode_chksum;
+ extern uchar _asc_mcode_buf[];
+
+ /*
+ * Final init phase: optionally reset the SCSI bus, initialize LRAM,
+ * download and checksum-verify the microcode, initialize the
+ * microcode variables, and enable interrupts.  Returns ASC_WARN_*
+ * bits, or UW_ERR on a latched error; signature/checksum failures
+ * latch err_code and return early.
+ */
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) &&
+ !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) {
+ AscResetChipAndScsiBus(asc_dvc);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ }
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (!AscFindSignature(asc_dvc->iop_base)) {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return (warn_code);
+ }
+ AscDisableInterrupt(iop_base);
+ warn_code |= AscInitLram(asc_dvc);
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (AscLoadMicroCode(iop_base, 0, (ushort *) _asc_mcode_buf,
+ _asc_mcode_size) != _asc_mcode_chksum) {
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ return (warn_code);
+ }
+ warn_code |= AscInitMicroCodeVar(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
+ AscEnableInterrupt(iop_base);
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitAscDvcVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ int i;
+ PortAddr iop_base;
+ ushort warn_code;
+ uchar chip_version;
+
+ /*
+ * Initialize the ASC_DVC_VAR structure to driver defaults: zero all
+ * runtime state, set default queue depths and SDTR tables, detect
+ * the chip version and upgrade to the Ultra period table when on an
+ * Ultra-capable PCI chip, and set per-target queue defaults.
+ * Returns ASC_WARN_* bits (currently always 0 unless the bus type
+ * is invalid, which latches err_code instead).
+ */
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ asc_dvc->err_code = 0;
+ if ((asc_dvc->bus_type &
+ (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
+ asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
+ }
+ AscSetChipControl(iop_base, CC_HALT);
+ AscSetChipStatus(iop_base, 0);
+ asc_dvc->bug_fix_cntl = 0;
+ asc_dvc->pci_fix_asyn_xfer = 0;
+ asc_dvc->pci_fix_asyn_xfer_always = 0;
+ asc_dvc->init_state = 0;
+ asc_dvc->sdtr_done = 0;
+ asc_dvc->cur_total_qng = 0;
+ asc_dvc->is_in_int = 0;
+ asc_dvc->in_critical_cnt = 0;
+ asc_dvc->last_q_shortage = 0;
+ asc_dvc->use_tagged_qng = 0;
+ asc_dvc->no_scam = 0;
+ asc_dvc->unit_not_ready = 0;
+ asc_dvc->queue_full_or_busy = 0;
+ asc_dvc->redo_scam = 0 ;
+ asc_dvc->res2 = 0 ;
+ asc_dvc->host_init_sdtr_index = 0 ;
+ asc_dvc->res7 = 0 ;
+ asc_dvc->res8 = 0 ;
+ asc_dvc->cfg->can_tagged_qng = 0 ;
+ asc_dvc->cfg->cmd_qng_enabled = 0;
+ asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL;
+ asc_dvc->init_sdtr = 0;
+ asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG;
+ asc_dvc->scsi_reset_wait = 3;
+ asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type);
+ asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID;
+ asc_dvc->cfg->lib_serial_no = ASC_LIB_SERIAL_NUMBER;
+ asc_dvc->cfg->lib_version = (ASC_LIB_VERSION_MAJOR << 8) |
+ ASC_LIB_VERSION_MINOR;
+ chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type);
+ asc_dvc->cfg->chip_version = chip_version;
+ /* Default (non-Ultra) 8-entry synchronous period table. */
+ asc_dvc->sdtr_period_tbl[0] = SYN_XFER_NS_0;
+ asc_dvc->sdtr_period_tbl[1] = SYN_XFER_NS_1;
+ asc_dvc->sdtr_period_tbl[2] = SYN_XFER_NS_2;
+ asc_dvc->sdtr_period_tbl[3] = SYN_XFER_NS_3;
+ asc_dvc->sdtr_period_tbl[4] = SYN_XFER_NS_4;
+ asc_dvc->sdtr_period_tbl[5] = SYN_XFER_NS_5;
+ asc_dvc->sdtr_period_tbl[6] = SYN_XFER_NS_6;
+ asc_dvc->sdtr_period_tbl[7] = SYN_XFER_NS_7;
+ asc_dvc->max_sdtr_index = 7;
+ /* Ultra-capable PCI chips get the 16-entry Ultra period table and
+ * chip-revision-specific extra control settings. */
+ if ((asc_dvc->bus_type & ASC_IS_PCI) &&
+ (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) {
+ asc_dvc->bus_type = ASC_IS_PCI_ULTRA;
+ asc_dvc->sdtr_period_tbl[0] = SYN_ULTRA_XFER_NS_0;
+ asc_dvc->sdtr_period_tbl[1] = SYN_ULTRA_XFER_NS_1;
+ asc_dvc->sdtr_period_tbl[2] = SYN_ULTRA_XFER_NS_2;
+ asc_dvc->sdtr_period_tbl[3] = SYN_ULTRA_XFER_NS_3;
+ asc_dvc->sdtr_period_tbl[4] = SYN_ULTRA_XFER_NS_4;
+ asc_dvc->sdtr_period_tbl[5] = SYN_ULTRA_XFER_NS_5;
+ asc_dvc->sdtr_period_tbl[6] = SYN_ULTRA_XFER_NS_6;
+ asc_dvc->sdtr_period_tbl[7] = SYN_ULTRA_XFER_NS_7;
+ asc_dvc->sdtr_period_tbl[8] = SYN_ULTRA_XFER_NS_8;
+ asc_dvc->sdtr_period_tbl[9] = SYN_ULTRA_XFER_NS_9;
+ asc_dvc->sdtr_period_tbl[10] = SYN_ULTRA_XFER_NS_10;
+ asc_dvc->sdtr_period_tbl[11] = SYN_ULTRA_XFER_NS_11;
+ asc_dvc->sdtr_period_tbl[12] = SYN_ULTRA_XFER_NS_12;
+ asc_dvc->sdtr_period_tbl[13] = SYN_ULTRA_XFER_NS_13;
+ asc_dvc->sdtr_period_tbl[14] = SYN_ULTRA_XFER_NS_14;
+ asc_dvc->sdtr_period_tbl[15] = SYN_ULTRA_XFER_NS_15;
+ asc_dvc->max_sdtr_index = 15;
+ if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150)
+ {
+ AscSetExtraControl(iop_base,
+ (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
+ } else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) {
+ AscSetExtraControl(iop_base,
+ (SEC_ACTIVE_NEGATE | SEC_ENABLE_FILTER));
+ }
+ }
+ if (asc_dvc->bus_type == ASC_IS_PCI) {
+ AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
+ }
+
+ asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
+ if (AscGetChipBusType(iop_base) == ASC_IS_ISAPNP) {
+ AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
+ asc_dvc->bus_type = ASC_IS_ISAPNP;
+ }
+ if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
+ asc_dvc->cfg->isa_dma_channel = (uchar) AscGetIsaDmaChannel(iop_base);
+ }
+ /* Per-target defaults: empty busy lists, SCSI-1 queue depth. */
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cur_dvc_qng[i] = 0;
+ asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
+ asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *) 0L;
+ asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *) 0L;
+ asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG;
+ }
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitFromEEP(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ASCEEP_CONFIG eep_config_buf;
+ ASCEEP_CONFIG *eep_config;
+ PortAddr iop_base;
+ ushort chksum;
+ ushort warn_code;
+ ushort cfg_msw, cfg_lsw;
+ int i;
+ int write_eep = 0;
+
+ /*
+ * Load the board configuration from EEPROM into asc_dvc, with
+ * recovery paths: halt the chip (resetting the bus if needed),
+ * validate the microcode PC address, read and checksum the EEPROM,
+ * substitute safe defaults for EEPROM-less 3050 boards, clamp queue
+ * limits, and finally write the EEPROM back if a checksum repair is
+ * pending.  Returns accumulated ASC_WARN_* bits; fatal conditions
+ * latch err_code and return early.
+ */
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE);
+ AscStopQueueExe(iop_base);
+ if ((AscStopChip(iop_base) == FALSE) ||
+ (AscGetChipScsiCtrl(iop_base) != 0)) {
+ asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE;
+ AscResetChipAndScsiBus(asc_dvc);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ }
+ if (AscIsChipHalted(iop_base) == FALSE) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ return (warn_code);
+ }
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ return (warn_code);
+ }
+ eep_config = (ASCEEP_CONFIG *) & eep_config_buf;
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= (~(ASC_CFG_MSW_CLR_MASK));
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type);
+ if (chksum == 0) {
+ chksum = 0xaa55;
+ }
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+ /* On auto-configured chip version 3 boards the hardware
+ * registers are authoritative; recover EEPROM copies. */
+ if (asc_dvc->cfg->chip_version == 3) {
+ if (eep_config->cfg_lsw != cfg_lsw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_lsw = AscGetChipCfgLsw(iop_base);
+ }
+ if (eep_config->cfg_msw != cfg_msw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_msw = AscGetChipCfgMsw(iop_base);
+ }
+ }
+ }
+ eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
+ eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON;
+ if (chksum != eep_config->chksum) {
+ /* Bad checksum: 3050 chips have no EEPROM, so use built-in
+ * defaults; otherwise schedule an EEPROM rewrite and warn. */
+ if (AscGetChipVersion(iop_base, asc_dvc->bus_type) ==
+ ASC_CHIP_VER_PCI_ULTRA_3050 )
+ {
+ eep_config->init_sdtr = 0xFF;
+ eep_config->disc_enable = 0xFF;
+ eep_config->start_motor = 0xFF;
+ eep_config->use_cmd_qng = 0;
+ eep_config->max_total_qng = 0xF0;
+ eep_config->max_tag_qng = 0x20;
+ eep_config->cntl = 0xBFFF;
+ eep_config->chip_scsi_id = 7;
+ eep_config->no_scam = 0;
+ eep_config->adapter_info[0] = 0;
+ eep_config->adapter_info[1] = 0;
+ eep_config->adapter_info[2] = 0;
+ eep_config->adapter_info[3] = 0;
+ eep_config->adapter_info[4] = 0;
+ /* Indicate EEPROM-less board. */
+ eep_config->adapter_info[5] = 0xBB;
+ } else {
+ write_eep = 1 ;
+ warn_code |= ASC_WARN_EEPROM_CHKSUM ;
+ }
+ }
+ asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr ;
+ asc_dvc->cfg->disc_enable = eep_config->disc_enable;
+ asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
+ asc_dvc->cfg->isa_dma_speed = eep_config->isa_dma_speed;
+ asc_dvc->start_motor = eep_config->start_motor;
+ asc_dvc->dvc_cntl = eep_config->cntl;
+ asc_dvc->no_scam = eep_config->no_scam;
+ asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0];
+ asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1];
+ asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2];
+ asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3];
+ asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4];
+ asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5];
+ /* Without external LRAM, queue limits shrink to the in-RAM maxima. */
+ if (!AscTestExternalLram(asc_dvc)) {
+ if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA)) {
+ eep_config->max_total_qng = ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
+ eep_config->max_tag_qng = ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG;
+ } else {
+ eep_config->cfg_msw |= 0x0800;
+ cfg_msw |= 0x0800;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG;
+ eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG;
+ }
+ } else {
+ }
+ /* Clamp total and per-tag queue limits to their legal ranges. */
+ if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MIN_TOTAL_QNG;
+ }
+ if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MAX_TOTAL_QNG;
+ }
+ if (eep_config->max_tag_qng > eep_config->max_total_qng) {
+ eep_config->max_tag_qng = eep_config->max_total_qng;
+ }
+ if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) {
+ eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC;
+ }
+ asc_dvc->max_total_qng = eep_config->max_total_qng;
+ if ((eep_config->use_cmd_qng & eep_config->disc_enable) !=
+ eep_config->use_cmd_qng) {
+ eep_config->disc_enable = eep_config->use_cmd_qng;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ if (asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_VL | ASC_IS_EISA)) {
+ asc_dvc->irq_no = AscGetChipIRQ(iop_base, asc_dvc->bus_type);
+ }
+ eep_config->chip_scsi_id &= ASC_MAX_TID;
+ asc_dvc->cfg->chip_scsi_id = eep_config->chip_scsi_id;
+ /* Ultra boards without the Ultra-SDTR control bit negotiate at
+ * the 10 MB/s index instead of full Ultra speed. */
+ if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) &&
+ !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) {
+ asc_dvc->host_init_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX;
+ }
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i];
+ asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng;
+ asc_dvc->cfg->sdtr_period_offset[i] =
+ (uchar) (ASC_DEF_SDTR_OFFSET |
+ (asc_dvc->host_init_sdtr_index << 4));
+ }
+ eep_config->cfg_msw = AscGetChipCfgMsw(iop_base);
+ if (write_eep) {
+ (void) AscSetEEPConfig(iop_base, eep_config, asc_dvc->bus_type);
+ }
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitMicroCodeVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ int i;
+ ushort warn_code;
+ PortAddr iop_base;
+ ulong phy_addr;
+
+ /*
+ * Initialize the microcode's LRAM variables after download: per-
+ * target SDTR settings, queue link variables, disconnect enables,
+ * host SCSI ID, and the 8-byte-aligned physical overrun buffer.
+ * Records the microcode date/version, resets the PC to the start
+ * address, and starts the chip.  Failures latch err_code; returns
+ * ASC_WARN_* bits (currently always 0).
+ */
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ AscPutMCodeInitSDTRAtID(iop_base, i,
+ asc_dvc->cfg->sdtr_period_offset[i]
+);
+ }
+ AscInitQLinkVar(asc_dvc);
+ AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B,
+ ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id));
+ if ((phy_addr = AscGetOnePhyAddr(asc_dvc,
+ (uchar *) asc_dvc->cfg->overrun_buf,
+ ASC_OVERRUN_BSIZE)) == 0L) {
+ asc_dvc->err_code |= ASC_IERR_GET_PHY_ADDR;
+ } else {
+ /* Round up to 8-byte alignment; size shrinks accordingly. */
+ phy_addr = (phy_addr & 0xFFFFFFF8UL) + 8;
+ AscWriteLramDWord(iop_base, ASCV_OVERRUN_PADDR_D, phy_addr);
+ AscWriteLramDWord(iop_base, ASCV_OVERRUN_BSIZE_D,
+ ASC_OVERRUN_BSIZE - 8);
+ }
+ asc_dvc->cfg->mcode_date = AscReadLramWord(iop_base,
+ (ushort) ASCV_MC_DATE_W);
+ asc_dvc->cfg->mcode_version = AscReadLramWord(iop_base,
+ (ushort) ASCV_MC_VER_W);
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ return (warn_code);
+ }
+ if (AscStartChip(iop_base) != 1) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ return (warn_code);
+ }
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC int
+AscTestExternalLram(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ ushort saved_word;
+ int sta;
+
+ /*
+ * Probe for external LRAM by writing a test pattern (0x55AA) to a
+ * high queue address (queue 241) and reading it back.  Returns 1
+ * (and restores the original word) when the pattern sticks, 0 when
+ * only internal RAM is present.
+ */
+ iop_base = asc_dvc->iop_base;
+ sta = 0;
+ q_addr = ASC_QNO_TO_QADDR(241);
+ saved_word = AscReadLramWord(iop_base, q_addr);
+ AscSetChipLramAddr(iop_base, q_addr);
+ AscSetChipLramData(iop_base, 0x55AA);
+ DvcSleepMilliSecond(10);
+ AscSetChipLramAddr(iop_base, q_addr);
+ if (AscGetChipLramData(iop_base) == 0x55AA) {
+ sta = 1;
+ AscWriteLramWord(iop_base, q_addr, saved_word);
+ }
+ return (sta);
+}
+
+ASC_INITFUNC(
+STATIC int
+AscWriteEEPCmdReg(
+ PortAddr iop_base,
+ uchar cmd_reg
+)
+)
+{
+ uchar read_back;
+ int retry;
+
+ /*
+ * Write the EEPROM command register and verify by read-back,
+ * retrying up to ASC_EEP_MAX_RETRY times with 1 ms delays.
+ * Returns 1 on verified write, 0 after exhausting retries.
+ */
+ retry = 0;
+ while (TRUE) {
+ AscSetChipEEPCmd(iop_base, cmd_reg);
+ DvcSleepMilliSecond(1);
+ read_back = AscGetChipEEPCmd(iop_base);
+ if (read_back == cmd_reg) {
+ return (1);
+ }
+ if (retry++ > ASC_EEP_MAX_RETRY) {
+ return (0);
+ }
+ }
+}
+
+ASC_INITFUNC(
+STATIC int
+AscWriteEEPDataReg(
+ PortAddr iop_base,
+ ushort data_reg
+)
+)
+{
+ ushort read_back;
+ int retry;
+
+ /* Same verified-write protocol as above, for the EEPROM data register. */
+ retry = 0;
+ while (TRUE) {
+ AscSetChipEEPData(iop_base, data_reg);
+ DvcSleepMilliSecond(1);
+ read_back = AscGetChipEEPData(iop_base);
+ if (read_back == data_reg) {
+ return (1);
+ }
+ if (retry++ > ASC_EEP_MAX_RETRY) {
+ return (0);
+ }
+ }
+}
+
+ASC_INITFUNC(
+STATIC void
+AscWaitEEPRead(
+ void
+)
+)
+{
+ DvcSleepMilliSecond(1);
+ return;
+}
+
+ASC_INITFUNC(
+STATIC void
+AscWaitEEPWrite(
+ void
+)
+)
+{
+ DvcSleepMilliSecond(20);
+ return;
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscReadEEPWord(
+ PortAddr iop_base,
+ uchar addr
+)
+)
+{
+ ushort read_wval;
+ uchar cmd_reg;
+
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ cmd_reg = addr | ASC_EEP_CMD_READ;
+ AscWriteEEPCmdReg(iop_base, cmd_reg);
+ AscWaitEEPRead();
+ read_wval = AscGetChipEEPData(iop_base);
+ AscWaitEEPRead();
+ return (read_wval);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscWriteEEPWord(
+ PortAddr iop_base,
+ uchar addr,
+ ushort word_val
+)
+)
+{
+ ushort read_wval;
+
+ read_wval = AscReadEEPWord(iop_base, addr);
+ if (read_wval != word_val) {
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE);
+ AscWaitEEPRead();
+ AscWriteEEPDataReg(iop_base, word_val);
+ AscWaitEEPRead();
+ AscWriteEEPCmdReg(iop_base,
+ (uchar) ((uchar) ASC_EEP_CMD_WRITE | addr));
+ AscWaitEEPWrite();
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ return (AscReadEEPWord(iop_base, addr));
+ }
+ return (read_wval);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscGetEEPConfig(
+ PortAddr iop_base,
+ ASCEEP_CONFIG * cfg_buf, ushort bus_type
+)
+)
+{
+ ushort wval;
+ ushort sum;
+ ushort *wbuf;
+ int cfg_beg;
+ int cfg_end;
+ int s_addr;
+ int isa_pnp_wsize;
+
+ wbuf = (ushort *) cfg_buf;
+ sum = 0;
+ isa_pnp_wsize = 0;
+ for (s_addr = 0; s_addr < (2 + isa_pnp_wsize); s_addr++, wbuf++) {
+ wval = AscReadEEPWord(iop_base, (uchar) s_addr);
+ sum += wval;
+ *wbuf = wval;
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1);
+ s_addr++, wbuf++) {
+ wval = AscReadEEPWord(iop_base, (uchar) s_addr);
+ sum += wval;
+ *wbuf = wval;
+ }
+ *wbuf = AscReadEEPWord(iop_base, (uchar) s_addr);
+ return (sum);
+}
+
+ASC_INITFUNC(
+STATIC int
+AscSetEEPConfigOnce(
+ PortAddr iop_base,
+ ASCEEP_CONFIG * cfg_buf, ushort bus_type
+)
+)
+{
+ int n_error;
+ ushort *wbuf;
+ ushort sum;
+ int s_addr;
+ int cfg_beg;
+ int cfg_end;
+
+ wbuf = (ushort *) cfg_buf;
+ n_error = 0;
+ sum = 0;
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ sum += *wbuf;
+ if (*wbuf != AscWriteEEPWord(iop_base, (uchar) s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1);
+ s_addr++, wbuf++) {
+ sum += *wbuf;
+ if (*wbuf != AscWriteEEPWord(iop_base, (uchar) s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ *wbuf = sum;
+ if (sum != AscWriteEEPWord(iop_base, (uchar) s_addr, sum)) {
+ n_error++;
+ }
+ wbuf = (ushort *) cfg_buf;
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ if (*wbuf != AscReadEEPWord(iop_base, (uchar) s_addr)) {
+ n_error++;
+ }
+ }
+ for (s_addr = cfg_beg; s_addr <= cfg_end;
+ s_addr++, wbuf++) {
+ if (*wbuf != AscReadEEPWord(iop_base, (uchar) s_addr)) {
+ n_error++;
+ }
+ }
+ return (n_error);
+}
+
+ASC_INITFUNC(
+STATIC int
+AscSetEEPConfig(
+ PortAddr iop_base,
+ ASCEEP_CONFIG * cfg_buf, ushort bus_type
+)
+)
+{
+ int retry;
+ int n_error;
+
+ retry = 0;
+ while (TRUE) {
+ if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf,
+ bus_type)) == 0) {
+ break;
+ }
+ if (++retry > ASC_EEP_MAX_RETRY) {
+ break;
+ }
+ }
+ return (n_error);
+}
+
+STATIC void
+AscAsyncFix(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ uchar tid_no,
+ ASC_SCSI_INQUIRY *inq)
+{
+ uchar dvc_type;
+ ASC_SCSI_BIT_ID_TYPE tid_bits;
+
+ dvc_type = inq->byte0.peri_dvc_type;
+ tid_bits = ASC_TIX_TO_TARGET_ID(tid_no);
+
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN) {
+ if (!(asc_dvc->init_sdtr & tid_bits)) {
+ if ((dvc_type == SCSI_TYPE_CDROM) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "HP ", 3) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer_always |= tid_bits;
+ }
+ asc_dvc->pci_fix_asyn_xfer |= tid_bits;
+ if ((dvc_type == SCSI_TYPE_PROC) ||
+ (dvc_type == SCSI_TYPE_SCANNER)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+ if ((dvc_type == SCSI_TYPE_SASD) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "TANDBERG", 8) == 0) &&
+ (AscCompareString((uchar *) inq->product_id,
+ (uchar *) " TDC 36", 7) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+ if ((dvc_type == SCSI_TYPE_SASD) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "WANGTEK ", 8) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+
+ if ((dvc_type == SCSI_TYPE_CDROM) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "NEC ", 8) == 0) &&
+ (AscCompareString((uchar *) inq->product_id,
+ (uchar *) "CD-ROM DRIVE ", 16) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+
+ if ((dvc_type == SCSI_TYPE_CDROM) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "YAMAHA", 6) == 0) &&
+ (AscCompareString((uchar *) inq->product_id,
+ (uchar *) "CDR400", 6) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+ if (asc_dvc->pci_fix_asyn_xfer & tid_bits) {
+ AscSetRunChipSynRegAtID(asc_dvc->iop_base, tid_no,
+ ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ }
+ }
+ return;
+}
+
+STATIC int
+AscTagQueuingSafe(ASC_SCSI_INQUIRY *inq)
+{
+ if ((inq->add_len >= 32) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "QUANTUM XP34301", 15) == 0) &&
+ (AscCompareString((uchar *) inq->product_rev_level,
+ (uchar *) "1071", 4) == 0))
+ {
+ return 0;
+ }
+ return 1;
+}
+
+STATIC void
+AscInquiryHandling(ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ uchar tid_no, ASC_SCSI_INQUIRY *inq)
+{
+ ASC_SCSI_BIT_ID_TYPE tid_bit = ASC_TIX_TO_TARGET_ID(tid_no);
+ ASC_SCSI_BIT_ID_TYPE orig_init_sdtr, orig_use_tagged_qng;
+
+ orig_init_sdtr = asc_dvc->init_sdtr;
+ orig_use_tagged_qng = asc_dvc->use_tagged_qng;
+
+ asc_dvc->init_sdtr &= ~tid_bit;
+ asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
+ asc_dvc->use_tagged_qng &= ~tid_bit;
+
+ if (inq->byte3.rsp_data_fmt >= 2 || inq->byte2.ansi_apr_ver >= 2) {
+ if ((asc_dvc->cfg->sdtr_enable & tid_bit) && inq->byte7.Sync) {
+ asc_dvc->init_sdtr |= tid_bit;
+ }
+ if ((asc_dvc->cfg->cmd_qng_enabled & tid_bit) && inq->byte7.CmdQue) {
+ if (AscTagQueuingSafe(inq)) {
+ asc_dvc->use_tagged_qng |= tid_bit;
+ asc_dvc->cfg->can_tagged_qng |= tid_bit;
+ }
+ }
+ }
+ if (orig_use_tagged_qng != asc_dvc->use_tagged_qng) {
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
+ asc_dvc->use_tagged_qng);
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
+ asc_dvc->cfg->can_tagged_qng);
+
+ asc_dvc->max_dvc_qng[tid_no] =
+ asc_dvc->cfg->max_tag_qng[tid_no];
+ AscWriteLramByte(asc_dvc->iop_base,
+ (ushort) (ASCV_MAX_DVC_QNG_BEG + tid_no),
+ asc_dvc->max_dvc_qng[tid_no]);
+ }
+ if (orig_init_sdtr != asc_dvc->init_sdtr) {
+ AscAsyncFix(asc_dvc, tid_no, inq);
+ }
+ return;
+}
+
+STATIC int
+AscCompareString(
+ ruchar * str1,
+ ruchar * str2,
+ int len
+)
+{
+ int i;
+ int diff;
+
+ for (i = 0; i < len; i++) {
+ diff = (int) (str1[i] - str2[i]);
+ if (diff != 0)
+ return (diff);
+ }
+ return (0);
+}
+
+STATIC uchar
+AscReadLramByte(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ uchar byte_data;
+ ushort word_data;
+
+ if (isodd_word(addr)) {
+ AscSetChipLramAddr(iop_base, addr - 1);
+ word_data = AscGetChipLramData(iop_base);
+ byte_data = (uchar) ((word_data >> 8) & 0xFF);
+ } else {
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+ byte_data = (uchar) (word_data & 0xFF);
+ }
+ return (byte_data);
+}
+
+STATIC ushort
+AscReadLramWord(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ ushort word_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+ return (word_data);
+}
+
+STATIC ulong
+AscReadLramDWord(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ ushort val_low, val_high;
+ ulong dword_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+ val_low = AscGetChipLramData(iop_base);
+ val_high = AscGetChipLramData(iop_base);
+ dword_data = ((ulong) val_high << 16) | (ulong) val_low;
+ return (dword_data);
+}
+
+STATIC void
+AscWriteLramWord(
+ PortAddr iop_base,
+ ushort addr,
+ ushort word_val
+)
+{
+ AscSetChipLramAddr(iop_base, addr);
+ AscSetChipLramData(iop_base, word_val);
+ return;
+}
+
+STATIC void
+AscWriteLramDWord(
+ PortAddr iop_base,
+ ushort addr,
+ ulong dword_val
+)
+{
+ ushort word_val;
+
+ AscSetChipLramAddr(iop_base, addr);
+ word_val = (ushort) dword_val;
+ AscSetChipLramData(iop_base, word_val);
+ word_val = (ushort) (dword_val >> 16);
+ AscSetChipLramData(iop_base, word_val);
+ return;
+}
+
+STATIC void
+AscWriteLramByte(
+ PortAddr iop_base,
+ ushort addr,
+ uchar byte_val
+)
+{
+ ushort word_data;
+
+ if (isodd_word(addr)) {
+ addr--;
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0x00FF;
+ word_data |= (((ushort) byte_val << 8) & 0xFF00);
+ } else {
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0xFF00;
+ word_data |= ((ushort) byte_val & 0x00FF);
+ }
+ AscWriteLramWord(iop_base, addr, word_data);
+ return;
+}
+
+STATIC void
+AscMemWordCopyToLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort * s_buffer,
+ int words
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcOutPortWords(iop_base + IOP_RAM_DATA, s_buffer, words);
+ return;
+}
+
+STATIC void
+AscMemDWordCopyToLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ulong * s_buffer,
+ int dwords
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcOutPortDWords(iop_base + IOP_RAM_DATA, s_buffer, dwords);
+ return;
+}
+
+STATIC void
+AscMemWordCopyFromLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort * d_buffer,
+ int words
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcInPortWords(iop_base + IOP_RAM_DATA, d_buffer, words);
+ return;
+}
+
+STATIC ulong
+AscMemSumLramWord(
+ PortAddr iop_base,
+ ushort s_addr,
+ rint words
+)
+{
+ ulong sum;
+ int i;
+
+ sum = 0L;
+ for (i = 0; i < words; i++, s_addr += 2) {
+ sum += AscReadLramWord(iop_base, s_addr);
+ }
+ return (sum);
+}
+
+STATIC void
+AscMemWordSetLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort set_wval,
+ rint words
+)
+{
+ rint i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++) {
+ AscSetChipLramData(iop_base, set_wval);
+ }
+ return;
+}
+
+
+/*
+ * --- Adv Library Functions
+ */
+
+/* a_qswap.h */
+STATIC unsigned char _adv_mcode_buf[] ASC_INITDATA = {
+ 0x9C, 0xF0, 0x80, 0x01, 0x00, 0xF0, 0x44, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x01, 0xD6, 0x11, 0x00, 0x00, 0x70, 0x01,
+ 0x30, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x10, 0x2D, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x56, 0x34, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0xF7, 0x70, 0x01, 0x0C, 0x1C, 0x06, 0xF7, 0x02, 0x00, 0x00, 0xF2, 0xD6, 0x0A,
+ 0x04, 0xF7, 0x70, 0x01, 0x06, 0xF7, 0x02, 0x00, 0x3E, 0x57, 0x3C, 0x56, 0x0C, 0x1C, 0x00, 0xFC,
+ 0xA6, 0x00, 0x01, 0x58, 0xAA, 0x13, 0x20, 0xF0, 0xA6, 0x03, 0x06, 0xEC, 0xB9, 0x00, 0x0E, 0x47,
+ 0x03, 0xE6, 0x10, 0x00, 0xCE, 0x45, 0x02, 0x13, 0x3E, 0x57, 0x06, 0xEA, 0xB9, 0x00, 0x47, 0x4B,
+ 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x01, 0x48, 0x4E, 0x12, 0x03, 0xF6, 0xC0, 0x00,
+ 0x00, 0xF2, 0x68, 0x0A, 0x41, 0x58, 0x03, 0xF6, 0xD0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x49, 0x44,
+ 0x59, 0xF0, 0x0A, 0x02, 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x44, 0x58, 0x00, 0xF2,
+ 0xE2, 0x0D, 0x02, 0xCC, 0x4A, 0xE4, 0x01, 0x00, 0x55, 0xF0, 0x08, 0x03, 0x45, 0xF4, 0x02, 0x00,
+ 0x83, 0x5A, 0x04, 0xCC, 0x01, 0x4A, 0x12, 0x12, 0x00, 0xF2, 0xE2, 0x0D, 0x00, 0xCD, 0x48, 0xE4,
+ 0x01, 0x00, 0xE9, 0x13, 0x00, 0xF2, 0xC6, 0x0F, 0xFA, 0x10, 0x0E, 0x47, 0x03, 0xE6, 0x10, 0x00,
+ 0xCE, 0x45, 0x02, 0x13, 0x3E, 0x57, 0xCE, 0x47, 0x97, 0x13, 0x04, 0xEC, 0xB4, 0x00, 0x00, 0xF2,
+ 0xE2, 0x0D, 0x00, 0xCD, 0x48, 0xE4, 0x00, 0x00, 0x12, 0x12, 0x3E, 0x57, 0x06, 0xCC, 0x45, 0xF4,
+ 0x02, 0x00, 0x83, 0x5A, 0x00, 0xCC, 0x00, 0xEA, 0xB4, 0x00, 0x92, 0x10, 0x00, 0xF0, 0x8C, 0x01,
+ 0x43, 0xF0, 0x5C, 0x02, 0x44, 0xF0, 0x60, 0x02, 0x45, 0xF0, 0x64, 0x02, 0x46, 0xF0, 0x68, 0x02,
+ 0x47, 0xF0, 0x6E, 0x02, 0x48, 0xF0, 0x9E, 0x02, 0xB9, 0x54, 0x62, 0x10, 0x00, 0x1C, 0x5A, 0x10,
+ 0x02, 0x1C, 0x56, 0x10, 0x1E, 0x1C, 0x52, 0x10, 0x00, 0xF2, 0x1E, 0x11, 0x50, 0x10, 0x06, 0xFC,
+ 0xA8, 0x00, 0x03, 0xF6, 0xBE, 0x00, 0x00, 0xF2, 0x4E, 0x0A, 0x8C, 0x10, 0x01, 0xF6, 0x01, 0x00,
+ 0x01, 0xFA, 0xA8, 0x00, 0x00, 0xF2, 0x2C, 0x0B, 0x06, 0x10, 0xB9, 0x54, 0x01, 0xFA, 0xA8, 0x00,
+ 0x03, 0xF6, 0xBE, 0x00, 0x00, 0xF2, 0x58, 0x0A, 0x01, 0xFC, 0xA8, 0x00, 0x20, 0x10, 0x58, 0x1C,
+ 0x00, 0xF2, 0x1C, 0x0B, 0x5A, 0x1C, 0x01, 0xF6, 0x01, 0x00, 0x38, 0x54, 0x00, 0xFA, 0xA6, 0x00,
+ 0x01, 0xFA, 0xA8, 0x00, 0x20, 0x1C, 0x00, 0xF0, 0x72, 0x01, 0x01, 0xF6, 0x01, 0x00, 0x38, 0x54,
+ 0x00, 0xFA, 0xA6, 0x00, 0x01, 0xFA, 0xA8, 0x00, 0x20, 0x1C, 0x00, 0xF0, 0x80, 0x01, 0x03, 0xF6,
+ 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x01, 0x48, 0x0A, 0x13, 0x00, 0xF2, 0x38, 0x10, 0x00, 0xF2,
+ 0x54, 0x0F, 0x24, 0x10, 0x03, 0xF6, 0xC0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x02, 0xF6, 0xD0, 0x00,
+ 0x02, 0x57, 0x03, 0x59, 0x01, 0xCC, 0x49, 0x44, 0x5B, 0xF0, 0x04, 0x03, 0x00, 0xF2, 0x9C, 0x0F,
+ 0x00, 0xF0, 0x80, 0x01, 0x00, 0xF2, 0x14, 0x10, 0x0C, 0x1C, 0x02, 0x4B, 0xBF, 0x57, 0x9E, 0x43,
+ 0x77, 0x57, 0x07, 0x4B, 0x20, 0xF0, 0xA6, 0x03, 0x40, 0x1C, 0x1E, 0xF0, 0x30, 0x03, 0x26, 0xF0,
+ 0x2C, 0x03, 0xA0, 0xF0, 0x1A, 0x03, 0x11, 0xF0, 0xA6, 0x03, 0x12, 0x10, 0x9F, 0xF0, 0x3E, 0x03,
+ 0x46, 0x1C, 0x82, 0xE7, 0x05, 0x00, 0x9E, 0xE7, 0x11, 0x00, 0x00, 0xF0, 0x06, 0x0A, 0x0C, 0x1C,
+ 0x48, 0x1C, 0x46, 0x1C, 0x38, 0x54, 0x00, 0xEC, 0xBA, 0x00, 0x08, 0x44, 0x00, 0xEA, 0xBA, 0x00,
+ 0x03, 0xF6, 0xC0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x08, 0x44, 0x00, 0x4C, 0x82, 0xE7, 0x02, 0x00,
+ 0x00, 0xF2, 0x12, 0x11, 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0x70, 0x03, 0x00, 0xF2, 0x60, 0x0B,
+ 0x06, 0xF0, 0x80, 0x03, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0, 0xFC, 0x09, 0x00, 0xF0, 0x02, 0x0A,
+ 0x00, 0xFC, 0xBE, 0x00, 0x98, 0x57, 0x55, 0xF0, 0xAC, 0x04, 0x01, 0xE6, 0x0C, 0x00, 0x00, 0xF2,
+ 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11, 0x01, 0xF0,
+ 0x7C, 0x02, 0x00, 0xF0, 0x8A, 0x02, 0x46, 0x1C, 0x0C, 0x1C, 0x67, 0x1B, 0xBF, 0x57, 0x77, 0x57,
+ 0x02, 0x4B, 0x48, 0x1C, 0x32, 0x1C, 0x00, 0xF2, 0x92, 0x0D, 0x30, 0x1C, 0x96, 0xF0, 0xBC, 0x03,
+ 0xB1, 0xF0, 0xC0, 0x03, 0x1E, 0xF0, 0xFC, 0x09, 0x85, 0xF0, 0x02, 0x0A, 0x00, 0xFC, 0xBE, 0x00,
+ 0x98, 0x57, 0x14, 0x12, 0x01, 0xE6, 0x0C, 0x00, 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11,
+ 0x01, 0xF0, 0x7C, 0x02, 0x00, 0xF0, 0x8A, 0x02, 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A,
+ 0x01, 0x48, 0x55, 0xF0, 0x98, 0x04, 0x03, 0x82, 0x03, 0xFC, 0xA0, 0x00, 0x9B, 0x57, 0x40, 0x12,
+ 0x69, 0x18, 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0x42, 0x04, 0x69, 0x08, 0x00, 0xF2, 0x12, 0x11,
+ 0x85, 0xF0, 0x02, 0x0A, 0x68, 0x08, 0x4C, 0x44, 0x28, 0x12, 0x44, 0x48, 0x03, 0xF6, 0xE0, 0x00,
+ 0x00, 0xF2, 0x68, 0x0A, 0x45, 0x58, 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0xCC, 0x01, 0x48, 0x55, 0xF0,
+ 0x98, 0x04, 0x4C, 0x44, 0xEF, 0x13, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2, 0x14, 0x10, 0x08, 0x10,
+ 0x68, 0x18, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D, 0x04, 0x80, 0x18, 0xE4, 0x10, 0x00, 0x28, 0x12,
+ 0x01, 0xE6, 0x06, 0x00, 0x04, 0x80, 0x18, 0xE4, 0x01, 0x00, 0x04, 0x12, 0x01, 0xE6, 0x0D, 0x00,
+ 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11, 0x04, 0xE6, 0x02, 0x00, 0x9E, 0xE7, 0x15, 0x00,
+ 0x01, 0xF0, 0x1C, 0x0A, 0x00, 0xF0, 0x02, 0x0A, 0x69, 0x08, 0x05, 0x80, 0x48, 0xE4, 0x00, 0x00,
+ 0x0C, 0x12, 0x00, 0xE6, 0x11, 0x00, 0x00, 0xEA, 0xB8, 0x00, 0x00, 0xF2, 0xB6, 0x10, 0x82, 0xE7,
+ 0x02, 0x00, 0x1C, 0x90, 0x40, 0x5C, 0x00, 0x16, 0x01, 0xE6, 0x06, 0x00, 0x00, 0xF2, 0x4E, 0x0D,
+ 0x01, 0xF0, 0x80, 0x01, 0x1E, 0xF0, 0x80, 0x01, 0x00, 0xF0, 0xA0, 0x04, 0x42, 0x5B, 0x06, 0xF7,
+ 0x03, 0x00, 0x46, 0x59, 0xBF, 0x57, 0x77, 0x57, 0x01, 0xE6, 0x80, 0x00, 0x07, 0x80, 0x31, 0x44,
+ 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00, 0x56, 0x13, 0x20, 0x80, 0x48, 0xE4, 0x03, 0x00, 0x4E, 0x12,
+ 0x00, 0xFC, 0xA2, 0x00, 0x98, 0x57, 0x55, 0xF0, 0x1C, 0x05, 0x31, 0xE4, 0x40, 0x00, 0x00, 0xFC,
+ 0xA0, 0x00, 0x98, 0x57, 0x36, 0x12, 0x4C, 0x1C, 0x00, 0xF2, 0x12, 0x11, 0x89, 0x48, 0x00, 0xF2,
+ 0x12, 0x11, 0x86, 0xF0, 0x2E, 0x05, 0x82, 0xE7, 0x06, 0x00, 0x1B, 0x80, 0x48, 0xE4, 0x22, 0x00,
+ 0x5B, 0xF0, 0x0C, 0x05, 0x48, 0xE4, 0x20, 0x00, 0x59, 0xF0, 0x10, 0x05, 0x00, 0xE6, 0x20, 0x00,
+ 0x09, 0x48, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0x2E, 0x05, 0x83, 0x80, 0x04, 0x10, 0x00, 0xF2,
+ 0xA2, 0x0D, 0x00, 0xE6, 0x01, 0x00, 0x00, 0xEA, 0x26, 0x01, 0x01, 0xEA, 0x27, 0x01, 0x04, 0x80,
+ 0x18, 0xE4, 0x10, 0x00, 0x36, 0x12, 0xB9, 0x54, 0x00, 0xF2, 0xF6, 0x0E, 0x01, 0xE6, 0x06, 0x00,
+ 0x04, 0x80, 0x18, 0xE4, 0x01, 0x00, 0x04, 0x12, 0x01, 0xE6, 0x0D, 0x00, 0x00, 0xF2, 0x4E, 0x0D,
+ 0x00, 0xF2, 0x12, 0x11, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11, 0x04, 0xE6, 0x02, 0x00,
+ 0x9E, 0xE7, 0x15, 0x00, 0x01, 0xF0, 0x1C, 0x0A, 0x00, 0xF0, 0x02, 0x0A, 0x00, 0xFC, 0x20, 0x01,
+ 0x98, 0x57, 0x34, 0x12, 0x00, 0xFC, 0x24, 0x01, 0x98, 0x57, 0x2C, 0x13, 0xB9, 0x54, 0x00, 0xF2,
+ 0xF6, 0x0E, 0x86, 0xF0, 0xA8, 0x05, 0x03, 0xF6, 0x01, 0x00, 0x00, 0xF2, 0x8C, 0x0E, 0x85, 0xF0,
+ 0x9E, 0x05, 0x82, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x60, 0x0B, 0x82, 0xE7, 0x02, 0x00, 0x00, 0xFC,
+ 0x24, 0x01, 0xB0, 0x57, 0x00, 0xFA, 0x24, 0x01, 0x00, 0xFC, 0x9E, 0x00, 0x98, 0x57, 0x5A, 0x12,
+ 0x00, 0xFC, 0xB6, 0x00, 0x98, 0x57, 0x52, 0x13, 0x03, 0xE6, 0x0C, 0x00, 0x00, 0xFC, 0x9C, 0x00,
+ 0x98, 0x57, 0x04, 0x13, 0x03, 0xE6, 0x19, 0x00, 0x05, 0xE6, 0x08, 0x00, 0x00, 0xF6, 0x00, 0x01,
+ 0x00, 0x57, 0x00, 0x57, 0x03, 0x58, 0x00, 0xDC, 0x18, 0xF4, 0x00, 0x80, 0x04, 0x13, 0x05, 0xE6,
+ 0x0F, 0x00, 0xB9, 0x54, 0x00, 0xF2, 0xF6, 0x0E, 0x86, 0xF0, 0x0A, 0x06, 0x00, 0xF2, 0xBA, 0x0E,
+ 0x85, 0xF0, 0x00, 0x06, 0x82, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x60, 0x0B, 0x82, 0xE7, 0x02, 0x00,
+ 0x00, 0xFC, 0xB6, 0x00, 0xB0, 0x57, 0x00, 0xFA, 0xB6, 0x00, 0x01, 0xF6, 0x01, 0x00, 0x00, 0xF2,
+ 0xF6, 0x0E, 0x9C, 0x32, 0x4E, 0x1C, 0x32, 0x1C, 0x00, 0xF2, 0x92, 0x0D, 0x30, 0x1C, 0x82, 0xE7,
+ 0x04, 0x00, 0xB1, 0xF0, 0x22, 0x06, 0x0A, 0xF0, 0x3E, 0x06, 0x05, 0xF0, 0xD6, 0x06, 0x06, 0xF0,
+ 0xDC, 0x06, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0, 0xFC, 0x09, 0x00, 0xF0, 0x02, 0x0A, 0x04, 0x80,
+ 0x18, 0xE4, 0x20, 0x00, 0x30, 0x12, 0x09, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x21, 0x80,
+ 0x18, 0xE4, 0xE0, 0x00, 0x09, 0x48, 0x00, 0xF2, 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2,
+ 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x99, 0xA4, 0x00, 0xF2, 0x12, 0x11,
+ 0x09, 0xE7, 0x00, 0x00, 0x9A, 0x10, 0x04, 0x80, 0x18, 0xE4, 0x02, 0x00, 0x34, 0x12, 0x09, 0xE7,
+ 0x1B, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x21, 0x80, 0x18, 0xE4, 0xE0, 0x00, 0x09, 0x48, 0x00, 0xF2,
+ 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2,
+ 0x12, 0x11, 0x09, 0xE7, 0x01, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF0,
+ 0x0C, 0x09, 0xBB, 0x55, 0x9A, 0x81, 0x03, 0xF7, 0x20, 0x00, 0x09, 0x6F, 0x93, 0x45, 0x55, 0xF0,
+ 0xE2, 0x06, 0xB1, 0xF0, 0xC2, 0x06, 0x0A, 0xF0, 0xBA, 0x06, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0,
+ 0xFC, 0x09, 0x00, 0xF0, 0x02, 0x0A, 0x00, 0xF2, 0x60, 0x0B, 0x47, 0x10, 0x09, 0xE7, 0x08, 0x00,
+ 0x41, 0x10, 0x05, 0x80, 0x48, 0xE4, 0x00, 0x00, 0x1E, 0x12, 0x00, 0xE6, 0x11, 0x00, 0x00, 0xEA,
+ 0xB8, 0x00, 0x00, 0xF2, 0xB6, 0x10, 0x2C, 0x90, 0xAE, 0x90, 0x08, 0x50, 0x8A, 0x50, 0x38, 0x54,
+ 0x1F, 0x40, 0x00, 0xF2, 0xB4, 0x0D, 0x08, 0x10, 0x08, 0x90, 0x8A, 0x90, 0x30, 0x50, 0xB2, 0x50,
+ 0x9C, 0x32, 0x0C, 0x92, 0x8E, 0x92, 0x38, 0x54, 0x04, 0x80, 0x30, 0xE4, 0x08, 0x00, 0x04, 0x40,
+ 0x0C, 0x1C, 0x00, 0xF6, 0x03, 0x00, 0xB1, 0xF0, 0x26, 0x07, 0x9E, 0xF0, 0x3A, 0x07, 0x01, 0x48,
+ 0x55, 0xF0, 0xFC, 0x09, 0x0C, 0x1C, 0x10, 0x44, 0xED, 0x10, 0x0B, 0xF0, 0x5E, 0x07, 0x0C, 0xF0,
+ 0x62, 0x07, 0x05, 0xF0, 0x52, 0x07, 0x06, 0xF0, 0x58, 0x07, 0x09, 0xF0, 0x24, 0x09, 0x00, 0xF0,
+ 0x02, 0x0A, 0x00, 0xF2, 0x60, 0x0B, 0xCF, 0x10, 0x09, 0xE7, 0x08, 0x00, 0xC9, 0x10, 0x2E, 0x1C,
+ 0x02, 0x10, 0x2C, 0x1C, 0xAA, 0xF0, 0x64, 0x07, 0xAC, 0xF0, 0x72, 0x07, 0x40, 0x10, 0x34, 0x1C,
+ 0xF3, 0x10, 0xAD, 0xF0, 0x7C, 0x07, 0xC8, 0x10, 0x36, 0x1C, 0xE9, 0x10, 0x2B, 0xF0, 0x82, 0x08,
+ 0x6B, 0x18, 0x18, 0xF4, 0x00, 0xFE, 0x20, 0x12, 0x01, 0x58, 0xD2, 0xF0, 0x82, 0x08, 0x76, 0x18,
+ 0x18, 0xF4, 0x03, 0x00, 0xEC, 0x12, 0x00, 0xFC, 0x22, 0x01, 0x18, 0xF4, 0x01, 0x00, 0xE2, 0x12,
+ 0x0B, 0xF0, 0x64, 0x07, 0x0C, 0xF0, 0x64, 0x07, 0x36, 0x1C, 0x34, 0x1C, 0xB7, 0x10, 0x38, 0x54,
+ 0xB9, 0x54, 0x84, 0x80, 0x19, 0xE4, 0x20, 0x00, 0xB2, 0x13, 0x85, 0x80, 0x81, 0x48, 0x66, 0x12,
+ 0x04, 0x80, 0x18, 0xE4, 0x08, 0x00, 0x58, 0x13, 0x1F, 0x80, 0x08, 0x44, 0xC8, 0x44, 0x9F, 0x12,
+ 0x1F, 0x40, 0x34, 0x91, 0xB6, 0x91, 0x44, 0x55, 0xE5, 0x55, 0x02, 0xEC, 0xB8, 0x00, 0x02, 0x49,
+ 0xBB, 0x55, 0x82, 0x81, 0xC0, 0x55, 0x48, 0xF4, 0x0F, 0x00, 0x5A, 0xF0, 0x1A, 0x08, 0x4A, 0xE4,
+ 0x17, 0x00, 0xD5, 0xF0, 0xFA, 0x07, 0x02, 0xF6, 0x0F, 0x00, 0x02, 0xF4, 0x02, 0x00, 0x02, 0xEA,
+ 0xB8, 0x00, 0x04, 0x91, 0x86, 0x91, 0x02, 0x4B, 0x2C, 0x90, 0x08, 0x50, 0x2E, 0x90, 0x0A, 0x50,
+ 0x2C, 0x51, 0xAE, 0x51, 0x00, 0xF2, 0xB6, 0x10, 0x38, 0x54, 0x00, 0xF2, 0xB4, 0x0D, 0x56, 0x10,
+ 0x34, 0x91, 0xB6, 0x91, 0x0C, 0x10, 0x04, 0x80, 0x18, 0xE4, 0x08, 0x00, 0x41, 0x12, 0x0C, 0x91,
+ 0x8E, 0x91, 0x04, 0x80, 0x18, 0xE4, 0xF7, 0x00, 0x04, 0x40, 0x30, 0x90, 0xB2, 0x90, 0x36, 0x10,
+ 0x02, 0x80, 0x48, 0xE4, 0x10, 0x00, 0x31, 0x12, 0x82, 0xE7, 0x10, 0x00, 0x84, 0x80, 0x19, 0xE4,
+ 0x20, 0x00, 0x10, 0x13, 0x0C, 0x90, 0x8E, 0x90, 0x5D, 0xF0, 0x78, 0x07, 0x0C, 0x58, 0x8D, 0x58,
+ 0x00, 0xF0, 0x64, 0x07, 0x38, 0x54, 0xB9, 0x54, 0x19, 0x80, 0xF1, 0x10, 0x3A, 0x55, 0x19, 0x81,
+ 0xBB, 0x55, 0x10, 0x90, 0x92, 0x90, 0x10, 0x58, 0x91, 0x58, 0x14, 0x59, 0x95, 0x59, 0x00, 0xF0,
+ 0x64, 0x07, 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00, 0x06, 0x12, 0x6C, 0x19, 0x19, 0x41, 0x7C, 0x10,
+ 0x6C, 0x19, 0x0C, 0x51, 0xED, 0x19, 0x8E, 0x51, 0x6B, 0x18, 0x18, 0xF4, 0x00, 0xFF, 0x02, 0x13,
+ 0x6A, 0x10, 0x01, 0x58, 0xD2, 0xF0, 0xC0, 0x08, 0x76, 0x18, 0x18, 0xF4, 0x03, 0x00, 0x0A, 0x12,
+ 0x00, 0xFC, 0x22, 0x01, 0x18, 0xF4, 0x01, 0x00, 0x06, 0x13, 0x9E, 0xE7, 0x16, 0x00, 0x4C, 0x10,
+ 0xD1, 0xF0, 0xCA, 0x08, 0x9E, 0xE7, 0x17, 0x00, 0x42, 0x10, 0xD0, 0xF0, 0xD4, 0x08, 0x9E, 0xE7,
+ 0x19, 0x00, 0x38, 0x10, 0xCF, 0xF0, 0xDE, 0x08, 0x9E, 0xE7, 0x20, 0x00, 0x2E, 0x10, 0xCE, 0xF0,
+ 0xE8, 0x08, 0x9E, 0xE7, 0x21, 0x00, 0x24, 0x10, 0xCD, 0xF0, 0xF2, 0x08, 0x9E, 0xE7, 0x22, 0x00,
+ 0x1A, 0x10, 0xCC, 0xF0, 0x04, 0x09, 0x84, 0x80, 0x19, 0xE4, 0x04, 0x00, 0x06, 0x12, 0x9E, 0xE7,
+ 0x12, 0x00, 0x08, 0x10, 0xCB, 0xF0, 0x0C, 0x09, 0x9E, 0xE7, 0x24, 0x00, 0xB1, 0xF0, 0x0C, 0x09,
+ 0x05, 0xF0, 0x1E, 0x09, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0, 0xFC, 0x09, 0xE4, 0x10, 0x00, 0xF2,
+ 0x60, 0x0B, 0xE9, 0x10, 0x9C, 0x32, 0x82, 0xE7, 0x20, 0x00, 0x32, 0x1C, 0xE9, 0x09, 0x00, 0xF2,
+ 0x12, 0x11, 0x85, 0xF0, 0x02, 0x0A, 0x69, 0x08, 0x01, 0xF0, 0x44, 0x09, 0x1E, 0xF0, 0xFC, 0x09,
+ 0x00, 0xF0, 0x38, 0x09, 0x30, 0x44, 0x06, 0x12, 0x9E, 0xE7, 0x42, 0x00, 0xB8, 0x10, 0x04, 0xF6,
+ 0x01, 0x00, 0xB3, 0x45, 0x74, 0x12, 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00, 0x22, 0x13, 0x4B, 0xE4,
+ 0x02, 0x00, 0x36, 0x12, 0x4B, 0xE4, 0x28, 0x00, 0xAC, 0x13, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2,
+ 0xC8, 0x11, 0x03, 0xF6, 0xD0, 0x00, 0xFA, 0x14, 0x82, 0xE7, 0x01, 0x00, 0x00, 0xF0, 0x80, 0x01,
+ 0x9E, 0xE7, 0x44, 0x00, 0x4B, 0xE4, 0x02, 0x00, 0x06, 0x12, 0x03, 0xE6, 0x02, 0x00, 0x76, 0x10,
+ 0x00, 0xF2, 0xA2, 0x0D, 0x03, 0xE6, 0x02, 0x00, 0x6C, 0x10, 0x00, 0xF2, 0xA2, 0x0D, 0x19, 0x82,
+ 0x34, 0x46, 0x0A, 0x13, 0x03, 0xE6, 0x02, 0x00, 0x9E, 0xE7, 0x43, 0x00, 0x68, 0x10, 0x04, 0x80,
+ 0x30, 0xE4, 0x20, 0x00, 0x04, 0x40, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11, 0x82, 0xE7,
+ 0x01, 0x00, 0x06, 0xF7, 0x02, 0x00, 0x00, 0xF0, 0x08, 0x03, 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00,
+ 0x06, 0x12, 0x03, 0xE6, 0x02, 0x00, 0x3E, 0x10, 0x04, 0x80, 0x18, 0xE4, 0x02, 0x00, 0x3A, 0x12,
+ 0x04, 0x80, 0x18, 0xE4, 0xFD, 0x00, 0x04, 0x40, 0x1C, 0x1C, 0x9D, 0xF0, 0xEA, 0x09, 0x1C, 0x1C,
+ 0x9D, 0xF0, 0xF0, 0x09, 0xC1, 0x10, 0x9E, 0xE7, 0x13, 0x00, 0x0A, 0x10, 0x9E, 0xE7, 0x41, 0x00,
+ 0x04, 0x10, 0x9E, 0xE7, 0x24, 0x00, 0x00, 0xFC, 0xBE, 0x00, 0x98, 0x57, 0xD5, 0xF0, 0x8A, 0x02,
+ 0x04, 0xE6, 0x04, 0x00, 0x06, 0x10, 0x04, 0xE6, 0x04, 0x00, 0x9D, 0x41, 0x1C, 0x42, 0x9F, 0xE7,
+ 0x00, 0x00, 0x06, 0xF7, 0x02, 0x00, 0x03, 0xF6, 0xE0, 0x00, 0x3C, 0x14, 0x44, 0x58, 0x45, 0x58,
+ 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0xF2, 0x7E, 0x10, 0x00, 0xF2, 0xC6, 0x0F, 0x3C, 0x14, 0x1E, 0x1C,
+ 0x00, 0xF0, 0x80, 0x01, 0x12, 0x1C, 0x22, 0x1C, 0xD2, 0x14, 0x00, 0xF0, 0x72, 0x01, 0x83, 0x59,
+ 0x03, 0xDC, 0x73, 0x57, 0x80, 0x5D, 0x00, 0x16, 0x83, 0x59, 0x03, 0xDC, 0x38, 0x54, 0x70, 0x57,
+ 0x33, 0x54, 0x3B, 0x54, 0x80, 0x5D, 0x00, 0x16, 0x03, 0x57, 0x83, 0x59, 0x38, 0x54, 0x00, 0xCC,
+ 0x00, 0x16, 0x03, 0x57, 0x83, 0x59, 0x00, 0x4C, 0x00, 0x16, 0x02, 0x80, 0x48, 0xE4, 0x01, 0x00,
+ 0x0E, 0x12, 0x48, 0xE4, 0x05, 0x00, 0x08, 0x12, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11,
+ 0xC1, 0x5A, 0x3A, 0x55, 0x02, 0xEC, 0xB5, 0x00, 0x45, 0x59, 0x00, 0xF2, 0xF6, 0x0D, 0x83, 0x58,
+ 0x30, 0xE7, 0x00, 0x00, 0x10, 0x4D, 0x30, 0xE7, 0x40, 0x00, 0x10, 0x4F, 0x38, 0x90, 0xBA, 0x90,
+ 0x10, 0x5C, 0x80, 0x5C, 0x83, 0x5A, 0x10, 0x4E, 0x04, 0xEA, 0xB5, 0x00, 0x43, 0x5B, 0x03, 0xF4,
+ 0xE0, 0x00, 0x83, 0x59, 0x04, 0xCC, 0x01, 0x4A, 0x0A, 0x12, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D,
+ 0x00, 0xF2, 0x38, 0x10, 0x00, 0x16, 0x08, 0x1C, 0x00, 0xFC, 0xAC, 0x00, 0x06, 0x58, 0x67, 0x18,
+ 0x18, 0xF4, 0x8F, 0xE1, 0x01, 0xFC, 0xAE, 0x00, 0x19, 0xF4, 0x70, 0x1E, 0xB0, 0x54, 0x07, 0x58,
+ 0x00, 0xFC, 0xB0, 0x00, 0x08, 0x58, 0x00, 0xFC, 0xB2, 0x00, 0x09, 0x58, 0x0A, 0x1C, 0x00, 0xE6,
+ 0x0F, 0x00, 0x00, 0xEA, 0xB9, 0x00, 0x38, 0x54, 0x00, 0xFA, 0x24, 0x01, 0x00, 0xFA, 0xB6, 0x00,
+ 0x18, 0x1C, 0x14, 0x1C, 0x10, 0x1C, 0x32, 0x1C, 0x12, 0x1C, 0x00, 0x16, 0x3E, 0x57, 0x0C, 0x14,
+ 0x0E, 0x47, 0x07, 0xE6, 0x10, 0x00, 0xCE, 0x47, 0xF5, 0x13, 0x00, 0x16, 0x00, 0xF2, 0xA2, 0x0D,
+ 0x02, 0x4B, 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x01, 0x48, 0x20, 0x12, 0x44, 0x58,
+ 0x45, 0x58, 0x9E, 0xE7, 0x15, 0x00, 0x9C, 0xE7, 0x04, 0x00, 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0xF2,
+ 0x7E, 0x10, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2, 0x7A, 0x0A, 0x1E, 0x1C, 0xD5, 0x10, 0x00, 0x16,
+ 0x69, 0x08, 0x48, 0xE4, 0x04, 0x00, 0x64, 0x12, 0x48, 0xE4, 0x02, 0x00, 0x20, 0x12, 0x48, 0xE4,
+ 0x03, 0x00, 0x1A, 0x12, 0x48, 0xE4, 0x08, 0x00, 0x14, 0x12, 0x48, 0xE4, 0x01, 0x00, 0xF0, 0x12,
+ 0x48, 0xE4, 0x07, 0x00, 0x12, 0x12, 0x01, 0xE6, 0x07, 0x00, 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2,
+ 0x12, 0x11, 0x05, 0xF0, 0x60, 0x0B, 0x00, 0x16, 0x00, 0xE6, 0x01, 0x00, 0x00, 0xEA, 0x99, 0x00,
+ 0x02, 0x80, 0x48, 0xE4, 0x03, 0x00, 0xE7, 0x12, 0x48, 0xE4, 0x06, 0x00, 0xE1, 0x12, 0x01, 0xE6,
+ 0x06, 0x00, 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11, 0x04, 0xE6, 0x02, 0x00, 0x9E, 0xE7,
+ 0x15, 0x00, 0x01, 0xF0, 0x1C, 0x0A, 0x00, 0xF0, 0x02, 0x0A, 0x00, 0x16, 0x02, 0x80, 0x48, 0xE4,
+ 0x10, 0x00, 0x1C, 0x12, 0x82, 0xE7, 0x08, 0x00, 0x3C, 0x56, 0x03, 0x82, 0x00, 0xF2, 0xE2, 0x0D,
+ 0x30, 0xE7, 0x08, 0x00, 0x04, 0xF7, 0x70, 0x01, 0x06, 0xF7, 0x02, 0x00, 0x00, 0xF0, 0x80, 0x01,
+ 0x6C, 0x19, 0xED, 0x19, 0x5D, 0xF0, 0xD4, 0x0B, 0x44, 0x55, 0xE5, 0x55, 0x59, 0xF0, 0x52, 0x0C,
+ 0x04, 0x55, 0xA5, 0x55, 0x1F, 0x80, 0x01, 0xEC, 0xB8, 0x00, 0x82, 0x48, 0x82, 0x80, 0x49, 0x44,
+ 0x2E, 0x13, 0x01, 0xEC, 0xB8, 0x00, 0x41, 0xE4, 0x02, 0x00, 0x01, 0xEA, 0xB8, 0x00, 0x49, 0xE4,
+ 0x11, 0x00, 0x59, 0xF0, 0x2E, 0x0C, 0x01, 0xE6, 0x17, 0x00, 0x01, 0xEA, 0xB8, 0x00, 0x02, 0x4B,
+ 0x88, 0x90, 0xAC, 0x50, 0x8A, 0x90, 0xAE, 0x50, 0x01, 0xEC, 0xB8, 0x00, 0x82, 0x48, 0x82, 0x80,
+ 0x10, 0x44, 0x02, 0x4B, 0x1F, 0x40, 0xC0, 0x44, 0x00, 0xF2, 0xB4, 0x0D, 0x04, 0x55, 0xA5, 0x55,
+ 0x9F, 0x10, 0x0C, 0x51, 0x8E, 0x51, 0x30, 0x90, 0xB2, 0x90, 0x00, 0x56, 0xA1, 0x56, 0x30, 0x50,
+ 0xB2, 0x50, 0x34, 0x90, 0xB6, 0x90, 0x40, 0x56, 0xE1, 0x56, 0x34, 0x50, 0xB6, 0x50, 0x65, 0x10,
+ 0xB1, 0xF0, 0x70, 0x0C, 0x85, 0xF0, 0xCA, 0x0B, 0xE9, 0x09, 0x4B, 0xE4, 0x03, 0x00, 0x78, 0x12,
+ 0x4B, 0xE4, 0x02, 0x00, 0x01, 0x13, 0xB1, 0xF0, 0x86, 0x0C, 0x85, 0xF0, 0xCA, 0x0B, 0x69, 0x08,
+ 0x48, 0xE4, 0x03, 0x00, 0xD5, 0xF0, 0x86, 0x0B, 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0xCA, 0x0B,
+ 0xE8, 0x09, 0x3C, 0x56, 0x00, 0xFC, 0x20, 0x01, 0x98, 0x57, 0x02, 0x13, 0xBB, 0x45, 0x4B, 0xE4,
+ 0x00, 0x00, 0x08, 0x12, 0x03, 0xE6, 0x01, 0x00, 0x04, 0xF6, 0x00, 0x80, 0xA8, 0x14, 0xD2, 0x14,
+ 0x30, 0x1C, 0x02, 0x80, 0x48, 0xE4, 0x03, 0x00, 0x10, 0x13, 0x00, 0xFC, 0xB6, 0x00, 0x98, 0x57,
+ 0x02, 0x13, 0x4C, 0x1C, 0x3E, 0x1C, 0x00, 0xF0, 0x8E, 0x0B, 0x00, 0xFC, 0x24, 0x01, 0xB0, 0x57,
+ 0x00, 0xFA, 0x24, 0x01, 0x4C, 0x1C, 0x3E, 0x1C, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0x8E, 0x0B,
+ 0x00, 0xF2, 0x8C, 0x0E, 0x00, 0xF0, 0x8E, 0x0B, 0xB1, 0xF0, 0xF8, 0x0C, 0x85, 0xF0, 0x86, 0x0B,
+ 0x69, 0x08, 0x48, 0xE4, 0x01, 0x00, 0xD5, 0xF0, 0x86, 0x0B, 0xFC, 0x14, 0x42, 0x58, 0x6C, 0x14,
+ 0x80, 0x14, 0x30, 0x1C, 0x4A, 0xF4, 0x02, 0x00, 0x55, 0xF0, 0x86, 0x0B, 0x4A, 0xF4, 0x01, 0x00,
+ 0x0E, 0x12, 0x02, 0x80, 0x48, 0xE4, 0x03, 0x00, 0x06, 0x13, 0x3E, 0x1C, 0x00, 0xF0, 0x8E, 0x0B,
+ 0x00, 0xFC, 0xB6, 0x00, 0xB0, 0x57, 0x00, 0xFA, 0xB6, 0x00, 0x4C, 0x1C, 0x3E, 0x1C, 0x00, 0xF2,
+ 0x12, 0x11, 0x86, 0xF0, 0x8E, 0x0B, 0x00, 0xF2, 0xBA, 0x0E, 0x00, 0xF0, 0x8E, 0x0B, 0x4C, 0x1C,
+ 0xB1, 0xF0, 0x50, 0x0D, 0x85, 0xF0, 0x5C, 0x0D, 0x69, 0x08, 0xF3, 0x10, 0x86, 0xF0, 0x64, 0x0D,
+ 0x4E, 0x1C, 0x89, 0x48, 0x00, 0x16, 0x00, 0xF6, 0x00, 0x01, 0x00, 0x57, 0x00, 0x57, 0x03, 0x58,
+ 0x00, 0xDC, 0x18, 0xF4, 0xFF, 0x7F, 0x30, 0x56, 0x00, 0x5C, 0x00, 0x16, 0x00, 0xF6, 0x00, 0x01,
+ 0x00, 0x57, 0x00, 0x57, 0x03, 0x58, 0x00, 0xDC, 0x18, 0xF4, 0x00, 0x80, 0x30, 0x56, 0x00, 0x5C,
+ 0x00, 0x16, 0x00, 0xF6, 0x00, 0x01, 0x00, 0x57, 0x00, 0x57, 0x03, 0x58, 0x00, 0xDC, 0x0B, 0x58,
+ 0x00, 0x16, 0x03, 0xF6, 0x24, 0x01, 0x00, 0xF2, 0x58, 0x0A, 0x03, 0xF6, 0xB6, 0x00, 0x00, 0xF2,
+ 0x58, 0x0A, 0x00, 0x16, 0x02, 0xEC, 0xB8, 0x00, 0x02, 0x49, 0x18, 0xF4, 0xFF, 0x00, 0x00, 0x54,
+ 0x00, 0x54, 0x00, 0x54, 0x00, 0xF4, 0x08, 0x00, 0xE1, 0x18, 0x80, 0x54, 0x03, 0x58, 0x00, 0xDD,
+ 0x01, 0xDD, 0x02, 0xDD, 0x03, 0xDC, 0x02, 0x4B, 0x30, 0x50, 0xB2, 0x50, 0x34, 0x51, 0xB6, 0x51,
+ 0x00, 0x16, 0x45, 0x5A, 0x1D, 0xF4, 0xFF, 0x00, 0x85, 0x56, 0x85, 0x56, 0x85, 0x56, 0x05, 0xF4,
+ 0x02, 0x12, 0x83, 0x5A, 0x00, 0x16, 0x1D, 0xF4, 0xFF, 0x00, 0x85, 0x56, 0x85, 0x56, 0x85, 0x56,
+ 0x05, 0xF4, 0x00, 0x12, 0x83, 0x5A, 0x00, 0x16, 0x38, 0x54, 0xBB, 0x55, 0x3C, 0x56, 0xBD, 0x56,
+ 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0x82, 0x0E, 0xE9, 0x09, 0xC1, 0x59, 0x00, 0xF2, 0x12, 0x11,
+ 0x85, 0xF0, 0x82, 0x0E, 0xE8, 0x0A, 0x83, 0x55, 0x83, 0x55, 0x4B, 0xF4, 0x90, 0x01, 0x5C, 0xF0,
+ 0x36, 0x0E, 0xBD, 0x56, 0x40, 0x10, 0x4B, 0xF4, 0x30, 0x00, 0x59, 0xF0, 0x48, 0x0E, 0x01, 0xF6,
+ 0x0C, 0x00, 0x00, 0xF6, 0x01, 0x00, 0x2E, 0x10, 0x02, 0xFC, 0x9C, 0x00, 0x9A, 0x57, 0x14, 0x13,
+ 0x4B, 0xF4, 0x64, 0x00, 0x59, 0xF0, 0x64, 0x0E, 0x03, 0xF6, 0x64, 0x00, 0x01, 0xF6, 0x19, 0x00,
+ 0x00, 0xF6, 0x01, 0x00, 0x43, 0xF4, 0x33, 0x00, 0x56, 0xF0, 0x76, 0x0E, 0x04, 0xF4, 0x00, 0x01,
+ 0x43, 0xF4, 0x19, 0x00, 0xF3, 0x10, 0xB4, 0x56, 0xC3, 0x58, 0x02, 0xFC, 0x9E, 0x00, 0x9A, 0x57,
+ 0x08, 0x13, 0x3C, 0x56, 0x00, 0xF6, 0x02, 0x00, 0x00, 0x16, 0x00, 0x16, 0x09, 0xE7, 0x01, 0x00,
+ 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xB8, 0x0E, 0x09, 0xE7, 0x02, 0x00, 0x00, 0xF2, 0x12, 0x11,
+ 0x86, 0xF0, 0xB8, 0x0E, 0x09, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xB8, 0x0E,
+ 0x4E, 0x1C, 0x89, 0x49, 0x00, 0xF2, 0x12, 0x11, 0x00, 0x16, 0x09, 0xE7, 0x01, 0x00, 0x00, 0xF2,
+ 0x12, 0x11, 0x86, 0xF0, 0xF2, 0x0E, 0x09, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0,
+ 0xF2, 0x0E, 0x09, 0xE7, 0x01, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xF2, 0x0E, 0x89, 0x49,
+ 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xF2, 0x0E, 0x4E, 0x1C, 0x89, 0x4A, 0x00, 0xF2, 0x12, 0x11,
+ 0x00, 0x16, 0x3C, 0x56, 0x00, 0x16, 0x00, 0xEC, 0x26, 0x01, 0x48, 0xE4, 0x01, 0x00, 0x1E, 0x13,
+ 0x38, 0x44, 0x00, 0xEA, 0x26, 0x01, 0x49, 0xF4, 0x00, 0x00, 0x04, 0x12, 0x4E, 0x1C, 0x02, 0x10,
+ 0x4C, 0x1C, 0x01, 0xEC, 0x27, 0x01, 0x89, 0x48, 0x00, 0xF2, 0x12, 0x11, 0x02, 0x14, 0x00, 0x16,
+ 0x85, 0xF0, 0x52, 0x0F, 0x38, 0x54, 0x00, 0xEA, 0x99, 0x00, 0x00, 0xF2, 0x60, 0x0B, 0x02, 0x80,
+ 0x48, 0xE4, 0x06, 0x00, 0x1C, 0x13, 0x00, 0xEC, 0x99, 0x00, 0x48, 0xE4, 0x01, 0x00, 0x0A, 0x12,
+ 0x04, 0x80, 0x30, 0xE4, 0x01, 0x00, 0x04, 0x40, 0x08, 0x10, 0x04, 0x80, 0x18, 0xE4, 0xFE, 0x00,
+ 0x04, 0x40, 0x00, 0x16, 0x02, 0xF6, 0xE0, 0x00, 0x02, 0x57, 0x03, 0x59, 0x01, 0xCC, 0x81, 0x48,
+ 0x22, 0x12, 0x00, 0x4E, 0x83, 0x5A, 0x90, 0x4C, 0x20, 0xE7, 0x00, 0x00, 0xC3, 0x58, 0x1B, 0xF4,
+ 0xFF, 0x00, 0x83, 0x55, 0x83, 0x55, 0x83, 0x55, 0x03, 0xF4, 0x00, 0x12, 0x8B, 0x55, 0x83, 0x59,
+ 0x00, 0x4E, 0x00, 0x16, 0x00, 0x4E, 0x02, 0xF6, 0xF0, 0x00, 0x02, 0x57, 0x03, 0x59, 0x00, 0x4E,
+ 0x83, 0x5A, 0x30, 0xE7, 0x00, 0x00, 0x20, 0xE7, 0x00, 0x00, 0x00, 0x16, 0x02, 0xF6, 0xF0, 0x00,
+ 0x02, 0x57, 0x03, 0x59, 0x01, 0xCC, 0x00, 0x4E, 0x83, 0x5A, 0x30, 0xE7, 0x00, 0x00, 0x80, 0x4C,
+ 0xC3, 0x58, 0x1B, 0xF4, 0xFF, 0x00, 0x83, 0x55, 0x83, 0x55, 0x83, 0x55, 0x03, 0xF4, 0x00, 0x12,
+ 0x83, 0x59, 0x00, 0x4E, 0x00, 0x16, 0x03, 0xF6, 0xE0, 0x00, 0x03, 0x57, 0x83, 0x59, 0x3A, 0x55,
+ 0x02, 0xCC, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D, 0xC0, 0x5A, 0x40, 0x5C, 0x38, 0x54, 0x00, 0xCD,
+ 0x01, 0xCC, 0x4A, 0x46, 0x0A, 0x13, 0x83, 0x59, 0x00, 0x4C, 0x01, 0x48, 0x16, 0x13, 0x0C, 0x10,
+ 0xC5, 0x58, 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0x4C, 0x01, 0x48, 0x08, 0x13, 0x05, 0xF6, 0xF0, 0x00,
+ 0x05, 0x57, 0x08, 0x10, 0x45, 0x58, 0x00, 0xF2, 0xF6, 0x0D, 0x8D, 0x56, 0x83, 0x5A, 0x80, 0x4C,
+ 0x05, 0x17, 0x00, 0x16, 0x02, 0x4B, 0x06, 0xF7, 0x04, 0x00, 0x62, 0x0B, 0x03, 0x82, 0x00, 0xF2,
+ 0xE2, 0x0D, 0x02, 0x80, 0x00, 0x4C, 0x45, 0xF4, 0x02, 0x00, 0x52, 0x14, 0x06, 0xF7, 0x02, 0x00,
+ 0x06, 0x14, 0x00, 0xF2, 0x54, 0x0F, 0x00, 0x16, 0x02, 0x4B, 0x01, 0xF6, 0xFF, 0x00, 0x38, 0x1C,
+ 0x05, 0xF4, 0x04, 0x00, 0x83, 0x5A, 0x18, 0xDF, 0x19, 0xDF, 0x1D, 0xF7, 0x3C, 0x00, 0xB8, 0xF0,
+ 0x4E, 0x10, 0x9C, 0x14, 0x01, 0x48, 0x1C, 0x13, 0x0E, 0xF7, 0x3C, 0x00, 0x03, 0xF7, 0x04, 0x00,
+ 0xAF, 0x19, 0x03, 0x42, 0x45, 0xF4, 0x02, 0x00, 0x83, 0x5A, 0x02, 0xCC, 0x02, 0x41, 0x45, 0xF4,
+ 0x02, 0x00, 0x00, 0x16, 0x91, 0x44, 0xD5, 0xF0, 0x3E, 0x10, 0x00, 0xF0, 0x9E, 0x02, 0x01, 0xF6,
+ 0xFF, 0x00, 0x38, 0x1C, 0x05, 0xF4, 0x04, 0x00, 0x83, 0x5A, 0x18, 0xDF, 0x19, 0xDF, 0x0E, 0xF7,
+ 0x3C, 0x00, 0x03, 0xF7, 0x04, 0x00, 0x0F, 0x79, 0x1C, 0xF7, 0x3C, 0x00, 0xB8, 0xF0, 0x9C, 0x10,
+ 0x4E, 0x14, 0x01, 0x48, 0x06, 0x13, 0x45, 0xF4, 0x04, 0x00, 0x00, 0x16, 0x91, 0x44, 0xD5, 0xF0,
+ 0x82, 0x10, 0x00, 0xF0, 0x9E, 0x02, 0x02, 0xF6, 0xFF, 0x00, 0x38, 0x1C, 0x2C, 0xBC, 0xAE, 0xBC,
+ 0xE2, 0x08, 0x00, 0xEC, 0xB8, 0x00, 0x02, 0x48, 0x1D, 0xF7, 0x80, 0x00, 0xB8, 0xF0, 0xCC, 0x10,
+ 0x1E, 0x14, 0x01, 0x48, 0x0E, 0x13, 0x0E, 0xF7, 0x80, 0x00, 0x38, 0x54, 0x03, 0x58, 0xAF, 0x19,
+ 0x82, 0x48, 0x00, 0x16, 0x82, 0x48, 0x12, 0x45, 0xD5, 0xF0, 0xBA, 0x10, 0x00, 0xF0, 0x9E, 0x02,
+ 0x39, 0xF0, 0xF8, 0x10, 0x38, 0x44, 0x00, 0x16, 0x7E, 0x18, 0x18, 0xF4, 0x03, 0x00, 0x04, 0x13,
+ 0x61, 0x18, 0x00, 0x16, 0x38, 0x1C, 0x00, 0xFC, 0x22, 0x01, 0x18, 0xF4, 0x01, 0x00, 0xF1, 0x12,
+ 0xE3, 0x10, 0x30, 0x44, 0x30, 0x44, 0x30, 0x44, 0xB1, 0xF0, 0x18, 0x11, 0x00, 0x16, 0x3E, 0x57,
+ 0x03, 0xF6, 0xE0, 0x00, 0x03, 0x57, 0x83, 0x59, 0x04, 0xCC, 0x01, 0x4A, 0x6A, 0x12, 0x45, 0x5A,
+ 0x00, 0xF2, 0xF6, 0x0D, 0x02, 0x4B, 0x70, 0x14, 0x34, 0x13, 0x02, 0x80, 0x48, 0xE4, 0x08, 0x00,
+ 0x18, 0x12, 0x9C, 0xE7, 0x02, 0x00, 0x9E, 0xE7, 0x15, 0x00, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2,
+ 0x7A, 0x0A, 0x1E, 0x1C, 0x01, 0xF6, 0x01, 0x00, 0x00, 0x16, 0x30, 0xE4, 0x10, 0x00, 0x04, 0x40,
+ 0x00, 0xF2, 0xE2, 0x0D, 0x20, 0xE7, 0x01, 0x00, 0x01, 0xF6, 0x01, 0x00, 0x00, 0x16, 0x04, 0xDC,
+ 0x01, 0x4A, 0x24, 0x12, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D, 0x43, 0x5B, 0x06, 0xEC, 0x98, 0x00,
+ 0x00, 0xF2, 0x38, 0x10, 0xC6, 0x59, 0x20, 0x14, 0x0A, 0x13, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2,
+ 0x14, 0x10, 0xA7, 0x10, 0x83, 0x5A, 0xD7, 0x10, 0x0E, 0x47, 0x07, 0xE6, 0x10, 0x00, 0xCE, 0x47,
+ 0x5A, 0xF0, 0x20, 0x11, 0xB9, 0x54, 0x00, 0x16, 0x14, 0x90, 0x96, 0x90, 0x02, 0xFC, 0xA8, 0x00,
+ 0x03, 0xFC, 0xAA, 0x00, 0x48, 0x55, 0x02, 0x13, 0xC9, 0x55, 0x00, 0x16, 0x00, 0xEC, 0xBA, 0x00,
+ 0x10, 0x44, 0x00, 0xEA, 0xBA, 0x00, 0x00, 0x16, 0x03, 0xF6, 0xC0, 0x00, 0x00, 0xF2, 0x68, 0x0A,
+ 0x10, 0x44, 0x00, 0x4C, 0x00, 0x16
+};
+
+/* Byte count of the microcode image in _adv_mcode_buf (expected 0x11D6). */
+unsigned short _adv_mcode_size ASC_INITDATA =
+ sizeof(_adv_mcode_buf); /* 0x11D6 */
+/* Precomputed sum of the microcode words; compared against the sum read
+ * back from RISC LRAM in AdvInitAsc3550Driver() after download. */
+unsigned long _adv_mcode_chksum ASC_INITDATA = 0x03494981UL;
+
+/* a_init.c */
+/*
+ * EEPROM Configuration.
+ *
+ * All drivers should use this structure to set the default EEPROM
+ * configuration. The BIOS now uses this structure when it is built.
+ * Additional structure information can be found in a_condor.h where
+ * the structure is defined.
+ */
+/*
+ * Fallback EEPROM image.  AdvInitFromEEP() copies this structure over
+ * the configuration read from the board when the EEPROM checksum
+ * verification fails.  Field meanings are given inline below.
+ */
+STATIC ADVEEP_CONFIG
+Default_EEPROM_Config ASC_INITDATA = {
+ ADV_EEPROM_BIOS_ENABLE, /* cfg_msw */
+ 0x0000, /* cfg_lsw */
+ 0xFFFF, /* disc_enable */
+ 0xFFFF, /* wdtr_able */
+ 0xFFFF, /* sdtr_able */
+ 0xFFFF, /* start_motor */
+ 0xFFFF, /* tagqng_able */
+ 0xFFFF, /* bios_scan */
+ 0, /* scam_tolerant */
+ 7, /* adapter_scsi_id */
+ 0, /* bios_boot_delay */
+ 3, /* scsi_reset_delay */
+ 0, /* bios_id_lun */
+ 0, /* termination */
+ 0, /* reserved1 */
+ 0xFFEF, /* bios_ctrl */
+ 0xFFFF, /* ultra_able */
+ 0, /* reserved2 */
+ ASC_DEF_MAX_HOST_QNG, /* max_host_qng */
+ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */
+ 0, /* dvc_cntl */
+ 0, /* bug_fix */
+ 0, /* serial_number_word1 */
+ 0, /* serial_number_word2 */
+ 0, /* serial_number_word3 */
+ 0, /* check_sum */
+ { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }, /* oem_name[16] */
+ 0, /* dvc_err_code */
+ 0, /* adv_err_code */
+ 0, /* adv_err_addr */
+ 0, /* saved_dvc_err_code */
+ 0, /* saved_adv_err_code */
+ 0, /* saved_adv_err_addr */
+ 0 /* num_of_err */
+};
+
+/*
+ * Initialize the ADV_DVC_VAR structure.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ */
+ASC_INITFUNC(
+int
+AdvInitGetConfig(ADV_DVC_VAR *asc_dvc)
+)
+{
+ ushort warn_code;
+ AdvPortAddr iop_base;
+ uchar pci_cmd_reg;
+ int status;
+
+ /* warn_code accumulates non-fatal ASC_WARN_* bits returned to the
+ * caller; fatal errors set asc_dvc->err_code and return ADV_ERROR. */
+ warn_code = 0;
+ asc_dvc->err_code = 0;
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * PCI Command Register
+ *
+ * Make sure the Bus Mastering bit is enabled.  If it cannot be
+ * set, record a warning rather than failing outright.
+ */
+
+ if (((pci_cmd_reg = DvcAdvReadPCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister))
+ & AscPCICmdRegBits_BusMastering)
+ != AscPCICmdRegBits_BusMastering)
+ {
+ pci_cmd_reg |= AscPCICmdRegBits_BusMastering;
+
+ DvcAdvWritePCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister, pci_cmd_reg);
+
+ /* Read back to verify the device accepted the write. */
+ if (((DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigCommandRegister))
+ & AscPCICmdRegBits_BusMastering)
+ != AscPCICmdRegBits_BusMastering)
+ {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+
+ /*
+ * PCI Latency Timer
+ *
+ * If the "latency timer" register is 0x20 or above, then we don't need
+ * to change it. Otherwise, set it to 0x20 (i.e. set it to 0x20 if it
+ * comes up less than 0x20).
+ */
+ if (DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) < 0x20) {
+ DvcAdvWritePCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer, 0x20);
+ /* Again verify by reading back; warn if the write did not stick. */
+ if (DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) < 0x20)
+ {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+
+ /*
+ * Save the state of the PCI Configuration Command Register
+ * "Parity Error Response Control" Bit. If the bit is clear (0),
+ * in AdvInitAsc3550Driver() tell the microcode to ignore DMA
+ * parity errors.
+ */
+ asc_dvc->cfg->control_flag = 0;
+ if (((DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigCommandRegister)
+ & AscPCICmdRegBits_ParErrRespCtrl)) == 0)
+ {
+ asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR;
+ }
+
+ asc_dvc->cur_host_qng = 0;
+
+ asc_dvc->cfg->lib_version = (ADV_LIB_VERSION_MAJOR << 8) |
+ ADV_LIB_VERSION_MINOR;
+ asc_dvc->cfg->chip_version =
+ AdvGetChipVersion(iop_base, asc_dvc->bus_type);
+
+ /*
+ * Reset the chip to start and allow register writes.
+ */
+ if (AdvFindSignature(iop_base) == 0)
+ {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return ADV_ERROR;
+ }
+ else {
+
+ AdvResetChip(asc_dvc);
+
+ /* AdvInitFromEEP() returns ADV_ERROR or a warning bit-mask. */
+ if ((status = AdvInitFromEEP(asc_dvc)) == ADV_ERROR)
+ {
+ return ADV_ERROR;
+ }
+ warn_code |= status;
+
+ /*
+ * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
+ * Resets should be performed.
+ */
+ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
+ {
+ AdvResetSCSIBus(asc_dvc);
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Initialize the ASC3550.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ */
+ASC_INITFUNC(
+int
+AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
+)
+{
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ ulong sum; /* running sum of downloaded microcode words */
+ int begin_addr;
+ int end_addr;
+ int code_sum;
+ int word;
+ int rql_addr; /* RISC Queue List address */
+ int i;
+ ushort scsi_cfg1;
+ uchar biosmem[ASC_MC_BIOSLEN]; /* BIOS RISC Memory 0x40-0x8F. */
+
+ /* If there is already an error, don't continue. */
+ if (asc_dvc->err_code != 0)
+ {
+ return ADV_ERROR;
+ }
+
+ warn_code = 0;
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Save the RISC memory BIOS region before writing the microcode.
+ * The BIOS may already be loaded and using its RISC LRAM region
+ * so its region must be saved and restored.
+ *
+ * Note: This code makes the assumption, which is currently true,
+ * that a chip reset does not clear RISC LRAM.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN; i++)
+ {
+ AdvReadByteLram(iop_base, ASC_MC_BIOSMEM + i, biosmem[i]);
+ }
+
+ /*
+ * Load the Microcode
+ *
+ * Write the microcode image to RISC memory starting at address 0.
+ *
+ * NOTE(review): byte pairs of _adv_mcode_buf are reinterpreted as
+ * host-order 16-bit words; this assumes a little-endian host —
+ * confirm before porting to big-endian machines.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
+ for (word = 0; word < _adv_mcode_size; word += 2)
+ {
+ AdvWriteWordAutoIncLram(iop_base,
+ *((ushort *) (&_adv_mcode_buf[word])));
+ }
+
+ /*
+ * Clear the rest of Condor's Internal RAM (8KB).
+ */
+ for (; word < ADV_CONDOR_MEMSIZE; word += 2)
+ {
+ AdvWriteWordAutoIncLram(iop_base, 0);
+ }
+
+ /*
+ * Verify the microcode checksum.
+ */
+ sum = 0;
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
+ for (word = 0; word < _adv_mcode_size; word += 2)
+ {
+ sum += AdvReadWordAutoIncLram(iop_base);
+ }
+
+ if (sum != _adv_mcode_chksum)
+ {
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ return ADV_ERROR;
+ }
+
+ /*
+ * Restore the RISC memory BIOS region.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN; i++)
+ {
+ AdvWriteByteLram(iop_base, ASC_MC_BIOSMEM + i, biosmem[i]);
+ }
+
+ /*
+ * Calculate and write the microcode code checksum to the microcode
+ * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
+ AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
+ code_sum = 0;
+ for (word = begin_addr; word < end_addr; word += 2)
+ {
+ code_sum += *((ushort *) (&_adv_mcode_buf[word]));
+ }
+ AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);
+
+ /*
+ * Read microcode version and date.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date);
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version);
+
+ /*
+ * Initialize microcode operating variables
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_ADAPTER_SCSI_ID,
+ asc_dvc->chip_scsi_id);
+
+ /*
+ * If the PCI Configuration Command Register "Parity Error Response
+ * Control" Bit was clear (0), then set the microcode variable
+ * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
+ * to ignore DMA parity errors.
+ */
+ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR)
+ {
+ /*
+ * Note: Don't remove the use of a temporary variable in
+ * the following code, otherwise the Microsoft C compiler
+ * will turn the following lines into a no-op.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ word |= CONTROL_FLAG_IGNORE_PERR;
+ AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ }
+
+ /*
+ * Set default microcode operating variables for WDTR, SDTR, and
+ * command tag queuing based on the EEPROM configuration values.
+ *
+ * These ADV_DVC_VAR fields and the microcode variables will be
+ * changed in AdvInquiryHandling() if it is found a device is
+ * incapable of a particular feature.
+ */
+
+ /*
+ * Set the microcode ULTRA target mask from EEPROM value. The
+ * SDTR target mask overrides the ULTRA target mask in the
+ * microcode so it is safe to set this value without determining
+ * whether the device supports SDTR.
+ *
+ * Note: There is no way to know whether a device supports ULTRA
+ * speed without attempting a SDTR ULTRA speed negotiation with
+ * the device. The device will reject the speed if it does not
+ * support it by responding with an SDTR message containing a
+ * slower speed.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_ULTRA_ABLE, asc_dvc->ultra_able);
+ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable);
+
+
+ /*
+ * Set SCSI_CFG0 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG0 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
+ PARITY_EN | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id);
+
+ /*
+ * Determine SCSI_CFG1 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+
+ /* Read current SCSI_CFG1 Register value. */
+ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+
+ /*
+ * If all three connectors are in use, return an error.
+ */
+ if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 ||
+ (scsi_cfg1 & CABLE_ILLEGAL_B) == 0)
+ {
+ asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If the internal narrow cable is reversed all of the SCSI_CTRL
+ * register signals will be set. Check for and return an error if
+ * this condition is found.
+ */
+ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07)
+ {
+ asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If this is a differential board and a single-ended device
+ * is attached to one of the connectors, return an error.
+ */
+ if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0)
+ {
+ asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If automatic termination control is enabled, then set the
+ * termination value based on a table listed in a_condor.h.
+ *
+ * If manual termination was specified with an EEPROM setting
+ * then 'termination' was set-up in AdvInitFromEEP() and
+ * is ready to be 'ored' into SCSI_CFG1.
+ */
+ if (asc_dvc->cfg->termination == 0)
+ {
+ /*
+ * The software always controls termination by setting TERM_CTL_SEL.
+ * If TERM_CTL_SEL were set to 0, the hardware would set termination.
+ */
+ asc_dvc->cfg->termination |= TERM_CTL_SEL;
+
+ switch(scsi_cfg1 & CABLE_DETECT)
+ {
+ /* TERM_CTL_H: on, TERM_CTL_L: on */
+ case 0x3: case 0x7: case 0xB: case 0xD: case 0xE: case 0xF:
+ asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L);
+ break;
+
+ /* TERM_CTL_H: on, TERM_CTL_L: off */
+ case 0x1: case 0x5: case 0x9: case 0xA: case 0xC:
+ asc_dvc->cfg->termination |= TERM_CTL_H;
+ break;
+
+ /* TERM_CTL_H: off, TERM_CTL_L: off */
+ case 0x2: case 0x6:
+ break;
+ }
+ }
+
+ /*
+ * Clear any set TERM_CTL_H and TERM_CTL_L bits.
+ */
+ scsi_cfg1 &= ~TERM_CTL;
+
+ /*
+ * Invert the TERM_CTL_H and TERM_CTL_L bits and then
+ * set 'scsi_cfg1'. The TERM_POL bit does not need to be
+ * referenced, because the hardware internally inverts
+ * the Termination High and Low bits if TERM_POL is set.
+ */
+ scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL));
+
+ /*
+ * Set SCSI_CFG1 Microcode Default Value
+ *
+ * Set filter value and possibly modified termination control
+ * bits in the Microcode SCSI_CFG1 Register Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1,
+ FLTR_11_TO_20NS | scsi_cfg1);
+
+ /*
+ * Set SEL_MASK Microcode Default Value
+ *
+ * The microcode will set the SEL_MASK register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
+ ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));
+
+ /*
+ * Link all the RISC Queue Lists together in a doubly-linked
+ * NULL terminated list.
+ *
+ * Skip the NULL (0) queue which is not used.
+ */
+ for (i = 1, rql_addr = ASC_MC_RISC_Q_LIST_BASE + ASC_MC_RISC_Q_LIST_SIZE;
+ i < ASC_MC_RISC_Q_TOTAL_CNT;
+ i++, rql_addr += ASC_MC_RISC_Q_LIST_SIZE)
+ {
+ /*
+ * Set the current RISC Queue List's RQL_FWD and RQL_BWD pointers
+ * in a one word write and set the state (RQL_STATE) to free.
+ * The low byte holds the forward link (i + 1) and the high byte
+ * the backward link (i - 1).
+ */
+ AdvWriteWordLram(iop_base, rql_addr, ((i + 1) + ((i - 1) << 8)));
+ AdvWriteByteLram(iop_base, rql_addr + RQL_STATE, ASC_MC_QS_FREE);
+ }
+
+ /*
+ * Set the Host and RISC Queue List pointers.
+ *
+ * Both sets of pointers are initialized with the same values:
+ * ASC_MC_RISC_Q_FIRST(0x01) and ASC_MC_RISC_Q_LAST (0xFF).
+ */
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_READY, ASC_MC_RISC_Q_FIRST);
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_DONE, ASC_MC_RISC_Q_LAST);
+
+ AdvWriteByteLram(iop_base, ASC_MC_RISC_NEXT_READY, ASC_MC_RISC_Q_FIRST);
+ AdvWriteByteLram(iop_base, ASC_MC_RISC_NEXT_DONE, ASC_MC_RISC_Q_LAST);
+
+ /*
+ * Finally, set up the last RISC Queue List (255) with
+ * a NULL forward pointer.
+ */
+ AdvWriteWordLram(iop_base, rql_addr, (ASC_MC_NULL_Q + ((i - 1) << 8)));
+ AdvWriteByteLram(iop_base, rql_addr + RQL_STATE, ASC_MC_QS_FREE);
+
+ AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
+ (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR));
+
+ /*
+ * Note: Don't remove the use of a temporary variable in
+ * the following code, otherwise the Microsoft C compiler
+ * will turn the following lines into a no-op.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
+ AdvWriteWordRegister(iop_base, IOPW_PC, word);
+
+ /* finally, finally, gentlemen, start your engine */
+ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);
+
+ return warn_code;
+}
+
+/*
+ * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and
+ * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while
+ * all of this is done.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Note: Chip is stopped on entry.
+ */
+ASC_INITFUNC(
+STATIC int
+AdvInitFromEEP(ADV_DVC_VAR *asc_dvc)
+)
+{
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ ADVEEP_CONFIG eep_config; /* on-stack copy of the EEPROM contents */
+ int i;
+
+ iop_base = asc_dvc->iop_base;
+
+ warn_code = 0;
+
+ /*
+ * Read the board's EEPROM configuration.
+ *
+ * Set default values if a bad checksum is found.
+ * AdvGetEEPConfig() returns the checksum it computed over the data
+ * it read; a mismatch with the stored check_sum means bad EEPROM.
+ */
+ if (AdvGetEEPConfig(iop_base, &eep_config) != eep_config.check_sum)
+ {
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+
+ /*
+ * Set EEPROM default values: byte-wise copy of the compiled-in
+ * Default_EEPROM_Config image over the bad data.
+ */
+ for (i = 0; i < sizeof(ADVEEP_CONFIG); i++)
+ {
+ *((uchar *) &eep_config + i) =
+ *((uchar *) &Default_EEPROM_Config + i);
+ }
+
+ /*
+ * Assume the 6 byte board serial number that was read
+ * from EEPROM is correct even if the EEPROM checksum
+ * failed.
+ */
+ eep_config.serial_number_word3 =
+ AdvReadEEPWord(iop_base, ASC_EEP_DVC_CFG_END - 1);
+ eep_config.serial_number_word2 =
+ AdvReadEEPWord(iop_base, ASC_EEP_DVC_CFG_END - 2);
+ eep_config.serial_number_word1 =
+ AdvReadEEPWord(iop_base, ASC_EEP_DVC_CFG_END - 3);
+ AdvSetEEPConfig(iop_base, &eep_config);
+ }
+
+ /*
+ * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the
+ * EEPROM configuration that was read.
+ *
+ * This is the mapping of EEPROM fields to Adv Library fields.
+ */
+ asc_dvc->wdtr_able = eep_config.wdtr_able;
+ asc_dvc->sdtr_able = eep_config.sdtr_able;
+ asc_dvc->ultra_able = eep_config.ultra_able;
+ asc_dvc->tagqng_able = eep_config.tagqng_able;
+ asc_dvc->cfg->disc_enable = eep_config.disc_enable;
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+ asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
+ asc_dvc->start_motor = eep_config.start_motor;
+ asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
+ asc_dvc->cfg->bios_boot_wait = eep_config.bios_boot_delay;
+ asc_dvc->bios_ctrl = eep_config.bios_ctrl;
+ asc_dvc->no_scam = eep_config.scam_tolerant;
+ asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
+ asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
+ asc_dvc->cfg->serial3 = eep_config.serial_number_word3;
+
+ /*
+ * Set the host maximum queuing (max. 253, min. 16) and the per device
+ * maximum queuing (max. 63, min. 4).
+ */
+ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG)
+ {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG)
+ {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_host_qng == 0)
+ {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else
+ {
+ eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
+ }
+ }
+
+ if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG)
+ {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG)
+ {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_dvc_qng == 0)
+ {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else
+ {
+ eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
+ }
+ }
+
+ /*
+ * If 'max_dvc_qng' is greater than 'max_host_qng', then
+ * set 'max_dvc_qng' to 'max_host_qng'.
+ */
+ if (eep_config.max_dvc_qng > eep_config.max_host_qng)
+ {
+ eep_config.max_dvc_qng = eep_config.max_host_qng;
+ }
+
+ /*
+ * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_CFG 'max_dvc_qng'
+ * values based on possibly adjusted EEPROM values.
+ */
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+
+
+ /*
+ * If the EEPROM 'termination' field is set to automatic (0), then set
+ * the ADV_DVC_CFG 'termination' field to automatic also.
+ *
+ * If the termination is specified with a non-zero 'termination'
+ * value check that a legal value is set and set the ADV_DVC_CFG
+ * 'termination' field appropriately.
+ */
+ if (eep_config.termination == 0)
+ {
+ asc_dvc->cfg->termination = 0; /* auto termination */
+ } else
+ {
+ /* Enable manual control with low off / high off. */
+ if (eep_config.termination == 1)
+ {
+ asc_dvc->cfg->termination = TERM_CTL_SEL;
+
+ /* Enable manual control with low off / high on. */
+ } else if (eep_config.termination == 2)
+ {
+ asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H;
+
+ /* Enable manual control with low on / high on. */
+ } else if (eep_config.termination == 3)
+ {
+ asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L;
+ } else
+ {
+ /*
+ * The EEPROM 'termination' field contains a bad value. Use
+ * automatic termination instead.
+ */
+ asc_dvc->cfg->termination = 0;
+ warn_code |= ASC_WARN_EEPROM_TERMINATION;
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Read EEPROM configuration into the specified buffer.
+ *
+ * Return a checksum based on the EEPROM configuration read.
+ */
+ASC_INITFUNC(
+STATIC ushort
+AdvGetEEPConfig(AdvPortAddr iop_base, ADVEEP_CONFIG *cfg_buf)
+)
+{
+ ushort wval, chksum;
+ ushort *wbuf;
+ int eep_addr;
+
+ /* cfg_buf is filled word by word in EEPROM order. */
+ wbuf = (ushort *) cfg_buf;
+ chksum = 0;
+
+ /* Device-configuration region: read and accumulate the checksum. */
+ for (eep_addr = ASC_EEP_DVC_CFG_BEGIN;
+ eep_addr < ASC_EEP_DVC_CFG_END;
+ eep_addr++, wbuf++)
+ {
+ wval = AdvReadEEPWord(iop_base, eep_addr);
+ chksum += wval;
+ *wbuf = wval;
+ }
+ /* Word at ASC_EEP_DVC_CFG_END is stored but excluded from the
+ * checksum — presumably the check_sum word itself (it is written
+ * at the same position by AdvSetEEPConfig()); confirm in a_condor.h. */
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ wbuf++;
+ /* Device-control region: copied verbatim, not checksummed. */
+ for (eep_addr = ASC_EEP_DVC_CTL_BEGIN;
+ eep_addr < ASC_EEP_MAX_WORD_ADDR;
+ eep_addr++, wbuf++)
+ {
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ }
+ return chksum;
+}
+
+/*
+ * Read the EEPROM from specified location
+ */
+ASC_INITFUNC(
+STATIC ushort
+AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
+)
+{
+ /* Issue the read command, wait for completion, then fetch the data. */
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_READ | eep_word_addr);
+ AdvWaitEEPCmd(iop_base);
+ return AdvReadWordRegister(iop_base, IOPW_EE_DATA);
+}
+
+/*
+ * Wait for EEPROM command to complete
+ */
+ASC_INITFUNC(
+STATIC void
+AdvWaitEEPCmd(AdvPortAddr iop_base)
+)
+{
+ int eep_delay_ms;
+
+ /* Poll the DONE bit in 1 ms steps, up to ASC_EEP_DELAY_MS total. */
+ for (eep_delay_ms = 0; eep_delay_ms < ASC_EEP_DELAY_MS; eep_delay_ms++)
+ {
+ if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE)
+ {
+ break;
+ }
+ DvcSleepMilliSecond(1);
+ }
+ /* If the command still has not completed, assert: the timeout is
+ * treated as a programming/hardware fault rather than returned. */
+ if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) == 0)
+ {
+ ADV_ASSERT(0);
+ }
+ return;
+}
+
+/*
+ * Write the EEPROM from 'cfg_buf'.
+ */
+ASC_INITFUNC(
+STATIC void
+AdvSetEEPConfig(AdvPortAddr iop_base, ADVEEP_CONFIG *cfg_buf)
+)
+{
+ ushort *wbuf;
+ ushort addr, chksum;
+
+ /* cfg_buf is written out word by word in EEPROM order. */
+ wbuf = (ushort *) cfg_buf;
+ chksum = 0;
+
+ /* Unlock the EEPROM for writing before any write commands. */
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
+ AdvWaitEEPCmd(iop_base);
+
+ /*
+ * Write EEPROM from word 0 to word 15
+ */
+ for (addr = ASC_EEP_DVC_CFG_BEGIN;
+ addr < ASC_EEP_DVC_CFG_END; addr++, wbuf++)
+ {
+ chksum += *wbuf;
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, *wbuf);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ DvcSleepMilliSecond(ASC_EEP_DELAY_MS);
+ }
+
+ /*
+ * Write EEPROM checksum at word 18
+ *
+ * 'addr' still holds ASC_EEP_DVC_CFG_END from the loop above, so the
+ * checksum computed here lands in the same word AdvGetEEPConfig()
+ * reads back as check_sum.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ wbuf++; /* skip over check_sum */
+
+ /*
+ * Write EEPROM OEM name at words 19 to 26
+ */
+ for (addr = ASC_EEP_DVC_CTL_BEGIN;
+ addr < ASC_EEP_MAX_WORD_ADDR; addr++, wbuf++)
+ {
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, *wbuf);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ }
+ /* Re-lock the EEPROM so stray writes cannot corrupt it. */
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
+ AdvWaitEEPCmd(iop_base);
+ return;
+}
+
+/*
+ * This function resets the chip and SCSI bus
+ *
+ * It is up to the caller to add a delay to let the bus settle after
+ * calling this function.
+ *
+ * The SCSI_CFG0, SCSI_CFG1, and MEM_CFG registers are set-up in
+ * AdvInitAsc3550Driver(). Here when doing a write to one of these
+ * registers read first and then write.
+ *
+ * Note: A SCSI Bus Reset can not be done until after the EEPROM
+ * configuration is read to determine whether SCSI Bus Resets
+ * should be performed.
+ */
+ASC_INITFUNC(
+STATIC void
+AdvResetChip(ADV_DVC_VAR *asc_dvc)
+)
+{
+ AdvPortAddr iop_base;
+ ushort word;
+ uchar byte;
+
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Reset Chip.  Hold the reset for 100 ms before re-enabling
+ * I/O register writes.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET);
+ DvcSleepMilliSecond(100);
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_WR_IO_REG);
+
+ /*
+ * Initialize Chip registers.
+ *
+ * Note: Don't remove the use of a temporary variable in the following
+ * code, otherwise the Microsoft C compiler will turn the following lines
+ * into a no-op.
+ */
+ byte = AdvReadByteRegister(iop_base, IOPB_MEM_CFG);
+ byte |= RAM_SZ_8KB;
+ AdvWriteByteRegister(iop_base, IOPB_MEM_CFG, byte);
+
+ /* NOTE(review): BIG_ENDIAN here is presumably a SCSI_CFG1 register
+ * bit from the driver headers; the name collides with the common
+ * <endian.h> macro — confirm which definition is in effect. */
+ word = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+ word &= ~BIG_ENDIAN;
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, word);
+
+ /*
+ * Setting the START_CTL_EMFU 3:2 bits sets a FIFO threshold
+ * of 128 bytes. This register is only accessible to the host.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
+ START_CTL_EMFU | READ_CMD_MRM);
+}
+
+/* a_advlib.c */
+/*
+ * Description:
+ * Send a SCSI request to the ASC3550 chip
+ *
+ * If there is no SG list for the request, set 'sg_entry_cnt' to 0.
+ *
+ * If 'sg_real_addr' is non-zero on entry, AscGetSGList() will not be
+ * called. It is assumed the caller has already initialized 'sg_real_addr'.
+ *
+ * Return:
+ * ADV_SUCCESS(1) - the request is in the mailbox
+ * ADV_BUSY(0) - total request count > 253, try later
+ * ADV_ERROR(-1) - invalid scsi request Q
+ */
+STATIC int
+AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc,
+ ADV_SCSI_REQ_Q *scsiq)
+{
+ /*
+ * Hand a SCSI request off to the microcode mailbox via
+ * AdvSendScsiCmd().  A NULL request pointer is a caller bug:
+ * assert in debug builds and fail with ADV_ERROR.
+ */
+ if (!scsiq)
+ {
+ ADV_ASSERT(0);
+ return ADV_ERROR;
+ }
+
+ return AdvSendScsiCmd(asc_dvc, scsiq);
+}
+
+/*
+ * Reset SCSI Bus and purge all outstanding requests.
+ *
+ * Return Value:
+ * ADV_TRUE(1) - All requests are purged and SCSI Bus is reset.
+ *
+ * Note: Should always return ADV_TRUE.
+ */
+STATIC int
+AdvResetSB(ADV_DVC_VAR *asc_dvc)
+{
+ /*
+ * Ask the microcode to reset the SCSI bus and purge requests,
+ * then drive the bus reset line from the host side as well.
+ * The idle command's status is what the caller sees.
+ */
+ int ret;
+
+ ret = AdvSendIdleCmd(asc_dvc, (ushort) IDLE_CMD_SCSI_RESET, 0L, 0);
+ AdvResetSCSIBus(asc_dvc);
+
+ return ret;
+}
+
+/*
+ * Reset SCSI Bus and delay.
+ */
+STATIC void
+AdvResetSCSIBus(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ ushort scsi_ctrl;
+
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * The microcode currently sets the SCSI Bus Reset signal while
+ * handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET command above.
+ * But the SCSI Bus Reset Hold Time in the microcode is not deterministic
+ * (it may in fact be for less than the SCSI Spec. minimum of 25 us).
+ * Therefore on return the Adv Library sets the SCSI Bus Reset signal
+ * for ASC_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
+ * than 25 us.
+ */
+ scsi_ctrl = AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL);
+ /* Assert RSTOUT, hold it for the required time, then deassert. */
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CTRL,
+ scsi_ctrl | ADV_SCSI_CTRL_RSTOUT);
+ DvcDelayMicroSecond(asc_dvc, (ushort) ASC_SCSI_RESET_HOLD_TIME_US);
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CTRL,
+ scsi_ctrl & ~ADV_SCSI_CTRL_RSTOUT);
+
+ /* scsi_reset_wait is in seconds (from the EEPROM scsi_reset_delay
+ * field); convert to milliseconds for the sleep. */
+ DvcSleepMilliSecond((ulong) asc_dvc->scsi_reset_wait * 1000);
+}
+
+
+/*
+ * Adv Library Interrupt Service Routine
+ *
+ * This function is called by a driver's interrupt service routine.
+ * The function disables and re-enables interrupts.
+ *
+ * When a microcode idle command is completed, the ADV_DVC_VAR
+ * 'idle_cmd_done' field is set to ADV_TRUE.
+ *
+ * Note: AdvISR() can be called when interrupts are disabled or even
+ * when there is no hardware interrupt condition present. It will
+ * always check for completed idle commands and microcode requests.
+ * This is an important feature that shouldn't be changed because it
+ * allows commands to be completed from polling mode loops.
+ *
+ * Return:
+ * ADV_TRUE(1) - interrupt was pending
+ * ADV_FALSE(0) - no interrupt was pending
+ */
+STATIC int
+AdvISR(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ uchar int_stat;
+ ushort next_done_loc, target_bit;
+ int completed_q;
+ long flags;
+ ADV_SCSI_REQ_Q *scsiq;
+ ASC_REQ_SENSE *sense_data;
+ int ret;
+
+ flags = DvcEnterCritical();
+ iop_base = asc_dvc->iop_base;
+
+ if (AdvIsIntPending(iop_base))
+ {
+ ret = ADV_TRUE;
+ } else
+ {
+ ret = ADV_FALSE;
+ }
+
+ /* Reading the register clears the interrupt. */
+ int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG);
+
+ if (int_stat & ADV_INTR_STATUS_INTRB)
+ {
+ asc_dvc->idle_cmd_done = ADV_TRUE;
+ }
+
+ /*
+ * Notify the driver of a hardware detected SCSI Bus Reset.
+ */
+ if (int_stat & ADV_INTR_STATUS_INTRC)
+ {
+ if (asc_dvc->sbreset_callback != 0)
+ {
+ (*(ADV_SBRESET_CALLBACK) asc_dvc->sbreset_callback)(asc_dvc);
+ }
+ }
+
+ /*
+ * ASC_MC_HOST_NEXT_DONE (0x129) is actually the last completed RISC
+ * Queue List request. Its forward pointer (RQL_FWD) points to the
+ * current completed RISC Queue List request.
+ */
+ AdvReadByteLram(iop_base, ASC_MC_HOST_NEXT_DONE, next_done_loc);
+ next_done_loc = ASC_MC_RISC_Q_LIST_BASE +
+ (next_done_loc * ASC_MC_RISC_Q_LIST_SIZE) + RQL_FWD;
+
+ AdvReadByteLram(iop_base, next_done_loc, completed_q);
+
+ /* Loop until all completed Q's are processed. */
+ while (completed_q != ASC_MC_NULL_Q)
+ {
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_DONE, completed_q);
+
+ next_done_loc = ASC_MC_RISC_Q_LIST_BASE +
+ (completed_q * ASC_MC_RISC_Q_LIST_SIZE);
+
+ /*
+ * Read the ADV_SCSI_REQ_Q virtual address pointer from
+ * the RISC list entry. The microcode has changed the
+ * ADV_SCSI_REQ_Q physical address to its virtual address.
+ *
+ * Refer to comments at the end of AdvSendScsiCmd() for
+ * more information on the RISC list structure.
+ */
+ {
+ ushort lsw, msw;
+ AdvReadWordLram(iop_base, next_done_loc + RQL_PHYADDR, lsw);
+ AdvReadWordLram(iop_base, next_done_loc + RQL_PHYADDR + 2, msw);
+
+ scsiq = (ADV_SCSI_REQ_Q *) (((ulong) msw << 16) | lsw);
+ }
+ ADV_ASSERT(scsiq != NULL);
+
+ target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id);
+
+ /*
+ * Clear request microcode control flag.
+ */
+ scsiq->cntl = 0;
+
+ /*
+ * Check Condition handling
+ */
+ if ((scsiq->done_status == QD_WITH_ERROR) &&
+ (scsiq->scsi_status == SS_CHK_CONDITION) &&
+ (sense_data = (ASC_REQ_SENSE *) scsiq->vsense_addr) != 0 &&
+ (scsiq->orig_sense_len - scsiq->sense_len) >= ASC_MIN_SENSE_LEN)
+ {
+ /*
+ * Command returned with a check condition and valid
+ * sense data.
+ */
+ }
+ /*
+ * If the command that completed was a SCSI INQUIRY and
+ * LUN 0 was sent the command, then process the INQUIRY
+ * command information for the device.
+ */
+ else if (scsiq->done_status == QD_NO_ERROR &&
+ scsiq->cdb[0] == SCSICMD_Inquiry &&
+ scsiq->target_lun == 0)
+ {
+ AdvInquiryHandling(asc_dvc, scsiq);
+ }
+
+
+ /* Change the RISC Queue List state to free. */
+ AdvWriteByteLram(iop_base, next_done_loc + RQL_STATE, ASC_MC_QS_FREE);
+
+ /* Get the RISC Queue List forward pointer. */
+ AdvReadByteLram(iop_base, next_done_loc + RQL_FWD, completed_q);
+
+ /*
+ * Notify the driver of the completed request by passing
+ * the ADV_SCSI_REQ_Q pointer to its callback function.
+ */
+ ADV_ASSERT(asc_dvc->cur_host_qng > 0);
+ asc_dvc->cur_host_qng--;
+ scsiq->a_flag |= ADV_SCSIQ_DONE;
+ (*(ADV_ISR_CALLBACK) asc_dvc->isr_callback)(asc_dvc, scsiq);
+ /*
+ * Note: After the driver callback function is called, 'scsiq'
+ * can no longer be referenced.
+ *
+ * Fall through and continue processing other completed
+ * requests...
+ */
+
+ /*
+ * Disable interrupts again in case the driver inadvertently
+ * enabled interrupts in its callback function.
+ *
+ * The DvcEnterCritical() return value is ignored, because
+ * the 'flags' saved when AdvISR() was first entered will be
+ * used to restore the interrupt flag on exit.
+ */
+ (void) DvcEnterCritical();
+ }
+ DvcLeaveCritical(flags);
+ return ret;
+}
+
+/*
+ * Send an idle command to the chip and wait for completion.
+ *
+ * Interrupts do not have to be enabled on entry.
+ *
+ * Return Values:
+ * ADV_TRUE - command completed successfully
+ * ADV_FALSE - command failed
+ */
+STATIC int
+AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
+ ushort idle_cmd,
+ ulong idle_cmd_parameter,
+ int flags)
+{
+ int last_int_level;
+ ulong i;
+ AdvPortAddr iop_base;
+ int ret;
+
+ asc_dvc->idle_cmd_done = 0;
+
+ last_int_level = DvcEnterCritical();
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Write the idle command value after the idle command parameter
+ * has been written to avoid a race condition. If the order is not
+ * followed, the microcode may process the idle command before the
+ * parameters have been written to LRAM.
+ */
+ AdvWriteDWordLram(iop_base, ASC_MC_IDLE_PARA_STAT, idle_cmd_parameter);
+ AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd);
+ DvcLeaveCritical(last_int_level);
+
+ /*
+ * If the 'flags' argument contains the ADV_NOWAIT flag, then
+ * return with success.
+ */
+ if (flags & ADV_NOWAIT)
+ {
+ return ADV_TRUE;
+ }
+
+ for (i = 0; i < SCSI_WAIT_10_SEC * SCSI_MS_PER_SEC; i++)
+ {
+ /*
+ * 'idle_cmd_done' is set by AdvISR().
+ */
+ if (asc_dvc->idle_cmd_done)
+ {
+ break;
+ }
+ DvcSleepMilliSecond(1);
+
+ /*
+ * If interrupts were disabled on entry to AdvSendIdleCmd(),
+ * then they will still be disabled here. Call AdvISR() to
+ * check for the idle command completion.
+ */
+ (void) AdvISR(asc_dvc);
+ }
+
+ last_int_level = DvcEnterCritical();
+
+ if (asc_dvc->idle_cmd_done == ADV_FALSE)
+ {
+ ADV_ASSERT(0); /* The idle command should never timeout. */
+ return ADV_FALSE;
+ } else
+ {
+ AdvReadWordLram(iop_base, ASC_MC_IDLE_PARA_STAT, ret);
+ return ret;
+ }
+}
+
+/*
+ * Send the SCSI request block to the adapter
+ *
+ * Each of the 255 Adv Library/Microcode RISC Lists or mailboxes has the
+ * following structure:
+ *
+ * 0: RQL_FWD - RISC list forward pointer (1 byte)
+ * 1: RQL_BWD - RISC list backward pointer (1 byte)
+ * 2: RQL_STATE - RISC list state byte - free, ready, done, aborted (1 byte)
+ * 3: RQL_TID - request target id (1 byte)
+ * 4: RQL_PHYADDR - ADV_SCSI_REQ_Q physical pointer (4 bytes)
+ *
+ * Return:
+ * ADV_SUCCESS(1) - the request is in the mailbox
+ * ADV_BUSY(0) - total request count > 253, try later
+ */
+STATIC int
+AdvSendScsiCmd(
+ ADV_DVC_VAR *asc_dvc,
+ ADV_SCSI_REQ_Q *scsiq)
+{
+ ushort next_ready_loc;
+ uchar next_ready_loc_fwd;
+ int last_int_level;
+ AdvPortAddr iop_base;
+ long req_size;
+ ulong q_phy_addr;
+
+ /*
+ * The ADV_SCSI_REQ_Q 'target_id' field should never be equal
+ * to the host adapter ID or exceed ADV_MAX_TID.
+ */
+ if (scsiq->target_id == asc_dvc->chip_scsi_id ||
+ scsiq->target_id > ADV_MAX_TID)
+ {
+ scsiq->host_status = QHSTA_M_INVALID_DEVICE;
+ scsiq->done_status = QD_WITH_ERROR;
+ return ADV_ERROR;
+ }
+
+ iop_base = asc_dvc->iop_base;
+
+ last_int_level = DvcEnterCritical();
+
+ if (asc_dvc->cur_host_qng >= asc_dvc->max_host_qng)
+ {
+ DvcLeaveCritical(last_int_level);
+ return ADV_BUSY;
+ } else
+ {
+ ADV_ASSERT(asc_dvc->cur_host_qng < ASC_MC_RISC_Q_TOTAL_CNT);
+ asc_dvc->cur_host_qng++;
+ }
+
+ /*
+ * Clear the ADV_SCSI_REQ_Q done flag.
+ */
+ scsiq->a_flag &= ~ADV_SCSIQ_DONE;
+
+ /*
+ * Save the original sense buffer length.
+ *
+ * After the request completes 'sense_len' will be set to the residual
+ * byte count of the Auto-Request Sense if a command returns CHECK
+ * CONDITION and the Sense Data is valid indicated by 'host_status' not
+ * being set to QHSTA_M_AUTO_REQ_SENSE_FAIL. To determine the valid
+ * Sense Data Length subtract 'sense_len' from 'orig_sense_len'.
+ */
+ scsiq->orig_sense_len = scsiq->sense_len;
+
+ AdvReadByteLram(iop_base, ASC_MC_HOST_NEXT_READY, next_ready_loc);
+ next_ready_loc = ASC_MC_RISC_Q_LIST_BASE +
+ (next_ready_loc * ASC_MC_RISC_Q_LIST_SIZE);
+
+ /*
+ * Write the physical address of the Q to the mailbox.
+ * We need to skip the first four bytes, because the microcode
+ * uses them internally for linking Q's together.
+ */
+ req_size = sizeof(ADV_SCSI_REQ_Q);
+ q_phy_addr = DvcGetPhyAddr(asc_dvc, scsiq,
+ (uchar *) scsiq, &req_size,
+ ADV_IS_SCSIQ_FLAG);
+ ADV_ASSERT(ADV_DWALIGN(q_phy_addr) == q_phy_addr);
+ ADV_ASSERT(req_size >= sizeof(ADV_SCSI_REQ_Q));
+
+ scsiq->scsiq_ptr = (ADV_SCSI_REQ_Q *) scsiq;
+
+ /*
+ * The RISC list structure, which 'next_ready_loc' is a pointer
+ * to in microcode LRAM, has the format detailed in the comment
+ * header for this function.
+ *
+ * Write the ADV_SCSI_REQ_Q physical pointer to 'next_ready_loc' request.
+ */
+ AdvWriteDWordLram(iop_base, next_ready_loc + RQL_PHYADDR, q_phy_addr);
+
+ /* Write target_id to 'next_ready_loc' request. */
+ AdvWriteByteLram(iop_base, next_ready_loc + RQL_TID, scsiq->target_id);
+
+ /*
+ * Set the ASC_MC_HOST_NEXT_READY (0x128) microcode variable to
+ * the 'next_ready_loc' request forward pointer.
+ *
+ * Do this *before* changing the 'next_ready_loc' queue to QS_READY.
+ * After the state is changed to QS_READY 'RQL_FWD' will be changed
+ * by the microcode.
+ *
+ * NOTE: The temporary variable 'next_ready_loc_fwd' is required to
+ * prevent some compilers from optimizing out 'AdvReadByteLram()' if
+ * it were used as the 3rd argument to 'AdvWriteByteLram()'.
+ */
+ AdvReadByteLram(iop_base, next_ready_loc + RQL_FWD, next_ready_loc_fwd);
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_READY, next_ready_loc_fwd);
+
+ /*
+ * Change the state of 'next_ready_loc' request from QS_FREE to
+ * QS_READY which will cause the microcode to pick it up and
+ * execute it.
+ *
+ * Can't reference 'next_ready_loc' after changing the request
+ * state to QS_READY. The microcode now owns the request.
+ */
+ AdvWriteByteLram(iop_base, next_ready_loc + RQL_STATE, ASC_MC_QS_READY);
+
+ DvcLeaveCritical(last_int_level);
+ return ADV_SUCCESS;
+}
+
+/*
+ * Inquiry Information Byte 7 Handling
+ *
+ * Handle SCSI Inquiry Command information for a device by setting
+ * microcode operating variables that affect WDTR, SDTR, and Tag
+ * Queuing.
+ */
+STATIC void
+AdvInquiryHandling(
+ ADV_DVC_VAR *asc_dvc,
+ ADV_SCSI_REQ_Q *scsiq)
+{
+ AdvPortAddr iop_base;
+ uchar tid;
+ ASC_SCSI_INQUIRY *inq;
+ ushort tidmask;
+ ushort cfg_word;
+
+ /*
+ * AdvInquiryHandling() requires up to INQUIRY information Byte 7
+ * to be available.
+ *
+ * If less than 8 bytes of INQUIRY information were requested or less
+ * than 8 bytes were transferred, then return. cdb[4] is the request
+ * length and the ADV_SCSI_REQ_Q 'data_cnt' field is set by the
+ * microcode to the transfer residual count.
+ */
+ if (scsiq->cdb[4] < 8 || (scsiq->cdb[4] - scsiq->data_cnt) < 8)
+ {
+ return;
+ }
+
+ iop_base = asc_dvc->iop_base;
+ tid = scsiq->target_id;
+ inq = (ASC_SCSI_INQUIRY *) scsiq->vdata_addr;
+
+ /*
+ * WDTR, SDTR, and Tag Queuing cannot be enabled for old devices.
+ */
+ if (inq->byte3.rsp_data_fmt < 2 && inq->byte2.ansi_apr_ver < 2)
+ {
+ return;
+ } else
+ {
+ /*
+ * INQUIRY Byte 7 Handling
+ *
+ * Use a device's INQUIRY byte 7 to determine whether it
+ * supports WDTR, SDTR, and Tag Queuing. If the feature
+ * is enabled in the EEPROM and the device supports the
+ * feature, then enable it in the microcode.
+ */
+
+ tidmask = ADV_TID_TO_TIDMASK(tid);
+
+ /*
+ * Wide Transfers
+ *
+ * If the EEPROM enabled WDTR for the device and the device
+ * supports wide bus (16 bit) transfers, then turn on the
+ * device's 'wdtr_able' bit and write the new value to the
+ * microcode.
+ */
+ if ((asc_dvc->wdtr_able & tidmask) && inq->byte7.WBus16)
+ {
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
+ if ((cfg_word & tidmask) == 0)
+ {
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
+
+ /*
+ * Clear the microcode "WDTR negotiation" done indicator
+ * for the target to cause it to negotiate with the new
+ * setting set above.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+ cfg_word &= ~tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+ }
+ }
+
+ /*
+ * Synchronous Transfers
+ *
+ * If the EEPROM enabled SDTR for the device and the device
+ * supports synchronous transfers, then turn on the device's
+ * 'sdtr_able' bit. Write the new value to the microcode.
+ */
+ if ((asc_dvc->sdtr_able & tidmask) && inq->byte7.Sync)
+ {
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+ if ((cfg_word & tidmask) == 0)
+ {
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+
+ /*
+ * Clear the microcode "SDTR negotiation" done indicator
+ * for the target to cause it to negotiate with the new
+ * setting set above.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+ cfg_word &= ~tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+ }
+ }
+
+ /*
+ * If the EEPROM enabled Tag Queuing for device and the
+ * device supports Tag Queuing, then turn on the device's
+ * 'tagqng_enable' bit in the microcode and set the microcode
+ * maximum command count to the ADV_DVC_VAR 'max_dvc_qng'
+ * value.
+ *
+ * Tag Queuing is disabled for the BIOS which runs in polled
+ * mode and would see no benefit from Tag Queuing. Also by
+ * disabling Tag Queuing in the BIOS devices with Tag Queuing
+ * bugs will at least work with the BIOS.
+ */
+ if ((asc_dvc->tagqng_able & tidmask) && inq->byte7.CmdQue)
+ {
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
+ AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ asc_dvc->max_dvc_qng);
+ }
+ }
+}
diff --git a/linux/src/drivers/scsi/advansys.h b/linux/src/drivers/scsi/advansys.h
new file mode 100644
index 0000000..72e8aef
--- /dev/null
+++ b/linux/src/drivers/scsi/advansys.h
@@ -0,0 +1,174 @@
+/* $Id: advansys.h,v 1.1 1999/04/26 05:54:08 tb Exp $ */
+
+/*
+ * advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
+ *
+ * Copyright (c) 1995-1998 Advanced System Products, Inc.
+ * All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ * There is an AdvanSys Linux WWW page at:
+ * http://www.advansys.com/linux.html
+ *
+ * The latest version of the AdvanSys driver is available at:
+ * ftp://ftp.advansys.com/pub/linux
+ *
+ * Please send questions, comments, bug reports to:
+ * bobf@advansys.com (Bob Frey)
+ */
+
+#ifndef _ADVANSYS_H
+#define _ADVANSYS_H
+
+/* Convert Linux Version, Patch-level, Sub-level to LINUX_VERSION_CODE. */
+#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif /* LINUX_VERSION_CODE */
+
+/*
+ * Scsi_Host_Template function prototypes.
+ */
+int advansys_detect(Scsi_Host_Template *);
+int advansys_release(struct Scsi_Host *);
+const char *advansys_info(struct Scsi_Host *);
+int advansys_command(Scsi_Cmnd *);
+int advansys_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int advansys_abort(Scsi_Cmnd *);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+int advansys_reset(Scsi_Cmnd *);
+#else /* version >= v1.3.89 */
+int advansys_reset(Scsi_Cmnd *, unsigned int);
+#endif /* version >= v1.3.89 */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+int advansys_biosparam(Disk *, int, int[]);
+#else /* version >= v1.3.0 */
+int advansys_biosparam(Disk *, kdev_t, int[]);
+extern struct proc_dir_entry proc_scsi_advansys;
+int advansys_proc_info(char *, char **, off_t, int, int, int);
+#endif /* version >= v1.3.0 */
+
+/* init/main.c setup function */
+void advansys_setup(char *, int *);
+
+/*
+ * AdvanSys Host Driver Scsi_Host_Template (struct SHT) from hosts.h.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#define ADVANSYS { \
+ NULL, /* struct SHT *next */ \
+ NULL, /* int *usage_count */ \
+ "advansys", /* char *name */ \
+ advansys_detect, /* int (*detect)(struct SHT *) */ \
+ advansys_release, /* int (*release)(struct Scsi_Host *) */ \
+ advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
+ advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
+ advansys_queuecommand, \
+ /* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
+ advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
+ advansys_reset, /* int (*reset)(Scsi_Cmnd *) */ \
+ NULL, /* int (*slave_attach)(int, int) */ \
+ advansys_biosparam, /* int (* bios_param)(Disk *, int, int []) */ \
+ /* \
+ * The following fields are set per adapter in advansys_detect(). \
+ */ \
+ 0, /* int can_queue */ \
+ 0, /* int this_id */ \
+ 0, /* short unsigned int sg_tablesize */ \
+ 0, /* short cmd_per_lun */ \
+ 0, /* unsigned char present */ \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ 1, /* unsigned unchecked_isa_dma:1 */ \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. According to the mid-level SCSI documentation \
+ * this obviates any performance gain provided by setting \
+ * 'use_clustering'. But empirically while CPU utilization is increased \
+ * by enabling clustering, I/O throughput increases as well. \
+ */ \
+ ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
+}
+#elif LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,75)
+#define ADVANSYS { \
+ NULL, /* struct SHT *next */ \
+ NULL, \
+ /* version < v2.1.23 long *usage_count */ \
+ /* version >= v2.1.23 struct module * */ \
+ &proc_scsi_advansys, /* struct proc_dir_entry *proc_dir */ \
+ advansys_proc_info, \
+ /* int (*proc_info)(char *, char **, off_t, int, int, int) */ \
+ "advansys", /* const char *name */ \
+ advansys_detect, /* int (*detect)(struct SHT *) */ \
+ advansys_release, /* int (*release)(struct Scsi_Host *) */ \
+ advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
+ advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
+ advansys_queuecommand, \
+ /* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
+ advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
+ advansys_reset, \
+ /* version < v1.3.89 int (*reset)(Scsi_Cmnd *) */ \
+ /* version >= v1.3.89 int (*reset)(Scsi_Cmnd *, unsigned int) */ \
+ NULL, /* int (*slave_attach)(int, int) */ \
+ advansys_biosparam, /* int (* bios_param)(Disk *, kdev_t, int []) */ \
+ /* \
+ * The following fields are set per adapter in advansys_detect(). \
+ */ \
+ 0, /* int can_queue */ \
+ 0, /* int this_id */ \
+ 0, /* short unsigned int sg_tablesize */ \
+ 0, /* short cmd_per_lun */ \
+ 0, /* unsigned char present */ \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ 1, /* unsigned unchecked_isa_dma:1 */ \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. According to the mid-level SCSI documentation \
+ * this obviates any performance gain provided by setting \
+ * 'use_clustering'. But empirically while CPU utilization is increased \
+ * by enabling clustering, I/O throughput increases as well. \
+ */ \
+ ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
+}
+#else /* version >= v2.1.75 */
+#define ADVANSYS { \
+ proc_dir: &proc_scsi_advansys, \
+ proc_info: advansys_proc_info, \
+ name: "advansys", \
+ detect: advansys_detect, \
+ release: advansys_release, \
+ info: advansys_info, \
+ command: advansys_command, \
+ queuecommand: advansys_queuecommand, \
+ abort: advansys_abort, \
+ reset: advansys_reset, \
+ bios_param: advansys_biosparam, \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ unchecked_isa_dma: 1, \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. According to the mid-level SCSI documentation \
+ * this obviates any performance gain provided by setting \
+ * 'use_clustering'. But empirically while CPU utilization is increased \
+ * by enabling clustering, I/O throughput increases as well. \
+ */ \
+ use_clustering: ENABLE_CLUSTERING, \
+}
+#endif /* version >= v2.1.75 */
+#endif /* _ADVANSYS_H */
diff --git a/linux/src/drivers/scsi/aha152x.c b/linux/src/drivers/scsi/aha152x.c
new file mode 100644
index 0000000..44fe1b0
--- /dev/null
+++ b/linux/src/drivers/scsi/aha152x.c
@@ -0,0 +1,3280 @@
+/* aha152x.c -- Adaptec AHA-152x driver
+ * Author: Jürgen E. Fischer, fischer@et-inf.fho-emden.de
+ * Copyright 1993, 1994, 1995, 1996 Jürgen E. Fischer
+ *
+ *
+ * This driver is based on
+ * fdomain.c -- Future Domain TMC-16x0 driver
+ * which is
+ * Copyright 1992, 1993 Rickard E. Faith (faith@cs.unc.edu)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ *
+ * $Id: aha152x.c,v 1.1.4.2 2007/03/27 21:04:30 tschwinge Exp $
+ *
+ * Revision 1.18 1996/09/07 20:10:40 fischer
+ * - fixed can_queue handling (multiple outstanding commands working again)
+ *
+ * Revision 1.17 1996/08/17 16:05:14 fischer
+ * - biosparam improved
+ * - interrupt verification
+ * - updated documentation
+ * - cleanups
+ *
+ * Revision 1.16 1996/06/09 00:04:56 root
+ * - added configuration symbols for insmod (aha152x/aha152x1)
+ *
+ * Revision 1.15 1996/04/30 14:52:06 fischer
+ * - proc info fixed
+ * - support for extended translation for >1GB disks
+ *
+ * Revision 1.14 1996/01/17 15:11:20 fischer
+ * - fixed lockup in MESSAGE IN phase after reconnection
+ *
+ * Revision 1.13 1996/01/09 02:15:53 fischer
+ * - some cleanups
+ * - moved request_irq behind controller initialization
+ * (to avoid spurious interrupts)
+ *
+ * Revision 1.12 1995/12/16 12:26:07 fischer
+ * - barrier()s added
+ * - configurable RESET delay added
+ *
+ * Revision 1.11 1995/12/06 21:18:35 fischer
+ * - some minor updates
+ *
+ * Revision 1.10 1995/07/22 19:18:45 fischer
+ * - support for 2 controllers
+ * - started synchronous data transfers (not working yet)
+ *
+ * Revision 1.9 1995/03/18 09:20:24 root
+ * - patches for PCMCIA and modules
+ *
+ * Revision 1.8 1995/01/21 22:07:19 root
+ * - snarf_region => request_region
+ * - aha152x_intr interface change
+ *
+ * Revision 1.7 1995/01/02 23:19:36 root
+ * - updated COMMAND_SIZE to cmd_len
+ * - changed sti() to restore_flags()
+ * - fixed some #ifdef which generated warnings
+ *
+ * Revision 1.6 1994/11/24 20:35:27 root
+ * - problem with odd number of bytes in fifo fixed
+ *
+ * Revision 1.5 1994/10/30 14:39:56 root
+ * - abort code fixed
+ * - debugging improved
+ *
+ * Revision 1.4 1994/09/12 11:33:01 root
+ * - irqaction to request_irq
+ * - abortion updated
+ *
+ * Revision 1.3 1994/08/04 13:53:05 root
+ * - updates for mid-level-driver changes
+ * - accept unexpected BUSFREE phase as error condition
+ * - parity check now configurable
+ *
+ * Revision 1.2 1994/07/03 12:56:36 root
+ * - cleaned up debugging code
+ * - more tweaking on reset delays
+ * - updated abort/reset code (pretty untested...)
+ *
+ * Revision 1.1 1994/05/28 21:18:49 root
+ * - update for mid-level interface change (abort-reset)
+ * - delays after resets adjusted for some slow devices
+ *
+ * Revision 1.0 1994/03/25 12:52:00 root
+ * - Fixed "more data than expected" problem
+ * - added new BIOS signatures
+ *
+ * Revision 0.102 1994/01/31 20:44:12 root
+ * - minor changes in insw/outsw handling
+ *
+ * Revision 0.101 1993/12/13 01:16:27 root
+ * - fixed STATUS phase (non-GOOD stati were dropped sometimes;
+ * fixes problems with CD-ROM sector size detection & media change)
+ *
+ * Revision 0.100 1993/12/10 16:58:47 root
+ * - fix for unsuccessful selections in case of non-continuous id assignments
+ * on the scsi bus.
+ *
+ * Revision 0.99 1993/10/24 16:19:59 root
+ * - fixed DATA IN (rare read errors gone)
+ *
+ * Revision 0.98 1993/10/17 12:54:44 root
+ * - fixed some recent fixes (shame on me)
+ * - moved initialization of scratch area to aha152x_queue
+ *
+ * Revision 0.97 1993/10/09 18:53:53 root
+ * - DATA IN fixed. Rarely left data in the fifo.
+ *
+ * Revision 0.96 1993/10/03 00:53:59 root
+ * - minor changes on DATA IN
+ *
+ * Revision 0.95 1993/09/24 10:36:01 root
+ * - change handling of MSGI after reselection
+ * - fixed sti/cli
+ * - minor changes
+ *
+ * Revision 0.94 1993/09/18 14:08:22 root
+ * - fixed bug in multiple outstanding command code
+ * - changed detection
+ * - support for kernel command line configuration
+ * - reset corrected
+ * - changed message handling
+ *
+ * Revision 0.93 1993/09/15 20:41:19 root
+ * - fixed bugs with multiple outstanding commands
+ *
+ * Revision 0.92 1993/09/13 02:46:33 root
+ * - multiple outstanding commands work (no problems with IBM drive)
+ *
+ * Revision 0.91 1993/09/12 20:51:46 root
+ * added multiple outstanding commands
+ * (some problem with this $%&? IBM device remain)
+ *
+ * Revision 0.9 1993/09/12 11:11:22 root
+ * - corrected auto-configuration
+ * - changed the auto-configuration (added some '#define's)
+ * - added support for dis-/reconnection
+ *
+ * Revision 0.8 1993/09/06 23:09:39 root
+ * - added support for the drive activity light
+ * - minor changes
+ *
+ * Revision 0.7 1993/09/05 14:30:15 root
+ * - improved phase detection
+ * - now using the new snarf_region code of 0.99pl13
+ *
+ * Revision 0.6 1993/09/02 11:01:38 root
+ * first public release; added some signatures and biosparam()
+ *
+ * Revision 0.5 1993/08/30 10:23:30 root
+ * fixed timing problems with my IBM drive
+ *
+ * Revision 0.4 1993/08/29 14:06:52 root
+ * fixed some problems with timeouts due to incomplete commands
+ *
+ * Revision 0.3 1993/08/28 15:55:03 root
+ * writing data works too. mounted and worked on a dos partition
+ *
+ * Revision 0.2 1993/08/27 22:42:07 root
+ * reading data works. Mounted a msdos partition.
+ *
+ * Revision 0.1 1993/08/25 13:38:30 root
+ * first "damn thing doesn't work" version
+ *
+ * Revision 0.0 1993/08/14 19:54:25 root
+ * empty function bodies; detect() works.
+ *
+ *
+ **************************************************************************
+
+
+
+ DESCRIPTION:
+
+ This is the Linux low-level SCSI driver for Adaptec AHA-1520/1522 SCSI
+ host adapters.
+
+
+ CONFIGURATION ARGUMENTS:
+
+ IOPORT base io address (0x340/0x140)
+ IRQ interrupt level (9-12; default 11)
+ SCSI_ID scsi id of controller (0-7; default 7)
+ RECONNECT allow targets to disconnect from the bus (0/1; default 1 [on])
+ PARITY enable parity checking (0/1; default 1 [on])
+ SYNCHRONOUS enable synchronous transfers (0/1; default 0 [off])
+ (NOT WORKING YET)
+ DELAY: bus reset delay (default 100)
+ EXT_TRANS: enable extended translation (0/1: default 0 [off])
+ (see NOTES below)
+
+ COMPILE TIME CONFIGURATION (put into AHA152X in drivers/scsi/Makefile):
+
+ -DAUTOCONF
+ use configuration the controller reports (AHA-152x only)
+
+ -DSKIP_BIOSTEST
+ Don't test for BIOS signature (AHA-1510 or disabled BIOS)
+
+ -DSETUP0="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
+ override for the first controller
+
+ -DSETUP1="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
+ override for the second controller
+
+
+ LILO COMMAND LINE OPTIONS:
+
+ aha152x=<IOPORT>[,<IRQ>[,<SCSI-ID>[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY> [,<EXT_TRANS]]]]]]]
+
+ The normal configuration can be overridden by specifying a command line.
+ When you do this, the BIOS test is skipped. Entered values have to be
+ valid (known). Don't use values that aren't supported under normal
+ operation. If you think that you need other values: contact me.
+ For two controllers use the aha152x statement twice.
+
+
+ SYMBOLS FOR MODULE CONFIGURATION:
+
+ aha152x=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
+ configuration override of first controller
+
+
+ aha152x1=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
+ configuration override of second controller
+
+
+ NOTES ON EXT_TRANS:
+
+ SCSI uses block numbers to address blocks/sectors on a device.
+ The BIOS uses a cylinder/head/sector addressing scheme (C/H/S)
+ scheme instead. DOS expects a BIOS or driver that understands this
+ C/H/S addressing.
+
+ The number of cylinders/heads/sectors is called geometry and is required
+ as a base for requests in C/H/S addressing. SCSI only knows about the
+ total capacity of disks in blocks (sectors).
+
+ Therefore the SCSI BIOS/DOS driver has to calculate a logical/virtual
+ geometry just to be able to support that addressing scheme. The geometry
+ returned by the SCSI BIOS is a pure calculation and has nothing to
+ do with the real/physical geometry of the disk (which is usually
+ irrelevant anyway).
+
+ Basically this has no impact at all on Linux, because it also uses block
+ instead of C/H/S addressing. Unfortunately C/H/S addressing is also used
+ in the partition table and therefore every operating system has to know
+ the right geometry to be able to interpret it.
+
+ Moreover there are certain limitations to the C/H/S addressing scheme,
+ namely the address space is limited to up to 255 heads, up to 63 sectors
+ and a maximum of 1023 cylinders.
+
+ The AHA-1522 BIOS calculates the geometry by fixing the number of heads
+ to 64, the number of sectors to 32 and by calculating the number of
+ cylinders by dividing the capacity reported by the disk by 64*32 (1 MB).
+ This is considered to be the default translation.
+
+ With respect to the limit of 1023 cylinders using C/H/S you can only
+ address the first GB of your disk in the partition table. Therefore
+ BIOSes of some newer controllers based on the AIC-6260/6360 support
+ extended translation. This means that the BIOS uses 255 for heads,
+ 63 for sectors and then divides the capacity of the disk by 255*63
+ (about 8 MB), as soon as it sees a disk greater than 1 GB. That results
+ in a maximum of about 8 GB addressable disk space in the partition table
+ (but there are already bigger disks out there today).
+
+ To make it even more complicated the translation mode might/might
+ not be configurable in certain BIOS setups.
+
+ This driver does some more or less failsafe guessing to get the
+ geometry right in most cases:
+
+ - for disks<1GB: use default translation (C/32/64)
+ - for disks>1GB:
+ - take current geometry from the partition table
+ (using scsicam_bios_param and accept only `valid' geometries,
+ ie. either (C/32/64) or (C/63/255)). This can be extended
+ translation even if it's not enabled in the driver.
+ - if that fails, take extended translation if enabled by override,
+ kernel or module parameter, otherwise take default translation and
+ ask the user for verification. This might fail on disks that are
+ not yet partitioned or are empty.
+
+
+ REFERENCES USED:
+
+ "AIC-6260 SCSI Chip Specification", Adaptec Corporation.
+
+ "SCSI COMPUTER SYSTEM INTERFACE - 2 (SCSI-2)", X3T9.2/86-109 rev. 10h
+
+ "Writing a SCSI device driver for Linux", Rik Faith (faith@cs.unc.edu)
+
+ "Kernel Hacker's Guide", Michael K. Johnson (johnsonm@sunsite.unc.edu)
+
+ "Adaptec 1520/1522 User's Guide", Adaptec Corporation.
+
+ Michael K. Johnson (johnsonm@sunsite.unc.edu)
+
+ Drew Eckhardt (drew@cs.colorado.edu)
+
+ Eric Youngdale (ericy@cais.com)
+
+ special thanks to Eric Youngdale for freely(!) supplying the
+ documentation on the chip.
+
+ **************************************************************************/
+
+#ifdef MACH
+#define AUTOCONF 1
+#endif
+
+#ifdef PCMCIA
+#define MODULE
+#endif
+
+#include <linux/module.h>
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include "constants.h"
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+
+#include "aha152x.h"
+#include <linux/stat.h>
+
+#include <scsi/scsicam.h>
+
+/* /proc/scsi/aha152x directory entry; 7 is the length of "aha152x". */
+struct proc_dir_entry proc_scsi_aha152x = {
+ PROC_SCSI_AHA152X, 7, "aha152x",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* DEFINES */
+
+/* For PCMCIA cards, always use AUTOCONF */
+#if defined(PCMCIA) || defined(MODULE)
+#if !defined(AUTOCONF)
+#define AUTOCONF
+#endif
+#endif
+
+#if !defined(AUTOCONF) && !defined(SETUP0)
+#error define AUTOCONF or SETUP0
+#endif
+
+#if defined(DEBUG_AHA152X)
+
+#undef SKIP_PORTS /* don't display ports */
+
+#undef DEBUG_QUEUE /* debug queue() */
+#undef DEBUG_RESET /* debug reset() */
+#undef DEBUG_INTR /* debug intr() */
+#undef DEBUG_SELECTION /* debug selection part in intr() */
+#undef DEBUG_MSGO /* debug message out phase in intr() */
+#undef DEBUG_MSGI /* debug message in phase in intr() */
+#undef DEBUG_STATUS /* debug status phase in intr() */
+#undef DEBUG_CMD /* debug command phase in intr() */
+#undef DEBUG_DATAI /* debug data in phase in intr() */
+#undef DEBUG_DATAO /* debug data out phase in intr() */
+#undef DEBUG_ABORT /* debug abort() */
+#undef DEBUG_DONE /* debug done() */
+#undef DEBUG_BIOSPARAM /* debug biosparam() */
+
+#undef DEBUG_RACE /* debug race conditions */
+#undef DEBUG_PHASES /* debug phases (useful to trace) */
+#undef DEBUG_QUEUES /* debug reselection */
+
+/* recently used for debugging */
+#if 0
+#endif
+
+#define DEBUG_SELECTION
+#define DEBUG_PHASES
+#define DEBUG_RESET
+#define DEBUG_ABORT
+
+#define DEBUG_DEFAULT (debug_reset|debug_abort)
+
+#endif
+
+/* END OF DEFINES */
+
+extern unsigned long loops_per_sec;
+
+#define DELAY_DEFAULT 100
+
+/* some additional "phases" for getphase() */
+#define P_BUSFREE 1
+#define P_PARITY 2
+
+/* possible irq range */
+#define IRQ_MIN 9
+#define IRQ_MAX 12
+/* number of irqs in [IRQ_MIN, IRQ_MAX]; parenthesized so the macro
+   expands safely inside any surrounding expression */
+#define IRQS (IRQ_MAX-IRQ_MIN+1)
+
+/* Bit flags kept in SCp.phase, recording how far a command has
+   progressed on the SCSI bus (queued but not issued, in selection,
+   disconnected, being aborted, ...). */
+enum {
+ not_issued = 0x0001,
+ in_selection = 0x0002,
+ disconnected = 0x0004,
+ aborted = 0x0008,
+ sent_ident = 0x0010,
+ in_other = 0x0020,
+ in_sync = 0x0040,
+ sync_ok = 0x0080,
+};
+
+#if defined(MODULE)
+/* Module parameters for up to two controllers.  The element order must
+   match the field order of struct aha152x_setup (io_port, irq, scsiid,
+   reconnect, parity, synchronous, delay, ext_trans[, debug]) -- they
+   are copied field by field in aha152x_detect(). */
+#if defined(DEBUG_AHA152X)
+int aha152x[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0, DEBUG_DEFAULT };
+int aha152x1[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0, DEBUG_DEFAULT };
+#else
+int aha152x[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0 };
+int aha152x1[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0 };
+#endif
+#endif
+
+/* set by aha152x_setup according to the command line */
+static int setup_count=0;
+/* per-controller configuration; at most two controllers supported */
+static struct aha152x_setup {
+ int io_port; /* base i/o port of the controller */
+ int irq;
+ int scsiid; /* SCSI id of the host adapter */
+ int reconnect; /* allow targets to disconnect/reconnect */
+ int parity; /* enable parity checking */
+ int synchronous; /* enable synchronous transfers */
+ int delay; /* delay (jiffies) after bus reset */
+ int ext_trans; /* use extended geometry translation (>1GB disks) */
+#ifdef DEBUG_AHA152X
+ int debug; /* bitmask of debug_* flags */
+#endif
+ char *conf; /* original command line string (for messages) */
+} setup[2];
+
+/* one registered host per possible irq; indexed by (irq - IRQ_MIN) */
+static struct Scsi_Host *aha152x_host[IRQS];
+
+/* shorthands for the per-host private data; all expect `shpnt' in scope */
+#define HOSTDATA(shpnt) ((struct aha152x_hostdata *) &shpnt->hostdata)
+#define CURRENT_SC (HOSTDATA(shpnt)->current_SC)
+#define ISSUE_SC (HOSTDATA(shpnt)->issue_SC)
+#define DISCONNECTED_SC (HOSTDATA(shpnt)->disconnected_SC)
+#define DELAY (HOSTDATA(shpnt)->delay)
+#define EXT_TRANS (HOSTDATA(shpnt)->ext_trans)
+#define SYNCRATE (HOSTDATA(shpnt)->syncrate[CURRENT_SC->target])
+#define MSG(i) (HOSTDATA(shpnt)->message[i])
+#define MSGLEN (HOSTDATA(shpnt)->message_len)
+#define ADDMSG(x) (MSG(MSGLEN++)=x)
+
+/* Per-host private state, kept in Scsi_Host::hostdata (see HOSTDATA). */
+struct aha152x_hostdata {
+ Scsi_Cmnd *issue_SC; /* queue of commands waiting to be issued */
+ Scsi_Cmnd *current_SC; /* command currently on the bus */
+ Scsi_Cmnd *disconnected_SC; /* queue of disconnected commands */
+ int aborting; /* nonzero while an abort is in progress */
+ int abortion_complete; /* set when the abort has finished */
+ int abort_result; /* result returned by aha152x_abort() */
+ int commands; /* commands currently owned by the driver (led) */
+
+ /* configuration, copied from setup[] at detect time */
+ int reconnect;
+ int parity;
+ int synchronous;
+ int delay;
+ int ext_trans;
+
+ int swint; /* bumped by aha152x_swintr() during irq probing */
+
+ unsigned char syncrate[8]; /* negotiated SCSIRATE value per target */
+
+ unsigned char message[256]; /* outgoing message bytes (see ADDMSG) */
+ int message_len;
+
+#ifdef DEBUG_AHA152X
+ int debug;
+#endif
+};
+
+void aha152x_intr(int irq, void *dev_id, struct pt_regs *);
+void aha152x_done(struct Scsi_Host *shpnt, int error);
+void aha152x_setup(char *str, int *ints);
+int aha152x_checksetup(struct aha152x_setup *setup);
+
+static void aha152x_reset_ports(struct Scsi_Host *shpnt);
+static void aha152x_panic(struct Scsi_Host *shpnt, char *msg);
+
+static void disp_ports(struct Scsi_Host *shpnt);
+static void show_command(Scsi_Cmnd *ptr);
+static void show_queues(struct Scsi_Host *shpnt);
+static void disp_enintr(struct Scsi_Host *shpnt);
+
+#if defined(DEBUG_RACE)
+static void enter_driver(const char *);
+static void leave_driver(const char *);
+#endif
+
+/* possible i/o addresses for the AIC-6260 */
+static unsigned short ports[] =
+{
+ 0x340, /* default first */
+ 0x140
+};
+/* number of entries in ports[] */
+#define PORT_COUNT (sizeof(ports) / sizeof(unsigned short))
+
+#if !defined(SKIP_BIOSTEST)
+/* possible locations for the Adaptec BIOS */
+static void *addresses[] =
+{
+ (void *) 0xdc000, /* default first */
+ (void *) 0xc8000,
+ (void *) 0xcc000,
+ (void *) 0xd0000,
+ (void *) 0xd4000,
+ (void *) 0xd8000,
+ (void *) 0xe0000,
+ (void *) 0xeb800, /* VTech Platinum SMP */
+ (void *) 0xf0000,
+};
+#define ADDRESS_COUNT (sizeof(addresses) / sizeof(void *))
+
+/* signatures for various AIC-6[23]60 based controllers.
+ The point in detecting signatures is to avoid useless and maybe
+ harmful probes on ports. I'm not sure that all listed boards pass
+ auto-configuration. For those which fail the BIOS signature is
+ obsolete, because user intervention to supply the configuration is
+ needed anyway. May be an information whether or not the BIOS supports
+ extended translation could be also useful here. */
+static struct signature {
+ char *signature; /* string to look for */
+ int sig_offset; /* byte offset from the BIOS base address */
+ int sig_length; /* number of bytes to compare */
+} signatures[] =
+{
+ { "Adaptec AHA-1520 BIOS", 0x102e, 21 }, /* Adaptec 152x */
+ { "Adaptec AHA-1520B", 0x0b, 17 }, /* Adaptec 152x rev B */
+ { "Adaptec AHA-1520B/1522B", 0x3e20, 23 }, /* Adaptec 1520B/1522B */
+ { "Adaptec ASW-B626 BIOS", 0x1029, 21 }, /* on-board controller */
+ { "Adaptec BIOS: ASW-B626", 0x0f, 22 }, /* on-board controller */
+ { "Adaptec ASW-B626 S2", 0x2e6c, 19 }, /* on-board controller */
+ { "Adaptec BIOS:AIC-6360", 0xc, 21 }, /* on-board controller */
+ { "ScsiPro SP-360 BIOS", 0x2873, 19 }, /* ScsiPro-Controller */
+ { "GA-400 LOCAL BUS SCSI BIOS", 0x102e, 26 }, /* Gigabyte Local-Bus-SCSI */
+ { "Adaptec BIOS:AVA-282X", 0xc, 21 }, /* Adaptec 282x */
+ { "Adaptec IBM Dock II SCSI", 0x2edd, 24 }, /* IBM Thinkpad Dock II */
+ { "Adaptec BIOS:AHA-1532P", 0x1c, 22 }, /* IBM Thinkpad Dock II SCSI */
+};
+#define SIGNATURE_COUNT (sizeof(signatures) / sizeof(struct signature))
+#endif
+
+
+/* Busy-wait for amount*10 milliseconds (one jiffy == 10 ms here). */
+static void do_pause(unsigned amount)
+{
+ unsigned long deadline = jiffies + amount;
+
+ while (jiffies < deadline)
+  barrier();
+}
+
+/*
+ * queue services:
+ */
+/*
+ * Append new_SC at the tail of the singly linked queue rooted at *SC.
+ * The chain is threaded through the host_scribble member.
+ */
+static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+{
+ Scsi_Cmnd *tail;
+
+ new_SC->host_scribble = (unsigned char *) NULL;
+
+ if (*SC == NULL) {
+  *SC = new_SC;
+  return;
+ }
+
+ tail = *SC;
+ while (tail->host_scribble)
+  tail = (Scsi_Cmnd *) tail->host_scribble;
+
+ tail->host_scribble = (unsigned char *) new_SC;
+}
+
+/* Detach and return the head of the queue; NULL when the queue is empty. */
+static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC)
+{
+ Scsi_Cmnd *head = *SC;
+
+ if (head != NULL)
+  *SC = (Scsi_Cmnd *) head->host_scribble;
+
+ return head;
+}
+
+/*
+ * Find the first queued command addressed to (target, lun), unlink it
+ * from the queue and return it; NULL when no such command is queued.
+ */
+static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun)
+{
+ Scsi_Cmnd *cur = *SC, *before = NULL;
+
+ while (cur && ((cur->target != target) || (cur->lun != lun))) {
+  before = cur;
+  cur = (Scsi_Cmnd *) cur->host_scribble;
+ }
+
+ if (cur) {
+  if (before)
+   before->host_scribble = cur->host_scribble;
+  else
+   *SC = (Scsi_Cmnd *) cur->host_scribble;
+ }
+
+ return cur;
+}
+
+/*
+ * read inbound byte and wait for ACK to get low
+ */
+static void make_acklow(struct Scsi_Host *shpnt)
+{
+ /* enable SPIO, read the data byte (which acknowledges it), disable SPIO */
+ SETPORT(SXFRCTL0, CH1|SPIOEN);
+ GETPORT(SCSIDAT);
+ SETPORT(SXFRCTL0, CH1);
+
+ /* busy-wait until the ACKI bit in SCSISIG drops */
+ while(TESTHI(SCSISIG, ACKI))
+ barrier();
+}
+
+/*
+ * detect current phase more reliable:
+ * phase is valid, when the target asserts REQ after we've deasserted ACK.
+ *
+ * return value is a valid phase or an error code.
+ *
+ * errorcodes:
+ * P_BUSFREE BUS FREE phase detected
+ * P_PARITY parity error in DATA phase
+ */
+static int getphase(struct Scsi_Host *shpnt)
+{
+ int phase, sstat1;
+
+ while(1) {
+ do {
+ /* spin until bus free, reset-in or REQ is signalled */
+ while(!((sstat1 = GETPORT(SSTAT1)) & (BUSFREE|SCSIRSTI|REQINIT)))
+ barrier();
+ if(sstat1 & BUSFREE)
+ return P_BUSFREE;
+ if(sstat1 & SCSIRSTI) {
+ /* a reset on the bus is only reported, not acted upon here */
+ printk("aha152x: RESET IN\n");
+ SETPORT(SSTAT1, SCSIRSTI);
+ }
+ } while(TESTHI(SCSISIG, ACKI) || TESTLO(SSTAT1, REQINIT));
+
+ SETPORT(SSTAT1, CLRSCSIPERR);
+
+ /* sample the phase bits from SCSISIG */
+ phase = GETPORT(SCSISIG) & P_MASK ;
+
+ if(TESTHI(SSTAT1, SCSIPERR)) {
+ if((phase & (CDO|MSGO))==0) /* DATA phase */
+ return P_PARITY;
+
+ /* parity error outside DATA: ack the byte and retry */
+ make_acklow(shpnt);
+ } else
+ return phase;
+ }
+}
+
+/* called from init/main.c */
+/*
+ * Parse one "aha152x=..." command line entry.  ints[0] holds the number
+ * of integers following in ints[1..]; unspecified values fall back to
+ * defaults.  A valid entry is stored in setup[] and setup_count is
+ * incremented.
+ */
+void aha152x_setup(char *str, int *ints)
+{
+ /* setup[] has exactly two slots; the former `>2' test let a third
+    call write one element past the end of the array */
+ if(setup_count>=2)
+ panic("aha152x: you can only configure up to two controllers\n");
+
+ setup[setup_count].conf = str;
+ setup[setup_count].io_port = ints[0] >= 1 ? ints[1] : 0x340;
+ setup[setup_count].irq = ints[0] >= 2 ? ints[2] : 11;
+ setup[setup_count].scsiid = ints[0] >= 3 ? ints[3] : 7;
+ setup[setup_count].reconnect = ints[0] >= 4 ? ints[4] : 1;
+ setup[setup_count].parity = ints[0] >= 5 ? ints[5] : 1;
+ setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 0 /* FIXME: 1 */;
+ setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
+ setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = ints[0] >= 9 ? ints[9] : DEBUG_DEFAULT;
+ if(ints[0]>9) {
+ printk("aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>[,<DEBUG>]]]]]]]]\n");
+#else
+ if(ints[0]>8) {
+ printk("aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
+#endif
+ } else
+ setup_count++;
+}
+
+/*
+ * Test, if port_base is valid.
+ */
+static int aha152x_porttest(int io_port)
+{
+ int i;
+
+ /* don't touch ports already claimed by another driver */
+ if(check_region(io_port, IO_RANGE))
+ return 0;
+
+ /* write 0..15 into the chip's stack and verify they read back in
+    order; only an AIC-6260/6360 behaves this way */
+ SETPORT(io_port+O_DMACNTRL1, 0); /* reset stack pointer */
+ for(i=0; i<16; i++)
+ SETPORT(io_port+O_STACK, i);
+
+ SETPORT(io_port+O_DMACNTRL1, 0); /* reset stack pointer */
+ for(i=0; i<16 && GETPORT(io_port+O_STACK)==i; i++)
+ ;
+
+ return(i==16);
+}
+
+/*
+ * Sanity-check one configuration: known i/o port (unless PCMCIA), the
+ * chip answers at that port, and all values are in range.  Returns 1
+ * when the setup is usable, 0 otherwise.
+ */
+int aha152x_checksetup(struct aha152x_setup *setup)
+{
+ int i;
+
+#ifndef PCMCIA
+ for(i=0; i<PORT_COUNT && (setup->io_port != ports[i]); i++)
+ ;
+
+ if(i==PORT_COUNT)
+ return 0;
+#endif
+
+ if(!aha152x_porttest(setup->io_port))
+ return 0;
+
+ /* reject irqs outside [IRQ_MIN, IRQ_MAX]; the former `&&' made this
+    condition impossible to satisfy, so bogus irqs slipped through */
+ if(setup->irq<IRQ_MIN || setup->irq>IRQ_MAX)
+ return 0;
+
+ if((setup->scsiid < 0) || (setup->scsiid > 7))
+ return 0;
+
+ if((setup->reconnect < 0) || (setup->reconnect > 1))
+ return 0;
+
+ if((setup->parity < 0) || (setup->parity > 1))
+ return 0;
+
+ if((setup->synchronous < 0) || (setup->synchronous > 1))
+ return 0;
+
+ if((setup->ext_trans < 0) || (setup->ext_trans > 1))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Temporary interrupt handler installed during detection: only counts
+ * software interrupts so detect() can verify the configured irq works.
+ */
+void aha152x_swintr(int irqno, void *dev_id, struct pt_regs * regs)
+{
+ struct Scsi_Host *shpnt = aha152x_host[irqno-IRQ_MIN];
+
+ if(!shpnt)
+ panic("aha152x: catched software interrupt for unknown controller.\n");
+
+ HOSTDATA(shpnt)->swint++;
+}
+
+
+/*
+ * Probe for controllers and register them with the SCSI midlayer.
+ * Configuration sources, in order of precedence: command line (setup[]),
+ * compiled-in overrides (SETUP0/SETUP1), module parameters, and finally
+ * autoconfiguration (BIOS signature scan + port probe).  Returns the
+ * number of controllers found (as a truth value).
+ */
+int aha152x_detect(Scsi_Host_Template * tpnt)
+{
+ int i, j, ok;
+#if defined(AUTOCONF)
+ aha152x_config conf;
+#endif
+
+ tpnt->proc_dir = &proc_scsi_aha152x;
+
+ for(i=0; i<IRQS; i++)
+ aha152x_host[i] = (struct Scsi_Host *) NULL;
+
+ /* validate what aha152x_setup() collected from the command line */
+ if(setup_count) {
+ printk("aha152x: processing commandline: ");
+
+ for(i=0; i<setup_count; i++)
+ if(!aha152x_checksetup(&setup[i])) {
+ printk("\naha152x: %s\n", setup[i].conf);
+ printk("aha152x: invalid line (controller=%d)\n", i+1);
+ }
+
+ printk("ok\n");
+ }
+
+ /* compiled-in overrides (skipped when they duplicate setup[0]) */
+#ifdef SETUP0
+ if(setup_count<2) {
+ struct aha152x_setup override = SETUP0;
+
+ if(setup_count==0 || (override.io_port != setup[0].io_port))
+ if(!aha152x_checksetup(&override)) {
+ printk("\naha152x: invalid override SETUP0={0x%x,%d,%d,%d,%d,%d,%d,%d}\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay,
+ override.ext_trans);
+ } else
+ setup[setup_count++] = override;
+ }
+#endif
+
+#ifdef SETUP1
+ if(setup_count<2) {
+ struct aha152x_setup override = SETUP1;
+
+ if(setup_count==0 || (override.io_port != setup[0].io_port))
+ if(!aha152x_checksetup(&override)) {
+ printk("\naha152x: invalid override SETUP1={0x%x,%d,%d,%d,%d,%d,%d,%d}\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay,
+ override.ext_trans);
+ } else
+ setup[setup_count++] = override;
+ }
+#endif
+
+ /* module parameters aha152x= and aha152x1= */
+#if defined(MODULE)
+ if(setup_count<2 && aha152x[0]!=0) {
+ setup[setup_count].conf = "";
+ setup[setup_count].io_port = aha152x[0];
+ setup[setup_count].irq = aha152x[1];
+ setup[setup_count].scsiid = aha152x[2];
+ setup[setup_count].reconnect = aha152x[3];
+ setup[setup_count].parity = aha152x[4];
+ setup[setup_count].synchronous = aha152x[5];
+ setup[setup_count].delay = aha152x[6];
+ setup[setup_count].ext_trans = aha152x[7];
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = aha152x[8];
+#endif
+ if(aha152x_checksetup(&setup[setup_count]))
+ setup_count++;
+ else
+ printk("\naha152x: invalid module argument aha152x=0x%x,%d,%d,%d,%d,%d,%d,%d\n",
+ setup[setup_count].io_port,
+ setup[setup_count].irq,
+ setup[setup_count].scsiid,
+ setup[setup_count].reconnect,
+ setup[setup_count].parity,
+ setup[setup_count].synchronous,
+ setup[setup_count].delay,
+ setup[setup_count].ext_trans);
+ }
+
+ if(setup_count<2 && aha152x1[0]!=0) {
+ setup[setup_count].conf = "";
+ setup[setup_count].io_port = aha152x1[0];
+ setup[setup_count].irq = aha152x1[1];
+ setup[setup_count].scsiid = aha152x1[2];
+ setup[setup_count].reconnect = aha152x1[3];
+ setup[setup_count].parity = aha152x1[4];
+ setup[setup_count].synchronous = aha152x1[5];
+ setup[setup_count].delay = aha152x1[6];
+ setup[setup_count].ext_trans = aha152x1[7];
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = aha152x1[8];
+#endif
+ if(aha152x_checksetup(&setup[setup_count]))
+ setup_count++;
+ else
+ printk("\naha152x: invalid module argument aha152x1=0x%x,%d,%d,%d,%d,%d,%d,%d\n",
+ setup[setup_count].io_port,
+ setup[setup_count].irq,
+ setup[setup_count].scsiid,
+ setup[setup_count].reconnect,
+ setup[setup_count].parity,
+ setup[setup_count].synchronous,
+ setup[setup_count].delay,
+ setup[setup_count].ext_trans);
+ }
+#endif
+
+ /* autoconfiguration: BIOS signature scan, then probe the known ports
+    and read the configuration latches */
+#if defined(AUTOCONF)
+ if(setup_count<2) {
+#if !defined(SKIP_BIOSTEST)
+ ok=0;
+ for(i=0; i < ADDRESS_COUNT && !ok; i++)
+ for(j=0; (j < SIGNATURE_COUNT) && !ok; j++)
+ ok=!memcmp((void *) addresses[i]+signatures[j].sig_offset,
+ (void *) signatures[j].signature,
+ (int) signatures[j].sig_length);
+
+ if(!ok && setup_count==0)
+ return 0;
+
+ printk("aha152x: BIOS test: passed, ");
+#else
+ printk("aha152x: ");
+#endif /* !SKIP_BIOSTEST */
+
+ ok=0;
+ for(i=0; i<PORT_COUNT && setup_count<2; i++) {
+ if((setup_count==1) && (setup[0].io_port == ports[i]))
+ continue;
+
+ if(aha152x_porttest(ports[i])) {
+ ok++;
+ setup[setup_count].io_port = ports[i];
+
+ conf.cf_port =
+ (GETPORT(ports[i]+O_PORTA)<<8) + GETPORT(ports[i]+O_PORTB);
+
+ setup[setup_count].irq = IRQ_MIN + conf.cf_irq;
+ setup[setup_count].scsiid = conf.cf_id;
+ setup[setup_count].reconnect = conf.cf_tardisc;
+ setup[setup_count].parity = !conf.cf_parity;
+ setup[setup_count].synchronous = 0 /* FIXME: conf.cf_syncneg */;
+ setup[setup_count].delay = DELAY_DEFAULT;
+ setup[setup_count].ext_trans = 0;
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = DEBUG_DEFAULT;
+#endif
+ setup_count++;
+ }
+ }
+
+ if(ok)
+ printk("auto configuration: ok, ");
+ }
+#endif
+
+ printk("detected %d controller(s)\n", setup_count);
+
+ /* register and initialize each detected controller */
+ for(i=0; i<setup_count; i++) {
+ struct Scsi_Host *shpnt;
+ unsigned long int the_time;
+
+ shpnt = aha152x_host[setup[i].irq-IRQ_MIN] =
+ scsi_register(tpnt, sizeof(struct aha152x_hostdata));
+
+ shpnt->io_port = setup[i].io_port;
+ shpnt->n_io_port = IO_RANGE;
+ shpnt->irq = setup[i].irq;
+
+ ISSUE_SC = (Scsi_Cmnd *) NULL;
+ CURRENT_SC = (Scsi_Cmnd *) NULL;
+ DISCONNECTED_SC = (Scsi_Cmnd *) NULL;
+
+ HOSTDATA(shpnt)->reconnect = setup[i].reconnect;
+ HOSTDATA(shpnt)->parity = setup[i].parity;
+ HOSTDATA(shpnt)->synchronous = setup[i].synchronous;
+ HOSTDATA(shpnt)->delay = setup[i].delay;
+ HOSTDATA(shpnt)->ext_trans = setup[i].ext_trans;
+#ifdef DEBUG_AHA152X
+ HOSTDATA(shpnt)->debug = setup[i].debug;
+#endif
+
+ HOSTDATA(shpnt)->aborting = 0;
+ HOSTDATA(shpnt)->abortion_complete = 0;
+ HOSTDATA(shpnt)->abort_result = 0;
+ HOSTDATA(shpnt)->commands = 0;
+
+ HOSTDATA(shpnt)->message_len = 0;
+
+ for(j=0; j<8; j++)
+ HOSTDATA(shpnt)->syncrate[j] = 0;
+
+ SETPORT(SCSIID, setup[i].scsiid << 4);
+ shpnt->this_id=setup[i].scsiid;
+
+ if(setup[i].reconnect)
+ shpnt->can_queue=AHA152X_MAXQUEUE;
+
+ /* RESET OUT */
+ SETBITS(SCSISEQ, SCSIRSTO);
+ do_pause(30);
+ CLRBITS(SCSISEQ, SCSIRSTO);
+ do_pause(setup[i].delay);
+
+ aha152x_reset_ports(shpnt);
+
+ printk("aha152x%d: vital data: PORTBASE=0x%03x, IRQ=%d, SCSI ID=%d,"
+ " reconnect=%s, parity=%s, synchronous=%s, delay=%d, extended translation=%s\n",
+ i,
+ shpnt->io_port,
+ shpnt->irq,
+ shpnt->this_id,
+ HOSTDATA(shpnt)->reconnect ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->parity ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->synchronous ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->delay,
+ HOSTDATA(shpnt)->ext_trans ? "enabled" : "disabled");
+
+ request_region(shpnt->io_port, IO_RANGE, "aha152x"); /* Register */
+
+ /* not expecting any interrupts */
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ /* verify the configured irq: install a temporary handler, trigger a
+    software interrupt and check that it arrives */
+ ok = request_irq(shpnt->irq, aha152x_swintr, SA_INTERRUPT, "aha152x", NULL);
+ if(ok<0) {
+ if(ok == -EINVAL)
+ printk("aha152x%d: bad IRQ %d.\n", i, shpnt->irq);
+ else if(ok == -EBUSY)
+ printk("aha152x%d: IRQ %d already in use.\n", i, shpnt->irq);
+ else
+ printk("\naha152x%d: Unexpected error code %d on requesting IRQ %d.\n", i, ok, shpnt->irq);
+ printk("aha152x: driver needs an IRQ.\n");
+
+ scsi_unregister(shpnt);
+ shpnt=aha152x_host[shpnt->irq-IRQ_MIN]=0;
+ continue;
+ }
+
+ HOSTDATA(shpnt)->swint=0;
+
+ printk("aha152x: trying software interrupt, ");
+ SETBITS(DMACNTRL0, SWINT);
+
+ the_time=jiffies+100;
+ while(!HOSTDATA(shpnt)->swint && jiffies<the_time)
+ barrier();
+
+ free_irq(shpnt->irq,0);
+
+ if(!HOSTDATA(shpnt)->swint) {
+ if(TESTHI(DMASTAT, INTSTAT)) {
+ printk("lost.\n");
+ } else {
+ printk("failed.\n");
+ }
+
+ printk("aha152x: IRQ %d possibly wrong. Please verify.\n", shpnt->irq);
+
+ scsi_unregister(shpnt);
+ shpnt=aha152x_host[shpnt->irq-IRQ_MIN]=0;
+ continue;
+ }
+
+ printk("ok.\n");
+
+ CLRBITS(DMACNTRL0, SWINT);
+
+ /* clear interrupts */
+ SETPORT(SSTAT0, 0x7f);
+ SETPORT(SSTAT1, 0xef);
+
+ /* swap in the real interrupt handler */
+ if(request_irq(shpnt->irq,aha152x_intr,SA_INTERRUPT,"aha152x",NULL)<0) {
+ printk("aha152x: failed to reassign interrupt.\n");
+ }
+ }
+
+ return (setup_count>0);
+}
+
+/*
+ * Queue a command and setup interrupts for a free bus.
+ * Called by the midlayer; `done' is invoked when the command completes.
+ * Always returns 0 (the command is accepted).
+ */
+int aha152x_queue(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+
+#if defined(DEBUG_RACE)
+ enter_driver("queue");
+#else
+#if defined(DEBUG_QUEUE)
+ if(HOSTDATA(shpnt)->debug & debug_queue)
+ printk("aha152x: queue(), ");
+#endif
+#endif
+
+#if defined(DEBUG_QUEUE)
+ if(HOSTDATA(shpnt)->debug & debug_queue) {
+ printk("SCpnt (target = %d lun = %d cmnd = ",
+ SCpnt->target, SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ printk(", cmd_len=%d, pieces = %d size = %u), ",
+ SCpnt->cmd_len, SCpnt->use_sg, SCpnt->request_bufflen);
+ disp_ports(shpnt);
+ }
+#endif
+
+ SCpnt->scsi_done = done;
+
+ /* setup scratch area
+ SCp.ptr : buffer pointer
+ SCp.this_residual : buffer length
+ SCp.buffer : next buffer
+ SCp.buffers_residual : left buffers in list
+ SCp.phase : current state of the command */
+ SCpnt->SCp.phase = not_issued;
+ if (SCpnt->use_sg) {
+ /* scatter/gather: start with the first segment */
+ SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
+ SCpnt->SCp.ptr = SCpnt->SCp.buffer->address;
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
+ } else {
+ /* single contiguous buffer */
+ SCpnt->SCp.ptr = (char *)SCpnt->request_buffer;
+ SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+ SCpnt->SCp.buffer = NULL;
+ SCpnt->SCp.buffers_residual = 0;
+ }
+
+ SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Message = 0;
+ SCpnt->SCp.have_data_in = 0;
+ SCpnt->SCp.sent_command = 0;
+
+ /* Turn led on, when this is the first command. */
+ save_flags(flags);
+ cli();
+ HOSTDATA(shpnt)->commands++;
+ if(HOSTDATA(shpnt)->commands==1)
+ SETPORT(PORTA, 1);
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i+ (%d), ", HOSTDATA(shpnt)->commands);
+#endif
+ /* the interrupt routine picks the command up from the issue queue */
+ append_SC(&ISSUE_SC, SCpnt);
+
+ /* Enable bus free interrupt, when we aren't currently on the bus */
+ if(!CURRENT_SC) {
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ }
+ restore_flags(flags);
+
+#if defined(DEBUG_RACE)
+ leave_driver("queue");
+#endif
+
+ return 0;
+}
+
+/*
+ * We only support commands in interrupt-driven fashion
+ * (this entry point exists only to satisfy the host template;
+ * it always fails -- use aha152x_queue instead).
+ */
+int aha152x_command(Scsi_Cmnd *SCpnt)
+{
+ printk("aha152x: interrupt driven driver; use aha152x_queue()\n");
+ return -1;
+}
+
+/*
+ * Abort a queued command
+ * (commands that are on the bus can't be aborted easily)
+ *
+ * Strategy: a command still in the issue queue is simply dequeued and
+ * completed with DID_ABORT; a command occupying the bus makes the abort
+ * fail with SCSI_ABORT_BUSY; a disconnected command is re-selected with
+ * an ABORT message and the interrupt routine finishes the job.
+ */
+int aha152x_abort(Scsi_Cmnd *SCpnt)
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+ Scsi_Cmnd *ptr, *prev;
+
+ save_flags(flags);
+ cli();
+
+#if defined(DEBUG_ABORT)
+ if(HOSTDATA(shpnt)->debug & debug_abort) {
+ printk("aha152x: abort(), SCpnt=0x%08x, ", (unsigned int) SCpnt);
+ show_queues(shpnt);
+ }
+#endif
+
+ /* look for command in issue queue */
+ for(ptr=ISSUE_SC, prev=NULL;
+ ptr && ptr!=SCpnt;
+ prev=ptr, ptr=(Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(ptr) {
+ /* dequeue */
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ ISSUE_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ HOSTDATA(shpnt)->commands--;
+
+ restore_flags(flags);
+
+ /* never started; complete it immediately */
+ ptr->host_scribble = NULL;
+ ptr->result = DID_ABORT << 16;
+ ptr->scsi_done(ptr);
+
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ /* if the bus is busy or a command is currently processed,
+ we can't do anything more */
+ if (TESTLO(SSTAT1, BUSFREE) || (CURRENT_SC && CURRENT_SC!=SCpnt)) {
+ /* fail abortion, if bus is busy */
+
+ if(!CURRENT_SC)
+ printk("bus busy w/o current command, ");
+
+ restore_flags(flags);
+
+ return SCSI_ABORT_BUSY;
+ }
+
+ /* bus is free */
+
+ if(CURRENT_SC) {
+ HOSTDATA(shpnt)->commands--;
+
+ /* target entered bus free before COMMAND COMPLETE, nothing to abort */
+ restore_flags(flags);
+ CURRENT_SC->result = DID_ERROR << 16;
+ CURRENT_SC->scsi_done(CURRENT_SC);
+ CURRENT_SC = (Scsi_Cmnd *) NULL;
+
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ /* look for command in disconnected queue */
+ for(ptr=DISCONNECTED_SC, prev=NULL;
+ ptr && ptr!=SCpnt;
+ prev=ptr, ptr=(Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(!ptr) {
+ /* command wasn't found */
+ printk("command not found\n");
+ restore_flags(flags);
+
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if(!HOSTDATA(shpnt)->aborting) {
+ /* dequeue */
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ DISCONNECTED_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ HOSTDATA(shpnt)->commands--;
+
+ /* set command current and initiate selection,
+ let the interrupt routine take care of the abortion */
+ CURRENT_SC = ptr;
+ ptr->SCp.phase = in_selection|aborted;
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->target);
+
+ ADDMSG(ABORT);
+
+ /* enable interrupts for SELECTION OUT DONE and SELECTION TIME OUT */
+ SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
+ SETPORT(SIMODE1, ENSELTIMO);
+
+ /* Enable SELECTION OUT sequence */
+ SETBITS(SCSISEQ, ENSELO | ENAUTOATNO);
+
+ SETBITS(DMACNTRL0, INTEN);
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_SUCCESS;
+ HOSTDATA(shpnt)->aborting++;
+ HOSTDATA(shpnt)->abortion_complete=0;
+
+ sti(); /* Hi Eric, guess what ;-) */
+
+ /* sleep until the abortion is complete */
+ while(!HOSTDATA(shpnt)->abortion_complete)
+ barrier();
+ HOSTDATA(shpnt)->aborting=0;
+
+ return HOSTDATA(shpnt)->abort_result;
+ } else {
+ /* we're already aborting a command */
+ restore_flags(flags);
+
+ return SCSI_ABORT_BUSY;
+ }
+}
+
+/*
+ * Restore default values to the AIC-6260 registers and reset the fifos
+ * (called after a bus reset and during detection).
+ */
+static void aha152x_reset_ports(struct Scsi_Host *shpnt)
+{
+ /* disable interrupts */
+ SETPORT(DMACNTRL0, RSTFIFO);
+
+ SETPORT(SCSISEQ, 0);
+
+ SETPORT(SXFRCTL1, 0);
+ SETPORT(SCSISIG, 0);
+ SETPORT(SCSIRATE, 0);
+
+ /* clear all interrupt conditions */
+ SETPORT(SSTAT0, 0x7f);
+ SETPORT(SSTAT1, 0xef);
+
+ SETPORT(SSTAT4, SYNCERR|FWERR|FRERR);
+
+ SETPORT(DMACNTRL0, 0);
+ SETPORT(DMACNTRL1, 0);
+
+ SETPORT(BRSTCNTRL, 0xf1);
+
+ /* clear SCSI fifo and transfer count */
+ SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1);
+
+ /* enable interrupts */
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+}
+
+/*
+ * Reset registers, reset a hanging bus and
+ * kill active and disconnected commands for target w/o soft reset
+ * (commands on devices flagged soft_reset survive the bus reset).
+ */
+int aha152x_reset(Scsi_Cmnd *SCpnt, unsigned int unused)
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+ Scsi_Cmnd *ptr, *prev, *next;
+
+ aha152x_reset_ports(shpnt);
+
+ /* Reset, if bus hangs */
+ if(TESTLO(SSTAT1, BUSFREE)) {
+ CLRBITS(DMACNTRL0, INTEN);
+
+#if defined(DEBUG_RESET)
+ if(HOSTDATA(shpnt)->debug & debug_reset) {
+ printk("aha152x: reset(), bus not free: SCSI RESET OUT\n");
+ show_queues(shpnt);
+ }
+#endif
+
+ /* fail the current command unless its device wants soft resets */
+ ptr=CURRENT_SC;
+ if(ptr && !ptr->device->soft_reset) {
+ ptr->host_scribble = NULL;
+ ptr->result = DID_RESET << 16;
+ ptr->scsi_done(CURRENT_SC);
+ CURRENT_SC=NULL;
+ }
+
+ /* walk the disconnected queue, completing every command whose
+    device does not use soft resets */
+ save_flags(flags);
+ cli();
+ prev=NULL; ptr=DISCONNECTED_SC;
+ while(ptr) {
+ if(!ptr->device->soft_reset) {
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ DISCONNECTED_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ next = (Scsi_Cmnd *) ptr->host_scribble;
+
+ ptr->host_scribble = NULL;
+ ptr->result = DID_RESET << 16;
+ ptr->scsi_done(ptr);
+
+ ptr = next;
+ } else {
+ prev=ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble;
+ }
+ }
+ restore_flags(flags);
+
+#if defined(DEBUG_RESET)
+ if(HOSTDATA(shpnt)->debug & debug_reset) {
+ printk("commands on targets w/ soft-resets:\n");
+ show_queues(shpnt);
+ }
+#endif
+
+ /* RESET OUT */
+ SETPORT(SCSISEQ, SCSIRSTO);
+ do_pause(30);
+ SETPORT(SCSISEQ, 0);
+ do_pause(DELAY);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+ SETPORT(DMACNTRL0, INTEN);
+ }
+
+ return SCSI_RESET_SUCCESS;
+}
+
+/*
+ * Return the "logical geometry"
+ *
+ * Fills info_array with heads/sectors/cylinders.  Default translation
+ * is C/32/64; for disks with >=1024 resulting cylinders (>1GB) the
+ * partition table is consulted via scsicam_bios_param(), and extended
+ * translation (C/63/255) is used when enabled or already in use on disk.
+ * Always returns 0.
+ */
+int aha152x_biosparam(Scsi_Disk * disk, kdev_t dev, int *info_array)
+{
+ struct Scsi_Host *shpnt=disk->device->host;
+
+#if defined(DEBUG_BIOSPARAM)
+ if(HOSTDATA(shpnt)->debug & debug_biosparam)
+ printk("aha152x_biosparam: dev=%s, size=%d, ",
+ kdevname(dev), disk->capacity);
+#endif
+
+ /* try default translation */
+ info_array[0]=64;
+ info_array[1]=32;
+ info_array[2]=disk->capacity / (64 * 32);
+
+ /* for disks >1GB do some guessing */
+ if(info_array[2]>=1024) {
+ int info[3];
+
+ /* try to figure out the geometry from the partition table */
+ if(scsicam_bios_param(disk, dev, info)<0 ||
+ !((info[0]==64 && info[1]==32) || (info[0]==255 && info[1]==63))) {
+ if(EXT_TRANS) {
+ printk("aha152x: unable to verify geometry for disk with >1GB.\n"
+ " using extended translation.\n");
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = disk->capacity / (255 * 63);
+ } else {
+ printk("aha152x: unable to verify geometry for disk with >1GB.\n"
+ " Using default translation. Please verify yourself.\n"
+ " Perhaps you need to enable extended translation in the driver.\n"
+ " See /usr/src/linux/drivers/scsi/aha152x.c for details.\n");
+ }
+ } else {
+ /* the partition table already uses a recognized geometry */
+ info_array[0]=info[0];
+ info_array[1]=info[1];
+ info_array[2]=info[2];
+
+ if(info[0]==255 && !EXT_TRANS) {
+ /* message typo fixed: "explicty" -> "explicitly" */
+ printk("aha152x: current partition table is using extended translation.\n"
+ " using it also, although it's not explicitly enabled.\n");
+ }
+ }
+ }
+
+#if defined(DEBUG_BIOSPARAM)
+ if(HOSTDATA(shpnt)->debug & debug_biosparam) {
+ printk("bios geometry: head=%d, sec=%d, cyl=%d\n",
+ info_array[0], info_array[1], info_array[2]);
+ printk("WARNING: check, if the bios geometry is correct.\n");
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Internal done function
+ * Completes CURRENT_SC with the given error code: updates the command
+ * count/led, re-arms the bus-free/reselection interrupts and calls the
+ * midlayer's scsi_done.  Panics when no command is current.
+ */
+void aha152x_done(struct Scsi_Host *shpnt, int error)
+{
+ unsigned long flags;
+ Scsi_Cmnd *done_SC;
+
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done) {
+ printk("\naha152x: done(), ");
+ disp_ports(shpnt);
+ }
+#endif
+
+ if(CURRENT_SC) {
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("done(%x), ", error);
+#endif
+
+ save_flags(flags);
+ cli();
+
+ done_SC = CURRENT_SC;
+ CURRENT_SC = NULL;
+
+ /* turn led off, when no commands are in the driver */
+ HOSTDATA(shpnt)->commands--;
+ if(!HOSTDATA(shpnt)->commands)
+ SETPORT(PORTA, 0); /* turn led off */
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("ok (%d), ", HOSTDATA(shpnt)->commands);
+#endif
+ restore_flags(flags);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+#if 0
+/* Why poll for the BUS FREE phase, when we have setup the interrupt!? */
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("BUS FREE loop, ");
+#endif
+ while(TESTLO(SSTAT1, BUSFREE))
+ barrier();
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("BUS FREE\n");
+#endif
+#endif
+
+ done_SC->result = error;
+ if(done_SC->scsi_done) {
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("calling scsi_done, ");
+#endif
+ done_SC->scsi_done(done_SC);
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("done returned, ");
+#endif
+ } else
+ panic("aha152x: current_SC->scsi_done() == NULL");
+ } else
+ aha152x_panic(shpnt, "done() called outside of command");
+}
+
+/*
+ * Interrupt handler (main routine of the driver).
+ *
+ * Dispatches on the current SCSI bus condition: handles reselection by a
+ * disconnected target, issues the next queued command when the bus is free,
+ * completes a pending selection, and otherwise runs one step of the
+ * information-transfer state machine (MSG OUT/IN, COMMAND, STATUS,
+ * DATA IN/OUT, BUS FREE, parity error) for CURRENT_SC.
+ *
+ * Interrupts from the controller are masked (INTEN cleared) for the whole
+ * body and re-enabled on every exit path; each branch therefore ends with
+ * SETBITS(DMACNTRL0, INTEN) and an immediate return.
+ */
+void aha152x_intr(int irqno, void *dev_id, struct pt_regs * regs)
+{
+ struct Scsi_Host *shpnt = aha152x_host[irqno-IRQ_MIN];
+ unsigned long flags;
+ int done=0, phase;
+
+#if defined(DEBUG_RACE)
+ enter_driver("intr");
+#else
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ printk("\naha152x: intr(), ");
+#endif
+#endif
+
+ /* NOTE(review): shpnt is dereferenced above (DEBUG builds) before this
+ NULL check; the check should ideally come first. */
+ if(!shpnt)
+ panic("aha152x: catched interrupt for unknown controller.\n");
+
+ /* no more interrupts from the controller, while we're busy.
+ INTEN has to be restored, when we're ready to leave
+ intr(). To avoid race conditions, we have to return
+ immediately afterwards. */
+ CLRBITS(DMACNTRL0, INTEN);
+ sti(); /* Yes, sti() really needs to be here */
+
+ /* disconnected target is trying to reconnect.
+ Only possible, if we have disconnected nexuses and
+ nothing is occupying the bus.
+ */
+ if(TESTHI(SSTAT0, SELDI) &&
+ DISCONNECTED_SC &&
+ (!CURRENT_SC || (CURRENT_SC->SCp.phase & in_selection)) ) {
+ int identify_msg, target, i;
+
+ /* Avoid conflicts when a target reconnects
+ while we are trying to connect to another. */
+ if(CURRENT_SC) {
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i+, ");
+#endif
+ save_flags(flags);
+ cli();
+ append_SC(&ISSUE_SC, CURRENT_SC);
+ CURRENT_SC=NULL;
+ restore_flags(flags);
+ }
+
+ /* disable sequences */
+ SETPORT(SCSISEQ, 0);
+ SETPORT(SSTAT0, CLRSELDI);
+ SETPORT(SSTAT1, CLRBUSFREE);
+
+#if defined(DEBUG_QUEUES) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_queues|debug_phases))
+ printk("reselected, ");
+#endif
+
+ i = GETPORT(SELID) & ~(1 << shpnt->this_id);
+ target=0;
+
+ if(i==0)
+ aha152x_panic(shpnt, "reconnecting target unknown");
+
+ /* the reconnecting target's id is the index of the (presumably
+ single) set bit left in SELID after masking our own id */
+ for(; (i & 1)==0; target++, i>>=1)
+ ;
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("SELID=%02x, target=%d, ", GETPORT(SELID), target);
+#endif
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | target);
+ SETPORT(SCSISEQ, ENRESELI);
+
+ if(TESTLO(SSTAT0, SELDI))
+ aha152x_panic(shpnt, "RESELI failed");
+
+ SETPORT(SCSIRATE, HOSTDATA(shpnt)->syncrate[target]&0x7f);
+
+ SETPORT(SCSISIG, P_MSGI);
+
+ /* Get identify message */
+ if((i=getphase(shpnt))!=P_MSGI) {
+ printk("target doesn't enter MSGI to identify (phase=%02x)\n", i);
+ aha152x_panic(shpnt, "unknown lun");
+ }
+ SETPORT(SCSISEQ, 0);
+
+ SETPORT(SXFRCTL0, CH1);
+
+ identify_msg = GETPORT(SCSIBUS);
+
+ if(!(identify_msg & IDENTIFY_BASE)) {
+ printk("target=%d, inbound message (%02x) != IDENTIFY\n",
+ target, identify_msg);
+ aha152x_panic(shpnt, "unknown lun");
+ }
+
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("identify=%02x, lun=%d, ", identify_msg, identify_msg & 0x3f);
+#endif
+
+ save_flags(flags);
+ cli();
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("d-, ");
+#endif
+ CURRENT_SC = remove_SC(&DISCONNECTED_SC, target, identify_msg & 0x3f);
+
+ if(!CURRENT_SC) {
+ printk("lun=%d, ", identify_msg & 0x3f);
+ aha152x_panic(shpnt, "no disconnected command for that lun");
+ }
+
+ CURRENT_SC->SCp.phase &= ~disconnected;
+ restore_flags(flags);
+
+ make_acklow(shpnt);
+ if(getphase(shpnt)!=P_MSGI) {
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+#if defined(DEBUG_RACE)
+ leave_driver("(reselected) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+ }
+
+ /* Check, if we aren't busy with a command */
+ if(!CURRENT_SC) {
+ /* bus is free to issue a queued command */
+ if(TESTHI(SSTAT1, BUSFREE) && ISSUE_SC) {
+ save_flags(flags);
+ cli();
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i-, ");
+#endif
+ CURRENT_SC = remove_first_SC(&ISSUE_SC);
+ restore_flags(flags);
+
+#if defined(DEBUG_INTR) || defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_selection|debug_phases))
+ printk("issuing command, ");
+#endif
+ CURRENT_SC->SCp.phase = in_selection;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_selection|debug_phases))
+ printk("selecting %d, ", CURRENT_SC->target);
+#endif
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->target);
+
+ /* Enable interrupts for SELECTION OUT DONE and SELECTION OUT INITIATED */
+ SETPORT(SXFRCTL1, HOSTDATA(shpnt)->parity ? (ENSPCHK|ENSTIMER) : ENSTIMER);
+
+ /* enable interrupts for SELECTION OUT DONE and SELECTION TIME OUT */
+ SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
+ SETPORT(SIMODE1, ENSELTIMO);
+
+ /* Enable SELECTION OUT sequence */
+ SETBITS(SCSISEQ, ENSELO | ENAUTOATNO);
+
+ } else {
+ /* No command we are busy with and no new to issue */
+ printk("aha152x: ignoring spurious interrupt, nothing to do\n");
+ if(TESTHI(DMACNTRL0, SWINT)) {
+ printk("aha152x: SWINT is set! Why?\n");
+ CLRBITS(DMACNTRL0, SWINT);
+ }
+ show_queues(shpnt);
+ }
+
+#if defined(DEBUG_RACE)
+ leave_driver("(selecting) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+
+ /* the bus is busy with something */
+
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ disp_ports(shpnt);
+#endif
+
+ /* we are waiting for the result of a selection attempt */
+ if(CURRENT_SC->SCp.phase & in_selection) {
+ if(TESTLO(SSTAT1, SELTO)) {
+ /* no timeout */
+ if(TESTHI(SSTAT0, SELDO)) {
+ /* clear BUS FREE interrupt */
+ SETPORT(SSTAT1, CLRBUSFREE);
+
+ /* Disable SELECTION OUT sequence */
+ CLRBITS(SCSISEQ, ENSELO|ENAUTOATNO);
+
+ /* Disable SELECTION OUT DONE interrupt */
+ CLRBITS(SIMODE0, ENSELDO);
+ CLRBITS(SIMODE1, ENSELTIMO);
+
+ if(TESTLO(SSTAT0, SELDO)) {
+ printk("aha152x: passing bus free condition\n");
+
+#if defined(DEBUG_RACE)
+ leave_driver("(passing bus free) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+
+ if(CURRENT_SC->SCp.phase & aborted) {
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_ERROR;
+ HOSTDATA(shpnt)->abortion_complete++;
+ }
+
+ aha152x_done(shpnt, DID_NO_CONNECT << 16);
+
+ return;
+ }
+#if defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_selection|debug_phases))
+ printk("SELDO (SELID=%x), ", GETPORT(SELID));
+#endif
+
+ /* selection was done */
+ SETPORT(SSTAT0, CLRSELDO);
+
+#if defined(DEBUG_ABORT)
+ if((HOSTDATA(shpnt)->debug & debug_abort) && (CURRENT_SC->SCp.phase & aborted))
+ printk("(ABORT) target selected, ");
+#endif
+
+ CURRENT_SC->SCp.phase &= ~in_selection;
+ CURRENT_SC->SCp.phase |= in_other;
+
+ ADDMSG(IDENTIFY(HOSTDATA(shpnt)->reconnect,CURRENT_SC->lun));
+
+ /* queue an SDTR negotiation if sync transfers are enabled and
+ this target has not negotiated yet (bit 7 of SYNCRATE clear) */
+ if(!(SYNCRATE&0x80) && HOSTDATA(shpnt)->synchronous) {
+ ADDMSG(EXTENDED_MESSAGE);
+ ADDMSG(3);
+ ADDMSG(EXTENDED_SDTR);
+ ADDMSG(50);
+ ADDMSG(8);
+
+ printk("outbound SDTR: ");
+ print_msg(&MSG(MSGLEN-5));
+
+ SYNCRATE=0x80;
+ CURRENT_SC->SCp.phase |= in_sync;
+ }
+
+#if defined(DEBUG_RACE)
+ leave_driver("(SELDO) intr");
+#endif
+ SETPORT(SCSIRATE, SYNCRATE&0x7f);
+
+ SETPORT(SCSISIG, P_MSGO);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENREQINIT|ENBUSFREE);
+ SETBITS(DMACNTRL0, INTEN);
+
+ return;
+ } else
+ aha152x_panic(shpnt, "neither timeout nor selection\007");
+ } else {
+#if defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_selection|debug_phases))
+ printk("SELTO, ");
+#endif
+ /* end selection attempt */
+ CLRBITS(SCSISEQ, ENSELO|ENAUTOATNO);
+
+ /* timeout */
+ SETPORT(SSTAT1, CLRSELTIMO);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ SETBITS(DMACNTRL0, INTEN);
+#if defined(DEBUG_RACE)
+ leave_driver("(SELTO) intr");
+#endif
+
+ if(CURRENT_SC->SCp.phase & aborted) {
+#if defined(DEBUG_ABORT)
+ if(HOSTDATA(shpnt)->debug & debug_abort)
+ printk("(ABORT) selection timeout, ");
+#endif
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_ERROR;
+ HOSTDATA(shpnt)->abortion_complete++;
+ }
+
+ if(TESTLO(SSTAT0, SELINGO))
+ /* ARBITRATION not won */
+ aha152x_done(shpnt, DID_BUS_BUSY << 16);
+ else
+ /* ARBITRATION won, but SELECTION failed */
+ aha152x_done(shpnt, DID_NO_CONNECT << 16);
+
+ return;
+ }
+ }
+
+ /* enable interrupt, when target leaves current phase */
+ phase = getphase(shpnt);
+ if(!(phase & ~P_MASK)) /* "real" phase */
+ SETPORT(SCSISIG, phase);
+ SETPORT(SSTAT1, CLRPHASECHG);
+ /* remember the latest bus phase in the high bits of SCp.phase;
+ bit 16 is used as a "phase ended" marker elsewhere in the driver */
+ CURRENT_SC->SCp.phase =
+ (CURRENT_SC->SCp.phase & ~((P_MASK|1)<<16)) | (phase << 16);
+
+ /* information transfer phase */
+ switch(phase) {
+ case P_MSGO: /* MESSAGE OUT */
+ {
+ int i, identify=0, abort=0;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_MSGO) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_msgo|debug_phases))
+ printk("MESSAGE OUT, ");
+#endif
+ if(MSGLEN==0) {
+ ADDMSG(MESSAGE_REJECT);
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo)
+ printk("unexpected MESSAGE OUT phase; rejecting, ");
+#endif
+ }
+
+ CLRBITS(SXFRCTL0, ENDMA);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENREQINIT|ENBUSFREE);
+
+ /* wait for data latch to become ready or a phase change */
+ while(TESTLO(DMASTAT, INTSTAT))
+ barrier();
+
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo) {
+ int i;
+
+ printk("messages (");
+ for(i=0; i<MSGLEN; i+=print_msg(&MSG(i)), printk(" "))
+ ;
+ printk("), ");
+ }
+#endif
+
+ for(i=0; i<MSGLEN && TESTLO(SSTAT1, PHASEMIS); i++) {
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo)
+ printk("%x ", MSG(i));
+#endif
+ if(i==MSGLEN-1) {
+ /* Leave MESSAGE OUT after transfer */
+ SETPORT(SSTAT1, CLRATNO);
+ }
+
+ SETPORT(SCSIDAT, MSG(i));
+
+ make_acklow(shpnt);
+ getphase(shpnt);
+
+ if(MSG(i)==IDENTIFY(HOSTDATA(shpnt)->reconnect,CURRENT_SC->lun))
+ identify++;
+
+ if(MSG(i)==ABORT)
+ abort++;
+
+ }
+
+ MSGLEN=0;
+
+ if(identify)
+ CURRENT_SC->SCp.phase |= sent_ident;
+
+ if(abort) {
+ /* revive abort(); abort() enables interrupts */
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_SUCCESS;
+ HOSTDATA(shpnt)->abortion_complete++;
+
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ /* exit */
+ SETBITS(DMACNTRL0, INTEN);
+#if defined(DEBUG_RACE)
+ leave_driver("(ABORT) intr");
+#endif
+ aha152x_done(shpnt, DID_ABORT<<16);
+
+ return;
+ }
+ }
+ break;
+
+ case P_CMD: /* COMMAND phase */
+#if defined(DEBUG_INTR) || defined(DEBUG_CMD) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_cmd|debug_phases))
+ printk("COMMAND, ");
+#endif
+ if(!(CURRENT_SC->SCp.sent_command)) {
+ int i;
+
+ CLRBITS(SXFRCTL0, ENDMA);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENREQINIT|ENBUSFREE);
+
+ /* wait for data latch to become ready or a phase change */
+ while(TESTLO(DMASTAT, INTSTAT))
+ barrier();
+
+ for(i=0; i<CURRENT_SC->cmd_len && TESTLO(SSTAT1, PHASEMIS); i++) {
+ SETPORT(SCSIDAT, CURRENT_SC->cmnd[i]);
+
+ make_acklow(shpnt);
+ getphase(shpnt);
+ }
+
+ if(i<CURRENT_SC->cmd_len && TESTHI(SSTAT1, PHASEMIS))
+ aha152x_panic(shpnt, "target left COMMAND");
+
+ CURRENT_SC->SCp.sent_command++;
+ } else
+ aha152x_panic(shpnt, "Nothing to send while in COMMAND");
+ break;
+
+ case P_MSGI: /* MESSAGE IN phase */
+ {
+ int start_sync=0;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_msgi|debug_phases))
+ printk("MESSAGE IN, ");
+#endif
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENBUSFREE);
+
+ while(phase == P_MSGI) {
+ CURRENT_SC->SCp.Message = GETPORT(SCSIDAT);
+ switch(CURRENT_SC->SCp.Message) {
+ case DISCONNECT:
+#if defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_msgi|debug_phases))
+ printk("target disconnected, ");
+#endif
+ CURRENT_SC->SCp.Message = 0;
+ CURRENT_SC->SCp.phase |= disconnected;
+ if(!HOSTDATA(shpnt)->reconnect)
+ aha152x_panic(shpnt, "target was not allowed to disconnect");
+
+ break;
+
+ case COMMAND_COMPLETE:
+#if defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_msgi|debug_phases))
+ printk("inbound message (COMMAND COMPLETE), ");
+#endif
+ done++;
+ break;
+
+ case MESSAGE_REJECT:
+ if(CURRENT_SC->SCp.phase & in_sync) {
+ CURRENT_SC->SCp.phase &= ~in_sync;
+ SYNCRATE=0x80;
+ printk("synchronous rejected, ");
+ } else
+ printk("inbound message (MESSAGE REJECT), ");
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (MESSAGE REJECT), ");
+#endif
+ break;
+
+ case SAVE_POINTERS:
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (SAVE DATA POINTERS), ");
+#endif
+ break;
+
+ case RESTORE_POINTERS:
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (RESTORE DATA POINTERS), ");
+#endif
+ break;
+
+ case EXTENDED_MESSAGE:
+ {
+ char buffer[16];
+ int i;
+
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (EXTENDED MESSAGE), ");
+#endif
+ make_acklow(shpnt);
+ if(getphase(shpnt)!=P_MSGI)
+ break;
+
+ /* buffer[1] is the extended-message length announced
+ by the target; the payload follows byte by byte */
+ buffer[0]=EXTENDED_MESSAGE;
+ buffer[1]=GETPORT(SCSIDAT);
+
+ for(i=0; i<buffer[1] &&
+ (make_acklow(shpnt), getphase(shpnt)==P_MSGI); i++)
+ buffer[2+i]=GETPORT(SCSIDAT);
+
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ print_msg(buffer);
+#endif
+
+ switch(buffer [2]) {
+ case EXTENDED_SDTR:
+ {
+ long ticks;
+
+ if(buffer[1]!=3)
+ aha152x_panic(shpnt, "SDTR message length != 3");
+
+ if(!HOSTDATA(shpnt)->synchronous)
+ break;
+
+ printk("inbound SDTR: "); print_msg(buffer);
+
+ ticks=(buffer[3]*4+49)/50;
+
+ if(CURRENT_SC->SCp.phase & in_sync) {
+ /* we initiated SDTR */
+ if(ticks>9 || buffer[4]<1 || buffer[4]>8)
+ aha152x_panic(shpnt, "received SDTR invalid");
+
+ SYNCRATE |= ((ticks-2)<<4) + buffer[4];
+ } else if(ticks<=9 && buffer[4]>=1) {
+ if(buffer[4]>8)
+ buffer[4]=8;
+
+ ADDMSG(EXTENDED_MESSAGE);
+ ADDMSG(3);
+ ADDMSG(EXTENDED_SDTR);
+ if(ticks<4) {
+ ticks=4;
+ ADDMSG(50);
+ } else
+ ADDMSG(buffer[3]);
+
+ ADDMSG(buffer[4]);
+
+ printk("outbound SDTR: ");
+ print_msg(&MSG(MSGLEN-5));
+
+ CURRENT_SC->SCp.phase |= in_sync;
+
+ SYNCRATE |= ((ticks-2)<<4) + buffer[4];
+
+ start_sync++;
+ } else {
+ /* requested SDTR is too slow, do it asynchronously */
+ ADDMSG(MESSAGE_REJECT);
+ SYNCRATE = 0;
+ }
+
+ SETPORT(SCSIRATE, SYNCRATE&0x7f);
+ }
+ break;
+
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ case EXTENDED_WDTR:
+ default:
+ ADDMSG(MESSAGE_REJECT);
+ break;
+ }
+ }
+ break;
+
+ default:
+ printk("unsupported inbound message %x, ", CURRENT_SC->SCp.Message);
+ break;
+
+ }
+
+ make_acklow(shpnt);
+ phase=getphase(shpnt);
+ }
+
+ if(start_sync)
+ CURRENT_SC->SCp.phase |= in_sync;
+ else
+ CURRENT_SC->SCp.phase &= ~in_sync;
+
+ if(MSGLEN>0)
+ SETPORT(SCSISIG, P_MSGI|ATNO);
+
+ /* clear SCSI fifo on BUSFREE */
+ if(phase==P_BUSFREE)
+ SETPORT(SXFRCTL0, CH1|CLRCH1);
+
+ if(CURRENT_SC->SCp.phase & disconnected) {
+ save_flags(flags);
+ cli();
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("d+, ");
+#endif
+ append_SC(&DISCONNECTED_SC, CURRENT_SC);
+ CURRENT_SC->SCp.phase |= 1<<16;
+ CURRENT_SC = NULL;
+ restore_flags(flags);
+
+ SETBITS(SCSISEQ, ENRESELI);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ return;
+ }
+ }
+ break;
+
+ case P_STATUS: /* STATUS IN phase */
+#if defined(DEBUG_STATUS) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_status|debug_intr|debug_phases))
+ printk("STATUS, ");
+#endif
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENREQINIT|ENBUSFREE);
+
+ if(TESTHI(SSTAT1, PHASEMIS))
+ printk("aha152x: passing STATUS phase");
+
+ CURRENT_SC->SCp.Status = GETPORT(SCSIBUS);
+ make_acklow(shpnt);
+ getphase(shpnt);
+
+#if defined(DEBUG_STATUS)
+ if(HOSTDATA(shpnt)->debug & debug_status) {
+ printk("inbound status ");
+ print_status(CURRENT_SC->SCp.Status);
+ printk(", ");
+ }
+#endif
+ break;
+
+ case P_DATAI: /* DATA IN phase */
+ {
+ int fifodata, data_count, done;
+
+#if defined(DEBUG_DATAI) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_datai|debug_intr|debug_phases))
+ printk("DATA IN, ");
+#endif
+
+#if 0
+ if(GETPORT(FIFOSTAT) || GETPORT(SSTAT2) & (SFULL|SFCNT))
+ printk("aha152x: P_DATAI: %d(%d) bytes left in FIFO, resetting\n",
+ GETPORT(FIFOSTAT), GETPORT(SSTAT2) & (SFULL|SFCNT));
+#endif
+
+ /* reset host fifo */
+ SETPORT(DMACNTRL0, RSTFIFO);
+ SETPORT(DMACNTRL0, RSTFIFO|ENDMA);
+
+ SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+
+ /* done is set when the FIFO is empty after the target left DATA IN */
+ done=0;
+
+ /* while the target stays in DATA to transfer data */
+ while (!done) {
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("expecting data, ");
+#endif
+ /* wait for PHASEMIS or full FIFO */
+ while(TESTLO(DMASTAT, DFIFOFULL|INTSTAT))
+ barrier();
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("ok, ");
+#endif
+
+ if(TESTHI(DMASTAT, DFIFOFULL))
+ fifodata=GETPORT(FIFOSTAT);
+ else {
+ /* wait for SCSI fifo to get empty */
+ while(TESTLO(SSTAT2, SEMPTY))
+ barrier();
+
+ /* rest of data in FIFO */
+ fifodata=GETPORT(FIFOSTAT);
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("last transfer, ");
+#endif
+ done=1;
+ }
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("fifodata=%d, ", fifodata);
+#endif
+
+ while(fifodata && CURRENT_SC->SCp.this_residual) {
+ data_count=fifodata;
+
+ /* limit data transfer to size of first sg buffer */
+ if(data_count > CURRENT_SC->SCp.this_residual)
+ data_count = CURRENT_SC->SCp.this_residual;
+
+ fifodata -= data_count;
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("data_count=%d, ", data_count);
+#endif
+
+ if(data_count&1) {
+ /* get a single byte in byte mode */
+ SETBITS(DMACNTRL0, _8BIT);
+ *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
+ CURRENT_SC->SCp.this_residual--;
+ }
+
+ if(data_count>1) {
+ CLRBITS(DMACNTRL0, _8BIT);
+ data_count >>= 1; /* Number of words */
+ insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ /* show what comes with the last transfer */
+ if(done) {
+#if 0
+ int i;
+ unsigned char *data;
+#endif
+
+ printk("data on last transfer (%d bytes) ",
+ 2*data_count);
+#if 0
+ printk("data on last transfer (%d bytes: ",
+ 2*data_count);
+ data = (unsigned char *) CURRENT_SC->SCp.ptr;
+ for(i=0; i<2*data_count; i++)
+ printk("%2x ", *data++);
+ printk("), ");
+#endif
+ }
+#endif
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ }
+
+ /* if this buffer is full and there are more buffers left */
+ if(!CURRENT_SC->SCp.this_residual &&
+ CURRENT_SC->SCp.buffers_residual) {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr = CURRENT_SC->SCp.buffer->address;
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ }
+ }
+
+ /*
+ * FIFO should be empty
+ */
+ if(fifodata>0) {
+ printk("aha152x: more data than expected (%d bytes)\n",
+ GETPORT(FIFOSTAT));
+ SETBITS(DMACNTRL0, _8BIT);
+ printk("aha152x: data (");
+ while(fifodata--)
+ printk("%2x ", GETPORT(DATAPORT));
+ printk(")\n");
+ }
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ if(!fifodata)
+ printk("fifo empty, ");
+ else
+ printk("something left in fifo, ");
+#endif
+ }
+
+#if defined(DEBUG_DATAI)
+ if((HOSTDATA(shpnt)->debug & debug_datai) &&
+ (CURRENT_SC->SCp.buffers_residual ||
+ CURRENT_SC->SCp.this_residual))
+ printk("left buffers (buffers=%d, bytes=%d), ",
+ CURRENT_SC->SCp.buffers_residual, CURRENT_SC->SCp.this_residual);
+#endif
+ /* transfer can be considered ended, when SCSIEN reads back zero */
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+ while(TESTHI(SXFRCTL0, SCSIEN))
+ barrier();
+ CLRBITS(DMACNTRL0, ENDMA);
+
+#if defined(DEBUG_DATAI) || defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & (debug_datai|debug_intr))
+ printk("got %d bytes, ", GETSTCNT());
+#endif
+
+ CURRENT_SC->SCp.have_data_in++;
+ }
+ break;
+
+ case P_DATAO: /* DATA OUT phase */
+ {
+ int data_count;
+
+#if defined(DEBUG_DATAO) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_datao|debug_intr|debug_phases))
+ printk("DATA OUT, ");
+#endif
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("got data to send (bytes=%d, buffers=%d), ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+
+ if(GETPORT(FIFOSTAT) || GETPORT(SSTAT2) & (SFULL|SFCNT)) {
+ printk("%d(%d) left in FIFO, ",
+ GETPORT(FIFOSTAT), GETPORT(SSTAT2) & (SFULL|SFCNT));
+ aha152x_panic(shpnt, "FIFO should be empty");
+ }
+
+ SETPORT(SXFRCTL0, CH1|CLRSTCNT|CLRCH1);
+ SETPORT(SXFRCTL0, SCSIEN|DMAEN|CH1);
+
+ SETPORT(DMACNTRL0, WRITE_READ|RSTFIFO);
+ SETPORT(DMACNTRL0, ENDMA|WRITE_READ);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+
+ /* while current buffer is not empty or
+ there are more buffers to transfer */
+ while(TESTLO(SSTAT1, PHASEMIS) &&
+ (CURRENT_SC->SCp.this_residual ||
+ CURRENT_SC->SCp.buffers_residual)) {
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("sending data (left: bytes=%d, buffers=%d), waiting, ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+ /* transfer rest of buffer, but max. 128 byte */
+ data_count =
+ CURRENT_SC->SCp.this_residual > 128 ?
+ 128 : CURRENT_SC->SCp.this_residual ;
+
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("data_count=%d, ", data_count);
+#endif
+
+ if(data_count&1) {
+ /* put a single byte in byte mode */
+ SETBITS(DMACNTRL0, _8BIT);
+ SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
+ CURRENT_SC->SCp.this_residual--;
+ }
+ if(data_count>1) {
+ CLRBITS(DMACNTRL0, _8BIT);
+ data_count >>= 1; /* number of words */
+ outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ }
+
+ /* wait for FIFO to get empty */
+ while(TESTLO(DMASTAT, DFIFOEMP|INTSTAT))
+ barrier();
+
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("fifo (%d bytes), transfered (%d bytes), ",
+ GETPORT(FIFOSTAT), GETSTCNT());
+#endif
+
+ /* if this buffer is empty and there are more buffers left */
+ if(TESTLO(SSTAT1, PHASEMIS) &&
+ !CURRENT_SC->SCp.this_residual &&
+ CURRENT_SC->SCp.buffers_residual) {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr = CURRENT_SC->SCp.buffer->address;
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ }
+ }
+
+ if(CURRENT_SC->SCp.this_residual || CURRENT_SC->SCp.buffers_residual) {
+ /* target leaves DATA OUT for an other phase (perhaps disconnect) */
+
+ /* data in fifos has to be resend */
+ data_count = GETPORT(SSTAT2) & (SFULL|SFCNT);
+
+ data_count += GETPORT(FIFOSTAT) ;
+ CURRENT_SC->SCp.ptr -= data_count;
+ CURRENT_SC->SCp.this_residual += data_count;
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("left data (bytes=%d, buffers=%d), fifos (bytes=%d), "
+ "transfer incomplete, resetting fifo, ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual,
+ data_count);
+#endif
+ SETPORT(DMACNTRL0, WRITE_READ|RSTFIFO);
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+ CLRBITS(DMACNTRL0, ENDMA);
+ } else {
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("waiting for SCSI fifo to get empty, ");
+#endif
+ /* wait for SCSI fifo to get empty */
+ while(TESTLO(SSTAT2, SEMPTY))
+ barrier();
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("ok, left data (bytes=%d, buffers=%d) ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+
+ /* transfer can be considered ended, when SCSIEN reads back zero */
+ while(TESTHI(SXFRCTL0, SCSIEN))
+ barrier();
+
+ CLRBITS(DMACNTRL0, ENDMA);
+ }
+
+#if defined(DEBUG_DATAO) || defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & (debug_datao|debug_intr))
+ printk("sent %d data bytes, ", GETSTCNT());
+#endif
+ }
+ break;
+
+ case P_BUSFREE: /* BUSFREE */
+#if defined(DEBUG_RACE)
+ leave_driver("(BUSFREE) intr");
+#endif
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("unexpected BUS FREE, ");
+#endif
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ aha152x_done(shpnt, DID_ERROR << 16); /* Don't know any better */
+ return;
+ break;
+
+ case P_PARITY: /* parity error in DATA phase */
+#if defined(DEBUG_RACE)
+ leave_driver("(DID_PARITY) intr");
+#endif
+ printk("PARITY error in DATA phase, ");
+
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ SETBITS(DMACNTRL0, INTEN);
+ aha152x_done(shpnt, DID_PARITY << 16);
+ return;
+ break;
+
+ default:
+ printk("aha152x: unexpected phase\n");
+ break;
+ }
+
+ if(done) {
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ printk("command done.\n");
+#endif
+#if defined(DEBUG_RACE)
+ leave_driver("(done) intr");
+#endif
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ SETPORT(SCSISEQ, DISCONNECTED_SC ? ENRESELI : 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ aha152x_done(shpnt,
+ (CURRENT_SC->SCp.Status & 0xff)
+ | ((CURRENT_SC->SCp.Message & 0xff) << 8)
+ | (DID_OK << 16));
+
+#if defined(DEBUG_RACE)
+ printk("done returned (DID_OK: Status=%x; Message=%x).\n",
+ CURRENT_SC->SCp.Status, CURRENT_SC->SCp.Message);
+#endif
+ return;
+ }
+
+ if(CURRENT_SC)
+ CURRENT_SC->SCp.phase |= 1<<16;
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ disp_enintr(shpnt);
+#endif
+#if defined(DEBUG_RACE)
+ leave_driver("(PHASEEND) intr");
+#endif
+
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+}
+
+/*
+ * Dump the current driver status (queues, chip registers, enabled
+ * interrupts) and halt the machine.  Never returns; panic() stops
+ * the kernel.
+ */
+static void aha152x_panic(struct Scsi_Host *shpnt, char *msg)
+{
+ printk("\naha152x: %s\n", msg);
+ show_queues(shpnt);
+ panic("aha152x panic");
+}
+
+/*
+ * Display registers of AIC-6260.
+ *
+ * Decodes and prints the chip's status/control registers via printk.
+ * Compiled to an empty body unless DEBUG_AHA152X is defined; with
+ * SKIP_PORTS the dump can additionally be suppressed at runtime via
+ * the debug_skipports flag.  Output format is relied upon by humans
+ * only, but keep it stable for log comparison.
+ */
+static void disp_ports(struct Scsi_Host *shpnt)
+{
+#ifdef DEBUG_AHA152X
+ int s;
+
+#ifdef SKIP_PORTS
+ if(HOSTDATA(shpnt)->debug & debug_skipports)
+ return;
+#endif
+
+ printk("\n%s: ", CURRENT_SC ? "on bus" : "waiting");
+
+ s=GETPORT(SCSISEQ);
+ printk("SCSISEQ (");
+ if(s & TEMODEO) printk("TARGET MODE ");
+ if(s & ENSELO) printk("SELO ");
+ if(s & ENSELI) printk("SELI ");
+ if(s & ENRESELI) printk("RESELI ");
+ if(s & ENAUTOATNO) printk("AUTOATNO ");
+ if(s & ENAUTOATNI) printk("AUTOATNI ");
+ if(s & ENAUTOATNP) printk("AUTOATNP ");
+ if(s & SCSIRSTO) printk("SCSIRSTO ");
+ printk(");");
+
+ printk(" SCSISIG (");
+ s=GETPORT(SCSISIG);
+ switch(s & P_MASK) {
+ case P_DATAO:
+ printk("DATA OUT");
+ break;
+ case P_DATAI:
+ printk("DATA IN");
+ break;
+ case P_CMD:
+ printk("COMMAND");
+ break;
+ case P_STATUS:
+ printk("STATUS");
+ break;
+ case P_MSGO:
+ printk("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ printk("MESSAGE IN");
+ break;
+ default:
+ printk("*illegal*");
+ break;
+ }
+
+ printk("); ");
+
+ printk("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+
+ /* raw SSTAT0/SSTAT1 contents */
+ printk("SSTAT (");
+ s=GETPORT(SSTAT0);
+ if(s & TARGET) printk("TARGET ");
+ if(s & SELDO) printk("SELDO ");
+ if(s & SELDI) printk("SELDI ");
+ if(s & SELINGO) printk("SELINGO ");
+ if(s & SWRAP) printk("SWRAP ");
+ if(s & SDONE) printk("SDONE ");
+ if(s & SPIORDY) printk("SPIORDY ");
+ if(s & DMADONE) printk("DMADONE ");
+
+ s=GETPORT(SSTAT1);
+ if(s & SELTO) printk("SELTO ");
+ if(s & ATNTARG) printk("ATNTARG ");
+ if(s & SCSIRSTI) printk("SCSIRSTI ");
+ if(s & PHASEMIS) printk("PHASEMIS ");
+ if(s & BUSFREE) printk("BUSFREE ");
+ if(s & SCSIPERR) printk("SCSIPERR ");
+ if(s & PHASECHG) printk("PHASECHG ");
+ if(s & REQINIT) printk("REQINIT ");
+ printk("); ");
+
+
+ /* NOTE(review): same "SSTAT (" label as above, but this section
+ shows the status bits masked by the corresponding interrupt
+ enables (SIMODE0/SIMODE1) — i.e. the conditions that can
+ actually raise an interrupt right now. */
+ printk("SSTAT (");
+
+ s=GETPORT(SSTAT0) & GETPORT(SIMODE0);
+
+ if(s & TARGET) printk("TARGET ");
+ if(s & SELDO) printk("SELDO ");
+ if(s & SELDI) printk("SELDI ");
+ if(s & SELINGO) printk("SELINGO ");
+ if(s & SWRAP) printk("SWRAP ");
+ if(s & SDONE) printk("SDONE ");
+ if(s & SPIORDY) printk("SPIORDY ");
+ if(s & DMADONE) printk("DMADONE ");
+
+ s=GETPORT(SSTAT1) & GETPORT(SIMODE1);
+
+ if(s & SELTO) printk("SELTO ");
+ if(s & ATNTARG) printk("ATNTARG ");
+ if(s & SCSIRSTI) printk("SCSIRSTI ");
+ if(s & PHASEMIS) printk("PHASEMIS ");
+ if(s & BUSFREE) printk("BUSFREE ");
+ if(s & SCSIPERR) printk("SCSIPERR ");
+ if(s & PHASECHG) printk("PHASECHG ");
+ if(s & REQINIT) printk("REQINIT ");
+ printk("); ");
+
+ printk("SXFRCTL0 (");
+
+ s=GETPORT(SXFRCTL0);
+ if(s & SCSIEN) printk("SCSIEN ");
+ if(s & DMAEN) printk("DMAEN ");
+ if(s & CH1) printk("CH1 ");
+ if(s & CLRSTCNT) printk("CLRSTCNT ");
+ if(s & SPIOEN) printk("SPIOEN ");
+ if(s & CLRCH1) printk("CLRCH1 ");
+ printk("); ");
+
+ printk("SIGNAL (");
+
+ s=GETPORT(SCSISIG);
+ if(s & ATNI) printk("ATNI ");
+ if(s & SELI) printk("SELI ");
+ if(s & BSYI) printk("BSYI ");
+ if(s & REQI) printk("REQI ");
+ if(s & ACKI) printk("ACKI ");
+ printk("); ");
+
+ printk("SELID (%02x), ", GETPORT(SELID));
+
+ printk("SSTAT2 (");
+
+ s=GETPORT(SSTAT2);
+ if(s & SOFFSET) printk("SOFFSET ");
+ if(s & SEMPTY) printk("SEMPTY ");
+ if(s & SFULL) printk("SFULL ");
+ printk("); SFCNT (%d); ", s & (SFULL|SFCNT));
+
+ s=GETPORT(SSTAT3);
+ printk("SCSICNT (%d), OFFCNT(%d), ", (s&0xf0)>>4, s&0x0f);
+
+ printk("SSTAT4 (");
+ s=GETPORT(SSTAT4);
+ if(s & SYNCERR) printk("SYNCERR ");
+ if(s & FWERR) printk("FWERR ");
+ if(s & FRERR) printk("FRERR ");
+ printk("); ");
+
+ printk("DMACNTRL0 (");
+ s=GETPORT(DMACNTRL0);
+ printk("%s ", s & _8BIT ? "8BIT" : "16BIT");
+ printk("%s ", s & DMA ? "DMA" : "PIO" );
+ printk("%s ", s & WRITE_READ ? "WRITE" : "READ" );
+ if(s & ENDMA) printk("ENDMA ");
+ if(s & INTEN) printk("INTEN ");
+ if(s & RSTFIFO) printk("RSTFIFO ");
+ if(s & SWINT) printk("SWINT ");
+ printk("); ");
+
+ printk("DMASTAT (");
+ s=GETPORT(DMASTAT);
+ if(s & ATDONE) printk("ATDONE ");
+ if(s & WORDRDY) printk("WORDRDY ");
+ if(s & DFIFOFULL) printk("DFIFOFULL ");
+ if(s & DFIFOEMP) printk("DFIFOEMP ");
+ printk(")");
+
+ printk("\n");
+#endif
+}
+
+/*
+ * Display the currently enabled interrupt sources.
+ *
+ * Reads SIMODE0 and SIMODE1 and prints the symbolic name of every
+ * enable bit that is set, in fixed register/bit order, as a single
+ * parenthesized list.
+ */
+static void disp_enintr(struct Scsi_Host *shpnt)
+{
+ static const struct { int mask; const char *name; }
+ mode0_bits[] = {
+ { ENSELDO, "ENSELDO " },
+ { ENSELDI, "ENSELDI " },
+ { ENSELINGO, "ENSELINGO " },
+ { ENSWRAP, "ENSWRAP " },
+ { ENSDONE, "ENSDONE " },
+ { ENSPIORDY, "ENSPIORDY " },
+ { ENDMADONE, "ENDMADONE " }
+ },
+ mode1_bits[] = {
+ { ENSELTIMO, "ENSELTIMO " },
+ { ENATNTARG, "ENATNTARG " },
+ { ENPHASEMIS, "ENPHASEMIS " },
+ { ENBUSFREE, "ENBUSFREE " },
+ { ENSCSIPERR, "ENSCSIPERR " },
+ { ENPHASECHG, "ENPHASECHG " },
+ { ENREQINIT, "ENREQINIT " }
+ };
+ int bits, i;
+
+ printk("enabled interrupts (");
+
+ bits = GETPORT(SIMODE0);
+ for(i = 0; i < 7; i++)
+ if(bits & mode0_bits[i].mask)
+ printk("%s", mode0_bits[i].name);
+
+ bits = GETPORT(SIMODE1);
+ for(i = 0; i < 7; i++)
+ if(bits & mode1_bits[i].mask)
+ printk("%s", mode1_bits[i].name);
+
+ printk(")\n");
+}
+
+#if defined(DEBUG_RACE)
+
+static const char *should_leave;
+static int in_driver=0;
+
+/*
+ * Only one routine can be in the driver at once.
+ *
+ * Record entry of `func` into the driver (DEBUG_RACE builds only) and
+ * panic if some other routine is still inside.  Interrupts are disabled
+ * around the bookkeeping so the flag and the name stay consistent.
+ */
+static void enter_driver(const char *func)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ /* jiffies is unsigned long: use %lx, not %x */
+ printk("aha152x: entering %s() (%lx)\n", func, jiffies);
+ if(in_driver) {
+ printk("%s should leave first.\n", should_leave);
+ panic("aha152x: already in driver\n");
+ }
+
+ in_driver++;
+ should_leave=func;
+ restore_flags(flags);
+}
+
+/*
+ * Record that `func` is leaving the driver (DEBUG_RACE builds only);
+ * panic if nothing was inside.  Counterpart of enter_driver().
+ */
+static void leave_driver(const char *func)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ /* jiffies is unsigned long: use %lx, not %x */
+ printk("\naha152x: leaving %s() (%lx)\n", func, jiffies);
+ if(!in_driver) {
+ printk("aha152x: %s already left.\n", should_leave);
+ /* the %s previously had no matching argument -- pass should_leave */
+ panic("aha152x: %s already left driver.\n", should_leave);
+ }
+
+ in_driver--;
+ should_leave=func;
+ restore_flags(flags);
+}
+#endif
+
+/*
+ * Show the command data of a command.
+ *
+ * Prints one line describing a Scsi_Cmnd: its address, target/lun, CDB,
+ * residual byte/buffer counts, the driver-private phase flags stored in
+ * SCp.phase, and — when the command is in an information transfer phase
+ * (in_other) — the last bus phase recorded in bits 16.. of SCp.phase.
+ */
+static void show_command(Scsi_Cmnd *ptr)
+{
+ printk("0x%08x: target=%d; lun=%d; cmnd=(",
+ (unsigned int) ptr, ptr->target, ptr->lun);
+
+ print_command(ptr->cmnd);
+
+ printk("); residual=%d; buffers=%d; phase |",
+ ptr->SCp.this_residual, ptr->SCp.buffers_residual);
+
+ if(ptr->SCp.phase & not_issued ) printk("not issued|");
+ if(ptr->SCp.phase & in_selection) printk("in selection|");
+ if(ptr->SCp.phase & disconnected) printk("disconnected|");
+ if(ptr->SCp.phase & aborted ) printk("aborted|");
+ if(ptr->SCp.phase & sent_ident ) printk("send_ident|");
+ if(ptr->SCp.phase & in_other) {
+ printk("; in other(");
+ switch((ptr->SCp.phase >> 16) & P_MASK) {
+ case P_DATAO:
+ printk("DATA OUT");
+ break;
+ case P_DATAI:
+ printk("DATA IN");
+ break;
+ case P_CMD:
+ printk("COMMAND");
+ break;
+ case P_STATUS:
+ printk("STATUS");
+ break;
+ case P_MSGO:
+ printk("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ printk("MESSAGE IN");
+ break;
+ default:
+ printk("*illegal*");
+ break;
+ }
+ printk(")");
+ /* bit 16 marks "phase ended" (set by the interrupt handler) */
+ if(ptr->SCp.phase & (1<<16))
+ printk("; phaseend");
+ }
+ printk("; next=0x%08x\n", (unsigned int) ptr->host_scribble);
+}
+
+/*
+ * Dump the queued data
+ */
+static void show_queues(struct Scsi_Host *shpnt)
+{
+ unsigned long flags;
+ Scsi_Cmnd *ptr;
+
+ save_flags(flags);
+ cli();
+ printk("QUEUE STATUS:\nissue_SC:\n");
+ for(ptr=ISSUE_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ show_command(ptr);
+
+ printk("current_SC:\n");
+ if(CURRENT_SC)
+ show_command(CURRENT_SC);
+ else
+ printk("none\n");
+
+ printk("disconnected_SC:\n");
+ for(ptr=DISCONNECTED_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ show_command(ptr);
+
+ disp_ports(shpnt);
+ disp_enintr(shpnt);
+ restore_flags(flags);
+}
+
+int aha152x_set_info(char *buffer, int length, struct Scsi_Host *shpnt)
+{
+ return(-ENOSYS); /* Currently this is a no-op */
+}
+
+#undef SPRINTF
+#define SPRINTF(args...) pos += sprintf(pos, ## args)
+
+static int get_command(char *pos, Scsi_Cmnd *ptr)
+{
+ char *start = pos;
+ int i;
+
+ SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ",
+ (unsigned int) ptr, ptr->target, ptr->lun);
+
+ for(i=0; i<COMMAND_SIZE(ptr->cmnd[0]); i++)
+ SPRINTF("0x%02x ", ptr->cmnd[i]);
+
+ SPRINTF("); residual=%d; buffers=%d; phase |",
+ ptr->SCp.this_residual, ptr->SCp.buffers_residual);
+
+ if(ptr->SCp.phase & not_issued ) SPRINTF("not issued|");
+ if(ptr->SCp.phase & in_selection) SPRINTF("in selection|");
+ if(ptr->SCp.phase & disconnected) SPRINTF("disconnected|");
+ if(ptr->SCp.phase & aborted ) SPRINTF("aborted|");
+ if(ptr->SCp.phase & sent_ident ) SPRINTF("send_ident|");
+ if(ptr->SCp.phase & in_other) {
+ SPRINTF("; in other(");
+ switch((ptr->SCp.phase >> 16) & P_MASK) {
+ case P_DATAO:
+ SPRINTF("DATA OUT");
+ break;
+ case P_DATAI:
+ SPRINTF("DATA IN");
+ break;
+ case P_CMD:
+ SPRINTF("COMMAND");
+ break;
+ case P_STATUS:
+ SPRINTF("STATUS");
+ break;
+ case P_MSGO:
+ SPRINTF("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ SPRINTF("MESSAGE IN");
+ break;
+ default:
+ SPRINTF("*illegal*");
+ break;
+ }
+ SPRINTF(")");
+ if(ptr->SCp.phase & (1<<16))
+ SPRINTF("; phaseend");
+ }
+ SPRINTF("; next=0x%08x\n", (unsigned int) ptr->host_scribble);
+
+ return(pos-start);
+}
+
+static int get_ports(struct Scsi_Host *shpnt, char *pos)
+{
+ char *start = pos;
+ int s;
+
+#ifdef SKIP_PORTS
+ if(HOSTDATA(shpnt)->debug & debug_skipports)
+ return (pos-start);
+#endif
+
+ SPRINTF("\n%s: ", CURRENT_SC ? "on bus" : "waiting");
+
+ s=GETPORT(SCSISEQ);
+ SPRINTF("SCSISEQ (");
+ if(s & TEMODEO) SPRINTF("TARGET MODE ");
+ if(s & ENSELO) SPRINTF("SELO ");
+ if(s & ENSELI) SPRINTF("SELI ");
+ if(s & ENRESELI) SPRINTF("RESELI ");
+ if(s & ENAUTOATNO) SPRINTF("AUTOATNO ");
+ if(s & ENAUTOATNI) SPRINTF("AUTOATNI ");
+ if(s & ENAUTOATNP) SPRINTF("AUTOATNP ");
+ if(s & SCSIRSTO) SPRINTF("SCSIRSTO ");
+ SPRINTF(");");
+
+ SPRINTF(" SCSISIG (");
+ s=GETPORT(SCSISIG);
+ switch(s & P_MASK) {
+ case P_DATAO:
+ SPRINTF("DATA OUT");
+ break;
+ case P_DATAI:
+ SPRINTF("DATA IN");
+ break;
+ case P_CMD:
+ SPRINTF("COMMAND");
+ break;
+ case P_STATUS:
+ SPRINTF("STATUS");
+ break;
+ case P_MSGO:
+ SPRINTF("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ SPRINTF("MESSAGE IN");
+ break;
+ default:
+ SPRINTF("*illegal*");
+ break;
+ }
+
+ SPRINTF("); ");
+
+ SPRINTF("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+
+ SPRINTF("SSTAT (");
+ s=GETPORT(SSTAT0);
+ if(s & TARGET) SPRINTF("TARGET ");
+ if(s & SELDO) SPRINTF("SELDO ");
+ if(s & SELDI) SPRINTF("SELDI ");
+ if(s & SELINGO) SPRINTF("SELINGO ");
+ if(s & SWRAP) SPRINTF("SWRAP ");
+ if(s & SDONE) SPRINTF("SDONE ");
+ if(s & SPIORDY) SPRINTF("SPIORDY ");
+ if(s & DMADONE) SPRINTF("DMADONE ");
+
+ s=GETPORT(SSTAT1);
+ if(s & SELTO) SPRINTF("SELTO ");
+ if(s & ATNTARG) SPRINTF("ATNTARG ");
+ if(s & SCSIRSTI) SPRINTF("SCSIRSTI ");
+ if(s & PHASEMIS) SPRINTF("PHASEMIS ");
+ if(s & BUSFREE) SPRINTF("BUSFREE ");
+ if(s & SCSIPERR) SPRINTF("SCSIPERR ");
+ if(s & PHASECHG) SPRINTF("PHASECHG ");
+ if(s & REQINIT) SPRINTF("REQINIT ");
+ SPRINTF("); ");
+
+
+ SPRINTF("SSTAT (");
+
+ s=GETPORT(SSTAT0) & GETPORT(SIMODE0);
+
+ if(s & TARGET) SPRINTF("TARGET ");
+ if(s & SELDO) SPRINTF("SELDO ");
+ if(s & SELDI) SPRINTF("SELDI ");
+ if(s & SELINGO) SPRINTF("SELINGO ");
+ if(s & SWRAP) SPRINTF("SWRAP ");
+ if(s & SDONE) SPRINTF("SDONE ");
+ if(s & SPIORDY) SPRINTF("SPIORDY ");
+ if(s & DMADONE) SPRINTF("DMADONE ");
+
+ s=GETPORT(SSTAT1) & GETPORT(SIMODE1);
+
+ if(s & SELTO) SPRINTF("SELTO ");
+ if(s & ATNTARG) SPRINTF("ATNTARG ");
+ if(s & SCSIRSTI) SPRINTF("SCSIRSTI ");
+ if(s & PHASEMIS) SPRINTF("PHASEMIS ");
+ if(s & BUSFREE) SPRINTF("BUSFREE ");
+ if(s & SCSIPERR) SPRINTF("SCSIPERR ");
+ if(s & PHASECHG) SPRINTF("PHASECHG ");
+ if(s & REQINIT) SPRINTF("REQINIT ");
+ SPRINTF("); ");
+
+ SPRINTF("SXFRCTL0 (");
+
+ s=GETPORT(SXFRCTL0);
+ if(s & SCSIEN) SPRINTF("SCSIEN ");
+ if(s & DMAEN) SPRINTF("DMAEN ");
+ if(s & CH1) SPRINTF("CH1 ");
+ if(s & CLRSTCNT) SPRINTF("CLRSTCNT ");
+ if(s & SPIOEN) SPRINTF("SPIOEN ");
+ if(s & CLRCH1) SPRINTF("CLRCH1 ");
+ SPRINTF("); ");
+
+ SPRINTF("SIGNAL (");
+
+ s=GETPORT(SCSISIG);
+ if(s & ATNI) SPRINTF("ATNI ");
+ if(s & SELI) SPRINTF("SELI ");
+ if(s & BSYI) SPRINTF("BSYI ");
+ if(s & REQI) SPRINTF("REQI ");
+ if(s & ACKI) SPRINTF("ACKI ");
+ SPRINTF("); ");
+
+ SPRINTF("SELID (%02x), ", GETPORT(SELID));
+
+ SPRINTF("SSTAT2 (");
+
+ s=GETPORT(SSTAT2);
+ if(s & SOFFSET) SPRINTF("SOFFSET ");
+ if(s & SEMPTY) SPRINTF("SEMPTY ");
+ if(s & SFULL) SPRINTF("SFULL ");
+ SPRINTF("); SFCNT (%d); ", s & (SFULL|SFCNT));
+
+ s=GETPORT(SSTAT3);
+ SPRINTF("SCSICNT (%d), OFFCNT(%d), ", (s&0xf0)>>4, s&0x0f);
+
+ SPRINTF("SSTAT4 (");
+ s=GETPORT(SSTAT4);
+ if(s & SYNCERR) SPRINTF("SYNCERR ");
+ if(s & FWERR) SPRINTF("FWERR ");
+ if(s & FRERR) SPRINTF("FRERR ");
+ SPRINTF("); ");
+
+ SPRINTF("DMACNTRL0 (");
+ s=GETPORT(DMACNTRL0);
+ SPRINTF("%s ", s & _8BIT ? "8BIT" : "16BIT");
+ SPRINTF("%s ", s & DMA ? "DMA" : "PIO" );
+ SPRINTF("%s ", s & WRITE_READ ? "WRITE" : "READ" );
+ if(s & ENDMA) SPRINTF("ENDMA ");
+ if(s & INTEN) SPRINTF("INTEN ");
+ if(s & RSTFIFO) SPRINTF("RSTFIFO ");
+ if(s & SWINT) SPRINTF("SWINT ");
+ SPRINTF("); ");
+
+ SPRINTF("DMASTAT (");
+ s=GETPORT(DMASTAT);
+ if(s & ATDONE) SPRINTF("ATDONE ");
+ if(s & WORDRDY) SPRINTF("WORDRDY ");
+ if(s & DFIFOFULL) SPRINTF("DFIFOFULL ");
+ if(s & DFIFOEMP) SPRINTF("DFIFOEMP ");
+ SPRINTF(")\n\n");
+
+ SPRINTF("enabled interrupts (");
+
+ s=GETPORT(SIMODE0);
+ if(s & ENSELDO) SPRINTF("ENSELDO ");
+ if(s & ENSELDI) SPRINTF("ENSELDI ");
+ if(s & ENSELINGO) SPRINTF("ENSELINGO ");
+ if(s & ENSWRAP) SPRINTF("ENSWRAP ");
+ if(s & ENSDONE) SPRINTF("ENSDONE ");
+ if(s & ENSPIORDY) SPRINTF("ENSPIORDY ");
+ if(s & ENDMADONE) SPRINTF("ENDMADONE ");
+
+ s=GETPORT(SIMODE1);
+ if(s & ENSELTIMO) SPRINTF("ENSELTIMO ");
+ if(s & ENATNTARG) SPRINTF("ENATNTARG ");
+ if(s & ENPHASEMIS) SPRINTF("ENPHASEMIS ");
+ if(s & ENBUSFREE) SPRINTF("ENBUSFREE ");
+ if(s & ENSCSIPERR) SPRINTF("ENSCSIPERR ");
+ if(s & ENPHASECHG) SPRINTF("ENPHASECHG ");
+ if(s & ENREQINIT) SPRINTF("ENREQINIT ");
+ SPRINTF(")\n");
+
+ return (pos-start);
+}
+
+#undef SPRINTF
+#define SPRINTF(args...) do { if(pos < buffer + length) pos += sprintf(pos, ## args); } while(0)
+
+int aha152x_proc_info(char *buffer, char **start,
+ off_t offset, int length, int hostno, int inout)
+{
+ int i;
+ char *pos = buffer;
+ struct Scsi_Host *shpnt;
+ unsigned long flags;
+ Scsi_Cmnd *ptr;
+
+ for(i=0, shpnt= (struct Scsi_Host *) NULL; i<IRQS; i++)
+ if(aha152x_host[i] && aha152x_host[i]->host_no == hostno)
+ shpnt=aha152x_host[i];
+
+ if(!shpnt)
+ return(-ESRCH);
+
+ if(inout) /* Has data been written to the file ? */
+ return(aha152x_set_info(buffer, length, shpnt));
+
+ SPRINTF(AHA152X_REVID "\n");
+
+ save_flags(flags);
+ cli();
+
+ SPRINTF("ioports 0x%04x to 0x%04x\n",
+ shpnt->io_port, shpnt->io_port+shpnt->n_io_port-1);
+ SPRINTF("interrupt 0x%02x\n", shpnt->irq);
+ SPRINTF("disconnection/reconnection %s\n",
+ HOSTDATA(shpnt)->reconnect ? "enabled" : "disabled");
+ SPRINTF("parity checking %s\n",
+ HOSTDATA(shpnt)->parity ? "enabled" : "disabled");
+ SPRINTF("synchronous transfers %s\n",
+ HOSTDATA(shpnt)->synchronous ? "enabled" : "disabled");
+ SPRINTF("%d commands currently queued\n", HOSTDATA(shpnt)->commands);
+
+ if(HOSTDATA(shpnt)->synchronous) {
+#if 0
+ SPRINTF("synchronously operating targets (tick=%ld ns):\n",
+ 250000000/loops_per_sec);
+ for(i=0; i<8; i++)
+ if(HOSTDATA(shpnt)->syncrate[i]&0x7f)
+ SPRINTF("target %d: period %dT/%ldns; req/ack offset %d\n",
+ i,
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2),
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2)*
+ 250000000/loops_per_sec,
+ HOSTDATA(shpnt)->syncrate[i]&0x0f);
+#else
+ SPRINTF("synchronously operating targets (tick=50 ns):\n");
+ for(i=0; i<8; i++)
+ if(HOSTDATA(shpnt)->syncrate[i]&0x7f)
+ SPRINTF("target %d: period %dT/%dns; req/ack offset %d\n",
+ i,
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2),
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2)*50,
+ HOSTDATA(shpnt)->syncrate[i]&0x0f);
+#endif
+ }
+
+#ifdef DEBUG_AHA152X
+#define PDEBUG(flags,txt) if(HOSTDATA(shpnt)->debug & flags) SPRINTF("(%s) ", txt);
+
+ SPRINTF("enabled debugging options: ");
+
+ PDEBUG(debug_skipports, "skip ports");
+ PDEBUG(debug_queue, "queue");
+ PDEBUG(debug_intr, "interrupt");
+ PDEBUG(debug_selection, "selection");
+ PDEBUG(debug_msgo, "message out");
+ PDEBUG(debug_msgi, "message in");
+ PDEBUG(debug_status, "status");
+ PDEBUG(debug_cmd, "command");
+ PDEBUG(debug_datai, "data in");
+ PDEBUG(debug_datao, "data out");
+ PDEBUG(debug_abort, "abort");
+ PDEBUG(debug_done, "done");
+ PDEBUG(debug_biosparam, "bios parameters");
+ PDEBUG(debug_phases, "phases");
+ PDEBUG(debug_queues, "queues");
+ PDEBUG(debug_reset, "reset");
+
+ SPRINTF("\n");
+#endif
+
+ SPRINTF("\nqueue status:\n");
+ if(ISSUE_SC) {
+ SPRINTF("not yet issued commands:\n");
+ for(ptr=ISSUE_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos += get_command(pos, ptr);
+ } else
+ SPRINTF("no not yet issued commands\n");
+
+ if(CURRENT_SC) {
+ SPRINTF("current command:\n");
+ pos += get_command(pos, CURRENT_SC);
+ } else
+ SPRINTF("no current command\n");
+
+ if(DISCONNECTED_SC) {
+ SPRINTF("disconnected commands:\n");
+ for(ptr=DISCONNECTED_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos += get_command(pos, ptr);
+ } else
+ SPRINTF("no disconnected commands\n");
+
+ restore_flags(flags);
+
+ pos += get_ports(shpnt, pos);
+
+ *start=buffer+offset;
+ if (pos - buffer < offset)
+ return 0;
+ else if (pos - buffer - offset < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AHA152X;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/aha152x.h b/linux/src/drivers/scsi/aha152x.h
new file mode 100644
index 0000000..ca1a202
--- /dev/null
+++ b/linux/src/drivers/scsi/aha152x.h
@@ -0,0 +1,357 @@
+#ifndef _AHA152X_H
+#define _AHA152X_H
+
+/*
+ * $Id: aha152x.h,v 1.1 1999/04/26 05:54:10 tb Exp $
+ */
+
+#if defined(__KERNEL__)
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include <asm/io.h>
+
+int aha152x_detect(Scsi_Host_Template *);
+int aha152x_command(Scsi_Cmnd *);
+int aha152x_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha152x_abort(Scsi_Cmnd *);
+int aha152x_reset(Scsi_Cmnd *, unsigned int);
+int aha152x_biosparam(Disk *, kdev_t, int*);
+int aha152x_proc_info(char *buffer, char **start, off_t offset, int length, int hostno, int inout);
+
+/* number of queueable commands
+ (unless we support more than 1 cmd_per_lun this should do) */
+#define AHA152X_MAXQUEUE 7
+
+#define AHA152X_REVID "Adaptec 152x SCSI driver; $Revision: 1.1 $"
+
+extern struct proc_dir_entry proc_scsi_aha152x;
+
+/* Initial value of Scsi_Host entry */
+#define AHA152X { /* next */ 0, \
+ /* usage_count */ 0, \
+ /* proc_dir */ &proc_scsi_aha152x, \
+ /* proc_info */ aha152x_proc_info, \
+ /* name */ AHA152X_REVID, \
+ /* detect */ aha152x_detect, \
+ /* release */ 0, \
+ /* info */ 0, \
+ /* command */ aha152x_command, \
+ /* queuecommand */ aha152x_queue, \
+ /* abort */ aha152x_abort, \
+ /* reset */ aha152x_reset, \
+ /* slave_attach */ 0, \
+ /* bios_param */ aha152x_biosparam, \
+ /* can_queue */ 1, \
+ /* this_id */ 7, \
+ /* sg_tablesize */ SG_ALL, \
+ /* cmd_per_lun */ 1, \
+ /* present */ 0, \
+ /* unchecked_isa_dma */ 0, \
+ /* use_clustering */ DISABLE_CLUSTERING }
+#endif
+
+
+/* port addresses */
+#define SCSISEQ (shpnt->io_port+0x00) /* SCSI sequence control */
+#define SXFRCTL0 (shpnt->io_port+0x01) /* SCSI transfer control 0 */
+#define SXFRCTL1 (shpnt->io_port+0x02) /* SCSI transfer control 1 */
+#define SCSISIG (shpnt->io_port+0x03) /* SCSI signal in/out */
+#define SCSIRATE (shpnt->io_port+0x04) /* SCSI rate control */
+#define SELID (shpnt->io_port+0x05) /* selection/reselection ID */
+#define SCSIID SELID /* SCSI ID */
+#define SCSIDAT (shpnt->io_port+0x06) /* SCSI latched data */
+#define SCSIBUS (shpnt->io_port+0x07) /* SCSI data bus */
+#define STCNT0 (shpnt->io_port+0x08) /* SCSI transfer count 0 */
+#define STCNT1 (shpnt->io_port+0x09) /* SCSI transfer count 1 */
+#define STCNT2 (shpnt->io_port+0x0a) /* SCSI transfer count 2 */
+#define SSTAT0 (shpnt->io_port+0x0b) /* SCSI interrupt status 0 */
+#define SSTAT1 (shpnt->io_port+0x0c) /* SCSI interrupt status 1 */
+#define SSTAT2 (shpnt->io_port+0x0d) /* SCSI interrupt status 2 */
+#define SCSITEST (shpnt->io_port+0x0e) /* SCSI test control */
+#define SSTAT3 SCSITEST /* SCSI interrupt status 3 */
+#define SSTAT4 (shpnt->io_port+0x0f) /* SCSI status 4 */
+#define SIMODE0 (shpnt->io_port+0x10) /* SCSI interrupt mode 0 */
+#define SIMODE1 (shpnt->io_port+0x11) /* SCSI interrupt mode 1 */
+#define DMACNTRL0 (shpnt->io_port+0x12) /* DMA control 0 */
+#define DMACNTRL1 (shpnt->io_port+0x13) /* DMA control 1 */
+#define DMASTAT (shpnt->io_port+0x14) /* DMA status */
+#define FIFOSTAT (shpnt->io_port+0x15) /* FIFO status */
+#define DATAPORT (shpnt->io_port+0x16) /* DATA port */
+#define BRSTCNTRL (shpnt->io_port+0x18) /* burst control */
+#define PORTA (shpnt->io_port+0x1a) /* PORT A */
+#define PORTB (shpnt->io_port+0x1b) /* PORT B */
+#define REV (shpnt->io_port+0x1c) /* revision */
+#define STACK (shpnt->io_port+0x1d) /* stack */
+#define TEST (shpnt->io_port+0x1e) /* test register */
+
+/* used in aha152x_porttest */
+#define O_PORTA 0x1a /* PORT A */
+#define O_PORTB 0x1b /* PORT B */
+#define O_DMACNTRL1 0x13 /* DMA control 1 */
+#define O_STACK 0x1d /* stack */
+#define IO_RANGE 0x20
+
+/* bits and bitmasks to ports */
+
+/* SCSI sequence control */
+#define TEMODEO 0x80
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRESELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+#define SCSIRSTO 0x01
+
+/* SCSI transfer control 0 */
+#define SCSIEN 0x80
+#define DMAEN 0x40
+#define CH1 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define CLRCH1 0x02
+
+/* SCSI transfer control 1 */
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18 /* mask */
+#define STIMESEL_ 3
+#define ENSTIMER 0x04
+#define BYTEALIGN 0x02
+
+/* SCSI signal IN */
+#define CDI 0x80
+#define IOI 0x40
+#define MSGI 0x20
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+/* SCSI Phases */
+#define P_MASK (MSGI|CDI|IOI)
+#define P_DATAO (0)
+#define P_DATAI (IOI)
+#define P_CMD (CDI)
+#define P_STATUS (CDI|IOI)
+#define P_MSGO (MSGI|CDI)
+#define P_MSGI (MSGI|CDI|IOI)
+
+/* SCSI signal OUT */
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+/* SCSI rate control */
+#define SXFR 0x70 /* mask */
+#define SXFR_ 4
+#define SOFS 0x0f /* mask */
+
+/* SCSI ID */
+#define OID 0x70
+#define OID_ 4
+#define TID 0x07
+
+/* SCSI transfer count */
+#define GETSTCNT() ( (GETPORT(STCNT2)<<16) \
+ + (GETPORT(STCNT1)<< 8) \
+ + GETPORT(STCNT0) )
+
+#define SETSTCNT(X) { SETPORT(STCNT2, ((X) & 0xFF0000) >> 16); \
+ SETPORT(STCNT1, ((X) & 0x00FF00) >> 8); \
+ SETPORT(STCNT0, ((X) & 0x0000FF) ); }
+
+/* SCSI interrupt status */
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define SWRAP 0x08
+#define SDONE 0x04
+#define SPIORDY 0x02
+#define DMADONE 0x01
+
+#define SETSDONE 0x80
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRSWRAP 0x08
+#define CLRSDONE 0x04
+#define CLRSPIORDY 0x02
+#define CLRDMADONE 0x01
+
+/* SCSI status 1 */
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+#define CLRSELTIMO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+/* SCSI status 2 */
+#define SOFFSET 0x20
+#define SEMPTY 0x10
+#define SFULL 0x08
+#define SFCNT 0x07 /* mask */
+
+/* SCSI status 3 */
+#define SCSICNT 0xf0 /* mask */
+#define SCSICNT_ 4
+#define OFFCNT 0x0f /* mask */
+
+/* SCSI TEST control */
+#define SCTESTU 0x08
+#define SCTESTD 0x04
+#define STCTEST 0x01
+
+/* SCSI status 4 */
+#define SYNCERR 0x04
+#define FWERR 0x02
+#define FRERR 0x01
+
+#define CLRSYNCERR 0x04
+#define CLRFWERR 0x02
+#define CLRFRERR 0x01
+
+/* SCSI interrupt mode 0 */
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENSWRAP 0x08
+#define ENSDONE 0x04
+#define ENSPIORDY 0x02
+#define ENDMADONE 0x01
+
+/* SCSI interrupt mode 1 */
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+/* DMA control 0 */
+#define ENDMA 0x80
+#define _8BIT 0x40
+#define DMA 0x20
+#define WRITE_READ 0x08
+#define INTEN 0x04
+#define RSTFIFO 0x02
+#define SWINT 0x01
+
+/* DMA control 1 */
+#define PWRDWN 0x80
+#define STK 0x07 /* mask */
+
+/* DMA status */
+#define ATDONE 0x80
+#define WORDRDY 0x40
+#define INTSTAT 0x20
+#define DFIFOFULL 0x10
+#define DFIFOEMP 0x08
+
+/* BURST control */
+#define BON 0xf0
+#define BOFF 0x0f
+
+/* TEST REGISTER */
+#define BOFFTMR 0x40
+#define BONTMR 0x20
+#define STCNTH 0x10
+#define STCNTM 0x08
+#define STCNTL 0x04
+#define SCSIBLK 0x02
+#define DMABLK 0x01
+
+/* On the AHA-152x board PORTA and PORTB contain
+ some information about the board's configuration. */
+typedef union {
+ struct {
+ unsigned reserved:2; /* reserved */
+ unsigned tardisc:1; /* Target disconnect: 0=disabled, 1=enabled */
+ unsigned syncneg:1; /* Initial sync neg: 0=disabled, 1=enabled */
+ unsigned msgclasses:2; /* Message classes
+ 0=#4
+ 1=#0, #1, #2, #3, #4
+ 2=#0, #3, #4
+ 3=#0, #4
+ */
+ unsigned boot:1; /* boot: 0=disabled, 1=enabled */
+ unsigned dma:1; /* Transfer mode: 0=PIO; 1=DMA */
+ unsigned id:3; /* SCSI-id */
+ unsigned irq:2; /* IRQ-Channel: 0,3=12, 1=10, 2=11 */
+ unsigned dmachan:2; /* DMA-Channel: 0=0, 1=5, 2=6, 3=7 */
+ unsigned parity:1; /* SCSI-parity: 1=enabled 0=disabled */
+ } fields;
+ unsigned short port;
+} aha152x_config ;
+
+#define cf_parity fields.parity
+#define cf_dmachan fields.dmachan
+#define cf_irq fields.irq
+#define cf_id fields.id
+#define cf_dma fields.dma
+#define cf_boot fields.boot
+#define cf_msgclasses fields.msgclasses
+#define cf_syncneg fields.syncneg
+#define cf_tardisc fields.tardisc
+#define cf_port port
+
+/* Some macros to manipulate ports and their bits */
+
+#define SETPORT(PORT, VAL) outb( (VAL), (PORT) )
+#define SETPORTP(PORT, VAL) outb_p( (VAL), (PORT) )
+#define SETPORTW(PORT, VAL) outw( (VAL), (PORT) )
+
+#define GETPORT(PORT) inb( PORT )
+#define GETPORTW(PORT) inw( PORT )
+
+#define SETBITS(PORT, BITS) outb( (inb(PORT) | (BITS)), (PORT) )
+#define CLRBITS(PORT, BITS) outb( (inb(PORT) & ~(BITS)), (PORT) )
+#define CLRSETBITS(PORT, CLR, SET) outb( (inb(PORT) & ~(CLR)) | (SET) , (PORT) )
+
+#define TESTHI(PORT, BITS) ((inb(PORT) & (BITS)) == BITS)
+#define TESTLO(PORT, BITS) ((inb(PORT) & (BITS)) == 0)
+
+#ifdef DEBUG_AHA152X
+enum {
+ debug_skipports = 0x0001,
+ debug_queue = 0x0002,
+ debug_intr = 0x0004,
+ debug_selection = 0x0008,
+ debug_msgo = 0x0010,
+ debug_msgi = 0x0020,
+ debug_status = 0x0040,
+ debug_cmd = 0x0080,
+ debug_datai = 0x0100,
+ debug_datao = 0x0200,
+ debug_abort = 0x0400,
+ debug_done = 0x0800,
+ debug_biosparam = 0x1000,
+ debug_phases = 0x2000,
+ debug_queues = 0x4000,
+ debug_reset = 0x8000,
+};
+#endif
+
+#endif /* _AHA152X_H */
diff --git a/linux/src/drivers/scsi/aha1542.c b/linux/src/drivers/scsi/aha1542.c
new file mode 100644
index 0000000..cc27e5c
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1542.c
@@ -0,0 +1,1325 @@
+/* $Id: aha1542.c,v 1.1 1999/04/26 05:54:11 tb Exp $
+ * linux/kernel/aha1542.c
+ *
+ * Copyright (C) 1992 Tommy Thorn
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * Modified by Eric Youngdale
+ * Use request_irq and request_dma to help prevent unexpected conflicts
+ * Set up on-board DMA controller, such that we do not have to
+ * have the bios enabled to use the aha1542.
+ * Modified by David Gentzel
+ * Don't call request_dma if dma mask is 0 (for BusLogic BT-445S VL-Bus
+ * controller).
+ * Modified by Matti Aarnio
+ * Accept parameters from LILO cmd-line. -- 1-Oct-94
+ * Modified by Mike McLagan <mike.mclagan@linux.org>
+ * Recognise extended mode on AHA1542CP, different bit than 1542CF
+ * 1-Jan-97
+ * Modified by Bjorn L. Thordarson and Einar Thor Einarsson
+ * Recognize that DMA0 is valid DMA channel -- 13-Jul-98
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+
+#include "aha1542.h"
+
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_aha1542 = {
+ PROC_SCSI_AHA1542, 7, "aha1542",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/aha1542.c,v 1.1 1999/04/26 05:54:11 tb Exp $";
+*/
+
+/* The adaptec can be configured for quite a number of addresses, but
+I generally do not want the card poking around at random. We allow
+two addresses - this allows people to use the Adaptec with a Midi
+card, which also used 0x330 -- can be overridden with LILO! */
+
+#define MAXBOARDS 2 /* Increase this and the sizes of the
+ arrays below, if you need more.. */
+
+static unsigned int bases[MAXBOARDS]={0x330, 0x334};
+
+/* set by aha1542_setup according to the command line */
+static int setup_called[MAXBOARDS] = {0,0};
+static int setup_buson[MAXBOARDS] = {0,0};
+static int setup_busoff[MAXBOARDS] = {0,0};
+static int setup_dmaspeed[MAXBOARDS] = {-1,-1};
+
+static char *setup_str[MAXBOARDS] = {(char *)NULL,(char *)NULL};
+
+/*
+ * LILO params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]
+ *
+ * Where: <PORTBASE> is any of the valid AHA addresses:
+ * 0x130, 0x134, 0x230, 0x234, 0x330, 0x334
+ * <BUSON> is the time (in microsecs) that AHA spends on the AT-bus
+ * when transferring data. 1542A power-on default is 11us,
+ * valid values are in range: 2..15 (decimal)
+ * <BUSOFF> is the time that AHA spends OFF THE BUS after while
+ * it is transferring data (not to monopolize the bus).
+ * Power-on default is 4us, valid range: 1..64 microseconds.
+ * <DMASPEED> Default is jumper selected (1542A: on the J1),
+ * but experimenter can alter it with this.
+ * Valid values: 5, 6, 7, 8, 10 (MB/s)
+ * Factory default is 5 MB/s.
+ */
+
+#define BIOS_TRANSLATION_1632 0 /* Used by some old 1542A boards */
+#define BIOS_TRANSLATION_6432 1 /* Default case these days */
+#define BIOS_TRANSLATION_25563 2 /* Big disk case */
+
+struct aha1542_hostdata{
+ /* This will effectively start both of them at the first mailbox */
+ int bios_translation; /* Mapping bios uses - for compatibility */
+ int aha1542_last_mbi_used;
+ int aha1542_last_mbo_used;
+ Scsi_Cmnd * SCint[AHA1542_MAILBOXES];
+ struct mailbox mb[2*AHA1542_MAILBOXES];
+ struct ccb ccb[AHA1542_MAILBOXES];
+};
+
+#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
+
+static struct Scsi_Host * aha_host[7] = {NULL,}; /* One for each IRQ level (9-15) */
+
+
+
+
+#define WAITnexttimeout 3000000
+
+static void setup_mailboxes(int base_io, struct Scsi_Host * shpnt);
+static int aha1542_restart(struct Scsi_Host * shost);
+
+#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
+
+#define WAIT(port, mask, allof, noneof) \
+ { register int WAITbits; \
+ register int WAITtimeout = WAITnexttimeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ if (--WAITtimeout == 0) goto fail; \
+ } \
+ }
+
+/* Similar to WAIT, except we use the udelay call to regulate the
+ amount of time we wait. */
+#define WAITd(port, mask, allof, noneof, timeout) \
+ { register int WAITbits; \
+ register int WAITtimeout = timeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ udelay(1000); \
+ if (--WAITtimeout == 0) goto fail; \
+ } \
+ }
+
+static void aha1542_stat(void)
+{
+/* int s = inb(STATUS), i = inb(INTRFLAGS);
+ printk("status=%x intrflags=%x\n", s, i, WAITnexttimeout-WAITtimeout); */
+}
+
+/* This is a bit complicated, but we need to make sure that an interrupt
+ routine does not send something out while we are in the middle of this.
+ Fortunately, it is only at boot time that multi-byte messages
+ are ever sent. */
+static int aha1542_out(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags = 0;
+
+ save_flags(flags);
+ if(len == 1) {
+ while(1==1){
+ WAIT(STATUS(base), CDF, 0, CDF);
+ cli();
+ if(inb(STATUS(base)) & CDF) {restore_flags(flags); continue;}
+ outb(*cmdp, DATA(base));
+ restore_flags(flags);
+ return 0;
+ }
+ } else {
+ cli();
+ while (len--)
+ {
+ WAIT(STATUS(base), CDF, 0, CDF);
+ outb(*cmdp++, DATA(base));
+ }
+ restore_flags(flags);
+ }
+ return 0;
+ fail:
+ restore_flags(flags);
+ printk("aha1542_out failed(%d): ", len+1); aha1542_stat();
+ return 1;
+}
+
+/* Only used at boot time, so we do not need to worry about latency as much
+ here */
+static int aha1542_in(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ while (len--)
+ {
+ WAIT(STATUS(base), DF, DF, 0);
+ *cmdp++ = inb(DATA(base));
+ }
+ restore_flags(flags);
+ return 0;
+ fail:
+ restore_flags(flags);
+ printk("aha1542_in failed(%d): ", len+1); aha1542_stat();
+ return 1;
+}
+
+/* Similar to aha1542_in, except that we wait a very short period of time.
+ We use this if we know the board is alive and awake, but we are not sure
+ if the board will respond to the command we are about to send or not */
+static int aha1542_in1(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ while (len--)
+ {
+ WAITd(STATUS(base), DF, DF, 0, 100);
+ *cmdp++ = inb(DATA(base));
+ }
+ restore_flags(flags);
+ return 0;
+ fail:
+ restore_flags(flags);
+ return 1;
+}
+
+static int makecode(unsigned hosterr, unsigned scsierr)
+{
+ switch (hosterr) {
+ case 0x0:
+ case 0xa: /* Linked command complete without error and linked normally */
+ case 0xb: /* Linked command complete without error, interrupt generated */
+ hosterr = 0;
+ break;
+
+ case 0x11: /* Selection time out-The initiator selection or target
+ reselection was not complete within the SCSI Time out period */
+ hosterr = DID_TIME_OUT;
+ break;
+
+ case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
+ than was allocated by the Data Length field or the sum of the
+ Scatter / Gather Data Length fields. */
+
+ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
+
+ case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was
+ invalid. This usually indicates a software failure. */
+
+ case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid.
+ This usually indicates a software failure. */
+
+ case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set
+ of linked CCB's does not specify the same logical unit number as
+ the first. */
+ case 0x18: /* Invalid Target Direction received from Host-The direction of a
+ Target Mode CCB was invalid. */
+
+ case 0x19: /* Duplicate CCB Received in Target Mode-More than once CCB was
+ received to service data transfer between the same target LUN
+ and initiator SCSI ID in the same direction. */
+
+ case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
+ length segment or invalid segment list boundaries was received.
+ A CCB parameter was invalid. */
+ DEB(printk("Aha1542: %x %x\n", hosterr, scsierr));
+ hosterr = DID_ERROR; /* Couldn't find any better */
+ break;
+
+ case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
+ phase sequence was requested by the target. The host adapter
+ will generate a SCSI Reset Condition, notifying the host with
+ a SCRD interrupt */
+ hosterr = DID_RESET;
+ break;
+ default:
+ printk("makecode: unknown hoststatus %x\n", hosterr);
+ break;
+ }
+ return scsierr|(hosterr << 16);
+}
+
+static int aha1542_test_port(int bse, struct Scsi_Host * shpnt)
+{
+ int i;
+ unchar inquiry_cmd[] = {CMD_INQUIRY };
+ unchar inquiry_result[4];
+ unchar *cmdp;
+ int len;
+ volatile int debug = 0;
+
+ /* Quick and dirty test for presence of the card. */
+ if(inb(STATUS(bse)) == 0xff) return 0;
+
+ /* Reset the adapter. I ought to make a hard reset, but it's not really necessary */
+
+ /* DEB(printk("aha1542_test_port called \n")); */
+
+ /* In case some other card was probing here, reset interrupts */
+ aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+
+ outb(SRST|IRST/*|SCRST*/, CONTROL(bse));
+
+ i = jiffies + 2;
+ while (i>jiffies); /* Wait a little bit for things to settle down. */
+
+ debug = 1;
+ /* Expect INIT and IDLE, any of the others are bad */
+ WAIT(STATUS(bse), STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
+
+ debug = 2;
+ /* Shouldn't have generated any interrupts during reset */
+ if (inb(INTRFLAGS(bse))&INTRMASK) goto fail;
+
+
+ /* Perform a host adapter inquiry instead so we do not need to set
+ up the mailboxes ahead of time */
+
+ aha1542_out(bse, inquiry_cmd, 1);
+
+ debug = 3;
+ len = 4;
+ cmdp = &inquiry_result[0];
+
+ while (len--)
+ {
+ WAIT(STATUS(bse), DF, DF, 0);
+ *cmdp++ = inb(DATA(bse));
+ }
+
+ debug = 8;
+ /* Reading port should reset DF */
+ if (inb(STATUS(bse)) & DF) goto fail;
+
+ debug = 9;
+ /* When HACC, command is completed, and we're though testing */
+ WAIT(INTRFLAGS(bse), HACC, HACC, 0);
+ /* now initialize adapter */
+
+ debug = 10;
+ /* Clear interrupts */
+ outb(IRST, CONTROL(bse));
+
+ debug = 11;
+
+ return debug; /* 1 = ok */
+ fail:
+ return 0; /* 0 = not ok */
+}
+
+/* A "high" level interrupt handler */
+static void aha1542_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+ void (*my_done)(Scsi_Cmnd *) = NULL;
+ int errstatus, mbi, mbo, mbistatus;
+ int number_serviced;
+ unsigned long flags;
+ struct Scsi_Host * shost;
+ Scsi_Cmnd * SCtmp;
+ int flag;
+ int needs_restart;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ shost = aha_host[irq - 9];
+ if(!shost) panic("Splunge!");
+
+ mb = HOSTDATA(shost)->mb;
+ ccb = HOSTDATA(shost)->ccb;
+
+#ifdef DEBUG
+ {
+ flag = inb(INTRFLAGS(shost->io_port));
+ printk("aha1542_intr_handle: ");
+ if (!(flag&ANYINTR)) printk("no interrupt?");
+ if (flag&MBIF) printk("MBIF ");
+ if (flag&MBOA) printk("MBOF ");
+ if (flag&HACC) printk("HACC ");
+ if (flag&SCRD) printk("SCRD ");
+ printk("status %02x\n", inb(STATUS(shost->io_port)));
+ };
+#endif
+ number_serviced = 0;
+ needs_restart = 0;
+
+ while(1==1){
+ flag = inb(INTRFLAGS(shost->io_port));
+
+ /* Check for unusual interrupts. If any of these happen, we should
+ probably do something special, but for now just printing a message
+ is sufficient. A SCSI reset detected is something that we really
+ need to deal with in some way. */
+ if (flag & ~MBIF) {
+ if (flag&MBOA) printk("MBOF ");
+ if (flag&HACC) printk("HACC ");
+ if (flag&SCRD) {
+ needs_restart = 1;
+ printk("SCRD ");
+ }
+ }
+
+ aha1542_intr_reset(shost->io_port);
+
+ save_flags(flags);
+ cli();
+ mbi = HOSTDATA(shost)->aha1542_last_mbi_used + 1;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+
+ do{
+ if(mb[mbi].status != 0) break;
+ mbi++;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+ } while (mbi != HOSTDATA(shost)->aha1542_last_mbi_used);
+
+ if(mb[mbi].status == 0){
+ restore_flags(flags);
+ /* Hmm, no mail. Must have read it the last time around */
+ if (!number_serviced && !needs_restart)
+ printk("aha1542.c: interrupt received, but no mail.\n");
+ /* We detected a reset. Restart all pending commands for
+ devices that use the hard reset option */
+ if(needs_restart) aha1542_restart(shost);
+ return;
+ };
+
+ mbo = (scsi2int(mb[mbi].ccbptr) - ((unsigned int) &ccb[0])) / sizeof(struct ccb);
+ mbistatus = mb[mbi].status;
+ mb[mbi].status = 0;
+ HOSTDATA(shost)->aha1542_last_mbi_used = mbi;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ {
+ if (ccb[mbo].tarstat|ccb[mbo].hastat)
+ printk("aha1542_command: returning %x (status %d)\n",
+ ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
+ };
+#endif
+
+ if(mbistatus == 3) continue; /* Aborted command not found */
+
+#ifdef DEBUG
+ printk("...done %d %d\n",mbo, mbi);
+#endif
+
+ SCtmp = HOSTDATA(shost)->SCint[mbo];
+
+ if (!SCtmp || !SCtmp->scsi_done) {
+ printk("aha1542_intr_handle: Unexpected interrupt\n");
+ printk("tarstat=%x, hastat=%x idlun=%x ccb#=%d \n", ccb[mbo].tarstat,
+ ccb[mbo].hastat, ccb[mbo].idlun, mbo);
+ return;
+ }
+
+ my_done = SCtmp->scsi_done;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+
+ /* Fetch the sense data, and tuck it away, in the required slot. The
+ Adaptec automatically fetches it, and there is no guarantee that
+ we will still have it in the cdb when we come back */
+ if (ccb[mbo].tarstat == 2)
+ memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
+ sizeof(SCtmp->sense_buffer));
+
+
+ /* is there mail :-) */
+
+ /* more error checking left out here */
+ if (mbistatus != 1)
+ /* This is surely wrong, but I don't know what's right */
+ errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat);
+ else
+ errstatus = 0;
+
+#ifdef DEBUG
+ if(errstatus) printk("(aha1542 error:%x %x %x) ",errstatus,
+ ccb[mbo].hastat, ccb[mbo].tarstat);
+#endif
+
+ if (ccb[mbo].tarstat == 2) {
+#ifdef DEBUG
+ int i;
+#endif
+ DEB(printk("aha1542_intr_handle: sense:"));
+#ifdef DEBUG
+ for (i = 0; i < 12; i++)
+ printk("%02x ", ccb[mbo].cdb[ccb[mbo].cdblen+i]);
+ printk("\n");
+#endif
+ /*
+ DEB(printk("aha1542_intr_handle: buf:"));
+ for (i = 0; i < bufflen; i++)
+ printk("%02x ", ((unchar *)buff)[i]);
+ printk("\n");
+ */
+ }
+ DEB(if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus));
+ SCtmp->result = errstatus;
+ HOSTDATA(shost)->SCint[mbo] = NULL; /* This effectively frees up the mailbox slot, as
+ far as queuecommand is concerned */
+ my_done(SCtmp);
+ number_serviced++;
+ };
+}
+
+int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar ahacmd = CMD_START_SCSI;
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = SCpnt->target;
+ unchar lun = SCpnt->lun;
+ unsigned long flags;
+ void *buff = SCpnt->request_buffer;
+ int bufflen = SCpnt->request_bufflen;
+ int mbo;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ DEB(int i);
+
+ mb = HOSTDATA(SCpnt->host)->mb;
+ ccb = HOSTDATA(SCpnt->host)->ccb;
+
+ DEB(if (target > 1) {
+ SCpnt->result = DID_TIME_OUT << 16;
+ done(SCpnt); return 0;});
+
+ if(*cmd == REQUEST_SENSE){
+#ifndef DEBUG
+ if (bufflen != sizeof(SCpnt->sense_buffer)) {
+ printk("Wrong buffer length supplied for request sense (%d)\n",bufflen);
+ };
+#endif
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ };
+
+#ifdef DEBUG
+ if (*cmd == READ_10 || *cmd == WRITE_10)
+ i = xscsi2int(cmd+2);
+ else if (*cmd == READ_6 || *cmd == WRITE_6)
+ i = scsi2int(cmd+2);
+ else
+ i = -1;
+ if (done)
+ printk("aha1542_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
+ else
+ printk("aha1542_command: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
+ aha1542_stat();
+ printk("aha1542_queuecommand: dumping scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+ if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ return 0; /* we are still testing, so *don't* write */
+#endif
+/* Use the outgoing mailboxes in a round-robin fashion, because this
+ is how the host adapter will scan for them */
+
+ save_flags(flags);
+ cli();
+ mbo = HOSTDATA(SCpnt->host)->aha1542_last_mbo_used + 1;
+ if (mbo >= AHA1542_MAILBOXES) mbo = 0;
+
+ do{
+ if(mb[mbo].status == 0 && HOSTDATA(SCpnt->host)->SCint[mbo] == NULL)
+ break;
+ mbo++;
+ if (mbo >= AHA1542_MAILBOXES) mbo = 0;
+ } while (mbo != HOSTDATA(SCpnt->host)->aha1542_last_mbo_used);
+
+ if(mb[mbo].status || HOSTDATA(SCpnt->host)->SCint[mbo])
+ panic("Unable to find empty mailbox for aha1542.\n");
+
+ HOSTDATA(SCpnt->host)->SCint[mbo] = SCpnt; /* This will effectively prevent someone else from
+ screwing with this cdb. */
+
+ HOSTDATA(SCpnt->host)->aha1542_last_mbo_used = mbo;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ printk("Sending command (%d %x)...",mbo, done);
+#endif
+
+ any2scsi(mb[mbo].ccbptr, &ccb[mbo]); /* This gets trashed for some reason*/
+
+ memset(&ccb[mbo], 0, sizeof(struct ccb));
+
+ ccb[mbo].cdblen = SCpnt->cmd_len;
+
+ direction = 0;
+ if (*cmd == READ_10 || *cmd == READ_6)
+ direction = 8;
+ else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 16;
+
+ memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
+
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ struct chain * cptr;
+#ifdef DEBUG
+ unsigned char * ptr;
+#endif
+ int i;
+ ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather*/
+ SCpnt->host_scribble = (unsigned char *) scsi_malloc(512);
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+ cptr = (struct chain *) SCpnt->host_scribble;
+ if (cptr == NULL) panic("aha1542.c: unable to allocate DMA memory\n");
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if(sgpnt[i].length == 0 || SCpnt->use_sg > 16 ||
+ (((int)sgpnt[i].address) & 1) || (sgpnt[i].length & 1)){
+ unsigned char * ptr;
+ printk("Bad segment list supplied to aha1542.c (%d, %d)\n",SCpnt->use_sg,i);
+ for(i=0;i<SCpnt->use_sg;i++){
+ printk("%d: %x %x %d\n",i,(unsigned int) sgpnt[i].address, (unsigned int) sgpnt[i].alt_address,
+ sgpnt[i].length);
+ };
+ printk("cptr %x: ",(unsigned int) cptr);
+ ptr = (unsigned char *) &cptr[i];
+ for(i=0;i<18;i++) printk("%02x ", ptr[i]);
+ panic("Foooooooood fight!");
+ };
+ any2scsi(cptr[i].dataptr, sgpnt[i].address);
+ if(((unsigned int) sgpnt[i].address) & 0xff000000) goto baddma;
+ any2scsi(cptr[i].datalen, sgpnt[i].length);
+ };
+ any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
+ any2scsi(ccb[mbo].dataptr, cptr);
+#ifdef DEBUG
+ printk("cptr %x: ",cptr);
+ ptr = (unsigned char *) cptr;
+ for(i=0;i<18;i++) printk("%02x ", ptr[i]);
+#endif
+ } else {
+ ccb[mbo].op = 0; /* SCSI Initiator Command */
+ SCpnt->host_scribble = NULL;
+ any2scsi(ccb[mbo].datalen, bufflen);
+ if(((unsigned int) buff & 0xff000000)) goto baddma;
+ any2scsi(ccb[mbo].dataptr, buff);
+ };
+ ccb[mbo].idlun = (target&7)<<5 | direction | (lun & 7); /*SCSI Target Id*/
+ ccb[mbo].rsalen = 16;
+ ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
+ ccb[mbo].commlinkid = 0;
+
+#ifdef DEBUG
+ { int i;
+ printk("aha1542_command: sending.. ");
+ for (i = 0; i < sizeof(ccb[mbo])-10; i++)
+ printk("%02x ", ((unchar *)&ccb[mbo])[i]);
+ };
+#endif
+
+ if (done) {
+ DEB(printk("aha1542_queuecommand: now waiting for interrupt "); aha1542_stat());
+ SCpnt->scsi_done = done;
+ mb[mbo].status = 1;
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
+ DEB(aha1542_stat());
+ }
+ else
+ printk("aha1542_queuecommand: done can't be NULL\n");
+
+ return 0;
+ baddma:
+ panic("Buffer at address > 16Mb used for 1542B");
+}
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ SCpnt->SCp.Status++;
+}
+
+int aha1542_command(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("aha1542_command: ..calling aha1542_queuecommand\n"));
+
+ aha1542_queuecommand(SCpnt, internal_done);
+
+ SCpnt->SCp.Status = 0;
+ while (!SCpnt->SCp.Status)
+ barrier();
+ return SCpnt->result;
+}
+
+/* Initialize mailboxes */
+static void setup_mailboxes(int bse, struct Scsi_Host * shpnt)
+{
+ int i;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ unchar cmd[5] = {CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+
+ mb = HOSTDATA(shpnt)->mb;
+ ccb = HOSTDATA(shpnt)->ccb;
+
+ for(i=0; i<AHA1542_MAILBOXES; i++){
+ mb[i].status = mb[AHA1542_MAILBOXES+i].status = 0;
+ any2scsi(mb[i].ccbptr, &ccb[i]);
+ };
+ aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+ any2scsi((cmd+2), mb);
+ aha1542_out(bse, cmd, 5);
+ WAIT(INTRFLAGS(bse), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+ printk("aha1542_detect: failed setting up mailboxes\n");
+ }
+ aha1542_intr_reset(bse);
+}
+
+static int aha1542_getconfig(int base_io, unsigned char * irq_level, unsigned char * dma_chan, unsigned char * scsi_id)
+{
+ unchar inquiry_cmd[] = {CMD_RETCONF };
+ unchar inquiry_result[3];
+ int i;
+ i = inb(STATUS(base_io));
+ if (i & DF) {
+ i = inb(DATA(base_io));
+ };
+ aha1542_out(base_io, inquiry_cmd, 1);
+ aha1542_in(base_io, inquiry_result, 3);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+ printk("aha1542_detect: query board settings\n");
+ }
+ aha1542_intr_reset(base_io);
+ switch(inquiry_result[0]){
+ case 0x80:
+ *dma_chan = 7;
+ break;
+ case 0x40:
+ *dma_chan = 6;
+ break;
+ case 0x20:
+ *dma_chan = 5;
+ break;
+ case 0x01:
+ *dma_chan = 0;
+ break;
+ case 0:
+ /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
+ Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
+ *dma_chan = 0xFF;
+ break;
+ default:
+ printk("Unable to determine Adaptec DMA priority. Disabling board\n");
+ return -1;
+ };
+ switch(inquiry_result[1]){
+ case 0x40:
+ *irq_level = 15;
+ break;
+ case 0x20:
+ *irq_level = 14;
+ break;
+ case 0x8:
+ *irq_level = 12;
+ break;
+ case 0x4:
+ *irq_level = 11;
+ break;
+ case 0x2:
+ *irq_level = 10;
+ break;
+ case 0x1:
+ *irq_level = 9;
+ break;
+ default:
+ printk("Unable to determine Adaptec IRQ level. Disabling board\n");
+ return -1;
+ };
+ *scsi_id=inquiry_result[2] & 7;
+ return 0;
+}
+
+/* This function should only be called for 1542C boards - we can detect
+ the special firmware settings and unlock the board */
+
+static int aha1542_mbenable(int base)
+{
+ static unchar mbenable_cmd[3];
+ static unchar mbenable_result[2];
+ int retval;
+
+ retval = BIOS_TRANSLATION_6432;
+
+ mbenable_cmd[0]=CMD_EXTBIOS;
+ aha1542_out(base,mbenable_cmd,1);
+ if(aha1542_in1(base,mbenable_result,2))
+ return retval;
+ WAITd(INTRFLAGS(base),INTRMASK,HACC,0,100);
+ aha1542_intr_reset(base);
+
+ if ((mbenable_result[0] & 0x08) || mbenable_result[1]) {
+ mbenable_cmd[0]=CMD_MBENABLE;
+ mbenable_cmd[1]=0;
+ mbenable_cmd[2]=mbenable_result[1];
+
+ if((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03)) retval = BIOS_TRANSLATION_25563;
+
+ aha1542_out(base,mbenable_cmd,3);
+ WAIT(INTRFLAGS(base),INTRMASK,HACC,0);
+ };
+ while(0) {
+fail:
+ printk("aha1542_mbenable: Mailbox init failed\n");
+ }
+aha1542_intr_reset(base);
+return retval;
+}
+
+/* Query the board to find out if it is a 1542 or a 1740, or whatever. */
+static int aha1542_query(int base_io, int * transl)
+{
+ unchar inquiry_cmd[] = {CMD_INQUIRY };
+ unchar inquiry_result[4];
+ int i;
+ i = inb(STATUS(base_io));
+ if (i & DF) {
+ i = inb(DATA(base_io));
+ };
+ aha1542_out(base_io, inquiry_cmd, 1);
+ aha1542_in(base_io, inquiry_result, 4);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+ printk("aha1542_detect: query card type\n");
+ }
+ aha1542_intr_reset(base_io);
+
+ *transl = BIOS_TRANSLATION_6432; /* Default case */
+
+/* For an AHA1740 series board, we ignore the board since there is a
+ hardware bug which can lead to wrong blocks being returned if the board
+ is operating in the 1542 emulation mode. Since there is an extended mode
+ driver, we simply ignore the board and let the 1740 driver pick it up.
+*/
+
+ if (inquiry_result[0] == 0x43) {
+ printk("aha1542.c: Emulation mode not supported for AHA 174N hardware.\n");
+ return 1;
+ };
+
+ /* Always call this - boards that do not support extended bios translation
+ will ignore the command, and we will set the proper default */
+
+ *transl = aha1542_mbenable(base_io);
+
+ return 0;
+}
+
+/* called from init/main.c */
+void aha1542_setup( char *str, int *ints)
+{
+ const char *ahausage = "aha1542: usage: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]\n";
+ static int setup_idx = 0;
+ int setup_portbase;
+
+ if(setup_idx >= MAXBOARDS)
+ {
+ printk("aha1542: aha1542_setup called too many times! Bad LILO params ?\n");
+ printk(" Entryline 1: %s\n",setup_str[0]);
+ printk(" Entryline 2: %s\n",setup_str[1]);
+ printk(" This line: %s\n",str);
+ return;
+ }
+ if (ints[0] < 1 || ints[0] > 4)
+ {
+ printk("aha1542: %s\n", str );
+ printk("%s", ahausage);
+ printk("aha1542: Wrong parameters may cause system malfunction.. We try anyway..\n");
+ }
+
+ setup_called[setup_idx]=ints[0];
+ setup_str[setup_idx]=str;
+
+ setup_portbase = ints[0] >= 1 ? ints[1] : 0; /* Preserve the default value.. */
+ setup_buson [setup_idx] = ints[0] >= 2 ? ints[2] : 7;
+ setup_busoff [setup_idx] = ints[0] >= 3 ? ints[3] : 5;
+ if (ints[0] >= 4) {
+ int atbt = -1;
+ switch (ints[4]) {
+ case 5:
+ atbt = 0x00;
+ break;
+ case 6:
+ atbt = 0x04;
+ break;
+ case 7:
+ atbt = 0x01;
+ break;
+ case 8:
+ atbt = 0x02;
+ break;
+ case 10:
+ atbt = 0x03;
+ break;
+ default:
+ printk("aha1542: %s\n", str );
+ printk("%s", ahausage);
+ printk("aha1542: Valid values for DMASPEED are 5-8, 10 MB/s. Using jumper defaults.\n");
+ break;
+ }
+ setup_dmaspeed[setup_idx] = atbt;
+ }
+
+ if (setup_portbase != 0)
+ bases[setup_idx] = setup_portbase;
+
+ ++setup_idx;
+}
+
+/* return non-zero on detection */
+int aha1542_detect(Scsi_Host_Template * tpnt)
+{
+ unsigned char dma_chan;
+ unsigned char irq_level;
+ unsigned char scsi_id;
+ unsigned long flags;
+ unsigned int base_io;
+ int trans;
+ struct Scsi_Host * shpnt = NULL;
+ int count = 0;
+ int indx;
+
+ DEB(printk("aha1542_detect: \n"));
+
+ tpnt->proc_dir = &proc_scsi_aha1542;
+
+ for(indx = 0; indx < sizeof(bases)/sizeof(bases[0]); indx++)
+ if(bases[indx] != 0 && !check_region(bases[indx], 4)) {
+ shpnt = scsi_register(tpnt,
+ sizeof(struct aha1542_hostdata));
+
+ /* For now we do this - until kmalloc is more intelligent
+ we are resigned to stupid hacks like this */
+ if ((unsigned int) shpnt > 0xffffff) {
+ printk("Invalid address for shpnt with 1542.\n");
+ goto unregister;
+ }
+
+ if(!aha1542_test_port(bases[indx], shpnt)) goto unregister;
+
+
+ base_io = bases[indx];
+
+ /* Set the Bus on/off-times as not to ruin floppy performance */
+ {
+ unchar oncmd[] = {CMD_BUSON_TIME, 7};
+ unchar offcmd[] = {CMD_BUSOFF_TIME, 5};
+
+ if(setup_called[indx])
+ {
+ oncmd[1] = setup_buson[indx];
+ offcmd[1] = setup_busoff[indx];
+ }
+
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, oncmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, offcmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ if (setup_dmaspeed[indx] >= 0)
+ {
+ unchar dmacmd[] = {CMD_DMASPEED, 0};
+ dmacmd[1] = setup_dmaspeed[indx];
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, dmacmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ }
+ while (0) {
+ fail:
+ printk("aha1542_detect: setting bus on/off-time failed\n");
+ }
+ aha1542_intr_reset(base_io);
+ }
+ if(aha1542_query(base_io, &trans)) goto unregister;
+
+ if (aha1542_getconfig(base_io, &irq_level, &dma_chan, &scsi_id) == -1) goto unregister;
+
+ printk("Configuring Adaptec (SCSI-ID %d) at IO:%x, IRQ %d", scsi_id, base_io, irq_level);
+ if (dma_chan != 0xFF)
+ printk(", DMA priority %d", dma_chan);
+ printk("\n");
+
+ DEB(aha1542_stat());
+ setup_mailboxes(base_io, shpnt);
+
+ DEB(aha1542_stat());
+
+ DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
+ save_flags(flags);
+ cli();
+ if (request_irq(irq_level,aha1542_intr_handle, 0, "aha1542", NULL)) {
+ printk("Unable to allocate IRQ for adaptec controller.\n");
+ restore_flags(flags);
+ goto unregister;
+ }
+
+ if (dma_chan != 0xFF) {
+ if (request_dma(dma_chan,"aha1542")) {
+ printk("Unable to allocate DMA channel for Adaptec.\n");
+ free_irq(irq_level, NULL);
+ restore_flags(flags);
+ goto unregister;
+ }
+
+ if (dma_chan == 0 || dma_chan >= 5) {
+ set_dma_mode(dma_chan, DMA_MODE_CASCADE);
+ enable_dma(dma_chan);
+ }
+ }
+ aha_host[irq_level - 9] = shpnt;
+ shpnt->this_id = scsi_id;
+ shpnt->unique_id = base_io;
+ shpnt->io_port = base_io;
+ shpnt->n_io_port = 4; /* Number of bytes of I/O space used */
+ shpnt->dma_channel = dma_chan;
+ shpnt->irq = irq_level;
+ HOSTDATA(shpnt)->bios_translation = trans;
+ if(trans == BIOS_TRANSLATION_25563)
+ printk("aha1542.c: Using extended bios translation\n");
+ HOSTDATA(shpnt)->aha1542_last_mbi_used = (2*AHA1542_MAILBOXES - 1);
+ HOSTDATA(shpnt)->aha1542_last_mbo_used = (AHA1542_MAILBOXES - 1);
+ memset(HOSTDATA(shpnt)->SCint, 0, sizeof(HOSTDATA(shpnt)->SCint));
+ restore_flags(flags);
+#if 0
+ DEB(printk(" *** READ CAPACITY ***\n"));
+
+ {
+ unchar buf[8];
+ static unchar cmd[] = { READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ int i;
+
+ for (i = 0; i < sizeof(buf); ++i) buf[i] = 0x87;
+ for (i = 0; i < 2; ++i)
+ if (!aha1542_command(i, cmd, buf, sizeof(buf))) {
+ printk("aha_detect: LU %d sector_size %d device_size %d\n",
+ i, xscsi2int(buf+4), xscsi2int(buf));
+ }
+ }
+
+ DEB(printk(" *** NOW RUNNING MY OWN TEST *** \n"));
+
+ for (i = 0; i < 4; ++i)
+ {
+ unsigned char cmd[10];
+ static buffer[512];
+
+ cmd[0] = READ_10;
+ cmd[1] = 0;
+ xany2scsi(cmd+2, i);
+ cmd[6] = 0;
+ cmd[7] = 0;
+ cmd[8] = 1;
+ cmd[9] = 0;
+ aha1542_command(0, cmd, buffer, 512);
+ }
+#endif
+ request_region(bases[indx], 4,"aha1542"); /* Register the IO ports that we use */
+ count++;
+ continue;
+ unregister:
+ scsi_unregister(shpnt);
+ continue;
+
+ };
+
+ return count;
+}
+
+static int aha1542_restart(struct Scsi_Host * shost)
+{
+ int i;
+ int count = 0;
+#if 0
+ unchar ahacmd = CMD_START_SCSI;
+#endif
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(shost)->SCint[i] &&
+ !(HOSTDATA(shost)->SCint[i]->device->soft_reset))
+ {
+#if 0
+ HOSTDATA(shost)->mb[i].status = 1; /* Indicate ready to restart... */
+#endif
+ count++;
+ }
+
+ printk("Potential to restart %d stalled commands...\n", count);
+#if 0
+ /* start scsi command */
+ if (count) aha1542_out(shost->io_port, &ahacmd, 1);
+#endif
+ return 0;
+}
+
+/* The abort command does not leave the device in a clean state where
+ it is available to be used again. Until this gets worked out, we will
+ leave it commented out. */
+
+int aha1542_abort(Scsi_Cmnd * SCpnt)
+{
+#if 0
+ unchar ahacmd = CMD_START_SCSI;
+ unsigned long flags;
+ struct mailbox * mb;
+ int mbi, mbo, i;
+
+ printk("In aha1542_abort: %x %x\n",
+ inb(STATUS(SCpnt->host->io_port)),
+ inb(INTRFLAGS(SCpnt->host->io_port)));
+
+ save_flags(flags);
+ cli();
+ mb = HOSTDATA(SCpnt->host)->mb;
+ mbi = HOSTDATA(SCpnt->host)->aha1542_last_mbi_used + 1;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+
+ do{
+ if(mb[mbi].status != 0) break;
+ mbi++;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+ } while (mbi != HOSTDATA(SCpnt->host)->aha1542_last_mbi_used);
+ restore_flags(flags);
+
+ if(mb[mbi].status) {
+ printk("Lost interrupt discovered on irq %d - attempting to recover\n",
+ SCpnt->host->irq);
+ aha1542_intr_handle(SCpnt->host->irq, NULL);
+ return 0;
+ }
+
+ /* OK, no lost interrupt. Try looking to see how many pending commands
+ we think we have. */
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i])
+ {
+ if(HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) {
+ printk("Timed out command pending for %s\n",
+ kdevname(SCpnt->request.rq_dev));
+ if (HOSTDATA(SCpnt->host)->mb[i].status) {
+ printk("OGMB still full - restarting\n");
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
+ };
+ } else
+ printk("Other pending command %s\n",
+ kdevname(SCpnt->request.rq_dev));
+ }
+
+#endif
+
+ DEB(printk("aha1542_abort\n"));
+#if 0
+ save_flags(flags);
+ cli();
+ for(mbo = 0; mbo < AHA1542_MAILBOXES; mbo++)
+ if (SCpnt == HOSTDATA(SCpnt->host)->SCint[mbo]){
+ mb[mbo].status = 2; /* Abort command */
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
+ restore_flags(flags);
+ break;
+ };
+#endif
+ return SCSI_ABORT_SNOOZE;
+}
+
+/* We do not implement a reset function here, but the upper level code
+ assumes that it will get some kind of response for the command in
+ SCpnt. We must oblige, or the command will hang the scsi system.
+ For a first go, we assume that the 1542 notifies us with all of the
+ pending commands (it does implement soft reset, after all). */
+
+int aha1542_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ unchar ahacmd = CMD_START_SCSI;
+ int i;
+
+ /*
+ * See if a bus reset was suggested.
+ */
+ if( reset_flags & SCSI_RESET_SUGGEST_BUS_RESET )
+ {
+ /*
+ * This does a scsi reset for all devices on the bus.
+ * In principle, we could also reset the 1542 - should
+ * we do this? Try this first, and we can add that later
+ * if it turns out to be useful.
+ */
+ outb(HRST | SCRST, CONTROL(SCpnt->host->io_port));
+
+ /*
+ * Wait for the thing to settle down a bit. Unfortunately
+ * this is going to basically lock up the machine while we
+ * wait for this to complete. To be 100% correct, we need to
+ * check for timeout, and if we are doing something like this
+ * we are pretty desperate anyways.
+ */
+ WAIT(STATUS(SCpnt->host->io_port),
+ STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
+
+ /*
+ * We need to do this too before the 1542 can interact with
+ * us again.
+ */
+ setup_mailboxes(SCpnt->host->io_port, SCpnt->host);
+
+ /*
+ * Now try to pick up the pieces. Restart all commands
+ * that are currently active on the bus, and reset all of
+ * the datastructures. We have some time to kill while
+ * things settle down, so print a nice message.
+ */
+ printk("Sent BUS RESET to scsi host %d\n", SCpnt->host->host_no);
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] != NULL)
+ {
+ Scsi_Cmnd * SCtmp;
+ SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
+ SCtmp->result = DID_RESET << 16;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+ printk("Sending DID_RESET for target %d\n", SCpnt->target);
+ SCtmp->scsi_done(SCpnt);
+
+ HOSTDATA(SCpnt->host)->SCint[i] = NULL;
+ HOSTDATA(SCpnt->host)->mb[i].status = 0;
+ }
+ /*
+ * Now tell the mid-level code what we did here. Since
+ * we have restarted all of the outstanding commands,
+ * then report SUCCESS.
+ */
+ return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET);
+fail:
+ printk("aha1542.c: Unable to perform hard reset.\n");
+ printk("Power cycle machine to reset\n");
+ return (SCSI_RESET_ERROR | SCSI_RESET_BUS_RESET);
+
+
+ }
+ else
+ {
+ /* This does a selective reset of just the one device */
+ /* First locate the ccb for this command */
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] == SCpnt)
+ {
+ HOSTDATA(SCpnt->host)->ccb[i].op = 0x81; /* BUS DEVICE RESET */
+ /* Now tell the 1542 to flush all pending commands for this target */
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
+
+ /* Here is the tricky part. What to do next. Do we get an interrupt
+ for the commands that we aborted with the specified target, or
+ do we generate this on our own? Try it without first and see
+ what happens */
+ printk("Sent BUS DEVICE RESET to target %d\n", SCpnt->target);
+
+ /* If the first does not work, then try the second. I think the
+ first option is more likely to be correct. Free the command
+ block for all commands running on this target... */
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] &&
+ HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target)
+ {
+ Scsi_Cmnd * SCtmp;
+ SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
+ SCtmp->result = DID_RESET << 16;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+ printk("Sending DID_RESET for target %d\n", SCpnt->target);
+ SCtmp->scsi_done(SCpnt);
+
+ HOSTDATA(SCpnt->host)->SCint[i] = NULL;
+ HOSTDATA(SCpnt->host)->mb[i].status = 0;
+ }
+ return SCSI_RESET_SUCCESS;
+ }
+ }
+ /* No active command at this time, so this means that each time we got
+ some kind of response the last time through. Tell the mid-level code
+ to request sense information in order to decide what to do next. */
+ return SCSI_RESET_PUNT;
+}
+
+#include "sd.h"
+
+int aha1542_biosparam(Scsi_Disk * disk, kdev_t dev, int * ip)
+{
+ int translation_algorithm;
+ int size = disk->capacity;
+
+ translation_algorithm = HOSTDATA(disk->device->host)->bios_translation;
+
+ if((size>>11) > 1024 && translation_algorithm == BIOS_TRANSLATION_25563) {
+ /* Please verify that this is the same as what DOS returns */
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = size /255/63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AHA1542;
+
+#include "scsi_module.c"
+#endif
+
diff --git a/linux/src/drivers/scsi/aha1542.h b/linux/src/drivers/scsi/aha1542.h
new file mode 100644
index 0000000..4f90c1c
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1542.h
@@ -0,0 +1,170 @@
+#ifndef _AHA1542_H
+
+/* $Id: aha1542.h,v 1.1.4.1 2007/03/27 21:04:30 tschwinge Exp $
+ *
+ * Header file for the adaptec 1542 driver for Linux
+ *
+ * Revision 1.1 1992/07/24 06:27:38 root
+ * Initial revision
+ *
+ * Revision 1.2 1992/07/04 18:41:49 root
+ * Replaced distribution with current drivers
+ *
+ * Revision 1.3 1992/06/23 23:58:20 root
+ * Fixes.
+ *
+ * Revision 1.2 1992/05/26 22:13:23 root
+ * Changed bug that prevented DMA above first 2 mbytes.
+ *
+ * Revision 1.1 1992/05/22 21:00:29 root
+ * Initial revision
+ *
+ * Revision 1.1 1992/04/24 18:01:50 root
+ * Initial revision
+ *
+ * Revision 1.1 1992/04/02 03:23:13 drew
+ * Initial revision
+ *
+ * Revision 1.3 1992/01/27 14:46:29 tthorn
+ * *** empty log message ***
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+/* I/O Port interface 4.2 */
+/* READ */
+#define STATUS(base) base
+#define STST 0x80 /* Self Test in Progress */
+#define DIAGF 0x40 /* Internal Diagnostic Failure */
+#define INIT 0x20 /* Mailbox Initialization Required */
+#define IDLE 0x10 /* SCSI Host Adapter Idle */
+#define CDF 0x08 /* Command/Data Out Port Full */
+#define DF 0x04 /* Data In Port Full */
+#define INVDCMD 0x01 /* Invalid H A Command */
+#define STATMASK 0xfd /* 0x02 is reserved */
+
+#define INTRFLAGS(base) (STATUS(base)+2)
+#define ANYINTR 0x80 /* Any Interrupt */
+#define SCRD 0x08 /* SCSI Reset Detected */
+#define HACC 0x04 /* HA Command Complete */
+#define MBOA 0x02 /* MBO Empty */
+#define MBIF 0x01 /* MBI Full */
+#define INTRMASK 0x8f
+
+/* WRITE */
+#define CONTROL(base) STATUS(base)
+#define HRST 0x80 /* Hard Reset */
+#define SRST 0x40 /* Soft Reset */
+#define IRST 0x20 /* Interrupt Reset */
+#define SCRST 0x10 /* SCSI Bus Reset */
+
+/* READ/WRITE */
+#define DATA(base) (STATUS(base)+1)
+#define CMD_NOP 0x00 /* No Operation */
+#define CMD_MBINIT 0x01 /* Mailbox Initialization */
+#define CMD_START_SCSI 0x02 /* Start SCSI Command */
+#define CMD_INQUIRY 0x04 /* Adapter Inquiry */
+#define CMD_EMBOI 0x05 /* Enable MailBox Out Interrupt */
+#define CMD_BUSON_TIME 0x07 /* Set Bus-On Time */
+#define CMD_BUSOFF_TIME 0x08 /* Set Bus-Off Time */
+#define CMD_DMASPEED 0x09 /* Set AT Bus Transfer Speed */
+#define CMD_RETDEVS 0x0a /* Return Installed Devices */
+#define CMD_RETCONF 0x0b /* Return Configuration Data */
+#define CMD_RETSETUP 0x0d /* Return Setup Data */
+#define CMD_ECHO 0x1f /* ECHO Command Data */
+
+#define CMD_EXTBIOS 0x28 /* Return extend bios information only 1542C */
+#define CMD_MBENABLE 0x29 /* Set Mailbox Interface enable only 1542C */
+
+/* Mailbox Definition 5.2.1 and 5.2.2 */
+struct mailbox {
+ unchar status; /* Command/Status */
+ unchar ccbptr[3]; /* msb, .., lsb */
+};
+
+/* This is used with scatter-gather */
+struct chain {
+ unchar datalen[3]; /* Size of this part of chain */
+ unchar dataptr[3]; /* Location of data */
+};
+
+/* These belong in scsi.h also */
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16) ; \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+
+struct ccb { /* Command Control Block 5.3 */
+ unchar op; /* Command Control Block Operation Code */
+ unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked*/
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ unchar cdblen; /* SCSI Command Length */
+ unchar rsalen; /* Request Sense Allocation Length/Disable */
+ unchar datalen[3]; /* Data Length (msb, .., lsb) */
+ unchar dataptr[3]; /* Data Pointer */
+ unchar linkptr[3]; /* Link Pointer */
+ unchar commlinkid; /* Command Linking Identifier */
+ unchar hastat; /* Host Adapter Status (HASTAT) */
+ unchar tarstat; /* Target Device Status */
+ unchar reserved[2];
+ unchar cdb[MAX_CDB+MAX_SENSE];/* SCSI Command Descriptor Block */
+ /* REQUEST SENSE */
+};
+
+int aha1542_detect(Scsi_Host_Template *);
+int aha1542_command(Scsi_Cmnd *);
+int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha1542_abort(Scsi_Cmnd *);
+int aha1542_reset(Scsi_Cmnd *, unsigned int);
+int aha1542_biosparam(Disk *, kdev_t, int*);
+
+#define AHA1542_MAILBOXES 8
+#define AHA1542_SCATTER 16
+#define AHA1542_CMDLUN 1
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+extern struct proc_dir_entry proc_scsi_aha1542;
+
+#define AHA1542 { NULL, NULL, \
+ &proc_scsi_aha1542,/* proc_dir_entry */ \
+ NULL, \
+ "Adaptec 1542", \
+ aha1542_detect, \
+ NULL, \
+ NULL, \
+ aha1542_command, \
+ aha1542_queuecommand, \
+ aha1542_abort, \
+ aha1542_reset, \
+ NULL, \
+ aha1542_biosparam, \
+ AHA1542_MAILBOXES, \
+ 7, \
+ AHA1542_SCATTER, \
+ AHA1542_CMDLUN, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING}
+
+#endif
diff --git a/linux/src/drivers/scsi/aha1740.c b/linux/src/drivers/scsi/aha1740.c
new file mode 100644
index 0000000..013218c
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1740.c
@@ -0,0 +1,614 @@
+/* $Id: aha1740.c,v 1.1 1999/04/26 05:54:13 tb Exp $
+ * 1993/03/31
+ * linux/kernel/aha1740.c
+ *
+ * Based loosely on aha1542.c which is
+ * Copyright (C) 1992 Tommy Thorn and
+ * Modified by Eric Youngdale
+ *
+ * This file is aha1740.c, written and
+ * Copyright (C) 1992,1993 Brad McLean
+ *
+ * Modifications to makecode and queuecommand
+ * for proper handling of multiple devices courteously
+ * provided by Michael Weller, March, 1993
+ *
+ * Multiple adapter support, extended translation detection,
+ * update to current scsi subsystem changes, proc fs support,
+ * working (!) module support based on patches from Andreas Arens,
+ * by Andreas Degert <ad@papyrus.hamburg.com>, 2/1997
+ *
+ * aha1740_makecode may still need even more work
+ * if it doesn't work for your devices, take a look.
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/dma.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "aha1740.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_aha1740 = {
+ PROC_SCSI_AHA1740, 7, "aha1740",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
+ IT WORK, THEN:
+#define DEBUG
+*/
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/aha1740.c,v 1.1 1999/04/26 05:54:13 tb Exp $";
+*/
+
+struct aha1740_hostdata {
+ unsigned int slot;
+ unsigned int translation;
+ unsigned int last_ecb_used;
+ struct ecb ecb[AHA1740_ECBS];
+};
+
+#define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata)
+
+/* One for each IRQ level (9-15) */
+static struct Scsi_Host * aha_host[8] = {NULL, };
+
+int aha1740_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout)
+{
+ int len;
+ struct Scsi_Host * shpnt;
+ struct aha1740_hostdata *host;
+
+ if (inout)
+ return(-ENOSYS);
+
+ for (len = 0; len < 8; len++) {
+ shpnt = aha_host[len];
+ if (shpnt && shpnt->host_no == hostno)
+ break;
+ }
+ host = HOSTDATA(shpnt);
+
+ len = sprintf(buffer, "aha174x at IO:%x, IRQ %d, SLOT %d.\n"
+ "Extended translation %sabled.\n",
+ shpnt->io_port, shpnt->irq, host->slot,
+ host->translation ? "en" : "dis");
+
+ if (offset > len) {
+ *start = buffer;
+ return 0;
+ }
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ return len;
+}
+
+
+int aha1740_makecode(unchar *sense, unchar *status)
+{
+ struct statusword
+ {
+ ushort don:1, /* Command Done - No Error */
+ du:1, /* Data underrun */
+ :1, qf:1, /* Queue full */
+ sc:1, /* Specification Check */
+ dor:1, /* Data overrun */
+ ch:1, /* Chaining Halted */
+ intr:1, /* Interrupt issued */
+ asa:1, /* Additional Status Available */
+ sns:1, /* Sense information Stored */
+ :1, ini:1, /* Initialization Required */
+ me:1, /* Major error or exception */
+ :1, eca:1, /* Extended Contingent alliance */
+ :1;
+ } status_word;
+ int retval = DID_OK;
+
+ status_word = * (struct statusword *) status;
+#ifdef DEBUG
+ printk("makecode from %x,%x,%x,%x %x,%x,%x,%x",
+ status[0], status[1], status[2], status[3],
+ sense[0], sense[1], sense[2], sense[3]);
+#endif
+ if (!status_word.don) /* Anything abnormal was detected */
+ {
+ if ( (status[1]&0x18) || status_word.sc ) /*Additional info available*/
+ {
+ /* Use the supplied info for further diagnostics */
+ switch ( status[2] )
+ {
+ case 0x12:
+ if ( status_word.dor )
+ retval=DID_ERROR; /* It's an Overrun */
+ /* If not overrun, assume underrun and ignore it! */
+ case 0x00: /* No info, assume no error, should not occur */
+ break;
+ case 0x11:
+ case 0x21:
+ retval=DID_TIME_OUT;
+ break;
+ case 0x0a:
+ retval=DID_BAD_TARGET;
+ break;
+ case 0x04:
+ case 0x05:
+ retval=DID_ABORT;
+ /* Either by this driver or the AHA1740 itself */
+ break;
+ default:
+ retval=DID_ERROR; /* No further diagnostics possible */
+ }
+ }
+ else
+ { /* Michael suggests, and Brad concurs: */
+ if ( status_word.qf )
+ {
+ retval = DID_TIME_OUT; /* forces a redo */
+ /* I think this specific one should not happen -Brad */
+ printk("aha1740.c: WARNING: AHA1740 queue overflow!\n");
+ }
+ else if ( status[0]&0x60 )
+ {
+ retval = DID_ERROR; /* Didn't find a better error */
+ }
+ /* In any other case return DID_OK so for example
+ CONDITION_CHECKS make it through to the appropriate
+ device driver */
+ }
+ }
+ /* Under all circumstances supply the target status -Michael */
+ return status[3] | retval << 16;
+}
+
+int aha1740_test_port(unsigned int base)
+{
+ char name[4], tmp;
+
+ /* Okay, look for the EISA ID's */
+ name[0]= 'A' -1 + ((tmp = inb(HID0(base))) >> 2); /* First character */
+ name[1]= 'A' -1 + ((tmp & 3) << 3);
+ name[1]+= ((tmp = inb(HID1(base))) >> 5)&0x7; /* Second Character */
+ name[2]= 'A' -1 + (tmp & 0x1f); /* Third Character */
+ name[3]=0;
+ tmp = inb(HID2(base));
+ if ( strcmp ( name, HID_MFG ) || inb(HID2(base)) != HID_PRD )
+ return 0; /* Not an Adaptec 174x */
+
+/* if ( inb(HID3(base)) != HID_REV )
+ printk("aha174x: Warning; board revision of %d; expected %d\n",
+ inb(HID3(base)),HID_REV); */
+
+ if ( inb(EBCNTRL(base)) != EBCNTRL_VALUE )
+ {
+ printk("aha174x: Board detected, but EBCNTRL = %x, so disabled it.\n",
+ inb(EBCNTRL(base)));
+ return 0;
+ }
+
+ if ( inb(PORTADR(base)) & PORTADDR_ENH )
+ return 1; /* Okay, we're all set */
+
+ printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n");
+ return 0;
+}
+
+/* A "high" level interrupt handler */
+void aha1740_intr_handle(int irq, void *dev_id, struct pt_regs * regs)
+{
+ void (*my_done)(Scsi_Cmnd *);
+ int errstatus, adapstat;
+ int number_serviced;
+ struct ecb *ecbptr;
+ Scsi_Cmnd *SCtmp;
+ unsigned int base;
+
+ if (!aha_host[irq - 9])
+ panic("aha1740.c: Irq from unknown host!\n");
+ base = aha_host[irq - 9]->io_port;
+ number_serviced = 0;
+
+ while(inb(G2STAT(base)) & G2STAT_INTPEND)
+ {
+ DEB(printk("aha1740_intr top of loop.\n"));
+ adapstat = inb(G2INTST(base));
+ ecbptr = (struct ecb *) bus_to_virt(inl(MBOXIN0(base)));
+ outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */
+
+ switch ( adapstat & G2INTST_MASK )
+ {
+ case G2INTST_CCBRETRY:
+ case G2INTST_CCBERROR:
+ case G2INTST_CCBGOOD:
+ /* Host Ready -> Mailbox in complete */
+ outb(G2CNTRL_HRDY,G2CNTRL(base));
+ if (!ecbptr)
+ {
+ printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT(base)),adapstat,
+ inb(G2INTST(base)), number_serviced++);
+ continue;
+ }
+ SCtmp = ecbptr->SCpnt;
+ if (!SCtmp)
+ {
+ printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT(base)),adapstat,
+ inb(G2INTST(base)), number_serviced++);
+ continue;
+ }
+ if (SCtmp->host_scribble)
+ scsi_free(SCtmp->host_scribble, 512);
+ /* Fetch the sense data, and tuck it away, in the required slot.
+ The Adaptec automatically fetches it, and there is no
+ guarantee that we will still have it in the cdb when we come
+ back */
+ if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR )
+ {
+ memcpy(SCtmp->sense_buffer, ecbptr->sense,
+ sizeof(SCtmp->sense_buffer));
+ errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
+ }
+ else
+ errstatus = 0;
+ DEB(if (errstatus) printk("aha1740_intr_handle: returning %6x\n",
+ errstatus));
+ SCtmp->result = errstatus;
+ my_done = ecbptr->done;
+ memset(ecbptr,0,sizeof(struct ecb));
+ if ( my_done )
+ my_done(SCtmp);
+ break;
+ case G2INTST_HARDFAIL:
+ printk(KERN_ALERT "aha1740 hardware failure!\n");
+ panic("aha1740.c"); /* Goodbye */
+ case G2INTST_ASNEVENT:
+ printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n",
+ adapstat, inb(MBOXIN0(base)), inb(MBOXIN1(base)),
+ inb(MBOXIN2(base)), inb(MBOXIN3(base))); /* Say What? */
+ /* Host Ready -> Mailbox in complete */
+ outb(G2CNTRL_HRDY,G2CNTRL(base));
+ break;
+ case G2INTST_CMDGOOD:
+ /* set immediate command success flag here: */
+ break;
+ case G2INTST_CMDERROR:
+ /* Set immediate command failure flag here: */
+ break;
+ }
+ number_serviced++;
+ }
+}
+
+int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = SCpnt->target;
+ struct aha1740_hostdata *host = HOSTDATA(SCpnt->host);
+ unsigned long flags;
+ void *buff = SCpnt->request_buffer;
+ int bufflen = SCpnt->request_bufflen;
+ int ecbno;
+ DEB(int i);
+
+ if(*cmd == REQUEST_SENSE)
+ {
+ if (bufflen != sizeof(SCpnt->sense_buffer))
+ {
+ printk("Wrong buffer length supplied for request sense (%d)\n",
+ bufflen);
+ }
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ }
+
+#ifdef DEBUG
+ if (*cmd == READ_10 || *cmd == WRITE_10)
+ i = xscsi2int(cmd+2);
+ else if (*cmd == READ_6 || *cmd == WRITE_6)
+ i = scsi2int(cmd+2);
+ else
+ i = -1;
+ printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ",
+ target, *cmd, i, bufflen);
+ printk("scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+#endif
+
+ /* locate an available ecb */
+ save_flags(flags);
+ cli();
+ ecbno = host->last_ecb_used + 1; /* An optimization */
+ if (ecbno >= AHA1740_ECBS)
+ ecbno = 0;
+ do {
+ if (!host->ecb[ecbno].cmdw)
+ break;
+ ecbno++;
+ if (ecbno >= AHA1740_ECBS)
+ ecbno = 0;
+ } while (ecbno != host->last_ecb_used);
+
+ if (host->ecb[ecbno].cmdw)
+ panic("Unable to find empty ecb for aha1740.\n");
+
+ host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command
+ doubles as reserved flag */
+
+ host->last_ecb_used = ecbno;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ printk("Sending command (%d %x)...", ecbno, done);
+#endif
+
+ host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command Descriptor Block Length */
+
+ direction = 0;
+ if (*cmd == READ_10 || *cmd == READ_6)
+ direction = 1;
+ else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 0;
+
+ memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len);
+
+ if (SCpnt->use_sg)
+ {
+ struct scatterlist * sgpnt;
+ struct aha1740_chain * cptr;
+ int i;
+ DEB(unsigned char * ptr);
+
+ host->ecb[ecbno].sg = 1; /* SCSI Initiator Command w/scatter-gather*/
+ SCpnt->host_scribble = (unsigned char *) scsi_malloc(512);
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+ cptr = (struct aha1740_chain *) SCpnt->host_scribble;
+ if (cptr == NULL) panic("aha1740.c: unable to allocate DMA memory\n");
+ for(i=0; i<SCpnt->use_sg; i++)
+ {
+ cptr[i].datalen = sgpnt[i].length;
+ cptr[i].dataptr = virt_to_bus(sgpnt[i].address);
+ }
+ host->ecb[ecbno].datalen = SCpnt->use_sg * sizeof(struct aha1740_chain);
+ host->ecb[ecbno].dataptr = virt_to_bus(cptr);
+#ifdef DEBUG
+ printk("cptr %x: ",cptr);
+ ptr = (unsigned char *) cptr;
+ for(i=0;i<24;i++) printk("%02x ", ptr[i]);
+#endif
+ }
+ else
+ {
+ SCpnt->host_scribble = NULL;
+ host->ecb[ecbno].datalen = bufflen;
+ host->ecb[ecbno].dataptr = virt_to_bus(buff);
+ }
+ host->ecb[ecbno].lun = SCpnt->lun;
+ host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
+ host->ecb[ecbno].dir = direction;
+ host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */
+ host->ecb[ecbno].senselen = 12;
+ host->ecb[ecbno].senseptr = virt_to_bus(host->ecb[ecbno].sense);
+ host->ecb[ecbno].statusptr = virt_to_bus(host->ecb[ecbno].status);
+ host->ecb[ecbno].done = done;
+ host->ecb[ecbno].SCpnt = SCpnt;
+#ifdef DEBUG
+ {
+ int i;
+ printk("aha1740_command: sending.. ");
+ for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++)
+ printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]);
+ }
+ printk("\n");
+#endif
+ if (done)
+ { /* The Adaptec Spec says the card is so fast that the loops will
+ only be executed once in the code below. Even if this was true
+ with the fastest processors when the spec was written, it doesn't
+ seem to be true with today's fast processors. We print a warning
+ if the code is executed more often than LOOPCNT_WARN. If this
+ happens, it should be investigated. If the count reaches
+ LOOPCNT_MAX, we assume something is broken; since there is no
+ way to return an error (the return value is ignored by the
+ mid-level scsi layer) we have to panic (and maybe that's the
+ best thing we can do then anyhow). */
+
+#define LOOPCNT_WARN 10 /* excessive mbxout wait -> syslog-msg */
+#define LOOPCNT_MAX 1000000 /* mbxout deadlock -> panic() after ~ 2 sec. */
+ int loopcnt;
+ unsigned int base = SCpnt->host->io_port;
+ DEB(printk("aha1740[%d] critical section\n",ecbno));
+ save_flags(flags);
+ cli();
+ for (loopcnt = 0; ; loopcnt++) {
+ if (inb(G2STAT(base)) & G2STAT_MBXOUT) break;
+ if (loopcnt == LOOPCNT_WARN) {
+ printk("aha1740[%d]_mbxout wait!\n",ecbno);
+ cli(); /* printk may have done a sti()! */
+ }
+ if (loopcnt == LOOPCNT_MAX)
+ panic("aha1740.c: mbxout busy!\n");
+ }
+ outl(virt_to_bus(host->ecb + ecbno), MBOXOUT0(base));
+ for (loopcnt = 0; ; loopcnt++) {
+ if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break;
+ if (loopcnt == LOOPCNT_WARN) {
+ printk("aha1740[%d]_attn wait!\n",ecbno);
+ cli();
+ }
+ if (loopcnt == LOOPCNT_MAX)
+ panic("aha1740.c: attn wait failed!\n");
+ }
+ outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */
+ restore_flags(flags);
+ DEB(printk("aha1740[%d] request queued.\n",ecbno));
+ }
+ else
+ printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n");
+ return 0;
+}
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ SCpnt->SCp.Status++;
+}
+
+int aha1740_command(Scsi_Cmnd * SCpnt)
+{
+ aha1740_queuecommand(SCpnt, internal_done);
+ SCpnt->SCp.Status = 0;
+ while (!SCpnt->SCp.Status)
+ barrier();
+ return SCpnt->result;
+}
+
+/* Query the board for its irq_level. Nothing else matters
+ in enhanced mode on an EISA bus. */
+
+void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
+ unsigned int *translation)
+{
+ static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 };
+
+ *irq_level = intab[inb(INTDEF(base)) & 0x7];
+ *translation = inb(RESV1(base)) & 0x1;
+ outb(inb(INTDEF(base)) | 0x10, INTDEF(base));
+}
+
+int aha1740_detect(Scsi_Host_Template * tpnt)
+{
+ int count = 0, slot;
+
+ DEB(printk("aha1740_detect: \n"));
+
+ for ( slot=MINEISA; slot <= MAXEISA; slot++ )
+ {
+ int slotbase;
+ unsigned int irq_level, translation;
+ struct Scsi_Host *shpnt;
+ struct aha1740_hostdata *host;
+ slotbase = SLOTBASE(slot);
+ /*
+ * The ioports for eisa boards are generally beyond that used in the
+ * check/allocate region code, but this may change at some point,
+ * so we go through the motions.
+ */
+ if (check_region(slotbase, SLOTSIZE)) /* See if in use */
+ continue;
+ if (!aha1740_test_port(slotbase))
+ continue;
+ aha1740_getconfig(slotbase,&irq_level,&translation);
+ if ((inb(G2STAT(slotbase)) &
+ (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT)
+ { /* If the card isn't ready, hard reset it */
+ outb(G2CNTRL_HRST, G2CNTRL(slotbase));
+ outb(0, G2CNTRL(slotbase));
+ }
+ printk("Configuring aha174x at IO:%x, IRQ %d\n", slotbase, irq_level);
+ printk("aha174x: Extended translation %sabled.\n",
+ translation ? "en" : "dis");
+ DEB(printk("aha1740_detect: enable interrupt channel %d\n",irq_level));
+ if (request_irq(irq_level,aha1740_intr_handle,0,"aha1740",NULL)) {
+ printk("Unable to allocate IRQ for adaptec controller.\n");
+ continue;
+ }
+ shpnt = scsi_register(tpnt, sizeof(struct aha1740_hostdata));
+ request_region(slotbase, SLOTSIZE, "aha1740");
+ shpnt->base = 0;
+ shpnt->io_port = slotbase;
+ shpnt->n_io_port = SLOTSIZE;
+ shpnt->irq = irq_level;
+ shpnt->dma_channel = 0xff;
+ host = HOSTDATA(shpnt);
+ host->slot = slot;
+ host->translation = translation;
+ aha_host[irq_level - 9] = shpnt;
+ count++;
+ }
+ return count;
+}
+
+/* Note: The following two functions do not apply very well to the Adaptec,
+ which basically manages its own affairs quite well without our interference,
+ so I haven't put anything into them. I can faintly imagine someone with a
+ *very* badly behaved SCSI target (perhaps an old tape?) wanting the abort(),
+ but it hasn't happened yet, and doing aborts brings the Adaptec to its
+ knees. I cannot (at this moment in time) think of any reason to reset the
+ card once it's running. So there. */
+
+int aha1740_abort(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("aha1740_abort called\n"));
+ return SCSI_ABORT_SNOOZE;
+}
+
+/* We do not implement a reset function here, but the upper level code assumes
+ that it will get some kind of response for the command in SCpnt. We must
+ oblige, or the command will hang the scsi system */
+
+int aha1740_reset(Scsi_Cmnd * SCpnt, unsigned int ignored)
+{
+ DEB(printk("aha1740_reset called\n"));
+ return SCSI_RESET_PUNT;
+}
+
+int aha1740_biosparam(Disk * disk, kdev_t dev, int* ip)
+{
+ int size = disk->capacity;
+ int extended = HOSTDATA(disk->device->host)->translation;
+
+ DEB(printk("aha1740_biosparam\n"));
+ if (extended && (ip[2] > 1024))
+ {
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = size / (255 * 63);
+ }
+ else
+ {
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ }
+ return 0;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AHA1740;
+
+#include "scsi_module.c"
+#endif
+
+/* Okay, you made it all the way through. As of this writing, 3/31/93, I'm
+brad@saturn.gaylord.com or brad@bradpc.gaylord.com. I'll try to help as time
+permits if you have any trouble with this driver. Happy Linuxing! */
diff --git a/linux/src/drivers/scsi/aha1740.h b/linux/src/drivers/scsi/aha1740.h
new file mode 100644
index 0000000..478e59a
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1740.h
@@ -0,0 +1,196 @@
+#ifndef _AHA1740_H
+
+/* $Id: aha1740.h,v 1.1 1999/04/26 05:54:13 tb Exp $
+ *
+ * Header file for the adaptec 1740 driver for Linux
+ *
+ * With minor revisions 3/31/93
+ * Written and (C) 1992,1993 Brad McLean. See aha1740.c
+ * for more info
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+/* Eisa Enhanced mode operation - slot locating and addressing */
+#define MINEISA 1 /* I don't have an EISA Spec to know these ranges, so I */
+#define MAXEISA 8 /* Just took my machine's specifications. Adjust to fit.*/
+ /* I just saw an ad, and bumped this from 6 to 8 */
+#define SLOTBASE(x) ((x << 12) + 0xc80)
+#define SLOTSIZE 0x5c
+
+/* EISA configuration registers & values */
+#define HID0(base) (base + 0x0)
+#define HID1(base) (base + 0x1)
+#define HID2(base) (base + 0x2)
+#define HID3(base) (base + 0x3)
+#define EBCNTRL(base) (base + 0x4)
+#define PORTADR(base) (base + 0x40)
+#define BIOSADR(base) (base + 0x41)
+#define INTDEF(base) (base + 0x42)
+#define SCSIDEF(base) (base + 0x43)
+#define BUSDEF(base) (base + 0x44)
+#define RESV0(base) (base + 0x45)
+#define RESV1(base) (base + 0x46)
+#define RESV2(base) (base + 0x47)
+
+#define HID_MFG "ADP"
+#define HID_PRD 0
+#define HID_REV 2
+#define EBCNTRL_VALUE 1
+#define PORTADDR_ENH 0x80
+/* READ */
+#define G2INTST(base) (base + 0x56)
+#define G2STAT(base) (base + 0x57)
+#define MBOXIN0(base) (base + 0x58)
+#define MBOXIN1(base) (base + 0x59)
+#define MBOXIN2(base) (base + 0x5a)
+#define MBOXIN3(base) (base + 0x5b)
+#define G2STAT2(base) (base + 0x5c)
+
+#define G2INTST_MASK 0xf0 /* isolate the status */
+#define G2INTST_CCBGOOD 0x10 /* CCB Completed */
+#define G2INTST_CCBRETRY 0x50 /* CCB Completed with a retry */
+#define G2INTST_HARDFAIL 0x70 /* Adapter Hardware Failure */
+#define G2INTST_CMDGOOD 0xa0 /* Immediate command success */
+#define G2INTST_CCBERROR 0xc0 /* CCB Completed with error */
+#define G2INTST_ASNEVENT 0xd0 /* Asynchronous Event Notification */
+#define G2INTST_CMDERROR 0xe0 /* Immediate command error */
+
+#define G2STAT_MBXOUT 4 /* Mailbox Out Empty Bit */
+#define G2STAT_INTPEND 2 /* Interrupt Pending Bit */
+#define G2STAT_BUSY 1 /* Busy Bit (attention pending) */
+
+#define G2STAT2_READY 0 /* Host Ready Bit */
+
+/* WRITE (and ReadBack) */
+#define MBOXOUT0(base) (base + 0x50)
+#define MBOXOUT1(base) (base + 0x51)
+#define MBOXOUT2(base) (base + 0x52)
+#define MBOXOUT3(base) (base + 0x53)
+#define ATTN(base) (base + 0x54)
+#define G2CNTRL(base) (base + 0x55)
+
+#define ATTN_IMMED 0x10 /* Immediate Command */
+#define ATTN_START 0x40 /* Start CCB */
+#define ATTN_ABORT 0x50 /* Abort CCB */
+
+#define G2CNTRL_HRST 0x80 /* Hard Reset */
+#define G2CNTRL_IRST 0x40 /* Clear EISA Interrupt */
+#define G2CNTRL_HRDY 0x20 /* Sets HOST ready */
+
+/* This is used with scatter-gather */
+struct aha1740_chain {
+ u32 dataptr; /* Location of data */
+ u32 datalen; /* Size of this part of chain */
+};
+
+/* These belong in scsi.h */
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16) ; \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define MAX_STATUS 32
+
+struct ecb { /* Enhanced Control Block 6.1 */
+ u16 cmdw; /* Command Word */
+ /* Flag Word 1 */
+ u16 cne:1, /* Control Block Chaining */
+ :6, di:1, /* Disable Interrupt */
+ :2, ses:1, /* Suppress Underrun error */
+ :1, sg:1, /* Scatter/Gather */
+ :1, dsb:1, /* Disable Status Block */
+ ars:1; /* Automatic Request Sense */
+ /* Flag Word 2 */
+ u16 lun:3, /* Logical Unit */
+ tag:1, /* Tagged Queuing */
+ tt:2, /* Tag Type */
+ nd:1, /* No Disconnect */
+ :1, dat:1, /* Data transfer - check direction */
+ dir:1, /* Direction of transfer 1 = datain */
+ st:1, /* Suppress Transfer */
+ chk:1, /* Calculate Checksum */
+ :2, rec:1, :1; /* Error Recovery */
+ u16 nil0; /* nothing */
+ u32 dataptr; /* Data or Scatter List ptr */
+ u32 datalen; /* Data or Scatter List len */
+ u32 statusptr; /* Status Block ptr */
+ u32 linkptr; /* Chain Address */
+ u32 nil1; /* nothing */
+ u32 senseptr; /* Sense Info Pointer */
+ u8 senselen; /* Sense Length */
+ u8 cdblen; /* CDB Length */
+ u16 datacheck; /* Data checksum */
+ u8 cdb[MAX_CDB]; /* CDB area */
+/* Hardware defined portion ends here, rest is driver defined */
+ u8 sense[MAX_SENSE]; /* Sense area */
+ u8 status[MAX_STATUS]; /* Status area */
+ Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
+ void (*done)(Scsi_Cmnd *); /* Completion Function */
+};
+
+#define AHA1740CMD_NOP 0x00 /* No OP */
+#define AHA1740CMD_INIT 0x01 /* Initiator SCSI Command */
+#define AHA1740CMD_DIAG 0x05 /* Run Diagnostic Command */
+#define AHA1740CMD_SCSI 0x06 /* Initialize SCSI */
+#define AHA1740CMD_SENSE 0x08 /* Read Sense Information */
+#define AHA1740CMD_DOWN 0x09 /* Download Firmware (yeah, I bet!) */
+#define AHA1740CMD_RINQ 0x0a /* Read Host Adapter Inquiry Data */
+#define AHA1740CMD_TARG 0x10 /* Target SCSI Command */
+
+int aha1740_detect(Scsi_Host_Template *);
+int aha1740_command(Scsi_Cmnd *);
+int aha1740_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha1740_abort(Scsi_Cmnd *);
+int aha1740_reset(Scsi_Cmnd *, unsigned int);
+int aha1740_biosparam(Disk *, kdev_t, int*);
+int aha1740_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#define AHA1740_ECBS 32
+#define AHA1740_SCATTER 16
+#define AHA1740_CMDLUN 1
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+extern struct proc_dir_entry proc_scsi_aha1740;
+
+#define AHA1740 {NULL, NULL, \
+ &proc_scsi_aha1740, \
+ aha1740_proc_info, \
+ "Adaptec 174x (EISA)", \
+ aha1740_detect, \
+ NULL, \
+ NULL, \
+ aha1740_command, \
+ aha1740_queuecommand, \
+ aha1740_abort, \
+ aha1740_reset, \
+ NULL, \
+ aha1740_biosparam, \
+ AHA1740_ECBS, \
+ 7, \
+ AHA1740_SCATTER, \
+ AHA1740_CMDLUN, \
+ 0, \
+ 0, \
+ ENABLE_CLUSTERING}
+
+#endif
diff --git a/linux/src/drivers/scsi/aic7xxx.c b/linux/src/drivers/scsi/aic7xxx.c
new file mode 100644
index 0000000..93bed41
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx.c
@@ -0,0 +1,11404 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
+ * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
+ * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
+ * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
+ * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
+ * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
+ * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
+ * ANSI SCSI-2 specification (draft 10c), ...
+ *
+ * --------------------------------------------------------------------------
+ *
+ * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
+ *
+ * Substantially modified to include support for wide and twin bus
+ * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
+ * SCB paging, and other rework of the code.
+ *
+ * Parts of this driver were also based on the FreeBSD driver by
+ * Justin T. Gibbs. His copyright follows:
+ *
+ * --------------------------------------------------------------------------
+ * Copyright (c) 1994-1997 Justin Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Where this Software is combined with software released under the terms of
+ * the GNU Public License ("GPL") and the terms of the GPL would require the
+ * combined work to also be released under the terms of the GPL, the terms
+ * and conditions of this License will apply in addition to those of the
+ * GPL with the exception of any terms or conditions of this License that
+ * conflict with, or are expressly prohibited by, the GPL.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: aic7xxx.c,v 1.1.4.1 2004/01/16 22:41:26 roland Exp $
+ *---------------------------------------------------------------------------
+ *
+ * Thanks also go to (in alphabetical order) the following:
+ *
+ * Rory Bolt - Sequencer bug fixes
+ * Jay Estabrook - Initial DEC Alpha support
+ * Doug Ledford - Much needed abort/reset bug fixes
+ * Kai Makisara - DMAing of SCBs
+ *
+ * A Boot time option was also added for not resetting the scsi bus.
+ *
+ * Form: aic7xxx=extended
+ * aic7xxx=no_reset
+ * aic7xxx=ultra
+ * aic7xxx=irq_trigger:[0,1] # 0 edge, 1 level
+ * aic7xxx=verbose
+ *
+ * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
+ *
+ *-M*************************************************************************/
+
+/*+M**************************************************************************
+ *
+ * Further driver modifications made by Doug Ledford <dledford@redhat.com>
+ *
+ * Copyright (c) 1997-1998 Doug Ledford
+ *
+ * These changes are released under the same licensing terms as the FreeBSD
+ * driver written by Justin Gibbs. Please see his Copyright notice above
+ * for the exact terms and conditions covering my changes as well as the
+ * warranty statement.
+ *
+ * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
+ * but are not limited to:
+ *
+ * 1: Import of the latest FreeBSD sequencer code for this driver
+ * 2: Modification of kernel code to accomodate different sequencer semantics
+ * 3: Extensive changes throughout kernel portion of driver to improve
+ * abort/reset processing and error hanndling
+ * 4: Other work contributed by various people on the Internet
+ * 5: Changes to printk information and verbosity selection code
+ * 6: General reliability related changes, especially in IRQ management
+ * 7: Modifications to the default probe/attach order for supported cards
+ * 8: SMP friendliness has been improved
+ *
+ * Overall, this driver represents a significant departure from the official
+ * aic7xxx driver released by Dan Eischen in two ways. First, in the code
+ * itself. A diff between the two version of the driver is now a several
+ * thousand line diff. Second, in approach to solving the same problem. The
+ * problem is importing the FreeBSD aic7xxx driver code to linux can be a
+ * difficult and time consuming process, that also can be error prone. Dan
+ * Eischen's official driver uses the approach that the linux and FreeBSD
+ * drivers should be as identical as possible. To that end, his next version
+ * of this driver will be using a mid-layer code library that he is developing
+ * to moderate communications between the linux mid-level SCSI code and the
+ * low level FreeBSD driver. He intends to be able to essentially drop the
+ * FreeBSD driver into the linux kernel with only a few minor tweaks to some
+ * include files and the like and get things working, making for fast easy
+ * imports of the FreeBSD code into linux.
+ *
+ * I disagree with Dan's approach. Not that I don't think his way of doing
+ * things would be nice, easy to maintain, and create a more uniform driver
+ * between FreeBSD and Linux. I have no objection to those issues. My
+ * disagreement is on the needed functionality. There simply are certain
+ * things that are done differently in FreeBSD than linux that will cause
+ * problems for this driver regardless of any middle ware Dan implements.
+ * The biggest example of this at the moment is interrupt semantics. Linux
+ * doesn't provide the same protection techniques as FreeBSD does, nor can
+ * they be easily implemented in any middle ware code since they would truly
+ * belong in the kernel proper and would effect all drivers. For the time
+ * being, I see issues such as these as major stumbling blocks to the
+ * reliability of code based upon such middle ware. Therefore, I choose to
+ * use a different approach to importing the FreeBSD code that doesn't
+ * involve any middle ware type code. My approach is to import the sequencer
+ * code from FreeBSD wholesale. Then, to only make changes in the kernel
+ * portion of the driver as they are needed for the new sequencer semantics.
+ * In this way, the portion of the driver that speaks to the rest of the
+ * linux kernel is fairly static and can be changed/modified to solve
+ * any problems one might encounter without concern for the FreeBSD driver.
+ *
+ * Note: If time and experience should prove me wrong that the middle ware
+ * code Dan writes is reliable in its operation, then I'll retract my above
+ * statements. But, for those that don't know, I'm from Missouri (in the US)
+ * and our state motto is "The Show-Me State". Well, before I will put
+ * faith into it, you'll have to show me that it works :)
+ *
+ *_M*************************************************************************/
+
+/*
+ * The next three defines are user configurable. These should be the only
+ * defines a user might need to get in here and change. There are other
+ * defines buried deeper in the code, but those really shouldn't need touched
+ * under normal conditions.
+ */
+
+/*
+ * AIC7XXX_FAKE_NEGOTIATION_CMDS
+ * We now have two distinctly different methods of device negotiation
+ * in this code. The two methods are selected by either defining or not
+ * defining this option. The difference is as follows:
+ *
+ * With AIC7XXX_FAKE_NEGOTIATION_CMDS not set (commented out)
+ * When the driver is in need of issuing a negotiation command for any
+ * given device, it will add the negotiation message on to part of a
+ * regular SCSI command for the device. In the process, if the device
+ * is configured for and using tagged queueing, then the code will
+ * also issue that single command as a non-tagged command, attach the
+ * negotiation message to that one command, and use a temporary
+ * queue depth of one to keep the untagged and tagged commands from
+ * overlapping.
+ * Pros: This doesn't use any extra SCB structures, it's simple, it
+ * works most of the time (if not all of the time now), and
+ * since we get the device capability info frmo the INQUIRY data
+ * now, shouldn't cause any problems.
+ * Cons: When we need to send a negotiation command to a device, we
+ * must use a command that is being sent to LUN 0 of the device.
+ * If we try sending one to high LUN numbers, then some devices
+ * get noticeably upset. Since we have to wait for a command with
+ * LUN == 0 to come along, we may not be able to renegotiate when
+ * we want if the user is actually using say LUN 1 of a CD Changer
+ * instead of using LUN 0 for an extended period of time.
+ *
+ * With AIC7XXX_FAKE_NEGOTIATION_CMDS defined
+ * When we need to negotiate with a device, instead of attaching our
+ * negotiation message to an existing command, we insert our own
+ * fictional Scsi_Cmnd into the chain that has the negotiation message
+ * attached to it. We send this one command as untagged regardless
+ * of the device type, and we fiddle with the queue depth the same as
+ * we would with the option unset to avoid overlapping commands. The
+ * primary difference between this and the unset option is that the
+ * negotiation message is no longer attached to a specific command,
+ * instead it is its own command and is merely triggered by a
+ * combination of both A) We need to negotiate and B) The mid level
+ * SCSI code has sent us a command. We still don't do any negotiation
+ * unless there is a valid SCSI command to be processed.
+ * Pros: This fixes the problem above in the Cons section. Since we
+ * issue our own fake command, we can set the LUN to 0 regardless
+ * of what the LUN is in the real command. It also means that if
+ * the device get's nasty over negotiation issues, it won't be
+ * showing up on a regular command, so we won't get any SENSE buffer
+ * data or STATUS_BYTE returns to the mid level code that are caused
+ * by snits in the negotiation code.
+ * Cons: We add more code, and more complexity. This means more ways
+ * in which things could break. It means a larger driver. It means
+ * more resource consumption for the fake commands. However, the
+ * biggest problem is this. Take a system where there is a CD-ROM
+ * on the SCSI bus. Someone has a CD in the CD-ROM and is using it.
+ * For some reason the SCSI bus gets reset. We don't touch the
+ * CD-ROM again for quite a period of time (so we don't renegotiate
+ * after the reset until we do touch the CD-ROM again). In the
+ * time while we aren't using the CD-ROM, the current disc is
+ * removed and a new one put in. When we go to check that disc, we
+ * will first have to renegotiate. In so doing, we issue our fake
+ * SCSI command, which happens to be TEST_UNIT_READY. The CD-ROM
+ * negotiates with us, then responds to our fake command with a
+ * CHECK_CONDITION status. We REQUEST_SENSE from the CD-ROM, it
+ * then sends the SENSE data to our fake command to tell it that
+ * it has been through a disc change. There, now we've cleared out
+ * the SENSE data along with our negotiation command, and when the
+ * real command executes, it won't pick up that the CD was changed.
+ * That's the biggest Con to this approach. In the future, I could
+ * probably code around this problem though, so this option is still
+ * viable.
+ *
+ * So, which command style should you use? I would appreciate it if people
+ * could try out both types. I want to know about any cases where one
+ * method works and the other doesn't. If one method works on significantly
+ * more systems than another, then it will become the default. If the second
+ * option turns out to work best, then I'll find a way to work around that
+ * big con I listed.
+ *
+ * -- July 7, 02:33
+ * OK...I just added some code that should make the Con listed for the
+ * fake commands a non issue now. However, it needs testing. For now,
+ * I'm going to make the default to use the fake commands, we'll see how
+ * it goes.
+ */
+
+#define AIC7XXX_FAKE_NEGOTIATION_CMDS
+
+/*
+ * AIC7XXX_STRICT_PCI_SETUP
+ * Should we assume the PCI config options on our controllers are set with
+ * sane and proper values, or should we be anal about our PCI config
+ * registers and force them to what we want? The main advantage to
+ * defining this option is on non-Intel hardware where the BIOS may not
+ * have been run to set things up, or if you have one of the BIOSless
+ * Adaptec controllers, such as a 2910, that don't get set up by the
+ * BIOS. However, keep in mind that we really do set the most important
+ * items in the driver regardless of this setting, this only controls some
+ * of the more esoteric PCI options on these cards. In that sense, I
+ * would default to leaving this off. However, if people wish to try
+ * things both ways, that would also help me to know if there are some
+ * machines where it works one way but not another.
+ *
+ * -- July 7, 17:09
+ * OK...I need this on my machine for testing, so the default is to
+ * leave it defined.
+ *
+ * -- July 7, 18:49
+ * I needed it for testing, but it didn't make any difference, so back
+ * off she goes.
+ *
+ * -- July 16, 23:04
+ * I turned it back on to try and compensate for the 2.1.x PCI code
+ * which no longer relies solely on the BIOS and now tries to set
+ * things itself.
+ */
+
+#define AIC7XXX_STRICT_PCI_SETUP
+
+/*
+ * AIC7XXX_VERBOSE_DEBUGGING
+ * This option enables a lot of extra printk();s in the code, surrounded
+ * by if (aic7xxx_verbose ...) statements. Executing all of those if
+ * statements and the extra checks can get to where it actually does have
+ * an impact on CPU usage and such, as well as code size. Disabling this
+ * define will keep some of those from becoming part of the code.
+ *
+ * NOTE: Currently, this option has no real effect, I will be adding the
+ * various #ifdef's in the code later when I've decided a section is
+ * complete and no longer needs debugging. OK...a lot of things are now
+ * surrounded by this define, so turning this off does have an impact.
+ */
+
+/*
+ * #define AIC7XXX_VERBOSE_DEBUGGING
+ */
+
+#if defined(MODULE) || defined(PCMCIA)
+#include <linux/module.h>
+#endif
+
+#if defined(PCMCIA)
+# undef MODULE
+#endif
+
+#include <stdarg.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include <linux/tqueue.h>
+#include <linux/tasks.h>
+#include "sd.h"
+#include "scsi.h"
+#include "hosts.h"
+#include "aic7xxx.h"
+
+#include "aic7xxx/sequencer.h"
+#include "aic7xxx/scsi_message.h"
+#include "aic7xxx_reg.h"
+#include <scsi/scsicam.h>
+
+#include <linux/stat.h>
+#include <linux/malloc.h> /* for kmalloc() */
+
+#include <linux/config.h> /* for CONFIG_PCI */
+
+/*
+ * To generate the correct addresses for the controller to issue
+ * on the bus. Originally added for DEC Alpha support.
+ */
+#define VIRT_TO_BUS(a) (unsigned int)virt_to_bus((void *)(a))
+
+struct proc_dir_entry proc_scsi_aic7xxx = {
+ PROC_SCSI_AIC7XXX, 7, "aic7xxx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2,
+ 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+};
+
+#define AIC7XXX_C_VERSION "5.1.13"
+
+#define NUMBER(arr) (sizeof(arr) / sizeof(arr[0]))
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define ALL_TARGETS -1
+#define ALL_CHANNELS -1
+#define ALL_LUNS -1
+#define MAX_TARGETS 16
+#define MAX_LUNS 8
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+#ifndef KERNEL_VERSION
+# define KERNEL_VERSION(x,y,z) (((x)<<16)+((y)<<8)+(z))
+#endif
+
+/*
+ * We need the bios32.h file if we are kernel version 2.1.92 or less. The
+ * full set of pci_* changes wasn't in place until 2.1.93
+ */
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,1,92)
+# if defined(__sparc_v9__) || defined(__powerpc__)
+# error "PPC and Sparc platforms are only support under 2.1.92 and above"
+# endif
+# include <linux/bios32.h>
+#endif
+
+#if defined(__powerpc__)
+# define MMAPIO
+# ifdef mb
+# undef mb
+# endif
+# define mb() \
+ __asm__ __volatile__("eieio" ::: "memory")
+#elif defined(__i386__)
+# define MMAPIO
+# ifdef mb
+# undef mb
+# endif
+# define mb() \
+ __asm__ __volatile__("lock ; addl $0,0(%%esp)": : :"memory")
+#elif defined(__alpha__)
+# ifdef mb
+# undef mb
+# endif
+# define mb() \
+ __asm__ __volatile__("mb": : :"memory")
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0)
+# include <asm/spinlock.h>
+# include <linux/smp.h>
+# define cpuid smp_processor_id()
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+# define DRIVER_LOCK_INIT \
+ spin_lock_init(&p->spin_lock);
+# define DRIVER_LOCK \
+ if(!p->cpu_lock_count[cpuid]) { \
+ spin_lock_irqsave(&p->spin_lock, cpu_flags); \
+ p->cpu_lock_count[cpuid]++; \
+ } else { \
+ p->cpu_lock_count[cpuid]++; \
+ }
+# define DRIVER_UNLOCK \
+ if(--p->cpu_lock_count[cpuid] == 0) \
+ spin_unlock_irqrestore(&p->spin_lock, cpu_flags);
+# else
+# define DRIVER_LOCK_INIT
+# define DRIVER_LOCK
+# define DRIVER_UNLOCK
+# endif
+#else
+# define cpuid 0
+# define DRIVER_LOCK_INIT
+# define DRIVER_LOCK \
+ save_flags(cpu_flags); \
+ cli();
+# define DRIVER_UNLOCK \
+ restore_flags(cpu_flags);
+# define le32_to_cpu(x) (x)
+# define cpu_to_le32(x) (x)
+#endif
+
+/*
+ * You can try raising me if tagged queueing is enabled, or lowering
+ * me if you only have 4 SCBs.
+ */
+#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#else
+#define AIC7XXX_CMDS_PER_DEVICE 8
+#endif
+
+/* Set this to the delay in seconds after SCSI bus reset. */
+#ifdef CONFIG_AIC7XXX_RESET_DELAY
+#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY
+#else
+#define AIC7XXX_RESET_DELAY 5
+#endif
+
+/*
+ * Control collection of SCSI transfer statistics for the /proc filesystem.
+ *
+ * NOTE: Do NOT enable this when running on kernels version 1.2.x and below.
+ * NOTE: This does affect performance since it has to maintain statistics.
+ */
+#ifdef CONFIG_AIC7XXX_PROC_STATS
+#define AIC7XXX_PROC_STATS
+#endif
+
+/*
+ * NOTE: Uncommenting the define below no longer has any effect, the
+ * tagged queue value array is always active now. I've added
+ * a setup option to set this particular array and I'm hoping
+ * insmod will be smart enough to set it properly as well. It's
+ * by use of this array that a person can enable tagged queueing.
+ * The DEFAULT_TAG_COMMANDS define has been changed to disable
+ * tagged queueing by default, so if your devices can handle tagged
+ * queueing you will need to add a line to their lilo.conf file like:
+ * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ *
+ * Set this for defining the number of tagged commands on a device
+ * by device, and controller by controller basis. The first set
+ * of tagged commands will be used for the first detected aic7xxx
+ * controller, the second set will be used for the second detected
+ * aic7xxx controller, and so on. These values will *only* be used
+ * for targets that are tagged queueing capable; these values will
+ * be ignored in all other cases. The tag_commands is an array of
+ * 16 to allow for wide and twin adapters. Twin adapters will use
+ * indexes 0-7 for channel 0, and indexes 8-15 for channel 1.
+ *
+ * *** Determining commands per LUN ***
+ *
+ * When AIC7XXX_CMDS_PER_DEVICE is not defined, the driver will use its
+ * own algorithm to determine the commands/LUN. If SCB paging is
+ * enabled, which is always now, the default is 8 commands per lun
+ * that indicates it supports tagged queueing. All non-tagged devices
+ * use an internal queue depth of 3, with no more than one of those
+ * three commands active at one time.
+ */
+/* #define AIC7XXX_TAGGED_QUEUEING_BY_DEVICE */
+
+typedef struct
+{
+ unsigned char tag_commands[16]; /* Allow for wide/twin adapters. */
+} adapter_tag_info_t;
+
+/*
+ * Make a define that will tell the driver not to use tagged queueing
+ * by default.
+ */
+#ifdef CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+#define DEFAULT_TAG_COMMANDS {0, 0, 0, 0, 0, 0, 0, 0,\
+ 0, 0, 0, 0, 0, 0, 0, 0}
+#else
+#define DEFAULT_TAG_COMMANDS {255, 255, 255, 255, 255, 255, 255, 255,\
+ 255, 255, 255, 255, 255, 255, 255, 255}
+#endif
+
+/*
+ * Modify this as you see fit for your system. By setting tag_commands
+ * to 0, the driver will use it's own algorithm for determining the
+ * number of commands to use (see above). When 255, the driver will
+ * not enable tagged queueing for that particular device. When positive
+ * (> 0) and (< 255) the values in the array are used for the queue_depth.
+ * Note that the maximum value for an entry is 254, but you're insane if
+ * you try to use that many commands on one device.
+ *
+ * In this example, the first line will disable tagged queueing for all
+ * the devices on the first probed aic7xxx adapter.
+ *
+ * The second line enables tagged queueing with 4 commands/LUN for IDs
+ * (1, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
+ * driver to use its own algorithm for ID 1.
+ *
+ * The third line is the same as the first line.
+ *
+ * The fourth line disables tagged queueing for devices 0 and 3. It
+ * enables tagged queueing for the other IDs, with 16 commands/LUN
+ * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
+ * IDs 2, 5-7, and 9-15.
+ */
+
+/*
+ * NOTE: The below structure is for reference only, the actual structure
+ * to modify in order to change things is located around line
+ * number 1305
+adapter_tag_info_t aic7xxx_tag_info[] =
+{
+ {DEFAULT_TAG_COMMANDS},
+ {{4, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 255, 4, 4, 4}},
+ {DEFAULT_TAG_COMMANDS},
+ {{255, 16, 4, 255, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
+};
+*/
+
+static adapter_tag_info_t aic7xxx_tag_info[] =
+{
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS}
+};
+
+
+/*
+ * Define an array of board names that can be indexed by aha_type.
+ * Don't forget to change this when changing the types!
+ */
+static const char *board_names[] = {
+ "AIC-7xxx Unknown", /* AIC_NONE */
+ "Adaptec AIC-7810 Hardware RAID Controller", /* AIC_7810 */
+ "Adaptec AIC-7770 SCSI host adapter", /* AIC_7770 */
+ "Adaptec AHA-274X SCSI host adapter", /* AIC_7771 */
+ "Adaptec AHA-284X SCSI host adapter", /* AIC_284x */
+ "Adaptec AIC-7850 SCSI host adapter", /* AIC_7850 */
+ "Adaptec AIC-7855 SCSI host adapter", /* AIC_7855 */
+ "Adaptec AIC-7860 Ultra SCSI host adapter", /* AIC_7860 */
+ "Adaptec AHA-2940A Ultra SCSI host adapter", /* AIC_7861 */
+ "Adaptec AIC-7870 SCSI host adapter", /* AIC_7870 */
+ "Adaptec AHA-294X SCSI host adapter", /* AIC_7871 */
+ "Adaptec AHA-394X SCSI host adapter", /* AIC_7872 */
+ "Adaptec AHA-398X SCSI host adapter", /* AIC_7873 */
+ "Adaptec AHA-2944 SCSI host adapter", /* AIC_7874 */
+ "Adaptec AIC-7880 Ultra SCSI host adapter", /* AIC_7880 */
+ "Adaptec AHA-294X Ultra SCSI host adapter", /* AIC_7881 */
+ "Adaptec AHA-394X Ultra SCSI host adapter", /* AIC_7882 */
+ "Adaptec AHA-398X Ultra SCSI host adapter", /* AIC_7883 */
+ "Adaptec AHA-2944 Ultra SCSI host adapter", /* AIC_7884 */
+ "Adaptec AIC-7895 Ultra SCSI host adapter", /* AIC_7895 */
+ "Adaptec AIC-7890/1 Ultra2 SCSI host adapter", /* AIC_7890 */
+ "Adaptec AHA-293X Ultra2 SCSI host adapter", /* AIC_7890 */
+ "Adaptec AHA-294X Ultra2 SCSI host adapter", /* AIC_7890 */
+ "Adaptec AIC-7896/7 Ultra2 SCSI host adapter", /* AIC_7896 */
+ "Adaptec AHA-394X Ultra2 SCSI host adapter", /* AIC_7897 */
+ "Adaptec AHA-395X Ultra2 SCSI host adapter", /* AIC_7897 */
+ "Adaptec PCMCIA SCSI controller", /* card bus stuff */
+ "Adaptec AIC-7892 Ultra 160/m SCSI host adapter", /* AIC_7892 */
+ "Adaptec AIC-7899 Ultra 160/m SCSI host adapter", /* AIC_7899 */
+};
+
+/*
+ * There should be a specific return value for this in scsi.h, but
+ * it seems that most drivers ignore it.
+ */
+#define DID_UNDERFLOW DID_ERROR
+
+/*
+ * What we want to do is have the higher level scsi driver requeue
+ * the command to us. There is no specific driver status for this
+ * condition, but the higher level scsi driver will requeue the
+ * command on a DID_BUS_BUSY error.
+ *
+ * Upon further inspection and testing, it seems that DID_BUS_BUSY
+ * will *always* retry the command. We can get into an infinite loop
+ * if this happens when we really want some sort of counter that
+ * will automatically abort/reset the command after so many retries.
+ * Using DID_ERROR will do just that. (Made by a suggestion by
+ * Doug Ledford 8/1/96)
+ */
+#define DID_RETRY_COMMAND DID_ERROR
+
+#define HSCSIID 0x07
+#define SCSI_RESET 0x040
+
+/*
+ * EISA/VL-bus stuff
+ */
+#define MINSLOT 1
+#define MAXSLOT 15
+#define SLOTBASE(x) ((x) << 12)
+#define BASE_TO_SLOT(x) ((x) >> 12)
+
+/*
+ * Standard EISA Host ID regs (Offset from slot base)
+ */
+#define AHC_HID0 0x80 /* 0,1: msb of ID2, 2-7: ID1 */
+#define AHC_HID1 0x81 /* 0-4: ID3, 5-7: LSB ID2 */
+#define AHC_HID2 0x82 /* product */
+#define AHC_HID3 0x83 /* firmware revision */
+
+/*
+ * AIC-7770 I/O range to reserve for a card
+ */
+#define MINREG 0xC00
+#define MAXREG 0xCBF
+
+#define INTDEF 0x5C /* Interrupt Definition Register */
+
+/*
+ * AIC-78X0 PCI registers
+ */
+#define CLASS_PROGIF_REVID 0x08
+#define DEVREVID 0x000000FFul
+#define PROGINFC 0x0000FF00ul
+#define SUBCLASS 0x00FF0000ul
+#define BASECLASS 0xFF000000ul
+
+#define CSIZE_LATTIME 0x0C
+#define CACHESIZE 0x0000003Ful /* only 5 bits */
+#define LATTIME 0x0000FF00ul
+
+#define DEVCONFIG 0x40
+#define SCBSIZE32 0x00010000ul /* aic789X only */
+#define MPORTMODE 0x00000400ul /* aic7870 only */
+#define RAMPSM 0x00000200ul /* aic7870 only */
+#define RAMPSM_ULTRA2 0x00000004
+#define VOLSENSE 0x00000100ul
+#define SCBRAMSEL 0x00000080ul
+#define SCBRAMSEL_ULTRA2 0x00000008
+#define MRDCEN 0x00000040ul
+#define EXTSCBTIME 0x00000020ul /* aic7870 only */
+#define EXTSCBPEN 0x00000010ul /* aic7870 only */
+#define BERREN 0x00000008ul
+#define DACEN 0x00000004ul
+#define STPWLEVEL 0x00000002ul
+#define DIFACTNEGEN 0x00000001ul /* aic7870 only */
+
+#define SCAMCTL 0x1a /* Ultra2 only */
+#define CCSCBBADDR 0xf0 /* aic7895/6/7 */
+
+/*
+ * Define the different types of SEEPROMs on aic7xxx adapters
+ * and make it also represent the address size used in accessing
+ * its registers. The 93C46 chips have 1024 bits organized into
+ * 64 16-bit words, while the 93C56 chips have 2048 bits organized
+ * into 128 16-bit words. The C46 chips use 6 bits to address
+ * each word, while the C56 and C66 (4096 bits) use 8 bits to
+ * address each word.
+ */
+typedef enum {C46 = 6, C56_66 = 8} seeprom_chip_type;
+
+/*
+ *
+ * Define the format of the SEEPROM registers (16 bits).
+ *
+ */
+struct seeprom_config {
+
+/*
+ * SCSI ID Configuration Flags
+ */
+#define CFXFER 0x0007 /* synchronous transfer rate */
+#define CFSYNCH 0x0008 /* enable synchronous transfer */
+#define CFDISC 0x0010 /* enable disconnection */
+#define CFWIDEB 0x0020 /* wide bus device (wide card) */
+#define CFSYNCHISULTRA 0x0040 /* CFSYNC is an ultra offset */
+#define CFNEWULTRAFORMAT 0x0080 /* Use the Ultra2 SEEPROM format */
+#define CFSTART 0x0100 /* send start unit SCSI command */
+#define CFINCBIOS 0x0200 /* include in BIOS scan */
+#define CFRNFOUND 0x0400 /* report even if not found */
+#define CFMULTILUN 0x0800 /* probe mult luns in BIOS scan */
+#define CFWBCACHEYES 0x4000 /* Enable W-Behind Cache on drive */
+#define CFWBCACHENC 0xc000 /* Don't change W-Behind Cache */
+/* UNUSED 0x3000 */
+ unsigned short device_flags[16]; /* words 0-15 */
+
+/*
+ * BIOS Control Bits
+ */
+#define CFSUPREM 0x0001 /* support all removable drives */
+#define CFSUPREMB 0x0002 /* support removable drives for boot only */
+#define CFBIOSEN 0x0004 /* BIOS enabled */
+/* UNUSED 0x0008 */
+#define CFSM2DRV 0x0010 /* support more than two drives */
+#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
+/* UNUSED 0x0040 */
+#define CFEXTEND 0x0080 /* extended translation enabled */
+/* UNUSED 0xFF00 */
+ unsigned short bios_control; /* word 16 */
+
+/*
+ * Host Adapter Control Bits
+ */
+#define CFAUTOTERM 0x0001 /* Perform Auto termination */
+#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable (Ultra cards) */
+#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */
+#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */
+#define CFSTERM 0x0004 /* SCSI low byte termination */
+#define CFWSTERM 0x0008 /* SCSI high byte termination (wide card) */
+#define CFSPARITY 0x0010 /* SCSI parity */
+#define CF284XSTERM 0x0020 /* SCSI low byte termination (284x cards) */
+#define CFRESETB 0x0040 /* reset SCSI bus at boot */
+#define CFBPRIMARY 0x0100 /* Channel B primary on 7895 chipsets */
+#define CFSEAUTOTERM 0x0400 /* aic7890 Perform SE Auto Term */
+#define CFLVDSTERM 0x0800 /* aic7890 LVD Termination */
+/* UNUSED 0xF280 */
+ unsigned short adapter_control; /* word 17 */
+
+/*
+ * Bus Release, Host Adapter ID
+ */
+#define CFSCSIID 0x000F /* host adapter SCSI ID */
+/* UNUSED 0x00F0 */
+#define CFBRTIME 0xFF00 /* bus release time */
+ unsigned short brtime_id; /* word 18 */
+
+/*
+ * Maximum targets
+ */
+#define CFMAXTARG 0x00FF /* maximum targets */
+/* UNUSED 0xFF00 */
+ unsigned short max_targets; /* word 19 */
+
+ unsigned short res_1[11]; /* words 20-30 */
+ unsigned short checksum; /* word 31 */
+};
+
+/*
+ * Channel-select bits inside an SCB's packed target_channel_lun byte.
+ */
+#define SELBUS_MASK 0x0a
+#define SELNARROW 0x00
+#define SELBUSB 0x08
+#define SINGLE_BUS 0x00
+
+/*
+ * Accessors for the 4-bit target ID, 3-bit LUN and channel bit packed
+ * into target_channel_lun (the TID/LID masks come from the firmware
+ * register headers).
+ */
+#define SCB_TARGET(scb) \
+ (((scb)->hscb->target_channel_lun & TID) >> 4)
+#define SCB_LUN(scb) \
+ ((scb)->hscb->target_channel_lun & LID)
+#define SCB_IS_SCSIBUS_B(scb) \
+ (((scb)->hscb->target_channel_lun & SELBUSB) != 0)
+
+/*
+ * The next three macros borrow scratch fields of the mid-layer's
+ * Scsi_Pointer (cmd->SCp) for per-command driver bookkeeping.
+ */
+
+/*
+ * If an error occurs during a data transfer phase, run the command
+ * to completion - it's easier that way - making a note of the error
+ * condition in this location. This then will modify a DID_OK status
+ * into an appropriate error for the higher-level SCSI code.
+ */
+#define aic7xxx_error(cmd) ((cmd)->SCp.Status)
+
+/*
+ * Keep track of the target's returned status.
+ */
+#define aic7xxx_status(cmd) ((cmd)->SCp.sent_command)
+
+/*
+ * The position of the SCSI command's scb within the scb array.
+ */
+#define aic7xxx_position(cmd) ((cmd)->SCp.have_data_in)
+
+/*
+ * So we can keep track of our host structs: head of the driver-wide
+ * list of adapters (chained through aic7xxx_host.next).
+ */
+static struct aic7xxx_host *first_aic7xxx = NULL;
+
+/*
+ * As of Linux 2.1, the mid-level SCSI code uses virtual addresses
+ * in the scatter-gather lists. We need to convert the virtual
+ * addresses to physical addresses.
+ */
+/*
+ * One scatter-gather element in the adapter's format: a 32-bit
+ * physical address plus a byte count.
+ */
+struct hw_scatterlist {
+ unsigned int address;
+ unsigned int length;
+};
+
+/*
+ * Maximum number of SG segments these cards can support.
+ */
+#define AIC7XXX_MAX_SG 128
+
+/*
+ * The maximum number of SCBs we could have for ANY type
+ * of card. DON'T FORGET TO CHANGE THE SCB MASK IN THE
+ * SEQUENCER CODE IF THIS IS MODIFIED!
+ */
+#define AIC7XXX_MAXSCB 255
+
+
+/*
+ * The hardware SCB: the control block shared with the sequencer.
+ * The byte offsets noted on each field are part of the firmware's
+ * layout -- do not reorder or resize fields.
+ */
+struct aic7xxx_hwscb {
+/* ------------ Begin hardware supported fields ---------------- */
+/* 0*/ unsigned char control;
+/* 1*/ unsigned char target_channel_lun; /* 4/1/3 bits */
+/* 2*/ unsigned char target_status;
+/* 3*/ unsigned char SG_segment_count;
+/* 4*/ unsigned int SG_list_pointer;
+/* 8*/ unsigned char residual_SG_segment_count;
+/* 9*/ unsigned char residual_data_count[3];
+/*12*/ unsigned int data_pointer;
+/*16*/ unsigned int data_count;
+/*20*/ unsigned int SCSI_cmd_pointer;
+/*24*/ unsigned char SCSI_cmd_length;
+/*25*/ unsigned char tag; /* Index into our kernel SCB array.
+ * Also used as the tag for tagged I/O
+ */
+#define SCB_PIO_TRANSFER_SIZE 26 /* amount we need to upload/download
+ * via PIO to initialize a transaction.
+ */
+/*26*/ unsigned char next; /* Used to thread SCBs awaiting selection
+ * or disconnected down in the sequencer.
+ */
+/*27*/ unsigned char prev;
+/*28*/ unsigned int pad; /*
+ * Unused by the kernel, but we require
+ * the padding so that the array of
+ * hardware SCBs is aligned on 32 byte
+ * boundaries so the sequencer can index
+ */
+};
+
+/*
+ * Software-side SCB state flags (aic7xxx_scb.flags). Values are bit
+ * masks and may be combined.
+ */
+typedef enum {
+ SCB_FREE = 0x0000,
+ SCB_WAITINGQ = 0x0002,
+ SCB_ACTIVE = 0x0004,
+ SCB_SENSE = 0x0008,
+ SCB_ABORT = 0x0010,
+ SCB_DEVICE_RESET = 0x0020,
+ SCB_RESET = 0x0040,
+ SCB_RECOVERY_SCB = 0x0080,
+ SCB_WAS_BUSY = 0x0100,
+ SCB_MSGOUT_SENT = 0x0200,
+ SCB_MSGOUT_SDTR = 0x0400,
+ SCB_MSGOUT_WDTR = 0x0800,
+ SCB_MSGOUT_BITS = SCB_MSGOUT_SENT |
+ SCB_MSGOUT_SDTR |
+ SCB_MSGOUT_WDTR,
+ SCB_QUEUED_ABORT = 0x1000,
+ SCB_QUEUED_FOR_DONE = 0x2000
+} scb_flag_type;
+
+/*
+ * Per-adapter state and configuration flags (aic7xxx_host.flags).
+ */
+typedef enum {
+ AHC_FNONE = 0x00000000,
+ AHC_PAGESCBS = 0x00000001,
+ AHC_CHANNEL_B_PRIMARY = 0x00000002,
+ AHC_USEDEFAULTS = 0x00000004,
+ AHC_INDIRECT_PAGING = 0x00000008,
+ AHC_CHNLB = 0x00000020,
+ AHC_CHNLC = 0x00000040,
+ AHC_EXTEND_TRANS_A = 0x00000100,
+ AHC_EXTEND_TRANS_B = 0x00000200,
+ AHC_TERM_ENB_A = 0x00000400,
+ AHC_TERM_ENB_SE_LOW = 0x00000400,
+ AHC_TERM_ENB_B = 0x00000800,
+ AHC_TERM_ENB_SE_HIGH = 0x00000800,
+ AHC_HANDLING_REQINITS = 0x00001000,
+ AHC_TARGETMODE = 0x00002000,
+ AHC_NEWEEPROM_FMT = 0x00004000,
+ /*
+ * Here ends the FreeBSD defined flags and here begins the linux defined
+ * flags. NOTE: I did not preserve the old flag name during this change
+ * specifically to force me to evaluate what flags were being used properly
+ * and what flags weren't. This way, I could clean up the flag usage on
+ * a use by use basis. Doug Ledford
+ */
+ AHC_RESET_DELAY = 0x00080000,
+ AHC_A_SCANNED = 0x00100000,
+ AHC_B_SCANNED = 0x00200000,
+ AHC_MULTI_CHANNEL = 0x00400000,
+ AHC_BIOS_ENABLED = 0x00800000,
+ AHC_SEEPROM_FOUND = 0x01000000,
+ AHC_TERM_ENB_LVD = 0x02000000,
+ AHC_ABORT_PENDING = 0x04000000,
+ AHC_RESET_PENDING = 0x08000000,
+/* bit index of AHC_IN_ISR (0x10000000) */
+#define AHC_IN_ISR_BIT 28
+ AHC_IN_ISR = 0x10000000,
+ AHC_IN_ABORT = 0x20000000,
+ AHC_IN_RESET = 0x40000000,
+ AHC_EXTERNAL_SRAM = 0x80000000
+} ahc_flag_type;
+
+/*
+ * Chip type: the low byte (AHC_CHIPID_MASK) identifies the chip
+ * family, the upper bits flag the host bus type (VLB/EISA/PCI).
+ */
+typedef enum {
+ AHC_NONE = 0x0000,
+ AHC_CHIPID_MASK = 0x00ff,
+ AHC_AIC7770 = 0x0001,
+ AHC_AIC7850 = 0x0002,
+ AHC_AIC7860 = 0x0003,
+ AHC_AIC7870 = 0x0004,
+ AHC_AIC7880 = 0x0005,
+ AHC_AIC7890 = 0x0006,
+ AHC_AIC7895 = 0x0007,
+ AHC_AIC7896 = 0x0008,
+ AHC_AIC7892 = 0x0009,
+ AHC_AIC7899 = 0x000a,
+ AHC_VL = 0x0100,
+ AHC_EISA = 0x0200,
+ AHC_PCI = 0x0400,
+} ahc_chip;
+
+/*
+ * Hardware feature bits; the AHC_AIC78xx_FE combinations give the
+ * default feature set of each chip family.
+ */
+typedef enum {
+ AHC_FENONE = 0x0000,
+ AHC_ULTRA = 0x0001,
+ AHC_ULTRA2 = 0x0002,
+ AHC_WIDE = 0x0004,
+ AHC_TWIN = 0x0008,
+ AHC_MORE_SRAM = 0x0010,
+ AHC_CMD_CHAN = 0x0020,
+ AHC_QUEUE_REGS = 0x0040,
+ AHC_SG_PRELOAD = 0x0080,
+ AHC_SPIOCAP = 0x0100,
+ AHC_ULTRA160 = 0x0200,
+ AHC_AIC7770_FE = AHC_FENONE,
+ AHC_AIC7850_FE = AHC_SPIOCAP,
+ AHC_AIC7860_FE = AHC_ULTRA|AHC_SPIOCAP,
+ AHC_AIC7870_FE = AHC_FENONE,
+ AHC_AIC7880_FE = AHC_ULTRA,
+ AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2|
+ AHC_QUEUE_REGS|AHC_SG_PRELOAD,
+ AHC_AIC7895_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA,
+ AHC_AIC7896_FE = AHC_AIC7890_FE,
+ AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_ULTRA160,
+ AHC_AIC7899_FE = AHC_AIC7890_FE|AHC_ULTRA160,
+} ahc_feature;
+
+/*
+ * The kernel (software) SCB, paired with one hardware SCB via hscb.
+ */
+struct aic7xxx_scb {
+ struct aic7xxx_hwscb *hscb; /* corresponding hardware scb */
+ Scsi_Cmnd *cmd; /* Scsi_Cmnd for this scb */
+ struct aic7xxx_scb *q_next; /* next scb in queue */
+ volatile scb_flag_type flags; /* current state of scb */
+ struct hw_scatterlist *sg_list; /* SG list in adapter format */
+ unsigned char tag_action;
+ unsigned char sg_count;
+ unsigned char sense_cmd[6]; /*
+ * Allocate 6 characters for
+ * sense command.
+ */
+ unsigned int sg_length; /* We init this during buildscb so we
+ * don't have to calculate anything
+ * during underflow/overflow/stat code
+ */
+ void *kmalloc_ptr; /* original allocation pointer --
+ * presumably kept so the SCB memory
+ * can be freed; confirm in allocator */
+};
+
+/*
+ * Define a linked list of SCBs.
+ */
+typedef struct {
+ struct aic7xxx_scb *head;
+ struct aic7xxx_scb *tail;
+} scb_queue_type;
+
+/*
+ * Map of fatal hardware error codes (ILLHADDR etc., defined in the
+ * firmware register headers) to printable messages.
+ */
+static struct {
+ unsigned char errno;
+ const char *errmesg;
+} hard_error[] = {
+ { ILLHADDR, "Illegal Host Access" },
+ { ILLSADDR, "Illegal Sequencer Address referenced" },
+ { ILLOPCODE, "Illegal Opcode in sequencer program" },
+ { SQPARERR, "Sequencer Ram Parity Error" },
+ { DPARERR, "Data-Path Ram Parity Error" },
+ { MPARERR, "Scratch Ram/SCB Array Ram Parity Error" },
+ { PCIERRSTAT,"PCI Error detected" },
+ { CIOPARERR, "CIOBUS Parity Error" }
+};
+
+/*
+ * Template 6-byte REQUEST SENSE CDB; byte 4 is the 255-byte
+ * allocation length.
+ */
+static unsigned char
+generic_sense[] = { REQUEST_SENSE, 0, 0, 0, 255, 0 };
+
+/*
+ * Per-adapter SCB bookkeeping: the free list, the lookup array
+ * (indexed by SCB tag), and the hardware SCB array.
+ */
+typedef struct {
+ scb_queue_type free_scbs; /*
+ * SCBs assigned to free slot on
+ * card (no paging required)
+ */
+ struct aic7xxx_scb *scb_array[AIC7XXX_MAXSCB];
+ struct aic7xxx_hwscb *hscbs;
+ unsigned char numscbs; /* current number of scbs */
+ unsigned char maxhscbs; /* hardware scbs */
+ unsigned char maxscbs; /* max scbs including pageable scbs */
+ void *hscb_kmalloc_ptr;
+} scb_data_type;
+
+/*
+ * Incoming command block -- presumably used in target mode (see
+ * AHC_TARGETMODE); confirm at the (currently #if 0'd) use sites.
+ */
+struct target_cmd {
+ unsigned char mesg_bytes[4];
+ unsigned char command[28];
+};
+
+/*
+ * Selectors naming which set of negotiation values an operation
+ * applies to (current/active/goal/user).
+ */
+#define AHC_TRANS_CUR 0x0001
+#define AHC_TRANS_ACTIVE 0x0002
+#define AHC_TRANS_GOAL 0x0004
+#define AHC_TRANS_USER 0x0008
+#define AHC_TRANS_QUITE 0x0010
+/*
+ * Per-target transfer negotiation parameters: bus width and sync
+ * period/offset. cur = in effect, goal = to be negotiated,
+ * user = requested setting.
+ */
+typedef struct {
+ unsigned char cur_width;
+ unsigned char goal_width;
+ unsigned char cur_period;
+ unsigned char goal_period;
+ unsigned char cur_offset;
+ unsigned char goal_offset;
+ unsigned char user_width;
+ unsigned char user_period;
+ unsigned char user_offset;
+} transinfo_type;
+
+/*
+ * Define a structure used for each host adapter. Note, in order to avoid
+ * problems with architectures I can't test on (because I don't have one,
+ * such as the Alpha based systems) which happen to give faults for
+ * non-aligned memory accesses, care was taken to align this structure
+ * in a way that guaranteed all accesses larger than 8 bits were aligned
+ * on the appropriate boundary. It's also organized to try and be more
+ * cache line efficient. Be careful when changing this lest you might hurt
+ * overall performance and bring down the wrath of the masses.
+ */
+struct aic7xxx_host {
+ /*
+ * This is the first 64 bytes in the host struct
+ */
+
+ /*
+ * We are grouping things here....first, items that get either read or
+ * written with nearly every interrupt
+ */
+ volatile ahc_flag_type flags;
+ ahc_feature features; /* chip features */
+ unsigned long base; /* card base address */
+ volatile unsigned char *maddr; /* memory mapped address */
+ unsigned long isr_count; /* Interrupt count */
+ unsigned long spurious_int;
+ scb_data_type *scb_data;
+ volatile unsigned short needsdtr;
+ volatile unsigned short sdtr_pending;
+ volatile unsigned short needwdtr;
+ volatile unsigned short wdtr_pending;
+ struct aic7xxx_cmd_queue {
+ Scsi_Cmnd *head;
+ Scsi_Cmnd *tail;
+ } completeq;
+
+ /*
+ * Things read/written on nearly every entry into aic7xxx_queue()
+ */
+ volatile scb_queue_type waiting_scbs;
+ unsigned short discenable; /* Targets allowed to disconnect */
+ unsigned short tagenable; /* Targets using tagged I/O */
+ unsigned short orderedtag; /* Ordered Q tags allowed */
+ unsigned char unpause; /* unpause value for HCNTRL */
+ unsigned char pause; /* pause value for HCNTRL */
+ volatile unsigned char qoutfifonext;
+ volatile unsigned char activescbs; /* active scbs */
+ volatile unsigned char max_activescbs;
+ volatile unsigned char qinfifonext;
+
+/* Bit values for the per-target dev_flags[] bytes below. */
+#define DEVICE_PRESENT 0x01
+#define BUS_DEVICE_RESET_PENDING 0x02
+#define DEVICE_RESET_DELAY 0x04
+#define DEVICE_PRINT_SDTR 0x08
+#define DEVICE_PRINT_WDTR 0x10
+#define DEVICE_WAS_BUSY 0x20
+#define DEVICE_SCANNED 0x80
+ volatile unsigned char dev_flags[MAX_TARGETS];
+ volatile unsigned char dev_active_cmds[MAX_TARGETS];
+ volatile unsigned char dev_temp_queue_depth[MAX_TARGETS];
+ unsigned char dev_commands_sent[MAX_TARGETS];
+
+ unsigned int dev_timer_active; /* Which devs have a timer set */
+ struct timer_list dev_timer;
+ unsigned long dev_expires[MAX_TARGETS];
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0)
+ spinlock_t spin_lock;
+ volatile unsigned char cpu_lock_count[NR_CPUS];
+#endif
+
+
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+ Scsi_Cmnd *dev_wdtr_cmnd[MAX_TARGETS];
+ Scsi_Cmnd *dev_sdtr_cmnd[MAX_TARGETS];
+#endif
+
+ unsigned char dev_last_queue_full[MAX_TARGETS];
+ unsigned char dev_last_queue_full_count[MAX_TARGETS];
+ unsigned char dev_max_queue_depth[MAX_TARGETS];
+
+ volatile scb_queue_type delayed_scbs[MAX_TARGETS];
+
+
+ unsigned char msg_buf[9]; /* The message for the target */
+ unsigned char msg_type;
+#define MSG_TYPE_NONE 0x00
+#define MSG_TYPE_INITIATOR_MSGOUT 0x01
+#define MSG_TYPE_INITIATOR_MSGIN 0x02
+ unsigned char msg_len; /* Length of message */
+ unsigned char msg_index; /* Index into msg_buf array */
+ transinfo_type transinfo[MAX_TARGETS];
+
+
+ /*
+ * We put the less frequently used host structure items after the more
+ * frequently used items to try and ease the burden on the cache subsystem.
+ * These entries are not *commonly* accessed, whereas the preceding entries
+ * are accessed very often. The only exceptions are the qinfifo, qoutfifo,
+ * and untagged_scbs array. But, they are often accessed only once and each
+ * access into these arrays is likely to blow a cache line, so they are put
+ * down here so we can minimize the number of cache lines required to hold
+ * the preceding entries.
+ */
+
+ volatile unsigned char untagged_scbs[256];
+ volatile unsigned char qoutfifo[256];
+ volatile unsigned char qinfifo[256];
+ unsigned int irq; /* IRQ for this adapter */
+ int instance; /* aic7xxx instance number */
+ int scsi_id; /* host adapter SCSI ID */
+ int scsi_id_b; /* channel B for twin adapters */
+ unsigned int bios_address;
+ int board_name_index;
+ unsigned short needsdtr_copy; /* default config */
+ unsigned short needwdtr_copy; /* default config */
+ unsigned short ultraenb; /* Ultra mode target list */
+ unsigned short bios_control; /* bios control - SEEPROM */
+ unsigned short adapter_control; /* adapter control - SEEPROM */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ struct pci_dev *pdev;
+#endif
+ unsigned char pci_bus;
+ unsigned char pci_device_fn;
+ struct seeprom_config sc; /* SEEPROM contents */
+ unsigned short sc_type;
+ unsigned short sc_size;
+ struct aic7xxx_host *next; /* allow for multiple IRQs */
+ struct Scsi_Host *host; /* pointer to scsi host */
+ int host_no; /* SCSI host number */
+ unsigned long mbase; /* I/O memory address */
+ ahc_chip chip; /* chip type */
+
+ /*
+ * Statistics Kept:
+ *
+ * Total Xfers (count for each command that has a data xfer),
+ * broken down further by reads && writes.
+ *
+ * Binned sizes, writes && reads:
+ * < 512, 512, 1-2K, 2-4K, 4-8K, 8-16K, 16-32K, 32-64K, 64K-128K, > 128K
+ *
+ * Total amounts read/written above 512 bytes (amts under ignored)
+ *
+ * NOTE: Enabling this feature is likely to cause a noticeable performance
+ * decrease as the accesses into the stats structures blows apart multiple
+ * cache lines and is CPU time consuming.
+ *
+ * NOTE: Since it doesn't really buy us much, but consumes *tons* of RAM
+ * and blows apart all sorts of cache lines, I modified this so that we
+ * no longer look at the LUN. All LUNs now go into the same bin on each
+ * device for stats purposes.
+ */
+ struct aic7xxx_xferstats {
+ long w_total; /* total writes */
+ long r_total; /* total reads */
+#ifdef AIC7XXX_PROC_STATS
+ long w_bins[8]; /* binned write */
+ long r_bins[8]; /* binned reads */
+#endif /* AIC7XXX_PROC_STATS */
+ } stats[MAX_TARGETS]; /* [(channel << 3)|target] */
+
+#if 0
+ struct target_cmd *targetcmds;
+ unsigned int num_targetcmds;
+#endif
+
+};
+
+/*
+ * Valid SCSIRATE values. (p. 3-17)
+ * Provides a mapping of transfer periods in ns/4 to the proper value to
+ * stick in the SCSIRATE reg to use that transfer rate.
+ * The table is terminated by the all-zero entry. The rate[0]/rate[1]
+ * strings appear to be the narrow vs. wide (doubled) MB/s labels --
+ * confirm at the printing call sites.
+ */
+#define AHC_SYNCRATE_ULTRA2 0
+#define AHC_SYNCRATE_ULTRA 2
+#define AHC_SYNCRATE_FAST 5
+static struct aic7xxx_syncrate {
+ /* Rates in Ultra mode have bit 8 of sxfr set */
+#define ULTRA_SXFR 0x100
+ int sxfr_ultra2;
+ int sxfr;
+ unsigned char period;
+ const char *rate[2];
+} aic7xxx_syncrates[] = {
+ { 0x13, 0x000, 10, {"40.0", "80.0"} },
+ { 0x14, 0x000, 11, {"33.0", "66.6"} },
+ { 0x15, 0x100, 12, {"20.0", "40.0"} },
+ { 0x16, 0x110, 15, {"16.0", "32.0"} },
+ { 0x17, 0x120, 18, {"13.4", "26.8"} },
+ { 0x18, 0x000, 25, {"10.0", "20.0"} },
+ { 0x19, 0x010, 31, {"8.0", "16.0"} },
+ { 0x1a, 0x020, 37, {"6.67", "13.3"} },
+ { 0x1b, 0x030, 43, {"5.7", "11.4"} },
+ { 0x10, 0x040, 50, {"5.0", "10.0"} },
+ { 0x00, 0x050, 56, {"4.4", "8.8" } },
+ { 0x00, 0x060, 62, {"4.0", "8.0" } },
+ { 0x00, 0x070, 68, {"3.6", "7.2" } },
+ { 0x00, 0x000, 0, {NULL, NULL} },
+};
+
+/*
+ * Expand to a (channel, target, lun) argument triple for printk
+ * argument lists; pair with the WARN_LEAD/INFO_LEAD formats below
+ * (the leading %d, the host number, is supplied by the caller).
+ */
+#define CTL_OF_SCB(scb) (((scb->hscb)->target_channel_lun >> 3) & 0x1), \
+ (((scb->hscb)->target_channel_lun >> 4) & 0xf), \
+ ((scb->hscb)->target_channel_lun & 0x07)
+
+#define CTL_OF_CMD(cmd) ((cmd->channel) & 0x01), \
+ ((cmd->target) & 0x0f), \
+ ((cmd->lun) & 0x07)
+
+/*
+ * Index into the per-target arrays; matches the stats[] layout
+ * comment "[(channel << 3)|target]".
+ */
+#define TARGET_INDEX(cmd) ((cmd)->target | ((cmd)->channel << 3))
+
+/*
+ * A nice little define to make doing our printks a little easier
+ */
+
+#define WARN_LEAD KERN_WARNING "(scsi%d:%d:%d:%d) "
+#define INFO_LEAD KERN_INFO "(scsi%d:%d:%d:%d) "
+
+/*
+ * XXX - these options apply unilaterally to _all_ 274x/284x/294x
+ * cards in the system. This should be fixed. Exceptions to this
+ * rule are noted in the comments.
+ */
+
+
+/*
+ * Skip the scsi bus reset. Non 0 makes us skip the reset at startup. This
+ * has no effect on any later resets that might occur due to things like
+ * SCSI bus timeouts.
+ */
+static unsigned int aic7xxx_no_reset = 0;
+/*
+ * Certain PCI motherboards will scan PCI devices from highest to lowest,
+ * others scan from lowest to highest, and they tend to do all kinds of
+ * strange things when they come into contact with PCI bridge chips. The
+ * net result of all this is that the PCI card that is actually used to boot
+ * the machine is very hard to detect. Most motherboards go from lowest
+ * PCI slot number to highest, and the first SCSI controller found is the
+ * one you boot from. The only exceptions to this are when a controller
+ * has its BIOS disabled. So, we by default sort all of our SCSI controllers
+ * from lowest PCI slot number to highest PCI slot number. We also force
+ * all controllers with their BIOS disabled to the end of the list. This
+ * works on *almost* all computers. Where it doesn't work, we have this
+ * option. Setting this option to non-0 will reverse the order of the sort
+ * to highest first, then lowest, but will still leave cards with their BIOS
+ * disabled at the very end. That should fix everyone up unless there are
+ * really strange circumstances.
+ */
+static int aic7xxx_reverse_scan = 0;
+/*
+ * Should we force EXTENDED translation on a controller.
+ * 0 == Use whatever is in the SEEPROM or default to off
+ * 1 == Use whatever is in the SEEPROM or default to on
+ */
+static unsigned int aic7xxx_extended = 0;
+/*
+ * The IRQ trigger method used on EISA controllers. Does not affect PCI cards.
+ * -1 = Use detected settings.
+ * 0 = Force Edge triggered mode.
+ * 1 = Force Level triggered mode.
+ */
+static int aic7xxx_irq_trigger = -1;
+/*
+ * This variable is used to override the termination settings on a controller.
+ * This should not be used under normal conditions. However, in the case
+ * that a controller does not have a readable SEEPROM (so that we can't
+ * read the SEEPROM settings directly) and that a controller has a buggered
+ * version of the cable detection logic, this can be used to force the
+ * correct termination. It is preferable to use the manual termination
+ * settings in the BIOS if possible, but some motherboard controllers store
+ * those settings in a format we can't read. In other cases, auto term
+ * should also work, but the chipset was put together with no auto term
+ * logic (common on motherboard controllers). In those cases, we have
+ * 32 bits here to work with. That's good for 8 controllers/channels. The
+ * bits are organized as 4 bits per channel, with scsi0 getting the lowest
+ * 4 bits in the int. A 1 in a bit position indicates the termination setting
+ * that corresponds to that bit should be enabled, a 0 is disabled.
+ * It looks something like this:
+ *
+ * 0x0f = 1111-Single Ended Low Byte Termination on/off
+ * ||\-Single Ended High Byte Termination on/off
+ * |\-LVD Low Byte Termination on/off
+ * \-LVD High Byte Termination on/off
+ *
+ * For non-Ultra2 controllers, the upper 2 bits are not important. So, to
+ * enable both high byte and low byte termination on scsi0, I would need to
+ * make sure that the override_term variable was set to 0x03 (bits 0011).
+ * To make sure that all termination is enabled on an Ultra2 controller at
+ * scsi2 and only high byte termination on scsi1 and high and low byte
+ * termination on scsi0, I would set override_term=0xf23 (bits 1111 0010 0011)
+ *
+ * For the most part, users should never have to use this, that's why I
+ * left it fairly cryptic instead of easy to understand. If you need it,
+ * most likely someone will be telling you what yours needs to be set to.
+ */
+static int aic7xxx_override_term = -1;
+/*
+ * Certain motherboard chipset controllers tend to screw
+ * up the polarity of the term enable output pin. Use this variable
+ * to force the correct polarity for your system. This is a bitfield variable
+ * similar to the previous one, but this one has one bit per channel instead
+ * of four.
+ * 0 = Force the setting to active low.
+ * 1 = Force setting to active high.
+ * Most Adaptec cards are active high, several motherboards are active low.
+ * To force a 2940 card at SCSI 0 to active high and a motherboard 7895
+ * controller at scsi1 and scsi2 to active low, and a 2910 card at scsi3
+ * to active high, you would need to set stpwlev=0x9 (bits 1001).
+ *
+ * People shouldn't need to use this, but if you are experiencing lots of
+ * SCSI timeout problems, this may help. There is one sure way to test what
+ * this option needs to be. Using a boot floppy to boot the system, configure
+ * your system to enable all SCSI termination (in the Adaptec SCSI BIOS) and
+ * if needed then also pass a value to override_term to make sure that the
+ * driver is enabling SCSI termination, then set this variable to either 0
+ * or 1. When the driver boots, make sure there are *NO* SCSI cables
+ * connected to your controller. If it finds and inits the controller
+ * without problem, then the setting you passed to stpwlev was correct. If
+ * the driver goes into a reset loop and hangs the system, then you need the
+ * other setting for this variable. If neither setting lets the machine
+ * boot then you have definite termination problems that may not be fixable.
+ */
+static int aic7xxx_stpwlev = -1;
+/*
+ * Set this to non-0 in order to force the driver to panic the kernel
+ * and print out debugging info on a SCSI abort or reset cycle.
+ */
+static int aic7xxx_panic_on_abort = 0;
+/*
+ * PCI bus parity checking of the Adaptec controllers. This is somewhat
+ * dubious at best. To my knowledge, this option has never actually
+ * solved a PCI parity problem, but on certain machines with broken PCI
+ * chipset configurations, it can generate tons of false error messages.
+ * It's included in the driver for completeness.
+ * 0 = Shut off PCI parity check
+ * -1 = Normal polarity pci parity checking
+ * 1 = reverse polarity pci parity checking
+ *
+ * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this
+ * variable to -1 you would actually want to simply pass the variable
+ * name without a number. That will invert the 0 which will result in
+ * -1.
+ */
+static int aic7xxx_pci_parity = 0;
+/*
+ * Set this to any non-0 value to cause us to dump the contents of all
+ * the card's registers in a hex dump format tailored to each model of
+ * controller.
+ *
+ * NOTE: THE CONTROLLER IS LEFT IN AN UNUSEABLE STATE BY THIS OPTION.
+ * YOU CANNOT BOOT UP WITH THIS OPTION, IT IS FOR DEBUGGING PURPOSES
+ * ONLY
+ */
+static int aic7xxx_dump_card = 0;
+/*
+ * Set this to a non-0 value to make us dump out the 32 bit instruction
+ * registers on the card after completing the sequencer download. This
+ * allows the actual sequencer download to be verified. It is possible
+ * to use this option and still boot up and run your system. This is
+ * only intended for debugging purposes.
+ */
+static int aic7xxx_dump_sequencer = 0;
+/*
+ * Certain newer motherboards have put new PCI based devices into the
+ * IO spaces that used to typically be occupied by VLB or EISA cards.
+ * This overlap can cause these newer motherboards to lock up when scanned
+ * for older EISA and VLB devices. Setting this option to non-0 will
+ * cause the driver to skip scanning for any VLB or EISA controllers and
+ * only support the PCI controllers. NOTE: this means that if the kernel
+ * is compiled with PCI support disabled, then setting this to non-0
+ * would result in never finding any devices :)
+ */
+static int aic7xxx_no_probe = 0;
+
+/*
+ * So that insmod can find the variable and make it point to something
+ */
+#ifdef MODULE
+static char * aic7xxx = NULL;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,18)
+MODULE_PARM(aic7xxx, "s");
+#endif
+
+/*
+ * Just in case someone uses commas to separate items on the insmod
+ * command line, we define a dummy buffer here to avoid having insmod
+ * write wild stuff into our code segment
+ */
+static char dummy_buffer[60] = "Please don't trounce on me insmod!!\n";
+
+#endif
+
+/*
+ * Bit flags controlling how chatty the driver is. VERBOSE_ABORT and
+ * VERBOSE_RESET are masks covering their four sub-flags.
+ */
+#define VERBOSE_NORMAL 0x0000
+#define VERBOSE_NEGOTIATION 0x0001
+#define VERBOSE_SEQINT 0x0002
+#define VERBOSE_SCSIINT 0x0004
+#define VERBOSE_PROBE 0x0008
+#define VERBOSE_PROBE2 0x0010
+#define VERBOSE_NEGOTIATION2 0x0020
+#define VERBOSE_MINOR_ERROR 0x0040
+#define VERBOSE_TRACING 0x0080
+#define VERBOSE_ABORT 0x0f00
+#define VERBOSE_ABORT_MID 0x0100
+#define VERBOSE_ABORT_FIND 0x0200
+#define VERBOSE_ABORT_PROCESS 0x0400
+#define VERBOSE_ABORT_RETURN 0x0800
+#define VERBOSE_RESET 0xf000
+#define VERBOSE_RESET_MID 0x1000
+#define VERBOSE_RESET_FIND 0x2000
+#define VERBOSE_RESET_PROCESS 0x4000
+#define VERBOSE_RESET_RETURN 0x8000
+static int aic7xxx_verbose = VERBOSE_NORMAL | VERBOSE_NEGOTIATION |
+ VERBOSE_PROBE; /* verbose messages */
+
+
+/****************************************************************************
+ *
+ * We're going to start putting in function declarations so that order of
+ * functions is no longer important. As needed, they are added here.
+ *
+ ***************************************************************************/
+
+/* Forward declarations of debugging/diagnostic helpers defined later. */
+static void aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd);
+static void aic7xxx_print_card(struct aic7xxx_host *p);
+static void aic7xxx_print_scratch_ram(struct aic7xxx_host *p);
+static void aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+static void aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer);
+#endif
+
+/****************************************************************************
+ *
+ * These functions are now used. They happen to be wrapped in useless
+ * inb/outb port read/writes around the real reads and writes because it
+ * seems that certain very fast CPUs have a problem dealing with us when
+ * going at full speed.
+ *
+ ***************************************************************************/
+
+/*
+ * Compatibility shims: these helpers and the 1480A device ID first
+ * appeared in later kernels, so supply them when building against a
+ * pre-2.1 kernel.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+/* Busy-wait for the given number of milliseconds. */
+static inline void
+mdelay(int milliseconds)
+{
+ int i;
+
+ for(i=0; i<milliseconds; i++)
+ udelay(1000);
+}
+
+/*
+ * Compare two timestamps using a signed difference, so the result
+ * stays correct across counter wrap-around (standard jiffies idiom).
+ */
+static inline int
+time_after_eq(unsigned long a, unsigned long b)
+{
+ return((long)((a) - (b)) >= 0L);
+}
+
+/* Pre-2.1 timer_list chains via prev/next; non-NULL prev == queued. */
+static inline int
+timer_pending(struct timer_list *timer)
+{
+ return( timer->prev != NULL );
+}
+
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+
+#endif
+
+/*
+ * Read one adapter register. With MMAPIO, use the memory-mapped
+ * window when one was set up (p->maddr), falling back to port I/O;
+ * mb() orders the access against surrounding memory operations.
+ */
+static inline unsigned char
+aic_inb(struct aic7xxx_host *p, long port)
+{
+#ifdef MMAPIO
+ unsigned char x;
+ if(p->maddr)
+ {
+ x = p->maddr[port];
+ }
+ else
+ {
+ x = inb(p->base + port);
+ }
+ mb();
+ return(x);
+#else
+ return(inb(p->base + port));
+#endif
+}
+
+/*
+ * Write one adapter register; same memory-mapped vs. port I/O
+ * selection and barrier as aic_inb() above.
+ */
+static inline void
+aic_outb(struct aic7xxx_host *p, unsigned char val, long port)
+{
+#ifdef MMAPIO
+ if(p->maddr)
+ {
+ p->maddr[port] = val;
+ }
+ else
+ {
+ outb(val, p->base + port);
+ }
+ mb();
+#else
+ outb(val, p->base + port);
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_setup
+ *
+ * Description:
+ * Handle Linux boot parameters. This routine allows for assigning a value
+ * to a parameter with a ':' between the parameter and the value.
+ * ie. aic7xxx=unpause:0x0A,extended
+ *
+ * Parameters:
+ * s - the option string following "aic7xxx="
+ * dummy - unused (required by the kernel setup-function signature)
+ *-F*************************************************************************/
+void
+aic7xxx_setup(char *s, int *dummy)
+{
+ int i, n;
+ char *p;
+ char *end;
+
+ /*
+ * Name-to-variable table for the simple options. A bare name
+ * inverts the current value; "name:value" assigns it.
+ * NOTE(review): several of the targets are plain int accessed
+ * through an unsigned int * -- relies on identical representation.
+ */
+ static struct {
+ const char *name;
+ unsigned int *flag;
+ } options[] = {
+ { "extended", &aic7xxx_extended },
+ { "no_reset", &aic7xxx_no_reset },
+ { "irq_trigger", &aic7xxx_irq_trigger },
+ { "verbose", &aic7xxx_verbose },
+ { "reverse_scan",&aic7xxx_reverse_scan },
+ { "override_term", &aic7xxx_override_term },
+ { "stpwlev", &aic7xxx_stpwlev },
+ { "no_probe", &aic7xxx_no_probe },
+ { "panic_on_abort", &aic7xxx_panic_on_abort },
+ { "pci_parity", &aic7xxx_pci_parity },
+ { "dump_card", &aic7xxx_dump_card },
+ { "dump_sequencer", &aic7xxx_dump_sequencer },
+ { "tag_info", NULL }
+ };
+
+ end = strchr(s, '\0');
+
+ for (p = strtok(s, ",."); p; p = strtok(NULL, ",."))
+ {
+ for (i = 0; i < NUMBER(options); i++)
+ {
+ n = strlen(options[i].name);
+ if (!strncmp(options[i].name, p, n))
+ {
+ if (!strncmp(p, "tag_info", n))
+ {
+ /*
+ * tag_info takes a nested list: {..} per adapter instance,
+ * with per-device depths inside. Parsed by hand below.
+ */
+ if (p[n] == ':')
+ {
+ char *base;
+ char *tok, *tok_end, *tok_end2;
+ char tok_list[] = { '.', ',', '{', '}', '\0' };
+ /* note: this i shadows the outer option-loop index */
+ int i, instance = -1, device = -1;
+ unsigned char done = FALSE;
+
+ base = p;
+ tok = base + n + 1; /* Forward us just past the ':' */
+ tok_end = strchr(tok, '\0');
+ if (tok_end < end)
+ *tok_end = ',';
+ while(!done)
+ {
+ switch(*tok)
+ {
+ case '{':
+ /* descend: first '{' selects instance 0, second device 0 */
+ if (instance == -1)
+ instance = 0;
+ else if (device == -1)
+ device = 0;
+ tok++;
+ break;
+ case '}':
+ if (device != -1)
+ device = -1;
+ else if (instance != -1)
+ instance = -1;
+ tok++;
+ break;
+ case ',':
+ case '.':
+ if (instance == -1)
+ done = TRUE;
+ else if (device >= 0)
+ device++;
+ else if (instance >= 0)
+ instance++;
+ if ( (device >= MAX_TARGETS) ||
+ (instance >= NUMBER(aic7xxx_tag_info)) )
+ done = TRUE;
+ tok++;
+ if (!done)
+ {
+ base = tok;
+ }
+ break;
+ case '\0':
+ done = TRUE;
+ break;
+ default:
+ done = TRUE;
+ tok_end = strchr(tok, '\0');
+ for(i=0; tok_list[i]; i++)
+ {
+ tok_end2 = strchr(tok, tok_list[i]);
+ if ( (tok_end2) && (tok_end2 < tok_end) )
+ {
+ tok_end = tok_end2;
+ done = FALSE;
+ }
+ }
+ if ( (instance >= 0) && (device >= 0) &&
+ (instance < NUMBER(aic7xxx_tag_info)) &&
+ (device < MAX_TARGETS) )
+ aic7xxx_tag_info[instance].tag_commands[device] =
+ simple_strtoul(tok, NULL, 0) & 0xff;
+ tok = tok_end;
+ break;
+ }
+ }
+ /* resynchronize the outer strtok scan past what we consumed */
+ while((p != base) && (p != NULL))
+ p = strtok(NULL, ",.");
+ }
+ }
+ else if (p[n] == ':')
+ {
+ *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
+ }
+ else if (!strncmp(p, "verbose", n))
+ {
+ /* bare "verbose" turns on a fixed set of verbosity bits */
+ *(options[i].flag) = 0xff09;
+ }
+ else
+ {
+ *(options[i].flag) = ~(*(options[i].flag));
+ }
+ }
+ }
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * pause_sequencer
+ *
+ * Description:
+ * Pause the sequencer and wait for it to actually stop - this
+ * is important since the sequencer can disable pausing for critical
+ * sections.
+ *-F*************************************************************************/
+static inline void
+pause_sequencer(struct aic7xxx_host *p)
+{
+ aic_outb(p, p->pause, HCNTRL);
+ /* spin until the chip acknowledges the pause in HCNTRL */
+ while ((aic_inb(p, HCNTRL) & PAUSE) == 0)
+ {
+ ;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * unpause_sequencer
+ *
+ * Description:
+ * Unpause the sequencer. Unremarkable, yet done often enough to
+ * warrant an easy way to do it.
+ *-F*************************************************************************/
+static inline void
+unpause_sequencer(struct aic7xxx_host *p, int unpause_always)
+{
+ /*
+ * Unless forced, leave the sequencer paused while an interrupt
+ * condition is outstanding or we are handling reqinits.
+ */
+ if (unpause_always ||
+ ( !(aic_inb(p, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) &&
+ !(p->flags & AHC_HANDLING_REQINITS) ) )
+ {
+ aic_outb(p, p->unpause, HCNTRL);
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * restart_sequencer
+ *
+ * Description:
+ * Restart the sequencer program from address zero. This assumes
+ * that the sequencer is already paused.
+ *-F*************************************************************************/
+static inline void
+restart_sequencer(struct aic7xxx_host *p)
+{
+ aic_outb(p, 0, SEQADDR0);
+ aic_outb(p, 0, SEQADDR1);
+ aic_outb(p, FASTMODE, SEQCTL);
+}
+
+/*
+ * We include the aic7xxx_seq.c file here so that the other defines have
+ * already been made, and so that it comes before the code that actually
+ * downloads the instructions (since we don't typically use function
+ * prototype, our code has to be ordered that way, it's a left-over from
+ * the original driver days.....I should fix it some time DL).
+ */
+#include "aic7xxx_seq.c"
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_check_patch
+ *
+ * Description:
+ * See if the next patch to download should be downloaded.
+ *-F*************************************************************************/
+static int
+aic7xxx_check_patch(struct aic7xxx_host *p,
+  struct sequencer_patch **start_patch, int start_instr, int *skip_addr)
+{
+  struct sequencer_patch *patch;
+  struct sequencer_patch *table_end;
+  int patch_total;
+
+  patch_total = sizeof(sequencer_patches) / sizeof(struct sequencer_patch);
+  table_end = &sequencer_patches[patch_total];
+  patch = *start_patch;
+
+  /*
+   * Examine every patch whose start address matches the current
+   * instruction.  A patch whose predicate rejects this configuration
+   * tells us how many instructions to skip and how many alternative
+   * patches to step over; an accepted patch simply advances the cursor.
+   */
+  while ((patch < table_end) && (patch->begin == start_instr))
+  {
+    if (patch->patch_func(p) != 0)
+    {
+      /* Patch applies to this adapter; wait for the instruction
+       * pointer to reach it. */
+      patch++;
+    }
+    else
+    {
+      /* Patch rejected: begin skipping instructions. */
+      *skip_addr = start_instr + patch->skip_instr;
+      patch += patch->skip_patch;
+    }
+  }
+
+  *start_patch = patch;
+
+  /* 0 = still inside a skipped region, 1 = download this instruction. */
+  return (start_instr < *skip_addr) ? 0 : 1;
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_download_instr
+ *
+ * Description:
+ * Find the next patch to download.
+ *-F*************************************************************************/
+static void
+aic7xxx_download_instr(struct aic7xxx_host *p, int instrptr,
+  unsigned char *dconsts)
+{
+  union ins_formats instr;
+  struct ins_format1 *fmt1_ins;
+  struct ins_format3 *fmt3_ins;
+  unsigned char opcode;
+
+  /* Fetch the 32-bit instruction and convert from the sequencer's
+   * little-endian byte order to host order before patching it. */
+  instr = *(union ins_formats*) &seqprog[instrptr * 4];
+
+  instr.integer = le32_to_cpu(instr.integer);
+
+  fmt1_ins = &instr.format1;
+  fmt3_ins = NULL;
+
+  /* Pull the opcode */
+  opcode = instr.format1.opcode;
+  switch (opcode)
+  {
+    case AIC_OP_JMP:
+    case AIC_OP_JC:
+    case AIC_OP_JNC:
+    case AIC_OP_CALL:
+    case AIC_OP_JNE:
+    case AIC_OP_JNZ:
+    case AIC_OP_JE:
+    case AIC_OP_JZ:
+    {
+      struct sequencer_patch *cur_patch;
+      int address_offset;
+      unsigned int address;
+      int skip_addr;
+      int i;
+
+      /*
+       * Branch instructions must have their targets relocated: every
+       * instruction that the patch table removed below the target
+       * shifts the target down by one.
+       */
+      fmt3_ins = &instr.format3;
+      address_offset = 0;
+      address = fmt3_ins->address;
+      cur_patch = sequencer_patches;
+      skip_addr = 0;
+
+      for (i = 0; i < address;)
+      {
+        aic7xxx_check_patch(p, &cur_patch, i, &skip_addr);
+        if (skip_addr > i)
+        {
+          int end_addr;
+
+          /* Count how many skipped instructions lie below the target. */
+          end_addr = MIN(address, skip_addr);
+          address_offset += end_addr - i;
+          i = skip_addr;
+        }
+        else
+        {
+          i++;
+        }
+      }
+      address -= address_offset;
+      fmt3_ins->address = address;
+      /* Fall Through to the next code section */
+    }
+    case AIC_OP_OR:
+    case AIC_OP_AND:
+    case AIC_OP_XOR:
+    case AIC_OP_ADD:
+    case AIC_OP_ADC:
+    case AIC_OP_BMOV:
+      /* The parity bit doubles as a "download constant" marker in the
+       * assembled program: when set, the immediate is an index into
+       * dconsts rather than a literal value. */
+      if (fmt1_ins->parity != 0)
+      {
+        fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
+      }
+      fmt1_ins->parity = 0;
+      /* Fall Through to the next code section */
+    case AIC_OP_ROL:
+      if ((p->features & AHC_ULTRA2) != 0)
+      {
+        int i, count;
+
+        /* Calculate odd parity for the instruction */
+        for ( i=0, count=0; i < 31; i++)
+        {
+          unsigned int mask;
+
+          mask = 0x01 << i;
+          if ((instr.integer & mask) != 0)
+            count++;
+        }
+        if (!(count & 0x01))
+          instr.format1.parity = 1;
+      }
+      else
+      {
+        /* Non-Ultra2 chips: re-pack the (possibly relocated) fields
+         * back into the 32-bit instruction word by hand. */
+        if (fmt3_ins != NULL)
+        {
+          instr.integer = fmt3_ins->immediate |
+                          (fmt3_ins->source << 8) |
+                          (fmt3_ins->address << 16) |
+                          (fmt3_ins->opcode << 25);
+        }
+        else
+        {
+          instr.integer = fmt1_ins->immediate |
+                          (fmt1_ins->source << 8) |
+                          (fmt1_ins->destination << 16) |
+                          (fmt1_ins->ret << 24) |
+                          (fmt1_ins->opcode << 25);
+        }
+      }
+      /* Write the instruction to sequencer RAM, one byte at a time,
+       * least-significant byte first. */
+      aic_outb(p, (instr.integer & 0xff), SEQRAM);
+      aic_outb(p, ((instr.integer >> 8) & 0xff), SEQRAM);
+      aic_outb(p, ((instr.integer >> 16) & 0xff), SEQRAM);
+      aic_outb(p, ((instr.integer >> 24) & 0xff), SEQRAM);
+      break;
+
+    default:
+      panic("aic7xxx: Unknown opcode encountered in sequencer program.");
+      break;
+  }
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_loadseq
+ *
+ * Description:
+ * Load the sequencer code into the controller memory.
+ *-F*************************************************************************/
+static void
+aic7xxx_loadseq(struct aic7xxx_host *p)
+{
+  struct sequencer_patch *cur_patch;
+  int i;
+  int downloaded;
+  int skip_addr;
+  unsigned char download_consts[4] = {0, 0, 0, 0};
+
+  if (aic7xxx_verbose & VERBOSE_PROBE)
+  {
+    printk(KERN_INFO "(scsi%d) Downloading sequencer code...", p->host_no);
+  }
+#if 0
+  download_consts[TMODE_NUMCMDS] = p->num_targetcmds;
+#endif
+  download_consts[TMODE_NUMCMDS] = 0;
+  cur_patch = &sequencer_patches[0];
+  downloaded = 0;
+  skip_addr = 0;
+
+  /* Put sequencer RAM into load mode and rewind the write address. */
+  aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL);
+  aic_outb(p, 0, SEQADDR0);
+  aic_outb(p, 0, SEQADDR1);
+
+  /* Walk the assembled program one 4-byte instruction at a time,
+   * downloading only the instructions the patch table accepts for
+   * this adapter's configuration. */
+  for (i = 0; i < sizeof(seqprog) / 4; i++)
+  {
+    if (aic7xxx_check_patch(p, &cur_patch, i, &skip_addr) == 0)
+    {
+      /* Skip this instruction for this configuration. */
+      continue;
+    }
+    aic7xxx_download_instr(p, i, &download_consts[0]);
+    downloaded++;
+  }
+
+  /* Leave load mode and rewind the program counter.
+   * NOTE(review): the brief unpause/mdelay/pause cycle lets the
+   * sequencer run once after loading; the reason is not documented
+   * here -- confirm before changing. */
+  aic_outb(p, 0, SEQADDR0);
+  aic_outb(p, 0, SEQADDR1);
+  aic_outb(p, FASTMODE | FAILDIS, SEQCTL);
+  unpause_sequencer(p, TRUE);
+  mdelay(1);
+  pause_sequencer(p);
+  aic_outb(p, FASTMODE, SEQCTL);
+  if (aic7xxx_verbose & VERBOSE_PROBE)
+  {
+    printk(" %d instructions downloaded\n", downloaded);
+  }
+  if (aic7xxx_dump_sequencer)
+    aic7xxx_print_sequencer(p, downloaded);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_print_sequencer
+ *
+ * Description:
+ * Print the contents of the sequencer memory to the screen.
+ *-F*************************************************************************/
+static void
+aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded)
+{
+  int i, k, temp;
+
+  /* Re-enter LOADRAM mode so SEQRAM reads start from address zero. */
+  aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL);
+  aic_outb(p, 0, SEQADDR0);
+  aic_outb(p, 0, SEQADDR1);
+
+  k = 0;
+  for (i=0; i < downloaded; i++)
+  {
+    if ( k == 0 )
+      printk("%03x: ", i);
+    /* Each instruction is four consecutive SEQRAM byte reads,
+     * least-significant byte first. */
+    temp = aic_inb(p, SEQRAM);
+    temp |= (aic_inb(p, SEQRAM) << 8);
+    temp |= (aic_inb(p, SEQRAM) << 16);
+    temp |= (aic_inb(p, SEQRAM) << 24);
+    printk("%08x", temp);
+    if ( ++k == 8 )
+    {
+      /* Eight instructions per output line. */
+      printk("\n");
+      k = 0;
+    }
+    else
+      printk(" ");
+  }
+  /* Restore normal run mode, mirroring the tail of aic7xxx_loadseq. */
+  aic_outb(p, 0, SEQADDR0);
+  aic_outb(p, 0, SEQADDR1);
+  aic_outb(p, FASTMODE | FAILDIS, SEQCTL);
+  unpause_sequencer(p, TRUE);
+  mdelay(1);
+  pause_sequencer(p);
+  aic_outb(p, FASTMODE, SEQCTL);
+  printk("\n");
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_delay
+ *
+ * Description:
+ * Delay for specified amount of time. We use mdelay because the timer
+ * interrupt is not guaranteed to be enabled. This will cause an
+ * infinite loop since jiffies (clock ticks) is not updated.
+ *-F*************************************************************************/
+static void
+aic7xxx_delay(int seconds)
+{
+  /*
+   * Busy-wait for the requested number of seconds.  mdelay() is used
+   * because timer interrupts may be disabled at this point; sleeping on
+   * jiffies would then never wake up.
+   */
+  mdelay(1000 * seconds);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_info
+ *
+ * Description:
+ * Return a string describing the driver.
+ *-F*************************************************************************/
+const char *
+aic7xxx_info(struct Scsi_Host *dooh)
+{
+  /*
+   * Build the driver/board description string.  The result lives in a
+   * static buffer, so callers must not free it and must not rely on it
+   * surviving a subsequent call.
+   */
+  static char buffer[256];
+  struct aic7xxx_host *p = (struct aic7xxx_host *)dooh->hostdata;
+  const char *parts[7];
+  int i;
+
+  parts[0] = "Adaptec AHA274x/284x/294x (EISA/VLB/PCI-Fast SCSI) ";
+  parts[1] = AIC7XXX_C_VERSION;
+  parts[2] = "/";
+  parts[3] = AIC7XXX_H_VERSION;
+  parts[4] = "\n";
+  parts[5] = " <";
+  parts[6] = board_names[p->board_name_index];
+
+  /* Zero the buffer first so the strcat chain starts from empty. */
+  memset(buffer, 0, sizeof(buffer));
+  for (i = 0; i < 7; i++)
+    strcat(buffer, parts[i]);
+  strcat(buffer, ">");
+
+  return (buffer);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_find_syncrate
+ *
+ * Description:
+ * Look up the valid period to SCSIRATE conversion in our table
+ *-F*************************************************************************/
+static struct aic7xxx_syncrate *
+aic7xxx_find_syncrate(struct aic7xxx_host *p, unsigned int *period,
+  unsigned int maxsync)
+{
+  struct aic7xxx_syncrate *syncrate;
+
+  /* Start at the fastest rate this controller may use (maxsync) and
+   * walk toward slower entries until one's period is long enough for
+   * the request.  On Ultra2 chips, entries without an Ultra2 encoding
+   * terminate the walk early. */
+  syncrate = &aic7xxx_syncrates[maxsync];
+  while ( (syncrate->rate[0] != NULL) &&
+          (!(p->features & AHC_ULTRA2) || syncrate->sxfr_ultra2) )
+  {
+    if ( *period <= syncrate->period )
+    {
+      /*
+       * When responding to a target that requests sync, the requested rate
+       * may fall between two rates that we can output, but still be a rate
+       * that we can receive. Because of this, we want to respond with the
+       * same rate that it sent to us even if the period we use to send
+       * data to it is lower. Only lower the response period if we must.
+       */
+      if(syncrate == &aic7xxx_syncrates[maxsync])
+      {
+        *period = syncrate->period;
+      }
+      break;
+    }
+    syncrate++;
+  }
+  /* No usable entry (or an explicit async request): report async by
+   * zeroing the period and returning NULL. */
+  if ( (*period == 0) || (syncrate->rate[0] == NULL) ||
+       ((p->features & AHC_ULTRA2) && (syncrate->sxfr_ultra2 == 0)) )
+  {
+    /*
+     * Use async transfers for this target
+     */
+    *period = 0;
+    syncrate = NULL;
+  }
+  return (syncrate);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_find_period
+ *
+ * Description:
+ * Look up the valid SCSIRATE to period conversion in our table
+ *-F*************************************************************************/
+static unsigned int
+aic7xxx_find_period(struct aic7xxx_host *p, unsigned int scsirate,
+  unsigned int maxsync)
+{
+  struct aic7xxx_syncrate *entry;
+  int ultra2;
+
+  ultra2 = ((p->features & AHC_ULTRA2) != 0);
+
+  /* Keep only the transfer-rate bits relevant to this chip family. */
+  if (ultra2)
+    scsirate &= SXFR_ULTRA2;
+  else
+    scsirate &= SXFR;
+
+  /* Scan the rate table, starting at the fastest rate this adapter
+   * may use, for an entry whose encoding matches the register value. */
+  for (entry = &aic7xxx_syncrates[maxsync]; entry->rate[0] != NULL; entry++)
+  {
+    if (ultra2)
+    {
+      if (entry->sxfr_ultra2 == 0)
+        break;                  /* End of the Ultra2-capable entries. */
+      if (scsirate == entry->sxfr_ultra2)
+        return (entry->period);
+    }
+    else if (scsirate == (entry->sxfr & ~ULTRA_SXFR))
+    {
+      return (entry->period);
+    }
+  }
+  return (0); /* async */
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_validate_offset
+ *
+ * Description:
+ * Set a valid offset value for a particular card in use and transfer
+ * settings in use.
+ *-F*************************************************************************/
+static void
+aic7xxx_validate_offset(struct aic7xxx_host *p,
+  struct aic7xxx_syncrate *syncrate, unsigned int *offset, int wide)
+{
+  unsigned int limit;
+
+  /*
+   * Clamp the requested offset to what this controller supports: zero
+   * when no sync rate was found (async transfers), the Ultra2 maximum
+   * on Ultra2 chips, otherwise the wide or narrow maximum.
+   */
+  if (syncrate == NULL)
+    limit = 0;
+  else if (p->features & AHC_ULTRA2)
+    limit = MAX_OFFSET_ULTRA2;
+  else
+    limit = wide ? MAX_OFFSET_16BIT : MAX_OFFSET_8BIT;
+
+  if (*offset > limit)
+    *offset = limit;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_set_syncrate
+ *
+ * Description:
+ * Set the actual syncrate down in the card and in our host structs
+ *-F*************************************************************************/
+static void
+aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
+  int target, int channel, unsigned int period, unsigned int offset,
+  unsigned int type)
+{
+  unsigned char tindex;
+  unsigned short target_mask;
+  unsigned char lun;
+  unsigned int old_period, old_offset;
+
+  /* tindex packs target and channel into one per-device table index;
+   * target_mask is the matching bit in 16-bit per-device bitmaps. */
+  tindex = target | (channel << 3);
+  target_mask = 0x01 << tindex;
+  lun = aic_inb(p, SCB_TCL) & 0x07;
+
+  if (syncrate == NULL)
+  {
+    /* No usable rate entry means asynchronous transfers. */
+    period = 0;
+    offset = 0;
+  }
+
+  /* NOTE(review): old_period/old_offset are captured but never used in
+   * this function as visible here. */
+  old_period = p->transinfo[tindex].cur_period;
+  old_offset = p->transinfo[tindex].cur_offset;
+
+
+  if (type & AHC_TRANS_CUR)
+  {
+    unsigned int scsirate;
+
+    scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
+    if (p->features & AHC_ULTRA2)
+    {
+      /* Ultra2: rate lives in SXFR_ULTRA2 bits; offset has its own
+       * per-target register. */
+      scsirate &= ~SXFR_ULTRA2;
+      if (syncrate != NULL)
+      {
+        scsirate |= syncrate->sxfr_ultra2;
+      }
+      if (type & AHC_TRANS_ACTIVE)
+      {
+        aic_outb(p, offset, SCSIOFFSET);
+      }
+      aic_outb(p, offset, TARG_OFFSET + tindex);
+    }
+    else /* Not an Ultra2 controller */
+    {
+      /* Older chips: rate and offset share SCSIRATE, and Ultra speeds
+       * are enabled per target via the ULTRA_ENB bitmap. */
+      scsirate &= ~(SXFR|SOFS);
+      p->ultraenb &= ~target_mask;
+      if (syncrate != NULL)
+      {
+        if (syncrate->sxfr & ULTRA_SXFR)
+        {
+          p->ultraenb |= target_mask;
+        }
+        scsirate |= (syncrate->sxfr & SXFR);
+        scsirate |= (offset & SOFS);
+      }
+      if (type & AHC_TRANS_ACTIVE)
+      {
+        unsigned char sxfrctl0;
+
+        /* Reflect the ultra-enable state of the active target in
+         * SXFRCTL0's FAST20 bit. */
+        sxfrctl0 = aic_inb(p, SXFRCTL0);
+        sxfrctl0 &= ~FAST20;
+        if (p->ultraenb & target_mask)
+          sxfrctl0 |= FAST20;
+        aic_outb(p, sxfrctl0, SXFRCTL0);
+      }
+      aic_outb(p, p->ultraenb & 0xff, ULTRA_ENB);
+      aic_outb(p, (p->ultraenb >> 8) & 0xff, ULTRA_ENB + 1 );
+    }
+    if (type & AHC_TRANS_ACTIVE)
+    {
+      aic_outb(p, scsirate, SCSIRATE);
+    }
+    aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
+    p->transinfo[tindex].cur_period = period;
+    p->transinfo[tindex].cur_offset = offset;
+    /* Announce the negotiated rate once per device unless the caller
+     * asked for quiet operation (AHC_TRANS_QUITE). */
+    if ( !(type & AHC_TRANS_QUITE) &&
+         (aic7xxx_verbose & VERBOSE_NEGOTIATION) &&
+         (p->dev_flags[tindex] & DEVICE_PRINT_SDTR) )
+    {
+      if (offset)
+      {
+        int rate_mod = (scsirate & WIDEXFER) ? 1 : 0;
+
+        printk(INFO_LEAD "Synchronous at %s Mbyte/sec, "
+               "offset %d.\n", p->host_no, channel, target, lun,
+               syncrate->rate[rate_mod], offset);
+      }
+      else
+      {
+        printk(INFO_LEAD "Using asynchronous transfers.\n",
+               p->host_no, channel, target, lun);
+      }
+      p->dev_flags[tindex] &= ~DEVICE_PRINT_SDTR;
+    }
+  }
+
+  if (type & AHC_TRANS_GOAL)
+  {
+    p->transinfo[tindex].goal_period = period;
+    p->transinfo[tindex].goal_offset = offset;
+  }
+
+  if (type & AHC_TRANS_USER)
+  {
+    p->transinfo[tindex].user_period = period;
+    p->transinfo[tindex].user_offset = offset;
+  }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_set_width
+ *
+ * Description:
+ * Set the actual width down in the card and in our host structs
+ *-F*************************************************************************/
+static void
+aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun,
+  unsigned int width, unsigned int type)
+{
+  unsigned char tindex;
+  unsigned short target_mask;
+  unsigned int old_width, new_offset;
+
+  /* tindex packs target and channel into one per-device table index. */
+  tindex = target | (channel << 3);
+  target_mask = 1 << tindex;
+
+  /* NOTE(review): old_width and target_mask are captured but never
+   * used in this function as visible here. */
+  old_width = p->transinfo[tindex].cur_width;
+
+  /* Pick the maximum sync offset appropriate for the new bus width;
+   * used below to seed goal_offset for the SDTR that follows a WDTR. */
+  if (p->features & AHC_ULTRA2)
+    new_offset = MAX_OFFSET_ULTRA2;
+  else if (width == MSG_EXT_WDTR_BUS_16_BIT)
+    new_offset = MAX_OFFSET_16BIT;
+  else
+    new_offset = MAX_OFFSET_8BIT;
+
+  if (type & AHC_TRANS_CUR)
+  {
+    unsigned char scsirate;
+
+    /* Flip the WIDEXFER bit in the per-target rate register (and the
+     * live SCSIRATE register if this device is on the bus now). */
+    scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
+
+    scsirate &= ~WIDEXFER;
+    if (width == MSG_EXT_WDTR_BUS_16_BIT)
+      scsirate |= WIDEXFER;
+
+    aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
+
+    if (type & AHC_TRANS_ACTIVE)
+      aic_outb(p, scsirate, SCSIRATE);
+
+    p->transinfo[tindex].cur_width = width;
+
+    /* Announce the negotiated width once per device. */
+    if ((aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+        (p->dev_flags[tindex] & DEVICE_PRINT_WDTR))
+    {
+      printk(INFO_LEAD "Using %s transfers\n", p->host_no, channel, target,
+             lun, (scsirate & WIDEXFER) ? "Wide(16bit)" : "Narrow(8bit)" );
+      p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
+    }
+  }
+
+  if (type & AHC_TRANS_GOAL)
+    p->transinfo[tindex].goal_width = width;
+  if (type & AHC_TRANS_USER)
+    p->transinfo[tindex].user_width = width;
+
+  /*
+   * Having just set the width, the SDTR should come next, and we need a valid
+   * offset for the SDTR. So, we make sure we put a valid one in here now as
+   * the goal_offset.
+   */
+  if (p->transinfo[tindex].goal_offset)
+    p->transinfo[tindex].goal_offset = new_offset;
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_init
+ *
+ * Description:
+ * SCB queue initialization.
+ *
+ *-F*************************************************************************/
+static void
+scbq_init(volatile scb_queue_type *queue)
+{
+  /* Start with an empty queue: no head, no tail. */
+  queue->tail = NULL;
+  queue->head = NULL;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_insert_head
+ *
+ * Description:
+ * Add an SCB to the head of the list.
+ *
+ *-F*************************************************************************/
+static inline void
+scbq_insert_head(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+  unsigned long cpu_flags;
+#endif
+
+  /* Push scb onto the front of the list under the driver lock. */
+  DRIVER_LOCK
+  scb->q_next = queue->head;
+  queue->head = scb;
+  if (queue->tail == NULL)
+  {
+    /* The list was empty; the new head is also the tail. */
+    queue->tail = queue->head;
+  }
+  DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_remove_head
+ *
+ * Description:
+ * Remove an SCB from the head of the list.
+ *
+ *-F*************************************************************************/
+static inline struct aic7xxx_scb *
+scbq_remove_head(volatile scb_queue_type *queue)
+{
+  struct aic7xxx_scb *removed;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+  unsigned long cpu_flags;
+#endif
+
+  /* Pop and return the first SCB (NULL if empty), under the lock. */
+  DRIVER_LOCK
+  removed = queue->head;
+  if (removed != NULL)
+    queue->head = removed->q_next;
+  if (queue->head == NULL)
+    queue->tail = NULL;     /* Queue drained; clear the tail too. */
+  DRIVER_UNLOCK
+  return (removed);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_remove
+ *
+ * Description:
+ * Removes an SCB from the list.
+ *
+ *-F*************************************************************************/
+static inline void
+scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+  unsigned long cpu_flags;
+#endif
+
+  /* Unlink an arbitrary SCB from the singly-linked queue. */
+  DRIVER_LOCK
+  if (queue->head == scb)
+  {
+    /* Removing the first element is just a head pop. */
+    scbq_remove_head(queue);
+  }
+  else
+  {
+    struct aic7xxx_scb *prev;
+
+    /* Find the element whose successor is the one to unlink; stop at
+     * the end of the queue if scb is not present. */
+    for (prev = queue->head;
+         (prev != NULL) && (prev->q_next != scb);
+         prev = prev->q_next)
+      ;
+    if (prev != NULL)
+    {
+      prev->q_next = scb->q_next;
+      if (scb->q_next == NULL)
+      {
+        /* scb was the tail; back the tail pointer up. */
+        queue->tail = prev;
+      }
+    }
+  }
+  DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_insert_tail
+ *
+ * Description:
+ * Add an SCB at the tail of the list.
+ *
+ *-F*************************************************************************/
+static inline void
+scbq_insert_tail(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+  unsigned long cpu_flags;
+#endif
+
+  /* Append scb to the end of the list under the driver lock. */
+  DRIVER_LOCK
+  scb->q_next = NULL;
+  if (queue->tail != NULL)
+    queue->tail->q_next = scb;    /* Chain after the current tail. */
+  queue->tail = scb;
+  if (queue->head == NULL)
+    queue->head = queue->tail;    /* List was empty; scb is also head. */
+  DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_match_scb
+ *
+ * Description:
+ * Checks to see if an scb matches the target/channel as specified.
+ * If target is ALL_TARGETS (-1), then we're looking for any device
+ * on the specified channel; this happens when a channel is going
+ * to be reset and all devices on that channel must be aborted.
+ *-F*************************************************************************/
+static int
+aic7xxx_match_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
+  int target, int channel, int lun, unsigned char tag)
+{
+  int scb_target = (scb->hscb->target_channel_lun >> 4) & 0x0F;
+  int scb_channel = (scb->hscb->target_channel_lun >> 3) & 0x01;
+  int scb_lun = scb->hscb->target_channel_lun & 0x07;
+  int match;
+
+  /*
+   * Each criterion may be a wildcard (ALL_CHANNELS, ALL_TARGETS,
+   * ALL_LUNS, SCB_LIST_NULL); all four must match or be wildcarded.
+   */
+  match = ((channel == ALL_CHANNELS) || (scb_channel == channel)) &&
+          ((target == ALL_TARGETS) || (scb_target == target)) &&
+          ((lun == ALL_LUNS) || (scb_lun == lun)) &&
+          ((tag == SCB_LIST_NULL) || (scb->hscb->tag == tag));
+
+  if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+  {
+    printk(KERN_INFO "(scsi%d:%d:%d:%d:tag%d) %s search criteria"
+           " (scsi%d:%d:%d:%d:tag%d)\n", p->host_no, CTL_OF_SCB(scb),
+           scb->hscb->tag, (match) ? "matches" : "doesn't match",
+           p->host_no, channel, target, lun, tag);
+  }
+
+  return (match);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_add_curscb_to_free_list
+ *
+ * Description:
+ * Adds the current scb (in SCBPTR) to the list of free SCBs.
+ *-F*************************************************************************/
+static void
+aic7xxx_add_curscb_to_free_list(struct aic7xxx_host *p)
+{
+  /*
+   * Invalidate the tag so that aic7xxx_find_scb doesn't think
+   * it's active
+   */
+  aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+  aic_outb(p, 0, SCB_CONTROL);
+
+  /* Link the slot currently selected by SCBPTR onto the head of the
+   * card's free-SCB chain. */
+  aic_outb(p, aic_inb(p, FREE_SCBH), SCB_NEXT);
+  aic_outb(p, aic_inb(p, SCBPTR), FREE_SCBH);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_rem_scb_from_disc_list
+ *
+ * Description:
+ * Removes the current SCB from the disconnected list and adds it
+ * to the free list.
+ *-F*************************************************************************/
+static unsigned char
+aic7xxx_rem_scb_from_disc_list(struct aic7xxx_host *p, unsigned char scbptr)
+{
+  unsigned char next;
+  unsigned char prev;
+
+  /* Select the slot, remember its neighbors in the doubly-linked
+   * disconnected list, then hand the slot to the free list. */
+  aic_outb(p, scbptr, SCBPTR);
+  next = aic_inb(p, SCB_NEXT);
+  prev = aic_inb(p, SCB_PREV);
+  aic7xxx_add_curscb_to_free_list(p);
+
+  if (prev != SCB_LIST_NULL)
+  {
+    /* Splice the predecessor past the removed slot. */
+    aic_outb(p, prev, SCBPTR);
+    aic_outb(p, next, SCB_NEXT);
+  }
+  else
+  {
+    /* Removed slot was the list head; update the head register. */
+    aic_outb(p, next, DISCONNECTED_SCBH);
+  }
+
+  if (next != SCB_LIST_NULL)
+  {
+    /* Fix the successor's back pointer. */
+    aic_outb(p, next, SCBPTR);
+    aic_outb(p, prev, SCB_PREV);
+  }
+  /* Return the successor so callers can continue walking the list. */
+  return next;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_busy_target
+ *
+ * Description:
+ * Set the specified target busy.
+ *-F*************************************************************************/
+static inline void
+aic7xxx_busy_target(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+  /* Record this SCB's tag as the active untagged command for its
+   * target/channel/lun slot. */
+  int tcl = scb->hscb->target_channel_lun;
+
+  p->untagged_scbs[tcl] = scb->hscb->tag;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_index_busy_target
+ *
+ * Description:
+ * Returns the index of the busy target, and optionally sets the
+ * target inactive.
+ *-F*************************************************************************/
+static inline unsigned char
+aic7xxx_index_busy_target(struct aic7xxx_host *p, unsigned char tcl,
+  int unbusy)
+{
+  /*
+   * Report which SCB tag currently owns this target/channel/lun slot;
+   * when unbusy is set, also mark the slot free again.
+   */
+  unsigned char active = p->untagged_scbs[tcl];
+
+  if (unbusy)
+    p->untagged_scbs[tcl] = SCB_LIST_NULL;
+
+  return (active);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_find_scb
+ *
+ * Description:
+ * Look through the SCB array of the card and attempt to find the
+ * hardware SCB that corresponds to the passed in SCB. Return
+ * SCB_LIST_NULL if unsuccessful. This routine assumes that the
+ * card is already paused.
+ *-F*************************************************************************/
+static unsigned char
+aic7xxx_find_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+  unsigned char saved_scbptr;
+  unsigned char curindex;
+
+  /*
+   * Scan every hardware SCB slot on the card for the tag of the given
+   * driver SCB, preserving the card's SCBPTR across the search.
+   * Assumes the sequencer is already paused.  Returns the matching
+   * slot index, or SCB_LIST_NULL when the tag is not on the card.
+   *
+   * (The redundant "curindex = 0;" before the loop was removed; the
+   * for-loop initializer already sets it.)
+   */
+  saved_scbptr = aic_inb(p, SCBPTR);
+  for (curindex = 0; curindex < p->scb_data->maxhscbs; curindex++)
+  {
+    aic_outb(p, curindex, SCBPTR);
+    if (aic_inb(p, SCB_TAG) == scb->hscb->tag)
+    {
+      break;
+    }
+  }
+  aic_outb(p, saved_scbptr, SCBPTR);
+  if (curindex >= p->scb_data->maxhscbs)
+  {
+    curindex = SCB_LIST_NULL;
+  }
+
+  return (curindex);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_allocate_scb
+ *
+ * Description:
+ * Get an SCB from the free list or by allocating a new one.
+ *-F*************************************************************************/
+static int
+aic7xxx_allocate_scb(struct aic7xxx_host *p)
+{
+  struct aic7xxx_scb *scbp = NULL;
+  int scb_size = sizeof(struct aic7xxx_scb) +
+                 sizeof (struct hw_scatterlist) * AIC7XXX_MAX_SG;
+  int i;
+  int step = PAGE_SIZE / 1024;
+  unsigned long scb_count = 0;
+  struct hw_scatterlist *hsgp;
+  struct aic7xxx_scb *scb_ap;
+  unsigned long temp;
+
+
+  /* Returns the number of SCBs allocated in this call: 0 when the
+   * kmalloc fails or when the pool is already at maxscbs. */
+  if (p->scb_data->numscbs < p->scb_data->maxscbs)
+  {
+    /*
+     * Calculate the optimal number of SCBs to allocate.
+     *
+     * NOTE: This formula works because the sizeof(sg_array) is always
+     * 1024.  Therefore, scb_size * i would always be > PAGE_SIZE *
+     * (i/step).  The (i-1) allows the left hand side of the equation
+     * to grow into the right hand side to a point of near perfect
+     * efficiency since scb_size * (i -1) is growing slightly faster
+     * than the right hand side.  If the number of SG array elements
+     * is changed, this function may not be near so efficient any more.
+     */
+    for ( i=step;; i *= 2 )
+    {
+      if ( (scb_size * (i-1)) >= ( (PAGE_SIZE * (i/step)) - 64 ) )
+      {
+        i /= 2;
+        break;
+      }
+    }
+    /* Never allocate past the configured maximum pool size. */
+    scb_count = MIN( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs);
+    scb_ap = (struct aic7xxx_scb *)kmalloc(scb_size * scb_count, GFP_ATOMIC);
+    if (scb_ap != NULL)
+    {
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+      if (aic7xxx_verbose > 0xffff)
+      {
+        if (p->scb_data->numscbs == 0)
+          printk(INFO_LEAD "Allocating initial %ld SCB structures.\n",
+                 p->host_no, -1, -1, -1, scb_count);
+        else
+          printk(INFO_LEAD "Allocating %ld additional SCB structures.\n",
+                 p->host_no, -1, -1, -1, scb_count);
+      }
+#endif
+      memset(scb_ap, 0, scb_count * scb_size);
+      /* Place the SG arrays after the SCB structs, rounded up to the
+       * next 1024-byte boundary within the same allocation.
+       * NOTE(review): the "-64" slack in the sizing loop above appears
+       * to be what keeps this rounded-up region inside the kmalloc'd
+       * block -- confirm before changing either formula. */
+      temp = (unsigned long) &scb_ap[scb_count];
+      temp += 1023;
+      temp &= ~1023;
+      hsgp = (struct hw_scatterlist *)temp;
+      for (i=0; i < scb_count; i++)
+      {
+        scbp = &scb_ap[i];
+        scbp->hscb = &p->scb_data->hscbs[p->scb_data->numscbs];
+        scbp->sg_list = &hsgp[i * AIC7XXX_MAX_SG];
+        memset(scbp->hscb, 0, sizeof(struct aic7xxx_hwscb));
+        /* The hardware tag is simply the SCB's pool index. */
+        scbp->hscb->tag = p->scb_data->numscbs;
+        /*
+         * Place in the scb array; never is removed
+         */
+        p->scb_data->scb_array[p->scb_data->numscbs++] = scbp;
+        scbq_insert_head(&p->scb_data->free_scbs, scbp);
+      }
+      /* Only the last SCB of the batch remembers the kmalloc pointer,
+       * so the whole batch can be freed in one kfree later. */
+      scbp->kmalloc_ptr = scb_ap;
+    }
+    else
+    {
+      return(0);
+    }
+  }
+  return(scb_count);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_queue_cmd_complete
+ *
+ * Description:
+ * Due to race conditions present in the SCSI subsystem, it is easier
+ * to queue completed commands, then call scsi_done() on them when
+ * we're finished. This function queues the completed commands.
+ *-F*************************************************************************/
+static void
+aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+{
+  /*
+   * Chain the finished command onto the completion list through its
+   * host_scribble field; scsi_done() is invoked later in one batch
+   * (see aic7xxx_done_cmds_complete) to sidestep SCSI-layer races.
+   */
+  Scsi_Cmnd *old_head = p->completeq.head;
+
+  cmd->host_scribble = (char *)old_head;
+  p->completeq.head = cmd;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_done_cmds_complete
+ *
+ * Description:
+ * Process the completed command queue.
+ *-F*************************************************************************/
+static void
+aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
+{
+  Scsi_Cmnd *cmd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+  unsigned long cpu_flags = 0;
+#endif
+
+  /*
+   * Drain the completion queue, handing each command back to the SCSI
+   * mid-layer via its scsi_done callback.  The queue is re-read from
+   * the head each iteration, under the driver lock.
+   */
+  DRIVER_LOCK
+  for (cmd = p->completeq.head; cmd != NULL; cmd = p->completeq.head)
+  {
+    p->completeq.head = (Scsi_Cmnd *)cmd->host_scribble;
+    cmd->host_scribble = NULL;
+    cmd->scsi_done(cmd);
+  }
+  DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_free_scb
+ *
+ * Description:
+ * Free the scb and insert into the free scb list.
+ *-F*************************************************************************/
+static void
+aic7xxx_free_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+
+  /* Reset the per-command fields so the SCB is clean for reuse... */
+  scb->flags = SCB_FREE;
+  scb->cmd = NULL;
+  scb->sg_count = 0;
+  scb->sg_length = 0;
+  scb->tag_action = 0;
+  scb->hscb->control = 0;
+  scb->hscb->target_status = 0;
+  scb->hscb->target_channel_lun = SCB_LIST_NULL;
+
+  /* ...then return it to the front of the free list. */
+  scbq_insert_head(&p->scb_data->free_scbs, scb);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_done
+ *
+ * Description:
+ * Calls the higher level scsi done function and frees the scb.
+ *-F*************************************************************************/
+static void
+aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ Scsi_Cmnd *cmd = scb->cmd;
+ int tindex = TARGET_INDEX(cmd);
+ struct aic7xxx_scb *scbp;
+ unsigned char queue_depth;
+
+ if (scb->flags & SCB_RECOVERY_SCB)
+ {
+ p->flags &= ~AHC_ABORT_PENDING;
+ }
+ if (scb->flags & SCB_RESET)
+ {
+ cmd->result = (DID_RESET << 16) | (cmd->result & 0xffff);
+ }
+ else if (scb->flags & SCB_ABORT)
+ {
+ cmd->result = (DID_RESET << 16) | (cmd->result & 0xffff);
+ }
+ else if (!(p->dev_flags[tindex] & DEVICE_SCANNED))
+ {
+ if ( (cmd->cmnd[0] == INQUIRY) && (cmd->result == DID_OK) )
+ {
+ char *buffer;
+
+ p->dev_flags[tindex] |= DEVICE_PRESENT;
+ if(cmd->use_sg)
+ {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ buffer = (char *)sg[0].address;
+ }
+ else
+ {
+ buffer = (char *)cmd->request_buffer;
+ }
+#define WIDE_INQUIRY_BITS 0x60
+#define SYNC_INQUIRY_BITS 0x10
+ if ( (buffer[7] & WIDE_INQUIRY_BITS) &&
+ (p->features & AHC_WIDE) )
+ {
+ p->needwdtr |= (1<<tindex);
+ p->needwdtr_copy |= (1<<tindex);
+ if ( (p->flags & AHC_SEEPROM_FOUND) &&
+ (p->transinfo[tindex].user_width != MSG_EXT_WDTR_BUS_16_BIT) )
+ p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
+ else
+ p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_16_BIT;
+ }
+ else
+ {
+ p->needwdtr &= ~(1<<tindex);
+ p->needwdtr_copy &= ~(1<<tindex);
+ pause_sequencer(p);
+ aic7xxx_set_width(p, cmd->target, cmd->channel, cmd->lun,
+ MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE |
+ AHC_TRANS_GOAL |
+ AHC_TRANS_CUR) );
+ unpause_sequencer(p, FALSE);
+ }
+ if (buffer[7] & SYNC_INQUIRY_BITS)
+ {
+ p->needsdtr |= (1<<tindex);
+ p->needsdtr_copy |= (1<<tindex);
+
+ if (p->flags & AHC_SEEPROM_FOUND)
+ {
+ p->transinfo[tindex].goal_period = p->transinfo[tindex].user_period;
+ p->transinfo[tindex].goal_offset = p->transinfo[tindex].user_offset;
+ }
+ else
+ {
+ if (p->features & AHC_ULTRA2)
+ {
+ p->transinfo[tindex].goal_period =
+ aic7xxx_syncrates[AHC_SYNCRATE_ULTRA2].period;
+ }
+ else if (p->features & AHC_ULTRA)
+ {
+ p->transinfo[tindex].goal_period =
+ aic7xxx_syncrates[AHC_SYNCRATE_ULTRA].period;
+ }
+ else
+ {
+ p->transinfo[tindex].goal_period =
+ aic7xxx_syncrates[AHC_SYNCRATE_FAST].period;
+ }
+ if (p->features & AHC_ULTRA2)
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ else if (p->transinfo[tindex].goal_width == MSG_EXT_WDTR_BUS_16_BIT)
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ else
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ }
+ }
+ else
+ {
+ p->needsdtr &= ~(1<<tindex);
+ p->needsdtr_copy &= ~(1<<tindex);
+ p->transinfo[tindex].goal_period = 0;
+ p->transinfo[tindex].goal_offset = 0;
+ }
+ p->dev_flags[tindex] |= DEVICE_SCANNED;
+ p->dev_flags[tindex] |= DEVICE_PRINT_WDTR | DEVICE_PRINT_SDTR;
+#undef WIDE_INQUIRY_BITS
+#undef SYNC_INQUIRY_BITS
+ }
+ }
+ else if ((scb->flags & (SCB_MSGOUT_WDTR | SCB_MSGOUT_SDTR)) != 0)
+ {
+ unsigned short mask;
+ int message_error = FALSE;
+
+ mask = 0x01 << tindex;
+
+ /*
+ * Check to see if we get an invalid message or a message error
+ * after failing to negotiate a wide or sync transfer message.
+ */
+ if ((scb->flags & SCB_SENSE) &&
+ ((scb->cmd->sense_buffer[12] == 0x43) || /* INVALID_MESSAGE */
+ (scb->cmd->sense_buffer[12] == 0x49))) /* MESSAGE_ERROR */
+ {
+ message_error = TRUE;
+ }
+
+ if (scb->flags & SCB_MSGOUT_WDTR)
+ {
+ p->wdtr_pending &= ~mask;
+ if (message_error)
+ {
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_WDTR) )
+ {
+ printk(INFO_LEAD "Device failed to complete Wide Negotiation "
+ "processing and\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "returned a sense error code for invalid message, "
+ "disabling future\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "Wide negotiation to this device.\n", p->host_no,
+ CTL_OF_SCB(scb));
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
+ }
+ p->needwdtr &= ~mask;
+ p->needwdtr_copy &= ~mask;
+ }
+ }
+ if (scb->flags & SCB_MSGOUT_SDTR)
+ {
+ p->sdtr_pending &= ~mask;
+ if (message_error)
+ {
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_SDTR) )
+ {
+ printk(INFO_LEAD "Device failed to complete Sync Negotiation "
+ "processing and\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "returned a sense error code for invalid message, "
+ "disabling future\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "Sync negotiation to this device.\n", p->host_no,
+ CTL_OF_SCB(scb));
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_SDTR;
+ }
+ p->needsdtr &= ~mask;
+ p->needsdtr_copy &= ~mask;
+ }
+ }
+ }
+ queue_depth = p->dev_temp_queue_depth[tindex];
+ if (queue_depth >= p->dev_active_cmds[tindex])
+ {
+ scbp = scbq_remove_head(&p->delayed_scbs[tindex]);
+ if (scbp)
+ {
+ if (queue_depth == 1)
+ {
+ /*
+ * Give extra preference to untagged devices, such as CD-R devices
+ * This makes it more likely that a drive *won't* stuff up while
+ * waiting on data at a critical time, such as CD-R writing and
+ * audio CD ripping operations. Should also benefit tape drives.
+ */
+ scbq_insert_head(&p->waiting_scbs, scbp);
+ }
+ else
+ {
+ scbq_insert_tail(&p->waiting_scbs, scbp);
+ }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Moving SCB from delayed to waiting queue.\n",
+ p->host_no, CTL_OF_SCB(scbp));
+#endif
+ if (queue_depth > p->dev_active_cmds[tindex])
+ {
+ scbp = scbq_remove_head(&p->delayed_scbs[tindex]);
+ if (scbp)
+ scbq_insert_tail(&p->waiting_scbs, scbp);
+ }
+ }
+ }
+ if ( !(scb->tag_action) && (p->tagenable & (1<<tindex)) )
+ {
+ p->dev_temp_queue_depth[tindex] = p->dev_max_queue_depth[tindex];
+ }
+ p->dev_active_cmds[tindex]--;
+ p->activescbs--;
+
+ /*
+ * If this was an untagged I/O, unbusy the target so the sequencer won't
+ * mistake things later
+ */
+ if (aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, FALSE) ==
+ scb->hscb->tag)
+ {
+ aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, TRUE);
+ }
+
+ {
+ int actual;
+
+ /*
+ * XXX: we should actually know how much actually transferred
+ * XXX: for each command, but apparently that's too difficult.
+ *
+ * We set a lower limit of 512 bytes on the transfer length. We
+ * ignore anything less than this because we don't have a real
+ * reason to count it. Read/Writes to tapes are usually about 20K
+ * and disks are a minimum of 512 bytes unless you want to count
+ * non-read/write commands (such as TEST_UNIT_READY) which we don't
+ */
+ actual = scb->sg_length;
+ if ((actual >= 512) && (((cmd->result >> 16) & 0xf) == DID_OK))
+ {
+ struct aic7xxx_xferstats *sp;
+#ifdef AIC7XXX_PROC_STATS
+ long *ptr;
+ int x;
+#endif /* AIC7XXX_PROC_STATS */
+
+ sp = &p->stats[TARGET_INDEX(cmd)];
+
+ /*
+ * For block devices, cmd->request.cmd is always == either READ or
+ * WRITE. For character devices, this isn't always set properly, so
+ * we check data_cmnd[0]. This catches the conditions for st.c, but
+ * I'm still not sure if request.cmd is valid for sg devices.
+ */
+ if ( (cmd->request.cmd == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
+ (cmd->data_cmnd[0] == WRITE_FILEMARKS) )
+ {
+ sp->w_total++;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if ( (sp->w_total > 16) && (aic7xxx_verbose > 0xffff) )
+ aic7xxx_verbose &= 0xffff;
+#endif
+#ifdef AIC7XXX_PROC_STATS
+ ptr = sp->w_bins;
+#endif /* AIC7XXX_PROC_STATS */
+ }
+ else
+ {
+ sp->r_total++;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if ( (sp->r_total > 16) && (aic7xxx_verbose > 0xffff) )
+ aic7xxx_verbose &= 0xffff;
+#endif
+#ifdef AIC7XXX_PROC_STATS
+ ptr = sp->r_bins;
+#endif /* AIC7XXX_PROC_STATS */
+ }
+#ifdef AIC7XXX_PROC_STATS
+ x = -10;
+ while(actual)
+ {
+ actual >>= 1;
+ x++;
+ }
+ if (x < 0)
+ {
+ ptr[0]++;
+ }
+ else if (x > 7)
+ {
+ ptr[7]++;
+ }
+ else
+ {
+ ptr[x]++;
+ }
+#endif /* AIC7XXX_PROC_STATS */
+ }
+ }
+ aic7xxx_free_scb(p, scb);
+ aic7xxx_queue_cmd_complete(p, cmd);
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_run_done_queue
+ *
+ * Description:
+ * Calls the aic7xxx_done() for the Scsi_Cmnd of each scb in the
+ * aborted list, and adds each scb to the free list. If complete
+ * is TRUE, we also process the commands complete list.
+ *-F*************************************************************************/
+static void
+aic7xxx_run_done_queue(struct aic7xxx_host *p, /*complete*/ int complete)
+{
+ struct aic7xxx_scb *scb;
+ int i, found = 0;
+
+ /*
+ * Walk every allocated SCB; those flagged SCB_QUEUED_FOR_DONE (set by
+ * the abort/reset paths) are finished off through aic7xxx_done().
+ */
+ for (i = 0; i < p->scb_data->numscbs; i++)
+ {
+ scb = p->scb_data->scb_array[i];
+ if (scb->flags & SCB_QUEUED_FOR_DONE)
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Aborting scb %d\n",
+ p->host_no, CTL_OF_SCB(scb), scb->hscb->tag);
+ found++;
+ aic7xxx_done(p, scb);
+ }
+ }
+ if (aic7xxx_verbose & (VERBOSE_ABORT_RETURN | VERBOSE_RESET_RETURN))
+ {
+ printk(INFO_LEAD "%d commands found and queued for "
+ "completion.\n", p->host_no, -1, -1, -1, found);
+ }
+ /* Optionally flush the completed-commands list back to the midlayer. */
+ if (complete)
+ {
+ aic7xxx_done_cmds_complete(p);
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort_waiting_scb
+ *
+ * Description:
+ * Manipulate the waiting for selection list and return the
+ * scb that follows the one that we remove.
+ *-F*************************************************************************/
+static unsigned char
+aic7xxx_abort_waiting_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
+ unsigned char scbpos, unsigned char prev)
+{
+ unsigned char curscb, next;
+
+ /*
+ * Select the SCB we want to abort and pull the next pointer out of it.
+ * The current SCBPTR is saved first and restored before returning.
+ */
+ curscb = aic_inb(p, SCBPTR);
+ aic_outb(p, scbpos, SCBPTR);
+ next = aic_inb(p, SCB_NEXT);
+
+ /* The aborted SCB's hardware slot goes back on the free list. */
+ aic7xxx_add_curscb_to_free_list(p);
+
+ /*
+ * Update the waiting list
+ */
+ if (prev == SCB_LIST_NULL)
+ {
+ /*
+ * First in the list
+ */
+ aic_outb(p, next, WAITING_SCBH);
+ }
+ else
+ {
+ /*
+ * Select the scb that pointed to us and update its next pointer.
+ */
+ aic_outb(p, prev, SCBPTR);
+ aic_outb(p, next, SCB_NEXT);
+ }
+ /*
+ * Point us back at the original scb position and inform the SCSI
+ * system that the command has been aborted.
+ */
+ aic_outb(p, curscb, SCBPTR);
+ return (next);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_search_qinfifo
+ *
+ * Description:
+ * Search the queue-in FIFO for matching SCBs and conditionally
+ * requeue. Returns the number of matching SCBs.
+ *-F*************************************************************************/
+static int
+aic7xxx_search_qinfifo(struct aic7xxx_host *p, int target, int channel,
+ int lun, unsigned char tag, int flags, int requeue,
+ volatile scb_queue_type *queue)
+{
+ int found;
+ unsigned char qinpos, qintail;
+ struct aic7xxx_scb *scbp;
+
+ found = 0;
+ qinpos = aic_inb(p, QINPOS);
+ qintail = p->qinfifonext;
+
+ /*
+ * Rewind the software fifo-next pointer; non-matching entries are
+ * written back in order as we scan, compacting the fifo in place.
+ */
+ p->qinfifonext = qinpos;
+
+ while (qinpos != qintail)
+ {
+ scbp = p->scb_data->scb_array[p->qinfifo[qinpos++]];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ /*
+ * We found an scb that needs to be removed. Three dispositions:
+ * requeue onto the caller's queue, requeue back into the qinfifo,
+ * or mark it for aic7xxx_run_done_queue().
+ */
+ if (requeue && (queue != NULL))
+ {
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ scbq_remove(queue, scbp);
+ scbq_remove(&p->waiting_scbs, scbp);
+ scbq_remove(&p->delayed_scbs[TARGET_INDEX(scbp->cmd)], scbp);
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbq_insert_tail(queue, scbp);
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]--;
+ p->activescbs--;
+ scbp->flags |= SCB_WAITINGQ;
+ if ( !(scbp->tag_action & TAG_ENB) )
+ {
+ /* Untagged command: unbusy the target index entry. */
+ aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
+ TRUE);
+ }
+ }
+ else if (requeue)
+ {
+ p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
+ }
+ else
+ {
+ /*
+ * Preserve any SCB_RECOVERY_SCB flags on this scb then set the
+ * flags we were called with, presumably so aic7xxx_run_done_queue
+ * can find this scb
+ */
+ scbp->flags = flags | (scbp->flags & SCB_RECOVERY_SCB);
+ if (aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
+ FALSE) == scbp->hscb->tag)
+ {
+ aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
+ TRUE);
+ }
+ }
+ found++;
+ }
+ else
+ {
+ p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
+ }
+ }
+ /*
+ * Now that we've done the work, clear out any left over commands in the
+ * qinfifo and update the KERNEL_QINPOS down on the card.
+ *
+ * NOTE: This routine expects the sequencer to already be paused when
+ * it is run....make sure it's that way!
+ */
+ qinpos = p->qinfifonext;
+ while(qinpos != qintail)
+ {
+ p->qinfifo[qinpos++] = SCB_LIST_NULL;
+ }
+ if (p->features & AHC_QUEUE_REGS)
+ aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+ else
+ aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+
+ return (found);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_scb_on_qoutfifo
+ *
+ * Description:
+ * Is the scb that was passed to us currently on the qoutfifo?
+ *-F*************************************************************************/
+static int
+aic7xxx_scb_on_qoutfifo(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ int i=0;
+
+ /*
+ * Scan forward from qoutfifonext until the SCB_LIST_NULL sentinel,
+ * wrapping at 256 entries (the & 0xff), looking for this scb's tag.
+ */
+ while(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] != SCB_LIST_NULL)
+ {
+ if(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] == scb->hscb->tag)
+ return TRUE;
+ else
+ i++;
+ }
+ return FALSE;
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_device
+ *
+ * Description:
+ * The device at the given target/channel has been reset. Abort
+ * all active and queued scbs for that target/channel. This function
+ * need not worry about linked next pointers because if it was a MSG_ABORT_TAG
+ * then we had a tagged command (no linked next), if it was MSG_ABORT or
+ * MSG_BUS_DEV_RESET then the device won't know about any commands any more
+ * and no busy commands will exist, and if it was a bus reset, then nothing
+ * knows about any linked next commands any more. In all cases, we don't
+ * need to worry about the linked next or busy scb, we just need to clear
+ * them.
+ *-F*************************************************************************/
+static void
+aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
+ int lun, unsigned char tag)
+{
+ struct aic7xxx_scb *scbp;
+ unsigned char active_scb, tcl;
+ int i = 0, j, init_lists = FALSE;
+
+ /*
+ * Restore this when we're done
+ */
+ active_scb = aic_inb(p, SCBPTR);
+
+ if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS))
+ printk(INFO_LEAD "Reset device, active_scb %d\n",
+ p->host_no, channel, target, lun, active_scb);
+ /*
+ * Deal with the busy target and linked next issues.
+ */
+ {
+ int min_target, max_target;
+ struct aic7xxx_scb *scbp, *prev_scbp;
+
+ /* Make all targets 'relative' to bus A. */
+ if (target == ALL_TARGETS)
+ {
+ switch (channel)
+ {
+ case 0:
+ min_target = 0;
+ max_target = (p->features & AHC_WIDE) ? 15 : 7;
+ break;
+ case 1:
+ min_target = 8;
+ max_target = 15;
+ break;
+ case ALL_CHANNELS:
+ default:
+ min_target = 0;
+ max_target = (p->features & (AHC_TWIN|AHC_WIDE)) ? 15 : 7;
+ break;
+ }
+ }
+ else
+ {
+ min_target = target | (channel << 3);
+ max_target = min_target;
+ }
+
+
+ /* Per-target cleanup: busy flags, reset-delay timers, delayed_scbs. */
+ for (i = min_target; i <= max_target; i++)
+ {
+ if ( i == p->scsi_id )
+ {
+ continue;
+ }
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning up status information "
+ "and delayed_scbs.\n", p->host_no, channel, i, lun);
+ p->dev_flags[i] &= ~BUS_DEVICE_RESET_PENDING;
+ if ( tag == SCB_LIST_NULL )
+ {
+ /* Whole-device reset: impose a 4 second settle delay. */
+ p->dev_flags[i] |= DEVICE_PRINT_WDTR | DEVICE_PRINT_SDTR |
+ DEVICE_RESET_DELAY;
+ p->dev_expires[i] = jiffies + (4 * HZ);
+ p->dev_timer_active |= (0x01 << i);
+ p->dev_last_queue_full_count[i] = 0;
+ p->dev_last_queue_full[i] = 0;
+ p->dev_temp_queue_depth[i] =
+ p->dev_max_queue_depth[i];
+ }
+ for(j=0; j<MAX_LUNS; j++)
+ {
+ if (channel == 1)
+ tcl = ((i << 4) & 0x70) | (channel << 3) | j;
+ else
+ tcl = (i << 4) | (channel << 3) | j;
+ if ( (aic7xxx_index_busy_target(p, tcl, FALSE) == tag) ||
+ (tag == SCB_LIST_NULL) )
+ aic7xxx_index_busy_target(p, tcl, /* unbusy */ TRUE);
+ }
+ j = 0;
+ prev_scbp = NULL;
+ scbp = p->delayed_scbs[i].head;
+ /* j bounds the walk so a corrupted list can't loop forever. */
+ while ( (scbp != NULL) && (j++ <= (p->scb_data->numscbs + 1)) )
+ {
+ prev_scbp = scbp;
+ scbp = scbp->q_next;
+ if ( prev_scbp == scbp )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! scb->q_next == scb "
+ "in the delayed_scbs queue!\n", p->host_no, channel, i, lun);
+ scbp = NULL;
+ prev_scbp->q_next = NULL;
+ p->delayed_scbs[i].tail = prev_scbp;
+ }
+ if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
+ {
+ scbq_remove(&p->delayed_scbs[i], prev_scbp);
+ if (prev_scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[i]++;
+ p->activescbs++;
+ }
+ prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! There's a loop in the "
+ "delayed_scbs queue!\n", p->host_no, channel, i, lun);
+ scbq_init(&p->delayed_scbs[i]);
+ }
+ if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
+ time_after_eq(p->dev_timer.expires, p->dev_expires[i]) )
+ {
+ /* Re-arm the shared timer to the earliest pending expiry. */
+ del_timer(&p->dev_timer);
+ p->dev_timer.expires = p->dev_expires[i];
+ add_timer(&p->dev_timer);
+ p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ }
+ }
+ }
+
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning QINFIFO.\n", p->host_no, channel, target, lun );
+ aic7xxx_search_qinfifo(p, target, channel, lun, tag,
+ SCB_RESET | SCB_QUEUED_FOR_DONE, /* requeue */ FALSE, NULL);
+
+/*
+ * Search the waiting_scbs queue for matches, this catches any SCB_QUEUED
+ * ABORT/RESET commands.
+ */
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning waiting_scbs.\n", p->host_no, channel,
+ target, lun );
+ {
+ struct aic7xxx_scb *scbp, *prev_scbp;
+
+ j = 0;
+ prev_scbp = NULL;
+ scbp = p->waiting_scbs.head;
+ while ( (scbp != NULL) && (j++ <= (p->scb_data->numscbs + 1)) )
+ {
+ prev_scbp = scbp;
+ scbp = scbp->q_next;
+ if ( prev_scbp == scbp )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! scb->q_next == scb "
+ "in the waiting_scbs queue!\n", p->host_no, CTL_OF_SCB(scbp));
+ scbp = NULL;
+ prev_scbp->q_next = NULL;
+ p->waiting_scbs.tail = prev_scbp;
+ }
+ if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
+ {
+ scbq_remove(&p->waiting_scbs, prev_scbp);
+ if (prev_scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[TARGET_INDEX(prev_scbp->cmd)]++;
+ p->activescbs++;
+ }
+ prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! There's a loop in the "
+ "waiting_scbs queue!\n", p->host_no, channel, target, lun);
+ scbq_init(&p->waiting_scbs);
+ }
+ }
+
+
+ /*
+ * Search waiting for selection list.
+ */
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning waiting for selection "
+ "list.\n", p->host_no, channel, target, lun);
+ {
+ unsigned char next, prev, scb_index;
+
+ next = aic_inb(p, WAITING_SCBH); /* Start at head of list. */
+ prev = SCB_LIST_NULL;
+ j = 0;
+ while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ {
+ aic_outb(p, next, SCBPTR);
+ scb_index = aic_inb(p, SCB_TAG);
+ if (scb_index >= p->scb_data->numscbs)
+ {
+ /*
+ * No aic7xxx_verbose check here.....we want to see this since it
+ * means either the kernel driver or the sequencer screwed things up
+ */
+ printk(WARN_LEAD "Waiting List inconsistency; SCB index=%d, "
+ "numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
+ p->scb_data->numscbs);
+ next = aic_inb(p, SCB_NEXT);
+ aic7xxx_add_curscb_to_free_list(p);
+ }
+ else
+ {
+ scbp = p->scb_data->scb_array[scb_index];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ next = aic7xxx_abort_waiting_scb(p, scbp, next, prev);
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ if (prev == SCB_LIST_NULL)
+ {
+ /*
+ * This is either the first scb on the waiting list, or we
+ * have already yanked the first and haven't left any behind.
+ * Either way, we need to turn off the selection hardware if
+ * it isn't already off.
+ */
+ aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
+ aic_outb(p, CLRSELTIMEO, CLRSINT1);
+ }
+ }
+ else
+ {
+ prev = next;
+ next = aic_inb(p, SCB_NEXT);
+ }
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ printk(WARN_LEAD "Yikes!! There is a loop in the waiting for "
+ "selection list!\n", p->host_no, channel, target, lun);
+ init_lists = TRUE;
+ }
+ }
+
+ /*
+ * Go through disconnected list and remove any entries we have queued
+ * for completion, zeroing their control byte too.
+ */
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning disconnected scbs "
+ "list.\n", p->host_no, channel, target, lun);
+ if (p->flags & AHC_PAGESCBS)
+ {
+ unsigned char next, prev, scb_index;
+
+ next = aic_inb(p, DISCONNECTED_SCBH);
+ prev = SCB_LIST_NULL;
+ j = 0;
+ while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ {
+ aic_outb(p, next, SCBPTR);
+ scb_index = aic_inb(p, SCB_TAG);
+ if (scb_index > p->scb_data->numscbs)
+ {
+ printk(WARN_LEAD "Disconnected List inconsistency; SCB index=%d, "
+ "numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
+ p->scb_data->numscbs);
+ next = aic7xxx_rem_scb_from_disc_list(p, next);
+ }
+ else
+ {
+ scbp = p->scb_data->scb_array[scb_index];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ next = aic7xxx_rem_scb_from_disc_list(p, next);
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ scbp->hscb->control = 0;
+ }
+ else
+ {
+ prev = next;
+ next = aic_inb(p, SCB_NEXT);
+ }
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ printk(WARN_LEAD "Yikes!! There is a loop in the disconnected list!\n",
+ p->host_no, channel, target, lun);
+ init_lists = TRUE;
+ }
+ }
+
+ /*
+ * Walk the free list making sure no entries on the free list have
+ * a valid SCB_TAG value or SCB_CONTROL byte.
+ */
+ if (p->flags & AHC_PAGESCBS)
+ {
+ unsigned char next;
+
+ j = 0;
+ next = aic_inb(p, FREE_SCBH);
+ if ( (next >= p->scb_data->maxhscbs) && (next != SCB_LIST_NULL) )
+ {
+ printk(WARN_LEAD "Bogus FREE_SCBH!.\n", p->host_no, channel,
+ target, lun);
+ init_lists = TRUE;
+ next = SCB_LIST_NULL;
+ }
+ while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ {
+ aic_outb(p, next, SCBPTR);
+ if (aic_inb(p, SCB_TAG) < p->scb_data->numscbs)
+ {
+ printk(WARN_LEAD "Free list inconsistency!.\n", p->host_no, channel,
+ target, lun);
+ init_lists = TRUE;
+ next = SCB_LIST_NULL;
+ }
+ else
+ {
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic_outb(p, 0, SCB_CONTROL);
+ next = aic_inb(p, SCB_NEXT);
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ printk(WARN_LEAD "Yikes!! There is a loop in the free list!\n",
+ p->host_no, channel, target, lun);
+ init_lists = TRUE;
+ }
+ }
+
+ /*
+ * Go through the hardware SCB array looking for commands that
+ * were active but not on any list.
+ */
+ if (init_lists)
+ {
+ /* A list was corrupt: wipe all three list heads and rebuild below. */
+ aic_outb(p, SCB_LIST_NULL, FREE_SCBH);
+ aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
+ aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
+ }
+ for (i = p->scb_data->maxhscbs - 1; i >= 0; i--)
+ {
+ unsigned char scbid;
+
+ aic_outb(p, i, SCBPTR);
+ if (init_lists)
+ {
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
+ aic_outb(p, SCB_LIST_NULL, SCB_PREV);
+ aic_outb(p, 0, SCB_CONTROL);
+ aic7xxx_add_curscb_to_free_list(p);
+ }
+ else
+ {
+ scbid = aic_inb(p, SCB_TAG);
+ if (scbid < p->scb_data->numscbs)
+ {
+ scbp = p->scb_data->scb_array[scbid];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ aic_outb(p, 0, SCB_CONTROL);
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic7xxx_add_curscb_to_free_list(p);
+ }
+ }
+ }
+ }
+
+ /*
+ * Go through the entire SCB array now and look for commands for
+ * this target that are still active. These are other (most likely
+ * tagged) commands that were disconnected when the reset occurred.
+ * Any commands we find here we know this about, it wasn't on any queue,
+ * it wasn't in the qinfifo, it wasn't in the disconnected or waiting
+ * lists, so it really must have been a paged out SCB. In that case,
+ * we shouldn't need to bother with updating any counters, just mark
+ * the correct flags and go on.
+ */
+ for (i = 0; i < p->scb_data->numscbs; i++)
+ {
+ scbp = p->scb_data->scb_array[i];
+ if ((scbp->flags & SCB_ACTIVE) &&
+ aic7xxx_match_scb(p, scbp, target, channel, lun, tag) &&
+ !aic7xxx_scb_on_qoutfifo(p, scbp))
+ {
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ scbq_remove(&p->waiting_scbs, scbp);
+ scbq_remove(&p->delayed_scbs[TARGET_INDEX(scbp->cmd)], scbp);
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ }
+ }
+
+ /* Restore the SCB pointer we saved on entry. */
+ aic_outb(p, active_scb, SCBPTR);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_clear_intstat
+ *
+ * Description:
+ * Clears the interrupt status.
+ *-F*************************************************************************/
+static void
+aic7xxx_clear_intstat(struct aic7xxx_host *p)
+{
+ /* Clear any interrupt conditions this may have caused. */
+ aic_outb(p, CLRSELDO | CLRSELDI | CLRSELINGO, CLRSINT0);
+ aic_outb(p, CLRSELTIMEO | CLRATNO | CLRSCSIRSTI | CLRBUSFREE | CLRSCSIPERR |
+ CLRPHASECHG | CLRREQINIT, CLRSINT1);
+ /* Finally clear the latched SCSI, sequencer, break and parity ints. */
+ aic_outb(p, CLRSCSIINT | CLRSEQINT | CLRBRKADRINT | CLRPARERR, CLRINT);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_current_bus
+ *
+ * Description:
+ * Reset the current SCSI bus.
+ *-F*************************************************************************/
+static void
+aic7xxx_reset_current_bus(struct aic7xxx_host *p)
+{
+
+ /* Disable reset interrupts. */
+ aic_outb(p, aic_inb(p, SIMODE1) & ~ENSCSIRST, SIMODE1);
+
+ /* Turn off the bus' current operations, after all, we shouldn't have any
+ * valid commands left to cause a RSELI and SELO once we've tossed the
+ * bus away with this reset, so we might as well shut down the sequencer
+ * until the bus is restarted as opposed to saving the current settings
+ * and restoring them (which makes no sense to me). */
+
+ /* Turn on the bus reset. */
+ aic_outb(p, aic_inb(p, SCSISEQ) | SCSIRSTO, SCSISEQ);
+ /* Spin until the chip actually reports SCSIRSTO asserted. */
+ while ( (aic_inb(p, SCSISEQ) & SCSIRSTO) == 0)
+ mdelay(5);
+
+ /* Hold the bus in reset for 10ms before releasing it. */
+ mdelay(10);
+
+ /* Turn off the bus reset. */
+ aic_outb(p, 0, SCSISEQ);
+ mdelay(5);
+
+ aic7xxx_clear_intstat(p);
+ /* Re-enable reset interrupts. */
+ aic_outb(p, aic_inb(p, SIMODE1) | ENSCSIRST, SIMODE1);
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_channel
+ *
+ * Description:
+ * Reset the channel.
+ *-F*************************************************************************/
+static void
+aic7xxx_reset_channel(struct aic7xxx_host *p, int channel, int initiate_reset)
+{
+ unsigned long offset_min, offset_max;
+ unsigned char sblkctl;
+ int cur_channel;
+
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Reset channel called, %s initiate reset.\n",
+ p->host_no, channel, -1, -1, (initiate_reset==TRUE) ? "will" : "won't" );
+
+
+ /*
+ * Re-arm sync/wide negotiation for the affected targets and pick the
+ * TARG_SCSIRATE index range for this channel (B = 8-15, A = 0-7,
+ * wide single-channel = 0-15).
+ */
+ if (channel == 1)
+ {
+ p->needsdtr |= (p->needsdtr_copy & 0xFF00);
+ p->sdtr_pending &= 0x00FF;
+ offset_min = 8;
+ offset_max = 16;
+ }
+ else
+ {
+ if (p->features & AHC_WIDE)
+ {
+ p->needsdtr = p->needsdtr_copy;
+ p->needwdtr = p->needwdtr_copy;
+ p->sdtr_pending = 0x0;
+ p->wdtr_pending = 0x0;
+ offset_min = 0;
+ offset_max = 16;
+ }
+ else
+ {
+ /* Channel A */
+ p->needsdtr |= (p->needsdtr_copy & 0x00FF);
+ p->sdtr_pending &= 0xFF00;
+ offset_min = 0;
+ offset_max = 8;
+ }
+ }
+
+ while (offset_min < offset_max)
+ {
+ /*
+ * Revert to async/narrow transfers until we renegotiate.
+ */
+ aic_outb(p, 0, TARG_SCSIRATE + offset_min);
+ if (p->features & AHC_ULTRA2)
+ {
+ aic_outb(p, 0, TARG_OFFSET + offset_min);
+ }
+ offset_min++;
+ }
+
+ /*
+ * Reset the bus and unpause/restart the controller
+ */
+ sblkctl = aic_inb(p, SBLKCTL);
+ if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+ cur_channel = (sblkctl & SELBUSB) >> 3;
+ else
+ cur_channel = 0;
+ if ( (cur_channel != channel) && (p->features & AHC_TWIN) )
+ {
+ /*
+ * Case 1: Command for another bus is active
+ */
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Stealthily resetting idle channel.\n", p->host_no,
+ channel, -1, -1);
+ /*
+ * Stealthily reset the other bus without upsetting the current bus.
+ */
+ aic_outb(p, sblkctl ^ SELBUSB, SBLKCTL);
+ aic_outb(p, aic_inb(p, SIMODE1) & ~ENBUSFREE, SIMODE1);
+ if (initiate_reset)
+ {
+ aic7xxx_reset_current_bus(p);
+ }
+ aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ);
+ aic7xxx_clear_intstat(p);
+ /* Flip SBLKCTL back to the bus that was selected on entry. */
+ aic_outb(p, sblkctl, SBLKCTL);
+ }
+ else
+ {
+ /*
+ * Case 2: A command from this bus is active or we're idle.
+ */
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Resetting currently active channel.\n", p->host_no,
+ channel, -1, -1);
+ aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT),
+ SIMODE1);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+ p->msg_type = MSG_TYPE_NONE;
+ p->msg_len = 0;
+ if (initiate_reset)
+ {
+ aic7xxx_reset_current_bus(p);
+ }
+ aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ);
+ aic7xxx_clear_intstat(p);
+ }
+ if (aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "Channel reset\n", p->host_no, channel, -1, -1);
+ /*
+ * Clean up all the state information for the pending transactions
+ * on this bus.
+ */
+ aic7xxx_reset_device(p, ALL_TARGETS, channel, ALL_LUNS, SCB_LIST_NULL);
+
+ if ( !(p->features & AHC_TWIN) )
+ {
+ restart_sequencer(p);
+ }
+
+ return;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_run_waiting_queues
+ *
+ * Description:
+ * Scan the waiting_scbs queue downloading and starting as many
+ * scbs as we can.
+ *-F*************************************************************************/
+static void
+aic7xxx_run_waiting_queues(struct aic7xxx_host *p)
+{
+ struct aic7xxx_scb *scb;
+ int tindex;
+ int sent;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags = 0;
+#endif
+
+
+ if (p->waiting_scbs.head == NULL)
+ return;
+
+ sent = 0;
+
+ /*
+ * First handle SCBs that are waiting but have been assigned a slot.
+ */
+ DRIVER_LOCK
+ while ((scb = scbq_remove_head(&p->waiting_scbs)) != NULL)
+ {
+ tindex = TARGET_INDEX(scb->cmd);
+ if ( !scb->tag_action && (p->tagenable & (1<<tindex)) )
+ {
+ /* Untagged command to a tagged device: drain to depth 1 first. */
+ p->dev_temp_queue_depth[tindex] = 1;
+ }
+ if ( (p->dev_active_cmds[tindex] >=
+ p->dev_temp_queue_depth[tindex]) ||
+ (p->dev_flags[tindex] & (DEVICE_RESET_DELAY|DEVICE_WAS_BUSY)) ||
+ (p->flags & AHC_RESET_DELAY) )
+ {
+ /* Device can't take it yet: park the scb on its delayed queue. */
+ scbq_insert_tail(&p->delayed_scbs[tindex], scb);
+ }
+ else
+ {
+ scb->flags &= ~SCB_WAITINGQ;
+ p->dev_active_cmds[tindex]++;
+ p->activescbs++;
+ if ( !(scb->tag_action) )
+ {
+ aic7xxx_busy_target(p, scb);
+ }
+ p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
+ sent++;
+ }
+ }
+ if (sent)
+ {
+ /* Tell the card about the new qinfifo entries we just queued. */
+ if (p->features & AHC_QUEUE_REGS)
+ aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+ else
+ {
+ pause_sequencer(p);
+ aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+ unpause_sequencer(p, FALSE);
+ }
+ if (p->activescbs > p->max_activescbs)
+ p->max_activescbs = p->activescbs;
+ }
+ DRIVER_UNLOCK
+}
+
+#ifdef CONFIG_PCI
+
+#define DPE 0x80
+#define SSE 0x40
+#define RMA 0x20
+#define RTA 0x10
+#define STA 0x08
+#define DPR 0x01
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_pci_intr
+ *
+ * Description:
+ * Check the scsi card for PCI errors and clear the interrupt
+ *
+ * NOTE: If you don't have this function and a 2940 card encounters
+ * a PCI error condition, the machine will end up locked as the
+ * interrupt handler gets slammed with non-stop PCI error interrupts
+ *-F*************************************************************************/
+static void
+aic7xxx_pci_intr(struct aic7xxx_host *p)
+{
+ unsigned char status1;
+
+ /* Read the upper byte of the PCI status register. */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_byte(p->pdev, PCI_STATUS + 1, &status1);
+#else
+ pcibios_read_config_byte(p->pci_bus, p->pci_device_fn,
+ PCI_STATUS + 1, &status1);
+#endif
+
+ if ( (status1 & DPE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Data Parity Error during PCI address or PCI write"
+ "phase.\n", p->host_no, -1, -1, -1);
+ if ( (status1 & SSE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Signal System Error Detected\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & RMA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Received a PCI Master Abort\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & RTA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Received a PCI Target Abort\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & STA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Signaled a PCI Target Abort\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & DPR) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Data Parity Error has been reported via PCI pin "
+ "PERR#\n", p->host_no, -1, -1, -1);
+
+ /* Write the status bits back to clear them (write-1-to-clear). */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_write_config_byte(p->pdev, PCI_STATUS + 1, status1);
+#else
+ pcibios_write_config_byte(p->pci_bus, p->pci_device_fn,
+ PCI_STATUS + 1, status1);
+#endif
+ if (status1 & (DPR|RMA|RTA))
+ aic_outb(p, CLRPARERR, CLRINT);
+
+ if ( (aic7xxx_panic_on_abort) && (p->spurious_int > 500) )
+ aic7xxx_panic_abort(p, NULL);
+
+}
+#endif /* CONFIG_PCI */
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_timer
+ *
+ * Description:
+ * Take expired entries off the delayed queues and place them on the waiting queue,
+ * then run waiting queue to start commands.
+ ***************************************************************************/
+static void
+aic7xxx_timer(struct aic7xxx_host *p)
+{
+ int i, j;
+ unsigned long cpu_flags = 0;
+ struct aic7xxx_scb *scb;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ DRIVER_LOCK
+#else
+ spin_lock_irqsave(&io_request_lock, cpu_flags);
+#endif
+ /* Bit MAX_TARGETS tracks whether the kernel timer is armed. */
+ p->dev_timer_active &= ~(0x01 << MAX_TARGETS);
+ if ( (p->dev_timer_active & (0x01 << p->scsi_id)) &&
+ time_after_eq(jiffies, p->dev_expires[p->scsi_id]) )
+ {
+ /* Our own ID's slot is used for the host-wide reset delay. */
+ p->flags &= ~AHC_RESET_DELAY;
+ p->dev_timer_active &= ~(0x01 << p->scsi_id);
+ }
+ for(i=0; i<MAX_TARGETS; i++)
+ {
+ if ( (i != p->scsi_id) &&
+ (p->dev_timer_active & (0x01 << i)) &&
+ time_after_eq(jiffies, p->dev_expires[i]) )
+ {
+ /* Target's delay expired: restore depth and release its scbs. */
+ p->dev_timer_active &= ~(0x01 << i);
+ p->dev_flags[i] &= ~(DEVICE_RESET_DELAY|DEVICE_WAS_BUSY);
+ p->dev_temp_queue_depth[i] = p->dev_max_queue_depth[i];
+ j = 0;
+ while ( ((scb = scbq_remove_head(&p->delayed_scbs[i])) != NULL) &&
+ (j++ < p->scb_data->numscbs) )
+ {
+ scbq_insert_tail(&p->waiting_scbs, scb);
+ }
+ if (j == p->scb_data->numscbs)
+ {
+ printk(INFO_LEAD "timer: Yikes, loop in delayed_scbs list.\n",
+ p->host_no, 0, i, -1);
+ scbq_init(&p->delayed_scbs[i]);
+ scbq_init(&p->waiting_scbs);
+ /*
+ * Well, things are screwed now, wait for a reset to clean the junk
+ * out.
+ */
+ }
+ }
+ else if ( p->dev_timer_active & (0x01 << i) )
+ {
+ /* Still pending: track the earliest expiry for the re-arm below. */
+ if ( p->dev_timer_active & (0x01 << MAX_TARGETS) )
+ {
+ if ( time_after_eq(p->dev_timer.expires, p->dev_expires[i]) )
+ {
+ p->dev_timer.expires = p->dev_expires[i];
+ }
+ }
+ else
+ {
+ p->dev_timer.expires = p->dev_expires[i];
+ p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ }
+ }
+ }
+ if ( p->dev_timer_active & (0x01 << MAX_TARGETS) )
+ {
+ add_timer(&p->dev_timer);
+ }
+
+ aic7xxx_run_waiting_queues(p);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ DRIVER_UNLOCK
+#else
+ spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_construct_sdtr
+ *
+ * Description:
+ *   Constructs a synchronous data transfer request (SDTR) message in the
+ *   message buffer on the sequencer.
+ *-F*************************************************************************/
+static void
+aic7xxx_construct_sdtr(struct aic7xxx_host *p, unsigned char period,
+        unsigned char offset)
+{
+  /* Extended message format: 0x01, length, SDTR opcode, period, offset. */
+  p->msg_buf[p->msg_index++] = MSG_EXTENDED;
+  p->msg_buf[p->msg_index++] = MSG_EXT_SDTR_LEN;
+  p->msg_buf[p->msg_index++] = MSG_EXT_SDTR;
+  p->msg_buf[p->msg_index++] = period;
+  p->msg_buf[p->msg_index++] = offset;
+  /* Five bytes were appended above. */
+  p->msg_len += 5;
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_construct_wdtr
+ *
+ * Description:
+ *   Constructs a wide data transfer request (WDTR) message in the message
+ *   buffer on the sequencer.
+ *-F*************************************************************************/
+static void
+aic7xxx_construct_wdtr(struct aic7xxx_host *p, unsigned char bus_width)
+{
+  /* Extended message format: 0x01, length, WDTR opcode, bus width. */
+  p->msg_buf[p->msg_index++] = MSG_EXTENDED;
+  p->msg_buf[p->msg_index++] = MSG_EXT_WDTR_LEN;
+  p->msg_buf[p->msg_index++] = MSG_EXT_WDTR;
+  p->msg_buf[p->msg_index++] = bus_width;
+  /* Four bytes were appended above. */
+  p->msg_len += 4;
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_calculate_residual
+ *
+ * Description:
+ *   Calculate the residual data not yet transferred, flag an underflow
+ *   error on the command if appropriate, and clear the residual fields
+ *   in the hardware SCB for the next user.
+ *-F*************************************************************************/
+static void
+aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+  struct aic7xxx_hwscb *hscb;
+  Scsi_Cmnd *cmd;
+  int actual, i;
+
+  cmd = scb->cmd;
+  hscb = scb->hscb;
+
+  /*
+   * Don't destroy valid residual information with
+   * residual coming from a check sense operation.
+   */
+  if (((scb->hscb->control & DISCONNECTED) == 0) &&
+      (scb->flags & SCB_SENSE) == 0)
+  {
+    /*
+     * We had an underflow. At this time, there's only
+     * one other driver that bothers to check for this,
+     * and cmd->underflow seems to be set rather half-
+     * heartedly in the higher-level SCSI code.
+     */
+    /*
+     * Start from the full transfer length and subtract the untransferred
+     * SG segments (counted from the tail; i starts at 1 because the
+     * partially-transferred segment is handled by the byte count below).
+     */
+    actual = scb->sg_length;
+    for (i=1; i < hscb->residual_SG_segment_count; i++)
+    {
+      actual -= scb->sg_list[scb->sg_count - i].length;
+    }
+    /* residual_data_count is a 24-bit little-endian byte count. */
+    actual -= (hscb->residual_data_count[2] << 16) |
+              (hscb->residual_data_count[1] <<  8) |
+               hscb->residual_data_count[0];
+
+    if (actual < cmd->underflow)
+    {
+      if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
+        printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
+          "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
+          (cmd->request.cmd == WRITE) ? "wrote" : "read", actual,
+          hscb->residual_SG_segment_count);
+      /* Ask the mid level to retry, preserving the target status byte. */
+      aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+      aic7xxx_status(cmd) = hscb->target_status;
+    }
+  }
+
+  /*
+   * Clean out the residual information in the SCB for the
+   * next consumer.
+   */
+  hscb->residual_data_count[2] = 0;
+  hscb->residual_data_count[1] = 0;
+  hscb->residual_data_count[0] = 0;
+  hscb->residual_SG_segment_count = 0;
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_handle_device_reset
+ *
+ * Description:
+ *   Clean up after a Bus Device Reset has been delivered to a target:
+ *   force renegotiation, reset the hardware transfer rate for the target,
+ *   and complete all of its outstanding commands.
+ *-F*************************************************************************/
+static void
+aic7xxx_handle_device_reset(struct aic7xxx_host *p, int target, int channel)
+{
+  unsigned short targ_mask;
+  unsigned char tindex = target;
+
+  /* Per-target index: channel B targets occupy the upper 8 slots. */
+  tindex |= ((channel & 0x01) << 3);
+
+  targ_mask = (0x01 << tindex);
+  /*
+   * Go back to async/narrow transfers and renegotiate.
+   */
+  p->needsdtr |= (p->needsdtr_copy & targ_mask);
+  p->needwdtr |= (p->needwdtr_copy & targ_mask);
+  p->sdtr_pending &= ~targ_mask;
+  p->wdtr_pending &= ~targ_mask;
+  /* Zero the per-target rate (and offset on Ultra2) in sequencer RAM. */
+  aic_outb(p, 0, TARG_SCSIRATE + tindex);
+  if (p->features & AHC_ULTRA2)
+    aic_outb(p, 0, TARG_OFFSET + tindex);
+  aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL);
+  if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+    printk(INFO_LEAD "Bus Device Reset delivered.\n", p->host_no, channel,
+      target, -1);
+  aic7xxx_run_done_queue(p, /*complete*/ FALSE);
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_handle_seqint
+ *
+ * Description:
+ *   Interrupt handler for sequencer interrupts (SEQINT).  Decodes the
+ *   interrupt code in the low bits of intstat and dispatches.  Most cases
+ *   fall through to unpause the sequencer at the bottom; EXTENDED_MSG and
+ *   AWAITING_MSG return early with the sequencer still paused so the
+ *   REQINIT handler can move message bytes first.
+ *-F*************************************************************************/
+static void
+aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
+{
+  struct aic7xxx_scb *scb;
+  unsigned short target_mask;
+  unsigned char target, lun, tindex;
+  unsigned char queue_flag = FALSE;
+  char channel;
+
+  /* Recover target/channel/lun of the connection from sequencer RAM. */
+  target = ((aic_inb(p, SAVED_TCL) >> 4) & 0x0f);
+  if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+    channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
+  else
+    channel = 0;
+  tindex = target + (channel << 3);
+  lun = aic_inb(p, SAVED_TCL) & 0x07;
+  target_mask = (0x01 << tindex);
+
+  /*
+   * Go ahead and clear the SEQINT now, that avoids any interrupt race
+   * conditions later on in case we enable some other interrupt.
+   */
+  aic_outb(p, CLRSEQINT, CLRINT);
+  switch (intstat & SEQINT_MASK)
+  {
+    case NO_MATCH:
+      {
+        /* Unknown reconnecting target: disable selection out, log state. */
+        aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP),
+                 SCSISEQ);
+        printk(WARN_LEAD "No active SCB for reconnecting target - Issuing "
+               "BUS DEVICE RESET.\n", p->host_no, channel, target, lun);
+        printk(WARN_LEAD "      SAVED_TCL=0x%x, ARG_1=0x%x, SEQADDR=0x%x\n",
+               p->host_no, channel, target, lun,
+               aic_inb(p, SAVED_TCL), aic_inb(p, ARG_1),
+               (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
+      }
+      break;
+
+    case SEND_REJECT:
+      {
+        /* Sequencer is about to reject a message it didn't understand. */
+        if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
+          printk(INFO_LEAD "Rejecting unknown message (0x%x) received from "
+            "target, SEQ_FLAGS=0x%x\n", p->host_no, channel, target, lun,
+            aic_inb(p, ACCUM), aic_inb(p, SEQ_FLAGS));
+      }
+      break;
+
+    case NO_IDENT:
+      {
+        /*
+         * The reconnecting target either did not send an identify
+         * message, or did, but we didn't find an SCB to match and
+         * before it could respond to our ATN/abort, it hit a dataphase.
+         * The only safe thing to do is to blow it away with a bus
+         * reset.
+         */
+        if (aic7xxx_verbose & (VERBOSE_SEQINT | VERBOSE_RESET_MID))
+          printk(INFO_LEAD "Target did not send an IDENTIFY message; "
+            "LASTPHASE 0x%x, SAVED_TCL 0x%x\n", p->host_no, channel, target,
+            lun, aic_inb(p, LASTPHASE), aic_inb(p, SAVED_TCL));
+
+        aic7xxx_reset_channel(p, channel, /*initiate reset*/ TRUE);
+        aic7xxx_run_done_queue(p, FALSE);
+
+      }
+      break;
+
+    case BAD_PHASE:
+      if (aic_inb(p, LASTPHASE) == P_BUSFREE)
+      {
+        /* Bus went free under us; restart the sequencer from scratch. */
+        if (aic7xxx_verbose & VERBOSE_SEQINT)
+          printk(INFO_LEAD "Missed busfree.\n", p->host_no, channel,
+            target, lun);
+        restart_sequencer(p);
+      }
+      else
+      {
+        if (aic7xxx_verbose & VERBOSE_SEQINT)
+          printk(INFO_LEAD "Unknown scsi bus phase, continuing\n", p->host_no,
+            channel, target, lun);
+      }
+      break;
+
+    case EXTENDED_MSG:
+      {
+        /* Target started an extended message; collect it byte-by-byte
+         * via REQINIT interrupts (see aic7xxx_handle_reqinit). */
+        p->msg_type = MSG_TYPE_INITIATOR_MSGIN;
+        p->msg_len = 0;
+        p->msg_index = 0;
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+        if (aic7xxx_verbose > 0xffff)
+          printk(INFO_LEAD "Enabling REQINITs for MSG_IN\n", p->host_no,
+                 channel, target, lun);
+#endif
+
+       /*
+        * To actually receive the message, simply turn on
+        * REQINIT interrupts and let our interrupt handler
+        * do the rest (REQINIT should already be true).
+        */
+        p->flags |= AHC_HANDLING_REQINITS;
+        aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1);
+
+       /*
+        * We don't want the sequencer unpaused yet so we return early
+        */
+        return;
+      }
+
+    case REJECT_MSG:
+      {
+        /*
+         * What we care about here is if we had an outstanding SDTR
+         * or WDTR message for this target.  If we did, this is a
+         * signal that the target is refusing negotiation.
+         */
+        unsigned char scb_index;
+        unsigned char last_msg;
+
+        scb_index = aic_inb(p, SCB_TAG);
+        scb = p->scb_data->scb_array[scb_index];
+        last_msg = aic_inb(p, LAST_MSG);
+
+        if ( (last_msg == MSG_IDENTIFYFLAG) &&
+             (scb->tag_action) &&
+            !(scb->flags & SCB_MSGOUT_BITS) )
+        {
+          if (scb->tag_action == MSG_ORDERED_Q_TAG)
+          {
+            /*
+             * OK...the device seems able to accept tagged commands, but
+             * not ordered tag commands, only simple tag commands.  So, we
+             * disable ordered tag commands and go on with life just like
+             * normal.
+             */
+            p->orderedtag &= ~target_mask;
+            scb->tag_action = MSG_SIMPLE_Q_TAG;
+            scb->hscb->control &= ~SCB_TAG_TYPE;
+            scb->hscb->control |= MSG_SIMPLE_Q_TAG;
+            aic_outb(p, scb->hscb->control, SCB_CONTROL);
+            /*
+             * OK..we set the tag type to simple tag command, now we re-assert
+             * ATNO and hope this will take us into the identify phase again
+             * so we can resend the tag type and info to the device.
+             */
+            aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
+            aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
+          }
+          else if (scb->tag_action == MSG_SIMPLE_Q_TAG)
+          {
+            unsigned char i, reset = 0;
+            struct aic7xxx_scb *scbp;
+            int old_verbose;
+            /*
+             * Hmmmm....the device is flaking out on tagged commands.  The
+             * bad thing is that we already have tagged commands enabled in
+             * the device struct in the mid level code.  We also have a queue
+             * set according to the tagged queue depth.  Gonna have to live
+             * with it by controlling our queue depth internally and making
+             * sure we don't set the tagged command flag any more.
+             */
+            p->tagenable &= ~target_mask;
+            p->orderedtag &= ~target_mask;
+            p->dev_max_queue_depth[tindex] =
+               p->dev_temp_queue_depth[tindex] = 1;
+            /*
+             * We set this command up as a bus device reset.  However, we have
+             * to clear the tag type as it's causing us problems.  We shouldnt
+             * have to worry about any other commands being active, since if
+             * the device is refusing tagged commands, this should be the
+             * first tagged command sent to the device, however, we do have
+             * to worry about any other tagged commands that may already be
+             * in the qinfifo.  The easiest way to do this, is to issue a BDR,
+             * send all the commands back to the mid level code, then let them
+             * come back and get rebuilt as untagged commands.
+             */
+            scb->tag_action = 0;
+            scb->hscb->control &= ~(TAG_ENB | SCB_TAG_TYPE);
+            aic_outb(p,  scb->hscb->control, SCB_CONTROL);
+
+            /* Quiet the reset/abort chatter while we recycle the SCBs. */
+            old_verbose = aic7xxx_verbose;
+            aic7xxx_verbose &= ~(VERBOSE_RESET|VERBOSE_ABORT);
+            for (i=0; i!=p->scb_data->numscbs; i++)
+            {
+              scbp = p->scb_data->scb_array[i];
+              if ((scbp->flags & SCB_ACTIVE) && (scbp != scb))
+              {
+                if (aic7xxx_match_scb(p, scbp, target, channel, lun, i))
+                {
+                  aic7xxx_reset_device(p, target, channel, lun, i);
+                  reset++;
+                }
+                aic7xxx_run_done_queue(p, FALSE);
+              }
+            }
+            aic7xxx_verbose = old_verbose;
+            /*
+             * Wait until after the for loop to set the busy index since
+             * aic7xxx_reset_device will clear the busy index during its
+             * operation.
+             */
+            aic7xxx_busy_target(p, scb);
+            printk(INFO_LEAD "Device is refusing tagged commands, using "
+              "untagged I/O.\n", p->host_no, channel, target, lun);
+            aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
+            aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
+          }
+        }
+        else if (scb->flags & SCB_MSGOUT_WDTR)
+        {
+          /*
+           * note 8bit xfers and clear flag
+           */
+          p->needwdtr &= ~target_mask;
+          p->needwdtr_copy &= ~target_mask;
+          p->wdtr_pending &= ~target_mask;
+          scb->flags &= ~SCB_MSGOUT_BITS;
+          aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
+            (AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR));
+          aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+                               AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
+          /* WDTR rejected; follow up with SDTR if one is still wanted. */
+          if ( (p->needsdtr_copy & target_mask) &&
+              !(p->sdtr_pending & target_mask) )
+          {
+            p->sdtr_pending |= target_mask;
+            scb->flags |= SCB_MSGOUT_SDTR;
+            aic_outb(p, HOST_MSG, MSG_OUT);
+            aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+          }
+        }
+        else if (scb->flags & SCB_MSGOUT_SDTR)
+        {
+         /*
+          * note asynch xfers and clear flag
+          */
+          p->needsdtr &= ~target_mask;
+          p->needsdtr_copy &= ~target_mask;
+          p->sdtr_pending &= ~target_mask;
+          scb->flags &= ~SCB_MSGOUT_SDTR;
+          aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+            (AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL));
+        }
+        else if (aic7xxx_verbose & VERBOSE_SEQINT)
+        {
+          /*
+           * Otherwise, we ignore it.
+           */
+          printk(INFO_LEAD "Received MESSAGE_REJECT for unknown cause.  "
+            "Ignoring.\n", p->host_no, channel, target, lun);
+        }
+      }
+      break;
+
+    case BAD_STATUS:
+      {
+        unsigned char scb_index;
+        struct aic7xxx_hwscb *hscb;
+        Scsi_Cmnd *cmd;
+
+        /* The sequencer will notify us when a command has an error that
+         * would be of interest to the kernel.  This allows us to leave
+         * the sequencer running in the common case of command completes
+         * without error.  The sequencer will have DMA'd the SCB back
+         * up to us, so we can reference the drivers SCB array.
+         *
+         * Set the default return value to 0 indicating not to send
+         * sense.  The sense code will change this if needed and this
+         * reduces code duplication.
+         */
+        aic_outb(p, 0, RETURN_1);
+        scb_index = aic_inb(p, SCB_TAG);
+        /* NOTE(review): valid indices are 0..numscbs-1, so this should
+         * arguably be >= (other validity checks in this file use '<');
+         * scb_index == numscbs would read one past the array -- confirm. */
+        if (scb_index > p->scb_data->numscbs)
+        {
+          printk(WARN_LEAD "Invalid SCB during SEQINT 0x%02x, SCB_TAG %d.\n",
+            p->host_no, channel, target, lun, intstat, scb_index);
+          break;
+        }
+        scb = p->scb_data->scb_array[scb_index];
+        hscb = scb->hscb;
+
+        if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
+        {
+          printk(WARN_LEAD "Invalid SCB during SEQINT 0x%x, scb %d, flags 0x%x,"
+            " cmd 0x%lx.\n", p->host_no, channel, target, lun, intstat,
+            scb_index, scb->flags, (unsigned long) scb->cmd);
+        }
+        else
+        {
+          cmd = scb->cmd;
+          hscb->target_status = aic_inb(p, SCB_TARGET_STATUS);
+          aic7xxx_status(cmd) = hscb->target_status;
+
+          cmd->result = hscb->target_status;
+
+          switch (status_byte(hscb->target_status))
+          {
+            case GOOD:
+              if (aic7xxx_verbose & VERBOSE_SEQINT)
+                printk(INFO_LEAD "Interrupted for status of GOOD???\n",
+                  p->host_no, CTL_OF_SCB(scb));
+              break;
+
+            case COMMAND_TERMINATED:
+            case CHECK_CONDITION:
+              if ( !(scb->flags & SCB_SENSE) )
+              {
+                /*
+                 * XXX - How do we save the residual (if there is one).
+                 */
+                if ( hscb->residual_SG_segment_count != 0 )
+                  aic7xxx_calculate_residual(p, scb);
+
+                /*
+                 * Send a sense command to the requesting target.
+                 * XXX - revisit this and get rid of the memcopys.
+                 */
+                memcpy(&scb->sense_cmd[0], &generic_sense[0],
+                       sizeof(generic_sense));
+
+                scb->sense_cmd[1] = (cmd->lun << 5);
+                scb->sense_cmd[4] = sizeof(cmd->sense_buffer);
+
+                scb->sg_list[0].address = 
+                  cpu_to_le32(VIRT_TO_BUS(&cmd->sense_buffer[0]));
+                scb->sg_list[0].length = 
+                  cpu_to_le32(sizeof(cmd->sense_buffer));
+
+                /*
+                 * XXX - We should allow disconnection, but can't as it
+                 * might allow overlapped tagged commands.
+                 */
+                /* hscb->control &= DISCENB; */
+                hscb->control = 0;
+                hscb->target_status = 0;
+                hscb->SG_list_pointer = 
+                  cpu_to_le32(VIRT_TO_BUS(&scb->sg_list[0]));
+                hscb->data_pointer = scb->sg_list[0].address;
+                hscb->data_count = scb->sg_list[0].length;
+                hscb->SCSI_cmd_pointer = 
+                  cpu_to_le32(VIRT_TO_BUS(&scb->sense_cmd[0]));
+                hscb->SCSI_cmd_length = COMMAND_SIZE(scb->sense_cmd[0]);
+                hscb->residual_SG_segment_count = 0;
+                hscb->residual_data_count[0] = 0;
+                hscb->residual_data_count[1] = 0;
+                hscb->residual_data_count[2] = 0;
+
+                scb->sg_count = hscb->SG_segment_count = 1;
+                scb->sg_length = sizeof(cmd->sense_buffer);
+                scb->tag_action = 0;
+                /*
+                 * This problem could be caused if the target has lost power
+                 * or found some other way to lose the negotiation settings,
+                 * so if needed, we'll re-negotiate while doing the sense cmd.
+                 * However, if this SCB already was attempting to negotiate,
+                 * then we assume this isn't the problem and skip this part.
+                 */
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+                if ( (scb->cmd->cmnd[0] != TEST_UNIT_READY) &&
+                     (p->dev_flags[tindex] & DEVICE_SCANNED) &&
+                     !(p->wdtr_pending & target_mask) &&
+                     !(p->sdtr_pending & target_mask) )
+                {
+                  p->needwdtr |= (p->needwdtr_copy & target_mask);
+                  p->needsdtr |= (p->needsdtr_copy & target_mask);
+                }
+                else if ( (scb->cmd == p->dev_wdtr_cmnd[tindex]) ||
+                          (scb->cmd == p->dev_sdtr_cmnd[tindex]) )
+                {
+                  /*
+                   * This is already a negotiation command, so we must have
+                   * already done either WDTR or SDTR (or maybe both).  So
+                   * we simply check sdtr_pending and needsdtr to see if we
+                   * should throw out SDTR on this command.
+                   *
+                   * Note: Don't check the needsdtr_copy here, instead just
+                   * check to see if WDTR wiped out our SDTR and set needsdtr.
+                   * Even if WDTR did wipe out SDTR and set needsdtr, if
+                   * parse_msg() then turned around and started our SDTR
+                   * in back to back fashion, then conclusion of that should
+                   * have negated any needsdtr setting.  That's why we only
+                   * check needsdtr and sdtr_pending.
+                   */
+                  scb->flags &= ~SCB_MSGOUT_BITS;
+                  if ( (scb->cmd == p->dev_wdtr_cmnd[tindex]) &&
+                       !(p->sdtr_pending & target_mask) &&
+                       (p->needsdtr & target_mask) )
+                  {
+                    p->sdtr_pending |= target_mask;
+                    hscb->control |= MK_MESSAGE;
+                    scb->flags |= SCB_MSGOUT_SDTR;
+                  }
+
+                  /*
+                   * This is the important part though.  We are getting sense
+                   * info back from this device.  It's going into a fake
+                   * command.  We need to put that into the real command
+                   * instead so that the mid level SCSI code can act upon it.
+                   * So, when we set up these fake commands, the next pointer
+                   * is used to point to the real command.  Use that to change
+                   * the address of our sense_buffer[] to the real command.
+                   * However, don't do this if the real command is also a
+                   * TEST_UNIT_READY as it will most likely pull down its own
+                   * SENSE information anyway.
+                   */
+                  if (cmd->next->cmnd[0] != TEST_UNIT_READY)
+                  {
+                    scb->sg_list[0].address = 
+                      cpu_to_le32(VIRT_TO_BUS(&cmd->next->sense_buffer[0]));
+                    hscb->data_pointer = scb->sg_list[0].address;
+                  }
+                }
+#else
+                if ( (scb->cmd->cmnd[0] != TEST_UNIT_READY) &&
+                     !(scb->flags & SCB_MSGOUT_BITS) &&
+                     (scb->cmd->lun == 0) &&
+                     (p->dev_flags[TARGET_INDEX(scb->cmd)] & DEVICE_SCANNED) )
+                {
+                  if ( (p->needwdtr_copy & target_mask) &&
+                       !(p->wdtr_pending & target_mask) &&
+                       !(p->sdtr_pending & target_mask) )
+                  {
+                    p->needwdtr |= target_mask;
+                    p->wdtr_pending |= target_mask;
+                    hscb->control |= MK_MESSAGE;
+                    scb->flags |= SCB_MSGOUT_WDTR;
+                  }
+                  if ( p->needsdtr_copy & target_mask )
+                  {
+                    p->needsdtr |= target_mask;
+                    if ( !(p->wdtr_pending & target_mask) &&
+                         !(p->sdtr_pending & target_mask) )
+                    {
+                      p->sdtr_pending |= target_mask;
+                      hscb->control |= MK_MESSAGE;
+                      scb->flags |= SCB_MSGOUT_SDTR;
+                    }
+                  }
+                }
+                else
+                  scb->flags &= ~SCB_MSGOUT_BITS;
+#endif /* AIC7XXX_FAKE_NEGOTIATION_CMDS */
+                scb->flags |= SCB_SENSE;
+                /*
+                 * Ensure the target is busy since this will be
+                 * an untagged request.
+                 */
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+                if (aic7xxx_verbose > 0xffff)
+                {
+                  if (scb->flags & SCB_MSGOUT_BITS)
+                    printk(INFO_LEAD "Requesting SENSE with %s\n", p->host_no,
+                           CTL_OF_SCB(scb), (scb->flags & SCB_MSGOUT_SDTR) ?
+                           "SDTR" : "WDTR");
+                  else
+                    printk(INFO_LEAD "Requesting SENSE, no MSG\n", p->host_no,
+                           CTL_OF_SCB(scb));
+                }
+#endif
+                aic7xxx_busy_target(p, scb);
+                aic_outb(p, SEND_SENSE, RETURN_1);
+                aic7xxx_error(cmd) = DID_OK;
+                break;
+              }  /* first time sense, no errors */
+              aic7xxx_error(cmd) = DID_OK;
+              scb->flags &= ~SCB_SENSE;
+              break;
+
+            case QUEUE_FULL:
+              queue_flag = TRUE;    /* Mark that this is a QUEUE_FULL and */
+            case BUSY:              /* drop through to here */
+            {
+              struct aic7xxx_scb *next_scbp, *prev_scbp;
+              unsigned char active_hscb, next_hscb, prev_hscb, scb_index;
+              /*
+               * We have to look three places for queued commands:
+               *  1: QINFIFO
+               *  2: p->waiting_scbs queue
+               *  3: WAITING_SCBS list on card (for commands that are started
+               *     but haven't yet made it to the device)
+               */
+              aic7xxx_search_qinfifo(p, target, channel, lun,
+                SCB_LIST_NULL, 0, TRUE,
+                &p->delayed_scbs[tindex]);
+              next_scbp = p->waiting_scbs.head;
+              while ( next_scbp != NULL )
+              {
+                prev_scbp = next_scbp;
+                next_scbp = next_scbp->q_next;
+                if ( aic7xxx_match_scb(p, prev_scbp, target, channel, lun,
+                     SCB_LIST_NULL) )
+                {
+                  scbq_remove(&p->waiting_scbs, prev_scbp);
+                  scbq_insert_tail(&p->delayed_scbs[tindex],
+                    prev_scbp);
+                }
+              }
+              next_scbp = NULL;
+              active_hscb = aic_inb(p, SCBPTR);
+              prev_hscb = next_hscb = scb_index = SCB_LIST_NULL;
+              next_hscb = aic_inb(p, WAITING_SCBH);
+              while (next_hscb != SCB_LIST_NULL)
+              {
+                aic_outb(p, next_hscb, SCBPTR);
+                scb_index = aic_inb(p, SCB_TAG);
+                if (scb_index < p->scb_data->numscbs)
+                {
+                  next_scbp = p->scb_data->scb_array[scb_index];
+                  if (aic7xxx_match_scb(p, next_scbp, target, channel, lun,
+                      SCB_LIST_NULL) )
+                  {
+                    if (next_scbp->flags & SCB_WAITINGQ)
+                    {
+                      p->dev_active_cmds[tindex]++;
+                      /* NOTE(review): this decrement plus the one a few
+                       * lines below means activescbs drops by two for an
+                       * SCB already on a waiting queue -- presumably the
+                       * first was meant to undo prior accounting; verify. */
+                      p->activescbs--;
+                      scbq_remove(&p->delayed_scbs[tindex], next_scbp);
+                      scbq_remove(&p->waiting_scbs, next_scbp);
+                    }
+                    scbq_insert_head(&p->delayed_scbs[tindex],
+                      next_scbp);
+                    next_scbp->flags |= SCB_WAITINGQ;
+                    p->dev_active_cmds[tindex]--;
+                    p->activescbs--;
+                    next_hscb = aic_inb(p, SCB_NEXT);
+                    aic_outb(p, 0, SCB_CONTROL);
+                    aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+                    aic7xxx_add_curscb_to_free_list(p);
+                    if (prev_hscb == SCB_LIST_NULL)
+                    {
+                      /* We were first on the list,
+                       * so we kill the selection
+                       * hardware.  Let the sequencer
+                       * re-init the hardware itself
+                       */
+                      aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
+                      aic_outb(p, CLRSELTIMEO, CLRSINT1);
+                      aic_outb(p, next_hscb, WAITING_SCBH);
+                    }
+                    else
+                    {
+                      aic_outb(p, prev_hscb, SCBPTR);
+                      aic_outb(p, next_hscb, SCB_NEXT);
+                    }
+                  }
+                  else
+                  {
+                    prev_hscb = next_hscb;
+                    next_hscb = aic_inb(p, SCB_NEXT);
+                  }
+                } /* scb_index >= p->scb_data->numscbs */
+              }
+              aic_outb(p, active_hscb, SCBPTR);
+              if (scb->flags & SCB_WAITINGQ)
+              {
+                scbq_remove(&p->delayed_scbs[tindex], scb);
+                scbq_remove(&p->waiting_scbs, scb);
+                p->dev_active_cmds[tindex]++;
+                p->activescbs++;
+              }
+              scbq_insert_head(&p->delayed_scbs[tindex], scb);
+              p->dev_active_cmds[tindex]--;
+              p->activescbs--;
+              scb->flags |= SCB_WAITINGQ | SCB_WAS_BUSY;
+                  
+              /* Arm (or advance) the per-device delay timer so the
+               * delayed SCBs get requeued once the device settles. */
+              if ( !(p->dev_timer_active & (0x01 << tindex)) )
+              {
+                p->dev_timer_active |= (0x01 << tindex);
+                if ( p->dev_active_cmds[tindex] )
+                {
+                  p->dev_expires[tindex] = jiffies + HZ;
+                }
+                else
+                {
+                  p->dev_expires[tindex] = jiffies + (HZ / 10);
+                }
+                if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) )
+                {
+                  p->dev_timer.expires = p->dev_expires[tindex];
+                  p->dev_timer_active |= (0x01 << MAX_TARGETS);
+                  add_timer(&p->dev_timer);
+                }
+                else if ( time_after_eq(p->dev_timer.expires,
+                                        p->dev_expires[tindex]) )
+                {
+                  del_timer(&p->dev_timer);
+                  p->dev_timer.expires = p->dev_expires[tindex];
+                  add_timer(&p->dev_timer);
+                }
+              }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+              if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
+              {
+                if (queue_flag)
+                  printk(INFO_LEAD "Queue full received; queue depth %d, "
+                    "active %d\n", p->host_no, CTL_OF_SCB(scb),
+                    p->dev_max_queue_depth[tindex],
+                    p->dev_active_cmds[tindex]);
+                else
+                  printk(INFO_LEAD "Target busy\n", p->host_no, CTL_OF_SCB(scb));
+
+              }
+#endif
+              if (queue_flag)
+              {
+                /* Shrink the working queue depth to what the device proved
+                 * it can take; after 14 repeats at the same level, make the
+                 * reduction permanent in dev_max_queue_depth. */
+                p->dev_temp_queue_depth[tindex] = 
+                  p->dev_active_cmds[tindex];
+                if ( p->dev_last_queue_full[tindex] !=
+                     p->dev_active_cmds[tindex] )
+                {
+                  p->dev_last_queue_full[tindex] = 
+                      p->dev_active_cmds[tindex];
+                  p->dev_last_queue_full_count[tindex] = 0;
+                }
+                else
+                {
+                  p->dev_last_queue_full_count[tindex]++;
+                }
+                if ( (p->dev_last_queue_full_count[tindex] > 14) &&
+                     (p->dev_active_cmds[tindex] > 4) )
+                {
+                  if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+                    printk(INFO_LEAD "Queue depth reduced to %d\n", p->host_no,
+                      CTL_OF_SCB(scb), p->dev_active_cmds[tindex]);
+                  p->dev_max_queue_depth[tindex] = 
+                      p->dev_active_cmds[tindex];
+                  p->dev_last_queue_full[tindex] = 0;
+                  p->dev_last_queue_full_count[tindex] = 0;
+                }
+                else
+                {
+                  p->dev_flags[tindex] |= DEVICE_WAS_BUSY;
+                }
+              }
+              break;
+            }
+            
+            default:
+              if (aic7xxx_verbose & VERBOSE_SEQINT)
+                printk(INFO_LEAD "Unexpected target status 0x%x.\n", p->host_no,
+                     CTL_OF_SCB(scb), scb->hscb->target_status);
+              if (!aic7xxx_error(cmd))
+              {
+                aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+              }
+              break;
+          } /* end switch */
+        } /* end else of */
+      }
+      break;
+
+    case AWAITING_MSG:
+      {
+        unsigned char scb_index, msg_out;
+
+        scb_index = aic_inb(p, SCB_TAG);
+        msg_out = aic_inb(p, MSG_OUT);
+        scb = p->scb_data->scb_array[scb_index];
+        p->msg_index = p->msg_len = 0;
+        /*
+         * This SCB had a MK_MESSAGE set in its control byte informing
+         * the sequencer that we wanted to send a special message to
+         * this target.
+         */
+
+        /* Tag bytes lead any other message we are about to build. */
+        if ( !(scb->flags & SCB_DEVICE_RESET) &&
+              (aic_inb(p, MSG_OUT) == MSG_IDENTIFYFLAG) &&
+              (scb->hscb->control & TAG_ENB) )
+        {
+          p->msg_buf[p->msg_index++] = scb->tag_action;
+          p->msg_buf[p->msg_index++] = scb->hscb->tag;
+          p->msg_len += 2;
+        }
+
+        if (scb->flags & SCB_DEVICE_RESET)
+        {
+          p->msg_buf[p->msg_index++] = MSG_BUS_DEV_RESET;
+          p->msg_len++;
+          if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+            printk(INFO_LEAD "Bus device reset mailed.\n",
+                 p->host_no, CTL_OF_SCB(scb));
+        }
+        else if (scb->flags & SCB_ABORT)
+        {
+          if (scb->tag_action)
+          {
+            p->msg_buf[p->msg_index++] = MSG_ABORT_TAG;
+          }
+          else
+          {
+            p->msg_buf[p->msg_index++] = MSG_ABORT;
+          }
+          p->msg_len++;
+          if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+            printk(INFO_LEAD "Abort message mailed.\n", p->host_no,
+              CTL_OF_SCB(scb));
+        }
+        else if (scb->flags & SCB_MSGOUT_WDTR)
+        {
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+          if (aic7xxx_verbose > 0xffff)
+            printk(INFO_LEAD "Sending WDTR message.\n", p->host_no,
+                   CTL_OF_SCB(scb));
+#endif
+          aic7xxx_construct_wdtr(p,
+            p->transinfo[TARGET_INDEX(scb->cmd)].goal_width);
+        }
+        else if (scb->flags & SCB_MSGOUT_SDTR)
+        {
+          unsigned int max_sync, period;
+          /*
+           * We need to set an accurate goal_offset instead of
+           * the ridiculously high one we default to.  We should
+           * now know if we are wide.  Plus, the WDTR code will
+           * set our goal_offset for us as well.
+           */
+          if (p->transinfo[tindex].goal_offset)
+          {
+            if (p->features & AHC_ULTRA2)
+              p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+            else if (p->transinfo[tindex].cur_width == MSG_EXT_WDTR_BUS_16_BIT)
+              p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+            else
+              p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+          }
+          /*
+           * Now that the device is selected, use the bits in SBLKCTL and
+           * SSTAT2 to determine the max sync rate for this device.
+           */
+          if (p->features & AHC_ULTRA2)
+          {
+            if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
+                !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
+            {
+              max_sync = AHC_SYNCRATE_ULTRA2;
+            }
+            else
+            {
+              max_sync = AHC_SYNCRATE_ULTRA;
+            }
+          }
+          else if (p->features & AHC_ULTRA)
+          {
+            max_sync = AHC_SYNCRATE_ULTRA;
+          }
+          else
+          {
+            max_sync = AHC_SYNCRATE_FAST;
+          }
+          period = p->transinfo[tindex].goal_period;
+          aic7xxx_find_syncrate(p, &period, max_sync);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+          if (aic7xxx_verbose > 0xffff)
+            printk(INFO_LEAD "Sending SDTR %d/%d message.\n", p->host_no,
+                   CTL_OF_SCB(scb),
+                   p->transinfo[tindex].goal_period,
+                   p->transinfo[tindex].goal_offset);
+#endif
+          aic7xxx_construct_sdtr(p, period,
+            p->transinfo[tindex].goal_offset);
+        }
+        else 
+        {
+          sti();
+          panic("aic7xxx: AWAITING_MSG for an SCB that does "
+                "not have a waiting message.\n");
+        }
+        /*
+         * We've set everything up to send our message, now to actually do
+         * so we need to enable reqinit interrupts and let the interrupt
+         * handler do the rest.  We don't want to unpause the sequencer yet
+         * though so we'll return early.  We also have to make sure that
+         * we clear the SEQINT *BEFORE* we set the REQINIT handler active
+         * or else it's possible on VLB cards to lose the first REQINIT
+         * interrupt.  Edge triggered EISA cards could also lose this
+         * interrupt, although PCI and level triggered cards should not
+         * have this problem since they continually interrupt the kernel
+         * until we take care of the situation.
+         */
+        scb->flags |= SCB_MSGOUT_SENT;
+        p->msg_index = 0;
+        p->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+        p->flags |= AHC_HANDLING_REQINITS;
+        aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1);
+        return;
+      }
+      break;
+
+    case DATA_OVERRUN:
+      {
+        unsigned char scb_index = aic_inb(p, SCB_TAG);
+        unsigned char lastphase = aic_inb(p, LASTPHASE);
+        unsigned int i;
+
+        scb = (p->scb_data->scb_array[scb_index]);
+        /*
+         * XXX - What do we really want to do on an overrun?  The
+         *       mid-level SCSI code should handle this, but for now,
+         *       we'll just indicate that the command should retried.
+         *    If we retrieved sense info on this target, then the 
+         *    base SENSE info should have been saved prior to the
+         *    overrun error.  In that case, we return DID_OK and let
+         *    the mid level code pick up on the sense info.  Otherwise
+         *    we return DID_ERROR so the command will get retried.
+         */
+        if ( !(scb->flags & SCB_SENSE) )
+        {
+          printk(WARN_LEAD "Data overrun detected in %s phase, tag %d;\n",
+            p->host_no, CTL_OF_SCB(scb), 
+            (lastphase == P_DATAIN) ? "Data-In" : "Data-Out", scb->hscb->tag);
+          printk(KERN_WARNING "  %s seen Data Phase. Length=%d, NumSGs=%d.\n",
+            (aic_inb(p, SEQ_FLAGS) & DPHASE) ? "Have" : "Haven't",
+            scb->sg_length, scb->sg_count);
+          for (i = 0; i < scb->sg_count; i++)
+          {
+            printk(KERN_WARNING "     sg[%d] - Addr 0x%x : Length %d\n",
+                 i, 
+                 le32_to_cpu(scb->sg_list[i].address),
+                 le32_to_cpu(scb->sg_list[i].length) );
+          }
+          aic7xxx_error(scb->cmd) = DID_ERROR;
+        }
+        else
+          printk(INFO_LEAD "Data Overrun during SEND_SENSE operation.\n",
+            p->host_no, CTL_OF_SCB(scb));
+      }
+      break;
+
+#if AIC7XXX_NOT_YET 
+    case TRACEPOINT:
+      {
+        printk(INFO_LEAD "Tracepoint #1 reached.\n", p->host_no, channel,
+               target, lun);
+      }
+      break;
+
+    case TRACEPOINT2:
+      {
+        printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no, channel,
+               target, lun);
+      }
+      break;
+
+    /* XXX Fill these in later */
+    case MSG_BUFFER_BUSY:
+      printk("aic7xxx: Message buffer busy.\n");
+      break;
+    case MSGIN_PHASEMIS:
+      printk("aic7xxx: Message-in phasemis.\n");
+      break;
+#endif
+
+    default:               /* unknown */
+      printk(WARN_LEAD "Unknown SEQINT, INTSTAT 0x%x, SCSISIGI 0x%x.\n",
+             p->host_no, channel, target, lun, intstat,
+             aic_inb(p, SCSISIGI));
+      break;
+  }
+
+  /*
+   * Clear the sequencer interrupt and unpause the sequencer.
+   */
+  unpause_sequencer(p, /* unpause always */ TRUE);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_parse_msg
+ *
+ * Description:
+ * Parses incoming messages into actions on behalf of
+ * aic7xxx_handle_reqinit
+ *-F*************************************************************************/
+static int
+aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ int reject, reply, done;
+ unsigned char target_scsirate, tindex;
+ unsigned short target_mask;
+ unsigned char target, channel, lun;
+
+ target = scb->cmd->target;
+ channel = scb->cmd->channel;
+ lun = scb->cmd->lun;
+ reply = reject = done = FALSE;
+ tindex = TARGET_INDEX(scb->cmd);
+ target_scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
+ target_mask = (0x01 << tindex);
+
+ /*
+ * Parse as much of the message as is availible,
+ * rejecting it if we don't support it. When
+ * the entire message is availible and has been
+ * handled, return TRUE indicating that we have
+ * parsed an entire message.
+ */
+
+ if (p->msg_buf[0] != MSG_EXTENDED)
+ {
+ reject = TRUE;
+ }
+
+ /*
+ * Just accept the length byte outright and perform
+ * more checking once we know the message type.
+ */
+
+ if ( !reject && (p->msg_len > 2) )
+ {
+ switch(p->msg_buf[2])
+ {
+ case MSG_EXT_SDTR:
+ {
+ unsigned int period, offset;
+ unsigned char maxsync, saved_offset;
+ struct aic7xxx_syncrate *syncrate;
+
+ if (p->msg_buf[1] != MSG_EXT_SDTR_LEN)
+ {
+ reject = TRUE;
+ break;
+ }
+
+ if (p->msg_len < (MSG_EXT_SDTR_LEN + 2))
+ {
+ break;
+ }
+
+ period = p->msg_buf[3];
+ saved_offset = offset = p->msg_buf[4];
+
+ if (p->features & AHC_ULTRA2)
+ {
+ if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
+ !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
+ {
+ maxsync = AHC_SYNCRATE_ULTRA2;
+ }
+ else
+ {
+ maxsync = AHC_SYNCRATE_ULTRA;
+ }
+ }
+ else if (p->features & AHC_ULTRA)
+ {
+ maxsync = AHC_SYNCRATE_ULTRA;
+ }
+ else
+ {
+ maxsync = AHC_SYNCRATE_FAST;
+ }
+ /*
+ * We might have a device that is starting negotiation with us
+ * before we can start up negotiation with it....be prepared to
+ * have a device ask for a higher speed then we want to give it
+ * in that case
+ */
+ if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) )
+ {
+ if (!(p->dev_flags[tindex] & DEVICE_SCANNED))
+ {
+ /*
+ * Not only is the device starting this up, but it also hasn't
+ * been scanned yet, so this would likely be our TUR or our
+ * INQUIRY command at scan time, so we need to use the
+ * settings from the SEEPROM if they existed. Of course, even
+ * if we didn't find a SEEPROM, we stuffed default values into
+ * the user settings anyway, so use those in all cases.
+ */
+ p->transinfo[tindex].goal_period =
+ p->transinfo[tindex].user_period;
+ p->transinfo[tindex].goal_offset =
+ p->transinfo[tindex].user_offset;
+ p->needsdtr_copy |= target_mask;
+ }
+ if ( !p->transinfo[tindex].goal_offset )
+ period = 255;
+ if ( p->transinfo[tindex].goal_period > period )
+ period = p->transinfo[tindex].goal_period;
+ }
+
+ syncrate = aic7xxx_find_syncrate(p, &period, maxsync);
+ aic7xxx_validate_offset(p, syncrate, &offset,
+ target_scsirate & WIDEXFER);
+ aic7xxx_set_syncrate(p, syncrate, target, channel, period,
+ offset, AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+
+ /*
+ * Did we drop to async? If so, are we sending a reply? If we are,
+ * then we have to make sure that the reply value reflects the proper
+ * settings so we need to set the goal values according to what
+ * we need to send.
+ */
+ if ( (offset == 0) || (offset != saved_offset) ||
+ ((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) ) )
+ {
+ aic7xxx_set_syncrate(p, syncrate, target, channel, period,
+ offset, AHC_TRANS_GOAL|AHC_TRANS_QUITE);
+ if ( offset == 0 )
+ {
+ p->needsdtr_copy &= ~target_mask;
+ }
+ }
+
+ /*
+ * Did we start this, if not, or if we went to low and had to
+ * go async, then send an SDTR back to the target
+ */
+ p->needsdtr &= ~target_mask;
+ p->sdtr_pending &= ~target_mask;
+ if ( ((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) ==
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) &&
+ (offset == saved_offset) )
+ {
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ }
+ else
+ {
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
+ done = TRUE;
+ break;
+ }
+ case MSG_EXT_WDTR:
+ {
+ unsigned char bus_width;
+
+ if (p->msg_buf[1] != MSG_EXT_WDTR_LEN)
+ {
+ reject = TRUE;
+ break;
+ }
+
+ if (p->msg_len < (MSG_EXT_WDTR_LEN + 2))
+ {
+ break;
+ }
+
+ bus_width = p->msg_buf[3];
+ if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR)) ==
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR) )
+ {
+ switch(bus_width)
+ {
+ default:
+ {
+ reject = TRUE;
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ ((p->dev_flags[tindex] & DEVICE_PRINT_WDTR) ||
+ (aic7xxx_verbose > 0xffff)) )
+ {
+ printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n",
+ p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width));
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
+ }
+ } /* We fall through on purpose */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ {
+ bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ p->needwdtr_copy &= ~target_mask;
+ break;
+ }
+ case MSG_EXT_WDTR_BUS_16_BIT:
+ {
+ break;
+ }
+ }
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ p->wdtr_pending &= ~target_mask;
+ p->needwdtr &= ~target_mask;
+ }
+ else
+ {
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ scb->flags |= SCB_MSGOUT_WDTR;
+ reply = TRUE;
+ if ( !(p->dev_flags[tindex] & DEVICE_SCANNED) )
+ {
+ /*
+ * Well, we now know the WDTR and SYNC caps of this device since
+ * it contacted us first, mark it as such and copy the user stuff
+ * over to the goal stuff.
+ */
+ p->transinfo[tindex].goal_period =
+ p->transinfo[tindex].user_period;
+ p->transinfo[tindex].goal_offset =
+ p->transinfo[tindex].user_offset;
+ p->transinfo[tindex].goal_width =
+ p->transinfo[tindex].user_width;
+ p->needwdtr_copy |= target_mask;
+ p->needsdtr_copy |= target_mask;
+ }
+ switch(bus_width)
+ {
+ default:
+ {
+ if ( (p->features & AHC_WIDE) &&
+ (p->transinfo[tindex].goal_width ==
+ MSG_EXT_WDTR_BUS_16_BIT) )
+ {
+ bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ break;
+ }
+ } /* Fall through if we aren't a wide card */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ {
+ p->needwdtr_copy &= ~target_mask;
+ bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ aic7xxx_set_width(p, target, channel, lun, bus_width,
+ AHC_TRANS_GOAL|AHC_TRANS_QUITE);
+ break;
+ }
+ }
+ p->needwdtr &= ~target_mask;
+ p->wdtr_pending &= ~target_mask;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
+ aic7xxx_set_width(p, target, channel, lun, bus_width,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+
+ /*
+ * By virtue of the SCSI spec, a WDTR message negates any existing
+ * SDTR negotiations. So, even if needsdtr isn't marked for this
+ * device, we still have to do a new SDTR message if the device
+ * supports SDTR at all. Therefore, we check needsdtr_copy instead
+ * of needstr.
+ */
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
+ if ( (p->needsdtr_copy & target_mask) &&
+ !(p->sdtr_pending & target_mask))
+ {
+ p->needsdtr |= target_mask;
+ if ( !reject && !reply )
+ {
+ scb->flags &= ~SCB_MSGOUT_WDTR;
+ if (p->transinfo[tindex].goal_period)
+ {
+ p->sdtr_pending |= target_mask;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
+ }
+ }
+ done = TRUE;
+ break;
+ }
+ default:
+ {
+ reject = TRUE;
+ break;
+ }
+ } /* end of switch(p->msg_type) */
+ } /* end of if (!reject && (p->msg_len > 2)) */
+
+ if (reject)
+ {
+ aic_outb(p, MSG_MESSAGE_REJECT, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ done = TRUE;
+ }
+ return(done);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_handle_reqinit
+ *
+ * Description:
+ *   Interrupt handler for REQINIT interrupts (used to transfer messages to
+ *   and from devices).  One byte is moved per invocation, driven by
+ *   p->msg_type / p->msg_index / p->msg_len state; the sequencer stays
+ *   paused until the whole message has been transferred.
+ *_F*************************************************************************/
+static void
+aic7xxx_handle_reqinit(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+  unsigned char lastbyte;
+  unsigned char phasemis;
+  int done = FALSE;
+
+  switch(p->msg_type)
+  {
+    case MSG_TYPE_INITIATOR_MSGOUT:
+    {
+      if (p->msg_len == 0)
+        panic("aic7xxx: REQINIT with no active message!\n");
+
+      /* Last byte of the queued message, or bus moved off message-out? */
+      lastbyte = (p->msg_index == (p->msg_len - 1));
+      phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK) != P_MESGOUT;
+
+      if (lastbyte || phasemis)
+      {
+        /* Time to end the message */
+        p->msg_len = 0;
+        p->msg_type = MSG_TYPE_NONE;
+        /*
+         * NOTE-TO-MYSELF: If you clear the REQINIT after you
+         * disable REQINITs, then cases of REJECT_MSG stop working
+         * and hang the bus
+         */
+        aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1);
+        aic_outb(p, CLRSCSIINT, CLRINT);
+        p->flags &= ~AHC_HANDLING_REQINITS;
+
+        if (phasemis == 0)
+        {
+          /* Hand the final byte to the sequencer via SINDEX. */
+          aic_outb(p, p->msg_buf[p->msg_index], SINDEX);
+          aic_outb(p, 0, RETURN_1);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+          if (aic7xxx_verbose > 0xffff)
+            printk(INFO_LEAD "Completed sending of REQINIT message.\n",
+                   p->host_no, CTL_OF_SCB(scb));
+#endif
+        }
+        else
+        {
+          /* Tell the sequencer the phase changed under us. */
+          aic_outb(p, MSGOUT_PHASEMIS, RETURN_1);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+          if (aic7xxx_verbose > 0xffff)
+            printk(INFO_LEAD "PHASEMIS while sending REQINIT message.\n",
+                   p->host_no, CTL_OF_SCB(scb));
+#endif
+        }
+        unpause_sequencer(p, TRUE);
+      }
+      else
+      {
+        /*
+         * Present the byte on the bus (clearing REQINIT) but don't
+         * unpause the sequencer.
+         */
+        aic_outb(p, CLRREQINIT, CLRSINT1);
+        aic_outb(p, CLRSCSIINT, CLRINT);
+        aic_outb(p, p->msg_buf[p->msg_index++], SCSIDATL);
+      }
+      break;
+    }
+    case MSG_TYPE_INITIATOR_MSGIN:
+    {
+      phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK ) != P_MESGIN;
+
+      if (phasemis == 0)
+      {
+        p->msg_len++;
+        /* Pull the byte in without acking it */
+        p->msg_buf[p->msg_index] = aic_inb(p, SCSIBUSL);
+        done = aic7xxx_parse_msg(p, scb);
+        /* Ack the byte */
+        aic_outb(p, CLRREQINIT, CLRSINT1);
+        aic_outb(p, CLRSCSIINT, CLRINT);
+        aic_inb(p, SCSIDATL);
+        p->msg_index++;
+      }
+      if (phasemis || done)
+      {
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+        if (aic7xxx_verbose > 0xffff)
+        {
+          if (phasemis)
+            printk(INFO_LEAD "PHASEMIS while receiving REQINIT message.\n",
+                   p->host_no, CTL_OF_SCB(scb));
+          else
+            printk(INFO_LEAD "Completed receipt of REQINIT message.\n",
+                   p->host_no, CTL_OF_SCB(scb));
+        }
+#endif
+        /* Time to end our message session */
+        p->msg_len = 0;
+        p->msg_type = MSG_TYPE_NONE;
+        aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1);
+        aic_outb(p, CLRSCSIINT, CLRINT);
+        p->flags &= ~AHC_HANDLING_REQINITS;
+        unpause_sequencer(p, TRUE);
+      }
+      break;
+    }
+    default:
+    {
+      panic("aic7xxx: Unknown REQINIT message type.\n");
+      break;
+    }
+  } /* End of switch(p->msg_type) */
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_handle_scsiint
+ *
+ * Description:
+ *   Interrupt handler for SCSI interrupts (SCSIINT).  Reads SSTAT1 and
+ *   dispatches on the condition: bus reset, unexpected busfree,
+ *   selection timeout, parity error, or REQINIT message transfer.
+ *-F*************************************************************************/
+static void
+aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
+{
+  unsigned char scb_index;
+  unsigned char status;
+  struct aic7xxx_scb *scb;
+
+  scb_index = aic_inb(p, SCB_TAG);
+  status = aic_inb(p, SSTAT1);
+
+  /* Map the card's SCB tag to our SCB, if it's valid and active. */
+  if (scb_index < p->scb_data->numscbs)
+  {
+    scb = p->scb_data->scb_array[scb_index];
+    if ((scb->flags & SCB_ACTIVE) == 0)
+    {
+      scb = NULL;
+    }
+  }
+  else
+  {
+    scb = NULL;
+  }
+
+
+  if ((status & SCSIRSTI) != 0)
+  {
+    /* Another initiator reset the bus. */
+    int channel;
+
+    /* Only twin-channel 7770 cards have a second channel (B). */
+    if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+      channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
+    else
+      channel = 0;
+
+    if (aic7xxx_verbose & VERBOSE_RESET)
+      printk(WARN_LEAD "Someone else reset the channel!!\n",
+           p->host_no, channel, -1, -1);
+    /*
+     * Go through and abort all commands for the channel, but do not
+     * reset the channel again.
+     */
+    aic7xxx_reset_channel(p, channel, /* Initiate Reset */ FALSE);
+    aic7xxx_run_done_queue(p, FALSE);
+    scb = NULL;
+  }
+  else if ( ((status & BUSFREE) != 0) && ((status & SELTO) == 0) )
+  {
+    /*
+     * First look at what phase we were last in.  If it's message-out,
+     * chances are pretty good that the bus free was in response to
+     * one of our abort requests.
+     */
+    unsigned char lastphase = aic_inb(p, LASTPHASE);
+    unsigned char saved_tcl = aic_inb(p, SAVED_TCL);
+    unsigned char target = (saved_tcl >> 4) & 0x0F;
+    int channel;
+    int printerror = TRUE;
+
+    if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+      channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
+    else
+      channel = 0;
+
+    /* Stop any in-progress selection; keep (re)selection enables. */
+    aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP),
+             SCSISEQ);
+    if (lastphase == P_MESGOUT)
+    {
+      unsigned char message;
+
+      message = aic_inb(p, SINDEX);
+
+      if ((message == MSG_ABORT) || (message == MSG_ABORT_TAG))
+      {
+        /* Busfree was the expected response to our abort message. */
+        if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+          printk(INFO_LEAD "SCB %d abort delivered.\n", p->host_no,
+            CTL_OF_SCB(scb), scb->hscb->tag);
+        aic7xxx_reset_device(p, target, channel, ALL_LUNS,
+                (message == MSG_ABORT) ? SCB_LIST_NULL : scb->hscb->tag );
+        aic7xxx_run_done_queue(p, FALSE);
+        scb = NULL;
+        printerror = 0;
+      }
+      else if (message == MSG_BUS_DEV_RESET)
+      {
+        aic7xxx_handle_device_reset(p, target, channel);
+        scb = NULL;
+        printerror = 0;
+      }
+    }
+    if (printerror != 0)
+    {
+      /* Truly unexpected busfree: clean up the active command. */
+      if (scb != NULL)
+      {
+        unsigned char tag;
+
+        if ((scb->hscb->control & TAG_ENB) != 0)
+        {
+          tag = scb->hscb->tag;
+        }
+        else
+        {
+          tag = SCB_LIST_NULL;
+        }
+        aic7xxx_reset_device(p, target, channel, ALL_LUNS, tag);
+        aic7xxx_run_done_queue(p, FALSE);
+      }
+      printk(INFO_LEAD "Unexpected busfree, LASTPHASE = 0x%x, "
+             "SEQADDR = 0x%x\n", p->host_no, channel, target, -1, lastphase,
+             (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
+      scb = NULL;
+    }
+    aic_outb(p, MSG_NOOP, MSG_OUT);
+    aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT),
+      SIMODE1);
+    p->flags &= ~AHC_HANDLING_REQINITS;
+    aic_outb(p, CLRBUSFREE, CLRSINT1);
+    aic_outb(p, CLRSCSIINT, CLRINT);
+    restart_sequencer(p);
+    unpause_sequencer(p, TRUE);
+  }
+  else if ((status & SELTO) != 0)
+  {
+    /* Selection timeout: the target never responded. */
+    unsigned char scbptr;
+    unsigned char nextscb;
+    Scsi_Cmnd *cmd;
+
+    scbptr = aic_inb(p, WAITING_SCBH);
+    if (scbptr > p->scb_data->maxhscbs)
+    {
+      /*
+       * I'm still trying to track down exactly how this happens, but until
+       * I find it, this code will make sure we aren't passing bogus values
+       * into the SCBPTR register, even if that register will just wrap
+       * things around, we still don't like having out of range variables.
+       *
+       * NOTE: Don't check the aic7xxx_verbose variable, I want this message
+       * to always be displayed.
+       */
+      printk(INFO_LEAD "Invalid WAITING_SCBH value %d, improvising.\n",
+             p->host_no, -1, -1, -1, scbptr);
+      if (p->scb_data->maxhscbs > 4)
+        scbptr &= (p->scb_data->maxhscbs - 1);
+      else
+        scbptr &= 0x03;
+    }
+    /* Select the timed-out hardware SCB and look up its tag. */
+    aic_outb(p, scbptr, SCBPTR);
+    scb_index = aic_inb(p, SCB_TAG);
+
+    scb = NULL;
+    if (scb_index < p->scb_data->numscbs)
+    {
+      scb = p->scb_data->scb_array[scb_index];
+      if ((scb->flags & SCB_ACTIVE) == 0)
+      {
+        scb = NULL;
+      }
+    }
+    if (scb == NULL)
+    {
+      printk(WARN_LEAD "Referenced SCB %d not valid during SELTO.\n",
+             p->host_no, -1, -1, -1, scb_index);
+      printk(KERN_WARNING "        SCSISEQ = 0x%x SEQADDR = 0x%x SSTAT0 = 0x%x "
+             "SSTAT1 = 0x%x\n", aic_inb(p, SCSISEQ),
+             aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
+             aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
+      if (aic7xxx_panic_on_abort)
+        aic7xxx_panic_abort(p, NULL);
+    }
+    else
+    {
+      cmd = scb->cmd;
+      cmd->result = (DID_TIME_OUT << 16);
+
+      /*
+       * Clear out this hardware SCB
+       */
+      aic_outb(p, 0, SCB_CONTROL);
+
+      /*
+       * Clear out a few values in the card that are in an undetermined
+       * state.
+       */
+      aic_outb(p, MSG_NOOP, MSG_OUT);
+
+      /*
+       * Shift the waiting for selection queue forward
+       */
+      nextscb = aic_inb(p, SCB_NEXT);
+      aic_outb(p, nextscb, WAITING_SCBH);
+
+      /*
+       * Put this SCB back on the free list.
+       */
+      aic7xxx_add_curscb_to_free_list(p);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+      if (aic7xxx_verbose > 0xffff)
+        printk(INFO_LEAD "Selection Timeout.\n", p->host_no, CTL_OF_SCB(scb));
+#endif
+      if (scb->flags & SCB_QUEUED_ABORT)
+      {
+        /*
+         * We know that this particular SCB had to be the queued abort since
+         * the disconnected SCB would have gotten a reconnect instead.
+         * What we need to do then is to let the command timeout again so
+         * we get a reset since this abort just failed.
+         */
+        cmd->result = 0;
+        scb = NULL;
+      }
+    }
+    /*
+     * Restarting the sequencer will stop the selection and make sure devices
+     * are allowed to reselect in.
+     */
+    aic_outb(p, 0, SCSISEQ);
+    aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1);
+    p->flags &= ~AHC_HANDLING_REQINITS;
+    aic_outb(p, CLRSELTIMEO | CLRBUSFREE, CLRSINT1);
+    aic_outb(p, CLRSCSIINT, CLRINT);
+    restart_sequencer(p);
+    unpause_sequencer(p, TRUE);
+  }
+  else if (scb == NULL)
+  {
+    /* SCSIINT with no valid SCB: log state and try to continue. */
+    printk(WARN_LEAD "aic7xxx_isr - referenced scb not valid "
+           "during scsiint 0x%x scb(%d)\n"
+           "      SIMODE0 0x%x, SIMODE1 0x%x, SSTAT0 0x%x, SEQADDR 0x%x\n",
+           p->host_no, -1, -1, -1, status, scb_index, aic_inb(p, SIMODE0),
+           aic_inb(p, SIMODE1), aic_inb(p, SSTAT0),
+           (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
+    /*
+     * Turn off the interrupt and set status to zero, so that it
+     * falls through the rest of the SCSIINT code.
+     */
+    aic_outb(p, status, CLRSINT1);
+    aic_outb(p, CLRSCSIINT, CLRINT);
+    unpause_sequencer(p, /* unpause always */ TRUE);
+    scb = NULL;
+  }
+  else if (status & SCSIPERR)
+  {
+    /*
+     * Determine the bus phase and queue an appropriate message.
+     */
+    char  *phase;
+    Scsi_Cmnd *cmd;
+    unsigned char mesg_out = MSG_NOOP;
+    unsigned char lastphase = aic_inb(p, LASTPHASE);
+
+    cmd = scb->cmd;
+    switch (lastphase)
+    {
+      case P_DATAOUT:
+        phase = "Data-Out";
+        break;
+      case P_DATAIN:
+        phase = "Data-In";
+        mesg_out = MSG_INITIATOR_DET_ERR;
+        break;
+      case P_COMMAND:
+        phase = "Command";
+        break;
+      case P_MESGOUT:
+        phase = "Message-Out";
+        break;
+      case P_STATUS:
+        phase = "Status";
+        mesg_out = MSG_INITIATOR_DET_ERR;
+        break;
+      case P_MESGIN:
+        phase = "Message-In";
+        mesg_out = MSG_PARITY_ERROR;
+        break;
+      default:
+        phase = "unknown";
+        break;
+    }
+
+    /*
+     * A parity error has occurred during a data
+     * transfer phase. Flag it and continue.
+     */
+    printk(WARN_LEAD "Parity error during %s phase.\n",
+           p->host_no, CTL_OF_SCB(scb), phase);
+
+    /*
+     * We've set the hardware to assert ATN if we get a parity
+     * error on "in" phases, so all we need to do is stuff the
+     * message buffer with the appropriate message. "In" phases
+     * have set mesg_out to something other than MSG_NOP.
+     */
+    if (mesg_out != MSG_NOOP)
+    {
+      aic_outb(p, mesg_out, MSG_OUT);
+      scb = NULL;
+    }
+    aic_outb(p, CLRSCSIPERR, CLRSINT1);
+    aic_outb(p, CLRSCSIINT, CLRINT);
+    unpause_sequencer(p, /* unpause_always */ TRUE);
+  }
+  else if ( (status & REQINIT) &&
+            (p->flags & AHC_HANDLING_REQINITS) )
+  {
+    /* Message byte transfer in progress; hand off to the msg engine. */
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+    if (aic7xxx_verbose > 0xffff)
+      printk(INFO_LEAD "Handling REQINIT, SSTAT1=0x%x.\n", p->host_no,
+             CTL_OF_SCB(scb), aic_inb(p, SSTAT1));
+#endif
+    aic7xxx_handle_reqinit(p, scb);
+    return;
+  }
+  else
+  {
+    /*
+     * We don't know what's going on. Turn off the
+     * interrupt source and try to continue.
+     */
+    if (aic7xxx_verbose & VERBOSE_SCSIINT)
+      printk(INFO_LEAD "Unknown SCSIINT status, SSTAT1(0x%x).\n",
+        p->host_no, -1, -1, -1, status);
+    aic_outb(p, status, CLRSINT1);
+    aic_outb(p, CLRSCSIINT, CLRINT);
+    unpause_sequencer(p, /* unpause always */ TRUE);
+    scb = NULL;
+  }
+  /* Branches that leave scb non-NULL want the command completed. */
+  if (scb != NULL)
+  {
+    aic7xxx_done(p, scb);
+  }
+}
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+/*
+ * Debug-only consistency check: walks the card's free, disconnected,
+ * and waiting SCB lists (via the SCBPTR register) and verifies that
+ * no hardware SCB appears on more than one list, that all links are
+ * in range, and that at most one SCB is unaccounted for.  Panics via
+ * aic7xxx_panic_abort() if the structures look corrupt.
+ */
+static void
+aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer)
+{
+  unsigned char saved_scbptr, free_scbh, dis_scbh, wait_scbh, temp;
+  int i, bogus, lost;
+  static unsigned char scb_status[AIC7XXX_MAXSCB];
+
+/* Bit flags recorded per hardware SCB while walking the lists. */
+#define SCB_NO_LIST 0
+#define SCB_FREE_LIST 1
+#define SCB_WAITING_LIST 2
+#define SCB_DISCONNECTED_LIST 4
+#define SCB_CURRENTLY_ACTIVE 8
+
+  /*
+   * Note, these checks will fail on a regular basis once the machine moves
+   * beyond the bus scan phase.  The problem is race conditions concerning
+   * the scbs and where they are linked in.  When you have 30 or so commands
+   * outstanding on the bus, and run this twice with every interrupt, the
+   * chances get pretty good that you'll catch the sequencer with an SCB
+   * only partially linked in.  Therefore, once we pass the scan phase
+   * of the bus, we really should disable this function.
+   */
+  bogus = FALSE;
+  memset(&scb_status[0], 0, sizeof(scb_status));
+  pause_sequencer(p);
+  saved_scbptr = aic_inb(p, SCBPTR);
+  if (saved_scbptr >= p->scb_data->maxhscbs)
+  {
+    printk("Bogus SCBPTR %d\n", saved_scbptr);
+    bogus = TRUE;
+  }
+  scb_status[saved_scbptr] = SCB_CURRENTLY_ACTIVE;
+  /* Walk the free list, flagging membership per SCB. */
+  free_scbh = aic_inb(p, FREE_SCBH);
+  if ( (free_scbh != SCB_LIST_NULL) &&
+       (free_scbh >= p->scb_data->maxhscbs) )
+  {
+    printk("Bogus FREE_SCBH %d\n", free_scbh);
+    bogus = TRUE;
+  }
+  else
+  {
+    temp = free_scbh;
+    while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
+    {
+      /* 0x07 masks the three list-membership bits. */
+      if(scb_status[temp] & 0x07)
+      {
+        printk("HSCB %d on multiple lists, status 0x%02x", temp,
+               scb_status[temp] | SCB_FREE_LIST);
+        bogus = TRUE;
+      }
+      scb_status[temp] |= SCB_FREE_LIST;
+      aic_outb(p, temp, SCBPTR);
+      temp = aic_inb(p, SCB_NEXT);
+    }
+  }
+
+  /* Walk the disconnected list. */
+  dis_scbh = aic_inb(p, DISCONNECTED_SCBH);
+  if ( (dis_scbh != SCB_LIST_NULL) &&
+       (dis_scbh >= p->scb_data->maxhscbs) )
+  {
+    printk("Bogus DISCONNECTED_SCBH %d\n", dis_scbh);
+    bogus = TRUE;
+  }
+  else
+  {
+    temp = dis_scbh;
+    while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
+    {
+      if(scb_status[temp] & 0x07)
+      {
+        printk("HSCB %d on multiple lists, status 0x%02x", temp,
+               scb_status[temp] | SCB_DISCONNECTED_LIST);
+        bogus = TRUE;
+      }
+      scb_status[temp] |= SCB_DISCONNECTED_LIST;
+      aic_outb(p, temp, SCBPTR);
+      temp = aic_inb(p, SCB_NEXT);
+    }
+  }
+
+  /* Walk the waiting-for-selection list. */
+  wait_scbh = aic_inb(p, WAITING_SCBH);
+  if ( (wait_scbh != SCB_LIST_NULL) &&
+       (wait_scbh >= p->scb_data->maxhscbs) )
+  {
+    printk("Bogus WAITING_SCBH %d\n", wait_scbh);
+    bogus = TRUE;
+  }
+  else
+  {
+    temp = wait_scbh;
+    while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
+    {
+      if(scb_status[temp] & 0x07)
+      {
+        printk("HSCB %d on multiple lists, status 0x%02x", temp,
+               scb_status[temp] | SCB_WAITING_LIST);
+        bogus = TRUE;
+      }
+      scb_status[temp] |= SCB_WAITING_LIST;
+      aic_outb(p, temp, SCBPTR);
+      temp = aic_inb(p, SCB_NEXT);
+    }
+  }
+
+  /* Final pass: per-SCB link sanity, and count unlisted ("lost") SCBs. */
+  lost=0;
+  for(i=0; i < p->scb_data->maxhscbs; i++)
+  {
+    aic_outb(p, i, SCBPTR);
+    temp = aic_inb(p, SCB_NEXT);
+    if ( ((temp != SCB_LIST_NULL) &&
+          (temp >= p->scb_data->maxhscbs)) )
+    {
+      printk("HSCB %d bad, SCB_NEXT invalid(%d).\n", i, temp);
+      bogus = TRUE;
+    }
+    if ( temp == i )
+    {
+      printk("HSCB %d bad, SCB_NEXT points to self.\n", i);
+      bogus = TRUE;
+    }
+    temp = aic_inb(p, SCB_PREV);
+    if ((temp != SCB_LIST_NULL) &&
+        (temp >= p->scb_data->maxhscbs))
+    {
+      printk("HSCB %d bad, SCB_PREV invalid(%d).\n", i, temp);
+      bogus = TRUE;
+    }
+    if (scb_status[i] == 0)
+      lost++;
+    if (lost > 1)
+    {
+      printk("Too many lost scbs.\n");
+      bogus=TRUE;
+    }
+  }
+  /* Restore the card's SCB pointer before resuming the sequencer. */
+  aic_outb(p, saved_scbptr, SCBPTR);
+  unpause_sequencer(p, FALSE);
+  if (bogus)
+  {
+    printk("Bogus parameters found in card SCB array structures.\n");
+    printk("%s\n", buffer);
+    aic7xxx_panic_abort(p, NULL);
+  }
+  return;
+}
+#endif
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_isr
+ *
+ * Description:
+ *   SCSI controller interrupt handler.  Reads INTSTAT and services, in
+ *   order: command completions (CMDCMPLT), hardware errors (BRKADRINT),
+ *   sequencer interrupts (SEQINT), and SCSI bus interrupts (SCSIINT).
+ *-F*************************************************************************/
+static void
+aic7xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+  struct aic7xxx_host *p;
+  unsigned char intstat;
+
+  p = (struct aic7xxx_host *)dev_id;
+
+  /*
+   * Just a few sanity checks.  Make sure that we have an int pending.
+   * Also, if PCI, then we are going to check for a PCI bus error status
+   * should we get too many spurious interrupts.
+   */
+  if (!((intstat = aic_inb(p, INTSTAT)) & INT_PEND))
+  {
+#ifdef CONFIG_PCI
+    if ( (p->chip & AHC_PCI) && (p->spurious_int > 500) &&
+        !(p->flags & AHC_HANDLING_REQINITS) )
+    {
+      if ( aic_inb(p, ERROR) & PCIERRSTAT )
+      {
+        aic7xxx_pci_intr(p);
+      }
+      p->spurious_int = 0;
+    }
+    else if ( !(p->flags & AHC_HANDLING_REQINITS) )
+    {
+      p->spurious_int++;
+    }
+#endif
+    return;
+  }
+
+  p->spurious_int = 0;
+
+  /*
+   * Keep track of interrupts for /proc/scsi
+   */
+  p->isr_count++;
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+  if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) &&
+       (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) )
+    aic7xxx_check_scbs(p, "Bogus settings at start of interrupt.");
+#endif
+
+  /*
+   * Handle all the interrupt sources - especially for SCSI
+   * interrupts, we won't get a second chance at them.
+   */
+  if (intstat & CMDCMPLT)
+  {
+    struct aic7xxx_scb *scb = NULL;
+    Scsi_Cmnd *cmd;
+    unsigned char scb_index;
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+    if(aic7xxx_verbose > 0xffff)
+      printk(INFO_LEAD "Command Complete Int.\n", p->host_no, -1, -1, -1);
+#endif
+
+    /*
+     * Clear interrupt status before running the completion loop.
+     * This eliminates a race condition whereby a command could
+     * complete between the last check of qoutfifo and the
+     * CLRCMDINT statement.  This would result in us thinking the
+     * qoutfifo was empty when it wasn't, and in actuality be a lost
+     * completion interrupt.  With multiple devices or tagged queueing
+     * this could be very bad if we caught all but the last completion
+     * and no more are immediately sent.
+     */
+    aic_outb(p, CLRCMDINT, CLRINT);
+    /*
+     * The sequencer will continue running when it
+     * issues this interrupt. There may be >1 commands
+     * finished, so loop until we've processed them all.
+     */
+
+    while (p->qoutfifo[p->qoutfifonext] != SCB_LIST_NULL)
+    {
+      /* Consume one completed tag, resetting the slot to NULL. */
+      scb_index = p->qoutfifo[p->qoutfifonext];
+      p->qoutfifo[p->qoutfifonext++] = SCB_LIST_NULL;
+      if ( scb_index >= p->scb_data->numscbs )
+        scb = NULL;
+      else
+        scb = p->scb_data->scb_array[scb_index];
+      if (scb == NULL)
+      {
+        printk(WARN_LEAD "CMDCMPLT with invalid SCB index %d\n", p->host_no,
+          -1, -1, -1, scb_index);
+        continue;
+      }
+      else if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
+      {
+        printk(WARN_LEAD "CMDCMPLT without command for SCB %d, SCB flags "
+          "0x%x, cmd 0x%lx\n", p->host_no, -1, -1, -1, scb_index, scb->flags,
+          (unsigned long) scb->cmd);
+        continue;
+      }
+      else if (scb->flags & SCB_QUEUED_ABORT)
+      {
+        /* The command completed while an abort for it was queued. */
+        pause_sequencer(p);
+        if ( ((aic_inb(p, LASTPHASE) & PHASE_MASK) != P_BUSFREE) &&
+             (aic_inb(p, SCB_TAG) == scb->hscb->tag) )
+        {
+          /* Still actively on the bus; leave it for later. */
+          unpause_sequencer(p, FALSE);
+          continue;
+        }
+        aic7xxx_reset_device(p, scb->cmd->target, scb->cmd->channel,
+          scb->cmd->lun, scb->hscb->tag);
+        scb->flags &= ~(SCB_QUEUED_FOR_DONE | SCB_RESET | SCB_ABORT |
+          SCB_QUEUED_ABORT);
+        unpause_sequencer(p, FALSE);
+      }
+      else if (scb->flags & SCB_ABORT)
+      {
+        /*
+         * We started to abort this, but it completed on us, let it
+         * through as successful
+         */
+        scb->flags &= ~(SCB_ABORT|SCB_RESET);
+      }
+      switch (status_byte(scb->hscb->target_status))
+      {
+        case QUEUE_FULL:
+        case BUSY:
+          /* Will be retried; clear status so the retry looks clean. */
+          scb->hscb->target_status = 0;
+          scb->cmd->result = 0;
+          aic7xxx_error(scb->cmd) = DID_OK;
+          break;
+        default:
+          cmd = scb->cmd;
+          if (scb->hscb->residual_SG_segment_count != 0)
+          {
+            aic7xxx_calculate_residual(p, scb);
+          }
+          cmd->result |= (aic7xxx_error(cmd) << 16);
+          aic7xxx_done(p, scb);
+          break;
+      }
+    }
+  }
+
+  if (intstat & BRKADRINT)
+  {
+    /* Fatal-ish hardware/firmware error reported by the chip. */
+    int i;
+    unsigned char errno = aic_inb(p, ERROR);
+
+    printk(KERN_ERR "(scsi%d) BRKADRINT error(0x%x):\n", p->host_no, errno);
+    for (i = 0; i < NUMBER(hard_error); i++)
+    {
+      if (errno & hard_error[i].errno)
+      {
+        printk(KERN_ERR "  %s\n", hard_error[i].errmesg);
+      }
+    }
+    printk(KERN_ERR "(scsi%d)   SEQADDR=0x%x\n", p->host_no,
+      (((aic_inb(p, SEQADDR1) << 8) & 0x100) | aic_inb(p, SEQADDR0)));
+    if (aic7xxx_panic_on_abort)
+      aic7xxx_panic_abort(p, NULL);
+#ifdef CONFIG_PCI
+    if (errno & PCIERRSTAT)
+      aic7xxx_pci_intr(p);
+#endif
+    if (errno & (SQPARERR | ILLOPCODE | ILLSADDR))
+    {
+      sti();
+      panic("aic7xxx: unrecoverable BRKADRINT.\n");
+    }
+    if (errno & ILLHADDR)
+    {
+      printk(KERN_ERR "(scsi%d) BUG! Driver accessed chip without first "
+             "pausing controller!\n", p->host_no);
+    }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+    if (errno & DPARERR)
+    {
+      if (aic_inb(p, DMAPARAMS) & DIRECTION)
+        printk("(scsi%d) while DMAing SCB from host to card.\n", p->host_no);
+      else
+        printk("(scsi%d) while DMAing SCB from card to host.\n", p->host_no);
+    }
+#endif
+    aic_outb(p, CLRPARERR | CLRBRKADRINT, CLRINT);
+    unpause_sequencer(p, FALSE);
+  }
+
+  if (intstat & SEQINT)
+  {
+    aic7xxx_handle_seqint(p, intstat);
+  }
+
+  if (intstat & SCSIINT)
+  {
+    aic7xxx_handle_scsiint(p, intstat);
+  }
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+  if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) &&
+       (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) )
+    aic7xxx_check_scbs(p, "Bogus settings at end of interrupt.");
+#endif
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   do_aic7xxx_isr
+ *
+ * Description:
+ *   This is a gross hack to solve a problem in linux kernels 2.1.85 and
+ *   above.  Please, children, do not try this at home, and if you ever see
+ *   anything like it, please inform the Gross Hack Police immediately.
+ *   Wraps aic7xxx_isr with the appropriate locking for the running kernel
+ *   version, loops while interrupts remain pending, and guards against
+ *   re-entry with the AHC_IN_ISR_BIT flag.
+ *-F*************************************************************************/
+static void
+do_aic7xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+  unsigned long cpu_flags;
+  struct aic7xxx_host *p;
+
+  p = (struct aic7xxx_host *)dev_id;
+  if(!p)
+    return;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,95)
+  spin_lock_irqsave(&io_request_lock, cpu_flags);
+  /* Bail out if another CPU is already in our ISR for this adapter. */
+  if(test_and_set_bit(AHC_IN_ISR_BIT, &p->flags))
+  {
+    return;
+  }
+  /* Service interrupts until the chip reports none pending. */
+  do
+  {
+    aic7xxx_isr(irq, dev_id, regs);
+  } while ( (aic_inb(p, INTSTAT) & INT_PEND) );
+  aic7xxx_done_cmds_complete(p);
+  aic7xxx_run_waiting_queues(p);
+  clear_bit(AHC_IN_ISR_BIT, &p->flags);
+  spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+#else
+  /* Pre-2.1.95 kernels: old-style set_bit test plus DRIVER_LOCK. */
+  if(set_bit(AHC_IN_ISR_BIT, (int *)&p->flags))
+  {
+    return;
+  }
+  DRIVER_LOCK
+  do
+  {
+    aic7xxx_isr(irq, dev_id, regs);
+  } while ( (aic_inb(p, INTSTAT) & INT_PEND) );
+  DRIVER_UNLOCK
+  aic7xxx_done_cmds_complete(p);
+  aic7xxx_run_waiting_queues(p);
+  clear_bit(AHC_IN_ISR_BIT, (int *)&p->flags);
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_device_queue_depth
+ *
+ * Description:
+ *   Determines the queue depth for a given device.  There are two ways
+ *   a queue depth can be obtained for a tagged queueing device.  One
+ *   way is the default queue depth which is determined by whether
+ *   AIC7XXX_CMDS_PER_DEVICE is defined.  If it is defined, then it is used
+ *   as the default queue depth.  Otherwise, we use either 4 or 8 as the
+ *   default queue depth (dependent on the number of hardware SCBs).
+ *   The other way we determine queue depth is through the use of the
+ *   aic7xxx_tag_info array which is enabled by defining
+ *   AIC7XXX_TAGGED_QUEUEING_BY_DEVICE.  This array can be initialized
+ *   with queue depths for individual devices.  It also allows tagged
+ *   queueing to be [en|dis]abled for a specific adapter.
+ *-F*************************************************************************/
+static void
+aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
+{
+  int default_depth = 3;
+  unsigned char tindex;
+  unsigned short target_mask;
+
+  /* tindex combines channel and target id into one table index. */
+  tindex = device->id | (device->channel << 3);
+  target_mask = (1 << tindex);
+
+  /* Start with untagged defaults; upgraded below if tagging works out. */
+  device->queue_depth = default_depth;
+  p->dev_temp_queue_depth[tindex] = 1;
+  p->dev_max_queue_depth[tindex] = 1;
+  p->tagenable &= ~target_mask;
+
+  if (device->tagged_supported)
+  {
+    int tag_enabled = TRUE;
+
+    default_depth = AIC7XXX_CMDS_PER_DEVICE;
+
+    /* Tagged queueing requires disconnection privileges. */
+    if (!(p->discenable & target_mask))
+    {
+      if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+        printk(INFO_LEAD "Disconnection disabled, unable to "
+             "enable tagged queueing.\n",
+             p->host_no, device->channel, device->id, device->lun);
+    }
+    else
+    {
+      if (p->instance >= NUMBER(aic7xxx_tag_info))
+      {
+        /* More controllers installed than tag_info entries; warn once. */
+        static int print_warning = TRUE;
+        if(print_warning)
+        {
+          printk(KERN_INFO "aic7xxx: WARNING, insufficient tag_info instances for"
+                           " installed controllers.\n");
+          printk(KERN_INFO "aic7xxx: Please update the aic7xxx_tag_info array in"
+                           " the aic7xxx.c source file.\n");
+          print_warning = FALSE;
+        }
+        device->queue_depth = default_depth;
+      }
+      else
+      {
+
+        /* Per-device entry: 255 disables tagging, 0 means default. */
+        if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 255)
+        {
+          tag_enabled = FALSE;
+          device->queue_depth = 3;  /* Tagged queueing is disabled. */
+        }
+        else if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 0)
+        {
+          device->queue_depth = default_depth;
+        }
+        else
+        {
+          device->queue_depth =
+            aic7xxx_tag_info[p->instance].tag_commands[tindex];
+        }
+      }
+      if ((device->tagged_queue == 0) && tag_enabled)
+      {
+        /* First time enabling tags for this device: commit the depth. */
+        if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+        {
+              printk(INFO_LEAD "Enabled tagged queuing, queue depth %d.\n",
+                p->host_no, device->channel, device->id,
+                device->lun, device->queue_depth);
+        }
+        p->dev_max_queue_depth[tindex] = device->queue_depth;
+        p->dev_temp_queue_depth[tindex] = device->queue_depth;
+        p->tagenable |= target_mask;
+        p->orderedtag |= target_mask;
+        device->tagged_queue = 1;
+        device->current_tag = SCB_LIST_NULL;
+      }
+    }
+  }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_select_queue_depth
+ *
+ * Description:
+ * Sets the queue depth for each SCSI device hanging off the input
+ * host adapter. We use a queue depth of 2 for devices that do not
+ * support tagged queueing. If AIC7XXX_CMDS_PER_LUN is defined, we
+ * use that for tagged queueing devices; otherwise we use our own
+ * algorithm for determining the queue depth based on the maximum
+ * SCBs for the controller.
+ *-F*************************************************************************/
+static void
+aic7xxx_select_queue_depth(struct Scsi_Host *host,
+ Scsi_Device *scsi_devs)
+{
+ Scsi_Device *device;
+ struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata;
+ int scbnum;
+
+ /*
+ * Walk every device the mid-layer knows about, pick the ones that
+ * hang off this host, set each one's queue depth, and total up the
+ * depths so we know how many SCBs this adapter needs.
+ */
+ scbnum = 0;
+ for (device = scsi_devs; device != NULL; device = device->next)
+ {
+ if (device->host == host)
+ {
+ aic7xxx_device_queue_depth(p, device);
+ scbnum += device->queue_depth;
+ }
+ }
+ while (scbnum > p->scb_data->numscbs)
+ {
+ /*
+ * Pre-allocate the needed SCBs to get around the possibility of having
+ * to allocate some when memory is more or less exhausted and we need
+ * the SCB in order to perform a swap operation (possible deadlock)
+ */
+ if ( aic7xxx_allocate_scb(p) == 0 )
+ return; /* Allocation failed; run with however many SCBs we have. */
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_probe
+ *
+ * Description:
+ * Probing for EISA boards: it looks like the first two bytes
+ * are a manufacturer code - three characters, five bits each:
+ *
+ * BYTE 0 BYTE 1 BYTE 2 BYTE 3
+ * ?1111122 22233333 PPPPPPPP RRRRRRRR
+ *
+ * The characters are baselined off ASCII '@', so add that value
+ * to each to get the real ASCII code for it. The next two bytes
+ * appear to be a product and revision number, probably vendor-
+ * specific. This is what is being searched for at each port,
+ * and what should probably correspond to the ID= field in the
+ * ECU's .cfg file for the card - if your card is not detected,
+ * make sure your signature is listed in the array.
+ *
+ * The fourth byte's lowest bit seems to be an enabled/disabled
+ * flag (rest of the bits are reserved?).
+ *
+ * NOTE: This function is only needed on Intel and Alpha platforms,
+ * the other platforms we support don't have EISA/VLB busses. So,
+ * we #ifdef this entire function to avoid compiler warnings about
+ * an unused function.
+ *-F*************************************************************************/
+#if defined(__i386__) || defined(__alpha__)
+static int
+aic7xxx_probe(int slot, int base, ahc_flag_type *flags)
+{
+ int i;
+ unsigned char buf[4];
+
+ /* Table of known EISA/VLB ID signatures and the chip each maps to. */
+ static struct {
+ int n;
+ unsigned char signature[sizeof(buf)];
+ ahc_chip type;
+ int bios_disabled;
+ } AIC7xxx[] = {
+ { 4, { 0x04, 0x90, 0x77, 0x70 },
+ AHC_AIC7770|AHC_EISA, FALSE }, /* mb 7770 */
+ { 4, { 0x04, 0x90, 0x77, 0x71 },
+ AHC_AIC7770|AHC_EISA, FALSE }, /* host adapter 274x */
+ { 4, { 0x04, 0x90, 0x77, 0x56 },
+ AHC_AIC7770|AHC_VL, FALSE }, /* 284x BIOS enabled */
+ { 4, { 0x04, 0x90, 0x77, 0x57 },
+ AHC_AIC7770|AHC_VL, TRUE } /* 284x BIOS disabled */
+ };
+
+ /*
+ * The VL-bus cards need to be primed by
+ * writing before a signature check.
+ */
+ for (i = 0; i < sizeof(buf); i++)
+ {
+ outb(0x80 + i, base);
+ buf[i] = inb(base + i);
+ }
+
+ for (i = 0; i < NUMBER(AIC7xxx); i++)
+ {
+ /*
+ * Signature match on enabled card?
+ */
+ if (!memcmp(buf, AIC7xxx[i].signature, AIC7xxx[i].n))
+ {
+ /* Lowest bit of the fourth ID byte = card enabled (see header). */
+ if (inb(base + 4) & 1)
+ {
+ if (AIC7xxx[i].bios_disabled)
+ {
+ *flags |= AHC_USEDEFAULTS;
+ }
+ else
+ {
+ *flags |= AHC_BIOS_ENABLED;
+ }
+ return (i); /* Index into AIC7xxx[] identifies the matched card. */
+ }
+
+ printk("aic7xxx: <Adaptec 7770 SCSI Host Adapter> "
+ "disabled at slot %d, ignored.\n", slot);
+ }
+ }
+
+ return (-1); /* No signature matched: no supported adapter at 'base'. */
+}
+#endif /* (__i386__) || (__alpha__) */
+
+
+/*+F*************************************************************************
+ * Function:
+ * read_284x_seeprom
+ *
+ * Description:
+ * Reads the 2840 serial EEPROM and returns 1 if successful and 0 if
+ * not successful.
+ *
+ * See read_seeprom (for the 2940) for the instruction set of the 93C46
+ * chip.
+ *
+ * The 2840 interface to the 93C46 serial EEPROM is through the
+ * STATUS_2840 and SEECTL_2840 registers. The CS_2840, CK_2840, and
+ * DO_2840 bits of the SEECTL_2840 register are connected to the chip
+ * select, clock, and data out lines respectively of the serial EEPROM.
+ * The DI_2840 bit of the STATUS_2840 is connected to the data in line
+ * of the serial EEPROM. The EEPROM_TF bit of STATUS_2840 register is
+ * useful in that it gives us an 800 nsec timer. After a read from the
+ * SEECTL_2840 register the timing flag is cleared and goes high 800 nsec
+ * later.
+ *-F*************************************************************************/
+static int
+read_284x_seeprom(struct aic7xxx_host *p, struct seeprom_config *sc)
+{
+ int i = 0, k = 0;
+ unsigned char temp;
+ unsigned short checksum = 0;
+ unsigned short *seeprom = (unsigned short *) sc;
+ struct seeprom_cmd {
+ unsigned char len;
+ unsigned char bits[3];
+ };
+ /* READ instruction: start bit 1 then opcode bits 1,0 (93C46 "READ"). */
+ struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+
+/* Wait out one EEPROM_TF period (~800 nsec), then restart the timer by
+ * reading SEECTL_2840 (see function header). Do not reorder around I/O. */
+#define CLOCK_PULSE(p) \
+ while ((aic_inb(p, STATUS_2840) & EEPROM_TF) == 0) \
+ { \
+ ; /* Do nothing */ \
+ } \
+ (void) aic_inb(p, SEECTL_2840);
+
+ /*
+ * Read the first 32 registers of the seeprom. For the 2840,
+ * the 93C46 SEEPROM is a 1024-bit device with 64 16-bit registers
+ * but only the first 32 are used by Adaptec BIOS. The loop
+ * will range from 0 to 31.
+ */
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ /*
+ * Send chip select for one clock cycle.
+ */
+ aic_outb(p, CK_2840 | CS_2840, SEECTL_2840);
+ CLOCK_PULSE(p);
+
+ /*
+ * Now we're ready to send the read command followed by the
+ * address of the 16-bit register we want to read.
+ */
+ for (i = 0; i < seeprom_read.len; i++)
+ {
+ temp = CS_2840 | seeprom_read.bits[i];
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ temp = temp ^ CK_2840;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+ /*
+ * Send the 6 bit address (MSB first, LSB last).
+ */
+ for (i = 5; i >= 0; i--)
+ {
+ temp = k;
+ temp = (temp >> i) & 1; /* Mask out all but lower bit. */
+ temp = CS_2840 | temp;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ temp = temp ^ CK_2840;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * Now read the 16 bit register. An initial 0 precedes the
+ * register contents which begins with bit 15 (MSB) and ends
+ * with bit 0 (LSB). The initial 0 will be shifted off the
+ * top of our word as we let the loop run from 0 to 16.
+ */
+ for (i = 0; i <= 16; i++)
+ {
+ temp = CS_2840;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ temp = temp ^ CK_2840;
+ seeprom[k] = (seeprom[k] << 1) | (aic_inb(p, STATUS_2840) & DI_2840);
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+ /*
+ * The serial EEPROM has a checksum in the last word. Keep a
+ * running checksum for all words read except for the last
+ * word. We'll verify the checksum after all words have been
+ * read.
+ */
+ if (k < (sizeof(*sc) / 2) - 1)
+ {
+ checksum = checksum + seeprom[k];
+ }
+
+ /*
+ * Reset the chip select for the next command cycle.
+ */
+ aic_outb(p, 0, SEECTL_2840);
+ CLOCK_PULSE(p);
+ aic_outb(p, CK_2840, SEECTL_2840);
+ CLOCK_PULSE(p);
+ aic_outb(p, 0, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+
+#if 0
+ printk("Computed checksum 0x%x, checksum read 0x%x\n", checksum, sc->checksum);
+ printk("Serial EEPROM:");
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ if (((k % 8) == 0) && (k != 0))
+ {
+ printk("\n ");
+ }
+ printk(" 0x%x", seeprom[k]);
+ }
+ printk("\n");
+#endif
+
+ /* A bad checksum means the config data cannot be trusted; reject it. */
+ if (checksum != sc->checksum)
+ {
+ printk("aic7xxx: SEEPROM checksum error, ignoring SEEPROM settings.\n");
+ return (0);
+ }
+
+ return (1);
+#undef CLOCK_PULSE
+}
+
+/*+F*************************************************************************
+ * Function:
+ * acquire_seeprom
+ *
+ * Description:
+ * Acquires access to the memory port on PCI controllers.
+ *-F*************************************************************************/
+static int
+acquire_seeprom(struct aic7xxx_host *p)
+{
+ int wait;
+
+ /*
+ * Request access of the memory port. When access is
+ * granted, SEERDY will go high. We use a 1 second
+ * timeout which should be near 1 second more than
+ * is needed. Reason: after the 7870 chip reset, there
+ * should be no contention.
+ */
+ aic_outb(p, SEEMS, SEECTL);
+ wait = 1000; /* 1000 msec = 1 second */
+ while ((wait > 0) && ((aic_inb(p, SEECTL) & SEERDY) == 0))
+ {
+ wait--;
+ mdelay(1); /* 1 msec */
+ }
+ /* Timed out: withdraw our request and report failure (0). */
+ if ((aic_inb(p, SEECTL) & SEERDY) == 0)
+ {
+ aic_outb(p, 0, SEECTL);
+ return (0);
+ }
+ return (1); /* Memory port acquired; caller must release_seeprom(). */
+}
+
+/*+F*************************************************************************
+ * Function:
+ * release_seeprom
+ *
+ * Description:
+ * Releases access to the memory port on PCI controllers.
+ *-F*************************************************************************/
+static void
+release_seeprom(struct aic7xxx_host *p)
+{
+ /* Clearing SEECTL drops our claim on the memory port. */
+ aic_outb(p, 0, SEECTL);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * read_seeprom
+ *
+ * Description:
+ * Reads the serial EEPROM and returns 1 if successful and 0 if
+ * not successful.
+ *
+ * The instruction set of the 93C46/56/66 chips is as follows:
+ *
+ * Start OP
+ * Function Bit Code Address Data Description
+ * -------------------------------------------------------------------
+ * READ 1 10 A5 - A0 Reads data stored in memory,
+ * starting at specified address
+ * EWEN 1 00 11XXXX Write enable must precede
+ * all programming modes
+ * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0
+ * WRITE 1 01 A5 - A0 D15 - D0 Writes register
+ * ERAL 1 00 10XXXX Erase all registers
+ * WRAL 1 00 01XXXX D15 - D0 Writes to all registers
+ * EWDS 1 00 00XXXX Disables all programming
+ * instructions
+ * *Note: A value of X for address is a don't care condition.
+ * *Note: The 93C56 and 93C66 have 8 address bits.
+ *
+ *
+ * The 93C46 has a four wire interface: clock, chip select, data in, and
+ * data out. In order to perform one of the above functions, you need
+ * to enable the chip select for a clock period (typically a minimum of
+ * 1 usec, with the clock high and low a minimum of 750 and 250 nsec
+ * respectively. While the chip select remains high, you can clock in
+ * the instructions (above) starting with the start bit, followed by the
+ * OP code, Address, and Data (if needed). For the READ instruction, the
+ * requested 16-bit register contents is read from the data out line but
+ * is preceded by an initial zero (leading 0, followed by 16-bits, MSB
+ * first). The clock cycling from low to high initiates the next data
+ * bit to be sent from the chip.
+ *
+ * The 78xx interface to the 93C46 serial EEPROM is through the SEECTL
+ * register. After successful arbitration for the memory port, the
+ * SEECS bit of the SEECTL register is connected to the chip select.
+ * The SEECK, SEEDO, and SEEDI are connected to the clock, data out,
+ * and data in lines respectively. The SEERDY bit of SEECTL is useful
+ * in that it gives us an 800 nsec timer. After a write to the SEECTL
+ * register, the SEERDY goes high 800 nsec later. The one exception
+ * to this is when we first request access to the memory port. The
+ * SEERDY goes high to signify that access has been granted and, for
+ * this case, has no implied timing.
+ *-F*************************************************************************/
+static int
+read_seeprom(struct aic7xxx_host *p, int offset,
+ unsigned short *scarray, unsigned int len, seeprom_chip_type chip)
+{
+ int i = 0, k;
+ unsigned char temp;
+ unsigned short checksum = 0;
+ struct seeprom_cmd {
+ unsigned char len;
+ unsigned char bits[3];
+ };
+ /* READ instruction: start bit 1 then opcode bits 1,0 (see table above). */
+ struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+
+/* Spin until the 800 nsec SEERDY timer expires (see function header). */
+#define CLOCK_PULSE(p) \
+ while ((aic_inb(p, SEECTL) & SEERDY) == 0) \
+ { \
+ ; /* Do nothing */ \
+ }
+
+ /*
+ * Request access of the memory port.
+ */
+ if (acquire_seeprom(p) == 0)
+ {
+ return (0);
+ }
+
+ /*
+ * Read 'len' registers of the seeprom. For the 7870, the 93C46
+ * SEEPROM is a 1024-bit device with 64 16-bit registers but only
+ * the first 32 are used by Adaptec BIOS. Some adapters use the
+ * 93C56 SEEPROM which is a 2048-bit device. The loop will range
+ * from 0 to 'len' - 1.
+ */
+ for (k = 0; k < len; k++)
+ {
+ /*
+ * Send chip select for one clock cycle.
+ */
+ aic_outb(p, SEEMS | SEECK | SEECS, SEECTL);
+ CLOCK_PULSE(p);
+
+ /*
+ * Now we're ready to send the read command followed by the
+ * address of the 16-bit register we want to read.
+ */
+ for (i = 0; i < seeprom_read.len; i++)
+ {
+ temp = SEEMS | SEECS | (seeprom_read.bits[i] << 1);
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ temp = temp ^ SEECK;
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ }
+ /*
+ * Send the 6 or 8 bit address (MSB first, LSB last).
+ * 'chip' supplies the address width: 6 bits for the 93C46,
+ * 8 bits for the 93C56/66 (see the note in the header above).
+ */
+ for (i = ((int) chip - 1); i >= 0; i--)
+ {
+ temp = k + offset;
+ temp = (temp >> i) & 1; /* Mask out all but lower bit. */
+ temp = SEEMS | SEECS | (temp << 1);
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ temp = temp ^ SEECK;
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * Now read the 16 bit register. An initial 0 precedes the
+ * register contents which begins with bit 15 (MSB) and ends
+ * with bit 0 (LSB). The initial 0 will be shifted off the
+ * top of our word as we let the loop run from 0 to 16.
+ */
+ for (i = 0; i <= 16; i++)
+ {
+ temp = SEEMS | SEECS;
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ temp = temp ^ SEECK;
+ scarray[k] = (scarray[k] << 1) | (aic_inb(p, SEECTL) & SEEDI);
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * The serial EEPROM should have a checksum in the last word.
+ * Keep a running checksum for all words read except for the
+ * last word. We'll verify the checksum after all words have
+ * been read.
+ */
+ if (k < (len - 1))
+ {
+ checksum = checksum + scarray[k];
+ }
+
+ /*
+ * Reset the chip select for the next command cycle.
+ */
+ aic_outb(p, SEEMS, SEECTL);
+ CLOCK_PULSE(p);
+ aic_outb(p, SEEMS | SEECK, SEECTL);
+ CLOCK_PULSE(p);
+ aic_outb(p, SEEMS, SEECTL);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * Release access to the memory port and the serial EEPROM.
+ */
+ release_seeprom(p);
+
+#if 0
+ printk("Computed checksum 0x%x, checksum read 0x%x\n",
+ checksum, scarray[len - 1]);
+ printk("Serial EEPROM:");
+ for (k = 0; k < len; k++)
+ {
+ if (((k % 8) == 0) && (k != 0))
+ {
+ printk("\n ");
+ }
+ printk(" 0x%x", scarray[k]);
+ }
+ printk("\n");
+#endif
+ /* Reject a checksum mismatch; an all-zero checksum (blank part) too. */
+ if ( (checksum != scarray[len - 1]) || (checksum == 0) )
+ {
+ return (0);
+ }
+
+ return (1);
+#undef CLOCK_PULSE
+}
+
+/*+F*************************************************************************
+ * Function:
+ * write_brdctl
+ *
+ * Description:
+ * Writes a value to the BRDCTL register.
+ *-F*************************************************************************/
+static void
+write_brdctl(struct aic7xxx_host *p, unsigned char value)
+{
+ unsigned char brdctl;
+
+ /*
+ * Pick the strobe/chip-select bits for this chip class: the 7895
+ * also needs BRDCS when we're the B channel, Ultra2 parts use a
+ * different strobe bit, everything else uses BRDSTB|BRDCS.
+ */
+ if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ brdctl = BRDSTB;
+ if (p->flags & AHC_CHNLB)
+ brdctl |= BRDCS;
+ }
+ else if (p->features & AHC_ULTRA2)
+ brdctl = 0;
+ else
+ brdctl = BRDSTB | BRDCS;
+ /*
+ * Present the value and walk the strobe through its transitions
+ * with a 1 usec settle after each write. NOTE(review): sequencing
+ * appears hardware-mandated - do not reorder these writes.
+ */
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ brdctl |= value;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ if (p->features & AHC_ULTRA2)
+ brdctl |= BRDSTB_ULTRA2;
+ else
+ brdctl &= ~BRDSTB;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ if (p->features & AHC_ULTRA2)
+ brdctl = 0;
+ else
+ brdctl &= ~BRDCS;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * read_brdctl
+ *
+ * Description:
+ * Reads the BRDCTL register.
+ *-F*************************************************************************/
+static unsigned char
+read_brdctl(struct aic7xxx_host *p)
+{
+ unsigned char brdctl, value;
+
+ /*
+ * Choose the read-enable bits for this chip class (7895 channel B
+ * also needs BRDCS; Ultra2 parts have their own read bit).
+ */
+ if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ brdctl = BRDRW;
+ if (p->flags & AHC_CHNLB)
+ brdctl |= BRDCS;
+ }
+ else if (p->features & AHC_ULTRA2)
+ brdctl = BRDRW_ULTRA2;
+ else
+ brdctl = BRDRW | BRDCS;
+ /* Enable the read, latch the value, then deselect the port. */
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ value = aic_inb(p, BRDCTL);
+ aic_outb(p, 0, BRDCTL);
+ udelay(1);
+ return (value);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic785x_cable_detect
+ *
+ * Description:
+ * Detect the cables that are present on aic785x class controller chips
+ *-F*************************************************************************/
+static void
+aic785x_cable_detect(struct aic7xxx_host *p, int *int_50,
+ int *ext_present, int *eeprom)
+{
+ unsigned char brdctl;
+
+ /* Cycle BRDRW|BRDCS then read BRDCTL to sample the cable lines. */
+ aic_outb(p, BRDRW | BRDCS, BRDCTL);
+ udelay(1);
+ aic_outb(p, 0, BRDCTL);
+ udelay(1);
+ brdctl = aic_inb(p, BRDCTL);
+ udelay(1);
+ /* Cable-present lines are active low; EEPROM bit comes from SPIOCAP. */
+ *int_50 = !(brdctl & BRDDAT5);
+ *ext_present = !(brdctl & BRDDAT6);
+ *eeprom = (aic_inb(p, SPIOCAP) & EEPROM);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic787x_cable_detect
+ *
+ * Description:
+ * Detect the cables that are present on aic787x class controller chips
+ *
+ * NOTE: This function assumes the SEEPROM will have already been acquired
+ * prior to invocation of this function.
+ *-F*************************************************************************/
+static void
+aic787x_cable_detect(struct aic7xxx_host *p, int *int_50, int *int_68,
+ int *ext_present, int *eeprom)
+{
+ unsigned char brdctl;
+
+ /*
+ * First read the status of our cables. Set the rom bank to
+ * 0 since the bank setting serves as a multiplexor for the
+ * cable detection logic. BRDDAT5 controls the bank switch.
+ */
+ write_brdctl(p, 0);
+
+ /*
+ * Now we read the state of the two internal connectors. BRDDAT6
+ * is internal 50, BRDDAT7 is internal 68. For each, the cable is
+ * present if the bit is 0
+ */
+ brdctl = read_brdctl(p);
+ *int_50 = !(brdctl & BRDDAT6);
+ *int_68 = !(brdctl & BRDDAT7);
+
+ /*
+ * Set the bank bit in brdctl and then read the external cable state
+ * and the EEPROM status
+ */
+ write_brdctl(p, BRDDAT5);
+ brdctl = read_brdctl(p);
+
+ /* Bank 1: BRDDAT6 = external cable, BRDDAT7 = EEPROM (active low). */
+ *ext_present = !(brdctl & BRDDAT6);
+ *eeprom = !(brdctl & BRDDAT7);
+
+ /*
+ * We're done, the calling function will release the SEEPROM for us
+ */
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_ultra2_term_detect
+ *
+ * Description:
+ * Detect the termination settings present on ultra2 class controllers
+ *
+ * NOTE: This function assumes the SEEPROM will have already been acquired
+ * prior to invocation of this function.
+ *-F*************************************************************************/
+static void
+aic7xxx_ultra2_term_detect(struct aic7xxx_host *p, int *enableSE_low,
+ int *enableSE_high, int *enableLVD_low,
+ int *enableLVD_high, int *eprom_present)
+{
+ unsigned char brdctl;
+
+ /*
+ * One BRDCTL read yields all five status lines. Out-parameters
+ * receive the raw (unshifted) BRDDAT bit masks: zero / non-zero.
+ */
+ brdctl = read_brdctl(p);
+
+ *eprom_present = (brdctl & BRDDAT7);
+ *enableSE_high = (brdctl & BRDDAT6);
+ *enableSE_low = (brdctl & BRDDAT5);
+ *enableLVD_high = (brdctl & BRDDAT4);
+ *enableLVD_low = (brdctl & BRDDAT3);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * configure_termination
+ *
+ * Description:
+ * Configures the termination settings on PCI adapters that have
+ * SEEPROMs available.
+ *-F*************************************************************************/
+static void
+configure_termination(struct aic7xxx_host *p)
+{
+ int internal50_present = 0;
+ int internal68_present = 0;
+ int external_present = 0;
+ int eprom_present = 0;
+ int enableSE_low = 0;
+ int enableSE_high = 0;
+ int enableLVD_low = 0;
+ int enableLVD_high = 0;
+ unsigned char brddat = 0;
+ unsigned char max_target = 0;
+ unsigned char sxfrctl1 = aic_inb(p, SXFRCTL1);
+
+ /* All BRDCTL/termination work requires ownership of the memory port. */
+ if (acquire_seeprom(p))
+ {
+ if (p->features & (AHC_WIDE|AHC_TWIN))
+ max_target = 16;
+ else
+ max_target = 8;
+ aic_outb(p, SEEMS | SEECS, SEECTL);
+ sxfrctl1 &= ~STPWEN;
+ /* Auto-termination path: detect cables and derive the settings. */
+ if ( (p->adapter_control & CFAUTOTERM) ||
+ (p->features & AHC_ULTRA2) )
+ {
+ if ( (p->adapter_control & CFAUTOTERM) && !(p->features & AHC_ULTRA2) )
+ {
+ printk(KERN_INFO "(scsi%d) Warning - detected auto-termination\n",
+ p->host_no);
+ printk(KERN_INFO "(scsi%d) Please verify driver detected settings are "
+ "correct.\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) If not, then please properly set the device "
+ "termination\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) in the Adaptec SCSI BIOS by hitting CTRL-A "
+ "when prompted\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) during machine bootup.\n", p->host_no);
+ }
+ /* Configure auto termination. */
+
+ if (p->features & AHC_ULTRA2)
+ {
+ if (aic7xxx_override_term == -1)
+ aic7xxx_ultra2_term_detect(p, &enableSE_low, &enableSE_high,
+ &enableLVD_low, &enableLVD_high,
+ &eprom_present);
+ if (!(p->adapter_control & CFSEAUTOTERM))
+ {
+ enableSE_low = (p->adapter_control & CFSTERM);
+ enableSE_high = (p->adapter_control & CFWSTERM);
+ }
+ if (!(p->adapter_control & CFAUTOTERM))
+ {
+ enableLVD_low = enableLVD_high = (p->adapter_control & CFLVDSTERM);
+ }
+ internal50_present = 0;
+ internal68_present = 1;
+ external_present = 1;
+ }
+ else if ( (p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870 )
+ {
+ aic787x_cable_detect(p, &internal50_present, &internal68_present,
+ &external_present, &eprom_present);
+ }
+ else
+ {
+ aic785x_cable_detect(p, &internal50_present, &external_present,
+ &eprom_present);
+ }
+
+ if (max_target <= 8)
+ internal68_present = 0;
+
+ if ( !(p->features & AHC_ULTRA2) )
+ {
+ if (max_target > 8)
+ {
+ printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, "
+ "Ext-68 %s)\n", p->host_no,
+ internal50_present ? "YES" : "NO",
+ internal68_present ? "YES" : "NO",
+ external_present ? "YES" : "NO");
+ }
+ else
+ {
+ printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Ext-50 %s)\n",
+ p->host_no,
+ internal50_present ? "YES" : "NO",
+ external_present ? "YES" : "NO");
+ }
+ }
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no,
+ eprom_present ? "is" : "is not");
+
+ /*
+ * Now set the termination based on what we found. BRDDAT6
+ * controls wide termination enable.
+ * Flash Enable = BRDDAT7
+ * SE High Term Enable = BRDDAT6
+ * SE Low Term Enable = BRDDAT5 (7890)
+ * LVD High Term Enable = BRDDAT4 (7890)
+ */
+ if ( !(p->features & AHC_ULTRA2) &&
+ (internal50_present && internal68_present && external_present) )
+ {
+ printk(KERN_INFO "(scsi%d) Illegal cable configuration!! Only two\n",
+ p->host_no);
+ printk(KERN_INFO "(scsi%d) connectors on the SCSI controller may be "
+ "in use at a time!\n", p->host_no);
+ /*
+ * Force termination (low and high byte) on. This is safer than
+ * leaving it completely off, especially since this message comes
+ * most often from motherboard controllers that don't even have 3
+ * connectors, but instead are failing the cable detection.
+ */
+ internal50_present = external_present = 0;
+ enableSE_high = enableSE_low = 1;
+ }
+
+ if ((max_target > 8) &&
+ ((external_present == 0) || (internal68_present == 0) ||
+ (enableSE_high != 0)))
+ {
+ brddat |= BRDDAT6;
+ p->flags |= AHC_TERM_ENB_SE_HIGH;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
+ p->host_no);
+ }
+
+ /* Low-byte SE term: enabled when at most one connector is in use. */
+ if ( (((internal50_present ? 1 : 0) +
+ (internal68_present ? 1 : 0) +
+ (external_present ? 1 : 0)) <= 1) ||
+ (enableSE_low != 0) )
+ {
+ if (p->features & AHC_ULTRA2)
+ brddat |= BRDDAT5;
+ else
+ sxfrctl1 |= STPWEN;
+ p->flags |= AHC_TERM_ENB_SE_LOW;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
+ p->host_no);
+ }
+
+ if (enableLVD_low != 0)
+ {
+ sxfrctl1 |= STPWEN;
+ p->flags |= AHC_TERM_ENB_LVD;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) LVD Low byte termination Enabled\n",
+ p->host_no);
+ }
+
+ if (enableLVD_high != 0)
+ {
+ brddat |= BRDDAT4;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) LVD High byte termination Enabled\n",
+ p->host_no);
+ }
+ }
+ else
+ {
+ /* Manual termination: honor the SEEPROM adapter-control bits. */
+ if (p->adapter_control & CFSTERM)
+ {
+ if (p->features & AHC_ULTRA2)
+ brddat |= BRDDAT5;
+ else
+ sxfrctl1 |= STPWEN;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
+ p->host_no);
+ }
+
+ if (p->adapter_control & CFWSTERM)
+ {
+ brddat |= BRDDAT6;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
+ p->host_no);
+ }
+ }
+ /* Commit the accumulated BRDDAT bits and updated STPWEN setting. */
+ write_brdctl(p, brddat);
+ release_seeprom(p);
+ aic_outb(p, sxfrctl1, SXFRCTL1);
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * detect_maxscb
+ *
+ * Description:
+ * Detects the maximum number of SCBs for the controller and returns
+ * the count and a mask in p (p->maxscbs, p->qcntmask).
+ *-F*************************************************************************/
+static void
+detect_maxscb(struct aic7xxx_host *p)
+{
+ int i;
+
+ /*
+ * It's possible that we've already done this for multichannel
+ * adapters.
+ */
+ if (p->scb_data->maxhscbs == 0)
+ {
+ /*
+ * We haven't initialized the SCB settings yet. Walk the SCBs to
+ * determine how many there are.
+ */
+ aic_outb(p, 0, FREE_SCBH);
+
+ for (i = 0; i < AIC7XXX_MAXSCB; i++)
+ {
+ /*
+ * Probe SCB i: write a value and read it back through both
+ * SCB i and SCB 0 to confirm the slot exists and is distinct.
+ */
+ aic_outb(p, i, SCBPTR);
+ aic_outb(p, i, SCB_CONTROL);
+ if (aic_inb(p, SCB_CONTROL) != i)
+ break;
+ aic_outb(p, 0, SCBPTR);
+ if (aic_inb(p, SCB_CONTROL) != 0)
+ break;
+
+ aic_outb(p, i, SCBPTR);
+ aic_outb(p, 0, SCB_CONTROL); /* Clear the control byte. */
+ aic_outb(p, i + 1, SCB_NEXT); /* Set the next pointer. */
+ aic_outb(p, i - 1, SCB_PREV); /* Set the prev pointer. */
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG); /* Make the tag invalid. */
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS); /* no busy untagged */
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+1);/* targets active yet */
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+2);
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+3);
+ }
+
+ /* Make sure the last SCB terminates the free list. */
+ aic_outb(p, i - 1, SCBPTR);
+ aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
+
+ /* Ensure we clear the first (0) SCBs control byte. */
+ aic_outb(p, 0, SCBPTR);
+ aic_outb(p, 0, SCB_CONTROL);
+
+ p->scb_data->maxhscbs = i;
+ /*
+ * Use direct indexing instead for speed
+ */
+ if ( i == AIC7XXX_MAXSCB )
+ p->flags &= ~AHC_PAGESCBS;
+ }
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_register
+ *
+ * Description:
+ * Register a Adaptec aic7xxx chip SCSI controller with the kernel.
+ *-F*************************************************************************/
+static int
+aic7xxx_register(Scsi_Host_Template *template, struct aic7xxx_host *p,
+ int reset_delay)
+{
+ int i, result;
+ int max_targets;
+ int found = 1;
+ unsigned char term, scsi_conf;
+ struct Scsi_Host *host;
+
+ /*
+ * Lock out other contenders for our i/o space.
+ */
+ request_region(p->base, MAXREG - MINREG, "aic7xxx");
+
+
+ host = p->host;
+
+ p->scb_data->maxscbs = AIC7XXX_MAXSCB;
+ host->can_queue = AIC7XXX_MAXSCB;
+ host->cmd_per_lun = 3;
+ host->sg_tablesize = AIC7XXX_MAX_SG;
+ host->select_queue_depths = aic7xxx_select_queue_depth;
+ host->this_id = p->scsi_id;
+ host->io_port = p->base;
+ host->n_io_port = 0xFF;
+ host->base = (unsigned char *) p->mbase;
+ host->irq = p->irq;
+ if (p->features & AHC_WIDE)
+ {
+ host->max_id = 16;
+ }
+ if (p->features & AHC_TWIN)
+ {
+ host->max_channel = 1;
+ }
+
+ p->host = host;
+ p->host_no = host->host_no;
+ host->unique_id = p->instance;
+ p->isr_count = 0;
+ p->next = NULL;
+ p->completeq.head = NULL;
+ p->completeq.tail = NULL;
+ scbq_init(&p->scb_data->free_scbs);
+ scbq_init(&p->waiting_scbs);
+ init_timer(&p->dev_timer);
+ p->dev_timer.data = (unsigned long)p;
+ p->dev_timer.function = (void *)aic7xxx_timer;
+ p->dev_timer_active = 0;
+
+ for (i = 0; i < NUMBER(p->untagged_scbs); i++)
+ {
+ p->untagged_scbs[i] = SCB_LIST_NULL;
+ p->qinfifo[i] = SCB_LIST_NULL;
+ p->qoutfifo[i] = SCB_LIST_NULL;
+ }
+ /*
+ * We currently have no commands of any type
+ */
+ p->qinfifonext = 0;
+ p->qoutfifonext = 0;
+
+ for (i = 0; i < MAX_TARGETS; i++)
+ {
+ p->dev_commands_sent[i] = 0;
+ p->dev_flags[i] = 0;
+ p->dev_active_cmds[i] = 0;
+ p->dev_last_queue_full[i] = 0;
+ p->dev_last_queue_full_count[i] = 0;
+ p->dev_max_queue_depth[i] = 1;
+ p->dev_temp_queue_depth[i] = 1;
+ p->dev_expires[i] = 0;
+ scbq_init(&p->delayed_scbs[i]);
+ }
+
+ printk(KERN_INFO "(scsi%d) <%s> found at ", p->host_no,
+ board_names[p->board_name_index]);
+ switch(p->chip)
+ {
+ case (AHC_AIC7770|AHC_EISA):
+ printk("EISA slot %d\n", p->pci_device_fn);
+ break;
+ case (AHC_AIC7770|AHC_VL):
+ printk("VLB slot %d\n", p->pci_device_fn);
+ break;
+ default:
+ printk("PCI %d/%d\n", PCI_SLOT(p->pci_device_fn),
+ PCI_FUNC(p->pci_device_fn));
+ break;
+ }
+ if (p->features & AHC_TWIN)
+ {
+ printk(KERN_INFO "(scsi%d) Twin Channel, A SCSI ID %d, B SCSI ID %d, ",
+ p->host_no, p->scsi_id, p->scsi_id_b);
+ }
+ else
+ {
+ char *channel;
+
+ channel = "";
+
+ if ((p->flags & AHC_MULTI_CHANNEL) != 0)
+ {
+ channel = " A";
+
+ if ( (p->flags & (AHC_CHNLB|AHC_CHNLC)) != 0 )
+ {
+ channel = (p->flags & AHC_CHNLB) ? " B" : " C";
+ }
+ }
+ if (p->features & AHC_WIDE)
+ {
+ printk(KERN_INFO "(scsi%d) Wide ", p->host_no);
+ }
+ else
+ {
+ printk(KERN_INFO "(scsi%d) Narrow ", p->host_no);
+ }
+ printk("Channel%s, SCSI ID=%d, ", channel, p->scsi_id);
+ }
+ aic_outb(p, 0, SEQ_FLAGS);
+
+ detect_maxscb(p);
+
+
+ printk("%d/%d SCBs\n", p->scb_data->maxhscbs, p->scb_data->maxscbs);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk(KERN_INFO "(scsi%d) BIOS %sabled, IO Port 0x%lx, IRQ %d\n",
+ p->host_no, (p->flags & AHC_BIOS_ENABLED) ? "en" : "dis",
+ p->base, p->irq);
+ printk(KERN_INFO "(scsi%d) IO Memory at 0x%lx, MMAP Memory at 0x%lx\n",
+ p->host_no, p->mbase, (unsigned long)p->maddr);
+ }
+
+#ifdef CONFIG_PCI
+ /*
+ * Now that we know our instance number, we can set the flags we need to
+ * force termination if need be.
+ */
+ if (aic7xxx_stpwlev != -1)
+ {
+ /*
+ * This option only applies to PCI controllers.
+ */
+ if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI)
+ {
+ unsigned char devconfig;
+
+#if LINUX_KERNEL_VERSION > KERNEL_VERSION(2,1,92)
+ pci_read_config_byte(p->pdev, DEVCONFIG, &devconfig);
+#else
+ pcibios_read_config_byte(p->pci_bus, p->pci_device_fn,
+ DEVCONFIG, &devconfig);
+#endif
+ if ( (aic7xxx_stpwlev >> p->instance) & 0x01 )
+ {
+ devconfig |= 0x02;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("(scsi%d) Force setting STPWLEV bit\n", p->host_no);
+ }
+ else
+ {
+ devconfig &= ~0x02;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("(scsi%d) Force clearing STPWLEV bit\n", p->host_no);
+ }
+#if LINUX_KERNEL_VERSION > KERNEL_VERSION(2,1,92)
+ pci_write_config_byte(p->pdev, DEVCONFIG, devconfig);
+#else
+ pcibios_write_config_byte(p->pci_bus, p->pci_device_fn,
+ DEVCONFIG, devconfig);
+#endif
+ }
+ }
+#endif
+
+ /*
+ * That took care of devconfig and stpwlev, now for the actual termination
+ * settings.
+ */
+ if (aic7xxx_override_term != -1)
+ {
+ /*
+ * Again, this only applies to PCI controllers. We don't have problems
+ * with the termination on 274x controllers to the best of my knowledge.
+ */
+ if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI)
+ {
+ unsigned char term_override;
+
+ term_override = ( (aic7xxx_override_term >> (p->instance * 4)) & 0x0f);
+ p->adapter_control &=
+ ~(CFSTERM|CFWSTERM|CFLVDSTERM|CFAUTOTERM|CFSEAUTOTERM);
+ if ( (p->features & AHC_ULTRA2) && (term_override & 0x0c) )
+ {
+ p->adapter_control |= CFLVDSTERM;
+ }
+ if (term_override & 0x02)
+ {
+ p->adapter_control |= CFWSTERM;
+ }
+ if (term_override & 0x01)
+ {
+ p->adapter_control |= CFSTERM;
+ }
+ }
+ }
+
+ if ( (p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1) )
+ {
+ if (p->features & AHC_SPIOCAP)
+ {
+ if ( aic_inb(p, SPIOCAP) & SSPIOCPS )
+ /*
+ * Update the settings in sxfrctl1 to match the termination
+ * settings.
+ */
+ configure_termination(p);
+ }
+ else if ((p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870)
+ {
+ configure_termination(p);
+ }
+ }
+
+ /*
+ * Clear out any possible pending interrupts.
+ */
+ aic7xxx_clear_intstat(p);
+
+ /*
+ * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels
+ */
+ if (p->features & AHC_TWIN)
+ {
+ /* Select channel B */
+ aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL);
+
+ term = ((p->flags & AHC_TERM_ENB_B) != 0) ? STPWEN : 0;
+ aic_outb(p, p->scsi_id_b, SCSIID);
+ scsi_conf = aic_inb(p, SCSICONF + 1);
+ aic_outb(p, DFON | SPIOEN, SXFRCTL0);
+ aic_outb(p, (scsi_conf & ENSPCHK) | STIMESEL | term |
+ ENSTIMER | ACTNEGEN, SXFRCTL1);
+ aic_outb(p, 0, SIMODE0);
+ aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1);
+ aic_outb(p, 0, SCSIRATE);
+
+ /* Select channel A */
+ aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL);
+ }
+
+ term = ((p->flags & AHC_TERM_ENB_SE_LOW) != 0) ? STPWEN : 0;
+ if (p->features & AHC_ULTRA2)
+ aic_outb(p, p->scsi_id, SCSIID_ULTRA2);
+ else
+ aic_outb(p, p->scsi_id, SCSIID);
+ scsi_conf = aic_inb(p, SCSICONF);
+ aic_outb(p, DFON | SPIOEN, SXFRCTL0);
+ aic_outb(p, (scsi_conf & ENSPCHK) | STIMESEL | term |
+ ENSTIMER | ACTNEGEN, SXFRCTL1);
+ aic_outb(p, 0, SIMODE0);
+ aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1);
+ aic_outb(p, 0, SCSIRATE);
+ if ( p->features & AHC_ULTRA2)
+ aic_outb(p, 0, SCSIOFFSET);
+
+ /*
+ * Look at the information that board initialization or the board
+ * BIOS has left us. In the lower four bits of each target's
+ * scratch space any value other than 0 indicates that we should
+ * initiate synchronous transfers. If it's zero, the user or the
+ * BIOS has decided to disable synchronous negotiation to that
+ * target so we don't activate the needsdtr flag.
+ */
+ if ((p->features & (AHC_TWIN|AHC_WIDE)) == 0)
+ {
+ max_targets = 8;
+ }
+ else
+ {
+ max_targets = 16;
+ }
+
+ if (!(aic7xxx_no_reset))
+ {
+ /*
+ * If we reset the bus, then clear the transfer settings, else leave
+ * them be
+ */
+ for (i = 0; i < max_targets; i++)
+ {
+ aic_outb(p, 0, TARG_SCSIRATE + i);
+ if (p->features & AHC_ULTRA2)
+ {
+ aic_outb(p, 0, TARG_OFFSET + i);
+ }
+ p->transinfo[i].cur_offset = 0;
+ p->transinfo[i].cur_period = 0;
+ p->transinfo[i].cur_width = MSG_EXT_WDTR_BUS_8_BIT;
+ }
+
+ /*
+ * If we reset the bus, then clear the transfer settings, else leave
+ * them be.
+ */
+ aic_outb(p, 0, ULTRA_ENB);
+ aic_outb(p, 0, ULTRA_ENB + 1);
+ p->ultraenb = 0;
+ }
+
+ /*
+ * Allocate enough hardware scbs to handle the maximum number of
+ * concurrent transactions we can have. We have to make sure that
+ * the allocated memory is contiguous memory. The Linux kmalloc
+ * routine should only allocate contiguous memory, but note that
+ * this could be a problem if kmalloc() is changed.
+ */
+ {
+ size_t array_size;
+ unsigned int hscb_physaddr;
+ unsigned long temp;
+
+ array_size = p->scb_data->maxscbs * sizeof(struct aic7xxx_hwscb);
+ if (p->scb_data->hscbs == NULL)
+ {
+ /*
+ * A little padding so we can align things the way we want
+ */
+ p->scb_data->hscbs = kmalloc(array_size + 0x1f, GFP_ATOMIC);
+ }
+ if (p->scb_data->hscbs == NULL)
+ {
+ printk("(scsi%d) Unable to allocate hardware SCB array; "
+ "failing detection.\n", p->host_no);
+ p->irq = 0;
+ return(0);
+ }
+ /*
+ * Save the actual kmalloc buffer pointer off, then align our
+ * buffer to a 32 byte boundary
+ */
+ p->scb_data->hscb_kmalloc_ptr = p->scb_data->hscbs;
+ temp = (unsigned long)p->scb_data->hscbs;
+ temp += 0x1f;
+ temp &= ~0x1f;
+ p->scb_data->hscbs = (struct aic7xxx_hwscb *)temp;
+ /* At least the control byte of each SCB needs to be 0. */
+ memset(p->scb_data->hscbs, 0, array_size);
+
+ /* Tell the sequencer where it can find the hardware SCB array. */
+ hscb_physaddr = VIRT_TO_BUS(p->scb_data->hscbs);
+ aic_outb(p, hscb_physaddr & 0xFF, HSCB_ADDR);
+ aic_outb(p, (hscb_physaddr >> 8) & 0xFF, HSCB_ADDR + 1);
+ aic_outb(p, (hscb_physaddr >> 16) & 0xFF, HSCB_ADDR + 2);
+ aic_outb(p, (hscb_physaddr >> 24) & 0xFF, HSCB_ADDR + 3);
+
+ /* Set up the fifo areas at the same time */
+ hscb_physaddr = VIRT_TO_BUS(&p->untagged_scbs[0]);
+ aic_outb(p, hscb_physaddr & 0xFF, SCBID_ADDR);
+ aic_outb(p, (hscb_physaddr >> 8) & 0xFF, SCBID_ADDR + 1);
+ aic_outb(p, (hscb_physaddr >> 16) & 0xFF, SCBID_ADDR + 2);
+ aic_outb(p, (hscb_physaddr >> 24) & 0xFF, SCBID_ADDR + 3);
+ }
+
+ /* The Q-FIFOs we just set up are all empty */
+ aic_outb(p, 0, QINPOS);
+ aic_outb(p, 0, KERNEL_QINPOS);
+ aic_outb(p, 0, QOUTPOS);
+
+ if(p->features & AHC_QUEUE_REGS)
+ {
+ aic_outb(p, SCB_QSIZE_256, QOFF_CTLSTA);
+ aic_outb(p, 0, SDSCB_QOFF);
+ aic_outb(p, 0, SNSCB_QOFF);
+ aic_outb(p, 0, HNSCB_QOFF);
+ }
+
+ /*
+ * We don't have any waiting selections or disconnected SCBs.
+ */
+ aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
+ aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
+
+ /*
+ * Message out buffer starts empty
+ */
+ aic_outb(p, MSG_NOOP, MSG_OUT);
+ aic_outb(p, MSG_NOOP, LAST_MSG);
+
+ /*
+ * Set all the other sundry items that haven't been set yet.
+ * This includes just dumping init values to a lot of registers simply
+ * to make sure they've been touched and are ready for use parity wise
+ * speaking.
+ */
+ aic_outb(p, 0, TMODE_CMDADDR);
+ aic_outb(p, 0, TMODE_CMDADDR + 1);
+ aic_outb(p, 0, TMODE_CMDADDR + 2);
+ aic_outb(p, 0, TMODE_CMDADDR + 3);
+ aic_outb(p, 0, TMODE_CMDADDR_NEXT);
+
+ /*
+ * Link us into the list of valid hosts
+ */
+ p->next = first_aic7xxx;
+ first_aic7xxx = p;
+
+ /*
+ * Clear out any possible pending interrupts, again.
+ */
+ aic7xxx_clear_intstat(p);
+
+ /*
+ * Allocate the first set of scbs for this controller. This is to stream-
+ * line code elsewhere in the driver. If we have to check for the existence
+ * of scbs in certain code sections, it slows things down. However, as
+ * soon as we register the IRQ for this card, we could get an interrupt that
+ * includes possibly the SCSI_RSTI interrupt. If we catch that interrupt
+ * then we are likely to segfault if we don't have at least one chunk of
+ * SCBs allocated or add checks all through the reset code to make sure
+ * that the SCBs have been allocated which is an invalid running condition
+ * and therefore I think it's preferable to simply pre-allocate the first
+ * chunk of SCBs.
+ */
+ aic7xxx_allocate_scb(p);
+
+ /*
+ * Load the sequencer program, then re-enable the board -
+ * resetting the AIC-7770 disables it, leaving the lights
+ * on with nobody home.
+ */
+ aic7xxx_loadseq(p);
+
+ /*
+ * Make sure the AUTOFLUSHDIS bit is *not* set in the SBLKCTL register
+ */
+ aic_outb(p, aic_inb(p, SBLKCTL) & ~AUTOFLUSHDIS, SBLKCTL);
+
+ if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+ {
+ aic_outb(p, ENABLE, BCTL); /* Enable the boards BUS drivers. */
+ }
+
+ if ( !(aic7xxx_no_reset) )
+ {
+ if (p->features & AHC_TWIN)
+ {
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) Resetting channel B\n", p->host_no);
+ aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL);
+ aic7xxx_reset_current_bus(p);
+ aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL);
+ }
+ /* Reset SCSI bus A. */
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ { /* In case we are a 3940, 3985, or 7895, print the right channel */
+ char *channel = "";
+ if (p->flags & AHC_MULTI_CHANNEL)
+ {
+ channel = " A";
+ if (p->flags & (AHC_CHNLB|AHC_CHNLC))
+ channel = (p->flags & AHC_CHNLB) ? " B" : " C";
+ }
+ printk(KERN_INFO "(scsi%d) Resetting channel%s\n", p->host_no, channel);
+ }
+
+ /*
+ * Some of the new Ultra2 chipsets need a longer delay after a chip
+ * reset than just the init setup creates, so we have to delay here
+ * before we go into a reset in order to make the chips happy.
+ */
+ if (p->features & AHC_ULTRA2)
+ mdelay(250);
+ aic7xxx_reset_current_bus(p);
+
+ /*
+ * Delay for the reset delay.
+ */
+ if (!reset_delay)
+ aic7xxx_delay(AIC7XXX_RESET_DELAY);
+ }
+ else
+ {
+ if (!reset_delay)
+ {
+ printk(KERN_INFO "(scsi%d) Not resetting SCSI bus. Note: Don't use "
+ "the no_reset\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) option unless you have a verifiable need "
+ "for it.\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) The no_reset option is known to break some "
+ "systems,\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) and is not supported by the driver author\n",
+ p->host_no);
+ aic7xxx_delay(AIC7XXX_RESET_DELAY);
+ }
+ }
+
+ /*
+ * Register IRQ with the kernel. Only allow sharing IRQs with
+ * PCI devices.
+ */
+ if (!(p->chip & AHC_PCI))
+ {
+ result = (request_irq(p->irq, do_aic7xxx_isr, 0, "aic7xxx", p));
+ }
+ else
+ {
+ result = (request_irq(p->irq, do_aic7xxx_isr, SA_SHIRQ,
+ "aic7xxx", p));
+ if (result < 0)
+ {
+ result = (request_irq(p->irq, do_aic7xxx_isr, SA_INTERRUPT | SA_SHIRQ,
+ "aic7xxx", p));
+ }
+ }
+ if (result < 0)
+ {
+ printk(KERN_WARNING "(scsi%d) Couldn't register IRQ %d, ignoring "
+ "controller.\n", p->host_no, p->irq);
+ p->irq = 0;
+ return (0);
+ }
+
+ unpause_sequencer(p, /* unpause_always */ TRUE);
+
+ return (found);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_chip_reset
+ *
+ * Description:
+ * Perform a chip reset on the aic7xxx SCSI controller. The controller
+ * is paused upon return.
+ *-F*************************************************************************/
+/*
+ * Reset the aic7xxx chip itself (not the SCSI bus).  Asserts CHIPRST,
+ * waits up to one second for the chip to acknowledge, then leaves the
+ * sequencer paused.  Returns 0 on success, or -1 when SBLKCTL reports
+ * an adapter type we do not recognize.
+ */
+int
+aic7xxx_chip_reset(struct aic7xxx_host *p)
+{
+  unsigned char sblkctl;
+  int wait;
+
+  /*
+   * For some 274x boards, we must clear the CHIPRST bit and pause
+   * the sequencer. For some reason, this makes the driver work.
+   */
+  aic_outb(p, PAUSE | CHIPRST, HCNTRL);
+
+  /*
+   * In the future, we may call this function as a last resort for
+   * error handling. Let's be nice and not do any unnecessary delays.
+   * Poll for the reset-acknowledge bit rather than sleeping blindly.
+   */
+  wait = 1000; /* 1 second (1000 * 1 msec) */
+  while (--wait && !(aic_inb(p, HCNTRL) & CHIPRSTACK))
+  {
+    mdelay(1); /* 1 msec */
+  }
+
+  pause_sequencer(p);
+
+  /*
+   * Classify the board from the wide/twin-bus bits of SBLKCTL.  PCI
+   * chips never have a second (twin) bus, so mask SELBUSB off for them.
+   */
+  sblkctl = aic_inb(p, SBLKCTL) & (SELBUSB|SELWIDE);
+  if (p->chip & AHC_PCI)
+    sblkctl &= ~SELBUSB;
+  switch( sblkctl )
+  {
+    case 0: /* normal narrow card */
+      break;
+    case 2: /* Wide card */
+      p->features |= AHC_WIDE;
+      break;
+    case 8: /* Twin card */
+      p->features |= AHC_TWIN;
+      p->flags |= AHC_MULTI_CHANNEL;
+      break;
+    default: /* hmmm...we don't know what this is */
+      printk(KERN_WARNING "aic7xxx: Unsupported adapter type %d, ignoring.\n",
+        aic_inb(p, SBLKCTL) & 0x0a);
+      return(-1);
+  }
+  return(0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_alloc
+ *
+ * Description:
+ * Allocate and initialize a host structure. Returns NULL upon error
+ * and a pointer to an aic7xxx_host struct upon success.
+ *-F*************************************************************************/
+static struct aic7xxx_host *
+aic7xxx_alloc(Scsi_Host_Template *sht, struct aic7xxx_host *temp)
+{
+  struct Scsi_Host *host;
+  struct aic7xxx_host *p;
+  int tgt;
+
+  /*
+   * Ask the mid-level SCSI layer for a host structure carrying enough
+   * hostdata space to hold our per-controller state.
+   */
+  host = scsi_register(sht, sizeof(struct aic7xxx_host));
+  if (host == NULL)
+    return (NULL);
+
+  /* Seed our state from the template the probe code filled in. */
+  p = (struct aic7xxx_host *) host->hostdata;
+  memset(p, 0, sizeof(struct aic7xxx_host));
+  *p = *temp;
+  p->host = host;
+
+  p->scb_data = kmalloc(sizeof(scb_data_type), GFP_ATOMIC);
+  if (p->scb_data == NULL)
+  {
+    /*
+     * Out of memory.  Give back the I/O region and the mid-level
+     * registration before bailing out with NULL.
+     */
+    release_region(p->base, MAXREG - MINREG);
+    scsi_unregister(host);
+    return (NULL);
+  }
+  memset(p->scb_data, 0, sizeof(scb_data_type));
+  scbq_init (&p->scb_data->free_scbs);
+
+  p->host_no = host->host_no;
+  p->tagenable = 0;
+  p->orderedtag = 0;
+  for (tgt = 0; tgt < MAX_TARGETS; tgt++)
+  {
+    /* Transfer goals start out cleared; SEEPROM/BIOS data fills them later. */
+    p->transinfo[tgt].goal_period = 0;
+    p->transinfo[tgt].goal_offset = 0;
+    p->transinfo[tgt].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
+  }
+  DRIVER_LOCK_INIT
+  return (p);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_free
+ *
+ * Description:
+ * Frees and releases all resources associated with an instance of
+ * the driver (struct aic7xxx_host *).
+ *-F*************************************************************************/
+static void
+aic7xxx_free(struct aic7xxx_host *p)
+{
+  int idx;
+
+  if (p->scb_data != NULL)
+  {
+    /*
+     * Release the hardware SCB array.  The hscbs pointer was aligned
+     * up inside the buffer we actually kmalloc'ed, so the free must go
+     * through hscb_kmalloc_ptr instead.
+     */
+    if (p->scb_data->hscbs != NULL)
+    {
+      kfree(p->scb_data->hscb_kmalloc_ptr);
+      p->scb_data->hscbs = p->scb_data->hscb_kmalloc_ptr = NULL;
+    }
+
+    /*
+     * Driver SCBs were allocated in chunks on an as-needed basis; the
+     * tail SCB of each chunk carries the pointer to the alloced area,
+     * so only the non-NULL kmalloc_ptr entries get freed.
+     */
+    for (idx = 0; idx < p->scb_data->numscbs; idx++)
+    {
+      if (p->scb_data->scb_array[idx]->kmalloc_ptr != NULL)
+        kfree(p->scb_data->scb_array[idx]->kmalloc_ptr);
+      p->scb_data->scb_array[idx] = NULL;
+    }
+
+    /* Finally, the SCB bookkeeping structure itself. */
+    kfree(p->scb_data);
+  }
+
+  /*
+   * Drop any Scsi_Cmnd structures allocated for wide/sync negotiation.
+   * NOTE(review): the NULL guards are kept as in the original -- do not
+   * assume kfree(NULL) is safe on this kernel vintage.
+   */
+  for (idx = 0; idx < MAX_TARGETS; idx++)
+  {
+    if (p->dev_wdtr_cmnd[idx])
+      kfree(p->dev_wdtr_cmnd[idx]);
+    if (p->dev_sdtr_cmnd[idx])
+      kfree(p->dev_sdtr_cmnd[idx]);
+  }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_load_seeprom
+ *
+ * Description:
+ * Load the seeprom and configure adapter and target settings.
+ * Updates *sxfrctl1 with the termination (STPWEN) setting; returns void.
+ *-F*************************************************************************/
+/*
+ * Read the adapter's serial EEPROM (when it has one) and use its
+ * contents -- or leftover BIOS register settings, or hard defaults --
+ * to fill in per-adapter and per-target configuration: SCSI IDs,
+ * termination, disconnect enables, ultra enables, and sync/wide
+ * transfer goals.  The caller's *sxfrctl1 is updated with the STPWEN
+ * termination bit; the function itself returns void.
+ */
+static void
+aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1)
+{
+  int have_seeprom = 0;
+  int i, max_targets, mask;
+  unsigned char scsirate, scsi_conf;
+  unsigned short scarray[128];
+  struct seeprom_config *sc = (struct seeprom_config *) scarray;
+
+  if (aic7xxx_verbose & VERBOSE_PROBE2)
+  {
+    printk(KERN_INFO "aic7xxx: Loading serial EEPROM...");
+  }
+  switch (p->chip)
+  {
+    case (AHC_AIC7770|AHC_EISA): /* None of these adapters have seeproms. */
+      if (aic_inb(p, SCSICONF) & TERM_ENB)
+        p->flags |= AHC_TERM_ENB_A;
+      if ( (p->features & AHC_TWIN) && (aic_inb(p, SCSICONF + 1) & TERM_ENB) )
+        p->flags |= AHC_TERM_ENB_B;
+      aic_outb(p, 0, DISC_DSB);
+      aic_outb(p, 0, DISC_DSB + 1);
+      break;
+
+    case (AHC_AIC7770|AHC_VL):
+      have_seeprom = read_284x_seeprom(p, (struct seeprom_config *) scarray);
+      break;
+
+    default:
+      /*
+       * Retry the read with the alternate EEPROM type (C46 vs C56/66),
+       * and then again with the larger 128-word size, before giving up.
+       */
+      have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+                                  scarray, p->sc_size, p->sc_type);
+      if (!have_seeprom)
+      {
+        if(p->sc_type == C46)
+          have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+                                      scarray, p->sc_size, C56_66);
+        else
+          have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+                                      scarray, p->sc_size, C46);
+      }
+      if (!have_seeprom)
+      {
+        p->sc_size = 128;
+        have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+                                    scarray, p->sc_size, p->sc_type);
+        if (!have_seeprom)
+        {
+          if(p->sc_type == C46)
+            have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+                                        scarray, p->sc_size, C56_66);
+          else
+            have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+                                        scarray, p->sc_size, C46);
+        }
+      }
+      break;
+  }
+
+  if (!have_seeprom)
+  {
+    if (aic7xxx_verbose & VERBOSE_PROBE2)
+    {
+      printk("\naic7xxx: No SEEPROM available.\n");
+    }
+    p->flags |= AHC_NEWEEPROM_FMT;
+    if (aic_inb(p, SCSISEQ) == 0)
+    {
+      /* SCSISEQ == 0 is treated as "BIOS never ran": use defaults. */
+      p->flags |= AHC_USEDEFAULTS;
+      p->flags &= ~AHC_BIOS_ENABLED;
+      p->scsi_id = p->scsi_id_b = 7;
+      *sxfrctl1 |= STPWEN;
+      if (aic7xxx_verbose & VERBOSE_PROBE2)
+      {
+        printk("aic7xxx: Using default values.\n");
+      }
+    }
+    else if (aic7xxx_verbose & VERBOSE_PROBE2)
+    {
+      printk("aic7xxx: Using leftover BIOS values.\n");
+    }
+    if ( ((p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) && (*sxfrctl1 & STPWEN) )
+    {
+      p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
+      sc->adapter_control &= ~CFAUTOTERM;
+      sc->adapter_control |= CFSTERM | CFWSTERM | CFLVDSTERM;
+    }
+    if (aic7xxx_extended)
+      p->flags |= (AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
+    else
+      p->flags &= ~(AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
+  }
+  else
+  {
+    if (aic7xxx_verbose & VERBOSE_PROBE2)
+    {
+      printk("done\n");
+    }
+
+    /*
+     * Note things in our flags
+     */
+    p->flags |= AHC_SEEPROM_FOUND;
+
+    /*
+     * Update the settings in sxfrctl1 to match the termination settings.
+     */
+    *sxfrctl1 = 0;
+
+    /*
+     * Get our SCSI ID from the SEEPROM setting...
+     */
+    p->scsi_id = (sc->brtime_id & CFSCSIID);
+
+    /*
+     * First process the settings that are different between the VLB
+     * and PCI adapter seeproms.
+     */
+    if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7770)
+    {
+      /* VLB adapter seeproms */
+      if (sc->bios_control & CF284XEXTEND)
+        p->flags |= AHC_EXTEND_TRANS_A;
+
+      if (sc->adapter_control & CF284XSTERM)
+      {
+        *sxfrctl1 |= STPWEN;
+        p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
+      }
+    }
+    else
+    {
+      /* PCI adapter seeproms */
+      if (sc->bios_control & CFEXTEND)
+        p->flags |= AHC_EXTEND_TRANS_A;
+      if (sc->bios_control & CFBIOSEN)
+        p->flags |= AHC_BIOS_ENABLED;
+      else
+        p->flags &= ~AHC_BIOS_ENABLED;
+
+      if (sc->adapter_control & CFSTERM)
+      {
+        *sxfrctl1 |= STPWEN;
+        p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
+      }
+    }
+    /* Keep a copy of the whole seeprom image in the host struct. */
+    p->sc = *sc;
+  }
+
+  p->discenable = 0;
+
+  /*
+   * Limit to 16 targets just in case. The 2842 for one is known to
+   * blow the max_targets setting, future cards might also.
+   */
+  max_targets = MIN(sc->max_targets & CFMAXTARG,
+                    ((p->features & (AHC_TWIN | AHC_WIDE)) ? 16 : 8));
+
+  if (have_seeprom)
+  {
+    /* Detect the newer EEPROM layout from any target's flag bits. */
+    for (i = 0; i < max_targets; i++)
+    {
+      if( ((p->features & AHC_ULTRA) &&
+          !(sc->adapter_control & CFULTRAEN) &&
+           (sc->device_flags[i] & CFSYNCHISULTRA)) ||
+          (sc->device_flags[i] & CFNEWULTRAFORMAT) )
+      {
+        p->flags |= AHC_NEWEEPROM_FMT;
+        break;
+      }
+    }
+  }
+
+  /* Per-target configuration pass. */
+  for (i = 0; i < max_targets; i++)
+  {
+    mask = (0x01 << i);
+    if (!have_seeprom)
+    {
+      if (aic_inb(p, SCSISEQ) != 0)
+      {
+        /*
+         * OK...the BIOS set things up and left behind the settings we need.
+         * Just make our sc->device_flags[i] entry match what the card has
+         * set for this device.
+         */
+        p->discenable =
+          ~(aic_inb(p, DISC_DSB) | (aic_inb(p, DISC_DSB + 1) << 8) );
+        p->ultraenb =
+          (aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8) );
+        sc->device_flags[i] = (p->discenable & mask) ? CFDISC : 0;
+        if (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER)
+          sc->device_flags[i] |= CFWIDEB;
+        if (p->features & AHC_ULTRA2)
+        {
+          if (aic_inb(p, TARG_OFFSET + i))
+          {
+            sc->device_flags[i] |= CFSYNCH;
+            sc->device_flags[i] |= (aic_inb(p, TARG_SCSIRATE + i) & 0x07);
+            if ( (aic_inb(p, TARG_SCSIRATE + i) & 0x18) == 0x18 )
+              sc->device_flags[i] |= CFSYNCHISULTRA;
+          }
+        }
+        else
+        {
+          if (aic_inb(p, TARG_SCSIRATE + i) & ~WIDEXFER)
+          {
+            sc->device_flags[i] |= CFSYNCH;
+            if (p->features & AHC_ULTRA)
+              sc->device_flags[i] |= ((p->ultraenb & mask) ?
+                                      CFSYNCHISULTRA : 0);
+          }
+        }
+      }
+      else
+      {
+        /*
+         * Assume the BIOS has NOT been run on this card and nothing between
+         * the card and the devices is configured yet.
+         */
+        sc->device_flags[i] = CFDISC;
+        if (p->features & AHC_WIDE)
+          sc->device_flags[i] |= CFWIDEB;
+        if (p->features & AHC_ULTRA2)
+          sc->device_flags[i] |= 3;
+        else if (p->features & AHC_ULTRA)
+          sc->device_flags[i] |= CFSYNCHISULTRA;
+        sc->device_flags[i] |= CFSYNCH;
+        aic_outb(p, 0, TARG_SCSIRATE + i);
+        if (p->features & AHC_ULTRA2)
+          aic_outb(p, 0, TARG_OFFSET + i);
+      }
+    }
+    if (sc->device_flags[i] & CFDISC)
+    {
+      p->discenable |= mask;
+    }
+    if (p->flags & AHC_NEWEEPROM_FMT)
+    {
+      if ( (sc->device_flags[i] & CFNEWULTRAFORMAT) &&
+          !(p->features & AHC_ULTRA2) )
+      {
+        /*
+         * I know of two different Ultra BIOSes that do this differently.
+         * One on the Gigabyte 6BXU mb that wants flags[i] & CFXFER to
+         * be == to 0x03 and SYNCISULTRA to be true to mean 40MByte/s
+         * while on the IBM Netfinity 5000 they want the same thing
+         * to be something else, while flags[i] & CFXFER == 0x03 and
+         * SYNCISULTRA false should be 40MByte/s. So, we set both to
+         * 40MByte/s and the lower speeds be damned. People will have
+         * to select around the conversely mapped lower speeds in order
+         * to select lower speeds on these boards.
+         */
+        if ((sc->device_flags[i] & (CFXFER)) == 0x03)
+        {
+          sc->device_flags[i] &= ~CFXFER;
+          sc->device_flags[i] |= CFSYNCHISULTRA;
+        }
+      }
+      if (sc->device_flags[i] & CFSYNCHISULTRA)
+      {
+        p->ultraenb |= mask;
+      }
+    }
+    else if (sc->adapter_control & CFULTRAEN)
+    {
+      p->ultraenb |= mask;
+    }
+    if ( (sc->device_flags[i] & CFSYNCH) == 0)
+    {
+      /* Synchronous negotiation disabled for this target. */
+      sc->device_flags[i] &= ~CFXFER;
+      p->ultraenb &= ~mask;
+      p->transinfo[i].user_offset = 0;
+      p->transinfo[i].user_period = 0;
+      p->transinfo[i].cur_offset = 0;
+      p->transinfo[i].cur_period = 0;
+      p->needsdtr_copy &= ~mask;
+    }
+    else
+    {
+      if (p->features & AHC_ULTRA2)
+      {
+        p->transinfo[i].user_offset = MAX_OFFSET_ULTRA2;
+        p->transinfo[i].cur_offset = aic_inb(p, TARG_OFFSET + i);
+        scsirate = (sc->device_flags[i] & CFXFER) |
+                   ((p->ultraenb & mask) ? 0x18 : 0x10);
+        p->transinfo[i].user_period = aic7xxx_find_period(p, scsirate,
+                                                          AHC_SYNCRATE_ULTRA2);
+        p->transinfo[i].cur_period = aic7xxx_find_period(p,
+                                        aic_inb(p, TARG_SCSIRATE + i),
+                                        AHC_SYNCRATE_ULTRA2);
+      }
+      else
+      {
+        scsirate = (sc->device_flags[i] & CFXFER) << 4;
+        if (sc->device_flags[i] & CFWIDEB)
+          p->transinfo[i].user_offset = MAX_OFFSET_16BIT;
+        else
+          p->transinfo[i].user_offset = MAX_OFFSET_8BIT;
+        if (p->features & AHC_ULTRA)
+        {
+          short ultraenb;
+          ultraenb = aic_inb(p, ULTRA_ENB) |
+                     (aic_inb(p, ULTRA_ENB + 1) << 8);
+          p->transinfo[i].user_period = aic7xxx_find_period(p,
+                                          scsirate,
+                                          (p->ultraenb & mask) ?
+                                          AHC_SYNCRATE_ULTRA :
+                                          AHC_SYNCRATE_FAST);
+          p->transinfo[i].cur_period = aic7xxx_find_period(p,
+                                          aic_inb(p, TARG_SCSIRATE + i),
+                                          (ultraenb & mask) ?
+                                          AHC_SYNCRATE_ULTRA :
+                                          AHC_SYNCRATE_FAST);
+        }
+        else
+          p->transinfo[i].user_period = aic7xxx_find_period(p,
+                                          scsirate, AHC_SYNCRATE_FAST);
+      }
+      p->needsdtr_copy |= mask;
+    }
+    if ( (sc->device_flags[i] & CFWIDEB) && (p->features & AHC_WIDE) )
+    {
+      p->transinfo[i].user_width = MSG_EXT_WDTR_BUS_16_BIT;
+      p->needwdtr_copy |= mask;
+    }
+    else
+    {
+      p->transinfo[i].user_width = MSG_EXT_WDTR_BUS_8_BIT;
+      p->needwdtr_copy &= ~mask;
+    }
+    p->transinfo[i].cur_width =
+      (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER) ?
+      MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
+  }
+  /* Push the disconnect-disable mask down to the card. */
+  aic_outb(p, ~(p->discenable & 0xFF), DISC_DSB);
+  aic_outb(p, ~((p->discenable >> 8) & 0xFF), DISC_DSB + 1);
+  p->needwdtr = p->needwdtr_copy;
+  p->needsdtr = p->needsdtr_copy;
+  p->wdtr_pending = p->sdtr_pending = 0;
+
+  /*
+   * We set the p->ultraenb from the SEEPROM to begin with, but now we make
+   * it match what is already down in the card. If we are doing a reset
+   * on the card then this will get put back to a default state anyway.
+   * This allows us to not have to pre-emptively negotiate when using the
+   * no_reset option.
+   */
+  if (p->features & AHC_ULTRA)
+    p->ultraenb = aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8);
+
+
+  scsi_conf = (p->scsi_id & HSCSIID);
+
+  if(have_seeprom)
+  {
+    p->adapter_control = sc->adapter_control;
+    p->bios_control = sc->bios_control;
+
+    switch (p->chip & AHC_CHIPID_MASK)
+    {
+      case AHC_AIC7895:
+      case AHC_AIC7896:
+        if (p->adapter_control & CFBPRIMARY)
+          p->flags |= AHC_CHANNEL_B_PRIMARY;
+        /* FALLTHROUGH */
+      default:
+        break;
+    }
+
+    if (sc->adapter_control & CFSPARITY)
+      scsi_conf |= ENSPCHK;
+  }
+  else
+  {
+    scsi_conf |= ENSPCHK | RESET_SCSI;
+  }
+
+  /*
+   * Only set the SCSICONF and SCSICONF + 1 registers if we are a PCI card.
+   * The 2842 and 2742 cards already have these registers set and we don't
+   * want to muck with them since we don't set all the bits they do.
+   */
+  if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI )
+  {
+    /* Set the host ID */
+    aic_outb(p, scsi_conf, SCSICONF);
+    /* In case we are a wide card */
+    aic_outb(p, p->scsi_id, SCSICONF + 1);
+  }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_detect
+ *
+ * Description:
+ * Try to detect and register an Adaptec 7770 or 7870 SCSI controller.
+ *
+ * XXX - This should really be called aic7xxx_probe(). A sequence of
+ * probe(), attach()/detach(), and init() makes more sense than
+ * one do-it-all function. This may be useful when (and if) the
+ * mid-level SCSI code is overhauled.
+ *-F*************************************************************************/
+int
+aic7xxx_detect(Scsi_Host_Template *template)
+{
+ struct aic7xxx_host *temp_p = NULL;
+ struct aic7xxx_host *current_p = NULL;
+ struct aic7xxx_host *list_p = NULL;
+ int found = 0;
+#if defined(__i386__) || defined(__alpha__)
+ ahc_flag_type flags = 0;
+ int type;
+#endif
+ unsigned char sxfrctl1;
+#if defined(__i386__) || defined(__alpha__)
+ unsigned char hcntrl, hostconf;
+ unsigned int slot, base;
+#endif
+
+#ifdef MODULE
+ /*
+ * If we are called as a module, the aic7xxx pointer may not be null
+ * and it would point to our bootup string, just like on the lilo
+ * command line. IF not NULL, then process this config string with
+ * aic7xxx_setup
+ */
+ if(aic7xxx)
+ aic7xxx_setup(aic7xxx, NULL);
+ if(dummy_buffer[0] != 'P')
+ printk(KERN_WARNING "aic7xxx: Please read the file /usr/src/linux/drivers"
+ "/scsi/README.aic7xxx\n"
+ "aic7xxx: to see the proper way to specify options to the aic7xxx "
+ "module\n"
+ "aic7xxx: Specifically, don't use any commas when passing arguments to\n"
+ "aic7xxx: insmod or else it might trash certain memory areas.\n");
+#endif
+
+ template->proc_dir = &proc_scsi_aic7xxx;
+ template->sg_tablesize = AIC7XXX_MAX_SG;
+
+
+#if defined(__i386__) || defined(__alpha__)
+ /*
+ * EISA/VL-bus card signature probe.
+ */
+ slot = MINSLOT;
+ while ( (slot <= MAXSLOT) && !(aic7xxx_no_probe) )
+ {
+ base = SLOTBASE(slot) + MINREG;
+
+ if (check_region(base, MAXREG - MINREG))
+ {
+ /*
+ * Some other driver has staked a
+ * claim to this i/o region already.
+ */
+ slot++;
+ continue; /* back to the beginning of the for loop */
+ }
+ flags = 0;
+ type = aic7xxx_probe(slot, base + AHC_HID0, &flags);
+ if (type == -1)
+ {
+ slot++;
+ continue;
+ }
+ temp_p = kmalloc(sizeof(struct aic7xxx_host), GFP_ATOMIC);
+ if (temp_p == NULL)
+ {
+ printk(KERN_WARNING "aic7xxx: Unable to allocate device space.\n");
+ slot++;
+ continue; /* back to the beginning of the while loop */
+ }
+ /*
+ * Pause the card preserving the IRQ type. Allow the operator
+ * to override the IRQ trigger.
+ */
+ if (aic7xxx_irq_trigger == 1)
+ hcntrl = IRQMS; /* Level */
+ else if (aic7xxx_irq_trigger == 0)
+ hcntrl = 0; /* Edge */
+ else
+ hcntrl = inb(base + HCNTRL) & IRQMS; /* Default */
+ memset(temp_p, 0, sizeof(struct aic7xxx_host));
+ temp_p->unpause = hcntrl | INTEN;
+ temp_p->pause = hcntrl | PAUSE | INTEN;
+ temp_p->base = base;
+ temp_p->mbase = 0;
+ temp_p->maddr = 0;
+ temp_p->pci_bus = 0;
+ temp_p->pci_device_fn = slot;
+ aic_outb(temp_p, hcntrl | PAUSE, HCNTRL);
+ while( (aic_inb(temp_p, HCNTRL) & PAUSE) == 0 ) ;
+ if (aic7xxx_chip_reset(temp_p) == -1)
+ temp_p->irq = 0;
+ else
+ temp_p->irq = aic_inb(temp_p, INTDEF) & 0x0F;
+ temp_p->flags |= AHC_PAGESCBS;
+
+ switch (temp_p->irq)
+ {
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 14:
+ case 15:
+ break;
+
+ default:
+ printk(KERN_WARNING "aic7xxx: Host adapter uses unsupported IRQ "
+ "level %d, ignoring.\n", temp_p->irq);
+ kfree(temp_p);
+ slot++;
+ continue; /* back to the beginning of the while loop */
+ }
+
+ /*
+ * We are committed now, everything has been checked and this card
+ * has been found, now we just set it up
+ */
+
+ /*
+ * Insert our new struct into the list at the end
+ */
+ if (list_p == NULL)
+ {
+ list_p = current_p = temp_p;
+ }
+ else
+ {
+ current_p = list_p;
+ while (current_p->next != NULL)
+ current_p = current_p->next;
+ current_p->next = temp_p;
+ }
+
+ switch (type)
+ {
+ case 0:
+ temp_p->board_name_index = 2;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at EISA %d\n",
+ board_names[2], slot);
+ /* FALLTHROUGH */
+ case 1:
+ {
+ temp_p->chip = AHC_AIC7770 | AHC_EISA;
+ temp_p->features |= AHC_AIC7770_FE;
+ temp_p->bios_control = aic_inb(temp_p, HA_274_BIOSCTRL);
+
+ /*
+ * Get the primary channel information. Right now we don't
+ * do anything with this, but someday we will be able to inform
+ * the mid-level SCSI code which channel is primary.
+ */
+ if (temp_p->board_name_index == 0)
+ {
+ temp_p->board_name_index = 3;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at EISA %d\n",
+ board_names[3], slot);
+ }
+ if (temp_p->bios_control & CHANNEL_B_PRIMARY)
+ {
+ temp_p->flags |= AHC_CHANNEL_B_PRIMARY;
+ }
+
+ if ((temp_p->bios_control & BIOSMODE) == BIOSDISABLED)
+ {
+ temp_p->flags &= ~AHC_BIOS_ENABLED;
+ }
+ else
+ {
+ temp_p->flags &= ~AHC_USEDEFAULTS;
+ temp_p->flags |= AHC_BIOS_ENABLED;
+ if ( (temp_p->bios_control & 0x20) == 0 )
+ {
+ temp_p->bios_address = 0xcc000;
+ temp_p->bios_address += (0x4000 * (temp_p->bios_control & 0x07));
+ }
+ else
+ {
+ temp_p->bios_address = 0xd0000;
+ temp_p->bios_address += (0x8000 * (temp_p->bios_control & 0x06));
+ }
+ }
+ temp_p->adapter_control = aic_inb(temp_p, SCSICONF) << 8;
+ temp_p->adapter_control |= aic_inb(temp_p, SCSICONF + 1);
+ if (temp_p->features & AHC_WIDE)
+ {
+ temp_p->scsi_id = temp_p->adapter_control & HWSCSIID;
+ temp_p->scsi_id_b = temp_p->scsi_id;
+ }
+ else
+ {
+ temp_p->scsi_id = (temp_p->adapter_control >> 8) & HSCSIID;
+ temp_p->scsi_id_b = temp_p->adapter_control & HSCSIID;
+ }
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ }
+
+ case 2:
+ case 3:
+ temp_p->chip = AHC_AIC7770 | AHC_VL;
+ temp_p->features |= AHC_AIC7770_FE;
+ if (type == 2)
+ temp_p->flags |= AHC_BIOS_ENABLED;
+ else
+ temp_p->flags &= ~AHC_BIOS_ENABLED;
+ if (aic_inb(temp_p, SCSICONF) & TERM_ENB)
+ sxfrctl1 = STPWEN;
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ temp_p->board_name_index = 4;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at VLB %d\n",
+ board_names[2], slot);
+ switch( aic_inb(temp_p, STATUS_2840) & BIOS_SEL )
+ {
+ case 0x00:
+ temp_p->bios_address = 0xe0000;
+ break;
+ case 0x20:
+ temp_p->bios_address = 0xc8000;
+ break;
+ case 0x40:
+ temp_p->bios_address = 0xd0000;
+ break;
+ case 0x60:
+ temp_p->bios_address = 0xd8000;
+ break;
+ default:
+ break; /* can't get here */
+ }
+ break;
+
+ default: /* Won't get here. */
+ break;
+ }
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk(KERN_INFO "aic7xxx: BIOS %sabled, IO Port 0x%lx, IRQ %d (%s)\n",
+ (temp_p->flags & AHC_USEDEFAULTS) ? "dis" : "en", temp_p->base,
+ temp_p->irq,
+ (temp_p->pause & IRQMS) ? "level sensitive" : "edge triggered");
+ printk(KERN_INFO "aic7xxx: Extended translation %sabled.\n",
+ (temp_p->flags & AHC_EXTEND_TRANS_A) ? "en" : "dis");
+ }
+
+ /*
+ * Set the FIFO threshold and the bus off time.
+ */
+ hostconf = aic_inb(temp_p, HOSTCONF);
+ aic_outb(temp_p, hostconf & DFTHRSH, BUSSPD);
+ aic_outb(temp_p, (hostconf << 2) & BOFF, BUSTIME);
+ slot++;
+ found++;
+ }
+
+#endif /* defined(__i386__) || defined(__alpha__) */
+
+#ifdef CONFIG_PCI
+ /*
+ * PCI-bus probe.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ if (pci_present())
+#else
+ if (pcibios_present())
+#endif
+ {
+ struct
+ {
+ unsigned short vendor_id;
+ unsigned short device_id;
+ ahc_chip chip;
+ ahc_flag_type flags;
+ ahc_feature features;
+ int board_name_index;
+ unsigned short seeprom_size;
+ unsigned short seeprom_type;
+ } const aic_pdevs[] = {
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7810, AHC_NONE,
+ AHC_FNONE, AHC_FENONE, 1,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7850, AHC_AIC7850,
+ AHC_PAGESCBS, AHC_AIC7850_FE, 5,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7855, AHC_AIC7850,
+ AHC_PAGESCBS, AHC_AIC7850_FE, 6,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7821, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_3860, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7860, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7861, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 8,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7870, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 9,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7871, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 10,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7872, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7870_FE, 11,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7873, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7870_FE, 12,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7874, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 13,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7880, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 14,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7881, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 15,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7882, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7880_FE, 16,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7883, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7880_FE, 17,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7884, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7885, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7886, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7887, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7888, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7895, AHC_AIC7895,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7895_FE, 19,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 20,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890B, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 20,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2930U2, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 21,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2940U2, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 22,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7896, AHC_AIC7896,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7896_FE, 23,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3940U2, AHC_AIC7896,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7896_FE, 24,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3950U2D, AHC_AIC7896,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7896_FE, 25,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_1480A, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 26,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892A, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892B, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892D, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892P, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899A, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899B, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899D, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899P, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ };
+
+ unsigned short command;
+ unsigned int devconfig, i, oldverbose;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ struct pci_dev *pdev = NULL;
+#else
+ int index;
+ unsigned int piobase, mmapbase;
+ unsigned char pci_bus, pci_devfn, pci_irq;
+#endif
+
+ for (i = 0; i < NUMBER(aic_pdevs); i++)
+ {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pdev = NULL;
+ while ((pdev = pci_find_device(aic_pdevs[i].vendor_id,
+ aic_pdevs[i].device_id,
+ pdev)))
+#else
+ index = 0;
+ while (!(pcibios_find_device(aic_pdevs[i].vendor_id,
+ aic_pdevs[i].device_id,
+ index++, &pci_bus, &pci_devfn)) )
+#endif
+ {
+ if ( i == 0 ) /* We found one, but it's the 7810 RAID cont. */
+ {
+ if (aic7xxx_verbose & (VERBOSE_PROBE|VERBOSE_PROBE2))
+ {
+ printk(KERN_INFO "aic7xxx: The 7810 RAID controller is not "
+ "supported by\n");
+ printk(KERN_INFO " this driver, we are ignoring it.\n");
+ }
+ }
+ else if ( (temp_p = kmalloc(sizeof(struct aic7xxx_host),
+ GFP_ATOMIC)) != NULL )
+ {
+ memset(temp_p, 0, sizeof(struct aic7xxx_host));
+ temp_p->chip = aic_pdevs[i].chip | AHC_PCI;
+ temp_p->flags = aic_pdevs[i].flags;
+ temp_p->features = aic_pdevs[i].features;
+ temp_p->board_name_index = aic_pdevs[i].board_name_index;
+ temp_p->sc_size = aic_pdevs[i].seeprom_size;
+ temp_p->sc_type = aic_pdevs[i].seeprom_type;
+
+ /*
+ * Read sundry information from PCI BIOS.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ temp_p->irq = pdev->irq;
+ temp_p->pdev = pdev;
+ temp_p->pci_bus = pdev->bus->number;
+ temp_p->pci_device_fn = pdev->devfn;
+ temp_p->base = pdev->base_address[0];
+ temp_p->mbase = pdev->base_address[1];
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pdev->devfn),
+ PCI_FUNC(temp_p->pdev->devfn));
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial PCI_COMMAND value was 0x%x\n",
+ (int)command);
+ }
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_MASTER |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#else
+ command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#endif
+ if (aic7xxx_pci_parity == 0)
+ command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig);
+ }
+ devconfig |= 0x80000000;
+ if ((aic7xxx_pci_parity == 0) || (aic7xxx_pci_parity == -1))
+ {
+ devconfig &= ~(0x00000008);
+ }
+ else
+ {
+ devconfig |= 0x00000008;
+ }
+ pci_write_config_dword(pdev, DEVCONFIG, devconfig);
+#endif /* AIC7XXX_STRICT_PCI_SETUP */
+#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92) */
+ temp_p->pci_bus = pci_bus;
+ temp_p->pci_device_fn = pci_devfn;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pci_device_fn),
+ PCI_FUNC(temp_p->pci_device_fn));
+ pcibios_read_config_byte(pci_bus, pci_devfn, PCI_INTERRUPT_LINE,
+ &pci_irq);
+ temp_p->irq = pci_irq;
+ pcibios_read_config_dword(pci_bus, pci_devfn, PCI_BASE_ADDRESS_0,
+ &piobase);
+ temp_p->base = piobase;
+ pcibios_read_config_dword(pci_bus, pci_devfn, PCI_BASE_ADDRESS_1,
+ &mmapbase);
+ temp_p->mbase = mmapbase;
+ pcibios_read_config_word(pci_bus, pci_devfn, PCI_COMMAND, &command);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial PCI_COMMAND value was 0x%x\n",
+ (int)command);
+ }
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_MASTER |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#else
+ command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#endif
+ if (aic7xxx_pci_parity == 0)
+ command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pcibios_write_config_word(pci_bus, pci_devfn, PCI_COMMAND, command);
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG, &devconfig);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig);
+ }
+ devconfig |= 0x80000000;
+ if ((aic7xxx_pci_parity == 0) || (aic7xxx_pci_parity == -1))
+ {
+ devconfig &= ~(0x00000008);
+ }
+ else
+ {
+ devconfig |= 0x00000008;
+ }
+ pcibios_write_config_dword(pci_bus, pci_devfn, DEVCONFIG, devconfig);
+#endif /* AIC7XXX_STRICT_PCI_SETUP */
+#endif /* LINUIX_VERSION_CODE > KERNEL_VERSION(2,1,92) */
+
+ /*
+ * The first bit (LSB) of PCI_BASE_ADDRESS_0 is always set, so
+ * we mask it off.
+ */
+ temp_p->base &= PCI_BASE_ADDRESS_IO_MASK;
+ temp_p->mbase &= PCI_BASE_ADDRESS_MEM_MASK;
+ temp_p->unpause = INTEN;
+ temp_p->pause = temp_p->unpause | PAUSE;
+ if ( ((temp_p->base == 0) &&
+ (temp_p->mbase == 0)) ||
+ (temp_p->irq == 0) )
+ {
+ printk("aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pci_device_fn),
+ PCI_FUNC(temp_p->pci_device_fn));
+ printk("aic7xxx: Controller disabled by BIOS, ignoring.\n");
+ kfree(temp_p);
+ temp_p = NULL;
+ continue;
+ }
+
+#ifdef MMAPIO
+ {
+ unsigned long page_offset, base;
+
+ base = temp_p->mbase & PAGE_MASK;
+ page_offset = temp_p->mbase - base;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0)
+ temp_p->maddr = ioremap_nocache(base, page_offset + 256);
+#else
+ temp_p->maddr = vremap(base, page_offset + 256);
+#endif
+ if(temp_p->maddr)
+ {
+ temp_p->maddr += page_offset;
+ /*
+ * We need to check the I/O with the MMAPed address. Some machines
+ * simply fail to work with MMAPed I/O and certain controllers.
+ */
+ if(aic_inb(temp_p, HCNTRL) == 0xff)
+ {
+ /*
+ * OK.....we failed our test....go back to programmed I/O
+ */
+ printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pci_device_fn),
+ PCI_FUNC(temp_p->pci_device_fn));
+ printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to "
+ "Programmed I/O.\n");
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0)
+ iounmap((void *) (((unsigned long) temp_p->maddr) & PAGE_MASK));
+#else
+ vfree((void *) (((unsigned long) temp_p->maddr) & PAGE_MASK));
+#endif
+ temp_p->maddr = 0;
+ }
+ }
+ }
+#endif
+
+ /*
+ * We HAVE to make sure the first pause_sequencer() and all other
+ * subsequent I/O that isn't PCI config space I/O takes place
+ * after the MMAPed I/O region is configured and tested. The
+ * problem is the PowerPC architecture that doesn't support
+ * programmed I/O at all, so we have to have the MMAP I/O set up
+ * for this pause to even work on those machines.
+ */
+ pause_sequencer(temp_p);
+
+ /*
+ * Clear out any pending PCI error status messages. Also set
+ * verbose to 0 so that we don't emit strange PCI error messages
+ * while cleaning out the current status bits.
+ */
+ oldverbose = aic7xxx_verbose;
+ aic7xxx_verbose = 0;
+ aic7xxx_pci_intr(temp_p);
+ aic7xxx_verbose = oldverbose;
+
+ temp_p->bios_address = 0;
+
+ /*
+ * Remember how the card was setup in case there is no seeprom.
+ */
+ if (temp_p->features & AHC_ULTRA2)
+ temp_p->scsi_id = aic_inb(temp_p, SCSIID_ULTRA2) & OID;
+ else
+ temp_p->scsi_id = aic_inb(temp_p, SCSIID) & OID;
+ /*
+ * Get current termination setting
+ */
+ sxfrctl1 = aic_inb(temp_p, SXFRCTL1) & STPWEN;
+
+ if (aic7xxx_chip_reset(temp_p) == -1)
+ {
+ kfree(temp_p);
+ temp_p = NULL;
+ continue;
+ }
+
+ /*
+ * We need to set the CHNL? assignments before loading the SEEPROM
+ * The 3940 and 3985 cards (original stuff, not any of the later
+ * stuff) are 7870 and 7880 class chips. The Ultra2 stuff falls
+ * under 7896 and 7897. The 7895 is in a class by itself :)
+ */
+ switch (temp_p->chip & AHC_CHIPID_MASK)
+ {
+ case AHC_AIC7870: /* 3840 / 3985 */
+ case AHC_AIC7880: /* 3840 UW / 3985 UW */
+ if(temp_p->flags & AHC_MULTI_CHANNEL)
+ {
+ switch(PCI_SLOT(temp_p->pci_device_fn))
+ {
+ case 5:
+ temp_p->flags |= AHC_CHNLB;
+ break;
+ case 8:
+ temp_p->flags |= AHC_CHNLB;
+ break;
+ case 12:
+ temp_p->flags |= AHC_CHNLC;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ case AHC_AIC7895: /* 7895 */
+ case AHC_AIC7896: /* 7896/7 */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ if (PCI_FUNC(temp_p->pdev->devfn) != 0)
+ {
+ temp_p->flags |= AHC_CHNLB;
+ }
+ /*
+ * The 7895 is the only chipset that sets the SCBSIZE32 param
+ * in the DEVCONFIG register. The Ultra2 chipsets use
+ * the DSCOMMAND0 register instead.
+ */
+ if ((temp_p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+ devconfig |= SCBSIZE32;
+ pci_write_config_dword(pdev, DEVCONFIG, devconfig);
+ }
+#else
+ if (PCI_FUNC(temp_p->pci_device_fn) != 0)
+ {
+ temp_p->flags |= AHC_CHNLB;
+ }
+ /*
+ * The 7895 is the only chipset that sets the SCBSIZE32 param
+ * in the DEVCONFIG register. The Ultra2 chipsets use
+ * the DSCOMMAND0 register instead.
+ */
+ if ((temp_p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ &devconfig);
+ devconfig |= SCBSIZE32;
+ pcibios_write_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ devconfig);
+ }
+#endif
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Loading of the SEEPROM needs to come after we've set the flags
+ * to indicate possible CHNLB and CHNLC assigments. Otherwise,
+ * on 394x and 398x cards we'll end up reading the wrong settings
+ * for channels B and C
+ */
+ switch (temp_p->chip & AHC_CHIPID_MASK)
+ {
+ case AHC_AIC7890:
+ case AHC_AIC7896:
+ aic_outb(temp_p, 0, SCAMCTL);
+ /*
+ * We used to set DPARCKEN in this register, but after talking
+ * to a tech from Adaptec, I found out they don't use that
+ * particular bit in their own register settings, and when you
+ * combine that with the fact that I determined that we were
+ * seeing Data-Path Parity Errors on things we shouldn't see
+ * them on, I think there is a bug in the silicon and the way
+ * to work around it is to disable this particular check. Also
+ * This bug only showed up on certain commands, so it seems to
+ * be pattern related or some such. The commands we would
+ * typically send as a linux TEST_UNIT_READY or INQUIRY command
+ * could cause it to be triggered, while regular commands that
+ * actually made reasonable use of the SG array capabilities
+ * seemed not to cause the problem.
+ */
+ /*
+ aic_outb(temp_p, aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | DPARCKEN | MPARCKEN |
+ USCBSIZE32 | CIOPARCKEN,
+ DSCOMMAND0);
+ */
+ aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | MPARCKEN | USCBSIZE32 |
+ CIOPARCKEN) & ~DPARCKEN, DSCOMMAND0);
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ case AHC_AIC7850:
+ case AHC_AIC7860:
+ /*
+ * Set the DSCOMMAND0 register on these cards different from
+ * on the 789x cards. Also, read the SEEPROM as well.
+ */
+ aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | MPARCKEN) & ~DPARCKEN,
+ DSCOMMAND0);
+ /* FALLTHROUGH */
+ default:
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ case AHC_AIC7880:
+ /*
+ * Check the rev of the chipset before we change DSCOMMAND0
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+#else
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ &devconfig);
+#endif
+ if ((devconfig & 0xff) >= 1)
+ {
+ aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | MPARCKEN) & ~DPARCKEN,
+ DSCOMMAND0);
+ }
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ }
+
+
+ /*
+ * and then we need another switch based on the type in order to
+ * make sure the channel B primary flag is set properly on 7895
+ * controllers....Arrrgggghhh!!! We also have to catch the fact
+ * that when you disable the BIOS on the 7895 on the Intel DK440LX
+ * motherboard, and possibly others, it only sets the BIOS disabled
+ * bit on the A channel...I think I'm starting to lean towards
+ * going postal....
+ */
+ switch(temp_p->chip & AHC_CHIPID_MASK)
+ {
+ case AHC_AIC7895:
+ case AHC_AIC7896:
+ current_p = list_p;
+ while(current_p != NULL)
+ {
+ if ( (current_p->pci_bus == temp_p->pci_bus) &&
+ (PCI_SLOT(current_p->pci_device_fn) ==
+ PCI_SLOT(temp_p->pci_device_fn)) )
+ {
+ if ( PCI_FUNC(current_p->pci_device_fn) == 0 )
+ {
+ temp_p->flags |=
+ (current_p->flags & AHC_CHANNEL_B_PRIMARY);
+ temp_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS);
+ temp_p->flags |=
+ (current_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS));
+ }
+ else
+ {
+ current_p->flags |=
+ (temp_p->flags & AHC_CHANNEL_B_PRIMARY);
+ current_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS);
+ current_p->flags |=
+ (temp_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS));
+ }
+ }
+ current_p = current_p->next;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * We only support external SCB RAM on the 7895/6/7 chipsets.
+ * We could support it on the 7890/1 easy enough, but I don't
+ * know of any 7890/1 based cards that have it. I do know
+ * of 7895/6/7 cards that have it and they work properly.
+ */
+ switch(temp_p->chip & AHC_CHIPID_MASK)
+ {
+ default:
+ break;
+ case AHC_AIC7895:
+ case AHC_AIC7896:
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+#else
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ &devconfig);
+#endif
+ if (temp_p->features & AHC_ULTRA2)
+ {
+ if (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2)
+ {
+ aic_outb(temp_p,
+ aic_inb(temp_p, DSCOMMAND0) & ~SCBRAMSEL_ULTRA2,
+ DSCOMMAND0);
+ temp_p->flags |= AHC_EXTERNAL_SRAM;
+ devconfig |= EXTSCBPEN;
+ }
+ }
+ else if (devconfig & RAMPSM)
+ {
+ devconfig &= ~SCBRAMSEL;
+ devconfig |= EXTSCBPEN;
+ temp_p->flags |= AHC_EXTERNAL_SRAM;
+ }
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_write_config_dword(pdev, DEVCONFIG, devconfig);
+#else
+ pcibios_write_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ devconfig);
+#endif
+ if ( (temp_p->flags & AHC_EXTERNAL_SRAM) &&
+ (temp_p->flags & AHC_CHNLB) )
+ aic_outb(temp_p, 1, CCSCBBADDR);
+ break;
+ }
+
+ /*
+ * Take the LED out of diagnostic mode
+ */
+ aic_outb(temp_p,
+ (aic_inb(temp_p, SBLKCTL) & ~(DIAGLEDEN | DIAGLEDON)),
+ SBLKCTL);
+
+ /*
+ * We don't know where this is set in the SEEPROM or by the
+ * BIOS, so we default to 100%. On Ultra2 controllers, use 75%
+ * instead.
+ */
+ if (temp_p->features & AHC_ULTRA2)
+ {
+ aic_outb(temp_p, RD_DFTHRSH_75 | WR_DFTHRSH_75, DFF_THRSH);
+ }
+ else
+ {
+ aic_outb(temp_p, DFTHRSH_100, DSPCISTATUS);
+ }
+
+ if ( list_p == NULL )
+ {
+ list_p = current_p = temp_p;
+ }
+ else
+ {
+ current_p = list_p;
+ while(current_p->next != NULL)
+ current_p = current_p->next;
+ current_p->next = temp_p;
+ }
+ temp_p->next = NULL;
+ found++;
+ } /* Found an Adaptec PCI device. */
+ else /* Well, we found one, but we couldn't get any memory */
+ {
+ printk("aic7xxx: Found <%s>\n",
+ board_names[aic_pdevs[i].board_name_index]);
+ printk(KERN_INFO "aic7xxx: Unable to allocate device memory, "
+ "skipping.\n");
+ }
+ } /* while(pdev=....) */
+ } /* for PCI_DEVICES */
+ } /* PCI BIOS present */
+#endif CONFIG_PCI
+ /*
+ * Now, we re-order the probed devices by BIOS address and BUS class.
+ * In general, we follow this algorithm to make the adapters show up
+ * in the same order under linux that the computer finds them.
+ * 1: All VLB/EISA cards with BIOS_ENABLED first, according to BIOS
+ * address, going from lowest to highest.
+ * 2: All PCI controllers with BIOS_ENABLED next, according to BIOS
+ * address, going from lowest to highest.
+ * 3: Remaining VLB/EISA controllers going in slot order.
+ * 4: Remaining PCI controllers, going in PCI device order (reversable)
+ */
+
+ {
+ struct aic7xxx_host *sort_list[4] = { NULL, NULL, NULL, NULL };
+ struct aic7xxx_host *vlb, *pci;
+ struct aic7xxx_host *prev_p;
+ struct aic7xxx_host *p;
+ unsigned char left;
+
+ prev_p = vlb = pci = NULL;
+
+ temp_p = list_p;
+ while (temp_p != NULL)
+ {
+ switch(temp_p->chip & ~AHC_CHIPID_MASK)
+ {
+ case AHC_EISA:
+ case AHC_VL:
+ {
+ p = temp_p;
+ if (p->flags & AHC_BIOS_ENABLED)
+ vlb = sort_list[0];
+ else
+ vlb = sort_list[2];
+
+ if (vlb == NULL)
+ {
+ vlb = temp_p;
+ temp_p = temp_p->next;
+ vlb->next = NULL;
+ }
+ else
+ {
+ current_p = vlb;
+ prev_p = NULL;
+ while ( (current_p != NULL) &&
+ (current_p->bios_address < temp_p->bios_address))
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ if (prev_p != NULL)
+ {
+ prev_p->next = temp_p;
+ temp_p = temp_p->next;
+ prev_p->next->next = current_p;
+ }
+ else
+ {
+ vlb = temp_p;
+ temp_p = temp_p->next;
+ vlb->next = current_p;
+ }
+ }
+
+ if (p->flags & AHC_BIOS_ENABLED)
+ sort_list[0] = vlb;
+ else
+ sort_list[2] = vlb;
+
+ break;
+ }
+ default: /* All PCI controllers fall through to default */
+ {
+
+ p = temp_p;
+ if (p->flags & AHC_BIOS_ENABLED)
+ pci = sort_list[1];
+ else
+ pci = sort_list[3];
+
+ if (pci == NULL)
+ {
+ pci = temp_p;
+ temp_p = temp_p->next;
+ pci->next = NULL;
+ }
+ else
+ {
+ current_p = pci;
+ prev_p = NULL;
+ if (!aic7xxx_reverse_scan)
+ {
+ while ( (current_p != NULL) &&
+ ( (PCI_SLOT(current_p->pci_device_fn) |
+ (current_p->pci_bus << 8)) <
+ (PCI_SLOT(temp_p->pci_device_fn) |
+ (temp_p->pci_bus << 8)) ) )
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ else
+ {
+ while ( (current_p != NULL) &&
+ ( (PCI_SLOT(current_p->pci_device_fn) |
+ (current_p->pci_bus << 8)) >
+ (PCI_SLOT(temp_p->pci_device_fn) |
+ (temp_p->pci_bus << 8)) ) )
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ /*
+ * Are we dealing with a 7985 where we need to sort the
+ * channels as well, if so, the bios_address values should
+ * be the same
+ */
+ if ( (current_p) && (temp_p->flags & AHC_MULTI_CHANNEL) &&
+ (temp_p->pci_bus == current_p->pci_bus) &&
+ (PCI_SLOT(temp_p->pci_device_fn) ==
+ PCI_SLOT(current_p->pci_device_fn)) )
+ {
+ if (temp_p->flags & AHC_CHNLB)
+ {
+ if ( !(temp_p->flags & AHC_CHANNEL_B_PRIMARY) )
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ else
+ {
+ if (temp_p->flags & AHC_CHANNEL_B_PRIMARY)
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ }
+ if (prev_p != NULL)
+ {
+ prev_p->next = temp_p;
+ temp_p = temp_p->next;
+ prev_p->next->next = current_p;
+ }
+ else
+ {
+ pci = temp_p;
+ temp_p = temp_p->next;
+ pci->next = current_p;
+ }
+ }
+
+ if (p->flags & AHC_BIOS_ENABLED)
+ sort_list[1] = pci;
+ else
+ sort_list[3] = pci;
+
+ break;
+ }
+ } /* End of switch(temp_p->type) */
+ } /* End of while (temp_p != NULL) */
+ /*
+ * At this point, the cards have been broken into 4 sorted lists, now
+ * we run through the lists in order and register each controller
+ */
+ {
+ int i;
+
+ left = found;
+ for (i=0; i<NUMBER(sort_list); i++)
+ {
+ temp_p = sort_list[i];
+ while(temp_p != NULL)
+ {
+ template->name = board_names[temp_p->board_name_index];
+ p = aic7xxx_alloc(template, temp_p);
+ if (p != NULL)
+ {
+ p->instance = found - left;
+ if (aic7xxx_register(template, p, (--left)) == 0)
+ {
+ found--;
+ aic7xxx_release(p->host);
+ scsi_unregister(p->host);
+ }
+ else if (aic7xxx_dump_card)
+ {
+ pause_sequencer(p);
+ aic7xxx_print_card(p);
+ aic7xxx_print_scratch_ram(p);
+ unpause_sequencer(p, TRUE);
+ }
+ }
+ current_p = temp_p;
+ temp_p = (struct aic7xxx_host *)temp_p->next;
+ kfree(current_p);
+ }
+ }
+ }
+ }
+ return (found);
+}
+
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_negotiation_complete
+ *
+ * Description:
+ *   Completion callback for the fake negotiation commands.  Intentionally
+ *   a no-op: the cached command structure is re-initialized in place by
+ *   aic7xxx_build_negotiation_cmnd() the next time it is needed, so there
+ *   is nothing to clean up here.
+-F*************************************************************************/
+static void
+aic7xxx_negotiation_complete(Scsi_Cmnd *cmd)
+{
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_alloc_negotiation_cmnd
+ *
+ * Description:
+ *   Allocate and initialize a fake Scsi_Cmnd used solely to carry a
+ *   WDTR/SDTR negotiation message.  The command is cloned from old_cmd
+ *   and then stripped of all data-transfer state so it moves no data.
+ *   Returns NULL if the GFP_ATOMIC allocation fails; the caller treats
+ *   that as "skip negotiation for now".
+-F*************************************************************************/
+static Scsi_Cmnd *
+aic7xxx_alloc_negotiation_cmnd(Scsi_Cmnd *old_cmd)
+{
+  Scsi_Cmnd *cmd;
+
+  cmd = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC);
+  if (cmd == NULL)
+  {
+    return(NULL);
+  }
+  /*
+   * Clone the real command.  The memcpy() writes every byte of the
+   * struct, so the memset()-to-zero the old code performed first was
+   * redundant and has been dropped.
+   */
+  memcpy(cmd, old_cmd, sizeof(Scsi_Cmnd));
+  memset(&cmd->cmnd[0], 0, sizeof(cmd->cmnd));
+  memset(&cmd->data_cmnd[0], 0, sizeof(cmd->data_cmnd));
+  cmd->lun = 0;
+  cmd->request_bufflen = 0;
+  cmd->request_buffer = NULL;
+  cmd->use_sg = cmd->old_use_sg = cmd->sglist_len = 0;
+  cmd->bufflen = 0;
+  cmd->buffer = NULL;
+  cmd->underflow = 0;
+  cmd->cmd_len = 6;
+  return(cmd);
+}
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_build_negotiation_cmnd
+ *
+ * Description:
+ *   Build a Scsi_Cmnd structure to perform negotiation with, or else send
+ *   a pre-built command specifically for this purpose.  WDTR takes
+ *   priority over SDTR; an SDTR command is only issued when no WDTR is
+ *   needed or pending for this target.
+-F*************************************************************************/
+static void
+aic7xxx_build_negotiation_cmnd(struct aic7xxx_host *p, Scsi_Cmnd *old_cmd,
+  int tindex)
+{
+
+  if ( (p->needwdtr & (1<<tindex)) && !(p->wdtr_pending & (1<<tindex)) )
+  {
+    if (p->dev_wdtr_cmnd[tindex] == NULL)
+    {
+      p->dev_wdtr_cmnd[tindex] = aic7xxx_alloc_negotiation_cmnd(old_cmd);
+      if (p->dev_wdtr_cmnd[tindex] == NULL)
+      {
+        return;
+      }
+    }
+    /*
+     * Before sending this thing out, we also make the cmd->next pointer
+     * point to the real command so we can stuff any possible SENSE data
+     * into the real command instead of this fake command.  This has to be
+     * done each time the command is built, not just the first time, hence
+     * it's outside of the above if()...
+     */
+    p->dev_wdtr_cmnd[tindex]->next = old_cmd;
+    aic7xxx_queue(p->dev_wdtr_cmnd[tindex],
+                  aic7xxx_negotiation_complete);
+  }
+  else if ( (p->needsdtr & (1<<tindex)) && !(p->sdtr_pending & (1<<tindex)) &&
+            !(p->wdtr_pending & (1<<tindex)) )
+  {
+    if (p->dev_sdtr_cmnd[tindex] == NULL)
+    {
+      p->dev_sdtr_cmnd[tindex] = aic7xxx_alloc_negotiation_cmnd(old_cmd);
+      if (p->dev_sdtr_cmnd[tindex] == NULL)
+      {
+        return;
+      }
+    }
+    /*
+     * Same as the WDTR case above: re-link the fake command to the real
+     * one on every build so SENSE data lands in the real command.
+     */
+    p->dev_sdtr_cmnd[tindex]->next = old_cmd;
+    aic7xxx_queue(p->dev_sdtr_cmnd[tindex],
+                  aic7xxx_negotiation_complete);
+  }
+}
+
+#endif
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_print_scb
+ *
+ * Description:
+ *   Dump the byte codes for an about to be sent SCB.  Prints the first
+ *   32 bytes of the hardware SCB (starting at the control byte) as
+ *   space-separated hex pairs on a single line.
+-F*************************************************************************/
+static void
+aic7xxx_print_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+  unsigned char *bytes = (unsigned char *)&scb->hscb->control;
+  int byte;
+
+  for (byte = 0; byte < 32; byte++)
+    printk("%02x ", bytes[byte]);
+  printk("\n");
+}
+#endif
+
+/*+F*************************************************************************
+ * Function:
+ *   aic7xxx_buildscb
+ *
+ * Description:
+ *   Build a SCB.  Fills in the hardware SCB (hscb) and the driver-side
+ *   scb state from the mid-level Scsi_Cmnd: control byte (disconnect,
+ *   tagged queuing, negotiation message-out), target/channel/lun byte,
+ *   CDB pointer/length, and the scatter-gather list downloaded to the
+ *   adapter.
+-F*************************************************************************/
+static void
+aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
+    struct aic7xxx_scb *scb)
+{
+  unsigned short mask;              /* single bit for this target index */
+  struct aic7xxx_hwscb *hscb;
+
+  mask = (0x01 << TARGET_INDEX(cmd));
+  hscb = scb->hscb;
+
+  /*
+   * Setup the control byte if we need negotiation and have not
+   * already requested it.
+   */
+  hscb->control = 0;
+  scb->tag_action = 0;
+  if (p->discenable & mask)
+  {
+    hscb->control |= DISCENB;
+    if (p->tagenable & mask)
+    {
+      cmd->tag = hscb->tag;
+      /*
+       * Per-target command counter: every 200th command uses an ordered
+       * tag (when enabled for this target) instead of a simple tag, and
+       * the counter is reset.
+       */
+      p->dev_commands_sent[TARGET_INDEX(cmd)]++;
+      if (p->dev_commands_sent[TARGET_INDEX(cmd)] < 200)
+      {
+        hscb->control |= MSG_SIMPLE_Q_TAG;
+        scb->tag_action = MSG_SIMPLE_Q_TAG;
+      }
+      else
+      {
+        if (p->orderedtag & mask)
+        {
+          hscb->control |= MSG_ORDERED_Q_TAG;
+          scb->tag_action = MSG_ORDERED_Q_TAG;
+        }
+        else
+        {
+          hscb->control |= MSG_SIMPLE_Q_TAG;
+          scb->tag_action = MSG_SIMPLE_Q_TAG;
+        }
+        p->dev_commands_sent[TARGET_INDEX(cmd)] = 0;
+      }
+    }
+  }
+  /* Negotiation is only attempted on targets already scanned. */
+  if (p->dev_flags[TARGET_INDEX(cmd)] & DEVICE_SCANNED)
+  {
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+    if ( (p->needwdtr & mask) && !(p->wdtr_pending & mask) )
+    {
+      if (cmd == p->dev_wdtr_cmnd[TARGET_INDEX(cmd)])
+      {
+        p->wdtr_pending |= mask;
+        scb->flags |= SCB_MSGOUT_WDTR;
+        /*
+         * &= DISCENB clears every control bit except the disconnect
+         * enable, so no tag bits remain on the negotiation command.
+         */
+        hscb->control &= DISCENB;
+        hscb->control |= MK_MESSAGE;
+        scb->tag_action = 0;
+      }
+      else
+      {
+        aic7xxx_build_negotiation_cmnd(p, cmd, TARGET_INDEX(cmd));
+      }
+    }
+    else if ( (p->needsdtr & mask) && !(p->sdtr_pending & mask) &&
+              !(p->wdtr_pending & mask) )
+    {
+      if (cmd == p->dev_sdtr_cmnd[TARGET_INDEX(cmd)])
+      {
+        p->sdtr_pending |= mask;
+        scb->flags |= SCB_MSGOUT_SDTR;
+        hscb->control &= DISCENB;
+        hscb->control |= MK_MESSAGE;
+        scb->tag_action = 0;
+      }
+      else if (cmd != p->dev_wdtr_cmnd[TARGET_INDEX(cmd)])
+      {
+        aic7xxx_build_negotiation_cmnd(p, cmd, TARGET_INDEX(cmd));
+      }
+    }
+#else
+    /* Without fake commands, negotiate on the real command (lun 0 only). */
+    if ( (p->needwdtr & mask) && !(p->wdtr_pending & mask) &&
+         !(p->sdtr_pending & mask) && (cmd->lun == 0) )
+    {
+      p->wdtr_pending |= mask;
+      scb->flags |= SCB_MSGOUT_WDTR;
+      /* keep only DISCENB; tag bits must not accompany the message-out */
+      hscb->control &= DISCENB;
+      hscb->control |= MK_MESSAGE;
+      scb->tag_action = 0;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+      if (aic7xxx_verbose > 0xffff)
+        printk(INFO_LEAD "Building WDTR command.\n", p->host_no,
+               CTL_OF_CMD(cmd));
+#endif
+    }
+    else if ( (p->needsdtr & mask) && !(p->wdtr_pending & mask) &&
+              !(p->sdtr_pending & mask) && (cmd->lun == 0) )
+    {
+      p->sdtr_pending |= mask;
+      scb->flags |= SCB_MSGOUT_SDTR;
+      hscb->control &= DISCENB;
+      hscb->control |= MK_MESSAGE;
+      scb->tag_action = 0;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+      if (aic7xxx_verbose > 0xffff)
+        printk(INFO_LEAD "Building SDTR command.\n", p->host_no,
+               CTL_OF_CMD(cmd));
+#endif
+    }
+#endif
+  }
+  /* target id in bits 7:4, channel in bit 3, lun in bits 2:0 */
+  hscb->target_channel_lun = ((cmd->target << 4) & 0xF0) |
+        ((cmd->channel & 0x01) << 3) | (cmd->lun & 0x07);
+
+  /*
+   * The interpretation of request_buffer and request_bufflen
+   * changes depending on whether or not use_sg is zero; a
+   * non-zero use_sg indicates the number of elements in the
+   * scatter-gather array.
+   */
+
+  /*
+   * XXX - this relies on the host data being stored in a
+   * little-endian format.
+   */
+  hscb->SCSI_cmd_length = cmd->cmd_len;
+  hscb->SCSI_cmd_pointer = cpu_to_le32(VIRT_TO_BUS(cmd->cmnd));
+
+  if (cmd->use_sg)
+  {
+    struct scatterlist *sg;  /* Must be mid-level SCSI code scatterlist */
+
+    /*
+     * We must build an SG list in adapter format, as the kernel's SG list
+     * cannot be used directly because of data field size (__alpha__)
+     * differences and the kernel SG list uses virtual addresses where
+     * we need physical addresses.
+     */
+    int i;
+
+    sg = (struct scatterlist *)cmd->request_buffer;
+    scb->sg_length = 0;
+    /*
+     * Copy the segments into the SG array.  NOTE!!! - We used to
+     * have the first entry both in the data_pointer area and the first
+     * SG element.  That has changed somewhat.  We still have the first
+     * entry in both places, but now we download the address of
+     * scb->sg_list[1] instead of 0 to the sg pointer in the hscb.
+     */
+    for (i = 0; i < cmd->use_sg; i++)
+    {
+      scb->sg_list[i].address = cpu_to_le32(VIRT_TO_BUS(sg[i].address));
+      scb->sg_list[i].length = cpu_to_le32(sg[i].length);
+      scb->sg_length += sg[i].length;
+    }
+    /* Copy the first SG into the data pointer area. */
+    hscb->data_pointer = scb->sg_list[0].address;
+    hscb->data_count = scb->sg_list[0].length;
+    scb->sg_count = cmd->use_sg;
+    hscb->SG_segment_count = cmd->use_sg;
+    hscb->SG_list_pointer = cpu_to_le32(VIRT_TO_BUS(&scb->sg_list[1]));
+
+  }
+  else
+  {
+    if (cmd->request_bufflen)
+    {
+      /* Single contiguous buffer: synthesize a one-element SG list. */
+      scb->sg_count = 1;
+      scb->sg_list[0].address = cpu_to_le32(VIRT_TO_BUS(cmd->request_buffer));
+      scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
+      scb->sg_length = cmd->request_bufflen;
+      hscb->SG_segment_count = 1;
+      hscb->SG_list_pointer = cpu_to_le32(VIRT_TO_BUS(&scb->sg_list[0]));
+      hscb->data_count = scb->sg_list[0].length;
+      hscb->data_pointer = scb->sg_list[0].address;
+    }
+    else
+    {
+      /* No data phase at all. */
+      scb->sg_count = 0;
+      scb->sg_length = 0;
+      hscb->SG_segment_count = 0;
+      hscb->SG_list_pointer = 0;
+      hscb->data_count = 0;
+      hscb->data_pointer = 0;
+    }
+  }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+  if((cmd->cmnd[0] == TEST_UNIT_READY) && (aic7xxx_verbose & VERBOSE_PROBE2))
+  {
+    aic7xxx_print_scb(p, scb);
+  }
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_queue
+ *
+ * Description:
+ * Queue a SCB to the controller.
+ *-F*************************************************************************/
+int
+aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
+{
+  struct aic7xxx_host *p;
+  struct aic7xxx_scb *scb;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+  int tindex = TARGET_INDEX(cmd);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+  /* Presumably flag storage for the DRIVER_LOCK/DRIVER_UNLOCK macros on
+   * pre-2.1.95 kernels -- not referenced directly in this function. */
+  unsigned long cpu_flags = 0;
+#endif
+
+  p = (struct aic7xxx_host *) cmd->host->hostdata;
+  /*
+   * Check to see if channel was scanned.
+   */
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+  if (!(p->flags & AHC_A_SCANNED) && (cmd->channel == 0))
+  {
+    if (aic7xxx_verbose & VERBOSE_PROBE2)
+      printk(INFO_LEAD "Scanning channel for devices.\n",
+        p->host_no, 0, -1, -1);
+    p->flags |= AHC_A_SCANNED;
+  }
+  else
+  {
+    if (!(p->flags & AHC_B_SCANNED) && (cmd->channel == 1))
+    {
+      if (aic7xxx_verbose & VERBOSE_PROBE2)
+        printk(INFO_LEAD "Scanning channel for devices.\n",
+          p->host_no, 1, -1, -1);
+      p->flags |= AHC_B_SCANNED;
+    }
+  }
+
+  /* Sanity check: warn (and, past an arbitrary ceiling of 220, reset the
+   * counter) if the active-command count has run away past the device's
+   * queue depth.  Debug-build only. */
+  if (p->dev_active_cmds[tindex] > (cmd->device->queue_depth + 1))
+  {
+    printk(WARN_LEAD "Commands queued exceeds queue "
+           "depth, active=%d\n",
+           p->host_no, CTL_OF_CMD(cmd),
+           p->dev_active_cmds[tindex]);
+    if ( p->dev_active_cmds[tindex] > 220 )
+      p->dev_active_cmds[tindex] = 0;
+  }
+#endif
+
+  /* Grab a free SCB; if the free list is empty, try to allocate more
+   * (under the driver lock) and retry once. */
+  scb = scbq_remove_head(&p->scb_data->free_scbs);
+  if (scb == NULL)
+  {
+    DRIVER_LOCK
+    aic7xxx_allocate_scb(p);
+    DRIVER_UNLOCK
+    scb = scbq_remove_head(&p->scb_data->free_scbs);
+  }
+  if (scb == NULL)
+  {
+    /* Still no SCB: fail the command back to the mid-level as BUS_BUSY
+     * so it will be retried later. */
+    printk(WARN_LEAD "Couldn't get a free SCB.\n", p->host_no,
+           CTL_OF_CMD(cmd));
+    cmd->result = (DID_BUS_BUSY << 16);
+    DRIVER_LOCK
+    aic7xxx_queue_cmd_complete(p, cmd);
+    DRIVER_UNLOCK
+    return 0;
+  }
+  else
+  {
+    scb->cmd = cmd;
+    aic7xxx_position(cmd) = scb->hscb->tag;
+
+    /*
+     * Construct the SCB beforehand, so the sequencer is
+     * paused a minimal amount of time.
+     */
+    aic7xxx_buildscb(p, cmd, scb);
+
+    /*
+     * Make sure the Scsi_Cmnd pointer is saved, the struct it points to
+     * is set up properly, and the parity error flag is reset, then send
+     * the SCB to the sequencer and watch the fun begin.
+     */
+    cmd->scsi_done = fn;
+    cmd->result = DID_OK;
+    memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+    aic7xxx_error(cmd) = DID_OK;
+    aic7xxx_status(cmd) = 0;
+    cmd->host_scribble = NULL;
+
+    scb->flags |= SCB_ACTIVE | SCB_WAITINGQ;
+
+    /* Park the SCB on the waiting queue; only kick the queues now if we
+     * are not already inside the ISR or an abort/reset path (those paths
+     * run the waiting queues themselves when they finish). */
+    DRIVER_LOCK
+    scbq_insert_tail(&p->waiting_scbs, scb);
+    if ( (p->flags & (AHC_IN_ISR | AHC_IN_ABORT | AHC_IN_RESET)) == 0)
+    {
+      aic7xxx_run_waiting_queues(p);
+    }
+    DRIVER_UNLOCK
+  }
+  return (0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_bus_device_reset
+ *
+ * Description:
+ * Abort or reset the current SCSI command(s). If the scb has not
+ * previously been aborted, then we attempt to send a BUS_DEVICE_RESET
+ * message to the target. If the scb has previously been unsuccessfully
+ * aborted, then we will reset the channel and have all devices renegotiate.
+ * Returns an enumerated type that indicates the status of the operation.
+ *-F*************************************************************************/
+static int
+aic7xxx_bus_device_reset(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+{
+  struct aic7xxx_scb *scb;
+  int result = -1;
+  unsigned char saved_scbptr, lastphase;
+  unsigned char hscb_index;
+  int disconnected;
+
+  /* Map the mid-level command back to our SCB. */
+  scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+
+  lastphase = aic_inb(p, LASTPHASE);
+  if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+  {
+    printk(INFO_LEAD "Bus Device reset, scb flags 0x%x, ",
+         p->host_no, CTL_OF_SCB(scb), scb->flags);
+    switch (lastphase)
+    {
+      case P_DATAOUT:
+        printk("Data-Out phase\n");
+        break;
+      case P_DATAIN:
+        printk("Data-In phase\n");
+        break;
+      case P_COMMAND:
+        printk("Command phase\n");
+        break;
+      case P_MESGOUT:
+        printk("Message-Out phase\n");
+        break;
+      case P_STATUS:
+        printk("Status phase\n");
+        break;
+      case P_MESGIN:
+        printk("Message-In phase\n");
+        break;
+      default:
+        /*
+         * We're not in a valid phase, so assume we're idle.
+         */
+        printk("while idle, LASTPHASE = 0x%x\n", lastphase);
+        break;
+    }
+    printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 "
+         "0x%x\n", p->host_no, CTL_OF_SCB(scb),
+         aic_inb(p, SCSISIGI),
+         aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
+         aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
+  }
+
+  /*
+   * Send a Device Reset Message:
+   * The target that is holding up the bus may not be the same as
+   * the one that triggered this timeout (different commands have
+   * different timeout lengths). Our strategy here is to queue an
+   * abort message to the timed out target if it is disconnected.
+   * Otherwise, if we have an active target we stuff the message buffer
+   * with an abort message and assert ATN in the hopes that the target
+   * will let go of the bus and go to the mesgout phase. If this
+   * fails, we'll get another timeout a few seconds later which will
+   * attempt a bus reset.
+   */
+  saved_scbptr = aic_inb(p, SCBPTR);
+  disconnected = FALSE;
+
+  if (lastphase != P_BUSFREE)
+  {
+    /* The bus is busy: some SCB is currently connected. */
+    if (aic_inb(p, SCB_TAG) >= p->scb_data->numscbs)
+    {
+      /* The sequencer reports an SCB tag outside our allocated range;
+       * nothing sane can be done except let the caller escalate. */
+      printk(WARN_LEAD "Invalid SCB ID %d is active, "
+             "SCB flags = 0x%x.\n", p->host_no,
+           CTL_OF_CMD(cmd), scb->hscb->tag, scb->flags);
+      return(SCSI_RESET_ERROR);
+    }
+    if (scb->hscb->tag == aic_inb(p, SCB_TAG))
+    {
+      /* The connected SCB is the one we want to reset. */
+      if ( (lastphase != P_MESGOUT) && (lastphase != P_MESGIN) )
+      {
+        if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+          printk(INFO_LEAD "Device reset message in "
+                "message buffer\n", p->host_no, CTL_OF_SCB(scb));
+        scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
+        aic7xxx_error(scb->cmd) = DID_RESET;
+        p->dev_flags[TARGET_INDEX(scb->cmd)] |=
+                BUS_DEVICE_RESET_PENDING;
+        /* Send the abort message to the active SCB. */
+        aic_outb(p, HOST_MSG, MSG_OUT);
+        /* Assert ATN while preserving the current phase signals so the
+         * target will enter message-out and pick up our message. */
+        aic_outb(p, lastphase | ATNO, SCSISIGO);
+        return(SCSI_RESET_PENDING);
+      }
+      else
+      {
+        /* We want to send out the message, but it could screw an already */
+        /* in place and being used message. Instead, we return an error */
+        /* to try and start the bus reset phase since this command is */
+        /* probably hung (aborts failed, and now reset is failing). We */
+        /* also make sure to set BUS_DEVICE_RESET_PENDING so we won't try */
+        /* any more on this device, but instead will escalate to a bus or */
+        /* host reset (additionally, we won't try to abort any more). */
+        printk(WARN_LEAD "Device reset, Message buffer "
+                "in use\n", p->host_no, CTL_OF_SCB(scb));
+        scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
+        aic7xxx_error(scb->cmd) = DID_RESET;
+        p->dev_flags[TARGET_INDEX(scb->cmd)] |=
+                BUS_DEVICE_RESET_PENDING;
+        return(SCSI_RESET_ERROR);
+      }
+    }
+  } /* if (last_phase != P_BUSFREE).....indicates we are idle and can work */
+
+  /* The SCB is not the connected one -- locate it in the card's SCB array
+   * and determine whether it is disconnected. */
+  hscb_index = aic7xxx_find_scb(p, scb);
+  if (hscb_index == SCB_LIST_NULL)
+  {
+    /* Not downloaded to the card; if it's not sitting on the QOUTFIFO
+     * (already completed), treat it as disconnected. */
+    disconnected = (aic7xxx_scb_on_qoutfifo(p, scb)) ? FALSE : TRUE;
+  }
+  else
+  {
+    aic_outb(p, hscb_index, SCBPTR);
+    if (aic_inb(p, SCB_CONTROL) & DISCONNECTED)
+    {
+      disconnected = TRUE;
+    }
+  }
+  if (disconnected)
+  {
+    /*
+     * Simply set the MK_MESSAGE flag and the SEQINT handler will do
+     * the rest on a reconnect.
+     */
+    scb->hscb->control |= MK_MESSAGE;
+    scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
+    p->dev_flags[TARGET_INDEX(scb->cmd)] |=
+        BUS_DEVICE_RESET_PENDING;
+    if (hscb_index != SCB_LIST_NULL)
+    {
+      unsigned char scb_control;
+
+      /* Mirror MK_MESSAGE into the card-resident copy of the SCB. */
+      aic_outb(p, hscb_index, SCBPTR);
+      scb_control = aic_inb(p, SCB_CONTROL);
+      aic_outb(p, scb_control | MK_MESSAGE, SCB_CONTROL);
+    }
+    /*
+     * Actually requeue this SCB in case we can select the
+     * device before it reconnects. If the transaction we
+     * want to abort is not tagged, then this will be the only
+     * outstanding command and we can simply shove it on the
+     * qoutfifo and be done. If it is tagged, then it goes right
+     * in with all the others, no problem :) We need to add it
+     * to the qinfifo and let the sequencer know it is there.
+     * Now, the only problem left to deal with is, *IF* this
+     * command completes, in spite of the MK_MESSAGE bit in the
+     * control byte, then we need to pick that up in the interrupt
+     * routine and clean things up. This *shouldn't* ever happen.
+     */
+    if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+      printk(INFO_LEAD "Queueing device reset "
+           "command.\n", p->host_no, CTL_OF_SCB(scb));
+    p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
+    if (p->features & AHC_QUEUE_REGS)
+      aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+    else
+      aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+    scb->flags |= SCB_QUEUED_ABORT;
+    result = SCSI_RESET_PENDING;
+  }
+  else if (result == -1)
+  {
+    result = SCSI_RESET_ERROR;
+  }
+  /* Restore the sequencer's SCB pointer before returning. */
+  aic_outb(p, saved_scbptr, SCBPTR);
+  return (result);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_panic_abort
+ *
+ * Description:
+ * Abort the current SCSI command(s).
+ *-F*************************************************************************/
+void
+aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+  int i, mask, found, need_tag;
+  struct aic7xxx_scb *scb;
+  unsigned char qinpos, hscbp;
+
+  found = FALSE;
+#endif
+
+  /* Dump driver/controller identification and state, then (on old
+   * kernels) walk every queue and card-resident SCB list looking for the
+   * timed-out command.  This function never returns -- it spins forever
+   * at the bottom so the dumped state can be read off the console. */
+  printk("aic7xxx driver version %s/%s\n", AIC7XXX_C_VERSION,
+         UTS_RELEASE);
+  printk("Controller type:\n    %s\n", board_names[p->board_name_index]);
+  printk("p->flags=0x%x, p->chip=0x%x, p->features=0x%x, "
+         "sequencer %s paused\n",
+     p->flags, p->chip, p->features,
+    (aic_inb(p, HCNTRL) & PAUSE) ? "is" : "isn't" );
+  pause_sequencer(p);
+  disable_irq(p->irq);
+  aic7xxx_print_card(p);
+  aic7xxx_print_scratch_ram(p);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+  /* Per-target negotiation state and queue depth dump. */
+  for(i=0; i<MAX_TARGETS; i++)
+  {
+    if(p->dev_flags[i] & DEVICE_PRESENT)
+    {
+      mask = (0x01 << i);
+      printk(INFO_LEAD "dev_flags=0x%x, WDTR:%c/%c/%c, SDTR:%c/%c/%c,"
+             " q_depth=%d:%d\n",
+             p->host_no, 0, i, 0, p->dev_flags[i],
+             (p->wdtr_pending & mask) ? 'Y' : 'N',
+             (p->needwdtr & mask) ? 'Y' : 'N',
+             (p->needwdtr_copy & mask) ? 'Y' : 'N',
+             (p->sdtr_pending & mask) ? 'Y' : 'N',
+             (p->needsdtr & mask) ? 'Y' : 'N',
+             (p->needsdtr_copy & mask) ? 'Y' : 'N',
+             p->dev_active_cmds[i],
+             p->dev_max_queue_depth[i] );
+      printk(INFO_LEAD "targ_scsirate=0x%x", p->host_no, 0, i, 0,
+             aic_inb(p, TARG_SCSIRATE + i));
+      if (p->features & AHC_ULTRA2)
+        printk(", targ_offset=%d", aic_inb(p, TARG_OFFSET + i));
+      printk("\n");
+    }
+  }
+  /*
+   * Search for this command and see if we can't track it down, it's the
+   * one causing the timeout. Print out this command first, then all other
+   * active commands afterwards.
+   */
+  need_tag = -1;
+  if ( cmd )
+  {
+    scb = p->scb_data->scb_array[aic7xxx_position(cmd)];
+    if ( (scb->flags & SCB_ACTIVE) && (scb->cmd == cmd) )
+    {
+      printk("Timed out command is scb #%d:\n", scb->hscb->tag);
+      printk("Tag%d: flags=0x%x, control=0x%x, TCL=0x%x, %s\n", scb->hscb->tag,
+           scb->flags, scb->hscb->control, scb->hscb->target_channel_lun,
+           (scb->flags & SCB_WAITINGQ) ? "WAITINGQ" : "Sent" );
+      need_tag = scb->hscb->tag;
+      if (scb->flags & SCB_WAITINGQ) found=TRUE;
+    }
+  }
+  /* Walk the kernel-side QINFIFO looking for need_tag. */
+  printk("QINFIFO: (TAG) ");
+  qinpos = aic_inb(p, QINPOS);
+  while ( qinpos != p->qinfifonext )
+  {
+    if (p->qinfifo[qinpos] == need_tag)
+      found=TRUE;
+    printk("%d ", p->qinfifo[qinpos++]);
+  }
+  printk("\n");
+  printk("Current SCB: (SCBPTR/TAG/CONTROL) %d/%d/0x%x\n", aic_inb(p, SCBPTR),
+         aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL) );
+  if (aic_inb(p, SCB_TAG) == need_tag) found=TRUE;
+  /* Walk the card's WAITING, DISCONNECTED and FREE SCB lists via SCBPTR,
+   * following the SCB_NEXT links, printing each entry. */
+  printk("WAITING_SCBS: (SCBPTR/TAG/CONTROL) %d->",
+         hscbp = aic_inb(p, WAITING_SCBH));
+  while (hscbp != SCB_LIST_NULL)
+  {
+    aic_outb(p, hscbp, SCBPTR);
+    printk("%d/%d/0x%x ", hscbp, aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL));
+    hscbp = aic_inb(p, SCB_NEXT);
+    if (aic_inb(p, SCB_TAG) == need_tag) found=TRUE;
+  }
+  printk("\n");
+  printk("DISCONNECTED_SCBS: (SCBPTR/TAG/CONTROL) %d->",
+         hscbp = aic_inb(p, DISCONNECTED_SCBH));
+  while (hscbp != SCB_LIST_NULL)
+  {
+    aic_outb(p, hscbp, SCBPTR);
+    printk("%d/%d/0x%x ", hscbp, aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL));
+    hscbp = aic_inb(p, SCB_NEXT);
+    if (aic_inb(p, SCB_TAG) == need_tag) found=TRUE;
+  }
+  printk("\n");
+  printk("FREE_SCBS: (SCBPTR/TAG/CONTROL) %d->",
+         hscbp = aic_inb(p, FREE_SCBH));
+  while (hscbp != SCB_LIST_NULL)
+  {
+    aic_outb(p, hscbp, SCBPTR);
+    printk("%d/%d/0x%x ", hscbp, aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL));
+    hscbp = aic_inb(p, SCB_NEXT);
+  }
+  printk("\n");
+
+  if (found == FALSE)
+  {
+    /*
+     * We haven't found the offending SCB yet, and it should be around
+     * somewhere, so go look for it in the cards SCBs.
+     */
+    printk("SCBPTR   CONTROL   TAG   PREV   NEXT\n");
+    for(i=0; i<p->scb_data->maxhscbs; i++)
+    {
+      aic_outb(p, i, SCBPTR);
+      printk("   %3d      %02x      %02x    %02x    %02x\n", i,
+             aic_inb(p, SCB_CONTROL), aic_inb(p, SCB_TAG),
+             aic_inb(p, SCB_PREV), aic_inb(p, SCB_NEXT));
+    }
+  }
+
+
+  /* Finally, print every other active command besides the timed-out one. */
+  for (i=0; i < p->scb_data->numscbs; i++)
+  {
+    scb = p->scb_data->scb_array[i];
+    if ( (scb->flags & SCB_ACTIVE) && (scb->cmd != cmd) )
+    {
+      printk("Tag%d: flags=0x%x, control=0x%x, TCL=0x%x, %s\n", scb->hscb->tag,
+           scb->flags, scb->hscb->control, scb->hscb->target_channel_lun,
+           (scb->flags & SCB_WAITINGQ) ? "WAITINGQ" : "Sent" );
+    }
+  }
+#endif
+  /* Re-enable interrupts and halt forever -- deliberate dead stop so the
+   * dump above stays on the console. */
+  sti();
+  for(;;) barrier();
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort
+ *
+ * Description:
+ * Abort the current SCSI command(s).
+ *-F*************************************************************************/
+int
+aic7xxx_abort(Scsi_Cmnd *cmd)
+{
+  struct aic7xxx_scb  *scb = NULL;
+  struct aic7xxx_host *p;
+  int    result, found=0;
+  unsigned char tmp_char, saved_hscbptr, next_hscbptr, prev_hscbptr;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+  /* Presumably flag storage for the DRIVER_LOCK/DRIVER_UNLOCK macros on
+   * pre-2.1.95 kernels -- not referenced directly in this function. */
+  unsigned long cpu_flags = 0;
+#endif
+  Scsi_Cmnd *cmd_next, *cmd_prev;
+
+  p = (struct aic7xxx_host *) cmd->host->hostdata;
+  scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+
+  /*
+   * I added a new config option to the driver: "panic_on_abort" that will
+   * cause the driver to panic and the machine to stop on the first abort
+   * or reset call into the driver.  At that point, it prints out a lot of
+   * useful information for me which I can then use to try and debug the
+   * problem.  Simply enable the boot time prompt in order to activate this
+   * code.
+   */
+  if (aic7xxx_panic_on_abort)
+    aic7xxx_panic_abort(p, cmd);
+
+  DRIVER_LOCK
+
+/*
+ * Run the isr to grab any command in the QOUTFIFO and any other misc.
+ * sundry tasks.  This should also set up the bh handler if there is
+ * anything to be done, but it won't run until we are done here since
+ * we are following a straight code path without entering the scheduler
+ * code.
+ */
+
+  pause_sequencer(p);
+  while ( (aic_inb(p, INTSTAT) & INT_PEND) && !(p->flags & AHC_IN_ISR))
+  {
+    aic7xxx_isr(p->irq, p, (void *)NULL);
+    pause_sequencer(p);
+    aic7xxx_done_cmds_complete(p);
+  }
+
+  if ((scb == NULL) || (cmd->serial_number != cmd->serial_number_at_timeout))
+                      /* Totally bogus cmd since it points beyond our  */
+  {                   /*  valid SCB range or doesn't even match its own */
+                      /*  timeout serial number.                        */
+    if (aic7xxx_verbose & VERBOSE_ABORT_MID)
+      printk(INFO_LEAD "Abort called with bogus Scsi_Cmnd "
+           "pointer.\n", p->host_no, CTL_OF_CMD(cmd));
+    unpause_sequencer(p, FALSE);
+    DRIVER_UNLOCK
+    return(SCSI_ABORT_NOT_RUNNING);
+  }
+  if (scb->cmd != cmd)  /* Hmmm...either this SCB is currently free with a */
+  {                     /* NULL cmd pointer (NULLed out when freed) or it  */
+                        /* has already been recycled for another command   */
+                        /* Either way, this SCB has nothing to do with this*/
+                        /* command and we need to deal with cmd without    */
+                        /* touching the SCB.                               */
+                        /* The theory here is to return a value that will  */
+                        /* make the queued for complete command actually   */
+                        /* finish successfully, or to indicate that we     */
+                        /* don't have this cmd any more and the mid level  */
+                        /* code needs to find it.                          */
+    /* Search the driver's complete queue; if the command is there, finish
+     * it ourselves and tell the mid-level it is not running. */
+    cmd_next = p->completeq.head;
+    cmd_prev = NULL;
+    while (cmd_next != NULL)
+    {
+      if (cmd_next == cmd)
+      {
+        if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+          printk(INFO_LEAD "Abort called for command "
+            "on completeq, completing.\n", p->host_no, CTL_OF_CMD(cmd));
+        if ( cmd_prev == NULL )
+          p->completeq.head = (Scsi_Cmnd *)cmd_next->host_scribble;
+        else
+          cmd_prev->host_scribble = cmd_next->host_scribble;
+        cmd_next->scsi_done(cmd_next);
+        unpause_sequencer(p, FALSE);
+        DRIVER_UNLOCK
+        return(SCSI_ABORT_NOT_RUNNING); /* It's already back as a successful
+                                         * completion */
+      }
+      cmd_prev = cmd_next;
+      cmd_next = (Scsi_Cmnd *)cmd_next->host_scribble;
+    }
+    if (aic7xxx_verbose & VERBOSE_ABORT_MID)
+      printk(INFO_LEAD "Abort called for already completed"
+        " command.\n", p->host_no, CTL_OF_CMD(cmd));
+    unpause_sequencer(p, FALSE);
+    DRIVER_UNLOCK
+    return(SCSI_ABORT_NOT_RUNNING);
+  }
+
+/* At this point we know the following:
+ *   the SCB pointer is valid
+ *   the command pointer passed in to us and the scb->cmd pointer match
+ * this then means that the command we need to abort is the same as the
+ * command held by the scb pointer and is a valid abort request.
+ * Now, we just have to figure out what to do from here.  Current plan is:
+ *   if we have already been here on this command, escalate to a reset
+ *   if scb is on waiting list or QINFIFO, send it back as aborted, but
+ *     we also need to be aware of the possibility that we could be using
+ *     a faked negotiation command that is holding this command up,  if
+ *     so we need to take care of that command instead, which means we
+ *     would then treat this one like it was sitting around disconnected
+ *     instead.
+ *   if scb is on WAITING_SCB list in sequencer, free scb and send back
+ *   if scb is disconnected and not completed, abort with abort message
+ *   if scb is currently running, then it may be causing the bus to hang
+ *     so we want a return value that indicates a reset would be appropriate
+ *     if the command does not finish shortly
+ *   if scb is already complete but not on completeq, we're screwed because
+ *     this can't happen (except if the command is in the QOUTFIFO, in which
+ *     case we would like it to complete successfully instead of having to
+ *     to be re-done)
+ * All other scenarios already dealt with by previous code.
+ */
+
+  if ( scb->flags & (SCB_ABORT | SCB_RESET | SCB_QUEUED_ABORT) )
+  {
+    if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+      printk(INFO_LEAD "SCB aborted once already, "
+        "escalating.\n", p->host_no, CTL_OF_SCB(scb));
+    unpause_sequencer(p, FALSE);
+    DRIVER_UNLOCK
+    return(SCSI_ABORT_SNOOZE);
+  }
+  if ( (p->flags & (AHC_RESET_PENDING | AHC_ABORT_PENDING)) ||
+          (p->dev_flags[TARGET_INDEX(scb->cmd)] &
+           BUS_DEVICE_RESET_PENDING) )
+  {
+    if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+      printk(INFO_LEAD "Reset/Abort pending for this "
+        "device, not wasting our time.\n", p->host_no, CTL_OF_SCB(scb));
+    unpause_sequencer(p, FALSE);
+    DRIVER_UNLOCK
+    return(SCSI_ABORT_PENDING);
+  }
+
+  found = 0;
+  p->flags |= AHC_IN_ABORT;
+  if (aic7xxx_verbose & VERBOSE_ABORT)
+    printk(INFO_LEAD "Aborting scb %d, flags 0x%x\n",
+         p->host_no, CTL_OF_SCB(scb), scb->hscb->tag, scb->flags);
+
+/*
+ * First, let's check to see if the currently running command is our target
+ * since if it is, the return is fairly easy and quick since we don't want
+ * to touch the command in case it might complete, but we do want a timeout
+ * in case it's actually hung, so we really do nothing, but tell the mid
+ * level code to reset the timeout.
+ */
+
+  if ( scb->hscb->tag == aic_inb(p, SCB_TAG) )
+  {
+    /*
+     *  Check to see if the sequencer is just sitting on this command, or
+     *  if it's actively being run.
+     */
+    result = aic_inb(p, LASTPHASE);
+    switch (result)
+    {
+      case P_DATAOUT:    /*  For any of these cases, we can assume we are */
+      case P_DATAIN:     /*  an active command and act according.  For    */
+      case P_COMMAND:    /*  anything else we are going to fall on through*/
+      case P_STATUS:     /*  The SCSI_ABORT_SNOOZE will give us two abort */
+      case P_MESGOUT:    /*  chances to finish and then escalate to a     */
+      case P_MESGIN:     /*  reset call                                   */
+        if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+          printk(INFO_LEAD "SCB is currently active.  "
+                "Waiting on completion.\n", p->host_no, CTL_OF_SCB(scb));
+        unpause_sequencer(p, FALSE);
+        p->flags &= ~AHC_IN_ABORT;
+        scb->flags |= SCB_RECOVERY_SCB; /*  Note the fact that we've been  */
+        p->flags |= AHC_ABORT_PENDING;  /*  here so we will know not to    */
+        DRIVER_UNLOCK                   /*  muck with other SCBs if this   */
+        return(SCSI_ABORT_PENDING);     /*  one doesn't complete and clear */
+        break;                          /*  out.                           */
+      default:
+        break;
+    }
+  }
+
+  if ((found == 0) && (scb->flags & SCB_WAITINGQ))
+  {
+    int tindex = TARGET_INDEX(cmd);
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+    unsigned short mask;
+
+    mask = (1 << tindex);
+
+    /* If a fake WDTR/SDTR negotiation command is outstanding for this
+     * target and it is not directly in front of this command, we cannot
+     * safely yank this command off the waiting queue. */
+    if (p->wdtr_pending & mask)
+    {
+      if (p->dev_wdtr_cmnd[tindex]->next != cmd)
+        found = 1;
+      else
+        found = 0;
+    }
+    else if (p->sdtr_pending & mask)
+    {
+      if (p->dev_sdtr_cmnd[tindex]->next != cmd)
+        found = 1;
+      else
+        found = 0;
+    }
+    else
+    {
+      found = 1;
+    }
+    if (found == 0)
+    {
+      /*
+       * OK..this means the command we are currently getting an abort
+       * for has an outstanding negotiation command in front of it.
+       * We don't really have a way to tie back into the negotiation
+       * commands, so we just send this back as pending, then it
+       * will get reset in 2 seconds.
+       */
+      unpause_sequencer(p, TRUE);
+      scb->flags |= SCB_ABORT;
+      DRIVER_UNLOCK
+      return(SCSI_ABORT_PENDING);
+    }
+#endif
+    if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+      printk(INFO_LEAD "SCB found on waiting list and "
+          "aborted.\n", p->host_no, CTL_OF_SCB(scb));
+    /* Pull the SCB off the driver-side queues.  The counters are bumped
+     * here because the done path decrements them when the command is
+     * completed, and waiting-queue SCBs were never counted as active. */
+    scbq_remove(&p->waiting_scbs, scb);
+    scbq_remove(&p->delayed_scbs[tindex], scb);
+    p->dev_active_cmds[tindex]++;
+    p->activescbs++;
+    scb->flags &= ~(SCB_WAITINGQ | SCB_ACTIVE);
+    scb->flags |= SCB_ABORT | SCB_QUEUED_FOR_DONE;
+    found = 1;
+  }
+
+/*
+ * We just checked the waiting_q, now for the QINFIFO
+ */
+  if ( found == 0 )
+  {
+    if ( ((found = aic7xxx_search_qinfifo(p, cmd->target,
+                     cmd->channel,
+                     cmd->lun, scb->hscb->tag, SCB_ABORT | SCB_QUEUED_FOR_DONE,
+                     FALSE, NULL)) != 0) &&
+                    (aic7xxx_verbose & VERBOSE_ABORT_PROCESS))
+      printk(INFO_LEAD "SCB found in QINFIFO and "
+        "aborted.\n", p->host_no, CTL_OF_SCB(scb));
+  }
+
+/*
+ * QINFIFO, waitingq, completeq done.  Next, check WAITING_SCB list in card
+ */
+
+  if ( found == 0 )
+  {
+    unsigned char scb_next_ptr;
+    prev_hscbptr = SCB_LIST_NULL;
+    saved_hscbptr = aic_inb(p, SCBPTR);
+    next_hscbptr = aic_inb(p, WAITING_SCBH);
+    /* Walk the card's waiting-SCB linked list via SCBPTR/SCB_NEXT. */
+    while ( next_hscbptr != SCB_LIST_NULL )
+    {
+      aic_outb(p,  next_hscbptr, SCBPTR );
+      if ( scb->hscb->tag == aic_inb(p, SCB_TAG) )
+      {
+        found = 1;
+        if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+          printk(INFO_LEAD "SCB found on hardware waiting"
+            " list and aborted.\n", p->host_no, CTL_OF_SCB(scb));
+        if ( prev_hscbptr == SCB_LIST_NULL )
+        {
+            aic_outb(p, aic_inb(p, SCB_NEXT), WAITING_SCBH);
+            /* stop the selection since we just
+             * grabbed the scb out from under the
+             * card
+             */
+            aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
+            aic_outb(p, CLRSELTIMEO, CLRSINT1);
+        }
+        else
+        {
+            /* Unlink from the middle of the list: patch the previous
+             * entry's SCB_NEXT around this one. */
+            scb_next_ptr = aic_inb(p, SCB_NEXT);
+            aic_outb(p, prev_hscbptr, SCBPTR);
+            aic_outb(p, scb_next_ptr, SCB_NEXT);
+            aic_outb(p, next_hscbptr, SCBPTR);
+        }
+        aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+        aic_outb(p, 0, SCB_CONTROL);
+        aic7xxx_add_curscb_to_free_list(p);
+        scb->flags = SCB_ABORT | SCB_QUEUED_FOR_DONE;
+        break;
+      }
+      prev_hscbptr = next_hscbptr;
+      next_hscbptr = aic_inb(p, SCB_NEXT);
+    }
+    aic_outb(p,  saved_hscbptr, SCBPTR );
+  }
+
+/*
+ * Hmmm...completeq, QOUTFIFO, QINFIFO, WAITING_SCBH, waitingq all checked.
+ * OK...the sequencer's paused, interrupts are off, and we haven't found the
+ * command anyplace where it could be easily aborted.  Time for the hard
+ * work.  We also know the command is valid.  This essentially means the
+ * command is disconnected, or connected but not into any phases yet, which
+ * we know due to the tests we ran earlier on the current active scb phase.
+ * At this point we can queue the abort tag and go on with life.
+ */
+
+  if ( found == 0 )
+  {
+    p->flags |= AHC_ABORT_PENDING;
+    scb->flags |= SCB_QUEUED_ABORT | SCB_ABORT | SCB_RECOVERY_SCB;
+    scb->hscb->control |= MK_MESSAGE;
+    result=aic7xxx_find_scb(p, scb);
+    if ( result != SCB_LIST_NULL )
+    {
+      /* Mirror MK_MESSAGE into the card-resident copy of the SCB. */
+      saved_hscbptr = aic_inb(p, SCBPTR);
+      aic_outb(p, result, SCBPTR);
+      tmp_char = aic_inb(p, SCB_CONTROL);
+      aic_outb(p, tmp_char | MK_MESSAGE, SCB_CONTROL);
+      aic_outb(p, saved_hscbptr, SCBPTR);
+    }
+    if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+      printk(INFO_LEAD "SCB disconnected.  Queueing Abort"
+        " SCB.\n", p->host_no, CTL_OF_SCB(scb));
+    p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
+    if (p->features & AHC_QUEUE_REGS)
+      aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+    else
+      aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+  }
+  if (found)
+  {
+    aic7xxx_run_done_queue(p, TRUE);
+    aic7xxx_run_waiting_queues(p);
+  }
+  p->flags &= ~AHC_IN_ABORT;
+  unpause_sequencer(p, FALSE);
+  DRIVER_UNLOCK
+
+/*
+ * On the return value.  If we found the command and aborted it, then we know
+ * it's already sent back and there is no reason for a further timeout, so
+ * we use SCSI_ABORT_SUCCESS.  On the queued abort side, we aren't so certain
+ * there hasn't been a bus hang or something that might keep the abort
+ * from completing.  Therefore, we use SCSI_ABORT_PENDING.  The first time this
+ * is passed back, the timeout on the command gets extended, the second time
+ * we pass this back, the mid level SCSI code calls our reset function, which
+ * would shake loose a hung bus.
+ */
+  if ( found != 0 )
+    return(SCSI_ABORT_SUCCESS);
+  else
+    return(SCSI_ABORT_PENDING);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset
+ *
+ * Description:
+ *   Resetting the bus always succeeds - it has to, otherwise the
+ * kernel will panic! Try a surgical technique - sending a BUS
+ * DEVICE RESET message - on the offending target before pulling
+ * the SCSI bus reset line.
+ *-F*************************************************************************/
+int
+aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
+{
+ struct aic7xxx_scb *scb = NULL;
+ struct aic7xxx_host *p;
+ int tindex;
+ int result = -1;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags = 0;
+#endif
+#define DEVICE_RESET 0x01
+#define BUS_RESET 0x02
+#define HOST_RESET 0x04
+#define FAIL 0x08
+#define RESET_DELAY 0x10
+ int action;
+ Scsi_Cmnd *cmd_prev, *cmd_next;
+
+
+ if ( cmd == NULL )
+ {
+ printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL Scsi_Cmnd "
+ "pointer, failing.\n");
+ return(SCSI_RESET_SNOOZE);
+ }
+
+ p = (struct aic7xxx_host *) cmd->host->hostdata;
+ scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+ tindex = TARGET_INDEX(cmd);
+
+ /*
+ * I added a new config option to the driver: "panic_on_abort" that will
+ * cause the driver to panic and the machine to stop on the first abort
+ * or reset call into the driver. At that point, it prints out a lot of
+   * useful information for me which I can then use to try and debug the
+ * problem. Simply enable the boot time prompt in order to activate this
+ * code.
+ */
+ if (aic7xxx_panic_on_abort)
+ aic7xxx_panic_abort(p, cmd);
+
+ DRIVER_LOCK
+
+ pause_sequencer(p);
+ while ( (aic_inb(p, INTSTAT) & INT_PEND) && !(p->flags & AHC_IN_ISR))
+ {
+ aic7xxx_isr(p->irq, p, (void *)NULL );
+ pause_sequencer(p);
+ aic7xxx_done_cmds_complete(p);
+ }
+
+ if (scb == NULL)
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called with bogus Scsi_Cmnd"
+ "->SCB mapping, improvising.\n", p->host_no, CTL_OF_CMD(cmd));
+ if ( flags & SCSI_RESET_SUGGEST_HOST_RESET )
+ {
+ action = HOST_RESET;
+ }
+ else
+ {
+ action = BUS_RESET;
+ }
+ }
+ else if (scb->cmd != cmd)
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called with recycled SCB "
+ "for cmd.\n", p->host_no, CTL_OF_CMD(cmd));
+ cmd_prev = NULL;
+ cmd_next = p->completeq.head;
+ while ( cmd_next != NULL )
+ {
+ if (cmd_next == cmd)
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "Reset, found cmd on completeq"
+ ", completing.\n", p->host_no, CTL_OF_CMD(cmd));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_NOT_RUNNING);
+ }
+ cmd_prev = cmd_next;
+ cmd_next = (Scsi_Cmnd *)cmd_next->host_scribble;
+ }
+ if ( !(flags & SCSI_RESET_SYNCHRONOUS) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "Reset, cmd not found,"
+ " failing.\n", p->host_no, CTL_OF_CMD(cmd));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_NOT_RUNNING);
+ }
+ else
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called, no scb, "
+ "flags 0x%x\n", p->host_no, CTL_OF_CMD(cmd), flags);
+ scb = NULL;
+ action = HOST_RESET;
+ }
+ }
+ else
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called, scb %d, flags "
+ "0x%x\n", p->host_no, CTL_OF_SCB(scb), scb->hscb->tag, scb->flags);
+ if ( aic7xxx_scb_on_qoutfifo(p, scb) )
+ {
+ if(aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "SCB on qoutfifo, returning.\n", p->host_no,
+ CTL_OF_SCB(scb));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_NOT_RUNNING);
+ }
+ if ( flags & SCSI_RESET_SUGGEST_HOST_RESET )
+ {
+ action = HOST_RESET;
+ }
+ else if ( flags & SCSI_RESET_SUGGEST_BUS_RESET )
+ {
+ action = BUS_RESET;
+ }
+ else
+ {
+ action = DEVICE_RESET;
+ }
+ }
+ if ( (action & DEVICE_RESET) &&
+ (p->dev_flags[tindex] & BUS_DEVICE_RESET_PENDING) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Bus device reset already sent to "
+ "device, escalating.\n", p->host_no, CTL_OF_CMD(cmd));
+ action = BUS_RESET;
+ }
+ if ( (action & DEVICE_RESET) &&
+ (scb->flags & SCB_QUEUED_ABORT) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ {
+ printk(INFO_LEAD "Have already attempted to reach "
+ "device with queued\n", p->host_no, CTL_OF_CMD(cmd));
+ printk(INFO_LEAD "message, will escalate to bus "
+ "reset.\n", p->host_no, CTL_OF_CMD(cmd));
+ }
+ action = BUS_RESET;
+ }
+ if ( (action & DEVICE_RESET) &&
+ (p->flags & (AHC_RESET_PENDING | AHC_ABORT_PENDING)) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Bus device reset stupid when "
+ "other action has failed.\n", p->host_no, CTL_OF_CMD(cmd));
+ action = BUS_RESET;
+ }
+ if ( (action & BUS_RESET) && !(p->features & AHC_TWIN) )
+ {
+ action = HOST_RESET;
+ }
+ if ( (p->dev_flags[tindex] & DEVICE_RESET_DELAY) &&
+ !(action & (HOST_RESET | BUS_RESET)))
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ {
+ printk(INFO_LEAD "Reset called too soon after last "
+ "reset without requesting\n", p->host_no, CTL_OF_CMD(cmd));
+ printk(INFO_LEAD "bus or host reset, escalating.\n", p->host_no,
+ CTL_OF_CMD(cmd));
+ }
+ action = BUS_RESET;
+ }
+ if ( (p->flags & AHC_RESET_DELAY) &&
+ (action & (HOST_RESET | BUS_RESET)) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Reset called too soon after "
+ "last bus reset, delaying.\n", p->host_no, CTL_OF_CMD(cmd));
+ action = RESET_DELAY;
+ }
+/*
+ * By this point, we want to already know what we are going to do and
+ * only have the following code implement our course of action.
+ */
+ switch (action)
+ {
+ case RESET_DELAY:
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_PENDING);
+ break;
+ case FAIL:
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_ERROR);
+ break;
+ case DEVICE_RESET:
+ p->flags |= AHC_IN_RESET;
+ result = aic7xxx_bus_device_reset(p, cmd);
+ aic7xxx_run_done_queue(p, TRUE);
+ /* We can't rely on run_waiting_queues to unpause the sequencer for
+ * PCI based controllers since we use AAP */
+ aic7xxx_run_waiting_queues(p);
+ unpause_sequencer(p, FALSE);
+ p->flags &= ~AHC_IN_RESET;
+ DRIVER_UNLOCK
+ return(result);
+ break;
+ case BUS_RESET:
+ case HOST_RESET:
+ default:
+ p->flags |= AHC_IN_RESET | AHC_RESET_DELAY;
+ p->dev_expires[p->scsi_id] = jiffies + (3 * HZ);
+ p->dev_timer_active |= (0x01 << p->scsi_id);
+ if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
+ time_after_eq(p->dev_timer.expires, p->dev_expires[p->scsi_id]) )
+ {
+ del_timer(&p->dev_timer);
+ p->dev_timer.expires = p->dev_expires[p->scsi_id];
+ add_timer(&p->dev_timer);
+ p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ }
+ aic7xxx_reset_channel(p, cmd->channel, TRUE);
+ if ( (p->features & AHC_TWIN) && (action & HOST_RESET) )
+ {
+ aic7xxx_reset_channel(p, cmd->channel ^ 0x01, TRUE);
+ restart_sequencer(p);
+ }
+ if (action != HOST_RESET)
+ result = SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+ else
+ {
+ result = SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET;
+ aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE),
+ SIMODE1);
+ aic7xxx_clear_intstat(p);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+ p->msg_type = MSG_TYPE_NONE;
+ p->msg_index = 0;
+ p->msg_len = 0;
+ }
+ aic7xxx_run_done_queue(p, TRUE);
+ /*
+ * If this a SCSI_RESET_SYNCHRONOUS then the command we were given is
+ * in need of being re-started, so send it on through to aic7xxx_queue
+ * and let it sit until the delay is over. This keeps it from dying
+ * entirely and avoids getting a bogus dead command back through the
+ * mid-level code due to too many retries.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,132)
+ if ( flags & SCSI_RESET_SYNCHRONOUS )
+ {
+ cmd->result = DID_BUS_BUSY << 16;
+ cmd->done(cmd);
+ }
+#endif
+ p->flags &= ~AHC_IN_RESET;
+ /*
+ * We can't rely on run_waiting_queues to unpause the sequencer for
+ * PCI based controllers since we use AAP. NOTE: this also sets
+ * the timer for the one command we might have queued in the case
+ * of a synch reset.
+ */
+ aic7xxx_run_waiting_queues(p);
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(result);
+ break;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_biosparam
+ *
+ * Description:
+ * Return the disk geometry for the given SCSI device.
+ *-F*************************************************************************/
+int
+aic7xxx_biosparam(Disk *disk, kdev_t dev, int geom[])
+{
+ int heads, sectors, cylinders, ret;
+ struct aic7xxx_host *p;
+ struct buffer_head *bh;
+
+ p = (struct aic7xxx_host *) disk->device->host->hostdata;
+ bh = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, 1024);
+
+ if ( bh )
+ {
+ ret = scsi_partsize(bh, disk->capacity, &geom[2], &geom[0], &geom[1]);
+ brelse(bh);
+ if ( ret != -1 )
+ return(ret);
+ }
+
+ heads = 64;
+ sectors = 32;
+ cylinders = disk->capacity / (heads * sectors);
+
+ if ((p->flags & AHC_EXTEND_TRANS_A) && (cylinders > 1024))
+ {
+ heads = 255;
+ sectors = 63;
+ cylinders = disk->capacity / (heads * sectors);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return (0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_release
+ *
+ * Description:
+ * Free the passed in Scsi_Host memory structures prior to unloading the
+ * module.
+ *-F*************************************************************************/
+int
+aic7xxx_release(struct Scsi_Host *host)
+{
+ struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata;
+ struct aic7xxx_host *next, *prev;
+
+ if(p->irq)
+ free_irq(p->irq, p);
+ release_region(p->base, MAXREG - MINREG);
+#ifdef MMAPIO
+ if(p->maddr)
+ {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+ vfree((void *) (((unsigned long) p->maddr) & PAGE_MASK));
+#else
+ iounmap((void *) (((unsigned long) p->maddr) & PAGE_MASK));
+#endif
+ }
+#endif /* MMAPIO */
+ prev = NULL;
+ next = first_aic7xxx;
+ while(next != NULL)
+ {
+ if(next == p)
+ {
+ if(prev == NULL)
+ first_aic7xxx = next->next;
+ else
+ prev->next = next->next;
+ }
+ else
+ {
+ prev = next;
+ }
+ next = next->next;
+ }
+ aic7xxx_free(p);
+ return(0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_print_card
+ *
+ * Description:
+ * Print out all of the control registers on the card
+ *
+ * NOTE: This function is not yet safe for use on the VLB and EISA
+ * controllers, so it isn't used on those controllers at all.
+ *-F*************************************************************************/
+static void
+aic7xxx_print_card(struct aic7xxx_host *p)
+{
+ int i, j, k, chip;
+ static struct register_ranges {
+ int num_ranges;
+ int range_val[32];
+ } cards_ds[] = {
+ { 0, {0,} }, /* none */
+ {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1f, 0x1f, 0x60, 0x60, /*7771*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9b, 0x9f} },
+ { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7850*/
+ 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7860*/
+ 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1c, 0x1f, 0x60, 0x60, /*7870*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1a, 0x1c, 0x1f, 0x60, 0x60, /*7880*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7890*/
+ 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f,
+ 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc,
+ 0xfe, 0xff} },
+ {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1b, 0x1f, 0x60, 0x60, /*7895*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a,
+ 0x9f, 0x9f, 0xe0, 0xf1} },
+ {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7896*/
+ 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f,
+ 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc,
+ 0xfe, 0xff} },
+ };
+#ifdef CONFIG_PCI
+ static struct register_ranges cards_ns[] = {
+ { 0, {0,} }, /* none */
+ { 0, {0,} }, /* 7771 */
+ { 7, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x28, 0x2b, 0x30, 0x33,
+ 0x3c, 0x41, 0x43, 0x47} },
+ { 7, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x28, 0x2b, 0x30, 0x33,
+ 0x3c, 0x41, 0x43, 0x47} },
+ { 5, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x30, 0x33, 0x3c, 0x41} },
+ { 5, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x30, 0x34, 0x3c, 0x47} },
+ { 5, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3} },
+ { 6, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x30, 0x34, 0x3c, 0x47,
+ 0xdc, 0xe3} },
+ { 6, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3,
+ 0xff, 0xff} }
+ };
+#endif
+ chip = p->chip & AHC_CHIPID_MASK;
+ /*
+ * Let's run through the PCI space first....
+ */
+ printk("%s at ",
+ board_names[p->board_name_index]);
+ switch(p->chip & ~AHC_CHIPID_MASK)
+ {
+ case AHC_VL:
+ printk("VLB Slot %d.\n", p->pci_device_fn);
+ break;
+ case AHC_EISA:
+ printk("EISA Slot %d.\n", p->pci_device_fn);
+ break;
+ case AHC_PCI:
+ default:
+ printk("PCI %d/%d.\n", PCI_SLOT(p->pci_device_fn),
+ PCI_FUNC(p->pci_device_fn));
+ break;
+ }
+
+#ifdef CONFIG_PCI
+ {
+ unsigned char temp;
+
+ printk("PCI Dump:\n");
+ k=0;
+ for(i=0; i<cards_ns[chip].num_ranges; i++)
+ {
+ for(j = cards_ns[chip].range_val[ i * 2 ];
+ j <= cards_ns[chip].range_val[ i * 2 + 1 ] ;
+ j++)
+ {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_byte(p->pdev, j, &temp);
+#else
+ pcibios_read_config_byte(p->pci_bus, p->pci_device_fn, j, &temp);
+#endif
+ printk("%02x:%02x ", j, temp);
+ if(++k == 13)
+ {
+ printk("\n");
+ k = 0;
+ }
+ }
+ }
+ }
+ if(k != 0)
+ printk("\n");
+#endif /* CONFIG_PCI */
+
+ /*
+ * Now the registers on the card....
+ */
+ printk("Card Dump:\n");
+ k = 0;
+ for(i=0; i<cards_ds[chip].num_ranges; i++)
+ {
+ for(j = cards_ds[chip].range_val[ i * 2 ];
+ j <= cards_ds[chip].range_val[ i * 2 + 1 ] ;
+ j++)
+ {
+ printk("%02x:%02x ", j, aic_inb(p, j));
+ if(++k == 13)
+ {
+ printk("\n");
+ k=0;
+ }
+ }
+ }
+ if(k != 0)
+ printk("\n");
+ if (p->flags & AHC_SEEPROM_FOUND)
+ {
+ unsigned short *sc1;
+ sc1 = (unsigned short *)&p->sc;
+
+ printk("SEEPROM dump.\n");
+ for(i=1; i<=32; i++)
+ {
+ printk("0x%04x", sc1[i-1]);
+ if ( (i % 8) == 0 )
+ printk("\n");
+ else
+ printk(" ");
+ }
+ }
+
+ /*
+ * If this was an Ultra2 controller, then we just hosed the card in terms
+ * of the QUEUE REGS. This function is only called at init time or by
+ * the panic_abort function, so it's safe to assume a generic init time
+ * setting here
+ */
+
+ if(p->features & AHC_QUEUE_REGS)
+ {
+ aic_outb(p, 0, SDSCB_QOFF);
+ aic_outb(p, 0, SNSCB_QOFF);
+ aic_outb(p, 0, HNSCB_QOFF);
+ }
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_print_scratch_ram
+ *
+ * Description:
+ * Print out the scratch RAM values on the card.
+ *-F*************************************************************************/
+static void
+aic7xxx_print_scratch_ram(struct aic7xxx_host *p)
+{
+ int i, k;
+
+ k = 0;
+ printk("Scratch RAM:\n");
+ for(i = SRAM_BASE; i < SEQCTL; i++)
+ {
+ printk("%02x:%02x ", i, aic_inb(p, i));
+ if(++k == 13)
+ {
+ printk("\n");
+ k=0;
+ }
+ }
+ if (p->features & AHC_MORE_SRAM)
+ {
+ for(i = TARG_OFFSET; i < 0x80; i++)
+ {
+ printk("%02x:%02x ", i, aic_inb(p, i));
+ if(++k == 13)
+ {
+ printk("\n");
+ k=0;
+ }
+ }
+ }
+ printk("\n");
+}
+
+
+#include "aic7xxx_proc.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AIC7XXX;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/aic7xxx.h b/linux/src/drivers/scsi/aic7xxx.h
new file mode 100644
index 0000000..8d18f3c
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx.h
@@ -0,0 +1,114 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: aic7xxx.h,v 1.1.4.1 2004/01/16 22:41:26 roland Exp $
+ *-M*************************************************************************/
+#ifndef _aic7xxx_h
+#define _aic7xxx_h
+
+#define AIC7XXX_H_VERSION "3.2.4"
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(x,y,z) (((x)<<16)+((y)<<8)+(z))
+#endif
+
+#if defined(__i386__)
+# define AIC7XXX_BIOSPARAM aic7xxx_biosparam
+#else
+# define AIC7XXX_BIOSPARAM NULL
+#endif
+
+/*
+ * Scsi_Host_Template (see hosts.h) for AIC-7xxx - some fields
+ * to do with card config are filled in after the card is detected.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,65)
+#define AIC7XXX { \
+ next: NULL, \
+ module: NULL, \
+ proc_dir: NULL, \
+ proc_info: aic7xxx_proc_info, \
+ name: NULL, \
+ detect: aic7xxx_detect, \
+ release: aic7xxx_release, \
+ info: aic7xxx_info, \
+ command: NULL, \
+ queuecommand: aic7xxx_queue, \
+ eh_strategy_handler: NULL, \
+ eh_abort_handler: NULL, \
+ eh_device_reset_handler: NULL, \
+ eh_bus_reset_handler: NULL, \
+ eh_host_reset_handler: NULL, \
+ abort: aic7xxx_abort, \
+ reset: aic7xxx_reset, \
+ slave_attach: NULL, \
+ bios_param: AIC7XXX_BIOSPARAM, \
+ can_queue: 255, /* max simultaneous cmds */\
+ this_id: -1, /* scsi id of host adapter */\
+ sg_tablesize: 0, /* max scatter-gather cmds */\
+ cmd_per_lun: 3, /* cmds per lun (linked cmds) */\
+ present: 0, /* number of 7xxx's present */\
+ unchecked_isa_dma: 0, /* no memory DMA restrictions */\
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 0 \
+}
+#else
+#define AIC7XXX { \
+ next: NULL, \
+ usage_count: NULL, \
+ proc_dir: NULL, \
+ proc_info: aic7xxx_proc_info, \
+ name: NULL, \
+ detect: aic7xxx_detect, \
+ release: aic7xxx_release, \
+ info: aic7xxx_info, \
+ command: NULL, \
+ queuecommand: aic7xxx_queue, \
+ abort: aic7xxx_abort, \
+ reset: aic7xxx_reset, \
+ slave_attach: NULL, \
+ bios_param: AIC7XXX_BIOSPARAM, \
+ can_queue: 255, /* max simultaneous cmds */\
+ this_id: -1, /* scsi id of host adapter */\
+ sg_tablesize: 0, /* max scatter-gather cmds */\
+ cmd_per_lun: 3, /* cmds per lun (linked cmds) */\
+ present: 0, /* number of 7xxx's present */\
+ unchecked_isa_dma: 0, /* no memory DMA restrictions */\
+ use_clustering: ENABLE_CLUSTERING \
+}
+#endif
+
+extern int aic7xxx_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
+extern int aic7xxx_biosparam(Disk *, kdev_t, int[]);
+extern int aic7xxx_detect(Scsi_Host_Template *);
+extern int aic7xxx_command(Scsi_Cmnd *);
+extern int aic7xxx_reset(Scsi_Cmnd *, unsigned int);
+extern int aic7xxx_abort(Scsi_Cmnd *);
+extern int aic7xxx_release(struct Scsi_Host *);
+
+extern const char *aic7xxx_info(struct Scsi_Host *);
+
+extern int aic7xxx_proc_info(char *, char **, off_t, int, int, int);
+
+#endif /* _aic7xxx_h */
diff --git a/linux/src/drivers/scsi/aic7xxx/scsi_message.h b/linux/src/drivers/scsi/aic7xxx/scsi_message.h
new file mode 100644
index 0000000..16c4013
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx/scsi_message.h
@@ -0,0 +1,41 @@
+/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
+#define MSG_CMDCOMPLETE 0x00 /* M/M */
+#define MSG_EXTENDED 0x01 /* O/O */
+#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
+#define MSG_RESTOREPOINTERS 0x03 /* O/O */
+#define MSG_DISCONNECT 0x04 /* O/O */
+#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */
+#define MSG_ABORT 0x06 /* O/M */
+#define MSG_MESSAGE_REJECT 0x07 /* M/M */
+#define MSG_NOOP 0x08 /* M/M */
+#define MSG_PARITY_ERROR 0x09 /* M/M */
+#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */
+#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */
+#define MSG_BUS_DEV_RESET 0x0c /* O/M */
+#define MSG_ABORT_TAG 0x0d /* O/O */
+#define MSG_CLEAR_QUEUE 0x0e /* O/O */
+#define MSG_INIT_RECOVERY 0x0f /* O/O */
+#define MSG_REL_RECOVERY 0x10 /* O/O */
+#define MSG_TERM_IO_PROC 0x11 /* O/O */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
+#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */
+#define MSG_ORDERED_Q_TAG 0x22 /* O/O */
+#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
+
+/* Identify message */ /* M/M */
+#define MSG_IDENTIFYFLAG 0x80
+#define MSG_IDENTIFY_DISCFLAG 0x40
+#define MSG_IDENTIFY(lun, disc) (((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
+#define MSG_ISIDENTIFY(m) ((m) & MSG_IDENTIFYFLAG)
+
+/* Extended messages (opcode and length) */
+#define MSG_EXT_SDTR 0x01
+#define MSG_EXT_SDTR_LEN 0x03
+
+#define MSG_EXT_WDTR 0x03
+#define MSG_EXT_WDTR_LEN 0x02
+#define MSG_EXT_WDTR_BUS_8_BIT 0x00
+#define MSG_EXT_WDTR_BUS_16_BIT 0x01
+#define MSG_EXT_WDTR_BUS_32_BIT 0x02
diff --git a/linux/src/drivers/scsi/aic7xxx/sequencer.h b/linux/src/drivers/scsi/aic7xxx/sequencer.h
new file mode 100644
index 0000000..7c0121e
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx/sequencer.h
@@ -0,0 +1,135 @@
+/*
+ * Instruction formats for the sequencer program downloaded to
+ * Aic7xxx SCSI host adapters
+ *
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Where this Software is combined with software released under the terms of
+ * the GNU Public License ("GPL") and the terms of the GPL would require the
+ * combined work to also be released under the terms of the GPL, the terms
+ * and conditions of this License will apply in addition to those of the
+ * GPL with the exception of any terms or conditions of this License that
+ * conflict with, or are expressly prohibited by, the GPL.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: sequencer.h,v 1.1 1999/04/26 05:55:33 tb Exp $
+ */
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+struct ins_format1 {
+ unsigned int
+ immediate : 8,
+ source : 9,
+ destination : 9,
+ ret : 1,
+ opcode : 4,
+ parity : 1;
+};
+
+struct ins_format2 {
+ unsigned int
+ shift_control : 8,
+ source : 9,
+ destination : 9,
+ ret : 1,
+ opcode : 4,
+ parity : 1;
+};
+
+struct ins_format3 {
+ unsigned int
+ immediate : 8,
+ source : 9,
+ address : 10,
+ opcode : 4,
+ parity : 1;
+};
+#elif defined(__BIG_ENDIAN_BITFIELD)
+struct ins_format1 {
+ unsigned int
+ parity : 1,
+ opcode : 4,
+ ret : 1,
+ destination : 9,
+ source : 9,
+ immediate : 8;
+};
+
+struct ins_format2 {
+ unsigned int
+ parity : 1,
+ opcode : 4,
+ ret : 1,
+ destination : 9,
+ source : 9,
+ shift_control : 8;
+};
+
+struct ins_format3 {
+ unsigned int
+ parity : 1,
+ opcode : 4,
+ address : 10,
+ source : 9,
+ immediate : 8;
+};
+#endif
+
+union ins_formats {
+ struct ins_format1 format1;
+ struct ins_format2 format2;
+ struct ins_format3 format3;
+ unsigned char bytes[4];
+ unsigned int integer;
+};
+struct instruction {
+ union ins_formats format;
+ unsigned int srcline;
+ struct symbol *patch_label;
+ struct {
+ struct instruction *stqe_next;
+ } links;
+};
+
+#define AIC_OP_OR 0x0
+#define AIC_OP_AND 0x1
+#define AIC_OP_XOR 0x2
+#define AIC_OP_ADD 0x3
+#define AIC_OP_ADC 0x4
+#define AIC_OP_ROL 0x5
+#define AIC_OP_BMOV 0x6
+
+#define AIC_OP_JMP 0x8
+#define AIC_OP_JC 0x9
+#define AIC_OP_JNC 0xa
+#define AIC_OP_CALL 0xb
+#define AIC_OP_JNE 0xc
+#define AIC_OP_JNZ 0xd
+#define AIC_OP_JE 0xe
+#define AIC_OP_JZ 0xf
+
+/* Pseudo Ops */
+#define AIC_OP_SHL 0x10
+#define AIC_OP_SHR 0x20
+#define AIC_OP_ROR 0x30
diff --git a/linux/src/drivers/scsi/aic7xxx_proc.c b/linux/src/drivers/scsi/aic7xxx_proc.c
new file mode 100644
index 0000000..87665d0
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx_proc.c
@@ -0,0 +1,384 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx device driver proc support for Linux.
+ *
+ * Copyright (c) 1995, 1996 Dean W. Gehnert
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * ----------------------------------------------------------------
+ * o Modified from the EATA-DMA /proc support.
+ * o Additional support for device block statistics provided by
+ * Matthew Jacob.
+ * o Correction of overflow by Heinz Mauelshagen
+ * o Additional corrections by Doug Ledford
+ *
+ * Dean W. Gehnert, deang@teleport.com, 05/01/96
+ *
+ * $Id: aic7xxx_proc.c,v 1.1.4.1 2004/01/16 22:41:26 roland Exp $
+ *-M*************************************************************************/
+
+#define BLS (&aic7xxx_buffer[size])
+#define HDRB \
+" < 2K 2K+ 4K+ 8K+ 16K+ 32K+ 64K+ 128K+"
+
+#ifdef PROC_DEBUG
+extern int vsprintf(char *, const char *, va_list);
+
+static void
+proc_debug(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[256];
+
+ va_start(ap, fmt);
+ vsprintf(buf, fmt, ap);
+ printk(buf);
+ va_end(ap);
+}
+#else /* PROC_DEBUG */
+# define proc_debug(fmt, args...)
+#endif /* PROC_DEBUG */
+
+static int aic7xxx_buffer_size = 0;
+static char *aic7xxx_buffer = NULL;
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_set_info
+ *
+ * Description:
+ * Set parameters for the driver from the /proc filesystem.
+ *-F*************************************************************************/
+int
+aic7xxx_set_info(char *buffer, int length, struct Scsi_Host *HBAptr)
+{
+ proc_debug("aic7xxx_set_info(): %s\n", buffer);
+ return (-ENOSYS); /* Currently this is a no-op */
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_proc_info
+ *
+ * Description:
+ * Return information to handle /proc support for the driver.
+ *-F*************************************************************************/
+int
+aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ struct Scsi_Host *HBAptr;
+ struct aic7xxx_host *p;
+ int size = 0;
+ unsigned char i;
+ struct aic7xxx_xferstats *sp;
+ unsigned char target;
+
+ HBAptr = NULL;
+
+ for(p=first_aic7xxx; p->host->host_no != hostno; p=p->next)
+ ;
+
+ if (!p)
+ {
+ size += sprintf(buffer, "Can't find adapter for host number %d\n", hostno);
+ if (size > length)
+ {
+ return (size);
+ }
+ else
+ {
+ return (length);
+ }
+ }
+
+ HBAptr = p->host;
+
+ if (inout == TRUE) /* Has data been written to the file? */
+ {
+ return (aic7xxx_set_info(buffer, length, HBAptr));
+ }
+
+ p = (struct aic7xxx_host *) HBAptr->hostdata;
+
+ /*
+ * It takes roughly 1K of space to hold all relevant card info, not
+ * counting any proc stats, so we start out with a 1.5k buffer size and
+ * if proc_stats is defined, then we sweep the stats structure to see
+ * how many drives we will be printing out for and add 384 bytes per
+ * device with active stats.
+ *
+ * Hmmmm...that 1.5k seems to keep growing as items get added so they
+ * can be easily viewed for debugging purposes. So, we bumped that
+ * 1.5k to 4k so we can quit having to bump it all the time.
+ */
+
+ size = 4096;
+ for (target = 0; target < MAX_TARGETS; target++)
+ {
+ if (p->dev_flags[target] & DEVICE_PRESENT)
+#ifdef AIC7XXX_PROC_STATS
+ size += 512;
+#else
+ size += 256;
+#endif
+ }
+ if (aic7xxx_buffer_size != size)
+ {
+ if (aic7xxx_buffer != NULL)
+ {
+ kfree(aic7xxx_buffer);
+ aic7xxx_buffer_size = 0;
+ }
+ aic7xxx_buffer = kmalloc(size, GFP_KERNEL);
+ }
+ if (aic7xxx_buffer == NULL)
+ {
+ size = sprintf(buffer, "AIC7xxx - kmalloc error at line %d\n",
+ __LINE__);
+ return size;
+ }
+ aic7xxx_buffer_size = size;
+
+ size = 0;
+ size += sprintf(BLS, "Adaptec AIC7xxx driver version: ");
+ size += sprintf(BLS, "%s/", AIC7XXX_C_VERSION);
+ size += sprintf(BLS, "%s", AIC7XXX_H_VERSION);
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Compile Options:\n");
+#ifdef CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+ size += sprintf(BLS, " TCQ Enabled By Default : Enabled\n");
+#else
+ size += sprintf(BLS, " TCQ Enabled By Default : Disabled\n");
+#endif
+#ifdef AIC7XXX_PROC_STATS
+ size += sprintf(BLS, " AIC7XXX_PROC_STATS : Enabled\n");
+#else
+ size += sprintf(BLS, " AIC7XXX_PROC_STATS : Disabled\n");
+#endif
+ size += sprintf(BLS, " AIC7XXX_RESET_DELAY : %d\n", AIC7XXX_RESET_DELAY);
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Adapter Configuration:\n");
+ size += sprintf(BLS, " SCSI Adapter: %s\n",
+ board_names[p->board_name_index]);
+ if (p->flags & AHC_TWIN)
+ size += sprintf(BLS, " Twin Channel\n");
+ else
+ {
+ char *channel = "";
+ char *ultra = "";
+ char *wide = "Narrow ";
+ if (p->flags & AHC_MULTI_CHANNEL)
+ {
+ channel = " Channel A";
+ if (p->flags & (AHC_CHNLB|AHC_CHNLC))
+ channel = (p->flags & AHC_CHNLB) ? " Channel B" : " Channel C";
+ }
+ if (p->features & AHC_WIDE)
+ wide = "Wide ";
+ if (p->features & AHC_ULTRA2)
+ ultra = "Ultra2-LVD/SE ";
+ else if (p->features & AHC_ULTRA)
+ ultra = "Ultra ";
+ size += sprintf(BLS, " %s%sController%s\n",
+ ultra, wide, channel);
+ }
+ if( !(p->maddr) )
+ {
+ size += sprintf(BLS, " Programmed I/O Base: %lx\n", p->base);
+ }
+ else
+ {
+ size += sprintf(BLS, " PCI MMAPed I/O Base: 0x%lx\n", p->mbase);
+ }
+ if( (p->chip & (AHC_VL | AHC_EISA)) )
+ {
+ size += sprintf(BLS, " BIOS Memory Address: 0x%08x\n", p->bios_address);
+ }
+ size += sprintf(BLS, " Adapter SEEPROM Config: %s\n",
+ (p->flags & AHC_SEEPROM_FOUND) ? "SEEPROM found and used." :
+ ((p->flags & AHC_USEDEFAULTS) ? "SEEPROM not found, using defaults." :
+ "SEEPROM not found, using leftover BIOS values.") );
+ size += sprintf(BLS, " Adaptec SCSI BIOS: %s\n",
+ (p->flags & AHC_BIOS_ENABLED) ? "Enabled" : "Disabled");
+ size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq);
+ size += sprintf(BLS, " SCBs: Active %d, Max Active %d,\n",
+ p->activescbs, p->max_activescbs);
+ size += sprintf(BLS, " Allocated %d, HW %d, "
+ "Page %d\n", p->scb_data->numscbs, p->scb_data->maxhscbs,
+ p->scb_data->maxscbs);
+ if (p->flags & AHC_EXTERNAL_SRAM)
+ size += sprintf(BLS, " Using External SCB SRAM\n");
+ size += sprintf(BLS, " Interrupts: %ld", p->isr_count);
+ if (p->chip & AHC_EISA)
+ {
+ size += sprintf(BLS, " %s\n",
+ (p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)");
+ }
+ else
+ {
+ size += sprintf(BLS, "\n");
+ }
+ size += sprintf(BLS, " BIOS Control Word: 0x%04x\n",
+ p->bios_control);
+ size += sprintf(BLS, " Adapter Control Word: 0x%04x\n",
+ p->adapter_control);
+ size += sprintf(BLS, " Extended Translation: %sabled\n",
+ (p->flags & AHC_EXTEND_TRANS_A) ? "En" : "Dis");
+ size += sprintf(BLS, "Disconnect Enable Flags: 0x%04x\n", p->discenable);
+ if (p->features & (AHC_ULTRA | AHC_ULTRA2))
+ {
+ size += sprintf(BLS, " Ultra Enable Flags: 0x%04x\n", p->ultraenb);
+ }
+ size += sprintf(BLS, " Tag Queue Enable Flags: 0x%04x\n", p->tagenable);
+ size += sprintf(BLS, "Ordered Queue Tag Flags: 0x%04x\n", p->orderedtag);
+ size += sprintf(BLS, "Default Tag Queue Depth: %d\n", AIC7XXX_CMDS_PER_DEVICE);
+ size += sprintf(BLS, " Tagged Queue By Device array for aic7xxx host "
+ "instance %d:\n", p->instance);
+ size += sprintf(BLS, " {");
+ for(i=0; i < (MAX_TARGETS - 1); i++)
+ size += sprintf(BLS, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]);
+ size += sprintf(BLS, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]);
+ size += sprintf(BLS, " Actual queue depth per device for aic7xxx host "
+ "instance %d:\n", p->instance);
+ size += sprintf(BLS, " {");
+ for(i=0; i < (MAX_TARGETS - 1); i++)
+ size += sprintf(BLS, "%d,", p->dev_max_queue_depth[i]);
+ size += sprintf(BLS, "%d}\n", p->dev_max_queue_depth[i]);
+
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Statistics:\n\n");
+ for (target = 0; target < MAX_TARGETS; target++)
+ {
+ sp = &p->stats[target];
+ if ((p->dev_flags[target] & DEVICE_PRESENT) == 0)
+ {
+ continue;
+ }
+ if (p->features & AHC_TWIN)
+ {
+ size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
+ p->host_no, (target >> 3), (target & 0x7), 0);
+ }
+ else
+ {
+ size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
+ p->host_no, 0, target, 0);
+ }
+ size += sprintf(BLS, " Device using %s/%s",
+ (p->transinfo[target].cur_width == MSG_EXT_WDTR_BUS_16_BIT) ?
+ "Wide" : "Narrow",
+ (p->transinfo[target].cur_offset != 0) ?
+ "Sync transfers at " : "Async transfers.\n" );
+ if (p->transinfo[target].cur_offset != 0)
+ {
+ struct aic7xxx_syncrate *sync_rate;
+ int period = p->transinfo[target].cur_period;
+ int rate = (p->transinfo[target].cur_width ==
+ MSG_EXT_WDTR_BUS_16_BIT) ? 1 : 0;
+
+ sync_rate = aic7xxx_find_syncrate(p, &period, AHC_SYNCRATE_ULTRA2);
+ if (sync_rate != NULL)
+ {
+ size += sprintf(BLS, "%s MByte/sec, offset %d\n",
+ sync_rate->rate[rate],
+ p->transinfo[target].cur_offset );
+ }
+ else
+ {
+ size += sprintf(BLS, "3.3 MByte/sec, offset %d\n",
+ p->transinfo[target].cur_offset );
+ }
+ }
+ size += sprintf(BLS, " Transinfo settings: ");
+ size += sprintf(BLS, "current(%d/%d/%d), ",
+ p->transinfo[target].cur_period,
+ p->transinfo[target].cur_offset,
+ p->transinfo[target].cur_width);
+ size += sprintf(BLS, "goal(%d/%d/%d), ",
+ p->transinfo[target].goal_period,
+ p->transinfo[target].goal_offset,
+ p->transinfo[target].goal_width);
+ size += sprintf(BLS, "user(%d/%d/%d)\n",
+ p->transinfo[target].user_period,
+ p->transinfo[target].user_offset,
+ p->transinfo[target].user_width);
+#ifdef AIC7XXX_PROC_STATS
+ size += sprintf(BLS, " Total transfers %ld (%ld reads and %ld writes)\n",
+ sp->r_total + sp->w_total, sp->r_total, sp->w_total);
+ size += sprintf(BLS, "%s\n", HDRB);
+ size += sprintf(BLS, " Reads:");
+ for (i = 0; i < NUMBER(sp->r_bins); i++)
+ {
+ size += sprintf(BLS, " %7ld", sp->r_bins[i]);
+ }
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, " Writes:");
+ for (i = 0; i < NUMBER(sp->w_bins); i++)
+ {
+ size += sprintf(BLS, " %7ld", sp->w_bins[i]);
+ }
+ size += sprintf(BLS, "\n");
+#else
+ size += sprintf(BLS, " Total transfers %ld (%ld reads and %ld writes)\n",
+ sp->r_total + sp->w_total, sp->r_total, sp->w_total);
+#endif /* AIC7XXX_PROC_STATS */
+ size += sprintf(BLS, "\n\n");
+ }
+
+ if (size >= aic7xxx_buffer_size)
+ {
+ printk(KERN_WARNING "aic7xxx: Overflow in aic7xxx_proc.c\n");
+ }
+
+ if (offset > size - 1)
+ {
+ kfree(aic7xxx_buffer);
+ aic7xxx_buffer = NULL;
+ aic7xxx_buffer_size = length = 0;
+ *start = NULL;
+ }
+ else
+ {
+ *start = &aic7xxx_buffer[offset]; /* Start of wanted data */
+ if (size - offset < length)
+ {
+ length = size - offset;
+ }
+ }
+
+ return (length);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/aic7xxx_reg.h b/linux/src/drivers/scsi/aic7xxx_reg.h
new file mode 100644
index 0000000..d12d1b6
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx_reg.h
@@ -0,0 +1,587 @@
+/*
+ * DO NOT EDIT - This file is automatically generated.
+ */
+
+#define SCSISEQ 0x00
+#define TEMODE 0x80
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRSELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+#define SCSIRSTO 0x01
+
+#define SXFRCTL0 0x01
+#define DFON 0x80
+#define DFPEXP 0x40
+#define FAST20 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define SCAMEN 0x04
+#define CLRCHN 0x02
+
+#define SXFRCTL1 0x02
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18
+#define ENSTIMER 0x04
+#define ACTNEGEN 0x02
+#define STPWEN 0x01
+
+#define SCSISIGO 0x03
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+#define SCSISIGI 0x03
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+#define SCSIRATE 0x04
+#define WIDEXFER 0x80
+#define SXFR_ULTRA2 0x7f
+#define SXFR 0x70
+#define SOFS 0x0f
+
+#define SCSIID 0x05
+#define SCSIOFFSET 0x05
+#define SOFS_ULTRA2 0x7f
+
+#define SCSIDATL 0x06
+
+#define SCSIDATH 0x07
+
+#define STCNT 0x08
+
+#define CLRSINT0 0x0b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRSWRAP 0x08
+#define CLRSPIORDY 0x02
+
+#define SSTAT0 0x0b
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define IOERR 0x08
+#define SWRAP 0x08
+#define SDONE 0x04
+#define SPIORDY 0x02
+#define DMADONE 0x01
+
+#define CLRSINT1 0x0c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+#define SSTAT1 0x0c
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+#define SSTAT2 0x0d
+#define OVERRUN 0x80
+#define SFCNT 0x1f
+#define EXP_ACTIVE 0x10
+
+#define SSTAT3 0x0e
+#define SCSICNT 0xf0
+#define OFFCNT 0x0f
+
+#define SCSIID_ULTRA2 0x0f
+#define OID 0x0f
+
+#define SIMODE0 0x10
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENIOERR 0x08
+#define ENSWRAP 0x08
+#define ENSDONE 0x04
+#define ENSPIORDY 0x02
+#define ENDMADONE 0x01
+
+#define SIMODE1 0x11
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+#define SCSIBUSL 0x12
+
+#define SCSIBUSH 0x13
+
+#define SHADDR 0x14
+
+#define SELTIMER 0x18
+#define STAGE6 0x20
+#define STAGE5 0x10
+#define STAGE4 0x08
+#define STAGE3 0x04
+#define STAGE2 0x02
+#define STAGE1 0x01
+
+#define SELID 0x19
+#define SELID_MASK 0xf0
+#define ONEBIT 0x08
+
+#define SPIOCAP 0x1b
+#define SOFT1 0x80
+#define SOFT0 0x40
+#define SOFTCMDEN 0x20
+#define HAS_BRDCTL 0x10
+#define SEEPROM 0x08
+#define EEPROM 0x04
+#define ROM 0x02
+#define SSPIOCPS 0x01
+
+#define BRDCTL 0x1d
+#define BRDDAT7 0x80
+#define BRDDAT6 0x40
+#define BRDDAT5 0x20
+#define BRDDAT4 0x10
+#define BRDSTB 0x10
+#define BRDCS 0x08
+#define BRDDAT3 0x08
+#define BRDDAT2 0x04
+#define BRDRW 0x04
+#define BRDRW_ULTRA2 0x02
+#define BRDCTL1 0x02
+#define BRDSTB_ULTRA2 0x01
+#define BRDCTL0 0x01
+
+#define SEECTL 0x1e
+#define EXTARBACK 0x80
+#define EXTARBREQ 0x40
+#define SEEMS 0x20
+#define SEERDY 0x10
+#define SEECS 0x08
+#define SEECK 0x04
+#define SEEDO 0x02
+#define SEEDI 0x01
+
+#define SBLKCTL 0x1f
+#define DIAGLEDEN 0x80
+#define DIAGLEDON 0x40
+#define AUTOFLUSHDIS 0x20
+#define ENAB40 0x08
+#define ENAB20 0x04
+#define SELWIDE 0x02
+#define XCVR 0x01
+
+#define SRAM_BASE 0x20
+
+#define TARG_SCSIRATE 0x20
+
+#define ULTRA_ENB 0x30
+
+#define DISC_DSB 0x32
+
+#define MSG_OUT 0x34
+
+#define DMAPARAMS 0x35
+#define PRELOADEN 0x80
+#define WIDEODD 0x40
+#define SCSIEN 0x20
+#define SDMAENACK 0x10
+#define SDMAEN 0x10
+#define HDMAEN 0x08
+#define HDMAENACK 0x08
+#define DIRECTION 0x04
+#define FIFOFLUSH 0x02
+#define FIFORESET 0x01
+
+#define SEQ_FLAGS 0x36
+#define IDENTIFY_SEEN 0x80
+#define SCBPTR_VALID 0x20
+#define DPHASE 0x10
+#define AMTARGET 0x08
+#define WIDE_BUS 0x02
+#define TWIN_BUS 0x01
+
+#define SAVED_TCL 0x37
+
+#define SG_COUNT 0x38
+
+#define SG_NEXT 0x39
+
+#define LASTPHASE 0x3d
+#define P_MESGIN 0xe0
+#define PHASE_MASK 0xe0
+#define P_STATUS 0xc0
+#define P_MESGOUT 0xa0
+#define P_COMMAND 0x80
+#define CDI 0x80
+#define IOI 0x40
+#define P_DATAIN 0x40
+#define MSGI 0x20
+#define P_BUSFREE 0x01
+#define P_DATAOUT 0x00
+
+#define WAITING_SCBH 0x3e
+
+#define DISCONNECTED_SCBH 0x3f
+
+#define FREE_SCBH 0x40
+
+#define HSCB_ADDR 0x41
+
+#define SCBID_ADDR 0x45
+
+#define TMODE_CMDADDR 0x49
+
+#define KERNEL_QINPOS 0x4d
+
+#define QINPOS 0x4e
+
+#define QOUTPOS 0x4f
+
+#define TMODE_CMDADDR_NEXT 0x50
+
+#define ARG_1 0x51
+#define RETURN_1 0x51
+#define SEND_MSG 0x80
+#define SEND_SENSE 0x40
+#define SEND_REJ 0x20
+#define MSGOUT_PHASEMIS 0x10
+
+#define ARG_2 0x52
+#define RETURN_2 0x52
+
+#define LAST_MSG 0x53
+
+#define PREFETCH_CNT 0x54
+
+#define SCSICONF 0x5a
+#define TERM_ENB 0x80
+#define RESET_SCSI 0x40
+#define HWSCSIID 0x0f
+#define HSCSIID 0x07
+
+#define HOSTCONF 0x5d
+
+#define HA_274_BIOSCTRL 0x5f
+#define BIOSMODE 0x30
+#define BIOSDISABLED 0x30
+#define CHANNEL_B_PRIMARY 0x08
+
+#define SEQCTL 0x60
+#define PERRORDIS 0x80
+#define PAUSEDIS 0x40
+#define FAILDIS 0x20
+#define FASTMODE 0x10
+#define BRKADRINTEN 0x08
+#define STEP 0x04
+#define SEQRESET 0x02
+#define LOADRAM 0x01
+
+#define SEQRAM 0x61
+
+#define SEQADDR0 0x62
+
+#define SEQADDR1 0x63
+#define SEQADDR1_MASK 0x01
+
+#define ACCUM 0x64
+
+#define SINDEX 0x65
+
+#define DINDEX 0x66
+
+#define ALLONES 0x69
+
+#define ALLZEROS 0x6a
+
+#define NONE 0x6a
+
+#define FLAGS 0x6b
+#define ZERO 0x02
+#define CARRY 0x01
+
+#define SINDIR 0x6c
+
+#define DINDIR 0x6d
+
+#define FUNCTION1 0x6e
+
+#define STACK 0x6f
+
+#define TARG_OFFSET 0x70
+
+#define BCTL 0x84
+#define ACE 0x08
+#define ENABLE 0x01
+
+#define DSCOMMAND0 0x84
+#define INTSCBRAMSEL 0x08
+#define RAMPS 0x04
+#define USCBSIZE32 0x02
+#define CIOPARCKEN 0x01
+
+#define DSCOMMAND 0x84
+#define CACHETHEN 0x80
+#define DPARCKEN 0x40
+#define MPARCKEN 0x20
+#define EXTREQLCK 0x10
+
+#define BUSTIME 0x85
+#define BOFF 0xf0
+#define BON 0x0f
+
+#define BUSSPD 0x86
+#define DFTHRSH 0xc0
+#define STBOFF 0x38
+#define STBON 0x07
+
+#define DSPCISTATUS 0x86
+#define DFTHRSH_100 0xc0
+
+#define HCNTRL 0x87
+#define POWRDN 0x40
+#define SWINT 0x10
+#define IRQMS 0x08
+#define PAUSE 0x04
+#define INTEN 0x02
+#define CHIPRST 0x01
+#define CHIPRSTACK 0x01
+
+#define HADDR 0x88
+
+#define HCNT 0x8c
+
+#define SCBPTR 0x90
+
+#define INTSTAT 0x91
+#define SEQINT_MASK 0xf1
+#define DATA_OVERRUN 0xe1
+#define MSGIN_PHASEMIS 0xd1
+#define TRACEPOINT2 0xc1
+#define TRACEPOINT 0xb1
+#define AWAITING_MSG 0xa1
+#define RESIDUAL 0x81
+#define BAD_STATUS 0x71
+#define REJECT_MSG 0x61
+#define ABORT_REQUESTED 0x51
+#define EXTENDED_MSG 0x41
+#define NO_MATCH 0x31
+#define NO_IDENT 0x21
+#define SEND_REJECT 0x11
+#define INT_PEND 0x0f
+#define BRKADRINT 0x08
+#define SCSIINT 0x04
+#define CMDCMPLT 0x02
+#define BAD_PHASE 0x01
+#define SEQINT 0x01
+
+#define CLRINT 0x92
+#define CLRPARERR 0x10
+#define CLRBRKADRINT 0x08
+#define CLRSCSIINT 0x04
+#define CLRCMDINT 0x02
+#define CLRSEQINT 0x01
+
+#define ERROR 0x92
+#define CIOPARERR 0x80
+#define PCIERRSTAT 0x40
+#define MPARERR 0x20
+#define DPARERR 0x10
+#define SQPARERR 0x08
+#define ILLOPCODE 0x04
+#define ILLSADDR 0x02
+#define ILLHADDR 0x01
+
+#define DFCNTRL 0x93
+
+#define DFSTATUS 0x94
+#define PRELOAD_AVAIL 0x80
+#define DWORDEMP 0x20
+#define MREQPEND 0x10
+#define HDONE 0x08
+#define DFTHRESH 0x04
+#define FIFOFULL 0x02
+#define FIFOEMP 0x01
+
+#define DFDAT 0x99
+
+#define SCBCNT 0x9a
+#define SCBAUTO 0x80
+#define SCBCNT_MASK 0x1f
+
+#define QINFIFO 0x9b
+
+#define QINCNT 0x9c
+
+#define QOUTFIFO 0x9d
+
+#define QOUTCNT 0x9e
+
+#define SFUNCT 0x9f
+
+#define SCB_CONTROL 0xa0
+#define MK_MESSAGE 0x80
+#define DISCENB 0x40
+#define TAG_ENB 0x20
+#define DISCONNECTED 0x04
+#define SCB_TAG_TYPE 0x03
+
+#define SCB_BASE 0xa0
+
+#define SCB_TCL 0xa1
+#define TID 0xf0
+#define SELBUSB 0x08
+#define LID 0x07
+
+#define SCB_TARGET_STATUS 0xa2
+
+#define SCB_SGCOUNT 0xa3
+
+#define SCB_SGPTR 0xa4
+
+#define SCB_RESID_SGCNT 0xa8
+
+#define SCB_RESID_DCNT 0xa9
+
+#define SCB_DATAPTR 0xac
+
+#define SCB_DATACNT 0xb0
+
+#define SCB_CMDPTR 0xb4
+
+#define SCB_CMDLEN 0xb8
+
+#define SCB_TAG 0xb9
+
+#define SCB_NEXT 0xba
+
+#define SCB_PREV 0xbb
+
+#define SCB_BUSYTARGETS 0xbc
+
+#define SEECTL_2840 0xc0
+#define CS_2840 0x04
+#define CK_2840 0x02
+#define DO_2840 0x01
+
+#define STATUS_2840 0xc1
+#define EEPROM_TF 0x80
+#define BIOS_SEL 0x60
+#define ADSEL 0x1e
+#define DI_2840 0x01
+
+#define CCHADDR 0xe0
+
+#define CCHCNT 0xe8
+
+#define CCSGRAM 0xe9
+
+#define CCSGADDR 0xea
+
+#define CCSGCTL 0xeb
+#define CCSGDONE 0x80
+#define CCSGEN 0x08
+#define FLAG 0x02
+#define CCSGRESET 0x01
+
+#define CCSCBRAM 0xec
+
+#define CCSCBADDR 0xed
+
+#define CCSCBCTL 0xee
+#define CCSCBDONE 0x80
+#define ARRDONE 0x40
+#define CCARREN 0x10
+#define CCSCBEN 0x08
+#define CCSCBDIR 0x04
+#define CCSCBRESET 0x01
+
+#define CCSCBCNT 0xef
+
+#define CCSCBPTR 0xf1
+
+#define HNSCB_QOFF 0xf4
+
+#define SNSCB_QOFF 0xf6
+
+#define SDSCB_QOFF 0xf8
+
+#define QOFF_CTLSTA 0xfa
+#define SCB_AVAIL 0x40
+#define SNSCB_ROLLOVER 0x20
+#define SDSCB_ROLLOVER 0x10
+#define SCB_QSIZE 0x07
+#define SCB_QSIZE_256 0x06
+
+#define DFF_THRSH 0xfb
+#define WR_DFTHRSH 0x70
+#define WR_DFTHRSH_MAX 0x70
+#define WR_DFTHRSH_90 0x60
+#define WR_DFTHRSH_85 0x50
+#define WR_DFTHRSH_75 0x40
+#define WR_DFTHRSH_63 0x30
+#define WR_DFTHRSH_50 0x20
+#define WR_DFTHRSH_25 0x10
+#define RD_DFTHRSH_MAX 0x07
+#define RD_DFTHRSH 0x07
+#define RD_DFTHRSH_90 0x06
+#define RD_DFTHRSH_85 0x05
+#define RD_DFTHRSH_75 0x04
+#define RD_DFTHRSH_63 0x03
+#define RD_DFTHRSH_50 0x02
+#define RD_DFTHRSH_25 0x01
+#define WR_DFTHRSH_MIN 0x00
+#define RD_DFTHRSH_MIN 0x00
+
+#define SG_CACHEPTR 0xfc
+#define SG_USER_DATA 0xfc
+#define LAST_SEG 0x02
+#define LAST_SEG_DONE 0x01
+
+
+#define CMD_GROUP2_BYTE_DELTA 0xfa
+#define MAX_OFFSET_8BIT 0x0f
+#define BUS_16_BIT 0x01
+#define QINFIFO_OFFSET 0x02
+#define CMD_GROUP5_BYTE_DELTA 0x0b
+#define CMD_GROUP_CODE_SHIFT 0x05
+#define MAX_OFFSET_ULTRA2 0x7f
+#define MAX_OFFSET_16BIT 0x08
+#define BUS_8_BIT 0x00
+#define QOUTFIFO_OFFSET 0x01
+#define UNTAGGEDSCB_OFFSET 0x00
+#define CCSGRAM_MAXSEGS 0x10
+#define SCB_LIST_NULL 0xff
+#define SG_SIZEOF 0x08
+#define CMD_GROUP4_BYTE_DELTA 0x04
+#define CMD_GROUP0_BYTE_DELTA 0xfc
+#define HOST_MSG 0xff
+#define BUS_32_BIT 0x02
+#define CCSGADDR_MAX 0x80
+
+
+/* Downloaded Constant Definitions */
+#define TMODE_NUMCMDS 0x00
diff --git a/linux/src/drivers/scsi/aic7xxx_seq.c b/linux/src/drivers/scsi/aic7xxx_seq.c
new file mode 100644
index 0000000..9205cc4
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx_seq.c
@@ -0,0 +1,769 @@
+/*
+ * DO NOT EDIT - This file is automatically generated.
+ */
+static unsigned char seqprog[] = {
+ 0xff, 0x6a, 0x06, 0x08,
+ 0x32, 0x6a, 0x00, 0x00,
+ 0x12, 0x6a, 0x00, 0x00,
+ 0xff, 0x6a, 0xd6, 0x09,
+ 0xff, 0x6a, 0xdc, 0x09,
+ 0x00, 0x65, 0x38, 0x59,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xff, 0x4e, 0xc8, 0x08,
+ 0xbf, 0x60, 0xc0, 0x08,
+ 0x60, 0x0b, 0x7c, 0x68,
+ 0x40, 0x00, 0x0e, 0x68,
+ 0x08, 0x1f, 0x3e, 0x10,
+ 0x60, 0x0b, 0x7c, 0x68,
+ 0x40, 0x00, 0x0e, 0x68,
+ 0x08, 0x1f, 0x3e, 0x10,
+ 0xff, 0x3e, 0x3e, 0x60,
+ 0x40, 0xfa, 0x10, 0x78,
+ 0xff, 0xf6, 0xd4, 0x08,
+ 0x01, 0x4e, 0x9c, 0x18,
+ 0x40, 0x60, 0xc0, 0x00,
+ 0x00, 0x4d, 0x10, 0x70,
+ 0x01, 0x4e, 0x9c, 0x18,
+ 0xbf, 0x60, 0xc0, 0x08,
+ 0x00, 0x6a, 0x72, 0x5c,
+ 0xff, 0x4e, 0xc8, 0x18,
+ 0x02, 0x6a, 0x88, 0x5b,
+ 0xff, 0x52, 0x20, 0x09,
+ 0x0d, 0x6a, 0x6a, 0x00,
+ 0x00, 0x52, 0xfe, 0x5b,
+ 0xff, 0x3e, 0x74, 0x09,
+ 0xff, 0x90, 0x7c, 0x08,
+ 0xff, 0x3e, 0x20, 0x09,
+ 0x00, 0x65, 0x44, 0x58,
+ 0x00, 0x65, 0x0e, 0x40,
+ 0xf7, 0x1f, 0xca, 0x08,
+ 0x08, 0xa1, 0xc8, 0x08,
+ 0x00, 0x65, 0xca, 0x00,
+ 0xff, 0x65, 0x3e, 0x08,
+ 0xf0, 0xa1, 0xc8, 0x08,
+ 0x0f, 0x0f, 0x1e, 0x08,
+ 0x00, 0x0f, 0x1e, 0x00,
+ 0xf0, 0xa1, 0xc8, 0x08,
+ 0x0f, 0x05, 0x0a, 0x08,
+ 0x00, 0x05, 0x0a, 0x00,
+ 0x5a, 0x6a, 0x00, 0x04,
+ 0x12, 0x65, 0xc8, 0x00,
+ 0x00, 0x01, 0x02, 0x00,
+ 0x31, 0x6a, 0xca, 0x00,
+ 0x80, 0x37, 0x64, 0x68,
+ 0xff, 0x65, 0xca, 0x18,
+ 0xff, 0x37, 0xdc, 0x08,
+ 0xff, 0x6e, 0xc8, 0x08,
+ 0x00, 0x6c, 0x6c, 0x78,
+ 0x20, 0x01, 0x02, 0x00,
+ 0x4c, 0x37, 0xc8, 0x28,
+ 0x08, 0x1f, 0x74, 0x78,
+ 0x08, 0x37, 0x6e, 0x00,
+ 0x08, 0x64, 0xc8, 0x00,
+ 0x70, 0x64, 0xca, 0x18,
+ 0xff, 0x6c, 0x0a, 0x08,
+ 0x20, 0x64, 0xca, 0x18,
+ 0xff, 0x6c, 0x08, 0x0c,
+ 0x40, 0x0b, 0x04, 0x69,
+ 0x80, 0x0b, 0xf6, 0x78,
+ 0xa4, 0x6a, 0x06, 0x00,
+ 0x40, 0x6a, 0x16, 0x00,
+ 0x10, 0x03, 0xf2, 0x78,
+ 0xff, 0x50, 0xc8, 0x08,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x49, 0x6a, 0xee, 0x5b,
+ 0x01, 0x6a, 0x26, 0x01,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x02, 0x0b, 0x92, 0x78,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xff, 0x06, 0xcc, 0x08,
+ 0xff, 0x66, 0x32, 0x09,
+ 0x01, 0x65, 0xca, 0x18,
+ 0x80, 0x66, 0xa0, 0x78,
+ 0xff, 0x66, 0xa2, 0x08,
+ 0x10, 0x03, 0x90, 0x68,
+ 0xfc, 0x65, 0xc8, 0x18,
+ 0x00, 0x65, 0xa8, 0x48,
+ 0xff, 0x6a, 0x32, 0x01,
+ 0x01, 0x64, 0x18, 0x19,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x84, 0x6a, 0x06, 0x00,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x02, 0x0b, 0xb2, 0x78,
+ 0xff, 0x06, 0xc8, 0x08,
+ 0xff, 0x64, 0x32, 0x09,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x5b, 0x64, 0xc8, 0x28,
+ 0x00, 0x62, 0xc4, 0x18,
+ 0xfc, 0x65, 0xca, 0x18,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0xfa, 0x65, 0xca, 0x18,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x04, 0x65, 0xca, 0x18,
+ 0x0b, 0x65, 0xca, 0x18,
+ 0xff, 0x65, 0xc8, 0x08,
+ 0x00, 0x8c, 0x18, 0x19,
+ 0x02, 0x0b, 0xce, 0x78,
+ 0x01, 0x65, 0xd4, 0x60,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xff, 0x06, 0x32, 0x09,
+ 0xff, 0x65, 0xca, 0x18,
+ 0xff, 0x65, 0xce, 0x68,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0x40, 0x51, 0xe6, 0x78,
+ 0xe4, 0x6a, 0x06, 0x00,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x04, 0x6a, 0x18, 0x5b,
+ 0x01, 0x50, 0xa0, 0x18,
+ 0x00, 0x50, 0xec, 0xe0,
+ 0xff, 0x6a, 0xa0, 0x08,
+ 0xff, 0x6a, 0x3a, 0x01,
+ 0x02, 0x6a, 0x22, 0x01,
+ 0x40, 0x51, 0xf2, 0x68,
+ 0xff, 0x6a, 0x06, 0x08,
+ 0x00, 0x65, 0x0e, 0x40,
+ 0x20, 0x6a, 0x16, 0x00,
+ 0xf0, 0x19, 0x6e, 0x08,
+ 0x08, 0x6a, 0x18, 0x00,
+ 0x08, 0x11, 0x22, 0x00,
+ 0x08, 0x6a, 0x5a, 0x58,
+ 0x08, 0x6a, 0x68, 0x00,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x12, 0x6a, 0x00, 0x00,
+ 0x40, 0x6a, 0x16, 0x00,
+ 0xff, 0x3e, 0x20, 0x09,
+ 0xff, 0xba, 0x7c, 0x08,
+ 0xff, 0xa1, 0x6e, 0x08,
+ 0x08, 0x6a, 0x18, 0x00,
+ 0x08, 0x11, 0x22, 0x00,
+ 0x08, 0x6a, 0x5a, 0x58,
+ 0x80, 0x6a, 0x68, 0x00,
+ 0x80, 0x36, 0x6c, 0x00,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xff, 0x3d, 0xc8, 0x08,
+ 0xbf, 0x64, 0x48, 0x79,
+ 0x80, 0x64, 0xf0, 0x71,
+ 0xa0, 0x64, 0x0e, 0x72,
+ 0xc0, 0x64, 0x08, 0x72,
+ 0xe0, 0x64, 0x52, 0x72,
+ 0x01, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xf7, 0x11, 0x22, 0x08,
+ 0x00, 0x65, 0x38, 0x59,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0x09, 0x0c, 0x32, 0x79,
+ 0x08, 0x0c, 0x0e, 0x68,
+ 0x01, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0x26, 0x09,
+ 0xff, 0x6a, 0x08, 0x08,
+ 0xdf, 0x01, 0x02, 0x08,
+ 0x01, 0x6a, 0x7a, 0x00,
+ 0x03, 0x36, 0x6c, 0x0c,
+ 0x08, 0x6a, 0xcc, 0x00,
+ 0xa9, 0x6a, 0xe8, 0x5b,
+ 0x00, 0x65, 0x66, 0x41,
+ 0xa8, 0x6a, 0x6a, 0x00,
+ 0x79, 0x6a, 0x6a, 0x00,
+ 0x40, 0x3d, 0x50, 0x69,
+ 0x04, 0x35, 0x6a, 0x00,
+ 0x00, 0x65, 0x3a, 0x5b,
+ 0x80, 0x6a, 0xd4, 0x01,
+ 0x10, 0x36, 0x42, 0x69,
+ 0x10, 0x36, 0x6c, 0x00,
+ 0x07, 0xac, 0x10, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0xac, 0x6a, 0xe0, 0x5b,
+ 0x00, 0x65, 0xda, 0x5b,
+ 0xff, 0xa3, 0x70, 0x08,
+ 0x39, 0x6a, 0xcc, 0x00,
+ 0xa4, 0x6a, 0xe6, 0x5b,
+ 0xff, 0x38, 0x74, 0x69,
+ 0x80, 0x02, 0x04, 0x00,
+ 0xe7, 0x35, 0x6a, 0x08,
+ 0x03, 0x69, 0x18, 0x31,
+ 0xff, 0x6a, 0x10, 0x00,
+ 0xff, 0x6a, 0x12, 0x00,
+ 0xff, 0x6a, 0x14, 0x00,
+ 0x01, 0x38, 0x7a, 0x61,
+ 0x02, 0xfc, 0xf8, 0x01,
+ 0xbf, 0x35, 0x6a, 0x08,
+ 0xff, 0x69, 0xca, 0x08,
+ 0xff, 0x35, 0x26, 0x09,
+ 0x04, 0x0b, 0x7e, 0x69,
+ 0x04, 0x0b, 0x8a, 0x69,
+ 0x10, 0x0c, 0x80, 0x79,
+ 0x04, 0x0b, 0x88, 0x69,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x00, 0x35, 0x22, 0x5b,
+ 0x80, 0x02, 0xd6, 0x69,
+ 0xff, 0x65, 0xc8, 0x79,
+ 0xff, 0x38, 0x70, 0x18,
+ 0xff, 0x38, 0xc8, 0x79,
+ 0x80, 0xea, 0xaa, 0x61,
+ 0xef, 0x38, 0xc8, 0x18,
+ 0x80, 0x6a, 0xc8, 0x00,
+ 0x00, 0x65, 0x9c, 0x49,
+ 0x33, 0x38, 0xc8, 0x28,
+ 0xff, 0x64, 0xd0, 0x09,
+ 0x04, 0x39, 0xc0, 0x31,
+ 0x09, 0x6a, 0xd6, 0x01,
+ 0x80, 0xeb, 0xa2, 0x79,
+ 0xf7, 0xeb, 0xd6, 0x09,
+ 0x08, 0xeb, 0xa6, 0x69,
+ 0x01, 0x6a, 0xd6, 0x01,
+ 0x08, 0xe9, 0x10, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x39, 0x6a, 0xe6, 0x5b,
+ 0x08, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x0d, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0x88, 0x6a, 0x54, 0x5c,
+ 0x00, 0x65, 0xda, 0x5b,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x08, 0x39, 0x72, 0x18,
+ 0x00, 0x3a, 0x74, 0x20,
+ 0x10, 0x0c, 0x66, 0x79,
+ 0x80, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0xe0, 0x59,
+ 0xff, 0x08, 0x52, 0x09,
+ 0xff, 0x09, 0x54, 0x09,
+ 0xff, 0x0a, 0x56, 0x09,
+ 0xff, 0x38, 0x50, 0x09,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0xe0, 0x59,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x7f, 0x02, 0x04, 0x08,
+ 0xe1, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x04, 0x93, 0xea, 0x69,
+ 0xdf, 0x93, 0x26, 0x09,
+ 0x20, 0x93, 0xe4, 0x69,
+ 0x02, 0x93, 0x26, 0x01,
+ 0x01, 0x94, 0xe6, 0x79,
+ 0xd7, 0x93, 0x26, 0x09,
+ 0x08, 0x93, 0xec, 0x69,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x00, 0x65, 0x3a, 0x5b,
+ 0x02, 0xfc, 0xf8, 0x01,
+ 0x05, 0xb4, 0x10, 0x31,
+ 0x02, 0x6a, 0x1a, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0xb4, 0x6a, 0xe4, 0x5b,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x00, 0x65, 0xda, 0x5b,
+ 0x3d, 0x6a, 0x22, 0x5b,
+ 0xac, 0x6a, 0x22, 0x5b,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0x3a, 0x5b,
+ 0xff, 0x06, 0x44, 0x09,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xff, 0x34, 0xca, 0x08,
+ 0x80, 0x65, 0x32, 0x62,
+ 0x0f, 0xa1, 0xca, 0x08,
+ 0x07, 0xa1, 0xca, 0x08,
+ 0x40, 0xa0, 0xc8, 0x08,
+ 0x00, 0x65, 0xca, 0x00,
+ 0x80, 0x65, 0xca, 0x00,
+ 0x80, 0xa0, 0x22, 0x7a,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x00, 0x65, 0x34, 0x42,
+ 0x20, 0xa0, 0x3a, 0x7a,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xa0, 0x3d, 0x46, 0x62,
+ 0x23, 0xa0, 0x0c, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xa0, 0x3d, 0x46, 0x62,
+ 0x00, 0xb9, 0x3a, 0x42,
+ 0xff, 0x65, 0x3a, 0x62,
+ 0xa1, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x10, 0x51, 0x46, 0x72,
+ 0x40, 0x6a, 0x18, 0x00,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xa0, 0x3d, 0x46, 0x62,
+ 0x10, 0x3d, 0x06, 0x00,
+ 0x00, 0x65, 0x0e, 0x42,
+ 0x40, 0x6a, 0x18, 0x00,
+ 0xff, 0x34, 0xa6, 0x08,
+ 0x80, 0x34, 0x4e, 0x62,
+ 0x7f, 0xa0, 0x40, 0x09,
+ 0x08, 0x6a, 0x68, 0x00,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x64, 0x6a, 0x12, 0x5b,
+ 0x80, 0x64, 0xbe, 0x6a,
+ 0x04, 0x64, 0xa4, 0x72,
+ 0x02, 0x64, 0xaa, 0x72,
+ 0x00, 0x6a, 0x6c, 0x72,
+ 0x03, 0x64, 0xba, 0x72,
+ 0x01, 0x64, 0xa0, 0x72,
+ 0x07, 0x64, 0x00, 0x73,
+ 0x08, 0x64, 0x68, 0x72,
+ 0x11, 0x6a, 0x22, 0x01,
+ 0x07, 0x6a, 0x04, 0x5b,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xff, 0xa8, 0x70, 0x6a,
+ 0xff, 0xa2, 0x88, 0x7a,
+ 0x01, 0x6a, 0x6a, 0x00,
+ 0x00, 0xb9, 0xfe, 0x5b,
+ 0xff, 0xa2, 0x88, 0x7a,
+ 0x71, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x40, 0x51, 0x88, 0x62,
+ 0x0d, 0x6a, 0x6a, 0x00,
+ 0x00, 0xb9, 0xfe, 0x5b,
+ 0xff, 0x3e, 0x74, 0x09,
+ 0xff, 0x90, 0x7c, 0x08,
+ 0x00, 0x65, 0x44, 0x58,
+ 0x00, 0x65, 0x2a, 0x41,
+ 0x20, 0xa0, 0x90, 0x6a,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0x00, 0x6a, 0xa8, 0x5b,
+ 0xff, 0x6a, 0xbe, 0x5b,
+ 0xff, 0xf8, 0xc8, 0x08,
+ 0xff, 0x4f, 0xc8, 0x08,
+ 0x01, 0x6a, 0xa8, 0x5b,
+ 0x00, 0xb9, 0xbe, 0x5b,
+ 0x01, 0x4f, 0x9e, 0x18,
+ 0x02, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x6c, 0x5c,
+ 0x00, 0x65, 0x2a, 0x41,
+ 0x41, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x04, 0xa0, 0x40, 0x01,
+ 0x00, 0x65, 0x84, 0x5c,
+ 0x00, 0x65, 0x2a, 0x41,
+ 0x10, 0x36, 0x68, 0x7a,
+ 0xff, 0x38, 0x46, 0x09,
+ 0xa4, 0x6a, 0xcc, 0x00,
+ 0x39, 0x6a, 0xe6, 0x5b,
+ 0xac, 0x6a, 0xcc, 0x00,
+ 0x14, 0x6a, 0xe6, 0x5b,
+ 0xa9, 0x6a, 0xe8, 0x5b,
+ 0x00, 0x65, 0x68, 0x42,
+ 0xef, 0x36, 0x6c, 0x08,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x0f, 0x64, 0xc8, 0x08,
+ 0x07, 0x64, 0xc8, 0x08,
+ 0x00, 0x37, 0x6e, 0x00,
+ 0x00, 0x65, 0x78, 0x5b,
+ 0xff, 0x51, 0xce, 0x72,
+ 0x20, 0x36, 0xde, 0x7a,
+ 0x00, 0x90, 0x5c, 0x5b,
+ 0x00, 0x65, 0xe0, 0x42,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xe0, 0x3d, 0xfa, 0x62,
+ 0x20, 0x12, 0xfa, 0x62,
+ 0x51, 0x6a, 0x08, 0x5b,
+ 0xff, 0x51, 0x20, 0x09,
+ 0x20, 0xa0, 0xfa, 0x7a,
+ 0x00, 0x90, 0x5c, 0x5b,
+ 0x00, 0x65, 0x56, 0x5b,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0x00, 0xa1, 0xf2, 0x62,
+ 0x04, 0xa0, 0xf2, 0x7a,
+ 0xfb, 0xa0, 0x40, 0x09,
+ 0x80, 0x36, 0x6c, 0x00,
+ 0x80, 0xa0, 0x68, 0x7a,
+ 0x7f, 0xa0, 0x40, 0x09,
+ 0xff, 0x6a, 0x04, 0x5b,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x04, 0xa0, 0xf8, 0x7a,
+ 0x00, 0x65, 0x84, 0x5c,
+ 0x00, 0x65, 0xfa, 0x42,
+ 0x00, 0x65, 0x6c, 0x5c,
+ 0x31, 0x6a, 0x22, 0x01,
+ 0x0c, 0x6a, 0x04, 0x5b,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x61, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x10, 0x3d, 0x06, 0x00,
+ 0xff, 0x65, 0x68, 0x0c,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0x01, 0x0c, 0x0a, 0x7b,
+ 0x04, 0x0c, 0x0a, 0x6b,
+ 0xe0, 0x03, 0x7a, 0x08,
+ 0xe0, 0x3d, 0x1e, 0x63,
+ 0xff, 0x65, 0xcc, 0x08,
+ 0xff, 0x12, 0xda, 0x0c,
+ 0xff, 0x06, 0xd4, 0x0c,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x02, 0x0b, 0x1a, 0x7b,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0xd1, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xff, 0x65, 0x26, 0x09,
+ 0x01, 0x0b, 0x32, 0x6b,
+ 0x10, 0x0c, 0x24, 0x7b,
+ 0x04, 0x0b, 0x2c, 0x6b,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x04, 0x93, 0x30, 0x6b,
+ 0x01, 0x94, 0x2e, 0x7b,
+ 0x10, 0x94, 0x30, 0x6b,
+ 0xc7, 0x93, 0x26, 0x09,
+ 0xff, 0x99, 0xd4, 0x08,
+ 0x08, 0x93, 0x34, 0x6b,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x80, 0x36, 0x38, 0x6b,
+ 0x21, 0x6a, 0x22, 0x05,
+ 0xff, 0x65, 0x20, 0x09,
+ 0xff, 0x51, 0x46, 0x63,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0xa1, 0x6a, 0x50, 0x43,
+ 0xff, 0x51, 0xc8, 0x08,
+ 0xb9, 0x6a, 0x50, 0x43,
+ 0xff, 0xba, 0x54, 0x73,
+ 0xff, 0xba, 0x20, 0x09,
+ 0xff, 0x65, 0xca, 0x18,
+ 0x00, 0x6c, 0x4a, 0x63,
+ 0xff, 0x90, 0xca, 0x0c,
+ 0xff, 0x6a, 0xca, 0x04,
+ 0x20, 0x36, 0x72, 0x7b,
+ 0x00, 0x90, 0x3e, 0x5b,
+ 0xff, 0x65, 0x72, 0x73,
+ 0xff, 0xba, 0x66, 0x73,
+ 0xff, 0xbb, 0xcc, 0x08,
+ 0xff, 0xba, 0x20, 0x09,
+ 0xff, 0x66, 0x76, 0x09,
+ 0xff, 0x65, 0x20, 0x09,
+ 0xff, 0xbb, 0x70, 0x73,
+ 0xff, 0xba, 0xcc, 0x08,
+ 0xff, 0xbb, 0x20, 0x09,
+ 0xff, 0x66, 0x74, 0x09,
+ 0xff, 0x65, 0x20, 0x0d,
+ 0xff, 0xba, 0x7e, 0x0c,
+ 0x00, 0x6a, 0x72, 0x5c,
+ 0x0d, 0x6a, 0x6a, 0x00,
+ 0x00, 0x51, 0xfe, 0x43,
+ 0xff, 0x3f, 0xcc, 0x73,
+ 0xff, 0x6a, 0xa2, 0x00,
+ 0x00, 0x3f, 0x3e, 0x5b,
+ 0xff, 0x65, 0xcc, 0x73,
+ 0x20, 0x36, 0x6c, 0x00,
+ 0x20, 0xa0, 0x86, 0x6b,
+ 0xff, 0xb9, 0xa2, 0x0c,
+ 0xff, 0x6a, 0xa2, 0x04,
+ 0xff, 0x65, 0xa4, 0x08,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0xd0, 0x01,
+ 0x09, 0x6a, 0xd6, 0x01,
+ 0x80, 0xeb, 0x92, 0x7b,
+ 0x01, 0x6a, 0xd6, 0x01,
+ 0x01, 0xe9, 0xa4, 0x34,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x0d, 0x6a, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0xff, 0x99, 0xa4, 0x0c,
+ 0xff, 0x65, 0xa4, 0x08,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0xd0, 0x01,
+ 0x01, 0x6a, 0xdc, 0x05,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x01, 0x6a, 0x26, 0x05,
+ 0x01, 0x65, 0xd8, 0x31,
+ 0x09, 0xee, 0xdc, 0x01,
+ 0x80, 0xee, 0xc2, 0x7b,
+ 0xff, 0x6a, 0xdc, 0x0d,
+ 0xff, 0x65, 0x32, 0x09,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x44,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0x00, 0x6a, 0x88, 0x5b,
+ 0xff, 0x52, 0xa2, 0x0c,
+ 0x01, 0x0c, 0xd2, 0x7b,
+ 0x04, 0x0c, 0xd2, 0x6b,
+ 0xe0, 0x03, 0x7a, 0x08,
+ 0xff, 0x3d, 0x06, 0x0c,
+ 0xff, 0x8c, 0x10, 0x08,
+ 0xff, 0x8d, 0x12, 0x08,
+ 0xff, 0x8e, 0x14, 0x0c,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x0c,
+ 0x3d, 0x64, 0xa4, 0x28,
+ 0x55, 0x64, 0xc8, 0x28,
+ 0x00, 0x6c, 0xda, 0x18,
+ 0xff, 0x52, 0xc8, 0x08,
+ 0x00, 0x6c, 0xda, 0x20,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x00, 0x6c, 0xda, 0x20,
+ 0x00, 0x6c, 0xda, 0x24,
+ 0xff, 0x65, 0xc8, 0x08,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x41, 0x6a, 0xee, 0x5b,
+ 0xff, 0x90, 0xe2, 0x09,
+ 0x20, 0x6a, 0xd0, 0x01,
+ 0x04, 0x35, 0x10, 0x7c,
+ 0x1d, 0x6a, 0xdc, 0x01,
+ 0xdc, 0xee, 0x0c, 0x64,
+ 0x00, 0x65, 0x1c, 0x44,
+ 0x01, 0x6a, 0xdc, 0x01,
+ 0x20, 0xa0, 0xd8, 0x31,
+ 0x09, 0xee, 0xdc, 0x01,
+ 0x80, 0xee, 0x16, 0x7c,
+ 0x19, 0x6a, 0xdc, 0x01,
+ 0xd8, 0xee, 0x1a, 0x64,
+ 0xff, 0x6a, 0xdc, 0x09,
+ 0x18, 0xee, 0x1e, 0x6c,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x41, 0x6a, 0xee, 0x5b,
+ 0x20, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0xff, 0x35, 0x26, 0x09,
+ 0x04, 0x35, 0x48, 0x6c,
+ 0xa0, 0x6a, 0xca, 0x00,
+ 0x20, 0x65, 0xc8, 0x18,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0x00, 0x65, 0x34, 0x64,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0x04, 0x35, 0x38, 0x7b,
+ 0xa0, 0x6a, 0x54, 0x5c,
+ 0x00, 0x65, 0x56, 0x5c,
+ 0x00, 0x65, 0x56, 0x5c,
+ 0x00, 0x65, 0x56, 0x44,
+ 0xff, 0x65, 0xcc, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x0c,
+ 0x08, 0x94, 0x64, 0x7c,
+ 0xf7, 0x93, 0x26, 0x09,
+ 0x08, 0x93, 0x68, 0x6c,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0xff, 0x40, 0x74, 0x09,
+ 0xff, 0x90, 0x80, 0x08,
+ 0xff, 0x6a, 0x72, 0x05,
+ 0xff, 0x40, 0x80, 0x64,
+ 0xff, 0x3f, 0x78, 0x64,
+ 0xff, 0x6a, 0xca, 0x04,
+ 0xff, 0x3f, 0x20, 0x09,
+ 0x01, 0x6a, 0x6a, 0x00,
+ 0x00, 0xb9, 0xfe, 0x5b,
+ 0x00, 0x90, 0x5c, 0x43,
+ 0xff, 0x40, 0x20, 0x09,
+ 0xff, 0xba, 0x80, 0x0c,
+ 0xff, 0x6a, 0x76, 0x01,
+ 0xff, 0x3f, 0x74, 0x09,
+ 0xff, 0x90, 0x7e, 0x08,
+ 0xff, 0xba, 0x38, 0x73,
+ 0xff, 0xba, 0x20, 0x09,
+ 0xff, 0x3f, 0x76, 0x09,
+ 0xff, 0x3f, 0x20, 0x0d,
+};
+
+static int aic7xxx_patch12_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch12_func(struct aic7xxx_host *p)
+{
+ return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895);
+}
+
+static int aic7xxx_patch11_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch11_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_WIDE) != 0);
+}
+
+static int aic7xxx_patch10_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch10_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_ULTRA2) == 0);
+}
+
+static int aic7xxx_patch9_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch9_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_ULTRA) != 0);
+}
+
+static int aic7xxx_patch8_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch8_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_ULTRA2) != 0);
+}
+
+static int aic7xxx_patch7_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch7_func(struct aic7xxx_host *p)
+{
+ return ((p->flags & AHC_PAGESCBS) == 0);
+}
+
+static int aic7xxx_patch6_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch6_func(struct aic7xxx_host *p)
+{
+ return ((p->flags & AHC_PAGESCBS) != 0);
+}
+
+static int aic7xxx_patch5_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch5_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_QUEUE_REGS) != 0);
+}
+
+static int aic7xxx_patch4_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch4_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_TWIN) != 0);
+}
+
+static int aic7xxx_patch3_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch3_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_QUEUE_REGS) == 0);
+}
+
+static int aic7xxx_patch2_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch2_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_CMD_CHAN) != 0);
+}
+
+static int aic7xxx_patch1_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch1_func(struct aic7xxx_host *p)
+{
+ return ((p->flags & AHC_TARGETMODE) != 0);
+}
+
+static int aic7xxx_patch0_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch0_func(struct aic7xxx_host *p)
+{
+ return (0);
+}
+
+struct sequencer_patch {
+ int (*patch_func)(struct aic7xxx_host *);
+ unsigned int begin :10,
+ skip_instr :10,
+ skip_patch :12;
+} sequencer_patches[] = {
+ { aic7xxx_patch1_func, 1, 1, 2 },
+ { aic7xxx_patch0_func, 2, 1, 1 },
+ { aic7xxx_patch2_func, 3, 2, 1 },
+ { aic7xxx_patch3_func, 7, 1, 1 },
+ { aic7xxx_patch3_func, 8, 1, 1 },
+ { aic7xxx_patch4_func, 11, 4, 1 },
+ { aic7xxx_patch5_func, 16, 3, 2 },
+ { aic7xxx_patch0_func, 19, 4, 1 },
+ { aic7xxx_patch6_func, 23, 1, 1 },
+ { aic7xxx_patch7_func, 26, 1, 1 },
+ { aic7xxx_patch4_func, 34, 4, 1 },
+ { aic7xxx_patch8_func, 38, 3, 2 },
+ { aic7xxx_patch0_func, 41, 3, 1 },
+ { aic7xxx_patch9_func, 47, 7, 1 },
+ { aic7xxx_patch4_func, 55, 3, 1 },
+ { aic7xxx_patch8_func, 58, 2, 1 },
+ { aic7xxx_patch1_func, 63, 60, 1 },
+ { aic7xxx_patch8_func, 164, 1, 2 },
+ { aic7xxx_patch0_func, 165, 1, 1 },
+ { aic7xxx_patch2_func, 169, 1, 1 },
+ { aic7xxx_patch2_func, 172, 1, 2 },
+ { aic7xxx_patch0_func, 173, 2, 1 },
+ { aic7xxx_patch10_func, 175, 1, 1 },
+ { aic7xxx_patch8_func, 182, 1, 2 },
+ { aic7xxx_patch0_func, 183, 3, 1 },
+ { aic7xxx_patch8_func, 187, 1, 2 },
+ { aic7xxx_patch0_func, 188, 1, 1 },
+ { aic7xxx_patch8_func, 189, 7, 2 },
+ { aic7xxx_patch0_func, 196, 1, 1 },
+ { aic7xxx_patch2_func, 201, 13, 2 },
+ { aic7xxx_patch0_func, 214, 8, 1 },
+ { aic7xxx_patch10_func, 222, 1, 1 },
+ { aic7xxx_patch8_func, 227, 1, 1 },
+ { aic7xxx_patch8_func, 228, 1, 1 },
+ { aic7xxx_patch8_func, 233, 1, 1 },
+ { aic7xxx_patch8_func, 235, 2, 1 },
+ { aic7xxx_patch8_func, 240, 8, 1 },
+ { aic7xxx_patch8_func, 249, 1, 1 },
+ { aic7xxx_patch2_func, 250, 2, 2 },
+ { aic7xxx_patch0_func, 252, 4, 1 },
+ { aic7xxx_patch10_func, 256, 2, 2 },
+ { aic7xxx_patch0_func, 258, 1, 1 },
+ { aic7xxx_patch11_func, 265, 1, 2 },
+ { aic7xxx_patch0_func, 266, 1, 1 },
+ { aic7xxx_patch5_func, 328, 1, 2 },
+ { aic7xxx_patch0_func, 329, 1, 1 },
+ { aic7xxx_patch3_func, 332, 1, 1 },
+ { aic7xxx_patch11_func, 351, 1, 2 },
+ { aic7xxx_patch0_func, 352, 1, 1 },
+ { aic7xxx_patch6_func, 356, 1, 1 },
+ { aic7xxx_patch7_func, 364, 3, 2 },
+ { aic7xxx_patch0_func, 367, 1, 1 },
+ { aic7xxx_patch1_func, 396, 3, 1 },
+ { aic7xxx_patch10_func, 410, 1, 1 },
+ { aic7xxx_patch2_func, 453, 7, 2 },
+ { aic7xxx_patch0_func, 460, 8, 1 },
+ { aic7xxx_patch2_func, 469, 4, 2 },
+ { aic7xxx_patch0_func, 473, 6, 1 },
+ { aic7xxx_patch2_func, 479, 4, 2 },
+ { aic7xxx_patch0_func, 483, 3, 1 },
+ { aic7xxx_patch2_func, 512, 17, 4 },
+ { aic7xxx_patch12_func, 520, 4, 2 },
+ { aic7xxx_patch0_func, 524, 2, 1 },
+ { aic7xxx_patch0_func, 529, 33, 1 },
+ { aic7xxx_patch6_func, 566, 2, 1 },
+ { aic7xxx_patch6_func, 569, 9, 1 },
+
+};
diff --git a/linux/src/drivers/scsi/constants.c b/linux/src/drivers/scsi/constants.c
new file mode 100644
index 0000000..1495a5d
--- /dev/null
+++ b/linux/src/drivers/scsi/constants.c
@@ -0,0 +1,683 @@
+/*
+ * ASCII values for a number of symbolic constants, printing functions,
+ * etc.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#define CONST_COMMAND 0x01
+#define CONST_STATUS 0x02
+#define CONST_SENSE 0x04
+#define CONST_XSENSE 0x08
+#define CONST_CMND 0x10
+#define CONST_MSG 0x20
+#define CONST_HOST 0x40
+#define CONST_DRIVER 0x80
+
+static const char unknown[] = "UNKNOWN";
+
+#ifdef CONFIG_SCSI_CONSTANTS
+#ifdef CONSTANTS
+#undef CONSTANTS
+#endif
+#define CONSTANTS (CONST_COMMAND | CONST_STATUS | CONST_SENSE | CONST_XSENSE \
+ | CONST_CMND | CONST_MSG | CONST_HOST | CONST_DRIVER)
+#endif
+
+#if (CONSTANTS & CONST_COMMAND)
+static const char * group_0_commands[] = {
+/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense",
+/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks",
+/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown,
+/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
+/* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve",
+/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit",
+/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic",
+/* 1e-1f */ "Prevent/Allow Medium Removal", unknown,
+};
+
+
+static const char *group_1_commands[] = {
+/* 20-22 */ unknown, unknown, unknown,
+/* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)",
+/* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown,
+/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal",
+/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position",
+/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data",
+/* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer",
+/* 3d-3f */ "Update Block", "Read Long", "Write Long",
+};
+
+
+static const char *group_2_commands[] = {
+/* 40-41 */ "Change Definition", "Write Same",
+/* 42-48 */ unknown, "Read TOC", unknown, unknown, unknown, unknown, unknown,
+/* 49-4f */ unknown, unknown, unknown, "Log Select", "Log Sense", unknown, unknown,
+/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
+/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
+/* 5c-5f */ unknown, unknown, unknown,
+};
+
+
+
+#define group(opcode) (((opcode) >> 5) & 7)
+
+#define RESERVED_GROUP 0
+#define VENDOR_GROUP 1
+#define NOTEXT_GROUP 2
+
+static const char **commands[] = {
+ group_0_commands, group_1_commands, group_2_commands,
+ (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP,
+ (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP,
+ (const char **) VENDOR_GROUP
+};
+
+static const char reserved[] = "RESERVED";
+static const char vendor[] = "VENDOR SPECIFIC";
+
+static void print_opcode(int opcode) {
+ const char **table = commands[ group(opcode) ];
+ switch ((unsigned long) table) {
+ case RESERVED_GROUP:
+ printk("%s(0x%02x) ", reserved, opcode);
+ break;
+ case NOTEXT_GROUP:
+ printk("%s(0x%02x) ", unknown, opcode);
+ break;
+ case VENDOR_GROUP:
+ printk("%s(0x%02x) ", vendor, opcode);
+ break;
+ default:
+ if (table[opcode & 0x1f] != unknown)
+ printk("%s ",table[opcode & 0x1f]);
+ else
+ printk("%s(0x%02x) ", unknown, opcode);
+ break;
+ }
+}
+#else /* CONST & CONST_COMMAND */
+static void print_opcode(int opcode) {
+ printk("0x%02x ", opcode);
+}
+#endif
+
+void print_command (unsigned char *command) {
+ int i,s;
+ print_opcode(command[0]);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ printk("%02x ", command[i]);
+ printk("\n");
+}
+
+#if (CONSTANTS & CONST_STATUS)
+static const char * statuses[] = {
+/* 0-4 */ "Good", "Check Condition", "Condition Good", unknown, "Busy",
+/* 5-9 */ unknown, unknown, unknown, "Intermediate Good", unknown,
+/* a-d */ "Intermediate Good", unknown, "Reservation Conflict", unknown,
+/* e-f */ unknown, unknown,
+};
+#endif
+
+void print_status (int status) {
+ status = (status >> 1) & 0xf;
+#if (CONSTANTS & CONST_STATUS)
+ printk("%s ",statuses[status]);
+#else
+ printk("0x%0x ", status);
+#endif
+}
+
+#if (CONSTANTS & CONST_XSENSE)
+#define D 0x001 /* DIRECT ACCESS DEVICE (disk) */
+#define T 0x002 /* SEQUENTIAL ACCESS DEVICE (tape) */
+#define L 0x004 /* PRINTER DEVICE */
+#define P 0x008 /* PROCESSOR DEVICE */
+#define W 0x010 /* WRITE ONCE READ MULTIPLE DEVICE */
+#define R 0x020 /* READ ONLY (CD-ROM) DEVICE */
+#define S 0x040 /* SCANNER DEVICE */
+#define O 0x080 /* OPTICAL MEMORY DEVICE */
+#define M 0x100 /* MEDIA CHANGER DEVICE */
+#define C 0x200 /* COMMUNICATION DEVICE */
+
+struct error_info{
+ unsigned char code1, code2;
+ unsigned short int devices;
+ const char * text;
+};
+
+struct error_info2{
+ unsigned char code1, code2_min, code2_max;
+ unsigned short int devices;
+ const char * text;
+};
+
+static struct error_info2 additional2[] =
+{
+ {0x40,0x00,0x7f,D,"Ram failure (%x)"},
+ {0x40,0x80,0xff,D|T|L|P|W|R|S|O|M|C,"Diagnostic failure on component (%x)"},
+ {0x41,0x00,0xff,D,"Data path failure (%x)"},
+ {0x42,0x00,0xff,D,"Power-on or self-test failure (%x)"},
+ {0, 0, 0, 0, NULL}
+};
+
+static struct error_info additional[] =
+{
+ {0x00,0x01,T,"Filemark detected"},
+ {0x00,0x02,T|S,"End-of-partition/medium detected"},
+ {0x00,0x03,T,"Setmark detected"},
+ {0x00,0x04,T|S,"Beginning-of-partition/medium detected"},
+ {0x00,0x05,T|S,"End-of-data detected"},
+ {0x00,0x06,D|T|L|P|W|R|S|O|M|C,"I/O process terminated"},
+ {0x00,0x11,R,"Audio play operation in progress"},
+ {0x00,0x12,R,"Audio play operation paused"},
+ {0x00,0x13,R,"Audio play operation successfully completed"},
+ {0x00,0x14,R,"Audio play operation stopped due to error"},
+ {0x00,0x15,R,"No current audio status to return"},
+ {0x01,0x00,D|W|O,"No index/sector signal"},
+ {0x02,0x00,D|W|R|O|M,"No seek complete"},
+ {0x03,0x00,D|T|L|W|S|O,"Peripheral device write fault"},
+ {0x03,0x01,T,"No write current"},
+ {0x03,0x02,T,"Excessive write errors"},
+ {0x04,0x00,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, cause not reportable"},
+ {0x04,0x01,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit is in process of becoming ready"},
+ {0x04,0x02,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, initializing command required"},
+ {0x04,0x03,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, manual intervention required"},
+ {0x04,0x04,D|T|L|O,"Logical unit not ready, format in progress"},
+ {0x05,0x00,D|T|L|W|R|S|O|M|C,"Logical unit does not respond to selection"},
+ {0x06,0x00,D|W|R|O|M,"No reference position found"},
+ {0x07,0x00,D|T|L|W|R|S|O|M,"Multiple peripheral devices selected"},
+ {0x08,0x00,D|T|L|W|R|S|O|M|C,"Logical unit communication failure"},
+ {0x08,0x01,D|T|L|W|R|S|O|M|C,"Logical unit communication time-out"},
+ {0x08,0x02,D|T|L|W|R|S|O|M|C,"Logical unit communication parity error"},
+ {0x09,0x00,D|T|W|R|O,"Track following error"},
+ {0x09,0x01,W|R|O,"Tracking servo failure"},
+ {0x09,0x02,W|R|O,"Focus servo failure"},
+ {0x09,0x03,W|R|O,"Spindle servo failure"},
+ {0x0A,0x00,D|T|L|P|W|R|S|O|M|C,"Error log overflow"},
+ {0x0C,0x00,T|S,"Write error"},
+ {0x0C,0x01,D|W|O,"Write error recovered with auto reallocation"},
+ {0x0C,0x02,D|W|O,"Write error - auto reallocation failed"},
+ {0x10,0x00,D|W|O,"Id crc or ecc error"},
+ {0x11,0x00,D|T|W|R|S|O,"Unrecovered read error"},
+ {0x11,0x01,D|T|W|S|O,"Read retries exhausted"},
+ {0x11,0x02,D|T|W|S|O,"Error too long to correct"},
+ {0x11,0x03,D|T|W|S|O,"Multiple read errors"},
+ {0x11,0x04,D|W|O,"Unrecovered read error - auto reallocate failed"},
+ {0x11,0x05,W|R|O,"L-ec uncorrectable error"},
+ {0x11,0x06,W|R|O,"Circ unrecovered error"},
+ {0x11,0x07,W|O,"Data resynchronization error"},
+ {0x11,0x08,T,"Incomplete block read"},
+ {0x11,0x09,T,"No gap found"},
+ {0x11,0x0A,D|T|O,"Miscorrected error"},
+ {0x11,0x0B,D|W|O,"Unrecovered read error - recommend reassignment"},
+ {0x11,0x0C,D|W|O,"Unrecovered read error - recommend rewrite the data"},
+ {0x12,0x00,D|W|O,"Address mark not found for id field"},
+ {0x13,0x00,D|W|O,"Address mark not found for data field"},
+ {0x14,0x00,D|T|L|W|R|S|O,"Recorded entity not found"},
+ {0x14,0x01,D|T|W|R|O,"Record not found"},
+ {0x14,0x02,T,"Filemark or setmark not found"},
+ {0x14,0x03,T,"End-of-data not found"},
+ {0x14,0x04,T,"Block sequence error"},
+ {0x15,0x00,D|T|L|W|R|S|O|M,"Random positioning error"},
+ {0x15,0x01,D|T|L|W|R|S|O|M,"Mechanical positioning error"},
+ {0x15,0x02,D|T|W|R|O,"Positioning error detected by read of medium"},
+ {0x16,0x00,D|W|O,"Data synchronization mark error"},
+ {0x17,0x00,D|T|W|R|S|O,"Recovered data with no error correction applied"},
+ {0x17,0x01,D|T|W|R|S|O,"Recovered data with retries"},
+ {0x17,0x02,D|T|W|R|O,"Recovered data with positive head offset"},
+ {0x17,0x03,D|T|W|R|O,"Recovered data with negative head offset"},
+ {0x17,0x04,W|R|O,"Recovered data with retries and/or circ applied"},
+ {0x17,0x05,D|W|R|O,"Recovered data using previous sector id"},
+ {0x17,0x06,D|W|O,"Recovered data without ecc - data auto-reallocated"},
+ {0x17,0x07,D|W|O,"Recovered data without ecc - recommend reassignment"},
+ {0x18,0x00,D|T|W|R|O,"Recovered data with error correction applied"},
+ {0x18,0x01,D|W|R|O,"Recovered data with error correction and retries applied"},
+ {0x18,0x02,D|W|R|O,"Recovered data - data auto-reallocated"},
+ {0x18,0x03,R,"Recovered data with circ"},
+ {0x18,0x04,R,"Recovered data with lec"},
+ {0x18,0x05,D|W|R|O,"Recovered data - recommend reassignment"},
+ {0x19,0x00,D|O,"Defect list error"},
+ {0x19,0x01,D|O,"Defect list not available"},
+ {0x19,0x02,D|O,"Defect list error in primary list"},
+ {0x19,0x03,D|O,"Defect list error in grown list"},
+ {0x1A,0x00,D|T|L|P|W|R|S|O|M|C,"Parameter list length error"},
+ {0x1B,0x00,D|T|L|P|W|R|S|O|M|C,"Synchronous data transfer error"},
+ {0x1C,0x00,D|O,"Defect list not found"},
+ {0x1C,0x01,D|O,"Primary defect list not found"},
+ {0x1C,0x02,D|O,"Grown defect list not found"},
+ {0x1D,0x00,D|W|O,"Miscompare during verify operation"},
+ {0x1E,0x00,D|W|O,"Recovered id with ecc correction"},
+ {0x20,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid command operation code"},
+ {0x21,0x00,D|T|W|R|O|M,"Logical block address out of range"},
+ {0x21,0x01,M,"Invalid element address"},
+ {0x22,0x00,D,"Illegal function (should use 20 00, 24 00, or 26 00)"},
+ {0x24,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid field in cdb"},
+ {0x25,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit not supported"},
+ {0x26,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid field in parameter list"},
+ {0x26,0x01,D|T|L|P|W|R|S|O|M|C,"Parameter not supported"},
+ {0x26,0x02,D|T|L|P|W|R|S|O|M|C,"Parameter value invalid"},
+ {0x26,0x03,D|T|L|P|W|R|S|O|M|C,"Threshold parameters not supported"},
+ {0x27,0x00,D|T|W|O,"Write protected"},
+ {0x28,0x00,D|T|L|P|W|R|S|O|M|C,"Not ready to ready transition (medium may have changed)"},
+ {0x28,0x01,M,"Import or export element accessed"},
+ {0x29,0x00,D|T|L|P|W|R|S|O|M|C,"Power on, reset, or bus device reset occurred"},
+ {0x2A,0x00,D|T|L|W|R|S|O|M|C,"Parameters changed"},
+ {0x2A,0x01,D|T|L|W|R|S|O|M|C,"Mode parameters changed"},
+ {0x2A,0x02,D|T|L|W|R|S|O|M|C,"Log parameters changed"},
+ {0x2B,0x00,D|T|L|P|W|R|S|O|C,"Copy cannot execute since host cannot disconnect"},
+ {0x2C,0x00,D|T|L|P|W|R|S|O|M|C,"Command sequence error"},
+ {0x2C,0x01,S,"Too many windows specified"},
+ {0x2C,0x02,S,"Invalid combination of windows specified"},
+ {0x2D,0x00,T,"Overwrite error on update in place"},
+ {0x2F,0x00,D|T|L|P|W|R|S|O|M|C,"Commands cleared by another initiator"},
+ {0x30,0x00,D|T|W|R|O|M,"Incompatible medium installed"},
+ {0x30,0x01,D|T|W|R|O,"Cannot read medium - unknown format"},
+ {0x30,0x02,D|T|W|R|O,"Cannot read medium - incompatible format"},
+ {0x30,0x03,D|T,"Cleaning cartridge installed"},
+ {0x31,0x00,D|T|W|O,"Medium format corrupted"},
+ {0x31,0x01,D|L|O,"Format command failed"},
+ {0x32,0x00,D|W|O,"No defect spare location available"},
+ {0x32,0x01,D|W|O,"Defect list update failure"},
+ {0x33,0x00,T,"Tape length error"},
+ {0x36,0x00,L,"Ribbon, ink, or toner failure"},
+ {0x37,0x00,D|T|L|W|R|S|O|M|C,"Rounded parameter"},
+ {0x39,0x00,D|T|L|W|R|S|O|M|C,"Saving parameters not supported"},
+ {0x3A,0x00,D|T|L|W|R|S|O|M,"Medium not present"},
+ {0x3B,0x00,T|L,"Sequential positioning error"},
+ {0x3B,0x01,T,"Tape position error at beginning-of-medium"},
+ {0x3B,0x02,T,"Tape position error at end-of-medium"},
+ {0x3B,0x03,L,"Tape or electronic vertical forms unit not ready"},
+ {0x3B,0x04,L,"Slew failure"},
+ {0x3B,0x05,L,"Paper jam"},
+ {0x3B,0x06,L,"Failed to sense top-of-form"},
+ {0x3B,0x07,L,"Failed to sense bottom-of-form"},
+ {0x3B,0x08,T,"Reposition error"},
+ {0x3B,0x09,S,"Read past end of medium"},
+ {0x3B,0x0A,S,"Read past beginning of medium"},
+ {0x3B,0x0B,S,"Position past end of medium"},
+ {0x3B,0x0C,S,"Position past beginning of medium"},
+ {0x3B,0x0D,M,"Medium destination element full"},
+ {0x3B,0x0E,M,"Medium source element empty"},
+ {0x3D,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid bits in identify message"},
+ {0x3E,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit has not self-configured yet"},
+ {0x3F,0x00,D|T|L|P|W|R|S|O|M|C,"Target operating conditions have changed"},
+ {0x3F,0x01,D|T|L|P|W|R|S|O|M|C,"Microcode has been changed"},
+ {0x3F,0x02,D|T|L|P|W|R|S|O|M|C,"Changed operating definition"},
+ {0x3F,0x03,D|T|L|P|W|R|S|O|M|C,"Inquiry data has changed"},
+ {0x43,0x00,D|T|L|P|W|R|S|O|M|C,"Message error"},
+ {0x44,0x00,D|T|L|P|W|R|S|O|M|C,"Internal target failure"},
+ {0x45,0x00,D|T|L|P|W|R|S|O|M|C,"Select or reselect failure"},
+ {0x46,0x00,D|T|L|P|W|R|S|O|M|C,"Unsuccessful soft reset"},
+ {0x47,0x00,D|T|L|P|W|R|S|O|M|C,"Scsi parity error"},
+ {0x48,0x00,D|T|L|P|W|R|S|O|M|C,"Initiator detected error message received"},
+ {0x49,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid message error"},
+ {0x4A,0x00,D|T|L|P|W|R|S|O|M|C,"Command phase error"},
+ {0x4B,0x00,D|T|L|P|W|R|S|O|M|C,"Data phase error"},
+ {0x4C,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit failed self-configuration"},
+ {0x4E,0x00,D|T|L|P|W|R|S|O|M|C,"Overlapped commands attempted"},
+ {0x50,0x00,T,"Write append error"},
+ {0x50,0x01,T,"Write append position error"},
+ {0x50,0x02,T,"Position error related to timing"},
+ {0x51,0x00,T|O,"Erase failure"},
+ {0x52,0x00,T,"Cartridge fault"},
+ {0x53,0x00,D|T|L|W|R|S|O|M,"Media load or eject failed"},
+ {0x53,0x01,T,"Unload tape failure"},
+ {0x53,0x02,D|T|W|R|O|M,"Medium removal prevented"},
+ {0x54,0x00,P,"Scsi to host system interface failure"},
+ {0x55,0x00,P,"System resource failure"},
+ {0x57,0x00,R,"Unable to recover table-of-contents"},
+ {0x58,0x00,O,"Generation does not exist"},
+ {0x59,0x00,O,"Updated block read"},
+ {0x5A,0x00,D|T|L|P|W|R|S|O|M,"Operator request or state change input (unspecified)"},
+ {0x5A,0x01,D|T|W|R|O|M,"Operator medium removal request"},
+ {0x5A,0x02,D|T|W|O,"Operator selected write protect"},
+ {0x5A,0x03,D|T|W|O,"Operator selected write permit"},
+ {0x5B,0x00,D|T|L|P|W|R|S|O|M,"Log exception"},
+ {0x5B,0x01,D|T|L|P|W|R|S|O|M,"Threshold condition met"},
+ {0x5B,0x02,D|T|L|P|W|R|S|O|M,"Log counter at maximum"},
+ {0x5B,0x03,D|T|L|P|W|R|S|O|M,"Log list codes exhausted"},
+ {0x5C,0x00,D|O,"Rpl status change"},
+ {0x5C,0x01,D|O,"Spindles synchronized"},
+ {0x5C,0x02,D|O,"Spindles not synchronized"},
+ {0x60,0x00,S,"Lamp failure"},
+ {0x61,0x00,S,"Video acquisition error"},
+ {0x61,0x01,S,"Unable to acquire video"},
+ {0x61,0x02,S,"Out of focus"},
+ {0x62,0x00,S,"Scan head positioning error"},
+ {0x63,0x00,R,"End of user area encountered on this track"},
+ {0x64,0x00,R,"Illegal mode for this track"},
+ {0, 0, 0, NULL}
+};
+#endif
+
+#if (CONSTANTS & CONST_SENSE)
+static const char *snstext[] = {
+ "None", /* There is no sense information */
+ "Recovered Error", /* The last command completed successfully
+ but used error correction */
+ "Not Ready", /* The addressed target is not ready */
+ "Medium Error", /* Data error detected on the medium */
+ "Hardware Error", /* Controller or device failure */
+ "Illegal Request",
+ "Unit Attention", /* Removable medium was changed, or
+ the target has been reset */
+ "Data Protect", /* Access to the data is blocked */
+ "Blank Check", /* Reached unexpected written or unwritten
+ region of the medium */
+ "Key=9", /* Vendor specific */
+ "Copy Aborted", /* COPY or COMPARE was aborted */
+ "Aborted Command", /* The target aborted the command */
+ "Equal", /* A SEARCH DATA command found data equal */
+ "Volume Overflow", /* Medium full with still data to be written */
+ "Miscompare", /* Source data and data on the medium
+ do not agree */
+ "Key=15" /* Reserved */
+};
+#endif
+
+/* Print sense information */
+void print_sense(const char * devclass, Scsi_Cmnd * SCpnt)
+{
+    int i, s;
+    int sense_class, valid, code;
+    unsigned char * sense_buffer = SCpnt->sense_buffer;
+    const char * error = NULL;
+
+    sense_class = (sense_buffer[0] >> 4) & 0x07;
+    code = sense_buffer[0] & 0xf;
+    valid = sense_buffer[0] & 0x80;
+
+    if (sense_class == 7) { /* extended sense data */
+	s = sense_buffer[7] + 8;
+	if(s > sizeof(SCpnt->sense_buffer))
+	    s = sizeof(SCpnt->sense_buffer);
+
+	if (!valid)
+	    printk("extra data not valid ");
+
+	if (sense_buffer[2] & 0x80)
+	    printk( "FMK "); /* current command has read a filemark */
+	if (sense_buffer[2] & 0x40)
+	    printk( "EOM "); /* end-of-medium condition exists */
+	if (sense_buffer[2] & 0x20)
+	    printk( "ILI "); /* incorrect block length requested */
+
+	switch (code) {
+	case 0x0:
+	    error = "Current"; /* error concerns current command */
+	    break;
+	case 0x1:
+	    error = "Deferred"; /* error concerns some earlier command */
+	    /* e.g., an earlier write to disk cache succeeded, but
+	       now the disk discovers that it cannot write the data */
+	    break;
+	default:
+	    error = "Invalid";
+	}
+
+	printk("%s error ", error);
+
+#if (CONSTANTS & CONST_SENSE)
+	printk( "%s%s: sense key %s\n", devclass,
+	    kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[2] & 0x0f]);
+#else
+	printk("%s%s: sns = %2x %2x\n", devclass,
+	    kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+#endif
+
+	/* Check to see if additional sense information is available */
+	if(sense_buffer[7] + 7 < 13 ||
+	   (sense_buffer[12] == 0 && sense_buffer[13] == 0)) goto done;
+
+#if (CONSTANTS & CONST_XSENSE)
+	for(i=0; additional[i].text; i++)
+	    if(additional[i].code1 == sense_buffer[12] &&
+	       additional[i].code2 == sense_buffer[13])
+		printk("Additional sense indicates %s\n", additional[i].text);
+
+	for(i=0; additional2[i].text; i++)
+	    if(additional2[i].code1 == sense_buffer[12] &&
+	       additional2[i].code2_min <= sense_buffer[13] &&
+	       additional2[i].code2_max >= sense_buffer[13]) { /* inclusive ASCQ range */
+		printk("Additional sense indicates ");
+		printk(additional2[i].text, sense_buffer[13]);
+		printk("\n");
+	    }
+#else
+	printk("ASC=%2x ASCQ=%2x\n", sense_buffer[12], sense_buffer[13]);
+#endif
+    } else { /* non-extended sense data */
+
+	/*
+	 * Standard says:
+	 *    sense_buffer[0] & 0200 : address valid
+	 *    sense_buffer[0] & 0177 : vendor-specific error code
+	 *    sense_buffer[1] & 0340 : vendor-specific
+	 *    sense_buffer[1..3] : 21-bit logical block address
+	 */
+
+#if (CONSTANTS & CONST_SENSE)
+	if (sense_buffer[0] < 15)
+	    printk("%s%s: old sense key %s\n", devclass,
+		kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[0] & 0x0f]);
+	else
+#endif
+	    printk("%s%s: sns = %2x %2x\n", devclass,
+		kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+
+	printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
+	s = 4;
+    }
+
+    done:
+#if !(CONSTANTS & CONST_SENSE)
+    printk("Raw sense data:");
+    for (i = 0; i < s; ++i)
+	printk("0x%02x ", sense_buffer[i]);
+    printk("\n");
+#endif
+    return;
+}
+
+#if (CONSTANTS & CONST_MSG)
+static const char *one_byte_msgs[] = {
+/* 0x00 */ "Command Complete", NULL, "Save Pointers",
+/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error",
+/* 0x06 */ "Abort", "Message Reject", "Nop", "Message Parity Error",
+/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag",
+/* 0x0c */ "Bus device reset", "Abort Tag", "Clear Queue",
+/* 0x0f */ "Initiate Recovery", "Release Recovery"
+};
+
+#define NO_ONE_BYTE_MSGS (sizeof(one_byte_msgs) / sizeof (const char *))
+
+static const char *two_byte_msgs[] = {
+/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag",
+/* 0x23 */ "Ignore Wide Residue"
+};
+
+#define NO_TWO_BYTE_MSGS (sizeof(two_byte_msgs) / sizeof (const char *))
+
+static const char *extended_msgs[] = {
+/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request",
+/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request"
+};
+
+#define NO_EXTENDED_MSGS (sizeof(extended_msgs) / sizeof (const char *))
+#endif /* (CONSTANTS & CONST_MSG) */
+
+int print_msg (const unsigned char *msg) {
+ int len = 0, i;
+ if (msg[0] == EXTENDED_MESSAGE) {
+ len = 3 + msg[1];
+#if (CONSTANTS & CONST_MSG)
+ if (msg[2] < NO_EXTENDED_MSGS)
+ printk ("%s ", extended_msgs[msg[2]]);
+ else
+ printk ("Extended Message, reserved code (0x%02x) ", (int) msg[2]);
+ switch (msg[2]) {
+ case EXTENDED_MODIFY_DATA_POINTER:
+ printk("pointer = %d", (int) (msg[3] << 24) | (msg[4] << 16) |
+ (msg[5] << 8) | msg[6]);
+ break;
+ case EXTENDED_SDTR:
+ printk("period = %d ns, offset = %d", (int) msg[3] * 4, (int)
+ msg[4]);
+ break;
+ case EXTENDED_WDTR:
+ printk("width = 2^%d bytes", msg[3]);
+ break;
+ default:
+ for (i = 2; i < len; ++i)
+ printk("%02x ", msg[i]);
+ }
+#else
+ for (i = 0; i < len; ++i)
+ printk("%02x ", msg[i]);
+#endif
+ /* Identify */
+ } else if (msg[0] & 0x80) {
+#if (CONSTANTS & CONST_MSG)
+ printk("Identify disconnect %sallowed %s %d ",
+ (msg[0] & 0x40) ? "" : "not ",
+ (msg[0] & 0x20) ? "target routine" : "lun",
+ msg[0] & 0x7);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ len = 1;
+ /* Normal One byte */
+ } else if (msg[0] < 0x1f) {
+#if (CONSTANTS & CONST_MSG)
+ if (msg[0] < NO_ONE_BYTE_MSGS)
+ printk(one_byte_msgs[msg[0]]);
+ else
+ printk("reserved (%02x) ", msg[0]);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ len = 1;
+ /* Two byte */
+ } else if (msg[0] <= 0x2f) {
+#if (CONSTANTS & CONST_MSG)
+ if ((msg[0] - 0x20) < NO_TWO_BYTE_MSGS)
+ printk("%s %02x ", two_byte_msgs[msg[0] - 0x20],
+ msg[1]);
+ else
+ printk("reserved two byte (%02x %02x) ",
+ msg[0], msg[1]);
+#else
+ printk("%02x %02x", msg[0], msg[1]);
+#endif
+ len = 2;
+ } else
+#if (CONSTANTS & CONST_MSG)
+ printk(reserved);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ return len;
+}
+
+void print_Scsi_Cmnd (Scsi_Cmnd *cmd) {
+ printk("scsi%d : destination target %d, lun %d\n",
+ cmd->host->host_no,
+ cmd->target,
+ cmd->lun);
+ printk(" command = ");
+ print_command (cmd->cmnd);
+}
+
+#if (CONSTANTS & CONST_HOST)
+static const char * hostbyte_table[]={
+"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
+"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",NULL};
+
+void print_hostbyte(int scsiresult)
+{ static int maxcode=0;
+ int i;
+
+ if(!maxcode) {
+ for(i=0;hostbyte_table[i];i++) ;
+ maxcode=i-1;
+ }
+ printk("Hostbyte=0x%02x",host_byte(scsiresult));
+ if(host_byte(scsiresult)>maxcode) {
+ printk("is invalid ");
+ return;
+ }
+ printk("(%s) ",hostbyte_table[host_byte(scsiresult)]);
+}
+#else
+void print_hostbyte(int scsiresult)
+{ printk("Hostbyte=0x%02x ",host_byte(scsiresult));
+}
+#endif
+
+#if (CONSTANTS & CONST_DRIVER)
+static const char * driverbyte_table[]={
+"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
+"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD",NULL };
+
+static const char * driversuggest_table[]={"SUGGEST_OK",
+"SUGGEST_RETRY", "SUGGEST_ABORT", "SUGGEST_REMAP", "SUGGEST_DIE",
+unknown,unknown,unknown, "SUGGEST_SENSE",NULL};
+
+
+void print_driverbyte(int scsiresult)
+{ static int driver_max=0,suggest_max=0;
+ int i,dr=driver_byte(scsiresult)&DRIVER_MASK,
+ su=(driver_byte(scsiresult)&SUGGEST_MASK)>>4;
+
+ if(!driver_max) {
+ for(i=0;driverbyte_table[i];i++) ;
+ driver_max=i;
+ for(i=0;driversuggest_table[i];i++) ;
+ suggest_max=i;
+ }
+ printk("Driverbyte=0x%02x",driver_byte(scsiresult));
+ printk("(%s,%s) ",
+ dr<driver_max ? driverbyte_table[dr]:"invalid",
+ su<suggest_max ? driversuggest_table[su]:"invalid");
+}
+#else
+void print_driverbyte(int scsiresult)
+{ printk("Driverbyte=0x%02x ",driver_byte(scsiresult));
+}
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/constants.h b/linux/src/drivers/scsi/constants.h
new file mode 100644
index 0000000..e10527e
--- /dev/null
+++ b/linux/src/drivers/scsi/constants.h
@@ -0,0 +1,6 @@
+#ifndef _CONSTANTS_H
+#define _CONSTANTS_H
+extern int print_msg(const unsigned char *);
+extern void print_status(int);
+extern void print_Scsi_Cmnd (Scsi_Cmnd *);
+#endif /* def _CONSTANTS_H */
diff --git a/linux/src/drivers/scsi/dc390.h b/linux/src/drivers/scsi/dc390.h
new file mode 100644
index 0000000..18c7e03
--- /dev/null
+++ b/linux/src/drivers/scsi/dc390.h
@@ -0,0 +1,147 @@
+/***********************************************************************
+ * FILE NAME : DC390.H *
+ * BY : C.L. Huang *
+ * Description: Device Driver for Tekram DC-390(T) PCI SCSI *
+ * Bus Master Host Adapter *
+ ***********************************************************************/
+
+/* Kernel version autodetection */
+
+#include <linux/version.h>
+/* Convert Linux Version, Patch-level, Sub-level to LINUX_VERSION_CODE. */
+#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,50)
+#define VERSION_ELF_1_2_13
+#elif LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,95)
+#define VERSION_1_3_85
+#else
+#define VERSION_2_0_0
+#endif
+
+/*
+ * AMD 53C974 driver, header file
+ */
+
+#ifndef DC390_H
+#define DC390_H
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#ifdef VERSION_2_0_0
+#include <scsi/scsicam.h>
+#else
+#include <linux/scsicam.h>
+#endif
+
+extern int DC390_detect(Scsi_Host_Template *psht);
+extern int DC390_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+extern int DC390_abort(Scsi_Cmnd *cmd);
+
+#ifdef VERSION_2_0_0
+extern int DC390_reset(Scsi_Cmnd *cmd, unsigned int resetFlags);
+#else
+extern int DC390_reset(Scsi_Cmnd *cmd);
+#endif
+
+#ifdef VERSION_ELF_1_2_13
+extern int DC390_bios_param(Disk *disk, int devno, int geom[]);
+#else
+extern int DC390_bios_param(Disk *disk, kdev_t devno, int geom[]);
+#endif
+
+#ifdef MODULE
+static int DC390_release(struct Scsi_Host *);
+#else
+#define DC390_release NULL
+#endif
+
+#ifndef VERSION_ELF_1_2_13
+extern struct proc_dir_entry proc_scsi_tmscsim;
+extern int tmscsim_proc_info(char *buffer, char **start, off_t offset, int length, int hostno, int inout);
+#endif
+
+#ifdef VERSION_2_0_0
+
+#define DC390_T { \
+ NULL, /* *next */ \
+ NULL, /* *usage_count */ \
+ &proc_scsi_tmscsim, /* *proc_dir */ \
+ tmscsim_proc_info, /* (*proc_info)() */ \
+ "Tekram DC390(T) V1.11 Feb-05-1997", /* *name */ \
+ DC390_detect, \
+ DC390_release, /* (*release)() */ \
+ NULL, /* *(*info)() */ \
+ NULL, /* (*command)() */ \
+ DC390_queue_command, \
+ DC390_abort, \
+ DC390_reset, \
+ NULL, /* slave attach */\
+ DC390_bios_param, \
+ 10,/* can queue(-1) */ \
+ 7, /* id(-1) */ \
+ SG_ALL, \
+ 2, /* cmd per lun(2) */ \
+ 0, /* present */ \
+ 0, /* unchecked isa dma */ \
+ DISABLE_CLUSTERING \
+ }
+#endif
+
+
+#ifdef VERSION_1_3_85
+
+#define DC390_T { \
+ NULL, /* *next */ \
+ NULL, /* *usage_count */ \
+ &proc_scsi_tmscsim, /* *proc_dir */ \
+ tmscsim_proc_info, /* (*proc_info)() */ \
+ "Tekram DC390(T) V1.11 Feb-05-1997", /* *name */ \
+ DC390_detect, \
+ DC390_release, /* (*release)() */ \
+ NULL, /* *(*info)() */ \
+ NULL, /* (*command)() */ \
+ DC390_queue_command, \
+ DC390_abort, \
+ DC390_reset, \
+ NULL, /* slave attach */\
+ DC390_bios_param, \
+ 10,/* can queue(-1) */ \
+ 7, /* id(-1) */ \
+ SG_ALL, \
+ 2, /* cmd per lun(2) */ \
+ 0, /* present */ \
+ 0, /* unchecked isa dma */ \
+ DISABLE_CLUSTERING \
+ }
+#endif
+
+
+#ifdef VERSION_ELF_1_2_13
+
+#define DC390_T { \
+ NULL, \
+ NULL, \
+ "Tekram DC390(T) V1.11 Feb-05-1997",\
+ DC390_detect, \
+ DC390_release, \
+ NULL, /* info */ \
+ NULL, /* command, deprecated */ \
+ DC390_queue_command, \
+ DC390_abort, \
+ DC390_reset, \
+ NULL, /* slave attach */\
+ DC390_bios_param, \
+ 10,/* can queue(-1) */ \
+ 7, /* id(-1) */ \
+ 16,/* old (SG_ALL) */ \
+ 2, /* cmd per lun(2) */ \
+ 0, /* present */ \
+ 0, /* unchecked isa dma */ \
+ DISABLE_CLUSTERING \
+ }
+#endif
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#endif /* DC390_H */
diff --git a/linux/src/drivers/scsi/dtc.c b/linux/src/drivers/scsi/dtc.c
new file mode 100644
index 0000000..94c3e33
--- /dev/null
+++ b/linux/src/drivers/scsi/dtc.c
@@ -0,0 +1,400 @@
+
+#define AUTOSENSE
+#define PSEUDO_DMA
+#define DONT_USE_INTR
+#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
+#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\
+ NDEBUG_SELECTION+NDEBUG_ARBITRATION)
+#define DMA_WORKS_RIGHT
+
+
+/*
+ * DTC 3180/3280 driver, by
+ * Ray Van Tassle rayvt@comm.mot.com
+ *
+ * taken from ...
+ * Trantor T128/T128F/T228 driver by...
+ *
+ * Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+*/
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers.
+ * You probably want this.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - since the board is memory mapped,
+ * a BIOS signature is scanned for to locate the registers.
+ * An interrupt is triggered to autoprobe for the interrupt
+ * line.
+ *
+ * 2. With command line overrides - dtc=address,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+*/
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+   where something crashed or gets stuck at) */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+#if 0
+#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/blk.h>
+#include <asm/io.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "dtc.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include<linux/stat.h>
+#include<linux/string.h>
+
+struct proc_dir_entry proc_scsi_dtc = {
+ PROC_SCSI_T128, 7, "dtc3x80",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+ };
+
+
+static struct override {
+ __u32 address;
+ int irq;
+} overrides
+#ifdef OVERRIDE
+[] = OVERRIDE;
+#else
+[4] = {{0, IRQ_AUTO}, {0, IRQ_AUTO}, {0, IRQ_AUTO},
+ {0, IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ __u32 address;
+ int noauto;
+} bases[] = {{0xcc000, 0}, {0xc8000, 0},
+{0xdc000, 0}, {0xd8000, 0}};
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+static const struct signature {
+ const char *string;
+ int offset;
+} signatures[] = { {"DATA TECHNOLOGY CORPORATION BIOS", 0x25}, };
+
+#define NO_SIGNATURES (sizeof (signatures) / sizeof (struct signature))
+
+/*
+ * Function : dtc_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+*/
+
+void dtc_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("dtc_setup: usage dtc=address,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].address = ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].address == ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : int dtc_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes DTC 3180/3280 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+*/
+
+
+int dtc_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0, current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned char *base;
+ int sig, count;
+
+ tpnt->proc_dir = &proc_scsi_dtc;
+ tpnt->proc_info = &dtc_proc_info;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ base = NULL;
+
+ if (overrides[current_override].address)
+ base = (unsigned char *)overrides[current_override].address;
+ else
+ for (; !base && (current_base < NO_BASES); ++current_base) {
+#if (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi : probing address %08x\n", (unsigned int) bases[current_base].address);
+#endif
+ for (sig = 0; sig < NO_SIGNATURES; ++sig)
+ if (!bases[current_base].noauto && !memcmp
+ ((unsigned char *)(bases[current_base].address + signatures[sig].offset),
+ signatures[sig].string, strlen(signatures[sig].string))) {
+ base = (unsigned char *)bases[current_base].address;
+#if (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi-dtc : detected board.\n");
+#endif
+ break;
+ }
+ }
+
+#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi-dtc : base = %08x\n", (unsigned int) base);
+#endif
+
+ if (!base)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->base = base;
+
+ NCR5380_init(instance, 0);
+
+ NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); /* Enable int's */
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
+
+#ifndef DONT_USE_INTR
+/* With interrupts enabled, it will sometimes hang when doing heavy
+ * reads. So better not enable them until I figure it out. */
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, dtc_intr, SA_INTERRUPT, "dtc")) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+#else
+ if (instance->irq != IRQ_NONE)
+ printk("scsi%d : interrupts not used. Might as well not jumper it.\n",
+ instance->host_no);
+ instance->irq = IRQ_NONE;
+#endif
+#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%05X", instance->host_no, (int)instance->base);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, DTC_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int dtc_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+*/
+
+/*
+ * XXX Most SCSI boards use this mapping, but I could be incorrect. Someone
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+*/
+
+int dtc_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+
+/****************************************************************
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, reads len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+*/
+
+static int dtc_maxi = 0;
+static int dtc_wmaxi = 0;
+
+static inline int NCR5380_pread (struct Scsi_Host *instance,
+ unsigned char *dst, int len)
+ {
+ unsigned char *d = dst;
+ int i; /* For counting time spent in the poll-loop */
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ i = 0;
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
+ if (instance->irq == IRQ_NONE)
+ NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
+ else
+ NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
+ NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
+ rtrc(1);
+ while (len > 0) {
+ rtrc(2);
+ while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
+ ++i;
+ rtrc(3);
+ memcpy(d, (char *)(base + DTC_DATA_BUF), 128);
+ d += 128;
+ len -= 128;
+ rtrc(7); /*** with int's on, it sometimes hangs after here.
+ * Looks like something makes HBNR go away. */
+ }
+ rtrc(4);
+ while ( !(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
+ ++i;
+ NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
+ rtrc(0);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ if (i > dtc_maxi)
+ dtc_maxi = i;
+ return(0);
+}
+
+/****************************************************************
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+*/
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance,
+ unsigned char *src, int len) {
+ int i;
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
+ /* set direction (write) */
+ if (instance->irq == IRQ_NONE)
+ NCR5380_write(DTC_CONTROL_REG, 0);
+ else
+ NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
+ NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
+ for (i = 0; len > 0; ++i) {
+ rtrc(5);
+ /* Poll until the host buffer can accept data. */
+ while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
+ ++i;
+ rtrc(3);
+ memcpy((char *)(base + DTC_DATA_BUF), src, 128);
+ src += 128;
+ len -= 128;
+ }
+ rtrc(4);
+ while ( !(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
+ ++i;
+ rtrc(6);
+ /* Wait until the last byte has been sent to the disk */
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ++i;
+ rtrc(7);
+ /* Check for parity error here. fixme. */
+ NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
+ rtrc(0);
+ if (i > dtc_wmaxi)
+ dtc_wmaxi = i;
+ return (0);
+}
+
+#include "NCR5380.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = DTC3x80;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/dtc.h b/linux/src/drivers/scsi/dtc.h
new file mode 100644
index 0000000..4c41237
--- /dev/null
+++ b/linux/src/drivers/scsi/dtc.h
@@ -0,0 +1,169 @@
+/*
+ * DTC controller, taken from T128 driver by...
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ *
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef DTC3280_H
+#define DTC3280_H
+
+#define DTC_PUBLIC_RELEASE 1
+
+/*#define DTCDEBUG 0x1*/
+#define DTCDEBUG_INIT 0x1
+#define DTCDEBUG_TRANSFER 0x2
+
+/*
+ * The DTC3180 & 3280 boards are memory mapped.
+ *
+ */
+
+/*
+ */
+/* Offset from DTC_5380_OFFSET */
+#define DTC_CONTROL_REG 0x100 /* rw */
+#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
+#define CSR_DIR_READ 0x40 /* rw direction, 1 = read 0 = write */
+
+#define CSR_RESET 0x80 /* wo Resets 53c400 */
+#define CSR_5380_REG 0x80 /* ro 5380 registers can be accessed */
+#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
+#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
+#define CSR_5380_INTR 0x10 /* rw Enable 5380 interrupts */
+#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
+#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Host buffer not ready */
+#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer ready */
+#define CSR_GATED_5380_IRQ 0x01 /* ro Last block xferred */
+#define CSR_INT_BASE (CSR_SCSI_BUFF_INTR | CSR_5380_INTR)
+
+
+#define DTC_BLK_CNT 0x101 /* rw
+ * # of 128-byte blocks to transfer */
+
+
+#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
+
+#define DTC_SWITCH_REG 0x3982 /* ro - DIP switches */
+#define DTC_RESUME_XFER 0x3982 /* wo - resume data xfer
+ * after disconnect/reconnect*/
+
+#define DTC_5380_OFFSET 0x3880 /* 8 registers here, see NCR5380.h */
+
+/*!!!! for dtc, it's a 128 byte buffer at 3900 !!! */
+#define DTC_DATA_BUF 0x3900 /* rw 128 bytes long */
+
+
+#ifndef ASM
+int dtc_abort(Scsi_Cmnd *);
+int dtc_biosparam(Disk *, kdev_t, int*);
+int dtc_detect(Scsi_Host_Template *);
+int dtc_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int dtc_reset(Scsi_Cmnd *, unsigned int reset_flags);
+int dtc_proc_info (char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define DTC3x80 {NULL, NULL, NULL, NULL, \
+ "DTC 3180/3280 ", dtc_detect, NULL, \
+ NULL, \
+ NULL, dtc_queue_command, dtc_abort, dtc_reset, NULL, \
+ dtc_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned char *base
+
+#define NCR5380_local_declare() \
+ volatile unsigned char *base
+
+#define NCR5380_setup(instance) \
+ base = (volatile unsigned char *) (instance)->base
+
+#define DTC_address(reg) (base + DTC_5380_OFFSET + reg)
+
+#define dbNCR5380_read(reg) \
+ (rval=*(DTC_address(reg)), \
+ (((unsigned char) printk("DTC : read register %d at addr %08x is: %02x\n"\
+ , (reg), (int)DTC_address(reg), rval)), rval ) )
+
+#define dbNCR5380_write(reg, value) do { \
+ printk("DTC : write %02x to register %d at address %08x\n", \
+ (value), (reg), (int)DTC_address(reg)); \
+ *(DTC_address(reg)) = (value);} while(0)
+
+
+#if !(DTCDEBUG & DTCDEBUG_TRANSFER)
+#define NCR5380_read(reg) (*(DTC_address(reg)))
+#define NCR5380_write(reg, value) (*(DTC_address(reg)) = (value))
+#else
+#define NCR5380_read(reg) (*(DTC_address(reg)))
+#define xNCR5380_read(reg) \
+ (((unsigned char) printk("DTC : read register %d at address %08x\n"\
+ , (reg), DTC_address(reg))), *(DTC_address(reg)))
+
+#define NCR5380_write(reg, value) do { \
+ printk("DTC : write %02x to register %d at address %08x\n", \
+ (value), (reg), (int)DTC_address(reg)); \
+ *(DTC_address(reg)) = (value); } while(0)
+#endif
+
+#define NCR5380_intr dtc_intr
+#define NCR5380_queue_command dtc_queue_command
+#define NCR5380_abort dtc_abort
+#define NCR5380_reset dtc_reset
+#define NCR5380_proc_info dtc_proc_info
+
+/* 15 12 11 10
+ 1001 1100 0000 0000 */
+
+#define DTC_IRQS 0x9c00
+
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* DTC3280_H */
diff --git a/linux/src/drivers/scsi/eata.c b/linux/src/drivers/scsi/eata.c
new file mode 100644
index 0000000..49f0827
--- /dev/null
+++ b/linux/src/drivers/scsi/eata.c
@@ -0,0 +1,2331 @@
+/*
+ * eata.c - Low-level driver for EATA/DMA SCSI host adapters.
+ *
+ * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
+ * + Added command line option (rs:[y|n]) to reverse the scan order
+ * of PCI boards. The default is rs:y, which reverses the BIOS order
+ * while registering PCI boards. The default value rs:y generates
+ * the same order of all previous revisions of this driver.
+ * Pls. note that "BIOS order" might have been reversed itself
+ * after the 2.1.9x PCI modifications in the linux kernel.
+ * The rs value is ignored when the explicit list of addresses
+ * is used by the "eata=port0,port1,..." command line option.
+ * + Added command line option (et:[y|n]) to force use of extended
+ * translation (255 heads, 63 sectors) as disk geometry.
+ * The default is et:n, which uses the disk geometry returned
+ * by scsicam_bios_param. The default value et:n is compatible with
+ * all previous revisions of this driver.
+ *
+ * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
+ * Increased busy timeout from 10 msec. to 200 msec. while
+ * processing interrupts.
+ *
+ * 16 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
+ * Improved abort handling during the eh recovery process.
+ *
+ * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
+ * The driver is now fully SMP safe, including the
+ * abort and reset routines.
+ * Added command line options (eh:[y|n]) to choose between
+ * new_eh_code and the old scsi code.
+ * If linux version >= 2.1.101 the default is eh:y, while the eh
+ * option is ignored for previous releases and the old scsi code
+ * is used.
+ *
+ * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
+ * Reworked interrupt handler.
+ *
+ * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
+ * Major reliability improvement: when a batch with overlapping
+ * requests is detected, requests are queued one at a time
+ * eliminating any possible board or drive reordering.
+ *
+ * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
+ * Improved SMP support (if linux version >= 2.1.95).
+ *
+ * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
+ * Added support for new PCI code and IO-APIC remapping of irqs.
+ * Performance improvement: when sequential i/o is detected,
+ * always use direct sort instead of reverse sort.
+ *
+ * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
+ * io_port is now unsigned long.
+ *
+ * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
+ * Use new scsi error handling code (if linux version >= 2.1.88).
+ * Use new interrupt code.
+ *
+ * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
+ * Use of udelay inside the wait loops to avoid timeout
+ * problems with fast cpus.
+ * Removed check about useless calls to the interrupt service
+ * routine (reported on SMP systems only).
+ * At initialization time "sorted/unsorted" is displayed instead
+ * of "linked/unlinked" to reinforce the fact that "linking" is
+ * nothing but "elevator sorting" in the actual implementation.
+ *
+ * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
+ * Use of serial_number_at_timeout in abort and reset processing.
+ * Use of the __initfunc and __initdata macro in setup code.
+ * Minor cleanups in the list_statistics code.
+ * Increased controller busy timeout in order to better support
+ * slow SCSI devices.
+ *
+ * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
+ * When loading as a module, parameter passing is now supported
+ * both in 2.0 and in 2.1 style.
+ * Fixed data transfer direction for some SCSI opcodes.
+ * Immediate acknowledge to request sense commands.
+ * Linked commands to each disk device are now reordered by elevator
+ * sorting. Rare cases in which reordering of write requests could
+ * cause wrong results are managed.
+ * Fixed spurious timeouts caused by long simple queue tag sequences.
+ * New command line option (tm:[0-3]) to choose the type of tags:
+ * 0 -> mixed (default); 1 -> simple; 2 -> head; 3 -> ordered.
+ *
+ * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
+ * Added command line options to enable/disable linked commands
+ * (lc:[y|n]), tagged commands (tc:[y|n]) and to set the max queue
+ * depth (mq:xx). Default is "eata=lc:n,tc:n,mq:16".
+ * Improved command linking.
+ * Documented how to setup RAID-0 with DPT SmartRAID boards.
+ *
+ * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
+ * Added linked command support.
+ * Improved detection of PCI boards using ISA base addresses.
+ *
+ * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
+ * Added support for tagged commands and queue depth adjustment.
+ *
+ * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
+ * When CONFIG_PCI is defined, BIOS32 is used to include in the
+ * list of i/o ports to be probed all the PCI SCSI controllers.
+ * The list of i/o ports to be probed can be overwritten by the
+ * "eata=port0,port1,...." boot command line option.
+ * Scatter/gather lists are now allocated by a number of kmalloc
+ * calls, in order to avoid the previous size limit of 64Kb.
+ *
+ * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
+ * Added support for EATA 2.0C, PCI, multichannel and wide SCSI.
+ *
+ * 27 Sep 1996 rev. 2.12 for linux 2.1.0
+ * Portability cleanups (virtual/bus addressing, little/big endian
+ * support).
+ *
+ * 09 Jul 1996 rev. 2.11 for linux 2.0.4
+ * Number of internal retries is now limited.
+ *
+ * 16 Apr 1996 rev. 2.10 for linux 1.3.90
+ * New argument "reset_flags" to the reset routine.
+ *
+ * 6 Jul 1995 rev. 2.01 for linux 1.3.7
+ * Update required by the new /proc/scsi support.
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * Now DEBUG_RESET is disabled by default.
+ * Register a board even if it does not assert DMA protocol support
+ * (DPT SK2011B does not report correctly the dmasup bit).
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ * New list of Data Out SCSI commands.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ * All external symbols renamed to avoid possible name conflicts.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ *
+ * 24 Jan 1995 rev. 1.13 for linux 1.1.85
+ * Use optimized board configuration, with a measured performance
+ * increase in the range 10%-20% on i/o throughput.
+ *
+ * 16 Jan 1995 rev. 1.12 for linux 1.1.81
+ * Fix mscp structure comments (no functional change).
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 17 Dec 1994 rev. 1.11 for linux 1.1.74
+ * Use the scsicam_bios_param routine. This allows an easy
+ * migration path from disk partition tables created using
+ * different SCSI drivers and non optimal disk geometry.
+ *
+ * 15 Dec 1994 rev. 1.10 for linux 1.1.74
+ * Added support for ISA EATA boards (DPT PM2011, DPT PM2021).
+ * The host->block flag is set for all the detected ISA boards.
+ * The detect routine no longer enforces LEVEL triggering
+ * for EISA boards, it just prints a warning message.
+ *
+ * 30 Nov 1994 rev. 1.09 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 18 Nov 1994 rev. 1.08 for linux 1.1.64
+ * Forces sg_tablesize = 64 and can_queue = 64 if these
+ * values are not correctly detected (DPT PM2012).
+ *
+ * 14 Nov 1994 rev. 1.07 for linux 1.1.63 Final BETA release.
+ * 04 Aug 1994 rev. 1.00 for linux 1.1.39 First BETA release.
+ *
+ *
+ * This driver is based on the CAM (Common Access Method Committee)
+ * EATA (Enhanced AT Bus Attachment) rev. 2.0A, using DMA protocol.
+ *
+ * Copyright (C) 1994-1998 Dario Ballabio (dario@milano.europe.dg.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ */
+
+/*
+ *
+ * Here is a brief description of the DPT SCSI host adapters.
+ * All these boards provide an EATA/DMA compatible programming interface
+ * and are fully supported by this driver in any configuration, including
+ * multiple SCSI channels:
+ *
+ * PM2011B/9X - Entry Level ISA
+ * PM2021A/9X - High Performance ISA
+ * PM2012A Old EISA
+ * PM2012B Old EISA
+ * PM2022A/9X - Entry Level EISA
+ * PM2122A/9X - High Performance EISA
+ * PM2322A/9X - Extra High Performance EISA
+ * PM3021 - SmartRAID Adapter for ISA
+ * PM3222 - SmartRAID Adapter for EISA (PM3222W is 16-bit wide SCSI)
+ * PM3224 - SmartRAID Adapter for PCI (PM3224W is 16-bit wide SCSI)
+ *
+ * The above list is just an indication: as a matter of fact all DPT
+ * boards using the EATA/DMA protocol are supported by this driver,
+ * since they use exactly the same programming interface.
+ *
+ * The DPT PM2001 provides only the EATA/PIO interface and hence is not
+ * supported by this driver.
+ *
+ * This code has been tested with up to 3 Distributed Processing Technology
+ * PM2122A/9X (DPT SCSI BIOS v002.D1, firmware v05E.0) EISA controllers,
+ * in any combination of private and shared IRQ.
+ * PCI support has been tested using up to 2 DPT PM3224W (DPT SCSI BIOS
+ * v003.D0, firmware v07G.0).
+ *
+ * DPT SmartRAID boards support "Hardware Array" - a group of disk drives
+ * which are all members of the same RAID-0, RAID-1 or RAID-5 array implemented
+ * in host adapter hardware. Hardware Arrays are fully compatible with this
+ * driver, since they look to it as a single disk drive.
+ *
+ * WARNING: to create a RAID-0 "Hardware Array" you must select "Other Unix"
+ * as the current OS in the DPTMGR "Initial System Installation" menu.
+ * Otherwise RAID-0 is generated as an "Array Group" (i.e. software RAID-0),
+ * which is not supported by the actual SCSI subsystem.
+ * To get the "Array Group" functionality, the Linux MD driver must be used
+ * instead of the DPT "Array Group" feature.
+ *
+ * Multiple ISA, EISA and PCI boards can be configured in the same system.
+ * It is suggested to put all the EISA boards on the same IRQ level, all
+ * the PCI boards on another IRQ level, while ISA boards cannot share
+ * interrupts.
+ *
+ * If you configure multiple boards on the same IRQ, the interrupt must
+ * be _level_ triggered (not _edge_ triggered).
+ *
+ * This driver detects EATA boards by probes at fixed port addresses,
+ * so no BIOS32 or PCI BIOS support is required.
+ * The suggested way to detect a generic EATA PCI board is to force on it
+ * any unused EISA address, even if there are other controllers on the EISA
+ * bus, or even if your system has no EISA bus at all.
+ * Do not force any ISA address on EATA PCI boards.
+ *
+ * If PCI bios support is configured into the kernel, BIOS32 is used to
+ * include in the list of i/o ports to be probed all the PCI SCSI controllers.
+ *
+ * Due to a DPT BIOS "feature", it might not be possible to force an EISA
+ * address on more than a single DPT PCI board, so in this case you have to
+ * let the PCI BIOS assign the addresses.
+ *
+ * The sequence of detection probes is:
+ *
+ * - ISA 0x1F0;
+ * - PCI SCSI controllers (only if BIOS32 is available);
+ * - EISA/PCI 0x1C88 through 0xFC88 (corresponding to EISA slots 1 to 15);
+ * - ISA 0x170, 0x230, 0x330.
+ *
+ * The above list of detection probes can be totally replaced by the
+ * boot command line option: "eata=port0,port1,port2,...", where the
+ * port0, port1... arguments are ISA/EISA/PCI addresses to be probed.
+ * For example using "eata=0x7410,0x7450,0x230", the driver probes
+ * only the two PCI addresses 0x7410 and 0x7450 and the ISA address 0x230,
+ * in this order; "eata=0" totally disables this driver.
+ *
+ * After the optional list of detection probes, other possible command line
+ * options are:
+ *
+ * eh:y use new scsi code (linux 2.2 only);
+ * eh:n use old scsi code;
+ * et:y force use of extended translation (255 heads, 63 sectors);
+ * et:n use disk geometry detected by scsicam_bios_param;
+ * rs:y reverse scan order while detecting PCI boards;
+ * rs:n use BIOS order while detecting PCI boards;
+ * lc:y enables linked commands;
+ * lc:n disables linked commands;
+ * tc:y enables tagged commands;
+ * tc:n disables tagged commands;
+ * tm:0 use head/simple/ordered queue tag sequences;
+ * tm:1 use only simple queue tags;
+ * tm:2 use only head of queue tags;
+ * tm:3 use only ordered queue tags;
+ * mq:xx set the max queue depth to the value xx (2 <= xx <= 32).
+ *
+ * The default value is: "eata=lc:n,tc:n,mq:16,tm:0,et:n,rs:n".
+ * An example using the list of detection probes could be:
+ * "eata=0x7410,0x230,lc:y,tc:n,mq:4,eh:n,et:n".
+ *
+ * When loading as a module, parameters can be specified as well.
+ * The above example would be (use 1 in place of y and 0 in place of n):
+ *
+ * modprobe eata io_port=0x7410,0x230 linked_comm=1 tagged_comm=0 \
+ * max_queue_depth=4 tag_mode=0 use_new_eh_code=0 \
+ * ext_tran=0 rev_scan=1
+ *
+ * ----------------------------------------------------------------------------
+ * In this implementation, linked commands are designed to work with any DISK
+ * or CD-ROM, since this linking has only the intent of clustering (time-wise)
+ * and reordering by elevator sorting commands directed to each device,
+ * without any relation with the actual SCSI protocol between the controller
+ * and the device.
+ * If Q is the queue depth reported at boot time for each device (also named
+ * cmds/lun) and Q > 2, whenever there is already an active command to the
+ * device all other commands to the same device (up to Q-1) are kept waiting
+ * in the elevator sorting queue. When the active command completes, the
+ * commands in this queue are sorted by sector address. The sort is chosen
+ * between increasing or decreasing by minimizing the seek distance between
+ * the sector of the commands just completed and the sector of the first
+ * command in the list to be sorted.
+ * Trivial math assures that the unsorted average seek distance when doing
+ * random seeks over S sectors is S/3.
+ * When (Q-1) requests are uniformly distributed over S sectors, the average
+ * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
+ * average seek distance for (Q-1) random requests over S sectors is S/Q.
+ * The elevator sorting hence divides the seek distance by a factor Q/3.
+ * The above pure geometric remarks are valid in all cases and the
+ * driver effectively reduces the seek distance by the predicted factor
+ * when there are Q concurrent read i/o operations on the device, but this
+ * does not necessarily result in a noticeable performance improvement:
+ * your mileage may vary....
+ *
+ * Note: command reordering inside a batch of queued commands could cause
+ * wrong results only if there is at least one write request and the
+ * intersection (sector-wise) of all requests is not empty.
+ * When the driver detects a batch including overlapping requests
+ * (a really rare event) strict serial (pid) order is enforced.
+ * ----------------------------------------------------------------------------
+ * The extended translation option (et:y) is useful when using large physical
+ * disks/arrays. It could also be useful when switching between Adaptec boards
+ * and DPT boards without reformatting the disk.
+ * When a boot disk is partitioned with extended translation, in order to
+ * be able to boot it with a DPT board it could be necessary to add to
+ * lilo.conf additional commands as in the following example:
+ *
+ * fix-table
+ * disk=/dev/sda bios=0x80 sectors=63 heads=128 cylinders=546
+ *
+ * where the above geometry should be replaced with the one reported at
+ * power up by the DPT controller.
+ * ----------------------------------------------------------------------------
+ *
+ * The boards are named EATA0, EATA1,... according to the detection order.
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = TRUE for all ISA boards.
+ */
+
+#include <linux/version.h>
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+#define MAX_INT_PARAM 10
+
+#if defined(MODULE)
+#include <linux/module.h>
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,26)
+MODULE_PARM(io_port, "1-" __MODULE_STRING(MAX_INT_PARAM) "i");
+MODULE_PARM(linked_comm, "i");
+MODULE_PARM(tagged_comm, "i");
+MODULE_PARM(link_statistics, "i");
+MODULE_PARM(max_queue_depth, "i");
+MODULE_PARM(tag_mode, "i");
+MODULE_PARM(use_new_eh_code, "i");
+MODULE_PARM(ext_tran, "i");
+MODULE_PARM(rev_scan, "i");
+MODULE_AUTHOR("Dario Ballabio");
+#endif
+
+#endif
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include "eata.h"
+#include <linux/stat.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,93)
+#include <linux/bios32.h>
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,36)
+#include <linux/init.h>
+#else
+#define __initfunc(A) A
+#define __initdata
+#define __init
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+#include <asm/spinlock.h>
+#define IRQ_FLAGS
+#define IRQ_LOCK
+#define IRQ_LOCK_SAVE
+#define IRQ_UNLOCK
+#define IRQ_UNLOCK_RESTORE
+#define SPIN_FLAGS unsigned long spin_flags;
+#define SPIN_LOCK spin_lock_irq(&io_request_lock);
+#define SPIN_LOCK_SAVE spin_lock_irqsave(&io_request_lock, spin_flags);
+#define SPIN_UNLOCK spin_unlock_irq(&io_request_lock);
+#define SPIN_UNLOCK_RESTORE \
+ spin_unlock_irqrestore(&io_request_lock, spin_flags);
+static int use_new_eh_code = TRUE;
+#else
+#define IRQ_FLAGS unsigned long irq_flags;
+#define IRQ_LOCK cli();
+#define IRQ_LOCK_SAVE do {save_flags(irq_flags); cli();} while (0);
+#define IRQ_UNLOCK sti();
+#define IRQ_UNLOCK_RESTORE do {restore_flags(irq_flags);} while (0);
+#define SPIN_FLAGS
+#define SPIN_LOCK
+#define SPIN_LOCK_SAVE
+#define SPIN_UNLOCK
+#define SPIN_UNLOCK_RESTORE
+static int use_new_eh_code = FALSE;
+#endif
+
+struct proc_dir_entry proc_scsi_eata2x = {
+ PROC_SCSI_EATA2X, 6, "eata2x",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#undef FORCE_CONFIG
+
+#undef DEBUG_LINKED_COMMANDS
+#undef DEBUG_DETECT
+#undef DEBUG_PCI_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_RESET
+#undef DEBUG_GENERATE_ERRORS
+#undef DEBUG_GENERATE_ABORTS
+#undef DEBUG_GEOMETRY
+
+#define MAX_ISA 4
+#define MAX_VESA 0
+#define MAX_EISA 15
+#define MAX_PCI 16
+#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
+#define MAX_CHANNEL 4
+#define MAX_LUN 32
+#define MAX_TARGET 32
+#define MAX_MAILBOXES 64
+#define MAX_SGLIST 64
+#define MAX_LARGE_SGLIST 122
+#define MAX_INTERNAL_RETRIES 64
+#define MAX_CMD_PER_LUN 2
+#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
+
+#define SKIP ULONG_MAX
+#define FALSE 0
+#define TRUE 1
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define READY 5
+#define ABORTING 6
+#define NO_DMA 0xff
+#define MAXLOOP 10000
+#define TAG_MIXED 0
+#define TAG_SIMPLE 1
+#define TAG_HEAD 2
+#define TAG_ORDERED 3
+
+#define REG_CMD 7
+#define REG_STATUS 7
+#define REG_AUX_STATUS 8
+#define REG_DATA 0
+#define REG_DATA2 1
+#define REG_SEE 6
+#define REG_LOW 2
+#define REG_LM 3
+#define REG_MID 4
+#define REG_MSB 5
+#define REGION_SIZE 9
+#define MAX_ISA_ADDR 0x03ff
+#define MIN_EISA_ADDR 0x1c88
+#define MAX_EISA_ADDR 0xfc88
+#define BSY_ASSERTED 0x80
+#define DRQ_ASSERTED 0x08
+#define ABSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x02
+#define READ_CONFIG_PIO 0xf0
+#define SET_CONFIG_PIO 0xf1
+#define SEND_CP_PIO 0xf2
+#define RECEIVE_SP_PIO 0xf3
+#define TRUNCATE_XFR_PIO 0xf4
+#define RESET_PIO 0xf9
+#define READ_CONFIG_DMA 0xfd
+#define SET_CONFIG_DMA 0xfe
+#define SEND_CP_DMA 0xff
+#define ASOK 0x00
+#define ASST 0x01
+
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+#define YESNO(a) ((a) ? 'y' : 'n')
+#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
+
+/* "EATA", in Big Endian format */
+#define EATA_SIGNATURE 0x41544145
+
+/* Number of valid bytes in the board config structure for EATA 2.0x */
+#define EATA_2_0A_SIZE 28
+#define EATA_2_0B_SIZE 30
+#define EATA_2_0C_SIZE 34
+
+/* Board info structure */
+struct eata_info {
+ ulong data_len; /* Number of valid bytes after this field */
+ ulong sign; /* ASCII "EATA" signature */
+ unchar :4, /* unused low nibble */
+ version:4; /* EATA version, should be 0x1 */
+ unchar ocsena:1, /* Overlap Command Support Enabled */
+ tarsup:1, /* Target Mode Supported */
+ trnxfr:1, /* Truncate Transfer Cmd NOT Necessary */
+ morsup:1, /* More Supported */
+ dmasup:1, /* DMA Supported */
+ drqvld:1, /* DRQ Index (DRQX) is valid */
+ ata:1, /* This is an ATA device */
+ haaval:1; /* Host Adapter Address Valid */
+ ushort cp_pad_len; /* Number of pad bytes after cp_len */
+ unchar host_addr[4]; /* Host Adapter SCSI ID for channels 3, 2, 1, 0 */
+ ulong cp_len; /* Number of valid bytes in cp */
+ ulong sp_len; /* Number of valid bytes in sp */
+ ushort queue_size; /* Max number of cp that can be queued */
+ ushort unused;
+ ushort scatt_size; /* Max number of entries in scatter/gather table */
+ unchar irq:4, /* Interrupt Request assigned to this controller */
+ irq_tr:1, /* 0 for edge triggered, 1 for level triggered */
+ second:1, /* 1 if this is a secondary (not primary) controller */
+ drqx:2; /* DRQ Index (0=DMA0, 1=DMA7, 2=DMA6, 3=DMA5) */
+ unchar sync; /* 1 if scsi target id 7...0 is running sync scsi */
+
+ /* Structure extension defined in EATA 2.0B */
+ unchar isaena:1, /* ISA i/o addressing is disabled/enabled */
+ forcaddr:1, /* Port address has been forced */
+ large_sg:1, /* 1 if large SG lists are supported */
+ res1:1,
+ :4;
+ unchar max_id:5, /* Max SCSI target ID number */
+ max_chan:3; /* Max SCSI channel number on this board */
+
+ /* Structure extension defined in EATA 2.0C */
+ unchar max_lun; /* Max SCSI LUN number */
+ unchar :4,
+ m1:1, /* This is a PCI with an M1 chip installed */
+ idquest:1, /* RAIDNUM returned is questionable */
+ pci:1, /* This board is PCI */
+ eisa:1; /* This board is EISA */
+ unchar raidnum; /* Uniquely identifies this HBA in a system */
+ unchar notused;
+
+ ushort ipad[247];
+ };
+
+/* Board config structure */
+struct eata_config {
+ ushort len; /* Number of bytes following this field */
+ unchar edis:1, /* Disable EATA interface after config command */
+ ocena:1, /* Overlapped Commands Enabled */
+ mdpena:1, /* Transfer all Modified Data Pointer Messages */
+ tarena:1, /* Target Mode Enabled for this controller */
+ :4;
+ unchar cpad[511];
+ };
+
+/* Returned status packet structure */
+struct mssp {
+ unchar adapter_status:7, /* State related to current command */
+ eoc:1; /* End Of Command (1 = command completed) */
+ unchar target_status; /* SCSI status received after data transfer */
+ unchar unused[2];
+ ulong inv_res_len; /* Number of bytes not transferred */
+ struct mscp *cpp; /* Address set in cp */
+ char mess[12];
+ };
+
+struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+ };
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+ unchar sreset:1, /* SCSI Bus Reset Signal should be asserted */
+ init:1, /* Re-initialize controller and self test */
+ reqsen:1, /* Transfer Request Sense Data to addr using DMA */
+ sg:1, /* Use Scatter/Gather */
+ :1,
+ interp:1, /* The controller interprets cp, not the target */
+ dout:1, /* Direction of Transfer is Out (Host to Target) */
+ din:1; /* Direction of Transfer is In (Target to Host) */
+ unchar sense_len; /* Request Sense Length */
+ unchar unused[3];
+ unchar fwnest:1, /* Send command to a component of an Array Group */
+ :7;
+ unchar phsunit:1, /* Send to Target Physical Unit (bypass RAID) */
+ iat:1, /* Inhibit Address Translation */
+ hbaci:1, /* Inhibit HBA Caching for this command */
+ :5;
+ unchar target:5, /* SCSI target ID */
+ channel:3; /* SCSI channel number */
+ unchar lun:5, /* SCSI logical unit number */
+ luntar:1, /* This cp is for Target (not LUN) */
+ dispri:1, /* Disconnect Privilege granted */
+ one:1; /* 1 */
+ unchar mess[3]; /* Massage to/from Target */
+ unchar cdb[12]; /* Command Descriptor Block */
+ ulong data_len; /* If sg=0 Data Length, if sg=1 sglist length */
+ struct mscp *cpp; /* Address to be returned in sp */
+ ulong data_address; /* If sg=0 Data Address, if sg=1 sglist address */
+ ulong sp_addr; /* Address where sp is DMA'ed when cp completes */
+ ulong sense_addr; /* Address where Sense Data is DMA'ed on error */
+ Scsi_Cmnd *SCpnt;
+ unsigned int index; /* cp index */
+ struct sg_list *sglist;
+ };
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ char board_id[256]; /* data from INQUIRY on this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
+ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
+ unsigned int retries; /* Number of internal retries */
+ unsigned long last_retried_pid; /* Pid of last retried command */
+ unsigned char subversion; /* Bus type, either ISA or EISA/PCI */
+ unsigned char protocol_rev; /* EATA 2.0 rev., 'A' or 'B' or 'C' */
+ struct mssp sp[2]; /* Returned status for this board */
+ };
+
+static struct Scsi_Host *sh[MAX_BOARDS + 1];
+static const char *driver_name = "EATA";
+static char sha[MAX_BOARDS];
+
+/* Initialize num_boards so that ihdlr can work while detect is in progress */
+static unsigned int num_boards = MAX_BOARDS;
+
+static unsigned long io_port[] __initdata = {
+
+ /* Space for MAX_INT_PARAM ports usable while loading as a module */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP,
+
+ /* First ISA */
+ 0x1f0,
+
+ /* Space for MAX_PCI ports possibly reported by PCI_BIOS */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+
+ /* MAX_EISA ports */
+ 0x1c88, 0x2c88, 0x3c88, 0x4c88, 0x5c88, 0x6c88, 0x7c88, 0x8c88,
+ 0x9c88, 0xac88, 0xbc88, 0xcc88, 0xdc88, 0xec88, 0xfc88,
+
+ /* Other (MAX_ISA - 1) ports */
+ 0x170, 0x230, 0x330,
+
+ /* End of list */
+ 0x0
+ };
+
+#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
+#define BN(board) (HD(board)->board_name)
+
+#define H2DEV(x) htonl(x)
+#define DEV2H(x) H2DEV(x)
+#define V2DEV(addr) ((addr) ? H2DEV(virt_to_bus((void *)addr)) : 0)
+#define DEV2V(addr) ((addr) ? DEV2H(bus_to_virt((unsigned long)addr)) : 0)
+
+static void do_interrupt_handler(int, void *, struct pt_regs *);
+static void flush_dev(Scsi_Device *, unsigned long, unsigned int, unsigned int);
+static int do_trace = FALSE;
+static int setup_done = FALSE;
+static int link_statistics = 0;
+static int tag_mode = TAG_MIXED;
+static int ext_tran = FALSE;
+static int rev_scan = TRUE;
+
+#if defined(CONFIG_SCSI_EATA_TAGGED_QUEUE)
+static int tagged_comm = TRUE;
+#else
+static int tagged_comm = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_EATA_LINKED_COMMANDS)
+static int linked_comm = TRUE;
+#else
+static int linked_comm = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_EATA_MAX_TAGS)
+static int max_queue_depth = CONFIG_SCSI_EATA_MAX_TAGS;
+#else
+static int max_queue_depth = MAX_CMD_PER_LUN;
+#endif
+
+static void select_queue_depths(struct Scsi_Host *host, Scsi_Device *devlist) {
+ Scsi_Device *dev;
+ int j, ntag = 0, nuntag = 0, tqd, utqd;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ j = ((struct hostdata *) host->hostdata)->board_number;
+
+ for(dev = devlist; dev; dev = dev->next) {
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ ntag++;
+ else
+ nuntag++;
+ }
+
+ utqd = MAX_CMD_PER_LUN;
+
+ tqd = (host->can_queue - utqd * nuntag) / (ntag ? ntag : 1);
+
+ if (tqd > max_queue_depth) tqd = max_queue_depth;
+
+ if (tqd < MAX_CMD_PER_LUN) tqd = MAX_CMD_PER_LUN;
+
+ for(dev = devlist; dev; dev = dev->next) {
+ char *tag_suffix = "", *link_suffix = "";
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ dev->queue_depth = tqd;
+ else
+ dev->queue_depth = utqd;
+
+ if (TLDEV(dev->type)) {
+ if (linked_comm && dev->queue_depth > 2)
+ link_suffix = ", sorted";
+ else
+ link_suffix = ", unsorted";
+ }
+
+ if (tagged_comm && dev->tagged_supported && TLDEV(dev->type)) {
+ dev->tagged_queue = 1;
+ dev->current_tag = 1;
+ }
+
+ if (dev->tagged_supported && TLDEV(dev->type) && dev->tagged_queue)
+ tag_suffix = ", tagged";
+ else if (dev->tagged_supported && TLDEV(dev->type))
+ tag_suffix = ", untagged";
+
+ printk("%s: scsi%d, channel %d, id %d, lun %d, cmds/lun %d%s%s.\n",
+ BN(j), host->host_no, dev->channel, dev->id, dev->lun,
+ dev->queue_depth, link_suffix, tag_suffix);
+ }
+
+ IRQ_UNLOCK_RESTORE
+ return;
+}
+
+static inline int wait_on_busy(unsigned long iobase, unsigned int loop) {
+
+ while (inb(iobase + REG_AUX_STATUS) & ABSY_ASSERTED) {
+ udelay(1L);
+ if (--loop == 0) return TRUE;
+ }
+
+ return FALSE;
+}
+
+static inline int do_dma(unsigned long iobase, unsigned int addr, unchar cmd) {
+
+ if (wait_on_busy(iobase, (addr ? MAXLOOP * 100 : MAXLOOP))) return TRUE;
+
+ if ((addr = V2DEV(addr))) {
+ outb((char) (addr >> 24), iobase + REG_LOW);
+ outb((char) (addr >> 16), iobase + REG_LM);
+ outb((char) (addr >> 8), iobase + REG_MID);
+ outb((char) addr, iobase + REG_MSB);
+ }
+
+ outb(cmd, iobase + REG_CMD);
+ return FALSE;
+}
+
+static inline int read_pio(unsigned long iobase, ushort *start, ushort *end) {
+ unsigned int loop = MAXLOOP;
+ ushort *p;
+
+ for (p = start; p <= end; p++) {
+
+ while (!(inb(iobase + REG_STATUS) & DRQ_ASSERTED)) {
+ udelay(1L);
+ if (--loop == 0) return TRUE;
+ }
+
+ loop = MAXLOOP;
+ *p = inw(iobase);
+ }
+
+ return FALSE;
+}
+
+__initfunc (static inline int
+ get_pci_irq(unsigned long port_base, unsigned char *apic_irq)) {
+
+#if defined(CONFIG_PCI)
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+
+ unsigned int addr;
+ struct pci_dev *dev = NULL;
+
+ if (!pci_present()) return FALSE;
+
+ while((dev = pci_find_class(PCI_CLASS_STORAGE_SCSI << 8, dev))) {
+
+ if (pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &addr)) continue;
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: get_pci_irq, bus %d, devfn 0x%x, addr 0x%x, apic_irq %u.\n",
+ driver_name, dev->bus->number, dev->devfn, addr, dev->irq);
+#endif
+
+ if ((addr & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_IO)
+ continue;
+
+ if ((addr & PCI_BASE_ADDRESS_IO_MASK) + PCI_BASE_ADDRESS_0 == port_base) {
+ *apic_irq = dev->irq;
+ return TRUE;
+ }
+
+ }
+
+#endif /* end new style PCI code */
+
+#endif /* end CONFIG_PCI */
+
+ return FALSE;
+}
+
+__initfunc (static inline int port_detect \
+ (unsigned long port_base, unsigned int j, Scsi_Host_Template *tpnt)) {
+ unsigned char irq, dma_channel, subversion, i;
+ unsigned char protocol_rev, apic_irq;
+ struct eata_info info;
+ char *bus_type, dma_name[16], tag_type;
+
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ printk("\rprobing eata on %lx", port_base);
+
+ if(check_region(port_base, REGION_SIZE)) {
+ printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
+ return FALSE;
+ }
+
+ if (do_dma(port_base, 0, READ_CONFIG_PIO)) return FALSE;
+
+ /* Read the info structure */
+ if (read_pio(port_base, (ushort *)&info, (ushort *)&info.ipad[0]))
+ return FALSE;
+
+ /* Check the controller "EATA" signature */
+ if (info.sign != EATA_SIGNATURE) return FALSE;
+
+ if (DEV2H(info.data_len) < EATA_2_0A_SIZE) {
+ printk("%s: config structure size (%ld bytes) too short, detaching.\n",
+ name, DEV2H(info.data_len));
+ return FALSE;
+ }
+ else if (DEV2H(info.data_len) == EATA_2_0A_SIZE)
+ protocol_rev = 'A';
+ else if (DEV2H(info.data_len) == EATA_2_0B_SIZE)
+ protocol_rev = 'B';
+ else
+ protocol_rev = 'C';
+
+ if (!setup_done && j > 0 && j <= MAX_PCI) {
+ bus_type = "PCI";
+ subversion = ESA;
+ }
+ else if (port_base > MAX_EISA_ADDR || (protocol_rev == 'C' && info.pci)) {
+ bus_type = "PCI";
+ subversion = ESA;
+ }
+ else if (port_base >= MIN_EISA_ADDR || (protocol_rev == 'C' && info.eisa)) {
+ bus_type = "EISA";
+ subversion = ESA;
+ }
+ else if (protocol_rev == 'C' && !info.eisa && !info.pci) {
+ bus_type = "ISA";
+ subversion = ISA;
+ }
+ else if (port_base > MAX_ISA_ADDR) {
+ bus_type = "PCI";
+ subversion = ESA;
+ }
+ else {
+ bus_type = "ISA";
+ subversion = ISA;
+ }
+
+ if (!info.haaval || info.ata) {
+ printk("%s: address 0x%03lx, unusable %s board (%d%d), detaching.\n",
+ name, port_base, bus_type, info.haaval, info.ata);
+ return FALSE;
+ }
+
+ if (info.drqvld) {
+
+ if (subversion == ESA)
+ printk("%s: warning, weird %s board using DMA.\n", name, bus_type);
+
+ subversion = ISA;
+ dma_channel = dma_channel_table[3 - info.drqx];
+ }
+ else {
+
+ if (subversion == ISA)
+ printk("%s: warning, weird %s board not using DMA.\n", name, bus_type);
+
+ subversion = ESA;
+ dma_channel = NO_DMA;
+ }
+
+ if (!info.dmasup)
+ printk("%s: warning, DMA protocol support not asserted.\n", name);
+
+ irq = info.irq;
+
+ if (subversion == ESA && !info.irq_tr)
+ printk("%s: warning, LEVEL triggering is suggested for IRQ %u.\n",
+ name, irq);
+
+ if (get_pci_irq(port_base, &apic_irq) && (irq != apic_irq)) {
+ printk("%s: IRQ %u mapped to IO-APIC IRQ %u.\n", name, irq, apic_irq);
+ irq = apic_irq;
+ }
+
+ /* Board detected, allocate its IRQ */
+ if (request_irq(irq, do_interrupt_handler,
+ SA_INTERRUPT | ((subversion == ESA) ? SA_SHIRQ : 0),
+ driver_name, (void *) &sha[j])) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
+ return FALSE;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ free_irq(irq, &sha[j]);
+ return FALSE;
+ }
+
+#if defined(FORCE_CONFIG)
+ {
+ struct eata_config config;
+
+ /* Set board configuration */
+ memset((char *)&config, 0, sizeof(struct eata_config));
+ config.len = (ushort) htons((ushort)510);
+ config.ocena = TRUE;
+
+ if (do_dma(port_base, (unsigned int)&config, SET_CONFIG_DMA)) {
+ printk("%s: busy timeout sending configuration, detaching.\n", name);
+ return FALSE;
+ }
+ }
+#endif
+
+ sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
+
+ if (sh[j] == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+
+ free_irq(irq, &sha[j]);
+
+ if (subversion == ISA) free_dma(dma_channel);
+
+ return FALSE;
+ }
+
+ sh[j]->io_port = port_base;
+ sh[j]->unique_id = port_base;
+ sh[j]->n_io_port = REGION_SIZE;
+ sh[j]->dma_channel = dma_channel;
+ sh[j]->irq = irq;
+ sh[j]->sg_tablesize = (ushort) ntohs(info.scatt_size);
+ sh[j]->this_id = (ushort) info.host_addr[3];
+ sh[j]->can_queue = (ushort) ntohs(info.queue_size);
+ sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
+ sh[j]->select_queue_depths = select_queue_depths;
+
+ /* Register the I/O space that we use */
+ request_region(sh[j]->io_port, sh[j]->n_io_port, driver_name);
+
+ memset(HD(j), 0, sizeof(struct hostdata));
+ HD(j)->subversion = subversion;
+ HD(j)->protocol_rev = protocol_rev;
+ HD(j)->board_number = j;
+
+ if (HD(j)->subversion == ESA)
+ sh[j]->unchecked_isa_dma = FALSE;
+ else {
+ sh[j]->wish_block = TRUE;
+ sh[j]->unchecked_isa_dma = TRUE;
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ strcpy(BN(j), name);
+
+ /* DPT PM2012 does not allow to detect sg_tablesize correctly */
+ if (sh[j]->sg_tablesize > MAX_SGLIST || sh[j]->sg_tablesize < 2) {
+ printk("%s: detect, wrong n. of SG lists %d, fixed.\n",
+ BN(j), sh[j]->sg_tablesize);
+ sh[j]->sg_tablesize = MAX_SGLIST;
+ }
+
+ /* DPT PM2012 does not allow to detect can_queue correctly */
+ if (sh[j]->can_queue > MAX_MAILBOXES || sh[j]->can_queue < 2) {
+ printk("%s: detect, wrong n. of mbox %d, fixed.\n",
+ BN(j), sh[j]->can_queue);
+ sh[j]->can_queue = MAX_MAILBOXES;
+ }
+
+ if (protocol_rev != 'A') {
+
+ if (info.max_chan > 0 && info.max_chan < MAX_CHANNEL)
+ sh[j]->max_channel = info.max_chan;
+
+ if (info.max_id > 7 && info.max_id < MAX_TARGET)
+ sh[j]->max_id = info.max_id + 1;
+
+ if (info.large_sg && sh[j]->sg_tablesize == MAX_SGLIST)
+ sh[j]->sg_tablesize = MAX_LARGE_SGLIST;
+ }
+
+ if (protocol_rev == 'C') {
+
+ if (info.max_lun > 7 && info.max_lun < MAX_LUN)
+ sh[j]->max_lun = info.max_lun + 1;
+ }
+
+ if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
+ else sprintf(dma_name, "DMA %u", dma_channel);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if (! ((&HD(j)->cp[i])->sglist = kmalloc(
+ sh[j]->sg_tablesize * sizeof(struct sg_list),
+ (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
+ printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
+ eata2x_release(sh[j]);
+ return FALSE;
+ }
+
+ if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
+ max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
+
+ if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;
+
+ if (tagged_comm) {
+ if (tag_mode == TAG_SIMPLE) tag_type = '1';
+ else if (tag_mode == TAG_HEAD) tag_type = '2';
+ else if (tag_mode == TAG_ORDERED) tag_type = '3';
+ else tag_type = 'y';
+ }
+ else tag_type = 'n';
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+ sh[j]->hostt->use_new_eh_code = use_new_eh_code;
+#else
+ use_new_eh_code = FALSE;
+#endif
+
+ if (j == 0) {
+ printk("EATA/DMA 2.0x: Copyright (C) 1994-1998 Dario Ballabio.\n");
+ printk("%s config options -> tc:%c, lc:%c, mq:%d, eh:%c, rs:%c, et:%c.\n",
+ driver_name, tag_type, YESNO(linked_comm), max_queue_depth,
+ YESNO(use_new_eh_code), YESNO(rev_scan), YESNO(ext_tran));
+ }
+
+ printk("%s: 2.0%c, %s 0x%03lx, IRQ %u, %s, SG %d, MB %d.\n",
+ BN(j), HD(j)->protocol_rev, bus_type, (unsigned long)sh[j]->io_port,
+ sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
+
+ if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
+ printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
+ BN(j), sh[j]->max_id, sh[j]->max_lun);
+
+ for (i = 0; i <= sh[j]->max_channel; i++)
+ printk("%s: SCSI channel %u enabled, host target ID %d.\n",
+ BN(j), i, info.host_addr[3 - i]);
+
+#if defined(DEBUG_DETECT)
+ printk("%s: Vers. 0x%x, ocs %u, tar %u, trnxfr %u, more %u, SYNC 0x%x, "\
+ "sec. %u, infol %ld, cpl %ld spl %ld.\n", name, info.version,
+ info.ocsena, info.tarsup, info.trnxfr, info.morsup, info.sync,
+ info.second, DEV2H(info.data_len), DEV2H(info.cp_len),
+ DEV2H(info.sp_len));
+
+ if (protocol_rev == 'B' || protocol_rev == 'C')
+ printk("%s: isaena %u, forcaddr %u, max_id %u, max_chan %u, "\
+ "large_sg %u, res1 %u.\n", name, info.isaena, info.forcaddr,
+ info.max_id, info.max_chan, info.large_sg, info.res1);
+
+ if (protocol_rev == 'C')
+ printk("%s: max_lun %u, m1 %u, idquest %u, pci %u, eisa %u, "\
+ "raidnum %u.\n", name, info.max_lun, info.m1, info.idquest,
+ info.pci, info.eisa, info.raidnum);
+#endif
+
+ return TRUE;
+}
+
+__initfunc (void eata2x_setup(char *str, int *ints)) {
+ int i, argc = ints[0];
+ char *cur = str, *pc;
+
+ if (argc > 0) {
+
+ if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;
+
+ for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];
+
+ io_port[i] = 0;
+ setup_done = TRUE;
+ }
+
+ while (cur && (pc = strchr(cur, ':'))) {
+ int val = 0, c = *++pc;
+
+ if (c == 'n' || c == 'N') val = FALSE;
+ else if (c == 'y' || c == 'Y') val = TRUE;
+ else val = (int) simple_strtoul(pc, NULL, 0);
+
+ if (!strncmp(cur, "lc:", 3)) linked_comm = val;
+ else if (!strncmp(cur, "tc:", 3)) tagged_comm = val;
+ else if (!strncmp(cur, "tm:", 3)) tag_mode = val;
+ else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
+ else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
+ else if (!strncmp(cur, "eh:", 3)) use_new_eh_code = val;
+ else if (!strncmp(cur, "et:", 3)) ext_tran = val;
+ else if (!strncmp(cur, "rs:", 3)) rev_scan = val;
+
+ if ((cur = strchr(cur, ','))) ++cur;
+ }
+
+ return;
+}
+
+__initfunc (static void add_pci_ports(void)) {
+
+#if defined(CONFIG_PCI)
+
+ unsigned int addr, k;
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+
+ struct pci_dev *dev = NULL;
+
+ if (!pci_present()) return;
+
+ for (k = 0; k < MAX_PCI; k++) {
+
+ if (!(dev = pci_find_class(PCI_CLASS_STORAGE_SCSI << 8, dev))) break;
+
+ if (pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &addr)) continue;
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: detect, seq. %d, bus %d, devfn 0x%x, addr 0x%x.\n",
+ driver_name, k, dev->bus->number, dev->devfn, addr);
+#endif
+
+ if ((addr & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_IO)
+ continue;
+
+ /* Order addresses according to rev_scan value */
+ io_port[MAX_INT_PARAM + (rev_scan ? (MAX_PCI - k) : (1 + k))] =
+ (addr & PCI_BASE_ADDRESS_IO_MASK) + PCI_BASE_ADDRESS_0;
+ }
+
+#else /* else old style PCI code */
+
+ unsigned short i = 0;
+ unsigned char bus, devfn;
+
+ if (!pcibios_present()) return;
+
+ for (k = 0; k < MAX_PCI; k++) {
+
+ if (pcibios_find_class(PCI_CLASS_STORAGE_SCSI << 8, i++, &bus, &devfn)
+ != PCIBIOS_SUCCESSFUL) break;
+
+ if (pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &addr)
+ != PCIBIOS_SUCCESSFUL) continue;
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: detect, seq. %d, bus %d, devfn 0x%x, addr 0x%x.\n",
+ driver_name, k, bus, devfn, addr);
+#endif
+
+ if ((addr & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_IO)
+ continue;
+
+ /* Order addresses according to rev_scan value */
+ io_port[MAX_INT_PARAM + (rev_scan ? (MAX_PCI - k) : (1 + k))] =
+ (addr & PCI_BASE_ADDRESS_IO_MASK) + PCI_BASE_ADDRESS_0;
+ }
+
+#endif /* end old style PCI code */
+
+#endif /* end CONFIG_PCI */
+
+ return;
+}
+
+__initfunc (int eata2x_detect(Scsi_Host_Template *tpnt)) {
+ unsigned int j = 0, k;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ tpnt->proc_dir = &proc_scsi_eata2x;
+
+#if defined(MODULE)
+ /* io_port could have been modified when loading as a module */
+ if(io_port[0] != SKIP) {
+ setup_done = TRUE;
+ io_port[MAX_INT_PARAM] = 0;
+ }
+#endif
+
+ for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
+
+ if (!setup_done) add_pci_ports();
+
+ for (k = 0; io_port[k]; k++) {
+
+ if (io_port[k] == SKIP) continue;
+
+ if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
+ }
+
+ num_boards = j;
+ IRQ_UNLOCK_RESTORE
+ return j;
+}
+
+static inline void build_sg_list(struct mscp *cpp, Scsi_Cmnd *SCpnt) {
+ unsigned int k;
+ struct scatterlist *sgpnt;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ for (k = 0; k < SCpnt->use_sg; k++) {
+ cpp->sglist[k].address = V2DEV(sgpnt[k].address);
+ cpp->sglist[k].num_bytes = H2DEV(sgpnt[k].length);
+ }
+
+ cpp->data_address = V2DEV(cpp->sglist);
+ cpp->data_len = H2DEV((SCpnt->use_sg * sizeof(struct sg_list)));
+}
+
+static inline int do_qcomm(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ unsigned int i, j, k;
+ struct mscp *cpp;
+ struct mssp *spp;
+
+ static const unsigned char data_out_cmds[] = {
+ 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
+ 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
+ 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b
+ };
+
+ static const unsigned char data_none_cmds[] = {
+ 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
+ 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
+ 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5
+ };
+
+ /* j is the board number */
+ j = ((struct hostdata *) SCpnt->host->hostdata)->board_number;
+
+ if (SCpnt->host_scribble)
+ panic("%s: qcomm, pid %ld, SCpnt %p already active.\n",
+ BN(j), SCpnt->pid, SCpnt);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = HD(j)->last_cp_used + 1;
+
+ for (k = 0; k < sh[j]->can_queue; k++, i++) {
+
+ if (i >= sh[j]->can_queue) i = 0;
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ HD(j)->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == sh[j]->can_queue) {
+ printk("%s: qcomm, no free mailbox.\n", BN(j));
+ return 1;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &HD(j)->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp) - sizeof(struct sg_list *));
+
+ /* Set pointer to status packet structure */
+ spp = &HD(j)->sp[0];
+
+ /* The EATA protocol uses Big Endian format */
+ cpp->sp_addr = V2DEV(spp);
+
+ cpp->cpp = cpp;
+ SCpnt->scsi_done = done;
+ cpp->index = i;
+ SCpnt->host_scribble = (unsigned char *) &cpp->index;
+
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid);
+
+ for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
+ if (SCpnt->cmnd[0] == data_out_cmds[k]) {
+ cpp->dout = TRUE;
+ break;
+ }
+
+ if ((cpp->din = !cpp->dout))
+ for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
+ if (SCpnt->cmnd[0] == data_none_cmds[k]) {
+ cpp->din = FALSE;
+ break;
+ }
+
+ cpp->reqsen = TRUE;
+ cpp->dispri = TRUE;
+#if 0
+ if (SCpnt->device->type == TYPE_TAPE) cpp->hbaci = TRUE;
+#endif
+ cpp->one = TRUE;
+ cpp->channel = SCpnt->channel;
+ cpp->target = SCpnt->target;
+ cpp->lun = SCpnt->lun;
+ cpp->SCpnt = SCpnt;
+ cpp->sense_addr = V2DEV(SCpnt->sense_buffer);
+ cpp->sense_len = sizeof SCpnt->sense_buffer;
+
+ if (SCpnt->device->tagged_queue) {
+
+ if (HD(j)->target_redo[SCpnt->target][SCpnt->channel] ||
+ HD(j)->target_to[SCpnt->target][SCpnt->channel])
+ cpp->mess[0] = ORDERED_QUEUE_TAG;
+ else if (tag_mode == TAG_SIMPLE) cpp->mess[0] = SIMPLE_QUEUE_TAG;
+ else if (tag_mode == TAG_HEAD) cpp->mess[0] = HEAD_OF_QUEUE_TAG;
+ else if (tag_mode == TAG_ORDERED) cpp->mess[0] = ORDERED_QUEUE_TAG;
+ else if (SCpnt->device->current_tag == 0)
+ cpp->mess[0] = ORDERED_QUEUE_TAG;
+ else if (SCpnt->device->current_tag == 1)
+ cpp->mess[0] = HEAD_OF_QUEUE_TAG;
+ else
+ cpp->mess[0] = SIMPLE_QUEUE_TAG;
+
+ cpp->mess[1] = SCpnt->device->current_tag++;
+ }
+
+ if (SCpnt->use_sg) {
+ cpp->sg = TRUE;
+ build_sg_list(cpp, SCpnt);
+ }
+ else {
+ cpp->data_address = V2DEV(SCpnt->request_buffer);
+ cpp->data_len = H2DEV(SCpnt->request_bufflen);
+ }
+
+ memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type)) {
+ HD(j)->cp_stat[i] = READY;
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, FALSE);
+ return 0;
+ }
+
+ /* Send control packet to the board */
+ if (do_dma(sh[j]->io_port, (unsigned int) cpp, SEND_CP_DMA)) {
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, target %d.%d:%d, pid %ld, adapter busy.\n",
+ BN(j), SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid);
+ return 1;
+ }
+
+ HD(j)->cp_stat[i] = IN_USE;
+ return 0;
+}
+
+int eata2x_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_qcomm(SCpnt, done);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+/*
+ * Old-style (pre new-eh) abort handler, called with interrupts already
+ * disabled by eata2x_old_abort().  The mailbox index for SCarg is kept
+ * in SCarg->host_scribble; the action taken depends on that mailbox's
+ * cp_stat state.  Returns one of the SCSI_ABORT_* codes expected by the
+ * old error-handling code.
+ */
+static inline int do_old_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ /* Command already completed, or the timeout belongs to an earlier
+  * incarnation of this command: nothing to abort. */
+ if (SCarg->host_scribble == NULL ||
+ (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout))) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ /* host_scribble stores the mailbox index assigned at queue time. */
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ /* Command is on the adapter: cannot be pulled back, just snooze and
+  * let the interrupt handler (or a later reset) deal with it. */
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ /* READY/ABORTING commands were never sent to the board, so they can
+  * be completed immediately with DID_ABORT. */
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+/*
+ * Old-style abort entry point registered in the host template: runs
+ * do_old_abort() with interrupts disabled and returns its result.
+ */
+int eata2x_old_abort(Scsi_Cmnd *SCarg) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_abort(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+/*
+ * New error-handling (>= 2.1.101) abort handler.  Same mailbox logic as
+ * do_old_abort(), but returns SUCCESS/FAILED as expected by the new eh
+ * code, and honours SCarg->eh_state for commands stuck on the adapter.
+ */
+static inline int do_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ /* No host_scribble means the command already completed. */
+ if (SCarg->host_scribble == NULL) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SUCCESS;
+ }
+
+ /* host_scribble stores the mailbox index assigned at queue time. */
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ /* Command is on the adapter; it can only be "aborted" here if the eh
+  * thread is handling a timeout, in which case the mailbox is forced
+  * free and the board's eventual completion will be ignored. */
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ if (SCarg->eh_state == SCSI_STATE_TIMEOUT) {
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d, eh_state timeout, pid %ld.\n",
+ BN(j), i, SCarg->pid);
+ return SUCCESS;
+ }
+
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ /* READY/ABORTING commands were never sent to the board, so they can
+  * be completed immediately with DID_ABORT. */
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+/*
+ * New-eh abort entry point (eh_abort_handler).  No IRQ save/restore
+ * here, unlike eata2x_old_abort() — presumably the new eh code calls
+ * this with the host lock/interrupts already handled; confirm against
+ * the 2.1.101+ mid-layer.
+ */
+int eata2x_abort(Scsi_Cmnd *SCarg) {
+
+ return do_abort(SCarg);
+}
+
+#endif /* new_eh_code */
+
+/*
+ * Old-style host reset, called with interrupts disabled by
+ * eata2x_old_reset().  Marks every non-free mailbox ABORTING (never
+ * sent to the board) or IN_RESET (on the board), resets the adapter
+ * via RESET_PIO, busy-waits up to 10 seconds with interrupts enabled
+ * so outstanding completions can drain, then completes the remaining
+ * commands with DID_RESET.  Returns SCSI_RESET_* codes.
+ */
+static inline int do_old_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ /* Timeout refers to an earlier incarnation of this command. */
+ if (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout)) {
+ printk("%s: reset, pid %ld, reset not running.\n", BN(j), SCarg->pid);
+ return SCSI_RESET_NOT_RUNNING;
+ }
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ HD(j)->retries = 0;
+
+ /* After a reset every target must redo its operations and its
+  * timeout count starts over. */
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ /* First pass: classify every occupied mailbox. */
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ /* READY commands were never queued to the adapter. */
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (do_dma(sh[j]->io_port, 0, RESET_PIO)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ /* Let interrupts drain completions while the board settles. */
+ HD(j)->in_reset = TRUE;
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ /* Second pass: complete whatever the interrupt handler did not. */
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ /* NOTE(review): IRQ_LOCK after scsi_done with no matching unlock in
+  * the loop looks like it re-asserts the lock in case the completion
+  * callback released it — confirm against the IRQ_* macro bodies. */
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) {
+ printk("%s: reset, exit, success.\n", BN(j));
+ return SCSI_RESET_SUCCESS;
+ }
+ else {
+ printk("%s: reset, exit, wakeup.\n", BN(j));
+ return SCSI_RESET_PUNT;
+ }
+}
+
+/*
+ * Old-style reset entry point: runs do_old_reset() with interrupts
+ * disabled.  reset_flags is required by the old template signature but
+ * is not used by this driver.
+ */
+int eata2x_old_reset(Scsi_Cmnd *SCarg, unsigned int reset_flags) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_reset(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+/*
+ * New error-handling (>= 2.1.101) host reset.  Same two-pass mailbox
+ * logic as do_old_reset() — mark ABORTING/IN_RESET, reset the board,
+ * drain with interrupts enabled for up to 10 seconds, then complete
+ * leftovers with DID_RESET — but returns SUCCESS/FAILED and does not
+ * check serial_number_at_timeout (the new eh code handles that).
+ */
+static inline int do_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return FAILED;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ HD(j)->retries = 0;
+
+ /* After a reset every target must redo its operations and its
+  * timeout count starts over. */
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ /* First pass: classify every occupied mailbox. */
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ /* READY commands were never queued to the adapter. */
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (do_dma(sh[j]->io_port, 0, RESET_PIO)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ /* Let interrupts drain completions while the board settles. */
+ HD(j)->in_reset = TRUE;
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ /* Second pass: complete whatever the interrupt handler did not. */
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ /* NOTE(review): IRQ_LOCK after scsi_done with no matching unlock in
+  * the loop looks like it re-asserts the lock in case the completion
+  * callback released it — confirm against the IRQ_* macro bodies. */
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->pid);
+ else printk("%s: reset, exit.\n", BN(j));
+
+ return SUCCESS;
+}
+
+/*
+ * New-eh host reset entry point (eh_host_reset_handler).  No IRQ
+ * save/restore here, unlike eata2x_old_reset() — presumably the new eh
+ * code provides the locking; confirm against the 2.1.101+ mid-layer.
+ */
+int eata2x_reset(Scsi_Cmnd *SCarg) {
+
+ return do_reset(SCarg);
+}
+
+#endif /* new_eh_code */
+
+/*
+ * BIOS geometry for fdisk & friends: dkinfo[0..2] = heads, sectors,
+ * cylinders.  Tries the SCSICAM partition-table heuristic first; falls
+ * back to the extended 255/63 translation when that fails or when the
+ * ext_tran option forces it.  Always returns FALSE (success).
+ */
+int eata2x_biosparam(Disk *disk, kdev_t dev, int *dkinfo) {
+ int size = disk->capacity;
+
+ if (ext_tran || (scsicam_bios_param(disk, dev, dkinfo) < 0)) {
+ dkinfo[0] = 255;
+ dkinfo[1] = 63;
+ dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
+ }
+
+#if defined (DEBUG_GEOMETRY)
+ printk ("%s: biosparam, head=%d, sec=%d, cyl=%d.\n", driver_name,
+ dkinfo[0], dkinfo[1], dkinfo[2]);
+#endif
+
+ return FALSE;
+}
+
+/*
+ * Selection sort over two parallel arrays of n elements: sk[] holds
+ * the sort keys (sector numbers or pids) and da[] the associated data
+ * (mailbox indexes), swapped in lockstep.  rev == TRUE sorts keys in
+ * descending order, otherwise ascending.  n is small (bounded by the
+ * mailbox count), so O(n^2) is acceptable here.
+ */
+static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
+ unsigned int rev) {
+ unsigned int i, j, k, y;
+ unsigned long x;
+
+ for (i = 0; i < n - 1; i++) {
+ k = i;
+
+ /* Find the min (or max, if rev) key in the unsorted tail. */
+ for (j = k + 1; j < n; j++)
+ if (rev) {
+ if (sk[j] > sk[k]) k = j;
+ }
+ else {
+ if (sk[j] < sk[k]) k = j;
+ }
+
+ if (k != i) {
+ x = sk[k]; sk[k] = sk[i]; sk[i] = x;
+ y = da[k]; da[k] = da[i]; da[i] = y;
+ }
+ }
+
+ return;
+ }
+
+/*
+ * Elevator-style reordering of the n_ready READY mailboxes listed in
+ * il[] for one device: sorts them by start sector (ascending, or
+ * descending when the head — at cursec — is past the midpoint of the
+ * request span), then falls back to a pid sort if the sorted requests
+ * overlap.  Returns TRUE when an overlap was detected, telling
+ * flush_dev() to dispatch only the first request.  Also accumulates
+ * link statistics when the link_statistics option is set.
+ */
+static inline int reorder(unsigned int j, unsigned long cursec,
+ unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n;
+ unsigned int rev = FALSE, s = TRUE, r = TRUE;
+ unsigned int input_only = TRUE, overlap = FALSE;
+ /* NOTE(review): these VLAs are declared before the n_ready <= 1
+  * guard; a zero-length VLA (n_ready == 0) is undefined behavior —
+  * confirm callers never pass 0. */
+ unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
+ unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
+ unsigned long ioseek = 0;
+
+ static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
+ static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
+ static unsigned int readysorted = 0, revcount = 0;
+ static unsigned long seeksorted = 0, seeknosort = 0;
+
+ if (link_statistics && !(++flushcount % link_statistics))
+ printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
+ " av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
+ ovlcount, readycount, readysorted, sortcount, revcount,
+ seeknosort / (readycount + 1),
+ seeksorted / (readycount + 1));
+
+ if (n_ready <= 1) return FALSE;
+
+ /* Collect start sectors; track span, total transfer size, and
+  * whether the list is already sorted (s) or reverse-sorted (r). */
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (!cpp->din) input_only = FALSE;
+
+ if (SCpnt->request.sector < minsec) minsec = SCpnt->request.sector;
+ if (SCpnt->request.sector > maxsec) maxsec = SCpnt->request.sector;
+
+ sl[n] = SCpnt->request.sector;
+ ioseek += SCpnt->request.nr_sectors;
+
+ if (!n) continue;
+
+ if (sl[n] < sl[n - 1]) s = FALSE;
+ if (sl[n] > sl[n - 1]) r = FALSE;
+
+ if (link_statistics) {
+ if (sl[n] > sl[n - 1])
+ seek += sl[n] - sl[n - 1];
+ else
+ seek += sl[n - 1] - sl[n];
+ }
+
+ }
+
+ if (link_statistics) {
+ if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
+ }
+
+ /* Sweep downward if the head is past the midpoint of the span,
+  * unless the batch is large enough that direction hardly matters. */
+ if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;
+
+ if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;
+
+ /* Skip the sort if the list is already in the desired order. */
+ if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);
+
+ /* For batches containing writes, detect overlapping requests. */
+ if (!input_only) for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ ll[n] = SCpnt->request.nr_sectors; pl[n] = SCpnt->pid;
+
+ if (!n) continue;
+
+ if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
+ || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
+ }
+
+ /* Overlapping requests must run in submission (pid) order. */
+ if (overlap) sort(pl, il, n_ready, FALSE);
+
+ if (link_statistics) {
+ if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
+ batchcount++; readycount += n_ready, seeknosort += seek / 1024;
+ if (input_only) inputcount++;
+ if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
+ else seeksorted += (iseek + maxsec - minsec) / 1024;
+ if (rev && !r) { revcount++; readysorted += n_ready; }
+ if (!rev && !s) { sortcount++; readysorted += n_ready; }
+ }
+
+#if defined(DEBUG_LINKED_COMMANDS)
+ if (link_statistics && (overlap || !(flushcount % link_statistics)))
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
+ (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid, k, flushcount, n_ready,
+ SCpnt->request.sector, SCpnt->request.nr_sectors, cursec,
+ YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ YESNO(overlap), cpp->din);
+ }
+#endif
+ return overlap;
+}
+
+/*
+ * Dispatch all READY mailboxes belonging to dev to the adapter, after
+ * reorder()ing them elevator-style around cursec.  Bails out without
+ * sending anything if the device already has a command IN_USE on the
+ * board.  If reorder() reports overlapping requests, only the first
+ * (lowest-pid) one is sent this round.  ihdlr flags whether we were
+ * called from the interrupt handler (used only for log messages).
+ */
+static void flush_dev(Scsi_Device *dev, unsigned long cursec, unsigned int j,
+ unsigned int ihdlr) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];
+
+ /* Collect this device's READY mailboxes; abort if any is IN_USE. */
+ for (k = 0; k < sh[j]->can_queue; k++) {
+
+ if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;
+
+ cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (SCpnt->device != dev) continue;
+
+ if (HD(j)->cp_stat[k] == IN_USE) return;
+
+ il[n_ready++] = k;
+ }
+
+ /* Overlapping requests: send only the first, keep the rest READY. */
+ if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ /* Adapter refused the mailbox: mark it ABORTING so the eh code
+  * completes it later instead of leaving it stranded. */
+ if (do_dma(sh[j]->io_port, (unsigned int) cpp, SEND_CP_DMA)) {
+ printk("%s: %s, target %d.%d:%d, pid %ld, mbox %d, adapter"\
+ " busy, will abort.\n", BN(j), (ihdlr ? "ihdlr" : "qcomm"),
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid, k);
+ HD(j)->cp_stat[k] = ABORTING;
+ continue;
+ }
+
+ HD(j)->cp_stat[k] = IN_USE;
+ }
+
+}
+
+/*
+ * Interrupt handler core, called with interrupts disabled by
+ * do_interrupt_handler().  Snapshots the board's status packet (sp),
+ * validates the mailbox it refers to, maps the adapter/target status
+ * to a mid-layer DID_* result, and completes the command via
+ * scsi_done().  Returns silently when the interrupt is not for this
+ * board or the sp contents look suspect.
+ */
+static inline void ihdlr(int irq, unsigned int j) {
+ Scsi_Cmnd *SCpnt;
+ unsigned int i, k, c, status, tstatus, reg;
+ struct mssp *dspp, *spp;
+ struct mscp *cpp;
+
+ if (sh[j]->irq != irq)
+ panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+
+ /* Check if this board need to be serviced */
+ if (!(inb(sh[j]->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)) return;
+
+ HD(j)->iocount++;
+
+ if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ /* Check if this board is still busy */
+ if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
+ reg = inb(sh[j]->io_port + REG_STATUS);
+ printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ return;
+ }
+
+ /* sp[0] is written by the adapter (DMA), sp[1] is our stable copy. */
+ dspp = &HD(j)->sp[0];
+ spp = &HD(j)->sp[1];
+
+ /* Make a local copy just before clearing the interrupt indication */
+ memcpy(spp, dspp, sizeof(struct mssp));
+
+ /* Clear the completion flag and cp pointer on the dynamic copy of sp */
+ memset(dspp, 0, sizeof(struct mssp));
+
+ /* Read the status register to clear the interrupt indication */
+ reg = inb(sh[j]->io_port + REG_STATUS);
+
+ /* Reject any sp with suspect data */
+ if (spp->eoc == FALSE)
+ printk("%s: ihdlr, spp->eoc == FALSE, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ if (spp->cpp == NULL)
+ printk("%s: ihdlr, spp->cpp == NULL, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ if (spp->eoc == FALSE || spp->cpp == NULL) return;
+
+ cpp = spp->cpp;
+
+#if defined(DEBUG_GENERATE_ABORTS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) return;
+#endif
+
+ /* Find the mailbox to be serviced on this board */
+ i = cpp - HD(j)->cp;
+
+ if (cpp < HD(j)->cp || cpp >= HD(j)->cp + sh[j]->can_queue
+ || i >= sh[j]->can_queue)
+ panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
+ cpp, HD(j)->cp);
+
+ /* Late completions for aborted/reset mailboxes just free the slot. */
+ if (HD(j)->cp_stat[i] == IGNORE) {
+ HD(j)->cp_stat[i] = FREE;
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
+ else if (HD(j)->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
+ BN(j), i, HD(j)->cp_stat[i]);
+
+ HD(j)->cp_stat[i] = FREE;
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i,
+ SCpnt->pid, SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n",
+ BN(j), i, SCpnt->pid, *(unsigned int *)SCpnt->host_scribble);
+
+ /* Linked commands: a completion frees the device, so try to push its
+  * queued READY requests to the board now. */
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type))
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, TRUE);
+
+ tstatus = status_byte(spp->target_status);
+
+#if defined(DEBUG_GENERATE_ERRORS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
+ spp->adapter_status = 0x01;
+#endif
+
+ /* Map adapter status (+ target status) to a mid-layer DID_* code. */
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
+ && HD(j)->target_redo[SCpnt->target][SCpnt->channel])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ HD(j)->target_redo[SCpnt->target][SCpnt->channel] = FALSE;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK)
+ printk("%s: ihdlr, target %d.%d:%d, pid %ld, "\
+ "target_status 0x%x, sense key 0x%x.\n", BN(j),
+ SCpnt->channel, SCpnt->target, SCpnt->lun,
+ SCpnt->pid, spp->target_status,
+ SCpnt->sense_buffer[2]);
+
+ HD(j)->target_to[SCpnt->target][SCpnt->channel] = 0;
+
+ if (HD(j)->last_retried_pid == SCpnt->pid) HD(j)->retries = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+ case 0x02: /* Command Time Out */
+
+ /* Give each target at most two timeouts before failing hard. */
+ if (HD(j)->target_to[SCpnt->target][SCpnt->channel] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ HD(j)->target_to[SCpnt->target][SCpnt->channel]++;
+ }
+
+ break;
+
+ /* Perform a limited number of internal retries */
+ case 0x03: /* SCSI Bus Reset Received */
+ case 0x04: /* Initial Controller Power-up */
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++)
+ HD(j)->target_redo[k][c] = TRUE;
+
+ if (SCpnt->device->type != TYPE_TAPE
+ && HD(j)->retries < MAX_INTERNAL_RETRIES) {
+
+#if defined(DID_SOFT_ERROR)
+ status = DID_SOFT_ERROR << 16;
+#else
+ status = DID_BUS_BUSY << 16;
+#endif
+ HD(j)->retries++;
+ HD(j)->last_retried_pid = SCpnt->pid;
+ }
+ else
+ status = DID_ERROR << 16;
+
+ break;
+ case 0x05: /* Unexpected Bus Phase */
+ case 0x06: /* Unexpected Bus Free */
+ case 0x07: /* Bus Parity Error */
+ case 0x08: /* SCSI Hung */
+ case 0x09: /* Unexpected Message Reject */
+ case 0x0a: /* SCSI Bus Reset Stuck */
+ case 0x0b: /* Auto Request-Sense Failed */
+ case 0x0c: /* Controller Ram Parity Error */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+
+#if defined(DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
+ do_trace || msg_byte(spp->target_status))
+#endif
+ printk("%s: ihdlr, mbox %2d, err 0x%x:%x,"\
+ " target %d.%d:%d, pid %ld, reg 0x%x, count %d.\n",
+ BN(j), i, spp->adapter_status, spp->target_status,
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid,
+ reg, HD(j)->iocount);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ SCpnt->scsi_done(SCpnt);
+
+ if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ return;
+}
+
+/*
+ * Registered interrupt handler: recovers the board index from the
+ * per-board cookie passed at request_irq time (presumably shap points
+ * into the sha[] array, so the pointer difference is the board number
+ * — verify against the request_irq call in the detect code), then runs
+ * ihdlr() under both the SMP spin lock and the IRQ lock.
+ */
+static void do_interrupt_handler(int irq, void *shap, struct pt_regs *regs) {
+ unsigned int j;
+ IRQ_FLAGS
+ SPIN_FLAGS
+
+ /* Check if the interrupt must be processed by this handler */
+ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
+
+ SPIN_LOCK_SAVE
+ IRQ_LOCK_SAVE
+ ihdlr(irq, j);
+ IRQ_UNLOCK_RESTORE
+ SPIN_UNLOCK_RESTORE
+}
+
+/*
+ * Host teardown (module unload): finds the board index matching shpnt,
+ * frees every per-mailbox scatter/gather list, releases the IRQ, DMA
+ * channel and I/O region, and unregisters the host.  Always returns
+ * FALSE.  Panics if shpnt does not match any registered board.
+ */
+int eata2x_release(struct Scsi_Host *shpnt) {
+ unsigned int i, j;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+
+ /* Linear search of sh[] for the board owning this Scsi_Host. */
+ for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);
+
+ if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n",
+ driver_name);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if ((&HD(j)->cp[i])->sglist) kfree((&HD(j)->cp[i])->sglist);
+
+ free_irq(sh[j]->irq, &sha[j]);
+
+ if (sh[j]->dma_channel != NO_DMA) free_dma(sh[j]->dma_channel);
+
+ release_region(sh[j]->io_port, sh[j]->n_io_port);
+ scsi_unregister(sh[j]);
+ IRQ_UNLOCK_RESTORE
+ return FALSE;
+}
+
+#if defined(MODULE)
+Scsi_Host_Template driver_template = EATA;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/eata.h b/linux/src/drivers/scsi/eata.h
new file mode 100644
index 0000000..f1641f4
--- /dev/null
+++ b/linux/src/drivers/scsi/eata.h
@@ -0,0 +1,60 @@
+/*
+ * eata.h - used by the low-level driver for EATA/DMA SCSI host adapters.
+ */
+#ifndef _EATA_H
+#define _EATA_H
+
+#include <scsi/scsicam.h>
+#include <linux/version.h>
+
+int eata2x_detect(Scsi_Host_Template *);
+int eata2x_release(struct Scsi_Host *);
+int eata2x_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int eata2x_abort(Scsi_Cmnd *);
+int eata2x_old_abort(Scsi_Cmnd *);
+int eata2x_reset(Scsi_Cmnd *);
+int eata2x_old_reset(Scsi_Cmnd *, unsigned int);
+int eata2x_biosparam(Disk *, kdev_t, int *);
+
+#define EATA_VERSION "4.33.00"
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+#define EATA { \
+ name: "EATA/DMA 2.0x rev. " EATA_VERSION " ", \
+ detect: eata2x_detect, \
+ release: eata2x_release, \
+ queuecommand: eata2x_queuecommand, \
+ abort: eata2x_old_abort, \
+ reset: eata2x_old_reset, \
+ eh_abort_handler: eata2x_abort, \
+ eh_device_reset_handler: NULL, \
+ eh_bus_reset_handler: NULL, \
+ eh_host_reset_handler: eata2x_reset, \
+ bios_param: eata2x_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 1 /* Enable new error code */ \
+ }
+
+#else /* Use old scsi code */
+
+#define EATA { \
+ name: "EATA/DMA 2.0x rev. " EATA_VERSION " ", \
+ detect: eata2x_detect, \
+ release: eata2x_release, \
+ queuecommand: eata2x_queuecommand, \
+ abort: eata2x_old_abort, \
+ reset: eata2x_old_reset, \
+ bios_param: eata2x_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING \
+ }
+
+#endif
+
+#endif
diff --git a/linux/src/drivers/scsi/eata_dma.c b/linux/src/drivers/scsi/eata_dma.c
new file mode 100644
index 0000000..c019813
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma.c
@@ -0,0 +1,1603 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all ISA based EATA-DMA boards *
+ * like PM2011, PM2021, PM2041, PM3021 *
+ * -supports all EISA based EATA-DMA boards *
+ * like PM2012B, PM2022, PM2122, PM2322, PM2042, *
+ * PM3122, PM3222, PM3332 *
+ * -supports all PCI based EATA-DMA boards *
+ * like PM2024, PM2124, PM2044, PM2144, PM3224, *
+ * PM3334 *
+ * -supports the Wide, Ultra Wide and Differential *
+ * versions of the boards *
+ * -supports multiple HBAs with & without IRQ sharing *
+ * -supports all SCSI channels on multi channel boards *
+ * -supports ix86 and MIPS, untested on ALPHA *
+ * -needs identical IDs on all channels of a HBA *
+ * -can be loaded as module *
+ * -displays statistical and hardware information *
+ * in /proc/scsi/eata_dma *
+ * -provides rudimentary latency measurement *
+ * possibilities via /proc/scsi/eata_dma/<hostnum> *
+ * *
+ * (c)1993-96 Michael Neuffer *
+ * mike@i-Connect.Net *
+ * neuffer@mail.uni-mainz.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ * I have to thank DPT for their excellent support. I took *
+ * me almost a year and a stopover at their HQ, on my first *
+ * trip to the USA, to get it, but since then they've been *
+ * very helpful and tried to give me all the infos and *
+ * support I need. *
+ * *
+ * Thanks also to Simon Shapiro, Greg Hosler and Mike *
+ * Jagdis who did a lot of testing and found quite a number *
+ * of bugs during the development. *
+ ************************************************************
+ * last change: 96/10/21 OS: Linux 2.0.23 *
+ ************************************************************/
+
+/* Look in eata_dma.h for configuration and revision information */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#ifdef __mips__
+#include <asm/cachectl.h>
+#endif
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include "eata_dma.h"
+#include "eata_dma_proc.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_eata_dma = {
+ PROC_SCSI_EATA, 8, "eata_dma",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static u32 ISAbases[] =
+{0x1F0, 0x170, 0x330, 0x230};
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static struct eata_sp *status = 0; /* Statuspacket array */
+static void *dma_scratch = 0;
+
+static struct eata_register *fake_int_base;
+static int fake_int_result;
+static int fake_int_happened;
+
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
+void eata_scsi_done (Scsi_Cmnd * scmd)
+{
+ scmd->request.rq_status = RQ_SCSI_DONE;
+
+ if (scmd->request.sem != NULL)
+ up(scmd->request.sem);
+
+ return;
+}
+
+void eata_fake_int_handler(s32 irq, void *dev_id, struct pt_regs * regs)
+{
+ fake_int_result = inb((ulong)fake_int_base + HA_RSTATUS);
+ fake_int_happened = TRUE;
+ DBG(DBG_INTR3, printk("eata_fake_int_handler called irq%d base %p"
+ " res %#x\n", irq, fake_int_base, fake_int_result));
+ return;
+}
+
+#include "eata_dma_proc.c"
+
+#ifdef MODULE
+int eata_release(struct Scsi_Host *sh)
+{
+ uint i;
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL);
+ else reg_IRQ[sh->irq]--;
+
+ scsi_init_free((void *)status, 512);
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+ for (i = 0; i < sh->can_queue; i++){ /* Free all SG arrays */
+ if(SD(sh)->ccb[i].sg_list != NULL)
+ scsi_init_free((void *) SD(sh)->ccb[i].sg_list,
+ sh->sg_tablesize * sizeof(struct eata_sg_list));
+ }
+
+ if (SD(sh)->channel == 0) {
+ if (sh->dma_channel != BUSMASTER) free_dma(sh->dma_channel);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
+
+inline void eata_latency_in(struct eata_ccb *cp, hostdata *hd)
+{
+ uint time;
+ time = jiffies - cp->timestamp;
+ if(hd->all_lat[1] > time)
+ hd->all_lat[1] = time;
+ if(hd->all_lat[2] < time)
+ hd->all_lat[2] = time;
+ hd->all_lat[3] += time;
+ hd->all_lat[0]++;
+ if((cp->rw_latency) == WRITE) { /* was WRITE */
+ if(hd->writes_lat[cp->sizeindex][1] > time)
+ hd->writes_lat[cp->sizeindex][1] = time;
+ if(hd->writes_lat[cp->sizeindex][2] < time)
+ hd->writes_lat[cp->sizeindex][2] = time;
+ hd->writes_lat[cp->sizeindex][3] += time;
+ hd->writes_lat[cp->sizeindex][0]++;
+ } else if((cp->rw_latency) == READ) {
+ if(hd->reads_lat[cp->sizeindex][1] > time)
+ hd->reads_lat[cp->sizeindex][1] = time;
+ if(hd->reads_lat[cp->sizeindex][2] < time)
+ hd->reads_lat[cp->sizeindex][2] = time;
+ hd->reads_lat[cp->sizeindex][3] += time;
+ hd->reads_lat[cp->sizeindex][0]++;
+ }
+}
+
+inline void eata_latency_out(struct eata_ccb *cp, Scsi_Cmnd *cmd)
+{
+ int x, z;
+ short *sho;
+ long *lon;
+ x = 0; /* just to keep GCC quiet */
+ cp->timestamp = jiffies; /* For latency measurements */
+ switch(cmd->cmnd[0]) {
+ case WRITE_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = READ;
+ break;
+ default:
+ cp->rw_latency = OTHER;
+ break;
+ }
+ if (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 ||
+ cmd->cmnd[0] == WRITE_12 || cmd->cmnd[0] == READ_6 ||
+ cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_12) {
+ for(z = 0; (x > (1 << z)) && (z <= 11); z++)
+ /* nothing */;
+ cp->sizeindex = z;
+ }
+}
+
+
+void eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ uint i, result = 0;
+ uint hba_stat, scsi_stat, eata_stat;
+ Scsi_Cmnd *cmd;
+ struct eata_ccb *ccb;
+ struct eata_sp *sp;
+ uint base;
+ uint x;
+ struct Scsi_Host *sh;
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if (sh->irq != irq)
+ continue;
+
+ while(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+
+ int_counter++;
+
+ sp = &SD(sh)->sp;
+#ifdef __mips__
+ sys_cacheflush(sp, sizeof(struct eata_sp), 2);
+#endif
+ ccb = sp->ccb;
+
+ if(ccb == NULL) {
+ eata_stat = inb((uint)sh->base + HA_RSTATUS);
+ printk("eata_dma: int_handler, Spurious IRQ %d "
+ "received. CCB pointer not set.\n", irq);
+ break;
+ }
+
+ cmd = ccb->cmd;
+ base = (uint) cmd->host->base;
+ hba_stat = sp->hba_stat;
+
+ scsi_stat = (sp->scsi_stat >> 1) & 0x1f;
+
+ if (sp->EOC == FALSE) {
+ eata_stat = inb(base + HA_RSTATUS);
+ printk(KERN_WARNING "eata_dma: int_handler, board: %x cmd %lx "
+ "returned unfinished.\n"
+ "EATA: %x HBA: %x SCSI: %x spadr %lx spadrirq %lx, "
+ "irq%d\n", base, (long)ccb, eata_stat, hba_stat,
+ scsi_stat,(long)&status, (long)&status[irq], irq);
+ cmd->result = DID_ERROR << 16;
+ ccb->status = FREE;
+ cmd->scsi_done(cmd);
+ break;
+ }
+
+ sp->EOC = FALSE; /* Clean out this flag */
+
+ if (ccb->status == LOCKED || ccb->status == RESET) {
+ printk("eata_dma: int_handler, reseted command pid %ld returned"
+ "\n", cmd->pid);
+ DBG(DBG_INTR && DBG_DELAY, DELAY(1));
+ }
+
+ eata_stat = inb(base + HA_RSTATUS);
+ DBG(DBG_INTR, printk("IRQ %d received, base %#.4x, pid %ld, "
+ "target: %x, lun: %x, ea_s: %#.2x, hba_s: "
+ "%#.2x \n", irq, base, cmd->pid, cmd->target,
+ cmd->lun, eata_stat, hba_stat));
+
+ switch (hba_stat) {
+ case HA_NO_ERROR: /* NO Error */
+ if(HD(cmd)->do_latency == TRUE && ccb->timestamp)
+ eata_latency_in(ccb, HD(cmd));
+ result = DID_OK << 16;
+ break;
+ case HA_ERR_SEL_TO: /* Selection Timeout */
+ case HA_ERR_CMD_TO: /* Command Timeout */
+ result = DID_TIME_OUT << 16;
+ break;
+ case HA_BUS_RESET: /* SCSI Bus Reset Received */
+ result = DID_RESET << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: BUS RESET "
+ "received on cmd %ld\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_INIT_POWERUP: /* Initial Controller Power-up */
+ if (cmd->device->type != TYPE_TAPE)
+ result = DID_BUS_BUSY << 16;
+ else
+ result = DID_ERROR << 16;
+
+ for (i = 0; i < MAXTARGET; i++)
+ DBG(DBG_STATUS, printk(KERN_DEBUG "scsi%d: cmd pid %ld "
+ "returned with INIT_POWERUP\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_CP_ABORT_NA:
+ case HA_CP_ABORTED:
+ result = DID_ABORT << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: aborted cmd "
+ "returned\n", HD(cmd)->HBA_number));
+ break;
+ case HA_CP_RESET_NA:
+ case HA_CP_RESET:
+ HD(cmd)->resetlevel[cmd->channel] = 0;
+ result = DID_RESET << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: reseted cmd "
+ "pid %ldreturned\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ case HA_SCSI_HUNG: /* SCSI Hung */
+ printk(KERN_ERR "scsi%d: SCSI hung\n", HD(cmd)->HBA_number);
+ result = DID_ERROR << 16;
+ break;
+ case HA_RSENSE_FAIL: /* Auto Request-Sense Failed */
+ DBG(DBG_STATUS, printk(KERN_ERR "scsi%d: Auto Request Sense "
+ "Failed\n", HD(cmd)->HBA_number));
+ result = DID_ERROR << 16;
+ break;
+ case HA_UNX_BUSPHASE: /* Unexpected Bus Phase */
+ case HA_UNX_BUS_FREE: /* Unexpected Bus Free */
+ case HA_BUS_PARITY: /* Bus Parity Error */
+ case HA_UNX_MSGRJCT: /* Unexpected Message Reject */
+ case HA_RESET_STUCK: /* SCSI Bus Reset Stuck */
+ case HA_PARITY_ERR: /* Controller Ram Parity */
+ default:
+ result = DID_ERROR << 16;
+ break;
+ }
+ cmd->result = result | (scsi_stat << 1);
+
+#if DBG_INTR2
+ if (scsi_stat || result || hba_stat || eata_stat != 0x50
+ || cmd->scsi_done == NULL || cmd->device->id == 7)
+ printk("HBA: %d, channel %d, id: %d, lun %d, pid %ld:\n"
+ "eata_stat %#x, hba_stat %#.2x, scsi_stat %#.2x, "
+ "sense_key: %#x, result: %#.8x\n", x,
+ cmd->device->channel, cmd->device->id, cmd->device->lun,
+ cmd->pid, eata_stat, hba_stat, scsi_stat,
+ cmd->sense_buffer[2] & 0xf, cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+#endif
+
+ ccb->status = FREE; /* now we can release the slot */
+ cmd->scsi_done(cmd);
+ }
+ }
+
+ return;
+}
+
+inline int eata_send_command(u32 addr, u32 base, u8 command)
+{
+ long loop = R_LIMIT;
+
+ while (inb(base + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0)
+ return(FALSE);
+
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ /* And now the address in nice little byte chunks */
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ outb(command, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+inline int eata_send_immediate(u32 base, u32 addr, u8 ifc, u8 code, u8 code2)
+{
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ outb(0x0, base + HA_WDMAADDR - 1);
+ if(addr){
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ } else {
+ outb(0x0, base + HA_WDMAADDR);
+ outb(0x0, base + HA_WDMAADDR + 1);
+ outb(code2, base + HA_WCODE2);
+ outb(code, base + HA_WCODE);
+ }
+
+ outb(ifc, base + HA_WIFC);
+ outb(EATA_CMD_IMMEDIATE, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+int eata_queue(Scsi_Cmnd * cmd, void (* done) (Scsi_Cmnd *))
+{
+ unsigned int i, x, y;
+ ulong flags;
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *ccb;
+ struct scatterlist *sl;
+
+
+ save_flags(flags);
+ cli();
+
+#if 0
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_queue.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+#endif
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->sense_buffer[0] != 0) {
+ DBG(DBG_REQSENSE, printk(KERN_DEBUG "Tried to REQUEST SENSE\n"));
+ cmd->result = DID_OK << 16;
+ done(cmd);
+
+ return(0);
+ }
+
+ /* check for free slot */
+ for (y = hd->last_ccb + 1, x = 0; x < sh->can_queue; x++, y++) {
+ if (y >= sh->can_queue)
+ y = 0;
+ if (hd->ccb[y].status == FREE)
+ break;
+ }
+
+ hd->last_ccb = y;
+
+ if (x >= sh->can_queue) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk(KERN_CRIT "eata_queue pid %ld, HBA QUEUE FULL..., "
+ "returning DID_BUS_BUSY\n", cmd->pid));
+ done(cmd);
+ restore_flags(flags);
+ return(0);
+ }
+ ccb = &hd->ccb[y];
+
+ memset(ccb, 0, sizeof(struct eata_ccb) - sizeof(struct eata_sg_list *));
+
+ ccb->status = USED; /* claim free slot */
+
+ restore_flags(flags);
+
+ DBG(DBG_QUEUE, printk("eata_queue pid %ld, target: %x, lun: %x, y %d\n",
+ cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ if(hd->do_latency == TRUE)
+ eata_latency_out(ccb, cmd);
+
+ cmd->scsi_done = (void *)done;
+
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ ccb->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ ccb->DataIn = TRUE; /* Input mode */
+ }
+
+ /* FIXME: This will have to be changed once the midlevel driver
+ * allows different HBA IDs on every channel.
+ */
+ if (cmd->target == sh->this_id)
+ ccb->Interpret = TRUE; /* Interpret command */
+
+ if (cmd->use_sg) {
+ ccb->scatter = TRUE; /* SG mode */
+ if (ccb->sg_list == NULL) {
+ ccb->sg_list = kmalloc(sh->sg_tablesize * sizeof(struct eata_sg_list),
+ GFP_ATOMIC | GFP_DMA);
+ }
+ if (ccb->sg_list == NULL)
+ panic("eata_dma: Run out of DMA memory for SG lists !\n");
+ ccb->cp_dataDMA = htonl(virt_to_bus(ccb->sg_list));
+
+ ccb->cp_datalen = htonl(cmd->use_sg * sizeof(struct eata_sg_list));
+ sl=(struct scatterlist *)cmd->request_buffer;
+ for(i = 0; i < cmd->use_sg; i++, sl++){
+ ccb->sg_list[i].data = htonl(virt_to_bus(sl->address));
+ ccb->sg_list[i].len = htonl((u32) sl->length);
+ }
+ } else {
+ ccb->scatter = FALSE;
+ ccb->cp_datalen = htonl(cmd->request_bufflen);
+ ccb->cp_dataDMA = htonl(virt_to_bus(cmd->request_buffer));
+ }
+
+ ccb->Auto_Req_Sen = TRUE;
+ ccb->cp_reqDMA = htonl(virt_to_bus(cmd->sense_buffer));
+ ccb->reqlen = sizeof(cmd->sense_buffer);
+
+ ccb->cp_id = cmd->target;
+ ccb->cp_channel = cmd->channel;
+ ccb->cp_lun = cmd->lun;
+ ccb->cp_dispri = TRUE;
+ ccb->cp_identify = TRUE;
+ memcpy(ccb->cp_cdb, cmd->cmnd, cmd->cmd_len);
+
+ ccb->cp_statDMA = htonl(virt_to_bus(&(hd->sp)));
+
+ ccb->cp_viraddr = ccb; /* This will be passed thru, so we don't need to
+ * convert it */
+ ccb->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ if(eata_send_command((u32) ccb, (u32) sh->base, EATA_CMD_DMA_SEND_CP) == FALSE) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk("eata_queue target %d, pid %ld, HBA busy, "
+ "returning DID_BUS_BUSY\n",cmd->target, cmd->pid));
+ ccb->status = FREE;
+ done(cmd);
+ return(0);
+ }
+ DBG(DBG_QUEUE, printk("Queued base %#.4x pid: %ld target: %x lun: %x "
+ "slot %d irq %d\n", (s32)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ return(0);
+}
+
+
+int eata_abort(Scsi_Cmnd * cmd)
+{
+ ulong loop = HZ / 2;
+ ulong flags;
+ int x;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_abort called pid: %ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ /* Some interrupt controllers seem to loose interrupts */
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_abort.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY) {
+ if (--loop == 0) {
+ printk("eata_dma: abort, timeout error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ }
+ if (CD(cmd)->status == RESET) {
+ printk("eata_dma: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ DBG(DBG_ABNORM, printk("eata_dma: abort, queue slot locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_dma: abort: invalid slot status\n");
+}
+
+int eata_reset(Scsi_Cmnd * cmd, unsigned int resetflags)
+{
+ uint x;
+ ulong loop = loops_per_sec / 3;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_reset called pid:%ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_reset.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ if (HD(cmd)->state == RESET) {
+ printk("eata_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk("eata_reset: exit, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ if (HD(cmd)->ccb[x].status == LOCKED) {
+ HD(cmd)->ccb[x].status = FREE;
+ printk("eata_reset: locked slot %d forced free.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ continue;
+ }
+
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+
+ if (sp == NULL)
+ panic("eata_reset: slot %d, sp==NULL.\n", x);
+
+ printk("eata_reset: slot %d in reset, pid %ld.\n", x, sp->pid);
+
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ if (sp == cmd)
+ success = TRUE;
+ }
+
+ /* hard reset the HBA */
+ inb((u32) (cmd->host->base) + HA_RSTATUS); /* This might cause trouble */
+ eata_send_command(0, (u32) cmd->host->base, EATA_CMD_RESET);
+
+ HD(cmd)->state = RESET;
+
+ DBG(DBG_ABNORM, printk("eata_reset: board reset done, enabling "
+ "interrupts.\n"));
+
+ DELAY(2); /* In theorie we should get interrupts and set free all
+ * used queueslots */
+
+ DBG(DBG_ABNORM, printk("eata_reset: interrupts disabled again.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt and those that
+ * are still LOCKED from the last reset */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(cmd)->ccb[x].status = LOCKED;
+
+ printk("eata_reset: slot %d locked, DID_RESET, pid %ld done.\n",
+ x, sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ sp->scsi_done(sp);
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, pending.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PENDING);
+ } else {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
+/* Here we try to determine the optimum queue depth for
+ * each attached device.
+ *
+ * At the moment the algorithm is rather simple
+ */
+static void eata_select_queue_depths(struct Scsi_Host *host,
+ Scsi_Device *devicelist)
+{
+ Scsi_Device *device;
+ int devcount = 0;
+ int factor = 0;
+
+#if CRIPPLE_QUEUE
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host)
+ device->queue_depth = 2;
+ }
+#else
+ /* First we do a sample run go find out what we have */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if (device->host == host) {
+ devcount++;
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ factor += TYPE_DISK_QUEUE;
+ break;
+ case TYPE_TAPE:
+ factor += TYPE_TAPE_QUEUE;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ factor += TYPE_ROM_QUEUE;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ factor += TYPE_OTHER_QUEUE;
+ break;
+ }
+ }
+ }
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: needed queueslots %d\n",
+ host->host_no, factor));
+
+ if(factor == 0) /* We don't want to get a DIV BY ZERO error */
+ factor = 1;
+
+ factor = (SD(host)->queuesize * 10) / factor;
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: using factor %dE-1\n",
+ host->host_no, factor));
+
+ /* Now that have the factor we can set the individual queuesizes */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host) {
+ if(SD(device->host)->bustype != IS_ISA){
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ device->queue_depth = (TYPE_DISK_QUEUE * factor) / 10;
+ break;
+ case TYPE_TAPE:
+ device->queue_depth = (TYPE_TAPE_QUEUE * factor) / 10;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ device->queue_depth = (TYPE_ROM_QUEUE * factor) / 10;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ device->queue_depth = (TYPE_OTHER_QUEUE * factor) / 10;
+ break;
+ }
+ } else /* ISA forces us to limit the queue depth because of the
+ * bounce buffer memory overhead. I know this is cruel */
+ device->queue_depth = 2;
+
+ /*
+ * It showed that we need to set an upper limit of commands
+ * we can allow to queue for a single device on the bus.
+ * If we get above that limit, the broken midlevel SCSI code
+ * will produce bogus timeouts and aborts en masse. :-(
+ */
+ if(device->queue_depth > UPPER_DEVICE_QUEUE_LIMIT)
+ device->queue_depth = UPPER_DEVICE_QUEUE_LIMIT;
+ if(device->queue_depth == 0)
+ device->queue_depth = 1;
+
+ printk(KERN_INFO "scsi%d: queue depth for target %d on channel %d "
+ "set to %d\n", host->host_no, device->id, device->channel,
+ device->queue_depth);
+ }
+ }
+#endif
+}
+
+#if CHECK_BLINK
+int check_blink_state(long base)
+{
+ ushort loops = 10;
+ u32 blinkindicator;
+ u32 state = 0x12345678;
+ u32 oldstate = 0;
+
+ blinkindicator = htonl(0x54504442);
+ while ((loops--) && (state != oldstate)) {
+ oldstate = state;
+ state = inl((uint) base + 1);
+ }
+
+ DBG(DBG_BLINK, printk("Did Blink check. Status: %d\n",
+ (state == oldstate) && (state == blinkindicator)));
+
+ if ((state == oldstate) && (state == blinkindicator))
+ return(TRUE);
+ else
+ return (FALSE);
+}
+#endif
+
+char * get_board_data(u32 base, u32 irq, u32 id)
+{
+ struct eata_ccb *cp;
+ struct eata_sp *sp;
+ static char *buff;
+ ulong i;
+
+ cp = (struct eata_ccb *) scsi_init_malloc(sizeof(struct eata_ccb),
+ GFP_ATOMIC | GFP_DMA);
+ sp = (struct eata_sp *) scsi_init_malloc(sizeof(struct eata_sp),
+ GFP_ATOMIC | GFP_DMA);
+
+ buff = dma_scratch;
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(sp, 0, sizeof(struct eata_sp));
+ memset(buff, 0, 256);
+
+ cp->DataIn = TRUE;
+ cp->Interpret = TRUE; /* Interpret command */
+ cp->cp_dispri = TRUE;
+ cp->cp_identify = TRUE;
+
+ cp->cp_datalen = htonl(56);
+ cp->cp_dataDMA = htonl(virt_to_bus(buff));
+ cp->cp_statDMA = htonl(virt_to_bus(sp));
+ cp->cp_viraddr = cp;
+
+ cp->cp_id = id;
+ cp->cp_lun = 0;
+
+ cp->cp_cdb[0] = INQUIRY;
+ cp->cp_cdb[1] = 0;
+ cp->cp_cdb[2] = 0;
+ cp->cp_cdb[3] = 0;
+ cp->cp_cdb[4] = 56;
+ cp->cp_cdb[5] = 0;
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ eata_send_command((u32) cp, (u32) base, EATA_CMD_DMA_SEND_CP);
+
+ i = jiffies + (3 * HZ);
+ while (fake_int_happened == FALSE && jiffies <= i)
+ barrier();
+
+ DBG(DBG_INTR3, printk(KERN_DEBUG "fake_int_result: %#x hbastat %#x "
+ "scsistat %#x, buff %p sp %p\n",
+ fake_int_result, (u32) (sp->hba_stat /*& 0x7f*/),
+ (u32) sp->scsi_stat, buff, sp));
+
+ scsi_init_free((void *)cp, sizeof(struct eata_ccb));
+ scsi_init_free((void *)sp, sizeof(struct eata_sp));
+
+ if ((fake_int_result & HA_SERROR) || jiffies > i){
+ printk(KERN_WARNING "eata_dma: trying to reset HBA at %x to clear "
+ "possible blink state\n", base);
+ /* hard reset the HBA */
+ inb((u32) (base) + HA_RSTATUS);
+ eata_send_command(0, base, EATA_CMD_RESET);
+ DELAY(1);
+ return (NULL);
+ } else
+ return (buff);
+}
+
+
+int get_conf_PIO(u32 base, struct get_conf *buf)
+{
+ ulong loop = R_LIMIT;
+ u16 *p;
+
+ if(check_region(base, 9))
+ return (FALSE);
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return (FALSE);
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ DBG(DBG_PIO && DBG_PROBE,
+ printk("Issuing PIO READ CONFIG to HBA at %#x\n", base));
+ eata_send_command(0, base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = R_LIMIT;
+ for (p = (u16 *) buf;
+ (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ return (FALSE);
+
+ loop = R_LIMIT;
+ *p = inw(base + HA_RDATA);
+ }
+
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? */
+ if (htonl(EATA_SIGNATURE) == buf->signature) {
+ DBG(DBG_PIO&&DBG_PROBE, printk("EATA Controller found at %x "
+ "EATA Level: %x\n", (uint) base,
+ (uint) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ return (TRUE);
+ }
+ } else {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+ "for HBA at %lx\n", (long)base));
+ }
+ return (FALSE);
+}
+
+
+void print_config(struct get_conf *gc)
+{
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d DMAS:%d\n",
+ (u32) ntohl(gc->len), gc->version,
+ gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support,
+ gc->DMA_support);
+ printk("DMAV:%d HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n",
+ gc->DMA_valid, gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+ gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d DMAC:%d FORCADR:%d SG_64K:%d SG_UAE:%d MID:%d "
+ "MCH:%d MLUN:%d\n",
+ gc->IRQ, gc->IRQ_TR, (8 - gc->DMA_channel) & 7, gc->FORCADR,
+ gc->SG_64K, gc->SG_UAE, gc->MAX_ID, gc->MAX_CHAN, gc->MAX_LUN);
+ printk("RIDQ:%d PCI:%d EISA:%d\n",
+ gc->ID_qest, gc->is_PCI, gc->is_EISA);
+ DBG(DPT_DEBUG, DELAY(14));
+}
+
+short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt,
+ u8 bustype)
+{
+ ulong size = 0;
+ unchar dma_channel = 0;
+ char *buff = 0;
+ unchar bugs = 0;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+ int x;
+
+
+ DBG(DBG_REGISTER, print_config(gc));
+
+ if (gc->DMA_support == FALSE) {
+ printk("The EATA HBA at %#.4x does not support DMA.\n"
+ "Please use the EATA-PIO driver.\n", base);
+ return (FALSE);
+ }
+ if(gc->HAA_valid == FALSE || ntohl(gc->len) < 0x22)
+ gc->MAX_CHAN = 0;
+
+ if (reg_IRQ[gc->IRQ] == FALSE) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, (void *) eata_fake_int_handler, SA_INTERRUPT,
+ "eata_dma", NULL)){
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.", gc->IRQ);
+ return (FALSE);
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ] == TRUE) {
+ printk("Can't support more than one HBA on this IRQ,\n"
+ " if the IRQ is edge triggered. Sorry.\n");
+ return (FALSE);
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+
+ /* If DMA is supported but DMA_valid isn't set to indicate that
+ * the channel number is given we must have pre 2.0 firmware (1.7?)
+ * which leaves us to guess since the "newer ones" also don't set the
+ * DMA_valid bit.
+ */
+ if (gc->DMA_support && !gc->DMA_valid && gc->DMA_channel) {
+ printk(KERN_WARNING "eata_dma: If you are using a pre 2.0 firmware "
+ "please update it !\n"
+ " You can get new firmware releases from ftp.dpt.com\n");
+ gc->DMA_channel = (base == 0x1f0 ? 3 /* DMA=5 */ : 2 /* DMA=6 */);
+ gc->DMA_valid = TRUE;
+ }
+
+ /* if gc->DMA_valid it must be an ISA HBA and we have to register it */
+ dma_channel = BUSMASTER;
+ if (gc->DMA_valid) {
+ if (request_dma(dma_channel = (8 - gc->DMA_channel) & 7, "eata_dma")) {
+ printk(KERN_WARNING "Unable to allocate DMA channel %d for ISA HBA"
+ " at %#.4x.\n", dma_channel, base);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (dma_channel != BUSMASTER) {
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ if (bustype != IS_EISA && bustype != IS_ISA)
+ buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]);
+
+ if (buff == NULL) {
+ if (bustype == IS_EISA || bustype == IS_ISA) {
+ bugs = bugs || BROKEN_INQUIRY;
+ } else {
+ if (gc->DMA_support == FALSE)
+ printk(KERN_WARNING "HBA at %#.4x doesn't support DMA. "
+ "Sorry\n", base);
+ else
+ printk(KERN_WARNING "HBA at %#.4x does not react on INQUIRY. "
+ "Sorry.\n", base);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (gc->DMA_support == FALSE && buff != NULL)
+ printk(KERN_WARNING "HBA %.12sat %#.4x doesn't set the DMA_support "
+ "flag correctly.\n", &buff[16], base);
+
+ request_region(base, 9, "eata_dma"); /* We already checked the
+ * availability, so this
+ * should not fail.
+ */
+
+ if(ntohs(gc->queuesiz) == 0) {
+ gc->queuesiz = ntohs(64);
+ printk(KERN_WARNING "Warning: Queue size has to be corrected. Assuming"
+ " 64 queueslots\n"
+ " This might be a PM2012B with a defective Firmware\n"
+ " Contact DPT support@dpt.com for an upgrade\n");
+ }
+
+ size = sizeof(hostdata) + ((sizeof(struct eata_ccb) + sizeof(long))
+ * ntohs(gc->queuesiz));
+
+ DBG(DBG_REGISTER, printk("scsi_register size: %ld\n", size));
+
+ sh = scsi_register(tpnt, size);
+
+ if(sh != NULL) {
+
+ hd = SD(sh);
+
+ memset(hd->reads, 0, sizeof(u32) * 26);
+
+ sh->select_queue_depths = eata_select_queue_depths;
+
+ hd->bustype = bustype;
+
+ /*
+ * If we are using a ISA board, we can't use extended SG,
+ * because we would need excessive amounts of memory for
+ * bounce buffers.
+ */
+ if (gc->SG_64K==TRUE && ntohs(gc->SGsiz)==64 && hd->bustype!=IS_ISA){
+ sh->sg_tablesize = SG_SIZE_BIG;
+ } else {
+ sh->sg_tablesize = ntohs(gc->SGsiz);
+ if (sh->sg_tablesize > SG_SIZE || sh->sg_tablesize == 0) {
+ if (sh->sg_tablesize == 0)
+ printk(KERN_WARNING "Warning: SG size had to be fixed.\n"
+ "This might be a PM2012 with a defective Firmware"
+ "\nContact DPT support@dpt.com for an upgrade\n");
+ sh->sg_tablesize = SG_SIZE;
+ }
+ }
+ hd->sgsize = sh->sg_tablesize;
+ }
+
+ if(sh != NULL) {
+ sh->can_queue = hd->queuesize = ntohs(gc->queuesiz);
+ sh->cmd_per_lun = 0;
+ }
+
+ if(sh == NULL) {
+ DBG(DBG_REGISTER, printk(KERN_NOTICE "eata_dma: couldn't register HBA"
+ " at%x \n", base));
+ scsi_unregister(sh);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+
+
+ hd->broken_INQUIRY = (bugs & BROKEN_INQUIRY);
+
+ if(hd->broken_INQUIRY == TRUE) {
+ strcpy(hd->vendor, "DPT");
+ strcpy(hd->name, "??????????");
+ strcpy(hd->revision, "???.?");
+ hd->firmware_revision = 0;
+ } else {
+ strncpy(hd->vendor, &buff[8], 8);
+ hd->vendor[8] = 0;
+ strncpy(hd->name, &buff[16], 17);
+ hd->name[17] = 0;
+ hd->revision[0] = buff[32];
+ hd->revision[1] = buff[33];
+ hd->revision[2] = buff[34];
+ hd->revision[3] = '.';
+ hd->revision[4] = buff[35];
+ hd->revision[5] = 0;
+ hd->firmware_revision = (buff[32] << 24) + (buff[33] << 16)
+ + (buff[34] << 8) + buff[35];
+ }
+
+ if (hd->firmware_revision >= (('0'<<24) + ('7'<<16) + ('G'<< 8) + '0'))
+ hd->immediate_support = 1;
+ else
+ hd->immediate_support = 0;
+
+ switch (ntohl(gc->len)) {
+ case 0x1c:
+ hd->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ hd->EATA_revision = 'b';
+ break;
+ case 0x22:
+ hd->EATA_revision = 'c';
+ break;
+ case 0x24:
+ hd->EATA_revision = 'z';
+ default:
+ hd->EATA_revision = '?';
+ }
+
+
+ if(ntohl(gc->len) >= 0x22) {
+ sh->max_id = gc->MAX_ID + 1;
+ sh->max_lun = gc->MAX_LUN + 1;
+ } else {
+ sh->max_id = 8;
+ sh->max_lun = 8;
+ }
+
+ hd->HBA_number = sh->host_no;
+ hd->channel = gc->MAX_CHAN;
+ sh->max_channel = gc->MAX_CHAN;
+ sh->unique_id = base;
+ sh->base = (char *) base;
+ sh->io_port = base;
+ sh->n_io_port = 9;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = dma_channel;
+
+ /* FIXME:
+ * SCSI midlevel code should support different HBA ids on every channel
+ */
+ sh->this_id = gc->scsi_id[3];
+
+ if (gc->SECOND)
+ hd->primary = FALSE;
+ else
+ hd->primary = TRUE;
+
+ sh->wish_block = FALSE;
+
+ if (hd->bustype != IS_ISA) {
+ sh->unchecked_isa_dma = FALSE;
+ } else {
+ sh->unchecked_isa_dma = TRUE; /* We're doing ISA DMA */
+ }
+
+ for(x = 0; x <= 11; x++){ /* Initialize min. latency */
+ hd->writes_lat[x][1] = 0xffffffff;
+ hd->reads_lat[x][1] = 0xffffffff;
+ }
+ hd->all_lat[1] = 0xffffffff;
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if(hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+
+ return (TRUE);
+}
+
+
+
+void find_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ u32 base;
+ int i;
+
+#if CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+ pal1 = inb((u16)base - 8);
+ pal2 = inb((u16)base - 7);
+ pal3 = inb((u16)base - 6);
+
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) && (pal3 == NEC_ID3))||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) && (pal3 == ATT_ID3))){
+ DBG(DBG_PROBE, printk("EISA EATA id tags found: %x %x %x \n",
+ (int)pal1, (int)pal2, (int)pal3));
+#endif
+ if (get_conf_PIO(base, buf) == TRUE) {
+ if (buf->IRQ) {
+ DBG(DBG_EISA, printk("Registering EISA HBA\n"));
+ register_HBA(base, buf, tpnt, IS_EISA);
+ } else
+ printk("eata_dma: No valid IRQ. HBA removed from list\n");
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(base))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ /* Nothing found here so we take it from the list */
+ EISAbases[i] = 0;
+#if CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
+
+void find_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (ISAbases[i]) {
+ if (get_conf_PIO(ISAbases[i],buf) == TRUE){
+ DBG(DBG_ISA, printk("Registering ISA HBA\n"));
+ register_HBA(ISAbases[i], buf, tpnt, IS_ISA);
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(ISAbases[i]))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ ISAbases[i] = 0;
+ }
+ }
+ return;
+}
+
+void find_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+ printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
+#else
+
+ u8 pci_bus, pci_device_fn;
+ static s16 pci_index = 0; /* Device index to PCI BIOS calls */
+ u32 base = 0;
+ u16 com_adr;
+ u16 rev_device;
+ u32 error, i, x;
+ u8 pal1, pal2, pal3;
+
+ if (pcibios_present()) {
+ for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+ if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+ pci_index, &pci_bus, &pci_device_fn))
+ break;
+ DBG(DBG_PROBE && DBG_PCI,
+ printk("eata_dma: find_PCI, HBA at bus %d, device %d,"
+ " function %d, index %d\n", (s32)pci_bus,
+ (s32)((pci_device_fn & 0xf8) >> 3),
+ (s32)(pci_device_fn & 7), pci_index));
+
+ if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_CLASS_DEVICE, &rev_device))) {
+ if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+ if (!(error = pcibios_read_config_word(pci_bus,
+ pci_device_fn, PCI_COMMAND,
+ (u16 *) & com_adr))) {
+ if (!((com_adr & PCI_COMMAND_IO) &&
+ (com_adr & PCI_COMMAND_MASTER))) {
+ printk("eata_dma: find_PCI, HBA has IO or"
+ " BUSMASTER mode disabled\n");
+ continue;
+ }
+ } else
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_COMMAND\n", error);
+ } else
+ printk("eata_dma: find_PCI, DEVICECLASSID %x didn't match\n",
+ rev_device);
+ } else {
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_CLASS_BASE\n",
+ error);
+ continue;
+ }
+
+ if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, (int *) &base))){
+
+ /* Check if the address is valid */
+ if (base & 0x01) {
+ base &= 0xfffffffe;
+ /* EISA tag there ? */
+ pal1 = inb(base);
+ pal2 = inb(base + 1);
+ pal3 = inb(base + 2);
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) &&
+ (pal3 == NEC_ID3)) ||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) &&
+ (pal3 == ATT_ID3)))
+ base += 0x08;
+ else
+ base += 0x10; /* Now, THIS is the real address */
+
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_conf_PIO(base, buf) == TRUE) {
+
+ /* OK. We made it till here, so we can go now
+ * and register it. We only have to check and
+ * eventually remove it from the EISA and ISA list
+ */
+ DBG(DBG_PCI, printk("Registering PCI HBA\n"));
+ register_HBA(base, buf, tpnt, IS_PCI);
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88)
+ EISAbases[(base >> 12) & 0x0f] = 0;
+ continue; /* break; */
+ }
+#if CHECK_BLINK
+ else if (check_blink_state(base) == TRUE) {
+ printk("eata_dma: HBA is in BLINK state.\n"
+ "Consult your HBAs manual to correct this.\n");
+ }
+#endif
+ }
+ }
+ } else {
+ printk("eata_dma: error %x while reading "
+ "PCI_BASE_ADDRESS_0\n", error);
+ }
+ }
+ } else {
+ printk("eata_dma: No BIOS32 extensions present. This driver release "
+ "still depends on it.\n"
+ " Skipping scan for PCI HBAs. \n");
+ }
+#endif /* #ifndef CONFIG_PCI */
+ return;
+}
+
+int eata_detect(Scsi_Host_Template * tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+ printk("Using lots of delays to let you read the debugging output\n"));
+
+ tpnt->proc_dir = &proc_scsi_eata_dma;
+
+ status = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ dma_scratch = scsi_init_malloc(1024, GFP_ATOMIC | GFP_DMA);
+
+ if(status == NULL || dma_scratch == NULL) {
+ printk("eata_dma: can't allocate enough memory to probe for hosts !\n");
+ return(0);
+ }
+
+ dma_scratch += 4;
+
+ find_PCI(&gc, tpnt);
+
+ find_EISA(&gc, tpnt);
+
+ find_ISA(&gc, tpnt);
+
+ for (i = 0; i < MAXIRQ; i++) { /* Now that we know what we have, we */
+ if (reg_IRQ[i] >= 1){ /* exchange the interrupt handler which */
+ free_irq(i, NULL); /* we used for probing with the real one */
+ request_irq(i, (void *)(eata_int_handler), SA_INTERRUPT|SA_SHIRQ,
+ "eata_dma", NULL);
+ }
+ }
+
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) driver version: %d.%d%s"
+ "\ndeveloped in co-operation with DPT\n"
+ "(c) 1993-96 Michael Neuffer, mike@i-Connect.Net\n",
+ VER_MAJOR, VER_MINOR, VER_SUB);
+ printk("Registered HBAs:");
+ printk("\nHBA no. Boardtype Revis EATA Bus BaseIO IRQ"
+ " DMA Ch ID Pr QS S/G IS\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.12s v%s 2.0%c %s %#.4x %2d",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+ (u32) HBA_ptr->base, HBA_ptr->irq);
+ if(HBA_ptr->dma_channel != BUSMASTER)
+ printk(" %2x ", HBA_ptr->dma_channel);
+ else
+ printk(" %s", "BMST");
+ printk(" %d %d %c %3d %3d %c\n",
+ SD(HBA_ptr)->channel+1, HBA_ptr->this_id,
+ (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+ HBA_ptr->can_queue, HBA_ptr->sg_tablesize,
+ (SD(HBA_ptr)->immediate_support == TRUE)?'Y':'N');
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ } else {
+ scsi_init_free((void *)status, 512);
+ }
+
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+
+ DBG(DPT_DEBUG, DELAY(12));
+
+ return(registered_HBAs);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = EATA_DMA;
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_dma.h b/linux/src/drivers/scsi/eata_dma.h
new file mode 100644
index 0000000..a23931b
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma.h
@@ -0,0 +1,128 @@
+/********************************************************
+* Header file for eata_dma.c Linux EATA-DMA SCSI driver *
+* (c) 1993-96 Michael Neuffer *
+* mike@i-Connect.Net *
+* neuffer@mail.uni-mainz.de *
+*********************************************************
+* last change: 96/10/14 *
+********************************************************/
+
+#ifndef _EATA_DMA_H
+#define _EATA_DMA_H
+
+#ifndef HOSTS_C
+
+#include "eata_generic.h"
+
+
+#define VER_MAJOR 2
+#define VER_MINOR 5
+#define VER_SUB "9b"
+
+
+/************************************************************************
+ * Here you can switch parts of the code on and off                     *
+ ************************************************************************/
+
+#define CHECKPAL 0 /* EISA pal checking on/off */
+#define CHECK_BLINK 1 /* Switch Blink state check off, might *
+                                      * be necessary for some MIPS machines*/
+#define CRIPPLE_QUEUE 0 /* Only enable this if the interrupt
+ * controller on your motherboard is
+ * broken and you are experiencing
+ * massive interrupt losses */
+
+/************************************************************************
+ * Debug options. *
+ * Enable DEBUG and whichever options you require. *
+ ************************************************************************/
+#define DEBUG_EATA 1 /* Enable debug code. */
+#define DPT_DEBUG 0 /* Bobs special */
+#define DBG_DELAY 0 /* Build in delays so debug messages can be
+                                      * read before they vanish off the top of
+ * the screen! */
+#define DBG_PROBE 0 /* Debug probe routines. */
+#define DBG_PCI 0 /* Trace PCI routines */
+#define DBG_EISA 0 /* Trace EISA routines */
+#define DBG_ISA 0 /* Trace ISA routines */
+#define DBG_BLINK 0 /* Trace Blink check */
+#define DBG_PIO 0 /* Trace get_config_PIO */
+#define DBG_COM 0 /* Trace command call */
+#define DBG_QUEUE 0 /* Trace command queueing. */
+#define DBG_QUEUE2 0 /* Trace command queueing SG. */
+#define DBG_INTR 0 /* Trace interrupt service routine. */
+#define DBG_INTR2 0 /* Trace interrupt service routine. */
+#define DBG_INTR3 0 /* Trace get_board_data interrupts. */
+#define DBG_REQSENSE 0 /* Trace request sense commands */
+#define DBG_RESET 0 /* Trace reset calls */
+#define DBG_STATUS 0 /* Trace status generation */
+#define DBG_PROC 0 /* Debug proc-fs related statistics */
+#define DBG_PROC_WRITE 0
+#define DBG_REGISTER 0 /* */
+#define DBG_ABNORM 1 /* Debug abnormal actions (reset, abort)*/
+
+#if DEBUG_EATA
+#define DBG(x, y) if ((x)) {y;}
+#else
+#define DBG(x, y)
+#endif
+
+#endif /* !HOSTS_C */
+
+int eata_detect(Scsi_Host_Template *);
+const char *eata_info(struct Scsi_Host *);
+int eata_command(Scsi_Cmnd *);
+int eata_queue(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int eata_abort(Scsi_Cmnd *);
+int eata_reset(Scsi_Cmnd *, unsigned int);
+int eata_proc_info(char *, char **, off_t, int, int, int);
+#ifdef MODULE
+int eata_release(struct Scsi_Host *);
+#else
+#define eata_release NULL
+#endif
+
+#include <scsi/scsicam.h>
+
+#define EATA_DMA { \
+ NULL, NULL, \
+ NULL, /* proc_dir_entry */ \
+ eata_proc_info, /* procinfo */ \
+ "EATA (Extended Attachment) HBA driver", \
+ eata_detect, \
+ eata_release, \
+ NULL, NULL, \
+ eata_queue, \
+ eata_abort, \
+ eata_reset, \
+ NULL, /* Slave attach */ \
+ scsicam_bios_param, \
+ 0, /* Canqueue */ \
+ 0, /* this_id */ \
+ 0, /* sg_tablesize */ \
+ 0, /* cmd_per_lun */ \
+ 0, /* present */ \
+ 1, /* True if ISA */ \
+ ENABLE_CLUSTERING }
+
+
+#endif /* _EATA_DMA_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_dma_proc.c b/linux/src/drivers/scsi/eata_dma_proc.c
new file mode 100644
index 0000000..14a4c96
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma_proc.c
@@ -0,0 +1,493 @@
+
+void swap_statistics(u8 *p)
+{
+ u32 y;
+ u32 *lp, h_lp;
+ u16 *sp, h_sp;
+ u8 *bp;
+
+ lp = (u32 *)p;
+ sp = ((short *)lp) + 1; /* Convert Header */
+ h_sp = *sp = ntohs(*sp);
+ lp++;
+
+ do {
+ sp = (u16 *)lp; /* Convert SubHeader */
+ *sp = ntohs(*sp);
+ bp = (u8 *) lp;
+ y = *(bp + 3);
+ lp++;
+ for (h_lp = (u32)lp; (u32)lp < h_lp + ((u32)*(bp + 3)); lp++)
+ *lp = ntohl(*lp);
+ }while ((u32)lp < ((u32)p) + 4 + h_sp);
+
+}
+
+/*
+ * eata_set_info
+ * buffer : pointer to the data that has been written to the hostfile
+ * length : number of bytes written to the hostfile
+ * HBA_ptr: pointer to the Scsi_Host struct
+ */
+int eata_set_info(char *buffer, int length, struct Scsi_Host *HBA_ptr)
+{
+ int orig_length = length;
+
+ if (length >= 8 && strncmp(buffer, "eata_dma", 8) == 0) {
+ buffer += 9;
+ length -= 9;
+ if(length >= 8 && strncmp(buffer, "latency", 7) == 0) {
+ SD(HBA_ptr)->do_latency = TRUE;
+ return(orig_length);
+ }
+
+ if(length >=10 && strncmp(buffer, "nolatency", 9) == 0) {
+ SD(HBA_ptr)->do_latency = FALSE;
+ return(orig_length);
+ }
+
+ printk("Unknown command:%s length: %d\n", buffer, length);
+ } else
+ printk("Wrong Signature:%10s\n", buffer);
+
+ return(-EINVAL);
+}
+
+/*
+ * eata_proc_info
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+int eata_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+
+ Scsi_Device *scd, SDev;
+ struct Scsi_Host *HBA_ptr;
+ Scsi_Cmnd scmd;
+ char cmnd[12];
+ static u8 buff[512];
+ static u8 buff2[512];
+ hst_cmd_stat *rhcs, *whcs;
+ coco *cc;
+ scsitrans *st;
+ scsimod *sm;
+ hobu *hb;
+ scbu *sb;
+ boty *bt;
+ memco *mc;
+ firm *fm;
+ subinf *si;
+ pcinf *pi;
+ arrlim *al;
+ int i, x;
+ int size, len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ scd = NULL;
+
+ HBA_ptr = first_HBA;
+ for (i = 1; i <= registered_HBAs; i++) {
+ if (HBA_ptr->host_no == hostno)
+ break;
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+
+ if(inout == TRUE) /* Has data been written to the file ? */
+ return(eata_set_info(buffer, length, HBA_ptr));
+
+ if (offset == 0)
+ memset(buff, 0, sizeof(buff));
+
+ cc = (coco *) (buff + 0x148);
+ st = (scsitrans *)(buff + 0x164);
+ sm = (scsimod *) (buff + 0x16c);
+ hb = (hobu *) (buff + 0x172);
+ sb = (scbu *) (buff + 0x178);
+ bt = (boty *) (buff + 0x17e);
+ mc = (memco *) (buff + 0x186);
+ fm = (firm *) (buff + 0x18e);
+ si = (subinf *) (buff + 0x196);
+ pi = (pcinf *) (buff + 0x19c);
+ al = (arrlim *) (buff + 0x1a2);
+
+ size = sprintf(buffer+len, "EATA (Extended Attachment) driver version: "
+ "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
+ len += size; pos = begin + len;
+ size = sprintf(buffer + len, "queued commands: %10ld\n"
+ "processed interrupts:%10ld\n", queue_counter, int_counter);
+ len += size; pos = begin + len;
+
+ size = sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Firmware revision: v%s\n",
+ SD(HBA_ptr)->revision);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Hardware Configuration:\n");
+ len += size;
+ pos = begin + len;
+
+ if(SD(HBA_ptr)->broken_INQUIRY == TRUE) {
+ if (HBA_ptr->dma_channel == BUSMASTER)
+ size = sprintf(buffer + len, "DMA: BUSMASTER\n");
+ else
+ size = sprintf(buffer + len, "DMA: %d\n", HBA_ptr->dma_channel);
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, "Host Bus: EISA\n");
+ len += size;
+ pos = begin + len;
+
+ } else {
+ memset(&SDev, 0, sizeof(Scsi_Device));
+ memset(&scmd, 0, sizeof(Scsi_Cmnd));
+
+ SDev.host = HBA_ptr;
+ SDev.id = HBA_ptr->this_id;
+ SDev.lun = 0;
+ SDev.channel = 0;
+
+ cmnd[0] = LOG_SENSE;
+ cmnd[1] = 0;
+ cmnd[2] = 0x33 + (3<<6);
+ cmnd[3] = 0;
+ cmnd[4] = 0;
+ cmnd[5] = 0;
+ cmnd[6] = 0;
+ cmnd[7] = 0x00;
+ cmnd[8] = 0x66;
+ cmnd[9] = 0;
+
+ scmd.cmd_len = 10;
+
+ scmd.host = HBA_ptr;
+ scmd.device = &SDev;
+ scmd.target = HBA_ptr->this_id;
+ scmd.lun = 0;
+ scmd.channel = 0;
+ scmd.use_sg = 0;
+
+ /*
+ * Do the command and wait for it to finish.
+ */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scmd.request.rq_status = RQ_SCSI_BUSY;
+ scmd.request.sem = &sem;
+ scsi_do_cmd (&scmd, cmnd, buff + 0x144, 0x66,
+ eata_scsi_done, 1 * HZ, 1);
+ down(&sem);
+ }
+
+ size = sprintf(buffer + len, "IRQ: %2d, %s triggered\n", cc->interrupt,
+ (cc->intt == TRUE)?"level":"edge");
+ len += size;
+ pos = begin + len;
+ if (HBA_ptr->dma_channel == 0xff)
+ size = sprintf(buffer + len, "DMA: BUSMASTER\n");
+ else
+ size = sprintf(buffer + len, "DMA: %d\n", HBA_ptr->dma_channel);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "CPU: MC680%02d %dMHz\n", bt->cpu_type,
+ bt->cpu_speed);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Host Bus: %s\n",
+ (SD(HBA_ptr)->bustype == IS_PCI)?"PCI ":
+ (SD(HBA_ptr)->bustype == IS_EISA)?"EISA":"ISA ");
+
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SCSI Bus:%s%s Speed: %sMB/sec. %s\n",
+ (sb->wide == TRUE)?" WIDE":"",
+ (sb->dif == TRUE)?" DIFFERENTIAL":"",
+ (sb->speed == 0)?"5":(sb->speed == 1)?"10":"20",
+ (sb->ext == TRUE)?"With external cable detection":"");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SCSI channel expansion Module: %s present\n",
+ (bt->sx1 == TRUE)?"SX1 (one channel)":
+ ((bt->sx2 == TRUE)?"SX2 (two channels)":"not"));
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SmartRAID hardware: %spresent.\n",
+ (cc->srs == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Type: %s\n",
+ ((cc->key == TRUE)?((bt->dmi == TRUE)?"integrated"
+ :((bt->dm4 == TRUE)?"DM401X"
+ :(bt->dm4k == TRUE)?"DM4000"
+ :"-"))
+ :"-"));
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, " Max array groups: %d\n",
+ (al->code == 0x0e)?al->max_groups:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Max drives per RAID 0 array: %d\n",
+ (al->code == 0x0e)?al->raid0_drv:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Max drives per RAID 3/5 array: %d\n",
+ (al->code == 0x0e)?al->raid35_drv:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Cache Module: %spresent.\n",
+ (cc->csh)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Type: %s\n",
+ ((cc->csh == TRUE)?((bt->cmi == TRUE)?"integrated"
+ :((bt->cm4 == TRUE)?"CM401X"
+ :((bt->cm4k == TRUE)?"CM4000"
+ :"-")))
+ :"-"));
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 3; x++) {
+ size = sprintf(buffer + len, " Bank%d: %dMB with%s ECC\n",x,
+ mc->banksize[x] & 0x7f,
+ (mc->banksize[x] & 0x80)?"":"out");
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer + len, "Timer Mod.: %spresent\n",
+ (cc->tmr == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "NVRAM : %spresent\n",
+ (cc->nvr == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SmartROM : %sabled\n",
+ (bt->srom == TRUE)?"dis":"en");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Alarm : %s\n",
+ (bt->alrm == TRUE)?"on":"off");
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ if(SD(HBA_ptr)->do_latency == FALSE) {
+
+ cmnd[0] = LOG_SENSE;
+ cmnd[1] = 0;
+ cmnd[2] = 0x32 + (3<<6);
+ cmnd[3] = 0;
+ cmnd[4] = 0;
+ cmnd[5] = 0;
+ cmnd[6] = 0;
+ cmnd[7] = 0x01;
+ cmnd[8] = 0x44;
+ cmnd[9] = 0;
+
+ scmd.cmd_len = 10;
+
+ /*
+ * Do the command and wait for it to finish.
+ */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scmd.request.rq_status = RQ_SCSI_BUSY;
+ scmd.request.sem = &sem;
+ scsi_do_cmd (&scmd, cmnd, buff2, 0x144,
+ eata_scsi_done, 1 * HZ, 1);
+ down(&sem);
+ }
+
+ swap_statistics(buff2);
+ rhcs = (hst_cmd_stat *)(buff2 + 0x2c);
+ whcs = (hst_cmd_stat *)(buff2 + 0x8c);
+
+ for (x = 0; x <= 11; x++) {
+ SD(HBA_ptr)->reads[x] += rhcs->sizes[x];
+ SD(HBA_ptr)->writes[x] += whcs->sizes[x];
+ SD(HBA_ptr)->reads[12] += rhcs->sizes[x];
+ SD(HBA_ptr)->writes[12] += whcs->sizes[x];
+ }
+ size = sprintf(buffer + len, "Host<->Disk command statistics:\n"
+ " Reads: Writes:\n");
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u\n", 1 << x,
+ SD(HBA_ptr)->reads[x],
+ SD(HBA_ptr)->writes[x]);
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u\n",
+ SD(HBA_ptr)->reads[11],
+ SD(HBA_ptr)->writes[11]);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer+len,"Sum :%12u %12u\n",
+ SD(HBA_ptr)->reads[12],
+ SD(HBA_ptr)->writes[12]);
+ len += size;
+ pos = begin + len;
+ }
+ }
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ if(SD(HBA_ptr)->do_latency == TRUE) {
+ int factor = 1024/HZ;
+ size = sprintf(buffer + len, "Host Latency Command Statistics:\n"
+ "Current timer resolution: %2dms\n"
+ " Reads: Min:(ms) Max:(ms) Ave:(ms)\n",
+ factor);
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u %12u %12u\n",
+ 1 << x,
+ SD(HBA_ptr)->reads_lat[x][0],
+ (SD(HBA_ptr)->reads_lat[x][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->reads_lat[x][1] * factor),
+ SD(HBA_ptr)->reads_lat[x][2] * factor,
+ SD(HBA_ptr)->reads_lat[x][3] * factor /
+ ((SD(HBA_ptr)->reads_lat[x][0])
+ ? SD(HBA_ptr)->reads_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u %12u %12u\n",
+ SD(HBA_ptr)->reads_lat[11][0],
+ (SD(HBA_ptr)->reads_lat[11][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->reads_lat[11][1] * factor),
+ SD(HBA_ptr)->reads_lat[11][2] * factor,
+ SD(HBA_ptr)->reads_lat[11][3] * factor /
+ ((SD(HBA_ptr)->reads_lat[x][0])
+ ? SD(HBA_ptr)->reads_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ size = sprintf(buffer + len,
+ " Writes: Min:(ms) Max:(ms) Ave:(ms)\n");
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u %12u %12u\n",
+ 1 << x,
+ SD(HBA_ptr)->writes_lat[x][0],
+ (SD(HBA_ptr)->writes_lat[x][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->writes_lat[x][1] * factor),
+ SD(HBA_ptr)->writes_lat[x][2] * factor,
+ SD(HBA_ptr)->writes_lat[x][3] * factor /
+ ((SD(HBA_ptr)->writes_lat[x][0])
+ ? SD(HBA_ptr)->writes_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u %12u %12u\n",
+ SD(HBA_ptr)->writes_lat[11][0],
+ (SD(HBA_ptr)->writes_lat[11][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->writes_lat[x][1] * factor),
+ SD(HBA_ptr)->writes_lat[11][2] * factor,
+ SD(HBA_ptr)->writes_lat[11][3] * factor /
+ ((SD(HBA_ptr)->writes_lat[x][0])
+ ? SD(HBA_ptr)->writes_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+
+#if 0
+ scd = scsi_devices;
+
+ size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+ len += size;
+ pos = begin + len;
+
+ while (scd) {
+ if (scd->host == HBA_ptr) {
+ proc_print_scsidevice(scd, buffer, &size, len);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+ scd = scd->next;
+ }
+#endif
+
+ stop_output:
+ DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len));
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len = length; /* Ending slop */
+ DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len));
+
+ return (len);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_dma_proc.h b/linux/src/drivers/scsi/eata_dma_proc.h
new file mode 100644
index 0000000..d49f348
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma_proc.h
@@ -0,0 +1,260 @@
+
+/* Physical SCSI address (channel/id/lun) of one mapped logical unit. */
+struct lun_map {
+ __u8 id:5, /* SCSI target id */
+ chan:3; /* SCSI channel on the HBA */
+ __u8 lun; /* logical unit number */
+};
+
+/* Parameter page describing an emulated drive's geometry
+ * (cylinders/heads/sectors) and its LUN mapping table. */
+typedef struct emul_pp {
+ __u8 p_code:6,
+ null:1,
+ p_save:1;
+ __u8 p_length;
+ __u16 cylinder;
+ __u8 heads;
+ __u8 sectors;
+ __u8 null2;
+ __u8 s_lunmap:4,
+ ems:1;
+ __u16 drive_type; /* In Little Endian ! */
+ struct lun_map lunmap[4];
+}emulpp;
+
+
+/* Log Sense pages */
+
+/* Common header preceding every Log Sense page. */
+typedef struct log_sheader {
+ __u8 page_code,
+ reserved;
+ __u16 length;
+}logsh;
+
+
+/* Log Sense Statistics */
+
+/* Read-command counters (parameter code 0x01); la_* = look-ahead. */
+typedef struct read_command_statistics {
+ __u16 code; /* 0x01 */
+ __u8 flags;
+ __u8 length; /* 0x24 */
+ __u32 h_commands,
+ uncached,
+ la_cmds,
+ la_blks,
+ la_hits,
+ missed,
+ hits,
+ seq_la_blks,
+ seq_la_hits;
+}r_cmd_stat;
+
+/* Write-command counters (parameter code 0x03); b_* = write-back buffer. */
+typedef struct write_command_statistics {
+ __u16 code; /* 0x03 */
+ __u8 flags;
+ __u8 length; /* 0x28 */
+ __u32 h_commands,
+ uncached,
+ thru,
+ bypass,
+ soft_err,
+ hits,
+ b_idle,
+ b_activ,
+ b_blks,
+ b_blks_clean;
+}w_cmd_stat;
+
+/* Host command-size histogram (12 buckets). */
+typedef struct host_command_statistics {
+ __u16 code; /* 0x02, 0x04 */
+ __u8 flags;
+ __u8 length; /* 0x30 */
+ __u32 sizes[12];
+}hst_cmd_stat;
+
+/* Physical command-size histogram (13 buckets). */
+typedef struct physical_command_statistics {
+ __u16 code; /* 0x06, 0x07 */
+ __u8 flags;
+ __u8 length; /* 0x34 */
+ __u32 sizes[13];
+}phy_cmd_stat;
+
+/* Miscellaneous per-device counters. */
+typedef struct misc_device_statistics {
+ __u16 code; /* 0x05 */
+ __u8 flags;
+ __u8 length; /* 0x10 */
+ __u32 disconnect,
+ pass_thru,
+ sg_commands,
+ stripe_boundary_crosses;
+}msc_stats;
+
+/* Configuration Pages */
+/* Each page shares the same 4-byte prefix: a 16-bit page code, a flags
+ * byte and a payload length byte (values given in the field comments). */
+
+typedef struct controller_configuration {
+ __u16 code; /* 0x01 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 intt:1,
+ sec:1,
+ csh:1,
+ key:1,
+ tmr:1,
+ srs:1,
+ nvr:1;
+ __u8 interrupt;
+}coco;
+
+typedef struct controller_hardware_errors {
+ __u16 code; /* 0x02 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 unused:1,
+ per:1;
+ __u8 interrupt;
+}coher;
+
+typedef struct memory_map {
+ __u16 code; /* 0x03, 0x04 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u32 memory_map;
+}mema;
+
+typedef struct scsi_transfer {
+ __u16 code; /* 0x05 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 offset,
+ period;
+ __u16 speed;
+}scsitrans;
+
+typedef struct scsi_modes {
+ __u16 code; /* 0x06 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 que:1,
+ cdis:1,
+ wtru:1,
+ dasd:1,
+ ncr:1,
+ awre:1;
+ __u8 reserved;
+}scsimod;
+
+typedef struct host_bus {
+ __u16 code; /* 0x07 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 speed:6,
+ pci:1,
+ eisa:1;
+ __u8 reserved;
+}hobu;
+
+typedef struct scsi_bus {
+ __u16 code; /* 0x08 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 speed:4,
+ res:1,
+ ext:1,
+ wide:1,
+ dif:1;
+ __u8 busnum;
+}scbu;
+
+/* Board identification: controller model bits plus CPU type/speed. */
+typedef struct board_type {
+ __u16 code; /* 0x09 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 unused:1,
+ cmi:1,
+ dmi:1,
+ cm4k:1,
+ cm4:1,
+ dm4k:1,
+ dm4:1,
+ hba:1;
+ __u8 cpu_type,
+ cpu_speed;
+ __u8 sx1:1,
+ sx2:1,
+ unused2:4,
+ alrm:1,
+ srom:1;
+}boty;
+
+typedef struct memory_config {
+ __u16 code; /* 0x0a */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 banksize[4];
+}memco;
+
+typedef struct firmware_info {
+ __u16 code; /* 0x0b */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 dnld:1,
+ bs528:1,
+ fmt:1,
+ fw528:1;
+ __u8 unused1,
+ fw_type,
+ unused;
+}firm;
+
+typedef struct subsystem_info {
+ __u16 code; /* 0x0c */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 shlf:1,
+ swap:1,
+ noss:1;
+ __u8 reserved;
+}subinf;
+
+/* Per-channel capability bits; one page per SCSI channel. */
+typedef struct per_channel_info {
+ __u16 code; /* 0x0d */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 channel;
+ __u8 shlf:1,
+ swap:1,
+ noss:1,
+ srs:1,
+ que:1,
+ ext:1,
+ wide:1,
+ diff:1;
+}pcinf;
+
+/* RAID configuration limits reported by the firmware. */
+typedef struct array_limits {
+ __u16 code; /* 0x0e */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 max_groups,
+ raid0_drv,
+ raid35_drv,
+ unused;
+}arrlim;
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/eata_generic.h b/linux/src/drivers/scsi/eata_generic.h
new file mode 100644
index 0000000..c884def
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_generic.h
@@ -0,0 +1,414 @@
+/********************************************************
+* Header file for eata_dma.c and eata_pio.c *
+* Linux EATA SCSI drivers *
+* (c) 1993-96 Michael Neuffer *
+* mike@i-Connect.Net *
+* neuffer@mail.uni-mainz.de *
+*********************************************************
+* last change: 96/08/14 *
+********************************************************/
+
+
+#ifndef _EATA_GENERIC_H
+#define _EATA_GENERIC_H
+
+
+
+/*********************************************
+ * Misc. definitions *
+ *********************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
/* Type-generic minimum. Arguments are fully parenthesized so operands
 * containing lower-precedence operators (e.g. "min(1, 2 & 1)") parse
 * correctly; the old form expanded the comparison as (a<b) textually,
 * letting "1 < 2 & 1" bind as "(1 < 2) & 1".
 * NOTE: each argument may be evaluated twice -- no side effects. */
#define min(a,b) (((a)<(b))?(a):(b))
+
+#define R_LIMIT 0x20000
+
+#define MAXISA 4
+#define MAXEISA 16
+#define MAXPCI 16
+#define MAXIRQ 16
+#define MAXTARGET 16
+#define MAXCHANNEL 3
+
+#define IS_ISA 'I'
+#define IS_EISA 'E'
+#define IS_PCI 'P'
+
+#define BROKEN_INQUIRY 1
+
+#define BUSMASTER 0xff
+#define PIO 0xfe
+
+#define EATA_SIGNATURE 0x45415441 /* BIG ENDIAN coded "EATA" sig. */
+
+#define DPT_ID1 0x12
+#define DPT_ID2 0x14
+
+#define ATT_ID1 0x06
+#define ATT_ID2 0x94
+#define ATT_ID3 0x0
+
+#define NEC_ID1 0x38
+#define NEC_ID2 0xa3
+#define NEC_ID3 0x82
+
+
+#define EATA_CP_SIZE 44
+
+#define MAX_PCI_DEVICES 32 /* Maximum # Of Devices Per Bus */
+#define MAX_METHOD_2 16 /* Max Devices For Method 2 */
+#define MAX_PCI_BUS 16 /* Maximum # Of Busses Allowed */
+
+#define SG_SIZE 64
+#define SG_SIZE_BIG 252 /* max. 8096 elements, 64k */
+
+#define UPPER_DEVICE_QUEUE_LIMIT 64 /* The limit we have to set for the
+ * device queue to keep the broken
+ * midlevel SCSI code from producing
+ * bogus timeouts
+ */
+
+#define TYPE_DISK_QUEUE 16
+#define TYPE_TAPE_QUEUE 4
+#define TYPE_ROM_QUEUE 4
+#define TYPE_OTHER_QUEUE 2
+
+#define FREE 0
+#define OK 0
+#define NO_TIMEOUT 0
+#define USED 1
+#define TIMEOUT 2
+#define RESET 4
+#define LOCKED 8
+#define ABORTED 16
+
+#define READ 0
+#define WRITE 1
+#define OTHER 2
+
+/* Shorthand accessors for the driver-private data hung off a SCSI
+ * command (HD/CD) or a Scsi_Host (SD). */
+#define HD(cmd) ((hostdata *)&(cmd->host->hostdata))
+#define CD(cmd) ((struct eata_ccb *)(cmd->host_scribble))
+#define SD(host) ((hostdata *)&(host->hostdata))
+
+/* Busy-wait roughly x seconds on jiffies with interrupts enabled
+ * (debug aid only). NOTE(review): the x argument is unparenthesized
+ * in "x * HZ" -- callers here only ever pass simple constants. */
+#define DELAY(x) { ulong flags, i; \
+ save_flags(flags); sti(); \
+ i = jiffies + (x * HZ); \
+ while (jiffies < i); \
+ restore_flags(flags); }
+
+/***********************************************
+ * EATA Command & Register definitions *
+ ***********************************************/
+#define PCI_REG_DPTconfig 0x40
+#define PCI_REG_PumpModeAddress 0x44
+#define PCI_REG_PumpModeData 0x48
+#define PCI_REG_ConfigParam1 0x50
+#define PCI_REG_ConfigParam2 0x54
+
+
+#define EATA_CMD_PIO_SETUPTEST 0xc6
+#define EATA_CMD_PIO_READ_CONFIG 0xf0
+#define EATA_CMD_PIO_SET_CONFIG 0xf1
+#define EATA_CMD_PIO_SEND_CP 0xf2
+#define EATA_CMD_PIO_RECEIVE_SP 0xf3
+#define EATA_CMD_PIO_TRUNC 0xf4
+
+#define EATA_CMD_RESET 0xf9
+#define EATA_CMD_IMMEDIATE 0xfa
+
+#define EATA_CMD_DMA_READ_CONFIG 0xfd
+#define EATA_CMD_DMA_SET_CONFIG 0xfe
+#define EATA_CMD_DMA_SEND_CP 0xff
+
+#define ECS_EMULATE_SENSE 0xd4
+
+#define EATA_GENERIC_ABORT 0x00
+#define EATA_SPECIFIC_RESET 0x01
+#define EATA_BUS_RESET 0x02
+#define EATA_SPECIFIC_ABORT 0x03
+#define EATA_QUIET_INTR 0x04
+#define EATA_COLD_BOOT_HBA 0x06 /* Only as a last resort */
+#define EATA_FORCE_IO 0x07
+
+#define HA_CTRLREG 0x206 /* control register for HBA */
+#define HA_CTRL_DISINT 0x02 /* CTRLREG: disable interrupts */
+#define HA_CTRL_RESCPU 0x04 /* CTRLREG: reset processor */
+#define HA_CTRL_8HEADS 0x08 /* CTRLREG: set for drives with*
+ * >=8 heads (WD1003 rudimentary :-) */
+
+#define HA_WCOMMAND 0x07 /* command register offset */
+#define HA_WIFC 0x06 /* immediate command offset */
+#define HA_WCODE 0x05
+#define HA_WCODE2 0x04
+#define HA_WDMAADDR 0x02 /* DMA address LSB offset */
+#define HA_RAUXSTAT 0x08 /* aux status register offset*/
+#define HA_RSTATUS 0x07 /* status register offset */
+#define HA_RDATA 0x00 /* data register (16bit) */
+#define HA_WDATA 0x00 /* data register (16bit) */
+
/* Status (HA_RSTATUS) and aux-status (HA_RAUXSTAT) register bits. */
#define HA_ABUSY 0x01 /* aux busy bit */
#define HA_AIRQ 0x02 /* aux IRQ pending bit */
#define HA_SERROR 0x01 /* pr. command ended in error*/
#define HA_SMORE 0x02 /* more data soon to come */
#define HA_SCORR 0x04 /* data corrected */
#define HA_SDRQ 0x08 /* data request active */
#define HA_SSC 0x10 /* seek complete */
#define HA_SFAULT 0x20 /* write fault */
#define HA_SREADY 0x40 /* drive ready */
#define HA_SBUSY 0x80 /* drive busy */
/* "drive ready" composite mask; parenthesized so the expansion stays a
 * single term inside larger expressions (the bare sum mis-associates,
 * e.g. in "x * HA_SDRDY" or "HA_SDRDY & y"). */
#define HA_SDRDY (HA_SSC+HA_SREADY+HA_SDRQ)
+
+/**********************************************
+ * Message definitions *
+ **********************************************/
+
+#define HA_NO_ERROR 0x00 /* No Error */
+#define HA_ERR_SEL_TO 0x01 /* Selection Timeout */
+#define HA_ERR_CMD_TO 0x02 /* Command Timeout */
+#define HA_BUS_RESET 0x03 /* SCSI Bus Reset Received */
+#define HA_INIT_POWERUP 0x04 /* Initial Controller Power-up */
+#define HA_UNX_BUSPHASE 0x05 /* Unexpected Bus Phase */
+#define HA_UNX_BUS_FREE 0x06 /* Unexpected Bus Free */
+#define HA_BUS_PARITY 0x07 /* Bus Parity Error */
+#define HA_SCSI_HUNG 0x08 /* SCSI Hung */
+#define HA_UNX_MSGRJCT 0x09 /* Unexpected Message Rejected */
+#define HA_RESET_STUCK 0x0a /* SCSI Bus Reset Stuck */
+#define HA_RSENSE_FAIL 0x0b /* Auto Request-Sense Failed */
+#define HA_PARITY_ERR 0x0c /* Controller Ram Parity Error */
+#define HA_CP_ABORT_NA 0x0d /* Abort Message sent to non-active cmd */
+#define HA_CP_ABORTED 0x0e /* Abort Message sent to active cmd */
+#define HA_CP_RESET_NA 0x0f /* Reset Message sent to non-active cmd */
+#define HA_CP_RESET 0x10 /* Reset Message sent to active cmd */
+#define HA_ECC_ERR 0x11 /* Controller Ram ECC Error */
+#define HA_PCI_PARITY 0x12 /* PCI Parity Error */
+#define HA_PCI_MABORT 0x13 /* PCI Master Abort */
+#define HA_PCI_TABORT 0x14 /* PCI Target Abort */
+#define HA_PCI_STABORT 0x15 /* PCI Signaled Target Abort */
+
+/**********************************************
+ * Other definitions *
+ **********************************************/
+
+/* Bit views of the EATA register set.
+ * NOTE(review): bit-field allocation order is compiler/ABI dependent --
+ * this layout assumes the historical i386 Linux ABI (LSB first). */
+struct reg_bit { /* reading this one will clear the interrupt */
+ __u8 error:1; /* previous command ended in an error */
+ __u8 more:1; /* more DATA coming soon, poll BSY & DRQ (PIO) */
+ __u8 corr:1; /* data read was successfully corrected with ECC*/
+ __u8 drq:1; /* data request active */
+ __u8 sc:1; /* seek complete */
+ __u8 fault:1; /* write fault */
+ __u8 ready:1; /* drive ready */
+ __u8 busy:1; /* controller busy */
+};
+
+struct reg_abit { /* reading this won't clear the interrupt */
+ __u8 abusy:1; /* auxiliary busy */
+ __u8 irq:1; /* set when drive interrupt is asserted */
+ __u8 dummy:6;
+};
+
+struct eata_register { /* EATA register set */
+ __u8 data_reg[2]; /* R, couldn't figure this one out */
+ __u8 cp_addr[4]; /* W, CP address register */
+ union {
+ __u8 command; /* W, command code: [read|set] conf, send CP*/
+ struct reg_bit status; /* R, see register_bit1 */
+ __u8 statusbyte;
+ } ovr;
+ struct reg_abit aux_stat; /* R, see register_bit2 */
+};
+
+/* Configuration block returned by the READ CONFIG command; filled
+ * word-by-word over PIO in get_pio_conf_PIO(). Multi-byte fields
+ * arrive in big-endian (network) order -- see the htonl() signature
+ * check in get_pio_conf_PIO(). */
+struct get_conf { /* Read Configuration Array */
+ __u32 len; /* Should return 0x22, 0x24, etc */
+ __u32 signature; /* Signature MUST be "EATA" */
+ __u8 version2:4,
+ version:4; /* EATA Version level */
+ __u8 OCS_enabled:1, /* Overlap Command Support enabled */
+ TAR_support:1, /* SCSI Target Mode supported */
+ TRNXFR:1, /* Truncate Transfer Cmd not necessary *
+ * Only used in PIO Mode */
+ MORE_support:1, /* MORE supported (only PIO Mode) */
+ DMA_support:1, /* DMA supported Driver uses only *
+ * this mode */
+ DMA_valid:1, /* DRQ value in Byte 30 is valid */
+ ATA:1, /* ATA device connected (not supported) */
+ HAA_valid:1; /* Hostadapter Address is valid */
+
+ __u16 cppadlen; /* Number of pad bytes send after CD data *
+ * set to zero for DMA commands */
+ __u8 scsi_id[4]; /* SCSI ID of controller 2-0 Byte 0 res. *
+ * if not, zero is returned */
+ __u32 cplen; /* CP length: number of valid cp bytes */
+ __u32 splen; /* Number of bytes returned after *
+ * Receive SP command */
+ __u16 queuesiz; /* max number of queueable CPs */
+ __u16 dummy;
+ __u16 SGsiz; /* max number of SG table entries */
+ __u8 IRQ:4, /* IRQ used this HA */
+ IRQ_TR:1, /* IRQ Trigger: 0=edge, 1=level */
+ SECOND:1, /* This is a secondary controller */
+ DMA_channel:2; /* DRQ index, DRQ is 2comp of DRQX */
+ __u8 sync; /* device at ID 7 through 0 is running in *
+ * synchronous mode, this will disappear */
+ __u8 DSBLE:1, /* ISA i/o addressing is disabled */
+ FORCADR:1, /* i/o address has been forced */
+ SG_64K:1,
+ SG_UAE:1,
+ :4;
+ __u8 MAX_ID:5, /* Max number of SCSI target IDs */
+ MAX_CHAN:3; /* Number of SCSI busses on HBA */
+ __u8 MAX_LUN; /* Max number of LUNs */
+ __u8 :3,
+ AUTOTRM:1,
+ M1_inst:1,
+ ID_qest:1, /* Raidnum ID is questionable */
+ is_PCI:1, /* HBA is PCI */
+ is_EISA:1; /* HBA is EISA */
+ __u8 RAIDNUM; /* unique HBA identifier */
+ __u8 unused[474];
+};
+
+/* One scatter/gather element: physical address + byte count. */
+struct eata_sg_list
+{
+ __u32 data;
+ __u32 len;
+};
+
+/* Command Packet sent to the HBA; fields through cp_reqDMA mirror the
+ * on-the-wire EATA CP layout, the fields after the marker comment are
+ * driver-private bookkeeping kept in the same slot. */
+struct eata_ccb { /* Send Command Packet structure */
+
+ __u8 SCSI_Reset:1, /* Cause a SCSI Bus reset on the cmd */
+ HBA_Init:1, /* Cause Controller to reinitialize */
+ Auto_Req_Sen:1, /* Do Auto Request Sense on errors */
+ scatter:1, /* Data Ptr points to a SG Packet */
+ Resrvd:1, /* RFU */
+ Interpret:1, /* Interpret the SCSI cdb of own use */
+ DataOut:1, /* Data Out phase with command */
+ DataIn:1; /* Data In phase with command */
+ __u8 reqlen; /* Request Sense Length *
+ * Valid if Auto_Req_Sen=1 */
+ __u8 unused[3];
+ __u8 FWNEST:1, /* send cmd to phys RAID component */
+ unused2:7;
+ __u8 Phsunit:1, /* physical unit on mirrored pair */
+ I_AT:1, /* inhibit address translation */
+ I_HBA_C:1, /* HBA inhibit caching */
+ unused3:5;
+
+ __u8 cp_id:5, /* SCSI Device ID of target */
+ cp_channel:3; /* SCSI Channel # of HBA */
+ __u8 cp_lun:3,
+ :2,
+ cp_luntar:1, /* CP is for target ROUTINE */
+ cp_dispri:1, /* Grant disconnect privilege */
+ cp_identify:1; /* Always TRUE */
+ __u8 cp_msg1; /* Message bytes 0-3 */
+ __u8 cp_msg2;
+ __u8 cp_msg3;
+ __u8 cp_cdb[12]; /* Command Descriptor Block */
+ __u32 cp_datalen; /* Data Transfer Length *
+ * If scatter=1 len of sg package */
+ void *cp_viraddr; /* address of this ccb */
+ __u32 cp_dataDMA; /* Data Address, if scatter=1 *
+ * address of scatter packet */
+ __u32 cp_statDMA; /* address for Status Packet */
+ __u32 cp_reqDMA; /* Request Sense Address, used if *
+ * CP command ends with error */
+ /* Additional CP info begins here */
+ __u32 timestamp; /* Needed to measure command latency */
+ __u32 timeout;
+ __u8 sizeindex;
+ __u8 rw_latency;
+ __u8 retries;
+ __u8 status; /* status of this queueslot */
+ Scsi_Cmnd *cmd; /* address of cmd */
+ struct eata_sg_list *sg_list;
+};
+
+
+/* Status Packet returned by the HBA when a command completes. */
+struct eata_sp {
+ __u8 hba_stat:7, /* HBA status */
+ EOC:1; /* True if command finished */
+ __u8 scsi_stat; /* Target SCSI status */
+ __u8 reserved[2];
+ __u32 residue_len; /* Number of bytes not transferred */
+ struct eata_ccb *ccb; /* Address set in COMMAND PACKET */
+ __u8 msg[12];
+};
+
+/* Per-host private data, stored in Scsi_Host.hostdata (see the SD()
+ * macro). The trailing zero-length ccb[] array holds the queue slots,
+ * allocated together with the host structure. */
+typedef struct hstd {
+ __u8 vendor[9];
+ __u8 name[18];
+ __u8 revision[6];
+ __u8 EATA_revision;
+ __u32 firmware_revision;
+ __u8 HBA_number;
+ __u8 bustype; /* bustype of HBA */
+ __u8 channel; /* # of avail. scsi channels */
+ __u8 state; /* state of HBA */
+ __u8 primary; /* true if primary */
+ __u8 more_support:1, /* HBA supports MORE flag */
+ immediate_support:1, /* HBA supports IMMEDIATE CMDs*/
+ broken_INQUIRY:1; /* This is an EISA HBA with *
+ * broken INQUIRY */
+ __u8 do_latency; /* Latency measurement flag */
+ __u32 reads[13];
+ __u32 writes[13];
+ __u32 reads_lat[12][4];
+ __u32 writes_lat[12][4];
+ __u32 all_lat[4];
+ __u8 resetlevel[MAXCHANNEL];
+ __u32 last_ccb; /* Last used ccb */
+ __u32 cplen; /* size of CP in words */
+ __u16 cppadlen; /* pad length of cp in words */
+ __u16 queuesize;
+ __u16 sgsize; /* # of entries in the SG list*/
+ __u16 devflags; /* bits set for detected devices */
+ __u8 hostid; /* SCSI ID of HBA */
+ __u8 moresupport; /* HBA supports MORE flag */
+ struct Scsi_Host *next;
+ struct Scsi_Host *prev;
+ struct eata_sp sp; /* status packet */
+ struct eata_ccb ccb[0]; /* ccb array begins here */
+}hostdata;
+
+/* structure for max. 2 emulated drives */
+struct drive_geom_emul {
+ __u8 trans; /* translation flag 1=transl */
+ __u8 channel; /* SCSI channel number */
+ __u8 HBA; /* HBA number (prim/sec) */
+ __u8 id; /* drive id */
+ __u8 lun; /* drive lun */
+ __u32 heads; /* number of heads */
+ __u32 sectors; /* number of sectors */
+ __u32 cylinder; /* number of cylinders */
+};
+
+struct geom_emul {
+ __u8 bios_drives; /* number of emulated drives */
+ struct drive_geom_emul drv[2]; /* drive structures */
+};
+
+#endif /* _EATA_GENERIC_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_pio.c b/linux/src/drivers/scsi/eata_pio.c
new file mode 100644
index 0000000..469b720
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_pio.c
@@ -0,0 +1,1042 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI PIO driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all EATA-PIO boards *
+ * -only supports DASD devices *
+ * *
+ * (c)1993-96 Michael Neuffer, Alfred Arnold *
+ * neuffer@goofy.zdv.uni-mainz.de *
+ * a.arnold@kfa-juelich.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ ************************************************************
+ * last change: 96/07/16 OS: Linux 2.0.8 *
+ ************************************************************/
+
+/* Look in eata_pio.h for configuration information */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include "eata_pio.h"
+#include "eata_dma_proc.h"
+#include "scsi.h"
+#include "sd.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+/* /proc/scsi/eata_pio directory entry registered with the SCSI layer. */
+struct proc_dir_entry proc_scsi_eata_pio = {
+ PROC_SCSI_EATA_PIO, 9, "eata_pio",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* Candidate ISA port bases and the IRQ assumed for each. */
+static uint ISAbases[MAXISA] =
+{0x1F0, 0x170, 0x330, 0x230};
+static uint ISAirqs[MAXISA] =
+{14,12,15,11};
+/* EISA slot probe flags: nonzero = slot still to be probed. */
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+/* Detected hosts are kept on a doubly linked list via SD(sh)->prev/next. */
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+/* Per-IRQ registration count and level-trigger flag, indexed by IRQ. */
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+/* Debug counters: interrupts taken and commands queued. */
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
+#include "eata_pio_proc.c"
+
+#ifdef MODULE
+/* Module unload hook: drop our reference on the host's IRQ (freeing it
+ * when we were the only registrant) and release the I/O port region of
+ * the primary channel. Always returns TRUE. */
+int eata_pio_release(struct Scsi_Host *sh)
+{
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL);
+ else reg_IRQ[sh->irq]--;
+ if (SD(sh)->channel == 0) {
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
+/* Advance the scatter/gather cursor in SCp by Increment bytes.
+ * When the current buffer is exhausted, step to the next SG buffer;
+ * when the last buffer is exhausted, clear SCp->Status to signal the
+ * transfer loops in the interrupt handler that no data remains.
+ * NOTE(review): assumes Increment never exceeds this_residual --
+ * callers bound it via min() against this_residual. */
+void IncStat(Scsi_Pointer *SCp, uint Increment)
+{
+ SCp->ptr+=Increment;
+ if ((SCp->this_residual-=Increment)==0)
+ {
+ if ((--SCp->buffers_residual)==0) SCp->Status=FALSE;
+ else
+ {
+ SCp->buffer++;
+ SCp->ptr=SCp->buffer->address;
+ SCp->this_residual=SCp->buffer->length;
+ }
+ }
+}
+
+/* Interrupt handler: walks all registered HBAs sharing this IRQ and,
+ * for each one that is not busy, moves the pending PIO data for the
+ * single active command (slot 0), then evaluates completion status and
+ * calls the command's scsi_done. Runs with interrupts disabled except
+ * around the scsi_done callback.
+ * The 16-bit port transfers are done in chunks of up to 256 words;
+ * "zwickel" buffers the spare byte when a SG segment has odd length,
+ * with "odd" flagging that a half-word is pending. */
+void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ uint eata_stat = 0xfffff;
+ Scsi_Cmnd *cmd;
+ hostdata *hd;
+ struct eata_ccb *cp;
+ uint base;
+ ulong flags;
+ uint x,z;
+ struct Scsi_Host *sh;
+ ushort zwickel=0;
+ unchar stat,odd;
+
+ save_flags(flags);
+ cli();
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) {
+ if (sh->irq != irq)
+ continue;
+ if (inb((uint)sh->base + HA_RSTATUS) & HA_SBUSY)
+ continue;
+
+ int_counter++;
+
+ hd=SD(sh);
+
+ /* only queue slot 0 is ever used by this driver */
+ cp = &hd->ccb[0];
+ cmd = cp->cmd;
+ base = (uint) cmd->host->base;
+
+ /* shovel data while the HBA keeps DRQ (or MORE) asserted */
+ do
+ {
+ stat=inb(base+HA_RSTATUS);
+ if (stat&HA_SDRQ)
+ if (cp->DataIn)
+ {
+ z=256; odd=FALSE;
+ while ((cmd->SCp.Status)&&((z>0)||(odd)))
+ {
+ if (odd)
+ {
+ /* store the high byte left over from the
+ * previous odd-length segment */
+ *(cmd->SCp.ptr)=zwickel>>8;
+ IncStat(&cmd->SCp,1);
+ odd=FALSE;
+ }
+ x=min(z,cmd->SCp.this_residual/2);
+ insw(base+HA_RDATA,cmd->SCp.ptr,x);
+ z-=x;
+ IncStat(&cmd->SCp,2*x);
+ if ((z>0)&&(cmd->SCp.this_residual==1))
+ {
+ /* segment ends mid-word: keep low byte,
+ * carry high byte over in zwickel */
+ zwickel=inw(base+HA_RDATA);
+ *(cmd->SCp.ptr)=zwickel&0xff;
+ IncStat(&cmd->SCp,1); z--;
+ odd=TRUE;
+ }
+ }
+ /* drain words the HBA sent beyond our buffers */
+ while (z>0) {
+ zwickel=inw(base+HA_RDATA);
+ z--;
+ }
+ }
+ else /* cp->DataOut */
+ {
+ odd=FALSE; z=256;
+ while ((cmd->SCp.Status)&&((z>0)||(odd)))
+ {
+ if (odd)
+ {
+ /* complete the word begun by the previous
+ * odd-length segment and write it out */
+ zwickel+=*(cmd->SCp.ptr)<<8;
+ IncStat(&cmd->SCp,1);
+ outw(zwickel,base+HA_RDATA);
+ z--;
+ odd=FALSE;
+ }
+ x=min(z,cmd->SCp.this_residual/2);
+ outsw(base+HA_RDATA,cmd->SCp.ptr,x);
+ z-=x;
+ IncStat(&cmd->SCp,2*x);
+ if ((z>0)&&(cmd->SCp.this_residual==1))
+ {
+ zwickel=*(cmd->SCp.ptr);
+ zwickel&=0xff;
+ IncStat(&cmd->SCp,1);
+ odd=TRUE;
+ }
+ }
+ /* pad out whatever the HBA still expects */
+ while (z>0||odd) {
+ outw(zwickel,base+HA_RDATA);
+ z--;
+ odd=FALSE;
+ }
+ }
+ }
+ while ((stat&HA_SDRQ)||((stat&HA_SMORE)&&hd->moresupport));
+
+ /* terminate handler if HBA goes busy again, i.e. transfers
+ * more data */
+
+ if (stat&HA_SBUSY) break;
+
+ /* OK, this is quite stupid, but I haven't found any correct
+ * way to get HBA&SCSI status so far */
+
+ if (!(inb(base+HA_RSTATUS)&HA_SERROR))
+ {
+ cmd->result=(DID_OK<<16);
+ hd->devflags|=(1<<cp->cp_id);
+ }
+ else if (hd->devflags&1<<cp->cp_id)
+ /* device answered before: report CHECK CONDITION */
+ cmd->result=(DID_OK<<16)+0x02;
+ else cmd->result=(DID_NO_CONNECT<<16);
+
+ if (cp->status == LOCKED) {
+ cp->status = FREE;
+ eata_stat = inb(base + HA_RSTATUS);
+ printk(KERN_NOTICE "eata_pio: int_handler, freeing locked "
+ "queueslot\n");
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+ restore_flags(flags);
+ return;
+ }
+
+#if DBG_INTR2
+ if (stat != 0x50)
+ printk(KERN_DEBUG "stat: %#.2x, result: %#.8x\n", stat,
+ cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+#endif
+
+ cp->status = FREE; /* now we can release the slot */
+
+ /* completion callback runs with interrupts enabled */
+ restore_flags(flags);
+ cmd->scsi_done(cmd);
+ save_flags(flags);
+ cli();
+ }
+ restore_flags(flags);
+
+ return;
+}
+
+/* Wait (bounded spin of HZ/2 iterations, not wall-clock time) for the
+ * HBA status register to drop BUSY, then write the command byte.
+ * Returns TRUE if the HBA never became ready (caller must bail out),
+ * FALSE after the command byte has been issued. */
+inline uint eata_pio_send_command(uint base, unchar command)
+{
+ uint loop = HZ/2;
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return(TRUE);
+
+ /* Enable interrupts for HBA. It is not the best way to do it at this
+ * place, but I hope that it doesn't interfere with the IDE driver
+ * initialization this way */
+
+ outb(HA_CTRL_8HEADS,base+HA_CTRLREG);
+
+ outb(command, base + HA_WCOMMAND);
+ return(FALSE);
+}
+
+/* Queue one SCSI command for PIO execution. Builds the Command Packet
+ * in queue slot 0 (the only slot this driver uses), primes the SG
+ * cursor in cmd->SCp, sends the CP to the HBA word-by-word and returns
+ * 0. On HBA-busy timeout the command completes immediately with
+ * DID_BUS_BUSY via done(). Runs under cli(). */
+int eata_pio_queue(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ uint x, y;
+ long flags;
+ uint base;
+
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *cp;
+
+ save_flags(flags);
+ cli();
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+ base = (uint) sh->base;
+
+ /* use only slot 0, as 2001 can handle only one cmd at a time */
+
+ y = x = 0;
+
+ if (hd->ccb[y].status!=FREE) {
+
+ DBG(DBG_QUEUE, printk(KERN_EMERG "can_queue %d, x %d, y %d\n",
+ sh->can_queue,x,y));
+#if DEBUG_EATA
+ panic(KERN_EMERG "eata_pio: run out of queue slots cmdno:%ld "
+ "intrno: %ld\n", queue_counter, int_counter);
+#else
+ panic(KERN_EMERG "eata_pio: run out of queue slots....\n");
+#endif
+ }
+
+ cp = &hd->ccb[y];
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+ cp->status = USED; /* claim free slot */
+
+ DBG(DBG_QUEUE, printk(KERN_DEBUG "eata_pio_queue pid %ld, target: %x, lun:"
+ " %x, y %d\n", cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ cmd->scsi_done = (void *)done;
+
+ /* pick the data direction from the SCSI opcode: the listed
+ * opcodes write to the device, everything else reads */
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ cp->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ cp->DataIn = TRUE; /* Input mode */
+ }
+
+ /* commands addressed to the HBA itself are interpreted on-board */
+ cp->Interpret = (cmd->target == hd->hostid);
+ cp->cp_datalen = htonl((ulong)cmd->request_bufflen);
+ cp->Auto_Req_Sen = FALSE;
+ cp->cp_reqDMA = htonl(0);
+ cp->reqlen = 0;
+
+ cp->cp_id = cmd->target;
+ cp->cp_lun = cmd->lun;
+ cp->cp_dispri = FALSE;
+ cp->cp_identify = TRUE;
+ memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+
+ cp->cp_statDMA = htonl(0);
+
+ cp->cp_viraddr = cp;
+ cp->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ /* initialize the SG cursor the interrupt handler will advance */
+ if (cmd->use_sg == 0)
+ {
+ cmd->SCp.buffers_residual=1;
+ cmd->SCp.ptr = cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.buffer = NULL;
+ } else {
+ cmd->SCp.buffer = cmd->request_buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ }
+ cmd->SCp.Status = (cmd->SCp.this_residual != 0); /* TRUE as long as bytes
+ * are to transfer */
+
+ if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP))
+ {
+ cmd->result = DID_BUS_BUSY << 16;
+ printk(KERN_NOTICE "eata_pio_queue target %d, pid %ld, HBA busy, "
+ "returning DID_BUS_BUSY, done.\n", cmd->target, cmd->pid);
+ done(cmd);
+ cp->status = FREE;
+ restore_flags(flags);
+ return (0);
+ }
+ /* push the CP out through the data port, then pad as required */
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
+ outsw(base + HA_RDATA, cp, hd->cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (x = 0; x < hd->cppadlen; x++) outw(0, base + HA_RDATA);
+
+ DBG(DBG_QUEUE,printk(KERN_DEBUG "Queued base %#.4lx pid: %ld target: %x "
+ "lun: %x slot %d irq %d\n", (long)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ restore_flags(flags);
+ return (0);
+}
+
+/* Mid-level abort entry point. Waits (bounded spin) for the HBA aux
+ * register to drop BUSY, then maps the slot state of the command onto
+ * the SCSI_ABORT_* return codes; panics on an unknown slot state.
+ * No abort is actually sent to the hardware. */
+int eata_pio_abort(Scsi_Cmnd * cmd)
+{
+ ulong flags;
+ uint loop = HZ;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_abort called pid: %ld "
+ "target: %x lun: %x reason %x\n", cmd->pid,
+ cmd->target, cmd->lun, cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+
+ while (inb((uint)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk(KERN_WARNING "eata_pio: abort, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == RESET) {
+ restore_flags(flags);
+ printk(KERN_WARNING "eata_pio: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ restore_flags(flags);
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio: abort, queue slot "
+ "locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_pio: abort: invalid slot status\n");
+}
+
+/* Mid-level reset entry point: marks every occupied queue slot RESET,
+ * hard-resets the HBA, busy-waits ~3 seconds for it to settle, then
+ * completes each still-RESET command with DID_RESET. "success" is
+ * never set, so this always returns SCSI_RESET_PUNT. */
+int eata_pio_reset(Scsi_Cmnd * cmd, unsigned int dummy)
+{
+ uint x, time, limit = 0;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+
+ save_flags(flags);
+ cli();
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset called pid:%ld target:"
+ " %x lun: %x reason %x\n", cmd->pid, cmd->target,
+ cmd->lun, cmd->abort_reason));
+
+ if (HD(cmd)->state == RESET) {
+ printk(KERN_WARNING "eata_pio_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ /* force all slots to be free */
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+ printk(KERN_WARNING "eata_pio_reset: slot %d in reset, pid %ld.\n", x,
+ sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ if (sp == NULL)
+ panic("eata_pio_reset: slot %d, sp==NULL.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ }
+
+ /* hard reset the HBA */
+ outb(EATA_CMD_RESET, (uint) cmd->host->base+HA_WCOMMAND);
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n"));
+ HD(cmd)->state = RESET;
+
+ /* bounded busy-wait: ~3 seconds of jiffies or 10M iterations */
+ time = jiffies;
+ while (jiffies < (time + (3 * HZ)) && limit++ < 10000000);
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: interrupts disabled, "
+ "loops %d.\n", limit));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is terminated */
+ printk(KERN_WARNING "eata_pio_reset: reset ccb %d.\n",x);
+ HD(cmd)->ccb[x].status = FREE;
+
+ /* completion callback runs with interrupts enabled */
+ restore_flags(flags);
+ sp->scsi_done(sp);
+ cli();
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) { /* hmmm... */
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, success.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_SUCCESS);
+ } else {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
+/* Issue an on-board-interpreted INQUIRY to the HBA at "base"/"id" over
+ * PIO and return a pointer to the (static, non-reentrant) 256-byte
+ * response buffer, or NULL on busy-timeout, error status or missing
+ * DRQ. The irq parameter is unused. */
+char * get_pio_board_data(ulong base, uint irq, uint id, ulong cplen, ushort cppadlen)
+{
+ struct eata_ccb cp;
+ static char buff[256];
+ int z;
+
+ memset(&cp, 0, sizeof(struct eata_ccb));
+ memset(buff, 0, sizeof(buff));
+
+ cp.DataIn = TRUE;
+ cp.Interpret = TRUE; /* Interpret command */
+
+ cp.cp_datalen = htonl(254);
+ cp.cp_dataDMA = htonl(0);
+
+ cp.cp_id = id;
+ cp.cp_lun = 0;
+
+ /* 6-byte INQUIRY CDB requesting 254 bytes of data */
+ cp.cp_cdb[0] = INQUIRY;
+ cp.cp_cdb[1] = 0;
+ cp.cp_cdb[2] = 0;
+ cp.cp_cdb[3] = 0;
+ cp.cp_cdb[4] = 254;
+ cp.cp_cdb[5] = 0;
+
+ if (eata_pio_send_command((uint) base, EATA_CMD_PIO_SEND_CP))
+ return (NULL);
+ /* write the CP out through the data port, then pad */
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
+ outsw(base + HA_RDATA, &cp, cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (z = 0; z < cppadlen; z++) outw(0, base + HA_RDATA);
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY);
+ if (inb(base + HA_RSTATUS) & HA_SERROR)
+ return (NULL);
+ else if (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ return (NULL);
+ else
+ {
+ /* read 127 words of response, drain anything extra */
+ insw(base+HA_RDATA, &buff, 127);
+ while (inb(base + HA_RSTATUS)&HA_SDRQ) inw(base + HA_RDATA);
+ return (buff);
+ }
+}
+
+/*
+ * get_pio_conf_PIO - probe for an EATA HBA at `base` by issuing a PIO
+ * READ CONFIG and reading the reply into *buf.  Returns TRUE when a board
+ * with a valid EATA signature answered, FALSE otherwise.  For pure-PIO ISA
+ * boards (when ALLOW_DMA_BOARDS is off) the IRQ is filled in from the
+ * static ISAirqs[] table instead of the config data.
+ */
+int get_pio_conf_PIO(u32 base, struct get_conf *buf)
+{
+    ulong loop = HZ/2;
+    int z;
+    ushort *p;
+
+    /* Port range already claimed by someone else -> nothing to probe. */
+    if(check_region(base, 9))
+        return (FALSE);
+
+    memset(buf, 0, sizeof(struct get_conf));
+
+    /* Bounded busy-wait for the adapter to go idle. */
+    while (inb(base + HA_RSTATUS) & HA_SBUSY)
+        if (--loop == 0)
+            return (FALSE);
+
+    DBG(DBG_PIO && DBG_PROBE,
+        printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#x\n", base));
+    eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG);
+
+    loop = HZ/2;
+    /* NOTE(review): the bound compares the byte address of p against
+     * buf + sizeof(struct get_conf)/2, so only about the first half of
+     * the struct is filled here (one word per iteration); the rest of
+     * the reply is drained below.  Confirm before "fixing" — changing
+     * this would alter which fields get read. */
+    for (p = (ushort *) buf;
+         (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+        while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+            if (--loop == 0)
+                return (FALSE);
+
+        loop = HZ/2;
+        *p = inw(base + HA_RDATA);
+    }
+    if (!(inb(base + HA_RSTATUS) & HA_SERROR)) {            /* Error ? */
+        if (htonl(EATA_SIGNATURE) == buf->signature) {
+            DBG(DBG_PIO&&DBG_PROBE, printk(KERN_NOTICE "EATA Controller found "
+                                           "at %#4x EATA Level: %x\n", base,
+                                           (uint) (buf->version)));
+
+            /* Drain whatever config words we did not consume above. */
+            while (inb(base + HA_RSTATUS) & HA_SDRQ)
+                inw(base + HA_RDATA);
+            if(ALLOW_DMA_BOARDS == FALSE) {
+                /* PIO-only boards don't report an IRQ; take it from the
+                 * per-slot ISA table instead. */
+                for (z = 0; z < MAXISA; z++)
+                    if (base == ISAbases[z]) {
+                        buf->IRQ = ISAirqs[z];
+                        break;
+                    }
+            }
+            return (TRUE);
+        }
+    } else {
+        DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+                              "for HBA at %x\n", base));
+    }
+    return (FALSE);
+}
+
+/*
+ * print_pio_config - dump the fields of a READ CONFIG reply to the console
+ * so the user can sanity-check what the HBA reported.
+ */
+void print_pio_config(struct get_conf *gc)
+{
+    /* One printk, same text as before: header line plus three data lines. */
+    printk("Please check values: (read config data)\n"
+           "LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n"
+           "HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n"
+           "IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n",
+           (uint) ntohl(gc->len), gc->version,
+           gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support,
+           gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+           gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND,
+           gc->IRQ, gc->IRQ_TR, gc->FORCADR,
+           gc->MAX_CHAN, gc->ID_qest);
+    DBG(DPT_DEBUG, DELAY(14));
+}
+
+/*
+ * print_selftest - run the HBA's setup/self-test command and (with
+ * VERBOSE_SETUP) echo its textual output to the console.  Returns non-zero
+ * on success, 0 if the adapter reports an error afterwards.
+ *
+ * NOTE(review): the busy/DRQ polls have no timeout — a wedged board will
+ * hang the probe here.
+ */
+static uint print_selftest(uint base)
+{
+    unchar buffer[512];
+#ifdef VERBOSE_SETUP
+    int z;
+#endif
+
+    printk("eata_pio: executing controller self test & setup...\n");
+    while (inb(base + HA_RSTATUS) & HA_SBUSY);
+    outb(EATA_CMD_PIO_SETUPTEST, base + HA_WCOMMAND);
+    do {
+        while (inb(base + HA_RSTATUS) & HA_SBUSY)
+            /* nothing */ ;
+        if (inb(base + HA_RSTATUS) & HA_SDRQ)
+        {
+            /* 256 words = one 512-byte chunk of self-test output. */
+            insw(base + HA_RDATA, &buffer, 256);
+#ifdef VERBOSE_SETUP
+            /* no beeps please... (7 == ASCII BEL) */
+            for (z = 0; z < 511 && buffer[z]; z++)
+                if (buffer[z] != 7) printk("%c", buffer[z]);
+#endif
+        }
+    } while (inb(base+HA_RSTATUS) & (HA_SBUSY|HA_SDRQ));
+
+    /* Success iff the final status shows no error bit. */
+    return (!(inb(base+HA_RSTATUS) & HA_SERROR));
+}
+
+/*
+ * register_pio_HBA - register one detected EATA-PIO HBA with the SCSI
+ * mid-layer: inquire board identity, run the self test, claim the IRQ and
+ * I/O region, allocate the Scsi_Host and fill in hostdata, and link the
+ * new host into the driver's global HBA list.  Returns TRUE/1 on success,
+ * FALSE on any failure.
+ *
+ * Fix: the `case 0x24` arm of the EATA-revision switch was missing its
+ * `break`, so the 'z' it assigned was immediately overwritten by the
+ * `default:` '?'.
+ */
+int register_pio_HBA(long base, struct get_conf *gc, Scsi_Host_Template * tpnt)
+{
+    ulong size = 0;
+    char *buff;
+    ulong cplen;
+    ushort cppadlen;
+    struct Scsi_Host *sh;
+    hostdata *hd;
+
+    DBG(DBG_REGISTER, print_pio_config(gc));
+
+    /* DMA-capable boards belong to the EATA-DMA driver unless the user
+     * explicitly allows them here. */
+    if (gc->DMA_support == TRUE) {
+        printk("HBA at %#.4lx supports DMA. Please use EATA-DMA driver.\n",base);
+        if(ALLOW_DMA_BOARDS == FALSE)
+            return (FALSE);
+    }
+
+    /* Note: cplen/cppadlen are computed (in words) as a side effect of the
+     * argument list and reused further down. */
+    if ((buff = get_pio_board_data((uint)base, gc->IRQ, gc->scsi_id[3],
+                                   cplen =(htonl(gc->cplen )+1)/2,
+                                   cppadlen=(htons(gc->cppadlen)+1)/2)) == NULL)
+    {
+        printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", (ulong) base);
+        return (FALSE);
+    }
+
+    if (print_selftest(base) == FALSE && ALLOW_DMA_BOARDS == FALSE)
+    {
+        printk("HBA at %#lx failed while performing self test & setup.\n",
+               (ulong) base);
+        return (FALSE);
+    }
+
+    if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
+        if (!request_irq(gc->IRQ, eata_pio_int_handler, SA_INTERRUPT,
+                         "EATA-PIO", NULL)){
+            reg_IRQ[gc->IRQ]++;
+            if (!gc->IRQ_TR)
+                reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+        } else {
+            printk("Couldn't allocate IRQ %d, Sorry.\n", gc->IRQ);
+            return (FALSE);
+        }
+    } else { /* More than one HBA on this IRQ */
+        if (reg_IRQL[gc->IRQ] == TRUE) {
+            printk("Can't support more than one HBA on this IRQ,\n"
+                   "  if the IRQ is edge triggered. Sorry.\n");
+            return (FALSE);
+        } else
+            reg_IRQ[gc->IRQ]++;
+    }
+
+    /* NOTE(review): get_pio_conf_PIO() checked 9 ports but only 8 are
+     * claimed here (and n_io_port below is 8) — confirm which is right. */
+    request_region(base, 8, "eata_pio");
+
+    size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
+
+    /* NOTE(review): scsi_register()'s result is used unchecked. */
+    sh = scsi_register(tpnt, size);
+    hd = SD(sh);
+
+    memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)));
+    memset(hd->reads, 0, sizeof(ulong) * 26);
+
+    /* Pull vendor, model and firmware revision out of the INQUIRY data. */
+    strncpy(SD(sh)->vendor, &buff[8], 8);
+    SD(sh)->vendor[8] = 0;
+    strncpy(SD(sh)->name, &buff[16], 17);
+    SD(sh)->name[17] = 0;
+    SD(sh)->revision[0] = buff[32];
+    SD(sh)->revision[1] = buff[33];
+    SD(sh)->revision[2] = buff[34];
+    SD(sh)->revision[3] = '.';
+    SD(sh)->revision[4] = buff[35];
+    SD(sh)->revision[5] = 0;
+
+    /* Map the config-block length onto the EATA spec revision letter. */
+    switch (ntohl(gc->len)) {
+    case 0x1c:
+        SD(sh)->EATA_revision = 'a';
+        break;
+    case 0x1e:
+        SD(sh)->EATA_revision = 'b';
+        break;
+    case 0x22:
+        SD(sh)->EATA_revision = 'c';
+        break;
+    case 0x24:
+        SD(sh)->EATA_revision = 'z';
+        break;  /* FIX: was falling through and being overwritten by '?' */
+    default:
+        SD(sh)->EATA_revision = '?';
+    }
+
+    /* EATA >= 2.0c reports the bus type directly; older boards are guessed
+     * from a byte of the INQUIRY model string. */
+    if(ntohl(gc->len) >= 0x22) {
+        if (gc->is_PCI == TRUE)
+            hd->bustype = IS_PCI;
+        else if (gc->is_EISA == TRUE)
+            hd->bustype = IS_EISA;
+        else
+            hd->bustype = IS_ISA;
+    } else {
+        if (buff[21] == '4')
+            hd->bustype = IS_PCI;
+        else if (buff[21] == '2')
+            hd->bustype = IS_EISA;
+        else
+            hd->bustype = IS_ISA;
+    }
+
+    SD(sh)->cplen=cplen;
+    SD(sh)->cppadlen=cppadlen;
+    SD(sh)->hostid=gc->scsi_id[3];
+    SD(sh)->devflags=1<<gc->scsi_id[3];
+    SD(sh)->moresupport=gc->MORE_support;
+    sh->unique_id = base;
+    sh->base = (char *) base;
+    sh->io_port = base;
+    sh->n_io_port = 8;
+    sh->irq = gc->IRQ;
+    sh->dma_channel = PIO;
+    sh->this_id = gc->scsi_id[3];
+    sh->can_queue = 1;        /* PIO: strictly one command at a time */
+    sh->cmd_per_lun = 1;
+    sh->sg_tablesize = SG_ALL;
+
+    hd->channel = 0;
+
+    sh->max_id = 8;
+    sh->max_lun = 8;
+
+    if (gc->SECOND)
+        hd->primary = FALSE;
+    else
+        hd->primary = TRUE;
+
+    sh->unchecked_isa_dma = FALSE; /* We can only do PIO */
+
+    hd->next = NULL;    /* build a linked list of all HBAs */
+    hd->prev = last_HBA;
+    if(hd->prev != NULL)
+        SD(hd->prev)->next = sh;
+    last_HBA = sh;
+    if (first_HBA == NULL)
+        first_HBA = sh;
+    registered_HBAs++;
+    return (1);
+}
+
+/*
+ * find_pio_ISA - probe every remaining ISA base address; register any HBA
+ * that answers READ CONFIG, then drop the address from the probe table so
+ * it is never probed again.
+ */
+void find_pio_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+    int slot;
+
+    for (slot = 0; slot < MAXISA; slot++) {
+        if (!ISAbases[slot])
+            continue;                     /* address already ruled out */
+
+        if (get_pio_conf_PIO(ISAbases[slot], buf) == TRUE)
+            register_pio_HBA(ISAbases[slot], buf, tpnt);
+
+        /* Probed once — remove the address from the table either way. */
+        ISAbases[slot] = 0;
+    }
+}
+
+/*
+ * find_pio_EISA - probe the EATA register window of each EISA slot
+ * (0x1c88, 0x2c88, ...) and register any responding HBA that reports a
+ * usable IRQ.
+ *
+ * NOTE(review): with CHECKPAL enabled the braces of the pal-signature
+ * `if` pair up across the #if/#endif blocks below — the indentation is
+ * misleading but the structure is intentional; with CHECKPAL on,
+ * EISAbases[i] is only cleared when the PAL id matched.
+ */
+void find_pio_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+    u32 base;
+    int i;
+
+#if CHECKPAL
+    u8 pal1, pal2, pal3;
+#endif
+
+    for (i = 0; i < MAXEISA; i++) {
+        if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+            base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+            /* Read the board's PAL id bytes just below the EATA window. */
+            pal1 = inb((u16)base - 8);
+            pal2 = inb((u16)base - 7);
+            pal3 = inb((u16)base - 6);
+
+            if (((pal1 == 0x12) && (pal2 == 0x14)) ||
+                ((pal1 == 0x38) && (pal2 == 0xa3) && (pal3 == 0x82)) ||
+                ((pal1 == 0x06) && (pal2 == 0x94) && (pal3 == 0x24))) {
+                DBG(DBG_PROBE, printk(KERN_NOTICE "EISA EATA id tags found: "
+                                      "%x %x %x \n",
+                                      (int)pal1, (int)pal2, (int)pal3));
+#endif
+                if (get_pio_conf_PIO(base, buf) == TRUE) {
+                    DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf));
+                    if (buf->IRQ) {
+                        register_pio_HBA(base, buf, tpnt);
+                    } else
+                        printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA "
+                               "removed from list\n");
+                }
+                /* Nothing found here so we take it from the list */
+                EISAbases[i] = 0;
+#if CHECKPAL
+            }
+#endif
+        }
+    }
+    return;
+}
+
+/*
+ * find_pio_PCI - scan the PCI bus (via the BIOS32/pcibios interface) for
+ * DPT storage controllers, probe each one with READ CONFIG, register the
+ * HBAs found, and remove their addresses from the ISA/EISA probe tables
+ * so they are not detected twice.  No-op (with a message) when the kernel
+ * was built without CONFIG_PCI or BIOS32 is absent.
+ */
+void find_pio_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+    printk(KERN_ERR "eata_pio: kernel PCI support not enabled. Skipping scan "
+           "for PCI HBAs.\n");
+#else
+
+    u8 pci_bus, pci_device_fn;
+    static s16 pci_index = 0;   /* Device index to PCI BIOS calls */
+    u32 base = 0;
+    u16 com_adr;
+    u16 rev_device;
+    u32 error, i, x;
+
+    if (pcibios_present()) {
+        for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+            if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+                                    pci_index, &pci_bus, &pci_device_fn))
+                break;
+            DBG(DBG_PROBE && DBG_PCI,
+                printk("eata_pio: HBA at bus %d, device %d,"
+                       " function %d, index %d\n", (s32)pci_bus,
+                       (s32)((pci_device_fn & 0xf8) >> 3),
+                       (s32)(pci_device_fn & 7), pci_index));
+
+            /* Require SCSI storage class and IO + busmaster enabled. */
+            if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+                                       PCI_CLASS_DEVICE, &rev_device))) {
+                if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+                    if (!(error = pcibios_read_config_word(pci_bus,
+                                               pci_device_fn, PCI_COMMAND,
+                                               (u16 *) & com_adr))) {
+                        if (!((com_adr & PCI_COMMAND_IO) &&
+                              (com_adr & PCI_COMMAND_MASTER))) {
+                            printk("HBA has IO or BUSMASTER mode disabled\n");
+                            continue;
+                        }
+                    } else
+                        printk("eata_pio: error %x while reading "
+                               "PCI_COMMAND\n", error);
+                } else
+                    printk("DEVICECLASSID %x didn't match\n", rev_device);
+            } else {
+                printk("eata_pio: error %x while reading PCI_CLASS_BASE\n",
+                       error);
+                continue;
+            }
+
+            if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+                                       PCI_BASE_ADDRESS_0, (int *) &base))){
+
+                /* Check if the address is valid */
+                if (base & 0x01) {
+                    base &= 0xfffffffe;
+                    /* EISA tag there ? */
+                    if ((inb(base) == 0x12) && (inb(base + 1) == 0x14))
+                        continue;   /* Jep, it's forced, so move on */
+                    base += 0x10;   /* Now, THIS is the real address */
+                    if (base != 0x1f8) {
+                        /* We didn't find it in the primary search */
+                        if (get_pio_conf_PIO(base, buf) == TRUE) {
+                            if (buf->FORCADR) /* If the address is forced */
+                                continue;     /* we'll find it later */
+
+                            /* OK. We made it till here, so we can go now
+                             * and register it. We only have to check and
+                             * eventually remove it from the EISA and ISA list
+                             */
+
+                            register_pio_HBA(base, buf, tpnt);
+
+                            if (base < 0x1000) {
+                                for (x = 0; x < MAXISA; ++x) {
+                                    if (ISAbases[x] == base) {
+                                        ISAbases[x] = 0;
+                                        break;
+                                    }
+                                }
+                            } else if ((base & 0x0fff) == 0x0c88) {
+                                /* Address lies in an EISA slot window:
+                                 * bits 12-15 give the slot number. */
+                                x = (base >> 12) & 0x0f;
+                                EISAbases[x] = 0;
+                            }
+                            /* keep scanning remaining PCI devices */
+                            continue; /* break; */
+                        }
+                    }
+                }
+            } else
+                printk("eata_pio: error %x while reading "
+                       "PCI_BASE_ADDRESS_0\n", error);
+        }
+    } else
+        printk("eata_pio: No BIOS32 extensions present. This driver release "
+               "still depends on it.\n"
+               "          Skipping scan for PCI HBAs.\n");
+#endif /* #ifndef CONFIG_PCI */
+    return;
+}
+
+
+/*
+ * eata_pio_detect - SCSI mid-layer detect entry point.  Scans PCI, then
+ * EISA, then ISA for EATA-PIO HBAs (each scan removes addresses it claims
+ * from the later scans' tables), prints a summary table of everything
+ * registered, and returns the number of HBAs found.
+ */
+int eata_pio_detect(Scsi_Host_Template * tpnt)
+{
+    struct Scsi_Host *HBA_ptr;
+    struct get_conf gc;
+    int i;
+
+    DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+        printk("Using lots of delays to let you read the debugging output\n"));
+
+    tpnt->proc_dir = &proc_scsi_eata_pio;
+
+    find_pio_PCI(&gc, tpnt);
+
+    find_pio_EISA(&gc, tpnt);
+
+    find_pio_ISA(&gc, tpnt);
+
+    /* NOTE(review): register_pio_HBA() already did request_irq() for every
+     * IRQ counted in reg_IRQ[], so this second request_irq() pass looks
+     * redundant (its failure return is ignored) — confirm intent. */
+    for (i = 0; i < MAXIRQ; i++)
+        if (reg_IRQ[i])
+            request_irq(i, eata_pio_int_handler, SA_INTERRUPT, "EATA-PIO", NULL);
+
+    HBA_ptr = first_HBA;
+
+    if (registered_HBAs != 0) {
+        printk("EATA (Extended Attachment) PIO driver version: %d.%d%s\n"
+               "(c) 1993-95 Michael Neuffer, neuffer@goofy.zdv.uni-mainz.de\n"
+               "            Alfred Arnold,   a.arnold@kfa-juelich.de\n"
+               "This release only supports DASD devices (harddisks)\n",
+               VER_MAJOR, VER_MINOR, VER_SUB);
+
+        /* Walk the linked list built by register_pio_HBA() and print one
+         * summary line per host. */
+        printk("Registered HBAs:\n");
+        printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:"
+               " QS: SG: CPL:\n");
+        for (i = 1; i <= registered_HBAs; i++) {
+            printk("scsi%-2d: %.10s v%s 2.0%c  %s %#.4x   %2d   %d   %d   %c"
+                   "  %2d  %2d  %2d\n",
+                   HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+                   SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+                   "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+                   (uint) HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel,
+                   HBA_ptr->this_id, (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+                   HBA_ptr->can_queue, HBA_ptr->sg_tablesize,
+                   HBA_ptr->cmd_per_lun);
+            HBA_ptr = SD(HBA_ptr)->next;
+        }
+    }
+    DBG(DPT_DEBUG,DELAY(12));
+
+    return (registered_HBAs);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+/* Template picked up by scsi_module.c to register the driver at insmod time. */
+Scsi_Host_Template driver_template = EATA_PIO;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_pio.h b/linux/src/drivers/scsi/eata_pio.h
new file mode 100644
index 0000000..333f7c4
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_pio.h
@@ -0,0 +1,116 @@
+/********************************************************
+* Header file for eata_pio.c Linux EATA-PIO SCSI driver *
+* (c) 1993-96 Michael Neuffer                           *
+*********************************************************
+* last change: 96/05/05                                 *
+********************************************************/
+
+
+#ifndef _EATA_PIO_H
+#define _EATA_PIO_H
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include <scsi/scsicam.h>
+
+#ifndef HOSTS_C
+#include "eata_generic.h"
+
+/* Driver version, printed at detect time and in /proc output. */
+#define VER_MAJOR 0
+#define VER_MINOR 0
+#define VER_SUB "1b"
+
+/************************************************************************
+ * Here you can switch parts of the code on and off                     *
+ ************************************************************************/
+
+#define VERBOSE_SETUP /* show startup screen of 2001 */
+#define ALLOW_DMA_BOARDS 1
+
+/************************************************************************
+ * Debug options.                                                       *
+ * Enable DEBUG and whichever options you require.                      *
+ ************************************************************************/
+#define DEBUG_EATA 1 /* Enable debug code. */
+#define DPT_DEBUG 0 /* Bobs special */
+#define DBG_DELAY 0 /* Build in delays so debug messages can
+ * be read before they vanish off the top of
+ * the screen!
+ */
+#define DBG_PROBE 0 /* Debug probe routines. */
+#define DBG_ISA 0 /* Trace ISA routines */
+#define DBG_EISA 0 /* Trace EISA routines */
+#define DBG_PCI 0 /* Trace PCI routines */
+#define DBG_PIO 0 /* Trace get_config_PIO */
+#define DBG_COM 0 /* Trace command call */
+#define DBG_QUEUE 0 /* Trace command queueing. */
+#define DBG_INTR 0 /* Trace interrupt service routine. */
+#define DBG_INTR2 0 /* Trace interrupt service routine. */
+#define DBG_PROC 0 /* Debug proc-fs related statistics */
+#define DBG_PROC_WRITE 0
+#define DBG_REGISTER 0 /* */
+#define DBG_ABNORM 1 /* Debug abnormal actions (reset, abort) */
+
+#if DEBUG_EATA
+/* NOTE(review): DBG expands to a bare `if`, so an `else` following a
+ * DBG(...) call would bind to it; the usual do { y; } while (0) wrapper
+ * would be safer.  Left as-is to avoid churning every call site. */
+#define DBG(x, y) if ((x)) {y;}
+#else
+#define DBG(x, y)
+#endif
+
+#endif /* !HOSTS_C */
+
+/* SCSI mid-layer entry points implemented in eata_pio.c / eata_pio_proc.c */
+int eata_pio_detect(Scsi_Host_Template *);
+const char *eata_pio_info(struct Scsi_Host *);
+int eata_pio_command(Scsi_Cmnd *);
+int eata_pio_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int eata_pio_abort(Scsi_Cmnd *);
+int eata_pio_reset(Scsi_Cmnd *, unsigned int);
+int eata_pio_proc_info(char *, char **, off_t, int, int, int);
+#ifdef MODULE
+int eata_pio_release(struct Scsi_Host *);
+#else
+#define eata_pio_release NULL
+#endif
+
+
+/* Scsi_Host_Template initializer handed to the mid-layer (see detect). */
+#define EATA_PIO {                                      \
+        NULL, NULL,                                     \
+        NULL,                   /* proc_dir_entry */    \
+        eata_pio_proc_info,     /* procinfo       */    \
+        "EATA (Extended Attachment) PIO driver",        \
+        eata_pio_detect,                                \
+        eata_pio_release,                               \
+        NULL, NULL,                                     \
+        eata_pio_queue,                                 \
+        eata_pio_abort,                                 \
+        eata_pio_reset,                                 \
+        NULL, /* Slave attach */                        \
+        scsicam_bios_param,                             \
+        0,    /* Canqueue     */                        \
+        0,    /* this_id      */                        \
+        0,    /* sg_tablesize */                        \
+        0,    /* cmd_per_lun  */                        \
+        0,    /* present      */                        \
+        1,    /* True if ISA  */                        \
+        ENABLE_CLUSTERING }
+
+#endif /* _EATA_PIO_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_pio_proc.c b/linux/src/drivers/scsi/eata_pio_proc.c
new file mode 100644
index 0000000..54783f2
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_pio_proc.c
@@ -0,0 +1,135 @@
+
+/*
+ * eata_set_info
+ * buffer : pointer to the data that has been written to the hostfile
+ * length : number of bytes written to the hostfile
+ * HBA_ptr: pointer to the Scsi_Host struct
+ */
+int eata_pio_set_info(char *buffer, int length, struct Scsi_Host *HBA_ptr)
+{
+    /* The user-supplied text is only (optionally) echoed for debugging;
+     * note it is passed through "%s", not as a format string. */
+    DBG(DBG_PROC_WRITE, printk("%s\n", buffer));
+    return(-ENOSYS); /* Currently this is a no-op */
+}
+
+/*
+ * eata_proc_info
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+/*
+ * eata_pio_proc_info - /proc/scsi read/write handler.  On write (inout)
+ * it delegates to eata_pio_set_info(); on read it formats driver stats,
+ * per-host info and the attached-device list into `buffer`, honouring the
+ * procfs offset/length windowing protocol via begin/pos/len.
+ */
+int eata_pio_proc_info(char *buffer, char **start, off_t offset, int length,
+                       int hostno, int inout)
+{
+
+    Scsi_Device *scd;
+    struct Scsi_Host *HBA_ptr;
+    static u8 buff[512];
+    int i;
+    int size, len = 0;
+    off_t begin = 0;
+    off_t pos = 0;
+
+    /* Find the registered host matching `hostno` in the driver's list.
+     * NOTE(review): if no entry matches, HBA_ptr ends up as the last
+     * node's ->next (possibly NULL) and is dereferenced below — confirm
+     * the mid-layer never calls this with an unknown hostno. */
+    HBA_ptr = first_HBA;
+    for (i = 1; i <= registered_HBAs; i++) {
+        if (HBA_ptr->host_no == hostno)
+            break;
+        HBA_ptr = SD(HBA_ptr)->next;
+    }
+
+    if(inout == TRUE) /* Has data been written to the file ? */
+        return(eata_pio_set_info(buffer, length, HBA_ptr));
+
+    /* NOTE(review): `buff` is cleared here but never read or written
+     * afterwards in this function — looks like dead code. */
+    if (offset == 0)
+        memset(buff, 0, sizeof(buff));
+
+    /* From here on: append formatted text, tracking the virtual file
+     * position (pos) relative to the window [offset, offset+length). */
+    size = sprintf(buffer+len, "EATA (Extended Attachment) PIO driver version: "
+                   "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
+    len += size; pos = begin + len;
+    size = sprintf(buffer + len, "queued commands:     %10ld\n"
+                   "processed interrupts:%10ld\n", queue_counter, int_counter);
+    len += size; pos = begin + len;
+
+    size = sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n",
+                   HBA_ptr->host_no, SD(HBA_ptr)->name);
+    len += size;
+    pos = begin + len;
+    size = sprintf(buffer + len, "Firmware revision: v%s\n",
+                   SD(HBA_ptr)->revision);
+    len += size;
+    pos = begin + len;
+    size = sprintf(buffer + len, "IO: PIO\n");
+    len += size;
+    pos = begin + len;
+    size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+    len += size;
+    pos = begin + len;
+    size = sprintf(buffer + len, "Host Bus: %s\n",
+                   (SD(HBA_ptr)->bustype == 'P')?"PCI ":
+                   (SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ");
+
+    len += size;
+    pos = begin + len;
+
+    /* Discard text entirely before the requested window; stop once we
+     * are past its end. */
+    if (pos < offset) {
+        len = 0;
+        begin = pos;
+    }
+    if (pos > offset + length)
+        goto stop_output;
+
+    scd = scsi_devices;
+
+    size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+    len += size;
+    pos = begin + len;
+
+    /* One line per SCSI device attached to this host. */
+    while (scd) {
+        if (scd->host == HBA_ptr) {
+            proc_print_scsidevice(scd, buffer, &size, len);
+            len += size;
+            pos = begin + len;
+
+            if (pos < offset) {
+                len = 0;
+                begin = pos;
+            }
+            if (pos > offset + length)
+                goto stop_output;
+        }
+        scd = scd->next;
+    }
+
+ stop_output:
+    DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len));
+    *start=buffer+(offset-begin);   /* Start of wanted data */
+    len-=(offset-begin);            /* Start slop */
+    if(len>length)
+        len = length;               /* Ending slop */
+    DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len));
+
+    return (len);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/fdomain.c b/linux/src/drivers/scsi/fdomain.c
new file mode 100644
index 0000000..df71f5c
--- /dev/null
+++ b/linux/src/drivers/scsi/fdomain.c
@@ -0,0 +1,2082 @@
+/* fdomain.c -- Future Domain TMC-16x0 SCSI driver
+ * Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
+ * Revised: Sat Nov 2 09:27:47 1996 by root@cs.unc.edu
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992, 1993, 1994, 1995, 1996 Rickard E. Faith
+ *
+ * $Id: fdomain.c,v 1.1 1999/04/26 05:54:32 tb Exp $
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ **************************************************************************
+
+ SUMMARY:
+
+ Future Domain BIOS versions supported for autodetect:
+ 2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61
+ Chips are supported:
+ TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70
+ Boards supported:
+ Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX
+ Future Domain TMC-3260 (PCI)
+ Quantum ISA-200S, ISA-250MG
+ Adaptec AHA-2920 (PCI)
+ IBM ?
+ LILO command-line options:
+ fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]
+
+
+
+ DESCRIPTION:
+
+ This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680
+ TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a
+ 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin
+ high-density external connector. The 1670 and 1680 have floppy disk
+ controllers built in. The TMC-3260 is a PCI bus card.
+
+ Future Domain's older boards are based on the TMC-1800 chip, and this
+ driver was originally written for a TMC-1680 board with the TMC-1800 chip.
+ More recently, boards are being produced with the TMC-18C50 and TMC-18C30
+ chips. The latest and greatest board may not work with this driver. If
+ you have to patch this driver so that it will recognize your board's BIOS
+ signature, then the driver may fail to function after the board is
+ detected.
+
+ Please note that the drive ordering that Future Domain implemented in BIOS
+ versions 3.4 and 3.5 is the opposite of the order (currently) used by the
+ rest of the SCSI industry. If you have BIOS version 3.4 or 3.5, and have
+  more than one drive, then the drive ordering will be the reverse of that
+ which you see under DOS. For example, under DOS SCSI ID 0 will be D: and
+ SCSI ID 1 will be C: (the boot device). Under Linux, SCSI ID 0 will be
+ /dev/sda and SCSI ID 1 will be /dev/sdb. The Linux ordering is consistent
+ with that provided by all the other SCSI drivers for Linux. If you want
+ this changed, you will probably have to patch the higher level SCSI code.
+ If you do so, please send me patches that are protected by #ifdefs.
+
+ If you have a TMC-8xx or TMC-9xx board, then this is not the driver for
+ your board. Please refer to the Seagate driver for more information and
+ possible support.
+
+
+
+ HISTORY:
+
+ Linux Driver Driver
+ Version Version Date Support/Notes
+
+ 0.0 3 May 1992 V2.0 BIOS; 1800 chip
+ 0.97 1.9 28 Jul 1992
+ 0.98.6 3.1 27 Nov 1992
+ 0.99 3.2 9 Dec 1992
+
+ 0.99.3 3.3 10 Jan 1993 V3.0 BIOS
+ 0.99.5 3.5 18 Feb 1993
+ 0.99.10 3.6 15 May 1993 V3.2 BIOS; 18C50 chip
+ 0.99.11 3.17 3 Jul 1993 (now under RCS)
+ 0.99.12 3.18 13 Aug 1993
+ 0.99.14 5.6 31 Oct 1993 (reselection code removed)
+
+ 0.99.15 5.9 23 Jan 1994 V3.4 BIOS (preliminary)
+ 1.0.8/1.1.1 5.15 1 Apr 1994 V3.4 BIOS; 18C30 chip (preliminary)
+ 1.0.9/1.1.3 5.16 7 Apr 1994 V3.4 BIOS; 18C30 chip
+ 1.1.38 5.18 30 Jul 1994 36C70 chip (PCI version of 18C30)
+ 1.1.62 5.20 2 Nov 1994 V3.5 BIOS
+ 1.1.73 5.22 7 Dec 1994 Quantum ISA-200S board; V2.0 BIOS
+
+ 1.1.82 5.26 14 Jan 1995 V3.5 BIOS; TMC-1610M/MER/MEX board
+ 1.2.10 5.28 5 Jun 1995 Quantum ISA-250MG board; V2.0, V2.01 BIOS
+ 1.3.4 5.31 23 Jun 1995 PCI BIOS-32 detection (preliminary)
+ 1.3.7 5.33 4 Jul 1995 PCI BIOS-32 detection
+ 1.3.28 5.36 17 Sep 1995 V3.61 BIOS; LILO command-line support
+ 1.3.34 5.39 12 Oct 1995 V3.60 BIOS; /proc
+ 1.3.72 5.39 8 Feb 1996 Adaptec AHA-2920 board
+ 1.3.85 5.41 4 Apr 1996
+ 2.0.12 5.44 8 Aug 1996 Use ID 7 for all PCI cards
+
+
+
+ REFERENCES USED:
+
+ "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation,
+ 1990.
+
+ "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain
+ Corporation, January 1992.
+
+ "LXT SCSI Products: Specifications and OEM Technical Manual (Revision
+ B/September 1991)", Maxtor Corporation, 1991.
+
+ "7213S product Manual (Revision P3)", Maxtor Corporation, 1992.
+
+ "Draft Proposed American National Standard: Small Computer System
+ Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109,
+ revision 10h, October 17, 1991)
+
+ Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric
+ Youngdale (ericy@cais.com), 1992.
+
+ Private communication, Tuong Le (Future Domain Engineering department),
+ 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and
+ TMC-18C30 detection.)
+
+ Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page
+ 60 (2.39: Disk Partition Table Layout).
+
+ "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page
+ 6-1.
+
+
+
+ NOTES ON REFERENCES:
+
+ The Maxtor manuals were free. Maxtor telephone technical support is
+ great!
+
+ The Future Domain manuals were $25 and $35. They document the chip, not
+ the TMC-16x0 boards, so some information I had to guess at. In 1992,
+ Future Domain sold DOS BIOS source for $250 and the UN*X driver source was
+ $750, but these required a non-disclosure agreement, so even if I could
+ have afforded them, they would *not* have been useful for writing this
+  publicly distributable driver.  Future Domain technical support has
+ provided some information on the phone and have sent a few useful FAXs.
+ They have been much more helpful since they started to recognize that the
+ word "Linux" refers to an operating system :-).
+
+
+
+ ALPHA TESTERS:
+
+ There are many other alpha testers that come and go as the driver
+ develops. The people listed here were most helpful in times of greatest
+ need (mostly early on -- I've probably left out a few worthy people in
+ more recent times):
+
+ Todd Carrico (todd@wutc.wustl.edu), Dan Poirier (poirier@cs.unc.edu ), Ken
+ Corey (kenc@sol.acs.unt.edu), C. de Bruin (bruin@bruin@sterbbs.nl), Sakari
+ Aaltonen (sakaria@vipunen.hit.fi), John Rice (rice@xanth.cs.odu.edu), Brad
+ Yearwood (brad@optilink.com), and Ray Toy (toy@soho.crd.ge.com).
+
+ Special thanks to Tien-Wan Yang (twyang@cs.uh.edu), who graciously lent me
+ his 18C50-based card for debugging. He is the sole reason that this
+ driver works with the 18C50 chip.
+
+ Thanks to Dave Newman (dnewman@crl.com) for providing initial patches for
+ the version 3.4 BIOS.
+
+ Thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for providing
+ patches that support the TMC-3260, a PCI bus card with the 36C70 chip.
+ The 36C70 chip appears to be "completely compatible" with the 18C30 chip.
+
+ Thanks to Eric Kasten (tigger@petroglyph.cl.msu.edu) for providing the
+ patch for the version 3.5 BIOS.
+
+  Thanks to Stephen Henson (shenson@nyx10.cs.du.edu) for providing the
+ patch for the Quantum ISA-200S SCSI adapter.
+
+ Thanks to Adam Bowen for the signature to the 1610M/MER/MEX scsi cards, to
+ Martin Andrews (andrewm@ccfadm.eeg.ccf.org) for the signature to some
+ random TMC-1680 repackaged by IBM; and to Mintak Ng (mintak@panix.com) for
+ the version 3.61 BIOS signature.
+
+  Thanks to Mark Singer (elf@netcom.com) and Richard Simpson
+ (rsimpson@ewrcsdra.demon.co.uk) for more Quantum signatures and detective
+ work on the Quantum RAM layout.
+
+ Special thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for
+ providing patches for proper PCI BIOS32-mediated detection of the TMC-3260
+ card (a PCI bus card with the 36C70 chip). Please send James PCI-related
+ bug reports.
+
+ Thanks to Tom Cavin (tec@usa1.com) for preliminary command-line option
+ patches.
+
+ All of the alpha testers deserve much thanks.
+
+
+
+ NOTES ON USER DEFINABLE OPTIONS:
+
+ DEBUG: This turns on the printing of various debug information.
+
+ ENABLE_PARITY: This turns on SCSI parity checking. With the current
+ driver, all attached devices must support SCSI parity. If none of your
+ devices support parity, then you can probably get the driver to work by
+ turning this option off. I have no way of testing this, however, and it
+ would appear that no one ever uses this option.
+
+ FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
+ 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
+ the SCSI device, an interrupt will be raised. Therefore, this could be as
+ low as 0, or as high as 16. Note, however, that values which are too high
+ or too low seem to prevent any interrupts from occurring, and thereby lock
+ up the machine. I have found that 2 is a good number, but throughput may
+ be increased by changing this value to values which are close to 2.
+ Please let me know if you try any different values.
+
+ DO_DETECT: This activates some old scan code which was needed before the
+ high level drivers got fixed. If you are having trouble with the driver,
+ turning this on should not hurt, and might help. Please let me know if
+ this is the case, since this code will be removed from future drivers.
+
+ RESELECTION: This is no longer an option, since I gave up trying to
+ implement it in version 4.x of this driver. It did not improve
+ performance at all and made the driver unstable (because I never found one
+ of the two race conditions which were introduced by the multiple
+ outstanding command code). The instability seems a very high price to pay
+ just so that you don't have to wait for the tape to rewind. If you want
+ this feature implemented, send me patches. I'll be happy to send a copy
+ of my (broken) driver to anyone who would like to see a copy.
+
+ **************************************************************************/
+
+#ifdef PCMCIA
+#define MODULE
+#endif
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "fdomain.h"
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+
+#include <linux/config.h> /* for CONFIG_PCI */
+
+/* /proc/scsi/fdomain directory entry handed to the SCSI mid-layer
+   via tpnt->proc_dir in fdomain_16x0_detect(); name "fdomain" (7 chars). */
+struct proc_dir_entry proc_scsi_fdomain = {
+ PROC_SCSI_FDOMAIN, 7, "fdomain",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define VERSION "$Revision: 1.1 $"
+
+/* START OF USER DEFINABLE OPTIONS */
+
+#define DEBUG 1 /* Enable debugging output */
+#define ENABLE_PARITY 1 /* Enable SCSI Parity */
+#define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */
+#define DO_DETECT 0 /* Do device detection here (see scsi.c) */
+
+/* END OF USER DEFINABLE OPTIONS */
+
+#if DEBUG
+#define EVERY_ACCESS 0 /* Write a line on every scsi access */
+#define ERRORS_ONLY 1 /* Only write a line if there is an error */
+#define DEBUG_DETECT 0 /* Debug fdomain_16x0_detect() */
+#define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
+#define DEBUG_ABORT 1 /* Debug abort() routine */
+#define DEBUG_RESET 1 /* Debug reset() routine */
+#define DEBUG_RACE 1 /* Debug interrupt-driven race condition */
+#else
+#define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
+#define ERRORS_ONLY 0
+#define DEBUG_DETECT 0
+#define DEBUG_MESSAGES 0
+#define DEBUG_ABORT 0
+#define DEBUG_RESET 0
+#define DEBUG_RACE 0
+#endif
+
+/* Errors are reported on the line, so we don't need to report them again */
+#if EVERY_ACCESS
+#undef ERRORS_ONLY
+#define ERRORS_ONLY 0
+#endif
+
+#if ENABLE_PARITY
+#define PARITY_MASK 0x08
+#else
+#define PARITY_MASK 0x00
+#endif
+
+/* Future Domain controller chip variants recognized at probe time;
+   stored in the file-scope `chip` by fdomain_is_valid_port(). */
+enum chip_type {
+ unknown = 0x00,
+ tmc1800 = 0x01,
+ tmc18c50 = 0x02,
+ tmc18c30 = 0x03,
+};
+
+/* Bit flags tracking the per-command SCSI phase; kept in
+   current_SC->SCp.phase and tested/assigned by the interrupt handler. */
+enum {
+ in_arbitration = 0x02,
+ in_selection = 0x04,
+ in_other = 0x08,
+ disconnect = 0x10,
+ aborted = 0x20,
+ sent_ident = 0x40,
+};
+
+/* Read-register offsets relative to the adapter's I/O port base. */
+enum in_port_type {
+ Read_SCSI_Data = 0,
+ SCSI_Status = 1,
+ TMC_Status = 2,
+ FIFO_Status = 3, /* tmc18c50/tmc18c30 only */
+ Interrupt_Cond = 4, /* tmc18c50/tmc18c30 only */
+ LSB_ID_Code = 5,
+ MSB_ID_Code = 6,
+ Read_Loopback = 7,
+ SCSI_Data_NoACK = 8,
+ Interrupt_Status = 9,
+ Configuration1 = 10,
+ Configuration2 = 11, /* tmc18c50/tmc18c30 only */
+ Read_FIFO = 12,
+ FIFO_Data_Count = 14
+};
+
+/* Write-register offsets relative to the adapter's I/O port base. */
+enum out_port_type {
+ Write_SCSI_Data = 0,
+ SCSI_Cntl = 1,
+ Interrupt_Cntl = 2,
+ SCSI_Mode_Cntl = 3,
+ TMC_Cntl = 4,
+ Memory_Cntl = 5, /* tmc18c50/tmc18c30 only */
+ Write_Loopback = 7,
+ IO_Control = 11, /* tmc18c30 only */
+ Write_FIFO = 12
+};
+
+/* Adapter state discovered during detection.  The driver supports a
+   single board, hence file-scope rather than per-host storage. */
+static int port_base = 0;
+static void *bios_base = NULL;
+static int bios_major = 0;
+static int bios_minor = 0;
+static int PCI_bus = 0;
+static int Quantum = 0; /* Quantum board variant */
+static int interrupt_level = 0;
+static volatile int in_command = 0;
+static Scsi_Cmnd *current_SC = NULL;
+static enum chip_type chip = unknown;
+static int adapter_mask = 0;
+static int this_id = 0;
+static int setup_called = 0;
+
+#if DEBUG_RACE
+static volatile int in_interrupt_flag = 0;
+#endif
+
+/* Absolute I/O addresses (port_base + register offset), precomputed in
+   fdomain_16x0_detect() so the hot paths avoid repeated additions. */
+static int SCSI_Mode_Cntl_port;
+static int FIFO_Data_Count_port;
+static int Interrupt_Cntl_port;
+static int Interrupt_Status_port;
+static int Read_FIFO_port;
+static int Read_SCSI_Data_port;
+static int SCSI_Cntl_port;
+static int SCSI_Data_NoACK_port;
+static int SCSI_Status_port;
+static int TMC_Cntl_port;
+static int TMC_Status_port;
+static int Write_FIFO_port;
+static int Write_SCSI_Data_port;
+
+static int FIFO_Size = 0x2000; /* 8k FIFO for
+ pre-tmc18c30 chips */
+
+extern void fdomain_16x0_intr( int irq, void *dev_id, struct pt_regs * regs );
+
+/* Candidate BIOS ROM base addresses scanned for a signature match. */
+static void *addresses[] = {
+ (void *)0xc8000,
+ (void *)0xca000,
+ (void *)0xce000,
+ (void *)0xde000,
+ (void *)0xcc000, /* Extra addresses for PCI boards */
+ (void *)0xd0000,
+ (void *)0xe0000,
+};
+/* NOTE(review): divides by sizeof(unsigned), not sizeof(void *); the
+   count is only correct where pointers and unsigned have the same size
+   (true on i386) -- confirm for other targets. */
+#define ADDRESS_COUNT (sizeof( addresses ) / sizeof( unsigned ))
+
+/* Candidate ISA I/O port bases. */
+static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
+#define PORT_COUNT (sizeof( ports ) / sizeof( unsigned short ))
+
+/* IRQ table indexed by bits 3-1 of the Configuration1 register. */
+static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
+
+/*
+
+ READ THIS BEFORE YOU ADD A SIGNATURE!
+
+ READING THIS SHORT NOTE CAN SAVE YOU LOTS OF TIME!
+
+ READ EVERY WORD, ESPECIALLY THE WORD *NOT*
+
+ This driver works *ONLY* for Future Domain cards using the TMC-1800,
+ TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670,
+ and 1680.
+
+ The following BIOS signature signatures are for boards which do *NOT*
+ work with this driver (these TMC-8xx and TMC-9xx boards may work with the
+ Seagate driver):
+
+ FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88
+ FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89
+ FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90
+ FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92
+
+*/
+
+/* BIOS ROM signature table used to identify supported boards and to
+   derive the BIOS version and board variant (see `flag` semantics). */
+struct signature {
+ const char *signature;
+ int sig_offset;
+ int sig_length;
+ int major_bios_version;
+ int minor_bios_version;
+ int flag; /* 1 == PCI_bus, 2 == ISA_200S, 3 == ISA_250MG, 4 == ISA_200S */
+} signatures[] = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 2, 0, 2 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 2, 0, 3 },
+ { "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 2, 0, 4 },
+ { "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 3, 0, 0 },
+ { "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 3, 2, 0 },
+ { "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 3, -1, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 5, 33, 3, 4, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 26, 33, 3, 4, 1 },
+ { "Adaptec AHA-2920 PCI-SCSI Card", 42, 31, 3, -1, 1 },
+ { "IBM F1 P264/32", 5, 14, 3, -1, 1 },
+ /* This next signature may not be a 3.5 bios */
+ { "Future Domain Corp. V2.0108/18/93", 5, 33, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 3, 5, 0 },
+ { "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN CORP. V3.6108/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN TMC-18XX", 5, 22, -1, -1, 0 },
+
+ /* READ NOTICE ABOVE *BEFORE* YOU WASTE YOUR TIME ADDING A SIGNATURE
+ Also, fix the disk geometry code for your signature and send your
+ changes for faith@cs.unc.edu. Above all, do *NOT* change any old
+ signatures!
+
+ Note that the last line will match a "generic" 18XX bios. Because
+ Future Domain has changed the host SCSI ID and/or the location of the
+ geometry information in the on-board RAM area for each of the first
+ three BIOS's, it is still important to enter a fully qualified
+ signature in the table for any new BIOS's (after the host SCSI ID and
+ geometry location are verified). */
+};
+
+#define SIGNATURE_COUNT (sizeof( signatures ) / sizeof( struct signature ))
+
+/* Log a banner for the detected adapter: BIOS version (or "No BIOS"),
+   chip name, I/O base, and IRQ.  Reads the file-scope detection state
+   (bios_major/minor, bios_base, chip, PCI_bus, port_base,
+   interrupt_level). */
+static void print_banner( struct Scsi_Host *shpnt )
+{
+ if (!shpnt) return; /* This won't ever happen */
+
+ if (bios_major < 0 && bios_minor < 0) {
+ printk( "scsi%d <fdomain>: No BIOS; using scsi id %d\n",
+ shpnt->host_no, shpnt->this_id );
+ } else {
+ printk( "scsi%d <fdomain>: BIOS version ", shpnt->host_no );
+
+ if (bios_major >= 0) printk( "%d.", bios_major );
+ else printk( "?." );
+
+ /* NOTE(review): the unknown-minor branch prints "?." with a
+    trailing period, unlike the known-minor branch -- cosmetic. */
+ if (bios_minor >= 0) printk( "%d", bios_minor );
+ else printk( "?." );
+
+ printk( " at 0x%x using scsi id %d\n",
+ (unsigned)bios_base, shpnt->this_id );
+ }
+
+ /* If this driver works for later FD PCI
+ boards, we will have to modify banner
+ for additional PCI cards, but for now if
+ it's PCI it's a TMC-3260 - JTM */
+ printk( "scsi%d <fdomain>: %s chip at 0x%x irq ",
+ shpnt->host_no,
+ chip == tmc1800 ? "TMC-1800"
+ : (chip == tmc18c50 ? "TMC-18C50"
+ : (chip == tmc18c30 ?
+ (PCI_bus ? "TMC-36C70 (PCI bus)" : "TMC-18C30")
+ : "Unknown")),
+ port_base );
+
+ if (interrupt_level) printk( "%d", interrupt_level );
+ else printk( "<none>" );
+
+ printk( "\n" );
+}
+
+/* Parse the LILO boot option "fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]".
+   ints[0] holds the argument count, ints[1..] the values.  Sets the
+   file-scope port_base/interrupt_level/this_id overrides and marks
+   setup_called so detection skips the BIOS scan. */
+void fdomain_setup( char *str, int *ints )
+{
+ if (setup_called++ || ints[0] < 2 || ints[0] > 3) {
+ printk( "fdomain: usage: fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]\n" );
+ printk( "fdomain: bad LILO parameters?\n" );
+ }
+
+ /* NOTE(review): even after the usage message above, the values are
+    still assigned below -- a malformed option overwrites prior state. */
+ port_base = ints[0] >= 1 ? ints[1] : 0;
+ interrupt_level = ints[0] >= 2 ? ints[2] : 0;
+ this_id = ints[0] >= 3 ? ints[3] : 0;
+
+ bios_major = bios_minor = -1; /* Use geometry for BIOS version >= 3.4 */
+}
+
+
+/* Busy-wait for amount*10 milliseconds by spinning on the jiffies
+   counter (10 ms per jiffy here).  NOTE(review): the `<` comparison
+   misbehaves if jiffies wraps during the wait -- presumably acceptable
+   for the short probe-time delays this is used for; confirm. */
+static void do_pause( unsigned amount ) /* Pause for amount*10 milliseconds */
+{
+ unsigned long the_time = jiffies + amount; /* 0.01 seconds per jiffy */
+
+ while (jiffies < the_time);
+}
+
+/* Return the SCSI bus to the idle state: drop bus enable/select, clear
+   the mode control register, and rewrite TMC_Cntl with parity settings.
+   On 18c50/18c30 the 0x20 bit additionally clears a forced interrupt. */
+inline static void fdomain_make_bus_idle( void )
+{
+ outb( 0, SCSI_Cntl_port );
+ outb( 0, SCSI_Mode_Cntl_port );
+ if (chip == tmc18c50 || chip == tmc18c30)
+ outb( 0x21 | PARITY_MASK, TMC_Cntl_port ); /* Clear forced intr. */
+ else
+ outb( 0x01 | PARITY_MASK, TMC_Cntl_port );
+}
+
+/* Probe `port` for a Future Domain chip by reading the MCA ID
+   registers.  Returns 1 if a supported chip answers, 0 otherwise.
+   Side effects: sets the file-scope `chip` (tmc1800 / tmc18c50 /
+   tmc18c30) and shrinks FIFO_Size to 2k for the 18c30. */
+static int fdomain_is_valid_port( int port )
+{
+#if DEBUG_DETECT
+ printk( " (%x%x),",
+ inb( port + MSB_ID_Code ), inb( port + LSB_ID_Code ) );
+#endif
+
+ /* The MCA ID is a unique id for each MCA compatible board. We
+ are using ISA boards, but Future Domain provides the MCA ID
+ anyway. We can use this ID to ensure that this is a Future
+ Domain TMC-1660/TMC-1680.
+ */
+
+ if (inb( port + LSB_ID_Code ) != 0xe9) { /* test for 0x6127 id */
+ if (inb( port + LSB_ID_Code ) != 0x27) return 0;
+ if (inb( port + MSB_ID_Code ) != 0x61) return 0;
+ chip = tmc1800;
+ } else { /* test for 0xe960 id */
+ if (inb( port + MSB_ID_Code ) != 0x60) return 0;
+ chip = tmc18c50;
+
+#if 0
+
+ /* Try to toggle 32-bit mode. This only
+ works on an 18c30 chip. (User reports
+ say this works, so we should switch to
+ it in the near future.) */
+
+ outb( 0x80, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x80) {
+ outb( 0x00, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x00) {
+ chip = tmc18c30;
+ FIFO_Size = 0x800; /* 2k FIFO */
+ }
+ }
+#else
+
+ /* That should have worked, but appears to
+ have problems. Let's assume it is an
+ 18c30 if the RAM is disabled. */
+
+ if (inb( port + Configuration2 ) & 0x02) {
+ chip = tmc18c30;
+ FIFO_Size = 0x800; /* 2k FIFO */
+ }
+#endif
+ /* If that failed, we are an 18c50. */
+ }
+
+ return 1;
+}
+
+/* Sanity-check the chip by writing each byte value to the loopback
+   register and reading it back.  Returns 0 on success, 1 on the first
+   mismatch.  NOTE(review): the loop covers 0..254 only; the value 255
+   is never tested -- presumably harmless, confirm against chip docs. */
+static int fdomain_test_loopback( void )
+{
+ int i;
+ int result;
+
+ for (i = 0; i < 255; i++) {
+ outb( i, port_base + Write_Loopback );
+ result = inb( port_base + Read_Loopback );
+ if (i != result)
+ return 1;
+ }
+ return 0;
+}
+
+/* fdomain_get_irq assumes that we have a valid MCA ID for a
+ TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
+ bios_base matches these ports. If someone was unlucky enough to have
+ purchased more than one Future Domain board, then they will have to
+ modify this code, as we only detect one board here. [The one with the
+ lowest bios_base.]
+
+ Note that this routine is only used for systems without a PCI BIOS32
+ (e.g., ISA bus). For PCI bus systems, this routine will likely fail
+ unless one of the IRQs listed in the ints array is used by the board.
+ Sometimes it is possible to use the computer's BIOS setup screen to
+ configure a PCI system so that one of these IRQs will be used by the
+ Future Domain card. */
+
+/* Decode the IRQ from the Configuration1 register at `base`: bits 7-6
+   select the BIOS address (used to reject boards other than the one
+   whose signature we matched), bits 3-1 index the ints[] IRQ table.
+   Returns 0 if this is not the board we detected. */
+static int fdomain_get_irq( int base )
+{
+ int options = inb( base + Configuration1 );
+
+#if DEBUG_DETECT
+ printk( " Options = %x\n", options );
+#endif
+
+ /* Check for board with lowest bios_base --
+ this isn't valid for the 18c30 or for
+ boards on the PCI bus, so just assume we
+ have the right board. */
+
+ if (chip != tmc18c30
+ && !PCI_bus
+ && addresses[ (options & 0xc0) >> 6 ] != bios_base) return 0;
+
+ return ints[ (options & 0x0e) >> 1 ];
+}
+
+/* Locate an ISA board's I/O base and IRQ.  For version-2 BIOSes the
+   port base is read from the BIOS's RAM scratch area (offset depends
+   on the Quantum board variant); otherwise, or if that fails, every
+   entry of ports[] is probed.  Returns 1 and fills *irq/*iobase on
+   success, 0 if no board answers. */
+static int fdomain_isa_detect( int *irq, int *iobase )
+{
+ int i;
+ int base;
+ int flag = 0;
+
+ if (bios_major == 2) {
+ /* The TMC-1660/TMC-1680 has a RAM area just after the BIOS ROM.
+ Assuming the ROM is enabled (otherwise we wouldn't have been
+ able to read the ROM signature :-), then the ROM sets up the
+ RAM area with some magic numbers, such as a list of port
+ base addresses and a list of the disk "geometry" reported to
+ DOS (this geometry has nothing to do with physical geometry).
+ */
+
+ /* NOTE(review): these are plain `char` reads; where char is
+    signed, bytes >= 0x80 sign-extend and corrupt `base`.  The
+    known port bases (0x140..0x170) keep both bytes below 0x80,
+    so this presumably never triggers -- confirm. */
+ switch (Quantum) {
+ case 2: /* ISA_200S */
+ case 3: /* ISA_250MG */
+ base = *((char *)bios_base + 0x1fa2)
+ + (*((char *)bios_base + 0x1fa3) << 8);
+ break;
+ case 4: /* ISA_200S (another one) */
+ base = *((char *)bios_base + 0x1fa3)
+ + (*((char *)bios_base + 0x1fa4) << 8);
+ break;
+ default:
+ base = *((char *)bios_base + 0x1fcc)
+ + (*((char *)bios_base + 0x1fcd) << 8);
+ break;
+ }
+
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+
+ for (flag = 0, i = 0; !flag && i < PORT_COUNT; i++) {
+ if (base == ports[i])
+ ++flag;
+ }
+
+ if (flag && fdomain_is_valid_port( base )) {
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+ return 1;
+ }
+
+ /* This is a bad sign. It usually means that someone patched the
+ BIOS signature list (the signatures variable) to contain a BIOS
+ signature for a board *OTHER THAN* the TMC-1660/TMC-1680. */
+
+#if DEBUG_DETECT
+ printk( " RAM FAILED, " );
+#endif
+ }
+
+ /* Anyway, the alternative to finding the address in the RAM is to just
+ search through every possible port address for one that is attached
+ to the Future Domain card. Don't panic, though, about reading all
+ these random port addresses -- there are rumors that the Future
+ Domain BIOS does something very similar.
+
+ Do not, however, check ports which the kernel knows are being used by
+ another driver. */
+
+ for (i = 0; i < PORT_COUNT; i++) {
+ base = ports[i];
+ if (check_region( base, 0x10 )) {
+#if DEBUG_DETECT
+ printk( " (%x inuse),", base );
+#endif
+ continue;
+ }
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+ if ((flag = fdomain_is_valid_port( base ))) break;
+ }
+
+ if (!flag) return 0; /* iobase not found */
+
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+
+ return 1; /* success */
+}
+
+/* Fallback PCI detection when no PCI BIOS32 is available: scan I/O
+   addresses downward from 0xfff8 to 0xe008 in steps of 8, skipping
+   regions already claimed by other drivers.  Returns 1 and fills
+   *irq/*iobase on success, 0 otherwise. */
+static int fdomain_pci_nobios_detect( int *irq, int *iobase )
+{
+ int i;
+ int flag = 0;
+
+ /* The proper way of doing this is to use ask the PCI bus for the device
+ IRQ and interrupt level. But we can't do that if PCI BIOS32 support
+ isn't compiled into the kernel, or if a PCI BIOS32 isn't present.
+
+ Instead, we scan down a bunch of addresses (Future Domain tech
+ support says we will probably find the address before we get to
+ 0xf800). This works fine on some systems -- other systems may have
+ to scan more addresses. If you have to modify this section for your
+ installation, please send mail to faith@cs.unc.edu. */
+
+ for (i = 0xfff8; i > 0xe000; i -= 8) {
+ if (check_region( i, 0x10 )) {
+#if DEBUG_DETECT
+ printk( " (%x inuse)," , i );
+#endif
+ continue;
+ }
+ if ((flag = fdomain_is_valid_port( i ))) break;
+ }
+
+ if (!flag) return 0; /* iobase not found */
+
+ *irq = fdomain_get_irq( i );
+ *iobase = i;
+
+ return 1; /* success */
+}
+
+/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
+ iobase) This function gets the Interrupt Level and I/O base address from
+ the PCI configuration registers. The I/O base address is masked with
+ 0xfff8 since on my card the address read from the PCI config registers
+ is off by one from the actual I/O base address necessary for accessing
+ the status and control registers on the card (PCI config register gives
+ 0xf801, actual address is 0xf800). This is likely a bug in the FD
+ config code that writes to the PCI registers, however using a mask
+ should be safe since I think the scan done by the card to determine the
+ I/O base is done in increments of 8 (i.e., 0xf800, 0xf808, ...), at
+ least the old scan code we used to use to get the I/O base did... Also,
+ the device ID from the PCI config registers is 0x0 and should be 0x60e9
+ as it is in the status registers (offset 5 from I/O base). If this is
+ changed in future hardware/BIOS changes it will need to be fixed in this
+ detection function. Comments, bug reports, etc... on this function
+ should be sent to mckinley@msupa.pa.msu.edu - James T. McKinley. */
+
+#ifdef CONFIG_PCI
+/* PCI detection via the PCI BIOS32 config space: find the TMC-36C70's
+   device function, then read PCI_BASE_ADDRESS_0 and PCI_INTERRUPT_LINE.
+   Falls back to fdomain_pci_nobios_detect() when no PCI BIOS exists.
+   Returns 1 and fills *irq/*iobase on success, 0 otherwise. */
+static int fdomain_pci_bios_detect( int *irq, int *iobase )
+{
+ int error;
+ unsigned char pci_bus, pci_dev_fn; /* PCI bus & device function */
+ unsigned char pci_irq; /* PCI interrupt line */
+ unsigned int pci_base; /* PCI I/O base address */
+ unsigned short pci_vendor, pci_device; /* PCI vendor & device IDs */
+
+ /* If the PCI BIOS doesn't exist, use the old-style detection routines.
+ Otherwise, get the I/O base address and interrupt from the PCI config
+ registers. */
+
+ if (!pcibios_present()) return fdomain_pci_nobios_detect( irq, iobase );
+
+#if DEBUG_DETECT
+ /* Tell how to print a list of the known PCI devices from bios32 and
+ list vendor and device IDs being used if in debug mode. */
+
+ printk( "\nINFO: cat /proc/pci to see list of PCI devices from bios32\n" );
+ printk( "\nTMC-3260 detect:"
+ " Using PCI Vendor ID: 0x%x, PCI Device ID: 0x%x\n",
+ PCI_VENDOR_ID_FD,
+ PCI_DEVICE_ID_FD_36C70 );
+#endif
+
+ /* We will have to change this if more than 1 PCI bus is present and the
+ FD scsi host is not on the first bus (i.e., a PCI to PCI bridge,
+ which is not supported by bios32 right now anyway). This should
+ probably be done by a call to pcibios_find_device but I can't get it
+ to work... Also the device ID reported from the PCI config registers
+ does not match the device ID quoted in the tech manual or available
+ from offset 5 from the I/O base address. It should be 0x60E9, but it
+ is 0x0 if read from the PCI config registers. I guess the FD folks
+ neglected to write it to the PCI registers... This loop is necessary
+ to get the device function (at least until someone can get
+ pcibios_find_device to work, I cannot but 53c7,8xx.c uses it...). */
+
+ pci_bus = 0;
+
+ /* NOTE(review): two quirks in this scan.  (1) The first FD-vendor
+    function with a non-matching device ID causes an immediate
+    `return 0`, aborting the rest of the scan.  (2) If no FD vendor
+    is found at all, the loop falls through with pci_dev_fn == 0xff
+    and the config reads below still execute -- confirm both are
+    acceptable on the intended hardware. */
+ for (pci_dev_fn = 0x0; pci_dev_fn < 0xff; pci_dev_fn++) {
+ pcibios_read_config_word( pci_bus,
+ pci_dev_fn,
+ PCI_VENDOR_ID,
+ &pci_vendor );
+
+ if (pci_vendor == PCI_VENDOR_ID_FD) {
+ pcibios_read_config_word( pci_bus,
+ pci_dev_fn,
+ PCI_DEVICE_ID,
+ &pci_device );
+
+ if (pci_device == PCI_DEVICE_ID_FD_36C70) {
+ /* Break out once we have the correct device. If other FD
+ PCI devices are added to this driver we will need to add
+ an or of the other PCI_DEVICE_ID_FD_XXXXX's here. */
+ break;
+ } else {
+ /* If we can't find an FD scsi card we give up. */
+ return 0;
+ }
+ }
+ }
+
+#if DEBUG_DETECT
+ printk( "Future Domain 36C70 : at PCI bus %u, device %u, function %u\n",
+ pci_bus,
+ (pci_dev_fn & 0xf8) >> 3,
+ pci_dev_fn & 7 );
+#endif
+
+ /* We now have the appropriate device function for the FD board so we
+ just read the PCI config info from the registers. */
+
+ if ((error = pcibios_read_config_dword( pci_bus,
+ pci_dev_fn,
+ PCI_BASE_ADDRESS_0,
+ &pci_base ))
+ || (error = pcibios_read_config_byte( pci_bus,
+ pci_dev_fn,
+ PCI_INTERRUPT_LINE,
+ &pci_irq ))) {
+ printk ( "PCI ERROR: Future Domain 36C70 not initializing"
+ " due to error reading configuration space\n" );
+ return 0;
+ } else {
+#if DEBUG_DETECT
+ printk( "TMC-3260 PCI: IRQ = %u, I/O base = 0x%lx\n",
+ pci_irq, pci_base );
+#endif
+
+ /* Now we have the I/O base address and interrupt from the PCI
+ configuration registers. Unfortunately it seems that the I/O base
+ address is off by one on my card so I mask it with 0xfff8. This
+ must be some kind of goof in the FD code that does the autoconfig
+ and writes to the PCI registers (or maybe I just don't understand
+ something). If they fix it in later versions of the card or BIOS
+ we may have to adjust the address based on the signature or
+ something... */
+
+ *irq = pci_irq;
+ *iobase = (pci_base & 0xfff8);
+
+#if DEBUG_DETECT
+ /* NOTE(review): the message below says 0xff00 but the mask
+    actually applied above is 0xfff8. */
+ printk( "TMC-3260 fix: Masking I/O base address with 0xff00.\n" );
+ printk( "TMC-3260: IRQ = %d, I/O base = 0x%x\n", *irq, *iobase );
+#endif
+
+ if (!fdomain_is_valid_port( *iobase )) return 0;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+/* Probe entry point called by the SCSI mid-layer.  Locates the board
+   either from LILO overrides (setup_called) or via a BIOS signature
+   scan followed by ISA/PCI port detection, precomputes the absolute
+   register addresses, resets the bus, runs the loopback self-test,
+   chooses the host SCSI id, and registers the host, IRQ, and I/O
+   region.  Returns 1 if an adapter was found, 0 otherwise (at most
+   one adapter is supported). */
+int fdomain_16x0_detect( Scsi_Host_Template *tpnt )
+{
+ int i, j;
+ int retcode;
+ struct Scsi_Host *shpnt;
+#if DO_DETECT
+ const int buflen = 255;
+ Scsi_Cmnd SCinit;
+ unsigned char do_inquiry[] = { INQUIRY, 0, 0, 0, buflen, 0 };
+ unsigned char do_request_sense[] = { REQUEST_SENSE, 0, 0, 0, buflen, 0 };
+ unsigned char do_read_capacity[] = { READ_CAPACITY,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ unsigned char buf[buflen];
+#endif
+
+#if DEBUG_DETECT
+ printk( "fdomain_16x0_detect()," );
+#endif
+ tpnt->proc_dir = &proc_scsi_fdomain;
+
+ if (setup_called) {
+#if DEBUG_DETECT
+ printk( "no BIOS, using port_base = 0x%x, irq = %d\n",
+ port_base, interrupt_level );
+#endif
+ if (!fdomain_is_valid_port( port_base )) {
+ printk( "fdomain: cannot locate chip at port base 0x%x\n",
+ port_base );
+ printk( "fdomain: bad LILO parameters?\n" );
+ return 0;
+ }
+ } else {
+ int flag = 0;
+
+ /* Scan each candidate ROM address against every known BIOS
+    signature; the first match fixes bios_base and the version/
+    variant globals. */
+ for (i = 0; !bios_base && i < ADDRESS_COUNT; i++) {
+#if DEBUG_DETECT
+ printk( " %x(%x),", (unsigned)addresses[i], (unsigned)bios_base );
+#endif
+ for (j = 0; !bios_base && j < SIGNATURE_COUNT; j++) {
+ if (!memcmp( ((char *)addresses[i] + signatures[j].sig_offset),
+ signatures[j].signature, signatures[j].sig_length )) {
+ bios_major = signatures[j].major_bios_version;
+ bios_minor = signatures[j].minor_bios_version;
+ PCI_bus = (signatures[j].flag == 1);
+ Quantum = (signatures[j].flag > 1) ? signatures[j].flag : 0;
+ bios_base = addresses[i];
+ }
+ }
+ }
+
+ if (!bios_base) {
+#if DEBUG_DETECT
+ printk( " FAILED: NO BIOS\n" );
+#endif
+ return 0;
+ }
+
+ if (!PCI_bus) {
+ flag = fdomain_isa_detect( &interrupt_level, &port_base );
+ } else {
+#ifdef CONFIG_PCI
+ flag = fdomain_pci_bios_detect( &interrupt_level, &port_base );
+#else
+ flag = fdomain_pci_nobios_detect( &interrupt_level, &port_base );
+#endif
+ }
+
+ if (!flag) {
+#if DEBUG_DETECT
+ printk( " FAILED: NO PORT\n" );
+#endif
+#ifdef CONFIG_PCI
+ printk( "\nTMC-3260 36C70 PCI scsi chip detection failed.\n" );
+ printk( "Send mail to mckinley@msupa.pa.msu.edu.\n" );
+#endif
+ return 0; /* Cannot find valid set of ports */
+ }
+ }
+
+ /* Cache the absolute register addresses for the hot paths. */
+ SCSI_Mode_Cntl_port = port_base + SCSI_Mode_Cntl;
+ FIFO_Data_Count_port = port_base + FIFO_Data_Count;
+ Interrupt_Cntl_port = port_base + Interrupt_Cntl;
+ Interrupt_Status_port = port_base + Interrupt_Status;
+ Read_FIFO_port = port_base + Read_FIFO;
+ Read_SCSI_Data_port = port_base + Read_SCSI_Data;
+ SCSI_Cntl_port = port_base + SCSI_Cntl;
+ SCSI_Data_NoACK_port = port_base + SCSI_Data_NoACK;
+ SCSI_Status_port = port_base + SCSI_Status;
+ TMC_Cntl_port = port_base + TMC_Cntl;
+ TMC_Status_port = port_base + TMC_Status;
+ Write_FIFO_port = port_base + Write_FIFO;
+ Write_SCSI_Data_port = port_base + Write_SCSI_Data;
+
+ fdomain_16x0_reset( NULL, 0 );
+
+ if (fdomain_test_loopback()) {
+#if DEBUG_DETECT
+ printk( "fdomain: LOOPBACK TEST FAILED, FAILING DETECT!\n" );
+#endif
+ if (setup_called) {
+ printk( "fdomain: loopback test failed at port base 0x%x\n",
+ port_base );
+ printk( "fdomain: bad LILO parameters?\n" );
+ }
+ return 0;
+ }
+
+ /* Choose the adapter's own SCSI id: LILO override, else 7 for PCI /
+    BIOS >= 3.2 / no BIOS, else the legacy id 6. */
+ if (this_id) {
+ tpnt->this_id = (this_id & 0x07);
+ adapter_mask = (1 << tpnt->this_id);
+ } else {
+ if (PCI_bus || (bios_major == 3 && bios_minor >= 2) || bios_major < 0) {
+ tpnt->this_id = 7;
+ adapter_mask = 0x80;
+ } else {
+ tpnt->this_id = 6;
+ adapter_mask = 0x40;
+ }
+ }
+
+ /* Print out a banner here in case we can't
+ get resources. */
+
+ /* NOTE(review): shpnt is dereferenced without a NULL check --
+    confirm scsi_register() cannot fail in this kernel. */
+ shpnt = scsi_register( tpnt, 0 );
+ shpnt->irq = interrupt_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+ print_banner( shpnt );
+
+ /* Log IRQ with kernel */
+ if (!interrupt_level) {
+ panic( "fdomain: *NO* interrupt level selected!\n" );
+ } else {
+ /* Register the IRQ with the kernel */
+
+ retcode = request_irq( interrupt_level,
+ fdomain_16x0_intr, SA_INTERRUPT, "fdomain", NULL);
+
+ if (retcode < 0) {
+ if (retcode == -EINVAL) {
+ printk( "fdomain: IRQ %d is bad!\n", interrupt_level );
+ printk( " This shouldn't happen!\n" );
+ printk( " Send mail to faith@cs.unc.edu\n" );
+ } else if (retcode == -EBUSY) {
+ printk( "fdomain: IRQ %d is already in use!\n", interrupt_level );
+ printk( " Please use another IRQ!\n" );
+ } else {
+ printk( "fdomain: Error getting IRQ %d\n", interrupt_level );
+ printk( " This shouldn't happen!\n" );
+ printk( " Send mail to faith@cs.unc.edu\n" );
+ }
+ panic( "fdomain: Driver requires interruptions\n" );
+ }
+ }
+
+ /* Log I/O ports with kernel */
+ request_region( port_base, 0x10, "fdomain" );
+
+#if DO_DETECT
+
+ /* These routines are here because of the way the SCSI bus behaves after
+ a reset. This appropriate behavior was not handled correctly by the
+ higher level SCSI routines when I first wrote this driver. Now,
+ however, correct scan routines are part of scsi.c and these routines
+ are no longer needed. However, this code is still good for
+ debugging. */
+
+ SCinit.request_buffer = SCinit.buffer = buf;
+ SCinit.request_bufflen = SCinit.bufflen = sizeof(buf)-1;
+ SCinit.use_sg = 0;
+ SCinit.lun = 0;
+
+ printk( "fdomain: detection routine scanning for devices:\n" );
+ for (i = 0; i < 8; i++) {
+ SCinit.target = i;
+ if (i == tpnt->this_id) /* Skip host adapter */
+ continue;
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ memcpy(SCinit.cmnd, do_inquiry, sizeof(do_inquiry));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ printk( " SCSI ID %d: ", i );
+ /* NOTE(review): `buf[j] >= 20` is decimal 20; a printable-
+    ASCII test would be 0x20 -- confirm intent (debug-only). */
+ for (j = 8; j < (buf[4] < 32 ? buf[4] : 32); j++)
+ printk( "%c", buf[j] >= 20 ? buf[j] : ' ' );
+ memcpy(SCinit.cmnd, do_read_capacity, sizeof(do_read_capacity));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ unsigned long blocks, size, capacity;
+
+ blocks = (buf[0] << 24) | (buf[1] << 16)
+ | (buf[2] << 8) | buf[3];
+ size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
+ capacity = +( +(blocks / 1024L) * +(size * 10L)) / 1024L;
+
+ printk( "%lu MB (%lu byte blocks)",
+ ((capacity + 5L) / 10L), size );
+ } else {
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ }
+ printk ("\n" );
+ } else {
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ }
+ }
+ }
+#endif
+
+ return 1; /* Maximum of one adapter will be detected. */
+}
+
+/* Return a human-readable driver description string.  If the VERSION
+   macro looks like an RCS "$Revision: x.y $" string, the version number
+   between the ':' and the '$' is extracted; otherwise VERSION is
+   appended verbatim.  The result lives in a static buffer (callers
+   must not free it; not reentrant). */
+const char *fdomain_16x0_info( struct Scsi_Host *ignore )
+{
+ static char buffer[80];
+ char *pt;
+
+ strcpy( buffer, "Future Domain TMC-16x0 SCSI driver, version" );
+ if (strchr( VERSION, ':')) { /* Assume VERSION is an RCS Revision string */
+ strcat( buffer, strchr( VERSION, ':' ) + 1 );
+ /* BUGFIX: the old code computed strrchr(...) - 1 before testing for
+    NULL, so the NULL check could never fire (and NULL - 1 is
+    undefined behavior).  Check first, then back up to the character
+    before the '$'. */
+ pt = strrchr( buffer, '$');
+ if (!pt) /* Stripped RCS Revision string? */
+ pt = buffer + strlen( buffer ) - 1;
+ else
+ --pt;
+ if (*pt != ' ')
+ ++pt;
+ *pt = '\0';
+ } else { /* Assume VERSION is a number */
+ strcat( buffer, " " VERSION );
+ }
+
+ return buffer;
+}
+
+ /* First pass at /proc information routine. */
+/*
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+/* /proc read handler: emits the driver info string (see the parameter
+   description in the comment block above).  Writes are rejected with
+   -ENOSYS.  Returns the number of valid bytes placed at *start. */
+int fdomain_16x0_proc_info( char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout )
+{
+ const char *info = fdomain_16x0_info( NULL );
+ int len;
+ int pos;
+ int begin;
+
+ if (inout) return(-ENOSYS);
+
+ begin = 0;
+ strcpy( buffer, info );
+ strcat( buffer, "\n" );
+
+ pos = len = strlen( buffer );
+
+ if(pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ /* NOTE(review): for offsets past the end of the data, len goes
+    negative and is returned as-is -- presumably the caller treats
+    that as EOF/error; confirm against the proc_info contract. */
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin);
+ if(len > length) len = length;
+
+ return(len);
+}
+
+/* Compiled out (#if 0): polled bus arbitration helper kept for
+   reference.  Asserts our id on the bus, starts arbitration via
+   TMC_Cntl, and polls TMC_Status for up to ~500 ms.  Returns 0 on
+   winning arbitration, 1 on timeout (bus left idle).  The jiffies
+   timeout has the same wraparound caveat as do_pause(). */
+#if 0
+static int fdomain_arbitrate( void )
+{
+ int status = 0;
+ unsigned long timeout;
+
+#if EVERY_ACCESS
+ printk( "fdomain_arbitrate()\n" );
+#endif
+
+ outb( 0x00, SCSI_Cntl_port ); /* Disable data drivers */
+ outb( adapter_mask, port_base + SCSI_Data_NoACK ); /* Set our id bit */
+ outb( 0x04 | PARITY_MASK, TMC_Cntl_port ); /* Start arbitration */
+
+ timeout = jiffies + 50; /* 500 mS */
+ while (jiffies < timeout) {
+ status = inb( TMC_Status_port ); /* Read adapter status */
+ if (status & 0x02) /* Arbitration complete */
+ return 0;
+ }
+
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+
+#if EVERY_ACCESS
+ printk( "Arbitration failed, status = %x\n", status );
+#endif
+#if ERRORS_ONLY
+ printk( "fdomain: Arbitration failed, status = %x\n", status );
+#endif
+ return 1;
+}
+#endif
+
+/* Select `target` on the SCSI bus: assert Bus Enable + Select with our
+   id and the target's id, then poll SCSI status for BSY for up to
+   ~350 ms.  Returns 0 on success (bus left enabled), 1 on timeout
+   (bus returned to idle).  The static `flag` suppresses only the very
+   first target-0 failure message after boot. */
+static int fdomain_select( int target )
+{
+ int status;
+ unsigned long timeout;
+ static int flag = 0;
+
+
+ outb( 0x82, SCSI_Cntl_port ); /* Bus Enable + Select */
+ outb( adapter_mask | (1 << target), SCSI_Data_NoACK_port );
+
+ /* Stop arbitration and enable parity */
+ outb( PARITY_MASK, TMC_Cntl_port );
+
+ timeout = jiffies + 35; /* 350mS -- because of timeouts
+ (was 250mS) */
+
+ while (jiffies < timeout) {
+ status = inb( SCSI_Status_port ); /* Read adapter status */
+ if (status & 1) { /* Busy asserted */
+ /* Enable SCSI Bus (on error, should make bus idle with 0) */
+ outb( 0x80, SCSI_Cntl_port );
+ return 0;
+ }
+ }
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+#if EVERY_ACCESS
+ if (!target) printk( "Selection failed\n" );
+#endif
+#if ERRORS_ONLY
+ if (!target) {
+ if (!flag) /* Skip first failure for all chips. */
+ ++flag;
+ else
+ printk( "fdomain: Selection failed\n" );
+ }
+#endif
+ return 1;
+}
+
+/* Complete the in-flight command: clear in_command, quiesce the chip
+   (mask interrupts, idle the bus), store `error` in current_SC->result,
+   and invoke the mid-layer completion callback.  Panics if called with
+   no command in progress or with no scsi_done callback set. */
+void my_done( int error )
+{
+ if (in_command) {
+ in_command = 0;
+ outb( 0x00, Interrupt_Cntl_port );
+ fdomain_make_bus_idle();
+ current_SC->result = error;
+ if (current_SC->scsi_done)
+ current_SC->scsi_done( current_SC );
+ else panic( "fdomain: current_SC->scsi_done() == NULL" );
+ } else {
+ panic( "fdomain: my_done() called outside of command\n" );
+ }
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+}
+
/* Interrupt service routine for the TMC-16x0 boards.  Drives the SCSI bus
   state machine for the single outstanding command: completes arbitration,
   performs (re)selection, then services the information-transfer phases
   (command, data, status, message) until the target sends COMMAND COMPLETE,
   at which point my_done() finishes the command.  The irq/dev_id/regs
   arguments follow the Linux interrupt-handler signature and are unused. */
void fdomain_16x0_intr( int irq, void *dev_id, struct pt_regs * regs )
{
   int      status;
   int      done = 0;   /* Set when a MESSAGE IN of zero (COMMAND COMPLETE) is read */
   unsigned data_count;

   /* The fdomain_16x0_intr is only called via
      the interrupt handler.  The goal of the
      sti() here is to allow other
      interruptions while this routine is
      running. */

   sti();			/* Yes, we really want sti() here */

   outb( 0x00, Interrupt_Cntl_port );

   /* We usually have one spurious interrupt after each command.  Ignore it. */
   if (!in_command || !current_SC) {	/* Spurious interrupt */
#if EVERY_ACCESS
      printk( "Spurious interrupt, in_command = %d, current_SC = %x\n",
	      in_command, current_SC );
#endif
      return;
   }

   /* Abort calls my_done, so we do nothing here. */
   if (current_SC->SCp.phase & aborted) {
#if DEBUG_ABORT
      printk( "Interrupt after abort, ignoring\n" );
#endif
      /* NOTE(review): the early return below is deliberately commented out,
	 so interrupts arriving after an abort are still processed by the
	 state machine rather than dropped. */
      /*
      return; */
   }

#if DEBUG_RACE
   ++in_interrupt_flag;
#endif

   if (current_SC->SCp.phase & in_arbitration) {
      status = inb( TMC_Status_port );	/* Read adapter status */
      if (!(status & 0x02)) {		/* Arbitration not won */
#if EVERY_ACCESS
	 printk( " AFAIL " );
#endif
	 my_done( DID_BUS_BUSY << 16 );
	 return;
      }
      current_SC->SCp.phase = in_selection;

      outb( 0x40 | FIFO_COUNT, Interrupt_Cntl_port );

      outb( 0x82, SCSI_Cntl_port );	/* Bus Enable + Select */
      outb( adapter_mask | (1 << current_SC->target), SCSI_Data_NoACK_port );

      /* Stop arbitration and enable parity */
      outb( 0x10 | PARITY_MASK, TMC_Cntl_port );
#if DEBUG_RACE
      in_interrupt_flag = 0;
#endif
      return;
   } else if (current_SC->SCp.phase & in_selection) {
      status = inb( SCSI_Status_port );
      if (!(status & 0x01)) {		/* Target has not responded yet */
	 /* Try again, for slow devices */
	 if (fdomain_select( current_SC->target )) {
#if EVERY_ACCESS
	    printk( " SFAIL " );
#endif
	    my_done( DID_NO_CONNECT << 16 );
	    return;
	 } else {
#if EVERY_ACCESS
	    printk( " AltSel " );
#endif
	    /* Stop arbitration and enable parity */
	    outb( 0x10 | PARITY_MASK, TMC_Cntl_port );
	 }
      }
      current_SC->SCp.phase = in_other;
      outb( 0x90 | FIFO_COUNT, Interrupt_Cntl_port );
      outb( 0x80, SCSI_Cntl_port );
#if DEBUG_RACE
      in_interrupt_flag = 0;
#endif
      return;
   }

   /* current_SC->SCp.phase == in_other: this is the body of the routine */

   status = inb( SCSI_Status_port );

   if (status & 0x10) {	/* REQ */

      /* Decode the current bus phase from the status bits and service it */
      switch (status & 0x0e) {

      case 0x08:		/* COMMAND OUT */
	 outb( current_SC->cmnd[current_SC->SCp.sent_command++],
	       Write_SCSI_Data_port );
#if EVERY_ACCESS
	 printk( "CMD = %x,",
		 current_SC->cmnd[ current_SC->SCp.sent_command - 1] );
#endif
	 break;
      case 0x00:		/* DATA OUT -- tmc18c50/tmc18c30 only */
	 if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
	    current_SC->SCp.have_data_in = -1;
	    outb( 0xd0 | PARITY_MASK, TMC_Cntl_port );
	 }
	 break;
      case 0x04:		/* DATA IN -- tmc18c50/tmc18c30 only */
	 if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
	    current_SC->SCp.have_data_in = 1;
	    outb( 0x90 | PARITY_MASK, TMC_Cntl_port );
	 }
	 break;
      case 0x0c:		/* STATUS IN */
	 current_SC->SCp.Status = inb( Read_SCSI_Data_port );
#if EVERY_ACCESS
	 printk( "Status = %x, ", current_SC->SCp.Status );
#endif
#if ERRORS_ONLY
	 /* Report anything other than GOOD, CHECK CONDITION (2), or BUSY (8) */
	 if (current_SC->SCp.Status
	     && current_SC->SCp.Status != 2
	     && current_SC->SCp.Status != 8) {
	    printk( "fdomain: target = %d, command = %x, status = %x\n",
		    current_SC->target,
		    current_SC->cmnd[0],
		    current_SC->SCp.Status );
	 }
#endif
	 break;
      case 0x0a:		/* MESSAGE OUT */
	 outb( MESSAGE_REJECT, Write_SCSI_Data_port );	/* Reject */
	 break;
      case 0x0e:		/* MESSAGE IN */
	 current_SC->SCp.Message = inb( Read_SCSI_Data_port );
#if EVERY_ACCESS
	 printk( "Message = %x, ", current_SC->SCp.Message );
#endif
	 /* Message 0 is COMMAND COMPLETE: finish the command below */
	 if (!current_SC->SCp.Message) ++done;
#if DEBUG_MESSAGES || EVERY_ACCESS
	 if (current_SC->SCp.Message) {
	    printk( "fdomain: message = %x\n", current_SC->SCp.Message );
	 }
#endif
	 break;
      }
   }

   /* The tmc1800 cannot detect the data direction itself; once the whole
      CDB has been sent, pick the FIFO direction from the opcode. */
   if (chip == tmc1800
       && !current_SC->SCp.have_data_in
       && (current_SC->SCp.sent_command
	   >= current_SC->cmd_len)) {
      /* We have to get the FIFO direction
	 correct, so I've made a table based
	 on the SCSI Standard of which commands
	 appear to require a DATA OUT phase.
       */
      /*
	p. 94: Command for all device types
	CHANGE DEFINITION            40 DATA OUT
	COMPARE                      39 DATA OUT
	COPY                         18 DATA OUT
	COPY AND VERIFY              3a DATA OUT
	INQUIRY                      12
	LOG SELECT                   4c DATA OUT
	LOG SENSE                    4d
	MODE SELECT (6)              15 DATA OUT
	MODE SELECT (10)             55 DATA OUT
	MODE SENSE (6)               1a
	MODE SENSE (10)              5a
	READ BUFFER                  3c
	RECEIVE DIAGNOSTIC RESULTS   1c
	REQUEST SENSE                03
	SEND DIAGNOSTIC              1d DATA OUT
	TEST UNIT READY              00
	WRITE BUFFER                 3b DATA OUT

	p.178: Commands for direct-access devices (not listed on p. 94)
	FORMAT UNIT                  04 DATA OUT
	LOCK-UNLOCK CACHE            36
	PRE-FETCH                    34
	PREVENT-ALLOW MEDIUM REMOVAL 1e
	READ (6)/RECEIVE             08
	READ (10)                    3c
	READ CAPACITY                25
	READ DEFECT DATA (10)        37
	READ LONG                    3e
	REASSIGN BLOCKS              07 DATA OUT
	RELEASE                      17
	RESERVE                      16 DATA OUT
	REZERO UNIT/REWIND           01
	SEARCH DATA EQUAL (10)       31 DATA OUT
	SEARCH DATA HIGH (10)        30 DATA OUT
	SEARCH DATA LOW (10)         32 DATA OUT
	SEEK (6)                     0b
	SEEK (10)                    2b
	SET LIMITS (10)              33
	START STOP UNIT              1b
	SYNCHRONIZE CACHE            35
	VERIFY (10)                  2f
	WRITE (6)/PRINT/SEND         0a DATA OUT
	WRITE (10)/SEND              2a DATA OUT
	WRITE AND VERIFY (10)        2e DATA OUT
	WRITE LONG                   3f DATA OUT
	WRITE SAME                   41 DATA OUT ?

	p. 261: Commands for sequential-access devices (not previously listed)
	ERASE                        19
	LOAD UNLOAD                  1b
	LOCATE                       2b
	READ BLOCK LIMITS            05
	READ POSITION                34
	READ REVERSE                 0f
	RECOVER BUFFERED DATA        14
	SPACE                        11
	WRITE FILEMARKS              10 ?

	p. 298: Commands for printer devices (not previously listed)
	****** NOT SUPPORTED BY THIS DRIVER, since 0b is SEEK (6) *****
	SLEW AND PRINT               0b DATA OUT  -- same as seek
	STOP PRINT                   1b
	SYNCHRONIZE BUFFER           10

	p. 315: Commands for processor devices (not previously listed)

	p. 321: Commands for write-once devices (not previously listed)
	MEDIUM SCAN                  38
	READ (12)                    a8
	SEARCH DATA EQUAL (12)       b1 DATA OUT
	SEARCH DATA HIGH (12)        b0 DATA OUT
	SEARCH DATA LOW (12)         b2 DATA OUT
	SET LIMITS (12)              b3
	VERIFY (12)                  af
	WRITE (12)                   aa DATA OUT
	WRITE AND VERIFY (12)        ae DATA OUT

	p. 332: Commands for CD-ROM devices (not previously listed)
	PAUSE/RESUME                 4b
	PLAY AUDIO (10)              45
	PLAY AUDIO (12)              a5
	PLAY AUDIO MSF               47
	PLAY TRACK RELATIVE (10)     49
	PLAY TRACK RELATIVE (12)     a9
	READ HEADER                  44
	READ SUB-CHANNEL             42
	READ TOC                     43

	p. 370: Commands for scanner devices (not previously listed)
	GET DATA BUFFER STATUS       34
	GET WINDOW                   25
	OBJECT POSITION              31
	SCAN                         1b
	SET WINDOW                   24 DATA OUT

	p. 391: Commands for optical memory devices (not listed)
	ERASE (10)                   2c
	ERASE (12)                   ac
	MEDIUM SCAN                  38 DATA OUT
	READ DEFECT DATA (12)        b7
	READ GENERATION              29
	READ UPDATED BLOCK           2d
	UPDATE BLOCK                 3d DATA OUT

	p. 419: Commands for medium changer devices (not listed)
	EXCHANGE MEDIUM              46
	INITIALIZE ELEMENT STATUS    07
	MOVE MEDIUM                  a5
	POSITION TO ELEMENT          2b
	READ ELEMENT STATUS          b8
	REQUEST VOL. ELEMENT ADDRESS b5
	SEND VOLUME TAG              b6 DATA OUT

	p. 454: Commands for communications devices (not listed previously)
	GET MESSAGE (6)              08
	GET MESSAGE (10)             28
	GET MESSAGE (12)             a8
      */

      switch (current_SC->cmnd[0]) {
      case CHANGE_DEFINITION: case COMPARE:         case COPY:
      case COPY_VERIFY:       case LOG_SELECT:      case MODE_SELECT:
      case MODE_SELECT_10:    case SEND_DIAGNOSTIC: case WRITE_BUFFER:

      case FORMAT_UNIT:       case REASSIGN_BLOCKS: case RESERVE:
      case SEARCH_EQUAL:      case SEARCH_HIGH:     case SEARCH_LOW:
      case WRITE_6:           case WRITE_10:        case WRITE_VERIFY:
      case 0x3f:              case 0x41:

      case 0xb1:              case 0xb0:            case 0xb2:
      case 0xaa:              case 0xae:

      case 0x24:

      case 0x38:              case 0x3d:

      case 0xb6:

      case 0xea:		/* alternate number for WRITE LONG */

	 /* DATA OUT: set FIFO to write */
	 current_SC->SCp.have_data_in = -1;
	 outb( 0xd0 | PARITY_MASK, TMC_Cntl_port );
	 break;

      case 0x00:
      default:

	 /* DATA IN (the safe default): set FIFO to read */
	 current_SC->SCp.have_data_in = 1;
	 outb( 0x90 | PARITY_MASK, TMC_Cntl_port );
	 break;
      }
   }

   /* Pseudo-DMA: feed the write FIFO while it has more than 512 bytes free */
   if (current_SC->SCp.have_data_in == -1) { /* DATA OUT */
      while ( (data_count = FIFO_Size - inw( FIFO_Data_Count_port )) > 512 ) {
#if EVERY_ACCESS
	 printk( "DC=%d, ", data_count ) ;
#endif
	 if (data_count > current_SC->SCp.this_residual)
	    data_count = current_SC->SCp.this_residual;
	 if (data_count > 0) {
#if EVERY_ACCESS
	    printk( "%d OUT, ", data_count );
#endif
	    if (data_count == 1) {
	       outb( *current_SC->SCp.ptr++, Write_FIFO_port );
	       --current_SC->SCp.this_residual;
	    } else {
	       data_count >>= 1;	/* bytes -> words for outsw */
	       outsw( Write_FIFO_port, current_SC->SCp.ptr, data_count );
	       current_SC->SCp.ptr += 2 * data_count;
	       current_SC->SCp.this_residual -= 2 * data_count;
	    }
	 }
	 /* Current scatter-gather segment exhausted: advance to the next */
	 if (!current_SC->SCp.this_residual) {
	    if (current_SC->SCp.buffers_residual) {
	       --current_SC->SCp.buffers_residual;
	       ++current_SC->SCp.buffer;
	       current_SC->SCp.ptr = current_SC->SCp.buffer->address;
	       current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
	    } else
	       break;
	 }
      }
   }

   /* Pseudo-DMA: drain the read FIFO while it reports pending bytes */
   if (current_SC->SCp.have_data_in == 1) { /* DATA IN */
      while ((data_count = inw( FIFO_Data_Count_port )) > 0) {
#if EVERY_ACCESS
	 printk( "DC=%d, ", data_count );
#endif
	 if (data_count > current_SC->SCp.this_residual)
	    data_count = current_SC->SCp.this_residual;
	 if (data_count) {
#if EVERY_ACCESS
	    printk( "%d IN, ", data_count );
#endif
	    if (data_count == 1) {
	       *current_SC->SCp.ptr++ = inb( Read_FIFO_port );
	       --current_SC->SCp.this_residual;
	    } else {
	       data_count >>= 1; /* Number of words */
	       insw( Read_FIFO_port, current_SC->SCp.ptr, data_count );
	       current_SC->SCp.ptr += 2 * data_count;
	       current_SC->SCp.this_residual -= 2 * data_count;
	    }
	 }
	 /* Advance to the next scatter-gather segment, if any */
	 if (!current_SC->SCp.this_residual
	     && current_SC->SCp.buffers_residual) {
	    --current_SC->SCp.buffers_residual;
	    ++current_SC->SCp.buffer;
	    current_SC->SCp.ptr = current_SC->SCp.buffer->address;
	    current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
	 }
      }
   }

   if (done) {
#if EVERY_ACCESS
      printk( " ** IN DONE %d ** ", current_SC->SCp.have_data_in );
#endif

#if ERRORS_ONLY
      /* After a successful REQUEST SENSE, log any sense data that is not
	 one of the routinely-ignored keys/codes */
      if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
	 if ((unsigned char)(*((char *)current_SC->request_buffer+2)) & 0x0f) {
	    unsigned char key;
	    unsigned char code;
	    unsigned char qualifier;

	    key = (unsigned char)(*((char *)current_SC->request_buffer + 2))
		  & 0x0f;
	    code = (unsigned char)(*((char *)current_SC->request_buffer + 12));
	    qualifier = (unsigned char)(*((char *)current_SC->request_buffer
					  + 13));

	    if (key != UNIT_ATTENTION
		&& !(key == NOT_READY
		     && code == 0x04
		     && (!qualifier || qualifier == 0x02 || qualifier == 0x01))
		&& !(key == ILLEGAL_REQUEST && (code == 0x25
						|| code == 0x24
						|| !code)))

	       printk( "fdomain: REQUEST SENSE "
		       "Key = %x, Code = %x, Qualifier = %x\n",
		       key, code, qualifier );
	 }
      }
#endif
#if EVERY_ACCESS
      printk( "BEFORE MY_DONE. . ." );
#endif
      /* Compose the mid-level result: status byte, message byte, host code */
      my_done( (current_SC->SCp.Status & 0xff)
	       | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
#if EVERY_ACCESS
      printk( "RETURNING.\n" );
#endif

   } else {
      /* Command not complete: re-arm the interrupt, honoring disconnects */
      if (current_SC->SCp.phase & disconnect) {
	 outb( 0xd0 | FIFO_COUNT, Interrupt_Cntl_port );
	 outb( 0x00, SCSI_Cntl_port );
      } else {
	 outb( 0x90 | FIFO_COUNT, Interrupt_Cntl_port );
      }
   }
#if DEBUG_RACE
   in_interrupt_flag = 0;
#endif
   return;
}
+
/* queuecommand entry point: begin execution of SCpnt and return at once;
   `done' is called (via my_done) from the interrupt handler when the
   command completes.  Only one command may be outstanding at a time --
   reentry panics.  Returns 0 on acceptance. */
int fdomain_16x0_queue( Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
{
   if (in_command) {
      panic( "fdomain: fdomain_16x0_queue() NOT REENTRANT!\n" );
   }
#if EVERY_ACCESS
   printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
	   SCpnt->target,
	   *(unsigned char *)SCpnt->cmnd,
	   SCpnt->use_sg,
	   SCpnt->request_bufflen );
#endif

   fdomain_make_bus_idle();

   current_SC = SCpnt;		/* Save this for the done function */
   current_SC->scsi_done = done;

   /* Initialize static data */

   /* Set up the data pointers: either the first scatter-gather segment
      or the single flat request buffer */
   if (current_SC->use_sg) {
      current_SC->SCp.buffer =
	 (struct scatterlist *)current_SC->request_buffer;
      current_SC->SCp.ptr = current_SC->SCp.buffer->address;
      current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
      current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
   } else {
      current_SC->SCp.ptr = (char *)current_SC->request_buffer;
      current_SC->SCp.this_residual = current_SC->request_bufflen;
      current_SC->SCp.buffer = NULL;
      current_SC->SCp.buffers_residual = 0;
   }


   current_SC->SCp.Status = 0;
   current_SC->SCp.Message = 0;
   current_SC->SCp.have_data_in = 0;	/* FIFO direction not yet chosen */
   current_SC->SCp.sent_command = 0;	/* No CDB bytes sent yet */
   current_SC->SCp.phase = in_arbitration;

   /* Start arbitration */
   outb( 0x00, Interrupt_Cntl_port );
   outb( 0x00, SCSI_Cntl_port );		/* Disable data drivers */
   outb( adapter_mask, SCSI_Data_NoACK_port );	/* Set our id bit */
   ++in_command;	/* Mark the adapter busy before enabling interrupts */
   outb( 0x20, Interrupt_Cntl_port );
   outb( 0x14 | PARITY_MASK, TMC_Cntl_port );	/* Start arbitration */

   return 0;
}
+
+/* The following code, which simulates the old-style command function, was
+ taken from Tommy Thorn's aha1542.c file. This code is Copyright (C)
+ 1992 Tommy Thorn. */
+
/* Completion state shared between fdomain_16x0_command() and
   internal_done(): the callback bumps the flag and stores the result;
   the synchronous wrapper polls it. */
static volatile int internal_done_flag = 0;
static volatile int internal_done_errcode = 0;

/* scsi_done callback for fdomain_16x0_command(): record the command's
   result and signal completion. */
static void internal_done( Scsi_Cmnd *SCpnt )
{
    internal_done_errcode = SCpnt->result;
    ++internal_done_flag;
}
+
/* Old-style synchronous command entry point: queue SCpnt with the
   internal_done() callback and busy-wait until the interrupt-driven
   state machine completes it, then return the SCSI result code. */
int fdomain_16x0_command( Scsi_Cmnd *SCpnt )
{
    fdomain_16x0_queue( SCpnt, internal_done );

    /* Spin until internal_done() fires from interrupt context */
    while (!internal_done_flag)
	;
    internal_done_flag = 0;	/* Reset for the next caller */
    return internal_done_errcode;
}
+
+/* End of code derived from Tommy Thorn's work. */
+
+void print_info( Scsi_Cmnd *SCpnt )
+{
+ unsigned int imr;
+ unsigned int irr;
+ unsigned int isr;
+
+ if (!SCpnt || !SCpnt->host) {
+ printk( "fdomain: cannot provide detailed information\n" );
+ }
+
+ printk( "%s\n", fdomain_16x0_info( SCpnt->host ) );
+ print_banner( SCpnt->host );
+ switch (SCpnt->SCp.phase) {
+ case in_arbitration: printk( "arbitration " ); break;
+ case in_selection: printk( "selection " ); break;
+ case in_other: printk( "other " ); break;
+ default: printk( "unknown " ); break;
+ }
+
+ printk( "(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->SCp.phase,
+ SCpnt->target,
+ *(unsigned char *)SCpnt->cmnd,
+ SCpnt->use_sg,
+ SCpnt->request_bufflen );
+ printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
+ SCpnt->SCp.sent_command,
+ SCpnt->SCp.have_data_in,
+ SCpnt->timeout );
+#if DEBUG_RACE
+ printk( "in_interrupt_flag = %d\n", in_interrupt_flag );
+#endif
+
+ imr = (inb( 0x0a1 ) << 8) + inb( 0x21 );
+ outb( 0x0a, 0xa0 );
+ irr = inb( 0xa0 ) << 8;
+ outb( 0x0a, 0x20 );
+ irr += inb( 0x20 );
+ outb( 0x0b, 0xa0 );
+ isr = inb( 0xa0 ) << 8;
+ outb( 0x0b, 0x20 );
+ isr += inb( 0x20 );
+
+ /* Print out interesting information */
+ printk( "IMR = 0x%04x", imr );
+ if (imr & (1 << interrupt_level))
+ printk( " (masked)" );
+ printk( ", IRR = 0x%04x, ISR = 0x%04x\n", irr, isr );
+
+ printk( "SCSI Status = 0x%02x\n", inb( SCSI_Status_port ) );
+ printk( "TMC Status = 0x%02x", inb( TMC_Status_port ) );
+ if (inb( TMC_Status_port & 1))
+ printk( " (interrupt)" );
+ printk( "\n" );
+ printk( "Interrupt Status = 0x%02x", inb( Interrupt_Status_port ) );
+ if (inb( Interrupt_Status_port ) & 0x08)
+ printk( " (enabled)" );
+ printk( "\n" );
+ if (chip == tmc18c50 || chip == tmc18c30) {
+ printk( "FIFO Status = 0x%02x\n", inb( port_base + FIFO_Status ) );
+ printk( "Int. Condition = 0x%02x\n",
+ inb( port_base + Interrupt_Cond ) );
+ }
+ printk( "Configuration 1 = 0x%02x\n", inb( port_base + Configuration1 ) );
+ if (chip == tmc18c50 || chip == tmc18c30)
+ printk( "Configuration 2 = 0x%02x\n",
+ inb( port_base + Configuration2 ) );
+}
+
/* Abort the currently executing command.  Returns SCSI_ABORT_NOT_RUNNING
   if no command is in progress; otherwise idles the bus, flags the
   command as aborted (checked by the ISR), and completes it with
   DID_ABORT. */
int fdomain_16x0_abort( Scsi_Cmnd *SCpnt)
{
   unsigned long flags;
#if EVERY_ACCESS || ERRORS_ONLY || DEBUG_ABORT
   printk( "fdomain: abort " );
#endif

   save_flags( flags );
   cli();			/* Keep the ISR out while we inspect state */
   if (!in_command) {
#if EVERY_ACCESS || ERRORS_ONLY
      printk( " (not in command)\n" );
#endif
      restore_flags( flags );
      return SCSI_ABORT_NOT_RUNNING;
   } else printk( "\n" );

#if DEBUG_ABORT
   print_info( SCpnt );
#endif

   fdomain_make_bus_idle();

   current_SC->SCp.phase |= aborted;	/* The ISR tests this flag */

   current_SC->result = DID_ABORT << 16;

   restore_flags( flags );

   /* Aborts are not done well. . . */
   my_done( DID_ABORT << 16 );

   return SCSI_ABORT_SUCCESS;
}
+
/* Reset the SCSI bus: pulse the reset line, wait for targets to recover,
   and reinitialize the TMC control registers.  Always returns
   SCSI_RESET_WAKEUP so the mid-level code retries outstanding commands. */
int fdomain_16x0_reset( Scsi_Cmnd *SCpnt, unsigned int flags )
{
#if DEBUG_RESET
   static int called_once = 0;
#endif

#if ERRORS_ONLY
   if (SCpnt) printk( "fdomain: SCSI Bus Reset\n" );
#endif

#if DEBUG_RESET
   if (called_once) print_info( current_SC );
   called_once = 1;
#endif

   outb( 1, SCSI_Cntl_port );		/* Assert SCSI reset */
   do_pause( 2 );			/* Hold it briefly */
   outb( 0, SCSI_Cntl_port );		/* Release reset */
   do_pause( 115 );			/* Give targets time to recover */
   outb( 0, SCSI_Mode_Cntl_port );
   outb( PARITY_MASK, TMC_Cntl_port );

   /* Unless this is the very first call (i.e., SCPnt == NULL), everything
      is probably hosed at this point.  We will, however, try to keep
      things going by informing the high-level code that we need help. */

   return SCSI_RESET_WAKEUP;
}
+
+#include "sd.h"
+#include <scsi/scsi_ioctl.h>
+
/* Compute a BIOS-compatible heads/sectors/cylinders geometry for `disk',
   returned in info_array[0..2].  For BIOS 2.x and 3.0-3.3, the geometry
   is read from the BIOS drive parameter table in adapter ROM; for 3.4+,
   it is derived from the on-disk partition table (or, failing that, from
   a capacity-based translation supplied by Future Domain). */
int fdomain_16x0_biosparam( Scsi_Disk *disk, kdev_t dev, int *info_array )
{
   int drive;
   unsigned char buf[512 + sizeof( int ) * 2];	/* ioctl header + one sector */
   int size = disk->capacity;
   int *sizes = (int *)buf;			/* [0]=out length, [1]=in length */
   unsigned char *data = (unsigned char *)(sizes + 2);	/* sector payload */
   unsigned char do_read[] = { READ_6, 0, 0, 0, 1, 0 };	/* read 1 block at LBA 0 */
   int retcode;
   struct drive_info {
      unsigned short cylinders;
      unsigned char heads;
      unsigned char sectors;
   } *i;

   /* NOTES:
      The RAM area starts at 0x1f00 from the bios_base address.

      For BIOS Version 2.0:

      The drive parameter table seems to start at 0x1f30.
      The first byte's purpose is not known.
      Next is the cylinder, head, and sector information.
      The last 4 bytes appear to be the drive's size in sectors.
      The other bytes in the drive parameter table are unknown.
      If anyone figures them out, please send me mail, and I will
      update these notes.

      Tape drives do not get placed in this table.

      There is another table at 0x1fea:
      If the byte is 0x01, then the SCSI ID is not in use.
      If the byte is 0x18 or 0x48, then the SCSI ID is in use,
      although tapes don't seem to be in this table.  I haven't
      seen any other numbers (in a limited sample).

      0x1f2d is a drive count (i.e., not including tapes)

      The table at 0x1fcc are I/O ports addresses for the various
      operations.  I calculate these by hand in this driver code.



      For the ISA-200S version of BIOS Version 2.0:

      The drive parameter table starts at 0x1f33.

      WARNING: Assume that the table entry is 25 bytes long.  Someone needs
      to check this for the Quantum ISA-200S card.



      For BIOS Version 3.2:

      The drive parameter table starts at 0x1f70.  Each entry is
      0x0a bytes long.  Heads are one less than we need to report.
    */

   drive = MINOR(dev) / 16;	/* 16 minors per disk */

   if (bios_major == 2) {
      /* Locate this drive's entry in the BIOS drive parameter table; the
	 table offset and entry stride vary by board variant */
      switch (Quantum) {
      case 2:			/* ISA_200S */
	 /* The value of 25 has never been verified.
	    It should probably be 15. */
	 i = (struct drive_info *)( (char *)bios_base + 0x1f33 + drive * 25 );
	 break;
      case 3:			/* ISA_250MG */
	 i = (struct drive_info *)( (char *)bios_base + 0x1f36 + drive * 15 );
	 break;
      case 4:			/* ISA_200S (another one) */
	 i = (struct drive_info *)( (char *)bios_base + 0x1f34 + drive * 15 );
	 break;
      default:
	 i = (struct drive_info *)( (char *)bios_base + 0x1f31 + drive * 25 );
	 break;
      }
      info_array[0] = i->heads;
      info_array[1] = i->sectors;
      info_array[2] = i->cylinders;
   } else if (bios_major == 3
	      && bios_minor >= 0
	      && bios_minor < 4) {	/* 3.0 and 3.2 BIOS */
      i = (struct drive_info *)( (char *)bios_base + 0x1f71 + drive * 10 );
      info_array[0] = i->heads + 1;	/* Table stores heads - 1 */
      info_array[1] = i->sectors;
      info_array[2] = i->cylinders;
   } else {			/* 3.4 BIOS (and up?) */
      /* This algorithm was provided by Future Domain (much thanks!). */

      /* Read sector 0 (the MBR) via the SCSI ioctl interface */
      sizes[0] = 0;		/* zero bytes out */
      sizes[1] = 512;		/* one sector in */
      memcpy( data, do_read, sizeof( do_read ) );
      retcode = kernel_scsi_ioctl( disk->device,
				   SCSI_IOCTL_SEND_COMMAND,
				   (void *)buf );
      if (!retcode				 /* SCSI command ok */
	  && data[511] == 0xaa && data[510] == 0x55 /* Partition table valid */
	  && data[0x1c2]) {			 /* Partition type */

	 /* The partition table layout is as follows:

	    Start: 0x1b3h
	    Offset: 0 = partition status
		    1 = starting head
		    2 = starting sector and cylinder (word, encoded)
		    4 = partition type
		    5 = ending head
		    6 = ending sector and cylinder (word, encoded)
		    8 = starting absolute sector (double word)
		    c = number of sectors (double word)
	    Signature: 0x1fe = 0x55aa

	    So, this algorithm assumes:
	    1) the first partition table is in use,
	    2) the data in the first entry is correct, and
	    3) partitions never divide cylinders

	    Note that (1) may be FALSE for NetBSD (and other BSD flavors),
	    as well as for Linux.  Note also, that Linux doesn't pay any
	    attention to the fields that are used by this algorithm -- it
	    only uses the absolute sector data.  Recent versions of Linux's
	    fdisk(1) will fill this data in correctly, and forthcoming
	    versions will check for consistency.

	    Checking for a non-zero partition type is not part of the
	    Future Domain algorithm, but it seemed to be a reasonable thing
	    to do, especially in the Linux and BSD worlds. */

	 info_array[0] = data[0x1c3] + 1;	/* heads */
	 info_array[1] = data[0x1c4] & 0x3f;	/* sectors */
      } else {

	 /* Note that this new method guarantees that there will always be
	    less than 1024 cylinders on a platter.  This is good for drives
	    up to approximately 7.85GB (where 1GB = 1024 * 1024 kB). */

	 if ((unsigned int)size >= 0x7e0000U) {
	    info_array[0] = 0xff;	/* heads   = 255 */
	    info_array[1] = 0x3f;	/* sectors =  63 */
	 } else if ((unsigned int)size >= 0x200000U) {
	    info_array[0] = 0x80;	/* heads   = 128 */
	    info_array[1] = 0x3f;	/* sectors =  63 */
	 } else {
	    info_array[0] = 0x40;	/* heads   =  64 */
	    info_array[1] = 0x20;	/* sectors =  32 */
	 }
      }
      /* For both methods, compute the cylinders */
      info_array[2] = (unsigned int)size / (info_array[0] * info_array[1] );
   }

   return 0;
}
+
#ifdef MODULE
/* Eventually this will go into an include file, but this will be later */
/* Host template consumed by scsi_module.c's module init/cleanup glue. */
Scsi_Host_Template driver_template = FDOMAIN_16X0;

#include "scsi_module.c"
#endif
diff --git a/linux/src/drivers/scsi/fdomain.h b/linux/src/drivers/scsi/fdomain.h
new file mode 100644
index 0000000..bea9998
--- /dev/null
+++ b/linux/src/drivers/scsi/fdomain.h
@@ -0,0 +1,61 @@
+/* fdomain.h -- Header for Future Domain TMC-16x0 driver
+ * Created: Sun May 3 18:47:33 1992 by faith@cs.unc.edu
+ * Revised: Thu Oct 12 13:21:35 1995 by r.faith@ieee.org
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992, 1993, 1994, 1995 Rickard E. Faith
+ *
+ * $Id: fdomain.h,v 1.1 1999/04/26 05:54:33 tb Exp $
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
#ifndef _FDOMAIN_H
#define _FDOMAIN_H

/* Driver entry points exported to the SCSI mid-level; wired into the
   host template by the FDOMAIN_16X0 initializer below. */
int fdomain_16x0_detect( Scsi_Host_Template * );
int fdomain_16x0_command( Scsi_Cmnd * );
int fdomain_16x0_abort( Scsi_Cmnd * );
const char *fdomain_16x0_info( struct Scsi_Host * );
int fdomain_16x0_reset( Scsi_Cmnd *, unsigned int );
int fdomain_16x0_queue( Scsi_Cmnd *, void (*done)(Scsi_Cmnd *) );
int fdomain_16x0_biosparam( Disk *, kdev_t, int * );
int fdomain_16x0_proc_info( char *buffer, char **start, off_t offset,
			    int length, int hostno, int inout );

extern struct proc_dir_entry proc_scsi_fdomain;

/* Positional Scsi_Host_Template initializer.  NOTE(review): the field
   order must match the template layout in hosts.h for the kernel version
   in use -- verify before reordering or inserting entries. */
#define FDOMAIN_16X0 { NULL, \
		       NULL, \
		       NULL, \
		       fdomain_16x0_proc_info, \
		       NULL, \
		       fdomain_16x0_detect, \
		       NULL, \
		       fdomain_16x0_info, \
		       fdomain_16x0_command, \
		       fdomain_16x0_queue, \
		       fdomain_16x0_abort, \
		       fdomain_16x0_reset, \
		       NULL, \
		       fdomain_16x0_biosparam, \
		       1, \
		       6, \
		       64, \
		       1, \
		       0, \
		       0, \
		       DISABLE_CLUSTERING }
#endif
diff --git a/linux/src/drivers/scsi/g_NCR5380.c b/linux/src/drivers/scsi/g_NCR5380.c
new file mode 100644
index 0000000..a141b93
--- /dev/null
+++ b/linux/src/drivers/scsi/g_NCR5380.c
@@ -0,0 +1,729 @@
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * TODO : flesh out DMA support, find some one actually using this (I have
+ * a memory mapped Trantor board that works fine)
+ */
+
+/*
+ * Options :
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. With command line overrides - NCR5380=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is
+ * specified as an array of address, irq, dma, board tuples. Ie, for
+ * one board at 0x350, IRQ5, no dma, I could say
+ * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}
+ *
+ * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ * 3. When included as a module, with arguments passed on the command line:
+ * ncr_irq=xx the interrupt
+ * ncr_addr=xx the port or base address (for port or memory
+ * mapped, resp.)
+ * ncr_dma=xx the DMA
+ * ncr_5380=1 to set up for a NCR5380 board
+ * ncr_53c400=1 to set up for a NCR53C400 board
+ * e.g.
+ * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+ * for a port mapped NCR5380 board or
+ * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+ * for a memory mapped NCR53C400 board with interrupts disabled.
+ *
+ * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ */
+
+#define AUTOPROBE_IRQ
+#define AUTOSENSE
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SCSI_GENERIC_NCR53C400
+#define NCR53C400_PSEUDO_DMA 1
+#define PSEUDO_DMA
+#define NCR53C400
+#define NCR5380_STATS
+#undef NCR5380_STAT_LIMIT
+#endif
+#if defined(CONFIG_SCSI_G_NCR5380_PORT) && defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You can not configure the Generic NCR 5380 SCSI Driver for memory mapped I/O and port mapped I/O at the same time (yet)
+#endif
+#if !defined(CONFIG_SCSI_G_NCR5380_PORT) && !defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You must configure the Generic NCR 5380 SCSI Driver for one of memory mapped I/O and port mapped I/O.
+#endif
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "g_NCR5380.h"
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include<linux/stat.h>
+
/* /proc/scsi directory entry registered for this driver. */
struct proc_dir_entry proc_scsi_g_ncr5380 = {
    PROC_SCSI_GENERIC_NCR5380, 9, "g_NCR5380",
    S_IFDIR | S_IRUGO | S_IXUGO, 2
};

/* Module parameters; NCR_NOT_SET means "not supplied on the command
   line", in which case any compile-time override values are kept. */
#define NCR_NOT_SET 0
static int ncr_irq=NCR_NOT_SET;
static int ncr_dma=NCR_NOT_SET;
static int ncr_addr=NCR_NOT_SET;
static int ncr_5380=NCR_NOT_SET;
static int ncr_53c400=NCR_NOT_SET;

/* Per-board configuration, filled from LILO/module arguments or from
   the GENERIC_NCR5380_OVERRIDE compile-time define. */
static struct override {
    NCR5380_implementation_fields;	/* port or memory base address */
    int irq;
    int dma;
    int board;	/* Use NCR53c400, Ricoh, etc. extensions ? */
} overrides
#ifdef GENERIC_NCR5380_OVERRIDE
    [] = GENERIC_NCR5380_OVERRIDE
#else
    [1] = {{0,},};
#endif

#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+/*
+ * Function : static internal_setup(int board, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : board - either BOARD_NCR5380 for a normal NCR5380 board,
+ * or BOARD_NCR53C400 for a NCR53C400 board. str - unused, ints -
+ * array of integer parameters with ints[0] equal to the number of ints.
+ *
+ */
+
+static void internal_setup(int board, char *str, int *ints) {
+ static int commandline_current = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ if (ints[0] != 2 && ints[0] != 3) {
+ printk("generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
+ return;
+ }
+ case BOARD_NCR53C400:
+ if (ints[0] != 2) {
+ printk("generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
+ return;
+ }
+ }
+
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type)ints[1];
+ overrides[commandline_current].irq = ints[2];
+ if (ints[0] == 3)
+ overrides[commandline_current].dma = ints[3];
+ else
+ overrides[commandline_current].dma = DMA_NONE;
+ overrides[commandline_current].board = board;
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : generic_NCR5380_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR5380_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR5380, str, ints);
+}
+
+/*
+ * Function : generic_NCR53C400_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR53C400_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR53C400, str, ints);
+}
+
/*
 * Function : int generic_NCR5380_detect(Scsi_Host_Template * tpnt)
 *
 * Purpose : initializes generic NCR5380 driver based on the
 *	command line / compile time port and irq definitions.
 *
 * Inputs : tpnt - template for this SCSI adapter.
 *
 * Returns : 1 if a host adapter was found, 0 if not.
 *
 */

int generic_NCR5380_detect(Scsi_Host_Template * tpnt) {
    static int current_override = 0;
    int count;			/* number of adapters registered */
    int flags = 0;
    struct Scsi_Host *instance;

    /* Module parameters, when given, take precedence over slot 0 of the
       compile-time overrides table */
    if (ncr_irq != NCR_NOT_SET)
	overrides[0].irq=ncr_irq;
    if (ncr_dma != NCR_NOT_SET)
	overrides[0].dma=ncr_dma;
    if (ncr_addr != NCR_NOT_SET)
	overrides[0].NCR5380_map_name=(NCR5380_map_type)ncr_addr;
    if (ncr_5380 != NCR_NOT_SET)
	overrides[0].board=BOARD_NCR5380;
    else if (ncr_53c400 != NCR_NOT_SET)
	overrides[0].board=BOARD_NCR53C400;

    tpnt->proc_dir = &proc_scsi_g_ncr5380;

    for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
	if (!(overrides[current_override].NCR5380_map_name))
	    continue;

	switch (overrides[current_override].board) {
	case BOARD_NCR5380:
	    flags = FLAG_NO_PSEUDO_DMA;
	    break;
	case BOARD_NCR53C400:
	    flags = FLAG_NCR53C400;
	    break;
	}

	instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
	instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name;

	NCR5380_init(instance, flags);

	/* Use the configured IRQ, or probe for one if IRQ_AUTO was given */
	if (overrides[current_override].irq != IRQ_AUTO)
	    instance->irq = overrides[current_override].irq;
	else
	    instance->irq = NCR5380_probe_irq(instance, 0xffff);

	if (instance->irq != IRQ_NONE)
	    if (request_irq(instance->irq, generic_NCR5380_intr, SA_INTERRUPT, "NCR5380", NULL)) {
		printk("scsi%d : IRQ%d not free, interrupts disabled\n",
		       instance->host_no, instance->irq);
		instance->irq = IRQ_NONE;	/* Fall back to polled operation */
	    }

	if (instance->irq == IRQ_NONE) {
	    printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
	    printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
	}

	printk("scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int)instance->NCR5380_instance_name);
	if (instance->irq == IRQ_NONE)
	    printk (" interrupts disabled");
	else
	    printk (" irq %d", instance->irq);
	printk(" options CAN_QUEUE=%d  CMD_PER_LUN=%d release=%d",
	       CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE);
	NCR5380_print_options(instance);
	printk("\n");

	/* NOTE(review): current_override is incremented both here and in
	   the for-loop header, so every successful registration skips the
	   following overrides slot.  Harmless when NO_OVERRIDES == 1 (the
	   default), but verify against the overrides table size before
	   relying on multiple entries. */
	++current_override;
	++count;
    }
    return count;
}
+
/* Return a static, human-readable identification string for this driver.
   The host argument is accepted for interface compatibility and ignored. */
const char * generic_NCR5380_info (struct Scsi_Host* host)
{
    static const char info_string[] = "Generic NCR5380/53C400 Driver";

    return info_string;
}
+
/* Release the resources held by an adapter instance: frees its IRQ if
   one was registered.  Always returns 0. */
int generic_NCR5380_release_resources(struct Scsi_Host * instance)
{
    NCR5380_local_declare();	/* locals required by the NCR5380_setup macro */

    NCR5380_setup(instance);

    if (instance->irq != IRQ_NONE)
	free_irq(instance->irq, NULL);

    return 0;
}
+
+#ifdef BIOSPARAM
+/*
+ * Function : int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+#endif
+
+#if NCR53C400_PSEUDO_DMA
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: About to read %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: %d blocks left\n", blocks);
+#endif
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ if (blocks)
+ printk("53C400r: blocks still == %d\n", blocks);
+ else
+ printk("53C400r: Exiting loop\n");
+#endif
+ break;
+ }
+
+#if 1
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Waiting for buffer, bl=%d\n", bl);
+#endif
+
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring 128 bytes\n");
+#endif
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: EXTRA: Waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+ if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ printk("53C400r: no 53C80 gated irq after transfer");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: Got 53C80 interrupt and tried to clear it\n");
+#endif
+
+/* DON'T DO THIS - THEY NEVER ARRIVE!
+ printk("53C400r: Waiting for 53C80 registers\n");
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+*/
+
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
+ printk("53C400r: no end dma signal\n");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: end dma as expected\n");
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ return 0;
+}
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: About to write %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ if (blocks)
+ printk("53C400w: exiting loop, blocks still == %d\n", blocks);
+ else
+ printk("53C400w: exiting loop\n");
+#endif
+ break;
+ }
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: %d blocks left\n", blocks);
+
+ printk("53C400w: waiting for buffer, bl=%d\n", bl);
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: EXTRA waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+#if 0
+ printk("53C400w: waiting for registers to be available\n");
+ THEY NEVER DO!
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+ printk("53C400w: Got em\n");
+#endif
+
+ /* Let's wait for this instead - could be ugly */
+ /* All documentation says to check for this. Maybe my hardware is too
+ * fast. Waiting for it seems to work fine! KLL
+ */
+ while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ ;
+
+ /*
+ * I know. i is certainly != 0 here but the loop is new. See previous
+ * comment.
+ */
+ if (i) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got 53C80 gated irq (last block)\n");
+#endif
+ if (!((i=NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER))
+ printk("53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n",i);
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: Got END OF DMA\n");
+#endif
+ }
+ else
+ printk("53C400w: no 53C80 gated irq after transfer (last block)\n");
+
+#if 0
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
+ printk("53C400w: no end dma signal\n");
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: waiting for last byte...\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got last byte.\n");
+ printk("53C400w: pwrite exiting with status 0, whoopee!\n");
+#endif
+ return 0;
+}
+#endif /* PSEUDO_DMA */
+
+#include "NCR5380.c"
+
+#define PRINTP(x) len += sprintf(buffer+len, x)
+#define ANDP ,
+
+static int sprint_opcode(char* buffer, int len, int opcode) {
+ int start = len;
+ PRINTP("0x%02x " ANDP opcode);
+ return len-start;
+}
+
+static int sprint_command (char* buffer, int len, unsigned char *command) {
+ int i,s,start=len;
+ len += sprint_opcode(buffer, len, command[0]);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ PRINTP("%02x " ANDP command[i]);
+ PRINTP("\n");
+ return len-start;
+}
+
+static int sprint_Scsi_Cmnd (char* buffer, int len, Scsi_Cmnd *cmd) {
+ int start = len;
+ PRINTP("host number %d destination target %d, lun %d\n" ANDP
+ cmd->host->host_no ANDP
+ cmd->target ANDP
+ cmd->lun);
+ PRINTP(" command = ");
+ len += sprint_command (buffer, len, cmd->cmnd);
+ return len-start;
+}
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout)
+{
+ int len = 0;
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ struct Scsi_Host *scsi_ptr;
+ Scsi_Cmnd *ptr;
+ Scsi_Device *dev;
+ struct NCR5380_hostdata *hostdata;
+
+ cli();
+
+ for (scsi_ptr = first_instance; scsi_ptr; scsi_ptr=scsi_ptr->next)
+ if (scsi_ptr->host_no == hostno)
+ break;
+ NCR5380_setup(scsi_ptr);
+ hostdata = (struct NCR5380_hostdata *)scsi_ptr->hostdata;
+
+ PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name);
+ PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE);
+ PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE);
+#ifdef NCR53C400
+ PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE);
+ PRINTP("NCR53C400 card%s detected\n" ANDP (((struct NCR5380_hostdata *)scsi_ptr->hostdata)->flags & FLAG_NCR53C400)?"":" not");
+# if NCR53C400_PSEUDO_DMA
+ PRINTP("NCR53C400 pseudo DMA used\n");
+# endif
+#else
+ PRINTP("NO NCR53C400 driver extensions\n");
+#endif
+ PRINTP("Using %s mapping at %s 0x%x, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name);
+ if (scsi_ptr->irq == IRQ_NONE)
+ PRINTP("no interrupt\n");
+ else
+ PRINTP("on interrupt %d\n" ANDP scsi_ptr->irq);
+
+#ifdef NCR5380_STATS
+ if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue)
+ PRINTP("There are commands pending, transfer rates may be crud\n");
+ if (hostdata->pendingr)
+ PRINTP(" %d pending reads" ANDP hostdata->pendingr);
+ if (hostdata->pendingw)
+ PRINTP(" %d pending writes" ANDP hostdata->pendingw);
+ if (hostdata->pendingr || hostdata->pendingw)
+ PRINTP("\n");
+ for (dev = scsi_devices; dev; dev=dev->next) {
+ if (dev->host == scsi_ptr) {
+ unsigned long br = hostdata->bytes_read[dev->id];
+ unsigned long bw = hostdata->bytes_write[dev->id];
+ long tr = hostdata->time_read[dev->id] / HZ;
+ long tw = hostdata->time_write[dev->id] / HZ;
+
+ PRINTP(" T:%d %s " ANDP dev->id ANDP (dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int)dev->type] : "Unknown");
+ for (i=0; i<8; i++)
+ if (dev->vendor[i] >= 0x20)
+ *(buffer+(len++)) = dev->vendor[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<16; i++)
+ if (dev->model[i] >= 0x20)
+ *(buffer+(len++)) = dev->model[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<4; i++)
+ if (dev->rev[i] >= 0x20)
+ *(buffer+(len++)) = dev->rev[i];
+ *(buffer+(len++)) = ' ';
+
+ PRINTP("\n%10ld kb read in %5ld secs" ANDP br/1024 ANDP tr);
+ if (tr)
+ PRINTP(" @ %5ld bps" ANDP br / tr);
+
+ PRINTP("\n%10ld kb written in %5ld secs" ANDP bw/1024 ANDP tw);
+ if (tw)
+ PRINTP(" @ %5ld bps" ANDP bw / tw);
+ PRINTP("\n");
+ }
+ }
+#endif
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ PRINTP("REQ not asserted, phase unknown.\n");
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i)
+ ;
+ PRINTP("Phase %s\n" ANDP phases[i].name);
+ }
+
+ if (!hostdata->connected) {
+ PRINTP("No currently connected command\n");
+ } else {
+ len += sprint_Scsi_Cmnd (buffer, len, (Scsi_Cmnd *) hostdata->connected);
+ }
+
+ PRINTP("issue_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ PRINTP("disconnected_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ sti();
+ return len;
+}
+
+#undef PRINTP
+#undef ANDP
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = GENERIC_NCR5380;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/g_NCR5380.h b/linux/src/drivers/scsi/g_NCR5380.h
new file mode 100644
index 0000000..a30e133
--- /dev/null
+++ b/linux/src/drivers/scsi/g_NCR5380.h
@@ -0,0 +1,162 @@
+/*
+ * Generic Generic NCR5380 driver defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef GENERIC_NCR5380_H
+#define GENERIC_NCR5380_H
+
+#include <linux/config.h>
+
+#define GENERIC_NCR5380_PUBLIC_RELEASE 1
+
+#ifdef NCR53C400
+#define BIOSPARAM
+#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
+#else
+#define NCR5380_BIOSPARAM NULL
+#endif
+
+#ifndef ASM
+int generic_NCR5380_abort(Scsi_Cmnd *);
+int generic_NCR5380_detect(Scsi_Host_Template *);
+int generic_NCR5380_release_resources(struct Scsi_Host *);
+int generic_NCR5380_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int generic_NCR5380_reset(Scsi_Cmnd *, unsigned int);
+int notyet_generic_proc_info (char *buffer ,char **start, off_t offset,
+ int length, int hostno, int inout);
+const char* generic_NCR5380_info(struct Scsi_Host *);
+#ifdef BIOSPARAM
+int generic_NCR5380_biosparam(Disk *, kdev_t, int *);
+#endif
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define GENERIC_NCR5380 {NULL, NULL, NULL, \
+ generic_NCR5380_proc_info, \
+ "Generic NCR5380/NCR53C400 Scsi Driver", \
+ generic_NCR5380_detect, generic_NCR5380_release_resources, \
+ (void *)generic_NCR5380_info, NULL, \
+ generic_NCR5380_queue_command, generic_NCR5380_abort, \
+ generic_NCR5380_reset, NULL, \
+ NCR5380_BIOSPARAM, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define __STRVAL(x) #x
+#define STRVAL(x) __STRVAL(x)
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+
+#define NCR5380_map_config port
+
+#define NCR5380_map_type int
+
+#define NCR5380_map_name port
+
+#define NCR5380_instance_name io_port
+
+#define NCR53C400_register_offset 0
+
+#define NCR53C400_address_adjust 8
+
+#ifdef NCR53C400
+#define NCR5380_region_size 16
+#else
+#define NCR5380_region_size 8
+#endif
+
+#define NCR5380_read(reg) (inb(NCR5380_map_name + (reg)))
+#define NCR5380_write(reg, value) (outb((value), (NCR5380_map_name + (reg))))
+
+#else
+/* therefore CONFIG_SCSI_G_NCR5380_MEM */
+
+#define NCR5380_map_config memory
+
+#define NCR5380_map_type volatile unsigned char*
+
+#define NCR5380_map_name base
+
+#define NCR5380_instance_name base
+
+#define NCR53C400_register_offset 0x108
+
+#define NCR53C400_address_adjust 0
+
+#define NCR53C400_mem_base 0x3880
+
+#define NCR53C400_host_buffer 0x3900
+
+#define NCR5380_region_size 0x3a00
+
+
+#define NCR5380_read(reg) (*(NCR5380_map_name + NCR53C400_mem_base + (reg)))
+#define NCR5380_write(reg, value) (*(NCR5380_map_name + NCR53C400_mem_base + (reg)) = value)
+
+#endif
+
+#define NCR5380_implementation_fields \
+ NCR5380_map_type NCR5380_map_name
+
+#define NCR5380_local_declare() \
+ register NCR5380_implementation_fields
+
+#define NCR5380_setup(instance) \
+ NCR5380_map_name = (NCR5380_map_type)((instance)->NCR5380_instance_name)
+
+#define NCR5380_intr generic_NCR5380_intr
+#define NCR5380_queue_command generic_NCR5380_queue_command
+#define NCR5380_abort generic_NCR5380_abort
+#define NCR5380_reset generic_NCR5380_reset
+#define NCR5380_pread generic_NCR5380_pread
+#define NCR5380_pwrite generic_NCR5380_pwrite
+#define NCR5380_proc_info notyet_generic_proc_info
+
+#define BOARD_NCR5380 0
+#define BOARD_NCR53C400 1
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* GENERIC_NCR5380_H */
+
diff --git a/linux/src/drivers/scsi/gdth.c b/linux/src/drivers/scsi/gdth.c
new file mode 100644
index 0000000..0a4bef8
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth.c
@@ -0,0 +1,3598 @@
+/************************************************************************
+ * GDT ISA/EISA/PCI Disk Array Controller driver for Linux *
+ * *
+ * gdth.c *
+ * Copyright (C) 1995-98 ICP vortex Computersysteme GmbH, Achim Leubner *
+ * *
+ * <achim@vortex.de> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published *
+ * by the Free Software Foundation; either version 2 of the License, *
+ * or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this kernel; if not, write to the Free Software *
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
+ * *
+ * Tested with Linux 1.2.13, ..., 2.1.103 *
+ * *
+ * Revision 1.16 1998/09/28 16:08:46 achim
+ * GDT_PCIMPR: DPMEM remapping, if required
+ * mdelay() added
+ *
+ * Revision 1.15 1998/06/03 14:54:06 achim
+ * gdth_delay(), gdth_flush() implemented
+ * Bugfix: gdth_release() changed
+ *
+ * Revision 1.14 1998/05/22 10:01:17 achim
+ * mj: pcibios_strerror() removed
+ * Improved SMP support (if version >= 2.1.95)
+ * gdth_halt(): halt_called flag added (if version < 2.1)
+ *
+ * Revision 1.13 1998/04/16 09:14:57 achim
+ * Reserve drives (for raw service) implemented
+ * New error handling code enabled
+ * Get controller name from board_info() IOCTL
+ * Final round of PCI device driver patches by Martin Mares
+ *
+ * Revision 1.12 1998/03/03 09:32:37 achim
+ * Fibre channel controller support added
+ *
+ * Revision 1.11 1998/01/27 16:19:14 achim
+ * SA_SHIRQ added
+ * add_timer()/del_timer() instead of GDTH_TIMER
+ * scsi_add_timer()/scsi_del_timer() instead of SCSI_TIMER
+ * New error handling included
+ *
+ * Revision 1.10 1997/10/31 12:29:57 achim
+ * Read heads/sectors from host drive
+ *
+ * Revision 1.9 1997/09/04 10:07:25 achim
+ * IO-mapping with virt_to_bus(), readb(), writeb(), ...
+ * register_reboot_notifier() to get a notify on shutdown used
+ *
+ * Revision 1.8 1997/04/02 12:14:30 achim
+ * Version 1.00 (see gdth.h), tested with kernel 2.0.29
+ *
+ * Revision 1.7 1997/03/12 13:33:37 achim
+ * gdth_reset() changed, new async. events
+ *
+ * Revision 1.6 1997/03/04 14:01:11 achim
+ * Shutdown routine gdth_halt() implemented
+ *
+ * Revision 1.5 1997/02/21 09:08:36 achim
+ * New controller included (RP, RP1, RP2 series)
+ * IOCTL interface implemented
+ *
+ * Revision 1.4 1996/07/05 12:48:55 achim
+ * Function gdth_bios_param() implemented
+ * New constant GDTH_MAXC_P_L inserted
+ * GDT_WRITE_THR, GDT_EXT_INFO implemented
+ * Function gdth_reset() changed
+ *
+ * Revision 1.3 1996/05/10 09:04:41 achim
+ * Small changes for Linux 1.2.13
+ *
+ * Revision 1.2 1996/05/09 12:45:27 achim
+ * Loadable module support implemented
+ * /proc support corrections made
+ *
+ * Revision 1.1 1996/04/11 07:35:57 achim
+ * Initial revision
+ *
+ *
+ * $Id: gdth.c,v 1.1.4.1 2007/03/27 21:04:30 tschwinge Exp $
+ ************************************************************************/
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/proc_fs.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#if LINUX_VERSION_CODE >= 0x020100
+#include <linux/reboot.h>
+#else
+#include <linux/bios32.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#if LINUX_VERSION_CODE >= 0x02015F
+#include <asm/spinlock.h>
+#endif
+
+#if LINUX_VERSION_CODE >= 0x010300
+#include <linux/blk.h>
+#else
+#include "../block/blk.h"
+#endif
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "gdth.h"
+
+/****************************************************************************/
+
+/* LILO params: gdth=<IRQ>
+ *
+ * Where: <IRQ> is any of the valid IRQs for EISA controllers (10,11,12,14)
+ * Sets the IRQ of the GDT3000/3020 EISA controller to this value,
+ * if the IRQ can not automat. detect (controller BIOS disabled)
+ * See gdth_init_eisa()
+ *
+ * You can use the command line gdth=0 to disable the driver
+ */
+static unchar irqs[MAXHA] = {0xff};
+static unchar disable_gdth_scan = FALSE;
+
+/* Reserve drives for raw service: Fill the following structure with the
+ * appropriate values: Controller number, Channel, Target ID
+ */
+static gdth_reserve_str reserve_list[] = {
+ /* { 0, 1, 4 }, Example: Controller 0, Channel B, ID 4 */
+ { 0xff, 0xff, 0xff } /* end of list */
+};
+
+/****************************************************************************/
+
+#if LINUX_VERSION_CODE >= 0x02015F
+static void gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs);
+static void do_gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs);
+#elif LINUX_VERSION_CODE >= 0x010346
+static void gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs);
+#else
+static void gdth_interrupt(int irq,struct pt_regs *regs);
+#endif
+static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp);
+static int gdth_async_event(int hanum,int service);
+
+static void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority);
+static void gdth_next(int hanum);
+static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b);
+static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp,unchar b);
+static gdth_evt_str *gdth_store_event(ushort source, ushort idx,
+ gdth_evt_data *evt);
+static int gdth_read_event(int handle, gdth_evt_str *estr);
+static void gdth_readapp_event(unchar application, gdth_evt_str *estr);
+static void gdth_clear_events(void);
+
+static void gdth_copy_internal_data(Scsi_Cmnd *scp,char *buffer,ushort count);
+static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp,
+ unchar b,ulong *flags);
+static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive);
+
+static int gdth_search_eisa(ushort eisa_adr);
+static int gdth_search_isa(ulong bios_adr);
+static int gdth_search_pci(ushort device_id,ushort index,gdth_pci_str *pcistr);
+static int gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha);
+static int gdth_init_isa(ulong bios_adr,gdth_ha_str *ha);
+static int gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha);
+
+static void gdth_enable_int(int hanum);
+static int gdth_get_status(unchar *pIStatus,int irq);
+static int gdth_test_busy(int hanum);
+static int gdth_get_cmd_index(int hanum);
+static void gdth_release_event(int hanum);
+static int gdth_wait(int hanum,int index,ulong time);
+static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong p1,
+ ulong p2,ulong p3);
+static int gdth_search_drives(int hanum);
+
+static void *gdth_mmap(ulong paddr, ulong size);
+static void gdth_munmap(void *addr);
+
+static const char *gdth_ctr_name(int hanum);
+
+static void gdth_flush(int hanum);
+#if LINUX_VERSION_CODE >= 0x020100
+static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
+#else
+static int halt_called = FALSE;
+void gdth_halt(void);
+#endif
+
+#ifdef DEBUG_GDTH
+static unchar DebugState = DEBUG_GDTH;
+extern int sys_syslog(int,char*,int);
+#define LOGEN sys_syslog(7,NULL,0)
+
+#ifdef __SERIAL__
+#define MAX_SERBUF 160
+static void ser_init(void);
+static void ser_puts(char *str);
+static void ser_putc(char c);
+static int ser_printk(const char *fmt, ...);
+static char strbuf[MAX_SERBUF+1];
+#ifdef __COM2__
+#define COM_BASE 0x2f8
+#else
+#define COM_BASE 0x3f8
+#endif
+static void ser_init()
+{
+ unsigned port=COM_BASE;
+
+ outb(0x80,port+3);
+ outb(0,port+1);
+ /* 19200 Baud, if 9600: outb(12,port) */
+ outb(6, port);
+ outb(3,port+3);
+ outb(0,port+1);
+ /*
+ ser_putc('I');
+ ser_putc(' ');
+ */
+}
+
+static void ser_puts(char *str)
+{
+ char *ptr;
+
+ ser_init();
+ for (ptr=str;*ptr;++ptr)
+ ser_putc(*ptr);
+}
+
+static void ser_putc(char c)
+{
+ unsigned port=COM_BASE;
+
+ while ((inb(port+5) & 0x20)==0);
+ outb(c,port);
+ if (c==0x0a)
+ {
+ while ((inb(port+5) & 0x20)==0);
+ outb(0x0d,port);
+ }
+}
+
+static int ser_printk(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args,fmt);
+ i = vsprintf(strbuf,fmt,args);
+ ser_puts(strbuf);
+ va_end(args);
+ return i;
+}
+
+#define TRACE(a) {if (DebugState==1) {ser_printk a;}}
+#define TRACE2(a) {if (DebugState==1 || DebugState==2) {ser_printk a;}}
+#define TRACE3(a) {if (DebugState!=0) {ser_printk a;}}
+
+#else /* !__SERIAL__ */
+#define TRACE(a) {if (DebugState==1) {LOGEN;printk a;}}
+#define TRACE2(a) {if (DebugState==1 || DebugState==2) {LOGEN;printk a;}}
+#define TRACE3(a) {if (DebugState!=0) {LOGEN;printk a;}}
+#endif
+
+#else /* !DEBUG */
+#define TRACE(a)
+#define TRACE2(a)
+#define TRACE3(a)
+#endif
+
+#ifdef GDTH_STATISTICS
+static ulong max_rq=0, max_index=0, max_sg=0;
+static ulong act_ints=0, act_ios=0, act_stats=0, act_rq=0;
+static struct timer_list gdth_timer;
+#endif
+
+#define PTR2USHORT(a) (ushort)(ulong)(a)
+#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
+#define INDEX_OK(i,t) ((i)<sizeof(t)/sizeof((t)[0]))
+
+#define NUMDATA(a) ( (gdth_num_str *)((a)->hostdata))
+#define HADATA(a) (&((gdth_ext_str *)((a)->hostdata))->haext)
+#define CMDDATA(a) (&((gdth_ext_str *)((a)->hostdata))->cmdext)
+#define DMADATA(a) (&((gdth_ext_str *)((a)->hostdata))->dmaext)
+
+
+#if LINUX_VERSION_CODE < 0x010300
+static void *gdth_mmap(ulong paddr, ulong size)
+{
+ if (paddr >= high_memory)
+ return NULL;
+ else
+ return (void *)paddr;
+}
+static void gdth_munmap(void *addr)
+{
+}
+inline ulong virt_to_phys(volatile void *addr)
+{
+ return (ulong)addr;
+}
+inline void *phys_to_virt(ulong addr)
+{
+ return (void *)addr;
+}
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#define readb(addr) (*(volatile unchar *)(addr))
+#define readw(addr) (*(volatile ushort *)(addr))
+#define readl(addr) (*(volatile ulong *)(addr))
+#define writeb(b,addr) (*(volatile unchar *)(addr) = (b))
+#define writew(b,addr) (*(volatile ushort *)(addr) = (b))
+#define writel(b,addr) (*(volatile ulong *)(addr) = (b))
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+
+#elif LINUX_VERSION_CODE < 0x020100
+static int remapped = FALSE;
+static void *gdth_mmap(ulong paddr, ulong size)
+{
+ if ( paddr >= high_memory) {
+ remapped = TRUE;
+ return vremap(paddr, size);
+ } else {
+ return (void *)paddr;
+ }
+}
+static void gdth_munmap(void *addr)
+{
+ if (remapped)
+ vfree(addr);
+ remapped = FALSE;
+}
+#else
+static void *gdth_mmap(ulong paddr, ulong size)
+{
+ return ioremap(paddr, size);
+}
+static void gdth_munmap(void *addr)
+{
+ return iounmap(addr);
+}
+#endif
+
+
+static unchar gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
+static unchar gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
+static unchar gdth_polling; /* polling if TRUE */
+static unchar gdth_from_wait = FALSE; /* gdth_wait() */
+static int wait_index,wait_hanum; /* gdth_wait() */
+static int gdth_ctr_count = 0; /* controller count */
+static int gdth_ctr_vcount = 0; /* virt. ctr. count */
+static int gdth_ctr_released = 0; /* gdth_release() */
+static struct Scsi_Host *gdth_ctr_tab[MAXHA]; /* controller table */
+static struct Scsi_Host *gdth_ctr_vtab[MAXHA*MAXBUS]; /* virt. ctr. table */
+static unchar gdth_write_through = FALSE; /* write through */
+static char *gdth_ioctl_tab[4][MAXHA]; /* ioctl buffer */
+static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
+static int elastidx;
+static int eoldidx;
+
+static struct {
+ Scsi_Cmnd *cmnd; /* pending request */
+ ushort service; /* service */
+} gdth_cmd_tab[GDTH_MAXCMDS][MAXHA]; /* table of pend. requests */
+
+#define DIN 1 /* IN data direction */
+#define DOU 2 /* OUT data direction */
+#define DNO DIN /* no data transfer */
+#define DUN DIN /* unknown data direction */
+static unchar gdth_direction_tab[0x100] = {
+ DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
+ DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
+ DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DIN,DUN,DUN,DIN,DIN,DIN,
+ DIN,DIN,DIN,DNO,DIN,DNO,DNO,DIN,DIN,DIN,DIN,DIN,DIN,DIN,DIN,DIN,
+ DIN,DIN,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DIN,DIN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DIN,DUN,DNO,DUN,DIN,DIN,
+ DIN,DIN,DIN,DNO,DUN,DIN,DIN,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
+};
+
+/* __initfunc, __initdata macros */
+#if LINUX_VERSION_CODE >= 0x020126
+#include <linux/init.h>
+#else
+#define __initfunc(A) A
+#define __initdata
+#define __init
+#endif
+
+/* /proc support */
+#if LINUX_VERSION_CODE >= 0x010300
+#include <linux/stat.h>
+struct proc_dir_entry proc_scsi_gdth = {
+ PROC_SCSI_GDTH, 4, "gdth",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#include "gdth_proc.h"
+#include "gdth_proc.c"
+#endif
+
+#if LINUX_VERSION_CODE >= 0x020100
+/* notifier block to get a notify on system shutdown/halt/reboot */
+static struct notifier_block gdth_notifier = {
+ gdth_halt, NULL, 0
+};
+#endif
+
+static void gdth_delay(int milliseconds)
+{
+ if (milliseconds == 0) {
+ udelay(1);
+ } else {
+#if LINUX_VERSION_CODE >= 0x020168
+ mdelay(milliseconds);
+#else
+ int i;
+ for (i = 0; i < milliseconds; ++i)
+ udelay(1000);
+#endif
+ }
+}
+
+/* controller search and initialization functions */
+
+__initfunc (static int gdth_search_eisa(ushort eisa_adr))
+{
+ ulong id;
+
+ TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
+ id = inl(eisa_adr+ID0REG);
+ if (id == GDT3A_ID || id == GDT3B_ID) { /* GDT3000A or GDT3000B */
+ if ((inb(eisa_adr+EISAREG) & 8) == 0)
+ return 0; /* not EISA configured */
+ return 1;
+ }
+ if (id == GDT3_ID) /* GDT3000 */
+ return 1;
+
+ return 0;
+}
+
+
+__initfunc (static int gdth_search_isa(ulong bios_adr))
+{
+ void *addr;
+ ulong id;
+
+ TRACE(("gdth_search_isa() bios adr. %lx\n",bios_adr));
+ if ((addr = gdth_mmap(bios_adr+BIOS_ID_OFFS, sizeof(ulong))) != NULL) {
+ id = readl(addr);
+ gdth_munmap(addr);
+ if (id == GDT2_ID) /* GDT2000 */
+ return 1;
+ }
+ return 0;
+}
+
+
+/*
+ * Find the index-th Vortex/GDT PCI controller matching device_id and
+ * read its resources (base addresses, BIOS ROM address, IRQ) into
+ * *pcistr.
+ * Return: 1 = controller found, 0 = none (or no PCI BIOS present),
+ * -1 = configuration-space read error or unexpected resource types.
+ */
+__initfunc (static int gdth_search_pci(ushort device_id,ushort index,gdth_pci_str *pcistr))
+{
+ int error;
+ ulong base0,base1,base2;
+
+ TRACE(("gdth_search_pci() device_id %d, index %d\n",
+ device_id,index));
+
+#if LINUX_VERSION_CODE >= 0x20155
+ if (!pci_present())
+ return 0;
+#else
+ if (!pcibios_present())
+ return 0;
+#endif
+
+ if (pcibios_find_device(PCI_VENDOR_ID_VORTEX,device_id,index,
+ &pcistr->bus,&pcistr->device_fn))
+ return 0;
+
+ /* GDT PCI controller found, now read resources from config space */
+#if LINUX_VERSION_CODE >= 0x20155
+ {
+ /* >= 2.1.85: base addresses and IRQ are cached in struct pci_dev */
+ struct pci_dev *pdev = pci_find_slot(pcistr->bus, pcistr->device_fn);
+ base0 = pdev->base_address[0];
+ base1 = pdev->base_address[1];
+ base2 = pdev->base_address[2];
+ if ((error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_ROM_ADDRESS,
+ (int *) &pcistr->bios))) {
+ printk("GDT-PCI: error %d reading configuration space", error);
+ return -1;
+ }
+ pcistr->irq = pdev->irq;
+ }
+#else
+/* pre-1.3 pcibios takes plain pointers, later versions want (int *) */
+#if LINUX_VERSION_CODE >= 0x010300
+#define GDTH_BASEP (int *)
+#else
+#define GDTH_BASEP
+#endif
+ if ((error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_BASE_ADDRESS_0,
+ GDTH_BASEP&base0)) ||
+ (error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_BASE_ADDRESS_1,
+ GDTH_BASEP&base1)) ||
+ (error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_BASE_ADDRESS_2,
+ GDTH_BASEP&base2)) ||
+ (error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_ROM_ADDRESS,
+ GDTH_BASEP&pcistr->bios)) ||
+ (error = pcibios_read_config_byte(pcistr->bus,pcistr->device_fn,
+ PCI_INTERRUPT_LINE,&pcistr->irq))) {
+ printk("GDT-PCI: error %d reading configuration space", error);
+ return -1;
+ }
+#endif
+
+ pcistr->device_id = device_id;
+ /* the resource layout depends on the board family */
+ if (device_id <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000 or GDT6000B */
+ device_id >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
+ /* single memory BAR holding the dual-ported RAM */
+ if ((base0 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_MEMORY)
+ return -1;
+ pcistr->dpmem = base0 & PCI_BASE_ADDRESS_MEM_MASK;
+ } else { /* GDT6110, GDT6120, .. */
+ /* BAR0 = memory-mapped I/O, BAR1 = I/O ports, BAR2 = DPMEM */
+ if ((base0 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_MEMORY ||
+ (base2 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_MEMORY ||
+ (base1 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_IO)
+ return -1;
+ pcistr->dpmem = base2 & PCI_BASE_ADDRESS_MEM_MASK;
+ pcistr->io_mm = base0 & PCI_BASE_ADDRESS_MEM_MASK;
+ pcistr->io = base1 & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ return 1;
+}
+
+
+/*
+ * Initialize an EISA controller at eisa_adr: deinitialize running
+ * services via the local doorbell, check the firmware protocol version
+ * and determine the IRQ. Fills in *ha. Returns 1 on success, 0 on error.
+ */
+__initfunc (static int gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha))
+{
+ ulong retries,id;
+ unchar prot_ver,eisacf,i,irq_found;
+
+ TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
+
+ /* disable board interrupts, deinitialize services */
+ outb(0xff,eisa_adr+EDOORREG);
+ outb(0x00,eisa_adr+EDENABREG);
+ outb(0x00,eisa_adr+EINTENABREG);
+
+ outb(0xff,eisa_adr+LDOORREG);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ /* wait for the board to acknowledge DEINIT on the event doorbell */
+ while (inb(eisa_adr+EDOORREG) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-EISA: Initialization error (DEINIT failed)\n");
+ return 0;
+ }
+ gdth_delay(1);
+ TRACE2(("wait for DEINIT: retries=%ld\n",retries));
+ }
+ prot_ver = inb(eisa_adr+MAILBOXREG);
+ outb(0xff,eisa_adr+EDOORREG); /* acknowledge the event */
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-EISA: Illegal protocol version\n");
+ return 0;
+ }
+ ha->bmic = eisa_adr;
+ ha->brd_phys = (ulong)eisa_adr >> 12;
+
+ /* clear the 16-byte mailbox */
+ outl(0,eisa_adr+MAILBOXREG);
+ outl(0,eisa_adr+MAILBOXREG+4);
+ outl(0,eisa_adr+MAILBOXREG+8);
+ outl(0,eisa_adr+MAILBOXREG+12);
+
+ /* detect IRQ */
+ if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
+ /* GDT3000/3020: ask the firmware for the IRQ via the mailbox */
+ ha->type = GDT_EISA;
+ ha->stype = id;
+ outl(1,eisa_adr+MAILBOXREG+8);
+ outb(0xfe,eisa_adr+LDOORREG);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (inb(eisa_adr+EDOORREG) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-EISA: Initialization error (get IRQ failed)\n");
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ ha->irq = inb(eisa_adr+MAILBOXREG);
+ outb(0xff,eisa_adr+EDOORREG);
+ TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
+ /* check the result */
+ if (ha->irq == 0) {
+ /* firmware could not report it: fall back to the irqs[]
+ command-line table (0xff terminates the table) */
+ TRACE2(("Unknown IRQ, check IRQ table from cmd line !\n"));
+ for (i=0,irq_found=FALSE; i<MAXHA && irqs[i]!=0xff; ++i) {
+ if (irqs[i]!=0) {
+ irq_found=TRUE;
+ break;
+ }
+ }
+ if (irq_found) {
+ ha->irq = irqs[i];
+ irqs[i] = 0; /* consume this table entry */
+ printk("GDT-EISA: Can not detect controller IRQ,\n");
+ printk("Use IRQ setting from command line (IRQ = %d)\n",
+ ha->irq);
+ } else {
+ printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
+ printk("the controller BIOS or use command line parameters\n");
+ return 0;
+ }
+ }
+ } else {
+ /* GDT3000A/3000B: IRQ is encoded in the EISA config register */
+ eisacf = inb(eisa_adr+EISAREG) & 7;
+ if (eisacf > 4) /* level triggered */
+ eisacf -= 4;
+ ha->irq = gdth_irq_tab[eisacf];
+ ha->type = GDT_EISA;
+ ha->stype= id;
+ }
+ return 1;
+}
+
+
+/*
+ * Initialize an ISA (GDT2000) controller: map its dual-ported RAM,
+ * read DRQ/IRQ settings, deinitialize running services, check the
+ * firmware protocol version and issue the special BIOS request.
+ * Fills in *ha. Returns 1 on success, 0 on error (DPMEM is unmapped
+ * again on every failure path).
+ *
+ * Fix: the DPMEM write-error message wrongly said "GDT-PCI" in this
+ * ISA init path; it now says "GDT-ISA" like the other messages here.
+ */
+__initfunc (static int gdth_init_isa(ulong bios_adr,gdth_ha_str *ha))
+{
+ register gdt2_dpram_str *dp2_ptr;
+ int i;
+ unchar irq_drq,prot_ver;
+ ulong retries;
+
+ TRACE(("gdth_init_isa() bios adr. %lx\n",bios_adr));
+
+ ha->brd = gdth_mmap(bios_adr, sizeof(gdt2_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp2_ptr = (gdt2_dpram_str *)ha->brd;
+ writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
+ /* reset interface area */
+ memset_io((char *)&dp2_ptr->u,0,sizeof(dp2_ptr->u));
+ if (readl(&dp2_ptr->u) != 0) {
+ printk("GDT-ISA: Initialization error (DPMEM write error)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, read DRQ and IRQ */
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ writeb(0x00, &dp2_ptr->io.irqen);
+ writeb(0x00, &dp2_ptr->u.ic.S_Status);
+ writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);
+
+ /* DRQ: first zero bit of the low 3 bits selects the table entry */
+ irq_drq = readb(&dp2_ptr->io.rq);
+ for (i=0; i<3; ++i) {
+ if ((irq_drq & 1)==0)
+ break;
+ irq_drq >>= 1;
+ }
+ ha->drq = gdth_drq_tab[i];
+
+ /* IRQ: first zero bit of the next 4 bits selects the table entry */
+ irq_drq = readb(&dp2_ptr->io.rq) >> 3;
+ for (i=1; i<5; ++i) {
+ if ((irq_drq & 1)==0)
+ break;
+ irq_drq >>= 1;
+ }
+ ha->irq = gdth_irq_tab[i];
+
+ /* deinitialize services */
+ writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp2_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-ISA: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp2_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp2_ptr->u.ic.Status);
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-ISA: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_ISA;
+ ha->ic_all_size = sizeof(dp2_ptr->u);
+ ha->stype= GDT2_ID;
+ ha->brd_phys = bios_adr >> 4;
+
+ /* special request to controller BIOS */
+ writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp2_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-ISA: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp2_ptr->u.ic.Status);
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ return 1;
+}
+
+
+/*
+ * Initialize a PCI controller described by *pcistr and fill in *ha.
+ * Three board families are handled: GDT6000/6000B (plain DPMEM),
+ * GDT6110..6555 ("PCINEW", PLX bridge with I/O-port doorbells) and the
+ * MPR boards (i960 based). Each branch maps the DPMEM, deinitializes
+ * running services, checks the protocol version and issues the special
+ * BIOS request. Returns 1 on success, 0 on error.
+ *
+ * Fixes: removed a stray double semicolon after the irqen write, and
+ * removed a gdth_munmap(ha->brd) on the PCINEW failed-mmap path where
+ * ha->brd is NULL (the other branches simply return there).
+ */
+__initfunc (static int gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha))
+{
+ register gdt6_dpram_str *dp6_ptr;
+ register gdt6c_dpram_str *dp6c_ptr;
+ register gdt6m_dpram_str *dp6m_ptr;
+ ulong retries;
+ unchar prot_ver;
+ int i, found = FALSE;
+
+ TRACE(("gdth_init_pci()\n"));
+
+ ha->brd_phys = (pcistr->bus << 8) | (pcistr->device_fn & 0xf8);
+ ha->stype = (ulong)pcistr->device_id;
+ ha->irq = pcistr->irq;
+
+ if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000 or GDT6000B */
+ TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
+ ha->brd = gdth_mmap(pcistr->dpmem, sizeof(gdt6_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6_ptr = (gdt6_dpram_str *)ha->brd;
+ /* reset interface area */
+ memset_io((char *)&dp6_ptr->u,0,sizeof(dp6_ptr->u));
+ if (readl(&dp6_ptr->u) != 0) {
+ printk("GDT-PCI: Initialization error (DPMEM write error)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, deinit services */
+ writeb(0xff, &dp6_ptr->io.irqdel);
+ writeb(0x00, &dp6_ptr->io.irqen);
+ writeb(0x00, &dp6_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp6_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp6_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6_ptr->u.ic.S_Status);
+ writeb(0xff, &dp6_ptr->io.irqdel);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCI;
+ ha->ic_all_size = sizeof(dp6_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp6_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp6_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6_ptr->u.ic.S_Status);
+ writeb(0xff, &dp6_ptr->io.irqdel);
+
+ } else if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, GDT6120, .. */
+ /* "PCINEW" boards: doorbells live behind the PLX bridge I/O ports */
+ ha->plx = (gdt6c_plx_regs *)pcistr->io;
+ TRACE2(("init_pci_new() dpmem %lx io %lx irq %d\n",
+ pcistr->dpmem,(ulong)ha->plx,ha->irq));
+ ha->brd = gdth_mmap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6c_ptr = (gdt6c_dpram_str *)ha->brd;
+ /* reset interface area */
+ memset_io((char *)&dp6c_ptr->u,0,sizeof(dp6c_ptr->u));
+ if (readl(&dp6c_ptr->u) != 0) {
+ printk("GDT-PCI: Initialization error (DPMEM write error)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, deinit services */
+ outb(0x00,PTR2USHORT(&ha->plx->control1));
+ outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
+
+ writeb(0x00, &dp6c_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
+
+ outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
+
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp6c_ptr->u.ic.S_Info[0]);
+ /* NOTE(review): clears Status here while the dp6 branch clears
+ S_Status — verify against gdt6c_dpram_str layout */
+ writeb(0, &dp6c_ptr->u.ic.Status);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCINEW;
+ ha->ic_all_size = sizeof(dp6c_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp6c_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
+
+ outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
+
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6c_ptr->u.ic.S_Status);
+
+ } else { /* MPR */
+ TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
+ ha->brd = gdth_mmap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+
+ /* check and reset interface area */
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writel(DPMEM_MAGIC, &dp6m_ptr->u);
+ if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
+ /* DPMEM shadowed: probe the C8000..E8000 range for a free
+ window and move BAR0 there */
+ printk("GDT-PCI: Cannot access DPMEM at 0x%x (shadowed?)\n",
+ (int)ha->brd);
+ found = FALSE;
+ for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
+ pcibios_write_config_dword( pcistr->bus, pcistr->device_fn,
+ PCI_BASE_ADDRESS_0, i );
+ gdth_munmap( ha->brd );
+ ha->brd = gdth_mmap(i, sizeof(gdt6m_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writel(DPMEM_MAGIC, &dp6m_ptr->u);
+ if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
+ printk("GDT-PCI: Use free address at 0x%x\n",
+ (int)ha->brd);
+ found = TRUE;
+ break;
+ }
+ }
+ if (!found) {
+ printk("GDT-PCI: No free address found!\n");
+ gdth_munmap( ha->brd );
+ return 0;
+ }
+ }
+ memset_io((char *)&dp6m_ptr->u,0,sizeof(dp6m_ptr->u));
+
+ /* disable board interrupts, deinit services */
+ writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
+ &dp6m_ptr->i960r.edoor_en_reg);
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(0x00, &dp6m_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
+ writeb(1, &dp6m_ptr->i960r.ldoor_reg);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp6m_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6m_ptr->u.ic.S_Status);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCIMPR;
+ ha->ic_all_size = sizeof(dp6m_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp6m_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
+ writeb(1, &dp6m_ptr->i960r.ldoor_reg);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6m_ptr->u.ic.S_Status);
+ }
+
+ return 1;
+}
+
+
+/* controller protocol functions */
+
+/* Enable (unmask) board interrupts of controller hanum, dispatching on
+ * the bus type. Interrupts are kept disabled on the CPU while poking
+ * the board registers. */
+__initfunc (static void gdth_enable_int(int hanum))
+{
+ gdth_ha_str *ha;
+ ulong flags;
+ gdt2_dpram_str *dp2_ptr;
+ gdt6_dpram_str *dp6_ptr;
+ gdt6m_dpram_str *dp6m_ptr;
+
+ TRACE(("gdth_enable_int() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ save_flags(flags);
+ cli();
+
+ switch (ha->type) {
+ case GDT_EISA:
+ outb(0xff, ha->bmic + EDOORREG);
+ outb(0xff, ha->bmic + EDENABREG);
+ outb(0x01, ha->bmic + EINTENABREG);
+ break;
+ case GDT_ISA:
+ dp2_ptr = (gdt2_dpram_str *)ha->brd;
+ writeb(1, &dp2_ptr->io.irqdel);
+ writeb(0, &dp2_ptr->u.ic.Cmd_Index);
+ writeb(1, &dp2_ptr->io.irqen);
+ break;
+ case GDT_PCI:
+ dp6_ptr = (gdt6_dpram_str *)ha->brd;
+ writeb(1, &dp6_ptr->io.irqdel);
+ writeb(0, &dp6_ptr->u.ic.Cmd_Index);
+ writeb(1, &dp6_ptr->io.irqen);
+ break;
+ case GDT_PCINEW:
+ outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
+ outb(0x03, PTR2USHORT(&ha->plx->control1));
+ break;
+ case GDT_PCIMPR:
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
+ &dp6m_ptr->i960r.edoor_en_reg);
+ break;
+ default:
+ break;
+ }
+ restore_flags(flags);
+}
+
+
+/* Scan all registered controllers attached to 'irq' for a pending
+ * interrupt status. The status byte is stored in *pIStatus; returns
+ * the controller number, or -1 if nothing is pending on this IRQ. */
+static int gdth_get_status(unchar *pIStatus,int irq)
+{
+ register gdth_ha_str *ha;
+ int ctr;
+
+ TRACE(("gdth_get_status() irq %d ctr_count %d\n",
+ irq,gdth_ctr_count));
+
+ *pIStatus = 0;
+ for (ctr=0; ctr<gdth_ctr_count; ++ctr) {
+ ha = HADATA(gdth_ctr_tab[ctr]);
+ if (ha->irq != (unchar)irq) /* check IRQ */
+ continue;
+ switch (ha->type) {
+ case GDT_EISA:
+ *pIStatus = inb((ushort)ha->bmic + EDOORREG);
+ break;
+ case GDT_ISA:
+ *pIStatus = readb(&((gdt2_dpram_str *)ha->brd)->u.ic.Cmd_Index);
+ break;
+ case GDT_PCI:
+ *pIStatus = readb(&((gdt6_dpram_str *)ha->brd)->u.ic.Cmd_Index);
+ break;
+ case GDT_PCINEW:
+ *pIStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
+ break;
+ case GDT_PCIMPR:
+ *pIStatus = readb(&((gdt6m_dpram_str *)ha->brd)->i960r.edoor_reg);
+ break;
+ default:
+ break;
+ }
+ if (*pIStatus)
+ return ctr; /* board found */
+ }
+ return -1;
+}
+
+
+/* Read semaphore 0 of controller hanum and return its busy bit
+ * (1 = controller busy, 0 = idle). */
+static int gdth_test_busy(int hanum)
+{
+ register gdth_ha_str *ha;
+ register int sema = 0;
+
+ TRACE(("gdth_test_busy() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ switch (ha->type) {
+ case GDT_EISA:
+ sema = (int)inb(ha->bmic + SEMA0REG);
+ break;
+ case GDT_ISA:
+ sema = (int)readb(&((gdt2_dpram_str *)ha->brd)->u.ic.Sema0);
+ break;
+ case GDT_PCI:
+ sema = (int)readb(&((gdt6_dpram_str *)ha->brd)->u.ic.Sema0);
+ break;
+ case GDT_PCINEW:
+ sema = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
+ break;
+ case GDT_PCIMPR:
+ sema = (int)readb(&((gdt6m_dpram_str *)ha->brd)->i960r.sema0_reg);
+ break;
+ default:
+ break;
+ }
+ return (sema & 1);
+}
+
+
+/* Allocate a free slot in gdth_cmd_tab[] for controller hanum and
+ * register the pending command from ha->pccb there.
+ * Returns the command index (slot + 2) or 0 if the table is full. */
+static int gdth_get_cmd_index(int hanum)
+{
+ register gdth_ha_str *ha;
+ int slot;
+
+ TRACE(("gdth_get_cmd_index() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ for (slot=0; slot<GDTH_MAXCMDS; ++slot) {
+ if (gdth_cmd_tab[slot][hanum].cmnd != UNUSED_CMND)
+ continue;
+ gdth_cmd_tab[slot][hanum].cmnd = ha->pccb->RequestBuffer;
+ gdth_cmd_tab[slot][hanum].service = ha->pccb->Service;
+ ha->pccb->CommandIndex = (ulong)slot+2;
+ return (slot+2);
+ }
+ return 0; /* no free command slot */
+}
+
+
+/* Set semaphore 0 of controller hanum (mark the controller busy). */
+static void gdth_set_sema0(int hanum)
+{
+ register gdth_ha_str *ha;
+
+ TRACE(("gdth_set_sema0() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ switch (ha->type) {
+ case GDT_EISA:
+ outb(1, ha->bmic + SEMA0REG);
+ break;
+ case GDT_ISA:
+ writeb(1, &((gdt2_dpram_str *)ha->brd)->u.ic.Sema0);
+ break;
+ case GDT_PCI:
+ writeb(1, &((gdt6_dpram_str *)ha->brd)->u.ic.Sema0);
+ break;
+ case GDT_PCINEW:
+ outb(1, PTR2USHORT(&ha->plx->sema0_reg));
+ break;
+ case GDT_PCIMPR:
+ writeb(1, &((gdt6m_dpram_str *)ha->brd)->i960r.sema0_reg);
+ break;
+ default:
+ break;
+ }
+}
+
+
+/*
+ * Copy the command in ha->pccb into the controller's DPMEM command
+ * queue: write the queue entry's offset and service id, then the
+ * command itself. EISA boards have no DPMEM (DMA based), so only the
+ * command counter is advanced for them. Updates ha->cmd_cnt and
+ * ha->cmd_offs_dpmem for the next command.
+ */
+static void gdth_copy_command(int hanum)
+{
+ register gdth_ha_str *ha;
+ register gdth_cmd_str *cmd_ptr;
+ register gdt6m_dpram_str *dp6m_ptr;
+ register gdt6c_dpram_str *dp6c_ptr;
+ gdt6_dpram_str *dp6_ptr;
+ gdt2_dpram_str *dp2_ptr;
+ ushort cp_count,dp_offset,cmd_no;
+
+ TRACE(("gdth_copy_command() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cp_count = ha->cmd_len;
+ dp_offset= ha->cmd_offs_dpmem;
+ cmd_no = ha->cmd_cnt;
+ cmd_ptr = ha->pccb;
+
+ ++ha->cmd_cnt;
+ if (ha->type == GDT_EISA)
+ return; /* no DPMEM, no copy */
+
+ /* set cpcount dword aligned */
+ if (cp_count & 3)
+ cp_count += (4 - (cp_count & 3));
+
+ ha->cmd_offs_dpmem += cp_count;
+
+ /* set offset and service, copy command to DPMEM */
+ if (ha->type == GDT_ISA) {
+ dp2_ptr = (gdt2_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCI) {
+ dp6_ptr = (gdt6_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCINEW) {
+ dp6c_ptr = (gdt6c_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCIMPR) {
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ }
+}
+
+
+/*
+ * Ring the controller's local doorbell (or event register) to make it
+ * pick up the queued command(s). For GDT_INIT commands the service
+ * number is flagged (|0x80) and, on EISA, the DMA buffer address is
+ * passed via the mailbox.
+ */
+static void gdth_release_event(int hanum)
+{
+ register gdth_ha_str *ha;
+
+#ifdef GDTH_STATISTICS
+ /* track the maximum number of commands in flight */
+ ulong i,j;
+ for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
+ if (gdth_cmd_tab[j][hanum].cmnd != UNUSED_CMND)
+ ++i;
+ }
+ if (max_index < i) {
+ max_index = i;
+ TRACE3(("GDT: max_index = %d\n",(ushort)i));
+ }
+#endif
+
+ TRACE(("gdth_release_event() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ if (ha->pccb->OpCode == GDT_INIT)
+ ha->pccb->Service |= 0x80;
+
+ if (ha->type == GDT_EISA) {
+ outb(ha->pccb->Service, ha->bmic + LDOORREG);
+ if (ha->pccb->OpCode == GDT_INIT) /* store DMA buffer */
+ outl((ulong)ha->pccb, ha->bmic + MAILBOXREG);
+ } else if (ha->type == GDT_ISA)
+ writeb(0, &((gdt2_dpram_str *)ha->brd)->io.event);
+ else if (ha->type == GDT_PCI)
+ writeb(0, &((gdt6_dpram_str *)ha->brd)->io.event);
+ else if (ha->type == GDT_PCINEW)
+ outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
+ else if (ha->type == GDT_PCIMPR)
+ writeb(1, &((gdt6m_dpram_str *)ha->brd)->i960r.ldoor_reg);
+}
+
+
+/*
+ * Busy-wait (up to 'time' ms) for completion of command 'index' on
+ * controller hanum by calling the interrupt handler directly in
+ * polling mode (gdth_from_wait is set so the handler records the
+ * answer in wait_hanum/wait_index). index == 0 means nothing to wait
+ * for. Returns TRUE if the answer arrived, FALSE on timeout.
+ */
+static int gdth_wait(int hanum,int index,ulong time)
+{
+ gdth_ha_str *ha;
+ int answer_found = FALSE;
+
+ TRACE(("gdth_wait() hanum %d index %d time %ld\n",hanum,index,time));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ if (index == 0)
+ return 1; /* no wait required */
+
+ gdth_from_wait = TRUE;
+ do {
+/* interrupt handler signature changed in 1.3.70 */
+#if LINUX_VERSION_CODE >= 0x010346
+ gdth_interrupt((int)ha->irq,NULL,NULL);
+#else
+ gdth_interrupt((int)ha->irq,NULL);
+#endif
+ if (wait_hanum==hanum && wait_index==index) {
+ answer_found = TRUE;
+ break;
+ }
+ gdth_delay(1);
+ } while (--time);
+ gdth_from_wait = FALSE;
+
+ /* let the controller drain before returning */
+ while (gdth_test_busy(hanum))
+ gdth_delay(0);
+
+ return (answer_found);
+}
+
+
+/*
+ * Issue an internal (driver-generated) command to 'service' on
+ * controller hanum and wait synchronously for the answer. p1..p3 are
+ * service-specific parameters (see the cache/raw branches below).
+ * Retries up to INIT_RETRIES times while the controller answers busy.
+ * Returns 1 if the command completed with status S_OK, else 0.
+ */
+static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong p1,
+ ulong p2,ulong p3)
+{
+ register gdth_ha_str *ha;
+ register gdth_cmd_str *cmd_ptr;
+ int retries,index;
+
+ TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cmd_ptr = ha->pccb;
+ memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));
+
+ /* make command */
+ for (retries = INIT_RETRIES;;) {
+ cmd_ptr->Service = service;
+ cmd_ptr->RequestBuffer = INTERNAL_CMND;
+ if (!(index=gdth_get_cmd_index(hanum))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+ gdth_set_sema0(hanum);
+ cmd_ptr->OpCode = opcode;
+ cmd_ptr->BoardNode = LOCALBOARD;
+ if (service == CACHESERVICE) {
+ if (opcode == GDT_IOCTL) {
+ /* p1 = subfunction, p2 = channel, p3 = parameter size;
+ data is exchanged through the scratch buffer */
+ cmd_ptr->u.ioctl.subfunc = p1;
+ cmd_ptr->u.ioctl.channel = p2;
+ cmd_ptr->u.ioctl.param_size = (ushort)p3;
+ cmd_ptr->u.ioctl.p_param = virt_to_bus(ha->pscratch);
+ } else {
+ /* p1 = device number, p2 = block number */
+ cmd_ptr->u.cache.DeviceNo = (ushort)p1;
+ cmd_ptr->u.cache.BlockNo = p2;
+ }
+ } else if (service == SCSIRAWSERVICE) {
+ /* p1 = direction, p2 = bus, p3 = target */
+ cmd_ptr->u.raw.direction = p1;
+ cmd_ptr->u.raw.bus = (unchar)p2;
+ cmd_ptr->u.raw.target = (unchar)p3;
+ cmd_ptr->u.raw.lun = 0;
+ }
+ ha->cmd_len = sizeof(gdth_cmd_str);
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_cnt = 0;
+ gdth_copy_command(hanum);
+ gdth_release_event(hanum);
+ gdth_delay(20);
+ if (!gdth_wait(hanum,index,INIT_TIMEOUT)) {
+ printk("GDT: Initialization error (timeout service %d)\n",service);
+ return 0;
+ }
+ if (ha->status != S_BSY || --retries == 0)
+ break;
+ gdth_delay(1);
+ }
+
+ return (ha->status != S_OK ? 0:1);
+}
+
+
+/* search for devices */
+
+/*
+ * Initialize the firmware services of controller hanum and scan its
+ * devices: bring up screen/cache/raw services, detect the SCSI
+ * channels, read cache and board info, negotiate scatter/gather,
+ * reserve user-specified drives, then register raw SCSI devices and
+ * cache host drives (including geometry) in ha->id[][].
+ * Returns 1 on success, 0 on a fatal initialization error.
+ */
+__initfunc (static int gdth_search_drives(int hanum))
+{
+ register gdth_ha_str *ha;
+ ushort cdev_cnt,i;
+ unchar b,t,pos_found;
+ ulong drv_cyls, drv_hds, drv_secs;
+ ulong bus_no;
+ gdth_getch_str *chn;
+ gdth_iochan_str *ioc;
+
+ TRACE(("gdth_search_drives() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ /* initialize controller services, at first: screen service */
+ if (!gdth_internal_cmd(hanum,SCREENSERVICE,GDT_INIT,0,0,0)) {
+ printk("GDT: Initialization error screen service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
+
+ /* initialize cache service */
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_INIT,LINUX_OS,0,0)) {
+ printk("GDT: Initialization error cache service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
+ cdev_cnt = (ushort)ha->info;
+
+ /* mount all cache devices */
+ gdth_internal_cmd(hanum,CACHESERVICE,GDT_MOUNT,0xffff,1,0);
+ TRACE2(("gdth_search_drives(): mountall CACHESERVICE OK\n"));
+
+ /* initialize cache service after mountall */
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_INIT,LINUX_OS,0,0)) {
+ printk("GDT: Initialization error cache service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives() CACHES. init. after mountall\n"));
+ /* re-read: the host drive count may have changed after mountall */
+ cdev_cnt = (ushort)ha->info;
+
+ /* detect number of SCSI buses - try new IOCTL */
+ ioc = (gdth_iochan_str *)DMADATA(gdth_ctr_tab[hanum]);
+ ioc->version = -1UL;
+ ioc->list_entries = MAXBUS;
+ ioc->first_chan = 0;
+ ioc->last_chan = MAXBUS-1;
+ ioc->list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,GET_IOCHAN_DESC,
+ INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
+ TRACE2(("GET_IOCHAN_DESC supported!\n"));
+ ha->bus_cnt = ioc->chan_count;
+ /* mark each channel's own processor ID so it is never scanned */
+ for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no)
+ if (ioc->list[bus_no].proc_id < MAXID)
+ ha->id[bus_no][ioc->list[bus_no].proc_id].type = SIOP_DTYP;
+ } else {
+ /* old method */
+ chn = (gdth_getch_str *)DMADATA(gdth_ctr_tab[hanum]);
+ for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
+ chn->channel_no = bus_no;
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
+ SCSI_CHAN_CNT | L_CTRL_PATTERN,
+ IO_CHANNEL | INVALID_CHANNEL,
+ sizeof(gdth_getch_str))) {
+ if (bus_no == 0) {
+ /* not even one channel answers: fatal */
+ printk("GDT: Error detecting SCSI channel count (0x%x)\n",
+ ha->status);
+ return 0;
+ }
+ break;
+ }
+ if (chn->siop_id < MAXID)
+ ha->id[bus_no][chn->siop_id].type = SIOP_DTYP;
+ }
+ ha->bus_cnt = (unchar)bus_no;
+ }
+ TRACE2(("gdth_search_drives() %d SCSI channels\n",ha->bus_cnt));
+
+ /* read cache configuration */
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_INFO,
+ INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
+ printk("GDT: Initialization error cache service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ ha->cpar = ((gdth_cinfo_str *)DMADATA(gdth_ctr_tab[hanum]))->cpar;
+ TRACE2(("gdth_search_drives() cinfo: vs %lx sta %d str %d dw %d b %d\n",
+ ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
+ ha->cpar.write_back,ha->cpar.block_size));
+
+ /* read board info, fill ctr_name[] */
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,BOARD_INFO,
+ INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
+ TRACE2(("BOARD_INFO supported!\n"));
+ strcpy(ha->ctr_name, ((gdth_binfo_str *)DMADATA(gdth_ctr_tab[hanum]))->type_string);
+ } else {
+ strcpy(ha->ctr_name, gdth_ctr_name(hanum));
+ }
+ TRACE2(("Controller name: %s\n",ha->ctr_name));
+
+ /* initialize raw service */
+ if (!gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_INIT,0,0,0)) {
+ printk("GDT: Initialization error raw service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
+
+ /* set/get features raw service (scatter/gather) */
+ ha->raw_feat = 0;
+ if (gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_SET_FEAT,SCATTER_GATHER,
+ 0,0)) {
+ TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
+ if (gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_GET_FEAT,0,0,0)) {
+ TRACE2(("gdth_search_dr(): get feat RAWSERVICE %ld\n",
+ ha->info));
+ ha->raw_feat = (ushort)ha->info;
+ }
+ }
+
+ /* set/get features cache service (equal to raw service) */
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_SET_FEAT,0,
+ SCATTER_GATHER,0)) {
+ TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_GET_FEAT,0,0,0)) {
+ TRACE2(("gdth_search_dr(): get feat CACHESERV. %ld\n",
+ ha->info));
+ ha->cache_feat = (ushort)ha->info;
+ }
+ }
+
+ /* reserve drives for raw service */
+ for (i = 0; reserve_list[i].hanum != 0xff; ++i) {
+ if (reserve_list[i].hanum < MAXHA && reserve_list[i].hanum == hanum &&
+ reserve_list[i].bus < MAXBUS && reserve_list[i].id < MAXID) {
+ TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d\n",
+ reserve_list[i].hanum, reserve_list[i].bus,
+ reserve_list[i].id));
+ if (!gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_RESERVE,0,
+ reserve_list[i].bus, reserve_list[i].id)) {
+ printk("GDT: Error raw service (RESERVE, code %d)\n",
+ ha->status);
+ }
+ }
+ }
+
+ /* scanning for raw devices */
+ for (b=0; b<ha->bus_cnt; ++b) {
+ for (t=0; t<MAXID; ++t) {
+ TRACE(("gdth_search_drives() rawd. bus %d id %d\n",b,t));
+ if (ha->id[b][t].type != SIOP_DTYP &&
+ gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_INFO,0,b,t)) {
+ ha->id[b][t].type = RAW_DTYP;
+ }
+ }
+ }
+
+ /* scanning for cache devices */
+ for (i=0; i<cdev_cnt && i<MAX_HDRIVES; ++i) {
+ TRACE(("gdth_search_drives() cachedev. %d\n",i));
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_INFO,i,0,0)) {
+ /* dynamic relation between host drive number and Bus/ID */
+ /* search free position */
+ pos_found = FALSE;
+ for (b=0,t=0; b<ha->bus_cnt; ++b) {
+ for (t=0; t<MAXID; ++t) {
+ if (ha->id[b][t].type == EMPTY_DTYP) {
+ pos_found = TRUE;
+ break;
+ }
+ }
+ if (pos_found)
+ break;
+ }
+ TRACE(("gdth_search_dr() drive %d free pos at bus/id %d/%d\n",
+ i,b,t));
+
+ ha->id[b][t].type = CACHE_DTYP;
+ ha->id[b][t].devtype = 0;
+ ha->id[b][t].size = ha->info;
+ ha->id[b][t].hostdrive = i;
+
+ /* evaluate mapping (sectors per head, heads per cylinder) */
+ ha->id[b][t].size &= ~SECS32;
+ if (ha->info2 == 0) {
+ /* no geometry from firmware: choose 64*32, 127*63 or
+ 255*63 so the cylinder count stays below MAXCYLS */
+ drv_cyls = ha->id[b][t].size /HEADS/SECS;
+ if (drv_cyls <= MAXCYLS) {
+ drv_hds = HEADS;
+ drv_secs= SECS;
+ } else { /* too high for 64*32 */
+ drv_cyls = ha->id[b][t].size /MEDHEADS/MEDSECS;
+ if (drv_cyls <= MAXCYLS) {
+ drv_hds = MEDHEADS;
+ drv_secs= MEDSECS;
+ } else { /* too high for 127*63 */
+ drv_cyls = ha->id[b][t].size /BIGHEADS/BIGSECS;
+ drv_hds = BIGHEADS;
+ drv_secs= BIGSECS;
+ }
+ }
+ } else {
+ /* geometry reported by firmware: heads in low byte,
+ sectors in the next byte of info2 */
+ drv_hds = ha->info2 & 0xff;
+ drv_secs = (ha->info2 >> 8) & 0xff;
+ drv_cyls = ha->id[b][t].size /drv_hds/drv_secs;
+ }
+ ha->id[b][t].heads = (unchar)drv_hds;
+ ha->id[b][t].secs = (unchar)drv_secs;
+ /* round size */
+ ha->id[b][t].size = drv_cyls * drv_hds * drv_secs;
+ TRACE2(("gdth_search_dr() cdr. %d size %ld hds %ld scs %ld\n",
+ i,ha->id[b][t].size,drv_hds,drv_secs));
+
+ /* get informations about device */
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_DEVTYPE,i,
+ 0,0)) {
+ TRACE(("gdth_search_dr() cache drive %d devtype %ld\n",
+ i,ha->info));
+ ha->id[b][t].devtype = (ushort)ha->info;
+ }
+ }
+ }
+
+ TRACE(("gdth_search_drives() OK\n"));
+ return 1;
+}
+
+
+/* command queueing/sending functions */
+
+/*
+ * Insert SCSI command scp into the per-controller request queue,
+ * ordered by priority (0 = highest .. 0xff = lowest). The priority is
+ * stored in scp->SCp.this_residual and the queue is singly linked
+ * through SCp.ptr; the command timeout is stretched while it waits.
+ *
+ * Fix: the pre-2.0 branch read NUMDATA(nscp->host), but nscp is still
+ * uninitialized at that point — it must use scp, the command being
+ * queued.
+ */
+static void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority)
+{
+ register gdth_ha_str *ha;
+ register Scsi_Cmnd *pscp;
+ register Scsi_Cmnd *nscp;
+ ulong flags;
+ unchar b, t;
+
+ TRACE(("gdth_putq() priority %d\n",priority));
+ save_flags(flags);
+ cli();
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ scp->SCp.this_residual = (int)priority;
+ gdth_update_timeout(hanum, scp, scp->timeout_per_command * 6);
+#if LINUX_VERSION_CODE >= 0x020000
+ b = scp->channel;
+#else
+ b = NUMDATA(scp->host)->busnum;
+#endif
+ t = scp->target;
+#if LINUX_VERSION_CODE >= 0x010300
+ /* locked drives: freeze the timeout so the command does not expire */
+ if (priority >= DEFAULT_PRI && ha->id[b][t].lock) {
+ TRACE2(("gdth_putq(): locked IO -> update_timeout()\n"));
+ scp->SCp.buffers_residual = gdth_update_timeout(hanum, scp, 0);
+ }
+#endif
+
+ if (ha->req_first==NULL) {
+ ha->req_first = scp; /* queue was empty */
+ scp->SCp.ptr = NULL;
+ } else { /* queue not empty */
+ pscp = ha->req_first;
+ nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ /* priority: 0-highest,..,0xff-lowest */
+ while (nscp && (unchar)nscp->SCp.this_residual <= priority) {
+ pscp = nscp;
+ nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ }
+ pscp->SCp.ptr = (char *)scp;
+ scp->SCp.ptr = (char *)nscp;
+ }
+ restore_flags(flags);
+
+#ifdef GDTH_STATISTICS
+ /* track the maximum queue length */
+ flags = 0;
+ for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ ++flags;
+ if (max_rq < flags) {
+ max_rq = flags;
+ TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq));
+ }
+#endif
+}
+
+/* Dispatch queued commands to controller 'hanum'.  Walks the priority-
+ * ordered request queue built by gdth_putq(), builds a firmware command
+ * block for each startable command (special/raw/cache path), unlinks it
+ * from the queue, and finally rings the controller doorbell once via
+ * gdth_release_event().  Runs with interrupts disabled; in polling mode
+ * it dispatches at most one command and then waits for its completion. */
+static void gdth_next(int hanum)
+{
+    register gdth_ha_str *ha;
+    register Scsi_Cmnd *pscp;
+    register Scsi_Cmnd *nscp;
+    unchar b, t, next_cmd, firsttime;
+    ushort hdrive;
+    ulong flags;
+    int cmd_index;
+
+    TRACE(("gdth_next() hanum %d\n",hanum));
+    save_flags(flags);
+    cli();
+
+    ha = HADATA(gdth_ctr_tab[hanum]);
+    ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
+    next_cmd = firsttime = TRUE;
+    cmd_index = 0;
+
+    /* 'pscp' trails behind 'nscp' so a dispatched entry can be unlinked */
+    for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
+        if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
+            pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+#if LINUX_VERSION_CODE >= 0x020000
+        b = nscp->channel;
+#else
+        b = NUMDATA(nscp->host)->busnum;
+#endif
+        t = nscp->target;
+        /* skip commands to locked devices unless they are high priority */
+        if (nscp->SCp.this_residual < DEFAULT_PRI || !ha->id[b][t].lock) {
+
+            if (firsttime) {
+                if (gdth_test_busy(hanum)) { /* controller busy ? */
+                    TRACE(("gdth_next() controller %d busy !\n",hanum));
+                    if (!gdth_polling) {
+                        restore_flags(flags);
+                        return;
+                    }
+                    /* polling mode: spin until the controller is free */
+                    while (gdth_test_busy(hanum))
+                        gdth_delay(1);
+                }
+                firsttime = FALSE;
+            }
+
+#if LINUX_VERSION_CODE >= 0x010300
+            /* internal commands carry a prebuilt gdth_cmd_str */
+            if (nscp->done == gdth_scsi_done) {
+                if (!(cmd_index=gdth_special_cmd(hanum,nscp,b)))
+                    next_cmd = FALSE;   /* no slot/space: stop dispatching */
+            } else
+#endif
+            if (ha->id[b][t].type != CACHE_DTYP) {
+                /* non-cache device: pass the CDB through the raw service */
+                if (!(cmd_index=gdth_fill_raw_cmd(hanum,nscp,b)))
+                    next_cmd = FALSE;
+            } else {
+                /* cache (host drive) device: translate the SCSI command */
+                hdrive = ha->id[b][t].hostdrive;
+                switch (nscp->cmnd[0]) {
+                  case TEST_UNIT_READY:
+                  case INQUIRY:
+                  case REQUEST_SENSE:
+                  case READ_CAPACITY:
+                  case VERIFY:
+                  case START_STOP:
+                  case MODE_SENSE:
+                    /* emulated entirely in the driver, no firmware call */
+                    TRACE2(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+                            nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+                            nscp->cmnd[4],nscp->cmnd[5]));
+                    gdth_internal_cache_cmd(hanum,nscp,b,&flags);
+                    break;
+
+                  case ALLOW_MEDIUM_REMOVAL:
+                    TRACE2(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+                            nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+                            nscp->cmnd[4],nscp->cmnd[5]));
+                    if ( (nscp->cmnd[4]&1) && !(ha->id[b][t].devtype&1) ) {
+                        /* PREVENT on a non-removable drive: succeed as no-op */
+                        TRACE2(("Prevent r. nonremov. drive->do nothing\n"));
+                        nscp->result = DID_OK << 16;
+                        /* complete with interrupts briefly re-enabled;
+                         * have_data_in doubles as a "completed once" flag */
+                        restore_flags( flags );
+                        if (!nscp->SCp.have_data_in)
+                            nscp->SCp.have_data_in++;
+                        else
+                            nscp->scsi_done(nscp);
+                        save_flags( flags );
+                        cli();
+                    } else {
+                        /* map to firmware mount/unmount/flush */
+                        nscp->cmnd[3] = (ha->id[b][t].devtype&1) ? 1:0;
+                        TRACE2(("Prevent/allow r. %d rem. drive %d\n",
+                                nscp->cmnd[4],nscp->cmnd[3]));
+                        if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,hdrive)))
+                            next_cmd = FALSE;
+                    }
+                    break;
+
+                  case READ_6:
+                  case WRITE_6:
+                  case READ_10:
+                  case WRITE_10:
+                    if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,hdrive)))
+                        next_cmd = FALSE;
+                    break;
+
+                  default:
+                    TRACE2(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+                            nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+                            nscp->cmnd[4],nscp->cmnd[5]));
+                    printk("GDT: Unknown SCSI command 0x%x to cache service !\n",
+                           nscp->cmnd[0]);
+                    nscp->result = DID_ABORT << 16;
+                    restore_flags( flags );
+                    if (!nscp->SCp.have_data_in)
+                        nscp->SCp.have_data_in++;
+                    else
+                        nscp->scsi_done(nscp);
+                    save_flags( flags );
+                    cli();
+                    break;
+                }
+            }
+
+            if (!next_cmd)
+                break;              /* controller full: leave rest queued */
+            /* unlink the dispatched command from the request queue */
+            if (nscp == ha->req_first)
+                ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
+            else
+                pscp->SCp.ptr = nscp->SCp.ptr;
+            if (gdth_polling)
+                break;              /* polling: one command at a time */
+        }
+    }
+
+    if (ha->cmd_cnt > 0) {
+        gdth_release_event(hanum);  /* notify the firmware once */
+    }
+
+    restore_flags(flags);
+
+    if (gdth_polling && ha->cmd_cnt > 0) {
+        if (!gdth_wait(hanum,cmd_index,POLL_TIMEOUT))
+            printk("GDT: Controller %d: Command %d timed out !\n",
+                   hanum,cmd_index);
+    }
+}
+
+/* Copy at most 'count' bytes of driver-generated data into the data
+ * buffer(s) of a SCSI command, honouring a scatter-gather list when the
+ * command uses one.  Never writes more than the command's buffer length. */
+static void gdth_copy_internal_data(Scsi_Cmnd *scp,char *buffer,ushort count)
+{
+    register struct scatterlist *seg;
+    ushort total, copied, chunk, idx;
+
+    /* clip the request to the command's buffer size */
+    total = ((ushort)scp->bufflen < count) ? (ushort)scp->bufflen : count;
+
+    if (!scp->use_sg) {
+        /* single flat buffer: one memcpy is enough */
+        TRACE(("copy_internal() count %d\n",total));
+        memcpy((char*)scp->request_buffer,buffer,total);
+        return;
+    }
+
+    seg = (struct scatterlist *)scp->request_buffer;
+    copied = 0;
+    for (idx = 0; idx < scp->use_sg; ++idx, ++seg) {
+        chunk = (ushort)seg->length;
+        TRACE(("copy_internal() now %d sum %d count %d %d\n",
+               chunk,copied,total,(ushort)scp->bufflen));
+        if (copied + chunk > total)
+            chunk = total - copied;     /* partial final segment */
+        copied += chunk;
+        memcpy((char*)seg->address,buffer,chunk);
+        if (copied == total)
+            break;
+        buffer += chunk;
+    }
+}
+
+/* Emulate simple SCSI commands (INQUIRY, REQUEST SENSE, MODE SENSE,
+ * READ CAPACITY, ...) for cache "host drives" entirely in the driver,
+ * without talking to the firmware.  Fills the reply data, sets
+ * scp->result and completes the command.  'flags' points at the caller's
+ * saved interrupt flags; they are restored around the completion call
+ * and re-saved afterwards.  Always returns 1. */
+static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp,
+                                   unchar b,ulong *flags)
+{
+    register gdth_ha_str *ha;
+    ushort hdrive;
+    unchar t;
+    gdth_inq_data inq;
+    gdth_rdcap_data rdc;
+    gdth_sense_data sd;
+    gdth_modep_data mpd;
+
+    ha = HADATA(gdth_ctr_tab[hanum]);
+    t = scp->target;
+    hdrive = ha->id[b][t].hostdrive;
+    TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
+           scp->cmnd[0],hdrive));
+
+    /* host drives only exist at LUN 0 */
+    if (scp->lun !=0)
+        scp->result = DID_BAD_TARGET << 16;
+    else {
+        switch (scp->cmnd[0]) {
+          case TEST_UNIT_READY:
+          case VERIFY:
+          case START_STOP:
+            /* nothing to do: host drives are always ready */
+            TRACE2(("Test/Verify/Start hdrive %d\n",hdrive));
+            break;
+
+          case INQUIRY:
+            TRACE2(("Inquiry hdrive %d devtype %d\n",
+                    hdrive,ha->id[b][t].devtype));
+            inq.type_qual = (ha->id[b][t].devtype&4) ? TYPE_ROM:TYPE_DISK;
+            /* you can here set all disks to removable, if you want to do
+               a flush using the ALLOW_MEDIUM_REMOVAL command */
+            inq.modif_rmb = ha->id[b][t].devtype&1 ? 0x80:0x00;
+            inq.version   = 2;
+            inq.resp_aenc = 2;
+            inq.add_length= 32;
+            strcpy(inq.vendor,"ICP    ");
+            sprintf(inq.product,"Host Drive  #%02d",hdrive);
+            strcpy(inq.revision,"   ");
+            gdth_copy_internal_data(scp,(char*)&inq,sizeof(gdth_inq_data));
+            break;
+
+          case REQUEST_SENSE:
+            /* synthesize a "no sense" reply */
+            TRACE2(("Request sense hdrive %d\n",hdrive));
+            sd.errorcode = 0x70;
+            sd.segno     = 0x00;
+            sd.key       = NO_SENSE;
+            sd.info      = 0;
+            sd.add_length= 0;
+            gdth_copy_internal_data(scp,(char*)&sd,sizeof(gdth_sense_data));
+            break;
+
+          case MODE_SENSE:
+            TRACE2(("Mode sense hdrive %d\n",hdrive));
+            memset((char*)&mpd,0,sizeof(gdth_modep_data));
+            mpd.hd.data_length = sizeof(gdth_modep_data);
+            mpd.hd.dev_par     = (ha->id[b][t].devtype&2) ? 0x80:0;
+            mpd.hd.bd_length   = sizeof(mpd.bd);
+            /* block length is stored big-endian, byte by byte */
+            mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
+            mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
+            mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
+            gdth_copy_internal_data(scp,(char*)&mpd,sizeof(gdth_modep_data));
+            break;
+
+          case READ_CAPACITY:
+            TRACE2(("Read capacity hdrive %d\n",hdrive));
+            /* SCSI READ CAPACITY replies are big-endian, hence ntohl */
+            rdc.last_block_no = ntohl(ha->id[b][t].size-1);
+            rdc.block_length  = ntohl(SECTOR_SIZE);
+            gdth_copy_internal_data(scp,(char*)&rdc,sizeof(gdth_rdcap_data));
+            break;
+
+          default:
+            TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
+            break;
+        }
+        scp->result = DID_OK << 16;
+    }
+
+    /* complete the command with interrupts briefly re-enabled;
+     * have_data_in doubles as a "completed once" marker (see gdth_next) */
+    restore_flags(*flags);
+    if (!scp->SCp.have_data_in)
+        scp->SCp.have_data_in++;
+    else
+        scp->scsi_done(scp);
+    save_flags(*flags);
+    cli();
+    return 1;
+}
+
+/* Build a firmware cache-service command for host drive 'hdrive' from the
+ * SCSI command 'scp' (read/write or mount/unmount/flush) and copy it to
+ * the controller.  Returns the allocated command index (>= 2), or 0 when
+ * no command slot or DPMEM space is available (caller then requeues). */
+static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive)
+{
+    register gdth_ha_str *ha;
+    register gdth_cmd_str *cmdp;
+    struct scatterlist *sl;
+    ushort i;
+    int cmd_index;
+
+    ha = HADATA(gdth_ctr_tab[hanum]);
+    cmdp = ha->pccb;
+    TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
+           scp->cmnd[0],scp->cmd_len,hdrive));
+
+    /* EISA controllers can only hold one command at a time */
+    if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+        return 0;
+
+    cmdp->Service = CACHESERVICE;
+    cmdp->RequestBuffer = scp;
+    /* search free command index */
+    if (!(cmd_index=gdth_get_cmd_index(hanum))) {
+        TRACE(("GDT: No free command index found\n"));
+        return 0;
+    }
+    /* if it's the first command, set command semaphore */
+    if (ha->cmd_cnt == 0)
+        gdth_set_sema0(hanum);
+
+    /* fill command */
+    if (scp->cmnd[0]==ALLOW_MEDIUM_REMOVAL) {
+        /* cmnd[3] was set by gdth_next(): 1 = removable drive */
+        if (scp->cmnd[4] & 1)                   /* prevent ? */
+            cmdp->OpCode      = GDT_MOUNT;
+        else if (scp->cmnd[3] & 1)              /* removable drive ? */
+            cmdp->OpCode      = GDT_UNMOUNT;
+        else
+            cmdp->OpCode      = GDT_FLUSH;
+    } else {
+        if (scp->cmnd[0]==WRITE_6 || scp->cmnd[0]==WRITE_10) {
+            if (gdth_write_through)
+                cmdp->OpCode  = GDT_WRITE_THR;
+            else
+                cmdp->OpCode  = GDT_WRITE;
+        } else {
+            cmdp->OpCode      = GDT_READ;
+        }
+    }
+
+    cmdp->BoardNode           = LOCALBOARD;
+    cmdp->u.cache.DeviceNo    = hdrive;
+
+    if (scp->cmnd[0]==ALLOW_MEDIUM_REMOVAL) {
+        cmdp->u.cache.BlockNo = 1;
+        cmdp->u.cache.sg_canz = 0;
+    } else {
+        /* decode block address/count from the 6- or 10-byte CDB;
+         * CDB fields are big-endian, hence ntohl/ntohs */
+        if (scp->cmd_len != 6) {
+            cmdp->u.cache.BlockNo = ntohl(*(ulong*)&scp->cmnd[2]);
+            cmdp->u.cache.BlockCnt= (ulong)ntohs(*(ushort*)&scp->cmnd[7]);
+        } else {
+            /* 6-byte CDB: 21-bit LBA, count 0 means 256 blocks */
+            cmdp->u.cache.BlockNo = ntohl(*(ulong*)&scp->cmnd[0]) & 0x001fffffUL;
+            cmdp->u.cache.BlockCnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
+        }
+
+        if (scp->use_sg) {
+            /* scatter-gather: DestAddr of -1 selects the sg list */
+            cmdp->u.cache.DestAddr= -1UL;
+            sl = (struct scatterlist *)scp->request_buffer;
+            for (i=0; i<scp->use_sg; ++i,++sl) {
+                cmdp->u.cache.sg_lst[i].sg_ptr = virt_to_bus(sl->address);
+                cmdp->u.cache.sg_lst[i].sg_len = (ulong)sl->length;
+            }
+            cmdp->u.cache.sg_canz = (ulong)i;
+
+#ifdef GDTH_STATISTICS
+            if (max_sg < (ulong)i) {
+                max_sg = (ulong)i;
+                TRACE3(("GDT: max_sg = %d\n",i));
+            }
+#endif
+            /* terminate the list for the firmware */
+            if (i<GDTH_MAXSG)
+                cmdp->u.cache.sg_lst[i].sg_len = 0;
+        } else {
+            if (ha->cache_feat & SCATTER_GATHER) {
+                /* firmware supports SG: use a single-entry list */
+                cmdp->u.cache.DestAddr = -1UL;
+                cmdp->u.cache.sg_canz = 1;
+                cmdp->u.cache.sg_lst[0].sg_ptr = virt_to_bus(scp->request_buffer);
+                cmdp->u.cache.sg_lst[0].sg_len = scp->request_bufflen;
+                cmdp->u.cache.sg_lst[1].sg_len = 0;
+            } else {
+                /* plain linear transfer */
+                cmdp->u.cache.DestAddr  = virt_to_bus(scp->request_buffer);
+                cmdp->u.cache.sg_canz= 0;
+            }
+        }
+    }
+    TRACE(("cache cmd: addr. %lx sganz %lx sgptr0 %lx sglen0 %lx\n",
+           cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
+           cmdp->u.cache.sg_lst[0].sg_ptr,
+           cmdp->u.cache.sg_lst[0].sg_len));
+    TRACE(("cache cmd: cmd %d blockno. %ld, blockcnt %ld\n",
+           cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
+
+    /* evaluate command size, check space; round up to 4-byte multiple */
+    ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
+        (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
+    if (ha->cmd_len & 3)
+        ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+    if (ha->cmd_cnt > 0) {
+        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+            ha->ic_all_size) {
+            /* out of dual-port memory: release the slot and give up */
+            TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
+            gdth_cmd_tab[cmd_index-2][hanum].cmnd = UNUSED_CMND;
+            return 0;
+        }
+    }
+
+    /* copy command */
+    gdth_copy_command(hanum);
+    return cmd_index;
+}
+
+/* Build a firmware raw-service (SCSI pass-through) command on bus 'b'
+ * from 'scp' and copy it to the controller.  Returns the allocated
+ * command index (>= 2), or 0 when no slot or DPMEM space is available. */
+static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b)
+{
+    register gdth_ha_str *ha;
+    register gdth_cmd_str *cmdp;
+    struct scatterlist *sl;
+    ushort i;
+    int cmd_index;
+    unchar t,l;
+
+    ha = HADATA(gdth_ctr_tab[hanum]);
+    t = scp->target;
+    l = scp->lun;
+    cmdp = ha->pccb;
+    TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
+           scp->cmnd[0],b,t,l));
+
+    /* EISA controllers can only hold one command at a time */
+    if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+        return 0;
+
+    cmdp->Service = SCSIRAWSERVICE;
+    cmdp->RequestBuffer = scp;
+    /* search free command index */
+    if (!(cmd_index=gdth_get_cmd_index(hanum))) {
+        TRACE(("GDT: No free command index found\n"));
+        return 0;
+    }
+    /* if it's the first command, set command semaphore */
+    if (ha->cmd_cnt == 0)
+        gdth_set_sema0(hanum);
+
+    /* fill command; the raw service uses GDT_WRITE for every request
+     * and gets the actual direction from u.raw.direction below */
+    cmdp->OpCode           = GDT_WRITE;         /* always */
+    cmdp->BoardNode        = LOCALBOARD;
+    cmdp->u.raw.reserved   = 0;
+    cmdp->u.raw.mdisc_time = 0;
+    cmdp->u.raw.mcon_time  = 0;
+    cmdp->u.raw.clen       = scp->cmd_len;
+    cmdp->u.raw.target     = t;
+    cmdp->u.raw.lun        = l;
+    cmdp->u.raw.bus        = b;
+    cmdp->u.raw.priority   = 0;
+    cmdp->u.raw.link_p     = NULL;
+    cmdp->u.raw.sdlen      = scp->request_bufflen;
+    cmdp->u.raw.sense_len  = 16;
+    cmdp->u.raw.sense_data = virt_to_bus(scp->sense_buffer);
+    /* direction is looked up per opcode in gdth_direction_tab */
+    cmdp->u.raw.direction  =
+        gdth_direction_tab[scp->cmnd[0]]==DOU ? DATA_OUT : DATA_IN;
+    memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
+
+    if (scp->use_sg) {
+        /* scatter-gather: sdata of -1 selects the sg list */
+        cmdp->u.raw.sdata  = -1UL;
+        sl = (struct scatterlist *)scp->request_buffer;
+        for (i=0; i<scp->use_sg; ++i,++sl) {
+            cmdp->u.raw.sg_lst[i].sg_ptr = virt_to_bus(sl->address);
+            cmdp->u.raw.sg_lst[i].sg_len = (ulong)sl->length;
+        }
+        cmdp->u.raw.sg_ranz = (ulong)i;
+
+#ifdef GDTH_STATISTICS
+        if (max_sg < (ulong)i) {
+            max_sg = (ulong)i;
+            TRACE3(("GDT: max_sg = %d\n",i));
+        }
+#endif
+        /* terminate the list for the firmware */
+        if (i<GDTH_MAXSG)
+            cmdp->u.raw.sg_lst[i].sg_len = 0;
+    } else {
+        if (ha->raw_feat & SCATTER_GATHER) {
+            /* firmware supports SG: use a single-entry list */
+            cmdp->u.raw.sdata  = -1UL;
+            cmdp->u.raw.sg_ranz= 1;
+            cmdp->u.raw.sg_lst[0].sg_ptr = virt_to_bus(scp->request_buffer);
+            cmdp->u.raw.sg_lst[0].sg_len = scp->request_bufflen;
+            cmdp->u.raw.sg_lst[1].sg_len = 0;
+        } else {
+            /* plain linear transfer */
+            cmdp->u.raw.sdata  = virt_to_bus(scp->request_buffer);
+            cmdp->u.raw.sg_ranz= 0;
+        }
+    }
+    TRACE(("raw cmd: addr. %lx sganz %lx sgptr0 %lx sglen0 %lx\n",
+           cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
+           cmdp->u.raw.sg_lst[0].sg_ptr,
+           cmdp->u.raw.sg_lst[0].sg_len));
+
+    /* evaluate command size, check space; round up to 4-byte multiple */
+    ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
+        (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
+    if (ha->cmd_len & 3)
+        ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+    if (ha->cmd_cnt > 0) {
+        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+            ha->ic_all_size) {
+            /* out of dual-port memory: release the slot and give up */
+            TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
+            gdth_cmd_tab[cmd_index-2][hanum].cmnd = UNUSED_CMND;
+            return 0;
+        }
+    }
+
+    /* copy command */
+    gdth_copy_command(hanum);
+    return cmd_index;
+}
+
+/* Submit an internal/driver-generated command: scp->request_buffer
+ * already contains a complete gdth_cmd_str (this path is selected in
+ * gdth_next() when scp->done == gdth_scsi_done).  Returns the allocated
+ * command index (>= 2), or 0 when no slot or DPMEM space is available. */
+static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp,unchar b)
+{
+    register gdth_ha_str *ha;
+    register gdth_cmd_str *cmdp;
+    int cmd_index;
+
+    ha  = HADATA(gdth_ctr_tab[hanum]);
+    cmdp= ha->pccb;
+    TRACE2(("gdth_special_cmd(): "));
+
+    /* EISA controllers can only hold one command at a time */
+    if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+        return 0;
+
+    /* the caller prebuilt the whole firmware command block */
+    memcpy( cmdp, scp->request_buffer, sizeof(gdth_cmd_str));
+    cmdp->RequestBuffer = scp;
+
+    /* search free command index */
+    if (!(cmd_index=gdth_get_cmd_index(hanum))) {
+        TRACE(("GDT: No free command index found\n"));
+        return 0;
+    }
+
+    /* if it's the first command, set command semaphore */
+    if (ha->cmd_cnt == 0)
+        gdth_set_sema0(hanum);
+
+    /* evaluate command size, check space; the size depends on which
+     * service/opcode the prebuilt command targets */
+    if (cmdp->OpCode == GDT_IOCTL) {
+        TRACE2(("IOCTL\n"));
+        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong);
+    } else if (cmdp->Service == CACHESERVICE) {
+        TRACE2(("cache command %d\n",cmdp->OpCode));
+        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
+    } else if (cmdp->Service == SCSIRAWSERVICE) {
+        TRACE2(("raw command %d/%d\n",cmdp->OpCode,cmdp->u.raw.cmd[0]));
+        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
+    }
+
+    /* round up to a 4-byte multiple */
+    if (ha->cmd_len & 3)
+        ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+    if (ha->cmd_cnt > 0) {
+        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+            ha->ic_all_size) {
+            /* out of dual-port memory: release the slot and give up */
+            TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
+            gdth_cmd_tab[cmd_index-2][hanum].cmnd = UNUSED_CMND;
+            return 0;
+        }
+    }
+
+    /* copy command */
+    gdth_copy_command(hanum);
+    return cmd_index;
+}
+
+
+/* Controller event handling functions */
+/* Controller event handling functions */
+
+/* Record an event in the global ring buffer 'ebuffer'.  An event that is
+ * identical to the newest stored entry (same source, index and payload)
+ * is coalesced by bumping its same_count and timestamp instead of using
+ * a new slot; otherwise the ring advances, overwriting the oldest entry
+ * when full.  Returns a pointer to the stored entry, or 0 for source 0.
+ * Runs with interrupts disabled while touching the ring. */
+static gdth_evt_str *gdth_store_event(ushort source, ushort idx,
+                                      gdth_evt_data *evt)
+{
+    gdth_evt_str *e;
+    ulong flags;
+    struct timeval tv;
+
+    TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
+    if (source == 0)                        /* no source -> no event */
+        return 0;
+
+    save_flags(flags);
+    cli();
+    if (ebuffer[elastidx].event_source == source &&
+        ebuffer[elastidx].event_idx == idx &&
+        !memcmp((char *)&ebuffer[elastidx].event_data.eu,
+                (char *)&evt->eu, evt->size)) {
+        /* duplicate of the newest entry: coalesce */
+        e = &ebuffer[elastidx];
+        do_gettimeofday(&tv);
+        e->last_stamp = tv.tv_sec;
+        ++e->same_count;
+    } else {
+        if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
+            ++elastidx;
+            if (elastidx == MAX_EVENTS)
+                elastidx = 0;               /* wrap around the ring */
+            if (elastidx == eoldidx) {      /* reached mark ? */
+                /* ring full: drop the oldest entry */
+                ++eoldidx;
+                if (eoldidx == MAX_EVENTS)
+                    eoldidx = 0;
+            }
+        }
+        e = &ebuffer[elastidx];
+        e->event_source = source;
+        e->event_idx = idx;
+        do_gettimeofday(&tv);
+        e->first_stamp = e->last_stamp = tv.tv_sec;
+        e->same_count = 1;
+        e->event_data = *evt;
+    }
+    restore_flags(flags);
+    return e;
+}
+
+/* Read one entry from the event ring buffer.  'handle' is the slot to
+ * read, or -1 for the oldest entry.  The entry is copied into *estr
+ * (estr->event_source stays 0 when the slot is empty or out of range).
+ * Returns the handle of the next entry, or -1 after the newest one. */
+static int gdth_read_event(int handle, gdth_evt_str *estr)
+{
+    gdth_evt_str *entry;
+    int pos;
+    ulong flags;
+
+    TRACE2(("gdth_read_event() handle %d\n", handle));
+    save_flags(flags);
+    cli();
+    pos = (handle == -1) ? eoldidx : handle;
+    estr->event_source = 0;
+
+    if (pos >= MAX_EVENTS) {
+        /* out-of-range handle: hand it straight back */
+        restore_flags(flags);
+        return pos;
+    }
+    entry = &ebuffer[pos];
+    if (entry->event_source != 0) {
+        /* advance to the following slot, or flag end-of-buffer */
+        if (pos == elastidx) {
+            pos = -1;
+        } else if (++pos == MAX_EVENTS) {
+            pos = 0;
+        }
+        memcpy(estr, entry, sizeof(gdth_evt_str));
+    }
+    restore_flags(flags);
+    return pos;
+}
+
+/* Return the oldest event not yet delivered to 'application' (a bit in
+ * each entry's application mask), marking it as delivered.  When every
+ * stored event has already been seen, estr->event_source is set to 0. */
+static void gdth_readapp_event(unchar application, gdth_evt_str *estr)
+{
+    gdth_evt_str *entry;
+    int pos;
+    ulong flags;
+    unchar found = FALSE;
+
+    TRACE2(("gdth_readapp_event() app. %d\n", application));
+    save_flags(flags);
+    cli();
+    pos = eoldidx;
+    do {
+        entry = &ebuffer[pos];
+        if (entry->event_source == 0)
+            break;                          /* empty slot: nothing stored */
+        if (!(entry->application & application)) {
+            entry->application |= application;  /* mark as delivered */
+            found = TRUE;
+            break;
+        }
+        if (pos == elastidx)
+            break;                          /* newest entry reached */
+        if (++pos == MAX_EVENTS)
+            pos = 0;                        /* wrap around the ring */
+    } while (1);
+
+    estr->event_source = 0;
+    if (found)
+        memcpy(estr, entry, sizeof(gdth_evt_str));
+    restore_flags(flags);
+}
+
+/* Reset the event ring buffer to the empty state. */
+static void gdth_clear_events()
+{
+    ulong flags;
+
+    TRACE(("gdth_clear_events()"));
+    save_flags(flags);
+    cli();
+    elastidx = 0;
+    eoldidx  = 0;
+    ebuffer[0].event_source = 0;    /* mark the first slot unused */
+    restore_flags(flags);
+}
+
+
+/* SCSI interface functions */
+
+#if LINUX_VERSION_CODE >= 0x02015F
+/* Interrupt entry point for kernels >= 2.1.95: take the global SCSI
+ * io_request_lock around the real handler, as the mid-layer expects. */
+static void do_gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs)
+{
+    ulong flags;
+
+    spin_lock_irqsave(&io_request_lock, flags);
+    gdth_interrupt(irq, dev_id, regs);
+    spin_unlock_irqrestore(&io_request_lock, flags);
+}
+#endif
+
+/* Main interrupt handler.  Identifies the interrupting controller, reads
+ * status/info registers according to the controller type (EISA/ISA/PCI
+ * variants), acknowledges the interrupt, then dispatches: asynchronous
+ * controller events to gdth_async_event(), command completions to
+ * gdth_sync_event(), and finally restarts queued commands. */
+#if LINUX_VERSION_CODE >= 0x010346
+static void gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs)
+#else
+static void gdth_interrupt(int irq,struct pt_regs *regs)
+#endif
+{
+    register gdth_ha_str *ha;
+    gdt6m_dpram_str *dp6m_ptr;
+    gdt6_dpram_str *dp6_ptr;
+    gdt2_dpram_str *dp2_ptr;
+    Scsi_Cmnd *scp;
+    int hanum;
+    unchar IStatus;
+    ushort CmdStatus, Service = 0;
+    ulong InfoBytes, InfoBytes2 = 0;
+    gdth_evt_data dvr;
+
+    TRACE(("gdth_interrupt() IRQ %d\n",irq));
+
+    /* if polling and not from gdth_wait() -> return */
+    if (gdth_polling) {
+        if (!gdth_from_wait) {
+            return;
+        }
+    }
+
+    wait_index = 0;
+
+    /* search controller */
+    if ((hanum = gdth_get_status(&IStatus,irq)) == -1) {
+        /*
+        TRACE2(("gdth_interrupt(): Spurious interrupt received\n"));
+        */
+        return;
+    }
+
+#ifdef GDTH_STATISTICS
+    ++act_ints;
+#endif
+
+    /* read status/info and acknowledge; the register layout differs per
+     * controller family, but the sequence is the same: on error (bit 7
+     * of IStatus) fetch the command status, plus service/info2 for
+     * async events, then always fetch info and clear the interrupt. */
+    ha = HADATA(gdth_ctr_tab[hanum]);
+    if (ha->type == GDT_EISA) {
+        if (IStatus & 0x80) {                       /* error flag */
+            IStatus &= ~0x80;
+            CmdStatus = inw(ha->bmic + MAILBOXREG+8);
+            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+            if (IStatus == ASYNCINDEX) {            /* async. event ? */
+                Service = inw(ha->bmic + MAILBOXREG+10);
+                InfoBytes2 = inl(ha->bmic + MAILBOXREG+4);
+            }
+        } else                                      /* no error */
+            CmdStatus = S_OK;
+        InfoBytes = inl(ha->bmic + MAILBOXREG+12);
+        if (gdth_polling)                           /* init. -> more info */
+            InfoBytes2 = inl(ha->bmic + MAILBOXREG+4);
+        outb(0xff, ha->bmic + EDOORREG);    /* acknowledge interrupt */
+        outb(0x00, ha->bmic + SEMA1REG);    /* reset status semaphore */
+    } else if (ha->type == GDT_ISA) {
+        dp2_ptr = (gdt2_dpram_str *)ha->brd;
+        if (IStatus & 0x80) {                       /* error flag */
+            IStatus &= ~0x80;
+            CmdStatus = readw(&dp2_ptr->u.ic.Status);
+            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+            if (IStatus == ASYNCINDEX) {            /* async. event ? */
+                Service = readw(&dp2_ptr->u.ic.Service);
+                InfoBytes2 = readl(&dp2_ptr->u.ic.Info[1]);
+            }
+        } else                                      /* no error */
+            CmdStatus = S_OK;
+        InfoBytes = readl(&dp2_ptr->u.ic.Info[0]);
+        if (gdth_polling)                           /* init. -> more info */
+            InfoBytes2 = readl(&dp2_ptr->u.ic.Info[1]);
+        writeb(0xff, &dp2_ptr->io.irqdel);  /* acknowledge interrupt */
+        writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */
+        writeb(0, &dp2_ptr->io.Sema1);      /* reset status semaphore */
+    } else if (ha->type == GDT_PCI) {
+        dp6_ptr = (gdt6_dpram_str *)ha->brd;
+        if (IStatus & 0x80) {                       /* error flag */
+            IStatus &= ~0x80;
+            CmdStatus = readw(&dp6_ptr->u.ic.Status);
+            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+            if (IStatus == ASYNCINDEX) {            /* async. event ? */
+                Service = readw(&dp6_ptr->u.ic.Service);
+                InfoBytes2 = readl(&dp6_ptr->u.ic.Info[1]);
+            }
+        } else                                      /* no error */
+            CmdStatus = S_OK;
+        InfoBytes = readl(&dp6_ptr->u.ic.Info[0]);
+        if (gdth_polling)                           /* init. -> more info */
+            InfoBytes2 = readl(&dp6_ptr->u.ic.Info[1]);
+        writeb(0xff, &dp6_ptr->io.irqdel);  /* acknowledge interrupt */
+        writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
+        writeb(0, &dp6_ptr->io.Sema1);      /* reset status semaphore */
+    } else if (ha->type == GDT_PCINEW) {
+        /* PCI new-style: registers behind the PLX bridge, port I/O */
+        if (IStatus & 0x80) {                       /* error flag */
+            IStatus &= ~0x80;
+            CmdStatus = inw(PTR2USHORT(&ha->plx->status));
+            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+            if (IStatus == ASYNCINDEX) {            /* async. event ? */
+                Service = inw(PTR2USHORT(&ha->plx->service));
+                InfoBytes2 = inl(PTR2USHORT(&ha->plx->info[1]));
+            }
+        } else
+            CmdStatus = S_OK;
+
+        InfoBytes = inl(PTR2USHORT(&ha->plx->info[0]));
+        if (gdth_polling)                           /* init. -> more info */
+            InfoBytes2 = inl(PTR2USHORT(&ha->plx->info[1]));
+        outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
+        outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
+    } else if (ha->type == GDT_PCIMPR) {
+        /* PCI MPR: i960 message unit in dual-port RAM */
+        dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+        if (IStatus & 0x80) {                       /* error flag */
+            IStatus &= ~0x80;
+            CmdStatus = readw(&dp6m_ptr->i960r.status);
+            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+            if (IStatus == ASYNCINDEX) {            /* async. event ? */
+                Service = readw(&dp6m_ptr->i960r.service);
+                InfoBytes2 = readl(&dp6m_ptr->i960r.info[1]);
+            }
+        } else                                      /* no error */
+            CmdStatus = S_OK;
+        InfoBytes = readl(&dp6m_ptr->i960r.info[0]);
+        if (gdth_polling)                           /* init. -> more info */
+            InfoBytes2 = readl(&dp6m_ptr->i960r.info[1]);
+        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+        writeb(0, &dp6m_ptr->i960r.sema1_reg);
+    } else {
+        TRACE2(("gdth_interrupt() unknown controller type\n"));
+        return;
+    }
+
+    TRACE(("gdth_interrupt() index %d stat %d info %ld\n",
+           IStatus,CmdStatus,InfoBytes));
+    ha->status = CmdStatus;
+    ha->info   = InfoBytes;
+    ha->info2  = InfoBytes2;
+
+    /* report back to gdth_wait() when polling */
+    if (gdth_from_wait) {
+        wait_hanum = hanum;
+        wait_index = (int)IStatus;
+    }
+
+    if (IStatus == ASYNCINDEX) {
+        TRACE2(("gdth_interrupt() async. event\n"));
+        gdth_async_event(hanum,Service);
+    } else {
+        if (IStatus == SPEZINDEX) {
+            TRACE2(("Service unknown or not initialized !\n"));
+            dvr.size = sizeof(dvr.eu.driver);
+            dvr.eu.driver.ionode = hanum;
+            gdth_store_event(ES_DRIVER, 4, &dvr);
+            return;
+        }
+        /* command indices in gdth_cmd_tab start at 2 */
+        scp     = gdth_cmd_tab[IStatus-2][hanum].cmnd;
+        Service = gdth_cmd_tab[IStatus-2][hanum].service;
+        gdth_cmd_tab[IStatus-2][hanum].cmnd = UNUSED_CMND;
+        if (scp == UNUSED_CMND) {
+            TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
+            dvr.size = sizeof(dvr.eu.driver);
+            dvr.eu.driver.ionode = hanum;
+            dvr.eu.driver.index = IStatus;
+            gdth_store_event(ES_DRIVER, 1, &dvr);
+            return;
+        }
+        if (scp == INTERNAL_CMND) {
+            TRACE(("gdth_interrupt() answer to internal command\n"));
+            return;
+        }
+        TRACE(("gdth_interrupt() sync. status\n"));
+        gdth_sync_event(hanum,Service,IStatus,scp);
+    }
+    /* try to start further queued commands */
+    gdth_next(hanum);
+}
+
+/* Handle the completion of command 'index' for 'service'.  Screen-service
+ * completions implement the controller's console protocol (print message
+ * text, re-issue read/write requests for continued or answered messages);
+ * cache/raw completions translate ha->status into scp->result and finish
+ * the command.  Returns 1. */
+static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp)
+{
+    register gdth_ha_str *ha;
+    gdth_msg_str *msg;
+    gdth_cmd_str *cmdp;
+    char c='\r';
+    ushort i;
+    gdth_evt_data dvr;
+
+    ha   = HADATA(gdth_ctr_tab[hanum]);
+    cmdp = ha->pccb;
+    TRACE(("gdth_sync_event() scp %lx serv %d status %d\n",
+           (ulong)scp,service,ha->status));
+
+    if (service == SCREENSERVICE) {
+        msg  = (gdth_msg_str *)ha->pscratch;
+        TRACE(("len: %ld, answer: %d, ext: %d, alen: %ld\n",
+               msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
+        /* print the message unless it is an answer request continuation */
+        if (msg->msg_len)
+            if (!(msg->msg_answer && msg->msg_ext)) {
+                msg->msg_text[msg->msg_len] = '\0';
+                printk("%s",msg->msg_text);
+            }
+
+        if (msg->msg_ext && !msg->msg_answer) {
+            /* message continues: issue another screen-service READ */
+            while (gdth_test_busy(hanum))
+                gdth_delay(0);
+            cmdp->Service       = SCREENSERVICE;
+            cmdp->RequestBuffer = SCREEN_CMND;
+            gdth_get_cmd_index(hanum);
+            gdth_set_sema0(hanum);
+            cmdp->OpCode        = GDT_READ;
+            cmdp->BoardNode     = LOCALBOARD;
+            cmdp->u.screen.reserved  = 0;
+            cmdp->u.screen.msg_handle= msg->msg_handle;
+            cmdp->u.screen.msg_addr  = (ulong)msg;
+            ha->cmd_offs_dpmem = 0;
+            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.msg_addr)
+                + sizeof(ulong);
+            ha->cmd_cnt = 0;
+            gdth_copy_command(hanum);
+            gdth_release_event(hanum);
+            return 1;
+        }
+
+        if (msg->msg_answer && msg->msg_alen) {
+            /* firmware expects keyboard input; no console input is
+             * implemented, so c stays '\r' and an empty answer is sent */
+            for (i=0; i<msg->msg_alen && i<MSGLEN; ++i) {
+                /* getchar() ?? */
+                /* .. */
+                if (c == '\r')
+                    break;
+                msg->msg_text[i] = c;
+            }
+            msg->msg_alen -= i;
+            if (c!='\r' && msg->msg_alen!=0) {
+                msg->msg_answer = 1;
+                msg->msg_ext    = 1;
+            } else {
+                msg->msg_ext    = 0;
+                msg->msg_answer = 0;
+            }
+            msg->msg_len = i;
+            /* send the answer back with a screen-service WRITE */
+            while (gdth_test_busy(hanum))
+                gdth_delay(0);
+            cmdp->Service       = SCREENSERVICE;
+            cmdp->RequestBuffer = SCREEN_CMND;
+            gdth_get_cmd_index(hanum);
+            gdth_set_sema0(hanum);
+            cmdp->OpCode        = GDT_WRITE;
+            cmdp->BoardNode     = LOCALBOARD;
+            cmdp->u.screen.reserved  = 0;
+            cmdp->u.screen.msg_handle= msg->msg_handle;
+            cmdp->u.screen.msg_addr  = (ulong)msg;
+            ha->cmd_offs_dpmem = 0;
+            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.msg_addr)
+                + sizeof(ulong);
+            ha->cmd_cnt = 0;
+            gdth_copy_command(hanum);
+            gdth_release_event(hanum);
+            return 1;
+        }
+        printk("\n");
+
+    } else {
+        /* cache or raw service command completion */
+        scp->SCp.Message = (int)ha->status;
+        if (ha->status == S_OK) {
+            scp->result = DID_OK << 16;
+        } else if (ha->status == S_BSY) {
+            /* controller busy: requeue with the original priority */
+            TRACE2(("Controller busy -> retry !\n"));
+            gdth_putq(hanum,scp,scp->SCp.this_residual);
+            return 1;
+        } else {
+            if (service == CACHESERVICE) {
+                /* fake NOT READY sense data for the mid-layer; the <<1
+                 * matches the pre-2.4 shifted status-byte encoding */
+                memset((char*)scp->sense_buffer,0,16);
+                scp->sense_buffer[0] = 0x70;
+                scp->sense_buffer[2] = NOT_READY;
+                scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+
+                if (scp->done != gdth_scsi_done) {
+                    /* record the failure as a driver event */
+                    dvr.size = sizeof(dvr.eu.sync);
+                    dvr.eu.sync.ionode  = hanum;
+                    dvr.eu.sync.service = service;
+                    dvr.eu.sync.status  = ha->status;
+                    dvr.eu.sync.info    = ha->info;
+                    dvr.eu.sync.hostdrive =
+#if LINUX_VERSION_CODE >= 0x020000
+                        ha->id[scp->channel][scp->target].hostdrive;
+#else
+                        ha->id[NUMDATA(scp->host)->busnum][scp->target].hostdrive;
+#endif
+                    if (ha->status >= 0x8000)
+                        gdth_store_event(ES_SYNC, 0, &dvr);
+                    else
+                        gdth_store_event(ES_SYNC, service, &dvr);
+                }
+            } else {
+                /* NOTE(review): the '|| ha->status==S_RAW_ILL' term is
+                 * redundant (already implied by != S_RAW_SCSI) - verify
+                 * against the intended raw-service status semantics */
+                if (ha->status!=S_RAW_SCSI || ha->status==S_RAW_ILL || ha->info>=0x100) {
+                    scp->result = DID_BAD_TARGET << 16;
+                } else {
+                    /* pass the target's SCSI status through in ha->info */
+                    scp->result = (DID_OK << 16) | ha->info;
+                }
+            }
+        }
+        /* have_data_in doubles as a "completed once" flag: the first
+         * completion only marks it, a later pass calls scsi_done */
+        if (!scp->SCp.have_data_in)
+            scp->SCp.have_data_in++;
+        else
+            scp->scsi_done(scp);
+    }
+
+    return 1;
+}
+
+/* Message templates for asynchronous cache-service events, indexed by
+ * ha->status.  Each entry is one string: a leading octal-escaped byte
+ * descriptor followed by a printk format.  As used by gdth_async_event(),
+ * byte 0 appears to be the offset of the format string within the entry,
+ * and the following byte pairs appear to be (offset into the raw event
+ * data, parameter size in bytes) for each format argument - TODO confirm
+ * against the firmware event layout.  NOTE(review): entry 25 prints a
+ * 2-byte parameter with %lu (cf. entry 24's %u) - verify. */
+static char *async_cache_tab[] = {
+/* 0*/  "\011\000\002\002\002\004\002\006\004"
+        "GDT HA %u, service %u, async. status %u/%lu unknown",
+/* 1*/  "\011\000\002\002\002\004\002\006\004"
+        "GDT HA %u, service %u, async. status %u/%lu unknown",
+/* 2*/  "\005\000\002\006\004"
+        "GDT HA %u, Host Drive %lu not ready",
+/* 3*/  "\005\000\002\006\004"
+        "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
+/* 4*/  "\005\000\002\006\004"
+        "GDT HA %u, mirror update on Host Drive %lu failed",
+/* 5*/  "\005\000\002\006\004"
+        "GDT HA %u, Mirror Drive %lu failed",
+/* 6*/  "\005\000\002\006\004"
+        "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
+/* 7*/  "\005\000\002\006\004"
+        "GDT HA %u, Host Drive %lu write protected",
+/* 8*/  "\005\000\002\006\004"
+        "GDT HA %u, media changed in Host Drive %lu",
+/* 9*/  "\005\000\002\006\004"
+        "GDT HA %u, Host Drive %lu is offline",
+/*10*/  "\005\000\002\006\004"
+        "GDT HA %u, media change of Mirror Drive %lu",
+/*11*/  "\005\000\002\006\004"
+        "GDT HA %u, Mirror Drive %lu is write protected",
+/*12*/  "\005\000\002\006\004"
+        "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
+/*13*/  "\007\000\002\006\002\010\002"
+        "GDT HA %u, Array Drive %u: Cache Drive %u failed",
+/*14*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: FAIL state entered",
+/*15*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: error",
+/*16*/  "\007\000\002\006\002\010\002"
+        "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
+/*17*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: parity build failed",
+/*18*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: drive rebuild failed",
+/*19*/  "\007\000\002\010\002"
+        "GDT HA %u, Test of Hot Fix %u failed",
+/*20*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: drive build finished successfully",
+/*21*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
+/*22*/  "\007\000\002\006\002\010\002"
+        "GDT HA %u, Array Drive %u: Hot Fix %u activated",
+/*23*/  "\005\000\002\006\002"
+        "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
+/*24*/  "\005\000\002\010\002"
+        "GDT HA %u, mirror update on Cache Drive %u completed",
+/*25*/  "\005\000\002\010\002"
+        "GDT HA %u, mirror update on Cache Drive %lu failed",
+/*26*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: drive rebuild started",
+/*27*/  "\005\000\002\012\001"
+        "GDT HA %u, Fault bus %u: SHELF OK detected",
+/*28*/  "\005\000\002\012\001"
+        "GDT HA %u, Fault bus %u: SHELF not OK detected",
+/*29*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
+/*30*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: new disk detected",
+/*31*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: old disk detected",
+/*32*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is illegal",
+/*33*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: illegal device detected",
+/*34*/  "\011\000\002\012\001\013\001\006\004"
+        "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
+/*35*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: disk write protected",
+/*36*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: disk not available",
+/*37*/  "\007\000\002\012\001\006\004"
+        "GDT HA %u, Fault bus %u: swap detected (%lu)",
+/*38*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
+/*39*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
+/*40*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
+/*41*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
+/*42*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: drive build started",
+/*43*/  "\003\000\002"
+        "GDT HA %u, DRAM parity error detected",
+/*44*/  "\005\000\002\006\002"
+        "GDT HA %u, Mirror Drive %u: update started",
+/*45*/  "\007\000\002\006\002\010\002"
+        "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
+/*46*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
+/*47*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
+/*48*/  "\005\000\002\006\002"
+        "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
+/*49*/  "\005\000\002\006\002"
+        "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
+/*50*/  "\007\000\002\012\001\013\001"
+        "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
+/*51*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: expand started",
+/*52*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: expand finished successfully",
+/*53*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: expand failed",
+/*54*/  "\003\000\002"
+        "GDT HA %u, CPU temperature critical",
+/*55*/  "\003\000\002"
+        "GDT HA %u, CPU temperature OK",
+/*56*/  "\005\000\002\006\004"
+        "GDT HA %u, Host drive %lu created",
+/*57*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: expand restarted",
+/*58*/  "\005\000\002\006\002"
+        "GDT HA %u, Array Drive %u: expand stopped",
+};
+
+
+/* Handle an asynchronous controller event for 'service'.  Screen-service
+ * events trigger a message READ from the firmware; all other events are
+ * stored in the event buffer and, for known cache-service events, printed
+ * using the async_cache_tab templates.  Returns 1. */
+static int gdth_async_event(int hanum,int service)
+{
+    gdth_stackframe stack;
+    gdth_evt_data dvr;
+    char *f = NULL;
+    int i,j;
+    gdth_ha_str *ha;
+    gdth_msg_str *msg;
+    gdth_cmd_str *cmdp;
+    int cmd_index;
+
+    ha  = HADATA(gdth_ctr_tab[hanum]);
+    cmdp= ha->pccb;
+    msg = (gdth_msg_str *)ha->pscratch;
+    TRACE2(("gdth_async_event() ha %d serv %d\n",
+            hanum,service));
+
+    if (service == SCREENSERVICE) {
+        if (ha->status == MSG_REQUEST) {
+            /* firmware has a console message pending: fetch it */
+            while (gdth_test_busy(hanum))
+                gdth_delay(0);
+            cmdp->Service       = SCREENSERVICE;
+            cmdp->RequestBuffer = SCREEN_CMND;
+            cmd_index = gdth_get_cmd_index(hanum);
+            gdth_set_sema0(hanum);
+            cmdp->OpCode        = GDT_READ;
+            cmdp->BoardNode     = LOCALBOARD;
+            cmdp->u.screen.reserved  = 0;
+            cmdp->u.screen.msg_handle= MSG_INV_HANDLE;
+            cmdp->u.screen.msg_addr  = (ulong)msg;
+            ha->cmd_offs_dpmem = 0;
+            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.msg_addr)
+                + sizeof(ulong);
+            ha->cmd_cnt = 0;
+            gdth_copy_command(hanum);
+            /* prefix the upcoming message with the controller location */
+            if (ha->type == GDT_EISA)
+                printk("[EISA slot %d] ",(ushort)ha->brd_phys);
+            else if (ha->type == GDT_ISA)
+                printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys);
+            else
+                printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8),
+                       (ushort)((ha->brd_phys>>3)&0x1f));
+            gdth_release_event(hanum);
+        }
+
+    } else {
+        /* record the raw event data */
+        dvr.size = sizeof(dvr.eu.async);
+        dvr.eu.async.ionode  = hanum;
+        dvr.eu.async.service = service;
+        dvr.eu.async.status  = ha->status;
+        dvr.eu.async.info    = ha->info;
+        *(ulong *)dvr.eu.async.scsi_coord = ha->info2;
+        gdth_store_event(ES_ASYNC, service, &dvr);
+
+        if (service==CACHESERVICE && INDEX_OK(ha->status,async_cache_tab)) {
+            TRACE2(("GDT: Async. event cache service, event no.: %d\n",
+                    ha->status));
+
+            f = async_cache_tab[ha->status];
+
+            /* i: parameter to push, j: stack element to fill */
+            /* decode the template's (offset,size) pairs into 'stack';
+             * see the descriptor format comment at async_cache_tab */
+            for (j=0,i=1; i < f[0]; i+=2) {
+                switch (f[i+1]) {
+                  case 4:
+                    stack.b[j++] = *(ulong*)&dvr.eu.stream[(int)f[i]];
+                    break;
+                  case 2:
+                    stack.b[j++] = *(ushort*)&dvr.eu.stream[(int)f[i]];
+                    break;
+                  case 1:
+                    stack.b[j++] = *(unchar*)&dvr.eu.stream[(int)f[i]];
+                    break;
+                  default:
+                    break;
+                }
+            }
+
+            /* pass the struct as the printk varargs - non-portable
+             * trick that hands the collected values to the format */
+            printk(&f[f[0]],stack); printk("\n");
+
+        } else {
+            printk("GDT: Unknown async. event service %d event no. %d\n",
+                   service,ha->status);
+        }
+    }
+    return 1;
+}
+
+
+#ifdef GDTH_STATISTICS
+void gdth_timeout(ulong data)
+{
+ ulong flags,i;
+ Scsi_Cmnd *nscp;
+ gdth_ha_str *ha;
+ int hanum = 0;
+
+ save_flags(flags);
+ cli();
+
+ for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
+ if (gdth_cmd_tab[i][hanum].cmnd != UNUSED_CMND)
+ ++act_stats;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ ++act_rq;
+
+ TRACE2(("gdth_to(): ints %ld, ios %ld, act_stats %ld, act_rq %ld\n",
+ act_ints, act_ios, act_stats, act_rq));
+ act_ints = act_ios = 0;
+
+ gdth_timer.expires = jiffies + 30 * HZ;
+ add_timer(&gdth_timer);
+ restore_flags(flags);
+}
+#endif
+
+
+__initfunc (int gdth_detect(Scsi_Host_Template *shtp))
+{
+ struct Scsi_Host *shp;
+ gdth_ha_str *ha;
+ unsigned long flags;
+ ulong isa_bios;
+ ushort eisa_slot,device_id,index;
+ gdth_pci_str pcistr;
+ int i,j,hanum;
+#if LINUX_VERSION_CODE < 0x020000
+ unchar b;
+#endif
+
+#ifdef DEBUG_GDTH
+ printk("GDT: This driver contains debugging information !! Trace level = %d\n",
+ DebugState);
+ printk(" Destination of debugging information: ");
+#ifdef __SERIAL__
+#ifdef __COM2__
+ printk("Serial port COM2\n");
+#else
+ printk("Serial port COM1\n");
+#endif
+#else
+ printk("Console\n");
+#endif
+ gdth_delay(3000);
+#endif
+
+ TRACE(("gdth_detect()\n"));
+
+ if (disable_gdth_scan) {
+ printk("GDT: Controller driver disabled from command line !\n");
+ return 0;
+ }
+
+ /* initializations */
+ gdth_polling = TRUE;
+ for (i=0; i<GDTH_MAXCMDS; ++i)
+ for (j=0; j<MAXHA; ++j)
+ gdth_cmd_tab[i][j].cmnd = UNUSED_CMND;
+ for (i=0; i<4; ++i)
+ for (j=0; j<MAXHA; ++j)
+ gdth_ioctl_tab[i][j] = NULL;
+ gdth_clear_events();
+
+ /* scanning for controllers, at first: ISA controller */
+ for (isa_bios=0xc8000UL; isa_bios<=0xd8000UL; isa_bios+=0x8000UL) {
+ if (gdth_search_isa(isa_bios)) { /* controller found */
+ shp = scsi_register(shtp,sizeof(gdth_ext_str));
+ ha = HADATA(shp);
+ if (!gdth_init_isa(isa_bios,ha)) {
+ scsi_unregister(shp);
+ continue;
+ }
+ /* controller found and initialized */
+ printk("Configuring GDT-ISA HA at BIOS 0x%05lX IRQ %u DRQ %u\n",
+ isa_bios,ha->irq,ha->drq);
+
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x02015F
+ if (request_irq(ha->irq,do_gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#elif LINUX_VERSION_CODE >= 0x010346
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#else
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth"))
+#endif
+ {
+ printk("GDT-ISA: Unable to allocate IRQ\n");
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ if (request_dma(ha->drq,"gdth")) {
+ printk("GDT-ISA: Unable to allocate DMA channel\n");
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ set_dma_mode(ha->drq,DMA_MODE_CASCADE);
+ enable_dma(ha->drq);
+ shp->unchecked_isa_dma = 1;
+ shp->irq = ha->irq;
+ shp->dma_channel = ha->drq;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[0][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ hanum = gdth_ctr_count;
+ gdth_ctr_tab[gdth_ctr_count++] = shp;
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum= 0;
+
+ ha->pccb = CMDDATA(shp);
+ ha->pscratch = DMADATA(shp);
+ ha->req_first = NULL;
+ for (i=0; i<MAXBUS; ++i) {
+ for (j=0; j<MAXID; ++j) {
+ ha->id[i][j].type = EMPTY_DTYP;
+ ha->id[i][j].lock = 0;
+ ha->id[i][j].heads = 0;
+ }
+ }
+ restore_flags(flags);
+
+ if (!gdth_search_drives(hanum)) {
+ printk("GDT-ISA: Error during device scan\n");
+ --gdth_ctr_count;
+ --gdth_ctr_vcount;
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020000
+ shp->max_id = 8;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt - 1;
+#else
+ /* register addit. SCSI channels as virtual controllers */
+ for (b=1; b<ha->bus_cnt; ++b) {
+ shp = scsi_register(shtp,sizeof(gdth_num_str));
+ shp->unchecked_isa_dma = 1;
+ shp->irq = ha->irq;
+ shp->dma_channel = ha->drq;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[b][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum = b;
+ }
+#endif
+
+ gdth_enable_int(hanum);
+ }
+ }
+
+ /* scanning for EISA controllers */
+ for (eisa_slot=0x1000; eisa_slot<=0x8000; eisa_slot+=0x1000) {
+ if (gdth_search_eisa(eisa_slot)) { /* controller found */
+ shp = scsi_register(shtp,sizeof(gdth_ext_str));
+ ha = HADATA(shp);
+ if (!gdth_init_eisa(eisa_slot,ha)) {
+ scsi_unregister(shp);
+ continue;
+ }
+ /* controller found and initialized */
+ printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
+ eisa_slot>>12,ha->irq);
+
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x02015F
+ if (request_irq(ha->irq,do_gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#elif LINUX_VERSION_CODE >= 0x010346
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#else
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth"))
+#endif
+ {
+ printk("GDT-EISA: Unable to allocate IRQ\n");
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[0][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ hanum = gdth_ctr_count;
+ gdth_ctr_tab[gdth_ctr_count++] = shp;
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum= 0;
+ TRACE2(("EISA detect Bus 0: shp %lx hanum %d\n",
+ (ulong)shp,NUMDATA(shp)->hanum));
+
+ ha->pccb = CMDDATA(shp);
+ ha->pscratch = DMADATA(shp);
+ ha->req_first = NULL;
+ for (i=0; i<MAXBUS; ++i) {
+ for (j=0; j<MAXID; ++j) {
+ ha->id[i][j].type = EMPTY_DTYP;
+ ha->id[i][j].lock = 0;
+ ha->id[i][j].heads = 0;
+ }
+ }
+ restore_flags(flags);
+
+ if (!gdth_search_drives(hanum)) {
+ printk("GDT-EISA: Error during device scan\n");
+ --gdth_ctr_count;
+ --gdth_ctr_vcount;
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020000
+ shp->max_id = 8;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt - 1;
+#else
+ /* register addit. SCSI channels as virtual controllers */
+ for (b=1; b<ha->bus_cnt; ++b) {
+ shp = scsi_register(shtp,sizeof(gdth_num_str));
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[b][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum = b;
+ TRACE2(("EISA detect Bus %d: shp %lx hanum %d\n",
+ NUMDATA(shp)->busnum,(ulong)shp,
+ NUMDATA(shp)->hanum));
+ }
+#endif
+
+ gdth_enable_int(hanum);
+ }
+ }
+
+ /* scanning for PCI controllers */
+ for (device_id = 0; device_id <= PCI_DEVICE_ID_VORTEX_GDTMAXRP; ++device_id) {
+ if (device_id > PCI_DEVICE_ID_VORTEX_GDT6555 &&
+ device_id < PCI_DEVICE_ID_VORTEX_GDT6x17RP)
+ continue;
+ for (index = 0; ; ++index) {
+ if (!gdth_search_pci(device_id,index,&pcistr))
+ break; /* next device_id */
+ shp = scsi_register(shtp,sizeof(gdth_ext_str));
+ ha = HADATA(shp);
+ if (!gdth_init_pci(&pcistr,ha)) {
+ scsi_unregister(shp);
+ continue;
+ }
+ /* controller found and initialized */
+ printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
+ pcistr.bus,pcistr.device_fn>>3,ha->irq);
+
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x02015F
+ if (request_irq(ha->irq,do_gdth_interrupt,SA_INTERRUPT|SA_SHIRQ,"gdth",NULL))
+#elif LINUX_VERSION_CODE >= 0x010346
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT|SA_SHIRQ,"gdth",NULL))
+#else
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT|SA_SHIRQ,"gdth"))
+#endif
+ {
+ printk("GDT-PCI: Unable to allocate IRQ\n");
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[0][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ hanum = gdth_ctr_count;
+ gdth_ctr_tab[gdth_ctr_count++] = shp;
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum= 0;
+
+ ha->pccb = CMDDATA(shp);
+ ha->pscratch = DMADATA(shp);
+ ha->req_first = NULL;
+ for (i=0; i<MAXBUS; ++i) {
+ for (j=0; j<MAXID; ++j) {
+ ha->id[i][j].type = EMPTY_DTYP;
+ ha->id[i][j].lock = 0;
+ ha->id[i][j].heads = 0;
+ }
+ }
+ restore_flags(flags);
+
+ if (!gdth_search_drives(hanum)) {
+ printk("GDT-PCI: Error during device scan\n");
+ --gdth_ctr_count;
+ --gdth_ctr_vcount;
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020000
+ shp->max_id = MAXID;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt - 1;
+#else
+ /* register addit. SCSI channels as virtual controllers */
+ for (b=1; b<ha->bus_cnt; ++b) {
+ shp = scsi_register(shtp,sizeof(gdth_num_str));
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[b][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum = b;
+ }
+#endif
+
+ gdth_enable_int(hanum);
+ }
+ }
+
+ TRACE2(("gdth_detect() %d controller detected\n",gdth_ctr_count));
+ if (gdth_ctr_count > 0) {
+#ifdef GDTH_STATISTICS
+ TRACE2(("gdth_detect(): Initializing timer !\n"));
+ init_timer(&gdth_timer);
+ gdth_timer.expires = jiffies + HZ;
+ gdth_timer.data = 0L;
+ gdth_timer.function = gdth_timeout;
+ add_timer(&gdth_timer);
+#endif
+#if LINUX_VERSION_CODE >= 0x020100
+ register_reboot_notifier(&gdth_notifier);
+#endif
+ }
+ gdth_polling = FALSE;
+ return gdth_ctr_vcount;
+}
+
+
+int gdth_release(struct Scsi_Host *shp)
+{
+ unsigned long flags;
+
+ TRACE2(("gdth_release()\n"));
+
+ if (NUMDATA(shp)->busnum == 0) {
+ gdth_flush(NUMDATA(shp)->hanum);
+
+ save_flags(flags);
+ cli();
+ if (shp->irq) {
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(shp->irq,NULL);
+#else
+ free_irq(shp->irq);
+#endif
+ }
+ if (shp->dma_channel != 0xff) {
+ free_dma(shp->dma_channel);
+ }
+ restore_flags(flags);
+ gdth_ctr_released++;
+ TRACE2(("gdth_release(): HA %d of %d\n",
+ gdth_ctr_released, gdth_ctr_count));
+
+ if (gdth_ctr_released == gdth_ctr_count) {
+#ifdef GDTH_STATISTICS
+ del_timer(&gdth_timer);
+#endif
+#if LINUX_VERSION_CODE >= 0x020100
+ unregister_reboot_notifier(&gdth_notifier);
+#endif
+ }
+ }
+
+ scsi_unregister(shp);
+ return 0;
+}
+
+
+static const char *gdth_ctr_name(int hanum)
+{
+ gdth_ha_str *ha;
+
+ TRACE2(("gdth_ctr_name()\n"));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ if (ha->type == GDT_EISA) {
+ switch (ha->stype) {
+ case GDT3_ID:
+ return("GDT3000/3020");
+ case GDT3A_ID:
+ return("GDT3000A/3020A/3050A");
+ case GDT3B_ID:
+ return("GDT3000B/3010A");
+ }
+ } else if (ha->type == GDT_ISA) {
+ return("GDT2000/2020");
+ } else if (ha->type == GDT_PCI) {
+ switch (ha->stype) {
+ case PCI_DEVICE_ID_VORTEX_GDT60x0:
+ return("GDT6000/6020/6050");
+ case PCI_DEVICE_ID_VORTEX_GDT6000B:
+ return("GDT6000B/6010");
+ }
+ }
+ /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
+
+ return("");
+}
+
+const char *gdth_info(struct Scsi_Host *shp)
+{
+ int hanum;
+ gdth_ha_str *ha;
+
+ TRACE2(("gdth_info()\n"));
+ hanum = NUMDATA(shp)->hanum;
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ return ((const char *)ha->ctr_name);
+}
+
+/* old error handling */
+int gdth_abort(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_abort() reason %d\n",scp->abort_reason));
+ return SCSI_ABORT_SNOOZE;
+}
+
+#if LINUX_VERSION_CODE >= 0x010346
+int gdth_reset(Scsi_Cmnd *scp, unsigned int reset_flags)
+#else
+int gdth_reset(Scsi_Cmnd *scp)
+#endif
+{
+ TRACE2(("gdth_reset()\n"));
+ return SCSI_RESET_PUNT;
+}
+
+#if LINUX_VERSION_CODE >= 0x02015F
+/* new error handling */
+int gdth_eh_abort(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_abort()\n"));
+ return FAILED;
+}
+
+int gdth_eh_device_reset(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_device_reset()\n"));
+ return FAILED;
+}
+
+int gdth_eh_bus_reset(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_bus_reset()\n"));
+ return FAILED;
+}
+
+int gdth_eh_host_reset(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_host_reset()\n"));
+ return FAILED;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= 0x010300
+int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
+#else
+int gdth_bios_param(Disk *disk,int dev,int *ip)
+#endif
+{
+ unchar b, t;
+ int hanum;
+ gdth_ha_str *ha;
+ int drv_hds, drv_secs;
+
+ hanum = NUMDATA(disk->device->host)->hanum;
+ b = disk->device->channel;
+ t = disk->device->id;
+ TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", hanum, b, t));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ if (ha->id[b][t].heads == 0) {
+ /* raw device: evaluate mapping (sectors per head, heads per cylinder) */
+ if (disk->capacity /HEADS/SECS <= MAXCYLS) {
+ drv_hds = HEADS;
+ drv_secs= SECS;
+ } else if (disk->capacity /MEDHEADS/MEDSECS <= MAXCYLS) {
+ drv_hds = MEDHEADS;
+ drv_secs= MEDSECS;
+ } else {
+ drv_hds = BIGHEADS;
+ drv_secs= BIGSECS;
+ }
+ ha->id[b][t].heads = drv_hds;
+ ha->id[b][t].secs = drv_secs;
+ TRACE2(("gdth_bios_param(): raw device -> params evaluated\n"));
+ }
+
+ ip[0] = ha->id[b][t].heads;
+ ip[1] = ha->id[b][t].secs;
+ ip[2] = disk->capacity / ip[0] / ip[1];
+
+ TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
+ ip[0],ip[1],ip[2]));
+ return 0;
+}
+
+
+static void internal_done(Scsi_Cmnd *scp)
+{
+ scp->SCp.sent_command++;
+}
+
+int gdth_command(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_command()\n"));
+
+ scp->SCp.sent_command = 0;
+ gdth_queuecommand(scp,internal_done);
+
+ while (!scp->SCp.sent_command)
+ barrier();
+ return scp->result;
+}
+
+
+int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *))
+{
+ int hanum;
+ int priority;
+
+ TRACE(("gdth_queuecommand() cmd 0x%x id %d lun %d\n",
+ scp->cmnd[0],scp->target,scp->lun));
+
+ scp->scsi_done = (void *)done;
+ scp->SCp.have_data_in = 1;
+ hanum = NUMDATA(scp->host)->hanum;
+#ifdef GDTH_STATISTICS
+ ++act_ios;
+#endif
+
+ priority = DEFAULT_PRI;
+#if LINUX_VERSION_CODE >= 0x010300
+ if (scp->done == gdth_scsi_done)
+ priority = scp->SCp.this_residual;
+#endif
+ gdth_putq( hanum, scp, priority );
+ gdth_next( hanum );
+ return 0;
+}
+
+/* flush routine */
+static void gdth_flush(int hanum)
+{
+ int i, j;
+ gdth_ha_str *ha;
+ Scsi_Cmnd scp;
+ Scsi_Device sdev;
+ gdth_cmd_str gdtcmd;
+ char cmnd[12];
+
+ TRACE2(("gdth_flush() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ memset(&sdev,0,sizeof(Scsi_Device));
+ memset(&scp, 0,sizeof(Scsi_Cmnd));
+ sdev.host = gdth_ctr_tab[hanum];
+ sdev.id = sdev.host->this_id;
+ scp.cmd_len = 12;
+ scp.host = gdth_ctr_tab[hanum];
+ scp.target = sdev.host->this_id;
+ scp.device = &sdev;
+ scp.use_sg = 0;
+
+ for (i = 0; i < MAXBUS; ++i) {
+ for (j = 0; j < MAXID; ++j) {
+ if (ha->id[i][j].type == CACHE_DTYP) {
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_FLUSH;
+ gdtcmd.u.cache.DeviceNo = ha->id[i][j].hostdrive;
+ gdtcmd.u.cache.BlockNo = 1;
+ gdtcmd.u.cache.sg_canz = 0;
+ TRACE2(("gdth_flush(): flush ha %d drive %d\n",
+ hanum, ha->id[i][j].hostdrive));
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ 30*HZ, 1);
+ down(&sem);
+ }
+ }
+ }
+ }
+}
+
+/* shutdown routine */
+#if LINUX_VERSION_CODE >= 0x020100
+static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
+#else
+void gdth_halt(void)
+#endif
+{
+ int hanum;
+ Scsi_Cmnd scp;
+ Scsi_Device sdev;
+ gdth_cmd_str gdtcmd;
+ char cmnd[12];
+
+#if LINUX_VERSION_CODE >= 0x020100
+ TRACE2(("gdth_halt() event %d\n",event));
+ if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
+ return NOTIFY_DONE;
+#else
+ TRACE2(("gdth_halt()\n"));
+ if (halt_called) {
+ TRACE2(("already called\n"));
+ return;
+ }
+ halt_called = TRUE;
+#endif
+ printk("GDT: Flushing all host drives .. ");
+ for (hanum = 0; hanum < gdth_ctr_count; ++hanum) {
+ gdth_flush(hanum);
+
+ /* controller reset */
+ memset(&sdev,0,sizeof(Scsi_Device));
+ memset(&scp, 0,sizeof(Scsi_Cmnd));
+ sdev.host = gdth_ctr_tab[hanum];
+ sdev.id = sdev.host->this_id;
+ scp.cmd_len = 12;
+ scp.host = gdth_ctr_tab[hanum];
+ scp.target = sdev.host->this_id;
+ scp.device = &sdev;
+ scp.use_sg = 0;
+
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_RESET;
+ TRACE2(("gdth_halt(): reset controller %d\n", hanum));
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ 10*HZ, 1);
+ down(&sem);
+ }
+ }
+ printk("Done.\n");
+
+#ifdef GDTH_STATISTICS
+ del_timer(&gdth_timer);
+#endif
+#if LINUX_VERSION_CODE >= 0x020100
+ unregister_reboot_notifier(&gdth_notifier);
+ return NOTIFY_OK;
+#endif
+}
+
+
+/* called from init/main.c */
+__initfunc (void gdth_setup(char *str,int *ints))
+{
+ static size_t setup_idx = 0;
+
+ TRACE2(("gdth_setup() str %s ints[0] %d ints[1] %d\n",
+ str ? str:"NULL", ints[0],
+ ints[0] ? ints[1]:0));
+
+ if (setup_idx >= MAXHA) {
+ printk("GDT: gdth_setup() called too many times. Bad LILO params ?\n");
+ return;
+ }
+ if (ints[0] != 1) {
+ printk("GDT: Illegal command line !\n");
+ printk("Usage: gdth=<IRQ>\n");
+ printk("Where: <IRQ>: valid EISA controller IRQ (10,11,12,14)\n");
+ printk(" or 0 to disable controller driver\n");
+ return;
+ }
+ if (ints[1] == 10 || ints[1] == 11 || ints[1] == 12 || ints[1] == 14) {
+ irqs[setup_idx++] = ints[1];
+ irqs[setup_idx] = 0xff;
+ return;
+ }
+ if (ints[1] == 0) {
+ disable_gdth_scan = TRUE;
+ return;
+ }
+ printk("GDT: Invalid IRQ (%d) specified\n",ints[1]);
+}
+
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = GDTH;
+#include "scsi_module.c"
+#endif
+
diff --git a/linux/src/drivers/scsi/gdth.h b/linux/src/drivers/scsi/gdth.h
new file mode 100644
index 0000000..6eafd1f
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth.h
@@ -0,0 +1,819 @@
+#ifndef _GDTH_H
+#define _GDTH_H
+
+/*
+ * Header file for the GDT ISA/EISA/PCI Disk Array Controller driver for Linux
+ *
+ * gdth.h Copyright (C) 1995-98 ICP vortex Computersysteme GmbH, Achim Leubner
+ * See gdth.c for further informations and
+ * below for supported controller types
+ *
+ * <achim@vortex.de>
+ *
+ * $Id: gdth.h,v 1.1 1999/04/26 05:54:37 tb Exp $
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+
+#ifndef NULL
+#define NULL 0
+#endif
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* defines, macros */
+
+/* driver version */
+#define GDTH_VERSION_STR "1.07"
+#define GDTH_VERSION 1
+#define GDTH_SUBVERSION 7
+
+/* protocol version */
+#define PROTOCOL_VERSION 1
+
+/* controller classes */
+#define GDT_ISA 0x01 /* ISA controller */
+#define GDT_EISA 0x02 /* EISA controller */
+#define GDT_PCI 0x03 /* PCI controller */
+#define GDT_PCINEW 0x04 /* new PCI controller */
+#define GDT_PCIMPR 0x05 /* PCI MPR controller */
+/* GDT_EISA, controller subtypes EISA */
+#define GDT3_ID 0x0130941c /* GDT3000/3020 */
+#define GDT3A_ID 0x0230941c /* GDT3000A/3020A/3050A */
+#define GDT3B_ID 0x0330941c /* GDT3000B/3010A */
+/* GDT_ISA */
+#define GDT2_ID 0x0120941c /* GDT2000/2020 */
+/* vendor ID, device IDs (PCI) */
+/* these defines should already exist in <linux/pci.h> */
+#ifndef PCI_VENDOR_ID_VORTEX
+#define PCI_VENDOR_ID_VORTEX 0x1119 /* PCI controller vendor ID */
+#endif
+#ifndef PCI_DEVICE_ID_VORTEX_GDT60x0
+/* GDT_PCI */
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0 /* GDT6000/6020/6050 */
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 1 /* GDT6000B/6010 */
+/* GDT_PCINEW */
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 2 /* GDT6110/6510 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 3 /* GDT6120/6520 */
+#define PCI_DEVICE_ID_VORTEX_GDT6530 4 /* GDT6530 */
+#define PCI_DEVICE_ID_VORTEX_GDT6550 5 /* GDT6550 */
+/* GDT_PCINEW, wide/ultra SCSI controllers */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 6 /* GDT6117/6517 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 7 /* GDT6127/6527 */
+#define PCI_DEVICE_ID_VORTEX_GDT6537 8 /* GDT6537 */
+#define PCI_DEVICE_ID_VORTEX_GDT6557 9 /* GDT6557/6557-ECC */
+/* GDT_PCINEW, wide SCSI controllers */
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 10 /* GDT6115/6515 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 11 /* GDT6125/6525 */
+#define PCI_DEVICE_ID_VORTEX_GDT6535 12 /* GDT6535 */
+#define PCI_DEVICE_ID_VORTEX_GDT6555 13 /* GDT6555/6555-ECC */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RP
+/* GDT_MPR, RP series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x100 /* GDT6117RP/GDT6517RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x101 /* GDT6127RP/GDT6527RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x102 /* GDT6537RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x103 /* GDT6557RP */
+/* GDT_MPR, RP series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x104 /* GDT6111RP/GDT6511RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x105 /* GDT6121RP/GDT6521RP */
+/* GDT_MPR, RP1 series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x110 /* GDT6117RP1/GDT6517RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x111 /* GDT6127RP1/GDT6527RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x112 /* GDT6537RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x113 /* GDT6557RP1 */
+/* GDT_MPR, RP1 series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x114 /* GDT6111RP1/GDT6511RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x115 /* GDT6121RP1/GDT6521RP1 */
+/* GDT_MPR, RP2 series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x120 /* GDT6117RP2/GDT6517RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x121 /* GDT6127RP2/GDT6527RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x122 /* GDT6537RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x123 /* GDT6557RP2 */
+/* GDT_MPR, RP2 series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x124 /* GDT6111RP2/GDT6511RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x125 /* GDT6121RP2/GDT6521RP2 */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDT6519RD
+/* GDT_MPR, Fibre Channel */
+#define PCI_DEVICE_ID_VORTEX_GDT6519RD 0x210 /* GDT6519RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6529RD 0x211 /* GDT6529RD */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDTMAXRP
+/* GDT_MPR, last device ID */
+#define PCI_DEVICE_ID_VORTEX_GDTMAXRP 0x2ff
+#endif
+
+/* limits */
+#define GDTH_SCRATCH 4096 /* 4KB scratch buffer */
+#define GDTH_MAXCMDS 124
+#define GDTH_MAXC_P_L 16 /* max. cmds per lun */
+#define MAXOFFSETS 128
+#define MAXHA 8
+#define MAXID 16
+#define MAXLUN 8
+#define MAXBUS 6
+#define MAX_HDRIVES 35 /* max. host drive count */
+#define MAX_EVENTS 100 /* event buffer count */
+#define MAXCYLS 1024
+#define HEADS 64
+#define SECS 32 /* mapping 64*32 */
+#define MEDHEADS 127
+#define MEDSECS 63 /* mapping 127*63 */
+#define BIGHEADS 255
+#define BIGSECS 63 /* mapping 255*63 */
+
+/* special command ptr. */
+#define UNUSED_CMND ((Scsi_Cmnd *)-1)
+#define INTERNAL_CMND ((Scsi_Cmnd *)-2)
+#define SCREEN_CMND ((Scsi_Cmnd *)-3)
+#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
+
+/* device types */
+#define EMPTY_DTYP 0
+#define CACHE_DTYP 1
+#define RAW_DTYP 2
+#define SIOP_DTYP 3 /* the SCSI processor */
+
+/* controller services */
+#define SCSIRAWSERVICE 3
+#define CACHESERVICE 9
+#define SCREENSERVICE 11
+
+/* screenservice defines */
+#define MSG_INV_HANDLE -1 /* special message handle */
+#define MSGLEN 16 /* size of message text */
+#define MSG_SIZE 34 /* size of message structure */
+#define MSG_REQUEST 0 /* async. event: message */
+
+/* cacheservice defines */
+#define SECTOR_SIZE 0x200 /* always 512 bytes per sector */
+
+/* DPMEM constants */
+#define DPMEM_MAGIC 0xC0FFEE11
+#define IC_HEADER_BYTES 48
+#define IC_QUEUE_BYTES 4
+#define DPMEM_COMMAND_OFFSET IC_HEADER_BYTES+IC_QUEUE_BYTES*MAXOFFSETS
+
+/* service commands */
+#define GDT_INIT 0 /* service initialization */
+#define GDT_READ 1 /* read command */
+#define GDT_WRITE 2 /* write command */
+#define GDT_INFO 3 /* information about devices */
+#define GDT_FLUSH 4 /* flush dirty cache buffers */
+#define GDT_IOCTL 5 /* ioctl command */
+#define GDT_DEVTYPE 9 /* additional information */
+#define GDT_MOUNT 10 /* mount cache device */
+#define GDT_UNMOUNT 11 /* unmount cache device */
+#define GDT_SET_FEAT 12 /* set feat. (scatter/gather) */
+#define GDT_GET_FEAT 13 /* get features */
+#define GDT_RESERVE 14 /* reserve dev. to raw service */
+#define GDT_WRITE_THR 16 /* write through */
+#define GDT_EXT_INFO 18 /* extended info */
+#define GDT_RESET 19 /* controller reset */
+
+/* IOCTL command defines */
+#define SCSI_CHAN_CNT 5 /* subfunctions */
+#define GET_IOCHAN_DESC 0x5e
+#define L_CTRL_PATTERN 0x20000000L
+#define CACHE_INFO 4
+#define CACHE_CONFIG 5
+#define BOARD_INFO 0x28
+#define IO_CHANNEL 0x00020000L /* channels */
+#define INVALID_CHANNEL 0x0000ffffL
+
+/* IOCTLs */
+#define GDTIOCTL_MASK ('J'<<8)
+#define GDTIOCTL_GENERAL (GDTIOCTL_MASK | 0) /* general IOCTL */
+#define GDTIOCTL_DRVERS (GDTIOCTL_MASK | 1) /* get driver version */
+#define GDTIOCTL_CTRTYPE (GDTIOCTL_MASK | 2) /* get controller type */
+#define GDTIOCTL_CTRCNT (GDTIOCTL_MASK | 5) /* get controller count */
+#define GDTIOCTL_LOCKDRV (GDTIOCTL_MASK | 6) /* lock host drive */
+#define GDTIOCTL_LOCKCHN (GDTIOCTL_MASK | 7) /* lock channel */
+#define GDTIOCTL_EVENT (GDTIOCTL_MASK | 8) /* read controller events */
+
+/* service errors */
+#define S_OK 1 /* no error */
+#define S_BSY 7 /* controller busy */
+#define S_RAW_SCSI 12 /* raw serv.: target error */
+#define S_RAW_ILL 0xff /* raw serv.: illegal */
+
+/* timeout values */
+#define INIT_RETRIES 10000 /* 10000 * 1ms = 10s */
+#define INIT_TIMEOUT 100000 /* 1000 * 1ms = 1s */
+#define POLL_TIMEOUT 10000 /* 10000 * 1ms = 10s */
+
+/* priorities */
+#define DEFAULT_PRI 0x20
+#define IOCTL_PRI 0x10
+
+/* data directions */
+#define DATA_IN 0x01000000L /* data from target */
+#define DATA_OUT 0x00000000L /* data to target */
+
+/* BMIC registers (EISA controllers) */
+#define ID0REG 0x0c80 /* board ID */
+#define EINTENABREG 0x0c89 /* interrupt enable */
+#define SEMA0REG 0x0c8a /* command semaphore */
+#define SEMA1REG 0x0c8b /* status semaphore */
+#define LDOORREG 0x0c8d /* local doorbell */
+#define EDENABREG 0x0c8e /* EISA system doorbell enable */
+#define EDOORREG 0x0c8f /* EISA system doorbell */
+#define MAILBOXREG 0x0c90 /* mailbox reg. (16 bytes) */
+#define EISAREG 0x0cc0 /* EISA configuration */
+
+/* other defines */
+#define LINUX_OS 8 /* used for cache optim. */
+#define SCATTER_GATHER 1 /* s/g feature */
+#define GDTH_MAXSG 32 /* max. s/g elements */
+#define SECS32 0x1f /* round capacity */
+#define BIOS_ID_OFFS 0x10 /* offset contr. ID in ISABIOS */
+#define LOCALBOARD 0 /* board node always 0 */
+#define ASYNCINDEX 0 /* cmd index async. event */
+#define SPEZINDEX 1 /* cmd index unknown service */
+#define GDT_WR_THROUGH 0x100 /* WRITE_THROUGH supported */
+
+/* typedefs */
+
+#pragma pack(1)
+
+typedef struct {
+ char buffer[GDTH_SCRATCH]; /* scratch buffer */
+} gdth_scratch_str;
+
+/* screenservice message */
+typedef struct {
+ ulong msg_handle; /* message handle */
+ ulong msg_len; /* size of message */
+ ulong msg_alen; /* answer length */
+ unchar msg_answer; /* answer flag */
+ unchar msg_ext; /* more messages */
+ unchar msg_reserved[2];
+ char msg_text[MSGLEN+2]; /* the message text */
+} gdth_msg_str;
+
+/* get channel count IOCTL */
+typedef struct {
+ ulong channel_no; /* number of channel */
+ ulong drive_cnt; /* number of drives */
+ unchar siop_id; /* SCSI processor ID */
+ unchar siop_state; /* SCSI processor state */
+} gdth_getch_str;
+
+/* get raw channel count IOCTL (NEW!) */
+typedef struct {
+ ulong version; /* version of information (-1UL: newest) */
+ unchar list_entries; /* list entry count */
+ unchar first_chan; /* first channel number */
+ unchar last_chan; /* last channel number */
+ unchar chan_count; /* (R) channel count */
+ ulong list_offset; /* offset of list[0] */
+ struct {
+ unchar proc_id; /* processor id */
+ unchar proc_defect; /* defect ? */
+ unchar reserved[2];
+ } list[MAXBUS];
+} gdth_iochan_str;
+
+/* cache info/config IOCTL */
+typedef struct {
+ ulong version; /* firmware version */
+ ushort state; /* cache state (on/off) */
+ ushort strategy; /* cache strategy */
+ ushort write_back; /* write back state (on/off) */
+ ushort block_size; /* cache block size */
+} gdth_cpar_str;
+
+typedef struct {
+ ulong csize; /* cache size */
+ ulong read_cnt; /* read/write counter */
+ ulong write_cnt;
+ ulong tr_hits; /* hits */
+ ulong sec_hits;
+ ulong sec_miss; /* misses */
+} gdth_cstat_str;
+
+typedef struct {
+ gdth_cpar_str cpar;
+ gdth_cstat_str cstat;
+} gdth_cinfo_str;
+
+/* board info IOCTL */
+typedef struct {
+ ulong ser_no; /* serial no. */
+ unchar oem_id[2]; /* OEM ID */
+ ushort ep_flags; /* eprom flags */
+ ulong proc_id; /* processor ID */
+ ulong memsize; /* memory size (bytes) */
+ unchar mem_banks; /* memory banks */
+ unchar chan_type; /* channel type */
+ unchar chan_count; /* channel count */
+ unchar rdongle_pres; /* dongle present? */
+ ulong epr_fw_ver; /* (eprom) firmware version */
+ ulong upd_fw_ver; /* (update) firmware version */
+ ulong upd_revision; /* update revision */
+ char type_string[16]; /* controller name */
+ char raid_string[16]; /* RAID firmware name */
+ unchar update_pres; /* update present? */
+ unchar xor_pres; /* XOR engine present? */
+ unchar prom_type; /* ROM type (eprom/flash eprom) */
+ unchar prom_count; /* number of ROM devices */
+ ulong dup_pres; /* duplexing module present? */
+ ulong chan_pres; /* number of expansion channels */
+ ulong mem_pres; /* memory expansion installed? */
+ unchar ft_bus_system; /* fault bus supported? */
+ unchar subtype_valid; /* board_subtype valid? */
+ unchar board_subtype; /* controller subtype/hardware level */
+ unchar ramparity_pres; /* RAM parity check hardware present? */
+} gdth_binfo_str;
+
+/* scatter/gather element */
+typedef struct {
+ ulong sg_ptr; /* address */
+ ulong sg_len; /* length */
+} gdth_sg_str;
+
+/* command structure */
+typedef struct {
+ ulong BoardNode; /* board node (always 0) */
+ ulong CommandIndex; /* command number */
+ ushort OpCode; /* the command (READ,..) */
+ union {
+ struct {
+ ushort DeviceNo; /* number of cache drive */
+ ulong BlockNo; /* block number */
+ ulong BlockCnt; /* block count */
+ ulong DestAddr; /* dest. addr. (if s/g: -1) */
+ ulong sg_canz; /* s/g element count */
+ gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } cache; /* cache service cmd. str. */
+ struct {
+ ushort param_size; /* size of p_param buffer */
+ ulong subfunc; /* IOCTL function */
+ ulong channel; /* device */
+ ulong p_param; /* buffer */
+ } ioctl; /* IOCTL command structure */
+ struct {
+ ushort reserved;
+ ulong msg_handle; /* message handle */
+ ulong msg_addr; /* message buffer address */
+ } screen; /* screen service cmd. str. */
+ struct {
+ ushort reserved;
+ ulong direction; /* data direction */
+ ulong mdisc_time; /* disc. time (0: no timeout)*/
+ ulong mcon_time; /* connect time(0: no to.) */
+ ulong sdata; /* dest. addr. (if s/g: -1) */
+ ulong sdlen; /* data length (bytes) */
+ ulong clen; /* SCSI cmd. length(6,10,12) */
+ unchar cmd[12]; /* SCSI command */
+ unchar target; /* target ID */
+ unchar lun; /* LUN */
+ unchar bus; /* SCSI bus number */
+ unchar priority; /* only 0 used */
+ ulong sense_len; /* sense data length */
+ ulong sense_data; /* sense data addr. */
+ struct raw *link_p; /* linked cmds (not supp.) */
+ ulong sg_ranz; /* s/g element count */
+ gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } raw; /* raw service cmd. struct. */
+ } u;
+ /* additional variables */
+ unchar Service; /* controller service */
+ ushort Status; /* command result */
+ ulong Info; /* additional information */
+ Scsi_Cmnd *RequestBuffer; /* request buffer */
+} gdth_cmd_str;
+
+/* controller event structure */
+#define ES_ASYNC 1
+#define ES_DRIVER 2
+#define ES_TEST 3
+#define ES_SYNC 4
+typedef struct {
+ ushort size; /* size of structure */
+ union {
+ char stream[16];
+ struct {
+ ushort ionode;
+ ushort service;
+ ulong index;
+ } driver;
+ struct {
+ ushort ionode;
+ ushort service;
+ ushort status;
+ ulong info;
+ unchar scsi_coord[3];
+ } async;
+ struct {
+ ushort ionode;
+ ushort service;
+ ushort status;
+ ulong info;
+ ushort hostdrive;
+ unchar scsi_coord[3];
+ unchar sense_key;
+ } sync;
+ struct {
+ ulong l1, l2, l3, l4;
+ } test;
+ } eu;
+} gdth_evt_data;
+
+typedef struct {
+ ulong first_stamp;
+ ulong last_stamp;
+ ushort same_count;
+ ushort event_source;
+ ushort event_idx;
+ unchar application;
+ unchar reserved;
+ gdth_evt_data event_data;
+} gdth_evt_str;
+
+
+/* DPRAM structures */
+
+/* interface area ISA/PCI */
+typedef struct {
+ unchar S_Cmd_Indx; /* special command */
+ unchar volatile S_Status; /* status special command */
+ ushort reserved1;
+ ulong S_Info[4]; /* add. info special command */
+ unchar volatile Sema0; /* command semaphore */
+ unchar reserved2[3];
+ unchar Cmd_Index; /* command number */
+ unchar reserved3[3];
+ ushort volatile Status; /* command status */
+ ushort Service; /* service(for async.events) */
+ ulong Info[2]; /* additional info */
+ struct {
+ ushort offset; /* command offs. in the DPRAM*/
+ ushort serv_id; /* service */
+ } comm_queue[MAXOFFSETS]; /* command queue */
+ ulong bios_reserved[2];
+ unchar gdt_dpr_cmd[1]; /* commands */
+} gdt_dpr_if;
+
+/* SRAM structure PCI controllers */
+typedef struct {
+ ulong magic; /* controller ID from BIOS */
+ ushort need_deinit; /* switch betw. BIOS/driver */
+ unchar switch_support; /* see need_deinit */
+ unchar padding[9];
+ unchar os_used[16]; /* OS code per service */
+ unchar unused[28];
+ unchar fw_magic; /* contr. ID from firmware */
+} gdt_pci_sram;
+
+/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
+typedef struct {
+ unchar os_used[16]; /* OS code per service */
+ ushort need_deinit; /* switch betw. BIOS/driver */
+ unchar switch_support; /* see need_deinit */
+ unchar padding;
+} gdt_eisa_sram;
+
+
+/* DPRAM ISA controllers */
+typedef struct {
+ union {
+ struct {
+ unchar bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
+ ulong magic; /* controller (EISA) ID */
+ ushort need_deinit; /* switch betw. BIOS/driver */
+ unchar switch_support; /* see need_deinit */
+ unchar padding[9];
+ unchar os_used[16]; /* OS code per service */
+ } dp_sram;
+ unchar bios_area[0x4000]; /* 16KB reserved for BIOS */
+ } bu;
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0x3000]; /* 12KB for interface */
+ } u;
+ struct {
+ unchar memlock; /* write protection DPRAM */
+ unchar event; /* release event */
+ unchar irqen; /* board interrupts enable */
+ unchar irqdel; /* acknowledge board int. */
+ unchar volatile Sema1; /* status semaphore */
+ unchar rq; /* IRQ/DRQ configuration */
+ } io;
+} gdt2_dpram_str;
+
+/* DPRAM PCI controllers */
+typedef struct {
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0xff0-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+ struct {
+ unchar unused0[1];
+ unchar volatile Sema1; /* command semaphore */
+ unchar unused1[3];
+ unchar irqen; /* board interrupts enable */
+ unchar unused2[2];
+ unchar event; /* release event */
+ unchar unused3[3];
+ unchar irqdel; /* acknowledge board int. */
+ unchar unused4[3];
+ } io;
+} gdt6_dpram_str;
+
+/* PLX register structure (new PCI controllers) */
+typedef struct {
+ unchar cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
+ unchar unused1[0x3f];
+ unchar volatile sema0_reg; /* command semaphore */
+ unchar volatile sema1_reg; /* status semaphore */
+ unchar unused2[2];
+ ushort volatile status; /* command status */
+ ushort service; /* service */
+ ulong info[2]; /* additional info */
+ unchar unused3[0x10];
+ unchar ldoor_reg; /* PCI to local doorbell */
+ unchar unused4[3];
+ unchar volatile edoor_reg; /* local to PCI doorbell */
+ unchar unused5[3];
+ unchar control0; /* control0 register(unused) */
+ unchar control1; /* board interrupts enable */
+ unchar unused6[0x16];
+} gdt6c_plx_regs;
+
+/* DPRAM new PCI controllers */
+typedef struct {
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0x4000-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+} gdt6c_dpram_str;
+
+/* i960 register structure (PCI MPR controllers) */
+typedef struct {
+ unchar unused1[16];
+ unchar volatile sema0_reg; /* command semaphore */
+ unchar unused2;
+ unchar volatile sema1_reg; /* status semaphore */
+ unchar unused3;
+ ushort volatile status; /* command status */
+ ushort service; /* service */
+ ulong info[2]; /* additional info */
+ unchar ldoor_reg; /* PCI to local doorbell */
+ unchar unused4[11];
+ unchar volatile edoor_reg; /* local to PCI doorbell */
+ unchar unused5[7];
+ unchar edoor_en_reg; /* board interrupts enable */
+ unchar unused6[27];
+ ulong unused7[1004]; /* size: 4 KB */
+} gdt6m_i960_regs;
+
+/* DPRAM PCI MPR controllers */
+typedef struct {
+ gdt6m_i960_regs i960r; /* 4KB i960 registers */
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0x3000-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+} gdt6m_dpram_str;
+
+
+/* PCI resources */
+typedef struct {
+ ushort device_id; /* device ID (0,..,9) */
+ unchar bus; /* PCI bus */
+ unchar device_fn; /* PCI device/function no. */
+ ulong dpmem; /* DPRAM address */
+ ulong io; /* IO address */
+ ulong io_mm; /* IO address mem. mapped */
+ ulong bios; /* BIOS address */
+ unchar irq; /* IRQ */
+} gdth_pci_str;
+
+
+/* controller information structure */
+typedef struct {
+ unchar bus_cnt; /* SCSI bus count */
+ unchar type; /* controller class */
+ ushort raw_feat; /* feat. raw service (s/g,..) */
+ ulong stype; /* controller subtype */
+ ushort cache_feat; /* feat. cache serv. (s/g,..) */
+ ushort bmic; /* BMIC address (EISA) */
+ void *brd; /* DPRAM address */
+ ulong brd_phys; /* slot number/BIOS address */
+ gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
+ gdth_cmd_str *pccb; /* address command structure */
+ gdth_scratch_str *pscratch;
+ unchar irq; /* IRQ */
+ unchar drq; /* DRQ (ISA controllers) */
+ ushort status; /* command status */
+ ulong info;
+ ulong info2; /* additional info */
+ Scsi_Cmnd *req_first; /* top of request queue */
+ struct {
+ unchar type; /* device type */
+ unchar heads; /* mapping */
+ unchar secs;
+ unchar lock; /* drive locked ? (hot plug) */
+ ushort hostdrive; /* host drive number */
+ ushort devtype; /* further information */
+ ulong size; /* capacity */
+ } id[MAXBUS][MAXID];
+ ushort cmd_cnt; /* command count in DPRAM */
+ ushort cmd_len; /* length of actual command */
+ ushort cmd_offs_dpmem; /* actual offset in DPRAM */
+ ushort ic_all_size; /* sizeof DPRAM interf. area */
+ unchar reserved;
+ unchar mode; /* information from /proc */
+ ushort param_size;
+ gdth_cpar_str cpar; /* controller cache par. */
+ char ctr_name[16]; /* controller name */
+} gdth_ha_str;
+
+/* structure for scsi_register(), SCSI bus != 0 */
+typedef struct {
+ ushort hanum;
+ ushort busnum;
+} gdth_num_str;
+
+/* structure for scsi_register() */
+typedef struct {
+ gdth_num_str numext; /* must be the first element */
+ gdth_ha_str haext;
+ gdth_cmd_str cmdext;
+ gdth_scratch_str dmaext;
+} gdth_ext_str;
+
+
+/* INQUIRY data format */
+typedef struct {
+ unchar type_qual;
+ unchar modif_rmb;
+ unchar version;
+ unchar resp_aenc;
+ unchar add_length;
+ unchar reserved1;
+ unchar reserved2;
+ unchar misc;
+ unchar vendor[8];
+ unchar product[16];
+ unchar revision[4];
+} gdth_inq_data;
+
+/* READ_CAPACITY data format */
+typedef struct {
+ ulong last_block_no;
+ ulong block_length;
+} gdth_rdcap_data;
+
+/* REQUEST_SENSE data format */
+typedef struct {
+ unchar errorcode;
+ unchar segno;
+ unchar key;
+ ulong info;
+ unchar add_length;
+ ulong cmd_info;
+ unchar adsc;
+ unchar adsq;
+ unchar fruc;
+ unchar key_spec[3];
+} gdth_sense_data;
+
+/* MODE_SENSE data format */
+typedef struct {
+ struct {
+ unchar data_length;
+ unchar med_type;
+ unchar dev_par;
+ unchar bd_length;
+ } hd;
+ struct {
+ unchar dens_code;
+ unchar block_count[3];
+ unchar reserved;
+ unchar block_length[3];
+ } bd;
+} gdth_modep_data;
+
+/* stack frame */
+typedef struct {
+ ulong b[10]; /* 32 bit compiler ! */
+} gdth_stackframe;
+
+#pragma pack()
+
+
+/* data structure for reserve drives */
+typedef struct {
+ unchar hanum;
+ unchar bus;
+ unchar id;
+} gdth_reserve_str;
+
+
+/* function prototyping */
+
+int gdth_detect(Scsi_Host_Template *);
+int gdth_release(struct Scsi_Host *);
+int gdth_command(Scsi_Cmnd *);
+int gdth_queuecommand(Scsi_Cmnd *,void (*done)(Scsi_Cmnd *));
+int gdth_abort(Scsi_Cmnd *);
+#if LINUX_VERSION_CODE >= 0x010346
+int gdth_reset(Scsi_Cmnd *, unsigned int reset_flags);
+#else
+int gdth_reset(Scsi_Cmnd *);
+#endif
+const char *gdth_info(struct Scsi_Host *);
+
+#if LINUX_VERSION_CODE >= 0x02015F
+int gdth_bios_param(Disk *,kdev_t,int *);
+extern struct proc_dir_entry proc_scsi_gdth;
+int gdth_proc_info(char *,char **,off_t,int,int,int);
+int gdth_eh_abort(Scsi_Cmnd *scp);
+int gdth_eh_device_reset(Scsi_Cmnd *scp);
+int gdth_eh_bus_reset(Scsi_Cmnd *scp);
+int gdth_eh_host_reset(Scsi_Cmnd *scp);
+#define GDTH { proc_dir: &proc_scsi_gdth, \
+ proc_info: gdth_proc_info, \
+ name: "GDT SCSI Disk Array Controller",\
+ detect: gdth_detect, \
+ release: gdth_release, \
+ info: gdth_info, \
+ command: gdth_command, \
+ queuecommand: gdth_queuecommand, \
+ eh_abort_handler: gdth_eh_abort, \
+ eh_device_reset_handler: gdth_eh_device_reset, \
+ eh_bus_reset_handler: gdth_eh_bus_reset, \
+ eh_host_reset_handler: gdth_eh_host_reset, \
+ abort: gdth_abort, \
+ reset: gdth_reset, \
+ bios_param: gdth_bios_param, \
+ can_queue: GDTH_MAXCMDS, \
+ this_id: -1, \
+ sg_tablesize: GDTH_MAXSG, \
+ cmd_per_lun: GDTH_MAXC_P_L, \
+ present: 0, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 1 /* use new error code */ }
+#elif LINUX_VERSION_CODE >= 0x010300
+int gdth_bios_param(Disk *,kdev_t,int *);
+extern struct proc_dir_entry proc_scsi_gdth;
+int gdth_proc_info(char *,char **,off_t,int,int,int);
+#define GDTH { NULL, NULL, \
+ &proc_scsi_gdth, \
+ gdth_proc_info, \
+ "GDT SCSI Disk Array Controller", \
+ gdth_detect, \
+ gdth_release, \
+ gdth_info, \
+ gdth_command, \
+ gdth_queuecommand, \
+ gdth_abort, \
+ gdth_reset, \
+ NULL, \
+ gdth_bios_param, \
+ GDTH_MAXCMDS, \
+ -1, \
+ GDTH_MAXSG, \
+ GDTH_MAXC_P_L, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING}
+#else
+int gdth_bios_param(Disk *,int,int *);
+#define GDTH { NULL, NULL, \
+ "GDT SCSI Disk Array Controller", \
+ gdth_detect, \
+ gdth_release, \
+ gdth_info, \
+ gdth_command, \
+ gdth_queuecommand, \
+ gdth_abort, \
+ gdth_reset, \
+ NULL, \
+ gdth_bios_param, \
+ GDTH_MAXCMDS, \
+ -1, \
+ GDTH_MAXSG, \
+ GDTH_MAXC_P_L, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING}
+#endif
+
+#endif
+
diff --git a/linux/src/drivers/scsi/gdth_ioctl.h b/linux/src/drivers/scsi/gdth_ioctl.h
new file mode 100644
index 0000000..bf15554
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth_ioctl.h
@@ -0,0 +1,86 @@
+#ifndef _GDTH_IOCTL_H
+#define _GDTH_IOCTL_H
+
+/* gdth_ioctl.h
+ * $Id: gdth_ioctl.h,v 1.1 1999/04/26 05:54:37 tb Exp $
+ */
+
+/* IOCTLs */
+#define GDTIOCTL_MASK ('J'<<8)
+#define GDTIOCTL_GENERAL (GDTIOCTL_MASK | 0) /* general IOCTL */
+#define GDTIOCTL_DRVERS (GDTIOCTL_MASK | 1) /* get driver version */
+#define GDTIOCTL_CTRTYPE (GDTIOCTL_MASK | 2) /* get controller type */
+#define GDTIOCTL_OSVERS (GDTIOCTL_MASK | 3) /* get OS version */
+#define GDTIOCTL_CTRCNT (GDTIOCTL_MASK | 5) /* get controller count */
+#define GDTIOCTL_LOCKDRV (GDTIOCTL_MASK | 6) /* lock host drive */
+#define GDTIOCTL_LOCKCHN (GDTIOCTL_MASK | 7) /* lock channel */
+#define GDTIOCTL_EVENT (GDTIOCTL_MASK | 8) /* read controller events */
+
+#define GDTIOCTL_MAGIC 0x06030f07UL
+
+
+/* IOCTL structure (write) */
+typedef struct {
+ ulong magic; /* IOCTL magic */
+ ushort ioctl; /* IOCTL */
+ ushort ionode; /* controller number */
+ ushort service; /* controller service */
+ ushort timeout; /* timeout */
+ union {
+ struct {
+ unchar command[512]; /* controller command */
+ unchar data[1]; /* add. data */
+ } general;
+ struct {
+ unchar lock; /* lock/unlock */
+ unchar drive_cnt; /* drive count */
+ ushort drives[35]; /* drives */
+ } lockdrv;
+ struct {
+ unchar lock; /* lock/unlock */
+ unchar channel; /* channel */
+ } lockchn;
+ struct {
+ int erase; /* erase event ? */
+ int handle;
+ } event;
+ } iu;
+} gdth_iowr_str;
+
+/* IOCTL structure (read) */
+typedef struct {
+ ulong size; /* buffer size */
+ ulong status; /* IOCTL error code */
+ union {
+ struct {
+ unchar data[1]; /* data */
+ } general;
+ struct {
+ ushort version; /* driver version */
+ } drvers;
+ struct {
+ unchar type; /* controller type */
+ ushort info; /* slot etc. */
+ ushort oem_id; /* OEM ID */
+ ushort bios_ver; /* not used */
+ ushort access; /* not used */
+ ushort ext_type; /* extended type */
+ } ctrtype;
+ struct {
+ unchar version; /* OS version */
+ unchar subversion; /* OS subversion */
+ ushort revision; /* revision */
+ } osvers;
+ struct {
+ ushort count; /* controller count */
+ } ctrcnt;
+ struct {
+ int handle;
+ unchar evt[32]; /* event structure */
+ } event;
+ } iu;
+} gdth_iord_str;
+
+
+#endif
+
diff --git a/linux/src/drivers/scsi/gdth_proc.c b/linux/src/drivers/scsi/gdth_proc.c
new file mode 100644
index 0000000..8764d55
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth_proc.c
@@ -0,0 +1,656 @@
+/* gdth_proc.c
+ * $Id: gdth_proc.c,v 1.1 1999/04/26 05:54:38 tb Exp $
+ */
+
+#include "gdth_ioctl.h"
+
+int gdth_proc_info(char *buffer,char **start,off_t offset,int length,
+ int hostno,int inout)
+{
+ int hanum,busnum,i;
+
+ TRACE2(("gdth_proc_info() length %d ha %d offs %d inout %d\n",
+ length,hostno,(int)offset,inout));
+
+ for (i=0; i<gdth_ctr_vcount; ++i) {
+ if (gdth_ctr_vtab[i]->host_no == hostno)
+ break;
+ }
+ if (i==gdth_ctr_vcount)
+ return(-EINVAL);
+
+ hanum = NUMDATA(gdth_ctr_vtab[i])->hanum;
+ busnum= NUMDATA(gdth_ctr_vtab[i])->busnum;
+
+ if (inout)
+ return(gdth_set_info(buffer,length,i,hanum,busnum));
+ else
+ return(gdth_get_info(buffer,start,offset,length,i,hanum,busnum));
+}
+
+static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum)
+{
+ int ret_val;
+ Scsi_Cmnd scp;
+ Scsi_Device sdev;
+ gdth_iowr_str *piowr;
+
+ TRACE2(("gdth_set_info() ha %d bus %d\n",hanum,busnum));
+ piowr = (gdth_iowr_str *)buffer;
+
+ memset(&sdev,0,sizeof(Scsi_Device));
+ memset(&scp, 0,sizeof(Scsi_Cmnd));
+ sdev.host = gdth_ctr_vtab[vh];
+ sdev.id = sdev.host->this_id;
+ scp.cmd_len = 12;
+ scp.host = gdth_ctr_vtab[vh];
+ scp.target = sdev.host->this_id;
+ scp.device = &sdev;
+ scp.use_sg = 0;
+
+ if (length >= 4) {
+ if (strncmp(buffer,"gdth",4) == 0) {
+ buffer += 5;
+ length -= 5;
+ ret_val = gdth_set_asc_info( buffer, length, hanum, scp );
+ } else if (piowr->magic == GDTIOCTL_MAGIC) {
+ ret_val = gdth_set_bin_info( buffer, length, hanum, scp );
+ } else {
+ printk("GDT: Wrong signature: %6s\n",buffer);
+ ret_val = -EINVAL;
+ }
+ } else {
+ ret_val = -EINVAL;
+ }
+ return ret_val;
+}
+
+static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp)
+{
+ int orig_length, drive, wb_mode;
+ char cmnd[12];
+ int i, j, found;
+ gdth_ha_str *ha;
+ gdth_cmd_str gdtcmd;
+ gdth_cpar_str *pcpar;
+
+ TRACE2(("gdth_set_asc_info() ha %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ memset(cmnd, 0,10);
+ orig_length = length + 5;
+ drive = -1;
+ wb_mode = 0;
+ found = FALSE;
+
+ if (length >= 5 && strncmp(buffer,"flush",5)==0) {
+ buffer += 6;
+ length -= 6;
+ if (length && *buffer>='0' && *buffer<='9') {
+ drive = (int)(*buffer-'0');
+ ++buffer; --length;
+ if (length && *buffer>='0' && *buffer<='9') {
+ drive = drive*10 + (int)(*buffer-'0');
+ ++buffer; --length;
+ }
+ printk("GDT: Flushing host drive %d .. ",drive);
+ } else {
+ printk("GDT: Flushing all host drives .. ");
+ }
+ for (i = 0; i < MAXBUS; ++i) {
+ for (j = 0; j < MAXID; ++j) {
+ if (ha->id[i][j].type == CACHE_DTYP) {
+ if (drive != -1 &&
+ ha->id[i][j].hostdrive != (ushort)drive)
+ continue;
+ found = TRUE;
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_FLUSH;
+ gdtcmd.u.cache.DeviceNo = ha->id[i][j].hostdrive;
+ gdtcmd.u.cache.BlockNo = 1;
+ gdtcmd.u.cache.sg_canz = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ 30*HZ, 1);
+ down(&sem);
+ }
+ }
+ }
+ }
+ if (!found)
+ printk("\nNo host drive found !\n");
+ else
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ if (length >= 7 && strncmp(buffer,"wbp_off",7)==0) {
+ buffer += 8;
+ length -= 8;
+ printk("GDT: Disabling write back permanently .. ");
+ wb_mode = 1;
+ } else if (length >= 6 && strncmp(buffer,"wbp_on",6)==0) {
+ buffer += 7;
+ length -= 7;
+ printk("GDT: Enabling write back permanently .. ");
+ wb_mode = 2;
+ } else if (length >= 6 && strncmp(buffer,"wb_off",6)==0) {
+ buffer += 7;
+ length -= 7;
+ printk("GDT: Disabling write back commands .. ");
+ if (ha->cache_feat & GDT_WR_THROUGH) {
+ gdth_write_through = TRUE;
+ printk("Done.\n");
+ } else {
+ printk("Not supported !\n");
+ }
+ return(orig_length);
+ } else if (length >= 5 && strncmp(buffer,"wb_on",5)==0) {
+ buffer += 6;
+ length -= 6;
+ printk("GDT: Enabling write back commands .. ");
+ gdth_write_through = FALSE;
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ if (wb_mode) {
+ pcpar = (gdth_cpar_str *)kmalloc( sizeof(gdth_cpar_str),
+ GFP_ATOMIC | GFP_DMA );
+ if (pcpar == NULL) {
+ TRACE2(("gdth_set_info(): Unable to allocate memory.\n"));
+ printk("Unable to allocate memory.\n");
+ return(-EINVAL);
+ }
+ memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_IOCTL;
+ gdtcmd.u.ioctl.p_param = virt_to_bus(pcpar);
+ gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
+ gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
+ gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
+ pcpar->write_back = wb_mode==1 ? 0:1;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd, sizeof(gdth_cmd_str),
+ gdth_scsi_done, 30*HZ, 1);
+ down(&sem);
+ }
+ kfree( pcpar );
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ printk("GDT: Unknown command: %s Length: %d\n",buffer,length);
+ return(-EINVAL);
+}
+
+static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd scp)
+{
+ char cmnd[12];
+ int id;
+ unchar i, j, k, found;
+ gdth_ha_str *ha;
+ gdth_iowr_str *piowr;
+ gdth_iord_str *piord;
+ gdth_cmd_str *pcmd;
+ ulong *ppadd;
+ ulong add_size, flags;
+
+
+ TRACE2(("gdth_set_bin_info() ha %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ memset(cmnd, 0,10);
+ piowr = (gdth_iowr_str *)buffer;
+ piord = NULL;
+ pcmd = NULL;
+
+ if (length < GDTOFFSOF(gdth_iowr_str,iu))
+ return(-EINVAL);
+
+ switch (piowr->ioctl) {
+ case GDTIOCTL_GENERAL:
+ if (length < GDTOFFSOF(gdth_iowr_str,iu.general.data[0]))
+ return(-EINVAL);
+ pcmd = (gdth_cmd_str *)piowr->iu.general.command;
+ pcmd->Service = piowr->service;
+ if (pcmd->OpCode == GDT_IOCTL) {
+ ppadd = &pcmd->u.ioctl.p_param;
+ add_size = pcmd->u.ioctl.param_size;
+ } else if (piowr->service == CACHESERVICE) {
+ add_size = pcmd->u.cache.BlockCnt * SECTOR_SIZE;
+ if (ha->cache_feat & SCATTER_GATHER) {
+ ppadd = &pcmd->u.cache.sg_lst[0].sg_ptr;
+ pcmd->u.cache.DestAddr = -1UL;
+ pcmd->u.cache.sg_lst[0].sg_len = add_size;
+ pcmd->u.cache.sg_canz = 1;
+ } else {
+ ppadd = &pcmd->u.cache.DestAddr;
+ pcmd->u.cache.sg_canz = 0;
+ }
+ } else if (piowr->service == SCSIRAWSERVICE) {
+ add_size = pcmd->u.raw.sdlen;
+ if (ha->raw_feat & SCATTER_GATHER) {
+ ppadd = &pcmd->u.raw.sg_lst[0].sg_ptr;
+ pcmd->u.raw.sdata = -1UL;
+ pcmd->u.raw.sg_lst[0].sg_len = add_size;
+ pcmd->u.raw.sg_ranz = 1;
+ } else {
+ ppadd = &pcmd->u.raw.sdata;
+ pcmd->u.raw.sg_ranz = 0;
+ }
+ } else {
+ return(-EINVAL);
+ }
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) + add_size );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+
+ piord->size = sizeof(gdth_iord_str) + add_size;
+ if (add_size > 0) {
+ memcpy(piord->iu.general.data, piowr->iu.general.data, add_size);
+ *ppadd = virt_to_bus(piord->iu.general.data);
+ }
+ /* do IOCTL */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, pcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ piowr->timeout*HZ, 1);
+ down(&sem);
+ piord->status = (ulong)scp.SCp.Message;
+ }
+ break;
+
+ case GDTIOCTL_DRVERS:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ piord->iu.drvers.version = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
+ break;
+
+ case GDTIOCTL_CTRTYPE:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
+ piord->iu.ctrtype.type = (unchar)((ha->stype>>20) - 0x10);
+ } else if (ha->type != GDT_PCIMPR) {
+ piord->iu.ctrtype.type = (unchar)((ha->stype<<8) + 6);
+ } else {
+ piord->iu.ctrtype.type = 0xfe;
+ piord->iu.ctrtype.ext_type = 0x6000 | ha->stype;
+ }
+ piord->iu.ctrtype.info = ha->brd_phys;
+ piord->iu.ctrtype.oem_id = (ushort)GDT3_ID;
+ break;
+
+ case GDTIOCTL_CTRCNT:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ piord->iu.ctrcnt.count = (ushort)gdth_ctr_count;
+ break;
+
+ case GDTIOCTL_OSVERS:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ piord->iu.osvers.version = (unchar)(LINUX_VERSION_CODE >> 16);
+ piord->iu.osvers.subversion = (unchar)(LINUX_VERSION_CODE >> 8);
+ piord->iu.osvers.revision = (ushort)(LINUX_VERSION_CODE & 0xff);
+ break;
+
+ case GDTIOCTL_LOCKDRV:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ for (i = k = 0; i < piowr->iu.lockdrv.drive_cnt; ++i) {
+ found = FALSE;
+ for (j = 0; j < ha->bus_cnt; ++j) {
+ for (k = 0; k < MAXID; ++k) {
+ if (ha->id[j][k].type == CACHE_DTYP &&
+ ha->id[j][k].hostdrive == piowr->iu.lockdrv.drives[i]) {
+ found = TRUE;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+ if (!found)
+ continue;
+
+ if (piowr->iu.lockdrv.lock) {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 1;
+ restore_flags( flags );
+ gdth_wait_completion( hanum, j, k );
+ gdth_stop_timeout( hanum, j, k );
+ } else {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 0;
+ restore_flags( flags );
+ gdth_start_timeout( hanum, j, k );
+ gdth_next( hanum );
+ }
+ }
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ break;
+
+ case GDTIOCTL_LOCKCHN:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ for (k = 0, j = piowr->iu.lockchn.channel; k < MAXID; ++k) {
+ if (ha->id[j][k].type != RAW_DTYP)
+ continue;
+
+ if (piowr->iu.lockchn.lock) {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 1;
+ restore_flags( flags );
+ gdth_wait_completion( hanum, j, k );
+ gdth_stop_timeout( hanum, j, k );
+ } else {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 0;
+ restore_flags( flags );
+ gdth_start_timeout( hanum, j, k );
+ gdth_next( hanum );
+ }
+ }
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ break;
+
+ case GDTIOCTL_EVENT:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ if (piowr->iu.event.erase == 0) {
+ piord->iu.event.handle = gdth_read_event( piowr->iu.event.handle,
+ (gdth_evt_str *)piord->iu.event.evt );
+ } else {
+ piord->iu.event.handle = piowr->iu.event.handle;
+ gdth_readapp_event( (unchar)piowr->iu.event.erase,
+ (gdth_evt_str *)piord->iu.event.evt );
+ }
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+ /* we return a buffer ID to detect the right buffer during READ-IOCTL */
+ return id;
+}
+
+static int gdth_get_info(char *buffer,char **start,off_t offset,
+ int length,int vh,int hanum,int busnum)
+{
+ int size = 0,len = 0;
+ off_t begin = 0,pos = 0;
+ gdth_ha_str *ha;
+ gdth_iord_str *piord;
+ int id;
+
+ TRACE2(("gdth_get_info() ha %d bus %d\n",hanum,busnum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ id = length;
+
+ /* look for buffer ID in length */
+ if (id > 4) {
+#if LINUX_VERSION_CODE >= 0x020000
+ size = sprintf(buffer+len,
+ "%s SCSI Disk Array Controller\n",
+ ha->ctr_name);
+#else
+ size = sprintf(buffer+len,
+ "%s SCSI Disk Array Controller (SCSI Bus %d)\n",
+ ha->ctr_name,busnum);
+#endif
+ len += size; pos = begin + len;
+ size = sprintf(buffer+len,
+ "Firmware Version: %d.%2d\tDriver Version: %s\n",
+ (unchar)(ha->cpar.version>>8),
+ (unchar)(ha->cpar.version),GDTH_VERSION_STR);
+ len += size; pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ } else {
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ if (piord == NULL)
+ goto stop_output;
+ length = piord->size;
+ memcpy(buffer+len, (char *)piord, length);
+ gdth_ioctl_free(hanum, id);
+ len += length; pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+
+stop_output:
+ *start = buffer +(offset-begin);
+ len -= (offset-begin);
+ if (len > length)
+ len = length;
+ TRACE2(("get_info() len %d pos %d begin %d offset %d length %d size %d\n",
+ len,(int)pos,(int)begin,(int)offset,length,size));
+ return(len);
+}
+
+
+void gdth_scsi_done(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_scsi_done()\n"));
+
+ scp->request.rq_status = RQ_SCSI_DONE;
+
+ if (scp->request.sem != NULL)
+ up(scp->request.sem);
+}
+
+static int gdth_ioctl_alloc(int hanum, ushort size)
+{
+ ulong flags;
+ int i;
+
+ if (size == 0)
+ return -1;
+
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < 4; ++i) {
+ if (gdth_ioctl_tab[i][hanum] == NULL) {
+ gdth_ioctl_tab[i][hanum] = kmalloc( size, GFP_ATOMIC | GFP_DMA );
+ break;
+ }
+ }
+
+ restore_flags(flags);
+ if (i == 4 || gdth_ioctl_tab[i][hanum] == NULL)
+ return -1;
+ return (i+1);
+}
+
+static void gdth_ioctl_free(int hanum, int idx)
+{
+ ulong flags;
+
+ save_flags(flags);
+ cli();
+
+ kfree( gdth_ioctl_tab[idx-1][hanum] );
+ gdth_ioctl_tab[idx-1][hanum] = NULL;
+
+ restore_flags(flags);
+}
+
+static void gdth_wait_completion(int hanum, int busnum, int id)
+{
+ ulong flags;
+ int i;
+ Scsi_Cmnd *scp;
+
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < GDTH_MAXCMDS; ++i) {
+ scp = gdth_cmd_tab[i][hanum].cmnd;
+#if LINUX_VERSION_CODE >= 0x020000
+ if (!SPECIAL_SCP(scp) && scp->target == (unchar)id &&
+ scp->channel == (unchar)busnum)
+#else
+ if (!SPECIAL_SCP(scp) && scp->target == (unchar)id &&
+ NUMDATA(scp->host)->busnum == (unchar)busnum)
+#endif
+ {
+ scp->SCp.have_data_in = 0;
+ restore_flags(flags);
+ while (!scp->SCp.have_data_in)
+ barrier();
+ scp->scsi_done(scp);
+ save_flags(flags);
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+static void gdth_stop_timeout(int hanum, int busnum, int id)
+{
+ ulong flags;
+ Scsi_Cmnd *scp;
+ gdth_ha_str *ha;
+
+ save_flags(flags);
+ cli();
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
+#if LINUX_VERSION_CODE >= 0x020000
+ if (scp->target == (unchar)id &&
+ scp->channel == (unchar)busnum)
+#else
+ if (scp->target == (unchar)id &&
+ NUMDATA(scp->host)->busnum == (unchar)busnum)
+#endif
+ {
+ TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
+ scp->SCp.buffers_residual = gdth_update_timeout(hanum, scp, 0);
+ }
+ }
+ restore_flags(flags);
+}
+
+static void gdth_start_timeout(int hanum, int busnum, int id)
+{
+ ulong flags;
+ Scsi_Cmnd *scp;
+ gdth_ha_str *ha;
+
+ save_flags(flags);
+ cli();
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
+#if LINUX_VERSION_CODE >= 0x020000
+ if (scp->target == (unchar)id &&
+ scp->channel == (unchar)busnum)
+#else
+ if (scp->target == (unchar)id &&
+ NUMDATA(scp->host)->busnum == (unchar)busnum)
+#endif
+ {
+ TRACE2(("gdth_start_timeout(): update_timeout()\n"));
+ gdth_update_timeout(hanum, scp, scp->SCp.buffers_residual);
+ }
+ }
+ restore_flags(flags);
+}
+
+static int gdth_update_timeout(int hanum, Scsi_Cmnd *scp, int timeout)
+{
+ ulong flags;
+ int oldto;
+
+ save_flags(flags);
+ cli();
+ oldto = scp->timeout_per_command;
+ scp->timeout_per_command = timeout;
+
+#if LINUX_VERSION_CODE >= 0x02014B
+ if (timeout == 0) {
+ del_timer(&scp->eh_timeout);
+ scp->eh_timeout.data = (unsigned long) NULL;
+ scp->eh_timeout.expires = 0;
+ } else {
+ if (scp->eh_timeout.data != (unsigned long) NULL)
+ del_timer(&scp->eh_timeout);
+ scp->eh_timeout.data = (unsigned long) scp;
+ scp->eh_timeout.expires = jiffies + timeout;
+ add_timer(&scp->eh_timeout);
+ }
+#else
+ if (timeout > 0) {
+ if (timer_table[SCSI_TIMER].expires == 0) {
+ timer_table[SCSI_TIMER].expires = jiffies + timeout;
+ timer_active |= 1 << SCSI_TIMER;
+ } else {
+ if (jiffies + timeout < timer_table[SCSI_TIMER].expires)
+ timer_table[SCSI_TIMER].expires = jiffies + timeout;
+ }
+ }
+#endif
+
+ restore_flags(flags);
+ return oldto;
+}
+
diff --git a/linux/src/drivers/scsi/gdth_proc.h b/linux/src/drivers/scsi/gdth_proc.h
new file mode 100644
index 0000000..708b077
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth_proc.h
@@ -0,0 +1,24 @@
+#ifndef _GDTH_PROC_H
+#define _GDTH_PROC_H
+
+/* gdth_proc.h
+ * $Id: gdth_proc.h,v 1.1 1999/04/26 05:54:39 tb Exp $
+ */
+
+static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum);
+static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
+static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
+static int gdth_get_info(char *buffer,char **start,off_t offset,
+ int length,int vh,int hanum,int busnum);
+
+static int gdth_ioctl_alloc(int hanum, ushort size);
+static void gdth_ioctl_free(int hanum, int id);
+static void gdth_wait_completion(int hanum, int busnum, int id);
+static void gdth_stop_timeout(int hanum, int busnum, int id);
+static void gdth_start_timeout(int hanum, int busnum, int id);
+static int gdth_update_timeout(int hanum, Scsi_Cmnd *scp, int timeout);
+
+void gdth_scsi_done(Scsi_Cmnd *scp);
+
+#endif
+
diff --git a/linux/src/drivers/scsi/hosts.c b/linux/src/drivers/scsi/hosts.c
new file mode 100644
index 0000000..0f1bedd
--- /dev/null
+++ b/linux/src/drivers/scsi/hosts.c
@@ -0,0 +1,554 @@
+/*
+ * hosts.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * mid to lowlevel SCSI driver interface
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ */
+
+
+/*
+ * This file contains the medium level SCSI
+ * host interface initialization, as well as the scsi_hosts array of SCSI
+ * hosts currently present in the system.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+
+#include "scsi.h"
+
+#ifndef NULL
+#define NULL 0L
+#endif
+
+#define HOSTS_C
+
+#include "hosts.h"
+
+#ifdef CONFIG_A3000_SCSI
+#include "a3000.h"
+#endif
+
+#ifdef CONFIG_A2091_SCSI
+#include "a2091.h"
+#endif
+
+#ifdef CONFIG_GVP11_SCSI
+#include "gvp11.h"
+#endif
+
+#ifdef CONFIG_ATARI_SCSI
+#include "atari_scsi.h"
+#endif
+
+#ifdef CONFIG_SCSI_ADVANSYS
+#include "advansys.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA152X
+#include "aha152x.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA1542
+#include "aha1542.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA1740
+#include "aha1740.h"
+#endif
+
+#ifdef CONFIG_SCSI_AIC7XXX
+#include "aic7xxx.h"
+#endif
+
+#ifdef CONFIG_SCSI_BUSLOGIC
+#include "BusLogic.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA_DMA
+#include "eata_dma.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA_PIO
+#include "eata_pio.h"
+#endif
+
+#ifdef CONFIG_SCSI_U14_34F
+#include "u14-34f.h"
+#endif
+
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+#include "fdomain.h"
+#endif
+
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+#include "g_NCR5380.h"
+#endif
+
+#ifdef CONFIG_SCSI_IN2000
+#include "in2000.h"
+#endif
+
+#ifdef CONFIG_SCSI_PAS16
+#include "pas16.h"
+#endif
+
+#ifdef CONFIG_SCSI_QLOGIC_FAS
+#include "qlogicfas.h"
+#endif
+
+#ifdef CONFIG_SCSI_QLOGIC_ISP
+#include "qlogicisp.h"
+#endif
+
+#ifdef CONFIG_SCSI_SEAGATE
+#include "seagate.h"
+#endif
+
+#ifdef CONFIG_SCSI_T128
+#include "t128.h"
+#endif
+
+#ifdef CONFIG_SCSI_DTC3280
+#include "dtc.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C7xx
+#include "53c7,8xx.h"
+#endif
+
+#ifdef CONFIG_SCSI_SYM53C8XX
+#include "sym53c8xx.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C8XX
+#include "ncr53c8xx.h"
+#endif
+
+#ifdef CONFIG_SCSI_ULTRASTOR
+#include "ultrastor.h"
+#endif
+
+#ifdef CONFIG_SCSI_7000FASST
+#include "wd7000.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA
+#include "eata.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C406A
+#include "NCR53c406a.h"
+#endif
+
+#ifdef CONFIG_SCSI_DC390T
+#include "dc390.h"
+#endif
+
+#ifdef CONFIG_SCSI_AM53C974
+#include "AM53C974.h"
+#endif
+
+#ifdef CONFIG_SCSI_MEGARAID
+#include "megaraid.h"
+#endif
+
+#ifdef CONFIG_SCSI_PPA
+#include "ppa.h"
+#endif
+
+#ifdef CONFIG_SCSI_SUNESP
+#include "esp.h"
+#endif
+
+#ifdef CONFIG_BLK_DEV_IDESCSI
+#include "ide-scsi.h"
+#endif
+
+#ifdef CONFIG_SCSI_GDTH
+#include "gdth.h"
+#endif
+
+#ifdef CONFIG_SCSI_DEBUG
+#include "scsi_debug.h"
+#endif
+
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/hosts.c,v 1.1 1999/04/26 05:54:40 tb Exp $";
+*/
+
+/*
+ * The scsi host entries should be in the order you wish the
+ * cards to be detected. A driver may appear more than once IFF
+ * it can deal with being detected (and therefore initialized)
+ * with more than one simultaneous host number, can handle being
+ * reentrant, etc.
+ *
+ * They may appear in any order, as each SCSI host is told which host
+ * number it is during detection.
+ */
+
+/* This is a placeholder for controllers that are not configured into
+ * the system - we do this to ensure that the controller numbering is
+ * always consistent, no matter how the kernel is configured. */
+
+#define NO_CONTROLLER {NULL, NULL, NULL, NULL, NULL, NULL, NULL, \
+ NULL, NULL, 0, 0, 0, 0, 0, 0}
+
+/*
+ * When figure is run, we don't want to link to any object code. Since
+ * the macro for each host will contain function pointers, we cannot
+ * use it and instead must use a "blank" that does no such
+ * idiocy.
+ */
+
+Scsi_Host_Template * scsi_hosts = NULL;
+
+static Scsi_Host_Template builtin_scsi_hosts[] =
+{
+#ifdef CONFIG_AMIGA
+#ifdef CONFIG_A3000_SCSI
+ A3000_SCSI,
+#endif
+#ifdef CONFIG_A2091_SCSI
+ A2091_SCSI,
+#endif
+#ifdef CONFIG_GVP11_SCSI
+ GVP11_SCSI,
+#endif
+#endif
+
+#ifdef CONFIG_ATARI
+#ifdef CONFIG_ATARI_SCSI
+ ATARI_SCSI,
+#endif
+#endif
+
+#ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+#endif
+/* BusLogic must come before aha1542.c */
+#ifdef CONFIG_SCSI_BUSLOGIC
+ BUSLOGIC,
+#endif
+#ifdef CONFIG_SCSI_U14_34F
+ ULTRASTOR_14_34F,
+#endif
+#ifdef CONFIG_SCSI_ULTRASTOR
+ ULTRASTOR_14F,
+#endif
+#ifdef CONFIG_SCSI_AHA152X
+ AHA152X,
+#endif
+#ifdef CONFIG_SCSI_AHA1542
+ AHA1542,
+#endif
+#ifdef CONFIG_SCSI_AHA1740
+ AHA1740,
+#endif
+#ifdef CONFIG_SCSI_AIC7XXX
+ AIC7XXX,
+#endif
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+ FDOMAIN_16X0,
+#endif
+#ifdef CONFIG_SCSI_IN2000
+ IN2000,
+#endif
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+ GENERIC_NCR5380,
+#endif
+#ifdef CONFIG_SCSI_NCR53C406A /* 53C406A should come before QLOGIC */
+ NCR53c406a,
+#endif
+#ifdef CONFIG_SCSI_QLOGIC_FAS
+ QLOGICFAS,
+#endif
+#ifdef CONFIG_SCSI_QLOGIC_ISP
+ QLOGICISP,
+#endif
+#ifdef CONFIG_SCSI_PAS16
+ MV_PAS16,
+#endif
+#ifdef CONFIG_SCSI_SEAGATE
+ SEAGATE_ST0X,
+#endif
+#ifdef CONFIG_SCSI_T128
+ TRANTOR_T128,
+#endif
+#ifdef CONFIG_SCSI_DTC3280
+ DTC3x80,
+#endif
+#ifdef CONFIG_SCSI_DC390T
+ DC390_T,
+#endif
+#ifdef CONFIG_SCSI_NCR53C7xx
+ NCR53c7xx,
+#endif
+#ifdef CONFIG_SCSI_SYM53C8XX
+ SYM53C8XX,
+#endif
+#ifdef CONFIG_SCSI_NCR53C8XX
+ NCR53C8XX,
+#endif
+#ifdef CONFIG_SCSI_EATA_DMA
+ EATA_DMA,
+#endif
+#ifdef CONFIG_SCSI_EATA_PIO
+ EATA_PIO,
+#endif
+#ifdef CONFIG_SCSI_7000FASST
+ WD7000,
+#endif
+#ifdef CONFIG_SCSI_EATA
+ EATA,
+#endif
+#ifdef CONFIG_SCSI_AM53C974
+ AM53C974,
+#endif
+#ifdef CONFIG_SCSI_MEGARAID
+ MEGARAID,
+#endif
+#ifdef CONFIG_SCSI_PPA
+ PPA,
+#endif
+#ifdef CONFIG_SCSI_SUNESP
+ SCSI_SPARC_ESP,
+#endif
+#ifdef CONFIG_SCSI_GDTH
+ GDTH,
+#endif
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ IDESCSI,
+#endif
+#ifdef CONFIG_SCSI_DEBUG
+ SCSI_DEBUG,
+#endif
+};
+
+#define MAX_SCSI_HOSTS (sizeof(builtin_scsi_hosts) / sizeof(Scsi_Host_Template))
+
+
+/*
+ * Our semaphores and timeout counters, where size depends on
+ * MAX_SCSI_HOSTS here.
+ */
+
+struct Scsi_Host * scsi_hostlist = NULL;
+struct Scsi_Device_Template * scsi_devicelist = NULL;
+
+int max_scsi_hosts = 0;
+int next_scsi_host = 0;
+
+void
+scsi_unregister(struct Scsi_Host * sh){
+ struct Scsi_Host * shpnt;
+
+ if(scsi_hostlist == sh)
+ scsi_hostlist = sh->next;
+ else {
+ shpnt = scsi_hostlist;
+ while(shpnt->next != sh) shpnt = shpnt->next;
+ shpnt->next = shpnt->next->next;
+ }
+
+ /* If we are removing the last host registered, it is safe to reuse
+ * its host number (this avoids "holes" at boot time) (DB)
+ * It is also safe to reuse those of numbers directly below which have
+ * been released earlier (to avoid some holes in numbering).
+ */
+ if(sh->host_no == max_scsi_hosts - 1) {
+ while(--max_scsi_hosts >= next_scsi_host) {
+ shpnt = scsi_hostlist;
+ while(shpnt && shpnt->host_no != max_scsi_hosts - 1)
+ shpnt = shpnt->next;
+ if(shpnt)
+ break;
+ }
+ }
+ next_scsi_host--;
+ scsi_init_free((char *) sh, sizeof(struct Scsi_Host) + sh->extra_bytes);
+}
+
+/* We call this when we come across a new host adapter. We only do this
+ * once we are 100% sure that we want to use this host adapter - it is a
+ * pain to reverse this, so we try to avoid it
+ */
+
+struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
+ struct Scsi_Host * retval, *shpnt;
+ retval = (struct Scsi_Host *)scsi_init_malloc(sizeof(struct Scsi_Host) + j,
+ (tpnt->unchecked_isa_dma && j ? GFP_DMA : 0) | GFP_ATOMIC);
+ retval->host_busy = 0;
+ retval->block = NULL;
+ retval->wish_block = 0;
+ if(j > 0xffff) panic("Too many extra bytes requested\n");
+ retval->extra_bytes = j;
+ retval->loaded_as_module = scsi_loadable_module_flag;
+ retval->host_no = max_scsi_hosts++; /* never reuse host_no (DB) */
+ next_scsi_host++;
+ retval->host_queue = NULL;
+ retval->host_wait = NULL;
+ retval->last_reset = 0;
+ retval->irq = 0;
+ retval->dma_channel = 0xff;
+
+ /* These three are default values which can be overridden */
+ retval->max_channel = 0;
+ retval->max_id = 8;
+ retval->max_lun = 8;
+
+ retval->unique_id = 0;
+ retval->io_port = 0;
+ retval->hostt = tpnt;
+ retval->next = NULL;
+#ifdef DEBUG
+ printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j);
+#endif
+
+ /* The next six are the default values which can be overridden
+ * if need be */
+ retval->this_id = tpnt->this_id;
+ retval->can_queue = tpnt->can_queue;
+ retval->sg_tablesize = tpnt->sg_tablesize;
+ retval->cmd_per_lun = tpnt->cmd_per_lun;
+ retval->unchecked_isa_dma = tpnt->unchecked_isa_dma;
+ retval->use_clustering = tpnt->use_clustering;
+
+ retval->select_queue_depths = NULL;
+
+ if(!scsi_hostlist)
+ scsi_hostlist = retval;
+ else
+ {
+ shpnt = scsi_hostlist;
+ while(shpnt->next) shpnt = shpnt->next;
+ shpnt->next = retval;
+ }
+
+ return retval;
+}
+
+int
+scsi_register_device(struct Scsi_Device_Template * sdpnt)
+{
+ if(sdpnt->next) panic("Device already registered");
+ sdpnt->next = scsi_devicelist;
+ scsi_devicelist = sdpnt;
+ return 0;
+}
+
+unsigned int scsi_init()
+{
+ static int called = 0;
+ int i, pcount;
+ Scsi_Host_Template * tpnt;
+ struct Scsi_Host * shpnt;
+ const char * name;
+
+ if(called) return 0;
+
+ called = 1;
+ for (tpnt = &builtin_scsi_hosts[0], i = 0; i < MAX_SCSI_HOSTS; ++i, tpnt++)
+ {
+ /*
+ * Initialize our semaphores. -1 is interpreted to mean
+ * "inactive" - where as 0 will indicate a time out condition.
+ */
+ printk("\rprobing scsi %d/%d: %s \e[K", tpnt-builtin_scsi_hosts, MAX_SCSI_HOSTS, tpnt->name);
+
+ pcount = next_scsi_host;
+ if ((tpnt->detect) &&
+ (tpnt->present =
+ tpnt->detect(tpnt)))
+ {
+ /* The only time this should come up is when people use
+ * some kind of patched driver of some kind or another. */
+ if(pcount == next_scsi_host) {
+ if(tpnt->present > 1)
+ panic("Failure to register low-level scsi driver");
+ /* The low-level driver failed to register a driver. We
+ * can do this now. */
+ scsi_register(tpnt,0);
+ }
+ tpnt->next = scsi_hosts;
+ scsi_hosts = tpnt;
+
+ /* Add the driver to /proc/scsi */
+#if CONFIG_PROC_FS
+ build_proc_dir_entries(tpnt);
+#endif
+ }
+ }
+ printk("\ndone\n");
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ {
+ if(shpnt->hostt->info)
+ name = shpnt->hostt->info(shpnt);
+ else
+ name = shpnt->hostt->name;
+ printk ("scsi%d : %s\n", /* And print a little message */
+ shpnt->host_no, name);
+ }
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+ scsi_make_blocked_list();
+
+ /* Now attach the high level drivers */
+#ifdef CONFIG_BLK_DEV_SD
+ scsi_register_device(&sd_template);
+#endif
+#ifdef CONFIG_BLK_DEV_SR
+ scsi_register_device(&sr_template);
+#endif
+#ifdef CONFIG_CHR_DEV_ST
+ scsi_register_device(&st_template);
+#endif
+#ifdef CONFIG_CHR_DEV_SG
+ scsi_register_device(&sg_template);
+#endif
+
+#if 0
+ max_scsi_hosts = next_scsi_host;
+#endif
+ return 0;
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/hosts.h b/linux/src/drivers/scsi/hosts.h
new file mode 100644
index 0000000..8f3f4e0
--- /dev/null
+++ b/linux/src/drivers/scsi/hosts.h
@@ -0,0 +1,405 @@
+/*
+ * hosts.h Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * mid to low-level SCSI driver interface header
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Further modified by Eric Youngdale to support multiple host adapters
+ * of the same type.
+ */
+
+#ifndef _HOSTS_H
+#define _HOSTS_H
+
+/*
+ $Header: cvs/gnumach/linux/src/drivers/scsi/Attic/hosts.h,v 1.1 1999/04/26 05:54:41 tb Exp $
+*/
+
+#include <linux/proc_fs.h>
+
+/* It is senseless to set SG_ALL any higher than this - the performance
+ * does not get any better, and it wastes memory
+ */
+#define SG_NONE 0
+#define SG_ALL 0xff
+
+#define DISABLE_CLUSTERING 0
+#define ENABLE_CLUSTERING 1
+
+/* The various choices mean:
+ * NONE: Self evident. Host adapter is not capable of scatter-gather.
+ * ALL: Means that the host adapter module can do scatter-gather,
+ * and that there is no limit to the size of the table to which
+ * we scatter/gather data.
+ * Anything else: Indicates the maximum number of chains that can be
+ * used in one scatter-gather request.
+ */
+
+/*
+ * The Scsi_Host_Template type has all that is needed to interface with a SCSI
+ * host in a device independent matter. There is one entry for each different
+ * type of host adapter that is supported on the system.
+ */
+
+typedef struct scsi_disk Disk;
+
+typedef struct SHT
+{
+
+ /* Used with loadable modules so we can construct a linked list. */
+ struct SHT * next;
+
+ /* Used with loadable modules so that we know when it is safe to unload */
+ long * usage_count;
+
+ /* The pointer to the /proc/scsi directory entry */
+ struct proc_dir_entry *proc_dir;
+
+ /* proc-fs info function.
+ * Can be used to export driver statistics and other infos to the world
+ * outside the kernel ie. userspace and it also provides an interface
+ * to feed the driver with information. Check eata_dma_proc.c for reference
+ */
+ int (*proc_info)(char *, char **, off_t, int, int, int);
+
+ /*
+ * The name pointer is a pointer to the name of the SCSI
+ * device detected.
+ */
+ const char *name;
+
+ /*
+ * The detect function shall return non zero on detection,
+ * indicating the number of host adapters of this particular
+ * type were found. It should also
+ * initialize all data necessary for this particular
+ * SCSI driver. It is passed the host number, so this host
+ * knows where the first entry is in the scsi_hosts[] array.
+ *
+ * Note that the detect routine MUST not call any of the mid level
+ * functions to queue commands because things are not guaranteed
+ * to be set up yet. The detect routine can send commands to
+ * the host adapter as long as the program control will not be
+ * passed to scsi.c in the processing of the command. Note
+ * especially that scsi_malloc/scsi_free must not be called.
+ */
+ int (* detect)(struct SHT *);
+
+ /* Used with loadable modules to unload the host structures. Note:
+ * there is a default action built into the modules code which may
+ * be sufficient for most host adapters. Thus you may not have to supply
+ * this at all.
+ */
+ int (*release)(struct Scsi_Host *);
+
+ /*
+ * The info function will return whatever useful
+ * information the developer sees fit. If not provided, then
+ * the name field will be used instead.
+ */
+ const char *(* info)(struct Scsi_Host *);
+
+ /*
+ * The command function takes a target, a command (this is a SCSI
+ * command formatted as per the SCSI spec, nothing strange), a
+ * data buffer pointer, and data buffer length pointer. The return
+ * is a status int, bit fielded as follows :
+ * Byte What
+ * 0 SCSI status code
+ * 1 SCSI 1 byte message
+ * 2 host error return.
+ * 3 mid level error return
+ */
+ int (* command)(Scsi_Cmnd *);
+
+ /*
+ * The QueueCommand function works in a similar manner
+ * to the command function. It takes an additional parameter,
+ * void (* done)(int host, int code) which is passed the host
+ * # and exit result when the command is complete.
+ * Host number is the POSITION IN THE hosts array of THIS
+ * host adapter.
+ */
+ int (* queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+ /*
+ * Since the mid level driver handles time outs, etc, we want to
+ * be able to abort the current command. Abort returns 0 if the
+ * abortion was successful. The field SCpnt->abort reason
+ * can be filled in with the appropriate reason why we wanted
+ * the abort in the first place, and this will be used
+ * in the mid-level code instead of the host_byte().
+ * If non-zero, the code passed to it
+ * will be used as the return code, otherwise
+ * DID_ABORT should be returned.
+ *
+ * Note that the scsi driver should "clean up" after itself,
+ * resetting the bus, etc. if necessary.
+ */
+ int (* abort)(Scsi_Cmnd *);
+
+ /*
+ * The reset function will reset the SCSI bus. Any executing
+ * commands should fail with a DID_RESET in the host byte.
+ * The Scsi_Cmnd is passed so that the reset routine can figure
+ * out which host adapter should be reset, and also which command
+ * within the command block was responsible for the reset in
+ * the first place. Some hosts do not implement a reset function,
+ * and these hosts must call scsi_request_sense(SCpnt) to keep
+ * the command alive.
+ */
+ int (* reset)(Scsi_Cmnd *, unsigned int);
+
+ /*
+ * This function is used to select synchronous communications,
+ * which will result in a higher data throughput. Not implemented
+ * yet.
+ */
+ int (* slave_attach)(int, int);
+
+ /*
+ * This function determines the bios parameters for a given
+ * harddisk. These tend to be numbers that are made up by
+ * the host adapter. Parameters:
+ * size, device number, list (heads, sectors, cylinders)
+ */
+ int (* bios_param)(Disk *, kdev_t, int []);
+
+ /*
+ * This determines if we will use a non-interrupt driven
+ * or an interrupt driven scheme, It is set to the maximum number
+ * of simultaneous commands a given host adapter will accept.
+ */
+ int can_queue;
+
+ /*
+ * In many instances, especially where disconnect / reconnect are
+ * supported, our host also has an ID on the SCSI bus. If this is
+ * the case, then it must be reserved. Please set this_id to -1 if
+ * your setup is in single initiator mode, and the host lacks an
+ * ID.
+ */
+ int this_id;
+
+ /*
+ * This determines the degree to which the host adapter is capable
+ * of scatter-gather.
+ */
+ short unsigned int sg_tablesize;
+
+ /*
+ * True if this host adapter can make good use of linked commands.
+ * This will allow more than one command to be queued to a given
+ * unit on a given host. Set this to the maximum number of command
+ * blocks to be provided for each device. Set this to 1 for one
+ * command block per lun, 2 for two, etc. Do not set this to 0.
+ * You should make sure that the host adapter will do the right thing
+ * before you try setting this above 1.
+ */
+ short cmd_per_lun;
+
+ /*
+ * present contains counter indicating how many boards of this
+ * type were found when we did the scan.
+ */
+ unsigned char present;
+
+ /*
+ * true if this host adapter uses unchecked DMA onto an ISA bus.
+ */
+ unsigned unchecked_isa_dma:1;
+
+ /*
+ * true if this host adapter can make good use of clustering.
+ * I originally thought that if the tablesize was large that it
+ * was a waste of CPU cycles to prepare a cluster list, but
+ * it works out that the Buslogic is faster if you use a smaller
+ * number of segments (i.e. use clustering). I guess it is
+ * inefficient.
+ */
+ unsigned use_clustering:1;
+
+} Scsi_Host_Template;
+
+/*
+ * The scsi_hosts array is the array containing the data for all
+ * possible <supported> scsi hosts. This is similar to the
+ * Scsi_Host_Template, except that we have one entry for each
+ * actual physical host adapter on the system, stored as a linked
+ * list. Note that if there are 2 aha1542 boards, then there will
+ * be two Scsi_Host entries, but only 1 Scsi_Host_Template entry.
+ */
+
+struct Scsi_Host
+{
+ struct Scsi_Host * next;
+ unsigned short extra_bytes;
+ volatile unsigned char host_busy;
+ char host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
+ unsigned long last_reset;
+ struct wait_queue *host_wait;
+ Scsi_Cmnd *host_queue;
+ Scsi_Host_Template * hostt;
+
+ /*
+ * These three parameters can be used to allow for wide scsi,
+ * and for host adapters that support multiple busses
+ * The first two should be set to 1 more than the actual max id
+ * or lun (i.e. 8 for normal systems).
+ */
+ unsigned int max_id;
+ unsigned int max_lun;
+ unsigned int max_channel;
+
+ /*
+ * Pointer to a circularly linked list - this indicates the hosts
+ * that should be locked out of performing I/O while we have an active
+ * command on this host.
+ */
+ struct Scsi_Host * block;
+ unsigned wish_block:1;
+
+ /* These parameters should be set by the detect routine */
+ unsigned char *base;
+ unsigned int io_port;
+ unsigned char n_io_port;
+ unsigned char irq;
+ unsigned char dma_channel;
+
+ /*
+ * This is a unique identifier that must be assigned so that we
+ * have some way of identifying each detected host adapter properly
+ * and uniquely. For hosts that do not support more than one card
+ * in the system at one time, this does not need to be set. It is
+ * initialized to 0 in scsi_register.
+ */
+ unsigned int unique_id;
+
+ /*
+ * The rest can be copied from the template, or specifically
+ * initialized, as required.
+ */
+
+ int this_id;
+ int can_queue;
+ short cmd_per_lun;
+ short unsigned int sg_tablesize;
+ unsigned unchecked_isa_dma:1;
+ unsigned use_clustering:1;
+ /*
+ * True if this host was loaded as a loadable module
+ */
+ unsigned loaded_as_module:1;
+
+ void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *);
+
+ unsigned long hostdata[0]; /* Used for storage of host specific stuff */
+};
+
+extern struct Scsi_Host * scsi_hostlist;
+extern struct Scsi_Device_Template * scsi_devicelist;
+
+extern Scsi_Host_Template * scsi_hosts;
+
+extern void build_proc_dir_entries(Scsi_Host_Template *);
+
+
+/*
+ * scsi_init initializes the scsi hosts.
+ */
+
+/*
+ * We use these goofy things because the MM is not set up when we init
+ * the scsi subsystem. By using these functions we can write code that
+ * looks normal. Also, it makes it possible to use the same code for a
+ * loadable module.
+ */
+
+extern void * scsi_init_malloc(unsigned int size, int priority);
+extern void scsi_init_free(char * ptr, unsigned int size);
+
+extern int next_scsi_host;
+
+extern int scsi_loadable_module_flag;
+unsigned int scsi_init(void);
+extern struct Scsi_Host * scsi_register(Scsi_Host_Template *, int j);
+extern void scsi_unregister(struct Scsi_Host * i);
+
+#define BLANK_HOST {"", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+struct Scsi_Device_Template
+{
+ struct Scsi_Device_Template * next;
+ const char * name;
+ const char * tag;
+ long * usage_count; /* Used for loadable modules */
+ unsigned char scsi_type;
+ unsigned char major;
+ unsigned char nr_dev; /* Number currently attached */
+ unsigned char dev_noticed; /* Number of devices detected. */
+ unsigned char dev_max; /* Current size of arrays */
+ unsigned blk:1; /* 0 if character device */
+ int (*detect)(Scsi_Device *); /* Returns 1 if we can attach this device */
+ int (*init)(void); /* Sizes arrays based upon number of devices
+ * detected */
+ void (*finish)(void); /* Perform initialization after attachment */
+ int (*attach)(Scsi_Device *); /* Attach devices to arrays */
+ void (*detach)(Scsi_Device *);
+};
+
+extern struct Scsi_Device_Template sd_template;
+extern struct Scsi_Device_Template st_template;
+extern struct Scsi_Device_Template sr_template;
+extern struct Scsi_Device_Template sg_template;
+
+int scsi_register_device(struct Scsi_Device_Template * sdpnt);
+
+/* These are used by loadable modules */
+extern int scsi_register_module(int, void *);
+extern void scsi_unregister_module(int, void *);
+
+/* The different types of modules that we can load and unload */
+#define MODULE_SCSI_HA 1
+#define MODULE_SCSI_CONST 2
+#define MODULE_SCSI_IOCTL 3
+#define MODULE_SCSI_DEV 4
+
+
+/*
+ * This is an ugly hack. If we expect to be able to load devices at run time,
+ * we need to leave extra room in some of the data structures. Doing a
+ * realloc to enlarge the structures would be riddled with race conditions,
+ * so until a better solution is discovered, we use this crude approach
+ */
+#define SD_EXTRA_DEVS 2
+#define ST_EXTRA_DEVS 2
+#define SR_EXTRA_DEVS 2
+#define SG_EXTRA_DEVS (SD_EXTRA_DEVS + SR_EXTRA_DEVS + ST_EXTRA_DEVS)
+
+#endif
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/in2000.c b/linux/src/drivers/scsi/in2000.c
new file mode 100644
index 0000000..aaa260c
--- /dev/null
+++ b/linux/src/drivers/scsi/in2000.c
@@ -0,0 +1,2379 @@
+/*
+ * in2000.c - Linux device driver for the
+ * Always IN2000 ISA SCSI card.
+ *
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Drew Eckhardt's excellent 'Generic NCR5380' sources provided
+ * much of the inspiration and some of the code for this driver.
+ * The Linux IN2000 driver distributed in the Linux kernels through
+ * version 1.2.13 was an extremely valuable reference on the arcane
+ * (and still mysterious) workings of the IN2000's fifo. It also
+ * is where I lifted in2000_biosparam(), the gist of the card
+ * detection scheme, and other bits of code. Many thanks to the
+ * talented and courageous people who wrote, contributed to, and
+ * maintained that driver (including Brad McLean, Shaun Savage,
+ * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
+ * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
+ * Youngdale). I should also mention the driver written by
+ * Hamish Macdonald for the (GASP!) Amiga A2091 card, included
+ * in the Linux-m68k distribution; it gave me a good initial
+ * understanding of the proper way to run a WD33c93 chip, and I
+ * ended up stealing lots of code from it.
+ *
+ * _This_ driver is (I feel) an improvement over the old one in
+ * several respects:
+ * - All problems relating to the data size of a SCSI request are
+ * gone (as far as I know). The old driver couldn't handle
+ * swapping to partitions because that involved 4k blocks, nor
+ * could it deal with the st.c tape driver unmodified, because
+ * that usually involved 4k - 32k blocks. The old driver never
+ * quite got away from a morbid dependence on 2k block sizes -
+ * which of course is the size of the card's fifo.
+ *
+ * - Target Disconnection/Reconnection is now supported. Any
+ * system with more than one device active on the SCSI bus
+ * will benefit from this. The driver defaults to what I'm
+ * calling 'adaptive disconnect' - meaning that each command
+ * is evaluated individually as to whether or not it should
+ * be run with the option to disconnect/reselect (if the
+ * device chooses), or as a "SCSI-bus-hog".
+ *
+ * - Synchronous data transfers are now supported. Because there
+ * are a few devices (and many improperly terminated systems)
+ * that choke when doing sync, the default is sync DISABLED
+ * for all devices. This faster protocol can (and should!)
+ * be enabled on selected devices via the command-line.
+ *
+ * - Runtime operating parameters can now be specified through
+ * either the LILO or the 'insmod' command line. For LILO do:
+ * "in2000=blah,blah,blah"
+ * and with insmod go like:
+ * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
+ * The defaults should be good for most people. See the comment
+ * for 'setup_strings' below for more details.
+ *
+ * - The old driver relied exclusively on what the Western Digital
+ * docs call "Combination Level 2 Commands", which are a great
+ * idea in that the CPU is relieved of a lot of interrupt
+ * overhead. However, by accepting a certain (user-settable)
+ * amount of additional interrupts, this driver achieves
+ * better control over the SCSI bus, and data transfers are
+ * almost as fast while being much easier to define, track,
+ * and debug.
+ *
+ * - You can force detection of a card whose BIOS has been disabled.
+ *
+ * - Multiple IN2000 cards might almost be supported. I've tried to
+ * keep it in mind, but have no way to test...
+ *
+ *
+ * TODO:
+ * tagged queuing. multiple cards.
+ *
+ *
+ * NOTE:
+ * When using this or any other SCSI driver as a module, you'll
+ * find that with the stock kernel, at most _two_ SCSI hard
+ * drives will be linked into the device list (ie, usable).
+ * If your IN2000 card has more than 2 disks on its bus, you
+ * might want to change the define of 'SD_EXTRA_DEVS' in the
+ * 'hosts.h' file from 2 to whatever is appropriate. It took
+ * me a while to track down this surprisingly obscure and
+ * undocumented little "feature".
+ *
+ *
+ * People with bug reports, wish-lists, complaints, comments,
+ * or improvements are asked to pah-leeez email me (John Shifflett)
+ * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
+ * this thing into as good a shape as possible, and I'm positive
+ * there are lots of lurking bugs and "Stupid Places".
+ *
+ */
+
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+
+#include <linux/blk.h>
+#include <linux/stat.h>
+
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+
+#define IN2000_VERSION "1.33"
+#define IN2000_DATE "26/August/1998"
+
+#include "in2000.h"
+
+
+/*
+ * 'setup_strings' is a single string used to pass operating parameters and
+ * settings from the kernel/module command-line to the driver. 'setup_args[]'
+ * is an array of strings that define the compile-time default values for
+ * these settings. If Linux boots with a LILO or insmod command-line, those
+ * settings are combined with 'setup_args[]'. Note that LILO command-lines
+ * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
+ * The driver recognizes the following keywords (lower case required) and
+ * arguments:
+ *
+ * - ioport:addr -Where addr is IO address of a (usually ROM-less) card.
+ * - noreset -No optional args. Prevents SCSI bus reset at boot time.
+ * - nosync:x -x is a bitmask where the 1st 7 bits correspond with
+ * the 7 possible SCSI devices (bit 0 for device #0, etc).
+ * Set a bit to PREVENT sync negotiation on that device.
+ * The driver default is sync DISABLED on all devices.
+ * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
+ * period. Default is 500; acceptable values are 250 - 1000.
+ * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
+ * x = 1 does 'adaptive' disconnects, which is the default
+ * and generally the best choice.
+ * - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
+ *                   various types of debug output to be printed - see the DB_xxx
+ * defines in in2000.h
+ * - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that
+ * determines how the /proc interface works and what it
+ * does - see the PR_xxx defines in in2000.h
+ *
+ * Syntax Notes:
+ * - Numeric arguments can be decimal or the '0x' form of hex notation. There
+ * _must_ be a colon between a keyword and its numeric argument, with no
+ * spaces.
+ * - Keywords are separated by commas, no spaces, in the standard kernel
+ * command-line manner.
+ * - A keyword in the 'nth' comma-separated command-line member will overwrite
+ * the 'nth' element of setup_args[]. A blank command-line member (in
+ * other words, a comma with no preceding keyword) will _not_ overwrite
+ * the corresponding setup_args[] element.
+ *
+ * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
+ * - in2000=ioport:0x220,noreset
+ * - in2000=period:250,disconnect:2,nosync:0x03
+ * - in2000=debug:0x1e
+ * - in2000=proc:3
+ */
+
+/* Normally, no defaults are specified... */
+static char *setup_args[] =
+ {"","","","","","","","",""};
+
+/* filled in by 'insmod' */
+static char *setup_strings = 0;
+
+#ifdef MODULE_PARM
+MODULE_PARM(setup_strings, "s");
+#endif
+
+
+static struct Scsi_Host *instance_list = 0;
+
+
+
+static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
+{
+ write1_io(reg_num,IO_WD_ADDR);
+ return read1_io(IO_WD_DATA);
+}
+
+
+#define READ_AUX_STAT() read1_io(IO_WD_ASR)
+
+
+static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
+{
+ write1_io(reg_num,IO_WD_ADDR);
+ write1_io(value,IO_WD_DATA);
+}
+
+
+static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
+{
+/* while (READ_AUX_STAT() & ASR_CIP)
+ printk("|");*/
+ write1_io(WD_COMMAND,IO_WD_ADDR);
+ write1_io(cmd,IO_WD_DATA);
+}
+
+
+static uchar read_1_byte(struct IN2000_hostdata *hostdata)
+{
+uchar asr, x = 0;
+
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO|0x80);
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ x = read_3393(hostdata,WD_DATA);
+ } while (!(asr & ASR_INT));
+ return x;
+}
+
+
+static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
+{
+ write1_io(WD_TRANSFER_COUNT_MSB,IO_WD_ADDR);
+ write1_io((value >> 16),IO_WD_DATA);
+ write1_io((value >> 8),IO_WD_DATA);
+ write1_io(value,IO_WD_DATA);
+}
+
+
+static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
+{
+unsigned long value;
+
+ write1_io(WD_TRANSFER_COUNT_MSB,IO_WD_ADDR);
+ value = read1_io(IO_WD_DATA) << 16;
+ value |= read1_io(IO_WD_DATA) << 8;
+ value |= read1_io(IO_WD_DATA);
+ return value;
+}
+
+
+/* The 33c93 needs to be told which direction a command transfers its
+ * data; we use this function to figure it out. Returns true if there
+ * will be a DATA_OUT phase with this command, false otherwise.
+ * (Thanks to Joerg Dorchain for the research and suggestion.)
+ */
+static int is_dir_out(Scsi_Cmnd *cmd)
+{
+ switch (cmd->cmnd[0]) {
+ case WRITE_6: case WRITE_10: case WRITE_12:
+ case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER:
+ case WRITE_VERIFY: case WRITE_VERIFY_12:
+ case COMPARE: case COPY: case COPY_VERIFY:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT:
+ case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK:
+ case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+
+static struct sx_period sx_table[] = {
+ { 1, 0x20},
+ {252, 0x20},
+ {376, 0x30},
+ {500, 0x40},
+ {624, 0x50},
+ {752, 0x60},
+ {876, 0x70},
+ {1000,0x00},
+ {0, 0} };
+
+static int round_period(unsigned int period)
+{
+int x;
+
+ for (x=1; sx_table[x].period_ns; x++) {
+ if ((period <= sx_table[x-0].period_ns) &&
+ (period > sx_table[x-1].period_ns)) {
+ return x;
+ }
+ }
+ return 7;
+}
+
+static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
+{
+uchar result;
+
+ period *= 4; /* convert SDTR code to ns */
+ result = sx_table[round_period(period)].reg_value;
+ result |= (offset < OPTIMUM_SX_OFF)?offset:OPTIMUM_SX_OFF;
+ return result;
+}
+
+
+
+static void in2000_execute(struct Scsi_Host *instance);
+
+int in2000_queuecommand (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *tmp;
+unsigned long flags;
+
+ hostdata = (struct IN2000_hostdata *)cmd->host->hostdata;
+
+DB(DB_QUEUE_COMMAND,printk("Q-%d-%02x-%ld(",cmd->target,cmd->cmnd[0],cmd->pid))
+
+/* Set up a few fields in the Scsi_Cmnd structure for our own use:
+ * - host_scribble is the pointer to the next cmd in the input queue
+ * - scsi_done points to the routine we call when a cmd is finished
+ * - result is what you'd expect
+ */
+
+ cmd->host_scribble = NULL;
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+/* We use the Scsi_Pointer structure that's included with each command
+ * as a scratchpad (as it's intended to be used!). The handy thing about
+ * the SCp.xxx fields is that they're always associated with a given
+ * cmd, and are preserved across disconnect-reselect. This means we
+ * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
+ * if we keep all the critical pointers and counters in SCp:
+ * - SCp.ptr is the pointer into the RAM buffer
+ * - SCp.this_residual is the size of that buffer
+ * - SCp.buffer points to the current scatter-gather buffer
+ * - SCp.buffers_residual tells us how many S.G. buffers there are
+ * - SCp.have_data_in helps keep track of >2048 byte transfers
+ * - SCp.sent_command is not used
+ * - SCp.phase records this command's SRCID_ER bit setting
+ */
+
+ if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *)cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ }
+ else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ }
+ cmd->SCp.have_data_in = 0;
+
+/* We don't set SCp.phase here - that's done in in2000_execute() */
+
+/* WD docs state that at the conclusion of a "LEVEL2" command, the
+ * status byte can be retrieved from the LUN register. Apparently,
+ * this is the case only for *uninterrupted* LEVEL2 commands! If
+ * there are any unexpected phases entered, even if they are 100%
+ * legal (different devices may choose to do things differently),
+ * the LEVEL2 command sequence is exited. This often occurs prior
+ * to receiving the status byte, in which case the driver does a
+ * status phase interrupt and gets the status byte on its own.
+ * While such a command can then be "resumed" (ie restarted to
+ * finish up as a LEVEL2 command), the LUN register will NOT be
+ * a valid status byte at the command's conclusion, and we must
+ * use the byte obtained during the earlier interrupt. Here, we
+ * preset SCp.Status to an illegal value (0xff) so that when
+ * this command finally completes, we can tell where the actual
+ * status byte is stored.
+ */
+
+ cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
+
+/* We need to disable interrupts before messing with the input
+ * queue and calling in2000_execute().
+ */
+
+ save_flags(flags);
+ cli();
+
+ /*
+ * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
+ * commands are added to the head of the queue so that the desired
+ * sense data is not lost before REQUEST_SENSE executes.
+ */
+
+ if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ cmd->host_scribble = (uchar *)hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ else { /* find the end of the queue */
+ for (tmp=(Scsi_Cmnd *)hostdata->input_Q; tmp->host_scribble;
+ tmp=(Scsi_Cmnd *)tmp->host_scribble)
+ ;
+ tmp->host_scribble = (uchar *)cmd;
+ }
+
+/* We know that there's at least one command in 'input_Q' now.
+ * Go see if any of them are runnable!
+ */
+
+ in2000_execute(cmd->host);
+
+DB(DB_QUEUE_COMMAND,printk(")Q-%ld ",cmd->pid))
+
+ restore_flags(flags);
+ return 0;
+}
+
+
+
+/*
+ * This routine attempts to start a scsi command. If the host_card is
+ * already connected, we give up immediately. Otherwise, look through
+ * the input_Q, using the first command we find that's intended
+ * for a currently non-busy target/lun.
+ * Note that this function is always called with interrupts already
+ * disabled (either from in2000_queuecommand() or in2000_intr()).
+ */
+static void in2000_execute (struct Scsi_Host *instance)
+{
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *cmd, *prev;
+int i;
+unsigned short *sp;
+unsigned short f;
+unsigned short flushbuf[16];
+
+
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+DB(DB_EXECUTE,printk("EX("))
+
+ if (hostdata->selecting || hostdata->connected) {
+
+DB(DB_EXECUTE,printk(")EX-0 "))
+
+ return;
+ }
+
+ /*
+ * Search through the input_Q for a command destined
+ * for an idle target/lun.
+ */
+
+ cmd = (Scsi_Cmnd *)hostdata->input_Q;
+ prev = 0;
+ while (cmd) {
+ if (!(hostdata->busy[cmd->target] & (1 << cmd->lun)))
+ break;
+ prev = cmd;
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+
+ /* quit if queue empty or all possible targets are busy */
+
+ if (!cmd) {
+
+DB(DB_EXECUTE,printk(")EX-1 "))
+
+ return;
+ }
+
+ /* remove command from queue */
+
+ if (prev)
+ prev->host_scribble = cmd->host_scribble;
+ else
+ hostdata->input_Q = (Scsi_Cmnd *)cmd->host_scribble;
+
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[cmd->target]++;
+#endif
+
+/*
+ * Start the selection process
+ */
+
+ if (is_dir_out(cmd))
+ write_3393(hostdata,WD_DESTINATION_ID, cmd->target);
+ else
+ write_3393(hostdata,WD_DESTINATION_ID, cmd->target | DSTID_DPD);
+
+/* Now we need to figure out whether or not this command is a good
+ * candidate for disconnect/reselect. We guess to the best of our
+ * ability, based on a set of hierarchical rules. When several
+ * devices are operating simultaneously, disconnects are usually
+ * an advantage. In a single device system, or if only 1 device
+ * is being accessed, transfers usually go faster if disconnects
+ * are not allowed:
+ *
+ * + Commands should NEVER disconnect if hostdata->disconnect =
+ * DIS_NEVER (this holds for tape drives also), and ALWAYS
+ * disconnect if hostdata->disconnect = DIS_ALWAYS.
+ * + Tape drive commands should always be allowed to disconnect.
+ * + Disconnect should be allowed if disconnected_Q isn't empty.
+ * + Commands should NOT disconnect if input_Q is empty.
+ * + Disconnect should be allowed if there are commands in input_Q
+ * for a different target/lun. In this case, the other commands
+ * should be made disconnect-able, if not already.
+ *
+ * I know, I know - this code would flunk me out of any
+ * "C Programming 101" class ever offered. But it's easy
+ * to change around and experiment with for now.
+ */
+
+ cmd->SCp.phase = 0; /* assume no disconnect */
+ if (hostdata->disconnect == DIS_NEVER)
+ goto no;
+ if (hostdata->disconnect == DIS_ALWAYS)
+ goto yes;
+ if (cmd->device->type == 1) /* tape drive? */
+ goto yes;
+ if (hostdata->disconnected_Q) /* other commands disconnected? */
+ goto yes;
+ if (!(hostdata->input_Q)) /* input_Q empty? */
+ goto no;
+ for (prev=(Scsi_Cmnd *)hostdata->input_Q; prev;
+ prev=(Scsi_Cmnd *)prev->host_scribble) {
+ if ((prev->target != cmd->target) || (prev->lun != cmd->lun)) {
+ for (prev=(Scsi_Cmnd *)hostdata->input_Q; prev;
+ prev=(Scsi_Cmnd *)prev->host_scribble)
+ prev->SCp.phase = 1;
+ goto yes;
+ }
+ }
+ goto no;
+
+yes:
+ cmd->SCp.phase = 1;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_allowed_cnt[cmd->target]++;
+#endif
+
+no:
+ write_3393(hostdata,WD_SOURCE_ID,((cmd->SCp.phase)?SRCID_ER:0));
+
+ write_3393(hostdata,WD_TARGET_LUN, cmd->lun);
+ write_3393(hostdata,WD_SYNCHRONOUS_TRANSFER,hostdata->sync_xfer[cmd->target]);
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+
+ if ((hostdata->level2 <= L2_NONE) ||
+ (hostdata->sync_stat[cmd->target] == SS_UNSET)) {
+
+ /*
+ * Do a 'Select-With-ATN' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * CSR_SELECT: success - proceed.
+ */
+
+ hostdata->selecting = cmd;
+
+/* Every target has its own synchronous transfer setting, kept in
+ * the sync_xfer array, and a corresponding status byte in sync_stat[].
+ * Each target's sync_stat[] entry is initialized to SS_UNSET, and its
+ * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
+ * means that the parameters are undetermined as yet, and that we
+ * need to send an SDTR message to this device after selection is
+ * complete. We set SS_FIRST to tell the interrupt routine to do so,
+ * unless we don't want to even _try_ synchronous transfers: In this
+ * case we set SS_SET to make the defaults final.
+ */
+ if (hostdata->sync_stat[cmd->target] == SS_UNSET) {
+ if (hostdata->sync_off & (1 << cmd->target))
+ hostdata->sync_stat[cmd->target] = SS_SET;
+ else
+ hostdata->sync_stat[cmd->target] = SS_FIRST;
+ }
+ hostdata->state = S_SELECTING;
+ write_3393_count(hostdata,0); /* this guarantees a DATA_PHASE interrupt */
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN);
+ }
+
+ else {
+
+ /*
+ * Do a 'Select-With-ATN-Xfer' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * anything else: success - proceed.
+ */
+
+ hostdata->connected = cmd;
+ write_3393(hostdata,WD_COMMAND_PHASE, 0);
+
+ /* copy command_descriptor_block into WD chip
+ * (take advantage of auto-incrementing)
+ */
+
+ write1_io(WD_CDB_1, IO_WD_ADDR);
+ for (i=0; i<cmd->cmd_len; i++)
+ write1_io(cmd->cmnd[i], IO_WD_DATA);
+
+ /* The wd33c93 only knows about Group 0, 1, and 5 commands when
+ * it's doing a 'select-and-transfer'. To be safe, we write the
+ * size of the CDB into the OWN_ID register for every case. This
+ * way there won't be problems with vendor-unique, audio, etc.
+ */
+
+ write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);
+
+ /* When doing a non-disconnect command, we can save ourselves a DATA
+ * phase interrupt later by setting everything up now. With writes we
+ * need to pre-fill the fifo; if there's room for the 32 flush bytes,
+ * put them in there too - that'll avoid a fifo interrupt. Reads are
+ * somewhat simpler.
+ * KLUDGE NOTE: It seems that you can't completely fill the fifo here:
+ * This results in the IO_FIFO_COUNT register rolling over to zero,
+ * and apparently the gate array logic sees this as empty, not full,
+ * so the 3393 chip is never signalled to start reading from the
+ * fifo. Or maybe it's seen as a permanent fifo interrupt condition.
+ * Regardless, we fix this by temporarily pretending that the fifo
+ * is 16 bytes smaller. (I see now that the old driver has a comment
+ * about "don't fill completely" in an analogous place - must be the
+ * same deal.) This results in CDROM, swap partitions, and tape drives
+ * needing an extra interrupt per write command - I think we can live
+ * with that!
+ */
+
+ if (!(cmd->SCp.phase)) {
+ write_3393_count(hostdata, cmd->SCp.this_residual);
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
+ write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */
+
+ if (is_dir_out(cmd)) {
+ hostdata->fifo = FI_FIFO_WRITING;
+ if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16) )
+ i = IN2000_FIFO_SIZE - 16;
+ cmd->SCp.have_data_in = i; /* this much data in fifo */
+ i >>= 1; /* Gulp. Assuming modulo 2. */
+ sp = (unsigned short *)cmd->SCp.ptr;
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(*sp++,IO_FIFO);
+
+#endif
+
+ /* Is there room for the flush bytes? */
+
+ if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
+ sp = flushbuf;
+ i = 16;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(0,IO_FIFO);
+
+#endif
+
+ }
+ }
+
+ else {
+ write1_io(0, IO_FIFO_READ); /* put fifo in read mode */
+ hostdata->fifo = FI_FIFO_READING;
+         cmd->SCp.have_data_in = 0;      /* nothing transferred yet */
+ }
+
+ }
+ else {
+ write_3393_count(hostdata,0); /* this guarantees a DATA_PHASE interrupt */
+ }
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ }
+
+ /*
+ * Since the SCSI bus can handle only 1 connection at a time,
+ * we get out of here now. If the selection fails, or when
+ * the command disconnects, we'll come back to this routine
+ * to search the input_Q again...
+ */
+
+DB(DB_EXECUTE,printk("%s%ld)EX-2 ",(cmd->SCp.phase)?"d:":"",cmd->pid))
+
+}
+
+
+
+static void transfer_pio(uchar *buf, int cnt,
+ int data_in_dir, struct IN2000_hostdata *hostdata)
+{
+uchar asr;
+
+DB(DB_TRANSFER,printk("(%p,%d,%s)",buf,cnt,data_in_dir?"in":"out"))
+
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_count(hostdata,cnt);
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO);
+ if (data_in_dir) {
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ *buf++ = read_3393(hostdata,WD_DATA);
+ } while (!(asr & ASR_INT));
+ }
+ else {
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ write_3393(hostdata,WD_DATA, *buf++);
+ } while (!(asr & ASR_INT));
+ }
+
+ /* Note: we are returning with the interrupt UN-cleared.
+ * Since (presumably) an entire I/O operation has
+ * completed, the bus phase is probably different, and
+ * the interrupt routine will discover this when it
+ * responds to the uncleared int.
+ */
+
+}
+
+
+
+static void transfer_bytes(Scsi_Cmnd *cmd, int data_in_dir)
+{
+struct IN2000_hostdata *hostdata;
+unsigned short *sp;
+unsigned short f;
+int i;
+
+ hostdata = (struct IN2000_hostdata *)cmd->host->hostdata;
+
+/* Normally, you'd expect 'this_residual' to be non-zero here.
+ * In a series of scatter-gather transfers, however, this
+ * routine will usually be called with 'this_residual' equal
+ * to 0 and 'buffers_residual' non-zero. This means that a
+ * previous transfer completed, clearing 'this_residual', and
+ * now we need to setup the next scatter-gather buffer as the
+ * source or destination for THIS transfer.
+ */
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ }
+
+/* Set up hardware registers */
+
+ write_3393(hostdata,WD_SYNCHRONOUS_TRANSFER,hostdata->sync_xfer[cmd->target]);
+ write_3393_count(hostdata,cmd->SCp.this_residual);
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
+ write1_io(0,IO_FIFO_WRITE); /* zero counter, assume write */
+
+/* Reading is easy. Just issue the command and return - we'll
+ * get an interrupt later when we have actual data to worry about.
+ */
+
+ if (data_in_dir) {
+ write1_io(0,IO_FIFO_READ);
+ if ((hostdata->level2 >= L2_DATA) ||
+ (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ write_3393(hostdata,WD_COMMAND_PHASE,0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO);
+ hostdata->fifo = FI_FIFO_READING;
+ cmd->SCp.have_data_in = 0;
+ return;
+ }
+
+/* Writing is more involved - we'll start the WD chip and write as
+ * much data to the fifo as we can right now. Later interrupts will
+ * write any bytes that don't make it at this stage.
+ */
+
+ if ((hostdata->level2 >= L2_DATA) ||
+ (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ write_3393(hostdata,WD_COMMAND_PHASE,0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO);
+ hostdata->fifo = FI_FIFO_WRITING;
+ sp = (unsigned short *)cmd->SCp.ptr;
+
+ if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
+ i = IN2000_FIFO_SIZE;
+ cmd->SCp.have_data_in = i;
+ i >>= 1; /* Gulp. We assume this_residual is modulo 2 */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(*sp++,IO_FIFO);
+
+#endif
+
+}
+
+
+/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
+ * function in order to work in an SMP environment. (I'd be surprised
+ * if the driver is ever used by anyone on a real multi-CPU motherboard,
+ * but it _does_ need to be able to compile and run in an SMP kernel.)
+ */
+
+static void in2000_intr (int irqnum, void * dev_id, struct pt_regs *ptregs)
+{
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *patch, *cmd;
+uchar asr, sr, phs, id, lun, *ucp, msg;
+int i,j;
+unsigned long length;
+unsigned short *sp;
+unsigned short f;
+unsigned long flags;
+
+ for (instance = instance_list; instance; instance = instance->next) {
+ if (instance->irq == irqnum)
+ break;
+ }
+ if (!instance) {
+ printk("*** Hmm... interrupts are screwed up! ***\n");
+ return;
+ }
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+/* Get the spin_lock and disable further ints, for SMP */
+
+ CLISPIN_LOCK(flags);
+
+#ifdef PROC_STATISTICS
+ hostdata->int_cnt++;
+#endif
+
+/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
+ * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
+ * with a big logic array, so it's a little different than what you might
+ * expect). As far as I know, there's no reason that BOTH can't be active
+ * at the same time, but there's a problem: while we can read the 3393
+ * to tell if _it_ wants an interrupt, I don't know of a way to ask the
+ * fifo the same question. The best we can do is check the 3393 and if
+ * it _isn't_ the source of the interrupt, then we can be pretty sure
+ * that the fifo is the culprit.
+ * UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
+ * IO_FIFO_COUNT register mirrors the fifo interrupt state. I
+ * assume that bit clear means interrupt active. As it turns
+ * out, the driver really doesn't need to check for this after
+ * all, so my remarks above about a 'problem' can safely be
+ * ignored. The way the logic is set up, there's no advantage
+ * (that I can see) to worrying about it.
+ *
+ * It seems that the fifo interrupt signal is negated when we extract
+ * bytes during read or write bytes during write.
+ * - fifo will interrupt when data is moving from it to the 3393, and
+ * there are 31 (or less?) bytes left to go. This is sort of short-
+ * sighted: what if you don't WANT to do more? In any case, our
+ * response is to push more into the fifo - either actual data or
+ * dummy bytes if need be. Note that we apparently have to write at
+ * least 32 additional bytes to the fifo after an interrupt in order
+ * to get it to release the ones it was holding on to - writing fewer
+ * than 32 will result in another fifo int.
+ * UPDATE: Again, info from Bill Earnest makes this more understandable:
+ * 32 bytes = two counts of the fifo counter register. He tells
+ * me that the fifo interrupt is a non-latching signal derived
+ * from a straightforward boolean interpretation of the 7
+ * highest bits of the fifo counter and the fifo-read/fifo-write
+ * state. Who'd a thought?
+ */
+
+ write1_io(0, IO_LED_ON);
+ asr = READ_AUX_STAT();
+ if (!(asr & ASR_INT)) { /* no WD33c93 interrupt? */
+
+/* Ok. This is definitely a FIFO-only interrupt.
+ *
+ * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
+ * maybe more to come from the SCSI bus. Read as many as we can out of the
+ * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
+ * update have_data_in afterwards.
+ *
+ * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
+ * into the WD3393 chip (I think the interrupt happens when there are 31
+ * bytes left, but it may be fewer...). The 3393 is still waiting, so we
+ * shove some more into the fifo, which gets things moving again. If the
+ * original SCSI command specified more than 2048 bytes, there may still
+ * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
+ * Don't forget to update have_data_in. If we've already written out the
+ * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
+ * push out the remaining real data.
+ * (Big thanks to Bill Earnest for getting me out of the mud in here.)
+ */
+
+ cmd = (Scsi_Cmnd *)hostdata->connected; /* assume we're connected */
+CHECK_NULL(cmd,"fifo_int")
+
+ if (hostdata->fifo == FI_FIFO_READING) {
+
+DB(DB_FIFO,printk("{R:%02x} ",read1_io(IO_FIFO_COUNT)))
+
+ sp = (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i = read1_io(IO_FIFO_COUNT) & 0xfe;
+ i <<= 2; /* # of words waiting in the fifo */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_READ_IO
+
+ FAST_READ2_IO();
+#else
+ while (i--)
+ *sp++ = read2_io(IO_FIFO);
+
+#endif
+
+ i = sp - (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i <<= 1;
+ cmd->SCp.have_data_in += i;
+ }
+
+ else if (hostdata->fifo == FI_FIFO_WRITING) {
+
+DB(DB_FIFO,printk("{W:%02x} ",read1_io(IO_FIFO_COUNT)))
+
+/* If all bytes have been written to the fifo, flush out the stragglers.
+ * Note that while writing 16 dummy words seems arbitrary, we don't
+ * have another choice that I can see. What we really want is to read
+ * the 3393 transfer count register (that would tell us how many bytes
+ * needed flushing), but the TRANSFER_INFO command hasn't completed
+ * yet (not enough bytes!) and that register won't be accessible. So,
+ * we use 16 words - a number obtained through trial and error.
+ * UPDATE: Bill says this is exactly what Always does, so there.
+ * More thanks due him for help in this section.
+ */
+
+ if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
+ i = 16;
+ while (i--) /* write 32 dummy bytes */
+ write2_io(0,IO_FIFO);
+ }
+
+/* If there are still bytes left in the SCSI buffer, write as many as we
+ * can out to the fifo.
+ */
+
+ else {
+ sp = (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i = cmd->SCp.this_residual - cmd->SCp.have_data_in; /* bytes yet to go */
+ j = read1_io(IO_FIFO_COUNT) & 0xfe;
+ j <<= 2; /* how many words the fifo has room for */
+ if ((j << 1) > i)
+ j = (i >> 1);
+ while (j--)
+ write2_io(*sp++,IO_FIFO);
+
+ i = sp - (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i <<= 1;
+ cmd->SCp.have_data_in += i;
+ }
+ }
+
+ else {
+ printk("*** Spurious FIFO interrupt ***");
+ }
+
+ write1_io(0, IO_LED_OFF);
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+ return;
+ }
+
+/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
+ * may also be asserted, but we don't bother to check it: we get more
+ * detailed info from FIFO_READING and FIFO_WRITING (see below).
+ */
+
+ cmd = (Scsi_Cmnd *)hostdata->connected; /* assume we're connected */
+ sr = read_3393(hostdata,WD_SCSI_STATUS); /* clear the interrupt */
+ phs = read_3393(hostdata,WD_COMMAND_PHASE);
+
+ if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
+ printk("\nNR:wd-intr-1\n");
+ write1_io(0, IO_LED_OFF);
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+ return;
+ }
+
+DB(DB_INTR,printk("{%02x:%02x-",asr,sr))
+
+/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
+ * guaranteed to be in response to the completion of the transfer.
+ * If we were reading, there's probably data in the fifo that needs
+ * to be copied into RAM - do that here. Also, we have to update
+ * 'this_residual' and 'ptr' based on the contents of the
+ * TRANSFER_COUNT register, in case the device decided to do an
+ * intermediate disconnect (a device may do this if it has to
+ * do a seek, or just to be nice and let other devices have
+ * some bus time during long transfers).
+ * After doing whatever is necessary with the fifo, we go on and
+ * service the WD3393 interrupt normally.
+ */
+
+ if (hostdata->fifo == FI_FIFO_READING) {
+
+/* buffer index = start-of-buffer + #-of-bytes-already-read */
+
+ sp = (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+
+/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */
+
+ i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
+ i >>= 1; /* Gulp. We assume this will always be modulo 2 */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_READ_IO
+
+ FAST_READ2_IO();
+#else
+ while (i--)
+ *sp++ = read2_io(IO_FIFO);
+
+#endif
+
+ hostdata->fifo = FI_FIFO_UNUSED;
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_3393_count(hostdata);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+
+DB(DB_TRANSFER,printk("(%p,%d)",cmd->SCp.ptr,cmd->SCp.this_residual))
+
+ }
+
+ else if (hostdata->fifo == FI_FIFO_WRITING) {
+ hostdata->fifo = FI_FIFO_UNUSED;
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_3393_count(hostdata);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+
+DB(DB_TRANSFER,printk("(%p,%d)",cmd->SCp.ptr,cmd->SCp.this_residual))
+
+ }
+
+/* Respond to the specific WD3393 interrupt - there are quite a few! */
+
+ switch (sr) {
+
+ case CSR_TIMEOUT:
+DB(DB_INTR,printk("TIMEOUT"))
+
+ if (hostdata->state == S_RUNNING_LEVEL2)
+ hostdata->connected = NULL;
+ else {
+ cmd = (Scsi_Cmnd *)hostdata->selecting; /* get a valid cmd */
+CHECK_NULL(cmd,"csr_timeout")
+ hostdata->selecting = NULL;
+ }
+
+ cmd->result = DID_NO_CONNECT << 16;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+ cmd->scsi_done(cmd);
+
+/* We are not connected to a target - check to see if there
+ * are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_SELECT:
+DB(DB_INTR,printk("SELECT"))
+ hostdata->connected = cmd = (Scsi_Cmnd *)hostdata->selecting;
+CHECK_NULL(cmd,"csr_select")
+ hostdata->selecting = NULL;
+
+ /* construct an IDENTIFY message with correct disconnect bit */
+
+ hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->lun);
+ if (cmd->SCp.phase)
+ hostdata->outgoing_msg[0] |= 0x40;
+
+ if (hostdata->sync_stat[cmd->target] == SS_FIRST) {
+#ifdef SYNC_DEBUG
+printk(" sending SDTR ");
+#endif
+
+ hostdata->sync_stat[cmd->target] = SS_WAITING;
+
+ /* tack on a 2nd message to ask about synchronous transfers */
+
+ hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[2] = 3;
+ hostdata->outgoing_msg[3] = EXTENDED_SDTR;
+ hostdata->outgoing_msg[4] = OPTIMUM_SX_PER/4;
+ hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
+ hostdata->outgoing_len = 6;
+ }
+ else
+ hostdata->outgoing_len = 1;
+
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE|PHS_DATA_IN:
+ case CSR_UNEXP |PHS_DATA_IN:
+ case CSR_SRV_REQ |PHS_DATA_IN:
+DB(DB_INTR,printk("IN-%d.%d",cmd->SCp.this_residual,cmd->SCp.buffers_residual))
+ transfer_bytes(cmd, DATA_IN_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE|PHS_DATA_OUT:
+ case CSR_UNEXP |PHS_DATA_OUT:
+ case CSR_SRV_REQ |PHS_DATA_OUT:
+DB(DB_INTR,printk("OUT-%d.%d",cmd->SCp.this_residual,cmd->SCp.buffers_residual))
+ transfer_bytes(cmd, DATA_OUT_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_XFER_DONE|PHS_COMMAND:
+ case CSR_UNEXP |PHS_COMMAND:
+ case CSR_SRV_REQ |PHS_COMMAND:
+DB(DB_INTR,printk("CMND-%02x,%ld",cmd->cmnd[0],cmd->pid))
+ transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE|PHS_STATUS:
+ case CSR_UNEXP |PHS_STATUS:
+ case CSR_SRV_REQ |PHS_STATUS:
+DB(DB_INTR,printk("STATUS="))
+
+ cmd->SCp.Status = read_1_byte(hostdata);
+DB(DB_INTR,printk("%02x",cmd->SCp.Status))
+ if (hostdata->level2 >= L2_BASIC) {
+ sr = read_3393(hostdata,WD_SCSI_STATUS); /* clear interrupt */
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x50);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ }
+ else {
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+
+ case CSR_XFER_DONE|PHS_MESS_IN:
+ case CSR_UNEXP |PHS_MESS_IN:
+ case CSR_SRV_REQ |PHS_MESS_IN:
+DB(DB_INTR,printk("MSG_IN="))
+
+ msg = read_1_byte(hostdata);
+ sr = read_3393(hostdata,WD_SCSI_STATUS); /* clear interrupt */
+
+ hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
+ if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
+ msg = EXTENDED_MESSAGE;
+ else
+ hostdata->incoming_ptr = 0;
+
+ cmd->SCp.Message = msg;
+ switch (msg) {
+
+ case COMMAND_COMPLETE:
+DB(DB_INTR,printk("CCMP-%ld",cmd->pid))
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_CMP_DISC;
+ break;
+
+ case SAVE_POINTERS:
+DB(DB_INTR,printk("SDP"))
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case RESTORE_POINTERS:
+DB(DB_INTR,printk("RDP"))
+ if (hostdata->level2 >= L2_BASIC) {
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else {
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ case DISCONNECT:
+DB(DB_INTR,printk("DIS"))
+ cmd->device->disconnect = 1;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_TMP_DISC;
+ break;
+
+ case MESSAGE_REJECT:
+DB(DB_INTR,printk("REJ"))
+#ifdef SYNC_DEBUG
+printk("-REJ-");
+#endif
+ if (hostdata->sync_stat[cmd->target] == SS_WAITING)
+ hostdata->sync_stat[cmd->target] = SS_SET;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case EXTENDED_MESSAGE:
+DB(DB_INTR,printk("EXT"))
+
+ ucp = hostdata->incoming_msg;
+
+#ifdef SYNC_DEBUG
+printk("%02x",ucp[hostdata->incoming_ptr]);
+#endif
+ /* Is this the last byte of the extended message? */
+
+ if ((hostdata->incoming_ptr >= 2) &&
+ (hostdata->incoming_ptr == (ucp[1] + 1))) {
+
+ switch (ucp[2]) { /* what's the EXTENDED code? */
+ case EXTENDED_SDTR:
+ id = calc_sync_xfer(ucp[3],ucp[4]);
+ if (hostdata->sync_stat[cmd->target] != SS_WAITING) {
+
+/* A device has sent an unsolicited SDTR message; rather than go
+ * through the effort of decoding it and then figuring out what
+ * our reply should be, we're just gonna say that we have a
+ * synchronous fifo depth of 0. This will result in asynchronous
+ * transfers - not ideal but so much easier.
+ * Actually, this is OK because it assures us that if we don't
+ * specifically ask for sync transfers, we won't do any.
+ */
+
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 3;
+ hostdata->outgoing_msg[2] = EXTENDED_SDTR;
+ hostdata->outgoing_msg[3] = hostdata->default_sx_per/4;
+ hostdata->outgoing_msg[4] = 0;
+ hostdata->outgoing_len = 5;
+ hostdata->sync_xfer[cmd->target] =
+ calc_sync_xfer(hostdata->default_sx_per/4,0);
+ }
+ else {
+ hostdata->sync_xfer[cmd->target] = id;
+ }
+#ifdef SYNC_DEBUG
+printk("sync_xfer=%02x",hostdata->sync_xfer[cmd->target]);
+#endif
+ hostdata->sync_stat[cmd->target] = SS_SET;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ case EXTENDED_WDTR:
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk("sending WDTR ");
+ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 2;
+ hostdata->outgoing_msg[2] = EXTENDED_WDTR;
+ hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */
+ hostdata->outgoing_len = 4;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ default:
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk("Rejecting Unknown Extended Message(%02x). ",ucp[2]);
+ hostdata->outgoing_msg[0] = MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ }
+ hostdata->incoming_ptr = 0;
+ }
+
+ /* We need to read more MESS_IN bytes for the extended message */
+
+ else {
+ hostdata->incoming_ptr++;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ default:
+ printk("Rejecting Unknown Message(%02x) ",msg);
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] = MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SEL_XFER_DONE:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata,WD_SOURCE_ID, SRCID_ER);
+ if (phs == 0x60) {
+DB(DB_INTR,printk("SX-DONE-%ld",cmd->pid))
+ cmd->SCp.Message = COMMAND_COMPLETE;
+ lun = read_3393(hostdata,WD_TARGET_LUN);
+DB(DB_INTR,printk(":%d.%d",cmd->SCp.Status,lun))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
+ cmd->SCp.Status = lun;
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ }
+ else {
+ printk("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---",asr,sr,phs,cmd->pid);
+ }
+ break;
+
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SDP:
+DB(DB_INTR,printk("SDP"))
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x41);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ break;
+
+
+ case CSR_XFER_DONE|PHS_MESS_OUT:
+ case CSR_UNEXP |PHS_MESS_OUT:
+ case CSR_SRV_REQ |PHS_MESS_OUT:
+DB(DB_INTR,printk("MSG_OUT="))
+
+/* To get here, we've probably requested MESSAGE_OUT and have
+ * already put the correct bytes in outgoing_msg[] and filled
+ * in outgoing_len. We simply send them out to the SCSI bus.
+ * Sometimes we get MESSAGE_OUT phase when we're not expecting
+ * it - like when our SDTR message is rejected by a target. Some
+ * targets send the REJECT before receiving all of the extended
+ * message, and then seem to go back to MESSAGE_OUT for a byte
+ * or two. Not sure why, or if I'm doing something wrong to
+ * cause this to happen. Regardless, it seems that sending
+ * NOP messages in these situations results in no harm and
+ * makes everyone happy.
+ */
+
+ if (hostdata->outgoing_len == 0) {
+ hostdata->outgoing_len = 1;
+ hostdata->outgoing_msg[0] = NOP;
+ }
+ transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len,
+ DATA_OUT_DIR, hostdata);
+DB(DB_INTR,printk("%02x",hostdata->outgoing_msg[0]))
+ hostdata->outgoing_len = 0;
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_UNEXP_DISC:
+
+/* I think I've seen this after a request-sense that was in response
+ * to an error condition, but not sure. We certainly need to do
+ * something when we get this interrupt - the question is 'what?'.
+ * Let's think positively, and assume some command has finished
+ * in a legal manner (like a command that provokes a request-sense),
+ * so we treat it as a normal command-complete-disconnect.
+ */
+
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata,WD_SOURCE_ID, SRCID_ER);
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+ return;
+ }
+DB(DB_INTR,printk("UNEXP_DISC-%ld",cmd->pid))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+ case CSR_DISC:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata,WD_SOURCE_ID, SRCID_ER);
+DB(DB_INTR,printk("DISC-%ld",cmd->pid))
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+ }
+ switch (hostdata->state) {
+ case S_PRE_CMP_DISC:
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+DB(DB_INTR,printk(":%d",cmd->SCp.Status))
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+ break;
+ case S_PRE_TMP_DISC:
+ case S_RUNNING_LEVEL2:
+ cmd->host_scribble = (uchar *)hostdata->disconnected_Q;
+ hostdata->disconnected_Q = cmd;
+ hostdata->connected = NULL;
+ hostdata->state = S_UNCONNECTED;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_done_cnt[cmd->target]++;
+#endif
+
+ break;
+ default:
+ printk("*** Unexpected DISCONNECT interrupt! ***");
+ hostdata->state = S_UNCONNECTED;
+ }
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+ case CSR_RESEL_AM:
+DB(DB_INTR,printk("RESEL"))
+
+ /* First we have to make sure this reselection didn't */
+ /* happen during Arbitration/Selection of some other device. */
+ /* If yes, put losing command back on top of input_Q. */
+
+ if (hostdata->level2 <= L2_NONE) {
+
+ if (hostdata->selecting) {
+ cmd = (Scsi_Cmnd *)hostdata->selecting;
+ hostdata->selecting = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ cmd->host_scribble = (uchar *)hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ }
+
+ else {
+
+ if (cmd) {
+ if (phs == 0x00) {
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ cmd->host_scribble = (uchar *)hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ else {
+ printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---",asr,sr,phs);
+ while (1)
+ printk("\r");
+ }
+ }
+
+ }
+
+ /* OK - find out which device reselected us. */
+
+ id = read_3393(hostdata,WD_SOURCE_ID);
+ id &= SRCID_MASK;
+
+ /* and extract the lun from the ID message. (Note that we don't
+ * bother to check for a valid message here - I guess this is
+ * not the right way to go, but....)
+ */
+
+ lun = read_3393(hostdata,WD_DATA);
+ if (hostdata->level2 < L2_RESELECT)
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ lun &= 7;
+
+ /* Now we look for the command that's reconnecting. */
+
+ cmd = (Scsi_Cmnd *)hostdata->disconnected_Q;
+ patch = NULL;
+ while (cmd) {
+ if (id == cmd->target && lun == cmd->lun)
+ break;
+ patch = cmd;
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+
+ /* Hmm. Couldn't find a valid command.... What to do? */
+
+ if (!cmd) {
+ printk("---TROUBLE: target %d.%d not in disconnect queue---",id,lun);
+ break;
+ }
+
+ /* Ok, found the command - now start it up again. */
+
+ if (patch)
+ patch->host_scribble = cmd->host_scribble;
+ else
+ hostdata->disconnected_Q = (Scsi_Cmnd *)cmd->host_scribble;
+ hostdata->connected = cmd;
+
+ /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
+ * because these things are preserved over a disconnect.
+ * But we DO need to fix the DPD bit so it's correct for this command.
+ */
+
+ if (is_dir_out(cmd))
+ write_3393(hostdata,WD_DESTINATION_ID,cmd->target);
+ else
+ write_3393(hostdata,WD_DESTINATION_ID,cmd->target | DSTID_DPD);
+ if (hostdata->level2 >= L2_RESELECT) {
+ write_3393_count(hostdata,0); /* we want a DATA_PHASE interrupt */
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else
+ hostdata->state = S_CONNECTED;
+
+DB(DB_INTR,printk("-%ld",cmd->pid))
+ break;
+
+ default:
+ printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--",asr,sr,phs);
+ }
+
+ write1_io(0, IO_LED_OFF);
+
+DB(DB_INTR,printk("} "))
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+
+}
+
+
+
+#define RESET_CARD 0
+#define RESET_CARD_AND_BUS 1
+#define B_FLAG 0x80
+
+/* reset_hardware() - reset the IN2000 card and, optionally, the SCSI bus.
+ *
+ * 'type' is RESET_CARD or RESET_CARD_AND_BUS; the latter also pulses the
+ * card-reset IO port before reprogramming the WD33c93 chip.
+ *
+ * Returns the WD33c93 status byte read after the chip-level RESET command
+ * completes, with B_FLAG or'ed in when the QUEUE_TAG register is readable/
+ * writable (the detect code uses that bit to identify a 33c93B part).
+ */
+static int reset_hardware(struct Scsi_Host *instance, int type)
+{
+struct IN2000_hostdata *hostdata;
+int qt,x;
+unsigned long flags;
+
+    hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+    write1_io(0, IO_LED_ON);
+    if (type == RESET_CARD_AND_BUS) {
+        write1_io(0,IO_CARD_RESET);
+        /* NOTE(review): this dummy read appears to complete/latch the
+         * card-reset cycle - confirm against the card's IO map. */
+        x = read1_io(IO_HARDWARE);
+    }
+    x = read_3393(hostdata,WD_SCSI_STATUS);   /* clear any WD intrpt */
+
+    /* Program our SCSI id plus the chip's feature/clock bits, then the
+     * control register and the default synchronous-transfer parameters.
+     */
+    write_3393(hostdata,WD_OWN_ID, instance->this_id |
+                OWNID_EAF | OWNID_RAF | OWNID_FS_8);
+    write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+    write_3393(hostdata,WD_SYNCHRONOUS_TRANSFER,
+                calc_sync_xfer(hostdata->default_sx_per/4,DEFAULT_SX_OFF));
+
+    /* The fifo reset and the chip RESET command must not be interrupted:
+     * we busy-wait for ASR_INT with interrupts off.
+     */
+    save_flags(flags);
+    cli();
+    write1_io(0,IO_FIFO_WRITE);         /* clear fifo counter */
+    write1_io(0,IO_FIFO_READ);          /* start fifo out in read mode */
+    write_3393(hostdata,WD_COMMAND, WD_CMD_RESET);
+    while (!(READ_AUX_STAT() & ASR_INT))
+        ;                          /* wait for RESET to complete */
+
+    x = read_3393(hostdata,WD_SCSI_STATUS);   /* clear interrupt */
+    restore_flags(flags);
+
+    /* Probe the QUEUE_TAG register: if the test pattern reads back, the
+     * register exists and this is a 33c93B - flag it for the caller.
+     */
+    write_3393(hostdata,WD_QUEUE_TAG,0xa5);   /* any random number */
+    qt = read_3393(hostdata,WD_QUEUE_TAG);
+    if (qt == 0xa5) {
+        x |= B_FLAG;
+        write_3393(hostdata,WD_QUEUE_TAG,0);
+    }
+    write_3393(hostdata,WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
+    write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+    write1_io(0, IO_LED_OFF);
+    return x;
+}
+
+
+
+/* in2000_reset() - mid-layer entry point: reset the card and the SCSI bus,
+ * then wipe all driver state back to power-on defaults.
+ *
+ * The hardware reset is done first; only then are the per-target arrays
+ * and the four command queues/pointers cleared, so no stale command can
+ * be touched by a late interrupt.
+ *
+ * 'reset_flags' is accepted for the interface but not examined here.
+ * NOTE(review): returns 0 unconditionally - confirm this is the reset
+ * status code the mid-layer in use expects.
+ */
+int in2000_reset(Scsi_Cmnd *cmd, unsigned int reset_flags)
+{
+unsigned long flags;
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+int x;
+
+    instance = cmd->host;
+    hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+    printk("scsi%d: Reset. ", instance->host_no);
+    save_flags(flags);
+    cli();
+
+    /* do scsi-reset here */
+
+    reset_hardware(instance, RESET_CARD_AND_BUS);
+    for (x = 0; x < 8; x++) {
+        hostdata->busy[x] = 0;
+        hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER/4,DEFAULT_SX_OFF);
+        hostdata->sync_stat[x] = SS_UNSET;  /* using default sync values */
+    }
+    /* Drop every queued, selecting, connected and disconnected command. */
+    hostdata->input_Q = NULL;
+    hostdata->selecting = NULL;
+    hostdata->connected = NULL;
+    hostdata->disconnected_Q = NULL;
+    hostdata->state = S_UNCONNECTED;
+    hostdata->fifo = FI_FIFO_UNUSED;
+    hostdata->incoming_ptr = 0;
+    hostdata->outgoing_len = 0;
+
+    cmd->result = DID_RESET << 16;
+    restore_flags(flags);
+    return 0;
+}
+
+
+
+/* in2000_abort() - mid-layer entry point: try to abort 'cmd'.
+ *
+ * Four cases are handled:
+ *   1) cmd is still waiting on input_Q: unlink it, complete it with
+ *      DID_ABORT, and return SCSI_ABORT_SUCCESS.
+ *   2) cmd is the currently connected command: issue a wd33c93 ABORT,
+ *      drain the chip's data register, DISCONNECT, complete the command
+ *      with DID_ABORT, and start the next queued command.
+ *   3) cmd is sitting on disconnected_Q: punt with SCSI_ABORT_SNOOZE.
+ *   4) cmd is in none of our queues - it most likely completed just
+ *      before the abort arrived; return SCSI_ABORT_NOT_RUNNING.
+ */
+int in2000_abort (Scsi_Cmnd *cmd)
+{
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *tmp, *prev;
+unsigned long flags;
+uchar sr, asr;
+unsigned long timeout;
+
+    save_flags (flags);
+    cli();
+
+    instance = cmd->host;
+    hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+    printk ("scsi%d: Abort-", instance->host_no);
+    printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ",
+            READ_AUX_STAT(),read_3393_count(hostdata),cmd->SCp.this_residual,cmd->SCp.buffers_residual,
+            cmd->SCp.have_data_in,read1_io(IO_FIFO_COUNT));
+
+/*
+ * Case 1 : If the command hasn't been issued yet, we simply remove it
+ *     from the input_Q.
+ */
+
+    tmp = (Scsi_Cmnd *)hostdata->input_Q;
+    prev = 0;
+    while (tmp) {
+        if (tmp == cmd) {
+
+    /* Unlink the command from input_Q. BUGFIX: the old code only fixed
+     * up 'prev', so when the aborted command was the HEAD of the queue
+     * it stayed there with a NULLed link - truncating the queue and
+     * leaving an already-completed command eligible for re-issue by
+     * in2000_execute(). Advance the head pointer in that case.
+     */
+
+            if (prev)
+                prev->host_scribble = cmd->host_scribble;
+            else
+                hostdata->input_Q = (Scsi_Cmnd *)cmd->host_scribble;
+            cmd->host_scribble = NULL;
+            cmd->result = DID_ABORT << 16;
+            printk("scsi%d: Abort - removing command %ld from input_Q. ",
+                    instance->host_no, cmd->pid);
+            cmd->scsi_done(cmd);
+            restore_flags(flags);
+            return SCSI_ABORT_SUCCESS;
+        }
+        prev = tmp;
+        tmp = (Scsi_Cmnd *)tmp->host_scribble;
+    }
+
+/*
+ * Case 2 : If the command is connected, we're going to fail the abort
+ *     and let the high level SCSI driver retry at a later time or
+ *     issue a reset.
+ *
+ *     Timeouts, and therefore aborted commands, will be highly unlikely
+ *     and handling them cleanly in this situation would make the common
+ *     case of noresets less efficient, and would pollute our code. So,
+ *     we fail.
+ */
+
+    if (hostdata->connected == cmd) {
+
+        printk("scsi%d: Aborting connected command %ld - ",
+                instance->host_no, cmd->pid);
+
+        printk("sending wd33c93 ABORT command - ");
+        write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+        write_3393_cmd(hostdata, WD_CMD_ABORT);
+
+/* Now we have to attempt to flush out the FIFO... */
+
+        printk("flushing fifo - ");
+        timeout = 1000000;
+        do {
+            /* pull bytes out of the data register until the chip
+             * raises its interrupt (ABORT done) or we give up */
+            asr = READ_AUX_STAT();
+            if (asr & ASR_DBR)
+                read_3393(hostdata, WD_DATA);
+        } while (!(asr & ASR_INT) && timeout-- > 0);
+        sr = read_3393(hostdata, WD_SCSI_STATUS);
+        printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ",
+                asr, sr, read_3393_count(hostdata), timeout);
+
+    /*
+     * Abort command processed.
+     * Still connected.
+     * We must disconnect.
+     */
+
+        printk("sending wd33c93 DISCONNECT command - ");
+        write_3393_cmd(hostdata, WD_CMD_DISCONNECT);
+
+        timeout = 1000000;
+        asr = READ_AUX_STAT();
+        while ((asr & ASR_CIP) && timeout-- > 0)
+            asr = READ_AUX_STAT();
+        sr = read_3393(hostdata, WD_SCSI_STATUS);
+        printk("asr=%02x, sr=%02x.",asr,sr);
+
+        hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+        hostdata->connected = NULL;
+        hostdata->state = S_UNCONNECTED;
+        cmd->result = DID_ABORT << 16;
+        cmd->scsi_done(cmd);
+
+        in2000_execute (instance);
+
+        restore_flags(flags);
+        return SCSI_ABORT_SUCCESS;
+    }
+
+/*
+ * Case 3: If the command is currently disconnected from the bus,
+ * we're not going to expend much effort here: Let's just return
+ * an ABORT_SNOOZE and hope for the best...
+ */
+
+    for (tmp=(Scsi_Cmnd *)hostdata->disconnected_Q; tmp;
+            tmp=(Scsi_Cmnd *)tmp->host_scribble)
+        if (cmd == tmp) {
+            restore_flags(flags);
+            printk("Sending ABORT_SNOOZE. ");
+            return SCSI_ABORT_SNOOZE;
+        }
+
+/*
+ * Case 4 : If we reached this point, the command was not found in any of
+ *     the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+    in2000_execute (instance);
+
+    restore_flags(flags);
+    printk("scsi%d: warning : SCSI command probably completed successfully"
+            " before abortion. ", instance->host_no);
+    return SCSI_ABORT_NOT_RUNNING;
+}
+
+
+
+/* Command-line setup support. The boot-time 'in2000=...' string is copied
+ * into setup_buffer and split into setup_args[] by in2000_setup();
+ * setup_used[] marks which entries check_setup_args() has consumed, and
+ * done_setup records that parsing has happened.
+ */
+#define MAX_IN2000_HOSTS 3
+#define MAX_SETUP_ARGS (sizeof(setup_args) / sizeof(char *))
+#define SETUP_BUFFER_SIZE 200
+static char setup_buffer[SETUP_BUFFER_SIZE];
+static char setup_used[MAX_SETUP_ARGS];
+static int done_setup = 0;
+
+/* in2000_setup() - parse the 'in2000=...' kernel command-line string.
+ *
+ * The string is copied (bounded) into setup_buffer and split on commas;
+ * each non-empty piece becomes an entry in setup_args[] (empty pieces
+ * leave their slot untouched but still consume an index). setup_used[]
+ * is cleared so check_setup_args() can consume the arguments later.
+ * The 'ints' parameter is part of the old setup interface and unused.
+ */
+in2000__INITFUNC( void in2000_setup (char *str, int *ints) )
+{
+int slot;
+char *tok,*comma;
+
+    strncpy(setup_buffer,str,SETUP_BUFFER_SIZE);
+    setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';    /* force termination */
+    tok = setup_buffer;
+    for (slot = 0; *tok && slot < MAX_SETUP_ARGS; slot++) {
+        comma = strchr(tok, ',');
+        if (comma == NULL) {        /* final argument - no trailing comma */
+            setup_args[slot] = tok;
+            break;
+        }
+        *comma = '\0';
+        if (comma != tok)           /* skip empty pieces (",,") */
+            setup_args[slot] = tok;
+        tok = comma + 1;
+    }
+    memset(setup_used, 0, MAX_SETUP_ARGS);
+    done_setup = 1;
+}
+
+
+/* check_setup_args() returns index if key found, 0 if not
+ */
+
+/* check_setup_args() - find 'key' among the not-yet-used command-line
+ * arguments. Returns 0 when the key isn't present; otherwise marks the
+ * matching argument as used and returns its 1-based index. If the
+ * argument carries a ':<digits>' suffix, the numeric value is stored
+ * through 'val'; otherwise *val is left at -1. ('flags' and 'buf' are
+ * part of the shared setup interface and are not examined here.)
+ */
+in2000__INITFUNC( static int check_setup_args(char *key, int *flags, int *val, char *buf) )
+{
+int idx;
+char *cp;
+
+    for (idx = 0; idx < MAX_SETUP_ARGS; idx++) {
+        if (!setup_used[idx] &&
+                strncmp(setup_args[idx], key, strlen(key)) == 0)
+            break;
+    }
+    if (idx >= MAX_SETUP_ARGS)
+        return 0;                   /* key not found */
+
+    setup_used[idx] = 1;            /* consume this argument */
+    *val = -1;
+    cp = setup_args[idx] + strlen(key);
+    if (*cp == ':') {
+        cp++;
+        if (*cp >= '0' && *cp <= '9')
+            *val = simple_strtoul(cp,NULL,0);
+    }
+    return idx + 1;                 /* 1-based so 0 can mean "not found" */
+}
+
+
+
+/* The "correct" (ie portable) way to access memory-mapped hardware
+ * such as the IN2000 EPROM and dip switch is through the use of
+ * special macros declared in 'asm/io.h'. We use readb() and readl()
+ * when reading from the card's BIOS area in in2000_detect().
+ */
+/* The three memory addresses at which an IN2000 BIOS EPROM may appear;
+ * the probe loop in in2000_detect() walks this 0-terminated list.
+ */
+static const unsigned int *bios_tab[] in2000__INITDATA = {
+   (unsigned int *)0xc8000,
+   (unsigned int *)0xd0000,
+   (unsigned int *)0xd8000,
+   0
+   };
+
+/* IO base addresses, indexed by the SW_ADDR0|SW_ADDR1 dip-switch bits
+ * (see the 'switches & (SW_ADDR0 | SW_ADDR1)' lookup in in2000_detect()).
+ */
+static const unsigned short base_tab[] in2000__INITDATA = {
+   0x220,
+   0x200,
+   0x110,
+   0x100,
+   };
+
+/* IRQ numbers, indexed by the SW_INT0|SW_INT1 switch bits shifted down
+ * by SW_INT_SHIFT (see the request_irq() call in in2000_detect()).
+ */
+static const int int_tab[] in2000__INITDATA = {
+   15,
+   14,
+   11,
+   10
+   };
+
+
+in2000__INITFUNC( int in2000_detect(Scsi_Host_Template * tpnt) )
+{
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+int detect_count;
+int bios;
+int x;
+unsigned short base;
+uchar switches;
+uchar hrev;
+int flags;
+int val;
+char buf[32];
+
+/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
+ * pretty straightforward and fool-proof operation. There are 3
+ * possible locations for the IN2000 EPROM in memory space - if we
+ * find a BIOS signature, we can read the dip switch settings from
+ * the byte at BIOS+32 (shadowed in by logic on the card). From 2
+ * of the switch bits we get the card's address in IO space. There's
+ * an image of the dip switch there, also, so we have a way to back-
+ * check that this really is an IN2000 card. Very nifty. Use the
+ * 'ioport:xx' command-line parameter if your BIOS EPROM is absent
+ * or disabled.
+ */
+
+ if (!done_setup && setup_strings)
+ in2000_setup(setup_strings,0);
+
+ detect_count = 0;
+ for (bios = 0; bios_tab[bios]; bios++) {
+ if (check_setup_args("ioport",&flags,&val,buf)) {
+ base = val;
+ switches = ~inb(base + IO_SWITCHES) & 0xff;
+ printk("Forcing IN2000 detection at IOport 0x%x ",base);
+ bios = 2;
+ }
+/*
+ * There have been a couple of BIOS versions with different layouts
+ * for the obvious ID strings. We look for the 2 most common ones and
+ * hope that they cover all the cases...
+ */
+ else if (readl(bios_tab[bios]+0x04) == 0x41564f4e ||
+ readl(bios_tab[bios]+0x0c) == 0x61776c41) {
+ printk("Found IN2000 BIOS at 0x%x ",(unsigned int)bios_tab[bios]);
+
+/* Read the switch image that's mapped into EPROM space */
+
+ switches = ~((readb(bios_tab[bios]+0x08) & 0xff));
+
+/* Find out where the IO space is */
+
+ x = switches & (SW_ADDR0 | SW_ADDR1);
+ base = base_tab[x];
+
+/* Check for the IN2000 signature in IO space. */
+
+ x = ~inb(base + IO_SWITCHES) & 0xff;
+ if (x != switches) {
+ printk("Bad IO signature: %02x vs %02x.\n",x,switches);
+ continue;
+ }
+ }
+ else
+ continue;
+
+/* OK. We have a base address for the IO ports - run a few safety checks */
+
+ if (!(switches & SW_BIT7)) { /* I _think_ all cards do this */
+ printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n",base);
+ continue;
+ }
+
+/* Let's assume any hardware version will work, although the driver
+ * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
+ * print out the rev number for reference later, but accept them all.
+ */
+
+ hrev = inb(base + IO_HARDWARE);
+
+ /* Bit 2 tells us if interrupts are disabled */
+ if (switches & SW_DISINT) {
+ printk("The IN-2000 SCSI card at IOport 0x%03x ",base);
+ printk("is not configured for interrupt operation!\n");
+ printk("This driver requires an interrupt: cancelling detection.\n");
+ continue;
+ }
+
+/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
+ * initialize it.
+ */
+
+ tpnt->proc_dir = &proc_scsi_in2000; /* done more than once? harmless. */
+ detect_count++;
+ instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
+ if (!instance_list)
+ instance_list = instance;
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+ instance->io_port = hostdata->io_base = base;
+ hostdata->dip_switch = switches;
+ hostdata->hrev = hrev;
+
+ write1_io(0,IO_FIFO_WRITE); /* clear fifo counter */
+ write1_io(0,IO_FIFO_READ); /* start fifo out in read mode */
+ write1_io(0,IO_INTR_MASK); /* allow all ints */
+ x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
+ if (request_irq(x, in2000_intr, SA_INTERRUPT, "in2000", NULL)) {
+ printk("in2000_detect: Unable to allocate IRQ.\n");
+ detect_count--;
+ continue;
+ }
+ instance->irq = x;
+ instance->n_io_port = 13;
+ request_region(base, 13, "in2000"); /* lock in this IO space for our use */
+
+ for (x = 0; x < 8; x++) {
+ hostdata->busy[x] = 0;
+ hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER/4,DEFAULT_SX_OFF);
+ hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[x] = 0;
+ hostdata->disc_allowed_cnt[x] = 0;
+ hostdata->disc_done_cnt[x] = 0;
+#endif
+ }
+ hostdata->input_Q = NULL;
+ hostdata->selecting = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_Q = NULL;
+ hostdata->state = S_UNCONNECTED;
+ hostdata->fifo = FI_FIFO_UNUSED;
+ hostdata->level2 = L2_BASIC;
+ hostdata->disconnect = DIS_ADAPTIVE;
+ hostdata->args = DEBUG_DEFAULTS;
+ hostdata->incoming_ptr = 0;
+ hostdata->outgoing_len = 0;
+ hostdata->default_sx_per = DEFAULT_SX_PER;
+
+/* Older BIOS's had a 'sync on/off' switch - use its setting */
+
+ if (readl(bios_tab[bios]+0x04) == 0x41564f4e && (switches & SW_SYNC_DOS5))
+ hostdata->sync_off = 0x00; /* sync defaults to on */
+ else
+ hostdata->sync_off = 0xff; /* sync defaults to off */
+
+#ifdef PROC_INTERFACE
+ hostdata->proc = PR_VERSION|PR_INFO|PR_STATISTICS|
+ PR_CONNECTED|PR_INPUTQ|PR_DISCQ|
+ PR_STOP;
+#ifdef PROC_STATISTICS
+ hostdata->int_cnt = 0;
+#endif
+#endif
+
+ if (check_setup_args("nosync",&flags,&val,buf))
+ hostdata->sync_off = val;
+
+ if (check_setup_args("period",&flags,&val,buf))
+ hostdata->default_sx_per = sx_table[round_period((unsigned int)val)].period_ns;
+
+ if (check_setup_args("disconnect",&flags,&val,buf)) {
+ if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
+ hostdata->disconnect = val;
+ else
+ hostdata->disconnect = DIS_ADAPTIVE;
+ }
+
+ if (check_setup_args("noreset",&flags,&val,buf))
+ hostdata->args ^= A_NO_SCSI_RESET;
+
+ if (check_setup_args("level2",&flags,&val,buf))
+ hostdata->level2 = val;
+
+ if (check_setup_args("debug",&flags,&val,buf))
+ hostdata->args = (val & DB_MASK);
+
+#ifdef PROC_INTERFACE
+ if (check_setup_args("proc",&flags,&val,buf))
+ hostdata->proc = val;
+#endif
+
+
+ x = reset_hardware(instance,(hostdata->args & A_NO_SCSI_RESET)?RESET_CARD:RESET_CARD_AND_BUS);
+
+ hostdata->microcode = read_3393(hostdata,WD_CDB_1);
+ if (x & 0x01) {
+ if (x & B_FLAG)
+ hostdata->chip = C_WD33C93B;
+ else
+ hostdata->chip = C_WD33C93A;
+ }
+ else
+ hostdata->chip = C_WD33C93;
+
+ printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ",
+ (switches & 0x7f),
+ instance->irq,hostdata->io_base,
+ (switches & SW_FLOPPY)?"Yes":"No",
+ (switches & SW_SYNC_DOS5)?"Yes":"No");
+ printk("hardware_ver=%02x chip=%s microcode=%02x\n",
+ hrev,
+ (hostdata->chip==C_WD33C93)?"WD33c93":
+ (hostdata->chip==C_WD33C93A)?"WD33c93A":
+ (hostdata->chip==C_WD33C93B)?"WD33c93B":"unknown",
+ hostdata->microcode);
+#ifdef DEBUGGING_ON
+ printk("setup_args = ");
+ for (x=0; x<MAX_SETUP_ARGS; x++)
+ printk("%s,",setup_args[x]);
+ printk("\n");
+#endif
+ if (hostdata->sync_off == 0xff)
+ printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
+ printk("IN2000 driver version %s - %s\n",IN2000_VERSION,IN2000_DATE);
+ }
+
+ return detect_count;
+}
+
+
+/* NOTE: I lifted this function straight out of the old driver,
+ * and have not tested it. Presumably it does what it's
+ * supposed to do...
+ */
+
+int in2000_biosparam(Disk *disk, kdev_t dev, int *iinfo)
+{
+int size;
+
+ size = disk->capacity;
+ iinfo[0] = 64;
+ iinfo[1] = 32;
+ iinfo[2] = size >> 11;
+
+/* This should approximate the large drive handling that the DOS ASPI manager
+ uses. Drives very near the boundaries may not be handled correctly (i.e.
+ near 2.0 Gb and 4.0 Gb) */
+
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 64;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 128;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 255;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ return 0;
+}
+
+
+
+struct proc_dir_entry proc_scsi_in2000 = {
+ PROC_SCSI_IN2000, 6, "in2000",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+ };
+
+
+int in2000_proc_info(char *buf, char **start, off_t off, int len, int hn, int in)
+{
+
+#ifdef PROC_INTERFACE
+
+char *bp;
+char tbuf[128];
+unsigned long flags;
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hd;
+Scsi_Cmnd *cmd;
+int x,i;
+static int stop = 0;
+
+ for (instance=instance_list; instance; instance=instance->next) {
+ if (instance->host_no == hn)
+ break;
+ }
+ if (!instance) {
+ printk("*** Hmm... Can't find host #%d!\n",hn);
+ return (-ESRCH);
+ }
+ hd = (struct IN2000_hostdata *)instance->hostdata;
+
+/* If 'in' is TRUE we need to _read_ the proc file. We accept the following
+ * keywords (same format as command-line, but only ONE per read):
+ * debug
+ * disconnect
+ * period
+ * resync
+ * proc
+ */
+
+ if (in) {
+ buf[len] = '\0';
+ bp = buf;
+ if (!strncmp(bp,"debug:",6)) {
+ bp += 6;
+ hd->args = simple_strtoul(bp,NULL,0) & DB_MASK;
+ }
+ else if (!strncmp(bp,"disconnect:",11)) {
+ bp += 11;
+ x = simple_strtoul(bp,NULL,0);
+ if (x < DIS_NEVER || x > DIS_ALWAYS)
+ x = DIS_ADAPTIVE;
+ hd->disconnect = x;
+ }
+ else if (!strncmp(bp,"period:",7)) {
+ bp += 7;
+ x = simple_strtoul(bp,NULL,0);
+ hd->default_sx_per = sx_table[round_period((unsigned int)x)].period_ns;
+ }
+ else if (!strncmp(bp,"resync:",7)) {
+ bp += 7;
+ x = simple_strtoul(bp,NULL,0);
+ for (i=0; i<7; i++)
+ if (x & (1<<i))
+ hd->sync_stat[i] = SS_UNSET;
+ }
+ else if (!strncmp(bp,"proc:",5)) {
+ bp += 5;
+ hd->proc = simple_strtoul(bp,NULL,0);
+ }
+ else if (!strncmp(bp,"level2:",7)) {
+ bp += 7;
+ hd->level2 = simple_strtoul(bp,NULL,0);
+ }
+ return len;
+ }
+
+ save_flags(flags);
+ cli();
+ bp = buf;
+ *bp = '\0';
+ if (hd->proc & PR_VERSION) {
+ /* Don't create varied object files each time this file is compiled. */
+ /* sprintf(tbuf,"\nVersion %s - %s. Compiled %s %s",
+ IN2000_VERSION,IN2000_DATE,__DATE__,__TIME__); */
+ sprintf(tbuf,"\nVersion %s - %s.",
+ IN2000_VERSION,IN2000_DATE);
+ strcat(bp,tbuf);
+ }
+ if (hd->proc & PR_INFO) {
+ sprintf(tbuf,"\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s",
+ (hd->dip_switch & 0x7f), instance->irq, hd->io_base,
+ (hd->dip_switch & 0x40)?"Yes":"No",
+ (hd->dip_switch & 0x20)?"Yes":"No");
+ strcat(bp,tbuf);
+ strcat(bp,"\nsync_xfer[] = ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%02x",hd->sync_xfer[x]);
+ strcat(bp,tbuf);
+ }
+ strcat(bp,"\nsync_stat[] = ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%02x",hd->sync_stat[x]);
+ strcat(bp,tbuf);
+ }
+ }
+#ifdef PROC_STATISTICS
+ if (hd->proc & PR_STATISTICS) {
+ strcat(bp,"\ncommands issued: ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%ld",hd->cmd_cnt[x]);
+ strcat(bp,tbuf);
+ }
+ strcat(bp,"\ndisconnects allowed:");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%ld",hd->disc_allowed_cnt[x]);
+ strcat(bp,tbuf);
+ }
+ strcat(bp,"\ndisconnects done: ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%ld",hd->disc_done_cnt[x]);
+ strcat(bp,tbuf);
+ }
+ sprintf(tbuf,"\ninterrupts: \t%ld",hd->int_cnt);
+ strcat(bp,tbuf);
+ }
+#endif
+ if (hd->proc & PR_CONNECTED) {
+ strcat(bp,"\nconnected: ");
+ if (hd->connected) {
+ cmd = (Scsi_Cmnd *)hd->connected;
+ sprintf(tbuf," %ld-%d:%d(%02x)",
+ cmd->pid, cmd->target, cmd->lun, cmd->cmnd[0]);
+ strcat(bp,tbuf);
+ }
+ }
+ if (hd->proc & PR_INPUTQ) {
+ strcat(bp,"\ninput_Q: ");
+ cmd = (Scsi_Cmnd *)hd->input_Q;
+ while (cmd) {
+ sprintf(tbuf," %ld-%d:%d(%02x)",
+ cmd->pid, cmd->target, cmd->lun, cmd->cmnd[0]);
+ strcat(bp,tbuf);
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+ }
+ if (hd->proc & PR_DISCQ) {
+ strcat(bp,"\ndisconnected_Q:");
+ cmd = (Scsi_Cmnd *)hd->disconnected_Q;
+ while (cmd) {
+ sprintf(tbuf," %ld-%d:%d(%02x)",
+ cmd->pid, cmd->target, cmd->lun, cmd->cmnd[0]);
+ strcat(bp,tbuf);
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+ }
+ if (hd->proc & PR_TEST) {
+ ; /* insert your own custom function here */
+ }
+ strcat(bp,"\n");
+ restore_flags(flags);
+ *start = buf;
+ if (stop) {
+ stop = 0;
+ return 0; /* return 0 to signal end-of-file */
+ }
+ if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */
+   stop = 1;
+ if (hd->proc & PR_STOP) /* stop every other time */
+ stop = 1;
+ return strlen(bp);
+
+#else /* PROC_INTERFACE */
+
+ return 0;
+
+#endif /* PROC_INTERFACE */
+
+}
+
+
+#ifdef MODULE
+
+Scsi_Host_Template driver_template = IN2000;
+
+#include "scsi_module.c"
+
+#endif
+
diff --git a/linux/src/drivers/scsi/in2000.h b/linux/src/drivers/scsi/in2000.h
new file mode 100644
index 0000000..732bab8
--- /dev/null
+++ b/linux/src/drivers/scsi/in2000.h
@@ -0,0 +1,465 @@
+/*
+ * in2000.h - Linux device driver definitions for the
+ * Always IN2000 ISA SCSI card.
+ *
+ * IMPORTANT: This file is for version 1.33 - 26/Aug/1998
+ *
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef IN2000_H
+#define IN2000_H
+
+#include <asm/io.h>
+
+#define PROC_INTERFACE /* add code for /proc/scsi/in2000/xxx interface */
+#ifdef PROC_INTERFACE
+#define PROC_STATISTICS /* add code for keeping various real time stats */
+#endif
+
+#define SYNC_DEBUG /* extra info on sync negotiation printed */
+#define DEBUGGING_ON /* enable command-line debugging bitmask */
+#define DEBUG_DEFAULTS 0 /* default bitmask - change from command-line */
+
+#define FAST_READ_IO /* No problems with these on my machine */
+#define FAST_WRITE_IO
+
+#ifdef DEBUGGING_ON
+#define DB(f,a) if (hostdata->args & (f)) a;
+#define CHECK_NULL(p,s) /* if (!(p)) {printk("\n"); while (1) printk("NP:%s\r",(s));} */
+#else
+#define DB(f,a)
+#define CHECK_NULL(p,s)
+#endif
+
+#define uchar unsigned char
+
+#define read1_io(a) (inb(hostdata->io_base+(a)))
+#define read2_io(a) (inw(hostdata->io_base+(a)))
+#define write1_io(b,a) (outb((b),hostdata->io_base+(a)))
+#define write2_io(w,a) (outw((w),hostdata->io_base+(a)))
+
+/* These inline assembly defines are derived from a patch
+ * sent to me by Bill Earnest. He's done a lot of very
+ * valuable thinking, testing, and coding during his effort
+ * to squeeze more speed out of this driver. I really think
+ * that we are doing IO at close to the maximum now with
+ * the fifo. (And yes, insw uses 'edi' while outsw uses
+ * 'esi'. Thanks Bill!)
+ */
+
+#define FAST_READ2_IO() \
+({ \
+int __dummy_1,__dummy_2; \
+ __asm__ __volatile__ ("\n \
+ cld \n \
+ orl %%ecx, %%ecx \n \
+ jz 1f \n \
+ rep \n \
+ insw (%%dx),%%es:(%%edi) \n \
+1: " \
+ : "=D" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2) /* output */ \
+ : "2" (f), "0" (sp), "1" (i) /* input */ \
+ ); /* trashed */ \
+})
+
+#define FAST_WRITE2_IO() \
+({ \
+int __dummy_1,__dummy_2; \
+ __asm__ __volatile__ ("\n \
+ cld \n \
+ orl %%ecx, %%ecx \n \
+ jz 1f \n \
+ rep \n \
+ outsw %%ds:(%%esi),(%%dx) \n \
+1: " \
+ : "=S" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)/* output */ \
+ : "2" (f), "0" (sp), "1" (i) /* input */ \
+ ); /* trashed */ \
+})
+
+/* IN2000 io_port offsets */
+#define IO_WD_ASR 0x00 /* R - 3393 auxstat reg */
+#define ASR_INT 0x80
+#define ASR_LCI 0x40
+#define ASR_BSY 0x20
+#define ASR_CIP 0x10
+#define ASR_PE 0x02
+#define ASR_DBR 0x01
+#define IO_WD_ADDR 0x00 /* W - 3393 address reg */
+#define IO_WD_DATA 0x01 /* R/W - rest of 3393 regs */
+#define IO_FIFO 0x02 /* R/W - in2000 dual-port fifo (16 bits) */
+#define IN2000_FIFO_SIZE 2048 /* fifo capacity in bytes */
+#define IO_CARD_RESET 0x03 /* W - in2000 start master reset */
+#define IO_FIFO_COUNT 0x04 /* R - in2000 fifo counter */
+#define IO_FIFO_WRITE 0x05 /* W - clear fifo counter, start write */
+#define IO_FIFO_READ 0x07 /* W - start fifo read */
+#define IO_LED_OFF 0x08 /* W - turn off in2000 activity LED */
+#define IO_SWITCHES 0x08 /* R - read in2000 dip switch */
+#define SW_ADDR0 0x01 /* bit 0 = bit 0 of index to io addr */
+#define SW_ADDR1 0x02 /* bit 1 = bit 1 of index to io addr */
+#define SW_DISINT 0x04 /* bit 2 true if ints disabled */
+#define SW_INT0 0x08 /* bit 3 = bit 0 of index to interrupt */
+#define SW_INT1 0x10 /* bit 4 = bit 1 of index to interrupt */
+#define SW_INT_SHIFT 3 /* shift right this amount to right justify int bits */
+#define SW_SYNC_DOS5 0x20 /* bit 5 used by Always BIOS */
+#define SW_FLOPPY 0x40 /* bit 6 true if floppy enabled */
+#define SW_BIT7 0x80 /* bit 7 hardwired true (ground) */
+#define IO_LED_ON 0x09 /* W - turn on in2000 activity LED */
+#define IO_HARDWARE 0x0a /* R - read in2000 hardware rev, stop reset */
+#define IO_INTR_MASK 0x0c /* W - in2000 interrupt mask reg */
+#define IMASK_WD 0x01 /* WD33c93 interrupt mask */
+#define IMASK_FIFO 0x02 /* FIFO interrupt mask */
+
+/* wd register names */
+#define WD_OWN_ID 0x00
+#define WD_CONTROL 0x01
+#define WD_TIMEOUT_PERIOD 0x02
+#define WD_CDB_1 0x03
+#define WD_CDB_2 0x04
+#define WD_CDB_3 0x05
+#define WD_CDB_4 0x06
+#define WD_CDB_5 0x07
+#define WD_CDB_6 0x08
+#define WD_CDB_7 0x09
+#define WD_CDB_8 0x0a
+#define WD_CDB_9 0x0b
+#define WD_CDB_10 0x0c
+#define WD_CDB_11 0x0d
+#define WD_CDB_12 0x0e
+#define WD_TARGET_LUN 0x0f
+#define WD_COMMAND_PHASE 0x10
+#define WD_SYNCHRONOUS_TRANSFER 0x11
+#define WD_TRANSFER_COUNT_MSB 0x12
+#define WD_TRANSFER_COUNT 0x13
+#define WD_TRANSFER_COUNT_LSB 0x14
+#define WD_DESTINATION_ID 0x15
+#define WD_SOURCE_ID 0x16
+#define WD_SCSI_STATUS 0x17
+#define WD_COMMAND 0x18
+#define WD_DATA 0x19
+#define WD_QUEUE_TAG 0x1a
+#define WD_AUXILIARY_STATUS 0x1f
+
+/* WD commands */
+#define WD_CMD_RESET 0x00
+#define WD_CMD_ABORT 0x01
+#define WD_CMD_ASSERT_ATN 0x02
+#define WD_CMD_NEGATE_ACK 0x03
+#define WD_CMD_DISCONNECT 0x04
+#define WD_CMD_RESELECT 0x05
+#define WD_CMD_SEL_ATN 0x06
+#define WD_CMD_SEL 0x07
+#define WD_CMD_SEL_ATN_XFER 0x08
+#define WD_CMD_SEL_XFER 0x09
+#define WD_CMD_RESEL_RECEIVE 0x0a
+#define WD_CMD_RESEL_SEND 0x0b
+#define WD_CMD_WAIT_SEL_RECEIVE 0x0c
+#define WD_CMD_TRANS_ADDR 0x18
+#define WD_CMD_TRANS_INFO 0x20
+#define WD_CMD_TRANSFER_PAD 0x21
+#define WD_CMD_SBT_MODE 0x80
+
+/* SCSI Bus Phases */
+#define PHS_DATA_OUT 0x00
+#define PHS_DATA_IN 0x01
+#define PHS_COMMAND 0x02
+#define PHS_STATUS 0x03
+#define PHS_MESS_OUT 0x06
+#define PHS_MESS_IN 0x07
+
+/* Command Status Register definitions */
+
+ /* reset state interrupts */
+#define CSR_RESET 0x00
+#define CSR_RESET_AF 0x01
+
+ /* successful completion interrupts */
+#define CSR_RESELECT 0x10
+#define CSR_SELECT 0x11
+#define CSR_SEL_XFER_DONE 0x16
+#define CSR_XFER_DONE 0x18
+
+ /* paused or aborted interrupts */
+#define CSR_MSGIN 0x20
+#define CSR_SDP 0x21
+#define CSR_SEL_ABORT 0x22
+#define CSR_RESEL_ABORT 0x25
+#define CSR_RESEL_ABORT_AM 0x27
+#define CSR_ABORT 0x28
+
+ /* terminated interrupts */
+#define CSR_INVALID 0x40
+#define CSR_UNEXP_DISC 0x41
+#define CSR_TIMEOUT 0x42
+#define CSR_PARITY 0x43
+#define CSR_PARITY_ATN 0x44
+#define CSR_BAD_STATUS 0x45
+#define CSR_UNEXP 0x48
+
+ /* service required interrupts */
+#define CSR_RESEL 0x80
+#define CSR_RESEL_AM 0x81
+#define CSR_DISC 0x85
+#define CSR_SRV_REQ 0x88
+
+ /* Own ID/CDB Size register */
+#define OWNID_EAF 0x08
+#define OWNID_EHP 0x10
+#define OWNID_RAF 0x20
+#define OWNID_FS_8 0x00
+#define OWNID_FS_12 0x40
+#define OWNID_FS_16 0x80
+
+ /* Control register */
+#define CTRL_HSP 0x01
+#define CTRL_HA 0x02
+#define CTRL_IDI 0x04
+#define CTRL_EDI 0x08
+#define CTRL_HHP 0x10
+#define CTRL_POLLED 0x00
+#define CTRL_BURST 0x20
+#define CTRL_BUS 0x40
+#define CTRL_DMA 0x80
+
+ /* Timeout Period register */
+#define TIMEOUT_PERIOD_VALUE 20 /* results in 200 ms. */
+
+ /* Synchronous Transfer Register */
+#define STR_FSS 0x80
+
+ /* Destination ID register */
+#define DSTID_DPD 0x40
+#define DATA_OUT_DIR 0
+#define DATA_IN_DIR 1
+#define DSTID_SCC 0x80
+
+ /* Source ID register */
+#define SRCID_MASK 0x07
+#define SRCID_SIV 0x08
+#define SRCID_DSP 0x20
+#define SRCID_ES 0x40
+#define SRCID_ER 0x80
+
+
+
+#define ILLEGAL_STATUS_BYTE 0xff
+
+
+#define DEFAULT_SX_PER 500 /* (ns) fairly safe */
+#define DEFAULT_SX_OFF 0 /* aka async */
+
+#define OPTIMUM_SX_PER 252 /* (ns) best we can do (mult-of-4) */
+#define OPTIMUM_SX_OFF 12 /* size of in2000 fifo */
+
+struct sx_period {
+ unsigned int period_ns;
+ uchar reg_value;
+ };
+
+
+struct IN2000_hostdata {
+ struct Scsi_Host *next;
+ uchar chip; /* what kind of wd33c93 chip? */
+ uchar microcode; /* microcode rev if 'B' */
+ unsigned short io_base; /* IO port base */
+ unsigned int dip_switch; /* dip switch settings */
+ unsigned int hrev; /* hardware revision of card */
+ volatile uchar busy[8]; /* index = target, bit = lun */
+ volatile Scsi_Cmnd *input_Q; /* commands waiting to be started */
+ volatile Scsi_Cmnd *selecting; /* trying to select this command */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile Scsi_Cmnd *disconnected_Q;/* commands waiting for reconnect */
+ uchar state; /* what we are currently doing */
+ uchar fifo; /* what the FIFO is up to */
+ uchar level2; /* extent to which Level-2 commands are used */
+ uchar disconnect; /* disconnect/reselect policy */
+ unsigned int args; /* set from command-line argument */
+ uchar incoming_msg[8]; /* filled during message_in phase */
+ int incoming_ptr; /* mainly used with EXTENDED messages */
+ uchar outgoing_msg[8]; /* send this during next message_out */
+ int outgoing_len; /* length of outgoing message */
+ unsigned int default_sx_per; /* default transfer period for SCSI bus */
+ uchar sync_xfer[8]; /* sync_xfer reg settings per target */
+ uchar sync_stat[8]; /* status of sync negotiation per target */
+ uchar sync_off; /* bit mask: don't use sync with these targets */
+#ifdef PROC_INTERFACE
+ uchar proc; /* bit mask: what's in proc output */
+#ifdef PROC_STATISTICS
+ unsigned long cmd_cnt[8]; /* # of commands issued per target */
+ unsigned long int_cnt; /* # of interrupts serviced */
+ unsigned long disc_allowed_cnt[8]; /* # of disconnects allowed per target */
+ unsigned long disc_done_cnt[8]; /* # of disconnects done per target*/
+#endif
+#endif
+ };
+
+
+/* defines for hostdata->chip */
+
+#define C_WD33C93 0
+#define C_WD33C93A 1
+#define C_WD33C93B 2
+#define C_UNKNOWN_CHIP 100
+
+/* defines for hostdata->state */
+
+#define S_UNCONNECTED 0
+#define S_SELECTING 1
+#define S_RUNNING_LEVEL2 2
+#define S_CONNECTED 3
+#define S_PRE_TMP_DISC 4
+#define S_PRE_CMP_DISC 5
+
+/* defines for hostdata->fifo */
+
+#define FI_FIFO_UNUSED 0
+#define FI_FIFO_READING 1
+#define FI_FIFO_WRITING 2
+
+/* defines for hostdata->level2 */
+/* NOTE: only the first 3 are trustworthy at this point -
+ * having trouble when more than 1 device is reading/writing
+ * at the same time...
+ */
+
+#define L2_NONE 0 /* no combination commands - we get lots of ints */
+#define L2_SELECT 1 /* start with SEL_ATN_XFER, but never resume it */
+#define L2_BASIC 2 /* resume after STATUS ints & RDP messages */
+#define L2_DATA 3 /* resume after DATA_IN/OUT ints */
+#define L2_MOST 4 /* resume after anything except a RESELECT int */
+#define L2_RESELECT 5 /* resume after everything, including RESELECT ints */
+#define L2_ALL 6 /* always resume */
+
+/* defines for hostdata->disconnect */
+
+#define DIS_NEVER 0
+#define DIS_ADAPTIVE 1
+#define DIS_ALWAYS 2
+
+/* defines for hostdata->args */
+
+#define DB_TEST 1<<0
+#define DB_FIFO 1<<1
+#define DB_QUEUE_COMMAND 1<<2
+#define DB_EXECUTE 1<<3
+#define DB_INTR 1<<4
+#define DB_TRANSFER 1<<5
+#define DB_MASK 0x3f
+
+#define A_NO_SCSI_RESET 1<<15
+
+
+/* defines for hostdata->sync_stat[] */
+
+#define SS_UNSET 0
+#define SS_FIRST 1
+#define SS_WAITING 2
+#define SS_SET 3
+
+/* defines for hostdata->proc */
+
+#define PR_VERSION 1<<0
+#define PR_INFO 1<<1
+#define PR_STATISTICS 1<<2
+#define PR_CONNECTED 1<<3
+#define PR_INPUTQ 1<<4
+#define PR_DISCQ 1<<5
+#define PR_TEST 1<<6
+#define PR_STOP 1<<7
+
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < 0x020100 /* 2.0.xx */
+# define in2000__INITFUNC(function) function
+# define in2000__INIT
+# define in2000__INITDATA
+# define CLISPIN_LOCK(flags) do { save_flags(flags); cli(); } while(0)
+# define CLISPIN_UNLOCK(flags) restore_flags(flags)
+#else /* 2.1.xxx */
+# include <linux/init.h>
+# include <asm/spinlock.h>
+# define in2000__INITFUNC(function) __initfunc(function)
+# define in2000__INIT __init
+# define in2000__INITDATA __initdata
+# define CLISPIN_LOCK(flags) spin_lock_irqsave(&io_request_lock, flags)
+# define CLISPIN_UNLOCK(flags) spin_unlock_irqrestore(&io_request_lock, flags)
+#endif
+
+
+int in2000_detect(Scsi_Host_Template *) in2000__INIT;
+int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int in2000_abort(Scsi_Cmnd *);
+void in2000_setup(char *, int *) in2000__INIT;
+int in2000_proc_info(char *, char **, off_t, int, int, int);
+extern struct proc_dir_entry proc_scsi_in2000;
+int in2000_biosparam(struct scsi_disk *, kdev_t, int *);
+int in2000_reset(Scsi_Cmnd *, unsigned int);
+
+
+#define IN2000_CAN_Q 16
+#define IN2000_SG SG_ALL
+#define IN2000_CPL 2
+#define IN2000_HOST_ID 7
+
+#if LINUX_VERSION_CODE < 0x020100 /* 2.0.xx */
+
+#define IN2000 { NULL, /* link pointer for modules */ \
+ NULL, /* usage_count for modules */ \
+ &proc_scsi_in2000, /* pointer to /proc/scsi directory entry */ \
+ in2000_proc_info, /* pointer to proc info function */ \
+ "Always IN2000", /* device name */ \
+ in2000_detect, /* returns number of in2000's found */ \
+ NULL, /* optional unload function for modules */ \
+ NULL, /* optional misc info function */ \
+ NULL, /* send scsi command, wait for completion */ \
+ in2000_queuecommand, /* queue scsi command, don't wait */ \
+ in2000_abort, /* abort current command */ \
+ in2000_reset, /* reset scsi bus */ \
+ NULL, /* slave_attach - unused */ \
+ in2000_biosparam, /* figures out BIOS parameters for lilo, etc */ \
+ IN2000_CAN_Q, /* max commands we can queue up */ \
+ IN2000_HOST_ID, /* host-adapter scsi id */ \
+ IN2000_SG, /* scatter-gather table size */ \
+ IN2000_CPL, /* commands per lun */ \
+ 0, /* board counter */ \
+ 0, /* unchecked dma */ \
+ DISABLE_CLUSTERING \
+ }
+
+#else /* 2.1.xxx */
+
+#define IN2000 { proc_dir: &proc_scsi_in2000, /* pointer to /proc/scsi directory entry */ \
+ proc_info: in2000_proc_info, /* pointer to proc info function */ \
+ name: "Always IN2000", /* device name */ \
+ detect: in2000_detect, /* returns number of in2000's found */ \
+ queuecommand: in2000_queuecommand, /* queue scsi command, don't wait */ \
+ abort: in2000_abort, /* abort current command */ \
+ reset: in2000_reset, /* reset scsi bus */ \
+ bios_param: in2000_biosparam, /* figures out BIOS parameters for lilo, etc */ \
+ can_queue: IN2000_CAN_Q, /* max commands we can queue up */ \
+ this_id: IN2000_HOST_ID, /* host-adapter scsi id */ \
+ sg_tablesize: IN2000_SG, /* scatter-gather table size */ \
+ cmd_per_lun: IN2000_CPL, /* commands per lun */ \
+ use_clustering: DISABLE_CLUSTERING, /* ENABLE_CLUSTERING may speed things up */ \
+ use_new_eh_code: 0 /* new error code - not using it yet */ \
+ }
+
+#endif
+
+
+#endif /* IN2000_H */
diff --git a/linux/src/drivers/scsi/ncr53c8xx.c b/linux/src/drivers/scsi/ncr53c8xx.c
new file mode 100644
index 0000000..0a58429
--- /dev/null
+++ b/linux/src/drivers/scsi/ncr53c8xx.c
@@ -0,0 +1,10795 @@
+/******************************************************************************
+** Device driver for the PCI-SCSI NCR538XX controller family.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
+** and is currently maintained by
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+** And has been ported to NetBSD by
+** Charles M. Hannum <mycroft@gnu.ai.mit.edu>
+**
+**-----------------------------------------------------------------------------
+**
+** Brief history
+**
+** December 10 1995 by Gerard Roudier:
+** Initial port to Linux.
+**
+** June 23 1996 by Gerard Roudier:
+** Support for 64 bits architectures (Alpha).
+**
+** November 30 1996 by Gerard Roudier:
+** Support for Fast-20 scsi.
+** Support for large DMA fifo and 128 dwords bursting.
+**
+** February 27 1997 by Gerard Roudier:
+** Support for Fast-40 scsi.
+** Support for on-Board RAM.
+**
+** May 3 1997 by Gerard Roudier:
+** Full support for scsi scripts instructions pre-fetching.
+**
+** May 19 1997 by Richard Waltham <dormouse@farsrobt.demon.co.uk>:
+** Support for NvRAM detection and reading.
+**
+** August 18 1997 by Cort <cort@cs.nmt.edu>:
+** Support for Power/PC (Big Endian).
+**
+*******************************************************************************
+*/
+
+/*
+** 30 January 1998, version 2.5f.1
+**
+** Supported SCSI-II features:
+** Synchronous negotiation
+** Wide negotiation (depends on the NCR Chip)
+** Enable disconnection
+** Tagged command queuing
+** Parity checking
+** Etc...
+**
+** Supported NCR chips:
+** 53C810 (8 bits, Fast SCSI-2, no rom BIOS)
+** 53C815 (8 bits, Fast SCSI-2, on board rom BIOS)
+** 53C820 (Wide, Fast SCSI-2, no rom BIOS)
+** 53C825 (Wide, Fast SCSI-2, on board rom BIOS)
+** 53C860 (8 bits, Fast 20, no rom BIOS)
+** 53C875 (Wide, Fast 20, on board rom BIOS)
+** 53C895 (Wide, Fast 40, on board rom BIOS)
+**
+** Other features:
+** Memory mapped IO (linux-1.3.X and above only)
+** Module
+** Shared IRQ (since linux-1.3.72)
+*/
+
+#define SCSI_NCR_DEBUG_FLAGS (0)
+
+#define NCR_GETCC_WITHMSG
+
+/*==========================================================
+**
+** Include files
+**
+**==========================================================
+*/
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/stat.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+#include <linux/blk.h>
+#else
+#include "../block/blk.h"
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,35)
+#include <linux/init.h>
+#else
+#ifndef __initdata
+#define __initdata
+#endif
+#ifndef __initfunc
+#define __initfunc(__arginit) __arginit
+#endif
+#endif
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include "sd.h"
+
+#include <linux/types.h>
+
+/*
+** Define the BSD style u_int32 type
+*/
+typedef u32 u_int32;
+
+#include "ncr53c8xx.h"
+
+/*==========================================================
+**
+** Configuration and Debugging
+**
+**==========================================================
+*/
+
+/*
+** SCSI address of this device.
+** The boot routines should have set it.
+** If not, use this.
+*/
+
+#ifndef SCSI_NCR_MYADDR
+#define SCSI_NCR_MYADDR (7)
+#endif
+
+/*
+** The maximum number of tags per logic unit.
+** Used only for disk devices that support tags.
+*/
+
+#ifndef SCSI_NCR_MAX_TAGS
+#define SCSI_NCR_MAX_TAGS (4)
+#endif
+
+/*
+** Number of targets supported by the driver.
+** n permits target numbers 0..n-1.
+** Default is 7, meaning targets #0..#6.
+** #7 .. is myself.
+*/
+
+#ifdef SCSI_NCR_MAX_TARGET
+#define MAX_TARGET (SCSI_NCR_MAX_TARGET)
+#else
+#define MAX_TARGET (16)
+#endif
+
+/*
+** Number of logic units supported by the driver.
+** n enables logic unit numbers 0..n-1.
+** The common SCSI devices require only
+** one lun, so take 1 as the default.
+*/
+
+#ifdef SCSI_NCR_MAX_LUN
+#define MAX_LUN SCSI_NCR_MAX_LUN
+#else
+#define MAX_LUN (1)
+#endif
+
+/*
+** Asynchronous pre-scaler (ns). Shall be 40
+*/
+
+#ifndef SCSI_NCR_MIN_ASYNC
+#define SCSI_NCR_MIN_ASYNC (40)
+#endif
+
+/*
+** The maximum number of jobs scheduled for starting.
+** There should be one slot per target, and one slot
+** for each tag of each target in use.
+** The calculation below is actually quite silly ...
+*/
+
+#ifdef SCSI_NCR_CAN_QUEUE
+#define MAX_START (SCSI_NCR_CAN_QUEUE + 4)
+#else
+#define MAX_START (MAX_TARGET + 7 * SCSI_NCR_MAX_TAGS)
+#endif
+
+/*
+** The maximum number of segments a transfer is split into.
+*/
+
+#define MAX_SCATTER (SCSI_NCR_MAX_SCATTER)
+
+/*
+** Io mapped or memory mapped.
+*/
+
+#if defined(SCSI_NCR_IOMAPPED)
+#define NCR_IOMAPPED
+#endif
+
+/*
+** other
+*/
+
+#define NCR_SNOOP_TIMEOUT (1000000)
+
+/*==========================================================
+**
+** Defines for Linux.
+**
+** Linux and Bsd kernel functions are quite different.
+** These defines allow a minimum change of the original
+** code.
+**
+**==========================================================
+*/
+
+ /*
+ ** Obvious definitions
+ */
+
+#define printf printk
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef MACH
+typedef u_long vm_offset_t;
+typedef int vm_size_t;
+#endif
+
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#define bzero(d, n) memset((d), 0, (n))
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*
+** Address translation
+**
+** On Linux 1.3.X, virt_to_bus() must be used to translate
+** virtual memory addresses of the kernel data segment into
+** IO bus addresses.
+** On i386 architecture, IO bus addresses match the physical
+** addresses. But on other architectures they can be different.
+** In the original Bsd driver, vtophys() is called to translate
+** data addresses to IO bus addresses. In order to minimize
+** change, I decide to define vtophys() as virt_to_bus().
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+#define vtophys(p) virt_to_bus(p)
+
+/*
+** Memory mapped IO
+**
+** Since linux-2.1, we must use ioremap() to map the io memory space.
+** iounmap() to unmap it. That allows portability.
+** Linux 1.3.X and 2.0.X allow to remap physical pages addresses greater
+** than the highest physical memory address to kernel virtual pages with
+** vremap() / vfree(). That was not portable but worked with i386
+** architecture.
+*/
+
+#ifndef NCR_IOMAPPED
+__initfunc(
+static vm_offset_t remap_pci_mem(u_long base, u_long size)
+)
+{
+ u_long page_base = ((u_long) base) & PAGE_MASK;
+ u_long page_offs = ((u_long) base) - page_base;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+ u_long page_remapped = (u_long) ioremap(page_base, page_offs+size);
+#else
+ u_long page_remapped = (u_long) vremap(page_base, page_offs+size);
+#endif
+
+ return (vm_offset_t) (page_remapped ? (page_remapped + page_offs) : 0UL);
+}
+
+__initfunc(
+static void unmap_pci_mem(vm_offset_t vaddr, u_long size)
+)
+{
+ if (vaddr)
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+ iounmap((void *) (vaddr & PAGE_MASK));
+#else
+ vfree((void *) (vaddr & PAGE_MASK));
+#endif
+}
+#endif /* !NCR_IOMAPPED */
+
+#else /* linux-1.2.13 */
+
+/*
+** Linux 1.2.X assumes that addresses (virtual, physical, bus)
+** are the same.
+**
+** I have not found how to do MMIO. It seems that only processes can
+** map high physical pages to virtual (Xservers can do MMIO).
+*/
+
+#define vtophys(p) ((u_long) (p))
+#endif
+
+/*
+** Insert a delay in micro-seconds.
+*/
+
+static void DELAY(long us)
+{
+ for (;us>1000;us-=1000) udelay(1000);
+ if (us) udelay(us);
+}
+
+/*
+** Internal data structure allocation.
+**
+** Linux scsi memory poor pool is adjusted for the need of
+** middle-level scsi driver.
+** We allocate our control blocks in the kernel memory pool
+** to avoid scsi pool shortage.
+** I notice that kmalloc() returns NULL during host attach under
+** Linux 1.2.13. But this ncr driver is reliable enough to
+** accommodate this joke.
+**
+** kmalloc() only ensures 8-byte boundary alignment.
+** The NCR needs better alignment for cache line bursting.
+** The global header is moved between the NCB and CCBs and needs
+** origin and destination addresses to have same lower four bits.
+**
+** We use 32 boundary alignment for NCB and CCBs and offset multiple
+** of 32 for global header fields. That's too much but at least enough.
+*/
+
+#define ALIGN_SIZE(shift) (1UL << shift)
+#define ALIGN_MASK(shift) (~(ALIGN_SIZE(shift)-1))
+
+#define NCB_ALIGN_SHIFT 5
+#define CCB_ALIGN_SHIFT 5
+#define LCB_ALIGN_SHIFT 5
+#define SCR_ALIGN_SHIFT 5
+
+#define NCB_ALIGN_SIZE ALIGN_SIZE(NCB_ALIGN_SHIFT)
+#define NCB_ALIGN_MASK ALIGN_MASK(NCB_ALIGN_SHIFT)
+#define CCB_ALIGN_SIZE ALIGN_SIZE(CCB_ALIGN_SHIFT)
+#define CCB_ALIGN_MASK ALIGN_MASK(CCB_ALIGN_SHIFT)
+#define SCR_ALIGN_SIZE ALIGN_SIZE(SCR_ALIGN_SHIFT)
+#define SCR_ALIGN_MASK ALIGN_MASK(SCR_ALIGN_SHIFT)
+
+static void *m_alloc(int size, int a_shift)
+{
+ u_long addr;
+ void *ptr;
+ u_long a_size, a_mask;
+
+ if (a_shift < 3)
+ a_shift = 3;
+
+ a_size = ALIGN_SIZE(a_shift);
+ a_mask = ALIGN_MASK(a_shift);
+
+ ptr = (void *) kmalloc(size + a_size, GFP_ATOMIC);
+ if (ptr) {
+ addr = (((u_long) ptr) + a_size) & a_mask;
+ *((void **) (addr - sizeof(void *))) = ptr;
+ ptr = (void *) addr;
+ }
+
+ return ptr;
+}
+
+#ifdef MODULE
+static void m_free(void *ptr, int size)
+{
+ u_long addr;
+
+ if (ptr) {
+ addr = (u_long) ptr;
+ ptr = *((void **) (addr - sizeof(void *)));
+
+ kfree(ptr);
+ }
+}
+#endif
+
+/*
+** Transfer direction
+**
+** Low-level scsi drivers under Linux do not receive the expected
+** data transfer direction from upper scsi drivers.
+** The driver will only check actual data direction for common
+** scsi opcodes. Other ones may cause problem, since they may
+** depend on device type or be vendor specific.
+** I would prefer to never trust the device for data direction,
+** but that is not possible.
+**
+** The original driver requires the expected direction to be known.
+** The Linux version of the driver has been enhanced in order to
+** be able to transfer data in the direction chosen by the target.
+*/
+
+#define XferNone 0
+#define XferIn 1
+#define XferOut 2
+#define XferBoth 3
+static int guess_xfer_direction(int opcode);
+
+/*
+** Head of list of NCR boards
+**
+** For kernel version < 1.3.70, host is retrieved by its irq level.
+** For later kernels, the internal host control block address
+** (struct ncb) is used as device id parameter of the irq stuff.
+*/
+
+static struct Scsi_Host *first_host = NULL;
+static Scsi_Host_Template *the_template = NULL;
+
+
+/*
+** /proc directory entry and proc_info function
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+struct proc_dir_entry proc_scsi_ncr53c8xx = {
+ PROC_SCSI_NCR53C8XX, 9, "ncr53c8xx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+# ifdef SCSI_NCR_PROC_INFO_SUPPORT
+int ncr53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func);
+# endif
+#endif
+
+/*
+** Table of target capabilities.
+**
+** This bitmap is anded with the byte 7 of inquiry data on completion of
+** INQUIRY command.
+** The driver never sees zeroed bits and will ignore the corresponding
+** capabilities of the target.
+*/
+
+static struct {
+ unsigned char and_map[MAX_TARGET];
+} target_capabilities[SCSI_NCR_MAX_HOST] = { NCR53C8XX_TARGET_CAPABILITIES };
+
+/*
+** Driver setup.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+struct ncr_driver_setup {
+ unsigned master_parity : 1;
+ unsigned scsi_parity : 1;
+ unsigned disconnection : 1;
+ unsigned special_features : 2;
+ unsigned ultra_scsi : 2;
+ unsigned force_sync_nego: 1;
+ unsigned reverse_probe: 1;
+ unsigned pci_fix_up: 4;
+ u_char use_nvram;
+ u_char verbose;
+ u_char default_tags;
+ u_short default_sync;
+ u_short debug;
+ u_char burst_max;
+ u_char led_pin;
+ u_char max_wide;
+ u_char settle_delay;
+ u_char diff_support;
+ u_char irqm;
+ u_char bus_check;
+};
+
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+#ifdef MODULE
+char *ncr53c8xx = 0; /* command line passed by insmod */
+#endif
+#endif
+
+/*
+** Other Linux definitions
+*/
+
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) + ((scsi_code) & 0x7f))
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+static void ncr53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist);
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+static void ncr53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs);
+#else
+static void ncr53c8xx_intr(int irq, struct pt_regs * regs);
+#endif
+
+static void ncr53c8xx_timeout(unsigned long np);
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+** Symbios NvRAM data format
+*/
+#define SYMBIOS_NVRAM_SIZE 368
+#define SYMBIOS_NVRAM_ADDRESS 0x100
+
+struct Symbios_nvram {
+/* Header 6 bytes */
+ u_short start_marker; /* 0x0000 */
+ u_short byte_count; /* excluding header/trailer */
+ u_short checksum;
+
+/* Controller set up 20 bytes */
+ u_short word0; /* 0x3000 */
+ u_short word2; /* 0x0000 */
+ u_short word4; /* 0x0000 */
+ u_short flags;
+#define SYMBIOS_SCAM_ENABLE (1)
+#define SYMBIOS_PARITY_ENABLE (1<<1)
+#define SYMBIOS_VERBOSE_MSGS (1<<2)
+ u_short flags1;
+#define SYMBIOS_SCAN_HI_LO (1)
+ u_short word10; /* 0x00 */
+ u_short flags3; /* 0x00 */
+#define SYMBIOS_REMOVABLE_FLAGS (3) /* 0=none, 1=bootable, 2=all */
+ u_char host_id;
+ u_char byte15; /* 0x04 */
+ u_short word16; /* 0x0410 */
+ u_short word18; /* 0x0000 */
+
+/* Boot order 14 bytes * 4 */
+ struct Symbios_host{
+ u_char word0; /* 0x0004:ok / 0x0000:nok */
+ u_short device_id; /* PCI device id */
+ u_short vendor_id; /* PCI vendor id */
+ u_char byte6; /* 0x00 */
+ u_char device_fn; /* PCI device/function number << 3*/
+ u_short word8;
+ u_short flags;
+#define SYMBIOS_INIT_SCAN_AT_BOOT (1)
+ u_short io_port; /* PCI io_port address */
+ } host[4];
+
+/* Targets 8 bytes * 16 */
+ struct Symbios_target {
+ u_short flags;
+#define SYMBIOS_DISCONNECT_ENABLE (1)
+#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1)
+#define SYMBIOS_SCAN_LUNS (1<<2)
+#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3)
+ u_char bus_width; /* 0x08/0x10 */
+ u_char sync_offset;
+ u_char sync_period; /* 4*period factor */
+ u_char byte6; /* 0x00 */
+ u_short timeout;
+ } target[16];
+ u_char spare_devices[19*8];
+ u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
+};
+typedef struct Symbios_nvram Symbios_nvram;
+typedef struct Symbios_host Symbios_host;
+typedef struct Symbios_target Symbios_target;
+
+/*
+** Tekram NvRAM data format.
+*/
+#define TEKRAM_NVRAM_SIZE 64
+#define TEKRAM_NVRAM_ADDRESS 0
+
+struct Tekram_nvram {
+ struct Tekram_target {
+ u_char flags;
+#define TEKRAM_PARITY_CHECK (1)
+#define TEKRAM_SYNC_NEGO (1<<1)
+#define TEKRAM_DISCONNECT_ENABLE (1<<2)
+#define TEKRAM_START_CMD (1<<3)
+#define TEKRAM_TAGGED_COMMANDS (1<<4)
+#define TEKRAM_WIDE_NEGO (1<<5)
+ u_char sync_index;
+ u_short word2;
+ } target[16];
+ u_char host_id;
+ u_char flags;
+#define TEKRAM_MORE_THAN_2_DRIVES (1)
+#define TEKRAM_DRIVES_SUP_1GB (1<<1)
+#define TEKRAM_RESET_ON_POWER_ON (1<<2)
+#define TEKRAM_ACTIVE_NEGATION (1<<3)
+#define TEKRAM_IMMEDIATE_SEEK (1<<4)
+#define TEKRAM_SCAN_LUNS (1<<5)
+#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; 1: boot device; 2:all */
+ u_char boot_delay_index;
+ u_char max_tags_index;
+ u_short flags1;
+#define TEKRAM_F2_F6_ENABLED (1)
+ u_short spare[29];
+};
+typedef struct Tekram_nvram Tekram_nvram;
+typedef struct Tekram_target Tekram_target;
+
+static u_char Tekram_sync[12] __initdata = {25,31,37,43,50,62,75,125,12,15,18,21};
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Structures used by ncr53c8xx_detect/ncr53c8xx_pci_init to
+** transmit device configuration to the ncr_attach() function.
+*/
+typedef struct {
+ int bus;
+ u_char device_fn;
+ u_int base;
+ u_int base_2;
+ u_int io_port;
+ int irq;
+/* port and reg fields to use INB, OUTB macros */
+ u_int port;
+ volatile struct ncr_reg *reg;
+} ncr_slot;
+
+typedef struct {
+ int type;
+#define SCSI_NCR_SYMBIOS_NVRAM (1)
+#define SCSI_NCR_TEKRAM_NVRAM (2)
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ } data;
+#endif
+} ncr_nvram;
+
+/*
+** Structure used by ncr53c8xx_detect/ncr53c8xx_pci_init
+** to save data on each detected board for ncr_attach().
+*/
+typedef struct {
+ ncr_slot slot;
+ ncr_chip chip;
+ ncr_nvram *nvram;
+ int attach_done;
+} ncr_device;
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_POLL (0x0004)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_SCATTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_FREEZE (0x0800)
+#define DEBUG_RESTART (0x1000)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)printf(\
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+
+/*==========================================================
+**
+** Big/Little endian support.
+**
+**==========================================================
+*/
+
+/*
+** If the NCR uses big endian addressing mode over the
+** PCI, actual io register addresses for byte and word
+** accesses must be changed according to lane routing.
+** Btw, ncr_offb() and ncr_offw() macros only apply to
+** constants and so do not generate bloated code.
+*/
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+
+#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
+#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
+
+#else
+
+#define ncr_offb(o) (o)
+#define ncr_offw(o) (o)
+
+#endif
+
+/*
+** If the CPU and the NCR use same endian-ness addressing,
+** no byte reordering is needed for script patching.
+** Macro cpu_to_scr() is to be used for script patching.
+** Macro scr_to_cpu() is to be used for getting a DWORD
+** from the script.
+*/
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_be32(dw)
+#define scr_to_cpu(dw) be32_to_cpu(dw)
+
+#else
+
+#define cpu_to_scr(dw) (dw)
+#define scr_to_cpu(dw) (dw)
+
+#endif
+
+/*==========================================================
+**
+** Access to the controller chip.
+**
+** If NCR_IOMAPPED is defined, only IO are used by the driver.
+**
+**==========================================================
+*/
+
+/*
+** If the CPU and the NCR use same endian-ness addressing,
+** no byte reordering is needed for accessing chip io
+** registers. Functions suffixed by '_raw' are assumed
+** to access the chip over the PCI without doing byte
+** reordering. Functions suffixed by '_l2b' are
+** assumed to perform little-endian to big-endian byte
+** reordering, those suffixed by '_b2l' blah, blah,
+** blah, ...
+*/
+
+#if defined(NCR_IOMAPPED)
+
+/*
+** IO mapped only input / output
+*/
+
+#define INB_OFF(o) inb (np->port + ncr_offb(o))
+#define OUTB_OFF(o, val) outb ((val), np->port + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_l2b (np->port + ncr_offw(o))
+#define INL_OFF(o) inl_l2b (np->port + (o))
+
+#define OUTW_OFF(o, val) outw_b2l ((val), np->port + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_b2l ((val), np->port + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_b2l (np->port + ncr_offw(o))
+#define INL_OFF(o) inl_b2l (np->port + (o))
+
+#define OUTW_OFF(o, val) outw_l2b ((val), np->port + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_l2b ((val), np->port + (o))
+
+#else
+
+#define INW_OFF(o) inw_raw (np->port + ncr_offw(o))
+#define INL_OFF(o) inl_raw (np->port + (o))
+
+#define OUTW_OFF(o, val) outw_raw ((val), np->port + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_raw ((val), np->port + (o))
+
+#endif /* ENDIANs */
+
+#else /* defined NCR_IOMAPPED */
+
+/*
+** MEMORY mapped IO input / output
+*/
+
+#define INB_OFF(o) readb((char *)np->reg + ncr_offb(o))
+#define OUTB_OFF(o, val) writeb((val), (char *)np->reg + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
+
+#else
+
+#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_raw((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
+
+#endif
+
+#endif /* defined NCR_IOMAPPED */
+
+#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
+#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
+#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
+
+#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
+
+/*
+** Set bit field ON, OFF
+*/
+
+#define OUTONB(r, m) OUTB(r, INB(r) | (m))
+#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
+#define OUTONW(r, m) OUTW(r, INW(r) | (m))
+#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
+#define OUTONL(r, m) OUTL(r, INL(r) | (m))
+#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
+
+
+/*==========================================================
+**
+** Command control block states.
+**
+**==========================================================
+*/
+
+#define HS_IDLE (0)
+#define HS_BUSY (1)
+#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
+#define HS_DISCONNECT (3) /* Disconnected by target */
+
+#define HS_COMPLETE (4)
+#define HS_SEL_TIMEOUT (5) /* Selection timeout */
+#define HS_RESET (6) /* SCSI reset */
+#define HS_ABORTED (7) /* Transfer aborted */
+#define HS_TIMEOUT (8) /* Software timeout */
+#define HS_FAIL (9) /* SCSI or PCI bus errors */
+#define HS_UNEXPECTED (10) /* Unexpected disconnect */
+
+#define HS_DONEMASK (0xfc)
+
+/*==========================================================
+**
+** Software Interrupt Codes
+**
+**==========================================================
+*/
+
+#define SIR_SENSE_RESTART (1)
+#define SIR_SENSE_FAILED (2)
+#define SIR_STALL_RESTART (3)
+#define SIR_STALL_QUEUE (4)
+#define SIR_NEGO_SYNC (5)
+#define SIR_NEGO_WIDE (6)
+#define SIR_NEGO_FAILED (7)
+#define SIR_NEGO_PROTO (8)
+#define SIR_REJECT_RECEIVED (9)
+#define SIR_REJECT_SENT (10)
+#define SIR_IGN_RESIDUE (11)
+#define SIR_MISSING_SAVE (12)
+#define SIR_DATA_IO_IS_OUT (13)
+#define SIR_DATA_IO_IS_IN (14)
+#define SIR_MAX (14)
+
+/*==========================================================
+**
+** Extended error codes.
+** xerr_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define XE_OK (0)
+#define XE_EXTRA_DATA (1) /* unexpected data phase */
+#define XE_BAD_PHASE (2) /* illegal phase (4/5) */
+
+/*==========================================================
+**
+** Negotiation status.
+** nego_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define NS_SYNC (1)
+#define NS_WIDE (2)
+
+/*==========================================================
+**
+** "Special features" of targets.
+** quirks field of struct tcb.
+** actualquirks field of struct ccb.
+**
+**==========================================================
+*/
+
+#define QUIRK_AUTOSAVE (0x01)
+#define QUIRK_NOMSG (0x02)
+#define QUIRK_NOSYNC (0x10)
+#define QUIRK_NOWIDE16 (0x20)
+#define QUIRK_UPDATE (0x80)
+
+/*==========================================================
+**
+** Capability bits in Inquire response byte 7.
+**
+**==========================================================
+*/
+
+#define INQ7_QUEUE (0x02)
+#define INQ7_SYNC (0x10)
+#define INQ7_WIDE16 (0x20)
+
+/*==========================================================
+**
+** Misc.
+**
+**==========================================================
+*/
+
+#define CCB_MAGIC (0xf2691ad2)
+
+/*==========================================================
+**
+** Declaration of structs.
+**
+**==========================================================
+*/
+
+struct tcb;
+struct lcb;
+struct ccb;
+struct ncb;
+struct script;
+
+typedef struct ncb * ncb_p;
+typedef struct tcb * tcb_p;
+typedef struct lcb * lcb_p;
+typedef struct ccb * ccb_p;
+
+struct link {
+ ncrcmd l_cmd;
+ ncrcmd l_paddr;
+};
+
+struct usrcmd {
+ u_long target;
+ u_long lun;
+ u_long data;
+ u_long cmd;
+};
+
+#define UC_SETSYNC 10
+#define UC_SETTAGS 11
+#define UC_SETDEBUG 12
+#define UC_SETORDER 13
+#define UC_SETWIDE 14
+#define UC_SETFLAG 15
+#define UC_CLEARPROF 16
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+#define UC_DEBUG_ERROR_RECOVERY 17
+#endif
+
+#define UF_TRACE (0x01)
+#define UF_NODISC (0x02)
+#define UF_NOSCAN (0x04)
+
+/*---------------------------------------
+**
+** Timestamps for profiling
+**
+**---------------------------------------
+*/
+
+struct tstamp {
+ u_long start;
+ u_long end;
+ u_long select;
+ u_long command;
+ u_long status;
+ u_long disconnect;
+ u_long reselect;
+};
+
+/*
+** profiling data (per device)
+*/
+
+struct profile {
+ u_long num_trans;
+ u_long num_kbytes;
+ u_long rest_bytes;
+ u_long num_disc;
+ u_long num_break;
+ u_long num_int;
+ u_long num_fly;
+ u_long ms_setup;
+ u_long ms_data;
+ u_long ms_disc;
+ u_long ms_post;
+};
+
+/*==========================================================
+**
+** Declaration of structs: target control block
+**
+**==========================================================
+*/
+
+struct tcb {
+ /*
+ ** during reselection the ncr jumps to this point
+ ** with SFBR set to the encoded target number
+ ** with bit 7 set.
+ ** if it's not this target, jump to the next.
+ **
+ ** JUMP IF (SFBR != #target#)
+ ** @(next tcb)
+ */
+
+ struct link jump_tcb;
+
+ /*
+ ** load the actual values for the sxfer and the scntl3
+ ** register (sync/wide mode).
+ **
+ ** SCR_COPY (1);
+ ** @(sval field of this tcb)
+ ** @(sxfer register)
+ ** SCR_COPY (1);
+ ** @(wval field of this tcb)
+ ** @(scntl3 register)
+ */
+
+ ncrcmd getscr[6];
+
+ /*
+ ** if next message is "identify"
+ ** then load the message to SFBR,
+ ** else load 0 to SFBR.
+ **
+ ** CALL
+ ** <RESEL_LUN>
+ */
+
+ struct link call_lun;
+
+ /*
+ ** now look for the right lun.
+ **
+ ** JUMP
+ ** @(first ccb of this lun)
+ */
+
+ struct link jump_lcb;
+
+ /*
+ ** pointer to interrupted getcc ccb
+ */
+
+ ccb_p hold_cp;
+
+ /*
+ ** pointer to ccb used for negotiating.
+ ** Avoid to start a nego for all queued commands
+ ** when tagged command queuing is enabled.
+ */
+
+ ccb_p nego_cp;
+
+ /*
+ ** statistical data
+ */
+
+ u_long transfers;
+ u_long bytes;
+
+ /*
+ ** user settable limits for sync transfer
+ ** and tagged commands.
+ ** These limits are read from the NVRAM if present.
+ */
+
+ u_char usrsync;
+ u_char usrwide;
+ u_char usrtags;
+ u_char usrflag;
+
+ u_char numtags;
+ u_char maxtags;
+ u_short num_good;
+
+ /*
+ ** negotiation of wide and synch transfer.
+ ** device quirks.
+ */
+
+/*0*/ u_char minsync;
+/*1*/ u_char sval;
+/*2*/ u_short period;
+/*0*/ u_char maxoffs;
+
+/*1*/ u_char quirks;
+
+/*2*/ u_char widedone;
+/*3*/ u_char wval;
+ /*
+ ** inquire data
+ */
+#define MAX_INQUIRE 36
+ u_char inqdata[MAX_INQUIRE];
+
+ /*
+ ** the lcb's of this tcb
+ */
+
+ lcb_p lp[MAX_LUN];
+};
+
+/*==========================================================
+**
+** Declaration of structs: lun control block
+**
+**==========================================================
+*/
+
+struct lcb {
+ /*
+ ** during reselection the ncr jumps to this point
+ ** with SFBR set to the "Identify" message.
+ ** if it's not this lun, jump to the next.
+ **
+ ** JUMP IF (SFBR != #lun#)
+ ** @(next lcb of this target)
+ */
+
+ struct link jump_lcb;
+
+ /*
+ ** if next message is "simple tag",
+ ** then load the tag to SFBR,
+ ** else load 0 to SFBR.
+ **
+ ** CALL
+ ** <RESEL_TAG>
+ */
+
+ struct link call_tag;
+
+ /*
+ ** now look for the right ccb.
+ **
+ ** JUMP
+ ** @(first ccb of this lun)
+ */
+
+ struct link jump_ccb;
+
+ /*
+ ** start of the ccb chain
+ */
+
+ ccb_p next_ccb;
+
+ /*
+ ** Control of tagged queueing
+ */
+
+ u_char reqccbs;
+ u_char actccbs;
+ u_char reqlink;
+ u_char actlink;
+ u_char usetags;
+ u_char lasttag;
+
+ /*
+ ** Linux specific fields:
+ ** Number of active commands and current credit.
+ ** Should be managed by the generic scsi driver
+ */
+
+ u_char active;
+ u_char opennings;
+
+ /*-----------------------------------------------
+ ** Flag to force M_ORDERED_TAG on next command
+ ** in order to avoid spurious timeout when
+ ** M_SIMPLE_TAG is used for all operations.
+ **-----------------------------------------------
+ */
+ u_char force_ordered_tag;
+#define NCR_TIMEOUT_INCREASE (5*HZ)
+};
+
+/*==========================================================
+**
+** Declaration of structs: COMMAND control block
+**
+**==========================================================
+**
+** This substructure is copied from the ccb to a
+** global address after selection (or reselection)
+** and copied back before disconnect.
+**
+** These fields are accessible to the script processor.
+**
+**----------------------------------------------------------
+*/
+
+struct head {
+ /*
+ ** Execution of a ccb starts at this point.
+ ** It's a jump to the "SELECT" label
+ ** of the script.
+ **
+ ** After successful selection the script
+ ** processor overwrites it with a jump to
+ ** the IDLE label of the script.
+ */
+
+ struct link launch;
+
+ /*
+ ** Saved data pointer.
+ ** Points to the position in the script
+ ** responsible for the actual transfer
+ ** of data.
+ ** It's written after reception of a
+ ** "SAVE_DATA_POINTER" message.
+ ** The goalpointer points after
+ ** the last transfer command.
+ */
+
+ u_int32 savep;
+ u_int32 lastp;
+ u_int32 goalp;
+
+ /*
+ ** The virtual address of the ccb
+ ** containing this header.
+ */
+
+ ccb_p cp;
+
+ /*
+ ** space for some timestamps to gather
+ ** profiling data about devices and this driver.
+ */
+
+ struct tstamp stamp;
+
+ /*
+ ** status fields.
+ */
+
+ u_char scr_st[4]; /* script status */
+ u_char status[4]; /* host status. Must be the last */
+ /* DWORD of the CCB header */
+};
+
+/*
+** The status bytes are used by the host and the script processor.
+**
+** The byte corresponding to the host_status must be stored in the
+** last DWORD of the CCB header since it is used for command
+** completion (ncr_wakeup()). Doing so, we are sure that the header
+** has been entirely copied back to the CCB when the host_status is
+** seen complete by the CPU.
+**
+** The last four bytes (status[4]) are copied to the scratchb register
+** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect,
+** and copied back just after disconnecting.
+** Inside the script the XX_REG are used.
+**
+** The first four bytes (scr_st[4]) are used inside the script by
+** "COPY" commands.
+** Because source and destination must have the same alignment
+** in a DWORD, the fields HAVE to be at the chosen offsets.
+** xerr_st 0 (0x34) scratcha
+** sync_st 1 (0x05) sxfer
+** wide_st 3 (0x03) scntl3
+*/
+
+/*
+** Last four bytes (script)
+*/
+#define QU_REG scr0
+#define HS_REG scr1
+#define HS_PRT nc_scr1
+#define SS_REG scr2
+#define PS_REG scr3
+
+/*
+** Last four bytes (host)
+*/
+#define actualquirks phys.header.status[0]
+#define host_status phys.header.status[1]
+#define scsi_status phys.header.status[2]
+#define parity_status phys.header.status[3]
+
+/*
+** First four bytes (script)
+*/
+#define xerr_st header.scr_st[0]
+#define sync_st header.scr_st[1]
+#define nego_st header.scr_st[2]
+#define wide_st header.scr_st[3]
+
+/*
+** First four bytes (host)
+*/
+#define xerr_status phys.xerr_st
+#define sync_status phys.sync_st
+#define nego_status phys.nego_st
+#define wide_status phys.wide_st
+
+/*==========================================================
+**
+** Declaration of structs: Data structure block
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+** the script-processor-changable data and
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+struct dsb {
+
+ /*
+ ** Header.
+ ** Has to be the first entry,
+ ** because it's jumped to by the
+ ** script processor
+ */
+
+ struct head header;
+
+ /*
+ ** Table data for Script
+ */
+
+ struct scr_tblsel select;
+ struct scr_tblmove smsg ;
+ struct scr_tblmove smsg2 ;
+ struct scr_tblmove cmd ;
+ struct scr_tblmove scmd ;
+ struct scr_tblmove sense ;
+ struct scr_tblmove data [MAX_SCATTER];
+};
+
+/*==========================================================
+**
+** Declaration of structs: Command control block.
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+** the script-processor-changable data and then
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+
+struct ccb {
+ /*
+ ** This field forces 32 bytes alignement for phys.header,
+ ** in order to use cache line bursting when copying it
+ ** to the ncb.
+ */
+
+ struct link filler[2];
+
+ /*
+ ** during reselection the ncr jumps to this point.
+ ** If a "SIMPLE_TAG" message was received,
+ ** then SFBR is set to the tag.
+ ** else SFBR is set to 0
+ ** If looking for another tag, jump to the next ccb.
+ **
+ ** JUMP IF (SFBR != #TAG#)
+ ** @(next ccb of this lun)
+ */
+
+ struct link jump_ccb;
+
+ /*
+ ** After execution of this call, the return address
+ ** (in the TEMP register) points to the following
+ ** data structure block.
+ ** So copy it to the DSA register, and start
+ ** processing of this data structure.
+ **
+ ** CALL
+ ** <RESEL_TMP>
+ */
+
+ struct link call_tmp;
+
+ /*
+ ** This is the data structure which is
+ ** to be executed by the script processor.
+ */
+
+ struct dsb phys;
+
+ /*
+ ** If a data transfer phase is terminated too early
+ ** (after reception of a message (i.e. DISCONNECT)),
+ ** we have to prepare a mini script to transfer
+ ** the rest of the data.
+ */
+
+ ncrcmd patch[8];
+
+ /*
+ ** The general SCSI driver provides a
+ ** pointer to a control block.
+ */
+
+ Scsi_Cmnd *cmd;
+ int data_len;
+
+ /*
+ ** We prepare a message to be sent after selection,
+ ** and a second one to be sent after getcc selection.
+ ** Contents are IDENTIFY and SIMPLE_TAG.
+ ** While negotiating sync or wide transfer,
+ ** a SDTM or WDTM message is appended.
+ */
+
+ u_char scsi_smsg [8];
+ u_char scsi_smsg2[8];
+
+ /*
+ ** Lock this ccb.
+ ** Flag is used while looking for a free ccb.
+ */
+
+ u_long magic;
+
+ /*
+ ** Physical address of this instance of ccb
+ */
+
+ u_long p_ccb;
+
+ /*
+ ** Completion time out for this job.
+ ** It's set to time of start + allowed number of seconds.
+ */
+
+ u_long tlimit;
+
+ /*
+ ** All ccbs of one hostadapter are chained.
+ */
+
+ ccb_p link_ccb;
+
+ /*
+ ** All ccbs of one target/lun are chained.
+ */
+
+ ccb_p next_ccb;
+
+ /*
+ ** Sense command
+ */
+
+ u_char sensecmd[6];
+
+ /*
+ ** Tag for this transfer.
+ ** It's patched into jump_ccb.
+ ** If it's not zero, a SIMPLE_TAG
+ ** message is included in smsg.
+ */
+
+ u_char tag;
+
+ /*
+ ** Number of segments of the scatter list.
+ ** Used for recalculation of savep/goalp/lastp on
+ ** SIR_DATA_IO_IS_OUT interrupt.
+ */
+
+ u_char segments;
+};
+
+#define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
+
+/*==========================================================
+**
+** Declaration of structs: NCR device descriptor
+**
+**==========================================================
+*/
+
+struct ncb {
+ /*
+ ** The global header.
+ ** Accessible to both the host and the
+ ** script-processor.
+ ** Is 32 bytes aligned since ncb is, in order to
+ ** allow cache line bursting when copying it from or
+ ** to ccbs.
+ */
+ struct head header;
+
+ /*-----------------------------------------------
+ ** Specific Linux fields
+ **-----------------------------------------------
+ */
+ int unit; /* Unit number */
+ char chip_name[8]; /* Chip name */
+ char inst_name[16]; /* Instance name */
+ struct timer_list timer; /* Timer link header */
+ int ncr_cache; /* Cache test variable */
+ Scsi_Cmnd *waiting_list; /* Waiting list header for commands */
+ /* that we can't put into the squeue */
+ u_long settle_time; /* Reset in progess */
+ u_char release_stage; /* Synchronisation stage on release */
+ u_char verbose; /* Boot verbosity for this controller*/
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ u_char debug_error_recovery;
+ u_char stalling;
+ u_char assert_atn;
+#endif
+
+ /*-----------------------------------------------
+ ** Added field to support differences
+ ** between ncr chips.
+ ** sv_xxx are some io register bit value at start-up and
+ ** so assumed to have been set by the sdms bios.
+ ** rv_xxx are the bit fields of io register that will keep
+ ** the features used by the driver.
+ **-----------------------------------------------
+ */
+ u_short device_id;
+ u_char revision_id;
+
+ u_char sv_scntl0;
+ u_char sv_scntl3;
+ u_char sv_dmode;
+ u_char sv_dcntl;
+ u_char sv_ctest3;
+ u_char sv_ctest4;
+ u_char sv_ctest5;
+ u_char sv_gpcntl;
+ u_char sv_stest2;
+ u_char sv_stest4;
+
+ u_char rv_scntl0;
+ u_char rv_scntl3;
+ u_char rv_dmode;
+ u_char rv_dcntl;
+ u_char rv_ctest3;
+ u_char rv_ctest4;
+ u_char rv_ctest5;
+ u_char rv_stest2;
+
+ u_char scsi_mode;
+
+ /*-----------------------------------------------
+ ** Scripts ..
+ **-----------------------------------------------
+ **
+ ** During reselection the ncr jumps to this point.
+ ** The SFBR register is loaded with the encoded target id.
+ **
+ ** Jump to the first target.
+ **
+ ** JUMP
+ ** @(next tcb)
+ */
+ struct link jump_tcb;
+
+ /*-----------------------------------------------
+ ** Configuration ..
+ **-----------------------------------------------
+ **
+ ** virtual and physical addresses
+ ** of the 53c810 chip.
+ */
+ vm_offset_t vaddr;
+ vm_offset_t paddr;
+
+ vm_offset_t vaddr2;
+ vm_offset_t paddr2;
+
+ /*
+ ** pointer to the chip's registers.
+ */
+ volatile
+ struct ncr_reg* reg;
+
+ /*
+ ** A copy of the scripts, relocated for this ncb.
+ */
+ struct script *script0;
+ struct scripth *scripth0;
+
+ /*
+ ** Scripts instance virtual address.
+ */
+ struct script *script;
+ struct scripth *scripth;
+
+ /*
+ ** Scripts instance physical address.
+ */
+ u_long p_script;
+ u_long p_scripth;
+
+ /*
+ ** The SCSI address of the host adapter.
+ */
+ u_char myaddr;
+
+ /*
+ ** Max dwords burst supported by the adapter.
+ */
+ u_char maxburst; /* log base 2 of dwords burst */
+
+ /*
+ ** timing parameters
+ */
+ u_char minsync; /* Minimum sync period factor */
+ u_char maxsync; /* Maximum sync period factor */
+ u_char maxoffs; /* Max scsi offset */
+ u_char multiplier; /* Clock multiplier (1,2,4) */
+ u_char clock_divn; /* Number of clock divisors */
+ u_long clock_khz; /* SCSI clock frequency in KHz */
+ u_int features; /* Chip features map */
+
+
+ /*-----------------------------------------------
+ ** Link to the generic SCSI driver
+ **-----------------------------------------------
+ */
+
+ /* struct scsi_link sc_link; */
+
+ /*-----------------------------------------------
+ ** Job control
+ **-----------------------------------------------
+ **
+ ** Commands from user
+ */
+ struct usrcmd user;
+ u_char order;
+
+ /*
+ ** Target data
+ */
+ struct tcb target[MAX_TARGET];
+
+ /*
+ ** Start queue.
+ */
+ u_int32 squeue [MAX_START];
+ u_short squeueput;
+ u_short actccbs;
+
+ /*
+ ** Timeout handler
+ */
+#if 0
+ u_long heartbeat;
+ u_short ticks;
+ u_short latetime;
+#endif
+ u_long lasttime;
+
+ /*-----------------------------------------------
+ ** Debug and profiling
+ **-----------------------------------------------
+ **
+ ** register dump
+ */
+ struct ncr_reg regdump;
+ u_long regtime;
+
+ /*
+ ** Profiling data
+ */
+ struct profile profile;
+ u_long disc_phys;
+ u_long disc_ref;
+
+ /*
+ ** The global control block.
+ ** It's used only during the configuration phase.
+ ** A target control block will be created
+ ** after the first successful transfer.
+ */
+ struct ccb *ccb;
+
+ /*
+ ** message buffers.
+ ** Should be longword aligned,
+ ** because they're written with a
+ ** COPY script command.
+ */
+ u_char msgout[8];
+ u_char msgin [8];
+ u_int32 lastmsg;
+
+ /*
+ ** Buffer for STATUS_IN phase.
+ */
+ u_char scratch;
+
+ /*
+ ** controller chip dependent maximal transfer width.
+ */
+ u_char maxwide;
+
+ /*
+ ** option for M_IDENTIFY message: enables disconnecting
+ */
+ u_char disc;
+
+ /*
+ ** address of the ncr control registers in io space
+ */
+ u_int port;
+
+ /*
+ ** irq level
+ */
+ u_short irq;
+};
+
+/*
+** Compute the physical (bus) address of a script label for controller
+** instance `np`: base physical address of the bound script copy plus
+** the label's byte offset inside the corresponding struct.
+*/
+#define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl))
+#define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth, lbl))
+
+/*==========================================================
+**
+**
+** Script for NCR-Processor.
+**
+** Use ncr_script_fill() to create the variable parts.
+** Use ncr_script_copy_and_bind() to make a copy and
+** bind to physical addresses.
+**
+**
+**==========================================================
+**
+** We have to know the offsets of all labels before
+** we reach them (for forward jumps).
+** Therefore we declare a struct here.
+** If you make changes inside the script,
+** DON'T FORGET TO CHANGE THE LENGTHS HERE!
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Script fragments which are loaded into the on-board RAM
+** of 825A, 875 and 895 chips.
+*/
+/*
+** One array per script label; the array length is the number of
+** ncrcmd words in that fragment.  These lengths MUST match the
+** actual instruction counts in script0 below (see the warning in
+** the banner comment above).
+*/
+struct script {
+	ncrcmd	start		[  4];
+	ncrcmd	start0		[  2];
+	ncrcmd	start1		[  3];
+	ncrcmd  startpos	[  1];
+	ncrcmd	trysel		[  8];
+	ncrcmd	skip		[  8];
+	ncrcmd	skip2		[  3];
+	ncrcmd  idle		[  2];
+	ncrcmd	select		[ 22];
+	ncrcmd	prepare		[  4];
+	ncrcmd	loadpos		[ 14];
+	ncrcmd	prepare2	[ 24];
+	ncrcmd	setmsg		[  5];
+	ncrcmd  clrack		[  2];
+	ncrcmd  dispatch	[ 38];
+	ncrcmd	no_data		[ 17];
+	ncrcmd  checkatn	[ 10];
+	ncrcmd  command		[ 15];
+	ncrcmd  status		[ 27];
+	ncrcmd  msg_in		[ 26];
+	ncrcmd  msg_bad		[  6];
+	ncrcmd	complete	[ 13];
+	ncrcmd	cleanup		[ 12];
+	ncrcmd	cleanup0	[ 11];
+	ncrcmd	signal		[ 10];
+	ncrcmd  save_dp		[  5];
+	ncrcmd  restore_dp	[  5];
+	ncrcmd  disconnect	[ 12];
+	ncrcmd  disconnect0	[  5];
+	ncrcmd  disconnect1	[ 23];
+	ncrcmd	msg_out		[  9];
+	ncrcmd	msg_out_done	[  7];
+	ncrcmd  badgetcc	[  6];
+	ncrcmd	reselect	[  8];
+	ncrcmd	reselect1	[  8];
+	ncrcmd	reselect2	[  8];
+	ncrcmd	resel_tmp	[  5];
+	ncrcmd  resel_lun	[ 18];
+	ncrcmd	resel_tag	[ 24];
+	ncrcmd  data_io		[  6];
+	/* Filled in at runtime by ncr_script_fill(): size depends on MAX_SCATTER. */
+	ncrcmd  data_in		[MAX_SCATTER * 4 + 4];
+};
+
+/*
+** Script fragments which stay in main memory for all chips.
+*/
+/*
+** Script fragments which stay in main memory for all chips.
+** As with struct script, each array length MUST match the
+** instruction count of the corresponding fragment in scripth0.
+*/
+struct scripth {
+	/* Filled in at runtime by ncr_script_fill(): size depends on MAX_START. */
+	ncrcmd	tryloop		[MAX_START*5+2];
+	ncrcmd  msg_parity	[  6];
+	ncrcmd	msg_reject	[  8];
+	ncrcmd	msg_ign_residue	[ 32];
+	ncrcmd  msg_extended	[ 18];
+	ncrcmd  msg_ext_2	[ 18];
+	ncrcmd	msg_wdtr	[ 27];
+	ncrcmd  msg_ext_3	[ 18];
+	ncrcmd	msg_sdtr	[ 27];
+	ncrcmd	msg_out_abort	[ 10];
+	ncrcmd  getcc		[  4];
+	ncrcmd  getcc1		[  5];
+#ifdef NCR_GETCC_WITHMSG
+	ncrcmd	getcc2		[ 33];
+#else
+	ncrcmd	getcc2		[ 14];
+#endif
+	ncrcmd	getcc3		[ 10];
+	/* Filled in at runtime by ncr_script_fill(): size depends on MAX_SCATTER. */
+	ncrcmd  data_out	[MAX_SCATTER * 4 + 4];
+	ncrcmd	aborttag	[  4];
+	ncrcmd	abort		[ 22];
+	ncrcmd	snooptest	[  9];
+	ncrcmd	snoopend	[  2];
+};
+
+/*==========================================================
+**
+**
+** Function headers.
+**
+**
+**==========================================================
+*/
+
+/* CCB allocation / completion and interrupt service routines. */
+static	void	ncr_alloc_ccb	(ncb_p np, u_long t, u_long l);
+static	void	ncr_complete	(ncb_p np, ccb_p cp);
+static	void	ncr_exception	(ncb_p np);
+static	void	ncr_free_ccb	(ncb_p np, ccb_p cp, u_long t, u_long l);
+static	void	ncr_getclock	(ncb_p np, int mult);
+static	void	ncr_selectclock	(ncb_p np, u_char scntl3);
+static	ccb_p	ncr_get_ccb	(ncb_p np, u_long t,u_long l);
+static	void	ncr_init	(ncb_p np, int reset, char * msg, u_long code);
+static	int	ncr_int_sbmc	(ncb_p np);
+static	int	ncr_int_par	(ncb_p np);
+static	void	ncr_int_ma	(ncb_p np);
+static	void	ncr_int_sir	(ncb_p np);
+static	void	ncr_int_sto	(ncb_p np);
+static	u_long	ncr_lookup	(char* id);
+static	void	ncr_negotiate	(struct ncb* np, struct tcb* tp);
+static	void	ncr_opennings	(ncb_p np, lcb_p lp, Scsi_Cmnd * xp);
+
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+static	void	ncb_profile	(ncb_p np, ccb_p cp);
+#endif
+
+/* Script setup, scatter/gather, and negotiation (sync/wide/tags) helpers. */
+static	void	ncr_script_copy_and_bind
+				(ncb_p np, ncrcmd *src, ncrcmd *dst, int len);
+static  void    ncr_script_fill (struct script * scr, struct scripth * scripth);
+static	int	ncr_scatter	(ccb_p cp, Scsi_Cmnd *cmd);
+static	void	ncr_setmaxtags	(ncb_p np, tcb_p tp, u_long numtags);
+static	void	ncr_getsync	(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p);
+static	void	ncr_setsync	(ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer);
+static	void	ncr_settags     (tcb_p tp, lcb_p lp);
+static	void	ncr_setwide	(ncb_p np, ccb_p cp, u_char wide, u_char ack);
+static	int	ncr_show_msg	(u_char * msg);
+static	int	ncr_snooptest	(ncb_p np);
+static	void	ncr_timeout	(ncb_p np);
+static  void    ncr_wakeup      (ncb_p np, u_long code);
+static  void    ncr_start_reset	(ncb_p np, int settle_delay);
+static	int	ncr_reset_scsi_bus (ncb_p np, int enab_int, int settle_delay);
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+static	void	ncr_usercmd	(ncb_p np);
+#endif
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+
+/* Waiting-list handling for commands that could not enter the start queue. */
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd);
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd);
+static void process_waiting_list(ncb_p np, int sts);
+
+#define remove_from_waiting_list(np, cmd) \
+		retrieve_from_waiting_list(1, (np), (cmd))
+#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
+#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static	int	ncr_get_Symbios_nvram	(ncr_slot *np, Symbios_nvram *nvram);
+static	int	ncr_get_Tekram_nvram	(ncr_slot *np, Tekram_nvram *nvram);
+#endif
+
+/*==========================================================
+**
+**
+** Global static data.
+**
+**
+**==========================================================
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+/* Run-time debug flag mask, seeded from the compile-time default. */
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+#endif
+
+/*
+** Return the printable instance name string stored in the ncb
+** (used as the prefix for this controller's kernel messages).
+*/
+static inline char *ncr_name (ncb_p np)
+{
+	return np->inst_name;
+}
+
+
+/*==========================================================
+**
+**
+** Scripts for NCR-Processor.
+**
+** Use ncr_script_bind for binding to physical addresses.
+**
+**
+**==========================================================
+**
+** NADDR generates a reference to a field of the controller data.
+** PADDR generates a reference to another part of the script.
+** RADDR generates a reference to a script processor register.
+** FADDR generates a reference to a script processor register
+** with offset.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Relocation tags kept in the high nibble of a script word; they mark
+** the word as a reference that must be patched to a physical address
+** (presumably by ncr_script_copy_and_bind() — see prototype above).
+*/
+#define	RELOC_SOFTC	0x40000000
+#define	RELOC_LABEL	0x50000000
+#define	RELOC_REGISTER	0x60000000
+#define	RELOC_KVAR	0x70000000
+#define	RELOC_LABELH	0x80000000
+#define	RELOC_MASK	0xf0000000
+
+/* Encode a tagged reference: target field/label offset or register number. */
+#define	NADDR(label)	(RELOC_SOFTC | offsetof(struct ncb, label))
+#define PADDR(label)    (RELOC_LABEL | offsetof(struct script, label))
+#define PADDRH(label)   (RELOC_LABELH | offsetof(struct scripth, label))
+#define	RADDR(label)	(RELOC_REGISTER | REG(label))
+#define	FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
+#define	KVAR(which)	(RELOC_KVAR | (which))
+
+/* Index of each kernel variable in script_kvars[] below. */
+#define	SCRIPT_KVAR_JIFFIES	(0)
+
+#define	SCRIPT_KVAR_FIRST		SCRIPT_KVAR_JIFFIES
+#define	SCRIPT_KVAR_LAST		SCRIPT_KVAR_JIFFIES
+
+/*
+ * Kernel variables referenced in the scripts, indexed by the
+ * SCRIPT_KVAR_* constants above.
+ * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
+ */
+static void *script_kvars[] __initdata =
+	{ (void *)&jiffies };
+
+/*
+** The master copy of the on-chip SCRIPTS program.  It is copied and
+** relocated per controller instance; each brace group below must
+** contain exactly as many ncrcmd words as declared in struct script.
+*/
+static	struct script script0 __initdata = {
+/*--------------------------< START >-----------------------*/ {
+#if 0
+	/*
+	**	Claim to be still alive ...
+	*/
+	SCR_COPY (sizeof (((struct ncb *)0)->heartbeat)),
+		KVAR(SCRIPT_KVAR_JIFFIES),
+		NADDR (heartbeat),
+#endif
+	/*
+	**	Make data structure address invalid.
+	**	clear SIGP.
+	*/
+	SCR_LOAD_REG (dsa, 0xff),
+		0,
+	SCR_FROM_REG (ctest2),
+		0,
+}/*-------------------------< START0 >----------------------*/,{
+	/*
+	**	Hook for interrupted GetConditionCode.
+	**	Will be patched to ... IFTRUE by
+	**	the interrupt handler.
+	*/
+	SCR_INT ^ IFFALSE (0),
+		SIR_SENSE_RESTART,
+
+}/*-------------------------< START1 >----------------------*/,{
+	/*
+	**	Hook for stalled start queue.
+	**	Will be patched to IFTRUE by the interrupt handler.
+	*/
+	SCR_INT ^ IFFALSE (0),
+		SIR_STALL_RESTART,
+	/*
+	**	Then jump to a certain point in tryloop.
+	**	Due to the lack of indirect addressing the code
+	**	is self modifying here.
+	*/
+	SCR_JUMP,
+}/*-------------------------< STARTPOS >--------------------*/,{
+		PADDRH(tryloop),
+}/*-------------------------< TRYSEL >----------------------*/,{
+	/*
+	**	Now:
+	**	DSA: Address of a Data Structure
+	**	or   Address of the IDLE-Label.
+	**
+	**	TEMP:	Address of a script, which tries to
+	**		start the NEXT entry.
+	**
+	**	Save the TEMP register into the SCRATCHA register.
+	**	Then copy the DSA to TEMP and RETURN.
+	**	This is kind of an indirect jump.
+	**	(The script processor has NO stack, so the
+	**	CALL is actually a jump and link, and the
+	**	RETURN is an indirect jump.)
+	**
+	**	If the slot was empty, DSA contains the address
+	**	of the IDLE part of this script. The processor
+	**	jumps to IDLE and waits for a reselect.
+	**	It will wake up and try the same slot again
+	**	after the SIGP bit becomes set by the host.
+	**
+	**	If the slot was not empty, DSA contains
+	**	the address of the phys-part of a ccb.
+	**	The processor jumps to this address.
+	**	phys starts with head,
+	**	head starts with launch,
+	**	so actually the processor jumps to
+	**	the launch part.
+	**	If the entry is scheduled for execution,
+	**	then launch contains a jump to SELECT.
+	**	If it's not scheduled, it contains a jump to IDLE.
+	*/
+	SCR_COPY (4),
+		RADDR (temp),
+		RADDR (scratcha),
+	SCR_COPY (4),
+		RADDR (dsa),
+		RADDR (temp),
+	SCR_RETURN,
+		0
+
+}/*-------------------------< SKIP >------------------------*/,{
+	/*
+	**	This entry has been canceled.
+	**	Next time use the next slot.
+	*/
+	SCR_COPY (4),
+		RADDR (scratcha),
+		PADDR (startpos),
+	/*
+	**	patch the launch field.
+	**	should look like an idle process.
+	*/
+	SCR_COPY_F (4),
+		RADDR (dsa),
+		PADDR (skip2),
+	SCR_COPY (8),
+		PADDR (idle),
+}/*-------------------------< SKIP2 >-----------------------*/,{
+		0,
+	SCR_JUMP,
+		PADDR(start),
+}/*-------------------------< IDLE >------------------------*/,{
+	/*
+	**	Nothing to do?
+	**	Wait for reselect.
+	*/
+	SCR_JUMP,
+		PADDR(reselect),
+
+}/*-------------------------< SELECT >----------------------*/,{
+	/*
+	**	DSA	contains the address of a scheduled
+	**		data structure.
+	**
+	**	SCRATCHA contains the address of the script,
+	**		which starts the next entry.
+	**
+	**	Set Initiator mode.
+	**
+	**	(Target mode is left as an exercise for the reader)
+	*/
+
+	SCR_CLR (SCR_TRG),
+		0,
+	SCR_LOAD_REG (HS_REG, 0xff),
+		0,
+
+	/*
+	**      And try to select this target.
+	*/
+	SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+		PADDR (reselect),
+
+	/*
+	**	Now there are 4 possibilities:
+	**
+	**	(1) The ncr loses arbitration.
+	**	This is ok, because it will try again,
+	**	when the bus becomes idle.
+	**	(But beware of the timeout function!)
+	**
+	**	(2) The ncr is reselected.
+	**	Then the script processor takes the jump
+	**	to the RESELECT label.
+	**
+	**	(3) The ncr completes the selection.
+	**	Then it will execute the next statement.
+	**
+	**	(4) There is a selection timeout.
+	**	Then the ncr should interrupt the host and stop.
+	**	Unfortunately, it seems to continue execution
+	**	of the script. But it will fail with an
+	**	IID-interrupt on the next WHEN.
+	*/
+
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		0,
+
+	/*
+	**	Save target id to ctest0 register
+	*/
+
+	SCR_FROM_REG (sdid),
+		0,
+	SCR_TO_REG (ctest0),
+		0,
+	/*
+	**	Send the IDENTIFY and SIMPLE_TAG messages
+	**	(and the M_X_SYNC_REQ message)
+	*/
+	SCR_MOVE_TBL ^ SCR_MSG_OUT,
+		offsetof (struct dsb, smsg),
+#ifdef undef /* XXX better fail than try to deal with this ... */
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+		-16,
+#endif
+	SCR_CLR (SCR_ATN),
+		0,
+	SCR_COPY (1),
+		RADDR (sfbr),
+		NADDR (lastmsg),
+	/*
+	**	Selection complete.
+	**	Next time use the next slot.
+	*/
+	SCR_COPY (4),
+		RADDR (scratcha),
+		PADDR (startpos),
+}/*-------------------------< PREPARE >----------------------*/,{
+	/*
+	**      The ncr doesn't have an indirect load
+	**	or store command. So we have to
+	**	copy part of the control block to a
+	**	fixed place, where we can access it.
+	**
+	**	We patch the address part of a
+	**	COPY command with the DSA-register.
+	*/
+	SCR_COPY_F (4),
+		RADDR (dsa),
+		PADDR (loadpos),
+	/*
+	**	then we do the actual copy.
+	*/
+	SCR_COPY (sizeof (struct head)),
+	/*
+	**	continued after the next label ...
+	*/
+
+}/*-------------------------< LOADPOS >---------------------*/,{
+		0,
+		NADDR (header),
+	/*
+	**	Mark this ccb as not scheduled.
+	*/
+	SCR_COPY (8),
+		PADDR (idle),
+		NADDR (header.launch),
+	/*
+	**	Set a time stamp for this selection
+	*/
+	SCR_COPY (sizeof (u_long)),
+		KVAR(SCRIPT_KVAR_JIFFIES),
+		NADDR (header.stamp.select),
+	/*
+	**	load the savep (saved pointer) into
+	**	the TEMP register (actual pointer)
+	*/
+	SCR_COPY (4),
+		NADDR (header.savep),
+		RADDR (temp),
+	/*
+	**	Initialize the status registers
+	*/
+	SCR_COPY (4),
+		NADDR (header.status),
+		RADDR (scr0),
+
+}/*-------------------------< PREPARE2 >---------------------*/,{
+	/*
+	**      Load the synchronous mode register
+	*/
+	SCR_COPY (1),
+		NADDR (sync_st),
+		RADDR (sxfer),
+	/*
+	**	Load the wide mode and timing register
+	*/
+	SCR_COPY (1),
+		NADDR (wide_st),
+		RADDR (scntl3),
+	/*
+	**	Initialize the msgout buffer with a NOOP message.
+	*/
+	SCR_LOAD_REG (scratcha, M_NOOP),
+		0,
+	SCR_COPY (1),
+		RADDR (scratcha),
+		NADDR (msgout),
+	SCR_COPY (1),
+		RADDR (scratcha),
+		NADDR (msgin),
+	/*
+	**	Message in phase ?
+	*/
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR (dispatch),
+	/*
+	**	Extended or reject message ?
+	*/
+	SCR_FROM_REG (sbdl),
+		0,
+	SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+		PADDR (msg_in),
+	SCR_JUMP ^ IFTRUE (DATA (M_REJECT)),
+		PADDRH (msg_reject),
+	/*
+	**	normal processing
+	*/
+	SCR_JUMP,
+		PADDR (dispatch),
+}/*-------------------------< SETMSG >----------------------*/,{
+	SCR_COPY (1),
+		RADDR (scratcha),
+		NADDR (msgout),
+	SCR_SET (SCR_ATN),
+		0,
+}/*-------------------------< CLRACK >----------------------*/,{
+	/*
+	**	Terminate possible pending message phase.
+	*/
+	SCR_CLR (SCR_ACK),
+		0,
+
+}/*-----------------------< DISPATCH >----------------------*/,{
+	SCR_FROM_REG (HS_REG),
+		0,
+	SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+		SIR_NEGO_FAILED,
+	/*
+	**	remove bogus output signals
+	*/
+	SCR_REG_REG (socl, SCR_AND, CACK|CATN),
+		0,
+	SCR_RETURN ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+		0,
+	/*
+	**	DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 4.
+	**	Possible data corruption during Memory Write and Invalidate.
+	**	This work-around resets the addressing logic prior to the
+	**	start of the first MOVE of a DATA IN phase.
+	**	(See README.ncr53c8xx for more information)
+	*/
+	SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
+		20,
+	SCR_COPY (4),
+		RADDR (scratcha),
+		RADDR (scratcha),
+	SCR_RETURN,
+		0,
+
+	SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+		PADDR (msg_out),
+	SCR_JUMP ^ IFTRUE (IF (SCR_MSG_IN)),
+		PADDR (msg_in),
+	SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+		PADDR (command),
+	SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+		PADDR (status),
+	/*
+	**      Discard one illegal phase byte, if required.
+	*/
+	SCR_LOAD_REG (scratcha, XE_BAD_PHASE),
+		0,
+	SCR_COPY (1),
+		RADDR (scratcha),
+		NADDR (xerr_st),
+	SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)),
+		8,
+	SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+		NADDR (scratch),
+	SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)),
+		8,
+	SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+		NADDR (scratch),
+	SCR_JUMP,
+		PADDR (dispatch),
+
+}/*-------------------------< NO_DATA >--------------------*/,{
+	/*
+	**	The target wants to transfer too much data
+	**	or in the wrong direction.
+	**	Remember that in extended error.
+	*/
+	SCR_LOAD_REG (scratcha, XE_EXTRA_DATA),
+		0,
+	SCR_COPY (1),
+		RADDR (scratcha),
+		NADDR (xerr_st),
+	/*
+	**      Discard one data byte, if required.
+	*/
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+		8,
+	SCR_MOVE_ABS (1) ^ SCR_DATA_OUT,
+		NADDR (scratch),
+	SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
+		8,
+	SCR_MOVE_ABS (1) ^ SCR_DATA_IN,
+		NADDR (scratch),
+	/*
+	**      .. and repeat as required.
+	*/
+	SCR_CALL,
+		PADDR (dispatch),
+	SCR_JUMP,
+		PADDR (no_data),
+}/*-------------------------< CHECKATN >--------------------*/,{
+	/*
+	**	If AAP (bit 1 of scntl0 register) is set
+	**	and a parity error is detected,
+	**	the script processor asserts ATN.
+	**
+	**	The target should switch to a MSG_OUT phase
+	**	to get the message.
+	*/
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFFALSE (MASK (CATN, CATN)),
+		PADDR (dispatch),
+	/*
+	**	count it
+	*/
+	SCR_REG_REG (PS_REG, SCR_ADD, 1),
+		0,
+	/*
+	**	Prepare a M_ID_ERROR message
+	**	(initiator detected error).
+	**	The target should retry the transfer.
+	*/
+	SCR_LOAD_REG (scratcha, M_ID_ERROR),
+		0,
+	SCR_JUMP,
+		PADDR (setmsg),
+
+}/*-------------------------< COMMAND >--------------------*/,{
+	/*
+	**	If this is not a GETCC transfer ...
+	*/
+	SCR_FROM_REG (SS_REG),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFTRUE (DATA (S_CHECK_COND)),
+		28,
+	/*
+	**	... set a timestamp ...
+	*/
+	SCR_COPY (sizeof (u_long)),
+		KVAR(SCRIPT_KVAR_JIFFIES),
+		NADDR (header.stamp.command),
+	/*
+	**	... and send the command
+	*/
+	SCR_MOVE_TBL ^ SCR_COMMAND,
+		offsetof (struct dsb, cmd),
+	SCR_JUMP,
+		PADDR (dispatch),
+	/*
+	**	Send the GETCC command
+	*/
+/*>>>*/	SCR_MOVE_TBL ^ SCR_COMMAND,
+		offsetof (struct dsb, scmd),
+	SCR_JUMP,
+		PADDR (dispatch),
+
+}/*-------------------------< STATUS >--------------------*/,{
+	/*
+	**	set the timestamp.
+	*/
+	SCR_COPY (sizeof (u_long)),
+		KVAR(SCRIPT_KVAR_JIFFIES),
+		NADDR (header.stamp.status),
+	/*
+	**	If this is a GETCC transfer,
+	*/
+	SCR_FROM_REG (SS_REG),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFFALSE (DATA (S_CHECK_COND)),
+		40,
+	/*
+	**	get the status
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_STATUS,
+		NADDR (scratch),
+	/*
+	**	Save status to scsi_status.
+	**	Mark as complete.
+	**	And wait for disconnect.
+	*/
+	SCR_TO_REG (SS_REG),
+		0,
+	SCR_REG_REG (SS_REG, SCR_OR, S_SENSE),
+		0,
+	SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+		0,
+	SCR_JUMP,
+		PADDR (checkatn),
+	/*
+	**	If it was no GETCC transfer,
+	**	save the status to scsi_status.
+	*/
+/*>>>*/	SCR_MOVE_ABS (1) ^ SCR_STATUS,
+		NADDR (scratch),
+	SCR_TO_REG (SS_REG),
+		0,
+	/*
+	**	if it was no check condition ...
+	*/
+	SCR_JUMP ^ IFTRUE (DATA (S_CHECK_COND)),
+		PADDR (checkatn),
+	/*
+	**	... mark as complete.
+	*/
+	SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+		0,
+	SCR_JUMP,
+		PADDR (checkatn),
+
+}/*-------------------------< MSG_IN >--------------------*/,{
+	/*
+	**	Get the first byte of the message
+	**	and save it to SCRATCHA.
+	**
+	**	The script processor doesn't negate the
+	**	ACK signal after this transfer.
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin[0]),
+	/*
+	**	Check for message parity error.
+	*/
+	SCR_TO_REG (scratcha),
+		0,
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+		PADDRH (msg_parity),
+	SCR_FROM_REG (scratcha),
+		0,
+	/*
+	**	Parity was ok, handle this message.
+	*/
+	SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+		PADDR (complete),
+	SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+		PADDR (save_dp),
+	SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+		PADDR (restore_dp),
+	SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+		PADDR (disconnect),
+	SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+		PADDRH (msg_extended),
+	SCR_JUMP ^ IFTRUE (DATA (M_NOOP)),
+		PADDR (clrack),
+	SCR_JUMP ^ IFTRUE (DATA (M_REJECT)),
+		PADDRH (msg_reject),
+	SCR_JUMP ^ IFTRUE (DATA (M_IGN_RESIDUE)),
+		PADDRH (msg_ign_residue),
+	/*
+	**	Rest of the messages left as
+	**	an exercise ...
+	**
+	**	Unimplemented messages:
+	**	fall through to MSG_BAD.
+	*/
+}/*-------------------------< MSG_BAD >------------------*/,{
+	/*
+	**	unimplemented message - reject it.
+	*/
+	SCR_INT,
+		SIR_REJECT_SENT,
+	SCR_LOAD_REG (scratcha, M_REJECT),
+		0,
+	SCR_JUMP,
+		PADDR (setmsg),
+
+}/*-------------------------< COMPLETE >-----------------*/,{
+	/*
+	**	Complete message.
+	**
+	**	If it's not the get condition code,
+	**	copy TEMP register to LASTP in header.
+	*/
+	SCR_FROM_REG (SS_REG),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFTRUE (MASK (S_SENSE, S_SENSE)),
+		12,
+	SCR_COPY (4),
+		RADDR (temp),
+		NADDR (header.lastp),
+/*>>>*/	/*
+	**	When we terminate the cycle by clearing ACK,
+	**	the target may disconnect immediately.
+	**
+	**	We don't want to be told of an
+	**	"unexpected disconnect",
+	**	so we disable this feature.
+	*/
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	/*
+	**	Terminate cycle ...
+	*/
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	/*
+	**	... and wait for the disconnect.
+	*/
+	SCR_WAIT_DISC,
+		0,
+}/*-------------------------< CLEANUP >-------------------*/,{
+	/*
+	**      dsa:    Pointer to ccb
+	**	      or xxxxxxFF (no ccb)
+	**
+	**      HS_REG:   Host-Status (<>0!)
+	*/
+	SCR_FROM_REG (dsa),
+		0,
+	SCR_JUMP ^ IFTRUE (DATA (0xff)),
+		PADDR (signal),
+	/*
+	**      dsa is valid.
+	**	save the status registers
+	*/
+	SCR_COPY (4),
+		RADDR (scr0),
+		NADDR (header.status),
+	/*
+	**	and copy back the header to the ccb.
+	*/
+	SCR_COPY_F (4),
+		RADDR (dsa),
+		PADDR (cleanup0),
+	SCR_COPY (sizeof (struct head)),
+		NADDR (header),
+}/*-------------------------< CLEANUP0 >--------------------*/,{
+		0,
+
+	/*
+	**	If command resulted in "check condition"
+	**	status and is not yet completed,
+	**	try to get the condition code.
+	*/
+	SCR_FROM_REG (HS_REG),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)),
+		16,
+	SCR_FROM_REG (SS_REG),
+		0,
+	SCR_JUMP ^ IFTRUE (DATA (S_CHECK_COND)),
+		PADDRH(getcc2),
+	/*
+	**	And make the DSA register invalid.
+	*/
+/*>>>*/	SCR_LOAD_REG (dsa, 0xff), /* invalid */
+		0,
+}/*-------------------------< SIGNAL >----------------------*/,{
+	/*
+	**	if status = queue full,
+	**	reinsert in startqueue and stall queue.
+	*/
+	SCR_FROM_REG (SS_REG),
+		0,
+	SCR_INT ^ IFTRUE (DATA (S_QUEUE_FULL)),
+		SIR_STALL_QUEUE,
+	/*
+	**	if job completed ...
+	*/
+	SCR_FROM_REG (HS_REG),
+		0,
+	/*
+	**	... signal completion to the host
+	*/
+	SCR_INT_FLY ^ IFFALSE (MASK (0, HS_DONEMASK)),
+		0,
+	/*
+	**	On to new misdeeds!  (i.e. go start the next job)
+	*/
+	SCR_JUMP,
+		PADDR(start),
+
+}/*-------------------------< SAVE_DP >------------------*/,{
+	/*
+	**	SAVE_DP message:
+	**	Copy TEMP register to SAVEP in header.
+	*/
+	SCR_COPY (4),
+		RADDR (temp),
+		NADDR (header.savep),
+	SCR_JUMP,
+		PADDR (clrack),
+}/*-------------------------< RESTORE_DP >---------------*/,{
+	/*
+	**	RESTORE_DP message:
+	**	Copy SAVEP in header to TEMP register.
+	*/
+	SCR_COPY (4),
+		NADDR (header.savep),
+		RADDR (temp),
+	SCR_JUMP,
+		PADDR (clrack),
+
+}/*-------------------------< DISCONNECT >---------------*/,{
+	/*
+	**	If QUIRK_AUTOSAVE is set,
+	**	do a "save pointer" operation.
+	*/
+	SCR_FROM_REG (QU_REG),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)),
+		12,
+	/*
+	**	like SAVE_DP message:
+	**	Copy TEMP register to SAVEP in header.
+	*/
+	SCR_COPY (4),
+		RADDR (temp),
+		NADDR (header.savep),
+/*>>>*/	/*
+	**	Check if temp==savep or temp==goalp:
+	**	if not, log a missing save pointer message.
+	**	In fact, it's a comparison mod 256.
+	**
+	**	Hmmm, I hadn't thought that I would be urged to
+	**	write this kind of ugly self modifying code.
+	**
+	**	It's unbelievable, but the ncr53c8xx isn't able
+	**	to subtract one register from another.
+	*/
+	SCR_FROM_REG (temp),
+		0,
+	/*
+	**	You are not expected to understand this ..
+	**
+	**	CAUTION: only little endian architectures supported! XXX
+	*/
+	SCR_COPY_F (1),
+		NADDR (header.savep),
+		PADDR (disconnect0),
+}/*-------------------------< DISCONNECT0 >--------------*/,{
+/*<<<*/	SCR_JUMPR ^ IFTRUE (DATA (1)),
+		20,
+	/*
+	**	neither this
+	*/
+	SCR_COPY_F (1),
+		NADDR (header.goalp),
+		PADDR (disconnect1),
+}/*-------------------------< DISCONNECT1 >--------------*/,{
+	SCR_INT ^ IFFALSE (DATA (1)),
+		SIR_MISSING_SAVE,
+/*>>>*/
+
+	/*
+	**	DISCONNECTing  ...
+	**
+	**	disable the "unexpected disconnect" feature,
+	**	and remove the ACK signal.
+	*/
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	/*
+	**	Wait for the disconnect.
+	*/
+	SCR_WAIT_DISC,
+		0,
+	/*
+	**	Profiling:
+	**	Set a time stamp,
+	**	and count the disconnects.
+	*/
+	SCR_COPY (sizeof (u_long)),
+		KVAR(SCRIPT_KVAR_JIFFIES),
+		NADDR (header.stamp.disconnect),
+	SCR_COPY (4),
+		NADDR (disc_phys),
+		RADDR (temp),
+	SCR_REG_REG (temp, SCR_ADD, 0x01),
+		0,
+	SCR_COPY (4),
+		RADDR (temp),
+		NADDR (disc_phys),
+	/*
+	**	Status is: DISCONNECTED.
+	*/
+	SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+		0,
+	SCR_JUMP,
+		PADDR (cleanup),
+
+}/*-------------------------< MSG_OUT >-------------------*/,{
+	/*
+	**	The target requests a message.
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+		NADDR (msgout),
+	SCR_COPY (1),
+		RADDR (sfbr),
+		NADDR (lastmsg),
+	/*
+	**	If it was no ABORT message ...
+	*/
+	SCR_JUMP ^ IFTRUE (DATA (M_ABORT)),
+		PADDRH (msg_out_abort),
+	/*
+	**	... wait for the next phase
+	**	if it's a message out, send it again, ...
+	*/
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+		PADDR (msg_out),
+}/*-------------------------< MSG_OUT_DONE >--------------*/,{
+	/*
+	**	... else clear the message ...
+	*/
+	SCR_LOAD_REG (scratcha, M_NOOP),
+		0,
+	SCR_COPY (4),
+		RADDR (scratcha),
+		NADDR (msgout),
+	/*
+	**	... and process the next phase
+	*/
+	SCR_JUMP,
+		PADDR (dispatch),
+}/*------------------------< BADGETCC >---------------------*/,{
+	/*
+	**	If SIGP was set, clear it and try again.
+	*/
+	SCR_FROM_REG (ctest2),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)),
+		PADDRH (getcc2),
+	SCR_INT,
+		SIR_SENSE_FAILED,
+}/*-------------------------< RESELECT >--------------------*/,{
+	/*
+	**	This NOP will be patched with LED OFF
+	**	SCR_REG_REG (gpreg, SCR_OR, 0x01)
+	*/
+	SCR_NO_OP,
+		0,
+	/*
+	**	make the DSA invalid.
+	*/
+	SCR_LOAD_REG (dsa, 0xff),
+		0,
+	SCR_CLR (SCR_TRG),
+		0,
+	/*
+	**	Sleep waiting for a reselection.
+	**	If SIGP is set, special treatment.
+	**
+	**	Ready for anything ..
+	*/
+	SCR_WAIT_RESEL,
+		PADDR(reselect2),
+}/*-------------------------< RESELECT1 >--------------------*/,{
+	/*
+	**	This NOP will be patched with LED ON
+	**	SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+	*/
+	SCR_NO_OP,
+		0,
+	/*
+	**	... but good for nothing ?
+	**
+	**	load the target id into the SFBR
+	**	and jump to the control block.
+	**
+	**	Look at the declarations of
+	**	- struct ncb
+	**	- struct tcb
+	**	- struct lcb
+	**	- struct ccb
+	**	to understand what's going on.
+	*/
+	SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+		0,
+	SCR_TO_REG (ctest0),
+		0,
+	SCR_JUMP,
+		NADDR (jump_tcb),
+}/*-------------------------< RESELECT2 >-------------------*/,{
+	/*
+	**	This NOP will be patched with LED ON
+	**	SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+	*/
+	SCR_NO_OP,
+		0,
+	/*
+	**	If it's not connected :(
+	**	-> interrupted by SIGP bit.
+	**	Jump to start.
+	*/
+	SCR_FROM_REG (ctest2),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)),
+		PADDR (start),
+	SCR_JUMP,
+		PADDR (reselect),
+
+}/*-------------------------< RESEL_TMP >-------------------*/,{
+	/*
+	**	The return address in TEMP
+	**	is in fact the data structure address,
+	**	so copy it to the DSA register.
+	*/
+	SCR_COPY (4),
+		RADDR (temp),
+		RADDR (dsa),
+	SCR_JUMP,
+		PADDR (prepare),
+
+}/*-------------------------< RESEL_LUN >-------------------*/,{
+	/*
+	**	come back to this point
+	**	to get an IDENTIFY message
+	**	Wait for a msg_in phase.
+	*/
+/*<<<*/	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		48,
+	/*
+	**	message phase
+	**	It's not a sony, it's a trick:
+	**	read the data without acknowledging it.
+	*/
+	SCR_FROM_REG (sbdl),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFFALSE (MASK (M_IDENTIFY, 0x98)),
+		32,
+	/*
+	**	It WAS an Identify message.
+	**	get it and ack it!
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin),
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	**	Mask out the lun.
+	*/
+	SCR_REG_REG (sfbr, SCR_AND, 0x07),
+		0,
+	SCR_RETURN,
+		0,
+	/*
+	**	No message phase or no IDENTIFY message:
+	**	return 0.
+	*/
+/*>>>*/	SCR_LOAD_SFBR (0),
+		0,
+	SCR_RETURN,
+		0,
+
+}/*-------------------------< RESEL_TAG >-------------------*/,{
+	/*
+	**	come back to this point
+	**	to get a SIMPLE_TAG message
+	**	Wait for a MSG_IN phase.
+	*/
+/*<<<*/	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		64,
+	/*
+	**	message phase
+	**	It's a trick - read the data
+	**	without acknowledging it.
+	*/
+	SCR_FROM_REG (sbdl),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFFALSE (DATA (M_SIMPLE_TAG)),
+		48,
+	/*
+	**	It WAS a SIMPLE_TAG message.
+	**	get it and ack it!
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin),
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	**	Wait for the second byte (the tag)
+	*/
+/*<<<*/	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		24,
+	/*
+	**	Get it and ack it!
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin),
+	SCR_CLR (SCR_ACK|SCR_CARRY),
+		0,
+	SCR_RETURN,
+		0,
+	/*
+	**	No message phase or no SIMPLE_TAG message
+	**	or no second byte: return 0.
+	*/
+/*>>>*/	SCR_LOAD_SFBR (0),
+		0,
+	SCR_SET (SCR_CARRY),
+		0,
+	SCR_RETURN,
+		0,
+
+}/*-------------------------< DATA_IO >--------------------*/,{
+/*
+**	Because Linux does not provide xfer data direction
+**	to low-level scsi drivers, we must trust the target
+**	for actual data direction when we cannot guess it.
+**	The programmed interrupt patches savep, lastp, goalp,
+**	etc.., and restarts the scsi script at data_out/in.
+*/
+	SCR_INT ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+		SIR_DATA_IO_IS_OUT,
+	SCR_INT ^ IFTRUE (WHEN (SCR_DATA_IN)),
+		SIR_DATA_IO_IS_IN,
+	SCR_JUMP,
+		PADDR (no_data),
+
+}/*-------------------------< DATA_IN >--------------------*/,{
+/*
+**	Because the size depends on the
+**	#define MAX_SCATTER parameter,
+**	it is filled in at runtime.
+**
+**	##===========< i=0; i<MAX_SCATTER >=========
+**	||	SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)),
+**	||		PADDR (checkatn),
+**	||	SCR_MOVE_TBL ^ SCR_DATA_IN,
+**	||		offsetof (struct dsb, data[ i]),
+**	##==========================================
+**
+**	SCR_CALL,
+**		PADDR (checkatn),
+**	SCR_JUMP,
+**		PADDR (no_data),
+*/
+0
+}/*--------------------------------------------------------*/
+};
+
+/*
+**	Second part of the SCRIPTS program (struct scripth): the less
+**	frequently used fragments.  Each named section below must keep
+**	exactly the size declared in struct scripth; a size mismatch is
+**	zero padded and later flagged as an illegal 0 opcode by
+**	ncr_script_copy_and_bind().
+*/
+static struct scripth scripth0 __initdata = {
+/*-------------------------< TRYLOOP >---------------------*/{
+/*
+**	Load an entry of the start queue into dsa
+**	and try to start it by jumping to TRYSEL.
+**
+**	Because the size depends on the
+**	#define MAX_START parameter, it is filled
+**	in at runtime.
+**
+**-----------------------------------------------------------
+**
+**	##===========< I=0; i<MAX_START >===========
+**	||	SCR_COPY (4),
+**	||		NADDR (squeue[i]),
+**	||		RADDR (dsa),
+**	||	SCR_CALL,
+**	||		PADDR (trysel),
+**	##==========================================
+**
+**	SCR_JUMP,
+**		PADDRH(tryloop),
+**
+**-----------------------------------------------------------
+*/
+0
+},/*-------------------------< MSG_PARITY >---------------*/{
+	/*
+	**	count it
+	*/
+	SCR_REG_REG (PS_REG, SCR_ADD, 0x01),
+		0,
+	/*
+	**	send a "message parity error" message.
+	*/
+	SCR_LOAD_REG (scratcha, M_PARITY),
+		0,
+	SCR_JUMP,
+		PADDR (setmsg),
+}/*-------------------------< MSG_REJECT >---------------*/,{
+	/*
+	**	If a negotiation was in progress,
+	**	negotiation failed.
+	*/
+	SCR_FROM_REG (HS_REG),
+		0,
+	SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+		SIR_NEGO_FAILED,
+	/*
+	**	else make host log this message
+	*/
+	SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)),
+		SIR_REJECT_RECEIVED,
+	SCR_JUMP,
+		PADDR (clrack),
+
+}/*-------------------------< MSG_IGN_RESIDUE >----------*/,{
+	/*
+	**	Terminate cycle
+	*/
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR (dispatch),
+	/*
+	**	get residue size.
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin[1]),
+	/*
+	**	Check for message parity error.
+	*/
+	SCR_TO_REG (scratcha),
+		0,
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+		PADDRH (msg_parity),
+	SCR_FROM_REG (scratcha),
+		0,
+	/*
+	**	Size is 0 .. ignore message.
+	*/
+	SCR_JUMP ^ IFTRUE (DATA (0)),
+		PADDR (clrack),
+	/*
+	**	Size is not 1 .. have to interrupt.
+	*/
+/*<<<*/	SCR_JUMPR ^ IFFALSE (DATA (1)),
+		40,
+	/*
+	**	Check for residue byte in swide register
+	*/
+	SCR_FROM_REG (scntl2),
+		0,
+/*<<<*/	SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+		16,
+	/*
+	**	There IS data in the swide register.
+	**	Discard it.
+	*/
+	SCR_REG_REG (scntl2, SCR_OR, WSR),
+		0,
+	SCR_JUMP,
+		PADDR (clrack),
+	/*
+	**	Load again the size to the sfbr register.
+	*/
+/*>>>*/	SCR_FROM_REG (scratcha),
+		0,
+/*>>>*/	SCR_INT,
+		SIR_IGN_RESIDUE,
+	SCR_JUMP,
+		PADDR (clrack),
+
+}/*-------------------------< MSG_EXTENDED >-------------*/,{
+	/*
+	**	Terminate cycle
+	*/
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR (dispatch),
+	/*
+	**	get length.
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin[1]),
+	/*
+	**	Check for message parity error.
+	*/
+	SCR_TO_REG (scratcha),
+		0,
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+		PADDRH (msg_parity),
+	SCR_FROM_REG (scratcha),
+		0,
+	/*
+	**	Dispatch on the extended message length:
+	**	3 -> MSG_EXT_3, anything but 2 -> reject,
+	**	else fall through to MSG_EXT_2.
+	*/
+	SCR_JUMP ^ IFTRUE (DATA (3)),
+		PADDRH (msg_ext_3),
+	SCR_JUMP ^ IFFALSE (DATA (2)),
+		PADDR (msg_bad),
+}/*-------------------------< MSG_EXT_2 >----------------*/,{
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR (dispatch),
+	/*
+	**	get extended message code.
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin[2]),
+	/*
+	**	Check for message parity error.
+	*/
+	SCR_TO_REG (scratcha),
+		0,
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+		PADDRH (msg_parity),
+	SCR_FROM_REG (scratcha),
+		0,
+	SCR_JUMP ^ IFTRUE (DATA (M_X_WIDE_REQ)),
+		PADDRH (msg_wdtr),
+	/*
+	**	unknown extended message
+	*/
+	SCR_JUMP,
+		PADDR (msg_bad)
+}/*-------------------------< MSG_WDTR >-----------------*/,{
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR (dispatch),
+	/*
+	**	get data bus width
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin[3]),
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+		PADDRH (msg_parity),
+	/*
+	**	let the host do the real work.
+	*/
+	SCR_INT,
+		SIR_NEGO_WIDE,
+	/*
+	**	let the target fetch our answer.
+	*/
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		SIR_NEGO_PROTO,
+	/*
+	**	Send the M_X_WIDE_REQ
+	*/
+	SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+		NADDR (msgout),
+	SCR_CLR (SCR_ATN),
+		0,
+	SCR_COPY (1),
+		RADDR (sfbr),
+		NADDR (lastmsg),
+	SCR_JUMP,
+		PADDR (msg_out_done),
+
+}/*-------------------------< MSG_EXT_3 >----------------*/,{
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR (dispatch),
+	/*
+	**	get extended message code.
+	*/
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		NADDR (msgin[2]),
+	/*
+	**	Check for message parity error.
+	*/
+	SCR_TO_REG (scratcha),
+		0,
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+		PADDRH (msg_parity),
+	SCR_FROM_REG (scratcha),
+		0,
+	SCR_JUMP ^ IFTRUE (DATA (M_X_SYNC_REQ)),
+		PADDRH (msg_sdtr),
+	/*
+	**	unknown extended message
+	*/
+	SCR_JUMP,
+		PADDR (msg_bad)
+
+}/*-------------------------< MSG_SDTR >-----------------*/,{
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR (dispatch),
+	/*
+	**	get period and offset
+	*/
+	SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+		NADDR (msgin[3]),
+	SCR_FROM_REG (socl),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+		PADDRH (msg_parity),
+	/*
+	**	let the host do the real work.
+	*/
+	SCR_INT,
+		SIR_NEGO_SYNC,
+	/*
+	**	let the target fetch our answer.
+	*/
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		SIR_NEGO_PROTO,
+	/*
+	**	Send the M_X_SYNC_REQ
+	*/
+	SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+		NADDR (msgout),
+	SCR_CLR (SCR_ATN),
+		0,
+	SCR_COPY (1),
+		RADDR (sfbr),
+		NADDR (lastmsg),
+	SCR_JUMP,
+		PADDR (msg_out_done),
+
+}/*-------------------------< MSG_OUT_ABORT >-------------*/,{
+	/*
+	**	After ABORT message,
+	**
+	**	expect an immediate disconnect, ...
+	*/
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	SCR_WAIT_DISC,
+		0,
+	/*
+	**	... and set the status to "ABORTED"
+	*/
+	SCR_LOAD_REG (HS_REG, HS_ABORTED),
+		0,
+	SCR_JUMP,
+		PADDR (cleanup),
+
+}/*-------------------------< GETCC >-----------------------*/,{
+	/*
+	**	The ncr doesn't have an indirect load
+	**	or store command. So we have to
+	**	copy part of the control block to a
+	**	fixed place, where we can modify it.
+	**
+	**	We patch the address part of a COPY command
+	**	with the address of the dsa register ...
+	*/
+	SCR_COPY_F (4),
+		RADDR (dsa),
+		PADDRH (getcc1),
+	/*
+	**	... then we do the actual copy.
+	*/
+	SCR_COPY (sizeof (struct head)),
+}/*-------------------------< GETCC1 >----------------------*/,{
+		0,
+		NADDR (header),
+	/*
+	**	Initialize the status registers
+	*/
+	SCR_COPY (4),
+		NADDR (header.status),
+		RADDR (scr0),
+}/*-------------------------< GETCC2 >----------------------*/,{
+	/*
+	**	Get the condition code from a target.
+	**
+	**	DSA points to a data structure.
+	**	Set TEMP to the script location
+	**	that receives the condition code.
+	**
+	**	Because there is no script command
+	**	to load a longword into a register,
+	**	we use a CALL command.
+	*/
+/*<<<*/	SCR_CALLR,
+		24,
+	/*
+	**	Get the condition code.
+	*/
+	SCR_MOVE_TBL ^ SCR_DATA_IN,
+		offsetof (struct dsb, sense),
+	/*
+	**	No data phase may follow!
+	*/
+	SCR_CALL,
+		PADDR (checkatn),
+	SCR_JUMP,
+		PADDR (no_data),
+/*>>>*/
+
+	/*
+	**	The CALL jumps to this point.
+	**	Prepare for a RESTORE_POINTER message.
+	**	Save the TEMP register into the saved pointer.
+	*/
+	SCR_COPY (4),
+		RADDR (temp),
+		NADDR (header.savep),
+	/*
+	**	Load scratcha, because in case of a selection timeout,
+	**	the host will expect a new value for startpos in
+	**	the scratcha register.
+	*/
+	SCR_COPY (4),
+		PADDR (startpos),
+		RADDR (scratcha),
+#ifdef NCR_GETCC_WITHMSG
+	/*
+	**	If QUIRK_NOMSG is set, select without ATN.
+	**	and don't send a message.
+	*/
+	SCR_FROM_REG (QU_REG),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (QUIRK_NOMSG, QUIRK_NOMSG)),
+		PADDRH(getcc3),
+	/*
+	**	Then try to connect to the target.
+	**	If we are reselected, special treatment
+	**	of the current job is required before
+	**	accepting the reselection.
+	*/
+	SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+		PADDR(badgetcc),
+	/*
+	**	save target id.
+	*/
+	SCR_FROM_REG (sdid),
+		0,
+	SCR_TO_REG (ctest0),
+		0,
+	/*
+	**	Send the IDENTIFY message.
+	**	In case of short transfer, remove ATN.
+	*/
+	SCR_MOVE_TBL ^ SCR_MSG_OUT,
+		offsetof (struct dsb, smsg2),
+	SCR_CLR (SCR_ATN),
+		0,
+	/*
+	**	save the first byte of the message.
+	*/
+	SCR_COPY (1),
+		RADDR (sfbr),
+		NADDR (lastmsg),
+	SCR_JUMP,
+		PADDR (prepare2),
+
+#endif
+}/*-------------------------< GETCC3 >----------------------*/,{
+	/*
+	**	Try to connect to the target.
+	**	If we are reselected, special treatment
+	**	of the current job is required before
+	**	accepting the reselection.
+	**
+	**	Silly target won't accept a message.
+	**	Select without ATN.
+	*/
+	SCR_SEL_TBL ^ offsetof (struct dsb, select),
+		PADDR(badgetcc),
+	/*
+	**	save target id.
+	*/
+	SCR_FROM_REG (sdid),
+		0,
+	SCR_TO_REG (ctest0),
+		0,
+	/*
+	**	Force error if selection timeout
+	*/
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		0,
+	/*
+	**	don't negotiate.
+	*/
+	SCR_JUMP,
+		PADDR (prepare2),
+
+}/*-------------------------< DATA_OUT >-------------------*/,{
+/*
+**	Because the size depends on the
+**	#define MAX_SCATTER parameter,
+**	it is filled in at runtime.
+**
+**	##===========< i=0; i<MAX_SCATTER >=========
+**	||	SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+**	||		PADDR (dispatch),
+**	||	SCR_MOVE_TBL ^ SCR_DATA_OUT,
+**	||		offsetof (struct dsb, data[ i]),
+**	##==========================================
+**
+**	SCR_CALL,
+**		PADDR (dispatch),
+**	SCR_JUMP,
+**		PADDR (no_data),
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< ABORTTAG >-------------------*/,{
+	/*
+	**      Abort a bad reselection.
+	**	Set the message to ABORT vs. ABORT_TAG
+	*/
+	SCR_LOAD_REG (scratcha, M_ABORT_TAG),
+		0,
+	SCR_JUMPR ^ IFFALSE (CARRYSET),
+		8,
+}/*-------------------------< ABORT >----------------------*/,{
+	SCR_LOAD_REG (scratcha, M_ABORT),
+		0,
+	SCR_COPY (1),
+		RADDR (scratcha),
+		NADDR (msgout),
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	**	and send it.
+	**	we expect an immediate disconnect
+	*/
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+		NADDR (msgout),
+	SCR_COPY (1),
+		RADDR (sfbr),
+		NADDR (lastmsg),
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	SCR_WAIT_DISC,
+		0,
+	SCR_JUMP,
+		PADDR (start),
+}/*-------------------------< SNOOPTEST >-------------------*/,{
+	/*
+	**	Read the variable.
+	*/
+	SCR_COPY (4),
+		NADDR(ncr_cache),
+		RADDR (scratcha),
+	/*
+	**	Write the variable.
+	*/
+	SCR_COPY (4),
+		RADDR (temp),
+		NADDR(ncr_cache),
+	/*
+	**	Read back the variable.
+	*/
+	SCR_COPY (4),
+		NADDR(ncr_cache),
+		RADDR (temp),
+}/*-------------------------< SNOOPEND >-------------------*/,{
+	/*
+	**	And stop.
+	*/
+	SCR_INT,
+		99,
+}/*--------------------------------------------------------*/
+};
+
+/*==========================================================
+**
+**
+** Fill in #define dependent parts of the script
+**
+**
+**==========================================================
+*/
+
+/*
+**	Fill in the run-time sized parts of the scripts: the start
+**	queue polling loop (MAX_START entries, see the TRYLOOP
+**	template above) and both scatter/gather move lists
+**	(MAX_SCATTER entries each, see DATA_IN/DATA_OUT templates).
+**	The asserts verify that the generated code exactly fills
+**	the arrays reserved in struct script/scripth.
+*/
+__initfunc(
+void ncr_script_fill (struct script * scr, struct scripth * scrh)
+)
+{
+	int	i;
+	ncrcmd	*p;
+
+	/* Start queue polling loop. */
+	p = scrh->tryloop;
+	for (i=0; i<MAX_START; i++) {
+		*p++ =SCR_COPY (4);
+		*p++ =NADDR (squeue[i]);
+		*p++ =RADDR (dsa);
+		*p++ =SCR_CALL;
+		*p++ =PADDR (trysel);
+	};
+	*p++ =SCR_JUMP;
+	*p++ =PADDRH(tryloop);
+
+	assert ((u_long)p == (u_long)&scrh->tryloop + sizeof (scrh->tryloop));
+
+	/* DATA_IN scatter/gather move list. */
+	p = scr->data_in;
+
+	for (i=0; i<MAX_SCATTER; i++) {
+		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
+		*p++ =PADDR (checkatn);
+		*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
+		*p++ =offsetof (struct dsb, data[i]);
+	};
+
+	*p++ =SCR_CALL;
+	*p++ =PADDR (checkatn);
+	*p++ =SCR_JUMP;
+	*p++ =PADDR (no_data);
+
+	assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in));
+
+	/* DATA_OUT scatter/gather move list. */
+	p = scrh->data_out;
+
+	for (i=0; i<MAX_SCATTER; i++) {
+		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
+		*p++ =PADDR (dispatch);
+		*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
+		*p++ =offsetof (struct dsb, data[i]);
+	};
+
+	*p++ =SCR_CALL;
+	*p++ =PADDR (dispatch);
+	*p++ =SCR_JUMP;
+	*p++ =PADDR (no_data);
+
+	assert ((u_long)p == (u_long)&scrh->data_out + sizeof (scrh->data_out));
+}
+
+/*==========================================================
+**
+**
+** Copy and rebind a script.
+**
+**
+**==========================================================
+*/
+
+/*
+**	Copy a script of 'len' bytes from 'src' to 'dst', converting
+**	each command to chip byte order (cpu_to_scr) and resolving the
+**	RELOC_* tagged address arguments to physical addresses.  Also
+**	strips the SCR_NO_FLUSH bit from COPY commands when the chip
+**	has no prefetch support (FE_PFEN).
+*/
+__initfunc(
+static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int len)
+)
+{
+	ncrcmd  opcode, new, old, tmp1, tmp2;
+	ncrcmd	*start, *end;
+	int relocs;
+	int opchanged = 0;
+
+	start = src;
+	end = src + len/4;
+
+	while (src < end) {
+
+		opcode = *src++;
+		*dst++ = cpu_to_scr(opcode);
+
+		/*
+		**	If we forget to change the length
+		**	in struct script, a field will be
+		**	padded with 0. This is an illegal
+		**	command.
+		*/
+
+		if (opcode == 0) {
+			printf ("%s: ERROR0 IN SCRIPT at %d.\n",
+				ncr_name(np), (int) (src-start-1));
+			DELAY (1000000);
+		};
+
+		if (DEBUG_FLAGS & DEBUG_SCRIPT)
+			printf ("%p: <%x>\n",
+				(src-1), (unsigned)opcode);
+
+		/*
+		**	We don't have to decode ALL commands
+		*/
+		switch (opcode >> 28) {
+
+		case 0xc:
+			/*
+			**	COPY has TWO arguments.
+			*/
+			relocs = 2;
+			tmp1 = src[0];
+			if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
+				tmp1 = 0;
+			tmp2 = src[1];
+			if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
+				tmp2 = 0;
+			/* Both COPY arguments must agree in their
+			** two low-order address bits. */
+			if ((tmp1 ^ tmp2) & 3) {
+				printf ("%s: ERROR1 IN SCRIPT at %d.\n",
+					ncr_name(np), (int) (src-start-1));
+				DELAY (1000000);
+			}
+			/*
+			**	If PREFETCH feature not enabled, remove
+			**	the NO FLUSH bit if present.
+			*/
+			if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) {
+				dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
+				++opchanged;
+			}
+			break;
+
+		case 0x0:
+			/*
+			**	MOVE (absolute address)
+			*/
+			relocs = 1;
+			break;
+
+		case 0x8:
+			/*
+			**	JUMP / CALL
+			**	don't relocate if relative :-)
+			*/
+			if (opcode & 0x00800000)
+				relocs = 0;
+			else
+				relocs = 1;
+			break;
+
+		case 0x4:
+		case 0x5:
+		case 0x6:
+		case 0x7:
+			relocs = 1;
+			break;
+
+		default:
+			relocs = 0;
+			break;
+		};
+
+		if (relocs) {
+			while (relocs--) {
+				old = *src++;
+
+				switch (old & RELOC_MASK) {
+				case RELOC_REGISTER:
+					new = (old & ~RELOC_MASK) + np->paddr;
+					break;
+				case RELOC_LABEL:
+					new = (old & ~RELOC_MASK) + np->p_script;
+					break;
+				case RELOC_LABELH:
+					new = (old & ~RELOC_MASK) + np->p_scripth;
+					break;
+				case RELOC_SOFTC:
+					new = (old & ~RELOC_MASK) + vtophys(np);
+					break;
+				case RELOC_KVAR:
+					if (((old & ~RELOC_MASK) <
+					     SCRIPT_KVAR_FIRST) ||
+					    ((old & ~RELOC_MASK) >
+					     SCRIPT_KVAR_LAST))
+						panic("ncr KVAR out of range");
+					new = vtophys(script_kvars[old &
+					    ~RELOC_MASK]);
+					break;
+				case 0:
+					/* Don't relocate a 0 address. */
+					if (old == 0) {
+						new = old;
+						break;
+					}
+					/* fall through */
+				default:
+					panic("ncr_script_copy_and_bind: weird relocation %x\n", old);
+					break;
+				}
+
+				*dst++ = cpu_to_scr(new);
+			}
+		} else
+			*dst++ = cpu_to_scr(*src++);
+
+	};
+	if (bootverbose > 1 && opchanged)
+		printf("%s: NO FLUSH bit removed from %d script instructions\n",
+			ncr_name(np), opchanged);
+}
+
+/*==========================================================
+**
+**
+** Auto configuration: attach and init a host adapter.
+**
+**
+**==========================================================
+*/
+
+/*
+** Linux host data structure
+**
+** The script area is allocated in the host data structure
+** because kmalloc() returns NULL during scsi initialisations
+** with Linux 1.2.X
+*/
+
+struct host_data {
+	/* Pointer to the aligned ncb carved out of _ncb_data below. */
+	struct ncb *ncb;
+
+	/* Each *_align filler lets the following object be rounded
+	** up to its required alignment (see ncr_attach). */
+	char ncb_align[NCB_ALIGN_SIZE-1];	/* Filler for alignment */
+	struct ncb _ncb_data;
+
+	char ccb_align[CCB_ALIGN_SIZE-1];	/* Filler for alignment */
+	struct ccb _ccb_data;
+
+	char scr_align[SCR_ALIGN_SIZE-1];	/* Filler for alignment */
+	struct script script_data;
+
+	struct scripth scripth_data;
+};
+
+/*
+** Print something which allow to retrieve the controler type, unit,
+** target, lun concerned by a kernel message.
+*/
+
+/* Prefix a kernel message with "<controller name>-<target,lun>: ". */
+#define PRINT_LUN(np, target, lun) \
+printf(KERN_INFO "%s-<%d,%d>: ", ncr_name(np), (int) (target), (int) (lun))
+
+static void PRINT_ADDR(Scsi_Cmnd *cmd)
+{
+	/* Recover the board control block from the host data and,
+	** if one is attached, print the message prefix for this
+	** controller/target/lun. */
+	ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+
+	if (np)
+		PRINT_LUN(np, cmd->target, cmd->lun);
+}
+
+/*==========================================================
+**
+** NCR chip clock divisor table.
+** Divisors are multiplied by 10,000,000 in order to make
+** calculations more simple.
+**
+**==========================================================
+*/
+
+#define _5M 5000000
+/* Chip clock divisors (2,3,4,6,8,12,16) scaled by 10,000,000. */
+static u_long div_10M[] =
+	{2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
+
+
+/*===============================================================
+**
+** Prepare io register values used by ncr_init() according
+** to selected and supported features.
+**
+** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128
+** transfers. 32,64,128 are only supported by 875 and 895 chips.
+** We use log base 2 (burst length) as internal code, with
+** value 0 meaning "burst disabled".
+**
+**===============================================================
+*/
+
/*
 * Burst length (in transfers) from burst code; 0 means burst disabled.
 * Fully parenthesized: the previous unparenthesized ?: expansion bound
 * surrounding operators into its branches (e.g. `2 * burst_length(x)`
 * parsed as `(2 * !(x)) ? ...`).
 */
#define burst_length(bc) ((!(bc)) ? 0 : (1 << (bc)))
+
/*
 * Burst code from io register bits: 0 when CTEST4 bit 7 disables
 * bursting, else derived from DMODE bits 7-6 plus CTEST5 bit 2.
 * Fully parenthesized so the expansion is safe inside larger
 * expressions (the unparenthesized ?: form was precedence-fragile).
 */
#define burst_code(dmode, ctest4, ctest5) \
	(((ctest4) & 0x80) ? 0 : ((((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1))
+
+/*
+ * Set initial io register bits from burst code.
+ */
+static inline void ncr_init_burst(ncb_p np, u_char bc)
+{
+	/* Start from "no burst": clear CTEST4 bit 7, DMODE bits 7-6
+	** and CTEST5 bit 2, then encode the requested burst code. */
+	np->rv_ctest4 &= ~0x80;
+	np->rv_dmode &= ~(0x3 << 6);
+	np->rv_ctest5 &= ~0x4;
+
+	if (bc == 0) {
+		/* Burst disabled. */
+		np->rv_ctest4 |= 0x80;
+		return;
+	}
+
+	/* Register encoding is the burst code minus one. */
+	bc--;
+	np->rv_dmode |= (bc & 0x3) << 6;
+	np->rv_ctest5 |= bc & 0x4;
+}
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+** Get target set-up from Symbios format NVRAM.
+*/
+
+/*
+**	Apply the per-target settings stored in a Symbios format
+**	NVRAM to target 'target' of board 'np'.
+*/
+__initfunc(
+static void
+	ncr_Symbios_setup_target(ncb_p np, int target, Symbios_nvram *nvram)
+)
+{
+	tcb_p tp = &np->target[target];
+	Symbios_target *tn = &nvram->target[target];
+
+	/* 0 => 255 (async); else period scaled down by 4, rounded up.
+	** Presumably the NVRAM stores 4x the period factor — TODO confirm. */
+	tp->usrsync = tn->sync_period ? (tn->sync_period + 3) / 4 : 255;
+	/* 0x10 appears to denote a 16-bit wide bus — verify against spec. */
+	tp->usrwide = tn->bus_width == 0x10 ? 1 : 0;
+	tp->usrtags =
+		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SCSI_NCR_MAX_TAGS : 0;
+
+	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
+		tp->usrflag |= UF_NODISC;
+	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
+		tp->usrflag |= UF_NOSCAN;
+}
+
+/*
+** Get target set-up from Tekram format NVRAM.
+*/
+
+/*
+**	Apply the per-target settings stored in a Tekram format
+**	NVRAM to target 'target' of board 'np'.
+*/
+__initfunc(
+static void
+	ncr_Tekram_setup_target(ncb_p np, int target, Tekram_nvram *nvram)
+)
+{
+	tcb_p tp = &np->target[target];
+	struct Tekram_target *tn = &nvram->target[target];
+	int i;
+
+	/* Sync period is looked up through the Tekram_sync table;
+	** indices >= 12 fall back to 255 (async). */
+	if (tn->flags & TEKRAM_SYNC_NEGO) {
+		i = tn->sync_index & 0xf;
+		tp->usrsync = i < 12 ? Tekram_sync[i] : 255;
+	}
+
+	tp->usrwide = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0;
+
+	/* Tag count is a power of two, clamped to the driver maximum. */
+	if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
+		tp->usrtags = 2 << nvram->max_tags_index;
+		if (tp->usrtags > SCSI_NCR_MAX_TAGS)
+			tp->usrtags = SCSI_NCR_MAX_TAGS;
+	}
+
+	/* NOTE(review): plain assignment overwrites any previously set
+	** usrflag bits, unlike the |= used in the Symbios variant —
+	** confirm this is intended. */
+	if (!(tn->flags & TEKRAM_DISCONNECT_ENABLE))
+		tp->usrflag = UF_NODISC;
+
+	/* If any device does not support parity, we will not use this option */
+	if (!(tn->flags & TEKRAM_PARITY_CHECK))
+		np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+**	Save the chip's initial (BIOS) register settings, compute the
+**	clock-dependent transfer limits (rv_scntl3, minsync, maxsync)
+**	and prepare the initial values of the io registers (np->rv_*)
+**	from the driver setup, the chip features and, when present,
+**	the NVRAM contents.  Always returns 0.
+*/
+__initfunc(
+static int ncr_prepare_setting(ncb_p np, ncr_nvram *nvram)
+)
+{
+	u_char burst_max;
+	u_long period;
+	int i;
+
+	/*
+	**	Save assumed BIOS setting
+	*/
+
+	np->sv_scntl0 = INB(nc_scntl0) & 0x0a;
+	np->sv_scntl3 = INB(nc_scntl3) & 0x07;
+	np->sv_dmode = INB(nc_dmode) & 0xce;
+	np->sv_dcntl = INB(nc_dcntl) & 0xa8;
+	np->sv_ctest3 = INB(nc_ctest3) & 0x01;
+	np->sv_ctest4 = INB(nc_ctest4) & 0x80;
+	np->sv_ctest5 = INB(nc_ctest5) & 0x24;
+	np->sv_gpcntl = INB(nc_gpcntl);
+	np->sv_stest2 = INB(nc_stest2) & 0x20;
+	np->sv_stest4 = INB(nc_stest4);
+
+	/*
+	**	Wide ?
+	*/
+
+	np->maxwide = (np->features & FE_WIDE)? 1 : 0;
+
+	/*
+	**	Get the frequency of the chip's clock.
+	**	Find the right value for scntl3.
+	*/
+
+	if (np->features & FE_QUAD)
+		np->multiplier = 4;
+	else if (np->features & FE_DBLR)
+		np->multiplier = 2;
+	else
+		np->multiplier = 1;
+
+	np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
+	np->clock_khz *= np->multiplier;
+
+	if (np->clock_khz != 40000)
+		ncr_getclock(np, np->multiplier);
+
+	/*
+	 * Divisor to be used for async (timer pre-scaler).
+	 *
+	 * BUGFIX: decrement in the loop condition, so that div_10M
+	 * is never indexed with -1.  The previous "check, then
+	 * decrement inside the body" form read one element before
+	 * the start of the array when no divisor matched.
+	 */
+	i = np->clock_divn - 1;
+	while (--i >= 0) {
+		if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) {
+			++i;
+			break;
+		}
+	}
+	np->rv_scntl3 = i+1;
+
+	/*
+	 * Minimum synchronous period factor supported by the chip.
+	 * Btw, 'period' is in tenths of nanoseconds.
+	 */
+
+	period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
+	if (period <= 250) np->minsync = 10;
+	else if (period <= 303) np->minsync = 11;
+	else if (period <= 500) np->minsync = 12;
+	else np->minsync = (period + 40 - 1) / 40;
+
+	/*
+	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
+	 */
+
+	if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2)))
+		np->minsync = 25;
+	else if (np->minsync < 12 && !(np->features & FE_ULTRA2))
+		np->minsync = 12;
+
+	/*
+	 * Maximum synchronous period factor supported by the chip.
+	 */
+
+	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+	np->maxsync = period > 2540 ? 254 : period / 10;
+
+	/*
+	**	Prepare initial value of other IO registers
+	*/
+#if defined SCSI_NCR_TRUST_BIOS_SETTING
+	np->rv_scntl0 = np->sv_scntl0;
+	np->rv_dmode = np->sv_dmode;
+	np->rv_dcntl = np->sv_dcntl;
+	np->rv_ctest3 = np->sv_ctest3;
+	np->rv_ctest4 = np->sv_ctest4;
+	np->rv_ctest5 = np->sv_ctest5;
+	burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+#else
+
+	/*
+	**	Select burst length (dwords)
+	*/
+	burst_max = driver_setup.burst_max;
+	if (burst_max == 255)
+		burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+	if (burst_max > 7)
+		burst_max = 7;
+	if (burst_max > np->maxburst)
+		burst_max = np->maxburst;
+
+	/*
+	**	Select all supported special features
+	*/
+	if (np->features & FE_ERL)
+		np->rv_dmode |= ERL; /* Enable Read Line */
+	if (np->features & FE_BOF)
+		np->rv_dmode |= BOF; /* Burst Opcode Fetch */
+	if (np->features & FE_ERMP)
+		np->rv_dmode |= ERMP; /* Enable Read Multiple */
+	if (np->features & FE_PFEN)
+		np->rv_dcntl |= PFEN; /* Prefetch Enable */
+	if (np->features & FE_CLSE)
+		np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
+	if (np->features & FE_WRIE)
+		np->rv_ctest3 |= WRIE; /* Write and Invalidate */
+	if (np->features & FE_DFS)
+		np->rv_ctest5 |= DFS; /* Dma Fifo Size */
+
+	/*
+	**	Select some other
+	*/
+	if (driver_setup.master_parity)
+		np->rv_ctest4 |= MPEE; /* Master parity checking */
+	if (driver_setup.scsi_parity)
+		np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	/*
+	**	Get parity checking, host ID and verbose mode from NVRAM
+	*/
+	if (nvram) {
+		switch(nvram->type) {
+		case SCSI_NCR_TEKRAM_NVRAM:
+			np->myaddr = nvram->data.Tekram.host_id & 0x0f;
+			break;
+		case SCSI_NCR_SYMBIOS_NVRAM:
+			if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
+				np->rv_scntl0 &= ~0x0a;
+			np->myaddr = nvram->data.Symbios.host_id & 0x0f;
+			if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
+				np->verbose += 1;
+			break;
+		}
+	}
+#endif
+	/*
+	**	Get SCSI addr of host adapter (set by bios?).
+	*/
+	if (!np->myaddr) np->myaddr = INB(nc_scid) & 0x07;
+	if (!np->myaddr) np->myaddr = SCSI_NCR_MYADDR;
+
+
+#endif /* SCSI_NCR_TRUST_BIOS_SETTING */
+
+	/*
+	 * Prepare initial io register bits for burst length
+	 */
+	ncr_init_burst(np, burst_max);
+
+	/*
+	**	Set differential mode and LED support.
+	**	Ignore these features for boards known to use a
+	**	specific GPIO wiring (Tekram only for now).
+	**	Probe initial setting of GPREG and GPCNTL for
+	**	other ones.
+	*/
+	if (!nvram || nvram->type != SCSI_NCR_TEKRAM_NVRAM) {
+		switch(driver_setup.diff_support) {
+		case 3:
+			if (INB(nc_gpreg) & 0x08)
+				break;
+			/* fall through */
+		case 2:
+			np->rv_stest2 |= 0x20;
+			break;
+		case 1:
+			np->rv_stest2 |= (np->sv_stest2 & 0x20);
+			break;
+		default:
+			break;
+		}
+	}
+	if ((driver_setup.led_pin ||
+	     (nvram && nvram->type == SCSI_NCR_SYMBIOS_NVRAM)) &&
+	    !(np->sv_gpcntl & 0x01))
+		np->features |= FE_LED0;
+
+	/*
+	**	Set irq mode.
+	*/
+	switch(driver_setup.irqm) {
+	case 2:
+		np->rv_dcntl |= IRQM;
+		break;
+	case 1:
+		np->rv_dcntl |= (np->sv_dcntl & IRQM);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	**	Configure targets according to driver setup.
+	**	If NVRAM present get targets setup from NVRAM.
+	**	Allow to override sync, wide and NOSCAN from
+	**	boot command line.
+	*/
+	for (i = 0 ; i < MAX_TARGET ; i++) {
+		tcb_p tp = &np->target[i];
+
+		tp->usrsync = 255;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+		if (nvram) {
+			switch(nvram->type) {
+			case SCSI_NCR_TEKRAM_NVRAM:
+				ncr_Tekram_setup_target(np, i, &nvram->data.Tekram);
+				break;
+			case SCSI_NCR_SYMBIOS_NVRAM:
+				ncr_Symbios_setup_target(np, i, &nvram->data.Symbios);
+				break;
+			}
+			if (driver_setup.use_nvram & 0x2)
+				tp->usrsync = driver_setup.default_sync;
+			if (driver_setup.use_nvram & 0x4)
+				tp->usrwide = driver_setup.max_wide;
+			if (driver_setup.use_nvram & 0x8)
+				tp->usrflag &= ~UF_NOSCAN;
+		}
+		else {
+#else
+		if (1) {
+#endif
+			tp->usrsync = driver_setup.default_sync;
+			tp->usrwide = driver_setup.max_wide;
+			tp->usrtags = driver_setup.default_tags;
+			if (!driver_setup.disconnection)
+				np->target[i].usrflag = UF_NODISC;
+		}
+	}
+
+	/*
+	**	Announce all that stuff to user.
+	*/
+
+	i = nvram ? nvram->type : 0;
+	printf(KERN_INFO "%s: %sID %d, Fast-%d%s%s\n", ncr_name(np),
+		i == SCSI_NCR_SYMBIOS_NVRAM ? "Symbios format NVRAM, " :
+		(i == SCSI_NCR_TEKRAM_NVRAM ? "Tekram format NVRAM, " : ""),
+		np->myaddr,
+		np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10),
+		(np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity",
+		(np->rv_stest2 & 0x20) ? ", Differential" : "");
+
+	if (bootverbose > 1) {
+		printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+			ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
+			np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+
+		printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+			ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+	}
+
+	if (bootverbose && np->paddr2)
+		printf (KERN_INFO "%s: on-board RAM at 0x%lx\n",
+			ncr_name(np), np->paddr2);
+
+	return 0;
+}
+
+
+#ifdef SCSI_NCR_DEBUG_NVRAM
+
+/*
+**	Dump the host and per-target settings found in a Symbios
+**	format NVRAM to the kernel log (debug aid).
+*/
+__initfunc(
+void ncr_display_Symbios_nvram(ncb_p np, Symbios_nvram *nvram)
+)
+{
+	int i;
+
+	/* display Symbios nvram host data */
+	/* NOTE(review): " VERSBOSE" looks like a typo for " VERBOSE";
+	** left untouched since it is emitted output. */
+	printf("%s: HOST ID=%d%s%s%s%s\n",
+		ncr_name(np), nvram->host_id & 0x0f,
+		(nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+		(nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+		(nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERSBOSE" :"",
+		(nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+	/* display Symbios nvram drive data */
+	for (i = 0 ; i < 15 ; i++) {
+		struct Symbios_target *tn = &nvram->target[i];
+		printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+		ncr_name(np), i,
+		(tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+		(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+		(tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+		tn->bus_width,
+		tn->sync_period / 4,
+		tn->timeout);
+	}
+}
+
+/* Tekram NVRAM boot delay values in seconds, indexed by boot_delay_index. */
+static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
+
+/*
+**	Dump the host and per-target settings found in a Tekram
+**	format NVRAM to the kernel log (debug aid).
+*/
+__initfunc(
+void ncr_display_Tekram_nvram(ncb_p np, Tekram_nvram *nvram)
+)
+{
+	int i, tags, boot_delay;
+	char *rem;
+
+	/* display Tekram nvram host data */
+	tags = 2 << nvram->max_tags_index;
+	boot_delay = 0;
+	/* NOTE(review): the < 6 test makes the last table entry (120s)
+	** unreachable — confirm whether <= 6 was intended. */
+	if (nvram->boot_delay_index < 6)
+		boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+	switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+	default:
+	case 0: rem = ""; break;
+	case 1: rem = " REMOVABLE=boot device"; break;
+	case 2: rem = " REMOVABLE=all"; break;
+	}
+
+	printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+		ncr_name(np), nvram->host_id & 0x0f,
+		(nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+		(nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"",
+		(nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+		(nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+		(nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+		(nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+		(nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+		(nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+		rem, boot_delay, tags);
+
+	/* display Tekram nvram drive data */
+	for (i = 0; i <= 15; i++) {
+		int sync, j;
+		struct Tekram_target *tn = &nvram->target[i];
+		j = tn->sync_index & 0xf;
+		sync = j < 12 ? Tekram_sync[j] : 255;
+		printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
+		ncr_name(np), i,
+		(tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+		(tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+		(tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+		(tn->flags & TEKRAM_START_CMD) ? " START" : "",
+		(tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+		(tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+		sync);
+	}
+}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+
+/*
+** Host attach and initialisations.
+**
+** Allocate host data and ncb structure.
+** Request IO region and remap MMIO region.
+** Do chip initialization.
+** If all is OK, install interrupt handling and
+** start the timer daemon.
+*/
+
+__initfunc(
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device)
+)
+{
+ struct host_data *host_data;
+ ncb_p np;
+ struct Scsi_Host *instance = 0;
+ u_long flags = 0;
+ ncr_nvram *nvram = device->nvram;
+
+printf(KERN_INFO "ncr53c%s-%d: rev=0x%02x, base=0x%x, io_port=0x%x, irq=%d\n",
+ device->chip.name, unit, device->chip.revision_id, device->slot.base,
+ device->slot.io_port, device->slot.irq);
+
+ /*
+ ** Allocate host_data structure
+ */
+ if (!(instance = scsi_register(tpnt, sizeof(*host_data))))
+ goto attach_error;
+
+ /*
+ ** Initialize structure.
+ */
+ host_data = (struct host_data *) instance->hostdata;
+
+ /*
+ ** Align np and first ccb to 32 boundary for cache line
+ ** bursting when copying the global header.
+ */
+ np = (ncb_p) (((u_long) &host_data->_ncb_data) & NCB_ALIGN_MASK);
+ host_data->ncb = np;
+ bzero (np, sizeof (*np));
+
+ np->ccb = (ccb_p) (((u_long) &host_data->_ccb_data) & CCB_ALIGN_MASK);
+ bzero (np->ccb, sizeof (*np->ccb));
+
+ /*
+ ** Store input informations in the host data structure.
+ */
+ strncpy(np->chip_name, device->chip.name, sizeof(np->chip_name) - 1);
+ np->unit = unit;
+ np->verbose = driver_setup.verbose;
+ sprintf(np->inst_name, "ncr53c%s-%d", np->chip_name, np->unit);
+ np->device_id = device->chip.device_id;
+ np->revision_id = device->chip.revision_id;
+ np->features = device->chip.features;
+ np->clock_divn = device->chip.nr_divisor;
+ np->maxoffs = device->chip.offset_max;
+ np->maxburst = device->chip.burst_max;
+
+ np->script0 =
+ (struct script *) (((u_long) &host_data->script_data) & SCR_ALIGN_MASK);
+ np->scripth0 = &host_data->scripth_data;
+
+ /*
+ ** Initialize timer structure
+ **
+ */
+ init_timer(&np->timer);
+ np->timer.data = (unsigned long) np;
+ np->timer.function = ncr53c8xx_timeout;
+
+ /*
+ ** Try to map the controller chip to
+ ** virtual and physical memory.
+ */
+
+ np->paddr = device->slot.base;
+ np->paddr2 = (np->features & FE_RAM)? device->slot.base_2 : 0;
+
+#ifndef NCR_IOMAPPED
+ np->vaddr = remap_pci_mem((u_long) np->paddr, (u_long) 128);
+ if (!np->vaddr) {
+ printf("%s: can't map memory mapped IO region\n", ncr_name(np));
+ goto attach_error;
+ }
+ else
+ if (bootverbose > 1)
+ printf("%s: using memory mapped IO at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr);
+
+ /*
+ ** Make the controller's registers available.
+ ** Now the INB INW INL OUTB OUTW OUTL macros
+ ** can be used safely.
+ */
+
+ np->reg = (struct ncr_reg*) np->vaddr;
+
+#endif /* !defined NCR_IOMAPPED */
+
+ /*
+ ** Try to map the controller chip into iospace.
+ */
+
+ request_region(device->slot.io_port, 128, "ncr53c8xx");
+ np->port = device->slot.io_port;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_SYMBIOS_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Symbios_nvram(np, &nvram->data.Symbios);
+#endif
+ break;
+ case SCSI_NCR_TEKRAM_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Tekram_nvram(np, &nvram->data.Tekram);
+#endif
+ break;
+ default:
+ nvram = 0;
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ printf("%s: NVRAM: None or invalid data.\n", ncr_name(np));
+#endif
+ }
+ }
+#endif
+
+ /*
+ ** Do chip dependent initialization.
+ */
+ (void)ncr_prepare_setting(np, nvram);
+
+#ifndef NCR_IOMAPPED
+ if (np->paddr2 && sizeof(struct script) <= 4096) {
+ np->vaddr2 = remap_pci_mem((u_long) np->paddr2, (u_long) 4096);
+ if (!np->vaddr2) {
+ printf("%s: can't map memory mapped IO region\n", ncr_name(np));
+ goto attach_error;
+ }
+ else
+ if (bootverbose > 1)
+ printf("%s: on-board ram mapped at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr2);
+ }
+#endif /* !defined NCR_IOMAPPED */
+
+ /*
+ ** Fill Linux host instance structure
+ */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+ instance->max_channel = 0;
+ instance->max_id = np->maxwide ? 16 : 8;
+ instance->max_lun = SCSI_NCR_MAX_LUN;
+#endif
+#ifndef NCR_IOMAPPED
+ instance->base = (char *) np->reg;
+#endif
+ instance->irq = device->slot.irq;
+ instance->io_port = device->slot.io_port;
+ instance->n_io_port = 128;
+ instance->dma_channel = 0;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+ instance->select_queue_depths = ncr53c8xx_select_queue_depths;
+#endif
+
+ /*
+ ** Patch script to physical addresses
+ */
+ ncr_script_fill (&script0, &scripth0);
+
+ np->scripth = np->scripth0;
+ np->p_scripth = vtophys(np->scripth);
+
+ np->script = (np->vaddr2) ? (struct script *) np->vaddr2 : np->script0;
+ np->p_script = (np->vaddr2) ? np->paddr2 : vtophys(np->script0);
+
+ ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
+ ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));
+ np->ccb->p_ccb = vtophys (np->ccb);
+
+ /*
+ ** Patch the script for LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ np->script0->reselect[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01));
+ np->script0->reselect1[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ np->script0->reselect2[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ }
+
+ /*
+ ** init data structure
+ */
+
+ np->jump_tcb.l_cmd = cpu_to_scr(SCR_JUMP);
+ np->jump_tcb.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
+
+ /*
+ ** Reset chip.
+ */
+
+ OUTB (nc_istat, SRST);
+ DELAY (1000);
+ OUTB (nc_istat, 0 );
+
+ /*
+ ** Now check the cache handling of the pci chipset.
+ */
+
+ if (ncr_snooptest (np)) {
+ printf ("CACHE INCORRECTLY CONFIGURED.\n");
+ goto attach_error;
+ };
+
+ /*
+ ** Install the interrupt handler.
+ */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+#ifdef SCSI_NCR_SHARE_IRQ
+ if (bootverbose > 1)
+ printf("%s: requesting shared irq %d (dev_id=0x%lx)\n",
+ ncr_name(np), device->slot.irq, (u_long) np);
+ if (request_irq(device->slot.irq, ncr53c8xx_intr,
+ SA_INTERRUPT|SA_SHIRQ, "ncr53c8xx", np)) {
+#else
+ if (request_irq(device->slot.irq, ncr53c8xx_intr,
+ SA_INTERRUPT, "ncr53c8xx", np)) {
+#endif
+#else
+ if (request_irq(device->slot.irq, ncr53c8xx_intr,
+ SA_INTERRUPT, "ncr53c8xx")) {
+#endif
+ printf("%s: request irq %d failure\n", ncr_name(np), device->slot.irq);
+ goto attach_error;
+ }
+ np->irq = device->slot.irq;
+
+ /*
+ ** After SCSI devices have been opened, we cannot
+ ** reset the bus safely, so we do it here.
+ ** Interrupt handler does the real work.
+ ** Process the reset exception,
+ ** if interrupts are not enabled yet.
+ ** Then enable disconnects.
+ */
+ save_flags(flags); cli();
+ if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) {
+ printf("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np));
+ restore_flags(flags);
+ goto attach_error;
+ }
+ ncr_exception (np);
+ restore_flags(flags);
+
+ np->disc = 1;
+
+ /*
+ ** The middle-level SCSI driver does not
+ ** wait for devices to settle.
+ ** Wait synchronously if more than 2 seconds.
+ */
+ if (driver_setup.settle_delay > 2) {
+ printf("%s: waiting %d seconds for scsi devices to settle...\n",
+ ncr_name(np), driver_setup.settle_delay);
+ DELAY(1000000UL * driver_setup.settle_delay);
+ }
+
+ /*
+ ** Now let the generic SCSI driver
+ ** look for the SCSI devices on the bus ..
+ */
+
+ /*
+ ** start the timeout daemon
+ */
+ np->lasttime=0;
+ ncr_timeout (np);
+
+ /*
+ ** use SIMPLE TAG messages by default
+ */
+#ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG
+ np->order = M_SIMPLE_TAG;
+#endif
+
+ /*
+ ** Done.
+ */
+ if (!the_template) {
+ the_template = instance->hostt;
+ first_host = instance;
+ }
+
+ return 0;
+
+attach_error:
+ if (!instance) return -1;
+ printf("%s: detaching...\n", ncr_name(np));
+#ifndef NCR_IOMAPPED
+ if (np->vaddr) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr, 128);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr, (u_long) 128);
+ }
+ if (np->vaddr2) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr2, 4096);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr2, (u_long) 4096);
+ }
+#endif
+ if (np->port) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing IO region %x[%d]\n", ncr_name(np), np->port, 128);
+#endif
+ release_region(np->port, 128);
+ }
+ if (np->irq) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing irq %d\n", ncr_name(np), np->irq);
+#endif
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+ free_irq(np->irq, np);
+#else
+ free_irq(np->irq);
+#endif
+ }
+ scsi_unregister(instance);
+
+ return -1;
+ }
+
+/*==========================================================
+**
+**
+** Start execution of a SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+int ncr_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *host = cmd->host;
+/* Scsi_Device *device = cmd->device; */
+ struct host_data *host_data = (struct host_data *) host->hostdata;
+ ncb_p np = host_data->ncb;
+ tcb_p tp = &np->target[cmd->target];
+
+ ccb_p cp;
+ lcb_p lp;
+
+ int segments;
+ u_char qidx, nego, idmsg, *msgptr;
+ u_int msglen, msglen2;
+ u_long flags;
+ int xfer_direction;
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.buffer = NULL;
+
+ /*---------------------------------------------
+ **
+ ** Some shortcuts ...
+ **
+ **---------------------------------------------
+ */
+ if ((cmd->target == np->myaddr ) ||
+ (cmd->target >= MAX_TARGET) ||
+ (cmd->lun >= MAX_LUN )) {
+ return(DID_BAD_TARGET);
+ }
+
+ /*---------------------------------------------
+ **
+ ** Complete the 1st TEST UNIT READY command
+ ** with error condition if the device is
+ ** flagged NOSCAN, in order to speed up
+ ** the boot.
+ **
+ **---------------------------------------------
+ */
+ if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {
+ tp->usrflag &= ~UF_NOSCAN;
+ return DID_BAD_TARGET;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_TINY) {
+ PRINT_ADDR(cmd);
+ printf ("CMD=%x ", cmd->cmnd[0]);
+ }
+
+ /*---------------------------------------------------
+ **
+ ** Assign a ccb / bind cmd.
+ ** If resetting, shorten settle_time if necessary
+ ** in order to avoid spurious timeouts.
+ ** If resetting or no free ccb,
+ ** insert cmd into the waiting list.
+ **
+ **----------------------------------------------------
+ */
+ save_flags(flags); cli();
+
+ if (np->settle_time && cmd->timeout_per_command >= HZ &&
+ np->settle_time > jiffies + cmd->timeout_per_command - HZ) {
+ np->settle_time = jiffies + cmd->timeout_per_command - HZ;
+ }
+
+ if (np->settle_time || !(cp=ncr_get_ccb (np, cmd->target, cmd->lun))) {
+ insert_into_waiting_list(np, cmd);
+ restore_flags(flags);
+ return(DID_OK);
+ }
+ cp->cmd = cmd;
+
+ /*---------------------------------------------------
+ **
+ ** Enable tagged queue if asked by scsi ioctl
+ **
+ **----------------------------------------------------
+ */
+ if (!tp->usrtags && cmd->device && cmd->device->tagged_queue) {
+ tp->usrtags = SCSI_NCR_MAX_TAGS;
+ ncr_setmaxtags (np, tp, SCSI_NCR_MAX_TAGS);
+ }
+
+ /*---------------------------------------------------
+ **
+ ** timestamp
+ **
+ **----------------------------------------------------
+ */
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+ bzero (&cp->phys.header.stamp, sizeof (struct tstamp));
+ cp->phys.header.stamp.start = jiffies;
+#endif
+
+ /*----------------------------------------------------
+ **
+ ** Get device quirks from a speciality table.
+ **
+ ** @GENSCSI@
+ ** This should be a part of the device table
+ ** in "scsi_conf.c".
+ **
+ **----------------------------------------------------
+ */
+ if (tp->quirks & QUIRK_UPDATE) {
+ tp->quirks = ncr_lookup ((char*) &tp->inqdata[0]);
+#ifndef NCR_GETCC_WITHMSG
+ if (tp->quirks) {
+ PRINT_ADDR(cmd);
+ printf ("quirks=%x.\n", tp->quirks);
+ }
+#endif
+ }
+
+ /*---------------------------------------------------
+ **
+ ** negotiation required?
+ **
+ ** Only SCSI-II devices.
+ ** To negotiate with SCSI-I devices is dangerous, since
+ ** Synchronous Negotiation protocol is optional, and
+ ** INQUIRY data does not contain capabilities in byte 7.
+ **----------------------------------------------------
+ */
+
+ nego = 0;
+
+ if (cmd->lun == 0 && !tp->nego_cp &&
+ (tp->inqdata[2] & 0x7) >= 2 && tp->inqdata[7]) {
+ /*
+ ** negotiate wide transfers ?
+ */
+
+ if (!tp->widedone) {
+ if (tp->inqdata[7] & INQ7_WIDE16) {
+ nego = NS_WIDE;
+ } else
+ tp->widedone=1;
+ };
+
+ /*
+ ** negotiate synchronous transfers?
+ */
+
+ if (!nego && !tp->period) {
+ if ( 1
+#if defined (CDROM_ASYNC)
+ && ((tp->inqdata[0] & 0x1f) != 5)
+#endif
+ && (tp->inqdata[7] & INQ7_SYNC)) {
+ nego = NS_SYNC;
+ } else {
+ tp->period =0xffff;
+ tp->sval = 0xe0;
+ PRINT_ADDR(cmd);
+ printf ("asynchronous.\n");
+ };
+ };
+
+ /*
+ ** remember nego is pending for the target.
+ ** Avoid to start a nego for all queued commands
+ ** when tagged command queuing is enabled.
+ */
+
+ if (nego)
+ tp->nego_cp = cp;
+ };
+
+ /*---------------------------------------------------
+ **
+ ** choose a new tag ...
+ **
+ **----------------------------------------------------
+ */
+
+ if ((lp = tp->lp[cmd->lun]) && (lp->usetags)) {
+ /*
+ ** assign a tag to this ccb!
+ */
+ while (!cp->tag) {
+ ccb_p cp2 = lp->next_ccb;
+ lp->lasttag = lp->lasttag % 255 + 1;
+ while (cp2 && cp2->tag != lp->lasttag)
+ cp2 = cp2->next_ccb;
+ if (cp2) continue;
+ cp->tag=lp->lasttag;
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_ADDR(cmd);
+ printf ("using tag #%d.\n", cp->tag);
+ }
+ }
+ } else {
+ cp->tag=0;
+ }
+
+ /*----------------------------------------------------
+ **
+ ** Build the identify / tag / sdtr message
+ **
+ **----------------------------------------------------
+ */
+
+ idmsg = M_IDENTIFY | cmd->lun;
+
+ if (cp != np->ccb && ((np->disc && !(tp->usrflag & UF_NODISC)) || cp->tag))
+ idmsg |= 0x40;
+
+ msgptr = cp->scsi_smsg;
+ msglen = 0;
+ msgptr[msglen++] = idmsg;
+
+ if (cp->tag) {
+ char tag;
+
+ tag = np->order;
+ if (tag == 0) {
+ /*
+ ** Ordered write ops, unordered read ops.
+ */
+ switch (cmd->cmnd[0]) {
+ case 0x08: /* READ_SMALL (6) */
+ case 0x28: /* READ_BIG (10) */
+ case 0xa8: /* READ_HUGE (12) */
+ tag = M_SIMPLE_TAG;
+ break;
+ default:
+ tag = M_ORDERED_TAG;
+ }
+ }
+ /*
+ ** Have to force ordered tag to avoid timeouts
+ */
+ if ((lp = tp->lp[cmd->lun]) && (lp->force_ordered_tag)) {
+ tag = M_ORDERED_TAG;
+ lp->force_ordered_tag = 0;
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_ADDR(cmd);
+ printf ("Ordered Queue Tag forced\n");
+ }
+ }
+ msgptr[msglen++] = tag;
+ msgptr[msglen++] = cp -> tag;
+ }
+
+ switch (nego) {
+ case NS_SYNC:
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 3;
+ msgptr[msglen++] = M_X_SYNC_REQ;
+ msgptr[msglen++] = tp->maxoffs ? tp->minsync : 0;
+ msgptr[msglen++] = tp->maxoffs;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync msgout: ");
+ ncr_show_msg (&cp->scsi_smsg [msglen-5]);
+ printf (".\n");
+ };
+ break;
+ case NS_WIDE:
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 2;
+ msgptr[msglen++] = M_X_WIDE_REQ;
+ msgptr[msglen++] = tp->usrwide;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide msgout: ");
+ ncr_show_msg (&cp->scsi_smsg [msglen-4]);
+ printf (".\n");
+ };
+ break;
+ };
+
+ /*----------------------------------------------------
+ **
+ ** Build the identify message for getcc.
+ **
+ **----------------------------------------------------
+ */
+
+ cp -> scsi_smsg2 [0] = idmsg;
+ msglen2 = 1;
+
+ /*----------------------------------------------------
+ **
+ ** Build the data descriptors
+ **
+ **----------------------------------------------------
+ */
+
+ segments = ncr_scatter (cp, cp->cmd);
+
+ if (segments < 0) {
+ ncr_free_ccb(np, cp, cmd->target, cmd->lun);
+ restore_flags(flags);
+ return(DID_ERROR);
+ }
+
+ /*----------------------------------------------------
+ **
+ ** Guess xfer direction.
+ ** Spare some CPU by testing here frequently opcode.
+ **
+ **----------------------------------------------------
+ */
+ switch((int) cmd->cmnd[0]) {
+ case 0x08: /* READ(6) 08 */
+ case 0x28: /* READ(10) 28 */
+ case 0xA8: /* READ(12) A8 */
+ xfer_direction = XferIn;
+ break;
+ case 0x0A: /* WRITE(6) 0A */
+ case 0x2A: /* WRITE(10) 2A */
+ case 0xAA: /* WRITE(12) AA */
+ xfer_direction = XferOut;
+ break;
+ default:
+ xfer_direction = guess_xfer_direction((int) cmd->cmnd[0]);
+ break;
+ }
+
+ /*----------------------------------------------------
+ **
+ ** Set the SAVED_POINTER.
+ **
+ **----------------------------------------------------
+ */
+
+ cp->segments = segments;
+ if (!cp->data_len)
+ xfer_direction = XferNone;
+
+ switch (xfer_direction) {
+ u_long endp;
+ default:
+ case XferBoth:
+ cp->phys.header.savep =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, data_io));
+ cp->phys.header.goalp = cp->phys.header.savep;
+ break;
+ case XferIn:
+ endp = NCB_SCRIPT_PHYS (np, data_in) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep = cpu_to_scr(endp - segments*16);
+ break;
+ case XferOut:
+ endp = NCB_SCRIPTH_PHYS (np, data_out) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep = cpu_to_scr(endp - segments*16);
+ break;
+ case XferNone:
+ cp->phys.header.savep =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, no_data));
+ cp->phys.header.goalp = cp->phys.header.savep;
+ break;
+ }
+
+ cp->phys.header.lastp = cp->phys.header.savep;
+
+ /*----------------------------------------------------
+ **
+ ** fill in ccb
+ **
+ **----------------------------------------------------
+ **
+ **
+ ** physical -> virtual backlink
+ ** Generic SCSI command
+ */
+ cp->phys.header.cp = cp;
+ /*
+ ** Startqueue
+ */
+ cp->phys.header.launch.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+ cp->phys.header.launch.l_cmd = cpu_to_scr(SCR_JUMP);
+ /*
+ ** select
+ */
+ cp->phys.select.sel_id = cmd->target;
+ cp->phys.select.sel_scntl3 = tp->wval;
+ cp->phys.select.sel_sxfer = tp->sval;
+ /*
+ ** message
+ */
+ cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
+ cp->phys.smsg.size = cpu_to_scr(msglen);
+
+ cp->phys.smsg2.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2));
+ cp->phys.smsg2.size = cpu_to_scr(msglen2);
+ /*
+ ** command
+ */
+ cp->phys.cmd.addr = cpu_to_scr(vtophys (&cmd->cmnd[0]));
+ cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
+ /*
+ ** sense command
+ */
+ cp->phys.scmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd));
+ cp->phys.scmd.size = cpu_to_scr(6);
+ /*
+ ** patch requested size into sense command
+ */
+ cp->sensecmd[0] = 0x03;
+ cp->sensecmd[1] = cmd->lun << 5;
+ cp->sensecmd[4] = sizeof(cmd->sense_buffer);
+ /*
+ ** sense data
+ */
+ cp->phys.sense.addr =
+ cpu_to_scr(vtophys (&cmd->sense_buffer[0]));
+ cp->phys.sense.size = cpu_to_scr(sizeof(cmd->sense_buffer));
+ /*
+ ** status
+ */
+ cp->actualquirks = tp->quirks;
+ cp->host_status = nego ? HS_NEGOTIATE : HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+ cp->parity_status = 0;
+
+ cp->xerr_status = XE_OK;
+ cp->sync_status = tp->sval;
+ cp->nego_status = nego;
+ cp->wide_status = tp->wval;
+
+ /*----------------------------------------------------
+ **
+ ** Critical region: start this job.
+ **
+ **----------------------------------------------------
+ */
+
+ /*
+ ** reselect pattern and activate this job.
+ */
+
+ cp->jump_ccb.l_cmd =
+ cpu_to_scr((SCR_JUMP ^ IFFALSE (DATA (cp->tag))));
+
+ /* Compute a time limit greater than the middle-level driver one */
+ if (cmd->timeout_per_command > 0)
+ cp->tlimit = jiffies + cmd->timeout_per_command + NCR_TIMEOUT_INCREASE;
+ else
+ cp->tlimit = jiffies + 3600 * HZ; /* No timeout=one hour */
+ cp->magic = CCB_MAGIC;
+
+ /*
+ ** insert into start queue.
+ */
+
+ qidx = np->squeueput + 1;
+ if (qidx >= MAX_START) qidx=0;
+ np->squeue [qidx ] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->squeue [np->squeueput] = cpu_to_scr(CCB_PHYS (cp, phys));
+ np->squeueput = qidx;
+
+ if(DEBUG_FLAGS & DEBUG_QUEUE)
+ printf ("%s: queuepos=%d tryoffset=%d.\n", ncr_name (np),
+ np->squeueput,
+ (unsigned)(scr_to_cpu(np->script->startpos[0]) -
+ (NCB_SCRIPTH_PHYS (np, tryloop))));
+
+ /*
+ ** Script processor may be waiting for reselect.
+ ** Wake it up.
+ */
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (!np->stalling)
+#endif
+ OUTB (nc_istat, SIGP);
+
+ /*
+ ** and reenable interrupts
+ */
+ restore_flags(flags);
+
+ /*
+ ** Command is successfully queued.
+ */
+
+ return(DID_OK);
+}
+
+/*==========================================================
+**
+**
+** Start reset process.
+** If reset in progress do nothing.
+** The interrupt handler will reinitialize the chip.
+** The timeout handler will wait for settle_time before
+** clearing it and so resuming command processing.
+**
+**
+**==========================================================
+*/
+static void ncr_start_reset(ncb_p np, int settle_delay)
+{
+ u_long flags;
+
+ save_flags(flags); cli();
+
+ if (!np->settle_time) {
+ (void) ncr_reset_scsi_bus(np, 1, settle_delay);
+ }
+ restore_flags(flags);
+}
+
+static int ncr_reset_scsi_bus(ncb_p np, int enab_int, int settle_delay)
+{
+ u_int32 term;
+ int retv = 0;
+
+ np->settle_time = jiffies + settle_delay * HZ;
+
+ if (bootverbose > 1)
+ printf("%s: resetting, "
+ "command processing suspended for %d seconds\n",
+ ncr_name(np), settle_delay);
+
+ OUTB (nc_istat, SRST);
+ DELAY (1000);
+ OUTB (nc_istat, 0);
+ if (enab_int)
+ OUTW (nc_sien, RST);
+ /*
+ ** Enable Tolerant, reset IRQD if present and
+ ** properly set IRQ mode, prior to resetting the bus.
+ */
+ OUTB (nc_stest3, TE);
+ OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
+ OUTB (nc_scntl1, CRST);
+ DELAY (100);
+
+ if (!driver_setup.bus_check)
+ goto out;
+ /*
+ ** Check for no terminators or SCSI bus shorts to ground.
+ ** Read SCSI data bus, data parity bits and control signals.
+ ** We are expecting RESET to be TRUE and other signals to be
+ ** FALSE.
+ */
+ term = INB(nc_sstat0); /* rst, sdp0 */
+ term = ((term & 2) << 7) + ((term & 1) << 16);
+ term |= ((INB(nc_sstat2) & 0x01) << 25) | /* sdp1 */
+ (INW(nc_sbdl) << 9) | /* d15-0 */
+ INB(nc_sbcl); /* req, ack, bsy, sel, atn, msg, cd, io */
+
+ if (!(np->features & FE_WIDE))
+ term &= 0x3ffff;
+
+ if (term != (2<<7)) {
+ printf("%s: suspicious SCSI data while resetting the BUS.\n",
+ ncr_name(np));
+ printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+ "0x%lx, expecting 0x%lx\n",
+ ncr_name(np),
+ (np->features & FE_WIDE) ? "dp1,d15-8," : "",
+ (u_long)term, (u_long)(2<<7));
+ if (driver_setup.bus_check == 1)
+ retv = 1;
+ }
+out:
+ OUTB (nc_scntl1, 0);
+ return retv;
+}
+
+/*==========================================================
+**
+**
+** Reset the SCSI BUS.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+int ncr_reset_bus (Scsi_Cmnd *cmd, int sync_reset)
+{
+ struct Scsi_Host *host = cmd->host;
+/* Scsi_Device *device = cmd->device; */
+ struct host_data *host_data = (struct host_data *) host->hostdata;
+ ncb_p np = host_data->ncb;
+ ccb_p cp;
+ u_long flags;
+ int found;
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (np->stalling)
+ np->stalling = 0;
+#endif
+
+ save_flags(flags); cli();
+/*
+ * Return immediately if reset is in progress.
+ */
+ if (np->settle_time) {
+ restore_flags(flags);
+ return SCSI_RESET_PUNT;
+ }
+/*
+ * Start the reset process.
+ * The script processor is then assumed to be stopped.
+ * Commands will now be queued in the waiting list until a settle
+ * delay of 2 seconds will be completed.
+ */
+ ncr_start_reset(np, driver_setup.settle_delay);
+/*
+ * First, look in the wakeup list
+ */
+ for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd) {
+ found = 1;
+ break;
+ }
+ }
+/*
+ * Then, look in the waiting list
+ */
+ if (!found && retrieve_from_waiting_list(0, np, cmd))
+ found = 1;
+/*
+ * Wake-up all awaiting commands with DID_RESET.
+ */
+ reset_waiting_list(np);
+/*
+ * Wake-up all pending commands with HS_RESET -> DID_RESET.
+ */
+ ncr_wakeup(np, HS_RESET);
+/*
+ * If the involved command was not in a driver queue, and the
+ * scsi driver told us reset is synchronous, and the command is not
+ * currently in the waiting list, complete it with DID_RESET status,
+ * in order to keep it alive.
+ */
+ if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
+ cmd->result = ScsiResult(DID_RESET, 0);
+ cmd->scsi_done(cmd);
+ }
+
+ restore_flags(flags);
+
+ return SCSI_RESET_SUCCESS;
+}
+
+/*==========================================================
+**
+**
+** Abort an SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_abort_command (Scsi_Cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->host;
+/* Scsi_Device *device = cmd->device; */
+ struct host_data *host_data = (struct host_data *) host->hostdata;
+ ncb_p np = host_data->ncb;
+ ccb_p cp;
+ u_long flags;
+ int found;
+ int retv;
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (np->stalling == 2)
+ np->stalling = 0;
+#endif
+
+ save_flags(flags); cli();
+/*
+ * First, look for the scsi command in the waiting list
+ */
+ if (remove_from_waiting_list(np, cmd)) {
+ cmd->result = ScsiResult(DID_ABORT, 0);
+ cmd->scsi_done(cmd);
+ restore_flags(flags);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * Then, look in the wakeup list
+ */
+ for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (np->settle_time) {
+ restore_flags(flags);
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ /*
+ ** Disable reselect.
+ ** Remove it from startqueue.
+ ** Set cp->tlimit to 0. The ncr_timeout() handler will use
+ ** this condition in order to complete the canceled command
+ ** after the script skipped the ccb, if necessary.
+ */
+ cp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+ if (cp->phys.header.launch.l_paddr ==
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select))) {
+ printf ("%s: abort ccb=%p (skip)\n", ncr_name (np), cp);
+ cp->phys.header.launch.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, skip));
+ }
+
+ cp->tlimit = 0;
+ retv = SCSI_ABORT_PENDING;
+
+ /*
+ ** If there are no requests, the script
+ ** processor will sleep on SEL_WAIT_RESEL.
+ ** Let's wake it up, since it may have to work.
+ */
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (!np->stalling)
+#endif
+ OUTB (nc_istat, SIGP);
+
+ restore_flags(flags);
+
+ return retv;
+}
+
+/*==========================================================
+**
+** Linux release module stuff.
+**
+** Called before unloading the module
+** Detach the host.
+** We have to free resources and halt the NCR chip
+**
+**==========================================================
+*/
+
+#ifdef MODULE
+static int ncr_detach(ncb_p np)
+{
+ ccb_p cp;
+ tcb_p tp;
+ lcb_p lp;
+ int target, lun;
+ int i;
+
+ printf("%s: releasing host resources\n", ncr_name(np));
+
+/*
+** Stop the ncr_timeout process
+** Set release_stage to 1 and wait until ncr_timeout() sets it to 2.
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: stopping the timer\n", ncr_name(np));
+#endif
+ np->release_stage = 1;
+ for (i = 50 ; i && np->release_stage != 2 ; i--) DELAY(100000);
+ if (np->release_stage != 2)
+ printf("%s: the timer seems to be already stopped\n", ncr_name(np));
+ else np->release_stage = 2;
+
+/*
+** Disable chip interrupts
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: disabling chip interrupts\n", ncr_name(np));
+#endif
+ OUTW (nc_sien , 0);
+ OUTB (nc_dien , 0);
+
+/*
+** Free irq
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing irq %d\n", ncr_name(np), np->irq);
+#endif
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+ free_irq(np->irq, np);
+#else
+ free_irq(np->irq);
+#endif
+
+ /*
+ ** Reset NCR chip
+ ** Restore bios setting for automatic clock detection.
+ */
+
+ printf("%s: resetting chip\n", ncr_name(np));
+ OUTB (nc_istat, SRST);
+ DELAY (1000);
+ OUTB (nc_istat, 0 );
+
+ OUTB(nc_dmode, np->sv_dmode);
+ OUTB(nc_dcntl, np->sv_dcntl);
+ OUTB(nc_ctest3, np->sv_ctest3);
+ OUTB(nc_ctest4, np->sv_ctest4);
+ OUTB(nc_ctest5, np->sv_ctest5);
+ OUTB(nc_gpcntl, np->sv_gpcntl);
+ OUTB(nc_stest2, np->sv_stest2);
+
+ ncr_selectclock(np, np->sv_scntl3);
+
+ /*
+ ** Release Memory mapped IO region and IO mapped region
+ */
+
+#ifndef NCR_IOMAPPED
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr, 128);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr, (u_long) 128);
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr2, 4096);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr2, (u_long) 4096);
+#endif
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing IO region %x[%d]\n", ncr_name(np), np->port, 128);
+#endif
+ release_region(np->port, 128);
+
+ /*
+ ** Free allocated ccb(s)
+ */
+
+ while ((cp=np->ccb->link_ccb) != NULL) {
+ np->ccb->link_ccb = cp->link_ccb;
+ if (cp->host_status) {
+ printf("%s: shall free an active ccb (host_status=%d)\n",
+ ncr_name(np), cp->host_status);
+ }
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing ccb (%lx)\n", ncr_name(np), (u_long) cp);
+#endif
+ m_free(cp, sizeof(*cp));
+ }
+
+ /*
+ ** Free allocated tp(s)
+ */
+
+ for (target = 0; target < MAX_TARGET ; target++) {
+ tp=&np->target[target];
+ for (lun = 0 ; lun < MAX_LUN ; lun++) {
+ lp = tp->lp[lun];
+ if (lp) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp);
+#endif
+ m_free(lp, sizeof(*lp));
+ }
+ }
+ }
+
+ printf("%s: host resources successfully released\n", ncr_name(np));
+
+ return 1;
+}
+#endif
+
+/*==========================================================
+**
+**
+** Complete execution of a SCSI command.
+** Signal completion to the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+
+void ncr_complete (ncb_p np, ccb_p cp)
+{
+ Scsi_Cmnd *cmd;
+ tcb_p tp;
+ lcb_p lp;
+
+ /*
+ ** Sanity check
+ */
+
+ if (!cp || (cp->magic!=CCB_MAGIC) || !cp->cmd) return;
+ cp->magic = 1;
+ cp->tlimit= 0;
+ cmd = cp->cmd;
+
+ /*
+ ** No Reselect anymore.
+ */
+ cp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+
+ /*
+ ** No starting.
+ */
+ cp->phys.header.launch.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+
+ /*
+ ** timestamp
+ ** Optional, spare some CPU time
+ */
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+ ncb_profile (np, cp);
+#endif
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printf ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp & 0xfff,
+ cp->host_status,cp->scsi_status);
+
+ cmd = cp->cmd;
+ cp->cmd = NULL;
+ tp = &np->target[cmd->target];
+ lp = tp->lp[cmd->lun];
+
+ /*
+ ** We do not queue more than 1 ccb per target
+ ** with negotiation at any time. If this ccb was
+ ** used for negotiation, clear this info in the tcb.
+ */
+
+ if (cp == tp->nego_cp)
+ tp->nego_cp = 0;
+
+ /*
+ ** Check for parity errors.
+ */
+
+ if (cp->parity_status) {
+ PRINT_ADDR(cmd);
+ printf ("%d parity error(s), fallback.\n", cp->parity_status);
+ /*
+ ** fallback to asynch transfer.
+ */
+ tp->usrsync=255;
+ tp->period = 0;
+ }
+
+ /*
+ ** Check for extended errors.
+ */
+
+ if (cp->xerr_status != XE_OK) {
+ PRINT_ADDR(cmd);
+ switch (cp->xerr_status) {
+ case XE_EXTRA_DATA:
+ printf ("extraneous data discarded.\n");
+ break;
+ case XE_BAD_PHASE:
+ printf ("illegal scsi phase (4/5).\n");
+ break;
+ default:
+ printf ("extended error %d.\n", cp->xerr_status);
+ break;
+ }
+ if (cp->host_status==HS_COMPLETE)
+ cp->host_status = HS_FAIL;
+ }
+
+ /*
+ ** Check the status.
+ */
+ if ( (cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_GOOD ||
+ cp->scsi_status == S_COND_MET)) {
+ /*
+ ** All went well (GOOD status).
+ ** CONDITION MET status is returned on
+ ** `Pre-Fetch' or `Search data' success.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ /*
+ ** if (cp->phys.header.lastp != cp->phys.header.goalp)...
+ **
+ ** @RESID@
+ ** Could dig out the correct value for resid,
+ ** but it would be quite complicated.
+ **
+ ** The ah1542.c driver sets it to 0 too ...
+ */
+
+ /*
+ ** Try to assign a ccb to this nexus
+ */
+ ncr_alloc_ccb (np, cmd->target, cmd->lun);
+
+ /*
+ ** On INQUIRY cmd (0x12) save some data.
+ ** Clear questionable capabilities.
+ */
+ if (cmd->lun == 0 && cmd->cmnd[0] == 0x12) {
+ if (np->unit < SCSI_NCR_MAX_HOST) {
+ if (driver_setup.force_sync_nego)
+ ((char *) cmd->request_buffer)[7] |= INQ7_SYNC;
+ else
+ ((char *) cmd->request_buffer)[7] &=
+ (target_capabilities[np->unit].and_map[cmd->target]);
+ }
+ bcopy ( cmd->request_buffer,
+ &tp->inqdata,
+ sizeof (tp->inqdata));
+
+ /*
+ ** set number of tags
+ */
+ ncr_setmaxtags (np, tp, driver_setup.default_tags);
+ /*
+ ** prepare negotiation of synch and wide.
+ */
+ ncr_negotiate (np, tp);
+
+ /*
+ ** force quirks update before next command start
+ */
+ tp->quirks |= QUIRK_UPDATE;
+ }
+
+ /*
+ ** Announce changes to the generic driver.
+ */
+ if (lp) {
+ ncr_settags (tp, lp);
+ if (lp->reqlink != lp->actlink)
+ ncr_opennings (np, lp, cmd);
+ };
+
+ tp->bytes += cp->data_len;
+ tp->transfers ++;
+
+ /*
+ ** If tags was reduced due to queue full,
+ ** increase tags if 100 good status received.
+ */
+ if (tp->numtags < tp->maxtags) {
+ ++tp->num_good;
+ if (tp->num_good >= 100) {
+ tp->num_good = 0;
+ ++tp->numtags;
+ if (tp->numtags == 1) {
+ PRINT_ADDR(cmd);
+ printf("tagged command queueing resumed\n");
+ }
+ }
+ }
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == (S_SENSE|S_GOOD) ||
+ cp->scsi_status == (S_SENSE|S_CHECK_COND))) {
+
+ /*
+ ** Check condition code
+ */
+ cmd->result = ScsiResult(DID_OK, S_CHECK_COND);
+
+ if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+ u_char * p = (u_char*) & cmd->sense_buffer;
+ int i;
+ printf ("\n%s: sense data:", ncr_name (np));
+ for (i=0; i<14; i++) printf (" %x", *p++);
+ printf (".\n");
+ }
+
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_BUSY ||
+ cp->scsi_status == S_CONFLICT)) {
+
+ /*
+ ** Target is busy.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_QUEUE_FULL)) {
+
+ /*
+ ** Target is stuffed.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ /*
+ ** Suspend tagged queuing and start good status counter.
+ ** Announce changes to the generic driver.
+ */
+ if (tp->numtags) {
+ PRINT_ADDR(cmd);
+ printf("QUEUE FULL! suspending tagged command queueing\n");
+ tp->numtags = 0;
+ tp->num_good = 0;
+ if (lp) {
+ ncr_settags (tp, lp);
+ if (lp->reqlink != lp->actlink)
+ ncr_opennings (np, lp, cmd);
+ };
+ }
+ } else if ((cp->host_status == HS_SEL_TIMEOUT)
+ || (cp->host_status == HS_TIMEOUT)) {
+
+ /*
+ ** No response
+ */
+ cmd->result = ScsiResult(DID_TIME_OUT, cp->scsi_status);
+
+ } else if (cp->host_status == HS_RESET) {
+
+ /*
+ ** SCSI bus reset
+ */
+ cmd->result = ScsiResult(DID_RESET, cp->scsi_status);
+
+ } else if (cp->host_status == HS_ABORTED) {
+
+ /*
+ ** Transfer aborted
+ */
+ cmd->result = ScsiResult(DID_ABORT, cp->scsi_status);
+
+ } else {
+
+ /*
+ ** Other protocol messes
+ */
+ PRINT_ADDR(cmd);
+ printf ("COMMAND FAILED (%x %x) @%p.\n",
+ cp->host_status, cp->scsi_status, cp);
+
+ cmd->result = ScsiResult(DID_ERROR, cp->scsi_status);
+ }
+
+ /*
+ ** trace output
+ */
+
+ if (tp->usrflag & UF_TRACE) {
+ u_char * p;
+ int i;
+ PRINT_ADDR(cmd);
+ printf (" CMD:");
+ p = (u_char*) &cmd->cmnd[0];
+ for (i=0; i<cmd->cmd_len; i++) printf (" %x", *p++);
+
+ if (cp->host_status==HS_COMPLETE) {
+ switch (cp->scsi_status) {
+ case S_GOOD:
+ printf (" GOOD");
+ break;
+ case S_CHECK_COND:
+ printf (" SENSE:");
+ p = (u_char*) &cmd->sense_buffer;
+ for (i=0; i<14; i++)
+ printf (" %x", *p++);
+ break;
+ default:
+ printf (" STAT: %x\n", cp->scsi_status);
+ break;
+ }
+ } else printf (" HOSTERROR: %x", cp->host_status);
+ printf ("\n");
+ }
+
+ /*
+ ** Free this ccb
+ */
+ ncr_free_ccb (np, cp, cmd->target, cmd->lun);
+
+ /*
+ ** requeue awaiting scsi commands
+ */
+ if (np->waiting_list) requeue_waiting_list(np);
+
+ /*
+ ** signal completion to generic driver.
+ */
+ cmd->scsi_done (cmd);
+}
+
+/*==========================================================
+**
+**
+** Signal all (or one) control block done.
+**
+**
+**==========================================================
+*/
+
+void ncr_wakeup (ncb_p np, u_long code)
+{
+ /*
+ ** Starting at the default ccb and following
+ ** the links, complete all jobs with a
+ ** host_status greater than "disconnect".
+ **
+ ** If the "code" parameter is not zero,
+ ** complete all jobs that are not IDLE:
+ ** busy/negotiating/disconnected CCBs get their
+ ** host_status forced to 'code' before completion,
+ ** so ncr_complete() reports them with that status.
+ */
+
+ ccb_p cp = np->ccb;
+ while (cp) {
+ switch (cp->host_status) {
+
+ case HS_IDLE:
+ /* free ccb: nothing to complete */
+ break;
+
+ case HS_DISCONNECT:
+ if(DEBUG_FLAGS & DEBUG_TINY) printf ("D");
+ /* fall through */
+
+ case HS_BUSY:
+ case HS_NEGOTIATE:
+ /* still in flight: only force-complete when a code is given */
+ if (!code) break;
+ cp->host_status = code;
+
+ /* fall through */
+
+ default:
+ /* host_status past "disconnect" means the job is done */
+ ncr_complete (np, cp);
+ break;
+ };
+ cp = cp -> link_ccb;
+ };
+}
+
+/*==========================================================
+**
+**
+** Start NCR chip.
+**
+**
+**==========================================================
+*/
+
+void ncr_init (ncb_p np, int reset, char * msg, u_long code)
+{
+ /*
+ ** (Re)start the 53c8xx chip and reset driver soft state.
+ **
+ ** reset: non-zero -> issue a software reset (SRST) first,
+ **        otherwise only the fifos are cleared.
+ ** msg:   optional reason string, logged when non-NULL.
+ ** code:  host_status handed to ncr_wakeup() for all pending
+ **        jobs that are not idle (e.g. HS_RESET).
+ */
+ int i;
+
+ /*
+ ** Reset chip if asked, otherwise just clear fifos.
+ */
+ if (reset) {
+ OUTB (nc_istat, SRST);
+ DELAY (10000);
+ }
+ else {
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+ }
+
+ /*
+ ** Message.
+ */
+
+ if (msg) printf (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg);
+
+ /*
+ ** Clear Start Queue: point every slot at the idle script.
+ */
+ for (i=0;i<MAX_START;i++)
+ np -> squeue [i] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+
+ /*
+ ** Start at first entry.
+ */
+ np->squeueput = 0;
+ np->script0->startpos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np, tryloop));
+ np->script0->start0 [0] = cpu_to_scr(SCR_INT ^ IFFALSE (0));
+
+ /*
+ ** Wakeup all pending jobs (reported with host_status 'code').
+ */
+ ncr_wakeup (np, code);
+
+ /*
+ ** Init chip.
+ ** NOTE(review): register values/side effects below follow the
+ ** chip data manual; the rv_* fields were precomputed at attach.
+ */
+
+ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */
+ OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
+ /* full arb., ena parity, par->ATN */
+ OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
+
+ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
+
+ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
+ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
+ OUTB (nc_istat , SIGP ); /* Signal Process */
+ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
+ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
+
+ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
+ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
+ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
+
+ OUTB (nc_stest2, EXT|np->rv_stest2); /* Extended Sreq/Sack filtering */
+ OUTB (nc_stest3, TE); /* TolerANT enable */
+ OUTB (nc_stime0, 0x0d ); /* HTH disabled STO 0.4 sec. */
+
+ /*
+ ** Disable disconnects.
+ */
+
+ np->disc = 0;
+
+ /*
+ ** Enable GPIO0 pin for writing if LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ OUTOFFB (nc_gpcntl, 0x01);
+ }
+
+ /*
+ ** Upload the script into on-board RAM
+ ** (vaddr2 is only set when the board has script RAM).
+ */
+ if (np->vaddr2) {
+ if (bootverbose)
+ printf ("%s: copying script fragments into the on-board RAM ...\n", ncr_name(np));
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+ memcpy_toio(np->script, np->script0, sizeof(struct script));
+#else
+ memcpy(np->script, np->script0, sizeof(struct script));
+#endif
+ }
+
+ /*
+ ** enable ints
+ */
+
+ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST);
+ OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID);
+
+ /*
+ ** For 895/6 enable SBMC interrupt and save current SCSI bus mode.
+ */
+ if (np->features & FE_ULTRA2) {
+ OUTONW (nc_sien, SBMC);
+ np->scsi_mode = INB (nc_stest4) & SMODE;
+ }
+
+ /*
+ ** Fill in target structure.
+ ** Reinitialize usrsync.
+ ** Reinitialize usrwide.
+ ** Prepare sync negotiation according to actual SCSI bus mode.
+ */
+
+ for (i=0;i<MAX_TARGET;i++) {
+ tcb_p tp = &np->target[i];
+
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+
+ /* 255 means "no sync"; otherwise clamp usrsync into
+ ** the controller's [minsync, maxsync] window. */
+ if (tp->usrsync != 255) {
+ if (tp->usrsync <= np->maxsync) {
+ if (tp->usrsync < np->minsync) {
+ tp->usrsync = np->minsync;
+ }
+ }
+ else
+ tp->usrsync = 255;
+ };
+
+ if (tp->usrwide > np->maxwide)
+ tp->usrwide = np->maxwide;
+
+ ncr_negotiate (np, tp);
+ }
+
+ /*
+ ** Start script processor.
+ */
+
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+}
+
+/*==========================================================
+**
+** Prepare the negotiation values for wide and
+** synchronous transfers.
+**
+**==========================================================
+*/
+
+static void ncr_negotiate (struct ncb* np, struct tcb* tp)
+{
+ /*
+ ** Compute tp->minsync / tp->maxoffs and arm a fresh
+ ** sync + wide negotiation for this target.
+ **
+ ** minsync unit is 4ns !
+ */
+
+ u_long minsync = tp->usrsync;
+
+ /*
+ ** SCSI bus mode limit: on a single-ended bus do not go
+ ** faster than factor 12 (Ultra).
+ */
+
+ if (np->scsi_mode && np->scsi_mode == SMODE_SE) {
+ if (minsync < 12) minsync = 12;
+ }
+
+ /*
+ ** if not scsi 2
+ ** don't believe FAST!
+ ** (inqdata[2] low nibble is the ANSI version from INQUIRY —
+ ** presumably valid only after an INQUIRY completed; confirm.)
+ */
+
+ if ((minsync < 50) && (tp->inqdata[2] & 0x0f) < 2)
+ minsync=50;
+
+ /*
+ ** our limit ..
+ */
+
+ if (minsync < np->minsync)
+ minsync = np->minsync;
+
+ /*
+ ** divider limit: too slow for the chip -> disable sync (255).
+ */
+
+ if (minsync > np->maxsync)
+ minsync = 255;
+
+ tp->minsync = minsync;
+ tp->maxoffs = (minsync<255 ? np->maxoffs : 0);
+
+ /*
+ ** period=0: has to negotiate sync transfer
+ */
+
+ tp->period=0;
+
+ /*
+ ** widedone=0: has to negotiate wide transfer
+ */
+ tp->widedone=0;
+}
+
+/*==========================================================
+**
+** Get clock factor and sync divisor for a given
+** synchronous factor period.
+** Returns the clock factor (in sxfer) and scntl3
+** synchronous divisor field.
+**
+**==========================================================
+*/
+
+static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p)
+{
+ u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */
+ int div = np->clock_divn; /* Number of divisors supported */
+ u_long fak; /* Sync factor in sxfer */
+ u_long per; /* Period in tenths of ns */
+ u_long kpc; /* (per * clk) */
+
+ /*
+ ** Compute the synchronous period in tenths of nano-seconds
+ ** (sfac is the SCSI "sync factor": 10=Ultra2, 11, 12=Ultra, ...).
+ */
+ if (sfac <= 10) per = 250;
+ else if (sfac == 11) per = 303;
+ else if (sfac == 12) per = 500;
+ else per = 40 * sfac;
+
+ /*
+ ** Look for the greatest clock divisor that allows an
+ ** input speed faster than the period.
+ **
+ ** NOTE(review): if no divisor satisfies the test, div
+ ** underflows to -1 and div_10M[div] below reads out of
+ ** bounds.  Callers appear to guarantee sfac >= np->minsync
+ ** so this should not happen — confirm.
+ */
+ kpc = per * clk;
+ while (--div >= 0)
+ if (kpc >= (div_10M[div] << 2)) break;
+
+ /*
+ ** Calculate the lowest clock factor that allows an output
+ ** speed not faster than the period.
+ */
+ fak = (kpc - 1) / div_10M[div] + 1;
+
+#if 0 /* This optimization does not seem very usefull */
+
+ per = (fak * div_10M[div]) / clk;
+
+ /*
+ ** Why not to try the immediate lower divisor and to choose
+ ** the one that allows the fastest output speed ?
+ ** We don't want input speed too much greater than output speed.
+ */
+ if (div >= 1 && fak < 8) {
+ u_long fak2, per2;
+ fak2 = (kpc - 1) / div_10M[div-1] + 1;
+ per2 = (fak2 * div_10M[div-1]) / clk;
+ if (per2 < per && fak2 <= 8) {
+ fak = fak2;
+ per = per2;
+ --div;
+ }
+ }
+#endif
+
+ if (fak < 4) fak = 4; /* Should never happen, too bad ... */
+
+ /*
+ ** Compute and return sync parameters for the ncr:
+ ** sxfer clock factor is biased by -4; scntl3 gets the divisor
+ ** field plus the ultra bit for factors below 25.
+ */
+ *fakp = fak - 4;
+ *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0);
+}
+
+
+/*==========================================================
+**
+** Set actual values, sync status and patch all ccbs of
+** a target according to new sync/wide agreement.
+**
+**==========================================================
+*/
+
+static void ncr_set_sync_wide_status (ncb_p np, u_char target)
+{
+ /*
+ ** Program the chip with the target's agreed sync (sval/sxfer)
+ ** and wide (wval/scntl3) settings, mirror them in the soft
+ ** state, and propagate them to every CCB of that target so
+ ** the scripts reload the right values on reselection.
+ */
+ ccb_p cp;
+ tcb_p tp = &np->target[target];
+
+ /*
+ ** set actual value and sync_status
+ */
+ OUTB (nc_sxfer, tp->sval);
+ np->sync_st = tp->sval;
+ OUTB (nc_scntl3, tp->wval);
+ np->wide_st = tp->wval;
+
+ /*
+ ** patch ALL ccbs of this target.
+ */
+ for (cp = np->ccb; cp; cp = cp->link_ccb) {
+ if (!cp->cmd) continue;
+ if (cp->cmd->target != target) continue;
+ cp->sync_status = tp->sval;
+ cp->wide_status = tp->wval;
+ };
+}
+
+/*==========================================================
+**
+** Switch sync mode for current job and it's target
+**
+**==========================================================
+*/
+
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer)
+{
+ /*
+ ** Record the sync agreement just negotiated for cp's target
+ ** and apply it (chip registers + all CCBs of the target).
+ ** sxfer low 5 bits = offset (0 -> async); scntl3 carries the
+ ** clock divisor.
+ */
+ Scsi_Cmnd *cmd;
+ tcb_p tp;
+ /* NOTE(review): ctest0 is presumed to hold the id of the
+ ** currently connected target — confirm against chip manual. */
+ u_char target = INB (nc_ctest0) & 0x0f;
+ u_char idiv;
+
+ assert (cp);
+ if (!cp) return;
+
+ cmd = cp->cmd;
+ assert (cmd);
+ if (!cmd) return;
+ assert (target == (cmd->target & 0xf));
+
+ tp = &np->target[target];
+
+ /* async, or no offset: fall back to the controller default */
+ if (!scntl3 || !(sxfer & 0x1f))
+ scntl3 = np->rv_scntl3;
+ scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) | (np->rv_scntl3 & 0x07);
+
+ /*
+ ** Deduce the value of controller sync period from scntl3.
+ ** period is in tenths of nano-seconds.
+ */
+
+ idiv = ((scntl3 >> 4) & 0x7);
+ if ((sxfer & 0x1f) && idiv)
+ tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+ else
+ tp->period = 0xffff;
+
+ /*
+ ** Stop there if sync parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ PRINT_ADDR(cmd);
+ if (sxfer & 0x01f) {
+ /* transfer rate in units of 100 kB/s, doubled for wide */
+ unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+ unsigned mb10 = (f10 + tp->period/2) / tp->period;
+ char *scsi;
+
+ /*
+ ** Disable extended Sreq/Sack filtering
+ ** (not allowed at FAST-10 and above).
+ */
+ if (tp->period <= 2000) OUTOFFB (nc_stest2, EXT);
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (tp->period < 500) scsi = "FAST-40";
+ else if (tp->period < 1000) scsi = "FAST-20";
+ else if (tp->period < 2000) scsi = "FAST-10";
+ else scsi = "FAST-5";
+
+ printf ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+ tp->widedone > 1 ? "WIDE " : "",
+ mb10 / 10, mb10 % 10, tp->period / 10, sxfer & 0x1f);
+ } else
+ printf ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+/*==========================================================
+**
+** Switch wide mode for current job and it's target
+** SCSI specs say: a SCSI device that accepts a WDTR
+** message shall reset the synchronous agreement to
+** asynchronous mode.
+**
+**==========================================================
+*/
+
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack)
+{
+ /*
+ ** Record the wide agreement just negotiated for cp's target.
+ ** wide: 1 -> 16 bit, 0 -> 8 bit.
+ ** ack:  non-zero when we are accepting a target-initiated WDTR;
+ **       per SCSI specs a WDTR resets the sync agreement, so the
+ **       sync offset is cleared in that case.
+ */
+ Scsi_Cmnd *cmd;
+ u_short target = INB (nc_ctest0) & 0x0f;
+ tcb_p tp;
+ u_char scntl3;
+ u_char sxfer;
+
+ assert (cp);
+ if (!cp) return;
+
+ cmd = cp->cmd;
+ assert (cmd);
+ if (!cmd) return;
+ assert (target == (cmd->target & 0xf));
+
+ tp = &np->target[target];
+ tp->widedone = wide+1; /* non-zero marks negotiation done */
+ scntl3 = (tp->wval & (~EWS)) | (wide ? EWS : 0);
+
+ sxfer = ack ? 0 : tp->sval;
+
+ /*
+ ** Stop there if sync/wide parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (bootverbose >= 2) {
+ PRINT_ADDR(cmd);
+ if (scntl3 & EWS)
+ printf ("WIDE SCSI (16 bit) enabled.\n");
+ else
+ printf ("WIDE SCSI disabled.\n");
+ }
+
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+/*==========================================================
+**
+** Switch tagged mode for a target.
+**
+**==========================================================
+*/
+
+static void ncr_setmaxtags (ncb_p np, tcb_p tp, u_long numtags)
+{
+ /*
+ ** Set the tagged-queue depth for target tp (clamped to the
+ ** user limit) and re-announce tag usage on every LUN.
+ */
+ int l;
+ if (numtags > tp->usrtags)
+ numtags = tp->usrtags;
+ tp->numtags = numtags;
+ tp->maxtags = numtags;
+
+ for (l=0; l<MAX_LUN; l++) {
+ lcb_p lp;
+ u_char wastags;
+
+ /* NOTE(review): tp was already dereferenced above, so this
+ ** check is redundant — kept as-is. */
+ if (!tp) break;
+ lp=tp->lp[l];
+ if (!lp) continue;
+
+ wastags = lp->usetags;
+ ncr_settags (tp, lp);
+
+ /* NOTE(review): %ld with a u_long argument — harmless on
+ ** these platforms but strictly a signedness mismatch. */
+ if (numtags > 1 && lp->reqccbs > 1) {
+ PRINT_LUN(np, tp - np->target, l);
+ printf("using tagged command queueing, up to %ld cmds/lun\n", numtags);
+ }
+ else if (numtags <= 1 && wastags) {
+ PRINT_LUN(np, tp - np->target, l);
+ printf("disabling tagged command queueing\n");
+ }
+ };
+}
+
+static void ncr_settags (tcb_p tp, lcb_p lp)
+{
+ /*
+ ** Decide whether LUN lp may use tagged queuing and compute
+ ** the requested link/ccb counts announced to the generic
+ ** driver (reqlink/reqccbs).
+ */
+ u_char reqtags, tmp;
+
+ if ((!tp) || (!lp)) return;
+
+ /*
+ ** only devices conformant to ANSI Version >= 2
+ ** only devices capable of tagged commands
+ ** only disk devices (INQUIRY peripheral type 0x00)
+ ** only if enabled by user ..
+ */
+ if (( tp->inqdata[2] & 0x7) >= 2 &&
+ ( tp->inqdata[7] & INQ7_QUEUE) && ((tp->inqdata[0] & 0x1f)==0x00)
+ && tp->numtags > 1) {
+ reqtags = tp->numtags;
+ if (lp->actlink <= 1)
+ lp->usetags=reqtags;
+ } else {
+ reqtags = 1;
+ if (lp->actlink <= 1)
+ lp->usetags=0;
+ };
+
+ /*
+ ** don't announce more than available.
+ */
+ tmp = lp->actccbs;
+ if (tmp > reqtags) tmp = reqtags;
+ lp->reqlink = tmp;
+
+ /*
+ ** don't discard if announced.
+ */
+ tmp = lp->actlink;
+ if (tmp < reqtags) tmp = reqtags;
+ lp->reqccbs = tmp;
+}
+
+/*----------------------------------------------------
+**
+** handle user commands
+**
+**----------------------------------------------------
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+static void ncr_usercmd (ncb_p np)
+{
+ /*
+ ** Apply a user control command (np->user.cmd, set via the
+ ** proc/control interface) to the targets selected by the
+ ** np->user.target bitmask, then clear the command.
+ */
+ u_char t;
+ tcb_p tp;
+
+ switch (np->user.cmd) {
+
+ case 0: return;
+
+ case UC_SETSYNC:
+ /* set user sync factor and restart negotiation */
+ for (t=0; t<MAX_TARGET; t++) {
+ if (!((np->user.target>>t)&1)) continue;
+ tp = &np->target[t];
+ tp->usrsync = np->user.data;
+ ncr_negotiate (np, tp);
+ };
+ break;
+
+ case UC_SETTAGS:
+ if (np->user.data > SCSI_NCR_MAX_TAGS)
+ np->user.data = SCSI_NCR_MAX_TAGS;
+ for (t=0; t<MAX_TARGET; t++) {
+ if (!((np->user.target>>t)&1)) continue;
+ np->target[t].usrtags = np->user.data;
+ ncr_setmaxtags (np, &np->target[t], np->user.data);
+ };
+ break;
+
+ case UC_SETDEBUG:
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ ncr_debug = np->user.data;
+#endif
+ break;
+
+ case UC_SETORDER:
+ np->order = np->user.data;
+ break;
+
+ case UC_SETWIDE:
+ /* clamp requested width to controller capability */
+ for (t=0; t<MAX_TARGET; t++) {
+ u_long size;
+ if (!((np->user.target>>t)&1)) continue;
+ tp = &np->target[t];
+ size = np->user.data;
+ if (size > np->maxwide) size=np->maxwide;
+ tp->usrwide = size;
+ ncr_negotiate (np, tp);
+ };
+ break;
+
+ case UC_SETFLAG:
+ for (t=0; t<MAX_TARGET; t++) {
+ if (!((np->user.target>>t)&1)) continue;
+ tp = &np->target[t];
+ tp->usrflag = np->user.data;
+ };
+ break;
+
+ case UC_CLEARPROF:
+ bzero(&np->profile, sizeof(np->profile));
+ break;
+#ifdef UC_DEBUG_ERROR_RECOVERY
+ case UC_DEBUG_ERROR_RECOVERY:
+ np->debug_error_recovery = np->user.data;
+ break;
+#endif
+ }
+ np->user.cmd=0;
+}
+#endif
+
+
+/*=====================================================================
+**
+** Embedded error recovery debugging code.
+**
+**=====================================================================
+**
+** This code is conditionned by SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT.
+** It only can be enabled after boot-up with a control command.
+**
+** Every 30 seconds the timer handler of the driver decides to
+** change the behaviour of the driver in order to trigger errors.
+**
+** If last command was "debug_error_recovery sge", the driver
+** sets sync offset of all targets that use sync transfers to 2,
+** and so hopes a SCSI gross error at the next read operation.
+**
+** If last command was "debug_error_recovery abort", the driver
+** does not signal new scsi commands to the script processor, until
+** it is asked to abort or reset a command by the mid-level driver.
+**
+** If last command was "debug_error_recovery reset", the driver
+** does not signal new scsi commands to the script processor, until
+** it is asked to reset a command by the mid-level driver.
+**
+** If last command was "debug_error_recovery parity", the driver
+** will assert ATN on the next DATA IN phase mismatch, and so will
+** behave as if a parity error had been detected.
+**
+** The command "debug_error_recovery none" makes the driver behave
+** normally.
+**
+**=====================================================================
+*/
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+static void ncr_trigger_errors (ncb_p np)
+{
+ /*
+ ** If np->debug_error_recovery is not zero, we want to
+ ** simulate common errors in order to test error recovery.
+ ** Called from the timer handler; acts at most once every
+ ** 30 seconds.
+ */
+ do {
+ static u_long last = 0l;
+
+ if (!np->debug_error_recovery)
+ break;
+ if (!last)
+ last = jiffies;
+ else if (jiffies < last + 30*HZ)
+ break;
+ last = jiffies;
+ /*
+ * This one triggers SCSI gross errors:
+ * force a tiny sync offset (2) on all sync targets.
+ */
+ if (np->debug_error_recovery == 1) {
+ int i;
+ printf("%s: testing error recovery from SCSI gross error...\n", ncr_name(np));
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ if (np->target[i].sval & 0x1f) {
+ np->target[i].sval &= ~0x1f;
+ np->target[i].sval += 2;
+ }
+ }
+ }
+ /*
+ * This one triggers abort from the mid-level driver.
+ */
+ else if (np->debug_error_recovery == 2) {
+ printf("%s: testing error recovery from mid-level driver abort()...\n", ncr_name(np));
+ np->stalling = 2;
+ }
+ /*
+ * This one triggers reset from the mid-level driver.
+ */
+ else if (np->debug_error_recovery == 3) {
+ printf("%s: testing error recovery from mid-level driver reset()...\n", ncr_name(np));
+ np->stalling = 3;
+ }
+ /*
+ * This one set ATN on phase mismatch in DATA IN phase and so
+ * will behave as on scsi parity error detected.
+ */
+ else if (np->debug_error_recovery == 4) {
+ printf("%s: testing data in parity error...\n", ncr_name(np));
+ np->assert_atn = 1;
+ }
+ } while (0);
+}
+#endif
+
+/*==========================================================
+**
+**
+** ncr timeout handler.
+**
+**
+**==========================================================
+**
+** Misused to keep the driver running when
+** interrupts are not configured correctly.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_timeout (ncb_p np)
+{
+ /*
+ ** Periodic driver timer: re-arms itself, resumes command
+ ** processing after a bus-settle delay, expires timed-out
+ ** CCBs, and (optionally) polls for lost interrupts.
+ ** NOTE(review): uses save_flags()/cli() for mutual exclusion,
+ ** i.e. the pre-2.2 UP locking model.
+ */
+ u_long thistime = jiffies;
+ u_long count = 0;
+ ccb_p cp;
+ u_long flags;
+
+ /*
+ ** If release process in progress, let's go
+ ** Set the release stage from 1 to 2 to synchronize
+ ** with the release process.
+ */
+
+ if (np->release_stage) {
+ if (np->release_stage == 1) np->release_stage = 2;
+ return;
+ }
+
+ /* re-arm ourselves (timer API changed in 1.3: expires became
+ ** absolute, hence the conditional "jiffies +") */
+ np->timer.expires =
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+ jiffies +
+#endif
+ SCSI_NCR_TIMER_INTERVAL;
+
+ add_timer(&np->timer);
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ ncr_trigger_errors (np);
+#endif
+
+ /*
+ ** If we are resetting the ncr, wait for settle_time before
+ ** clearing it. Then command processing will be resumed.
+ */
+ if (np->settle_time) {
+ if (np->settle_time <= thistime) {
+ if (bootverbose > 1)
+ printf("%s: command processing resumed\n", ncr_name(np));
+ save_flags(flags); cli();
+ np->settle_time = 0;
+ np->disc = 1;
+ requeue_waiting_list(np);
+ restore_flags(flags);
+ }
+ return;
+ }
+
+ /*
+ ** Since the generic scsi driver only allows us 0.5 second
+ ** to perform abort of a command, we must look at ccbs about
+ ** every 0.25 second.
+ */
+ if (np->lasttime + (HZ>>2) <= thistime) {
+ /*
+ ** block ncr interrupts
+ */
+ save_flags(flags); cli();
+
+ np->lasttime = thistime;
+
+ /*
+ ** Reset profile data to avoid ugly overflow
+ ** (Limited to 1024 GB for 32 bit architecture)
+ */
+ if (np->profile.num_kbytes > (~0UL >> 2))
+ bzero(&np->profile, sizeof(np->profile));
+
+ /*----------------------------------------------------
+ **
+ ** handle ncr chip timeouts
+ **
+ ** Assumption:
+ ** We have a chance to arbitrate for the
+ ** SCSI bus at least every 10 seconds.
+ **
+ **----------------------------------------------------
+ */
+#if 0
+ if (thistime < np->heartbeat + HZ + HZ)
+ np->latetime = 0;
+ else
+ np->latetime++;
+#endif
+
+ /*----------------------------------------------------
+ **
+ ** handle ccb timeouts
+ **
+ **----------------------------------------------------
+ */
+
+ for (cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for timed out ccbs.
+ */
+ if (!cp->host_status) continue;
+ count++;
+ /*
+ ** Have to force ordered tag to avoid timeouts
+ ** (the CCB is close to its deadline).
+ */
+ if (cp->cmd && cp->tlimit && cp->tlimit <=
+ thistime + NCR_TIMEOUT_INCREASE + SCSI_NCR_TIMEOUT_ALERT) {
+ lcb_p lp;
+ lp = np->target[cp->cmd->target].lp[cp->cmd->lun];
+ if (lp && !lp->force_ordered_tag) {
+ lp->force_ordered_tag = 1;
+ }
+ }
+ /*
+ ** ncr_abort_command() cannot complete canceled
+ ** commands immediately. It sets tlimit to zero
+ ** and ask the script to skip the scsi process if
+ ** necessary. We have to complete this work here.
+ */
+
+ if (cp->tlimit) continue;
+
+ switch (cp->host_status) {
+
+ case HS_BUSY:
+ case HS_NEGOTIATE:
+ /*
+ ** still in start queue ?
+ */
+ if (cp->phys.header.launch.l_paddr ==
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, skip)))
+ continue;
+
+ /* fall through */
+ case HS_DISCONNECT:
+ cp->host_status=HS_ABORTED;
+ };
+ cp->tag = 0;
+
+ /*
+ ** wakeup this ccb.
+ */
+ ncr_complete (np, cp);
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (!np->stalling)
+#endif
+ OUTB (nc_istat, SIGP);
+ }
+ restore_flags(flags);
+ }
+
+#ifdef SCSI_NCR_BROKEN_INTR
+ if (INB(nc_istat) & (INTF|SIP|DIP)) {
+
+ /*
+ ** Process pending interrupts.
+ */
+ save_flags(flags); cli();
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("{");
+ ncr_exception (np);
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("}");
+ restore_flags(flags);
+ }
+#endif /* SCSI_NCR_BROKEN_INTR */
+}
+
+/*==========================================================
+**
+** log message for real hard errors
+**
+** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)."
+** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf."
+**
+** exception register:
+** ds: dstat
+** si: sist
+**
+** SCSI bus lines:
+** so: control lines as driver by NCR.
+** si: control lines as seen by NCR.
+** sd: scsi data lines as seen by NCR.
+**
+** wide/fastmode:
+** sxfer: (see the manual)
+** scntl3: (see the manual)
+**
+** current script command:
+** dsp: script address (relative to start of script).
+** dbc: first word of script command.
+**
+** First 16 register of the chip:
+** r0..rf
+**
+**==========================================================
+*/
+
+static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat)
+{
+ /*
+ ** Log a hard error: identify which script area the DSP points
+ ** into (script / scripth / other memory), print the exception
+ ** registers, the faulting script word and a 16-register dump.
+ */
+ u_int32 dsp;
+ int script_ofs;
+ int script_size;
+ char *script_name;
+ u_char *script_base;
+ int i;
+
+ dsp = INL (nc_dsp);
+
+ if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+ script_ofs = dsp - np->p_script;
+ script_size = sizeof(struct script);
+ script_base = (u_char *) np->script;
+ script_name = "script";
+ }
+ else if (np->p_scripth < dsp &&
+ dsp <= np->p_scripth + sizeof(struct scripth)) {
+ script_ofs = dsp - np->p_scripth;
+ script_size = sizeof(struct scripth);
+ script_base = (u_char *) np->scripth;
+ script_name = "scripth";
+ } else {
+ script_ofs = dsp;
+ script_size = 0;
+ script_base = 0;
+ script_name = "mem";
+ }
+
+ printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
+ ncr_name (np), (unsigned)INB (nc_ctest0)&0x0f, dstat, sist,
+ (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl),
+ (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs,
+ (unsigned)INL (nc_dbc));
+
+ /* dump the faulting script word if the offset is word-aligned
+ ** and inside the identified script area */
+ if (((script_ofs & 3) == 0) &&
+ (unsigned)script_ofs < script_size) {
+ printf ("%s: script cmd = %08x\n", ncr_name(np),
+ (int) *(ncrcmd *)(script_base + script_ofs));
+ }
+
+ printf ("%s: regdump:", ncr_name(np));
+ for (i=0; i<16;i++)
+ printf (" %02x", (unsigned)INB_OFF(i));
+ printf (".\n");
+}
+
+/*============================================================
+**
+** ncr chip exception handler.
+**
+**============================================================
+**
+** In normal cases, interrupt conditions occur one at a
+** time. The ncr is able to stack in some extra registers
+** other interrupts that will occur after the first one.
+** But several interrupts may occur at the same time.
+**
+** We probably should only try to deal with the normal
+** case, but it seems that multiple interrupts occur in
+** some cases that are not abnormal at all.
+**
+** The most frequent interrupt condition is Phase Mismatch.
+** We should want to service this interrupt quickly.
+** A SCSI parity error may be delivered at the same time.
+** The SIR interrupt is not very frequent in this driver,
+** since the INTFLY is likely used for command completion
+** signaling.
+** The Selection Timeout interrupt may be triggered with
+** IID and/or UDC.
+** The SBMC interrupt (SCSI Bus Mode Change) may probably
+** occur at any time.
+**
+** This handler tries to deal as cleverly as possible with all
+** the above.
+**
+**============================================================
+*/
+
+void ncr_exception (ncb_p np)
+{
+ /*
+ ** Chip interrupt dispatcher: service "interrupt on the fly"
+ ** completions first, then the clean cases (SBMC, PAR, phase
+ ** mismatch, programmed SIR), then fix-up cases (RST, STO),
+ ** and finally hard errors, which are logged and answered
+ ** with a bus reset.
+ */
+ u_char istat, dstat;
+ u_short sist;
+ int i;
+
+ /*
+ ** interrupt on the fly ?
+ ** Since the global header may be copied back to a CCB
+ ** using a posted PCI memory write, the last operation on
+ ** the istat register is a READ in order to flush posted
+ ** PCI commands (Btw, the 'do' loop is probably useless).
+ */
+ istat = INB (nc_istat);
+ if (istat & INTF) {
+ do {
+ OUTB (nc_istat, (istat & SIGP) | INTF);
+ istat = INB (nc_istat);
+ } while (istat & INTF);
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
+ np->profile.num_fly++;
+ ncr_wakeup (np, 0);
+ };
+
+ if (!(istat & (SIP|DIP)))
+ return;
+
+ np->profile.num_int++;
+
+ if (istat & CABRT)
+ OUTB (nc_istat, CABRT);
+
+ /*
+ ** Steinbach's Guideline for Systems Programming:
+ ** Never test for an error condition you don't know how to handle.
+ */
+
+ sist = (istat & SIP) ? INW (nc_sist) : 0;
+ dstat = (istat & DIP) ? INB (nc_dstat) : 0;
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printf ("<%d|%x:%x|%x:%x>",
+ (int)INB(nc_scr0),
+ dstat,sist,
+ (unsigned)INL(nc_dsp),
+ (unsigned)INL(nc_dbc));
+
+ /*========================================================
+ ** First, interrupts we want to service cleanly.
+ **
+ ** Phase mismatch is the most frequent interrupt, and
+ ** so we have to service it as quickly and as cleanly
+ ** as possible.
+ ** Programmed interrupts are rarely used in this driver,
+ ** but we must handle them cleanly anyway.
+ ** We try to deal with PAR and SBMC combined with
+ ** some other interrupt(s).
+ **=========================================================
+ */
+
+ if (!(sist & (STO|GEN|HTH|SGE|UDC|RST)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if ((sist & SBMC) && ncr_int_sbmc (np))
+ return;
+ if ((sist & PAR) && ncr_int_par (np))
+ return;
+ if (sist & MA) {
+ ncr_int_ma (np);
+ return;
+ }
+ if (dstat & SIR) {
+ ncr_int_sir (np);
+ return;
+ }
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 2.
+ */
+ if (!(sist & (SBMC|PAR)) && !(dstat & SSI)) {
+ printf( "%s: unknown interrupt(s) ignored, "
+ "ISTAT=%x DSTAT=%x SIST=%x\n",
+ ncr_name(np), istat, dstat, sist);
+ return;
+ }
+
+ OUTONB (nc_dcntl, (STD|NOCOM));
+ return;
+ };
+
+ /*========================================================
+ ** Now, interrupts that need some fixing up.
+ ** Order and multiple interrupts is so less important.
+ **
+ ** If SRST has been asserted, we just reset the chip.
+ **
+ ** Selection is entirely handled by the chip. If the
+ ** chip says STO, we trust it. Seems some other
+ ** interrupts may occur at the same time (UDC, IID), so
+ ** we ignore them. In any case we do enough fix-up
+ ** in the service routine.
+ ** We just exclude some fatal dma errors.
+ **=========================================================
+ */
+
+ if (sist & RST) {
+ ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET);
+ return;
+ };
+
+ if ((sist & STO) &&
+ !(dstat & (MDPE|BF|ABRT))) {
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 1.
+ */
+ OUTONB (nc_ctest3, CLF);
+
+ ncr_int_sto (np);
+ return;
+ };
+
+ /*=========================================================
+ ** Now, interrupts we are not able to recover cleanly.
+ ** (At least for the moment).
+ **
+ ** Do the register dump.
+ ** Log message for real hard errors.
+ ** Clear all fifos.
+ ** For MDPE, BF, ABORT, IID, SGE and HTH we reset the
+ ** BUS and the chip.
+ ** We are more soft for UDC.
+ **=========================================================
+ */
+ if (jiffies - np->regtime > 10*HZ) {
+ np->regtime = jiffies;
+ for (i = 0; i<sizeof(np->regdump); i++)
+ ((char*)&np->regdump)[i] = INB_OFF(i);
+ np->regdump.nc_dstat = dstat;
+ np->regdump.nc_sist = sist;
+ };
+
+ ncr_log_hard_error(np, sist, dstat);
+
+ printf ("%s: have to clear fifos.\n", ncr_name (np));
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+
+ /*
+ ** NOTE(review): 'ABORT' here — the DSTAT abort bit is spelled
+ ** ABRT everywhere else in this file (cf. the nc_dien mask and
+ ** the tests above).  Verify ABORT is an alias for the DSTAT
+ ** bit and not the SCSI ABORT message code (0x06).
+ */
+ if ((sist & (SGE)) ||
+ (dstat & (MDPE|BF|ABORT|IID))) {
+ ncr_start_reset(np, driver_setup.settle_delay);
+ return;
+ };
+
+ if (sist & HTH) {
+ printf ("%s: handshake timeout\n", ncr_name(np));
+ ncr_start_reset(np, driver_setup.settle_delay);
+ return;
+ };
+
+ if (sist & UDC) {
+ printf ("%s: unexpected disconnect\n", ncr_name(np));
+ if (INB (nc_scr1) != 0xff) {
+ OUTB (nc_scr1, HS_UNEXPECTED);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup));
+ };
+ ncr_start_reset(np, driver_setup.settle_delay);
+ return;
+ };
+
+ /*=========================================================
+ ** We just miss the cause of the interrupt. :(
+ ** Print a message. The timeout will do the real work.
+ **=========================================================
+ */
+ printf ("%s: unknown interrupt\n", ncr_name(np));
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for selection timeout
+**
+**==========================================================
+**
+** There seems to be a bug in the 53c810.
+** Although a STO-Interrupt is pending,
+** it continues executing script commands.
+** But it will fail and interrupt (IID) on
+** the next instruction where it's looking
+** for a valid phase.
+**
+**----------------------------------------------------------
+*/
+
+void ncr_int_sto (ncb_p np)
+{
+ u_long dsa, scratcha, diff;
+ ccb_p cp;
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
+
+ /*
+ ** look for ccb and set the status.
+ */
+
+ dsa = INL (nc_dsa);
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ if (cp) {
+ cp-> host_status = HS_SEL_TIMEOUT;
+ ncr_complete (np, cp);
+ };
+
+ /*
+ ** repair start queue
+ */
+
+ scratcha = INL (nc_scratcha);
+ diff = scratcha - NCB_SCRIPTH_PHYS (np, tryloop);
+
+/* assert ((diff <= MAX_START * 20) && !(diff % 20));*/
+
+ if ((diff <= MAX_START * 20) && !(diff % 20)) {
+ np->script->startpos[0] = cpu_to_scr(scratcha);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+ return;
+ };
+ ncr_init (np, 1, "selection timeout", HS_FAIL);
+ np->disc = 1;
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI bus mode change
+**
+**==========================================================
+**
+** spi2-r12 11.2.3 says a transceiver mode change must
+** generate a reset event and a device that detects a reset
+** event shall initiate a hard reset. It says also that a
+** device that detects a mode change shall set data transfer
+** mode to eight bit asynchronous, etc...
+** So, just resetting should be enough.
+**
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_int_sbmc (ncb_p np)
+{
+ u_char scsi_mode = INB (nc_stest4) & SMODE;
+
+ printf("%s: SCSI bus mode change from %x to %x.\n",
+ ncr_name(np), np->scsi_mode, scsi_mode);
+
+ np->scsi_mode = scsi_mode;
+
+ /*
+ ** Suspend command processing for 1 second and
+ ** reinitialize all except the chip.
+ */
+ np->settle_time = jiffies + HZ;
+ ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET);
+
+ return 1;
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI parity error.
+**
+**==========================================================
+**
+** SCSI parity errors are handled by the SCSI script.
+** So, we just print some message.
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_int_par (ncb_p np)
+{
+ printf("%s: SCSI parity error detected\n", ncr_name(np));
+ return 0;
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for phase errors.
+**
+**
+**==========================================================
+**
+** We have to construct a new transfer descriptor,
+** to transfer the rest of the current block.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_int_ma (ncb_p np)
+{
+ u_int32 dbc;
+ u_int32 rest;
+ u_int32 dsp;
+ u_int32 dsa;
+ u_int32 nxtdsp;
+ u_int32 *vdsp;
+ u_int32 oadr, olen;
+ u_int32 *tblp;
+ ncrcmd *newcmd;
+ u_char cmd, sbcl;
+ ccb_p cp;
+
+ dsp = INL (nc_dsp);
+ dbc = INL (nc_dbc);
+ sbcl = INB (nc_sbcl);
+
+ cmd = dbc >> 24;
+ rest = dbc & 0xffffff;
+
+ /*
+ ** Take into account dma fifo and various buffers and latches,
+ ** only if the interrupted phase is an OUTPUT phase.
+ */
+
+ if ((cmd & 1) == 0) {
+ u_char ctest5, ss0, ss2;
+ u_short delta;
+
+ ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0;
+ if (ctest5 & DFS)
+ delta=(((ctest5 << 8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff;
+ else
+ delta=(INB (nc_dfifo) - rest) & 0x7f;
+
+ /*
+ ** The data in the dma fifo has not been transferred to
+ ** the target -> add the amount to the rest
+ ** and clear the data.
+ ** Check the sstat2 register in case of wide transfer.
+ */
+
+ rest += delta;
+ ss0 = INB (nc_sstat0);
+ if (ss0 & OLF) rest++;
+ if (ss0 & ORF) rest++;
+ if (INB(nc_scntl3) & EWS) {
+ ss2 = INB (nc_sstat2);
+ if (ss2 & OLF1) rest++;
+ if (ss2 & ORF1) rest++;
+ };
+
+ OUTONB (nc_ctest3, CLF ); /* clear dma fifo */
+ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
+
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printf ("P%x%x RL=%d D=%d SS0=%x ", cmd&7, sbcl&7,
+ (unsigned) rest, (unsigned) delta, ss0);
+
+ } else {
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printf ("P%x%x RL=%d ", cmd&7, sbcl&7, rest);
+ if ((cmd & 7) != 1) {
+ OUTONB (nc_ctest3, CLF );
+ OUTB (nc_stest3, TE|CSF);
+ }
+ }
+
+ /*
+ ** locate matching cp
+ */
+ dsa = INL (nc_dsa);
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ if (!cp) {
+ printf ("%s: SCSI phase error fixup: CCB already dequeued (0x%08lx)\n",
+ ncr_name (np), (u_long) np->header.cp);
+ return;
+ }
+ if (cp != np->header.cp) {
+ printf ("%s: SCSI phase error fixup: CCB address mismatch (0x%08lx != 0x%08lx)\n",
+ ncr_name (np), (u_long) cp, (u_long) np->header.cp);
+/* return;*/
+ }
+
+ /*
+ ** find the interrupted script command,
+ ** and the address at which to continue.
+ */
+
+ if (dsp == vtophys (&cp->patch[2])) {
+ vdsp = &cp->patch[0];
+ nxtdsp = vdsp[3];
+ } else if (dsp == vtophys (&cp->patch[6])) {
+ vdsp = &cp->patch[4];
+ nxtdsp = vdsp[3];
+ } else if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+ vdsp = (u_int32 *) ((char*)np->script - np->p_script + dsp -8);
+ nxtdsp = dsp;
+ } else {
+ vdsp = (u_int32 *) ((char*)np->scripth - np->p_scripth + dsp -8);
+ nxtdsp = dsp;
+ };
+
+ /*
+ ** log the information
+ */
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printf ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+ cp, np->header.cp,
+ (unsigned)dsp,
+ (unsigned)nxtdsp, vdsp, cmd);
+ };
+
+ /*
+ ** get old startaddress and old length.
+ */
+
+ oadr = scr_to_cpu(vdsp[1]);
+
+ if (cmd & 0x10) { /* Table indirect */
+ tblp = (u_int32 *) ((char*) &cp->phys + oadr);
+ olen = scr_to_cpu(tblp[0]);
+ oadr = scr_to_cpu(tblp[1]);
+ } else {
+ tblp = (u_int32 *) 0;
+ olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+ };
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+ (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+ tblp,
+ (unsigned) olen,
+ (unsigned) oadr);
+ };
+
+ /*
+ ** check cmd against assumed interrupted script command.
+ */
+
+ if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) {
+ PRINT_ADDR(cp->cmd);
+ printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
+ (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
+
+ return;
+ }
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if ((cmd & 7) == 1 && np->assert_atn) {
+ np->assert_atn = 0;
+ OUTONB(nc_socl, CATN);
+ }
+#endif
+
+ /*
+ ** if old phase not dataphase, leave here.
+ */
+
+ if (cmd & 0x06) {
+ PRINT_ADDR(cp->cmd);
+ printf ("phase change %x-%x %d@%08x resid=%d.\n",
+ cmd&7, sbcl&7, (unsigned)olen,
+ (unsigned)oadr, (unsigned)rest);
+
+ OUTONB (nc_dcntl, (STD|NOCOM));
+ return;
+ };
+
+ /*
+ ** choose the correct patch area.
+ ** if savep points to one, choose the other.
+ */
+
+ newcmd = cp->patch;
+ if (cp->phys.header.savep == cpu_to_scr(vtophys (newcmd))) newcmd+=4;
+
+ /*
+ ** fillin the commands
+ */
+
+ newcmd[0] = cpu_to_scr(((cmd & 0x0f) << 24) | rest);
+ newcmd[1] = cpu_to_scr(oadr + olen - rest);
+ newcmd[2] = cpu_to_scr(SCR_JUMP);
+ newcmd[3] = cpu_to_scr(nxtdsp);
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ PRINT_ADDR(cp->cmd);
+ printf ("newcmd[%d] %x %x %x %x.\n",
+ (int) (newcmd - cp->patch),
+ (unsigned)scr_to_cpu(newcmd[0]),
+ (unsigned)scr_to_cpu(newcmd[1]),
+ (unsigned)scr_to_cpu(newcmd[2]),
+ (unsigned)scr_to_cpu(newcmd[3]));
+ }
+ /*
+ ** fake the return address (to the patch).
+ ** and restart script processor at dispatcher.
+ */
+ np->profile.num_break++;
+ OUTL (nc_temp, vtophys (newcmd));
+ if ((cmd & 7) == 0)
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch));
+ else
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, checkatn));
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for programmed interrupts.
+**
+**
+**==========================================================
+*/
+
+static int ncr_show_msg (u_char * msg)
+{
+ u_char i;
+ printf ("%x",*msg);
+ if (*msg==M_EXTENDED) {
+ for (i=1;i<8;i++) {
+ if (i-1>msg[1]) break;
+ printf ("-%x",msg[i]);
+ };
+ return (i+1);
+ } else if ((*msg & 0xf0) == 0x20) {
+ printf ("-%x",msg[1]);
+ return (2);
+ };
+ return (1);
+}
+
+void ncr_int_sir (ncb_p np)
+{
+ u_char scntl3;
+ u_char chg, ofs, per, fak, wide;
+ u_char num = INB (nc_dsps);
+ ccb_p cp=0;
+ u_long dsa;
+ u_char target = INB (nc_ctest0) & 0x0f;
+ tcb_p tp = &np->target[target];
+ int i;
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);
+
+ switch (num) {
+ case SIR_SENSE_RESTART:
+ case SIR_STALL_RESTART:
+ break;
+ case SIR_STALL_QUEUE: /* Ignore, just restart the script */
+ goto out;
+
+ default:
+ /*
+ ** lookup the ccb
+ */
+ dsa = INL (nc_dsa);
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ assert (cp);
+ if (!cp)
+ goto out;
+ assert (cp == np->header.cp);
+ if (cp != np->header.cp)
+ goto out;
+ }
+
+ switch (num) {
+ u_long endp;
+ case SIR_DATA_IO_IS_OUT:
+ case SIR_DATA_IO_IS_IN:
+/*
+** We did not guess the direction of transfer. We have to wait for
+** actual data direction driven by the target before setting
+** pointers. We must patch the global header too.
+*/
+ if (num == SIR_DATA_IO_IS_OUT) {
+ endp = NCB_SCRIPTH_PHYS (np, data_out) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep =
+ cpu_to_scr(endp - cp->segments*16);
+ } else {
+ endp = NCB_SCRIPT_PHYS (np, data_in) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep =
+ cpu_to_scr(endp - cp->segments*16);
+ }
+
+ cp->phys.header.lastp = cp->phys.header.savep;
+ np->header.savep = cp->phys.header.savep;
+ np->header.goalp = cp->phys.header.goalp;
+ np->header.lastp = cp->phys.header.lastp;
+
+ OUTL (nc_temp, scr_to_cpu(np->header.savep));
+ OUTL (nc_dsp, scr_to_cpu(np->header.savep));
+ return;
+ /* break; */
+
+/*--------------------------------------------------------------------
+**
+** Processing of interrupted getcc selects
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_SENSE_RESTART:
+ /*------------------------------------------
+ ** Script processor is idle.
+ ** Look for interrupted "check cond"
+ **------------------------------------------
+ */
+
+ if (DEBUG_FLAGS & DEBUG_RESTART)
+ printf ("%s: int#%d",ncr_name (np),num);
+ cp = (ccb_p) 0;
+ for (i=0; i<MAX_TARGET; i++) {
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" t%d", i);
+ tp = &np->target[i];
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+");
+ cp = tp->hold_cp;
+ if (!cp) continue;
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+");
+ if ((cp->host_status==HS_BUSY) &&
+ (cp->scsi_status==S_CHECK_COND))
+ break;
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf ("- (remove)");
+ tp->hold_cp = cp = (ccb_p) 0;
+ };
+
+ if (cp) {
+ if (DEBUG_FLAGS & DEBUG_RESTART)
+ printf ("+ restart job ..\n");
+ OUTL (nc_dsa, CCB_PHYS (cp, phys));
+ OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, getcc));
+ return;
+ };
+
+ /*
+ ** no job, resume normal processing
+ */
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" -- remove trap\n");
+ np->script->start0[0] = cpu_to_scr(SCR_INT ^ IFFALSE (0));
+ break;
+
+ case SIR_SENSE_FAILED:
+ /*-------------------------------------------
+ ** While trying to select for
+ ** getting the condition code,
+ ** a target reselected us.
+ **-------------------------------------------
+ */
+ if (DEBUG_FLAGS & DEBUG_RESTART) {
+ PRINT_ADDR(cp->cmd);
+ printf ("in getcc reselect by t%d.\n",
+ (int)INB(nc_ssid) & 0x0f);
+ }
+
+ /*
+ ** Mark this job
+ */
+ cp->host_status = HS_BUSY;
+ cp->scsi_status = S_CHECK_COND;
+ np->target[cp->cmd->target].hold_cp = cp;
+
+ /*
+ ** And patch code to restart it.
+ */
+ np->script->start0[0] = cpu_to_scr(SCR_INT);
+ break;
+
+/*-----------------------------------------------------------------------------
+**
+** Was Sie schon immer ueber transfermode negotiation wissen wollten ...
+**
+** We try to negotiate sync and wide transfer only after
+ ** a successful inquire command. We look at byte 7 of the
+** inquire data to determine the capabilities of the target.
+**
+** When we try to negotiate, we append the negotiation message
+** to the identify and (maybe) simple tag message.
+** The host status field is set to HS_NEGOTIATE to mark this
+** situation.
+**
+ ** If the target doesn't answer this message immediately
+** (as required by the standard), the SIR_NEGO_FAIL interrupt
+** will be raised eventually.
+** The handler removes the HS_NEGOTIATE status, and sets the
+** negotiated value to the default (async / nowide).
+**
+** If we receive a matching answer immediately, we check it
+** for validity, and set the values.
+**
+** If we receive a Reject message immediately, we assume the
+** negotiation has failed, and fall back to standard values.
+**
+** If we receive a negotiation message while not in HS_NEGOTIATE
+** state, it's a target initiated negotiation. We prepare a
+** (hopefully) valid answer, set our parameters, and send back
+** this answer to the target.
+**
+** If the target doesn't fetch the answer (no message out phase),
+** we assume the negotiation has failed, and fall back to default
+** settings.
+**
+** When we set the values, we adjust them in all ccbs belonging
+** to this target, in the controller's register, and in the "phys"
+** field of the controller's struct ncb.
+**
+** Possible cases: hs sir msg_in value send goto
+** We try to negotiate:
+ ** -> target doesn't msgin  NEG FAIL noop defa. - dispatch
+** -> target rejected our msg NEG FAIL reject defa. - dispatch
+** -> target answered (ok) NEG SYNC sdtr set - clrack
+** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad
+** -> target answered (ok) NEG WIDE wdtr set - clrack
+** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad
+** -> any other msgin NEG FAIL noop defa. - dispatch
+**
+** Target tries to negotiate:
+** -> incoming message --- SYNC sdtr set SDTR -
+** -> incoming message --- WIDE wdtr set WDTR -
+** We sent our answer:
+** -> target doesn't msgout --- PROTO ? defa. - dispatch
+**
+**-----------------------------------------------------------------------------
+*/
+
+ case SIR_NEGO_FAILED:
+ /*-------------------------------------------------------
+ **
+ ** Negotiation failed.
+ ** Target doesn't send an answer message,
+ ** or target rejected our message.
+ **
+ ** Remove negotiation request.
+ **
+ **-------------------------------------------------------
+ */
+ OUTB (HS_PRT, HS_BUSY);
+
+ /* fall through */
+
+ case SIR_NEGO_PROTO:
+ /*-------------------------------------------------------
+ **
+ ** Negotiation failed.
+ ** Target doesn't fetch the answer message.
+ **
+ **-------------------------------------------------------
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("negotiation failed sir=%x status=%x.\n",
+ num, cp->nego_status);
+ };
+
+ /*
+ ** any error in negotiation:
+ ** fall back to default mode.
+ */
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0);
+ break;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+
+ };
+ np->msgin [0] = M_NOOP;
+ np->msgout[0] = M_NOOP;
+ cp->nego_status = 0;
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch));
+ break;
+
+ case SIR_NEGO_SYNC:
+ /*
+ ** Synchronous request message received.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync msgin: ");
+ (void) ncr_show_msg (np->msgin);
+ printf (".\n");
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[4];
+ if (ofs==0) per=255;
+
+ /*
+ ** if target sends SDTR message,
+ ** it CAN transfer synch.
+ */
+
+ if (ofs)
+ tp->inqdata[7] |= INQ7_SYNC;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ if (per < tp->minsync)
+ {chg = 1; per = tp->minsync;}
+ if (ofs > tp->maxoffs)
+ {chg = 1; ofs = tp->maxoffs;}
+
+ /*
+ ** Check against controller limits.
+ */
+ fak = 7;
+ scntl3 = 0;
+ if (ofs != 0) {
+ ncr_getsync(np, per, &fak, &scntl3);
+ if (fak > 7) {
+ chg = 1;
+ ofs = 0;
+ }
+ }
+ if (ofs == 0) {
+ fak = 7;
+ per = 0;
+ scntl3 = 0;
+ tp->minsync = 0;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync: per=%d scntl3=0x%x ofs=%d fak=%d chg=%d.\n",
+ per, scntl3, ofs, fak, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setsync (np, cp, 0, 0xe0);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request.
+ ** Check against the table of target capabilities.
+ ** If target not capable force M_REJECT and asynchronous.
+ */
+ if (np->unit < SCSI_NCR_MAX_HOST) {
+ tp->inqdata[7] &=
+ (target_capabilities[np->unit].and_map[target]);
+ if (!(tp->inqdata[7] & INQ7_SYNC)) {
+ ofs = 0;
+ fak = 7;
+ }
+ }
+
+ /*
+ ** It was a request. Set value and
+ ** prepare an answer message
+ */
+
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 3;
+ np->msgout[2] = M_X_SYNC_REQ;
+ np->msgout[3] = per;
+ np->msgout[4] = ofs;
+
+ cp->nego_status = NS_SYNC;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync msgout: ");
+ (void) ncr_show_msg (np->msgout);
+ printf (".\n");
+ }
+
+ if (!ofs) {
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ return;
+ }
+ np->msgin [0] = M_NOOP;
+
+ break;
+
+ case SIR_NEGO_WIDE:
+ /*
+ ** Wide request message received.
+ */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide msgin: ");
+ (void) ncr_show_msg (np->msgin);
+ printf (".\n");
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ wide = np->msgin[3];
+
+ /*
+ ** if target sends WDTR message,
+ ** it CAN transfer wide.
+ */
+
+ if (wide)
+ tp->inqdata[7] |= INQ7_WIDE16;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (wide > tp->usrwide)
+ {chg = 1; wide = tp->usrwide;}
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide: wide=%d chg=%d.\n", wide, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+
+ case NS_WIDE:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setwide (np, cp, 0, 1);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ ncr_setwide (np, cp, wide, 1);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request, set value and
+ ** prepare an answer message
+ */
+
+ ncr_setwide (np, cp, wide, 1);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 2;
+ np->msgout[2] = M_X_WIDE_REQ;
+ np->msgout[3] = wide;
+
+ np->msgin [0] = M_NOOP;
+
+ cp->nego_status = NS_WIDE;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide msgout: ");
+ (void) ncr_show_msg (np->msgin);
+ printf (".\n");
+ }
+ break;
+
+/*--------------------------------------------------------------------
+**
+** Processing of special messages
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_REJECT_RECEIVED:
+ /*-----------------------------------------------
+ **
+ ** We received a M_REJECT message.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_REJECT received (%x:%x).\n",
+ (unsigned)scr_to_cpu(np->lastmsg), np->msgout[0]);
+ break;
+
+ case SIR_REJECT_SENT:
+ /*-----------------------------------------------
+ **
+ ** We received an unknown message
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_REJECT sent for ");
+ (void) ncr_show_msg (np->msgin);
+ printf (".\n");
+ break;
+
+/*--------------------------------------------------------------------
+**
+** Processing of special messages
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_IGN_RESIDUE:
+ /*-----------------------------------------------
+ **
+ ** We received an IGNORE RESIDUE message,
+ ** which couldn't be handled by the script.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_IGN_RESIDUE received, but not yet implemented.\n");
+ break;
+
+ case SIR_MISSING_SAVE:
+ /*-----------------------------------------------
+ **
+ ** We received an DISCONNECT message,
+ ** but the datapointer wasn't saved before.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_DISCONNECT received, but datapointer not saved: "
+ "data=%x save=%x goal=%x.\n",
+ (unsigned) INL (nc_temp),
+ (unsigned) scr_to_cpu(np->header.savep),
+ (unsigned) scr_to_cpu(np->header.goalp));
+ break;
+
+#if 0 /* This stuff does not work */
+/*--------------------------------------------------------------------
+**
+** Processing of a "S_QUEUE_FULL" status.
+**
+** The current command has been rejected,
+** because there are too many in the command queue.
+** We have started too many commands for that target.
+**
+** If possible, reinsert at head of queue.
+** Stall queue until there are no disconnected jobs
+** (ncr is REALLY idle). Then restart processing.
+**
+** We should restart the current job after the controller
+** has become idle. But this is not yet implemented.
+**
+**--------------------------------------------------------------------
+*/
+ case SIR_STALL_QUEUE:
+ /*-----------------------------------------------
+ **
+ ** Stall the start queue.
+ **
+ **-----------------------------------------------
+ */
+ PRINT_ADDR(cp->cmd);
+ printf ("queue full.\n");
+
+ np->script->start1[0] = cpu_to_scr(SCR_INT);
+
+ /*
+ ** Try to disable tagged transfers.
+ */
+ ncr_setmaxtags (np, &np->target[target], 0);
+
+ /*
+ ** @QUEUE@
+ **
+ ** Should update the launch field of the
+ ** current job to be able to restart it.
+ ** Then prepend it to the start queue.
+ */
+
+ /* fall through */
+
+ case SIR_STALL_RESTART:
+ /*-----------------------------------------------
+ **
+ ** Enable selecting again,
+ ** if NO disconnected jobs.
+ **
+ **-----------------------------------------------
+ */
+ /*
+ ** Look for a disconnected job.
+ */
+ cp = np->ccb;
+ while (cp && cp->host_status != HS_DISCONNECT)
+ cp = cp->link_ccb;
+
+ /*
+ ** if there is one, ...
+ */
+ if (cp) {
+ /*
+ ** wait for reselection
+ */
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect));
+ return;
+ };
+
+ /*
+ ** else remove the interrupt.
+ */
+
+ printf ("%s: queue empty.\n", ncr_name (np));
+ np->script->start1[0] = cpu_to_scr(SCR_INT ^ IFFALSE (0));
+ break;
+#endif /* This stuff does not work */
+ };
+
+out:
+ OUTONB (nc_dcntl, (STD|NOCOM));
+}
+
+/*==========================================================
+**
+**
+ ** Acquire a control block
+**
+**
+**==========================================================
+*/
+
+static ccb_p ncr_get_ccb
+ (ncb_p np, u_long target, u_long lun)
+{
+ lcb_p lp;
+ ccb_p cp = (ccb_p) 0;
+
+ /*
+ ** Lun structure available ?
+ */
+
+ lp = np->target[target].lp[lun];
+
+ if (lp && lp->opennings && (!lp->active || lp->active < lp->reqlink)) {
+
+ cp = lp->next_ccb;
+
+ /*
+ ** Look for free CCB
+ */
+
+ while (cp && cp->magic) cp = cp->next_ccb;
+
+ /*
+ ** Increment active commands and decrement credit.
+ */
+
+ if (cp) {
+ ++lp->active;
+ --lp->opennings;
+ }
+ }
+
+ /*
+ ** if nothing available, take the default.
+ ** DANGEROUS, because this ccb is not suitable for
+ ** reselection.
+ ** If lp->actccbs > 0 wait for a suitable ccb to be free.
+ */
+ if ((!cp) && lp && lp->actccbs > 0)
+ return ((ccb_p) 0);
+
+ if (!cp) cp = np->ccb;
+
+ /*
+ ** Wait until available.
+ */
+#if 0
+ while (cp->magic) {
+ if (flags & SCSI_NOSLEEP) break;
+ if (tsleep ((caddr_t)cp, PRIBIO|PCATCH, "ncr", 0))
+ break;
+ };
+#endif
+
+ if (cp->magic)
+ return ((ccb_p) 0);
+
+ cp->magic = 1;
+ return (cp);
+}
+
+/*==========================================================
+**
+**
+** Release one control block
+**
+**
+**==========================================================
+*/
+
+void ncr_free_ccb (ncb_p np, ccb_p cp, u_long target, u_long lun)
+{
+ lcb_p lp;
+
+ /*
+ ** sanity
+ */
+
+ assert (cp != NULL);
+
+ /*
+ ** Decrement active commands and increment credit.
+ */
+
+ lp = np->target[target].lp[lun];
+ if (lp) {
+ --lp->active;
+ ++lp->opennings;
+ }
+
+ cp -> host_status = HS_IDLE;
+ cp -> magic = 0;
+#if 0
+ if (cp == np->ccb)
+ wakeup ((caddr_t) cp);
+#endif
+}
+
+/*==========================================================
+**
+**
+** Allocation of resources for Targets/Luns/Tags.
+**
+**
+**==========================================================
+*/
+
+static void ncr_alloc_ccb (ncb_p np, u_long target, u_long lun)
+{
+ tcb_p tp;
+ lcb_p lp;
+ ccb_p cp;
+
+ assert (np != NULL);
+
+ if (target>=MAX_TARGET) return;
+ if (lun >=MAX_LUN ) return;
+
+ tp=&np->target[target];
+
+ if (!tp->jump_tcb.l_cmd) {
+ /*
+ ** initialize it.
+ */
+ tp->jump_tcb.l_cmd =
+ cpu_to_scr((SCR_JUMP^IFFALSE (DATA (0x80 + target))));
+ tp->jump_tcb.l_paddr = np->jump_tcb.l_paddr;
+
+ tp->getscr[0] = (np->features & FE_PFEN) ?
+ cpu_to_scr(SCR_COPY(1)):cpu_to_scr(SCR_COPY_F(1));
+ tp->getscr[1] = cpu_to_scr(vtophys (&tp->sval));
+ tp->getscr[2] =
+ cpu_to_scr(np->paddr + offsetof (struct ncr_reg, nc_sxfer));
+
+ tp->getscr[3] = (np->features & FE_PFEN) ?
+ cpu_to_scr(SCR_COPY(1)):cpu_to_scr(SCR_COPY_F(1));
+ tp->getscr[4] = cpu_to_scr(vtophys (&tp->wval));
+ tp->getscr[5] =
+ cpu_to_scr(np->paddr + offsetof (struct ncr_reg, nc_scntl3));
+
+ assert (( (offsetof(struct ncr_reg, nc_sxfer) ^
+ offsetof(struct tcb , sval )) &3) == 0);
+ assert (( (offsetof(struct ncr_reg, nc_scntl3) ^
+ offsetof(struct tcb , wval )) &3) == 0);
+
+ tp->call_lun.l_cmd = cpu_to_scr(SCR_CALL);
+ tp->call_lun.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_lun));
+
+ tp->jump_lcb.l_cmd = cpu_to_scr(SCR_JUMP);
+ tp->jump_lcb.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
+ np->jump_tcb.l_paddr = cpu_to_scr(vtophys (&tp->jump_tcb));
+ }
+
+ /*
+ ** Logic unit control block
+ */
+ lp = tp->lp[lun];
+ if (!lp) {
+ /*
+ ** Allocate a lcb
+ */
+ lp = (lcb_p) m_alloc (sizeof (struct lcb), LCB_ALIGN_SHIFT);
+ if (!lp) return;
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC) {
+ PRINT_LUN(np, target, lun);
+ printf ("new lcb @%p.\n", lp);
+ }
+
+ /*
+ ** Initialize it
+ */
+ bzero (lp, sizeof (*lp));
+ lp->jump_lcb.l_cmd =
+ cpu_to_scr(SCR_JUMP ^ IFFALSE (DATA (lun)));
+ lp->jump_lcb.l_paddr = tp->jump_lcb.l_paddr;
+
+ lp->call_tag.l_cmd = cpu_to_scr(SCR_CALL);
+ lp->call_tag.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_tag));
+
+ lp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+ lp->jump_ccb.l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, aborttag));
+
+ lp->actlink = 1;
+
+ lp->active = 1;
+
+ /*
+ ** Chain into LUN list
+ */
+ tp->jump_lcb.l_paddr = cpu_to_scr(vtophys (&lp->jump_lcb));
+ tp->lp[lun] = lp;
+
+ ncr_setmaxtags (np, tp, driver_setup.default_tags);
+ }
+
+ /*
+ ** Allocate ccbs up to lp->reqccbs.
+ */
+
+ /*
+ ** Limit possible number of ccbs.
+ **
+ ** If tagged command queueing is enabled,
+ ** can use more than one ccb.
+ */
+ if (np->actccbs >= MAX_START-2) return;
+ if (lp->actccbs && (lp->actccbs >= lp->reqccbs))
+ return;
+
+ /*
+ ** Allocate a ccb
+ */
+ cp = (ccb_p) m_alloc (sizeof (struct ccb), CCB_ALIGN_SHIFT);
+ if (!cp)
+ return;
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC) {
+ PRINT_LUN(np, target, lun);
+ printf ("new ccb @%p.\n", cp);
+ }
+
+ /*
+ ** Count it
+ */
+ lp->actccbs++;
+ np->actccbs++;
+
+ /*
+ ** Initialize it
+ */
+ bzero (cp, sizeof (*cp));
+
+ /*
+ ** Fill in physical addresses
+ */
+
+ cp->p_ccb = vtophys (cp);
+
+ /*
+ ** Chain into reselect list
+ */
+ cp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+ cp->jump_ccb.l_paddr = lp->jump_ccb.l_paddr;
+ lp->jump_ccb.l_paddr = cpu_to_scr(CCB_PHYS (cp, jump_ccb));
+ cp->call_tmp.l_cmd = cpu_to_scr(SCR_CALL);
+ cp->call_tmp.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_tmp));
+
+ /*
+ ** Chain into wakeup list
+ */
+ cp->link_ccb = np->ccb->link_ccb;
+ np->ccb->link_ccb = cp;
+
+ /*
+ ** Chain into CCB list
+ */
+ cp->next_ccb = lp->next_ccb;
+ lp->next_ccb = cp;
+}
+
+/*==========================================================
+**
+**
+** Announce the number of ccbs/tags to the scsi driver.
+**
+**
+**==========================================================
+*/
+
+static void ncr_opennings (ncb_p np, lcb_p lp, Scsi_Cmnd * cmd)
+{
+ /*
+ ** want to reduce the number ...
+ */
+ if (lp->actlink > lp->reqlink) {
+
+ /*
+ ** Try to reduce the count.
+ ** We assume to run at splbio ..
+ */
+ u_char diff = lp->actlink - lp->reqlink;
+
+ if (!diff) return;
+
+ if (diff > lp->opennings)
+ diff = lp->opennings;
+
+ lp->opennings -= diff;
+
+ lp->actlink -= diff;
+ if (DEBUG_FLAGS & DEBUG_TAGS)
+ printf ("%s: actlink: diff=%d, new=%d, req=%d\n",
+ ncr_name(np), diff, lp->actlink, lp->reqlink);
+ return;
+ };
+
+ /*
+ ** want to increase the number ?
+ */
+ if (lp->reqlink > lp->actlink) {
+ u_char diff = lp->reqlink - lp->actlink;
+
+ lp->opennings += diff;
+
+ lp->actlink += diff;
+#if 0
+ wakeup ((caddr_t) xp->sc_link);
+#endif
+ if (DEBUG_FLAGS & DEBUG_TAGS)
+ printf ("%s: actlink: diff=%d, new=%d, req=%d\n",
+ ncr_name(np), diff, lp->actlink, lp->reqlink);
+ };
+}
+
+/*==========================================================
+**
+**
+** Build Scatter Gather Block
+**
+**
+**==========================================================
+**
+** The transfer area may be scattered among
+** several non adjacent physical pages.
+**
+** We may use MAX_SCATTER blocks.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** We try to reduce the number of interrupts caused
+** by unexpected phase changes due to disconnects.
+** A typical harddisk may disconnect before ANY block.
+** If we wanted to avoid unexpected phase changes at all
+** we had to use a break point every 512 bytes.
+** Of course the number of scatter/gather blocks is
+** limited.
+ ** Under Linux, the scatter/gather blocks are provided by
+** the generic driver. We just have to copy addresses and
+** sizes to the data segment array.
+*/
+
+static int ncr_scatter(ccb_p cp, Scsi_Cmnd *cmd)
+{
+ struct scr_tblmove *data;
+ int segment = 0;
+ int use_sg = (int) cmd->use_sg;
+
+#if 0
+ bzero (cp->phys.data, sizeof (cp->phys.data));
+#endif
+ data = cp->phys.data;
+ cp->data_len = 0;
+
+ if (!use_sg) {
+ if (cmd->request_bufflen) {
+ data = &data[MAX_SCATTER - 1];
+ data[0].addr = cpu_to_scr(vtophys(cmd->request_buffer));
+ data[0].size = cpu_to_scr(cmd->request_bufflen);
+ cp->data_len = cmd->request_bufflen;
+ segment = 1;
+ }
+ }
+ else if (use_sg <= MAX_SCATTER) {
+ struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+
+ data = &data[MAX_SCATTER - use_sg];
+ while (segment < use_sg) {
+ data[segment].addr =
+ cpu_to_scr(vtophys(scatter[segment].address));
+ data[segment].size =
+ cpu_to_scr(scatter[segment].length);
+ cp->data_len += scatter[segment].length;
+ ++segment;
+ }
+ }
+ else {
+ return -1;
+ }
+
+ return segment;
+}
+
+/*==========================================================
+**
+**
+** Test the pci bus snoop logic :-(
+**
+** Has to be called with interrupts disabled.
+**
+**
+**==========================================================
+*/
+
+#ifndef NCR_IOMAPPED
+/*
+**	Check that chip register accesses really reach the chip and are
+**	not cached: write 0xffffffff to the read-only dstat..sstat2
+**	register area and read it back.  Getting the pattern back
+**	unchanged means the access hit cacheable memory, not the chip.
+**	Returns 0 on success, 0x10 on failure.
+*/
+__initfunc(
+static int ncr_regtest (struct ncb* np)
+)
+{
+	register volatile u_long data;
+	/*
+	** ncr registers may NOT be cached.
+	** write 0xffffffff to a read only register area,
+	** and try to read it back.
+	*/
+	data = 0xffffffff;
+	OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
+	data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
+#if 1
+	if (data == 0xffffffff) {
+#else
+	if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+			(unsigned) data);
+		return (0x10);
+	};
+	return (0);
+}
+#endif
+
+/*
+**	Check cache coherency (PCI bus snooping) between the CPU and the
+**	chip: a small SCRIPTS program ("snooptest") exchanges values
+**	between host memory (np->ncr_cache) and the chip TEMP register,
+**	then both sides compare what the other wrote.
+**	Returns 0 when coherent.  Gross failures return 0x10 (register
+**	test), 0x20 (timeout) or 0x40 (script ended at the wrong address);
+**	otherwise bits 1/2/4 flag the individual data mismatches.
+**	Has to be called with interrupts disabled; resets the chip.
+*/
+__initfunc(
+static int ncr_snooptest (struct ncb* np)
+)
+{
+	u_long ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc, err=0;
+	int i;
+#ifndef NCR_IOMAPPED
+	if (np->reg) {
+		err |= ncr_regtest (np);
+		if (err) return (err);
+	}
+#endif
+	/*
+	** init
+	*/
+	pc = NCB_SCRIPTH_PHYS (np, snooptest);
+	host_wr = 1;
+	ncr_wr = 2;
+	/*
+	** Set memory and register.
+	*/
+	np->ncr_cache = cpu_to_scr(host_wr);
+	OUTL (nc_temp, ncr_wr);
+	/*
+	** Start script (exchange values)
+	*/
+	OUTL (nc_dsp, pc);
+	/*
+	** Wait 'til done (with timeout)
+	*/
+	for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
+		if (INB(nc_istat) & (INTF|SIP|DIP))
+			break;
+	/*
+	** Save termination position.
+	*/
+	pc = INL (nc_dsp);
+	/*
+	** Read memory and register.
+	*/
+	host_rd = scr_to_cpu(np->ncr_cache);
+	ncr_rd = INL (nc_scratcha);
+	ncr_bk = INL (nc_temp);
+	/*
+	** Reset ncr chip
+	*/
+	OUTB (nc_istat, SRST);
+	DELAY (1000);
+	OUTB (nc_istat, 0 );
+	/*
+	** check for timeout
+	*/
+	if (i>=NCR_SNOOP_TIMEOUT) {
+		printf ("CACHE TEST FAILED: timeout.\n");
+		return (0x20);
+	};
+	/*
+	** Check termination position.
+	*/
+	if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) {
+		printf ("CACHE TEST FAILED: script execution failed.\n");
+		printf ("start=%08lx, pc=%08lx, end=%08lx\n",
+			(u_long) NCB_SCRIPTH_PHYS (np, snooptest), pc,
+			(u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8);
+		return (0x40);
+	};
+	/*
+	** Show results.
+	*/
+	if (host_wr != ncr_rd) {
+		printf ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
+			(int) host_wr, (int) ncr_rd);
+		err |= 1;
+	};
+	if (host_rd != ncr_wr) {
+		printf ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
+			(int) ncr_wr, (int) host_rd);
+		err |= 2;
+	};
+	if (ncr_bk != ncr_wr) {
+		printf ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
+			(int) ncr_wr, (int) ncr_bk);
+		err |= 4;
+	};
+	return (err);
+}
+
+/*==========================================================
+**
+**
+** Profiling the drivers and targets performance.
+**
+**
+**==========================================================
+*/
+
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+
+/*
+**	Compute the difference in jiffies ticks.
+**	Yields -1 when either timestamp is zero (never recorded).
+*/
+
+#define ncr_delta(from, to) \
+	( ((to) && (from))? (to) - (from) : -1 )
+
+/*
+**	Fold the timestamps recorded in a completed ccb into the
+**	controller-wide profile counters (np->profile): transfer count,
+**	kbytes moved, disconnect count, and the jiffies spent in command
+**	setup, data transfer, disconnected state and post-processing.
+**	Returns early when the command or status phase was never reached.
+*/
+#define PROFILE cp->phys.header.stamp
+static void ncb_profile (ncb_p np, ccb_p cp)
+{
+	int co, st, en, di, se, post,work,disc;
+	u_long diff;
+
+	PROFILE.end = jiffies;
+
+	st = ncr_delta (PROFILE.start,PROFILE.status);
+	if (st<0) return;	/* status not reached */
+
+	co = ncr_delta (PROFILE.start,PROFILE.command);
+	if (co<0) return;	/* command not executed */
+
+	en = ncr_delta (PROFILE.start,PROFILE.end),
+	di = ncr_delta (PROFILE.start,PROFILE.disconnect),
+	se = ncr_delta (PROFILE.start,PROFILE.select);
+	post = en - st;
+
+	/*
+	** @PROFILE@ Disconnect time invalid if multiple disconnects
+	*/
+
+	if (di>=0) disc = se-di; else disc = 0;
+
+	work = (st - co) - disc;
+
+	/* disc_phys counts modulo 256 (note the & 0xff): accumulate the
+	** wrapped delta since the last accounting into disc_ref. */
+	diff = (np->disc_phys - np->disc_ref) & 0xff;
+	np->disc_ref += diff;
+
+	np->profile.num_trans += 1;
+	if (cp->cmd) {
+		np->profile.num_kbytes += (cp->cmd->request_bufflen >> 10);
+		np->profile.rest_bytes += (cp->cmd->request_bufflen & (0x400-1));
+		/* carry accumulated sub-kbyte remainders into num_kbytes */
+		if (np->profile.rest_bytes >= 0x400) {
+			++np->profile.num_kbytes;
+			np->profile.rest_bytes -= 0x400;
+		}
+	}
+	np->profile.num_disc += diff;
+	np->profile.ms_setup += co;
+	np->profile.ms_data += work;
+	np->profile.ms_disc += disc;
+	np->profile.ms_post += post;
+}
+#undef PROFILE
+
+#endif /* SCSI_NCR_PROFILE_SUPPORT */
+
+/*==========================================================
+**
+**
+** Device lookup.
+**
+** @GENSCSI@ should be integrated to scsiconf.c
+**
+**
+**==========================================================
+*/
+
+/*
+**	Device quirk table: INQUIRY vendor/model/revision prefixes mapped
+**	to quirk bits, matched by ncr_lookup().  The empty entry at the
+**	end matches every device and must remain the last one.
+**	NOTE(review): when NCR_GETCC_WITHMSG is defined the FIRST entry is
+**	also all-empty and therefore matches everything -- verify that
+**	flagging all devices QUIRK_NOMSG is really intended.
+*/
+struct table_entry {
+	char * manufacturer;
+	char * model;
+	char * version;
+	u_long info;
+};
+
+static struct table_entry device_tab[] =
+{
+#ifdef NCR_GETCC_WITHMSG
+	{"", "", "", QUIRK_NOMSG},
+	{"SONY", "SDT-5000", "3.17", QUIRK_NOMSG},
+	{"WangDAT", "Model 2600", "01.7", QUIRK_NOMSG},
+	{"WangDAT", "Model 3200", "02.2", QUIRK_NOMSG},
+	{"WangDAT", "Model 1300", "02.4", QUIRK_NOMSG},
+#endif
+	{"", "", "", 0} /* catch all: must be last entry. */
+};
+
+/*
+**	Return nonzero when string 'd' begins with pattern 'p'
+**	(an empty pattern matches anything).
+*/
+static int ncr_id_match(char *p, char *d)
+{
+	char c;
+
+	while ((c = *p++)) {
+		if (c != *d++)
+			return 0;
+	}
+	return 1;
+}
+
+/*
+**	Look up the quirk bits for a device: match the INQUIRY data
+**	(vendor at offset 8, product at 16, revision at 32) against
+**	device_tab by prefix.  The empty catch-all entry at the end of
+**	the table guarantees that the loop terminates.
+*/
+static u_long ncr_lookup(char * id)
+{
+	struct table_entry * e;
+
+	for (e = device_tab; ; e++) {
+		if (!ncr_id_match(e->manufacturer, id + 8))
+			continue;
+		if (!ncr_id_match(e->model, id + 16))
+			continue;
+		if (!ncr_id_match(e->version, id + 32))
+			continue;
+		return (e->info);
+	}
+}
+
+/*==========================================================
+**
+** Determine the ncr's clock frequency.
+** This is essential for the negotiation
+** of the synchronous transfer rate.
+**
+**==========================================================
+**
+** Note: we have to return the correct value.
+** THERE IS NO SAFE DEFAULT VALUE.
+**
+** Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
+** 53C860 and 53C875 rev. 1 support fast20 transfers but
+** do not have a clock doubler and so are provided with a
+** 80 MHz clock. All other fast20 boards incorporate a doubler
+** and so should be delivered with a 40 MHz clock.
+** The future fast40 chips (895/895) use a 40 Mhz base clock
+** and provide a clock quadrupler (160 Mhz). The code below
+** tries to deal as cleverly as possible with all this stuff.
+**
+**----------------------------------------------------------
+*/
+
+/*
+ * Select NCR SCSI clock frequency
+ *
+ * Program the scntl3 clock divisor.  When np->multiplier indicates a
+ * clock doubler (2) or quadrupler (> 2), enable it first, wait for it
+ * to lock, and switch over with the scsi clock halted.
+ */
+static void ncr_selectclock(ncb_p np, u_char scntl3)
+{
+	if (np->multiplier < 2) {
+		/* No multiplier in use: just program the divisor. */
+		OUTB(nc_scntl3, scntl3);
+		return;
+	}
+
+	if (bootverbose >= 2)
+		printf ("%s: enabling clock multiplier\n", ncr_name(np));
+
+	OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */
+	if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */
+		int i = 20;
+		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
+			DELAY(20);
+		if (!i)
+			printf("%s: the chip cannot lock the frequency\n", ncr_name(np));
+	} else /* Wait 20 micro-seconds for doubler */
+		DELAY(20);
+	OUTB(nc_stest3, HSC); /* Halt the scsi clock */
+	OUTB(nc_scntl3, scntl3);
+	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
+	OUTB(nc_stest3, 0x00|TE); /* Restart scsi clock */
+}
+
+
+/*
+ * calculate NCR SCSI clock frequency (in KHz)
+ *
+ * Time the chip's general purpose timer (nominal period
+ * (1<<gen) * 125us with the pre-scaler set to divide by 3) and derive
+ * the SCSI clock from the measured delay.
+ * Returns the frequency in KHz, or 0 when the timer never fired.
+ * Side effects: clock doubler switched off, scsi and dma interrupts
+ * masked, pre-scaler left at 0.
+ */
+__initfunc(
+static unsigned ncrgetfreq (ncb_p np, int gen)
+)
+{
+	unsigned ms = 0;
+
+	/*
+	 * Measure GEN timer delay in order
+	 * to calculate SCSI clock frequency
+	 *
+	 * This code will never execute too
+	 * many loop iterations (if DELAY is
+	 * reasonably correct). It could get
+	 * too low a delay (too high a freq.)
+	 * if the CPU is slow executing the
+	 * loop for some reason (an NMI, for
+	 * example). For this reason we will
+	 * if multiple measurements are to be
+	 * performed trust the higher delay
+	 * (lower frequency returned).
+	 */
+	OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */
+	OUTW (nc_sien , 0); /* mask all scsi interrupts */
+	(void) INW (nc_sist); /* clear pending scsi interrupt */
+	OUTB (nc_dien , 0); /* mask all dma interrupts */
+	(void) INW (nc_sist); /* another one, just to be sure :) */
+	OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */
+	OUTB (nc_stime1, 0); /* disable general purpose timer */
+	OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
+	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
+		DELAY(1000); /* count ms */
+	OUTB (nc_stime1, 0); /* disable general purpose timer */
+	/*
+	 * set prescaler to divide by whatever 0 means
+	 * 0 ought to choose divide by 2, but appears
+	 * to set divide by 3.5 mode in my 53c810 ...
+	 */
+	OUTB (nc_scntl3, 0);
+
+	if (bootverbose >= 2)
+		printf ("%s: Delay (GEN=%d): %u msec\n", ncr_name(np), gen, ms);
+	/*
+	 * adjust for prescaler, and convert into KHz
+	 */
+	return ms ? ((1 << gen) * 4340) / ms : 0;
+}
+
+/*
+ * Get/probe NCR SCSI clock frequency
+ *
+ * Determine np->multiplier (1, or 'mult' when a doubler/quadrupler is
+ * selected or assumed) and np->clock_khz.  Trust the BIOS scntl3
+ * setting when it is consistent; otherwise reset the chip and measure
+ * the frequency twice with the general purpose timer, keeping the
+ * lower (more trustworthy) result.
+ */
+__initfunc(
+static void ncr_getclock (ncb_p np, int mult)
+)
+{
+	unsigned char scntl3 = INB(nc_scntl3);
+	unsigned char stest1 = INB(nc_stest1);
+	unsigned f1;
+
+	np->multiplier = 1;
+	f1 = 40000;
+
+	/*
+	** True with 875 or 895 with clock multiplier selected
+	*/
+	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+		if (bootverbose >= 2)
+			printf ("%s: clock multiplier found\n", ncr_name(np));
+		np->multiplier = mult;
+	}
+
+	/*
+	** If multiplier not found or scntl3 not 7,5,3,
+	** reset chip and get frequency from general purpose timer.
+	** Otherwise trust scntl3 BIOS setting.
+	*/
+	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+		unsigned f2;
+
+		OUTB(nc_istat, SRST); DELAY(5); OUTB(nc_istat, 0);
+
+		(void) ncrgetfreq (np, 11); /* throw away first result */
+		f1 = ncrgetfreq (np, 11);
+		f2 = ncrgetfreq (np, 11);
+
+		if (bootverbose)
+			printf ("%s: NCR clock is %uKHz, %uKHz\n", ncr_name(np), f1, f2);
+
+		if (f1 > f2) f1 = f2; /* trust lower result */
+
+		/* snap the measurement to the nearest nominal frequency */
+		if (f1 < 45000) f1 = 40000;
+		else if (f1 < 55000) f1 = 50000;
+		else f1 = 80000;
+
+		/* 80 MHz not measured although a multiplier is possible:
+		** assume the multiplier is present but currently off. */
+		if (f1 < 80000 && mult > 1) {
+			if (bootverbose >= 2)
+				printf ("%s: clock multiplier assumed\n", ncr_name(np));
+			np->multiplier = mult;
+		}
+	} else {
+		/* scntl3 looks sane: derive the base clock from it */
+		if ((scntl3 & 7) == 3) f1 = 40000;
+		else if ((scntl3 & 7) == 5) f1 = 80000;
+		else f1 = 160000;
+
+		f1 /= np->multiplier;
+	}
+
+	/*
+	** Compute controller synchronous parameters.
+	*/
+	f1 *= np->multiplier;
+	np->clock_khz = f1;
+}
+
+/*===================== LINUX ENTRY POINTS SECTION ==========================*/
+
+/* Short integer type names used below (guarded: system headers may
+** already provide them). */
+#ifndef uchar
+#define uchar unsigned char
+#endif
+
+#ifndef ushort
+#define ushort unsigned short
+#endif
+
+#ifndef ulong
+#define ulong unsigned long
+#endif
+
+/* ---------------------------------------------------------------------
+**
+** Driver setup from the boot command line
+**
+** ---------------------------------------------------------------------
+*/
+
+/*
+**	Parse the driver boot options ("ncr53c8xx=..." command line, or
+**	the module option string) into the global driver_setup.
+**	The string is a list of "keyword:value" items separated by ','
+**	(by ' ' when built as a module).  Values are the booleans 'y'/'n'
+**	or numbers parsed by simple_strtoul() with base auto-detection.
+**	Unknown keywords are reported and ignored; the 'ints' argument
+**	is unused.
+*/
+__initfunc(
+void ncr53c8xx_setup(char *str, int *ints)
+)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+	char *cur = str;
+	char *pc, *pv;
+	int val;
+	int base;
+	int c;
+
+	while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+		val = 0;
+		pv = pc;
+		c = *++pv;
+		if (c == 'n')
+			val = 0;
+		else if (c == 'y')
+			val = 1;
+		else {
+			/* base 0: simple_strtoul() auto-detects 0x/0 prefixes */
+			base = 0;
+#if 0
+			if (c == '0') {
+				c = *pv++;
+				base = 8;
+			}
+			if (c == 'x') {
+				++pv;
+				base = 16;
+			}
+			else if (c >= '0' && c <= '9')
+				base = 10;
+			else
+				break;
+#endif
+			val = (int) simple_strtoul(pv, NULL, base);
+		}
+
+		if (!strncmp(cur, "mpar:", 5))
+			driver_setup.master_parity = val;
+		else if (!strncmp(cur, "spar:", 5))
+			driver_setup.scsi_parity = val;
+		else if (!strncmp(cur, "disc:", 5))
+			driver_setup.disconnection = val;
+		else if (!strncmp(cur, "specf:", 6))
+			driver_setup.special_features = val;
+		else if (!strncmp(cur, "ultra:", 6))
+			driver_setup.ultra_scsi = val;
+		else if (!strncmp(cur, "fsn:", 4))
+			driver_setup.force_sync_nego = val;
+		else if (!strncmp(cur, "revprob:", 8))
+			driver_setup.reverse_probe = val;
+		else if (!strncmp(cur, "tags:", 5)) {
+			/* clamp to the compile-time maximum */
+			if (val > SCSI_NCR_MAX_TAGS)
+				val = SCSI_NCR_MAX_TAGS;
+			driver_setup.default_tags = val;
+		}
+		else if (!strncmp(cur, "sync:", 5))
+			driver_setup.default_sync = val;
+		else if (!strncmp(cur, "verb:", 5))
+			driver_setup.verbose = val;
+		else if (!strncmp(cur, "debug:", 6))
+			driver_setup.debug = val;
+		else if (!strncmp(cur, "burst:", 6))
+			driver_setup.burst_max = val;
+		else if (!strncmp(cur, "led:", 4))
+			driver_setup.led_pin = val;
+		else if (!strncmp(cur, "wide:", 5))
+			driver_setup.max_wide = val? 1:0;
+		else if (!strncmp(cur, "settle:", 7))
+			driver_setup.settle_delay= val;
+		else if (!strncmp(cur, "diff:", 5))
+			driver_setup.diff_support= val;
+		else if (!strncmp(cur, "irqm:", 5))
+			driver_setup.irqm = val;
+		else if (!strncmp(cur, "pcifix:", 7))
+			driver_setup.pci_fix_up = val;
+		else if (!strncmp(cur, "buschk:", 7))
+			driver_setup.bus_check = val;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+		else if (!strncmp(cur, "nvram:", 6))
+			driver_setup.use_nvram = val;
+#endif
+
+		/* "safe:y" restores the conservative default settings */
+		else if (!strncmp(cur, "safe:", 5) && val)
+			memcpy(&driver_setup, &driver_safe_setup, sizeof(driver_setup));
+		else
+			printf("ncr53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+
+#ifdef MODULE
+		if ((cur = strchr(cur, ' ')) != NULL)
+#else
+		if ((cur = strchr(cur, ',')) != NULL)
+#endif
+			++cur;
+	}
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+}
+
+static int ncr53c8xx_pci_init(Scsi_Host_Template *tpnt,
+ uchar bus, uchar device_fn, ncr_device *device);
+
+/*
+** Linux entry point for NCR53C8XX devices detection routine.
+**
+** Called by the middle-level scsi drivers at initialization time,
+** or at module installation.
+**
+** Read the PCI configuration and try to attach each
+** detected NCR board.
+**
+** If NVRAM is present, try to attach boards according to
+** the user-defined boot order.
+**
+** Returns the number of boards successfully attached.
+*/
+
+/*
+**	Log the current driver_setup values (two lines) so the effective
+**	boot options can be checked from the kernel messages.
+*/
+__initfunc(
+static void ncr_print_driver_setup(void)
+)
+{
+#define YesNo(y) y ? 'y' : 'n'
+	printk("ncr53c8xx: setup=disc:%c,specf:%d,ultra:%c,tags:%d,sync:%d,burst:%d,wide:%c,diff:%d\n",
+			YesNo(driver_setup.disconnection),
+			driver_setup.special_features,
+			YesNo(driver_setup.ultra_scsi),
+			driver_setup.default_tags,
+			driver_setup.default_sync,
+			driver_setup.burst_max,
+			YesNo(driver_setup.max_wide),
+			driver_setup.diff_support);
+	printk("ncr53c8xx: setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,led:%c,settle:%d,irqm:%d\n",
+			YesNo(driver_setup.master_parity),
+			YesNo(driver_setup.scsi_parity),
+			YesNo(driver_setup.force_sync_nego),
+			driver_setup.verbose,
+			driver_setup.debug,
+			YesNo(driver_setup.led_pin),
+			driver_setup.settle_delay,
+			driver_setup.irqm);
+#undef YesNo
+}
+
+/*
+** NCR53C8XX devices description table and chip ids list.
+*/
+
+/* Chip descriptions and the PCI device id list to probe, expanded from
+** macros provided by the header files. */
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+static ushort ncr_chip_ids[] __initdata = SCSI_NCR_CHIP_IDS;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+**	Attach detected boards in the order recorded in the Symbios NVRAM
+**	boot block, when one was found during detection.  'nvram_index'
+**	is the OR of the NVRAM types seen (0 => nothing to do).  Boards
+**	matching a boot record are attached and marked attach_done; the
+**	others are left for the caller's rescan.
+**	Returns the number of boards attached here.
+*/
+__initfunc(
+static int
+ncr_attach_using_nvram(Scsi_Host_Template *tpnt, int nvram_index, int count, ncr_device device[])
+)
+{
+	int i, j;
+	int attach_count = 0;
+	ncr_nvram *nvram;
+	ncr_device *devp;
+
+	if (!nvram_index)
+		return 0;
+
+	/* find first Symbios NVRAM if there is one as we need to check it for host boot order */
+	for (i = 0, nvram_index = -1; i < count; i++) {
+		devp = &device[i];
+		nvram = devp->nvram;
+		if (!nvram)
+			continue;
+		if (nvram->type == SCSI_NCR_SYMBIOS_NVRAM) {
+			if (nvram_index == -1)
+				nvram_index = i;
+/* NOTE: the '}' closing this branch and the Tekram display branch below
+** only exist when SCSI_NCR_DEBUG_NVRAM is defined. */
+#ifdef SCSI_NCR_DEBUG_NVRAM
+			printf("ncr53c8xx: NVRAM: Symbios format Boot Block, 53c%s, PCI bus %d, device %d, function %d\n",
+				devp->chip.name, devp->slot.bus,
+				(int) (devp->slot.device_fn & 0xf8) >> 3,
+				(int) devp->slot.device_fn & 7);
+			for (j = 0 ; j < 4 ; j++) {
+				Symbios_host *h = &nvram->data.Symbios.host[j];
+				printf("ncr53c8xx: BOOT[%d] device_id=%04x vendor_id=%04x device_fn=%02x io_port=%04x %s\n",
+					j, h->device_id, h->vendor_id,
+					h->device_fn, h->io_port,
+					(h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) ? "SCAN AT BOOT" : "");
+			}
+		}
+		else if (nvram->type == SCSI_NCR_TEKRAM_NVRAM) {
+			/* display Tekram nvram data */
+			printf("ncr53c8xx: NVRAM: Tekram format data, 53c%s, PCI bus %d, device %d, function %d\n",
+				devp->chip.name, devp->slot.bus,
+				(int) (devp->slot.device_fn & 0xf8) >> 3,
+				(int) devp->slot.device_fn & 7);
+#endif
+		}
+	}
+
+	/* use the first Symbios NVRAM found, when in range */
+	if (nvram_index >= 0 && nvram_index < count)
+		nvram = device[nvram_index].nvram;
+	else
+		nvram = 0;
+
+	if (!nvram)
+		goto out;
+
+	/*
+	** check devices in the boot record against devices detected.
+	** attach devices if we find a match. boot table records that
+	** do not match any detected devices will be ignored.
+	** devices that do not match any boot table will not be attached
+	** here but will attempt to be attached during the device table
+	** rescan.
+	*/
+	for (i = 0; i < 4; i++) {
+		Symbios_host *h = &nvram->data.Symbios.host[i];
+		for (j = 0 ; j < count ; j++) {
+			devp = &device[j];
+			if (h->device_fn == devp->slot.device_fn &&
+#if 0 /* bus number location in nvram ? */
+			    h->bus == devp->slot.bus &&
+#endif
+			    h->device_id == devp->chip.device_id)
+				break;
+		}
+		if (j < count && !devp->attach_done) {
+			if (!ncr_attach (tpnt, attach_count, devp))
+				attach_count++;
+			devp->attach_done = 1;
+		}
+	}
+
+out:
+	return attach_count;
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+**	Linux detect() entry point.
+**	Scan the PCI bus for every supported chip id (optionally in
+**	reverse order), run ncr53c8xx_pci_init() on each board found,
+**	then attach the boards: first in NVRAM boot order when a Symbios
+**	NVRAM is present, then the remaining ones in detection order.
+**	Returns the number of boards successfully attached.
+*/
+__initfunc(
+int ncr53c8xx_detect(Scsi_Host_Template *tpnt)
+)
+{
+	int i, j;
+	int chips;
+	int count = 0;
+	uchar bus, device_fn;
+	short index;
+	int attach_count = 0;
+	ncr_device device[8];
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	ncr_nvram nvram[4];
+	int k, nvrams;
+#endif
+	int hosts;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	int nvram_index = 0;
+#endif
+	if (initverbose >= 2)
+		ncr_print_driver_setup();
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+	ncr_debug = driver_setup.debug;
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+	tpnt->proc_dir = &proc_scsi_ncr53c8xx;
+# ifdef SCSI_NCR_PROC_INFO_SUPPORT
+	tpnt->proc_info = ncr53c8xx_proc_info;
+# endif
+#endif
+
+/* When built as a module, the option string comes from insmod. */
+#if defined(SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT) && defined(MODULE)
+if (ncr53c8xx)
+	ncr53c8xx_setup(ncr53c8xx, (int *) 0);
+#endif
+
+	/*
+	** Detect all 53c8xx hosts and then attach them.
+	**
+	** If we are using NVRAM, once all hosts are detected, we need to check
+	** any NVRAM for boot order in case detect and boot order differ and
+	** attach them using the order in the NVRAM.
+	**
+	** If no NVRAM is found or data appears invalid attach boards in the
+	** the order they are detected.
+	*/
+
+	if (!pcibios_present())
+		return 0;
+
+	chips = sizeof(ncr_chip_ids) / sizeof(ncr_chip_ids[0]);
+	hosts = sizeof(device) / sizeof(device[0]);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	k = 0;
+	if (driver_setup.use_nvram & 0x1)
+		nvrams = sizeof(nvram) / sizeof(nvram[0]);
+	else
+		nvrams = 0;
+#endif
+
+	for (j = 0; j < chips ; ++j) {
+		/* reverse_probe walks the chip id table from the end */
+		i = driver_setup.reverse_probe ? chips-1 - j : j;
+		for (index = 0; ; index++) {
+			char *msg = "";
+			if ((pcibios_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+				index, &bus, &device_fn)) ||
+				(count == hosts))
+				break;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+			device[count].nvram = k < nvrams ? &nvram[k] : 0;
+#else
+			device[count].nvram = 0;
+#endif
+			if (ncr53c8xx_pci_init(tpnt, bus, device_fn, &device[count])) {
+				device[count].nvram = 0;
+				continue;
+			}
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+			if (device[count].nvram) {
+				++k;
+				nvram_index |= device[count].nvram->type;
+				switch (device[count].nvram->type) {
+				case SCSI_NCR_TEKRAM_NVRAM:
+					msg = "with Tekram NVRAM";
+					break;
+				case SCSI_NCR_SYMBIOS_NVRAM:
+					msg = "with Symbios NVRAM";
+					break;
+				default:
+					msg = "";
+					device[count].nvram = 0;
+					--k;
+				}
+			}
+#endif
+			printf(KERN_INFO "ncr53c8xx: 53c%s detected %s\n",
+				device[count].chip.name, msg);
+			++count;
+		}
+	}
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	attach_count = ncr_attach_using_nvram(tpnt, nvram_index, count, device);
+#endif
+	/*
+	** rescan device list to make sure all boards attached.
+	** devices without boot records will not be attached yet
+	** so try to attach them here.
+	*/
+	for (i= 0; i < count; i++) {
+		if (!device[i].attach_done &&
+		    !ncr_attach (tpnt, attach_count, &device[i])) {
+			attach_count++;
+		}
+	}
+
+	return attach_count;
+}
+
+/*
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+*/
+
+/*
+**	Read and check the PCI configuration for a detected NCR board:
+**	device/vendor ids, base addresses, IRQ, cache line size and
+**	latency timer.  Apply the optional platform and driver-setup
+**	fix-ups, optionally read the board NVRAM, and save everything
+**	into *device for the later ncr_attach().
+**	Returns 0 on success, -1 when the board cannot be used.
+*/
+__initfunc(
+static int ncr53c8xx_pci_init(Scsi_Host_Template *tpnt,
+	uchar bus, uchar device_fn, ncr_device *device)
+)
+{
+	ushort vendor_id, device_id, command;
+	uchar cache_line_size, latency_timer;
+	uchar irq, revision;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+	uint base, base_2, io_port;
+#else
+	/* Bug fix: io_port was missing from this branch although it is
+	** used unconditionally below. */
+	ulong base, base_2, io_port;
+#endif
+	int i;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	ncr_nvram *nvram = device->nvram;
+#endif
+	ncr_chip *chip;
+
+	printk(KERN_INFO "ncr53c8xx: at PCI bus %d, device %d, function %d\n",
+		bus, (int) (device_fn & 0xf8) >> 3, (int) device_fn & 7);
+	/*
+	 * Read info from the PCI config space.
+	 * pcibios_read_config_xxx() functions are assumed to be used for
+	 * successfully detected PCI devices.
+	 * Expecting error conditions from them is just paranoia,
+	 * thus void cast.
+	 */
+	(void) pcibios_read_config_word(bus, device_fn,
+		PCI_VENDOR_ID, &vendor_id);
+	(void) pcibios_read_config_word(bus, device_fn,
+		PCI_DEVICE_ID, &device_id);
+	(void) pcibios_read_config_word(bus, device_fn,
+		PCI_COMMAND, &command);
+	(void) pcibios_read_config_dword(bus, device_fn,
+		PCI_BASE_ADDRESS_0, &io_port);
+	(void) pcibios_read_config_dword(bus, device_fn,
+		PCI_BASE_ADDRESS_1, &base);
+	(void) pcibios_read_config_dword(bus, device_fn,
+		PCI_BASE_ADDRESS_2, &base_2);
+	(void) pcibios_read_config_byte(bus, device_fn,
+		PCI_CLASS_REVISION,&revision);
+	(void) pcibios_read_config_byte(bus, device_fn,
+		PCI_INTERRUPT_LINE, &irq);
+	(void) pcibios_read_config_byte(bus, device_fn,
+		PCI_CACHE_LINE_SIZE, &cache_line_size);
+	(void) pcibios_read_config_byte(bus, device_fn,
+		PCI_LATENCY_TIMER, &latency_timer);
+
+	/*
+	 * Check if the chip is supported: take the first table entry
+	 * with a matching device id and a revision_id not below ours.
+	 */
+	chip = 0;
+	for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+		if (device_id != ncr_chip_table[i].device_id)
+			continue;
+		if (revision > ncr_chip_table[i].revision_id)
+			continue;
+		chip = &device->chip;
+		memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+		chip->revision_id = revision;
+		break;
+	}
+	if (!chip) {
+		printk("ncr53c8xx: not initializing, device not supported\n");
+		return -1;
+	}
+
+#ifdef __powerpc__
+	/*
+	 * Several fix-ups for power/pc.
+	 * Should not be performed by the driver.
+	 */
+	if ((command &
+		(PCI_COMMAND_MASTER|PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) !=
+		(PCI_COMMAND_MASTER|PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
+		printk("ncr53c8xx : setting PCI master/io/command bit\n");
+		command |= PCI_COMMAND_MASTER|PCI_COMMAND_IO|PCI_COMMAND_MEMORY;
+		pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+	}
+	if (io_port >= 0x10000000) {
+		io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+		pcibios_write_config_dword(bus, device_fn, PCI_BASE_ADDRESS_0, io_port);
+	}
+	if (base >= 0x10000000) {
+		base = (base & 0x00FFFFFF) | 0x01000000;
+		pcibios_write_config_dword(bus, device_fn, PCI_BASE_ADDRESS_1, base);
+	}
+#endif
+
+	/*
+	 * Check availability of IO space, memory space and master capability.
+	 */
+	if (command & PCI_COMMAND_IO) {
+		if ((io_port & 3) != 1) {
+			printk("ncr53c8xx: disabling I/O mapping since base address 0 (0x%x)\n"
+				" bits 0..1 indicate a non-IO mapping\n", (int) io_port);
+			io_port = 0;
+		}
+		else
+			io_port &= PCI_BASE_ADDRESS_IO_MASK;
+	}
+	else
+		io_port = 0;
+
+	if (command & PCI_COMMAND_MEMORY) {
+		if ((base & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+			printk("ncr53c8xx: disabling memory mapping since base address 1\n"
+				" contains a non-memory mapping\n");
+			base = 0;
+		}
+		else
+			base &= PCI_BASE_ADDRESS_MEM_MASK;
+	}
+	else
+		base = 0;
+
+	if (!io_port && !base) {
+		printk("ncr53c8xx: not initializing, both I/O and memory mappings disabled\n");
+		return -1;
+	}
+
+	base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+	if (io_port && check_region (io_port, 128)) {
+		printk("ncr53c8xx: IO region 0x%x to 0x%x is in use\n",
+			(int) io_port, (int) (io_port + 127));
+		return -1;
+	}
+
+	if (!(command & PCI_COMMAND_MASTER)) {
+		printk("ncr53c8xx: not initializing, BUS MASTERING was disabled\n");
+		return -1;
+	}
+
+	/*
+	 * Fix some features according to driver setup.
+	 */
+	if (!(driver_setup.special_features & 1))
+		chip->features &= ~FE_SPECIAL_SET;
+	else {
+		if (driver_setup.special_features & 2)
+			chip->features &= ~FE_WRIE;
+	}
+	if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+		chip->features |= FE_ULTRA;
+		chip->features &= ~FE_ULTRA2;
+	}
+	if (driver_setup.ultra_scsi < 1)
+		chip->features &= ~FE_ULTRA;
+	if (!driver_setup.max_wide)
+		chip->features &= ~FE_WIDE;
+
+
+#ifdef SCSI_NCR_PCI_FIX_UP_SUPPORT
+
+	/*
+	 * Try to fix up PCI config according to wished features.
+	 */
+#if defined(__i386) && !defined(MODULE)
+	if ((driver_setup.pci_fix_up & 1) &&
+		(chip->features & FE_CLSE) && cache_line_size == 0) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+		extern char x86;
+		switch(x86) {
+#else
+		switch(boot_cpu_data.x86) {
+#endif
+		case 4: cache_line_size = 4; break;
+		case 5: cache_line_size = 8; break;
+		}
+		if (cache_line_size)
+			(void) pcibios_write_config_byte(bus, device_fn,
+				PCI_CACHE_LINE_SIZE, cache_line_size);
+		if (initverbose)
+			printk("ncr53c8xx: setting PCI_CACHE_LINE_SIZE to %d (fix-up).\n", cache_line_size);
+	}
+
+	if ((driver_setup.pci_fix_up & 2) && cache_line_size &&
+		(chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+		command |= PCI_COMMAND_INVALIDATE;
+		(void) pcibios_write_config_word(bus, device_fn,
+			PCI_COMMAND, command);
+		if (initverbose)
+			printk("ncr53c8xx: setting PCI_COMMAND_INVALIDATE bit (fix-up).\n");
+	}
+#endif
+	/*
+	 * Fix up for old chips that support READ LINE but not CACHE LINE SIZE.
+	 * - If CACHE LINE SIZE is unknown, set burst max to 32 bytes = 8 dwords
+	 *   and do not enable READ LINE.
+	 * - Otherwise set it to the CACHE LINE SIZE (power of 2 assumed).
+	 */
+
+	if (!(chip->features & FE_CLSE)) {
+		int burst_max = chip->burst_max;
+		if (cache_line_size == 0) {
+			chip->features &= ~FE_ERL;
+			if (burst_max > 3)
+				burst_max = 3;
+		}
+		else {
+			while (cache_line_size < (1 << burst_max))
+				--burst_max;
+		}
+		chip->burst_max = burst_max;
+	}
+
+	/*
+	 * Tune PCI LATENCY TIMER according to burst max length transfer.
+	 * (latency timer >= burst length + 6, we add 10 to be quite sure)
+	 * If current value is zero, the device has probably been configured
+	 * for no bursting due to some broken hardware.
+	 */
+
+	if (latency_timer == 0 && chip->burst_max)
+		printk("ncr53c8xx: PCI_LATENCY_TIMER=0, bursting should'nt be allowed.\n");
+
+	if ((driver_setup.pci_fix_up & 4) && chip->burst_max) {
+		uchar lt = (1 << chip->burst_max) + 6 + 10;
+		if (latency_timer < lt) {
+			latency_timer = lt;
+			if (initverbose)
+				printk("ncr53c8xx: setting PCI_LATENCY_TIMER to %d bus clocks (fix-up).\n", latency_timer);
+			(void) pcibios_write_config_byte(bus, device_fn,
+				PCI_LATENCY_TIMER, latency_timer);
+		}
+	}
+
+	/*
+	 * Fix up for recent chips that support CACHE LINE SIZE.
+	 * If PCI config space is not OK, remove features that shall not be
+	 * used by the chip. No need to trigger possible chip bugs.
+	 */
+
+	if ((chip->features & FE_CLSE) && cache_line_size == 0) {
+		chip->features &= ~FE_CACHE_SET;
+		printk("ncr53c8xx: PCI_CACHE_LINE_SIZE not set, features based on CACHE LINE SIZE not used.\n");
+	}
+
+	if ((chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+		chip->features &= ~FE_WRIE;
+		printk("ncr53c8xx: PCI_COMMAND_INVALIDATE not set, WRITE AND INVALIDATE not used\n");
+	}
+
+#endif /* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+	/* initialise ncr_device structure with items required by ncr_attach */
+	device->slot.bus = bus;
+	device->slot.device_fn = device_fn;
+	device->slot.base = base;
+	device->slot.base_2 = base_2;
+	device->slot.io_port = io_port;
+	device->slot.irq = irq;
+	device->attach_done = 0;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	if (!nvram)
+		goto out;
+
+	/*
+	** Get access to chip IO registers
+	*/
+#ifdef NCR_IOMAPPED
+	request_region(io_port, 128, "ncr53c8xx");
+	device->slot.port = io_port;
+#else
+	device->slot.reg = (struct ncr_reg *) remap_pci_mem((ulong) base, 128);
+	if (!device->slot.reg)
+		goto out;
+#endif
+
+	/*
+	** Try to read SYMBIOS nvram.
+	** Data can be used to order booting of boards.
+	**
+	** Data is saved in ncr_device structure if NVRAM found. This
+	** is then used to find drive boot order for ncr_attach().
+	**
+	** NVRAM data is passed to Scsi_Host_Template later during ncr_attach()
+	** for any device set up.
+	**
+	** Try to read TEKRAM nvram if Symbios nvram not found.
+	*/
+
+	if (!ncr_get_Symbios_nvram(&device->slot, &nvram->data.Symbios))
+		nvram->type = SCSI_NCR_SYMBIOS_NVRAM;
+	else if (!ncr_get_Tekram_nvram(&device->slot, &nvram->data.Tekram))
+		nvram->type = SCSI_NCR_TEKRAM_NVRAM;
+	else
+		nvram->type = 0;
+
+	/*
+	** Release access to chip IO registers.
+	** (Bug fix: this release code used to sit under the 'out' label,
+	** so the error paths above released a region/mapping that had
+	** never been acquired, using uninitialized slot fields.)
+	*/
+#ifdef NCR_IOMAPPED
+	release_region(device->slot.port, 128);
+#else
+	unmap_pci_mem((vm_offset_t) device->slot.reg, (u_long) 128);
+#endif
+
+out:
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+	return 0;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+/*
+** Linux select queue depths function
+**
+** For every scsi device that belongs to 'host', set queue_depth to
+** SCSI_NCR_MAX_TAGS when the device supports tagged commands, to 2
+** otherwise (or to 1 when the driver is built without tag support).
+*/
+static void ncr53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist)
+{
+	struct scsi_device *device;
+
+	for (device = devlist; device; device = device->next) {
+		if (device->host == host) {
+#if SCSI_NCR_MAX_TAGS > 1
+			if (device->tagged_supported) {
+				device->queue_depth = SCSI_NCR_MAX_TAGS;
+			}
+			else {
+				device->queue_depth = 2;
+			}
+#else
+			device->queue_depth = 1;
+#endif
+
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx_select_queue_depth: id=%d, lun=%d, queue_depth=%d\n",
+	device->id, device->lun, device->queue_depth);
+#endif
+		}
+	}
+}
+#endif
+
+/*
+**	Linux entry point of queuecommand() function
+**
+**	Hand the command over to the driver core.  When the driver
+**	refuses it, complete the command immediately through 'done'
+**	with the failure status.  Returns the driver status either way.
+*/
+
+int ncr53c8xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+	int status;
+
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx_queue_command\n");
+#endif
+
+	status = ncr_queue_command(cmd, done);
+	if (status != DID_OK) {
+		cmd->result = ScsiResult(status, 0);
+		done(cmd);
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx : command not queued - result=%d\n", status);
+#endif
+		return status;
+	}
+
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx : command successfully queued\n");
+#endif
+	return status;
+}
+
+/*
+** Linux entry point of the interrupt handler.
+** For Linux versions > 1.3.70, we trust the kernel for
+** passing the internal host descriptor as 'dev_id'.
+** Otherwise, we scan the host list and call the interrupt
+** routine for each host that uses this IRQ.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+/* 'dev_id' is the ncb of the board that registered this IRQ. */
+static void ncr53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs)
+{
+#ifdef DEBUG_NCR53C8XX
+	printk("ncr53c8xx : interrupt received\n");
+#endif
+
+	if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
+	ncr_exception((ncb_p) dev_id);
+	if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n");
+}
+
+#else
+/* Older kernels do not pass dev_id: scan every host of this template
+** that uses this IRQ and service each one. */
+static void ncr53c8xx_intr(int irq, struct pt_regs * regs)
+{
+	struct Scsi_Host *host;
+	struct host_data *host_data;
+
+	for (host = first_host; host; host = host->next) {
+		if (host->hostt == the_template && host->irq == irq) {
+			host_data = (struct host_data *) host->hostdata;
+			if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
+			ncr_exception(host_data->ncb);
+			if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n");
+		}
+	}
+}
+#endif
+
+/*
+** Linux entry point of the timer handler
+**
+** 'np' carries the controller's ncb pointer cast to an unsigned long.
+*/
+
+static void ncr53c8xx_timeout(unsigned long np)
+{
+	ncr_timeout((ncb_p) np);
+}
+
+/*
+** Linux entry point of reset() function
+*/
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+
+/*
+**	New-style interface: reset the scsi bus with interrupts blocked.
+**	Requests for commands that are no longer current are refused.
+**	A successful reset also reports SCSI_RESET_HOST_RESET, since the
+**	controller itself is always reset as well.
+*/
+int ncr53c8xx_reset(Scsi_Cmnd *cmd, unsigned int reset_flags)
+{
+	int sts;
+	unsigned long flags;
+
+	printk("ncr53c8xx_reset: pid=%lu reset_flags=%x serial_number=%ld serial_number_at_timeout=%ld\n",
+		cmd->pid, reset_flags, cmd->serial_number, cmd->serial_number_at_timeout);
+
+	save_flags(flags); cli();
+
+	/*
+	 * We have to just ignore reset requests in some situations.
+	 */
+#if defined SCSI_RESET_NOT_RUNNING
+	if (cmd->serial_number != cmd->serial_number_at_timeout) {
+		sts = SCSI_RESET_NOT_RUNNING;
+		goto out;
+	}
+#endif
+	/*
+	 * If the mid-level driver told us reset is synchronous, it seems
+	 * that we must call the done() callback for the involved command,
+	 * even if this command was not queued to the low-level driver,
+	 * before returning SCSI_RESET_SUCCESS.
+	 */
+
+	sts = ncr_reset_bus(cmd,
+	(reset_flags & (SCSI_RESET_SYNCHRONOUS | SCSI_RESET_ASYNCHRONOUS)) == SCSI_RESET_SYNCHRONOUS);
+	/*
+	 * Since we always reset the controller, when we return success,
+	 * we add this information to the return code.
+	 */
+#if defined SCSI_RESET_HOST_RESET
+	if (sts == SCSI_RESET_SUCCESS)
+		sts |= SCSI_RESET_HOST_RESET;
+#endif
+
+out:
+	restore_flags(flags);
+	return sts;
+}
+#else
+/* Old interface: always perform a synchronous bus reset. */
+int ncr53c8xx_reset(Scsi_Cmnd *cmd)
+{
+	printk("ncr53c8xx_reset: command pid %lu\n", cmd->pid);
+	return ncr_reset_bus(cmd, 1);
+}
+#endif
+
+/*
+** Linux entry point of abort() function
+*/
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+
+int ncr53c8xx_abort(Scsi_Cmnd *cmd)
+{
+ int sts;
+ unsigned long flags;
+
+ printk("ncr53c8xx_abort: pid=%lu serial_number=%ld serial_number_at_timeout=%ld\n",
+ cmd->pid, cmd->serial_number, cmd->serial_number_at_timeout);
+
+ save_flags(flags); cli();
+
+ /*
+ * We have to just ignore abort requests in some situations.
+ */
+ if (cmd->serial_number != cmd->serial_number_at_timeout) {
+ sts = SCSI_ABORT_NOT_RUNNING;
+ goto out;
+ }
+
+ sts = ncr_abort_command(cmd);
+out:
+ restore_flags(flags);
+ return sts;
+}
+#else
+int ncr53c8xx_abort(Scsi_Cmnd *cmd)
+{
+ printk("ncr53c8xx_abort: command pid %lu\n", cmd->pid);
+ return ncr_abort_command(cmd);
+}
+#endif
+
+#ifdef MODULE
+int ncr53c8xx_release(struct Scsi_Host *host)
+{
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx : release\n");
+#endif
+ ncr_detach(((struct host_data *) host->hostdata)->ncb);
+
+ return 1;
+}
+#endif
+
+
+/*
+** Scsi command waiting list management.
+**
+** It may happen that we cannot insert a scsi command into the start queue,
+** in the following circumstances.
+** Too few preallocated ccb(s),
+** maxtags < cmd_per_lun of the Linux host control block,
+** etc...
+** Such scsi commands are inserted into a waiting list.
+** When a scsi command complete, we try to requeue the commands of the
+** waiting list.
+*/
+
+#define next_wcmd host_scribble
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd *wcmd;
+
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ cmd->next_wcmd = 0;
+ if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
+ else {
+ while ((wcmd->next_wcmd) != 0)
+ wcmd = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = (char *) cmd;
+ }
+}
+
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd *wcmd;
+
+ if (!(wcmd = np->waiting_list)) return 0;
+ while (wcmd->next_wcmd) {
+ if (cmd == (Scsi_Cmnd *) wcmd->next_wcmd) {
+ if (to_remove) {
+ wcmd->next_wcmd = cmd->next_wcmd;
+ cmd->next_wcmd = 0;
+ }
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ return cmd;
+ }
+ }
+ return 0;
+}
+
+static void process_waiting_list(ncb_p np, int sts)
+{
+ Scsi_Cmnd *waiting_list, *wcmd;
+
+ waiting_list = np->waiting_list;
+ np->waiting_list = 0;
+
+#ifdef DEBUG_WAITING_LIST
+ if (waiting_list) printf("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
+#endif
+ while ((wcmd = waiting_list) != 0) {
+ waiting_list = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = 0;
+ if (sts == DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
+#endif
+ sts = ncr_queue_command(wcmd, wcmd->scsi_done);
+ }
+ if (sts != DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
+#endif
+ wcmd->result = ScsiResult(sts, 0);
+ wcmd->scsi_done(wcmd);
+ }
+ }
+}
+
+#undef next_wcmd
+
+/*
+** Returns data transfer direction for common op-codes.
+*/
+
+static int guess_xfer_direction(int opcode)
+{
+ int d;
+
+ switch(opcode) {
+ case 0x12: /* INQUIRY 12 */
+ case 0x4D: /* LOG SENSE 4D */
+ case 0x5A: /* MODE SENSE(10) 5A */
+ case 0x1A: /* MODE SENSE(6) 1A */
+ case 0x3C: /* READ BUFFER 3C */
+ case 0x1C: /* RECEIVE DIAGNOSTIC RESULTS 1C */
+ case 0x03: /* REQUEST SENSE 03 */
+ d = XferIn;
+ break;
+ case 0x39: /* COMPARE 39 */
+ case 0x3A: /* COPY AND VERIFY 3A */
+ case 0x18: /* COPY 18 */
+ case 0x4C: /* LOG SELECT 4C */
+ case 0x55: /* MODE SELECT(10) 55 */
+ case 0x3B: /* WRITE BUFFER 3B */
+ case 0x1D: /* SEND DIAGNOSTIC 1D */
+ case 0x40: /* CHANGE DEFINITION 40 */
+ case 0x15: /* MODE SELECT(6) 15 */
+ d = XferOut;
+ break;
+ case 0x00: /* TEST UNIT READY 00 */
+ d = XferNone;
+ break;
+ default:
+ d = XferBoth;
+ break;
+ }
+
+ return d;
+}
+
+
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+
+/*=========================================================================
+** Proc file system stuff
+**
+** A read operation returns profile information.
+** A write operation is a control command.
+** The string is parsed in the driver code and the command is passed
+** to the ncr_usercmd() function.
+**=========================================================================
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+#define digit_to_bin(c) ((c) - '0')
+#define is_space(c) ((c) == ' ' || (c) == '\t')
+
+static int skip_spaces(char *ptr, int len)
+{
+ int cnt, c;
+
+ for (cnt = len; cnt > 0 && (c = *ptr++) && is_space(c); cnt--);
+
+ return (len - cnt);
+}
+
+static int get_int_arg(char *ptr, int len, u_long *pv)
+{
+ int cnt, c;
+ u_long v;
+
+ for (v = 0, cnt = len; cnt > 0 && (c = *ptr++) && is_digit(c); cnt--) {
+ v = (v * 10) + digit_to_bin(c);
+ }
+
+ if (pv)
+ *pv = v;
+
+ return (len - cnt);
+}
+
+static int is_keyword(char *ptr, int len, char *verb)
+{
+ int verb_len = strlen(verb);
+
+ if (len >= strlen(verb) && !memcmp(verb, ptr, verb_len))
+ return verb_len;
+ else
+ return 0;
+
+}
+
+#define SKIP_SPACES(min_spaces) \
+ if ((arg_len = skip_spaces(ptr, len)) < (min_spaces)) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+#define GET_INT_ARG(v) \
+ if (!(arg_len = get_int_arg(ptr, len, &(v)))) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+
+/*
+** Parse a control command
+*/
+
+static int ncr_user_command(ncb_p np, char *buffer, int length)
+{
+ char *ptr = buffer;
+ int len = length;
+ struct usrcmd *uc = &np->user;
+ int arg_len;
+ u_long target;
+
+ bzero(uc, sizeof(*uc));
+
+ if (len > 0 && ptr[len-1] == '\n')
+ --len;
+
+ if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
+ uc->cmd = UC_SETSYNC;
+ else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
+ uc->cmd = UC_SETTAGS;
+ else if ((arg_len = is_keyword(ptr, len, "setorder")) != 0)
+ uc->cmd = UC_SETORDER;
+ else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
+ uc->cmd = UC_SETWIDE;
+ else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
+ uc->cmd = UC_SETDEBUG;
+ else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
+ uc->cmd = UC_SETFLAG;
+ else if ((arg_len = is_keyword(ptr, len, "clearprof")) != 0)
+ uc->cmd = UC_CLEARPROF;
+#ifdef UC_DEBUG_ERROR_RECOVERY
+ else if ((arg_len = is_keyword(ptr, len, "debug_error_recovery")) != 0)
+ uc->cmd = UC_DEBUG_ERROR_RECOVERY;
+#endif
+ else
+ arg_len = 0;
+
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
+#endif
+
+ if (!arg_len)
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+
+ switch(uc->cmd) {
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ case UC_SETFLAG:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
+ ptr += arg_len; len -= arg_len;
+ uc->target = ~0;
+ } else {
+ GET_INT_ARG(target);
+ uc->target = (1<<target);
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: target=%ld\n", target);
+#endif
+ }
+ break;
+ }
+
+ switch(uc->cmd) {
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ SKIP_SPACES(1);
+ GET_INT_ARG(uc->data);
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETORDER:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "simple")))
+ uc->data = M_SIMPLE_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "ordered")))
+ uc->data = M_ORDERED_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "default")))
+ uc->data = 0;
+ else
+ return -EINVAL;
+ break;
+ case UC_SETDEBUG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "alloc")))
+ uc->data |= DEBUG_ALLOC;
+ else if ((arg_len = is_keyword(ptr, len, "phase")))
+ uc->data |= DEBUG_PHASE;
+ else if ((arg_len = is_keyword(ptr, len, "poll")))
+ uc->data |= DEBUG_POLL;
+ else if ((arg_len = is_keyword(ptr, len, "queue")))
+ uc->data |= DEBUG_QUEUE;
+ else if ((arg_len = is_keyword(ptr, len, "result")))
+ uc->data |= DEBUG_RESULT;
+ else if ((arg_len = is_keyword(ptr, len, "scatter")))
+ uc->data |= DEBUG_SCATTER;
+ else if ((arg_len = is_keyword(ptr, len, "script")))
+ uc->data |= DEBUG_SCRIPT;
+ else if ((arg_len = is_keyword(ptr, len, "tiny")))
+ uc->data |= DEBUG_TINY;
+ else if ((arg_len = is_keyword(ptr, len, "timing")))
+ uc->data |= DEBUG_TIMING;
+ else if ((arg_len = is_keyword(ptr, len, "nego")))
+ uc->data |= DEBUG_NEGO;
+ else if ((arg_len = is_keyword(ptr, len, "tags")))
+ uc->data |= DEBUG_TAGS;
+ else if ((arg_len = is_keyword(ptr, len, "freeze")))
+ uc->data |= DEBUG_FREEZE;
+ else if ((arg_len = is_keyword(ptr, len, "restart")))
+ uc->data |= DEBUG_RESTART;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETFLAG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "trace")))
+ uc->data |= UF_TRACE;
+ else if ((arg_len = is_keyword(ptr, len, "no_disc")))
+ uc->data |= UF_NODISC;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+ break;
+#ifdef UC_DEBUG_ERROR_RECOVERY
+ case UC_DEBUG_ERROR_RECOVERY:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "sge")))
+ uc->data = 1;
+ else if ((arg_len = is_keyword(ptr, len, "abort")))
+ uc->data = 2;
+ else if ((arg_len = is_keyword(ptr, len, "reset")))
+ uc->data = 3;
+ else if ((arg_len = is_keyword(ptr, len, "parity")))
+ uc->data = 4;
+ else if ((arg_len = is_keyword(ptr, len, "none")))
+ uc->data = 0;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (len)
+ return -EINVAL;
+ else {
+ long flags;
+
+ save_flags(flags); cli();
+ ncr_usercmd (np);
+ restore_flags(flags);
+ }
+ return length;
+}
+
+#endif /* SCSI_NCR_USER_COMMAND_SUPPORT */
+
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+struct info_str
+{
+ char *buffer;
+ int length;
+ int offset;
+ int pos;
+};
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+ if (info->pos + len > info->length)
+ len = info->length - info->pos;
+
+ if (info->pos + len < info->offset) {
+ info->pos += len;
+ return;
+ }
+ if (info->pos < info->offset) {
+ data += (info->offset - info->pos);
+ len -= (info->offset - info->pos);
+ }
+
+ if (len > 0) {
+ memcpy(info->buffer + info->pos, data, len);
+ info->pos += len;
+ }
+}
+
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+ va_list args;
+ char buf[81];
+ int len;
+
+ va_start(args, fmt);
+ len = vsprintf(buf, fmt, args);
+ va_end(args);
+
+ copy_mem_info(info, buf, len);
+ return len;
+}
+
+/*
+** Copy formatted profile information into the input buffer.
+*/
+
+#define to_ms(t) ((t) * 1000 / HZ)
+
+static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len)
+{
+ struct info_str info;
+
+ info.buffer = ptr;
+ info.length = len;
+ info.offset = offset;
+ info.pos = 0;
+
+ copy_info(&info, "General information:\n");
+ copy_info(&info, " Chip NCR53C%s, ", np->chip_name);
+ copy_info(&info, "device id 0x%x, ", np->device_id);
+ copy_info(&info, "revision id 0x%x\n", np->revision_id);
+
+ copy_info(&info, " IO port address 0x%lx, ", (u_long) np->port);
+ copy_info(&info, "IRQ number %d\n", (int) np->irq);
+
+#ifndef NCR_IOMAPPED
+ if (np->reg)
+ copy_info(&info, " Using memory mapped IO at virtual address 0x%lx\n",
+ (u_long) np->reg);
+#endif
+ copy_info(&info, " Synchronous period factor %d, ", (int) np->minsync);
+ copy_info(&info, "max commands per lun %d\n", SCSI_NCR_MAX_TAGS);
+
+ if (driver_setup.debug || driver_setup.verbose > 1) {
+ copy_info(&info, " Debug flags 0x%x, ", driver_setup.debug);
+ copy_info(&info, "verbosity level %d\n", driver_setup.verbose);
+ }
+
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+ copy_info(&info, "Profiling information:\n");
+ copy_info(&info, " %-12s = %lu\n", "num_trans",np->profile.num_trans);
+ copy_info(&info, " %-12s = %lu\n", "num_kbytes",np->profile.num_kbytes);
+ copy_info(&info, " %-12s = %lu\n", "num_disc", np->profile.num_disc);
+ copy_info(&info, " %-12s = %lu\n", "num_break",np->profile.num_break);
+ copy_info(&info, " %-12s = %lu\n", "num_int", np->profile.num_int);
+ copy_info(&info, " %-12s = %lu\n", "num_fly", np->profile.num_fly);
+ copy_info(&info, " %-12s = %lu\n", "ms_setup", to_ms(np->profile.ms_setup));
+ copy_info(&info, " %-12s = %lu\n", "ms_data", to_ms(np->profile.ms_data));
+ copy_info(&info, " %-12s = %lu\n", "ms_disc", to_ms(np->profile.ms_disc));
+ copy_info(&info, " %-12s = %lu\n", "ms_post", to_ms(np->profile.ms_post));
+#endif
+
+ return info.pos > info.offset? info.pos - info.offset : 0;
+}
+
+#endif /* SCSI_NCR_USER_INFO_SUPPORT */
+
+/*
+** Entry point of the scsi proc fs of the driver.
+** - func = 0 means read (returns profile data)
+** - func = 1 means write (parse user control command)
+*/
+
+int ncr53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func)
+{
+ struct Scsi_Host *host;
+ struct host_data *host_data;
+ ncb_p ncb = 0;
+ int retv;
+
+#ifdef DEBUG_PROC_INFO
+printf("ncr53c8xx_proc_info: hostno=%d, func=%d\n", hostno, func);
+#endif
+
+ for (host = first_host; host; host = host->next) {
+ if (host->hostt == the_template && host->host_no == hostno) {
+ host_data = (struct host_data *) host->hostdata;
+ ncb = host_data->ncb;
+ break;
+ }
+ }
+
+ if (!ncb)
+ return -EINVAL;
+
+ if (func) {
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+ retv = ncr_user_command(ncb, buffer, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+ else {
+ if (start)
+ *start = buffer;
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+ retv = ncr_host_info(ncb, buffer, offset, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+
+ return retv;
+}
+
+
+/*=========================================================================
+** End of proc file system stuff
+**=========================================================================
+*/
+#endif
+
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/* ---------------------------------------------------------------------
+**
+** Try reading Symbios format nvram
+**
+** ---------------------------------------------------------------------
+**
+** GPOI0 - data in/data out
+** GPIO1 - clock
+**
+** return 0 if NVRAM data OK, 1 if NVRAM data not OK
+** ---------------------------------------------------------------------
+*/
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+static u_short nvram_read_data(ncr_slot *np, u_char *data, int len, u_char *gpreg, u_char *gpcntl);
+static void nvram_start(ncr_slot *np, u_char *gpreg);
+static void nvram_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl);
+static void nvram_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl);
+static void nvram_readAck(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl);
+static void nvram_writeAck(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl);
+static void nvram_doBit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg);
+static void nvram_stop(ncr_slot *np, u_char *gpreg);
+static void nvram_setBit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode);
+
+__initfunc(
+static int ncr_get_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+)
+{
+ static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_short csum;
+ u_char ack_data;
+ int retv = 1;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+ gpcntl = old_gpcntl & 0xfc;
+
+ /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+ OUTB (nc_gpreg, old_gpreg);
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ nvram_setBit(np, 0, &gpreg, CLR_CLK);
+ nvram_setBit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ nvram_stop(np, &gpreg);
+
+ /* activate NVRAM */
+ nvram_start(np, &gpreg);
+
+ /* write device code and random address MSB */
+ nvram_write_byte(np, &ack_data,
+ 0xa0 | ((SYMBIOS_NVRAM_ADDRESS >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* write random address LSB */
+ nvram_write_byte(np, &ack_data,
+ (SYMBIOS_NVRAM_ADDRESS & 0x7f) << 1, &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* regenerate START state to set up for reading */
+ nvram_start(np, &gpreg);
+
+ /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+ nvram_write_byte(np, &ack_data,
+ 0xa1 | ((SYMBIOS_NVRAM_ADDRESS >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* now set up GPIO0 for inputting data */
+ gpcntl |= 0x01;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all active data - only part of total NVRAM */
+ csum = nvram_read_data(np,
+ (u_char *) nvram, sizeof(*nvram), &gpreg, &gpcntl);
+
+ /* finally put NVRAM back in inactive mode */
+ gpcntl &= 0xfe;
+ OUTB (nc_gpcntl, gpcntl);
+ nvram_stop(np, &gpreg);
+
+#ifdef SCSI_NCR_DEBUG_NVRAM
+printf("ncr53c8xx: NvRAM marker=%x trailer=%x %x %x %x %x %x byte_count=%d/%d checksum=%x/%x\n",
+ nvram->start_marker,
+ nvram->trailer[0], nvram->trailer[1], nvram->trailer[2],
+ nvram->trailer[3], nvram->trailer[4], nvram->trailer[5],
+ nvram->byte_count, sizeof(*nvram) - 12,
+ nvram->checksum, csum);
+#endif
+
+ /* check valid NVRAM signature, verify byte count and checksum */
+ if (nvram->start_marker == 0 &&
+ !memcmp(nvram->trailer, Symbios_trailer, 6) &&
+ nvram->byte_count == sizeof(*nvram) - 12 &&
+ csum == nvram->checksum)
+ retv = 0;
+out:
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+/*
+ * Read Symbios NvRAM data and compute checksum.
+ */
+__initfunc(
+static u_short nvram_read_data(ncr_slot *np, u_char *data, int len, u_char *gpreg, u_char *gpcntl)
+)
+{
+ int x;
+ u_short csum;
+
+ for (x = 0; x < len; x++)
+ nvram_read_byte(np, &data[x], (x == (len - 1)), gpreg, gpcntl);
+
+ for (x = 6, csum = 0; x < len - 6; x++)
+ csum += data[x];
+
+ return csum;
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+__initfunc(
+static void nvram_start(ncr_slot *np, u_char *gpreg)
+)
+{
+ nvram_setBit(np, 1, gpreg, SET_BIT);
+ nvram_setBit(np, 0, gpreg, SET_CLK);
+ nvram_setBit(np, 0, gpreg, CLR_BIT);
+ nvram_setBit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+__initfunc(
+static void nvram_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl)
+)
+{
+ int x;
+
+ for (x = 0; x < 8; x++)
+ nvram_doBit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ nvram_readAck(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+__initfunc(
+static void nvram_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl)
+)
+{
+ int x;
+ u_char read_bit;
+
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ nvram_doBit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ nvram_writeAck(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+__initfunc(
+static void nvram_writeAck(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+)
+{
+ OUTB (nc_gpcntl, *gpcntl & 0xfe);
+ nvram_doBit(np, 0, write_bit, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+__initfunc(
+static void nvram_readAck(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+)
+{
+ OUTB (nc_gpcntl, *gpcntl | 0x01);
+ nvram_doBit(np, read_bit, 1, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+__initfunc(
+static void nvram_doBit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+)
+{
+ nvram_setBit(np, write_bit, gpreg, SET_BIT);
+ nvram_setBit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB (nc_gpreg);
+ nvram_setBit(np, 0, gpreg, CLR_CLK);
+ nvram_setBit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+__initfunc(
+static void nvram_stop(ncr_slot *np, u_char *gpreg)
+)
+{
+ nvram_setBit(np, 0, gpreg, SET_CLK);
+ nvram_setBit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Set/clear data/clock bit in GPIO0
+ */
+__initfunc(
+static void nvram_setBit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+)
+{
+ DELAY(5);
+ switch (bit_mode){
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB (nc_gpreg, *gpreg);
+ DELAY(5);
+}
+
+#undef SET_BIT 0
+#undef CLR_BIT 1
+#undef SET_CLK 2
+#undef CLR_CLK 3
+
+
+/* ---------------------------------------------------------------------
+**
+** Try reading Tekram format nvram
+**
+** ---------------------------------------------------------------------
+**
+** GPOI0 - data in
+** GPIO1 - data out
+** GPIO2 - clock
+** GPIO4 - chip select
+**
+** return 0 if NVRAM data OK, 1 if NVRAM data not OK
+** ---------------------------------------------------------------------
+*/
+
+static u_short Tnvram_read_data(ncr_slot *np, u_short *data, int len, u_char *gpreg);
+static void Tnvram_Send_Command(ncr_slot *np, u_short write_data, u_char *read_bit, u_char *gpreg);
+static void Tnvram_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg);
+static void Tnvram_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg);
+static void Tnvram_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg);
+static void Tnvram_Stop(ncr_slot *np, u_char *gpreg);
+static void Tnvram_Clk(ncr_slot *np, u_char *gpreg);
+
+__initfunc(
+static int ncr_get_Tekram_nvram (ncr_slot *np, Tekram_nvram *nvram)
+)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_short csum;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+
+ /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
+ 1/2/4 out */
+ gpreg = old_gpreg & 0xe9;
+ OUTB (nc_gpreg, gpreg);
+ gpcntl = (old_gpcntl & 0xe9) | 0x09;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all of NVRAM, 64 words */
+ csum = Tnvram_read_data(np, (u_short *) nvram,
+ sizeof(*nvram) / sizeof(short), &gpreg);
+
+ /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ /* check data valid */
+ if (csum != 0x1234)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Read Tekram NvRAM data and compute checksum.
+ */
+__initfunc(
+static u_short Tnvram_read_data(ncr_slot *np, u_short *data, int len, u_char *gpreg)
+)
+{
+ u_char read_bit;
+ u_short csum;
+ int x;
+
+ for (x = 0, csum = 0; x < len; x++) {
+
+ /* output read command and address */
+ Tnvram_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 0; /* Force bad checksum */
+
+ Tnvram_Read_Word(np, &data[x], gpreg);
+ csum += data[x];
+
+ Tnvram_Stop(np, gpreg);
+ }
+
+ return csum;
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+__initfunc(
+static void Tnvram_Send_Command(ncr_slot *np, u_short write_data, u_char *read_bit, u_char *gpreg)
+)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ Tnvram_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * READ a byte from the NVRAM
+ */
+__initfunc(
+static void Tnvram_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+)
+{
+ int x;
+ u_char read_bit;
+
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ Tnvram_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read bit from NVRAM
+ */
+__initfunc(
+static void Tnvram_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+)
+{
+ DELAY(2);
+ Tnvram_Clk(np, gpreg);
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Write bit to GPIO0
+ */
+__initfunc(
+static void Tnvram_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+)
+{
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ *gpreg |= 0x10;
+
+ OUTB (nc_gpreg, *gpreg);
+ DELAY(2);
+
+ Tnvram_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+__initfunc(
+static void Tnvram_Stop(ncr_slot *np, u_char *gpreg)
+)
+{
+ *gpreg &= 0xef;
+ OUTB (nc_gpreg, *gpreg);
+ DELAY(2);
+
+ Tnvram_Clk(np, gpreg);
+}
+
+/*
+ * Pulse clock bit in GPIO0
+ */
+__initfunc(
+static void Tnvram_Clk(ncr_slot *np, u_char *gpreg)
+)
+{
+ OUTB (nc_gpreg, *gpreg | 0x04);
+ DELAY(2);
+ OUTB (nc_gpreg, *gpreg);
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Module stuff
+*/
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = NCR53C8XX;
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/ncr53c8xx.h b/linux/src/drivers/scsi/ncr53c8xx.h
new file mode 100644
index 0000000..0342438
--- /dev/null
+++ b/linux/src/drivers/scsi/ncr53c8xx.h
@@ -0,0 +1,1220 @@
+/******************************************************************************
+** Device driver for the PCI-SCSI NCR538XX controller family.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
+** and is currently maintained by
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+** And has been ported to NetBSD by
+** Charles M. Hannum <mycroft@gnu.ai.mit.edu>
+**
+*******************************************************************************
+*/
+
+#ifndef NCR53C8XX_H
+#define NCR53C8XX_H
+
+/*
+** Name and revision of the driver
+*/
+#define SCSI_NCR_DRIVER_NAME "ncr53c8xx - revision 2.5f.1"
+
+/*
+** Check supported Linux versions
+*/
+
+#if !defined(LINUX_VERSION_CODE)
+#include <linux/version.h>
+#endif
+#include <linux/config.h>
+
+/*
+** During make dep of linux-1.2.13, LINUX_VERSION_CODE is undefined
+** Under linux-1.3.X, all seems to be OK.
+** So, we have only to define it under 1.2.13
+*/
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#if !defined(LINUX_VERSION_CODE)
+#define LINUX_VERSION_CODE LinuxVersionCode(1,2,13)
+#endif
+
+/*
+** Normal IO or memory mapped IO.
+**
+** Memory mapped IO only works with linux-1.3.X
+** If your motherboard does not work with memory mapped IO,
+** define SCSI_NCR_IOMAPPED for PATCHLEVEL 3 too.
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(1,3,0)
+# define SCSI_NCR_IOMAPPED
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+//# define SCSI_NCR_PROC_INFO_SUPPORT
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,72)
+# define SCSI_NCR_SHARE_IRQ
+#endif
+
+/*
+** If you want a driver as small as possible, donnot define the
+** following options.
+*/
+
+#define SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+#define SCSI_NCR_DEBUG_INFO_SUPPORT
+#define SCSI_NCR_PCI_FIX_UP_SUPPORT
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+# define SCSI_NCR_PROFILE_SUPPORT
+# define SCSI_NCR_USER_COMMAND_SUPPORT
+# define SCSI_NCR_USER_INFO_SUPPORT
+/* # define SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT */
+#endif
+
+/*==========================================================
+**
+** nvram settings - #define SCSI_NCR_NVRAM_SUPPORT to enable
+**
+**==========================================================
+*/
+
+#ifdef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define SCSI_NCR_NVRAM_SUPPORT
+/* #define SCSI_NCR_DEBUG_NVRAM */
+#endif
+
+/* ---------------------------------------------------------------------
+** Take into account kernel configured parameters.
+** Most of these options can be overridden at startup by a command line.
+** ---------------------------------------------------------------------
+*/
+
+/*
+ * For Ultra2 SCSI support option, use special features and allow 40Mhz
+ * synchronous data transfers.
+ */
+#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3)
+#define SCSI_NCR_SETUP_ULTRA_SCSI (2)
+#define SCSI_NCR_MAX_SYNC (40)
+
+/*
+ * Allow tags from 2 to 12, default 4
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#if CONFIG_SCSI_NCR53C8XX_MAX_TAGS < 2
+#define SCSI_NCR_MAX_TAGS (2)
+#elif CONFIG_SCSI_NCR53C8XX_MAX_TAGS > 12
+#define SCSI_NCR_MAX_TAGS (12)
+#else
+#define SCSI_NCR_MAX_TAGS CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#endif
+#else
+#define SCSI_NCR_MAX_TAGS (4)
+#endif
+
+/*
+ * Allow tagged command queuing support if configured with default number
+ * of tags set to max (see above).
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_TAGGED_QUEUE
+#define SCSI_NCR_SETUP_DEFAULT_TAGS SCSI_NCR_MAX_TAGS
+#else
+#define SCSI_NCR_SETUP_DEFAULT_TAGS (0)
+#endif
+
+/*
+ * Use normal IO if configured. Forced for alpha and ppc.
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IOMAPPED)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__alpha__) || defined(__powerpc__)
+#define SCSI_NCR_IOMAPPED
+#endif
+
+/*
+ * Sync transfer frequency at startup.
+ * Allow from 5Mhz to 40Mhz default 10 Mhz.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC (5)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC > SCSI_NCR_MAX_SYNC
+#define SCSI_NCR_SETUP_DEFAULT_SYNC SCSI_NCR_MAX_SYNC
+#endif
+
+#if CONFIG_SCSI_NCR53C8XX_SYNC == 0
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (255)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 5
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (50)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 20
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (250/(CONFIG_SCSI_NCR53C8XX_SYNC))
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 33
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (11)
+#else
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (10)
+#endif
+
+/*
+ * Disallow disconnections at boot-up
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT
+#define SCSI_NCR_SETUP_DISCONNECTION (0)
+#else
+#define SCSI_NCR_SETUP_DISCONNECTION (1)
+#endif
+
+/*
+ * Force synchronous negotiation for all targets
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (1)
+#else
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (0)
+#endif
+
+/*
+ * Disable master parity checking (flawed hardwares need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_MPARITY_CHECK
+#define SCSI_NCR_SETUP_MASTER_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_MASTER_PARITY (1)
+#endif
+
+/*
+ * Disable scsi parity checking (flawed devices may need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_PARITY_CHECK
+#define SCSI_NCR_SETUP_SCSI_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_SCSI_PARITY (1)
+#endif
+
+/*
+ * Vendor specific stuff
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT
+#define SCSI_NCR_SETUP_LED_PIN (1)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (3)
+#else
+#define SCSI_NCR_SETUP_LED_PIN (0)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (0)
+#endif
+
+/*
+ * Settle time after reset at boot-up
+ */
+#define SCSI_NCR_SETUP_SETTLE_TIME (2)
+
+/*
+** Other parameters not configurable with "make config"
+** Avoid to change these constants, unless you know what you are doing.
+*/
+
+#define SCSI_NCR_ALWAYS_SIMPLE_TAG
+#define SCSI_NCR_MAX_SCATTER (127)
+#define SCSI_NCR_MAX_TARGET (16)
+#define SCSI_NCR_MAX_HOST (2)
+#define SCSI_NCR_TIMEOUT_ALERT (3*HZ)
+
+#define SCSI_NCR_CAN_QUEUE (7*SCSI_NCR_MAX_TAGS)
+#define SCSI_NCR_CMD_PER_LUN (SCSI_NCR_MAX_TAGS)
+#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER)
+
+#define SCSI_NCR_TIMER_INTERVAL ((HZ+5-1)/5)
+
+#if 1 /* defined CONFIG_SCSI_MULTI_LUN */
+#define SCSI_NCR_MAX_LUN (8)
+#else
+#define SCSI_NCR_MAX_LUN (1)
+#endif
+
+/*
+** Define Scsi_Host_Template parameters
+**
+** Used by hosts.c and ncr53c8xx.c with module configuration.
+*/
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,98)
+#include <scsi/scsicam.h>
+#else
+#include <linux/scsicam.h>
+#endif
+
+int ncr53c8xx_abort(Scsi_Cmnd *);
+int ncr53c8xx_detect(Scsi_Host_Template *tpnt);
+int ncr53c8xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,98)
+int ncr53c8xx_reset(Scsi_Cmnd *, unsigned int);
+#else
+int ncr53c8xx_reset(Scsi_Cmnd *);
+#endif
+
+#ifdef MODULE
+int ncr53c8xx_release(struct Scsi_Host *);
+#else
+#define ncr53c8xx_release NULL
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,75)
+
+#define NCR53C8XX { name: SCSI_NCR_DRIVER_NAME, \
+ detect: ncr53c8xx_detect, \
+ release: ncr53c8xx_release, \
+ queuecommand: ncr53c8xx_queue_command,\
+ abort: ncr53c8xx_abort, \
+ reset: ncr53c8xx_reset, \
+ bios_param: scsicam_bios_param, \
+ can_queue: SCSI_NCR_CAN_QUEUE, \
+ this_id: 7, \
+ sg_tablesize: SCSI_NCR_SG_TABLESIZE, \
+ cmd_per_lun: SCSI_NCR_CMD_PER_LUN, \
+ use_clustering: DISABLE_CLUSTERING}
+
+#elif LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+
+#define NCR53C8XX { NULL, NULL, NULL, NULL, \
+ SCSI_NCR_DRIVER_NAME, ncr53c8xx_detect, \
+ ncr53c8xx_release, NULL, NULL, \
+ ncr53c8xx_queue_command,ncr53c8xx_abort, \
+ ncr53c8xx_reset, NULL, scsicam_bios_param, \
+ SCSI_NCR_CAN_QUEUE, 7, \
+ SCSI_NCR_SG_TABLESIZE, SCSI_NCR_CMD_PER_LUN, \
+ 0, 0, DISABLE_CLUSTERING}
+
+#else
+
+#define NCR53C8XX { NULL, NULL, \
+ SCSI_NCR_DRIVER_NAME, ncr53c8xx_detect, \
+ ncr53c8xx_release, NULL, NULL, \
+ ncr53c8xx_queue_command,ncr53c8xx_abort, \
+ ncr53c8xx_reset, NULL, scsicam_bios_param, \
+ SCSI_NCR_CAN_QUEUE, 7, \
+ SCSI_NCR_SG_TABLESIZE, SCSI_NCR_CMD_PER_LUN, \
+ 0, 0, DISABLE_CLUSTERING}
+
+#endif /* LINUX_VERSION_CODE */
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+
+#ifndef HOSTS_C
+
+/*
+** IO functions definition for big/little endian support.
+** For now, the NCR is only supported in little endian addressing mode,
+** and big endian byte ordering is only supported for the PPC.
+** MMIO is not used on PPC.
+*/
+
+#ifdef __BIG_ENDIAN
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#error "BIG ENDIAN byte ordering needs kernel version >= 2.1.0"
+#endif
+
+#ifdef __powerpc__
+#define inw_l2b inw
+#define inl_l2b inl
+#define outw_b2l outw
+#define outl_b2l outl
+#else
+#error "Support for BIG ENDIAN is only available for the PowerPC"
+#endif
+
+#else /* Assumed x86 or alpha */
+
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readw_raw readw
+#define readl_raw readl
+#define writew_raw writew
+#define writel_raw writel
+
+#endif
+
+#ifdef SCSI_NCR_BIG_ENDIAN
+#error "The NCR in BIG ENDIAN adressing mode is not (yet) supported"
+#endif
+
+/*
+** NCR53C8XX Device Ids
+*/
+
+#ifndef PCI_DEVICE_ID_NCR_53C810
+#define PCI_DEVICE_ID_NCR_53C810 1
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C810AP
+#define PCI_DEVICE_ID_NCR_53C810AP 5
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C815
+#define PCI_DEVICE_ID_NCR_53C815 4
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C820
+#define PCI_DEVICE_ID_NCR_53C820 2
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C825
+#define PCI_DEVICE_ID_NCR_53C825 3
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C860
+#define PCI_DEVICE_ID_NCR_53C860 6
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875
+#define PCI_DEVICE_ID_NCR_53C875 0xf
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875J
+#define PCI_DEVICE_ID_NCR_53C875J 0x8f
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C885
+#define PCI_DEVICE_ID_NCR_53C885 0xd
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895
+#define PCI_DEVICE_ID_NCR_53C895 0xc
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C896
+#define PCI_DEVICE_ID_NCR_53C896 0xb
+#endif
+
+/*
+** NCR53C8XX devices features table.
+*/
+typedef struct {
+ unsigned short device_id;
+ unsigned short revision_id;
+ char *name;
+ unsigned char burst_max;
+ unsigned char offset_max;
+ unsigned char nr_divisor;
+ unsigned int features;
+#define FE_LED0 (1<<0)
+#define FE_WIDE (1<<1)
+#define FE_ULTRA (1<<2)
+#define FE_ULTRA2 (1<<3)
+#define FE_DBLR (1<<4)
+#define FE_QUAD (1<<5)
+#define FE_ERL (1<<6)
+#define FE_CLSE (1<<7)
+#define FE_WRIE (1<<8)
+#define FE_ERMP (1<<9)
+#define FE_BOF (1<<10)
+#define FE_DFS (1<<11)
+#define FE_PFEN (1<<12)
+#define FE_LDSTR (1<<13)
+#define FE_RAM (1<<14)
+#define FE_CLK80 (1<<15)
+#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_SCSI_SET (FE_WIDE|FE_ULTRA|FE_ULTRA2|FE_DBLR|FE_QUAD|F_CLK80)
+#define FE_SPECIAL_SET (FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM)
+} ncr_chip;
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 3.
+** Memory Read transaction terminated by a retry followed by
+** Memory Read Line command.
+*/
+#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL)
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 5.
+** On paper, this errata is harmless. But it is a good reason for
+** using a shorter programmed burst length (64 DWORDS instead of 128).
+*/
+
+#define SCSI_NCR_CHIP_TABLE \
+{ \
+ {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, \
+ FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, \
+ FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, \
+ FE_ERL|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C820, 0xff, "820", 4, 8, 4, \
+ FE_WIDE|FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 4, 8, 4, \
+ FE_WIDE|FE_ERL|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, \
+ FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, \
+ FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 7, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 7, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+}
+
+/*
+ * List of supported NCR chip ids
+ */
+#define SCSI_NCR_CHIP_IDS \
+{ \
+ PCI_DEVICE_ID_NCR_53C810, \
+ PCI_DEVICE_ID_NCR_53C815, \
+ PCI_DEVICE_ID_NCR_53C820, \
+ PCI_DEVICE_ID_NCR_53C825, \
+ PCI_DEVICE_ID_NCR_53C860, \
+ PCI_DEVICE_ID_NCR_53C875, \
+ PCI_DEVICE_ID_NCR_53C875J, \
+ PCI_DEVICE_ID_NCR_53C885, \
+ PCI_DEVICE_ID_NCR_53C895, \
+ PCI_DEVICE_ID_NCR_53C896 \
+}
+
+/*
+** Initial setup.
+** Can be overriden at startup by a command line.
+*/
+#define SCSI_NCR_DRIVER_SETUP \
+{ \
+ SCSI_NCR_SETUP_MASTER_PARITY, \
+ SCSI_NCR_SETUP_SCSI_PARITY, \
+ SCSI_NCR_SETUP_DISCONNECTION, \
+ SCSI_NCR_SETUP_SPECIAL_FEATURES, \
+ SCSI_NCR_SETUP_ULTRA_SCSI, \
+ SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \
+ 0, \
+ 0, \
+ 1, \
+ 1, \
+ SCSI_NCR_SETUP_DEFAULT_TAGS, \
+ SCSI_NCR_SETUP_DEFAULT_SYNC, \
+ 0x00, \
+ 7, \
+ SCSI_NCR_SETUP_LED_PIN, \
+ 1, \
+ SCSI_NCR_SETUP_SETTLE_TIME, \
+ SCSI_NCR_SETUP_DIFF_SUPPORT, \
+ 0, \
+ 1 \
+}
+
+/*
+** Boot fail safe setup.
+** Override initial setup from boot command line:
+** ncr53c8xx=safe:y
+*/
+#define SCSI_NCR_DRIVER_SAFE_SETUP \
+{ \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 1, \
+ 2, \
+ 0, \
+ 255, \
+ 0x00, \
+ 255, \
+ 0, \
+ 0, \
+ 10, \
+ 1, \
+ 1, \
+ 1 \
+}
+
+/*
+** Define the table of target capabilities by host and target
+**
+** If you have problems with a scsi device, note the host unit and the
+** corresponding target number.
+**
+** Edit the corresponding entry of the table below and try successively:
+** NQ7_Questionnable
+** NQ7_IdeLike
+**
+** This bitmap is anded with the byte 7 of inquiry data on completion of
+** INQUIRY command.
+** The driver never see the zeroed bits and will ignore the corresponding
+** capabilities of the target.
+*/
+
+#define INQ7_SftRe 1
+#define INQ7_CmdQueue (1<<1) /* Tagged Command */
+#define INQ7_Reserved (1<<2)
+#define INQ7_Linked (1<<3)
+#define INQ7_Sync (1<<4) /* Synchronous Negotiation */
+#define INQ7_WBus16 (1<<5)
+#define INQ7_WBus32 (1<<6)
+#define INQ7_RelAdr (1<<7)
+
+#define INQ7_IdeLike 0
+#define INQ7_Scsi1Like INQ7_IdeLike
+#define INQ7_Perfect 0xff
+#define INQ7_Questionnable ~(INQ7_CmdQueue|INQ7_Sync)
+#define INQ7_VeryQuestionnable \
+ ~(INQ7_CmdQueue|INQ7_Sync|INQ7_WBus16|INQ7_WBus32)
+
+#define INQ7_Default INQ7_Perfect
+
+#define NCR53C8XX_TARGET_CAPABILITIES \
+/* Host 0 */ \
+{ \
+ { \
+ /* Target 0 */ INQ7_Default, \
+ /* Target 1 */ INQ7_Default, \
+ /* Target 2 */ INQ7_Default, \
+ /* Target 3 */ INQ7_Default, \
+ /* Target 4 */ INQ7_Default, \
+ /* Target 5 */ INQ7_Default, \
+ /* Target 6 */ INQ7_Default, \
+ /* Target 7 */ INQ7_Default, \
+ /* Target 8 */ INQ7_Default, \
+ /* Target 9 */ INQ7_Default, \
+ /* Target 10 */ INQ7_Default, \
+ /* Target 11 */ INQ7_Default, \
+ /* Target 12 */ INQ7_Default, \
+ /* Target 13 */ INQ7_Default, \
+ /* Target 14 */ INQ7_Default, \
+ /* Target 15 */ INQ7_Default, \
+ } \
+}, \
+/* Host 1 */ \
+{ \
+ { \
+ /* Target 0 */ INQ7_Default, \
+ /* Target 1 */ INQ7_Default, \
+ /* Target 2 */ INQ7_Default, \
+ /* Target 3 */ INQ7_Default, \
+ /* Target 4 */ INQ7_Default, \
+ /* Target 5 */ INQ7_Default, \
+ /* Target 6 */ INQ7_Default, \
+ /* Target 7 */ INQ7_Default, \
+ /* Target 8 */ INQ7_Default, \
+ /* Target 9 */ INQ7_Default, \
+ /* Target 10 */ INQ7_Default, \
+ /* Target 11 */ INQ7_Default, \
+ /* Target 12 */ INQ7_Default, \
+ /* Target 13 */ INQ7_Default, \
+ /* Target 14 */ INQ7_Default, \
+ /* Target 15 */ INQ7_Default, \
+ } \
+}
+
+/*
+** Replace the proc_dir_entry of the standard ncr driver.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+#if defined(CONFIG_SCSI_NCR53C7xx) || !defined(CONFIG_SCSI_NCR53C8XX)
+#define PROC_SCSI_NCR53C8XX PROC_SCSI_NCR53C7xx
+#endif
+#endif
+
+/**************** ORIGINAL CONTENT of ncrreg.h from FreeBSD ******************/
+
+/*-----------------------------------------------------------------
+**
+** The ncr 53c810 register structure.
+**
+**-----------------------------------------------------------------
+*/
+
+struct ncr_reg {
+/*00*/ u_char nc_scntl0; /* full arb., ena parity, par->ATN */
+
+/*01*/ u_char nc_scntl1; /* no reset */
+ #define ISCON 0x10 /* connected to scsi */
+ #define CRST 0x08 /* force reset */
+
+/*02*/ u_char nc_scntl2; /* no disconnect expected */
+ #define SDU 0x80 /* cmd: disconnect will raise error */
+ #define CHM 0x40 /* sta: chained mode */
+ #define WSS 0x08 /* sta: wide scsi send [W]*/
+ #define WSR 0x01 /* sta: wide scsi received [W]*/
+
+/*03*/ u_char nc_scntl3; /* cnf system clock dependent */
+ #define EWS 0x08 /* cmd: enable wide scsi [W]*/
+ #define ULTRA 0x80 /* cmd: ULTRA enable */
+
+/*04*/ u_char nc_scid; /* cnf host adapter scsi address */
+ #define RRE 0x40 /* r/w:e enable response to resel. */
+ #define SRE 0x20 /* r/w:e enable response to select */
+
+/*05*/ u_char nc_sxfer; /* ### Sync speed and count */
+
+/*06*/ u_char nc_sdid; /* ### Destination-ID */
+
+/*07*/ u_char nc_gpreg; /* ??? IO-Pins */
+
+/*08*/ u_char nc_sfbr; /* ### First byte in phase */
+
+/*09*/ u_char nc_socl;
+ #define CREQ 0x80 /* r/w: SCSI-REQ */
+ #define CACK 0x40 /* r/w: SCSI-ACK */
+ #define CBSY 0x20 /* r/w: SCSI-BSY */
+ #define CSEL 0x10 /* r/w: SCSI-SEL */
+ #define CATN 0x08 /* r/w: SCSI-ATN */
+ #define CMSG 0x04 /* r/w: SCSI-MSG */
+ #define CC_D 0x02 /* r/w: SCSI-C_D */
+ #define CI_O 0x01 /* r/w: SCSI-I_O */
+
+/*0a*/ u_char nc_ssid;
+
+/*0b*/ u_char nc_sbcl;
+
+/*0c*/ u_char nc_dstat;
+ #define DFE 0x80 /* sta: dma fifo empty */
+ #define MDPE 0x40 /* int: master data parity error */
+ #define BF 0x20 /* int: script: bus fault */
+ #define ABRT 0x10 /* int: script: command aborted */
+ #define SSI 0x08 /* int: script: single step */
+ #define SIR 0x04 /* int: script: interrupt instruct. */
+ #define IID 0x01 /* int: script: illegal instruct. */
+
+/*0d*/ u_char nc_sstat0;
+ #define ILF 0x80 /* sta: data in SIDL register lsb */
+ #define ORF 0x40 /* sta: data in SODR register lsb */
+ #define OLF 0x20 /* sta: data in SODL register lsb */
+ #define AIP 0x10 /* sta: arbitration in progress */
+ #define LOA 0x08 /* sta: arbitration lost */
+ #define WOA 0x04 /* sta: arbitration won */
+ #define IRST 0x02 /* sta: scsi reset signal */
+ #define SDP 0x01 /* sta: scsi parity signal */
+
+/*0e*/ u_char nc_sstat1;
+ #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
+
+/*0f*/ u_char nc_sstat2;
+ #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
+ #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
+ #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
+ #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
+ #define LDSC 0x02 /* sta: disconnect & reconnect */
+
+/*10*/ u_int32 nc_dsa; /* --> Base page */
+
+/*14*/ u_char nc_istat; /* --> Main Command and status */
+ #define CABRT 0x80 /* cmd: abort current operation */
+ #define SRST 0x40 /* mod: reset chip */
+ #define SIGP 0x20 /* r/w: message from host to ncr */
+ #define SEM 0x10 /* r/w: message between host + ncr */
+ #define CON 0x08 /* sta: connected to scsi */
+ #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
+ #define SIP 0x02 /* sta: scsi-interrupt */
+ #define DIP 0x01 /* sta: host/script interrupt */
+
+/*15*/ u_char nc_15_;
+/*16*/ u_char nc_16_;
+/*17*/ u_char nc_17_;
+
+/*18*/ u_char nc_ctest0;
+/*19*/ u_char nc_ctest1;
+
+/*1a*/ u_char nc_ctest2;
+ #define CSIGP 0x40
+
+/*1b*/ u_char nc_ctest3;
+ #define FLF 0x08 /* cmd: flush dma fifo */
+ #define CLF 0x04 /* cmd: clear dma fifo */
+ #define FM 0x02 /* mod: fetch pin mode */
+ #define WRIE 0x01 /* mod: write and invalidate enable */
+
+/*1c*/ u_int32 nc_temp; /* ### Temporary stack */
+
+/*20*/ u_char nc_dfifo;
+/*21*/ u_char nc_ctest4;
+ #define BDIS 0x80 /* mod: burst disable */
+ #define MPEE 0x08 /* mod: master parity error enable */
+
+/*22*/ u_char nc_ctest5;
+ #define DFS 0x20 /* mod: dma fifo size */
+/*23*/ u_char nc_ctest6;
+
+/*24*/ u_int32 nc_dbc; /* ### Byte count and command */
+/*28*/ u_int32 nc_dnad; /* ### Next command register */
+/*2c*/ u_int32 nc_dsp; /* --> Script Pointer */
+/*30*/ u_int32 nc_dsps; /* --> Script pointer save/opcode#2 */
+/*34*/ u_int32 nc_scratcha; /* ??? Temporary register a */
+
+/*38*/ u_char nc_dmode;
+ #define BL_2 0x80 /* mod: burst length shift value +2 */
+ #define BL_1 0x40 /* mod: burst length shift value +1 */
+ #define ERL 0x08 /* mod: enable read line */
+ #define ERMP 0x04 /* mod: enable read multiple */
+ #define BOF 0x02 /* mod: burst op code fetch */
+
+/*39*/ u_char nc_dien;
+/*3a*/ u_char nc_dwt;
+
+/*3b*/ u_char nc_dcntl; /* --> Script execution control */
+
+ #define CLSE 0x80 /* mod: cache line size enable */
+ #define PFF 0x40 /* cmd: pre-fetch flush */
+ #define PFEN 0x20 /* mod: pre-fetch enable */
+ #define SSM 0x10 /* mod: single step mode */
+ #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
+ #define STD 0x04 /* cmd: start dma mode */
+ #define IRQD 0x02 /* mod: irq disable */
+ #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
+
+/*3c*/ u_int32 nc_adder;
+
+/*40*/ u_short nc_sien; /* -->: interrupt enable */
+/*42*/ u_short nc_sist; /* <--: interrupt status */
+ #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+ #define STO 0x0400/* sta: timeout (select) */
+ #define GEN 0x0200/* sta: timeout (general) */
+ #define HTH 0x0100/* sta: timeout (handshake) */
+ #define MA 0x80 /* sta: phase mismatch */
+ #define CMP 0x40 /* sta: arbitration complete */
+ #define SEL 0x20 /* sta: selected by another device */
+ #define RSL 0x10 /* sta: reselected by another device*/
+ #define SGE 0x08 /* sta: gross error (over/underflow)*/
+ #define UDC 0x04 /* sta: unexpected disconnect */
+ #define RST 0x02 /* sta: scsi bus reset detected */
+ #define PAR 0x01 /* sta: scsi parity error */
+
+/*44*/ u_char nc_slpar;
+/*45*/ u_char nc_swide;
+/*46*/ u_char nc_macntl;
+/*47*/ u_char nc_gpcntl;
+/*48*/ u_char nc_stime0; /* cmd: timeout for select&handshake*/
+/*49*/ u_char nc_stime1; /* cmd: timeout user defined */
+/*4a*/ u_short nc_respid; /* sta: Reselect-IDs */
+
+/*4c*/ u_char nc_stest0;
+
+/*4d*/ u_char nc_stest1;
+ #define DBLEN 0x08 /* clock doubler running */
+ #define DBLSEL 0x04 /* clock doubler selected */
+
+
+/*4e*/ u_char nc_stest2;
+ #define ROF 0x40 /* reset scsi offset (after gross error!) */
+ #define EXT 0x02 /* extended filtering */
+
+/*4f*/ u_char nc_stest3;
+ #define TE 0x80 /* c: tolerAnt enable */
+ #define HSC 0x20 /* c: Halt SCSI Clock */
+ #define CSF 0x02 /* c: clear scsi fifo */
+
+/*50*/ u_short nc_sidl; /* Lowlevel: latched from scsi data */
+/*52*/ u_char nc_stest4;
+ #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
+ #define SMODE_HVD 0x40 /* High Voltage Differential */
+ #define SMODE_SE 0x80 /* Single Ended */
+ #define SMODE_LVD 0xc0 /* Low Voltage Differential */
+ #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
+
+/*53*/ u_char nc_53_;
+/*54*/ u_short nc_sodl; /* Lowlevel: data out to scsi data */
+/*56*/ u_short nc_56_;
+/*58*/ u_short nc_sbdl; /* Lowlevel: data from scsi data */
+/*5a*/ u_short nc_5a_;
+/*5c*/ u_char nc_scr0; /* Working register B */
+/*5d*/ u_char nc_scr1; /* */
+/*5e*/ u_char nc_scr2; /* */
+/*5f*/ u_char nc_scr3; /* */
+/*60*/
+};
+
+/*-----------------------------------------------------------
+**
+** Utility macros for the script.
+**
+**-----------------------------------------------------------
+*/
+
+#define REGJ(p,r) (offsetof(struct ncr_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
+
+#ifndef TARGET_MODE
+#define TARGET_MODE 0
+#endif
+
+typedef u_int32 ncrcmd;
+
+/*-----------------------------------------------------------
+**
+** SCSI phases
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_DATA_OUT 0x00000000
+#define SCR_DATA_IN 0x01000000
+#define SCR_COMMAND 0x02000000
+#define SCR_STATUS 0x03000000
+#define SCR_ILG_OUT 0x04000000
+#define SCR_ILG_IN 0x05000000
+#define SCR_MSG_OUT 0x06000000
+#define SCR_MSG_IN 0x07000000
+
+/*-----------------------------------------------------------
+**
+** Data transfer via SCSI.
+**
+**-----------------------------------------------------------
+**
+** MOVE_ABS (LEN)
+** <<start address>>
+**
+** MOVE_IND (LEN)
+** <<dnad_offset>>
+**
+** MOVE_TBL
+** <<dnad_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_MOVE_ABS(l) ((0x08000000 ^ (TARGET_MODE << 1ul)) | (l))
+#define SCR_MOVE_IND(l) ((0x28000000 ^ (TARGET_MODE << 1ul)) | (l))
+#define SCR_MOVE_TBL (0x18000000 ^ (TARGET_MODE << 1ul))
+
+struct scr_tblmove {
+ u_int32 size;
+ u_int32 addr;
+};
+
+/*-----------------------------------------------------------
+**
+** Selection
+**
+**-----------------------------------------------------------
+**
+** SEL_ABS | SCR_ID (0..7) [ | REL_JMP]
+** <<alternate_address>>
+**
+** SEL_TBL | << dnad_offset>> [ | REL_JMP]
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SEL_ABS 0x40000000
+#define SCR_SEL_ABS_ATN 0x41000000
+#define SCR_SEL_TBL 0x42000000
+#define SCR_SEL_TBL_ATN 0x43000000
+
+struct scr_tblsel {
+ u_char sel_0;
+ u_char sel_sxfer;
+ u_char sel_id;
+ u_char sel_scntl3;
+};
+
+#define SCR_JMP_REL 0x04000000
+#define SCR_ID(id) (((u_int32)(id)) << 16)
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** WAIT_DISC
+** dummy: <<alternate_address>>
+**
+** WAIT_RESEL
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_WAIT_DISC 0x48000000
+#define SCR_WAIT_RESEL 0x50000000
+
+/*-----------------------------------------------------------
+**
+** Bit Set / Reset
+**
+**-----------------------------------------------------------
+**
+** SET (flags {|.. })
+**
+** CLR (flags {|.. })
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SET(f) (0x58000000 | (f))
+#define SCR_CLR(f) (0x60000000 | (f))
+
+#define SCR_CARRY 0x00000400
+#define SCR_TRG 0x00000200
+#define SCR_ACK 0x00000040
+#define SCR_ATN 0x00000008
+
+
+
+
+/*-----------------------------------------------------------
+**
+** Memory to memory move
+**
+**-----------------------------------------------------------
+**
+** COPY (bytecount)
+** << source_address >>
+** << destination_address >>
+**
+** SCR_COPY sets the NO FLUSH option by default.
+** SCR_COPY_F does not set this option.
+**
+** For chips which do not support this option,
+** ncr_copy_and_bind() will remove this bit.
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+**
+** Register move and binary operations
+**
+**-----------------------------------------------------------
+**
+** SFBR_REG (reg, op, data) reg = SFBR op data
+** << 0 >>
+**
+** REG_SFBR (reg, op, data) SFBR = reg op data
+** << 0 >>
+**
+** REG_REG (reg, op, data) reg = reg op data
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS(ofs) ((ofs) << 16ul)
+
+#define SCR_SFBR_REG(reg,op,data) \
+ (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | ((data)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+ (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | ((data)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+ (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | ((data)<<8ul))
+
+
+#define SCR_LOAD 0x00000000
+#define SCR_SHL 0x01000000
+#define SCR_OR 0x02000000
+#define SCR_XOR 0x03000000
+#define SCR_AND 0x04000000
+#define SCR_SHR 0x05000000
+#define SCR_ADD 0x06000000
+#define SCR_ADDC 0x07000000
+
+/*-----------------------------------------------------------
+**
+** FROM_REG (reg) reg = SFBR
+** << 0 >>
+**
+** TO_REG (reg) SFBR = reg
+** << 0 >>
+**
+** LOAD_REG (reg, data) reg = <data>
+** << 0 >>
+**
+** LOAD_SFBR(data) SFBR = <data>
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_FROM_REG(reg) \
+ SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define SCR_TO_REG(reg) \
+ SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define SCR_LOAD_REG(reg,data) \
+ SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+ (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** JUMP [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** CALL [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** CALLR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** RETURN [ | IFTRUE/IFFALSE ( ... ) ]
+** <<dummy>>
+**
+** INT [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** Conditions:
+** WHEN (phase)
+** IF (phase)
+** CARRY
+** DATA (data, mask)
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_OP 0x80000000
+#define SCR_JUMP 0x80080000
+#define SCR_JUMPR 0x80880000
+#define SCR_CALL 0x88080000
+#define SCR_CALLR 0x88880000
+#define SCR_RETURN 0x90080000
+#define SCR_INT 0x98080000
+#define SCR_INT_FLY 0x98180000
+
+#define IFFALSE(arg) (0x00080000 | (arg))
+#define IFTRUE(arg) (0x00000000 | (arg))
+
+#define WHEN(phase) (0x00030000 | (phase))
+#define IF(phase) (0x00020000 | (phase))
+
+#define DATA(D) (0x00040000 | ((D) & 0xff))
+#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET (0x00200000)
+
+/*-----------------------------------------------------------
+**
+** SCSI constants.
+**
+**-----------------------------------------------------------
+*/
+
+/*
+** Messages
+*/
+
+#define M_COMPLETE (0x00)
+#define M_EXTENDED (0x01)
+#define M_SAVE_DP (0x02)
+#define M_RESTORE_DP (0x03)
+#define M_DISCONNECT (0x04)
+#define M_ID_ERROR (0x05)
+#define M_ABORT (0x06)
+#define M_REJECT (0x07)
+#define M_NOOP (0x08)
+#define M_PARITY (0x09)
+#define M_LCOMPLETE (0x0a)
+#define M_FCOMPLETE (0x0b)
+#define M_RESET (0x0c)
+#define M_ABORT_TAG (0x0d)
+#define M_CLEAR_QUEUE (0x0e)
+#define M_INIT_REC (0x0f)
+#define M_REL_REC (0x10)
+#define M_TERMINATE (0x11)
+#define M_SIMPLE_TAG (0x20)
+#define M_HEAD_TAG (0x21)
+#define M_ORDERED_TAG (0x22)
+#define M_IGN_RESIDUE (0x23)
+#define M_IDENTIFY (0x80)
+
+#define M_X_MODIFY_DP (0x00)
+#define M_X_SYNC_REQ (0x01)
+#define M_X_WIDE_REQ (0x03)
+
+/*
+** Status
+*/
+
+#define S_GOOD (0x00)
+#define S_CHECK_COND (0x02)
+#define S_COND_MET (0x04)
+#define S_BUSY (0x08)
+#define S_INT (0x10)
+#define S_INT_COND_MET (0x14)
+#define S_CONFLICT (0x18)
+#define S_TERMINATED (0x20)
+#define S_QUEUE_FULL (0x28)
+#define S_ILLEGAL (0xff)
+#define S_SENSE (0x80)
+
+/*
+ * End of ncrreg from FreeBSD
+ */
+
+#endif /* !defined HOSTS_C */
+
+#endif /* defined NCR53C8XX_H */
diff --git a/linux/src/drivers/scsi/pas16.c b/linux/src/drivers/scsi/pas16.c
new file mode 100644
index 0000000..bd96420
--- /dev/null
+++ b/linux/src/drivers/scsi/pas16.c
@@ -0,0 +1,576 @@
+#define AUTOSENSE
+#define PSEUDO_DMA
+#define FOO
+#define UNSAFE /* Not unsafe for PAS16 -- use it */
+
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
+ * bytes at a time. Since interrupts are disabled by default during
+ * these transfers, we might need this to give reasonable interrupt
+ * service time if the transfer size gets too large.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This
+ * parameter comes from the NCR5380 code. It is NOT unsafe with
+ * the PAS16 and you should use it. If you don't you will have
+ * a problem with dropped characters during high speed
+ * communications during SCSI transfers. If you really don't
+ * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or
+ * twiddle with the transfer size in the high level code.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - There are many different models of
+ * the Pro Audio Spectrum/Studio 16, and I only have one of
+ * them, so this may require a little tweaking. An interrupt
+ * is triggered to autoprobe for the interrupt line. Note:
+ * with the newer model boards, the interrupt is set via
+ * software after reset using the default_irq for the
+ * current board number.
+ *
+ *
+ * 2. With command line overrides - pas16=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the PAS16_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0x388 address, IRQ10, I could say
+ * -DPAS16_OVERRIDE={{0x388, 10}}
+ * NOTE: Untested.
+ *
+ * Note that if the override methods are used, place holders must
+ * be specified for other boards in the system.
+ *
+ *
+ * Configuration notes :
+ * The current driver does not support interrupt sharing with the
+ * sound portion of the card. If you use the same irq for the
+ * scsi port and sound you will have problems. Either use
+ * a different irq for the scsi port or don't use interrupts
+ * for the scsi port.
+ *
+ * If you have problems with your card not being recognized, use
+ * the LILO command line override. Try to get it recognized without
+ * interrupts. Ie, for a board at the default 0x388 base port,
+ * boot: linux pas16=0x388,255
+ *
+ * (255 is the IRQ_NONE constant in NCR5380.h)
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "pas16.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_pas16 = {
+ PROC_SCSI_PAS16, 5, "pas16",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+static int pas_maxi = 0;
+static int pas_wmaxi = 0;
+
+
+int scsi_irq_translate[] =
+ { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 };
+
+/* The default_irqs array contains values used to set the irq into the
+ * board via software (as must be done on newer model boards without
+ * irq jumpers on the board). The first value in the array will be
+ * assigned to logical board 0, the next to board 1, etc.
+ */
+int default_irqs[] = { PAS16_DEFAULT_BOARD_1_IRQ,
+ PAS16_DEFAULT_BOARD_2_IRQ,
+ PAS16_DEFAULT_BOARD_3_IRQ,
+ PAS16_DEFAULT_BOARD_4_IRQ
+ };
+
+static struct override {
+ unsigned short io_port;
+ int irq;
+} overrides
+#ifdef PAS16_OVERRIDE
+ [] = PAS16_OVERRIDE;
+#else
+ [4] = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO},
+ {0,IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ unsigned short io_port;
+ int noauto;
+} bases[] = { {PAS16_DEFAULT_BASE_1, 0},
+ {PAS16_DEFAULT_BASE_2, 0},
+ {PAS16_DEFAULT_BASE_3, 0},
+ {PAS16_DEFAULT_BASE_4, 0}
+ };
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+unsigned short pas16_offset[ 8 ] =
+ {
+ 0x1c00, /* OUTPUT_DATA_REG */
+ 0x1c01, /* INITIATOR_COMMAND_REG */
+ 0x1c02, /* MODE_REG */
+ 0x1c03, /* TARGET_COMMAND_REG */
+ 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */
+ 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */
+ 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?)
+ * START_DMA_TARGET_RECEIVE_REG wo
+ */
+ 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro,
+ * START_DMA_INITIATOR_RECEIVE_REG wo
+ */
+ };
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+#if 1
+#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+
+/*
+ * Function : enable_board( int board_num, unsigned short port )
+ *
+ * Purpose : set address in new model board
+ *
+ * Inputs : board_num - logical board number 0-3, port - base address
+ *
+ */
+
+void enable_board( int board_num, unsigned short port )
+{
+ outb( 0xbc + board_num, MASTER_ADDRESS_PTR );
+ outb( port >> 2, MASTER_ADDRESS_PTR );
+}
+
+
+
+/*
+ * Function : init_board( unsigned short port, int irq )
+ *
+ * Purpose : Set the board up to handle the SCSI interface
+ *
+ * Inputs : port - base address of the board,
+ * irq - irq to assign to the SCSI port
+ * force_irq - set it even if it conflicts with sound driver
+ *
+ */
+
+void init_board( unsigned short io_port, int irq, int force_irq )
+{
+ unsigned int tmp;
+ unsigned int pas_irq_code;
+
+ /* Initialize the SCSI part of the board */
+
+ outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */
+ outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+
+ NCR5380_read( RESET_PARITY_INTERRUPT_REG );
+
+ /* Set the SCSI interrupt pointer without mucking up the sound
+ * interrupt pointer in the same byte.
+ */
+ pas_irq_code = ( irq < 16 ) ? scsi_irq_translate[irq] : 0;
+ tmp = inb( io_port + IO_CONFIG_3 );
+
+ if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0
+ && !force_irq )
+ {
+ printk( "pas16: WARNING: Can't use same irq as sound "
+ "driver -- interrupts disabled\n" );
+ /* Set up the drive parameters, disable 5380 interrupts */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ }
+ else
+ {
+ tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 );
+ outb( tmp, io_port + IO_CONFIG_3 );
+
+ /* Set up the drive parameters and enable 5380 interrupts */
+ outb( 0x6d, io_port + SYS_CONFIG_4 );
+ }
+}
+
+
+/*
+ * Function : pas16_hw_detect( unsigned short board_num )
+ *
+ * Purpose : determine if a pas16 board is present
+ *
+ * Inputs : board_num - logical board number ( 0 - 3 )
+ *
+ * Returns : 0 if board not found, 1 if found.
+ */
+
+int pas16_hw_detect( unsigned short board_num )
+{
+ unsigned char board_rev, tmp;
+ unsigned short io_port = bases[ board_num ].io_port;
+
+ /* See if we can find a PAS16 board at the address associated
+ * with this logical board number.
+ */
+
+ /* First, attempt to take a newer model board out of reset and
+ * give it a base address. This shouldn't affect older boards.
+ */
+ enable_board( board_num, io_port );
+
+ /* Now see if it looks like a PAS16 board */
+ board_rev = inb( io_port + PCB_CONFIG );
+
+ if( board_rev == 0xff )
+ return 0;
+
+ tmp = board_rev ^ 0xe0;
+
+ outb( tmp, io_port + PCB_CONFIG );
+ tmp = inb( io_port + PCB_CONFIG );
+ outb( board_rev, io_port + PCB_CONFIG );
+
+ if( board_rev != tmp ) /* Not a PAS-16 */
+ return 0;
+
+ if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 )
+ return 0; /* return if no SCSI interface found */
+
+ /* Mediavision has some new model boards that return ID bits
+ * that indicate a SCSI interface, but they're not (LMS). We'll
+ * put in an additional test to try to weed them out.
+ */
+
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+ NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */
+ if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */
+ return 0; /* and try to read */
+ NCR5380_write( MODE_REG, 0x00 ); /* it back. */
+ if( NCR5380_read( MODE_REG ) != 0x00 )
+ return 0;
+
+ return 1;
+}
+
+
+/*
+ * Function : pas16_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+void pas16_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("pas16_setup : usage pas16=io_port,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].io_port = (unsigned short) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].io_port == (unsigned short) ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : int pas16_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes PAS16 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int pas16_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0;
+ static unsigned short current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned short io_port;
+ int count;
+
+ tpnt->proc_dir = &proc_scsi_pas16;
+ tpnt->proc_info = &pas16_proc_info;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ io_port = 0;
+
+ if (overrides[current_override].io_port)
+ {
+ io_port = overrides[current_override].io_port;
+ enable_board( current_override, io_port );
+ init_board( io_port, overrides[current_override].irq, 1 );
+ }
+ else
+ for (; !io_port && (current_base < NO_BASES); ++current_base) {
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port);
+#endif
+ if ( !bases[current_base].noauto &&
+ pas16_hw_detect( current_base ) ){
+ io_port = bases[current_base].io_port;
+ init_board( io_port, default_irqs[ current_base ], 0 );
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : detected board.\n");
+#endif
+ }
+ }
+
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port);
+#endif
+
+ if (!io_port)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->io_port = io_port;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, pas16_intr, SA_INTERRUPT, "pas16", NULL)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ /* Disable 5380 interrupts, leave drive params the same */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 );
+ }
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%04x", instance->host_no, (int)
+ instance->io_port);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int pas16_biosparam(Disk *disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Someone
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+int pas16_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11; /* I think I have it as /(32*64) */
+ if( ip[2] > 1024 ) { /* yes, >, not >= */
+ ip[0]=255;
+ ip[1]=63;
+ ip[2]=size/(63*255);
+ if( ip[2] > 1023 ) /* yes >1023... */
+ ip[2] = 1023;
+ }
+
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ register unsigned char *d = dst;
+ register unsigned short reg = (unsigned short) (instance->io_port +
+ P_DATA_REG_OFFSET);
+ register int i = len;
+ int ii = 0;
+
+ while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
+ ++ii;
+
+ insb( reg, d, i );
+
+ if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+ outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ }
+ if (ii > pas_maxi)
+ pas_maxi = ii;
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+    int len) {
+    register unsigned char *s = src;
+    register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
+    register int i = len;
+    int ii = 0;
+
+    while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
+	++ii;
+
+    outsb( reg, s, i );
+
+    if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+	outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+	printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+	    instance->host_no);
+	return -1;
+    }
+    if (ii > pas_wmaxi)	/* compare against the write-side max, not pas_maxi */
+	pas_wmaxi = ii;
+    return 0;
+}
+
+#include "NCR5380.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = MV_PAS16;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/pas16.h b/linux/src/drivers/scsi/pas16.h
new file mode 100644
index 0000000..a1bda1f
--- /dev/null
+++ b/linux/src/drivers/scsi/pas16.h
@@ -0,0 +1,196 @@
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+
+#ifndef PAS16_H
+#define PAS16_H
+
+#define PAS16_PUBLIC_RELEASE 3
+
+#define PDEBUG_INIT 0x1
+#define PDEBUG_TRANSFER 0x2
+
+#define PAS16_DEFAULT_BASE_1 0x388
+#define PAS16_DEFAULT_BASE_2 0x384
+#define PAS16_DEFAULT_BASE_3 0x38c
+#define PAS16_DEFAULT_BASE_4 0x288
+
+#define PAS16_DEFAULT_BOARD_1_IRQ 10
+#define PAS16_DEFAULT_BOARD_2_IRQ 12
+#define PAS16_DEFAULT_BOARD_3_IRQ 14
+#define PAS16_DEFAULT_BOARD_4_IRQ 15
+
+
+/*
+ * The Pro Audio Spectrum boards are I/O mapped. They use a Zilog 5380
+ * SCSI controller, which is the equivalent of NCR's 5380. "Pseudo-DMA"
+ * architecture is used, where a PAL drives the DMA signals on the 5380
+ * allowing fast, blind transfers with proper handshaking.
+ */
+
+
+/* The Time-out Counter register is used to safe-guard against a stuck
+ * bus (in the case of RDY driven handshake) or a stuck byte (if 16-Bit
+ * DMA conversion is used). The counter uses a 28.224MHz clock
+ * divided by 14 as its clock source. In the case of a stuck byte in
+ * the holding register, an interrupt is generated (and mixed with the
+ * one with the drive) using the CD-ROM interrupt pointer.
+ */
+
+#define P_TIMEOUT_COUNTER_REG 0x4000
+#define P_TC_DISABLE 0x80 /* Set to 0 to enable timeout int. */
+ /* Bits D6-D0 contain timeout count */
+
+
+#define P_TIMEOUT_STATUS_REG_OFFSET 0x4001
+#define P_TS_TIM 0x80 /* check timeout status */
+ /* Bits D6-D4 N/U */
+#define P_TS_ARM_DRQ_INT 0x08 /* Arm DRQ Int. When set high,
+ * the next rising edge will
+ * cause a CD-ROM interrupt.
+ * When set low, the interrupt
+ * will be cleared. There is
+ * no status available for
+ * this interrupt.
+ */
+#define P_TS_ENABLE_TO_ERR_INTERRUPT /* Enable timeout error int. */
+#define P_TS_ENABLE_WAIT /* Enable Wait */
+
+#define P_TS_CT 0x01 /* clear timeout. Note: writing
+ * to this register clears the
+ * timeout error int. or status
+ */
+
+
+/*
+ * The data register reads/writes to/from the 5380 in pseudo-DMA mode
+ */
+
+#define P_DATA_REG_OFFSET 0x5c00 /* rw */
+
+#define P_STATUS_REG_OFFSET 0x5c01 /* ro */
+#define P_ST_RDY 0x80 /* 5380 DDRQ Status */
+
+#define P_IRQ_STATUS 0x5c03
+#define P_IS_IRQ 0x80 /* DIRQ status */
+
+#define PCB_CONFIG 0x803
+#define MASTER_ADDRESS_PTR 0x9a01 /* Fixed position - no relo */
+#define SYS_CONFIG_4 0x8003
+#define WAIT_STATE 0xbc00
+#define OPERATION_MODE_1 0xec03
+#define IO_CONFIG_3 0xf002
+
+
+#ifndef ASM
+int pas16_abort(Scsi_Cmnd *);
+int pas16_biosparam(Disk *, kdev_t, int*);
+int pas16_detect(Scsi_Host_Template *);
+int pas16_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int pas16_reset(Scsi_Cmnd *, unsigned int);
+int pas16_proc_info (char *buffer ,char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define MV_PAS16 {NULL, NULL, NULL, NULL, \
+ "Pro Audio Spectrum-16 SCSI", \
+ pas16_detect, NULL, NULL, \
+ NULL, pas16_queue_command, pas16_abort, pas16_reset, NULL, \
+ pas16_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned short io_port
+
+#define NCR5380_local_declare() \
+ volatile unsigned short io_port
+
+#define NCR5380_setup(instance) \
+ io_port = (instance)->io_port
+
+#define PAS16_io_port(reg) ( io_port + pas16_offset[(reg)] )
+
+#if !(PDEBUG & PDEBUG_TRANSFER)
+#define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) )
+#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
+#else
+#define NCR5380_read(reg) \
+ (((unsigned char) printk("scsi%d : read register %d at io_port %04x\n"\
+ , instance->hostno, (reg), PAS16_io_port(reg))), inb( PAS16_io_port(reg)) )
+
+#define NCR5380_write(reg, value) \
+ (printk("scsi%d : write %02x to register %d at io_port %04x\n", \
+ instance->hostno, (value), (reg), PAS16_io_port(reg)), \
+ outb( (value),PAS16_io_port(reg) ) )
+
+#endif
+
+
+#define NCR5380_intr pas16_intr
+#define NCR5380_queue_command pas16_queue_command
+#define NCR5380_abort pas16_abort
+#define NCR5380_reset pas16_reset
+#define NCR5380_proc_info pas16_proc_info
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define PAS16_IRQS 0xd4a8
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* PAS16_H */
diff --git a/linux/src/drivers/scsi/ppa.c b/linux/src/drivers/scsi/ppa.c
new file mode 100644
index 0000000..fd224f9
--- /dev/null
+++ b/linux/src/drivers/scsi/ppa.c
@@ -0,0 +1,1464 @@
+/* ppa.c -- low level driver for the IOMEGA PPA3
+ * parallel port SCSI host adapter.
+ *
+ * (The PPA3 is the embedded controller in the ZIP drive.)
+ *
+ * (c) 1995,1996 Grant R. Guenther, grant@torque.net,
+ * under the terms of the GNU Public License.
+ *
+ * Current Maintainer: David Campbell (Perth, Western Australia)
+ * campbell@gear.torque.net
+ * dcampbel@p01.as17.honeywell.com.au
+ *
+ * My unofficial company acronym list is 21 pages long:
+ * FLA: Four letter acronym with built in facility for
+ * future expansion to five letters.
+ */
+
+#include <linux/config.h>
+
+/* The following #define is to avoid a clash with hosts.c */
+#define PPA_CODE 1
+#ifndef HAVE_PC87332
+#define HAVE_PC87332 0
+#endif
+#define PPA_PROBE_SPP 0x0001
+#define PPA_PROBE_PS2 0x0002
+#define PPA_PROBE_ECR 0x0010
+#define PPA_PROBE_EPP17 0x0100
+#define PPA_PROBE_EPP19 0x0200
+int port_probe(unsigned short);
+
+#include <linux/blk.h>
+#include "sd.h"
+#include "hosts.h"
+typedef struct {
+ int base; /* Actual port address */
+ int mode; /* Transfer mode */
+ int host; /* Host number (for proc) */
+ Scsi_Cmnd *cur_cmd; /* Current queued command */
+ struct tq_struct ppa_tq; /* Polling interrupt stuff */
+ unsigned long jstart; /* Jiffies at start */
+ unsigned failed:1; /* Failure flag */
+} ppa_struct;
+
+#define PPA_EMPTY \
+{-1, /* base */ \
+PPA_AUTODETECT, /* mode */ \
+-1, /* host */ \
+NULL, /* cur_cmd */ \
+{0, 0, ppa_interrupt, NULL}, \
+0, /* jstart */ \
+0 /* failed */ \
+}
+
+#include "ppa.h"
+#undef CONFIG_PARPORT
+#define NO_HOSTS 4
+static ppa_struct ppa_hosts[NO_HOSTS] =
+{PPA_EMPTY, PPA_EMPTY, PPA_EMPTY, PPA_EMPTY};
+
+#define PPA_BASE(x) ppa_hosts[(x)].base
+
+int base[NO_HOSTS] =
+{0x03bc, 0x0378, 0x0278, 0x0000};
+#define parbus_base base
+#define parbus_no NO_HOSTS
+
+static inline int ppa_pb_claim(int host_no)
+{
+ if (ppa_hosts[host_no].cur_cmd)
+ ppa_hosts[host_no].cur_cmd->SCp.phase++;
+ return 0;
+}
+
+/***************************************************************************
+ * Parallel port probing routines *
+ ***************************************************************************/
+
+#ifndef MODULE
+/*
+ * Command line parameters (for built-in driver):
+ *
+ * Syntax: ppa=base[,mode[,use_sg]]
+ *
+ * For example: ppa=0x378 or ppa=0x378,0,3
+ *
+ */
+
+void ppa_setup(char *str, int *ints)
+{
+ static int x = 0;
+
+ if (x == 0) { /* Disable ALL known ports */
+ int i;
+
+ for (i = 0; i < NO_HOSTS; i++)
+ parbus_base[i] = 0x0000;
+ }
+ switch (ints[0]) {
+ case 3:
+ ppa_sg = ints[3];
+ case 2:
+ ppa_hosts[x].mode = ints[2];
+ parbus_base[x] = ints[1];
+ break;
+ default:
+ printk("PPA: I only use between 2 to 3 parameters.\n");
+ break;
+ }
+ x++;
+ }
+#else
+Scsi_Host_Template driver_template = PPA;
+#include "scsi_module.c"
+#endif
+
+/*
+ * Start of Chipset kludges
+ */
+
+#if HAVE_PC87332 > 0
+#warning PC87332 Kludge code included
+static inline int pc87332_port(int host_no)
+{
+ /* A routine to detect and kludge pc87332 chipsets into the
+ * "optimum" mode for parallel port data transfer.
+ * This assumes EPP is better than ECP...
+ * (Which it is for disk drives but not printers and scanners)
+ */
+ int base = ppa_hosts[host_no].base;
+
+ /* This is where a pc87332 can hide */
+ unsigned short index_addr[4] =
+ {
+ 0x0398, 0x026e, 0x015c, 0x002e
+ };
+
+ /* Bits 0&1 of FAR (Function Address Register) which specify where
+ * the LPT port will show up at.
+ */
+ unsigned short port_ref[4] =
+ {
+ 0x378, 0x3bc, 0x278, 0xffff
+ };
+
+ unsigned char a;
+ int loop;
+
+ for (loop = 0; loop < 4; loop++) {
+ /* Clear the "wax" out of the pc87332, only needed after hard
+ * reset.
+ */
+ inb(index_addr[loop]);
+ inb(index_addr[loop]);
+ inb(index_addr[loop]);
+ inb(index_addr[loop]);
+
+ /* Anyone home ?? */
+ outb(0xff, index_addr[loop]);
+ a = inb(index_addr[loop]);
+ switch (a) {
+ case (0x0f): /* PC87732 */
+ break;
+ case (0x1f): /* PC87306 */
+ break;
+ case (0x7f): /* PC87??? */
+ break;
+ default:
+ continue;
+ } /* Is this pc87332 on the desired port */
+ outb(0x01, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ if (port_ref[a & 0x03] != base)
+ continue;
+
+ /* Found a pc87332 */
+ printk("NatSemi PC87332 (or variant) at 0x%04x\n", base);
+
+ /* Try to enable EPP modes
+ * with hardware data direction
+ */
+ if (base != 0x3bc) {
+ /* EPP 1.9 */
+ outb(0x04, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ printk("Old reg1 = %02x\n", a);
+ /* 0x01 for EPP 1.7, 0x03 for EPP 1.9, 0x0c for ECP */
+ a = (a & 0xf0) | 0x03;
+ outb(a, index_addr[loop] + 1);
+ outb(a, index_addr[loop] + 1);
+
+ /* Software data direction selection */
+ outb(0x02, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ printk("Old reg2 = %02x\n", a);
+ /* 0x80 for software, 0x00 for hardware */
+ a = (a & 0x7f) | 0x80;
+ outb(a, index_addr[loop] + 1);
+ outb(a, index_addr[loop] + 1);
+ ppa_hosts[host_no].mode = PPA_EPP_32;
+ } else {
+ /* There is not enough address space for the 0x3bc port
+ * to have EPP registers so we will kludge it into an
+ * ECP
+ * port to allow bi-directional byte mode...
+ */
+ /* ECP */
+ outb(0x04, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ a = (a & 0xfb) | 0x06;
+ outb(a, index_addr[loop] + 1);
+ outb(a, index_addr[loop] + 1);
+ ppa_hosts[host_no].mode = PPA_PS2;
+ }
+
+ outb(0x04, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ return ppa_hosts[host_no].mode;
+ }
+ return 0;
+ }
+#else
+#define pc87332_port(x)
+#endif /* HAVE_PC87332 */
+
+static inline int generic_port(int host_no)
+{
+ /* Generic parallel port detection
+ * This will try to discover if the port is
+ * EPP, ECP, PS/2 or NIBBLE (In that order, approx....)
+ */
+ unsigned int save_ctr, save_ecr, r;
+ int ppb = PPA_BASE(host_no);
+
+ save_ctr = r_ctr(ppb);
+ save_ecr = r_ecr(ppb);
+ r = port_probe(ppb);
+ w_ecr(ppb, save_ecr);
+ w_ctr(ppb, save_ctr);
+
+ if (r & PPA_PROBE_SPP)
+ ppa_hosts[host_no].mode = PPA_NIBBLE;
+
+ if (r & PPA_PROBE_PS2) {
+ ppa_hosts[host_no].mode = PPA_PS2;
+ if (r & PPA_PROBE_ECR)
+ w_ecr(ppb, 0x20);
+ }
+ if ((r & PPA_PROBE_EPP17) || (r & PPA_PROBE_EPP19)) {
+ /* ppa_hosts[host_no].mode = PPA_EPP_32; */
+ if (r & PPA_PROBE_ECR)
+ w_ecr(ppb, 0x80);
+ }
+ return ppa_hosts[host_no].mode;
+}
+
+int ppa_detect(Scsi_Host_Template * host)
+{
+ struct Scsi_Host *hreg;
+ int ports;
+ int i, nhosts;
+
+ printk("ppa: Version %s\n", PPA_VERSION);
+ nhosts = 0;
+
+ for (i = 0; i < parbus_no; i++) {
+ if (parbus_base[i] == 0x0000)
+ continue;
+ ppa_hosts[i].base = parbus_base[i];
+
+ /* sanity checks */
+ if (check_region(parbus_base[i],
+ (parbus_base[i] == 0x03bc) ? 3 : 8))
+ continue;
+
+ pc87332_port(i);
+ if (!generic_port(i))
+ continue;
+
+ if (ppa_init(i))
+ continue;
+
+ /* now the glue ... */
+ switch (ppa_hosts[i].mode) {
+ case PPA_NIBBLE:
+ case PPA_PS2:
+ ports = 3;
+ break;
+ case PPA_EPP_8:
+ case PPA_EPP_16:
+ case PPA_EPP_32:
+ ports = 8;
+ break;
+ default: /* Never gets here */
+ continue;
+ }
+ request_region(ppa_hosts[i].base, ports, "ppa");
+ host->can_queue = PPA_CAN_QUEUE;
+ host->sg_tablesize = ppa_sg;
+ hreg = scsi_register(host, 0);
+ hreg->io_port = ppa_hosts[i].base;
+ hreg->n_io_port = ports;
+ hreg->dma_channel = -1;
+ hreg->unique_id = i;
+ ppa_hosts[i].host = hreg->host_no;
+ nhosts++;
+ }
+ if (nhosts == 0)
+ return 0;
+ else
+ return 1; /* return number of hosts detected */
+}
+
+/* This is to give the ppa driver a way to modify the timings (and other
+ * parameters) by writing to the /proc/scsi/ppa/0 file.
+ * Very simple method really... (Too simple, no error checking :( )
+ * Reason: Kernel hackers HATE having to unload and reload modules for
+ * testing...
+ * Also gives a method to use a script to obtain optimum timings (TODO)
+ */
+
+static inline int ppa_strncmp(const char *a, const char *b, int len)
+{
+ int loop;
+ for (loop = 0; loop < len; loop++)
+ if (a[loop] != b[loop])
+ return 1;
+
+ return 0;
+}
+static inline int ppa_proc_write(int hostno, char *buffer, int length)
+{
+ unsigned long x;
+
+ if ((length > 5) && (ppa_strncmp(buffer, "mode=", 5) == 0)) {
+ x = simple_strtoul(buffer + 5, NULL, 0);
+ ppa_hosts[hostno].mode = x;
+ return length;
+ }
+ printk("ppa /proc: invalid variable\n");
+ return (-EINVAL);
+}
+
+int ppa_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout)
+{
+ int i;
+ int len = 0;
+
+ for (i = 0; i < 4; i++)
+ if (ppa_hosts[i].host == hostno)
+ break;
+
+ if (inout)
+ return ppa_proc_write(i, buffer, length);
+
+ len += sprintf(buffer + len, "Version : %s\n", PPA_VERSION);
+ len += sprintf(buffer + len, "Port : 0x%04x\n", ppa_hosts[i].base);
+ len += sprintf(buffer + len, "Mode : %s\n", PPA_MODE_STRING[ppa_hosts[i].mode]);
+
+ /* Request for beyond end of buffer */
+ if (offset > len)
+ return 0;
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ return len;
+} /* end of ppa.c */
+static int device_check(int host_no);
+
+#if PPA_DEBUG > 0
+#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
+ y, __FUNCTION__, __LINE__); ppa_fail_func(x,y);
+static inline void ppa_fail_func(int host_no, int error_code)
+#else
+static inline void ppa_fail(int host_no, int error_code)
+ #endif
+{
+ /* If we fail a device then we trash status / message bytes */
+ if (ppa_hosts[host_no].cur_cmd) {
+ ppa_hosts[host_no].cur_cmd->result = error_code << 16;
+ ppa_hosts[host_no].failed = 1;
+ }
+}
+
+/*
+ * Wait for the high bit to be set.
+ *
+ * In principle, this could be tied to an interrupt, but the adapter
+ * doesn't appear to be designed to support interrupts. We spin on
+ * the 0x80 ready bit.
+ *
+ * Returns the top nibble of the status register on success, or 0 after
+ * roughly PPA_SPIN_TMO microseconds without the ready bit appearing
+ * (the command is then failed with DID_TIME_OUT).
+ */
+static unsigned char ppa_wait(int host_no)
+{
+ int k;
+ unsigned short ppb = PPA_BASE(host_no);
+ unsigned char r;
+
+ /* Busy-poll the status register, ~1us per iteration. */
+ k = PPA_SPIN_TMO;
+ do {
+ r = r_str(ppb);
+ k--;
+ udelay(1);
+ }
+ while (!(r & 0x80) && (k));
+
+ /*
+ * return some status information.
+ * Semantics: 0xc0 = ZIP wants more data
+ * 0xd0 = ZIP wants to send more data
+ * 0xe0 = ZIP is expecting SCSI command data
+ * 0xf0 = end of transfer, ZIP is sending status
+ */
+ /* NOTE(review): if the ready bit appears on the very last iteration
+ * (k has just reached 0) this still reports a timeout - harmless in
+ * practice given the 50000-iteration budget, but worth knowing. */
+ if (k)
+ return (r & 0xf0);
+
+ /* Counter expired - Time out occurred */
+ ppa_fail(host_no, DID_TIME_OUT);
+ printk("ppa timeout in ppa_wait\n");
+ return 0; /* command timed out */
+}
+
+/*
+ * Output a string, in whatever mode is available, according to the
+ * PPA protocol. (This describes ppa_out() below; epp_reset() and
+ * ecp_sync() are small helpers it uses.)
+ */
+static inline void epp_reset(unsigned short ppb)
+{
+ int status;
+
+ /* Clear a pending EPP timeout: read the status register, write the
+ * value back, then write it back again with bit 0 cleared. */
+ status = r_str(ppb);
+ w_str(ppb, status);
+ w_str(ppb, status & 0xfe);
+}
+
+static inline void ecp_sync(unsigned short ppb)
+{
+ int attempt;
+
+ /* Only applies when the chipset is actually in ECP mode
+ * (ECR mode field == 100b). */
+ if ((r_ecr(ppb) & 0xe0) != 0x80)
+ return;
+
+ /* Poll the FIFO-empty bit up to 100 times, 5us apart. */
+ for (attempt = 0; attempt < 100; attempt++) {
+ if (r_ecr(ppb) & 0x01)
+ return;
+ udelay(5);
+ }
+ printk("ppa: ECP sync failed as data still present in FIFO.\n");
+}
+
+/*
+ * Here is the asm code for the SPP/PS2 protocols for the i386.
+ * This has been optimised for speed on 386/486 machines. There will
+ * be very little improvement on the current 586+ machines as it is the
+ * IO statements which will limit throughput.
+ */
+#ifdef __i386__
+#define BYTE_OUT(reg) \
+ " movb " #reg ",%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " addl $2,%%edx\n" \
+ " movb $0x0e,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " movb $0x0c,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " subl $2,%%edx\n"
+
+/* SPP/PS2 output: latch each byte on the data lines, then strobe the
+ * control lines 0xe -> 0xc to clock it into the drive. */
+static inline int ppa_byte_out(unsigned short base, char *buffer, unsigned int len)
+{
+ unsigned int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ w_dtr(base, *buffer++);
+ w_ctr(base, 0xe);
+ w_ctr(base, 0xc);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+#define BYTE_IN(reg) \
+ " inb (%%dx),%%al\n" \
+ " movb %%al," #reg "\n" \
+ " addl $2,%%edx\n" \
+ " movb $0x27,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " movb $0x25,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " subl $2,%%edx\n"
+
+/* PS/2 byte-mode input: read a byte from the data lines, then strobe
+ * the control lines 0x27 -> 0x25 to request the next one. */
+static inline int ppa_byte_in(unsigned short base, char *buffer, int len)
+{
+ int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ *buffer++ = r_dtr(base);
+ w_ctr(base, 0x27);
+ w_ctr(base, 0x25);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+#define NIBBLE_IN(reg) \
+ " incl %%edx\n" \
+ " movb $0x04,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " decl %%edx\n" \
+ " inb (%%dx),%%al\n" \
+ " andb $0xf0,%%al\n" \
+ " movb %%al," #reg "\n" \
+ " incl %%edx\n" \
+ " movb $0x06,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " decl %%edx\n" \
+ " inb (%%dx),%%al\n" \
+ " shrb $4,%%al\n" \
+ " orb %%al," #reg "\n"
+
+/* Nibble-mode input: each byte arrives as two 4-bit halves read from
+ * the top nibble of the status register. */
+static inline int ppa_nibble_in(unsigned short base, char *buffer, int len)
+{
+ int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ unsigned char high;
+
+ w_ctr(base, 0x4);
+ high = r_str(base) & 0xf0;
+ w_ctr(base, 0x6);
+ *buffer++ = high | ((r_str(base) & 0xf0) >> 4);
+ }
+ return 1; /* All went well - we hope! */
+}
+#else /* Old style C routines */
+
+/* Portable SPP/PS2 output: latch each byte, then strobe the control
+ * port 0xe -> 0xc to clock it into the drive. */
+static inline int ppa_byte_out(unsigned short base, const char *buffer, int len)
+{
+ unsigned short control = base + 2;
+ int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ outb(*buffer++, base);
+ outb(0xe, control);
+ outb(0xc, control);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+/* Portable PS/2 byte-mode input: read a byte from the data port, then
+ * strobe the control port 0x27 -> 0x25 to request the next one. */
+static inline int ppa_byte_in(unsigned short base, char *buffer, int len)
+{
+ unsigned short control = base + 2;
+ int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ *buffer++ = inb(base);
+ outb(0x27, control);
+ outb(0x25, control);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+/* Portable nibble-mode input. Note: 'str_p' is the STATUS port address
+ * (base + 1); each byte is assembled from two 4-bit reads. */
+static inline int ppa_nibble_in(unsigned short str_p, char *buffer, int len)
+{
+ unsigned short control = str_p + 1;
+ unsigned char high, low;
+ int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ outb(0x4, control);
+ high = inb(str_p);
+ outb(0x6, control);
+ low = inb(str_p);
+ *buffer++ = (high & 0xf0) | ((low & 0xf0) >> 4);
+ }
+ return 1; /* All went well - we hope! */
+}
+#endif
+
+/* EPP output, one byte per write to the EPP data port. With PEDANTIC
+ * chipset support the status port is checked after every byte and an
+ * EPP timeout (bit 0) aborts the transfer. */
+static inline int ppa_epp_out(unsigned short epp_p, unsigned short str_p, const char *buffer, int len)
+{
+ int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ outb(*buffer++, epp_p);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ if (inb(str_p) & 0x01)
+ return 0;
+#endif
+ }
+ return 1;
+}
+
+/*
+ * Send len bytes from buffer to the drive using whichever transfer mode
+ * the host is configured for. Returns 1 on success, 0 on failure (the
+ * current command is then flagged via ppa_fail).
+ */
+static int ppa_out(int host_no, char *buffer, int len)
+{
+ int r;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ r = ppa_wait(host_no);
+
+ /* Drive must be ready (0x40) and not asking to send (0x10). */
+ if ((r & 0x50) != 0x40) {
+ ppa_fail(host_no, DID_ERROR);
+ return 0;
+ }
+ switch (ppa_hosts[host_no].mode) {
+ case PPA_NIBBLE:
+ case PPA_PS2:
+ /* 8 bit output, with a loop */
+ r = ppa_byte_out(ppb, buffer, len);
+ break;
+
+ case PPA_EPP_32:
+ case PPA_EPP_16:
+ case PPA_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x4);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ r = ppa_epp_out(ppb + 4, ppb + 1, buffer, len);
+#else
+ /* Fast path: 32-bit string I/O when buffer and length are
+ * 4-byte aligned, byte string I/O otherwise. */
+ if (!(((long) buffer | len) & 0x03))
+ outsl(ppb + 4, buffer, len >> 2);
+ else
+ outsb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0xc);
+ r = !(r_str(ppb) & 0x01);
+#endif
+ /* NOTE(review): w_ctr(ppb, 0xc) is issued both inside the
+ * !PEDANTIC branch above and again here, so the non-pedantic
+ * build writes it twice - apparently harmless; confirm against
+ * the PPA protocol notes before removing either. */
+ w_ctr(ppb, 0xc);
+ ecp_sync(ppb);
+ break;
+
+ default:
+ printk("PPA: bug in ppa_out()\n");
+ r = 0;
+ }
+ return r;
+}
+
+/* EPP input, one byte per read from the EPP data port. With PEDANTIC
+ * chipset support the status port is checked after every byte and an
+ * EPP timeout (bit 0) aborts the transfer. */
+static inline int ppa_epp_in(int epp_p, int str_p, char *buffer, int len)
+{
+ int remaining;
+
+ for (remaining = len; remaining; remaining--) {
+ *buffer++ = inb(epp_p);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ if (inb(str_p) & 0x01)
+ return 0;
+#endif
+ }
+ return 1;
+}
+
+/*
+ * Read len bytes from the drive into buffer using whichever transfer
+ * mode the host is configured for. Returns 1 on success, 0 on failure
+ * (the current command is then flagged via ppa_fail).
+ */
+static int ppa_in(int host_no, char *buffer, int len)
+{
+ int r;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ r = ppa_wait(host_no);
+
+ /* Drive must be ready (0x40) and asking to send (0x10). */
+ if ((r & 0x50) != 0x50) {
+ ppa_fail(host_no, DID_ERROR);
+ return 0;
+ }
+ switch (ppa_hosts[host_no].mode) {
+ case PPA_NIBBLE:
+ /* 4 bit input, with a loop */
+ r = ppa_nibble_in(ppb + 1, buffer, len);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case PPA_PS2:
+ /* 8 bit input, with a loop */
+ w_ctr(ppb, 0x25);
+ r = ppa_byte_in(ppb, buffer, len);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case PPA_EPP_32:
+ case PPA_EPP_16:
+ case PPA_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x24);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ r = ppa_epp_in(ppb + 4, ppb + 1, buffer, len);
+#else
+ /* Fast path: 32-bit string I/O when buffer and length are
+ * 4-byte aligned, byte string I/O otherwise. */
+ if (!(((long) buffer | len) & 0x03))
+ insl(ppb + 4, buffer, len >> 2);
+ else
+ insb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0x2c);
+ r = !(r_str(ppb) & 0x01);
+#endif
+ /* NOTE(review): w_ctr(ppb, 0x2c) is issued both inside the
+ * !PEDANTIC branch above and again here - mirrors ppa_out();
+ * confirm before removing either write. */
+ w_ctr(ppb, 0x2c);
+ ecp_sync(ppb);
+ break;
+
+ default:
+ printk("PPA: bug in ppa_ins()\n");
+ r = 0;
+ break;
+ }
+ return r;
+}
+
+/* end of ppa_io.h */
+/* Latch byte b on the data lines and toggle the control lines through
+ * the "disconnect pulse" sequence. */
+static inline void ppa_d_pulse(unsigned short ppb, unsigned char b)
+{
+ static const unsigned char pulse[] = { 0xc, 0xe, 0xc, 0x4, 0xc };
+ int i;
+
+ w_dtr(ppb, b);
+ for (i = 0; i < 5; i++)
+ w_ctr(ppb, pulse[i]);
+}
+
+/* Release the drive from the parallel port by sending the four-byte
+ * disconnect handshake. */
+static void ppa_disconnect(int host_no)
+{
+ static const unsigned char handshake[] = { 0, 0x3c, 0x20, 0xf };
+ unsigned short ppb = PPA_BASE(host_no);
+ int i;
+
+ for (i = 0; i < 4; i++)
+ ppa_d_pulse(ppb, handshake[i]);
+}
+
+/* Latch byte b on the data lines and toggle the control lines through
+ * the "connect pulse" sequence. */
+static inline void ppa_c_pulse(unsigned short ppb, unsigned char b)
+{
+ static const unsigned char pulse[] = { 0x4, 0x6, 0x4, 0xc };
+ int i;
+
+ w_dtr(ppb, b);
+ for (i = 0; i < 4; i++)
+ w_ctr(ppb, pulse[i]);
+}
+
+/* Claim the drive on the parallel port with the four-byte connect
+ * handshake. The final byte selects the protocol: 0xcf requests EPP
+ * (only when the caller allows it and the host is in an EPP mode),
+ * 0x8f selects the plain protocol. */
+static inline void ppa_connect(int host_no, int flag)
+{
+ unsigned short ppb = PPA_BASE(host_no);
+ unsigned char final;
+
+ ppa_c_pulse(ppb, 0);
+ ppa_c_pulse(ppb, 0x3c);
+ ppa_c_pulse(ppb, 0x20);
+ if ((flag == CONNECT_EPP_MAYBE) &&
+ IN_EPP_MODE(ppa_hosts[host_no].mode))
+ final = 0xcf;
+ else
+ final = 0x8f;
+ ppa_c_pulse(ppb, final);
+}
+
+/*
+ * Select SCSI device 'target' on the bus. Returns 1 on success, 0 if
+ * either wait loop times out (previous device never went off line, or
+ * the new device never came on line).
+ */
+static int ppa_select(int host_no, int target)
+{
+ int k;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ /*
+ * Bit 6 (0x40) is the device selected bit.
+ * First we must wait till the current device goes off line...
+ */
+ k = PPA_SELECT_TMO;
+ do {
+ k--;
+ } while ((r_str(ppb) & 0x40) && (k));
+ if (!k)
+ return 0;
+
+ /* Put the target's ID bit on the data lines and strobe it in. */
+ w_dtr(ppb, (1 << target));
+ w_ctr(ppb, 0xe);
+ w_ctr(ppb, 0xc);
+ w_dtr(ppb, 0x80); /* This is NOT the initiator */
+ w_ctr(ppb, 0x8);
+
+ /* ...then wait for the selected device to come on line. */
+ k = PPA_SELECT_TMO;
+ do {
+ k--;
+ }
+ while (!(r_str(ppb) & 0x40) && (k));
+ if (!k)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * This is based on a trace of what the Iomega DOS 'guest' driver does.
+ * I've tried several different kinds of parallel ports with guest and
+ * coded this to react in the same ways that it does.
+ *
+ * The return value from this function is just a hint about where the
+ * handshaking failed.
+ *
+ */
+static int ppa_init(int host_no)
+{
+ int retv;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ ppa_disconnect(host_no);
+ ppa_connect(host_no, CONNECT_NORMAL);
+
+ retv = 2; /* Failed */
+
+ /* Cable-presence sanity check: status bit 3 must track the
+ * control value we write (0xe then 0xc). */
+ w_ctr(ppb, 0xe);
+ if ((r_str(ppb) & 0x08) == 0x08)
+ retv--;
+
+ w_ctr(ppb, 0xc);
+ if ((r_str(ppb) & 0x08) == 0x00)
+ retv--;
+
+ /* This is a SCSI BUS reset signal */
+ if (!retv) {
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x08);
+ udelay(30);
+ w_ctr(ppb, 0x0c);
+ udelay(1000); /* Allow devices to settle down */
+ }
+ ppa_disconnect(host_no);
+ udelay(1000); /* Another delay to allow devices to settle */
+
+ /* Both checks passed: go look for actual devices. */
+ if (!retv)
+ retv = device_check(host_no);
+
+ return retv;
+}
+
+/* Feed the SCSI CDB to the drive one byte at a time. Returns 1 on
+ * success, 0 as soon as any byte fails to go out. */
+static inline int ppa_send_command(Scsi_Cmnd * cmd)
+{
+ int host_no = cmd->host->unique_id;
+ int byte;
+
+ w_ctr(PPA_BASE(host_no), 0x0c);
+
+ for (byte = 0; byte < cmd->cmd_len; byte++) {
+ if (!ppa_out(host_no, &cmd->cmnd[byte], 1))
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * The bulk flag enables some optimisations in the data transfer loops,
+ * it should be true for any command that transfers data in integral
+ * numbers of sectors.
+ *
+ * The driver appears to remain stable if we speed up the parallel port
+ * i/o in this function, but not elsewhere.
+ */
+static int ppa_completion(Scsi_Cmnd * cmd)
+{
+ /* Return codes:
+ * -1 Error
+ * 0 Told to schedule
+ * 1 Finished data transfer
+ */
+ int host_no = cmd->host->unique_id;
+ unsigned short ppb = PPA_BASE(host_no);
+ unsigned long start_jiffies = jiffies;
+
+ unsigned char r, v;
+ int fast, bulk, status;
+
+ /* Bulk transfers are only attempted for plain read/write commands. */
+ v = cmd->cmnd[0];
+ bulk = ((v == READ_6) ||
+ (v == READ_10) ||
+ (v == WRITE_6) ||
+ (v == WRITE_10));
+
+ /*
+ * We only get here if the drive is ready to comunicate,
+ * hence no need for a full ppa_wait.
+ */
+ r = (r_str(ppb) & 0xf0);
+
+ /* 0xf0 == end of transfer; keep moving data until we see it. */
+ while (r != (unsigned char) 0xf0) {
+ /*
+ * If we have been running for more than a full timer tick
+ * then take a rest.
+ */
+ /* NOTE(review): direct jiffies comparison - misbehaves briefly
+ * at jiffies wraparound; later drivers use time_after(). */
+ if (jiffies > start_jiffies + 1)
+ return 0;
+
+ /* 0xc0/0xd0 are the only valid data-phase states here, and we
+ * must still have buffer space left. */
+ if (((r & 0xc0) != 0xc0) || (cmd->SCp.this_residual <= 0)) {
+ ppa_fail(host_no, DID_ERROR);
+ return -1; /* ERROR_RETURN */
+ }
+ /* determine if we should use burst I/O */
+ fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
+ ? PPA_BURST_SIZE : 1;
+
+ /* 0xc0 == drive wants data (write), otherwise it is sending. */
+ if (r == (unsigned char) 0xc0)
+ status = ppa_out(host_no, cmd->SCp.ptr, fast);
+ else
+ status = ppa_in(host_no, cmd->SCp.ptr, fast);
+
+ cmd->SCp.ptr += fast;
+ cmd->SCp.this_residual -= fast;
+
+ if (!status) {
+ ppa_fail(host_no, DID_BUS_BUSY);
+ return -1; /* ERROR_RETURN */
+ }
+ if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ /* if scatter/gather, advance to the next segment */
+ /* (post-decrement: tests the old count, then consumes one) */
+ if (cmd->SCp.buffers_residual--) {
+ cmd->SCp.buffer++;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ }
+ }
+ /* Now check to see if the drive is ready to comunicate */
+ r = (r_str(ppb) & 0xf0);
+ /* If not, drop back down to the scheduler and wait a timer tick */
+ if (!(r & 0x80))
+ return 0;
+ }
+ return 1; /* FINISH_RETURN */
+}
+
+/*
+ * Since the PPA itself doesn't generate interrupts, we use
+ * the scheduler's task queue to generate a stream of call-backs and
+ * complete the request when the drive is ready.
+ */
+static void ppa_interrupt(void *data)
+{
+ ppa_struct *tmp = (ppa_struct *) data;
+ Scsi_Cmnd *cmd = tmp->cur_cmd;
+
+ if (!cmd) {
+ printk("PPA: bug in ppa_interrupt\n");
+ return;
+ }
+ /* Engine still working: re-queue ourselves on the timer queue so
+ * we get called back on the next tick. */
+ if (ppa_engine(tmp, cmd)) {
+ tmp->ppa_tq.data = (void *) tmp;
+ tmp->ppa_tq.sync = 0;
+ queue_task(&tmp->ppa_tq, &tq_timer);
+ return;
+ }
+ /* Command must have completed hence it is safe to let go... */
+#if PPA_DEBUG > 0
+ /* Debug builds: decode the host byte of the result for the log. */
+ switch ((cmd->result >> 16) & 0xff) {
+ case DID_OK:
+ break;
+ case DID_NO_CONNECT:
+ printk("ppa: no device at SCSI ID %i\n", cmd->target);
+ break;
+ case DID_BUS_BUSY:
+ printk("ppa: BUS BUSY - EPP timeout detected\n");
+ break;
+ case DID_TIME_OUT:
+ printk("ppa: unknown timeout\n");
+ break;
+ case DID_ABORT:
+ printk("ppa: told to abort\n");
+ break;
+ case DID_PARITY:
+ printk("ppa: parity error (???)\n");
+ break;
+ case DID_ERROR:
+ printk("ppa: internal driver error\n");
+ break;
+ case DID_RESET:
+ printk("ppa: told to reset device\n");
+ break;
+ case DID_BAD_INTR:
+ printk("ppa: bad interrupt (???)\n");
+ break;
+ default:
+ printk("ppa: bad return code (%02x)\n", (cmd->result >> 16) & 0xff);
+ }
+#endif
+
+ /* Phase > 1 means we connected to the bus at some point. */
+ if (cmd->SCp.phase > 1)
+ ppa_disconnect(cmd->host->unique_id);
+
+ tmp->cur_cmd = 0;
+ cmd->scsi_done(cmd);
+ return;
+}
+
+/*
+ * The command state machine, driven repeatedly from ppa_interrupt().
+ * cmd->SCp.phase tracks progress; each case deliberately falls through
+ * to the next phase once its work is done.
+ * Returns 1 to be called again on the next tick, 0 when the command is
+ * finished (successfully or not).
+ */
+static int ppa_engine(ppa_struct * tmp, Scsi_Cmnd * cmd)
+{
+ int host_no = cmd->host->unique_id;
+ unsigned short ppb = PPA_BASE(host_no);
+ unsigned char l = 0, h = 0;
+ int retv;
+
+ /* First check for any errors that may have occurred
+ * Here we check for internal errors
+ */
+ if (tmp->failed)
+ return 0;
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Phase 0 - Waiting for parport */
+ if ((jiffies - tmp->jstart) > HZ) {
+ /*
+ * We waited more than a second
+ * for parport to call us
+ */
+ ppa_fail(host_no, DID_BUS_BUSY);
+ return 0;
+ }
+ return 1; /* wait until ppa_wakeup claims parport */
+ case 1: /* Phase 1 - Connected */
+ { /* Perform a sanity check for cable unplugged */
+ /* NOTE: this inner retv intentionally shadows the outer one. */
+ int retv = 2; /* Failed */
+
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+
+ w_ctr(ppb, 0xe);
+ if ((r_str(ppb) & 0x08) == 0x08)
+ retv--;
+
+ w_ctr(ppb, 0xc);
+ if ((r_str(ppb) & 0x08) == 0x00)
+ retv--;
+
+ /* Dangling else: the 'else' below binds to the inner
+ * (jiffies) if, which is the intended reading. */
+ if (retv)
+ if ((jiffies - tmp->jstart) > (1 * HZ)) {
+ printk("ppa: Parallel port cable is unplugged!!\n");
+ ppa_fail(host_no, DID_BUS_BUSY);
+ return 0;
+ } else {
+ ppa_disconnect(host_no);
+ return 1; /* Try again in a jiffy */
+ }
+ cmd->SCp.phase++;
+ }
+ /* fallthrough */
+
+ case 2: /* Phase 2 - We are now talking to the scsi bus */
+ if (!ppa_select(host_no, cmd->target)) {
+ ppa_fail(host_no, DID_NO_CONNECT);
+ return 0;
+ }
+ cmd->SCp.phase++;
+ /* fallthrough */
+
+ case 3: /* Phase 3 - Ready to accept a command */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ if (!ppa_send_command(cmd))
+ return 0;
+ cmd->SCp.phase++;
+ /* fallthrough */
+
+ case 4: /* Phase 4 - Setup scatter/gather buffers */
+ if (cmd->use_sg) {
+ /* if many buffers are available, start filling the first */
+ cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ } else {
+ /* else fill the only available buffer */
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = cmd->request_buffer;
+ }
+ cmd->SCp.buffers_residual = cmd->use_sg;
+ cmd->SCp.phase++;
+ /* fallthrough */
+
+ case 5: /* Phase 5 - Data transfer stage */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ retv = ppa_completion(cmd);
+ if (retv == -1)
+ return 0;
+ if (retv == 0)
+ return 1;
+ cmd->SCp.phase++;
+ /* fallthrough */
+
+ case 6: /* Phase 6 - Read status/message */
+ cmd->result = DID_OK << 16;
+ /* Check for data overrun */
+ if (ppa_wait(host_no) != (unsigned char) 0xf0) {
+ ppa_fail(host_no, DID_ERROR);
+ return 0;
+ }
+ if (ppa_in(host_no, &l, 1)) { /* read status byte */
+ /* Check for optional message byte */
+ if (ppa_wait(host_no) == (unsigned char) 0xf0)
+ ppa_in(host_no, &h, 1);
+ cmd->result = (DID_OK << 16) + (h << 8) + (l & STATUS_MASK);
+ }
+ return 0; /* Finished */
+ break;
+
+ default:
+ printk("ppa: Invalid scsi phase\n");
+ }
+ return 0;
+}
+
+/*
+ * Mid-layer entry point: accept a command for asynchronous execution.
+ * The command is recorded as cur_cmd and kicked off via the immediate
+ * task queue; ppa_interrupt()/ppa_engine() drive it to completion and
+ * call done(). Only one command at a time is supported per host.
+ */
+int ppa_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ int host_no = cmd->host->unique_id;
+
+ /* We advertise cmd_per_lun == 1, so an in-flight command here
+ * means the mid-layer violated the contract. */
+ if (ppa_hosts[host_no].cur_cmd) {
+ printk("PPA: bug in ppa_queuecommand\n");
+ return 0;
+ }
+ ppa_hosts[host_no].failed = 0;
+ ppa_hosts[host_no].jstart = jiffies;
+ ppa_hosts[host_no].cur_cmd = cmd;
+ cmd->scsi_done = done;
+ cmd->result = DID_ERROR << 16; /* default return code */
+ cmd->SCp.phase = 0; /* bus free */
+
+ ppa_pb_claim(host_no);
+
+ /* Schedule the first engine pass via the immediate bottom half. */
+ ppa_hosts[host_no].ppa_tq.data = ppa_hosts + host_no;
+ ppa_hosts[host_no].ppa_tq.sync = 0;
+ queue_task(&ppa_hosts[host_no].ppa_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+
+ return 0;
+}
+
+/*
+ * Report a BIOS-style drive geometry for fdisk and friends.
+ * Apparently the disk->capacity attribute is off by 1 sector for all
+ * disk drives, so one is added back here; if that ever gets fixed in
+ * sd.c this still works.
+ */
+int ppa_biosparam(Disk * disk, kdev_t dev, int ip[])
+{
+ int heads = 0x40; /* first guess: 64 heads... */
+ int sectors = 0x20; /* ...and 32 sectors per track */
+
+ ip[2] = (disk->capacity + 1) / (heads * sectors);
+ if (ip[2] > 1024) {
+ /* Too many cylinders: switch to 255 heads / 63 sectors
+ * and clamp the cylinder count at 1023. */
+ heads = 0xff;
+ sectors = 0x3f;
+ ip[2] = (disk->capacity + 1) / (heads * sectors);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+ ip[0] = heads;
+ ip[1] = sectors;
+ return 0;
+}
+
+/*
+ * Mid-layer abort entry point. Returns SCSI_ABORT_SUCCESS if the
+ * command had not yet reached the interface, SCSI_ABORT_BUSY otherwise.
+ */
+int ppa_abort(Scsi_Cmnd * cmd)
+{
+ /*
+ * There is no method for aborting commands since Iomega
+ * have tied the SCSI_MESSAGE line high in the interface
+ */
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Do not have access to parport */
+ case 1: /* Have not connected to interface */
+ /* The host status occupies bits 16-23 of result; the
+ * original stored DID_ABORT unshifted, which the mid-layer
+ * would have read back as DID_OK. */
+ cmd->result = DID_ABORT << 16;
+ cmd->done(cmd);
+ return SCSI_ABORT_SUCCESS;
+ default: /* SCSI command sent, can not abort */
+ return SCSI_ABORT_BUSY;
+ }
+}
+
+/*
+ * Mid-layer bus-reset entry point. Tears down whatever the interface
+ * is doing, pulses a SCSI bus reset, and flags the current command as
+ * killed by reset. The 'x' (reset flags) argument is unused here.
+ */
+int ppa_reset(Scsi_Cmnd * cmd, unsigned int x)
+{
+ int host_no = cmd->host->unique_id;
+ int ppb = PPA_BASE(host_no);
+
+ /*
+ * PHASE1:
+ * Bring the interface crashing down on whatever is running
+ * hopefully this will kill the request.
+ * Bring back up the interface, reset the drive (and anything
+ * attached for that manner)
+ */
+ if (cmd)
+ if (cmd->SCp.phase)
+ ppa_disconnect(cmd->host->unique_id);
+
+ /* Same reset pulse sequence as ppa_init(): 0x40 on data, strobe
+ * control 0x8 -> 0xc with settle delays. */
+ ppa_connect(host_no, CONNECT_NORMAL);
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x8);
+ udelay(30);
+ w_ctr(ppb, 0xc);
+ udelay(1000); /* delay for devices to settle down */
+ ppa_disconnect(host_no);
+ udelay(1000); /* Additional delay to allow devices to settle down */
+
+ /*
+ * PHASE2:
+ * Sanity check for the sake of mid-level driver
+ */
+ if (!cmd) {
+ printk("ppa bus reset called for invalid command.\n");
+ return SCSI_RESET_NOT_RUNNING;
+ }
+ /*
+ * PHASE3:
+ * Flag the current command as having died due to reset
+ */
+ ppa_connect(host_no, CONNECT_NORMAL);
+ ppa_fail(host_no, DID_RESET);
+
+ /* Since the command was already on the timer queue ppa_interrupt
+ * will be called shortly.
+ */
+ return SCSI_RESET_PENDING;
+}
+
+/*
+ * Probe SCSI IDs 0-7 for a device and try to talk to the first one
+ * found, preferring EPP (on 8-byte-aligned port bases) and falling
+ * back to the previously configured mode if EPP fails.
+ * Returns 0 once communication is established, 1 if no device could
+ * be reached (driver load should then be aborted).
+ */
+static int device_check(int host_no)
+{
+ /* This routine looks for a device and then attempts to use EPP
+ to send a command. If all goes as planned then EPP is available. */
+
+ /* An all-zero 6-byte CDB is TEST UNIT READY. */
+ static char cmd[6] =
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int loop, old_mode, status, k, ppb = PPA_BASE(host_no);
+ unsigned char l;
+
+ old_mode = ppa_hosts[host_no].mode;
+ for (loop = 0; loop < 8; loop++) {
+ /* Attempt to use EPP for Test Unit Ready */
+ if ((ppb & 0x0007) == 0x0000)
+ ppa_hosts[host_no].mode = PPA_EPP_32;
+
+ second_pass:
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+ /* Select SCSI device */
+ if (!ppa_select(host_no, loop)) {
+ ppa_disconnect(host_no);
+ continue;
+ }
+ printk("ppa: Found device at ID %i, Attempting to use %s\n", loop,
+ PPA_MODE_STRING[ppa_hosts[host_no].mode]);
+
+ /* Send SCSI command */
+ status = 1;
+ w_ctr(ppb, 0x0c);
+ /* Send the six CDB bytes one at a time. The original code
+ * passed 'cmd' (always byte 0) on every iteration; it only
+ * worked because the CDB is all zeroes - index with 'l' as
+ * was clearly intended. */
+ for (l = 0; (l < 6) && (status); l++)
+ status = ppa_out(host_no, &cmd[l], 1);
+
+ if (!status) {
+ /* Command failed: pulse a SCSI bus reset, then retry in
+ * the previous mode if we were speculatively on EPP. */
+ ppa_disconnect(host_no);
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x08);
+ udelay(30);
+ w_ctr(ppb, 0x0c);
+ udelay(1000);
+ ppa_disconnect(host_no);
+ udelay(1000);
+ if (ppa_hosts[host_no].mode == PPA_EPP_32) {
+ ppa_hosts[host_no].mode = old_mode;
+ goto second_pass;
+ }
+ printk("ppa: Unable to establish communication, aborting driver load.\n");
+ return 1;
+ }
+ /* Wait up to ~1 second for the status phase (0xf0). */
+ w_ctr(ppb, 0x0c);
+ k = 1000000; /* 1 Second */
+ do {
+ l = r_str(ppb);
+ k--;
+ udelay(1);
+ } while (!(l & 0x80) && (k));
+
+ l &= 0xf0;
+
+ if (l != 0xf0) {
+ /* Wrong phase: same reset-and-fallback dance as above. */
+ ppa_disconnect(host_no);
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x08);
+ udelay(30);
+ w_ctr(ppb, 0x0c);
+ udelay(1000);
+ ppa_disconnect(host_no);
+ udelay(1000);
+ if (ppa_hosts[host_no].mode == PPA_EPP_32) {
+ ppa_hosts[host_no].mode = old_mode;
+ goto second_pass;
+ }
+ printk("ppa: Unable to establish communication, aborting driver load.\n");
+ return 1;
+ }
+ ppa_disconnect(host_no);
+ printk("ppa: Communication established with ID %i using %s\n", loop,
+ PPA_MODE_STRING[ppa_hosts[host_no].mode]);
+ return 0;
+ }
+ printk("ppa: No devices found, aborting driver load.\n");
+ return 1;
+}
+
+#define PPA_ID "ppa: "
+
+/*
+ * Probe the parallel port at 'port' and return a bitmask of detected
+ * capabilities (PPA_PROBE_SPP / _ECR / _PS2 / _EPP17 / _EPP19), or 0
+ * if no working SPP port is present. Sections below test SPP, ECP,
+ * PS/2 bidirectional and EPP support in turn.
+ */
+int port_probe(unsigned short port)
+{
+ int retv = 0;
+ unsigned char a, b, c;
+ unsigned int i, j;
+
+
+ printk(PPA_ID "Probing port %04x\n", port);
+
+/* ##### ###### ######
+ * # # # # # #
+ * # # # # #
+ * ##### ###### ######
+ * # # #
+ * # # # #
+ * ##### # #
+ */
+
+ /* SPP test: a written data byte must read back unchanged. */
+ outb(0x0c, port + 0x402);
+ outb(0x0c, port + 0x002);
+ outb(0x55, port);
+ a = inb(port);
+ if (a != 0x55)
+ return retv;
+ printk(PPA_ID " SPP port present\n");
+
+ retv += PPA_PROBE_SPP;
+
+/* ####### ##### ######
+ * # # # # #
+ * # # # #
+ * ##### # ######
+ * # # #
+ * # # # #
+ * ####### ##### #
+ */
+
+ /* ECP test: drain the FIFO, then check the ECR register behaves
+ * like one (writable mode bits, FIFO full/empty flags). */
+ for (i = 1024; i > 0; i--) { /* clear at most 1k of data from FIFO */
+ a = inb(port + 0x402);
+ if ((a & 0x03) == 0x03)
+ goto no_ecp;
+ if (a & 0x01)
+ break;
+ inb(port + 0x400); /* Remove byte from FIFO */
+ }
+
+ if (i <= 0)
+ goto no_ecp;
+
+ b = a ^ 3;
+ outb(b, port + 0x402);
+ c = inb(port + 0x402);
+
+ if (a == c) {
+ outb(0xc0, port + 0x402); /* FIFO test */
+ j = 0;
+ while (!(inb(port + 0x402) & 0x01) && (j < 1024)) {
+ inb(port + 0x400);
+ j++;
+ }
+ if (j >= 1024)
+ goto no_ecp;
+ /* Fill the FIFO to measure its depth ('i' counts bytes). */
+ i = 0;
+ j = 0;
+ while (!(inb(port + 0x402) & 0x02) && (j < 1024)) {
+ outb(0x00, port + 0x400);
+ i++;
+ j++;
+ }
+ if (j >= 1024)
+ goto no_ecp;
+ j = 0;
+ while (!(inb(port + 0x402) & 0x01) && (j < 1024)) {
+ inb(port + 0x400);
+ j++;
+ }
+ if (j >= 1024)
+ goto no_ecp;
+ printk(PPA_ID " ECP with a %i byte FIFO present\n", i);
+
+ retv += PPA_PROBE_ECR;
+ }
+/* ###### ##### #####
+ * # # # # # #
+ * # # # #
+ * ###### ##### #####
+ * # # #
+ * # # # #
+ * # ##### #######
+ */
+
+ no_ecp:
+ if (retv & PPA_PROBE_ECR)
+ outb(0x20, port + 0x402);
+
+ /* PS/2 test: in reverse mode (0x2c) the data register should no
+ * longer read back what was written. */
+ outb(0x55, port);
+ outb(0x0c, port + 2);
+ a = inb(port);
+ outb(0x55, port);
+ outb(0x2c, port + 2);
+ b = inb(port);
+ if (a != b) {
+ printk(PPA_ID " PS/2 bidirectional port present\n");
+ retv += PPA_PROBE_PS2;
+ }
+/* ####### ###### ######
+ * # # # # #
+ * # # # # #
+ * ##### ###### ######
+ * # # #
+ * # # #
+ * ####### # #
+ */
+
+ /* EPP registers only exist on 8-byte-aligned bases (0x278/0x378). */
+ if (port & 0x007) {
+ printk(PPA_ID " EPP not supported at this address\n");
+ return retv;
+ }
+ if (retv & PPA_PROBE_ECR) {
+ /* Intel chipset bug check: the EPP timeout bit must be
+ * clearable in every ECR mode, else EPP is phony. */
+ for (i = 0x00; i < 0x80; i += 0x20) {
+ outb(i, port + 0x402);
+
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+ a = inb(port + 1);
+ if (!(a & 0x01)) {
+ printk(PPA_ID " Failed Intel bug check. (Phony EPP in ECP)\n");
+ return retv;
+ }
+ }
+ printk(PPA_ID " Passed Intel bug check.\n");
+ outb(0x80, port + 0x402);
+ }
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+ a = inb(port + 1);
+
+ if (a & 0x01) {
+ /* Timeout bit stuck: no usable EPP here; restore and leave. */
+ outb(0x0c, port + 0x402);
+ outb(0x0c, port + 0x002);
+ return retv;
+ }
+
+ /* Trigger an EPP read and see whether the timeout bit sets, which
+ * distinguishes EPP 1.9 (hardware direction) from the rest. */
+ outb(0x04, port + 2);
+ inb(port + 4);
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+
+ if (a & 0x01) {
+ printk(PPA_ID " EPP 1.9 with hardware direction protocol\n");
+ retv += PPA_PROBE_EPP19;
+ } else {
+ /* The EPP timeout bit was not set, this could either be:
+ * EPP 1.7
+ * EPP 1.9 with software direction
+ */
+ outb(0x24, port + 2);
+ inb(port + 4);
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+ if (a & 0x01) {
+ printk(PPA_ID " EPP 1.9 with software direction protocol\n");
+ retv += PPA_PROBE_EPP19;
+ } else {
+ printk(PPA_ID " EPP 1.7\n");
+ retv += PPA_PROBE_EPP17;
+ }
+ }
+
+ /* Restore the port to a sane SPP state before returning. */
+ outb(0x0c, port + 0x402);
+ outb(0x0c, port + 0x002);
+ return retv;
+}
diff --git a/linux/src/drivers/scsi/ppa.h b/linux/src/drivers/scsi/ppa.h
new file mode 100644
index 0000000..1497c20
--- /dev/null
+++ b/linux/src/drivers/scsi/ppa.h
@@ -0,0 +1,176 @@
+/* Driver for the PPA3 parallel port SCSI HBA embedded in
+ * the Iomega ZIP drive
+ *
+ * (c) 1996 Grant R. Guenther grant@torque.net
+ * David Campbell campbell@torque.net
+ *
+ * All comments to David.
+ */
+
+#include <linux/config.h> /* CONFIG_SCSI_PPA_HAVE_PEDANTIC */
+#ifndef _PPA_H
+#define _PPA_H
+
+#define PPA_VERSION "1.42"
+
+#if 0
+/* Use the following to enable certain chipset support
+ * Default is PEDANTIC = 3
+ */
+#ifndef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+#define CONFIG_SCSI_PPA_HAVE_PEDANTIC 3
+#endif
+#endif
+
+/*
+ * this driver has been hacked by Matteo Frigo (athena@theory.lcs.mit.edu)
+ * to support EPP and scatter-gather. [0.26-athena]
+ *
+ * additional hacks by David Campbell
+ * in response to this driver "mis-behaving" on his machine.
+ * Fixed EPP to handle "software" changing of EPP port data direction.
+ * Chased down EPP timeouts
+ * Made this driver "kernel version friendly" [0.28-athena]
+ *
+ * [ Stuff removed ]
+ *
+ * Compiled against 2.1.53.
+ * Rebuilt ppa_abort() function, should handle unplugged cable.
+ * [1.35s]
+ *
+ * PPA now auto probes for EPP on base address which are aligned on
+ * 8 byte boundaries (0x278 & 0x378) using the attached devices.
+ * This hopefully avoids the nasty problem of trying to detect EPP.
+ * Tested on 2.1.53 [1.36]
+ *
+ * The id_probe utility no longer performs read/write tests.
+ * Additional code included for checking the Intel ECP bug
+ * (Bit 0 of STR stuck low which fools the EPP detection routine)
+ * [1.37]
+ *
+ * Oops! Got the bit sign mixed up for the Intel bug check.
+ * Found that an additional delay is required during SCSI resets
+ * to allow devices to settle down.
+ * [1.38]
+ *
+ * Fixed all problems in the parport sharing scheme. Now ppa can be safe
+ * used with lp or other parport devices on the same parallel port.
+ * 1997 by Andrea Arcangeli
+ * [1.39]
+ *
+ * Little fix in the ppa engine to ensure that ppa doesn't release the
+ * parport or disconnect in the wrong cases.
+ * 1997 by Andrea Arcangeli
+ * [1.40]
+ *
+ * Corrected ppa.h for 2.1.x kernels (>=2.1.85)
+ * Modified "Nat Semi Kludge" for extended chipsets
+ * [1.41]
+ *
+ * Fixed id_probe for EPP 1.9 chipsets (misdetected as EPP 1.7)
+ * [1.42]
+ */
+/* ------ END OF USER CONFIGURABLE PARAMETERS ----- */
+
+#ifdef PPA_CODE
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/tqueue.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include "sd.h"
+#include "hosts.h"
+/* batteries not included :-) */
+
+/*
+ * modes in which the driver can operate
+ */
+#define PPA_AUTODETECT 0 /* Autodetect mode */
+#define PPA_NIBBLE 1 /* work in standard 4 bit mode */
+#define PPA_PS2 2 /* PS/2 byte mode */
+#define PPA_EPP_8 3 /* EPP mode, 8 bit */
+#define PPA_EPP_16 4 /* EPP mode, 16 bit */
+#define PPA_EPP_32 5 /* EPP mode, 32 bit */
+#define PPA_UNKNOWN 6 /* Just in case... */
+
+/* Human-readable names for the PPA_* mode constants above, indexed by
+ * ppa_hosts[].mode. Const-qualified: every use in ppa.c only reads
+ * these strings (printk/sprintf). */
+static const char *PPA_MODE_STRING[] =
+{
+ "Autodetect",
+ "SPP",
+ "PS/2",
+ "EPP 8 bit",
+ "EPP 16 bit",
+ "EPP 32 bit",
+ "Unknown"};
+
+/* This is a global option */
+int ppa_sg = SG_ALL; /* enable/disable scatter-gather. */
+
+/* other options */
+#define PPA_CAN_QUEUE 1 /* use "queueing" interface */
+#define PPA_BURST_SIZE 512 /* data burst size */
+#define PPA_SELECT_TMO 5000 /* how long to wait for target ? */
+#define PPA_SPIN_TMO 50000 /* ppa_wait loop limiter */
+#define PPA_DEBUG 0 /* debuging option */
+#define IN_EPP_MODE(x) (x == PPA_EPP_8 || x == PPA_EPP_16 || x == PPA_EPP_32)
+
+/* args to ppa_connect */
+#define CONNECT_EPP_MAYBE 1
+#define CONNECT_NORMAL 0
+
+#define r_dtr(x) (unsigned char)inb((x))
+#define r_str(x) (unsigned char)inb((x)+1)
+#define r_ctr(x) (unsigned char)inb((x)+2)
+#define r_epp(x) (unsigned char)inb((x)+4)
+#define r_fifo(x) (unsigned char)inb((x)+0x400)
+#define r_ecr(x) (unsigned char)inb((x)+0x402)
+
+#define w_dtr(x,y) outb(y, (x))
+#define w_str(x,y) outb(y, (x)+1)
+#define w_ctr(x,y) outb(y, (x)+2)
+#define w_epp(x,y) outb(y, (x)+4)
+#define w_fifo(x,y) outb(y, (x)+0x400)
+#define w_ecr(x,y) outb(y, (x)+0x402)
+
+static int ppa_engine(ppa_struct *, Scsi_Cmnd *);
+static int ppa_in(int, char *, int);
+static int ppa_init(int);
+static void ppa_interrupt(void *);
+static int ppa_out(int, char *, int);
+
+struct proc_dir_entry proc_scsi_ppa =
+{PROC_SCSI_PPA, 3, "ppa", S_IFDIR | S_IRUGO | S_IXUGO, 2};
+#else
+extern struct proc_dir_entry proc_scsi_ppa;
+#endif
+
+int ppa_detect(Scsi_Host_Template *);
+const char *ppa_info(struct Scsi_Host *);
+int ppa_queuecommand(Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
+int ppa_abort(Scsi_Cmnd *);
+int ppa_reset(Scsi_Cmnd *, unsigned int);
+int ppa_proc_info(char *, char **, off_t, int, int, int);
+int ppa_biosparam(Disk *, kdev_t, int *);
+
+#define PPA { proc_dir: &proc_scsi_ppa, \
+ proc_info: ppa_proc_info, \
+ name: "Iomega parport ZIP drive", \
+ detect: ppa_detect, \
+ queuecommand: ppa_queuecommand, \
+ abort: ppa_abort, \
+ reset: ppa_reset, \
+ bios_param: ppa_biosparam, \
+ this_id: -1, \
+ sg_tablesize: SG_ALL, \
+ cmd_per_lun: 1, \
+ use_clustering: ENABLE_CLUSTERING \
+}
+#endif /* _PPA_H */
diff --git a/linux/src/drivers/scsi/qlogicfas.c b/linux/src/drivers/scsi/qlogicfas.c
new file mode 100644
index 0000000..b5cb9dd
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicfas.c
@@ -0,0 +1,679 @@
+/*----------------------------------------------------------------*/
+/*
+ Qlogic linux driver - work in progress. No Warranty express or implied.
+ Use at your own risk. Support Tort Reform so you won't have to read all
+ these silly disclaimers.
+
+ Copyright 1994, Tom Zerucha.
+ zerucha@shell.portal.com
+
+ Additional Code, and much appreciated help by
+ Michael A. Griffith
+ grif@cs.ucr.edu
+
+ Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA
+ help respectively, and for suffering through my foolishness during the
+ debugging process.
+
+ Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994
+ (you can reference it, but it is incomplete and inaccurate in places)
+
+ Version 0.45 6/9/96 - kernel 1.2.0+
+
+ Functions as standalone, loadable, and PCMCIA driver, the latter from
+ Dave Hind's PCMCIA package.
+
+ Redistributable under terms of the GNU Public License
+
+*/
+/*----------------------------------------------------------------*/
+/* Configuration */
+
+/* Set the following to 2 to use normal interrupt (active high/totempole-
+ tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open
+ drain */
+#define QL_INT_ACTIVE_HIGH 2
+
+/* Set the following to 1 to enable the use of interrupts. Note that 0 tends
+ to be more stable, but slower (or ties up the system more) */
+#define QL_USE_IRQ 1
+
+/* Set the following to max out the speed of the PIO PseudoDMA transfers,
+ again, 0 tends to be slower, but more stable. */
+#define QL_TURBO_PDMA 1
+
+/* This should be 1 to enable parity detection */
+#define QL_ENABLE_PARITY 1
+
+/* This will reset all devices when the driver is initialized (during bootup).
+ The other linux drivers don't do this, but the DOS drivers do, and after
+ using DOS or some kind of crash or lockup this will bring things back
+ without requiring a cold boot. It does take some time to recover from a
+ reset, so it is slower, and I have seen timeouts so that devices weren't
+ recognized when this was set. */
+#define QL_RESET_AT_START 0
+
+/* crystal frequency in megahertz (for offset 5 and 9)
+ Please set this for your card. Most Qlogic cards are 40 MHz. The
+ Control Concepts ISA (not VLB) is 24 MHz */
+#define XTALFREQ 40
+
+/**********/
+/* DANGER! modify these at your own risk */
+/* SLOWCABLE can usually be reset to zero if you have a clean setup and
+ proper termination. The rest are for synchronous transfers and other
+ advanced features if your device can transfer faster than 5Mb/sec.
+ If you are really curious, email me for a quick howto until I have
+ something official */
+/**********/
+
+/*****/
+/* config register 1 (offset 8) options */
+/* This needs to be set to 1 if your cabling is long or noisy */
+#define SLOWCABLE 1
+
+/*****/
+/* offset 0xc */
+/* This will set fast (10Mhz) synchronous timing when set to 1
+ For this to have an effect, FASTCLK must also be 1 */
+#define FASTSCSI 0
+
+/* This when set to 1 will set a faster sync transfer rate */
+#define FASTCLK 0
+/*(XTALFREQ>25?1:0)*/
+
+/*****/
+/* offset 6 */
+/* This is the sync transfer divisor, XTALFREQ/X will be the maximum
+ achievable data rate (assuming the rest of the system is capable
+ and set properly) */
+#define SYNCXFRPD 5
+/*(XTALFREQ/5)*/
+
+/*****/
+/* offset 7 */
+/* This is the count of how many synchronous transfers can take place
+ i.e. how many reqs can occur before an ack is given.
+ The maximum value for this is 15, the upper bits can modify
+ REQ/ACK assertion and deassertion during synchronous transfers
+ If this is 0, the bus will only transfer asynchronously */
+#define SYNCOFFST 0
+/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles
+ of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will
+ cause the deassertion to be early by 1/2 clock. Bits 5&4 control
+ the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). */
+
+/*----------------------------------------------------------------*/
+#ifdef PCMCIA
+#undef QL_INT_ACTIVE_HIGH
+#define QL_INT_ACTIVE_HIGH 0
+#define MODULE
+#endif
+
+#include <linux/module.h>
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/blk.h> /* to get disk capacity */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/unistd.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include "sd.h"
+#include "hosts.h"
+#include "qlogicfas.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_qlogicfas = {
+ PROC_SCSI_QLOGICFAS, 6, "qlogicfas",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/*----------------------------------------------------------------*/
+/* driver state info, local to driver */
+static int qbase = 0; /* Port */
+static int qinitid; /* initiator ID */
+static int qabort; /* Flag to cause an abort */
+static int qlirq = -1; /* IRQ being used */
+static char qinfo[80]; /* description */
+static Scsi_Cmnd *qlcmd; /* current command being processed */
+
+static int qlcfg5 = ( XTALFREQ << 5 ); /* 15625/512 */
+static int qlcfg6 = SYNCXFRPD;
+static int qlcfg7 = SYNCOFFST;
+static int qlcfg8 = ( SLOWCABLE << 7 ) | ( QL_ENABLE_PARITY << 4 );
+static int qlcfg9 = ( ( XTALFREQ + 4 ) / 5 );
+static int qlcfgc = ( FASTCLK << 3 ) | ( FASTSCSI << 4 );
+
+/*----------------------------------------------------------------*/
+/* The qlogic card uses two register maps - These macros select which one */
+#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd ))
+#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | QL_INT_ACTIVE_HIGH , qbase + 0xd ))
+
+/* following is watchdog timeout in microseconds */
+#define WATCHDOG 5000000
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at and as a simple profiler) */
+
+#if 0
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+/*----------------------------------------------------------------*/
+/* local functions */
+/*----------------------------------------------------------------*/
+static void ql_zap(void);
+/* error recovery - reset everything */
+void ql_zap()
+{
+int x;
+unsigned long flags;
+ save_flags( flags );
+ cli();
+ x = inb(qbase + 0xd);
+ REG0;
+ outb(3, qbase + 3); /* reset SCSI */
+ outb(2, qbase + 3); /* reset chip */
+ if (x & 0x80)
+ REG1;
+ restore_flags( flags );
+}
+
+/*----------------------------------------------------------------*/
+/* do pseudo-dma */
+static int ql_pdma(int phase, char *request, int reqlen)
+{
+int j;
+ j = 0;
+ if (phase & 1) { /* in */
+#if QL_TURBO_PDMA
+rtrc(4)
+ /* empty fifo in large chunks */
+ if( reqlen >= 128 && (inb( qbase + 8 ) & 2) ) { /* full */
+ insl( qbase + 4, request, 32 );
+ reqlen -= 128;
+ request += 128;
+ }
+ while( reqlen >= 84 && !( j & 0xc0 ) ) /* 2/3 */
+ if( (j=inb( qbase + 8 )) & 4 ) {
+ insl( qbase + 4, request, 21 );
+ reqlen -= 84;
+ request += 84;
+ }
+ if( reqlen >= 44 && (inb( qbase + 8 ) & 8) ) { /* 1/3 */
+ insl( qbase + 4, request, 11 );
+ reqlen -= 44;
+ request += 44;
+ }
+#endif
+ /* until both empty and int (or until reqlen is 0) */
+rtrc(7)
+ j = 0;
+ while( reqlen && !( (j & 0x10) && (j & 0xc0) ) ) {
+ /* while bytes to receive and not empty */
+ j &= 0xc0;
+ while ( reqlen && !( (j=inb(qbase + 8)) & 0x10 ) ) {
+ *request++ = inb(qbase + 4);
+ reqlen--;
+ }
+ if( j & 0x10 )
+ j = inb(qbase+8);
+
+ }
+ }
+ else { /* out */
+#if QL_TURBO_PDMA
+rtrc(4)
+ if( reqlen >= 128 && inb( qbase + 8 ) & 0x10 ) { /* empty */
+ outsl(qbase + 4, request, 32 );
+ reqlen -= 128;
+ request += 128;
+ }
+ while( reqlen >= 84 && !( j & 0xc0 ) ) /* 1/3 */
+ if( !((j=inb( qbase + 8 )) & 8) ) {
+ outsl( qbase + 4, request, 21 );
+ reqlen -= 84;
+ request += 84;
+ }
+ if( reqlen >= 40 && !(inb( qbase + 8 ) & 4 ) ) { /* 2/3 */
+ outsl( qbase + 4, request, 10 );
+ reqlen -= 40;
+ request += 40;
+ }
+#endif
+ /* until full and int (or until reqlen is 0) */
+rtrc(7)
+ j = 0;
+ while( reqlen && !( (j & 2) && (j & 0xc0) ) ) {
+ /* while bytes to send and not full */
+ while ( reqlen && !( (j=inb(qbase + 8)) & 2 ) ) {
+ outb(*request++, qbase + 4);
+ reqlen--;
+ }
+ if( j & 2 )
+ j = inb(qbase+8);
+ }
+ }
+/* maybe return reqlen */
+ return inb( qbase + 8 ) & 0xc0;
+}
+
+/*----------------------------------------------------------------*/
+/* wait for interrupt flag (polled - not real hardware interrupt) */
+static int ql_wai(void)
+{
+int i,k;
+ k = 0;
+ i = jiffies + WATCHDOG;
+ while ( i > jiffies && !qabort && !((k = inb(qbase + 4)) & 0xe0))
+ barrier();
+ if (i <= jiffies)
+ return (DID_TIME_OUT);
+ if (qabort)
+ return (qabort == 1 ? DID_ABORT : DID_RESET);
+ if (k & 0x60)
+ ql_zap();
+ if (k & 0x20)
+ return (DID_PARITY);
+ if (k & 0x40)
+ return (DID_ERROR);
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* initiate scsi command - queueing handler */
+static void ql_icmd(Scsi_Cmnd * cmd)
+{
+unsigned int i;
+unsigned long flags;
+
+ qabort = 0;
+
+ save_flags( flags );
+ cli();
+ REG0;
+/* clearing of interrupts and the fifo is needed */
+ inb(qbase + 5); /* clear interrupts */
+ if (inb(qbase + 5)) /* if still interrupting */
+ outb(2, qbase + 3); /* reset chip */
+ else if (inb(qbase + 7) & 0x1f)
+ outb(1, qbase + 3); /* clear fifo */
+ while (inb(qbase + 5)); /* clear ints */
+ REG1;
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ outb(0, qbase + 0xb); /* disable ints */
+ inb(qbase + 8); /* clear int bits */
+ REG0;
+ outb(0x40, qbase + 0xb); /* enable features */
+
+/* configurables */
+ outb( qlcfgc , qbase + 0xc);
+/* config: no reset interrupt, (initiator) bus id */
+ outb( 0x40 | qlcfg8 | qinitid, qbase + 8);
+ outb( qlcfg7 , qbase + 7 );
+ outb( qlcfg6 , qbase + 6 );
+/**/
+ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9 & 7, qbase + 9); /* prescaler */
+/* outb(0x99, qbase + 5); */
+ outb(cmd->target, qbase + 4);
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ outb(cmd->cmnd[i], qbase + 2);
+ qlcmd = cmd;
+ outb(0x41, qbase + 3); /* select and send command */
+ restore_flags( flags );
+}
+/*----------------------------------------------------------------*/
+/* process scsi command - usually after interrupt */
+static unsigned int ql_pcmd(Scsi_Cmnd * cmd)
+{
+unsigned int i, j, k;
+unsigned int result; /* ultimate return result */
+unsigned int status; /* scsi returned status */
+unsigned int message; /* scsi returned message */
+unsigned int phase; /* recorded scsi phase */
+unsigned int reqlen; /* total length of transfer */
+struct scatterlist *sglist; /* scatter-gather list pointer */
+unsigned int sgcount; /* sg counter */
+
+rtrc(1)
+ j = inb(qbase + 6);
+ i = inb(qbase + 5);
+ if (i == 0x20) {
+ return (DID_NO_CONNECT << 16);
+ }
+ i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
+ if (i != 0x18) {
+ printk("Ql:Bad Interrupt status:%02x\n", i);
+ ql_zap();
+ return (DID_BAD_INTR << 16);
+ }
+ j &= 7; /* j = inb( qbase + 7 ) >> 5; */
+/* correct status is supposed to be step 4 */
+/* it sometimes returns step 3 but with 0 bytes left to send */
+/* We can try stuffing the FIFO with the max each time, but we will get a
+ sequence of 3 if any bytes are left (but we do flush the FIFO anyway) */
+ if(j != 3 && j != 4) {
+ printk("Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n", j, i, inb( qbase+7 ) & 0x1f );
+ ql_zap();
+ return (DID_ERROR << 16);
+ }
+ result = DID_OK;
+ if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
+ outb(1, qbase + 3); /* clear fifo */
+/* note that request_bufflen is the total xfer size when sg is used */
+ reqlen = cmd->request_bufflen;
+/* note that it won't work if transfers > 16M are requested */
+ if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
+rtrc(2)
+ outb(reqlen, qbase); /* low-mid xfer cnt */
+ outb(reqlen >> 8, qbase+1); /* low-mid xfer cnt */
+ outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */
+ outb(0x90, qbase + 3); /* command do xfer */
+/* PIO pseudo DMA to buffer or sglist */
+ REG1;
+ if (!cmd->use_sg)
+ ql_pdma(phase, cmd->request_buffer, cmd->request_bufflen);
+ else {
+ sgcount = cmd->use_sg;
+ sglist = cmd->request_buffer;
+ while (sgcount--) {
+ if (qabort) {
+ REG0;
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ }
+ if (ql_pdma(phase, sglist->address, sglist->length))
+ break;
+ sglist++;
+ }
+ }
+ REG0;
+rtrc(2)
+/* wait for irq (split into second state of irq handler if this can take time) */
+ if ((k = ql_wai()))
+ return (k << 16);
+ k = inb(qbase + 5); /* should be 0x10, bus service */
+ }
+/*** Enter Status (and Message In) Phase ***/
+ k = jiffies + WATCHDOG;
+ while ( k > jiffies && !qabort && !(inb(qbase + 4) & 6)); /* wait for status phase */
+ if ( k <= jiffies ) {
+ ql_zap();
+ return (DID_TIME_OUT << 16);
+ }
+ while (inb(qbase + 5)); /* clear pending ints */
+ if (qabort)
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ outb(0x11, qbase + 3); /* get status and message */
+ if ((k = ql_wai()))
+ return (k << 16);
+ i = inb(qbase + 5); /* get chip irq stat */
+ j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
+ status = inb(qbase + 2);
+ message = inb(qbase + 2);
+/* should get function complete int if Status and message, else bus serv if only status */
+ if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
+ printk("Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
+ result = DID_ERROR;
+ }
+ outb(0x12, qbase + 3); /* done, disconnect */
+rtrc(1)
+ if ((k = ql_wai()))
+ return (k << 16);
+/* should get bus service interrupt and disconnect interrupt */
+ i = inb(qbase + 5); /* should be bus service */
+ while (!qabort && ((i & 0x20) != 0x20)) {
+ barrier();
+ i |= inb(qbase + 5);
+ }
+rtrc(0)
+ if (qabort)
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ return (result << 16) | (message << 8) | (status & STATUS_MASK);
+}
+
+#if QL_USE_IRQ
+/*----------------------------------------------------------------*/
+/* interrupt handler */
+static void ql_ihandl(int irq, void *dev_id, struct pt_regs * regs)
+{
+Scsi_Cmnd *icmd;
+ REG0;
+ if (!(inb(qbase + 4) & 0x80)) /* false alarm? */
+ return;
+ if (qlcmd == NULL) { /* no command to process? */
+ int i;
+ i = 16;
+ while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */
+ return;
+ }
+ icmd = qlcmd;
+ icmd->result = ql_pcmd(icmd);
+ qlcmd = NULL;
+/* if result is CHECK CONDITION done calls qcommand to request sense */
+ (icmd->scsi_done) (icmd);
+}
+#endif
+
+/*----------------------------------------------------------------*/
+/* global functions */
+/*----------------------------------------------------------------*/
+/* non queued command */
+#if QL_USE_IRQ
+static void qlidone(Scsi_Cmnd * cmd) {}; /* null function */
+#endif
+
+/* command process */
+int qlogicfas_command(Scsi_Cmnd * cmd)
+{
+int k;
+#if QL_USE_IRQ
+ if (qlirq >= 0) {
+ qlogicfas_queuecommand(cmd, qlidone);
+ while (qlcmd != NULL);
+ return cmd->result;
+ }
+#endif
+/* non-irq version */
+ if (cmd->target == qinitid)
+ return (DID_BAD_TARGET << 16);
+ ql_icmd(cmd);
+ if ((k = ql_wai()))
+ return (k << 16);
+ return ql_pcmd(cmd);
+
+}
+
+#if QL_USE_IRQ
+/*----------------------------------------------------------------*/
+/* queued command */
+int qlogicfas_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ if(cmd->target == qinitid) {
+ cmd->result = DID_BAD_TARGET << 16;
+ done(cmd);
+ return 0;
+ }
+
+ cmd->scsi_done = done;
+/* wait for the last command's interrupt to finish */
+ while (qlcmd != NULL)
+ barrier();
+ ql_icmd(cmd);
+ return 0;
+}
+#else
+int qlogicfas_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ return 1;
+}
+#endif
+
+#ifdef PCMCIA
+/*----------------------------------------------------------------*/
+/* allow PCMCIA code to preset the port */
+/* port should be 0 and irq to -1 respectively for autoprobing */
+void qlogicfas_preset(int port, int irq)
+{
+ qbase=port;
+ qlirq=irq;
+}
+#endif
+
+/*----------------------------------------------------------------*/
+/* look for qlogic card and init if found */
+int qlogicfas_detect(Scsi_Host_Template * host)
+{
+int i, j; /* these are only used by IRQ detect */
+int qltyp; /* type of chip */
+struct Scsi_Host *hreg; /* registered host structure */
+unsigned long flags;
+
+host->proc_dir = &proc_scsi_qlogicfas;
+
+/* Qlogic Cards only exist at 0x230 or 0x330 (the chip itself decodes the
+ address - I check 230 first since MIDI cards are typically at 330
+
+ Theoretically, two Qlogic cards can coexist in the same system. This
+ should work by simply using this as a loadable module for the second
+ card, but I haven't tested this.
+*/
+
+ if( !qbase ) {
+ for (qbase = 0x230; qbase < 0x430; qbase += 0x100) {
+ if( check_region( qbase , 0x10 ) )
+ continue;
+ REG1;
+ if ( ( (inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7 )
+ && ( (inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7 ) )
+ break;
+ }
+ if (qbase == 0x430)
+ return 0;
+ }
+ else
+ printk( "Ql: Using preset base address of %03x\n", qbase );
+
+ qltyp = inb(qbase + 0xe) & 0xf8;
+ qinitid = host->this_id;
+ if (qinitid < 0)
+ qinitid = 7; /* if no ID, use 7 */
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ REG0;
+ outb(0x40 | qlcfg8 | qinitid, qbase + 8); /* (ini) bus id, disable scsi rst */
+ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9, qbase + 9); /* prescaler */
+#if QL_RESET_AT_START
+ outb( 3 , qbase + 3 );
+ REG1;
+ while( inb( qbase + 0xf ) & 4 );
+ REG0;
+#endif
+#if QL_USE_IRQ
+/* IRQ probe - toggle pin and check request pending */
+
+ if( qlirq == -1 ) {
+ save_flags( flags );
+ cli();
+ i = 0xffff;
+ j = 3;
+ outb(0x90, qbase + 3); /* illegal command - cause interrupt */
+ REG1;
+ outb(10, 0x20); /* access pending interrupt map */
+ outb(10, 0xa0);
+ while (j--) {
+ outb(0xb0 | QL_INT_ACTIVE_HIGH , qbase + 0xd); /* int pin off */
+ i &= ~(inb(0x20) | (inb(0xa0) << 8)); /* find IRQ off */
+ outb(0xb4 | QL_INT_ACTIVE_HIGH , qbase + 0xd); /* int pin on */
+ i &= inb(0x20) | (inb(0xa0) << 8); /* find IRQ on */
+ }
+ REG0;
+ while (inb(qbase + 5)); /* purge int */
+ j = -1;
+ while (i) /* find on bit */
+ i >>= 1, j++; /* should check for exactly 1 on */
+ qlirq = j;
+ restore_flags( flags );
+ }
+ else
+ printk( "Ql: Using preset IRQ %d\n", qlirq );
+
+ if (qlirq >= 0 && !request_irq(qlirq, ql_ihandl, 0, "qlogicfas", NULL))
+ host->can_queue = 1;
+#endif
+ request_region( qbase , 0x10 ,"qlogicfas");
+ hreg = scsi_register( host , 0 ); /* no host data */
+ hreg->io_port = qbase;
+ hreg->n_io_port = 16;
+ hreg->dma_channel = -1;
+ if( qlirq != -1 )
+ hreg->irq = qlirq;
+
+ sprintf(qinfo, "Qlogicfas Driver version 0.45, chip %02X at %03X, IRQ %d, TPdma:%d",
+ qltyp, qbase, qlirq, QL_TURBO_PDMA );
+ host->name = qinfo;
+
+ return 1;
+}
+
+/*----------------------------------------------------------------*/
+/* return bios parameters */
+int qlogicfas_biosparam(Disk * disk, kdev_t dev, int ip[])
+{
+/* This should mimic the DOS Qlogic driver's behavior exactly */
+ ip[0] = 0x40;
+ ip[1] = 0x20;
+ ip[2] = disk->capacity / (ip[0] * ip[1]);
+ if (ip[2] > 1024) {
+ ip[0] = 0xff;
+ ip[1] = 0x3f;
+ ip[2] = disk->capacity / (ip[0] * ip[1]);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* abort command in progress */
+int qlogicfas_abort(Scsi_Cmnd * cmd)
+{
+ qabort = 1;
+ ql_zap();
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* reset SCSI bus */
+int qlogicfas_reset(Scsi_Cmnd * cmd, unsigned int flags)
+{
+ qabort = 2;
+ ql_zap();
+ return 1;
+}
+
+/*----------------------------------------------------------------*/
+/* return info string */
+const char *qlogicfas_info(struct Scsi_Host * host)
+{
+ return qinfo;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = QLOGICFAS;
+
+#include "scsi_module.c"
+#endif
+
diff --git a/linux/src/drivers/scsi/qlogicfas.h b/linux/src/drivers/scsi/qlogicfas.h
new file mode 100644
index 0000000..5a1dfdb
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicfas.h
@@ -0,0 +1,43 @@
+#ifndef _QLOGICFAS_H
+#define _QLOGICFAS_H
+
+int qlogicfas_detect(Scsi_Host_Template * );
+const char * qlogicfas_info(struct Scsi_Host *);
+int qlogicfas_command(Scsi_Cmnd *);
+int qlogicfas_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int qlogicfas_abort(Scsi_Cmnd *);
+int qlogicfas_reset(Scsi_Cmnd *, unsigned int flags);
+int qlogicfas_biosparam(Disk *, kdev_t, int[]);
+
+#ifndef NULL
+#define NULL (0)
+#endif
+
+#define QLOGICFAS { \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ qlogicfas_detect, \
+ NULL, \
+ qlogicfas_info, \
+ qlogicfas_command, \
+ qlogicfas_queuecommand, \
+ qlogicfas_abort, \
+ qlogicfas_reset, \
+ NULL, \
+ qlogicfas_biosparam, \
+ 0, \
+ -1, \
+ SG_ALL, \
+ 1, \
+ 0, \
+ 0, \
+ DISABLE_CLUSTERING \
+}
+
+#endif /* _QLOGICFAS_H */
+
+
+
diff --git a/linux/src/drivers/scsi/qlogicisp.c b/linux/src/drivers/scsi/qlogicisp.c
new file mode 100644
index 0000000..ebee05d
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicisp.c
@@ -0,0 +1,1767 @@
+/*
+ * QLogic ISP1020 Intelligent SCSI Processor Driver (PCI)
+ * Written by Erik H. Moe, ehm@cris.com
+ * Copyright 1995, Erik H. Moe
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/* Renamed and updated to 1.3.x by Michael Griffith <grif@cs.ucr.edu> */
+
+/*
+ * $Date: 2007/03/27 21:04:30 $
+ * $Revision: 1.1.4.2 $
+ *
+ * Revision 0.5 1995/09/22 02:23:15 root
+ * do auto request sense
+ *
+ * Revision 0.4 1995/08/07 04:44:33 root
+ * supply firmware with driver.
+ * numerous bug fixes/general cleanup of code.
+ *
+ * Revision 0.3 1995/07/16 16:15:39 root
+ * added reset/abort code.
+ *
+ * Revision 0.2 1995/06/29 03:14:19 root
+ * fixed biosparam.
+ * added queue protocol.
+ *
+ * Revision 0.1 1995/06/25 01:55:45 root
+ * Initial release.
+ *
+ */
+
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "sd.h"
+#include "hosts.h"
+#include "qlogicisp.h"
+
+/* Configuration section *****************************************************/
+
+/* Set the following macro to 1 to reload the ISP1020's firmware. This is
+ the latest firmware provided by QLogic. This may be an earlier/later
+ revision than supplied by your board. */
+
+#define RELOAD_FIRMWARE 0
+
+/* Set the following macro to 1 to reload the ISP1020's defaults from nvram.
+ If you are not sure of your settings, leave this alone, the driver will
+ use a set of 'safe' defaults */
+
+#define USE_NVRAM_DEFAULTS 0
+
+/* Macros used for debugging */
+
+#define DEBUG_ISP1020 0
+#define DEBUG_ISP1020_INT 0
+#define DEBUG_ISP1020_SETUP 0
+#define TRACE_ISP 0
+
+#define DEFAULT_LOOP_COUNT 1000000
+
+/* End Configuration section *************************************************/
+
+#include <linux/module.h>
+
+#if TRACE_ISP
+
+# define TRACE_BUF_LEN (32*1024)
+
+struct {
+ u_long next;
+ struct {
+ u_long time;
+ u_int index;
+ u_int addr;
+ u_char * name;
+ } buf[TRACE_BUF_LEN];
+} trace;
+
+#define TRACE(w, i, a) \
+{ \
+ unsigned long flags; \
+ \
+ save_flags(flags); \
+ cli(); \
+ trace.buf[trace.next].name = (w); \
+ trace.buf[trace.next].time = jiffies; \
+ trace.buf[trace.next].index = (i); \
+ trace.buf[trace.next].addr = (long) (a); \
+ trace.next = (trace.next + 1) & (TRACE_BUF_LEN - 1); \
+ restore_flags(flags); \
+}
+
+#else
+# define TRACE(w, i, a)
+#endif
+
+#if DEBUG_ISP1020
+#define ENTER(x) printk("isp1020 : entering %s()\n", x);
+#define LEAVE(x) printk("isp1020 : leaving %s()\n", x);
+#define DEBUG(x) x
+#else
+#define ENTER(x)
+#define LEAVE(x)
+#define DEBUG(x)
+#endif /* DEBUG_ISP1020 */
+
+#if DEBUG_ISP1020_INTR
+#define ENTER_INTR(x) printk("isp1020 : entering %s()\n", x);
+#define LEAVE_INTR(x) printk("isp1020 : leaving %s()\n", x);
+#define DEBUG_INTR(x) x
+#else
+#define ENTER_INTR(x)
+#define LEAVE_INTR(x)
+#define DEBUG_INTR(x)
+#endif /* DEBUG ISP1020_INTR */
+
+#define ISP1020_REV_ID 1
+
+#define MAX_TARGETS 16
+#define MAX_LUNS 8
+
+/* host configuration and control registers */
+#define HOST_HCCR 0xc0 /* host command and control */
+
+/* pci bus interface registers */
+#define PCI_ID_LOW 0x00 /* vendor id */
+#define PCI_ID_HIGH 0x02 /* device id */
+#define ISP_CFG0 0x04 /* configuration register #0 */
+#define ISP_CFG1 0x06 /* configuration register #1 */
+#define PCI_INTF_CTL 0x08 /* pci interface control */
+#define PCI_INTF_STS 0x0a /* pci interface status */
+#define PCI_SEMAPHORE 0x0c /* pci semaphore */
+#define PCI_NVRAM 0x0e /* pci nvram interface */
+
+/* mailbox registers */
+#define MBOX0 0x70 /* mailbox 0 */
+#define MBOX1 0x72 /* mailbox 1 */
+#define MBOX2 0x74 /* mailbox 2 */
+#define MBOX3 0x76 /* mailbox 3 */
+#define MBOX4 0x78 /* mailbox 4 */
+#define MBOX5 0x7a /* mailbox 5 */
+
+/* mailbox command complete status codes */
+#define MBOX_COMMAND_COMPLETE 0x4000
+#define INVALID_COMMAND 0x4001
+#define HOST_INTERFACE_ERROR 0x4002
+#define TEST_FAILED 0x4003
+#define COMMAND_ERROR 0x4005
+#define COMMAND_PARAM_ERROR 0x4006
+
+/* async event status codes */
+#define ASYNC_SCSI_BUS_RESET 0x8001
+#define SYSTEM_ERROR 0x8002
+#define REQUEST_TRANSFER_ERROR 0x8003
+#define RESPONSE_TRANSFER_ERROR 0x8004
+#define REQUEST_QUEUE_WAKEUP 0x8005
+#define EXECUTION_TIMEOUT_RESET 0x8006
+
+struct Entry_header {
+ u_char entry_type;
+ u_char entry_cnt;
+ u_char sys_def_1;
+ u_char flags;
+};
+
+/* entry header type commands */
+#define ENTRY_COMMAND 1
+#define ENTRY_CONTINUATION 2
+#define ENTRY_STATUS 3
+#define ENTRY_MARKER 4
+#define ENTRY_EXTENDED_COMMAND 5
+
+/* entry header flag definitions */
+#define EFLAG_CONTINUATION 1
+#define EFLAG_BUSY 2
+#define EFLAG_BAD_HEADER 4
+#define EFLAG_BAD_PAYLOAD 8
+
+struct dataseg {
+ u_int d_base;
+ u_int d_count;
+};
+
+struct Command_Entry {
+ struct Entry_header hdr;
+ u_int handle;
+ u_char target_lun;
+ u_char target_id;
+ u_short cdb_length;
+ u_short control_flags;
+ u_short rsvd;
+ u_short time_out;
+ u_short segment_cnt;
+ u_char cdb[12];
+ struct dataseg dataseg[4];
+};
+
+/* command entry control flag definitions */
+#define CFLAG_NODISC 0x01
+#define CFLAG_HEAD_TAG 0x02
+#define CFLAG_ORDERED_TAG 0x04
+#define CFLAG_SIMPLE_TAG 0x08
+#define CFLAG_TAR_RTN 0x10
+#define CFLAG_READ 0x20
+#define CFLAG_WRITE 0x40
+
+struct Ext_Command_Entry {
+ struct Entry_header hdr;
+ u_int handle;
+ u_char target_lun;
+ u_char target_id;
+ u_short cdb_length;
+ u_short control_flags;
+ u_short rsvd;
+ u_short time_out;
+ u_short segment_cnt;
+ u_char cdb[44];
+};
+
+struct Continuation_Entry {
+ struct Entry_header hdr;
+ u_int reserved;
+ struct dataseg dataseg[7];
+};
+
+struct Marker_Entry {
+ struct Entry_header hdr;
+ u_int reserved;
+ u_char target_lun;
+ u_char target_id;
+ u_char modifier;
+ u_char rsvd;
+ u_char rsvds[52];
+};
+
+/* marker entry modifier definitions */
+#define SYNC_DEVICE 0
+#define SYNC_TARGET 1
+#define SYNC_ALL 2
+
+struct Status_Entry {
+ struct Entry_header hdr;
+ u_int handle;
+ u_short scsi_status;
+ u_short completion_status;
+ u_short state_flags;
+ u_short status_flags;
+ u_short time;
+ u_short req_sense_len;
+ u_int residual;
+ u_char rsvd[8];
+ u_char req_sense_data[32];
+};
+
+/* status entry completion status definitions */
+#define CS_COMPLETE 0x0000
+#define CS_INCOMPLETE 0x0001
+#define CS_DMA_ERROR 0x0002
+#define CS_TRANSPORT_ERROR 0x0003
+#define CS_RESET_OCCURRED 0x0004
+#define CS_ABORTED 0x0005
+#define CS_TIMEOUT 0x0006
+#define CS_DATA_OVERRUN 0x0007
+#define CS_COMMAND_OVERRUN 0x0008
+#define CS_STATUS_OVERRUN 0x0009
+#define CS_BAD_MESSAGE 0x000a
+#define CS_NO_MESSAGE_OUT 0x000b
+#define CS_EXT_ID_FAILED 0x000c
+#define CS_IDE_MSG_FAILED 0x000d
+#define CS_ABORT_MSG_FAILED 0x000e
+#define CS_REJECT_MSG_FAILED 0x000f
+#define CS_NOP_MSG_FAILED 0x0010
+#define CS_PARITY_ERROR_MSG_FAILED 0x0011
+#define CS_DEVICE_RESET_MSG_FAILED 0x0012
+#define CS_ID_MSG_FAILED 0x0013
+#define CS_UNEXP_BUS_FREE 0x0014
+/* as per app note #83120-514-06a: */
+#define CS_DATA_UNDERRUN 0x0015
+#define CS_INVALID_ENTRY_TYPE 0x001b
+#define CS_DEVICE_QUEUE_FULL 0x001c
+#define CS_SCSI_PHASE_SKIPPED 0x001d
+#define CS_ARS_FAILED 0x001e /* auto Req. Sense failed */
+
+/* status entry state flag definitions */
+#define SF_GOT_BUS 0x0100
+#define SF_GOT_TARGET 0x0200
+#define SF_SENT_CDB 0x0400
+#define SF_TRANSFERRED_DATA 0x0800
+#define SF_GOT_STATUS 0x1000
+#define SF_GOT_SENSE 0x2000
+
+/* status entry status flag definitions */
+#define STF_DISCONNECT 0x0001
+#define STF_SYNCHRONOUS 0x0002
+#define STF_PARITY_ERROR 0x0004
+#define STF_BUS_RESET 0x0008
+#define STF_DEVICE_RESET 0x0010
+#define STF_ABORTED 0x0020
+#define STF_TIMEOUT 0x0040
+#define STF_NEGOTIATION 0x0080
+
+/* interface control commands */
+#define ISP_RESET 0x0001
+#define ISP_EN_INT 0x0002
+#define ISP_EN_RISC 0x0004
+
+/* host control commands */
+#define HCCR_NOP 0x0000
+#define HCCR_RESET 0x1000
+#define HCCR_PAUSE 0x2000
+#define HCCR_RELEASE 0x3000
+#define HCCR_SINGLE_STEP 0x4000
+#define HCCR_SET_HOST_INTR 0x5000
+#define HCCR_CLEAR_HOST_INTR 0x6000
+#define HCCR_CLEAR_RISC_INTR 0x7000
+#define HCCR_BP_ENABLE 0x8000
+#define HCCR_BIOS_DISABLE 0x9000
+#define HCCR_TEST_MODE 0xf000
+
+#define RISC_BUSY 0x0004
+
+/* mailbox commands */
+#define MBOX_NO_OP 0x0000
+#define MBOX_LOAD_RAM 0x0001
+#define MBOX_EXEC_FIRMWARE 0x0002
+#define MBOX_DUMP_RAM 0x0003
+#define MBOX_WRITE_RAM_WORD 0x0004
+#define MBOX_READ_RAM_WORD 0x0005
+#define MBOX_MAILBOX_REG_TEST 0x0006
+#define MBOX_VERIFY_CHECKSUM 0x0007
+#define MBOX_ABOUT_FIRMWARE 0x0008
+#define MBOX_CHECK_FIRMWARE 0x000e
+#define MBOX_INIT_REQ_QUEUE 0x0010
+#define MBOX_INIT_RES_QUEUE 0x0011
+#define MBOX_EXECUTE_IOCB 0x0012
+#define MBOX_WAKE_UP 0x0013
+#define MBOX_STOP_FIRMWARE 0x0014
+#define MBOX_ABORT 0x0015
+#define MBOX_ABORT_DEVICE 0x0016
+#define MBOX_ABORT_TARGET 0x0017
+#define MBOX_BUS_RESET 0x0018
+#define MBOX_STOP_QUEUE 0x0019
+#define MBOX_START_QUEUE 0x001a
+#define MBOX_SINGLE_STEP_QUEUE 0x001b
+#define MBOX_ABORT_QUEUE 0x001c
+#define MBOX_GET_DEV_QUEUE_STATUS 0x001d
+#define MBOX_GET_FIRMWARE_STATUS 0x001f
+#define MBOX_GET_INIT_SCSI_ID 0x0020
+#define MBOX_GET_SELECT_TIMEOUT 0x0021
+#define MBOX_GET_RETRY_COUNT 0x0022
+#define MBOX_GET_TAG_AGE_LIMIT 0x0023
+#define MBOX_GET_CLOCK_RATE 0x0024
+#define MBOX_GET_ACT_NEG_STATE 0x0025
+#define MBOX_GET_ASYNC_DATA_SETUP_TIME 0x0026
+#define MBOX_GET_PCI_PARAMS 0x0027
+#define MBOX_GET_TARGET_PARAMS 0x0028
+#define MBOX_GET_DEV_QUEUE_PARAMS 0x0029
+#define MBOX_SET_INIT_SCSI_ID 0x0030
+#define MBOX_SET_SELECT_TIMEOUT 0x0031
+#define MBOX_SET_RETRY_COUNT 0x0032
+#define MBOX_SET_TAG_AGE_LIMIT 0x0033
+#define MBOX_SET_CLOCK_RATE 0x0034
+#define MBOX_SET_ACTIVE_NEG_STATE 0x0035
+#define MBOX_SET_ASYNC_DATA_SETUP_TIME 0x0036
+#define MBOX_SET_PCI_CONTROL_PARAMS 0x0037
+#define MBOX_SET_TARGET_PARAMS 0x0038
+#define MBOX_SET_DEV_QUEUE_PARAMS 0x0039
+#define MBOX_RETURN_BIOS_BLOCK_ADDR 0x0040
+#define MBOX_WRITE_FOUR_RAM_WORDS 0x0041
+#define MBOX_EXEC_BIOS_IOCB 0x0042
+
+unsigned short risc_code_addr01 = 0x1000 ;
+
+#define PACKB(a, b) (((a)<<4)|(b))
+
+const u_char mbox_param[] = {
+ PACKB(1, 1), /* MBOX_NO_OP */
+ PACKB(5, 5), /* MBOX_LOAD_RAM */
+ PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */
+ PACKB(5, 5), /* MBOX_DUMP_RAM */
+ PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */
+ PACKB(2, 3), /* MBOX_READ_RAM_WORD */
+ PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */
+ PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */
+ PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */
+ PACKB(0, 0), /* 0x0009 */
+ PACKB(0, 0), /* 0x000a */
+ PACKB(0, 0), /* 0x000b */
+ PACKB(0, 0), /* 0x000c */
+ PACKB(0, 0), /* 0x000d */
+ PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */
+ PACKB(0, 0), /* 0x000f */
+ PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */
+ PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */
+ PACKB(4, 4), /* MBOX_EXECUTE_IOCB */
+ PACKB(2, 2), /* MBOX_WAKE_UP */
+ PACKB(1, 6), /* MBOX_STOP_FIRMWARE */
+ PACKB(4, 4), /* MBOX_ABORT */
+ PACKB(2, 2), /* MBOX_ABORT_DEVICE */
+ PACKB(3, 3), /* MBOX_ABORT_TARGET */
+ PACKB(2, 2), /* MBOX_BUS_RESET */
+ PACKB(2, 3), /* MBOX_STOP_QUEUE */
+ PACKB(2, 3), /* MBOX_START_QUEUE */
+ PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */
+ PACKB(2, 3), /* MBOX_ABORT_QUEUE */
+ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */
+ PACKB(0, 0), /* 0x001e */
+ PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */
+ PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */
+ PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */
+ PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */
+ PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */
+ PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */
+ PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */
+ PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */
+ PACKB(1, 3), /* MBOX_GET_PCI_PARAMS */
+ PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */
+ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */
+ PACKB(0, 0), /* 0x002a */
+ PACKB(0, 0), /* 0x002b */
+ PACKB(0, 0), /* 0x002c */
+ PACKB(0, 0), /* 0x002d */
+ PACKB(0, 0), /* 0x002e */
+ PACKB(0, 0), /* 0x002f */
+ PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */
+ PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */
+ PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */
+ PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */
+ PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */
+ PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */
+ PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */
+ PACKB(3, 3), /* MBOX_SET_PCI_CONTROL_PARAMS */
+ PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */
+ PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */
+ PACKB(0, 0), /* 0x003a */
+ PACKB(0, 0), /* 0x003b */
+ PACKB(0, 0), /* 0x003c */
+ PACKB(0, 0), /* 0x003d */
+ PACKB(0, 0), /* 0x003e */
+ PACKB(0, 0), /* 0x003f */
+ PACKB(1, 2), /* MBOX_RETURN_BIOS_BLOCK_ADDR */
+ PACKB(6, 1), /* MBOX_WRITE_FOUR_RAM_WORDS */
+ PACKB(2, 3) /* MBOX_EXEC_BIOS_IOCB */
+};
+
+#define MAX_MBOX_COMMAND (sizeof(mbox_param)/sizeof(u_short))
+
/*
 * Adapter-wide configuration, filled from NVRAM (isp1020_get_defaults)
 * or compiled-in defaults (isp1020_set_defaults) and pushed to the
 * firmware by isp1020_load_parameters().
 */
struct host_param {
	u_short fifo_threshold;			/* written to ISP_CFG1 */
	u_short host_adapter_enable;
	u_short initiator_scsi_id;		/* our own SCSI ID */
	u_short bus_reset_delay;		/* MBOX_BUS_RESET parameter */
	u_short retry_count;
	u_short retry_delay;
	u_short async_data_setup_time;
	u_short req_ack_active_negation;
	u_short data_line_active_negation;
	u_short data_dma_burst_enable;
	u_short command_dma_burst_enable;
	u_short tag_aging;			/* MBOX_SET_TAG_AGE_LIMIT */
	u_short selection_timeout;		/* MBOX_SET_SELECT_TIMEOUT */
	u_short max_queue_depth;		/* per-LUN queue depth */
};
+
/*
 * Device Flags:
 *
 * Bit  Name
 * ---------
 *  7   Disconnect Privilege
 *  6   Parity Checking
 *  5   Wide Data Transfers
 *  4   Synchronous Data Transfers
 *  3   Tagged Queuing
 *  2   Automatic Request Sense
 *  1   Stop Queue on Check Condition
 *  0   Renegotiate on Error
 */

/* Per-target parameters, sent via MBOX_SET_TARGET_PARAMS. */
struct dev_param {
	u_short device_flags;		/* bit flags described above */
	u_short execution_throttle;
	u_short synchronous_period;
	u_short synchronous_offset;
	u_short device_enable;		/* nonzero: configure this target */
	u_short reserved; /* pad */
};
+
/*
 * The result queue can be quite a bit smaller since continuation entries
 * do not show up there:
 */
#define RES_QUEUE_LEN ((QLOGICISP_REQ_QUEUE_LEN + 1) / 8 - 1)
#define QUEUE_ENTRY_LEN 64

/*
 * Per-host driver state, hung off Scsi_Host::hostdata.  The req/res
 * arrays are the IOCB rings shared with the ISP firmware via DMA;
 * indices are exchanged through mailbox registers MBOX4/MBOX5.
 */
struct isp1020_hostdata {
	u_char bus;			/* PCI bus number */
	u_char revision;		/* chip revision ID */
	u_char device_fn;		/* PCI device/function byte */
	struct host_param host_param;
	struct dev_param dev_param[MAX_TARGETS];

	/* result and request queues (shared with isp1020): */
	u_int req_in_ptr;		/* index of next request slot */
	u_int res_out_ptr;		/* index of next result slot */

	/* this is here so the queues are nicely aligned */
	long send_marker;		/* do we need to send a marker? */

	char res[RES_QUEUE_LEN+1][QUEUE_ENTRY_LEN];
	char req[QLOGICISP_REQ_QUEUE_LEN+1][QUEUE_ENTRY_LEN];
};
+
/* queue length's _must_ be power of two: */
/*
 * Number of occupied slots in a circular queue whose size is ql+1
 * (ql being a power-of-two mask).  Every macro argument is fully
 * parenthesized so low-precedence operators in caller expressions
 * (e.g. bitwise OR) cannot change the result -- the previous
 * definition expanded `in' and `out' unparenthesized.
 */
#define QUEUE_DEPTH(in, out, ql)	(((in) - (out)) & (ql))
#define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, 		     \
						    QLOGICISP_REQ_QUEUE_LEN)
#define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
+
/* Map interrupt line -> registered host, used by the interrupt
 * handler since request_irq() is called with a NULL dev_id. */
struct Scsi_Host *irq2host[NR_IRQS];

/* Forward declarations for the driver-internal helpers below. */
static void isp1020_enable_irqs(struct Scsi_Host *);
static void isp1020_disable_irqs(struct Scsi_Host *);
static int isp1020_init(struct Scsi_Host *);
static int isp1020_reset_hardware(struct Scsi_Host *);
static int isp1020_set_defaults(struct Scsi_Host *);
static int isp1020_load_parameters(struct Scsi_Host *);
static int isp1020_mbox_command(struct Scsi_Host *, u_short []);
static int isp1020_return_status(struct Status_Entry *);
static void isp1020_intr_handler(int, void *, struct pt_regs *);

#if USE_NVRAM_DEFAULTS
static int isp1020_get_defaults(struct Scsi_Host *);
static int isp1020_verify_nvram(struct Scsi_Host *);
static u_short isp1020_read_nvram_word(struct Scsi_Host *, u_short);
#endif

#if DEBUG_ISP1020
static void isp1020_print_scsi_cmd(Scsi_Cmnd *);
#endif
#if DEBUG_ISP1020_INTR
static void isp1020_print_status_entry(struct Status_Entry *);
#endif

/* /proc/scsi/isp1020 directory entry registered in isp1020_detect(). */
static struct proc_dir_entry proc_scsi_isp1020 = {
	PROC_SCSI_QLOGICISP, 7, "isp1020",
	S_IFDIR | S_IRUGO | S_IXUGO, 2
};
+
+
+static inline void isp1020_enable_irqs(struct Scsi_Host *host)
+{
+ outw(ISP_EN_INT|ISP_EN_RISC, host->io_port + PCI_INTF_CTL);
+}
+
+
+static inline void isp1020_disable_irqs(struct Scsi_Host *host)
+{
+ outw(0x0, host->io_port + PCI_INTF_CTL);
+}
+
+
/*
 * Probe the PCI bus for QLogic ISP1020 adapters and register a SCSI
 * host for each board that initializes successfully.  Returns the
 * number of hosts registered.
 *
 * NOTE(review): scsi_register()'s result is dereferenced without a
 * NULL check, and host->irq indexes irq2host[] without a bound check
 * against NR_IRQS -- confirm both are guaranteed by the mid-layer.
 */
int isp1020_detect(Scsi_Host_Template *tmpt)
{
	int hosts = 0;
	u_short index;
	u_char bus, device_fn;
	struct Scsi_Host *host;
	struct isp1020_hostdata *hostdata;

	ENTER("isp1020_detect");

	tmpt->proc_dir = &proc_scsi_isp1020;

	if (pcibios_present() == 0) {
		printk("qlogicisp : PCI bios not present\n");
		return 0;
	}

	memset(irq2host, 0, sizeof(irq2host));

	/* Walk every ISP1020 function the PCI BIOS can enumerate. */
	for (index = 0; pcibios_find_device(PCI_VENDOR_ID_QLOGIC,
					    PCI_DEVICE_ID_QLOGIC_ISP1020,
					    index, &bus, &device_fn) == 0;
	     index++)
	{
		host = scsi_register(tmpt, sizeof(struct isp1020_hostdata));
		hostdata = (struct isp1020_hostdata *) host->hostdata;

		memset(hostdata, 0, sizeof(struct isp1020_hostdata));
		hostdata->bus = bus;
		hostdata->device_fn = device_fn;

		/* Bring the board up; any failure skips this board. */
		if (isp1020_init(host) || isp1020_reset_hardware(host)
#if USE_NVRAM_DEFAULTS
		    || isp1020_get_defaults(host)
#else
		    || isp1020_set_defaults(host)
#endif /* USE_NVRAM_DEFAULTS */
		    || isp1020_load_parameters(host)) {
			scsi_unregister(host);
			continue;
		}

		host->this_id = hostdata->host_param.initiator_scsi_id;

		if (request_irq(host->irq, isp1020_intr_handler, SA_INTERRUPT,
				"qlogicisp", NULL))
		{
			printk("qlogicisp : interrupt %d already in use\n",
			       host->irq);
			scsi_unregister(host);
			continue;
		}

		if (check_region(host->io_port, 0xff)) {
			printk("qlogicisp : i/o region 0x%04x-0x%04x already "
			       "in use\n",
			       host->io_port, host->io_port + 0xff);
			free_irq(host->irq, NULL);
			scsi_unregister(host);
			continue;
		}

		request_region(host->io_port, 0xff, "qlogicisp");
		irq2host[host->irq] = host;

		/* Clear any latched RISC interrupt, then unmask. */
		outw(0x0, host->io_port + PCI_SEMAPHORE);
		outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
		isp1020_enable_irqs(host);

		hosts++;
	}

	LEAVE("isp1020_detect");

	return hosts;
}
+
+
+int isp1020_release(struct Scsi_Host *host)
+{
+ struct isp1020_hostdata *hostdata;
+
+ ENTER("isp1020_release");
+
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ outw(0x0, host->io_port + PCI_INTF_CTL);
+ free_irq(host->irq, NULL);
+
+ release_region(host->io_port, 0xff);
+
+ LEAVE("isp1020_release");
+
+ return 0;
+}
+
+
+const char *isp1020_info(struct Scsi_Host *host)
+{
+ static char buf[80];
+ struct isp1020_hostdata *hostdata;
+
+ ENTER("isp1020_info");
+
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+ sprintf(buf,
+ "QLogic ISP1020 SCSI on PCI bus %d device %d irq %d base 0x%x",
+ hostdata->bus, (hostdata->device_fn & 0xf8) >> 3, host->irq,
+ host->io_port);
+
+ LEAVE("isp1020_info");
+
+ return buf;
+}
+
+
/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 *
 * Build a Command IOCB (plus Continuation IOCBs when the scatter list
 * exceeds the four data segments of the command entry) in the shared
 * request ring, then hand it to the firmware by advancing the MBOX4
 * "in" index.  Returns 0 on success, 1 when the ring is full.
 */
int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
{
	int i, sg_count, n, num_free;
	u_int in_ptr, out_ptr;
	struct dataseg * ds;
	struct scatterlist *sg;
	struct Command_Entry *cmd;
	struct Continuation_Entry *cont;
	struct Scsi_Host *host;
	struct isp1020_hostdata *hostdata;

	ENTER("isp1020_queuecommand");

	host = Cmnd->host;
	hostdata = (struct isp1020_hostdata *) host->hostdata;
	Cmnd->scsi_done = done;

	DEBUG(isp1020_print_scsi_cmd(Cmnd));

	/* MBOX4 holds the firmware's current consume ("out") index. */
	out_ptr = inw(host->io_port + MBOX4);
	in_ptr = hostdata->req_in_ptr;

	DEBUG(printk("qlogicisp : request queue depth %d\n",
		     REQ_QUEUE_DEPTH(in_ptr, out_ptr)));

	cmd = (struct Command_Entry *) &hostdata->req[in_ptr][0];
	in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
	if (in_ptr == out_ptr) {
		printk("qlogicisp : request queue overflow\n");
		return 1;
	}

	/* After a bus reset the firmware expects a marker IOCB before
	 * any new command (see send_marker in the interrupt handler). */
	if (hostdata->send_marker) {
		struct Marker_Entry *marker;

		TRACE("queue marker", in_ptr, 0);

		DEBUG(printk("qlogicisp : adding marker entry\n"));
		marker = (struct Marker_Entry *) cmd;
		memset(marker, 0, sizeof(struct Marker_Entry));

		marker->hdr.entry_type = ENTRY_MARKER;
		marker->hdr.entry_cnt = 1;
		marker->modifier = SYNC_ALL;

		hostdata->send_marker = 0;

		if (((in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN) == out_ptr) {
			/* No slot left for the command itself: publish
			 * the marker anyway and fail this command. */
			outw(in_ptr, host->io_port + MBOX4);
			hostdata->req_in_ptr = in_ptr;
			printk("qlogicisp : request queue overflow\n");
			return 1;
		}
		cmd = (struct Command_Entry *) &hostdata->req[in_ptr][0];
		in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
	}

	TRACE("queue command", in_ptr, Cmnd);

	memset(cmd, 0, sizeof(struct Command_Entry));

	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->hdr.entry_cnt = 1;

	/* The command's bus address doubles as the completion handle
	 * that the interrupt handler maps back with bus_to_virt(). */
	cmd->handle = (u_int) virt_to_bus(Cmnd);
	cmd->target_lun = Cmnd->lun;
	cmd->target_id = Cmnd->target;
	cmd->cdb_length = Cmnd->cmd_len;
	cmd->control_flags = CFLAG_READ | CFLAG_WRITE;
	cmd->time_out = 30;

	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);

	if (Cmnd->use_sg) {
		cmd->segment_cnt = sg_count = Cmnd->use_sg;
		sg = (struct scatterlist *) Cmnd->request_buffer;
		ds = cmd->dataseg;

		/* fill in first four sg entries: */
		n = sg_count;
		if (n > 4)
			n = 4;
		for (i = 0; i < n; i++) {
			ds[i].d_base = (u_int) virt_to_bus(sg->address);
			ds[i].d_count = sg->length;
			++sg;
		}
		sg_count -= 4;

		/* Remaining segments go into continuation IOCBs,
		 * seven per entry. */
		while (sg_count > 0) {
			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *)
				&hostdata->req[in_ptr][0];
			in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
			if (in_ptr == out_ptr) {
				printk("isp1020: unexpected request queue "
				       "overflow\n");
				return 1;
			}
			TRACE("queue continuation", in_ptr, 0);
			cont->hdr.entry_type = ENTRY_CONTINUATION;
			cont->hdr.entry_cnt = 0;
			cont->hdr.sys_def_1 = 0;
			cont->hdr.flags = 0;
			cont->reserved = 0;
			ds = cont->dataseg;
			n = sg_count;
			if (n > 7)
				n = 7;
			for (i = 0; i < n; ++i) {
				ds[i].d_base = (u_int)virt_to_bus(sg->address);
				ds[i].d_count = sg->length;
				++sg;
			}
			sg_count -= n;
		}
	} else {
		/* Single contiguous buffer. */
		cmd->dataseg[0].d_base =
			(u_int) virt_to_bus(Cmnd->request_buffer);
		cmd->dataseg[0].d_count =
			(u_int) Cmnd->request_bufflen;
		cmd->segment_cnt = 1;
	}

	/* Publish the new "in" index to the firmware. */
	outw(in_ptr, host->io_port + MBOX4);
	hostdata->req_in_ptr = in_ptr;

	/* Throttle the mid-layer to the space actually left in the ring. */
	num_free = QLOGICISP_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
	host->can_queue = host->host_busy + num_free;
	host->sg_tablesize = QLOGICISP_MAX_SG(num_free);

	LEAVE("isp1020_queuecommand");

	return 0;
}
+
+
/* PCI_SEMAPHORE bit: the firmware has latched an async mailbox event. */
#define ASYNC_EVENT_INTERRUPT	0x01

/*
 * Interrupt handler: acknowledge the RISC interrupt, service any
 * asynchronous mailbox event behind the PCI semaphore, then drain the
 * response ring, completing each finished command via scsi_done.
 */
void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	Scsi_Cmnd *Cmnd;
	struct Status_Entry *sts;
	struct Scsi_Host *host;
	struct isp1020_hostdata *hostdata;
	u_int in_ptr, out_ptr;
	u_short status;

	ENTER_INTR("isp1020_intr_handler");

	host = irq2host[irq];
	if (!host) {
		printk("qlogicisp : unexpected interrupt on line %d\n", irq);
		return;
	}
	hostdata = (struct isp1020_hostdata *) host->hostdata;

	DEBUG_INTR(printk("qlogicisp : interrupt on line %d\n", irq));

	if (!(inw(host->io_port + PCI_INTF_STS) & 0x04)) {
		/* spurious interrupts can happen legally */
		DEBUG_INTR(printk("qlogicisp: got spurious interrupt\n"));
		return;
	}
	/* MBOX5 holds the firmware's response-ring produce index. */
	in_ptr = inw(host->io_port + MBOX5);
	outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);

	if ((inw(host->io_port + PCI_SEMAPHORE) & ASYNC_EVENT_INTERRUPT)) {
		status = inw(host->io_port + MBOX0);

		DEBUG_INTR(printk("qlogicisp : mbox completion status: %x\n",
				  status));

		switch (status) {
		case ASYNC_SCSI_BUS_RESET:
		case EXECUTION_TIMEOUT_RESET:
			/* firmware wants a marker IOCB before new work */
			hostdata->send_marker = 1;
			break;
		case INVALID_COMMAND:
		case HOST_INTERFACE_ERROR:
		case COMMAND_ERROR:
		case COMMAND_PARAM_ERROR:
			printk("qlogicisp : bad mailbox return status\n");
			break;
		}
		outw(0x0, host->io_port + PCI_SEMAPHORE);
	}
	out_ptr = hostdata->res_out_ptr;

	DEBUG_INTR(printk("qlogicisp : response queue update\n"));
	/* NOTE(review): QUEUE_DEPTH takes three arguments; this debug
	 * call passes two and will not compile when DEBUG_ISP1020_INTR
	 * is enabled -- presumably RES_QUEUE_DEPTH was intended. */
	DEBUG_INTR(printk("qlogicisp : response queue depth %d\n",
			  QUEUE_DEPTH(in_ptr, out_ptr)));

	while (out_ptr != in_ptr) {
		sts = (struct Status_Entry *) &hostdata->res[out_ptr][0];
		out_ptr = (out_ptr + 1) & RES_QUEUE_LEN;

		/* handle is the bus address stored by queuecommand */
		Cmnd = (Scsi_Cmnd *) bus_to_virt(sts->handle);

		TRACE("done", out_ptr, Cmnd);

		if (sts->completion_status == CS_RESET_OCCURRED
		    || sts->completion_status == CS_ABORTED
		    || (sts->status_flags & STF_BUS_RESET))
			hostdata->send_marker = 1;

		if (sts->state_flags & SF_GOT_SENSE)
			memcpy(Cmnd->sense_buffer, sts->req_sense_data,
			       sizeof(Cmnd->sense_buffer));

		DEBUG_INTR(isp1020_print_status_entry(sts));

		if (sts->hdr.entry_type == ENTRY_STATUS)
			Cmnd->result = isp1020_return_status(sts);
		else
			Cmnd->result = DID_ERROR << 16;

		/* return the consumed slot to the firmware */
		outw(out_ptr, host->io_port + MBOX5);
		(*Cmnd->scsi_done)(Cmnd);
	}
	hostdata->res_out_ptr = out_ptr;

	LEAVE_INTR("isp1020_intr_handler");
}
+
+
/*
 * Translate a firmware Status IOCB into a mid-layer result code:
 * (host byte << 16) | (SCSI status byte & STATUS_MASK).
 */
static int isp1020_return_status(struct Status_Entry *sts)
{
	int host_status = DID_ERROR;
#if DEBUG_ISP1020_INTR
	/* Names indexed by DID_* value; assumes DID_OK..DID_BAD_INTR
	 * are the consecutive values 0..9 -- TODO confirm. */
	static char *reason[] = {
		"DID_OK",
		"DID_NO_CONNECT",
		"DID_BUS_BUSY",
		"DID_TIME_OUT",
		"DID_BAD_TARGET",
		"DID_ABORT",
		"DID_PARITY",
		"DID_ERROR",
		"DID_RESET",
		"DID_BAD_INTR"
	};
#endif /* DEBUG_ISP1020_INTR */

	ENTER("isp1020_return_status");

	DEBUG(printk("qlogicisp : completion status = 0x%04x\n",
		     sts->completion_status));

	switch(sts->completion_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;
	case CS_INCOMPLETE:
		/* Map "how far did the transaction get" flags onto the
		 * closest mid-layer host status. */
		if (!(sts->state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(sts->state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(sts->state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;
	case CS_DMA_ERROR:
	case CS_TRANSPORT_ERROR:
		host_status = DID_ERROR;
		break;
	case CS_RESET_OCCURRED:
		host_status = DID_RESET;
		break;
	case CS_ABORTED:
		host_status = DID_ABORT;
		break;
	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;
	case CS_DATA_OVERRUN:
	case CS_COMMAND_OVERRUN:
	case CS_STATUS_OVERRUN:
	case CS_BAD_MESSAGE:
	case CS_NO_MESSAGE_OUT:
	case CS_EXT_ID_FAILED:
	case CS_IDE_MSG_FAILED:
	case CS_ABORT_MSG_FAILED:
	case CS_NOP_MSG_FAILED:
	case CS_PARITY_ERROR_MSG_FAILED:
	case CS_DEVICE_RESET_MSG_FAILED:
	case CS_ID_MSG_FAILED:
	case CS_UNEXP_BUS_FREE:
	case CS_INVALID_ENTRY_TYPE:
	case CS_DEVICE_QUEUE_FULL:
	case CS_SCSI_PHASE_SKIPPED:
	case CS_ARS_FAILED:
		host_status = DID_ERROR;
		break;
	case CS_DATA_UNDERRUN:
		/* underrun is reported as success; the SCSI status
		 * byte still reaches the caller */
		host_status = DID_OK;
		break;
	default:
		printk("qlogicisp : unknown completion status 0x%04x\n",
		       sts->completion_status);
		host_status = DID_ERROR;
		break;
	}

	DEBUG_INTR(printk("qlogicisp : host status (%s) scsi status %x\n",
			  reason[host_status], sts->scsi_status));

	LEAVE("isp1020_return_status");

	return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
}
+
+
+int isp1020_abort(Scsi_Cmnd *Cmnd)
+{
+ u_short param[6];
+ struct Scsi_Host *host;
+ struct isp1020_hostdata *hostdata;
+ int return_status = SCSI_ABORT_SUCCESS;
+ u_int cmdaddr = virt_to_bus(Cmnd);
+
+ ENTER("isp1020_abort");
+
+ host = Cmnd->host;
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ isp1020_disable_irqs(host);
+
+ param[0] = MBOX_ABORT;
+ param[1] = (((u_short) Cmnd->target) << 8) | Cmnd->lun;
+ param[2] = cmdaddr >> 16;
+ param[3] = cmdaddr & 0xffff;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicisp : scsi abort failure: %x\n", param[0]);
+ return_status = SCSI_ABORT_ERROR;
+ }
+
+ isp1020_enable_irqs(host);
+
+ LEAVE("isp1020_abort");
+
+ return return_status;
+}
+
+
+int isp1020_reset(Scsi_Cmnd *Cmnd, unsigned int reset_flags)
+{
+ u_short param[6];
+ struct Scsi_Host *host;
+ struct isp1020_hostdata *hostdata;
+ int return_status = SCSI_RESET_SUCCESS;
+
+ ENTER("isp1020_reset");
+
+ host = Cmnd->host;
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ param[0] = MBOX_BUS_RESET;
+ param[1] = hostdata->host_param.bus_reset_delay;
+
+ isp1020_disable_irqs(host);
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicisp : scsi bus reset failure: %x\n", param[0]);
+ return_status = SCSI_RESET_ERROR;
+ }
+
+ isp1020_enable_irqs(host);
+
+ LEAVE("isp1020_reset");
+
+ return return_status;;
+}
+
+
+int isp1020_biosparam(Disk *disk, kdev_t n, int ip[])
+{
+ int size = disk->capacity;
+
+ ENTER("isp1020_biosparam");
+
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ if (ip[2] > 1024) {
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = size / (ip[0] * ip[1]);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+
+ LEAVE("isp1020_biosparam");
+
+ return 0;
+}
+
+
/*
 * Hard-reset the ISP chip, optionally reload the RISC firmware one
 * word at a time via MBOX_WRITE_RAM_WORD, verify the firmware
 * checksum, and start it executing.  Returns 0 on success, 1 on any
 * mailbox failure.
 */
static int isp1020_reset_hardware(struct Scsi_Host *host)
{
	u_short param[6];
	int loop_count;

	ENTER("isp1020_reset_hardware");

	outw(ISP_RESET, host->io_port + PCI_INTF_CTL);
	outw(HCCR_RESET, host->io_port + HOST_HCCR);
	outw(HCCR_RELEASE, host->io_port + HOST_HCCR);
	outw(HCCR_BIOS_DISABLE, host->io_port + HOST_HCCR);

	/* Bounded wait for the RISC processor to come out of reset. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && inw(host->io_port + HOST_HCCR) == RISC_BUSY)
		barrier();
	if (!loop_count)
		printk("qlogicisp: reset_hardware loop timeout\n");

	outw(0, host->io_port + ISP_CFG1);

#if DEBUG_ISP1020
	printk("qlogicisp : mbox 0 0x%04x \n", inw(host->io_port + MBOX0));
	printk("qlogicisp : mbox 1 0x%04x \n", inw(host->io_port + MBOX1));
	printk("qlogicisp : mbox 2 0x%04x \n", inw(host->io_port + MBOX2));
	printk("qlogicisp : mbox 3 0x%04x \n", inw(host->io_port + MBOX3));
	printk("qlogicisp : mbox 4 0x%04x \n", inw(host->io_port + MBOX4));
	printk("qlogicisp : mbox 5 0x%04x \n", inw(host->io_port + MBOX5));
#endif /* DEBUG_ISP1020 */

	DEBUG(printk("qlogicisp : loading risc ram\n"));

#if RELOAD_FIRMWARE
	{
		/* Download risc_code01[] word by word starting at
		 * risc_code_addr01. */
		int i;
		for (i = 0; i < risc_code_length01; i++) {
			param[0] = MBOX_WRITE_RAM_WORD;
			param[1] = risc_code_addr01 + i;
			param[2] = risc_code01[i];

			isp1020_mbox_command(host, param);

			if (param[0] != MBOX_COMMAND_COMPLETE) {
				printk("qlogicisp : firmware load failure\n");
				return 1;
			}
		}
	}
#endif /* RELOAD_FIRMWARE */

	DEBUG(printk("qlogicisp : verifying checksum\n"));

	param[0] = MBOX_VERIFY_CHECKSUM;
	param[1] = risc_code_addr01;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		printk("qlogicisp : ram checksum failure\n");
		return 1;
	}

	DEBUG(printk("qlogicisp : executing firmware\n"));

	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr01;

	isp1020_mbox_command(host, param);

	/* Query the now-running firmware for its revision. */
	param[0] = MBOX_ABOUT_FIRMWARE;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		printk("qlogicisp : about firmware failure\n");
		return 1;
	}

	DEBUG(printk("qlogicisp : firmware major revision %d\n", param[1]));
	DEBUG(printk("qlogicisp : firmware minor revision %d\n", param[2]));

	LEAVE("isp1020_reset_hardware");

	return 0;
}
+
+
/*
 * Read this adapter's PCI configuration space, sanity-check the IDs
 * and command register, and record io_port/irq/revision in the host
 * structure.  Returns 0 on success, 1 on any error.
 */
static int isp1020_init(struct Scsi_Host *sh)
{
	u_int io_base;
	struct isp1020_hostdata *hostdata;
	u_char bus, device_fn, revision, irq;
	u_short vendor_id, device_id, command;

	ENTER("isp1020_init");

	hostdata = (struct isp1020_hostdata *) sh->hostdata;
	bus = hostdata->bus;
	device_fn = hostdata->device_fn;

	if (pcibios_read_config_word(bus, device_fn, PCI_VENDOR_ID, &vendor_id)
	    || pcibios_read_config_word(bus, device_fn,
					PCI_DEVICE_ID, &device_id)
	    || pcibios_read_config_word(bus, device_fn,
					PCI_COMMAND, &command)
	    || pcibios_read_config_dword(bus, device_fn,
					 PCI_BASE_ADDRESS_0, &io_base)
	    || pcibios_read_config_byte(bus, device_fn,
					PCI_CLASS_REVISION, &revision)
	    || pcibios_read_config_byte(bus, device_fn,
					PCI_INTERRUPT_LINE, &irq))
	{
		printk("qlogicisp : error reading PCI configuration\n");
		return 1;
	}

	if (vendor_id != PCI_VENDOR_ID_QLOGIC) {
		printk("qlogicisp : 0x%04x is not QLogic vendor ID\n",
		       vendor_id);
		return 1;
	}

	if (device_id != PCI_DEVICE_ID_QLOGIC_ISP1020) {
		printk("qlogicisp : 0x%04x does not match ISP1020 device id\n",
		       device_id);
		return 1;
	}

	/* BAR low bits == 1 marks an I/O-space base address register. */
	if (command & PCI_COMMAND_IO && (io_base & 3) == 1)
		io_base &= PCI_BASE_ADDRESS_IO_MASK;
	else {
		printk("qlogicisp : i/o mapping is disabled\n");
		return 1;
	}

	if (!(command & PCI_COMMAND_MASTER)) {
		printk("qlogicisp : bus mastering is disabled\n");
		return 1;
	}

	if (revision != ISP1020_REV_ID)
		printk("qlogicisp : new isp1020 revision ID (%d)\n", revision);

	/* Cross-check: the chip mirrors its PCI IDs in I/O space. */
	if (inw(io_base + PCI_ID_LOW) != PCI_VENDOR_ID_QLOGIC
	    || inw(io_base + PCI_ID_HIGH) != PCI_DEVICE_ID_QLOGIC_ISP1020)
	{
		printk("qlogicisp : can't decode i/o address space at 0x%x\n",
		       io_base);
		return 1;
	}

	hostdata->revision = revision;

	sh->irq = irq;
	sh->io_port = io_base;

	LEAVE("isp1020_init");

	return 0;
}
+
+
#if USE_NVRAM_DEFAULTS

/*
 * Load adapter and per-target parameters from the serial NVRAM.
 * Falls back to isp1020_set_defaults() when the NVRAM signature or
 * checksum is invalid.  Returns 0 on success.
 *
 * NOTE(review): the bit extractions below assume the ISP1020 NVRAM
 * format rev 2 word layout -- confirm against QLogic documentation.
 */
static int isp1020_get_defaults(struct Scsi_Host *host)
{
	int i;
	u_short value;
	struct isp1020_hostdata *hostdata =
		(struct isp1020_hostdata *) host->hostdata;

	ENTER("isp1020_get_defaults");

	if (!isp1020_verify_nvram(host)) {
		printk("qlogicisp : nvram checksum failure\n");
		printk("qlogicisp : attempting to use default parameters\n");
		return isp1020_set_defaults(host);
	}

	/* word 2: fifo threshold / adapter enable / our SCSI ID */
	value = isp1020_read_nvram_word(host, 2);
	hostdata->host_param.fifo_threshold = (value >> 8) & 0x03;
	hostdata->host_param.host_adapter_enable = (value >> 11) & 0x01;
	hostdata->host_param.initiator_scsi_id = (value >> 12) & 0x0f;

	/* word 3: bus reset delay / retry count */
	value = isp1020_read_nvram_word(host, 3);
	hostdata->host_param.bus_reset_delay = value & 0xff;
	hostdata->host_param.retry_count = value >> 8;

	/* word 4: retry delay plus negation/burst option bits */
	value = isp1020_read_nvram_word(host, 4);
	hostdata->host_param.retry_delay = value & 0xff;
	hostdata->host_param.async_data_setup_time = (value >> 8) & 0x0f;
	hostdata->host_param.req_ack_active_negation = (value >> 12) & 0x01;
	hostdata->host_param.data_line_active_negation = (value >> 13) & 0x01;
	hostdata->host_param.data_dma_burst_enable = (value >> 14) & 0x01;
	hostdata->host_param.command_dma_burst_enable = (value >> 15);

	/* word 5: tag age limit */
	value = isp1020_read_nvram_word(host, 5);
	hostdata->host_param.tag_aging = value & 0xff;

	/* word 6: selection timeout */
	value = isp1020_read_nvram_word(host, 6);
	hostdata->host_param.selection_timeout = value & 0xffff;

	/* word 7: maximum queue depth */
	value = isp1020_read_nvram_word(host, 7);
	hostdata->host_param.max_queue_depth = value & 0xffff;

#if DEBUG_ISP1020_SETUP
	printk("qlogicisp : fifo threshold=%d\n",
	       hostdata->host_param.fifo_threshold);
	printk("qlogicisp : initiator scsi id=%d\n",
	       hostdata->host_param.initiator_scsi_id);
	printk("qlogicisp : bus reset delay=%d\n",
	       hostdata->host_param.bus_reset_delay);
	printk("qlogicisp : retry count=%d\n",
	       hostdata->host_param.retry_count);
	printk("qlogicisp : retry delay=%d\n",
	       hostdata->host_param.retry_delay);
	printk("qlogicisp : async data setup time=%d\n",
	       hostdata->host_param.async_data_setup_time);
	printk("qlogicisp : req/ack active negation=%d\n",
	       hostdata->host_param.req_ack_active_negation);
	printk("qlogicisp : data line active negation=%d\n",
	       hostdata->host_param.data_line_active_negation);
	printk("qlogicisp : data DMA burst enable=%d\n",
	       hostdata->host_param.data_dma_burst_enable);
	printk("qlogicisp : command DMA burst enable=%d\n",
	       hostdata->host_param.command_dma_burst_enable);
	printk("qlogicisp : tag age limit=%d\n",
	       hostdata->host_param.tag_aging);
	printk("qlogicisp : selection timeout limit=%d\n",
	       hostdata->host_param.selection_timeout);
	printk("qlogicisp : max queue depth=%d\n",
	       hostdata->host_param.max_queue_depth);
#endif /* DEBUG_ISP1020_SETUP */

	/* Per-target parameters occupy three NVRAM words each,
	 * starting at word 14. */
	for (i = 0; i < MAX_TARGETS; i++) {

		value = isp1020_read_nvram_word(host, 14 + i * 3);
		hostdata->dev_param[i].device_flags = value & 0xff;
		hostdata->dev_param[i].execution_throttle = value >> 8;

		value = isp1020_read_nvram_word(host, 15 + i * 3);
		hostdata->dev_param[i].synchronous_period = value & 0xff;
		hostdata->dev_param[i].synchronous_offset = (value >> 8) & 0x0f;
		hostdata->dev_param[i].device_enable = (value >> 12) & 0x01;

#if DEBUG_ISP1020_SETUP
		printk("qlogicisp : target 0x%02x\n", i);
		printk("qlogicisp : device flags=0x%02x\n",
		       hostdata->dev_param[i].device_flags);
		printk("qlogicisp : execution throttle=%d\n",
		       hostdata->dev_param[i].execution_throttle);
		printk("qlogicisp : synchronous period=%d\n",
		       hostdata->dev_param[i].synchronous_period);
		printk("qlogicisp : synchronous offset=%d\n",
		       hostdata->dev_param[i].synchronous_offset);
		printk("qlogicisp : device enable=%d\n",
		       hostdata->dev_param[i].device_enable);
#endif /* DEBUG_ISP1020_SETUP */
	}

	LEAVE("isp1020_get_defaults");

	return 0;
}
+
+
+#define ISP1020_NVRAM_LEN 0x40
+#define ISP1020_NVRAM_SIG1 0x5349
+#define ISP1020_NVRAM_SIG2 0x2050
+
+static int isp1020_verify_nvram(struct Scsi_Host *host)
+{
+ int i;
+ u_short value;
+ u_char checksum = 0;
+
+ for (i = 0; i < ISP1020_NVRAM_LEN; i++) {
+ value = isp1020_read_nvram_word(host, i);
+
+ switch (i) {
+ case 0:
+ if (value != ISP1020_NVRAM_SIG1) return 0;
+ break;
+ case 1:
+ if (value != ISP1020_NVRAM_SIG2) return 0;
+ break;
+ case 2:
+ if ((value & 0xff) != 0x02) return 0;
+ break;
+ }
+ checksum += value & 0xff;
+ checksum += value >> 8;
+ }
+
+ return (checksum == 0);
+}
+
#define NVRAM_DELAY() udelay(2) /* 2 microsecond delay */


/*
 * Bit-bang one 16-bit word out of the serial NVRAM behind the
 * PCI_NVRAM register.  `byte' is the word address (0..0x3f); a start
 * bit and READ opcode are shifted out MSB first, then 16 data bits
 * are clocked back in.  Register bits used: 0x1 clock, 0x2 chip
 * select, 0x4 data out, 0x8 data in.
 *
 * NOTE(review): assumes the 2us NVRAM_DELAY satisfies the EEPROM's
 * timing requirements -- confirm against the part's datasheet.
 */
u_short isp1020_read_nvram_word(struct Scsi_Host *host, u_short byte)
{
	int i;
	u_short value, output, input;

	/* 0x0180 = start bit + READ opcode above the 6 address bits. */
	byte &= 0x3f; byte |= 0x0180;

	for (i = 8; i >= 0; i--) {
		output = ((byte >> i) & 0x1) ? 0x4 : 0x0;
		/* Present the data bit, pulse the clock high, then low. */
		outw(output | 0x2, host->io_port + PCI_NVRAM); NVRAM_DELAY();
		outw(output | 0x3, host->io_port + PCI_NVRAM); NVRAM_DELAY();
		outw(output | 0x2, host->io_port + PCI_NVRAM); NVRAM_DELAY();
	}

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0xf, value = 0; i >= 0; i--) {
		value <<= 1;
		outw(0x3, host->io_port + PCI_NVRAM); NVRAM_DELAY();
		input = inw(host->io_port + PCI_NVRAM); NVRAM_DELAY();
		outw(0x2, host->io_port + PCI_NVRAM); NVRAM_DELAY();
		if (input & 0x8) value |= 1;
	}

	/* Deselect the chip. */
	outw(0x0, host->io_port + PCI_NVRAM); NVRAM_DELAY();

	return value;
}

#endif /* USE_NVRAM_DEFAULTS */
+
+
+static int isp1020_set_defaults(struct Scsi_Host *host)
+{
+ struct isp1020_hostdata *hostdata =
+ (struct isp1020_hostdata *) host->hostdata;
+ int i;
+
+ ENTER("isp1020_set_defaults");
+
+ hostdata->host_param.fifo_threshold = 2;
+ hostdata->host_param.host_adapter_enable = 1;
+ hostdata->host_param.initiator_scsi_id = 7;
+ hostdata->host_param.bus_reset_delay = 3;
+ hostdata->host_param.retry_count = 0;
+ hostdata->host_param.retry_delay = 1;
+ hostdata->host_param.async_data_setup_time = 6;
+ hostdata->host_param.req_ack_active_negation = 1;
+ hostdata->host_param.data_line_active_negation = 1;
+ hostdata->host_param.data_dma_burst_enable = 1;
+ hostdata->host_param.command_dma_burst_enable = 1;
+ hostdata->host_param.tag_aging = 8;
+ hostdata->host_param.selection_timeout = 250;
+ hostdata->host_param.max_queue_depth = 256;
+
+ for (i = 0; i < MAX_TARGETS; i++) {
+ hostdata->dev_param[i].device_flags = 0xfd;
+ hostdata->dev_param[i].execution_throttle = 16;
+ hostdata->dev_param[i].synchronous_period = 25;
+ hostdata->dev_param[i].synchronous_offset = 12;
+ hostdata->dev_param[i].device_enable = 1;
+ }
+
+ LEAVE("isp1020_set_defaults");
+
+ return 0;
+}
+
+
/*
 * Push the host_param/dev_param configuration to the firmware via a
 * sequence of polled mailbox commands, then tell the firmware where
 * the request and response rings live.  Runs with interrupts disabled
 * for the whole sequence.  Returns 0 on success, 1 on any failure.
 */
static int isp1020_load_parameters(struct Scsi_Host *host)
{
	int i, k;
	u_int queue_addr;
	u_short param[6];
	u_short isp_cfg1;
	unsigned long flags;
	struct isp1020_hostdata *hostdata =
		(struct isp1020_hostdata *) host->hostdata;

	ENTER("isp1020_load_parameters");

	save_flags(flags);
	cli();

	outw(hostdata->host_param.fifo_threshold, host->io_port + ISP_CFG1);

	param[0] = MBOX_SET_INIT_SCSI_ID;
	param[1] = hostdata->host_param.initiator_scsi_id;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set initiator id failure\n");
		return 1;
	}

	param[0] = MBOX_SET_RETRY_COUNT;
	param[1] = hostdata->host_param.retry_count;
	param[2] = hostdata->host_param.retry_delay;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set retry count failure\n");
		return 1;
	}

	param[0] = MBOX_SET_ASYNC_DATA_SETUP_TIME;
	param[1] = hostdata->host_param.async_data_setup_time;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : async data setup time failure\n");
		return 1;
	}

	param[0] = MBOX_SET_ACTIVE_NEG_STATE;
	param[1] = (hostdata->host_param.req_ack_active_negation << 4)
		| (hostdata->host_param.data_line_active_negation << 5);

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set active negation state failure\n");
		return 1;
	}

	param[0] = MBOX_SET_PCI_CONTROL_PARAMS;
	param[1] = hostdata->host_param.data_dma_burst_enable << 1;
	param[2] = hostdata->host_param.command_dma_burst_enable << 1;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set pci control parameter failure\n");
		return 1;
	}

	/* Mirror the burst-enable choice in the ISP_CFG1 register
	 * (bit 0x0004). */
	isp_cfg1 = inw(host->io_port + ISP_CFG1);

	if (hostdata->host_param.data_dma_burst_enable
	    || hostdata->host_param.command_dma_burst_enable)
		isp_cfg1 |= 0x0004;
	else
		isp_cfg1 &= 0xfffb;

	outw(isp_cfg1, host->io_port + ISP_CFG1);

	param[0] = MBOX_SET_TAG_AGE_LIMIT;
	param[1] = hostdata->host_param.tag_aging;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set tag age limit failure\n");
		return 1;
	}

	param[0] = MBOX_SET_SELECT_TIMEOUT;
	param[1] = hostdata->host_param.selection_timeout;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set selection timeout failure\n");
		return 1;
	}

	/* Per-target negotiation parameters and per-LUN queue limits. */
	for (i = 0; i < MAX_TARGETS; i++) {

		if (!hostdata->dev_param[i].device_enable)
			continue;

		param[0] = MBOX_SET_TARGET_PARAMS;
		param[1] = i << 8;
		param[2] = hostdata->dev_param[i].device_flags << 8;
		param[3] = (hostdata->dev_param[i].synchronous_offset << 8)
			| hostdata->dev_param[i].synchronous_period;

		isp1020_mbox_command(host, param);

		if (param[0] != MBOX_COMMAND_COMPLETE) {
			restore_flags(flags);
			printk("qlogicisp : set target parameter failure\n");
			return 1;
		}

		for (k = 0; k < MAX_LUNS; k++) {

			param[0] = MBOX_SET_DEV_QUEUE_PARAMS;
			param[1] = (i << 8) | k;
			param[2] = hostdata->host_param.max_queue_depth;
			param[3] = hostdata->dev_param[i].execution_throttle;

			isp1020_mbox_command(host, param);

			if (param[0] != MBOX_COMMAND_COMPLETE) {
				restore_flags(flags);
				printk("qlogicisp : set device queue "
				       "parameter failure\n");
				return 1;
			}
		}
	}

	/* Tell the firmware the bus address and length of the
	 * response ring... */
	queue_addr = (u_int) virt_to_bus(&hostdata->res[0][0]);

	param[0] = MBOX_INIT_RES_QUEUE;
	param[1] = RES_QUEUE_LEN + 1;
	param[2] = (u_short) (queue_addr >> 16);
	param[3] = (u_short) (queue_addr & 0xffff);
	param[4] = 0;
	param[5] = 0;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set response queue failure\n");
		return 1;
	}

	/* ... and of the request ring. */
	queue_addr = (u_int) virt_to_bus(&hostdata->req[0][0]);

	param[0] = MBOX_INIT_REQ_QUEUE;
	param[1] = QLOGICISP_REQ_QUEUE_LEN + 1;
	param[2] = (u_short) (queue_addr >> 16);
	param[3] = (u_short) (queue_addr & 0xffff);
	param[4] = 0;

	isp1020_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		restore_flags(flags);
		printk("qlogicisp : set request queue failure\n");
		return 1;
	}

	restore_flags(flags);

	LEAVE("isp1020_load_parameters");

	return 0;
}
+
+
+/*
+ * currently, this is only called during initialization or abort/reset,
+ * at which times interrupts are disabled, so polling is OK, I guess...
+ */
+static int isp1020_mbox_command(struct Scsi_Host *host, u_short param[])
+{
+ int loop_count;
+
+ if (mbox_param[param[0]] == 0)
+ return 1;
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && inw(host->io_port + HOST_HCCR) & 0x0080)
+ barrier();
+ if (!loop_count)
+ printk("qlogicisp: mbox_command loop timeout #1\n");
+
+ switch(mbox_param[param[0]] >> 4) {
+ case 6: outw(param[5], host->io_port + MBOX5);
+ case 5: outw(param[4], host->io_port + MBOX4);
+ case 4: outw(param[3], host->io_port + MBOX3);
+ case 3: outw(param[2], host->io_port + MBOX2);
+ case 2: outw(param[1], host->io_port + MBOX1);
+ case 1: outw(param[0], host->io_port + MBOX0);
+ }
+
+ outw(0x0, host->io_port + PCI_SEMAPHORE);
+ outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
+ outw(HCCR_SET_HOST_INTR, host->io_port + HOST_HCCR);
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && !(inw(host->io_port + PCI_INTF_STS) & 0x04))
+ barrier();
+ if (!loop_count)
+ printk("qlogicisp: mbox_command loop timeout #2\n");
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && inw(host->io_port + MBOX0) == 0x04)
+ barrier();
+ if (!loop_count)
+ printk("qlogicisp: mbox_command loop timeout #3\n");
+
+ switch(mbox_param[param[0]] & 0xf) {
+ case 6: param[5] = inw(host->io_port + MBOX5);
+ case 5: param[4] = inw(host->io_port + MBOX4);
+ case 4: param[3] = inw(host->io_port + MBOX3);
+ case 3: param[2] = inw(host->io_port + MBOX2);
+ case 2: param[1] = inw(host->io_port + MBOX1);
+ case 1: param[0] = inw(host->io_port + MBOX0);
+ }
+
+ outw(0x0, host->io_port + PCI_SEMAPHORE);
+ outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
+
+ return 0;
+}
+
+
+#if DEBUG_ISP1020_INTR
+
+void isp1020_print_status_entry(struct Status_Entry *status)
+{
+ int i;
+
+ printk("qlogicisp : entry count = 0x%02x, type = 0x%02x, flags = 0x%02x\n",
+ status->hdr.entry_cnt, status->hdr.entry_type, status->hdr.flags);
+ printk("qlogicisp : scsi status = 0x%04x, completion status = 0x%04x\n",
+ status->scsi_status, status->completion_status);
+ printk("qlogicisp : state flags = 0x%04x, status flags = 0x%04x\n",
+ status->state_flags, status->status_flags);
+ printk("qlogicisp : time = 0x%04x, request sense length = 0x%04x\n",
+ status->time, status->req_sense_len);
+ printk("qlogicisp : residual transfer length = 0x%08x\n", status->residual);
+
+ for (i = 0; i < status->req_sense_len; i++)
+ printk("qlogicisp : sense data = 0x%02x\n", status->req_sense_data[i]);
+}
+
+#endif /* DEBUG_ISP1020_INTR */
+
+
+#if DEBUG_ISP1020
+
+void isp1020_print_scsi_cmd(Scsi_Cmnd *cmd)
+{
+ int i;
+
+ printk("qlogicisp : target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
+ cmd->target, cmd->lun, cmd->cmd_len);
+ printk("qlogicisp : command = ");
+ for (i = 0; i < cmd->cmd_len; i++)
+ printk("0x%02x ", cmd->cmnd[i]);
+ printk("\n");
+}
+
+#endif /* DEBUG_ISP1020 */
+
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = QLOGICISP;
+
+#include "scsi_module.c"
+#endif /* MODULE */
diff --git a/linux/src/drivers/scsi/qlogicisp.h b/linux/src/drivers/scsi/qlogicisp.h
new file mode 100644
index 0000000..b3e052c
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicisp.h
@@ -0,0 +1,98 @@
+/*
+ * QLogic ISP1020 Intelligent SCSI Processor Driver (PCI)
+ * Written by Erik H. Moe, ehm@cris.com
+ * Copyright 1995, Erik H. Moe
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/* Renamed and updated to 1.3.x by Michael Griffith <grif@cs.ucr.edu> */
+
+/*
+ * $Date: 2007/03/27 21:04:30 $
+ * $Revision: 1.1.4.2 $
+ *
+ * Revision 0.5 1995/09/22 02:32:56 root
+ * do auto request sense
+ *
+ * Revision 0.4 1995/08/07 04:48:28 root
+ * supply firmware with driver.
+ * numerous bug fixes/general cleanup of code.
+ *
+ * Revision 0.3 1995/07/16 16:17:16 root
+ * added reset/abort code.
+ *
+ * Revision 0.2 1995/06/29 03:19:43 root
+ * fixed biosparam.
+ * added queue protocol.
+ *
+ * Revision 0.1 1995/06/25 01:56:13 root
+ * Initial release.
+ *
+ */
+
+#ifndef _QLOGICISP_H
+#define _QLOGICISP_H
+
+/*
+ * With the qlogic interface, every queue slot can hold a SCSI
+ * command with up to 4 scatter/gather entries. If we need more
+ * than 4 entries, continuation entries can be used that hold
+ * another 7 entries each. Unlike for other drivers, this means
+ * that the maximum number of scatter/gather entries we can
+ * support at any given time is a function of the number of queue
+ * slots available. That is, host->can_queue and host->sg_tablesize
+ * are dynamic and _not_ independent. This all works fine because
+ * requests are queued serially and the scatter/gather limit is
+ * determined for each queue request anew.
+ */
+#define QLOGICISP_REQ_QUEUE_LEN 63 /* must be power of two - 1 */
+#define QLOGICISP_MAX_SG(ql) (4 + ((ql) > 0) ? 7*((ql) - 1) : 0)
+
+int isp1020_detect(Scsi_Host_Template *);
+int isp1020_release(struct Scsi_Host *);
+const char * isp1020_info(struct Scsi_Host *);
+int isp1020_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int isp1020_abort(Scsi_Cmnd *);
+int isp1020_reset(Scsi_Cmnd *, unsigned int);
+int isp1020_biosparam(Disk *, kdev_t, int[]);
+
+#ifndef NULL
+#define NULL (0)
+#endif
+
+static struct proc_dir_entry proc_scsi_isp1020;
+
+#define QLOGICISP { \
+ /* next */ NULL, \
+ /* usage_count */ NULL, \
+ /* proc dir */ NULL, \
+ /* procfs info */ NULL, \
+ /* name */ NULL, \
+ /* detect */ isp1020_detect, \
+ /* release */ isp1020_release, \
+ /* info */ isp1020_info, \
+ /* command */ NULL, \
+ /* queuecommand */ isp1020_queuecommand, \
+ /* abort */ isp1020_abort, \
+ /* reset */ isp1020_reset, \
+ /* slave_attach */ NULL, \
+ /* bios_param */ isp1020_biosparam, \
+ /* can_queue */ QLOGICISP_REQ_QUEUE_LEN, \
+ /* this_id */ -1, \
+ /* sg_tablesize */ QLOGICISP_MAX_SG(QLOGICISP_REQ_QUEUE_LEN), \
+ /* cmd_per_lun */ 1, \
+ /* present */ 0, \
+ /* unchecked_isa_dma */ 0, \
+ /* use_clustering */ DISABLE_CLUSTERING \
+}
+
+#endif /* _QLOGICISP_H */
diff --git a/linux/src/drivers/scsi/scripts.h b/linux/src/drivers/scsi/scripts.h
new file mode 100644
index 0000000..482b0c2
--- /dev/null
+++ b/linux/src/drivers/scsi/scripts.h
@@ -0,0 +1,1357 @@
+/***********************************************************************
+;* File Name : SCRIPTS.H *
+;* Description:SCRIPT language for NCR53c825A,875 SCRIPT processor*
+;* *
+;***********************************************************************
+
+;==========================================================
+; NCR 53C810,53C815,53C820,53C825,53C825A,53C860,53C875
+; Script language definition for assembly programming
+;==========================================================
+
+;==========================================================
+; DMA Command
+;==========================================================
+*/
+#define DCMD_BLOCK_MOVE 0
+#define DCMD_IO 0x040000000 /*;BIT30 */
+#define DCMD_RD_WRT 0x040000000 /*;BIT30 */
+#define DCMD_XFER_CTRL 0x080000000 /*;BIT31 */
+#define DCMD_MEM_MOVE 0x0C0000000 /*;(BIT31+BIT30) */
+#define DCMD_LOAD_STORE 0x0E0000000 /*;(BIT31+BIT30+BIT29) */
+/*;==========================================================*/
+#define INDIRECT_ADDR 0x20000000 /*;BIT29 */
+#define TABLE_INDIRECT 0x10000000 /*;BIT28 */
+#define BLOCK_MOVE 0x08000000 /*;BIT27 */
+#define CHAIN_MOVE 0
+/*; SCSI phase definition */
+#define DATA_OUT_ 0x00000000 /*;data out phase */
+#define DATA_IN_ 0x01000000 /*;BIT24 ; data in phase */
+#define COMMAND_ 0x02000000 /*;BIT25 ; command phase */
+#define STATUS_ 0x03000000 /*;(BIT25+BIT24) ; status phase */
+#define RESERVED_OUT 0x04000000 /*;BIT26 */
+#define RESERVED_IN 0x05000000 /*;(BIT26+BIT24) */
+#define MSG_OUT_ 0x06000000 /*;(BIT26+BIT25) ; message in phase */
+#define MSG_IN_ 0x07000000 /*;(BIT26+BIT25+BIT24);message out phase */
+/*;----------------------------------------------------------*/
+#define DCMD_SELECT 0x40000000 /*;DCMD_IO+0 */
+#define DCMD_SELECT_ATN 0x41000000 /*;(DCMD_IO+BIT24) */
+#define DCMD_WAIT_DISC 0x48000000 /*;(DCMD_IO+BIT27) */
+#define DCMD_WAIT_RESEL 0x50000000 /*;(DCMD_IO+BIT28) */
+#define DCMD_SET_CARRY 0x58000400 /*;(DCMD_IO+BIT28+BIT27+BIT10) */
+#define DCMD_SET_ACK 0x58000040 /*;(DCMD_IO+BIT28+BIT27+BIT6) */
+#define DCMD_SET_ATN 0x58000008 /*;(DCMD_IO+BIT28+BIT27+BIT3) */
+#define DCMD_CLR_CARRY 0x60000400 /*;(DCMD_IO+BIT29+BIT10) */
+#define DCMD_CLR_ACK 0x60000040 /*;(DCMD_IO+BIT29+BIT6) */
+#define DCMD_CLR_ATN 0x60000008 /*;(DCMD_IO+BIT29+BIT3) */
+#define RELATIVE_ADDR 0x04000000 /*;BIT26 */
+#define IO_TABLE_INDIR 0x02000000 /*;BIT25 */
+/*;----------------------------------------------------------*/
+#define MOVE_FROM_SFBR 0x68000000 /*;(DCMD_RD_WRT+BIT29+BIT27) */
+#define MOVE_TO_SFBR 0x70000000 /*;(DCMD_RD_WRT+BIT29+BIT28) */
+#define RD_MODIFY_WRT 0x78000000 /*;(DCMD_RD_WRT+BIT29+BIT28+BIT27) */
+#define OP_MOVE_DATA 0
+#define OP_SHIFT_LEFT_C 0x01000000 /*;BIT24 */
+#define OP_OR 0x02000000 /*;BIT25 */
+#define OP_XOR 0x03000000 /*;(BIT25+BIT24) */
+#define OP_AND 0x04000000 /*;BIT26 */
+#define OP_SHIFT_RIGHT_C 0x05000000 /*;(BIT26+BIT24) */
+#define OP_ADD_DATA 0x06000000 /*;(BIT26+BIT25) */
+#define OP_ADD_DATA_C 0x07000000 /*;(BIT26+BIT25+BIT24) */
+#define USE_SFBR 0x00800000 /*;BIT23 */
+/*;----------------------------------------------------------*/
+#define DCMD_JUMP 0x80000000 /*;DCMD_XFER_CTRL+0 */
+#define DCMD_CALL 0x88000000 /*;(DCMD_XFER_CTRL+BIT27) */
+#define DCMD_RETURN 0x90000000 /*;(DCMD_XFER_CTRL+BIT28) */
+#define DCMD_INT 0x98000000 /*;(DCMD_XFER_CTRL+BIT28+BIT27) */
+#define RELATIVE_ 0x00800000 /*;BIT23 */
+#define IF_CARRY 0x00200000 /*;BIT21 */
+#define INT_ON_FLY_ 0x00100000 /*;BIT20 */
+#define IF_TRUE 0x00080000 /*;BIT19 */
+#define IF_NOT 0
+#define DATA_CMP 0x00040000 /*;BIT18 */
+#define PHASE_CMP 0x00020000 /*;BIT17 */
+#define WAIT_PHASE_VALID 0x00010000 /*;BIT16 */
+/*;----------------------------------------------------------*/
+#define DSA_RELATIVE 0x10000000 /*;BIT28 */
+#define FLUSH_PREFETCH 0x02000000 /*;BIT25 */
+#define DCMD_LOAD 0x0E1000000 /*;(DCMD_LOAD_STORE+BIT24) */
+#define DCMD_STORE 0x0E0000000 /*;DCMD_LOAD_STORE */
+/*
+;==========================================================
+; SCSI message EQUATES
+;==========================================================
+*/
+#define CMD_COMPLETE 0
+#define EXT_MSG 1
+#define SAVE_PTR 2
+#define RESTORE_PTR 3
+#define DISCONNECTMSG 4
+#define INITIATOR_ERR 5
+#define ABORTMSG 6
+#define MSG_REJECT 7
+#define NOPMSG 8
+#define MSG_PARITY 9
+#define LINK_CMD_CPL 0x0a
+#define LINK_CMD_FLAG 0x0b
+#define RESET_DEVICE 0x0c
+#define IDENTIFYMSG 0x80
+#define SIMPLE_TAG 0x20
+#define IGNORE_WIDE_RES 0x23
+/*
+;==========================================================
+; Operation assumption
+; 1. If phase mismatch during Xfer PAD ==> do nothing
+; Else compute FIXUP needed
+; 2. After phase mismatch ==> Set to Xfer PAD
+; 3. At disconnection ==> Modify return address
+; 4. 1st restore ptr after reselection is ignored
+; 5. If Xfer PAD is done ==> Error
+;==========================================================
+*/
+/* static start_script
+ static reselected
+ static reselecttag
+ static select0
+ static select1
+ static check_phase
+ static status1_phase
+ static command_phase
+ static jump_table0
+ static jump_tableB
+ static din_phase
+ static din_phaseB
+ static din_pad_0
+ static din_pad_addrB
+ static dout_phase
+ static dout_phaseB
+ static dout_pad_0
+ static dout_pad_addrB
+ static jump_tablew
+ static jump_tableW
+ static din_phase1
+ static din_phaseW
+ static din_pad_1
+ static din_pad_addrW
+ static dout_phase1
+ static dout_phaseW
+ static dout_pad_1
+ static dout_pad_addrW
+ static mout_phase
+ static status_phase
+ static min_phase
+ static set_atn
+ static clr_atn
+ static end_script
+ static start_mov
+ static SrcPhysAddr
+ static DesPhysAddr
+*/
+ULONG start_script[]={
+/*
+;==========================================================
+; Wait for reselection
+;==========================================================
+*/
+ DCMD_WAIT_RESEL
+ };
+ULONG jmp_select0[]={
+ 0 /* offset select0 */
+ };
+ULONG reselected[]={
+ RD_MODIFY_WRT+OP_OR+0x200+0x340000, /* (2 shl 8) or (__scratcha shl 16) */
+ 0,
+
+ DCMD_INT+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+MSG_IN_,
+ __RESELECTED,
+
+ BLOCK_MOVE+MSG_IN_+1 /* ;move in ID byte */
+ };
+ULONG ACB_msgin123_1[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __RESELECTED1
+ };
+ULONG reselecttag[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ BLOCK_MOVE+MSG_IN_+2 /* ;move 2 msg bytes */
+ };
+ULONG ACB_msgin123_2[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __RESELECTEDT
+ };
+/*
+;==========================================================
+; Select
+; Case 1 : Only identify message is to be sent
+; Case 2 : Synchronous negotiation is requested
+;==========================================================
+*/
+ULONG select0[]={
+ DCMD_INT+IF_TRUE,
+ __SIGNAL
+ };
+ULONG select1[]={ /* ; Select with ATN */
+
+ DCMD_SELECT_ATN+IO_TABLE_INDIR /* +offset SRB.__select ;4200h or 0100H */
+ };
+ULONG jmp_reselected[]={
+ 0, /* offset reselected, */
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+MSG_OUT_
+ };
+ULONG jmp_check_phase[]={
+ 0, /* offset check_phase, */
+
+ TABLE_INDIRECT+BLOCK_MOVE+MSG_OUT_
+ };
+ULONG SRB_msgout0[]={
+ 0 /* offset SRB.__msgout0 */
+ };
+ULONG check_phase[]={
+ DCMD_RETURN+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0,
+
+ DCMD_RETURN+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0
+ };
+ULONG status1_phase[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+STATUS_
+ };
+ULONG jmp_status_phase[]={
+ 0, /* offset status_phase,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+COMMAND_
+ };
+ULONG jmp_command_phase[]={
+ 0, /* offset command_phase, */
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+MSG_IN_
+ };
+ULONG jmp_min_phase[]={
+ 0, /* offset min_phase,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+MSG_OUT_
+ };
+ULONG jmp_mout_phase[]={
+ 0, /* offset mout_phase,*/
+
+ DCMD_INT+IF_TRUE,
+ __FATALERROR
+ };
+/*
+;==========================================================
+; Command phase
+;==========================================================
+*/
+ULONG command_phase[]={
+ DCMD_CLR_ATN,
+ 0,
+ TABLE_INDIRECT+BLOCK_MOVE+COMMAND_
+ };
+ULONG SRB_command[]={
+ 0, /* offset SRB.__command,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase1[]={
+ 0 /* offset check_phase */
+ };
+/*
+;==========================================================
+; Data phase jump table for 8 bit operation
+;==========================================================
+*/
+ULONG jmp_dio_phaseB[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 128,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0 /* offset dout_phaseB+ 128 */
+ };
+ULONG jump_table0[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_din_pad_0[]={
+ 0, /* offset din_pad_0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_dout_pad_0[]={
+ 0 /* offset dout_pad_0 */
+ };
+
+#define jump_tableB jump_table0
+/*
+;==========================================================
+; Data in phase
+;==========================================================
+*/
+ULONG din_phaseB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x100+0x340000, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_status1_phase[]={
+ 0 /* offset status1_phase */
+ };
+
+#define din_phase din_phaseB
+
+ULONG din_pad_0[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG din_pad_addrB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_
+ };
+ULONG SRB_SegmentPad[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_din_pad_addrB[]={
+ 0 /* offset din_pad_addrB */
+ };
+/*
+;==========================================================
+; Data out phase
+;==========================================================
+*/
+ULONG dout_phaseB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x100+0x340000, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_status1_phase1[]={
+ 0 /* offset status1_phase */
+ };
+
+#define dout_phase dout_phaseB
+
+ULONG dout_pad_0[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG dout_pad_addrB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_
+ };
+ULONG SRB_SegmentPad1[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_dout_pad_addrB[]={
+ 0 /* offset dout_pad_addrB */
+ };
+/*
+;==========================================================
+; Data phase jump table for WIDE SCSI operation
+;==========================================================
+*/
+ULONG jmp_dio_phaseW[]={
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 128,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0 /* offset dout_phaseW+ 128 */
+ };
+ULONG jump_tablew[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_din_pad_1[]={
+ 0, /* offset din_pad_1,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_dout_pad_1[]={
+ 0 /* offset dout_pad_1 */
+ };
+
+#define jump_tableW jump_tablew
+/*
+;==========================================================
+; Data in phase
+;==========================================================
+*/
+ULONG din_phaseW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x340000+0x100, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_status1_phase2[]={
+ 0 /* offset status1_phase */
+ };
+
+#define din_phase1 din_phaseW
+
+ULONG din_pad_1[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG din_pad_addrW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_
+ };
+ULONG SRB_SegmentPad2[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_din_pad_addrW[]={
+ 0 /* offset din_pad_addrW */
+ };
+/*
+;==========================================================
+; Data out phase
+;==========================================================
+*/
+ULONG dout_phaseW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+/*; 18000000h or DATA_OUT_ */
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x340000+0x100, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_status1_phase3[]={
+ 0 /* offset status1_phase */
+ };
+
+#define dout_phase1 dout_phaseW
+
+ULONG dout_pad_1[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG dout_pad_addrW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_
+ };
+ULONG SRB_SegmentPad3[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_dout_pad_addrW[]={
+ 0 /* offset dout_pad_addrW */
+ };
+/*
+;==========================================================
+; message out phase
+;==========================================================
+*/
+ULONG mout_phase[]={
+ DCMD_SET_ATN,
+ 0,
+
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+MSG_OUT_
+ };
+ULONG SRB_msgout01[]={
+ 0, /* offset SRB.__msgout0,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase2[]={
+ 0 /* offset check_phase */
+ };
+/*
+;==========================================================
+; Status phase process
+;==========================================================
+*/
+ULONG status_phase[]={
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+STATUS_+1
+ };
+ULONG ACB_status[]={
+ 0 /* offset ACB.status */
+ };
+/*
+;==========================================================
+; message in phase
+;==========================================================
+*/
+ULONG min_phase[]={
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+1
+ };
+ULONG ACB_msgin123_3[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_JUMP+IF_NOT+DATA_CMP+CMD_COMPLETE
+ };
+ULONG jmp_jump_msgok[]={
+ 0 /* offset jump_msgok */
+ };
+/*
+;==========================================================
+; command complete message
+;==========================================================
+*/
+ULONG msg__0[]={
+ RD_MODIFY_WRT+OP_AND+0x20000+0x7F00, /*;(7FH shl 8) or (__scntl2 shl 16)*/
+ 0,
+
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_WAIT_DISC,
+ 0,
+
+ DCMD_INT+IF_TRUE,
+ __COMPLETE
+ };
+/*
+;==========================================================
+; Other message
+;==========================================================
+*/
+ULONG jump_msgok[]={
+ DCMD_JUMP+IF_TRUE+DATA_CMP+SAVE_PTR
+ };
+ULONG jmp_msg__a[]={
+ 0, /* offset msg__a,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+RESTORE_PTR
+ };
+ULONG jmp_msg__3[]={
+ 0, /* offset msg__3,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+DISCONNECTMSG
+ };
+ULONG jmp_msg__4[]={
+ 0, /* offset msg__4,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+EXT_MSG
+ };
+ULONG jmp_msg__1[]={
+ 0, /* offset msg__1,*/
+
+ DCMD_INT+IF_TRUE+DATA_CMP+MSG_REJECT,
+ __MSGREJECT,
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+LINK_CMD_CPL
+ };
+ULONG jmp_msg__a1[]={
+ 0, /* offset msg__a,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+LINK_CMD_FLAG
+ };
+ULONG jmp_msg__a2[]={
+ 0, /* offset msg__a,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+IGNORE_WIDE_RES
+ };
+ULONG jmp_msg__23[]={
+ 0, /* offset msg__23,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGUNKNOWN
+ };
+/*
+;==========================================================
+; Extended message
+;==========================================================
+*/
+ULONG msg__1[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+1 /* ;ext msg len */
+ };
+ULONG ACB_msgin123_4[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+3
+ };
+ULONG jmp_msg___3[]={
+ 0, /* offset msg___3,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+2
+ };
+ULONG jmp_msg___2[]={
+ 0, /* offset msg___2,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGEXTEND
+ };
+
+ULONG msg___3[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+3
+ };
+ULONG ACB_msgin123_5[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGSYNC
+ };
+
+ULONG msg___2[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+2
+ };
+ULONG ACB_msgin123_6[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGWIDE
+ };
+/*
+;############################################################
+; for synchronous negotiation
+; 1. Active ==> INT3, restart at data__1_2
+; 2. Passive ==> INT3, prepare message out, restart at data__1_1
+; 3. Disable ==> INT3, prepare message out, restart at data__1_1
+;############################################################
+*/
+ULONG set_atn[]={
+ DCMD_SET_ATN,
+ 0
+ };
+ULONG msg__a[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase3[]={
+ 0 /* offset check_phase */
+ };
+
+ULONG msg__23[]={ /* ; ignore wide residue */
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+1
+ };
+ULONG ACB_msgin123_7[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase4[]={
+ 0 /* offset check_phase */
+ };
+
+ULONG msg__3[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase5[]={
+ 0 /* offset check_phase */
+ };
+
+ULONG msg__4[]={ /* ; disconnect */
+ RD_MODIFY_WRT+OP_AND+0x20000+0x7F00, /*;(7FH shl 8) or (__scntl2 shl 16)*/
+ 0,
+
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_WAIT_DISC,
+ 0,
+
+ DCMD_INT+IF_TRUE,
+ __DISCONNECTED
+ };
+
+ULONG clr_atn[]={
+ DCMD_CLR_ATN,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase6[]={
+ 0 /* offset check_phase */
+ };
+/*
+;==========================================================
+; Used for script operation
+;==========================================================
+*/
+ULONG start_mov[]={
+/*; DCMD_MEM_MOVE+(OFFSET DGROUP:end_script - OFFSET DGROUP:start_script) ;Memory move SCRIPTS instruction*/
+ DCMD_MEM_MOVE+0x1000 /*;Memory move SCRIPTS instruction ( 4K )*/
+ };
+ULONG SrcPhysAddr[]={
+ 0 /*; source */
+ };
+ULONG DesPhysAddr[]={
+ 0, /*; destination */
+
+ DCMD_INT+IF_TRUE, /*; script interrupt, */
+ 0,
+
+ DCMD_INT+IF_NOT, /*; script interrupt */
+ 0
+ };
+ULONG end_script[]={0};
+/***********************************************************************/
+
diff --git a/linux/src/drivers/scsi/scsi.c b/linux/src/drivers/scsi/scsi.c
new file mode 100644
index 0000000..62c4b10
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi.c
@@ -0,0 +1,3585 @@
+/*
+ * scsi.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * generic mid-level SCSI driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Bug correction thanks go to :
+ * Rik Faith <faith@cs.unc.edu>
+ * Tommy Thorn <tthorn>
+ * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Native multichannel, wide scsi, /proc/scsi and hot plugging
+ * support added by Michael Neuffer <mike@i-connect.net>
+ *
+ * Added request_module("scsi_hostadapter") for kerneld:
+ * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/conf.modules)
+ * Bjorn Ekwall <bj0rn@blox.se>
+ *
+ * Major improvements to the timeout, abort, and reset processing,
+ * as well as performance modifications for large queue depths by
+ * Leonard N. Zubkoff <lnz@dandelion.com>
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+
+#undef USE_STATIC_SCSI_MEMORY
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/scsi.c,v 1.1 1999/04/26 05:54:57 tb Exp $";
+*/
+
+
+/* Command groups 3 and 4 are reserved and should never be used. */
+const unsigned char scsi_command_size[8] = { 6, 10, 10, 12, 12, 12, 10, 10 };
+
+#define INTERNAL_ERROR (panic ("Internal error in file %s, line %d.\n", __FILE__, __LINE__))
+
+/*
+ * PAGE_SIZE must be a multiple of the sector size (512). True
+ * for all reasonably recent architectures (even the VAX...).
+ */
+#define SECTOR_SIZE 512
+#define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
+
+#if SECTORS_PER_PAGE <= 8
+ typedef unsigned char FreeSectorBitmap;
+#elif SECTORS_PER_PAGE <= 32
+ typedef unsigned int FreeSectorBitmap;
+#else
+# error You lose.
+#endif
+
+static void scsi_done (Scsi_Cmnd *SCpnt);
+static int update_timeout (Scsi_Cmnd *, int);
+static void print_inquiry(unsigned char *data);
+static void scsi_times_out (Scsi_Cmnd * SCpnt);
+static int scan_scsis_single (int channel,int dev,int lun,int * max_scsi_dev ,
+ int * sparse_lun, Scsi_Device ** SDpnt, Scsi_Cmnd * SCpnt,
+ struct Scsi_Host *shpnt, char * scsi_result);
+void scsi_build_commandblocks(Scsi_Device * SDpnt);
+
+#ifdef CONFIG_MODULES
+extern struct symbol_table scsi_symbol_table;
+#endif
+
+static FreeSectorBitmap * dma_malloc_freelist = NULL;
+static int scsi_need_isa_bounce_buffers;
+static unsigned int dma_sectors = 0;
+unsigned int dma_free_sectors = 0;
+unsigned int need_isa_buffer = 0;
+static unsigned char ** dma_malloc_pages = NULL;
+
+static int time_start;
+static int time_elapsed;
+static volatile struct Scsi_Host * host_active = NULL;
+#define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \
+ || (HOST->can_queue && HOST->host_busy >= HOST->can_queue))
+
+const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
+{
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications "
+};
+
+
+/*
+ * global variables :
+ * scsi_devices an array of these specifying the address for each
+ * (host, id, LUN)
+ */
+
+Scsi_Device * scsi_devices = NULL;
+
+/* Process ID of SCSI commands */
+unsigned long scsi_pid = 0;
+
+static unsigned long serial_number = 0;
+
+static unsigned char generic_sense[6] = {REQUEST_SENSE, 0,0,0, 255, 0};
+static void resize_dma_pool(void);
+
+/* This variable is merely a hook so that we can debug the kernel with gdb. */
+Scsi_Cmnd * last_cmnd = NULL;
+
+/* This is the pointer to the /proc/scsi code.
+ * It is only initialized to !=0 if the scsi code is present
+ */
+#if CONFIG_PROC_FS
+extern int (* dispatch_scsi_info_ptr)(int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+extern int dispatch_scsi_info(int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+struct proc_dir_entry proc_scsi_scsi = {
+ PROC_SCSI_SCSI, 4, "scsi",
+ S_IFREG | S_IRUGO | S_IWUSR, 1, 0, 0, 0,
+ NULL,
+ NULL, NULL,
+ NULL, NULL, NULL
+};
+#endif
+
+/*
+ * This is the number of clock ticks we should wait before we time out
+ * and abort the command. This is for where the scsi.c module generates
+ * the command, not where it originates from a higher level, in which
+ * case the timeout is specified there.
+ *
+ * ABORT_TIMEOUT and RESET_TIMEOUT are the timeouts for RESET and ABORT
+ * respectively.
+ */
+
+#ifdef DEBUG_TIMEOUT
+static void scsi_dump_status(void);
+#endif
+
+
+#ifdef DEBUG
+ #define SCSI_TIMEOUT (5*HZ)
+#else
+ #define SCSI_TIMEOUT (2*HZ)
+#endif
+
+#ifdef DEBUG
+ #define SENSE_TIMEOUT SCSI_TIMEOUT
+ #define ABORT_TIMEOUT SCSI_TIMEOUT
+ #define RESET_TIMEOUT SCSI_TIMEOUT
+#else
+ #define SENSE_TIMEOUT (5*HZ/10)
+ #define RESET_TIMEOUT (5*HZ/10)
+ #define ABORT_TIMEOUT (5*HZ/10)
+#endif
+
+#define MIN_RESET_DELAY (2*HZ)
+
+/* Do not call reset on error if we just did a reset within 15 sec. */
+#define MIN_RESET_PERIOD (15*HZ)
+
+/* The following devices are known not to tolerate a lun != 0 scan for
+ * one reason or another. Some will respond to all luns, others will
+ * lock up.
+ */
+
+#define BLIST_NOLUN 0x01
+#define BLIST_FORCELUN 0x02
+#define BLIST_BORKEN 0x04
+#define BLIST_KEY 0x08
+#define BLIST_SINGLELUN 0x10
+#define BLIST_NOTQ 0x20
+#define BLIST_SPARSELUN 0x40
+#define BLIST_MAX5LUN 0x80
+
+struct dev_info{
+ const char * vendor;
+ const char * model;
+ const char * revision; /* Latest revision known to be bad. Not used yet */
+ unsigned flags;
+};
+
+/*
+ * This is what was previously known as the blacklist. The concept
+ * has been expanded so that we can specify other types of things we
+ * need to be aware of.
+ */
+static struct dev_info device_list[] =
+{
+{"TEAC","CD-R55S","1.0H", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"CHINON","CD-ROM CDS-431","H42", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"CHINON","CD-ROM CDS-535","Q14", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"DENON","DRD-25X","V", BLIST_NOLUN}, /* Locks up if probed for lun != 0 */
+{"HITACHI","DK312C","CM81", BLIST_NOLUN}, /* Responds to all lun - dtg */
+{"HITACHI","DK314C","CR21" , BLIST_NOLUN}, /* responds to all lun */
+{"IMS", "CDD521/10","2.06", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","XT-3280","PR02", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","XT-4380S","B3C", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","MXT-1240S","I1.2", BLIST_NOLUN}, /* Locks up when LUN>0 polled */
+{"MAXTOR","XT-4170S","B5A", BLIST_NOLUN}, /* Locks-up sometimes when LUN>0 polled. */
+{"MAXTOR","XT-8760S","B7B", BLIST_NOLUN}, /* guess what? */
+{"MEDIAVIS","RENO CD-ROMX2A","2.03",BLIST_NOLUN},/*Responds to all lun */
+{"MICROP", "4110", "*", BLIST_NOTQ}, /* Buggy Tagged Queuing */
+{"NEC","CD-ROM DRIVE:841","1.0", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"RODIME","RO3000S","2.33", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"SANYO", "CRD-250S", "1.20", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for aha152x controller, which causes
+ * SCSI code to reset bus.*/
+{"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for aha152x controller, which causes
+ * SCSI code to reset bus.*/
+{"SEAGATE", "ST296","921", BLIST_NOLUN}, /* Responds to all lun */
+{"SEAGATE","ST1581","6538",BLIST_NOLUN}, /* Responds to all lun */
+{"SONY","CD-ROM CDU-541","4.3d", BLIST_NOLUN},
+{"SONY","CD-ROM CDU-55S","1.0i", BLIST_NOLUN},
+{"SONY","CD-ROM CDU-561","1.7x", BLIST_NOLUN},
+{"TANDBERG","TDC 3600","U07", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"TEAC","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for seagate controller, which causes
+ * SCSI code to reset bus.*/
+{"TEXEL","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for seagate controller, which causes
+ * SCSI code to reset bus.*/
+{"QUANTUM","LPS525S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
+{"QUANTUM","PD1225S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
+{"MEDIAVIS","CDR-H93MV","1.31", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"SANKYO", "CP525","6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
+{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
+{"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
+{"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+
+/*
+ * Other types of devices that have special flags.
+ */
+{"SONY","CD-ROM CDU-8001","*", BLIST_BORKEN},
+{"TEXEL","CD-ROM","1.06", BLIST_BORKEN},
+{"IOMEGA","Io20S *F","*", BLIST_KEY},
+{"INSITE","Floptical F*8I","*", BLIST_KEY},
+{"INSITE","I325VM","*", BLIST_KEY},
+{"NRC","MBR-7","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"NRC","MBR-7.4","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"REGAL","CDC-4X","*", BLIST_MAX5LUN | BLIST_SINGLELUN},
+{"NAKAMICH","MJ-4.8S","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"NAKAMICH","MJ-5.16S","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"PIONEER","CD-ROM DRM-600","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"PIONEER","CD-ROM DRM-602X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"PIONEER","CD-ROM DRM-604X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"EMULEX","MD21/S2 ESDI","*", BLIST_SINGLELUN},
+{"CANON","IPUBJD","*", BLIST_SPARSELUN},
+{"MATSHITA","PD","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"YAMAHA","CDR100","1.00", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"YAMAHA","CDR102","1.00", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"nCipher","Fastness Crypto","*", BLIST_FORCELUN},
+/*
+ * Must be at end of list...
+ */
+{NULL, NULL, NULL}
+};
+
+static int get_device_flags(unsigned char * response_data){
+ int i = 0;
+ unsigned char * pnt;
+ for(i=0; 1; i++){
+ if(device_list[i].vendor == NULL) return 0;
+ pnt = &response_data[8];
+ while(*pnt && *pnt == ' ') pnt++;
+ if(memcmp(device_list[i].vendor, pnt,
+ strlen(device_list[i].vendor))) continue;
+ pnt = &response_data[16];
+ while(*pnt && *pnt == ' ') pnt++;
+ if(memcmp(device_list[i].model, pnt,
+ strlen(device_list[i].model))) continue;
+ return device_list[i].flags;
+ }
+ return 0;
+}
+
+void scsi_make_blocked_list(void) {
+ int block_count = 0, index;
+ unsigned long flags;
+ struct Scsi_Host * sh[128], * shpnt;
+
+ /*
+ * Create a circular linked list from the scsi hosts which have
+ * the "wish_block" field in the Scsi_Host structure set.
+ * The blocked list should include all the scsi hosts using ISA DMA.
+ * In some systems, using two dma channels simultaneously causes
+ * unpredictable results.
+ * Among the scsi hosts in the blocked list, only one host at a time
+ * is allowed to have active commands queued. The transition from
+ * one active host to the next one is allowed only when host_busy == 0
+ * for the active host (which implies host_busy == 0 for all the hosts
+ * in the list). Moreover for block devices the transition to a new
+ * active host is allowed only when a request is completed, since a
+ * block device request can be divided into multiple scsi commands
+ * (when there are few sg lists or clustering is disabled).
+ *
+ * (DB, 4 Feb 1995)
+ */
+
+ save_flags(flags);
+ cli();
+ host_active = NULL;
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) {
+
+#if 0
+ /*
+ * Is this is a candidate for the blocked list?
+ * Useful to put into the blocked list all the hosts whose driver
+ * does not know about the host->block feature.
+ */
+ if (shpnt->unchecked_isa_dma) shpnt->wish_block = 1;
+#endif
+
+ if (shpnt->wish_block) sh[block_count++] = shpnt;
+ }
+
+ if (block_count == 1) sh[0]->block = NULL;
+
+ else if (block_count > 1) {
+
+ for(index = 0; index < block_count - 1; index++) {
+ sh[index]->block = sh[index + 1];
+ printk("scsi%d : added to blocked host list.\n",
+ sh[index]->host_no);
+ }
+
+ sh[block_count - 1]->block = sh[0];
+ printk("scsi%d : added to blocked host list.\n",
+ sh[index]->host_no);
+ }
+
+ restore_flags(flags);
+}
+
+static void scan_scsis_done (Scsi_Cmnd * SCpnt)
+{
+
+#ifdef DEBUG
+ printk ("scan_scsis_done(%p, %06x)\n", SCpnt->host, SCpnt->result);
+#endif
+ SCpnt->request.rq_status = RQ_SCSI_DONE;
+
+ if (SCpnt->request.sem != NULL)
+ up(SCpnt->request.sem);
+}
+
+#ifdef CONFIG_SCSI_MULTI_LUN
+static int max_scsi_luns = 8;
+#else
+static int max_scsi_luns = 1;
+#endif
+
+void scsi_luns_setup(char *str, int *ints) {
+ if (ints[0] != 1)
+ printk("scsi_luns_setup : usage max_scsi_luns=n (n should be between 1 and 8)\n");
+ else
+ max_scsi_luns = ints[1];
+}
+
+/*
+ * Detecting SCSI devices :
+ * We scan all present host adapter's busses, from ID 0 to ID (max_id).
+ * We use the INQUIRY command, determine device type, and pass the ID /
+ * lun address of all sequential devices to the tape driver, all random
+ * devices to the disk driver.
+ *
+ * If hardcoded == 1 only the single (hchannel, hid, hlun) address is
+ * probed and, when a new device appears, the upper-level drivers are
+ * initialized/attached for it right away; otherwise every channel, id
+ * and lun of shpnt is walked via scan_scsis_single().
+ */
+static void scan_scsis (struct Scsi_Host *shpnt, unchar hardcoded,
+ unchar hchannel, unchar hid, unchar hlun)
+{
+ int dev, lun, channel;
+ unsigned char scsi_result0[256];
+ unsigned char *scsi_result;
+ Scsi_Device *SDpnt;
+ int max_dev_lun, sparse_lun;
+ Scsi_Cmnd *SCpnt;
+
+ SCpnt = (Scsi_Cmnd *) scsi_init_malloc (sizeof (Scsi_Cmnd), GFP_ATOMIC | GFP_DMA);
+ SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
+ memset (SCpnt, 0, sizeof (Scsi_Cmnd));
+
+
+ /* Make sure we have something that is valid for DMA purposes */
+ scsi_result = ( ( !shpnt->unchecked_isa_dma )
+ ? &scsi_result0[0] : scsi_init_malloc (512, GFP_DMA));
+
+ if (scsi_result == NULL) {
+ printk ("Unable to obtain scsi_result buffer\n");
+ goto leave;
+ }
+
+ /* We must chain ourself in the host_queue, so commands can time out */
+ if(shpnt->host_queue)
+ shpnt->host_queue->prev = SCpnt;
+ SCpnt->next = shpnt->host_queue;
+ SCpnt->prev = NULL;
+ shpnt->host_queue = SCpnt;
+
+
+ if (hardcoded == 1) {
+ Scsi_Device *oldSDpnt=SDpnt;
+ struct Scsi_Device_Template * sdtpnt;
+ channel = hchannel;
+ if(channel > shpnt->max_channel) goto leave;
+ dev = hid;
+ if(dev >= shpnt->max_id) goto leave;
+ lun = hlun;
+ if(lun >= shpnt->max_lun) goto leave;
+ scan_scsis_single (channel, dev, lun, &max_dev_lun, &sparse_lun,
+ &SDpnt, SCpnt, shpnt, scsi_result);
+ /* SDpnt advanced => a device was registered at this address */
+ if(SDpnt!=oldSDpnt) {
+
+ /* it could happen the blockdevice hasn't yet been inited */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ oldSDpnt->scsi_request_fn = NULL;
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) {
+ (*sdtpnt->attach)(oldSDpnt);
+ if(oldSDpnt->attached) scsi_build_commandblocks(oldSDpnt);}
+ resize_dma_pool();
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ {(*sdtpnt->finish)();}
+ }
+ }
+
+ }
+ else {
+ for (channel = 0; channel <= shpnt->max_channel; channel++) {
+ for (dev = 0; dev < shpnt->max_id; ++dev) {
+ if (shpnt->this_id != dev) {
+
+ /*
+ * We need the for so our continue, etc. work fine. We put this in
+ * a variable so that we can override it during the scan if we
+ * detect a device *KNOWN* to have multiple logical units.
+ */
+ max_dev_lun = (max_scsi_luns < shpnt->max_lun ?
+ max_scsi_luns : shpnt->max_lun);
+ sparse_lun = 0;
+ for (lun = 0; lun < max_dev_lun; ++lun) {
+ if (!scan_scsis_single (channel, dev, lun, &max_dev_lun,
+ &sparse_lun, &SDpnt, SCpnt, shpnt,
+ scsi_result)
+ && !sparse_lun)
+ break; /* break means don't probe further for luns!=0 */
+ } /* for lun ends */
+ } /* if this_id != id ends */
+ } /* for dev ends */
+ } /* for channel ends */
+ } /* if/else hardcoded */
+
+ leave:
+
+ {/* Unchain SCpnt from host_queue */
+ Scsi_Cmnd *prev, *next, *hqptr;
+ /* The NULL test guards the early "goto leave" taken before SCpnt
+ * was chained (DMA buffer allocation failure): without it this
+ * walk would run off the end of the list and dereference NULL.
+ */
+ for(hqptr = shpnt->host_queue; hqptr && hqptr != SCpnt; hqptr = hqptr->next) ;
+ if(hqptr) {
+ prev = hqptr->prev;
+ next = hqptr->next;
+ if(prev)
+ prev->next = next;
+ else
+ shpnt->host_queue = next;
+ if(next) next->prev = prev;
+ }
+ }
+
+ /* Last device block does not exist. Free memory. */
+ if (SDpnt != NULL)
+ scsi_init_free ((char *) SDpnt, sizeof (Scsi_Device));
+
+ if (SCpnt != NULL)
+ scsi_init_free ((char *) SCpnt, sizeof (Scsi_Cmnd));
+
+ /* If we allocated a buffer so we could do DMA, free it now */
+ if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
+ scsi_init_free (scsi_result, 512);
+
+}
+
+/*
+ * The worker for scan_scsis.
+ * Probes a single (channel, id, lun) address: first TEST UNIT READY to
+ * see whether anything answers at all, then INQUIRY to classify the
+ * device.  On success the Scsi_Device at *SDpnt2 is filled in, appended
+ * to the global scsi_devices list, upper-level drivers are offered the
+ * device via their detect() hooks, and a fresh Scsi_Device is allocated
+ * and handed back through SDpnt2 for the next probe.
+ * *max_dev_lun / *sparse_lun may be raised as a side effect for devices
+ * whose blacklist flags (BLIST_SPARSELUN/FORCELUN/MAX5LUN) advertise
+ * multiple logical units.
+ * Returning 0 means Please don't ask further for lun!=0, 1 means OK go on.
+ * Global variables used : scsi_devices(linked list)
+ */
+int scan_scsis_single (int channel, int dev, int lun, int *max_dev_lun,
+ int *sparse_lun, Scsi_Device **SDpnt2, Scsi_Cmnd * SCpnt,
+ struct Scsi_Host * shpnt, char *scsi_result)
+{
+ unsigned char scsi_cmd[12];
+ struct Scsi_Device_Template *sdtpnt;
+ Scsi_Device * SDtail, *SDpnt=*SDpnt2;
+ int bflags, type=-1;
+
+ /* Find the tail of the scsi_devices list so we can append later. */
+ SDtail = scsi_devices;
+ if (scsi_devices)
+ while (SDtail->next)
+ SDtail = SDtail->next;
+
+ memset (SDpnt, 0, sizeof (Scsi_Device));
+ SDpnt->host = shpnt;
+ SDpnt->id = dev;
+ SDpnt->lun = lun;
+ SDpnt->channel = channel;
+
+ /* Some low level driver could use device->type (DB) */
+ SDpnt->type = -1;
+
+ /*
+ * Assume that the device will have handshaking problems, and then fix this
+ * field later if it turns out it doesn't
+ */
+ SDpnt->borken = 1;
+ SDpnt->was_reset = 0;
+ SDpnt->expecting_cc_ua = 0;
+
+ /* 6-byte TEST UNIT READY; the LUN also goes in byte 1 (SCSI-1 style). */
+ scsi_cmd[0] = TEST_UNIT_READY;
+ scsi_cmd[1] = lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[4] = scsi_cmd[5] = 0;
+
+ SCpnt->host = SDpnt->host;
+ SCpnt->device = SDpnt;
+ SCpnt->target = SDpnt->id;
+ SCpnt->lun = SDpnt->lun;
+ SCpnt->channel = SDpnt->channel;
+ {
+ /* Issue the command and sleep on an on-stack semaphore until
+ * scan_scsis_done() ups it from the completion path.
+ */
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result,
+ 256, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
+ down (&sem);
+ }
+
+#if defined(DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: scan_scsis_single id %d lun %d. Return code 0x%08x\n",
+ dev, lun, SCpnt->result);
+ print_driverbyte(SCpnt->result); print_hostbyte(SCpnt->result);
+ printk("\n");
+#endif
+
+ /*
+ * A failed TEST UNIT READY with valid sense data (format 7x) and a
+ * "serious" sense key still proves something is listening at this
+ * address, so keep scanning (return 1); NOT_READY, UNIT_ATTENTION and
+ * ILLEGAL_REQUEST (the latter only for lun 0) are tolerated and we
+ * fall through to the INQUIRY.  Any other failure: give up (return 0).
+ */
+ if (SCpnt->result) {
+ if (((driver_byte (SCpnt->result) & DRIVER_SENSE) ||
+ (status_byte (SCpnt->result) & CHECK_CONDITION)) &&
+ ((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) {
+ if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) &&
+ ((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) &&
+ ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0))
+ return 1;
+ }
+ else
+ return 0;
+ }
+
+#if defined (DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: performing INQUIRY\n");
+#endif
+ /*
+ * Build an INQUIRY command block.
+ */
+ scsi_cmd[0] = INQUIRY;
+ scsi_cmd[1] = (lun << 5) & 0xe0;
+ scsi_cmd[2] = 0;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = 255;
+ scsi_cmd[5] = 0;
+ /* cmd_len 0 => scsi_do_cmd derives the length from the opcode. */
+ SCpnt->cmd_len = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result,
+ 256, scan_scsis_done, SCSI_TIMEOUT, 3);
+ down (&sem);
+ }
+
+#if defined(DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: INQUIRY %s with code 0x%x\n",
+ SCpnt->result ? "failed" : "successful", SCpnt->result);
+#endif
+
+ if (SCpnt->result)
+ return 0; /* assume no peripheral if any sort of error */
+
+ /*
+ * Check the peripheral qualifier field - this tells us whether LUNS
+ * are supported here or not.
+ */
+ if( (scsi_result[0] >> 5) == 3 )
+ {
+ return 0; /* assume no peripheral if any sort of error */
+ }
+
+ /*
+ * It would seem some TOSHIBA CDROM gets things wrong
+ */
+ if (!strncmp (scsi_result + 8, "TOSHIBA", 7) &&
+ !strncmp (scsi_result + 16, "CD-ROM", 6) &&
+ scsi_result[0] == TYPE_DISK) {
+ scsi_result[0] = TYPE_ROM;
+ scsi_result[1] |= 0x80; /* removable */
+ }
+
+ /* Classify the manufacturer from the 8-byte vendor field at offset 8. */
+ if (!strncmp (scsi_result + 8, "NEC", 3)) {
+ if (!strncmp (scsi_result + 16, "CD-ROM DRIVE:84 ", 16) ||
+ !strncmp (scsi_result + 16, "CD-ROM DRIVE:25", 15))
+ SDpnt->manufacturer = SCSI_MAN_NEC_OLDCDR;
+ else
+ SDpnt->manufacturer = SCSI_MAN_NEC;
+ }
+ else if (!strncmp (scsi_result + 8, "TOSHIBA", 7))
+ SDpnt->manufacturer = SCSI_MAN_TOSHIBA;
+ else if (!strncmp (scsi_result + 8, "SONY", 4))
+ SDpnt->manufacturer = SCSI_MAN_SONY;
+ else if (!strncmp (scsi_result + 8, "PIONEER", 7))
+ SDpnt->manufacturer = SCSI_MAN_PIONEER;
+ else
+ SDpnt->manufacturer = SCSI_MAN_UNKNOWN;
+
+ /* Standard INQUIRY layout: vendor @8, model @16, revision @32. */
+ memcpy (SDpnt->vendor, scsi_result + 8, 8);
+ memcpy (SDpnt->model, scsi_result + 16, 16);
+ memcpy (SDpnt->rev, scsi_result + 32, 4);
+
+ SDpnt->removable = (0x80 & scsi_result[1]) >> 7;
+ SDpnt->lockable = SDpnt->removable;
+ SDpnt->changed = 0;
+ SDpnt->access_count = 0;
+ SDpnt->busy = 0;
+ SDpnt->has_cmdblocks = 0;
+ /*
+ * Currently, all sequential devices are assumed to be tapes, all random
+ * devices disk, with the appropriate read only flags set for ROM / WORM
+ * treated as RO.
+ */
+ switch (type = (scsi_result[0] & 0x1f)) {
+ case TYPE_TAPE:
+ case TYPE_DISK:
+ case TYPE_MOD:
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ case TYPE_MEDIUM_CHANGER:
+ SDpnt->writeable = 1;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ SDpnt->writeable = 0;
+ break;
+ default:
+ printk ("scsi: unknown type %d\n", type);
+ }
+
+ SDpnt->single_lun = 0;
+ SDpnt->soft_reset =
+ (scsi_result[7] & 1) && ((scsi_result[3] & 7) == 2);
+ SDpnt->random = (type == TYPE_TAPE) ? 0 : 1;
+ SDpnt->type = (type & 0x1f);
+
+ print_inquiry (scsi_result);
+
+ /* Let every registered upper-level driver claim the new device. */
+ for (sdtpnt = scsi_devicelist; sdtpnt;
+ sdtpnt = sdtpnt->next)
+ if (sdtpnt->detect)
+ SDpnt->attached +=
+ (*sdtpnt->detect) (SDpnt);
+
+ /* ANSI version from byte 2; bump it for SCSI-2+ or CCS responses. */
+ SDpnt->scsi_level = scsi_result[2] & 0x07;
+ if (SDpnt->scsi_level >= 2 ||
+ (SDpnt->scsi_level == 1 &&
+ (scsi_result[3] & 0x0f) == 1))
+ SDpnt->scsi_level++;
+
+ /*
+ * Accommodate drivers that want to sleep when they should be in a polling
+ * loop.
+ */
+ SDpnt->disconnect = 0;
+
+ /*
+ * Get any flags for this device.
+ */
+ bflags = get_device_flags (scsi_result);
+
+ /*
+ * Set the tagged_queue flag for SCSI-II devices that purport to support
+ * tagged queuing in the INQUIRY data.
+ */
+ SDpnt->tagged_queue = 0;
+ if ((SDpnt->scsi_level >= SCSI_2) &&
+ (scsi_result[7] & 2) &&
+ !(bflags & BLIST_NOTQ)) {
+ SDpnt->tagged_supported = 1;
+ SDpnt->current_tag = 0;
+ }
+
+ /*
+ * Some revisions of the Texel CD ROM drives have handshaking problems when
+ * used with the Seagate controllers. Before we know what type of device
+ * we're talking to, we assume it's borken and then change it here if it
+ * turns out that it isn't a TEXEL drive.
+ */
+ if ((bflags & BLIST_BORKEN) == 0)
+ SDpnt->borken = 0;
+
+ /*
+ * If we want to only allow I/O to one of the luns attached to this device
+ * at a time, then we set this flag.
+ */
+ if (bflags & BLIST_SINGLELUN)
+ SDpnt->single_lun = 1;
+
+ /*
+ * These devices need this "key" to unlock the devices so we can use it
+ */
+ if ((bflags & BLIST_KEY) != 0) {
+ printk ("Unlocked floptical drive.\n");
+ SDpnt->lockable = 0;
+ scsi_cmd[0] = MODE_SENSE;
+ scsi_cmd[1] = (lun << 5) & 0xe0;
+ scsi_cmd[2] = 0x2e;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = 0x2a;
+ scsi_cmd[5] = 0;
+ SCpnt->cmd_len = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result, 0x2a,
+ scan_scsis_done, SCSI_TIMEOUT, 3);
+ down (&sem);
+ }
+ }
+ /* Add this device to the linked list at the end */
+ if (SDtail)
+ SDtail->next = SDpnt;
+ else
+ scsi_devices = SDpnt;
+ SDtail = SDpnt;
+
+ /* Hand a fresh Scsi_Device back to the caller for the next probe. */
+ SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
+ *SDpnt2=SDpnt;
+ if (!SDpnt)
+ printk ("scsi: scan_scsis_single: Cannot malloc\n");
+
+
+ /*
+ * Some scsi devices cannot be polled for lun != 0 due to firmware bugs
+ */
+ if (bflags & BLIST_NOLUN)
+ return 0; /* break; */
+
+ /*
+ * If this device is known to support sparse multiple units, override the
+ * other settings, and scan all of them.
+ */
+ if (bflags & BLIST_SPARSELUN) {
+ *max_dev_lun = 8;
+ *sparse_lun = 1;
+ return 1;
+ }
+
+ /*
+ * If this device is known to support multiple units, override the other
+ * settings, and scan all of them.
+ */
+ if (bflags & BLIST_FORCELUN) {
+ *max_dev_lun = 8;
+ return 1;
+ }
+
+ /*
+ * REGAL CDC-4X: avoid hang after LUN 4
+ */
+ if (bflags & BLIST_MAX5LUN) {
+ *max_dev_lun = 5;
+ return 1;
+ }
+
+ /*
+ * We assume the device can't handle lun!=0 if: - it reports scsi-0 (ANSI
+ * SCSI Revision 0) (old drives like MAXTOR XT-3280) or - it reports scsi-1
+ * (ANSI SCSI Revision 1) and Response Data Format 0
+ */
+ if (((scsi_result[2] & 0x07) == 0)
+ ||
+ ((scsi_result[2] & 0x07) == 1 &&
+ (scsi_result[3] & 0x0f) == 0))
+ return 0;
+ return 1;
+}
+
+/*
+ * Flag bits for the internal_timeout array
+ *
+ * These form an escalation ladder used by scsi_times_out(): a plain
+ * timeout (NORMAL_TIMEOUT) escalates to an abort (IN_ABORT), then to a
+ * device reset (IN_RESET), a bus reset (IN_RESET2) and finally a host
+ * adapter reset (IN_RESET3).  The IN_* values are single bits so they
+ * can be OR-ed together in Scsi_Cmnd->internal_timeout.
+ */
+#define NORMAL_TIMEOUT 0
+#define IN_ABORT 1
+#define IN_RESET 2
+#define IN_RESET2 4
+#define IN_RESET3 8
+
+/*
+ * This is our time out function, called when the timer expires for a
+ * given host adapter. It will attempt to abort the currently executing
+ * command, that failing perform a kernel panic.
+ *
+ * The switch deliberately falls through from case to case: each stage
+ * that fails to make progress escalates to the next recovery step
+ * (abort -> reset -> bus reset -> host reset).
+ */
+
+static void scsi_times_out (Scsi_Cmnd * SCpnt)
+{
+
+ switch (SCpnt->internal_timeout & (IN_ABORT | IN_RESET | IN_RESET2 | IN_RESET3))
+ {
+ case NORMAL_TIMEOUT:
+ {
+#ifdef DEBUG_TIMEOUT
+ scsi_dump_status();
+#endif
+ }
+
+ if (!scsi_abort (SCpnt, DID_TIME_OUT))
+ return;
+ /* fall through: the abort could not be started, try a reset */
+ case IN_ABORT:
+ printk("SCSI host %d abort (pid %ld) timed out - resetting\n",
+ SCpnt->host->host_no, SCpnt->pid);
+ if (!scsi_reset (SCpnt, SCSI_RESET_ASYNCHRONOUS))
+ return;
+ /* fall through: the reset could not be started either */
+ case IN_RESET:
+ case (IN_ABORT | IN_RESET):
+ /* This might be controversial, but if there is a bus hang,
+ * you might conceivably want the machine up and running
+ * esp if you have an ide disk.
+ */
+ printk("SCSI host %d channel %d reset (pid %ld) timed out - "
+ "trying harder\n",
+ SCpnt->host->host_no, SCpnt->channel, SCpnt->pid);
+ SCpnt->internal_timeout &= ~IN_RESET;
+ SCpnt->internal_timeout |= IN_RESET2;
+ scsi_reset (SCpnt,
+ SCSI_RESET_ASYNCHRONOUS | SCSI_RESET_SUGGEST_BUS_RESET);
+ return;
+ case IN_RESET2:
+ case (IN_ABORT | IN_RESET2):
+ /* Obviously the bus reset didn't work.
+ * Let's try even harder and call for an HBA reset.
+ * Maybe the HBA itself crashed and this will shake it loose.
+ */
+ printk("SCSI host %d reset (pid %ld) timed out - trying to shake it loose\n",
+ SCpnt->host->host_no, SCpnt->pid);
+ SCpnt->internal_timeout &= ~(IN_RESET | IN_RESET2);
+ SCpnt->internal_timeout |= IN_RESET3;
+ scsi_reset (SCpnt,
+ SCSI_RESET_ASYNCHRONOUS | SCSI_RESET_SUGGEST_HOST_RESET);
+ return;
+
+ default:
+ printk("SCSI host %d reset (pid %ld) timed out again -\n",
+ SCpnt->host->host_no, SCpnt->pid);
+ printk("probably an unrecoverable SCSI bus or device hang.\n");
+ return;
+
+ }
+
+}
+
+
+/* This function takes a quick look at a request, and decides if it
+ * can be queued now, or if there would be a stall while waiting for
+ * something else to finish. This routine assumes that interrupts are
+ * turned off when entering the routine. It is the responsibility
+ * of the calling code to ensure that this is the case.
+ *
+ * Returns a free Scsi_Cmnd initialized from req (which may be split to
+ * fit the host's scatter-gather table), or NULL if nothing can be
+ * queued right now.  When req cannot be fully consumed, the leftover
+ * buffer heads are left on req for the caller to resubmit.
+ */
+
+Scsi_Cmnd * request_queueable (struct request * req, Scsi_Device * device)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ int tablesize;
+ Scsi_Cmnd * found = NULL;
+ struct buffer_head * bh, *bhp;
+
+ if (!device)
+ panic ("No device passed to request_queueable().\n");
+
+ if (req && req->rq_status == RQ_INACTIVE)
+ panic("Inactive in request_queueable");
+
+ /*
+ * Look for a free command block. If we have been instructed not to queue
+ * multiple commands to multi-lun devices, then check to see what else is
+ * going for this device first.
+ */
+
+ if (!device->single_lun) {
+ SCpnt = device->device_queue;
+ while(SCpnt){
+ if(SCpnt->request.rq_status == RQ_INACTIVE) break;
+ SCpnt = SCpnt->device_next;
+ }
+ } else {
+ /* single_lun: scan the whole host queue; refuse to queue if any
+ * command for this target is already active on any lun.
+ */
+ SCpnt = device->host->host_queue;
+ while(SCpnt){
+ if(SCpnt->channel == device->channel
+ && SCpnt->target == device->id) {
+ if (SCpnt->lun == device->lun) {
+ if(found == NULL
+ && SCpnt->request.rq_status == RQ_INACTIVE)
+ {
+ found=SCpnt;
+ }
+ }
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ /*
+ * I think that we should really limit things to one
+ * outstanding command per device - this is what tends
+ * to trip up buggy firmware.
+ */
+ return NULL;
+ }
+ }
+ SCpnt = SCpnt->next;
+ }
+ SCpnt = found;
+ }
+
+ if (!SCpnt) return NULL;
+
+ if (SCSI_BLOCK(device->host)) return NULL;
+
+ if (req) {
+ memcpy(&SCpnt->request, req, sizeof(struct request));
+ tablesize = device->host->sg_tablesize;
+ bhp = bh = req->bh;
+ if(!tablesize) bh = NULL;
+ /* Take a quick look through the table to see how big it is.
+ * We already have our copy of req, so we can mess with that
+ * if we want to.
+ */
+ while(req->nr_sectors && bh){
+ bhp = bhp->b_reqnext;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ if(!tablesize) break;
+ bh = bhp;
+ }
+ if(req->nr_sectors && bh && bh->b_reqnext){ /* Any leftovers? */
+ SCpnt->request.bhtail = bh;
+ req->bh = bh->b_reqnext; /* Divide request */
+ bh->b_reqnext = NULL;
+ bh = req->bh;
+
+ /* Now reset things so that req looks OK */
+ SCpnt->request.nr_sectors -= req->nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ SCpnt->request.sem = NULL; /* Wait until whole thing done */
+ } else {
+ /* The whole request fit; retire it and wake anyone waiting
+ * for a free request slot.
+ */
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+ }
+ } else {
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Busy, but no request */
+ SCpnt->request.sem = NULL; /* And no one is waiting for the device
+ * either */
+ }
+
+ SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
+ SCpnt->old_use_sg = 0;
+ SCpnt->transfersize = 0;
+ SCpnt->underflow = 0;
+ SCpnt->cmd_len = 0;
+
+/* Since not everyone seems to set the device info correctly
+ * before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
+ */
+ SCpnt->channel = device->channel;
+ SCpnt->lun = device->lun;
+ SCpnt->target = device->id;
+
+ return SCpnt;
+}
+
+/* This function returns a structure pointer that will be valid for
+ * the device. The wait parameter tells us whether we should wait for
+ * the unit to become free or not. We are also able to tell this routine
+ * not to return a descriptor if the host is unable to accept any more
+ * commands for the time being. We need to keep in mind that there is no
+ * guarantee that the host remain not busy. Keep in mind the
+ * request_queueable function also knows the internal allocation scheme
+ * of the packets for each device
+ *
+ * Returns a Scsi_Cmnd initialized from *reqp (splitting the request if
+ * it exceeds the host's scatter-gather table, just like
+ * request_queueable), or NULL when nothing is available and wait == 0.
+ * With wait != 0 it sleeps on device->device_wait and retries.
+ */
+
+Scsi_Cmnd * allocate_device (struct request ** reqp, Scsi_Device * device,
+ int wait)
+{
+ kdev_t dev;
+ struct request * req = NULL;
+ int tablesize;
+ unsigned long flags;
+ struct buffer_head * bh, *bhp;
+ struct Scsi_Host * host;
+ Scsi_Cmnd * SCpnt = NULL;
+ Scsi_Cmnd * SCwait = NULL;
+ Scsi_Cmnd * found = NULL;
+
+ if (!device)
+ panic ("No device passed to allocate_device().\n");
+
+ if (reqp) req = *reqp;
+
+ /* See if this request has already been queued by an interrupt routine */
+ if (req) {
+ if(req->rq_status == RQ_INACTIVE) return NULL;
+ dev = req->rq_dev;
+ } else
+ dev = 0; /* unused */
+
+ host = device->host;
+
+ /* In interrupt context we must never sleep on a blocked host. */
+ if (intr_count && SCSI_BLOCK(host)) return NULL;
+
+ while (1==1){
+ if (!device->single_lun) {
+ SCpnt = device->device_queue;
+ while(SCpnt){
+ SCwait = SCpnt;
+ if(SCpnt->request.rq_status == RQ_INACTIVE) break;
+ SCpnt = SCpnt->device_next;
+ }
+ } else {
+ /* single_lun: allow only one outstanding command per target,
+ * across all of its luns.
+ */
+ SCpnt = device->host->host_queue;
+ while(SCpnt){
+ if(SCpnt->channel == device->channel
+ && SCpnt->target == device->id) {
+ if (SCpnt->lun == device->lun) {
+ SCwait = SCpnt;
+ if(found == NULL
+ && SCpnt->request.rq_status == RQ_INACTIVE)
+ {
+ found=SCpnt;
+ }
+ }
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ /*
+ * I think that we should really limit things to one
+ * outstanding command per device - this is what tends
+ * to trip up buggy firmware.
+ */
+ found = NULL;
+ break;
+ }
+ }
+ SCpnt = SCpnt->next;
+ }
+ SCpnt = found;
+ }
+
+ save_flags(flags);
+ cli();
+ /* See if this request has already been queued by an interrupt routine
+ */
+ if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
+ restore_flags(flags);
+ return NULL;
+ }
+ if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) /* Might have changed */
+ {
+#if 1 /* NEW CODE */
+ if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE){
+ sleep_on(&device->device_wait);
+ restore_flags(flags);
+ } else {
+ restore_flags(flags);
+ if (!wait) return NULL;
+ if (!SCwait) {
+ printk("Attempt to allocate device channel %d,"
+ " target %d, lun %d\n", device->channel,
+ device->id, device->lun);
+ panic("No device found in allocate_device\n");
+ }
+ }
+#else /* ORIGINAL CODE */
+ restore_flags(flags);
+ if(!wait) return NULL;
+ if (!SCwait) {
+ printk("Attempt to allocate device channel %d, target"
+ " %d, lun %d\n", device->channel, device->id,
+ device->lun);
+ panic("No device found in allocate_device\n");
+ }
+ SCSI_SLEEP(&device->device_wait,
+ (SCwait->request.rq_status != RQ_INACTIVE));
+#endif
+ } else {
+ if (req) {
+ memcpy(&SCpnt->request, req, sizeof(struct request));
+ tablesize = device->host->sg_tablesize;
+ bhp = bh = req->bh;
+ if(!tablesize) bh = NULL;
+ /* Take a quick look through the table to see how big it is.
+ * We already have our copy of req, so we can mess with that
+ * if we want to.
+ */
+ while(req->nr_sectors && bh){
+ bhp = bhp->b_reqnext;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ if(!tablesize) break;
+ bh = bhp;
+ }
+ if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */
+ SCpnt->request.bhtail = bh;
+ req->bh = bh->b_reqnext; /* Divide request */
+ bh->b_reqnext = NULL;
+ bh = req->bh;
+ /* Now reset things so that req looks OK */
+ SCpnt->request.nr_sectors -= req->nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ SCpnt->request.sem = NULL; /* Wait until whole thing done*/
+ }
+ else
+ {
+ req->rq_status = RQ_INACTIVE;
+ *reqp = req->next;
+ wake_up(&wait_for_request);
+ }
+ } else {
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = NULL; /* And no one is waiting for this
+ * to complete */
+ }
+ restore_flags(flags);
+ break;
+ }
+ }
+
+ SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
+ SCpnt->old_use_sg = 0;
+ SCpnt->transfersize = 0; /* No default transfer size */
+ SCpnt->cmd_len = 0;
+
+ SCpnt->underflow = 0; /* Do not flag underflow conditions */
+
+ /* Since not everyone seems to set the device info correctly
+ * before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
+ */
+ SCpnt->channel = device->channel;
+ SCpnt->lun = device->lun;
+ SCpnt->target = device->id;
+
+ return SCpnt;
+}
+
+/*
+ * This is inline because we have stack problems if we recurse too deeply.
+ *
+ * Hands an already-initialized Scsi_Cmnd to the low-level driver:
+ * assigns a serial number, honors the post-reset settling delay, starts
+ * the timeout, and then either calls queuecommand() (queueing hosts)
+ * or command() followed by scsi_done() (polled hosts).
+ */
+
+inline void internal_cmnd (Scsi_Cmnd * SCpnt)
+{
+ unsigned long flags, timeout;
+ struct Scsi_Host * host;
+#ifdef DEBUG_DELAY
+ unsigned long clock;
+#endif
+
+#if DEBUG
+ unsigned long *ret = 0;
+#ifdef __mips__
+ __asm__ __volatile__ ("move\t%0,$31":"=r"(ret));
+#else
+ ret = __builtin_return_address(0);
+#endif
+#endif
+
+ host = SCpnt->host;
+
+ save_flags(flags);
+ cli();
+ /* Assign a unique nonzero serial_number. */
+ if (++serial_number == 0) serial_number = 1;
+ SCpnt->serial_number = serial_number;
+
+ /*
+ * We will wait MIN_RESET_DELAY clock ticks after the last reset so
+ * we can avoid the drive not being ready.
+ */
+ timeout = host->last_reset + MIN_RESET_DELAY;
+ if (jiffies < timeout) {
+ int ticks_remaining = timeout - jiffies;
+ /*
+ * NOTE: This may be executed from within an interrupt
+ * handler! This is bad, but for now, it'll do. The irq
+ * level of the interrupt handler has been masked out by the
+ * platform dependent interrupt handling code already, so the
+ * sti() here will not cause another call to the SCSI host's
+ * interrupt handler (assuming there is one irq-level per
+ * host).
+ */
+ sti();
+ while (--ticks_remaining >= 0) udelay(1000000/HZ);
+ host->last_reset = jiffies - MIN_RESET_DELAY;
+ }
+ restore_flags(flags);
+
+ update_timeout(SCpnt, SCpnt->timeout_per_command);
+
+ /*
+ * We will use a queued command if possible, otherwise we will emulate the
+ * queuing and calling of completion function ourselves.
+ */
+#ifdef DEBUG
+ printk("internal_cmnd (host = %d, channel = %d, target = %d, "
+ "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
+ SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
+ SCpnt->buffer, SCpnt->bufflen, SCpnt->done);
+#endif
+
+ if (host->can_queue)
+ {
+#ifdef DEBUG
+ printk("queuecommand : routine at %p\n",
+ host->hostt->queuecommand);
+#endif
+ /* This locking tries to prevent all sorts of races between
+ * queuecommand and the interrupt code. In effect,
+ * we are only allowed to be in queuecommand once at
+ * any given time, and we can only be in the interrupt
+ * handler and the queuecommand function at the same time
+ * when queuecommand is called while servicing the
+ * interrupt.
+ */
+
+ if(!intr_count && SCpnt->host->irq)
+ disable_irq(SCpnt->host->irq);
+
+ host->hostt->queuecommand (SCpnt, scsi_done);
+
+ if(!intr_count && SCpnt->host->irq)
+ enable_irq(SCpnt->host->irq);
+ }
+ else
+ {
+ int temp;
+
+#ifdef DEBUG
+ printk("command() : routine at %p\n", host->hostt->command);
+#endif
+ /* Polled host: run the command synchronously and complete it
+ * ourselves.
+ */
+ temp = host->hostt->command (SCpnt);
+ SCpnt->result = temp;
+#ifdef DEBUG_DELAY
+ clock = jiffies + 4 * HZ;
+ while (jiffies < clock) barrier();
+ printk("done(host = %d, result = %04x) : routine at %p\n",
+ host->host_no, temp, host->hostt->command);
+#endif
+ scsi_done(SCpnt);
+ }
+#ifdef DEBUG
+ printk("leaving internal_cmnd()\n");
+#endif
+}
+
+/*
+ * Reuse SCpnt to issue a REQUEST SENSE for its just-failed command.
+ * The command block is rewritten in place (the WAS_SENSE flag tells
+ * scsi_done() to interpret the completion as sense data) and the
+ * result lands in SCpnt->sense_buffer.
+ */
+static void scsi_request_sense (Scsi_Cmnd * SCpnt)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ SCpnt->flags |= WAS_SENSE | ASKED_FOR_SENSE;
+ update_timeout(SCpnt, SENSE_TIMEOUT);
+ restore_flags(flags);
+
+
+ memcpy ((void *) SCpnt->cmnd , (void *) generic_sense,
+ sizeof(generic_sense));
+
+ SCpnt->cmnd[1] = SCpnt->lun << 5;
+ SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer);
+
+ SCpnt->request_buffer = &SCpnt->sense_buffer;
+ SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
+ SCpnt->use_sg = 0;
+ SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+ internal_cmnd (SCpnt);
+}
+
+
+
+/*
+ * scsi_do_cmd sends all the commands out to the low-level driver. It
+ * handles the specifics required for each low level driver - ie queued
+ * or non queued. It also prevents conflicts when different high level
+ * drivers go for the same host at the same time.
+ *
+ * cmnd must point at (at least) 12 bytes of CDB; buffer/bufflen describe
+ * the data phase, done is the midlayer completion callback, timeout is
+ * in jiffies and retries is the number of automatic retries allowed.
+ * May sleep if the host is blocked, so callers in interrupt context
+ * must ensure SCSI_BLOCK(host) is false beforehand.
+ */
+
+void scsi_do_cmd (Scsi_Cmnd * SCpnt, const void *cmnd ,
+ void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
+ int timeout, int retries)
+{
+ unsigned long flags;
+ struct Scsi_Host * host = SCpnt->host;
+
+#ifdef DEBUG
+ {
+ int i;
+ int target = SCpnt->target;
+ printk ("scsi_do_cmd (host = %d, channel = %d target = %d, "
+ "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
+ "retries = %d)\n"
+ "command : " , host->host_no, SCpnt->channel, target, buffer,
+ bufflen, done, timeout, retries);
+ for (i = 0; i < 10; ++i)
+ printk ("%02x ", ((unsigned char *) cmnd)[i]);
+ printk("\n");
+ }
+#endif
+
+ if (!host)
+ {
+ panic ("Invalid or not present host.\n");
+ }
+
+
+ /*
+ * We must prevent reentrancy to the lowlevel host driver. This prevents
+ * it - we enter a loop until the host we want to talk to is not busy.
+ * Race conditions are prevented, as interrupts are disabled in between the
+ * time we check for the host being not busy, and the time we mark it busy
+ * ourselves.
+ */
+
+ save_flags(flags);
+ cli();
+ SCpnt->pid = scsi_pid++;
+
+ while (SCSI_BLOCK(host)) {
+ restore_flags(flags);
+ SCSI_SLEEP(&host->host_wait, SCSI_BLOCK(host));
+ cli();
+ }
+
+ if (host->block) host_active = host;
+
+ host->host_busy++;
+ restore_flags(flags);
+
+ /*
+ * Our own function scsi_done (which marks the host as not busy, disables
+ * the timeout counter, etc) will be called by us or by the
+ * scsi_hosts[host].queuecommand() function needs to also call
+ * the completion function for the high level driver.
+ */
+
+ memcpy ((void *) SCpnt->data_cmnd , (const void *) cmnd, 12);
+#if 0
+ SCpnt->host = host;
+ SCpnt->channel = channel;
+ SCpnt->target = target;
+ SCpnt->lun = (SCpnt->data_cmnd[1] >> 5);
+#endif
+ SCpnt->reset_chain = NULL;
+ SCpnt->serial_number = 0;
+ SCpnt->bufflen = bufflen;
+ SCpnt->buffer = buffer;
+ SCpnt->flags = 0;
+ SCpnt->retries = 0;
+ SCpnt->allowed = retries;
+ SCpnt->done = done;
+ SCpnt->timeout_per_command = timeout;
+
+ memcpy ((void *) SCpnt->cmnd , (const void *) cmnd, 12);
+ /* Zero the sense buffer. Some host adapters automatically request
+ * sense on error. 0 is not a valid sense code.
+ */
+ memset ((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
+ SCpnt->request_buffer = buffer;
+ SCpnt->request_bufflen = bufflen;
+ SCpnt->old_use_sg = SCpnt->use_sg;
+ /* cmd_len 0 means "derive it from the opcode group". */
+ if (SCpnt->cmd_len == 0)
+ SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+ SCpnt->old_cmd_len = SCpnt->cmd_len;
+
+ /* Start the timer ticking. */
+
+ SCpnt->internal_timeout = NORMAL_TIMEOUT;
+ SCpnt->abort_reason = 0;
+ internal_cmnd (SCpnt);
+
+#ifdef DEBUG
+ printk ("Leaving scsi_do_cmd()\n");
+#endif
+}
+
+/*
+ * Classify the sense data attached to SCpnt.
+ * Returns 0 (no error), SUGGEST_IS_OK, SUGGEST_RETRY, SUGGEST_REMAP,
+ * SUGGEST_ABORT, or SUGGEST_SENSE (no valid sense yet - go fetch it);
+ * scsi_done() uses the value to pick its recovery action.
+ */
+static int check_sense (Scsi_Cmnd * SCpnt)
+{
+ /* If there is no sense information, request it. If we have already
+ * requested it, there is no point in asking again - the firmware must
+ * be confused.
+ */
+ if (((SCpnt->sense_buffer[0] & 0x70) >> 4) != 7) {
+ if(!(SCpnt->flags & ASKED_FOR_SENSE))
+ return SUGGEST_SENSE;
+ else
+ return SUGGEST_RETRY;
+ }
+
+ SCpnt->flags &= ~ASKED_FOR_SENSE;
+
+#ifdef DEBUG_INIT
+ printk("scsi%d, channel%d : ", SCpnt->host->host_no, SCpnt->channel);
+ print_sense("", SCpnt);
+ printk("\n");
+#endif
+ /* Filemark / EOM / ILI bits set: let the upper layer decide. */
+ if (SCpnt->sense_buffer[2] & 0xe0)
+ return SUGGEST_ABORT;
+
+ switch (SCpnt->sense_buffer[2] & 0xf)
+ {
+ case NO_SENSE:
+ return 0;
+ case RECOVERED_ERROR:
+ return SUGGEST_IS_OK;
+
+ case ABORTED_COMMAND:
+ return SUGGEST_RETRY;
+ case NOT_READY:
+ case UNIT_ATTENTION:
+ /*
+ * If we are expecting a CC/UA because of a bus reset that we
+ * performed, treat this just as a retry. Otherwise this is
+ * information that we should pass up to the upper-level driver
+ * so that we can deal with it there.
+ */
+ if( SCpnt->device->expecting_cc_ua )
+ {
+ SCpnt->device->expecting_cc_ua = 0;
+ return SUGGEST_RETRY;
+ }
+ return SUGGEST_ABORT;
+
+ /* these three are not supported */
+ case COPY_ABORTED:
+ case VOLUME_OVERFLOW:
+ case MISCOMPARE:
+ /* fall through: grouped with MEDIUM_ERROR -> SUGGEST_REMAP */
+
+ case MEDIUM_ERROR:
+ return SUGGEST_REMAP;
+ case BLANK_CHECK:
+ case DATA_PROTECT:
+ case HARDWARE_ERROR:
+ case ILLEGAL_REQUEST:
+ default:
+ return SUGGEST_ABORT;
+ }
+}
+
+/* This function is the mid-level interrupt routine, which decides how
+ * to handle error conditions. Each invocation of this function must
+ * do one and *only* one of the following:
+ *
+ * (1) Call last_cmnd[host].done. This is done for fatal errors and
+ * normal completion, and indicates that the handling for this
+ * request is complete.
+ * (2) Call internal_cmnd to requeue the command. This will result in
+ * scsi_done being called again when the retry is complete.
+ * (3) Call scsi_request_sense. This asks the host adapter/drive for
+ * more information about the error condition. When the information
+ * is available, scsi_done will be called again.
+ * (4) Call reset(). This is sort of a last resort, and the idea is that
+ * this may kick things loose and get the drive working again. reset()
+ * automatically calls scsi_request_sense, and thus scsi_done will be
+ * called again once the reset is complete.
+ *
+ * If none of the above actions are taken, the drive in question
+ * will hang. If more than one of the above actions are taken by
+ * scsi_done, then unpredictable behavior will result.
+ */
static void scsi_done (Scsi_Cmnd * SCpnt)
{
    int status=0;   /* disposition of the command: FINISHED/MAYREDO/REDO/PENDING */
    int exit=0;     /* driver byte + suggestion, merged into result at the end */
    int checked;
    int oldto;      /* timeout the command had before we cleared it below */
    struct Scsi_Host * host = SCpnt->host;
    int result = SCpnt->result;
    SCpnt->serial_number = 0;
    /* Stop the watchdog for this command; remember the old value so the
     * REDO paths can re-arm it. */
    oldto = update_timeout(SCpnt, 0);

#ifdef DEBUG_TIMEOUT
    if(result) printk("Non-zero result in scsi_done %x %d:%d\n",
        result, SCpnt->target, SCpnt->lun);
#endif

    /* If we requested an abort, (and we got it) then fix up the return
     * status to say why
     */
    if(host_byte(result) == DID_ABORT && SCpnt->abort_reason)
        SCpnt->result = result = (result & 0xff00ffff) |
            (SCpnt->abort_reason << 16);


#define FINISHED 0
#define MAYREDO 1
#define REDO 3
#define PENDING 4

#ifdef DEBUG
    printk("In scsi_done(host = %d, result = %06x)\n", host->host_no, result);
#endif

    /* A REQUEST SENSE just completed: restore the scatter-gather and
     * command-length fields that scsi_request_sense() saved away. */
    if(SCpnt->flags & WAS_SENSE)
    {
        SCpnt->use_sg = SCpnt->old_use_sg;
        SCpnt->cmd_len = SCpnt->old_cmd_len;
    }

    /* Dispatch on the host byte first, then message byte, then status byte. */
    switch (host_byte(result))
    {
    case DID_OK:
        if (status_byte(result) && (SCpnt->flags & WAS_SENSE))
            /* Failed to obtain sense information */
        {
            SCpnt->flags &= ~WAS_SENSE;
#if 0 /* This cannot possibly be correct. */
            SCpnt->internal_timeout &= ~SENSE_TIMEOUT;
#endif

            if (!(SCpnt->flags & WAS_RESET))
            {
                printk("scsi%d : channel %d target %d lun %d request sense"
                       " failed, performing reset.\n",
                       SCpnt->host->host_no, SCpnt->channel, SCpnt->target,
                       SCpnt->lun);
                /* scsi_reset() re-enters scsi_done() when it completes. */
                scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS);
                return;
            }
            else
            {
                exit = (DRIVER_HARD | SUGGEST_ABORT);
                status = FINISHED;
            }
        }
        else switch(msg_byte(result))
        {
        case COMMAND_COMPLETE:
            switch (status_byte(result))
            {
            case GOOD:
                if (SCpnt->flags & WAS_SENSE)
                {
                    /* This was the completion of a REQUEST SENSE we issued:
                     * parse the sense data to decide the original command's fate. */
#ifdef DEBUG
                    printk ("In scsi_done, GOOD status, COMMAND COMPLETE, "
                            "parsing sense information.\n");
#endif
                    SCpnt->flags &= ~WAS_SENSE;
#if 0 /* This cannot possibly be correct. */
                    SCpnt->internal_timeout &= ~SENSE_TIMEOUT;
#endif

                    switch (checked = check_sense(SCpnt))
                    {
                    case SUGGEST_SENSE:
                    case 0:
#ifdef DEBUG
                        printk("NO SENSE. status = REDO\n");
#endif
                        update_timeout(SCpnt, oldto);
                        status = REDO;
                        break;
                    case SUGGEST_IS_OK:
                        break;
                    case SUGGEST_REMAP:
#ifdef DEBUG
                        printk("SENSE SUGGEST REMAP - status = FINISHED\n");
#endif
                        status = FINISHED;
                        exit = DRIVER_SENSE | SUGGEST_ABORT;
                        break;
                    case SUGGEST_RETRY:
#ifdef DEBUG
                        printk("SENSE SUGGEST RETRY - status = MAYREDO\n");
#endif
                        status = MAYREDO;
                        exit = DRIVER_SENSE | SUGGEST_RETRY;
                        break;
                    case SUGGEST_ABORT:
#ifdef DEBUG
                        printk("SENSE SUGGEST ABORT - status = FINISHED");
#endif
                        status = FINISHED;
                        exit = DRIVER_SENSE | SUGGEST_ABORT;
                        break;
                    default:
                        printk ("Internal error %s %d \n", __FILE__,
                                __LINE__);
                    }
                } /* end WAS_SENSE */
                else
                {
#ifdef DEBUG
                    printk("COMMAND COMPLETE message returned, "
                           "status = FINISHED. \n");
#endif
                    exit = DRIVER_OK;
                    status = FINISHED;
                }
                break;

            case CHECK_CONDITION:
            case COMMAND_TERMINATED:
                /* Target has sense data for us; check_sense() decides
                 * whether to fetch it (SUGGEST_SENSE -> PENDING) or act now. */
                switch (check_sense(SCpnt))
                {
                case 0:
                    update_timeout(SCpnt, oldto);
                    status = REDO;
                    break;
                case SUGGEST_REMAP:
                    status = FINISHED;
                    exit = DRIVER_SENSE | SUGGEST_ABORT;
                    break;
                case SUGGEST_RETRY:
                    status = MAYREDO;
                    exit = DRIVER_SENSE | SUGGEST_RETRY;
                    break;
                case SUGGEST_ABORT:
                    status = FINISHED;
                    exit = DRIVER_SENSE | SUGGEST_ABORT;
                    break;
                case SUGGEST_SENSE:
                    scsi_request_sense (SCpnt);
                    status = PENDING;
                    break;
                }
                break;

            case CONDITION_GOOD:
            case INTERMEDIATE_GOOD:
            case INTERMEDIATE_C_GOOD:
                break;

            case BUSY:
            case QUEUE_FULL:
                /* Device is temporarily unable to accept the command: retry. */
                update_timeout(SCpnt, oldto);
                status = REDO;
                break;

            case RESERVATION_CONFLICT:
                printk("scsi%d, channel %d : RESERVATION CONFLICT performing"
                       " reset.\n", SCpnt->host->host_no, SCpnt->channel);
                scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS);
                return;
#if 0
                exit = DRIVER_SOFT | SUGGEST_ABORT;
                status = MAYREDO;
                break;
#endif
            default:
                printk ("Internal error %s %d \n"
                        "status byte = %d \n", __FILE__,
                        __LINE__, status_byte(result));

            }
            break;
        default:
            panic("scsi: unsupported message byte %d received\n",
                  msg_byte(result));
        }
        break;
    case DID_TIME_OUT:
#ifdef DEBUG
        printk("Host returned DID_TIME_OUT - ");
#endif

        if (SCpnt->flags & WAS_TIMEDOUT)
        {
#ifdef DEBUG
            printk("Aborting\n");
#endif
            /*
              Allow TEST_UNIT_READY and INQUIRY commands to timeout early
              without causing resets. All other commands should be retried.
            */
            if (SCpnt->cmnd[0] != TEST_UNIT_READY &&
                SCpnt->cmnd[0] != INQUIRY)
                status = MAYREDO;
            exit = (DRIVER_TIMEOUT | SUGGEST_ABORT);
        }
        else
        {
#ifdef DEBUG
            printk ("Retrying.\n");
#endif
            /* First timeout: flag it and retry once before escalating. */
            SCpnt->flags |= WAS_TIMEDOUT;
            SCpnt->internal_timeout &= ~IN_ABORT;
            status = REDO;
        }
        break;
    case DID_BUS_BUSY:
    case DID_PARITY:
        status = REDO;
        break;
    case DID_NO_CONNECT:
#ifdef DEBUG
        printk("Couldn't connect.\n");
#endif
        exit = (DRIVER_HARD | SUGGEST_ABORT);
        break;
    case DID_ERROR:
        status = MAYREDO;
        exit = (DRIVER_HARD | SUGGEST_ABORT);
        break;
    case DID_BAD_TARGET:
    case DID_ABORT:
        exit = (DRIVER_INVALID | SUGGEST_ABORT);
        break;
    case DID_RESET:
        if (SCpnt->flags & IS_RESETTING)
        {
            /* We reset the bus ourselves; just reissue the command. */
            SCpnt->flags &= ~IS_RESETTING;
            status = REDO;
            break;
        }

        if(msg_byte(result) == GOOD &&
           status_byte(result) == CHECK_CONDITION) {
            switch (check_sense(SCpnt)) {
            case 0:
                update_timeout(SCpnt, oldto);
                status = REDO;
                break;
            case SUGGEST_REMAP:
            case SUGGEST_RETRY:
                status = MAYREDO;
                exit = DRIVER_SENSE | SUGGEST_RETRY;
                break;
            case SUGGEST_ABORT:
                status = FINISHED;
                exit = DRIVER_SENSE | SUGGEST_ABORT;
                break;
            case SUGGEST_SENSE:
                scsi_request_sense (SCpnt);
                status = PENDING;
                break;
            }
        } else {
            status=REDO;
            exit = SUGGEST_RETRY;
        }
        break;
    default :
        exit = (DRIVER_ERROR | SUGGEST_DIE);
    }

    /* Second stage: act on the disposition computed above. */
    switch (status)
    {
    case FINISHED:
    case PENDING:
        break;
    case MAYREDO:
#ifdef DEBUG
        printk("In MAYREDO, allowing %d retries, have %d\n",
               SCpnt->allowed, SCpnt->retries);
#endif
        if ((++SCpnt->retries) < SCpnt->allowed)
        {
            /* Halfway through the retry budget, try a reset (rate-limited
             * by MIN_RESET_PERIOD and skipped if already reset). */
            if ((SCpnt->retries >= (SCpnt->allowed >> 1))
                && !(SCpnt->host->last_reset > 0 &&
                     jiffies < SCpnt->host->last_reset + MIN_RESET_PERIOD)
                && !(SCpnt->flags & WAS_RESET))
            {
                printk("scsi%d channel %d : resetting for second half of retries.\n",
                       SCpnt->host->host_no, SCpnt->channel);
                scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS);
                break;
            }

        }
        else
        {
            /* Out of retries: give up and complete the command. */
            status = FINISHED;
            break;
        }
        /* fall through to REDO */

    case REDO:

        if (SCpnt->flags & WAS_SENSE)
            scsi_request_sense(SCpnt);
        else
        {
            /* Restore the original CDB and buffers before reissuing. */
            memcpy ((void *) SCpnt->cmnd,
                    (void*) SCpnt->data_cmnd,
                    sizeof(SCpnt->data_cmnd));
            SCpnt->request_buffer = SCpnt->buffer;
            SCpnt->request_bufflen = SCpnt->bufflen;
            SCpnt->use_sg = SCpnt->old_use_sg;
            SCpnt->cmd_len = SCpnt->old_cmd_len;
            internal_cmnd (SCpnt);
        }
        break;
    default:
        INTERNAL_ERROR;
    }

    if (status == FINISHED) {
#ifdef DEBUG
        printk("Calling done function - at address %p\n", SCpnt->done);
#endif
        host->host_busy--; /* Indicate that we are free */

        if (host->block && host->host_busy == 0) {
            host_active = NULL;

            /* For block devices "wake_up" is done in end_scsi_request */
            if (MAJOR(SCpnt->request.rq_dev) != SCSI_DISK_MAJOR &&
                MAJOR(SCpnt->request.rq_dev) != SCSI_CDROM_MAJOR) {
                struct Scsi_Host * next;

                for (next = host->block; next != host; next = next->block)
                    wake_up(&next->host_wait);
            }

        }

        wake_up(&host->host_wait);
        /* Merge the driver byte into the result before calling back. */
        SCpnt->result = result | ((exit & 0xff) << 24);
        SCpnt->use_sg = SCpnt->old_use_sg;
        SCpnt->cmd_len = SCpnt->old_cmd_len;
        SCpnt->done (SCpnt);
    }

#undef FINISHED
#undef REDO
#undef MAYREDO
#undef PENDING
}
+
+/*
+ * The scsi_abort function interfaces with the abort() function of the host
+ * we are aborting, and causes the current command to not complete. The
+ * caller should deal with any error messages or status returned on the
+ * next call.
+ *
+ * This will not be called reentrantly for a given host.
+ */
+
/*
 * Since we are nice guys and have specified that abort() and reset()
 * can be non-reentrant, the internal_timeout flags are used to
 * protect against reentrant calls.
 */
+
+
int scsi_abort (Scsi_Cmnd * SCpnt, int why)
{
    int oldto;
    unsigned long flags;
    struct Scsi_Host * host = SCpnt->host;

    /* Loop until we either own the IN_ABORT flag or bail out early. */
    while(1)
    {
        save_flags(flags);
        cli();

        /*
         * Protect against races here. If the command is done, or we are
         * on a different command forget it.
         */
        if (SCpnt->serial_number != SCpnt->serial_number_at_timeout) {
            restore_flags(flags);
            return 0;
        }

        if (SCpnt->internal_timeout & IN_ABORT)
        {
            /* Another abort is in flight for this command: busy-wait for
             * it to clear, then retry the whole protocol from the top. */
            restore_flags(flags);
            while (SCpnt->internal_timeout & IN_ABORT)
                barrier();
        }
        else
        {
            SCpnt->internal_timeout |= IN_ABORT;
            oldto = update_timeout(SCpnt, ABORT_TIMEOUT);

            if ((SCpnt->flags & IS_RESETTING) && SCpnt->device->soft_reset) {
                /* OK, this command must have died when we did the
                 * reset. The device itself must have lied.
                 */
                printk("Stale command on %d %d:%d appears to have died when"
                       " the bus was reset\n",
                       SCpnt->channel, SCpnt->target, SCpnt->lun);
            }

            restore_flags(flags);
            if (!host->host_busy) {
                /* Nothing is outstanding on the host; nothing to abort. */
                SCpnt->internal_timeout &= ~IN_ABORT;
                update_timeout(SCpnt, oldto);
                return 0;
            }
            printk("scsi : aborting command due to timeout : pid %lu, scsi%d,"
                   " channel %d, id %d, lun %d ",
                   SCpnt->pid, SCpnt->host->host_no, (int) SCpnt->channel,
                   (int) SCpnt->target, (int) SCpnt->lun);
            print_command (SCpnt->cmnd);
            /* Re-check the race window after the (slow) console output. */
            if (SCpnt->serial_number != SCpnt->serial_number_at_timeout)
                return 0;
            SCpnt->abort_reason = why;
            switch(host->hostt->abort(SCpnt)) {
                /* We do not know how to abort. Try waiting another
                 * time increment and see if this helps. Set the
                 * WAS_TIMEDOUT flag set so we do not try this twice
                 */
            case SCSI_ABORT_BUSY: /* Tough call - returning 1 from
                                   * this is too severe
                                   */
            case SCSI_ABORT_SNOOZE:
                if(why == DID_TIME_OUT) {
                    save_flags(flags);
                    cli();
                    SCpnt->internal_timeout &= ~IN_ABORT;
                    if(SCpnt->flags & WAS_TIMEDOUT) {
                        restore_flags(flags);
                        return 1; /* Indicate we cannot handle this.
                                   * We drop down into the reset handler
                                   * and try again
                                   */
                    } else {
                        SCpnt->flags |= WAS_TIMEDOUT;
                        oldto = SCpnt->timeout_per_command;
                        update_timeout(SCpnt, oldto);
                    }
                    restore_flags(flags);
                }
                return 0;
            case SCSI_ABORT_PENDING:
                /* Abort was queued; restore the original timeout unless the
                 * timeout handler is the one that called us. */
                if(why != DID_TIME_OUT) {
                    save_flags(flags);
                    cli();
                    update_timeout(SCpnt, oldto);
                    restore_flags(flags);
                }
                return 0;
            case SCSI_ABORT_SUCCESS:
                /* We should have already aborted this one. No
                 * need to adjust timeout
                 */
                SCpnt->internal_timeout &= ~IN_ABORT;
                return 0;
            case SCSI_ABORT_NOT_RUNNING:
                SCpnt->internal_timeout &= ~IN_ABORT;
                update_timeout(SCpnt, 0);
                return 0;
            case SCSI_ABORT_ERROR:
            default:
                SCpnt->internal_timeout &= ~IN_ABORT;
                return 1;
            }
        }
    }
}
+
+
+/* Mark a single SCSI Device as having been reset. */
+
+static inline void scsi_mark_device_reset(Scsi_Device *Device)
+{
+ Device->was_reset = 1;
+ Device->expecting_cc_ua = 1;
+}
+
+
+/* Mark all SCSI Devices on a specific Host as having been reset. */
+
+void scsi_mark_host_reset(struct Scsi_Host *Host)
+{
+ Scsi_Cmnd *SCpnt;
+ for (SCpnt = Host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ scsi_mark_device_reset(SCpnt->device);
+}
+
+
+/* Mark all SCSI Devices on a specific Host Bus as having been reset. */
+
+void scsi_mark_bus_reset(struct Scsi_Host *Host, int channel)
+{
+ Scsi_Cmnd *SCpnt;
+ for (SCpnt = Host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if (SCpnt->channel == channel)
+ scsi_mark_device_reset(SCpnt->device);
+}
+
+
+int scsi_reset (Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ int temp;
+ unsigned long flags;
+ Scsi_Cmnd * SCpnt1;
+ struct Scsi_Host * host = SCpnt->host;
+
+ printk("SCSI bus is being reset for host %d channel %d.\n",
+ host->host_no, SCpnt->channel);
+
+#if 0
+ /*
+ * First of all, we need to make a recommendation to the low-level
+ * driver as to whether a BUS_DEVICE_RESET should be performed,
+ * or whether we should do a full BUS_RESET. There is no simple
+ * algorithm here - we basically use a series of heuristics
+ * to determine what we should do.
+ */
+ SCpnt->host->suggest_bus_reset = FALSE;
+
+ /*
+ * First see if all of the active devices on the bus have
+ * been jammed up so that we are attempting resets. If so,
+ * then suggest a bus reset. Forcing a bus reset could
+ * result in some race conditions, but no more than
+ * you would usually get with timeouts. We will cross
+ * that bridge when we come to it.
+ *
+ * This is actually a pretty bad idea, since a sequence of
+ * commands will often timeout together and this will cause a
+ * Bus Device Reset followed immediately by a SCSI Bus Reset.
+ * If all of the active devices really are jammed up, the
+ * Bus Device Reset will quickly timeout and scsi_times_out
+ * will follow up with a SCSI Bus Reset anyway.
+ */
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if( SCpnt1->request.rq_status != RQ_INACTIVE
+ && (SCpnt1->flags & (WAS_RESET | IS_RESETTING)) == 0 )
+ break;
+ SCpnt1 = SCpnt1->next;
+ }
+ if( SCpnt1 == NULL ) {
+ reset_flags |= SCSI_RESET_SUGGEST_BUS_RESET;
+ }
+
+ /*
+ * If the code that called us is suggesting a hard reset, then
+ * definitely request it. This usually occurs because a
+ * BUS_DEVICE_RESET times out.
+ *
+ * Passing reset_flags along takes care of this automatically.
+ */
+ if( reset_flags & SCSI_RESET_SUGGEST_BUS_RESET ) {
+ SCpnt->host->suggest_bus_reset = TRUE;
+ }
+#endif
+
+ while (1) {
+ save_flags(flags);
+ cli();
+
+ /*
+ * Protect against races here. If the command is done, or we are
+ * on a different command forget it.
+ */
+ if (reset_flags & SCSI_RESET_ASYNCHRONOUS)
+ if (SCpnt->serial_number != SCpnt->serial_number_at_timeout) {
+ restore_flags(flags);
+ return 0;
+ }
+
+ if (SCpnt->internal_timeout & IN_RESET)
+ {
+ restore_flags(flags);
+ while (SCpnt->internal_timeout & IN_RESET)
+ barrier();
+ }
+ else
+ {
+ SCpnt->internal_timeout |= IN_RESET;
+ update_timeout(SCpnt, RESET_TIMEOUT);
+
+ if (host->host_busy)
+ {
+ restore_flags(flags);
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if (SCpnt1->request.rq_status != RQ_INACTIVE) {
+#if 0
+ if (!(SCpnt1->flags & IS_RESETTING) &&
+ !(SCpnt1->internal_timeout & IN_ABORT))
+ scsi_abort(SCpnt1, DID_RESET);
+#endif
+ SCpnt1->flags |= (WAS_RESET | IS_RESETTING);
+ }
+ SCpnt1 = SCpnt1->next;
+ }
+
+ host->last_reset = jiffies;
+ temp = host->hostt->reset(SCpnt, reset_flags);
+ /*
+ This test allows the driver to introduce an additional bus
+ settle time delay by setting last_reset up to 20 seconds in
+ the future. In the normal case where the driver does not
+ modify last_reset, it must be assumed that the actual bus
+ reset occurred immediately prior to the return to this code,
+ and so last_reset must be updated to the current time, so
+ that the delay in internal_cmnd will guarantee at least a
+ MIN_RESET_DELAY bus settle time.
+ */
+ if ((host->last_reset < jiffies) ||
+ (host->last_reset > (jiffies + 20 * HZ)))
+ host->last_reset = jiffies;
+ }
+ else
+ {
+ if (!host->block) host->host_busy++;
+ restore_flags(flags);
+ host->last_reset = jiffies;
+ SCpnt->flags |= (WAS_RESET | IS_RESETTING);
+ temp = host->hostt->reset(SCpnt, reset_flags);
+ if ((host->last_reset < jiffies) ||
+ (host->last_reset > (jiffies + 20 * HZ)))
+ host->last_reset = jiffies;
+ if (!host->block) host->host_busy--;
+ }
+
+#ifdef DEBUG
+ printk("scsi reset function returned %d\n", temp);
+#endif
+
+ /*
+ * Now figure out what we need to do, based upon
+ * what the low level driver said that it did.
+ * If the result is SCSI_RESET_SUCCESS, SCSI_RESET_PENDING,
+ * or SCSI_RESET_WAKEUP, then the low level driver did a
+ * bus device reset or bus reset, so we should go through
+ * and mark one or all of the devices on that bus
+ * as having been reset.
+ */
+ switch(temp & SCSI_RESET_ACTION) {
+ case SCSI_RESET_SUCCESS:
+ if (temp & SCSI_RESET_HOST_RESET)
+ scsi_mark_host_reset(host);
+ else if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_bus_reset(host, SCpnt->channel);
+ else scsi_mark_device_reset(SCpnt->device);
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ restore_flags(flags);
+ return 0;
+ case SCSI_RESET_PENDING:
+ if (temp & SCSI_RESET_HOST_RESET)
+ scsi_mark_host_reset(host);
+ else if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_bus_reset(host, SCpnt->channel);
+ else scsi_mark_device_reset(SCpnt->device);
+ case SCSI_RESET_NOT_RUNNING:
+ return 0;
+ case SCSI_RESET_PUNT:
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ scsi_request_sense (SCpnt);
+ return 0;
+ case SCSI_RESET_WAKEUP:
+ if (temp & SCSI_RESET_HOST_RESET)
+ scsi_mark_host_reset(host);
+ else if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_bus_reset(host, SCpnt->channel);
+ else scsi_mark_device_reset(SCpnt->device);
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ scsi_request_sense (SCpnt);
+ /*
+ * If a bus reset was performed, we
+ * need to wake up each and every command
+ * that was active on the bus or if it was a HBA
+ * reset all active commands on all channels
+ */
+ if( temp & SCSI_RESET_HOST_RESET )
+ {
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if (SCpnt1->request.rq_status != RQ_INACTIVE
+ && SCpnt1 != SCpnt)
+ scsi_request_sense (SCpnt1);
+ SCpnt1 = SCpnt1->next;
+ }
+ } else if( temp & SCSI_RESET_BUS_RESET ) {
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if(SCpnt1->request.rq_status != RQ_INACTIVE
+ && SCpnt1 != SCpnt
+ && SCpnt1->channel == SCpnt->channel)
+ scsi_request_sense (SCpnt);
+ SCpnt1 = SCpnt1->next;
+ }
+ }
+ return 0;
+ case SCSI_RESET_SNOOZE:
+ /* In this case, we set the timeout field to 0
+ * so that this command does not time out any more,
+ * and we return 1 so that we get a message on the
+ * screen.
+ */
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ update_timeout(SCpnt, 0);
+ restore_flags(flags);
+ /* If you snooze, you lose... */
+ case SCSI_RESET_ERROR:
+ default:
+ return 1;
+ }
+
+ return temp;
+ }
+ }
+}
+
+
/* Kernel-timer callback: collect every command whose countdown expired
 * (timeout == -1 as set by update_timeout) and hand each one to
 * scsi_times_out() with interrupts briefly re-enabled. */
static void scsi_main_timeout(void)
{
    /*
     * We must not enter update_timeout with a timeout condition still pending.
     */

    int timed_out;
    unsigned long flags;
    struct Scsi_Host * host;
    Scsi_Cmnd * SCpnt = NULL;

    save_flags(flags);
    cli();

    /* Re-sync all per-command countdowns; expired ones become -1. */
    update_timeout(NULL, 0);

    /*
     * Find all timers such that they have 0 or negative (shouldn't happen)
     * time remaining on them.
     */
    timed_out = 0;
    for (host = scsi_hostlist; host; host = host->next) {
        for (SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
            if (SCpnt->timeout == -1)
            {
                SCpnt->timeout = 0;
                /* Snapshot the serial number so the handlers can detect
                 * whether the command completed in the meantime. */
                SCpnt->serial_number_at_timeout = SCpnt->serial_number;
                ++timed_out;
            }
    }
    if (timed_out > 0) {
        for (host = scsi_hostlist; host; host = host->next) {
            for (SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
                if (SCpnt->serial_number_at_timeout > 0 &&
                    SCpnt->serial_number_at_timeout == SCpnt->serial_number)
                {
                    /* scsi_times_out may sleep/print: drop the cli() first. */
                    restore_flags(flags);
                    scsi_times_out(SCpnt);
                    SCpnt->serial_number_at_timeout = 0;
                    cli();
                }
        }
    }
    restore_flags(flags);
}
+
+/*
+ * The strategy is to cause the timer code to call scsi_times_out()
+ * when the soonest timeout is pending.
+ * The arguments are used when we are queueing a new command, because
+ * we do not want to subtract the time used from this time, but when we
+ * set the timer, we want to take this value into account.
+ */
+
/*
 * Re-arm the single shared SCSI timer and (optionally) change one command's
 * timeout.
 *
 * SCset   - command whose timeout should be set to 'timeout' ticks
 *           (NULL to merely re-sync all countdowns)
 * timeout - new timeout for SCset, in jiffies; 0 disables its timer
 *
 * Returns the number of ticks SCset had remaining before the change
 * (0 when SCset is NULL). Must be callable with interrupts disabled;
 * it does its own save_flags/cli/restore_flags.
 */
static int update_timeout(Scsi_Cmnd * SCset, int timeout)
{
    unsigned int least, used;
    unsigned int oldto;
    unsigned long flags;
    struct Scsi_Host * host;
    Scsi_Cmnd * SCpnt = NULL;

    save_flags(flags);
    cli();

    oldto = 0;

    /*
     * This routine can be a performance bottleneck under high loads, since
     * it is called twice per SCSI operation: once when internal_cmnd is
     * called, and again when scsi_done completes the command. To limit
     * the load this routine can cause, we shortcut processing if no clock
     * ticks have occurred since the last time it was called.
     */

    if (jiffies == time_start && timer_table[SCSI_TIMER].expires > 0) {
        if(SCset){
            oldto = SCset->timeout;
            SCset->timeout = timeout;
            /* Pull the shared timer forward if this command now expires
             * sooner than the currently scheduled tick. */
            if (timeout > 0 &&
                jiffies + timeout < timer_table[SCSI_TIMER].expires)
                timer_table[SCSI_TIMER].expires = jiffies + timeout;
        }
        restore_flags(flags);
        return oldto;
    }

    /*
     * Figure out how much time has passed since the last time the timeouts
     * were updated
     */
    used = (time_start) ? (jiffies - time_start) : 0;

    /*
     * Find out what is due to timeout soonest, and adjust all timeouts for
     * the amount of time that has passed since the last time we called
     * update_timeout.
     */

    oldto = 0;

    if(SCset){
        oldto = SCset->timeout - used;
        SCset->timeout = timeout;
    }

    least = 0xffffffff;

    for(host = scsi_hostlist; host; host = host->next)
        for(SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
            if (SCpnt->timeout > 0) {
                if (SCpnt != SCset)
                    SCpnt->timeout -= used;
                /* -1 marks "expired" for scsi_main_timeout to pick up. */
                if(SCpnt->timeout <= 0) SCpnt->timeout = -1;
                if(SCpnt->timeout > 0 && SCpnt->timeout < least)
                    least = SCpnt->timeout;
            }

    /*
     * If something is due to timeout again, then we will set the next timeout
     * interrupt to occur. Otherwise, timeouts are disabled.
     */

    if (least != 0xffffffff)
    {
        time_start = jiffies;
        timer_table[SCSI_TIMER].expires = (time_elapsed = least) + jiffies;
        timer_active |= 1 << SCSI_TIMER;
    }
    else
    {
        timer_table[SCSI_TIMER].expires = time_start = time_elapsed = 0;
        timer_active &= ~(1 << SCSI_TIMER);
    }
    restore_flags(flags);
    return oldto;
}
+
+#ifdef CONFIG_MODULES
+static int scsi_register_host(Scsi_Host_Template *);
+static void scsi_unregister_host(Scsi_Host_Template *);
+#endif
+
+void *scsi_malloc(unsigned int len)
+{
+ unsigned int nbits, mask;
+ unsigned long flags;
+ int i, j;
+ if(len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
+ return NULL;
+
+ save_flags(flags);
+ cli();
+ nbits = len >> 9;
+ mask = (1 << nbits) - 1;
+
+ for(i=0;i < dma_sectors / SECTORS_PER_PAGE; i++)
+ for(j=0; j<=SECTORS_PER_PAGE - nbits; j++){
+ if ((dma_malloc_freelist[i] & (mask << j)) == 0){
+ dma_malloc_freelist[i] |= (mask << j);
+ restore_flags(flags);
+ dma_free_sectors -= nbits;
+#ifdef DEBUG
+ printk("SMalloc: %d %p\n",len, dma_malloc_pages[i] + (j << 9));
+#endif
+ return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
+ }
+ }
+ restore_flags(flags);
+ return NULL; /* Nope. No more */
+}
+
/* Return a region previously obtained from scsi_malloc() to the DMA pool.
 * 'obj' must lie inside one of the dma_malloc_pages and 'len' must match
 * the original allocation; any inconsistency panics. Always returns 0. */
int scsi_free(void *obj, unsigned int len)
{
    unsigned int page, sector, nbits, mask;
    unsigned long flags;

#ifdef DEBUG
    unsigned long ret = 0;

#ifdef __mips__
    /* On MIPS grab the return address from $31 directly. */
    __asm__ __volatile__ ("move\t%0,$31":"=r"(ret));
#else
    ret = __builtin_return_address(0);
#endif
    printk("scsi_free %p %d\n",obj, len);
#endif

    /* Locate which pool page the pointer falls into. */
    for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
        unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
        if ((unsigned long) obj >= page_addr &&
            (unsigned long) obj < page_addr + PAGE_SIZE)
        {
            sector = (((unsigned long) obj) - page_addr) >> 9;

            nbits = len >> 9;
            mask = (1 << nbits) - 1;

            /* The run must fit within this page's bitmap. */
            if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
                panic ("scsi_free:Bad memory alignment");

            save_flags(flags);
            cli();
            /* Every sector in the run must currently be marked allocated. */
            if((dma_malloc_freelist[page] &
                (mask << sector)) != (mask<<sector)){
#ifdef DEBUG
                printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
                       obj, len, ret);
#endif
                panic("scsi_free:Trying to free unused memory");
            }
            dma_free_sectors += nbits;
            dma_malloc_freelist[page] &= ~(mask << sector);
            restore_flags(flags);
            return 0;
        }
    }
    /* Pointer was not inside any pool page. */
    panic("scsi_free:Bad offset");
}
+
+
+int scsi_loadable_module_flag; /* Set after we scan builtin drivers */
+
+void * scsi_init_malloc(unsigned int size, int priority)
+{
+ void * retval;
+
+ /*
+ * For buffers used by the DMA pool, we assume page aligned
+ * structures.
+ */
+ if ((size % PAGE_SIZE) == 0) {
+ int order, a_size;
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ retval = (void *) __get_dma_pages(priority & GFP_LEVEL_MASK,
+ order);
+ } else
+ retval = kmalloc(size, priority);
+
+ if (retval)
+ memset(retval, 0, size);
+ return retval;
+}
+
+
+void scsi_init_free(char * ptr, unsigned int size)
+{
+ /*
+ * We need this special code here because the DMA pool assumes
+ * page aligned data. Besides, it is wasteful to allocate
+ * page sized chunks with kmalloc.
+ */
+ if ((size % PAGE_SIZE) == 0) {
+ int order, a_size;
+
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ free_pages((unsigned long)ptr, order);
+ } else
+ kfree(ptr);
+}
+
/* Pre-allocate queue_depth Scsi_Cmnd structures for one device, link each
 * onto both the host-wide queue and the device's own queue, and mark the
 * device as having command blocks. Uses GFP_ATOMIC (may be called from
 * non-sleeping context); adds GFP_DMA for unchecked-ISA-DMA hosts. */
void scsi_build_commandblocks(Scsi_Device * SDpnt)
{
    struct Scsi_Host *host = SDpnt->host;
    int j;
    Scsi_Cmnd * SCpnt;

    /* Fall back to the host's per-LUN default when no depth was set. */
    if (SDpnt->queue_depth == 0)
        SDpnt->queue_depth = host->cmd_per_lun;
    SDpnt->device_queue = NULL;

    for(j=0;j<SDpnt->queue_depth;j++){
        SCpnt = (Scsi_Cmnd *)
            scsi_init_malloc(sizeof(Scsi_Cmnd),
                             GFP_ATOMIC |
                             (host->unchecked_isa_dma ? GFP_DMA : 0));
        SCpnt->host = host;
        SCpnt->device = SDpnt;
        SCpnt->target = SDpnt->id;
        SCpnt->lun = SDpnt->lun;
        SCpnt->channel = SDpnt->channel;
        SCpnt->request.rq_status = RQ_INACTIVE;
        SCpnt->use_sg = 0;
        SCpnt->old_use_sg = 0;
        SCpnt->old_cmd_len = 0;
        SCpnt->timeout = 0;
        SCpnt->underflow = 0;
        SCpnt->transfersize = 0;
        SCpnt->serial_number = 0;
        SCpnt->serial_number_at_timeout = 0;
        SCpnt->host_scribble = NULL;
        /* Push onto the doubly-linked host queue... */
        if(host->host_queue)
            host->host_queue->prev = SCpnt;
        SCpnt->next = host->host_queue;
        SCpnt->prev = NULL;
        host->host_queue = SCpnt;
        /* ...and onto the singly-linked per-device free list. */
        SCpnt->device_next = SDpnt->device_queue;
        SDpnt->device_queue = SCpnt;
    }
    SDpnt->has_cmdblocks = 1;
}
+
+/*
+ * scsi_dev_init() is our initialization routine, which in turn calls host
+ * initialization, bus scanning, and sd/st initialization routines.
+ */
+
+int scsi_dev_init(void)
+{
+ Scsi_Device * SDpnt;
+ struct Scsi_Host * shpnt;
+ struct Scsi_Device_Template * sdtpnt;
+#ifdef FOO_ON_YOU
+ return;
+#endif
+
+ /* Yes we're here... */
+#if CONFIG_PROC_FS
+ dispatch_scsi_info_ptr = dispatch_scsi_info;
+#endif
+
+ /* Init a few things so we can "malloc" memory. */
+ scsi_loadable_module_flag = 0;
+
+ timer_table[SCSI_TIMER].fn = scsi_main_timeout;
+ timer_table[SCSI_TIMER].expires = 0;
+
+#ifdef CONFIG_MODULES
+ register_symtab(&scsi_symbol_table);
+#endif
+
+ /* Register the /proc/scsi/scsi entry */
+#if CONFIG_PROC_FS
+ proc_scsi_register(0, &proc_scsi_scsi);
+#endif
+
+ /* initialize all hosts */
+ scsi_init();
+
+ scsi_devices = (Scsi_Device *) NULL;
+
+ for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+ scan_scsis(shpnt,0,0,0,0); /* scan for scsi devices */
+ if (shpnt->select_queue_depths != NULL)
+ (shpnt->select_queue_depths)(shpnt, scsi_devices);
+ }
+
+ printk("scsi : detected ");
+ for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if (sdtpnt->dev_noticed && sdtpnt->name)
+ printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name,
+ (sdtpnt->dev_noticed != 1) ? "s" : "");
+ printk("total.\n");
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
+ SDpnt->scsi_request_fn = NULL;
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
+ if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
+ }
+
+
+ /*
+ * This should build the DMA pool.
+ */
+ resize_dma_pool();
+
+ /*
+ * OK, now we finish the initialization by doing spin-up, read
+ * capacity, etc, etc
+ */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ (*sdtpnt->finish)();
+
+ scsi_loadable_module_flag = 1;
+
+ return 0;
+}
+
+static void print_inquiry(unsigned char *data)
+{
+ int i;
+
+ printk(" Vendor: ");
+ for (i = 8; i < 16; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk(" Model: ");
+ for (i = 16; i < 32; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk(" Rev: ");
+ for (i = 32; i < 36; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk("\n");
+
+ i = data[0] & 0x1f;
+
+ printk(" Type: %s ",
+ i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : "Unknown " );
+ printk(" ANSI SCSI revision: %02x", data[2] & 0x07);
+ if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
+ printk(" CCS\n");
+ else
+ printk("\n");
+}
+
+
+#ifdef CONFIG_PROC_FS
/*
 * /proc/scsi/scsi handler.
 *
 * inout == 0: read - emit the attached-device listing into 'buffer',
 *             honouring the offset/length windowing protocol (*start,
 *             begin/pos bookkeeping).
 * inout != 0: write - parse "scsi add-single-device H C I L" or
 *             "scsi remove-single-device H C I L" commands.
 *
 * Returns bytes produced/consumed, or a negative errno.
 */
int scsi_proc_info(char *buffer, char **start, off_t offset, int length,
                   int hostno, int inout)
{
    Scsi_Cmnd *SCpnt;
    struct Scsi_Device_Template *SDTpnt;
    Scsi_Device *scd, *scd_h = NULL;   /* scd_h trails scd for list unlink */
    struct Scsi_Host *HBA_ptr;
    char *p;
    int host, channel, id, lun;
    int size, len = 0;
    off_t begin = 0;
    off_t pos = 0;

    scd = scsi_devices;
    HBA_ptr = scsi_hostlist;

    if(inout == 0) {
        /* Read path: list every device grouped by host adapter. */
        size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
        len += size;
        pos = begin + len;
        while (HBA_ptr) {
#if 0
            size += sprintf(buffer+len,"scsi%2d: %s\n", (int) HBA_ptr->host_no,
                            HBA_ptr->hostt->procname);
            len += size;
            pos = begin + len;
#endif
            scd = scsi_devices;
            while (scd) {
                if (scd->host == HBA_ptr) {
                    proc_print_scsidevice(scd, buffer, &size, len);
                    len += size;
                    pos = begin + len;

                    /* Standard /proc windowing: discard output before
                     * 'offset', stop once past offset+length. */
                    if (pos < offset) {
                        len = 0;
                        begin = pos;
                    }
                    if (pos > offset + length)
                        goto stop_output;
                }
                scd = scd->next;
            }
            HBA_ptr = HBA_ptr->next;
        }

    stop_output:
        *start=buffer+(offset-begin);   /* Start of wanted data */
        len-=(offset-begin);            /* Start slop */
        if(len>length)
            len = length;               /* Ending slop */
        return (len);
    }

    /* Write path: commands must begin with "scsi" and be long enough. */
    if(!buffer || length < 25 || strncmp("scsi", buffer, 4))
        return(-EINVAL);

    /*
     * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
     * with "0 1 2 3" replaced by your "Host Channel Id Lun".
     * Consider this feature BETA.
     * CAUTION: This is not for hotplugging your peripherals. As
     * SCSI was not designed for this you could damage your
     * hardware !
     * However perhaps it is legal to switch on an
     * already connected device. It is perhaps not
     * guaranteed this device doesn't corrupt an ongoing data transfer.
     */
    if(!strncmp("add-single-device", buffer + 5, 17)) {
        p = buffer + 23;

        host = simple_strtoul(p, &p, 0);
        channel = simple_strtoul(p+1, &p, 0);
        id = simple_strtoul(p+1, &p, 0);
        lun = simple_strtoul(p+1, &p, 0);

        printk("scsi singledevice %d %d %d %d\n", host, channel,
               id, lun);

        /* Refuse if the H/C/I/L tuple is already known. */
        while(scd && (scd->host->host_no != host
                      || scd->channel != channel
                      || scd->id != id
                      || scd->lun != lun)) {
            scd = scd->next;
        }
        if(scd)
            return(-ENOSYS); /* We do not yet support unplugging */
        while(HBA_ptr && HBA_ptr->host_no != host)
            HBA_ptr = HBA_ptr->next;

        if(!HBA_ptr)
            return(-ENXIO);

        scan_scsis (HBA_ptr, 1, channel, id, lun);
        return(length);

    }

    /*
     * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
     * with "0 1 2 3" replaced by your "Host Channel Id Lun".
     *
     * Consider this feature pre-BETA.
     *
     * CAUTION: This is not for hotplugging your peripherals. As
     * SCSI was not designed for this you could damage your
     * hardware and thoroughly confuse the SCSI subsystem.
     *
     */
    else if(!strncmp("remove-single-device", buffer + 5, 20)) {
        p = buffer + 26;

        host = simple_strtoul(p, &p, 0);
        channel = simple_strtoul(p+1, &p, 0);
        id = simple_strtoul(p+1, &p, 0);
        lun = simple_strtoul(p+1, &p, 0);

        /* Find the device, keeping scd_h as its list predecessor. */
        while(scd != NULL) {
            if(scd->host->host_no == host
               && scd->channel == channel
               && scd->id == id
               && scd->lun == lun){
                break;
            }
            scd_h = scd;
            scd = scd->next;
        }

        if(scd == NULL)
            return(-ENODEV); /* there is no such device attached */

        if(scd->access_count)
            return(-EBUSY);

        /* Let every high-level driver detach from the device. */
        SDTpnt = scsi_devicelist;
        while(SDTpnt != NULL) {
            if(SDTpnt->detach) (*SDTpnt->detach)(scd);
            SDTpnt = SDTpnt->next;
        }

        if(scd->attached == 0) {
            /*
             * Nobody is using this device any more.
             * Free all of the command structures.
             */
            for(SCpnt=scd->host->host_queue; SCpnt; SCpnt = SCpnt->next){
                if(SCpnt->device == scd) {
                    /* Unlink from the doubly-linked host queue. */
                    if(SCpnt->prev != NULL)
                        SCpnt->prev->next = SCpnt->next;
                    if(SCpnt->next != NULL)
                        SCpnt->next->prev = SCpnt->prev;
                    if(SCpnt == scd->host->host_queue)
                        scd->host->host_queue = SCpnt->next;
                    scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
                }
            }
            /* Now we can remove the device structure */
            if(scd_h != NULL) {
                scd_h->next = scd->next;
            } else if (scsi_devices == scd) {
                /* We had a hit on the first entry of the device list */
                scsi_devices = scd->next;
            }
            scsi_init_free((char *) scd, sizeof(Scsi_Device));
        } else {
            return(-EBUSY);
        }
        return(0);
    }
    return(-EINVAL);
}
+#endif
+
+/*
+ * Go through the device list and recompute the most appropriate size
+ * for the dma pool. Then grab more memory (as required).
+ */
+static void resize_dma_pool(void)
+{
+ int i;
+ unsigned long size;
+ struct Scsi_Host * shpnt;
+ struct Scsi_Host * host = NULL;
+ Scsi_Device * SDpnt;
+ unsigned long flags;
+ FreeSectorBitmap * new_dma_malloc_freelist = NULL;
+ unsigned int new_dma_sectors = 0;
+ unsigned int new_need_isa_buffer = 0;
+ unsigned char ** new_dma_malloc_pages = NULL;
+
+ if( !scsi_devices )
+ {
+ /*
+ * Free up the DMA pool.
+ */
+ if( dma_free_sectors != dma_sectors )
+ panic("SCSI DMA pool memory leak %d %d\n",dma_free_sectors,dma_sectors);
+
+ for(i=0; i < dma_sectors / SECTORS_PER_PAGE; i++)
+ scsi_init_free(dma_malloc_pages[i], PAGE_SIZE);
+ if (dma_malloc_pages)
+ scsi_init_free((char *) dma_malloc_pages,
+ (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages));
+ dma_malloc_pages = NULL;
+ if (dma_malloc_freelist)
+ scsi_init_free((char *) dma_malloc_freelist,
+ (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_freelist));
+ dma_malloc_freelist = NULL;
+ dma_sectors = 0;
+ dma_free_sectors = 0;
+ return;
+ }
+ /* Next, check to see if we need to extend the DMA buffer pool */
+
+ new_dma_sectors = 2*SECTORS_PER_PAGE; /* Base value we use */
+
+ if (high_memory-1 > ISA_DMA_THRESHOLD)
+ scsi_need_isa_bounce_buffers = 1;
+ else
+ scsi_need_isa_bounce_buffers = 0;
+
+ if (scsi_devicelist)
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ new_dma_sectors += SECTORS_PER_PAGE; /* Increment for each host */
+
+ for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
+ host = SDpnt->host;
+
+ /*
+ * sd and sr drivers allocate scatterlists.
+ * sr drivers may allocate for each command 1x2048 or 2x1024 extra
+ * buffers for 2k sector size and 1k fs.
+ * sg driver allocates buffers < 4k.
+ * st driver does not need buffers from the dma pool.
+ * estimate 4k buffer/command for devices of unknown type (should panic).
+ */
+ if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
+ SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
+ new_dma_sectors += ((host->sg_tablesize *
+ sizeof(struct scatterlist) + 511) >> 9) *
+ SDpnt->queue_depth;
+ if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
+ new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
+ }
+ else if (SDpnt->type == TYPE_SCANNER ||
+ SDpnt->type == TYPE_PROCESSOR ||
+ SDpnt->type == TYPE_MEDIUM_CHANGER) {
+ new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
+ }
+ else {
+ if (SDpnt->type != TYPE_TAPE) {
+ printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
+ new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
+ }
+ }
+
+ if(host->unchecked_isa_dma &&
+ scsi_need_isa_bounce_buffers &&
+ SDpnt->type != TYPE_TAPE) {
+ new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
+ SDpnt->queue_depth;
+ new_need_isa_buffer++;
+ }
+ }
+
+#ifdef DEBUG_INIT
+ printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
+#endif
+
+ /* limit DMA memory to 32MB: */
+ new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
+
+ /*
+ * We never shrink the buffers - this leads to
+ * race conditions that I would rather not even think
+ * about right now.
+ */
+ if( new_dma_sectors < dma_sectors )
+ new_dma_sectors = dma_sectors;
+
+ if (new_dma_sectors)
+ {
+ size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ new_dma_malloc_freelist = (FreeSectorBitmap *) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(new_dma_malloc_freelist, 0, size);
+
+ size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(*new_dma_malloc_pages);
+ new_dma_malloc_pages = (unsigned char **) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(new_dma_malloc_pages, 0, size);
+ }
+
+ /*
+ * If we need more buffers, expand the list.
+ */
+ if( new_dma_sectors > dma_sectors ) {
+ for(i=dma_sectors / SECTORS_PER_PAGE; i< new_dma_sectors / SECTORS_PER_PAGE; i++)
+ new_dma_malloc_pages[i] = (unsigned char *)
+ scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
+ }
+
+ /* When we dick with the actual DMA list, we need to
+ * protect things
+ */
+ save_flags(flags);
+ cli();
+ if (dma_malloc_freelist)
+ {
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
+ scsi_init_free((char *) dma_malloc_freelist, size);
+ }
+ dma_malloc_freelist = new_dma_malloc_freelist;
+
+ if (dma_malloc_pages)
+ {
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages);
+ memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
+ scsi_init_free((char *) dma_malloc_pages, size);
+ }
+
+ dma_free_sectors += new_dma_sectors - dma_sectors;
+ dma_malloc_pages = new_dma_malloc_pages;
+ dma_sectors = new_dma_sectors;
+ need_isa_buffer = new_need_isa_buffer;
+ restore_flags(flags);
+
+#ifdef DEBUG_INIT
+ printk("resize_dma_pool: dma free sectors = %d\n", dma_free_sectors);
+ printk("resize_dma_pool: dma sectors = %d\n", dma_sectors);
+ printk("resize_dma_pool: need isa buffers = %d\n", need_isa_buffer);
+#endif
+}
+
+#ifdef CONFIG_MODULES /* a big #ifdef block... */
+
+/*
+ * This entry point should be called by a loadable module if it is trying
+ * to add a low level scsi driver to the system.
+ */
+static int scsi_register_host(Scsi_Host_Template * tpnt)
+{
+ int pcount;
+ struct Scsi_Host * shpnt;
+ Scsi_Device * SDpnt;
+ struct Scsi_Device_Template * sdtpnt;
+ const char * name;
+
+ if (tpnt->next || !tpnt->detect) return 1;/* Must be already loaded, or
+ * no detect routine available
+ */
+ pcount = next_scsi_host;
+ if ((tpnt->present = tpnt->detect(tpnt)))
+ {
+ if(pcount == next_scsi_host) {
+ if(tpnt->present > 1) {
+ printk("Failure to register low-level scsi driver");
+ scsi_unregister_host(tpnt);
+ return 1;
+ }
+ /* The low-level driver failed to register a driver. We
+ * can do this now.
+ */
+ scsi_register(tpnt,0);
+ }
+ tpnt->next = scsi_hosts; /* Add to the linked list */
+ scsi_hosts = tpnt;
+
+ /* Add the new driver to /proc/scsi */
+#if CONFIG_PROC_FS
+ build_proc_dir_entries(tpnt);
+#endif
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ if(shpnt->hostt == tpnt)
+ {
+ if(tpnt->info)
+ name = tpnt->info(shpnt);
+ else
+ name = tpnt->name;
+ printk ("scsi%d : %s\n", /* And print a little message */
+ shpnt->host_no, name);
+ }
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+ scsi_make_blocked_list();
+
+ /* The next step is to call scan_scsis here. This generates the
+ * Scsi_Devices entries
+ */
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ if(shpnt->hostt == tpnt) {
+ scan_scsis(shpnt,0,0,0,0);
+ if (shpnt->select_queue_depths != NULL)
+ (shpnt->select_queue_depths)(shpnt, scsi_devices);
+ }
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ /* Next we create the Scsi_Cmnd structures for this host */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ if(SDpnt->host->hostt == tpnt)
+ {
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
+ if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
+ }
+
+ /*
+ * Now that we have all of the devices, resize the DMA pool,
+ * as required. */
+ resize_dma_pool();
+
+
+ /* This does any final handling that is required. */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ (*sdtpnt->finish)();
+ }
+
+#if defined(USE_STATIC_SCSI_MEMORY)
+ printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
+ (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
+ (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
+ (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
+#endif
+
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Similarly, this entry point should be called by a loadable module if it
+ * is trying to remove a low level scsi driver from the system.
+ */
+static void scsi_unregister_host(Scsi_Host_Template * tpnt)
+{
+ Scsi_Host_Template * SHT, *SHTp;
+ Scsi_Device *sdpnt, * sdppnt, * sdpnt1;
+ Scsi_Cmnd * SCpnt;
+ unsigned long flags;
+ struct Scsi_Device_Template * sdtpnt;
+ struct Scsi_Host * shpnt, *sh1;
+ int pcount;
+
+ /* First verify that this host adapter is completely free with no pending
+ * commands */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt && sdpnt->host->hostt->usage_count
+ && *sdpnt->host->hostt->usage_count) return;
+
+ for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+ {
+ if (shpnt->hostt != tpnt) continue;
+ for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ save_flags(flags);
+ cli();
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ restore_flags(flags);
+ for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if(SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
+ SCpnt->request.rq_status = RQ_INACTIVE;
+ printk("Device busy???\n");
+ return;
+ }
+ SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
+ restore_flags(flags);
+ }
+ }
+ /* Next we detach the high level drivers from the Scsi_Device structures */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt)
+ {
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->detach) (*sdtpnt->detach)(sdpnt);
+ /* If something still attached, punt */
+ if (sdpnt->attached) {
+ printk("Attached usage count = %d\n", sdpnt->attached);
+ return;
+ }
+ }
+
+ /* Next we free up the Scsi_Cmnd structures for this host */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt)
+ while (sdpnt->host->host_queue) {
+ SCpnt = sdpnt->host->host_queue->next;
+ scsi_init_free((char *) sdpnt->host->host_queue, sizeof(Scsi_Cmnd));
+ sdpnt->host->host_queue = SCpnt;
+ if (SCpnt) SCpnt->prev = NULL;
+ sdpnt->has_cmdblocks = 0;
+ }
+
+ /* Next free up the Scsi_Device structures for this host */
+
+ sdppnt = NULL;
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt1)
+ {
+ sdpnt1 = sdpnt->next;
+ if (sdpnt->host->hostt == tpnt) {
+ if (sdppnt)
+ sdppnt->next = sdpnt->next;
+ else
+ scsi_devices = sdpnt->next;
+ scsi_init_free((char *) sdpnt, sizeof (Scsi_Device));
+ } else
+ sdppnt = sdpnt;
+ }
+
+ /* Next we go through and remove the instances of the individual hosts
+ * that were detected */
+
+ shpnt = scsi_hostlist;
+ while(shpnt) {
+ sh1 = shpnt->next;
+ if(shpnt->hostt == tpnt) {
+ if(shpnt->loaded_as_module) {
+ pcount = next_scsi_host;
+ /* Remove the /proc/scsi directory entry */
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(tpnt->proc_dir,
+ shpnt->host_no + PROC_SCSI_FILE);
+#endif
+ if(tpnt->release)
+ (*tpnt->release)(shpnt);
+ else {
+ /* This is the default case for the release function.
+ * It should do the right thing for most correctly
+ * written host adapters.
+ */
+ if (shpnt->irq) free_irq(shpnt->irq, NULL);
+ if (shpnt->dma_channel != 0xff) free_dma(shpnt->dma_channel);
+ if (shpnt->io_port && shpnt->n_io_port)
+ release_region(shpnt->io_port, shpnt->n_io_port);
+ }
+ if(pcount == next_scsi_host) scsi_unregister(shpnt);
+ tpnt->present--;
+ }
+ }
+ shpnt = sh1;
+ }
+
+ /*
+ * If there are absolutely no more hosts left, it is safe
+ * to completely nuke the DMA pool. The resize operation will
+ * do the right thing and free everything.
+ */
+ if( !scsi_devices )
+ resize_dma_pool();
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+#if defined(USE_STATIC_SCSI_MEMORY)
+ printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
+ (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
+ (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
+ (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
+#endif
+
+ scsi_make_blocked_list();
+
+ /* There were some hosts that were loaded at boot time, so we cannot
+ do any more than this */
+ if (tpnt->present) return;
+
+ /* OK, this is the very last step. Remove this host adapter from the
+ linked list. */
+ for(SHTp=NULL, SHT=scsi_hosts; SHT; SHTp=SHT, SHT=SHT->next)
+ if(SHT == tpnt) {
+ if(SHTp)
+ SHTp->next = SHT->next;
+ else
+ scsi_hosts = SHT->next;
+ SHT->next = NULL;
+ break;
+ }
+
+ /* Rebuild the /proc/scsi directory entries */
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(tpnt->proc_dir, tpnt->proc_dir->low_ino);
+#endif
+ MOD_DEC_USE_COUNT;
+}
+
+/*
+ * This entry point should be called by a loadable module if it is trying
+ * to add a high level scsi driver to the system.
+ */
+static int scsi_register_device_module(struct Scsi_Device_Template * tpnt)
+{
+ Scsi_Device * SDpnt;
+
+ if (tpnt->next) return 1;
+
+ scsi_register_device(tpnt);
+ /*
+ * First scan the devices that we know about, and see if we notice them.
+ */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ if(tpnt->detect) SDpnt->attached += (*tpnt->detect)(SDpnt);
+
+ /*
+ * If any of the devices would match this driver, then perform the
+ * init function.
+ */
+ if(tpnt->init && tpnt->dev_noticed)
+ if ((*tpnt->init)()) return 1;
+
+ /*
+ * Now actually connect the devices to the new driver.
+ */
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ {
+ if(tpnt->attach) (*tpnt->attach)(SDpnt);
+ /*
+ * If this driver attached to the device, and we no longer
+ * have anything attached, release the scsi command blocks.
+ */
+ if(SDpnt->attached && SDpnt->has_cmdblocks == 0)
+ scsi_build_commandblocks(SDpnt);
+ }
+
+ /*
+ * This does any final handling that is required.
+ */
+ if(tpnt->finish && tpnt->nr_dev) (*tpnt->finish)();
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int scsi_unregister_device(struct Scsi_Device_Template * tpnt)
+{
+ Scsi_Device * SDpnt;
+ Scsi_Cmnd * SCpnt;
+ struct Scsi_Device_Template * spnt;
+ struct Scsi_Device_Template * prev_spnt;
+
+ /*
+ * If we are busy, this is not going to fly.
+ */
+ if( *tpnt->usage_count != 0) return 0;
+ /*
+ * Next, detach the devices from the driver.
+ */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ {
+ if(tpnt->detach) (*tpnt->detach)(SDpnt);
+ if(SDpnt->attached == 0)
+ {
+ /*
+ * Nobody is using this device any more. Free all of the
+ * command structures.
+ */
+ for(SCpnt = SDpnt->host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ if(SCpnt->device == SDpnt)
+ {
+ if(SCpnt->prev != NULL)
+ SCpnt->prev->next = SCpnt->next;
+ if(SCpnt->next != NULL)
+ SCpnt->next->prev = SCpnt->prev;
+ if(SCpnt == SDpnt->host->host_queue)
+ SDpnt->host->host_queue = SCpnt->next;
+ scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
+ }
+ }
+ SDpnt->has_cmdblocks = 0;
+ }
+ }
+ /*
+ * Extract the template from the linked list.
+ */
+ spnt = scsi_devicelist;
+ prev_spnt = NULL;
+ while(spnt != tpnt)
+ {
+ prev_spnt = spnt;
+ spnt = spnt->next;
+ }
+ if(prev_spnt == NULL)
+ scsi_devicelist = tpnt->next;
+ else
+ prev_spnt->next = spnt->next;
+
+ MOD_DEC_USE_COUNT;
+ /*
+ * Final cleanup for the driver is done in the driver sources in the
+ * cleanup function.
+ */
+ return 0;
+}
+
+
+int scsi_register_module(int module_type, void * ptr)
+{
+ switch(module_type){
+ case MODULE_SCSI_HA:
+ return scsi_register_host((Scsi_Host_Template *) ptr);
+
+ /* Load upper level device handler of some kind */
+ case MODULE_SCSI_DEV:
+#ifdef CONFIG_KERNELD
+ if (scsi_hosts == NULL)
+ request_module("scsi_hostadapter");
+#endif
+ return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
+ /* The rest of these are not yet implemented */
+
+ /* Load constants.o */
+ case MODULE_SCSI_CONST:
+
+ /* Load specialized ioctl handler for some device. Intended for
+ * cdroms that have non-SCSI2 audio command sets. */
+ case MODULE_SCSI_IOCTL:
+
+ default:
+ return 1;
+ }
+}
+
+void scsi_unregister_module(int module_type, void * ptr)
+{
+ switch(module_type) {
+ case MODULE_SCSI_HA:
+ scsi_unregister_host((Scsi_Host_Template *) ptr);
+ break;
+ case MODULE_SCSI_DEV:
+ scsi_unregister_device((struct Scsi_Device_Template *) ptr);
+ break;
+ /* The rest of these are not yet implemented. */
+ case MODULE_SCSI_CONST:
+ case MODULE_SCSI_IOCTL:
+ break;
+ default:
+ }
+ return;
+}
+
+#endif /* CONFIG_MODULES */
+
+#ifdef DEBUG_TIMEOUT
+static void
+scsi_dump_status(void)
+{
+ int i;
+ struct Scsi_Host * shpnt;
+ Scsi_Cmnd * SCpnt;
+ printk("Dump of scsi parameters:\n");
+ i = 0;
+ for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+ for(SCpnt=shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ /* (0) 0:0:0:0 (802 123434 8 8 0) (3 3 2) (%d %d %d) %d %x */
+ printk("(%d) %d:%d:%d:%d (%s %ld %ld %ld %d) (%d %d %x) (%d %d %d) %x %x %x\n",
+ i++, SCpnt->host->host_no,
+ SCpnt->channel,
+ SCpnt->target,
+ SCpnt->lun,
+ kdevname(SCpnt->request.rq_dev),
+ SCpnt->request.sector,
+ SCpnt->request.nr_sectors,
+ SCpnt->request.current_nr_sectors,
+ SCpnt->use_sg,
+ SCpnt->retries,
+ SCpnt->allowed,
+ SCpnt->flags,
+ SCpnt->timeout_per_command,
+ SCpnt->timeout,
+ SCpnt->internal_timeout,
+ SCpnt->cmnd[0],
+ SCpnt->sense_buffer[2],
+ SCpnt->result);
+ }
+ printk("wait_for_request = %p\n", wait_for_request);
+ /* Now dump the request lists for each block device */
+ printk("Dump of pending block device requests\n");
+ for(i=0; i<MAX_BLKDEV; i++)
+ if(blk_dev[i].current_request)
+ {
+ struct request * req;
+ printk("%d: ", i);
+ req = blk_dev[i].current_request;
+ while(req) {
+ printk("(%s %d %ld %ld %ld) ",
+ kdevname(req->rq_dev),
+ req->cmd,
+ req->sector,
+ req->nr_sectors,
+ req->current_nr_sectors);
+ req = req->next;
+ }
+ printk("\n");
+ }
+}
+#endif
+
+#ifdef MODULE
+
+int init_module(void) {
+ unsigned long size;
+
+ /*
+ * This makes /proc/scsi visible.
+ */
+#if CONFIG_PROC_FS
+ dispatch_scsi_info_ptr = dispatch_scsi_info;
+#endif
+
+ timer_table[SCSI_TIMER].fn = scsi_main_timeout;
+ timer_table[SCSI_TIMER].expires = 0;
+ register_symtab(&scsi_symbol_table);
+ scsi_loadable_module_flag = 1;
+
+ /* Register the /proc/scsi/scsi entry */
+#if CONFIG_PROC_FS
+ proc_scsi_register(0, &proc_scsi_scsi);
+#endif
+
+
+ dma_sectors = PAGE_SIZE / SECTOR_SIZE;
+ dma_free_sectors= dma_sectors;
+ /*
+ * Set up a minimal DMA buffer list - this will be used during scan_scsis
+ * in some cases.
+ */
+
+ /* One bit per sector to indicate free/busy */
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ dma_malloc_freelist = (unsigned char *) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(dma_malloc_freelist, 0, size);
+
+ /* One pointer per page for the page list */
+ dma_malloc_pages = (unsigned char **)
+ scsi_init_malloc((dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages), GFP_ATOMIC);
+ dma_malloc_pages[0] = (unsigned char *)
+ scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
+ return 0;
+}
+
+void cleanup_module( void)
+{
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(0, PROC_SCSI_SCSI);
+
+ /* No, we're not here anymore. Don't show the /proc/scsi files. */
+ dispatch_scsi_info_ptr = 0L;
+#endif
+
+ /*
+ * Free up the DMA pool.
+ */
+ resize_dma_pool();
+
+ timer_table[SCSI_TIMER].fn = NULL;
+ timer_table[SCSI_TIMER].expires = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsi.h b/linux/src/drivers/scsi/scsi.h
new file mode 100644
index 0000000..13052ba
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi.h
@@ -0,0 +1,650 @@
+/*
+ * scsi.h Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * generic SCSI package header file by
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding requests, and other
+ * enhancements.
+ */
+
+#ifndef _SCSI_H
+#define _SCSI_H
+
+/*
+ * Some of the public constants are being moved to this file.
+ * We include it here so that what came from where is transparent.
+ */
+#include <scsi/scsi.h>
+
+#include <linux/random.h>
+
+
+/*
+ * Some defs, in case these are not defined elsewhere.
+ */
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+
+extern void scsi_make_blocked_list(void);
+extern volatile int in_scan_scsis;
+extern const unsigned char scsi_command_size[8];
+#define COMMAND_SIZE(opcode) scsi_command_size[((opcode) >> 5) & 7]
+#define IDENTIFY_BASE 0x80
+#define IDENTIFY(can_disconnect, lun) (IDENTIFY_BASE |\
+ ((can_disconnect) ? 0x40 : 0) |\
+ ((lun) & 0x07))
+#define MAX_SCSI_DEVICE_CODE 10
+extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
+
+
+
+/*
+ * the return of the status word will be in the following format :
+ * The low byte is the status returned by the SCSI command,
+ * with vendor specific bits masked.
+ *
+ * The next byte is the message which followed the SCSI status.
+ * This allows a stos to be used, since the Intel is a little
+ * endian machine.
+ *
+ * The final byte is a host return code, which is one of the following.
+ *
+ * IE
+ * lsb msb
+ * status msg host code
+ *
+ * Our errors returned by OUR driver, NOT SCSI message. Or'd with
+ * SCSI message passed back to driver <IF any>.
+ */
+
+
+#define DID_OK 0x00 /* NO error */
+#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */
+#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */
+#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */
+#define DID_BAD_TARGET 0x04 /* BAD target. */
+#define DID_ABORT 0x05 /* Told to abort for some other reason */
+#define DID_PARITY 0x06 /* Parity error */
+#define DID_ERROR 0x07 /* Internal error */
+#define DID_RESET 0x08 /* Reset by somebody. */
+#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */
+#define DRIVER_OK 0x00 /* Driver status */
+
+/*
+ * These indicate the error that occurred, and what is available.
+ */
+
+#define DRIVER_BUSY 0x01
+#define DRIVER_SOFT 0x02
+#define DRIVER_MEDIA 0x03
+#define DRIVER_ERROR 0x04
+
+#define DRIVER_INVALID 0x05
+#define DRIVER_TIMEOUT 0x06
+#define DRIVER_HARD 0x07
+#define DRIVER_SENSE 0x08
+
+#define SUGGEST_RETRY 0x10
+#define SUGGEST_ABORT 0x20
+#define SUGGEST_REMAP 0x30
+#define SUGGEST_DIE 0x40
+#define SUGGEST_SENSE 0x80
+#define SUGGEST_IS_OK 0xff
+
+#define DRIVER_MASK 0x0f
+#define SUGGEST_MASK 0xf0
+
+#define MAX_COMMAND_SIZE 12
+
+/*
+ * SCSI command sets
+ */
+
+#define SCSI_UNKNOWN 0
+#define SCSI_1 1
+#define SCSI_1_CCS 2
+#define SCSI_2 3
+
+/*
+ * Every SCSI command starts with a one byte OP-code.
+ * The next byte's high three bits are the LUN of the
+ * device. Any multi-byte quantities are stored high byte
+ * first, and may have a 5 bit MSB in the same byte
+ * as the LUN.
+ */
+
+/*
+ * Manufacturers list
+ */
+
+#define SCSI_MAN_UNKNOWN 0
+#define SCSI_MAN_NEC 1
+#define SCSI_MAN_TOSHIBA 2
+#define SCSI_MAN_NEC_OLDCDR 3
+#define SCSI_MAN_SONY 4
+#define SCSI_MAN_PIONEER 5
+
+/*
+ * As the scsi do command functions are intelligent, and may need to
+ * redo a command, we need to keep track of the last command
+ * executed on each one.
+ */
+
+#define WAS_RESET 0x01
+#define WAS_TIMEDOUT 0x02
+#define WAS_SENSE 0x04
+#define IS_RESETTING 0x08
+#define IS_ABORTING 0x10
+#define ASKED_FOR_SENSE 0x20
+
+/*
+ * The scsi_device struct contains what we know about each given scsi
+ * device.
+ */
+
+typedef struct scsi_device {
+ struct scsi_device * next; /* Used for linked list */
+
+ unsigned char id, lun, channel;
+
+ unsigned int manufacturer; /* Manufacturer of device, for using
+ * vendor-specific cmd's */
+ int attached; /* # of high level drivers attached to
+ * this */
+ int access_count; /* Count of open channels/mounts */
+ struct wait_queue * device_wait;/* Used to wait if device is busy */
+ struct Scsi_Host * host;
+ void (*scsi_request_fn)(void); /* Used to jumpstart things after an
+ * ioctl */
+ struct scsi_cmnd *device_queue; /* queue of SCSI Command structures */
+ void *hostdata; /* available to low-level driver */
+ char type;
+ char scsi_level;
+ char vendor[8], model[16], rev[4];
+ unsigned char current_tag; /* current tag */
+ unsigned char sync_min_period; /* Not less than this period */
+ unsigned char sync_max_offset; /* Not greater than this offset */
+ unsigned char queue_depth; /* How deep a queue to use */
+
+ unsigned writeable:1;
+ unsigned removable:1;
+ unsigned random:1;
+ unsigned has_cmdblocks:1;
+ unsigned changed:1; /* Data invalid due to media change */
+ unsigned busy:1; /* Used to prevent races */
+ unsigned lockable:1; /* Able to prevent media removal */
+ unsigned borken:1; /* Tell the Seagate driver to be
+ * painfully slow on this device */
+ unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
+ unsigned tagged_queue:1; /* SCSI-II tagged queuing enabled */
+ unsigned disconnect:1; /* can disconnect */
+ unsigned soft_reset:1; /* Uses soft reset option */
+ unsigned sync:1; /* Negotiate for sync transfers */
+ unsigned single_lun:1; /* Indicates we should only allow I/O to
+ * one of the luns for the device at a
+ * time. */
+ unsigned was_reset:1; /* There was a bus reset on the bus for
+ * this device */
+ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
+ * because we did a bus reset. */
+} Scsi_Device;
+
+/*
+ * Use these to separate status msg and our bytes
+ */
+
+#define status_byte(result) (((result) >> 1) & 0x1f)
+#define msg_byte(result) (((result) >> 8) & 0xff)
+#define host_byte(result) (((result) >> 16) & 0xff)
+#define driver_byte(result) (((result) >> 24) & 0xff)
+#define suggestion(result) (driver_byte(result) & SUGGEST_MASK)
+
+#define sense_class(sense) (((sense) >> 4) & 0x7)
+#define sense_error(sense) ((sense) & 0xf)
+#define sense_valid(sense) ((sense) & 0x80);
+
+/*
+ * These are the SCSI devices available on the system.
+ */
+
+extern Scsi_Device * scsi_devices;
+
+extern struct hd_struct * sd;
+
+#if defined(MAJOR_NR) && (MAJOR_NR == SCSI_DISK_MAJOR)
+extern struct hd_struct * sd;
+#endif
+
+/*
+ * Initializes all SCSI devices. This scans all scsi busses.
+ */
+
+extern int scsi_dev_init (void);
+
+struct scatterlist {
+ char * address; /* Location data is to be transferred to */
+ char * alt_address; /* Location of actual if address is a
+ * dma indirect buffer. NULL otherwise */
+ unsigned int length;
+};
+
+#ifdef __alpha__
+# define ISA_DMA_THRESHOLD (~0UL)
+#else
+# define ISA_DMA_THRESHOLD (0x00ffffff)
+#endif
+#define CONTIGUOUS_BUFFERS(X,Y) ((X->b_data+X->b_size) == Y->b_data)
+
+
+/*
+ * These are the return codes for the abort and reset functions. The mid-level
+ * code uses these to decide what to do next. Each of the low level abort
+ * and reset functions must correctly indicate what it has done.
+ * The descriptions are written from the point of view of the mid-level code,
+ * so that the return code is telling the mid-level drivers exactly what
+ * the low level driver has already done, and what remains to be done.
+ */
+
+/* We did not do anything.
+ * Wait some more for this command to complete, and if this does not work,
+ * try something more serious. */
+#define SCSI_ABORT_SNOOZE 0
+
+/* This means that we were able to abort the command. We have already
+ * called the mid-level done function, and do not expect an interrupt that
+ * will lead to another call to the mid-level done function for this command */
+#define SCSI_ABORT_SUCCESS 1
+
+/* We called for an abort of this command, and we should get an interrupt
+ * when this succeeds. Thus we should not restore the timer for this
+ * command in the mid-level abort function. */
+#define SCSI_ABORT_PENDING 2
+
+/* Unable to abort - command is currently on the bus. Grin and bear it. */
+#define SCSI_ABORT_BUSY 3
+
+/* The command is not active in the low level code. Command probably
+ * finished. */
+#define SCSI_ABORT_NOT_RUNNING 4
+
+/* Something went wrong. The low level driver will indicate the correct
+ * error condition when it calls scsi_done, so the mid-level abort function
+ * can simply wait until this comes through */
+#define SCSI_ABORT_ERROR 5
+
+/* We do not know how to reset the bus, or we do not want to. Bummer.
+ * Anyway, just wait a little more for the command in question, and hope that
+ * it eventually finishes. If it never finishes, the SCSI device could
+ * hang, so use this with caution. */
+#define SCSI_RESET_SNOOZE 0
+
+/* We do not know how to reset the bus, or we do not want to. Bummer.
+ * We have given up on this ever completing. The mid-level code will
+ * request sense information to decide how to proceed from here. */
+#define SCSI_RESET_PUNT 1
+
+/* This means that we were able to reset the bus. We have restarted all of
+ * the commands that should be restarted, and we should be able to continue
+ * on normally from here. We do not expect any interrupts that will return
+ * DID_RESET to any of the other commands in the host_queue, and the mid-level
+ * code does not need to do anything special to keep the commands alive.
+ * If a hard reset was performed then all outstanding commands on the
+ * bus have been restarted. */
+#define SCSI_RESET_SUCCESS 2
+
+/* We called for a reset of this bus, and we should get an interrupt
+ * when this succeeds. Each command should get its own status
+ * passed up to scsi_done, but this has not happened yet.
+ * If a hard reset was performed, then we expect an interrupt
+ * for *each* of the outstanding commands that will have the
+ * effect of restarting the commands.
+ */
+#define SCSI_RESET_PENDING 3
+
+/* We did a reset, but do not expect an interrupt to signal DID_RESET.
+ * This tells the upper level code to request the sense info, and this
+ * should keep the command alive. */
+#define SCSI_RESET_WAKEUP 4
+
+/* The command is not active in the low level code. Command probably
+ finished. */
+#define SCSI_RESET_NOT_RUNNING 5
+
+/* Something went wrong, and we do not know how to fix it. */
+#define SCSI_RESET_ERROR 6
+
+#define SCSI_RESET_SYNCHRONOUS 0x01
+#define SCSI_RESET_ASYNCHRONOUS 0x02
+#define SCSI_RESET_SUGGEST_BUS_RESET 0x04
+#define SCSI_RESET_SUGGEST_HOST_RESET 0x08
+/*
+ * This is a bitmask that is ORed with one of the above codes.
+ * It tells the mid-level code that we did a hard reset.
+ */
+#define SCSI_RESET_BUS_RESET 0x100
+/*
+ * This is a bitmask that is ORed with one of the above codes.
+ * It tells the mid-level code that we did a host adapter reset.
+ */
+#define SCSI_RESET_HOST_RESET 0x200
+/*
+ * Used to mask off bits and to obtain the basic action that was
+ * performed.
+ */
+#define SCSI_RESET_ACTION 0xff
+
+void * scsi_malloc(unsigned int);
+int scsi_free(void *, unsigned int);
+extern unsigned int dma_free_sectors; /* How much room do we have left */
+extern unsigned int need_isa_buffer; /* True if some devices need indirection
+ * buffers */
+
+/*
+ * The Scsi_Cmnd structure is used by scsi.c internally, and for communication
+ * with low level drivers that support multiple outstanding commands.
+ */
+typedef struct scsi_pointer {
+ char * ptr; /* data pointer */
+ int this_residual; /* left in this buffer */
+ struct scatterlist *buffer; /* which buffer */
+ int buffers_residual; /* how many buffers left */
+
+ volatile int Status;
+ volatile int Message;
+ volatile int have_data_in;
+ volatile int sent_command;
+ volatile int phase;
+} Scsi_Pointer;
+
+typedef struct scsi_cmnd {
+ struct Scsi_Host * host;
+ Scsi_Device * device;
+ unsigned char target, lun, channel;
+ unsigned char cmd_len;
+ unsigned char old_cmd_len;
+ struct scsi_cmnd *next, *prev, *device_next, *reset_chain;
+
+ /* These elements define the operation we are about to perform */
+ unsigned char cmnd[12];
+ unsigned request_bufflen; /* Actual request size */
+
+ void * request_buffer; /* Actual requested buffer */
+
+ /* These elements define the operation we ultimately want to perform */
+ unsigned char data_cmnd[12];
+ unsigned short old_use_sg; /* We save use_sg here when requesting
+ * sense info */
+ unsigned short use_sg; /* Number of pieces of scatter-gather */
+ unsigned short sglist_len; /* size of malloc'd scatter-gather list */
+ unsigned short abort_reason;/* If the mid-level code requests an
+ * abort, this is the reason. */
+ unsigned bufflen; /* Size of data buffer */
+ void *buffer; /* Data buffer */
+
+ unsigned underflow; /* Return error if less than this amount is
+ * transferred */
+
+ unsigned transfersize; /* How much we are guaranteed to transfer with
+ * each SCSI transfer (ie, between disconnect /
+ * reconnects. Probably == sector size */
+
+
+ struct request request; /* A copy of the command we are working on */
+
+ unsigned char sense_buffer[16]; /* Sense for this command, if needed */
+
+ /*
+ A SCSI Command is assigned a nonzero serial_number when internal_cmnd
+ passes it to the driver's queue command function. The serial_number
+ is cleared when scsi_done is entered indicating that the command has
+ been completed. If a timeout occurs, the serial number at the moment
+ of timeout is copied into serial_number_at_timeout. By subsequently
+ comparing the serial_number and serial_number_at_timeout fields
+ during abort or reset processing, we can detect whether the command
+ has already completed. This also detects cases where the command has
+ completed and the SCSI Command structure has already being reused
+ for another command, so that we can avoid incorrectly aborting or
+ resetting the new command.
+ */
+
+ unsigned long serial_number;
+ unsigned long serial_number_at_timeout;
+
+ int retries;
+ int allowed;
+ int timeout_per_command, timeout_total, timeout;
+
+ /*
+ * We handle the timeout differently if it happens when a reset,
+ * abort, etc are in process.
+ */
+ unsigned volatile char internal_timeout;
+
+ unsigned flags;
+
+ /* These variables are for the cdrom only. Once we have variable size
+ * buffers in the buffer cache, they will go away. */
+ int this_count;
+ /* End of special cdrom variables */
+
+ /* Low-level done function - can be used by low-level driver to point
+ * to completion function. Not used by mid/upper level code. */
+ void (*scsi_done)(struct scsi_cmnd *);
+ void (*done)(struct scsi_cmnd *); /* Mid-level done function */
+
+ /*
+ * The following fields can be written to by the host specific code.
+ * Everything else should be left alone.
+ */
+
+ Scsi_Pointer SCp; /* Scratchpad used by some host adapters */
+
+ unsigned char * host_scribble; /* The host adapter is allowed to
+ * call scsi_malloc and get some memory
+ * and hang it here. The host adapter
+ * is also expected to call scsi_free
+ * to release this memory. (The memory
+ * obtained by scsi_malloc is guaranteed
+ * to be at an address < 16Mb). */
+
+ int result; /* Status code from lower level driver */
+
+ unsigned char tag; /* SCSI-II queued command tag */
+ unsigned long pid; /* Process ID, starts at 0 */
+} Scsi_Cmnd;
+
+/*
+ * scsi_abort aborts the current command that is executing on host host.
+ * The error code, if non zero is returned in the host byte, otherwise
+ * DID_ABORT is returned in the hostbyte.
+ */
+
+extern int scsi_abort (Scsi_Cmnd *, int code);
+
+extern void scsi_do_cmd (Scsi_Cmnd *, const void *cmnd ,
+ void *buffer, unsigned bufflen,
+ void (*done)(struct scsi_cmnd *),
+ int timeout, int retries);
+
+
+extern Scsi_Cmnd * allocate_device(struct request **, Scsi_Device *, int);
+
+extern Scsi_Cmnd * request_queueable(struct request *, Scsi_Device *);
+extern int scsi_reset (Scsi_Cmnd *, unsigned int);
+
+extern int max_scsi_hosts;
+
+extern void proc_print_scsidevice(Scsi_Device *, char *, int *, int);
+
+extern void print_command(unsigned char *);
+extern void print_sense(const char *, Scsi_Cmnd *);
+extern void print_driverbyte(int scsiresult);
+extern void print_hostbyte(int scsiresult);
+
+extern void scsi_mark_host_reset(struct Scsi_Host *Host);
+extern void scsi_mark_bus_reset(struct Scsi_Host *Host, int channel);
+
+#if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR)
+#include "hosts.h"
+
+/*
+ * Complete up to `sectors' sectors of the request attached to SCpnt.
+ * Each finished buffer_head is unlinked from the request and either
+ * handed to its MD personality's end_request hook or marked up-to-date
+ * and unlocked.  Returns SCpnt when part of the request is still
+ * outstanding, or NULL once the whole request has been finished and
+ * its slot marked RQ_INACTIVE.
+ */
+static Scsi_Cmnd * end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
+{
+ struct request * req;
+ struct buffer_head * bh;
+
+ req = &SCpnt->request;
+ req->errors = 0;
+ if (!uptodate) {
+#if defined(MAJOR_NR) && (MAJOR_NR == SCSI_DISK_MAJOR)
+ printk(DEVICE_NAME " I/O error: dev %s, sector %lu, absolute sector %lu\n",
+ kdevname(req->rq_dev), req->sector,
+ req->sector + sd[MINOR(SCpnt->request.rq_dev)].start_sect);
+#else
+ printk(DEVICE_NAME " I/O error: dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+#endif
+ }
+
+ do {
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ bh->b_reqnext = NULL;
+ /*
+ * This is our 'MD IO has finished' event handler.
+ * note that b_state should be cached in a register
+ * anyways, so the overhead of this checking is almost
+ * zero. But anyways .. we never get OO for free :)
+ */
+ if (test_bit(BH_MD, &bh->b_state)) {
+ struct md_personality * pers=(struct md_personality *)bh->personality;
+ pers->end_request(bh,uptodate);
+ }
+ /*
+ * the normal (nonmirrored and no RAID5) case:
+ */
+ else {
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ }
+ sectors -= bh->b_size >> 9;
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_scsi_request: buffer-list destroyed\n");
+ }
+ }
+ }
+ } while(sectors && bh);
+ /* More buffers pending: point req->buffer at the next one and let the
+ * caller keep going with the same command slot. */
+ if (req->bh){
+ req->buffer = bh->b_data;
+ return SCpnt;
+ }
+ /* Whole request finished: release waiters and retire the slot. */
+ DEVICE_OFF(req->rq_dev);
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+
+ if (SCpnt->host->block) {
+ /* Walk the ring of hosts serialized behind this one (the loop
+ * follows ->block until it returns to SCpnt->host) and wake
+ * each of them. */
+ struct Scsi_Host * next;
+
+ for (next = SCpnt->host->block; next != SCpnt->host;
+ next = next->block)
+ wake_up(&next->host_wait);
+ }
+
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+ wake_up(&SCpnt->device->device_wait);
+ return NULL;
+}
+
+
+/* This is just like INIT_REQUEST, but we need to be aware of the fact
+ * that an interrupt may start another request, so we run this with interrupts
+ * turned off.  The expansion expects a saved-flags variable `flags' in
+ * scope; it returns (restoring flags) when the queue is empty and
+ * panics on a corrupted request list or an unlocked buffer.
+ */
+#define INIT_SCSI_REQUEST \
+ if (!CURRENT) { \
+ CLEAR_INTR; \
+ restore_flags(flags); \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed");\
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+#endif
+
+#ifdef MACH
+/*
+ * Sleep on QUEUE until CONDITION becomes false.  The Mach variant has
+ * no per-task run state to flip, so it simply re-schedules while the
+ * condition holds; calling this from interrupt context panics.
+ */
+#define SCSI_SLEEP(QUEUE, CONDITION) { \
+ if (CONDITION) { \
+ struct wait_queue wait = { NULL, NULL}; \
+ add_wait_queue(QUEUE, &wait); \
+ for(;;) { \
+ if (CONDITION) { \
+ if (intr_count) \
+ panic("scsi: trying to call schedule() in interrupt" \
+ ", file %s, line %d.\n", __FILE__, __LINE__); \
+ schedule(); \
+ } \
+ else \
+ break; \
+ } \
+ remove_wait_queue(QUEUE, &wait);\
+ }; }
+#else /* !MACH */
+/*
+ * Linux variant: the task is marked TASK_UNINTERRUPTIBLE before each
+ * re-test of CONDITION so that a wakeup arriving between the test and
+ * schedule() is not lost.
+ */
+#define SCSI_SLEEP(QUEUE, CONDITION) { \
+ if (CONDITION) { \
+ struct wait_queue wait = { current, NULL}; \
+ add_wait_queue(QUEUE, &wait); \
+ for(;;) { \
+ current->state = TASK_UNINTERRUPTIBLE; \
+ if (CONDITION) { \
+ if (intr_count) \
+ panic("scsi: trying to call schedule() in interrupt" \
+ ", file %s, line %d.\n", __FILE__, __LINE__); \
+ schedule(); \
+ } \
+ else \
+ break; \
+ } \
+ remove_wait_queue(QUEUE, &wait);\
+ current->state = TASK_RUNNING; \
+ }; }
+#endif /* !MACH */
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsi_ioctl.c b/linux/src/drivers/scsi/scsi_ioctl.c
new file mode 100644
index 0000000..7691859
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi_ioctl.c
@@ -0,0 +1,452 @@
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/page.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include <scsi/scsi_ioctl.h>
+
+#define NORMAL_RETRIES 5
+#define NORMAL_TIMEOUT (10 * HZ)
+#define FORMAT_UNIT_TIMEOUT (2 * 60 * 60 * HZ)
+#define START_STOP_TIMEOUT (60 * HZ)
+#define MOVE_MEDIUM_TIMEOUT (5 * 60 * HZ)
+#define READ_ELEMENT_STATUS_TIMEOUT (5 * 60 * HZ)
+
+#define MAX_BUF PAGE_SIZE
+
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+/*
+ * If we are told to probe a host, we will return 0 if the host is not
+ * present, 1 if the host is present, and will return an identifying
+ * string at *arg, if arg is non null, filling to the length stored at
+ * (int *) arg
+ */
+
+/*
+ * Handler for SCSI_IOCTL_PROBE_HOST: returns hostt->present (0 when the
+ * host is absent).  When `buffer' is non-NULL its leading unsigned int
+ * holds the space available there, and the host's info (or name) string
+ * is copied back, truncated to that length (the terminating NUL is
+ * included when it fits).
+ */
+static int ioctl_probe(struct Scsi_Host * host, void *buffer)
+{
+ int temp, result;
+ unsigned int len,slen;
+ const char * string;
+
+ if ((temp = host->hostt->present) && buffer) {
+ result = verify_area(VERIFY_READ, buffer, sizeof(long));
+ if (result) return result;
+
+ len = get_user ((unsigned int *) buffer); /* caller's buffer size */
+ if(host->hostt->info)
+ string = host->hostt->info(host);
+ else
+ string = host->hostt->name;
+ if(string) {
+ slen = strlen(string);
+ if (len > slen)
+ len = slen + 1; /* room to spare: include the NUL */
+ result = verify_area(VERIFY_WRITE, buffer, len);
+ if (result) return result;
+
+ memcpy_tofs (buffer, string, len);
+ }
+ }
+ return temp;
+}
+
+/*
+ *
+ * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
+ * The NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
+ *
+ * dev is the SCSI device struct ptr, *(int *) arg is the length of the
+ * input data, if any, not including the command string & counts,
+ * *((int *)arg + 1) is the output buffer size in bytes.
+ *
+ * *(char *) ((int *) arg)[2] the actual command byte.
+ *
+ * Note that if more than MAX_BUF bytes are requested to be transfered,
+ * the ioctl will fail with error EINVAL. MAX_BUF can be increased in
+ * the future by increasing the size that scsi_malloc will accept.
+ *
+ * This size *does not* include the initial lengths that were passed.
+ *
+ * The SCSI command is read from the memory location immediately after the
+ * length words, and the input data is right after the command. The SCSI
+ * routines know the command size based on the opcode decode.
+ *
+ * The output area is then filled in starting from the command byte.
+ */
+
+/*
+ * Completion callback for ioctl-issued commands: flag the embedded
+ * request as done and wake the sleeper (if any) parked on its
+ * semaphore.
+ */
+static void scsi_ioctl_done (Scsi_Cmnd * SCpnt)
+{
+ struct request *rq = &SCpnt->request;
+
+ rq->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (rq->sem)
+ up(rq->sem);
+}
+
+/*
+ * Synchronously issue a ready-built, zero-data-length command `cmd' and
+ * sleep until it completes.  Removable-media sense conditions
+ * (NOT_READY, UNIT_ATTENTION) are handled quietly; everything else is
+ * logged.  Returns the raw SCSI result word.
+ */
+static int ioctl_internal_command(Scsi_Device *dev, char * cmd,
+ int timeout, int retries)
+{
+ int result;
+ Scsi_Cmnd * SCpnt;
+
+ SCpnt = allocate_device(NULL, dev, 1);
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries);
+ /* scsi_ioctl_done() ups the semaphore when the command finishes */
+ down(&sem);
+ }
+
+ if(driver_byte(SCpnt->result) != 0)
+ switch(SCpnt->sense_buffer[2] & 0xf) {
+ case ILLEGAL_REQUEST:
+ if(cmd[0] == ALLOW_MEDIUM_REMOVAL) dev->lockable = 0;
+ else printk("SCSI device (ioctl) reports ILLEGAL REQUEST.\n");
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ if(dev->removable){
+ printk(KERN_INFO "Device not ready. Make sure there is a disc in the drive.\n");
+ break;
+ };
+ /* fall through: NOT_READY on fixed media is a real error */
+ case UNIT_ATTENTION:
+ if (dev->removable){
+ dev->changed = 1;
+ SCpnt->result = 0; /* This is no longer considered an error */
+ printk(KERN_INFO "Disc change detected.\n");
+ break;
+ };
+ /* fall through */
+ default: /* Fall through for non-removable media */
+ printk("SCSI error: host %d id %d lun %d return code = %x\n",
+ dev->host->host_no,
+ dev->id,
+ dev->lun,
+ SCpnt->result);
+ printk("\tSense class %x, sense error %x, extended sense %x\n",
+ sense_class(SCpnt->sense_buffer[0]),
+ sense_error(SCpnt->sense_buffer[0]),
+ SCpnt->sense_buffer[2] & 0xf);
+
+ };
+
+ result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE; /* return the command slot */
+
+ /* Restart the device's request queue unless a reset is in progress */
+ if (!SCpnt->device->was_reset && SCpnt->device->scsi_request_fn)
+ (*SCpnt->device->scsi_request_fn)();
+
+ wake_up(&SCpnt->device->device_wait);
+ return result;
+}
+
+/*
+ * This interface is deprecated - users should use the scsi generics
+ * interface instead, as it is a more flexible approach to performing
+ * generic SCSI commands on a device.
+ */
+/*
+ * Send an arbitrary SCSI command on behalf of the user.  `buffer'
+ * points at:
+ *
+ *	struct sdata {
+ *	    unsigned int inlen;
+ *	    unsigned int outlen;
+ *	    unsigned char cmd[];   # However many bytes are used for cmd.
+ *	    unsigned char data[];
+ *	};
+ *
+ * At most MAX_BUF bytes are transferred in either direction; larger
+ * transfers must use the scsi generics interface.  On a command error
+ * the sense buffer is copied back over cmd_in, otherwise `outlen'
+ * result bytes are.  Returns the SCSI result word, or a negative errno
+ * for argument / copy failures.
+ *
+ * Fixed relative to the original: the scsi_malloc()ed bounce buffer was
+ * leaked on every verify_area() failure after allocation, and the two
+ * post-command failure returns additionally left the command slot busy
+ * (rq_status never reset) and skipped the queue restart / wakeup.
+ */
+int scsi_ioctl_send_command(Scsi_Device *dev, void *buffer)
+{
+ char * buf;
+ unsigned char cmd[12];
+ char * cmd_in;
+ Scsi_Cmnd * SCpnt;
+ unsigned char opcode;
+ int inlen, outlen, cmdlen;
+ int needed, buf_needed;
+ int timeout, retries, result;
+
+ if (!buffer)
+ return -EINVAL;
+
+ /*
+ * Verify that we can read at least this much.
+ */
+ result = verify_area(VERIFY_READ, buffer, 2*sizeof(long) + 1);
+ if (result) return result;
+
+ inlen = get_user((unsigned int *) buffer);
+ outlen = get_user( ((unsigned int *) buffer) + 1);
+
+ /*
+ * We do not transfer more than MAX_BUF with this interface.
+ * If the user needs to transfer more data than this, they
+ * should use scsi_generics instead.
+ */
+ if( inlen > MAX_BUF ) return -EINVAL;
+ if( outlen > MAX_BUF ) return -EINVAL;
+
+ cmd_in = (char *) ( ((int *)buffer) + 2);
+ opcode = get_user(cmd_in);
+
+ /*
+ * Allocate a DMA-able bounce buffer large enough for whichever
+ * direction moves more data (rounded up to a 512-byte multiple).
+ */
+ needed = buf_needed = (inlen > outlen ? inlen : outlen);
+ if(buf_needed){
+ buf_needed = (buf_needed + 511) & ~511;
+ if (buf_needed > MAX_BUF) buf_needed = MAX_BUF;
+ buf = (char *) scsi_malloc(buf_needed);
+ if (!buf) return -ENOMEM;
+ memset(buf, 0, buf_needed);
+ } else
+ buf = NULL;
+
+ /*
+ * Obtain the command from the user's address space.
+ */
+ cmdlen = COMMAND_SIZE(opcode);
+
+ result = verify_area(VERIFY_READ, cmd_in,
+ cmdlen + inlen > MAX_BUF ? MAX_BUF : inlen);
+ if (result) {
+ /* was a leak: release the bounce buffer before bailing out */
+ if (buf) scsi_free(buf, buf_needed);
+ return result;
+ }
+
+ memcpy_fromfs ((void *) cmd, cmd_in, cmdlen);
+
+ /*
+ * Obtain the data to be sent to the device (if any).
+ */
+ memcpy_fromfs ((void *) buf,
+ (void *) (cmd_in + cmdlen),
+ inlen);
+
+ /*
+ * Set the lun field to the correct value.
+ */
+ cmd[1] = ( cmd[1] & 0x1f ) | (dev->lun << 5);
+
+ /* Slow operations get correspondingly generous timeouts. */
+ switch (opcode)
+ {
+ case FORMAT_UNIT:
+ timeout = FORMAT_UNIT_TIMEOUT;
+ retries = 1;
+ break;
+ case START_STOP:
+ timeout = START_STOP_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ case MOVE_MEDIUM:
+ timeout = MOVE_MEDIUM_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ case READ_ELEMENT_STATUS:
+ timeout = READ_ELEMENT_STATUS_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ default:
+ timeout = NORMAL_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ }
+
+#ifndef DEBUG_NO_CMD
+
+ SCpnt = allocate_device(NULL, dev, 1);
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done,
+ timeout, retries);
+ down(&sem); /* scsi_ioctl_done() ups this on completion */
+ }
+
+ /*
+ * If there was an error condition, pass the sense data back to the
+ * user; otherwise pass back the result data.  On verify failure we
+ * still fall through to the cleanup below (the original returned
+ * early here, leaking buf and the command slot).
+ */
+ if(SCpnt->result) {
+ result = verify_area(VERIFY_WRITE,
+ cmd_in,
+ sizeof(SCpnt->sense_buffer));
+ if (result == 0)
+ memcpy_tofs((void *) cmd_in,
+ SCpnt->sense_buffer,
+ sizeof(SCpnt->sense_buffer));
+ } else {
+ result = verify_area(VERIFY_WRITE, cmd_in, outlen);
+ if (result == 0)
+ memcpy_tofs ((void *) cmd_in, buf, outlen);
+ }
+ if (result == 0)
+ result = SCpnt->result;
+
+ SCpnt->request.rq_status = RQ_INACTIVE;
+
+ if (buf) scsi_free(buf, buf_needed);
+
+ if(SCpnt->device->scsi_request_fn)
+ (*SCpnt->device->scsi_request_fn)();
+
+ wake_up(&SCpnt->device->device_wait);
+ return result;
+#else
+ {
+ int i;
+ printk("scsi_ioctl : device %d. command = ", dev->id);
+ for (i = 0; i < 12; ++i)
+ printk("%02x ", cmd[i]);
+ printk("\nbuffer =");
+ for (i = 0; i < 20; ++i)
+ printk("%02x ", buf[i]);
+ printk("\n");
+ printk("inlen = %d, outlen = %d, cmdlen = %d\n",
+ inlen, outlen, cmdlen);
+ printk("buffer = %d, cmd_in = %d\n", buffer, cmd_in);
+ }
+ return 0;
+#endif
+}
+
+/*
+ * the scsi_ioctl() function differs from most ioctls in that it does
+ * not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a scsi_devices[] element, a structure.
+ */
+/*
+ * Dispatch a SCSI ioctl for `dev'.  Simple queries are answered
+ * directly; door lock/unlock, test-unit-ready and start/stop build a
+ * 6-byte CDB and hand it to ioctl_internal_command().  Note that
+ * scsi_cmd[] byte 5 (the control byte) is zeroed explicitly in every
+ * case below.
+ */
+int scsi_ioctl (Scsi_Device *dev, int cmd, void *arg)
+{
+ int result;
+ char scsi_cmd[12];
+
+ /* No idea how this happens.... */
+ if (!dev) return -ENXIO;
+
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ /* Pack id/lun/channel/host-inode into one word for the user */
+ result = verify_area(VERIFY_WRITE, (void *) arg, 2*sizeof(long));
+ if (result) return result;
+
+ put_user(dev->id
+ + (dev->lun << 8)
+ + (dev->channel << 16)
+ + ((dev->host->hostt->proc_dir->low_ino & 0xff) << 24),
+ (unsigned long *) arg);
+ put_user( dev->host->unique_id, (unsigned long *) arg+1);
+ return 0;
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ result = verify_area(VERIFY_WRITE, (void *) arg, sizeof(int));
+ if (result) return result;
+ put_user( dev->host->host_no, (int *) arg);
+ return 0;
+ case SCSI_IOCTL_TAGGED_ENABLE:
+ if(!suser()) return -EACCES;
+ if(!dev->tagged_supported) return -EINVAL;
+ dev->tagged_queue = 1;
+ dev->current_tag = 1;
+ return 0;
+ case SCSI_IOCTL_TAGGED_DISABLE:
+ if(!suser()) return -EACCES;
+ if(!dev->tagged_supported) return -EINVAL;
+ dev->tagged_queue = 0;
+ dev->current_tag = 0;
+ return 0;
+ case SCSI_IOCTL_PROBE_HOST:
+ return ioctl_probe(dev->host, arg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ if(!suser() || securelevel > 0) return -EACCES;
+ return scsi_ioctl_send_command((Scsi_Device *) dev, arg);
+ case SCSI_IOCTL_DOORLOCK:
+ if (!dev->removable || !dev->lockable) return 0;
+ scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = SCSI_REMOVAL_PREVENT;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ NORMAL_TIMEOUT, NORMAL_RETRIES);
+ break; /* not reached */
+ case SCSI_IOCTL_DOORUNLOCK:
+ if (!dev->removable || !dev->lockable) return 0;
+ scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = SCSI_REMOVAL_ALLOW;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ NORMAL_TIMEOUT, NORMAL_RETRIES);
+ case SCSI_IOCTL_TEST_UNIT_READY:
+ scsi_cmd[0] = TEST_UNIT_READY;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 0;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ NORMAL_TIMEOUT, NORMAL_RETRIES);
+ break; /* not reached */
+ case SCSI_IOCTL_START_UNIT:
+ scsi_cmd[0] = START_STOP;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 1; /* START bit set */
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ START_STOP_TIMEOUT, NORMAL_RETRIES);
+ break; /* not reached */
+ case SCSI_IOCTL_STOP_UNIT:
+ scsi_cmd[0] = START_STOP;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 0; /* START bit clear: spin down */
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ START_STOP_TIMEOUT, NORMAL_RETRIES);
+ break; /* not reached */
+ default :
+ return -EINVAL;
+ }
+ return -EINVAL; /* not reached - every case above returns */
+}
+
+/*
+ * Just like scsi_ioctl, only callable from kernel space with no
+ * fs segment fiddling.
+ */
+
+/*
+ * Kernel-space entry to scsi_ioctl(): temporarily widen the user-copy
+ * segment limit to the kernel data segment so the memcpy_tofs/fromfs
+ * calls inside operate on kernel addresses, then restore it.
+ */
+int kernel_scsi_ioctl (Scsi_Device *dev, int cmd, void *arg) {
+ unsigned long saved_fs = get_fs();
+ int rc;
+
+ set_fs(get_ds());
+ rc = scsi_ioctl (dev, cmd, arg);
+ set_fs(saved_fs);
+ return rc;
+}
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsi_proc.c b/linux/src/drivers/scsi/scsi_proc.c
new file mode 100644
index 0000000..d1fa28d
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi_proc.c
@@ -0,0 +1,302 @@
+/*
+ * linux/drivers/scsi/scsi_proc.c
+ *
+ * The functions in this file provide an interface between
+ * the PROC file system and the SCSI device drivers
+ * It is mainly used for debugging, statistics and to pass
+ * information directly to the lowlevel driver.
+ *
+ * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ * Version: 0.99.8 last change: 95/09/13
+ *
+ * generic command parser provided by:
+ * Andreas Heilwagen <crashcar@informatik.uni-koblenz.de>
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/proc_fs.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+
+extern int scsi_proc_info(char *, char **, off_t, int, int, int);
+
+struct scsi_dir {
+ struct proc_dir_entry entry;
+ char name[4];
+};
+
+
+/* generic_proc_info
+ * Used if the driver currently has no own support for /proc/scsi
+ */
+/* generic_proc_info
+ * Fallback /proc/scsi read handler for drivers without their own
+ * proc_info.  Emits a fixed notice and applies the usual procfs
+ * offset/length windowing; writes (inout == TRUE) are rejected.
+ */
+int generic_proc_info(char *buffer, char **start, off_t offset,
+                      int length, int inode, int inout)
+{
+    int len, pos, begin;
+
+    if (inout == TRUE)          /* this is a no-op for writes */
+        return -ENOSYS;
+
+    begin = 0;
+    len = pos = sprintf(buffer,
+                        "The driver does not yet support the proc-fs\n");
+    if (pos < offset) {
+        begin = pos;
+        len = 0;
+    }
+
+    *start = buffer + (offset - begin); /* Start of wanted data */
+    len -= offset - begin;
+    return len > length ? length : len;
+}
+
+/* dispatch_scsi_info is the central dispatcher
+ * It is the interface between the proc-fs and the SCSI subsystem code
+ */
+/* dispatch_scsi_info is the central dispatcher between the proc-fs and
+ * the SCSI subsystem.  PROC_SCSI_SCSI goes to the core's handler; any
+ * other inode is matched against the registered hosts and routed to
+ * that host's proc_info (or the generic fallback).  -EBADF when no
+ * host owns the inode.
+ */
+extern int dispatch_scsi_info(int ino, char *buffer, char **start,
+                              off_t offset, int length, int func)
+{
+    struct Scsi_Host *shpnt;
+
+    if (ino == PROC_SCSI_SCSI) {
+        /* The scsi core itself, rather than any lowlevel driver. */
+        return scsi_proc_info(buffer, start, offset, length, 0, func);
+    }
+
+    for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+        if (ino == shpnt->host_no + PROC_SCSI_FILE) {
+            if (shpnt->hostt->proc_info == NULL)
+                return generic_proc_info(buffer, start, offset, length,
+                                         shpnt->host_no, func);
+            return shpnt->hostt->proc_info(buffer, start, offset,
+                                           length, shpnt->host_no, func);
+        }
+    }
+    return -EBADF;
+}
+
+/*
+ * Register the template's /proc/scsi directory, then create one
+ * numbered entry (named after host_no) underneath it for every
+ * registered host driven by this template.  Panics if the tiny
+ * per-entry allocation fails.
+ */
+void build_proc_dir_entries(Scsi_Host_Template *tpnt)
+{
+    struct Scsi_Host *shpnt;
+    struct scsi_dir *hba_dir;
+
+    proc_scsi_register(0, tpnt->proc_dir);
+
+    for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+        if (shpnt->hostt != tpnt)
+            continue;
+        hba_dir = scsi_init_malloc(sizeof(struct scsi_dir), GFP_KERNEL);
+        if (hba_dir == NULL)
+            panic("Not enough memory to register SCSI HBA in /proc/scsi !\n");
+        memset(hba_dir, 0, sizeof(struct scsi_dir));
+        hba_dir->entry.low_ino = PROC_SCSI_FILE + shpnt->host_no;
+        hba_dir->entry.namelen = sprintf(hba_dir->name, "%d",
+                                         shpnt->host_no);
+        hba_dir->entry.name = hba_dir->name;
+        hba_dir->entry.mode = S_IFREG | S_IRUGO | S_IWUSR;
+        proc_scsi_register(tpnt->proc_dir, &hba_dir->entry);
+    }
+}
+
+/*
+ * parseHandle *parseInit(char *buf, char *cmdList, int cmdNum);
+ * gets a pointer to a null terminated data buffer
+ * and a list of commands with blanks as delimiter
+ * in between.
+ * The commands have to be alphanumerically sorted.
+ * cmdNum has to contain the number of commands.
+ * On success, a pointer to a handle structure
+ * is returned, NULL on failure
+ *
+ * int parseOpt(parseHandle *handle, char **param);
+ * processes the next parameter. On success, the
+ * index of the appropriate command in the cmdList
+ * is returned, starting with zero.
+ * param points to the null terminated parameter string.
+ * On failure, -1 is returned.
+ *
+ * The databuffer buf may only contain pairs of commands
+ * options, separated by blanks:
+ * <Command> <Parameter> [<Command> <Parameter>]*
+ */
+
+/*
+ * Parser state for the generic /proc command parser below.  Note that
+ * only the first four declarators carry a `*' (or `**'): cmdNum is a
+ * plain char holding the number of commands.
+ */
+typedef struct
+{
+ char *buf, /* command buffer */
+ *cmdList, /* command list */
+ *bufPos, /* actual position */
+ **cmdPos, /* cmdList index */
+ cmdNum; /* cmd number */
+} parseHandle;
+
+
+/*
+ * Dispose of a parser handle: the command-index table first, then the
+ * handle itself.  Always yields -1 so error paths can simply
+ * "return parseFree(handle);".
+ */
+inline int parseFree (parseHandle *handle)
+{
+ kfree (handle->cmdPos);
+ kfree (handle);
+ return -1;
+}
+
+
+/*
+ * Build a parse handle for `buf'.  `cmdList' must be a writable,
+ * blank-separated, alphabetically sorted command string with cmdNum
+ * entries; it is split into NUL-terminated pieces in place and
+ * cmdPos[] records where each piece starts.  Returns NULL on bad
+ * arguments or allocation failure.
+ */
+parseHandle *parseInit(char *buf, char *cmdList, int cmdNum)
+{
+ char *ptr; /* temp pointer */
+ parseHandle *handle; /* new handle */
+
+ if (!buf || !cmdList) /* bad input ? */
+ return(NULL);
+ if ((handle = (parseHandle*) kmalloc(sizeof(parseHandle), GFP_KERNEL)) == 0)
+ return(NULL); /* out of memory */
+ /*
+ * cmdPos holds cmdNum `char *' entries.  The original sizing used
+ * sizeof(int), which under-allocates wherever pointers are wider
+ * than int (e.g. 64-bit targets) and then overflows the block in
+ * the scan loop below.
+ */
+ if ((handle->cmdPos = (char**) kmalloc(sizeof(char *) * cmdNum, GFP_KERNEL)) == 0) {
+ kfree(handle);
+ return(NULL); /* out of memory */
+ }
+
+ handle->buf = handle->bufPos = buf; /* init handle */
+ handle->cmdList = cmdList;
+ handle->cmdNum = cmdNum;
+
+ handle->cmdPos[cmdNum = 0] = cmdList;
+ for (ptr = cmdList; *ptr; ptr++) { /* scan command string */
+ if(*ptr == ' ') { /* and insert zeroes */
+ *ptr++ = 0;
+ /* NOTE(review): ptr advances twice here (the post-increment
+ * plus the loop's ptr++), so the first character of each
+ * command is never itself tested for ' ' - harmless for the
+ * alphanumeric command lists this is used with. */
+ handle->cmdPos[++cmdNum] = ptr++;
+ }
+ }
+ return(handle);
+}
+
+
+/*
+ * Parse the next "<command> <parameter>" pair from the handle's buffer.
+ * Returns the index of the matched command within the cmdList, with
+ * *param pointing at the parameter (NUL-terminated in place), or -1 on
+ * any error - in which case the handle has been freed.
+ */
+int parseOpt(parseHandle *handle, char **param)
+{
+ int cmdIndex = 0,
+ cmdLen = 0;
+ char *startPos;
+
+ if (!handle) /* invalid handle */
+ return(-1); /* was parseFree(handle), which dereferences NULL */
+ /* skip spaces */
+ for (; *(handle->bufPos) && *(handle->bufPos) == ' '; handle->bufPos++);
+ if (!*(handle->bufPos))
+ return(parseFree(handle)); /* end of data */
+
+ startPos = handle->bufPos; /* store cmd start */
+ /* Advance cmdIndex through the sorted command table, one matched
+ * character at a time, until the current command is fully consumed. */
+ for (; handle->cmdPos[cmdIndex][cmdLen] && *(handle->bufPos); handle->bufPos++)
+ { /* no string end? */
+ for (;;)
+ {
+ if (*(handle->bufPos) == handle->cmdPos[cmdIndex][cmdLen])
+ break; /* char matches ? */
+ else
+ if (memcmp(startPos, (char*)(handle->cmdPos[++cmdIndex]), cmdLen))
+ return(parseFree(handle)); /* unknown command */
+
+ if (cmdIndex >= handle->cmdNum)
+ return(parseFree(handle)); /* unknown command */
+ }
+
+ cmdLen++; /* next char */
+ }
+
+ /* Get param. First skip all blanks, then insert zero after param */
+
+ for (; *(handle->bufPos) && *(handle->bufPos) == ' '; handle->bufPos++);
+ *param = handle->bufPos;
+
+ for (; *(handle->bufPos) && *(handle->bufPos) != ' '; handle->bufPos++);
+ *(handle->bufPos++) = 0;
+
+ return(cmdIndex);
+}
+
+/*
+ * Append a human-readable description of `scd' (host/channel/id/lun,
+ * vendor, model, revision, type and ANSI level) at buffer + len.
+ * Inquiry bytes below 0x20 are printed as blanks.  On return *size
+ * holds the number of bytes written.
+ */
+void proc_print_scsidevice(Scsi_Device *scd, char *buffer, int *size, int len)
+{
+ int x, y = *size; /* note: this initializer is dead - y is
+ * overwritten by the first sprintf below */
+
+ y = sprintf(buffer + len,
+ "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
+ scd->host->host_no, scd->channel, scd->id, scd->lun);
+ for (x = 0; x < 8; x++) {
+ if (scd->vendor[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->vendor[x]);
+ else
+ y += sprintf(buffer + len + y," ");
+ }
+ y += sprintf(buffer + len + y, " Model: ");
+ for (x = 0; x < 16; x++) {
+ if (scd->model[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->model[x]);
+ else
+ y += sprintf(buffer + len + y, " ");
+ }
+ y += sprintf(buffer + len + y, " Rev: ");
+ for (x = 0; x < 4; x++) {
+ if (scd->rev[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->rev[x]);
+ else
+ y += sprintf(buffer + len + y, " ");
+ }
+ y += sprintf(buffer + len + y, "\n");
+
+ y += sprintf(buffer + len + y, " Type: %s ",
+ scd->type < MAX_SCSI_DEVICE_CODE ?
+ scsi_device_types[(int)scd->type] : "Unknown " );
+ y += sprintf(buffer + len + y, " ANSI"
+ " SCSI revision: %02x", (scd->scsi_level < 3)?1:2);
+ if (scd->scsi_level == 2)
+ y += sprintf(buffer + len + y, " CCS\n");
+ else
+ y += sprintf(buffer + len + y, "\n");
+
+ *size = y;
+ return;
+}
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsicam.c b/linux/src/drivers/scsi/scsicam.c
new file mode 100644
index 0000000..c3fb879
--- /dev/null
+++ b/linux/src/drivers/scsi/scsicam.c
@@ -0,0 +1,229 @@
+/*
+ * scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc.
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult the SCSI-CAM draft.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/blk.h>
+#include <asm/unaligned.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsicam.h>
+
+static int setsize(unsigned long capacity,unsigned int *cyls,unsigned int *hds,
+ unsigned int *secs);
+
+/*
+ * Function : int scsicam_bios_param (Disk *disk, int dev, int *ip)
+ *
+ * Purpose : to determine the BIOS mapping used for a drive in a
+ * SCSI-CAM system, storing the results in ip as required
+ * by the HDIO_GETGEO ioctl().
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ */
+
+/*
+ * Determine the BIOS heads/sectors/cylinders mapping for `dev' and
+ * store it in ip[0]/ip[1]/ip[2] respectively, first from the on-disk
+ * partition table, then from the CAM setsize() formula, and finally
+ * from a fixed large-disk fallback.  Returns -1 only when the first
+ * block cannot be read.
+ */
+int scsicam_bios_param (Disk *disk, /* SCSI disk */
+ kdev_t dev, /* Device major, minor */
+ int *ip /* Heads, sectors, cylinders in that order */) {
+
+ struct buffer_head *bh;
+ int ret_code;
+ int size = disk->capacity;
+ unsigned long temp_cyl;
+
+ /* Read the first KB of the whole disk (partition bits masked off)
+ * so scsi_partsize() can look at the partition table. */
+ if (!(bh = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, 1024)))
+ return -1;
+
+ /* try to infer mapping from partition table */
+ ret_code = scsi_partsize (bh, (unsigned long) size, (unsigned int *) ip + 2,
+ (unsigned int *) ip + 0, (unsigned int *) ip + 1);
+ brelse (bh);
+
+ if (ret_code == -1) {
+ /* pick some standard mapping with at most 1024 cylinders,
+ and at most 62 sectors per track - this works up to
+ 7905 MB */
+ ret_code = setsize ((unsigned long) size, (unsigned int *) ip + 2,
+ (unsigned int *) ip + 0, (unsigned int *) ip + 1);
+ }
+
+ /* if something went wrong, then apparently we have to return
+ a geometry with more than 1024 cylinders */
+ if (ret_code || ip[0] > 255 || ip[1] > 63) {
+ ip[0] = 64;
+ ip[1] = 32;
+ temp_cyl = size / (ip[0] * ip[1]);
+ if (temp_cyl > 65534) {
+ ip[0] = 255;
+ ip[1] = 63;
+ }
+ ip[2] = size / (ip[0] * ip[1]);
+ }
+
+ return 0;
+}
+
+/*
+ * Function : static int scsi_partsize(struct buffer_head *bh, unsigned long
+ * capacity,unsigned int *cyls, unsigned int *hds, unsigned int *secs);
+ *
+ * Purpose : to determine the BIOS mapping used to create the partition
+ * table, storing the results in *cyls, *hds, and *secs
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ */
+
+/*
+ * Infer the BIOS geometry that was used to build the partition table in
+ * `bh' (the disk's first block): pick the partition ending on the
+ * highest cylinder, reconstruct its CHS end address, and accept the
+ * geometry if it reproduces the partition's LBA end either directly or
+ * via the >1023-cylinder extension.  Fills *cyls/*hds/*secs and returns
+ * 0 on success, -1 when no usable table is found.
+ */
+int scsi_partsize(struct buffer_head *bh, unsigned long capacity,
+ unsigned int *cyls, unsigned int *hds, unsigned int *secs) {
+ struct partition *p, *largest = NULL;
+ int i, largest_cyl;
+ int cyl, ext_cyl, end_head, end_cyl, end_sector;
+ unsigned int logical_end, physical_end, ext_physical_end;
+
+
+ /* 0xAA55 at offset 510 marks a valid DOS partition table. */
+ if (*(unsigned short *) (bh->b_data+510) == 0xAA55) {
+ for (largest_cyl = -1, p = (struct partition *)
+ (0x1BE + bh->b_data), i = 0; i < 4; ++i, ++p) {
+ if (!p->sys_ind)
+ continue;
+#ifdef DEBUG
+ printk ("scsicam_bios_param : partition %d has system \n",
+ i);
+#endif
+ /* cylinder bits 8-9 live in the top bits of the sector byte */
+ cyl = p->cyl + ((p->sector & 0xc0) << 2);
+ if (cyl > largest_cyl) {
+ largest_cyl = cyl;
+ largest = p;
+ }
+ }
+ }
+
+ if (largest) {
+ end_cyl = largest->end_cyl + ((largest->end_sector & 0xc0) << 2);
+ end_head = largest->end_head;
+ end_sector = largest->end_sector & 0x3f;
+
+ if( end_head + 1 == 0 || end_sector == 0 ) return -1;
+
+#ifdef DEBUG
+ printk ("scsicam_bios_param : end at h = %d, c = %d, s = %d\n",
+ end_head, end_cyl, end_sector);
+#endif
+
+ physical_end = end_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+ /* This is the actual _sector_ number at the end */
+ logical_end = get_unaligned(&largest->start_sect)
+ + get_unaligned(&largest->nr_sects);
+
+ /* This is for >1023 cylinders */
+ ext_cyl= (logical_end-(end_head * end_sector + end_sector))
+ /(end_head + 1) / end_sector;
+ ext_physical_end = ext_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+#ifdef DEBUG
+ printk("scsicam_bios_param : logical_end=%d physical_end=%d ext_physical_end=%d ext_cyl=%d\n"
+ ,logical_end,physical_end,ext_physical_end,ext_cyl);
+#endif
+
+ if ((logical_end == physical_end) ||
+ (end_cyl==1023 && ext_physical_end==logical_end)) {
+ *secs = end_sector;
+ *hds = end_head + 1;
+ *cyls = capacity / ((end_head + 1) * end_sector);
+ return 0;
+ }
+
+#ifdef DEBUG
+ printk ("scsicam_bios_param : logical (%u) != physical (%u)\n",
+ logical_end, physical_end);
+#endif
+ }
+ return -1;
+}
+
+/*
+ * Function : static int setsize(unsigned long capacity,unsigned int *cyls,
+ * unsigned int *hds, unsigned int *secs);
+ *
+ * Purpose : to determine a near-optimal int 0x13 mapping for a
+ * SCSI disk in terms of lost space of size capacity, storing
+ * the results in *cyls, *hds, and *secs.
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ * Extracted from
+ *
+ * WORKING X3T9.2
+ * DRAFT 792D
+ *
+ *
+ * Revision 6
+ * 10-MAR-94
+ * Information technology -
+ * SCSI-2 Common access method
+ * transport and SCSI interface module
+ *
+ * ANNEX A :
+ *
+ * setsize() converts a read capacity value to int 13h
+ * head-cylinder-sector requirements. It minimizes the value for
+ * number of heads and maximizes the number of cylinders. This
+ * will support rather large disks before the number of heads
+ * will not fit in 4 bits (or 6 bits). This algorithm also
+ * minimizes the number of sectors that will be unused at the end
+ * of the disk while allowing for very large disks to be
+ * accommodated. This algorithm does not use physical geometry.
+ */
+
+/*
+ * SCSI-CAM Annex A geometry computation: start from 1024 cylinders and
+ * 62 sectors/track, then grow heads (and if needed sectors) until the
+ * capacity divides evenly enough, finally recomputing cylinders.
+ * Returns 0 on success, (unsigned)-1 when the result has 0 cylinders.
+ */
+static int setsize(unsigned long capacity,unsigned int *cyls,unsigned int *hds,
+ unsigned int *secs) {
+ unsigned int rv = 0;
+ unsigned long heads, sectors, cylinders, temp;
+
+ cylinders = 1024L; /* Set number of cylinders to max */
+ sectors = 62L; /* Maximize sectors per track */
+
+ temp = cylinders * sectors; /* Compute divisor for heads */
+ heads = capacity / temp; /* Compute value for number of heads */
+ if (capacity % temp) { /* If there is a remainder ... */
+ heads++; /* ... increment number of heads */
+ temp = cylinders * heads; /* Compute divisor for sectors */
+ sectors = capacity / temp; /* Compute value for sectors per
+ track */
+ if (capacity % temp) { /* If still a remainder ... */
+ sectors++; /* ... increment number of sectors */
+ temp = heads * sectors; /* Compute divisor for cylinders */
+ cylinders = capacity / temp;/* Compute number of cylinders */
+ }
+ }
+ if (cylinders == 0) rv=(unsigned)-1;/* Give error if 0 cylinders */
+
+ *cyls = (unsigned int) cylinders; /* Stuff return values */
+ *secs = (unsigned int) sectors;
+ *hds = (unsigned int) heads;
+ return(rv);
+}
diff --git a/linux/src/drivers/scsi/scsiio.c b/linux/src/drivers/scsi/scsiio.c
new file mode 100644
index 0000000..cea68b8
--- /dev/null
+++ b/linux/src/drivers/scsi/scsiio.c
@@ -0,0 +1,1537 @@
+/***********************************************************************
+ * FILE NAME : SCSIIO.C *
+ * BY : C.L. Huang *
+ * Description: Device Driver for Tekram DC-390W/U/F (T) PCI SCSI *
+ * Bus Master Host Adapter *
+ ***********************************************************************/
+
+
+/*
+ * PrepareSG: build the hardware scatter/gather entry list for pSRB.
+ * Computes the SCRIPTS return address backwards from the adapter's
+ * 8-bit jump table (plus the 16-bit table offset when wide SCSI is
+ * enabled) -- 16 table bytes per SG entry -- then copies SGcount
+ * (address,length) pairs from pSRB->pSegmentList into the SGE area
+ * immediately preceding pSRB->SegmentPad.  NOTE(review): the shift
+ * arithmetic encodes the SCRIPTS jump-table layout; confirm against
+ * the script source before changing.
+ */
+static void
+PrepareSG( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ ULONG retAddr,wlval;
+ USHORT wval,i;
+ PSGL psgl;
+ PSGE psge;
+
+
+ retAddr = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ retAddr += jmp_table16;
+ wval = (USHORT)(pSRB->SGcount);
+ wval <<= 4; /* 16 bytes per entry, datain=8, dataout=8 */
+ /* (4 bytes for count, 4 bytes for addr) */
+ retAddr -= (ULONG)wval;
+ pSRB->ReturnAddr = retAddr; /* return address for SCRIPT */
+ if(wval)
+ {
+ wval >>= 1;
+ wlval = (ULONG) pSRB->SegmentPad;
+ wlval -= (ULONG)wval;
+ wval >>= 3;
+ psge = (PSGE) wlval;
+ psgl = pSRB->pSegmentList;
+ for(i=0; i<wval; i++)
+ {
+#ifndef VERSION_ELF_1_2_13
+ psge->SGXPtr = virt_to_phys( psgl->address );
+#else
+ psge->SGXPtr = (ULONG) psgl->address;
+#endif
+ psge->SGXLen = psgl->length;
+ psge++;
+ psgl++;
+ }
+ }
+}
+
+
+/*
+ * DC390W_StartSCSI: start execution of pSRB on pDCB's target by
+ * signalling the SCRIPTS processor (SIGNAL_PROC -> ISTAT) and
+ * recording the DCB/SRB as active on the adapter.  TagNumber is
+ * preset to 31 -- presumably the "untagged" sentinel until a real tag
+ * is assigned at selection time; TODO confirm.
+ */
+static void
+DC390W_StartSCSI( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ USHORT ioport;
+ UCHAR bval;
+
+ pSRB->TagNumber = 31;
+ ioport = pACB->IOPortBase;
+ bval = SIGNAL_PROC;
+ outb(bval,ioport+ISTAT);
+ pACB->pActiveDCB = pDCB;
+ pDCB->pActiveSRB = pSRB;
+ return;
+}
+
+
+/*
+ * DC390W_Interrupt: top-level interrupt handler.  Scans the adapter
+ * list for the ACB that owns this IRQ and has an interrupt pending
+ * (ISTAT), clears a pending abort, combines the SCSI status (SIST0,
+ * shifted left 8) and DMA status (DSTAT) into one word, and
+ * dispatches: fatal conditions to ExceptionHandler, SCRIPTS software
+ * interrupts through the IntVector table (indexed by DSPS), and
+ * parity / phase-mismatch conditions to their handlers.
+ * NOTE(review): only the (PACB)-1 list terminator is checked after
+ * the scan loop -- confirm how the pNextACB chain is terminated, as a
+ * NULL end pointer would be dereferenced below.
+ */
+#ifndef VERSION_ELF_1_2_13
+static void
+DC390W_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
+#else
+static void
+DC390W_Interrupt( int irq, struct pt_regs *regs)
+#endif
+{
+ PACB pACB;
+ PDCB pDCB;
+ ULONG wlval;
+ USHORT ioport = 0;
+ USHORT wval, i;
+ void (*stateV)( PACB );
+ UCHAR istat = 0;
+ UCHAR bval;
+
+ pACB = pACB_start;
+ if( pACB == NULL )
+ return;
+ for( i=0; i < adapterCnt; i++ )
+ {
+ if( pACB->IRQLevel == (UCHAR) irq )
+ {
+ ioport = pACB->IOPortBase;
+ istat = inb( ioport+ISTAT );
+ if( istat & (ABORT_OP+SCSI_INT_PENDING+DMA_INT_PENDING) )
+ break;
+ else
+ pACB = pACB->pNextACB;
+ }
+ else
+ {
+ pACB = pACB->pNextACB;
+ }
+ }
+
+ if( pACB == (PACB )-1 )
+ {
+ printk("DC390W_intr: Spurious interrupt detected!\n");
+ return;
+ }
+
+
+#ifdef DC390W_DEBUG1
+ printk("Istate=%2x,",istat);
+#endif
+ /* if Abort operation occurred, reset abort bit before reading DMA status
+ to prevent further aborted interrupt. */
+
+ if(istat & ABORT_OP)
+ {
+ istat &= ~ABORT_OP;
+ outb(istat,ioport+ISTAT);
+ }
+
+ pDCB = pACB->pActiveDCB;
+ bval = inb(ioport+CTEST2); /* Clear Signal Bit */
+
+ /* If Scsi Interrupt, then clear Interrupt Status by reading
+ Scsi interrupt status register 0. */
+
+ wlval = 0;
+ if(istat & SCSI_INT_PENDING)
+ {
+ wlval = (ULONG)inw( ioport+SIST0 );
+ wlval <<= 8;
+ }
+
+ /* If DMA Interrupt, then read the DMA status register to see what happen */
+
+ if(istat & DMA_INT_PENDING)
+ {
+ bval = inb(ioport+DSTAT);
+ wlval |= (ULONG) bval;
+ }
+
+#ifdef DC390W_DEBUG1
+ printk("IDstate=%8x,",(UINT) wlval);
+#endif
+ if(wlval & ( (SEL_TIMEOUT << 16)+
+ ((SCSI_GERROR+UNEXPECT_DISC+SCSI_RESET) << 8)+
+ ILLEGAL_INSTRUC+ABORT_) )
+ {
+ ExceptionHandler( wlval, pACB, pDCB );
+ }
+ else if( wlval & SCRIPTS_INT )
+ {
+ wval = inw( ioport+DSPS );
+ stateV = (void *) IntVector[wval];
+ stateV( pACB );
+ }
+ else if( wlval & ( PARITY_ERROR << 8) )
+ ParityError( pACB, pDCB );
+ else if( wlval & ( PHASE_MISMATCH << 8) )
+ PhaseMismatch( pACB );
+ return;
+}
+
+
+/*
+ * ExceptionHandler: handle the fatal/abnormal status bits gathered by
+ * the interrupt handler: SCSI reset or gross error, software abort,
+ * selection timeout, and unexpected disconnect (completing the active
+ * command with an appropriate status).  Anything unrecognized falls
+ * through to a full SCSI bus reset.
+ */
+static void
+ExceptionHandler(ULONG wlval, PACB pACB, PDCB pDCB)
+{
+ PSRB pSRB;
+ UCHAR bval;
+ USHORT ioport;
+
+/* disconnect/scsi reset/illegal instruction */
+
+ ioport = pACB->IOPortBase;
+ if(wlval & ( (SCSI_RESET+SCSI_GERROR) << 8) )
+ DC390W_ScsiRstDetect( pACB );
+ else if(wlval & ABORT_)
+ {
+#ifdef DC390W_DEBUG0
+ printk("AboRst,");
+#endif
+ if( !InitialTime )
+ DC390W_ResetSCSIBus2( pACB );
+ }
+ else if(wlval & (SEL_TIMEOUT << 16) )
+ {
+ pACB->status = SCSI_STAT_SEL_TIMEOUT;
+#ifdef DC390W_DEBUG1
+ printk("Selto,");
+#endif
+ DC390W_CmdCompleted( pACB );
+ }
+ else if(wlval & (UNEXPECT_DISC << 8) )
+ {
+ /* Target dropped off the bus: flush both FIFOs, then finish the
+ command.  A disconnect during sync/wide negotiation disables
+ that negotiation mode for the device. */
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ pSRB = pDCB->pActiveSRB;
+ if( pSRB->SRBState & DO_SYNC_NEGO )
+ {
+ pDCB->DevMode &= ~SYNC_NEGO_;
+ pACB->status = SCSI_STAT_CHECKCOND;
+ DC390W_CmdCompleted( pACB );
+ }
+ else if( pSRB->SRBState & DO_WIDE_NEGO )
+ {
+ pDCB->DevMode &= ~WIDE_NEGO_;
+ pACB->status = SCSI_STAT_CHECKCOND;
+ DC390W_CmdCompleted( pACB );
+ }
+ else
+ {
+ pACB->status = SCSI_STAT_UNEXP_BUS_F;
+ DC390W_CmdCompleted( pACB );
+ }
+#ifdef DC390W_DEBUG0
+ printk("Uxpbf,");
+#endif
+ }
+ else
+ {
+#ifdef DC390W_DEBUG0
+ printk("Except,");
+#endif
+ DC390W_ResetSCSIBus( pACB );
+ }
+}
+
+
+/*
+ * ParityError: handle a SCSI parity error.  If it happened during
+ * reselection (RE_SELECTED_ flag in SCRATCHA) the bus is reset;
+ * otherwise both FIFOs are cleared and either MSG_PARITY_ERROR (when
+ * the bus is in message-in phase) or MSG_INITIATOR_ERROR is queued
+ * for message-out before resuming SCRIPTS.  NOTE(review): ioport is
+ * declared ULONG here but USHORT in the sibling handlers -- harmless,
+ * but worth unifying.
+ */
+static void
+ParityError( PACB pACB, PDCB pDCB )
+{
+ ULONG ioport;
+ UCHAR bval,msg;
+ ULONG wlval;
+ PSRB pSRB;
+
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+SCRATCHA);
+ if(bval & RE_SELECTED_)
+ {
+#ifdef DC390W_DEBUG0
+ printk("ParityErr,");
+#endif
+ DC390W_ResetSCSIBus( pACB );
+ return;
+ }
+ else
+ {
+ pSRB = pDCB->pActiveSRB;
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+
+ bval = inb(ioport+DCMD);
+ bval &= 0x07; /* get phase bits */
+ if(bval == 0x07) /* message in phase */
+ {
+ msg = MSG_PARITY_ERROR;
+ wlval = pACB->jmp_clear_ack;
+ }
+ else
+ {
+ msg = MSG_INITIATOR_ERROR;
+ wlval = pACB->jmp_next;
+ }
+ pSRB->__msgout0[0] = 1;
+ pSRB->MsgOutBuf[0] = msg;
+ outl(wlval,(ioport+DSP));
+ return;
+ }
+}
+
+
+/*
+ * DC390W_Signal: SCRIPTS callback run at selection time.  Loads the
+ * SRB's physical address into DSA and the SCRIPTS return address into
+ * TEMP, builds the message-out buffer -- identify byte plus either a
+ * simple queue tag (normal commands on tagged devices) or a wide/sync
+ * negotiation message (INQUIRY / REQUEST_SENSE with disconnect
+ * disabled) -- then jumps SCRIPTS to the select entry.  Runs with
+ * interrupts disabled.
+ */
+static void
+DC390W_Signal( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval, flags;
+ UCHAR bval,msgcnt,tagnum;
+
+ save_flags(flags);
+ cli();
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+#ifdef DC390W_DEBUG0
+ printk("Signal,Cmd=%2x", pSRB->CmdBlock[0]);
+#endif
+ wlval = pSRB->PhysSRB;
+ outl(wlval,(ioport+DSA));
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ msgcnt = 1;
+ bval = pDCB->IdentifyMsg;
+ pSRB->MsgOutBuf[0] = bval;
+ if( (pSRB->CmdBlock[0] != INQUIRY) &&
+ (pSRB->CmdBlock[0] != REQUEST_SENSE) )
+ {
+ if(pDCB->MaxCommand > 1)
+ {
+ /* allocate the lowest free tag number from the bitmask */
+ wlval = 1;
+ tagnum = 0;
+ while( wlval & pDCB->TagMask )
+ {
+ wlval = wlval << 1;
+ tagnum++;
+ }
+ pDCB->TagMask |= wlval;
+ pSRB->TagNumber = tagnum;
+ pSRB->MsgOutBuf[1] = MSG_SIMPLE_QTAG;
+ pSRB->MsgOutBuf[2] = tagnum;
+ msgcnt = 3;
+ }
+ }
+ else
+ {
+ pSRB->MsgOutBuf[0] &= 0xBF; /* Disable disconnect */
+ if(pSRB->CmdBlock[0] == INQUIRY)
+ {
+ if(bval & 0x07)
+ goto type_6_3;
+ }
+ /* the 32-bit constants below are the extended-message headers
+ (length/EXTENDED_MESSAGE/code bytes) stored little-endian */
+ if(pDCB->DevMode & WIDE_NEGO_)
+ {
+ msgcnt = 5;
+ *((PULONG) &(pSRB->MsgOutBuf[1])) = 0x01030201;
+ pSRB->SRBState |= DO_WIDE_NEGO;
+ }
+ else if(pDCB->DevMode & SYNC_NEGO_)
+ {
+ msgcnt = 6;
+ *((PULONG) &(pSRB->MsgOutBuf[1])) = 0x00010301;
+ pSRB->MsgOutBuf[4] = pDCB->NegoPeriod;
+ pSRB->MsgOutBuf[5] = SYNC_NEGO_OFFSET;
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ }
+ }
+type_6_3:
+ pSRB->__msgout0[0] = (ULONG) msgcnt;
+ wlval = 0;
+ outl(wlval,(ioport+SCRATCHA));
+ bval = pDCB->DCBscntl0;
+ outb(bval,ioport+SCNTL0);
+ pSRB->__select = *((PULONG) &(pDCB->DCBselect));
+#ifdef DC390W_DEBUG0
+ printk("__sel=%8x,", (UINT)(pSRB->__select));
+#endif
+ wlval = pACB->jmp_select;
+ outl(wlval,(ioport+DSP));
+ restore_flags(flags);
+ return;
+}
+
+
+/*
+ * DC390W_MessageWide: process a wide-negotiation (WDTR) reply.  If we
+ * initiated the negotiation and the target answered with a 16-bit
+ * width, enable wide SCSI; an 8-bit answer is accepted silently; any
+ * other case queues MSG_REJECT.  Afterwards the SCRIPTS return
+ * address and sibling DCB transfer settings are updated, an optional
+ * sync negotiation is chained on, and SCRIPTS is resumed (set-ATN
+ * path when a message-out is pending, clear-ACK path otherwise).
+ */
+static void
+DC390W_MessageWide( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ PUCHAR msgoutPtr;
+ USHORT ioport;
+ ULONG wlval;
+ UCHAR bval,msgcnt;
+
+
+#ifdef DC390W_DEBUG0
+ printk("MsgWide,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ msgcnt = 0;
+ pDCB->DCBscntl3 &= ~EN_WIDE_SCSI;
+ msgoutPtr = pSRB->MsgOutBuf;
+ if( pSRB->SRBState & DO_WIDE_NEGO )
+ {
+ pSRB->SRBState &= ~DO_WIDE_NEGO;
+ if( pACB->msgin123[0] == 3 )
+ {
+ bval = pACB->msgin123[1];
+ if(bval == 1)
+ {
+ pDCB->DCBscntl3 |= EN_WIDE_SCSI;
+ goto x5;
+ }
+ if(bval < 1)
+ goto x5;
+ }
+ }
+
+/*type_11_1:*/
+ msgcnt = 1;
+ *msgoutPtr = MSG_REJECT_;
+ msgoutPtr++;
+x5:
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ AdjustTemp(pACB,pDCB,pSRB);
+ SetXferRate(pACB,pDCB);
+ if( pDCB->DevMode & SYNC_NEGO_ )
+ {
+ /* chain an SDTR request behind the (possible) reject message */
+ *((PULONG)msgoutPtr) = 0x00010301;
+ *(msgoutPtr + 3) = pDCB->NegoPeriod;
+ *(msgoutPtr + 4) = SYNC_NEGO_OFFSET;
+ msgcnt += 5;
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ }
+
+ pSRB->__msgout0[0] = (ULONG) msgcnt;
+ wlval = pACB->jmp_clear_ack;
+ if(msgcnt)
+ wlval = pACB->jmp_set_atn;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+/*
+ * DC390W_MessageSync: process a sync-negotiation (SDTR) reply.  An
+ * unsolicited or malformed message is rejected.  A zero offset or
+ * period selects asynchronous mode; otherwise the negotiated period
+ * (in 4ns units, hence the <<3 to get half-ns... NOTE(review): the
+ * exact unit conversion below encodes 53C8xx clock dividers; confirm
+ * against the chip data manual) is mapped to SCNTL3 clock-divider and
+ * SXFER transfer-period fields.  Finally the new rates are written to
+ * the chip and propagated to sibling LUNs via SetXferRate.
+ */
+static void
+DC390W_MessageSync( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval;
+ USHORT wval,wval1;
+ UCHAR bval,bval1;
+
+#ifdef DC390W_DEBUG0
+ printk("MsgSync,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ if( !(pSRB->SRBState & DO_SYNC_NEGO) )
+ goto MessageExtnd;
+ pSRB->SRBState &= ~DO_SYNC_NEGO;
+ if(pACB->msgin123[0] != 1)
+ {
+MessageExtnd:
+ pSRB->__msgout0[0] = 1;
+ pSRB->MsgOutBuf[0] = MSG_REJECT_;
+ wlval = pACB->jmp_set_atn;
+ outl(wlval,(ioport+DSP));
+ return;
+ }
+ bval = pACB->msgin123[2]; /* offset */
+asyncx:
+ pDCB->DCBsxfer = bval;
+ if(bval == 0) /* if offset or period == 0, async */
+ {
+ if( pACB->AdaptType == DC390W )
+ bval = SYNC_CLK_F2+ASYNC_CLK_F2;
+ else
+ bval = SYNC_CLK_F4+ASYNC_CLK_F4;
+ pDCB->DCBscntl3 = bval;
+ }
+ else
+ {
+ bval = pACB->msgin123[1];
+ if(bval == 0)
+ goto asyncx;
+ pDCB->SyncPeriod = bval;
+ wval = (USHORT)bval;
+ wval <<= 3;
+ bval = pDCB->DCBscntl3;
+ bval &= 0x0f;
+ if(wval < 200) /* < 100 ns ==> Fast-20 */
+ {
+ bval |= 0x90; /* Fast-20 and div 1 */
+ bval1 = 25; /* 12.5 ns */
+ }
+ else if(wval < 400)
+ {
+ bval |= 0x30; /* 1 cycle = 25ns */
+ bval1 = 50;
+ }
+ else /* Non Fast */
+ {
+ bval |= 0x50; /* 1 cycle = 50ns */
+ bval1 = 100;
+ }
+ if( pACB->AdaptType == DC390W )
+ bval -= 0x20; /* turn down to 40Mhz scsi clock */
+ /* assume 390W will not receive fast-20 */
+ /* round the period up to a whole number of clock cycles */
+ wval1 = wval;
+ wval /= bval1;
+ if(wval * bval1 < wval1)
+ wval++;
+ /* XFERP TP2 TP1 TP0 */
+ wval -= 4; /* 4 0 0 0 */
+ /* 5 0 0 1 */
+ wval <<= 5;
+ pDCB->DCBsxfer |= (UCHAR)wval;
+ pDCB->DCBscntl3 = bval;
+ }
+/*sync_2:*/
+ SetXferRate( pACB,pDCB );
+ wlval = pACB->jmp_clear_ack;
+/*sync_3:*/
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ bval = pDCB->DCBsxfer;
+ outb(bval,ioport+SXFER);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+/*
+ * DC390W_MsgReject: target rejected our last message.  A rejected
+ * wide negotiation falls back to narrow (optionally retrying with a
+ * sync negotiation); a rejected sync negotiation falls back to
+ * asynchronous transfers.  The resulting SCNTL3/SXFER values are
+ * written to the chip and SCRIPTS is resumed.
+ */
+static void
+DC390W_MsgReject( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ ULONG wlval;
+ USHORT ioport;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("Msgrjt,");
+#endif
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ wlval = pACB->jmp_clear_ack;
+ if(pSRB->SRBState & DO_WIDE_NEGO)
+ {
+ pSRB->SRBState &= ~DO_WIDE_NEGO;
+ pDCB->DCBscntl3 &= ~EN_WIDE_SCSI;
+ AdjustTemp( pACB, pDCB, pSRB );
+ SetXferRate( pACB, pDCB );
+ if( pDCB->DevMode & SYNC_NEGO_ )
+ {
+ /* wide refused: immediately try sync negotiation instead */
+ *((PULONG) &(pSRB->MsgOutBuf[0])) = 0x00010301;
+ pSRB->MsgOutBuf[3] = pDCB->NegoPeriod;
+ pSRB->MsgOutBuf[4] = SYNC_NEGO_OFFSET;
+ pSRB->__msgout0[0] = 5;
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ wlval = pACB->jmp_set_atn;
+ }
+ }
+ else
+ {
+ if(pSRB->SRBState & DO_SYNC_NEGO)
+ {
+ pSRB->SRBState &= ~DO_SYNC_NEGO;
+ pDCB->DCBsxfer = 0; /* reject sync msg, set aync */
+ if( pACB->AdaptType == DC390W )
+ bval = SYNC_CLK_F2+ASYNC_CLK_F2;
+ else
+ bval = SYNC_CLK_F4+ASYNC_CLK_F4;
+ pDCB->DCBscntl3 = bval;
+ SetXferRate(pACB,pDCB);
+ wlval = pACB->jmp_clear_ack;
+ }
+ }
+ ioport = pACB->IOPortBase;
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ bval = pDCB->DCBsxfer;
+ outb(bval,ioport+SXFER);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+/*
+ * AdjustTemp: after the wide/narrow mode of a device changed, move
+ * the SRB's SCRIPTS return address between the 8-bit and 16-bit jump
+ * tables (offset jmp_table16 apart) so it matches the new width, and
+ * write the corrected address to the TEMP register.
+ */
+static void
+AdjustTemp( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ USHORT ioport;
+ ULONG wlval;
+
+ wlval = pSRB->ReturnAddr;
+ if(wlval <= pACB->jmp_table8)
+ {
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ wlval += jmp_table16;
+ }
+ else
+ {
+ if((pDCB->DCBscntl3 & EN_WIDE_SCSI) == 0)
+ wlval -= jmp_table16;
+ }
+ pSRB->ReturnAddr = wlval;
+ ioport = pACB->IOPortBase;
+ outl(wlval,(ioport+TEMP));
+ return;
+}
+
+
+/*
+ * SetXferRate: propagate pDCB's negotiated transfer settings
+ * (DCBsxfer / DCBscntl3) to every other DCB with the same SCSI ID
+ * (i.e. the other LUNs of the target).  Only done from LUN 0; during
+ * device scan the settings are cached in CurrDCBscntl3 instead.
+ */
+static void
+SetXferRate( PACB pACB, PDCB pDCB )
+{
+ UCHAR bval;
+ USHORT cnt, i;
+ PDCB ptr;
+
+ if( !(pDCB->IdentifyMsg & 0x07) )
+ {
+ if( pACB->scan_devices )
+ {
+ CurrDCBscntl3 = pDCB->DCBscntl3;
+ }
+ else
+ {
+ ptr = pACB->pLinkDCB;
+ cnt = pACB->DeviceCnt;
+ bval = pDCB->UnitSCSIID;
+ for(i=0; i<cnt; i++)
+ {
+ if( ptr->UnitSCSIID == bval )
+ {
+ ptr->DCBsxfer = pDCB->DCBsxfer;
+ ptr->DCBscntl3 = pDCB->DCBscntl3;
+ }
+ ptr = ptr->pNextDCB;
+ }
+ }
+ }
+ return;
+}
+
+
+/*
+ * DC390W_UnknownMsg: reply MSG_REJECT to any message we do not
+ * understand, then resume SCRIPTS through the set-ATN path so the
+ * reject is sent.
+ */
+static void
+DC390W_UnknownMsg( PACB pACB )
+{
+ PSRB pSRB;
+ ULONG wlval;
+ USHORT ioport;
+
+ pSRB = pACB->pActiveDCB->pActiveSRB;
+ pSRB->__msgout0[0] = 1;
+ pSRB->MsgOutBuf[0] = MSG_REJECT_;
+ wlval = pACB->jmp_set_atn;
+ ioport = pACB->IOPortBase;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+/*
+ * DC390W_MessageExtnd: unsupported extended messages are simply
+ * rejected like any other unknown message.
+ */
+static void
+DC390W_MessageExtnd( PACB pACB )
+{
+ DC390W_UnknownMsg( pACB );
+}
+
+
+/*
+ * DC390W_Disconnected: target legally disconnected.  Saves SCRATCHA
+ * into the SRB, marks it SRB_DISCONNECT, re-arms SCRIPTS at the
+ * reselect entry, clears the active DCB and starts the next waiting
+ * SRB.  Runs with interrupts disabled; bails out (with a debug note)
+ * if no DCB is active.
+ */
+static void
+DC390W_Disconnected( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ ULONG wlval, flags;
+ USHORT ioport;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("Discnet,");
+#endif
+ save_flags(flags);
+ cli();
+ pDCB = pACB->pActiveDCB;
+ if (! pDCB)
+ {
+#ifdef DC390W_DEBUG0
+ printk("ACB:%08lx->ActiveDCB:%08lx !,", (ULONG)pACB, (ULONG)pDCB);
+#endif
+ restore_flags(flags); return;
+ }
+
+ pSRB = pDCB->pActiveSRB;
+
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+SCRATCHA);
+ pSRB->ScratchABuf = bval;
+ pSRB->SRBState |= SRB_DISCONNECT; /* 1.02 */
+ wlval = pACB->jmp_reselect;
+ outl(wlval,(ioport+DSP));
+ pACB->pActiveDCB = 0;
+ DoWaitingSRB( pACB );
+ restore_flags(flags);
+ return;
+}
+
+
+/*
+ * DC390W_Reselected: reselection without an identify message seen --
+ * fake a bare identify byte (0x80, LUN 0) and share the common
+ * reselection path.
+ */
+static void
+DC390W_Reselected( PACB pACB )
+{
+#ifdef DC390W_DEBUG0
+ printk("Rsel,");
+#endif
+ pACB->msgin123[0] = 0x80; /* set identify byte 80h */
+ DC390W_Reselected1(pACB);
+ return;
+}
+
+
+/*
+ * DC390W_Reselected1: common reselection path.  Requeues any SRB that
+ * was being selected, looks up the reselecting DCB by (LUN from the
+ * identify byte, ID from SSID), restores its transfer registers, and
+ * resumes: tagged devices continue at the reselect-tag SCRIPTS entry;
+ * untagged devices reconnect their disconnected SRB (or, if none is
+ * pending, allocate a throwaway SRB and queue MSG_ABORT for the
+ * unexpected reselection).  NOTE(review): the DCB lookup loop assumes
+ * a matching DCB always exists -- confirm the caller guarantees this.
+ */
+static void
+DC390W_Reselected1( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport, wval;
+ ULONG wlval, flags;
+ UCHAR bval;
+
+
+#ifdef DC390W_DEBUG0
+ printk("Rsel1,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ if(pDCB)
+ {
+ pSRB = pDCB->pActiveSRB;
+ RewaitSRB( pDCB, pSRB );
+ }
+
+ wval = (USHORT) (pACB->msgin123[0]);
+ wval = (wval & 7) << 8; /* get LUN */
+ wval |= (USHORT) (inb(ioport+SSID) & 0x0f); /* get ID */
+ pDCB = pACB->pLinkDCB;
+ while( *((PUSHORT) &pDCB->UnitSCSIID) != wval )
+ pDCB = pDCB->pNextDCB;
+ pACB->pActiveDCB = pDCB;
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ bval = pDCB->DCBsxfer;
+ outb(bval,ioport+SXFER);
+ bval = pDCB->DCBscntl0;
+ outb(bval,ioport+SCNTL0);
+ if(pDCB->MaxCommand > 1)
+ {
+ wlval = pACB->jmp_reselecttag;
+ outl(wlval,(ioport+DSP));
+ }
+ else
+ {
+ pSRB = pDCB->pActiveSRB;
+ if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) )
+ {
+ /* nothing was disconnected: abort the bogus reselection */
+ save_flags(flags);
+ cli();
+ pSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB->pNextSRB;
+ restore_flags(flags);
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->MsgOutBuf[0] = MSG_ABORT;
+ pSRB->__msgout0[0] = 1;
+ }
+ pSRB->SRBState &= ~SRB_DISCONNECT;
+ wlval = pSRB->PhysSRB;
+ outl(wlval,(ioport+DSA));
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ bval = pSRB->ScratchABuf;
+ outb(bval,ioport+SCRATCHA);
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ wlval = pACB->jmp_set_atn;
+ else
+ wlval = pACB->jmp_clear_ack;
+ outl(wlval,(ioport+DSP));
+ }
+ return;
+}
+
+
+/*
+ * DC390W_ReselectedT: tagged reselection.  Walks the device's going
+ * queue for the SRB whose tag matches msgin123[1]; a missing or
+ * non-disconnected tag gets a throwaway SRB with MSG_ABORT_TAG
+ * queued.  The chosen SRB's context (DSA, TEMP, SCRATCHA) is restored
+ * and SCRIPTS resumed via set-ATN (abort pending) or clear-ACK.
+ */
+static void
+DC390W_ReselectedT( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB, psrb1;
+ USHORT ioport;
+ ULONG wlval, flags;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("RselT,");
+#endif
+ ioport = pACB->IOPortBase;
+ bval = pACB->msgin123[1];
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pGoingSRB;
+ psrb1 = pDCB->pGoingLast;
+ if( !pSRB )
+ goto UXP_RSL;
+ for(;;)
+ {
+ if(pSRB->TagNumber != bval)
+ {
+ if( pSRB != psrb1 )
+ pSRB = pSRB->pNextSRB;
+ else
+ goto UXP_RSL;
+ }
+ else
+ break;
+ }
+ if( !(pSRB->SRBState & SRB_DISCONNECT) )
+ {
+UXP_RSL:
+ save_flags(flags);
+ cli();
+ pSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB->pNextSRB;
+ restore_flags(flags);
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
+ pSRB->__msgout0[0] = 1;
+ }
+ else
+ {
+ pSRB->SRBState &= ~SRB_DISCONNECT;
+ pDCB->pActiveSRB = pSRB;
+ }
+ wlval = pSRB->PhysSRB;
+ outl(wlval,(ioport+DSA));
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ bval = pSRB->ScratchABuf;
+ outb(bval,ioport+SCRATCHA);
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ wlval = pACB->jmp_set_atn;
+ else
+ wlval = pACB->jmp_clear_ack;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+/*
+ * DC390W_RestorePtr: handle a RESTORE POINTERS message -- reload TEMP
+ * with the SRB's saved SCRIPTS return address and re-issue the
+ * current DSP to continue.
+ */
+static void
+DC390W_RestorePtr( PACB pACB )
+{
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval;
+
+ pSRB = pACB->pActiveDCB->pActiveSRB;
+ wlval = pSRB->ReturnAddr;
+ ioport = pACB->IOPortBase;
+ outl(wlval,(ioport+TEMP));
+ wlval = inl(ioport+DSP);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+/*
+ * PhaseMismatch: the target changed phase in the middle of a SCRIPTS
+ * block move.  For data-in/data-out phases, compute how many bytes
+ * were NOT transferred (DBC residue, plus -- for data-out -- bytes
+ * still sitting in the DMA FIFO and the SODR/SODL output latches) and
+ * hand off to DataIOcommon to patch the SG entry.  Overrun-pad
+ * transfers and non-data phases just flush the FIFOs and re-enter the
+ * SCRIPTS phase dispatcher.
+ */
+static void
+PhaseMismatch( PACB pACB )
+{
+ USHORT ioport;
+ ULONG wlval,swlval;
+ USHORT wval;
+ UCHAR bval,phase;
+ PDCB pDCB;
+
+#ifdef DC390W_DEBUG0
+ printk("Mismatch,");
+#endif
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+SCRATCHA);
+ if(bval & OVER_RUN_) /* xfer PAD */
+ {
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ wlval = pACB->jmp_next; /* check phase */
+ outl(wlval,(ioport+DSP));
+ return;
+ }
+ pDCB = pACB->pActiveDCB;
+ wlval = inl(ioport+DBC);
+ phase = (UCHAR)((wlval & 0x07000000) >> 24);
+ wlval &= 0xffffff; /* bytes not xferred */
+ if( phase == SCSI_DATA_IN )
+ {
+ swlval = pACB->jmp_din8;
+ if( pDCB->DCBscntl3 & EN_WIDE_SCSI )
+ swlval += jmp_din16;
+ DataIOcommon(pACB,swlval,wlval);
+ }
+ else if( phase == SCSI_DATA_OUT )
+ {
+ /* residue = DBC count plus bytes buffered in the DMA FIFO
+ (CTEST5:DFIFO) and the SCSI output data/latch registers */
+ wval = (USHORT)inb(ioport+CTEST5);
+ wval <<= 8;
+ bval = inb(ioport+DFIFO);
+ wval |= (USHORT) bval;
+ wval -= ((USHORT)(wlval & 0xffff));
+ wval &= 0x3ff;
+ wlval += (ULONG)wval; /* # of bytes remains in FIFO */
+ bval = inb(ioport+SSTAT0);
+ if(bval & SODR_LSB_FULL)
+ wlval++; /* data left in Scsi Output Data Buffer */
+ if(bval & SODL_LSB_FULL)
+ wlval++; /* data left in Scsi Output Data Latch */
+ swlval = pACB->jmp_dout8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ {
+ swlval += jmp_dout16;
+ bval = inb(ioport+SSTAT2);
+ if(bval & SODR_MSB_FULL)
+ wlval++;
+ if(bval & SODL_MSB_FULL)
+ wlval++;
+ }
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ DataIOcommon(pACB,swlval,wlval);
+ }
+ else
+ {
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ if(phase == SCSI_MSG_OUT)
+ wlval = pACB->jmp_clear_atn;
+ else
+ wlval = pACB->jmp_next; /* check phase */
+ outl(wlval,(ioport+DSP));
+ }
+ return;
+}
+
+
+/*
+ * DataIOcommon: finish a data-phase mismatch.  Using the SCRIPTS DSP
+ * offset into the SG jump table, locates the interrupted SG entry in
+ * Segment0, shrinks it to the untransferred count and advances its
+ * address, then recomputes the SCRIPTS return address.  On a
+ * disconnect / save-pointers message the new address becomes the
+ * SRB's ReturnAddr; on a plain under-run it is stashed in
+ * RemainSegPtr for SRBdone's residual calculation.  Finally TEMP is
+ * reloaded and SCRIPTS resumes at the phase dispatcher.
+ * NOTE(review): wide SCSI is briefly disabled around the SBDL peek so
+ * the pending message byte can be read -- order-sensitive.
+ */
+static void
+DataIOcommon( PACB pACB, ULONG Swlval, ULONG Cwlval )
+{
+ /* Swlval - script address */
+ /* Cwlval - bytes not xferred */
+ PDCB pDCB;
+ PSRB pSRB;
+ PSGE Segptr;
+ USHORT ioport;
+ ULONG wlval,swlval,dataXferCnt;
+ UCHAR bval,bvald;
+
+ ioport = pACB->IOPortBase;
+ wlval = inl((ioport+DSP));
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ wlval -= Swlval;
+ bval = inb(ioport+SBCL);
+ bval &= 0x07;
+ if(bval == SCSI_MSG_IN)
+ {
+ bval = pDCB->DCBscntl3;
+ bval &= ~EN_WIDE_SCSI;
+ outb(bval,ioport+SCNTL3);
+ bval = inb(ioport+SBDL);
+ bvald = pDCB->DCBscntl3; /* enable WIDE SCSI */
+ outb(bvald,ioport+SCNTL3);
+ if(bval == MSG_DISCONNECT || bval == MSG_SAVE_PTR)
+ {
+ Segptr = (PSGE)((ULONG) &(pSRB->Segment0[0][0]) + wlval);
+ dataXferCnt = Segptr->SGXLen - Cwlval;
+ Segptr->SGXLen = Cwlval; /* modified count */
+ Segptr->SGXPtr += dataXferCnt; /* modified address */
+ swlval = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ swlval += jmp_table16;
+ wlval <<= 1;
+ swlval += wlval;
+ swlval = swlval - ((MAX_SG_LIST_BUF+1) * 16);
+ pSRB->ReturnAddr = swlval;
+ }
+ }
+ else if( Cwlval ) /* Remaining not xferred -- UNDER_RUN */
+ {
+ Segptr = (PSGE)((ULONG) &(pSRB->Segment0[0][0]) + wlval);
+ dataXferCnt = Segptr->SGXLen - Cwlval;
+ Segptr->SGXLen = Cwlval; /* modified count */
+ Segptr->SGXPtr += dataXferCnt; /* modified address */
+ swlval = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ swlval += jmp_table16;
+ wlval <<= 1;
+ swlval += wlval;
+ swlval = swlval - ((MAX_SG_LIST_BUF+1) * 16);
+ pSRB->RemainSegPtr = swlval;
+ }
+/* pm__1: */
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ wlval = pACB->jmp_next;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+/*
+ * DC390W_CmdCompleted: command finished on the bus.  Saves SCRATCHA
+ * (target status flags) into the SRB, releases the SRB's tag, clears
+ * the active DCB, re-arms SCRIPTS for reselection and hands the SRB
+ * to SRBdone for mid-layer completion.  Runs with interrupts
+ * disabled.
+ */
+static void
+DC390W_CmdCompleted( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval, flags;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("Cmplete,");
+#endif
+ save_flags(flags);
+ cli();
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ pDCB->pActiveSRB = NULL;
+ ioport = pACB->IOPortBase;
+
+ bval = inb(ioport+SCRATCHA);
+ pSRB->ScratchABuf = bval; /* save status */
+ bval = pSRB->TagNumber;
+ if(pDCB->MaxCommand > 1)
+ pDCB->TagMask &= (~(1 << bval)); /* free tag mask */
+ pACB->pActiveDCB = NULL; /* no active device */
+ wlval = pACB->jmp_reselect; /* enable reselection */
+ outl(wlval,(ioport+DSP));
+ SRBdone( pACB, pDCB, pSRB);
+ restore_flags(flags);
+ return;
+}
+
+
+/*
+ * SRBdone: translate adapter/target completion status into a Linux
+ * mid-layer result and finish the command.  Handles, in order:
+ *  - completion of a driver-issued auto REQUEST SENSE (restoring the
+ *    original CDB/SG state saved in Segment0 and possibly retrying);
+ *  - target statuses: CHECK CONDITION (compute the residual from the
+ *    interrupted SG entry, then issue auto request-sense), QUEUE FULL
+ *    (shrink MaxCommand and requeue), selection timeout, unexpected
+ *    bus free, bus-reset, and generic errors with retry;
+ *  - success / overrun bookkeeping from the saved SCRATCHA flags;
+ *  - device-scan bookkeeping at ckc_e: accept or unlink the probe DCB
+ *    on TEST UNIT READY / INQUIRY results and enable tagged queuing
+ *    for disks not on the bad-device list;
+ *  - finally unlink the SRB from the going queue, return it to the
+ *    free list, start waiting work and call pcmd->scsi_done.
+ * NOTE(review): too order-dependent to restructure safely; the
+ * Segment0 save area layout must match RequestSense exactly.
+ */
+static void
+SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb;
+ UCHAR bval, bval1, i, j, status;
+ PSCSICMD pcmd;
+ PSCSI_INQDATA ptr;
+ USHORT disable_tag;
+ ULONG flags;
+ PSGE ptr1;
+ PSGL ptr2;
+ ULONG wlval,swlval;
+
+ pcmd = pSRB->pcmd;
+ status = pACB->status;
+ if(pSRB->SRBFlag & AUTO_REQSENSE)
+ {
+ pSRB->SRBFlag &= ~AUTO_REQSENSE;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = SCSI_STAT_CHECKCOND;
+ if(status == SCSI_STAT_CHECKCOND)
+ {
+ pcmd->result = DID_BAD_TARGET << 16;
+ goto ckc_e;
+ }
+ if(pSRB->RetryCnt == 0)
+ {
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0][0];
+ pSRB->XferredLen = pSRB->Segment0[2][1];
+ if( (pSRB->XferredLen) &&
+ (pSRB->XferredLen >= pcmd->underflow) )
+ {
+ pcmd->result |= (DID_OK << 16);
+ }
+ else
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+ goto ckc_e;
+ }
+ else
+ {
+ pSRB->RetryCnt--;
+ pSRB->TargetStatus = 0;
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0][0];
+ *((PULONG) &(pSRB->CmdBlock[4])) = pSRB->Segment0[0][1];
+ *((PULONG) &(pSRB->CmdBlock[8])) = pSRB->Segment0[1][0];
+ pSRB->__command[0] = pSRB->Segment0[1][1] & 0xff;
+ pSRB->SGcount = (UCHAR) (pSRB->Segment0[1][1] >> 8);
+ *((PULONG) &(pSRB->pSegmentList))= pSRB->Segment0[2][0];
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+ goto ckc_e;
+ }
+ pcmd->result |= (DRIVER_SENSE << 24);
+ PrepareSG(pACB,pDCB,pSRB);
+ pSRB->XferredLen = 0;
+ DC390W_StartSCSI( pACB, pDCB, pSRB );
+ return;
+ }
+ }
+ if( status )
+ {
+ if( status == SCSI_STAT_CHECKCOND)
+ {
+ if( !(pSRB->ScratchABuf & SRB_OK) && (pSRB->SGcount) && (pSRB->RemainSegPtr) )
+ {
+ /* residual: bytes in SG entries not reached (wlval)
+ vs. the total request length (swlval) */
+ wlval = pSRB->RemainSegPtr;
+ swlval = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ swlval += jmp_table16;
+ swlval -= wlval;
+ swlval >>= 4;
+ bval = (UCHAR) swlval;
+ wlval = 0;
+ ptr1 = (PSGE) &pSRB->Segment0[MAX_SG_LIST_BUF+1][0];
+ for( i=0; i< bval; i++)
+ {
+ wlval += ptr1->SGXLen;
+ ptr1--;
+ }
+
+ bval = pSRB->SGcount;
+ swlval = 0;
+ ptr2 = pSRB->pSegmentList;
+ for( i=0; i< bval; i++)
+ {
+ swlval += ptr2->length;
+ ptr2++;
+ }
+ pSRB->XferredLen = swlval - wlval;
+ pSRB->RemainSegPtr = 0;
+#ifdef DC390W_DEBUG0
+ printk("XferredLen=%8x,NotXferLen=%8x,",(UINT) pSRB->XferredLen,(UINT) wlval);
+#endif
+ }
+ RequestSense( pACB, pDCB, pSRB );
+ return;
+ }
+ else if( status == SCSI_STAT_QUEUEFULL )
+ {
+ bval = (UCHAR) pDCB->GoingSRBCnt;
+ bval--;
+ pDCB->MaxCommand = bval;
+ RewaitSRB( pDCB, pSRB );
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ return;
+ }
+ else if(status == SCSI_STAT_SEL_TIMEOUT)
+ {
+ pSRB->AdaptStatus = H_SEL_TIMEOUT;
+ pSRB->TargetStatus = 0;
+ pcmd->result = DID_BAD_TARGET << 16;
+ }
+ else if(status == SCSI_STAT_UNEXP_BUS_F)
+ {
+ pSRB->AdaptStatus = H_UNEXP_BUS_FREE;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= DID_NO_CONNECT << 16;
+ }
+ else if(status == SCSI_STAT_BUS_RST_DETECT )
+ {
+ pSRB->AdaptStatus = H_ABORT;
+ pSRB->TargetStatus = 0;
+ pcmd->result = DID_RESET << 16;
+ }
+ else
+ {
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = status;
+ if( pSRB->RetryCnt )
+ {
+ pSRB->RetryCnt--;
+ pSRB->TargetStatus = 0;
+ PrepareSG(pACB,pDCB,pSRB);
+ pSRB->XferredLen = 0;
+ DC390W_StartSCSI( pACB, pDCB, pSRB );
+ return;
+ }
+ else
+ {
+ pcmd->result |= (DID_ERROR << 16) | (ULONG) (pACB->msgin123[0] << 8) |
+ (ULONG) status;
+ }
+ }
+ }
+ else
+ {
+ status = pSRB->ScratchABuf;
+ if(status & OVER_RUN_)
+ {
+ pSRB->AdaptStatus = H_OVER_UNDER_RUN;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16) | (pACB->msgin123[0] << 8);
+ }
+ else /* No error */
+ {
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16);
+ }
+ }
+ckc_e:
+
+ if( pACB->scan_devices )
+ {
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ if(pcmd->result != (DID_OK << 16))
+ {
+ if( pcmd->result & SCSI_STAT_CHECKCOND )
+ {
+ goto RTN_OK;
+ }
+ else
+ {
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) &&
+ ((pcmd->lun == 0) || (pcmd->lun == pACB->max_lun)) )
+ {
+ pACB->scan_devices = 0;
+ }
+ }
+ }
+ else
+ {
+RTN_OK:
+ pPrevDCB->pNextDCB = pDCB;
+ pDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) && (pcmd->lun == pACB->max_lun) )
+ pACB->scan_devices = END_SCAN;
+ }
+ }
+ else if( pSRB->CmdBlock[0] == INQUIRY )
+ {
+ if( (pcmd->target == pACB->max_id) &&
+ (pcmd->lun == pACB->max_lun) )
+ {
+ pACB->scan_devices = 0;
+ }
+ ptr = (PSCSI_INQDATA) (pcmd->request_buffer);
+ if( pcmd->use_sg )
+ ptr = (PSCSI_INQDATA) (((PSGL) ptr)->address);
+ bval1 = ptr->DevType & SCSI_DEVTYPE;
+ if(bval1 == SCSI_NODEV)
+ {
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+ else
+ {
+ pACB->DeviceCnt++;
+ pPrevDCB = pDCB;
+ pACB->pDCB_free = (PDCB) ((ULONG) (pACB->pDCB_free) + sizeof( DC390W_DCB ));
+ pDCB->DevType = bval1;
+ if(bval1 == TYPE_DISK || bval1 == TYPE_MOD)
+ {
+ if( (((ptr->Vers & 0x07) >= 2) || ((ptr->RDF & 0x0F) == 2)) &&
+ (ptr->Flags & SCSI_INQ_CMDQUEUE) &&
+ (pDCB->DevMode & TAG_QUEUING_) &&
+ (pDCB->DevMode & EN_DISCONNECT_) )
+ {
+ /* match the 28-byte vendor/product string against
+ the bad-device blacklist before enabling tags */
+ disable_tag = 0;
+ for(i=0; i<BADDEVCNT; i++)
+ {
+ for(j=0; j<28; j++)
+ {
+ if( ((PUCHAR)ptr)[8+j] != baddevname[i][j])
+ break;
+ }
+ if(j == 28)
+ {
+ disable_tag = 1;
+ break;
+ }
+ }
+
+ if( !disable_tag )
+ {
+ pDCB->MaxCommand = pACB->TagMaxNum;
+ pDCB->TagMask = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ save_flags( flags );
+ cli();
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ if(pSRB == pDCB->pGoingSRB )
+ {
+ pDCB->pGoingSRB = pSRB->pNextSRB;
+ }
+ else
+ {
+ psrb = pDCB->pGoingSRB;
+ while( psrb->pNextSRB != pSRB )
+ psrb = psrb->pNextSRB;
+ psrb->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pGoingLast )
+ pDCB->pGoingLast = psrb;
+ }
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pDCB->GoingSRBCnt--;
+
+ DoWaitingSRB( pACB );
+ restore_flags(flags);
+
+/* Notify cmd done */
+ pcmd->scsi_done( pcmd );
+
+ if( pDCB->QIORBCnt )
+ DoNextCmd( pACB, pDCB );
+ return;
+}
+
+
+/*
+ * DoingSRB_Done: abort every SRB still on each device's "going" queue
+ * (after a bus reset): complete each command with DID_RESET, return
+ * the SRBs to the adapter's free list, and clear the per-device going
+ * queue and tag mask.  Walks the circular DCB list exactly once.
+ */
+static void
+DoingSRB_Done( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB psrb, psrb2;
+ USHORT cnt, i;
+ PSCSICMD pcmd;
+
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ do
+ {
+ cnt = pdcb->GoingSRBCnt;
+ psrb = pdcb->pGoingSRB;
+ for( i=0; i<cnt; i++)
+ {
+ psrb2 = psrb->pNextSRB;
+ pcmd = psrb->pcmd;
+ pcmd->result = DID_RESET << 16;
+
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ psrb->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = psrb;
+
+ pcmd->scsi_done( pcmd );
+ psrb = psrb2;
+ }
+ pdcb->GoingSRBCnt = 0; /* fixed stray ";;" */
+ pdcb->pGoingSRB = NULL;
+ pdcb->TagMask = 0;
+ pdcb = pdcb->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+/*
+ * DC390W_ResetSCSIBus: abort the SCRIPTS processor (ABORT_OP pulse on
+ * ISTAT) and then pulse ASSERT_RST on SCNTL1 for 25us to reset the
+ * SCSI bus.  Sets RESET_DEV so the reset-detect handler knows we
+ * initiated it.  Runs with interrupts disabled.
+ */
+static void
+DC390W_ResetSCSIBus( PACB pACB )
+{
+ USHORT ioport;
+ UCHAR bval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ pACB->ACBFlag |= RESET_DEV;
+ ioport = pACB->IOPortBase;
+ bval = ABORT_OP;
+ outb(bval,ioport+ISTAT);
+ udelay(25);
+ bval = 0;
+ outb(bval,ioport+ISTAT);
+
+ bval = ASSERT_RST;
+ outb(bval,ioport+SCNTL1);
+ udelay(25); /* 25 us */
+ bval = 0;
+ outb(bval,ioport+SCNTL1);
+ restore_flags(flags);
+ return;
+}
+
+
+
+/*
+ * DC390W_ResetSCSIBus2: like DC390W_ResetSCSIBus but without the
+ * SCRIPTS abort and without setting RESET_DEV -- just pulse
+ * ASSERT_RST on SCNTL1 for 25us.  Used from the abort-exception path.
+ */
+static void
+DC390W_ResetSCSIBus2( PACB pACB )
+{
+ USHORT ioport;
+ UCHAR bval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ ioport = pACB->IOPortBase;
+ bval = ASSERT_RST;
+ outb(bval,ioport+SCNTL1);
+ udelay(25); /* 25 us */
+ bval = 0;
+ outb(bval,ioport+SCNTL1);
+ restore_flags(flags);
+ return;
+}
+
+
+
+/*
+ * DC390W_ScsiRstDetect: a SCSI bus reset was observed.  Waits ~1s for
+ * the bus to settle, flushes the FIFOs, and then either acknowledges
+ * our own reset (RESET_DEV -> RESET_DONE) or -- for an external reset
+ * -- restores device parameters, requeues in-flight SRBs, re-arms
+ * reselection and restarts waiting work.
+ */
+static void
+DC390W_ScsiRstDetect( PACB pACB )
+{
+ ULONG wlval, flags;
+ USHORT ioport;
+ UCHAR bval;
+
+ save_flags(flags);
+ sti();
+#ifdef DC390W_DEBUG0
+ printk("Reset_Detect0,");
+#endif
+/* delay 1 sec */
+/* NOTE(review): busy-wait with interrupts enabled; a jiffies
+ wraparound would cut this delay short -- confirm acceptable. */
+ wlval = jiffies + HZ;
+ while( jiffies < wlval );
+/* USHORT i;
+ for( i=0; i<1000; i++ )
+ udelay(1000); */
+
+ cli();
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+
+ if( pACB->ACBFlag & RESET_DEV )
+ pACB->ACBFlag |= RESET_DONE;
+ else
+ {
+ pACB->ACBFlag |= RESET_DETECT;
+
+ ResetDevParam( pACB );
+/* DoingSRB_Done( pACB ); ???? */
+ RecoverSRB( pACB );
+ pACB->pActiveDCB = NULL;
+ wlval = pACB->jmp_reselect;
+ outl(wlval,(ioport+DSP));
+ pACB->ACBFlag = 0;
+ DoWaitingSRB( pACB );
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+/*
+ * RequestSense: issue an automatic REQUEST SENSE for pSRB after a
+ * CHECK CONDITION.  Saves the original CDB, command length, SG count,
+ * segment-list pointer and transferred length into Segment0 (layout
+ * must match the restore code in SRBdone), points the single SG entry
+ * at the mid-layer sense buffer, builds the 6-byte REQUEST SENSE CDB
+ * and restarts the command.
+ */
+static void
+RequestSense( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSCSICMD pcmd;
+
+ pSRB->SRBFlag |= AUTO_REQSENSE;
+ pSRB->Segment0[0][0] = *((PULONG) &(pSRB->CmdBlock[0]));
+ pSRB->Segment0[0][1] = *((PULONG) &(pSRB->CmdBlock[4]));
+ pSRB->Segment0[1][0] = *((PULONG) &(pSRB->CmdBlock[8]));
+ pSRB->Segment0[1][1] = pSRB->__command[0] | (pSRB->SGcount << 8);
+ pSRB->Segment0[2][0] = *((PULONG) &(pSRB->pSegmentList));
+ pSRB->Segment0[2][1] = pSRB->XferredLen;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+
+ pcmd = pSRB->pcmd;
+
+ pSRB->Segmentx.address = (PUCHAR) &(pcmd->sense_buffer);
+ pSRB->Segmentx.length = sizeof(pcmd->sense_buffer);
+ pSRB->pSegmentList = &pSRB->Segmentx;
+ pSRB->SGcount = 1;
+
+ *((PULONG) &(pSRB->CmdBlock[0])) = 0x00000003;
+ pSRB->CmdBlock[1] = pDCB->IdentifyMsg << 5;
+ *((PUSHORT) &(pSRB->CmdBlock[4])) = sizeof(pcmd->sense_buffer);
+ pSRB->__command[0] = 6;
+ PrepareSG( pACB, pDCB, pSRB );
+ pSRB->XferredLen = 0;
+ DC390W_StartSCSI( pACB, pDCB, pSRB );
+ return;
+}
+
+
+/*
+ * DC390W_MessageOut: an unexpected message-out SCRIPTS interrupt is
+ * treated as a fatal protocol error.
+ */
+static void
+DC390W_MessageOut( PACB pACB )
+{
+ DC390W_FatalError( pACB );
+}
+
+
+/*
+ * DC390W_FatalError: unrecoverable protocol state.  If the active SRB
+ * was only the throwaway allocated for an unexpected reselection,
+ * quietly return it to the free list and resume waiting work;
+ * otherwise reset the SCSI bus.
+ */
+static void
+DC390W_FatalError( PACB pACB )
+{
+ PSRB pSRB;
+ PDCB pDCB;
+ ULONG flags;
+
+#ifdef DC390W_DEBUG0
+ printk("DC390W: Fatal Error!!\n");
+#endif
+
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ {
+ save_flags(flags);
+ cli();
+ pSRB->SRBState &= ~SRB_UNEXPECT_RESEL;
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pACB->pActiveDCB = NULL;
+ pDCB->pActiveSRB = NULL;
+ restore_flags(flags);
+ DoWaitingSRB( pACB );
+ }
+ else
+ DC390W_ResetSCSIBus(pACB);
+ return;
+}
+
+
+/*
+ * DC390W_Debug: debug hook for a SCRIPTS software interrupt -- simply
+ * re-issues the current DSP so execution continues.
+ */
+static void
+DC390W_Debug( PACB pACB )
+{
+ ULONG wlval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ wlval = inl(ioport+DSP);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
diff --git a/linux/src/drivers/scsi/scsiiom.c b/linux/src/drivers/scsi/scsiiom.c
new file mode 100644
index 0000000..97801d7
--- /dev/null
+++ b/linux/src/drivers/scsi/scsiiom.c
@@ -0,0 +1,1540 @@
+/***********************************************************************
+ * FILE NAME : SCSIIOM.C *
+ * BY : C.L. Huang, ching@tekram.com.tw *
+ * Description: Device Driver for Tekram DC-390 (T) PCI SCSI *
+ * Bus Master Host Adapter *
+ ***********************************************************************/
+
+
+/*
+ * DC390_StartSCSI - program the chip for (re)selection of pDCB's target
+ * and preload the SCSI FIFO with identify message, optional queue tag
+ * and the CDB, then issue the chosen select command.
+ *
+ * Returns 1 if an interrupt was already pending when we were about to
+ * select (selection not started; caller must requeue the SRB), else 0.
+ *
+ * Two nearly identical branches exist: the EN_ATN_STOP path queues
+ * messages in MsgOutBuf and uses SEL_W_ATN_STOP so sync negotiation
+ * can be injected; the normal path pushes everything into the FIFO.
+ */
+static USHORT
+DC390_StartSCSI( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ USHORT ioport, rc;
+ UCHAR bval, bval1, i, cnt;
+ PUCHAR ptr;
+ ULONG wlval;
+
+ pSRB->TagNumber = 31; /* 31 == "no tag assigned" sentinel */
+ ioport = pACB->IOPortBase;
+ /* Load per-target transfer parameters into the chip registers. */
+ bval = pDCB->UnitSCSIID;
+ outb(bval,ioport+Scsi_Dest_ID);
+ bval = pDCB->SyncPeriod;
+ outb(bval,ioport+Sync_Period);
+ bval = pDCB->SyncOffset;
+ outb(bval,ioport+Sync_Offset);
+ bval = pDCB->CtrlR1;
+ outb(bval,ioport+CtrlReg1);
+ bval = pDCB->CtrlR3;
+ outb(bval,ioport+CtrlReg3);
+ bval = pDCB->CtrlR4;
+ outb(bval,ioport+CtrlReg4);
+ bval = CLEAR_FIFO_CMD; /* Flush FIFO */
+ outb(bval,ioport+ScsiCmd);
+
+ pSRB->ScsiPhase = SCSI_NOP0;
+ bval = pDCB->IdentifyMsg;
+ if( !(pDCB->SyncMode & EN_ATN_STOP) )
+ {
+ if( (pSRB->CmdBlock[0] == INQUIRY) ||
+ (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
+ (pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ bval &= 0xBF; /* NO disconnection */
+ outb(bval,ioport+ScsiFifo);
+ bval1 = SELECT_W_ATN;
+ pSRB->SRBState = SRB_START_;
+ /* Switch to select-with-ATN-stop if sync nego is still wanted. */
+ if( pDCB->SyncMode & SYNC_ENABLE )
+ {
+ if( !(pDCB->IdentifyMsg & 7) ||
+ (pSRB->CmdBlock[0] != INQUIRY) )
+ {
+ bval1 = SEL_W_ATN_STOP;
+ pSRB->SRBState = SRB_MSGOUT;
+ }
+ }
+ }
+ else
+ {
+ if(pDCB->SyncMode & EN_TAG_QUEUING)
+ {
+ outb(bval,ioport+ScsiFifo);
+ bval = MSG_SIMPLE_QTAG;
+ outb(bval,ioport+ScsiFifo);
+ /* Allocate the lowest free tag number from TagMask. */
+ wlval = 1;
+ bval = 0;
+ while( wlval & pDCB->TagMask )
+ {
+ wlval = wlval << 1;
+ bval++;
+ }
+ outb(bval,ioport+ScsiFifo);
+ pDCB->TagMask |= wlval;
+ pSRB->TagNumber = bval;
+ bval1 = SEL_W_ATN2;
+ pSRB->SRBState = SRB_START_;
+ }
+ else
+ {
+ outb(bval,ioport+ScsiFifo);
+ bval1 = SELECT_W_ATN;
+ pSRB->SRBState = SRB_START_;
+ }
+ }
+
+ if( pSRB->SRBFlag & AUTO_REQSENSE )
+ {
+ /* Push a hand-built 6-byte REQUEST SENSE CDB into the FIFO. */
+ bval = REQUEST_SENSE;
+ outb(bval,ioport+ScsiFifo);
+ bval = pDCB->IdentifyMsg << 5;
+ outb(bval,ioport+ScsiFifo);
+ bval = 0;
+ outb(bval,ioport+ScsiFifo);
+ outb(bval,ioport+ScsiFifo);
+ bval = sizeof(pSRB->pcmd->sense_buffer);
+ outb(bval,ioport+ScsiFifo);
+ bval = 0;
+ outb(bval,ioport+ScsiFifo);
+ }
+ else
+ {
+ /* Push the command's own CDB into the FIFO. */
+ cnt = pSRB->ScsiCmdLen;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ for(i=0; i<cnt; i++)
+ {
+ bval = *ptr++;
+ outb(bval,ioport+ScsiFifo);
+ }
+ }
+ }
+ else /* ATN_STOP */
+ {
+ if( (pSRB->CmdBlock[0] == INQUIRY) ||
+ (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
+ (pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ bval &= 0xBF;
+ outb(bval,ioport+ScsiFifo);
+ bval1 = SELECT_W_ATN;
+ pSRB->SRBState = SRB_START_;
+ if( pDCB->SyncMode & SYNC_ENABLE )
+ {
+ if( !(pDCB->IdentifyMsg & 7) ||
+ (pSRB->CmdBlock[0] != INQUIRY) )
+ {
+ bval1 = SEL_W_ATN_STOP;
+ pSRB->SRBState = SRB_MSGOUT;
+ }
+ }
+ }
+ else
+ {
+ if(pDCB->SyncMode & EN_TAG_QUEUING)
+ {
+ outb(bval,ioport+ScsiFifo);
+ /* Tag message goes to MsgOutBuf here, not the FIFO: it is
+ * sent later during the message-out phase. */
+ pSRB->MsgOutBuf[0] = MSG_SIMPLE_QTAG;
+ wlval = 1;
+ bval = 0;
+ while( wlval & pDCB->TagMask )
+ {
+ wlval = wlval << 1;
+ bval++;
+ }
+ pDCB->TagMask |= wlval;
+ pSRB->TagNumber = bval;
+ pSRB->MsgOutBuf[1] = bval;
+ pSRB->MsgCnt = 2;
+ bval1 = SEL_W_ATN_STOP;
+ pSRB->SRBState = SRB_START_;
+ }
+ else
+ {
+ outb(bval,ioport+ScsiFifo);
+ pSRB->MsgOutBuf[0] = MSG_NOP;
+ pSRB->MsgCnt = 1;
+ pSRB->SRBState = SRB_START_;
+ bval1 = SEL_W_ATN_STOP;
+ }
+ }
+ }
+ /* If an interrupt is already pending, back out instead of selecting. */
+ bval = inb( ioport+Scsi_Status );
+ if( bval & INTERRUPT )
+ {
+ pSRB->SRBState = SRB_READY;
+ pDCB->TagMask &= ~( 1 << pSRB->TagNumber );
+ rc = 1;
+ }
+ else
+ {
+ pSRB->ScsiPhase = SCSI_NOP1;
+ pACB->pActiveDCB = pDCB;
+ pDCB->pActiveSRB = pSRB;
+ rc = 0;
+ outb(bval1,ioport+ScsiCmd); /* fire the selected select command */
+ }
+ return( rc );
+}
+
+
+/*
+ * DC390_Interrupt - IRQ handler. Walks the adapter list to find the
+ * board that raised this IRQ (Scsi_Status has INTERRUPT set), decodes
+ * INT_Status, and either handles disconnect/reselect/reset/invalid-cmd
+ * directly or dispatches through the DC390_phase0/DC390_phase1 state
+ * tables: phase0 finishes the phase that just ended, phase1 starts the
+ * phase now on the bus.
+ *
+ * Fixes over the original:
+ *  - the adapter scan could dereference a NULL pACB after walking off
+ *    the end of the list, and the spurious-interrupt test compared
+ *    against (PACB)-1, a value the loop can never produce; we now stop
+ *    the scan at NULL and test for NULL.
+ *  - pDCB->pActiveSRB was read before the pDCB NULL check.
+ */
+#ifndef VERSION_ELF_1_2_13
+static void
+DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
+#else
+static void
+DC390_Interrupt( int irq, struct pt_regs *regs)
+#endif
+{
+ PACB pACB;
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport = 0;
+ USHORT phase, i;
+ void (*stateV)( PACB, PSRB, PUCHAR );
+ UCHAR istate = 0;
+ UCHAR sstatus=0, istatus;
+
+ pACB = pACB_start;
+ if( pACB == NULL )
+ return;
+ for( i=0; (i < adapterCnt) && pACB; i++ )
+ {
+ if( pACB->IRQLevel == (UCHAR) irq )
+ {
+ ioport = pACB->IOPortBase;
+ sstatus = inb( ioport+Scsi_Status );
+ if( sstatus & INTERRUPT )
+ break;
+ else
+ pACB = pACB->pNextACB;
+ }
+ else
+ {
+ pACB = pACB->pNextACB;
+ }
+ }
+
+#ifdef DC390_DEBUG1
+ printk("sstatus=%2x,",sstatus);
+#endif
+
+ if( pACB == NULL )
+ {
+ printk("DC390: Spurious interrupt detected!\n");
+ return;
+ }
+
+ /* Reading these registers also acknowledges the interrupt. */
+ istate = inb( ioport+Intern_State );
+ istatus = inb( ioport+INT_Status );
+
+#ifdef DC390_DEBUG1
+ printk("Istatus=%2x,",istatus);
+#endif
+
+ if(istatus & DISCONNECTED)
+ {
+ DC390_Disconnect( pACB );
+ return;
+ }
+
+ if(istatus & RESELECTED)
+ {
+ DC390_Reselect( pACB );
+ return;
+ }
+
+ if(istatus & INVALID_CMD)
+ {
+ DC390_InvalidCmd( pACB );
+ return;
+ }
+
+ if(istatus & SCSI_RESET)
+ {
+ DC390_ScsiRstDetect( pACB );
+ return;
+ }
+
+ if( istatus & (SUCCESSFUL_OP+SERVICE_REQUEST) )
+ {
+ pDCB = pACB->pActiveDCB;
+ if( !pDCB )
+ return;
+ pSRB = pDCB->pActiveSRB;
+ if( pDCB->DCBFlag & ABORT_DEV_ )
+ EnableMsgOut( pACB, pSRB );
+
+ /* Complete the phase that just ended... */
+ phase = (USHORT) pSRB->ScsiPhase;
+ stateV = (void *) DC390_phase0[phase];
+ stateV( pACB, pSRB, &sstatus );
+
+ /* ...then start the phase currently on the bus (low 3 status bits). */
+ pSRB->ScsiPhase = sstatus & 7;
+ phase = (USHORT) sstatus & 7;
+ stateV = (void *) DC390_phase1[phase];
+ stateV( pACB, pSRB, &sstatus );
+ }
+}
+
+
+/*
+ * DC390_DataOut_0 - phase0 handler when a DATA OUT phase ends. Updates
+ * the scatter/gather bookkeeping: if the transfer counter reached zero,
+ * the whole segment went out and we advance to the next SG entry;
+ * otherwise compute the residual (counter registers plus bytes still in
+ * the FIFO) and shrink the current segment accordingly.
+ */
+static void
+DC390_DataOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR sstatus, bval;
+ USHORT ioport;
+ PSGL psgl;
+ ULONG ResidCnt, xferCnt;
+
+ ioport = pACB->IOPortBase;
+ sstatus = *psstatus;
+
+ if( !(pSRB->SRBState & SRB_XFERPAD) )
+ {
+ if( sstatus & PARITY_ERR )
+ pSRB->SRBStatus |= PARITY_ERROR;
+
+ if( sstatus & COUNT_2_ZERO )
+ {
+ /* Busy-wait for the DMA engine to drain, then advance the SG. */
+ bval = inb(ioport+DMA_Status);
+ while( !(bval & DMA_XFER_DONE) )
+ bval = inb(ioport+DMA_Status);
+ pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
+ pSRB->SGIndex++;
+ if( pSRB->SGIndex < pSRB->SGcount )
+ {
+ pSRB->pSegmentList++;
+ psgl = pSRB->pSegmentList;
+
+#ifndef VERSION_ELF_1_2_13
+ pSRB->SGPhysAddr = virt_to_phys( psgl->address );
+#else
+ pSRB->SGPhysAddr = (ULONG) psgl->address;
+#endif
+ pSRB->SGToBeXferLen = (ULONG) psgl->length;
+ }
+ else
+ pSRB->SGToBeXferLen = 0;
+ }
+ else
+ {
+ /* Partial transfer: residual = 24-bit counter + FIFO depth. */
+ bval = inb( ioport+Current_Fifo );
+ bval &= 0x1f;
+ ResidCnt = (ULONG) inb(ioport+CtcReg_High);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Mid);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Low);
+ ResidCnt += (ULONG) bval;
+
+ xferCnt = pSRB->SGToBeXferLen - ResidCnt;
+ pSRB->SGPhysAddr += xferCnt;
+ pSRB->TotalXferredLen += xferCnt;
+ pSRB->SGToBeXferLen = ResidCnt;
+ }
+ }
+ bval = WRITE_DIRECTION+DMA_IDLE_CMD;
+ outb( bval, ioport+DMA_Cmd);
+}
+
+/*
+ * DC390_DataIn_0 - phase0 handler when a DATA IN phase ends. Mirrors
+ * DC390_DataOut_0, with extra care on the partial-transfer path: the
+ * chip FIFO must be drained (DMA_BLAST_CMD) and a possible single odd
+ * residual byte is read from the SCSI FIFO and stored by hand.
+ */
+static void
+DC390_DataIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR sstatus, bval;
+ USHORT i, ioport, residual;
+ PSGL psgl;
+ ULONG ResidCnt, xferCnt;
+ PUCHAR ptr;
+
+
+ ioport = pACB->IOPortBase;
+ sstatus = *psstatus;
+
+ if( !(pSRB->SRBState & SRB_XFERPAD) )
+ {
+ if( sstatus & PARITY_ERR )
+ pSRB->SRBStatus |= PARITY_ERROR;
+
+ if( sstatus & COUNT_2_ZERO )
+ {
+ /* Whole segment arrived: wait for DMA, then advance the SG. */
+ bval = inb(ioport+DMA_Status);
+ while( !(bval & DMA_XFER_DONE) )
+ bval = inb(ioport+DMA_Status);
+
+ bval = READ_DIRECTION+DMA_IDLE_CMD;
+ outb( bval, ioport+DMA_Cmd);
+
+ pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
+ pSRB->SGIndex++;
+ if( pSRB->SGIndex < pSRB->SGcount )
+ {
+ pSRB->pSegmentList++;
+ psgl = pSRB->pSegmentList;
+
+#ifndef VERSION_ELF_1_2_13
+ pSRB->SGPhysAddr = virt_to_phys( psgl->address );
+#else
+ pSRB->SGPhysAddr = (ULONG) psgl->address;
+#endif
+ pSRB->SGToBeXferLen = (ULONG) psgl->length;
+ }
+ else
+ pSRB->SGToBeXferLen = 0;
+ }
+ else /* phase changed */
+ {
+ /* Wait for the SCSI FIFO to drain; if it sticks at exactly one
+ * byte, remember it as a residual byte to fetch manually. */
+ residual = 0;
+ bval = inb(ioport+Current_Fifo);
+ while( bval & 0x1f )
+ {
+ if( (bval & 0x1f) == 1 )
+ {
+ for(i=0; i< 0x100; i++)
+ {
+ bval = inb(ioport+Current_Fifo);
+ if( !(bval & 0x1f) )
+ goto din_1;
+ else if( i == 0x0ff )
+ {
+ residual = 1; /* ;1 residual byte */
+ goto din_1;
+ }
+ }
+ }
+ else
+ bval = inb(ioport+Current_Fifo);
+ }
+din_1:
+ /* Flush the DMA pipe and wait (bounded) for blast completion. */
+ bval = READ_DIRECTION+DMA_BLAST_CMD;
+ outb(bval, ioport+DMA_Cmd);
+ for(i=0; i<0x8000; i++)
+ {
+ bval = inb(ioport+DMA_Status);
+ if(bval & BLAST_COMPLETE)
+ break;
+ }
+ bval = READ_DIRECTION+DMA_IDLE_CMD;
+ outb(bval, ioport+DMA_Cmd);
+
+ ResidCnt = (ULONG) inb(ioport+CtcReg_High);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Mid);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Low);
+
+ xferCnt = pSRB->SGToBeXferLen - ResidCnt;
+ pSRB->SGPhysAddr += xferCnt;
+ pSRB->TotalXferredLen += xferCnt;
+ pSRB->SGToBeXferLen = ResidCnt;
+
+ if( residual )
+ {
+ bval = inb(ioport+ScsiFifo); /* get residual byte */
+#ifndef VERSION_ELF_1_2_13
+ ptr = (PUCHAR) phys_to_virt( pSRB->SGPhysAddr );
+#else
+ ptr = (PUCHAR) pSRB->SGPhysAddr;
+#endif
+ *ptr = bval;
+ pSRB->SGPhysAddr++;
+ pSRB->TotalXferredLen++;
+ pSRB->SGToBeXferLen--;
+ }
+ }
+ }
+}
+
+/*
+ * DC390_Command_0 - phase0 handler for COMMAND phase: nothing to do
+ * after the CDB has gone out.
+ */
+static void
+DC390_Command_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+}
+
+/*
+ * DC390_Status_0 - phase0 handler for STATUS phase: read the target
+ * status byte and the following message byte from the SCSI FIFO, mark
+ * the SRB completed and acknowledge the message.
+ * (A stray dead "bval++;" between the two FIFO reads was removed; the
+ * incremented value was immediately overwritten.)
+ */
+static void
+DC390_Status_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+ScsiFifo); /* target status byte */
+ pSRB->TargetStatus = bval;
+ bval = inb(ioport+ScsiFifo); /* get message */
+ pSRB->EndMessage = bval;
+
+ *psstatus = SCSI_NOP0;
+ pSRB->SRBState = SRB_COMPLETED;
+ bval = MSG_ACCEPTED_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+/*
+ * DC390_MsgOut_0 - phase0 handler after MESSAGE OUT: if we were sending
+ * an abort (or handling an unexpected reselection), reset the recorded
+ * phase so the state machine does not act on the old one.
+ */
+static void
+DC390_MsgOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) )
+ *psstatus = SCSI_NOP0;
+}
+
+/*
+ * DC390_MsgIn_0 - phase0 handler for MESSAGE IN. Consumes one message
+ * byte per call. Single-byte messages (DISCONNECT, SAVE/RESTORE
+ * POINTERS, REJECT) are handled inline; multi-byte messages (queue-tag
+ * reconnect and extended sync negotiation) accumulate in MsgInBuf until
+ * complete. A completed tag message re-attaches the reselecting target
+ * to its disconnected SRB; a completed SDTR reply reprograms the sync
+ * registers (or falls back to async on reject/zero offset).
+ */
+static void
+DC390_MsgIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport, wval, wval1;
+ PDCB pDCB;
+ PSRB psrb;
+
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+
+ bval = inb( ioport+ScsiFifo );
+ if( !(pSRB->SRBState & SRB_MSGIN_MULTI) )
+ {
+ if(bval == MSG_DISCONNECT)
+ {
+ pSRB->SRBState = SRB_DISCONNECT;
+ }
+ else if( bval == MSG_SAVE_PTR )
+ goto min6;
+ else if( (bval == MSG_EXTENDED) || ((bval >= MSG_SIMPLE_QTAG) &&
+ (bval <= MSG_ORDER_QTAG)) )
+ {
+ /* Start collecting a multi-byte message. */
+ pSRB->SRBState |= SRB_MSGIN_MULTI;
+ pSRB->MsgInBuf[0] = bval;
+ pSRB->MsgCnt = 1;
+ pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
+ }
+ else if(bval == MSG_REJECT_)
+ {
+ bval = RESET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+ /* Target rejected our SDTR: drop back to async. */
+ if( pSRB->SRBState & DO_SYNC_NEGO)
+ goto set_async;
+ }
+ else if( bval == MSG_RESTORE_PTR)
+ goto min6;
+ else
+ goto min6;
+ }
+ else
+ { /* minx: */
+
+ *pSRB->pMsgPtr = bval;
+ pSRB->MsgCnt++;
+ pSRB->pMsgPtr++;
+ if( (pSRB->MsgInBuf[0] >= MSG_SIMPLE_QTAG) &&
+ (pSRB->MsgInBuf[0] <= MSG_ORDER_QTAG) )
+ {
+ /* Queue-tag message complete: find the disconnected SRB that
+ * owns this tag and make it active again. */
+ if( pSRB->MsgCnt == 2)
+ {
+ pSRB->SRBState = 0;
+ bval = pSRB->MsgInBuf[1];
+ pSRB = pDCB->pGoingSRB;
+ psrb = pDCB->pGoingLast;
+ if( pSRB )
+ {
+ for( ;; )
+ {
+ if(pSRB->TagNumber != bval)
+ {
+ if( pSRB == psrb )
+ goto mingx0;
+ pSRB = pSRB->pNextSRB;
+ }
+ else
+ break;
+ }
+ if( pDCB->DCBFlag & ABORT_DEV_ )
+ {
+ pSRB->SRBState = SRB_ABORT_SENT;
+ EnableMsgOut( pACB, pSRB );
+ }
+ if( !(pSRB->SRBState & SRB_DISCONNECT) )
+ goto mingx0;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->SRBState = SRB_DATA_XFER;
+ }
+ else
+ {
+mingx0:
+ /* Unknown tag: abort this nexus with ABORT TAG. */
+ pSRB = pACB->pTmpSRB;
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
+ EnableMsgOut2( pACB, pSRB );
+ }
+ }
+ }
+ else if( (pSRB->MsgInBuf[0] == MSG_EXTENDED) && (pSRB->MsgCnt == 5) )
+ {
+ /* 5 bytes received: full extended message. Only SDTR
+ * (length 3, code 1) is accepted; anything else is rejected. */
+ pSRB->SRBState &= ~(SRB_MSGIN_MULTI+DO_SYNC_NEGO);
+ if( (pSRB->MsgInBuf[1] != 3) || (pSRB->MsgInBuf[2] != 1) )
+ { /* reject_msg: */
+ pSRB->MsgCnt = 1;
+ pSRB->MsgInBuf[0] = MSG_REJECT_;
+ bval = SET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+ }
+ else if( !(pSRB->MsgInBuf[3]) || !(pSRB->MsgInBuf[4]) )
+ {
+set_async:
+ /* Zero period or offset: run this target asynchronously. */
+ pDCB = pSRB->pSRBDCB;
+ pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE);
+ pDCB->SyncPeriod = 0;
+ pDCB->SyncOffset = 0;
+ pDCB->CtrlR3 = FAST_CLK; /* ;non_fast */
+ pDCB->CtrlR4 &= 0x3f;
+ pDCB->CtrlR4 |= EATER_25NS; /* ; 25ns glitch eater */
+ goto re_prog;
+ }
+ else
+ { /* set_sync: */
+
+ /* Convert the negotiated period (4ns units) into the chip's
+ * clock-period register value, rounding up. */
+ pDCB = pSRB->pSRBDCB;
+ pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE;
+ pDCB->SyncOffset &= 0x0f0;
+ pDCB->SyncOffset |= pSRB->MsgInBuf[4];
+ pDCB->NegoPeriod = pSRB->MsgInBuf[3];
+ wval = (USHORT) pSRB->MsgInBuf[3];
+ wval = wval << 2;
+ wval--;
+ wval1 = wval / 25;
+ if( (wval1 * 25) != wval)
+ wval1++;
+ bval = FAST_CLK+FAST_SCSI;
+ pDCB->CtrlR4 &= 0x3f;
+ if(wval1 >= 8)
+ {
+ wval1--;
+ bval = FAST_CLK; /* ;fast clock/normal scsi */
+ pDCB->CtrlR4 |= EATER_25NS; /* ;25 ns glitch eater */
+ }
+ pDCB->CtrlR3 = bval;
+ pDCB->SyncPeriod = (UCHAR)wval1;
+re_prog:
+ /* Write the (new) transfer parameters to the chip. */
+ bval = pDCB->SyncPeriod;
+ outb(bval, ioport+Sync_Period);
+ bval = pDCB->SyncOffset;
+ outb(bval, ioport+Sync_Offset);
+ bval = pDCB->CtrlR3;
+ outb(bval, ioport+CtrlReg3);
+ bval = pDCB->CtrlR4;
+ outb(bval, ioport+CtrlReg4);
+ SetXferRate( pACB, pDCB);
+ }
+ }
+ }
+min6:
+ *psstatus = SCSI_NOP0;
+ bval = MSG_ACCEPTED_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+/*
+ * DataIO_Comm - common setup for DATA IN / DATA OUT phases. If SG
+ * entries remain, program the chip transfer counter and the DMA engine
+ * with the current segment and start an INFO_XFER; otherwise the target
+ * wants more data than we have, so flag an over/under-run and pad the
+ * transfer with XFER_PAD_BYTE.
+ * ioDir is READ_DIRECTION or WRITE_DIRECTION for the DMA command.
+ */
+static void
+DataIO_Comm( PACB pACB, PSRB pSRB, UCHAR ioDir)
+{
+ PSGL psgl;
+ UCHAR bval;
+ USHORT ioport;
+ ULONG lval;
+
+
+ ioport = pACB->IOPortBase;
+ if( pSRB->SGIndex < pSRB->SGcount )
+ {
+ bval = DMA_IDLE_CMD | ioDir; /* ;+EN_DMA_INT */
+ outb( bval, ioport+DMA_Cmd);
+ if( !pSRB->SGToBeXferLen )
+ {
+ /* Fresh segment: latch its physical address and length. */
+ psgl = pSRB->pSegmentList;
+#ifndef VERSION_ELF_1_2_13
+ pSRB->SGPhysAddr = virt_to_phys( psgl->address );
+#else
+ pSRB->SGPhysAddr = (ULONG) psgl->address;
+#endif
+ pSRB->SGToBeXferLen = (ULONG) psgl->length;
+ }
+ /* 24-bit transfer counter, low/mid/high bytes. */
+ lval = pSRB->SGToBeXferLen;
+ bval = (UCHAR) lval;
+ outb(bval,ioport+CtcReg_Low);
+ lval = lval >> 8;
+ bval = (UCHAR) lval;
+ outb(bval,ioport+CtcReg_Mid);
+ lval = lval >> 8;
+ bval = (UCHAR) lval;
+ outb(bval,ioport+CtcReg_High);
+
+ lval = pSRB->SGToBeXferLen;
+ outl(lval, ioport+DMA_XferCnt);
+
+ lval = pSRB->SGPhysAddr;
+ outl( lval, ioport+DMA_XferAddr);
+
+ bval = DMA_COMMAND+INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+
+ pSRB->SRBState = SRB_DATA_XFER;
+
+ bval = DMA_IDLE_CMD | ioDir; /* ;+EN_DMA_INT */
+ outb(bval, ioport+DMA_Cmd);
+
+ bval = DMA_START_CMD | ioDir; /* ;+EN_DMA_INT */
+ outb(bval, ioport+DMA_Cmd);
+ }
+ else /* xfer pad */
+ {
+ if( pSRB->SGcount )
+ {
+ pSRB->AdaptStatus = H_OVER_UNDER_RUN;
+ pSRB->SRBStatus |= OVER_RUN;
+ }
+ bval = 0;
+ outb(bval,ioport+CtcReg_Low);
+ outb(bval,ioport+CtcReg_Mid);
+ outb(bval,ioport+CtcReg_High);
+
+ pSRB->SRBState |= SRB_XFERPAD;
+ bval = DMA_COMMAND+XFER_PAD_BYTE;
+ outb(bval, ioport+ScsiCmd);
+/*
+ bval = DMA_IDLE_CMD | ioDir; ;+EN_DMA_INT
+ outb(bval, ioport+DMA_Cmd);
+ bval = DMA_START_CMD | ioDir; ;+EN_DMA_INT
+ outb(bval, ioport+DMA_Cmd);
+*/
+ }
+}
+
+
+/*
+ * DC390_DataOutPhase - phase1 handler for DATA OUT: delegate to
+ * DataIO_Comm in the write direction.
+ */
+static void
+DC390_DataOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR ioDir;
+
+ ioDir = WRITE_DIRECTION;
+ DataIO_Comm( pACB, pSRB, ioDir);
+}
+
+/*
+ * DC390_DataInPhase - phase1 handler for DATA IN: delegate to
+ * DataIO_Comm in the read direction.
+ */
+static void
+DC390_DataInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR ioDir;
+
+ ioDir = READ_DIRECTION;
+ DataIO_Comm( pACB, pSRB, ioDir);
+}
+
+/*
+ * DC390_CommandPhase - phase1 handler for COMMAND: clear ATN and the
+ * FIFO, load the CDB (or a hand-built REQUEST SENSE when auto request
+ * sense is in progress) and start the information transfer.
+ */
+static void
+DC390_CommandPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ PDCB pDCB;
+ UCHAR bval;
+ PUCHAR ptr;
+ USHORT ioport, i, cnt;
+
+
+ ioport = pACB->IOPortBase;
+ bval = RESET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ if( !(pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ cnt = (USHORT) pSRB->ScsiCmdLen;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ for(i=0; i < cnt; i++)
+ {
+ outb(*ptr, ioport+ScsiFifo);
+ ptr++;
+ }
+ }
+ else
+ {
+ /* Hand-built 6-byte REQUEST SENSE CDB. */
+ bval = REQUEST_SENSE;
+ outb(bval, ioport+ScsiFifo);
+ pDCB = pACB->pActiveDCB;
+ bval = pDCB->IdentifyMsg << 5;
+ outb(bval, ioport+ScsiFifo);
+ bval = 0;
+ outb(bval, ioport+ScsiFifo);
+ outb(bval, ioport+ScsiFifo);
+ bval = sizeof(pSRB->pcmd->sense_buffer);
+ outb(bval, ioport+ScsiFifo);
+ bval = 0;
+ outb(bval, ioport+ScsiFifo);
+ }
+ pSRB->SRBState = SRB_COMMAND;
+ bval = INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+/*
+ * DC390_StatusPhase - phase1 handler for STATUS: clear the FIFO and ask
+ * the chip to run the initiator-command-complete sequence (status byte
+ * plus message byte), which DC390_Status_0 will collect.
+ */
+static void
+DC390_StatusPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ pSRB->SRBState = SRB_STATUS;
+ bval = INITIATOR_CMD_CMPLTE;
+ outb(bval, ioport+ScsiCmd);
+}
+
+/*
+ * DC390_MsgOutPhase - phase1 handler for MESSAGE OUT. Normally sends
+ * whatever is queued in MsgOutBuf; with nothing queued it sends ABORT
+ * (unless a sync negotiation should be started instead, see mop1). The
+ * SRB_MSGOUT state (set by SEL_W_ATN_STOP selection) jumps straight to
+ * sending the 5-byte extended SDTR message.
+ */
+static void
+DC390_MsgOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport, i, cnt;
+ PUCHAR ptr;
+ PDCB pDCB;
+
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ pDCB = pACB->pActiveDCB;
+ if( !(pSRB->SRBState & SRB_MSGOUT) )
+ {
+ cnt = pSRB->MsgCnt;
+ if( cnt )
+ {
+ /* Queued message bytes -> FIFO. */
+ ptr = (PUCHAR) pSRB->MsgOutBuf;
+ for(i=0; i < cnt; i++)
+ {
+ outb(*ptr, ioport+ScsiFifo);
+ ptr++;
+ }
+ pSRB->MsgCnt = 0;
+ if( (pDCB->DCBFlag & ABORT_DEV_) &&
+ (pSRB->MsgOutBuf[0] == MSG_ABORT) )
+ pSRB->SRBState = SRB_ABORT_SENT;
+ }
+ else
+ {
+ bval = MSG_ABORT; /* ??? MSG_NOP */
+ if( (pSRB->CmdBlock[0] == INQUIRY ) ||
+ (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
+ (pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ if( pDCB->SyncMode & SYNC_ENABLE )
+ goto mop1;
+ }
+ outb(bval, ioport+ScsiFifo);
+ }
+ bval = INFO_XFER_CMD;
+ outb( bval, ioport+ScsiCmd);
+ }
+ else
+ {
+mop1:
+ /* Send extended SDTR: period from NegoPeriod, fixed offset. */
+ bval = MSG_EXTENDED;
+ outb(bval, ioport+ScsiFifo);
+ bval = 3; /* ;length of extended msg */
+ outb(bval, ioport+ScsiFifo);
+ bval = 1; /* ; sync nego */
+ outb(bval, ioport+ScsiFifo);
+ bval = pDCB->NegoPeriod;
+ outb(bval, ioport+ScsiFifo);
+ bval = SYNC_NEGO_OFFSET;
+ outb(bval, ioport+ScsiFifo);
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ bval = INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+ }
+}
+
+/*
+ * DC390_MsgInPhase - phase1 handler for MESSAGE IN: clear the FIFO and
+ * start the transfer; DC390_MsgIn_0 consumes the bytes afterwards.
+ */
+static void
+DC390_MsgInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ if( !(pSRB->SRBState & SRB_MSGIN) )
+ {
+ /* NOTE(review): "&=" keeps only the DISCONNECT bit, discarding
+ * all other state flags - looks intentional but confirm. */
+ pSRB->SRBState &= SRB_DISCONNECT;
+ pSRB->SRBState |= SRB_MSGIN;
+ }
+ bval = INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+/* DC390_Nop_0 - placeholder entry for the phase0 dispatch table. */
+static void
+DC390_Nop_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+}
+
+/* DC390_Nop_1 - placeholder entry for the phase1 dispatch table. */
+static void
+DC390_Nop_1( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+}
+
+
+/*
+ * SetXferRate - after a sync negotiation on LUN 0, propagate the
+ * negotiated transfer parameters to every other DCB that shares the
+ * same SCSI target ID (other LUNs of the same device). During device
+ * scan only the current offset is recorded.
+ */
+static void
+SetXferRate( PACB pACB, PDCB pDCB )
+{
+ UCHAR bval;
+ USHORT cnt, i;
+ PDCB ptr;
+
+ if( !(pDCB->IdentifyMsg & 0x07) ) /* only act for LUN 0 */
+ {
+ if( pACB->scan_devices )
+ {
+ CurrSyncOffset = pDCB->SyncOffset;
+ }
+ else
+ {
+ ptr = pACB->pLinkDCB;
+ cnt = pACB->DeviceCnt;
+ bval = pDCB->UnitSCSIID;
+ for(i=0; i<cnt; i++)
+ {
+ if( ptr->UnitSCSIID == bval )
+ {
+ ptr->SyncPeriod = pDCB->SyncPeriod;
+ ptr->SyncOffset = pDCB->SyncOffset;
+ ptr->CtrlR3 = pDCB->CtrlR3;
+ ptr->CtrlR4 = pDCB->CtrlR4;
+ ptr->SyncMode = pDCB->SyncMode;
+ }
+ ptr = ptr->pNextDCB;
+ }
+ }
+ }
+ return;
+}
+
+
+/*
+ * DC390_Disconnect - handle a bus-free (disconnect) interrupt. Runs
+ * with interrupts masked. Depending on the active SRB's state this is:
+ * an unexpected-reselect cleanup, the end of an abort (flush the whole
+ * going queue), a selection timeout (requeue, or record the timeout
+ * during scan), a legitimate mid-command disconnect, or normal command
+ * completion (free the tag and call SRBdone).
+ */
+static void
+DC390_Disconnect( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB, psrb;
+ ULONG flags;
+ USHORT ioport, i, cnt;
+ UCHAR bval;
+
+#ifdef DC390_DEBUG0
+ printk("DISC,");
+#endif
+
+ save_flags(flags);
+ cli();
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ if (!pDCB)
+ {
+#ifdef DC390_DEBUG0
+ printk("ACB:%08lx->ActiveDCB:%08lx !,",(ULONG)pACB,(ULONG)pDCB);
+#endif
+ restore_flags(flags); return;
+ }
+ pSRB = pDCB->pActiveSRB;
+ pACB->pActiveDCB = 0;
+ pSRB->ScsiPhase = SCSI_NOP0;
+ bval = EN_SEL_RESEL; /* re-arm for selection/reselection */
+ outb(bval, ioport+ScsiCmd);
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ {
+ pSRB->SRBState = 0;
+ DoWaitingSRB( pACB );
+ }
+ else if( pSRB->SRBState & SRB_ABORT_SENT )
+ {
+ /* Abort completed: drop every queued SRB back on the free list. */
+ pDCB->TagMask = 0;
+ pDCB->DCBFlag = 0;
+ cnt = pDCB->GoingSRBCnt;
+ pDCB->GoingSRBCnt = 0;
+ pSRB = pDCB->pGoingSRB;
+ for( i=0; i < cnt; i++)
+ {
+ psrb = pSRB->pNextSRB;
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pSRB = psrb;
+ }
+ pDCB->pGoingSRB = 0;
+ DoWaitingSRB( pACB );
+ }
+ else
+ {
+ if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
+ !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
+ { /* Selection time out */
+ if( !(pACB->scan_devices) )
+ {
+ pSRB->SRBState = SRB_READY;
+ RewaitSRB( pDCB, pSRB);
+ }
+ else
+ {
+ pSRB->TargetStatus = SCSI_STAT_SEL_TIMEOUT;
+ goto disc1;
+ }
+ }
+ else if( pSRB->SRBState & SRB_DISCONNECT )
+ {
+ DoWaitingSRB( pACB );
+ }
+ else if( pSRB->SRBState & SRB_COMPLETED )
+ {
+disc1:
+ if(pDCB->MaxCommand > 1)
+ {
+ bval = pSRB->TagNumber;
+ pDCB->TagMask &= (~(1 << bval)); /* free tag mask */
+ }
+ pDCB->pActiveSRB = 0;
+ pSRB->SRBState = SRB_FREE;
+ SRBdone( pACB, pDCB, pSRB);
+ }
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+/*
+ * DC390_Reselect - handle a reselection interrupt. If we lost
+ * arbitration while trying to select, requeue the SRB we were starting.
+ * Read the reselecting target's ID and LUN from the FIFO, look up the
+ * matching DCB, reattach its disconnected SRB (or, for tagged targets /
+ * unexpected reselections, the temporary SRB until the queue-tag
+ * message arrives), and reload the chip's per-target registers.
+ */
+static void
+DC390_Reselect( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB pSRB;
+ USHORT ioport, wval;
+ UCHAR bval, bval1;
+
+
+#ifdef DC390_DEBUG0
+ printk("RSEL,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ if( pDCB )
+ { /* Arbitration lost but Reselection win */
+ pSRB = pDCB->pActiveSRB;
+ if( !( pACB->scan_devices ) )
+ {
+ pSRB->SRBState = SRB_READY;
+ RewaitSRB( pDCB, pSRB);
+ }
+ }
+ /* FIFO holds our ID bit plus the target's; mask ours out, then find
+ * the bit position = target SCSI ID. */
+ bval = inb(ioport+ScsiFifo); /* get ID */
+ bval = bval ^ pACB->HostID_Bit;
+ wval = 0;
+ bval1 = 1;
+ for(;;)
+ {
+ if( !(bval & bval1) )
+ {
+ bval1 = bval1 << 1;
+ wval++;
+ }
+ else
+ break;
+ }
+ wval |= ( (USHORT) inb(ioport+ScsiFifo) & 7) << 8; /* get LUN */
+ /* Match (ID,LUN) against each DCB's packed UnitSCSIID/UnitSCSILUN
+ * pair (read as one USHORT - assumes field adjacency and
+ * little-endian layout). */
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ while( wval != *((PUSHORT) &pDCB->UnitSCSIID) )
+ {
+ pDCB = pDCB->pNextDCB;
+ if( pDCB == pdcb )
+ return;
+ }
+ pACB->pActiveDCB = pDCB;
+ if( pDCB->SyncMode & EN_TAG_QUEUING )
+ {
+ /* Tagged: real SRB is identified later by the queue-tag message. */
+ pSRB = pACB->pTmpSRB;
+ pDCB->pActiveSRB = pSRB;
+ }
+ else
+ {
+ pSRB = pDCB->pActiveSRB;
+ if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) )
+ {
+ /* Nobody was expecting this target: abort the nexus. */
+ pSRB= pACB->pTmpSRB;
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ EnableMsgOut( pACB, pSRB );
+ }
+ else
+ {
+ if( pDCB->DCBFlag & ABORT_DEV_ )
+ {
+ pSRB->SRBState = SRB_ABORT_SENT;
+ EnableMsgOut( pACB, pSRB );
+ }
+ else
+ pSRB->SRBState = SRB_DATA_XFER;
+ }
+ }
+ pSRB->ScsiPhase = SCSI_NOP0;
+ /* Reload per-target chip registers for the reselecting device. */
+ bval = pDCB->UnitSCSIID;
+ outb( bval, ioport+Scsi_Dest_ID);
+ bval = pDCB->SyncPeriod;
+ outb(bval, ioport+Sync_Period);
+ bval = pDCB->SyncOffset;
+ outb( bval, ioport+Sync_Offset);
+ bval = pDCB->CtrlR1;
+ outb(bval, ioport+CtrlReg1);
+ bval = pDCB->CtrlR3;
+ outb(bval, ioport+CtrlReg3);
+ bval = pDCB->CtrlR4; /* ; Glitch eater */
+ outb(bval, ioport+CtrlReg4);
+ bval = MSG_ACCEPTED_CMD; /* ;to rls the /ACK signal */
+ outb(bval, ioport+ScsiCmd);
+}
+
+
+/*
+ * SRBdone - finish an SRB. Translates adapter/target status into the
+ * mid-layer's pcmd->result, handles the tail end of auto REQUEST SENSE
+ * (restoring the saved CDB/SG state, retrying if allowed), retries
+ * other failures while RetryCnt lasts, kicks off RequestSense on CHECK
+ * CONDITION, throttles the queue depth on QUEUE FULL, maintains the DCB
+ * list during the initial device scan (TEST UNIT READY / INQUIRY
+ * results decide whether a probed device's DCB is kept), unlinks the
+ * SRB from the going queue, and finally calls pcmd->scsi_done.
+ */
+static void
+SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb;
+ UCHAR bval, bval1, i, j, status;
+ PSCSICMD pcmd;
+ PSCSI_INQDATA ptr;
+ USHORT disable_tag;
+ ULONG flags;
+ PSGL ptr2;
+ ULONG swlval;
+
+ pcmd = pSRB->pcmd;
+ status = pSRB->TargetStatus;
+ if(pSRB->SRBFlag & AUTO_REQSENSE)
+ {
+ /* A REQUEST SENSE we issued ourselves just finished. */
+ pSRB->SRBFlag &= ~AUTO_REQSENSE;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = SCSI_STAT_CHECKCOND;
+ if(status == SCSI_STAT_CHECKCOND)
+ {
+ /* Sense itself failed: give up on this target. */
+ pcmd->result = DID_BAD_TARGET << 16;
+ goto ckc_e;
+ }
+ if(pSRB->RetryCnt == 0)
+ {
+ /* Restore original CDB and transfer length saved in Segment0/1. */
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
+ pSRB->TotalXferredLen = pSRB->Segment1[1];
+ if( (pSRB->TotalXferredLen) &&
+ (pSRB->TotalXferredLen >= pcmd->underflow) )
+ pcmd->result |= (DID_OK << 16);
+ else
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+#ifdef DC390_DEBUG0
+ printk("Cmd=%2x,Result=%8x,XferL=%8x,",pSRB->CmdBlock[0],
+ (UINT) pcmd->result, (UINT) pSRB->TotalXferredLen);
+#endif
+ goto ckc_e;
+ }
+ else
+ {
+ /* Retries remain: restore saved state and reissue the command. */
+ pSRB->RetryCnt--;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
+ *((PULONG) &(pSRB->CmdBlock[4])) = pSRB->Segment0[1];
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+ goto ckc_e;
+ }
+ pcmd->result |= (DRIVER_SENSE << 24);
+ pSRB->SGcount = (UCHAR) pSRB->Segment1[0];
+ pSRB->ScsiCmdLen = (UCHAR) (pSRB->Segment1[0] >> 8);
+ pSRB->SGIndex = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGToBeXferLen = 0;
+ if( pcmd->use_sg )
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ else if( pcmd->request_buffer )
+ {
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ if( DC390_StartSCSI( pACB, pDCB, pSRB ) )
+ RewaitSRB( pDCB, pSRB );
+ return;
+ }
+ }
+ if( status )
+ {
+ if( status == SCSI_STAT_CHECKCOND)
+ {
+ /* CHECK CONDITION: optionally report the unread tail (debug),
+ * then fetch sense data automatically. */
+ if( (pSRB->SGIndex < pSRB->SGcount) && (pSRB->SGcount) && (pSRB->SGToBeXferLen) )
+ {
+ bval = pSRB->SGcount;
+ swlval = 0;
+ ptr2 = pSRB->pSegmentList;
+ for( i=pSRB->SGIndex; i < bval; i++)
+ {
+ swlval += ptr2->length;
+ ptr2++;
+ }
+#ifdef DC390_DEBUG0
+ printk("XferredLen=%8x,NotXferLen=%8x,",
+ (UINT) pSRB->TotalXferredLen, (UINT) swlval);
+#endif
+ }
+ RequestSense( pACB, pDCB, pSRB );
+ return;
+ }
+ else if( status == SCSI_STAT_QUEUEFULL )
+ {
+ /* Target queue full: shrink our queue depth and retry later. */
+ bval = (UCHAR) pDCB->GoingSRBCnt;
+ bval--;
+ pDCB->MaxCommand = bval;
+ RewaitSRB( pDCB, pSRB );
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ return;
+ }
+ else if(status == SCSI_STAT_SEL_TIMEOUT)
+ {
+ pSRB->AdaptStatus = H_SEL_TIMEOUT;
+ pSRB->TargetStatus = 0;
+ pcmd->result = DID_BAD_TARGET << 16;
+ }
+ else
+ {
+ pSRB->AdaptStatus = 0;
+ if( pSRB->RetryCnt )
+ {
+ /* Generic failure with retries left: reset SG state, reissue. */
+ pSRB->RetryCnt--;
+ pSRB->TargetStatus = 0;
+ pSRB->SGIndex = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGToBeXferLen = 0;
+ if( pcmd->use_sg )
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ else if( pcmd->request_buffer )
+ {
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ if( DC390_StartSCSI( pACB, pDCB, pSRB ) )
+ RewaitSRB( pDCB, pSRB );
+ return;
+ }
+ else
+ {
+ pcmd->result |= (DID_ERROR << 16) | (ULONG) (pSRB->EndMessage << 8) |
+ (ULONG) status;
+ }
+ }
+ }
+ else
+ {
+ /* Target status OK: check adapter-side status instead. */
+ status = pSRB->AdaptStatus;
+ if(status & H_OVER_UNDER_RUN)
+ {
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16) | (pSRB->EndMessage << 8);
+ }
+ else if( pSRB->SRBStatus & PARITY_ERROR)
+ {
+ pcmd->result |= (DID_PARITY << 16) | (pSRB->EndMessage << 8);
+ }
+ else /* No error */
+ {
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16);
+ }
+ }
+
+ckc_e:
+ if( pACB->scan_devices )
+ {
+ /* Device scan in progress: decide whether the probed DCB stays
+ * linked based on the TEST UNIT READY / INQUIRY outcome. */
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ if(pcmd->result != (DID_OK << 16))
+ {
+ if( pcmd->result & SCSI_STAT_CHECKCOND )
+ {
+ goto RTN_OK;
+ }
+ else
+ {
+ /* No device at this nexus: unlink the trial DCB. */
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) &&
+ ((pcmd->lun == 0) || (pcmd->lun == pACB->max_lun)) )
+ {
+ pACB->scan_devices = 0;
+ }
+ }
+ }
+ else
+ {
+RTN_OK:
+ pPrevDCB->pNextDCB = pDCB;
+ pDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) && (pcmd->lun == pACB->max_lun) )
+ pACB->scan_devices = END_SCAN;
+ }
+ }
+ else if( pSRB->CmdBlock[0] == INQUIRY )
+ {
+ if( (pcmd->target == pACB->max_id) &&
+ (pcmd->lun == pACB->max_lun) )
+ {
+ pACB->scan_devices = 0;
+ }
+ ptr = (PSCSI_INQDATA) (pcmd->request_buffer);
+ if( pcmd->use_sg )
+ ptr = (PSCSI_INQDATA) (((PSGL) ptr)->address);
+ bval1 = ptr->DevType & SCSI_DEVTYPE;
+ if(bval1 == SCSI_NODEV)
+ {
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+ else
+ {
+ pACB->DeviceCnt++;
+ pPrevDCB = pDCB;
+ pACB->pDCB_free = (PDCB) ((ULONG) (pACB->pDCB_free) + sizeof( DC390_DCB ));
+ pDCB->DevType = bval1;
+ if(bval1 == TYPE_DISK || bval1 == TYPE_MOD)
+ {
+ /* Enable tagged queuing for capable disks unless the
+ * model name is on the bad-device blacklist. */
+ if( (((ptr->Vers & 0x07) >= 2) || ((ptr->RDF & 0x0F) == 2)) &&
+ (ptr->Flags & SCSI_INQ_CMDQUEUE) &&
+ (pDCB->DevMode & TAG_QUEUING_) &&
+ (pDCB->DevMode & EN_DISCONNECT_) )
+ {
+ disable_tag = 0;
+ for(i=0; i<BADDEVCNT; i++)
+ {
+ for(j=0; j<28; j++)
+ {
+ if( ((PUCHAR)ptr)[8+j] != baddevname1[i][j])
+ break;
+ }
+ if(j == 28)
+ {
+ disable_tag = 1;
+ break;
+ }
+ }
+
+ if( !disable_tag )
+ {
+ pDCB->MaxCommand = pACB->TagMaxNum;
+ pDCB->SyncMode |= EN_TAG_QUEUING;
+ pDCB->TagMask = 0;
+ }
+ else
+ {
+ pDCB->SyncMode |= EN_ATN_STOP;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* Unlink the SRB from the going list and return it to the free
+ * list, with interrupts masked. */
+ save_flags( flags );
+ cli();
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ if(pSRB == pDCB->pGoingSRB )
+ {
+ pDCB->pGoingSRB = pSRB->pNextSRB;
+ }
+ else
+ {
+ psrb = pDCB->pGoingSRB;
+ while( psrb->pNextSRB != pSRB )
+ psrb = psrb->pNextSRB;
+ psrb->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pGoingLast )
+ pDCB->pGoingLast = psrb;
+ }
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pDCB->GoingSRBCnt--;
+
+ DoWaitingSRB( pACB );
+ restore_flags(flags);
+
+/* Notify cmd done */
+ pcmd->scsi_done( pcmd );
+
+ if( pDCB->QIORBCnt )
+ DoNextCmd( pACB, pDCB );
+ return;
+}
+
+
+/*
+ * DoingSRB_Done - after a bus reset, fail every in-flight SRB on every
+ * DCB with DID_RESET, return the SRBs to the free list and call each
+ * command's completion routine. Walks the circular DCB list exactly
+ * once.
+ * (Also fixes a stray double semicolon after "GoingSRBCnt = 0".)
+ */
+static void
+DoingSRB_Done( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB psrb, psrb2;
+ USHORT cnt, i;
+ PSCSICMD pcmd;
+
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ do
+ {
+ cnt = pdcb->GoingSRBCnt;
+ psrb = pdcb->pGoingSRB;
+ for( i=0; i<cnt; i++)
+ {
+ psrb2 = psrb->pNextSRB;
+ pcmd = psrb->pcmd;
+ pcmd->result = DID_RESET << 16;
+
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ psrb->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = psrb;
+
+ pcmd->scsi_done( pcmd );
+ psrb = psrb2;
+ }
+ pdcb->GoingSRBCnt = 0;
+ pdcb->pGoingSRB = NULL;
+ pdcb->TagMask = 0;
+ pdcb = pdcb->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+/*
+ * DC390_ResetSCSIBus - idle the DMA engine and issue a SCSI bus reset
+ * command, with interrupts masked. RESET_DEV in ACBFlag tells the
+ * subsequent reset-detect interrupt that we caused the reset ourselves.
+ */
+static void
+DC390_ResetSCSIBus( PACB pACB )
+{
+ USHORT ioport;
+ UCHAR bval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ pACB->ACBFlag |= RESET_DEV;
+ ioport = pACB->IOPortBase;
+
+ bval = DMA_IDLE_CMD;
+ outb(bval,ioport+DMA_Cmd);
+
+ bval = RST_SCSI_BUS_CMD;
+ outb(bval,ioport+ScsiCmd);
+
+ restore_flags(flags);
+ return;
+}
+
+
+/*
+ * DC390_ScsiRstDetect - handle a SCSI-reset interrupt. Busy-waits one
+ * second (jiffies spin, interrupts briefly enabled) for the bus to
+ * settle, idles the chip, then: if we triggered the reset ourselves
+ * (RESET_DEV), just mark it done; otherwise reset all device
+ * parameters, requeue interrupted SRBs and restart the waiting queue.
+ */
+static void
+DC390_ScsiRstDetect( PACB pACB )
+{
+ ULONG wlval, flags;
+ USHORT ioport;
+ UCHAR bval;
+
+#ifdef DC390_DEBUG0
+ printk("RST_DETEC");
+#endif
+ save_flags(flags);
+ sti();
+ wlval = jiffies + HZ;
+ while( jiffies < wlval ); /* delay 1 sec */
+
+ cli();
+ ioport = pACB->IOPortBase;
+ bval = DMA_IDLE_CMD;
+ outb(bval,ioport+DMA_Cmd);
+ bval = CLEAR_FIFO_CMD;
+ outb(bval,ioport+ScsiCmd);
+
+ if( pACB->ACBFlag & RESET_DEV )
+ pACB->ACBFlag |= RESET_DONE;
+ else
+ {
+ pACB->ACBFlag |= RESET_DETECT;
+
+ ResetDevParam( pACB );
+/* DoingSRB_Done( pACB ); ???? */
+ RecoverSRB( pACB );
+ pACB->pActiveDCB = NULL;
+ pACB->ACBFlag = 0;
+ DoWaitingSRB( pACB );
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+/*
+ * RequestSense (DC390 variant) - start an automatic REQUEST SENSE for
+ * the command that just reported CHECK CONDITION. Saves the original
+ * CDB (Segment0), command length/SG count and transferred length
+ * (Segment1) for SRBdone to restore, rebuilds the SRB to read into
+ * pcmd->sense_buffer, and restarts the target (requeueing on failure).
+ */
+static void
+RequestSense( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSCSICMD pcmd;
+
+ pSRB->SRBFlag |= AUTO_REQSENSE;
+ /* Stash original CDB and bookkeeping for later restore. */
+ pSRB->Segment0[0] = *((PULONG) &(pSRB->CmdBlock[0]));
+ pSRB->Segment0[1] = *((PULONG) &(pSRB->CmdBlock[4]));
+ pSRB->Segment1[0] = (ULONG) ((pSRB->ScsiCmdLen << 8) + pSRB->SGcount);
+ pSRB->Segment1[1] = pSRB->TotalXferredLen;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+
+ pcmd = pSRB->pcmd;
+
+ /* One-entry scatter list pointing at the mid-layer sense buffer. */
+ pSRB->Segmentx.address = (PUCHAR) &(pcmd->sense_buffer);
+ pSRB->Segmentx.length = sizeof(pcmd->sense_buffer);
+ pSRB->pSegmentList = &pSRB->Segmentx;
+ pSRB->SGcount = 1;
+ pSRB->SGIndex = 0;
+
+ /* 6-byte REQUEST SENSE CDB; the ULONG store puts opcode 0x03 in
+ * CmdBlock[0] (assumes little-endian layout - x86 only). */
+ *((PULONG) &(pSRB->CmdBlock[0])) = 0x00000003;
+ pSRB->CmdBlock[1] = pDCB->IdentifyMsg << 5;
+ *((PUSHORT) &(pSRB->CmdBlock[4])) = sizeof(pcmd->sense_buffer);
+ pSRB->ScsiCmdLen = 6;
+
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGToBeXferLen = 0;
+ if( DC390_StartSCSI( pACB, pDCB, pSRB ) )
+ RewaitSRB( pDCB, pSRB );
+}
+
+
+/*
+ * EnableMsgOut2 - raise ATN so the target enters MESSAGE OUT phase and
+ * mark one message byte pending. The caller must already have placed
+ * the message byte in pSRB->MsgOutBuf[0].
+ */
+static void
+EnableMsgOut2( PACB pACB, PSRB pSRB )
+{
+ USHORT ioport;
+ UCHAR bval;
+
+ ioport = pACB->IOPortBase;
+ pSRB->MsgCnt = 1;
+ bval = SET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+
+/*
+ * EnableMsgOut - queue an ABORT message and raise ATN to deliver it.
+ */
+static void
+EnableMsgOut( PACB pACB, PSRB pSRB )
+{
+ pSRB->MsgOutBuf[0] = MSG_ABORT;
+ EnableMsgOut2( pACB, pSRB );
+}
+
+
+/*
+ * DC390_InvalidCmd - invalid-command interrupt: if it happened while a
+ * selection or message-out was in flight, clear the chip FIFO so stale
+ * bytes are not replayed. Otherwise nothing is done.
+ */
+static void
+DC390_InvalidCmd( PACB pACB )
+{
+ UCHAR bval;
+ USHORT ioport;
+ PSRB pSRB;
+
+ pSRB = pACB->pActiveDCB->pActiveSRB;
+ if( pSRB->SRBState & (SRB_START_+SRB_MSGOUT) )
+ {
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval,(ioport+ScsiCmd));
+ }
+}
+
diff --git a/linux/src/drivers/scsi/sd.c b/linux/src/drivers/scsi/sd.c
new file mode 100644
index 0000000..eab64dd
--- /dev/null
+++ b/linux/src/drivers/scsi/sd.c
@@ -0,0 +1,1691 @@
+/*
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * Linux scsi disk driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale ericy@cais.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Modified by Eric Youngdale eric@aib.com to support loadable
+ * low-level scsi drivers.
+ */
+
+#include <linux/module.h>
+#ifdef MODULE
+/*
+ * This is a variable in scsi.c that is set when we are processing something
+ * after boot time. By definition, this is true when we are a loadable module
+ * ourselves.
+ */
+#define MODULE_FLAG 1
+#else
+#define MODULE_FLAG scsi_loadable_module_flag
+#endif /* MODULE */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+
+#define MAJOR_NR SCSI_DISK_MAJOR
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsi_ioctl.h>
+#include "constants.h"
+
+#include <linux/genhd.h>
+
+/*
+ * static const char RCSid[] = "$Header:";
+ */
+
+#define MAX_RETRIES 5
+
+/*
+ * Time out in seconds for disks and Magneto-opticals (which are slower).
+ */
+
+#define SD_TIMEOUT (20 * HZ)
+#define SD_MOD_TIMEOUT (25 * HZ)
+
+#define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
+ SC->device->type != TYPE_MOD)
+
+struct hd_struct * sd;
+
+Scsi_Disk * rscsi_disks = NULL;
+static int * sd_sizes;
+static int * sd_blocksizes;
+static int * sd_hardsizes; /* Hardware sector size */
+
+extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+static int check_scsidisk_media_change(kdev_t);
+static int fop_revalidate_scsidisk(kdev_t);
+
+static int sd_init_onedisk(int);
+
+static void requeue_sd_request (Scsi_Cmnd * SCpnt);
+
+static int sd_init(void);
+static void sd_finish(void);
+static int sd_attach(Scsi_Device *);
+static int sd_detect(Scsi_Device *);
+static void sd_detach(Scsi_Device *);
+
+struct Scsi_Device_Template sd_template =
+{ NULL, "disk", "sd", NULL, TYPE_DISK,
+ SCSI_DISK_MAJOR, 0, 0, 0, 1,
+ sd_detect, sd_init,
+ sd_finish, sd_attach, sd_detach
+};
+
+static int sd_open(struct inode * inode, struct file * filp)
+{
+ int target;
+ target = DEVICE_NR(inode->i_rdev);
+
+ if(target >= sd_template.dev_max || !rscsi_disks[target].device)
+ return -ENXIO; /* No such device */
+
+ /*
+ * Make sure that only one process can do a check_change_disk at one time.
+ * This is also used to lock out further access when the partition table
+ * is being re-read.
+ */
+
+ while (rscsi_disks[target].device->busy)
+ barrier();
+ if(rscsi_disks[target].device->removable) {
+ check_disk_change(inode->i_rdev);
+
+ /*
+ * If the drive is empty, just let the open fail.
+ */
+ if ( !rscsi_disks[target].ready )
+ return -ENXIO;
+
+ /*
+ * Similarly, if the device has the write protect tab set,
+ * have the open fail if the user expects to be able to write
+ * to the thing.
+ */
+ if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) )
+ return -EROFS;
+ }
+
+ /*
+ * See if we are requesting a non-existent partition. Do this
+ * after checking for disk change.
+ */
+ if(sd_sizes[MINOR(inode->i_rdev)] == 0)
+ return -ENXIO;
+
+ if(rscsi_disks[target].device->removable)
+ if(!rscsi_disks[target].device->access_count)
+ sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+
+ rscsi_disks[target].device->access_count++;
+ if (rscsi_disks[target].device->host->hostt->usage_count)
+ (*rscsi_disks[target].device->host->hostt->usage_count)++;
+ if(sd_template.usage_count) (*sd_template.usage_count)++;
+ return 0;
+}
+
+static void sd_release(struct inode * inode, struct file * file)
+{
+ int target;
+ fsync_dev(inode->i_rdev);
+
+ target = DEVICE_NR(inode->i_rdev);
+
+ rscsi_disks[target].device->access_count--;
+ if (rscsi_disks[target].device->host->hostt->usage_count)
+ (*rscsi_disks[target].device->host->hostt->usage_count)--;
+ if(sd_template.usage_count) (*sd_template.usage_count)--;
+
+ if(rscsi_disks[target].device->removable) {
+ if(!rscsi_disks[target].device->access_count)
+ sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ }
+}
+
+static void sd_geninit(struct gendisk *);
+
+static struct file_operations sd_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ sd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ sd_open, /* open code */
+ sd_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_scsidisk_media_change, /* Disk change */
+ fop_revalidate_scsidisk /* revalidate */
+};
+
+static struct gendisk sd_gendisk = {
+ MAJOR_NR, /* Major number */
+ "sd", /* Major name */
+ 4, /* Bits to shift to get real from partition */
+ 1 << 4, /* Number of partitions per real */
+ 0, /* maximum number of real */
+ sd_geninit, /* init function */
+ NULL, /* hd struct */
+ NULL, /* block sizes */
+ 0, /* number */
+ NULL, /* internal */
+ NULL /* next */
+};
+
+static void sd_geninit (struct gendisk *ignored)
+{
+ int i;
+
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if(rscsi_disks[i].device)
+ sd[i << 4].nr_sects = rscsi_disks[i].capacity;
+#if 0
+ /* No longer needed - we keep track of this as we attach/detach */
+ sd_gendisk.nr_real = sd_template.dev_max;
+#endif
+}
+
+/*
+ * rw_intr is the interrupt routine for the device driver.
+ * It will be notified on the end of a SCSI read / write, and
+ * will take one of several actions based on success or failure.
+ */
+
+static void rw_intr (Scsi_Cmnd *SCpnt)
+{
+ int result = SCpnt->result;
+ int this_count = SCpnt->bufflen >> 9;
+ int good_sectors = (result == 0 ? this_count : 0);
+ int block_sectors = 1;
+
+#ifdef DEBUG
+ printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
+ SCpnt->host->host_no, result);
+#endif
+
+ /*
+ Handle MEDIUM ERRORs that indicate partial success. Since this is a
+ relatively rare error condition, no care is taken to avoid unnecessary
+ additional work such as memcpy's that could be avoided.
+ */
+
+ if (driver_byte(result) != 0 && /* An error occurred */
+ SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
+ SCpnt->sense_buffer[2] == MEDIUM_ERROR)
+ {
+ long error_sector = (SCpnt->sense_buffer[3] << 24) |
+ (SCpnt->sense_buffer[4] << 16) |
+ (SCpnt->sense_buffer[5] << 8) |
+ SCpnt->sense_buffer[6];
+ int sector_size =
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size;
+ if (SCpnt->request.bh != NULL)
+ block_sectors = SCpnt->request.bh->b_size >> 9;
+ if (sector_size == 1024)
+ {
+ error_sector <<= 1;
+ if (block_sectors < 2) block_sectors = 2;
+ }
+ else if (sector_size == 256)
+ error_sector >>= 1;
+ error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect;
+ error_sector &= ~ (block_sectors - 1);
+ good_sectors = error_sector - SCpnt->request.sector;
+ if (good_sectors < 0 || good_sectors >= this_count)
+ good_sectors = 0;
+ }
+
+ /*
+ * Handle RECOVERED ERRORs that indicate success after recovery action
+ * by the target device.
+ */
+
+ if (SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
+ SCpnt->sense_buffer[2] == RECOVERED_ERROR)
+ {
+ printk("scsidisk recovered I/O error: dev %s, sector %lu, absolute sector %lu\n",
+ kdevname(SCpnt->request.rq_dev), SCpnt->request.sector,
+ SCpnt->request.sector + sd[MINOR(SCpnt->request.rq_dev)].start_sect);
+ good_sectors = this_count;
+ result = 0;
+ }
+
+ /*
+ * First case : we assume that the command succeeded. One of two things
+ * will happen here. Either we will be finished, or there will be more
+ * sectors that we were unable to read last time.
+ */
+
+ if (good_sectors > 0) {
+
+#ifdef DEBUG
+ printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
+ SCpnt->request.nr_sectors);
+ printk("use_sg is %d\n ",SCpnt->use_sg);
+#endif
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+#ifdef DEBUG
+ printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
+ sgpnt[i].length);
+#endif
+ if (sgpnt[i].alt_address) {
+ if (SCpnt->request.cmd == READ)
+ memcpy(sgpnt[i].alt_address, sgpnt[i].address,
+ sgpnt[i].length);
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+
+ /* Free list of scatter-gather pointers */
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+ } else {
+ if (SCpnt->buffer != SCpnt->request.buffer) {
+#ifdef DEBUG
+ printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (SCpnt->request.cmd == READ)
+ memcpy(SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ }
+ }
+ /*
+ * If multiple sectors are requested in one buffer, then
+ * they will have been finished off by the first command.
+ * If not, then we have a multi-buffer command.
+ */
+ if (SCpnt->request.nr_sectors > this_count)
+ {
+ SCpnt->request.errors = 0;
+
+ if (!SCpnt->request.bh)
+ {
+#ifdef DEBUG
+ printk("sd%c : handling page request, no buffer\n",
+ 'a' + MINOR(SCpnt->request.rq_dev));
+#endif
+ /*
+ * The SCpnt->request.nr_sectors field is always done in
+ * 512 byte sectors, even if this really isn't the case.
+ */
+ panic("sd.c: linked page request (%lx %x)",
+ SCpnt->request.sector, this_count);
+ }
+ }
+ SCpnt = end_scsi_request(SCpnt, 1, good_sectors);
+ if (result == 0)
+ {
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ }
+
+ if (good_sectors == 0) {
+
+ /* Free up any indirection buffers we allocated for DMA purposes. */
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+#ifdef DEBUG
+ printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (sgpnt[i].alt_address) {
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ } else {
+#ifdef DEBUG
+ printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ }
+ }
+
+ /*
+ * Now, if we were good little boys and girls, Santa left us a request
+ * sense buffer. We can extract information from this, so we
+ * can choose a block to remap, etc.
+ */
+
+ if (driver_byte(result) != 0) {
+ if (suggestion(result) == SUGGEST_REMAP) {
+#ifdef REMAP
+ /*
+ * Not yet implemented. A read will fail after being remapped,
+ * a write will call the strategy routine again.
+ */
+ if rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap
+ {
+ result = 0;
+ }
+ else
+#endif
+ }
+
+ if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
+ if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+ if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
+ /* detected disc change. set a bit and quietly refuse
+ * further access.
+ */
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ else
+ {
+ /*
+ * Must have been a power glitch, or a bus reset.
+ * Could not have been a media change, so we just retry
+ * the request and see what happens.
+ */
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ }
+ }
+
+
+ /* If we had an ILLEGAL REQUEST returned, then we may have
+ * performed an unsupported command. The only thing this should be
+ * would be a ten byte read where only a six byte read was supported.
+ * Also, on a system where READ CAPACITY failed, we have read past
+ * the end of the disk.
+ */
+
+ if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
+ if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
+ requeue_sd_request(SCpnt);
+ result = 0;
+ } else {
+ /* ???? */
+ }
+ }
+
+ if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
+ printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ",
+ SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ print_sense("sd", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ } /* driver byte != 0 */
+ if (result) {
+ printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
+
+ if (driver_byte(result) & DRIVER_SENSE)
+ print_sense("sd", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+}
+
+/*
+ * requeue_sd_request() is the request handler function for the sd driver.
+ * Its function in life is to take block device requests, and translate
+ * them to SCSI commands.
+ */
+
+static void do_sd_request (void)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ Scsi_Device * SDev;
+ struct request * req = NULL;
+ unsigned long flags;
+ int flag = 0;
+
+ save_flags(flags);
+ while (1==1){
+ cli();
+ if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
+ restore_flags(flags);
+ return;
+ }
+
+ INIT_SCSI_REQUEST;
+ SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;
+
+ /*
+ * I am not sure where the best place to do this is. We need
+ * to hook in a place where we are likely to come if in user
+ * space.
+ */
+ if( SDev->was_reset )
+ {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ */
+ if( SDev->removable && !intr_count )
+ {
+ scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
+ /* scsi_ioctl may allow CURRENT to change, so start over. */
+ SDev->was_reset = 0;
+ continue;
+ }
+ SDev->was_reset = 0;
+ }
+
+ /* We have to be careful here. allocate_device will get a free pointer,
+ * but there is no guarantee that it is queueable. In normal usage,
+ * we want to call this, because other types of devices may have the
+ * host all tied up, and we want to make sure that we have at least
+ * one request pending for this type of device. We can also come
+ * through here while servicing an interrupt, because of the need to
+ * start another command. If we call allocate_device more than once,
+ * then the system can wedge if the command is not queueable. The
+ * request_queueable function is safe because it checks to make sure
+ * that the host is able to take another command before it returns
+ * a pointer.
+ */
+
+ if (flag++ == 0)
+ SCpnt = allocate_device(&CURRENT,
+ rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0);
+ else SCpnt = NULL;
+
+ /*
+ * The following restore_flags leads to latency problems. FIXME.
+ * Using a "sti()" gets rid of the latency problems but causes
+ * race conditions and crashes.
+ */
+ restore_flags(flags);
+
+ /* This is a performance enhancement. We dig down into the request
+ * list and try to find a queueable request (i.e. device not busy,
+ * and host able to accept another command. If we find one, then we
+ * queue it. This can make a big difference on systems with more than
+ * one disk drive. We want to have the interrupts off when monkeying
+ * with the request list, because otherwise the kernel might try to
+ * slip in a request in between somewhere.
+ */
+
+ if (!SCpnt && sd_template.nr_dev > 1){
+ struct request *req1;
+ req1 = NULL;
+ cli();
+ req = CURRENT;
+ while(req){
+ SCpnt = request_queueable(req,
+ rscsi_disks[DEVICE_NR(req->rq_dev)].device);
+ if(SCpnt) break;
+ req1 = req;
+ req = req->next;
+ }
+ if (SCpnt && req->rq_status == RQ_INACTIVE) {
+ if (req == CURRENT)
+ CURRENT = CURRENT->next;
+ else
+ req1->next = req->next;
+ }
+ restore_flags(flags);
+ }
+
+ if (!SCpnt) return; /* Could not find anything to do */
+
+ /* Queue command */
+ requeue_sd_request(SCpnt);
+ } /* While */
+}
+
+static void requeue_sd_request (Scsi_Cmnd * SCpnt)
+{
+ int dev, devm, block, this_count;
+ unsigned char cmd[12];
+ int bounce_size, contiguous;
+ int max_sg;
+ struct buffer_head * bh, *bhp;
+ char * buff, *bounce_buffer;
+
+ repeat:
+
+ if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
+ do_sd_request();
+ return;
+ }
+
+ devm = MINOR(SCpnt->request.rq_dev);
+ dev = DEVICE_NR(SCpnt->request.rq_dev);
+
+ block = SCpnt->request.sector;
+ this_count = 0;
+
+#ifdef DEBUG
+ printk("Doing sd request, dev = %d, block = %d\n", devm, block);
+#endif
+
+ if (devm >= (sd_template.dev_max << 4) ||
+ !rscsi_disks[dev].device ||
+ block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
+ {
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+ block += sd[devm].start_sect;
+
+ if (rscsi_disks[dev].device->changed)
+ {
+ /*
+ * quietly refuse to do anything to a changed disc until the changed
+ * bit has been reset
+ */
+ /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+#ifdef DEBUG
+ printk("sd%c : real dev = /dev/sd%c, block = %d\n",
+ 'a' + devm, dev, block);
+#endif
+
+ /*
+ * If we have a 1K hardware sectorsize, prevent access to single
+ * 512 byte sectors. In theory we could handle this - in fact
+ * the scsi cdrom driver must be able to handle this because
+ * we typically use 1K blocksizes, and cdroms typically have
+ * 2K hardware sectorsizes. Of course, things are simpler
+ * with the cdrom, since it is read-only. For performance
+ * reasons, the filesystems should be able to handle this
+ * and not force the scsi disk driver to use bounce buffers
+ * for this.
+ */
+ if (rscsi_disks[dev].sector_size == 1024)
+ if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+ printk("sd.c:Bad block number requested");
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+ switch (SCpnt->request.cmd)
+ {
+ case WRITE :
+ if (!rscsi_disks[dev].device->writeable)
+ {
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+ cmd[0] = WRITE_6;
+ break;
+ case READ :
+ cmd[0] = READ_6;
+ break;
+ default :
+ panic ("Unknown sd command %d\n", SCpnt->request.cmd);
+ }
+
+ SCpnt->this_count = 0;
+
+ /* If the host adapter can deal with very large scatter-gather
+ * requests, it is a waste of time to cluster
+ */
+ contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
+ bounce_buffer = NULL;
+ bounce_size = (SCpnt->request.nr_sectors << 9);
+
+ /* First see if we need a bounce buffer for this request. If we do, make
+ * sure that we can allocate a buffer. Do not waste space by allocating
+ * a bounce buffer if we are straddling the 16Mb line
+ */
+ if (contiguous && SCpnt->request.bh &&
+ ((long) SCpnt->request.bh->b_data)
+ + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
+ && SCpnt->host->unchecked_isa_dma) {
+ if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
+ bounce_buffer = (char *) scsi_malloc(bounce_size);
+ if(!bounce_buffer) contiguous = 0;
+ }
+
+ if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
+ for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
+ bhp = bhp->b_reqnext) {
+ if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
+ if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
+ contiguous = 0;
+ break;
+ }
+ }
+ if (!SCpnt->request.bh || contiguous) {
+
+ /* case of page request (i.e. raw device), or unlinked buffer */
+ this_count = SCpnt->request.nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+
+ } else if (SCpnt->host->sg_tablesize == 0 ||
+ (need_isa_buffer && dma_free_sectors <= 10)) {
+
+ /* Case of host adapter that cannot scatter-gather. We also
+ * come here if we are running low on DMA buffer memory. We set
+ * a threshold higher than that we would need for this request so
+ * we leave room for other requests. Even though we would not need
+ * it all, we need to be conservative, because if we run low enough
+ * we have no choice but to panic.
+ */
+ if (SCpnt->host->sg_tablesize != 0 &&
+ need_isa_buffer &&
+ dma_free_sectors <= 10)
+ printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
+
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+
+ } else {
+
+ /* Scatter-gather capable host adapter */
+ struct scatterlist * sgpnt;
+ int count, this_count_max;
+ int counted;
+
+ bh = SCpnt->request.bh;
+ this_count = 0;
+ this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
+ count = 0;
+ bhp = NULL;
+ while(bh) {
+ if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
+ !CLUSTERABLE_DEVICE(SCpnt) ||
+ (SCpnt->host->unchecked_isa_dma &&
+ ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
+ if (count < SCpnt->host->sg_tablesize) count++;
+ else break;
+ }
+ this_count += (bh->b_size >> 9);
+ bhp = bh;
+ bh = bh->b_reqnext;
+ }
+#if 0
+ if(SCpnt->host->unchecked_isa_dma &&
+ ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
+#endif
+ SCpnt->use_sg = count; /* Number of chains */
+ /* scsi_malloc can only allocate in chunks of 512 bytes */
+ count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
+
+ SCpnt->sglist_len = count;
+ max_sg = count / sizeof(struct scatterlist);
+ if(SCpnt->host->sg_tablesize < max_sg)
+ max_sg = SCpnt->host->sg_tablesize;
+ sgpnt = (struct scatterlist * ) scsi_malloc(count);
+ if (!sgpnt) {
+ printk("Warning - running *really* short on DMA buffers\n");
+ SCpnt->use_sg = 0; /* No memory left - bail out */
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ } else {
+ memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only
+ * if memory is available
+ */
+ buff = (char *) sgpnt;
+ counted = 0;
+ for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
+ count < SCpnt->use_sg && bh;
+ count++, bh = bhp) {
+
+ bhp = bh->b_reqnext;
+
+ if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
+ sgpnt[count].length += bh->b_size;
+ counted += bh->b_size >> 9;
+
+ if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
+ ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
+ !sgpnt[count].alt_address) {
+ sgpnt[count].alt_address = sgpnt[count].address;
+ /* We try to avoid exhausting the DMA pool, since it is
+ * easier to control usage here. In other places we might
+ * have a more pressing need, and we would be screwed if
+ * we ran out */
+ if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
+ sgpnt[count].address = NULL;
+ } else {
+ sgpnt[count].address =
+ (char *) scsi_malloc(sgpnt[count].length);
+ }
+ /* If we start running low on DMA buffers, we abort the
+ * scatter-gather operation, and free all of the memory
+ * we have allocated. We want to ensure that all scsi
+ * operations are able to do at least a non-scatter/gather
+ * operation */
+ if(sgpnt[count].address == NULL){ /* Out of dma memory */
+#if 0
+ printk("Warning: Running low on SCSI DMA buffers");
+ /* Try switching back to a non s-g operation. */
+ while(--count >= 0){
+ if(sgpnt[count].alt_address)
+ scsi_free(sgpnt[count].address,
+ sgpnt[count].length);
+ }
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+ scsi_free(sgpnt, SCpnt->sglist_len);
+#endif
+ SCpnt->use_sg = count;
+ this_count = counted -= bh->b_size >> 9;
+ break;
+ }
+ }
+
+ /* Only cluster buffers if we know that we can supply DMA
+ * buffers large enough to satisfy the request. Do not cluster
+ * a new request if this would mean that we suddenly need to
+ * start using DMA bounce buffers */
+ if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
+ && CLUSTERABLE_DEVICE(SCpnt)) {
+ char * tmp;
+
+ if (((long) sgpnt[count].address) + sgpnt[count].length +
+ bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
+ (SCpnt->host->unchecked_isa_dma) &&
+ !sgpnt[count].alt_address) continue;
+
+ if(!sgpnt[count].alt_address) {count--; continue; }
+ if(dma_free_sectors > 10)
+ tmp = (char *) scsi_malloc(sgpnt[count].length
+ + bhp->b_size);
+ else {
+ tmp = NULL;
+ max_sg = SCpnt->use_sg;
+ }
+ if(tmp){
+ scsi_free(sgpnt[count].address, sgpnt[count].length);
+ sgpnt[count].address = tmp;
+ count--;
+ continue;
+ }
+
+ /* If we are allowed another sg chain, then increment
+ * counter so we can insert it. Otherwise we will end
+ up truncating */
+
+ if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
+ } /* contiguous buffers */
+ } /* for loop */
+
+ /* This is actually how many we are going to transfer */
+ this_count = counted;
+
+ if(count < SCpnt->use_sg || SCpnt->use_sg
+ > SCpnt->host->sg_tablesize){
+ bh = SCpnt->request.bh;
+ printk("Use sg, count %d %x %d\n",
+ SCpnt->use_sg, count, dma_free_sectors);
+ printk("maxsg = %x, counted = %d this_count = %d\n",
+ max_sg, counted, this_count);
+ while(bh){
+ printk("[%p %lx] ", bh->b_data, bh->b_size);
+ bh = bh->b_reqnext;
+ }
+ if(SCpnt->use_sg < 16)
+ for(count=0; count<SCpnt->use_sg; count++)
+ printk("{%d:%p %p %d} ", count,
+ sgpnt[count].address,
+ sgpnt[count].alt_address,
+ sgpnt[count].length);
+ panic("Ooops");
+ }
+
+ if (SCpnt->request.cmd == WRITE)
+ for(count=0; count<SCpnt->use_sg; count++)
+ if(sgpnt[count].alt_address)
+ memcpy(sgpnt[count].address, sgpnt[count].alt_address,
+ sgpnt[count].length);
+ } /* Able to malloc sgpnt */
+ } /* Host adapter capable of scatter-gather */
+
+ /* Now handle the possibility of DMA to addresses > 16Mb */
+
+ if(SCpnt->use_sg == 0){
+ if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
+ (SCpnt->host->unchecked_isa_dma)) {
+ if(bounce_buffer)
+ buff = bounce_buffer;
+ else
+ buff = (char *) scsi_malloc(this_count << 9);
+ if(buff == NULL) { /* Try backing off a bit if we are low on mem*/
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = (char *) scsi_malloc(this_count << 9);
+ if(!buff) panic("Ran out of DMA buffers.");
+ }
+ if (SCpnt->request.cmd == WRITE)
+ memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
+ }
+ }
+#ifdef DEBUG
+ printk("sd%c : %s %d/%d 512 byte blocks.\n",
+ 'a' + devm,
+ (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
+ this_count, SCpnt->request.nr_sectors);
+#endif
+
+ cmd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ if (rscsi_disks[dev].sector_size == 1024){
+ if(block & 1) panic("sd.c:Bad block number requested");
+ if(this_count & 1) panic("sd.c:Bad block number requested");
+ block = block >> 1;
+ this_count = this_count >> 1;
+ }
+
+ if (rscsi_disks[dev].sector_size == 256){
+ block = block << 1;
+ this_count = this_count << 1;
+ }
+
+ if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
+ {
+ if (this_count > 0xffff)
+ this_count = 0xffff;
+
+ cmd[0] += READ_10 - READ_6 ;
+ cmd[2] = (unsigned char) (block >> 24) & 0xff;
+ cmd[3] = (unsigned char) (block >> 16) & 0xff;
+ cmd[4] = (unsigned char) (block >> 8) & 0xff;
+ cmd[5] = (unsigned char) block & 0xff;
+ cmd[6] = cmd[9] = 0;
+ cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
+ cmd[8] = (unsigned char) this_count & 0xff;
+ }
+ else
+ {
+ if (this_count > 0xff)
+ this_count = 0xff;
+
+ cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ cmd[2] = (unsigned char) ((block >> 8) & 0xff);
+ cmd[3] = (unsigned char) block & 0xff;
+ cmd[4] = (unsigned char) this_count;
+ cmd[5] = 0;
+ }
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+ * host adapter, it's safe to assume that we can at least transfer
+ * this many bytes between each connect / disconnect.
+ */
+
+ SCpnt->transfersize = rscsi_disks[dev].sector_size;
+ SCpnt->underflow = this_count << 9;
+ scsi_do_cmd (SCpnt, (void *) cmd, buff,
+ this_count * rscsi_disks[dev].sector_size,
+ rw_intr,
+ (SCpnt->device->type == TYPE_DISK ?
+ SD_TIMEOUT : SD_MOD_TIMEOUT),
+ MAX_RETRIES);
+}
+
+static int check_scsidisk_media_change(kdev_t full_dev){
+ int retval;
+ int target;
+ struct inode inode;
+ int flag = 0;
+
+ target = DEVICE_NR(full_dev);
+
+ if (target >= sd_template.dev_max ||
+ !rscsi_disks[target].device) {
+ printk("SCSI disk request error: invalid device.\n");
+ return 0;
+ }
+
+ if(!rscsi_disks[target].device->removable) return 0;
+
+ inode.i_rdev = full_dev; /* This is all we really need here */
+
+ /* Using Start/Stop enables differentiation between drive with
+ * no cartridge loaded - NOT READY, drive with changed cartridge -
+ * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
+ * This also handles drives that auto spin down. eg iomega jaz 1GB
+ * as this will spin up the drive.
+ */
+ retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_START_UNIT, 0);
+
+ if(retval){ /* Unable to test, unit probably not ready. This usually
+ * means there is no disc in the drive. Mark as changed,
+ * and we will figure it out later once the drive is
+ * available again. */
+
+ rscsi_disks[target].ready = 0;
+ rscsi_disks[target].device->changed = 1;
+ return 1; /* This will force a flush, if called from
+ * check_disk_change */
+ }
+
+ /*
+ * for removable scsi disk ( FLOPTICAL ) we have to recognise the
+ * presence of disk in the drive. This is kept in the Scsi_Disk
+ * struct and tested at open ! Daniel Roche ( dan@lectra.fr )
+ */
+
+ rscsi_disks[target].ready = 1; /* FLOPTICAL */
+
+ retval = rscsi_disks[target].device->changed;
+ if(!flag) rscsi_disks[target].device->changed = 0;
+ return retval;
+}
+
+static void sd_init_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+static int sd_init_onedisk(int i)
+{
+ unsigned char cmd[12];
+ unsigned char *buffer;
+ unsigned long spintime;
+ int the_result, retries;
+ Scsi_Cmnd * SCpnt;
+
+ /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
+ * considered a fatal error, and many devices report such an error
+ * just after a scsi bus reset.
+ */
+
+ SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
+ buffer = (unsigned char *) scsi_malloc(512);
+
+ spintime = 0;
+
+ /* Spin up drives, as required. Only do this at boot time */
+ /* Spinup needs to be done for module loads too. */
+ do{
+ retries = 0;
+ while(retries < 3)
+ {
+ cmd[0] = TEST_UNIT_READY;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ retries++;
+ if( the_result == 0
+ || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
+ break;
+ }
+
+ /* Look for non-removable devices that return NOT_READY.
+ * Issue command to spin up drive for these cases. */
+ if(the_result && !rscsi_disks[i].device->removable &&
+ SCpnt->sense_buffer[2] == NOT_READY) {
+ unsigned long time1;
+ if(!spintime){
+#ifdef MACH
+ printk( "sd%d: Spinning up disk...", i);
+#else
+ printk( "sd%c: Spinning up disk...", 'a' + i );
+#endif
+ cmd[0] = START_STOP;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ cmd[1] |= 1; /* Return immediately */
+ memset ((void *) &cmd[2], 0, 8);
+ cmd[4] = 1; /* Start spin cycle */
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ spintime = jiffies;
+ }
+
+ time1 = jiffies + HZ;
+ while(jiffies < time1); /* Wait 1 second for next try */
+ printk( "." );
+ }
+ } while(the_result && spintime && spintime+100*HZ > jiffies);
+ if (spintime) {
+ if (the_result)
+ printk( "not responding...\n" );
+ else
+ printk( "ready\n" );
+ }
+
+ retries = 3;
+ do {
+ cmd[0] = READ_CAPACITY;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ memset ((void *) buffer, 0, 8);
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 8, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem); /* sleep until it is ready */
+ }
+
+ the_result = SCpnt->result;
+ retries--;
+
+ } while(the_result && retries);
+
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+
+ wake_up(&SCpnt->device->device_wait);
+
+ /* Wake up a process waiting for device */
+
+ /*
+ * The SCSI standard says:
+ * "READ CAPACITY is necessary for self configuring software"
+ * While not mandatory, support of READ CAPACITY is strongly encouraged.
+ * We used to die if we couldn't successfully do a READ CAPACITY.
+ * But, now we go on about our way. The side effects of this are
+ *
+ * 1. We can't know block size with certainty. I have said "512 bytes
+ * is it" as this is most common.
+ *
+ * 2. Recovery from when some one attempts to read past the end of the
+ * raw device will be slower.
+ */
+
+ if (the_result)
+ {
+#ifdef MACH
+ printk ("sd%d : READ CAPACITY failed.\n"
+ "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
+ i, i,
+#else
+ printk ("sd%c : READ CAPACITY failed.\n"
+ "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
+ 'a' + i, 'a' + i,
+#endif
+ status_byte(the_result),
+ msg_byte(the_result),
+ host_byte(the_result),
+ driver_byte(the_result)
+ );
+ if (driver_byte(the_result) & DRIVER_SENSE)
+#ifdef MACH
+ printk("sd%d : extended sense code = %1x \n",
+ i, SCpnt->sense_buffer[2] & 0xf);
+#else
+ printk("sd%c : extended sense code = %1x \n",
+ 'a' + i, SCpnt->sense_buffer[2] & 0xf);
+#endif
+ else
+#ifdef MACH
+ printk("sd%d : sense not available. \n", i);
+#else
+ printk("sd%c : sense not available. \n", 'a' + i);
+#endif
+
+#ifdef MACH
+ printk("sd%d : block size assumed to be 512 bytes, disk size 1GB. \n",
+ i);
+#else
+ printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
+ 'a' + i);
+#endif
+ rscsi_disks[i].capacity = 0x1fffff;
+ rscsi_disks[i].sector_size = 512;
+
+ /* Set dirty bit for removable devices if not ready - sometimes drives
+ * will not report this properly. */
+ if(rscsi_disks[i].device->removable &&
+ SCpnt->sense_buffer[2] == NOT_READY)
+ rscsi_disks[i].device->changed = 1;
+
+ }
+ else
+ {
+ /*
+ * FLOPTICAL , if read_capa is ok , drive is assumed to be ready
+ */
+ rscsi_disks[i].ready = 1;
+
+ rscsi_disks[i].capacity = 1 + ((buffer[0] << 24) |
+ (buffer[1] << 16) |
+ (buffer[2] << 8) |
+ buffer[3]);
+
+ rscsi_disks[i].sector_size = (buffer[4] << 24) |
+ (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+
+ if (rscsi_disks[i].sector_size == 0) {
+ rscsi_disks[i].sector_size = 512;
+#ifdef MACH
+ printk("sd%d : sector size 0 reported, assuming 512.\n", i);
+#else
+ printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
+#endif
+ }
+
+
+ if (rscsi_disks[i].sector_size != 512 &&
+ rscsi_disks[i].sector_size != 1024 &&
+ rscsi_disks[i].sector_size != 256)
+ {
+#ifdef MACH
+ printk ("sd%d : unsupported sector size %d.\n",
+ i, rscsi_disks[i].sector_size);
+#else
+ printk ("sd%c : unsupported sector size %d.\n",
+ 'a' + i, rscsi_disks[i].sector_size);
+#endif
+ if(rscsi_disks[i].device->removable){
+ rscsi_disks[i].capacity = 0;
+ } else {
+ printk ("scsi : deleting disk entry.\n");
+ rscsi_disks[i].device = NULL;
+ sd_template.nr_dev--;
+ sd_gendisk.nr_real--;
+ return i;
+ }
+ }
+ {
+ /*
+ * The msdos fs needs to know the hardware sector size
+ * So I have created this table. See ll_rw_blk.c
+ * Jacques Gelinas (Jacques@solucorp.qc.ca)
+ */
+ int m, mb;
+ int sz_quot, sz_rem;
+ int hard_sector = rscsi_disks[i].sector_size;
+ /* There are 16 minors allocated for each major device */
+ for (m=i<<4; m<((i+1)<<4); m++){
+ sd_hardsizes[m] = hard_sector;
+ }
+ mb = rscsi_disks[i].capacity / 1024 * hard_sector / 1024;
+ /* sz = div(m/100, 10); this seems to not be in the libr */
+ m = (mb + 50) / 100;
+ sz_quot = m / 10;
+ sz_rem = m - (10 * sz_quot);
+#ifdef MACH
+ printk ("SCSI device sd%d: hdwr sector= %d bytes."
+ " Sectors= %d [%d MB] [%d.%1d GB]\n",
+ i, hard_sector, rscsi_disks[i].capacity,
+ mb, sz_quot, sz_rem);
+#else
+ printk ("SCSI device sd%c: hdwr sector= %d bytes."
+ " Sectors= %d [%d MB] [%d.%1d GB]\n",
+ i+'a', hard_sector, rscsi_disks[i].capacity,
+ mb, sz_quot, sz_rem);
+#endif
+ }
+ if(rscsi_disks[i].sector_size == 1024)
+ rscsi_disks[i].capacity <<= 1; /* Change into 512 byte sectors */
+ if(rscsi_disks[i].sector_size == 256)
+ rscsi_disks[i].capacity >>= 1; /* Change into 512 byte sectors */
+ }
+
+
+ /*
+ * Unless otherwise specified, this is not write protected.
+ */
+ rscsi_disks[i].write_prot = 0;
+ if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
+ /* FLOPTICAL */
+
+ /*
+ * for removable scsi disk ( FLOPTICAL ) we have to recognise
+ * the Write Protect Flag. This flag is kept in the Scsi_Disk struct
+ * and tested at open !
+ * Daniel Roche ( dan@lectra.fr )
+ */
+
+ memset ((void *) &cmd[0], 0, 8);
+ cmd[0] = MODE_SENSE;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ cmd[2] = 1; /* page code 1 ?? */
+ cmd[4] = 12;
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ /* same code as READCAPA !! */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy again */
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+ wake_up(&SCpnt->device->device_wait);
+
+ if ( the_result ) {
+#ifdef MACH
+ printk ("sd%d: test WP failed, assume Write Protected\n",i);
+#else
+ printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
+#endif
+ rscsi_disks[i].write_prot = 1;
+ } else {
+ rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
+#ifdef MACH
+ printk ("sd%d: Write Protect is %s\n",i,
+ rscsi_disks[i].write_prot ? "on" : "off");
+#else
+ printk ("sd%c: Write Protect is %s\n",i+'a',
+ rscsi_disks[i].write_prot ? "on" : "off");
+#endif
+ }
+
+ } /* check for write protect */
+
+ rscsi_disks[i].ten = 1;
+ rscsi_disks[i].remap = 1;
+ scsi_free(buffer, 512);
+ return i;
+}
+
+/*
+ * The sd_init() function looks at all SCSI drives present, determines
+ * their size, and reads partition table entries for them.
+ */
+
+static int sd_registered = 0;
+
+/*
+ * sd_init() -- one-time driver initialisation.  Registers the SCSI disk
+ * block major and allocates the per-disk and per-minor tables used by
+ * the rest of the driver.  Returns 0 on success, 1 if the major number
+ * could not be obtained.
+ */
+static int sd_init()
+{
+ int i;
+
+ /* Nothing was found by sd_detect(); there is nothing to set up. */
+ if (sd_template.dev_noticed == 0) return 0;
+
+ if(!sd_registered) {
+ if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
+ printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
+ return 1;
+ }
+ sd_registered++;
+ }
+
+ /* We do not support attaching loadable devices yet. */
+ if(rscsi_disks) return 0;
+
+ /* Leave room for SD_EXTRA_DEVS disks attached after boot. */
+ sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
+
+ /* NOTE(review): none of the scsi_init_malloc() results below are
+ * checked -- presumably it cannot fail this early in boot; confirm. */
+ rscsi_disks = (Scsi_Disk *)
+ scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
+ memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
+
+ /* 16 minors (partitions) per disk, hence the << 4 on every
+ * minor-indexed table below. */
+ sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+ memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
+
+ sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+
+ sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+
+ /* Default to a 1k soft blocksize over 512-byte hardware sectors;
+ * sd_init_onedisk() corrects these per disk later. */
+ for(i=0;i<(sd_template.dev_max << 4);i++){
+ sd_blocksizes[i] = 1024;
+ sd_hardsizes[i] = 512;
+ }
+ blksize_size[MAJOR_NR] = sd_blocksizes;
+ hardsect_size[MAJOR_NR] = sd_hardsizes;
+ sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(struct hd_struct),
+ GFP_ATOMIC);
+
+
+ /* Publish the tables through the generic disk structure. */
+ sd_gendisk.max_nr = sd_template.dev_max;
+ sd_gendisk.part = sd;
+ sd_gendisk.sizes = sd_sizes;
+ sd_gendisk.real_devices = (void *) rscsi_disks;
+ return 0;
+}
+
+/*
+ * sd_finish() -- called after all devices have been attached.  Hooks the
+ * request function, links sd_gendisk into the global gendisk chain (if
+ * not already there), sizes every disk that has no capacity yet, and
+ * picks a read-ahead value based on scatter-gather capability.
+ */
+static void sd_finish(void)
+{
+ struct gendisk *gendisk;
+ int i;
+
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+
+ /* Link sd_gendisk into the chain only if it is not already present
+ * (sd_finish can run again on module reload). */
+ for (gendisk = gendisk_head; gendisk != NULL; gendisk = gendisk->next)
+ if (gendisk == &sd_gendisk)
+ break;
+ if (gendisk == NULL)
+ {
+ sd_gendisk.next = gendisk_head;
+ gendisk_head = &sd_gendisk;
+ }
+
+ /* Probe capacity/geometry for every attached but not-yet-sized disk. */
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if (!rscsi_disks[i].capacity &&
+ rscsi_disks[i].device)
+ {
+ if (MODULE_FLAG
+ && !rscsi_disks[i].has_part_table) {
+ sd_sizes[i << 4] = rscsi_disks[i].capacity;
+ /* revalidate does sd_init_onedisk via MAYBE_REINIT*/
+ revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
+ }
+ else
+ i=sd_init_onedisk(i);
+ rscsi_disks[i].has_part_table = 1;
+ }
+
+ /* If our host adapter is capable of scatter-gather, then we increase
+ * the read-ahead to 16 blocks (32 sectors). If not, we use
+ * a two block (4 sector) read ahead.
+ */
+ if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
+ read_ahead[MAJOR_NR] = 120; /* 120 sector read-ahead */
+ else
+ read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
+
+ return;
+}
+
+/*
+ * sd_detect() -- mid-layer callback: decide whether this SCSI device is
+ * ours.  Accepts direct-access disks and magneto-optical devices,
+ * announces them (numeric unit names under MACH, sda-style otherwise),
+ * and counts them in dev_noticed.  Returns 1 if claimed, 0 if not.
+ */
+static int sd_detect(Scsi_Device * SDp){
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
+
+#ifdef MACH
+ printk("Detected scsi %sdisk sd%d at scsi%d, channel %d, id %d, lun %d\n",
+ SDp->removable ? "removable " : "",
+ sd_template.dev_noticed++,
+ SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+#else
+ printk("Detected scsi %sdisk sd%c at scsi%d, channel %d, id %d, lun %d\n",
+ SDp->removable ? "removable " : "",
+ 'a'+ (sd_template.dev_noticed++),
+ SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+#endif
+
+ return 1;
+}
+
+/*
+ * sd_attach() -- mid-layer callback: bind a detected device to the first
+ * free rscsi_disks[] slot and install our request function.  Returns 0
+ * on success, 1 (after undoing the mid-layer's attach count) when the
+ * table is full.
+ */
+static int sd_attach(Scsi_Device * SDp){
+ Scsi_Disk * dpnt;
+ int i;
+
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
+
+ if(sd_template.nr_dev >= sd_template.dev_max) {
+ SDp->attached--;
+ return 1;
+ }
+
+ /* Find the first unused slot. */
+ for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
+ if(!dpnt->device) break;
+
+ /* nr_dev < dev_max guaranteed a free slot above; failing here means
+ * the bookkeeping is inconsistent. */
+ if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
+
+ SDp->scsi_request_fn = do_sd_request;
+ rscsi_disks[i].device = SDp;
+ rscsi_disks[i].has_part_table = 0;
+ sd_template.nr_dev++;
+ sd_gendisk.nr_real++;
+ return 0;
+}
+
+#define DEVICE_BUSY rscsi_disks[target].device->busy
+#define USAGE rscsi_disks[target].device->access_count
+#define CAPACITY rscsi_disks[target].capacity
+#define MAYBE_REINIT sd_init_onedisk(target)
+#define GENDISK_STRUCT sd_gendisk
+
+/* This routine is called to flush all partitions and partition tables
+ * for a changed scsi disk, and then re-read the new partition table.
+ * If we are revalidating a disk because of a media change, then we
+ * enter with usage == 0. If we are using an ioctl, we automatically have
+ * usage == 1 (we need an open channel to use an ioctl :-), so this
+ * is our limit.
+ */
+/*
+ * revalidate_scsidisk() -- flush and forget every partition of the given
+ * disk, re-probe it (via MAYBE_REINIT == sd_init_onedisk), and re-read
+ * its partition table.  Fails with -EBUSY if the device is busy or has
+ * more users than maxusage allows.
+ */
+int revalidate_scsidisk(kdev_t dev, int maxusage){
+ int target;
+ struct gendisk * gdev;
+ unsigned long flags;
+ int max_p;
+ int start;
+ int i;
+
+ target = DEVICE_NR(dev);
+ gdev = &GENDISK_STRUCT;
+
+ /* Take the busy flag atomically w.r.t. interrupts. */
+ save_flags(flags);
+ cli();
+ if (DEVICE_BUSY || USAGE > maxusage) {
+ restore_flags(flags);
+ printk("Device busy for revalidation (usage=%d)\n", USAGE);
+ return -EBUSY;
+ }
+ DEVICE_BUSY = 1;
+ restore_flags(flags);
+
+ max_p = gdev->max_p;
+ start = target << gdev->minor_shift;
+
+ /* Invalidate every minor of this disk before re-probing. */
+ for (i=max_p - 1; i >=0 ; i--) {
+ int minor = start+i;
+ kdev_t devi = MKDEV(MAJOR_NR, minor);
+ sync_dev(devi);
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+ gdev->part[minor].start_sect = 0;
+ gdev->part[minor].nr_sects = 0;
+ /*
+ * Reset the blocksize for everything so that we can read
+ * the partition table.
+ */
+ blksize_size[MAJOR_NR][minor] = 1024;
+ }
+
+#ifdef MAYBE_REINIT
+ MAYBE_REINIT;
+#endif
+
+ /* Whole-disk minor gets the freshly probed capacity; then re-scan
+ * the partition table. */
+ gdev->part[start].nr_sects = CAPACITY;
+ resetup_one_dev(gdev, target);
+
+ DEVICE_BUSY = 0;
+ return 0;
+}
+
+/* file_operations-compatible wrapper: revalidate with maxusage 0
+ * (i.e. only when the device has no other users). */
+static int fop_revalidate_scsidisk(kdev_t dev){
+ return revalidate_scsidisk(dev, 0);
+}
+
+
+/*
+ * sd_detach() -- mid-layer callback: unbind a device.  Finds its slot,
+ * syncs and invalidates every minor, clears the slot, and fixes up the
+ * attach/notice counters.
+ */
+static void sd_detach(Scsi_Device * SDp)
+{
+ Scsi_Disk * dpnt;
+ int i;
+ int max_p;
+ int start;
+
+ for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
+ if(dpnt->device == SDp) {
+
+ /* If we are disconnecting a disk driver, sync and invalidate
+ * everything */
+ max_p = sd_gendisk.max_p;
+ start = i << sd_gendisk.minor_shift;
+
+ /* NOTE(review): this inner loop reuses the outer index `i`
+ * (after `start` has captured the slot); harmless only because
+ * we return immediately after the cleanup below. */
+ for (i=max_p - 1; i >=0 ; i--) {
+ int minor = start+i;
+ kdev_t devi = MKDEV(MAJOR_NR, minor);
+ sync_dev(devi);
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+ sd_gendisk.part[minor].start_sect = 0;
+ sd_gendisk.part[minor].nr_sects = 0;
+ sd_sizes[minor] = 0;
+ }
+
+ dpnt->has_part_table = 0;
+ dpnt->device = NULL;
+ dpnt->capacity = 0;
+ SDp->attached--;
+ sd_template.dev_noticed--;
+ sd_template.nr_dev--;
+ sd_gendisk.nr_real--;
+ return;
+ }
+ return;
+}
+
+#ifdef MODULE
+
+/* Module entry point: hand our template (with usage counter) to the
+ * SCSI mid-layer, which drives detect/attach/init for us. */
+int init_module(void) {
+ sd_template.usage_count = &mod_use_count_;
+ return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
+}
+
+/*
+ * Module exit: unregister from the SCSI mid-layer and the block layer,
+ * release every table sd_init() allocated, unlink sd_gendisk from the
+ * global gendisk chain, and clear the global block-layer hooks.
+ */
+void cleanup_module( void)
+{
+ struct gendisk * prev_sdgd;
+ struct gendisk * sdgd;
+
+ scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
+ unregister_blkdev(SCSI_DISK_MAJOR, "sd");
+ sd_registered--;
+ if( rscsi_disks != NULL )
+ {
+ /* Free with the same sizes sd_init() allocated: dev_max entries
+ * for the disk table (dev_noticed may have drifted since init,
+ * sd_detach() decrements it), and dev_max << 4 ints for each
+ * minor-indexed table. */
+ scsi_init_free((char *) rscsi_disks,
+ sd_template.dev_max * sizeof(Scsi_Disk));
+
+ scsi_init_free((char *) sd_sizes,
+ (sd_template.dev_max << 4) * sizeof(int));
+ scsi_init_free((char *) sd_blocksizes,
+ (sd_template.dev_max << 4) * sizeof(int));
+ scsi_init_free((char *) sd_hardsizes,
+ (sd_template.dev_max << 4) * sizeof(int));
+ scsi_init_free((char *) sd,
+ (sd_template.dev_max << 4) * sizeof(struct hd_struct));
+ /*
+ * Now remove sd_gendisk from the linked list.  Stop at the end
+ * of the chain as well, so a missing entry is reported instead
+ * of dereferencing NULL (which also made the diagnostic below
+ * unreachable).
+ */
+ sdgd = gendisk_head;
+ prev_sdgd = NULL;
+ while(sdgd != NULL && sdgd != &sd_gendisk)
+ {
+ prev_sdgd = sdgd;
+ sdgd = sdgd->next;
+ }
+
+ if(sdgd != &sd_gendisk)
+ printk("sd_gendisk not in disk chain.\n");
+ else {
+ if(prev_sdgd != NULL)
+ prev_sdgd->next = sdgd->next;
+ else
+ gendisk_head = sdgd->next;
+ }
+ }
+
+ blksize_size[MAJOR_NR] = NULL;
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_size[MAJOR_NR] = NULL;
+ hardsect_size[MAJOR_NR] = NULL;
+ read_ahead[MAJOR_NR] = 0;
+ sd_template.dev_max = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/sd.h b/linux/src/drivers/scsi/sd.h
new file mode 100644
index 0000000..02b3437
--- /dev/null
+++ b/linux/src/drivers/scsi/sd.h
@@ -0,0 +1,65 @@
+/*
+ * sd.h Copyright (C) 1992 Drew Eckhardt
+ * SCSI disk driver header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+#ifndef _SD_H
+#define _SD_H
+/*
+ $Header: cvs/gnumach/linux/src/drivers/scsi/Attic/sd.h,v 1.1 1999/04/26 05:55:03 tb Exp $
+*/
+
+#ifndef _SCSI_H
+#include "scsi.h"
+#endif
+
+#ifndef _GENDISK_H
+#include <linux/genhd.h>
+#endif
+
+extern struct hd_struct * sd;
+
+/* Per-unit state for one SCSI disk; one entry per slot in the
+ * rscsi_disks[] table owned by sd.c. */
+typedef struct scsi_disk {
+ unsigned capacity; /* size in blocks */
+ unsigned sector_size; /* size in bytes */
+ Scsi_Device *device;
+ unsigned char ready; /* flag ready for FLOPTICAL */
+ unsigned char write_prot; /* flag write_protect for rmvable dev */
+ unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */
+ unsigned char sector_bit_shift; /* power of 2 sectors per FS block */
+ unsigned ten:1; /* support ten byte read / write */
+ unsigned remap:1; /* support remapping */
+ unsigned has_part_table:1; /* has partition table */
+} Scsi_Disk;
+
+extern Scsi_Disk * rscsi_disks;
+
+extern int revalidate_scsidisk(kdev_t dev, int maxusage);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/sd_ioctl.c b/linux/src/drivers/scsi/sd_ioctl.c
new file mode 100644
index 0000000..4c58f04
--- /dev/null
+++ b/linux/src/drivers/scsi/sd_ioctl.c
@@ -0,0 +1,128 @@
+/*
+ * drivers/scsi/sd_ioctl.c
+ *
+ * ioctl handling for SCSI disks
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/errno.h>
+
+#include <asm/segment.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include <scsi/scsi_ioctl.h>
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsicam.h> /* must follow "hosts.h" */
+
+/*
+ * sd_ioctl() -- ioctl dispatcher for SCSI disks.  Handles geometry,
+ * size, read-ahead, buffer-flush and partition-rescan requests itself
+ * and forwards everything else to the generic scsi_ioctl().
+ */
+int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
+{
+ kdev_t dev = inode->i_rdev;
+ int error;
+ struct Scsi_Host * host;
+ int diskinfo[4];
+ struct hd_geometry *loc = (struct hd_geometry *) arg;
+
+ switch (cmd) {
+ case HDIO_GETGEO: /* Return BIOS disk parameters */
+ if (!loc) return -EINVAL;
+#ifndef MACH
+ error = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+ if (error)
+ return error;
+#endif
+ /* MINOR(dev) >> 4: 16 minors per disk, so this is the unit. */
+ host = rscsi_disks[MINOR(dev) >> 4].device->host;
+
+/* default to most commonly used values */
+
+ diskinfo[0] = 0x40;
+ diskinfo[1] = 0x20;
+ diskinfo[2] = rscsi_disks[MINOR(dev) >> 4].capacity >> 11;
+
+/* override with calculated, extended default, or driver values */
+
+ if(host->hostt->bios_param != NULL)
+ host->hostt->bios_param(&rscsi_disks[MINOR(dev) >> 4],
+ dev,
+ &diskinfo[0]);
+ else scsicam_bios_param(&rscsi_disks[MINOR(dev) >> 4],
+ dev, &diskinfo[0]);
+
+#ifdef MACH
+ /* Under Mach the caller's struct is directly addressable. */
+ loc->heads = diskinfo[0];
+ loc->sectors = diskinfo[1];
+ loc->cylinders = diskinfo[2];
+ loc->start = sd[MINOR(inode->i_rdev)].start_sect;
+#else
+ put_user(diskinfo[0], &loc->heads);
+ put_user(diskinfo[1], &loc->sectors);
+ put_user(diskinfo[2], &loc->cylinders);
+ put_user(sd[MINOR(inode->i_rdev)].start_sect, &loc->start);
+#endif
+ return 0;
+ case BLKGETSIZE: /* Return device size */
+ if (!arg) return -EINVAL;
+ /* NOTE(review): unlike HDIO_GETGEO above, this verify_area is not
+ * guarded by #ifndef MACH -- confirm it is a no-op under Mach. */
+ error = verify_area(VERIFY_WRITE, (long *) arg, sizeof(long));
+ if (error)
+ return error;
+ put_user(sd[MINOR(inode->i_rdev)].nr_sects,
+ (long *) arg);
+ return 0;
+
+ case BLKRASET:
+ if (!suser())
+ return -EACCES;
+ if(!(inode->i_rdev)) return -EINVAL;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ case BLKRAGET:
+ if (!arg)
+ return -EINVAL;
+ error = verify_area(VERIFY_WRITE, (int *) arg, sizeof(int));
+ if (error)
+ return error;
+ put_user(read_ahead[MAJOR(inode->i_rdev)], (int *) arg);
+ return 0;
+
+ case BLKFLSBUF:
+ if(!suser()) return -EACCES;
+ if(!(inode->i_rdev)) return -EINVAL;
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case BLKRRPART: /* Re-read partition tables */
+ return revalidate_scsidisk(dev, 1);
+
+ RO_IOCTLS(dev, arg);
+
+ default:
+ return scsi_ioctl(rscsi_disks[MINOR(dev) >> 4].device , cmd, (void *) arg);
+ }
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/seagate.c b/linux/src/drivers/scsi/seagate.c
new file mode 100644
index 0000000..3dd8f9d
--- /dev/null
+++ b/linux/src/drivers/scsi/seagate.c
@@ -0,0 +1,1679 @@
+/*
+ * seagate.c Copyright (C) 1992, 1993 Drew Eckhardt
+ * low level scsi driver for ST01/ST02, Future Domain TMC-885,
+ * TMC-950 by
+ *
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Note : TMC-880 boards don't work because they have two bits in
+ * the status register flipped, I'll fix this "RSN"
+ *
+ * This card does all the I/O via memory mapped I/O, so there is no need
+ * to check or allocate a region of the I/O address space.
+ */
+
+/*
+ * Configuration :
+ * To use without BIOS -DOVERRIDE=base_address -DCONTROLLER=FD or SEAGATE
+ * -DIRQ will override the default of 5.
+ * Note: You can now set these options from the kernel's "command line".
+ * The syntax is:
+ *
+ * st0x=ADDRESS,IRQ (for a Seagate controller)
+ * or:
+ * tmc8xx=ADDRESS,IRQ (for a TMC-8xx or TMC-950 controller)
+ * eg:
+ * tmc8xx=0xC8000,15
+ *
+ * will configure the driver for a TMC-8xx style controller using IRQ 15
+ * with a base address of 0xC8000.
+ *
+ * -DFAST or -DFAST32 will use blind transfers where possible
+ *
+ * -DARBITRATE will cause the host adapter to arbitrate for the
+ * bus for better SCSI-II compatibility, rather than just
+ * waiting for BUS FREE and then doing its thing. Should
+ * let us do one command per Lun when I integrate my
+ * reorganization changes into the distribution sources.
+ *
+ * -DSLOW_HANDSHAKE will allow compatibility with broken devices that don't
+ * handshake fast enough (ie, some CD ROM's) for the Seagate
+ * code.
+ *
+ * -DSLOW_RATE=x, x some number will let you specify a default
+ * transfer rate if handshaking isn't working correctly.
+ */
+
+#ifdef MACH
+#define ARBITRATE
+#define SLOW_HANDSHAKE
+#define FAST32
+#endif
+
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "seagate.h"
+#include "constants.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_seagate = {
+ PROC_SCSI_SEAGATE, 7, "seagate",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+#ifndef IRQ
+#define IRQ 5
+#endif
+
+#if (defined(FAST32) && !defined(FAST))
+#define FAST
+#endif
+
+#if defined(SLOW_RATE) && !defined(SLOW_HANDSHAKE)
+#define SLOW_HANDSHAKE
+#endif
+
+#if defined(SLOW_HANDSHAKE) && !defined(SLOW_RATE)
+#define SLOW_RATE 50
+#endif
+
+
+#if defined(LINKED)
+#undef LINKED /* Linked commands are currently broken ! */
+#endif
+
+static int internal_command(unsigned char target, unsigned char lun,
+ const void *cmnd,
+ void *buff, int bufflen, int reselect);
+
+static int incommand; /*
+ set if arbitration has finished and we are
+ in some command phase.
+ */
+
+static const void *base_address = NULL; /*
+ Where the card ROM starts,
+ used to calculate memory mapped
+ register location.
+ */
+#ifdef notyet
+static volatile int abort_confirm = 0;
+#endif
+
+static volatile void *st0x_cr_sr; /*
+ control register write,
+ status register read.
+ 256 bytes in length.
+
+ Read is status of SCSI BUS,
+ as per STAT masks.
+
+ */
+
+
+static volatile void *st0x_dr; /*
+ data register, read write
+ 256 bytes in length.
+ */
+
+
+static volatile int st0x_aborted=0; /*
+ set when we are aborted, ie by a time out, etc.
+ */
+
+static unsigned char controller_type = 0; /* set to SEAGATE for ST0x boards or FD for TMC-8xx boards */
+static unsigned char irq = IRQ;
+
+#define retcode(result) (((result) << 16) | (message << 8) | status)
+#define STATUS (*(volatile unsigned char *) st0x_cr_sr)
+#define CONTROL STATUS
+#define DATA (*(volatile unsigned char *) st0x_dr)
+#define WRITE_CONTROL(d) { writeb((d), st0x_cr_sr); }
+#define WRITE_DATA(d) { writeb((d), st0x_dr); }
+
+/* Kernel command-line hook "st0x=ADDRESS,IRQ": force Seagate controller
+ * type with the given ROM base address and IRQ (see file header). */
+void st0x_setup (char *str, int *ints) {
+ controller_type = SEAGATE;
+ base_address = (void *) ints[1];
+ irq = ints[2];
+}
+
+/* Kernel command-line hook "tmc8xx=ADDRESS,IRQ": force Future Domain
+ * TMC-8xx/TMC-950 controller type with the given base address and IRQ. */
+void tmc8xx_setup (char *str, int *ints) {
+ controller_type = FD;
+ base_address = (void *) ints[1];
+ irq = ints[2];
+}
+
+
+#ifndef OVERRIDE
+static const char * seagate_bases[] = {
+ (char *) 0xc8000, (char *) 0xca000, (char *) 0xcc000,
+ (char *) 0xce000, (char *) 0xdc000, (char *) 0xde000
+};
+
+typedef struct {
+ const char *signature ;
+ unsigned offset;
+ unsigned length;
+ unsigned char type;
+} Signature;
+
+static const Signature signatures[] = {
+#ifdef CONFIG_SCSI_SEAGATE
+{"ST01 v1.7 (C) Copyright 1987 Seagate", 15, 37, SEAGATE},
+{"SCSI BIOS 2.00 (C) Copyright 1987 Seagate", 15, 40, SEAGATE},
+
+/*
+ * The following two lines are NOT mistakes. One detects ROM revision
+ * 3.0.0, the other 3.2. Since seagate has only one type of SCSI adapter,
+ * and this is not going to change, the "SEAGATE" and "SCSI" together
+ * are probably "good enough"
+ */
+
+{"SEAGATE SCSI BIOS ",16, 17, SEAGATE},
+{"SEAGATE SCSI BIOS ",17, 17, SEAGATE},
+
+/*
+ * However, future domain makes several incompatible SCSI boards, so specific
+ * signatures must be used.
+ */
+
+{"FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90",5, 47, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90",5, 47, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92", 5, 44, FD},
+{"IBM F1 BIOS V1.1004/30/92", 5, 25, FD},
+{"FUTURE DOMAIN TMC-950", 5, 21, FD},
+#endif /* CONFIG_SCSI_SEAGATE */
+}
+;
+
+#define NUM_SIGNATURES (sizeof(signatures) / sizeof(Signature))
+#endif /* n OVERRIDE */
+
+/*
+ * hostno stores the hostnumber, as told to us by the init routine.
+ */
+
+static int hostno = -1;
+static void seagate_reconnect_intr(int, void *, struct pt_regs *);
+
+#ifdef FAST
+static int fast = 1;
+#endif
+
+#ifdef SLOW_HANDSHAKE
+/*
+ * Support for broken devices :
+ * The Seagate board has a handshaking problem. Namely, a lack
+ * thereof for slow devices. You can blast 600K/second through
+ * it if you are polling for each byte, more if you do a blind
+ * transfer. In the first case, with a fast device, REQ will
+ * transition high-low or high-low-high before your loop restarts
+ * and you'll have no problems. In the second case, the board
+ * will insert wait states for up to 13.2 usecs for REQ to
+ * transition low->high, and everything will work.
+ *
+ * However, there's nothing in the state machine that says
+ * you *HAVE* to see a high-low-high set of transitions before
+ * sending the next byte, and slow things like the Trantor CD ROMS
+ * will break because of this.
+ *
+ * So, we need to slow things down, which isn't as simple as it
+ * seems. We can't slow things down period, because then people
+ * who don't recompile their kernels will shoot me for ruining
+ * their performance. We need to do it on a case per case basis.
+ *
+ * The best for performance will be to, only for borken devices
+ * (this is stored on a per-target basis in the scsi_devices array)
+ *
+ * Wait for a low->high transition before continuing with that
+ * transfer. If we timeout, continue anyways. We don't need
+ * a long timeout, because REQ should only be asserted until the
+ * corresponding ACK is received and processed.
+ *
+ * Note that we can't use the system timer for this, because of
+ * resolution, and we *really* can't use the timer chip since
+ * gettimeofday() and the beeper routines use that. So,
+ * the best thing for us to do will be to calibrate a timing
+ * loop in the initialization code using the timer chip before
+ * gettimeofday() can screw with it.
+ */
+
+static int borken_calibration = 0;
+/*
+ * Calibrate the delay loop used by borken_wait() for broken ("borken")
+ * slow-handshaking targets: count iterations over a 25-jiffy window
+ * and scale to SLOW_RATE K/sec.  Must run before gettimeofday() users
+ * disturb the timer (see the block comment above).
+ * NOTE(review): the ".25 seconds" comment assumes HZ == 100 -- confirm
+ * for this kernel; BORKEN_RATE in the debug printk is not defined in
+ * this file, so DEBUG_BORKEN builds may not compile.
+ */
+static void borken_init (void) {
+ register int count = 0, start = jiffies + 1, stop = start + 25;
+
+ while (jiffies < start);
+ for (;jiffies < stop; ++count);
+
+/*
+ * Ok, we now have a count for .25 seconds. Convert to a
+ * count per second and divide by transfer rate in K.
+ */
+
+ borken_calibration = (count * 4) / (SLOW_RATE*1024);
+
+ if (borken_calibration < 1)
+ borken_calibration = 1;
+#if (DEBUG & DEBUG_BORKEN)
+ printk("scsi%d : borken calibrated to %dK/sec, %d cycles per transfer\n",
+ hostno, BORKEN_RATE, borken_calibration);
+#endif
+}
+
+/* Spin for up to borken_calibration iterations waiting for the target
+ * to drop REQ; gives slow devices time to handshake (see borken_init). */
+static inline void borken_wait(void) {
+ register int count;
+ for (count = borken_calibration; count && (STATUS & STAT_REQ);
+ --count);
+#if (DEBUG & DEBUG_BORKEN)
+ if (count)
+ printk("scsi%d : borken timeout\n", hostno);
+#endif
+}
+
+#endif /* def SLOW_HANDSHAKE */
+
+/*
+ * seagate_st0x_detect() -- probe for an ST0x / TMC-8xx board.  Order:
+ * command-line override (st0x=/tmc8xx=), then compile-time OVERRIDE,
+ * then a BIOS-signature scan of the known ROM base addresses.  On
+ * success registers the host, grabs the IRQ and returns 1; on any
+ * failure returns 0.  Must only be called once (guarded by hostno).
+ */
+int seagate_st0x_detect (Scsi_Host_Template * tpnt)
+ {
+ struct Scsi_Host *instance;
+#ifndef OVERRIDE
+ int i,j;
+#endif
+
+ tpnt->proc_dir = &proc_scsi_seagate;
+/*
+ * First, we try for the manual override.
+ */
+#ifdef DEBUG
+ printk("Autodetecting ST0x / TMC-8xx\n");
+#endif
+
+ if (hostno != -1)
+ {
+ printk ("ERROR : seagate_st0x_detect() called twice.\n");
+ return 0;
+ }
+
+ /* If the user specified the controller type from the command line,
+ controller_type will be non-zero, so don't try to detect one */
+
+ if (!controller_type) {
+#ifdef OVERRIDE
+ base_address = (void *) OVERRIDE;
+
+/* CONTROLLER is used to override controller (SEAGATE or FD). PM: 07/01/93 */
+#ifdef CONTROLLER
+ controller_type = CONTROLLER;
+#else
+#error Please use -DCONTROLLER=SEAGATE or -DCONTROLLER=FD to override controller type
+#endif /* CONTROLLER */
+#ifdef DEBUG
+ printk("Base address overridden to %x, controller type is %s\n",
+ base_address,controller_type == SEAGATE ? "SEAGATE" : "FD");
+#endif
+#else /* OVERRIDE */
+/*
+ * To detect this card, we simply look for the signature
+ * from the BIOS version notice in all the possible locations
+ * of the ROM's. This has a nice side effect of not trashing
+ * any register locations that might be used by something else.
+ *
+ * XXX - note that we probably should be probing the address
+ * space for the on-board RAM instead.
+ */
+
+ for (i = 0; i < (sizeof (seagate_bases) / sizeof (char * )); ++i)
+ for (j = 0; !base_address && j < NUM_SIGNATURES; ++j)
+ if (!memcmp ((const void *) (seagate_bases[i] +
+ signatures[j].offset), (const void *) signatures[j].signature,
+ signatures[j].length)) {
+ base_address = (const void *) seagate_bases[i];
+ controller_type = signatures[j].type;
+ }
+#endif /* OVERRIDE */
+ } /* (! controller_type) */
+
+ tpnt->this_id = (controller_type == SEAGATE) ? 7 : 6;
+ tpnt->name = (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR;
+
+ if (base_address)
+ {
+ /* Control/status and data registers sit at fixed offsets from the
+ * ROM base; the offsets differ between Seagate and FD boards. */
+ st0x_cr_sr =(void *) (((const unsigned char *) base_address) + (controller_type == SEAGATE ? 0x1a00 : 0x1c00));
+ st0x_dr = (void *) (((const unsigned char *) base_address ) + (controller_type == SEAGATE ? 0x1c00 : 0x1e00));
+#ifdef DEBUG
+ printk("%s detected. Base address = %x, cr = %x, dr = %x\n", tpnt->name, base_address, st0x_cr_sr, st0x_dr);
+#endif
+/*
+ * At all times, we will use IRQ 5. Should also check for IRQ3 if we
+ * loose our first interrupt.
+ */
+ instance = scsi_register(tpnt, 0);
+ hostno = instance->host_no;
+ if (request_irq((int) irq, seagate_reconnect_intr, SA_INTERRUPT,
+ (controller_type == SEAGATE) ? "seagate" : "tmc-8xx", NULL)) {
+ printk("scsi%d : unable to allocate IRQ%d\n",
+ hostno, (int) irq);
+ return 0;
+ }
+ instance->irq = irq;
+ instance->io_port = (unsigned int) base_address;
+#ifdef SLOW_HANDSHAKE
+ borken_init();
+#endif
+
+ /* Report which compile-time options are active. */
+ printk("%s options:"
+#ifdef ARBITRATE
+ " ARBITRATE"
+#endif
+#ifdef SLOW_HANDSHAKE
+ " SLOW_HANDSHAKE"
+#endif
+#ifdef FAST
+#ifdef FAST32
+ " FAST32"
+#else
+ " FAST"
+#endif
+#endif
+#ifdef LINKED
+ " LINKED"
+#endif
+ "\n", tpnt->name);
+ return 1;
+ }
+ else
+ {
+#ifdef DEBUG
+ printk("ST0x / TMC-8xx not detected.\n");
+#endif
+ return 0;
+ }
+ }
+
+const char *seagate_st0x_info(struct Scsi_Host * shpnt) {
+ static char buffer[64];
+ sprintf(buffer, "%s at irq %d, address 0x%05X",
+ (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR,
+ irq, (unsigned int)base_address);
+ return buffer;
+}
+
+int seagate_st0x_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout)
+{
+ const char *info = seagate_st0x_info(NULL);
+ int len;
+ int pos;
+ int begin;
+
+ if (inout) return(-ENOSYS);
+
+ begin = 0;
+ strcpy(buffer,info);
+ strcat(buffer,"\n");
+
+ pos = len = strlen(buffer);
+
+ if (pos<offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+ if ( len > length ) len = length;
+ return(len);
+}
+
+/*
+ * These are our saved pointers for the outstanding command that is
+ * waiting for a reconnect
+ */
+
+static unsigned char current_target, current_lun;
+static unsigned char *current_cmnd, *current_data;
+static int current_nobuffs;
+static struct scatterlist *current_buffer;
+static int current_bufflen;
+
+#ifdef LINKED
+
+/*
+ * linked_connected indicates whether or not we are currently connected to
+ * linked_target, linked_lun and in an INFORMATION TRANSFER phase,
+ * using linked commands.
+ */
+
+static int linked_connected = 0;
+static unsigned char linked_target, linked_lun;
+#endif
+
+
+static void (*done_fn)(Scsi_Cmnd *) = NULL;
+static Scsi_Cmnd * SCint = NULL;
+
+/*
+ * These control whether or not disconnect / reconnect will be attempted,
+ * or are being attempted.
+ */
+
+#define NO_RECONNECT 0
+#define RECONNECT_NOW 1
+#define CAN_RECONNECT 2
+
+#ifdef LINKED
+
+/*
+ * LINKED_RIGHT indicates that we are currently connected to the correct target
+ * for this command, LINKED_WRONG indicates that we are connected to the wrong
+ * target. Note that these imply CAN_RECONNECT.
+ */
+
+#define LINKED_RIGHT 3
+#define LINKED_WRONG 4
+#endif
+
+/*
+ * This determines if we are expecting to reconnect or not.
+ */
+
+static int should_reconnect = 0;
+
+/*
+ * The seagate_reconnect_intr routine is called when a target reselects the
+ * host adapter. This occurs on the interrupt triggered by the target
+ * asserting SEL.
+ */
+
+static void seagate_reconnect_intr(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ int temp;
+ Scsi_Cmnd * SCtmp;
+
+/* enable all other interrupts. */
+ sti();
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : seagate_reconnect_intr() called\n", hostno);
+#endif
+
+ if (!should_reconnect)
+ printk("scsi%d: unexpected interrupt.\n", hostno);
+ else {
+ should_reconnect = 0;
+
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : internal_command("
+ "%d, %08x, %08x, %d, RECONNECT_NOW\n", hostno,
+ current_target, current_data, current_bufflen);
+#endif
+
+ temp = internal_command (current_target, current_lun,
+ current_cmnd, current_data, current_bufflen,
+ RECONNECT_NOW);
+
+ if (msg_byte(temp) != DISCONNECT) {
+ if (done_fn) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : done_fn(%d,%08x)", hostno,
+ hostno, temp);
+#endif
+ if(!SCint) panic("SCint == NULL in seagate");
+ SCtmp = SCint;
+ SCint = NULL;
+ SCtmp->result = temp;
+ done_fn (SCtmp);
+ } else
+ printk("done_fn() not defined.\n");
+ }
+ }
+ }
+
+/*
+ * The seagate_st0x_queue_command() function provides a queued interface
+ * to the seagate SCSI driver. Basically, it just passes control onto the
+ * internal_command() function, after fixing it so that the done_fn()
+ * is set to the one passed to the function. We have to be very careful,
+ * because there are some commands on some devices that do not disconnect,
+ * and if we simply call the done_fn when the command is done then another
+ * command is started and queue_command is called again... We end up
+ * overflowing the kernel stack, and this tends not to be such a good idea.
+ */
+
+static int recursion_depth = 0;
+
+int seagate_st0x_queue_command (Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+ {
+ int result, reconnect;
+ Scsi_Cmnd * SCtmp;
+
+ done_fn = done;
+ current_target = SCpnt->target;
+ current_lun = SCpnt->lun;
+ current_cmnd = SCpnt->cmnd;
+ current_data = (unsigned char *) SCpnt->request_buffer;
+ current_bufflen = SCpnt->request_bufflen;
+ SCint = SCpnt;
+ if(recursion_depth) {
+ return 0;
+ };
+ recursion_depth++;
+ do{
+#ifdef LINKED
+/*
+ * Set linked command bit in control field of SCSI command.
+ */
+
+ current_cmnd[SCpnt->cmd_len] |= 0x01;
+ if (linked_connected) {
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : using linked commands, current I_T_L nexus is ",
+ hostno);
+#endif
+ if ((linked_target == current_target) &&
+ (linked_lun == current_lun)) {
+#if (DEBUG & DEBUG_LINKED)
+ printk("correct\n");
+#endif
+ reconnect = LINKED_RIGHT;
+ } else {
+#if (DEBUG & DEBUG_LINKED)
+ printk("incorrect\n");
+#endif
+ reconnect = LINKED_WRONG;
+ }
+ } else
+#endif /* LINKED */
+ reconnect = CAN_RECONNECT;
+
+
+
+
+
+ result = internal_command (SCint->target, SCint->lun, SCint->cmnd, SCint->request_buffer,
+ SCint->request_bufflen,
+ reconnect);
+ if (msg_byte(result) == DISCONNECT) break;
+ SCtmp = SCint;
+ SCint = NULL;
+ SCtmp->result = result;
+ done_fn (SCtmp);
+ } while(SCint);
+ recursion_depth--;
+ return 0;
+ }
+
+int seagate_st0x_command (Scsi_Cmnd * SCpnt) {
+ return internal_command (SCpnt->target, SCpnt->lun, SCpnt->cmnd, SCpnt->request_buffer,
+ SCpnt->request_bufflen,
+ (int) NO_RECONNECT);
+}
+
+static int internal_command(unsigned char target, unsigned char lun, const void *cmnd,
+ void *buff, int bufflen, int reselect) {
+ int len = 0;
+ unsigned char *data = NULL;
+ struct scatterlist *buffer = NULL;
+ int nobuffs = 0;
+ int clock;
+ int temp;
+#ifdef SLOW_HANDSHAKE
+ int borken; /* Does the current target require Very Slow I/O ? */
+#endif
+
+
+#if (DEBUG & PHASE_DATAIN) || (DEBUG & PHASE_DATOUT)
+ int transfered = 0;
+#endif
+
+#if (((DEBUG & PHASE_ETC) == PHASE_ETC) || (DEBUG & PRINT_COMMAND) || \
+ (DEBUG & PHASE_EXIT))
+ int i;
+#endif
+
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ int phase=0, newphase;
+#endif
+
+ int done = 0;
+ unsigned char status = 0;
+ unsigned char message = 0;
+ register unsigned char status_read;
+
+ unsigned transfersize = 0, underflow = 0;
+
+ incommand = 0;
+ st0x_aborted = 0;
+
+#ifdef SLOW_HANDSHAKE
+ borken = (int) SCint->device->borken;
+#endif
+
+#if (DEBUG & PRINT_COMMAND)
+ printk ("scsi%d : target = %d, command = ", hostno, target);
+ print_command((unsigned char *) cmnd);
+ printk("\n");
+#endif
+
+#if (DEBUG & PHASE_RESELECT)
+ switch (reselect) {
+ case RECONNECT_NOW :
+ printk("scsi%d : reconnecting\n", hostno);
+ break;
+#ifdef LINKED
+ case LINKED_RIGHT :
+ printk("scsi%d : connected, can reconnect\n", hostno);
+ break;
+ case LINKED_WRONG :
+ printk("scsi%d : connected to wrong target, can reconnect\n",
+ hostno);
+ break;
+#endif
+ case CAN_RECONNECT :
+ printk("scsi%d : allowed to reconnect\n", hostno);
+ break;
+ default :
+ printk("scsi%d : not allowed to reconnect\n", hostno);
+ }
+#endif
+
+
+ if (target == (controller_type == SEAGATE ? 7 : 6))
+ return DID_BAD_TARGET;
+
+/*
+ * We work it differently depending on if this is "the first time,"
+ * or a reconnect. If this is a reselect phase, then SEL will
+ * be asserted, and we must skip selection / arbitration phases.
+ */
+
+ switch (reselect) {
+ case RECONNECT_NOW:
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : phase RESELECT \n", hostno);
+#endif
+
+/*
+ * At this point, we should find the logical OR of our ID and the original
+ * target's ID on the BUS, with BSY, SEL, and I/O signals asserted.
+ *
+ * After ARBITRATION phase is completed, only SEL, BSY, and the
+ * target ID are asserted. A valid initiator ID is not on the bus
+ * until IO is asserted, so we must wait for that.
+ */
+ clock = jiffies + 10;
+ for (;;) {
+ temp = STATUS;
+ if ((temp & STAT_IO) && !(temp & STAT_BSY))
+ break;
+
+ if (jiffies > clock) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : RESELECT timed out while waiting for IO .\n",
+ hostno);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+ }
+
+/*
+ * After I/O is asserted by the target, we can read our ID and its
+ * ID off of the BUS.
+ */
+
+ if (!((temp = DATA) & (controller_type == SEAGATE ? 0x80 : 0x40)))
+ {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : detected reconnect request to different target.\n"
+ "\tData bus = %d\n", hostno, temp);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+
+ if (!(temp & (1 << current_target)))
+ {
+ printk("scsi%d : Unexpected reselect interrupt. Data bus = %d\n",
+ hostno, temp);
+ return (DID_BAD_INTR << 16);
+ }
+
+ buffer=current_buffer;
+ cmnd=current_cmnd; /* WDE add */
+ data=current_data; /* WDE add */
+ len=current_bufflen; /* WDE add */
+ nobuffs=current_nobuffs;
+
+/*
+ * We have determined that we have been selected. At this point,
+ * we must respond to the reselection by asserting BSY ourselves
+ */
+
+#if 1
+ CONTROL = (BASE_CMD | CMD_DRVR_ENABLE | CMD_BSY);
+#else
+ CONTROL = (BASE_CMD | CMD_BSY);
+#endif
+
+/*
+ * The target will drop SEL, and raise BSY, at which time we must drop
+ * BSY.
+ */
+
+ for (clock = jiffies + 10; (jiffies < clock) && (STATUS & STAT_SEL););
+
+ if (jiffies >= clock)
+ {
+ CONTROL = (BASE_CMD | CMD_INTR);
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : RESELECT timed out while waiting for SEL.\n",
+ hostno);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+
+ CONTROL = BASE_CMD;
+
+/*
+ * At this point, we have connected with the target and can get
+ * on with our lives.
+ */
+ break;
+ case CAN_RECONNECT:
+
+#ifdef LINKED
+/*
+ * This is a bletcherous hack, just as bad as the Unix #! interpreter stuff.
+ * If it turns out we are using the wrong I_T_L nexus, the easiest way to deal
+ * with it is to go into our INFORMATION TRANSFER PHASE code, send a ABORT
+ * message on MESSAGE OUT phase, and then loop back to here.
+ */
+
+connect_loop :
+
+#endif
+
+#if (DEBUG & PHASE_BUS_FREE)
+ printk ("scsi%d : phase = BUS FREE \n", hostno);
+#endif
+
+/*
+ * BUS FREE PHASE
+ *
+ * On entry, we make sure that the BUS is in a BUS FREE
+ * phase, by insuring that both BSY and SEL are low for
+ * at least one bus settle delay. Several reads help
+ * eliminate wire glitch.
+ */
+
+ clock = jiffies + ST0X_BUS_FREE_DELAY;
+
+#if !defined (ARBITRATE)
+ while (((STATUS | STATUS | STATUS) &
+ (STAT_BSY | STAT_SEL)) &&
+ (!st0x_aborted) && (jiffies < clock));
+
+ if (jiffies > clock)
+ return retcode(DID_BUS_BUSY);
+ else if (st0x_aborted)
+ return retcode(st0x_aborted);
+#endif
+
+#if (DEBUG & PHASE_SELECTION)
+ printk("scsi%d : phase = SELECTION\n", hostno);
+#endif
+
+ clock = jiffies + ST0X_SELECTION_DELAY;
+
+/*
+ * Arbitration/selection procedure :
+ * 1. Disable drivers
+ * 2. Write HOST adapter address bit
+ * 3. Set start arbitration.
+ * 4. We get either ARBITRATION COMPLETE or SELECT at this
+ * point.
+ * 5. OR our ID and targets on bus.
+ * 6. Enable SCSI drivers and asserted SEL and ATTN
+ */
+
+#if defined(ARBITRATE)
+ cli();
+ CONTROL = 0;
+ DATA = (controller_type == SEAGATE) ? 0x80 : 0x40;
+ CONTROL = CMD_START_ARB;
+ sti();
+ while (!((status_read = STATUS) & (STAT_ARB_CMPL | STAT_SEL)) &&
+ (jiffies < clock) && !st0x_aborted);
+
+ if (!(status_read & STAT_ARB_CMPL)) {
+#if (DEBUG & PHASE_SELECTION)
+ if (status_read & STAT_SEL)
+ printk("scsi%d : arbitration lost\n", hostno);
+ else
+ printk("scsi%d : arbitration timeout.\n", hostno);
+#endif
+ CONTROL = BASE_CMD;
+ return retcode(DID_NO_CONNECT);
+ };
+
+#if (DEBUG & PHASE_SELECTION)
+ printk("scsi%d : arbitration complete\n", hostno);
+#endif
+#endif
+
+
+/*
+ * When the SCSI device decides that we're gawking at it, it will
+ * respond by asserting BUSY on the bus.
+ *
+ * Note : the Seagate ST-01/02 product manual says that we should
+ * twiddle the DATA register before the control register. However,
+ * this does not work reliably so we do it the other way around.
+ *
+ * Probably could be a problem with arbitration too, we really should
+ * try this with a SCSI protocol or logic analyzer to see what is
+ * going on.
+ */
+ cli();
+ DATA = (unsigned char) ((1 << target) | (controller_type == SEAGATE ? 0x80 : 0x40));
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE | CMD_SEL |
+ (reselect ? CMD_ATTN : 0);
+ sti();
+ while (!((status_read = STATUS) & STAT_BSY) &&
+ (jiffies < clock) && !st0x_aborted)
+
+#if 0 && (DEBUG & PHASE_SELECTION)
+ {
+ temp = clock - jiffies;
+
+ if (!(jiffies % 5))
+ printk("seagate_st0x_timeout : %d \r",temp);
+
+ }
+ printk("Done. \n");
+ printk("scsi%d : status = %02x, seagate_st0x_timeout = %d, aborted = %02x \n",
+ hostno, status_read, temp, st0x_aborted);
+#else
+ ;
+#endif
+
+
+ if ((jiffies >= clock) && !(status_read & STAT_BSY))
+ {
+#if (DEBUG & PHASE_SELECTION)
+ printk ("scsi%d : NO CONNECT with target %d, status = %x \n",
+ hostno, target, STATUS);
+#endif
+ return retcode(DID_NO_CONNECT);
+ }
+
+/*
+ * If we have been aborted, and we have a command in progress, i.e. the
+ * target still has BSY asserted, then we will reset the bus, and
+ * notify the midlevel driver to expect sense.
+ */
+
+ if (st0x_aborted) {
+ CONTROL = BASE_CMD;
+ if (STATUS & STAT_BSY) {
+ printk("scsi%d : BST asserted after we've been aborted.\n",
+ hostno);
+ seagate_st0x_reset(NULL, 0);
+ return retcode(DID_RESET);
+ }
+ return retcode(st0x_aborted);
+ }
+
+/* Establish current pointers. Take into account scatter / gather */
+
+ if ((nobuffs = SCint->use_sg)) {
+#if (DEBUG & DEBUG_SG)
+ {
+ int i;
+ printk("scsi%d : scatter gather requested, using %d buffers.\n",
+ hostno, nobuffs);
+ for (i = 0; i < nobuffs; ++i)
+ printk("scsi%d : buffer %d address = %08x length = %d\n",
+ hostno, i, buffer[i].address, buffer[i].length);
+ }
+#endif
+
+ buffer = (struct scatterlist *) SCint->buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+ } else {
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : scatter gather not requested.\n", hostno);
+#endif
+ buffer = NULL;
+ len = SCint->request_bufflen;
+ data = (unsigned char *) SCint->request_buffer;
+ }
+
+#if (DEBUG & (PHASE_DATAIN | PHASE_DATAOUT))
+ printk("scsi%d : len = %d\n", hostno, len);
+#endif
+
+ break;
+#ifdef LINKED
+ case LINKED_RIGHT:
+ break;
+ case LINKED_WRONG:
+ break;
+#endif
+ }
+
+/*
+ * There are several conditions under which we wish to send a message :
+ * 1. When we are allowing disconnect / reconnect, and need to establish
+ * the I_T_L nexus via an IDENTIFY with the DiscPriv bit set.
+ *
+ * 2. When we are doing linked commands, and have the wrong I_T_L nexus
+ * established and want to send an ABORT message.
+ */
+
+
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE |
+ (((reselect == CAN_RECONNECT)
+#ifdef LINKED
+ || (reselect == LINKED_WRONG)
+#endif
+ ) ? CMD_ATTN : 0) ;
+
+/*
+ * INFORMATION TRANSFER PHASE
+ *
+ * The nasty looking read / write inline assembler loops we use for
+ * DATAIN and DATAOUT phases are approximately 4-5 times as fast as
+ * the 'C' versions - since we're moving 1024 bytes of data, this
+ * really adds up.
+ */
+
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ printk("scsi%d : phase = INFORMATION TRANSFER\n", hostno);
+#endif
+
+ incommand = 1;
+ transfersize = SCint->transfersize;
+ underflow = SCint->underflow;
+
+
+/*
+ * Now, we poll the device for status information,
+ * and handle any requests it makes. Note that since we are unsure of
+ * how much data will be flowing across the system, etc and cannot
+ * make reasonable timeouts, that we will instead have the midlevel
+ * driver handle any timeouts that occur in this phase.
+ */
+
+ while (((status_read = STATUS) & STAT_BSY) && !st0x_aborted && !done)
+ {
+#ifdef PARITY
+ if (status_read & STAT_PARITY)
+ {
+ printk("scsi%d : got parity error\n", hostno);
+ st0x_aborted = DID_PARITY;
+ }
+#endif
+
+ if (status_read & STAT_REQ)
+ {
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ if ((newphase = (status_read & REQ_MASK)) != phase)
+ {
+ phase = newphase;
+ switch (phase)
+ {
+ case REQ_DATAOUT:
+ printk("scsi%d : phase = DATA OUT\n",
+ hostno);
+ break;
+ case REQ_DATAIN :
+ printk("scsi%d : phase = DATA IN\n",
+ hostno);
+ break;
+ case REQ_CMDOUT :
+ printk("scsi%d : phase = COMMAND OUT\n",
+ hostno);
+ break;
+ case REQ_STATIN :
+ printk("scsi%d : phase = STATUS IN\n",
+ hostno);
+ break;
+ case REQ_MSGOUT :
+ printk("scsi%d : phase = MESSAGE OUT\n",
+ hostno);
+ break;
+ case REQ_MSGIN :
+ printk("scsi%d : phase = MESSAGE IN\n",
+ hostno);
+ break;
+ default :
+ printk("scsi%d : phase = UNKNOWN\n",
+ hostno);
+ st0x_aborted = DID_ERROR;
+ }
+ }
+#endif
+ switch (status_read & REQ_MASK)
+ {
+ case REQ_DATAOUT :
+/*
+ * If we are in fast mode, then we simply splat the data out
+ * in word-sized chunks as fast as we can.
+ */
+
+#ifdef FAST
+if (!len) {
+#if 0
+ printk("scsi%d: underflow to target %d lun %d \n",
+ hostno, target, lun);
+ st0x_aborted = DID_ERROR;
+ fast = 0;
+#endif
+ break;
+}
+
+if (fast && transfersize && !(len % transfersize) && (len >= transfersize)
+#ifdef FAST32
+ && !(transfersize % 4)
+#endif
+ ) {
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
+ " len = %d, data = %08x\n", hostno, SCint->underflow,
+ SCint->transfersize, len, data);
+#endif
+
+ {
+#ifdef FAST32
+ unsigned int *iop = phys_to_virt (st0x_dr);
+ const unsigned int *dp = (unsigned int *) data;
+ int xferlen = transfersize >> 2;
+#else
+ unsigned char *iop = phys_to_virt (st0x_dr);
+ const unsigned char *dp = data;
+ int xferlen = transfersize;
+#endif
+ for (; xferlen; --xferlen)
+ *iop = *dp++;
+ }
+
+ len -= transfersize;
+ data += transfersize;
+
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer complete len = %d data = %08x\n",
+ hostno, len, data);
+#endif
+
+
+} else
+#endif
+
+{
+/*
+ * We loop as long as we are in a data out phase, there is data to send,
+ * and BSY is still active.
+ */
+
+ while (len)
+ {
+ unsigned char stat;
+
+ stat = STATUS;
+ if (!(stat & STAT_BSY) || ((stat & REQ_MASK) != REQ_DATAOUT))
+ break;
+ if (stat & STAT_REQ)
+ {
+ WRITE_DATA (*data++);
+ --len;
+ }
+ }
+}
+
+ if (!len && nobuffs) {
+ --nobuffs;
+ ++buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : next scatter-gather buffer len = %d address = %08x\n",
+ hostno, len, data);
+#endif
+ }
+ break;
+
+ case REQ_DATAIN :
+#ifdef SLOW_HANDSHAKE
+ if (borken) {
+#if (DEBUG & (PHASE_DATAIN))
+ transfered += len;
+#endif
+ for (; len && (STATUS & (REQ_MASK | STAT_REQ)) == (REQ_DATAIN |
+ STAT_REQ); --len) {
+ *data++ = DATA;
+ borken_wait();
+}
+#if (DEBUG & (PHASE_DATAIN))
+ transfered -= len;
+#endif
+ } else
+#endif
+#ifdef FAST
+if (fast && transfersize && !(len % transfersize) && (len >= transfersize)
+#ifdef FAST32
+ && !(transfersize % 4)
+#endif
+ ) {
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
+ " len = %d, data = %08x\n", hostno, SCint->underflow,
+ SCint->transfersize, len, data);
+#endif
+ {
+#ifdef FAST32
+ const unsigned int *iop = phys_to_virt (st0x_dr);
+ unsigned int *dp = (unsigned int *) data;
+ int xferlen = len >> 2;
+#else
+ const unsigned char *iop = phys_to_virt (st0x_dr);
+ unsigned char *dp = data;
+ int xferlen = len;
+#endif
+ for (; xferlen; --xferlen)
+ *dp++ = *iop;
+ }
+
+ len -= transfersize;
+ data += transfersize;
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered += %d\n", hostno, transfersize);
+ transfered += transfersize;
+#endif
+
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer complete len = %d data = %08x\n",
+ hostno, len, data);
+#endif
+
+} else
+#endif
+{
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered += %d\n", hostno, len);
+ transfered += len; /* Assume we'll transfer it all, then
+ subtract what we *didn't* transfer */
+#endif
+
+/*
+ * We loop as long as we are in a data in phase, there is room to read,
+ * and BSY is still active
+ */
+
+ while (len)
+ {
+ unsigned char stat;
+
+ stat = STATUS;
+ if (!(stat & STAT_BSY) || ((stat & REQ_MASK) != REQ_DATAIN))
+ break;
+ if (stat & STAT_REQ)
+ {
+ *data++ = DATA;
+ --len;
+ }
+ }
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered -= %d\n", hostno, len);
+ transfered -= len; /* Since we assumed all of Len got
+ * transfered, correct our mistake */
+#endif
+}
+
+ if (!len && nobuffs) {
+ --nobuffs;
+ ++buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : next scatter-gather buffer len = %d address = %08x\n",
+ hostno, len, data);
+#endif
+ }
+
+ break;
+
+ case REQ_CMDOUT :
+ while (((status_read = STATUS) & STAT_BSY) &&
+ ((status_read & REQ_MASK) == REQ_CMDOUT))
+ if (status_read & STAT_REQ) {
+ DATA = *(const unsigned char *) cmnd;
+ cmnd = 1+(const unsigned char *) cmnd;
+#ifdef SLOW_HANDSHAKE
+ if (borken)
+ borken_wait();
+#endif
+ }
+ break;
+
+ case REQ_STATIN :
+ status = DATA;
+ break;
+
+ case REQ_MSGOUT :
+/*
+ * We can only have sent a MSG OUT if we requested to do this
+ * by raising ATTN. So, we must drop ATTN.
+ */
+
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE;
+/*
+ * If we are reconnecting, then we must send an IDENTIFY message in
+ * response to MSGOUT.
+ */
+ switch (reselect) {
+ case CAN_RECONNECT:
+ DATA = IDENTIFY(1, lun);
+
+#if (DEBUG & (PHASE_RESELECT | PHASE_MSGOUT))
+ printk("scsi%d : sent IDENTIFY message.\n", hostno);
+#endif
+ break;
+#ifdef LINKED
+ case LINKED_WRONG:
+ DATA = ABORT;
+ linked_connected = 0;
+ reselect = CAN_RECONNECT;
+ goto connect_loop;
+#if (DEBUG & (PHASE_MSGOUT | DEBUG_LINKED))
+ printk("scsi%d : sent ABORT message to cancel incorrect I_T_L nexus.\n", hostno);
+#endif
+#endif /* LINKED */
+#if (DEBUG & DEBUG_LINKED)
+ printk("correct\n");
+#endif
+ default:
+ DATA = NOP;
+ printk("scsi%d : target %d requested MSGOUT, sent NOP message.\n", hostno, target);
+ }
+ break;
+
+ case REQ_MSGIN :
+ switch (message = DATA) {
+ case DISCONNECT :
+ should_reconnect = 1;
+ current_data = data; /* WDE add */
+ current_buffer = buffer;
+ current_bufflen = len; /* WDE add */
+ current_nobuffs = nobuffs;
+#ifdef LINKED
+ linked_connected = 0;
+#endif
+ done=1;
+#if (DEBUG & (PHASE_RESELECT | PHASE_MSGIN))
+ printk("scsi%d : disconnected.\n", hostno);
+#endif
+ break;
+
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+#endif
+ case COMMAND_COMPLETE :
+/*
+ * Note : we should check for underflow here.
+ */
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : command complete.\n", hostno);
+#endif
+ done = 1;
+ break;
+ case ABORT :
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : abort message.\n", hostno);
+#endif
+ done=1;
+ break;
+ case SAVE_POINTERS :
+ current_buffer = buffer;
+ current_bufflen = len; /* WDE add */
+ current_data = data; /* WDE mod */
+ current_nobuffs = nobuffs;
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : pointers saved.\n", hostno);
+#endif
+ break;
+ case RESTORE_POINTERS:
+ buffer=current_buffer;
+ cmnd=current_cmnd;
+ data=current_data; /* WDE mod */
+ len=current_bufflen;
+ nobuffs=current_nobuffs;
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : pointers restored.\n", hostno);
+#endif
+ break;
+ default:
+
+/*
+ * IDENTIFY distinguishes itself from the other messages by setting the
+ * high byte.
+ *
+ * Note : we need to handle at least one outstanding command per LUN,
+ * and need to hash the SCSI command for that I_T_L nexus based on the
+ * known ID (at this point) and LUN.
+ */
+
+ if (message & 0x80) {
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : IDENTIFY message received from id %d, lun %d.\n",
+ hostno, target, message & 7);
+#endif
+ } else {
+
+/*
+ * We should go into a MESSAGE OUT phase, and send a MESSAGE_REJECT
+ * if we run into a message that we don't like. The seagate driver
+ * needs some serious restructuring first though.
+ */
+
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : unknown message %d from target %d.\n",
+ hostno, message, target);
+#endif
+ }
+ }
+ break;
+
+ default :
+ printk("scsi%d : unknown phase.\n", hostno);
+ st0x_aborted = DID_ERROR;
+ }
+
+#ifdef SLOW_HANDSHAKE
+/*
+ * I really don't care to deal with borken devices in each single
+ * byte transfer case (ie, message in, message out, status), so
+ * I'll do the wait here if necessary.
+ */
+ if (borken)
+ borken_wait();
+#endif
+
+ } /* if ends */
+ } /* while ends */
+
+#if (DEBUG & (PHASE_DATAIN | PHASE_DATAOUT | PHASE_EXIT))
+ printk("scsi%d : Transfered %d bytes\n", hostno, transfered);
+#endif
+
+#if (DEBUG & PHASE_EXIT)
+#if 0 /* Doesn't work for scatter / gather */
+ printk("Buffer : \n");
+ for (i = 0; i < 20; ++i)
+ printk ("%02x ", ((unsigned char *) data)[i]); /* WDE mod */
+ printk("\n");
+#endif
+ printk("scsi%d : status = ", hostno);
+ print_status(status);
+ printk("message = %02x\n", message);
+#endif
+
+
+/* We shouldn't reach this until *after* BSY has been deasserted */
+#ifdef notyet
+ if (st0x_aborted) {
+ if (STATUS & STAT_BSY) {
+ seagate_st0x_reset(NULL);
+ st0x_aborted = DID_RESET;
+ }
+ abort_confirm = 1;
+ }
+#endif
+
+#ifdef LINKED
+else {
+/*
+ * Fix the message byte so that unsuspecting high level drivers don't
+ * puke when they see a LINKED COMMAND message in place of the COMMAND
+ * COMPLETE they may be expecting. Shouldn't be necessary, but it's
+ * better to be on the safe side.
+ *
+ * A non LINKED* message byte will indicate that the command completed,
+ * and we are now disconnected.
+ */
+
+ switch (message) {
+ case LINKED_CMD_COMPLETE :
+ case LINKED_FLG_CMD_COMPLETE :
+ message = COMMAND_COMPLETE;
+ linked_target = current_target;
+ linked_lun = current_lun;
+ linked_connected = 1;
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : keeping I_T_L nexus established for linked command.\n",
+ hostno);
+#endif
+/*
+ * We also will need to adjust status to accommodate intermediate conditions.
+ */
+ if ((status == INTERMEDIATE_GOOD) ||
+ (status == INTERMEDIATE_C_GOOD))
+ status = GOOD;
+
+ break;
+/*
+ * We should also handle what are "normal" termination messages
+ * here (ABORT, BUS_DEVICE_RESET?, and COMMAND_COMPLETE individually,
+ * and flake if things aren't right.
+ */
+
+ default :
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : closing I_T_L nexus.\n", hostno);
+#endif
+ linked_connected = 0;
+ }
+ }
+#endif /* LINKED */
+
+
+
+
+ if (should_reconnect) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : exiting seagate_st0x_queue_command() with reconnect enabled.\n",
+ hostno);
+#endif
+ CONTROL = BASE_CMD | CMD_INTR ;
+ } else
+ CONTROL = BASE_CMD;
+
+ return retcode (st0x_aborted);
+ }
+
+int seagate_st0x_abort (Scsi_Cmnd * SCpnt)
+ {
+ st0x_aborted = DID_ABORT;
+
+ return SCSI_ABORT_PENDING;
+ }
+
+/*
+ the seagate_st0x_reset function resets the SCSI bus
+*/
+
+int seagate_st0x_reset (Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+ {
+ unsigned clock;
+ /*
+ No timeouts - this command is going to fail because
+ it was reset.
+ */
+
+#ifdef DEBUG
+ printk("In seagate_st0x_reset()\n");
+#endif
+
+
+ /* assert RESET signal on SCSI bus. */
+
+ CONTROL = BASE_CMD | CMD_RST;
+ clock=jiffies+2;
+
+
+ /* Wait. */
+
+ while (jiffies < clock);
+
+ CONTROL = BASE_CMD;
+
+ st0x_aborted = DID_RESET;
+
+#ifdef DEBUG
+ printk("SCSI bus reset.\n");
+#endif
+ return SCSI_RESET_WAKEUP;
+ }
+
+#include <asm/segment.h>
+#include "sd.h"
+#include <scsi/scsi_ioctl.h>
+
+int seagate_st0x_biosparam(Disk * disk, kdev_t dev, int* ip) {
+ unsigned char buf[256 + sizeof(int) * 2], cmd[6], *data, *page;
+ int *sizes, result, formatted_sectors, total_sectors;
+ int cylinders, heads, sectors;
+ int capacity;
+
+/*
+ * Only SCSI-I CCS drives and later implement the necessary mode sense
+ * pages.
+ */
+
+ if (disk->device->scsi_level < 2)
+ return -1;
+
+ sizes = (int *) buf;
+ data = (unsigned char *) (sizes + 2);
+
+ cmd[0] = MODE_SENSE;
+ cmd[1] = (disk->device->lun << 5) & 0xe5;
+ cmd[2] = 0x04; /* Read page 4, rigid disk geometry page current values */
+ cmd[3] = 0;
+ cmd[4] = 255;
+ cmd[5] = 0;
+
+/*
+ * We are transferring 0 bytes in the out direction, and expect to get back
+ * 24 bytes for each mode page.
+ */
+
+ sizes[0] = 0;
+ sizes[1] = 256;
+
+ memcpy (data, cmd, 6);
+
+ if (!(result = kernel_scsi_ioctl (disk->device, SCSI_IOCTL_SEND_COMMAND, (void *) buf))) {
+/*
+ * The mode page lies beyond the MODE SENSE header, with length 4, and
+ * the BLOCK DESCRIPTOR, with length header[3].
+ */
+
+ page = data + 4 + data[3];
+ heads = (int) page[5];
+ cylinders = (page[2] << 16) | (page[3] << 8) | page[4];
+
+ cmd[2] = 0x03; /* Read page 3, format page current values */
+ memcpy (data, cmd, 6);
+
+ if (!(result = kernel_scsi_ioctl (disk->device, SCSI_IOCTL_SEND_COMMAND, (void *) buf))) {
+ page = data + 4 + data[3];
+ sectors = (page[10] << 8) | page[11];
+
+
+/*
+ * Get the total number of formatted sectors from the block descriptor,
+ * so we can tell how many are being used for alternates.
+ */
+
+ formatted_sectors = (data[4 + 1] << 16) | (data[4 + 2] << 8) |
+ data[4 + 3] ;
+
+ total_sectors = (heads * cylinders * sectors);
+
+/*
+ * Adjust the real geometry by subtracting
+ * (spare sectors / (heads * tracks)) cylinders from the number of cylinders.
+ *
+ * It appears that the CE cylinder CAN be a partial cylinder.
+ */
+
+
+printk("scsi%d : heads = %d cylinders = %d sectors = %d total = %d formatted = %d\n",
+ hostno, heads, cylinders, sectors, total_sectors, formatted_sectors);
+
+ if (!heads || !sectors || !cylinders)
+ result = -1;
+ else
+ cylinders -= ((total_sectors - formatted_sectors) / (heads * sectors));
+
+/*
+ * Now, we need to do a sanity check on the geometry to see if it is
+ * BIOS compatible. The maximum BIOS geometry is 1024 cylinders *
+ * 256 heads * 64 sectors.
+ */
+
+ if ((cylinders > 1024) || (sectors > 64)) {
+ /* The Seagate's seem to have some mapping
+ * Multiple heads * sectors * cyl to get capacity
+ * Then start rounding down. */
+ capacity = heads * sectors * cylinders;
+ sectors = 17; /* Old MFM Drives use this, so does the Seagate */
+ heads = 2;
+ capacity = capacity / sectors;
+ while (cylinders > 1024)
+ {
+ heads *= 2; /* For some reason, they go in multiples */
+ cylinders = capacity / heads;
+ }
+ }
+ ip[0] = heads;
+ ip[1] = sectors;
+ ip[2] = cylinders;
+
+/*
+ * There should be an alternate mapping for things the seagate doesn't
+ * understand, but I couldn't say what it is with reasonable certainty.
+ */
+
+ }
+ }
+
+ return result;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = SEAGATE_ST0X;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/seagate.h b/linux/src/drivers/scsi/seagate.h
new file mode 100644
index 0000000..da18dbe
--- /dev/null
+++ b/linux/src/drivers/scsi/seagate.h
@@ -0,0 +1,139 @@
+/*
+ * seagate.h Copyright (C) 1992 Drew Eckhardt
+ * low level scsi driver header for ST01/ST02 by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#ifndef _SEAGATE_H
+ #define _SEAGATE_H
+/*
+ $Header$
+*/
+#ifndef ASM
+int seagate_st0x_detect(Scsi_Host_Template *);
+int seagate_st0x_command(Scsi_Cmnd *);
+int seagate_st0x_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+int seagate_st0x_abort(Scsi_Cmnd *);
+const char *seagate_st0x_info(struct Scsi_Host *);
+int seagate_st0x_reset(Scsi_Cmnd *, unsigned int);
+int seagate_st0x_proc_info(char *,char **,off_t,int,int,int);
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+#include <linux/kdev_t.h>
+int seagate_st0x_biosparam(Disk *, kdev_t, int*);
+
+#define SEAGATE_ST0X { NULL, NULL, NULL, seagate_st0x_proc_info, \
+ NULL, seagate_st0x_detect, \
+ NULL, \
+ seagate_st0x_info, seagate_st0x_command, \
+ seagate_st0x_queue_command, seagate_st0x_abort, \
+ seagate_st0x_reset, NULL, seagate_st0x_biosparam, \
+ 1, 7, SG_ALL, 1, 0, 0, DISABLE_CLUSTERING}
+#endif
+
+
+/*
+ defining PARITY causes parity data to be checked
+*/
+
+#define PARITY
+
+
+/*
+ Thanks to Brian Antoine for the example code in his Messy-Loss ST-01
+ driver, and Mitsugu Suzuki for information on the ST-01
+ SCSI host.
+*/
+
+/*
+ CONTROL defines
+*/
+
+#define CMD_RST 0x01
+#define CMD_SEL 0x02
+#define CMD_BSY 0x04
+#define CMD_ATTN 0x08
+#define CMD_START_ARB 0x10
+#define CMD_EN_PARITY 0x20
+#define CMD_INTR 0x40
+#define CMD_DRVR_ENABLE 0x80
+
+/*
+ STATUS
+*/
+
+#define STAT_BSY 0x01
+#define STAT_MSG 0x02
+#define STAT_IO 0x04
+#define STAT_CD 0x08
+#define STAT_REQ 0x10
+#define STAT_SEL 0x20
+#define STAT_PARITY 0x40
+#define STAT_ARB_CMPL 0x80
+
+/*
+ REQUESTS
+*/
+
+#define REQ_MASK (STAT_CD | STAT_IO | STAT_MSG)
+#define REQ_DATAOUT 0
+#define REQ_DATAIN STAT_IO
+#define REQ_CMDOUT STAT_CD
+#define REQ_STATIN (STAT_CD | STAT_IO)
+#define REQ_MSGOUT (STAT_MSG | STAT_CD)
+#define REQ_MSGIN (STAT_MSG | STAT_CD | STAT_IO)
+
+extern volatile int seagate_st0x_timeout;
+
+#ifdef PARITY
+ #define BASE_CMD CMD_EN_PARITY
+#else
+ #define BASE_CMD 0
+#endif
+
+/*
+ Debugging code
+*/
+
+#define PHASE_BUS_FREE 1
+#define PHASE_ARBITRATION 2
+#define PHASE_SELECTION 4
+#define PHASE_DATAIN 8
+#define PHASE_DATAOUT 0x10
+#define PHASE_CMDOUT 0x20
+#define PHASE_MSGIN 0x40
+#define PHASE_MSGOUT 0x80
+#define PHASE_STATUSIN 0x100
+#define PHASE_ETC (PHASE_DATAIN | PHASE_DATAOUT | PHASE_CMDOUT | PHASE_MSGIN | PHASE_MSGOUT | PHASE_STATUSIN)
+#define PRINT_COMMAND 0x200
+#define PHASE_EXIT 0x400
+#define PHASE_RESELECT 0x800
+#define DEBUG_FAST 0x1000
+#define DEBUG_SG 0x2000
+#define DEBUG_LINKED 0x4000
+#define DEBUG_BORKEN 0x8000
+
+/*
+ * Control options - these are timeouts specified in .01 seconds.
+ */
+
+/* 30, 20 work */
+#define ST0X_BUS_FREE_DELAY 25
+#define ST0X_SELECTION_DELAY 25
+
+#define eoi() __asm__("push %%eax\nmovb $0x20, %%al\noutb %%al, $0x20\npop %%eax"::)
+
+#define SEAGATE 1 /* these determine the type of the controller */
+#define FD 2
+
+#define ST0X_ID_STR "Seagate ST-01/ST-02"
+#define FD_ID_STR "TMC-8XX/TMC-950"
+
+#endif
+
diff --git a/linux/src/drivers/scsi/sr.c b/linux/src/drivers/scsi/sr.c
new file mode 100644
index 0000000..be64fb1
--- /dev/null
+++ b/linux/src/drivers/scsi/sr.c
@@ -0,0 +1,1290 @@
+/*
+ * sr.c Copyright (C) 1992 David Giller
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * adapted from:
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Linux scsi disk driver by
+ * Drew Eckhardt <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale ericy@cais.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Modified by Eric Youngdale eric@aib.com to support loadable
+ * low-level scsi drivers.
+ *
+ * Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to
+ * provide auto-eject.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cdrom.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+
+#define MAJOR_NR SCSI_CDROM_MAJOR
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sr.h"
+#include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */
+#include "constants.h"
+
+#define MAX_RETRIES 3
+#define SR_TIMEOUT (30 * HZ)
+
+static int sr_init(void);
+static void sr_finish(void);
+static int sr_attach(Scsi_Device *);
+static int sr_detect(Scsi_Device *);
+static void sr_detach(Scsi_Device *);
+
+struct Scsi_Device_Template sr_template = {NULL, "cdrom", "sr", NULL, TYPE_ROM,
+ SCSI_CDROM_MAJOR, 0, 0, 0, 1,
+ sr_detect, sr_init,
+ sr_finish, sr_attach, sr_detach};
+
+Scsi_CD * scsi_CDs = NULL;
+static int * sr_sizes;
+
+static int * sr_blocksizes;
+
+static int sr_open(struct inode *, struct file *);
+void get_sectorsize(int);
+void sr_photocd(struct inode *);
+
+extern int sr_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+void requeue_sr_request (Scsi_Cmnd * SCpnt);
+static int check_cdrom_media_change(kdev_t);
+
+static void sr_release(struct inode * inode, struct file * file)
+{
+ sync_dev(inode->i_rdev);
+ if(! --scsi_CDs[MINOR(inode->i_rdev)].device->access_count)
+ {
+ sr_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ if (scsi_CDs[MINOR(inode->i_rdev)].auto_eject)
+ sr_ioctl(inode, NULL, CDROMEJECT, 0);
+ }
+ if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
+ (*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)--;
+ if(sr_template.usage_count) (*sr_template.usage_count)--;
+}
+
+static struct file_operations sr_fops =
+{
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ sr_ioctl, /* ioctl */
+ NULL, /* mmap */
+ sr_open, /* special open code */
+ sr_release, /* release */
+ NULL, /* fsync */
+ NULL, /* fasync */
+ check_cdrom_media_change, /* Disk change */
+ NULL /* revalidate */
+};
+
+/*
+ * This function checks to see if the media has been changed in the
+ * CDROM drive. It is possible that we have already sensed a change,
+ * or the drive may have sensed one and not yet reported it. We must
+ * be ready for either case. This function always reports the current
+ * value of the changed bit. If flag is 0, then the changed bit is reset.
+ * This function could be done as an ioctl, but we would need to have
+ * an inode for that to work, and we do not always have one.
+ */
+
+int check_cdrom_media_change(kdev_t full_dev){
+ int retval, target;
+ struct inode inode;
+ int flag = 0;
+
+ target = MINOR(full_dev);
+
+ if (target >= sr_template.nr_dev) {
+ printk("CD-ROM request error: invalid device.\n");
+ return 0;
+ };
+
+ inode.i_rdev = full_dev; /* This is all we really need here */
+ retval = sr_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
+
+ if(retval){ /* Unable to test, unit probably not ready. This usually
+ * means there is no disc in the drive. Mark as changed,
+ * and we will figure it out later once the drive is
+ * available again. */
+
+ scsi_CDs[target].device->changed = 1;
+ return 1; /* This will force a flush, if called from
+ * check_disk_change */
+ };
+
+ retval = scsi_CDs[target].device->changed;
+ if(!flag) {
+ scsi_CDs[target].device->changed = 0;
+ /* If the disk changed, the capacity will now be different,
+ * so we force a re-read of this information */
+ if (retval) scsi_CDs[target].needs_sector_size = 1;
+ };
+ return retval;
+}
+
+/*
+ * rw_intr is the interrupt routine for the device driver. It will be notified at the
+ * end of a SCSI read / write, and will take one of several actions based on success or failure.
+ */
+
+static void rw_intr (Scsi_Cmnd * SCpnt)
+{
+ int result = SCpnt->result;
+ int this_count = SCpnt->this_count;
+ int good_sectors = (result == 0 ? this_count : 0);
+ int block_sectors = 0;
+
+#ifdef DEBUG
+ printk("sr.c done: %x %x\n",result, SCpnt->request.bh->b_data);
+#endif
+ /*
+ Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial success.
+ Since this is a relatively rare error condition, no care is taken to
+ avoid unnecessary additional work such as memcpy's that could be avoided.
+ */
+
+ if (driver_byte(result) != 0 && /* An error occurred */
+ SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
+ (SCpnt->sense_buffer[2] == MEDIUM_ERROR ||
+ SCpnt->sense_buffer[2] == VOLUME_OVERFLOW ||
+ SCpnt->sense_buffer[2] == ILLEGAL_REQUEST))
+ {
+ long error_sector = (SCpnt->sense_buffer[3] << 24) |
+ (SCpnt->sense_buffer[4] << 16) |
+ (SCpnt->sense_buffer[5] << 8) |
+ SCpnt->sense_buffer[6];
+ int device_nr = DEVICE_NR(SCpnt->request.rq_dev);
+ if (SCpnt->request.bh != NULL)
+ block_sectors = SCpnt->request.bh->b_size >> 9;
+ if (block_sectors < 4) block_sectors = 4;
+ if (scsi_CDs[device_nr].sector_size == 2048)
+ error_sector <<= 2;
+ error_sector &= ~ (block_sectors - 1);
+ good_sectors = error_sector - SCpnt->request.sector;
+ if (good_sectors < 0 || good_sectors >= this_count)
+ good_sectors = 0;
+ /*
+ The SCSI specification allows for the value returned by READ
+ CAPACITY to be up to 75 2K sectors past the last readable
+ block. Therefore, if we hit a medium error within the last
+ 75 2K sectors, we decrease the saved size value.
+ */
+ if ((error_sector >> 1) < sr_sizes[device_nr] &&
+ scsi_CDs[device_nr].capacity - error_sector < 4*75)
+ sr_sizes[device_nr] = error_sector >> 1;
+ }
+
+ if (good_sectors > 0)
+ { /* Some sectors were read successfully. */
+ if (SCpnt->use_sg == 0) {
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ {
+ int offset;
+ offset = (SCpnt->request.sector % 4) << 9;
+ memcpy((char *)SCpnt->request.buffer,
+ (char *)SCpnt->buffer + offset,
+ good_sectors << 9);
+ /* Even though we are not using scatter-gather, we look
+ * ahead and see if there is a linked request for the
+ * other half of this buffer. If there is, then satisfy
+ * it. */
+ if((offset == 0) && good_sectors == 2 &&
+ SCpnt->request.nr_sectors > good_sectors &&
+ SCpnt->request.bh &&
+ SCpnt->request.bh->b_reqnext &&
+ SCpnt->request.bh->b_reqnext->b_size == 1024) {
+ memcpy((char *)SCpnt->request.bh->b_reqnext->b_data,
+ (char *)SCpnt->buffer + 1024,
+ 1024);
+ good_sectors += 2;
+ };
+
+ scsi_free(SCpnt->buffer, 2048);
+ }
+ } else {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ if (sgpnt[i].alt_address != sgpnt[i].address) {
+ memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
+ };
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ };
+ };
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ if(SCpnt->request.sector % 4) good_sectors -= 2;
+ /* See if there is a padding record at the end that needs to be removed */
+ if(good_sectors > SCpnt->request.nr_sectors)
+ good_sectors -= 2;
+ };
+
+#ifdef DEBUG
+ printk("(%x %x %x) ",SCpnt->request.bh, SCpnt->request.nr_sectors,
+ good_sectors);
+#endif
+ if (SCpnt->request.nr_sectors > this_count)
+ {
+ SCpnt->request.errors = 0;
+ if (!SCpnt->request.bh)
+ panic("sr.c: linked page request (%lx %x)",
+ SCpnt->request.sector, this_count);
+ }
+
+ SCpnt = end_scsi_request(SCpnt, 1, good_sectors); /* All done */
+ if (result == 0)
+ {
+ requeue_sr_request(SCpnt);
+ return;
+ }
+ }
+
+ if (good_sectors == 0) {
+ /* We only come through here if no sectors were read successfully. */
+
+ /* Free up any indirection buffers we allocated for DMA purposes. */
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ } else {
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ }
+
+ }
+
+ if (driver_byte(result) != 0) {
+ if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
+ if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+ /* detected disc change. set a bit and quietly refuse
+ * further access. */
+
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt);
+ return;
+ }
+ }
+
+ if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
+ printk("CD-ROM error: ");
+ print_sense("sr", SCpnt);
+ printk("command was: ");
+ print_command(SCpnt->cmnd);
+ if (scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
+ requeue_sr_request(SCpnt);
+ result = 0;
+ return;
+ } else {
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt); /* Do next request */
+ return;
+ }
+
+ }
+
+ if (SCpnt->sense_buffer[2] == NOT_READY) {
+ printk("CD-ROM not ready. Make sure you have a disc in the drive.\n");
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt); /* Do next request */
+ return;
+ }
+
+ if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
+ printk("scsi%d: MEDIUM ERROR on "
+ "channel %d, id %d, lun %d, CDB: ",
+ SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ print_sense("sr", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
+ requeue_sr_request(SCpnt);
+ return;
+ }
+
+ if (SCpnt->sense_buffer[2] == VOLUME_OVERFLOW) {
+ printk("scsi%d: VOLUME OVERFLOW on "
+ "channel %d, id %d, lun %d, CDB: ",
+ SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ print_sense("sr", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
+ requeue_sr_request(SCpnt);
+ return;
+ }
+ }
+
+ /* We only get this far if we have an error we have not recognized */
+ if(result) {
+ printk("SCSI CD error : host %d id %d lun %d return code = %03x\n",
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->lun,
+ result);
+
+ if (status_byte(result) == CHECK_CONDITION)
+ print_sense("sr", SCpnt);
+
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+ requeue_sr_request(SCpnt);
+ }
+}
+
+/*
+ * Here I tried to implement support for multisession-CD's
+ *
+ * Much of this has do be done with vendor-specific SCSI-commands, because
+ * multisession is newer than the SCSI-II standard.
+ * So I have to complete it step by step. Useful information is welcome.
+ *
+ * Actually works:
+ * - NEC: Detection and support of multisession CD's. Special handling
+ * for XA-disks is not necessary.
+ *
+ * - TOSHIBA: setting density is done here now, mounting PhotoCD's should
+ * work now without running the program "set_density"
+ * Multisession CD's are supported too.
+ *
+ * Gerd Knorr <kraxel@cs.tu-berlin.de>
+ */
+/*
+ * 19950704 operator@melchior.cuivre.fdn.fr (Thomas Quinot)
+ *
+ * - SONY: Same as Nec.
+ *
+ * - PIONEER: works with SONY code (may be others too ?)
+ */
+
+void sr_photocd(struct inode *inode)
+{
+ unsigned long sector,min,sec,frame;
+ unsigned char buf[40]; /* the buffer for the ioctl */
+ unsigned char *cmd; /* the scsi-command */
+ unsigned char *send; /* the data we send to the drive ... */
+ unsigned char *rec; /* ... and get back */
+ int rc,is_xa,no_multi;
+
+ if (scsi_CDs[MINOR(inode->i_rdev)].xa_flags & 0x02) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: CDROM and/or driver do not support multisession CD's");
+#endif
+ return;
+ }
+
+ if (!suser()) {
+ /* I'm not the superuser, so SCSI_IOCTL_SEND_COMMAND isn't allowed
+ * for me. That's why mpcd_sector will be initialized with zero,
+ * because I'm not able to get the right value. Necessary only if
+ * access_count is 1, else no disk change happened since the last
+ * call of this function and we can keep the old value.
+ */
+ if (1 == scsi_CDs[MINOR(inode->i_rdev)].device->access_count) {
+ scsi_CDs[MINOR(inode->i_rdev)].mpcd_sector = 0;
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags &= ~0x01;
+ }
+ return;
+ }
+
+ sector = 0;
+ is_xa = 0;
+ no_multi = 0;
+ cmd = rec = &buf[8];
+
+ switch(scsi_CDs[MINOR(inode->i_rdev)].device->manufacturer) {
+
+ case SCSI_MAN_NEC:
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: use NEC code\n");
+#endif
+ memset(buf,0,40);
+ *((unsigned int*)buf) = 0x0; /* we send nothing... */
+ *((unsigned int*)buf+1) = 0x16; /* and receive 0x16 bytes */
+ cmd[0] = 0xde;
+ cmd[1] = 0x03;
+ cmd[2] = 0xb0;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ if (rc != 0x28000002) /* drop "not ready" */
+ printk(KERN_WARNING"sr_photocd: ioctl error (NEC): 0x%x\n",rc);
+ break;
+ }
+ if (rec[14] != 0 && rec[14] != 0xb0) {
+ printk(KERN_INFO"sr_photocd: (NEC) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ min = (unsigned long) rec[15]/16*10 + (unsigned long) rec[15]%16;
+ sec = (unsigned long) rec[16]/16*10 + (unsigned long) rec[16]%16;
+ frame = (unsigned long) rec[17]/16*10 + (unsigned long) rec[17]%16;
+ sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
+ is_xa = (rec[14] == 0xb0);
+#ifdef DEBUG
+ if (sector) {
+ printk(KERN_DEBUG "sr_photocd: multisession CD detected. start: %lu\n",sector);
+ }
+#endif
+ break;
+
+ case SCSI_MAN_TOSHIBA:
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: use TOSHIBA code\n");
+#endif
+
+ /* we request some disc information (is it a XA-CD ?,
+ * where starts the last session ?) */
+ memset(buf,0,40);
+ *((unsigned int*)buf) = (unsigned int) 0;
+ *((unsigned int*)buf+1) = (unsigned int) 4; /* receive 4 bytes */
+ cmd[0] = (unsigned char) 0x00c7;
+ cmd[1] = (unsigned char) 3;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ if (rc == 0x28000002) {
+ /* Got a "not ready" - error. No chance to find out if this is
+ * because there is no CD in the drive or because the drive
+ * doesn't know multisession CD's. So I need to do an extra
+ * check... */
+ if (!kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_TEST_UNIT_READY, NULL)) {
+ printk(KERN_INFO "sr_photocd: (TOSHIBA) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ }
+ } else
+ printk(KERN_INFO"sr_photocd: ioctl error (TOSHIBA #1): 0x%x\n",rc);
+ break; /* if the first ioctl fails, we don't call the second one */
+ }
+ is_xa = (rec[0] == 0x20);
+ min = (unsigned long) rec[1]/16*10 + (unsigned long) rec[1]%16;
+ sec = (unsigned long) rec[2]/16*10 + (unsigned long) rec[2]%16;
+ frame = (unsigned long) rec[3]/16*10 + (unsigned long) rec[3]%16;
+ sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
+ if (sector) {
+ sector -= CD_BLOCK_OFFSET;
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: multisession CD detected: start: %lu\n",sector);
+#endif
+ }
+
+ /* now we do a get_density... */
+ memset(buf,0,40);
+ *((unsigned int*)buf) = (unsigned int) 0;
+ *((unsigned int*)buf+1) = (unsigned int) 12;
+ cmd[0] = (unsigned char) MODE_SENSE;
+ cmd[2] = (unsigned char) 1;
+ cmd[4] = (unsigned char) 12;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ printk(KERN_WARNING "sr_photocd: ioctl error (TOSHIBA #2): 0x%x\n",rc);
+ break;
+ }
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: get_density: 0x%x\n",rec[4]);
+#endif
+
+ /* ...and only if necessary a set_density */
+ if ((rec[4] != 0x81 && is_xa) || (rec[4] != 0 && !is_xa)) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: doing set_density\n");
+#endif
+ memset(buf,0,40);
+ *((unsigned int*)buf) = (unsigned int) 12; /* send 12 bytes */
+ *((unsigned int*)buf+1) = (unsigned int) 0;
+ cmd[0] = (unsigned char) MODE_SELECT;
+ cmd[1] = (unsigned char) (1 << 4);
+ cmd[4] = (unsigned char) 12;
+ send = &cmd[6]; /* this is a 6-Byte command */
+ send[ 3] = (unsigned char) 0x08; /* data for cmd */
+ /* density 0x81 for XA, 0 else */
+ send[ 4] = (is_xa) ?
+ (unsigned char) 0x81 : (unsigned char) 0;
+ send[10] = (unsigned char) 0x08;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ printk(KERN_WARNING "sr_photocd: ioctl error (TOSHIBA #3): 0x%x\n",rc);
+ }
+ /* The set_density command may have changed the
+ * sector size or capacity. */
+ scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size = 1;
+ }
+ break;
+
+ case SCSI_MAN_SONY: /* Thomas QUINOT <thomas@melchior.cuivre.fdn.fr> */
+ case SCSI_MAN_PIONEER:
+ case SCSI_MAN_UNKNOWN:
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: use SONY/PIONEER code\n");
+#endif
+ get_sectorsize(MINOR(inode->i_rdev)); /* spinup (avoid timeout) */
+ memset(buf,0,40);
+ *((unsigned int*)buf) = 0x0; /* we send nothing... */
+ *((unsigned int*)buf+1) = 0x0c; /* and receive 0x0c bytes */
+ cmd[0] = READ_TOC;
+ cmd[8] = 0x0c;
+ cmd[9] = 0x40;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+
+ if (rc != 0) {
+ if (rc != 0x28000002) /* drop "not ready" */
+ printk(KERN_WARNING "sr_photocd: ioctl error (SONY/PIONEER): 0x%x\n",rc);
+ break;
+ }
+ if ((rec[0] << 8) + rec[1] < 0x0a) {
+ printk(KERN_INFO "sr_photocd: (SONY/PIONEER) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ sector = rec[11] + (rec[10] << 8) + (rec[9] << 16) + (rec[8] << 24);
+ is_xa = !!sector;
+#ifdef DEBUG
+ if (sector)
+ printk (KERN_DEBUG "sr_photocd: multisession CD detected. start: %lu\n",sector);
+#endif
+ break;
+
+ case SCSI_MAN_NEC_OLDCDR:
+ default:
+ sector = 0;
+ no_multi = 1;
+ break; }
+
+ scsi_CDs[MINOR(inode->i_rdev)].mpcd_sector = sector;
+ if (is_xa)
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags |= 0x01;
+ else
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags &= ~0x01;
+ if (no_multi)
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags |= 0x02;
+ return;
+}
+
+static int sr_open(struct inode * inode, struct file * filp)
+{
+ if(MINOR(inode->i_rdev) >= sr_template.nr_dev ||
+ !scsi_CDs[MINOR(inode->i_rdev)].device) return -ENXIO; /* No such device */
+
+ if (filp->f_mode & 2)
+ return -EROFS;
+
+ if(sr_template.usage_count) (*sr_template.usage_count)++;
+
+ sr_ioctl(inode,filp,CDROMCLOSETRAY,0);
+ check_disk_change(inode->i_rdev);
+
+ if(!scsi_CDs[MINOR(inode->i_rdev)].device->access_count++)
+ sr_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+ if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
+ (*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)++;
+
+ sr_photocd(inode);
+
+ /* If this device did not have media in the drive at boot time, then
+ * we would have been unable to get the sector size. Check to see if
+ * this is the case, and try again.
+ */
+
+ if(scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size)
+ get_sectorsize(MINOR(inode->i_rdev));
+
+ return 0;
+}
+
+
+/*
+ * do_sr_request() is the request handler function for the sr driver.
+ * Its function in life is to take block device requests, and
+ * translate them to SCSI commands.
+ */
+
+static void do_sr_request (void)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ struct request * req = NULL;
+ Scsi_Device * SDev;
+ unsigned long flags;
+ int flag = 0;
+
+ while (1==1){
+ save_flags(flags);
+ cli();
+ if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
+ restore_flags(flags);
+ return;
+ };
+
+ INIT_SCSI_REQUEST;
+
+ SDev = scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device;
+
+ /*
+ * I am not sure where the best place to do this is. We need
+ * to hook in a place where we are likely to come if in user
+ * space.
+ */
+ if( SDev->was_reset )
+ {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ */
+ if( SDev->removable && !intr_count )
+ {
+ scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
+ }
+ SDev->was_reset = 0;
+ }
+
+ if (flag++ == 0)
+ SCpnt = allocate_device(&CURRENT,
+ scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device, 0);
+ else SCpnt = NULL;
+ restore_flags(flags);
+
+ /* This is a performance enhancement. We dig down into the request list and
+ * try to find a queueable request (i.e. device not busy, and host able to
+ * accept another command. If we find one, then we queue it. This can
+ * make a big difference on systems with more than one disk drive. We want
+ * to have the interrupts off when monkeying with the request list, because
+ * otherwise the kernel might try to slip in a request in between somewhere. */
+
+ if (!SCpnt && sr_template.nr_dev > 1){
+ struct request *req1;
+ req1 = NULL;
+ save_flags(flags);
+ cli();
+ req = CURRENT;
+ while(req){
+ SCpnt = request_queueable(req,
+ scsi_CDs[DEVICE_NR(req->rq_dev)].device);
+ if(SCpnt) break;
+ req1 = req;
+ req = req->next;
+ };
+ if (SCpnt && req->rq_status == RQ_INACTIVE) {
+ if (req == CURRENT)
+ CURRENT = CURRENT->next;
+ else
+ req1->next = req->next;
+ };
+ restore_flags(flags);
+ };
+
+ if (!SCpnt)
+ return; /* Could not find anything to do */
+
+ wake_up(&wait_for_request);
+
+ /* Queue command */
+ requeue_sr_request(SCpnt);
+ }; /* While */
+}
+
+void requeue_sr_request (Scsi_Cmnd * SCpnt)
+{
+ unsigned int dev, block, realcount;
+ unsigned char cmd[12], *buffer, tries;
+ int this_count, start, end_rec;
+
+ tries = 2;
+
+ repeat:
+ if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
+ do_sr_request();
+ return;
+ }
+
+ dev = MINOR(SCpnt->request.rq_dev);
+ block = SCpnt->request.sector;
+ buffer = NULL;
+ this_count = 0;
+
+ if (dev >= sr_template.nr_dev) {
+ /* printk("CD-ROM request error: invalid device.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ if (!scsi_CDs[dev].use) {
+ /* printk("CD-ROM request error: device marked not in use.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ if (scsi_CDs[dev].device->changed) {
+ /*
+ * quietly refuse to do anything to a changed disc
+ * until the changed bit has been reset
+ */
+ /* printk("CD-ROM has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ switch (SCpnt->request.cmd)
+ {
+ case WRITE:
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ break;
+ case READ :
+ cmd[0] = READ_6;
+ break;
+ default :
+ panic ("Unknown sr command %d\n", SCpnt->request.cmd);
+ }
+
+ cmd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ /*
+ * Now do the grungy work of figuring out which sectors we need, and
+ * where in memory we are going to put them.
+ *
+ * The variables we need are:
+ *
+ * this_count= number of 512 byte sectors being read
+ * block = starting cdrom sector to read.
+ * realcount = # of cdrom sectors to read
+ *
+ * The major difference between a scsi disk and a scsi cdrom
+ * is that we will always use scatter-gather if we can, because we can
+ * work around the fact that the buffer cache has a block size of 1024,
+ * and we have 2048 byte sectors. This code should work for buffers that
+ * are any multiple of 512 bytes long.
+ */
+
+ SCpnt->use_sg = 0;
+
+ if (SCpnt->host->sg_tablesize > 0 &&
+ (!need_isa_buffer ||
+ dma_free_sectors >= 10)) {
+ struct buffer_head * bh;
+ struct scatterlist * sgpnt;
+ int count, this_count_max;
+ bh = SCpnt->request.bh;
+ this_count = 0;
+ count = 0;
+ this_count_max = (scsi_CDs[dev].ten ? 0xffff : 0xff) << 4;
+ /* Calculate how many links we can use. First see if we need
+ * a padding record at the start */
+ this_count = SCpnt->request.sector % 4;
+ if(this_count) count++;
+ while(bh && count < SCpnt->host->sg_tablesize) {
+ if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
+ this_count += (bh->b_size >> 9);
+ count++;
+ bh = bh->b_reqnext;
+ };
+ /* Fix up in case of an odd record at the end */
+ end_rec = 0;
+ if(this_count % 4) {
+ if (count < SCpnt->host->sg_tablesize) {
+ count++;
+ end_rec = (4 - (this_count % 4)) << 9;
+ this_count += 4 - (this_count % 4);
+ } else {
+ count--;
+ this_count -= (this_count % 4);
+ };
+ };
+ SCpnt->use_sg = count; /* Number of chains */
+ /* scsi_malloc can only allocate in chunks of 512 bytes */
+ count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
+
+ SCpnt->sglist_len = count;
+ sgpnt = (struct scatterlist * ) scsi_malloc(count);
+ if (!sgpnt) {
+ printk("Warning - running *really* short on DMA buffers\n");
+ SCpnt->use_sg = 0; /* No memory left - bail out */
+ } else {
+ buffer = (unsigned char *) sgpnt;
+ count = 0;
+ bh = SCpnt->request.bh;
+ if(SCpnt->request.sector % 4) {
+ sgpnt[count].length = (SCpnt->request.sector % 4) << 9;
+ sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
+ if(!sgpnt[count].address) panic("SCSI DMA pool exhausted.");
+ sgpnt[count].alt_address = sgpnt[count].address; /* Flag to delete
+ if needed */
+ count++;
+ };
+ for(bh = SCpnt->request.bh; count < SCpnt->use_sg;
+ count++, bh = bh->b_reqnext) {
+ if (bh) { /* Need a placeholder at the end of the record? */
+ sgpnt[count].address = bh->b_data;
+ sgpnt[count].length = bh->b_size;
+ sgpnt[count].alt_address = NULL;
+ } else {
+ sgpnt[count].address = (char *) scsi_malloc(end_rec);
+ if(!sgpnt[count].address) panic("SCSI DMA pool exhausted.");
+ sgpnt[count].length = end_rec;
+ sgpnt[count].alt_address = sgpnt[count].address;
+ if (count+1 != SCpnt->use_sg) panic("Bad sr request list");
+ break;
+ };
+ if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
+ ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
+ sgpnt[count].alt_address = sgpnt[count].address;
+ /* We try to avoid exhausting the DMA pool, since it is easier
+ * to control usage here. In other places we might have a more
+ * pressing need, and we would be screwed if we ran out */
+ if(dma_free_sectors < (sgpnt[count].length >> 9) + 5) {
+ sgpnt[count].address = NULL;
+ } else {
+ sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
+ };
+ /* If we start running low on DMA buffers, we abort the scatter-gather
+ * operation, and free all of the memory we have allocated. We want to
+ * ensure that all scsi operations are able to do at least a non-scatter/gather
+ * operation */
+ if(sgpnt[count].address == NULL){ /* Out of dma memory */
+ printk("Warning: Running low on SCSI DMA buffers");
+ /* Try switching back to a non scatter-gather operation. */
+ while(--count >= 0){
+ if(sgpnt[count].alt_address)
+ scsi_free(sgpnt[count].address, sgpnt[count].length);
+ };
+ SCpnt->use_sg = 0;
+ scsi_free(buffer, SCpnt->sglist_len);
+ break;
+ }; /* if address == NULL */
+ }; /* if need DMA fixup */
+ }; /* for loop to fill list */
+#ifdef DEBUG
+ printk("SR: %d %d %d %d %d *** ",SCpnt->use_sg, SCpnt->request.sector,
+ this_count,
+ SCpnt->request.current_nr_sectors,
+ SCpnt->request.nr_sectors);
+ for(count=0; count<SCpnt->use_sg; count++)
+ printk("SGlist: %d %x %x %x\n", count,
+ sgpnt[count].address,
+ sgpnt[count].alt_address,
+ sgpnt[count].length);
+#endif
+ }; /* Able to allocate scatter-gather list */
+ };
+
+ if (SCpnt->use_sg == 0){
+ /* We cannot use scatter-gather. Do this the old fashion way */
+ if (!SCpnt->request.bh)
+ this_count = SCpnt->request.nr_sectors;
+ else
+ this_count = (SCpnt->request.bh->b_size >> 9);
+
+ start = block % 4;
+ if (start)
+ {
+ this_count = ((this_count > 4 - start) ?
+ (4 - start) : (this_count));
+ buffer = (unsigned char *) scsi_malloc(2048);
+ }
+ else if (this_count < 4)
+ {
+ buffer = (unsigned char *) scsi_malloc(2048);
+ }
+ else
+ {
+ this_count -= this_count % 4;
+ buffer = (unsigned char *) SCpnt->request.buffer;
+ if (((long) buffer) + (this_count << 9) > ISA_DMA_THRESHOLD &&
+ SCpnt->host->unchecked_isa_dma)
+ buffer = (unsigned char *) scsi_malloc(this_count << 9);
+ }
+ };
+
+ if (scsi_CDs[dev].sector_size == 2048)
+ block = block >> 2; /* These are the sectors that the cdrom uses */
+ else
+ block = block & 0xfffffffc;
+
+ realcount = (this_count + 3) / 4;
+
+ if (scsi_CDs[dev].sector_size == 512) realcount = realcount << 2;
+
+ if (((realcount > 0xff) || (block > 0x1fffff)) && scsi_CDs[dev].ten)
+ {
+ if (realcount > 0xffff)
+ {
+ realcount = 0xffff;
+ this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
+ }
+
+ cmd[0] += READ_10 - READ_6 ;
+ cmd[2] = (unsigned char) (block >> 24) & 0xff;
+ cmd[3] = (unsigned char) (block >> 16) & 0xff;
+ cmd[4] = (unsigned char) (block >> 8) & 0xff;
+ cmd[5] = (unsigned char) block & 0xff;
+ cmd[6] = cmd[9] = 0;
+ cmd[7] = (unsigned char) (realcount >> 8) & 0xff;
+ cmd[8] = (unsigned char) realcount & 0xff;
+ }
+ else
+ {
+ if (realcount > 0xff)
+ {
+ realcount = 0xff;
+ this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
+ }
+
+ cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ cmd[2] = (unsigned char) ((block >> 8) & 0xff);
+ cmd[3] = (unsigned char) block & 0xff;
+ cmd[4] = (unsigned char) realcount;
+ cmd[5] = 0;
+ }
+
+#ifdef DEBUG
+ {
+ int i;
+ printk("ReadCD: %d %d %d %d\n",block, realcount, buffer, this_count);
+ printk("Use sg: %d\n", SCpnt->use_sg);
+ printk("Dumping command: ");
+ for(i=0; i<12; i++) printk("%2.2x ", cmd[i]);
+ printk("\n");
+ };
+#endif
+
+ /* Some dumb host adapters can speed transfers by knowing the
+ * minimum transfersize in advance.
+ *
+ * We shouldn't disconnect in the middle of a sector, but the cdrom
+ * sector size can be larger than the size of a buffer and the
+ * transfer may be split to the size of a buffer. So it's safe to
+ * assume that we can at least transfer the minimum of the buffer
+ * size (1024) and the sector size between each connect / disconnect.
+ */
+
+ SCpnt->transfersize = (scsi_CDs[dev].sector_size > 1024) ?
+ 1024 : scsi_CDs[dev].sector_size;
+
+ SCpnt->this_count = this_count;
+ scsi_do_cmd (SCpnt, (void *) cmd, buffer,
+ realcount * scsi_CDs[dev].sector_size,
+ rw_intr, SR_TIMEOUT, MAX_RETRIES);
+}
+
+/*
+ * sr_detect -- "detect" callback of the sr driver template.
+ * Accepts CD-ROM and WORM devices only; announces the unit and bumps
+ * sr_template.dev_noticed.  Returns 1 if the device is ours, 0 otherwise.
+ */
+static int sr_detect(Scsi_Device * SDp){
+
+  if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return 0;
+
+#ifdef MACH
+  printk("Detected scsi CD-ROM cd%d at scsi%d, channel %d, id %d, lun %d\n",
+#else
+  printk("Detected scsi CD-ROM sr%d at scsi%d, channel %d, id %d, lun %d\n",
+#endif
+	 sr_template.dev_noticed++,
+	 SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+
+	 return 1;
+}
+
+/*
+ * sr_attach -- "attach" callback: bind a detected CD/WORM device to a
+ * free slot in the scsi_CDs[] table and install the request handler.
+ * Returns 0 on success, 1 if the device is not ours or the table is full
+ * (in which case the template's attached count is rolled back).
+ */
+static int sr_attach(Scsi_Device * SDp){
+ Scsi_CD * cpnt;
+ int i;
+
+ if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return 1;
+
+ if (sr_template.nr_dev >= sr_template.dev_max)
+ {
+ SDp->attached--;
+ return 1;
+ }
+
+ /* Find the first unused slot (device == NULL). */
+ for(cpnt = scsi_CDs, i=0; i<sr_template.dev_max; i++, cpnt++)
+ if(!cpnt->device) break;
+
+ if(i >= sr_template.dev_max) panic ("scsi_devices corrupt (sr)");
+
+ SDp->scsi_request_fn = do_sr_request;
+ scsi_CDs[i].device = SDp;
+ sr_template.nr_dev++;
+ if(sr_template.nr_dev > sr_template.dev_max)
+ panic ("scsi_devices corrupt (sr)");
+ return 0;
+}
+
+
+/*
+ * sr_init_done -- completion callback for the synchronous commands issued
+ * by get_sectorsize().  Marks the request done and wakes the sleeper
+ * blocked on the request semaphore, if any.
+ */
+static void sr_init_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+/*
+ * get_sectorsize -- issue READ CAPACITY (with up to 3 retries) to drive i
+ * and record its capacity and sector size in scsi_CDs[i].
+ *
+ * Odd sector sizes reported by some CD writers (2340/2352 bytes) and a
+ * reported size of 0 are coerced to 2048.  On failure, or when the drive
+ * reports an unsupported size, needs_sector_size stays set so the probe
+ * is retried later.  (The original code cleared needs_sector_size
+ * unconditionally after the switch, clobbering the flag set in the
+ * default: arm -- an unsupported size was therefore never re-probed.)
+ */
+void get_sectorsize(int i){
+ unsigned char cmd[12];
+ unsigned char *buffer;
+ int the_result, retries;
+ Scsi_Cmnd * SCpnt;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if (!buffer) {
+ /* DMA pool exhausted: keep safe defaults and leave the flag set
+ * so the probe is retried later. */
+ scsi_CDs[i].capacity = 0x1fffff;
+ scsi_CDs[i].sector_size = 2048;
+ scsi_CDs[i].needs_sector_size = 1;
+ return;
+ }
+ SCpnt = allocate_device(NULL, scsi_CDs[i].device, 1);
+
+ retries = 3;
+ do {
+ cmd[0] = READ_CAPACITY;
+ cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy */
+ SCpnt->cmd_len = 0;
+
+ memset(buffer, 0, 8);
+
+ /* Do the command and wait.. */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sr_init_done, SR_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ retries--;
+
+ } while(the_result && retries);
+
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+
+ wake_up(&SCpnt->device->device_wait);
+
+ if (the_result) {
+ /* Command failed after all retries: guess conservatively. */
+ scsi_CDs[i].capacity = 0x1fffff;
+ scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ scsi_CDs[i].needs_sector_size = 1;
+ } else {
+ /* READ CAPACITY returns last LBA (bytes 0-3) and block length
+ * (bytes 4-7), both big-endian. */
+ scsi_CDs[i].capacity = 1 + ((buffer[0] << 24) |
+ (buffer[1] << 16) |
+ (buffer[2] << 8) |
+ buffer[3]);
+ scsi_CDs[i].sector_size = (buffer[4] << 24) |
+ (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ switch (scsi_CDs[i].sector_size) {
+ /*
+ * HP 4020i CD-Recorder reports 2340 byte sectors
+ * Philips CD-Writers report 2352 byte sectors
+ *
+ * Use 2k sectors for them..
+ */
+ case 0: case 2340: case 2352:
+ scsi_CDs[i].sector_size = 2048;
+ /* fall through */
+ case 2048:
+ scsi_CDs[i].capacity *= 4;
+ /* fall through */
+ case 512:
+ /* Usable sector size: the probe succeeded. */
+ scsi_CDs[i].needs_sector_size = 0;
+ break;
+ default:
+#ifdef MACH
+ printk ("cd%d : unsupported sector size %d.\n",
+ i, scsi_CDs[i].sector_size);
+#else
+ printk ("scd%d : unsupported sector size %d.\n",
+ i, scsi_CDs[i].sector_size);
+#endif
+ scsi_CDs[i].capacity = 0;
+ scsi_CDs[i].needs_sector_size = 1;
+ }
+ sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
+ };
+ scsi_free(buffer, 512);
+}
+
+static int sr_registered = 0;
+
+/*
+ * sr_init -- "init" callback: register the block device major and
+ * allocate the per-drive tables (scsi_CDs, sr_sizes, sr_blocksizes).
+ * Idempotent: re-registration and re-allocation are skipped on later
+ * calls.  Returns 0 on success, 1 if the major number is taken.
+ *
+ * NOTE(review): the scsi_init_malloc() results are used unchecked --
+ * presumably GFP_ATOMIC allocation at init time is assumed to succeed;
+ * confirm before relying on this under memory pressure.
+ */
+static int sr_init()
+{
+ int i;
+
+ if(sr_template.dev_noticed == 0) return 0;
+
+ if(!sr_registered) {
+ if (register_blkdev(MAJOR_NR,"sr",&sr_fops)) {
+ printk("Unable to get major %d for SCSI-CD\n",MAJOR_NR);
+ return 1;
+ }
+ sr_registered++;
+ }
+
+
+ if (scsi_CDs) return 0;
+ /* Leave room for hot-added devices beyond those noticed at probe. */
+ sr_template.dev_max = sr_template.dev_noticed + SR_EXTRA_DEVS;
+ scsi_CDs = (Scsi_CD *) scsi_init_malloc(sr_template.dev_max * sizeof(Scsi_CD), GFP_ATOMIC);
+ memset(scsi_CDs, 0, sr_template.dev_max * sizeof(Scsi_CD));
+
+ sr_sizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC);
+ memset(sr_sizes, 0, sr_template.dev_max * sizeof(int));
+
+ sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
+ sizeof(int), GFP_ATOMIC);
+ /* CD-ROM filesystem block size defaults to 2048 bytes per drive. */
+ for(i=0;i<sr_template.dev_max;i++) sr_blocksizes[i] = 2048;
+ blksize_size[MAJOR_NR] = sr_blocksizes;
+ return 0;
+}
+
+/*
+ * sr_finish -- "finish" callback: wire the request function and size
+ * table into the block layer and give each newly attached drive safe
+ * default geometry (real values come later from get_sectorsize()).
+ */
+void sr_finish()
+{
+ int i;
+
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_size[MAJOR_NR] = sr_sizes;
+
+ for (i = 0; i < sr_template.nr_dev; ++i)
+ {
+ /* If we have already seen this, then skip it. Comes up
+ * with loadable modules. */
+ if (scsi_CDs[i].capacity) continue;
+ scsi_CDs[i].capacity = 0x1fffff;
+ scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ scsi_CDs[i].needs_sector_size = 1;
+#if 0
+ /* seems better to leave this for later */
+ get_sectorsize(i);
+ printk("Scd sectorsize = %d bytes.\n", scsi_CDs[i].sector_size);
+#endif
+ scsi_CDs[i].use = 1;
+ scsi_CDs[i].ten = 1;
+ scsi_CDs[i].remap = 1;
+ scsi_CDs[i].auto_eject = 0; /* Default is not to eject upon unmount. */
+ sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
+ }
+
+
+ /* If our host adapter is capable of scatter-gather, then we increase
+ * the read-ahead to 16 blocks (32 sectors). If not, we use
+ * a two block (4 sector) read ahead. */
+ if(scsi_CDs[0].device && scsi_CDs[0].device->host->sg_tablesize)
+ read_ahead[MAJOR_NR] = 32; /* 32 sector read-ahead. Always removable. */
+ else
+ read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
+
+ return;
+}
+
+/*
+ * sr_detach -- "detach" callback: drop the scsi_CDs[] slot bound to SDp,
+ * flush cached inodes/buffers for its device node, and roll back the
+ * template's device counters so the driver can be re-loaded cleanly.
+ */
+static void sr_detach(Scsi_Device * SDp)
+{
+ Scsi_CD * cpnt;
+ int i;
+
+ for(cpnt = scsi_CDs, i=0; i<sr_template.dev_max; i++, cpnt++)
+ if(cpnt->device == SDp) {
+ kdev_t devi = MKDEV(MAJOR_NR, i);
+
+ /*
+ * Since the cdrom is read-only, no need to sync the device.
+ * We should be kind to our buffer cache, however.
+ */
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+
+ /*
+ * Reset things back to a sane state so that one can re-load a new
+ * driver (perhaps the same one).
+ */
+ cpnt->device = NULL;
+ cpnt->capacity = 0;
+ SDp->attached--;
+ sr_template.nr_dev--;
+ sr_template.dev_noticed--;
+ sr_sizes[i] = 0;
+ return;
+ }
+ return;
+}
+
+
+#ifdef MODULE
+
+/* Module entry point: register this driver with the SCSI midlayer. */
+int init_module(void) {
+ sr_template.usage_count = &mod_use_count_;
+ return scsi_register_module(MODULE_SCSI_DEV, &sr_template);
+}
+
+/*
+ * Module exit: unregister from the SCSI midlayer and the block layer,
+ * free the per-drive tables, and clear the block-layer hooks.
+ *
+ * NOTE(review): scsi_CDs is freed with size (dev_noticed + SR_EXTRA_DEVS)
+ * although it was allocated with dev_max elements; the two are equal at
+ * sr_init() time but dev_noticed is decremented by sr_detach(), so they
+ * can differ here -- verify against scsi_init_free()'s requirements.
+ */
+void cleanup_module( void)
+{
+ scsi_unregister_module(MODULE_SCSI_DEV, &sr_template);
+ unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
+ sr_registered--;
+ if(scsi_CDs != NULL) {
+ scsi_init_free((char *) scsi_CDs,
+ (sr_template.dev_noticed + SR_EXTRA_DEVS)
+ * sizeof(Scsi_CD));
+
+ scsi_init_free((char *) sr_sizes, sr_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int));
+ }
+
+ blksize_size[MAJOR_NR] = NULL;
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_size[MAJOR_NR] = NULL;
+ read_ahead[MAJOR_NR] = 0;
+
+ sr_template.dev_max = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/sr.h b/linux/src/drivers/scsi/sr.h
new file mode 100644
index 0000000..381678a
--- /dev/null
+++ b/linux/src/drivers/scsi/sr.h
@@ -0,0 +1,40 @@
+/*
+ * sr.h by David Giller
+ * CD-ROM disk driver header file
+ *
+ * adapted from:
+ * sd.h Copyright (C) 1992 Drew Eckhardt
+ * SCSI disk driver header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+
+#ifndef _SR_H
+#define _SR_H
+
+#include "scsi.h"
+
+/* Per-drive state kept by the sr driver, one entry per scsi_CDs[] slot. */
+typedef struct
+ {
+ unsigned capacity; /* size in blocks */
+ unsigned sector_size; /* size in bytes */
+ Scsi_Device *device;
+ unsigned long mpcd_sector; /* for reading multisession-CD's */
+ char xa_flags; /* some flags for handling XA-CD's */
+ unsigned char sector_bit_size; /* sector size = 2^sector_bit_size */
+ unsigned char sector_bit_shift; /* sectors/FS block = 2^sector_bit_shift*/
+ unsigned needs_sector_size:1; /* needs to get sector size */
+ unsigned ten:1; /* support ten byte commands */
+ unsigned remap:1; /* support remapping */
+ unsigned use:1; /* is this device still supportable */
+ unsigned auto_eject:1; /* auto-eject medium on last release. */
+ } Scsi_CD;
+
+extern Scsi_CD * scsi_CDs;
+
+#endif
diff --git a/linux/src/drivers/scsi/sr_ioctl.c b/linux/src/drivers/scsi/sr_ioctl.c
new file mode 100644
index 0000000..6d8b633
--- /dev/null
+++ b/linux/src/drivers/scsi/sr_ioctl.c
@@ -0,0 +1,607 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <asm/segment.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sr.h"
+#include <scsi/scsi_ioctl.h>
+
+#include <linux/cdrom.h>
+
+extern void get_sectorsize(int);
+extern void sr_photocd(struct inode *);
+
+#define IOCTL_RETRIES 3
+/* The CDROM is fairly slow, so we need a little extra time */
+/* In fact, it is very slow if it has to spin up first */
+#define IOCTL_TIMEOUT 3000
+
+/*
+ * sr_ioctl_done -- completion callback for synchronous ioctl commands:
+ * mark the request done and wake the sleeper on the request semaphore.
+ */
+static void sr_ioctl_done(Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+/* We do our own retries because we want to know what the specific
+ error code is. Normally the UNIT_ATTENTION code will automatically
+ clear after one error */
+
+/*
+ * do_ioctl -- synchronously execute one SCSI command on drive `target`,
+ * sleeping until completion, and report well-known sense conditions
+ * (unit attention, not ready, illegal request) on the console.
+ * Returns the raw SCSI result word (0 on success).
+ */
+static int do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength)
+{
+ Scsi_Cmnd * SCpnt;
+ int result;
+
+ SCpnt = allocate_device(NULL, scsi_CDs[target].device, 1);
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt,
+ (void *) sr_cmd, buffer, buflength, sr_ioctl_done,
+ IOCTL_TIMEOUT, IOCTL_RETRIES);
+ down(&sem);
+ }
+
+ result = SCpnt->result;
+
+ /* Minimal error checking. Ignore cases we know about, and report the rest. */
+ if(driver_byte(result) != 0)
+ switch(SCpnt->sense_buffer[2] & 0xf) {
+ case UNIT_ATTENTION:
+ scsi_CDs[target].device->changed = 1;
+ printk("Disc change detected.\n");
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ printk("CDROM not ready. Make sure there is a disc in the drive.\n");
+ break;
+ case ILLEGAL_REQUEST:
+ /* CDROMCLOSETRAY should not print an error for caddy drives. */
+ if (!(sr_cmd[0] == START_STOP && sr_cmd[4] == 0x03))
+ printk("CDROM (ioctl) reports ILLEGAL REQUEST.\n");
+ break;
+ default:
+ printk("SCSI CD error: host %d id %d lun %d return code = %03x\n",
+ scsi_CDs[target].device->host->host_no,
+ scsi_CDs[target].device->id,
+ scsi_CDs[target].device->lun,
+ result);
+ printk("\tSense class %x, sense error %x, extended sense %x\n",
+ sense_class(SCpnt->sense_buffer[0]),
+ sense_error(SCpnt->sense_buffer[0]),
+ SCpnt->sense_buffer[2] & 0xf);
+
+ };
+
+ /* (Redundant re-read of the result word kept for fidelity.) */
+ result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Deallocate */
+ wake_up(&SCpnt->device->device_wait);
+ /* Wake up a process waiting for device*/
+ return result;
+}
+
+/*
+ * sr_ioctl -- ioctl entry point for the SCSI CD-ROM driver.
+ * Translates the Sun-compatible cdrom ioctls (play/pause/TOC/volume/
+ * eject/multisession, etc.) into SCSI CDBs executed via do_ioctl(),
+ * copying arguments to and from user space with the verify_area /
+ * memcpy_fromfs / memcpy_tofs protocol of this kernel generation.
+ * Unknown commands fall through to the generic scsi_ioctl().
+ */
+int sr_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
+{
+ u_char sr_cmd[12];
+
+ kdev_t dev = inode->i_rdev;
+ int result, target, err;
+
+ target = MINOR(dev);
+
+ if (target >= sr_template.nr_dev ||
+ !scsi_CDs[target].device) return -ENXIO;
+
+ switch (cmd)
+ {
+ /* Sun-compatible */
+ case CDROMPAUSE:
+
+ sr_cmd[0] = SCMD_PAUSE_RESUME;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = 0;
+ sr_cmd[5] = sr_cmd[6] = sr_cmd[7] = 0;
+ sr_cmd[8] = 0; /* resume bit clear = pause */
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMRESUME:
+
+ sr_cmd[0] = SCMD_PAUSE_RESUME;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = 0;
+ sr_cmd[5] = sr_cmd[6] = sr_cmd[7] = 0;
+ sr_cmd[8] = 1; /* resume bit set */
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+
+ return result;
+
+ case CDROMPLAYMSF:
+ {
+ struct cdrom_msf msf;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (msf));
+ if (err) return err;
+
+ memcpy_fromfs(&msf, (void *) arg, sizeof(msf));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO_MSF;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = 0;
+ sr_cmd[3] = msf.cdmsf_min0;
+ sr_cmd[4] = msf.cdmsf_sec0;
+ sr_cmd[5] = msf.cdmsf_frame0;
+ sr_cmd[6] = msf.cdmsf_min1;
+ sr_cmd[7] = msf.cdmsf_sec1;
+ sr_cmd[8] = msf.cdmsf_frame1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+ }
+
+ case CDROMPLAYBLK:
+ {
+ struct cdrom_blk blk;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (blk));
+ if (err) return err;
+
+ memcpy_fromfs(&blk, (void *) arg, sizeof(blk));
+
+ /* PLAY AUDIO (10): big-endian LBA in bytes 2-5, length in 7-8. */
+ sr_cmd[0] = SCMD_PLAYAUDIO10;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = blk.from >> 24;
+ sr_cmd[3] = blk.from >> 16;
+ sr_cmd[4] = blk.from >> 8;
+ sr_cmd[5] = blk.from;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = blk.len >> 8;
+ sr_cmd[8] = blk.len;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+ }
+
+ case CDROMPLAYTRKIND:
+ {
+ struct cdrom_ti ti;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (ti));
+ if (err) return err;
+
+ memcpy_fromfs(&ti, (void *) arg, sizeof(ti));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO_TI;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = 0;
+ sr_cmd[3] = 0;
+ sr_cmd[4] = ti.cdti_trk0;
+ sr_cmd[5] = ti.cdti_ind0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = ti.cdti_trk1;
+ sr_cmd[8] = ti.cdti_ind1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+
+ return result;
+ }
+
+ case CDROMREADTOCHDR:
+ {
+ struct cdrom_tochdr tochdr;
+ char * buffer;
+
+ sr_cmd[0] = SCMD_READ_TOC;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 12);
+
+ /* TOC header: first and last track numbers at offsets 2 and 3. */
+ tochdr.cdth_trk0 = buffer[2];
+ tochdr.cdth_trk1 = buffer[3];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tochdr));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &tochdr, sizeof (struct cdrom_tochdr));
+
+ return result;
+ }
+
+ case CDROMREADTOCENTRY:
+ {
+ struct cdrom_tocentry tocentry;
+ unsigned char * buffer;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (struct cdrom_tocentry));
+ if (err) return err;
+
+ memcpy_fromfs (&tocentry, (void *) arg, sizeof (struct cdrom_tocentry));
+
+ sr_cmd[0] = SCMD_READ_TOC;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) |
+ (tocentry.cdte_format == CDROM_MSF ? 0x02 : 0);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = tocentry.cdte_track;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl (target, sr_cmd, buffer, 12);
+
+ tocentry.cdte_ctrl = buffer[5] & 0xf;
+ tocentry.cdte_adr = buffer[5] >> 4;
+ tocentry.cdte_datamode = (tocentry.cdte_ctrl & 0x04) ? 1 : 0;
+ if (tocentry.cdte_format == CDROM_MSF) {
+ tocentry.cdte_addr.msf.minute = buffer[9];
+ tocentry.cdte_addr.msf.second = buffer[10];
+ tocentry.cdte_addr.msf.frame = buffer[11];
+ }
+ else
+ tocentry.cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ + buffer[10]) << 8) + buffer[11];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tocentry));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &tocentry, sizeof (struct cdrom_tocentry));
+
+ return result;
+ }
+
+ case CDROMSTOP:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 1; /* immediate */
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0; /* stop the unit */
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMSTART:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 1; /* immediate */
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 1; /* start the unit */
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMCLOSETRAY:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0x03; /* LoEj + Start = close tray */
+
+ if ((result = do_ioctl(target, sr_cmd, NULL, 255)))
+ return result;
+
+ /* Gather information about newly inserted disc */
+ check_disk_change (inode->i_rdev);
+ sr_ioctl (inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+ sr_photocd (inode);
+
+ if (scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size)
+ get_sectorsize (MINOR(inode->i_rdev));
+
+ return 0;
+
+ case CDROMEJECT:
+ /*
+ * Allow 0 for access count for auto-eject feature.
+ */
+ if (scsi_CDs[target].device -> access_count > 1)
+ return -EBUSY;
+
+ sr_ioctl (inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5) | 1;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0x02; /* LoEj, Start clear = eject */
+
+ if (!(result = do_ioctl(target, sr_cmd, NULL, 255)))
+ scsi_CDs[target].device -> changed = 1;
+
+ return result;
+
+ case CDROMEJECT_SW:
+ scsi_CDs[target].auto_eject = !!arg;
+ return 0;
+
+ case CDROMVOLCTRL:
+ {
+ char * buffer, * mask;
+ struct cdrom_volctrl volctrl;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (struct cdrom_volctrl));
+ if (err) return err;
+
+ memcpy_fromfs (&volctrl, (void *) arg, sizeof (struct cdrom_volctrl));
+
+ /* First we get the current params so we can just twiddle the volume */
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0xe; /* Want mode page 0xe, CDROM audio params */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ if ((result = do_ioctl (target, sr_cmd, buffer, 28))) {
+ printk ("Hosed while obtaining audio mode page\n");
+ scsi_free(buffer, 512);
+ return result;
+ }
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0x4e; /* Want the mask for mode page 0xe */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ mask = (unsigned char *) scsi_malloc(512);
+ if(!mask) {
+ scsi_free(buffer, 512);
+ return -ENOMEM;
+ };
+
+ if ((result = do_ioctl (target, sr_cmd, mask, 28))) {
+ printk ("Hosed while obtaining mask for audio mode page\n");
+ scsi_free(buffer, 512);
+ scsi_free(mask, 512);
+ return result;
+ }
+
+ /* Now mask and substitute our own volume and reuse the rest */
+ buffer[0] = 0; /* Clear reserved field */
+
+ buffer[21] = volctrl.channel0 & mask[21];
+ buffer[23] = volctrl.channel1 & mask[23];
+ buffer[25] = volctrl.channel2 & mask[25];
+ buffer[27] = volctrl.channel3 & mask[27];
+
+ sr_cmd[0] = MODE_SELECT;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5) | 0x10; /* Params are SCSI-2 */
+ sr_cmd[2] = sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ result = do_ioctl (target, sr_cmd, buffer, 28);
+ scsi_free(buffer, 512);
+ scsi_free(mask, 512);
+ return result;
+ }
+
+ case CDROMVOLREAD:
+ {
+ char * buffer;
+ struct cdrom_volctrl volctrl;
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_volctrl));
+ if (err) return err;
+
+ /* Get the current params */
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0xe; /* Want mode page 0xe, CDROM audio params */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ if ((result = do_ioctl (target, sr_cmd, buffer, 28))) {
+ printk ("(CDROMVOLREAD) Hosed while obtaining audio mode page\n");
+ scsi_free(buffer, 512);
+ return result;
+ }
+
+ /* Per-channel volume levels live at fixed offsets in page 0xe. */
+ volctrl.channel0 = buffer[21];
+ volctrl.channel1 = buffer[23];
+ volctrl.channel2 = buffer[25];
+ volctrl.channel3 = buffer[27];
+
+ memcpy_tofs ((void *) arg, &volctrl, sizeof (struct cdrom_volctrl));
+
+ scsi_free(buffer, 512);
+
+ return 0;
+ }
+
+ case CDROMSUBCHNL:
+ {
+ struct cdrom_subchnl subchnl;
+ char * buffer;
+
+ sr_cmd[0] = SCMD_READ_SUBCHANNEL;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 0x02; /* MSF format */
+ sr_cmd[2] = 0x40; /* I do want the subchannel info */
+ sr_cmd[3] = 0x01; /* Give me current position info */
+ sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0;
+ sr_cmd[8] = 16;
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char*) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 16);
+
+ subchnl.cdsc_audiostatus = buffer[1];
+ subchnl.cdsc_format = CDROM_MSF;
+ subchnl.cdsc_ctrl = buffer[5] & 0xf;
+ subchnl.cdsc_trk = buffer[6];
+ subchnl.cdsc_ind = buffer[7];
+
+ subchnl.cdsc_reladdr.msf.minute = buffer[13];
+ subchnl.cdsc_reladdr.msf.second = buffer[14];
+ subchnl.cdsc_reladdr.msf.frame = buffer[15];
+ subchnl.cdsc_absaddr.msf.minute = buffer[9];
+ subchnl.cdsc_absaddr.msf.second = buffer[10];
+ subchnl.cdsc_absaddr.msf.frame = buffer[11];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_subchnl));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &subchnl, sizeof (struct cdrom_subchnl));
+ return result;
+ }
+
+ case CDROM_GET_UPC:
+ {
+ struct cdrom_mcn mcn;
+ char * buffer;
+
+ sr_cmd[0] = SCMD_READ_SUBCHANNEL;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
+ sr_cmd[2] = 0x40; /* I do want the subchannel info */
+ sr_cmd[3] = 0x02; /* Give me medium catalog number info */
+ sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0;
+ sr_cmd[8] = 24;
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char*) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 24);
+
+ memcpy (mcn.medium_catalog_number, buffer + 9, 13);
+ mcn.medium_catalog_number[13] = 0;
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_mcn));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &mcn, sizeof (struct cdrom_mcn));
+ return result;
+ }
+
+ /* these are compatible with the ide-cd driver */
+ case CDROMREADRAW:
+ case CDROMREADMODE1:
+ case CDROMREADMODE2:
+ return -EINVAL;
+
+ /* block-copy from ../block/sbpcd.c with some adjustments... */
+ case CDROMMULTISESSION: /* tell start-of-last-session to user */
+ {
+ struct cdrom_multisession ms_info;
+ long lba;
+
+ err = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct cdrom_multisession));
+ if (err) return (err);
+
+ memcpy_fromfs(&ms_info, (void *) arg, sizeof(struct cdrom_multisession));
+
+ if (ms_info.addr_format==CDROM_MSF) { /* MSF-bin requested */
+ lba = scsi_CDs[target].mpcd_sector+CD_BLOCK_OFFSET;
+ ms_info.addr.msf.minute = lba / (CD_SECS*CD_FRAMES);
+ lba %= CD_SECS*CD_FRAMES;
+ ms_info.addr.msf.second = lba / CD_FRAMES;
+ ms_info.addr.msf.frame = lba % CD_FRAMES;
+ } else if (ms_info.addr_format==CDROM_LBA) /* lba requested */
+ ms_info.addr.lba=scsi_CDs[target].mpcd_sector;
+ else return (-EINVAL);
+
+ ms_info.xa_flag=scsi_CDs[target].xa_flags & 0x01;
+
+ err=verify_area(VERIFY_WRITE,(void *) arg,
+ sizeof(struct cdrom_multisession));
+ if (err) return (err);
+
+ memcpy_tofs((void *) arg, &ms_info, sizeof(struct cdrom_multisession));
+ return (0);
+ }
+
+ case BLKRAGET:
+ if (!arg)
+ return -EINVAL;
+ err = verify_area(VERIFY_WRITE, (int *) arg, sizeof(int));
+ if (err)
+ return err;
+ put_user(read_ahead[MAJOR(inode->i_rdev)], (int *) arg);
+ return 0;
+
+ case BLKRASET:
+ if(!suser())
+ return -EACCES;
+ if(!(inode->i_rdev))
+ return -EINVAL;
+ if(arg > 0xff)
+ return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ RO_IOCTLS(dev,arg);
+
+ case CDROMRESET:
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ default:
+ return scsi_ioctl(scsi_CDs[target].device,cmd,(void *) arg);
+ }
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/sym53c8xx.c b/linux/src/drivers/scsi/sym53c8xx.c
new file mode 100644
index 0000000..f496954
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx.c
@@ -0,0 +1,14696 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+/*
+** Supported SCSI features:
+** Synchronous data transfers
+** Wide16 SCSI BUS
+** Disconnection/Reselection
+** Tagged command queuing
+** SCSI Parity checking
+**
+** Supported NCR/SYMBIOS chips:
+** 53C810A (8 bits, Fast 10, no rom BIOS)
+** 53C825A (Wide, Fast 10, on-board rom BIOS)
+** 53C860 (8 bits, Fast 20, no rom BIOS)
+** 53C875 (Wide, Fast 20, on-board rom BIOS)
+** 53C876 (Wide, Fast 20 Dual, on-board rom BIOS)
+** 53C895 (Wide, Fast 40, on-board rom BIOS)
+** 53C895A (Wide, Fast 40, on-board rom BIOS)
+** 53C896 (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C897 (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C1510D (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C1010 (Wide, Fast 80 Dual, on-board rom BIOS)
+** 53C1010_66(Wide, Fast 80 Dual, on-board rom BIOS, 33/66MHz PCI)
+**
+** Other features:
+** Memory mapped IO
+** Module
+** Shared IRQ
+*/
+
+/*
+** Name and version of the driver
+*/
+#define SCSI_NCR_DRIVER_NAME "sym53c8xx-1.7.1-20000726"
+
+#define SCSI_NCR_DEBUG_FLAGS (0)
+
+#define NAME53C "sym53c"
+#define NAME53C8XX "sym53c8xx"
+
+/*==========================================================
+**
+** Include files
+**
+**==========================================================
+*/
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,17)
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+#include <asm/spinlock.h>
+#endif
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/stat.h>
+
+#include <linux/version.h>
+#include <linux/blk.h>
+
+#ifdef CONFIG_ALL_PPC
+#include <asm/prom.h>
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,35)
+#include <linux/init.h>
+#endif
+
+#ifndef __init
+#define __init
+#endif
+#ifndef __initdata
+#define __initdata
+#endif
+
+#if LINUX_VERSION_CODE <= LinuxVersionCode(2,1,92)
+#include <linux/bios32.h>
+#endif
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include "sd.h"
+
+#include <linux/types.h>
+
+/*
+** Define BITS_PER_LONG for earlier linux versions.
+*/
+#ifndef BITS_PER_LONG
+#if (~0UL) == 0xffffffffUL
+#define BITS_PER_LONG 32
+#else
+#define BITS_PER_LONG 64
+#endif
+#endif
+
+/*
+** Define the BSD style u_int32 and u_int64 type.
+** Are in fact u_int32_t and u_int64_t :-)
+*/
+typedef u32 u_int32;
+typedef u64 u_int64;
+
+#include "sym53c8xx.h"
+
+/*
+** Do not compile integrity checking code for Linux-2.3.0
+** and above since SCSI data structures are not ready yet.
+*/
+/* #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0) */
+#if 0
+#define SCSI_NCR_INTEGRITY_CHECKING
+#endif
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+
+/*
+** Hmmm... What complex some PCI-HOST bridges actually are,
+** despite the fact that the PCI specifications are looking
+** so smart and simple! ;-)
+*/
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,47)
+#define SCSI_NCR_DYNAMIC_DMA_MAPPING
+#endif
+
+/*==========================================================
+**
+** A la VMS/CAM-3 queue management.
+** Implemented from linux list management.
+**
+**==========================================================
+*/
+
+typedef struct xpt_quehead {
+ struct xpt_quehead *flink; /* Forward pointer */
+ struct xpt_quehead *blink; /* Backward pointer */
+} XPT_QUEHEAD;
+
+#define xpt_que_init(ptr) do { \
+ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+static inline void __xpt_que_add(struct xpt_quehead * new,
+ struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = new;
+ new->flink = flink;
+ new->blink = blink;
+ blink->flink = new;
+}
+
+static inline void __xpt_que_del(struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = blink;
+ blink->flink = flink;
+}
+
+static inline int xpt_que_empty(struct xpt_quehead *head)
+{
+ return head->flink == head;
+}
+
+static inline void xpt_que_splice(struct xpt_quehead *list,
+ struct xpt_quehead *head)
+{
+ struct xpt_quehead *first = list->flink;
+
+ if (first != list) {
+ struct xpt_quehead *last = list->blink;
+ struct xpt_quehead *at = head->flink;
+
+ first->blink = head;
+ head->flink = first;
+
+ last->flink = at;
+ at->blink = last;
+ }
+}
+
+#define xpt_que_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+
+#define xpt_insque(new, pos) __xpt_que_add(new, pos, (pos)->flink)
+
+#define xpt_remque(el) __xpt_que_del((el)->blink, (el)->flink)
+
+#define xpt_insque_head(new, head) __xpt_que_add(new, head, (head)->flink)
+
+static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->flink;
+
+ if (elem != head)
+ __xpt_que_del(head, elem->flink);
+ else
+ elem = 0;
+ return elem;
+}
+
+#define xpt_insque_tail(new, head) __xpt_que_add(new, (head)->blink, head)
+
+static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->blink;
+
+ if (elem != head)
+ __xpt_que_del(elem->blink, head);
+ else
+ elem = 0;
+ return elem;
+}
+
+/*==========================================================
+**
+** Configuration and Debugging
+**
+**==========================================================
+*/
+
+/*
+** SCSI address of this device.
+** The boot routines should have set it.
+** If not, use this.
+*/
+
+#ifndef SCSI_NCR_MYADDR
+#define SCSI_NCR_MYADDR (7)
+#endif
+
+/*
+** The maximum number of tags per logic unit.
+** Used only for devices that support tags.
+*/
+
+#ifndef SCSI_NCR_MAX_TAGS
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+** TAGS are actually unlimited (256 tags/lun).
+** But Linux only supports 255. :)
+*/
+#if SCSI_NCR_MAX_TAGS > 255
+#define MAX_TAGS 255
+#else
+#define MAX_TAGS SCSI_NCR_MAX_TAGS
+#endif
+
+/*
+** Since the ncr chips only have a 8 bit ALU, we try to be clever
+** about offset calculation in the TASK TABLE per LUN that is an
+** array of DWORDS = 4 bytes.
+*/
+#if MAX_TAGS > (512/4)
+#define MAX_TASKS (1024/4)
+#elif MAX_TAGS > (256/4)
+#define MAX_TASKS (512/4)
+#else
+#define MAX_TASKS (256/4)
+#endif
+
+/*
+** This one means 'NO TAG for this job'
+*/
+#define NO_TAG (256)
+
+/*
+** Number of targets supported by the driver.
+** n permits target numbers 0..n-1.
+** Default is 16, meaning targets #0..#15.
+** #7 .. is myself.
+*/
+
+#ifdef SCSI_NCR_MAX_TARGET
+#define MAX_TARGET (SCSI_NCR_MAX_TARGET)
+#else
+#define MAX_TARGET (16)
+#endif
+
+/*
+** Number of logic units supported by the driver.
+** n enables logic unit numbers 0..n-1.
+** The common SCSI devices require only
+** one lun, so take 1 as the default.
+*/
+
+#ifdef SCSI_NCR_MAX_LUN
+#define MAX_LUN 64
+#else
+#define MAX_LUN (1)
+#endif
+
+/*
+** Asynchronous pre-scaler (ns). Shall be 40 for
+** the SCSI timings to be compliant.
+*/
+
+#ifndef SCSI_NCR_MIN_ASYNC
+#define SCSI_NCR_MIN_ASYNC (40)
+#endif
+
+/*
+** The maximum number of jobs scheduled for starting.
+** We allocate 4 entries more than the value we announce
+** to the SCSI upper layer. Guess why ! :-)
+*/
+
+#ifdef SCSI_NCR_CAN_QUEUE
+#define MAX_START (SCSI_NCR_CAN_QUEUE + 4)
+#else
+#define MAX_START (MAX_TARGET + 7 * MAX_TAGS)
+#endif
+
+/*
+** We do not want to allocate more than 1 PAGE for
+** the start queue and the done queue. We hard-code entry
+** size to 8 in order to let cpp do the checking.
+** Allows 512-4=508 pending IOs for i386 but Linux seems for
+** now not able to provide the driver with this amount of IOs.
+*/
+#if MAX_START > PAGE_SIZE/8
+#undef MAX_START
+#define MAX_START (PAGE_SIZE/8)
+#endif
+
+/*
+** The maximum number of segments a transfer is split into.
+** We support up to 127 segments for both read and write.
+*/
+
+#define MAX_SCATTER (SCSI_NCR_MAX_SCATTER)
+#define SCR_SG_SIZE (2)
+
+/*
+** other
+*/
+
+#define NCR_SNOOP_TIMEOUT (1000000)
+
+/*==========================================================
+**
+** Miscellaneous BSDish defines.
+**
+**==========================================================
+*/
+
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef bcopy
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#endif
+
+#ifndef bzero
+#define bzero(d, n) memset((d), 0, (n))
+#endif
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*
+** Simple Wrapper to kernel PCI bus interface.
+**
+** This wrapper allows us to get rid of the old kernel PCI interface
+** and still allows us to preserve linux-2.0 compatibility.
+** In fact, it is mostly an incomplete emulation of the new
+** PCI code for pre-2.2 kernels. When kernel-2.0 support
+** will be dropped, we will just have to remove most of this
+** code.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0)
+
+typedef struct pci_dev *pcidev_t;
+#define PCIDEV_NULL (0)
+#define PciBusNumber(d) (d)->bus->number
+#define PciDeviceFn(d) (d)->devfn
+#define PciVendorId(d) (d)->vendor
+#define PciDeviceId(d) (d)->device
+#define PciIrqLine(d) (d)->irq
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->resource[index].start;
+ if ((pdev->resource[index].flags & 0x7) == 0x4)
+ ++index;
+ return ++index;
+}
+#else
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->base_address[index++];
+ if ((*base & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ *base |= (((u_long)pdev->base_address[index]) << 32);
+#endif
+ ++index;
+ }
+ return index;
+}
+#endif
+
+#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
+
+typedef unsigned int pcidev_t;
+#define PCIDEV_NULL (~0u)
+#define PciBusNumber(d) ((d)>>8)
+#define PciDeviceFn(d) ((d)&0xff)
+#define __PciDev(busn, devfn) (((busn)<<8)+(devfn))
+
+#define pci_present pcibios_present
+
+#define pci_read_config_byte(d, w, v) \
+ pcibios_read_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_word(d, w, v) \
+ pcibios_read_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_dword(d, w, v) \
+ pcibios_read_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+#define pci_write_config_byte(d, w, v) \
+ pcibios_write_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_word(d, w, v) \
+ pcibios_write_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_dword(d, w, v) \
+ pcibios_write_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+static pcidev_t __init
+pci_find_device(unsigned int vendor, unsigned int device, pcidev_t prev)
+{
+ static unsigned short pci_index;
+ int retv;
+ unsigned char bus_number, device_fn;
+
+ if (prev == PCIDEV_NULL)
+ pci_index = 0;
+ else
+ ++pci_index;
+ retv = pcibios_find_device (vendor, device, pci_index,
+ &bus_number, &device_fn);
+ return retv ? PCIDEV_NULL : __PciDev(bus_number, device_fn);
+}
+
+static u_short __init PciVendorId(pcidev_t dev)
+{
+ u_short vendor_id;
+ pci_read_config_word(dev, PCI_VENDOR_ID, &vendor_id);
+ return vendor_id;
+}
+
+static u_short __init PciDeviceId(pcidev_t dev)
+{
+ u_short device_id;
+ pci_read_config_word(dev, PCI_DEVICE_ID, &device_id);
+ return device_id;
+}
+
+static u_int __init PciIrqLine(pcidev_t dev)
+{
+ u_char irq;
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ return irq;
+}
+
+static int __init
+pci_get_base_address(pcidev_t dev, int offset, u_long *base)
+{
+ u_int32 tmp;
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base = tmp;
+ offset += sizeof(u_int32);
+ if ((tmp & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base |= (((u_long)tmp) << 32);
+#endif
+ offset += sizeof(u_int32);
+ }
+ return offset;
+}
+
+#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_POINTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_IC (0x0800)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+/*
+** SMP threading.
+**
+** Assuming that SMP systems are generally high end systems and may
+** use several SCSI adapters, we are using one lock per controller
+** instead of some global one. For the moment (linux-2.1.95), driver's
+** entry points are called with the 'io_request_lock' lock held, so:
+** - We are uselessly losing a couple of micro-seconds to lock the
+** controller data structure.
+** - But the driver is not broken by design for SMP and so can be
+** more resistant to bugs or bad changes in the IO sub-system code.
+** - A small advantage could be that the interrupt code is grained as
+** wished (e.g.: threaded by controller).
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+
+spinlock_t sym53c8xx_lock = SPIN_LOCK_UNLOCKED;
+#define NCR_LOCK_DRIVER(flags) spin_lock_irqsave(&sym53c8xx_lock, flags)
+#define NCR_UNLOCK_DRIVER(flags) spin_unlock_irqrestore(&sym53c8xx_lock,flags)
+
+#define NCR_INIT_LOCK_NCB(np) spin_lock_init(&np->smp_lock);
+#define NCR_LOCK_NCB(np, flags) spin_lock_irqsave(&np->smp_lock, flags)
+#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) \
+ spin_lock_irqsave(&io_request_lock, flags)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) \
+ spin_unlock_irqrestore(&io_request_lock, flags)
+
+#else
+
+#define NCR_LOCK_DRIVER(flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_DRIVER(flags) do { restore_flags(flags); } while (0)
+
+#define NCR_INIT_LOCK_NCB(np) do { } while (0)
+#define NCR_LOCK_NCB(np, flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_NCB(np, flags) do { restore_flags(flags); } while (0)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) do {;} while (0)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) do {;} while (0)
+
+#endif
+
+/*
+** Memory mapped IO
+**
+** Since linux-2.1, we must use ioremap() to map the io memory space.
+** iounmap() to unmap it. That allows portability.
+** Linux 1.3.X and 2.0.X allow to remap physical pages addresses greater
+** than the highest physical memory address to kernel virtual pages with
+** vremap() / vfree(). That was not portable but worked with i386
+** architecture.
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif
+
+#ifdef __sparc__
+# include <asm/irq.h>
+# if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)
+ /* ioremap/iounmap broken in 2.2.x on Sparc. -DaveM */
+# define ioremap(base, size) ((u_long) __va(base))
+# define iounmap(vaddr)
+# endif
+# define pcivtobus(p) bus_dvma_to_mem(p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((void *)(a), (const void *)(b), (c))
+#elif defined(__alpha__)
+# define pcivtobus(p) ((p) & 0xfffffffful)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#else /* others */
+# define pcivtobus(p) (p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#endif
+
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+static u_long __init remap_pci_mem(u_long base, u_long size)
+{
+ u_long page_base = ((u_long) base) & PAGE_MASK;
+ u_long page_offs = ((u_long) base) - page_base;
+ u_long page_remapped = (u_long) ioremap(page_base, page_offs+size);
+
+ return page_remapped? (page_remapped + page_offs) : 0UL;
+}
+
+static void __init unmap_pci_mem(u_long vaddr, u_long size)
+{
+ if (vaddr)
+ iounmap((void *) (vaddr & PAGE_MASK));
+}
+
+#endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+
+/*
+** Insert a delay in micro-seconds and milli-seconds.
+** -------------------------------------------------
+** Under Linux, udelay() is restricted to delay < 1 milli-second.
+** In fact, it generally works for up to 1 second delay.
+** Since 2.1.105, the mdelay() function is provided for delays
+** in milli-seconds.
+** Under 2.0 kernels, udelay() is an inline function that is very
+** inaccurate on Pentium processors.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
+#define UDELAY udelay
+#define MDELAY mdelay
+#else
+static void UDELAY(long us) { udelay(us); }
+static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
+#endif
+
+/*
+** Simple power of two buddy-like allocator
+** ----------------------------------------
+** This simple code is not intended to be fast, but to provide
+** power of 2 aligned memory allocations.
+** Since the SCRIPTS processor only supplies 8 bit arithmetic,
+** this allocator allows simple and fast address calculations
+** from the SCRIPTS code. In addition, cache line alignment
+** is guaranteed for power of 2 cache line size.
+** Enhanced in linux-2.3.44 to provide a memory pool per pcidev
+** to support dynamic dma mapping. (I would have preferred a
+** real bus abstraction, btw).
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+#define __GetFreePages(flags, order) __get_free_pages(flags, order)
+#else
+#define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
+#endif
+
+#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
+#if PAGE_SIZE >= 8192
+#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
+#else
+#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
+#endif
+#define MEMO_FREE_UNUSED /* Free unused pages immediately */
+#define MEMO_WARN 1
+#define MEMO_GFP_FLAGS GFP_ATOMIC
+#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
+#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
+#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
+
+typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
+typedef pcidev_t m_bush_t; /* Something that addresses DMAable */
+
+typedef struct m_link { /* Link between free memory chunks */
+ struct m_link *next;
+} m_link_s;
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+typedef struct m_vtob { /* Virtual to Bus address translation */
+ struct m_vtob *next;
+ m_addr_t vaddr;
+ m_addr_t baddr;
+} m_vtob_s;
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+#endif
+
+typedef struct m_pool { /* Memory pool of a given kind */
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ m_bush_t bush;
+ m_addr_t (*getp)(struct m_pool *);
+ void (*freep)(struct m_pool *, m_addr_t);
+#define M_GETP() mp->getp(mp)
+#define M_FREEP(p) mp->freep(mp, p)
+#define GetPages() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define FreePages(p) free_pages(p, MEMO_PAGE_ORDER)
+ int nump;
+ m_vtob_s *(vtob[VTOB_HASH_SIZE]);
+ struct m_pool *next;
+#else
+#define M_GETP() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define M_FREEP(p) free_pages(p, MEMO_PAGE_ORDER)
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+ struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
+} m_pool_s;
+
+static void *___m_alloc(m_pool_s *mp, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ int j;
+ m_addr_t a;
+ m_link_s *h = mp->h;
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return 0;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ j = i;
+ while (!h[j].next) {
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ h[j].next = (m_link_s *) M_GETP();
+ if (h[j].next)
+ h[j].next->next = 0;
+ break;
+ }
+ ++j;
+ s <<= 1;
+ }
+ a = (m_addr_t) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (m_link_s *) (a+s);
+ h[j].next->next = 0;
+ }
+ }
+#ifdef DEBUG
+ printk("___m_alloc(%d) = %p\n", size, (void *) a);
+#endif
+ return (void *) a;
+}
+
+static void ___m_free(m_pool_s *mp, void *ptr, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ m_link_s *q;
+ m_addr_t a, b;
+ m_link_s *h = mp->h;
+
+#ifdef DEBUG
+ printk("___m_free(%p, %d)\n", ptr, size);
+#endif
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ a = (m_addr_t) ptr;
+
+ while (1) {
+#ifdef MEMO_FREE_UNUSED
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ M_FREEP(a);
+ break;
+ }
+#endif
+ b = a ^ s;
+ q = &h[i];
+ while (q->next && q->next != (m_link_s *) b) {
+ q = q->next;
+ }
+ if (!q->next) {
+ ((m_link_s *) a)->next = h[i].next;
+ h[i].next = (m_link_s *) a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ ++i;
+ }
+}
+
+static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
+{
+ void *p;
+
+ p = ___m_alloc(mp, size);
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("new %-10s[%4d] @%p.\n", name, size, p);
+
+ if (p)
+ bzero(p, size);
+ else if (uflags & MEMO_WARN)
+ printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
+
+ return p;
+}
+
+#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
+
+static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
+{
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+ ___m_free(mp, ptr, size);
+
+}
+
+/*
+ * With pci bus iommu support, we use a default pool of unmapped memory
+ * for memory we do not need to DMA from/to and one pool per pcidev for
+ * memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+static m_pool_s mp0;
+
+#else
+
+static m_addr_t ___mp0_getp(m_pool_s *mp)
+{
+ m_addr_t m = GetPages();
+ if (m)
+ ++mp->nump;
+ return m;
+}
+
+static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
+{
+ FreePages(m);
+ --mp->nump;
+}
+
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+static void *m_calloc(int size, char *name)
+{
+ u_long flags;
+ void *m;
+ NCR_LOCK_DRIVER(flags);
+ m = __m_calloc(&mp0, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+ return m;
+}
+
+static void m_free(void *ptr, int size, char *name)
+{
+ u_long flags;
+ NCR_LOCK_DRIVER(flags);
+ __m_free(&mp0, ptr, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+}
+
+/*
+ * DMAable pools.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Without pci bus iommu support, all the memory is assumed DMAable */
+
+#define __m_calloc_dma(b, s, n) m_calloc(s, n)
+#define __m_free_dma(b, p, s, n) m_free(p, s, n)
+#define __vtobus(b, p) virt_to_bus(p)
+
+#else
+
+/*
+ * With pci bus iommu support, we maintain one pool per pcidev and a
+ * hashed reverse table for virtual to bus physical address translations.
+ */
+static m_addr_t ___dma_getp(m_pool_s *mp)
+{
+ m_addr_t vp;
+ m_vtob_s *vbp;
+
+ vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
+ if (vbp) {
+ dma_addr_t daddr;
+ vp = (m_addr_t) pci_alloc_consistent(mp->bush,
+ PAGE_SIZE<<MEMO_PAGE_ORDER,
+ &daddr);
+ if (vp) {
+ int hc = VTOB_HASH_CODE(vp);
+ vbp->vaddr = vp;
+ vbp->baddr = daddr;
+ vbp->next = mp->vtob[hc];
+ mp->vtob[hc] = vbp;
+ ++mp->nump;
+ return vp;
+ }
+ else
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ }
+ return 0;
+}
+
+static void ___dma_freep(m_pool_s *mp, m_addr_t m)
+{
+ m_vtob_s **vbpp, *vbp;
+ int hc = VTOB_HASH_CODE(m);
+
+ vbpp = &mp->vtob[hc];
+ while (*vbpp && (*vbpp)->vaddr != m)
+ vbpp = &(*vbpp)->next;
+ if (*vbpp) {
+ vbp = *vbpp;
+ *vbpp = (*vbpp)->next;
+ pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
+ (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ --mp->nump;
+ }
+}
+
+static inline m_pool_s *___get_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
+ return mp;
+}
+
+static m_pool_s *___cre_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
+ if (mp) {
+ bzero(mp, sizeof(*mp));
+ mp->bush = bush;
+ mp->getp = ___dma_getp;
+ mp->freep = ___dma_freep;
+ mp->next = mp0.next;
+ mp0.next = mp;
+ }
+ return mp;
+}
+
+static void ___del_dma_pool(m_pool_s *p)
+{
+ struct m_pool **pp = &mp0.next;
+
+ while (*pp && *pp != p)
+ pp = &(*pp)->next;
+ if (*pp) {
+ *pp = (*pp)->next;
+ __m_free(&mp0, p, sizeof(*p), "MPOOL");
+ }
+}
+
+static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+ void *m = 0;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (!mp)
+ mp = ___cre_dma_pool(bush);
+ if (mp)
+ m = __m_calloc(mp, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+
+ return m;
+}
+
+static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp)
+ __m_free(mp, m, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+}
+
+static m_addr_t __vtobus(m_bush_t bush, void *m)
+{
+ u_long flags;
+ m_pool_s *mp;
+ int hc = VTOB_HASH_CODE(m);
+ m_vtob_s *vp = 0;
+ m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp) {
+ vp = mp->vtob[hc];
+ while (vp && (m_addr_t) vp->vaddr != a)
+ vp = vp->next;
+ }
+ NCR_UNLOCK_DRIVER(flags);
+ return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
+}
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->pdev, s, n)
+#define _m_free_dma(np, p, s, n) __m_free_dma(np->pdev, p, s, n)
+#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
+#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
+#define _vtobus(np, p) __vtobus(np->pdev, p)
+#define vtobus(p) _vtobus(np, p)
+
+/*
+ * Deal with DMA mapping/unmapping.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Linux versions prior to pci bus iommu kernel interface */
+
+#define __unmap_scsi_data(pdev, cmd) do {; } while (0)
+#define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
+#define __map_scsi_sg_data(pdev, cmd) ((cmd)->use_sg)
+#define __sync_scsi_data(pdev, cmd) do {; } while (0)
+
+#define scsi_sg_dma_address(sc) vtobus((sc)->address)
+#define scsi_sg_dma_len(sc) ((sc)->length)
+
+#else
+
+/* Linux version with pci bus iommu kernel interface */
+
+/* To keep track of the dma mapping (sg/single) that has been set */
+#define __data_mapped SCp.phase
+#define __data_mapping SCp.have_data_in
+
+static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_unmap_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+ cmd->__data_mapped = 0;
+}
+
+static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ dma_addr_t mapping;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->request_bufflen == 0)
+ return 0;
+
+ mapping = pci_map_single(pdev, cmd->request_buffer,
+ cmd->request_bufflen, dma_dir);
+ cmd->__data_mapped = 1;
+ cmd->__data_mapping = mapping;
+
+ return mapping;
+}
+
+static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int use_sg;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->use_sg == 0)
+ return 0;
+
+ use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ cmd->__data_mapped = 2;
+ cmd->__data_mapping = use_sg;
+
+ return use_sg;
+}
+
+static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_dma_sync_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+}
+
+#define scsi_sg_dma_address(sc) sg_dma_address(sc)
+#define scsi_sg_dma_len(sc) sg_dma_len(sc)
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->pdev, cmd)
+#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->pdev, cmd)
+#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->pdev, cmd)
+#define sync_scsi_data(np, cmd) __sync_scsi_data(np->pdev, cmd)
+
+
+/*
+ * Print out some buffer.
+ */
+static void ncr_print_hex(u_char *p, int n)
+{
+ while (n-- > 0)
+ printk (" %x", *p++);
+}
+
+static void ncr_printl_hex(char *label, u_char *p, int n)
+{
+ printk("%s", label);
+ ncr_print_hex(p, n);
+ printk (".\n");
+}
+
+/*
+** Transfer direction
+**
+** Until some linux kernel version near 2.3.40, low-level scsi
+** drivers were not told about data transfer direction.
+** We check the existence of this feature that has been expected
+** for a _long_ time by all SCSI driver developers by just
+** testing against the definition of SCSI_DATA_UNKNOWN. Indeed
+** this is a hack, but testing against a kernel version would
+** have been a shame. ;-)
+*/
+#ifdef SCSI_DATA_UNKNOWN
+
+#define scsi_data_direction(cmd) (cmd->sc_data_direction)
+
+#else
+
+#define SCSI_DATA_UNKNOWN 0
+#define SCSI_DATA_WRITE 1
+#define SCSI_DATA_READ 2
+#define SCSI_DATA_NONE 3
+
+/*
+ * Fallback for kernels that do not provide sc_data_direction
+ * (no SCSI_DATA_UNKNOWN define): guess the transfer direction
+ * from the CDB opcode. Only READ/WRITE (6), (10) and (12) are
+ * decoded; every other opcode is reported as SCSI_DATA_UNKNOWN.
+ */
+static __inline__ int scsi_data_direction(Scsi_Cmnd *cmd)
+{
+ int direction;
+
+ switch((int) cmd->cmnd[0]) {
+ case 0x08: /* READ(6) 08 */
+ case 0x28: /* READ(10) 28 */
+ case 0xA8: /* READ(12) A8 */
+ direction = SCSI_DATA_READ;
+ break;
+ case 0x0A: /* WRITE(6) 0A */
+ case 0x2A: /* WRITE(10) 2A */
+ case 0xAA: /* WRITE(12) AA */
+ direction = SCSI_DATA_WRITE;
+ break;
+ default:
+ direction = SCSI_DATA_UNKNOWN;
+ break;
+ }
+
+ return direction;
+}
+
+#endif /* SCSI_DATA_UNKNOWN */
+
+/*
+** Head of list of NCR boards
+**
+** For kernel version < 1.3.70, host is retrieved by its irq level.
+** For later kernels, the internal host control block address
+** (struct ncb) is used as device id parameter of the irq stuff.
+*/
+
+static struct Scsi_Host *first_host = NULL;
+
+
+/*
+** /proc directory entry and proc_info function
+*/
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
+static struct proc_dir_entry proc_scsi_sym53c8xx = {
+ PROC_SCSI_SYM53C8XX, 9, NAME53C8XX,
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#endif
+static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func);
+#endif
+
+/*
+** Driver setup.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+# ifdef MODULE
+char *sym53c8xx = 0; /* command line passed by insmod */
+# if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,30)
+MODULE_PARM(sym53c8xx, "s");
+# endif
+# endif
+#endif
+
+/*
+** Other Linux definitions
+*/
+#define SetScsiResult(cmd, h_sts, s_sts) \
+ cmd->result = (((h_sts) << 16) + ((s_sts) & 0x7f))
+
+/* We may have to remind our amnesiac SCSI layer of the reason of the abort */
+#if 0
+#define SetScsiAbortResult(cmd) \
+ SetScsiResult( \
+ cmd, \
+ (cmd)->abort_reason == DID_TIME_OUT ? DID_TIME_OUT : DID_ABORT, \
+ 0xff)
+#else
+#define SetScsiAbortResult(cmd) SetScsiResult(cmd, DID_ABORT, 0xff)
+#endif
+
+static void sym53c8xx_select_queue_depths(
+ struct Scsi_Host *host, struct scsi_device *devlist);
+static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs);
+static void sym53c8xx_timeout(unsigned long np);
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static u_char Tekram_sync[16] __initdata =
+ {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Structures used by sym53c8xx_detect/sym53c8xx_pci_init to
+** transmit device configuration to the ncr_attach() function.
+*/
+typedef struct {
+ int bus; /* PCI BUS number */
+ u_char device_fn; /* PCI BUS device and function */
+ u_long base; /* MMIO base bus address */
+ u_long base_2; /* Second BAR (on-chip RAM) address */
+ u_long io_port; /* IO space address */
+ int irq; /* IRQ line */
+/* port and reg fields to use INB, OUTB macros */
+ u_long base_io; /* IO base used by INB/OUTB macros */
+ volatile struct ncr_reg *reg; /* Memory mapped chip registers */
+} ncr_slot;
+
+/*
+** NVRAM contents detected on a board, tagged by vendor layout.
+*/
+typedef struct {
+ int type; /* One of the two values below */
+#define SCSI_NCR_SYMBIOS_NVRAM (1)
+#define SCSI_NCR_TEKRAM_NVRAM (2)
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ } data; /* Raw contents, keyed by 'type' */
+#endif
+} ncr_nvram;
+
+/*
+** Structure used by sym53c8xx_detect/sym53c8xx_pci_init
+** to save data on each detected board for ncr_attach().
+*/
+typedef struct {
+ pcidev_t pdev; /* PCI device handle */
+ ncr_slot slot; /* Addressing information (see above) */
+ ncr_chip chip; /* Chip description */
+ ncr_nvram *nvram; /* NVRAM contents, if detected */
+ u_char host_id; /* SCSI id of the adapter */
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ u_char pqs_pds; /* NOTE(review): PQS/PDS board flag — confirm */
+#endif
+ int attach_done; /* Non-zero once ncr_attach() handled it */
+} ncr_device;
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+/* Kernel-space assert: panics with expression text, file and line. */
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)panic( \
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+
+/*==========================================================
+**
+** Command control block states.
+**
+**==========================================================
+*/
+
+#define HS_IDLE (0)
+#define HS_BUSY (1)
+#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
+#define HS_DISCONNECT (3) /* Disconnected by target */
+
+#define HS_DONEMASK (0x80)
+#define HS_COMPLETE (4|HS_DONEMASK)
+#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
+#define HS_RESET (6|HS_DONEMASK) /* SCSI reset */
+#define HS_ABORTED (7|HS_DONEMASK) /* Transfer aborted */
+#define HS_TIMEOUT (8|HS_DONEMASK) /* Software timeout */
+#define HS_FAIL (9|HS_DONEMASK) /* SCSI or PCI bus errors */
+#define HS_UNEXPECTED (10|HS_DONEMASK)/* Unexpected disconnect */
+
+#define DSA_INVALID 0xffffffff
+
+/*==========================================================
+**
+** Software Interrupt Codes
+**
+**==========================================================
+*/
+
+#define SIR_BAD_STATUS (1)
+#define SIR_SEL_ATN_NO_MSG_OUT (2)
+#define SIR_MSG_RECEIVED (3)
+#define SIR_MSG_WEIRD (4)
+#define SIR_NEGO_FAILED (5)
+#define SIR_NEGO_PROTO (6)
+#define SIR_SCRIPT_STOPPED (7)
+#define SIR_REJECT_TO_SEND (8)
+#define SIR_SWIDE_OVERRUN (9)
+#define SIR_SODL_UNDERRUN (10)
+#define SIR_RESEL_NO_MSG_IN (11)
+#define SIR_RESEL_NO_IDENTIFY (12)
+#define SIR_RESEL_BAD_LUN (13)
+#define SIR_TARGET_SELECTED (14)
+#define SIR_RESEL_BAD_I_T_L (15)
+#define SIR_RESEL_BAD_I_T_L_Q (16)
+#define SIR_ABORT_SENT (17)
+#define SIR_RESEL_ABORTED (18)
+#define SIR_MSG_OUT_DONE (19)
+#define SIR_AUTO_SENSE_DONE (20)
+#define SIR_DUMMY_INTERRUPT (21)
+#define SIR_DATA_OVERRUN (22)
+#define SIR_BAD_PHASE (23)
+#define SIR_MAX (23)
+
+/*==========================================================
+**
+** Extended error bits.
+** xerr_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define XE_EXTRA_DATA (1) /* unexpected data phase */
+#define XE_BAD_PHASE (2) /* illegal phase (4/5) */
+#define XE_PARITY_ERR (4) /* unrecovered SCSI parity error */
+#define XE_SODL_UNRUN (1<<3)
+#define XE_SWIDE_OVRUN (1<<4)
+
+/*==========================================================
+**
+** Negotiation status.
+** nego_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define NS_NOCHANGE (0)
+#define NS_SYNC (1)
+#define NS_WIDE (2)
+#define NS_PPR (4)
+
+/*==========================================================
+**
+** "Special features" of targets.
+** quirks field of struct tcb.
+** actualquirks field of struct ccb.
+**
+**==========================================================
+*/
+
+#define QUIRK_AUTOSAVE (0x01)
+
+/*==========================================================
+**
+** Capability bits in Inquire response byte 7.
+**
+**==========================================================
+*/
+
+#define INQ7_QUEUE (0x02)
+#define INQ7_SYNC (0x10)
+#define INQ7_WIDE16 (0x20)
+
+/*==========================================================
+**
+** A CCB hashed table is used to retrieve CCB address
+** from DSA value.
+**
+**==========================================================
+*/
+
+#define CCB_HASH_SHIFT 8
+#define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
+#define CCB_HASH_MASK (CCB_HASH_SIZE-1)
+#define CCB_HASH_CODE(dsa) (((dsa) >> 11) & CCB_HASH_MASK)
+
+/*==========================================================
+**
+** Declaration of structs.
+**
+**==========================================================
+*/
+
+struct tcb;
+struct lcb;
+struct ccb;
+struct ncb;
+struct script;
+
+typedef struct ncb * ncb_p;
+typedef struct tcb * tcb_p;
+typedef struct lcb * lcb_p;
+typedef struct ccb * ccb_p;
+
+/* A two-word SCRIPTS entry: command opcode and physical address operand. */
+struct link {
+ ncrcmd l_cmd;
+ ncrcmd l_paddr;
+};
+
+/*
+** A user-requested operation: target/lun it applies to, a UC_*
+** operation code in 'cmd' and its argument in 'data'.
+** NOTE(review): presumably filled in from the user/proc interface —
+** confirm against the rest of the driver.
+*/
+struct usrcmd {
+ u_long target;
+ u_long lun;
+ u_long data;
+ u_long cmd;
+};
+
+/* Values for usrcmd.cmd */
+#define UC_SETSYNC 10
+#define UC_SETTAGS 11
+#define UC_SETDEBUG 12
+#define UC_SETORDER 13
+#define UC_SETWIDE 14
+#define UC_SETFLAG 15
+#define UC_SETVERBOSE 17
+#define UC_RESETDEV 18
+#define UC_CLEARDEV 19
+
+/* Per-target user flag bits */
+#define UF_TRACE (0x01)
+#define UF_NODISC (0x02)
+#define UF_NOSCAN (0x04)
+
+/*========================================================================
+**
+** Declaration of structs: target control block
+**
+**========================================================================
+*/
+struct tcb {
+ /*----------------------------------------------------------------
+ ** LUN tables.
+ ** An array of bus addresses is used on reselection by
+ ** the SCRIPT.
+ **----------------------------------------------------------------
+ */
+ u_int32 *luntbl; /* lcbs bus address table */
+ u_int32 b_luntbl; /* bus address of this table */
+ u_int32 b_lun0; /* bus address of lun0 */
+ lcb_p l0p; /* lcb of LUN #0 (normal case) */
+#if MAX_LUN > 1
+ lcb_p *lmp; /* Other lcb's [1..MAX_LUN] */
+#endif
+ /*----------------------------------------------------------------
+ ** Target capabilities.
+ **----------------------------------------------------------------
+ */
+ u_char inq_done; /* Target capabilities received */
+ u_char inq_byte7; /* Contains these capabilities */
+
+ /*----------------------------------------------------------------
+ ** Some flags.
+ **----------------------------------------------------------------
+ */
+ u_char to_reset; /* This target is to be reset */
+
+ /*----------------------------------------------------------------
+ ** Pointer to the ccb used for negotiation.
+ ** Prevent from starting a negotiation for all queued commands
+ ** when tagged command queuing is enabled.
+ **----------------------------------------------------------------
+ */
+ ccb_p nego_cp;
+
+ /*----------------------------------------------------------------
+ ** negotiation of wide and synch transfer and device quirks.
+ ** sval, wval and uval are read from SCRIPTS and so have alignment
+ ** constraints.
+ **----------------------------------------------------------------
+ */
+/*0*/ u_char minsync;
+/*1*/ u_char sval;
+/*2*/ u_short period;
+/*0*/ u_char maxoffs;
+/*1*/ u_char quirks;
+/*2*/ u_char widedone;
+/*3*/ u_char wval;
+/*0*/ u_char uval;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ u_char ic_min_sync;
+ u_char ic_max_width;
+ u_char ic_done;
+#endif
+ u_char ic_maximums_set; /* NOTE(review): transfer maximums set — confirm */
+ u_char ppr_negotiation; /* NOTE(review): PPR negotiation enabled — confirm */
+
+ /*----------------------------------------------------------------
+ ** User settable limits and options.
+ ** These limits are read from the NVRAM if present.
+ **----------------------------------------------------------------
+ */
+ u_char usrsync;
+ u_char usrwide;
+ u_short usrtags;
+ u_char usrflag;
+};
+
+/*========================================================================
+**
+** Declaration of structs: lun control block
+**
+**========================================================================
+*/
+struct lcb {
+ /*----------------------------------------------------------------
+ ** On reselection, SCRIPTS use this value as a JUMP address
+ ** after the IDENTIFY has been successfully received.
+ ** This field is set to 'resel_tag' if TCQ is enabled and
+ ** to 'resel_notag' if TCQ is disabled.
+ ** (Must be at zero due to bad lun handling on reselection)
+ **----------------------------------------------------------------
+ */
+/*0*/ u_int32 resel_task;
+
+ /*----------------------------------------------------------------
+ ** Task table used by the script processor to retrieve the
+ ** task corresponding to a reselected nexus. The TAG is used
+ ** as offset to determine the corresponding entry.
+ ** Each entry contains the associated CCB bus address.
+ **----------------------------------------------------------------
+ */
+ u_int32 tasktbl_0; /* Used if TCQ not enabled */
+ u_int32 *tasktbl; /* Task table virtual address */
+ u_int32 b_tasktbl; /* Bus address of this table */
+
+ /*----------------------------------------------------------------
+ ** CCB queue management.
+ **----------------------------------------------------------------
+ */
+ XPT_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
+ XPT_QUEHEAD wait_ccbq; /* Queue of waiting for IO CCBs */
+ u_short busyccbs; /* CCBs busy for this lun */
+ u_short queuedccbs; /* CCBs queued to the controller*/
+ u_short queuedepth; /* Queue depth for this lun */
+ u_short scdev_depth; /* SCSI device queue depth */
+ u_short maxnxs; /* Max possible nexuses */
+
+ /*----------------------------------------------------------------
+ ** Control of tagged command queuing.
+ ** Tags allocation is performed using a circular buffer.
+ ** This avoids using a loop for tag allocation.
+ **----------------------------------------------------------------
+ */
+ u_short ia_tag; /* Tag allocation index */
+ u_short if_tag; /* Tag release index */
+ u_char *cb_tags; /* Circular tags buffer */
+ u_char inq_byte7; /* Store unit CmdQ capability */
+ u_char usetags; /* Command queuing is active */
+ u_char to_clear; /* User wants to clear all tasks*/
+ u_short maxtags; /* Max NR of tags asked by user */
+ u_short numtags; /* Current number of tags */
+
+ /*----------------------------------------------------------------
+ ** QUEUE FULL and ORDERED tag control.
+ **----------------------------------------------------------------
+ */
+ u_short num_good; /* Nr of GOOD since QUEUE FULL */
+ u_short tags_sum[2]; /* Tags sum counters */
+ u_char tags_si; /* Current index to tags sum */
+ u_long tags_stime; /* Last time we switch tags_sum */
+};
+
+/*========================================================================
+**
+** Declaration of structs: actions for a task.
+**
+**========================================================================
+**
+** It is part of the CCB and is called by the scripts processor to
+** start or restart the data structure (nexus).
+**
+**------------------------------------------------------------------------
+*/
+struct action {
+ u_int32 start; /* SCRIPTS address used to start the task */
+ u_int32 restart; /* SCRIPTS address used to restart it */
+};
+
+/*========================================================================
+**
+** Declaration of structs: Phase mismatch context.
+**
+**========================================================================
+**
+** It is part of the CCB and is used as parameters for the DATA
+** pointer. We need two contexts to handle correctly the SAVED
+** DATA POINTER.
+**
+**------------------------------------------------------------------------
+*/
+struct pm_ctx {
+ struct scr_tblmove sg; /* Updated interrupted SG block */
+ u_int32 ret; /* SCRIPT return address to resume at */
+};
+
+/*========================================================================
+**
+** Declaration of structs: global HEADER.
+**
+**========================================================================
+**
+** In earlier driver versions, this substructure was copied from the
+** ccb to a global address after selection (or reselection) and copied
+** back before disconnect. Since we are now using LOAD/STORE DSA
+** RELATIVE instructions, the script is able to access directly these
+** fields, and so, this header is no longer copied.
+**
+**------------------------------------------------------------------------
+*/
+
+struct head {
+ /*----------------------------------------------------------------
+ ** Start and restart SCRIPTS addresses (must be at 0).
+ **----------------------------------------------------------------
+ */
+ struct action go;
+
+ /*----------------------------------------------------------------
+ ** Saved data pointer.
+ ** Points to the position in the script responsible for the
+ ** actual transfer of data.
+ ** It's written after reception of a SAVE_DATA_POINTER message.
+ ** The goalpointer points after the last transfer command.
+ **----------------------------------------------------------------
+ */
+ u_int32 savep;
+ u_int32 lastp;
+ u_int32 goalp;
+
+ /*----------------------------------------------------------------
+ ** Alternate data pointer.
+ ** They are copied back to savep/lastp/goalp by the SCRIPTS
+ ** when the direction is unknown and the device claims data out.
+ **----------------------------------------------------------------
+ */
+ u_int32 wlastp;
+ u_int32 wgoalp;
+
+ /*----------------------------------------------------------------
+ ** Status fields.
+ **----------------------------------------------------------------
+ */
+ u_char status[4]; /* host status: actualquirks, host_status,
+ ** scsi_status, host_flags (see defines below) */
+};
+
+/*
+** LUN control block lookup.
+** We use a direct pointer for LUN #0, and a table of pointers
+** which is only allocated for devices that support LUN(s) > 0.
+*/
+#if MAX_LUN <= 1
+#define ncr_lp(np, tp, lun) (!lun) ? (tp)->l0p : 0
+#else
+#define ncr_lp(np, tp, lun) \
+ (!lun) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(lun)] : 0
+#endif
+
+/*
+** The status bytes are used by the host and the script processor.
+**
+** The four bytes (status[4]) are copied to the scratchb register
+** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect,
+** and copied back just after disconnecting.
+** Inside the script the XX_REG are used.
+*/
+
+/*
+** Last four bytes (script)
+*/
+#define QU_REG scr0
+#define HS_REG scr1
+#define HS_PRT nc_scr1
+#define SS_REG scr2
+#define SS_PRT nc_scr2
+#define HF_REG scr3
+#define HF_PRT nc_scr3
+
+/*
+** Last four bytes (host)
+*/
+#define actualquirks phys.header.status[0]
+#define host_status phys.header.status[1]
+#define scsi_status phys.header.status[2]
+#define host_flags phys.header.status[3]
+
+/*
+** Host flags
+*/
+#define HF_IN_PM0 1u
+#define HF_IN_PM1 (1u<<1)
+#define HF_ACT_PM (1u<<2)
+#define HF_DP_SAVED (1u<<3)
+#define HF_AUTO_SENSE (1u<<4)
+#define HF_DATA_IN (1u<<5)
+#define HF_PM_TO_C (1u<<6)
+#define HF_EXT_ERR (1u<<7)
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define HF_HINT_IARB (1u<<7)
+#endif
+
+/*
+** This one is stolen from QU_REG.:)
+*/
+#define HF_DATA_ST (1u<<7)
+
+/*==========================================================
+**
+** Declaration of structs: Data structure block
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+** the script-processor-changeable data and
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+struct dsb {
+
+ /*
+ ** Header.
+ */
+
+ struct head header;
+
+ /*
+ ** Table data for Script
+ */
+
+ struct scr_tblsel select;
+ struct scr_tblmove smsg ;
+ struct scr_tblmove smsg_ext ;
+ struct scr_tblmove cmd ;
+ struct scr_tblmove sense ;
+ struct scr_tblmove wresid;
+ struct scr_tblmove data [MAX_SCATTER];
+
+ /*
+ ** Phase mismatch contexts.
+ ** We need two to handle correctly the
+ ** SAVED DATA POINTER.
+ */
+
+ struct pm_ctx pm0;
+ struct pm_ctx pm1;
+};
+
+
+/*========================================================================
+**
+** Declaration of structs: Command control block.
+**
+**========================================================================
+*/
+struct ccb {
+ /*----------------------------------------------------------------
+ ** This is the data structure which is pointed by the DSA
+ ** register when it is executed by the script processor.
+ ** It must be the first entry.
+ **----------------------------------------------------------------
+ */
+ struct dsb phys;
+
+ /*----------------------------------------------------------------
+ ** The general SCSI driver provides a
+ ** pointer to a control block.
+ **----------------------------------------------------------------
+ */
+ Scsi_Cmnd *cmd; /* SCSI command */
+ u_char cdb_buf[16]; /* Copy of CDB */
+ u_char sense_buf[64]; /* Buffer for sense data */
+ int data_len; /* Total data length */
+ int segments; /* Number of SG segments */
+
+ /*----------------------------------------------------------------
+ ** Message areas.
+ ** We prepare a message to be sent after selection.
+ ** We may use a second one if the command is rescheduled
+ ** due to CHECK_CONDITION or QUEUE FULL status.
+ ** Contents are IDENTIFY and SIMPLE_TAG.
+ ** While negotiating sync or wide transfer,
+ ** a SDTR or WDTR message is appended.
+ **----------------------------------------------------------------
+ */
+ u_char scsi_smsg [12];
+ u_char scsi_smsg2[12];
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous status'.
+ **----------------------------------------------------------------
+ */
+ u_char nego_status; /* Negotiation status */
+ u_char xerr_status; /* Extended error flags */
+ u_int32 extra_bytes; /* Extraneous bytes transferred */
+
+ /*----------------------------------------------------------------
+ ** Saved info for auto-sense
+ **----------------------------------------------------------------
+ */
+ u_char sv_scsi_status;
+ u_char sv_xerr_status;
+
+ /*----------------------------------------------------------------
+ ** Other fields.
+ **----------------------------------------------------------------
+ */
+ u_long p_ccb; /* BUS address of this CCB */
+ u_char sensecmd[6]; /* Sense command */
+ u_char to_abort; /* This CCB is to be aborted */
+ u_short tag; /* Tag for this transfer */
+ /* NO_TAG means no tag */
+ u_char tags_si; /* Lun tags sum index (0,1) */
+
+ u_char target; /* SCSI id of the target */
+ u_char lun; /* SCSI lun number */
+ u_short queued; /* NOTE(review): queued state — confirm */
+ ccb_p link_ccb; /* Host adapter CCB chain */
+ ccb_p link_ccbh; /* Host adapter CCB hash chain */
+ XPT_QUEHEAD link_ccbq; /* Link to unit CCB queue */
+ u_int32 startp; /* Initial data pointer */
+ u_int32 lastp0; /* Initial 'lastp' */
+ int ext_sg; /* Extreme data pointer, used */
+ int ext_ofs; /* to calculate the residual. */
+ int resid; /* Residual byte count */
+};
+
+#define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
+
+
+/*========================================================================
+**
+** Declaration of structs: NCR device descriptor
+**
+**========================================================================
+*/
+struct ncb {
+ /*----------------------------------------------------------------
+ ** Idle task and invalid task actions and their bus
+ ** addresses.
+ **----------------------------------------------------------------
+ */
+ struct action idletask;
+ struct action notask;
+ struct action bad_i_t_l;
+ struct action bad_i_t_l_q;
+ u_long p_idletask;
+ u_long p_notask;
+ u_long p_bad_i_t_l;
+ u_long p_bad_i_t_l_q;
+
+ /*----------------------------------------------------------------
+ ** Dummy lun table to protect us against target returning bad
+ ** lun number on reselection.
+ **----------------------------------------------------------------
+ */
+ u_int32 *badluntbl; /* Table physical address */
+ u_int32 resel_badlun; /* SCRIPT handler BUS address */
+
+ /*----------------------------------------------------------------
+ ** Bit 32-63 of the on-chip RAM bus address in LE format.
+ ** The START_RAM64 script loads the MMRS and MMWS from this
+ ** field.
+ **----------------------------------------------------------------
+ */
+ u_int32 scr_ram_seg;
+
+ /*----------------------------------------------------------------
+ ** CCBs management queues.
+ **----------------------------------------------------------------
+ */
+ Scsi_Cmnd *waiting_list; /* Commands waiting for a CCB */
+ /* when lcb is not allocated. */
+ Scsi_Cmnd *done_list; /* Commands waiting for done() */
+ /* callback to be invoked. */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+ spinlock_t smp_lock; /* Lock for SMP threading */
+#endif
+
+ /*----------------------------------------------------------------
+ ** Chip and controller identification.
+ **----------------------------------------------------------------
+ */
+ int unit; /* Unit number */
+ char chip_name[8]; /* Chip name */
+ char inst_name[16]; /* ncb instance name */
+
+ /*----------------------------------------------------------------
+ ** Initial value of some IO register bits.
+ ** These values are assumed to have been set by BIOS, and may
+ ** be used for probing adapter implementation differences.
+ **----------------------------------------------------------------
+ */
+ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
+ sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_stest1, sv_scntl4;
+
+ /*----------------------------------------------------------------
+ ** Actual initial value of IO register bits used by the
+ ** driver. They are loaded at initialisation according to
+ ** features that are to be enabled.
+ **----------------------------------------------------------------
+ */
+ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
+ rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
+
+ /*----------------------------------------------------------------
+ ** Target data.
+ ** Target control block bus address array used by the SCRIPT
+ ** on reselection.
+ **----------------------------------------------------------------
+ */
+ struct tcb target[MAX_TARGET];
+ u_int32 *targtbl;
+
+ /*----------------------------------------------------------------
+ ** Virtual and physical bus addresses of the chip.
+ **----------------------------------------------------------------
+ */
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ u_long base_va; /* MMIO base virtual address */
+ u_long base2_va; /* On-chip RAM virtual address */
+#endif
+ u_long base_ba; /* MMIO base bus address */
+ u_long base_io; /* IO space base address */
+ u_long base_ws; /* (MM)IO window size */
+ u_long base2_ba; /* On-chip RAM bus address */
+ u_long base2_ws; /* On-chip RAM window size */
+ u_int irq; /* IRQ number */
+ volatile /* Pointer to volatile for */
+ struct ncr_reg *reg; /* memory mapped IO. */
+
+ /*----------------------------------------------------------------
+ ** SCRIPTS virtual and physical bus addresses.
+ ** 'script' is loaded in the on-chip RAM if present.
+ ** 'scripth' stays in main memory for all chips except the
+ ** 53C895A and 53C896 that provide 8K on-chip RAM.
+ **----------------------------------------------------------------
+ */
+ struct script *script0; /* Copies of script and scripth */
+ struct scripth *scripth0; /* relocated for this ncb. */
+ u_long p_script; /* Actual script and scripth */
+ u_long p_scripth; /* bus addresses. */
+ u_long p_scripth0;
+
+ /*----------------------------------------------------------------
+ ** General controller parameters and configuration.
+ **----------------------------------------------------------------
+ */
+ pcidev_t pdev;
+ u_short device_id; /* PCI device id */
+ u_char revision_id; /* PCI device revision id */
+ u_char bus; /* PCI BUS number */
+ u_char device_fn; /* PCI BUS device and function */
+ u_char myaddr; /* SCSI id of the adapter */
+ u_char maxburst; /* log base 2 of dwords burst */
+ u_char maxwide; /* Maximum transfer width */
+ u_char minsync; /* Minimum sync period factor */
+ u_char maxsync; /* Maximum sync period factor */
+ u_char maxoffs; /* Max scsi offset */
+ u_char multiplier; /* Clock multiplier (1,2,4) */
+ u_char clock_divn; /* Number of clock divisors */
+ u_long clock_khz; /* SCSI clock frequency in KHz */
+ u_int features; /* Chip features map */
+
+ /*----------------------------------------------------------------
+ ** Range for the PCI clock frequency measurement result
+ ** that ensures the algorithm used by the driver can be
+ ** trusted for the SCSI clock frequency measurement.
+ ** (Assuming a PCI clock frequency of 33 MHz).
+ **----------------------------------------------------------------
+ */
+ u_int pciclock_min;
+ u_int pciclock_max;
+
+ /*----------------------------------------------------------------
+ ** Start queue management.
+ ** It is filled up by the host processor and accessed by the
+ ** SCRIPTS processor in order to start SCSI commands.
+ **----------------------------------------------------------------
+ */
+ u_long p_squeue; /* Start queue BUS address */
+ u_int32 *squeue; /* Start queue virtual address */
+ u_short squeueput; /* Next free slot of the queue */
+ u_short actccbs; /* Number of allocated CCBs */
+ u_short queuedepth; /* Start queue depth */
+
+ /*----------------------------------------------------------------
+ ** Command completion queue.
+ ** It is the same size as the start queue to avoid overflow.
+ **----------------------------------------------------------------
+ */
+ u_short dqueueget; /* Next position to scan */
+ u_int32 *dqueue; /* Completion (done) queue */
+
+ /*----------------------------------------------------------------
+ ** Timeout handler.
+ **----------------------------------------------------------------
+ */
+ struct timer_list timer; /* Timer handler link header */
+ u_long lasttime;
+ u_long settle_time; /* Resetting the SCSI BUS */
+
+ /*----------------------------------------------------------------
+ ** Debugging and profiling.
+ **----------------------------------------------------------------
+ */
+ struct ncr_reg regdump; /* Register dump */
+ u_long regtime; /* Time it has been done */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous buffers accessed by the scripts-processor.
+ ** They shall be DWORD aligned, because they may be read or
+ ** written with a script command.
+ **----------------------------------------------------------------
+ */
+ u_char msgout[12]; /* Buffer for MESSAGE OUT */
+ u_char msgin [12]; /* Buffer for MESSAGE IN */
+ u_int32 lastmsg; /* Last SCSI message sent */
+ u_char scratch; /* Scratch for SCSI receive */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous configuration and status parameters.
+ **----------------------------------------------------------------
+ */
+ u_char scsi_mode; /* Current SCSI BUS mode */
+ u_char order; /* Tag order to use */
+ u_char verbose; /* Verbosity for this controller*/
+ u_int32 ncr_cache; /* Used for cache test at init. */
+ u_long p_ncb; /* BUS address of this NCB */
+
+ /*----------------------------------------------------------------
+ ** CCB lists and queue.
+ **----------------------------------------------------------------
+ */
+ ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */
+ struct ccb *ccbc; /* CCB chain */
+ XPT_QUEHEAD free_ccbq; /* Queue of available CCBs */
+
+ /*----------------------------------------------------------------
+ ** IMMEDIATE ARBITRATION (IARB) control.
+ ** We keep track in 'last_cp' of the last CCB that has been
+ ** queued to the SCRIPTS processor and clear 'last_cp' when
+ ** this CCB completes. If last_cp is not zero at the moment
+ ** we queue a new CCB, we set a flag in 'last_cp' that is
+ ** used by the SCRIPTS as a hint for setting IARB.
+ ** We do not set more than 'iarb_max' consecutive hints for
+ ** IARB in order to leave devices a chance to reselect.
+ ** By the way, any non zero value of 'iarb_max' is unfair. :)
+ **----------------------------------------------------------------
+ */
+#ifdef SCSI_NCR_IARB_SUPPORT
+ struct ccb *last_cp; /* Last queued CCB used for IARB*/
+ u_short iarb_max; /* Max. # consecutive IARB hints*/
+ u_short iarb_count; /* Actual # of these hints */
+#endif
+
+ /*----------------------------------------------------------------
+ ** We need the LCB in order to handle disconnections and
+ ** to count active CCBs for task management. So, we use
+ ** a unique CCB for LUNs we do not have the LCB yet.
+ ** This queue normally should have at most 1 element.
+ **----------------------------------------------------------------
+ */
+ XPT_QUEHEAD b0_ccbq;
+
+ /*----------------------------------------------------------------
+ ** We use a different scatter function for 896 rev 1.
+ **----------------------------------------------------------------
+ */
+ int (*scatter) (ncb_p, ccb_p, Scsi_Cmnd *);
+
+ /*----------------------------------------------------------------
+ ** Command abort handling.
+ ** We need to synchronize tightly with the SCRIPTS
+ ** processor in order to handle things correctly.
+ **----------------------------------------------------------------
+ */
+ u_char abrt_msg[4]; /* Message to send buffer */
+ struct scr_tblmove abrt_tbl; /* Table for the MOV of it */
+ struct scr_tblsel abrt_sel; /* Sync params for selection */
+ u_char istat_sem; /* Tells the chip to stop (SEM) */
+
+ /*----------------------------------------------------------------
+ ** Fields that should be removed or changed.
+ **----------------------------------------------------------------
+ */
+ struct usrcmd user; /* Command from user */
+ volatile u_char release_stage; /* Synchronisation stage on release */
+
+ /*----------------------------------------------------------------
+ ** Fields that are used (primarily) for integrity check
+ **----------------------------------------------------------------
+ */
+ unsigned char check_integrity; /* Enable midlayer integ. check on
+ * bus scan. */
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ unsigned char check_integ_par; /* Set if par or Init. Det. error
+ * used only during integ check */
+#endif
+};
+
+/*
+** Shorthands for the bus (physical) address of a field of the
+** controller data block (ncb) or of a label inside the SCRIPTS
+** arrays: base physical address plus the C field offset.
+** NOTE(review): p_ncb / p_script / p_scripth / p_scripth0 are
+** presumably the bus addresses of the corresponding structures --
+** confirm where they are assigned (outside this chunk).
+*/
+#define NCB_PHYS(np, lbl)	 (np->p_ncb + offsetof(struct ncb, lbl))
+#define NCB_SCRIPT_PHYS(np,lbl)	 (np->p_script + offsetof (struct script, lbl))
+#define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl))
+#define NCB_SCRIPTH0_PHYS(np,lbl) (np->p_scripth0+offsetof (struct scripth,lbl))
+
+/*==========================================================
+**
+**
+** Script for NCR-Processor.
+**
+** Use ncr_script_fill() to create the variable parts.
+** Use ncr_script_copy_and_bind() to make a copy and
+** bind to physical addresses.
+**
+**
+**==========================================================
+**
+** We have to know the offsets of all labels before
+** we reach them (for forward jumps).
+** Therefore we declare a struct here.
+** If you make changes inside the script,
+** DONT FORGET TO CHANGE THE LENGTHS HERE!
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Script fragments which are loaded into the on-chip RAM
+** of 825A, 875, 876, 895, 895A and 896 chips.
+*/
+/*
+** Each member is an array of ncrcmd (SCRIPTS words).  The declared
+** length of every array must match the number of words actually
+** emitted for the corresponding label in script0 below (see the
+** warning above: "DONT FORGET TO CHANGE THE LENGTHS HERE!").
+** The #ifdef'ed alternative sizes mirror the conditionally
+** compiled instructions of each fragment.
+*/
+struct script {
+	ncrcmd	start		[ 14];
+	ncrcmd	getjob_begin	[  4];
+	ncrcmd	getjob_end	[  4];
+	ncrcmd	select		[  8];
+	ncrcmd	wf_sel_done	[  2];
+	ncrcmd	send_ident	[  2];
+#ifdef SCSI_NCR_IARB_SUPPORT
+	ncrcmd	select2		[  8];
+#else
+	ncrcmd	select2		[  2];
+#endif
+	ncrcmd	command		[  2];
+	ncrcmd	dispatch	[ 28];
+	ncrcmd	sel_no_cmd	[ 10];
+	ncrcmd	init		[  6];
+	ncrcmd	clrack		[  4];
+	ncrcmd	disp_status	[  4];
+	ncrcmd	datai_done	[ 26];
+	ncrcmd	datao_done	[ 12];
+	ncrcmd	ign_i_w_r_msg	[  4];
+	ncrcmd	datai_phase	[  2];
+	ncrcmd	datao_phase	[  4];
+	ncrcmd	msg_in		[  2];
+	ncrcmd	msg_in2		[ 10];
+#ifdef SCSI_NCR_IARB_SUPPORT
+	ncrcmd	status		[ 14];
+#else
+	ncrcmd	status		[ 10];
+#endif
+	ncrcmd	complete	[  8];
+#ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+	ncrcmd	complete2	[ 12];
+#else
+	ncrcmd	complete2	[ 10];
+#endif
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+	ncrcmd	done		[ 18];
+#else
+	ncrcmd	done		[ 14];
+#endif
+	ncrcmd	done_end	[  2];
+	ncrcmd	save_dp		[  8];
+	ncrcmd	restore_dp	[  4];
+	ncrcmd	disconnect	[ 20];
+#ifdef SCSI_NCR_IARB_SUPPORT
+	ncrcmd	idle		[  4];
+#else
+	ncrcmd	idle		[  2];
+#endif
+#ifdef SCSI_NCR_IARB_SUPPORT
+	ncrcmd	ungetjob	[  6];
+#else
+	ncrcmd	ungetjob	[  4];
+#endif
+	ncrcmd	reselect	[  4];
+	ncrcmd	reselected	[ 20];
+	ncrcmd	resel_scntl4	[ 30];
+#if   MAX_TASKS*4 > 512
+	ncrcmd	resel_tag	[ 18];
+#elif MAX_TASKS*4 > 256
+	ncrcmd	resel_tag	[ 12];
+#else
+	ncrcmd	resel_tag	[  8];
+#endif
+	ncrcmd	resel_go	[  6];
+	ncrcmd	resel_notag	[  2];
+	ncrcmd	resel_dsa	[  8];
+	/* Variable-sized parts: filled in at run time by
+	** ncr_script_fill() (see the DATA_IN/DATA_OUT comments in
+	** script0 below). */
+	ncrcmd	data_in		[MAX_SCATTER * SCR_SG_SIZE];
+	ncrcmd	data_in2	[  4];
+	ncrcmd	data_out	[MAX_SCATTER * SCR_SG_SIZE];
+	ncrcmd	data_out2	[  4];
+	ncrcmd	pm0_data	[ 12];
+	ncrcmd	pm0_data_out	[  6];
+	ncrcmd	pm0_data_end	[  6];
+	ncrcmd	pm1_data	[ 12];
+	ncrcmd	pm1_data_out	[  6];
+	ncrcmd	pm1_data_end	[  6];
+};
+
+/*
+** Script fragments which stay in main memory for all chips
+** except for the 895A and 896 that support 8K on-chip RAM.
+*/
+/*
+** Same rule as struct script: every array length must match the
+** number of SCRIPTS words actually emitted for that label.
+*/
+struct scripth {
+	ncrcmd	start64		[  2];
+	ncrcmd	no_data		[  2];
+	ncrcmd	sel_for_abort	[ 18];
+	ncrcmd	sel_for_abort_1	[  2];
+	ncrcmd	select_no_atn	[  8];
+	ncrcmd	wf_sel_done_no_atn [ 4];
+
+	ncrcmd	msg_in_etc	[ 14];
+	ncrcmd	msg_received	[  4];
+	ncrcmd	msg_weird_seen	[  4];
+	ncrcmd	msg_extended	[ 20];
+	ncrcmd	msg_bad		[  6];
+	ncrcmd	msg_weird	[  4];
+	ncrcmd	msg_weird1	[  8];
+
+	ncrcmd	wdtr_resp	[  6];
+	ncrcmd	send_wdtr	[  4];
+	ncrcmd	sdtr_resp	[  6];
+	ncrcmd	send_sdtr	[  4];
+	ncrcmd	ppr_resp	[  6];
+	ncrcmd	send_ppr	[  4];
+	ncrcmd	nego_bad_phase	[  4];
+	ncrcmd	msg_out		[  4];
+	ncrcmd	msg_out_done	[  4];
+	ncrcmd	data_ovrun	[  2];
+	ncrcmd	data_ovrun1	[ 22];
+	ncrcmd	data_ovrun2	[  8];
+	ncrcmd	abort_resel	[ 16];
+	ncrcmd	resend_ident	[  4];
+	ncrcmd	ident_break	[  4];
+	ncrcmd	ident_break_atn	[  4];
+	ncrcmd	sdata_in	[  6];
+	ncrcmd	data_io		[  2];
+	ncrcmd	data_io_com	[  8];
+	ncrcmd	data_io_out	[ 12];
+	ncrcmd	resel_bad_lun	[  4];
+	ncrcmd	bad_i_t_l	[  4];
+	ncrcmd	bad_i_t_l_q	[  4];
+	ncrcmd	bad_status	[  6];
+	ncrcmd	tweak_pmj	[ 12];
+	ncrcmd	pm_handle	[ 20];
+	ncrcmd	pm_handle1	[  4];
+	ncrcmd	pm_save		[  4];
+	ncrcmd	pm0_save	[ 14];
+	ncrcmd	pm1_save	[ 14];
+
+	/* WSR handling */
+#ifdef SYM_DEBUG_PM_WITH_WSR
+	ncrcmd  pm_wsr_handle	[ 44];
+#else
+	ncrcmd  pm_wsr_handle	[ 42];
+#endif
+	ncrcmd  wsr_ma_helper	[  4];
+
+	/* Data area */
+	/* One-word cells read/written by the SCRIPTS through
+	** PADDRH() references (e.g. startpos, done_pos are loaded
+	** and stored from script0 above/below). */
+	ncrcmd	zero		[  1];
+	ncrcmd	scratch		[  1];
+	ncrcmd	scratch1	[  1];
+	ncrcmd	pm0_data_addr	[  1];
+	ncrcmd	pm1_data_addr	[  1];
+	ncrcmd	saved_dsa	[  1];
+	ncrcmd	saved_drs	[  1];
+	ncrcmd	done_pos	[  1];
+	ncrcmd	startpos	[  1];
+	ncrcmd	targtbl		[  1];
+	/* End of data area */
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+	ncrcmd	start_ram	[  1];
+	ncrcmd	script0_ba	[  4];
+	ncrcmd	start_ram64	[  3];
+	ncrcmd	script0_ba64	[  3];
+	ncrcmd	scripth0_ba64	[  6];
+	ncrcmd	ram_seg64	[  1];
+#endif
+	ncrcmd	snooptest	[  6];
+	ncrcmd	snoopend	[  2];
+};
+
+/*==========================================================
+**
+**
+** Function headers.
+**
+**
+**==========================================================
+*/
+
+/*
+** Forward declarations of the driver's internal (static) functions.
+** Definitions appear later in the file (outside this chunk).
+*/
+static	ccb_p	ncr_alloc_ccb	(ncb_p np);
+static	void	ncr_complete	(ncb_p np, ccb_p cp);
+static	void	ncr_exception	(ncb_p np);
+static	void	ncr_free_ccb	(ncb_p np, ccb_p cp);
+static	ccb_p	ncr_ccb_from_dsa(ncb_p np, u_long dsa);
+static	void	ncr_init_tcb	(ncb_p np, u_char tn);
+static	lcb_p	ncr_alloc_lcb	(ncb_p np, u_char tn, u_char ln);
+static	lcb_p	ncr_setup_lcb	(ncb_p np, u_char tn, u_char ln,
+				 u_char *inq_data);
+static	void	ncr_getclock	(ncb_p np, int mult);
+static	u_int	ncr_getpciclock (ncb_p np);
+static	void	ncr_selectclock	(ncb_p np, u_char scntl3);
+static	ccb_p	ncr_get_ccb	(ncb_p np, u_char tn, u_char ln);
+static	void	ncr_init	(ncb_p np, int reset, char * msg, u_long code);
+static	void	ncr_int_sbmc	(ncb_p np);
+static	void	ncr_int_par	(ncb_p np, u_short sist);
+static	void	ncr_int_ma	(ncb_p np);
+static	void	ncr_int_sir	(ncb_p np);
+static  void    ncr_int_sto     (ncb_p np);
+static	void	ncr_int_udc	(ncb_p np);
+static	void	ncr_negotiate	(ncb_p np, tcb_p tp);
+static	int	ncr_prepare_nego(ncb_p np, ccb_p cp, u_char *msgptr);
+#ifdef	SCSI_NCR_INTEGRITY_CHECKING
+static	int	ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr);
+#endif
+static	void	ncr_script_copy_and_bind
+				(ncb_p np, ncrcmd *src, ncrcmd *dst, int len);
+static  void    ncr_script_fill (struct script * scr, struct scripth * scripth);
+static	int	ncr_scatter_896R1 (ncb_p np, ccb_p cp, Scsi_Cmnd *cmd);
+static	int	ncr_scatter	(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd);
+static	void	ncr_getsync	(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p);
+static	void	ncr_get_xfer_info(ncb_p np, tcb_p tp, u_char *factor, u_char *offset, u_char *width);
+static	void	ncr_setsync	(ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4);
+static	void	ncr_set_sync_wide_status (ncb_p np, u_char target);
+static	void	ncr_setup_tags	(ncb_p np, u_char tn, u_char ln);
+static	void	ncr_setwide	(ncb_p np, ccb_p cp, u_char wide, u_char ack);
+static	void	ncr_setsyncwide	(ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4, u_char wide);
+static	int	ncr_show_msg	(u_char * msg);
+static	void	ncr_print_msg	(ccb_p cp, char *label, u_char * msg);
+static	int	ncr_snooptest	(ncb_p np);
+static	void	ncr_timeout	(ncb_p np);
+static  void    ncr_wakeup      (ncb_p np, u_long code);
+static  int     ncr_wakeup_done (ncb_p np);
+static	void	ncr_start_next_ccb (ncb_p np, lcb_p lp, int maxn);
+static	void	ncr_put_start_queue(ncb_p np, ccb_p cp);
+static	void	ncr_chip_reset	(ncb_p np);
+static	void	ncr_soft_reset	(ncb_p np);
+static	void	ncr_start_reset	(ncb_p np);
+static	int	ncr_reset_scsi_bus (ncb_p np, int enab_int, int settle_delay);
+static	int	ncr_compute_residual (ncb_p np, ccb_p cp);
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+static	void	ncr_usercmd	(ncb_p np);
+#endif
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+static void ncr_free_resources(ncb_p np);
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd);
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd);
+static void process_waiting_list(ncb_p np, int sts);
+
+/*
+** Convenience wrappers: removal re-uses retrieve_from_waiting_list()
+** with to_remove=1, and requeue/reset re-use process_waiting_list()
+** with the appropriate completion status (DID_OK / DID_RESET).
+*/
+#define remove_from_waiting_list(np, cmd) \
+		retrieve_from_waiting_list(1, (np), (cmd))
+#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
+#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static	void	ncr_get_nvram		(ncr_device *devp, ncr_nvram *nvp);
+static	int	sym_read_Tekram_nvram	(ncr_slot *np, u_short device_id,
+					 Tekram_nvram *nvram);
+static	int	sym_read_Symbios_nvram	(ncr_slot *np, Symbios_nvram *nvram);
+#endif
+
+/*==========================================================
+**
+**
+** Global static data.
+**
+**
+**==========================================================
+*/
+
+/*
+** Return the instance name string stored in the controller
+** data block (np->inst_name).
+*/
+static inline char *ncr_name (ncb_p np)
+{
+	return np->inst_name;
+}
+
+
+/*==========================================================
+**
+**
+** Scripts for NCR-Processor.
+**
+** Use ncr_script_bind for binding to physical addresses.
+**
+**
+**==========================================================
+**
+** NADDR generates a reference to a field of the controller data.
+** PADDR generates a reference to another part of the script.
+** RADDR generates a reference to a script processor register.
+** FADDR generates a reference to a script processor register
+** with offset.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Relocation tags or'ed into the high nibble of SCRIPTS words by
+** the macros below (see NADDR/PADDR/RADDR/FADDR description above).
+** NOTE(review): presumably ncr_script_copy_and_bind() decodes
+** RELOC_MASK to patch each word with the proper physical address --
+** confirm in the binding code (outside this chunk).
+*/
+#define	RELOC_SOFTC	0x40000000
+#define	RELOC_LABEL	0x50000000
+#define	RELOC_REGISTER	0x60000000
+#if 0
+#define	RELOC_KVAR	0x70000000
+#endif
+#define	RELOC_LABELH	0x80000000
+#define	RELOC_MASK	0xf0000000
+
+#define	NADDR(label)	(RELOC_SOFTC | offsetof(struct ncb, label))
+#define PADDR(label)    (RELOC_LABEL | offsetof(struct script, label))
+#define PADDRH(label)   (RELOC_LABELH | offsetof(struct scripth, label))
+#define	RADDR(label)	(RELOC_REGISTER | REG(label))
+#define	FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
+#define	KVAR(which)	(RELOC_KVAR | (which))
+
+#define SCR_DATA_ZERO	0xf00ff00f
+
+/*
+** NOTE(review): RELOC_KVAR is defined only inside "#if 0" above, so
+** it is NOT defined here; the whole section below is compiled out
+** and KVAR() would fail to expand if used as things stand.
+*/
+#ifdef	RELOC_KVAR
+#define	SCRIPT_KVAR_JIFFIES	(0)
+#define	SCRIPT_KVAR_FIRST	SCRIPT_KVAR_JIFFIES
+#define	SCRIPT_KVAR_LAST	SCRIPT_KVAR_JIFFIES
+/*
+ * Kernel variables referenced in the scripts.
+ * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
+ */
+static void *script_kvars[] __initdata =
+	{ (void *)&jiffies };
+#endif
+
+static struct script script0 __initdata = {
+/*--------------------------< START >-----------------------*/ {
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** Clear SIGP.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+
+ /*
+ ** Stop here if the C code wants to perform
+ ** some error recovery procedure manually.
+ ** (Indicate this by setting SEM in ISTAT)
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ /*
+ ** Report to the C code the next position in
+ ** the start queue the SCRIPTS will schedule.
+ ** The C code must not change SCRATCHA.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (startpos),
+ SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
+ SIR_SCRIPT_STOPPED,
+
+ /*
+ ** Start the next job.
+ **
+ ** @DSA = start point for this job.
+ ** SCRATCHA = address of this job in the start queue.
+ **
+ ** We will restore startpos with SCRATCHA if we fails the
+ ** arbitration or if it is the idle job.
+ **
+ ** The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS
+ ** is a critical path. If it is partially executed, it then
+ ** may happen that the job address is not yet in the DSA
+ ** and the the next queue position points to the next JOB.
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (startpos),
+ SCR_LOAD_REL (temp, 4),
+ 4,
+}/*-------------------------< GETJOB_BEGIN >------------------*/,{
+ SCR_STORE_ABS (temp, 4),
+ PADDRH (startpos),
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+}/*-------------------------< GETJOB_END >--------------------*/,{
+ SCR_LOAD_REL (temp, 4),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< SELECT >----------------------*/,{
+ /*
+ ** DSA contains the address of a scheduled
+ ** data structure.
+ **
+ ** SCRATCHA contains the address of the start queue
+ ** entry which points to the next job.
+ **
+ ** Set Initiator mode.
+ **
+ ** (Target mode is left as an exercise for the reader)
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+ PADDR (ungetjob),
+ /*
+ ** Now there are 4 possibilities:
+ **
+ ** (1) The ncr looses arbitration.
+ ** This is ok, because it will try again,
+ ** when the bus becomes idle.
+ ** (But beware of the timeout function!)
+ **
+ ** (2) The ncr is reselected.
+ ** Then the script processor takes the jump
+ ** to the RESELECT label.
+ **
+ ** (3) The ncr wins arbitration.
+ ** Then it will execute SCRIPTS instruction until
+ ** the next instruction that checks SCSI phase.
+ ** Then will stop and wait for selection to be
+ ** complete or selection time-out to occur.
+ **
+ ** After having won arbitration, the ncr SCRIPTS
+ ** processor is able to execute instructions while
+ ** the SCSI core is performing SCSI selection. But
+ ** some script instruction that is not waiting for
+ ** a valid phase (or selection timeout) to occur
+ ** breaks the selection procedure, by probably
+ ** affecting timing requirements.
+ ** So we have to wait immediately for the next phase
+ ** or the selection to complete or time-out.
+ */
+
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+}/*-------------------------< WF_SEL_DONE >----------------------*/,{
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ SIR_SEL_ATN_NO_MSG_OUT,
+}/*-------------------------< SEND_IDENT >----------------------*/,{
+ /*
+ ** Selection complete.
+ ** Send the IDENTIFY and SIMPLE_TAG messages
+ ** (and the M_X_SYNC_REQ / M_X_WIDE_REQ message)
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct dsb, smsg),
+}/*-------------------------< SELECT2 >----------------------*/,{
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** Set IMMEDIATE ARBITRATION if we have been given
+ ** a hint to do so. (Some job to do after this one).
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ ** Anticipate the COMMAND phase.
+ ** This is the PHASE we expect at this point.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+ PADDR (sel_no_cmd),
+
+}/*-------------------------< COMMAND >--------------------*/,{
+ /*
+ ** ... and send the command
+ */
+ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct dsb, cmd),
+
+}/*-----------------------< DISPATCH >----------------------*/,{
+ /*
+ ** MSG_IN is the only phase that shall be
+ ** entered at least once for each (re)selection.
+ ** So we test it first.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (msg_in),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
+ PADDR (datao_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
+ PADDR (datai_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+ PADDR (status),
+ SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+ PADDR (command),
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+ PADDRH (msg_out),
+ /*
+ * Discard as many illegal phases as
+ * required and tell the C code about.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+ -16,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+ -16,
+ SCR_INT,
+ SIR_BAD_PHASE,
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*---------------------< SEL_NO_CMD >----------------------*/,{
+ /*
+ ** The target does not switch to command
+ ** phase after IDENTIFY has been sent.
+ **
+ ** If it stays in MSG OUT phase send it
+ ** the IDENTIFY again.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDRH (resend_ident),
+ /*
+ ** If target does not switch to MSG IN phase
+ ** and we sent a negotiation, assert the
+ ** failure immediately.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ /*
+ ** Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< INIT >------------------------*/,{
+ /*
+ ** Wait for the SCSI RESET signal to be
+ ** inactive before restarting operations,
+ ** since the chip may hang on SEL_ATN
+ ** if SCSI RESET is active.
+ */
+ SCR_FROM_REG (sstat0),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
+ -16,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< CLRACK >----------------------*/,{
+ /*
+ ** Terminate possible pending message phase.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DISP_STATUS >----------------------*/,{
+ /*
+ ** Anticipate STATUS phase.
+ **
+ ** Does spare 3 SCRIPTS instructions when we have
+ ** completed the INPUT of the data.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+ PADDR (status),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATAI_DONE >-------------------*/,{
+ /*
+ * If the device wants us to send more data,
+ * we must count the extra bytes.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** If the SWIDE is not full, jump to dispatcher.
+ ** We anticipate a STATUS phase.
+ ** If we get later an IGNORE WIDE RESIDUE, we
+ ** will alias it as a MODIFY DP (-1).
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (WSR, WSR)),
+ PADDR (disp_status),
+ /*
+ ** The SWIDE is full.
+ ** Clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ /*
+ * We are expecting an IGNORE RESIDUE message
+ * from the device, otherwise we are in data
+ * overrun condition. Check against MSG_IN phase.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (disp_status),
+ /*
+ * We are in MSG_IN phase,
+ * Read the first byte of the message.
+ * If it is not an IGNORE RESIDUE message,
+ * signal overrun and jump to message
+ * processing.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+ SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ PADDR (msg_in2),
+
+ /*
+ * We got the message we expected.
+ * Read the 2nd byte, and jump to dispatcher.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (disp_status),
+
+}/*-------------------------< DATAO_DONE >-------------------*/,{
+ /*
+ * If the device wants us to send more data,
+ * we must count the extra bytes.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ PADDRH (data_ovrun),
+ /*
+ ** If the SODL is not full jump to dispatcher.
+ ** We anticipate a MSG IN phase or a STATUS phase.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (WSS, WSS)),
+ PADDR (disp_status),
+ /*
+ ** The SODL is full, clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSS),
+ 0,
+ /*
+ ** And signal a DATA UNDERRUN condition
+ ** to the C code.
+ */
+ SCR_INT,
+ SIR_SODL_UNDERRUN,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< IGN_I_W_R_MSG >--------------*/,{
+ /*
+ ** We jump here from the phase mismatch interrupt,
+ ** When we have a SWIDE and the device has presented
+ ** a IGNORE WIDE RESIDUE message on the BUS.
+ ** We just have to throw away this message and then
+ ** to jump to dispatcher.
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (scratch),
+ /*
+ ** Clear ACK and jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DATAI_PHASE >------------------*/,{
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATAO_PHASE >------------------*/,{
+ /*
+ ** Patch for 53c1010_66 only - to allow A0 part
+ ** to operate properly in a 33MHz PCI bus.
+ **
+ ** SCR_REG_REG(scntl4, SCR_OR, 0x0c),
+ ** 0,
+ */
+ SCR_NO_OP,
+ 0,
+ SCR_RETURN,
+ 0,
+}/*-------------------------< MSG_IN >--------------------*/,{
+ /*
+ ** Get the first byte of the message.
+ **
+ ** The script processor doesn't negate the
+ ** ACK signal after this transfer.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------*/,{
+ /*
+ ** Check first against 1 byte messages
+ ** that we handle from SCRIPTS.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+ PADDR (complete),
+ SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+ PADDR (disconnect),
+ SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+ PADDR (save_dp),
+ SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+ PADDR (restore_dp),
+ /*
+ ** We handle all other messages from the
+ ** C code, so no need to waste on-chip RAM
+ ** for those ones.
+ */
+ SCR_JUMP,
+ PADDRH (msg_in_etc),
+
+}/*-------------------------< STATUS >--------------------*/,{
+ /*
+ ** get the status
+ */
+ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ NADDR (scratch),
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
+ ** since we may have to tamper the start queue from
+ ** the C code.
+ */
+ SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_AND, ~IARB),
+ 0,
+#endif
+ /*
+ ** save status to scsi_status.
+ ** mark as complete.
+ */
+ SCR_TO_REG (SS_REG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ /*
+ ** Anticipate the MESSAGE PHASE for
+ ** the TASK COMPLETE message.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (msg_in),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< COMPLETE >-----------------*/,{
+ /*
+ ** Complete message.
+ **
+ ** Copy the data pointer to LASTP in header.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ /*
+ ** When we terminate the cycle by clearing ACK,
+ ** the target may disconnect immediately.
+ **
+ ** We don't want to be told of an
+ ** "unexpected disconnect",
+ ** so we disable this feature.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ /*
+ ** Terminate cycle ...
+ */
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** ... and wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+}/*-------------------------< COMPLETE2 >-----------------*/,{
+ /*
+ ** Save host status to header.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+#ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+ /*
+ ** Some bridges may reorder DMA writes to memory.
+ ** We donnot want the CPU to deal with completions
+ ** without all the posted write having been flushed
+ ** to memory. This DUMMY READ should flush posted
+ ** buffers prior to the CPU having to deal with
+ ** completions.
+ */
+ SCR_LOAD_REL (scr0, 4), /* DUMMY READ */
+ offsetof (struct ccb, phys.header.status),
+#endif
+ /*
+ ** If command resulted in not GOOD status,
+ ** call the C code if needed.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ PADDRH (bad_status),
+
+ /*
+ ** If we performed an auto-sense, call
+ ** the C code to synchronyze task aborts
+ ** with UNIT ATTENTION conditions.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_INT ^ IFTRUE (MASK (HF_AUTO_SENSE, HF_AUTO_SENSE)),
+ SIR_AUTO_SENSE_DONE,
+
+}/*------------------------< DONE >-----------------*/,{
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ /*
+ ** It seems that some bridges flush everything
+ ** when the INTR line is raised. For these ones,
+ ** we can just ensure that the INTR line will be
+ ** raised before each completion. So, if it happens
+ ** that we have been faster that the CPU, we just
+ ** have to synchronize with it. A dummy programmed
+ ** interrupt will do the trick.
+ ** Note that we overlap at most 1 IO with the CPU
+ ** in this situation and that the IRQ line must not
+ ** be shared.
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ SCR_INT ^ IFTRUE (MASK (INTF, INTF)),
+ SIR_DUMMY_INTERRUPT,
+#endif
+ /*
+ ** Copy the DSA to the DONE QUEUE and
+ ** signal completion to the host.
+ ** If we are interrupted between DONE
+ ** and DONE_END, we must reset, otherwise
+ ** the completed CCB will be lost.
+ */
+ SCR_STORE_ABS (dsa, 4),
+ PADDRH (saved_dsa),
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (done_pos),
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (saved_dsa),
+ SCR_STORE_REL (scratcha, 4),
+ 0,
+ /*
+ ** The instruction below reads the DONE QUEUE next
+ ** free position from memory.
+ ** In addition it ensures that all PCI posted writes
+ ** are flushed and so the DSA value of the done
+ ** CCB is visible by the CPU before INTFLY is raised.
+ */
+ SCR_LOAD_REL (temp, 4),
+ 4,
+ SCR_INT_FLY,
+ 0,
+ SCR_STORE_ABS (temp, 4),
+ PADDRH (done_pos),
+}/*------------------------< DONE_END >-----------------*/,{
+ SCR_JUMP,
+ PADDR (start),
+
+}/*-------------------------< SAVE_DP >------------------*/,{
+ /*
+ ** Clear ACK immediately.
+ ** No need to delay it.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** Keep track we received a SAVE DP, so
+ ** we will switch to the other PM context
+ ** on the next PM since the DP may point
+ ** to the current PM context.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ /*
+ ** SAVE_DP message:
+ ** Copy the data pointer to SAVEP in header.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< RESTORE_DP >---------------*/,{
+ /*
+ ** RESTORE_DP message:
+ ** Copy SAVEP in header to actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DISCONNECT >---------------*/,{
+ /*
+ ** DISCONNECTing ...
+ **
+ ** disable the "unexpected disconnect" feature,
+ ** and remove the ACK signal.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** Wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Status is: DISCONNECTED.
+ */
+ SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+ 0,
+ /*
+ ** Save host status to header.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+ /*
+ ** If QUIRK_AUTOSAVE is set,
+ ** do an "save pointer" operation.
+ */
+ SCR_FROM_REG (QU_REG),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)),
+ PADDR (start),
+ /*
+ ** like SAVE_DP message:
+ ** Remember we saved the data pointer.
+ ** Copy data pointer to SAVEP in header.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (start),
+
+}/*-------------------------< IDLE >------------------------*/,{
+ /*
+ ** Nothing to do?
+ ** Wait for reselect.
+ ** This NOP will be patched with LED OFF
+ ** SCR_REG_REG (gpreg, SCR_OR, 0x01)
+ */
+ SCR_NO_OP,
+ 0,
+#ifdef SCSI_NCR_IARB_SUPPORT
+ SCR_JUMPR,
+ 8,
+#endif
+}/*-------------------------< UNGETJOB >-----------------*/,{
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** Set IMMEDIATE ARBITRATION, for the next time.
+ ** This will give us better chance to win arbitration
+ ** for the job we just wanted to do.
+ */
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ ** We are not able to restart the SCRIPTS if we are
+ ** interrupted and these instruction haven't been
+ ** all executed. BTW, this is very unlikely to
+ ** happen, but we check that from the C code.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_STORE_ABS (scratcha, 4),
+ PADDRH (startpos),
+}/*-------------------------< RESELECT >--------------------*/,{
+ /*
+ ** make the host status invalid.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** Sleep waiting for a reselection.
+ ** If SIGP is set, special treatment.
+ **
+ ** Zu allem bereit ..
+ */
+ SCR_WAIT_RESEL,
+ PADDR(start),
+}/*-------------------------< RESELECTED >------------------*/,{
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** load the target id into the sdid
+ */
+ SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+ 0,
+ SCR_TO_REG (sdid),
+ 0,
+ /*
+ ** load the target control block address
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (targtbl),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0x3c),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ /*
+ ** Load the synchronous transfer registers.
+ */
+ SCR_LOAD_REL (scntl3, 1),
+ offsetof(struct tcb, wval),
+ SCR_LOAD_REL (sxfer, 1),
+ offsetof(struct tcb, sval),
+}/*-------------------------< RESEL_SCNTL4 >------------------*/,{
+ /*
+ ** Write with uval value. Patch if device
+ ** does not support Ultra3.
+ **
+ ** SCR_LOAD_REL (scntl4, 1),
+ ** offsetof(struct tcb, uval),
+ */
+
+ SCR_NO_OP,
+ 0,
+ /*
+ * We expect MESSAGE IN phase.
+ * If not, get help from the C code.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_RESEL_NO_MSG_IN,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+
+ /*
+ * If IDENTIFY LUN #0, use a faster path
+ * to find the LCB structure.
+ */
+ SCR_JUMPR ^ IFTRUE (MASK (0x80, 0xbf)),
+ 56,
+ /*
+ * If message isn't an IDENTIFY,
+ * tell the C code about.
+ */
+ SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
+ SIR_RESEL_NO_IDENTIFY,
+ /*
+ * It is an IDENTIFY message,
+ * Load the LUN control block address.
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct tcb, b_luntbl),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0xfc),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_JUMPR,
+ 8,
+ /*
+ ** LUN 0 special case (but usual one :))
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct tcb, b_lun0),
+
+ /*
+ ** Load the reselect task action for this LUN.
+ ** Load the tasks DSA array for this LUN.
+ ** Call the action.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct lcb, resel_task),
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct lcb, b_tasktbl),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< RESEL_TAG >-------------------*/,{
+ /*
+ ** ACK the IDENTIFY or TAG previously received
+ */
+
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** Read IDENTIFY + SIMPLE + TAG using a single MOVE.
+	**	Aggressive optimization, isn't it?
+ ** No need to test the SIMPLE TAG message, since the
+ ** driver only supports conformant devices for tags. ;-)
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ /*
+ ** Read the TAG from the SIDL.
+ ** Still an aggressive optimization. ;-)
+ ** Compute the CCB indirect jump address which
+ ** is (#TAG*2 & 0xfc) due to tag numbering using
+ ** 1,3,5..MAXTAGS*2+1 actual values.
+ */
+ SCR_REG_SFBR (sidl, SCR_SHL, 0),
+ 0,
+#if MAX_TASKS*4 > 512
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 2),
+ 0,
+ SCR_REG_REG (sfbr, SCR_SHL, 0),
+ 0,
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#elif MAX_TASKS*4 > 256
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#endif
+ /*
+ ** Retrieve the DSA of this task.
+ ** JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
+ 0,
+}/*-------------------------< RESEL_GO >-------------------*/,{
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct ccb, phys.header.go.restart),
+ SCR_RETURN,
+ 0,
+ /* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< RESEL_NOTAG >-------------------*/,{
+ /*
+ ** JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_JUMP,
+ PADDR (resel_go),
+
+}/*-------------------------< RESEL_DSA >-------------------*/,{
+ /*
+ ** Ack the IDENTIFY or TAG previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+ /*
+ ** Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATA_IN >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CHMOV_TBL ^ SCR_DATA_IN,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_IN2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (datai_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-------------------------< DATA_OUT >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_OUT2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (datao_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+
+}/*-------------------------< PM0_DATA >--------------------*/,{
+ /*
+ ** Read our host flags to SFBR, so we will be able
+ ** to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR (pm0_data_out),
+ /*
+ ** Actual phase is DATA IN.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ ** Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.pm0.sg),
+ SCR_JUMP,
+ PADDR (pm0_data_end),
+}/*-------------------------< PM0_DATA_OUT >----------------*/,{
+ /*
+ ** Actual phase is DATA OUT.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ ** Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct ccb, phys.pm0.sg),
+}/*-------------------------< PM0_DATA_END >----------------*/,{
+ /*
+ ** Clear the flag that told we were moving
+ ** data from the PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
+ 0,
+ /*
+ ** Return to the previous DATA script which
+ ** is guaranteed by design (if no bug) to be
+ ** the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.pm0.ret),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< PM1_DATA >--------------------*/,{
+ /*
+ ** Read our host flags to SFBR, so we will be able
+ ** to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR (pm1_data_out),
+ /*
+ ** Actual phase is DATA IN.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ ** Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.pm1.sg),
+ SCR_JUMP,
+ PADDR (pm1_data_end),
+}/*-------------------------< PM1_DATA_OUT >----------------*/,{
+ /*
+ ** Actual phase is DATA OUT.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** Keep track we are moving data from the
+ ** PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ ** Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct ccb, phys.pm1.sg),
+}/*-------------------------< PM1_DATA_END >----------------*/,{
+ /*
+ ** Clear the flag that told we were moving
+ ** data from the PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
+ 0,
+ /*
+ ** Return to the previous DATA script which
+ ** is guaranteed by design (if no bug) to be
+ ** the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.pm1.ret),
+ SCR_RETURN,
+ 0,
+}/*---------------------------------------------------------*/
+};
+
+
+static struct scripth scripth0 __initdata = {
+/*------------------------< START64 >-----------------------*/{
+ /*
+ ** SCRIPT entry point for the 895A and the 896.
+ ** For now, there is no specific stuff for that
+ ** chip at this point, but this may come.
+ */
+ SCR_JUMP,
+ PADDR (init),
+}/*-------------------------< NO_DATA >-------------------*/,{
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-----------------------< SEL_FOR_ABORT >------------------*/,{
+ /*
+ ** We are jumped here by the C code, if we have
+ ** some target to reset or some disconnected
+ ** job to abort. Since error recovery is a serious
+ ** busyness, we will really reset the SCSI BUS, if
+ ** case of a SCSI interrupt occurring in this path.
+ */
+
+ /*
+ ** Set initiator mode.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct ncb, abrt_sel),
+ PADDR (reselect),
+
+ /*
+ ** Wait for the selection to complete or
+ ** the selection to time out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ -8,
+ /*
+ ** Call the C code.
+ */
+ SCR_INT,
+ SIR_TARGET_SELECTED,
+ /*
+ ** The C code should let us continue here.
+ ** Send the 'kiss of death' message.
+ ** We expect an immediate disconnect once
+ ** the target has eaten the message.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct ncb, abrt_tbl),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Tell the C code that we are done.
+ */
+ SCR_INT,
+ SIR_ABORT_SENT,
+}/*-----------------------< SEL_FOR_ABORT_1 >--------------*/,{
+ /*
+ ** Jump at scheduler.
+ */
+ SCR_JUMP,
+ PADDR (start),
+
+}/*------------------------< SELECT_NO_ATN >-----------------*/,{
+ /*
+ ** Set Initiator mode.
+ ** And try to select this target without ATN.
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ SCR_SEL_TBL ^ offsetof (struct dsb, select),
+ PADDR (ungetjob),
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+}/*------------------------< WF_SEL_DONE_NO_ATN >-----------------*/,{
+ /*
+ ** Wait immediately for the next phase or
+ ** the selection to complete or time-out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+
+}/*-------------------------< MSG_IN_ETC >--------------------*/,{
+ /*
+ ** If it is an EXTENDED (variable size message)
+ ** Handle it.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+ PADDRH (msg_extended),
+ /*
+ ** Let the C code handle any other
+ ** 1 byte message.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
+ PADDRH (msg_received),
+ SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
+ PADDRH (msg_received),
+ /*
+ ** We donnot handle 2 bytes messages from SCRIPTS.
+ ** So, let the C code deal with these ones too.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
+ PADDRH (msg_weird_seen),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ SCR_JUMP,
+ PADDRH (msg_received),
+
+}/*-------------------------< MSG_RECEIVED >--------------------*/,{
+ SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_RECEIVED,
+
+}/*-------------------------< MSG_WEIRD_SEEN >------------------*/,{
+ SCR_LOAD_REL (scratcha1, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_WEIRD,
+
+}/*-------------------------< MSG_EXTENDED >--------------------*/,{
+ /*
+ ** Clear ACK and get the next byte
+ ** assumed to be the message length.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ /*
+ ** Try to catch some unlikely situations as 0 length
+ ** or too large the length.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (0)),
+ PADDRH (msg_weird_seen),
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
+ 0,
+ SCR_JUMP ^ IFTRUE (CARRYSET),
+ PADDRH (msg_weird_seen),
+ /*
+ ** We donnot handle extended messages from SCRIPTS.
+ ** Read the amount of data correponding to the
+ ** message length and call the C code.
+ */
+ SCR_STORE_REL (scratcha, 1),
+ offsetof (struct dsb, smsg_ext.size),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_IN,
+ offsetof (struct dsb, smsg_ext),
+ SCR_JUMP,
+ PADDRH (msg_received),
+
+}/*-------------------------< MSG_BAD >------------------*/,{
+ /*
+ ** unimplemented message - reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< MSG_WEIRD >--------------------*/,{
+ /*
+ ** weird message received
+ ** ignore all MSG IN phases and reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+}/*-------------------------< MSG_WEIRD1 >--------------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDRH (msg_weird1),
+}/*-------------------------< WDTR_RESP >----------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_WDTR >----------------*/,{
+ /*
+ ** Send the M_X_WIDE_REQ
+ */
+ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< SDTR_RESP >-------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_SDTR >-------------*/,{
+ /*
+ ** Send the M_X_SYNC_REQ
+ */
+ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< PPR_RESP >-------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_PPR >-------------*/,{
+ /*
+ ** Send the M_X_PPR_REQ
+ */
+ SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< NEGO_BAD_PHASE >------------*/,{
+ SCR_INT,
+ SIR_NEGO_PROTO,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< MSG_OUT >-------------------*/,{
+ /*
+ ** The target requests a message.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ /*
+ ** ... wait for the next phase
+ ** if it's a message out, send it again, ...
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDRH (msg_out),
+}/*-------------------------< MSG_OUT_DONE >--------------*/,{
+ /*
+ ** ... else clear the message ...
+ */
+ SCR_INT,
+ SIR_MSG_OUT_DONE,
+ /*
+ ** ... and process the next phase
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+ /*
+ * Use scratcha to count the extra bytes.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (zero),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
+ /*
+ * The target may want to transfer too much data.
+ *
+ * If phase is DATA OUT write 1 byte and count it.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+ 16,
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDRH (data_ovrun2),
+ /*
+ * If WSR is set, clear this condition, and
+ * count this byte.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+ 16,
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ SCR_JUMP,
+ PADDRH (data_ovrun2),
+ /*
+ * Finally check against DATA IN phase.
+ * Signal data overrun to the C code
+ * and jump to dispatcher if not so.
+ * Read 1 byte otherwise and count it.
+ */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ 16,
+ SCR_INT,
+ SIR_DATA_OVERRUN,
+ SCR_JUMP,
+ PADDR (dispatch),
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
+ NADDR (scratch),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
+ /*
+ * Count this byte.
+ * This will allow to return a negative
+ * residual to user.
+ */
+ SCR_REG_REG (scratcha, SCR_ADD, 0x01),
+ 0,
+ SCR_REG_REG (scratcha1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (scratcha2, SCR_ADDC, 0),
+ 0,
+ /*
+ * .. and repeat as required.
+ */
+ SCR_JUMP,
+ PADDRH (data_ovrun1),
+
+}/*-------------------------< ABORT_RESEL >----------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** send the abort/abortag/reset message
+ ** we expect an immediate disconnect
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ SCR_INT,
+ SIR_RESEL_ABORTED,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< RESEND_IDENT >-------------------*/,{
+ /*
+ ** The target stays in MSG OUT phase after having acked
+ ** Identify [+ Tag [+ Extended message ]]. Targets shall
+ ** behave this way on parity error.
+ ** We must send it again all the messages.
+ */
+ SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
+ 0, /* 1rst ACK = 90 ns. Hope the NCR is'nt too fast */
+ SCR_JUMP,
+ PADDR (send_ident),
+}/*-------------------------< IDENT_BREAK >-------------------*/,{
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+}/*-------------------------< IDENT_BREAK_ATN >----------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+}/*-------------------------< SDATA_IN >-------------------*/,{
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct dsb, sense),
+ SCR_CALL,
+ PADDR (datai_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-------------------------< DATA_IO >--------------------*/,{
+ /*
+ ** We jump here if the data direction was unknown at the
+ ** time we had to queue the command to the scripts processor.
+ ** Pointers had been set as follow in this situation:
+ ** savep --> DATA_IO
+ ** lastp --> start pointer when DATA_IN
+ ** goalp --> goal pointer when DATA_IN
+ ** wlastp --> start pointer when DATA_OUT
+ ** wgoalp --> goal pointer when DATA_OUT
+ ** This script sets savep/lastp/goalp according to the
+ ** direction chosen by the target.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ PADDRH(data_io_out),
+}/*-------------------------< DATA_IO_COM >-----------------*/,{
+ /*
+ ** Direction is DATA IN.
+ ** Warning: we jump here, even when phase is DATA OUT.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.savep),
+
+ /*
+ ** Jump to the SCRIPTS according to actual direction.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATA_IO_OUT >-----------------*/,{
+ /*
+ ** Direction is DATA OUT.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
+ 0,
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.wlastp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.wgoalp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.goalp),
+ SCR_JUMP,
+ PADDRH(data_io_com),
+
+}/*-------------------------< RESEL_BAD_LUN >---------------*/,{
+ /*
+ ** Message is an IDENTIFY, but lun is unknown.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORT to clear all pending tasks.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_LUN,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_I_T_L >------------------*/,{
+ /*
+ ** We donnot have a task for that I_T_L.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORT message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_I_T_L_Q >----------------*/,{
+ /*
+ ** We donnot have a task that matches the tag.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORTTAG message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L_Q,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_STATUS >-----------------*/,{
+ /*
+ ** Anything different from INTERMEDIATE
+ ** CONDITION MET should be a bad SCSI status,
+ ** given that GOOD status has already been tested.
+ ** Call the C code.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (startpos),
+ SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
+ SIR_BAD_STATUS,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< TWEAK_PMJ >------------------*/,{
+ /*
+ ** Disable PM handling from SCRIPTS for the data phase
+ ** and so force PM to be handled from C code if HF_PM_TO_C
+ ** flag is set.
+ */
+ SCR_FROM_REG(HF_REG),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (HF_PM_TO_C, HF_PM_TO_C)),
+ 16,
+ SCR_REG_REG (ccntl0, SCR_OR, ENPMJ),
+ 0,
+ SCR_RETURN,
+ 0,
+ SCR_REG_REG (ccntl0, SCR_AND, (~ENPMJ)),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< PM_HANDLE >------------------*/,{
+ /*
+ ** Phase mismatch handling.
+ **
+ ** Since we have to deal with 2 SCSI data pointers
+ ** (current and saved), we need at least 2 contexts.
+ ** Each context (pm0 and pm1) has a saved area, a
+ ** SAVE mini-script and a DATA phase mini-script.
+ */
+ /*
+ ** Get the PM handling flags.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** If no flags (1rst PM for example), avoid
+ ** all the below heavy flags testing.
+ ** This makes the normal case a bit faster.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
+ PADDRH (pm_handle1),
+ /*
+ ** If we received a SAVE DP, switch to the
+ ** other PM context since the savep may point
+ ** to the current PM context.
+ */
+ SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
+ 8,
+ SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
+ 0,
+ /*
+ ** If we have been interrupt in a PM DATA mini-script,
+ ** we take the return address from the corresponding
+ ** saved area.
+ ** This ensure the return address always points to the
+ ** main DATA script for this transfer.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
+ PADDRH (pm_handle1),
+ SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
+ 16,
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct ccb, phys.pm0.ret),
+ SCR_JUMP,
+ PADDRH (pm_save),
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct ccb, phys.pm1.ret),
+ SCR_JUMP,
+ PADDRH (pm_save),
+}/*-------------------------< PM_HANDLE1 >-----------------*/,{
+ /*
+ ** Normal case.
+ ** Update the return address so that it
+ ** will point after the interrupted MOVE.
+ */
+ SCR_REG_REG (ia, SCR_ADD, 8),
+ 0,
+ SCR_REG_REG (ia1, SCR_ADDC, 0),
+ 0,
+}/*-------------------------< PM_SAVE >--------------------*/,{
+ /*
+ ** Clear all the flags that told us if we were
+ ** interrupted in a PM DATA mini-script and/or
+ ** we received a SAVE DP.
+ */
+ SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
+ 0,
+ /*
+ ** Choose the current PM context.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
+ PADDRH (pm1_save),
+}/*-------------------------< PM0_SAVE >-------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct ccb, phys.pm0.ret),
+ /*
+ ** If WSR bit is set, either UA and RBC may
+ ** have to be changed whatever the device wants
+ ** to ignore this residue ot not.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDRH (pm_wsr_handle),
+ /*
+ ** Save the remaining byte count, the updated
+ ** address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct ccb, phys.pm0.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct ccb, phys.pm0.sg.addr),
+ /*
+ ** Set the current pointer at the PM0 DATA mini-script.
+ */
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (pm0_data_addr),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< PM1_SAVE >-------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct ccb, phys.pm1.ret),
+ /*
+ ** If WSR bit is set, either UA and RBC may
+ ** have been changed whatever the device wants
+ ** to ignore this residue or not.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDRH (pm_wsr_handle),
+ /*
+ ** Save the remaining byte count, the updated
+ ** address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct ccb, phys.pm1.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct ccb, phys.pm1.sg.addr),
+ /*
+ ** Set the current pointer at the PM1 DATA mini-script.
+ */
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (pm1_data_addr),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*--------------------------< PM_WSR_HANDLE >-----------------------*/,{
+ /*
+ * Phase mismatch handling from SCRIPT with WSR set.
+ * Such a condition can occur if the chip wants to
+ * execute a CHMOV(size > 1) when the WSR bit is
+ * set and the target changes PHASE.
+ */
+#ifdef SYM_DEBUG_PM_WITH_WSR
+ /*
+ * Some debugging may still be needed.:)
+ */
+ SCR_INT,
+ SIR_PM_WITH_WSR,
+#endif
+ /*
+ * We must move the residual byte to memory.
+ *
+ * UA contains bit 0..31 of the address to
+ * move the residual byte.
+ * Move it to the table indirect.
+ */
+ SCR_STORE_REL (ua, 4),
+ offsetof (struct ccb, phys.wresid.addr),
+ /*
+ * Increment UA (move address to next position).
+ */
+ SCR_REG_REG (ua, SCR_ADD, 1),
+ 0,
+ SCR_REG_REG (ua1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua2, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua3, SCR_ADDC, 0),
+ 0,
+ /*
+ * Compute SCRATCHA as:
+ * - size to transfer = 1 byte.
+ * - bit 24..31 = high address bit [32...39].
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (zero),
+ SCR_REG_REG (scratcha, SCR_OR, 1),
+ 0,
+ SCR_FROM_REG (rbc3),
+ 0,
+ SCR_TO_REG (scratcha3),
+ 0,
+ /*
+ * Move this value to the table indirect.
+ */
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.wresid.size),
+ /*
+ * Wait for a valid phase.
+ * While testing with bogus QUANTUM drives, the C1010
+ * sometimes raised a spurious phase mismatch with
+ * WSR and the CHMOV(1) triggered another PM.
+ * Waiting explicitely for the PHASE seemed to avoid
+ * the nested phase mismatch. Btw, this didn't happen
+ * using my IBM drives.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ 0,
+ /*
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.wresid),
+ /*
+ * We can now handle the phase mismatch with UA fixed.
+ * RBC[0..23]=0 is a special case that does not require
+ * a PM context. The C code also checks against this.
+ */
+ SCR_FROM_REG (rbc),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc1),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc2),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ /*
+ * RBC[0..23]=0.
+ * Not only we donnot need a PM context, but this would
+ * lead to a bogus CHMOV(0). This condition means that
+ * the residual was the last byte to move from this CHMOV.
+ * So, we just have to move the current data script pointer
+ * (i.e. TEMP) to the SCRIPTS address following the
+ * interrupted CHMOV and jump to dispatcher.
+ */
+ SCR_STORE_ABS (ia, 4),
+ PADDRH (scratch),
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (scratch),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*--------------------------< WSR_MA_HELPER >-----------------------*/,{
+ /*
+ * Helper for the C code when WSR bit is set.
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.wresid),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< ZERO >------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH >---------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH1 >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM0_DATA_ADDR >---------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM1_DATA_ADDR >---------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SAVED_DSA >-------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SAVED_DRS >-------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< DONE_POS >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< STARTPOS >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< TARGTBL >---------------------*/,{
+ SCR_DATA_ZERO,
+
+
+/*
+** We may use MEMORY MOVE instructions to load the on chip-RAM,
+** if it happens that mapping PCI memory is not possible.
+** But writing the RAM from the CPU is the preferred method,
+** since PCI 2.2 seems to disallow PCI self-mastering.
+*/
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+
+}/*-------------------------< START_RAM >-------------------*/,{
+ /*
+ ** Load the script into on-chip RAM,
+ ** and jump to start point.
+ */
+ SCR_COPY (sizeof (struct script)),
+}/*-------------------------< SCRIPT0_BA >--------------------*/,{
+ 0,
+ PADDR (start),
+ SCR_JUMP,
+ PADDR (init),
+
+}/*-------------------------< START_RAM64 >--------------------*/,{
+ /*
+ ** Load the RAM and start for 64 bit PCI (895A,896).
+ ** Both scripts (script and scripth) are loaded into
+ ** the RAM which is 8K (4K for 825A/875/895).
+ ** We also need to load some 32-63 bit segments
+ ** address of the SCRIPTS processor.
+ ** LOAD/STORE ABSOLUTE always refers to on-chip RAM
+ ** in our implementation. The main memory is
+ ** accessed using LOAD/STORE DSA RELATIVE.
+ */
+ SCR_LOAD_REL (mmws, 4),
+ offsetof (struct ncb, scr_ram_seg),
+ SCR_COPY (sizeof(struct script)),
+}/*-------------------------< SCRIPT0_BA64 >--------------------*/,{
+ 0,
+ PADDR (start),
+ SCR_COPY (sizeof(struct scripth)),
+}/*-------------------------< SCRIPTH0_BA64 >--------------------*/,{
+ 0,
+ PADDRH (start64),
+ SCR_LOAD_REL (mmrs, 4),
+ offsetof (struct ncb, scr_ram_seg),
+ SCR_JUMP64,
+ PADDRH (start64),
+}/*-------------------------< RAM_SEG64 >--------------------*/,{
+ 0,
+
+#endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+
+}/*-------------------------< SNOOPTEST >-------------------*/,{
+ /*
+ ** Read the variable.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof(struct ncb, ncr_cache),
+ SCR_STORE_REL (temp, 4),
+ offsetof(struct ncb, ncr_cache),
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct ncb, ncr_cache),
+}/*-------------------------< SNOOPEND >-------------------*/,{
+ /*
+ ** And stop.
+ */
+ SCR_INT,
+ 99,
+}/*--------------------------------------------------------*/
+};
+
+/*==========================================================
+**
+**
+** Fill in #define dependent parts of the script
+**
+**
+**==========================================================
+*/
+
+void __init ncr_script_fill (struct script * scr, struct scripth * scrh)
+{
+ int i;
+ ncrcmd *p;
+
+ p = scr->data_in;
+ for (i=0; i<MAX_SCATTER; i++) {
+ *p++ =SCR_CHMOV_TBL ^ SCR_DATA_IN;
+ *p++ =offsetof (struct dsb, data[i]);
+ };
+
+ assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in));
+
+ p = scr->data_out;
+
+ for (i=0; i<MAX_SCATTER; i++) {
+ *p++ =SCR_CHMOV_TBL ^ SCR_DATA_OUT;
+ *p++ =offsetof (struct dsb, data[i]);
+ };
+
+ assert ((u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out));
+}
+
+/*==========================================================
+**
+**
+** Copy and rebind a script.
+**
+**
+**==========================================================
+*/
+
+static void __init
+ncr_script_copy_and_bind (ncb_p np,ncrcmd *src,ncrcmd *dst,int len)
+{
+ ncrcmd opcode, new, old, tmp1, tmp2;
+ ncrcmd *start, *end;
+ int relocs;
+ int opchanged = 0;
+
+ start = src;
+ end = src + len/4;
+
+ while (src < end) {
+
+ opcode = *src++;
+ *dst++ = cpu_to_scr(opcode);
+
+ /*
+ ** If we forget to change the length
+ ** in struct script, a field will be
+ ** padded with 0. This is an illegal
+ ** command.
+ */
+
+ if (opcode == 0) {
+ printk (KERN_INFO "%s: ERROR0 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ MDELAY (10000);
+ continue;
+ };
+
+ /*
+ ** We use the bogus value 0xf00ff00f ;-)
+ ** to reserve data area in SCRIPTS.
+ */
+ if (opcode == SCR_DATA_ZERO) {
+ dst[-1] = 0;
+ continue;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_SCRIPT)
+ printk (KERN_INFO "%p: <%x>\n",
+ (src-1), (unsigned)opcode);
+
+ /*
+ ** We don't have to decode ALL commands
+ */
+ switch (opcode >> 28) {
+
+ case 0xf:
+ /*
+ ** LOAD / STORE DSA relative, don't relocate.
+ */
+ relocs = 0;
+ break;
+ case 0xe:
+ /*
+ ** LOAD / STORE absolute.
+ */
+ relocs = 1;
+ break;
+ case 0xc:
+ /*
+ ** COPY has TWO arguments.
+ */
+ relocs = 2;
+ tmp1 = src[0];
+ tmp2 = src[1];
+#ifdef RELOC_KVAR
+ if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
+ tmp1 = 0;
+ if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
+ tmp2 = 0;
+#endif
+ if ((tmp1 ^ tmp2) & 3) {
+ printk (KERN_ERR"%s: ERROR1 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ MDELAY (1000);
+ }
+ /*
+ ** If PREFETCH feature not enabled, remove
+ ** the NO FLUSH bit if present.
+ */
+ if ((opcode & SCR_NO_FLUSH) &&
+ !(np->features & FE_PFEN)) {
+ dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
+ ++opchanged;
+ }
+ break;
+
+ case 0x0:
+ /*
+ ** MOVE/CHMOV (absolute address)
+ */
+ if (!(np->features & FE_WIDE))
+ dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
+ relocs = 1;
+ break;
+
+ case 0x1:
+ /*
+ ** MOVE/CHMOV (table indirect)
+ */
+ if (!(np->features & FE_WIDE))
+ dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
+ relocs = 0;
+ break;
+
+ case 0x8:
+ /*
+ ** JUMP / CALL
+ ** dont't relocate if relative :-)
+ */
+ if (opcode & 0x00800000)
+ relocs = 0;
+ else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
+ relocs = 2;
+ else
+ relocs = 1;
+ break;
+
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ relocs = 1;
+ break;
+
+ default:
+ relocs = 0;
+ break;
+ };
+
+ if (!relocs) {
+ *dst++ = cpu_to_scr(*src++);
+ continue;
+ }
+ while (relocs--) {
+ old = *src++;
+
+ switch (old & RELOC_MASK) {
+ case RELOC_REGISTER:
+ new = (old & ~RELOC_MASK) + pcivtobus(np->base_ba);
+ break;
+ case RELOC_LABEL:
+ new = (old & ~RELOC_MASK) + np->p_script;
+ break;
+ case RELOC_LABELH:
+ new = (old & ~RELOC_MASK) + np->p_scripth;
+ break;
+ case RELOC_SOFTC:
+ new = (old & ~RELOC_MASK) + np->p_ncb;
+ break;
+#ifdef RELOC_KVAR
+ case RELOC_KVAR:
+ new=0;
+ if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) ||
+ ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST))
+ panic("ncr KVAR out of range");
+ new = vtobus(script_kvars[old & ~RELOC_MASK]);
+#endif
+ break;
+ case 0:
+ /* Don't relocate a 0 address. */
+ if (old == 0) {
+ new = old;
+ break;
+ }
+ /* fall through */
+ default:
+ new = 0; /* For 'cc' not to complain */
+ panic("ncr_script_copy_and_bind: "
+ "weird relocation %x\n", old);
+ break;
+ }
+
+ *dst++ = cpu_to_scr(new);
+ }
+ };
+}
+
+/*==========================================================
+**
+**
+** Auto configuration: attach and init a host adapter.
+**
+**
+**==========================================================
+*/
+
+/*
+** Linux host data structure.
+*/
+
/*
** Per-host private data hung off the Linux Scsi_Host instance
** (lives in instance->hostdata, sized via scsi_register()).
*/
struct host_data {
	struct ncb *ncb;	/* controller control block for this host */
};
+
+/*
+**	Print a prefix which allows one to retrieve the controller type,
+**	unit, target and lun concerned by a kernel message.
+*/
+
/* Prefix a kernel message with "<name>-<target,*>: " (all-LUN scope). */
static void PRINT_TARGET(ncb_p np, int target)
{
	printk(KERN_INFO "%s-<%d,*>: ", ncr_name(np), target);
}
+
/* Prefix a kernel message with "<name>-<target,lun>: " (single-LUN scope). */
static void PRINT_LUN(ncb_p np, int target, int lun)
{
	printk(KERN_INFO "%s-<%d,%d>: ", ncr_name(np), target, lun);
}
+
+static void PRINT_ADDR(Scsi_Cmnd *cmd)
+{
+ struct host_data *host_data = (struct host_data *) cmd->host->hostdata;
+ PRINT_LUN(host_data->ncb, cmd->target, cmd->lun);
+}
+
+/*==========================================================
+**
+** NCR chip clock divisor table.
+** Divisors are multiplied by 10,000,000 in order to make
+** calculations more simple.
+**
+**==========================================================
+*/
+
#define _5M 5000000
/* Divisor codes 0..6 map to /1, /1.5, /2, /3, /4, /6, /8 (times 10M). */
static u_long div_10M[] =
	{2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
+
+
+/*===============================================================
+**
+** Prepare io register values used by ncr_init() according
+** to selected and supported features.
+**
+** NCR/SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
+** 128 transfers. All chips support at least 16 transfers bursts.
+** The 825A, 875 and 895 chips support bursts of up to 128
+** transfers and the 895A and 896 support bursts of up to 64
+** transfers. All other chips support up to 16 transfers bursts.
+**
+** For PCI 32 bit data transfers each transfer is a DWORD (4 bytes).
+** It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
+** Only the 896 is able to perform 64 bit data transfers.
+**
+** We use log base 2 (burst length) as internal code, with
+** value 0 meaning "burst disabled".
+**
+**===============================================================
+*/
+
/*
 * Burst length (number of transfers) from burst code; 0 means
 * "burst disabled".  Fully parenthesized so the expansion stays
 * correct inside a larger expression: the original unparenthesized
 * ternary made e.g. `2 * burst_length(bc)` parse as
 * `(2 * !(bc)) ? 0 : 1 << (bc)`.
 */
#define burst_length(bc) (!(bc) ? 0 : 1 << (bc))

/*
 * Burst code from io register bits: CTEST4 bit 7 set means burst
 * disabled (code 0); otherwise rebuild the code from DMODE bits 7:6
 * and CTEST5 bit 2.  Fully parenthesized for the same reason.
 */
#define burst_code(dmode, ctest4, ctest5) \
	(((ctest4) & 0x80) ? 0 : ((((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1))
+
+/*
+ * Set initial io register bits from burst code.
+ */
+static inline void ncr_init_burst(ncb_p np, u_char bc)
+{
+ np->rv_ctest4 &= ~0x80;
+ np->rv_dmode &= ~(0x3 << 6);
+ np->rv_ctest5 &= ~0x4;
+
+ if (!bc) {
+ np->rv_ctest4 |= 0x80;
+ }
+ else {
+ --bc;
+ np->rv_dmode |= ((bc & 0x3) << 6);
+ np->rv_ctest5 |= (bc & 0x4);
+ }
+}
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+** Get target set-up from Symbios format NVRAM.
+*/
+
+static void __init
+ncr_Symbios_setup_target(ncb_p np, int target, Symbios_nvram *nvram)
+{
+ tcb_p tp = &np->target[target];
+ Symbios_target *tn = &nvram->target[target];
+
+ tp->usrsync = tn->sync_period ? (tn->sync_period + 3) / 4 : 255;
+ tp->usrwide = tn->bus_width == 0x10 ? 1 : 0;
+ tp->usrtags =
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? MAX_TAGS : 0;
+
+ if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
+ tp->usrflag |= UF_NODISC;
+ if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
+ tp->usrflag |= UF_NOSCAN;
+}
+
+/*
+** Get target set-up from Tekram format NVRAM.
+*/
+
+static void __init
+ncr_Tekram_setup_target(ncb_p np, int target, Tekram_nvram *nvram)
+{
+ tcb_p tp = &np->target[target];
+ struct Tekram_target *tn = &nvram->target[target];
+ int i;
+
+ if (tn->flags & TEKRAM_SYNC_NEGO) {
+ i = tn->sync_index & 0xf;
+ tp->usrsync = Tekram_sync[i];
+ }
+
+ tp->usrwide = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0;
+
+ if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
+ tp->usrtags = 2 << nvram->max_tags_index;
+ }
+
+ if (!(tn->flags & TEKRAM_DISCONNECT_ENABLE))
+ tp->usrflag = UF_NODISC;
+
+ /* If any device does not support parity, we will not use this option */
+ if (!(tn->flags & TEKRAM_PARITY_CHECK))
+ np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
/*
** Save initial settings of some IO registers.
** Assumed to have been set by BIOS.
**
** Each read is masked down to only the bits this driver later
** restores or probes; the masks are part of the chip register
** contract and must not be widened casually.
*/
static void __init ncr_save_initial_setting(ncb_p np)
{
	np->sv_scntl0	= INB(nc_scntl0) & 0x0a;	/* arb./parity bits */
	np->sv_dmode	= INB(nc_dmode)  & 0xce;	/* burst/ERL/ERMP/BOF */
	np->sv_dcntl	= INB(nc_dcntl)  & 0xa8;	/* CLSE/PFEN/IRQM */
	np->sv_ctest3	= INB(nc_ctest3) & 0x01;	/* WRIE */
	np->sv_ctest4	= INB(nc_ctest4) & 0x80;	/* burst disabled bit */
	np->sv_gpcntl	= INB(nc_gpcntl);
	np->sv_stest2	= INB(nc_stest2) & 0x20;	/* differential mode */
	np->sv_stest4	= INB(nc_stest4);
	np->sv_stest1	= INB(nc_stest1);

	np->sv_scntl3	= INB(nc_scntl3) & 0x07;	/* clock divisor code */

	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66) ){
		/*
		** C1010 always uses large fifo, bit 5 rsvd
		** scntl4 used ONLY with C1010
		*/
		np->sv_ctest5	= INB(nc_ctest5) & 0x04 ;
		np->sv_scntl4	= INB(nc_scntl4);
	}
	else {
		np->sv_ctest5	= INB(nc_ctest5) & 0x24 ;	/* fifo + burst bit */
		np->sv_scntl4	= 0;
	}
}
+
/*
**	Prepare io register values used by ncr_init()
**	according to selected and supported features.
**
**	Works out, in order: wide support, clock multiplier and SCSI
**	clock frequency, the async divisor (scntl3), min/max
**	synchronous period factors, burst length, chip-errata
**	work-arounds, SCSI bus mode (SE/HVD), LED support, IRQ mode
**	and per-target user settings (NVRAM and/or boot command line),
**	then announces the result.  Always returns 0.
*/
static int __init ncr_prepare_setting(ncb_p np, ncr_nvram *nvram)
{
	u_char	burst_max;
	u_long	period;
	int i;

	/*
	**	Wide ?
	*/

	np->maxwide	= (np->features & FE_WIDE)? 1 : 0;

	/*
	**	Get the frequency of the chip's clock.
	**	Find the right value for scntl3.
	*/

	if	(np->features & FE_QUAD)
		np->multiplier	= 4;
	else if	(np->features & FE_DBLR)
		np->multiplier	= 2;
	else
		np->multiplier	= 1;

	np->clock_khz	= (np->features & FE_CLK80)? 80000 : 40000;
	np->clock_khz	*= np->multiplier;

	/* Only measure the clock when it cannot be the plain 40 MHz. */
	if (np->clock_khz != 40000)
		ncr_getclock(np, np->multiplier);

	/*
	 * Divisor to be used for async (timer pre-scaler).
	 *
	 * Note: For C1010 the async divisor is 2(8) if the
	 * quadrupler is disabled (enabled).
	 */

	if ( (np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
		 (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {

		np->rv_scntl3 = 0;
	}
	else
	{
		/*
		** Pick the smallest divisor that keeps the async
		** period above SCSI_NCR_MIN_ASYNC.
		*/
		i = np->clock_divn - 1;
		while (--i >= 0) {
			if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz
				> div_10M[i]) {
				++i;
				break;
			}
		}
		np->rv_scntl3 = i+1;
	}


	/*
	 * Save the ultra3 register for the C1010/C1010_66
	 */

	np->rv_scntl4 = np->sv_scntl4;

	/*
	 * Minimum synchronous period factor supported by the chip.
	 * Btw, 'period' is in tenths of nanoseconds.
	 */

	period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
	if	(period <= 250)		np->minsync = 10;
	else if	(period <= 303)		np->minsync = 11;
	else if	(period <= 500)		np->minsync = 12;
	else				np->minsync = (period + 40 - 1) / 40;

	/*
	 * Fix up. If sync. factor is 10 (160000Khz clock) and chip
	 * supports ultra3, then min. sync. period 12.5ns and the factor is 9
	 */

	if ((np->minsync == 10) && (np->features & FE_ULTRA3))
		np->minsync = 9;

	/*
	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
	 *
	 * Transfer period minimums: SCSI-1 200 (50); Fast 100 (25)
	 *			  Ultra 50 (12); Ultra2 (6); Ultra3 (3)
	 */

	if	(np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 25;
	else if	(np->minsync < 12 && (np->features & FE_ULTRA))
		np->minsync = 12;
	else if	(np->minsync < 10 && (np->features & FE_ULTRA2))
		np->minsync = 10;
	else if	(np->minsync < 9 && (np->features & FE_ULTRA3))
		np->minsync = 9;

	/*
	 * Maximum synchronous period factor supported by the chip.
	 */

	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
	np->maxsync = period > 2540 ? 254 : period / 10;

	/*
	**	64 bit (53C895A or 53C896) ?
	*/
	if (np->features & FE_64BIT)
#ifdef SCSI_NCR_USE_64BIT_DAC
		np->rv_ccntl1	|= (XTIMOD | EXTIBMV);
#else
		np->rv_ccntl1	|= (DDAC);
#endif

	/*
	**	Phase mismatch handled by SCRIPTS (53C895A, 53C896 or C1010) ?
  	*/
	if (np->features & FE_NOPM)
		np->rv_ccntl0	|= (ENPMJ);

	/*
	**	Prepare initial value of other IO registers
	*/
#if defined SCSI_NCR_TRUST_BIOS_SETTING
	np->rv_scntl0	= np->sv_scntl0;
	np->rv_dmode	= np->sv_dmode;
	np->rv_dcntl	= np->sv_dcntl;
	np->rv_ctest3	= np->sv_ctest3;
	np->rv_ctest4	= np->sv_ctest4;
	np->rv_ctest5	= np->sv_ctest5;
	burst_max	= burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
#else

	/*
	**	Select burst length (dwords)
	*/
	burst_max	= driver_setup.burst_max;
	if (burst_max == 255)
		burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
	if (burst_max > 7)
		burst_max = 7;
	if (burst_max > np->maxburst)
		burst_max = np->maxburst;

	/*
	**	DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
	**	This chip and the 860 Rev 1 may wrongly use PCI cache line
	**	based transactions on LOAD/STORE instructions. So we have
	**	to prevent these chips from using such PCI transactions in
	**	this driver. The generic sym53c8xx driver that does not use
	**	LOAD/STORE instructions does not need this work-around.
	*/
	if ((np->device_id == PCI_DEVICE_ID_NCR_53C810 &&
	     np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
	    (np->device_id == PCI_DEVICE_ID_NCR_53C860 &&
	     np->revision_id <= 0x1))
		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);

	/*
	**	DEL ? - 53C1010 Rev 1 - Part Number 609-0393638
	**	64-bit Slave Cycles must be disabled.
	*/
	if ( ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) && (np->revision_id < 0x02) )
		|| (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 ) )
		np->rv_ccntl1 |=  0x10;

	/*
	**	Select all supported special features.
	**	If we are using on-board RAM for scripts, prefetch (PFEN)
	**	does not help, but burst op fetch (BOF) does.
	**	Disabling PFEN makes sure BOF will be used.
	*/
	if (np->features & FE_ERL)
		np->rv_dmode	|= ERL;		/* Enable Read Line */
	if (np->features & FE_BOF)
		np->rv_dmode	|= BOF;		/* Burst Opcode Fetch */
	if (np->features & FE_ERMP)
		np->rv_dmode	|= ERMP;	/* Enable Read Multiple */
#if 1
	if ((np->features & FE_PFEN) && !np->base2_ba)
#else
	if (np->features & FE_PFEN)
#endif
		np->rv_dcntl	|= PFEN;	/* Prefetch Enable */
	if (np->features & FE_CLSE)
		np->rv_dcntl	|= CLSE;	/* Cache Line Size Enable */
	if (np->features & FE_WRIE)
		np->rv_ctest3	|= WRIE;	/* Write and Invalidate */


	if ( (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) &&
			(np->features & FE_DFS))
		np->rv_ctest5	|= DFS;		/* Dma Fifo Size */
						/* C1010/C1010_66 always large fifo */

	/*
	**	Select some other
	*/
	if (driver_setup.master_parity)
		np->rv_ctest4	|= MPEE;	/* Master parity checking */
	if (driver_setup.scsi_parity)
		np->rv_scntl0	|= 0x0a;	/* full arb., ena parity, par->ATN */

#ifdef SCSI_NCR_NVRAM_SUPPORT
	/*
	**	Get parity checking, host ID and verbose mode from NVRAM
	*/
	if (nvram) {
		switch(nvram->type) {
		case SCSI_NCR_TEKRAM_NVRAM:
			np->myaddr = nvram->data.Tekram.host_id & 0x0f;
			break;
		case SCSI_NCR_SYMBIOS_NVRAM:
			if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
				np->rv_scntl0  &= ~0x0a;
			np->myaddr = nvram->data.Symbios.host_id & 0x0f;
			if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
				np->verbose += 1;
			break;
		}
	}
#endif
	/*
	**  Get SCSI addr of host adapter (set by bios?).
	*/
	if (np->myaddr == 255) {
		np->myaddr = INB(nc_scid) & 0x07;
		if (!np->myaddr)
			np->myaddr = SCSI_NCR_MYADDR;
	}

#endif /* SCSI_NCR_TRUST_BIOS_SETTING */

	/*
	 * Prepare initial io register bits for burst length
	 */
	ncr_init_burst(np, burst_max);

	/*
	**	Set SCSI BUS mode.
	**
	**	- ULTRA2 chips (895/895A/896) 
	**	  and ULTRA 3 chips (1010) report the current 
	**	  BUS mode through the STEST4 IO register.
	**	- For previous generation chips (825/825A/875), 
	**	  user has to tell us how to check against HVD, 
	**	  since a 100% safe algorithm is not possible.
	*/
	np->scsi_mode = SMODE_SE;
	if (np->features & (FE_ULTRA2 | FE_ULTRA3))
		np->scsi_mode = (np->sv_stest4 & SMODE);
	else if	(np->features & FE_DIFF) {
		/* Cases 4..1 deliberately cascade into one another. */
		switch(driver_setup.diff_support) {
		case 4:	/* Trust previous settings if present, then GPIO3 */
			if (np->sv_scntl3) {
				if (np->sv_stest2 & 0x20)
					np->scsi_mode = SMODE_HVD;
				break;
			}
			/* fall through */
		case 3:	/* SYMBIOS controllers report HVD through GPIO3 */
			if (nvram && nvram->type != SCSI_NCR_SYMBIOS_NVRAM)
				break;
			if (INB(nc_gpreg) & 0x08)
				break;
			/* fall through */
		case 2:	/* Set HVD unconditionally */
			np->scsi_mode = SMODE_HVD;
			/* fall through */
		case 1:	/* Trust previous settings for HVD */
			if (np->sv_stest2 & 0x20)
				np->scsi_mode = SMODE_HVD;
			break;
		default:/* Don't care about HVD */
			break;
		}
	}
	if (np->scsi_mode == SMODE_HVD)
		np->rv_stest2 |= 0x20;

	/*
	**	Set LED support from SCRIPTS.
	**	Ignore this feature for boards known to use a 
	**	specific GPIO wiring and for the 895A or 896 
	**	that drive the LED directly.
	**	Also probe initial setting of GPIO0 as output.
	*/
	if ((driver_setup.led_pin ||
	     (nvram && nvram->type == SCSI_NCR_SYMBIOS_NVRAM)) &&
	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
		np->features |= FE_LED0;

	/*
	**	Set irq mode.
	*/
	switch(driver_setup.irqm & 3) {
	case 2:
		np->rv_dcntl	|= IRQM;
		break;
	case 1:
		np->rv_dcntl	|= (np->sv_dcntl & IRQM);
		break;
	default:
		break;
	}

	/*
	**	Configure targets according to driver setup.
	**	If NVRAM present get targets setup from NVRAM.
	**	Allow to override sync, wide and NOSCAN from 
	**	boot command line.
	*/
	for (i = 0 ; i < MAX_TARGET ; i++) {
		tcb_p tp = &np->target[i];

		tp->usrsync = 255;
#ifdef	SCSI_NCR_NVRAM_SUPPORT
		if (nvram) {
			switch(nvram->type) {
			case SCSI_NCR_TEKRAM_NVRAM:
				ncr_Tekram_setup_target(np, i, &nvram->data.Tekram);
				break;
			case SCSI_NCR_SYMBIOS_NVRAM:
				ncr_Symbios_setup_target(np, i, &nvram->data.Symbios);
				break;
			}
			/* use_nvram bits 1/2/3 let the command line win. */
			if (driver_setup.use_nvram & 0x2)
				tp->usrsync = driver_setup.default_sync;
			if (driver_setup.use_nvram & 0x4)
				tp->usrwide = driver_setup.max_wide;
			if (driver_setup.use_nvram & 0x8)
				tp->usrflag &= ~UF_NOSCAN;
		}
		else {
#else
		if (1) {
#endif
			tp->usrsync = driver_setup.default_sync;
			tp->usrwide = driver_setup.max_wide;
			tp->usrtags = MAX_TAGS;
			if (!driver_setup.disconnection)
				np->target[i].usrflag = UF_NODISC;
		}
	}

	/*
	**	Announce all that stuff to user.
	*/

	i = nvram ? nvram->type : 0;
	printk(KERN_INFO "%s: %sID %d, Fast-%d%s%s\n", ncr_name(np),
		i  == SCSI_NCR_SYMBIOS_NVRAM ? "Symbios format NVRAM, " :
		(i == SCSI_NCR_TEKRAM_NVRAM  ? "Tekram format NVRAM, " : ""),
		np->myaddr,
		np->minsync < 10 ? 80 : 
			(np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10) ),
		(np->rv_scntl0 & 0xa)	? ", Parity Checking"	: ", NO Parity",
		(np->rv_stest2 & 0x20)	? ", Differential"	: "");

	if (bootverbose > 1) {
		printk (KERN_INFO "%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
			ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
			np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);

		printk (KERN_INFO "%s: final   SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
			ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}

	if (bootverbose && np->base2_ba)
		printk (KERN_INFO "%s: on-chip RAM at 0x%lx\n",
			ncr_name(np), np->base2_ba);

	return 0;
}
+
+
+#ifdef SCSI_NCR_DEBUG_NVRAM
+
/* Dump host and per-target settings from a Symbios format NVRAM. */
void __init ncr_display_Symbios_nvram(ncb_p np, Symbios_nvram *nvram)
{
	int i;

	/* display Symbios nvram host data */
	printk(KERN_DEBUG "%s: HOST ID=%d%s%s%s%s%s\n",
		ncr_name(np), nvram->host_id & 0x0f,
		(nvram->flags  & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
		(nvram->flags  & SYMBIOS_PARITY_ENABLE)	? " PARITY"	:"",
		(nvram->flags  & SYMBIOS_VERBOSE_MSGS)	? " VERBOSE"	:"", 
		(nvram->flags  & SYMBIOS_CHS_MAPPING)	? " CHS_ALT"	:"", 
		(nvram->flags1 & SYMBIOS_SCAN_HI_LO)	? " HI_LO"	:"");

	/* display Symbios nvram drive data */
	/* NOTE(review): only targets 0..14 are shown here while the
	   Tekram dump covers 0..15 — confirm the Symbios target table
	   really has 15 entries. */
	for (i = 0 ; i < 15 ; i++) {
		struct Symbios_target *tn = &nvram->target[i];
		printk(KERN_DEBUG "%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
		ncr_name(np), i,
		(tn->flags & SYMBIOS_DISCONNECT_ENABLE)	? " DISC"	: "",
		(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)	? " SCAN_BOOT"	: "",
		(tn->flags & SYMBIOS_SCAN_LUNS)		? " SCAN_LUNS"	: "",
		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ"	: "",
		tn->bus_width,
		tn->sync_period / 4,
		tn->timeout);
	}
}
+
/* Boot-delay choices (seconds) indexed by the NVRAM delay index. */
static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};

/* Dump host and per-target settings from a Tekram format NVRAM. */
void __init ncr_display_Tekram_nvram(ncb_p np, Tekram_nvram *nvram)
{
	int i, tags, boot_delay;
	char *rem;

	/* display Tekram nvram host data */
	tags = 2 << nvram->max_tags_index;
	boot_delay = 0;
	/* NOTE(review): the table has 7 entries but only indices 0..5
	   are accepted here, so the 120 s entry is unreachable —
	   confirm whether `< 6` should be `< 7`. */
	if (nvram->boot_delay_index < 6)
		boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
	switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
	default:
	case 0:	rem = "";			break;
	case 1: rem = " REMOVABLE=boot device";	break;
	case 2: rem = " REMOVABLE=all";		break;
	}

	/* NOTE(review): SYMBIOS_SCAM_ENABLE is tested against Tekram
	   flags1 — presumably the bit position is shared; verify. */
	printk(KERN_DEBUG
		"%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
		ncr_name(np), nvram->host_id & 0x0f,
		(nvram->flags1 & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
		(nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES"	:"",
		(nvram->flags & TEKRAM_DRIVES_SUP_1GB)	? " >1GB"	:"",
		(nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET"	:"",
		(nvram->flags & TEKRAM_ACTIVE_NEGATION)	? " ACT_NEG"	:"",
		(nvram->flags & TEKRAM_IMMEDIATE_SEEK)	? " IMM_SEEK"	:"",
		(nvram->flags & TEKRAM_SCAN_LUNS)	? " SCAN_LUNS"	:"",
		(nvram->flags1 & TEKRAM_F2_F6_ENABLED)	? " F2_F6"	:"",
		rem, boot_delay, tags);

	/* display Tekram nvram drive data */
	for (i = 0; i <= 15; i++) {
		int sync, j;
		struct Tekram_target *tn = &nvram->target[i];
		j = tn->sync_index & 0xf;
		sync = Tekram_sync[j];
		printk(KERN_DEBUG "%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
		ncr_name(np), i,
		(tn->flags & TEKRAM_PARITY_CHECK)	? " PARITY"	: "",
		(tn->flags & TEKRAM_SYNC_NEGO)		? " SYNC"	: "",
		(tn->flags & TEKRAM_DISCONNECT_ENABLE)	? " DISC"	: "",
		(tn->flags & TEKRAM_START_CMD)		? " START"	: "",
		(tn->flags & TEKRAM_TAGGED_COMMANDS)	? " TCQ"	: "",
		(tn->flags & TEKRAM_WIDE_NEGO)		? " WIDE"	: "",
		sync);
	}
}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+
/*
** Host attach and initialisations.
**
** Allocate host data and ncb structure.
** Request IO region and remap MMIO region.
** Do chip initialization.
** If all is OK, install interrupt handling and
** start the timer daemon.
**
** Returns 0 on success, -1 on failure (all partially acquired
** resources are released via the attach_error path).
*/

static int __init 
ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device)
{
        struct host_data *host_data;
	ncb_p np = 0;
        struct Scsi_Host *instance = 0;
	u_long flags = 0;
	ncr_nvram *nvram = device->nvram;
	int i;

	printk(KERN_INFO NAME53C "%s-%d: rev 0x%x on pci bus %d device %d function %d "
#ifdef __sparc__
		"irq %s\n",
#else
		"irq %d\n",
#endif
		device->chip.name, unit, device->chip.revision_id,
		device->slot.bus, (device->slot.device_fn & 0xf8) >> 3,
		device->slot.device_fn & 7,
#ifdef __sparc__
		__irq_itoa(device->slot.irq));
#else
		device->slot.irq);
#endif
 
	/*
	**	Allocate host_data structure
	*/
        if (!(instance = scsi_register(tpnt, sizeof(*host_data))))
	        goto attach_error;
	host_data = (struct host_data *) instance->hostdata;

	/*
	**	Allocate the host control block.
	*/
	np = __m_calloc_dma(device->pdev, sizeof(struct ncb), "NCB");
	if (!np)
		goto attach_error;
	NCR_INIT_LOCK_NCB(np);
	np->pdev  = device->pdev;
	np->p_ncb = vtobus(np);
	host_data->ncb = np;

	/*
	**	Store input information in the host data structure.
	*/
	strncpy(np->chip_name, device->chip.name, sizeof(np->chip_name) - 1);
	np->unit	= unit;
	np->verbose	= driver_setup.verbose;
	sprintf(np->inst_name, NAME53C "%s-%d", np->chip_name, np->unit);
	np->device_id	= device->chip.device_id;
	np->revision_id	= device->chip.revision_id;
	np->bus		= device->slot.bus;
	np->device_fn	= device->slot.device_fn;
	np->features	= device->chip.features;
	np->clock_divn	= device->chip.nr_divisor;
	np->maxoffs	= device->chip.offset_max;
	np->maxburst	= device->chip.burst_max;
	np->myaddr	= device->host_id;

	/*
	**	Allocate the start queue.
	*/
	np->squeue = (ncrcmd *)
		m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "SQUEUE");
	if (!np->squeue)
		goto attach_error;
	np->p_squeue = vtobus(np->squeue);

	/*
	**	Allocate the done queue.
	*/
	np->dqueue = (ncrcmd *)
		m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "DQUEUE");
	if (!np->dqueue)
		goto attach_error;

	/*
	**	Allocate the target bus address array.
	*/
	np->targtbl = (u_int32 *) m_calloc_dma(256, "TARGTBL");
	if (!np->targtbl)
		goto attach_error;

	/*
	**	Allocate SCRIPTS areas
	*/
	np->script0 = (struct script *)
		m_calloc_dma(sizeof(struct script), "SCRIPT");
	if (!np->script0)
		goto attach_error;
	np->scripth0 = (struct scripth *)
		m_calloc_dma(sizeof(struct scripth), "SCRIPTH");
	if (!np->scripth0)
		goto attach_error;

	/*
	**	Initialize the CCB free queue and,
	**	allocate some CCB. We need at least ONE.
	*/
	xpt_que_init(&np->free_ccbq);
	xpt_que_init(&np->b0_ccbq);
	if (!ncr_alloc_ccb(np))
		goto attach_error;

	/*
	**	Initialize timer structure
	**
	*/
	init_timer(&np->timer);
	np->timer.data     = (unsigned long) np;
	np->timer.function = sym53c8xx_timeout;

	/*
	**	Try to map the controller chip to
	**	virtual and physical memory.
	*/

	np->base_ba	= device->slot.base;
	np->base_ws	= (np->features & FE_IO256)? 256 : 128;
	np->base2_ba	= (np->features & FE_RAM)? device->slot.base_2 : 0;

#ifndef SCSI_NCR_IOMAPPED
	np->base_va = remap_pci_mem(np->base_ba, np->base_ws);
	if (!np->base_va) {
		printk(KERN_ERR "%s: can't map PCI MMIO region\n",ncr_name(np));
		goto attach_error;
	}
	else if (bootverbose > 1)
		printk(KERN_INFO "%s: using memory mapped IO\n", ncr_name(np));

	/*
	**	Make the controller's registers available.
	**	Now the INB INW INL OUTB OUTW OUTL macros
	**	can be used safely.
	*/

	np->reg = (struct ncr_reg *) np->base_va;

#endif /* !defined SCSI_NCR_IOMAPPED */

	/*
	**	If on-chip RAM is used, make sure SCRIPTS isn't too large.
	*/
	if (np->base2_ba && sizeof(struct script) > 4096) {
		printk(KERN_ERR "%s: script too large.\n", ncr_name(np));
		goto attach_error;
	}

	/*
	**	Try to map the controller chip into iospace.
	*/

	if (device->slot.io_port) {
		/* NOTE(review): request_region() result is not checked
		   here — region conflicts would go unnoticed. */
		request_region(device->slot.io_port, np->base_ws, NAME53C8XX);
		np->base_io = device->slot.io_port;
	}

#ifdef SCSI_NCR_NVRAM_SUPPORT
	if (nvram) {
		switch(nvram->type) {
		case SCSI_NCR_SYMBIOS_NVRAM:
#ifdef SCSI_NCR_DEBUG_NVRAM
			ncr_display_Symbios_nvram(np, &nvram->data.Symbios);
#endif
			break;
		case SCSI_NCR_TEKRAM_NVRAM:
#ifdef SCSI_NCR_DEBUG_NVRAM
			ncr_display_Tekram_nvram(np, &nvram->data.Tekram);
#endif
			break;
		default:
			nvram = 0;
#ifdef SCSI_NCR_DEBUG_NVRAM
			printk(KERN_DEBUG "%s: NVRAM: None or invalid data.\n", ncr_name(np));
#endif
		}
	}
#endif

	/*
	**	Save setting of some IO registers, so we will 
	**	be able to probe specific implementations.
	*/
	ncr_save_initial_setting (np);

	/*
	**	Reset the chip now, since it has been reported 
	**	that SCSI clock calibration may not work properly 
	**	if the chip is currently active.
	*/
	ncr_chip_reset (np);

	/*
	**	Do chip dependent initialization.
	*/
	(void) ncr_prepare_setting(np, nvram);

	/*
	**	Check the PCI clock frequency if needed.
	**
	**	Must be done after ncr_prepare_setting since it destroys 
	**	STEST1 that is used to probe for the clock multiplier.
	**
	**	The range is currently [22688 - 45375 Khz], given 
	**	the values used by ncr_getclock().
	**	This calibration of the frequency measurement 
	**	algorithm against the PCI clock frequency is only 
	**	performed if the driver has had to measure the SCSI 
	**	clock due to other heuristics not having been enough 
	**	to deduce the SCSI clock frequency.
	**
	**	When the chip has been initialized correctly by the 
	**	SCSI BIOS, the driver deduces the presence of the 
	**	clock multiplier and the value of the SCSI clock from 
	**	initial values of IO registers, and therefore no 
	**	clock measurement is performed.
	**	Normally the driver should never have to measure any 
	**	clock, unless the controller may use a 80 MHz clock 
	**	or has a clock multiplier and any of the following 
	**	condition is met:
	**
	**	- No SCSI BIOS is present.
	**	- SCSI BIOS didn't enable the multiplier for some reason.
	**	- User has disabled the controller from the SCSI BIOS.
	**	- User booted the O/S from another O/S that didn't enable 
	**	  the multiplier for some reason.
	**
	**	As a result, the driver may only have to measure some 
	**	frequency in very unusual situations.
	**
	**	For this reality test against the PCI clock to really 
	**	protect against flaws in the udelay() calibration or 
	**	driver problem that affect the clock measurement 
	**	algorithm, the actual PCI clock frequency must be 33 MHz.
	*/
	i = np->pciclock_max ? ncr_getpciclock(np) : 0;
	if (i && (i < np->pciclock_min  || i > np->pciclock_max)) {
		printk(KERN_ERR "%s: PCI clock (%u KHz) is out of range "
			"[%u KHz - %u KHz].\n",
		       ncr_name(np), i, np->pciclock_min, np->pciclock_max);
		goto attach_error;
	}

	/*
	**	Patch script to physical addresses
	*/
	ncr_script_fill (&script0, &scripth0);

	np->p_script	= vtobus(np->script0);
	np->p_scripth	= vtobus(np->scripth0);
	np->p_scripth0	= np->p_scripth;

	/* On-chip RAM, if present, overrides the SCRIPTS bus address. */
	if (np->base2_ba) {
		np->p_script	= pcivtobus(np->base2_ba);
		if (np->features & FE_RAM8K) {
			np->base2_ws = 8192;
			np->p_scripth = np->p_script + 4096;
#if BITS_PER_LONG > 32
			np->scr_ram_seg = cpu_to_scr(np->base2_ba >> 32);
#endif
		}
		else
			np->base2_ws = 4096;
#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
		np->base2_va = remap_pci_mem(np->base2_ba, np->base2_ws);
		if (!np->base2_va) {
			printk(KERN_ERR "%s: can't map PCI MEMORY region\n",
			       ncr_name(np));
			goto attach_error;
		}
#endif
	}

	ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
	ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));

	/*
	**	Patch some variables in SCRIPTS
	*/
	np->scripth0->pm0_data_addr[0] = 
			cpu_to_scr(NCB_SCRIPT_PHYS(np, pm0_data));
	np->scripth0->pm1_data_addr[0] = 
			cpu_to_scr(NCB_SCRIPT_PHYS(np, pm1_data));

	/*
	**	Patch if not Ultra 3 - Do not write to scntl4
	*/
	if (np->features & FE_ULTRA3) {
		np->script0->resel_scntl4[0] = cpu_to_scr(SCR_LOAD_REL (scntl4, 1));
		np->script0->resel_scntl4[1] = cpu_to_scr(offsetof(struct tcb, uval));
	}


#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
	np->scripth0->script0_ba[0]	= cpu_to_scr(vtobus(np->script0));
	np->scripth0->script0_ba64[0]	= cpu_to_scr(vtobus(np->script0));
	np->scripth0->scripth0_ba64[0]	= cpu_to_scr(vtobus(np->scripth0));
	np->scripth0->ram_seg64[0]	= np->scr_ram_seg;
#endif
	/*
	**	Prepare the idle and invalid task actions.
	*/
	np->idletask.start	= cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
	np->idletask.restart	= cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
	np->p_idletask		= NCB_PHYS(np, idletask);

	np->notask.start	= cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
	np->notask.restart	= cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
	np->p_notask		= NCB_PHYS(np, notask);

	np->bad_i_t_l.start	= cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
	np->bad_i_t_l.restart	= cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
	np->p_bad_i_t_l		= NCB_PHYS(np, bad_i_t_l);

	np->bad_i_t_l_q.start	= cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
	np->bad_i_t_l_q.restart	= cpu_to_scr(NCB_SCRIPTH_PHYS (np,bad_i_t_l_q));
	np->p_bad_i_t_l_q	= NCB_PHYS(np, bad_i_t_l_q);

	/*
	**	Allocate and prepare the bad lun table.
	*/
	np->badluntbl = m_calloc_dma(256, "BADLUNTBL");
	if (!np->badluntbl)
		goto attach_error;

	assert (offsetof(struct lcb, resel_task) == 0);
	np->resel_badlun = cpu_to_scr(NCB_SCRIPTH_PHYS(np, resel_bad_lun));

	for (i = 0 ; i < 64 ; i++)
		np->badluntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun));

	/*
	**	Prepare the target bus address array.
	*/
	np->scripth0->targtbl[0] = cpu_to_scr(vtobus(np->targtbl));
	for (i = 0 ; i < MAX_TARGET ; i++) {
		np->targtbl[i] = cpu_to_scr(NCB_PHYS(np, target[i]));
		np->target[i].b_luntbl = cpu_to_scr(vtobus(np->badluntbl));
		np->target[i].b_lun0   = cpu_to_scr(NCB_PHYS(np, resel_badlun));
	}

	/*
	**	Patch the script for LED support.
	*/

	if (np->features & FE_LED0) {
		np->script0->idle[0]  =
				cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR,  0x01));
		np->script0->reselected[0] =
				cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
		np->script0->start[0] =
				cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
	}

	/*
	**	Patch the script to provide an extra clock cycle on
	**	data out phase - 53C1010_66MHz part only.
	*/
	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66){
		np->script0->datao_phase[0] =
				cpu_to_scr(SCR_REG_REG(scntl4, SCR_OR, 0x0c));
	}

#ifdef SCSI_NCR_IARB_SUPPORT
	/*
	**    If user does not want to use IMMEDIATE ARBITRATION
	**    when we are reselected while attempting to arbitrate,
	**    patch the SCRIPTS accordingly with a SCRIPT NO_OP.
	*/
	if (!(driver_setup.iarb & 1))
		np->script0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
	/*
	**    If user wants IARB to be set when we win arbitration
	**    and have other jobs, compute the max number of consecutive
	**    settings of IARB hint before we leave devices a chance to
	**    arbitrate for reselection.
	*/
	np->iarb_max = (driver_setup.iarb >> 4);
#endif

	/*
	**	DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5.
	*/
	if (np->device_id == PCI_DEVICE_ID_NCR_53C896 &&
	    np->revision_id <= 0x1 && (np->features & FE_NOPM)) {
		np->scatter = ncr_scatter_896R1;
		np->script0->datai_phase[0] = cpu_to_scr(SCR_JUMP);
		np->script0->datai_phase[1] = 
				cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
		np->script0->datao_phase[0] = cpu_to_scr(SCR_JUMP);
		np->script0->datao_phase[1] = 
				cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
	}
	else
#ifdef DEBUG_896R1
		np->scatter = ncr_scatter_896R1;
#else
		np->scatter = ncr_scatter;
#endif

	/*
	**	Reset chip.
	**	We should use ncr_soft_reset(), but we do not want to do 
	**	so, since we may not be safe if ABRT interrupt occurs due 
	**	to the BIOS or previous O/S having enabled this interrupt.
	**
	**	For C1010 need to set ABRT bit prior to SRST if SCRIPTs
	**	are running. Not true in this case.
	*/
	ncr_chip_reset(np);

	/*
	**	Now check the cache handling of the pci chipset.
	*/

	if (ncr_snooptest (np)) {
		printk (KERN_ERR "CACHE INCORRECTLY CONFIGURED.\n");
		goto attach_error;
	};

	/*
	**	Install the interrupt handler.
	**	If we synchronize the C code with SCRIPTS on interrupt, 
	**	we do not want to share the INTR line at all.
	*/
	if (request_irq(device->slot.irq, sym53c8xx_intr,
#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
			((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT),
#else
			((driver_setup.irqm & 0x10) ? 0 : SA_SHIRQ) |

#if 0 && LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
			((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT),
#else
			0,
#endif
#endif
			NAME53C8XX, np)) {
		printk(KERN_ERR "%s: request irq %d failure\n",
			ncr_name(np), device->slot.irq);
		goto attach_error;
	}
	np->irq = device->slot.irq;

	/*
	**	After SCSI devices have been opened, we cannot
	**	reset the bus safely, so we do it here.
	**	Interrupt handler does the real work.
	**	Process the reset exception,
	**	if interrupts are not enabled yet.
	**	Then enable disconnects.
	*/
	NCR_LOCK_NCB(np, flags);
	if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) {
		printk(KERN_ERR "%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np));

		NCR_UNLOCK_NCB(np, flags);
		goto attach_error;
	}
	ncr_exception (np);

	/*
	**	The middle-level SCSI driver does not
	**	wait for devices to settle.
	**	Wait synchronously if more than 2 seconds.
	*/
	if (driver_setup.settle_delay > 2) {
		printk(KERN_INFO "%s: waiting %d seconds for scsi devices to settle...\n",
			ncr_name(np), driver_setup.settle_delay);
		MDELAY (1000 * driver_setup.settle_delay);
	}

	/*
	**	start the timeout daemon
	*/
	np->lasttime=0;
	ncr_timeout (np);

	/*
	**  use SIMPLE TAG messages by default
	*/
#ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG
	np->order = M_SIMPLE_TAG;
#endif

	/*
	**  Done.
	*/
        if (!first_host)
        	first_host = instance;

	/*
	**	Fill Linux host instance structure
	**	and return success.
	*/
	instance->max_channel	= 0;
	instance->this_id       = np->myaddr;
	instance->max_id	= np->maxwide ? 16 : 8;
	instance->max_lun	= MAX_LUN;
#ifndef SCSI_NCR_IOMAPPED
#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,29)
	instance->base		= (unsigned long) np->reg;
#else
	instance->base		= (char *) np->reg;
#endif
#endif
	instance->irq		= np->irq;
	instance->unique_id	= np->base_io;
	instance->io_port	= np->base_io;
	instance->n_io_port	= np->base_ws;
	instance->dma_channel	= 0;
	instance->cmd_per_lun	= MAX_TAGS;
	instance->can_queue	= (MAX_START-4);

	np->check_integrity	  = 0;

#ifdef SCSI_NCR_INTEGRITY_CHECKING
	instance->check_integrity = 0;

#ifdef SCSI_NCR_ENABLE_INTEGRITY_CHECK
	if ( !(driver_setup.bus_check & 0x04) ) {
		np->check_integrity	  = 1;
		instance->check_integrity = 1;
	}
#endif
#endif

	instance->select_queue_depths = sym53c8xx_select_queue_depths;

	NCR_UNLOCK_NCB(np, flags);

	/*
	**	Now let the generic SCSI driver
	**	look for the SCSI devices on the bus ..
	*/
	return 0;

attach_error:
	if (!instance) return -1;
	/* NOTE(review): np may still be NULL here; presumably
	   ncr_name() tolerates that — verify before relying on it. */
	printk(KERN_INFO "%s: giving up ...\n", ncr_name(np));
	if (np)
		ncr_free_resources(np);
	scsi_unregister(instance);

        return -1;
 }
+
+
+/*
+** Free controller resources.
+*/
+static void ncr_free_resources(ncb_p np)
+{
+	ccb_p cp;
+	tcb_p tp;
+	lcb_p lp;
+	int target, lun;
+
+	/* Give back the interrupt line and the claimed I/O port range. */
+	if (np->irq)
+		free_irq(np->irq, np);
+	if (np->base_io)
+		release_region(np->base_io, np->base_ws);
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+	/* Unmap the PCI memory windows, if they were mapped. */
+	if (np->base_va)
+		unmap_pci_mem(np->base_va, np->base_ws);
+	if (np->base2_va)
+		unmap_pci_mem(np->base2_va, np->base2_ws);
+#endif
+	/* Free the SCRIPTS images and the start/done queues (DMA-able memory). */
+	if (np->scripth0)
+		m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH");
+	if (np->script0)
+		m_free_dma(np->script0, sizeof(struct script), "SCRIPT");
+	if (np->squeue)
+		m_free_dma(np->squeue, sizeof(ncrcmd)*(MAX_START*2), "SQUEUE");
+	if (np->dqueue)
+		m_free_dma(np->dqueue, sizeof(ncrcmd)*(MAX_START*2),"DQUEUE");
+
+	/* Walk the CCB chain (linked through link_ccb) and free each CCB. */
+	while ((cp = np->ccbc) != NULL) {
+		np->ccbc = cp->link_ccb;
+		m_free_dma(cp, sizeof(*cp), "CCB");
+	}
+
+	if (np->badluntbl)
+		m_free_dma(np->badluntbl, 256,"BADLUNTBL");
+
+	/* Per target: free every LCB and its task/tag tables, but only
+	** the task table if it is not the embedded tasktbl_0. */
+	for (target = 0; target < MAX_TARGET ; target++) {
+		tp = &np->target[target];
+		for (lun = 0 ; lun < MAX_LUN ; lun++) {
+			lp = ncr_lp(np, tp, lun);
+			if (!lp)
+				continue;
+			if (lp->tasktbl != &lp->tasktbl_0)
+				m_free_dma(lp->tasktbl, MAX_TASKS*4, "TASKTBL");
+			if (lp->cb_tags)
+				m_free(lp->cb_tags, MAX_TAGS, "CB_TAGS");
+			m_free_dma(lp, sizeof(*lp), "LCB");
+		}
+#if MAX_LUN > 1
+		if (tp->lmp)
+			m_free(tp->lmp, MAX_LUN * sizeof(lcb_p), "LMP");
+		if (tp->luntbl)
+			m_free_dma(tp->luntbl, 256, "LUNTBL");
+#endif
+	}
+
+	if (np->targtbl)
+		m_free_dma(np->targtbl, 256, "TARGTBL");
+
+	/* Finally, release the NCB itself. */
+	m_free_dma(np, sizeof(*np), "NCB");
+}
+
+
+/*==========================================================
+**
+**
+** Done SCSI commands list management.
+**
+** We do not enter the scsi_done() callback immediately
+** after a command has been seen as completed, but we
+** insert it into a list which is flushed outside any kind
+** of driver critical section.
+** This allows us to do minimal work under interrupt and
+** inside critical sections and also to avoid locking up
+** on recursive calls to driver entry points under SMP.
+** In fact, the only kernel point which is entered by the
+** driver with a driver lock set is get_free_pages(GFP_ATOMIC...)
+** which shall not reenter the driver under any circumstance.
+**
+**==========================================================
+*/
+static inline void ncr_queue_done_cmd(ncb_p np, Scsi_Cmnd *cmd)
+{
+	/* Undo the DMA mapping for this command, then push it onto the
+	** done list, singly linked through host_scribble; the list is
+	** flushed later by ncr_flush_done_cmds() outside critical sections. */
+	unmap_scsi_data(np, cmd);
+	cmd->host_scribble = (char *) np->done_list;
+	np->done_list = cmd;
+}
+
+static inline void ncr_flush_done_cmds(Scsi_Cmnd *lcmd)
+{
+	Scsi_Cmnd *cmd;
+
+	/* Walk the host_scribble-linked done list and invoke the
+	** scsi_done() callback for each command. The next pointer is
+	** read before the callback, which may recycle the command. */
+	while (lcmd) {
+		cmd = lcmd;
+		lcmd = (Scsi_Cmnd *) cmd->host_scribble;
+		cmd->scsi_done(cmd);
+	}
+}
+
+/*==========================================================
+**
+**
+** Prepare the next negotiation message for integrity check,
+** if needed.
+**
+** Fill in the part of message buffer that contains the
+** negotiation and the nego_status field of the CCB.
+** Returns the size of the message in bytes.
+**
+** If tp->ppr_negotiation is 1 and a M_REJECT occurs, then
+** we disable ppr_negotiation. If the first ppr_negotiation is
+** successful, set this flag to 2.
+**
+**==========================================================
+*/
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr)
+{
+	tcb_p tp = &np->target[cp->target];
+	int msglen = 0;
+	int nego = 0;
+	u_char new_width, new_offset, new_period;
+	u_char no_increase;
+
+	if (tp->ppr_negotiation == 1)	/* PPR message successful */
+		tp->ppr_negotiation = 2;
+
+	if (tp->inq_done) {
+
+		/* First pass for this target: derive the integrity-check
+		** maximums (width / sync factor) from INQUIRY data and
+		** host/user limits, and decide whether PPR is possible. */
+		if (!tp->ic_maximums_set) {
+			tp->ic_maximums_set = 1;
+
+			/*
+			 * Check against target, host and user limits
+			 */
+			if ( (tp->inq_byte7 & INQ7_WIDE16) &&
+					np->maxwide && tp->usrwide)
+				tp->ic_max_width = 1;
+			else
+				tp->ic_max_width = 0;
+
+
+			if ((tp->inq_byte7 & INQ7_SYNC) && tp->maxoffs)
+				tp->ic_min_sync = (tp->minsync < np->minsync) ?
+							np->minsync : tp->minsync;
+			else
+				tp->ic_min_sync = 255;
+
+			tp->period = 1;
+			tp->widedone = 1;
+
+			/*
+			 * Enable PPR negotiation - only if Ultra3 support
+			 * is accessible.
+			 */
+
+#if 0
+			if (tp->ic_max_width && (tp->ic_min_sync != 255 ))
+				tp->ppr_negotiation = 1;
+#endif
+			tp->ppr_negotiation = 0;
+			if (np->features & FE_ULTRA3) {
+				if (tp->ic_max_width && (tp->ic_min_sync == 0x09))
+					tp->ppr_negotiation = 1;
+			}
+
+			if (!tp->ppr_negotiation)
+				cmd->ic_nego &= ~NS_PPR;
+		}
+
+		if (DEBUG_FLAGS & DEBUG_IC) {
+			printk("%s: cmd->ic_nego %d, 1st byte 0x%2X\n",
+				ncr_name(np), cmd->ic_nego, cmd->cmnd[0]);
+		}
+
+		/* Previous command recorded a parity or an initiator
+		 * detected error condition. Force bus to narrow for this
+		 * target. Clear flag. Negotiation on request sense.
+		 * Note: kernel forces 2 bus resets :o( but clears itself out.
+		 * Minor bug? in scsi_obsolete.c (ugly)
+		 */
+		if (np->check_integ_par) {
+			printk("%s: Parity Error. Target set to narrow.\n",
+				ncr_name(np));
+			tp->ic_max_width = 0;
+			tp->widedone = tp->period = 0;
+		}
+
+		/* Initializing:
+		 * If ic_nego == NS_PPR, we are in the initial test for
+		 * PPR messaging support. If driver flag is clear, then
+		 * either we don't support PPR nego (narrow or async device)
+		 * or this is the second TUR and we have had a M. REJECT
+		 * or unexpected disconnect on the first PPR negotiation.
+		 * Do not negotiate, reset nego flags (in case a reset has
+		 * occurred), clear ic_nego and return.
+		 * General case: Kernel will clear flag on a fallback.
+		 * Do only SDTR or WDTR in the future.
+		 */
+		if (!tp->ppr_negotiation && (cmd->ic_nego == NS_PPR )) {
+			tp->ppr_negotiation = 0;
+			cmd->ic_nego &= ~NS_PPR;
+			tp->widedone = tp->period = 1;
+			return msglen;
+		}
+		else if (( tp->ppr_negotiation && !(cmd->ic_nego & NS_PPR )) ||
+			 (!tp->ppr_negotiation && (cmd->ic_nego & NS_PPR )) ) {
+			tp->ppr_negotiation = 0;
+			cmd->ic_nego &= ~NS_PPR;
+		}
+
+		/*
+		 * Always check the PPR nego. flag bit if ppr_negotiation
+		 * is set. If the ic_nego PPR bit is clear,
+		 * there must have been a fallback. Do only
+		 * WDTR / SDTR in the future.
+		 */
+		if ((tp->ppr_negotiation) && (!(cmd->ic_nego & NS_PPR)))
+			tp->ppr_negotiation = 0;
+
+		/* In case of a bus reset, ncr_negotiate will reset
+                 * the flags tp->widedone and tp->period to 0, forcing
+		 * a new negotiation. Do WDTR then SDTR. If PPR, do both.
+		 * Do NOT increase the period. It is possible for the Scsi_Cmnd
+		 * flags to be set to increase the period when a bus reset
+		 * occurs - we don't want to change anything.
+		 */
+
+		no_increase = 0;
+
+		if (tp->ppr_negotiation && (!tp->widedone) && (!tp->period) ) {
+			cmd->ic_nego = NS_PPR;
+			tp->widedone = tp->period = 1;
+			no_increase = 1;
+		}
+		else if (!tp->widedone) {
+			cmd->ic_nego = NS_WIDE;
+			tp->widedone = 1;
+			no_increase = 1;
+		}
+		else if (!tp->period) {
+			cmd->ic_nego = NS_SYNC;
+			tp->period = 1;
+			no_increase = 1;
+		}
+
+		/* Clamp the requested width to the target's maximum. */
+		new_width = cmd->ic_nego_width & tp->ic_max_width;
+
+		/* Work out period/offset for the message, stepping the
+		** sync factor through the standard values when the check
+		** asks for a slower bus (case 2). */
+		switch (cmd->ic_nego_sync) {
+		case 2: /* increase the period */
+			if (!no_increase) {
+				if (tp->ic_min_sync <= 0x09)
+					tp->ic_min_sync = 0x0A;
+				else if (tp->ic_min_sync <= 0x0A)
+					tp->ic_min_sync = 0x0C;
+				else if (tp->ic_min_sync <= 0x0C)
+					tp->ic_min_sync = 0x19;
+				else if	(tp->ic_min_sync <= 0x19)
+					tp->ic_min_sync *= 2;
+				else {
+					tp->ic_min_sync = 255;
+					cmd->ic_nego_sync = 0;
+					tp->maxoffs = 0;
+				}
+			}
+			new_period = tp->maxoffs?tp->ic_min_sync:0;
+			new_offset = tp->maxoffs;
+			break;
+
+		case 1: /* nego. to maximum */
+			new_period = tp->maxoffs?tp->ic_min_sync:0;
+			new_offset = tp->maxoffs;
+			break;
+
+		case 0:	/* nego to async */
+		default:
+			new_period = 0;
+			new_offset = 0;
+			break;
+		};
+
+
+		nego = NS_NOCHANGE;
+		if (tp->ppr_negotiation) {
+			u_char options_byte = 0;
+
+			/*
+			** Must make sure data is consistent.
+			** If period is 9 and sync, must be wide and DT bit set.
+			** else period must be larger. If the width is 0,
+			** reset bus to wide but increase the period to 0x0A.
+			** Note: The strange else clause is due to the integrity check.
+			** If fails at 0x09, wide, the I.C. code will redo at the same
+			** speed but a narrow bus. The driver must take care of slowing
+			** the bus speed down.
+			**
+			** The maximum offset in ST mode is 31, in DT mode 62 (1010/1010_66 only)
+			*/
+			if ( (new_period==0x09) && new_offset) {
+				if (new_width)
+					options_byte = 0x02;
+				else {
+					tp->ic_min_sync = 0x0A;
+					new_period = 0x0A;
+					cmd->ic_nego_width = 1;
+					new_width = 1;
+					new_offset &= 0x1f;
+				}
+			}
+			else if (new_period > 0x09)
+				new_offset &= 0x1f;
+
+			nego = NS_PPR;
+
+			/* Build the 8-byte extended PPR request message. */
+			msgptr[msglen++] = M_EXTENDED;
+			msgptr[msglen++] = 6;
+			msgptr[msglen++] = M_X_PPR_REQ;
+			msgptr[msglen++] = new_period;
+			msgptr[msglen++] = 0;
+			msgptr[msglen++] = new_offset;
+			msgptr[msglen++] = new_width;
+			msgptr[msglen++] = options_byte;
+
+		}
+		else {
+			switch (cmd->ic_nego & ~NS_PPR) {
+			case NS_WIDE:
+				/*
+				** WDTR negotiation on if device supports
+				** wide or if wide device forced narrow
+				** due to a parity error.
+				*/
+
+				cmd->ic_nego_width &= tp->ic_max_width;
+
+				if (tp->ic_max_width | np->check_integ_par) {
+					nego = NS_WIDE;
+					msgptr[msglen++] = M_EXTENDED;
+					msgptr[msglen++] = 2;
+					msgptr[msglen++] = M_X_WIDE_REQ;
+					msgptr[msglen++] = new_width;
+				}
+				break;
+
+			case NS_SYNC:
+				/*
+				** negotiate synchronous transfers
+				** Target must support sync transfers.
+				** Min. period = 0x0A, maximum offset of 31=0x1f.
+				*/
+
+				if (tp->inq_byte7 & INQ7_SYNC) {
+
+					if (new_offset && (new_period < 0x0A)) {
+						tp->ic_min_sync = 0x0A;
+						new_period = 0x0A;
+					}
+					nego = NS_SYNC;
+					msgptr[msglen++] = M_EXTENDED;
+					msgptr[msglen++] = 3;
+					msgptr[msglen++] = M_X_SYNC_REQ;
+					msgptr[msglen++] = new_period;
+					msgptr[msglen++] = new_offset & 0x1f;
+				}
+				else
+					cmd->ic_nego_sync = 0;
+				break;
+
+			case NS_NOCHANGE:
+				break;
+			}
+		}
+
+	};
+
+	cp->nego_status = nego;
+	np->check_integ_par = 0;
+
+	if (nego) {
+		tp->nego_cp = cp;
+		if (DEBUG_FLAGS & DEBUG_NEGO) {
+			ncr_print_msg(cp, nego == NS_WIDE ?
+					"wide/narrow msgout":
+			(nego == NS_SYNC ? "sync/async msgout" : "ppr msgout"),
+					msgptr);
+		};
+	};
+
+	return msglen;
+}
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+/*==========================================================
+**
+**
+** Prepare the next negotiation message if needed.
+**
+** Fill in the part of message buffer that contains the
+** negotiation and the nego_status field of the CCB.
+** Returns the size of the message in bytes.
+**
+**
+**==========================================================
+*/
+
+
+static int ncr_prepare_nego(ncb_p np, ccb_p cp, u_char *msgptr)
+{
+	tcb_p tp = &np->target[cp->target];
+	int msglen = 0;
+	int nego = 0;
+	u_char width, offset, factor, last_byte;
+
+	if (!np->check_integrity) {
+		/* If integrity checking disabled, enable PPR messaging
+		 * if device supports wide, sync and ultra 3
+		 */
+		if (tp->ppr_negotiation == 1) /* PPR message successful */
+        		tp->ppr_negotiation = 2;
+
+		if ((tp->inq_done) && (!tp->ic_maximums_set)) {
+			tp->ic_maximums_set = 1;
+
+			/*
+			 * Issue PPR only if board is capable
+			 * and set-up for Ultra3 transfers.
+			 */
+			tp->ppr_negotiation = 0;
+			if ( (np->features & FE_ULTRA3) &&
+				(tp->usrwide) && (tp->maxoffs) &&
+				(tp->minsync == 0x09) )
+					tp->ppr_negotiation = 1;
+		}
+	}
+
+	if (tp->inq_done) {
+		/*
+		 * Get the current width, offset and period
+		 */
+		ncr_get_xfer_info( np, tp, &factor,
+					&offset, &width);
+
+		/*
+		** negotiate wide transfers ?
+		*/
+
+		if (!tp->widedone) {
+			if (tp->inq_byte7 & INQ7_WIDE16) {
+				if (tp->ppr_negotiation)
+					nego = NS_PPR;
+				else
+					nego = NS_WIDE;
+
+				width = tp->usrwide;
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+				if (tp->ic_done)
+       					width &= tp->ic_max_width;
+#endif
+			} else
+				tp->widedone=1;
+
+		};
+
+		/*
+		** negotiate synchronous transfers?
+		*/
+
+		if ((nego != NS_WIDE) && !tp->period) {
+			if (tp->inq_byte7 & INQ7_SYNC) {
+				if (tp->ppr_negotiation)
+					nego = NS_PPR;
+				else
+					nego = NS_SYNC;
+
+				/* Check for async flag */
+				if (tp->maxoffs == 0) {
+					offset = 0;
+					factor = 0;
+				}
+				else {
+					offset = tp->maxoffs;
+					factor = tp->minsync;
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+					if ((tp->ic_done) &&
+						(factor < tp->ic_min_sync))
+						factor = tp->ic_min_sync;
+#endif
+				}
+
+			} else {
+				offset = 0;
+				factor = 0;
+				tp->period  =0xffff;
+				PRINT_TARGET(np, cp->target);
+				printk ("target did not report SYNC.\n");
+			};
+		};
+	};
+
+	/* Build the negotiation message selected above into msgptr[]. */
+	switch (nego) {
+	case NS_PPR:
+		/*
+		** Must make sure data is consistent.
+		** If period is 9 and sync, must be wide and DT bit set
+		** else period must be larger.
+		** Maximum offset is 31=0x1f in ST mode, 62 if DT mode
+		*/
+		last_byte = 0;
+		if ( (factor==9) && offset) {
+			if (!width) {
+				factor = 0x0A;
+				offset &= 0x1f;
+			}
+			else
+				last_byte = 0x02;
+		}
+		else if (factor > 0x09)
+			offset &= 0x1f;
+
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 6;
+		msgptr[msglen++] = M_X_PPR_REQ;
+		msgptr[msglen++] = factor;
+		msgptr[msglen++] = 0;
+		msgptr[msglen++] = offset;
+		msgptr[msglen++] = width;
+		msgptr[msglen++] = last_byte;
+		break;
+	case NS_SYNC:
+		/*
+		** Never negotiate faster than Ultra 2 (25ns periods)
+		*/
+		if (offset && (factor < 0x0A)) {
+			factor = 0x0A;
+			tp->minsync = 0x0A;
+		}
+
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 3;
+		msgptr[msglen++] = M_X_SYNC_REQ;
+		msgptr[msglen++] = factor;
+		msgptr[msglen++] = offset & 0x1f;
+		break;
+	case NS_WIDE:
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 2;
+		msgptr[msglen++] = M_X_WIDE_REQ;
+		msgptr[msglen++] = width;
+		break;
+	};
+
+	cp->nego_status = nego;
+
+	if (nego) {
+		tp->nego_cp = cp;
+		if (DEBUG_FLAGS & DEBUG_NEGO) {
+			ncr_print_msg(cp, nego == NS_WIDE ?
+					"wide msgout":
+			(nego == NS_SYNC ? "sync msgout" : "ppr msgout"),
+					msgptr);
+		};
+	};
+
+	return msglen;
+}
+
+/*==========================================================
+**
+**
+** Start execution of a SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_queue_command (ncb_p np, Scsi_Cmnd *cmd)
+{
+/*	Scsi_Device        *device    = cmd->device; */
+	tcb_p tp                      = &np->target[cmd->target];
+	lcb_p lp		      = ncr_lp(np, tp, cmd->lun);
+	ccb_p cp;
+
+	u_char	idmsg, *msgptr;
+	u_int   msglen;
+	int	direction;
+	u_int32	lastp, goalp;
+
+	/*---------------------------------------------
+	**
+	**      Some shortcuts ...
+	**      Reject commands addressed to ourself,
+	**      or outside the supported target/lun range.
+	**
+	**---------------------------------------------
+	*/
+	if ((cmd->target == np->myaddr	  ) ||
+		(cmd->target >= MAX_TARGET) ||
+		(cmd->lun    >= MAX_LUN   )) {
+		return(DID_BAD_TARGET);
+        }
+
+	/*---------------------------------------------
+	**
+	**	Complete the 1st TEST UNIT READY command
+	**	with error condition if the device is
+	**	flagged NOSCAN, in order to speed up
+	**	the boot.
+	**
+	**---------------------------------------------
+	*/
+	if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {
+		tp->usrflag &= ~UF_NOSCAN;
+		return DID_BAD_TARGET;
+	}
+
+	if (DEBUG_FLAGS & DEBUG_TINY) {
+		PRINT_ADDR(cmd);
+		printk ("CMD=%x ", cmd->cmnd[0]);
+	}
+
+	/*---------------------------------------------------
+	**
+	**	Assign a ccb / bind cmd.
+	**	If resetting, shorten settle_time if necessary
+	**	in order to avoid spurious timeouts.
+	**	If resetting or no free ccb,
+	**	insert cmd into the waiting list.
+	**
+	**----------------------------------------------------
+	*/
+	if (np->settle_time && cmd->timeout_per_command >= HZ) {
+		u_long tlimit = ktime_get(cmd->timeout_per_command - HZ);
+		if (ktime_dif(np->settle_time, tlimit) > 0)
+			np->settle_time = tlimit;
+	}
+
+	if (np->settle_time || !(cp=ncr_get_ccb (np, cmd->target, cmd->lun))) {
+		insert_into_waiting_list(np, cmd);
+		return(DID_OK);
+	}
+	cp->cmd = cmd;
+
+	/*---------------------------------------------------
+	**
+	**	Enable tagged queue if asked by scsi ioctl
+	**
+	**----------------------------------------------------
+	*/
+#if 0	/* This stuff was only useful for linux-1.2.13 */
+	if (lp && !lp->numtags && cmd->device && cmd->device->tagged_queue) {
+		lp->numtags = tp->usrtags;
+		ncr_setup_tags (np, cp->target, cp->lun);
+	}
+#endif
+
+	/*----------------------------------------------------
+	**
+	**	Build the identify / tag / sdtr message
+	**
+	**----------------------------------------------------
+	*/
+
+	idmsg = M_IDENTIFY | cp->lun;
+
+	/* Allow disconnect unless the user flagged the device NODISC. */
+	if (cp ->tag != NO_TAG || (lp && !(tp->usrflag & UF_NODISC)))
+		idmsg |= 0x40;
+
+	msgptr = cp->scsi_smsg;
+	msglen = 0;
+	msgptr[msglen++] = idmsg;
+
+	if (cp->tag != NO_TAG) {
+		char order = np->order;
+
+		/*
+		**	Force ordered tag if necessary to avoid timeouts
+		**	and to preserve interactivity.
+		*/
+		if (lp && ktime_exp(lp->tags_stime)) {
+			lp->tags_si = !(lp->tags_si);
+			if (lp->tags_sum[lp->tags_si]) {
+				order = M_ORDERED_TAG;
+				if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>0){
+					PRINT_ADDR(cmd);
+					printk("ordered tag forced.\n");
+				}
+			}
+			lp->tags_stime = ktime_get(3*HZ);
+		}
+
+		if (order == 0) {
+			/*
+			**	Ordered write ops, unordered read ops.
+			*/
+			switch (cmd->cmnd[0]) {
+			case 0x08:  /* READ_SMALL (6) */
+			case 0x28:  /* READ_BIG  (10) */
+			case 0xa8:  /* READ_HUGE (12) */
+				order = M_SIMPLE_TAG;
+				break;
+			default:
+				order = M_ORDERED_TAG;
+			}
+		}
+		msgptr[msglen++] = order;
+		/*
+		**	For less than 128 tags, actual tags are numbered
+		**	1,3,5,..2*MAXTAGS+1,since we may have to deal
+		**	with devices that have problems with #TAG 0 or too
+		**	great #TAG numbers. For more tags (up to 256),
+		**	we use directly our tag number.
+		*/
+#if MAX_TASKS > (512/4)
+		msgptr[msglen++] = cp->tag;
+#else
+		msgptr[msglen++] = (cp->tag << 1) + 1;
+#endif
+	}
+
+	cp->host_flags	= 0;
+
+	/*----------------------------------------------------
+	**
+	**	Build the data descriptors
+	**	(scatter list, via np->scatter, unless no data).
+	**
+	**----------------------------------------------------
+	*/
+
+	direction = scsi_data_direction(cmd);
+	if (direction != SCSI_DATA_NONE) {
+		cp->segments = np->scatter (np, cp, cp->cmd);
+		if (cp->segments < 0) {
+			ncr_free_ccb(np, cp);
+			return(DID_ERROR);
+		}
+	}
+	else {
+		cp->data_len = 0;
+		cp->segments = 0;
+	}
+
+	/*---------------------------------------------------
+	**
+	**	negotiation required?
+	**
+	**	(nego_status is filled by ncr_prepare_nego())
+	**
+	**---------------------------------------------------
+	*/
+
+	cp->nego_status = 0;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+	if ((np->check_integrity && tp->ic_done) || !np->check_integrity) {
+		 if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) {
+			msglen += ncr_prepare_nego (np, cp, msgptr + msglen);
+		 }
+	}
+	else if (np->check_integrity && (cmd->ic_in_progress)) {
+		msglen += ncr_ic_nego (np, cp, cmd, msgptr + msglen);
+        }
+	else if (np->check_integrity && cmd->ic_complete) {
+		u_long current_period;
+		u_char current_offset, current_width, current_factor;
+
+		ncr_get_xfer_info (np, tp, &current_factor,
+					&current_offset, &current_width);
+
+		tp->ic_max_width = current_width;
+		tp->ic_min_sync   = current_factor;
+
+		/* Map the sync factor to a period in units of 0.1 ns. */
+		if      (current_factor == 9 ) current_period = 125;
+		else if (current_factor == 10) current_period = 250;
+		else if (current_factor == 11) current_period = 303;
+		else if (current_factor == 12) current_period = 500;
+		else current_period = current_factor * 40;
+
+		/*
+                 * Negotiation for this target is complete. Update flags.
+                 */
+		tp->period = current_period;
+		tp->widedone = 1;
+		tp->ic_done = 1;
+
+		printk("%s: Integrity Check Complete: \n", ncr_name(np));
+
+		printk("%s: %s %s SCSI", ncr_name(np),
+				current_offset?"SYNC":"ASYNC",
+				tp->ic_max_width?"WIDE":"NARROW");
+		if (current_offset) {
+			u_long mbs = 10000 * (tp->ic_max_width + 1);
+
+			printk(" %d.%d  MB/s",
+				(int) (mbs / current_period), (int) (mbs % current_period));
+
+			printk(" (%d ns, %d offset)\n",
+				  (int) current_period/10, current_offset);
+		}
+		else
+			printk(" %d MB/s. \n ", (tp->ic_max_width+1)*5);
+	}
+#else
+	if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) {
+		msglen += ncr_prepare_nego (np, cp, msgptr + msglen);
+	}
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+
+	/*----------------------------------------------------
+	**
+	**	Determine xfer direction.
+	**
+	**----------------------------------------------------
+	*/
+	if (!cp->data_len)
+		direction = SCSI_DATA_NONE;
+
+	/*
+	**	If data direction is UNKNOWN, speculate DATA_READ
+	**	but prepare alternate pointers for WRITE in case
+	**	of our speculation will be just wrong.
+	**	SCRIPTS will swap values if needed.
+	*/
+	switch(direction) {
+	case SCSI_DATA_UNKNOWN:
+	case SCSI_DATA_WRITE:
+		goalp = NCB_SCRIPT_PHYS (np, data_out2) + 8;
+		lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
+		if (direction != SCSI_DATA_UNKNOWN)
+			break;
+		cp->phys.header.wgoalp	= cpu_to_scr(goalp);
+		cp->phys.header.wlastp	= cpu_to_scr(lastp);
+		/* fall through */
+	case SCSI_DATA_READ:
+		cp->host_flags |= HF_DATA_IN;
+		goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
+		lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
+		break;
+	default:
+	case SCSI_DATA_NONE:
+		lastp = goalp = NCB_SCRIPTH_PHYS (np, no_data);
+		break;
+	}
+
+	/*
+	**	Set all pointers values needed by SCRIPTS.
+	**	If direction is unknown, start at data_io.
+	*/
+	cp->phys.header.lastp = cpu_to_scr(lastp);
+	cp->phys.header.goalp = cpu_to_scr(goalp);
+
+	if (direction == SCSI_DATA_UNKNOWN)
+		cp->phys.header.savep =
+			cpu_to_scr(NCB_SCRIPTH_PHYS (np, data_io));
+	else
+		cp->phys.header.savep= cpu_to_scr(lastp);
+
+	/*
+	**	Save the initial data pointer in order to be able
+	**	to redo the command.
+	**	We also have to save the initial lastp, since it
+	**	will be changed to DATA_IO if we don't know the data
+	**	direction and the device completes the command with
+	**	QUEUE FULL status (without entering the data phase).
+	*/
+	cp->startp = cp->phys.header.savep;
+	cp->lastp0 = cp->phys.header.lastp;
+
+	/*----------------------------------------------------
+	**
+	**	fill in ccb
+	**
+	**----------------------------------------------------
+	**
+	**
+	**	physical -> virtual backlink
+	**	Generic SCSI command
+	*/
+
+	/*
+	**	Startqueue
+	*/
+	cp->phys.header.go.start   = cpu_to_scr(NCB_SCRIPT_PHYS (np,select));
+	cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPT_PHYS (np,resel_dsa));
+	/*
+	**	select
+	*/
+	cp->phys.select.sel_id		= cp->target;
+	cp->phys.select.sel_scntl3	= tp->wval;
+	cp->phys.select.sel_sxfer	= tp->sval;
+	cp->phys.select.sel_scntl4	= tp->uval;
+	/*
+	**	message
+	*/
+	cp->phys.smsg.addr	= cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
+	cp->phys.smsg.size	= cpu_to_scr(msglen);
+
+	/*
+	**	command
+	*/
+	memcpy(cp->cdb_buf, cmd->cmnd, MIN(cmd->cmd_len, sizeof(cp->cdb_buf)));
+	cp->phys.cmd.addr	= cpu_to_scr(CCB_PHYS (cp, cdb_buf[0]));
+	cp->phys.cmd.size	= cpu_to_scr(cmd->cmd_len);
+
+	/*
+	**	status
+	*/
+	cp->actualquirks	= tp->quirks;
+	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+	cp->scsi_status		= S_ILLEGAL;
+	cp->xerr_status		= 0;
+	cp->extra_bytes		= 0;
+
+	/*
+	**	extreme data pointer.
+	**	shall be positive, so -1 is lower than lowest.:)
+	*/
+	cp->ext_sg  = -1;
+	cp->ext_ofs = 0;
+
+	/*----------------------------------------------------
+	**
+	**	Critical region: start this job.
+	**
+	**----------------------------------------------------
+	*/
+
+	/*
+	**	activate this job.
+	*/
+
+	/*
+	**	insert next CCBs into start queue.
+	**	2 max at a time is enough to flush the CCB wait queue.
+	*/
+	if (lp)
+		ncr_start_next_ccb(np, lp, 2);
+	else
+		ncr_put_start_queue(np, cp);
+
+	/*
+	**	Command is successfully queued.
+	*/
+
+	return(DID_OK);
+}
+
+
+/*==========================================================
+**
+**
+** Insert a CCB into the start queue and wake up the
+** SCRIPTS processor.
+**
+**
+**==========================================================
+*/
+
+static void ncr_start_next_ccb(ncb_p np, lcb_p lp, int maxn)
+{
+	XPT_QUEHEAD *qp;
+	ccb_p cp;
+
+	/* Move up to 'maxn' CCBs from the LUN wait queue to the busy
+	** queue, publish each in the task table, and push it onto the
+	** chip start queue — while the LUN's queue depth permits. */
+	while (maxn-- && lp->queuedccbs < lp->queuedepth) {
+		qp = xpt_remque_head(&lp->wait_ccbq);
+		if (!qp)
+			break;
+		++lp->queuedccbs;
+		cp = xpt_que_entry(qp, struct ccb, link_ccbq);
+		xpt_insque_tail(qp, &lp->busy_ccbq);
+		lp->tasktbl[cp->tag == NO_TAG ? 0 : cp->tag] =
+			cpu_to_scr(cp->p_ccb);
+		ncr_put_start_queue(np, cp);
+	}
+}
+
+static void ncr_put_start_queue(ncb_p np, ccb_p cp)
+{
+	u_short	qidx;
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+	/*
+	**	If the previously queued CCB is not yet done,
+	**	set the IARB hint. The SCRIPTS will go with IARB
+	**	for this job when starting the previous one.
+	**	We leave devices a chance to win arbitration by
+	**	not using more than 'iarb_max' consecutive
+	**	immediate arbitrations.
+	*/
+	if (np->last_cp && np->iarb_count < np->iarb_max) {
+		np->last_cp->host_flags |= HF_HINT_IARB;
+		++np->iarb_count;
+	}
+	else
+		np->iarb_count = 0;
+	np->last_cp = cp;
+#endif
+
+	/*
+	**	insert into start queue.
+	**	The idle-task sentinel is written one slot ahead BEFORE
+	**	the new CCB pointer becomes visible; the memory barriers
+	**	keep SCRIPTS from ever reading a stale next entry.
+	*/
+	qidx = np->squeueput + 2;
+	if (qidx >= MAX_START*2) qidx = 0;
+
+	np->squeue [qidx]	   = cpu_to_scr(np->p_idletask);
+	MEMORY_BARRIER();
+	np->squeue [np->squeueput] = cpu_to_scr(cp->p_ccb);
+
+	np->squeueput = qidx;
+	cp->queued = 1;
+
+	if (DEBUG_FLAGS & DEBUG_QUEUE)
+		printk ("%s: queuepos=%d.\n", ncr_name (np), np->squeueput);
+
+	/*
+	**	Script processor may be waiting for reselect.
+	**	Wake it up.
+	*/
+	MEMORY_BARRIER();
+	OUTB (nc_istat, SIGP|np->istat_sem);
+}
+
+
+/*==========================================================
+**
+** Soft reset the chip.
+**
+** Some 896 and 876 chip revisions may hang-up if we set
+** the SRST (soft reset) bit at the wrong time when SCRIPTS
+** are running.
+** So, we need to abort the current operation prior to
+** soft resetting the chip.
+**
+**==========================================================
+*/
+
+static void ncr_chip_reset (ncb_p np)
+{
+	/* Pulse the SRST bit in ISTAT: assert soft reset, hold it
+	** briefly, then release. Does NOT abort running SCRIPTS first
+	** (see ncr_soft_reset() for the safe sequence). */
+	OUTB (nc_istat,  SRST);
+	UDELAY (10);
+	OUTB (nc_istat,  0   );
+}
+
+static void ncr_soft_reset(ncb_p np)
+{
+	u_char istat;
+	int i;
+
+	/* Request an abort of the current chip operation (CABRT), then
+	** poll ISTAT, draining pending SCSI interrupts (SIP -> read SIST)
+	** until the DMA interrupt (DIP) signals the abort completed. */
+	OUTB (nc_istat, CABRT);
+	for (i = 1000000 ; i ; --i) {
+		istat = INB (nc_istat);
+		if (istat & SIP) {
+			INW (nc_sist);
+			continue;
+		}
+		if (istat & DIP) {
+			OUTB (nc_istat, 0);
+			INB (nc_dstat);
+			break;
+		}
+	}
+	if (!i)
+		printk("%s: unable to abort current chip operation.\n",
+			ncr_name(np));
+	/* Only now is it safe to pulse SRST. */
+	ncr_chip_reset(np);
+}
+
+/*==========================================================
+**
+**
+** Start reset process.
+** The interrupt handler will reinitialize the chip.
+** The timeout handler will wait for settle_time before
+** clearing it and so resuming command processing.
+**
+**
+**==========================================================
+*/
+static void ncr_start_reset(ncb_p np)
+{
+	/* Kick off a SCSI bus reset with interrupts enabled, using the
+	** configured settle delay; result deliberately ignored here. */
+	(void) ncr_reset_scsi_bus(np, 1, driver_setup.settle_delay);
+}
+
+static int ncr_reset_scsi_bus(ncb_p np, int enab_int, int settle_delay)
+{
+	u_int32 term;
+	int retv = 0;
+
+	/* Suspend command processing for 'settle_delay' seconds. */
+	np->settle_time	= ktime_get(settle_delay * HZ);
+
+	if (bootverbose > 1)
+		printk("%s: resetting, "
+			"command processing suspended for %d seconds\n",
+			ncr_name(np), settle_delay);
+
+	ncr_soft_reset(np);	/* Soft reset the chip */
+	UDELAY (2000);	/* The 895/6 need time for the bus mode to settle */
+	if (enab_int)
+		OUTW (nc_sien, RST);
+	/*
+	**	Enable Tolerant, reset IRQD if present and
+	**	properly set IRQ mode, prior to resetting the bus.
+	*/
+	OUTB (nc_stest3, TE);
+	OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
+	/* Assert RST on the SCSI bus. */
+	OUTB (nc_scntl1, CRST);
+	UDELAY (200);
+
+	if (!driver_setup.bus_check)
+		goto out;
+	/*
+	**	Check for no terminators or SCSI bus shorts to ground.
+	**	Read SCSI data bus, data parity bits and control signals.
+	**	We are expecting RESET to be TRUE and other signals to be
+	**	FALSE.
+	*/
+	term =	INB(nc_sstat0);
+	term =	((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
+	term |= ((INB(nc_sstat2) & 0x01) << 26) |	/* sdp1     */
+		((INW(nc_sbdl) & 0xff)   << 9)  |	/* d7-0     */
+		((INW(nc_sbdl) & 0xff00) << 10) |	/* d15-8    */
+		INB(nc_sbcl);	/* req ack bsy sel atn msg cd io    */
+
+	if (!(np->features & FE_WIDE))
+		term &= 0x3ffff;
+
+	/* Only the RST line (bit 2<<7) should be active; anything else
+	** suggests missing termination or a shorted bus. */
+	if (term != (2<<7)) {
+		printk("%s: suspicious SCSI data while resetting the BUS.\n",
+			ncr_name(np));
+		printk("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+			"0x%lx, expecting 0x%lx\n",
+			ncr_name(np),
+			(np->features & FE_WIDE) ? "dp1,d15-8," : "",
+			(u_long)term, (u_long)(2<<7));
+		if (driver_setup.bus_check == 1)
+			retv = 1;
+	}
+out:
+	/* Deassert RST and report: 0 = OK, 1 = suspicious bus state. */
+	OUTB (nc_scntl1, 0);
+	return retv;
+}
+
+/*==========================================================
+**
+**
+** Reset the SCSI BUS.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_reset_bus (ncb_p np, Scsi_Cmnd *cmd, int sync_reset)
+{
+/*	Scsi_Device        *device    = cmd->device; */
+	ccb_p cp;
+	int found;
+
+/*
+ * Return immediately if reset is in progress.
+ */
+	if (np->settle_time) {
+		return SCSI_RESET_PUNT;
+	}
+/*
+ * Start the reset process.
+ * The script processor is then assumed to be stopped.
+ * Commands will now be queued in the waiting list until a settle
+ * delay of 2 seconds will be completed.
+ */
+	ncr_start_reset(np);
+/*
+ * First, look in the wakeup list
+ */
+	for (found=0, cp=np->ccbc; cp; cp=cp->link_ccb) {
+		/*
+		**	look for the ccb of this command.
+		*/
+		if (cp->host_status == HS_IDLE) continue;
+		if (cp->cmd == cmd) {
+			found = 1;
+			break;
+		}
+	}
+/*
+ * Then, look in the waiting list
+ */
+	if (!found && retrieve_from_waiting_list(0, np, cmd))
+		found = 1;
+/*
+ * Wake-up all awaiting commands with DID_RESET.
+ */
+	reset_waiting_list(np);
+/*
+ * Wake-up all pending commands with HS_RESET -> DID_RESET.
+ */
+	ncr_wakeup(np, HS_RESET);
+/*
+ * If the involved command was not in a driver queue, and the
+ * scsi driver told us reset is synchronous, and the command is not
+ * currently in the waiting list, complete it with DID_RESET status,
+ * in order to keep it alive.
+ */
+	if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
+		SetScsiResult(cmd, DID_RESET, 0);
+		ncr_queue_done_cmd(np, cmd);
+	}
+
+	return SCSI_RESET_SUCCESS;
+}
+
+/*==========================================================
+**
+**
+** Abort an SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_abort_command (ncb_p np, Scsi_Cmnd *cmd)
+{
+/*	Scsi_Device        *device    = cmd->device; */
+	ccb_p cp;
+
+/*
+ * First, look for the scsi command in the waiting list;
+ * if found there it never reached the chip and can be
+ * completed immediately as aborted.
+ */
+	if (remove_from_waiting_list(np, cmd)) {
+		SetScsiAbortResult(cmd);
+		ncr_queue_done_cmd(np, cmd);
+		return SCSI_ABORT_SUCCESS;
+	}
+
+/*
+ * Then, look in the wakeup list
+ */
+	for (cp=np->ccbc; cp; cp=cp->link_ccb) {
+		/*
+		**	look for the ccb of this command.
+		*/
+		if (cp->host_status == HS_IDLE) continue;
+		if (cp->cmd == cmd)
+			break;
+	}
+
+	if (!cp) {
+		return SCSI_ABORT_NOT_RUNNING;
+	}
+
+	/*
+	**	Keep track we have to abort this job.
+	*/
+	cp->to_abort = 1;
+
+	/*
+	**	Tell the SCRIPTS processor to stop
+	**	and synchronize with us.
+	*/
+	np->istat_sem = SEM;
+
+	/*
+	**	If there are no requests, the script
+	**	processor will sleep on SEL_WAIT_RESEL.
+	**	Let's wake it up, since it may have to work.
+	*/
+	OUTB (nc_istat, SIGP|SEM);
+
+	/*
+	**	Tell user we are working for him.
+	*/
+	return SCSI_ABORT_PENDING;
+}
+
+/*==========================================================
+**
+** Linux release module stuff.
+**
+** Called before unloading the module
+** Detach the host.
+** We have to free resources and halt the NCR chip
+**
+**==========================================================
+*/
+
+#ifdef MODULE
+/*
+**	Detach the host on module unload: stop the driver timer,
+**	reset the chip, restore the BIOS register settings saved at
+**	attach time and free all host resources.
+**	Always returns 1.
+*/
+static int ncr_detach(ncb_p np)
+{
+	int i;
+
+	printk("%s: detaching ...\n", ncr_name(np));
+
+/*
+** Stop the ncr_timeout process
+** Set release_stage to 1 and wait that ncr_timeout() set it to 2.
+*/
+	np->release_stage = 1;
+	for (i = 50 ; i && np->release_stage != 2 ; i--) MDELAY (100);
+	if (np->release_stage != 2)
+		printk("%s: the timer seems to be already stopped\n",
+			ncr_name(np));
+	/* NOTE(review): this else branch is a no-op — release_stage is
+	** already 2 when it is taken.  Kept for symmetry; confirm intent. */
+	else np->release_stage = 2;
+
+/*
+** Reset NCR chip.
+** We should use ncr_soft_reset(), but we donnot want to do
+** so, since we may not be safe if interrupts occur.
+*/
+
+	printk("%s: resetting chip\n", ncr_name(np));
+	ncr_chip_reset(np);
+
+/*
+** Restore bios setting for automatic clock detection.
+*/
+	OUTB(nc_dmode,	np->sv_dmode);
+	OUTB(nc_dcntl,	np->sv_dcntl);
+	OUTB(nc_ctest3,	np->sv_ctest3);
+	OUTB(nc_ctest4,	np->sv_ctest4);
+	OUTB(nc_ctest5,	np->sv_ctest5);
+	OUTB(nc_gpcntl,	np->sv_gpcntl);
+	OUTB(nc_stest2,	np->sv_stest2);
+
+	ncr_selectclock(np, np->sv_scntl3);
+/*
+** Free host resources
+*/
+	ncr_free_resources(np);
+
+	return 1;
+}
+#endif
+
+/*==========================================================
+**
+**
+** Complete execution of a SCSI command.
+** Signal completion to the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+
+/*
+**	Complete execution of one SCSI command: compute the residual,
+**	decode extended/host/SCSI status into a midlayer result code,
+**	free the CCB and restart any commands queued for this lun or
+**	for the controller, then signal completion to the generic
+**	SCSI driver.
+*/
+void ncr_complete (ncb_p np, ccb_p cp)
+{
+	Scsi_Cmnd *cmd;
+	tcb_p tp;
+	lcb_p lp;
+
+	/*
+	**	Sanity check
+	*/
+	if (!cp || !cp->cmd)
+		return;
+
+	/*
+	**	Print some debugging info.
+	*/
+
+	if (DEBUG_FLAGS & DEBUG_TINY)
+		printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp,
+			cp->host_status,cp->scsi_status);
+
+	/*
+	**	Get command, target and lun pointers.
+	*/
+
+	cmd = cp->cmd;
+	cp->cmd = NULL;
+	tp = &np->target[cp->target];
+	lp = ncr_lp(np, tp, cp->lun);
+
+	/*
+	**	We donnot queue more than 1 ccb per target
+	**	with negotiation at any time. If this ccb was
+	**	used for negotiation, clear this info in the tcb.
+	*/
+
+	if (cp == tp->nego_cp)
+		tp->nego_cp = 0;
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+	/*
+	**	We just complete the last queued CCB.
+	**	Clear this info that is no more relevant.
+	*/
+	if (cp == np->last_cp)
+		np->last_cp = 0;
+#endif
+
+	/*
+	**	If auto-sense performed, change scsi status,
+	**	Otherwise, compute the residual.
+	*/
+	if (cp->host_flags & HF_AUTO_SENSE) {
+		cp->scsi_status = cp->sv_scsi_status;
+		cp->xerr_status = cp->sv_xerr_status;
+	}
+	else {
+		cp->resid = 0;
+		if (cp->xerr_status ||
+		    cp->phys.header.lastp != cp->phys.header.goalp)
+			cp->resid = ncr_compute_residual(np, cp);
+	}
+
+	/*
+	**	Check for extended errors.
+	*/
+
+	if (cp->xerr_status) {
+		if (cp->xerr_status & XE_PARITY_ERR) {
+			PRINT_ADDR(cmd);
+			printk ("unrecovered SCSI parity error.\n");
+		}
+		if (cp->xerr_status & XE_EXTRA_DATA) {
+			PRINT_ADDR(cmd);
+			printk ("extraneous data discarded.\n");
+		}
+		if (cp->xerr_status & XE_BAD_PHASE) {
+			PRINT_ADDR(cmd);
+			printk ("illegal scsi phase (4/5).\n");
+		}
+		if (cp->xerr_status & XE_SODL_UNRUN) {
+			PRINT_ADDR(cmd);
+			printk ("ODD transfer in DATA OUT phase.\n");
+		}
+		if (cp->xerr_status & XE_SWIDE_OVRUN){
+			PRINT_ADDR(cmd);
+			printk ("ODD transfer in DATA IN phase.\n");
+		}
+
+		/* any extended error downgrades a COMPLETE host status */
+		if (cp->host_status==HS_COMPLETE)
+			cp->host_status = HS_FAIL;
+	}
+
+	/*
+	**	Print out any error for debugging purpose.
+	*/
+	if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+		if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD ||
+		    cp->resid) {
+			PRINT_ADDR(cmd);
+			printk ("ERROR: cmd=%x host_status=%x scsi_status=%x "
+				"data_len=%d residual=%d\n",
+				cmd->cmnd[0], cp->host_status, cp->scsi_status,
+				cp->data_len, cp->resid);
+		}
+	}
+
+#if	LINUX_VERSION_CODE >= LinuxVersionCode(2,3,99)
+	/*
+	**	Move residual byte count to user structure.
+	*/
+	cmd->resid = cp->resid;
+#endif
+	/*
+	**	Check the status.
+	*/
+	if (   (cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_GOOD ||
+		    cp->scsi_status == S_COND_MET)) {
+		/*
+		**	All went well (GOOD status).
+		**	CONDITION MET status is returned on
+		**	`Pre-Fetch' or `Search data' success.
+		*/
+		SetScsiResult(cmd, DID_OK, cp->scsi_status);
+
+		/*
+		**	Allocate the lcb if not yet.
+		*/
+		if (!lp)
+			ncr_alloc_lcb (np, cp->target, cp->lun);
+
+		/*
+		**	On standard INQUIRY response (EVPD and CmDt
+		**	not set), setup logical unit according to
+		**	announced capabilities (we need the first 7 bytes).
+		*/
+		if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3) &&
+		    cmd->cmnd[4] >= 7 && !cmd->use_sg) {
+			sync_scsi_data(np, cmd);	/* SYNC the data */
+			ncr_setup_lcb (np, cp->target, cp->lun,
+				       (char *) cmd->request_buffer);
+		}
+
+		/*
+		**	If tags was reduced due to queue full,
+		**	increase tags if 1000 good status received.
+		*/
+		if (lp && lp->usetags && lp->numtags < lp->maxtags) {
+			++lp->num_good;
+			if (lp->num_good >= 1000) {
+				lp->num_good = 0;
+				++lp->numtags;
+				ncr_setup_tags (np, cp->target, cp->lun);
+			}
+		}
+	} else if ((cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_CHECK_COND)) {
+		/*
+		**   Check condition code
+		*/
+		SetScsiResult(cmd, DID_OK, S_CHECK_COND);
+
+		if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+			PRINT_ADDR(cmd);
+			ncr_printl_hex("sense data:", cmd->sense_buffer, 14);
+		}
+	} else if ((cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_CONFLICT)) {
+		/*
+		**   Reservation Conflict condition code
+		*/
+		SetScsiResult(cmd, DID_OK, S_CONFLICT);
+
+	} else if ((cp->host_status == HS_COMPLETE)
+		&& (cp->scsi_status == S_BUSY ||
+		    cp->scsi_status == S_QUEUE_FULL)) {
+
+		/*
+		**   Target is busy.
+		*/
+		SetScsiResult(cmd, DID_OK, cp->scsi_status);
+
+	} else if ((cp->host_status == HS_SEL_TIMEOUT)
+		|| (cp->host_status == HS_TIMEOUT)) {
+
+		/*
+		**   No response
+		*/
+		SetScsiResult(cmd, DID_TIME_OUT, cp->scsi_status);
+
+	} else if (cp->host_status == HS_RESET) {
+
+		/*
+		**   SCSI bus reset
+		*/
+		SetScsiResult(cmd, DID_RESET, cp->scsi_status);
+
+	} else if (cp->host_status == HS_ABORTED) {
+
+		/*
+		**   Transfer aborted
+		*/
+		SetScsiAbortResult(cmd);
+
+	} else {
+		int did_status;
+
+		/*
+		**  Other protocol messes
+		*/
+		PRINT_ADDR(cmd);
+		printk ("COMMAND FAILED (%x %x) @%p.\n",
+			cp->host_status, cp->scsi_status, cp);
+
+		did_status = DID_ERROR;
+		if (cp->xerr_status & XE_PARITY_ERR)
+			did_status = DID_PARITY;
+
+		SetScsiResult(cmd, did_status, cp->scsi_status);
+	}
+
+	/*
+	**	trace output
+	*/
+
+	if (tp->usrflag & UF_TRACE) {
+		PRINT_ADDR(cmd);
+		printk (" CMD:");
+		ncr_print_hex(cmd->cmnd, cmd->cmd_len);
+
+		if (cp->host_status==HS_COMPLETE) {
+			switch (cp->scsi_status) {
+			case S_GOOD:
+				printk ("  GOOD");
+				break;
+			case S_CHECK_COND:
+				printk ("  SENSE:");
+				ncr_print_hex(cmd->sense_buffer, 14);
+				break;
+			default:
+				printk ("  STAT: %x\n", cp->scsi_status);
+				break;
+			}
+		} else printk ("  HOSTERROR: %x", cp->host_status);
+		printk ("\n");
+	}
+
+	/*
+	**	Free this ccb
+	*/
+	ncr_free_ccb (np, cp);
+
+	/*
+	**	requeue awaiting scsi commands for this lun.
+	*/
+	if (lp && lp->queuedccbs < lp->queuedepth &&
+	    !xpt_que_empty(&lp->wait_ccbq))
+		ncr_start_next_ccb(np, lp, 2);
+
+	/*
+	**	requeue awaiting scsi commands for this controller.
+	*/
+	if (np->waiting_list)
+		requeue_waiting_list(np);
+
+	/*
+	**	signal completion to generic driver.
+	*/
+	ncr_queue_done_cmd(np, cmd);
+}
+
+/*==========================================================
+**
+**
+** Signal all (or one) control block done.
+**
+**
+**==========================================================
+*/
+
+/*
+** The NCR has completed CCBs.
+** Look at the DONE QUEUE.
+**
+** On architectures that may reorder LOAD/STORE operations,
+** a memory barrier may be needed after the reading of the
+** so-called `flag' and prior to dealing with the data.
+*/
+/*
+**	Reap completed CCBs from the chip done queue, completing each
+**	one through ncr_complete().  A zero DSA entry marks the end of
+**	the valid entries.  Returns the number of CCBs completed.
+*/
+int ncr_wakeup_done (ncb_p np)
+{
+	ccb_p cp;
+	int i, n;
+	u_long dsa;
+
+	n = 0;
+	i = np->dqueueget;
+	while (1) {
+		dsa = scr_to_cpu(np->dqueue[i]);
+		if (!dsa)
+			break;
+		/* consume the entry and advance, wrapping the ring index */
+		np->dqueue[i] = 0;
+		if ((i = i+2) >= MAX_START*2)
+			i = 0;
+
+		cp = ncr_ccb_from_dsa(np, dsa);
+		if (cp) {
+			/* order the DSA (flag) read before reading CCB data */
+			MEMORY_BARRIER();
+			ncr_complete (np, cp);
+			++n;
+		}
+		else
+			printk (KERN_ERR "%s: bad DSA (%lx) in done queue.\n",
+				ncr_name(np), dsa);
+	}
+	np->dqueueget = i;
+
+	return n;
+}
+
+/*
+** Complete all active CCBs.
+*/
+/*
+**	Force completion of every non-idle CCB with the given host
+**	status code.  Idle CCBs are left untouched.
+*/
+void ncr_wakeup (ncb_p np, u_long code)
+{
+	ccb_p ccb;
+
+	for (ccb = np->ccbc; ccb; ccb = ccb->link_ccb) {
+		if (ccb->host_status == HS_IDLE)
+			continue;
+		ccb->host_status = code;
+		ncr_complete (np, ccb);
+	}
+}
+
+/*==========================================================
+**
+**
+** Start NCR chip.
+**
+**
+**==========================================================
+*/
+
+/*
+**	(Re)start the NCR chip: optionally soft-reset it, rebuild the
+**	start and done queues, complete all pending jobs with @code,
+**	program the chip registers, prepare per-target negotiation
+**	state, then download SCRIPTS (to on-chip RAM when present)
+**	and start the script processor.
+*/
+void ncr_init (ncb_p np, int reset, char * msg, u_long code)
+{
+ 	int	i;
+ 	u_long	phys;
+
+ 	/*
+	**	Reset chip if asked, otherwise just clear fifos.
+ 	*/
+
+	if (reset)
+		ncr_soft_reset(np);
+	else {
+		OUTB (nc_stest3, TE|CSF);
+		OUTONB (nc_ctest3, CLF);
+	}
+
+	/*
+	**	Message.
+	*/
+
+	if (msg) printk (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg);
+
+	/*
+	**	Clear Start Queue
+	*/
+	phys = np->p_squeue;
+	np->queuedepth = MAX_START - 1;	/* 1 entry needed as end marker */
+	for (i = 0; i < MAX_START*2; i += 2) {
+		np->squeue[i]   = cpu_to_scr(np->p_idletask);
+		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
+	}
+	np->squeue[MAX_START*2-1] = cpu_to_scr(phys);
+
+
+	/*
+	**	Start at first entry.
+	*/
+	np->squeueput = 0;
+	np->scripth0->startpos[0] = cpu_to_scr(phys);
+
+	/*
+	**	Clear Done Queue
+	*/
+	phys = vtobus(np->dqueue);
+	for (i = 0; i < MAX_START*2; i += 2) {
+		np->dqueue[i]   = 0;
+		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
+	}
+	np->dqueue[MAX_START*2-1] = cpu_to_scr(phys);
+
+	/*
+	**	Start at first entry.
+	*/
+	np->scripth0->done_pos[0] = cpu_to_scr(phys);
+	np->dqueueget = 0;
+
+	/*
+	**	Wakeup all pending jobs.
+	*/
+	ncr_wakeup (np, code);
+
+	/*
+	**	Init chip.
+	*/
+
+	OUTB (nc_istat,  0x00   );      /*  Remove Reset, abort */
+	UDELAY (2000);	/* The 895 needs time for the bus mode to settle */
+
+	OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
+					/*  full arb., ena parity, par->ATN  */
+	OUTB (nc_scntl1, 0x00);		/*  odd parity, and remove CRST!! */
+
+	ncr_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */
+
+	OUTB (nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
+	OUTW (nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
+	OUTB (nc_istat , SIGP	);		/*  Signal Process */
+	OUTB (nc_dmode , np->rv_dmode);		/* Burst length, dma mode */
+	OUTB (nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */
+
+	OUTB (nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
+	OUTB (nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
+	OUTB (nc_ctest4, np->rv_ctest4);	/* Master parity checking */
+
+	if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66)){
+		OUTB (nc_stest2, EXT|np->rv_stest2);
+		/* Extended Sreq/Sack filtering, not supported in C1010/C1010_66 */
+	}
+	OUTB (nc_stest3, TE);			/* TolerANT enable */
+	OUTB (nc_stime0, 0x0c	);		/* HTH disabled  STO 0.25 sec */
+
+	/*
+	**	DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
+	**	Disable overlapped arbitration for all dual-function
+	**	devices, regardless revision id.
+	**	We may consider it is a post-chip-design feature. ;-)
+	**
+	**	Errata applies to all 896 and 1010 parts.
+	*/
+	if (np->device_id == PCI_DEVICE_ID_NCR_53C875)
+		OUTB (nc_ctest0, (1<<5));
+	else if (np->device_id == PCI_DEVICE_ID_NCR_53C896 ||
+		 np->device_id == PCI_DEVICE_ID_LSI_53C1010 ||
+		 np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 )
+		np->rv_ccntl0 |= DPR;
+
+	/*
+	**	C1010_66MHz rev 0 part requires AIPCNTL1 bit 3 to be set.
+	*/
+	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)
+		OUTB(nc_aipcntl1, (1<<3));
+
+	/*
+	**	If 64 bit (895A/896/1010/1010_66) write the CCNTL1 register to
+	**	enable 40 bit address table indirect addressing for MOVE.
+	**	Also write CCNTL0 if 64 bit chip, since this register seems
+	**	to only be used by 64 bit cores.
+	*/
+	if (np->features & FE_64BIT) {
+		OUTB (nc_ccntl0, np->rv_ccntl0);
+		OUTB (nc_ccntl1, np->rv_ccntl1);
+	}
+
+	/*
+	**	If phase mismatch handled by scripts (53C895A or 53C896
+	**	or 53C1010 or 53C1010_66), set PM jump addresses.
+	*/
+
+	if (np->features & FE_NOPM) {
+		printk(KERN_INFO "%s: handling phase mismatch from SCRIPTS.\n",
+		       ncr_name(np));
+		OUTL (nc_pmjad1, NCB_SCRIPTH_PHYS (np, pm_handle));
+		OUTL (nc_pmjad2, NCB_SCRIPTH_PHYS (np, pm_handle));
+	}
+
+	/*
+	**    Enable GPIO0 pin for writing if LED support from SCRIPTS.
+	**    Also set GPIO5 and clear GPIO6 if hardware LED control.
+	*/
+
+	if (np->features & FE_LED0)
+		OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
+	else if (np->features & FE_LEDC)
+		OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);
+
+
+	/*
+	**      enable ints
+	*/
+
+	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
+	OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);
+
+	/*
+	**	For 895/895A/896/c1010
+	**	Enable SBMC interrupt and save current SCSI bus mode.
+	*/
+	if ( (np->features & FE_ULTRA2) || (np->features & FE_ULTRA3) ) {
+		OUTONW (nc_sien, SBMC);
+		np->scsi_mode = INB (nc_stest4) & SMODE;
+	}
+
+	/*
+	**	Fill in target structure.
+	**	Reinitialize usrsync.
+	**	Reinitialize usrwide.
+	**	Prepare sync negotiation according to actual SCSI bus mode.
+	*/
+
+	for (i=0;i<MAX_TARGET;i++) {
+		tcb_p tp = &np->target[i];
+
+		tp->to_reset = 0;
+
+		tp->sval    = 0;
+		tp->wval    = np->rv_scntl3;
+		tp->uval    = np->rv_scntl4;
+
+		/* clamp the user sync factor into the chip's supported
+		** range; 255 means "no synchronous transfers" */
+		if (tp->usrsync != 255) {
+			if ( tp->usrsync <= np->maxsync) {
+				if ( tp->usrsync < np->minsync) {
+					tp->usrsync = np->minsync;
+				}
+			}
+			else
+				tp->usrsync = 255;
+		};
+
+		if (tp->usrwide > np->maxwide)
+			tp->usrwide = np->maxwide;
+
+		ncr_negotiate (np, tp);
+	}
+
+	/*
+	**    Download SCSI SCRIPTS to on-chip RAM if present,
+	**    and start script processor.
+	**    We do the download preferently from the CPU.
+	**    For platforms that may not support PCI memory mapping,
+	**    we use a simple SCRIPTS that performs MEMORY MOVEs.
+	*/
+	if (np->base2_ba) {
+		if (bootverbose)
+			printk ("%s: Downloading SCSI SCRIPTS.\n",
+				ncr_name(np));
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+		if (np->base2_ws == 8192)
+			phys = NCB_SCRIPTH0_PHYS (np, start_ram64);
+		else
+			phys = NCB_SCRIPTH_PHYS (np, start_ram);
+#else
+		if (np->base2_ws == 8192) {
+			memcpy_to_pci(np->base2_va + 4096,
+					np->scripth0, sizeof(struct scripth));
+			OUTL (nc_mmws, np->scr_ram_seg);
+			OUTL (nc_mmrs, np->scr_ram_seg);
+			OUTL (nc_sfs,  np->scr_ram_seg);
+			phys = NCB_SCRIPTH_PHYS (np, start64);
+		}
+		else
+			phys = NCB_SCRIPT_PHYS (np, init);
+		memcpy_to_pci(np->base2_va, np->script0, sizeof(struct script));
+#endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+	}
+	else
+		phys = NCB_SCRIPT_PHYS (np, init);
+
+	np->istat_sem = 0;
+
+	OUTL (nc_dsa, np->p_ncb);
+	OUTL_DSP (phys);
+}
+
+/*==========================================================
+**
+** Prepare the negotiation values for wide and
+** synchronous transfers.
+**
+**==========================================================
+*/
+
+/*
+**	Prepare negotiation state for one target: clamp the requested
+**	sync factor against the bus mode and chip limits, derive the
+**	maximum offset, and mark both sync (period=0) and wide
+**	(widedone=0) as "to be negotiated".
+*/
+static void ncr_negotiate (struct ncb* np, struct tcb* tp)
+{
+	/*
+	**	minsync unit is 4ns !
+	*/
+
+	u_long minsync = tp->usrsync;
+
+	/*
+	**	SCSI bus mode limit
+	*/
+
+	if (np->scsi_mode && np->scsi_mode == SMODE_SE) {
+		if (minsync < 12) minsync = 12;
+	}
+
+	/*
+	**	our limit ..
+	*/
+
+	if (minsync < np->minsync)
+		minsync = np->minsync;
+
+	/*
+	**	divider limit
+	*/
+
+	if (minsync > np->maxsync)
+		minsync = 255;	/* 255 == asynchronous only */
+
+	tp->minsync = minsync;
+	tp->maxoffs = (minsync<255 ? np->maxoffs : 0);
+
+	/*
+	**	period=0: has to negotiate sync transfer
+	*/
+
+	tp->period=0;
+
+	/*
+	**	widedone=0: has to negotiate wide transfer
+	*/
+	tp->widedone=0;
+}
+
+/*==========================================================
+**
+** Get clock factor and sync divisor for a given
+** synchronous factor period.
+** Returns the clock factor (in sxfer) and scntl3
+** synchronous divisor field.
+**
+**==========================================================
+*/
+
+/*
+**	Compute the NCR clock factor (sxfer field) and the scntl3
+**	synchronous clock divisor for a given synchronous factor
+**	period.
+**
+**	@sfac:    SCSI synchronous factor (9 implies DT clocking).
+**	@fakp:    returned clock factor (already biased by -4).
+**	@scntl3p: returned scntl3 synchronous divisor field.
+*/
+static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p)
+{
+	u_long	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
+	int	div = np->clock_divn;	/* Number of divisors supported	*/
+	u_long	fak;			/* Sync factor in sxfer		*/
+	u_long	per;			/* Period in tenths of ns	*/
+	u_long	kpc;			/* (per * clk)			*/
+
+	/*
+	**	Compute the synchronous period in tenths of nano-seconds
+	**	from sfac.
+	**
+	**	Note, if sfac == 9, DT is being used. Double the period of 125
+	**	to 250.
+	*/
+	if	(sfac <= 10)	per = 250;
+	else if	(sfac == 11)	per = 303;
+	else if	(sfac == 12)	per = 500;
+	else			per = 40 * sfac;
+
+	/*
+	**	Look for the greatest clock divisor that allows an
+	**	input speed faster than the period.
+	*/
+	kpc = per * clk;
+	while (--div >= 0)
+		if (kpc >= (div_10M[div] << 2)) break;
+
+	/*
+	**	Defensive: if no divisor matched (should not happen for
+	**	sane clock/period combinations), use the fastest one
+	**	rather than indexing div_10M[-1].
+	*/
+	if (div < 0)
+		div = 0;
+
+	/*
+	**	Calculate the lowest clock factor that allows an output
+	**	speed not faster than the period.
+	*/
+	fak = (kpc - 1) / div_10M[div] + 1;
+
+#if 0	/* This optimization does not seem very useful */
+
+	per = (fak * div_10M[div]) / clk;
+
+	/*
+	**	Why not to try the immediate lower divisor and to choose
+	**	the one that allows the fastest output speed ?
+	**	We dont want input speed too much greater than output speed.
+	*/
+	if (div >= 1 && fak < 8) {
+		u_long fak2, per2;
+		fak2 = (kpc - 1) / div_10M[div-1] + 1;
+		per2 = (fak2 * div_10M[div-1]) / clk;
+		if (per2 < per && fak2 <= 8) {
+			fak = fak2;
+			per = per2;
+			--div;
+		}
+	}
+#endif
+
+	if (fak < 4) fak = 4;	/* Should never happen, too bad ... */
+
+	/*
+	**	Compute and return sync parameters for the ncr.
+	**	If sfac < 25, and 8xx parts, desire that the chip operate at
+	**	least at Ultra speeds.  Must set bit 7 of scntl3.
+	**	For C1010, do not set this bit. If operating at Ultra3 speeds,
+	**	set the U3EN bit instead.
+	*/
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		*scntl3p = (div+1) << 4;
+		*fakp = 0;
+	}
+	else {
+		*scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0);
+		*fakp = fak - 4;
+	}
+}
+
+/*==========================================================
+**
+** Utility routine to return the current bus width
+** synchronous period and offset.
+** Utilizes target sval, wval and uval
+**
+**==========================================================
+*/
+/*
+**	Return the current bus width, synchronous factor and offset for
+**	a target, decoded from the negotiated sval/wval/uval register
+**	images (inverse of the math in ncr_setsync).
+*/
+static void ncr_get_xfer_info(ncb_p np, tcb_p tp, u_char *factor,
+			u_char *offset, u_char *width)
+{
+
+	u_char idiv;
+	u_long period;
+
+	*width = (tp->wval & EWS) ? 1 : 0;
+
+	/* C1010 parts keep a 6-bit offset in sxfer, others 5 bits */
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+		*offset = (tp->sval & 0x3f);
+	else
+		*offset = (tp->sval & 0x1f);
+
+	/*
+	 * Midlayer signal to the driver that all of the scsi commands
+	 * for the integrity check have completed. Save the negotiated
+	 * parameters (extracted from sval, wval and uval).
+	 * See ncr_setsync for alg. details.
+	 */
+
+	idiv = (tp->wval>>4) & 0x07;
+
+	if ( *offset && idiv ) {
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)){
+			/* uval bit 7 set => DT clocking (half the period) */
+			if (tp->uval & 0x80)
+				period = (2*div_10M[idiv-1])/np->clock_khz;
+			else
+				period = (4*div_10M[idiv-1])/np->clock_khz;
+		}
+		else
+			period = (((tp->sval>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+	}
+	else
+		period = 0xffff;
+
+	/* map period (tenths of ns) back to a SCSI sync factor */
+	if	(period <= 125)		*factor =   9;
+	else if	(period <= 250)		*factor =  10;
+	else if	(period <= 303)		*factor =  11;
+	else if	(period <= 500)		*factor =  12;
+	else				*factor = (period + 40 - 1) / 40;
+
+}
+
+
+/*==========================================================
+**
+** Set actual values, sync status and patch all ccbs of
+** a target according to new sync/wide agreement.
+**
+**==========================================================
+*/
+
+/*
+**	Apply the negotiated sync/wide/ultra settings of @target to the
+**	chip registers and patch every active CCB of that target so
+**	that pending (re)selections use the new agreement.
+*/
+static void ncr_set_sync_wide_status (ncb_p np, u_char target)
+{
+	ccb_p cp;
+	tcb_p tp = &np->target[target];
+
+	/*
+	**	set actual value and sync_status
+	**
+	**	TEMP register contains current scripts address
+	**	which is data type/direction/dependent.
+	*/
+	OUTB (nc_sxfer, tp->sval);
+	OUTB (nc_scntl3, tp->wval);
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+		OUTB (nc_scntl4, tp->uval);
+
+	/*
+	**	patch ALL ccbs of this target.
+	*/
+	for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+		if (cp->host_status == HS_IDLE)
+			continue;
+		if (cp->target != target)
+			continue;
+		cp->phys.select.sel_scntl3 = tp->wval;
+		cp->phys.select.sel_sxfer = tp->sval;
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+			cp->phys.select.sel_scntl4 = tp->uval;
+	}
+}
+
+/*==========================================================
+**
+** Switch sync mode for current job and it's target
+**
+**==========================================================
+*/
+
+/*
+**	Switch synchronous mode for the current job's target: sanitize
+**	the negotiated scntl3/sxfer/scntl4 values, derive the actual
+**	transfer period, store the new agreement in the tcb, announce
+**	it, and propagate it to the chip and all active CCBs.
+**	Target number is read back from the chip's SDID register.
+*/
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer,
+	u_char scntl4)
+{
+	tcb_p tp;
+	u_char target = INB (nc_sdid) & 0x0f;
+	u_char idiv;
+	u_char offset;
+
+	assert (cp);
+	if (!cp) return;
+
+	assert (target == (cp->target & 0xf));
+
+	tp = &np->target[target];
+
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		offset = sxfer & 0x3f;	/* bits 5-0 */
+		scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS);
+		scntl4 = (scntl4 & 0x80);
+	}
+	else {
+		offset = sxfer & 0x1f;	/* bits 4-0 */
+		if (!scntl3 || !offset)
+			scntl3 = np->rv_scntl3;
+
+		scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) |
+			(np->rv_scntl3 & 0x07);
+	}
+
+
+	/*
+	**	Deduce the value of controller sync period from scntl3.
+	**	period is in tenths of nano-seconds.
+	*/
+
+	idiv = ((scntl3 >> 4) & 0x7);
+	if ( offset && idiv) {
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+			/* Note: If extra data hold clocks are used,
+			 * the formulas below must be modified.
+			 * When scntl4 == 0, ST mode.
+			 */
+			if (scntl4 & 0x80)
+				tp->period = (2*div_10M[idiv-1])/np->clock_khz;
+			else
+				tp->period = (4*div_10M[idiv-1])/np->clock_khz;
+		}
+		else
+			tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+	}
+	else
+		tp->period = 0xffff;
+
+
+	/*
+	**	 Stop there if sync parameters are unchanged
+	*/
+	if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return;
+	tp->sval = sxfer;
+	tp->wval = scntl3;
+	tp->uval = scntl4;
+
+	/*
+	**	Bells and whistles   ;-)
+	**	Donnot announce negotiations due to auto-sense,
+	**	unless user really want us to be verbose. :)
+	*/
+	if ( bootverbose < 2 && (cp->host_flags & HF_AUTO_SENSE))
+		goto next;
+	PRINT_TARGET(np, target);
+	if (offset) {
+		unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+		unsigned mb10 = (f10 + tp->period/2) / tp->period;
+		char *scsi;
+
+		/*
+		**  Disable extended Sreq/Sack filtering
+		*/
+		if ((tp->period <= 2000) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+			OUTOFFB (nc_stest2, EXT);
+
+		/*
+		** Bells and whistles   ;-)
+		*/
+		if	(tp->period < 250)	scsi = "FAST-80";
+		else if	(tp->period < 500)	scsi = "FAST-40";
+		else if	(tp->period < 1000)	scsi = "FAST-20";
+		else if	(tp->period < 2000)	scsi = "FAST-10";
+		else				scsi = "FAST-5";
+
+		printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+			tp->widedone > 1 ? "WIDE " : "",
+			mb10 / 10, mb10 % 10, tp->period / 10, offset);
+	} else
+		printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+next:
+	/*
+	**	set actual value and sync_status
+	**	patch ALL ccbs of this target.
+	*/
+	ncr_set_sync_wide_status(np, target);
+}
+
+
+/*==========================================================
+**
+** Switch wide mode for current job and it's target
+** SCSI specs say: a SCSI device that accepts a WDTR
+** message shall reset the synchronous agreement to
+** asynchronous mode.
+**
+**==========================================================
+*/
+
+/*
+**	Switch wide mode for the current job's target.  Per the SCSI
+**	spec a device accepting WDTR resets the synchronous agreement
+**	to asynchronous, hence sxfer is zeroed when @ack is set.
+**	Target number is read back from the chip's SDID register.
+*/
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack)
+{
+	u_short target = INB (nc_sdid) & 0x0f;
+	tcb_p tp;
+	u_char	scntl3;
+	u_char	sxfer;
+
+	assert (cp);
+	if (!cp) return;
+
+	assert (target == (cp->target & 0xf));
+
+	tp = &np->target[target];
+	tp->widedone  =  wide+1;
+	scntl3 = (tp->wval & (~EWS)) | (wide ? EWS : 0);
+
+	sxfer = ack ? 0 : tp->sval;
+
+	/*
+	**	 Stop there if sync/wide parameters are unchanged
+	*/
+	if (tp->sval == sxfer && tp->wval == scntl3) return;
+	tp->sval = sxfer;
+	tp->wval = scntl3;
+
+	/*
+	**	Bells and whistles   ;-)
+	*/
+	if (bootverbose >= 2) {
+		PRINT_TARGET(np, target);
+		if (scntl3 & EWS)
+			printk ("WIDE SCSI (16 bit) enabled.\n");
+		else
+			printk ("WIDE SCSI disabled.\n");
+	}
+
+	/*
+	**	set actual value and sync_status
+	**	patch ALL ccbs of this target.
+	*/
+	ncr_set_sync_wide_status(np, target);
+}
+
+
+/*==========================================================
+**
+** Switch sync/wide mode for current job and it's target
+** PPR negotiations only
+**
+**==========================================================
+*/
+
+/*
+**	Switch sync AND wide mode for the current job's target in one
+**	step (PPR negotiations only).  Mirrors ncr_setsync() except
+**	that the wide bit comes from @wide rather than the stored wval.
+**	NOTE(review): the period computation and announcement code is
+**	duplicated from ncr_setsync() — keep the two in sync.
+*/
+static void ncr_setsyncwide (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer,
+	u_char scntl4, u_char wide)
+{
+	tcb_p tp;
+	u_char target = INB (nc_sdid) & 0x0f;
+	u_char idiv;
+	u_char offset;
+
+	assert (cp);
+	if (!cp) return;
+
+	assert (target == (cp->target & 0xf));
+
+	tp = &np->target[target];
+	tp->widedone  =  wide+1;
+
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+		(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		offset = sxfer & 0x3f;	/* bits 5-0 */
+		scntl3 = (scntl3 & 0xf0) | (wide ? EWS : 0);
+		scntl4 = (scntl4 & 0x80);
+	}
+	else {
+		offset = sxfer & 0x1f;	/* bits 4-0 */
+		if (!scntl3 || !offset)
+			scntl3 = np->rv_scntl3;
+
+		scntl3 = (scntl3 & 0xf0) | (wide ? EWS : 0) |
+			(np->rv_scntl3 & 0x07);
+	}
+
+
+	/*
+	**	Deduce the value of controller sync period from scntl3.
+	**	period is in tenths of nano-seconds.
+	*/
+
+	idiv = ((scntl3 >> 4) & 0x7);
+	if ( offset && idiv) {
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+			/* Note: If extra data hold clocks are used,
+			 * the formulas below must be modified.
+			 * When scntl4 == 0, ST mode.
+			 */
+			if (scntl4 & 0x80)
+				tp->period = (2*div_10M[idiv-1])/np->clock_khz;
+			else
+				tp->period = (4*div_10M[idiv-1])/np->clock_khz;
+		}
+		else
+			tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+	}
+	else
+		tp->period = 0xffff;
+
+
+	/*
+	**	 Stop there if sync parameters are unchanged
+	*/
+	if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return;
+	tp->sval = sxfer;
+	tp->wval = scntl3;
+	tp->uval = scntl4;
+
+	/*
+	**	Bells and whistles   ;-)
+	**	Donnot announce negotiations due to auto-sense,
+	**	unless user really want us to be verbose. :)
+	*/
+	if ( bootverbose < 2 && (cp->host_flags & HF_AUTO_SENSE))
+		goto next;
+	PRINT_TARGET(np, target);
+	if (offset) {
+		unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+		unsigned mb10 = (f10 + tp->period/2) / tp->period;
+		char *scsi;
+
+		/*
+		**  Disable extended Sreq/Sack filtering
+		*/
+		if ((tp->period <= 2000) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+			OUTOFFB (nc_stest2, EXT);
+
+		/*
+		** Bells and whistles   ;-)
+		*/
+		if	(tp->period < 250)	scsi = "FAST-80";
+		else if	(tp->period < 500)	scsi = "FAST-40";
+		else if	(tp->period < 1000)	scsi = "FAST-20";
+		else if	(tp->period < 2000)	scsi = "FAST-10";
+		else				scsi = "FAST-5";
+
+		printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+			tp->widedone > 1 ? "WIDE " : "",
+			mb10 / 10, mb10 % 10, tp->period / 10, offset);
+	} else
+		printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+next:
+	/*
+	**	set actual value and sync_status
+	**	patch ALL ccbs of this target.
+	*/
+	ncr_set_sync_wide_status(np, target);
+}
+
+
+
+
+/*==========================================================
+**
+** Switch tagged mode for a target.
+**
+**==========================================================
+*/
+
+/*
+**	Switch tagged command queuing mode for one lun: clamp the tag
+**	count against the SCSI driver's and our own limits, defer the
+**	mode switch while CCBs are active, patch the lun's reselect
+**	mini-script accordingly and announce the change.
+*/
+static void ncr_setup_tags (ncb_p np, u_char tn, u_char ln)
+{
+	tcb_p tp = &np->target[tn];
+	lcb_p lp = ncr_lp(np, tp, ln);
+	u_short reqtags, maxdepth;
+
+	/*
+	**	Just in case ...
+	*/
+	if ((!tp) || (!lp))
+		return;
+
+	/*
+	**	If SCSI device queue depth is not yet set, leave here.
+	*/
+	if (!lp->scdev_depth)
+		return;
+
+	/*
+	**	Donnot allow more tags than the SCSI driver can queue
+	**	for this device.
+	**	Donnot allow more tags than we can handle.
+	*/
+	maxdepth = lp->scdev_depth;
+	if (maxdepth > lp->maxnxs)	maxdepth    = lp->maxnxs;
+	if (lp->maxtags > maxdepth)	lp->maxtags = maxdepth;
+	if (lp->numtags > maxdepth)	lp->numtags = maxdepth;
+
+	/*
+	**	only devices conformant to ANSI Version >= 2
+	**	only devices capable of tagged commands
+	**	only if enabled by user ..
+	*/
+	if ((lp->inq_byte7 & INQ7_QUEUE) && lp->numtags > 1) {
+		reqtags = lp->numtags;
+	} else {
+		reqtags = 1;
+	};
+
+	/*
+	**	Update max number of tags
+	*/
+	lp->numtags = reqtags;
+	if (lp->numtags > lp->maxtags)
+		lp->maxtags = lp->numtags;
+
+	/*
+	**	If we want to switch tag mode, we must wait
+	**	for no CCB to be active.
+	*/
+	if	(reqtags > 1 && lp->usetags) {	 /* Stay in tagged mode    */
+		if (lp->queuedepth == reqtags)	 /* Already announced	   */
+			return;
+		lp->queuedepth	= reqtags;
+	}
+	else if	(reqtags <= 1 && !lp->usetags) { /* Stay in untagged mode  */
+		lp->queuedepth	= reqtags;
+		return;
+	}
+	else {					 /* Want to switch tag mode */
+		if (lp->busyccbs)		 /* If not yet safe, return */
+			return;
+		lp->queuedepth	= reqtags;
+		lp->usetags	= reqtags > 1 ? 1 : 0;
+	}
+
+	/*
+	**	Patch the lun mini-script, according to tag mode.
+	*/
+	lp->resel_task = lp->usetags?
+			cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_tag)) :
+			cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag));
+
+	/*
+	**	Announce change to user.
+	*/
+	if (bootverbose) {
+		PRINT_LUN(np, tn, ln);
+		if (lp->usetags)
+			printk("tagged command queue depth set to %d\n", reqtags);
+		else
+			printk("tagged command queueing disabled\n");
+	}
+}
+
+/*----------------------------------------------------
+**
+** handle user commands
+**
+**----------------------------------------------------
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+/*
+**	Execute a user command previously stored in np->user (set via
+**	the /proc control interface).  Global commands are handled
+**	directly; all others are applied to every target selected in
+**	the np->user.target bitmask.  Clears np->user.cmd when done.
+*/
+static void ncr_usercmd (ncb_p np)
+{
+	u_char t;
+	tcb_p tp;
+	int ln;
+	u_long	size;
+
+	switch (np->user.cmd) {
+	case 0: return;
+
+	case UC_SETDEBUG:
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+		ncr_debug = np->user.data;
+#endif
+		break;
+
+	case UC_SETORDER:
+		np->order = np->user.data;
+		break;
+
+	case UC_SETVERBOSE:
+		np->verbose = np->user.data;
+		break;
+
+	default:
+		/*
+		**	We assume that other commands apply to targets.
+		**	This should always be the case and avoid the below
+		**	4 lines to be repeated 5 times.
+		*/
+		for (t = 0; t < MAX_TARGET; t++) {
+			if (!((np->user.target >> t) & 1))
+				continue;
+			tp = &np->target[t];
+
+			switch (np->user.cmd) {
+
+			case UC_SETSYNC:
+				tp->usrsync = np->user.data;
+				ncr_negotiate (np, tp);
+				break;
+
+			case UC_SETWIDE:
+				size = np->user.data;
+				if (size > np->maxwide)
+					size=np->maxwide;
+				tp->usrwide = size;
+				ncr_negotiate (np, tp);
+				break;
+
+			case UC_SETTAGS:
+				tp->usrtags = np->user.data;
+				for (ln = 0; ln < MAX_LUN; ln++) {
+					lcb_p lp;
+					lp = ncr_lp(np, tp, ln);
+					if (!lp)
+						continue;
+					lp->numtags = np->user.data;
+					lp->maxtags = lp->numtags;
+					ncr_setup_tags (np, t, ln);
+				}
+				break;
+
+			case UC_RESETDEV:
+				tp->to_reset = 1;
+				/* signal the SCRIPTS processor to stop */
+				np->istat_sem = SEM;
+				OUTB (nc_istat, SIGP|SEM);
+				break;
+
+			case UC_CLEARDEV:
+				for (ln = 0; ln < MAX_LUN; ln++) {
+					lcb_p lp;
+					lp = ncr_lp(np, tp, ln);
+					if (lp)
+						lp->to_clear = 1;
+				}
+				np->istat_sem = SEM;
+				OUTB (nc_istat, SIGP|SEM);
+				break;
+
+			case UC_SETFLAG:
+				tp->usrflag = np->user.data;
+				break;
+			}
+		}
+		break;
+	}
+	np->user.cmd=0;
+}
+#endif
+
+/*==========================================================
+**
+**
+** ncr timeout handler.
+**
+**
+**==========================================================
+**
+** Misused to keep the driver running when
+** interrupts are not configured correctly.
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	Periodic driver timer.  Re-arms itself every tick and:
+**	- synchronizes with the driver release (unload) process,
+**	- resumes command processing once a bus settle delay expires,
+**	- optionally reaps lost completions / polls interrupt status
+**	  when built for broken PCI bridges.
+*/
+static void ncr_timeout (ncb_p np)
+{
+	u_long	thistime = ktime_get(0);
+
+	/*
+	**	If release process in progress, let's go
+	**	Set the release stage from 1 to 2 to synchronize
+	**	with the release process.
+	*/
+
+	if (np->release_stage) {
+		if (np->release_stage == 1) np->release_stage = 2;
+		return;
+	}
+
+	/* Re-arm before doing any work so the timer keeps running */
+#ifdef SCSI_NCR_PCIQ_BROKEN_INTR
+	np->timer.expires = ktime_get((HZ+9)/10);
+#else
+	np->timer.expires = ktime_get(SCSI_NCR_TIMER_INTERVAL);
+#endif
+	add_timer(&np->timer);
+
+	/*
+	**	If we are resetting the ncr, wait for settle_time before
+	**	clearing it. Then command processing will be resumed.
+	*/
+	if (np->settle_time) {
+		if (np->settle_time <= thistime) {
+			if (bootverbose > 1)
+				printk("%s: command processing resumed\n", ncr_name(np));
+			np->settle_time = 0;
+			requeue_waiting_list(np);
+		}
+		return;
+	}
+
+	/*
+	**	Nothing to do for now, but that may come.
+	*/
+	if (np->lasttime + 4*HZ < thistime) {
+		np->lasttime = thistime;
+	}
+
+#ifdef SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+	/*
+	**	Some way-broken PCI bridges may lead to
+	**	completions being lost when the clearing
+	**	of the INTFLY flag by the CPU occurs
+	**	concurrently with the chip raising this flag.
+	**	If this ever happen, lost completions will
+	**	be reaped here.
+	*/
+	ncr_wakeup_done(np);
+#endif
+
+#ifdef SCSI_NCR_PCIQ_BROKEN_INTR
+	if (INB(nc_istat) & (INTF|SIP|DIP)) {
+
+		/*
+		**	Process pending interrupts.
+		*/
+		if (DEBUG_FLAGS & DEBUG_TINY) printk ("{");
+		ncr_exception (np);
+		if (DEBUG_FLAGS & DEBUG_TINY) printk ("}");
+	}
+#endif /* SCSI_NCR_PCIQ_BROKEN_INTR */
+}
+
+/*==========================================================
+**
+** log message for real hard errors
+**
+** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)."
+** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf."
+**
+** exception register:
+** ds: dstat
+** si: sist
+**
+** SCSI bus lines:
+** so: control lines as driven by NCR.
+** si: control lines as seen by NCR.
+** sd: scsi data lines as seen by NCR.
+**
+** wide/fastmode:
+** sxfer: (see the manual)
+** scntl3: (see the manual)
+**
+** current script command:
+** dsp: script address (relative to start of script).
+** dbc: first word of script command.
+**
+**	First 24 registers of the chip:
+** r0..rf
+**
+**==========================================================
+*/
+
+/*
+**	Log a hard error: identify which SCRIPTS area (script, scripth
+**	or plain memory) the DSP points into, print the exception
+**	registers, the faulting SCRIPTS instruction if addressable,
+**	and a dump of the first 24 chip registers.
+*/
+static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat)
+{
+	u_int32	dsp;
+	int	script_ofs;
+	int	script_size;
+	char	*script_name;
+	u_char	*script_base;
+	int	i;
+
+	dsp	= INL (nc_dsp);
+
+	/* Resolve the DSP against the two SCRIPTS images */
+	if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+		script_ofs	= dsp - np->p_script;
+		script_size	= sizeof(struct script);
+		script_base	= (u_char *) np->script0;
+		script_name	= "script";
+	}
+	else if (np->p_scripth < dsp &&
+		 dsp <= np->p_scripth + sizeof(struct scripth)) {
+		script_ofs	= dsp - np->p_scripth;
+		script_size	= sizeof(struct scripth);
+		script_base	= (u_char *) np->scripth0;
+		script_name	= "scripth";
+	} else {
+		/* DSP points outside both images: report the raw address */
+		script_ofs	= dsp;
+		script_size	= 0;
+		script_base	= 0;
+		script_name	= "mem";
+	}
+
+	printk ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
+		ncr_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
+		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl),
+		(unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs,
+		(unsigned)INL (nc_dbc));
+
+	/* Print the faulting instruction word when the offset is valid */
+	if (((script_ofs & 3) == 0) &&
+	    (unsigned)script_ofs < script_size) {
+		printk ("%s: script cmd = %08x\n", ncr_name(np),
+			scr_to_cpu((int) *(ncrcmd *)(script_base + script_ofs)));
+	}
+
+	printk ("%s: regdump:", ncr_name(np));
+	for (i=0; i<24;i++)
+	    printk (" %02x", (unsigned)INB_OFF(i));
+	printk (".\n");
+}
+
+/*============================================================
+**
+** ncr chip exception handler.
+**
+**============================================================
+**
+** In normal situations, interrupt conditions occur one at
+** a time. But when something bad happens on the SCSI BUS,
+** the chip may raise several interrupt flags before
+**	stopping and interrupting the CPU. The additional
+** interrupt flags are stacked in some extra registers
+** after the SIP and/or DIP flag has been raised in the
+** ISTAT. After the CPU has read the interrupt condition
+** flag from SIST or DSTAT, the chip unstacks the other
+** interrupt flags and sets the corresponding bits in
+** SIST or DSTAT. Since the chip starts stacking once the
+** SIP or DIP flag is set, there is a small window of time
+** where the stacking does not occur.
+**
+** Typically, multiple interrupt conditions may happen in
+** the following situations:
+**
+** - SCSI parity error + Phase mismatch (PAR|MA)
+**	    When a parity error is detected in input phase
+** and the device switches to msg-in phase inside a
+** block MOV.
+** - SCSI parity error + Unexpected disconnect (PAR|UDC)
+** When a stupid device does not want to handle the
+** recovery of an SCSI parity error.
+** - Some combinations of STO, PAR, UDC, ...
+** When using non compliant SCSI stuff, when user is
+** doing non compliant hot tampering on the BUS, when
+** something really bad happens to a device, etc ...
+**
+** The heuristic suggested by SYMBIOS to handle
+** multiple interrupts is to try unstacking all
+** interrupts conditions and to handle them on some
+** priority based on error severity.
+** This will work when the unstacking has been
+** successful, but we cannot be 100 % sure of that,
+** since the CPU may have been faster to unstack than
+** the chip is able to stack. Hmmm ... But it seems that
+** such a situation is very unlikely to happen.
+**
+**	If this happens, for example an STO caught by the CPU
+**	then a UDC happening before the CPU has restarted
+** the SCRIPTS, the driver may wrongly complete the
+** same command on UDC, since the SCRIPTS didn't restart
+** and the DSA still points to the same command.
+** We avoid this situation by setting the DSA to an
+** invalid value when the CCB is completed and before
+** restarting the SCRIPTS.
+**
+** Another issue is that we need some section of our
+** recovery procedures to be somehow uninterruptible and
+**	that the SCRIPTS processor does not provide such a
+**	feature. For this reason, we handle recovery preferably
+** from the C code and check against some SCRIPTS
+** critical sections from the C code.
+**
+** Hopefully, the interrupt handling of the driver is now
+**	able to resist weird BUS error conditions, but do not
+** ask me for any guarantee that it will never fail. :-)
+** Use at your own decision and risk.
+**
+**============================================================
+*/
+
+/*
+**	Main chip exception handler.  Reads and unstacks all pending
+**	interrupt conditions from ISTAT/SIST/DSTAT, then dispatches:
+**	clean conditions (MA, PAR, SIR, SSI) first, recoverable ones
+**	(RST, SBMC, STO, UDC) next, and resets the chip for anything
+**	it cannot recover from.
+*/
+void ncr_exception (ncb_p np)
+{
+	u_char	istat, istatc;
+	u_char	dstat;
+	u_short	sist;
+	int	i;
+
+	/*
+	**	interrupt on the fly ?
+	**
+	**	A `dummy read' is needed to ensure that the
+	**	clear of the INTF flag reaches the device
+	**	before the scanning of the DONE queue.
+	*/
+	istat = INB (nc_istat);
+	if (istat & INTF) {
+		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
+		istat = INB (nc_istat);		/* DUMMY READ */
+		if (DEBUG_FLAGS & DEBUG_TINY) printk ("F ");
+		(void)ncr_wakeup_done (np);
+	};
+
+	if (!(istat & (SIP|DIP)))
+		return;
+
+#if 0	/* We should never get this one */
+	if (istat & CABRT)
+		OUTB (nc_istat, CABRT);
+#endif
+
+	/*
+	**	Steinbach's Guideline for Systems Programming:
+	**	Never test for an error condition you don't know how to handle.
+	*/
+
+	/*=========================================================
+	**	PAR and MA interrupts may occur at the same time,
+	**	and we need to know of both in order to handle
+	**	this situation properly. We try to unstack SCSI
+	**	interrupts for that reason. BTW, I dislike a LOT
+	**	such a loop inside the interrupt routine.
+	**	Even if DMA interrupt stacking is very unlikely to
+	**	happen, we also try unstacking these ones, since
+	**	this has no performance impact.
+	**=========================================================
+	*/
+	sist	= 0;
+	dstat	= 0;
+	istatc	= istat;
+	do {
+		if (istatc & SIP)
+			sist  |= INW (nc_sist);
+		if (istatc & DIP)
+			dstat |= INB (nc_dstat);
+		istatc = INB (nc_istat);
+		istat |= istatc;
+	} while (istatc & (SIP|DIP));
+
+	if (DEBUG_FLAGS & DEBUG_TINY)
+		printk ("<%d|%x:%x|%x:%x>",
+			(int)INB(nc_scr0),
+			dstat,sist,
+			(unsigned)INL(nc_dsp),
+			(unsigned)INL(nc_dbc));
+
+	/*
+	**	On paper, a memory barrier may be needed here.
+	**	And since we are paranoid ... :)
+	*/
+	MEMORY_BARRIER();
+
+	/*=========================================================
+	**	First, interrupts we want to service cleanly.
+	**
+	**	Phase mismatch (MA) is the most frequent interrupt
+	**	for chip earlier than the 896 and so we have to service
+	**	it as quickly as possible.
+	**	A SCSI parity error (PAR) may be combined with a phase
+	**	mismatch condition (MA).
+	**	Programmed interrupts (SIR) are used to call the C code
+	**	from SCRIPTS.
+	**	The single step interrupt (SSI) is not used in this
+	**	driver.
+	**=========================================================
+	*/
+
+	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
+	    !(dstat & (MDPE|BF|ABRT|IID))) {
+		if	(sist & PAR)	ncr_int_par (np, sist);
+		else if (sist & MA)	ncr_int_ma (np);
+		else if (dstat & SIR)	ncr_int_sir (np);
+		else if (dstat & SSI)	OUTONB_STD ();
+		else			goto unknown_int;
+		return;
+	};
+
+	/*=========================================================
+	**	Now, interrupts that do not happen in normal
+	**	situations and that we may need to recover from.
+	**
+	**	On SCSI RESET (RST), we reset everything.
+	**	On SCSI BUS MODE CHANGE (SBMC), we complete all
+	**	active CCBs with RESET status, prepare all devices
+	**	for negotiating again and restart the SCRIPTS.
+	**	On STO and UDC, we complete the CCB with the corres-
+	**	ponding status and restart the SCRIPTS.
+	**=========================================================
+	*/
+
+	if (sist & RST) {
+		ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET);
+		return;
+	};
+
+	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
+	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */
+
+	if (!(sist  & (GEN|HTH|SGE)) &&
+	    !(dstat & (MDPE|BF|ABRT|IID))) {
+		if	(sist & SBMC)	ncr_int_sbmc (np);
+		else if (sist & STO)	ncr_int_sto (np);
+		else if (sist & UDC)	ncr_int_udc (np);
+		else			goto unknown_int;
+		return;
+	};
+
+	/*=========================================================
+	**	Now, interrupts we are not able to recover cleanly.
+	**
+	**	Do the register dump.
+	**	Log message for hard errors.
+	**	Reset everything.
+	**=========================================================
+	*/
+	/* Snapshot the chip registers at most once every 10 seconds */
+	if (ktime_exp(np->regtime)) {
+		np->regtime = ktime_get(10*HZ);
+		for (i = 0; i<sizeof(np->regdump); i++)
+			((char*)&np->regdump)[i] = INB_OFF(i);
+		np->regdump.nc_dstat = dstat;
+		np->regdump.nc_sist  = sist;
+	};
+
+	ncr_log_hard_error(np, sist, dstat);
+
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+	    (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		u_char ctest4_o, ctest4_m;
+		u_char shadow;
+
+		/*
+		 * Get shadow register data
+		 * Write 1 to ctest4
+		 */
+		ctest4_o = INB(nc_ctest4);
+
+		OUTB(nc_ctest4, ctest4_o | 0x10);
+
+		ctest4_m = INB(nc_ctest4);
+		shadow = INW_OFF(0x42);
+
+		OUTB(nc_ctest4, ctest4_o);
+
+		printk("%s: ctest4/sist original 0x%x/0x%X  mod: 0x%X/0x%x\n",
+			ncr_name(np), ctest4_o, sist, ctest4_m, shadow);
+	}
+
+	if ((sist & (GEN|HTH|SGE)) ||
+	    (dstat & (MDPE|BF|ABRT|IID))) {
+		ncr_start_reset(np);
+		return;
+	};
+
+unknown_int:
+	/*=========================================================
+	**	We just miss the cause of the interrupt. :(
+	**	Print a message. The timeout will do the real work.
+	**=========================================================
+	*/
+	printk(	"%s: unknown interrupt(s) ignored, "
+		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
+		ncr_name(np), istat, dstat, sist);
+}
+
+
+/*==========================================================
+**
+** generic recovery from scsi interrupt
+**
+**==========================================================
+**
+** The doc says that when the chip gets an SCSI interrupt,
+** it tries to stop in an orderly fashion, by completing
+** an instruction fetch that had started or by flushing
+** the DMA fifo for a write to memory that was executing.
+** Such a fashion is not enough to know if the instruction
+** that was just before the current DSP value has been
+** executed or not.
+**
+** There are 3 small SCRIPTS sections that deal with the
+** start queue and the done queue that may break any
+**	assumption from the C code if we are interrupted
+** inside, so we reset if it happens. Btw, since these
+** SCRIPTS sections are executed while the SCRIPTS hasn't
+** started SCSI operations, it is very unlikely to happen.
+**
+** All the driver data structures are supposed to be
+** allocated from the same 4 GB memory window, so there
+** is a 1 to 1 relationship between DSA and driver data
+** structures. Since we are careful :) to invalidate the
+** DSA when we complete a command or when the SCRIPTS
+** pushes a DSA into a queue, we can trust it when it
+** points to a CCB.
+**
+**----------------------------------------------------------
+*/
+/*
+**	Generic recovery from a SCSI interrupt: complete the current
+**	CCB (if the DSA identifies one) with the given host status and
+**	restart the SCRIPTS, unless the interrupt hit one of the short
+**	SCRIPTS critical sections, in which case we reset everything.
+*/
+static void ncr_recover_scsi_int (ncb_p np, u_char hsts)
+{
+	u_int32	dsp	= INL (nc_dsp);
+	u_int32	dsa	= INL (nc_dsa);
+	ccb_p	cp	= ncr_ccb_from_dsa(np, dsa);
+	int	in_critical;
+
+	/*
+	**	Were we interrupted inside one of the SCRIPTS
+	**	critical paths (start queue / done queue handling)?
+	*/
+	in_critical =
+		(dsp > NCB_SCRIPT_PHYS (np, getjob_begin) &&
+		 dsp < NCB_SCRIPT_PHYS (np, getjob_end) + 1) ||
+		(dsp > NCB_SCRIPT_PHYS (np, ungetjob) &&
+		 dsp < NCB_SCRIPT_PHYS (np, reselect) + 1) ||
+		(dsp > NCB_SCRIPTH_PHYS (np, sel_for_abort) &&
+		 dsp < NCB_SCRIPTH_PHYS (np, sel_for_abort_1) + 1) ||
+		(dsp > NCB_SCRIPT_PHYS (np, done) &&
+		 dsp < NCB_SCRIPT_PHYS (np, done_end) + 1);
+
+	if (in_critical) {
+		/* Our queue bookkeeping cannot be trusted: reset all. */
+		ncr_start_reset(np);
+		return;
+	}
+
+	/*
+	**	Safe to trust the DSA: complete the CCB it points to,
+	**	invalidate it, clear the fifos and restart the SCRIPTS.
+	*/
+	if (cp) {
+		cp->host_status = hsts;
+		ncr_complete (np, cp);
+	}
+	OUTL (nc_dsa, DSA_INVALID);
+	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
+	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */
+	OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for selection timeout
+**
+**==========================================================
+**
+** There seems to be a bug in the 53c810.
+** Although a STO-Interrupt is pending,
+** it continues executing script commands.
+** But it will fail and interrupt (IID) on
+** the next instruction where it's looking
+** for a valid phase.
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	Selection timeout handler.  Attempt the soft recovery path
+**	when the SCRIPTS stopped right after wait-for-selection-done,
+**	or when the user disabled strict recovery; otherwise reset.
+*/
+void ncr_int_sto (ncb_p np)
+{
+	u_int32	scripts_pc = INL (nc_dsp);
+	int	strict = driver_setup.recovery & 1;
+
+	if (DEBUG_FLAGS & DEBUG_TINY)
+		printk ("T");
+
+	if (strict &&
+	    scripts_pc != NCB_SCRIPT_PHYS (np, wf_sel_done) + 8)
+		ncr_start_reset(np);
+	else
+		ncr_recover_scsi_int(np, HS_SEL_TIMEOUT);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for unexpected disconnect
+**
+**==========================================================
+**
+**----------------------------------------------------------
+*/
+/*
+**	Unexpected disconnect handler.
+**
+**	Fix up: some disks respond to a PPR negotiation with a bus
+**	free instead of a message reject, so disable PPR negotiation
+**	for the target if this was the first attempt.
+**
+**	Note: ncr_ccb_from_dsa() may return NULL when the DSA does
+**	not identify a CCB (the sibling handlers ncr_int_par() and
+**	ncr_int_ma() both check for this), so guard the target
+**	dereference instead of crashing on a nexus-less disconnect.
+*/
+void ncr_int_udc (ncb_p np)
+{
+	u_int32 dsa = INL (nc_dsa);
+	ccb_p cp = ncr_ccb_from_dsa(np, dsa);
+
+	if (cp) {
+		tcb_p tp = &np->target[cp->target];
+
+		if (tp->ppr_negotiation == 1)
+			tp->ppr_negotiation = 0;
+	}
+
+	printk ("%s: unexpected disconnect\n", ncr_name(np));
+	ncr_recover_scsi_int(np, HS_UNEXPECTED);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI bus mode change
+**
+**==========================================================
+**
+** spi2-r12 11.2.3 says a transceiver mode change must
+** generate a reset event and a device that detects a reset
+** event shall initiate a hard reset. It says also that a
+** device that detects a mode change shall set data transfer
+** mode to eight bit asynchronous, etc...
+** So, just resetting should be enough.
+**
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	SCSI bus mode change handler: record and report the new
+**	transceiver mode, then suspend command processing for one
+**	second and reinitialize everything except the chip.
+*/
+static void ncr_int_sbmc (ncb_p np)
+{
+	u_char	new_mode;
+
+	/* Latch the new transceiver mode from STEST4. */
+	new_mode = INB (nc_stest4) & SMODE;
+
+	/* Report the transition before overwriting the stored mode. */
+	printk("%s: SCSI bus mode change from %x to %x.\n",
+		ncr_name(np), np->scsi_mode, new_mode);
+	np->scsi_mode = new_mode;
+
+	/*
+	**	Suspend command processing for 1 second and
+	**	reinitialize all except the chip.
+	*/
+	np->settle_time	= ktime_get(1*HZ);
+	ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI parity error.
+**
+**==========================================================
+**
+** When the chip detects a SCSI parity error and is
+** currently executing a (CH)MOV instruction, it does
+** not interrupt immediately, but tries to finish the
+** transfer of the current scatter entry before
+** interrupting. The following situations may occur:
+**
+** - The complete scatter entry has been transferred
+** without the device having changed phase.
+** The chip will then interrupt with the DSP pointing
+** to the instruction that follows the MOV.
+**
+** - A phase mismatch occurs before the MOV finished
+** and phase errors are to be handled by the C code.
+** The chip will then interrupt with both PAR and MA
+** conditions set.
+**
+** - A phase mismatch occurs before the MOV finished and
+** phase errors are to be handled by SCRIPTS (895A or 896).
+** The chip will load the DSP with the phase mismatch
+** JUMP address and interrupt the host processor.
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	SCSI parity error handler.  Decides, from the interrupted
+**	instruction and bus state, whether the error can be recovered
+**	by sending a parity/error message to the device and resuming
+**	the SCRIPTS, or whether the bus must be reset.
+*/
+static void ncr_int_par (ncb_p np, u_short sist)
+{
+	u_char	hsts	= INB (HS_PRT);
+	u_int32	dsp	= INL (nc_dsp);
+	u_int32	dbc	= INL (nc_dbc);
+	u_int32	dsa	= INL (nc_dsa);
+	u_char	sbcl	= INB (nc_sbcl);
+	u_char	cmd	= dbc >> 24;	/* Opcode byte of interrupted instruction */
+	int phase	= cmd & 7;	/* SCSI phase encoded in the opcode */
+	ccb_p	cp	= ncr_ccb_from_dsa(np, dsa);
+
+	printk("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
+		ncr_name(np), hsts, dbc, sbcl);
+
+	/*
+	**	Check that the chip is connected to the SCSI BUS.
+	*/
+	if (!(INB (nc_scntl1) & ISCON)) {
+		if (!(driver_setup.recovery & 1)) {
+			ncr_recover_scsi_int(np, HS_FAIL);
+			return;
+		}
+		goto reset_all;
+	}
+
+	/*
+	**	If the nexus is not clearly identified, reset the bus.
+	**	We will try to do better later.
+	*/
+	if (!cp)
+		goto reset_all;
+
+	/*
+	**	Check instruction was a MOV, direction was INPUT and
+	**	ATN is asserted.
+	*/
+	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
+		goto reset_all;
+
+	/*
+	**	Keep track of the parity error.
+	*/
+	OUTONB (HF_PRT, HF_EXT_ERR);
+	cp->xerr_status |= XE_PARITY_ERR;
+
+	/*
+	**	Prepare the message to send to the device.
+	**	(MSG IN phase gets MESSAGE PARITY, other phases
+	**	get INITIATOR DETECTED ERROR.)
+	*/
+	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
+
+#ifdef	SCSI_NCR_INTEGRITY_CHECKING
+	/*
+	**	Save error message. For integrity check use only.
+	*/
+	if (np->check_integrity)
+		np->check_integ_par = np->msgout[0];
+#endif
+
+	/*
+	**	If the old phase was DATA IN or DT DATA IN phase,
+	**	we have to deal with the 3 situations described above.
+	**	For other input phases (MSG IN and STATUS), the device
+	**	must resend the whole thing that failed parity checking
+	**	or signal error. So, jumping to dispatcher should be OK.
+	*/
+	if ((phase == 1) || (phase == 5)) {
+		/* Phase mismatch handled by SCRIPTS */
+		if (dsp == NCB_SCRIPTH_PHYS (np, pm_handle))
+			OUTL_DSP (dsp);
+		/* Phase mismatch handled by the C code */
+		else if (sist & MA)
+			ncr_int_ma (np);
+		/* No phase mismatch occurred */
+		else {
+			OUTL (nc_temp, dsp);
+			OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch));
+		}
+	}
+	else
+		OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+	return;
+
+reset_all:
+	ncr_start_reset(np);
+	return;
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for phase errors.
+**
+**
+**==========================================================
+**
+** We have to construct a new transfer descriptor,
+** to transfer the rest of the current block.
+**
+**----------------------------------------------------------
+*/
+
+/*
+**	Phase mismatch (MA) handler.  Computes how much of the
+**	interrupted MOVE actually transferred (accounting for bytes
+**	still held in the chip fifos), builds a phase-mismatch context
+**	describing the untransferred remainder, and restarts the
+**	SCRIPTS at the appropriate address.  Unexpected phase changes
+**	outside data phases are dispatched near the end.
+*/
+static void ncr_int_ma (ncb_p np)
+{
+	u_int32	dbc;
+	u_int32	rest;
+	u_int32	dsp;
+	u_int32	dsa;
+	u_int32	nxtdsp;
+	u_int32	*vdsp;
+	u_int32	oadr, olen;
+	u_int32	*tblp;
+	u_int32	newcmd;
+	u_int	delta;
+	u_char	cmd;
+	u_char	hflags, hflags0;
+	struct pm_ctx *pm;
+	ccb_p	cp;
+
+	dsp	= INL (nc_dsp);
+	dbc	= INL (nc_dbc);
+	dsa	= INL (nc_dsa);
+
+	cmd	= dbc >> 24;		/* Opcode of the interrupted instruction */
+	rest	= dbc & 0xffffff;	/* Byte count not yet transferred */
+	delta	= 0;
+
+	/*
+	**	locate matching cp.
+	*/
+	cp = ncr_ccb_from_dsa(np, dsa);
+
+	if (DEBUG_FLAGS & DEBUG_PHASE)
+		printk("CCB = %2x %2x %2x %2x %2x %2x\n",
+			cp->cmd->cmnd[0], cp->cmd->cmnd[1], cp->cmd->cmnd[2],
+			cp->cmd->cmnd[3], cp->cmd->cmnd[4], cp->cmd->cmnd[5]);
+
+	/*
+	**	Do not take into account dma fifo and various buffers in
+	**	INPUT phase since the chip flushes everything before
+	**	raising the MA interrupt for interrupted INPUT phases.
+	**	For DATA IN phase, we will check for the SWIDE later.
+	*/
+	if ((cmd & 7) != 1 && (cmd & 7) != 5) {
+		u_int32 dfifo;
+		u_char ss0, ss2;
+
+		/*
+		**  If C1010, DFBC contains number of bytes in DMA fifo.
+		**  else read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
+		*/
+		if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+			delta = INL(nc_dfbc) & 0xffff;
+		else {
+			dfifo = INL(nc_dfifo);
+
+			/*
+			**	Calculate remaining bytes in DMA fifo.
+			**	C1010 - always large fifo, value in dfbc
+			**	Otherwise, (CTEST5 = dfifo >> 16)
+			*/
+			if (dfifo & (DFS << 16))
+				delta = ((((dfifo >> 8) & 0x300) |
+					  (dfifo & 0xff)) - rest) & 0x3ff;
+			else
+				delta = ((dfifo & 0xff) - rest) & 0x7f;
+
+			/*
+			**	The data in the dma fifo has not been
+			**	transferred to the target -> add the amount
+			**	to the rest and clear the data.
+			**	Check the sstat2 register in case of wide
+			**	transfer.
+			*/
+
+		}
+
+		/* Account for bytes still held in the output latches */
+		rest += delta;
+		ss0  = INB (nc_sstat0);
+		if (ss0 & OLF) rest++;
+		if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss0 & ORF))
+			rest++;
+		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
+			ss2 = INB (nc_sstat2);
+			if (ss2 & OLF1) rest++;
+			if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+				(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss2 & ORF))
+				rest++;
+		};
+
+		/*
+		**	Clear fifos.
+		*/
+		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
+		OUTB (nc_stest3, TE|CSF);		/* scsi fifo */
+	}
+
+	/*
+	**	log the information
+	*/
+
+	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+		printk ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
+			(unsigned) rest, (unsigned) delta);
+
+	/*
+	**	try to find the interrupted script command,
+	**	and the address at which to continue.
+	**	(DSP points just past the 8-byte instruction, hence -8.)
+	*/
+	vdsp	= 0;
+	nxtdsp	= 0;
+	if	(dsp >  np->p_script &&
+		 dsp <= np->p_script + sizeof(struct script)) {
+		vdsp = (u_int32 *)((char*)np->script0 + (dsp-np->p_script-8));
+		nxtdsp = dsp;
+	}
+	else if	(dsp >  np->p_scripth &&
+		 dsp <= np->p_scripth + sizeof(struct scripth)) {
+		vdsp = (u_int32 *)((char*)np->scripth0 + (dsp-np->p_scripth-8));
+		nxtdsp = dsp;
+	}
+
+	/*
+	**	log the information
+	*/
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		printk ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
+	};
+
+	if (!vdsp) {
+		printk ("%s: interrupted SCRIPT address not found.\n",
+			ncr_name (np));
+		goto reset_all;
+	}
+
+	if (!cp) {
+		printk ("%s: SCSI phase error fixup: CCB already dequeued.\n",
+			ncr_name (np));
+		goto reset_all;
+	}
+
+	/*
+	**	get old startaddress and old length.
+	*/
+
+	oadr = scr_to_cpu(vdsp[1]);
+
+	if (cmd & 0x10) {	/* Table indirect */
+		tblp = (u_int32 *) ((char*) &cp->phys + oadr);
+		olen = scr_to_cpu(tblp[0]);
+		oadr = scr_to_cpu(tblp[1]);
+	} else {
+		tblp = (u_int32 *) 0;
+		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+	};
+
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		printk ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+			tblp,
+			(unsigned) olen,
+			(unsigned) oadr);
+	};
+
+	/*
+	**	check cmd against assumed interrupted script command.
+	**	If dt data phase, the MOVE instruction hasn't bit 4 of
+	**	the phase.
+	*/
+
+	if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
+		PRINT_ADDR(cp->cmd);
+		printk ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
+			(unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
+
+		goto reset_all;
+	};
+
+	/*
+	**	if old phase not dataphase, leave here.
+	**	C/D line is low if data.
+	*/
+
+	if (cmd & 0x02) {
+		PRINT_ADDR(cp->cmd);
+		printk ("phase change %x-%x %d@%08x resid=%d.\n",
+			cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
+			(unsigned)oadr, (unsigned)rest);
+		goto unexpected_phase;
+	};
+
+	/*
+	**	Choose the correct PM save area.
+	**
+	**	Look at the PM_SAVE SCRIPT if you want to understand
+	**	this stuff. The equivalent code is implemented in
+	**	SCRIPTS for the 895A and 896 that are able to handle
+	**	PM from the SCRIPTS processor.
+	*/
+
+	hflags0 = INB (HF_PRT);
+	hflags = hflags0;
+
+	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
+		if (hflags & HF_IN_PM0)
+			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
+		else if	(hflags & HF_IN_PM1)
+			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
+
+		if (hflags & HF_DP_SAVED)
+			hflags ^= HF_ACT_PM;
+	}
+
+	if (!(hflags & HF_ACT_PM)) {
+		pm = &cp->phys.pm0;
+		newcmd = NCB_SCRIPT_PHYS(np, pm0_data);
+	}
+	else {
+		pm = &cp->phys.pm1;
+		newcmd = NCB_SCRIPT_PHYS(np, pm1_data);
+	}
+
+	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
+	if (hflags != hflags0)
+		OUTB (HF_PRT, hflags);
+
+	/*
+	**	fill in the phase mismatch context
+	*/
+
+	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
+	pm->sg.size = cpu_to_scr(rest);
+	pm->ret     = cpu_to_scr(nxtdsp);
+
+	/*
+	**	If we have a SWIDE,
+	**	- prepare the address to write the SWIDE from SCRIPTS,
+	**	- compute the SCRIPTS address to restart from,
+	**	- move current data pointer context by one byte.
+	*/
+	nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+	if ( ((cmd & 7) == 1 || (cmd & 7) == 5)
+		&& cp && (cp->phys.select.sel_scntl3 & EWS) &&
+	    (INB (nc_scntl2) & WSR)) {
+		u32 tmp;
+
+#ifdef SYM_DEBUG_PM_WITH_WSR
+		PRINT_ADDR(cp);
+		printf ("MA interrupt with WSR set - "
+			"pm->sg.addr=%x - pm->sg.size=%d\n",
+			pm->sg.addr, pm->sg.size);
+#endif
+		/*
+		 * Set up the table indirect for the MOVE
+		 * of the residual byte and adjust the data
+		 * pointer context.
+		 */
+		tmp = scr_to_cpu(pm->sg.addr);
+		cp->phys.wresid.addr = cpu_to_scr(tmp);
+		pm->sg.addr = cpu_to_scr(tmp + 1);
+		tmp = scr_to_cpu(pm->sg.size);
+		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
+		pm->sg.size = cpu_to_scr(tmp - 1);
+
+		/*
+		 * If only the residual byte is to be moved,
+		 * no PM context is needed.
+		 */
+		if ((tmp&0xffffff) == 1)
+			newcmd = pm->ret;
+
+		/*
+		 * Prepare the address of SCRIPTS that will
+		 * move the residual byte to memory.
+		 */
+		nxtdsp = NCB_SCRIPTH_PHYS (np, wsr_ma_helper);
+	}
+
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		PRINT_ADDR(cp->cmd);
+		printk ("PM %x %x %x / %x %x %x.\n",
+			hflags0, hflags, newcmd,
+			(unsigned)scr_to_cpu(pm->sg.addr),
+			(unsigned)scr_to_cpu(pm->sg.size),
+			(unsigned)scr_to_cpu(pm->ret));
+	}
+
+	/*
+	**	Restart the SCRIPTS processor.
+	*/
+
+	OUTL (nc_temp, newcmd);
+	OUTL_DSP (nxtdsp);
+	return;
+
+	/*
+	**	Unexpected phase changes that occur when the current phase
+	**	is not a DATA IN or DATA OUT phase are due to error conditions.
+	**	Such event may only happen when the SCRIPTS is using a
+	**	multibyte SCSI MOVE.
+	**
+	**	Phase change		Some possible cause
+	**
+	**	COMMAND  --> MSG IN	SCSI parity error detected by target.
+	**	COMMAND  --> STATUS	Bad command or refused by target.
+	**	MSG OUT  --> MSG IN     Message rejected by target.
+	**	MSG OUT  --> COMMAND    Bogus target that discards extended
+	**				negotiation messages.
+	**
+	**	The code below does not care of the new phase and so
+	**	trusts the target. Why to annoy it ?
+	**	If the interrupted phase is COMMAND phase, we restart at
+	**	dispatcher.
+	**	If a target does not get all the messages after selection,
+	**	the code assumes blindly that the target discards extended
+	**	messages and clears the negotiation status.
+	**	If the target does not want all our response to negotiation,
+	**	we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
+	**	bloat for such a should_not_happen situation).
+	**	In all other situation, we reset the BUS.
+	**	Are these assumptions reasonable ? (Wait and see ...)
+	*/
+unexpected_phase:
+	dsp -= 8;	/* Point back at the interrupted instruction */
+	nxtdsp = 0;
+
+	switch (cmd & 7) {
+	case 2:	/* COMMAND phase */
+		nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+		break;
+#if 0
+	case 3:	/* STATUS  phase */
+		nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+		break;
+#endif
+	case 6:	/* MSG OUT phase */
+		/*
+		**	If the device may want to use untagged when we want
+		**	tagged, we prepare an IDENTIFY without disc. granted,
+		**	since we will not be able to handle reselect.
+		**	Otherwise, we just don't care.
+		*/
+		if	(dsp == NCB_SCRIPT_PHYS (np, send_ident)) {
+			if (cp->tag != NO_TAG && olen - rest <= 3) {
+				cp->host_status = HS_BUSY;
+				np->msgout[0] = M_IDENTIFY | cp->lun;
+				nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break_atn);
+			}
+			else
+				nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break);
+		}
+		else if	(dsp == NCB_SCRIPTH_PHYS (np, send_wdtr) ||
+			 dsp == NCB_SCRIPTH_PHYS (np, send_sdtr) ||
+			 dsp == NCB_SCRIPTH_PHYS (np, send_ppr)) {
+			nxtdsp = NCB_SCRIPTH_PHYS (np, nego_bad_phase);
+		}
+		break;
+#if 0
+	case 7:	/* MSG IN  phase */
+		nxtdsp = NCB_SCRIPT_PHYS (np, clrack);
+		break;
+#endif
+	}
+
+	if (nxtdsp) {
+		OUTL_DSP (nxtdsp);
+		return;
+	}
+
+reset_all:
+	ncr_start_reset(np);
+}
+
+/*==========================================================
+**
+** ncr chip handler for QUEUE FULL and CHECK CONDITION
+**
+**==========================================================
+**
+** On QUEUE FULL status, we set the actual tagged command
+** queue depth to the number of disconnected CCBs that is
+** hopefully a good value to avoid further QUEUE FULL.
+**
+** On CHECK CONDITION or COMMAND TERMINATED, we use the
+** CCB of the failed command for performing a REQUEST
+** SENSE SCSI command.
+**
+** We do not want to change the order commands will be
+** actually queued to the device after we received a
+** QUEUE FULL status. We also want to properly deal with
+** contingent allegiance condition. For these reasons,
+** we remove from the start queue all commands for this
+** LUN that haven't been yet queued to the device and
+**	put them back in the corresponding LUN queue, then
+** requeue the CCB that failed in front of the LUN queue.
+** I just hope this not to be performed too often. :)
+**
+** If we are using IMMEDIATE ARBITRATION, we clear the
+** IARB hint for every commands we encounter in order not
+** to be stuck with a won arbitration and no job to queue
+** to a device.
+**----------------------------------------------------------
+*/
+
+static void ncr_sir_to_redo(ncb_p np, int num, ccb_p cp)
+{
+	Scsi_Cmnd *cmd = cp->cmd;
+	tcb_p tp = &np->target[cp->target];
+	lcb_p lp = ncr_lp(np, tp, cp->lun);
+	ccb_p cp2;
+	int busyccbs = 1;
+	u_int32 startp;
+	u_char s_status = INB (SS_PRT);
+	int msglen;
+	int i, j;
+
+
+	/*
+	**	If the LCB is not yet available, then only
+	**	1 IO is accepted, so we should have it.
+	*/
+	if (!lp)
+		goto next;
+	/*
+	**	Remove all CCBs queued to the chip for that LUN and put
+	**	them back in the LUN CCB wait queue.
+	*/
+	busyccbs = lp->queuedccbs;
+	i = (INL (nc_scratcha) - np->p_squeue) / 4;
+	j = i;
+	while (i != np->squeueput) {
+		cp2 = ncr_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
+		assert(cp2);
+#ifdef SCSI_NCR_IARB_SUPPORT
+		/*
+		**	IARB hints may not be relevant any more. Forget them.
+		**	Guard the dereference: assert() may be compiled out,
+		**	and cp2 is only checked for NULL just below.
+		*/
+		if (cp2)
+			cp2->host_flags &= ~HF_HINT_IARB;
+#endif
+		if (cp2 && cp2->target == cp->target && cp2->lun == cp->lun) {
+			xpt_remque(&cp2->link_ccbq);
+			xpt_insque_head(&cp2->link_ccbq, &lp->wait_ccbq);
+			--lp->queuedccbs;
+			cp2->queued = 0;
+		}
+		else {
+			if (i != j)
+				np->squeue[j] = np->squeue[i];
+			if ((j += 2) >= MAX_START*2) j = 0;
+		}
+		if ((i += 2) >= MAX_START*2) i = 0;
+	}
+	if (i != j)		/* Copy back the idle task if needed */
+		np->squeue[j] = np->squeue[i];
+	np->squeueput = j;	/* Update our current start queue pointer */
+
+	/*
+	**	Requeue the interrupted CCB in front of the
+	**	LUN CCB wait queue to preserve ordering.
+	*/
+	xpt_remque(&cp->link_ccbq);
+	xpt_insque_head(&cp->link_ccbq, &lp->wait_ccbq);
+	--lp->queuedccbs;
+	cp->queued = 0;
+
+next:
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+	/* IARB hint may not be relevant any more. Forget it. */
+	cp->host_flags &= ~HF_HINT_IARB;
+	if (np->last_cp)
+		np->last_cp = 0;
+#endif
+
+	/*
+	**	Now we can restart the SCRIPTS processor safely.
+	*/
+	OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+
+	switch(s_status) {
+	default:
+	case S_BUSY:
+		ncr_complete(np, cp);
+		break;
+	case S_QUEUE_FULL:
+		if (!lp || !lp->queuedccbs) {
+			ncr_complete(np, cp);
+			break;
+		}
+		if (bootverbose >= 1) {
+			PRINT_ADDR(cmd);
+			printk ("QUEUE FULL! %d busy, %d disconnected CCBs\n",
+				busyccbs, lp->queuedccbs);
+		}
+		/*
+		**	Decrease number of tags to the number of
+		**	disconnected commands.
+		*/
+		if (lp->queuedccbs < lp->numtags) {
+			lp->numtags = lp->queuedccbs;
+			lp->num_good = 0;
+			ncr_setup_tags (np, cp->target, cp->lun);
+		}
+		/*
+		**	Repair the offending CCB so it can be requeued
+		**	as if it had never been started.
+		*/
+		cp->phys.header.savep = cp->startp;
+		cp->phys.header.lastp = cp->lastp0;
+		cp->host_status = HS_BUSY;
+		cp->scsi_status = S_ILLEGAL;
+		cp->xerr_status = 0;
+		cp->extra_bytes = 0;
+		cp->host_flags &= (HF_PM_TO_C|HF_DATA_IN);
+
+		break;
+
+	case S_TERMINATED:
+	case S_CHECK_COND:
+		/*
+		**	If we were requesting sense, give up.
+		*/
+		if (cp->host_flags & HF_AUTO_SENSE) {
+			ncr_complete(np, cp);
+			break;
+		}
+
+		/*
+		**	Save SCSI status and extended error.
+		**	Compute the data residual now.
+		*/
+		cp->sv_scsi_status = cp->scsi_status;
+		cp->sv_xerr_status = cp->xerr_status;
+		cp->resid = ncr_compute_residual(np, cp);
+
+		/*
+		**	Device returned CHECK CONDITION status.
+		**	Prepare all needed data structures for getting
+		**	sense data.
+		*/
+
+		/*
+		**	identify message
+		*/
+		cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
+		msglen = 1;
+
+		/*
+		**	If we are currently using anything different from
+		**	async. 8 bit data transfers with that target,
+		**	start a negotiation, since the device may want
+		**	to report us a UNIT ATTENTION condition due to
+		**	a cause we currently ignore, and we do not want
+		**	to be stuck with WIDE and/or SYNC data transfer.
+		**
+		**	cp->nego_status is filled by ncr_prepare_nego().
+		**
+		**	Do NOT negotiate if performing integrity check
+		**	or if integrity check has completed, all check
+		**	conditions will have been cleared.
+		*/
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+		if (DEBUG_FLAGS & DEBUG_IC) {
+			printk("%s: ncr_sir_to_redo: ic_done %2X, in_progress %2X\n",
+				ncr_name(np), tp->ic_done, cp->cmd->ic_in_progress);
+		}
+
+		/*
+		**	If parity error during integrity check,
+		**	set the target width to narrow. Otherwise,
+		**	do not negotiate on a request sense.
+		*/
+		if ( np->check_integ_par && np->check_integrity
+					&& cp->cmd->ic_in_progress ) {
+			cp->nego_status = 0;
+			msglen +=
+				ncr_ic_nego (np, cp, cmd ,&cp->scsi_smsg2[msglen]);
+		}
+
+		if (!np->check_integrity ||
+			(np->check_integrity &&
+			(!cp->cmd->ic_in_progress && !tp->ic_done)) ) {
+			ncr_negotiate(np, tp);
+			cp->nego_status = 0;
+			{
+				u_char sync_offset;
+				if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+				    (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+					sync_offset = tp->sval & 0x3f;
+				else
+					sync_offset = tp->sval & 0x1f;
+
+				if ((tp->wval & EWS) || sync_offset)
+					msglen +=
+					    ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]);
+			}
+
+		}
+#else
+		ncr_negotiate(np, tp);
+		cp->nego_status = 0;
+		if ((tp->wval & EWS) || (tp->sval & 0x1f))
+			msglen +=
+				ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]);
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+		/*
+		**	Message table indirect structure.
+		*/
+		cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2));
+		cp->phys.smsg.size = cpu_to_scr(msglen);
+
+		/*
+		**	sense command
+		*/
+		cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd));
+		cp->phys.cmd.size = cpu_to_scr(6);
+
+		/*
+		**	patch requested size into sense command
+		**	(REQUEST SENSE: opcode 0x03, LUN in byte 1 bits 5-7,
+		**	allocation length in byte 4)
+		*/
+		cp->sensecmd[0] = 0x03;
+		cp->sensecmd[1] = cp->lun << 5;
+		cp->sensecmd[4] = sizeof(cp->sense_buf);
+
+		/*
+		**	sense data
+		*/
+		bzero(cp->sense_buf, sizeof(cp->sense_buf));
+		cp->phys.sense.addr = cpu_to_scr(CCB_PHYS(cp,sense_buf[0]));
+		cp->phys.sense.size = cpu_to_scr(sizeof(cp->sense_buf));
+
+		/*
+		**	requeue the command.
+		*/
+		startp = NCB_SCRIPTH_PHYS (np, sdata_in);
+
+		cp->phys.header.savep = cpu_to_scr(startp);
+		cp->phys.header.goalp = cpu_to_scr(startp + 16);
+		cp->phys.header.lastp = cpu_to_scr(startp);
+		cp->phys.header.wgoalp = cpu_to_scr(startp + 16);
+		cp->phys.header.wlastp = cpu_to_scr(startp);
+
+		cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+		cp->scsi_status = S_ILLEGAL;
+		cp->host_flags = (HF_AUTO_SENSE|HF_DATA_IN);
+
+		cp->phys.header.go.start =
+			cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+
+		/*
+		**	If lp not yet allocated, requeue the command.
+		*/
+		if (!lp)
+			ncr_put_start_queue(np, cp);
+		break;
+	}
+
+	/*
+	**	requeue awaiting scsi commands for this lun.
+	*/
+	if (lp)
+		ncr_start_next_ccb(np, lp, 1);
+
+	return;
+}
+
+/*----------------------------------------------------------
+**
+** After a device has accepted some management message
+** as BUS DEVICE RESET, ABORT TASK, etc ..., or when
+** a device signals a UNIT ATTENTION condition, some
+** tasks are thrown away by the device. We are required
+** to reflect that on our tasks list since the device
+** will never complete these tasks.
+**
+** This function completes all disconnected CCBs for a
+** given target that matches the following criteria:
+** - lun=-1 means any logical UNIT otherwise a given one.
+** - task=-1 means any task, otherwise a given one.
+**----------------------------------------------------------
+*/
+static int ncr_clear_tasks(ncb_p np, u_char hsts,
+			   int target, int lun, int task)
+{
+	int count = 0;
+	ccb_p ccb;
+
+	/*
+	**	Walk the global CCB list and complete every disconnected
+	**	CCB matching the target/lun/task selection criteria.
+	**	lun == -1 and task == -1 act as wildcards.
+	*/
+	for (ccb = np->ccbc; ccb; ccb = ccb->link_ccb) {
+		int matches =
+			ccb->host_status == HS_DISCONNECT &&
+			ccb->target == target &&
+			(lun == -1 || ccb->lun == lun) &&
+			(task == -1 || ccb->tag == NO_TAG ||
+			 ccb->scsi_smsg[2] == task);
+		if (!matches)
+			continue;
+		ccb->host_status = hsts;
+		ccb->scsi_status = S_ILLEGAL;
+		ncr_complete(np, ccb);
+		++count;
+	}
+	return count;
+}
+
+/*==========================================================
+**
+** ncr chip handler for TASKS recovery.
+**
+**==========================================================
+**
+** We cannot safely abort a command, while the SCRIPTS
+** processor is running, since we just would be in race
+** with it.
+**
+** As long as we have tasks to abort, we keep the SEM
+** bit set in the ISTAT. When this bit is set, the
+** SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
+** each time it enters the scheduler.
+**
+** If we have to reset a target, clear tasks of a unit,
+** or to perform the abort of a disconnected job, we
+** restart the SCRIPTS for selecting the target. Once
+** selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
+** If it loses arbitration, the SCRIPTS will interrupt again
+** the next time it will enter its scheduler, and so on ...
+**
+** On SIR_TARGET_SELECTED, we scan for the more
+** appropriate thing to do:
+**
+** - If nothing, we just sent a M_ABORT message to the
+** target to get rid of the useless SCSI bus ownership.
+** According to the specs, no tasks shall be affected.
+** - If the target is to be reset, we send it a M_RESET
+** message.
+** - If a logical UNIT is to be cleared , we send the
+** IDENTIFY(lun) + M_ABORT.
+** - If an untagged task is to be aborted, we send the
+** IDENTIFY(lun) + M_ABORT.
+** - If a tagged task is to be aborted, we send the
+** IDENTIFY(lun) + task attributes + M_ABORT_TAG.
+**
+** Once our 'kiss of death' :) message has been accepted
+** by the target, the SCRIPTS interrupts again
+** (SIR_ABORT_SENT). On this interrupt, we complete
+** all the CCBs that should have been aborted by the
+** target according to our message.
+**
+**----------------------------------------------------------
+*/
+static void ncr_sir_task_recovery(ncb_p np, int num)
+{
+	ccb_p cp;
+	tcb_p tp;
+	int target=-1, lun=-1, task;
+	int i, k;
+	u_char *p;
+
+	switch(num) {
+	/*
+	**	The SCRIPTS processor stopped before starting
+	**	the next command in order to allow us to perform
+	**	some task recovery.
+	*/
+	case SIR_SCRIPT_STOPPED:
+
+		/*
+		**	Do we have any target to reset or unit to clear ?
+		*/
+		for (i = 0 ; i < MAX_TARGET ; i++) {
+			tp = &np->target[i];
+			if (tp->to_reset || (tp->l0p && tp->l0p->to_clear)) {
+				target = i;
+				break;
+			}
+			if (!tp->lmp)
+				continue;
+			for (k = 1 ; k < MAX_LUN ; k++) {
+				if (tp->lmp[k] && tp->lmp[k]->to_clear) {
+					target = i;
+					break;
+				}
+			}
+			if (target != -1)
+				break;
+		}
+
+		/*
+		**	If not, look at the CCB list for any
+		**	disconnected CCB to be aborted.
+		*/
+		if (target == -1) {
+			for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+				if (cp->host_status != HS_DISCONNECT)
+					continue;
+				if (cp->to_abort) {
+					target = cp->target;
+					break;
+				}
+			}
+		}
+
+		/*
+		**	If some target is to be selected,
+		**	prepare and start the selection.
+		*/
+		if (target != -1) {
+			tp = &np->target[target];
+			np->abrt_sel.sel_id = target;
+			np->abrt_sel.sel_scntl3 = tp->wval;
+			np->abrt_sel.sel_sxfer = tp->sval;
+			np->abrt_sel.sel_scntl4 = tp->uval;
+			OUTL(nc_dsa, np->p_ncb);
+			OUTL_DSP (NCB_SCRIPTH_PHYS (np, sel_for_abort));
+			return;
+		}
+
+		/*
+		**	Nothing is to be selected, so we do not need
+		**	to synchronize with the SCRIPTS anymore.
+		**	Remove the SEM flag from the ISTAT.
+		*/
+		np->istat_sem = 0;
+		OUTB (nc_istat, SIGP);
+
+		/*
+		**	Now look at CCBs to abort that haven't started yet.
+		**	Remove all those CCBs from the start queue and
+		**	complete them with appropriate status.
+		**	Btw, the SCRIPTS processor is still stopped, so
+		**	we are not in race.
+		*/
+		for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+			if (cp->host_status != HS_BUSY &&
+			    cp->host_status != HS_NEGOTIATE)
+				continue;
+			if (!cp->to_abort)
+				continue;
+#ifdef SCSI_NCR_IARB_SUPPORT
+			/*
+			**	If we are using IMMEDIATE ARBITRATION, we do not
+			**	want to cancel the last queued CCB, since the
+			**	SCRIPTS may have anticipated the selection.
+			*/
+			if (cp == np->last_cp) {
+				cp->to_abort = 0;
+				continue;
+			}
+#endif
+			/*
+			**	Compute index of next position in the start
+			**	queue the SCRIPTS will schedule.
+			*/
+			i = (INL (nc_scratcha) - np->p_squeue) / 4;
+
+			/*
+			**	Remove the job from the start queue by
+			**	compacting the queue over it (entries are
+			**	2 words each, hence the += 2 steps).
+			*/
+			k = -1;
+			while (1) {
+				if (i == np->squeueput)
+					break;
+				if (k == -1) {		/* Not found yet */
+					if (cp == ncr_ccb_from_dsa(np,
+						scr_to_cpu(np->squeue[i])))
+						k = i;	/* Found */
+				}
+				else {
+					/*
+					**	Once found, we have to move
+					**	back all jobs by 1 position.
+					*/
+					np->squeue[k] = np->squeue[i];
+					k += 2;
+					if (k >= MAX_START*2)
+						k = 0;
+				}
+
+				i += 2;
+				if (i >= MAX_START*2)
+					i = 0;
+			}
+			if (k != -1) {
+				np->squeue[k] = np->squeue[i]; /* Idle task */
+				np->squeueput = k; /* Start queue pointer */
+			}
+			cp->host_status = HS_ABORTED;
+			cp->scsi_status = S_ILLEGAL;
+			ncr_complete(np, cp);
+		}
+		break;
+	/*
+	**	The SCRIPTS processor has selected a target
+	**	we may have some manual recovery to perform for.
+	*/
+	case SIR_TARGET_SELECTED:
+		target = (INB (nc_sdid) & 0xf);
+		tp = &np->target[target];
+
+		np->abrt_tbl.addr = vtobus(np->abrt_msg);
+
+		/*
+		**	If the target is to be reset, prepare a
+		**	M_RESET message and clear the to_reset flag
+		**	since we do not expect this operation to fail.
+		*/
+		if (tp->to_reset) {
+			np->abrt_msg[0] = M_RESET;
+			np->abrt_tbl.size = 1;
+			tp->to_reset = 0;
+			break;
+		}
+
+		/*
+		**	Otherwise, look for some logical unit to be cleared.
+		*/
+		if (tp->l0p && tp->l0p->to_clear)
+			lun = 0;
+		else if (tp->lmp) {
+			for (k = 1 ; k < MAX_LUN ; k++) {
+				if (tp->lmp[k] && tp->lmp[k]->to_clear) {
+					lun = k;
+					break;
+				}
+			}
+		}
+
+		/*
+		**	If a logical unit is to be cleared, prepare
+		**	an IDENTIFY(lun) + ABORT MESSAGE.
+		*/
+		if (lun != -1) {
+			lcb_p lp = ncr_lp(np, tp, lun);
+			lp->to_clear = 0; /* We do not expect to fail here */
+			np->abrt_msg[0] = M_IDENTIFY | lun;
+			np->abrt_msg[1] = M_ABORT;
+			np->abrt_tbl.size = 2;
+			break;
+		}
+
+		/*
+		**	Otherwise, look for some disconnected job to
+		**	abort for this target.
+		*/
+		for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+			if (cp->host_status != HS_DISCONNECT)
+				continue;
+			if (cp->target != target)
+				continue;
+			if (cp->to_abort)
+				break;
+		}
+
+		/*
+		**	If we have none, probably since the device has
+		**	completed the command before we won arbitration,
+		**	send a M_ABORT message without IDENTIFY.
+		**	According to the specs, the device must just
+		**	disconnect the BUS and not abort any task.
+		*/
+		if (!cp) {
+			np->abrt_msg[0] = M_ABORT;
+			np->abrt_tbl.size = 1;
+			break;
+		}
+
+		/*
+		**	We have some task to abort.
+		**	Set the IDENTIFY(lun)
+		*/
+		np->abrt_msg[0] = M_IDENTIFY | cp->lun;
+
+		/*
+		**	If we want to abort an untagged command, we
+		**	will send a IDENTIFY + M_ABORT.
+		**	Otherwise (tagged command), we will send
+		**	a IDENTIFY + task attributes + ABORT TAG.
+		*/
+		if (cp->tag == NO_TAG) {
+			np->abrt_msg[1] = M_ABORT;
+			np->abrt_tbl.size = 2;
+		}
+		else {
+			np->abrt_msg[1] = cp->scsi_smsg[1];
+			np->abrt_msg[2] = cp->scsi_smsg[2];
+			np->abrt_msg[3] = M_ABORT_TAG;
+			np->abrt_tbl.size = 4;
+		}
+		cp->to_abort = 0; /* We do not expect to fail here */
+		break;
+
+	/*
+	**	The target has accepted our message and switched
+	**	to BUS FREE phase as we expected.
+	*/
+	case SIR_ABORT_SENT:
+		target = (INB (nc_sdid) & 0xf);
+		tp = &np->target[target];
+
+		/*
+		**	If we didn't abort anything, leave here.
+		*/
+		if (np->abrt_msg[0] == M_ABORT)
+			break;
+
+		/*
+		**	If we sent a M_RESET, then a hardware reset has
+		**	been performed by the target.
+		**	- Reset everything to async 8 bit
+		**	- Tell ourself to negotiate next time :-)
+		**	- Prepare to clear all disconnected CCBs for
+		**	  this target from our task list (lun=task=-1)
+		*/
+		lun = -1;
+		task = -1;
+		if (np->abrt_msg[0] == M_RESET) {
+			tp->sval = 0;
+			tp->wval = np->rv_scntl3;
+			tp->uval = np->rv_scntl4;
+			ncr_set_sync_wide_status(np, target);
+			ncr_negotiate(np, tp);
+		}
+
+		/*
+		**	Otherwise, check for the LUN and TASK(s)
+		**	concerned by the cancellation.
+		**	If it is not ABORT_TAG then it is CLEAR_QUEUE
+		**	or an ABORT message :-)
+		*/
+		else {
+			lun = np->abrt_msg[0] & 0x3f;
+			if (np->abrt_msg[1] == M_ABORT_TAG)
+				task = np->abrt_msg[2];
+		}
+
+		/*
+		**	Complete all the CCBs the device should have
+		**	aborted due to our 'kiss of death' message.
+		*/
+		(void) ncr_clear_tasks(np, HS_ABORTED, target, lun, task);
+		break;
+
+	/*
+	**	We have performed an auto-sense that succeeded.
+	**	If the device reports a UNIT ATTENTION condition
+	**	due to a RESET condition, we must complete all
+	**	disconnect CCBs for this unit since the device
+	**	shall have thrown them away.
+	**	Since I haven't time to guess what the specs are
+	**	expecting for other UNIT ATTENTION conditions, I
+	**	decided to only care about RESET conditions. :)
+	*/
+	case SIR_AUTO_SENSE_DONE:
+		cp = ncr_ccb_from_dsa(np, INL (nc_dsa));
+		if (!cp)
+			break;
+		memcpy(cp->cmd->sense_buffer, cp->sense_buf,
+		       sizeof(cp->cmd->sense_buffer));
+		p = &cp->cmd->sense_buffer[0];
+
+		/*
+		**	Only act on: current error (0x70) +
+		**	UNIT ATTENTION (0x6) + ASC 0x29 (reset).
+		*/
+		if (p[0] != 0x70 || p[2] != 0x6 || p[12] != 0x29)
+			break;
+#if 0
+		(void) ncr_clear_tasks(np, HS_RESET, cp->target, cp->lun, -1);
+#endif
+		break;
+	}
+
+	/*
+	**	Print to the log the message we intend to send.
+	*/
+	if (num == SIR_TARGET_SELECTED) {
+		PRINT_TARGET(np, target);
+		ncr_printl_hex("control msgout:", np->abrt_msg,
+				np->abrt_tbl.size);
+		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
+	}
+
+	/*
+	**	Let the SCRIPTS processor continue.
+	*/
+	OUTONB_STD ();
+}
+
+
+/*==========================================================
+**
+** Gérard's alchemy:) that deals with the data
+** pointer for both MDP and the residual calculation.
+**
+**==========================================================
+**
+** I didn't want to bloat the code by more than 200
+** lines for the handling of both MDP and the residual.
+** This has been achieved by using a data pointer
+** representation consisting in an index in the data
+** array (dp_sg) and a negative offset (dp_ofs) that
+** have the following meaning:
+**
+** - dp_sg = MAX_SCATTER
+** we are at the end of the data script.
+** - dp_sg < MAX_SCATTER
+** dp_sg points to the next entry of the scatter array
+** we want to transfer.
+** - dp_ofs < 0
+** dp_ofs represents the residual of bytes of the
+** previous entry scatter entry we will send first.
+** - dp_ofs = 0
+** no residual to send first.
+**
+** The function ncr_evaluate_dp() accepts an arbitrary
+** offset (basically from the MDP message) and returns
+** the corresponding values of dp_sg and dp_ofs.
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_evaluate_dp(ncb_p np, ccb_p cp, u_int32 scr, int *ofs)
+{
+	u_int32 dp_scr;
+	int dp_ofs, dp_sg, dp_sgmin;
+	int tmp;
+	struct pm_ctx *pm;
+
+	/*
+	**	Compute the resulting data pointer in terms of a script
+	**	address within some DATA script and a signed byte offset.
+	**	'scr' is the script address reported by the chip, '*ofs'
+	**	the signed offset to apply (e.g. from a MODIFY DATA
+	**	POINTER message).
+	*/
+	dp_scr = scr;
+	dp_ofs = *ofs;
+	if (dp_scr == NCB_SCRIPT_PHYS (np, pm0_data))
+		pm = &cp->phys.pm0;
+	else if (dp_scr == NCB_SCRIPT_PHYS (np, pm1_data))
+		pm = &cp->phys.pm1;
+	else
+		pm = 0;
+
+	if (pm) {
+		/* Fold the saved phase-mismatch context back into the
+		   (script address, offset) representation. */
+		dp_scr = scr_to_cpu(pm->ret);
+		dp_ofs -= scr_to_cpu(pm->sg.size);
+	}
+
+	/*
+	**	Deduce the index of the sg entry.
+	**	Keep track of the index of the first valid entry.
+	**	If result is dp_sg = MAX_SCATTER, then we are at the
+	**	end of the data and vice-versa.
+	*/
+	tmp = scr_to_cpu(cp->phys.header.goalp);
+	dp_sg = MAX_SCATTER;
+	if (dp_scr != tmp)
+		dp_sg -= (tmp - 8 - (int)dp_scr) / (SCR_SG_SIZE*4);
+	dp_sgmin = MAX_SCATTER - cp->segments;
+
+	/*
+	**	Move to the sg entry the data pointer belongs to.
+	**
+	**	If we are inside the data area, we expect result to be:
+	**
+	**	Either,
+	**	    dp_ofs = 0 and dp_sg is the index of the sg entry
+	**	    the data pointer belongs to (or the end of the data)
+	**	Or,
+	**	    dp_ofs < 0 and dp_sg is the index of the sg entry
+	**	    the data pointer belongs to + 1.
+	*/
+	if (dp_ofs < 0) {
+		int n;
+		while (dp_sg > dp_sgmin) {
+			--dp_sg;
+			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+			/* Low 24 bits hold the segment byte count. */
+			n = dp_ofs + (tmp & 0xffffff);
+			if (n > 0) {
+				++dp_sg;
+				break;
+			}
+			dp_ofs = n;
+		}
+	}
+	else if (dp_ofs > 0) {
+		while (dp_sg < MAX_SCATTER) {
+			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+			dp_ofs -= (tmp & 0xffffff);
+			++dp_sg;
+			if (dp_ofs <= 0)
+				break;
+		}
+	}
+
+	/*
+	**	Make sure the data pointer is inside the data area.
+	**	If not, return some error.
+	*/
+	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
+		goto out_err;
+	else if (dp_sg > MAX_SCATTER || (dp_sg == MAX_SCATTER && dp_ofs > 0))
+		goto out_err;
+
+	/*
+	**	Save the extreme pointer if needed.
+	*/
+	if (dp_sg > cp->ext_sg ||
+	    (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
+		cp->ext_sg = dp_sg;
+		cp->ext_ofs = dp_ofs;
+	}
+
+	/*
+	**	Return data: offset through *ofs, sg index (or -1
+	**	on error) as the return value.
+	*/
+	*ofs = dp_ofs;
+	return dp_sg;
+
+out_err:
+	return -1;
+}
+
+/*==========================================================
+**
+** ncr chip handler for MODIFY DATA POINTER MESSAGE
+**
+**==========================================================
+**
+** We also call this function on IGNORE WIDE RESIDUE
+** messages that do not match a SWIDE full condition.
+** Btw, we assume in that situation that such a message
+** is equivalent to a MODIFY DATA POINTER (offset=-1).
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_modify_dp(ncb_p np, tcb_p tp, ccb_p cp, int ofs)
+{
+	int dp_ofs = ofs;
+	u_int32 dp_scr = INL (nc_temp);
+	u_int32 dp_ret;
+	u_int32 tmp;
+	u_char hflags;
+	int dp_sg;
+	struct pm_ctx *pm;
+
+	/*
+	**	Not supported for auto_sense;
+	*/
+	if (cp->host_flags & HF_AUTO_SENSE)
+		goto out_reject;
+
+	/*
+	**	Apply our alchemy:) (see comments in ncr_evaluate_dp()),
+	**	to the resulting data pointer.
+	*/
+	dp_sg = ncr_evaluate_dp(np, cp, dp_scr, &dp_ofs);
+	if (dp_sg < 0)
+		goto out_reject;
+
+	/*
+	**	And our alchemy:) allows to easily calculate the data
+	**	script address we want to return for the next data phase.
+	*/
+	dp_ret = cpu_to_scr(cp->phys.header.goalp);
+	dp_ret = dp_ret - 8 - (MAX_SCATTER - dp_sg) * (SCR_SG_SIZE*4);
+
+	/*
+	**	If offset / scatter entry is zero we do not need
+	**	a context for the new current data pointer.
+	*/
+	if (dp_ofs == 0) {
+		dp_scr = dp_ret;
+		goto out_ok;
+	}
+
+	/*
+	**	Get a context for the new current data pointer:
+	**	pick the phase-mismatch context (pm0/pm1) that is
+	**	not currently active, per the HF_ACT_PM flag.
+	*/
+	hflags = INB (HF_PRT);
+
+	if (hflags & HF_DP_SAVED)
+		hflags ^= HF_ACT_PM;
+
+	if (!(hflags & HF_ACT_PM)) {
+		pm = &cp->phys.pm0;
+		dp_scr = NCB_SCRIPT_PHYS (np, pm0_data);
+	}
+	else {
+		pm = &cp->phys.pm1;
+		dp_scr = NCB_SCRIPT_PHYS (np, pm1_data);
+	}
+
+	hflags &= ~(HF_DP_SAVED);
+
+	OUTB (HF_PRT, hflags);
+
+	/*
+	**	Set up the new current data pointer.
+	**	ofs < 0 there, and for the next data phase, we
+	**	want to transfer part of the data of the sg entry
+	**	corresponding to index dp_sg-1 prior to returning
+	**	to the main data script.
+	*/
+	pm->ret = cpu_to_scr(dp_ret);
+	tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
+	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
+	pm->sg.addr = cpu_to_scr(tmp);
+	pm->sg.size = cpu_to_scr(-dp_ofs);
+
+out_ok:
+	OUTL (nc_temp, dp_scr);
+	OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+	return;
+
+out_reject:
+	OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+}
+
+
+/*==========================================================
+**
+** ncr chip calculation of the data residual.
+**
+**==========================================================
+**
+** As I used to say, the requirement of data residual
+** in SCSI is broken, useless and cannot be achieved
+** without huge complexity.
+** But most OSes and even the official CAM require it.
+** When stupidity happens to be so widely spread inside
+** a community, it gets hard to convince.
+**
+** Anyway, I don't care, since I am not going to use
+** any software that considers this data residual as
+** a relevant information. :)
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_compute_residual(ncb_p np, ccb_p cp)
+{
+	int dp_sg, dp_sgmin, tmp;
+	int resid=0;
+	int dp_ofs = 0;
+
+	/*
+	 *	Check for some data lost or just thrown away.
+	 *	We are not required to be quite accurate in this
+	 *	situation. Btw, if we are odd for output and the
+	 *	device claims some more data, it may well happen
+	 *	that our residual is zero. :-)
+	 */
+	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
+		if (cp->xerr_status & XE_EXTRA_DATA)
+			resid -= cp->extra_bytes;
+		if (cp->xerr_status & XE_SODL_UNRUN)
+			++resid;
+		if (cp->xerr_status & XE_SWIDE_OVRUN)
+			--resid;
+	}
+
+
+	/*
+	**	If SCRIPTS reaches its goal point, then
+	**	there is no additional residual.
+	*/
+	if (cp->phys.header.lastp == cp->phys.header.goalp)
+		return resid;
+
+	/*
+	**	If the last data pointer is data_io (direction
+	**	unknown), then no data transfer should have
+	**	taken place.
+	*/
+	if (cp->phys.header.lastp == NCB_SCRIPTH_PHYS (np, data_io))
+		return cp->data_len;
+
+	/*
+	**	If no data transfer occurs, or if the data
+	**	pointer is weird, return full residual.
+	*/
+	if (cp->startp == cp->phys.header.lastp ||
+	    ncr_evaluate_dp(np, cp, scr_to_cpu(cp->phys.header.lastp),
+			    &dp_ofs) < 0) {
+		return cp->data_len;
+	}
+
+	/*
+	**	We are now fully comfortable in the computation
+	**	of the data residual (2's complement):
+	**	sum of the byte counts of all segments not yet
+	**	transferred, minus the partial offset.
+	*/
+	dp_sgmin = MAX_SCATTER - cp->segments;
+	resid = -cp->ext_ofs;
+	for (dp_sg = cp->ext_sg; dp_sg < MAX_SCATTER; ++dp_sg) {
+		tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+		resid += (tmp & 0xffffff);
+	}
+
+	/*
+	**	Hopefully, the result is not too wrong.
+	*/
+	return resid;
+}
+
+/*==========================================================
+**
+** Print out the content of a SCSI message.
+**
+**==========================================================
+*/
+
+static int ncr_show_msg (u_char * msg)
+{
+	/*
+	**	Print one SCSI message in hex and return the number
+	**	of bytes it spans according to the message type.
+	*/
+	u_char n;
+
+	printk ("%x", msg[0]);
+
+	if (msg[0] == M_EXTENDED) {
+		/* Extended message: print up to 7 additional bytes. */
+		for (n = 1; n < 8; n++) {
+			if (n - 1 > msg[1])
+				break;
+			printk ("-%x", msg[n]);
+		}
+		return n + 1;
+	}
+	if ((msg[0] & 0xf0) == 0x20) {
+		/* Two-byte message group. */
+		printk ("-%x", msg[1]);
+		return 2;
+	}
+	return 1;
+}
+
+static void ncr_print_msg (ccb_p cp, char *label, u_char *msg)
+{
+	/*
+	**	Print an optional command address and label, then the
+	**	message bytes, terminated by a period and newline.
+	*/
+	if (cp != NULL)
+		PRINT_ADDR(cp->cmd);
+	if (label != NULL)
+		printk ("%s: ", label);
+	(void) ncr_show_msg (msg);
+	printk (".\n");
+}
+
+/*===================================================================
+**
+** Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
+**
+**===================================================================
+**
+** Was Sie schon immer ueber transfermode negotiation wissen wollten ...
+**
+** We try to negotiate sync and wide transfer only after
+** a successfull inquire command. We look at byte 7 of the
+** inquire data to determine the capabilities of the target.
+**
+** When we try to negotiate, we append the negotiation message
+** to the identify and (maybe) simple tag message.
+** The host status field is set to HS_NEGOTIATE to mark this
+** situation.
+**
+** If the target doesn't answer this message immediately
+** (as required by the standard), the SIR_NEGO_FAILED interrupt
+** will be raised eventually.
+** The handler removes the HS_NEGOTIATE status, and sets the
+** negotiated value to the default (async / nowide).
+**
+** If we receive a matching answer immediately, we check it
+** for validity, and set the values.
+**
+** If we receive a Reject message immediately, we assume the
+** negotiation has failed, and fall back to standard values.
+**
+** If we receive a negotiation message while not in HS_NEGOTIATE
+** state, it's a target initiated negotiation. We prepare a
+** (hopefully) valid answer, set our parameters, and send back
+** this answer to the target.
+**
+** If the target doesn't fetch the answer (no message out phase),
+** we assume the negotiation has failed, and fall back to default
+** settings (SIR_NEGO_PROTO interrupt).
+**
+** When we set the values, we adjust them in all ccbs belonging
+** to this target, in the controller's register, and in the "phys"
+** field of the controller's struct ncb.
+**
+**---------------------------------------------------------------------
+*/
+
+/*==========================================================
+**
+** ncr chip handler for SYNCHRONOUS DATA TRANSFER
+** REQUEST (SDTR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+static void ncr_sync_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+	u_char scntl3, scntl4;
+	u_char chg, ofs, per, fak;
+
+	/*
+	**	Synchronous request message received: validate it,
+	**	apply the negotiated values and either acknowledge
+	**	(answer case) or send back our own SDTR (request case).
+	*/
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		ncr_print_msg(cp, "sync msg in", np->msgin);
+	};
+
+	/*
+	**	get requested values (SDTR: period in msgin[3],
+	**	offset in msgin[4]).
+	*/
+
+	chg = 0;
+	per = np->msgin[3];
+	ofs = np->msgin[4];
+	if (ofs==0) per=255;
+
+	/*
+	**	if target sends SDTR message,
+	**	it CAN transfer synch.
+	*/
+
+	if (ofs)
+		tp->inq_byte7 |= INQ7_SYNC;
+
+	/*
+	**	check values against driver limits.
+	*/
+
+	if (per < np->minsync)
+		{chg = 1; per = np->minsync;}
+	if (per < tp->minsync)
+		{chg = 1; per = tp->minsync;}
+	if (ofs > tp->maxoffs)
+		{chg = 1; ofs = tp->maxoffs;}
+
+	/*
+	**	Check against controller limits.
+	**	NOTE(review): fak > 7 appears to mean no usable clock
+	**	divisor was found for this period — confirm against
+	**	ncr_getsync(), which is not visible here.
+	*/
+	fak = 7;
+	scntl3 = 0;
+	scntl4 = 0;
+	if (ofs != 0) {
+		ncr_getsync(np, per, &fak, &scntl3);
+		if (fak > 7) {
+			chg = 1;
+			ofs = 0;
+		}
+	}
+	if (ofs == 0) {
+		/* Fall back to asynchronous transfers. */
+		fak = 7;
+		per = 0;
+		scntl3 = 0;
+		scntl4 = 0;
+		tp->minsync = 0;
+	}
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		PRINT_ADDR(cp->cmd);
+		printk ("sync: per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n",
+			per, scntl3, scntl4, ofs, fak, chg);
+	}
+
+	if (INB (HS_PRT) == HS_NEGOTIATE) {
+		OUTB (HS_PRT, HS_BUSY);
+		switch (cp->nego_status) {
+		case NS_SYNC:
+			/*
+			**	This was an answer message
+			*/
+			if (chg) {
+				/*
+				**	Answer wasn't acceptable.
+				*/
+				ncr_setsync (np, cp, 0, 0xe0, 0);
+				OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+			} else {
+				/*
+				**	Answer is ok.
+				*/
+				if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+				    (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+					ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0);
+				else
+					ncr_setsync (np, cp, scntl3, ofs, scntl4);
+
+				OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+			};
+			return;
+
+		case NS_WIDE:
+			ncr_setwide (np, cp, 0, 0);
+			break;
+		};
+	};
+
+	/*
+	**	It was a request. Set value and
+	**	prepare an answer message
+	*/
+
+	if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+	    (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+		ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0);
+	else
+		ncr_setsync (np, cp, scntl3, ofs, scntl4);
+
+	np->msgout[0] = M_EXTENDED;
+	np->msgout[1] = 3;
+	np->msgout[2] = M_X_SYNC_REQ;
+	np->msgout[3] = per;
+	np->msgout[4] = ofs;
+
+	cp->nego_status = NS_SYNC;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		ncr_print_msg(cp, "sync msgout", np->msgout);
+	}
+
+	np->msgin [0] = M_NOOP;
+
+	if (!ofs)
+		OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+	else
+		OUTL_DSP (NCB_SCRIPTH_PHYS (np, sdtr_resp));
+}
+
+/*==========================================================
+**
+** ncr chip handler for WIDE DATA TRANSFER REQUEST
+** (WDTR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+static void ncr_wide_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+	u_char chg, wide;
+
+	/*
+	**	Wide request message received: validate the requested
+	**	width, apply it, and either acknowledge (answer case)
+	**	or send back our own WDTR (request case).
+	*/
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		ncr_print_msg(cp, "wide msgin", np->msgin);
+	};
+
+	/*
+	**	get requested values (WDTR: width exponent in msgin[3]).
+	*/
+
+	chg = 0;
+	wide = np->msgin[3];
+
+	/*
+	**	if target sends WDTR message,
+	**	it CAN transfer wide.
+	*/
+
+	if (wide)
+		tp->inq_byte7 |= INQ7_WIDE16;
+
+	/*
+	**	check values against driver limits.
+	*/
+
+	if (wide > tp->usrwide)
+		{chg = 1; wide = tp->usrwide;}
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		PRINT_ADDR(cp->cmd);
+		printk ("wide: wide=%d chg=%d.\n", wide, chg);
+	}
+
+	if (INB (HS_PRT) == HS_NEGOTIATE) {
+		OUTB (HS_PRT, HS_BUSY);
+		switch (cp->nego_status) {
+		case NS_WIDE:
+			/*
+			**	This was an answer message
+			*/
+			if (chg) {
+				/*
+				**	Answer wasn't acceptable.
+				*/
+				ncr_setwide (np, cp, 0, 1);
+				OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+			} else {
+				/*
+				**	Answer is ok.
+				*/
+				ncr_setwide (np, cp, wide, 1);
+				OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+			};
+			return;
+
+		case NS_SYNC:
+			ncr_setsync (np, cp, 0, 0xe0, 0);
+			break;
+		};
+	};
+
+	/*
+	**	It was a request, set value and
+	**	prepare an answer message
+	*/
+
+	ncr_setwide (np, cp, wide, 1);
+
+	np->msgout[0] = M_EXTENDED;
+	np->msgout[1] = 2;
+	np->msgout[2] = M_X_WIDE_REQ;
+	np->msgout[3] = wide;
+
+	np->msgin [0] = M_NOOP;
+
+	cp->nego_status = NS_WIDE;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		ncr_print_msg(cp, "wide msgout", np->msgout);
+	}
+
+	OUTL_DSP (NCB_SCRIPTH_PHYS (np, wdtr_resp));
+}
+/*==========================================================
+**
+**	ncr chip handler for PARALLEL PROTOCOL REQUEST
+**	(PPR) message.
+**
+**==========================================================
+**
+**	Read comments above.
+**
+**----------------------------------------------------------
+*/
+/*
+**	Handle an incoming PPR message (request or answer).  PPR
+**	carries period, offset, width and DT-clocking together.
+**	Requested values are clamped against driver/controller
+**	limits; Ultra3 (period 0x09) is only accepted wide + DT.
+**	chg == 1 means "values lowered"; chg == 2 means "reject and
+**	force a future re-negotiation at min period 0x0A".
+*/
+static void ncr_ppr_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+	u_char scntl3, scntl4;
+	u_char chg, ofs, per, fak, wth, dt;
+
+	/*
+	**	PPR message received.
+	*/
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		ncr_print_msg(cp, "ppr msg in", np->msgin);
+	};
+
+	/*
+	**	get requested values.
+	*/
+
+	chg = 0;
+	per = np->msgin[3];
+	ofs = np->msgin[5];
+	wth = np->msgin[6];
+	dt = np->msgin[7];
+	if (ofs==0) per=255;	/* offset 0 == async: period is moot */
+
+	/*
+	**	if target sends sync (wide),
+	**	it CAN transfer sync (wide).
+	*/
+
+	if (ofs)
+		tp->inq_byte7 |= INQ7_SYNC;
+
+	if (wth)
+		tp->inq_byte7 |= INQ7_WIDE16;
+
+	/*
+	**	check values against driver limits.
+	*/
+
+	if (wth > tp->usrwide)
+		{chg = 1; wth = tp->usrwide;}
+	if (per < np->minsync)
+		{chg = 1; per = np->minsync;}
+	if (per < tp->minsync)
+		{chg = 1; per = tp->minsync;}
+	if (ofs > tp->maxoffs)
+		{chg = 1; ofs = tp->maxoffs;}
+
+	/*
+	**	Check against controller limits.
+	**	ncr_getsync() converts the period into a divisor (fak)
+	**	and scntl3 value; fak > 7 means the chip cannot do it.
+	*/
+	fak	= 7;
+	scntl3	= 0;
+	scntl4	= 0;
+	if (ofs != 0) {
+		scntl4 = dt ? 0x80 : 0;	/* 0x80 selects DT clocking */
+		ncr_getsync(np, per, &fak, &scntl3);
+		if (fak > 7) {
+			chg = 1;
+			ofs = 0;
+		}
+	}
+	if (ofs == 0) {
+		fak	= 7;
+		per	= 0;
+		scntl3	= 0;
+		scntl4	= 0;
+		tp->minsync = 0;
+	}
+
+	/*
+	**	If target responds with Ultra 3 speed
+	**	but narrow or not DT, reject.
+	**	If target responds with DT request
+	**	but not Ultra3 speeds, reject message,
+	**	reset min sync for target to 0x0A and
+	**	set flags to re-negotiate.
+	*/
+
+	if ((per == 0x09) && ofs && (!wth || !dt))
+		chg = 1;
+	else if (( (per > 0x09) && dt) )
+		chg = 2;
+
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		PRINT_ADDR(cp->cmd);
+		printk ("ppr: wth=%d per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n",
+			wth, per, scntl3, scntl4, ofs, fak, chg);
+	}
+
+	/*
+	**	If we were negotiating, this is the target's answer.
+	*/
+	if (INB (HS_PRT) == HS_NEGOTIATE) {
+		OUTB (HS_PRT, HS_BUSY);
+		switch (cp->nego_status) {
+		case NS_PPR:
+			/*
+			**	This was an answer message
+			*/
+			if (chg) {
+				/*
+				**	Answer wasn't acceptable.
+				*/
+				if (chg == 2) {
+					/* Send message reject and reset flags for
+					** host to re-negotiate with min period 0x0A.
+					*/
+					tp->minsync = 0x0A;
+					tp->period = 0;
+					tp->widedone = 0;
+				}
+				ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0);
+				OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+			} else {
+				/*
+				**	Answer is ok.  The C1010 chips take the
+				**	offset and scntl4 separately; older chips
+				**	pack the divisor and offset together.
+				*/
+
+				if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+					(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+					ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth);
+				else
+					ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth);
+
+				OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+
+			};
+			return;
+
+		case NS_SYNC:
+			/* Expected SDTR answer: reset sync to async. */
+			ncr_setsync (np, cp, 0, 0xe0, 0);
+			break;
+
+		case NS_WIDE:
+			/* Expected WDTR answer: reset to narrow. */
+			ncr_setwide (np, cp, 0, 0);
+			break;
+		};
+	};
+
+	/*
+	**	It was a request.  Set value and
+	**	prepare an answer message
+	**
+	**	If narrow or not DT and requesting Ultra3
+	**	slow the bus down and force ST.  If not
+	**	requesting Ultra3, force ST.
+	**	Max offset is 31=0x1f if ST mode.
+	*/
+
+	if ((per == 0x09) && ofs && (!wth || !dt)) {
+		per = 0x0A;
+		dt = 0;
+		ofs &= 0x1f;
+	}
+	else if ( (per > 0x09) && dt) {
+		dt = 0;
+		ofs &= 0x1f;
+	}
+
+	if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+			(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+		ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth);
+	else
+		ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth);
+
+	np->msgout[0] = M_EXTENDED;
+	np->msgout[1] = 6;
+	np->msgout[2] = M_X_PPR_REQ;
+	np->msgout[3] = per;
+	np->msgout[4] = 0;
+	np->msgout[5] = ofs;
+	np->msgout[6] = wth;
+	np->msgout[7] = dt;
+
+	cp->nego_status = NS_PPR;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		ncr_print_msg(cp, "ppr msgout", np->msgout);
+	}
+
+	np->msgin [0] = M_NOOP;
+
+	if (!ofs)
+		OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+	else
+		OUTL_DSP (NCB_SCRIPTH_PHYS (np, ppr_resp));
+}
+
+
+
+/*
+**	Reset SYNC or WIDE to default settings.
+**	Called when a negotiation does not succeed either
+**	on rejection or on protocol error.
+**
+**	Clears both message buffers and the CCB negotiation status.
+*/
+static void ncr_nego_default(ncb_p np, tcb_p tp, ccb_p cp)
+{
+	/*
+	**	any error in negotiation:
+	**	fall back to default mode.
+	*/
+	switch (cp->nego_status) {
+
+	case NS_SYNC:
+		ncr_setsync (np, cp, 0, 0xe0, 0);
+		break;
+
+	case NS_WIDE:
+		ncr_setwide (np, cp, 0, 0);
+		break;
+
+	case NS_PPR:
+		/*
+		 * ppr_negotiation is set to 1 on the first ppr nego command.
+		 * If ppr is successful, it is reset to 2.
+		 * If unsuccessful it is reset to 0.
+		 */
+		if (DEBUG_FLAGS & DEBUG_NEGO) {
+			/* shadows the tp parameter on purpose: same target */
+			tcb_p tp=&np->target[cp->target];
+			u_char factor, offset, width;
+
+			ncr_get_xfer_info ( np, tp, &factor, &offset, &width);
+
+			printk("Current factor %d offset %d width %d\n",
+				factor, offset, width);
+		}
+		if (tp->ppr_negotiation == 2)	/* PPR had succeeded before */
+			ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0);
+		else if (tp->ppr_negotiation == 1) {
+
+			/* First ppr command has received a M REJECT.
+			 * Do not change the existing wide/sync parameter
+			 * values (async/narrow if this is the first nego;
+			 * may be different if target initiates nego.).
+			 */
+			tp->ppr_negotiation = 0;
+		}
+		else
+		{
+			tp->ppr_negotiation = 0;
+			ncr_setwide (np, cp, 0, 0);
+		}
+		break;
+	};
+	np->msgin [0] = M_NOOP;
+	np->msgout[0] = M_NOOP;
+	cp->nego_status = 0;
+}
+
+/*==========================================================
+**
+**	ncr chip handler for MESSAGE REJECT received for
+**	a WIDE or SYNCHRONOUS negotiation.
+**
+**	clear the PPR negotiation flag, all future nego.
+**	will be SDTR and WDTR
+**
+**==========================================================
+**
+**	Read comments above.
+**
+**----------------------------------------------------------
+*/
+/*
+**	Fall back to defaults, then clear the HS_NEGOTIATE state
+**	so the SCRIPTS processor can resume the command normally.
+*/
+static void ncr_nego_rejected(ncb_p np, tcb_p tp, ccb_p cp)
+{
+	ncr_nego_default(np, tp, cp);
+	OUTB (HS_PRT, HS_BUSY);
+}
+
+
+/*==========================================================
+**
+**
+**	ncr chip exception handler for programmed interrupts.
+**
+**
+**==========================================================
+*/
+
+/*
+**	Dispatch on the interrupt number (dsps) raised by the
+**	SCRIPTS processor.  Each case either restarts the SCRIPTS
+**	at an appropriate label (OUTL_DSP), sets the interrupt-on-
+**	the-fly bit to resume (out:), or leaves the chip stopped
+**	for later recovery (out_stuck:).
+*/
+void ncr_int_sir (ncb_p np)
+{
+	u_char num	= INB (nc_dsps);
+	u_long	dsa	= INL (nc_dsa);
+	ccb_p	cp	= ncr_ccb_from_dsa(np, dsa);	/* may be NULL */
+	u_char	target	= INB (nc_sdid) & 0x0f;
+	tcb_p	tp	= &np->target[target];
+	int	tmp;
+
+	if (DEBUG_FLAGS & DEBUG_TINY) printk ("I#%d", num);
+
+	switch (num) {
+	/*
+	**	See comments in the SCRIPTS code.
+	*/
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+	case SIR_DUMMY_INTERRUPT:
+		goto out;
+#endif
+
+	/*
+	**	The C code is currently trying to recover from something.
+	**	Typically, user wants to abort some command.
+	*/
+	case SIR_SCRIPT_STOPPED:
+	case SIR_TARGET_SELECTED:
+	case SIR_ABORT_SENT:
+	case SIR_AUTO_SENSE_DONE:
+		ncr_sir_task_recovery(np, num);
+		return;
+	/*
+	**	The device didn't go to MSG OUT phase after having
+	**	been selected with ATN.  We do not want to handle
+	**	that.
+	*/
+	case SIR_SEL_ATN_NO_MSG_OUT:
+		printk ("%s:%d: No MSG OUT phase after selection with ATN.\n",
+			ncr_name (np), target);
+		goto out_stuck;
+	/*
+	**	The device didn't switch to MSG IN phase after
+	**	having reselected the initiator.
+	*/
+	case SIR_RESEL_NO_MSG_IN:
+		/* fall through */
+	/*
+	**	After reselection, the device sent a message that wasn't
+	**	an IDENTIFY.
+	*/
+	case SIR_RESEL_NO_IDENTIFY:
+		/*
+		**	If devices reselecting without sending an IDENTIFY
+		**	message still exist, this should help.
+		**	We just assume lun=0, 1 CCB, no tag.
+		*/
+		if (tp->l0p) {
+			OUTL (nc_dsa, scr_to_cpu(tp->l0p->tasktbl[0]));
+			OUTL_DSP (NCB_SCRIPT_PHYS (np, resel_go));
+			return;
+		}
+		/* fall through: no lun 0 context, treat as a bad LUN */
+	/*
+	**	The device reselected a LUN we do not know of.
+	*/
+	case SIR_RESEL_BAD_LUN:
+		np->msgout[0] = M_RESET;
+		goto out;
+	/*
+	**	The device reselected for an untagged nexus and we
+	**	haven't any.
+	*/
+	case SIR_RESEL_BAD_I_T_L:
+		np->msgout[0] = M_ABORT;
+		goto out;
+	/*
+	**	The device reselected for a tagged nexus that we do not
+	**	have.
+	*/
+	case SIR_RESEL_BAD_I_T_L_Q:
+		np->msgout[0] = M_ABORT_TAG;
+		goto out;
+	/*
+	**	The SCRIPTS let us know that the device has grabbed
+	**	our message and will abort the job.
+	*/
+	case SIR_RESEL_ABORTED:
+		np->lastmsg = np->msgout[0];
+		np->msgout[0] = M_NOOP;
+		printk ("%s:%d: message %x sent on bad reselection.\n",
+			ncr_name (np), target, np->lastmsg);
+		goto out;
+	/*
+	**	The SCRIPTS let us know that a message has been
+	**	successfully sent to the device.
+	*/
+	case SIR_MSG_OUT_DONE:
+		np->lastmsg = np->msgout[0];
+		np->msgout[0] = M_NOOP;
+		/* Should we really care of that */
+		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
+			if (cp) {
+				cp->xerr_status &= ~XE_PARITY_ERR;
+				if (!cp->xerr_status)
+					OUTOFFB (HF_PRT, HF_EXT_ERR);
+			}
+		}
+		goto out;
+	/*
+	**	The device didn't send a GOOD SCSI status.
+	**	We may have some work to do prior to allow
+	**	the SCRIPTS processor to continue.
+	*/
+	case SIR_BAD_STATUS:
+		if (!cp)
+			goto out;
+		ncr_sir_to_redo(np, num, cp);
+		return;
+	/*
+	**	We are asked by the SCRIPTS to prepare a
+	**	REJECT message.
+	*/
+	case SIR_REJECT_TO_SEND:
+		ncr_print_msg(cp, "M_REJECT to send for ", np->msgin);
+		np->msgout[0] = M_REJECT;
+		goto out;
+	/*
+	**	We have been ODD at the end of a DATA IN
+	**	transfer and the device didn't send a
+	**	IGNORE WIDE RESIDUE message.
+	**	It is a data overrun condition.
+	*/
+	case SIR_SWIDE_OVERRUN:
+		if (cp) {
+			OUTONB (HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_SWIDE_OVRUN;
+		}
+		goto out;
+	/*
+	**	We have been ODD at the end of a DATA OUT
+	**	transfer.
+	**	It is a data underrun condition.
+	*/
+	case SIR_SODL_UNDERRUN:
+		if (cp) {
+			OUTONB (HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_SODL_UNRUN;
+		}
+		goto out;
+	/*
+	**	The device wants us to transfer more data than
+	**	expected or in the wrong direction.
+	**	The number of extra bytes is in scratcha.
+	**	It is a data overrun condition.
+	*/
+	case SIR_DATA_OVERRUN:
+		if (cp) {
+			OUTONB (HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_EXTRA_DATA;
+			cp->extra_bytes += INL (nc_scratcha);
+		}
+		goto out;
+	/*
+	**	The device switched to an illegal phase (4/5).
+	*/
+	case SIR_BAD_PHASE:
+		if (cp) {
+			OUTONB (HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_BAD_PHASE;
+		}
+		goto out;
+	/*
+	**	We received a message.
+	*/
+	case SIR_MSG_RECEIVED:
+		if (!cp)
+			goto out_stuck;
+		switch (np->msgin [0]) {
+		/*
+		**	We received an extended message.
+		**	We handle MODIFY DATA POINTER, SDTR, WDTR
+		**	and reject all other extended messages.
+		*/
+		case M_EXTENDED:
+			switch (np->msgin [2]) {
+			case M_X_MODIFY_DP:
+				if (DEBUG_FLAGS & DEBUG_POINTER)
+					ncr_print_msg(cp,"modify DP",np->msgin);
+				/* 32-bit big-endian offset from msg bytes 3..6 */
+				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
+				      (np->msgin[5]<<8) + (np->msgin[6]);
+				ncr_modify_dp(np, tp, cp, tmp);
+				return;
+			case M_X_SYNC_REQ:
+				ncr_sync_nego(np, tp, cp);
+				return;
+			case M_X_WIDE_REQ:
+				ncr_wide_nego(np, tp, cp);
+				return;
+			case M_X_PPR_REQ:
+				ncr_ppr_nego(np, tp, cp);
+				return;
+			default:
+				goto out_reject;
+			}
+			break;
+		/*
+		**	We received a 1/2 byte message not handled from SCRIPTS.
+		**	We are only expecting MESSAGE REJECT and IGNORE WIDE
+		**	RESIDUE messages that haven't been anticipated by
+		**	SCRIPTS on SWIDE full condition.  Unanticipated IGNORE
+		**	WIDE RESIDUE messages are aliased as MODIFY DP (-1).
+		*/
+		case M_IGN_RESIDUE:
+			if (DEBUG_FLAGS & DEBUG_POINTER)
+				ncr_print_msg(cp,"ign wide residue", np->msgin);
+			ncr_modify_dp(np, tp, cp, -1);
+			return;
+		case M_REJECT:
+			if (INB (HS_PRT) == HS_NEGOTIATE)
+				ncr_nego_rejected(np, tp, cp);
+			else {
+				PRINT_ADDR(cp->cmd);
+				printk ("M_REJECT received (%x:%x).\n",
+					scr_to_cpu(np->lastmsg), np->msgout[0]);
+			}
+			goto out_clrack;
+			break;
+		default:
+			goto out_reject;
+		}
+		break;
+	/*
+	**	We received an unknown message.
+	**	Ignore all MSG IN phases and reject it.
+	*/
+	case SIR_MSG_WEIRD:
+		ncr_print_msg(cp, "WEIRD message received", np->msgin);
+		OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_weird));
+		return;
+	/*
+	**	Negotiation failed.
+	**	Target does not send us the reply.
+	**	Remove the HS_NEGOTIATE status.
+	*/
+	case SIR_NEGO_FAILED:
+		OUTB (HS_PRT, HS_BUSY);
+		/* fall through */
+	/*
+	**	Negotiation failed.
+	**	Target does not want answer message.
+	*/
+	case SIR_NEGO_PROTO:
+		ncr_nego_default(np, tp, cp);
+		goto out;
+	};
+
+out:
+	OUTONB_STD ();
+	return;
+out_reject:
+	OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+	return;
+out_clrack:
+	OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+	return;
+out_stuck:
+	/* leave the SCRIPTS processor stopped; recovery happens later */
+	return;
+}
+
+
+/*==========================================================
+**
+**
+**	Acquire a control block
+**
+**
+**==========================================================
+*/
+
+/*
+**	Get a free CCB for target tn / lun ln, allocating a new one
+**	if the free queue is empty.  Assigns a tag when the LUN uses
+**	tagged queuing.  Returns NULL when no CCB (or no tag slot)
+**	is available.
+*/
+static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln)
+{
+	tcb_p tp = &np->target[tn];
+	lcb_p lp = ncr_lp(np, tp, ln);
+	u_short tag = NO_TAG;
+	XPT_QUEHEAD *qp;
+	ccb_p cp = (ccb_p) 0;
+
+	/*
+	**	Allocate a new CCB if needed.
+	*/
+	if (xpt_que_empty(&np->free_ccbq))
+		(void) ncr_alloc_ccb(np);
+
+	/*
+	**	Look for a free CCB
+	*/
+	qp = xpt_remque_head(&np->free_ccbq);
+	if (!qp)
+		goto out;
+	cp = xpt_que_entry(qp, struct ccb, link_ccbq);
+
+	/*
+	**	If the LCB is not yet available and we already
+	**	have queued a CCB for a LUN without LCB,
+	**	give up.  Otherwise all is fine. :-)
+	*/
+	if (!lp) {
+		if (xpt_que_empty(&np->b0_ccbq))
+			xpt_insque_head(&cp->link_ccbq, &np->b0_ccbq);
+		else
+			goto out_free;
+	} else {
+		/*
+		**	Tune tag mode if asked by user.
+		*/
+		if (lp->queuedepth != lp->numtags) {
+			ncr_setup_tags(np, tn, ln);
+		}
+
+		/*
+		**	Get a tag for this nexus if required.
+		**	Keep from using more tags than we can handle.
+		*/
+		if (lp->usetags) {
+			if (lp->busyccbs < lp->maxnxs) {
+				/* ia_tag is a circular allocation index */
+				tag = lp->cb_tags[lp->ia_tag];
+				++lp->ia_tag;
+				if (lp->ia_tag == MAX_TAGS)
+					lp->ia_tag = 0;
+				cp->tags_si = lp->tags_si;
+				++lp->tags_sum[cp->tags_si];
+			}
+			else
+				goto out_free;
+		}
+
+		/*
+		**	Put the CCB in the LUN wait queue and
+		**	count it as busy.
+		*/
+		xpt_insque_tail(&cp->link_ccbq, &lp->wait_ccbq);
+		++lp->busyccbs;
+	}
+
+	/*
+	**	Remember all information needed to free this CCB.
+	*/
+	cp->to_abort = 0;
+	cp->tag	   = tag;
+	cp->target = tn;
+	cp->lun    = ln;
+
+	if (DEBUG_FLAGS & DEBUG_TAGS) {
+		PRINT_LUN(np, tn, ln);
+		printk ("ccb @%p using tag %d.\n", cp, tag);
+	}
+
+out:
+	return cp;
+out_free:
+	/* put the CCB back and report failure */
+	xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+	return (ccb_p) 0;
+}
+
+/*==========================================================
+**
+**
+**	Release one control block
+**
+**
+**==========================================================
+*/
+
+/*
+**	Return a CCB to the free queue.  If the LUN has a control
+**	block, recycle the tag (if any), point the task-table entry
+**	at the bad-identify handler, and decrement busy counters.
+*/
+static void ncr_free_ccb (ncb_p np, ccb_p cp)
+{
+	tcb_p tp = &np->target[cp->target];
+	lcb_p lp = ncr_lp(np, tp, cp->lun);
+
+	if (DEBUG_FLAGS & DEBUG_TAGS) {
+		PRINT_LUN(np, cp->target, cp->lun);
+		printk ("ccb @%p freeing tag %d.\n", cp, cp->tag);
+	}
+
+	/*
+	**	If lun control block available, make available
+	**	the task slot and the tag if any.
+	**	Decrement counters.
+	*/
+	if (lp) {
+		if (cp->tag != NO_TAG) {
+			/* if_tag is the circular free index of cb_tags[] */
+			lp->cb_tags[lp->if_tag++] = cp->tag;
+			if (lp->if_tag == MAX_TAGS)
+				lp->if_tag = 0;
+			--lp->tags_sum[cp->tags_si];
+			lp->tasktbl[cp->tag] = cpu_to_scr(np->p_bad_i_t_l_q);
+		} else {
+			lp->tasktbl[0] = cpu_to_scr(np->p_bad_i_t_l);
+		}
+		--lp->busyccbs;
+		if (cp->queued) {
+			--lp->queuedccbs;
+		}
+	}
+
+	/*
+	**	Make this CCB available.
+	*/
+	xpt_remque(&cp->link_ccbq);
+	xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+	cp -> host_status = HS_IDLE;
+	cp -> queued = 0;
+}
+
+/*------------------------------------------------------------------------
+**	Allocate a CCB and initialize its fixed part.
+**------------------------------------------------------------------------
+**	Returns NULL on allocation failure.  The new CCB is inserted
+**	into the DSA hash, the wakeup chain and the free queue.
+**------------------------------------------------------------------------
+*/
+static ccb_p ncr_alloc_ccb(ncb_p np)
+{
+	ccb_p cp = 0;
+	int hcode;
+
+	/*
+	**	Allocate memory for this CCB.
+	*/
+	cp = m_calloc_dma(sizeof(struct ccb), "CCB");
+	if (!cp)
+		return 0;
+
+	/*
+	**	Count it and initialize it.
+	*/
+	np->actccbs++;
+
+	/*
+	**	Remember virtual and bus address of this ccb.
+	*/
+	cp->p_ccb	= vtobus(cp);
+
+	/*
+	**	Insert this ccb into the hashed list
+	**	(keyed by bus address, so it can be found from the DSA).
+	*/
+	hcode = CCB_HASH_CODE(cp->p_ccb);
+	cp->link_ccbh = np->ccbh[hcode];
+	np->ccbh[hcode] = cp;
+
+	/*
+	**	Initialize the start and restart actions.
+	*/
+	cp->phys.header.go.start   = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+	cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPTH_PHYS(np,bad_i_t_l));
+
+	/*
+	**	Initialize some other fields.
+	*/
+	cp->phys.smsg_ext.addr = cpu_to_scr(NCB_PHYS(np, msgin[2]));
+
+	/*
+	**	Chain into wakeup list and free ccb queue.
+	*/
+	cp->link_ccb	= np->ccbc;
+	np->ccbc	= cp;
+
+	xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+
+	return cp;
+}
+
+/*------------------------------------------------------------------------
+**	Look up a CCB from a DSA value.
+**------------------------------------------------------------------------
+**	Hash the DSA (the CCB bus address) and walk the collision
+**	chain until the matching CCB is found.  Returns NULL when no
+**	CCB has this bus address.
+**------------------------------------------------------------------------
+*/
+static ccb_p ncr_ccb_from_dsa(ncb_p np, u_long dsa)
+{
+	ccb_p cp;
+
+	for (cp = np->ccbh[CCB_HASH_CODE(dsa)]; cp; cp = cp->link_ccbh) {
+		if (cp->p_ccb == dsa)
+			return cp;
+	}
+	return cp;	/* NULL: not found */
+}
+
+/*==========================================================
+**
+**
+** Allocation of resources for Targets/Luns/Tags.
+**
+**
+**==========================================================
+*/
+
+
+/*------------------------------------------------------------------------
+**	Target control block initialisation.
+**------------------------------------------------------------------------
+**	This data structure is fully initialized after a SCSI command
+**	has been successfully completed for this target.
+**------------------------------------------------------------------------
+*/
+static void ncr_init_tcb (ncb_p np, u_char tn)
+{
+	/*
+	**	Check some alignments required by the chip.
+	**	The sval/wval/uval fields must mirror the chip registers
+	**	at matching 32-bit offsets, so SCRIPTS can load them
+	**	with single register-to-register moves.
+	*/
+	assert (( (offsetof(struct ncr_reg, nc_sxfer) ^
+		offsetof(struct tcb    , sval    )) &3) == 0);
+	assert (( (offsetof(struct ncr_reg, nc_scntl3) ^
+		offsetof(struct tcb    , wval    )) &3) == 0);
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+			(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)){
+		assert (( (offsetof(struct ncr_reg, nc_scntl4) ^
+			offsetof(struct tcb    , uval    )) &3) == 0);
+	}
+}
+
+/*------------------------------------------------------------------------
+**	Lun control block allocation and initialization.
+**------------------------------------------------------------------------
+**	This data structure is allocated and initialized after a SCSI
+**	command has been successfully completed for this target/lun.
+**
+**	Returns the existing LCB if already allocated, the newly
+**	allocated one, or NULL on memory exhaustion (the `fail'
+**	label returns lp, which is NULL exactly in that case).
+**------------------------------------------------------------------------
+*/
+static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln)
+{
+	tcb_p tp = &np->target[tn];
+	lcb_p lp = ncr_lp(np, tp, ln);
+
+	/*
+	**	Already done, return.
+	*/
+	if (lp)
+		return lp;
+
+	/*
+	**	Initialize the target control block if not yet.
+	*/
+	ncr_init_tcb(np, tn);
+
+	/*
+	**	Allocate the lcb bus address array (64 entries of 4
+	**	bytes = 256 bytes, read by the chip on reselection).
+	**	Compute the bus address of this table.
+	*/
+	if (ln && !tp->luntbl) {
+		int i;
+
+		tp->luntbl = m_calloc_dma(256, "LUNTBL");
+		if (!tp->luntbl)
+			goto fail;
+		for (i = 0 ; i < 64 ; i++)
+			tp->luntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun));
+		tp->b_luntbl = cpu_to_scr(vtobus(tp->luntbl));
+	}
+
+	/*
+	**	Allocate the table of pointers for LUN(s) > 0, if needed.
+	*/
+	if (ln && !tp->lmp) {
+		tp->lmp = m_calloc(MAX_LUN * sizeof(lcb_p), "LMP");
+		if (!tp->lmp)
+			goto fail;
+	}
+
+	/*
+	**	Allocate the lcb.
+	**	Make it available to the chip.
+	*/
+	lp = m_calloc_dma(sizeof(struct lcb), "LCB");
+	if (!lp)
+		goto fail;
+	if (ln) {
+		tp->lmp[ln] = lp;
+		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
+	}
+	else {
+		/* LUN 0 has a dedicated slot in the TCB */
+		tp->l0p = lp;
+		tp->b_lun0 = cpu_to_scr(vtobus(lp));
+	}
+
+	/*
+	**	Initialize the CCB queue headers.
+	*/
+	xpt_que_init(&lp->busy_ccbq);
+	xpt_que_init(&lp->wait_ccbq);
+
+	/*
+	**	Set max CCBs to 1 and use the default task array
+	**	by default.  Tagged queuing (if supported) is enabled
+	**	later, from ncr_setup_lcb().
+	*/
+	lp->maxnxs	= 1;
+	lp->tasktbl	= &lp->tasktbl_0;
+	lp->b_tasktbl	= cpu_to_scr(vtobus(lp->tasktbl));
+	lp->tasktbl[0]	= cpu_to_scr(np->p_notask);
+	lp->resel_task	= cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag));
+
+	/*
+	**	Initialize command queuing control.
+	*/
+	lp->busyccbs	= 1;
+	lp->queuedccbs	= 1;
+	lp->queuedepth	= 1;
+fail:
+	return lp;
+}
+
+
+/*------------------------------------------------------------------------
+**	Lun control block setup on INQUIRY data received.
+**------------------------------------------------------------------------
+**	We only support WIDE, SYNC for targets and CMDQ for logical units.
+**	This setup is done on each INQUIRY since we are expecting user
+**	will play with CHANGE DEFINITION commands. :-)
+**
+**	Returns the LCB, or NULL when (re)allocation failed.
+**------------------------------------------------------------------------
+*/
+static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln, u_char *inq_data)
+{
+	tcb_p tp = &np->target[tn];
+	lcb_p lp = ncr_lp(np, tp, ln);
+	u_char inq_byte7;
+	int i;
+
+	/*
+	**	If no lcb, try to allocate it.
+	*/
+	if (!lp && !(lp = ncr_alloc_lcb(np, tn, ln)))
+		goto fail;
+
+#if 0	/* No more used. Left here as provision */
+	/*
+	**	Get device quirks.
+	*/
+	tp->quirks = 0;
+	if (tp->quirks && bootverbose) {
+		PRINT_LUN(np, tn, ln);
+		printk ("quirks=%x.\n", tp->quirks);
+	}
+#endif
+
+	/*
+	**	Evaluate trustable target/unit capabilities.
+	**	We only believe device version >= SCSI-2 that
+	**	use appropriate response data format (2).
+	**	But it seems that some CCS devices also
+	**	support SYNC and I do not want to frustrate
+	**	anybody. ;-)
+	*/
+	inq_byte7 = 0;
+	if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2)
+		inq_byte7 = inq_data[7];
+	else if ((inq_data[2] & 0x7) == 1 && (inq_data[3] & 0xf) == 1)
+		inq_byte7 = INQ7_SYNC;
+
+	/*
+	**	Throw away announced LUN capabilities if we are told
+	**	that there is no real device supported by the logical unit.
+	*/
+	if ((inq_data[0] & 0xe0) > 0x20 || (inq_data[0] & 0x1f) == 0x1f)
+		inq_byte7 &= (INQ7_SYNC | INQ7_WIDE16);
+
+	/*
+	**	If user is wanting SYNC, force this feature.
+	*/
+	if (driver_setup.force_sync_nego)
+		inq_byte7 |= INQ7_SYNC;
+
+	/*
+	**	Prepare negotiation if SIP capabilities have changed.
+	*/
+	tp->inq_done = 1;
+	if ((inq_byte7 ^ tp->inq_byte7) & (INQ7_SYNC | INQ7_WIDE16)) {
+		tp->inq_byte7 = inq_byte7;
+		ncr_negotiate(np, tp);
+	}
+
+	/*
+	**	If unit supports tagged commands, allocate and
+	**	initialize the task table if not yet.
+	**	(MAX_TASKS*4: one 32-bit SCRIPTS pointer per task.)
+	*/
+	if ((inq_byte7 & INQ7_QUEUE) && lp->tasktbl == &lp->tasktbl_0) {
+		lp->tasktbl = m_calloc_dma(MAX_TASKS*4, "TASKTBL");
+		if (!lp->tasktbl) {
+			lp->tasktbl = &lp->tasktbl_0;
+			goto fail;
+		}
+		lp->b_tasktbl = cpu_to_scr(vtobus(lp->tasktbl));
+		for (i = 0 ; i < MAX_TASKS ; i++)
+			lp->tasktbl[i] = cpu_to_scr(np->p_notask);
+
+		lp->cb_tags = m_calloc(MAX_TAGS, "CB_TAGS");
+		if (!lp->cb_tags)
+			goto fail;
+		for (i = 0 ; i < MAX_TAGS ; i++)
+			lp->cb_tags[i] = i;
+
+		lp->maxnxs = MAX_TAGS;
+		lp->tags_stime = ktime_get(3*HZ);
+	}
+
+	/*
+	**	Adjust tagged queueing status if needed.
+	*/
+	if ((inq_byte7 ^ lp->inq_byte7) & INQ7_QUEUE) {
+		lp->inq_byte7 = inq_byte7;
+		lp->numtags   = lp->maxtags;
+		ncr_setup_tags (np, tn, ln);
+	}
+
+fail:
+	return lp;
+}
+
+/*==========================================================
+**
+**
+** Build Scatter Gather Block
+**
+**
+**==========================================================
+**
+** The transfer area may be scattered among
+** several non adjacent physical pages.
+**
+** We may use MAX_SCATTER blocks.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** We try to reduce the number of interrupts caused
+** by unexpected phase changes due to disconnects.
+** A typical harddisk may disconnect before ANY block.
+** If we wanted to avoid unexpected phase changes at all
+** we had to use a break point every 512 bytes.
+** Of course the number of scatter/gather blocks is
+** limited.
+**	Under Linux, the scatter/gather blocks are provided by
+** the generic driver. We just have to copy addresses and
+** sizes to the data segment array.
+*/
+
+/*
+** For 64 bit systems, we use the 8 upper bits of the size field
+** to provide bus address bits 32-39 to the SCRIPTS processor.
+** This allows the 895A and 896 to address up to 1 TB of memory.
+** For 32 bit chips on 64 bit systems, we must be provided with
+** memory addresses that fit into the first 32 bit bus address
+** range and so, this does not matter and we expect an error from
+** the chip if this ever happen.
+**
+** We use a separate function for the case Linux does not provide
+** a scatter list in order to allow better code optimization
+** for the case we have a scatter list (BTW, for now this just wastes
+** about 40 bytes of code for x86, but my guess is that the scatter
+** code will get more complex later).
+*/
+
+#ifdef SCSI_NCR_USE_64BIT_DAC
+#define SCATTER_ONE(data, badd, len) \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len);
+#else
+#define SCATTER_ONE(data, badd, len) \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr(len);
+#endif
+
+#define CROSS_16MB(p, n) (((((u_long) p) + n - 1) ^ ((u_long) p)) & ~0xffffff)
+
+/*
+**	Build the data descriptor for a command that carries a single
+**	flat buffer (no scatter list).  The single entry is stored in
+**	the LAST slot of phys.data[], since SCRIPTS walks the array
+**	from the end.  Returns the number of segments used (0 or 1).
+*/
+static int ncr_scatter_no_sglist(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+{
+	struct scr_tblmove *data = &cp->phys.data[MAX_SCATTER-1];
+	int segment;
+
+	cp->data_len = cmd->request_bufflen;
+
+	if (cmd->request_bufflen) {
+		u_long baddr = map_scsi_single_data(np, cmd);
+
+		SCATTER_ONE(data, baddr, cmd->request_bufflen);
+		if (CROSS_16MB(baddr, cmd->request_bufflen)) {
+			/* 896 rev.1 DEL 472: let the C code handle the
+			   phase mismatch for this transfer */
+			cp->host_flags |= HF_PM_TO_C;
+#ifdef DEBUG_896R1
+printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n",
+	baddr, cmd->request_bufflen);
+#endif
+		}
+		segment = 1;
+	}
+	else
+		segment = 0;
+
+	return segment;
+}
+
+/*
+**	DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5.
+**
+**	We disable data phase mismatch handling from SCRIPTS for data
+**	transfers that contains scatter/gather entries that cross
+**	a 16 MB boundary.
+**	We use a different scatter function for 896 rev. 1 that needs
+**	such a work-around.  Doing so, we do not affect performance for
+**	other chips.
+**	This problem should not be triggered for disk IOs under Linux,
+**	since such IOs are performed using pages and buffers that are
+**	nicely power-of-two sized and aligned.  But, since this may change
+**	at any time, a work-around was required.
+*/
+static int ncr_scatter_896R1(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+{
+	int segn;
+	int use_sg = (int) cmd->use_sg;
+
+	cp->data_len = 0;
+
+	if (!use_sg)
+		segn = ncr_scatter_no_sglist(np, cp, cmd);
+	else if (use_sg > MAX_SCATTER)
+		segn = -1;	/* too many segments: caller must fail the cmd */
+	else {
+		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+		struct scr_tblmove *data;
+
+		use_sg = map_scsi_sg_data(np, cmd);
+		/* entries fill the TAIL of phys.data[]; SCRIPTS walks
+		   the array from the end */
+		data = &cp->phys.data[MAX_SCATTER - use_sg];
+
+		for (segn = 0; segn < use_sg; segn++) {
+			u_long baddr = scsi_sg_dma_address(&scatter[segn]);
+			unsigned int len = scsi_sg_dma_len(&scatter[segn]);
+
+			SCATTER_ONE(&data[segn],
+				    baddr,
+				    len);
+			/* NOTE(review): the boundary check uses
+			** scatter[segn].length while the entry uses the
+			** mapped dma len — presumably equal here; confirm
+			** against the dma mapping layer. */
+			if (CROSS_16MB(baddr, scatter[segn].length)) {
+				cp->host_flags |= HF_PM_TO_C;
+#ifdef DEBUG_896R1
+printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n",
+	baddr, scatter[segn].length);
+#endif
+			}
+			cp->data_len += len;
+		}
+	}
+
+	return segn;
+}
+
+/*
+**	Build the scatter/gather descriptor table for a command.
+**	Same as ncr_scatter_896R1() but without the 16 MB boundary
+**	work-around.  Returns the number of segments, or -1 when the
+**	scatter list does not fit into MAX_SCATTER entries.
+*/
+static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+{
+	int segment;
+	int use_sg = (int) cmd->use_sg;
+
+	cp->data_len = 0;
+
+	if (!use_sg)
+		segment = ncr_scatter_no_sglist(np, cp, cmd);
+	else if (use_sg > MAX_SCATTER)
+		segment = -1;
+	else {
+		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+		struct scr_tblmove *data;
+
+		use_sg = map_scsi_sg_data(np, cmd);
+		/* entries fill the TAIL of phys.data[] */
+		data = &cp->phys.data[MAX_SCATTER - use_sg];
+
+		for (segment = 0; segment < use_sg; segment++) {
+			u_long baddr = scsi_sg_dma_address(&scatter[segment]);
+			unsigned int len = scsi_sg_dma_len(&scatter[segment]);
+
+			SCATTER_ONE(&data[segment],
+				    baddr,
+				    len);
+			cp->data_len += len;
+		}
+	}
+
+	return segment;
+}
+
+/*==========================================================
+**
+**
+** Test the pci bus snoop logic :-(
+**
+** Has to be called with interrupts disabled.
+**
+**
+**==========================================================
+*/
+
+#ifndef SCSI_NCR_IOMAPPED
+/*
+**	Sanity-check memory-mapped register access: the dstat area is
+**	read-only, so writing all-ones and reading it back must NOT
+**	return all-ones.  Returns 0 on success, 0x10 on failure.
+*/
+static int __init ncr_regtest (struct ncb* np)
+{
+	register volatile u_int32 data;
+	/*
+	**	ncr registers may NOT be cached.
+	**	write 0xffffffff to a read only register area,
+	**	and try to read it back.
+	*/
+	data = 0xffffffff;
+	OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
+	data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
+#if 1
+	if (data == 0xffffffff) {
+#else
+	if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+		printk ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+			(unsigned) data);
+		return (0x10);
+	};
+	return (0);
+}
+#endif
+
+/*
+**	Test the PCI bus snoop (cache coherency) logic by running a
+**	small SCRIPTS program that exchanges a value between host
+**	memory and a chip register.  Returns 0 on success, otherwise
+**	an error bitmask (0x10 regtest, 0x20 timeout, 0x40 bad script
+**	end address, 1/2/4 for the three value mismatches).
+**	Has to be called with interrupts disabled.
+*/
+static int __init ncr_snooptest (struct ncb* np)
+{
+	u_int32	ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
+	int	i, err=0;
+#ifndef SCSI_NCR_IOMAPPED
+	if (np->reg) {
+		err |= ncr_regtest (np);
+		if (err) return (err);
+	}
+#endif
+	/*
+	**	init
+	*/
+	pc  = NCB_SCRIPTH0_PHYS (np, snooptest);
+	host_wr = 1;
+	ncr_wr  = 2;
+	/*
+	**	Set memory and register.
+	*/
+	np->ncr_cache = cpu_to_scr(host_wr);
+	OUTL (nc_temp, ncr_wr);
+	/*
+	**	Start script (exchange values)
+	*/
+	OUTL (nc_dsa, np->p_ncb);
+	OUTL_DSP (pc);
+	/*
+	**	Wait 'til done (with timeout)
+	*/
+	for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
+		if (INB(nc_istat) & (INTF|SIP|DIP))
+			break;
+	/*
+	**	Save termination position.
+	*/
+	pc = INL (nc_dsp);
+	/*
+	**	Read memory and register.
+	*/
+	host_rd = scr_to_cpu(np->ncr_cache);
+	ncr_rd  = INL (nc_scratcha);
+	ncr_bk  = INL (nc_temp);
+
+	/*
+	**	check for timeout
+	*/
+	if (i>=NCR_SNOOP_TIMEOUT) {
+		printk ("CACHE TEST FAILED: timeout.\n");
+		return (0x20);
+	};
+	/*
+	**	Check termination position.
+	*/
+	if (pc != NCB_SCRIPTH0_PHYS (np, snoopend)+8) {
+		printk ("CACHE TEST FAILED: script execution failed.\n");
+		printk ("start=%08lx, pc=%08lx, end=%08lx\n",
+			(u_long) NCB_SCRIPTH0_PHYS (np, snooptest), (u_long) pc,
+			(u_long) NCB_SCRIPTH0_PHYS (np, snoopend) +8);
+		return (0x40);
+	};
+	/*
+	**	Show results.
+	*/
+	if (host_wr != ncr_rd) {
+		printk ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
+			(int) host_wr, (int) ncr_rd);
+		err |= 1;
+	};
+	if (host_rd != ncr_wr) {
+		printk ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
+			(int) ncr_wr, (int) host_rd);
+		err |= 2;
+	};
+	if (ncr_bk != ncr_wr) {
+		printk ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
+			(int) ncr_wr, (int) ncr_bk);
+		err |= 4;
+	};
+	return (err);
+}
+
+/*==========================================================
+**
+** Determine the ncr's clock frequency.
+** This is essential for the negotiation
+** of the synchronous transfer rate.
+**
+**==========================================================
+**
+** Note: we have to return the correct value.
+** THERE IS NO SAFE DEFAULT VALUE.
+**
+** Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
+** 53C860 and 53C875 rev. 1 support fast20 transfers but
+** do not have a clock doubler and so are provided with a
+** 80 MHz clock. All other fast20 boards incorporate a doubler
+** and so should be delivered with a 40 MHz clock.
+** The recent fast40 chips (895/896/895A) and the
+** fast80 chip (C1010) use a 40 Mhz base clock
+** and provide a clock quadrupler (160 Mhz). The code below
+** tries to deal as cleverly as possible with all this stuff.
+**
+**----------------------------------------------------------
+*/
+
+/*
+ *	Select NCR SCSI clock frequency.
+ *
+ *	When a clock multiplier is present (multiplier >= 2), enable
+ *	it, wait for it to stabilize (the quadrupler on non-C1010
+ *	chips exposes a lock bit, LCKFRQ; otherwise a fixed 120 us
+ *	delay is used), then switch the SCSI core onto the multiplied
+ *	clock with the SCSI clock halted during the changeover.
+ */
+static void ncr_selectclock(ncb_p np, u_char scntl3)
+{
+	if (np->multiplier < 2) {
+		OUTB(nc_scntl3,	scntl3);
+		return;
+	}
+
+	if (bootverbose >= 2)
+		printk ("%s: enabling clock multiplier\n", ncr_name(np));
+
+	OUTB(nc_stest1, DBLEN);	   /* Enable clock multiplier		  */
+
+	if ( (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+		(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) &&
+				(np->multiplier > 2)) {
+		int i = 20;	/* Poll bit 5 of stest4 for quadrupler */
+		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
+			UDELAY (20);
+		if (!i)
+			printk("%s: the chip cannot lock the frequency\n",
+				ncr_name(np));
+
+	} else			/* Wait 120 micro-seconds for multiplier*/
+		UDELAY (120);
+
+	OUTB(nc_stest3, HSC);		/* Halt the scsi clock		*/
+	OUTB(nc_scntl3,	scntl3);
+	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
+	OUTB(nc_stest3, 0x00);		/* Restart scsi clock 		*/
+}
+
+
+/*
+ * calculate NCR SCSI clock frequency (in KHz)
+ */
+/*
+ *	Measure the chip's SCSI clock frequency and return it in KHz.
+ *	The general purpose timer is armed for a nominal (1<<gen)*125us
+ *	period and the elapsed time is counted in UDELAY()-based 1ms
+ *	steps; the frequency is then derived from the measured delay.
+ *	Returns 0 if the timer never fired (ms == 0 cannot happen here,
+ *	but the guard keeps the division safe).
+ */
+static unsigned __init ncrgetfreq (ncb_p np, int gen)
+{
+	unsigned int ms = 0;
+	unsigned int f;
+	int count;
+
+	/*
+	 * Measure GEN timer delay in order
+	 * to calculate SCSI clock frequency
+	 *
+	 * This code will never execute too
+	 * many loop iterations (if DELAY is
+	 * reasonably correct). It could get
+	 * too low a delay (too high a freq.)
+	 * if the CPU is slow executing the
+	 * loop for some reason (an NMI, for
+	 * example). For this reason we will
+	 * if multiple measurements are to be
+	 * performed trust the higher delay
+	 * (lower frequency returned).
+	 */
+	OUTW (nc_sien , 0x0);/* mask all scsi interrupts */
+	/* enable general purpose timer */
+	(void) INW (nc_sist); /* clear pending scsi interrupt */
+	OUTB (nc_dien , 0); /* mask all dma interrupts */
+	(void) INW (nc_sist); /* another one, just to be sure :) */
+	OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */
+	OUTB (nc_stime1, 0); /* disable general purpose timer */
+	OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
+	/* Temporary fix for udelay issue with Alpha
+	   platform */
+	/* Busy-wait in 1ms steps until the GEN interrupt shows up in
+	 * sist, bounded by a 100000 ms safety limit. */
+	while (!(INW(nc_sist) & GEN) && ms++ < 100000) {
+		/* count 1ms */
+		for (count = 0; count < 10; count++)
+			UDELAY (100);
+	}
+	OUTB (nc_stime1, 0); /* disable general purpose timer */
+	/*
+	 * set prescaler to divide by whatever 0 means
+	 * 0 ought to choose divide by 2, but appears
+	 * to set divide by 3.5 mode in my 53c810 ...
+	 */
+	OUTB (nc_scntl3, 0);
+
+	/*
+	 * adjust for prescaler, and convert into KHz
+	 * scale values derived empirically. C1010 uses
+	 * different dividers
+	 */
+#if 0
+	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010)
+		f = ms ? ((1 << gen) * 2866 ) / ms : 0;
+	else
+#endif
+		f = ms ? ((1 << gen) * 4340) / ms : 0;
+
+	if (bootverbose >= 2)
+		printk ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
+			ncr_name(np), gen, ms, f);
+
+	return f;
+}
+
+/*
+ *	Sample the SCSI clock frequency.  A first measurement is made
+ *	only to warm things up and is discarded; of the two real
+ *	samples, the lower (more pessimistic) one is returned.
+ */
+static unsigned __init ncr_getfreq (ncb_p np)
+{
+	const int gen = 11;
+	u_int first, second;
+
+	(void) ncrgetfreq (np, gen);	/* warm-up sample: discard */
+	first  = ncrgetfreq (np, gen);
+	second = ncrgetfreq (np, gen);
+
+	/* keep the lower of the two measurements */
+	return (first < second) ? first : second;
+}
+
+/*
+ * Get/probe NCR SCSI clock frequency
+ */
+/*
+ *	Determine the controller clock frequency (stored into
+ *	np->clock_khz) and the multiplier factor (np->multiplier),
+ *	using the BIOS-saved scntl3/stest1 values when they look
+ *	trustworthy and measuring with the GP timer otherwise.
+ *	'mult' is the multiplier the chip type is expected to have.
+ */
+static void __init ncr_getclock (ncb_p np, int mult)
+{
+	unsigned char scntl3 = np->sv_scntl3;
+	unsigned char stest1 = np->sv_stest1;
+	unsigned f1;
+
+	np->multiplier = 1;
+	f1 = 40000;
+
+	/*
+	** True with 875/895/896/895A with clock multiplier selected
+	*/
+	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+		if (bootverbose >= 2)
+			printk ("%s: clock multiplier found\n", ncr_name(np));
+		np->multiplier = mult;
+	}
+
+	/*
+	** If multiplier not found but a C1010, assume a mult of 4.
+	** If multiplier not found or scntl3 not 7,5,3,
+	** reset chip and get frequency from general purpose timer.
+	** Otherwise trust scntl3 BIOS setting.
+	*/
+	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+	    (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+		f1=40000;
+		np->multiplier = mult;
+		if (bootverbose >= 2)
+			printk ("%s: clock multiplier assumed\n", ncr_name(np));
+	}
+	else if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
+		f1 = ncr_getfreq (np);
+
+		if (bootverbose)
+			printk ("%s: NCR clock is %uKHz\n", ncr_name(np), f1);
+
+		/* Snap the measured value to the nearest expected
+		 * oscillator: below 55 MHz assume 40 MHz, else 80 MHz. */
+		if (f1 < 55000) f1 = 40000;
+		else f1 = 80000;
+
+		/*
+		** Suggest to also check the PCI clock frequency
+		** to make sure our frequency calculation algorithm
+		** is not too biased.
+		*/
+		/* Acceptable PCI clock window: nominal/80..nominal/40
+		 * scaled by the 55 MHz decision threshold; presumably
+		 * compared against ncr_getpciclock() elsewhere — not
+		 * visible in this chunk. */
+		if (np->features & FE_66MHZ) {
+			np->pciclock_min = (66000*55+80-1)/80;
+			np->pciclock_max = (66000*55)/40;
+		}
+		else {
+			np->pciclock_min = (33000*55+80-1)/80;
+			np->pciclock_max = (33000*55)/40;
+		}
+
+		if (f1 == 40000 && mult > 1) {
+			if (bootverbose >= 2)
+				printk ("%s: clock multiplier assumed\n", ncr_name(np));
+			np->multiplier = mult;
+		}
+	} else {
+		/* Trust the BIOS scntl3 divisor setting. */
+		if ((scntl3 & 7) == 3) f1 = 40000;
+		else if ((scntl3 & 7) == 5) f1 = 80000;
+		else f1 = 160000;
+
+		f1 /= np->multiplier;
+	}
+
+	/*
+	** Compute controller synchronous parameters.
+	*/
+	f1 *= np->multiplier;
+	np->clock_khz = f1;
+}
+
+/*
+ * Get/probe PCI clock frequency
+ */
+/*
+ *	Measure the PCI bus clock frequency (KHz) by temporarily routing
+ *	the PCI clock to the SCSI core (stest1 SCLK bit) and reusing the
+ *	generic frequency-measurement helper.  stest1 is restored before
+ *	returning.
+ *
+ *	Fix: 'f' was declared 'static', which made this __init function
+ *	needlessly non-reentrant and kept the variable alive after init;
+ *	a plain automatic variable is correct.
+ */
+static u_int __init ncr_getpciclock (ncb_p np)
+{
+	u_int f;
+
+	OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
+	f = ncr_getfreq (np);
+	OUTB (nc_stest1, 0);
+
+	return f;
+}
+
+/*===================== LINUX ENTRY POINTS SECTION ==========================*/
+
+#ifndef uchar
+#define uchar unsigned char
+#endif
+
+#ifndef ushort
+#define ushort unsigned short
+#endif
+
+#ifndef ulong
+#define ulong unsigned long
+#endif
+
+/* ---------------------------------------------------------------------
+**
+** Driver setup from the boot command line
+**
+** ---------------------------------------------------------------------
+*/
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+#define OPT_TAGS 1
+#define OPT_MASTER_PARITY 2
+#define OPT_SCSI_PARITY 3
+#define OPT_DISCONNECTION 4
+#define OPT_SPECIAL_FEATURES 5
+#define OPT_ULTRA_SCSI 6
+#define OPT_FORCE_SYNC_NEGO 7
+#define OPT_REVERSE_PROBE 8
+#define OPT_DEFAULT_SYNC 9
+#define OPT_VERBOSE 10
+#define OPT_DEBUG 11
+#define OPT_BURST_MAX 12
+#define OPT_LED_PIN 13
+#define OPT_MAX_WIDE 14
+#define OPT_SETTLE_DELAY 15
+#define OPT_DIFF_SUPPORT 16
+#define OPT_IRQM 17
+#define OPT_PCI_FIX_UP 18
+#define OPT_BUS_CHECK 19
+#define OPT_OPTIMIZE 20
+#define OPT_RECOVERY 21
+#define OPT_SAFE_SETUP 22
+#define OPT_USE_NVRAM 23
+#define OPT_EXCLUDE 24
+#define OPT_HOST_ID 25
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define OPT_IARB 26
+#endif
+
+/*
+ * Keyword table scanned by get_setup_token().  Each entry is a
+ * colon-terminated option name; get_setup_token() returns the
+ * 1-based position of the matching entry, so the order here must
+ * stay in lock-step with the OPT_* constants above ("tags:" is
+ * entry 1 == OPT_TAGS, and so on).
+ */
+static char setup_token[] __initdata =
+	"tags:"   "mpar:"
+	"spar:"   "disc:"
+	"specf:"  "ultra:"
+	"fsn:"    "revprob:"
+	"sync:"   "verb:"
+	"debug:"  "burst:"
+	"led:"    "wide:"
+	"settle:" "diff:"
+	"irqm:"   "pcifix:"
+	"buschk:" "optim:"
+	"recovery:"
+	"safe:"   "nvram:"
+	"excl:"   "hostid:"
+#ifdef SCSI_NCR_IARB_SUPPORT
+	"iarb:"
+#endif
+	;	/* DONNOT REMOVE THIS ';' */
+
+/*
+ * Note: ARG_SEP is defined once, earlier in this section (just
+ * before the OPT_* constants).  A second, byte-identical
+ * #ifdef MODULE / #define ARG_SEP block used to sit here; it was
+ * redundant and has been removed.
+ */
+
+/*
+ *	Look up 'p' in the setup_token keyword table.  Returns the
+ *	1-based index of the matching "name:" entry (which equals the
+ *	corresponding OPT_* constant), or 0 when no entry matches.
+ */
+static int __init get_setup_token(char *p)
+{
+	char *tok, *colon;
+	int index;
+
+	for (tok = setup_token, index = 1;
+	     tok != NULL && (colon = strchr(tok, ':')) != NULL;
+	     tok = colon, ++index) {
+		++colon;	/* include the ':' in the comparison */
+		if (strncmp(p, tok, colon - tok) == 0)
+			return index;
+	}
+	return 0;	/* not a recognised option name */
+}
+
+
+/*
+**	Parse the driver setup string ("sym53c8xx=..." boot option or
+**	module parameter).  Options have the form "name:value" and are
+**	separated by ARG_SEP; unknown options are reported and skipped.
+**	Always returns 1 (the value the __setup machinery expects).
+**
+**	Fix: 'pe' was left uninitialized when the value was the 'y'/'n'
+**	shortcut (simple_strtoul() never ran), yet the OPT_TAGS case
+**	dereferences it via "if (pe && *pe == '/')" — reading an
+**	uninitialized pointer is undefined behavior.  It is now
+**	initialized to NULL on every iteration.
+*/
+int __init sym53c8xx_setup(char *str)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+	char *cur = str;
+	char *pc, *pv;
+	unsigned long val;
+	int i, c;
+	int xi = 0;
+
+	while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+		char *pe = NULL;	/* end of numeric value, if one was parsed */
+
+		val = 0;
+		pv = pc;
+		c = *++pv;
+
+		if (c == 'n')
+			val = 0;
+		else if (c == 'y')
+			val = 1;
+		else
+			val = (int) simple_strtoul(pv, &pe, 0);
+
+		switch (get_setup_token(cur)) {
+		case OPT_TAGS:
+			driver_setup.default_tags = val;
+			/* Optional "/..." suffix: per-target/lun tag
+			 * control string, copied up to ARG_SEP. */
+			if (pe && *pe == '/') {
+				i = 0;
+				while (*pe && *pe != ARG_SEP &&
+					i < sizeof(driver_setup.tag_ctrl)-1) {
+					driver_setup.tag_ctrl[i++] = *pe++;
+				}
+				driver_setup.tag_ctrl[i] = '\0';
+			}
+			break;
+		case OPT_MASTER_PARITY:
+			driver_setup.master_parity = val;
+			break;
+		case OPT_SCSI_PARITY:
+			driver_setup.scsi_parity = val;
+			break;
+		case OPT_DISCONNECTION:
+			driver_setup.disconnection = val;
+			break;
+		case OPT_SPECIAL_FEATURES:
+			driver_setup.special_features = val;
+			break;
+		case OPT_ULTRA_SCSI:
+			driver_setup.ultra_scsi = val;
+			break;
+		case OPT_FORCE_SYNC_NEGO:
+			driver_setup.force_sync_nego = val;
+			break;
+		case OPT_REVERSE_PROBE:
+			driver_setup.reverse_probe = val;
+			break;
+		case OPT_DEFAULT_SYNC:
+			driver_setup.default_sync = val;
+			break;
+		case OPT_VERBOSE:
+			driver_setup.verbose = val;
+			break;
+		case OPT_DEBUG:
+			driver_setup.debug = val;
+			break;
+		case OPT_BURST_MAX:
+			driver_setup.burst_max = val;
+			break;
+		case OPT_LED_PIN:
+			driver_setup.led_pin = val;
+			break;
+		case OPT_MAX_WIDE:
+			driver_setup.max_wide = val? 1:0;
+			break;
+		case OPT_SETTLE_DELAY:
+			driver_setup.settle_delay = val;
+			break;
+		case OPT_DIFF_SUPPORT:
+			driver_setup.diff_support = val;
+			break;
+		case OPT_IRQM:
+			driver_setup.irqm = val;
+			break;
+		case OPT_PCI_FIX_UP:
+			driver_setup.pci_fix_up = val;
+			break;
+		case OPT_BUS_CHECK:
+			driver_setup.bus_check = val;
+			break;
+		case OPT_OPTIMIZE:
+			driver_setup.optimize = val;
+			break;
+		case OPT_RECOVERY:
+			driver_setup.recovery = val;
+			break;
+		case OPT_USE_NVRAM:
+			driver_setup.use_nvram = val;
+			break;
+		case OPT_SAFE_SETUP:
+			/* "safe:" replaces the whole setup with the
+			 * conservative defaults. */
+			memcpy(&driver_setup, &driver_safe_setup,
+				sizeof(driver_setup));
+			break;
+		case OPT_EXCLUDE:
+			if (xi < SCSI_NCR_MAX_EXCLUDES)
+				driver_setup.excludes[xi++] = val;
+			break;
+		case OPT_HOST_ID:
+			driver_setup.host_id = val;
+			break;
+#ifdef SCSI_NCR_IARB_SUPPORT
+		case OPT_IARB:
+			driver_setup.iarb = val;
+			break;
+#endif
+		default:
+			printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+			break;
+		}
+
+		if ((cur = strchr(cur, ARG_SEP)) != NULL)
+			++cur;
+	}
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+	return 1;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,13)
+#ifndef MODULE
+__setup("sym53c8xx=", sym53c8xx_setup);
+#endif
+#endif
+
+static int
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device);
+
+/*
+** Linux entry point for SYM53C8XX devices detection routine.
+**
+** Called by the middle-level scsi drivers at initialization time,
+** or at module installation.
+**
+** Read the PCI configuration and try to attach each
+** detected NCR board.
+**
+** If NVRAM is present, try to attach boards according to
+** the user-defined boot order.
+**
+** Returns the number of boards successfully attached.
+*/
+
+/*
+ *	Dump the current driver_setup values on two printk lines,
+ *	mirroring the boot-option names parsed by sym53c8xx_setup().
+ *	Called at detect time when initverbose >= 2.
+ */
+static void __init ncr_print_driver_setup(void)
+{
+#define YesNo(y)	y ? 'y' : 'n'
+	printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+		"burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
+		YesNo(driver_setup.disconnection),
+		driver_setup.special_features,
+		driver_setup.ultra_scsi,
+		driver_setup.default_tags,
+		driver_setup.default_sync,
+		driver_setup.burst_max,
+		YesNo(driver_setup.max_wide),
+		driver_setup.diff_support,
+		YesNo(driver_setup.reverse_probe),
+		driver_setup.bus_check);
+
+	printk (NAME53C8XX ": setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,"
+		"led:%c,settle:%d,irqm:0x%x,nvram:0x%x,pcifix:0x%x\n",
+		YesNo(driver_setup.master_parity),
+		YesNo(driver_setup.scsi_parity),
+		YesNo(driver_setup.force_sync_nego),
+		driver_setup.verbose,
+		driver_setup.debug,
+		YesNo(driver_setup.led_pin),
+		driver_setup.settle_delay,
+		driver_setup.irqm,
+		driver_setup.use_nvram,
+		driver_setup.pci_fix_up);
+#undef YesNo
+}
+
+/*===================================================================
+** SYM53C8XX devices description table and chip ids list.
+**===================================================================
+*/
+
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+static ushort ncr_chip_ids[] __initdata = SCSI_NCR_CHIP_IDS;
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+/*===================================================================
+** Detect all NCR PQS/PDS boards and keep track of their bus nr.
+**
+** The NCR PQS or PDS card is constructed as a DEC bridge
+** behind which sit a proprietary NCR memory controller and
+** four or two 53c875s as separate devices. In its usual mode
+** of operation, the 875s are slaved to the memory controller
+** for all transfers. We can tell if an 875 is part of a
+** PQS/PDS or not since if it is, it will be on the same bus
+** as the memory controller. To operate with the Linux
+** driver, the memory controller is disabled and the 875s
+** freed to function independently. The only wrinkle is that
+** the preset SCSI ID (which may be zero) must be read in from
+** a special configuration space register of the 875
+**===================================================================
+*/
+#define SCSI_NCR_MAX_PQS_BUS 16
+static int pqs_bus[SCSI_NCR_MAX_PQS_BUS] __initdata = { 0 };
+
+/*
+ *	Scan for NCR PQS/PDS memory controllers (PCI id 101a:0009) and
+ *	record the bus number of each in pqs_bus[] (terminated by -1).
+ *	Each controller is reconfigured so its slave 875 chips can be
+ *	driven individually (see the board description comment above).
+ */
+static void __init ncr_detect_pqs_pds(void)
+{
+	short index;
+	pcidev_t dev = PCIDEV_NULL;
+
+	for(index=0; index < SCSI_NCR_MAX_PQS_BUS; index++) {
+		u_char tmp;
+
+		dev = pci_find_device(0x101a, 0x0009, dev);
+		if (dev == PCIDEV_NULL) {
+			/* -1 terminates the list for later scans */
+			pqs_bus[index] = -1;
+			break;
+		}
+		printk(KERN_INFO NAME53C8XX ": NCR PQS/PDS memory controller detected on bus %d\n", PciBusNumber(dev));
+		/* Offsets 0x44/0x45 are vendor-specific controller
+		 * config registers (values per PQS/PDS documentation). */
+		pci_read_config_byte(dev, 0x44, &tmp);
+		/* bit 1: allow individual 875 configuration */
+		tmp |= 0x2;
+		pci_write_config_byte(dev, 0x44, tmp);
+		pci_read_config_byte(dev, 0x45, &tmp);
+		/* bit 2: drive individual 875 interrupts to the bus */
+		tmp |= 0x4;
+		pci_write_config_byte(dev, 0x45, tmp);
+
+		pqs_bus[index] = PciBusNumber(dev);
+	}
+}
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+/*===================================================================
+** Detect all 53c8xx hosts and then attach them.
+**
+** If we are using NVRAM, once all hosts are detected, we need to
+** check any NVRAM for boot order in case detect and boot order
+** differ and attach them using the order in the NVRAM.
+**
+** If no NVRAM is found or the data appears invalid, attach boards
+** in the order they are detected.
+**===================================================================
+*/
+/*
+ *	Mid-layer detect entry point.  Scans PCI for all supported 53c8xx
+ *	chips, optionally reads NVRAM, attaches boards (NVRAM boot order
+ *	first when a Symbios NVRAM is found), and returns the number of
+ *	boards successfully attached.
+ */
+int __init sym53c8xx_detect(Scsi_Host_Template *tpnt)
+{
+	pcidev_t pcidev;
+	int i, j, chips, hosts, count;
+	int attach_count = 0;
+	ncr_device *devtbl, *devp;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	ncr_nvram nvram0, nvram, *nvp;
+#endif
+
+	/*
+	** PCI is required.
+	*/
+	if (!pci_present())
+		return 0;
+
+	/*
+	** Initialize driver general stuff.
+	*/
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
+	tpnt->proc_dir = &proc_scsi_sym53c8xx;
+#else
+	tpnt->proc_name = NAME53C8XX;
+#endif
+	tpnt->proc_info = sym53c8xx_proc_info;
+#endif
+
+	/* Module parameter string, parsed with the same code as the
+	 * boot command line. */
+#if defined(SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT) && defined(MODULE)
+if (sym53c8xx)
+	sym53c8xx_setup(sym53c8xx);
+#endif
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+	ncr_debug = driver_setup.debug;
+#endif
+
+	if (initverbose >= 2)
+		ncr_print_driver_setup();
+
+	/*
+	** Allocate the device table since we donnot want to
+	** overflow the kernel stack.
+	** 1 x 4K PAGE is enough for more than 40 devices for i386.
+	*/
+	devtbl = m_calloc(PAGE_SIZE, "devtbl");
+	if (!devtbl)
+		return 0;
+
+	/*
+	** Detect all NCR PQS/PDS memory controllers.
+	*/
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+	ncr_detect_pqs_pds();
+#endif
+
+	/*
+	** Detect all 53c8xx hosts.
+	** Save the first Symbios NVRAM content if any
+	** for the boot order.
+	*/
+	chips = sizeof(ncr_chip_ids) / sizeof(ncr_chip_ids[0]);
+	hosts = PAGE_SIZE / sizeof(*devtbl);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	nvp = (driver_setup.use_nvram & 0x1) ? &nvram0 : 0;
+#endif
+	j = 0;
+	count = 0;
+	pcidev = PCIDEV_NULL;
+	while (1) {
+		char *msg = "";
+		if (count >= hosts)
+			break;
+		if (j >= chips)
+			break;
+		/* "revprob" option scans the chip id list backwards */
+		i = driver_setup.reverse_probe ? chips - 1 - j : j;
+		pcidev = pci_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+					 pcidev);
+		if (pcidev == PCIDEV_NULL) {
+			++j;
+			continue;
+		}
+		/* Some HW as the HP LH4 may report twice PCI devices */
+		for (i = 0; i < count ; i++) {
+			if (devtbl[i].slot.bus == PciBusNumber(pcidev) &&
+			    devtbl[i].slot.device_fn == PciDeviceFn(pcidev))
+				break;
+		}
+		if (i != count)	/* Ignore this device if we already have it */
+			continue;
+		devp = &devtbl[count];
+		devp->host_id = driver_setup.host_id;
+		devp->attach_done = 0;
+		if (sym53c8xx_pci_init(tpnt, pcidev, devp)) {
+			continue;
+		}
+		++count;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+		if (nvp) {
+			ncr_get_nvram(devp, nvp);
+			switch(nvp->type) {
+			case SCSI_NCR_SYMBIOS_NVRAM:
+				/*
+				 * Switch to the other nvram buffer, so that
+				 * nvram0 will contain the first Symbios
+				 * format NVRAM content with boot order.
+				 */
+				nvp = &nvram;
+				msg = "with Symbios NVRAM";
+				break;
+			case SCSI_NCR_TEKRAM_NVRAM:
+				msg = "with Tekram NVRAM";
+				break;
+			}
+		}
+#endif
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+		if (devp->pqs_pds)
+			msg = "(NCR PQS/PDS)";
+#endif
+		printk(KERN_INFO NAME53C8XX ": 53c%s detected %s\n",
+		       devp->chip.name, msg);
+	}
+
+	/*
+	** If we have found a SYMBIOS NVRAM, use first the NVRAM boot
+	** sequence as device boot order.
+	** check devices in the boot record against devices detected.
+	** attach devices if we find a match. boot table records that
+	** do not match any detected devices will be ignored.
+	** devices that do not match any boot table will not be attached
+	** here but will attempt to be attached during the device table
+	** rescan.
+	*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	/* NOTE(review): when nvp is non-NULL but no board was detected,
+	 * nvram0.type is read before anything wrote it — presumably
+	 * harmless since the loop below iterates zero times; confirm. */
+	if (!nvp || nvram0.type != SCSI_NCR_SYMBIOS_NVRAM)
+		goto next;
+	for (i = 0; i < 4; i++) {
+		Symbios_host *h = &nvram0.data.Symbios.host[i];
+		for (j = 0 ; j < count ; j++) {
+			devp = &devtbl[j];
+			if (h->device_fn != devp->slot.device_fn ||
+			    h->bus_nr != devp->slot.bus ||
+			    h->device_id != devp->chip.device_id)
+				continue;
+			if (devp->attach_done)
+				continue;
+			if (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) {
+				ncr_get_nvram(devp, nvp);
+				if (!ncr_attach (tpnt, attach_count, devp))
+					attach_count++;
+			}
+			else if (!(driver_setup.use_nvram & 0x80))
+				printk(KERN_INFO NAME53C8XX
+				       ": 53c%s state OFF thus not attached\n",
+				       devp->chip.name);
+			else
+				continue;
+
+			devp->attach_done = 1;
+			break;
+		}
+	}
+next:
+#endif
+
+	/*
+	** Rescan device list to make sure all boards attached.
+	** Devices without boot records will not be attached yet
+	** so try to attach them here.
+	*/
+	for (i= 0; i < count; i++) {
+		devp = &devtbl[i];
+		if (!devp->attach_done) {
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+			ncr_get_nvram(devp, nvp);
+#endif
+			if (!ncr_attach (tpnt, attach_count, devp))
+				attach_count++;
+		}
+	}
+
+	m_free(devtbl, PAGE_SIZE, "devtbl");
+
+	return attach_count;
+}
+
+/*===================================================================
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+**===================================================================
+*/
+/*
+**	Read and sanity-check the PCI configuration of one detected
+**	board, apply optional fix-ups, and fill in 'device' for the
+**	later ncr_attach().  Returns 0 on success, -1 when the board
+**	must be skipped (unsupported, excluded, SISL-driven, ...).
+**
+**	Fix: the C1010 Ultra3 downgrade used to do
+**	"chip->features |= ~FE_ULTRA3", which sets (almost) every
+**	feature bit instead of clearing FE_ULTRA3; it now uses
+**	"&= ~FE_ULTRA3", matching the other downgrade paths above.
+*/
+static int __init
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device)
+{
+	u_short vendor_id, device_id, command, status_reg;
+	u_char cache_line_size, latency_timer;
+	u_char suggested_cache_line_size = 0;
+	u_char pci_fix_up = driver_setup.pci_fix_up;
+	u_char revision;
+	u_int irq;
+	u_long base, base_2, io_port;
+	int i;
+	ncr_chip *chip;
+
+	printk(KERN_INFO NAME53C8XX ": at PCI bus %d, device %d, function %d\n",
+		PciBusNumber(pdev),
+		(int) (PciDeviceFn(pdev) & 0xf8) >> 3,
+		(int) (PciDeviceFn(pdev) & 7));
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+	if (!pci_dma_supported(pdev, (dma_addr_t) (0xffffffffUL))) {
+		printk(KERN_WARNING NAME53C8XX
+		       "32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
+		return -1;
+	}
+#endif
+
+	/*
+	** Read info from the PCI config space.
+	** pci_read_config_xxx() functions are assumed to be used for
+	** successfully detected PCI devices.
+	*/
+	vendor_id = PciVendorId(pdev);
+	device_id = PciDeviceId(pdev);
+	irq = PciIrqLine(pdev);
+	i = 0;
+	i = pci_get_base_address(pdev, i, &io_port);
+	i = pci_get_base_address(pdev, i, &base);
+	(void) pci_get_base_address(pdev, i, &base_2);
+
+	pci_read_config_word(pdev, PCI_COMMAND, &command);
+	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
+	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
+	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+	/*
+	** Match the BUS number for PQS/PDS devices.
+	** Read the SCSI ID from a special register mapped
+	** into the configuration space of the individual
+	** 875s. This register is set up by the PQS bios
+	*/
+	for(i = 0; i < SCSI_NCR_MAX_PQS_BUS && pqs_bus[i] != -1; i++) {
+		u_char tmp;
+		if (pqs_bus[i] == PciBusNumber(pdev)) {
+			pci_read_config_byte(pdev, 0x84, &tmp);
+			device->pqs_pds = 1;
+			device->host_id = tmp;
+			break;
+		}
+	}
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+	/*
+	** If user excludes this chip, donnot initialize it.
+	*/
+	for (i = 0 ; i < SCSI_NCR_MAX_EXCLUDES ; i++) {
+		if (driver_setup.excludes[i] ==
+				(io_port & PCI_BASE_ADDRESS_IO_MASK))
+			return -1;
+	}
+	/*
+	** Check if the chip is supported
+	*/
+	chip = 0;
+	for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+		if (device_id != ncr_chip_table[i].device_id)
+			continue;
+		if (revision > ncr_chip_table[i].revision_id)
+			continue;
+		if (!(ncr_chip_table[i].features & FE_LDSTR))
+			break;
+		chip = &device->chip;
+		memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+		chip->revision_id = revision;
+		break;
+	}
+
+	/*
+	** Ignore Symbios chips controlled by SISL RAID controller.
+	** This controller sets value 0x52414944 at RAM end - 16.
+	*/
+#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
+	if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+		unsigned int ram_size, ram_val;
+		u_long ram_ptr;
+
+		if (chip->features & FE_RAM8K)
+			ram_size = 8192;
+		else
+			ram_size = 4096;
+
+		ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+					ram_size);
+		if (ram_ptr) {
+			ram_val = readl_raw(ram_ptr + ram_size - 16);
+			unmap_pci_mem(ram_ptr, ram_size);
+			if (ram_val == 0x52414944) {
+				printk(NAME53C8XX": not initializing, "
+				       "driven by SISL RAID controller.\n");
+				return -1;
+			}
+		}
+	}
+#endif /* i386 and PCI MEMORY accessible */
+
+	if (!chip) {
+		printk(NAME53C8XX ": not initializing, device not supported\n");
+		return -1;
+	}
+
+#ifdef __powerpc__
+	/*
+	** Fix-up for power/pc.
+	** Should not be performed by the driver.
+	*/
+	if ((command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+	    != (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+		printk(NAME53C8XX ": setting%s%s...\n",
+		(command & PCI_COMMAND_IO)     ? "" : " PCI_COMMAND_IO",
+		(command & PCI_COMMAND_MEMORY) ? "" : " PCI_COMMAND_MEMORY");
+		command |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+		pci_write_config_word(pdev, PCI_COMMAND, command);
+	}
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+	if ( is_prep ) {
+		if (io_port >= 0x10000000) {
+			printk(NAME53C8XX ": reallocating io_port (Wacky IBM)");
+			io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+			pci_write_config_dword(pdev,
+					       PCI_BASE_ADDRESS_0, io_port);
+		}
+		if (base >= 0x10000000) {
+			printk(NAME53C8XX ": reallocating base (Wacky IBM)");
+			base = (base & 0x00FFFFFF) | 0x01000000;
+			pci_write_config_dword(pdev,
+					       PCI_BASE_ADDRESS_1, base);
+		}
+		if (base_2 >= 0x10000000) {
+			printk(NAME53C8XX ": reallocating base2 (Wacky IBM)");
+			base_2 = (base_2 & 0x00FFFFFF) | 0x01000000;
+			pci_write_config_dword(pdev,
+					       PCI_BASE_ADDRESS_2, base_2);
+		}
+	}
+#endif
+#endif	/* __powerpc__ */
+
+#if defined(__sparc__) && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0))
+	/*
+	** Fix-ups for sparc.
+	**
+	** I wrote: Should not be performed by the driver,
+	** Guy wrote: but how can OBP know each and every PCI card,
+	** if they don't use Fcode?
+	** I replied: no need to know each and every PCI card, just
+	** be skilled enough to understand the PCI specs.
+	*/
+
+	/*
+	** PCI configuration is based on configuration registers being
+	** coherent with hardware and software resource identifications.
+	** This is fairly simple, but seems still too complex for Sparc.
+	*/
+	base = __pa(base);
+	base_2 = __pa(base_2);
+
+	if (!cache_line_size)
+		suggested_cache_line_size = 16;
+
+	driver_setup.pci_fix_up |= 0x7;
+
+#endif	/* __sparc__ */
+
+#if defined(__i386__) && !defined(MODULE)
+	if (!cache_line_size) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+		extern char x86;
+		switch(x86) {
+#else
+		switch(boot_cpu_data.x86) {
+#endif
+		case 4:	suggested_cache_line_size = 4; break;
+		case 6:
+		case 5:	suggested_cache_line_size = 8; break;
+		}
+	}
+#endif	/* __i386__ */
+
+	/*
+	** Check availability of IO space, memory space.
+	** Enable master capability if not yet.
+	**
+	** We shouldn't have to care about the IO region when
+	** we are using MMIO. But calling check_region() from
+	** both the ncr53c8xx and the sym53c8xx drivers prevents
+	** from attaching devices from the both drivers.
+	** If you have a better idea, let me know.
+	*/
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+	if (!(command & PCI_COMMAND_IO)) {
+		printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
+			(long) io_port);
+		io_port = 0;
+	}
+#endif
+	if (!(command & PCI_COMMAND_MEMORY)) {
+		printk(NAME53C8XX ": PCI_COMMAND_MEMORY not set.\n");
+		base = 0;
+		base_2 = 0;
+	}
+	io_port &= PCI_BASE_ADDRESS_IO_MASK;
+	base &= PCI_BASE_ADDRESS_MEM_MASK;
+	base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+	if (io_port && check_region (io_port, 128)) {
+		printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
+			(long) io_port);
+		io_port = 0;
+	}
+	if (!io_port)
+		return -1;
+#endif
+#ifndef SCSI_NCR_IOMAPPED
+	if (!base) {
+		printk(NAME53C8XX ": MMIO base address disabled.\n");
+		return -1;
+	}
+#endif
+
+	/*
+	** Set MASTER capable and PARITY bit, if not yet.
+	*/
+	if ((command & (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY))
+	    != (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY)) {
+		printk(NAME53C8XX ": setting%s%s...(fix-up)\n",
+		(command & PCI_COMMAND_MASTER) ? "" : " PCI_COMMAND_MASTER",
+		(command & PCI_COMMAND_PARITY) ? "" : " PCI_COMMAND_PARITY");
+		command |= (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY);
+		pci_write_config_word(pdev, PCI_COMMAND, command);
+	}
+
+	/*
+	** Fix some features according to driver setup.
+	*/
+	if (!(driver_setup.special_features & 1))
+		chip->features &= ~FE_SPECIAL_SET;
+	else {
+		if (driver_setup.special_features & 2)
+			chip->features &= ~FE_WRIE;
+		if (driver_setup.special_features & 4)
+			chip->features &= ~FE_NOPM;
+	}
+
+	/*
+	** Work around for errant bit in 895A. The 66Mhz
+	** capable bit is set erroneously. Clear this bit.
+	** (Item 1 DEL 533)
+	**
+	** Make sure Config space and Features agree.
+	**
+	** Recall: writes are not normal to status register -
+	** write a 1 to clear and a 0 to leave unchanged.
+	** Can only reset bits.
+	*/
+	if (chip->features & FE_66MHZ) {
+		if (!(status_reg & PCI_STATUS_66MHZ))
+			chip->features &= ~FE_66MHZ;
+	}
+	else {
+		if (status_reg & PCI_STATUS_66MHZ) {
+			status_reg = PCI_STATUS_66MHZ;
+			pci_write_config_word(pdev, PCI_STATUS, status_reg);
+			pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+		}
+	}
+
+	/* Downgrade Ultra speeds according to the "ultra:" option. */
+	if (driver_setup.ultra_scsi < 3 && (chip->features & FE_ULTRA3)) {
+		chip->features |=  FE_ULTRA2;
+		chip->features &= ~FE_ULTRA3;
+	}
+	if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+		chip->features |=  FE_ULTRA;
+		chip->features &= ~FE_ULTRA2;
+	}
+	if (driver_setup.ultra_scsi < 1)
+		chip->features &= ~FE_ULTRA;
+
+	if (!driver_setup.max_wide)
+		chip->features &= ~FE_WIDE;
+
+	/*
+	 * C1010 Ultra3 support requires 16 bit data transfers.
+	 */
+	if (!driver_setup.max_wide && (chip->features & FE_ULTRA3)) {
+		chip->features |=  FE_ULTRA2;
+		/* Bug fix: was "|= ~FE_ULTRA3", which set nearly all
+		 * feature bits instead of clearing FE_ULTRA3. */
+		chip->features &= ~FE_ULTRA3;
+	}
+
+	/*
+	** Some features are required to be enabled in order to
+	** work around some chip problems. :) ;)
+	** (ITEM 12 of a DEL about the 896 I haven't yet).
+	** We must ensure the chip will use WRITE AND INVALIDATE.
+	** The revision number limit is for now arbitrary.
+	*/
+	if (device_id == PCI_DEVICE_ID_NCR_53C896 && revision <= 0x10) {
+		chip->features |= (FE_WRIE | FE_CLSE);
+		pci_fix_up |= 3;	/* Force appropriate PCI fix-up */
+	}
+
+#ifdef	SCSI_NCR_PCI_FIX_UP_SUPPORT
+	/*
+	** Try to fix up PCI config according to wished features.
+	*/
+	if ((pci_fix_up & 1) && (chip->features & FE_CLSE) &&
+	    !cache_line_size && suggested_cache_line_size) {
+		cache_line_size = suggested_cache_line_size;
+		pci_write_config_byte(pdev,
+				      PCI_CACHE_LINE_SIZE, cache_line_size);
+		printk(NAME53C8XX ": PCI_CACHE_LINE_SIZE set to %d (fix-up).\n",
+			cache_line_size);
+	}
+
+	if ((pci_fix_up & 2) && cache_line_size &&
+	    (chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+		printk(NAME53C8XX": setting PCI_COMMAND_INVALIDATE (fix-up)\n");
+		command |= PCI_COMMAND_INVALIDATE;
+		pci_write_config_word(pdev, PCI_COMMAND, command);
+	}
+
+	/*
+	** Tune PCI LATENCY TIMER according to burst max length transfer.
+	** (latency timer >= burst length + 6, we add 10 to be quite sure)
+	*/
+
+	if (chip->burst_max && (latency_timer == 0 || (pci_fix_up & 4))) {
+		uchar lt = (1 << chip->burst_max) + 6 + 10;
+		if (latency_timer < lt) {
+			printk(NAME53C8XX
+			       ": changing PCI_LATENCY_TIMER from %d to %d.\n",
+			       (int) latency_timer, (int) lt);
+			latency_timer = lt;
+			pci_write_config_byte(pdev,
+					      PCI_LATENCY_TIMER, latency_timer);
+		}
+	}
+
+#endif	/* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+	/*
+	** Initialise ncr_device structure with items required by ncr_attach.
+	*/
+	device->pdev		= pdev;
+	device->slot.bus	= PciBusNumber(pdev);
+	device->slot.device_fn	= PciDeviceFn(pdev);
+	device->slot.base	= base;
+	device->slot.base_2	= base_2;
+	device->slot.io_port	= io_port;
+	device->slot.irq	= irq;
+	device->attach_done	= 0;
+
+	return 0;
+}
+
+
+/*===================================================================
+** Detect and try to read SYMBIOS and TEKRAM NVRAM.
+**
+** Data can be used to order booting of boards.
+**
+** Data is saved in ncr_device structure if NVRAM found. This
+** is then used to find drive boot order for ncr_attach().
+**
+** NVRAM data is passed to Scsi_Host_Template later during
+** ncr_attach() for any device set up.
+*===================================================================
+*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+ *	Try to read the board's NVRAM into *nvp and record the NVRAM
+ *	type (Symbios, Tekram, or none).  devp->nvram is pointed at nvp
+ *	on success and reset to 0 when no NVRAM is found; when nvp is
+ *	NULL the function only clears devp->nvram.  Chip registers are
+ *	mapped (IO or MMIO) for the duration of the read only.
+ */
+static void __init ncr_get_nvram(ncr_device *devp, ncr_nvram *nvp)
+{
+	devp->nvram = nvp;
+	if (!nvp)
+		return;
+	/*
+	** Get access to chip IO registers
+	*/
+#ifdef SCSI_NCR_IOMAPPED
+	request_region(devp->slot.io_port, 128, NAME53C8XX);
+	devp->slot.base_io = devp->slot.io_port;
+#else
+	devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+	if (!devp->slot.reg)
+		return;
+#endif
+
+	/*
+	** Try to read SYMBIOS nvram.
+	** Try to read TEKRAM nvram if Symbios nvram not found.
+	*/
+	if (!sym_read_Symbios_nvram(&devp->slot, &nvp->data.Symbios))
+		nvp->type = SCSI_NCR_SYMBIOS_NVRAM;
+	else if (!sym_read_Tekram_nvram(&devp->slot, devp->chip.device_id,
+					&nvp->data.Tekram))
+		nvp->type = SCSI_NCR_TEKRAM_NVRAM;
+	else {
+		/* Neither format found: mark the device as NVRAM-less. */
+		nvp->type = 0;
+		devp->nvram = 0;
+	}
+
+	/*
+	** Release access to chip IO registers
+	*/
+#ifdef SCSI_NCR_IOMAPPED
+	release_region(devp->slot.base_io, 128);
+#else
+	unmap_pci_mem((u_long) devp->slot.reg, 128ul);
+#endif
+
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Linux select queue depths function
+*/
+
+#define DEF_DEPTH (driver_setup.default_tags)
+#define ALL_TARGETS -2
+#define NO_TARGET -1
+#define ALL_LUNS -2
+#define NO_LUN -1
+
+/*
+ *	Compute the queue depth for (target, lun) from the user supplied
+ *	tag control string (driver_setup.tag_ctrl, set via the "tags:"
+ *	boot option).  As parsed below, the string grammar is:
+ *	  '/'     advance to the next host section (h), reset target/lun
+ *	  't'<v>  restrict the following entries to target v
+ *	  'u'<v>  restrict the following entries to lun v
+ *	  'q'<v>  if host/target/lun all match, return depth v
+ *	  '-'     reset back to all targets / all luns
+ *	Returns DEF_DEPTH when no matching 'q' entry is found.
+ */
+static int device_queue_depth(ncb_p np, int target, int lun)
+{
+	int c, h, t, u, v;
+	char *p = driver_setup.tag_ctrl;
+	char *ep;
+
+	h = -1;			/* host index; first '/' makes it 0 */
+	t = NO_TARGET;
+	u = NO_LUN;
+	while ((c = *p++) != 0) {
+		v = simple_strtoul(p, &ep, 0);
+		switch(c) {
+		case '/':
+			++h;
+			t = ALL_TARGETS;
+			u = ALL_LUNS;
+			break;
+		case 't':
+			if (t != target)
+				t = (target == v) ? v : NO_TARGET;
+			u = ALL_LUNS;
+			break;
+		case 'u':
+			if (u != lun)
+				u = (lun == v) ? v : NO_LUN;
+			break;
+		case 'q':
+			if (h == np->unit &&
+				(t == ALL_TARGETS || t == target) &&
+				(u == ALL_LUNS    || u == lun))
+				return v;
+			break;
+		case '-':
+			t = ALL_TARGETS;
+			u = ALL_LUNS;
+			break;
+		default:
+			break;
+		}
+		p = ep;		/* resume after the parsed number */
+	}
+	return DEF_DEPTH;
+}
+
+static void sym53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist)
+{
+ struct scsi_device *device;
+
+ for (device = devlist; device; device = device->next) {
+ ncb_p np;
+ tcb_p tp;
+ lcb_p lp;
+ int numtags;
+
+ if (device->host != host)
+ continue;
+
+ np = ((struct host_data *) host->hostdata)->ncb;
+ tp = &np->target[device->id];
+ lp = ncr_lp(np, tp, device->lun);
+
+ /*
+ ** Select queue depth from driver setup.
+ ** Donnot use more than configured by user.
+ ** Use at least 2.
+ ** Donnot use more than our maximum.
+ */
+ numtags = device_queue_depth(np, device->id, device->lun);
+ if (numtags > tp->usrtags)
+ numtags = tp->usrtags;
+ if (!device->tagged_supported)
+ numtags = 1;
+ device->queue_depth = numtags;
+ if (device->queue_depth < 2)
+ device->queue_depth = 2;
+ if (device->queue_depth > MAX_TAGS)
+ device->queue_depth = MAX_TAGS;
+
+ /*
+ ** Since the queue depth is not tunable under Linux,
+ ** we need to know this value in order not to
+ ** announce stupid things to user.
+ */
+ if (lp) {
+ lp->numtags = lp->maxtags = numtags;
+ lp->scdev_depth = device->queue_depth;
+ }
+ ncr_setup_tags (np, device->id, device->lun);
+
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
+ np->unit, device->id, device->lun, device->queue_depth);
+#endif
+ }
+}
+
+/*
+** Linux entry point for info() function
+*/
+const char *sym53c8xx_info (struct Scsi_Host *host)
+{
+ return SCSI_NCR_DRIVER_NAME;
+}
+
+/*
+** Linux entry point of queuecommand() function
+*/
+
+int sym53c8xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+ ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+ unsigned long flags;
+ int sts;
+
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx_queue_command\n");
+#endif
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.buffer = NULL;
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ cmd->__data_mapped = 0;
+ cmd->__data_mapping = 0;
+#endif
+
+ NCR_LOCK_NCB(np, flags);
+
+ if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
+ SetScsiResult(cmd, sts, 0);
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx : command not queued - result=%d\n", sts);
+#endif
+ }
+#ifdef DEBUG_SYM53C8XX
+ else
+printk("sym53c8xx : command successfully queued\n");
+#endif
+
+ NCR_UNLOCK_NCB(np, flags);
+
+ if (sts != DID_OK) {
+ unmap_scsi_data(np, cmd);
+ done(cmd);
+ }
+
+ return sts;
+}
+
+/*
+** Linux entry point of the interrupt handler.
+** Since linux versions > 1.3.70, we trust the kernel for
+** passing the internal host descriptor as 'dev_id'.
+** Otherwise, we scan the host list and call the interrupt
+** routine for each host that uses this IRQ.
+*/
+
+static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs)
+{
+ unsigned long flags;
+ ncb_p np = (ncb_p) dev_id;
+ Scsi_Cmnd *done_list;
+
+#ifdef DEBUG_SYM53C8XX
+ printk("sym53c8xx : interrupt received\n");
+#endif
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("[");
+
+ NCR_LOCK_NCB(np, flags);
+ ncr_exception(np);
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
+
+ if (done_list) {
+ NCR_LOCK_SCSI_DONE(np, flags);
+ ncr_flush_done_cmds(done_list);
+ NCR_UNLOCK_SCSI_DONE(np, flags);
+ }
+}
+
+/*
+** Linux entry point of the timer handler
+*/
+
+static void sym53c8xx_timeout(unsigned long npref)
+{
+ ncb_p np = (ncb_p) npref;
+ unsigned long flags;
+ Scsi_Cmnd *done_list;
+
+ NCR_LOCK_NCB(np, flags);
+ ncr_timeout((ncb_p) np);
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ if (done_list) {
+ NCR_LOCK_SCSI_DONE(np, flags);
+ ncr_flush_done_cmds(done_list);
+ NCR_UNLOCK_SCSI_DONE(np, flags);
+ }
+}
+
+/*
+** Linux entry point of reset() function
+*/
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+int sym53c8xx_reset(Scsi_Cmnd *cmd, unsigned int reset_flags)
+#else
+int sym53c8xx_reset(Scsi_Cmnd *cmd)
+#endif
+{
+ ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+ int sts;
+ unsigned long flags;
+ Scsi_Cmnd *done_list;
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ printk("sym53c8xx_reset: pid=%lu reset_flags=%x serial_number=%ld serial_number_at_timeout=%ld\n",
+ cmd->pid, reset_flags, cmd->serial_number, cmd->serial_number_at_timeout);
+#else
+ printk("sym53c8xx_reset: command pid %lu\n", cmd->pid);
+#endif
+
+ NCR_LOCK_NCB(np, flags);
+
+ /*
+ * We have to just ignore reset requests in some situations.
+ */
+#if defined SCSI_RESET_NOT_RUNNING
+ if (cmd->serial_number != cmd->serial_number_at_timeout) {
+ sts = SCSI_RESET_NOT_RUNNING;
+ goto out;
+ }
+#endif
+ /*
+ * If the mid-level driver told us reset is synchronous, it seems
+ * that we must call the done() callback for the involved command,
+ * even if this command was not queued to the low-level driver,
+ * before returning SCSI_RESET_SUCCESS.
+ */
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ sts = ncr_reset_bus(np, cmd,
+ (reset_flags & (SCSI_RESET_SYNCHRONOUS | SCSI_RESET_ASYNCHRONOUS)) == SCSI_RESET_SYNCHRONOUS);
+#else
+ sts = ncr_reset_bus(np, cmd, 0);
+#endif
+
+ /*
+ * Since we always reset the controller, when we return success,
+ * we add this information to the return code.
+ */
+#if defined SCSI_RESET_HOST_RESET
+ if (sts == SCSI_RESET_SUCCESS)
+ sts |= SCSI_RESET_HOST_RESET;
+#endif
+
+out:
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ ncr_flush_done_cmds(done_list);
+
+ return sts;
+}
+
+/*
+** Linux entry point of abort() function
+*/
+
+int sym53c8xx_abort(Scsi_Cmnd *cmd)
+{
+ ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+ int sts;
+ unsigned long flags;
+ Scsi_Cmnd *done_list;
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ printk("sym53c8xx_abort: pid=%lu serial_number=%ld serial_number_at_timeout=%ld\n",
+ cmd->pid, cmd->serial_number, cmd->serial_number_at_timeout);
+#else
+ printk("sym53c8xx_abort: command pid %lu\n", cmd->pid);
+#endif
+
+ NCR_LOCK_NCB(np, flags);
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ /*
+ * We have to just ignore abort requests in some situations.
+ */
+ if (cmd->serial_number != cmd->serial_number_at_timeout) {
+ sts = SCSI_ABORT_NOT_RUNNING;
+ goto out;
+ }
+#endif
+
+ sts = ncr_abort_command(np, cmd);
+out:
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ ncr_flush_done_cmds(done_list);
+
+ return sts;
+}
+
+
+#ifdef MODULE
+int sym53c8xx_release(struct Scsi_Host *host)
+{
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx : release\n");
+#endif
+ ncr_detach(((struct host_data *) host->hostdata)->ncb);
+
+ return 1;
+}
+#endif
+
+
+/*
+** Scsi command waiting list management.
+**
+** It may happen that we cannot insert a scsi command into the start queue,
+** in the following circumstances.
+** Too few preallocated ccb(s),
+** maxtags < cmd_per_lun of the Linux host control block,
+** etc...
+** Such scsi commands are inserted into a waiting list.
+** When a scsi command complete, we try to requeue the commands of the
+** waiting list.
+*/
+
+#define next_wcmd host_scribble
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd *wcmd;
+
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ cmd->next_wcmd = 0;
+ if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
+ else {
+ while ((wcmd->next_wcmd) != 0)
+ wcmd = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = (char *) cmd;
+ }
+}
+
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd **pcmd = &np->waiting_list;
+
+ while (*pcmd) {
+ if (cmd == *pcmd) {
+ if (to_remove) {
+ *pcmd = (Scsi_Cmnd *) cmd->next_wcmd;
+ cmd->next_wcmd = 0;
+ }
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ return cmd;
+ }
+ pcmd = (Scsi_Cmnd **) &(*pcmd)->next_wcmd;
+ }
+ return 0;
+}
+
+static void process_waiting_list(ncb_p np, int sts)
+{
+ Scsi_Cmnd *waiting_list, *wcmd;
+
+ waiting_list = np->waiting_list;
+ np->waiting_list = 0;
+
+#ifdef DEBUG_WAITING_LIST
+ if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
+#endif
+ while ((wcmd = waiting_list) != 0) {
+ waiting_list = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = 0;
+ if (sts == DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
+#endif
+ sts = ncr_queue_command(np, wcmd);
+ }
+ if (sts != DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
+#endif
+ SetScsiResult(wcmd, sts, 0);
+ ncr_queue_done_cmd(np, wcmd);
+ }
+ }
+}
+
+#undef next_wcmd
+
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+
+/*=========================================================================
+** Proc file system stuff
+**
+** A read operation returns adapter information.
+** A write operation is a control command.
+** The string is parsed in the driver code and the command is passed
+** to the ncr_usercmd() function.
+**=========================================================================
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+#define digit_to_bin(c) ((c) - '0')
+#define is_space(c) ((c) == ' ' || (c) == '\t')
+
+static int skip_spaces(char *ptr, int len)
+{
+ int cnt, c;
+
+ for (cnt = len; cnt > 0 && (c = *ptr++) && is_space(c); cnt--);
+
+ return (len - cnt);
+}
+
+static int get_int_arg(char *ptr, int len, u_long *pv)
+{
+ int cnt, c;
+ u_long v;
+
+ for (v = 0, cnt = len; cnt > 0 && (c = *ptr++) && is_digit(c); cnt--) {
+ v = (v * 10) + digit_to_bin(c);
+ }
+
+ if (pv)
+ *pv = v;
+
+ return (len - cnt);
+}
+
+static int is_keyword(char *ptr, int len, char *verb)
+{
+ int verb_len = strlen(verb);
+
+ if (len >= strlen(verb) && !memcmp(verb, ptr, verb_len))
+ return verb_len;
+ else
+ return 0;
+
+}
+
+#define SKIP_SPACES(min_spaces) \
+ if ((arg_len = skip_spaces(ptr, len)) < (min_spaces)) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+#define GET_INT_ARG(v) \
+ if (!(arg_len = get_int_arg(ptr, len, &(v)))) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+
+/*
+** Parse a control command
+*/
+
+static int ncr_user_command(ncb_p np, char *buffer, int length)
+{
+ char *ptr = buffer;
+ int len = length;
+ struct usrcmd *uc = &np->user;
+ int arg_len;
+ u_long target;
+
+ bzero(uc, sizeof(*uc));
+
+ if (len > 0 && ptr[len-1] == '\n')
+ --len;
+
+ if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
+ uc->cmd = UC_SETSYNC;
+ else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
+ uc->cmd = UC_SETTAGS;
+ else if ((arg_len = is_keyword(ptr, len, "setorder")) != 0)
+ uc->cmd = UC_SETORDER;
+ else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
+ uc->cmd = UC_SETVERBOSE;
+ else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
+ uc->cmd = UC_SETWIDE;
+ else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
+ uc->cmd = UC_SETDEBUG;
+ else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
+ uc->cmd = UC_SETFLAG;
+ else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
+ uc->cmd = UC_RESETDEV;
+ else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
+ uc->cmd = UC_CLEARDEV;
+ else
+ arg_len = 0;
+
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
+#endif
+
+ if (!arg_len)
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+
+ switch(uc->cmd) {
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ case UC_SETFLAG:
+ case UC_RESETDEV:
+ case UC_CLEARDEV:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
+ ptr += arg_len; len -= arg_len;
+ uc->target = ~0;
+ } else {
+ GET_INT_ARG(target);
+ uc->target = (1<<target);
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: target=%ld\n", target);
+#endif
+ }
+ break;
+ }
+
+ switch(uc->cmd) {
+ case UC_SETVERBOSE:
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ SKIP_SPACES(1);
+ GET_INT_ARG(uc->data);
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETORDER:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "simple")))
+ uc->data = M_SIMPLE_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "ordered")))
+ uc->data = M_ORDERED_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "default")))
+ uc->data = 0;
+ else
+ return -EINVAL;
+ break;
+ case UC_SETDEBUG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "alloc")))
+ uc->data |= DEBUG_ALLOC;
+ else if ((arg_len = is_keyword(ptr, len, "phase")))
+ uc->data |= DEBUG_PHASE;
+ else if ((arg_len = is_keyword(ptr, len, "queue")))
+ uc->data |= DEBUG_QUEUE;
+ else if ((arg_len = is_keyword(ptr, len, "result")))
+ uc->data |= DEBUG_RESULT;
+ else if ((arg_len = is_keyword(ptr, len, "pointer")))
+ uc->data |= DEBUG_POINTER;
+ else if ((arg_len = is_keyword(ptr, len, "script")))
+ uc->data |= DEBUG_SCRIPT;
+ else if ((arg_len = is_keyword(ptr, len, "tiny")))
+ uc->data |= DEBUG_TINY;
+ else if ((arg_len = is_keyword(ptr, len, "timing")))
+ uc->data |= DEBUG_TIMING;
+ else if ((arg_len = is_keyword(ptr, len, "nego")))
+ uc->data |= DEBUG_NEGO;
+ else if ((arg_len = is_keyword(ptr, len, "tags")))
+ uc->data |= DEBUG_TAGS;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETFLAG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "trace")))
+ uc->data |= UF_TRACE;
+ else if ((arg_len = is_keyword(ptr, len, "no_disc")))
+ uc->data |= UF_NODISC;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (len)
+ return -EINVAL;
+ else {
+ long flags;
+
+ NCR_LOCK_NCB(np, flags);
+ ncr_usercmd (np);
+ NCR_UNLOCK_NCB(np, flags);
+ }
+ return length;
+}
+
+#endif /* SCSI_NCR_USER_COMMAND_SUPPORT */
+
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+struct info_str
+{
+ char *buffer;
+ int length;
+ int offset;
+ int pos;
+};
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+ if (info->pos + len > info->length)
+ len = info->length - info->pos;
+
+ if (info->pos + len < info->offset) {
+ info->pos += len;
+ return;
+ }
+ if (info->pos < info->offset) {
+ data += (info->offset - info->pos);
+ len -= (info->offset - info->pos);
+ }
+
+ if (len > 0) {
+ memcpy(info->buffer + info->pos, data, len);
+ info->pos += len;
+ }
+}
+
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+ va_list args;
+ char buf[81];
+ int len;
+
+ va_start(args, fmt);
+ len = vsprintf(buf, fmt, args);
+ va_end(args);
+
+ copy_mem_info(info, buf, len);
+ return len;
+}
+
+/*
+** Copy formatted information into the input buffer.
+*/
+
+static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len)
+{
+ struct info_str info;
+#ifdef CONFIG_ALL_PPC
+ struct device_node* of_node;
+#endif
+
+ info.buffer = ptr;
+ info.length = len;
+ info.offset = offset;
+ info.pos = 0;
+
+ copy_info(&info, "General information:\n");
+ copy_info(&info, " Chip " NAME53C "%s, device id 0x%x, "
+ "revision id 0x%x\n",
+ np->chip_name, np->device_id, np->revision_id);
+ copy_info(&info, " On PCI bus %d, device %d, function %d, "
+#ifdef __sparc__
+ "IRQ %s\n",
+#else
+ "IRQ %d\n",
+#endif
+ np->bus, (np->device_fn & 0xf8) >> 3, np->device_fn & 7,
+#ifdef __sparc__
+ __irq_itoa(np->irq));
+#else
+ (int) np->irq);
+#endif
+#ifdef CONFIG_ALL_PPC
+ of_node = find_pci_device_OFnode(np->bus, np->device_fn);
+ if (of_node && of_node->full_name)
+ copy_info(&info, "PPC OpenFirmware path : %s\n", of_node->full_name);
+#endif
+ copy_info(&info, " Synchronous period factor %d, "
+ "max commands per lun %d\n",
+ (int) np->minsync, MAX_TAGS);
+
+ if (driver_setup.debug || driver_setup.verbose > 1) {
+ copy_info(&info, " Debug flags 0x%x, verbosity level %d\n",
+ driver_setup.debug, driver_setup.verbose);
+ }
+
+ return info.pos > info.offset? info.pos - info.offset : 0;
+}
+
+#endif /* SCSI_NCR_USER_INFO_SUPPORT */
+
+/*
+** Entry point of the scsi proc fs of the driver.
+** - func = 0 means read (returns adapter infos)
+** - func = 1 means write (parse user control command)
+*/
+
+static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func)
+{
+ struct Scsi_Host *host;
+ struct host_data *host_data;
+ ncb_p ncb = 0;
+ int retv;
+
+#ifdef DEBUG_PROC_INFO
+printk("sym53c8xx_proc_info: hostno=%d, func=%d\n", hostno, func);
+#endif
+
+ for (host = first_host; host; host = host->next) {
+ if (host->hostt != first_host->hostt)
+ continue;
+ if (host->host_no == hostno) {
+ host_data = (struct host_data *) host->hostdata;
+ ncb = host_data->ncb;
+ break;
+ }
+ }
+
+ if (!ncb)
+ return -EINVAL;
+
+ if (func) {
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+ retv = ncr_user_command(ncb, buffer, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+ else {
+ if (start)
+ *start = buffer;
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+ retv = ncr_host_info(ncb, buffer, offset, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+
+ return retv;
+}
+
+
+/*=========================================================================
+** End of proc file system stuff
+**=========================================================================
+*/
+#endif
+
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+ * 24C16 EEPROM reading.
+ *
+ * GPOI0 - data in/data out
+ * GPIO1 - clock
+ * Symbios NVRAM wiring now also used by Tekram.
+ */
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ * Set/clear data/clock bit in GPIO0
+ */
+static void __init
+S24C16_set_bit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+{
+ UDELAY (5);
+ switch (bit_mode){
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (5);
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+static void __init S24C16_start(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void __init S24C16_stop(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+static void __init
+S24C16_do_bit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+{
+ S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB (nc_gpreg);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+static void __init
+S24C16_write_ack(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl & 0xfe);
+ S24C16_do_bit(np, 0, write_bit, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+static void __init
+S24C16_read_ack(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl | 0x01);
+ S24C16_do_bit(np, read_bit, 1, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+static void __init
+S24C16_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+
+ for (x = 0; x < 8; x++)
+ S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+static void __init
+S24C16_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+ u_char read_bit;
+
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ S24C16_do_bit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Read 'len' bytes starting at 'offset'.
+ */
+static int __init
+sym_read_S24C16_nvram (ncr_slot *np, int offset, u_char *data, int len)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_char ack_data;
+ int retv = 1;
+ int x;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+ gpcntl = old_gpcntl & 0xfc;
+
+ /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+ OUTB (nc_gpreg, old_gpreg);
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ S24C16_stop(np, &gpreg);
+
+ /* activate NVRAM */
+ S24C16_start(np, &gpreg);
+
+ /* write device code and random address MSB */
+ S24C16_write_byte(np, &ack_data,
+ 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* write random address LSB */
+ S24C16_write_byte(np, &ack_data,
+ offset & 0xff, &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* regenerate START state to set up for reading */
+ S24C16_start(np, &gpreg);
+
+ /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+ S24C16_write_byte(np, &ack_data,
+ 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* now set up GPIO0 for inputting data */
+ gpcntl |= 0x01;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all requested data - only part of total NVRAM */
+ for (x = 0; x < len; x++)
+ S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+ /* finally put NVRAM back in inactive mode */
+ gpcntl &= 0xfe;
+ OUTB (nc_gpcntl, gpcntl);
+ S24C16_stop(np, &gpreg);
+ retv = 0;
+out:
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ * Try reading Symbios NVRAM.
+ * Return 0 if OK.
+ */
+static int __init sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+{
+ static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ /* probe the 24c16 and read the SYMBIOS 24c16 area */
+ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+ return 1;
+
+ /* check valid NVRAM signature, verify byte count and checksum */
+ if (nvram->type != 0 ||
+ memcmp(nvram->trailer, Symbios_trailer, 6) ||
+ nvram->byte_count != len - 12)
+ return 1;
+
+ /* verify checksum */
+ for (x = 6, csum = 0; x < len - 6; x++)
+ csum += data[x];
+ if (csum != nvram->checksum)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * 93C46 EEPROM reading.
+ *
+ * GPOI0 - data in
+ * GPIO1 - data out
+ * GPIO2 - clock
+ * GPIO4 - chip select
+ *
+ * Used by Tekram.
+ */
+
+/*
+ * Pulse clock bit in GPIO0
+ */
+static void __init T93C46_Clk(ncr_slot *np, u_char *gpreg)
+{
+ OUTB (nc_gpreg, *gpreg | 0x04);
+ UDELAY (2);
+ OUTB (nc_gpreg, *gpreg);
+}
+
+/*
+ * Read bit from NVRAM
+ */
+static void __init T93C46_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+{
+ UDELAY (2);
+ T93C46_Clk(np, gpreg);
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Write bit to GPIO0
+ */
+static void __init T93C46_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+{
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ *gpreg |= 0x10;
+
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void __init T93C46_Stop(ncr_slot *np, u_char *gpreg)
+{
+ *gpreg &= 0xef;
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+static void __init
+T93C46_Send_Command(ncr_slot *np, u_short write_data,
+ u_char *read_bit, u_char *gpreg)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * READ 2 bytes from the NVRAM
+ */
+static void __init
+T93C46_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+{
+ int x;
+ u_char read_bit;
+
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ T93C46_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read Tekram NvRAM data.
+ */
+static int __init
+T93C46_Read_Data(ncr_slot *np, u_short *data,int len,u_char *gpreg)
+{
+ u_char read_bit;
+ int x;
+
+ for (x = 0; x < len; x++) {
+
+ /* output read command and address */
+ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 1; /* Bad */
+ T93C46_Read_Word(np, &data[x], gpreg);
+ T93C46_Stop(np, gpreg);
+ }
+
+ return 0;
+}
+
+/*
+ * Try reading 93C46 Tekram NVRAM.
+ */
+static int __init
+sym_read_T93C46_nvram (ncr_slot *np, Tekram_nvram *nvram)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ int retv = 1;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+
+ /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
+ 1/2/4 out */
+ gpreg = old_gpreg & 0xe9;
+ OUTB (nc_gpreg, gpreg);
+ gpcntl = (old_gpcntl & 0xe9) | 0x09;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all of NVRAM, 64 words */
+ retv = T93C46_Read_Data(np, (u_short *) nvram,
+ sizeof(*nvram) / sizeof(short), &gpreg);
+
+ /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+/*
+ * Try reading Tekram NVRAM.
+ * Return 0 if OK.
+ */
+static int __init
+sym_read_Tekram_nvram (ncr_slot *np, u_short device_id, Tekram_nvram *nvram)
+{
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ switch (device_id) {
+ case PCI_DEVICE_ID_NCR_53C885:
+ case PCI_DEVICE_ID_NCR_53C895:
+ case PCI_DEVICE_ID_NCR_53C896:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ break;
+ case PCI_DEVICE_ID_NCR_53C875:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ if (!x)
+ break;
+ default:
+ x = sym_read_T93C46_nvram(np, nvram);
+ break;
+ }
+ if (x)
+ return 1;
+
+ /* verify checksum */
+ for (x = 0, csum = 0; x < len - 1; x += 2)
+ csum += data[x] + (data[x+1] << 8);
+ if (csum != 0x1234)
+ return 1;
+
+ return 0;
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Module stuff
+*/
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = SYM53C8XX;
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/sym53c8xx.h b/linux/src/drivers/scsi/sym53c8xx.h
new file mode 100644
index 0000000..128fe16
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx.h
@@ -0,0 +1,116 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+#ifndef SYM53C8XX_H
+#define SYM53C8XX_H
+
+#include "sym53c8xx_defs.h"
+
+/*
+** Define Scsi_Host_Template parameters
+**
+** Used by hosts.c and sym53c8xx.c with module configuration.
+*/
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#include <scsi/scsicam.h>
+
+int sym53c8xx_abort(Scsi_Cmnd *);
+int sym53c8xx_detect(Scsi_Host_Template *tpnt);
+const char *sym53c8xx_info(struct Scsi_Host *host);
+int sym53c8xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int sym53c8xx_reset(Scsi_Cmnd *, unsigned int);
+
+#ifdef MODULE
+int sym53c8xx_release(struct Scsi_Host *);
+#else
+#define sym53c8xx_release NULL
+#endif
+
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,75)
+
+#define SYM53C8XX { name: "", \
+ detect: sym53c8xx_detect, \
+ release: sym53c8xx_release, \
+ info: sym53c8xx_info, \
+ queuecommand: sym53c8xx_queue_command,\
+ abort: sym53c8xx_abort, \
+ reset: sym53c8xx_reset, \
+ bios_param: scsicam_bios_param, \
+ can_queue: SCSI_NCR_CAN_QUEUE, \
+ this_id: 7, \
+ sg_tablesize: SCSI_NCR_SG_TABLESIZE, \
+ cmd_per_lun: SCSI_NCR_CMD_PER_LUN, \
+ use_clustering: DISABLE_CLUSTERING}
+
+#else
+
+#define SYM53C8XX { NULL, NULL, NULL, NULL, \
+ NULL, sym53c8xx_detect, \
+ sym53c8xx_release, sym53c8xx_info, NULL, \
+ sym53c8xx_queue_command,sym53c8xx_abort, \
+ sym53c8xx_reset, NULL, scsicam_bios_param, \
+ SCSI_NCR_CAN_QUEUE, 7, \
+ SCSI_NCR_SG_TABLESIZE, SCSI_NCR_CMD_PER_LUN, \
+ 0, 0, DISABLE_CLUSTERING}
+
+#endif /* LINUX_VERSION_CODE */
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#endif /* SYM53C8XX_H */
diff --git a/linux/src/drivers/scsi/sym53c8xx_comm.h b/linux/src/drivers/scsi/sym53c8xx_comm.h
new file mode 100644
index 0000000..ba961db
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx_comm.h
@@ -0,0 +1,2717 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+/*
+** This file contains definitions and code that the
+** sym53c8xx and ncr53c8xx drivers should share.
+** The sharing will be achieved in a further version
+** of the driver bundle. For now, only the ncr53c8xx
+** driver includes this file.
+*/
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+
+/*==========================================================
+**
+** Hmmm... What complex some PCI-HOST bridges actually
+** are, despite the fact that the PCI specifications
+** are looking so smart and simple! ;-)
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,47)
+#define SCSI_NCR_DYNAMIC_DMA_MAPPING
+#endif
+
+/*==========================================================
+**
+** Miscallaneous defines.
+**
+**==========================================================
+*/
+
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef bcopy
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#endif
+
+#ifndef bcmp
+#define bcmp(s, d, n) memcmp((d), (s), (n))
+#endif
+
+#ifndef bzero
+#define bzero(d, n) memset((d), 0, (n))
+#endif
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)panic( \
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_POINTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_SCATTER (0x0800)
+#define DEBUG_IC (0x1000)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+/*==========================================================
+**
+** A la VMS/CAM-3 queue management.
+** Implemented from linux list management.
+**
+**==========================================================
+*/
+
+typedef struct xpt_quehead {
+ struct xpt_quehead *flink; /* Forward pointer */
+ struct xpt_quehead *blink; /* Backward pointer */
+} XPT_QUEHEAD;
+
+#define xpt_que_init(ptr) do { \
+ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+static inline void __xpt_que_add(struct xpt_quehead * new,
+ struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = new;
+ new->flink = flink;
+ new->blink = blink;
+ blink->flink = new;
+}
+
+static inline void __xpt_que_del(struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = blink;
+ blink->flink = flink;
+}
+
+static inline int xpt_que_empty(struct xpt_quehead *head)
+{
+ return head->flink == head;
+}
+
+static inline void xpt_que_splice(struct xpt_quehead *list,
+ struct xpt_quehead *head)
+{
+ struct xpt_quehead *first = list->flink;
+
+ if (first != list) {
+ struct xpt_quehead *last = list->blink;
+ struct xpt_quehead *at = head->flink;
+
+ first->blink = head;
+ head->flink = first;
+
+ last->flink = at;
+ at->blink = last;
+ }
+}
+
+#define xpt_que_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+
+#define xpt_insque(new, pos) __xpt_que_add(new, pos, (pos)->flink)
+
+#define xpt_remque(el) __xpt_que_del((el)->blink, (el)->flink)
+
+#define xpt_insque_head(new, head) __xpt_que_add(new, head, (head)->flink)
+
+static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->flink;
+
+ if (elem != head)
+ __xpt_que_del(head, elem->flink);
+ else
+ elem = 0;
+ return elem;
+}
+
+#define xpt_insque_tail(new, head) __xpt_que_add(new, (head)->blink, head)
+
+static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->blink;
+
+ if (elem != head)
+ __xpt_que_del(elem->blink, head);
+ else
+ elem = 0;
+ return elem;
+}
+
+/*==========================================================
+**
+** Simple Wrapper to kernel PCI bus interface.
+**
+** This wrapper allows to get rid of old kernel PCI
+** interface and still allows to preserve linux-2.0
+** compatibilty. In fact, it is mostly an incomplete
+** emulation of the new PCI code for pre-2.2 kernels.
+** When kernel-2.0 support will be dropped, we will
+** just have to remove most of this code.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0)
+
+typedef struct pci_dev *pcidev_t;
+#define PCIDEV_NULL (0)
+#define PciBusNumber(d) (d)->bus->number
+#define PciDeviceFn(d) (d)->devfn
+#define PciVendorId(d) (d)->vendor
+#define PciDeviceId(d) (d)->device
+#define PciIrqLine(d) (d)->irq
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->resource[index].start;
+ if ((pdev->resource[index].flags & 0x7) == 0x4)
+ ++index;
+ return ++index;
+}
+#else
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->base_address[index++];
+ if ((*base & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ *base |= (((u_long)pdev->base_address[index]) << 32);
+#endif
+ ++index;
+ }
+ return index;
+}
+#endif
+
+#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
+
+typedef unsigned int pcidev_t;
+#define PCIDEV_NULL (~0u)
+#define PciBusNumber(d) ((d)>>8)
+#define PciDeviceFn(d) ((d)&0xff)
+#define __PciDev(busn, devfn) (((busn)<<8)+(devfn))
+
+#define pci_present pcibios_present
+
+#define pci_read_config_byte(d, w, v) \
+ pcibios_read_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_word(d, w, v) \
+ pcibios_read_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_dword(d, w, v) \
+ pcibios_read_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+#define pci_write_config_byte(d, w, v) \
+ pcibios_write_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_word(d, w, v) \
+ pcibios_write_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_dword(d, w, v) \
+ pcibios_write_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+static pcidev_t __init
+pci_find_device(unsigned int vendor, unsigned int device, pcidev_t prev)
+{
+ static unsigned short pci_index;
+ int retv;
+ unsigned char bus_number, device_fn;
+
+ if (prev == PCIDEV_NULL)
+ pci_index = 0;
+ else
+ ++pci_index;
+ retv = pcibios_find_device (vendor, device, pci_index,
+ &bus_number, &device_fn);
+ return retv ? PCIDEV_NULL : __PciDev(bus_number, device_fn);
+}
+
+static u_short __init PciVendorId(pcidev_t dev)
+{
+ u_short vendor_id;
+ pci_read_config_word(dev, PCI_VENDOR_ID, &vendor_id);
+ return vendor_id;
+}
+
+static u_short __init PciDeviceId(pcidev_t dev)
+{
+ u_short device_id;
+ pci_read_config_word(dev, PCI_DEVICE_ID, &device_id);
+ return device_id;
+}
+
+static u_int __init PciIrqLine(pcidev_t dev)
+{
+ u_char irq;
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ return irq;
+}
+
+static int __init
+pci_get_base_address(pcidev_t dev, int offset, u_long *base)
+{
+ u_int32 tmp;
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base = tmp;
+ offset += sizeof(u_int32);
+ if ((tmp & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base |= (((u_long)tmp) << 32);
+#endif
+ offset += sizeof(u_int32);
+ }
+ return offset;
+}
+
+#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
+
+/*==========================================================
+**
+** SMP threading.
+**
+** Assuming that SMP systems are generally high end
+** systems and may use several SCSI adapters, we are
+** using one lock per controller instead of some global
+** one. For the moment (linux-2.1.95), driver's entry
+** points are called with the 'io_request_lock' lock
+** held, so:
+** - We are uselessly loosing a couple of micro-seconds
+** to lock the controller data structure.
+** - But the driver is not broken by design for SMP and
+** so can be more resistant to bugs or bad changes in
+** the IO sub-system code.
+** - A small advantage could be that the interrupt code
+** is grained as wished (e.g.: by controller).
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+spinlock_t DRIVER_SMP_LOCK = SPIN_LOCK_UNLOCKED;
+#define NCR_LOCK_DRIVER(flags) spin_lock_irqsave(&DRIVER_SMP_LOCK, flags)
+#define NCR_UNLOCK_DRIVER(flags) \
+ spin_unlock_irqrestore(&DRIVER_SMP_LOCK, flags)
+
+#define NCR_INIT_LOCK_NCB(np) spin_lock_init(&np->smp_lock)
+#define NCR_LOCK_NCB(np, flags) spin_lock_irqsave(&np->smp_lock, flags)
+#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) \
+ spin_lock_irqsave(&io_request_lock, flags)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) \
+ spin_unlock_irqrestore(&io_request_lock, flags)
+
+#else
+
+#define NCR_LOCK_DRIVER(flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_DRIVER(flags) do { restore_flags(flags); } while (0)
+
+#define NCR_INIT_LOCK_NCB(np) do { } while (0)
+#define NCR_LOCK_NCB(np, flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_NCB(np, flags) do { restore_flags(flags); } while (0)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) do {;} while (0)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) do {;} while (0)
+
+#endif
+
+/*==========================================================
+**
+** Memory mapped IO
+**
+** Since linux-2.1, we must use ioremap() to map the io
+** memory space and iounmap() to unmap it. This allows
+** portability. Linux 1.3.X and 2.0.X allow to remap
+** physical pages addresses greater than the highest
+** physical memory address to kernel virtual pages with
+** vremap() / vfree(). That was not portable but worked
+** with i386 architecture.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif
+
+#ifdef __sparc__
+# include <asm/irq.h>
+# define pcivtobus(p) bus_dvma_to_mem(p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#elif defined(__alpha__)
+# define pcivtobus(p) ((p) & 0xfffffffful)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#else /* others */
+# define pcivtobus(p) (p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#endif
+
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+static u_long __init remap_pci_mem(u_long base, u_long size)
+{
+ u_long page_base = ((u_long) base) & PAGE_MASK;
+ u_long page_offs = ((u_long) base) - page_base;
+ u_long page_remapped = (u_long) ioremap(page_base, page_offs+size);
+
+ return page_remapped? (page_remapped + page_offs) : 0UL;
+}
+
+static void __init unmap_pci_mem(u_long vaddr, u_long size)
+{
+ if (vaddr)
+ iounmap((void *) (vaddr & PAGE_MASK));
+}
+
+#endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+
+/*==========================================================
+**
+** Insert a delay in micro-seconds and milli-seconds.
+**
+** Under Linux, udelay() is restricted to delay <
+** 1 milli-second. In fact, it generally works for up
+** to 1 second delay. Since 2.1.105, the mdelay() function
+** is provided for delays in milli-seconds.
+** Under 2.0 kernels, udelay() is an inline function
+** that is very inaccurate on Pentium processors.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
+#define UDELAY udelay
+#define MDELAY mdelay
+#else
+static void UDELAY(long us) { udelay(us); }
+static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
+#endif
+
+/*==========================================================
+**
+** Simple power of two buddy-like allocator.
+**
+** This simple code is not intended to be fast, but to
+** provide power of 2 aligned memory allocations.
+** Since the SCRIPTS processor only supplies 8 bit
+** arithmetic, this allocator allows simple and fast
+** address calculations from the SCRIPTS code.
+** In addition, cache line alignment is guaranteed for
+** power of 2 cache line size.
+** Enhanced in linux-2.3.44 to provide a memory pool
+** per pcidev to support dynamic dma mapping. (I would
+** have preferred a real bus astraction, btw).
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+#define __GetFreePages(flags, order) __get_free_pages(flags, order)
+#else
+#define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
+#endif
+
+#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
+#if PAGE_SIZE >= 8192
+#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
+#else
+#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
+#endif
+#define MEMO_FREE_UNUSED /* Free unused pages immediately */
+#define MEMO_WARN 1
+#define MEMO_GFP_FLAGS GFP_ATOMIC
+#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
+#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
+#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
+
+typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
+typedef pcidev_t m_bush_t; /* Something that addresses DMAable */
+
+typedef struct m_link { /* Link between free memory chunks */
+ struct m_link *next;
+} m_link_s;
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+typedef struct m_vtob { /* Virtual to Bus address translation */
+ struct m_vtob *next;
+ m_addr_t vaddr;
+ m_addr_t baddr;
+} m_vtob_s;
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+#endif
+
+typedef struct m_pool { /* Memory pool of a given kind */
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ m_bush_t bush;
+ m_addr_t (*getp)(struct m_pool *);
+ void (*freep)(struct m_pool *, m_addr_t);
+#define M_GETP() mp->getp(mp)
+#define M_FREEP(p) mp->freep(mp, p)
+#define GetPages() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define FreePages(p) free_pages(p, MEMO_PAGE_ORDER)
+ int nump;
+ m_vtob_s *(vtob[VTOB_HASH_SIZE]);
+ struct m_pool *next;
+#else
+#define M_GETP() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define M_FREEP(p) free_pages(p, MEMO_PAGE_ORDER)
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+ struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
+} m_pool_s;
+
+static void *___m_alloc(m_pool_s *mp, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ int j;
+ m_addr_t a;
+ m_link_s *h = mp->h;
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return 0;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ j = i;
+ while (!h[j].next) {
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ h[j].next = (m_link_s *) M_GETP();
+ if (h[j].next)
+ h[j].next->next = 0;
+ break;
+ }
+ ++j;
+ s <<= 1;
+ }
+ a = (m_addr_t) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (m_link_s *) (a+s);
+ h[j].next->next = 0;
+ }
+ }
+#ifdef DEBUG
+ printk("___m_alloc(%d) = %p\n", size, (void *) a);
+#endif
+ return (void *) a;
+}
+
+static void ___m_free(m_pool_s *mp, void *ptr, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ m_link_s *q;
+ m_addr_t a, b;
+ m_link_s *h = mp->h;
+
+#ifdef DEBUG
+ printk("___m_free(%p, %d)\n", ptr, size);
+#endif
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ a = (m_addr_t) ptr;
+
+ while (1) {
+#ifdef MEMO_FREE_UNUSED
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ M_FREEP(a);
+ break;
+ }
+#endif
+ b = a ^ s;
+ q = &h[i];
+ while (q->next && q->next != (m_link_s *) b) {
+ q = q->next;
+ }
+ if (!q->next) {
+ ((m_link_s *) a)->next = h[i].next;
+ h[i].next = (m_link_s *) a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ ++i;
+ }
+}
+
+static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
+{
+ void *p;
+
+ p = ___m_alloc(mp, size);
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("new %-10s[%4d] @%p.\n", name, size, p);
+
+ if (p)
+ bzero(p, size);
+ else if (uflags & MEMO_WARN)
+ printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
+
+ return p;
+}
+
+#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
+
+static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
+{
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+ ___m_free(mp, ptr, size);
+
+}
+
+/*
+ * With pci bus iommu support, we use a default pool of unmapped memory
+ * for memory we donnot need to DMA from/to and one pool per pcidev for
+ * memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+static m_pool_s mp0;
+
+#else
+
+static m_addr_t ___mp0_getp(m_pool_s *mp)
+{
+ m_addr_t m = GetPages();
+ if (m)
+ ++mp->nump;
+ return m;
+}
+
+static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
+{
+ FreePages(m);
+ --mp->nump;
+}
+
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+static void *m_calloc(int size, char *name)
+{
+ u_long flags;
+ void *m;
+ NCR_LOCK_DRIVER(flags);
+ m = __m_calloc(&mp0, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+ return m;
+}
+
+static void m_free(void *ptr, int size, char *name)
+{
+ u_long flags;
+ NCR_LOCK_DRIVER(flags);
+ __m_free(&mp0, ptr, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+}
+
+/*
+ * DMAable pools.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Without pci bus iommu support, all the memory is assumed DMAable */
+
+#define __m_calloc_dma(b, s, n) m_calloc(s, n)
+#define __m_free_dma(b, p, s, n) m_free(p, s, n)
+#define __vtobus(b, p) virt_to_bus(p)
+
+#else
+
+/*
+ * With pci bus iommu support, we maintain one pool per pcidev and a
+ * hashed reverse table for virtual to bus physical address translations.
+ */
+static m_addr_t ___dma_getp(m_pool_s *mp)
+{
+ m_addr_t vp;
+ m_vtob_s *vbp;
+
+ vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
+ if (vbp) {
+ dma_addr_t daddr;
+ vp = (m_addr_t) pci_alloc_consistent(mp->bush,
+ PAGE_SIZE<<MEMO_PAGE_ORDER,
+ &daddr);
+ if (vp) {
+ int hc = VTOB_HASH_CODE(vp);
+ vbp->vaddr = vp;
+ vbp->baddr = daddr;
+ vbp->next = mp->vtob[hc];
+ mp->vtob[hc] = vbp;
+ ++mp->nump;
+ return vp;
+ }
+ }
+ if (vbp)
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ return 0;
+}
+
+static void ___dma_freep(m_pool_s *mp, m_addr_t m)
+{
+ m_vtob_s **vbpp, *vbp;
+ int hc = VTOB_HASH_CODE(m);
+
+ vbpp = &mp->vtob[hc];
+ while (*vbpp && (*vbpp)->vaddr != m)
+ vbpp = &(*vbpp)->next;
+ if (*vbpp) {
+ vbp = *vbpp;
+ *vbpp = (*vbpp)->next;
+ pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
+ (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ --mp->nump;
+ }
+}
+
+static inline m_pool_s *___get_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
+ return mp;
+}
+
+static m_pool_s *___cre_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
+ if (mp) {
+ bzero(mp, sizeof(*mp));
+ mp->bush = bush;
+ mp->getp = ___dma_getp;
+ mp->freep = ___dma_freep;
+ mp->next = mp0.next;
+ mp0.next = mp;
+ }
+ return mp;
+}
+
+static void ___del_dma_pool(m_pool_s *p)
+{
+ struct m_pool **pp = &mp0.next;
+
+ while (*pp && *pp != p)
+ pp = &(*pp)->next;
+ if (*pp) {
+ *pp = (*pp)->next;
+ __m_free(&mp0, p, sizeof(*p), "MPOOL");
+ }
+}
+
+static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+ void *m = 0;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (!mp)
+ mp = ___cre_dma_pool(bush);
+ if (mp)
+ m = __m_calloc(mp, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+
+ return m;
+}
+
+static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp)
+ __m_free(mp, m, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+}
+
+static m_addr_t __vtobus(m_bush_t bush, void *m)
+{
+ u_long flags;
+ m_pool_s *mp;
+ int hc = VTOB_HASH_CODE(m);
+ m_vtob_s *vp = 0;
+ m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp) {
+ vp = mp->vtob[hc];
+ while (vp && (m_addr_t) vp->vaddr != a)
+ vp = vp->next;
+ }
+ NCR_UNLOCK_DRIVER(flags);
+ return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
+}
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->pdev, s, n)
+#define _m_free_dma(np, p, s, n) __m_free_dma(np->pdev, p, s, n)
+#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
+#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
+#define _vtobus(np, p) __vtobus(np->pdev, p)
+#define vtobus(p) _vtobus(np, p)
+
+/*
+ * Deal with DMA mapping/unmapping.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Linux versions prior to pci bus iommu kernel interface */
+
+#define __unmap_scsi_data(pdev, cmd) do {; } while (0)
+#define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
+#define __map_scsi_sg_data(pdev, cmd) ((cmd)->use_sg)
+#define __sync_scsi_data(pdev, cmd) do {; } while (0)
+
+#define scsi_sg_dma_address(sc) vtobus((sc)->address)
+#define scsi_sg_dma_len(sc) ((sc)->length)
+
+#else
+
+/* Linux version with pci bus iommu kernel interface */
+
+/* To keep track of the dma mapping (sg/single) that has been set */
+#define __data_mapped SCp.phase
+#define __data_mapping SCp.have_data_in
+
+static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_unmap_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+ cmd->__data_mapped = 0;
+}
+
+static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ dma_addr_t mapping;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->request_bufflen == 0)
+ return 0;
+
+ mapping = pci_map_single(pdev, cmd->request_buffer,
+ cmd->request_bufflen, dma_dir);
+ cmd->__data_mapped = 1;
+ cmd->__data_mapping = mapping;
+
+ return mapping;
+}
+
+static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int use_sg;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->use_sg == 0)
+ return 0;
+
+ use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ cmd->__data_mapped = 2;
+ cmd->__data_mapping = use_sg;
+
+ return use_sg;
+}
+
+static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_dma_sync_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+}
+
+#define scsi_sg_dma_address(sc) sg_dma_address(sc)
+#define scsi_sg_dma_len(sc) sg_dma_len(sc)
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->pdev, cmd)
+#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->pdev, cmd)
+#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->pdev, cmd)
+#define sync_scsi_data(np, cmd) __sync_scsi_data(np->pdev, cmd)
+
+/*==========================================================
+**
+** SCSI data transfer direction
+**
+** Until some linux kernel version near 2.3.40,
+** low-level scsi drivers were not told about data
+** transfer direction. We check the existence of this
+** feature that has been expected for a _long_ time by
+** all SCSI driver developers by just testing against
+** the definition of SCSI_DATA_UNKNOWN. Indeed this is
+** a hack, but testing against a kernel version would
+** have been a shame. ;-)
+**
+**==========================================================
+*/
+#ifdef SCSI_DATA_UNKNOWN
+
+#define scsi_data_direction(cmd) (cmd->sc_data_direction)
+
+#else
+
+#define SCSI_DATA_UNKNOWN 0
+#define SCSI_DATA_WRITE 1
+#define SCSI_DATA_READ 2
+#define SCSI_DATA_NONE 3
+
+static __inline__ int scsi_data_direction(Scsi_Cmnd *cmd)
+{
+ int direction;
+
+ switch((int) cmd->cmnd[0]) {
+ case 0x08: /* READ(6) 08 */
+ case 0x28: /* READ(10) 28 */
+ case 0xA8: /* READ(12) A8 */
+ direction = SCSI_DATA_READ;
+ break;
+ case 0x0A: /* WRITE(6) 0A */
+ case 0x2A: /* WRITE(10) 2A */
+ case 0xAA: /* WRITE(12) AA */
+ direction = SCSI_DATA_WRITE;
+ break;
+ default:
+ direction = SCSI_DATA_UNKNOWN;
+ break;
+ }
+
+ return direction;
+}
+
+#endif /* SCSI_DATA_UNKNOWN */
+
+/*==========================================================
+**
+** Driver setup.
+**
+** This structure is initialized from linux config
+** options. It can be overridden at boot-up by the boot
+** command line.
+**
+**==========================================================
+*/
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+#endif
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+
+/*==========================================================
+**
+** Structures used by the detection routine to transmit
+** device configuration to the attach function.
+**
+**==========================================================
+*/
+typedef struct {
+ int bus;
+ u_char device_fn;
+ u_long base;
+ u_long base_2;
+ u_long io_port;
+ int irq;
+/* port and reg fields to use INB, OUTB macros */
+ u_long base_io;
+ volatile struct ncr_reg *reg;
+} ncr_slot;
+
+/*==========================================================
+**
+** Structure used to store the NVRAM content.
+**
+**==========================================================
+*/
+typedef struct {
+ int type;
+#define SCSI_NCR_SYMBIOS_NVRAM (1)
+#define SCSI_NCR_TEKRAM_NVRAM (2)
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ } data;
+#endif
+} ncr_nvram;
+
+/*==========================================================
+**
+** Structure used by detection routine to save data on
+** each detected board for attach.
+**
+**==========================================================
+*/
+typedef struct {
+ pcidev_t pdev;
+ ncr_slot slot;
+ ncr_chip chip;
+ ncr_nvram *nvram;
+ u_char host_id;
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ u_char pqs_pds;
+#endif
+ int attach_done;
+} ncr_device;
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+
+/*==========================================================
+**
+** NVRAM detection and reading.
+**
+** Currently supported:
+** - 24C16 EEPROM with both Symbios and Tekram layout.
+** - 93C46 EEPROM with Tekram layout.
+**
+**==========================================================
+*/
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+ * 24C16 EEPROM reading.
+ *
+ * GPOI0 - data in/data out
+ * GPIO1 - clock
+ * Symbios NVRAM wiring now also used by Tekram.
+ */
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ * Set/clear data/clock bit in GPIO0
+ */
+static void __init
+S24C16_set_bit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+{
+ UDELAY (5);
+ switch (bit_mode){
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (5);
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+static void __init S24C16_start(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void __init S24C16_stop(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+static void __init
+S24C16_do_bit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+{
+ S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB (nc_gpreg);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+static void __init
+S24C16_write_ack(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+{
+ /* Temporarily make GPIO0 an output (clear bit 0 of GPCNTL). */
+ OUTB (nc_gpcntl, *gpcntl & 0xfe);
+ S24C16_do_bit(np, 0, write_bit, gpreg);
+ /* Restore the previous GPIO0 direction. */
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+static void __init
+S24C16_read_ack(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+{
+ /* Temporarily make GPIO0 an input (set bit 0 of GPCNTL). */
+ OUTB (nc_gpcntl, *gpcntl | 0x01);
+ S24C16_do_bit(np, read_bit, 1, gpreg);
+ /* Restore the previous GPIO0 direction. */
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+static void __init
+S24C16_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+
+ /* Shift the byte out MSB first. */
+ for (x = 0; x < 8; x++)
+ S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ /* Ninth clock: sample the slave's ACK (bit 0 clear = accepted). */
+ S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+static void __init
+S24C16_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+ u_char read_bit;
+
+ /* Shift the byte in MSB first; only bit 0 of the sampled GPREG counts. */
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ S24C16_do_bit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ /* ACK the byte (a non-zero ack_data NAKs, ending a sequential read). */
+ S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Read 'len' bytes starting at 'offset'.
+ */
+static int __init
+sym_read_S24C16_nvram (ncr_slot *np, int offset, u_char *data, int len)
+{
+ /* Returns 0 on success, 1 if the EEPROM fails to ACK any phase. */
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_char ack_data;
+ int retv = 1;
+ int x;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+ gpcntl = old_gpcntl & 0xfc; /* GPIO0/1 as outputs */
+
+ /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+ OUTB (nc_gpreg, old_gpreg);
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ S24C16_stop(np, &gpreg);
+
+ /* activate NVRAM */
+ S24C16_start(np, &gpreg);
+
+ /* write device code and random address MSB */
+ S24C16_write_byte(np, &ack_data,
+ 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* write random address LSB */
+ S24C16_write_byte(np, &ack_data,
+ offset & 0xff, &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* regenerate START state to set up for reading */
+ S24C16_start(np, &gpreg);
+
+ /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+ S24C16_write_byte(np, &ack_data,
+ 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* now set up GPIO0 for inputting data */
+ gpcntl |= 0x01;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all requested data - only part of total NVRAM */
+ for (x = 0; x < len; x++)
+ S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+ /* finally put NVRAM back in inactive mode */
+ gpcntl &= 0xfe;
+ OUTB (nc_gpcntl, gpcntl);
+ S24C16_stop(np, &gpreg);
+ retv = 0;
+out:
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+/* #undef takes only the macro name; trailing tokens are ill-formed. */
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ * Try reading Symbios NVRAM.
+ * Return 0 if OK.
+ */
+static int __init sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+{
+ static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ /* probe the 24c16 and read the SYMBIOS 24c16 area */
+ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+ return 1;
+
+ /* check valid NVRAM signature, verify byte count and checksum */
+ if (nvram->type != 0 ||
+ memcmp(nvram->trailer, Symbios_trailer, 6) ||
+ nvram->byte_count != len - 12)
+ return 1;
+
+ /* verify checksum */
+ /* sum of all bytes between the 6-byte header and 6-byte trailer */
+ for (x = 6, csum = 0; x < len - 6; x++)
+ csum += data[x];
+ if (csum != nvram->checksum)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * 93C46 EEPROM reading.
+ *
+ * GPIO0 - data in
+ * GPIO1 - data out
+ * GPIO2 - clock
+ * GPIO4 - chip select
+ *
+ * Used by Tekram.
+ */
+
+/*
+ * Pulse the clock bit (GPIO2)
+ */
+static void __init T93C46_Clk(ncr_slot *np, u_char *gpreg)
+{
+ /* Raise GPIO2 (0x04), hold 2 us, then restore the shadow value. */
+ OUTB (nc_gpreg, *gpreg | 0x04);
+ UDELAY (2);
+ OUTB (nc_gpreg, *gpreg);
+}
+
+/*
+ * Read bit from NVRAM
+ */
+static void __init T93C46_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+{
+ /* Pulse the clock, then sample GPREG; caller masks bit 0 (GPIO0). */
+ UDELAY (2);
+ T93C46_Clk(np, gpreg);
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Write the output data bit to GPIO1, with chip select (GPIO4) asserted
+ */
+static void __init T93C46_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+{
+ /* Place the data bit on GPIO1 (0x02). */
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ /* Keep chip select (GPIO4, 0x10) asserted. */
+ *gpreg |= 0x10;
+
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void __init T93C46_Stop(ncr_slot *np, u_char *gpreg)
+{
+ /* Drop chip select (GPIO4, 0x10) and give one trailing clock. */
+ *gpreg &= 0xef;
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+static void __init
+T93C46_Send_Command(ncr_slot *np, u_short write_data,
+ u_char *read_bit, u_char *gpreg)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ /* Sample GPREG; caller checks bit 0 for the dummy 0 preceding data. */
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * READ 2 bytes from the NVRAM
+ */
+static void __init
+T93C46_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+{
+ int x;
+ u_char read_bit;
+
+ /* Clock 16 bits in, MSB first. */
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ T93C46_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read Tekram NvRAM data.
+ */
+static int __init
+T93C46_Read_Data(ncr_slot *np, u_short *data,int len,u_char *gpreg)
+{
+ u_char read_bit;
+ int x;
+
+ /* Returns 0 on success, 1 if the device does not answer a READ. */
+ for (x = 0; x < len; x++) {
+
+ /* output read command and address */
+ /* 0x180 = start bit (1), READ opcode (10), then 6 address bits */
+ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 1; /* Bad */
+ T93C46_Read_Word(np, &data[x], gpreg);
+ T93C46_Stop(np, gpreg);
+ }
+
+ return 0;
+}
+
+/*
+ * Try reading 93C46 Tekram NVRAM.
+ */
+static int __init
+sym_read_T93C46_nvram (ncr_slot *np, Tekram_nvram *nvram)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ int retv = 1;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+
+ /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
+ 1/2/4 out */
+ gpreg = old_gpreg & 0xe9;
+ OUTB (nc_gpreg, gpreg);
+ gpcntl = (old_gpcntl & 0xe9) | 0x09;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all of NVRAM, 64 words */
+ retv = T93C46_Read_Data(np, (u_short *) nvram,
+ sizeof(*nvram) / sizeof(short), &gpreg);
+
+ /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ /* 0 on success, non-zero if any word read failed */
+ return retv;
+}
+
+/*
+ * Try reading Tekram NVRAM.
+ * Return 0 if OK.
+ */
+static int __init
+sym_read_Tekram_nvram (ncr_slot *np, u_short device_id, Tekram_nvram *nvram)
+{
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ /* Pick the EEPROM type from the chip model, then verify checksum. */
+ switch (device_id) {
+ case PCI_DEVICE_ID_NCR_53C885:
+ case PCI_DEVICE_ID_NCR_53C895:
+ case PCI_DEVICE_ID_NCR_53C896:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ break;
+ case PCI_DEVICE_ID_NCR_53C875:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ if (!x)
+ break;
+ /* fall through: 875 boards may carry a 93C46 instead */
+ default:
+ x = sym_read_T93C46_nvram(np, nvram);
+ break;
+ }
+ if (x)
+ return 1;
+
+ /* verify checksum */
+ /* 16-bit little-endian word sum over the whole image must be 0x1234 */
+ for (x = 0, csum = 0; x < len - 1; x += 2)
+ csum += data[x] + (data[x+1] << 8);
+ if (csum != 0x1234)
+ return 1;
+
+ return 0;
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*===================================================================
+**
+** Detect and try to read SYMBIOS and TEKRAM NVRAM.
+**
+** Data can be used to order booting of boards.
+**
+** Data is saved in ncr_device structure if NVRAM found. This
+** is then used to find drive boot order for ncr_attach().
+**
+** NVRAM data is passed to Scsi_Host_Template later during
+** ncr_attach() for any device set up.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static void __init ncr_get_nvram(ncr_device *devp, ncr_nvram *nvp)
+{
+ /* Attach nvp to the device, then try Symbios first, Tekram second.
+ On failure devp->nvram is reset to 0 and nvp->type to 0. */
+ devp->nvram = nvp;
+ if (!nvp)
+ return;
+ /*
+ ** Get access to chip IO registers
+ */
+#ifdef SCSI_NCR_IOMAPPED
+ request_region(devp->slot.io_port, 128, NAME53C8XX);
+ devp->slot.base_io = devp->slot.io_port;
+#else
+ devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+ if (!devp->slot.reg)
+ return;
+#endif
+
+ /*
+ ** Try to read SYMBIOS nvram.
+ ** Try to read TEKRAM nvram if Symbios nvram not found.
+ */
+ if (!sym_read_Symbios_nvram(&devp->slot, &nvp->data.Symbios))
+ nvp->type = SCSI_NCR_SYMBIOS_NVRAM;
+ else if (!sym_read_Tekram_nvram(&devp->slot, devp->chip.device_id,
+ &nvp->data.Tekram))
+ nvp->type = SCSI_NCR_TEKRAM_NVRAM;
+ else {
+ nvp->type = 0;
+ devp->nvram = 0;
+ }
+
+ /*
+ ** Release access to chip IO registers
+ */
+#ifdef SCSI_NCR_IOMAPPED
+ release_region(devp->slot.base_io, 128);
+#else
+ unmap_pci_mem((u_long) devp->slot.reg, 128ul);
+#endif
+
+}
+
+/*===================================================================
+**
+** Display the content of NVRAM for debugging purpose.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_DEBUG_NVRAM
+static void __init ncr_display_Symbios_nvram(Symbios_nvram *nvram)
+{
+ int i;
+
+ /* display Symbios nvram host data */
+ printk(KERN_DEBUG NAME53C8XX ": HOST ID=%d%s%s%s%s%s\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+ (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
+ (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+ /* display Symbios nvram drive data */
+ /* NOTE(review): only targets 0..14 are shown — confirm target count */
+ for (i = 0 ; i < 15 ; i++) {
+ struct Symbios_target *tn = &nvram->target[i];
+ printk(KERN_DEBUG NAME53C8XX
+ "-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+ i,
+ (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+ (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+ tn->bus_width,
+ tn->sync_period / 4,
+ tn->timeout);
+ }
+}
+
+static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
+
+static void __init ncr_display_Tekram_nvram(Tekram_nvram *nvram)
+{
+ int i, tags, boot_delay;
+ char *rem;
+
+ /* display Tekram nvram host data */
+ tags = 2 << nvram->max_tags_index;
+ boot_delay = 0;
+ /* NOTE(review): '< 6' never selects the 120s entry (index 6) of
+ Tekram_boot_delay[] — confirm whether that is intended */
+ if (nvram->boot_delay_index < 6)
+ boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+ switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+ default:
+ case 0: rem = ""; break;
+ case 1: rem = " REMOVABLE=boot device"; break;
+ case 2: rem = " REMOVABLE=all"; break;
+ }
+
+ printk(KERN_DEBUG NAME53C8XX
+ ": HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+ rem, boot_delay, tags);
+
+ /* display Tekram nvram drive data */
+ for (i = 0; i <= 15; i++) {
+ int sync, j;
+ struct Tekram_target *tn = &nvram->target[i];
+ j = tn->sync_index & 0xf;
+ sync = Tekram_sync[j];
+ printk(KERN_DEBUG NAME53C8XX "-%d:%s%s%s%s%s%s PERIOD=%d\n",
+ i,
+ (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+ (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+ (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & TEKRAM_START_CMD) ? " START" : "",
+ (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+ (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+ sync);
+ }
+}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+
+/*===================================================================
+**
+** Utility routines that properly return data through /proc FS.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+struct info_str
+{
+ char *buffer; /* destination buffer supplied by the /proc layer */
+ int length; /* total size of buffer */
+ int offset; /* byte offset the reader asked for */
+ int pos; /* current position in the generated output stream */
+};
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+ /* Copy only the part of 'data' that falls inside the window
+ [offset, offset+length) requested by the /proc reader. */
+ if (info->pos + len > info->length)
+ len = info->length - info->pos;
+
+ /* Entirely before the requested window: just advance the stream. */
+ if (info->pos + len < info->offset) {
+ info->pos += len;
+ return;
+ }
+ /* Partially before the window: skip the leading part. */
+ if (info->pos < info->offset) {
+ data += (info->offset - info->pos);
+ len -= (info->offset - info->pos);
+ }
+
+ if (len > 0) {
+ memcpy(info->buffer + info->pos, data, len);
+ info->pos += len;
+ }
+}
+
+/*
+ * printf-like helper feeding copy_mem_info().
+ * Returns the (possibly clamped) number of characters produced.
+ */
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+ va_list args;
+ char buf[81];
+ int len;
+
+ va_start(args, fmt);
+ /* Bound the expansion: vsprintf could overrun buf[] on long lines. */
+ len = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ /* vsnprintf returns the untruncated length; clamp to buf contents. */
+ if (len >= (int) sizeof(buf))
+ len = sizeof(buf) - 1;
+ copy_mem_info(info, buf, len);
+ return len;
+}
+
+#endif
+
+/*===================================================================
+**
+** Driver setup from the boot command line
+**
+**===================================================================
+*/
+
+/* Separator between boot options: a space when passed as module args. */
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+/* Option numbers; must stay in the same order as setup_token[] below. */
+#define OPT_TAGS 1
+#define OPT_MASTER_PARITY 2
+#define OPT_SCSI_PARITY 3
+#define OPT_DISCONNECTION 4
+#define OPT_SPECIAL_FEATURES 5
+#define OPT_ULTRA_SCSI 6
+#define OPT_FORCE_SYNC_NEGO 7
+#define OPT_REVERSE_PROBE 8
+#define OPT_DEFAULT_SYNC 9
+#define OPT_VERBOSE 10
+#define OPT_DEBUG 11
+#define OPT_BURST_MAX 12
+#define OPT_LED_PIN 13
+#define OPT_MAX_WIDE 14
+#define OPT_SETTLE_DELAY 15
+#define OPT_DIFF_SUPPORT 16
+#define OPT_IRQM 17
+#define OPT_PCI_FIX_UP 18
+#define OPT_BUS_CHECK 19
+#define OPT_OPTIMIZE 20
+#define OPT_RECOVERY 21
+#define OPT_SAFE_SETUP 22
+#define OPT_USE_NVRAM 23
+#define OPT_EXCLUDE 24
+#define OPT_HOST_ID 25
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define OPT_IARB 26
+#endif
+
+static char setup_token[] __initdata =
+ /* One "name:" entry per OPT_* value, in exactly the same order;
+ get_setup_token() returns the 1-based position of the match. */
+ "tags:" "mpar:"
+ "spar:" "disc:"
+ "specf:" "ultra:"
+ "fsn:" "revprob:"
+ "sync:" "verb:"
+ "debug:" "burst:"
+ "led:" "wide:"
+ "settle:" "diff:"
+ "irqm:" "pcifix:"
+ "buschk:" "optim:"
+ "recovery:"
+ "safe:" "nvram:"
+ "excl:" "hostid:"
+#ifdef SCSI_NCR_IARB_SUPPORT
+ "iarb:"
+#endif
+ ; /* DONNOT REMOVE THIS ';' */
+
+/* ARG_SEP is already defined above (',' for built-in, ' ' for module). */
+
+static int __init get_setup_token(char *p)
+{
+ char *cur = setup_token;
+ char *pc;
+ int i = 0;
+
+ /* Walk the colon-separated token list; return the 1-based index of
+ the first token matching the start of 'p', or 0 if none match. */
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ ++pc;
+ ++i;
+ /* Compare including the trailing ':' so prefixes do not match. */
+ if (!strncmp(p, cur, pc - cur))
+ return i;
+ cur = pc;
+ }
+ return 0;
+}
+
+
+/*
+** Parse the driver boot command line / module option string.
+** Always returns 1 so the option is reported as handled.
+*/
+static int __init sym53c8xx__setup(char *str)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+ char *cur = str;
+ char *pc, *pv;
+ int i, val, c;
+ int xi = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ /* Fix: 'pe' was read below without being set when the value
+ is 'y'/'n' (only simple_strtoul() writes it). Keep it NULL
+ in that case so the OPT_TAGS check is well defined. */
+ char *pe = NULL;
+
+ val = 0;
+ pv = pc;
+ c = *++pv;
+
+ if (c == 'n')
+ val = 0;
+ else if (c == 'y')
+ val = 1;
+ else
+ val = (int) simple_strtoul(pv, &pe, 0);
+
+ switch (get_setup_token(cur)) {
+ case OPT_TAGS:
+ driver_setup.default_tags = val;
+ /* optional "/..." suffix selects per-device tag control */
+ if (pe && *pe == '/') {
+ i = 0;
+ while (*pe && *pe != ARG_SEP &&
+ i < sizeof(driver_setup.tag_ctrl)-1) {
+ driver_setup.tag_ctrl[i++] = *pe++;
+ }
+ driver_setup.tag_ctrl[i] = '\0';
+ }
+ break;
+ case OPT_MASTER_PARITY:
+ driver_setup.master_parity = val;
+ break;
+ case OPT_SCSI_PARITY:
+ driver_setup.scsi_parity = val;
+ break;
+ case OPT_DISCONNECTION:
+ driver_setup.disconnection = val;
+ break;
+ case OPT_SPECIAL_FEATURES:
+ driver_setup.special_features = val;
+ break;
+ case OPT_ULTRA_SCSI:
+ driver_setup.ultra_scsi = val;
+ break;
+ case OPT_FORCE_SYNC_NEGO:
+ driver_setup.force_sync_nego = val;
+ break;
+ case OPT_REVERSE_PROBE:
+ driver_setup.reverse_probe = val;
+ break;
+ case OPT_DEFAULT_SYNC:
+ driver_setup.default_sync = val;
+ break;
+ case OPT_VERBOSE:
+ driver_setup.verbose = val;
+ break;
+ case OPT_DEBUG:
+ driver_setup.debug = val;
+ break;
+ case OPT_BURST_MAX:
+ driver_setup.burst_max = val;
+ break;
+ case OPT_LED_PIN:
+ driver_setup.led_pin = val;
+ break;
+ case OPT_MAX_WIDE:
+ driver_setup.max_wide = val? 1:0;
+ break;
+ case OPT_SETTLE_DELAY:
+ driver_setup.settle_delay = val;
+ break;
+ case OPT_DIFF_SUPPORT:
+ driver_setup.diff_support = val;
+ break;
+ case OPT_IRQM:
+ driver_setup.irqm = val;
+ break;
+ case OPT_PCI_FIX_UP:
+ driver_setup.pci_fix_up = val;
+ break;
+ case OPT_BUS_CHECK:
+ driver_setup.bus_check = val;
+ break;
+ case OPT_OPTIMIZE:
+ driver_setup.optimize = val;
+ break;
+ case OPT_RECOVERY:
+ driver_setup.recovery = val;
+ break;
+ case OPT_USE_NVRAM:
+ driver_setup.use_nvram = val;
+ break;
+ case OPT_SAFE_SETUP:
+ /* reset every option to the known-safe defaults */
+ memcpy(&driver_setup, &driver_safe_setup,
+ sizeof(driver_setup));
+ break;
+ case OPT_EXCLUDE:
+ if (xi < SCSI_NCR_MAX_EXCLUDES)
+ driver_setup.excludes[xi++] = val;
+ break;
+ case OPT_HOST_ID:
+ driver_setup.host_id = val;
+ break;
+#ifdef SCSI_NCR_IARB_SUPPORT
+ case OPT_IARB:
+ driver_setup.iarb = val;
+ break;
+#endif
+ default:
+ printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+ break;
+ }
+
+ if ((cur = strchr(cur, ARG_SEP)) != NULL)
+ ++cur;
+ }
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+ return 1;
+}
+
+/*===================================================================
+**
+** Get device queue depth from boot command line.
+**
+**===================================================================
+*/
+#define DEF_DEPTH (driver_setup.default_tags)
+#define ALL_TARGETS -2
+#define NO_TARGET -1
+#define ALL_LUNS -2
+#define NO_LUN -1
+
+/* Scan driver_setup.tag_ctrl ("/t<target>u<lun>q<depth>...") and return
+ the queue depth configured for (unit, target, lun), or DEF_DEPTH. */
+static int device_queue_depth(int unit, int target, int lun)
+{
+ int c, h, t, u, v;
+ char *p = driver_setup.tag_ctrl;
+ char *ep;
+
+ h = -1;
+ t = NO_TARGET;
+ u = NO_LUN;
+ while ((c = *p++) != 0) {
+ v = simple_strtoul(p, &ep, 0);
+ switch(c) {
+ case '/': /* next host unit */
+ ++h;
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ case 't': /* select target; NO_TARGET if it does not match */
+ if (t != target)
+ t = (target == v) ? v : NO_TARGET;
+ u = ALL_LUNS;
+ break;
+ case 'u': /* select lun; NO_LUN if it does not match */
+ if (u != lun)
+ u = (lun == v) ? v : NO_LUN;
+ break;
+ case 'q': /* queue depth for the current selection */
+ if (h == unit &&
+ (t == ALL_TARGETS || t == target) &&
+ (u == ALL_LUNS || u == lun))
+ return v;
+ break;
+ case '-': /* reset selection to all targets/luns */
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ default:
+ break;
+ }
+ p = ep;
+ }
+ return DEF_DEPTH;
+}
+
+/*===================================================================
+**
+** Print out information about driver configuration.
+**
+**===================================================================
+*/
+/*
+** Log the effective driver_setup values (two printk lines).
+*/
+static void __init ncr_print_driver_setup(void)
+{
+/* Parenthesize the macro argument and expansion (macro hygiene). */
+#define YesNo(y) ((y) ? 'y' : 'n')
+ printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+ "burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
+ YesNo(driver_setup.disconnection),
+ driver_setup.special_features,
+ driver_setup.ultra_scsi,
+ driver_setup.default_tags,
+ driver_setup.default_sync,
+ driver_setup.burst_max,
+ YesNo(driver_setup.max_wide),
+ driver_setup.diff_support,
+ YesNo(driver_setup.reverse_probe),
+ driver_setup.bus_check);
+
+ printk (NAME53C8XX ": setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,"
+ "led:%c,settle:%d,irqm:0x%x,nvram:0x%x,pcifix:0x%x\n",
+ YesNo(driver_setup.master_parity),
+ YesNo(driver_setup.scsi_parity),
+ YesNo(driver_setup.force_sync_nego),
+ driver_setup.verbose,
+ driver_setup.debug,
+ YesNo(driver_setup.led_pin),
+ driver_setup.settle_delay,
+ driver_setup.irqm,
+ driver_setup.use_nvram,
+ driver_setup.pci_fix_up);
+#undef YesNo
+}
+
+/*===================================================================
+**
+** SYM53C8XX devices description table.
+**
+**===================================================================
+*/
+
+/* Supported chip descriptors; matched on device id and revision below. */
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+/*===================================================================
+**
+** Detect all NCR PQS/PDS boards and keep track of their bus nr.
+**
+** The NCR PQS or PDS card is constructed as a DEC bridge
+** behind which sit a proprietary NCR memory controller and
+** four or two 53c875s as separate devices. In its usual mode
+** of operation, the 875s are slaved to the memory controller
+** for all transfers. We can tell if an 875 is part of a
+** PQS/PDS or not since if it is, it will be on the same bus
+** as the memory controller. To operate with the Linux
+** driver, the memory controller is disabled and the 875s
+** freed to function independently. The only wrinkle is that
+** the preset SCSI ID (which may be zero) must be read in from
+** a special configuration space register of the 875.
+**
+**===================================================================
+*/
+#define SCSI_NCR_MAX_PQS_BUS 16
+/* PCI bus numbers hosting a PQS/PDS memory controller; -1 terminates. */
+static int pqs_bus[SCSI_NCR_MAX_PQS_BUS] __initdata = { 0 };
+
+static void __init ncr_detect_pqs_pds(void)
+{
+ short index;
+ pcidev_t dev = PCIDEV_NULL;
+
+ /* Scan for the memory controller (vendor 0x101a, device 0x0009)
+ and record each bus number; free the slaved 875s as we go. */
+ for(index=0; index < SCSI_NCR_MAX_PQS_BUS; index++) {
+ u_char tmp;
+
+ dev = pci_find_device(0x101a, 0x0009, dev);
+ if (dev == PCIDEV_NULL) {
+ pqs_bus[index] = -1;
+ break;
+ }
+ printk(KERN_INFO NAME53C8XX ": NCR PQS/PDS memory controller detected on bus %d\n", PciBusNumber(dev));
+ pci_read_config_byte(dev, 0x44, &tmp);
+ /* bit 1: allow individual 875 configuration */
+ tmp |= 0x2;
+ pci_write_config_byte(dev, 0x44, tmp);
+ pci_read_config_byte(dev, 0x45, &tmp);
+ /* bit 2: drive individual 875 interrupts to the bus */
+ tmp |= 0x4;
+ pci_write_config_byte(dev, 0x45, tmp);
+
+ pqs_bus[index] = PciBusNumber(dev);
+ }
+}
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+/*===================================================================
+**
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+**
+**===================================================================
+*/
+static int __init
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device)
+{
+ u_short vendor_id, device_id, command;
+ u_char cache_line_size, latency_timer;
+ u_char suggested_cache_line_size = 0;
+ u_char pci_fix_up = driver_setup.pci_fix_up;
+ u_char revision;
+ u_int irq;
+ u_long base, base_2, io_port;
+ int i;
+ ncr_chip *chip;
+
+ printk(KERN_INFO NAME53C8XX ": at PCI bus %d, device %d, function %d\n",
+ PciBusNumber(pdev),
+ (int) (PciDeviceFn(pdev) & 0xf8) >> 3,
+ (int) (PciDeviceFn(pdev) & 7));
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ if (!pci_dma_supported(pdev, (dma_addr_t) (0xffffffffUL))) {
+ printk(KERN_WARNING NAME53C8XX
+ "32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
+ return -1;
+ }
+#endif
+
+ /*
+ ** Read info from the PCI config space.
+ ** pci_read_config_xxx() functions are assumed to be used for
+ ** successfully detected PCI devices.
+ */
+ vendor_id = PciVendorId(pdev);
+ device_id = PciDeviceId(pdev);
+ irq = PciIrqLine(pdev);
+ i = 0;
+ i = pci_get_base_address(pdev, i, &io_port);
+ i = pci_get_base_address(pdev, i, &base);
+ (void) pci_get_base_address(pdev, i, &base_2);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ /*
+ ** Match the BUS number for PQS/PDS devices.
+ ** Read the SCSI ID from a special register mapped
+ ** into the configuration space of the individual
+ ** 875s. This register is set up by the PQS bios
+ */
+ for(i = 0; i < SCSI_NCR_MAX_PQS_BUS && pqs_bus[i] != -1; i++) {
+ u_char tmp;
+ if (pqs_bus[i] == PciBusNumber(pdev)) {
+ pci_read_config_byte(pdev, 0x84, &tmp);
+ device->pqs_pds = 1;
+ device->host_id = tmp;
+ break;
+ }
+ }
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+ /*
+ ** If user excludes this chip, donnot initialize it.
+ */
+ for (i = 0 ; i < SCSI_NCR_MAX_EXCLUDES ; i++) {
+ if (driver_setup.excludes[i] ==
+ (io_port & PCI_BASE_ADDRESS_IO_MASK))
+ return -1;
+ }
+ /*
+ ** Check if the chip is supported
+ */
+ if ((device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (device_id == PCI_DEVICE_ID_LSI_53C1010_66)){
+ printk(NAME53C8XX ": not initializing, device not supported\n");
+ return -1;
+ }
+ chip = 0;
+ for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+ if (device_id != ncr_chip_table[i].device_id)
+ continue;
+ if (revision > ncr_chip_table[i].revision_id)
+ continue;
+ chip = &device->chip;
+ memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+ chip->revision_id = revision;
+ break;
+ }
+
+ /*
+ ** Ignore Symbios chips controlled by SISL RAID controller.
+ ** This controller sets value 0x52414944 at RAM end - 16.
+ */
+#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
+ if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+ unsigned int ram_size, ram_val;
+ u_long ram_ptr;
+
+ if (chip->features & FE_RAM8K)
+ ram_size = 8192;
+ else
+ ram_size = 4096;
+
+ ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+ ram_size);
+ if (ram_ptr) {
+ ram_val = readl_raw(ram_ptr + ram_size - 16);
+ unmap_pci_mem(ram_ptr, ram_size);
+ if (ram_val == 0x52414944) {
+ printk(NAME53C8XX": not initializing, "
+ "driven by SISL RAID controller.\n");
+ return -1;
+ }
+ }
+ }
+#endif /* i386 and PCI MEMORY accessible */
+
+ if (!chip) {
+ printk(NAME53C8XX ": not initializing, device not supported\n");
+ return -1;
+ }
+
+#ifdef __powerpc__
+ /*
+ ** Fix-up for power/pc.
+ ** Should not be performed by the driver.
+ */
+ if ((command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ != (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": setting%s%s...\n",
+ (command & PCI_COMMAND_IO) ? "" : " PCI_COMMAND_IO",
+ (command & PCI_COMMAND_MEMORY) ? "" : " PCI_COMMAND_MEMORY");
+ command |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+ if ( is_prep ) {
+ if (io_port >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating io_port (Wacky IBM)");
+ io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_0, io_port);
+ }
+ if (base >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base (Wacky IBM)");
+ base = (base & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_1, base);
+ }
+ if (base_2 >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base2 (Wacky IBM)");
+ base_2 = (base_2 & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_2, base_2);
+ }
+ }
+#endif
+#endif /* __powerpc__ */
+
+#if defined(__sparc__) && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0))
+ /*
+ * Severall fix-ups for sparc.
+ *
+ * Should not be performed by the driver, which is why all
+ * this crap is cleaned up in 2.4.x
+ */
+
+ base = __pa(base);
+ base_2 = __pa(base_2);
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_COMMAND_MASTER bit (fixup)\n");
+ command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+ pcibios_read_config_word(bus, device_fn, PCI_COMMAND, &command);
+ }
+
+ if ((chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_COMMAND_INVALIDATE bit (fixup)\n");
+ command |= PCI_COMMAND_INVALIDATE;
+ pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+ pcibios_read_config_word(bus, device_fn, PCI_COMMAND, &command);
+ }
+
+ if ((chip->features & FE_CLSE) && !cache_line_size) {
+ /* PCI_CACHE_LINE_SIZE value is in 32-bit words. */
+ cache_line_size = 64 / sizeof(u_int32);
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_CACHE_LINE_SIZE to %d (fixup)\n",
+ cache_line_size);
+ pcibios_write_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, &cache_line_size);
+ }
+
+ if (!latency_timer) {
+ unsigned char min_gnt;
+
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_MIN_GNT, &min_gnt);
+ if (min_gnt == 0)
+ latency_timer = 128;
+ else
+ latency_timer = ((min_gnt << 3) & 0xff);
+ printk("ncr53c8xx: setting PCI_LATENCY_TIMER to %d bus clocks (fixup)\n", latency_timer);
+ pcibios_write_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, latency_timer);
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, &latency_timer);
+ }
+#endif /* __sparc__ && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)) */
+
+#if defined(__i386__) && !defined(MODULE)
+ if (!cache_line_size) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+ extern char x86;
+ switch(x86) {
+#else
+ switch(boot_cpu_data.x86) {
+#endif
+ case 4: suggested_cache_line_size = 4; break;
+ case 6:
+ case 5: suggested_cache_line_size = 8; break;
+ }
+ }
+#endif /* __i386__ */
+
+ /*
+ ** Check availability of IO space, memory space.
+ ** Enable master capability if not yet.
+ **
+ ** We shouldn't have to care about the IO region when
+ ** we are using MMIO. But calling check_region() from
+ ** both the ncr53c8xx and the sym53c8xx drivers prevents
+ ** from attaching devices from the both drivers.
+ ** If you have a better idea, let me know.
+ */
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (!(command & PCI_COMMAND_IO)) {
+ printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
+ (long) io_port);
+ io_port = 0;
+ }
+#endif
+ if (!(command & PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": PCI_COMMAND_MEMORY not set.\n");
+ base = 0;
+ base_2 = 0;
+ }
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (io_port && check_region (io_port, 128)) {
+ printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
+ (long) io_port);
+ io_port = 0;
+ }
+ if (!io_port)
+ return -1;
+#endif
+#ifndef SCSI_NCR_IOMAPPED
+ if (!base) {
+ printk(NAME53C8XX ": MMIO base address disabled.\n");
+ return -1;
+ }
+#endif
+
+/* The ncr53c8xx driver never did set the PCI parity bit. */
+/* Since setting this bit is known to trigger spurious MDPE */
+/* errors on some 895 controllers when noise on power lines is */
+/* too high, I donnot want to change previous ncr53c8xx driver */
+/* behaviour on that point (the sym53c8xx driver set this bit). */
+#if 0
+ /*
+ ** Set MASTER capable and PARITY bit, if not yet.
+ */
+ if ((command & (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY))
+ != (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY)) {
+ printk(NAME53C8XX ": setting%s%s...(fix-up)\n",
+ (command & PCI_COMMAND_MASTER) ? "" : " PCI_COMMAND_MASTER",
+ (command & PCI_COMMAND_PARITY) ? "" : " PCI_COMMAND_PARITY");
+ command |= (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+#else
+ /*
+ ** Set MASTER capable if not yet.
+ */
+ if ((command & PCI_COMMAND_MASTER) != PCI_COMMAND_MASTER) {
+ printk(NAME53C8XX ": setting PCI_COMMAND_MASTER...(fix-up)\n");
+ command |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+#endif
+
+ /*
+ ** Fix some features according to driver setup.
+ */
+ if (!(driver_setup.special_features & 1))
+ chip->features &= ~FE_SPECIAL_SET;
+ else {
+ if (driver_setup.special_features & 2)
+ chip->features &= ~FE_WRIE;
+ if (driver_setup.special_features & 4)
+ chip->features &= ~FE_NOPM;
+ }
+ if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+ chip->features |= FE_ULTRA;
+ chip->features &= ~FE_ULTRA2;
+ }
+ if (driver_setup.ultra_scsi < 1)
+ chip->features &= ~FE_ULTRA;
+ if (!driver_setup.max_wide)
+ chip->features &= ~FE_WIDE;
+
+ /*
+ ** Some features are required to be enabled in order to
+ ** work around some chip problems. :) ;)
+ ** (ITEM 12 of a DEL about the 896 I haven't yet).
+ ** We must ensure the chip will use WRITE AND INVALIDATE.
+ ** The revision number limit is for now arbitrary.
+ */
+ if (device_id == PCI_DEVICE_ID_NCR_53C896 && revision <= 0x10) {
+ chip->features |= (FE_WRIE | FE_CLSE);
+ pci_fix_up |= 3; /* Force appropriate PCI fix-up */
+ }
+
+#ifdef SCSI_NCR_PCI_FIX_UP_SUPPORT
+ /*
+ ** Try to fix up PCI config according to wished features.
+ */
+ if ((pci_fix_up & 1) && (chip->features & FE_CLSE) &&
+ !cache_line_size && suggested_cache_line_size) {
+ cache_line_size = suggested_cache_line_size;
+ pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ printk(NAME53C8XX ": PCI_CACHE_LINE_SIZE set to %d (fix-up).\n",
+ cache_line_size);
+ }
+
+ if ((pci_fix_up & 2) && cache_line_size &&
+ (chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ printk(NAME53C8XX": setting PCI_COMMAND_INVALIDATE (fix-up)\n");
+ command |= PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+ /*
+ ** Tune PCI LATENCY TIMER according to burst max length transfer.
+ ** (latency timer >= burst length + 6, we add 10 to be quite sure)
+ */
+
+ if (chip->burst_max && (latency_timer == 0 || (pci_fix_up & 4))) {
+ u_char lt = (1 << chip->burst_max) + 6 + 10;
+ if (latency_timer < lt) {
+ printk(NAME53C8XX
+ ": changing PCI_LATENCY_TIMER from %d to %d.\n",
+ (int) latency_timer, (int) lt);
+ latency_timer = lt;
+ pci_write_config_byte(pdev,
+ PCI_LATENCY_TIMER, latency_timer);
+ }
+ }
+
+#endif /* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+ /*
+ ** Initialise ncr_device structure with items required by ncr_attach.
+ */
+ device->pdev = pdev;
+ device->slot.bus = PciBusNumber(pdev);
+ device->slot.device_fn = PciDeviceFn(pdev);
+ device->slot.base = base;
+ device->slot.base_2 = base_2;
+ device->slot.io_port = io_port;
+ device->slot.irq = irq;
+ device->attach_done = 0;
+
+ return 0;
+}
+
+/*===================================================================
+**
+** Detect all 53c8xx hosts and then attach them.
+**
+** If we are using NVRAM, once all hosts are detected, we need to
+** check any NVRAM for boot order in case detect and boot order
+** differ and attach them using the order in the NVRAM.
+**
+** If no NVRAM is found or the data appears invalid, attach boards in
+** the order they are detected.
+**
+**===================================================================
+*/
+static int __init
+sym53c8xx__detect(Scsi_Host_Template *tpnt, u_short ncr_chip_ids[], int chips)
+{
+	pcidev_t pcidev;
+	int i, j, hosts, count;
+	int attach_count = 0;
+	ncr_device *devtbl, *devp;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	ncr_nvram nvram0, nvram, *nvp;
+#endif
+
+	/*
+	**	PCI is required.
+	*/
+	if (!pci_present())
+		return 0;
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+	ncr_debug = driver_setup.debug;
+#endif
+	if (initverbose >= 2)
+		ncr_print_driver_setup();
+
+	/*
+	**	Allocate the device table since we do not want to
+	**	overflow the kernel stack.
+	**	1 x 4K PAGE is enough for more than 40 devices for i386.
+	*/
+	devtbl = m_calloc(PAGE_SIZE, "devtbl");
+	if (!devtbl)
+		return 0;
+
+	/*
+	**	Detect all NCR PQS/PDS memory controllers.
+	*/
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+	ncr_detect_pqs_pds();
+#endif
+
+	/*
+	**	Detect all 53c8xx hosts.
+	**	Save the first Symbios NVRAM content if any
+	**	for the boot order.
+	*/
+	hosts	= PAGE_SIZE / sizeof(*devtbl);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	nvp = (driver_setup.use_nvram & 0x1) ? &nvram0 : 0;
+#endif
+	j = 0;
+	count = 0;
+	pcidev = PCIDEV_NULL;
+	while (1) {
+		char *msg = "";
+		if (count >= hosts)
+			break;
+		if (j >= chips)
+			break;
+		i = driver_setup.reverse_probe ? chips - 1 - j : j;
+		pcidev = pci_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+					 pcidev);
+		if (pcidev == PCIDEV_NULL) {
+			++j;
+			continue;
+		}
+		/* Some hardware, such as the HP LH4, may report PCI devices twice */
+		for (i = 0; i < count ; i++) {
+			if (devtbl[i].slot.bus	     == PciBusNumber(pcidev) && 
+			    devtbl[i].slot.device_fn == PciDeviceFn(pcidev))
+				break;
+		}
+		if (i != count)	/* Ignore this device if we already have it */
+			continue;
+		devp = &devtbl[count];
+		devp->host_id = driver_setup.host_id;
+		devp->attach_done = 0;
+		if (sym53c8xx_pci_init(tpnt, pcidev, devp)) {
+			continue;
+		}
+		++count;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+		if (nvp) {
+			ncr_get_nvram(devp, nvp);
+			switch(nvp->type) {
+			case SCSI_NCR_SYMBIOS_NVRAM:
+				/*
+				 *   Switch to the other nvram buffer, so that 
+				 *   nvram0 will contain the first Symbios 
+				 *   format NVRAM content with boot order.
+				 */
+				nvp = &nvram;
+				msg = "with Symbios NVRAM";
+				break;
+			case SCSI_NCR_TEKRAM_NVRAM:
+				msg = "with Tekram NVRAM";
+				break;
+			}
+		}
+#endif
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+		if (devp->pqs_pds)
+			msg = "(NCR PQS/PDS)";
+#endif
+		printk(KERN_INFO NAME53C8XX ": 53c%s detected %s\n",
+		       devp->chip.name, msg);
+	}
+
+	/*
+	**	If we have found a SYMBIOS NVRAM, use first the NVRAM boot 
+	**	sequence as device boot order.
+	**	Check devices in the boot record against devices detected.
+	**	Attach devices if we find a match.  Boot table records that 
+	**	do not match any detected devices will be ignored.
+	**	Devices that do not match any boot table will not be attached
+	**	here but will attempt to be attached during the device table 
+	**	rescan.
+	*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+	if (!nvp || nvram0.type != SCSI_NCR_SYMBIOS_NVRAM)
+		goto next;
+	for (i = 0; i < 4; i++) {
+		Symbios_host *h = &nvram0.data.Symbios.host[i];
+		for (j = 0 ; j < count ; j++) {
+			devp = &devtbl[j];
+			if (h->device_fn != devp->slot.device_fn ||
+			    h->bus_nr	 != devp->slot.bus	 ||
+			    h->device_id != devp->chip.device_id)
+				continue;
+			if (devp->attach_done)
+				continue;
+			if (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) {
+				ncr_get_nvram(devp, nvp);
+				if (!ncr_attach (tpnt, attach_count, devp))
+					attach_count++;
+			}
+#if 0	/* Restore previous behaviour of ncr53c8xx driver */
+			else if (!(driver_setup.use_nvram & 0x80))
+				printk(KERN_INFO NAME53C8XX
+				       ": 53c%s state OFF thus not attached\n",
+				       devp->chip.name);
+#endif
+			else
+				continue;
+
+			devp->attach_done = 1;
+			break;
+		}
+	}
+next:
+#endif
+
+	/*
+	**	Rescan device list to make sure all boards attached.
+	**	Devices without boot records will not be attached yet
+	**	so try to attach them here.
+	*/
+	for (i= 0; i < count; i++) {
+		devp = &devtbl[i];
+		if (!devp->attach_done) {
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+			ncr_get_nvram(devp, nvp);
+#endif
+			if (!ncr_attach (tpnt, attach_count, devp))
+				attach_count++;
+		}
+	}
+
+	m_free(devtbl, PAGE_SIZE, "devtbl");
+
+	return attach_count;
+}
diff --git a/linux/src/drivers/scsi/sym53c8xx_defs.h b/linux/src/drivers/scsi/sym53c8xx_defs.h
new file mode 100644
index 0000000..10acf78
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx_defs.h
@@ -0,0 +1,1767 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+#ifndef SYM53C8XX_DEFS_H
+#define SYM53C8XX_DEFS_H
+
+/*
+** Check supported Linux versions
+*/
+
+#if !defined(LINUX_VERSION_CODE)
+#include <linux/version.h>
+#endif
+#include <linux/config.h>
+
+#ifndef LinuxVersionCode
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+#endif
+
+/*
+ * NCR PQS/PDS special device support.
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_PQS_PDS
+#define SCSI_NCR_PQS_PDS_SUPPORT
+#endif
+
+/*
+ * No more an option, enabled by default.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#endif
+
+/*
+** These options are not tunable from 'make config'
+*/
+//#define SCSI_NCR_PROC_INFO_SUPPORT
+
+/*
+** If you want a driver as small as possible, do not define the
+** following options.
+*/
+#define SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+#define SCSI_NCR_DEBUG_INFO_SUPPORT
+#define SCSI_NCR_PCI_FIX_UP_SUPPORT
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+# define SCSI_NCR_USER_COMMAND_SUPPORT
+# define SCSI_NCR_USER_INFO_SUPPORT
+#endif
+
+/*
+** To disable integrity checking, do not define the
+** following option.
+*/
+#ifdef CONFIG_SCSI_NCR53C8XX_INTEGRITY_CHECK
+# define SCSI_NCR_ENABLE_INTEGRITY_CHECK
+#endif
+
+/*==========================================================
+**
+** nvram settings - #define SCSI_NCR_NVRAM_SUPPORT to enable
+**
+**==========================================================
+*/
+
+#ifdef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define SCSI_NCR_NVRAM_SUPPORT
+/* #define SCSI_NCR_DEBUG_NVRAM */
+#endif
+
+/* ---------------------------------------------------------------------
+** Take into account kernel configured parameters.
+** Most of these options can be overridden at startup by a command line.
+** ---------------------------------------------------------------------
+*/
+
+/*
+ * For Ultra2 and Ultra3 SCSI support option, use special features.
+ *
+ * Value (default) means:
+ * bit 0 : all features enabled, except:
+ * bit 1 : PCI Write And Invalidate.
+ * bit 2 : Data Phase Mismatch handling from SCRIPTS.
+ *
+ * Use boot options ncr53c8xx=specf:1 if you want all chip features to be
+ * enabled by the driver.
+ */
+#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3)
+
+/*
+ * For Ultra2 and Ultra3 SCSI support allow 80Mhz synchronous data transfers.
+ * Value means:
+ * 0 - Ultra speeds disabled
+ * 1 - Ultra enabled (Maximum 20Mtrans/sec)
+ * 2 - Ultra2 enabled (Maximum 40Mtrans/sec)
+ * 3 - Ultra3 enabled (Maximum 80Mtrans/sec)
+ *
+ * Use boot options sym53c8xx=ultra:3 to enable Ultra3 support.
+ */
+
+#define SCSI_NCR_SETUP_ULTRA_SCSI (3)
+#define SCSI_NCR_MAX_SYNC (80)
+
+/*
+ * Allow tags from 2 to 256, default 8
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#if CONFIG_SCSI_NCR53C8XX_MAX_TAGS < 2
+#define SCSI_NCR_MAX_TAGS (2)
+#elif CONFIG_SCSI_NCR53C8XX_MAX_TAGS > 256
+#define SCSI_NCR_MAX_TAGS (256)
+#else
+#define SCSI_NCR_MAX_TAGS CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#endif
+#else
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+ * Allow tagged command queuing support if configured with default number
+ * of tags set to max (see above).
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#define SCSI_NCR_SETUP_DEFAULT_TAGS CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#elif defined CONFIG_SCSI_NCR53C8XX_TAGGED_QUEUE
+#define SCSI_NCR_SETUP_DEFAULT_TAGS SCSI_NCR_MAX_TAGS
+#else
+#define SCSI_NCR_SETUP_DEFAULT_TAGS (0)
+#endif
+
+/*
+ * Use normal IO if configured. Forced for alpha and powerpc.
+ * Powerpc fails copying to on-chip RAM using memcpy_toio().
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IOMAPPED)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__alpha__)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__powerpc__)
+#define SCSI_NCR_IOMAPPED
+#define SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+#elif defined(__sparc__)
+#undef SCSI_NCR_IOMAPPED
+#endif
+
+/*
+ * Should we enable DAC cycles on this platform?
+ * Until further investigation we do not enable it
+ * anywhere at the moment.
+ */
+#undef SCSI_NCR_USE_64BIT_DAC
+
+/*
+ * Immediate arbitration
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IARB)
+#define SCSI_NCR_IARB_SUPPORT
+#endif
+
+/*
+ * Should we enable DAC cycles on sparc64 platforms?
+ * Until further investigation we do not enable it
+ * anywhere at the moment.
+ */
+#undef SCSI_NCR_USE_64BIT_DAC
+
+/*
+ * Sync transfer frequency at startup.
+ * Allow from 5Mhz to 80Mhz default 20 Mhz.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC (20)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC > SCSI_NCR_MAX_SYNC
+#undef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC SCSI_NCR_MAX_SYNC
+#endif
+
+#if CONFIG_SCSI_NCR53C8XX_SYNC == 0
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (255)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 5
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (50)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 20
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (250/(CONFIG_SCSI_NCR53C8XX_SYNC))
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 33
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (11)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 40
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (10)
+#else
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (9)
+#endif
+
+/*
+ * Disallow disconnections at boot-up
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT
+#define SCSI_NCR_SETUP_DISCONNECTION (0)
+#else
+#define SCSI_NCR_SETUP_DISCONNECTION (1)
+#endif
+
+/*
+ * Force synchronous negotiation for all targets
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (1)
+#else
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (0)
+#endif
+
+/*
+ * Disable master parity checking (flawed hardwares need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_MPARITY_CHECK
+#define SCSI_NCR_SETUP_MASTER_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_MASTER_PARITY (1)
+#endif
+
+/*
+ * Disable scsi parity checking (flawed devices may need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_PARITY_CHECK
+#define SCSI_NCR_SETUP_SCSI_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_SCSI_PARITY (1)
+#endif
+
+/*
+ * Vendor specific stuff
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT
+#define SCSI_NCR_SETUP_LED_PIN (1)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (4)
+#else
+#define SCSI_NCR_SETUP_LED_PIN (0)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (0)
+#endif
+
+/*
+ * Settle time after reset at boot-up
+ */
+#define SCSI_NCR_SETUP_SETTLE_TIME (2)
+
+/*
+** Bridge quirks work-around option defaulted to 1.
+*/
+#ifndef SCSI_NCR_PCIQ_WORK_AROUND_OPT
+#define SCSI_NCR_PCIQ_WORK_AROUND_OPT 1
+#endif
+
+/*
+** Work-around common bridge misbehaviour.
+**
+** - Do not flush posted writes in the opposite
+** direction on read.
+** - May reorder DMA writes to memory.
+**
+** This option should not affect performances
+** significantly, so it is the default.
+*/
+#if SCSI_NCR_PCIQ_WORK_AROUND_OPT == 1
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+
+/*
+** Same as option 1, but also deal with
+** misconfigured interrupts.
+**
+** - Edge triggered instead of level sensitive.
+** - No interrupt line connected.
+** - IRQ number misconfigured.
+**
+** If no interrupt is delivered, the driver will
+** catch the interrupt conditions 10 times per
+** second. No need to say that this option is
+** not recommended.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 2
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+#define SCSI_NCR_PCIQ_BROKEN_INTR
+
+/*
+** Some bridge designers decided to flush
+** everything prior to deliver the interrupt.
+** This option tries to deal with such a
+** behaviour.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 3
+#define SCSI_NCR_PCIQ_SYNC_ON_INTR
+#endif
+
+/*
+** Other parameters not configurable with "make config"
+** Avoid to change these constants, unless you know what you are doing.
+*/
+
+#define SCSI_NCR_ALWAYS_SIMPLE_TAG
+#define SCSI_NCR_MAX_SCATTER (127)
+#define SCSI_NCR_MAX_TARGET (16)
+
+/*
+** Compute some desirable value for CAN_QUEUE
+** and CMD_PER_LUN.
+** The driver will use lower values if these
+** ones appear to be too large.
+*/
+#define SCSI_NCR_CAN_QUEUE (8*SCSI_NCR_MAX_TAGS + 2*SCSI_NCR_MAX_TARGET)
+#define SCSI_NCR_CMD_PER_LUN (SCSI_NCR_MAX_TAGS)
+
+#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER)
+#define SCSI_NCR_TIMER_INTERVAL (HZ)
+
+#if 1 /* defined CONFIG_SCSI_MULTI_LUN */
+#define SCSI_NCR_MAX_LUN (16)
+#else
+#define SCSI_NCR_MAX_LUN (1)
+#endif
+
+#ifndef HOSTS_C
+
+/*
+** These simple macros limit expression involving
+** kernel time values (jiffies) to some that have
+** chance not to be too much incorrect. :-)
+*/
+#define ktime_get(o) (jiffies + (u_long) o)
+#define ktime_exp(b) ((long)(jiffies) - (long)(b) >= 0)
+#define ktime_dif(a, b) ((long)(a) - (long)(b))
+/* These ones are not used in this driver */
+#define ktime_add(a, o) ((a) + (u_long)(o))
+#define ktime_sub(a, o) ((a) - (u_long)(o))
+
+
+/*
+ * IO functions definition for big/little endian CPU support.
+ * For now, the NCR is only supported in little endian addressing mode,
+ */
+
+#ifdef __BIG_ENDIAN
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#error "BIG ENDIAN byte ordering needs kernel version >= 2.1.0"
+#endif
+
+#define inw_l2b inw
+#define inl_l2b inl
+#define outw_b2l outw
+#define outl_b2l outl
+#define readw_l2b readw
+#define readl_l2b readl
+#define writew_b2l writew
+#define writel_b2l writel
+
+#else /* little endian */
+
+#if defined(__i386__) /* i386 implements full FLAT memory/MMIO model */
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readb_raw(a) (*(volatile unsigned char *) (a))
+#define readw_raw(a) (*(volatile unsigned short *) (a))
+#define readl_raw(a) (*(volatile unsigned int *) (a))
+#define writeb_raw(b,a) ((*(volatile unsigned char *) (a)) = (b))
+#define writew_raw(b,a) ((*(volatile unsigned short *) (a)) = (b))
+#define writel_raw(b,a) ((*(volatile unsigned int *) (a)) = (b))
+
+#else /* Other little-endian */
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readw_raw readw
+#define readl_raw readl
+#define writew_raw writew
+#define writel_raw writel
+
+#endif
+#endif
+
+#ifdef SCSI_NCR_BIG_ENDIAN
+#error "The NCR in BIG ENDIAN addressing mode is not (yet) supported"
+#endif
+
+
+/*
+ * IA32 architecture does not reorder STORES and prevents
+ * LOADS from passing STORES. It is called `program order'
+ * by Intel and allows device drivers to deal with memory
+ * ordering by only ensuring that the code is not reordered
+ * by the compiler when ordering is required.
+ * Other architectures implement a weaker ordering that
+ * requires memory barriers (and also IO barriers when they
+ * make sense) to be used.
+ * We want to be paranoid for ppc and ia64. :)
+ */
+
+#if defined __i386__
+#define MEMORY_BARRIER() do { ; } while(0)
+#elif defined __powerpc__
+#define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
+#elif defined __ia64__
+#define MEMORY_BARRIER() __asm__ volatile("mf.a; mf" : : : "memory")
+#else
+#define MEMORY_BARRIER() mb()
+#endif
+
+
+/*
+ * If the NCR uses big endian addressing mode over the
+ * PCI, actual io register addresses for byte and word
+ * accesses must be changed according to lane routing.
+ * Btw, the ncr_offb() and ncr_offw() macros only apply to
+ * constants and so do not generate bloated code.
+ */
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+
+#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
+#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
+
+#else
+
+#define ncr_offb(o) (o)
+#define ncr_offw(o) (o)
+
+#endif
+
+/*
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for script patching.
+ * Macro cpu_to_scr() is to be used for script patching.
+ * Macro scr_to_cpu() is to be used for getting a DWORD
+ * from the script.
+ */
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_be32(dw)
+#define scr_to_cpu(dw) be32_to_cpu(dw)
+
+#else
+
+#define cpu_to_scr(dw) (dw)
+#define scr_to_cpu(dw) (dw)
+
+#endif
+
+/*
+ * Access to the controller chip.
+ *
+ * If SCSI_NCR_IOMAPPED is defined, the driver will use
+ * normal IOs instead of the MEMORY MAPPED IO method
+ * recommended by PCI specifications.
+ * If all PCI bridges, host bridges and architectures
+ * would have been correctly designed for PCI, this
+ * option would be useless.
+ *
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for accessing chip io
+ * registers. Functions suffixed by '_raw' are assumed
+ * to access the chip over the PCI without doing byte
+ * reordering. Functions suffixed by '_l2b' are
+ * assumed to perform little-endian to big-endian byte
+ * reordering, those suffixed by '_b2l' blah, blah,
+ * blah, ...
+ */
+
+#if defined(SCSI_NCR_IOMAPPED)
+
+/*
+ * IO mapped only input / output
+ */
+
+#define INB_OFF(o) inb (np->base_io + ncr_offb(o))
+#define OUTB_OFF(o, val) outb ((val), np->base_io + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_l2b (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_l2b (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_b2l ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_b2l ((val), np->base_io + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_b2l (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_b2l (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_l2b ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_l2b ((val), np->base_io + (o))
+
+#else
+
+#define INW_OFF(o) inw_raw (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_raw (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_raw ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_raw ((val), np->base_io + (o))
+
+#endif /* ENDIANs */
+
+#else /* defined SCSI_NCR_IOMAPPED */
+
+/*
+ * MEMORY mapped IO input / output
+ */
+
+#define INB_OFF(o) readb((char *)np->reg + ncr_offb(o))
+#define OUTB_OFF(o, val) writeb((val), (char *)np->reg + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
+
+#else
+
+#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_raw((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
+
+#endif
+
+#endif /* defined SCSI_NCR_IOMAPPED */
+
+#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
+#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
+#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
+
+#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
+
+/*
+ * Set bit field ON, OFF
+ */
+
+#define OUTONB(r, m) OUTB(r, INB(r) | (m))
+#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
+#define OUTONW(r, m) OUTW(r, INW(r) | (m))
+#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
+#define OUTONL(r, m) OUTL(r, INL(r) | (m))
+#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
+
+/*
+ * We normally want the chip to have a consistent view
+ * of driver internal data structures when we restart it.
+ * Thus these macros.
+ */
+#define OUTL_DSP(v) \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTL (nc_dsp, (v)); \
+ } while (0)
+
+#define OUTONB_STD() \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTONB (nc_dcntl, (STD|NOCOM)); \
+ } while (0)
+
+
+/*
+** NCR53C8XX Device Ids
+*/
+
+#ifndef PCI_DEVICE_ID_NCR_53C810
+#define PCI_DEVICE_ID_NCR_53C810 1
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C810AP
+#define PCI_DEVICE_ID_NCR_53C810AP 5
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C815
+#define PCI_DEVICE_ID_NCR_53C815 4
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C820
+#define PCI_DEVICE_ID_NCR_53C820 2
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C825
+#define PCI_DEVICE_ID_NCR_53C825 3
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C860
+#define PCI_DEVICE_ID_NCR_53C860 6
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875
+#define PCI_DEVICE_ID_NCR_53C875 0xf
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875J
+#define PCI_DEVICE_ID_NCR_53C875J 0x8f
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C885
+#define PCI_DEVICE_ID_NCR_53C885 0xd
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895
+#define PCI_DEVICE_ID_NCR_53C895 0xc
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C896
+#define PCI_DEVICE_ID_NCR_53C896 0xb
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895A
+#define PCI_DEVICE_ID_NCR_53C895A 0x12
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C1510D
+#define PCI_DEVICE_ID_NCR_53C1510D 0xa
+#endif
+
+#ifndef PCI_DEVICE_ID_LSI_53C1010
+#define PCI_DEVICE_ID_LSI_53C1010 0x20
+#endif
+
+#ifndef PCI_DEVICE_ID_LSI_53C1010_66
+#define PCI_DEVICE_ID_LSI_53C1010_66 0x21
+#endif
+
+
+/*
+** NCR53C8XX devices features table.
+*/
+typedef struct {
+ unsigned short device_id;
+ unsigned short revision_id;
+ char *name;
+ unsigned char burst_max; /* log-base-2 of max burst */
+ unsigned char offset_max;
+ unsigned char nr_divisor;
+ unsigned int features;
+#define FE_LED0 (1<<0)
+#define FE_WIDE (1<<1) /* Wide data transfers */
+#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */
+#define FE_ULTRA2 (1<<3) /* Ultra 2 - 40 Mtrans/sec */
+#define FE_DBLR (1<<4) /* Clock doubler present */
+#define FE_QUAD (1<<5) /* Clock quadrupler present */
+#define FE_ERL (1<<6) /* Enable read line */
+#define FE_CLSE (1<<7) /* Cache line size enable */
+#define FE_WRIE (1<<8) /* Write & Invalidate enable */
+#define FE_ERMP (1<<9) /* Enable read multiple */
+#define FE_BOF (1<<10) /* Burst opcode fetch */
+#define FE_DFS (1<<11) /* DMA fifo size */
+#define FE_PFEN (1<<12) /* Prefetch enable */
+#define FE_LDSTR (1<<13) /* Load/Store supported */
+#define FE_RAM (1<<14) /* On chip RAM present */
+#define FE_CLK80 (1<<15) /* Board clock is 80 MHz */
+#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */
+#define FE_64BIT (1<<17) /* Supports 64-bit addressing */
+#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */
+#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */
+#define FE_LEDC (1<<20) /* Hardware control of LED */
+#define FE_DIFF (1<<21) /* Support Differential SCSI */
+#define FE_ULTRA3 (1<<22) /* Ultra-3 80Mtrans/sec */
+#define FE_66MHZ (1<<23) /* 66MHz PCI Support */
+
+#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_SCSI_SET (FE_WIDE|FE_ULTRA|FE_ULTRA2|FE_DBLR|FE_QUAD|F_CLK80)
+#define FE_SPECIAL_SET (FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM)
+} ncr_chip;
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 3.
+** Memory Read transaction terminated by a retry followed by
+** Memory Read Line command.
+*/
+#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL)
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 5.
+** On paper, this errata is harmless. But it is a good reason for
+** using a shorter programmed burst length (64 DWORDS instead of 128).
+*/
+
+#define SCSI_NCR_CHIP_TABLE \
+{ \
+ {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, \
+ FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, \
+ FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, \
+ FE_ERL|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C820, 0xff, "820", 4, 8, 4, \
+ FE_WIDE|FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 4, 8, 4, \
+ FE_WIDE|FE_ERL|FE_BOF|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, \
+ FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, \
+ FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|\
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x0f, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x1f, "876", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x2f, "875E", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "876", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C895A, 0xff, "895a", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C1510D, 0xff, "1510D", 7, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_IO256} \
+ , \
+ {PCI_DEVICE_ID_LSI_53C1010, 0xff, "1010", 6, 31, 7, \
+ FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3} \
+ , \
+ {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010_66", 6, 31, 7, \
+ FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3|FE_66MHZ} \
+}
+
+/*
+ * List of supported NCR chip ids
+ */
+#define SCSI_NCR_CHIP_IDS \
+{ \
+ PCI_DEVICE_ID_NCR_53C810, \
+ PCI_DEVICE_ID_NCR_53C815, \
+ PCI_DEVICE_ID_NCR_53C820, \
+ PCI_DEVICE_ID_NCR_53C825, \
+ PCI_DEVICE_ID_NCR_53C860, \
+ PCI_DEVICE_ID_NCR_53C875, \
+ PCI_DEVICE_ID_NCR_53C875J, \
+ PCI_DEVICE_ID_NCR_53C885, \
+ PCI_DEVICE_ID_NCR_53C895, \
+ PCI_DEVICE_ID_NCR_53C896, \
+ PCI_DEVICE_ID_NCR_53C895A, \
+ PCI_DEVICE_ID_NCR_53C1510D, \
+ PCI_DEVICE_ID_LSI_53C1010, \
+ PCI_DEVICE_ID_LSI_53C1010_66 \
+}
+
+/*
+** Driver setup structure.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+#define SCSI_NCR_MAX_EXCLUDES 8
+struct ncr_driver_setup {
+ u_char master_parity;
+ u_char scsi_parity;
+ u_char disconnection;
+ u_char special_features;
+ u_char ultra_scsi;
+ u_char force_sync_nego;
+ u_char reverse_probe;
+ u_char pci_fix_up;
+ u_char use_nvram;
+ u_char verbose;
+ u_char default_tags;
+ u_short default_sync;
+ u_short debug;
+ u_char burst_max;
+ u_char led_pin;
+ u_char max_wide;
+ u_char settle_delay;
+ u_char diff_support;
+ u_char irqm;
+ u_char bus_check;
+ u_char optimize;
+ u_char recovery;
+ u_char host_id;
+ u_short iarb;
+ u_long excludes[SCSI_NCR_MAX_EXCLUDES];
+ char tag_ctrl[100];
+};
+
+/*
+** Initial setup.
+** Can be overriden at startup by a command line.
+*/
+#define SCSI_NCR_DRIVER_SETUP \
+{ \
+ SCSI_NCR_SETUP_MASTER_PARITY, \
+ SCSI_NCR_SETUP_SCSI_PARITY, \
+ SCSI_NCR_SETUP_DISCONNECTION, \
+ SCSI_NCR_SETUP_SPECIAL_FEATURES, \
+ SCSI_NCR_SETUP_ULTRA_SCSI, \
+ SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \
+ 0, \
+ 0, \
+ 1, \
+ 0, \
+ SCSI_NCR_SETUP_DEFAULT_TAGS, \
+ SCSI_NCR_SETUP_DEFAULT_SYNC, \
+ 0x00, \
+ 7, \
+ SCSI_NCR_SETUP_LED_PIN, \
+ 1, \
+ SCSI_NCR_SETUP_SETTLE_TIME, \
+ SCSI_NCR_SETUP_DIFF_SUPPORT, \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 255, \
+ 0x00 \
+}
+
+/*
+** Boot fail safe setup.
+** Override initial setup from boot command line:
+** ncr53c8xx=safe:y
+*/
+#define SCSI_NCR_DRIVER_SAFE_SETUP \
+{ \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 1, \
+ 2, \
+ 0, \
+ 255, \
+ 0x00, \
+ 255, \
+ 0, \
+ 0, \
+ 10, \
+ 1, \
+ 1, \
+ 1, \
+ 0, \
+ 0, \
+ 255 \
+}
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+** Symbios NvRAM data format
+*/
+#define SYMBIOS_NVRAM_SIZE 368
+#define SYMBIOS_NVRAM_ADDRESS 0x100
+
+struct Symbios_nvram {
+/* Header 6 bytes */
+ u_short type; /* 0x0000 */
+ u_short byte_count; /* excluding header/trailer */
+ u_short checksum;
+
+/* Controller set up 20 bytes */
+ u_char v_major; /* 0x00 */
+ u_char v_minor; /* 0x30 */
+ u_int32 boot_crc;
+ u_short flags;
+#define SYMBIOS_SCAM_ENABLE (1)
+#define SYMBIOS_PARITY_ENABLE (1<<1)
+#define SYMBIOS_VERBOSE_MSGS (1<<2)
+#define SYMBIOS_CHS_MAPPING (1<<3)
+#define SYMBIOS_NO_NVRAM (1<<3) /* ??? */
+ u_short flags1;
+#define SYMBIOS_SCAN_HI_LO (1)
+ u_short term_state;
+#define SYMBIOS_TERM_CANT_PROGRAM (0)
+#define SYMBIOS_TERM_ENABLED (1)
+#define SYMBIOS_TERM_DISABLED (2)
+ u_short rmvbl_flags;
+#define SYMBIOS_RMVBL_NO_SUPPORT (0)
+#define SYMBIOS_RMVBL_BOOT_DEVICE (1)
+#define SYMBIOS_RMVBL_MEDIA_INSTALLED (2)
+ u_char host_id;
+ u_char num_hba; /* 0x04 */
+ u_char num_devices; /* 0x10 */
+ u_char max_scam_devices; /* 0x04 */
+ u_char num_valid_scam_devives; /* 0x00 */
+ u_char rsvd;
+
+/* Boot order 14 bytes * 4 */
+ struct Symbios_host{
+ u_short type; /* 4:8xx / 0:nok */
+ u_short device_id; /* PCI device id */
+ u_short vendor_id; /* PCI vendor id */
+ u_char bus_nr; /* PCI bus number */
+ u_char device_fn; /* PCI device/function number << 3*/
+ u_short word8;
+ u_short flags;
+#define SYMBIOS_INIT_SCAN_AT_BOOT (1)
+ u_short io_port; /* PCI io_port address */
+ } host[4];
+
+/* Targets 8 bytes * 16 */
+ struct Symbios_target {
+ u_char flags;
+#define SYMBIOS_DISCONNECT_ENABLE (1)
+#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1)
+#define SYMBIOS_SCAN_LUNS (1<<2)
+#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3)
+ u_char rsvd;
+ u_char bus_width; /* 0x08/0x10 */
+ u_char sync_offset;
+ u_short sync_period; /* 4*period factor */
+ u_short timeout;
+ } target[16];
+/* Scam table 8 bytes * 4 */
+ struct Symbios_scam {
+ u_short id;
+ u_short method;
+#define SYMBIOS_SCAM_DEFAULT_METHOD (0)
+#define SYMBIOS_SCAM_DONT_ASSIGN (1)
+#define SYMBIOS_SCAM_SET_SPECIFIC_ID (2)
+#define SYMBIOS_SCAM_USE_ORDER_GIVEN (3)
+ u_short status;
+#define SYMBIOS_SCAM_UNKNOWN (0)
+#define SYMBIOS_SCAM_DEVICE_NOT_FOUND (1)
+#define SYMBIOS_SCAM_ID_NOT_SET (2)
+#define SYMBIOS_SCAM_ID_VALID (3)
+ u_char target_id;
+ u_char rsvd;
+ } scam[4];
+
+ u_char spare_devices[15*8];
+ u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
+};
+typedef struct Symbios_nvram Symbios_nvram;
+typedef struct Symbios_host Symbios_host;
+typedef struct Symbios_target Symbios_target;
+typedef struct Symbios_scam Symbios_scam;
+
+/*
+** Tekram NvRAM data format.
+*/
+#define TEKRAM_NVRAM_SIZE 64
+#define TEKRAM_93C46_NVRAM_ADDRESS 0
+#define TEKRAM_24C16_NVRAM_ADDRESS 0x40
+
+struct Tekram_nvram {
+ struct Tekram_target {
+ u_char flags;
+#define TEKRAM_PARITY_CHECK (1)
+#define TEKRAM_SYNC_NEGO (1<<1)
+#define TEKRAM_DISCONNECT_ENABLE (1<<2)
+#define TEKRAM_START_CMD (1<<3)
+#define TEKRAM_TAGGED_COMMANDS (1<<4)
+#define TEKRAM_WIDE_NEGO (1<<5)
+ u_char sync_index;
+ u_short word2;
+ } target[16];
+ u_char host_id;
+ u_char flags;
+#define TEKRAM_MORE_THAN_2_DRIVES (1)
+#define TEKRAM_DRIVES_SUP_1GB (1<<1)
+#define TEKRAM_RESET_ON_POWER_ON (1<<2)
+#define TEKRAM_ACTIVE_NEGATION (1<<3)
+#define TEKRAM_IMMEDIATE_SEEK (1<<4)
+#define TEKRAM_SCAN_LUNS (1<<5)
+#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; 1: boot device; 2:all */
+ u_char boot_delay_index;
+ u_char max_tags_index;
+ u_short flags1;
+#define TEKRAM_F2_F6_ENABLED (1)
+ u_short spare[29];
+};
+typedef struct Tekram_nvram Tekram_nvram;
+typedef struct Tekram_target Tekram_target;
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/**************** ORIGINAL CONTENT of ncrreg.h from FreeBSD ******************/
+
+/*-----------------------------------------------------------------
+**
+** The ncr 53c810 register structure.
+**
+**-----------------------------------------------------------------
+*/
+
+struct ncr_reg {
+/*00*/ u_char nc_scntl0; /* full arb., ena parity, par->ATN */
+
+/*01*/ u_char nc_scntl1; /* no reset */
+ #define ISCON 0x10 /* connected to scsi */
+ #define CRST 0x08 /* force reset */
+ #define IARB 0x02 /* immediate arbitration */
+
+/*02*/ u_char nc_scntl2; /* no disconnect expected */
+ #define SDU 0x80 /* cmd: disconnect will raise error */
+ #define CHM 0x40 /* sta: chained mode */
+ #define WSS 0x08 /* sta: wide scsi send [W]*/
+ #define WSR 0x01 /* sta: wide scsi received [W]*/
+
+/*03*/ u_char nc_scntl3; /* cnf system clock dependent */
+ #define EWS 0x08 /* cmd: enable wide scsi [W]*/
+ #define ULTRA 0x80 /* cmd: ULTRA enable */
+ /* bits 0-2, 7 rsvd for C1010 */
+
+/*04*/ u_char nc_scid; /* cnf host adapter scsi address */
+ #define RRE 0x40 /* r/w:e enable response to resel. */
+ #define SRE 0x20 /* r/w:e enable response to select */
+
+/*05*/ u_char nc_sxfer; /* ### Sync speed and count */
+ /* bits 6-7 rsvd for C1010 */
+
+/*06*/ u_char nc_sdid; /* ### Destination-ID */
+
+/*07*/ u_char nc_gpreg; /* ??? IO-Pins */
+
+/*08*/ u_char nc_sfbr; /* ### First byte in phase */
+
+/*09*/ u_char nc_socl;
+ #define CREQ 0x80 /* r/w: SCSI-REQ */
+ #define CACK 0x40 /* r/w: SCSI-ACK */
+ #define CBSY 0x20 /* r/w: SCSI-BSY */
+ #define CSEL 0x10 /* r/w: SCSI-SEL */
+ #define CATN 0x08 /* r/w: SCSI-ATN */
+ #define CMSG 0x04 /* r/w: SCSI-MSG */
+ #define CC_D 0x02 /* r/w: SCSI-C_D */
+ #define CI_O 0x01 /* r/w: SCSI-I_O */
+
+/*0a*/ u_char nc_ssid;
+
+/*0b*/ u_char nc_sbcl;
+
+/*0c*/ u_char nc_dstat;
+ #define DFE 0x80 /* sta: dma fifo empty */
+ #define MDPE 0x40 /* int: master data parity error */
+ #define BF 0x20 /* int: script: bus fault */
+ #define ABRT 0x10 /* int: script: command aborted */
+ #define SSI 0x08 /* int: script: single step */
+ #define SIR 0x04 /* int: script: interrupt instruct. */
+ #define IID 0x01 /* int: script: illegal instruct. */
+
+/*0d*/ u_char nc_sstat0;
+ #define ILF 0x80 /* sta: data in SIDL register lsb */
+ #define ORF 0x40 /* sta: data in SODR register lsb */
+ #define OLF 0x20 /* sta: data in SODL register lsb */
+ #define AIP 0x10 /* sta: arbitration in progress */
+ #define LOA 0x08 /* sta: arbitration lost */
+ #define WOA 0x04 /* sta: arbitration won */
+ #define IRST 0x02 /* sta: scsi reset signal */
+ #define SDP 0x01 /* sta: scsi parity signal */
+
+/*0e*/ u_char nc_sstat1;
+ #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
+
+/*0f*/ u_char nc_sstat2;
+ #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
+ #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
+ #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
+ #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
+ #define LDSC 0x02 /* sta: disconnect & reconnect */
+
+/*10*/ u_char nc_dsa; /* --> Base page */
+/*11*/ u_char nc_dsa1;
+/*12*/ u_char nc_dsa2;
+/*13*/ u_char nc_dsa3;
+
+/*14*/ u_char nc_istat; /* --> Main Command and status */
+ #define CABRT 0x80 /* cmd: abort current operation */
+ #define SRST 0x40 /* mod: reset chip */
+ #define SIGP 0x20 /* r/w: message from host to ncr */
+ #define SEM 0x10 /* r/w: message between host + ncr */
+ #define CON 0x08 /* sta: connected to scsi */
+ #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
+ #define SIP 0x02 /* sta: scsi-interrupt */
+ #define DIP 0x01 /* sta: host/script interrupt */
+
+/*15*/ u_char nc_istat1; /* 896 only */
+/*16*/ u_char nc_mbox0; /* 896 only */
+/*17*/ u_char nc_mbox1; /* 896 only */
+
+/*18*/ u_char nc_ctest0;
+/*19*/ u_char nc_ctest1;
+
+/*1a*/ u_char nc_ctest2;
+ #define CSIGP 0x40
+ /* bits 0-2,7 rsvd for C1010 */
+
+/*1b*/ u_char nc_ctest3;
+ #define FLF 0x08 /* cmd: flush dma fifo */
+ #define CLF 0x04 /* cmd: clear dma fifo */
+ #define FM 0x02 /* mod: fetch pin mode */
+ #define WRIE 0x01 /* mod: write and invalidate enable */
+ /* bits 4-7 rsvd for C1010 */
+
+/*1c*/ u_int32 nc_temp; /* ### Temporary stack */
+
+/*20*/ u_char nc_dfifo;
+/*21*/ u_char nc_ctest4;
+ #define BDIS 0x80 /* mod: burst disable */
+ #define MPEE 0x08 /* mod: master parity error enable */
+
+/*22*/ u_char nc_ctest5;
+ #define DFS 0x20 /* mod: dma fifo size */
+ /* bits 0-1, 3-7 rsvd for C1010 */
+/*23*/ u_char nc_ctest6;
+
+/*24*/ u_int32 nc_dbc; /* ### Byte count and command */
+/*28*/ u_int32 nc_dnad; /* ### Next command register */
+/*2c*/ u_int32 nc_dsp; /* --> Script Pointer */
+/*30*/ u_int32 nc_dsps; /* --> Script pointer save/opcode#2 */
+
+/*34*/ u_char nc_scratcha; /* Temporary register a */
+/*35*/ u_char nc_scratcha1;
+/*36*/ u_char nc_scratcha2;
+/*37*/ u_char nc_scratcha3;
+
+/*38*/ u_char nc_dmode;
+ #define BL_2 0x80 /* mod: burst length shift value +2 */
+ #define BL_1 0x40 /* mod: burst length shift value +1 */
+ #define ERL 0x08 /* mod: enable read line */
+ #define ERMP 0x04 /* mod: enable read multiple */
+ #define BOF 0x02 /* mod: burst op code fetch */
+
+/*39*/ u_char nc_dien;
+/*3a*/ u_char nc_sbr;
+
+/*3b*/ u_char nc_dcntl; /* --> Script execution control */
+ #define CLSE 0x80 /* mod: cache line size enable */
+ #define PFF 0x40 /* cmd: pre-fetch flush */
+ #define PFEN 0x20 /* mod: pre-fetch enable */
+ #define SSM 0x10 /* mod: single step mode */
+ #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
+ #define STD 0x04 /* cmd: start dma mode */
+ #define IRQD 0x02 /* mod: irq disable */
+ #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
+ /* bits 0-1 rsvd for C1010 */
+
+/*3c*/ u_int32 nc_adder;
+
+/*40*/ u_short nc_sien; /* -->: interrupt enable */
+/*42*/ u_short nc_sist; /* <--: interrupt status */
+ #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+ #define STO 0x0400/* sta: timeout (select) */
+ #define GEN 0x0200/* sta: timeout (general) */
+ #define HTH 0x0100/* sta: timeout (handshake) */
+ #define MA 0x80 /* sta: phase mismatch */
+ #define CMP 0x40 /* sta: arbitration complete */
+ #define SEL 0x20 /* sta: selected by another device */
+ #define RSL 0x10 /* sta: reselected by another device*/
+ #define SGE 0x08 /* sta: gross error (over/underflow)*/
+ #define UDC 0x04 /* sta: unexpected disconnect */
+ #define RST 0x02 /* sta: scsi bus reset detected */
+ #define PAR 0x01 /* sta: scsi parity error */
+
+/*44*/ u_char nc_slpar;
+/*45*/ u_char nc_swide;
+/*46*/ u_char nc_macntl;
+/*47*/ u_char nc_gpcntl;
+/*48*/ u_char nc_stime0; /* cmd: timeout for select&handshake*/
+/*49*/ u_char nc_stime1; /* cmd: timeout user defined */
+/*4a*/ u_short nc_respid; /* sta: Reselect-IDs */
+
+/*4c*/ u_char nc_stest0;
+
+/*4d*/ u_char nc_stest1;
+ #define SCLK 0x80 /* Use the PCI clock as SCSI clock */
+ #define DBLEN 0x08 /* clock doubler running */
+ #define DBLSEL 0x04 /* clock doubler selected */
+
+
+/*4e*/ u_char nc_stest2;
+ #define ROF 0x40 /* reset scsi offset (after gross error!) */
+ #define EXT 0x02 /* extended filtering */
+
+/*4f*/ u_char nc_stest3;
+ #define TE 0x80 /* c: tolerAnt enable */
+ #define HSC 0x20 /* c: Halt SCSI Clock */
+ #define CSF 0x02 /* c: clear scsi fifo */
+
+/*50*/ u_short nc_sidl; /* Lowlevel: latched from scsi data */
+/*52*/ u_char nc_stest4;
+ #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
+ #define SMODE_HVD 0x40 /* High Voltage Differential */
+ #define SMODE_SE 0x80 /* Single Ended */
+ #define SMODE_LVD 0xc0 /* Low Voltage Differential */
+ #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
+ /* bits 0-5 rsvd for C1010 */
+
+/*53*/ u_char nc_53_;
+/*54*/ u_short nc_sodl; /* Lowlevel: data out to scsi data */
+/*56*/ u_char nc_ccntl0; /* Chip Control 0 (896) */
+ #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */
+ #define PMJCTL 0x40 /* Phase Mismatch Jump Control */
+ #define ENNDJ 0x20 /* Enable Non Data PM Jump */
+ #define DISFC 0x10 /* Disable Auto FIFO Clear */
+ #define DILS 0x02 /* Disable Internal Load/Store */
+ #define DPR 0x01 /* Disable Pipe Req */
+
+/*57*/ u_char nc_ccntl1; /* Chip Control 1 (896) */
+ #define ZMOD 0x80 /* High Impedance Mode */
+ #define DIC 0x10 /* Disable Internal Cycles */
+ #define DDAC 0x08 /* Disable Dual Address Cycle */
+ #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */
+ #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. BMOV */
+ #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */
+
+/*58*/ u_short nc_sbdl; /* Lowlevel: data from scsi data */
+/*5a*/ u_short nc_5a_;
+
+/*5c*/ u_char nc_scr0; /* Working register B */
+/*5d*/ u_char nc_scr1; /* */
+/*5e*/ u_char nc_scr2; /* */
+/*5f*/ u_char nc_scr3; /* */
+
+/*60*/ u_char nc_scrx[64]; /* Working register C-R */
+/*a0*/ u_int32 nc_mmrs; /* Memory Move Read Selector */
+/*a4*/ u_int32 nc_mmws; /* Memory Move Write Selector */
+/*a8*/ u_int32 nc_sfs; /* Script Fetch Selector */
+/*ac*/ u_int32 nc_drs; /* DSA Relative Selector */
+/*b0*/ u_int32 nc_sbms; /* Static Block Move Selector */
+/*b4*/ u_int32 nc_dbms; /* Dynamic Block Move Selector */
+/*b8*/ u_int32 nc_dnad64; /* DMA Next Address 64 */
+/*bc*/ u_short nc_scntl4; /* C1010 only */
+ #define U3EN 0x80 /* Enable Ultra 3 */
+ #define AIPEN 0x40 /* Allow check upper byte lanes */
+ #define XCLKH_DT 0x08 /* Extra clock of data hold on DT
+ transfer edge */
+ #define XCLKH_ST 0x04 /* Extra clock of data hold on ST
+ transfer edge */
+
+/*be*/ u_char nc_aipcntl0; /* Epat Control 1 C1010 only */
+/*bf*/ u_char nc_aipcntl1; /* AIP Control C1010_66 Only */
+
+/*c0*/ u_int32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */
+/*c4*/ u_int32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */
+/*c8*/ u_char nc_rbc; /* Remaining Byte Count */
+/*c9*/ u_char nc_rbc1; /* */
+/*ca*/ u_char nc_rbc2; /* */
+/*cb*/ u_char nc_rbc3; /* */
+
+/*cc*/ u_char nc_ua; /* Updated Address */
+/*cd*/ u_char nc_ua1; /* */
+/*ce*/ u_char nc_ua2; /* */
+/*cf*/ u_char nc_ua3; /* */
+/*d0*/ u_int32 nc_esa; /* Entry Storage Address */
+/*d4*/ u_char nc_ia; /* Instruction Address */
+/*d5*/ u_char nc_ia1;
+/*d6*/ u_char nc_ia2;
+/*d7*/ u_char nc_ia3;
+/*d8*/ u_int32 nc_sbc; /* SCSI Byte Count (3 bytes only) */
+/*dc*/ u_int32 nc_csbc; /* Cumulative SCSI Byte Count */
+
+ /* Following for C1010 only */
+/*e0*/ u_short nc_crcpad; /* CRC Value */
+/*e2*/ u_char nc_crccntl0; /* CRC control register */
+ #define SNDCRC 0x10 /* Send CRC Request */
+/*e3*/ u_char nc_crccntl1; /* CRC control register */
+/*e4*/ u_int32 nc_crcdata; /* CRC data register */
+/*e8*/ u_int32 nc_e8_; /* rsvd */
+/*ec*/ u_int32 nc_ec_; /* rsvd */
+/*f0*/ u_short nc_dfbc; /* DMA FIFO byte count */
+
+};
+
+/*-----------------------------------------------------------
+**
+** Utility macros for the script.
+**
+**-----------------------------------------------------------
+*/
+
+#define REGJ(p,r) (offsetof(struct ncr_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
+
+typedef u_int32 ncrcmd;
+
+/*-----------------------------------------------------------
+**
+** SCSI phases
+**
+** DT phases illegal for ncr driver.
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_DATA_OUT 0x00000000
+#define SCR_DATA_IN 0x01000000
+#define SCR_COMMAND 0x02000000
+#define SCR_STATUS 0x03000000
+#define SCR_DT_DATA_OUT 0x04000000
+#define SCR_DT_DATA_IN 0x05000000
+#define SCR_MSG_OUT 0x06000000
+#define SCR_MSG_IN 0x07000000
+
+#define SCR_ILG_OUT 0x04000000
+#define SCR_ILG_IN 0x05000000
+
+/*-----------------------------------------------------------
+**
+** Data transfer via SCSI.
+**
+**-----------------------------------------------------------
+**
+** MOVE_ABS (LEN)
+** <<start address>>
+**
+** MOVE_IND (LEN)
+** <<dnad_offset>>
+**
+** MOVE_TBL
+** <<dnad_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define OPC_MOVE 0x08000000
+
+#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE)
+
+#define SCR_CHMOV_ABS(l) ((0x00000000) | (l))
+#define SCR_CHMOV_IND(l) ((0x20000000) | (l))
+#define SCR_CHMOV_TBL (0x10000000)
+
+struct scr_tblmove {
+ u_int32 size;
+ u_int32 addr;
+};
+
+/*-----------------------------------------------------------
+**
+** Selection
+**
+**-----------------------------------------------------------
+**
+** SEL_ABS | SCR_ID (0..15) [ | REL_JMP]
+** <<alternate_address>>
+**
+** SEL_TBL | << dnad_offset>> [ | REL_JMP]
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SEL_ABS 0x40000000
+#define SCR_SEL_ABS_ATN 0x41000000
+#define SCR_SEL_TBL 0x42000000
+#define SCR_SEL_TBL_ATN 0x43000000
+
+struct scr_tblsel {
+ u_char sel_scntl4;
+ u_char sel_sxfer;
+ u_char sel_id;
+ u_char sel_scntl3;
+};
+
+#define SCR_JMP_REL 0x04000000
+#define SCR_ID(id) (((u_int32)(id)) << 16)
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** WAIT_DISC
+** dummy: <<alternate_address>>
+**
+** WAIT_RESEL
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_WAIT_DISC 0x48000000
+#define SCR_WAIT_RESEL 0x50000000
+
+/*-----------------------------------------------------------
+**
+** Bit Set / Reset
+**
+**-----------------------------------------------------------
+**
+** SET (flags {|.. })
+**
+** CLR (flags {|.. })
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SET(f) (0x58000000 | (f))
+#define SCR_CLR(f) (0x60000000 | (f))
+
+#define SCR_CARRY 0x00000400
+#define SCR_TRG 0x00000200
+#define SCR_ACK 0x00000040
+#define SCR_ATN 0x00000008
+
+
+
+
+/*-----------------------------------------------------------
+**
+** Memory to memory move
+**
+**-----------------------------------------------------------
+**
+** COPY (bytecount)
+** << source_address >>
+** << destination_address >>
+**
+** SCR_COPY sets the NO FLUSH option by default.
+** SCR_COPY_F does not set this option.
+**
+** For chips which do not support this option,
+** ncr_copy_and_bind() will remove this bit.
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+**
+** Register move and binary operations
+**
+**-----------------------------------------------------------
+**
+** SFBR_REG (reg, op, data) reg = SFBR op data
+** << 0 >>
+**
+** REG_SFBR (reg, op, data) SFBR = reg op data
+** << 0 >>
+**
+** REG_REG (reg, op, data) reg = reg op data
+** << 0 >>
+**
+**-----------------------------------------------------------
+** On 810A, 860, 825A, 875, 895 and 896 chips the content
+** of SFBR register can be used as data (SCR_SFBR_DATA).
+** The 896 has additionnal IO registers starting at
+** offset 0x80. Bit 7 of register offset is stored in
+** bit 7 of the SCRIPTS instruction first DWORD.
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80))
+
+#define SCR_SFBR_REG(reg,op,data) \
+ (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+ (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+ (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+
+#define SCR_LOAD 0x00000000
+#define SCR_SHL 0x01000000
+#define SCR_OR 0x02000000
+#define SCR_XOR 0x03000000
+#define SCR_AND 0x04000000
+#define SCR_SHR 0x05000000
+#define SCR_ADD 0x06000000
+#define SCR_ADDC 0x07000000
+
+#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */
+
+/*-----------------------------------------------------------
+**
+** FROM_REG (reg) SFBR = reg
+** << 0 >>
+**
+** TO_REG (reg) reg = SFBR
+** << 0 >>
+**
+** LOAD_REG (reg, data) reg = <data>
+** << 0 >>
+**
+** LOAD_SFBR(data) SFBR = <data>
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_FROM_REG(reg) \
+ SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define SCR_TO_REG(reg) \
+ SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define SCR_LOAD_REG(reg,data) \
+ SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+ (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+**
+** LOAD from memory to register.
+** STORE from register to memory.
+**
+** Only supported by 810A, 860, 825A, 875, 895 and 896.
+**
+**-----------------------------------------------------------
+**
+** LOAD_ABS (LEN)
+** <<start address>>
+**
+** LOAD_REL (LEN) (DSA relative)
+** <<dsa_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul)
+#define SCR_NO_FLUSH2 0x02000000
+#define SCR_DSA_REL2 0x10000000
+
+#define SCR_LOAD_R(reg, how, n) \
+ (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_STORE_R(reg, how, n) \
+ (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n)
+#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n)
+#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n)
+
+#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n)
+#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n)
+#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n)
+
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** JUMP [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** CALL [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** CALLR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** RETURN [ | IFTRUE/IFFALSE ( ... ) ]
+** <<dummy>>
+**
+** INT [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** Conditions:
+** WHEN (phase)
+** IF (phase)
+** CARRYSET
+** DATA (data, mask)
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_OP 0x80000000
+#define SCR_JUMP 0x80080000
+#define SCR_JUMP64 0x80480000
+#define SCR_JUMPR 0x80880000
+#define SCR_CALL 0x88080000
+#define SCR_CALLR 0x88880000
+#define SCR_RETURN 0x90080000
+#define SCR_INT 0x98080000
+#define SCR_INT_FLY 0x98180000
+
+#define IFFALSE(arg) (0x00080000 | (arg))
+#define IFTRUE(arg) (0x00000000 | (arg))
+
+#define WHEN(phase) (0x00030000 | (phase))
+#define IF(phase) (0x00020000 | (phase))
+
+#define DATA(D) (0x00040000 | ((D) & 0xff))
+#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET (0x00200000)
+
+/*-----------------------------------------------------------
+**
+** SCSI constants.
+**
+**-----------------------------------------------------------
+*/
+
+/*
+** Messages
+*/
+
+#define M_COMPLETE (0x00)
+#define M_EXTENDED (0x01)
+#define M_SAVE_DP (0x02)
+#define M_RESTORE_DP (0x03)
+#define M_DISCONNECT (0x04)
+#define M_ID_ERROR (0x05)
+#define M_ABORT (0x06)
+#define M_REJECT (0x07)
+#define M_NOOP (0x08)
+#define M_PARITY (0x09)
+#define M_LCOMPLETE (0x0a)
+#define M_FCOMPLETE (0x0b)
+#define M_RESET (0x0c)
+#define M_ABORT_TAG (0x0d)
+#define M_CLEAR_QUEUE (0x0e)
+#define M_INIT_REC (0x0f)
+#define M_REL_REC (0x10)
+#define M_TERMINATE (0x11)
+#define M_SIMPLE_TAG (0x20)
+#define M_HEAD_TAG (0x21)
+#define M_ORDERED_TAG (0x22)
+#define M_IGN_RESIDUE (0x23)
+#define M_IDENTIFY (0x80)
+
+#define M_X_MODIFY_DP (0x00)
+#define M_X_SYNC_REQ (0x01)
+#define M_X_WIDE_REQ (0x03)
+#define M_X_PPR_REQ (0x04)
+
+/*
+** Status
+*/
+
+#define S_GOOD (0x00)
+#define S_CHECK_COND (0x02)
+#define S_COND_MET (0x04)
+#define S_BUSY (0x08)
+#define S_INT (0x10)
+#define S_INT_COND_MET (0x14)
+#define S_CONFLICT (0x18)
+#define S_TERMINATED (0x20)
+#define S_QUEUE_FULL (0x28)
+#define S_ILLEGAL (0xff)
+#define S_SENSE (0x80)
+
+/*
+ * End of ncrreg from FreeBSD
+ */
+
+#endif /* !defined HOSTS_C */
+
+#endif /* defined SYM53C8XX_DEFS_H */
diff --git a/linux/src/drivers/scsi/t128.c b/linux/src/drivers/scsi/t128.c
new file mode 100644
index 0000000..198e910
--- /dev/null
+++ b/linux/src/drivers/scsi/t128.c
@@ -0,0 +1,400 @@
+#define AUTOSENSE
+#define PSEUDO_DMA
+
+/*
+ * Trantor T128/T128F/T228 driver
+ * Note : architecturally, the T100 and T130 are different and won't
+ * work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 3.
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
+ * only really want to use this if you're having a problem with
+ * dropped characters during high speed communications, and even
+ * then, you're going to be better off twiddling with transfersize.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - since the board is memory mapped,
+ * a BIOS signature is scanned for to locate the registers.
+ * An interrupt is triggered to autoprobe for the interrupt
+ * line.
+ *
+ * 2. With command line overrides - t128=address,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the T128_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0xcc000 address, IRQ5, I could say
+ * -DT128_OVERRIDE={{0xcc000, 5}}
+ *
+ * Note that if the override methods are used, place holders must
+ * be specified for other boards in the system.
+ *
+ * T128/T128F jumper/dipswitch settings (note : on my sample, the switches
+ * were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) :
+ *
+ * T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot
+ * T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable
+ * cc000 off off
+ * c8000 off on
+ * dc000 on off
+ * d8000 on on
+ *
+ *
+ * Interrupts
+ * There is a 12 pin jumper block, jp1, numbered as follows :
+ * T128 (JP1) T128F (J5)
+ * 2 4 6 8 10 12 11 9 7 5 3 1
+ * 1 3 5 7 9 11 12 10 8 6 4 2
+ *
+ * 3 2-4
+ * 5 1-3
+ * 7 3-5
+ * T128F only
+ * 10 8-10
+ * 12 7-9
+ * 14 10-12
+ * 15 9-11
+ */
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "t128.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_t128 = {
+ PROC_SCSI_T128, 4, "t128",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+static struct override {
+ unsigned char *address;
+ int irq;
+} overrides
+#ifdef T128_OVERRIDE
+ [] = T128_OVERRIDE;
+#else
+ [4] = {{NULL,IRQ_AUTO}, {NULL,IRQ_AUTO}, {NULL,IRQ_AUTO},
+ {NULL,IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ unsigned char *address;
+ int noauto;
+} bases[] = {{(unsigned char *) 0xcc000, 0}, {(unsigned char *) 0xc8000, 0},
+ {(unsigned char *) 0xdc000, 0}, {(unsigned char *) 0xd8000, 0}};
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+static const struct signature {
+ const char *string;
+ int offset;
+} signatures[] = {
+{"TSROM: SCSI BIOS, Version 1.12", 0x36},
+};
+
+#define NO_SIGNATURES (sizeof (signatures) / sizeof (struct signature))
+
+/*
+ * Function : t128_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+void t128_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("t128_setup : usage t128=address,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].address = (unsigned char *) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].address == (unsigned char *) ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : int t128_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes T128,T128F, or T228 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int t128_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0, current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned char *base;
+ int sig, count;
+
+ tpnt->proc_dir = &proc_scsi_t128;
+ tpnt->proc_info = &t128_proc_info;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ base = NULL;
+
+ if (overrides[current_override].address)
+ base = overrides[current_override].address;
+ else
+ for (; !base && (current_base < NO_BASES); ++current_base) {
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi : probing address %08x\n", (unsigned int) bases[current_base].address);
+#endif
+ for (sig = 0; sig < NO_SIGNATURES; ++sig)
+ if (!bases[current_base].noauto && !memcmp
+ (bases[current_base].address + signatures[sig].offset,
+ signatures[sig].string, strlen(signatures[sig].string))) {
+ base = bases[current_base].address;
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : detected board.\n");
+#endif
+ break;
+ }
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : base = %08x\n", (unsigned int) base);
+#endif
+
+ if (!base)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->base = base;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, t128_intr, SA_INTERRUPT, "t128", NULL)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%08x", instance->host_no, (int)
+ instance->base);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int t128_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Someone
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+int t128_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ register unsigned char *reg = (unsigned char *) (instance->base +
+ T_DATA_REG_OFFSET), *d = dst;
+ register int i = len;
+
+
+#if 0
+ for (; i; --i) {
+ while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
+#else
+ while (!((instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ *d++ = *reg;
+ }
+
+ if (*(instance->base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ volatile unsigned char *foo;
+ foo = instance->base + T_CONTROL_REG_OFFSET;
+ tmp = *foo;
+ *foo = tmp | T_CR_CT;
+ *foo = tmp;
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+ int len) {
+ register unsigned char *reg = (unsigned char *) (instance->base +
+ T_DATA_REG_OFFSET), *s = src;
+ register int i = len;
+
+#if 0
+ for (; i; --i) {
+ while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
+#else
+ while (!((instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ *reg = *s++;
+ }
+
+ if (*(instance->base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ volatile unsigned char *foo;
+ foo = instance->base + T_CONTROL_REG_OFFSET;
+ tmp = *foo;
+ *foo = tmp | T_CR_CT;
+ *foo = tmp;
+ printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+#include "NCR5380.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = TRANTOR_T128;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/t128.h b/linux/src/drivers/scsi/t128.h
new file mode 100644
index 0000000..2a3c3cb
--- /dev/null
+++ b/linux/src/drivers/scsi/t128.h
@@ -0,0 +1,169 @@
+/*
+ * Trantor T128/T128F/T228 defines
+ * Note : architecturally, the T100 and T128 are different and won't work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 3.
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef T128_H
+#define T128_H
+
+#define T128_PUBLIC_RELEASE 3
+
+#define TDEBUG_INIT 0x1
+#define TDEBUG_TRANSFER 0x2
+
+/*
+ * The trantor boards are memory mapped. They use an NCR5380 or
+ * equivalent (my sample board had part second sourced from ZILOG).
+ * NCR's recommended "Pseudo-DMA" architecture is used, where
+ * a PAL drives the DMA signals on the 5380 allowing fast, blind
+ * transfers with proper handshaking.
+ */
+
+/*
+ * Note : a boot switch is provided for the purpose of informing the
+ * firmware to boot or not boot from attached SCSI devices. So, I imagine
+ * there are fewer people who've yanked the ROM like they do on the Seagate
+ * to make bootup faster, and I'll probably use this for autodetection.
+ */
+#define T_ROM_OFFSET 0
+
+/*
+ * Note : my sample board *WAS NOT* populated with the SRAM, so this
+ * can't be used for autodetection without a ROM present.
+ */
+#define T_RAM_OFFSET 0x1800
+
+/*
+ * All of the registers are allocated 32 bytes of address space, except
+ * for the data register (read/write to/from the 5380 in pseudo-DMA mode)
+ */
+#define T_CONTROL_REG_OFFSET 0x1c00 /* rw */
+#define T_CR_INT 0x10 /* Enable interrupts */
+#define T_CR_CT 0x02 /* Reset watchdog timer */
+
+#define T_STATUS_REG_OFFSET 0x1c20 /* ro */
+#define T_ST_BOOT 0x80 /* Boot switch */
+#define T_ST_S3 0x40 /* User settable switches, */
+#define T_ST_S2 0x20 /* read 0 when switch is on, 1 off */
+#define T_ST_S1 0x10
+#define T_ST_PS2 0x08 /* Set for Microchannel 228 */
+#define T_ST_RDY 0x04 /* 5380 DRQ */
+#define T_ST_TIM 0x02 /* indicates 40us watchdog timer fired */
+#define T_ST_ZERO 0x01 /* Always zero */
+
+#define T_5380_OFFSET 0x1d00 /* 8 registers here, see NCR5380.h */
+
+#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
+
+#ifndef ASM
+int t128_abort(Scsi_Cmnd *);
+int t128_biosparam(Disk *, kdev_t, int*);
+int t128_detect(Scsi_Host_Template *);
+int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int t128_reset(Scsi_Cmnd *, unsigned int reset_flags);
+int t128_proc_info (char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define TRANTOR_T128 {NULL, NULL, NULL, NULL, \
+ "Trantor T128/T128F/T228", t128_detect, NULL, \
+ NULL, \
+ NULL, t128_queue_command, t128_abort, t128_reset, NULL, \
+ t128_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned char *base
+
+#define NCR5380_local_declare() \
+ volatile unsigned char *base
+
+#define NCR5380_setup(instance) \
+ base = (volatile unsigned char *) (instance)->base
+
+#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
+
+#if !(TDEBUG & TDEBUG_TRANSFER)
+#define NCR5380_read(reg) (*(T128_address(reg)))
+#define NCR5380_write(reg, value) (*(T128_address(reg)) = (value))
+#else
+#define NCR5380_read(reg) \
+ (((unsigned char) printk("scsi%d : read register %d at address %08x\n"\
+ , instance->hostno, (reg), T128_address(reg))), *(T128_address(reg)))
+
+#define NCR5380_write(reg, value) { \
+ printk("scsi%d : write %02x to register %d at address %08x\n", \
+ instance->hostno, (value), (reg), T128_address(reg)); \
+ *(T128_address(reg)) = (value); \
+}
+#endif
+
+#define NCR5380_intr t128_intr
+#define NCR5380_queue_command t128_queue_command
+#define NCR5380_abort t128_abort
+#define NCR5380_reset t128_reset
+#define NCR5380_proc_info t128_proc_info
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define T128_IRQS 0xc4a8
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* T128_H */
diff --git a/linux/src/drivers/scsi/tmscsim.c b/linux/src/drivers/scsi/tmscsim.c
new file mode 100644
index 0000000..be986ff
--- /dev/null
+++ b/linux/src/drivers/scsi/tmscsim.c
@@ -0,0 +1,1930 @@
+/***********************************************************************
+ * FILE NAME : TMSCSIM.C *
+ * BY : C.L. Huang, ching@tekram.com.tw *
+ * Description: Device Driver for Tekram DC-390(T) PCI SCSI *
+ * Bus Master Host Adapter *
+ * (C)Copyright 1995-1996 Tekram Technology Co., Ltd. *
+ ***********************************************************************/
+/* Minor enhancements and bugfixes by *
+ * Kurt Garloff <K.Garloff@ping.de> *
+ ***********************************************************************/
+/* HISTORY: *
+ * *
+ * REV# DATE NAME DESCRIPTION *
+ * 1.00 04/24/96 CLH First release *
+ * 1.01 06/12/96 CLH Fixed bug of Media Change for Removable *
+ * Device, scan all LUN. Support Pre2.0.10 *
+ * 1.02 06/18/96 CLH Fixed bug of Command timeout ... *
+ * 1.03 09/25/96 KG Added tmscsim_proc_info() *
+ * 1.04 10/11/96 CLH Updating for support KV 2.0.x *
+ * 1.05 10/18/96 KG Fixed bug in DC390_abort(null ptr deref)*
+ * 1.06 10/25/96 KG Fixed module support *
+ * 1.07 11/09/96 KG Fixed tmscsim_proc_info() *
+ * 1.08 11/18/96 KG Fixed null ptr in DC390_Disconnect() *
+ * 1.09 11/30/96 KG Added register the allocated IO space *
+ * 1.10 12/05/96 CLH Modified tmscsim_proc_info(), and reset *
+ * pending interrupt in DC390_detect() *
+ * 1.11 02/05/97 KG/CLH Fixed problem with partitions greater *
+ * than 1GB *
+ ***********************************************************************/
+
+
+#define DC390_DEBUG
+
+#define SCSI_MALLOC
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/config.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < 66354 /* 1.3.50 */
+#include "../block/blk.h"
+#else
+#include <linux/blk.h>
+#endif
+
+#include "scsi.h"
+#include "hosts.h"
+#include "tmscsim.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+
+#include "dc390.h"
+
+#define PCI_DEVICE_ID_AMD53C974 PCI_DEVICE_ID_AMD_SCSI
+
+
+#ifndef VERSION_ELF_1_2_13
+struct proc_dir_entry proc_scsi_tmscsim ={
+ PROC_SCSI_DC390T, 7 ,"tmscsim",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+ };
+#endif
+
+static USHORT DC390_StartSCSI( PACB pACB, PDCB pDCB, PSRB pSRB );
+static void DC390_DataOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_DataIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Command_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Status_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_DataOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_DataInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_CommandPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_StatusPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Nop_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Nop_1( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+
+static void SetXferRate( PACB pACB, PDCB pDCB );
+static void DC390_Disconnect( PACB pACB );
+static void DC390_Reselect( PACB pACB );
+static void SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB );
+static void DoingSRB_Done( PACB pACB );
+static void DC390_ScsiRstDetect( PACB pACB );
+static void DC390_ResetSCSIBus( PACB pACB );
+static void RequestSense( PACB pACB, PDCB pDCB, PSRB pSRB );
+static void EnableMsgOut2( PACB pACB, PSRB pSRB );
+static void EnableMsgOut( PACB pACB, PSRB pSRB );
+static void DC390_InvalidCmd( PACB pACB );
+
+int DC390_initAdapter( PSH psh, ULONG io_port, UCHAR Irq, USHORT index );
+void DC390_initDCB( PACB pACB, PDCB pDCB, PSCSICMD cmd );
+
+#ifdef MODULE
+static int DC390_release(struct Scsi_Host *host);
+static int DC390_shutdown (struct Scsi_Host *host);
+#endif
+
+
+static PSHT pSHT_start = NULL;
+static PSH pSH_start = NULL;
+static PSH pSH_current = NULL;
+static PACB pACB_start= NULL;
+static PACB pACB_current = NULL;
+static PDCB pPrevDCB = NULL;
+static USHORT adapterCnt = 0;
+static USHORT InitialTime = 0;
+static USHORT CurrSyncOffset = 0;
+static ULONG mech1addr;
+static UCHAR mech2bus, mech2Agent, mech2CfgSPenR;
+
+static PVOID DC390_phase0[]={
+ DC390_DataOut_0,
+ DC390_DataIn_0,
+ DC390_Command_0,
+ DC390_Status_0,
+ DC390_Nop_0,
+ DC390_Nop_0,
+ DC390_MsgOut_0,
+ DC390_MsgIn_0,
+ DC390_Nop_1
+ };
+
+static PVOID DC390_phase1[]={
+ DC390_DataOutPhase,
+ DC390_DataInPhase,
+ DC390_CommandPhase,
+ DC390_StatusPhase,
+ DC390_Nop_0,
+ DC390_Nop_0,
+ DC390_MsgOutPhase,
+ DC390_MsgInPhase,
+ DC390_Nop_1,
+ };
+
+UCHAR eepromBuf[MAX_ADAPTER_NUM][128];
+
+
+UCHAR clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20};
+
+UCHAR baddevname1[2][28] ={
+ "SEAGATE ST3390N 9546",
+ "HP C3323-300 4269"};
+
+#define BADDEVCNT 2
+
+
+/***********************************************************************
+ *
+ *
+ *
+ **********************************************************************/
+static void
+QLinkcmd( PSCSICMD cmd, PDCB pDCB )
+{
+ ULONG flags;
+ PSCSICMD pcmd;
+
+ save_flags(flags);
+ cli();
+
+ if( !pDCB->QIORBCnt )
+ {
+ pDCB->pQIORBhead = cmd;
+ pDCB->pQIORBtail = cmd;
+ pDCB->QIORBCnt++;
+ cmd->next = NULL;
+ }
+ else
+ {
+ pcmd = pDCB->pQIORBtail;
+ pcmd->next = cmd;
+ pDCB->pQIORBtail = cmd;
+ pDCB->QIORBCnt++;
+ cmd->next = NULL;
+ }
+
+ restore_flags(flags);
+}
+
+
+static PSCSICMD
+Getcmd( PDCB pDCB )
+{
+ ULONG flags;
+ PSCSICMD pcmd;
+
+ save_flags(flags);
+ cli();
+
+ pcmd = pDCB->pQIORBhead;
+ pDCB->pQIORBhead = pcmd->next;
+ pcmd->next = NULL;
+ pDCB->QIORBCnt--;
+
+ restore_flags(flags);
+ return( pcmd );
+}
+
+
+static PSRB
+GetSRB( PACB pACB )
+{
+ ULONG flags;
+ PSRB pSRB;
+
+ save_flags(flags);
+ cli();
+
+ pSRB = pACB->pFreeSRB;
+ if( pSRB )
+ {
+ pACB->pFreeSRB = pSRB->pNextSRB;
+ pSRB->pNextSRB = NULL;
+ }
+ restore_flags(flags);
+ return( pSRB );
+}
+
+
+static void
+RewaitSRB0( PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb1;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+
+ if( (psrb1 = pDCB->pWaitingSRB) )
+ {
+ pSRB->pNextSRB = psrb1;
+ pDCB->pWaitingSRB = pSRB;
+ }
+ else
+ {
+ pSRB->pNextSRB = NULL;
+ pDCB->pWaitingSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ }
+ restore_flags(flags);
+}
+
+
+static void
+RewaitSRB( PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb1;
+ ULONG flags;
+ UCHAR bval;
+
+ save_flags(flags);
+ cli();
+ pDCB->GoingSRBCnt--;
+ psrb1 = pDCB->pGoingSRB;
+ if( pSRB == psrb1 )
+ {
+ pDCB->pGoingSRB = psrb1->pNextSRB;
+ }
+ else
+ {
+ while( pSRB != psrb1->pNextSRB )
+ psrb1 = psrb1->pNextSRB;
+ psrb1->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pGoingLast )
+ pDCB->pGoingLast = psrb1;
+ }
+ if( (psrb1 = pDCB->pWaitingSRB) )
+ {
+ pSRB->pNextSRB = psrb1;
+ pDCB->pWaitingSRB = pSRB;
+ }
+ else
+ {
+ pSRB->pNextSRB = NULL;
+ pDCB->pWaitingSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ }
+
+ bval = pSRB->TagNumber;
+ pDCB->TagMask &= (~(1 << bval)); /* Free TAG number */
+ restore_flags(flags);
+}
+
+
+static void
+DoWaitingSRB( PACB pACB )
+{
+ ULONG flags;
+ PDCB ptr, ptr1;
+ PSRB pSRB;
+
+ save_flags(flags);
+ cli();
+
+ if( !(pACB->pActiveDCB) && !(pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV) ) )
+ {
+ ptr = pACB->pDCBRunRobin;
+ if( !ptr )
+ {
+ ptr = pACB->pLinkDCB;
+ pACB->pDCBRunRobin = ptr;
+ }
+ ptr1 = ptr;
+ for( ;ptr1; )
+ {
+ pACB->pDCBRunRobin = ptr1->pNextDCB;
+ if( !( ptr1->MaxCommand > ptr1->GoingSRBCnt ) ||
+ !( pSRB = ptr1->pWaitingSRB ) )
+ {
+ if(pACB->pDCBRunRobin == ptr)
+ break;
+ ptr1 = ptr1->pNextDCB;
+ }
+ else
+ {
+ if( !DC390_StartSCSI(pACB, ptr1, pSRB) )
+ {
+ ptr1->GoingSRBCnt++;
+ if( ptr1->pWaitLast == pSRB )
+ {
+ ptr1->pWaitingSRB = NULL;
+ ptr1->pWaitLast = NULL;
+ }
+ else
+ {
+ ptr1->pWaitingSRB = pSRB->pNextSRB;
+ }
+ pSRB->pNextSRB = NULL;
+
+ if( ptr1->pGoingSRB )
+ ptr1->pGoingLast->pNextSRB = pSRB;
+ else
+ ptr1->pGoingSRB = pSRB;
+ ptr1->pGoingLast = pSRB;
+ }
+ break;
+ }
+ }
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+SRBwaiting( PDCB pDCB, PSRB pSRB)
+{
+ if( pDCB->pWaitingSRB )
+ {
+ pDCB->pWaitLast->pNextSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ pSRB->pNextSRB = NULL;
+ }
+ else
+ {
+ pDCB->pWaitingSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ }
+}
+
+
+static void
+SendSRB( PSCSICMD pcmd, PACB pACB, PSRB pSRB )
+{
+ ULONG flags;
+ PDCB pDCB;
+
+ save_flags(flags);
+ cli();
+
+ pDCB = pSRB->pSRBDCB;
+ if( !(pDCB->MaxCommand > pDCB->GoingSRBCnt) || (pACB->pActiveDCB) ||
+ (pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV)) )
+ {
+ SRBwaiting(pDCB, pSRB);
+ goto SND_EXIT;
+ }
+
+ if( pDCB->pWaitingSRB )
+ {
+ SRBwaiting(pDCB, pSRB);
+/* pSRB = GetWaitingSRB(pDCB); */
+ pSRB = pDCB->pWaitingSRB;
+ pDCB->pWaitingSRB = pSRB->pNextSRB;
+ pSRB->pNextSRB = NULL;
+ }
+
+ if( !DC390_StartSCSI(pACB, pDCB, pSRB) )
+ {
+ pDCB->GoingSRBCnt++;
+ if( pDCB->pGoingSRB )
+ {
+ pDCB->pGoingLast->pNextSRB = pSRB;
+ pDCB->pGoingLast = pSRB;
+ }
+ else
+ {
+ pDCB->pGoingSRB = pSRB;
+ pDCB->pGoingLast = pSRB;
+ }
+ }
+ else
+ RewaitSRB0( pDCB, pSRB );
+
+SND_EXIT:
+ restore_flags(flags);
+ return;
+}
+
+
+/***********************************************************************
+ * Function : static int DC390_queue_command (Scsi_Cmnd *cmd,
+ * void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose : enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ * a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ ***********************************************************************/
+
+int
+DC390_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+ USHORT ioport, i;
+ Scsi_Cmnd *pcmd;
+ struct Scsi_Host *psh;
+ PACB pACB;
+ PDCB pDCB;
+ PSRB pSRB;
+ ULONG flags;
+ PUCHAR ptr,ptr1;
+
+ psh = cmd->host;
+ pACB = (PACB ) psh->hostdata;
+ ioport = pACB->IOPortBase;
+
+#ifdef DC390_DEBUG0
+/* if(pACB->scan_devices) */
+ printk("Cmd=%2x,ID=%d,LUN=%d,",cmd->cmnd[0],cmd->target,cmd->lun);
+#endif
+
+ if( (pACB->scan_devices == END_SCAN) && (cmd->cmnd[0] != INQUIRY) )
+ {
+ pACB->scan_devices = 0;
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+ else if( (pACB->scan_devices) && (cmd->cmnd[0] == 8) )
+ {
+ pACB->scan_devices = 0;
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+
+ if ( ( cmd->target > pACB->max_id ) || (cmd->lun > pACB->max_lun) )
+ {
+/* printk("DC390: Ignore target %d lun %d\n",
+ cmd->target, cmd->lun); */
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ return( 0 );
+ }
+
+ if( (pACB->scan_devices) && !(pACB->DCBmap[cmd->target] & (1 << cmd->lun)) )
+ {
+ if( pACB->DeviceCnt < MAX_DEVICES )
+ {
+ pACB->DCBmap[cmd->target] |= (1 << cmd->lun);
+ pDCB = pACB->pDCB_free;
+#ifdef DC390_DEBUG0
+ printk("pDCB=%8x,ID=%2x,", (UINT) pDCB, cmd->target);
+#endif
+ DC390_initDCB( pACB, pDCB, cmd );
+ }
+ else /* ???? */
+ {
+/* printk("DC390: Ignore target %d lun %d\n",
+ cmd->target, cmd->lun); */
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ return(0);
+ }
+ }
+ else if( !(pACB->scan_devices) && !(pACB->DCBmap[cmd->target] & (1 << cmd->lun)) )
+ {
+/* printk("DC390: Ignore target %d lun %d\n",
+ cmd->target, cmd->lun); */
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ return(0);
+ }
+ else
+ {
+ pDCB = pACB->pLinkDCB;
+ while( (pDCB->UnitSCSIID != cmd->target) ||
+ (pDCB->UnitSCSILUN != cmd->lun) )
+ {
+ pDCB = pDCB->pNextDCB;
+ }
+#ifdef DC390_DEBUG0
+ printk("pDCB=%8x,ID=%2x,", (UINT) pDCB, cmd->target);
+#endif
+ }
+
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+ save_flags(flags);
+ cli();
+
+ if( pDCB->QIORBCnt )
+ {
+ QLinkcmd( cmd, pDCB );
+ pcmd = Getcmd( pDCB );
+ }
+ else
+ pcmd = cmd;
+
+ pSRB = GetSRB( pACB );
+
+ if( !pSRB )
+ {
+ QLinkcmd( pcmd, pDCB );
+ restore_flags(flags);
+ return(0);
+ }
+
+/* BuildSRB(pSRB); */
+
+ pSRB->pSRBDCB = pDCB;
+ pSRB->pcmd = pcmd;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ ptr1 = (PUCHAR) pcmd->cmnd;
+ pSRB->ScsiCmdLen = pcmd->cmd_len;
+ for(i=0; i< pcmd->cmd_len; i++)
+ {
+ *ptr = *ptr1;
+ ptr++;
+ ptr1++;
+ }
+ if( pcmd->use_sg )
+ {
+ pSRB->SGcount = (UCHAR) pcmd->use_sg;
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ }
+ else if( pcmd->request_buffer )
+ {
+ pSRB->SGcount = 1;
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ else
+ pSRB->SGcount = 0;
+
+ pSRB->SGIndex = 0;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pSRB->MsgCnt = 0;
+ if( pDCB->DevType != TYPE_TAPE )
+ pSRB->RetryCnt = 1;
+ else
+ pSRB->RetryCnt = 0;
+ pSRB->SRBStatus = 0;
+ pSRB->SRBFlag = 0;
+ pSRB->SRBState = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGPhysAddr = 0;
+ pSRB->SGToBeXferLen = 0;
+ pSRB->ScsiPhase = 0;
+ pSRB->EndMessage = 0;
+ SendSRB( pcmd, pACB, pSRB );
+
+ restore_flags(flags);
+ return(0);
+}
+
+
+static void
+DoNextCmd( PACB pACB, PDCB pDCB )
+{
+ Scsi_Cmnd *pcmd;
+ PSRB pSRB;
+ ULONG flags;
+ PUCHAR ptr,ptr1;
+ USHORT i;
+
+
+ if( pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV) )
+ return;
+ save_flags(flags);
+ cli();
+
+ pcmd = Getcmd( pDCB );
+ pSRB = GetSRB( pACB );
+ if( !pSRB )
+ {
+ QLinkcmd( pcmd, pDCB );
+ restore_flags(flags);
+ return;
+ }
+
+ pSRB->pSRBDCB = pDCB;
+ pSRB->pcmd = pcmd;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ ptr1 = (PUCHAR) pcmd->cmnd;
+ pSRB->ScsiCmdLen = pcmd->cmd_len;
+ for(i=0; i< pcmd->cmd_len; i++)
+ {
+ *ptr = *ptr1;
+ ptr++;
+ ptr1++;
+ }
+ if( pcmd->use_sg )
+ {
+ pSRB->SGcount = (UCHAR) pcmd->use_sg;
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ }
+ else if( pcmd->request_buffer )
+ {
+ pSRB->SGcount = 1;
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ else
+ pSRB->SGcount = 0;
+
+ pSRB->SGIndex = 0;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pSRB->MsgCnt = 0;
+ if( pDCB->DevType != TYPE_TAPE )
+ pSRB->RetryCnt = 1;
+ else
+ pSRB->RetryCnt = 0;
+ pSRB->SRBStatus = 0;
+ pSRB->SRBFlag = 0;
+ pSRB->SRBState = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGPhysAddr = 0;
+ pSRB->SGToBeXferLen = 0;
+ pSRB->ScsiPhase = 0;
+ pSRB->EndMessage = 0;
+ SendSRB( pcmd, pACB, pSRB );
+
+ restore_flags(flags);
+ return;
+}
+
+
+/***********************************************************************
+ * Function:
+ * DC390_bios_param
+ *
+ * Description:
+ * Return the disk geometry for the given SCSI device.
+ ***********************************************************************/
+#ifdef VERSION_ELF_1_2_13
+int DC390_bios_param(Disk *disk, int devno, int geom[])
+#else
+int DC390_bios_param(Disk *disk, kdev_t devno, int geom[])
+#endif
+{
+ int heads, sectors, cylinders;
+ PACB pACB;
+
+ pACB = (PACB) disk->device->host->hostdata;
+ heads = 64;
+ sectors = 32;
+ cylinders = disk->capacity / (heads * sectors);
+
+ if ( (pACB->Gmode2 & GREATER_1G) && (cylinders > 1024) )
+ {
+ heads = 255;
+ sectors = 63;
+ cylinders = disk->capacity / (heads * sectors);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return (0);
+}
+
+
+/***********************************************************************
+ * Function : int DC390_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Abort an errant SCSI command
+ *
+ * Inputs : cmd - command to abort
+ *
+ * Returns : 0 on success, -1 on failure.
+ ***********************************************************************/
+
+int
+DC390_abort (Scsi_Cmnd *cmd)
+{
+ ULONG flags;
+ PACB pACB;
+ PDCB pDCB, pdcb;
+ PSRB pSRB, psrb;
+ USHORT count, i;
+ PSCSICMD pcmd, pcmd1;
+ int status;
+
+
+#ifdef DC390_DEBUG0
+ printk("DC390 : Abort Cmd.");
+#endif
+
+ save_flags(flags);
+ cli();
+
+ pACB = (PACB) cmd->host->hostdata;
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ while( (pDCB->UnitSCSIID != cmd->target) ||
+ (pDCB->UnitSCSILUN != cmd->lun) )
+ {
+ pDCB = pDCB->pNextDCB;
+ if( pDCB == pdcb )
+ goto NOT_RUN;
+ }
+
+ if( pDCB->QIORBCnt )
+ {
+ pcmd = pDCB->pQIORBhead;
+ if( pcmd == cmd )
+ {
+ pDCB->pQIORBhead = pcmd->next;
+ pcmd->next = NULL;
+ pDCB->QIORBCnt--;
+ status = SCSI_ABORT_SUCCESS;
+ goto ABO_X;
+ }
+ for( count = pDCB->QIORBCnt, i=0; i<count-1; i++)
+ {
+ if( pcmd->next == cmd )
+ {
+ pcmd1 = pcmd->next;
+ pcmd->next = pcmd1->next;
+ pcmd1->next = NULL;
+ pDCB->QIORBCnt--;
+ status = SCSI_ABORT_SUCCESS;
+ goto ABO_X;
+ }
+ else
+ {
+ pcmd = pcmd->next;
+ }
+ }
+ }
+
+ pSRB = pDCB->pWaitingSRB;
+ if( !pSRB )
+ goto ON_GOING;
+ if( pSRB->pcmd == cmd )
+ {
+ pDCB->pWaitingSRB = pSRB->pNextSRB;
+ goto IN_WAIT;
+ }
+ else
+ {
+ psrb = pSRB;
+ if( !(psrb->pNextSRB) )
+ goto ON_GOING;
+ while( psrb->pNextSRB->pcmd != cmd )
+ {
+ psrb = psrb->pNextSRB;
+ if( !(psrb->pNextSRB) )
+ goto ON_GOING;
+ }
+ pSRB = psrb->pNextSRB;
+ psrb->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pWaitLast )
+ pDCB->pWaitLast = psrb; /* No check for psrb == NULL ? */
+IN_WAIT:
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ cmd->next = NULL;
+ status = SCSI_ABORT_SUCCESS;
+ goto ABO_X;
+ }
+
+ON_GOING:
+ pSRB = pDCB->pGoingSRB;
+ for( count = pDCB->GoingSRBCnt, i=0; i<count; i++)
+ {
+ if( pSRB->pcmd != cmd )
+ pSRB = pSRB->pNextSRB;
+ else
+ {
+ if( (pACB->pActiveDCB == pDCB) && (pDCB->pActiveSRB == pSRB) )
+ {
+ status = SCSI_ABORT_BUSY;
+ goto ABO_X;
+ }
+ else
+ {
+ status = SCSI_ABORT_SNOOZE;
+ goto ABO_X;
+ }
+ }
+ }
+
+NOT_RUN:
+ status = SCSI_ABORT_NOT_RUNNING;
+
+ABO_X:
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+ restore_flags(flags);
+ return( status );
+}
+
+
+static void
+ResetDevParam( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+
+ pDCB = pACB->pLinkDCB;
+ if( pDCB == NULL )
+ return;
+ pdcb = pDCB;
+ do
+ {
+ pDCB->SyncMode &= ~SYNC_NEGO_DONE;
+ pDCB->SyncPeriod = 0;
+ pDCB->SyncOffset = 0;
+ pDCB->CtrlR3 = FAST_CLK;
+ pDCB->CtrlR4 &= NEGATE_REQACKDATA;
+ pDCB->CtrlR4 |= EATER_25NS;
+ pDCB = pDCB->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+static void
+RecoverSRB( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB psrb, psrb2;
+ USHORT cnt, i;
+
+ pDCB = pACB->pLinkDCB;
+ if( pDCB == NULL )
+ return;
+ pdcb = pDCB;
+ do
+ {
+ cnt = pdcb->GoingSRBCnt;
+ psrb = pdcb->pGoingSRB;
+ for (i=0; i<cnt; i++)
+ {
+ psrb2 = psrb;
+ psrb = psrb->pNextSRB;
+/* RewaitSRB( pDCB, psrb ); */
+ if( pdcb->pWaitingSRB )
+ {
+ psrb2->pNextSRB = pdcb->pWaitingSRB;
+ pdcb->pWaitingSRB = psrb2;
+ }
+ else
+ {
+ pdcb->pWaitingSRB = psrb2;
+ pdcb->pWaitLast = psrb2;
+ psrb2->pNextSRB = NULL;
+ }
+ }
+ pdcb->GoingSRBCnt = 0;
+ pdcb->pGoingSRB = NULL;
+ pdcb->TagMask = 0;
+ pdcb = pdcb->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+/***********************************************************************
+ * Function : int DC390_reset (Scsi_Cmnd *cmd, ...)
+ *
+ * Purpose : perform a hard reset on the SCSI bus
+ *
+ * Inputs : cmd - command which caused the SCSI RESET
+ *
+ * Returns : 0 on success.
+ ***********************************************************************/
+
+#ifdef VERSION_2_0_0
+int DC390_reset(Scsi_Cmnd *cmd, unsigned int resetFlags)
+#else
+int DC390_reset (Scsi_Cmnd *cmd)
+#endif
+{
+ USHORT ioport;
+ unsigned long flags;
+ PACB pACB;
+ UCHAR bval;
+ USHORT i;
+
+
+#ifdef DC390_DEBUG1
+ printk("DC390: RESET,");
+#endif
+
+ pACB = (PACB ) cmd->host->hostdata;
+ ioport = pACB->IOPortBase;
+ save_flags(flags);
+ cli();
+ bval = inb(ioport+CtrlReg1);
+ bval |= DIS_INT_ON_SCSI_RST;
+ outb(bval,ioport+CtrlReg1); /* disable interrupt */
+ DC390_ResetSCSIBus( pACB );
+ for( i=0; i<500; i++ )
+ udelay(1000);
+ bval = inb(ioport+CtrlReg1);
+ bval &= ~DIS_INT_ON_SCSI_RST;
+ outb(bval,ioport+CtrlReg1); /* re-enable interrupt */
+
+ bval = DMA_IDLE_CMD;
+ outb(bval,ioport+DMA_Cmd);
+ bval = CLEAR_FIFO_CMD;
+ outb(bval,ioport+ScsiCmd);
+
+ ResetDevParam( pACB );
+ DoingSRB_Done( pACB );
+ pACB->pActiveDCB = NULL;
+
+ pACB->ACBFlag = 0;
+ DoWaitingSRB( pACB );
+
+ restore_flags(flags);
+#ifdef DC390_DEBUG1
+ printk("DC390: RESET1,");
+#endif
+ return( SCSI_RESET_SUCCESS );
+}
+
+
+#include "scsiiom.c"
+
+
+/***********************************************************************
+ * Function : static void DC390_initDCB
+ *
+ * Purpose : initialize the internal structures for a given DCB
+ *
+ * Inputs : cmd - pointer to this scsi cmd request block structure
+ *
+ ***********************************************************************/
+void DC390_initDCB( PACB pACB, PDCB pDCB, PSCSICMD cmd )
+{
+ PEEprom prom;
+ UCHAR bval;
+ USHORT index;
+
+ if( pACB->DeviceCnt == 0 )
+ {
+ pACB->pLinkDCB = pDCB;
+ pACB->pDCBRunRobin = pDCB;
+ pDCB->pNextDCB = pDCB;
+ pPrevDCB = pDCB;
+ }
+ else
+ pPrevDCB->pNextDCB = pDCB;
+
+ pDCB->pDCBACB = pACB;
+ pDCB->QIORBCnt = 0;
+ pDCB->UnitSCSIID = cmd->target;
+ pDCB->UnitSCSILUN = cmd->lun;
+ pDCB->pWaitingSRB = NULL;
+ pDCB->pGoingSRB = NULL;
+ pDCB->GoingSRBCnt = 0;
+ pDCB->pActiveSRB = NULL;
+ pDCB->TagMask = 0;
+ pDCB->MaxCommand = 1;
+ pDCB->AdaptIndex = pACB->AdapterIndex;
+ index = pACB->AdapterIndex;
+ pDCB->DCBFlag = 0;
+
+ prom = (PEEprom) &eepromBuf[index][cmd->target << 2];
+ pDCB->DevMode = prom->EE_MODE1;
+ pDCB->AdpMode = eepromBuf[index][EE_MODE2];
+
+ if( pDCB->DevMode & EN_DISCONNECT_ )
+ bval = 0xC0;
+ else
+ bval = 0x80;
+ bval |= cmd->lun;
+ pDCB->IdentifyMsg = bval;
+
+ pDCB->SyncMode = 0;
+ if( pDCB->DevMode & SYNC_NEGO_ )
+ {
+ if( !(cmd->lun) || CurrSyncOffset )
+ pDCB->SyncMode = SYNC_ENABLE;
+ }
+
+ pDCB->SyncPeriod = 0;
+ pDCB->SyncOffset = 0;
+ pDCB->NegoPeriod = (clock_period1[prom->EE_SPEED] * 25) >> 2;
+
+ pDCB->CtrlR1 = pACB->AdaptSCSIID;
+ if( pDCB->DevMode & PARITY_CHK_ )
+ pDCB->CtrlR1 |= PARITY_ERR_REPO;
+
+ pDCB->CtrlR3 = FAST_CLK;
+
+ pDCB->CtrlR4 = EATER_25NS;
+ if( pDCB->AdpMode & ACTIVE_NEGATION)
+ pDCB->CtrlR4 |= NEGATE_REQACKDATA;
+}
+
+
+/***********************************************************************
+ * Function : static void DC390_initSRB
+ *
+ * Purpose : initialize the internal structures for a given SRB
+ *
+ * Inputs : psrb - pointer to this scsi request block structure
+ *
+ ***********************************************************************/
+void DC390_initSRB( PSRB psrb )
+{
+#ifndef VERSION_ELF_1_2_13
+#ifdef DC390_DEBUG0
+ printk("DC390 init: %08lx %08lx,",(ULONG)psrb,(ULONG)virt_to_bus(psrb));
+#endif
+ psrb->PhysSRB = virt_to_bus( psrb );
+#else
+ psrb->PhysSRB = (ULONG) psrb;
+#endif
+}
+
+
+void DC390_linkSRB( PACB pACB )
+{
+ USHORT count, i;
+ PSRB psrb;
+
+ count = pACB->SRBCount;
+
+ for( i=0; i< count; i++)
+ {
+ if( i != count - 1)
+ pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1];
+ else
+ pACB->SRB_array[i].pNextSRB = NULL;
+ psrb = (PSRB) &pACB->SRB_array[i];
+ DC390_initSRB( psrb );
+ }
+}
+
+
+/***********************************************************************
+ * Function : static void DC390_initACB
+ *
+ * Purpose : initialize the internal structures for a given SCSI host
+ *
+ * Inputs : psh - pointer to this host adapter's structure
+ *
+ ***********************************************************************/
+void DC390_initACB( PSH psh, ULONG io_port, UCHAR Irq, USHORT index )
+{
+ PACB pACB;
+ USHORT i;
+
+ psh->can_queue = MAX_CMD_QUEUE;
+ psh->cmd_per_lun = MAX_CMD_PER_LUN;
+ psh->this_id = (int) eepromBuf[index][EE_ADAPT_SCSI_ID];
+ psh->io_port = io_port;
+ psh->n_io_port = 0x80;
+ psh->irq = Irq;
+
+ pACB = (PACB) psh->hostdata;
+
+#ifndef VERSION_ELF_1_2_13
+ psh->max_id = 8;
+#ifdef CONFIG_SCSI_MULTI_LUN
+ if( eepromBuf[index][EE_MODE2] & LUN_CHECK )
+ psh->max_lun = 8;
+ else
+#endif
+ psh->max_lun = 1;
+#endif
+
+ pACB->max_id = 7;
+ if( pACB->max_id == eepromBuf[index][EE_ADAPT_SCSI_ID] )
+ pACB->max_id--;
+#ifdef CONFIG_SCSI_MULTI_LUN
+ if( eepromBuf[index][EE_MODE2] & LUN_CHECK )
+ pACB->max_lun = 7;
+ else
+#endif
+ pACB->max_lun = 0;
+
+ pACB->pScsiHost = psh;
+ pACB->IOPortBase = (USHORT) io_port;
+ pACB->pLinkDCB = NULL;
+ pACB->pDCBRunRobin = NULL;
+ pACB->pActiveDCB = NULL;
+ pACB->pFreeSRB = pACB->SRB_array;
+ pACB->SRBCount = MAX_SRB_CNT;
+ pACB->AdapterIndex = index;
+ pACB->status = 0;
+ pACB->AdaptSCSIID = eepromBuf[index][EE_ADAPT_SCSI_ID];
+ pACB->HostID_Bit = (1 << pACB->AdaptSCSIID);
+ pACB->AdaptSCSILUN = 0;
+ pACB->DeviceCnt = 0;
+ pACB->IRQLevel = Irq;
+ pACB->TagMaxNum = eepromBuf[index][EE_TAG_CMD_NUM] << 2;
+ pACB->ACBFlag = 0;
+ pACB->scan_devices = 1;
+ pACB->Gmode2 = eepromBuf[index][EE_MODE2];
+ if( eepromBuf[index][EE_MODE2] & LUN_CHECK )
+ pACB->LUNchk = 1;
+ pACB->pDCB_free = &pACB->DCB_array[0];
+ DC390_linkSRB( pACB );
+ pACB->pTmpSRB = &pACB->TmpSRB;
+ DC390_initSRB( pACB->pTmpSRB );
+ for(i=0; i<MAX_SCSI_ID; i++)
+ pACB->DCBmap[i] = 0;
+}
+
+
+/***********************************************************************
+ * Function : static int DC390_initAdapter
+ *
+ * Purpose : initialize the SCSI chip ctrl registers
+ *
+ * Inputs : psh - pointer to this host adapter's structure
+ *
+ ***********************************************************************/
+int DC390_initAdapter( PSH psh, ULONG io_port, UCHAR Irq, USHORT index )
+{
+ USHORT ioport;
+ UCHAR bval;
+ PACB pACB, pacb;
+ USHORT used_irq = 0;
+
+ pacb = pACB_start;
+ if( pacb != NULL )
+ {
+ for ( ; (pacb != (PACB) -1) ; )
+ {
+ if( pacb->IRQLevel == Irq )
+ {
+ used_irq = 1;
+ break;
+ }
+ else
+ pacb = pacb->pNextACB;
+ }
+ }
+
+ if( !used_irq )
+ {
+#ifdef VERSION_ELF_1_2_13
+ if( request_irq(Irq, DC390_Interrupt, SA_INTERRUPT, "tmscsim"))
+#else
+ if( request_irq(Irq, DC390_Interrupt, SA_INTERRUPT | SA_SHIRQ, "tmscsim", NULL))
+#endif
+ {
+ printk("DC390: register IRQ error!\n");
+ return( -1 );
+ }
+ }
+
+ request_region(io_port,psh->n_io_port,"tmscsim");
+
+ ioport = (USHORT) io_port;
+
+ pACB = (PACB) psh->hostdata;
+ bval = SEL_TIMEOUT; /* 250ms selection timeout */
+ outb(bval,ioport+Scsi_TimeOut);
+
+ bval = CLK_FREQ_40MHZ; /* Conversion factor = 0 , 40MHz clock */
+ outb(bval,ioport+Clk_Factor);
+
+ bval = NOP_CMD; /* NOP cmd - clear command register */
+ outb(bval,ioport+ScsiCmd);
+
+ bval = EN_FEATURE+EN_SCSI2_CMD; /* Enable Feature and SCSI-2 */
+ outb(bval,ioport+CtrlReg2);
+
+ bval = FAST_CLK; /* fast clock */
+ outb(bval,ioport+CtrlReg3);
+
+ bval = EATER_25NS;
+ if( eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION )
+ bval |= NEGATE_REQACKDATA;
+ outb(bval,ioport+CtrlReg4);
+
+ bval = DIS_INT_ON_SCSI_RST; /* Disable SCSI bus reset interrupt */
+ outb(bval,ioport+CtrlReg1);
+
+ return(0);
+}
+
+
+void
+DC390_EnableCfg( USHORT mechnum, UCHAR regval )
+{
+ ULONG wlval;
+
+ if(mechnum == 2)
+ {
+ outb(mech2bus, PCI_CFG2_FORWARD_REG);
+ outb(mech2CfgSPenR, PCI_CFG2_ENABLE_REG);
+ }
+ else
+ {
+ regval &= 0xFC;
+ wlval = mech1addr;
+ wlval |= (((ULONG)regval) & 0xff);
+ outl(wlval, PCI_CFG1_ADDRESS_REG);
+ }
+}
+
+
+void
+DC390_DisableCfg( USHORT mechnum )
+{
+
+ if(mechnum == 2)
+ outb(0, PCI_CFG2_ENABLE_REG);
+ else
+ outl(0, PCI_CFG1_ADDRESS_REG);
+}
+
+
+UCHAR
+DC390_inByte( USHORT mechnum, UCHAR regval )
+{
+ UCHAR bval;
+ ULONG wval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg( mechnum, regval );
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= ((USHORT) regval) & 0xff;
+ bval = inb(wval);
+ }
+ else
+ {
+ regval &= 3;
+ bval = inb(PCI_CFG1_DATA_REG | regval);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+ return(bval);
+}
+
+
+USHORT
+DC390_inWord( USHORT mechnum, UCHAR regval )
+{
+ USHORT wval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg(mechnum,regval);
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= regval;
+ wval = inw(wval);
+ }
+ else
+ {
+ regval &= 3;
+ wval = inw(PCI_CFG1_DATA_REG | regval);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+ return(wval);
+}
+
+
+ULONG
+DC390_inDword(USHORT mechnum, UCHAR regval )
+{
+ ULONG wlval;
+ ULONG flags;
+ USHORT wval;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg(mechnum,regval);
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= regval;
+ wlval = inl(wval);
+ }
+ else
+ {
+ wlval = inl(PCI_CFG1_DATA_REG);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+ return(wlval);
+}
+
+
+void
+DC390_OutB(USHORT mechnum, UCHAR regval, UCHAR bval )
+{
+
+ USHORT wval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg(mechnum,regval);
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= regval;
+ outb(bval, wval);
+ }
+ else
+ {
+ regval &= 3;
+ outb(bval, PCI_CFG1_DATA_REG | regval);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+}
+
+
+void
+DC390_EnDisableCE( UCHAR mode, USHORT mechnum, PUCHAR regval )
+{
+
+ UCHAR bval;
+
+ bval = 0;
+ if(mode == ENABLE_CE)
+ *regval = 0xc0;
+ else
+ *regval = 0x80;
+ DC390_OutB(mechnum,*regval,bval);
+ if(mode == DISABLE_CE)
+ DC390_OutB(mechnum,*regval,bval);
+ udelay(160);
+}
+
+
+void
+DC390_EEpromOutDI( USHORT mechnum, PUCHAR regval, USHORT Carry )
+{
+ UCHAR bval;
+
+ bval = 0;
+ if(Carry)
+ {
+ bval = 0x40;
+ *regval = 0x80;
+ DC390_OutB(mechnum,*regval,bval);
+ }
+ udelay(160);
+ bval |= 0x80;
+ DC390_OutB(mechnum,*regval,bval);
+ udelay(160);
+ bval = 0;
+ DC390_OutB(mechnum,*regval,bval);
+ udelay(160);
+}
+
+
+UCHAR
+DC390_EEpromInDO( USHORT mechnum )
+{
+ UCHAR bval,regval;
+
+ regval = 0x80;
+ bval = 0x80;
+ DC390_OutB(mechnum,regval,bval);
+ udelay(160);
+ bval = 0x40;
+ DC390_OutB(mechnum,regval,bval);
+ udelay(160);
+ regval = 0x0;
+ bval = DC390_inByte(mechnum,regval);
+ if(bval == 0x22)
+ return(1);
+ else
+ return(0);
+}
+
+
+USHORT
+EEpromGetData1( USHORT mechnum )
+{
+ UCHAR i;
+ UCHAR carryFlag;
+ USHORT wval;
+
+ wval = 0;
+ for(i=0; i<16; i++)
+ {
+ wval <<= 1;
+ carryFlag = DC390_EEpromInDO(mechnum);
+ wval |= carryFlag;
+ }
+ return(wval);
+}
+
+
+void
+DC390_Prepare( USHORT mechnum, PUCHAR regval, UCHAR EEpromCmd )
+{
+ UCHAR i,j;
+ USHORT carryFlag;
+
+ carryFlag = 1;
+ j = 0x80;
+ for(i=0; i<9; i++)
+ {
+ DC390_EEpromOutDI(mechnum,regval,carryFlag);
+ carryFlag = (EEpromCmd & j) ? 1 : 0;
+ j >>= 1;
+ }
+}
+
+
+void
+DC390_ReadEEprom( USHORT mechnum, USHORT index )
+{
+ UCHAR regval,cmd;
+ PUSHORT ptr;
+ USHORT i;
+
+ ptr = (PUSHORT) &eepromBuf[index][0];
+ cmd = EEPROM_READ;
+ for(i=0; i<0x40; i++)
+ {
+ DC390_EnDisableCE(ENABLE_CE, mechnum, &regval);
+ DC390_Prepare(mechnum, &regval, cmd);
+ *ptr = EEpromGetData1(mechnum);
+ ptr++;
+ cmd++;
+ DC390_EnDisableCE(DISABLE_CE,mechnum,&regval);
+ }
+}
+
+
+USHORT
+DC390_CheckEEpromCheckSum( USHORT MechNum, USHORT index )
+{
+ USHORT wval, rc, *ptr;
+ UCHAR i;
+
+ DC390_ReadEEprom( MechNum, index );
+ wval = 0;
+ ptr = (PUSHORT) &eepromBuf[index][0];
+ for(i=0; i<128 ;i+=2, ptr++)
+ wval += *ptr;
+ if( wval == 0x1234 )
+ rc = 0;
+ else
+ rc = -1;
+ return( rc );
+}
+
+
+USHORT
+DC390_ToMech( USHORT Mechnum, USHORT BusDevFunNum )
+{
+ USHORT devnum;
+
+ devnum = BusDevFunNum;
+
+ if(Mechnum == 2)
+ {
+ if(devnum & 0x80)
+ return(-1);
+ mech2bus = (UCHAR)((devnum & 0xff00) >> 8); /* Bus num */
+ mech2Agent = ((UCHAR)(devnum & 0xff)) >> 3; /* Dev num */
+ mech2Agent |= 0xc0;
+ mech2CfgSPenR = ((UCHAR)(devnum & 0xff)) & 0x07; /* Fun num */
+ mech2CfgSPenR = (mech2CfgSPenR << 1) | 0x20;
+ }
+ else /* use mech #1 method */
+ {
+ mech1addr = 0x80000000 | ((ULONG)devnum << 8);
+ }
+ return(0);
+}
+
+/***********************************************************************
+ * Function : static int DC390_init (struct Scsi_Host *host)
+ *
+ * Purpose : initialize the internal structures for a given SCSI host
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ * Preconditions : when this function is called, the chip_type
+ * field of the pACB structure MUST have been set.
+ ***********************************************************************/
+
+static int
+DC390_init (PSHT psht, ULONG io_port, UCHAR Irq, USHORT index, USHORT MechNum)
+{
+ PSH psh;
+ PACB pACB;
+
+ if( !DC390_CheckEEpromCheckSum( MechNum, index) )
+ {
+ psh = scsi_register( psht, sizeof(DC390_ACB) );
+ if( !psh )
+ return( -1 );
+ if( !pSH_start )
+ {
+ pSH_start = psh;
+ pSH_current = psh;
+ }
+ else
+ {
+ pSH_current->next = psh;
+ pSH_current = psh;
+ }
+
+#ifdef DC390_DEBUG0
+ printk("DC390: pSH = %8x,", (UINT) psh);
+ printk("DC390: Index %02i,", index);
+#endif
+
+ DC390_initACB( psh, io_port, Irq, index );
+ if( !DC390_initAdapter( psh, io_port, Irq, index ) )
+ {
+ pACB = (PACB) psh->hostdata;
+ if( !pACB_start )
+ {
+ pACB_start = pACB;
+ pACB_current = pACB;
+ pACB->pNextACB = (PACB) -1;
+ }
+ else
+ {
+ pACB_current->pNextACB = pACB;
+ pACB_current = pACB;
+ pACB->pNextACB = (PACB) -1;
+ }
+
+#ifdef DC390_DEBUG0
+ printk("DC390: pACB = %8x, pDCB_array = %8x, pSRB_array = %8x\n",
+ (UINT) pACB, (UINT) pACB->DCB_array, (UINT) pACB->SRB_array);
+ printk("DC390: ACB size= %4x, DCB size= %4x, SRB size= %4x\n",
+ sizeof(DC390_ACB), sizeof(DC390_DCB), sizeof(DC390_SRB) );
+#endif
+
+ }
+ else
+ {
+ pSH_start = NULL;
+ scsi_unregister( psh );
+ return( -1 );
+ }
+ return( 0 );
+ }
+ else
+ {
+ printk("DC390_init: EEPROM reading error!\n");
+ return( -1 );
+ }
+}
+
+
+/***********************************************************************
+ * Function : int DC390_detect(Scsi_Host_Template *psht)
+ *
+ * Purpose : detects and initializes AMD53C974 SCSI chips
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : psht - template for this SCSI adapter
+ *
+ * Returns : number of host adapters detected
+ *
+ ***********************************************************************/
+
+int
+DC390_detect(Scsi_Host_Template *psht)
+{
+#ifdef FOR_PCI_OK
+ UCHAR pci_bus, pci_device_fn;
+ int error = 0;
+ USHORT chipType = 0;
+ USHORT i;
+#endif
+
+ UCHAR irq;
+ UCHAR istatus;
+#ifndef VERSION_ELF_1_2_13
+ UINT io_port;
+#else
+ ULONG io_port;
+#endif
+ USHORT adaptCnt = 0; /* Number of boards detected */
+ USHORT pci_index = 0; /* Device index to PCI BIOS calls */
+ USHORT MechNum, BusDevFunNum;
+ ULONG wlval;
+
+#ifndef VERSION_ELF_1_2_13
+ psht->proc_dir = &proc_scsi_tmscsim;
+#endif
+
+ InitialTime = 1;
+ pSHT_start = psht;
+ pACB_start = NULL;
+
+ MechNum = 1;
+ for( ; (MechNum < 3) && (!adaptCnt); MechNum++)
+ {
+ BusDevFunNum = 0;
+ for (; adaptCnt < MAX_ADAPTER_NUM ;)
+ {
+ if( !DC390_ToMech( MechNum, BusDevFunNum) )
+ {
+ wlval = DC390_inDword( MechNum, PCI_VENDOR_ID);
+ if(wlval == ( (PCI_DEVICE_ID_AMD53C974 << 16)+
+ PCI_VENDOR_ID_AMD) )
+ {
+ io_port =DC390_inDword(MechNum,PCI_BASE_ADDRESS_0) & 0xFFFE;
+ irq = DC390_inByte( MechNum, PCI_INTERRUPT_LINE);
+#ifdef DC390_DEBUG0
+ printk("DC390: IO_PORT=%4x,IRQ=%x,\n",(UINT) io_port, irq);
+#endif
+ if( !DC390_init(psht, io_port, irq, pci_index, MechNum) )
+ {
+ adaptCnt++;
+ pci_index++;
+ istatus = inb( (USHORT)io_port+INT_Status ); /* Reset Pending INT */
+#ifdef DC390_DEBUG0
+ printk("DC390: Mech=%2x,\n",(UCHAR) MechNum);
+#endif
+ }
+ }
+ }
+ if( BusDevFunNum != 0xfff8 )
+ BusDevFunNum += 8; /* next device # */
+ else
+ break;
+ }
+ }
+
+#ifdef FOR_PCI_OK
+ if ( pcibios_present() )
+ {
+ for (i = 0; i < MAX_ADAPTER_NUM; ++i)
+ {
+ if( !pcibios_find_device( PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD53C974,
+ pci_index, &pci_bus, &pci_device_fn) )
+ {
+ chipType = PCI_DEVICE_ID_AMD53C974;
+ pci_index++;
+ }
+
+ if( chipType )
+ {
+
+ error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &io_port);
+ error |= pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &irq);
+ if( error )
+ {
+ printk("DC390_detect: reading configuration registers error!\n");
+ InitialTime = 0;
+ return( 0 );
+ }
+
+ (USHORT) io_port = (USHORT) io_port & 0xFFFE;
+#ifdef DC390_DEBUG0
+ printk("DC390: IO_PORT=%4x,IRQ=%x,\n",(UINT) io_port, irq);
+#endif
+ if( !DC390_init(psht, io_port, irq, i) )
+ adaptCnt++;
+ chipType = 0;
+ }
+ else
+ break;
+ }
+ }
+#endif
+
+ InitialTime = 0;
+ adapterCnt = adaptCnt;
+ return( adaptCnt );
+}
+
+
+#ifndef VERSION_ELF_1_2_13
+
+/********************************************************************
+ * Function: tmscsim_set_info()
+ *
+ * Purpose: Set adapter info (!)
+ *
+ * Not yet implemented
+ *
+ *******************************************************************/
+
+int tmscsim_set_info(char *buffer, int length, struct Scsi_Host *shpnt)
+{
+ return(-ENOSYS); /* Currently this is a no-op */
+}
+
+/********************************************************************
+ * Function: tmscsim_proc_info(char* buffer, char **start,
+ * off_t offset, int length, int hostno, int inout)
+ *
+ * Purpose: return SCSI Adapter/Device Info
+ *
+ * Input: buffer: Pointer to a buffer where to write info
+ * start :
+ * offset:
+ * hostno: Host adapter index
+ * inout : Read (=0) or set(!=0) info
+ *
+ * Output: buffer: contains info
+ * length: length of info in buffer
+ *
+ * return value: length
+ *
+ ********************************************************************/
+
+/* KG: proc_info taken from driver aha152x.c */
+
+#undef SPRINTF
+#define SPRINTF(args...) pos += sprintf(pos, ## args)
+
+#define YESNO(YN)\
+if (YN) SPRINTF(" Yes ");\
+else SPRINTF(" No ")
+
+int tmscsim_proc_info(char *buffer, char **start,
+ off_t offset, int length, int hostno, int inout)
+{
+ int dev, spd, spd1;
+ char *pos = buffer;
+ PSH shpnt;
+ PACB acbpnt;
+ PDCB dcbpnt;
+ unsigned long flags;
+/* Scsi_Cmnd *ptr; */
+
+ acbpnt = pACB_start;
+
+ while(acbpnt != (PACB)-1)
+ {
+ shpnt = acbpnt->pScsiHost;
+ if (shpnt->host_no == hostno) break;
+ acbpnt = acbpnt->pNextACB;
+ }
+
+ if (acbpnt == (PACB)-1) return(-ESRCH);
+ if(!shpnt) return(-ESRCH);
+
+ if(inout) /* Has data been written to the file ? */
+ return(tmscsim_set_info(buffer, length, shpnt));
+
+ SPRINTF("Tekram DC390(T) PCI SCSI Host Adadpter, ");
+ SPRINTF("Driver Version 1.10, 1996/12/05\n");
+
+ save_flags(flags);
+ cli();
+
+ SPRINTF("SCSI Host Nr %i, ", shpnt->host_no);
+ SPRINTF("DC390 Adapter Nr %i\n", acbpnt->AdapterIndex);
+ SPRINTF("IOPortBase 0x%04x, ", acbpnt->IOPortBase);
+ SPRINTF("IRQLevel 0x%02x\n", acbpnt->IRQLevel);
+
+ SPRINTF("MaxID %i, MaxLUN %i, ",acbpnt->max_id, acbpnt->max_lun);
+ SPRINTF("AdapterID %i, AdapterLUN %i\n", acbpnt->AdaptSCSIID, acbpnt->AdaptSCSILUN);
+
+ SPRINTF("TagMaxNum %i, Status %i\n", acbpnt->TagMaxNum, acbpnt->status);
+
+ SPRINTF("Nr of attached devices: %i\n", acbpnt->DeviceCnt);
+
+ SPRINTF("Un ID LUN Prty Sync DsCn SndS TagQ NegoPeriod SyncSpeed SyncOffs\n");
+
+ dcbpnt = acbpnt->pLinkDCB;
+ for (dev = 0; dev < acbpnt->DeviceCnt; dev++)
+ {
+ SPRINTF("%02i %02i %02i ", dev, dcbpnt->UnitSCSIID, dcbpnt->UnitSCSILUN);
+ YESNO(dcbpnt->DevMode & PARITY_CHK_);
+ YESNO(dcbpnt->SyncMode & SYNC_NEGO_DONE);
+ YESNO(dcbpnt->DevMode & EN_DISCONNECT_);
+ YESNO(dcbpnt->DevMode & SEND_START_);
+ YESNO(dcbpnt->SyncMode & EN_TAG_QUEUING);
+ SPRINTF(" %03i ns ", (dcbpnt->NegoPeriod) << 2);
+ if (dcbpnt->SyncOffset & 0x0f)
+ {
+ spd = 1000/(dcbpnt->NegoPeriod <<2);
+ spd1 = 1000%(dcbpnt->NegoPeriod <<2);
+ spd1 = (spd1 * 10)/(dcbpnt->NegoPeriod <<2);
+ SPRINTF(" %2i.%1i M %02i\n", spd, spd1, (dcbpnt->SyncOffset & 0x0f));
+ }
+ else SPRINTF("\n");
+ /* Add more info ...*/
+ dcbpnt = dcbpnt->pNextDCB;
+ }
+
+ restore_flags(flags);
+ *start = buffer + offset;
+
+ if (pos - buffer < offset)
+ return 0;
+ else if (pos - buffer - offset < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
+#endif /* VERSION_ELF_1_2_13 */
+
+
+#ifdef MODULE
+
+/***********************************************************************
+ * Function : static int DC390_shutdown (struct Scsi_Host *host)
+ *
+ * Purpose : does a clean (we hope) shutdown of the SCSI chip.
+ * Use prior to dumping core, unloading the driver, etc.
+ *
+ * Returns : 0 on success
+ ***********************************************************************/
+static int
+DC390_shutdown (struct Scsi_Host *host)
+{
+ UCHAR bval;
+ USHORT ioport;
+ unsigned long flags;
+ PACB pACB = (PACB)(host->hostdata);
+
+ ioport = (unsigned int) pACB->IOPortBase;
+
+ save_flags (flags);
+ cli();
+
+/* pACB->soft_reset(host); */
+
+#ifdef DC390_DEBUG0
+ printk("DC390: shutdown,");
+#endif
+
+ bval = inb(ioport+CtrlReg1);
+ bval |= DIS_INT_ON_SCSI_RST;
+ outb(bval,ioport+CtrlReg1); /* disable interrupt */
+ DC390_ResetSCSIBus( pACB );
+
+ restore_flags (flags);
+ return( 0 );
+}
+
+
+int DC390_release(struct Scsi_Host *host)
+{
+ int irq_count;
+ struct Scsi_Host *tmp;
+
+ DC390_shutdown (host);
+
+ if (host->irq != IRQ_NONE)
+ {
+ for (irq_count = 0, tmp = pSH_start; tmp; tmp = tmp->next)
+ {
+ if ( tmp->irq == host->irq )
+ ++irq_count;
+ }
+ if (irq_count == 1)
+ {
+#ifdef DC390_DEBUG0
+ printk("DC390: Free IRQ %i.",host->irq);
+#endif
+#ifndef VERSION_ELF_1_2_13
+ free_irq(host->irq,NULL);
+#else
+ free_irq(host->irq);
+#endif
+ }
+ }
+
+ release_region(host->io_port,host->n_io_port);
+
+ return( 1 );
+}
+
+Scsi_Host_Template driver_template = DC390_T;
+#include "scsi_module.c"
+#endif /* def MODULE */
+
diff --git a/linux/src/drivers/scsi/tmscsim.h b/linux/src/drivers/scsi/tmscsim.h
new file mode 100644
index 0000000..361c488
--- /dev/null
+++ b/linux/src/drivers/scsi/tmscsim.h
@@ -0,0 +1,680 @@
+/***********************************************************************
+;* File Name : TMSCSIM.H *
+;* TEKRAM DC-390(T) PCI SCSI Bus Master Host Adapter *
+;* Device Driver *
+;***********************************************************************/
+
+#ifndef TMSCSIM_H
+#define TMSCSIM_H
+
+#define IRQ_NONE 255
+
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned long ULONG;
+typedef unsigned int UINT;
+
+typedef UCHAR *PUCHAR;
+typedef USHORT *PUSHORT;
+typedef ULONG *PULONG;
+typedef Scsi_Host_Template *PSHT;
+typedef struct Scsi_Host *PSH;
+typedef Scsi_Device *PSCSIDEV;
+typedef Scsi_Cmnd *PSCSICMD;
+typedef void *PVOID;
+typedef struct scatterlist *PSGL, SGL;
+
+
+/*;-----------------------------------------------------------------------*/
+typedef struct _SyncMsg
+{
+UCHAR ExtendMsg;
+UCHAR ExtMsgLen;
+UCHAR SyncXferReq;
+UCHAR Period;
+UCHAR ReqOffset;
+} SyncMsg;
+/*;-----------------------------------------------------------------------*/
+typedef struct _Capacity
+{
+ULONG BlockCount;
+ULONG BlockLength;
+} Capacity;
+/*;-----------------------------------------------------------------------*/
+typedef struct _SGentry
+{
+ULONG SGXferDataPtr;
+ULONG SGXferDataLen;
+} SGentry;
+
+typedef struct _SGentry1
+{
+ULONG SGXLen;
+ULONG SGXPtr;
+} SGentry1, *PSGE;
+
+
+#define MAX_ADAPTER_NUM 4
+#define MAX_DEVICES 10
+#define MAX_SG_LIST_BUF 16
+#define MAX_CMD_QUEUE 20
+#define MAX_CMD_PER_LUN 8
+#define MAX_SCSI_ID 8
+#define MAX_SRB_CNT MAX_CMD_QUEUE+4
+#define END_SCAN 2
+
+#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */
+
+/*
+;-----------------------------------------------------------------------
+; SCSI Request Block
+;-----------------------------------------------------------------------
+*/
+struct _SRB
+{
+UCHAR CmdBlock[12];
+
+struct _SRB *pNextSRB;
+struct _DCB *pSRBDCB;
+PSCSICMD pcmd;
+PSGL pSegmentList;
+
+ULONG PhysSRB;
+ULONG TotalXferredLen;
+ULONG SGPhysAddr; /*;a segment starting address */
+ULONG SGToBeXferLen; /*; to be xfer length */
+
+SGL Segmentx; /* make a one entry of S/G list table */
+
+PUCHAR pMsgPtr;
+USHORT SRBState;
+USHORT Revxx2; /* ??? */
+
+UCHAR MsgInBuf[6];
+UCHAR MsgOutBuf[6];
+
+UCHAR AdaptStatus;
+UCHAR TargetStatus;
+UCHAR MsgCnt;
+UCHAR EndMessage;
+UCHAR TagNumber;
+UCHAR SGcount;
+UCHAR SGIndex;
+UCHAR IORBFlag; /*;81h-Reset, 2-retry */
+
+UCHAR SRBStatus;
+UCHAR RetryCnt;
+UCHAR SRBFlag; /*; b0-AutoReqSense,b6-Read,b7-write */
+ /*; b4-settimeout,b5-Residual valid */
+UCHAR ScsiCmdLen;
+UCHAR ScsiPhase;
+UCHAR Reserved3[3]; /*;for dword alignment */
+ULONG Segment0[2];
+ULONG Segment1[2];
+};
+
+typedef struct _SRB DC390_SRB, *PSRB;
+
+/*
+;-----------------------------------------------------------------------
+; Device Control Block
+;-----------------------------------------------------------------------
+*/
+struct _DCB
+{
+struct _DCB *pNextDCB;
+struct _ACB *pDCBACB;
+
+PSCSICMD pQIORBhead;
+PSCSICMD pQIORBtail;
+PSCSICMD AboIORBhead;
+PSCSICMD AboIORBtail;
+USHORT QIORBCnt;
+USHORT AboIORBcnt;
+
+PSRB pWaitingSRB;
+PSRB pWaitLast;
+PSRB pGoingSRB;
+PSRB pGoingLast;
+PSRB pActiveSRB;
+USHORT GoingSRBCnt;
+USHORT WaitSRBCnt; /* ??? */
+
+ULONG TagMask;
+
+USHORT MaxCommand;
+USHORT AdaptIndex; /*; UnitInfo struc start */
+USHORT UnitIndex; /*; nth Unit on this card */
+UCHAR UnitSCSIID; /*; SCSI Target ID (SCSI Only) */
+UCHAR UnitSCSILUN; /*; SCSI Log. Unit (SCSI Only) */
+
+UCHAR IdentifyMsg;
+UCHAR CtrlR1;
+UCHAR CtrlR3;
+UCHAR CtrlR4;
+
+UCHAR InqDataBuf[8];
+UCHAR CapacityBuf[8];
+UCHAR DevMode;
+UCHAR AdpMode;
+UCHAR SyncMode; /*; 0:async mode */
+UCHAR NegoPeriod; /*;for nego. */
+UCHAR SyncPeriod; /*;for reg. */
+UCHAR SyncOffset; /*;for reg. and nego.(low nibble) */
+UCHAR UnitCtrlFlag;
+UCHAR DCBFlag;
+UCHAR DevType;
+UCHAR Reserved2[3]; /*;for dword alignment */
+};
+
+typedef struct _DCB DC390_DCB, *PDCB;
+/*
+;-----------------------------------------------------------------------
+; Adapter Control Block
+;-----------------------------------------------------------------------
+*/
+struct _ACB
+{
+ULONG PhysACB;
+PSH pScsiHost;
+struct _ACB *pNextACB;
+USHORT IOPortBase;
+USHORT Revxx1; /* ??? */
+
+PDCB pLinkDCB;
+PDCB pDCBRunRobin;
+PDCB pActiveDCB;
+PDCB pDCB_free;
+PSRB pFreeSRB;
+PSRB pTmpSRB;
+USHORT SRBCount;
+USHORT AdapterIndex; /*; nth Adapter this driver */
+USHORT max_id;
+USHORT max_lun;
+
+UCHAR msgin123[4];
+UCHAR status;
+UCHAR AdaptSCSIID; /*; Adapter SCSI Target ID */
+UCHAR AdaptSCSILUN; /*; Adapter SCSI LUN */
+UCHAR DeviceCnt;
+UCHAR IRQLevel;
+UCHAR TagMaxNum;
+UCHAR ACBFlag;
+UCHAR Gmode2;
+UCHAR LUNchk;
+UCHAR scan_devices;
+UCHAR HostID_Bit;
+UCHAR Reserved1[1]; /*;for dword alignment */
+UCHAR DCBmap[MAX_SCSI_ID];
+DC390_DCB DCB_array[MAX_DEVICES]; /* +74h, Len=3E8 */
+DC390_SRB SRB_array[MAX_SRB_CNT]; /* +45Ch, Len= */
+DC390_SRB TmpSRB;
+};
+
+typedef struct _ACB DC390_ACB, *PACB;
+
+/*;-----------------------------------------------------------------------*/
+
+
+#define BIT31 0x80000000
+#define BIT30 0x40000000
+#define BIT29 0x20000000
+#define BIT28 0x10000000
+#define BIT27 0x08000000
+#define BIT26 0x04000000
+#define BIT25 0x02000000
+#define BIT24 0x01000000
+#define BIT23 0x00800000
+#define BIT22 0x00400000
+#define BIT21 0x00200000
+#define BIT20 0x00100000
+#define BIT19 0x00080000
+#define BIT18 0x00040000
+#define BIT17 0x00020000
+#define BIT16 0x00010000
+#define BIT15 0x00008000
+#define BIT14 0x00004000
+#define BIT13 0x00002000
+#define BIT12 0x00001000
+#define BIT11 0x00000800
+#define BIT10 0x00000400
+#define BIT9 0x00000200
+#define BIT8 0x00000100
+#define BIT7 0x00000080
+#define BIT6 0x00000040
+#define BIT5 0x00000020
+#define BIT4 0x00000010
+#define BIT3 0x00000008
+#define BIT2 0x00000004
+#define BIT1 0x00000002
+#define BIT0 0x00000001
+
+/*;---UnitCtrlFlag */
+#define UNIT_ALLOCATED BIT0
+#define UNIT_INFO_CHANGED BIT1
+#define FORMATING_MEDIA BIT2
+#define UNIT_RETRY BIT3
+
+/*;---UnitFlags */
+#define DASD_SUPPORT BIT0
+#define SCSI_SUPPORT BIT1
+#define ASPI_SUPPORT BIT2
+
+/*;----SRBState machine definition */
+#define SRB_FREE 0
+#define SRB_WAIT BIT0
+#define SRB_READY BIT1
+#define SRB_MSGOUT BIT2 /*;arbitration+msg_out 1st byte*/
+#define SRB_MSGIN BIT3
+#define SRB_MSGIN_MULTI BIT4
+#define SRB_COMMAND BIT5
+#define SRB_START_ BIT6 /*;arbitration+msg_out+command_out*/
+#define SRB_DISCONNECT BIT7
+#define SRB_DATA_XFER BIT8
+#define SRB_XFERPAD BIT9
+#define SRB_STATUS BIT10
+#define SRB_COMPLETED BIT11
+#define SRB_ABORT_SENT BIT12
+#define DO_SYNC_NEGO BIT13
+#define SRB_UNEXPECT_RESEL BIT14
+
+/*;---ACBFlag */
+#define RESET_DEV BIT0
+#define RESET_DETECT BIT1
+#define RESET_DONE BIT2
+
+/*;---DCBFlag */
+#define ABORT_DEV_ BIT0
+
+/*;---SRBstatus */
+#define SRB_OK BIT0
+#define ABORTION BIT1
+#define OVER_RUN BIT2
+#define UNDER_RUN BIT3
+#define PARITY_ERROR BIT4
+#define SRB_ERROR BIT5
+
+/*;---SRBFlag */
+#define DATAOUT BIT7
+#define DATAIN BIT6
+#define RESIDUAL_VALID BIT5
+#define ENABLE_TIMER BIT4
+#define RESET_DEV0 BIT2
+#define ABORT_DEV BIT1
+#define AUTO_REQSENSE BIT0
+
+/*;---Adapter status */
+#define H_STATUS_GOOD 0
+#define H_SEL_TIMEOUT 0x11
+#define H_OVER_UNDER_RUN 0x12
+#define H_UNEXP_BUS_FREE 0x13
+#define H_TARGET_PHASE_F 0x14
+#define H_INVALID_CCB_OP 0x16
+#define H_LINK_CCB_BAD 0x17
+#define H_BAD_TARGET_DIR 0x18
+#define H_DUPLICATE_CCB 0x19
+#define H_BAD_CCB_OR_SG 0x1A
+#define H_ABORT 0x0FF
+
+/*; SCSI Status byte codes*/
+#define SCSI_STAT_GOOD 0x0 /*; Good status */
+#define SCSI_STAT_CHECKCOND 0x02 /*; SCSI Check Condition */
+#define SCSI_STAT_CONDMET 0x04 /*; Condition Met */
+#define SCSI_STAT_BUSY 0x08 /*; Target busy status */
+#define SCSI_STAT_INTER 0x10 /*; Intermediate status */
+#define SCSI_STAT_INTERCONDMET 0x14 /*; Intermediate condition met */
+#define SCSI_STAT_RESCONFLICT 0x18 /*; Reservation conflict */
+#define SCSI_STAT_CMDTERM 0x22 /*; Command Terminated */
+#define SCSI_STAT_QUEUEFULL 0x28 /*; Queue Full */
+
+#define SCSI_STAT_UNEXP_BUS_F 0xFD /*; Unexpect Bus Free */
+#define SCSI_STAT_BUS_RST_DETECT 0xFE /*; Scsi Bus Reset detected */
+#define SCSI_STAT_SEL_TIMEOUT 0xFF /*; Selection Time out */
+
+/*;---Sync_Mode */
+#define SYNC_DISABLE 0
+#define SYNC_ENABLE BIT0
+#define SYNC_NEGO_DONE BIT1
+#define WIDE_ENABLE BIT2
+#define WIDE_NEGO_DONE BIT3
+#define EN_TAG_QUEUING BIT4
+#define EN_ATN_STOP BIT5
+
+#define SYNC_NEGO_OFFSET 15
+
+/*;---SCSI bus phase*/
+#define SCSI_DATA_OUT 0
+#define SCSI_DATA_IN 1
+#define SCSI_COMMAND 2
+#define SCSI_STATUS_ 3
+#define SCSI_NOP0 4
+#define SCSI_NOP1 5
+#define SCSI_MSG_OUT 6
+#define SCSI_MSG_IN 7
+
+/*;----SCSI MSG BYTE*/
+#define MSG_COMPLETE 0x00
+#define MSG_EXTENDED 0x01
+#define MSG_SAVE_PTR 0x02
+#define MSG_RESTORE_PTR 0x03
+#define MSG_DISCONNECT 0x04
+#define MSG_INITIATOR_ERROR 0x05
+#define MSG_ABORT 0x06
+#define MSG_REJECT_ 0x07
+#define MSG_NOP 0x08
+#define MSG_PARITY_ERROR 0x09
+#define MSG_LINK_CMD_COMPL 0x0A
+#define MSG_LINK_CMD_COMPL_FLG 0x0B
+#define MSG_BUS_RESET 0x0C
+#define MSG_ABORT_TAG 0x0D
+#define MSG_SIMPLE_QTAG 0x20
+#define MSG_HEAD_QTAG 0x21
+#define MSG_ORDER_QTAG 0x22
+#define MSG_IDENTIFY 0x80
+#define MSG_HOST_ID 0x0C0
+
+/*;----SCSI STATUS BYTE*/
+#define STATUS_GOOD 0x00
+#define CHECK_CONDITION_ 0x02
+#define STATUS_BUSY 0x08
+#define STATUS_INTERMEDIATE 0x10
+#define RESERVE_CONFLICT 0x18
+
+/* cmd->result */
+#define STATUS_MASK_ 0xFF
+#define MSG_MASK 0xFF00
+#define RETURN_MASK 0xFF0000
+
+/*
+** Inquiry Data format
+*/
+
+typedef struct _SCSIInqData { /* INQ */
+
+ UCHAR DevType; /* Periph Qualifier & Periph Dev Type*/
+ UCHAR RMB_TypeMod; /* rem media bit & Dev Type Modifier */
+ UCHAR Vers; /* ISO, ECMA, & ANSI versions */
+ UCHAR RDF; /* AEN, TRMIOP, & response data format*/
+ UCHAR AddLen; /* length of additional data */
+ UCHAR Res1; /* reserved */
+ UCHAR Res2; /* reserved */
+ UCHAR Flags; /* RelADr,Wbus32,Wbus16,Sync,etc. */
+ UCHAR VendorID[8]; /* Vendor Identification */
+ UCHAR ProductID[16]; /* Product Identification */
+ UCHAR ProductRev[4]; /* Product Revision */
+
+
+} SCSI_INQDATA, *PSCSI_INQDATA;
+
+
+/* Inquiry byte 0 masks */
+
+
+#define SCSI_DEVTYPE 0x1F /* Peripheral Device Type */
+#define SCSI_PERIPHQUAL 0xE0 /* Peripheral Qualifier */
+
+
+/* Inquiry byte 1 mask */
+
+#define SCSI_REMOVABLE_MEDIA 0x80 /* Removable Media bit (1=removable) */
+
+
+/* Peripheral Device Type definitions */
+
+#define SCSI_DASD 0x00 /* Direct-access Device */
+#define SCSI_SEQACESS 0x01 /* Sequential-access device */
+#define SCSI_PRINTER 0x02 /* Printer device */
+#define SCSI_PROCESSOR 0x03 /* Processor device */
+#define SCSI_WRITEONCE 0x04 /* Write-once device */
+#define SCSI_CDROM 0x05 /* CD-ROM device */
+#define SCSI_SCANNER 0x06 /* Scanner device */
+#define SCSI_OPTICAL 0x07 /* Optical memory device */
+#define SCSI_MEDCHGR 0x08 /* Medium changer device */
+#define SCSI_COMM 0x09 /* Communications device */
+#define SCSI_NODEV 0x1F /* Unknown or no device type */
+
+/*
+** Inquiry flag definitions (Inq data byte 7)
+*/
+
+#define SCSI_INQ_RELADR 0x80 /* device supports relative addressing*/
+#define SCSI_INQ_WBUS32 0x40 /* device supports 32 bit data xfers */
+#define SCSI_INQ_WBUS16 0x20 /* device supports 16 bit data xfers */
+#define SCSI_INQ_SYNC 0x10 /* device supports synchronous xfer */
+#define SCSI_INQ_LINKED 0x08 /* device supports linked commands */
+#define SCSI_INQ_CMDQUEUE 0x02 /* device supports command queueing */
+#define SCSI_INQ_SFTRE 0x01 /* device supports soft resets */
+
+
+/*
+;==========================================================
+; EEPROM byte offset
+;==========================================================
+*/
+typedef struct _EEprom
+{
+UCHAR EE_MODE1;
+UCHAR EE_SPEED;
+UCHAR xx1;
+UCHAR xx2;
+} EEprom, *PEEprom;
+
+#define EE_ADAPT_SCSI_ID 64
+#define EE_MODE2 65
+#define EE_DELAY 66
+#define EE_TAG_CMD_NUM 67
+
+/*; EE_MODE1 bits definition*/
+#define PARITY_CHK_ BIT0
+#define SYNC_NEGO_ BIT1
+#define EN_DISCONNECT_ BIT2
+#define SEND_START_ BIT3
+#define TAG_QUEUING_ BIT4
+
+/*; EE_MODE2 bits definition*/
+#define MORE2_DRV BIT0
+#define GREATER_1G BIT1
+#define RST_SCSI_BUS BIT2
+#define ACTIVE_NEGATION BIT3
+#define NO_SEEK BIT4
+#define LUN_CHECK BIT5
+
+#define ENABLE_CE 1
+#define DISABLE_CE 0
+#define EEPROM_READ 0x80
+
+/*
+;==========================================================
+; AMD 53C974 Registers bit Definition
+;==========================================================
+*/
+/*
+;====================
+; SCSI Register
+;====================
+*/
+
+/*; Command Reg.(+0CH) */
+#define DMA_COMMAND BIT7
+#define NOP_CMD 0
+#define CLEAR_FIFO_CMD 1
+#define RST_DEVICE_CMD 2
+#define RST_SCSI_BUS_CMD 3
+#define INFO_XFER_CMD 0x10
+#define INITIATOR_CMD_CMPLTE 0x11
+#define MSG_ACCEPTED_CMD 0x12
+#define XFER_PAD_BYTE 0x18
+#define SET_ATN_CMD 0x1A
+#define RESET_ATN_CMD 0x1B
+#define SELECT_W_ATN 0x42
+#define SEL_W_ATN_STOP 0x43
+#define EN_SEL_RESEL 0x44
+#define SEL_W_ATN2 0x46
+#define DATA_XFER_CMD INFO_XFER_CMD
+
+
+/*; SCSI Status Reg.(+10H) */
+#define INTERRUPT BIT7
+#define ILLEGAL_OP_ERR BIT6
+#define PARITY_ERR BIT5
+#define COUNT_2_ZERO BIT4
+#define GROUP_CODE_VALID BIT3
+#define SCSI_PHASE_MASK (BIT2+BIT1+BIT0)
+
+/*; Interrupt Status Reg.(+14H) */
+#define SCSI_RESET BIT7
+#define INVALID_CMD BIT6
+#define DISCONNECTED BIT5
+#define SERVICE_REQUEST BIT4
+#define SUCCESSFUL_OP BIT3
+#define RESELECTED BIT2
+#define SEL_ATTENTION BIT1
+#define SELECTED BIT0
+
+/*; Internal State Reg.(+18H) */
+#define SYNC_OFFSET_FLAG BIT3
+#define INTRN_STATE_MASK (BIT2+BIT1+BIT0)
+
+/*; Clock Factor Reg.(+24H) */
+#define CLK_FREQ_40MHZ 0
+#define CLK_FREQ_35MHZ (BIT2+BIT1+BIT0)
+#define CLK_FREQ_30MHZ (BIT2+BIT1)
+#define CLK_FREQ_25MHZ (BIT2+BIT0)
+#define CLK_FREQ_20MHZ BIT2
+#define CLK_FREQ_15MHZ (BIT1+BIT0)
+#define CLK_FREQ_10MHZ BIT1
+
+/*; Control Reg. 1(+20H) */
+#define EXTENDED_TIMING BIT7
+#define DIS_INT_ON_SCSI_RST BIT6
+#define PARITY_ERR_REPO BIT4
+#define SCSI_ID_ON_BUS (BIT2+BIT1+BIT0)
+
+/*; Control Reg. 2(+2CH) */
+#define EN_FEATURE BIT6
+#define EN_SCSI2_CMD BIT3
+
+/*; Control Reg. 3(+30H) */
+#define ID_MSG_CHECK BIT7
+#define EN_QTAG_MSG BIT6
+#define EN_GRP2_CMD BIT5
+#define FAST_SCSI BIT4 /* ;10MB/SEC */
+#define FAST_CLK BIT3 /* ;25 - 40 MHZ */
+
+/*; Control Reg. 4(+34H) */
+#define EATER_12NS 0
+#define EATER_25NS BIT7
+#define EATER_35NS BIT6
+#define EATER_0NS (BIT7+BIT6)
+#define NEGATE_REQACKDATA BIT2
+#define NEGATE_REQACK BIT3
+/*
+;====================
+; DMA Register
+;====================
+*/
+/*; DMA Command Reg.(+40H) */
+#define READ_DIRECTION BIT7
+#define WRITE_DIRECTION 0
+#define EN_DMA_INT BIT6
+#define MAP_TO_MDL BIT5
+#define DIAGNOSTIC BIT4
+#define DMA_IDLE_CMD 0
+#define DMA_BLAST_CMD BIT0
+#define DMA_ABORT_CMD BIT1
+#define DMA_START_CMD (BIT1+BIT0)
+
+/*; DMA Status Reg.(+54H) */
+#define PCI_MS_ABORT BIT6
+#define BLAST_COMPLETE BIT5
+#define SCSI_INTERRUPT BIT4
+#define DMA_XFER_DONE BIT3
+#define DMA_XFER_ABORT BIT2
+#define DMA_XFER_ERROR BIT1
+#define POWER_DOWN BIT0
+
+/*
+; DMA SCSI Bus and Ctrl.(+70H)
+;EN_INT_ON_PCI_ABORT
+*/
+
+/*
+;==========================================================
+; SCSI Chip register address offset
+;==========================================================
+*/
+#define CtcReg_Low 0x00
+#define CtcReg_Mid 0x04
+#define ScsiFifo 0x08
+#define ScsiCmd 0x0C
+#define Scsi_Status 0x10
+#define INT_Status 0x14
+#define Sync_Period 0x18
+#define Sync_Offset 0x1C
+#define CtrlReg1 0x20
+#define Clk_Factor 0x24
+#define CtrlReg2 0x2C
+#define CtrlReg3 0x30
+#define CtrlReg4 0x34
+#define CtcReg_High 0x38
+#define DMA_Cmd 0x40
+#define DMA_XferCnt 0x44
+#define DMA_XferAddr 0x48
+#define DMA_Wk_ByteCntr 0x4C
+#define DMA_Wk_AddrCntr 0x50
+#define DMA_Status 0x54
+#define DMA_MDL_Addr 0x58
+#define DMA_Wk_MDL_Cntr 0x5C
+#define DMA_ScsiBusCtrl 0x70
+
+#define StcReg_Low CtcReg_Low
+#define StcReg_Mid CtcReg_Mid
+#define Scsi_Dest_ID Scsi_Status
+#define Scsi_TimeOut INT_Status
+#define Intern_State Sync_Period
+#define Current_Fifo Sync_Offset
+#define StcReg_High CtcReg_High
+
+#define am_target Scsi_Status
+#define am_timeout INT_Status
+#define am_seq_step Sync_Period
+#define am_fifo_count Sync_Offset
+
+
+#define DC390_read8(address) \
+    (inb(DC390_ioport + (address)))
+
+#define DC390_read16(address) \
+    (inw(DC390_ioport + (address)))
+
+#define DC390_read32(address) \
+    (inl(DC390_ioport + (address)))
+
+#define DC390_write8(address,value) \
+    (outb((value), DC390_ioport + (address)))
+
+#define DC390_write16(address,value) \
+    (outw((value), DC390_ioport + (address)))
+
+#define DC390_write32(address,value) \
+    (outl((value), DC390_ioport + (address)))
+
+
+/* Configuration method #1 */
+#define PCI_CFG1_ADDRESS_REG 0xcf8
+#define PCI_CFG1_DATA_REG 0xcfc
+#define PCI_CFG1_ENABLE 0x80000000
+#define PCI_CFG1_TUPPLE(bus, device, function, register) \
+ (PCI_CFG1_ENABLE | (((bus) << 16) & 0xff0000) | \
+ (((device) << 11) & 0xf800) | (((function) << 8) & 0x700)| \
+ (((register) << 2) & 0xfc))
+
+/* Configuration method #2 */
+#define PCI_CFG2_ENABLE_REG 0xcf8
+#define PCI_CFG2_FORWARD_REG 0xcfa
+#define PCI_CFG2_ENABLE 0x0f0
+#define PCI_CFG2_TUPPLE(function) \
+ (PCI_CFG2_ENABLE | (((function) << 1) & 0xe))
+
+
+#endif /* TMSCSIM_H */
diff --git a/linux/src/drivers/scsi/u14-34f.c b/linux/src/drivers/scsi/u14-34f.c
new file mode 100644
index 0000000..ece5330
--- /dev/null
+++ b/linux/src/drivers/scsi/u14-34f.c
@@ -0,0 +1,1996 @@
+/*
+ * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters.
+ *
+ * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
+ * Added command line option (et:[y|n]) to use the existing
+ * translation (returned by scsicam_bios_param) as disk geometry.
+ * The default is et:n, which uses the disk geometry jumpered
+ * on the board.
+ * The default value et:n is compatible with all previous revisions
+ * of this driver.
+ *
+ * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
+ * Increased busy timeout from 10 msec. to 200 msec. while
+ * processing interrupts.
+ *
+ * 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
+ * Improved abort handling during the eh recovery process.
+ *
+ * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
+ * The driver is now fully SMP safe, including the
+ * abort and reset routines.
+ * Added command line options (eh:[y|n]) to choose between
+ * new_eh_code and the old scsi code.
+ * If linux version >= 2.1.101 the default is eh:y, while the eh
+ * option is ignored for previous releases and the old scsi code
+ * is used.
+ *
+ * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
+ * Reworked interrupt handler.
+ *
+ * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
+ * Major reliability improvement: when a batch with overlapping
+ * requests is detected, requests are queued one at a time
+ * eliminating any possible board or drive reordering.
+ *
+ * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
+ * Improved SMP support (if linux version >= 2.1.95).
+ *
+ * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
+ * Performance improvement: when sequential i/o is detected,
+ * always use direct sort instead of reverse sort.
+ *
+ * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
+ * io_port is now unsigned long.
+ *
+ * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
+ * Use new scsi error handling code (if linux version >= 2.1.88).
+ * Use new interrupt code.
+ *
+ * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
+ * Use of udelay inside the wait loops to avoid timeout
+ * problems with fast cpus.
+ * Removed check about useless calls to the interrupt service
+ * routine (reported on SMP systems only).
+ * At initialization time "sorted/unsorted" is displayed instead
+ * of "linked/unlinked" to reinforce the fact that "linking" is
+ * nothing but "elevator sorting" in the actual implementation.
+ *
+ * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
+ * Use of serial_number_at_timeout in abort and reset processing.
+ * Use of the __initfunc and __initdata macro in setup code.
+ * Minor cleanups in the list_statistics code.
+ *
+ * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
+ * When loading as a module, parameter passing is now supported
+ * both in 2.0 and in 2.1 style.
+ * Fixed data transfer direction for some SCSI opcodes.
+ * Immediate acknowledge to request sense commands.
+ * Linked commands to each disk device are now reordered by elevator
+ * sorting. Rare cases in which reordering of write requests could
+ * cause wrong results are managed.
+ *
+ * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
+ * Added command line options to enable/disable linked commands
+ * (lc:[y|n]), old firmware support (of:[y|n]) and to set the max
+ * queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8".
+ * Improved command linking.
+ *
+ * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
+ * Added linked command support.
+ *
+ * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
+ * Added queue depth adjustment.
+ *
+ * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
+ * The list of i/o ports to be probed can be overwritten by the
+ * "u14-34f=port0,port1,...." boot command line option.
+ * Scatter/gather lists are now allocated by a number of kmalloc
+ * calls, in order to avoid the previous size limit of 64Kb.
+ *
+ * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
+ * Added multichannel support.
+ *
+ * 27 Sep 1996 rev. 2.12 for linux 2.1.0
+ * Portability cleanups (virtual/bus addressing, little/big endian
+ * support).
+ *
+ * 09 Jul 1996 rev. 2.11 for linux 2.0.4
+ * "Data over/under-run" no longer implies a redo on all targets.
+ * Number of internal retries is now limited.
+ *
+ * 16 Apr 1996 rev. 2.10 for linux 1.3.90
+ * New argument "reset_flags" to the reset routine.
+ *
+ * 21 Jul 1995 rev. 2.02 for linux 1.3.11
+ * Fixed Data Transfer Direction for some SCSI commands.
+ *
+ * 13 Jun 1995 rev. 2.01 for linux 1.2.10
+ * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when
+ * the firmware prom is not the latest one (28008-006).
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * U14F qualified to run with 32 sglists.
+ * Now DEBUG_RESET is disabled by default.
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ * Auto detects if U14F boards have an old firmware revision.
+ * Max number of scatter/gather lists set to 16 for all boards
+ * (most installations run fine using 33 sglists, while others
+ * have problems when using more than 16).
+ *
+ * 16 Jan 1995 rev. 1.13 for linux 1.1.81
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 15 Dec 1994 rev. 1.12 for linux 1.1.74
+ * The host->block flag is set for all the detected ISA boards.
+ *
+ * 30 Nov 1994 rev. 1.11 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 14 Nov 1994 rev. 1.10 for linux 1.1.63
+ *
+ * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release.
+ * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release.
+ *
+ * This driver is a total replacement of the original UltraStor
+ * scsi driver, but it supports ONLY the 14F and 34F boards.
+ * It can be configured in the same kernel in which the original
+ * ultrastor driver is configured to allow the original U24F
+ * support.
+ *
+ * Multiple U14F and/or U34F host adapters are supported.
+ *
+ * Copyright (C) 1994-1998 Dario Ballabio (dario@milano.europe.dg.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ * WARNING: if your 14/34F board has an old firmware revision (see below)
+ * you must change "#undef" into "#define" in the following
+ * statement.
+ */
+#undef HAVE_OLD_UX4F_FIRMWARE
+/*
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters.
+ * Here is the scoop on the various models:
+ *
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation).
+ *
+ * This code has been tested with up to two U14F boards, using both
+ * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware
+ * 28004-006/38004-005 (BIOS rev. 2.01).
+ *
+ * The latest firmware is required in order to get reliable operations when
+ * clustering is enabled. ENABLE_CLUSTERING provides a performance increase
+ * up to 50% on sequential access.
+ *
+ * Since the Scsi_Host_Template structure is shared among all 14F and 34F,
+ * the last setting of use_clustering is in effect for all of these boards.
+ *
+ * Here a sample configuration using two U14F boards:
+ *
+ U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8.
+ U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8.
+ *
+ * The boot controller must have its BIOS enabled, while other boards can
+ * have their BIOS disabled, or enabled to a higher address.
+ * Boards are named Ux4F0, Ux4F1..., according to the port address order in
+ * the io_port[] array.
+ *
+ * The following facts are based on real testing results (not on
+ * documentation) on the above U14F board.
+ *
+ * - The U14F board should be jumpered for bus on time less or equal to 7
+ * microseconds, while the default is 11 microseconds. This is in order to
+ * get acceptable performance while using floppy drive and hard disk
+ * together. The jumpering for 7 microseconds is: JP13 pin 15-16,
+ * JP14 pin 7-8 and pin 9-10.
+ * The reduction has a little impact on scsi performance.
+ *
+ * - If scsi bus length exceeds 3m., the scsi bus speed needs to be reduced
+ * from 10Mhz to 5Mhz (do this by inserting a jumper on JP13 pin 7-8).
+ *
+ * - If U14F on board firmware is older than 28004-006/38004-005,
+ * the U14F board is unable to provide reliable operations if the scsi
+ * request length exceeds 16Kbyte. When this length is exceeded the
+ * behavior is:
+ * - adapter_status equal 0x96 or 0xa3 or 0x93 or 0x94;
+ * - adapter_status equal 0 and target_status equal 2 on for all targets
+ * in the next operation following the reset.
+ * This sequence takes a long time (>3 seconds), so in the meantime
+ * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts
+ * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31).
+ * Because of this I had to DISABLE_CLUSTERING and to work around the
+ * bus reset in the interrupt service routine, returning DID_BUS_BUSY
+ * so that the operations are retried without complains from the scsi.c
+ * code.
+ * Any reset of the scsi bus is going to kill tape operations, since
+ * no retry is allowed for tapes. Bus resets are more likely when the
+ * scsi bus is under heavy load.
+ * Requests using scatter/gather have a maximum length of 16 x 1024 bytes
+ * when DISABLE_CLUSTERING is in effect, but unscattered requests could be
+ * larger than 16Kbyte.
+ *
+ * The new firmware has fixed all the above problems.
+ *
+ * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01),
+ * the latest firmware prom is 28008-006. Older firmware 28008-005 has
+ * problems when using more than 16 scatter/gather lists.
+ *
+ * The list of i/o ports to be probed can be totally replaced by the
+ * boot command line option: "u14-34f=port0,port1,port2,...", where the
+ * port0, port1... arguments are ISA/VESA addresses to be probed.
+ * For example using "u14-34f=0x230,0x340", the driver probes only the two
+ * addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables
+ * this driver.
+ *
+ * After the optional list of detection probes, other possible command line
+ * options are:
+ *
+ * eh:y use new scsi code (linux 2.2 only);
+ * eh:n use old scsi code;
+ * et:y use disk geometry returned by scsicam_bios_param;
+ * et:n use disk geometry jumpered on the board;
+ * lc:y enables linked commands;
+ * lc:n disables linked commands;
+ * of:y enables old firmware support;
+ * of:n disables old firmware support;
+ * mq:xx set the max queue depth to the value xx (2 <= xx <= 8).
+ *
+ * The default value is: "u14-34f=lc:n,of:n,mq:8,et:n".
+ * An example using the list of detection probes could be:
+ * "u14-34f=0x230,0x340,lc:y,of:n,mq:4,eh:n,et:n".
+ *
+ * When loading as a module, parameters can be specified as well.
+ * The above example would be (use 1 in place of y and 0 in place of n):
+ *
+ * modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \
+ * max_queue_depth=4 use_new_eh_code=0 ext_tran=0
+ *
+ * ----------------------------------------------------------------------------
+ * In this implementation, linked commands are designed to work with any DISK
+ * or CD-ROM, since this linking has only the intent of clustering (time-wise)
+ * and reordering by elevator sorting commands directed to each device,
+ * without any relation with the actual SCSI protocol between the controller
+ * and the device.
+ * If Q is the queue depth reported at boot time for each device (also named
+ * cmds/lun) and Q > 2, whenever there is already an active command to the
+ * device all other commands to the same device (up to Q-1) are kept waiting
+ * in the elevator sorting queue. When the active command completes, the
+ * commands in this queue are sorted by sector address. The sort is chosen
+ * between increasing or decreasing by minimizing the seek distance between
+ * the sector of the commands just completed and the sector of the first
+ * command in the list to be sorted.
+ * Trivial math assures that the unsorted average seek distance when doing
+ * random seeks over S sectors is S/3.
+ * When (Q-1) requests are uniformly distributed over S sectors, the average
+ * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
+ * average seek distance for (Q-1) random requests over S sectors is S/Q.
+ * The elevator sorting hence divides the seek distance by a factor Q/3.
+ * The above pure geometric remarks are valid in all cases and the
+ * driver effectively reduces the seek distance by the predicted factor
+ * when there are Q concurrent read i/o operations on the device, but this
+ * does not necessarily result in a noticeable performance improvement:
+ * your mileage may vary....
+ *
+ * Note: command reordering inside a batch of queued commands could cause
+ * wrong results only if there is at least one write request and the
+ * intersection (sector-wise) of all requests is not empty.
+ * When the driver detects a batch including overlapping requests
+ * (a really rare event) strict serial (pid) order is enforced.
+ * ----------------------------------------------------------------------------
+ *
+ * The boards are named Ux4F0, Ux4F1,... according to the detection order.
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = TRUE for all ISA boards.
+ */
+
+#include <linux/version.h>
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+#define MAX_INT_PARAM 10
+
+#if defined(MODULE)
+#include <linux/module.h>
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,26)
+MODULE_PARM(io_port, "1-" __MODULE_STRING(MAX_INT_PARAM) "i");
+MODULE_PARM(linked_comm, "i");
+MODULE_PARM(have_old_firmware, "i");
+MODULE_PARM(link_statistics, "i");
+MODULE_PARM(max_queue_depth, "i");
+MODULE_PARM(use_new_eh_code, "i");
+MODULE_PARM(ext_tran, "i");
+MODULE_AUTHOR("Dario Ballabio");
+#endif
+
+#endif
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include "u14-34f.h"
+#include <linux/stat.h>
+#include <linux/config.h>
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,36)
+#include <linux/init.h>
+#else
+#define __initfunc(A) A
+#define __initdata
+#define __init
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+#include <asm/spinlock.h>
+#define IRQ_FLAGS
+#define IRQ_LOCK
+#define IRQ_LOCK_SAVE
+#define IRQ_UNLOCK
+#define IRQ_UNLOCK_RESTORE
+#define SPIN_FLAGS unsigned long spin_flags;
+#define SPIN_LOCK spin_lock_irq(&io_request_lock);
+#define SPIN_LOCK_SAVE spin_lock_irqsave(&io_request_lock, spin_flags);
+#define SPIN_UNLOCK spin_unlock_irq(&io_request_lock);
+#define SPIN_UNLOCK_RESTORE \
+ spin_unlock_irqrestore(&io_request_lock, spin_flags);
+static int use_new_eh_code = TRUE;
+#else
+#define IRQ_FLAGS unsigned long irq_flags;
+#define IRQ_LOCK cli();
+#define IRQ_LOCK_SAVE do {save_flags(irq_flags); cli();} while (0);
+#define IRQ_UNLOCK sti();
+#define IRQ_UNLOCK_RESTORE do {restore_flags(irq_flags);} while (0);
+#define SPIN_FLAGS
+#define SPIN_LOCK
+#define SPIN_LOCK_SAVE
+#define SPIN_UNLOCK
+#define SPIN_UNLOCK_RESTORE
+static int use_new_eh_code = FALSE;
+#endif
+
+struct proc_dir_entry proc_scsi_u14_34f = {
+ PROC_SCSI_U14_34F, 6, "u14_34f",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* Values for the PRODUCT_ID ports for the 14/34F */
+#define PRODUCT_ID1 0x56
+#define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#undef DEBUG_LINKED_COMMANDS
+#undef DEBUG_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_RESET
+#undef DEBUG_GENERATE_ERRORS
+#undef DEBUG_GENERATE_ABORTS
+#undef DEBUG_GEOMETRY
+
+#define MAX_ISA 3
+#define MAX_VESA 1
+#define MAX_EISA 0
+#define MAX_PCI 0
+#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
+#define MAX_CHANNEL 1
+#define MAX_LUN 8
+#define MAX_TARGET 8
+#define MAX_MAILBOXES 16
+#define MAX_SGLIST 32
+#define MAX_SAFE_SGLIST 16
+#define MAX_INTERNAL_RETRIES 64
+#define MAX_CMD_PER_LUN 2
+#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
+
+#define SKIP ULONG_MAX
+#define FALSE 0
+#define TRUE 1
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define READY 5
+#define ABORTING 6
+#define NO_DMA 0xff
+#define MAXLOOP 10000
+
+#define REG_LCL_MASK 0
+#define REG_LCL_INTR 1
+#define REG_SYS_MASK 2
+#define REG_SYS_INTR 3
+#define REG_PRODUCT_ID1 4
+#define REG_PRODUCT_ID2 5
+#define REG_CONFIG1 6
+#define REG_CONFIG2 7
+#define REG_OGM 8
+#define REG_ICM 12
+#define REGION_SIZE 13
+#define BSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x01
+#define CMD_RESET 0xc0
+#define CMD_OGM_INTR 0x01
+#define CMD_CLR_INTR 0x01
+#define CMD_ENA_INTR 0x81
+#define ASOK 0x00
+#define ASST 0x91
+
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+#define YESNO(a) ((a) ? 'y' : 'n')
+#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
+
+#define PACKED __attribute__((packed))
+
+struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+ };
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+ unsigned char opcode: 3; /* type of command */
+ unsigned char xdir: 2; /* data transfer direction */
+ unsigned char dcn: 1; /* disable disconnect */
+ unsigned char ca: 1; /* use cache (if available) */
+ unsigned char sg: 1; /* scatter/gather operation */
+ unsigned char target: 3; /* SCSI target id */
+ unsigned char channel: 2; /* SCSI channel number */
+ unsigned char lun: 3; /* SCSI logical unit number */
+ unsigned int data_address PACKED; /* transfer data pointer */
+ unsigned int data_len PACKED; /* length in bytes */
+ unsigned int link_address PACKED; /* for linking command chains */
+ unsigned char clink_id; /* identifies command in chain */
+ unsigned char use_sg; /* (if sg is set) 8 bytes per list */
+ unsigned char sense_len;
+ unsigned char scsi_cdbs_len; /* 6, 10, or 12 */
+ unsigned char scsi_cdbs[12]; /* SCSI commands */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ unsigned int sense_addr PACKED;
+ Scsi_Cmnd *SCpnt;
+ unsigned int index; /* cp index */
+ struct sg_list *sglist;
+ };
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ char board_id[256]; /* data from INQUIRY on this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
+ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
+ unsigned int retries; /* Number of internal retries */
+ unsigned long last_retried_pid; /* Pid of last retried command */
+ unsigned char subversion; /* Bus type, either ISA or ESA */
+ unsigned char heads;
+ unsigned char sectors;
+
+ /* slot != 0 for the U24F, slot == 0 for both the U14F and U34F */
+ unsigned char slot;
+ };
+
+static struct Scsi_Host *sh[MAX_BOARDS + 1];
+static const char *driver_name = "Ux4F";
+static char sha[MAX_BOARDS];
+
+/* Initialize num_boards so that ihdlr can work while detect is in progress */
+static unsigned int num_boards = MAX_BOARDS;
+
+static unsigned long io_port[] __initdata = {
+
+ /* Space for MAX_INT_PARAM ports usable while loading as a module */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP,
+
+ /* Possible ISA/VESA ports */
+ 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140,
+
+ /* End of list */
+ 0x0
+ };
+
+#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
+#define BN(board) (HD(board)->board_name)
+
+#define SWAP_BYTE(x) ((unsigned long)( \
+ (((unsigned long)(x) & 0x000000ffU) << 24) | \
+ (((unsigned long)(x) & 0x0000ff00U) << 8) | \
+ (((unsigned long)(x) & 0x00ff0000U) >> 8) | \
+ (((unsigned long)(x) & 0xff000000U) >> 24)))
+
+#if defined(__BIG_ENDIAN)
+#define H2DEV(x) SWAP_BYTE(x)
+#else
+#define H2DEV(x) (x)
+#endif
+
+#define DEV2H(x) H2DEV(x)
+#define V2DEV(addr) ((addr) ? H2DEV(virt_to_bus((void *)addr)) : 0)
+#define DEV2V(addr) ((addr) ? DEV2H(bus_to_virt((unsigned long)addr)) : 0)
+
+static void do_interrupt_handler(int, void *, struct pt_regs *);
+static void flush_dev(Scsi_Device *, unsigned long, unsigned int, unsigned int);
+static int do_trace = FALSE;
+static int setup_done = FALSE;
+static int link_statistics = 0;
+static int ext_tran = FALSE;
+
+#if defined(HAVE_OLD_UX4F_FIRMWARE)
+static int have_old_firmware = TRUE;
+#else
+static int have_old_firmware = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS)
+static int linked_comm = TRUE;
+#else
+static int linked_comm = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_U14_34F_MAX_TAGS)
+static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS;
+#else
+static int max_queue_depth = MAX_CMD_PER_LUN;
+#endif
+
+static void select_queue_depths(struct Scsi_Host *host, Scsi_Device *devlist) {
+ Scsi_Device *dev;
+ int j, ntag = 0, nuntag = 0, tqd, utqd;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ j = ((struct hostdata *) host->hostdata)->board_number;
+
+ for(dev = devlist; dev; dev = dev->next) {
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ ntag++;
+ else
+ nuntag++;
+ }
+
+ utqd = MAX_CMD_PER_LUN;
+
+ tqd = (host->can_queue - utqd * nuntag) / (ntag ? ntag : 1);
+
+ if (tqd > max_queue_depth) tqd = max_queue_depth;
+
+ if (tqd < MAX_CMD_PER_LUN) tqd = MAX_CMD_PER_LUN;
+
+ for(dev = devlist; dev; dev = dev->next) {
+ char *tag_suffix = "", *link_suffix = "";
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ dev->queue_depth = tqd;
+ else
+ dev->queue_depth = utqd;
+
+ if (TLDEV(dev->type)) {
+ if (linked_comm && dev->queue_depth > 2)
+ link_suffix = ", sorted";
+ else
+ link_suffix = ", unsorted";
+ }
+
+ if (dev->tagged_supported && TLDEV(dev->type) && dev->tagged_queue)
+ tag_suffix = ", tagged";
+ else if (dev->tagged_supported && TLDEV(dev->type))
+ tag_suffix = ", untagged";
+
+ printk("%s: scsi%d, channel %d, id %d, lun %d, cmds/lun %d%s%s.\n",
+ BN(j), host->host_no, dev->channel, dev->id, dev->lun,
+ dev->queue_depth, link_suffix, tag_suffix);
+ }
+
+ IRQ_UNLOCK_RESTORE
+ return;
+}
+
+static inline int wait_on_busy(unsigned long iobase, unsigned int loop) {
+
+ while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) {
+ udelay(1L);
+ if (--loop == 0) return TRUE;
+ }
+
+ return FALSE;
+}
+
+static int board_inquiry(unsigned int j) {
+ struct mscp *cpp;
+ unsigned int time, limit = 0;
+
+ cpp = &HD(j)->cp[0];
+ memset(cpp, 0, sizeof(struct mscp));
+ cpp->opcode = OP_HOST_ADAPTER;
+ cpp->xdir = DTD_IN;
+ cpp->data_address = V2DEV(HD(j)->board_id);
+ cpp->data_len = H2DEV(sizeof(HD(j)->board_id));
+ cpp->scsi_cdbs_len = 6;
+ cpp->scsi_cdbs[0] = HA_CMD_INQUIRY;
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: board_inquiry, adapter busy.\n", BN(j));
+ return TRUE;
+ }
+
+ HD(j)->cp_stat[0] = IGNORE;
+
+ /* Clear the interrupt indication */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+ /* Store pointer in OGM address bytes */
+ outl(V2DEV(cpp), sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+
+ if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
+ HD(j)->cp_stat[0] = FREE;
+ printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+__initfunc (static inline int port_detect \
+ (unsigned long port_base, unsigned int j, Scsi_Host_Template *tpnt)) {
+ unsigned char irq, dma_channel, subversion, i;
+ unsigned char in_byte;
+ char *bus_type, dma_name[16];
+
+ /* Allowed BIOS base addresses (NULL indicates reserved) */
+ void *bios_segment_table[8] = {
+ NULL,
+ (void *) 0xc4000, (void *) 0xc8000, (void *) 0xcc000, (void *) 0xd0000,
+ (void *) 0xd4000, (void *) 0xd8000, (void *) 0xdc000
+ };
+
+ /* Allowed IRQs */
+ unsigned char interrupt_table[4] = { 15, 14, 11, 10 };
+
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+
+ /* Head/sector mappings */
+ struct {
+ unsigned char heads;
+ unsigned char sectors;
+ } mapping_table[4] = {
+ { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
+ };
+
+ struct config_1 {
+ unsigned char bios_segment: 3;
+ unsigned char removable_disks_as_fixed: 1;
+ unsigned char interrupt: 2;
+ unsigned char dma_channel: 2;
+ } config_1;
+
+ struct config_2 {
+ unsigned char ha_scsi_id: 3;
+ unsigned char mapping_mode: 2;
+ unsigned char bios_drive_number: 1;
+ unsigned char tfr_port: 2;
+ } config_2;
+
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ if(check_region(port_base, REGION_SIZE)) {
+ printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
+ return FALSE;
+ }
+
+ if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) return FALSE;
+
+ in_byte = inb(port_base + REG_PRODUCT_ID2);
+
+ if ((in_byte & 0xf0) != PRODUCT_ID2) return FALSE;
+
+ *(char *)&config_1 = inb(port_base + REG_CONFIG1);
+ *(char *)&config_2 = inb(port_base + REG_CONFIG2);
+
+ irq = interrupt_table[config_1.interrupt];
+ dma_channel = dma_channel_table[config_1.dma_channel];
+ subversion = (in_byte & 0x0f);
+
+ /* Board detected, allocate its IRQ */
+ if (request_irq(irq, do_interrupt_handler,
+ SA_INTERRUPT | ((subversion == ESA) ? SA_SHIRQ : 0),
+ driver_name, (void *) &sha[j])) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
+ return FALSE;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ free_irq(irq, &sha[j]);
+ return FALSE;
+ }
+
+ if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING;
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+ tpnt->use_new_eh_code = use_new_eh_code;
+#else
+ use_new_eh_code = FALSE;
+#endif
+
+ sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
+
+ if (sh[j] == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+
+ free_irq(irq, &sha[j]);
+
+ if (subversion == ISA) free_dma(dma_channel);
+
+ return FALSE;
+ }
+
+ sh[j]->io_port = port_base;
+ sh[j]->unique_id = port_base;
+ sh[j]->n_io_port = REGION_SIZE;
+ sh[j]->base = bios_segment_table[config_1.bios_segment];
+ sh[j]->irq = irq;
+ sh[j]->sg_tablesize = MAX_SGLIST;
+ sh[j]->this_id = config_2.ha_scsi_id;
+ sh[j]->can_queue = MAX_MAILBOXES;
+ sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
+ sh[j]->select_queue_depths = select_queue_depths;
+
+#if defined(DEBUG_DETECT)
+ {
+ unsigned char sys_mask, lcl_mask;
+
+ sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
+ lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
+ printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", sys_mask, lcl_mask);
+ }
+#endif
+
+ /* Probably a bogus host scsi id, set it to the dummy value */
+ if (sh[j]->this_id == 0) sh[j]->this_id = -1;
+
+ /* If BIOS is disabled, force enable interrupts */
+ if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);
+
+ /* Register the I/O space that we use */
+ request_region(sh[j]->io_port, sh[j]->n_io_port, driver_name);
+
+ memset(HD(j), 0, sizeof(struct hostdata));
+ HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
+ HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
+ HD(j)->subversion = subversion;
+ HD(j)->board_number = j;
+
+ if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+
+ if (HD(j)->subversion == ESA) {
+ sh[j]->unchecked_isa_dma = FALSE;
+ sh[j]->dma_channel = NO_DMA;
+ sprintf(BN(j), "U34F%d", j);
+ bus_type = "VESA";
+ }
+ else {
+ sh[j]->wish_block = TRUE;
+ sh[j]->unchecked_isa_dma = TRUE;
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ sh[j]->dma_channel = dma_channel;
+ sprintf(BN(j), "U14F%d", j);
+ bus_type = "ISA";
+ }
+
+ sh[j]->max_channel = MAX_CHANNEL - 1;
+ sh[j]->max_id = MAX_TARGET;
+ sh[j]->max_lun = MAX_LUN;
+
+ if (HD(j)->subversion == ISA && !board_inquiry(j)) {
+ HD(j)->board_id[40] = 0;
+
+ if (strcmp(&HD(j)->board_id[32], "06000600")) {
+ printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
+ printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
+ BN(j), &HD(j)->board_id[32]);
+ sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
+ sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+ }
+ }
+
+ if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
+ else sprintf(dma_name, "DMA %u", dma_channel);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if (! ((&HD(j)->cp[i])->sglist = kmalloc(
+ sh[j]->sg_tablesize * sizeof(struct sg_list),
+ (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
+ printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
+ u14_34f_release(sh[j]);
+ return FALSE;
+ }
+
+ if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
+ max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
+
+ if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;
+
+ if (j == 0) {
+ printk("UltraStor 14F/34F: Copyright (C) 1994-1998 Dario Ballabio.\n");
+ printk("%s config options -> of:%c, lc:%c, mq:%d, eh:%c, et:%c.\n",
+ driver_name, YESNO(have_old_firmware), YESNO(linked_comm),
+ max_queue_depth, YESNO(use_new_eh_code), YESNO(ext_tran));
+ }
+
+ printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n",
+ BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base,
+ sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
+
+ if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
+ printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
+ BN(j), sh[j]->max_id, sh[j]->max_lun);
+
+ for (i = 0; i <= sh[j]->max_channel; i++)
+ printk("%s: SCSI channel %u enabled, host target ID %d.\n",
+ BN(j), i, sh[j]->this_id);
+
+ return TRUE;
+}
+
+__initfunc (void u14_34f_setup(char *str, int *ints)) {
+ int i, argc = ints[0];
+ char *cur = str, *pc;
+
+ if (argc > 0) {
+
+ if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;
+
+ for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];
+
+ io_port[i] = 0;
+ setup_done = TRUE;
+ }
+
+ while (cur && (pc = strchr(cur, ':'))) {
+ int val = 0, c = *++pc;
+
+ if (c == 'n' || c == 'N') val = FALSE;
+ else if (c == 'y' || c == 'Y') val = TRUE;
+ else val = (int) simple_strtoul(pc, NULL, 0);
+
+ if (!strncmp(cur, "lc:", 3)) linked_comm = val;
+ else if (!strncmp(cur, "of:", 3)) have_old_firmware = val;
+ else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
+ else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
+ else if (!strncmp(cur, "eh:", 3)) use_new_eh_code = val;
+ else if (!strncmp(cur, "et:", 3)) ext_tran = val;
+
+ if ((cur = strchr(cur, ','))) ++cur;
+ }
+
+ return;
+}
+
+__initfunc (int u14_34f_detect(Scsi_Host_Template *tpnt)) {
+ unsigned int j = 0, k;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ tpnt->proc_dir = &proc_scsi_u14_34f;
+
+#if defined(MODULE)
+ /* io_port could have been modified when loading as a module */
+ if(io_port[0] != SKIP) {
+ setup_done = TRUE;
+ io_port[MAX_INT_PARAM] = 0;
+ }
+#endif
+
+ for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
+
+ for (k = 0; io_port[k]; k++) {
+
+ if (io_port[k] == SKIP) continue;
+
+ if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
+ }
+
+ num_boards = j;
+ IRQ_UNLOCK_RESTORE
+ return j;
+}
+
+static inline void build_sg_list(struct mscp *cpp, Scsi_Cmnd *SCpnt) {
+ unsigned int k, data_len = 0;
+ struct scatterlist *sgpnt;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ for (k = 0; k < SCpnt->use_sg; k++) {
+ cpp->sglist[k].address = V2DEV(sgpnt[k].address);
+ cpp->sglist[k].num_bytes = H2DEV(sgpnt[k].length);
+ data_len += sgpnt[k].length;
+ }
+
+ cpp->use_sg = SCpnt->use_sg;
+ cpp->data_address = V2DEV(cpp->sglist);
+ cpp->data_len = H2DEV(data_len);
+}
+
+static inline int do_qcomm(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ unsigned int i, j, k;
+ struct mscp *cpp;
+
+ static const unsigned char data_out_cmds[] = {
+ 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
+ 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
+ 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b
+ };
+
+ static const unsigned char data_none_cmds[] = {
+ 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
+ 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
+ 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5
+ };
+
+ /* j is the board number */
+ j = ((struct hostdata *) SCpnt->host->hostdata)->board_number;
+
+ if (SCpnt->host_scribble)
+ panic("%s: qcomm, pid %ld, SCpnt %p already active.\n",
+ BN(j), SCpnt->pid, SCpnt);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = HD(j)->last_cp_used + 1;
+
+ for (k = 0; k < sh[j]->can_queue; k++, i++) {
+
+ if (i >= sh[j]->can_queue) i = 0;
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ HD(j)->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == sh[j]->can_queue) {
+ printk("%s: qcomm, no free mailbox.\n", BN(j));
+ return 1;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &HD(j)->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp) - sizeof(struct sg_list *));
+ SCpnt->scsi_done = done;
+ cpp->index = i;
+ SCpnt->host_scribble = (unsigned char *) &cpp->index;
+
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid);
+
+ cpp->xdir = DTD_IN;
+
+ for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
+ if (SCpnt->cmnd[0] == data_out_cmds[k]) {
+ cpp->xdir = DTD_OUT;
+ break;
+ }
+
+ if (cpp->xdir == DTD_IN)
+ for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
+ if (SCpnt->cmnd[0] == data_none_cmds[k]) {
+ cpp->xdir = DTD_NONE;
+ break;
+ }
+
+ cpp->opcode = OP_SCSI;
+ cpp->channel = SCpnt->channel;
+ cpp->target = SCpnt->target;
+ cpp->lun = SCpnt->lun;
+ cpp->SCpnt = SCpnt;
+ cpp->sense_addr = V2DEV(SCpnt->sense_buffer);
+ cpp->sense_len = sizeof SCpnt->sense_buffer;
+
+ if (SCpnt->use_sg) {
+ cpp->sg = TRUE;
+ build_sg_list(cpp, SCpnt);
+ }
+ else {
+ cpp->data_address = V2DEV(SCpnt->request_buffer);
+ cpp->data_len = H2DEV(SCpnt->request_bufflen);
+ }
+
+ cpp->scsi_cdbs_len = SCpnt->cmd_len;
+ memcpy(cpp->scsi_cdbs, SCpnt->cmnd, cpp->scsi_cdbs_len);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type)) {
+ HD(j)->cp_stat[i] = READY;
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, FALSE);
+ return 0;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, target %d.%d:%d, pid %ld, adapter busy.\n",
+ BN(j), SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid);
+ return 1;
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl(V2DEV(cpp), sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ HD(j)->cp_stat[i] = IN_USE;
+ return 0;
+}
+
+int u14_34f_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_qcomm(SCpnt, done);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+static inline int do_old_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL ||
+ (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout))) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int u14_34f_old_abort(Scsi_Cmnd *SCarg) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_abort(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+static inline int do_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SUCCESS;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ if (SCarg->eh_state == SCSI_STATE_TIMEOUT) {
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d, eh_state timeout, pid %ld.\n",
+ BN(j), i, SCarg->pid);
+ return SUCCESS;
+ }
+
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int u14_34f_abort(Scsi_Cmnd *SCarg) {
+
+ return do_abort(SCarg);
+}
+
+#endif /* new_eh_code */
+
+static inline int do_old_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout)) {
+ printk("%s: reset, pid %ld, reset not running.\n", BN(j), SCarg->pid);
+ return SCSI_RESET_NOT_RUNNING;
+ }
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ HD(j)->retries = 0;
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) {
+ printk("%s: reset, exit, success.\n", BN(j));
+ return SCSI_RESET_SUCCESS;
+ }
+ else {
+ printk("%s: reset, exit, wakeup.\n", BN(j));
+ return SCSI_RESET_PUNT;
+ }
+}
+
+int u14_34f_old_reset(Scsi_Cmnd *SCarg, unsigned int reset_flags) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_reset(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+static inline int do_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return FAILED;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ HD(j)->retries = 0;
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->pid);
+ else printk("%s: reset, exit.\n", BN(j));
+
+ return SUCCESS;
+}
+
+int u14_34f_reset(Scsi_Cmnd *SCarg) {
+
+ return do_reset(SCarg);
+}
+
+#endif /* new_eh_code */
+
+int u14_34f_biosparam(Disk *disk, kdev_t dev, int *dkinfo) {
+ unsigned int j = 0;
+ int size = disk->capacity;
+
+ dkinfo[0] = HD(j)->heads;
+ dkinfo[1] = HD(j)->sectors;
+ dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors);
+
+ if (ext_tran && (scsicam_bios_param(disk, dev, dkinfo) < 0)) {
+ dkinfo[0] = 255;
+ dkinfo[1] = 63;
+ dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
+ }
+
+#if defined (DEBUG_GEOMETRY)
+ printk ("%s: biosparam, head=%d, sec=%d, cyl=%d.\n", driver_name,
+ dkinfo[0], dkinfo[1], dkinfo[2]);
+#endif
+
+ return FALSE;
+}
+
+static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
+ unsigned int rev) {
+ unsigned int i, j, k, y;
+ unsigned long x;
+
+ for (i = 0; i < n - 1; i++) {
+ k = i;
+
+ for (j = k + 1; j < n; j++)
+ if (rev) {
+ if (sk[j] > sk[k]) k = j;
+ }
+ else {
+ if (sk[j] < sk[k]) k = j;
+ }
+
+ if (k != i) {
+ x = sk[k]; sk[k] = sk[i]; sk[i] = x;
+ y = da[k]; da[k] = da[i]; da[i] = y;
+ }
+ }
+
+ return;
+ }
+
+static inline int reorder(unsigned int j, unsigned long cursec,
+ unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n;
+ unsigned int rev = FALSE, s = TRUE, r = TRUE;
+ unsigned int input_only = TRUE, overlap = FALSE;
+ unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
+ unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
+ unsigned long ioseek = 0;
+
+ static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
+ static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
+ static unsigned int readysorted = 0, revcount = 0;
+ static unsigned long seeksorted = 0, seeknosort = 0;
+
+ if (link_statistics && !(++flushcount % link_statistics))
+ printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
+ " av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
+ ovlcount, readycount, readysorted, sortcount, revcount,
+ seeknosort / (readycount + 1),
+ seeksorted / (readycount + 1));
+
+ if (n_ready <= 1) return FALSE;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
+
+ if (SCpnt->request.sector < minsec) minsec = SCpnt->request.sector;
+ if (SCpnt->request.sector > maxsec) maxsec = SCpnt->request.sector;
+
+ sl[n] = SCpnt->request.sector;
+ ioseek += SCpnt->request.nr_sectors;
+
+ if (!n) continue;
+
+ if (sl[n] < sl[n - 1]) s = FALSE;
+ if (sl[n] > sl[n - 1]) r = FALSE;
+
+ if (link_statistics) {
+ if (sl[n] > sl[n - 1])
+ seek += sl[n] - sl[n - 1];
+ else
+ seek += sl[n - 1] - sl[n];
+ }
+
+ }
+
+ if (link_statistics) {
+ if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
+ }
+
+ if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;
+
+ if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;
+
+ if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);
+
+ if (!input_only) for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ ll[n] = SCpnt->request.nr_sectors; pl[n] = SCpnt->pid;
+
+ if (!n) continue;
+
+ if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
+ || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
+ }
+
+ if (overlap) sort(pl, il, n_ready, FALSE);
+
+ if (link_statistics) {
+ if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
+ batchcount++; readycount += n_ready, seeknosort += seek / 1024;
+ if (input_only) inputcount++;
+ if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
+ else seeksorted += (iseek + maxsec - minsec) / 1024;
+ if (rev && !r) { revcount++; readysorted += n_ready; }
+ if (!rev && !s) { sortcount++; readysorted += n_ready; }
+ }
+
+#if defined(DEBUG_LINKED_COMMANDS)
+ if (link_statistics && (overlap || !(flushcount % link_statistics)))
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
+ (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid, k, flushcount, n_ready,
+ SCpnt->request.sector, SCpnt->request.nr_sectors, cursec,
+ YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ YESNO(overlap), cpp->xdir);
+ }
+#endif
+ return overlap;
+}
+
+static void flush_dev(Scsi_Device *dev, unsigned long cursec, unsigned int j,
+ unsigned int ihdlr) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];
+
+ for (k = 0; k < sh[j]->can_queue; k++) {
+
+ if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;
+
+ cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (SCpnt->device != dev) continue;
+
+ if (HD(j)->cp_stat[k] == IN_USE) return;
+
+ il[n_ready++] = k;
+ }
+
+ if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: %s, target %d.%d:%d, pid %ld, mbox %d, adapter"\
+ " busy, will abort.\n", BN(j), (ihdlr ? "ihdlr" : "qcomm"),
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid, k);
+ HD(j)->cp_stat[k] = ABORTING;
+ continue;
+ }
+
+ outl(V2DEV(cpp), sh[j]->io_port + REG_OGM);
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+ HD(j)->cp_stat[k] = IN_USE;
+ }
+
+}
+
+static inline void ihdlr(int irq, unsigned int j) {
+ Scsi_Cmnd *SCpnt;
+ unsigned int i, k, c, status, tstatus, reg, ret;
+ struct mscp *spp, *cpp;
+
+ if (sh[j]->irq != irq)
+ panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+
+ /* Check if this board need to be serviced */
+ if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) return;
+
+ HD(j)->iocount++;
+
+ if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ /* Check if this board is still busy */
+ if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+ printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ return;
+ }
+
+ ret = inl(sh[j]->io_port + REG_ICM);
+ spp = (struct mscp *)DEV2V(ret);
+ cpp = spp;
+
+ /* Clear interrupt pending flag */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+#if defined(DEBUG_GENERATE_ABORTS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) return;
+#endif
+
+ /* Find the mailbox to be serviced on this board */
+ i = cpp - HD(j)->cp;
+
+ if (cpp < HD(j)->cp || cpp >= HD(j)->cp + sh[j]->can_queue
+ || i >= sh[j]->can_queue)
+ panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
+ (void *)ret, HD(j)->cp);
+
+ if (HD(j)->cp_stat[i] == IGNORE) {
+ HD(j)->cp_stat[i] = FREE;
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
+ else if (HD(j)->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
+ BN(j), i, HD(j)->cp_stat[i]);
+
+ HD(j)->cp_stat[i] = FREE;
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i,
+ SCpnt->pid, SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n",
+ BN(j), i, SCpnt->pid, *(unsigned int *)SCpnt->host_scribble);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type))
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, TRUE);
+
+ tstatus = status_byte(spp->target_status);
+
+#if defined(DEBUG_GENERATE_ERRORS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
+ spp->adapter_status = 0x01;
+#endif
+
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
+ && HD(j)->target_redo[SCpnt->target][SCpnt->channel])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ HD(j)->target_redo[SCpnt->target][SCpnt->channel] = FALSE;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK)
+ printk("%s: ihdlr, target %d.%d:%d, pid %ld, "\
+ "target_status 0x%x, sense key 0x%x.\n", BN(j),
+ SCpnt->channel, SCpnt->target, SCpnt->lun,
+ SCpnt->pid, spp->target_status,
+ SCpnt->sense_buffer[2]);
+
+ HD(j)->target_to[SCpnt->target][SCpnt->channel] = 0;
+
+ if (HD(j)->last_retried_pid == SCpnt->pid) HD(j)->retries = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+
+ if (HD(j)->target_to[SCpnt->target][SCpnt->channel] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ HD(j)->target_to[SCpnt->target][SCpnt->channel]++;
+ }
+
+ break;
+
+ /* Perform a limited number of internal retries */
+ case 0x93: /* Unexpected bus free */
+ case 0x94: /* Target bus phase sequence failure */
+ case 0x96: /* Illegal SCSI command */
+ case 0xa3: /* SCSI bus reset error */
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++)
+ HD(j)->target_redo[k][c] = TRUE;
+
+
+ case 0x92: /* Data over/under-run */
+
+ if (SCpnt->device->type != TYPE_TAPE
+ && HD(j)->retries < MAX_INTERNAL_RETRIES) {
+
+#if defined(DID_SOFT_ERROR)
+ status = DID_SOFT_ERROR << 16;
+#else
+ status = DID_BUS_BUSY << 16;
+#endif
+
+ HD(j)->retries++;
+ HD(j)->last_retried_pid = SCpnt->pid;
+ }
+ else
+ status = DID_ERROR << 16;
+
+ break;
+ case 0x01: /* Invalid command */
+ case 0x02: /* Invalid parameters */
+ case 0x03: /* Invalid data list */
+ case 0x84: /* SCSI bus abort error */
+ case 0x9b: /* Auto request sense error */
+ case 0x9f: /* Unexpected command complete message error */
+ case 0xff: /* Invalid parameter in the S/G list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+
+#if defined(DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
+ do_trace || msg_byte(spp->target_status))
+#endif
+ printk("%s: ihdlr, mbox %2d, err 0x%x:%x,"\
+ " target %d.%d:%d, pid %ld, reg 0x%x, count %d.\n",
+ BN(j), i, spp->adapter_status, spp->target_status,
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid,
+ reg, HD(j)->iocount);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ SCpnt->scsi_done(SCpnt);
+
+ if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ return;
+}
+
+static void do_interrupt_handler(int irq, void *shap, struct pt_regs *regs) {
+ unsigned int j;
+ IRQ_FLAGS
+ SPIN_FLAGS
+
+ /* Check if the interrupt must be processed by this handler */
+ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
+
+ SPIN_LOCK_SAVE
+ IRQ_LOCK_SAVE
+ ihdlr(irq, j);
+ IRQ_UNLOCK_RESTORE
+ SPIN_UNLOCK_RESTORE
+}
+
+int u14_34f_release(struct Scsi_Host *shpnt) {
+ unsigned int i, j;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+
+ for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);
+
+ if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n",
+ driver_name);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if ((&HD(j)->cp[i])->sglist) kfree((&HD(j)->cp[i])->sglist);
+
+ free_irq(sh[j]->irq, &sha[j]);
+
+ if (sh[j]->dma_channel != NO_DMA) free_dma(sh[j]->dma_channel);
+
+ release_region(sh[j]->io_port, sh[j]->n_io_port);
+ scsi_unregister(sh[j]);
+ IRQ_UNLOCK_RESTORE
+ return FALSE;
+}
+
+#if defined(MODULE)
+Scsi_Host_Template driver_template = ULTRASTOR_14_34F;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/u14-34f.h b/linux/src/drivers/scsi/u14-34f.h
new file mode 100644
index 0000000..943b8cb
--- /dev/null
+++ b/linux/src/drivers/scsi/u14-34f.h
@@ -0,0 +1,60 @@
+/*
+ * u14-34f.h - used by the low-level driver for UltraStor 14F/34F
+ */
+#ifndef _U14_34F_H
+#define _U14_34F_H
+
+#include <scsi/scsicam.h>
+#include <linux/version.h>
+
+/* Entry points exported to the SCSI mid-layer via the host template. */
+int u14_34f_detect(Scsi_Host_Template *);
+int u14_34f_release(struct Scsi_Host *);
+int u14_34f_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int u14_34f_abort(Scsi_Cmnd *);
+int u14_34f_old_abort(Scsi_Cmnd *);
+int u14_34f_reset(Scsi_Cmnd *);
+int u14_34f_old_reset(Scsi_Cmnd *, unsigned int);
+int u14_34f_biosparam(Disk *, kdev_t, int *);
+
+#define U14_34F_VERSION "4.33.00"
+
+/* Builds a comparable kernel version number (same scheme as KERNEL_VERSION). */
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+/* Two template variants: kernels >= 2.1.101 have the new eh_* error
+   handling hooks; older kernels use only the abort/reset pair.
+   The "label:" initializers below are old GNU-style designated
+   initializers (pre-C99 gcc extension). */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+#define ULTRASTOR_14_34F { \
+ name: "UltraStor 14F/34F rev. " U14_34F_VERSION " ", \
+ detect: u14_34f_detect, \
+ release: u14_34f_release, \
+ queuecommand: u14_34f_queuecommand, \
+ abort: u14_34f_old_abort, \
+ reset: u14_34f_old_reset, \
+ eh_abort_handler: u14_34f_abort, \
+ eh_device_reset_handler: NULL, \
+ eh_bus_reset_handler: NULL, \
+ eh_host_reset_handler: u14_34f_reset, \
+ bios_param: u14_34f_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 1 /* Enable new error code */ \
+ }
+
+#else /* Use old scsi code */
+
+#define ULTRASTOR_14_34F { \
+ name: "UltraStor 14F/34F rev. " U14_34F_VERSION " ", \
+ detect: u14_34f_detect, \
+ release: u14_34f_release, \
+ queuecommand: u14_34f_queuecommand, \
+ abort: u14_34f_old_abort, \
+ reset: u14_34f_old_reset, \
+ bios_param: u14_34f_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING \
+ }
+
+#endif
+
+#endif
diff --git a/linux/src/drivers/scsi/ultrastor.c b/linux/src/drivers/scsi/ultrastor.c
new file mode 100644
index 0000000..de82472
--- /dev/null
+++ b/linux/src/drivers/scsi/ultrastor.c
@@ -0,0 +1,1165 @@
+/*
+ * ultrastor.c Copyright (C) 1992 David B. Gentzel
+ * Low-level SCSI driver for UltraStor 14F, 24F, and 34F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F and multiple command support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (ericy@cais.com).
+ * Thanks to UltraStor for providing the necessary documentation
+ */
+
+/*
+ * TODO:
+ * 1. Find out why scatter/gather is limited to 16 requests per command.
+ * This is fixed, at least on the 24F, as of version 1.12 - CAE.
+ * 2. Look at command linking (mscp.command_link and
+ * mscp.command_link_id). (Does not work with many disks,
+ * and no performance increase. ERY).
+ * 3. Allow multiple adapters.
+ */
+
+/*
+ * NOTES:
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters. They all support command queueing
+ * and scatter/gather I/O. Some of them can also emulate the standard
+ * WD1003 interface for use with OS's which don't support SCSI. Here
+ * is the scoop on the various models:
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 14N - ISA HA with floppy support. I think that this is a non-DMA
+ * HA. Nothing further known.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VL-Bus Bus Master HA with floppy support (no WD1003 emulation).
+ *
+ * The 14F, 24F, and 34F are supported by this driver.
+ *
+ * Places flagged with a triple question-mark are things which are either
+ * unfinished, questionable, or wrong.
+ */
+
+/* Changes from version 1.11 alpha to 1.12
+ *
+ * Increased the size of the scatter-gather list to 33 entries for
+ * the 24F adapter (it was 16). I don't have the specs for the 14F
+ * or the 34F, so they may support larger s-g lists as well.
+ *
+ * Caleb Epstein <cae@jpmorgan.com>
+ */
+
+/* Changes from version 1.9 to 1.11
+ *
+ * Patches to bring this driver up to speed with the default kernel
+ * driver which supports only the 14F and 34F adapters. This version
+ * should compile cleanly into 0.99.13, 0.99.12 and probably 0.99.11.
+ *
+ * Fixes from Eric Youngdale to fix a few possible race conditions and
+ * several problems with bit testing operations (insufficient
+ * parentheses).
+ *
+ * Removed the ultrastor_abort() and ultrastor_reset() functions
+ * (enclosed them in #if 0 / #endif). These functions, at least on
+ * the 24F, cause the SCSI bus to do odd things and generally lead to
+ * kernel panics and machine hangs. This is like the Adaptec code.
+ *
+ * Use check/snarf_region for 14f, 34f to avoid I/O space address conflicts.
+ */
+
+/* Changes from version 1.8 to version 1.9
+ *
+ * 0.99.11 patches (cae@jpmorgan.com) */
+
+/* Changes from version 1.7 to version 1.8
+ *
+ * Better error reporting.
+ */
+
+/* Changes from version 1.6 to version 1.7
+ *
+ * Removed CSIR command code.
+ *
+ * Better race condition avoidance (xchgb function added).
+ *
+ * Set ICM and OGM status to zero at probe (24F)
+ *
+ * reset sends soft reset to UltraStor adapter
+ *
+ * reset adapter if adapter interrupts with an invalid MSCP address
+ *
+ * handle aborted command interrupt (24F)
+ *
+ */
+
+/* Changes from version 1.5 to version 1.6:
+ *
+ * Read MSCP address from ICM _before_ clearing the interrupt flag.
+ * This fixes a race condition.
+ */
+
+/* Changes from version 1.4 to version 1.5:
+ *
+ * Abort now calls done when multiple commands are enabled.
+ *
+ * Clear busy when aborted command finishes, not when abort is called.
+ *
+ * More debugging messages for aborts.
+ */
+
+/* Changes from version 1.3 to version 1.4:
+ *
+ * Enable automatic request of sense data on error (requires newer version
+ * of scsi.c to be useful).
+ *
+ * Fix PORT_OVERRIDE for 14F.
+ *
+ * Fix abort and reset to work properly (config.aborted wasn't cleared
+ * after it was tested, so after a command abort no further commands would
+ * work).
+ *
+ * Boot time test to enable SCSI bus reset (defaults to not allowing reset).
+ *
+ * Fix test for OGM busy -- the busy bit is in different places on the 24F.
+ *
+ * Release ICM slot by clearing first byte on 24F.
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+
+#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "ultrastor.h"
+#include "sd.h"
+#include<linux/stat.h>
+
+/* /proc/scsi/ultrastor directory entry; 9 is strlen("ultrastor"). */
+struct proc_dir_entry proc_scsi_ultrastor = {
+ PROC_SCSI_ULTRASTOR, 9, "ultrastor",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define FALSE 0
+#define TRUE 1
+
+/* Debug categories compiled in by default (UD_* bits from ultrastor.h). */
+#ifndef ULTRASTOR_DEBUG
+#define ULTRASTOR_DEBUG (UD_ABORT|UD_CSIR|UD_RESET)
+#endif
+
+#define VERSION "1.12"
+
+/* Element count of a true array (not valid on pointers). */
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+
+#define PACKED __attribute__((packed))
+#define ALIGNED(x) __attribute__((aligned(x)))
+
+
+/* The 14F uses an array of 4-byte ints for its scatter/gather list.
+ The data can be unaligned, but need not be. It's easier to give
+ the list normal alignment since it doesn't need to fit into a
+ packed structure. */
+
+/* One scatter/gather element as consumed by the adapter: a 32-bit bus
+   address plus a 32-bit byte count. */
+typedef struct {
+ unsigned int address;
+ unsigned int num_bytes;
+} ultrastor_sg_list;
+
+
+/* MailBox SCSI Command Packet. Basic command structure for communicating
+ with controller.  Field order and the PACKED 32-bit members match the
+ hardware's expected layout; do not reorder.  The trailing software-only
+ fields are never read by the adapter. */
+struct mscp {
+ unsigned char opcode: 3; /* type of command */
+ unsigned char xdir: 2; /* data transfer direction */
+ unsigned char dcn: 1; /* disable disconnect */
+ unsigned char ca: 1; /* use cache (if available) */
+ unsigned char sg: 1; /* scatter/gather operation */
+ unsigned char target_id: 3; /* target SCSI id */
+ unsigned char ch_no: 2; /* SCSI channel (always 0 for 14f) */
+ unsigned char lun: 3; /* logical unit number */
+ unsigned int transfer_data PACKED; /* transfer data pointer */
+ unsigned int transfer_data_length PACKED; /* length in bytes */
+ unsigned int command_link PACKED; /* for linking command chains */
+ unsigned char scsi_command_link_id; /* identifies command in chain */
+ unsigned char number_of_sg_list; /* (if sg is set) 8 bytes per list */
+ unsigned char length_of_sense_byte;
+ unsigned char length_of_scsi_cdbs; /* 6, 10, or 12 */
+ unsigned char scsi_cdbs[12]; /* SCSI commands */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ unsigned int sense_data PACKED;
+ /* The following fields are for software only. They are included in
+ the MSCP structure because they are associated with SCSI requests. */
+ void (*done)(Scsi_Cmnd *);
+ Scsi_Cmnd *SCint;
+ ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
+};
+
+
+/* Port addresses (relative to the base address) */
+#define U14F_PRODUCT_ID(port) ((port) + 0x4)
+#define CONFIG(port) ((port) + 0x6)
+
+/* Port addresses relative to the doorbell base address. */
+#define LCL_DOORBELL_MASK(port) ((port) + 0x0)
+#define LCL_DOORBELL_INTR(port) ((port) + 0x1)
+#define SYS_DOORBELL_MASK(port) ((port) + 0x2)
+#define SYS_DOORBELL_INTR(port) ((port) + 0x3)
+
+
+/* Used to store configuration info read from config i/o registers. Most of
+ this is not used yet, but might as well save it.
+
+ This structure also holds port addresses that are not at the same offset
+ on the 14F and 24F.
+
+ This structure holds all data that must be duplicated to support multiple
+ adapters.  (A single static instance is used, so only one adapter is
+ supported -- see TODO item 3 at the top of the file.) */
+
+static struct ultrastor_config
+{
+ unsigned short port_address; /* base address of card */
+ unsigned short doorbell_address; /* base address of doorbell CSRs */
+ unsigned short ogm_address; /* base address of OGM */
+ unsigned short icm_address; /* base address of ICM */
+ const void *bios_segment;
+ unsigned char interrupt: 4;
+ unsigned char dma_channel: 3;
+ unsigned char bios_drive_number: 1;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned char ha_scsi_id: 3;
+ unsigned char subversion: 4;
+ unsigned char revision;
+ /* The slot number is used to distinguish the 24F (slot != 0) from
+ the 14F and 34F (slot == 0). */
+ unsigned char slot;
+
+#ifdef PRINT_U24F_VERSION
+ volatile int csir_done;
+#endif
+
+ /* A pool of MSCP structures for this adapter, and a bitmask of
+ busy structures. (If ULTRASTOR_14F_MAX_CMDS == 1, a 1 byte
+ busy flag is used instead.) */
+
+#if ULTRASTOR_MAX_CMDS == 1
+ unsigned char mscp_busy;
+#else
+ unsigned short mscp_free;
+#endif
+ volatile unsigned char aborted[ULTRASTOR_MAX_CMDS];
+ struct mscp mscp[ULTRASTOR_MAX_CMDS];
+} config = {0};
+
+/* Set this to 1 to reset the SCSI bus on error. */
+int ultrastor_bus_reset = 0;
+
+
+/* Allowed BIOS base addresses (NULL indicates reserved) */
+static const void *const bios_segment_table[8] = {
+ NULL, (void *)0xC4000, (void *)0xC8000, (void *)0xCC000,
+ (void *)0xD0000, (void *)0xD4000, (void *)0xD8000, (void *)0xDC000,
+};
+
+/* Allowed IRQs for 14f */
+static const unsigned char interrupt_table_14f[4] = { 15, 14, 11, 10 };
+
+/* Allowed DMA channels for 14f (0 indicates reserved) */
+static const unsigned char dma_channel_table_14f[4] = { 5, 6, 7, 0 };
+
+/* Head/sector mappings allowed by 14f */
+static const struct {
+ unsigned char heads;
+ unsigned char sectors;
+} mapping_table[4] = { { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 } };
+
+#ifndef PORT_OVERRIDE
+/* ??? A probe of address 0x310 screws up NE2000 cards */
+static const unsigned short ultrastor_ports_14f[] = {
+ 0x330, 0x340, /*0x310,*/ 0x230, 0x240, 0x210, 0x130, 0x140,
+};
+#endif
+
+static void ultrastor_interrupt(int, void *, struct pt_regs *);
+static inline void build_sg_list(struct mscp *, Scsi_Cmnd *SCpnt);
+
+
+/* Atomically (interrupts disabled) find the lowest set bit in the 16-bit
+   free-MSCP mask, clear it, and return its index.  The asm loops (bsfw/btr,
+   retry on carry clear) until it successfully claims a bit.  Panics if the
+   mask is empty -- callers must check for a free MSCP first. */
+static inline int find_and_clear_bit_16(unsigned short *field)
+{
+ int rv;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if (*field == 0) panic("No free mscp");
+ asm("xorl %0,%0\n0:\tbsfw %1,%w0\n\tbtr %0,%1\n\tjnc 0b"
+ : "=&r" (rv), "+m" (*field));
+ restore_flags(flags);
+ return rv;
+}
+
+/* This has been re-implemented with the help of Richard Earnshaw,
+ <rwe@pegasus.esprit.ec.org> and works with gcc-2.5.8 and gcc-2.6.0.
+ The instability noted by jfc below appears to be a bug in
+ gcc-2.5.x when compiling w/o optimization. --Caleb
+
+ This asm is fragile: it doesn't work without the casts and it may
+ not work without optimization. Maybe I should add a swap builtin
+ to gcc. --jfc */
+/* Atomic byte exchange: store reg into *mem and return the old value.
+   x86 xchg with a memory operand carries an implicit bus lock, so this is
+   safe against the interrupt handler without an explicit cli(). */
+static inline unsigned char xchgb(unsigned char reg,
+ volatile unsigned char *mem)
+{
+ __asm__ ("xchgb %0,%1" : "=q" (reg), "=m" (*mem) : "0" (reg));
+ return reg;
+}
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+
+/* Debug helper: print which MSCP slots are busy (by target id) or free
+   at the time of an abort.  Builds the message by patching characters
+   into a static format buffer, so it is not reentrant -- acceptable here
+   because it runs with interrupts disabled. */
+static void log_ultrastor_abort(register struct ultrastor_config *config,
+ int command)
+{
+ static char fmt[80] = "abort %d (%x); MSCP free pool: %x;";
+ register int i;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ fmt[20 + i*2] = ' ';
+ if (! (config->mscp_free & (1 << i)))
+ fmt[21 + i*2] = '0' + config->mscp[i].target_id;
+ else
+ fmt[21 + i*2] = '-';
+ }
+ fmt[20 + ULTRASTOR_MAX_CMDS * 2] = '\n';
+ fmt[21 + ULTRASTOR_MAX_CMDS * 2] = 0;
+ printk(fmt, command, &config->mscp[command], config->mscp_free);
+ restore_flags(flags);
+}
+#endif
+
+/* Probe for a 14F or 34F host adapter.
+   Checks the two product-ID bytes at PORT_OVERRIDE (if defined) or at each
+   candidate address in ultrastor_ports_14f[], then reads the two config
+   registers to fill in the global `config`, claims the I/O region, IRQ and
+   (14F only) DMA channel, and fills in the template fields.
+   Returns TRUE on success, FALSE otherwise.
+   Fixes vs. original: the claimed I/O region is now released on every
+   failure path after request_region() (it used to leak), and the version
+   banner had a missing space ("version1.12"). */
+static int ultrastor_14f_detect(Scsi_Host_Template * tpnt)
+{
+  size_t i;
+  unsigned char in_byte, version_byte = 0;
+  struct config_1 {
+    unsigned char bios_segment: 3;
+    unsigned char removable_disks_as_fixed: 1;
+    unsigned char interrupt: 2;
+    unsigned char dma_channel: 2;
+  } config_1;
+  struct config_2 {
+    unsigned char ha_scsi_id: 3;
+    unsigned char mapping_mode: 2;
+    unsigned char bios_drive_number: 1;
+    unsigned char tfr_port: 2;
+  } config_2;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+  printk("US14F: detect: called\n");
+#endif
+
+  /* If a 24F has already been configured, don't look for a 14F. */
+  if (config.bios_segment)
+    return FALSE;
+
+#ifdef PORT_OVERRIDE
+  if(check_region(PORT_OVERRIDE, 0xc)) {
+    printk("Ultrastor I/O space already in use\n");
+    return FALSE;
+  }
+  config.port_address = PORT_OVERRIDE;
+#else
+  /* NOTE: this loop's closing brace is inside the #ifndef block further
+     down; the intervening probe code is shared with the override case. */
+  for (i = 0; i < ARRAY_SIZE(ultrastor_ports_14f); i++) {
+    if(check_region(ultrastor_ports_14f[i], 0x0c)) continue;
+    config.port_address = ultrastor_ports_14f[i];
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+  printk("US14F: detect: testing port address %03X\n", config.port_address);
+#endif
+
+  in_byte = inb(U14F_PRODUCT_ID(config.port_address));
+  if (in_byte != US14F_PRODUCT_ID_0) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+    printk("US14F: detect: wrong product ID 0 - %02X\n", in_byte);
+# else
+    printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+    return FALSE;
+#else
+    continue;
+#endif
+  }
+  in_byte = inb(U14F_PRODUCT_ID(config.port_address) + 1);
+  /* Only upper nibble is significant for Product ID 1 */
+  if ((in_byte & 0xF0) != US14F_PRODUCT_ID_1) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+    printk("US14F: detect: wrong product ID 1 - %02X\n", in_byte);
+# else
+    printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+    return FALSE;
+#else
+    continue;
+#endif
+  }
+  version_byte = in_byte;    /* low nibble distinguishes 14F from 34F */
+#ifndef PORT_OVERRIDE
+  break;
+  }
+  if (i == ARRAY_SIZE(ultrastor_ports_14f)) {
+# if (ULTRASTOR_DEBUG & UD_DETECT)
+    printk("US14F: detect: no port address found!\n");
+# endif
+    return FALSE;
+  }
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+  printk("US14F: detect: adapter found at port address %03X\n",
+         config.port_address);
+#endif
+
+  /* Set local doorbell mask to disallow bus reset unless
+     ultrastor_bus_reset is true. */
+  outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(config.port_address));
+
+  /* All above tests passed, must be the right thing.  Get some useful
+     info.  Register the I/O space that we use. */
+  request_region(config.port_address, 0x0c,"ultrastor");
+
+  *(char *)&config_1 = inb(CONFIG(config.port_address + 0));
+  *(char *)&config_2 = inb(CONFIG(config.port_address + 1));
+  config.bios_segment = bios_segment_table[config_1.bios_segment];
+  config.doorbell_address = config.port_address;
+  config.ogm_address = config.port_address + 0x8;
+  config.icm_address = config.port_address + 0xC;
+  config.interrupt = interrupt_table_14f[config_1.interrupt];
+  config.ha_scsi_id = config_2.ha_scsi_id;
+  config.heads = mapping_table[config_2.mapping_mode].heads;
+  config.sectors = mapping_table[config_2.mapping_mode].sectors;
+  config.bios_drive_number = config_2.bios_drive_number;
+  config.subversion = (version_byte & 0x0F);
+  if (config.subversion == U34F)
+    config.dma_channel = 0;   /* 34F is a VL-bus master: no ISA DMA channel */
+  else
+    config.dma_channel = dma_channel_table_14f[config_1.dma_channel];
+
+  if (!config.bios_segment) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+    printk("US14F: detect: not detected.\n");
+#endif
+    release_region(config.port_address, 0x0c);  /* fix: don't leak region */
+    return FALSE;
+  }
+
+  /* Final consistency check, verify previous info. */
+  if (config.subversion != U34F)
+    if (!config.dma_channel || !(config_2.tfr_port & 0x2)) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+      printk("US14F: detect: consistency check failed\n");
+#endif
+      release_region(config.port_address, 0x0c);  /* fix: don't leak region */
+      return FALSE;
+    }
+
+  /* If we were TRULY paranoid, we could issue a host adapter inquiry
+     command here and verify the data returned.  But frankly, I'm
+     exhausted! */
+
+  /* Finally!  Now I'm satisfied... */
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+  printk("US14F: detect: detect succeeded\n"
+         "  Port address: %03X\n"
+         "  BIOS segment: %05X\n"
+         "  Interrupt: %u\n"
+         "  DMA channel: %u\n"
+         "  H/A SCSI ID: %u\n"
+         "  Subversion: %u\n",
+         config.port_address, config.bios_segment, config.interrupt,
+         config.dma_channel, config.ha_scsi_id, config.subversion);
+#endif
+  tpnt->this_id = config.ha_scsi_id;
+  tpnt->unchecked_isa_dma = (config.subversion != U34F);
+
+#if ULTRASTOR_MAX_CMDS > 1
+  config.mscp_free = ~0;    /* all MSCP slots start out free */
+#endif
+
+  if (request_irq(config.interrupt, ultrastor_interrupt, 0, "Ultrastor", NULL)) {
+    printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+           config.interrupt);
+    release_region(config.port_address, 0x0c);  /* fix: don't leak region */
+    return FALSE;
+  }
+  if (config.dma_channel && request_dma(config.dma_channel,"Ultrastor")) {
+    printk("Unable to allocate DMA channel %u for UltraStor controller.\n",
+           config.dma_channel);
+    free_irq(config.interrupt, NULL);
+    release_region(config.port_address, 0x0c);  /* fix: don't leak region */
+    return FALSE;
+  }
+  tpnt->sg_tablesize = ULTRASTOR_14F_MAX_SG;
+  /* fix: missing space after "version" in the original banner */
+  printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
+         ULTRASTOR_14F_MAX_SG);
+
+  return TRUE;
+}
+
+/* Probe EISA slots 1..14 (I/O base (slot << 12) | 0xC80) for a 24F.
+   On a match, reads the config registers, claims the IRQ, registers the
+   host with the mid-layer, marks the ICM/OGM mailboxes free and programs
+   the doorbell masks.  Returns TRUE when a board is configured.
+   Fixes vs. original: the product-ID test used `&&`, so a slot was
+   accepted when ANY one of the three ID bytes matched (all three must
+   match -> `||` to skip on any mismatch); the result of scsi_register()
+   is now checked before being dereferenced; debug printk gained its
+   missing newline. */
+static int ultrastor_24f_detect(Scsi_Host_Template * tpnt)
+{
+  register int i;
+  struct Scsi_Host * shpnt = NULL;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+  printk("US24F: detect\n");
+#endif
+
+  /* probe each EISA slot at slot address C80 */
+  for (i = 1; i < 15; i++)
+  {
+    unsigned char config_1, config_2;
+    unsigned short addr = (i << 12) | ULTRASTOR_24F_PORT;
+
+    /* fix: skip the slot unless ALL three product-ID bytes match */
+    if (inb(addr) != US24F_PRODUCT_ID_0 ||
+        inb(addr+1) != US24F_PRODUCT_ID_1 ||
+        inb(addr+2) != US24F_PRODUCT_ID_2)
+      continue;
+
+    config.revision = inb(addr+3);
+    config.slot = i;    /* nonzero slot marks this adapter as a 24F */
+    if (! (inb(addr+4) & 1))
+    {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+      printk("U24F: found disabled card in slot %u\n", i);
+#endif
+      continue;
+    }
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+    printk("U24F: found card in slot %u\n", i);
+#endif
+    config_1 = inb(addr + 5);
+    config.bios_segment = bios_segment_table[config_1 & 7];
+    /* Upper nibble is a one-hot IRQ selector. */
+    switch(config_1 >> 4)
+    {
+    case 1:
+      config.interrupt = 15;
+      break;
+    case 2:
+      config.interrupt = 14;
+      break;
+    case 4:
+      config.interrupt = 11;
+      break;
+    case 8:
+      config.interrupt = 10;
+      break;
+    default:
+      printk("U24F: invalid IRQ\n");
+      return FALSE;
+    }
+    if (request_irq(config.interrupt, ultrastor_interrupt, 0, "Ultrastor", NULL))
+    {
+      printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+             config.interrupt);
+      return FALSE;
+    }
+    /* BIOS addr set */
+    /* base port set */
+    config.port_address = addr;
+    config.doorbell_address = addr + 12;
+    config.ogm_address = addr + 0x17;
+    config.icm_address = addr + 0x1C;
+    config_2 = inb(addr + 7);
+    config.ha_scsi_id = config_2 & 7;
+    config.heads = mapping_table[(config_2 >> 3) & 3].heads;
+    config.sectors = mapping_table[(config_2 >> 3) & 3].sectors;
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+    printk("US24F: detect: detect succeeded\n"
+           "  Port address: %03X\n"
+           "  BIOS segment: %05X\n"
+           "  Interrupt: %u\n"
+           "  H/A SCSI ID: %u\n",
+           config.port_address, config.bios_segment,
+           config.interrupt, config.ha_scsi_id);
+#endif
+    tpnt->this_id = config.ha_scsi_id;
+    tpnt->unchecked_isa_dma = 0;    /* EISA bus master: no ISA DMA limits */
+    tpnt->sg_tablesize = ULTRASTOR_24F_MAX_SG;
+
+    shpnt = scsi_register(tpnt, 0);
+    if (shpnt == NULL)    /* fix: don't dereference a failed registration */
+    {
+      printk("U24F: unable to register SCSI host\n");
+      free_irq(config.interrupt, NULL);
+      return FALSE;
+    }
+    shpnt->irq = config.interrupt;
+    shpnt->dma_channel = config.dma_channel;
+    shpnt->io_port = config.port_address;
+
+#if ULTRASTOR_MAX_CMDS > 1
+    config.mscp_free = ~0;    /* all MSCP slots start out free */
+#endif
+    /* Mark ICM and OGM free */
+    outb(0, addr + 0x16);
+    outb(0, addr + 0x1B);
+
+    /* Set local doorbell mask to disallow bus reset unless
+       ultrastor_bus_reset is true. */
+    outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(addr+12));
+    outb(0x02, SYS_DOORBELL_MASK(addr+12));
+    printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
+           tpnt->sg_tablesize);
+    return TRUE;
+  }
+  return FALSE;
+}
+
+/* Mid-layer detect entry point: publish the /proc directory entry, then
+   probe for a 14F/34F first and fall back to the 24F EISA scan (the 14F
+   probe short-circuits the 24F scan when it succeeds). */
+int ultrastor_detect(Scsi_Host_Template * tpnt)
+{
+ tpnt->proc_dir = &proc_scsi_ultrastor;
+ return ultrastor_14f_detect(tpnt) || ultrastor_24f_detect(tpnt);
+}
+
+/* Return a human-readable description of the configured adapter.
+   Model is inferred from `config`: nonzero slot => 24F, else nonzero
+   subversion => 34F, else 14F.  Uses a static buffer, so the returned
+   pointer is overwritten by the next call (single adapter, so harmless). */
+const char *ultrastor_info(struct Scsi_Host * shpnt)
+{
+ static char buf[64];
+
+ if (config.slot)
+ sprintf(buf, "UltraStor 24F SCSI @ Slot %u IRQ%u",
+ config.slot, config.interrupt);
+ else if (config.subversion)
+ sprintf(buf, "UltraStor 34F SCSI @ Port %03X BIOS %05X IRQ%u",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt);
+ else
+ sprintf(buf, "UltraStor 14F SCSI @ Port %03X BIOS %05X IRQ%u DMA%u",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt, config.dma_channel);
+ return buf;
+}
+
+/* Copy the mid-layer's scatterlist into the MSCP's adapter-visible
+   sglist[] and point transfer_data at it.  Addresses are truncated to
+   32-bit unsigned int and virtual addresses are handed to the adapter
+   directly -- assumes the ISA/VLB-era identity mapping; not 64-bit safe. */
+static inline void build_sg_list(register struct mscp *mscp, Scsi_Cmnd *SCpnt)
+{
+ struct scatterlist *sl;
+ long transfer_length = 0;
+ int i, max;
+
+ sl = (struct scatterlist *) SCpnt->request_buffer;
+ max = SCpnt->use_sg;
+ for (i = 0; i < max; i++) {
+ mscp->sglist[i].address = (unsigned int)sl[i].address;
+ mscp->sglist[i].num_bytes = sl[i].length;
+ transfer_length += sl[i].length;
+ }
+ mscp->number_of_sg_list = max;
+ mscp->transfer_data = (unsigned int)mscp->sglist;
+ /* ??? May not be necessary. Docs are unclear as to whether transfer
+ length field is ignored or whether it should be set to the total
+ number of bytes of the transfer. */
+ mscp->transfer_data_length = transfer_length;
+}
+
+/* Queue one SCSI command to the adapter's Outgoing Mail (OGM) slot.
+   Claims a free MSCP from the pool, fills in the command packet, busy-waits
+   for the OGM slot / local doorbell to free up, then rings the doorbell.
+   Returns 0 once queued; completion is delivered via ultrastor_interrupt(),
+   which calls the saved done() callback.  The abort flag handshake with
+   config.aborted[] (via atomic xchgb) lets ultrastor_abort() cancel a
+   command that has not yet reached the adapter.
+   NOTE(review): mscp_index and mscp_free only exist when
+   ULTRASTOR_MAX_CMDS > 1, yet are used unconditionally below, so the
+   MAX_CMDS == 1 configuration looks uncompilable -- confirm. */
+int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ register struct mscp *my_mscp;
+#if ULTRASTOR_MAX_CMDS > 1
+ int mscp_index;
+#endif
+ unsigned int status;
+ unsigned long flags;
+
+ /* Next test is for debugging; "can't happen" */
+ if ((config.mscp_free & ((1U << ULTRASTOR_MAX_CMDS) - 1)) == 0)
+ panic("ultrastor_queuecommand: no free MSCP\n");
+ mscp_index = find_and_clear_bit_16(&config.mscp_free);
+
+ /* Has the command been aborted? */
+ if (xchgb(0xff, &config.aborted[mscp_index]) != 0)
+ {
+ status = DID_ABORT << 16;
+ goto aborted;
+ }
+
+ my_mscp = &config.mscp[mscp_index];
+
+#if 1
+ /* This way is faster. */
+ *(unsigned char *)my_mscp = OP_SCSI | (DTD_SCSI << 3);
+#else
+ my_mscp->opcode = OP_SCSI;
+ my_mscp->xdir = DTD_SCSI;
+ my_mscp->dcn = FALSE;
+#endif
+ /* Tape drives don't work properly if the cache is used. The SCSI
+ READ command for a tape doesn't have a block offset, and the adapter
+ incorrectly assumes that all reads from the tape read the same
+ blocks. Results will depend on read buffer size and other disk
+ activity.
+
+ ??? Which other device types should never use the cache? */
+ my_mscp->ca = SCpnt->device->type != TYPE_TAPE;
+ my_mscp->target_id = SCpnt->target;
+ my_mscp->ch_no = 0;
+ my_mscp->lun = SCpnt->lun;
+ if (SCpnt->use_sg) {
+ /* Set scatter/gather flag in SCSI command packet */
+ my_mscp->sg = TRUE;
+ build_sg_list(my_mscp, SCpnt);
+ } else {
+ /* Unset scatter/gather flag in SCSI command packet */
+ my_mscp->sg = FALSE;
+ my_mscp->transfer_data = (unsigned int)SCpnt->request_buffer;
+ my_mscp->transfer_data_length = SCpnt->request_bufflen;
+ }
+ my_mscp->command_link = 0; /*???*/
+ my_mscp->scsi_command_link_id = 0; /*???*/
+ my_mscp->length_of_sense_byte = sizeof SCpnt->sense_buffer;
+ my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
+ memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
+ my_mscp->adapter_status = 0;
+ my_mscp->target_status = 0;
+ /* 32-bit virtual address handed straight to the adapter (ISA era). */
+ my_mscp->sense_data = (unsigned int)&SCpnt->sense_buffer;
+ my_mscp->done = done;
+ my_mscp->SCint = SCpnt;
+ SCpnt->host_scribble = (unsigned char *)my_mscp;
+
+ /* Find free OGM slot. On 24F, look for OGM status byte == 0.
+ On 14F and 34F, wait for local interrupt pending flag to clear. */
+
+ retry:
+ if (config.slot)
+ while (inb(config.ogm_address - 1) != 0 &&
+ config.aborted[mscp_index] == 0xff) barrier();
+
+ /* else??? */
+
+ while ((inb(LCL_DOORBELL_INTR(config.doorbell_address)) &
+ (config.slot ? 2 : 1))
+ && config.aborted[mscp_index] == 0xff) barrier();
+
+ /* To avoid race conditions, make the code to write to the adapter
+ atomic. This simplifies the abort code. */
+
+ save_flags(flags);
+ cli();
+
+ /* Re-check under cli(): the doorbell may have been taken again
+    between the open-coded wait above and disabling interrupts. */
+ if (inb(LCL_DOORBELL_INTR(config.doorbell_address)) &
+ (config.slot ? 2 : 1))
+ {
+ restore_flags(flags);
+ goto retry;
+ }
+
+ status = xchgb(0, &config.aborted[mscp_index]);
+ if (status != 0xff) {
+ restore_flags(flags);
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+ printk("USx4F: queuecommand: aborted\n");
+#if ULTRASTOR_MAX_CMDS > 1
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+#endif
+ status <<= 16;
+
+ aborted:
+ set_bit(mscp_index, &config.mscp_free);
+ /* If the driver queues commands, call the done proc here. Otherwise
+ return an error. */
+#if ULTRASTOR_MAX_CMDS > 1
+ SCpnt->result = status;
+ done(SCpnt);
+ return 0;
+#else
+ return status;
+#endif
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl((unsigned int)my_mscp, config.ogm_address);
+
+ /* Issue OGM interrupt */
+ if (config.slot) {
+ /* Write OGM command register on 24F */
+ outb(1, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+ } else {
+ outb(0x1, LCL_DOORBELL_INTR(config.doorbell_address));
+ }
+
+ restore_flags(flags);
+
+#if (ULTRASTOR_DEBUG & UD_COMMAND)
+ printk("USx4F: queuecommand: returning\n");
+#endif
+
+ return 0;
+}
+
+/* This code must deal with 2 cases:
+
+ 1. The command has not been written to the OGM. In this case, set
+ the abort flag and return.
+
+ 2. The command has been written to the OGM and is stuck somewhere in
+ the adapter.
+
+ 2a. On a 24F, ask the adapter to abort the command. It will interrupt
+ when it does.
+
+ 2b. Call the command's done procedure.
+
+ */
+
+/* Old-style abort handler.  Returns one of the SCSI_ABORT_* codes.
+   NOTE(review): the "pending interrupt" rescue below is compiled only
+   when ULTRASTOR_DEBUG & UD_ABORT -- confirm whether that was intended
+   for production builds too. */
+int ultrastor_abort(Scsi_Cmnd *SCpnt)
+{
+#if ULTRASTOR_DEBUG & UD_ABORT
+ char out[108];
+ unsigned char icm_status = 0, ogm_status = 0;
+ unsigned int icm_addr = 0, ogm_addr = 0;
+#endif
+ unsigned int mscp_index;
+ unsigned char old_aborted;
+ void (*done)(Scsi_Cmnd *);
+
+ if(config.slot)
+ return SCSI_ABORT_SNOOZE; /* Do not attempt an abort for the 24f */
+
+ /* Simple consistency checking */
+ if(!SCpnt->host_scribble)
+ return SCSI_ABORT_NOT_RUNNING;
+
+ /* host_scribble was set to the MSCP by queuecommand; recover its index. */
+ mscp_index = ((struct mscp *)SCpnt->host_scribble) - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS)
+ panic("Ux4F aborting invalid MSCP");
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.slot)
+ {
+ int port0 = (config.slot << 12) | 0xc80;
+ int i;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ strcpy(out, "OGM %d:%x ICM %d:%x ports: ");
+ for (i = 0; i < 16; i++)
+ {
+ unsigned char p = inb(port0 + i);
+ out[28 + i * 3] = "0123456789abcdef"[p >> 4];
+ out[29 + i * 3] = "0123456789abcdef"[p & 15];
+ out[30 + i * 3] = ' ';
+ }
+ out[28 + i * 3] = '\n';
+ out[29 + i * 3] = 0;
+ ogm_status = inb(port0 + 22);
+ ogm_addr = inl(port0 + 23);
+ icm_status = inb(port0 + 27);
+ icm_addr = inl(port0 + 28);
+ restore_flags(flags);
+ }
+
+ /* First check to see if an interrupt is pending. I suspect the SiS
+ chipset loses interrupts. (I also suspect it mangles data, but
+ one bug at a time...) */
+ if (config.slot ? inb(config.icm_address - 1) == 2 :
+ (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+ {
+ unsigned long flags;
+ save_flags(flags);
+ printk("Ux4F: abort while completed command pending\n");
+ /* NOTE(review): restore_flags() is called twice here and cli()
+    follows the first restore -- the flag handling looks suspect. */
+ restore_flags(flags);
+ cli();
+ ultrastor_interrupt(0, NULL, NULL);
+ restore_flags(flags);
+ return SCSI_ABORT_SUCCESS; /* FIXME - is this correct? -ERY */
+ }
+#endif
+
+ old_aborted = xchgb(DID_ABORT, &config.aborted[mscp_index]);
+
+ /* aborted == 0xff is the signal that queuecommand has not yet sent
+ the command. It will notice the new abort flag and fail. */
+ if (old_aborted == 0xff)
+ return SCSI_ABORT_SUCCESS;
+
+ /* On 24F, send an abort MSCP request. The adapter will interrupt
+ and the interrupt handler will call done. */
+ if (config.slot && inb(config.ogm_address - 1) == 0)
+ {
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outl((int)&config.mscp[mscp_index], config.ogm_address);
+ inb(0xc80); /* delay */
+ outb(0x80, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+ printk(out, ogm_status, ogm_addr, icm_status, icm_addr);
+#endif
+ restore_flags(flags);
+ return SCSI_ABORT_PENDING;
+ }
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+
+ /* Can't request a graceful abort. Either this is not a 24F or
+ the OGM is busy. Don't free the command -- the adapter might
+ still be using it. Setting SCint = 0 causes the interrupt
+ handler to ignore the command. */
+
+ /* FIXME - devices that implement soft resets will still be running
+ the command after a bus reset. We would probably rather leave
+ the command in the queue. The upper level code will automatically
+ leave the command in the active state instead of requeueing it. ERY */
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.mscp[mscp_index].SCint != SCpnt)
+ printk("abort: command mismatch, %p != %p\n",
+ config.mscp[mscp_index].SCint, SCpnt);
+#endif
+ if (config.mscp[mscp_index].SCint == 0)
+ return SCSI_ABORT_NOT_RUNNING;
+
+ if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
+ config.mscp[mscp_index].SCint = 0;
+ done = config.mscp[mscp_index].done;
+ config.mscp[mscp_index].done = 0;
+ SCpnt->result = DID_ABORT << 16;
+ /* I worry about reentrancy in scsi.c */
+ done(SCpnt);
+
+ /* Need to set a timeout here in case command never completes. */
+ return SCSI_ABORT_SUCCESS;
+}
+
+/* Old-style reset handler: soft-reset the adapter (and optionally the SCSI
+   bus, see ultrastor_bus_reset), fail every outstanding command with
+   DID_RESET, and re-initialize the MSCP pool bookkeeping.  24F punts.
+   NOTE(review): `i` is unused when ULTRASTOR_MAX_CMDS == 1 (only the
+   #else branch uses it), which will draw a compiler warning. */
+int ultrastor_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ unsigned long flags;
+ register int i;
+#if (ULTRASTOR_DEBUG & UD_RESET)
+ printk("US14F: reset: called\n");
+#endif
+
+ if(config.slot)
+ return SCSI_RESET_PUNT; /* Do not attempt a reset for the 24f */
+
+ save_flags(flags);
+ cli();
+
+ /* Reset the adapter and SCSI bus. The SCSI bus reset can be
+ inhibited by clearing ultrastor_bus_reset before probe. */
+ outb(0xc0, LCL_DOORBELL_INTR(config.doorbell_address));
+ if (config.slot)
+ {
+ outb(0, config.ogm_address - 1);
+ outb(0, config.icm_address - 1);
+ }
+
+#if ULTRASTOR_MAX_CMDS == 1
+ if (config.mscp_busy && config.mscp->done && config.mscp->SCint)
+ {
+ config.mscp->SCint->result = DID_RESET << 16;
+ config.mscp->done(config.mscp->SCint);
+ }
+ config.mscp->SCint = 0;
+#else
+ /* Complete every in-flight command with DID_RESET. */
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ if (! (config.mscp_free & (1 << i)) &&
+ config.mscp[i].done && config.mscp[i].SCint)
+ {
+ config.mscp[i].SCint->result = DID_RESET << 16;
+ config.mscp[i].done(config.mscp[i].SCint);
+ config.mscp[i].done = 0;
+ }
+ config.mscp[i].SCint = 0;
+ }
+#endif
+
+ /* FIXME - if the device implements soft resets, then the command
+ will still be running. ERY */
+
+ memset((unsigned char *)config.aborted, 0, sizeof config.aborted);
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = 0;
+#else
+ config.mscp_free = ~0;
+#endif
+
+ restore_flags(flags);
+ return SCSI_RESET_SUCCESS;
+
+}
+
+int ultrastor_biosparam(Disk * disk, kdev_t dev, int * dkinfo)
+{
+ int size = disk->capacity;
+ unsigned int s = config.heads * config.sectors;
+
+ dkinfo[0] = config.heads;
+ dkinfo[1] = config.sectors;
+ dkinfo[2] = size / s; /* Ignore partial cylinders */
+#if 0
+ if (dkinfo[2] > 1024)
+ dkinfo[2] = 1024;
+#endif
+ return 0;
+}
+
+static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned int status;
+#if ULTRASTOR_MAX_CMDS > 1
+ unsigned int mscp_index;
+#endif
+ register struct mscp *mscp;
+ void (*done)(Scsi_Cmnd *);
+ Scsi_Cmnd *SCtmp;
+
+#if ULTRASTOR_MAX_CMDS == 1
+ mscp = &config.mscp[0];
+#else
+ mscp = (struct mscp *)inl(config.icm_address);
+ mscp_index = mscp - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS) {
+ printk("Ux4F interrupt: bad MSCP address %x\n", (unsigned int) mscp);
+ /* A command has been lost. Reset and report an error
+ for all commands. */
+ ultrastor_reset(NULL, 0);
+ return;
+ }
+#endif
+
+ /* Clean ICM slot (set ICMINT bit to 0) */
+ if (config.slot) {
+ unsigned char icm_status = inb(config.icm_address - 1);
+#if ULTRASTOR_DEBUG & (UD_INTERRUPT|UD_ERROR|UD_ABORT)
+ if (icm_status != 1 && icm_status != 2)
+ printk("US24F: ICM status %x for MSCP %d (%x)\n", icm_status,
+ mscp_index, (unsigned int) mscp);
+#endif
+ /* The manual says clear interrupt then write 0 to ICM status.
+ This seems backwards, but I'll do it anyway. --jfc */
+ outb(2, SYS_DOORBELL_INTR(config.doorbell_address));
+ outb(0, config.icm_address - 1);
+ if (icm_status == 4) {
+ printk("UltraStor abort command failed\n");
+ return;
+ }
+ if (icm_status == 3) {
+ void (*done)(Scsi_Cmnd *) = mscp->done;
+ if (done) {
+ mscp->done = 0;
+ mscp->SCint->result = DID_ABORT << 16;
+ done(mscp->SCint);
+ }
+ return;
+ }
+ } else {
+ outb(1, SYS_DOORBELL_INTR(config.doorbell_address));
+ }
+
+ SCtmp = mscp->SCint;
+ mscp->SCint = NULL;
+
+ if (SCtmp == 0)
+ {
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
+#endif
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+ config.aborted[mscp_index] = 0;
+ return;
+ }
+
+ /* Save done locally and zero before calling. This is needed as
+ once we call done, we may get another command queued before this
+ interrupt service routine can return. */
+ done = mscp->done;
+ mscp->done = 0;
+
+ /* Let the higher levels know that we're done */
+ switch (mscp->adapter_status)
+ {
+ case 0:
+ status = DID_OK << 16;
+ break;
+ case 0x01: /* invalid command */
+ case 0x02: /* invalid parameters */
+ case 0x03: /* invalid data list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ case 0x84: /* SCSI bus abort */
+ status = DID_ABORT << 16;
+ break;
+ case 0x91:
+ status = DID_TIME_OUT << 16;
+ break;
+ }
+
+ SCtmp->result = status | mscp->target_status;
+
+ SCtmp->host_scribble = 0;
+
+ /* Free up mscp block for next command */
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ if (config.aborted[mscp_index])
+ printk("Ux4 interrupt: MSCP %d (%x) aborted = %d\n",
+ mscp_index, (unsigned int) mscp, config.aborted[mscp_index]);
+#endif
+ config.aborted[mscp_index] = 0;
+
+ if (done)
+ done(SCtmp);
+ else
+ printk("US14F: interrupt: unexpected interrupt\n");
+
+ if (config.slot ? inb(config.icm_address - 1) :
+ (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+#if (ULTRASTOR_DEBUG & UD_MULTI_CMD)
+ printk("Ux4F: multiple commands completed\n");
+#else
+ ;
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_INTERRUPT)
+ printk("USx4F: interrupt: returning\n");
+#endif
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = ULTRASTOR_14F;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/ultrastor.h b/linux/src/drivers/scsi/ultrastor.h
new file mode 100644
index 0000000..7a40acc
--- /dev/null
+++ b/linux/src/drivers/scsi/ultrastor.h
@@ -0,0 +1,102 @@
+/*
+ * ultrastor.c (C) 1991 David B. Gentzel
+ * Low-level scsi driver for UltraStor 14F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (eric@tantalus.nrl.navy.mil).
+ * Thanks to UltraStor for providing the necessary documentation
+ */
+
+#ifndef _ULTRASTOR_H
+#define _ULTRASTOR_H
+#include <linux/kdev_t.h>
+
+int ultrastor_detect(Scsi_Host_Template *);
+const char *ultrastor_info(struct Scsi_Host * shpnt);
+int ultrastor_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int ultrastor_abort(Scsi_Cmnd *);
+int ultrastor_reset(Scsi_Cmnd *, unsigned int);
+int ultrastor_biosparam(Disk *, kdev_t, int *);
+
+
+#define ULTRASTOR_14F_MAX_SG 16
+#define ULTRASTOR_24F_MAX_SG 33
+
+#define ULTRASTOR_MAX_CMDS_PER_LUN 5
+#define ULTRASTOR_MAX_CMDS 16
+
+#define ULTRASTOR_24F_PORT 0xC80
+
+
+#define ULTRASTOR_14F { NULL, NULL, /* Ptr for modules*/ \
+ NULL, \
+ NULL, \
+ "UltraStor 14F/24F/34F", \
+ ultrastor_detect, \
+ NULL, /* Release */ \
+ ultrastor_info, \
+ 0, \
+ ultrastor_queuecommand, \
+ ultrastor_abort, \
+ ultrastor_reset, \
+ 0, \
+ ultrastor_biosparam, \
+ ULTRASTOR_MAX_CMDS, \
+ 0, \
+ ULTRASTOR_14F_MAX_SG, \
+ ULTRASTOR_MAX_CMDS_PER_LUN, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING }
+
+
+#ifdef ULTRASTOR_PRIVATE
+
+#define UD_ABORT 0x0001
+#define UD_COMMAND 0x0002
+#define UD_DETECT 0x0004
+#define UD_INTERRUPT 0x0008
+#define UD_RESET 0x0010
+#define UD_MULTI_CMD 0x0020
+#define UD_CSIR 0x0040
+#define UD_ERROR 0x0080
+
+/* #define PORT_OVERRIDE 0x330 */
+
+/* Values for the PRODUCT_ID ports for the 14F */
+#define US14F_PRODUCT_ID_0 0x56
+#define US14F_PRODUCT_ID_1 0x40 /* NOTE: Only upper nibble is used */
+
+#define US24F_PRODUCT_ID_0 0x56
+#define US24F_PRODUCT_ID_1 0x63
+#define US24F_PRODUCT_ID_2 0x02
+
+/* Subversion values */
+#define U14F 0
+#define U34F 1
+
+/* MSCP field values */
+
+/* Opcode */
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+
+/* Date Transfer Direction */
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+
+/* Host Adapter command subcodes */
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#endif
+
+#endif
diff --git a/linux/src/drivers/scsi/wd7000.c b/linux/src/drivers/scsi/wd7000.c
new file mode 100644
index 0000000..08d3ac3
--- /dev/null
+++ b/linux/src/drivers/scsi/wd7000.c
@@ -0,0 +1,1452 @@
+/* $Id: wd7000.c,v 1.1 1999/04/26 05:55:18 tb Exp $
+ * linux/drivers/scsi/wd7000.c
+ *
+ * Copyright (C) 1992 Thomas Wuensche
+ * closely related to the aha1542 driver from Tommy Thorn
+ * ( as close as different hardware allows on a lowlevel-driver :-) )
+ *
+ * Revised (and renamed) by John Boyd <boyd@cis.ohio-state.edu> to
+ * accommodate Eric Youngdale's modifications to scsi.c. Nov 1992.
+ *
+ * Additional changes to support scatter/gather. Dec. 1992. tw/jb
+ *
+ * No longer tries to reset SCSI bus at boot (it wasn't working anyway).
+ * Rewritten to support multiple host adapters.
+ * Miscellaneous cleanup.
+ * So far, still doesn't do reset or abort correctly, since I have no idea
+ * how to do them with this board (8^(. Jan 1994 jb
+ *
+ * This driver now supports both of the two standard configurations (per
+ * the 3.36 Owner's Manual, my latest reference) by the same method as
+ * before; namely, by looking for a BIOS signature. Thus, the location of
+ * the BIOS signature determines the board configuration. Until I have
+ * time to do something more flexible, users should stick to one of the
+ * following:
+ *
+ * Standard configuration for single-adapter systems:
+ * - BIOS at CE00h
+ * - I/O base address 350h
+ * - IRQ level 15
+ * - DMA channel 6
+ * Standard configuration for a second adapter in a system:
+ * - BIOS at C800h
+ * - I/O base address 330h
+ * - IRQ level 11
+ * - DMA channel 5
+ *
+ * Anyone who can recompile the kernel is welcome to add others as need
+ * arises, but unpredictable results may occur if there are conflicts.
+ * In any event, if there are multiple adapters in a system, they MUST
+ * use different I/O bases, IRQ levels, and DMA channels, since they will be
+ * indistinguishable (and in direct conflict) otherwise.
+ *
+ * As a point of information, the NO_OP command toggles the CMD_RDY bit
+ * of the status port, and this fact could be used as a test for the I/O
+ * base address (or more generally, board detection). There is an interrupt
+ * status port, so IRQ probing could also be done. I suppose the full
+ * DMA diagnostic could be used to detect the DMA channel being used. I
+ * haven't done any of this, though, because I think there's too much of
+ * a chance that such explorations could be destructive, if some other
+ * board's resources are used inadvertently. So, call me a wimp, but I
+ * don't want to try it. The only kind of exploration I trust is memory
+ * exploration, since it's more certain that reading memory won't be
+ * destructive.
+ *
+ * More to my liking would be a LILO boot command line specification, such
+ * as is used by the aha152x driver (and possibly others). I'll look into
+ * it, as I have time...
+ *
+ * I get mail occasionally from people who either are using or are
+ * considering using a WD7000 with Linux. There is a variety of
+ * nomenclature describing WD7000's. To the best of my knowledge, the
+ * following is a brief summary (from an old WD doc - I don't work for
+ * them or anything like that):
+ *
+ * WD7000-FASST2: This is a WD7000 board with the real-mode SST ROM BIOS
+ * installed. Last I heard, the BIOS was actually done by Columbia
+ * Data Products. The BIOS is only used by this driver (and thus
+ * by Linux) to identify the board; none of it can be executed under
+ * Linux.
+ *
+ * WD7000-ASC: This is the original adapter board, with or without BIOS.
+ * The board uses a WD33C93 or WD33C93A SBIC, which in turn is
+ * controlled by an onboard Z80 processor. The board interface
+ * visible to the host CPU is defined effectively by the Z80's
+ * firmware, and it is this firmware's revision level that is
+ * determined and reported by this driver. (The version of the
+ * on-board BIOS is of no interest whatsoever.) The host CPU has
+ * no access to the SBIC; hence the fact that it is a WD33C93 is
+ * also of no interest to this driver.
+ *
+ * WD7000-AX:
+ * WD7000-MX:
+ * WD7000-EX: These are newer versions of the WD7000-ASC. The -ASC is
+ * largely built from discrete components; these boards use more
+ * integration. The -AX is an ISA bus board (like the -ASC),
+ * the -MX is an MCA (i.e., PS/2) bus board), and the -EX is an
+ * EISA bus board.
+ *
+ * At the time of my documentation, the -?X boards were "future" products,
+ * and were not yet available. However, I vaguely recall that Thomas
+ * Wuensche had an -AX, so I believe at least it is supported by this
+ * driver. I have no personal knowledge of either -MX or -EX boards.
+ *
+ * P.S. Just recently, I've discovered (directly from WD and Future
+ * Domain) that all but the WD7000-EX have been out of production for
+ * two years now. FD has production rights to the 7000-EX, and are
+ * producing it under a new name, and with a new BIOS. If anyone has
+ * one of the FD boards, it would be nice to come up with a signature
+ * for it.
+ * J.B. Jan 1994.
+ *
+ *
+ * Revisions by Miroslav Zagorac <zaga@fly.cc.fer.hr>
+ *
+ * -- 08/24/1996. --------------------------------------------------------------
+ * Enhancement for wd7000_detect function has been made, so you don't have
+ * to enter BIOS ROM address in initialisation data (see struct Config).
+ * We cannot detect IRQ, DMA and I/O base address for now, so we have to
+ * enter them as arguments while wd_7000 is detected. If someone has IRQ,
+ * DMA or an I/O base address set to some other value, he can enter them in
+ * a configuration without any problem.
+ * Also I wrote a function wd7000_setup, so now you can enter WD-7000
+ * definition as kernel arguments, as in lilo.conf:
+ *
+ * append="wd7000=IRQ,DMA,IO"
+ *
+ * PS: If card BIOS ROM is disabled, function wd7000_detect now will recognize
+ * adapter, unlike the old one. Anyway, BIOS ROM from WD7000 adapter is
+ * useless for Linux. B^)
+ *
+ * -- 09/06/1996. --------------------------------------------------------------
+ * Auto detecting of an I/O base address from wd7000_detect function is
+ * removed, some little bugs too...
+ *
+ * Thanks to Roger Scott for driver debugging.
+ *
+ * -- 06/07/1997. --------------------------------------------------------------
+ * Added support for /proc file system (/proc/scsi/wd7000/[0...] files).
+ * Now, the driver can handle hard disks with capacity >1GB.
+ *
+ * -- 01/15/1998. --------------------------------------------------------------
+ * Added support for BUS_ON and BUS_OFF parameters in config line.
+ * Miscellaneous cleanups. Syntax of the append line is changed to:
+ *
+ * append="wd7000=IRQ,DMA,IO[,BUS_ON[,BUS_OFF]]"
+ *
+ * , where BUS_ON and BUS_OFF are time in nanoseconds.
+ *
+ * -- 03/01/1998. --------------------------------------------------------------
+ * The WD7000 driver now works on kernels' >= 2.1.x
+ *
+ * -- 06/11/1998. --------------------------------------------------------------
+ * Ugly init_scbs, alloc_scbs and free_scb functions are changed with
+ * scbs_init, scb_alloc and scb_free. Now, source code is identical on
+ * 2.0.xx and 2.1.xx kernels.
+ * WD7000 specific definitions are moved from this file to wd7000.h.
+ *
+ */
+#ifdef MODULE
+# include <linux/module.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+# include <asm/spinlock.h>
+#endif
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include <linux/version.h>
+#include <linux/stat.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsicam.h>
+
+#undef WD7000_DEBUG /* general debug */
+#define WD7000_DEFINES /* This must be defined! */
+
+#include "wd7000.h"
+
+
+struct proc_dir_entry proc_scsi_wd7000 =
+{
+ PROC_SCSI_7000FASST,
+ 6,
+ "wd7000",
+ S_IFDIR | S_IRUGO | S_IXUGO,
+ 2
+};
+
+/*
+ * (linear) base address for ROM BIOS
+ */
+static const long wd7000_biosaddr[] = {
+ 0xc0000, 0xc2000, 0xc4000, 0xc6000, 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0xde000
+};
+#define NUM_ADDRS (sizeof (wd7000_biosaddr) / sizeof (long))
+
+static const ushort wd7000_iobase[] = {
+ 0x0300, 0x0308, 0x0310, 0x0318, 0x0320, 0x0328, 0x0330, 0x0338,
+ 0x0340, 0x0348, 0x0350, 0x0358, 0x0360, 0x0368, 0x0370, 0x0378,
+ 0x0380, 0x0388, 0x0390, 0x0398, 0x03a0, 0x03a8, 0x03b0, 0x03b8,
+ 0x03c0, 0x03c8, 0x03d0, 0x03d8, 0x03e0, 0x03e8, 0x03f0, 0x03f8
+};
+#define NUM_IOPORTS (sizeof (wd7000_iobase) / sizeof (ushort))
+
+static const short wd7000_irq[] = { 3, 4, 5, 7, 9, 10, 11, 12, 14, 15 };
+#define NUM_IRQS (sizeof (wd7000_irq) / sizeof (short))
+
+static const short wd7000_dma[] = { 5, 6, 7 };
+#define NUM_DMAS (sizeof (wd7000_dma) / sizeof (short))
+
+/*
+ * The following is set up by wd7000_detect, and used thereafter by
+ * wd7000_intr_handle to map the irq level to the corresponding Adapter.
+ * Note that if SA_INTERRUPT is not used, wd7000_intr_handle must be
+ * changed to pick up the IRQ level correctly.
+ */
+static struct Scsi_Host *wd7000_host[IRQS];
+
+/*
+ * Add here your configuration...
+ */
+static Config configs[] =
+{
+ { 15, 6, 0x350, BUS_ON, BUS_OFF }, /* defaults for single adapter */
+ { 11, 5, 0x320, BUS_ON, BUS_OFF }, /* defaults for second adapter */
+ { 7, 6, 0x350, BUS_ON, BUS_OFF }, /* My configuration (Zaga) */
+ { -1, -1, 0x0, BUS_ON, BUS_OFF } /* Empty slot */
+};
+#define NUM_CONFIGS (sizeof(configs)/sizeof(Config))
+
+static const Signature signatures[] =
+{
+ {"SSTBIOS", 0x0000d, 7} /* "SSTBIOS" @ offset 0x0000d */
+};
+#define NUM_SIGNATURES (sizeof(signatures)/sizeof(Signature))
+
+/*
+ * Driver SCB structure pool.
+ *
+ * The SCBs declared here are shared by all host adapters; hence, this
+ * structure is not part of the Adapter structure.
+ */
+static Scb scbs[MAX_SCBS];
+
+
+/*
+ * END of data/declarations - code follows.
+ */
+static void setup_error (char *mesg, int *ints)
+{
+ if (ints[0] == 3)
+ printk ("wd7000_setup: \"wd7000=%d,%d,0x%x\" -> %s\n",
+ ints[1], ints[2], ints[3], mesg);
+ else if (ints[0] == 4)
+ printk ("wd7000_setup: \"wd7000=%d,%d,0x%x,%d\" -> %s\n",
+ ints[1], ints[2], ints[3], ints[4], mesg);
+ else
+ printk ("wd7000_setup: \"wd7000=%d,%d,0x%x,%d,%d\" -> %s\n",
+ ints[1], ints[2], ints[3], ints[4], ints[5], mesg);
+}
+
+
+/*
+ * Note: You can now set these options from the kernel's "command line".
+ * The syntax is:
+ *
+ * wd7000=<IRQ>,<DMA>,<IO>[,<BUS_ON>[,<BUS_OFF>]]
+ *
+ * , where BUS_ON and BUS_OFF are in nanoseconds. BIOS default values
+ * are 8000ns for BUS_ON and 1875ns for BUS_OFF.
+ *
+ * eg:
+ * wd7000=7,6,0x350
+ *
+ * will configure the driver for a WD-7000 controller
+ * using IRQ 15 with a DMA channel 6, at IO base address 0x350.
+ */
+void wd7000_setup (char *str, int *ints)
+{
+ static short wd7000_card_num = 0;
+ short i, j;
+
+ if (wd7000_card_num >= NUM_CONFIGS) {
+ printk ("%s: Too many \"wd7000=\" configurations in "
+ "command line!\n", __FUNCTION__);
+ return;
+ }
+
+ if ((ints[0] < 3) || (ints[0] > 5))
+ printk ("%s: Error in command line! "
+ "Usage: wd7000=<IRQ>,<DMA>,<IO>[,<BUS_ON>[,<BUS_OFF>]]\n",
+ __FUNCTION__);
+ else {
+ for (i = 0; i < NUM_IRQS; i++)
+ if (ints[1] == wd7000_irq[i])
+ break;
+
+ if (i == NUM_IRQS) {
+ setup_error ("invalid IRQ.", ints);
+ return;
+ }
+ else
+ configs[wd7000_card_num].irq = ints[1];
+
+ for (i = 0; i < NUM_DMAS; i++)
+ if (ints[2] == wd7000_dma[i])
+ break;
+
+ if (i == NUM_DMAS) {
+ setup_error ("invalid DMA channel.", ints);
+ return;
+ }
+ else
+ configs[wd7000_card_num].dma = ints[2];
+
+ for (i = 0; i < NUM_IOPORTS; i++)
+ if (ints[3] == wd7000_iobase[i])
+ break;
+
+ if (i == NUM_IOPORTS) {
+ setup_error ("invalid I/O base address.", ints);
+ return;
+ }
+ else
+ configs[wd7000_card_num].iobase = ints[3];
+
+ if (ints[0] > 3) {
+ if ((ints[4] < 500) || (ints[4] > 31875)) {
+ setup_error ("BUS_ON value is out of range (500 to 31875 nanoseconds)!",
+ ints);
+ configs[wd7000_card_num].bus_on = BUS_ON;
+ }
+ else
+ configs[wd7000_card_num].bus_on = ints[4] / 125;
+ }
+ else
+ configs[wd7000_card_num].bus_on = BUS_ON;
+
+ if (ints[0] > 4) {
+ if ((ints[5] < 500) || (ints[5] > 31875)) {
+ setup_error ("BUS_OFF value is out of range (500 to 31875 nanoseconds)!",
+ ints);
+ configs[wd7000_card_num].bus_off = BUS_OFF;
+ }
+ else
+ configs[wd7000_card_num].bus_off = ints[5] / 125;
+ }
+ else
+ configs[wd7000_card_num].bus_off = BUS_OFF;
+
+ if (wd7000_card_num) {
+ for (i = 0; i < (wd7000_card_num - 1); i++)
+ for (j = i + 1; j < wd7000_card_num; j++)
+ if (configs[i].irq == configs[j].irq) {
+ setup_error ("duplicated IRQ!", ints);
+ return;
+ }
+ else if (configs[i].dma == configs[j].dma) {
+ setup_error ("duplicated DMA channel!", ints);
+ return;
+ }
+ else if (configs[i].iobase == configs[j].iobase) {
+ setup_error ("duplicated I/O base address!", ints);
+ return;
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ printk ("%s: IRQ=%d, DMA=%d, I/O=0x%x, BUS_ON=%dns, BUS_OFF=%dns\n",
+ __FUNCTION__,
+ configs[wd7000_card_num].irq,
+ configs[wd7000_card_num].dma,
+ configs[wd7000_card_num].iobase,
+ configs[wd7000_card_num].bus_on * 125,
+ configs[wd7000_card_num].bus_off * 125);
+#endif
+
+ wd7000_card_num++;
+ }
+}
+
+
+/*
+ * Since they're used a lot, I've redone the following from the macros
+ * formerly in wd7000.h, hopefully to speed them up by getting rid of
+ * all the shifting (it may not matter; GCC might have done as well anyway).
+ *
+ * xany2scsi and xscsi2int were not being used, and are no longer defined.
+ * (They were simply 4-byte versions of these routines).
+ */
+static inline void any2scsi (unchar *scsi, int any)
+{
+ *scsi++ = ((i_u) any).u[2];
+ *scsi++ = ((i_u) any).u[1];
+ *scsi = ((i_u) any).u[0];
+}
+
+
+static inline int scsi2int (unchar *scsi)
+{
+ i_u result;
+
+ result.i = 0; /* clears unused bytes */
+ result.u[2] = *scsi++;
+ result.u[1] = *scsi++;
+ result.u[0] = *scsi;
+
+ return (result.i);
+}
+
+
+static inline void wd7000_enable_intr (Adapter *host)
+{
+ host->control |= INT_EN;
+ outb (host->control, host->iobase + ASC_CONTROL);
+}
+
+
+static inline void wd7000_enable_dma (Adapter *host)
+{
+ host->control |= DMA_EN;
+ outb (host->control, host->iobase + ASC_CONTROL);
+ set_dma_mode (host->dma, DMA_MODE_CASCADE);
+ enable_dma (host->dma);
+}
+
+
+static inline short WAIT (uint port, uint mask, uint allof, uint noneof)
+{
+ register uint WAITbits;
+ register ulong WAITtimeout = jiffies + WAITnexttimeout;
+
+ while (jiffies <= WAITtimeout) {
+ WAITbits = inb (port) & mask;
+
+ if (((WAITbits & allof) == allof) && ((WAITbits & noneof) == 0))
+ return (0);
+ }
+
+ return (1);
+}
+
+
+static inline void delay (uint how_long)
+{
+ register ulong time = jiffies + how_long;
+
+ while (jiffies < time);
+}
+
+
+static inline int wd7000_command_out (Adapter *host, unchar *cmd, int len)
+{
+ if (! WAIT (host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
+ for ( ; len--; cmd++)
+ do {
+ outb (*cmd, host->iobase + ASC_COMMAND);
+ WAIT (host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0);
+ } while (inb (host->iobase + ASC_STAT) & CMD_REJ);
+
+ return (1);
+ }
+
+ printk ("%s: WAIT failed (%d)\n", __FUNCTION__, len + 1);
+
+ return (0);
+}
+
+
+static inline void scbs_init (void)
+{
+ short i;
+
+ for (i = 0; i < MAX_SCBS; i++)
+ memset ((void *) &(scbs[i]), 0, sizeof (Scb));
+}
+
+
+static inline Scb *scb_alloc (void)
+{
+ Scb *scb = NULL;
+ ulong flags;
+ short i;
+#ifdef WD7000_DEBUG
+ short free_scbs = 0;
+#endif
+
+ save_flags (flags);
+ cli ();
+
+ for (i = 0; i < MAX_SCBS; i++)
+ if (! scbs[i].used) {
+ scbs[i].used = 1;
+ scb = &(scbs[i]);
+
+ break;
+ }
+
+#ifdef WD7000_DEBUG
+ for (i = 0; i < MAX_SCBS; i++)
+ free_scbs += scbs[i].used ? 0 : 1;
+
+ printk ("wd7000_%s: allocating scb (0x%08x), %d scbs free\n",
+ __FUNCTION__, (int) scb, free_scbs);
+#endif
+
+ restore_flags (flags);
+
+ return (scb);
+}
+
+
+static inline void scb_free (Scb *scb)
+{
+ short i;
+ ulong flags;
+
+ save_flags (flags);
+ cli ();
+
+ for (i = 0; i < MAX_SCBS; i++)
+ if (&(scbs[i]) == scb) {
+ memset ((void *) &(scbs[i]), 0, sizeof (Scb));
+
+ break;
+ }
+
+ if (i == MAX_SCBS)
+ printk ("wd7000_%s: trying to free alien scb (0x%08x)...\n",
+ __FUNCTION__, (int) scb);
+#ifdef WD7000_DEBUG
+ else
+ printk ("wd7000_%s: freeing scb (0x%08x)\n", __FUNCTION__, (int) scb);
+#endif
+
+ restore_flags (flags);
+}
+
+
+static int mail_out (Adapter *host, Scb *scbptr)
+/*
+ * Note: this can also be used for ICBs; just cast to the parm type.
+ */
+{
+ register int i, ogmb;
+ ulong flags;
+ unchar start_ogmb;
+ Mailbox *ogmbs = host->mb.ogmb;
+ int *next_ogmb = &(host->next_ogmb);
+
+#ifdef WD7000_DEBUG
+ printk ("wd7000_%s: 0x%08x", __FUNCTION__, (int) scbptr);
+#endif
+
+ /* We first look for a free outgoing mailbox */
+ save_flags (flags);
+ cli ();
+
+ ogmb = *next_ogmb;
+ for (i = 0; i < OGMB_CNT; i++) {
+ if (ogmbs[ogmb].status == 0) {
+#ifdef WD7000_DEBUG
+ printk (" using OGMB 0x%x", ogmb);
+#endif
+ ogmbs[ogmb].status = 1;
+ any2scsi ((unchar *) ogmbs[ogmb].scbptr, (int) scbptr);
+
+ *next_ogmb = (ogmb + 1) % OGMB_CNT;
+ break;
+ }
+ else
+ ogmb = (ogmb + 1) % OGMB_CNT;
+ }
+
+ restore_flags (flags);
+
+#ifdef WD7000_DEBUG
+ printk (", scb is 0x%08x", (int) scbptr);
+#endif
+
+ if (i >= OGMB_CNT) {
+ /*
+ * Alternatively, we might issue the "interrupt on free OGMB",
+ * and sleep, but it must be ensured that it isn't the init
+ * task running. Instead, this version assumes that the caller
+ * will be persistent, and try again. Since it's the adapter
+ * that marks OGMB's free, waiting even with interrupts off
+ * should work, since they are freed very quickly in most cases.
+ */
+#ifdef WD7000_DEBUG
+ printk (", no free OGMBs.\n");
+#endif
+ return (0);
+ }
+
+ wd7000_enable_intr (host);
+
+ start_ogmb = START_OGMB | ogmb;
+ wd7000_command_out (host, &start_ogmb, 1);
+
+#ifdef WD7000_DEBUG
+ printk (", awaiting interrupt.\n");
+#endif
+
+ return (1);
+}
+
+
+int make_code (uint hosterr, uint scsierr)
+{
+#ifdef WD7000_DEBUG
+ int in_error = hosterr;
+#endif
+
+ switch ((hosterr >> 8) & 0xff) {
+ case 0: /* Reserved */
+ hosterr = DID_ERROR;
+ break;
+
+ case 1: /* Command Complete, no errors */
+ hosterr = DID_OK;
+ break;
+
+ case 2: /* Command complete, error logged in scb status (scsierr) */
+ hosterr = DID_OK;
+ break;
+
+ case 4: /* Command failed to complete - timeout */
+ hosterr = DID_TIME_OUT;
+ break;
+
+ case 5: /* Command terminated; Bus reset by external device */
+ hosterr = DID_RESET;
+ break;
+
+ case 6: /* Unexpected Command Received w/ host as target */
+ hosterr = DID_BAD_TARGET;
+ break;
+
+ case 80: /* Unexpected Reselection */
+ case 81: /* Unexpected Selection */
+ hosterr = DID_BAD_INTR;
+ break;
+
+ case 82: /* Abort Command Message */
+ hosterr = DID_ABORT;
+ break;
+
+ case 83: /* SCSI Bus Software Reset */
+ case 84: /* SCSI Bus Hardware Reset */
+ hosterr = DID_RESET;
+ break;
+
+ default: /* Reserved */
+ hosterr = DID_ERROR;
+ }
+
+#ifdef WD7000_DEBUG
+ if (scsierr || hosterr)
+ printk ("\nSCSI command error: SCSI 0x%02x host 0x%04x return %d\n",
+ scsierr, in_error, hosterr);
+#endif
+
+ return (scsierr | (hosterr << 16));
+}
+
+
+static void wd7000_scsi_done (Scsi_Cmnd *SCpnt)
+{
+#ifdef WD7000_DEBUG
+ printk ("%s: 0x%08x\n", __FUNCTION__, (int) SCpnt);
+#endif
+
+ SCpnt->SCp.phase = 0;
+}
+
+
+static inline void wd7000_intr_ack (Adapter *host)
+{
+ outb (0, host->iobase + ASC_INTR_ACK);
+}
+
+
+void wd7000_intr_handle (int irq, void *dev_id, struct pt_regs *regs)
+{
+ register int flag, icmb, errstatus, icmb_status;
+ register int host_error, scsi_error;
+ register Scb *scb; /* for SCSI commands */
+ register IcbAny *icb; /* for host commands */
+ register Scsi_Cmnd *SCpnt;
+ Adapter *host = (Adapter *) wd7000_host[irq - IRQ_MIN]->hostdata; /* This MUST be set!!! */
+ Mailbox *icmbs = host->mb.icmb;
+
+ host->int_counter++;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: irq = %d, host = 0x%08x\n", __FUNCTION__, irq, (int) host);
+#endif
+
+ flag = inb (host->iobase + ASC_INTR_STAT);
+
+#ifdef WD7000_DEBUG
+ printk ("%s: intr stat = 0x%02x\n", __FUNCTION__, flag);
+#endif
+
+ if (! (inb (host->iobase + ASC_STAT) & INT_IM)) {
+ /* NB: these are _very_ possible if IRQ 15 is being used, since
+ * it's the "garbage collector" on the 2nd 8259 PIC. Specifically,
+ * any interrupt signal into the 8259 which can't be identified
+ * comes out as 7 from the 8259, which is 15 to the host. Thus, it
+ * is a good thing the WD7000 has an interrupt status port, so we
+ * can sort these out. Otherwise, electrical noise and other such
+ * problems would be indistinguishable from valid interrupts...
+ */
+#ifdef WD7000_DEBUG
+ printk ("%s: phantom interrupt...\n", __FUNCTION__);
+#endif
+ wd7000_intr_ack (host);
+ return;
+ }
+
+ if (flag & MB_INTR) {
+ /* The interrupt is for a mailbox */
+ if (! (flag & IMB_INTR)) {
+#ifdef WD7000_DEBUG
+ printk ("%s: free outgoing mailbox\n", __FUNCTION__);
+#endif
+ /*
+ * If sleep_on() and the "interrupt on free OGMB" command are
+ * used in mail_out(), wake_up() should correspondingly be called
+ * here. For now, we don't need to do anything special.
+ */
+ wd7000_intr_ack (host);
+ return;
+ }
+ else {
+ /* The interrupt is for an incoming mailbox */
+ icmb = flag & MB_MASK;
+ icmb_status = icmbs[icmb].status;
+
+ if (icmb_status & 0x80) { /* unsolicited - result in ICMB */
+#ifdef WD7000_DEBUG
+ printk ("%s: unsolicited interrupt 0x%02x\n",
+ __FUNCTION__, icmb_status);
+#endif
+ wd7000_intr_ack (host);
+ return;
+ }
+
+ /* Aaaargh! (Zaga) */
+ scb = (Scb *) bus_to_virt (scsi2int ((unchar *) icmbs[icmb].scbptr));
+
+ icmbs[icmb].status = 0;
+ if (!(scb->op & ICB_OP_MASK)) { /* an SCB is done */
+ SCpnt = scb->SCpnt;
+ if (--(SCpnt->SCp.phase) <= 0) { /* all scbs are done */
+ host_error = scb->vue | (icmb_status << 8);
+ scsi_error = scb->status;
+ errstatus = make_code (host_error, scsi_error);
+ SCpnt->result = errstatus;
+
+ scb_free (scb);
+
+ SCpnt->scsi_done (SCpnt);
+ }
+ }
+ else { /* an ICB is done */
+ icb = (IcbAny *) scb;
+ icb->status = icmb_status;
+ icb->phase = 0;
+ }
+ } /* incoming mailbox */
+ }
+
+ wd7000_intr_ack (host);
+
+#ifdef WD7000_DEBUG
+ printk ("%s: return from interrupt handler\n", __FUNCTION__);
+#endif
+}
+
+
+void do_wd7000_intr_handle (int irq, void *dev_id, struct pt_regs *regs)
+{
+#if (LINUX_VERSION_CODE >= 0x020100)
+ ulong flags;
+
+ spin_lock_irqsave (&io_request_lock, flags);
+#endif
+
+ wd7000_intr_handle (irq, dev_id, regs);
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+ spin_unlock_irqrestore (&io_request_lock, flags);
+#endif
+}
+
+
+int wd7000_queuecommand (Scsi_Cmnd *SCpnt, void (*done) (Scsi_Cmnd *))
+{
+ register Scb *scb;
+ register Sgb *sgb;
+ register Adapter *host = (Adapter *) SCpnt->host->hostdata;
+
+ if ((scb = scb_alloc ()) == NULL) {
+ printk ("%s: Cannot allocate SCB!\n", __FUNCTION__);
+ return (0);
+ }
+
+ SCpnt->scsi_done = done;
+ SCpnt->SCp.phase = 1;
+ SCpnt->host_scribble = (unchar *) scb;
+ scb->idlun = ((SCpnt->target << 5) & 0xe0) | (SCpnt->lun & 7);
+ scb->direc = 0x40; /* Disable direction check */
+ scb->SCpnt = SCpnt; /* so we can find stuff later */
+ scb->host = host;
+ memcpy (scb->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ if (SCpnt->use_sg) {
+ struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer;
+ uint i;
+
+ if (SCpnt->host->sg_tablesize == SG_NONE)
+ panic ("%s: scatter/gather not supported.\n", __FUNCTION__);
+#ifdef WD7000_DEBUG
+ else
+ printk ("Using scatter/gather with %d elements.\n", SCpnt->use_sg);
+#endif
+
+ sgb = scb->sgb;
+ scb->op = 1;
+ any2scsi (scb->dataptr, (int) sgb);
+ any2scsi (scb->maxlen, SCpnt->use_sg * sizeof (Sgb));
+
+ for (i = 0; i < SCpnt->use_sg; i++) {
+ any2scsi (sgb[i].ptr, (int) sg[i].address);
+ any2scsi (sgb[i].len, sg[i].length);
+ }
+ }
+ else {
+ scb->op = 0;
+ any2scsi (scb->dataptr, (int) SCpnt->request_buffer);
+ any2scsi (scb->maxlen, SCpnt->request_bufflen);
+ }
+
+ while (! mail_out (host, scb)); /* keep trying */
+
+ return (1);
+}
+
+
+int wd7000_command (Scsi_Cmnd *SCpnt)
+{
+ if (! wd7000_queuecommand (SCpnt, wd7000_scsi_done))
+ return (-1);
+
+ while (SCpnt->SCp.phase > 0)
+ barrier (); /* phase counts scbs down to 0 */
+
+ return (SCpnt->result);
+}
+
+
+int wd7000_diagnostics (Adapter *host, int code)
+{
+ static IcbDiag icb = { ICB_OP_DIAGNOSTICS };
+ static unchar buf[256];
+ ulong timeout;
+
+ /*
+ * This routine is only called at init, so there should be OGMBs
+ * available. I'm assuming so here. If this is going to
+ * fail, I can just let the timeout catch the failure.
+ */
+ icb.type = code;
+ any2scsi (icb.len, sizeof (buf));
+ any2scsi (icb.ptr, (int) &buf);
+ icb.phase = 1;
+
+ mail_out (host, (Scb *) &icb);
+
+ /*
+ * Wait up to 2 seconds for completion.
+ */
+ for (timeout = jiffies + WAITnexttimeout; icb.phase && (jiffies < timeout); )
+ barrier ();
+
+ if (icb.phase) {
+ printk ("%s: timed out.\n", __FUNCTION__);
+ return (0);
+ }
+
+ if (make_code (icb.vue | (icb.status << 8), 0)) {
+ printk ("%s: failed (0x%02x,0x%02x)\n", __FUNCTION__, icb.vue, icb.status);
+ return (0);
+ }
+
+ return (1);
+}
+
+
+int wd7000_init (Adapter *host)
+{
+ InitCmd init_cmd =
+ {
+ INITIALIZATION,
+ 7,
+ host->bus_on,
+ host->bus_off,
+ 0,
+ { 0, 0, 0 },
+ OGMB_CNT,
+ ICMB_CNT
+ };
+ int diag;
+
+ /*
+ * Reset the adapter - only. The SCSI bus was initialized at power-up,
+ * and we need to do this just so we control the mailboxes, etc.
+ */
+ outb (ASC_RES, host->iobase + ASC_CONTROL);
+ delay (1); /* reset pulse: this is 10ms, only need 25us */
+ outb (0, host->iobase + ASC_CONTROL);
+ host->control = 0; /* this must always shadow ASC_CONTROL */
+
+ if (WAIT (host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
+ printk ("%s: WAIT timed out.\n", __FUNCTION__);
+ return (0); /* 0 = not ok */
+ }
+
+ if ((diag = inb (host->iobase + ASC_INTR_STAT)) != 1) {
+ printk ("%s: ", __FUNCTION__);
+
+ switch (diag) {
+ case 2: printk ("RAM failure.\n");
+ break;
+
+ case 3: printk ("FIFO R/W failed\n");
+ break;
+
+ case 4: printk ("SBIC register R/W failed\n");
+ break;
+
+ case 5: printk ("Initialization D-FF failed.\n");
+ break;
+
+ case 6: printk ("Host IRQ D-FF failed.\n");
+ break;
+
+ case 7: printk ("ROM checksum error.\n");
+ break;
+
+ default: printk ("diagnostic code 0x%02Xh received.\n", diag);
+ }
+
+ return (0);
+ }
+
+ /* Clear mailboxes */
+ memset (&(host->mb), 0, sizeof (host->mb));
+
+ /* Execute init command */
+ any2scsi ((unchar *) &(init_cmd.mailboxes), (int) &(host->mb));
+
+ if (! wd7000_command_out (host, (unchar *) &init_cmd, sizeof (init_cmd))) {
+ printk ("%s: adapter initialization failed.\n", __FUNCTION__);
+ return (0);
+ }
+
+ if (WAIT (host->iobase + ASC_STAT, ASC_STATMASK, ASC_INIT, 0)) {
+ printk ("%s: WAIT timed out.\n", __FUNCTION__);
+ return (0);
+ }
+
+ if (request_irq (host->irq, do_wd7000_intr_handle, SA_INTERRUPT, "wd7000", NULL)) {
+ printk ("%s: can't get IRQ %d.\n", __FUNCTION__, host->irq);
+ return (0);
+ }
+
+ if (request_dma (host->dma, "wd7000")) {
+ printk ("%s: can't get DMA channel %d.\n", __FUNCTION__, host->dma);
+ free_irq (host->irq, NULL);
+ return (0);
+ }
+
+ wd7000_enable_dma (host);
+ wd7000_enable_intr (host);
+
+ if (! wd7000_diagnostics (host, ICB_DIAG_FULL)) {
+ free_dma (host->dma);
+ free_irq (host->irq, NULL);
+ return (0);
+ }
+
+ return (1);
+}
+
+
+void wd7000_revision (Adapter *host)
+{
+ static IcbRevLvl icb = { ICB_OP_GET_REVISION };
+
+ /*
+ * Like diagnostics, this is only done at init time, in fact, from
+ * wd7000_detect, so there should be OGMBs available. If it fails,
+ * the only damage will be that the revision will show up as 0.0,
+ * which in turn means that scatter/gather will be disabled.
+ */
+ icb.phase = 1;
+ mail_out (host, (Scb *) &icb);
+
+ while (icb.phase)
+ barrier (); /* wait for completion */
+
+ host->rev1 = icb.primary;
+ host->rev2 = icb.secondary;
+}
+
+
+#undef SPRINTF
+#define SPRINTF(args...) { if (pos < (buffer + length)) pos += sprintf (pos, ## args); }
+
+int wd7000_set_info (char *buffer, int length, struct Scsi_Host *host)
+{
+ ulong flags;
+
+ save_flags (flags);
+ cli ();
+
+#ifdef WD7000_DEBUG
+ printk ("Buffer = <%.*s>, length = %d\n", length, buffer, length);
+#endif
+
+ /*
+ * Currently this is a no-op
+ */
+ printk ("Sorry, this function is currently out of order...\n");
+
+ restore_flags (flags);
+
+ return (length);
+}
+
+
+int wd7000_proc_info (char *buffer, char **start, off_t offset, int length, int hostno, int inout)
+{
+ struct Scsi_Host *host = NULL;
+ Scsi_Device *scd;
+ Adapter *adapter;
+ ulong flags;
+ char *pos = buffer;
+ short i;
+
+#ifdef WD7000_DEBUG
+ Mailbox *ogmbs, *icmbs;
+ short count;
+#endif
+
+ /*
+ * Find the specified host board.
+ */
+ for (i = 0; i < IRQS; i++)
+ if (wd7000_host[i] && (wd7000_host[i]->host_no == hostno)) {
+ host = wd7000_host[i];
+
+ break;
+ }
+
+ /*
+ * Host not found!
+ */
+ if (! host)
+ return (-ESRCH);
+
+ /*
+ * Has data been written to the file ?
+ */
+ if (inout)
+ return (wd7000_set_info (buffer, length, host));
+
+ adapter = (Adapter *) host->hostdata;
+
+ save_flags (flags);
+ cli ();
+
+ SPRINTF ("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", hostno, adapter->rev1, adapter->rev2);
+ SPRINTF (" IO base: 0x%x\n", adapter->iobase);
+ SPRINTF (" IRQ: %d\n", adapter->irq);
+ SPRINTF (" DMA channel: %d\n", adapter->dma);
+ SPRINTF (" Interrupts: %d\n", adapter->int_counter);
+ SPRINTF (" BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125);
+ SPRINTF (" BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
+
+#ifdef WD7000_DEBUG
+ ogmbs = adapter->mb.ogmb;
+ icmbs = adapter->mb.icmb;
+
+ SPRINTF ("\nControl port value: 0x%x\n", adapter->control);
+ SPRINTF ("Incoming mailbox:\n");
+ SPRINTF (" size: %d\n", ICMB_CNT);
+ SPRINTF (" queued messages: ");
+
+ for (i = count = 0; i < ICMB_CNT; i++)
+ if (icmbs[i].status) {
+ count++;
+ SPRINTF ("0x%x ", i);
+ }
+
+ SPRINTF (count ? "\n" : "none\n");
+
+ SPRINTF ("Outgoing mailbox:\n");
+ SPRINTF (" size: %d\n", OGMB_CNT);
+ SPRINTF (" next message: 0x%x\n", adapter->next_ogmb);
+ SPRINTF (" queued messages: ");
+
+ for (i = count = 0; i < OGMB_CNT; i++)
+ if (ogmbs[i].status) {
+ count++;
+ SPRINTF ("0x%x ", i);
+ }
+
+ SPRINTF (count ? "\n" : "none\n");
+#endif
+
+ /*
+ * Display driver information for each device attached to the board.
+ */
+#if (LINUX_VERSION_CODE >= 0x020100)
+ scd = host->host_queue;
+#else
+ scd = scsi_devices;
+#endif
+
+ SPRINTF ("\nAttached devices: %s\n", scd ? "" : "none");
+
+ for ( ; scd; scd = scd->next)
+ if (scd->host->host_no == hostno) {
+ SPRINTF (" [Channel: %02d, Id: %02d, Lun: %02d] ",
+ scd->channel, scd->id, scd->lun);
+ SPRINTF ("%s ", (scd->type < MAX_SCSI_DEVICE_CODE) ?
+ scsi_device_types[(short) scd->type] : "Unknown device");
+
+ for (i = 0; (i < 8) && (scd->vendor[i] >= 0x20); i++)
+ SPRINTF ("%c", scd->vendor[i]);
+ SPRINTF (" ");
+
+ for (i = 0; (i < 16) && (scd->model[i] >= 0x20); i++)
+ SPRINTF ("%c", scd->model[i]);
+ SPRINTF ("\n");
+ }
+
+ SPRINTF ("\n");
+
+ restore_flags (flags);
+
+ /*
+ * Calculate start of next buffer, and return value.
+ */
+ *start = buffer + offset;
+
+ if ((pos - buffer) < offset)
+ return (0);
+ else if ((pos - buffer - offset) < length)
+ return (pos - buffer - offset);
+ else
+ return (length);
+}
+
+
+/*
+ * Returns the number of adapters this driver is supporting.
+ *
+ * The source for hosts.c says to wait to call scsi_register until 100%
+ * sure about an adapter. We need to do it a little sooner here; we
+ * need the storage set up by scsi_register before wd7000_init, and
+ * changing the location of an Adapter structure is more trouble than
+ * calling scsi_unregister.
+ *
+ */
+int wd7000_detect (Scsi_Host_Template *tpnt)
+{
+ short present = 0, biosaddr_ptr, sig_ptr, i, pass;
+ short biosptr[NUM_CONFIGS];
+ uint iobase;
+ Adapter *host = NULL;
+ struct Scsi_Host *sh;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: started\n", __FUNCTION__);
+#endif
+
+ /*
+ * Set up SCB free list, which is shared by all adapters
+ */
+ scbs_init ();
+
+ for (i = 0; i < IRQS; wd7000_host[i++] = NULL);
+ for (i = 0; i < NUM_CONFIGS; biosptr[i++] = -1);
+
+ tpnt->proc_dir = &proc_scsi_wd7000;
+ tpnt->proc_info = &wd7000_proc_info;
+
+ for (pass = 0; pass < NUM_CONFIGS; pass++) {
+ short bios_match = 1;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: pass %d\n", __FUNCTION__, pass + 1);
+#endif
+
+ /*
+ * First, search for BIOS SIGNATURE...
+ */
+ for (biosaddr_ptr = 0; bios_match && (biosaddr_ptr < NUM_ADDRS); biosaddr_ptr++)
+ for (sig_ptr = 0; bios_match && (sig_ptr < NUM_SIGNATURES); sig_ptr++) {
+ for (i = 0; i < pass; i++)
+ if (biosptr[i] == biosaddr_ptr)
+ break;
+
+ if (i == pass) {
+#if (LINUX_VERSION_CODE >= 0x020100)
+ char *biosaddr = (char *) ioremap (wd7000_biosaddr[biosaddr_ptr] +
+ signatures[sig_ptr].ofs,
+ signatures[sig_ptr].len);
+#else
+ char *biosaddr = (char *) (wd7000_biosaddr[biosaddr_ptr] +
+ signatures[sig_ptr].ofs);
+#endif
+ bios_match = memcmp (biosaddr, signatures[sig_ptr].sig,
+ signatures[sig_ptr].len);
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+ iounmap (biosaddr);
+#else
+#endif
+ if (! bios_match) {
+ /*
+ * BIOS SIGNATURE has been found.
+ */
+ biosptr[pass] = biosaddr_ptr;
+#ifdef WD7000_DEBUG
+ printk ("WD-7000 SST BIOS detected at 0x%lx: checking...\n",
+ wd7000_biosaddr[biosaddr_ptr]);
+#endif
+ }
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ if (bios_match)
+ printk ("WD-7000 SST BIOS not detected...\n");
+#endif
+
+ if (configs[pass].irq < 0)
+ continue;
+
+ iobase = configs[pass].iobase;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: check IO 0x%x region...\n", __FUNCTION__, iobase);
+#endif
+
+ if (! check_region (iobase, 4)) {
+#ifdef WD7000_DEBUG
+ printk ("%s: ASC reset (IO 0x%x) ...", __FUNCTION__, iobase);
+#endif
+ /*
+ * ASC reset...
+ */
+ outb (ASC_RES, iobase + ASC_CONTROL);
+ delay (1);
+ outb (0, iobase + ASC_CONTROL);
+
+ if (WAIT (iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0))
+#ifdef WD7000_DEBUG
+ {
+ printk ("failed!\n");
+ continue;
+ }
+ else
+ printk ("ok!\n");
+#else
+ continue;
+#endif
+
+ if (inb (iobase + ASC_INTR_STAT) == 1) {
+ /*
+ * We register here, to get a pointer to the extra space,
+ * which we'll use as the Adapter structure (host) for
+ * this adapter. It is located just after the registered
+ * Scsi_Host structure (sh), and is located by the empty
+ * array hostdata.
+ */
+ sh = scsi_register (tpnt, sizeof (Adapter));
+ host = (Adapter *) sh->hostdata;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: adapter allocated at 0x%x\n", __FUNCTION__, (int) host);
+#endif
+
+ memset (host, 0, sizeof (Adapter));
+
+ host->irq = configs[pass].irq;
+ host->dma = configs[pass].dma;
+ host->iobase = iobase;
+ host->int_counter = 0;
+ host->bus_on = configs[pass].bus_on;
+ host->bus_off = configs[pass].bus_off;
+ host->sh = wd7000_host[host->irq - IRQ_MIN] = sh;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: Trying to init WD-7000 card at IO 0x%x, IRQ %d, DMA %d...\n",
+ __FUNCTION__, host->iobase, host->irq, host->dma);
+#endif
+
+ if (! wd7000_init (host)) { /* Initialization failed */
+ scsi_unregister (sh);
+ continue;
+ }
+
+ /*
+ * OK from here - we'll use this adapter/configuration.
+ */
+ wd7000_revision (host); /* important for scatter/gather */
+
+ /*
+ * Register our ports.
+ */
+ request_region (host->iobase, 4, "wd7000");
+
+ /*
+ * For boards before rev 6.0, scatter/gather isn't supported.
+ */
+ if (host->rev1 < 6)
+ sh->sg_tablesize = SG_NONE;
+
+ present++; /* count it */
+
+ printk ("Western Digital WD-7000 (rev %d.%d) ",
+ host->rev1, host->rev2);
+ printk ("using IO 0x%x, IRQ %d, DMA %d.\n",
+ host->iobase, host->irq, host->dma);
+ printk (" BUS_ON time: %dns, BUS_OFF time: %dns\n",
+ host->bus_on * 125, host->bus_off * 125);
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ else
+ printk ("%s: IO 0x%x region is already allocated!\n", __FUNCTION__, iobase);
+#endif
+
+ }
+
+ if (! present)
+ printk ("Failed initialization of WD-7000 SCSI card!\n");
+
+ return (present);
+}
+
+
+/*
+ * I have absolutely NO idea how to do an abort with the WD7000...
+ */
+int wd7000_abort (Scsi_Cmnd *SCpnt)
+{
+ Adapter *host = (Adapter *) SCpnt->host->hostdata;
+
+ if (inb (host->iobase + ASC_STAT) & INT_IM) {
+ printk ("%s: lost interrupt\n", __FUNCTION__);
+ wd7000_intr_handle (host->irq, NULL, NULL);
+
+ return (SCSI_ABORT_SUCCESS);
+ }
+
+ return (SCSI_ABORT_SNOOZE);
+}
+
+
+/*
+ * I also have no idea how to do a reset...
+ */
+int wd7000_reset (Scsi_Cmnd *SCpnt, uint flags)
+{
+ return (SCSI_RESET_PUNT);
+}
+
+
+/*
+ * This was borrowed directly from aha1542.c. (Zaga)
+ */
+int wd7000_biosparam (Disk *disk, kdev_t dev, int *ip)
+{
+#ifdef WD7000_DEBUG
+ printk ("%s: dev=%s, size=%d, ", __FUNCTION__, kdevname (dev), disk->capacity);
+#endif
+
+ /*
+ * try default translation
+ */
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = disk->capacity / (64 * 32);
+
+ /*
+ * for disks >1GB do some guessing
+ */
+ if (ip[2] >= 1024) {
+ int info[3];
+
+ /*
+ * try to figure out the geometry from the partition table
+ */
+ if ((scsicam_bios_param (disk, dev, info) < 0) ||
+ !(((info[0] == 64) && (info[1] == 32)) ||
+ ((info[0] == 255) && (info[1] == 63)))) {
+ printk ("%s: unable to verify geometry for disk with >1GB.\n"
+ " using extended translation.\n",
+ __FUNCTION__);
+
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = disk->capacity / (255 * 63);
+ }
+ else {
+ ip[0] = info[0];
+ ip[1] = info[1];
+ ip[2] = info[2];
+
+ if (info[0] == 255)
+ printk ("%s: current partition table is using extended translation.\n",
+ __FUNCTION__);
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ printk ("bios geometry: head=%d, sec=%d, cyl=%d\n", ip[0], ip[1], ip[2]);
+ printk ("WARNING: check, if the bios geometry is correct.\n");
+#endif
+
+ return (0);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = WD7000;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/wd7000.h b/linux/src/drivers/scsi/wd7000.h
new file mode 100644
index 0000000..e17a69b
--- /dev/null
+++ b/linux/src/drivers/scsi/wd7000.h
@@ -0,0 +1,446 @@
+/* $Id: wd7000.h,v 1.1 1999/04/26 05:55:19 tb Exp $
+ *
+ * Header file for the WD-7000 driver for Linux
+ *
+ * John Boyd <boyd@cis.ohio-state.edu> Jan 1994:
+ * This file has been reduced to only the definitions needed for the
+ * WD7000 host structure.
+ *
+ * Revision by Miroslav Zagorac <zaga@fly.cc.fer.hr> Jun 1997.
+ */
+#ifndef _WD7000_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+#ifndef NULL
+#define NULL 0L
+#endif
+
+/*
+ * In this version, sg_tablesize now defaults to WD7000_SG, and will
+ * be set to SG_NONE for older boards. This is the reverse of the
+ * previous default, and was changed so that the driver-level
+ * Scsi_Host_Template would reflect the driver's support for scatter/
+ * gather.
+ *
+ * Also, it has been reported that boards at Revision 6 support scatter/
+ * gather, so the new definition of an "older" board has been changed
+ * accordingly.
+ */
+#define WD7000_Q 16
+#define WD7000_SG 16
+
+#ifdef WD7000_DEFINES
+/*
+ * Mailbox structure sizes.
+ * I prefer to keep the number of ICMBs much larger than the number of
+ * OGMBs. OGMBs are used very quickly by the driver to start one or
+ * more commands, while ICMBs are used by the host adapter per command.
+ */
+#define OGMB_CNT 16
+#define ICMB_CNT 32
+
+/*
+ * Scb's are shared by all active adapters. If you'd rather conserve
+ * memory, use a smaller number (> 0, of course) - things should
+ * still work OK.
+ */
+#define MAX_SCBS (4 * WD7000_Q)
+
+/*
+ * WD7000-specific mailbox structure
+ */
+typedef volatile struct {
+ unchar status;
+ unchar scbptr[3]; /* SCSI-style - MSB first (big endian) */
+} Mailbox;
+
+/*
+ * This structure should contain all per-adapter global data. I.e., any
+ * new global per-adapter data should put in here.
+ */
+typedef struct {
+ struct Scsi_Host *sh; /* Pointer to Scsi_Host structure */
+ int iobase; /* This adapter's I/O base address */
+ int irq; /* This adapter's IRQ level */
+ int dma; /* This adapter's DMA channel */
+ int int_counter; /* This adapter's interrupt counter */
+ int bus_on; /* This adapter's BUS_ON time */
+ int bus_off; /* This adapter's BUS_OFF time */
+ struct { /* This adapter's mailboxes */
+ Mailbox ogmb[OGMB_CNT]; /* Outgoing mailboxes */
+ Mailbox icmb[ICMB_CNT]; /* Incoming mailboxes */
+ } mb;
+ int next_ogmb; /* to reduce contention at mailboxes */
+ unchar control; /* shadows CONTROL port value */
+ unchar rev1; /* filled in by wd7000_revision */
+ unchar rev2;
+} Adapter;
+
+
+/*
+ * possible irq range
+ */
+#define IRQ_MIN 3
+#define IRQ_MAX 15
+#define IRQS (IRQ_MAX - IRQ_MIN + 1)
+
+#define BUS_ON 64 /* x 125ns = 8000ns (BIOS default) */
+#define BUS_OFF 15 /* x 125ns = 1875ns (BIOS default) */
+
+/*
+ * Standard Adapter Configurations - used by wd7000_detect
+ */
+typedef struct {
+ short irq; /* IRQ level */
+ short dma; /* DMA channel */
+ uint iobase; /* I/O base address */
+ short bus_on; /* Time that WD7000 spends on the AT-bus when */
+ /* transferring data. BIOS default is 8000ns. */
+ short bus_off; /* Time that WD7000 spends OFF THE BUS after */
+ /* while it is transferring data. */
+ /* BIOS default is 1875ns */
+} Config;
+
+
+/*
+ * The following list defines strings to look for in the BIOS that identify
+ * it as the WD7000-FASST2 SST BIOS. I suspect that something should be
+ * added for the Future Domain version.
+ */
+typedef struct {
+ const char *sig; /* String to look for */
+ ulong ofs; /* offset from BIOS base address */
+ uint len; /* length of string */
+} Signature;
+
+/*
+ * I/O Port Offsets and Bit Definitions
+ * 4 addresses are used. Those not defined here are reserved.
+ */
+#define ASC_STAT 0 /* Status, Read */
+#define ASC_COMMAND 0 /* Command, Write */
+#define ASC_INTR_STAT 1 /* Interrupt Status, Read */
+#define ASC_INTR_ACK 1 /* Acknowledge, Write */
+#define ASC_CONTROL 2 /* Control, Write */
+
+/*
+ * ASC Status Port
+ */
+#define INT_IM 0x80 /* Interrupt Image Flag */
+#define CMD_RDY 0x40 /* Command Port Ready */
+#define CMD_REJ 0x20 /* Command Port Byte Rejected */
+#define ASC_INIT 0x10 /* ASC Initialized Flag */
+#define ASC_STATMASK 0xf0 /* The lower 4 Bytes are reserved */
+
+/*
+ * COMMAND opcodes
+ *
+ * Unfortunately, I have no idea how to properly use some of these commands,
+ * as the OEM manual does not make it clear. I have not been able to use
+ * enable/disable unsolicited interrupts or the reset commands with any
+ * discernible effect whatsoever. I think they may be related to certain
+ * ICB commands, but again, the OEM manual doesn't make that clear.
+ */
+#define NO_OP 0 /* NO-OP toggles CMD_RDY bit in ASC_STAT */
+#define INITIALIZATION 1 /* initialization (10 bytes) */
+#define DISABLE_UNS_INTR 2 /* disable unsolicited interrupts */
+#define ENABLE_UNS_INTR 3 /* enable unsolicited interrupts */
+#define INTR_ON_FREE_OGMB 4 /* interrupt on free OGMB */
+#define SOFT_RESET 5 /* SCSI bus soft reset */
+#define HARD_RESET_ACK 6 /* SCSI bus hard reset acknowledge */
+#define START_OGMB 0x80 /* start command in OGMB (n) */
+#define SCAN_OGMBS 0xc0 /* start multiple commands, signature (n) */
+ /* where (n) = lower 6 bits */
+/*
+ * For INITIALIZATION:
+ */
+typedef struct {
+ unchar op; /* command opcode (= 1) */
+ unchar ID; /* Adapter's SCSI ID */
+ unchar bus_on; /* Bus on time, x 125ns (see below) */
+ unchar bus_off; /* Bus off time, "" "" */
+ unchar rsvd; /* Reserved */
+ unchar mailboxes[3]; /* Address of Mailboxes, MSB first */
+ unchar ogmbs; /* Number of outgoing MBs, max 64, 0,1 = 1 */
+ unchar icmbs; /* Number of incoming MBs, "" "" */
+} InitCmd;
+
+/*
+ * Interrupt Status Port - also returns diagnostic codes at ASC reset
+ *
+ * if msb is zero, the lower bits are diagnostic status
+ * Diagnostics:
+ * 01 No diagnostic error occurred
+ * 02 RAM failure
+ * 03 FIFO R/W failed
+ * 04 SBIC register read/write failed
+ * 05 Initialization D-FF failed
+ * 06 Host IRQ D-FF failed
+ * 07 ROM checksum error
+ * Interrupt status (bitwise):
+ * 10NNNNNN outgoing mailbox NNNNNN is free
+ * 11NNNNNN incoming mailbox NNNNNN needs service
+ */
+#define MB_INTR 0xC0 /* Mailbox Service possible/required */
+#define IMB_INTR 0x40 /* 1 Incoming / 0 Outgoing */
+#define MB_MASK 0x3f /* mask for mailbox number */
+
+/*
+ * CONTROL port bits
+ */
+#define INT_EN 0x08 /* Interrupt Enable */
+#define DMA_EN 0x04 /* DMA Enable */
+#define SCSI_RES 0x02 /* SCSI Reset */
+#define ASC_RES 0x01 /* ASC Reset */
+
+/*
+ * Driver data structures:
+ * - mb and scbs are required for interfacing with the host adapter.
+ * An SCB has extra fields not visible to the adapter; mb's
+ * _cannot_ do this, since the adapter assumes they are contiguous in
+ * memory, 4 bytes each, with ICMBs following OGMBs, and uses this fact
+ * to access them.
+ * - An icb is for host-only (non-SCSI) commands. ICBs are 16 bytes each;
+ * the additional bytes are used only by the driver.
+ * - For now, a pool of SCBs is kept in global storage by this driver,
+ *   and they are allocated and freed as needed.
+ *
+ * The 7000-FASST2 marks OGMBs empty as soon as it has _started_ a command,
+ * not when it has finished. Since the SCB must be around for completion,
+ * problems arise when SCBs correspond to OGMBs, which may be reallocated
+ * earlier (or delayed unnecessarily until a command completes).
+ * Mailboxes are used as transient data structures, simply for
+ * carrying SCB addresses to/from the 7000-FASST2.
+ *
+ * Note also that since SCBs are not "permanently" associated with mailboxes,
+ * there is no need to keep a global list of Scsi_Cmnd pointers indexed
+ * by OGMB. Again, SCBs reference their Scsi_Cmnds directly, so mailbox
+ * indices need not be involved.
+ */
+
+/*
+ * WD7000-specific scatter/gather element structure
+ */
+typedef struct {
+ unchar len[3];
+ unchar ptr[3]; /* Also SCSI-style - MSB first */
+} Sgb;
+
+typedef struct { /* Command Control Block 5.4.1 */
+ unchar op; /* Command Control Block Operation Code */
+ unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked */
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ unchar cdb[12]; /* SCSI Command Block */
+ volatile unchar status; /* SCSI Return Status */
+ volatile unchar vue; /* Vendor Unique Error Code */
+ unchar maxlen[3]; /* Maximum Data Transfer Length */
+ unchar dataptr[3]; /* SCSI Data Block Pointer */
+ unchar linkptr[3]; /* Next Command Link Pointer */
+ unchar direc; /* Transfer Direction */
+ unchar reserved2[6]; /* SCSI Command Descriptor Block */
+ /* end of hardware SCB */
+ Scsi_Cmnd *SCpnt; /* Scsi_Cmnd using this SCB */
+ Sgb sgb[WD7000_SG]; /* Scatter/gather list for this SCB */
+ Adapter *host; /* host adapter */
+ unchar used; /* flag */
+} Scb;
+
+/*
+ * This driver is written to allow host-only commands to be executed.
+ * These use a 16-byte block called an ICB. The format is extended by the
+ * driver to 18 bytes, to support the status returned in the ICMB and
+ * an execution phase code.
+ *
+ * There are other formats besides these; these are the ones I've tried
+ * to use. Formats for some of the defined ICB opcodes are not defined
+ * (notably, get/set unsolicited interrupt status) in my copy of the OEM
+ * manual, and others are ambiguous/hard to follow.
+ */
+#define ICB_OP_MASK 0x80 /* distinguishes scbs from icbs */
+#define ICB_OP_OPEN_RBUF 0x80 /* open receive buffer */
+#define ICB_OP_RECV_CMD 0x81 /* receive command from initiator */
+#define ICB_OP_RECV_DATA 0x82 /* receive data from initiator */
+#define ICB_OP_RECV_SDATA 0x83 /* receive data with status from init. */
+#define ICB_OP_SEND_DATA 0x84 /* send data with status to initiator */
+#define ICB_OP_SEND_STAT 0x86 /* send command status to initiator */
+ /* 0x87 is reserved */
+#define ICB_OP_READ_INIT 0x88 /* read initialization bytes */
+#define ICB_OP_READ_ID 0x89 /* read adapter's SCSI ID */
+#define ICB_OP_SET_UMASK 0x8A /* set unsolicited interrupt mask */
+#define ICB_OP_GET_UMASK 0x8B /* read unsolicited interrupt mask */
+#define ICB_OP_GET_REVISION 0x8C /* read firmware revision level */
+#define ICB_OP_DIAGNOSTICS 0x8D /* execute diagnostics */
+#define ICB_OP_SET_EPARMS 0x8E /* set execution parameters */
+#define ICB_OP_GET_EPARMS 0x8F /* read execution parameters */
+
+typedef struct {
+ unchar op;
+ unchar IDlun; /* Initiator SCSI ID/lun */
+ unchar len[3]; /* command buffer length */
+ unchar ptr[3]; /* command buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRecvCmd;
+
+typedef struct {
+ unchar op;
+ unchar IDlun; /* Target SCSI ID/lun */
+ unchar stat; /* (outgoing) completion status byte 1 */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbSendStat;
+
+typedef struct {
+ unchar op;
+ volatile unchar primary; /* primary revision level (returned) */
+ volatile unchar secondary; /* secondary revision level (returned) */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRevLvl;
+
+typedef struct { /* I'm totally guessing here */
+ unchar op;
+ volatile unchar mask[14]; /* mask bits */
+#if 0
+ unchar rsvd[12]; /* reserved */
+#endif
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbUnsMask;
+
+typedef struct {
+ unchar op;
+ unchar type; /* diagnostics type code (0-3) */
+ unchar len[3]; /* buffer length */
+ unchar ptr[3]; /* buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbDiag;
+
+#define ICB_DIAG_POWERUP 0 /* Power-up diags only */
+#define ICB_DIAG_WALKING 1 /* walking 1's pattern */
+#define ICB_DIAG_DMA 2 /* DMA - system memory diags */
+#define ICB_DIAG_FULL 3 /* do both 1 & 2 */
+
+typedef struct {
+ unchar op;
+ unchar rsvd1; /* reserved */
+ unchar len[3]; /* parms buffer length */
+ unchar ptr[3]; /* parms buffer address */
+ unchar idx[2]; /* index (MSB-LSB) */
+ unchar rsvd2[5]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbParms;
+
+typedef struct {
+ unchar op;
+ unchar data[14]; /* format-specific data */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbAny;
+
+typedef union {
+ unchar op; /* ICB opcode */
+ IcbRecvCmd recv_cmd; /* format for receive command */
+ IcbSendStat send_stat; /* format for send status */
+ IcbRevLvl rev_lvl; /* format for get revision level */
+ IcbDiag diag; /* format for execute diagnostics */
+ IcbParms eparms; /* format for get/set exec parms */
+ IcbAny icb; /* generic format */
+ unchar data[18];
+} Icb;
+
+#define WAITnexttimeout 200 /* 2 seconds */
+
+typedef union { /* let's cheat... */
+ int i;
+ unchar u[sizeof (int)]; /* the sizeof(int) makes it more portable */
+} i_u;
+
+#endif /* WD7000_DEFINES */
+
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+
+#define WD7000 { \
+ proc_dir: &proc_scsi_wd7000, \
+ proc_info: wd7000_proc_info, \
+ name: "Western Digital WD-7000", \
+ detect: wd7000_detect, \
+ command: wd7000_command, \
+ queuecommand: wd7000_queuecommand, \
+ abort: wd7000_abort, \
+ reset: wd7000_reset, \
+ bios_param: wd7000_biosparam, \
+ can_queue: WD7000_Q, \
+ this_id: 7, \
+ sg_tablesize: WD7000_SG, \
+ cmd_per_lun: 1, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 0 \
+}
+
+#else /* Use old scsi code */
+
+#define WD7000 { \
+ proc_dir: &proc_scsi_wd7000, \
+ proc_info: wd7000_proc_info, \
+ name: "Western Digital WD-7000", \
+ detect: wd7000_detect, \
+ command: wd7000_command, \
+ queuecommand: wd7000_queuecommand, \
+ abort: wd7000_abort, \
+ reset: wd7000_reset, \
+ bios_param: wd7000_biosparam, \
+ can_queue: WD7000_Q, \
+ this_id: 7, \
+ sg_tablesize: WD7000_SG, \
+ cmd_per_lun: 1, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+}
+
+#endif /* LINUX_VERSION_CODE */
+
+
+extern struct proc_dir_entry proc_scsi_wd7000;
+
+
+#ifdef WD7000_DEFINES
+int wd7000_diagnostics (Adapter *, int);
+int wd7000_init (Adapter *);
+void wd7000_revision (Adapter *);
+#endif /* WD7000_DEFINES */
+
+void wd7000_setup (char *, int *);
+int make_code (uint, uint);
+void wd7000_intr_handle (int, void *, struct pt_regs *);
+void do_wd7000_intr_handle (int, void *, struct pt_regs *);
+int wd7000_queuecommand (Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int wd7000_command (Scsi_Cmnd *);
+int wd7000_set_info (char *, int, struct Scsi_Host *);
+int wd7000_proc_info (char *, char **, off_t, int, int, int);
+int wd7000_detect (Scsi_Host_Template *);
+int wd7000_abort (Scsi_Cmnd *);
+int wd7000_reset (Scsi_Cmnd *, uint);
+int wd7000_biosparam (Disk *, kdev_t, int *);
+
+#endif /* _WD7000_H */
diff --git a/linux/src/include/asm-i386/atomic.h b/linux/src/include/asm-i386/atomic.h
new file mode 100644
index 0000000..7e5dd06
--- /dev/null
+++ b/linux/src/include/asm-i386/atomic.h
@@ -0,0 +1,69 @@
+#ifndef __ARCH_I386_ATOMIC__
+#define __ARCH_I386_ATOMIC__
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+#ifdef __SMP__
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
+
+typedef int atomic_t;
+
+#define atomic_read(v) (*v)
+
+static __inline__ void atomic_add(atomic_t i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "addl %1,%0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"ir" (i), "m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "subl %1,%0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"ir" (i), "m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "incl %0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "decl %0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "decl %0; sete %1"
+ :"=m" (__atomic_fool_gcc(v)), "=qm" (c)
+ :"m" (__atomic_fool_gcc(v)));
+ return c != 0;
+}
+
+#endif
diff --git a/linux/src/include/asm-i386/bitops.h b/linux/src/include/asm-i386/bitops.h
new file mode 100644
index 0000000..e2a4c14
--- /dev/null
+++ b/linux/src/include/asm-i386/bitops.h
@@ -0,0 +1,201 @@
+#ifndef _I386_BITOPS_H
+#define _I386_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#ifdef __SMP__
+#define LOCK_PREFIX "lock ; "
+#define SMPVOL volatile
+#else
+#define LOCK_PREFIX ""
+#define SMPVOL
+#endif
+
+/*
+ * Some hacks to defeat gcc over-optimizations..
+ */
+struct __dummy { unsigned long a[100]; };
+#define ADDR (*(struct __dummy *) addr)
+#define CONST_ADDR (*(const struct __dummy *) addr)
+
+static __inline__ int set_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int clear_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int change_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static __inline__ int test_bit(int nr, const SMPVOL void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+/*
+ * Find-bit routines..
+ */
+static __inline__ int find_first_zero_bit(void * addr, unsigned size)
+{
+ int d0, d1, d2;
+ int res;
+
+ if (!size)
+ return 0;
+ __asm__("cld\n\t"
+ "movl $-1,%%eax\n\t"
+ "xorl %%edx,%%edx\n\t"
+ "repe; scasl\n\t"
+ "je 1f\n\t"
+ "xorl -4(%%edi),%%eax\n\t"
+ "subl $4,%%edi\n\t"
+ "bsfl %%eax,%%edx\n"
+ "1:\tsubl %%ebx,%%edi\n\t"
+ "shll $3,%%edi\n\t"
+ "addl %%edi,%%edx"
+ :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+ :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
+ return res;
+}
+
+static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+{
+ unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
+ int set = 0, bit = offset & 31, res;
+
+ if (bit) {
+ /*
+ * Look for zero in first byte
+ */
+ __asm__("bsfl %1,%0\n\t"
+ "jne 1f\n\t"
+ "movl $32, %0\n"
+ "1:"
+ : "=r" (set)
+ : "r" (~(*p >> bit)));
+ if (set < (32 - bit))
+ return set + offset;
+ set = 32 - bit;
+ p++;
+ }
+ /*
+ * No zero yet, search remaining full bytes for a zero
+ */
+ res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
+ return (offset + set + res);
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static __inline__ unsigned long ffz(unsigned long word)
+{
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"r" (~word));
+ return word;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static __inline__ int ffs(int x)
+{
+ int r;
+
+ __asm__("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "g" (x));
+ return r+1;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_BITOPS_H */
diff --git a/linux/src/include/asm-i386/byteorder.h b/linux/src/include/asm-i386/byteorder.h
new file mode 100644
index 0000000..3f40767
--- /dev/null
+++ b/linux/src/include/asm-i386/byteorder.h
@@ -0,0 +1,90 @@
+#ifndef _I386_BYTEORDER_H
+#define _I386_BYTEORDER_H
+
+#undef ntohl
+#undef ntohs
+#undef htonl
+#undef htons
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+/* For avoiding bswap on i386 */
+#ifdef __KERNEL__
+#include <linux/config.h>
+#endif
+
+extern unsigned long int ntohl(unsigned long int);
+extern unsigned short int ntohs(unsigned short int);
+extern unsigned long int htonl(unsigned long int);
+extern unsigned short int htons(unsigned short int);
+
+extern __inline__ unsigned long int __ntohl(unsigned long int);
+extern __inline__ unsigned short int __ntohs(unsigned short int);
+extern __inline__ unsigned long int __constant_ntohl(unsigned long int);
+extern __inline__ unsigned short int __constant_ntohs(unsigned short int);
+
+extern __inline__ unsigned long int
+__ntohl(unsigned long int x)
+{
+#if defined(__KERNEL__) && !defined(CONFIG_M386)
+ __asm__("bswap %0" : "=r" (x) : "0" (x));
+#else
+ __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
+ "rorl $16,%0\n\t" /* swap words */
+ "xchgb %b0,%h0" /* swap higher bytes */
+ :"=q" (x)
+ : "0" (x));
+#endif
+ return x;
+}
+
+#define __constant_ntohl(x) \
+ ((unsigned long int)((((unsigned long int)(x) & 0x000000ffU) << 24) | \
+ (((unsigned long int)(x) & 0x0000ff00U) << 8) | \
+ (((unsigned long int)(x) & 0x00ff0000U) >> 8) | \
+ (((unsigned long int)(x) & 0xff000000U) >> 24)))
+
+extern __inline__ unsigned short int
+__ntohs(unsigned short int x)
+{
+ __asm__("xchgb %b0,%h0" /* swap bytes */
+ : "=q" (x)
+ : "0" (x));
+ return x;
+}
+
+#define __constant_ntohs(x) \
+ ((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
+ (((unsigned short int)(x) & 0xff00) >> 8))) \
+
+#define __htonl(x) __ntohl(x)
+#define __htons(x) __ntohs(x)
+#define __constant_htonl(x) __constant_ntohl(x)
+#define __constant_htons(x) __constant_ntohs(x)
+
+#ifdef __OPTIMIZE__
+# define ntohl(x) \
+(__builtin_constant_p((long)(x)) ? \
+ __constant_ntohl((x)) : \
+ __ntohl((x)))
+# define ntohs(x) \
+(__builtin_constant_p((short)(x)) ? \
+ __constant_ntohs((x)) : \
+ __ntohs((x)))
+# define htonl(x) \
+(__builtin_constant_p((long)(x)) ? \
+ __constant_htonl((x)) : \
+ __htonl((x)))
+# define htons(x) \
+(__builtin_constant_p((short)(x)) ? \
+ __constant_htons((x)) : \
+ __htons((x)))
+#endif
+
+#endif
diff --git a/linux/src/include/asm-i386/cache.h b/linux/src/include/asm-i386/cache.h
new file mode 100644
index 0000000..cea6c85
--- /dev/null
+++ b/linux/src/include/asm-i386/cache.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-i386/cache.h
+ */
+#ifndef __ARCH_I386_CACHE_H
+#define __ARCH_I386_CACHE_H
+
+/* bytes per L1 cache line */
+#if CPU==586 || CPU==686
+#define L1_CACHE_BYTES 32
+#else
+#define L1_CACHE_BYTES 16
+#endif
+
+#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#endif
diff --git a/linux/src/include/asm-i386/checksum.h b/linux/src/include/asm-i386/checksum.h
new file mode 100644
index 0000000..ac49b14
--- /dev/null
+++ b/linux/src/include/asm-i386/checksum.h
@@ -0,0 +1,121 @@
+#ifndef _I386_CHECKSUM_H
+#define _I386_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
+
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+unsigned int csum_partial_copy_fromuser(const char *src, char *dst, int len, int sum);
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
+ * Arnt Gulbrandsen.
+ */
+static inline unsigned short ip_fast_csum(unsigned char * iph,
+ unsigned int ihl) {
+ unsigned int sum;
+
+ __asm__ __volatile__("
+ movl (%1), %0
+ subl $4, %2
+ jbe 2f
+ addl 4(%1), %0
+ adcl 8(%1), %0
+ adcl 12(%1), %0
+1: adcl 16(%1), %0
+ lea 4(%1), %1
+ decl %2
+ jne 1b
+ adcl $0, %0
+ movl %0, %2
+ shrl $16, %0
+ addw %w2, %w0
+ adcl $0, %0
+ notl %0
+2:
+ "
+ /* Since the input registers which are loaded with iph and ipl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl));
+ return(sum);
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static inline unsigned int csum_fold(unsigned int sum)
+{
+ __asm__("
+ addl %1, %0
+ adcl $0xffff, %0
+ "
+ : "=r" (sum)
+ : "r" (sum << 16), "0" (sum & 0xffff0000)
+ );
+ return (~sum) >> 16;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+
+static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum) {
+ __asm__("
+ addl %1, %0
+ adcl %2, %0
+ adcl %3, %0
+ adcl $0, %0
+ "
+ : "=r" (sum)
+ : "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
+ return csum_fold(sum);
+}
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#endif
diff --git a/linux/src/include/asm-i386/delay.h b/linux/src/include/asm-i386/delay.h
new file mode 100644
index 0000000..2166c4c
--- /dev/null
+++ b/linux/src/include/asm-i386/delay.h
@@ -0,0 +1,18 @@
+#ifndef _I386_DELAY_H
+#define _I386_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/i386/lib/delay.c
+ */
+
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) (__builtin_constant_p(n) ? \
+ __const_udelay((n) * 0x10c6ul) : \
+ __udelay(n))
+
+#endif /* defined(_I386_DELAY_H) */
diff --git a/linux/src/include/asm-i386/dma.h b/linux/src/include/asm-i386/dma.h
new file mode 100644
index 0000000..c323d40
--- /dev/null
+++ b/linux/src/include/asm-i386/dma.h
@@ -0,0 +1,271 @@
+/* $Id: dma.h,v 1.1 1999/04/26 05:55:43 tb Exp $
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+#define MAX_DMA_CHANNELS 8
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS 0x1000000
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
+
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr<=3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+ switch(dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_PAGE_3);
+ break;
+ case 5:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+ break;
+ case 6:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+ break;
+ case 7:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+ break;
+ }
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ set_dma_page(dmanr, a>>16);
+ if (dmanr <= 3) {
+ dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ } else {
+ dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ }
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ } else {
+ dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+ : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr); /* release it again */
+
+
+#endif /* _ASM_DMA_H */
diff --git a/linux/src/include/asm-i386/errno.h b/linux/src/include/asm-i386/errno.h
new file mode 100644
index 0000000..7cf599f
--- /dev/null
+++ b/linux/src/include/asm-i386/errno.h
@@ -0,0 +1,132 @@
+#ifndef _I386_ERRNO_H
+#define _I386_ERRNO_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+#endif
diff --git a/linux/src/include/asm-i386/fcntl.h b/linux/src/include/asm-i386/fcntl.h
new file mode 100644
index 0000000..369ac51
--- /dev/null
+++ b/linux/src/include/asm-i386/fcntl.h
@@ -0,0 +1,59 @@
+#ifndef _I386_FCNTL_H
+#define _I386_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
+ located on an ext2 file system */
+#define O_ACCMODE 0003
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+#define O_CREAT 0100 /* not fcntl */
+#define O_EXCL 0200 /* not fcntl */
+#define O_NOCTTY 0400 /* not fcntl */
+#define O_TRUNC 01000 /* not fcntl */
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_NDELAY O_NONBLOCK
+#define O_SYNC 010000
+#define FASYNC 020000 /* fcntl, for BSD compatibility */
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get f_flags */
+#define F_SETFD 2 /* set f_flags */
+#define F_GETFL 3 /* more flags (cloexec) */
+#define F_SETFL 4
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+struct flock {
+ short l_type;
+ short l_whence;
+ off_t l_start;
+ off_t l_len;
+ pid_t l_pid;
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/floppy.h b/linux/src/include/asm-i386/floppy.h
new file mode 100644
index 0000000..033a20f
--- /dev/null
+++ b/linux/src/include/asm-i386/floppy.h
@@ -0,0 +1,289 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_I386_FLOPPY_H
+#define __ASM_I386_FLOPPY_H
+
+
+#define SW fd_routine[use_virtual_dma&1]
+
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(port,value) outb_p(port,value)
+
+#define fd_enable_dma() SW._enable_dma(FLOPPY_DMA)
+#define fd_disable_dma() SW._disable_dma(FLOPPY_DMA)
+#define fd_request_dma() SW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma() SW._free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff() SW._clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode) SW._set_dma_mode(FLOPPY_DMA,mode)
+#define fd_set_dma_addr(addr) SW._set_dma_addr(FLOPPY_DMA,addr)
+#define fd_set_dma_count(count) SW._set_dma_count(FLOPPY_DMA,count)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_cacheflush(addr,size) /* nothing */
+#define fd_request_irq() SW._request_irq(FLOPPY_IRQ, floppy_interrupt, \
+ SA_INTERRUPT|SA_SAMPLE_RANDOM, \
+ "floppy", NULL)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
+#define fd_dma_mem_free(addr,size) SW._dma_mem_free(addr,size)
+
+static int virtual_dma_count=0;
+static int virtual_dma_residue=0;
+static unsigned long virtual_dma_addr=0;
+static int virtual_dma_mode=0;
+static int doing_pdma=0;
+
+static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{
+ register unsigned char st;
+
+#undef TRACE_FLPY_INT
+#undef NO_FLOPPY_ASSEMBLER
+
+#ifdef TRACE_FLPY_INT
+ static int calls=0;
+ static int bytes=0;
+ static int dma_wait=0;
+#endif
+ if(!doing_pdma) {
+ floppy_interrupt(irq, dev_id, regs);
+ return;
+ }
+
+#ifdef TRACE_FLPY_INT
+ if(!calls)
+ bytes = virtual_dma_count;
+#endif
+
+#ifndef NO_FLOPPY_ASSEMBLER
+ __asm__ ("testl %1,%1\n"
+ "je 3f\n"
+ "1: inb %w4,%b0\n"
+ "andb $160,%b0\n"
+ "cmpb $160,%b0\n"
+ "jne 2f\n"
+ "incw %w4\n"
+ "testl %3,%3\n"
+ "jne 4f\n"
+ "inb %w4,%b0\n"
+ "movb %0,(%2)\n"
+ "jmp 5f\n"
+ "4: movb (%2),%0\n"
+ "outb %b0,%w4\n"
+ "5: decw %w4\n"
+ "outb %0,$0x80\n"
+ "decl %1\n"
+ "incl %2\n"
+ "testl %1,%1\n"
+ "jne 1b\n"
+ "3: inb %w4,%b0\n"
+ "2:\n"
+ : "=a" ((char) st),
+ "=c" ((long) virtual_dma_count),
+ "=S" ((long) virtual_dma_addr)
+ : "b" ((long) virtual_dma_mode),
+ "d" ((short) virtual_dma_port+4),
+ "1" ((long) virtual_dma_count),
+ "2" ((long) virtual_dma_addr));
+#else
+ {
+ register int lcount;
+ register char *lptr;
+
+ st = 1;
+ for(lcount=virtual_dma_count, lptr=(char *)virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st=inb(virtual_dma_port+4) & 0xa0 ;
+ if(st != 0xa0)
+ break;
+ if(virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port+5);
+ else
+ *lptr = inb_p(virtual_dma_port+5);
+ st = inb(virtual_dma_port+4);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = (int) lptr;
+ }
+#endif
+
+#ifdef TRACE_FLPY_INT
+ calls++;
+#endif
+ if(st == 0x20)
+ return;
+ if(!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+#ifdef TRACE_FLPY_INT
+ printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ virtual_dma_count, virtual_dma_residue, calls, bytes,
+ dma_wait);
+ calls = 0;
+ dma_wait=0;
+#endif
+ doing_pdma = 0;
+ floppy_interrupt(irq, dev_id, regs);
+ return;
+ }
+#ifdef TRACE_FLPY_INT
+ if(!virtual_dma_count)
+ dma_wait++;
+#endif
+}
+
+static void vdma_enable_dma(unsigned int dummy)
+{
+ doing_pdma = 1;
+}
+
+static void vdma_disable_dma(unsigned int dummy)
+{
+ doing_pdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+{
+ return 0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+static void vdma_set_dma_mode(unsigned int dummy,char mode)
+{
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+}
+
+static void vdma_set_dma_addr(unsigned int dummy,unsigned int addr)
+{
+ virtual_dma_addr = addr;
+}
+
+static void vdma_set_dma_count(unsigned int dummy,unsigned int count)
+{
+ virtual_dma_count = count;
+ virtual_dma_residue = 0;
+}
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int vdma_request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id)
+{
+ return request_irq(irq, floppy_hardint,SA_INTERRUPT,device, dev_id);
+
+}
+
+static unsigned long dma_mem_alloc(unsigned long size)
+{
+ return __get_dma_pages(GFP_KERNEL,__get_order(size));
+}
+
+static void dma_mem_free(unsigned long addr, unsigned long size)
+{
+ free_pages(addr, __get_order(size));
+}
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{
+ return (unsigned long) vmalloc(size);
+}
+
+static void vdma_mem_free(unsigned long addr, unsigned long size)
+{
+ return vfree((void *)addr);
+}
+
+struct fd_routine_l {
+ void (*_enable_dma)(unsigned int dummy);
+ void (*_disable_dma)(unsigned int dummy);
+ int (*_request_dma)(unsigned int dmanr, const char * device_id);
+ void (*_free_dma)(unsigned int dmanr);
+ void (*_clear_dma_ff)(unsigned int dummy);
+ void (*_set_dma_mode)(unsigned int dummy, char mode);
+ void (*_set_dma_addr)(unsigned int dummy, unsigned int addr);
+ void (*_set_dma_count)(unsigned int dummy, unsigned int count);
+ int (*_get_dma_residue)(unsigned int dummy);
+ int (*_request_irq)(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id);
+ unsigned long (*_dma_mem_alloc) (unsigned long size);
+ void (*_dma_mem_free)(unsigned long addr, unsigned long size);
+} fd_routine[] = {
+ {
+ enable_dma,
+ disable_dma,
+ request_dma,
+ free_dma,
+ clear_dma_ff,
+ set_dma_mode,
+ set_dma_addr,
+ set_dma_count,
+ get_dma_residue,
+ request_irq,
+ dma_mem_alloc,
+ dma_mem_free
+ },
+ {
+ vdma_enable_dma,
+ vdma_disable_dma,
+ vdma_request_dma,
+ vdma_nop,
+ vdma_nop,
+ vdma_set_dma_mode,
+ vdma_set_dma_addr,
+ vdma_set_dma_count,
+ vdma_get_dma_residue,
+ vdma_request_irq,
+ vdma_mem_alloc,
+ vdma_mem_free
+ }
+};
+
+__inline__ void virtual_dma_init(void)
+{
+ /* Nothing to do on an i386 */
+}
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+#define FLOPPY0_TYPE ((CMOS_READ(0x10) >> 4) & 15)
+#define FLOPPY1_TYPE (CMOS_READ(0x10) & 15)
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define CROSS_64KB(a,s) (((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64) && ! (use_virtual_dma & 1))
+
+#endif /* __ASM_I386_FLOPPY_H */
diff --git a/linux/src/include/asm-i386/hardirq.h b/linux/src/include/asm-i386/hardirq.h
new file mode 100644
index 0000000..10dae41
--- /dev/null
+++ b/linux/src/include/asm-i386/hardirq.h
@@ -0,0 +1,66 @@
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/tasks.h>
+
+extern unsigned int local_irq_count[NR_CPUS];
+extern unsigned int local_bh_count[NR_CPUS];
+
+/*
+ * Are we in an interrupt context? Either doing bottom half
+ * or hardware interrupt processing?
+ */
+#define in_interrupt() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+
+#ifndef __SMP__
+
+#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+#define hardirq_endlock(cpu) do { } while (0)
+
+#define hardirq_enter(cpu) (local_irq_count[cpu]++)
+#define hardirq_exit(cpu) (local_irq_count[cpu]--)
+
+#define synchronize_irq() barrier()
+
+#else
+
+#include <asm/atomic.h>
+
+extern unsigned char global_irq_holder;
+extern unsigned volatile int global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore.. */
+ if (global_irq_holder == (unsigned char) cpu) {
+ global_irq_holder = NO_PROC_ID;
+ clear_bit(0,&global_irq_lock);
+ }
+}
+
+static inline void hardirq_enter(int cpu)
+{
+ ++local_irq_count[cpu];
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu)
+{
+ atomic_dec(&global_irq_count);
+ --local_irq_count[cpu];
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ return !atomic_read(&global_irq_count) && !test_bit(0,&global_irq_lock);
+}
+
+#define hardirq_endlock(cpu) do { } while (0)
+
+extern void synchronize_irq(void);
+
+#endif /* __SMP__ */
+
+#endif /* __ASM_HARDIRQ_H */
diff --git a/linux/src/include/asm-i386/io.h b/linux/src/include/asm-i386/io.h
new file mode 100644
index 0000000..e5c0744
--- /dev/null
+++ b/linux/src/include/asm-i386/io.h
@@ -0,0 +1,216 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+#include <machine/vm_param.h>
+#include <intel/pmap.h>
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
+#else
+#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
+#endif
+
+#ifdef REALLY_SLOW_IO
+#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
+#else
+#define SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are trivial on the 1:1 Linux/i386 mapping (but if we ever
+ * make the kernel segment mapped at 0, we need to do translation
+ * on the i386 as well)
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return (unsigned long) kvtophys((vm_offset_t) address);
+}
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return (void *) phystokv(address);
+}
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#define readl(addr) (*(volatile unsigned int *) (addr))
+
+#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
+
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+
+/*
+ * Again, i386 does not require mem IO specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+
+/*
+ * Talk about misusing macros..
+ */
+
+#define __OUT1(s,x) \
+static inline void __out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
+__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
+__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }
+
+#define __IN1(s) \
+static inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
+__IN1(s##c) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
+__IN1(s##c_p) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }
+
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+/* __IN(b,"b","0" (0)) */
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+/* __IN(w,"w","0" (0)) */
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
+/*
+ * Note that due to the way __builtin_constant_p() works, you
+ * - can't use it inside a inline function (it will never be true)
+ * - you don't have to worry about side effects within the __builtin..
+ */
+#define outb(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outbc((val),(port)) : \
+ __outb((val),(port)))
+
+#define inb(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inbc(port) : \
+ __inb(port))
+
+#define outb_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outbc_p((val),(port)) : \
+ __outb_p((val),(port)))
+
+#define inb_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inbc_p(port) : \
+ __inb_p(port))
+
+#define outw(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outwc((val),(port)) : \
+ __outw((val),(port)))
+
+#define inw(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inwc(port) : \
+ __inw(port))
+
+#define outw_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outwc_p((val),(port)) : \
+ __outw_p((val),(port)))
+
+#define inw_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inwc_p(port) : \
+ __inw_p(port))
+
+#define outl(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outlc((val),(port)) : \
+ __outl((val),(port)))
+
+#define inl(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inlc(port) : \
+ __inl(port))
+
+#define outl_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outlc_p((val),(port)) : \
+ __outl_p((val),(port)))
+
+#define inl_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inlc_p(port) : \
+ __inl_p(port))
+
+#endif
diff --git a/linux/src/include/asm-i386/ioctl.h b/linux/src/include/asm-i386/ioctl.h
new file mode 100644
index 0000000..44df7b0
--- /dev/null
+++ b/linux/src/include/asm-i386/ioctl.h
@@ -0,0 +1,75 @@
+/* $Id: ioctl.h,v 1.1 1999/04/26 05:55:47 tb Exp $
+ *
+ * linux/ioctl.h for Linux by H.H. Bergman.
+ */
+
+#ifndef _ASMI386_IOCTL_H
+#define _ASMI386_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The i386 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _ASMI386_IOCTL_H */
diff --git a/linux/src/include/asm-i386/ioctls.h b/linux/src/include/asm-i386/ioctls.h
new file mode 100644
index 0000000..60e0806
--- /dev/null
+++ b/linux/src/include/asm-i386/ioctls.h
@@ -0,0 +1,74 @@
+#ifndef __ARCH_I386_IOCTLS_H__
+#define __ARCH_I386_IOCTLS_H__
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif
diff --git a/linux/src/include/asm-i386/irq.h b/linux/src/include/asm-i386/irq.h
new file mode 100644
index 0000000..c75744a
--- /dev/null
+++ b/linux/src/include/asm-i386/irq.h
@@ -0,0 +1,421 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * linux/include/asm/irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+
+#define NR_IRQS 16
+
+#define TIMER_IRQ 0
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define SAVE_ALL \
+ "cld\n\t" \
+ "push %gs\n\t" \
+ "push %fs\n\t" \
+ "push %es\n\t" \
+ "push %ds\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %ebp\n\t" \
+ "pushl %edi\n\t" \
+ "pushl %esi\n\t" \
+ "pushl %edx\n\t" \
+ "pushl %ecx\n\t" \
+ "pushl %ebx\n\t" \
+ "movl $" STR(KERNEL_DS) ",%edx\n\t" \
+ "mov %dx,%ds\n\t" \
+ "mov %dx,%es\n\t" \
+ "mov %dx,%gs\n\t" \
+ "movl $" STR(USER_DS) ",%edx\n\t" \
+ "mov %dx,%fs\n\t" \
+ "movl $0,%edx\n\t" \
+ "movl %edx,%db7\n\t"
+
+/*
+ * SAVE_MOST/RESTORE_MOST is used for the faster version of IRQ handlers,
+ * installed by using the SA_INTERRUPT flag. These kinds of IRQ's don't
+ * call the routines that do signal handling etc on return, and can have
+ * more relaxed register-saving etc. They are also atomic, and are thus
+ * suited for small, fast interrupts like the serial lines or the harddisk
+ * drivers, which don't actually need signal handling etc.
+ *
+ * Also note that we actually save only those registers that are used in
+ * C subroutines (%eax, %edx and %ecx), so if you do something weird,
+ * you're on your own. The only segments that are saved (not counting the
+ * automatic stack and code segment handling) are %ds and %es, and they
+ * point to kernel space. No messing around with %fs here.
+ */
+#define SAVE_MOST \
+ "cld\n\t" \
+ "push %es\n\t" \
+ "push %ds\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %edx\n\t" \
+ "pushl %ecx\n\t" \
+ "movl $" STR(KERNEL_DS) ",%edx\n\t" \
+ "mov %dx,%ds\n\t" \
+ "mov %dx,%es\n\t"
+
+#define RESTORE_MOST \
+ "popl %ecx\n\t" \
+ "popl %edx\n\t" \
+ "popl %eax\n\t" \
+ "pop %ds\n\t" \
+ "pop %es\n\t" \
+ "iret"
+
+/*
+ * The "inb" instructions are not needed, but seem to change the timings
+ * a bit - without them it seems that the harddisk driver won't work on
+ * all hardware. Arghh.
+ */
+#define ACK_FIRST(mask,nr) \
+ "inb $0x21,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_21)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
+ "outb %al,$0x21\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0x20\n\t"
+
+#define ACK_SECOND(mask,nr) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_A1)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
+ "outb %al,$0xA1\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0xA0\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\toutb %al,$0x20\n\t"
+
+/* do not modify the ISR nor the cache_A1 variable */
+#define MSGACK_SECOND(mask,nr) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0xA0\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\toutb %al,$0x20\n\t"
+
+#define UNBLK_FIRST(mask) \
+ "inb $0x21,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_21)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
+ "outb %al,$0x21\n\t"
+
+#define UNBLK_SECOND(mask) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_A1)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
+ "outb %al,$0xA1\n\t"
+
+#define IRQ_NAME2(nr) nr##_interrupt(void)
+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
+#define FAST_IRQ_NAME(nr) IRQ_NAME2(fast_IRQ##nr)
+#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)
+
+#ifdef __SMP__
+
+#ifndef __SMP_PROF__
+#define SMP_PROF_INT_SPINS
+#define SMP_PROF_IPI_CNT
+#else
+#define SMP_PROF_INT_SPINS "incl "SYMBOL_NAME_STR(smp_spins)"(,%eax,4)\n\t"
+#define SMP_PROF_IPI_CNT "incl "SYMBOL_NAME_STR(ipi_count)"\n\t"
+#endif
+
+#define GET_PROCESSOR_ID \
+ "movl "SYMBOL_NAME_STR(apic_reg)", %edx\n\t" \
+ "movl 32(%edx), %eax\n\t" \
+ "shrl $24,%eax\n\t" \
+ "andl $0x0F,%eax\n"
+
+#define ENTER_KERNEL \
+ "pushl %eax\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl %ecx\n\t" \
+ "pushl %edx\n\t" \
+ "pushfl\n\t" \
+ "cli\n\t" \
+ "movl $6000, %ebx\n\t" \
+ "movl "SYMBOL_NAME_STR(smp_loops_per_tick)", %ecx\n\t" \
+ GET_PROCESSOR_ID \
+ "btsl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \
+ "1: " \
+ "lock\n\t" \
+ "btsl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "jnc 3f\n\t" \
+ "cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \
+ "je 4f\n\t" \
+ "cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \
+ "jne 2f\n\t" \
+ "movb $1, "SYMBOL_NAME_STR(smp_blocked_interrupt_pending)"\n\t" \
+ "2: " \
+ SMP_PROF_INT_SPINS \
+ "btl %eax, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
+ "jnc 5f\n\t" \
+ "lock\n\t" \
+ "btrl %eax, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
+ "jnc 5f\n\t" \
+ "movl %cr3,%edx\n\t" \
+ "movl %edx,%cr3\n" \
+ "5: btl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "jnc 1b\n\t" \
+ "cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \
+ "je 4f\n\t" \
+ "decl %ecx\n\t" \
+ "jne 2b\n\t" \
+ "decl %ebx\n\t" \
+ "jne 6f\n\t" \
+ "call "SYMBOL_NAME_STR(irq_deadlock_detected)"\n\t" \
+ "6: movl "SYMBOL_NAME_STR(smp_loops_per_tick)", %ecx\n\t" \
+ "cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \
+ "jne 2b\n\t" \
+ "incl "SYMBOL_NAME_STR(jiffies)"\n\t" \
+ "jmp 2b\n\t" \
+ "3: " \
+ "movb %al, "SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
+ "4: " \
+ "incl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
+ "cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \
+ "jne 7f\n\t" \
+ "movb $0, "SYMBOL_NAME_STR(smp_blocked_interrupt_pending)"\n\t" \
+ "7: " \
+ "popfl\n\t" \
+ "popl %edx\n\t" \
+ "popl %ecx\n\t" \
+ "popl %ebx\n\t" \
+ "popl %eax\n\t"
+
+#define LEAVE_KERNEL \
+ GET_PROCESSOR_ID \
+ "btrl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \
+ "pushfl\n\t" \
+ "cli\n\t" \
+ "decl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
+ "jnz 1f\n\t" \
+ "movb "SYMBOL_NAME_STR(saved_active_kernel_processor)",%al\n\t" \
+ "movb %al,"SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
+ "cmpb $" STR (NO_PROC_ID) ",%al\n\t" \
+ "jne 1f\n\t" \
+ "lock\n\t" \
+ "btrl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "1: " \
+ "popfl\n\t"
+
+
+/*
+ * the syscall count inc is a gross hack because ret_from_syscall is used by both irq and
+ * syscall return paths (urghh).
+ */
+
+#define BUILD_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n" \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ LEAVE_KERNEL \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ LEAVE_KERNEL \
+ RESTORE_MOST);
+
+
+#define BUILD_TIMER_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n");
+
+
+/*
+ * Message pass must be a fast IRQ..
+ */
+
+#define BUILD_MSGIRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ MSGACK_##chip(mask,(nr&7)) \
+ SMP_PROF_IPI_CNT \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask,(nr&7)) \
+ RESTORE_MOST);
+
+#define BUILD_RESCHEDIRQ(nr) \
+asmlinkage void IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(smp_reschedule_irq)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n");
+#else
+
+#define BUILD_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "jmp ret_from_sys_call\n" \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask,(nr&7)) \
+ RESTORE_MOST);
+
+#define BUILD_TIMER_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "jmp ret_from_sys_call\n");
+
+#endif
+#endif
diff --git a/linux/src/include/asm-i386/math_emu.h b/linux/src/include/asm-i386/math_emu.h
new file mode 100644
index 0000000..92083a5
--- /dev/null
+++ b/linux/src/include/asm-i386/math_emu.h
@@ -0,0 +1,57 @@
+#ifndef _I386_MATH_EMU_H
+#define _I386_MATH_EMU_H
+
+#include <asm/sigcontext.h>
+
+void restore_i387_soft(struct _fpstate *buf);
+struct _fpstate * save_i387_soft(struct _fpstate * buf);
+
+struct fpu_reg {
+ char sign;
+ char tag;
+ long exp;
+ unsigned sigl;
+ unsigned sigh;
+};
+
+
+/* This structure matches the layout of the data saved to the stack
+ following a device-not-present interrupt, part of it saved
+ automatically by the 80386/80486.
+ */
+struct info {
+ long ___orig_eip;
+ long ___ret_from_system_call;
+ long ___ebx;
+ long ___ecx;
+ long ___edx;
+ long ___esi;
+ long ___edi;
+ long ___ebp;
+ long ___eax;
+ long ___ds;
+ long ___es;
+ long ___fs;
+ long ___gs;
+ long ___orig_eax;
+ long ___eip;
+ long ___cs;
+ long ___eflags;
+ long ___esp;
+ long ___ss;
+ long ___vm86_es; /* This and the following only in vm86 mode */
+ long ___vm86_ds;
+ long ___vm86_fs;
+ long ___vm86_gs;
+};
+
+/* Interface for converting data between the emulator format
+ * and the hardware format. Used for core dumping and for
+ * ptrace(2) */
+void hardreg_to_softreg(const char hardreg[10],
+ struct fpu_reg *soft_reg);
+
+void softreg_to_hardreg(const struct fpu_reg *rp, char d[10],
+ long int control_word);
+
+#endif
diff --git a/linux/src/include/asm-i386/page.h b/linux/src/include/asm-i386/page.h
new file mode 100644
index 0000000..f315634
--- /dev/null
+++ b/linux/src/include/asm-i386/page.h
@@ -0,0 +1,62 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map.. */
+#define PAGE_OFFSET 0
+#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
diff --git a/linux/src/include/asm-i386/param.h b/linux/src/include/asm-i386/param.h
new file mode 100644
index 0000000..f821b86
--- /dev/null
+++ b/linux/src/include/asm-i386/param.h
@@ -0,0 +1,20 @@
+#ifndef _ASMi386_PARAM_H
+#define _ASMi386_PARAM_H
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
diff --git a/linux/src/include/asm-i386/posix_types.h b/linux/src/include/asm-i386/posix_types.h
new file mode 100644
index 0000000..6a04605
--- /dev/null
+++ b/linux/src/include/asm-i386/posix_types.h
@@ -0,0 +1,63 @@
+#ifndef __ARCH_I386_POSIX_TYPES_H
+#define __ARCH_I386_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned short __kernel_dev_t;
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+ int val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+ int __val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#undef __FD_SET
+#define __FD_SET(fd,fdsetp) \
+ __asm__ __volatile__("btsl %1,%0": \
+ "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef __FD_CLR
+#define __FD_CLR(fd,fdsetp) \
+ __asm__ __volatile__("btrl %1,%0": \
+ "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef __FD_ISSET
+#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
+ unsigned char __result; \
+ __asm__ __volatile__("btl %1,%2 ; setb %0" \
+ :"=q" (__result) :"r" ((int) (fd)), \
+ "m" (*(__kernel_fd_set *) (fdsetp))); \
+ __result; }))
+
+#undef __FD_ZERO
+#define __FD_ZERO(fdsetp) \
+ __asm__ __volatile__("cld ; rep ; stosl" \
+ :"=m" (*(__kernel_fd_set *) (fdsetp)) \
+ :"a" (0), "c" (__FDSET_LONGS), \
+ "D" ((__kernel_fd_set *) (fdsetp)) :"cx","di")
+
+#endif
diff --git a/linux/src/include/asm-i386/processor.h b/linux/src/include/asm-i386/processor.h
new file mode 100644
index 0000000..b067940
--- /dev/null
+++ b/linux/src/include/asm-i386/processor.h
@@ -0,0 +1,204 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+
+/*
+ * System setup and hardware bug flags..
+ * [Note we don't test the 386 multiply bug or popad bug]
+ */
+
+extern char hard_math;
+extern char x86; /* lower 4 bits */
+extern char x86_vendor_id[13];
+extern char x86_model; /* lower 4 bits */
+extern char x86_mask; /* lower 4 bits */
+extern int x86_capability; /* field of flags */
+extern int fdiv_bug;
+extern char ignore_irq13;
+extern char wp_works_ok; /* doesn't work on a 386 */
+extern char hlt_works_ok; /* problems on some 486Dx4's and old 386's */
+extern int have_cpuid; /* We have a CPUID */
+
+extern unsigned long cpu_hz; /* CPU clock frequency from time.c */
+
+#if 0
+/*
+ * Detection of CPU model (CPUID).
+ */
+extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "a" (op)
+ : "cc");
+}
+#endif
+
+/*
+ * Cyrix CPU register indexes (use special macros to access these)
+ */
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+
+/*
+ * Cyrix CPU register access macros
+ */
+
+extern inline unsigned char getCx86(unsigned char reg)
+{
+ unsigned char data;
+
+ __asm__ __volatile__("movb %1,%%al\n\t"
+ "outb %%al,$0x22\n\t"
+ "inb $0x23,%%al" : "=a" (data) : "q" (reg));
+ return data;
+}
+
+extern inline void setCx86(unsigned char reg, unsigned char data)
+{
+ __asm__ __volatile__("outb %%al,$0x22\n\t"
+ "movb %1,%%al\n\t"
+ "outb %%al,$0x23" : : "a" (reg), "q" (data));
+}
+
+/*
+ * Bus types (default is ISA, but people can check others with these..)
+ * MCA_bus hardcoded to 0 for now.
+ */
+extern int EISA_bus;
+#define MCA_bus 0
+#define MCA_bus__is_a_macro /* for versions in ksyms.c */
+
+/*
+ * User space process size: 3GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE (0xC0000000UL)
+#define MAX_USER_ADDR TASK_SIZE
+#define MMAP_SEARCH_START (TASK_SIZE/3)
+
+/*
+ * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
+ */
+#define IO_BITMAP_SIZE 32
+
+struct i387_hard_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ long status; /* software status information */
+};
+
+struct i387_soft_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long top;
+ struct fpu_reg regs[8]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ unsigned char lookahead;
+ struct info *info;
+ unsigned long entry_eip;
+};
+
+union i387_union {
+ struct i387_hard_struct hard;
+ struct i387_soft_struct soft;
+};
+
+struct thread_struct {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+ unsigned long esp1;
+ unsigned short ss1,__ss1h;
+ unsigned long esp2;
+ unsigned short ss2,__ss2h;
+ unsigned long cr3;
+ unsigned long eip;
+ unsigned long eflags;
+ unsigned long eax,ecx,edx,ebx;
+ unsigned long esp;
+ unsigned long ebp;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, bitmap;
+ unsigned long io_bitmap[IO_BITMAP_SIZE+1];
+ unsigned long tr;
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387;
+/* virtual 86 mode info */
+ struct vm86_struct * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, v86mode;
+};
+
+#define INIT_MMAP { &init_mm, 0, 0x40000000, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC }
+
+#define INIT_TSS { \
+ 0,0, \
+ sizeof(init_kernel_stack) + (long) &init_kernel_stack, \
+ KERNEL_DS, 0, \
+ 0,0,0,0,0,0, \
+ (long) &swapper_pg_dir, \
+ 0,0,0,0,0,0,0,0,0,0, \
+ USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0, \
+ _LDT(0),0, \
+ 0, 0x8000, \
+ {~0, }, /* ioperm */ \
+ _TSS(0), 0, 0,0, \
+ { { 0, }, }, /* 387 state */ \
+ NULL, 0, 0, 0, 0 /* vm86_info */ \
+}
+
+#define alloc_kernel_stack() __get_free_page(GFP_KERNEL)
+#define free_kernel_stack(page) free_page((page))
+
+static inline void start_thread(struct pt_regs * regs, unsigned long eip, unsigned long esp)
+{
+ regs->cs = USER_CS;
+ regs->ds = regs->es = regs->ss = regs->fs = regs->gs = USER_DS;
+ regs->eip = eip;
+ regs->esp = esp;
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+extern inline unsigned long thread_saved_pc(struct thread_struct *t)
+{
+ return ((unsigned long *)t->esp)[3];
+}
+
+#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/linux/src/include/asm-i386/ptrace.h b/linux/src/include/asm-i386/ptrace.h
new file mode 100644
index 0000000..ae94ede
--- /dev/null
+++ b/linux/src/include/asm-i386/ptrace.h
@@ -0,0 +1,60 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ unsigned short ds, __dsu;
+ unsigned short es, __esu;
+ unsigned short fs, __fsu;
+ unsigned short gs, __gsu;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csu;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssu;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->cs))
+#define instruction_pointer(regs) ((regs)->eip)
+extern void show_regs(struct pt_regs *);
+struct task_struct;
+extern void get_pt_regs_for_task(struct pt_regs *, struct task_struct *p);
+#endif
+
+#endif
diff --git a/linux/src/include/asm-i386/resource.h b/linux/src/include/asm-i386/resource.h
new file mode 100644
index 0000000..3143b5b
--- /dev/null
+++ b/linux/src/include/asm-i386/resource.h
@@ -0,0 +1,39 @@
+#ifndef _I386_RESOURCE_H
+#define _I386_RESOURCE_H
+
+/*
+ * Resource limits
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+#define RLIMIT_RSS 5 /* max resident set size */
+#define RLIMIT_NPROC 6 /* max number of processes */
+#define RLIMIT_NOFILE 7 /* max number of open files */
+#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#define RLIMIT_AS 9 /* address space limit */
+
+#define RLIM_NLIMITS 10
+
+#ifdef __KERNEL__
+
+#define INIT_RLIMITS \
+{ \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { _STK_LIM, _STK_LIM }, \
+ { 0, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { MAX_TASKS_PER_USER, MAX_TASKS_PER_USER }, \
+ { NR_OPEN, NR_OPEN }, \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/asm-i386/segment.h b/linux/src/include/asm-i386/segment.h
new file mode 100644
index 0000000..d23aa17
--- /dev/null
+++ b/linux/src/include/asm-i386/segment.h
@@ -0,0 +1,380 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#ifdef MACH
+
+#include <machine/gdt.h>
+#include <machine/ldt.h>
+
+#else /* !MACH */
+
+#define KERNEL_CS 0x10
+#define KERNEL_DS 0x18
+
+#define USER_CS 0x23
+#define USER_DS 0x2B
+
+#endif /* !MACH */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Uh, these should become the main single-value transfer routines..
+ * They automatically use the right size if we just have the right
+ * pointer type..
+ */
+#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr)))
+#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr))))
+
+/*
+ * This is a silly but good way to make sure that
+ * the __put_user function is indeed always optimized,
+ * and that we use the correct sizes..
+ */
+extern int bad_user_access_length(void);
+
+/*
+ * dummy pointer type structure.. gcc won't try to do something strange
+ * this way..
+ */
+struct __segment_dummy { unsigned long a[100]; };
+#define __sd(x) ((struct __segment_dummy *) (x))
+#define __const_sd(x) ((const struct __segment_dummy *) (x))
+
+static inline void __attribute__((always_inline)) __put_user(unsigned long x, void * y, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ ("movb %b1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"iq" ((unsigned char) x), "m" (*__sd(y)));
+ break;
+ case 2:
+ __asm__ ("movw %w1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" ((unsigned short) x), "m" (*__sd(y)));
+ break;
+ case 4:
+ __asm__ ("movl %1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" (x), "m" (*__sd(y)));
+ break;
+ default:
+#ifdef __OPTIMIZE__
+ bad_user_access_length();
+#else
+ asm volatile("ud2");
+#endif
+ }
+}
+
+static inline unsigned long __attribute__((always_inline)) __get_user(const void * y, int size)
+{
+ unsigned long result;
+
+ switch (size) {
+ case 1:
+ __asm__ ("movb %%fs:%1,%b0"
+ :"=q" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned char) result;
+ case 2:
+ __asm__ ("movw %%fs:%1,%w0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned short) result;
+ case 4:
+ __asm__ ("movl %%fs:%1,%0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return result;
+ default:
+#ifdef __OPTIMIZE__
+ return bad_user_access_length();
+#else
+ asm volatile("ud2");
+#endif
+ }
+}
+
+#if defined(__GNUC__) && (__GNUC__ == 2) && (__GNUC_MINOR__ < 95)
+static inline void __generic_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ ("cld\n"
+ "push %%es\n"
+ "push %%fs\n"
+ "cmpl $3,%0\n"
+ "pop %%es\n"
+ "jbe 1f\n"
+ "movl %%edi,%%ecx\n"
+ "negl %%ecx\n"
+ "andl $3,%%ecx\n"
+ "subl %%ecx,%0\n"
+ "rep; movsb\n"
+ "movl %0,%%ecx\n"
+ "shrl $2,%%ecx\n"
+ "rep; movsl\n"
+ "andl $3,%0\n"
+ "1: movl %0,%%ecx\n"
+ "rep; movsb\n"
+ "pop %%es\n"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si");
+}
+
+static inline void __constant_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ __put_user(*(const char *) from, (char *) to, 1);
+ return;
+ case 2:
+ __put_user(*(const short *) from, (short *) to, 2);
+ return;
+ case 3:
+ __put_user(*(const short *) from, (short *) to, 2);
+ __put_user(*(2+(const char *) from), 2+(char *) to, 1);
+ return;
+ case 4:
+ __put_user(*(const int *) from, (int *) to, 4);
+ return;
+ case 8:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ return;
+ case 12:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ return;
+ case 16:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ __put_user(*(3+(const int *) from), 3+(int *) to, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "push %%es\n\t" \
+ "push %%fs\n\t" \
+ "pop %%es\n\t" \
+ "rep ; movsl\n\t" \
+ x \
+ "pop %%es" \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("movsb\n\t");
+ return;
+ case 2:
+ COMMON("movsw\n\t");
+ return;
+ case 3:
+ COMMON("movsw\n\tmovsb\n\t");
+ return;
+ }
+#undef COMMON
+}
+
+static inline void __generic_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ ("cld\n"
+ "cmpl $3,%0\n"
+ "jbe 1f\n"
+ "movl %%edi,%%ecx\n"
+ "negl %%ecx\n"
+ "andl $3,%%ecx\n"
+ "subl %%ecx,%0\n"
+ "fs; rep; movsb\n"
+ "movl %0,%%ecx\n"
+ "shrl $2,%%ecx\n"
+ "fs; rep; movsl\n"
+ "andl $3,%0\n"
+ "1:movl %0,%%ecx\n"
+ "fs; rep; movsb\n"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si", "memory");
+}
+
+static inline void __constant_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ *(char *)to = __get_user((const char *) from, 1);
+ return;
+ case 2:
+ *(short *)to = __get_user((const short *) from, 2);
+ return;
+ case 3:
+ *(short *) to = __get_user((const short *) from, 2);
+ *((char *) to + 2) = __get_user(2+(const char *) from, 1);
+ return;
+ case 4:
+ *(int *) to = __get_user((const int *) from, 4);
+ return;
+ case 8:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ return;
+ case 12:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ return;
+ case 16:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ *(3+(int *) to) = __get_user(3+(const int *) from, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; fs ; movsl\n\t" \
+ x \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si","memory")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("fs ; movsb");
+ return;
+ case 2:
+ COMMON("fs ; movsw");
+ return;
+ case 3:
+ COMMON("fs ; movsw\n\tfs ; movsb");
+ return;
+ }
+#undef COMMON
+}
+
+#define memcpy_fromfs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_fromfs((to),(from),(n)) : \
+ __generic_memcpy_fromfs((to),(from),(n)))
+
+#define memcpy_tofs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_tofs((to),(from),(n)) : \
+ __generic_memcpy_tofs((to),(from),(n)))
+
+
+#else /* code for gcc-2.95.x and newer follows */
+
+static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ char *d = (char *)to;
+ const char *s = (const char *)from;
+ while (n-- > 0) {
+ *d++ = __get_user(s++, 1);
+ }
+}
+
+static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ char *d = (char *)to;
+ const char *s = (const char *)from;
+ while (n-- > 0) {
+ __put_user(*s++, d++, 1);
+ }
+}
+
+#endif /* not gcc-2.95 */
+
+/*
+ * These are deprecated..
+ *
+ * Use "put_user()" and "get_user()" with the proper pointer types instead.
+ */
+
+#define get_fs_byte(addr) __get_user((const unsigned char *)(addr),1)
+#define get_fs_word(addr) __get_user((const unsigned short *)(addr),2)
+#define get_fs_long(addr) __get_user((const unsigned int *)(addr),4)
+
+#define put_fs_byte(x,addr) __put_user((x),(unsigned char *)(addr),1)
+#define put_fs_word(x,addr) __put_user((x),(unsigned short *)(addr),2)
+#define put_fs_long(x,addr) __put_user((x),(unsigned int *)(addr),4)
+
+#ifdef WE_REALLY_WANT_TO_USE_A_BROKEN_INTERFACE
+
+static inline unsigned short get_user_word(const short *addr)
+{
+ return __get_user(addr, 2);
+}
+
+static inline unsigned char get_user_byte(const char * addr)
+{
+ return __get_user(addr,1);
+}
+
+static inline unsigned long get_user_long(const int *addr)
+{
+ return __get_user(addr, 4);
+}
+
+static inline void put_user_byte(char val,char *addr)
+{
+ __put_user(val, addr, 1);
+}
+
+static inline void put_user_word(short val,short * addr)
+{
+ __put_user(val, addr, 2);
+}
+
+static inline void put_user_long(unsigned long val,int * addr)
+{
+ __put_user(val, addr, 4);
+}
+
+#endif
+
+/*
+ * Someone who knows GNU asm better than I should double check the following.
+ * It seems to work, but I don't know if I'm doing something subtly wrong.
+ * --- TYT, 11/24/91
+ * [ nothing wrong here, Linus: I just changed the ax to be any reg ]
+ */
+
+static inline unsigned long get_fs(void)
+{
+ unsigned long _v;
+ __asm__("mov %%fs,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline unsigned long get_ds(void)
+{
+ unsigned long _v;
+ __asm__("mov %%ds,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline void set_fs(unsigned long val)
+{
+ __asm__ __volatile__("mov %w0,%%fs": /* no output */ :"r" (val));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SEGMENT_H */
diff --git a/linux/src/include/asm-i386/semaphore.h b/linux/src/include/asm-i386/semaphore.h
new file mode 100644
index 0000000..18e12c1
--- /dev/null
+++ b/linux/src/include/asm-i386/semaphore.h
@@ -0,0 +1,133 @@
+#ifndef _I386_SEMAPHORE_H
+#define _I386_SEMAPHORE_H
+
+#include <linux/linkage.h>
+#include <asm/system.h>
+
+/*
+ * SMP- and interrupt-safe semaphores..
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ *
+ * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
+ * the original code and to make semaphore waits
+ * interruptible so that processes waiting on
+ * semaphores can be killed.
+ *
+ * If you would like to see an analysis of this implementation, please
+ * ftp to gcom.com and download the file
+ * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
+ *
+ */
+
+struct semaphore {
+ int count;
+ int waking;
+ int lock ; /* to make waking testing atomic */
+ struct wait_queue * wait;
+};
+
+#define MUTEX ((struct semaphore) { 1, 0, 0, NULL })
+#define MUTEX_LOCKED ((struct semaphore) { 0, 0, 0, NULL })
+
+/* Special register calling convention:
+ * eax contains return address
+ * ecx contains semaphore address
+ */
+asmlinkage void down_failed(void /* special register calling convention */);
+asmlinkage void up_wakeup(void /* special register calling convention */);
+
+extern void __down(struct semaphore * sem);
+extern void __up(struct semaphore * sem);
+
+/*
+ * This is ugly, but we want the default case to fall through.
+ * "down_failed" is a special asm handler that calls the C
+ * routine that actually waits. See arch/i386/lib/semaphore.S
+ */
+static inline void down(struct semaphore * sem)
+{
+ int d0;
+ __asm__ __volatile__(
+ "# atomic down operation\n\t"
+ "movl $1f,%%eax\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "decl %1\n\t"
+ "js " SYMBOL_NAME_STR(down_failed) "\n"
+ "1:\n"
+ :"=&a" (d0), "=m" (sem->count)
+ :"c" (sem)
+ :"memory");
+}
+
+/*
+ * Primitives to spin on a lock. Needed only for SMP version.
+ */
+extern inline void get_buzz_lock(int *lock_ptr)
+{
+#ifdef __SMP__
+ while (xchg(lock_ptr,1) != 0) ;
+#endif
+} /* get_buzz_lock */
+
+extern inline void give_buzz_lock(int *lock_ptr)
+{
+#ifdef __SMP__
+ *lock_ptr = 0 ;
+#endif
+} /* give_buzz_lock */
+
+asmlinkage int down_failed_interruptible(void); /* params in registers */
+
+/*
+ * This version waits in interruptible state so that the waiting
+ * process can be killed. The down_failed_interruptible routine
+ * returns negative for signalled and zero for semaphore acquired.
+ */
+static inline int down_interruptible(struct semaphore * sem)
+{
+ int ret ;
+
+ __asm__ __volatile__(
+ "# atomic interruptible down operation\n\t"
+ "movl $2f,%%eax\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "decl %1\n\t"
+ "js " SYMBOL_NAME_STR(down_failed_interruptible) "\n\t"
+ "xorl %%eax,%%eax\n"
+ "2:\n"
+ :"=&a" (ret), "=m" (sem->count)
+ :"c" (sem)
+ :"memory");
+
+ return(ret) ;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ * The default case (no contention) will result in NO
+ * jumps for both down() and up().
+ */
+static inline void up(struct semaphore * sem)
+{
+ int d0;
+ __asm__ __volatile__(
+ "# atomic up operation\n\t"
+ "movl $1f,%%eax\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "incl %1\n\t"
+ "jle " SYMBOL_NAME_STR(up_wakeup)
+ "\n1:"
+ :"=&a" (d0), "=m" (sem->count)
+ :"c" (sem)
+ :"memory");
+}
+
+#endif
diff --git a/linux/src/include/asm-i386/sigcontext.h b/linux/src/include/asm-i386/sigcontext.h
new file mode 100644
index 0000000..df06596
--- /dev/null
+++ b/linux/src/include/asm-i386/sigcontext.h
@@ -0,0 +1,54 @@
+#ifndef _ASMi386_SIGCONTEXT_H
+#define _ASMi386_SIGCONTEXT_H
+
+/*
+ * As documented in the iBCS2 standard..
+ *
+ * The first part of "struct _fpstate" is just the
+ * normal i387 hardware setup, the extra "status"
+ * word is used to save the coprocessor status word
+ * before entering the handler.
+ */
+struct _fpreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+};
+
+struct _fpstate {
+ unsigned long cw,
+ sw,
+ tag,
+ ipoff,
+ cssel,
+ dataoff,
+ datasel;
+ struct _fpreg _st[8];
+ unsigned long status;
+};
+
+struct sigcontext_struct {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long esp;
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long eip;
+ unsigned short cs, __csh;
+ unsigned long eflags;
+ unsigned long esp_at_signal;
+ unsigned short ss, __ssh;
+ struct _fpstate * fpstate;
+ unsigned long oldmask;
+ unsigned long cr2;
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/signal.h b/linux/src/include/asm-i386/signal.h
new file mode 100644
index 0000000..c68928c
--- /dev/null
+++ b/linux/src/include/asm-i386/signal.h
@@ -0,0 +1,97 @@
+#ifndef _ASMi386_SIGNAL_H
+#define _ASMi386_SIGNAL_H
+
+typedef unsigned long sigset_t; /* at least 32 bits */
+
+#define _NSIG 32
+#define NSIG _NSIG
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGUNUSED 31
+
+/*
+ * sa_flags values: SA_STACK is not currently supported, but will allow the
+ * usage of signal stacks by using the (now obsolete) sa_restorer field in
+ * the sigaction structure as a stack pointer. This is now possible due to
+ * the changes in signal handling. LBT 010493.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_SHIRQ flag is for shared interrupt support on PCI and EISA.
+ */
+#define SA_NOCLDSTOP 1
+#define SA_SHIRQ 0x04000000
+#define SA_STACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_INTERRUPT 0x20000000
+#define SA_NOMASK 0x40000000
+#define SA_ONESHOT 0x80000000
+
+#ifdef __KERNEL__
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#endif
+
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+#endif
+
+#endif
diff --git a/linux/src/include/asm-i386/socket.h b/linux/src/include/asm-i386/socket.h
new file mode 100644
index 0000000..7301511
--- /dev/null
+++ b/linux/src/include/asm-i386/socket.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add :#define SO_REUSEPORT 15 */
+
+#define SO_BINDTODEVICE 25
+
+#endif /* _ASM_SOCKET_H */
diff --git a/linux/src/include/asm-i386/sockios.h b/linux/src/include/asm-i386/sockios.h
new file mode 100644
index 0000000..6b747f8
--- /dev/null
+++ b/linux/src/include/asm-i386/sockios.h
@@ -0,0 +1,12 @@
+#ifndef __ARCH_I386_SOCKIOS__
+#define __ARCH_I386_SOCKIOS__
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp */
+
+#endif
diff --git a/linux/src/include/asm-i386/spinlock.h b/linux/src/include/asm-i386/spinlock.h
new file mode 100644
index 0000000..18119d4
--- /dev/null
+++ b/linux/src/include/asm-i386/spinlock.h
@@ -0,0 +1,262 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifndef __SMP__
+
+#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
+
+#if (DEBUG_SPINLOCKS < 1)
+
+/*
+ * Your basic spinlocks, allowing only a single CPU anywhere
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
+ */
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#else
+ typedef struct { int gcc_is_buggy; } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#endif
+
+#define spin_lock_init(lock) do { } while(0)
+#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
+#define spin_trylock(lock) (1)
+#define spin_unlock_wait(lock) do { } while(0)
+#define spin_unlock(lock) do { } while(0)
+#define spin_lock_irq(lock) cli()
+#define spin_unlock_irq(lock) sti()
+
+#define spin_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define spin_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+
+#elif (DEBUG_SPINLOCKS < 2)
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do { (x)->lock = 1; } while (0)
+#define spin_unlock_wait(x) do { } while (0)
+#define spin_unlock(x) do { (x)->lock = 0; } while (0)
+#define spin_lock_irq(x) do { cli(); spin_lock(x); } while (0)
+#define spin_unlock_irq(x) do { spin_unlock(x); sti(); } while (0)
+
+#define spin_lock_irqsave(x, flags) \
+ do { save_flags(flags); spin_lock_irq(x); } while (0)
+#define spin_unlock_irqrestore(x, flags) \
+ do { spin_unlock(x); restore_flags(flags); } while (0)
+
+#else /* (DEBUG_SPINLOCKS >= 2) */
+
+typedef struct {
+ volatile unsigned int lock;
+ volatile unsigned int babble;
+ const char *module;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
+
+#include <linux/kernel.h>
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
+#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
+#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
+#define spin_lock_irq(x) do {cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irq(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irq(x) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irq(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; sti();} while (0)
+
+#define spin_lock_irqsave(x,flags) do {save_flags(flags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irqsave(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irqrestore(x,flags) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irqrestore(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(flags);} while (0)
+
+#endif /* DEBUG_SPINLOCKS */
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
+ */
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { }
+#else
+ typedef struct { int gcc_is_buggy; } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+#endif
+
+#define read_lock(lock) (void)(lock) /* Not "unused variable." */
+#define read_unlock(lock) do { } while(0)
+#define write_lock(lock) (void)(lock) /* Not "unused variable." */
+#define write_unlock(lock) do { } while(0)
+#define read_lock_irq(lock) cli()
+#define read_unlock_irq(lock) sti()
+#define write_lock_irq(lock) cli()
+#define write_unlock_irq(lock) sti()
+
+#define read_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+#define write_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+
+#else /* __SMP__ */
+
+/*
+ * Your basic spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while(0)
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+/*
+ * Intel PIV would benefit from using 'rep nop' here but on older
+ * processors and non intel it is listed as 'undefined' so cannot be
+ * blindly used. On 2.4 we should add a PIV CPU type for this one.
+ */
+#define spin_lock_string \
+ "\n1:\t" \
+ "lock ; btsl $0,%0\n\t" \
+ "jc 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\t" \
+ "rep; nop\n\t" \
+ "testb $1,%0\n\t" \
+ "jne 2b\n\t" \
+ "jmp 1b\n" \
+ ".previous"
+
+#define spin_unlock_string \
+ "lock ; btrl $0,%0"
+
+#define spin_lock(lock) \
+__asm__ __volatile__( \
+ spin_lock_string \
+ :"=m" (__dummy_lock(lock)))
+
+#define spin_unlock(lock) \
+__asm__ __volatile__( \
+ spin_unlock_string \
+ :"=m" (__dummy_lock(lock)))
+
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock_irq(lock) \
+ do { __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irq(lock) \
+ do { spin_unlock(lock); __sti(); } while (0)
+
+#define spin_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { spin_unlock(lock); __restore_flags(flags); } while (0)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+ volatile unsigned int lock;
+ unsigned long previous;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+
+/*
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "write" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ */
+#define read_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; incl %0\n\t" \
+ "js 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\tlock ; decl %0\n" \
+ "3:\trep; nop\n\t" \
+ "cmpl $0,%0\n\t" \
+ "js 3b\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_unlock(rw) \
+ asm volatile("lock ; decl %0" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; btsl $31,%0\n\t" \
+ "jc 4f\n" \
+ "2:\ttestl $0x7fffffff,%0\n\t" \
+ "jne 3f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "3:\tlock ; btrl $31,%0\n" \
+ "4:\trep; nop\n\t" \
+ "cmp $0,%0\n\t" \
+ "jne 4b\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_unlock(rw) \
+ asm volatile("lock ; btrl $31,%0":"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
+#define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
+#define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
+
+#define read_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ do { read_unlock(lock); __restore_flags(flags); } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ do { write_unlock(lock); __restore_flags(flags); } while (0)
+
+#endif /* __SMP__ */
+#endif /* __ASM_SPINLOCK_H */
diff --git a/linux/src/include/asm-i386/stat.h b/linux/src/include/asm-i386/stat.h
new file mode 100644
index 0000000..b4c6486
--- /dev/null
+++ b/linux/src/include/asm-i386/stat.h
@@ -0,0 +1,41 @@
+#ifndef _I386_STAT_H
+#define _I386_STAT_H
+
+struct old_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+struct new_stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long __unused1;
+ unsigned long st_mtime;
+ unsigned long __unused2;
+ unsigned long st_ctime;
+ unsigned long __unused3;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/statfs.h b/linux/src/include/asm-i386/statfs.h
new file mode 100644
index 0000000..113d5d4
--- /dev/null
+++ b/linux/src/include/asm-i386/statfs.h
@@ -0,0 +1,25 @@
+#ifndef _I386_STATFS_H
+#define _I386_STATFS_H
+
+#ifndef __KERNEL_STRICT_NAMES
+
+#include <linux/types.h>
+
+typedef __kernel_fsid_t fsid_t;
+
+#endif
+
+struct statfs {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_spare[6];
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/string.h b/linux/src/include/asm-i386/string.h
new file mode 100644
index 0000000..8417d4a
--- /dev/null
+++ b/linux/src/include/asm-i386/string.h
@@ -0,0 +1,487 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ *
+ * Also, the byte strings actually work correctly. Forget
+ * the i486 routines for now as they may be broken..
+ */
+#if FIXED_486_STRING && (CPU == 486 || CPU == 586)
+#include <asm/string-486.h>
+#else
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, this should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strtok,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used through-out, making for "slightly" unclear code :-)
+ *
+ * NO Copyright (C) 1991, 1992 Linus Torvalds,
+ * consider these trivial functions to be PD.
+ */
+
+#define __HAVE_ARCH_STRCPY
+extern inline char * strcpy(char * dest,const char *src)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2)
+ :"0" (src),"1" (dest) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+extern inline char * strncpy(char * dest,const char *src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %2\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "rep\n\t"
+ "stosb\n"
+ "2:"
+ : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
+ :"0" (src),"1" (dest),"2" (count) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+extern inline char * strcat(char * dest,const char * src)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+extern inline char * strncat(char * dest,const char * src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n\t"
+ "movl %8,%3\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %2,%2\n\t"
+ "stosb"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
+ : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+extern inline int strcmp(const char * cs,const char * ct)
+{
+int d0, d1;
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "scasb\n\t"
+ "jne 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "jmp 3f\n"
+ "2:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "3:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1)
+ :"1" (cs),"2" (ct));
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+extern inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "scasb\n\t"
+ "jne 3f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %%eax,%%eax\n\t"
+ "jmp 4f\n"
+ "3:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "4:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+ :"1" (cs),"2" (ct),"3" (count));
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+extern inline char * strchr(const char * s, int c)
+{
+int d0;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "je 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "movl $1,%1\n"
+ "2:\tmovl %1,%0\n\t"
+ "decl %0"
+ :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+extern inline char * strrchr(const char * s, int c)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "jne 2f\n\t"
+ "leal -1(%%esi),%0\n"
+ "2:\ttestb %%al,%%al\n\t"
+ "jne 1b"
+ :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+extern inline size_t strlen(const char * s)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %0\n\t"
+ "decl %0"
+ :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
+return __res;
+}
+
+extern inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+ : "memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+ switch (n) {
+ case 0:
+ return to;
+ case 1:
+ *(unsigned char *)to = *(const unsigned char *)from;
+ return to;
+ case 2:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ return to;
+ case 3:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ *(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
+ return to;
+ case 4:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ return to;
+ case 6: /* for Ethernet addresses */
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
+ return to;
+ case 8:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ return to;
+ case 12:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ return to;
+ case 16:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ return to;
+ case 20:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ *(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
+ return to;
+ }
+#define COMMON(x) \
+__asm__ __volatile__( \
+ "cld\n\t" \
+ "rep ; movsl" \
+ x \
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2) \
+ : "0" (n/4),"1" ((long) to),"2" ((long) from) \
+ : "memory");
+{
+ int d0, d1, d2;
+ switch (n % 4) {
+ case 0: COMMON(""); return to;
+ case 1: COMMON("\n\tmovsb"); return to;
+ case 2: COMMON("\n\tmovsw"); return to;
+ default: COMMON("\n\tmovsw\n\tmovsb"); return to;
+ }
+}
+
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#define __HAVE_ARCH_MEMMOVE
+extern inline void * memmove(void * dest,const void * src, size_t n)
+{
+int d0, d1, d2;
+if (dest<src)
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "movsb"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),"1" (src),"2" (dest)
+ : "memory");
+else
+__asm__ __volatile__(
+ "std\n\t"
+ "rep\n\t"
+ "movsb\n\t"
+ "cld"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),
+ "1" (n-1+(const char *)src),
+ "2" (n-1+(char *)dest)
+ :"memory");
+return dest;
+}
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+extern inline void * memchr(const void * cs,int c,size_t count)
+{
+int d0;
+register void * __res;
+if (!count)
+ return NULL;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1f\n\t"
+ "movl $1,%0\n"
+ "1:\tdecl %0"
+ :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
+return __res;
+}
+
+extern inline void * __memset_generic(void * s, char c,size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "stosb"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c),"1" (s),"0" (count)
+ :"memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; stosl\n\t"
+ "testb $2,%b3\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b3\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+ :"memory");
+return (s);
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+extern inline size_t strnlen(const char * s, size_t count)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "movl %2,%0\n\t"
+ "jmp 2f\n"
+ "1:\tcmpb $0,(%0)\n\t"
+ "je 3f\n\t"
+ "incl %0\n"
+ "2:\tdecl %1\n\t"
+ "cmpl $-1,%1\n\t"
+ "jne 1b\n"
+ "3:\tsubl %2,%0"
+ :"=a" (__res), "=&d" (d0)
+ :"c" (s),"1" (count));
+return __res;
+}
+/* end of additional stuff */
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count is constant..
+ */
+extern inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern;
+ *(2+(unsigned char *)s) = pattern;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+#define COMMON(x) \
+__asm__ __volatile__("cld\n\t" \
+ "rep ; stosl" \
+ x \
+ : "=&c" (d0), "=&D" (d1) \
+ : "a" (pattern),"0" (count/4),"1" ((long) s) \
+ : "memory")
+{
+ int d0, d1;
+ switch (count % 4) {
+ case 0: COMMON(""); return s;
+ case 1: COMMON("\n\tstosb"); return s;
+ case 2: COMMON("\n\tstosw"); return s;
+ default: COMMON("\n\tstosw\n\tstosb"); return s;
+ }
+}
+
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+extern inline void * memscan(void * addr, int c, size_t size)
+{
+ if (!size)
+ return addr;
+ __asm__("cld
+ repnz; scasb
+ jnz 1f
+ dec %%edi
+1: "
+ : "=D" (addr), "=c" (size)
+ : "0" (addr), "1" (size), "a" (c));
+ return addr;
+}
+
+#endif
+#endif
diff --git a/linux/src/include/asm-i386/system.h b/linux/src/include/asm-i386/system.h
new file mode 100644
index 0000000..f186393
--- /dev/null
+++ b/linux/src/include/asm-i386/system.h
@@ -0,0 +1,334 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <asm/segment.h>
+
+/*
+ * Entry into gdt where to find first TSS. GDT layout:
+ * 0 - null
+ * 1 - not used
+ * 2 - kernel code segment
+ * 3 - kernel data segment
+ * 4 - user code segment
+ * 5 - user data segment
+ * ...
+ * 8 - TSS #0
+ * 9 - LDT #0
+ * 10 - TSS #1
+ * 11 - LDT #1
+ */
+#define FIRST_TSS_ENTRY 8
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
+#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
+#define store_TR(n) \
+__asm__("str %%ax\n\t" \
+ "subl %2,%%eax\n\t" \
+ "shrl $4,%%eax" \
+ :"=a" (n) \
+ :"0" (0),"i" (FIRST_TSS_ENTRY<<3))
+
+/* This special macro can be used to load a debugging register */
+
+#define loaddebug(tsk,register) \
+ __asm__("movl %0,%%edx\n\t" \
+ "movl %%edx,%%db" #register "\n\t" \
+ : /* no output */ \
+ :"m" (tsk->debugreg[register]) \
+ :"dx");
+
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * This also clears the TS-flag if the task we switched to has used
+ * the math co-processor latest.
+ *
+ * It also reloads the debug regs if necessary..
+ */
+
+
+#ifdef __SMP__
+ /*
+ * Keep the lock depth straight. If we switch on an interrupt from
+ * kernel->user task we need to lose a depth, and if we switch the
+ * other way we need to gain a depth. Same layer switches come out
+ * the same.
+ *
+ * We spot a switch in user mode because the kernel counter is the
+ * same as the interrupt counter depth. (We never switch during the
+ * message/invalidate IPI).
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process.
+ */
+
+#define switch_to(prev,next) do { \
+ cli();\
+ if(prev->flags&PF_USEDFPU) \
+ { \
+ __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
+ __asm__ __volatile__("fwait"); \
+ prev->flags&=~PF_USEDFPU; \
+ } \
+ prev->lock_depth=syscall_count; \
+ kernel_counter+=next->lock_depth-prev->lock_depth; \
+ syscall_count=next->lock_depth; \
+__asm__("pushl %%edx\n\t" \
+ "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
+ "movl 0x20(%%edx), %%edx\n\t" \
+ "shrl $22,%%edx\n\t" \
+ "and $0x3C,%%edx\n\t" \
+ "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
+ "popl %%edx\n\t" \
+ "ljmp %0\n\t" \
+ "sti\n\t" \
+ : /* no output */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "c" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+
+#else
+#define switch_to(prev,next) do { \
+__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \
+ "ljmp %0\n\t" \
+ "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
+ "jne 1f\n\t" \
+ "clts\n" \
+ "1:" \
+ : /* no outputs */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "r" (prev), "r" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+#endif
+
+#define _set_base(addr,base) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%1\n\t" \
+ "movb %%dh,%2" \
+ : /* no output */ \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "d" (base) \
+ :"dx")
+
+#define _set_limit(addr,limit) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %1,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%1" \
+ : /* no output */ \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "d" (limit) \
+ :"dx")
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define stts() \
+__asm__ __volatile__ ( \
+ "movl %%cr0,%%eax\n\t" \
+ "orl $8,%%eax\n\t" \
+ "movl %%eax,%%cr0" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ :"ax")
+
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+#define mb() __asm__ __volatile__ ("" : : :"memory")
+#define sti() __asm__ __volatile__ ("sti": : :"memory")
+#define cli() __asm__ __volatile__ ("cli": : :"memory")
+
+#define save_flags(x) \
+__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
+
+#define restore_flags(x) \
+__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
+
+#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+ "movw %2,%%dx\n\t" \
+ "movl %%eax,%0\n\t" \
+ "movl %%edx,%1" \
+ :"=m" (*((long *) (gate_addr))), \
+ "=m" (*(1+(long *) (gate_addr))) \
+ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+ "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
+ :"ax","dx")
+
+#define set_intr_gate(n,addr) \
+ _set_gate(&idt[n],14,0,addr)
+
+#define set_trap_gate(n,addr) \
+ _set_gate(&idt[n],15,0,addr)
+
+#define set_system_gate(n,addr) \
+ _set_gate(&idt[n],15,3,addr)
+
+#define set_call_gate(a,addr) \
+ _set_gate(a,12,3,addr)
+
+#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
+ *((gate_addr)+1) = ((base) & 0xff000000) | \
+ (((base) & 0x00ff0000)>>16) | \
+ ((limit) & 0xf0000) | \
+ ((dpl)<<13) | \
+ (0x00408000) | \
+ ((type)<<8); \
+ *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
+ ((limit) & 0x0ffff); }
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
+ "movw %%ax,%2\n\t" \
+ "rorl $16,%%eax\n\t" \
+ "movb %%al,%3\n\t" \
+ "movb $" type ",%4\n\t" \
+ "movb $0x00,%5\n\t" \
+ "movb %%ah,%6\n\t" \
+ "rorl $16,%%eax" \
+ : /* no output */ \
+ :"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
+ "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
+ )
+
+#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
+#define set_ldt_desc(n,addr,size) \
+ _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt;
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+static __inline__ unsigned long long rdmsr(unsigned int msr)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdmsr"
+ : "=A" (ret)
+ : "c" (msr));
+ return ret;
+}
+
+static __inline__ void wrmsr(unsigned int msr,unsigned long long val)
+{
+ __asm__ __volatile__("wrmsr"
+ : /* no Outputs */
+ : "c" (msr), "A" (val));
+}
+
+
+static __inline__ unsigned long long rdtsc(void)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdtsc"
+ : "=A" (ret)
+ : /* no inputs */);
+ return ret;
+}
+
+static __inline__ unsigned long long rdpmc(unsigned int counter)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdpmc"
+ : "=A" (ret)
+ : "c" (counter));
+ return ret;
+}
+
+#endif
diff --git a/linux/src/include/asm-i386/termbits.h b/linux/src/include/asm-i386/termbits.h
new file mode 100644
index 0000000..c40e6f0
--- /dev/null
+++ b/linux/src/include/asm-i386/termbits.h
@@ -0,0 +1,160 @@
+#ifndef __ARCH_I386_TERMBITS_H__
+#define __ARCH_I386_TERMBITS_H__
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif
diff --git a/linux/src/include/asm-i386/termios.h b/linux/src/include/asm-i386/termios.h
new file mode 100644
index 0000000..9f65b4d
--- /dev/null
+++ b/linux/src/include/asm-i386/termios.h
@@ -0,0 +1,92 @@
+#ifndef _I386_TERMIOS_H
+#define _I386_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+#ifdef __KERNEL__
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+#endif
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline void trans_from_termio(struct termio * termio,
+ struct termios * termios)
+{
+#define SET_LOW_BITS(x,y) (*(unsigned short *)(&x) = (y))
+ SET_LOW_BITS(termios->c_iflag, termio->c_iflag);
+ SET_LOW_BITS(termios->c_oflag, termio->c_oflag);
+ SET_LOW_BITS(termios->c_cflag, termio->c_cflag);
+ SET_LOW_BITS(termios->c_lflag, termio->c_lflag);
+#undef SET_LOW_BITS
+ memcpy(termios->c_cc, termio->c_cc, NCC);
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline void trans_to_termio(struct termios * termios,
+ struct termio * termio)
+{
+ termio->c_iflag = termios->c_iflag;
+ termio->c_oflag = termios->c_oflag;
+ termio->c_cflag = termios->c_cflag;
+ termio->c_lflag = termios->c_lflag;
+ termio->c_line = termios->c_line;
+ memcpy(termio->c_cc, termios->c_cc, NCC);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_TERMIOS_H */
diff --git a/linux/src/include/asm-i386/types.h b/linux/src/include/asm-i386/types.h
new file mode 100644
index 0000000..d792546
--- /dev/null
+++ b/linux/src/include/asm-i386/types.h
@@ -0,0 +1,46 @@
+#ifndef _I386_TYPES_H
+#define _I386_TYPES_H
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#define BITS_PER_LONG 32
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/asm-i386/unaligned.h b/linux/src/include/asm-i386/unaligned.h
new file mode 100644
index 0000000..282ce19
--- /dev/null
+++ b/linux/src/include/asm-i386/unaligned.h
@@ -0,0 +1,16 @@
+#ifndef __I386_UNALIGNED_H
+#define __I386_UNALIGNED_H
+
+/*
+ * The i386 can do unaligned accesses itself.
+ *
+ * The strange macros are there to make sure these can't
+ * be misused in a way that makes them not work on other
+ * architectures where unaligned accesses aren't as simple.
+ */
+
+#define get_unaligned(ptr) (*(ptr))
+
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+#endif
diff --git a/linux/src/include/asm-i386/unistd.h b/linux/src/include/asm-i386/unistd.h
new file mode 100644
index 0000000..c5f8f3a
--- /dev/null
+++ b/linux/src/include/asm-i386/unistd.h
@@ -0,0 +1,328 @@
+#ifndef _ASM_I386_UNISTD_H_
+#define _ASM_I386_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_setup 0 /* used only by init, to get system going */
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_phys 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_poll 168
+#define __NR_getpmsg 188
+#define __NR_putpmsg 189
+
+/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name)); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1))); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1,type2 arg2) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1,type2 arg2,type3 arg3) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+ type5,arg5) \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#ifdef __KERNEL_SYSCALLS__
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem, but for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+#define __NR__exit __NR_exit
+static inline _syscall0(int,idle)
+static inline _syscall0(int,fork)
+static inline _syscall2(int,clone,unsigned long,flags,char *,esp)
+static inline _syscall0(int,pause)
+static inline _syscall0(int,setup)
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall1(int,_exit,int,exitcode)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
+
+static inline pid_t wait(int * wait_stat)
+{
+ return waitpid(-1,wait_stat,0);
+}
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process(ie the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be free'd until both the parent and the child have exited.
+ */
+static inline pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ long retval;
+
+ __asm__ __volatile__(
+ "movl %%esp,%%esi\n\t"
+ "int $0x80\n\t" /* Linux/i386 system call */
+ "cmpl %%esp,%%esi\n\t" /* child or parent? */
+ "je 1f\n\t" /* parent - jump */
+ "pushl %3\n\t" /* push argument */
+ "call *%4\n\t" /* call fn */
+ "movl %2,%0\n\t" /* exit */
+ "int $0x80\n"
+ "1:\t"
+ :"=a" (retval)
+ :"0" (__NR_clone), "i" (__NR_exit),
+ "r" (arg), "r" (fn),
+ "b" (flags | CLONE_VM)
+ :"si");
+ return retval;
+}
+
+#endif
+
+#endif /* _ASM_I386_UNISTD_H_ */
diff --git a/linux/src/include/asm-i386/vm86.h b/linux/src/include/asm-i386/vm86.h
new file mode 100644
index 0000000..42ef92e
--- /dev/null
+++ b/linux/src/include/asm-i386/vm86.h
@@ -0,0 +1,175 @@
+#ifndef _LINUX_VM86_H
+#define _LINUX_VM86_H
+
+/*
+ * I'm guessing at the VIF/VIP flag usage, but hope that this is how
+ * the Pentium uses them. Linux will return from vm86 mode when both
+ * VIF and VIP is set.
+ *
+ * On a Pentium, we could probably optimize the virtual flags directly
+ * in the eflags register instead of doing it "by hand" in vflags...
+ *
+ * Linus
+ */
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
+#define ID_MASK 0x00200000
+
+#define BIOSSEG 0x0f000
+
+#define CPU_086 0
+#define CPU_186 1
+#define CPU_286 2
+#define CPU_386 3
+#define CPU_486 4
+#define CPU_586 5
+
+/*
+ * Return values for the 'vm86()' system call
+ */
+#define VM86_TYPE(retval) ((retval) & 0xff)
+#define VM86_ARG(retval) ((retval) >> 8)
+
+#define VM86_SIGNAL 0 /* return due to signal */
+#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */
+#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
+#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */
+
+/*
+ * Additional return values when invoking new vm86()
+ */
+#define VM86_PICRETURN 4 /* return due to pending PIC request */
+#define VM86_TRAP 6 /* return due to DOS-debugger request */
+
+/*
+ * function codes when invoking new vm86()
+ */
+#define VM86_PLUS_INSTALL_CHECK 0
+#define VM86_ENTER 1
+#define VM86_ENTER_NO_BYPASS 2
+#define VM86_REQUEST_IRQ 3
+#define VM86_FREE_IRQ 4
+#define VM86_GET_IRQ_BITS 5
+#define VM86_GET_AND_RESET_IRQ 6
+
+/*
+ * This is the stack-layout when we have done a "SAVE_ALL" from vm86
+ * mode - the main change is that the old segment descriptors aren't
+ * useful any more and are forced to be zero by the kernel (and the
+ * hardware when a trap occurs), and the real segment descriptors are
+ * at the end of the structure. Look at ptrace.h to see the "normal"
+ * setup.
+ */
+
+struct vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ long __null_ds;
+ long __null_es;
+ long __null_fs;
+ long __null_gs;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csh;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct revectored_struct {
+ unsigned long __map[8]; /* 256 bits */
+};
+
+struct vm86_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+};
+
+/*
+ * flags masks
+ */
+#define VM86_SCREEN_BITMAP 0x0001
+
+struct vm86plus_info_struct {
+ unsigned long force_return_for_pic:1;
+ unsigned long vm86dbg_active:1; /* for debugger */
+ unsigned long vm86dbg_TFpendig:1; /* for debugger */
+ unsigned long unused:28;
+ unsigned long is_vm86pus:1; /* for vm86 internal use */
+ unsigned char vm86dbg_intxxtab[32]; /* for debugger */
+};
+
+struct vm86plus_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+};
+
+#ifdef __KERNEL__
+
+struct kernel_vm86_struct {
+ struct vm86_regs regs;
+/*
+ * the below part remains on the kernel stack while we are in VM86 mode.
+ * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
+ * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
+ * 'struct kernel_vm86_regs' with the then actual values.
+ * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
+ * in kernelspace, hence we need not reget the data from userspace.
+ */
+#define VM86_TSS_ESP0 flags
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+ struct pt_regs *regs32; /* here we save the pointer to the old regs */
+/*
+ * The below is not part of the structure, but the stack layout continues
+ * this way. In front of 'return-eip' may be some data, depending on
+ * compilation, so we don't rely on this and save the pointer to 'oldregs'
+ * in 'regs32' above.
+ * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
+
+ long return-eip; from call to vm86()
+ struct pt_regs oldregs; user space registers as saved by syscall
+ */
+};
+
+void handle_vm86_fault(struct vm86_regs *, long);
+int handle_vm86_trap(struct vm86_regs *, long, int);
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/linux/affs_hardblocks.h b/linux/src/include/linux/affs_hardblocks.h
new file mode 100644
index 0000000..3331548
--- /dev/null
+++ b/linux/src/include/linux/affs_hardblocks.h
@@ -0,0 +1,66 @@
+#ifndef AFFS_HARDBLOCKS_H
+#define AFFS_HARDBLOCKS_H
+
+/* Just the needed definitions for the RDB of an Amiga HD. */
+
+struct RigidDiskBlock {
+ __u32 rdb_ID;
+ __u32 rdb_SummedLongs;
+ __s32 rdb_ChkSum;
+ __u32 rdb_HostID;
+ __u32 rdb_BlockBytes;
+ __u32 rdb_Flags;
+ __u32 rdb_BadBlockList;
+ __u32 rdb_PartitionList;
+ __u32 rdb_FileSysHeaderList;
+ __u32 rdb_DriveInit;
+ __u32 rdb_Reserved1[6];
+ __u32 rdb_Cylinders;
+ __u32 rdb_Sectors;
+ __u32 rdb_Heads;
+ __u32 rdb_Interleave;
+ __u32 rdb_Park;
+ __u32 rdb_Reserved2[3];
+ __u32 rdb_WritePreComp;
+ __u32 rdb_ReducedWrite;
+ __u32 rdb_StepRate;
+ __u32 rdb_Reserved3[5];
+ __u32 rdb_RDBBlocksLo;
+ __u32 rdb_RDBBlocksHi;
+ __u32 rdb_LoCylinder;
+ __u32 rdb_HiCylinder;
+ __u32 rdb_CylBlocks;
+ __u32 rdb_AutoParkSeconds;
+ __u32 rdb_HighRDSKBlock;
+ __u32 rdb_Reserved4;
+ char rdb_DiskVendor[8];
+ char rdb_DiskProduct[16];
+ char rdb_DiskRevision[4];
+ char rdb_ControllerVendor[8];
+ char rdb_ControllerProduct[16];
+ char rdb_ControllerRevision[4];
+ __u32 rdb_Reserved5[10];
+};
+
+#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */
+
+struct PartitionBlock {
+ __u32 pb_ID;
+ __u32 pb_SummedLongs;
+ __s32 pb_ChkSum;
+ __u32 pb_HostID;
+ __u32 pb_Next;
+ __u32 pb_Flags;
+ __u32 pb_Reserved1[2];
+ __u32 pb_DevFlags;
+ __u8 pb_DriveName[32];
+ __u32 pb_Reserved2[15];
+ __u32 pb_Environment[17];
+ __u32 pb_EReserved[15];
+};
+
+#define IDNAME_PARTITION 0x50415254 /* "PART" */
+
+#define RDB_ALLOCATION_LIMIT 16
+
+#endif /* AFFS_HARDBLOCKS_H */
diff --git a/linux/src/include/linux/atalk.h b/linux/src/include/linux/atalk.h
new file mode 100644
index 0000000..c1a5d64
--- /dev/null
+++ b/linux/src/include/linux/atalk.h
@@ -0,0 +1,157 @@
+/*
+ * Appletalk networking structures
+ *
+ * The following are directly referenced from the University Of Michigan
+ * netatalk for compatibility reasons.
+ */
+
+#ifndef __LINUX_ATALK_H__
+#define __LINUX_ATALK_H__
+
+#define SIOCATALKDIFADDR (SIOCPROTOPRIVATE + 0)
+
+#define ATPORT_FIRST 1
+#define ATPORT_RESERVED 128
+#define ATPORT_LAST 255
+#define ATADDR_ANYNET (__u16)0
+#define ATADDR_ANYNODE (__u8)0
+#define ATADDR_ANYPORT (__u8)0
+#define ATADDR_BCAST (__u8)255
+#define DDP_MAXSZ 587
+
+struct at_addr
+{
+ __u16 s_net;
+ __u8 s_node;
+};
+
+struct sockaddr_at
+{
+ short sat_family;
+ __u8 sat_port;
+ struct at_addr sat_addr;
+ char sat_zero[ 8 ];
+};
+
+struct netrange
+{
+ __u8 nr_phase;
+ __u16 nr_firstnet;
+ __u16 nr_lastnet;
+};
+
+struct atalk_route
+{
+ struct device *dev;
+ struct at_addr target;
+ struct at_addr gateway;
+ int flags;
+ struct atalk_route *next;
+};
+
+struct atalk_iface
+{
+ struct device *dev;
+ struct at_addr address; /* Our address */
+ int status; /* What are we doing ?? */
+#define ATIF_PROBE 1 /* Probing for an address */
+#define ATIF_PROBE_FAIL 2 /* Probe collided */
+ struct netrange nets; /* Associated direct netrange */
+ struct atalk_iface *next;
+};
+
+struct atalk_sock
+{
+ unsigned short dest_net;
+ unsigned short src_net;
+ unsigned char dest_node;
+ unsigned char src_node;
+ unsigned char dest_port;
+ unsigned char src_port;
+};
+
+#define DDP_MAXHOPS 15 /* 4 bits of hop counter */
+
+#ifdef __KERNEL__
+
+#include <asm/byteorder.h>
+
+struct ddpehdr
+{
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ __u16 deh_len:10, deh_hops:4, deh_pad:2;
+#else
+ __u16 deh_pad:2, deh_hops:4, deh_len:10;
+#endif
+ __u16 deh_sum;
+ __u16 deh_dnet;
+ __u16 deh_snet;
+ __u8 deh_dnode;
+ __u8 deh_snode;
+ __u8 deh_dport;
+ __u8 deh_sport;
+ /* And netatalk apps expect to stick the type in themselves */
+};
+
+/*
+ * Unused (and currently unsupported)
+ */
+
+struct ddpshdr
+{
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ __u16 dsh_len:10, dsh_pad:6;
+#else
+ __u16 dsh_pad:6, dsh_len:10;
+#endif
+ __u8 dsh_dport;
+ __u8 dsh_sport;
+ /* And netatalk apps expect to stick the type in themselves */
+};
+
+/* Appletalk AARP headers */
+
+struct elapaarp
+{
+ __u16 hw_type;
+#define AARP_HW_TYPE_ETHERNET 1
+#define AARP_HW_TYPE_TOKENRING 2
+ __u16 pa_type;
+ __u8 hw_len;
+ __u8 pa_len;
+#define AARP_PA_ALEN 4
+ __u16 function;
+#define AARP_REQUEST 1
+#define AARP_REPLY 2
+#define AARP_PROBE 3
+ __u8 hw_src[ETH_ALEN] __attribute__ ((packed));
+ __u8 pa_src_zero __attribute__ ((packed));
+ __u16 pa_src_net __attribute__ ((packed));
+ __u8 pa_src_node __attribute__ ((packed));
+ __u8 hw_dst[ETH_ALEN] __attribute__ ((packed));
+ __u8 pa_dst_zero __attribute__ ((packed));
+ __u16 pa_dst_net __attribute__ ((packed));
+ __u8 pa_dst_node __attribute__ ((packed));
+};
+
+typedef struct sock atalk_socket;
+
+#define AARP_EXPIRY_TIME (5*60*HZ) /* Not specified - how long till we drop a resolved entry */
+#define AARP_HASH_SIZE 16 /* Size of hash table */
+#define AARP_TICK_TIME (HZ/5) /* Fast retransmission timer when resolving */
+#define AARP_RETRANSMIT_LIMIT 10 /* Send 10 requests then give up (2 seconds) */
+#define AARP_RESOLVE_TIME (10*HZ) /* Some value bigger than total retransmit time + a bit for last reply to appear and to stop continual requests */
+
+extern struct datalink_proto *ddp_dl, *aarp_dl;
+extern void aarp_proto_init(void);
+/* Inter module exports */
+extern struct atalk_iface *atalk_find_dev(struct device *dev);
+extern struct at_addr *atalk_find_dev_addr(struct device *dev);
+extern int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr);
+extern void aarp_send_probe(struct device *dev, struct at_addr *addr);
+#ifdef MODULE
+extern void aarp_cleanup_module(void);
+#endif
+
+#endif
+#endif
diff --git a/linux/src/include/linux/ax25.h b/linux/src/include/linux/ax25.h
new file mode 100644
index 0000000..f4bc5a0
--- /dev/null
+++ b/linux/src/include/linux/ax25.h
@@ -0,0 +1,96 @@
+/*
+ * These are the public elements of the Linux kernel AX.25 code. A similar
+ * file netrom.h exists for the NET/ROM protocol.
+ */
+
+#ifndef AX25_KERNEL_H
+#define AX25_KERNEL_H
+
+#define AX25_MTU 256
+#define AX25_MAX_DIGIS 6 /* This is wrong, should be 8 */
+
+#define AX25_WINDOW 1
+#define AX25_T1 2
+#define AX25_N2 3
+#define AX25_T3 4
+#define AX25_T2 5
+#define AX25_BACKOFF 6
+#define AX25_EXTSEQ 7
+#define AX25_PIDINCL 8
+#define AX25_IDLE 9
+#define AX25_PACLEN 10
+#define AX25_IAMDIGI 12
+
+#define AX25_KILL 99
+
+#define SIOCAX25GETUID (SIOCPROTOPRIVATE+0)
+#define SIOCAX25ADDUID (SIOCPROTOPRIVATE+1)
+#define SIOCAX25DELUID (SIOCPROTOPRIVATE+2)
+#define SIOCAX25NOUID (SIOCPROTOPRIVATE+3)
+#define SIOCAX25OPTRT (SIOCPROTOPRIVATE+7)
+#define SIOCAX25CTLCON (SIOCPROTOPRIVATE+8)
+#define SIOCAX25GETINFO (SIOCPROTOPRIVATE+9)
+#define SIOCAX25ADDFWD (SIOCPROTOPRIVATE+10)
+#define SIOCAX25DELFWD (SIOCPROTOPRIVATE+11)
+
+#define AX25_SET_RT_IPMODE 2
+
+#define AX25_NOUID_DEFAULT 0
+#define AX25_NOUID_BLOCK 1
+
+typedef struct {
+ char ax25_call[7]; /* 6 call + SSID (shifted ascii!) */
+} ax25_address;
+
+struct sockaddr_ax25 {
+ unsigned short sax25_family;
+ ax25_address sax25_call;
+ int sax25_ndigis;
+ /* Digipeater ax25_address sets follow */
+};
+
+#define sax25_uid sax25_ndigis
+
+struct full_sockaddr_ax25 {
+ struct sockaddr_ax25 fsa_ax25;
+ ax25_address fsa_digipeater[AX25_MAX_DIGIS];
+};
+
+struct ax25_routes_struct {
+ ax25_address port_addr;
+ ax25_address dest_addr;
+ unsigned char digi_count;
+ ax25_address digi_addr[AX25_MAX_DIGIS];
+};
+
+struct ax25_route_opt_struct {
+ ax25_address port_addr;
+ ax25_address dest_addr;
+ int cmd;
+ int arg;
+};
+
+struct ax25_ctl_struct {
+ ax25_address port_addr;
+ ax25_address source_addr;
+ ax25_address dest_addr;
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+struct ax25_info_struct {
+ unsigned int n2, n2count;
+ unsigned int t1, t1timer;
+ unsigned int t2, t2timer;
+ unsigned int t3, t3timer;
+ unsigned int idle, idletimer;
+ unsigned int state;
+ unsigned int rcv_q, snd_q;
+};
+
+struct ax25_fwd_struct {
+ ax25_address port_from;
+ ax25_address port_to;
+};
+
+#endif
diff --git a/linux/src/include/linux/binfmts.h b/linux/src/include/linux/binfmts.h
new file mode 100644
index 0000000..ae7167e
--- /dev/null
+++ b/linux/src/include/linux/binfmts.h
@@ -0,0 +1,65 @@
+#ifndef _LINUX_BINFMTS_H
+#define _LINUX_BINFMTS_H
+
+#include <linux/ptrace.h>
+
+/*
+ * MAX_ARG_PAGES defines the number of pages allocated for arguments
+ * and envelope for the new program. 32 should suffice, this gives
+ * a maximum env+arg of 128kB w/4KB pages!
+ */
+#define MAX_ARG_PAGES 32
+
+/*
+ * This structure is used to hold the arguments that are used when loading binaries.
+ */
+struct linux_binprm{
+ char buf[128];
+ unsigned long page[MAX_ARG_PAGES];
+ unsigned long p;
+ int sh_bang;
+ struct inode * inode;
+ int e_uid, e_gid;
+ int argc, envc;
+ char * filename; /* Name of binary */
+ unsigned long loader, exec;
+ int dont_iput; /* binfmt handler has put inode */
+};
+
+/*
+ * This structure defines the functions that are used to load the binary formats that
+ * linux accepts.
+ */
+struct linux_binfmt {
+ struct linux_binfmt * next;
+ long *use_count;
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(int fd);
+ int (*core_dump)(long signr, struct pt_regs * regs);
+};
+
+extern int register_binfmt(struct linux_binfmt *);
+extern int unregister_binfmt(struct linux_binfmt *);
+
+extern int read_exec(struct inode *inode, unsigned long offset,
+ char * addr, unsigned long count, int to_kmem);
+
+extern int open_inode(struct inode * inode, int mode);
+
+extern int init_elf_binfmt(void);
+extern int init_aout_binfmt(void);
+extern int init_script_binfmt(void);
+extern int init_java_binfmt(void);
+
+extern int prepare_binprm(struct linux_binprm *);
+extern void remove_arg_zero(struct linux_binprm *);
+extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
+extern int flush_old_exec(struct linux_binprm * bprm);
+extern unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm);
+extern unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
+ unsigned long p, int from_kmem);
+
+/* this eventually goes away */
+#define change_ldt(a,b) setup_arg_pages(a,b)
+
+#endif
diff --git a/linux/src/include/linux/bios32.h b/linux/src/include/linux/bios32.h
new file mode 100644
index 0000000..7944a53
--- /dev/null
+++ b/linux/src/include/linux/bios32.h
@@ -0,0 +1,61 @@
+/*
+ * BIOS32, PCI BIOS functions and defines
+ * Copyright 1994, Drew Eckhardt
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * P.O. Box 14070
+ * Portland, OR 97214
+ * U. S. A.
+ * Phone: 800-433-5177 / +1-503-797-4207
+ * Fax: +1-503-234-6762
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ */
+
+#ifndef BIOS32_H
+#define BIOS32_H
+
+/*
+ * Error values that may be returned by the PCI bios. Use
+ * pcibios_strerror() to convert to a printable string.
+ */
+#define PCIBIOS_SUCCESSFUL 0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
+#define PCIBIOS_BAD_VENDOR_ID 0x83
+#define PCIBIOS_DEVICE_NOT_FOUND 0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
+#define PCIBIOS_SET_FAILED 0x88
+#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+
+extern int pcibios_present (void);
+extern unsigned long pcibios_init (unsigned long memory_start,
+ unsigned long memory_end);
+extern unsigned long pcibios_fixup (unsigned long memory_start,
+ unsigned long memory_end);
+extern int pcibios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *dev_fn);
+extern int pcibios_find_device (unsigned short vendor, unsigned short dev_id,
+ unsigned short index, unsigned char *bus,
+ unsigned char *dev_fn);
+extern int pcibios_read_config_byte (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned char *val);
+extern int pcibios_read_config_word (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned short *val);
+extern int pcibios_read_config_dword (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned int *val);
+extern int pcibios_write_config_byte (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned char val);
+extern int pcibios_write_config_word (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned short val);
+extern int pcibios_write_config_dword (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned int val);
+extern const char *pcibios_strerror (int error);
+
+#endif /* BIOS32_H */
diff --git a/linux/src/include/linux/blk.h b/linux/src/include/linux/blk.h
new file mode 100644
index 0000000..92634d7
--- /dev/null
+++ b/linux/src/include/linux/blk.h
@@ -0,0 +1,454 @@
+#ifndef _BLK_H
+#define _BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/locks.h>
+#include <linux/malloc.h>
+#include <linux/config.h>
+#include <linux/md.h>
+
+/*
+ * NR_REQUEST is the number of entries in the request-queue.
+ * NOTE that writes may use only the low 2/3 of these: reads
+ * take precedence.
+ */
+#define NR_REQUEST 64
+
+/*
+ * This is used in the elevator algorithm. We don't prioritise reads
+ * over writes any more --- although reads are more time-critical than
+ * writes, by treating them equally we increase filesystem throughput.
+ * This turns out to give better overall performance. -- sct
+ */
+#define IN_ORDER(s1,s2) \
+((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \
+(s1)->sector < (s2)->sector)))
+
+/*
+ * These will have to be changed to be aware of different buffer
+ * sizes etc.. It actually needs a major cleanup.
+ */
+#if defined(IDE_DRIVER) || defined(MD_DRIVER)
+#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
+#else
+#define SECTOR_MASK (blksize_size[MAJOR_NR] && \
+ blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] ? \
+ ((blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] >> 9) - 1) : \
+ ((BLOCK_SIZE >> 9) - 1))
+#endif /* IDE_DRIVER */
+
+#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0)
+
+#ifdef CONFIG_CDU31A
+extern int cdu31a_init(void);
+#endif /* CONFIG_CDU31A */
+#ifdef CONFIG_MCD
+extern int mcd_init(void);
+#endif /* CONFIG_MCD */
+#ifdef CONFIG_MCDX
+extern int mcdx_init(void);
+#endif /* CONFIG_MCDX */
+#ifdef CONFIG_SBPCD
+extern int sbpcd_init(void);
+#endif /* CONFIG_SBPCD */
+#ifdef CONFIG_AZTCD
+extern int aztcd_init(void);
+#endif /* CONFIG_AZTCD */
+#ifdef CONFIG_CDU535
+extern int sony535_init(void);
+#endif /* CONFIG_CDU535 */
+#ifdef CONFIG_GSCD
+extern int gscd_init(void);
+#endif /* CONFIG_GSCD */
+#ifdef CONFIG_CM206
+extern int cm206_init(void);
+#endif /* CONFIG_CM206 */
+#ifdef CONFIG_OPTCD
+extern int optcd_init(void);
+#endif /* CONFIG_OPTCD */
+#ifdef CONFIG_SJCD
+extern int sjcd_init(void);
+#endif /* CONFIG_SJCD */
+#ifdef CONFIG_CDI_INIT
+extern int cdi_init(void);
+#endif /* CONFIG_CDI_INIT */
+#ifdef CONFIG_BLK_DEV_HD
+extern int hd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_IDE
+extern int ide_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+extern int xd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_LOOP
+extern int loop_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_MD
+extern int md_init(void);
+#endif /* CONFIG_BLK_DEV_MD */
+
+extern void set_device_ro(kdev_t dev,int flag);
+void add_blkdev_randomness(int major);
+
+extern int floppy_init(void);
+extern void rd_load(void);
+extern int rd_init(void);
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+extern unsigned long initrd_start,initrd_end;
+extern int mount_initrd; /* zero if initrd should not be mounted */
+void initrd_init(void);
+
+#endif
+
+#define RO_IOCTLS(dev,where) \
+ case BLKROSET: { int __err; if (!suser()) return -EACCES; \
+ __err = verify_area(VERIFY_READ, (void *) (where), sizeof(long)); \
+ if (!__err) set_device_ro((dev),get_fs_long((long *) (where))); return __err; } \
+ case BLKROGET: { int __err = verify_area(VERIFY_WRITE, (void *) (where), sizeof(long)); \
+ if (!__err) put_fs_long(0!=is_read_only(dev),(long *) (where)); return __err; }
+
+#if defined(MAJOR_NR) || defined(IDE_DRIVER)
+
+/*
+ * Add entries as needed.
+ */
+
+#ifdef IDE_DRIVER
+
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+#define DEVICE_ON(device) /* nothing */
+#define DEVICE_OFF(device) /* nothing */
+
+#elif (MAJOR_NR == RAMDISK_MAJOR)
+
+/* ram disk */
+#define DEVICE_NAME "ramdisk"
+#define DEVICE_REQUEST rd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+#define DEVICE_NO_RANDOM
+
+#elif (MAJOR_NR == FLOPPY_MAJOR)
+
+static void floppy_off(unsigned int nr);
+
+#define DEVICE_NAME "floppy"
+#define DEVICE_INTR do_floppy
+#define DEVICE_REQUEST do_fd_request
+#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
+
+#elif (MAJOR_NR == HD_MAJOR)
+
+/* harddisk: timeout is 6 seconds.. */
+#define DEVICE_NAME "harddisk"
+#define DEVICE_INTR do_hd
+#define DEVICE_TIMEOUT HD_TIMER
+#define TIMEOUT_VALUE (6*HZ)
+#define DEVICE_REQUEST do_hd_request
+#define DEVICE_NR(device) (MINOR(device)>>6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_DISK_MAJOR)
+
+#define DEVICE_NAME "scsidisk"
+#define DEVICE_INTR do_sd
+#define TIMEOUT_VALUE (2*HZ)
+#define DEVICE_REQUEST do_sd_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+/* Kludge to use the same number for both char and block major numbers */
+#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
+
+#define DEVICE_NAME "scsitape"
+#define DEVICE_INTR do_st
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
+
+#define DEVICE_NAME "CD-ROM"
+#define DEVICE_INTR do_sr
+#define DEVICE_REQUEST do_sr_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == XT_DISK_MAJOR)
+
+#define DEVICE_NAME "xt disk"
+#define DEVICE_REQUEST do_xd_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
+
+#define DEVICE_NAME "CDU31A"
+#define DEVICE_REQUEST do_cdu31a_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcd */
+#define DEVICE_REQUEST do_mcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcdx */
+#define DEVICE_REQUEST do_mcdx_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #1"
+#define DEVICE_REQUEST do_sbpcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #2"
+#define DEVICE_REQUEST do_sbpcd2_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #3"
+#define DEVICE_REQUEST do_sbpcd3_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #4"
+#define DEVICE_REQUEST do_sbpcd4_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
+
+#define DEVICE_NAME "Aztech CD-ROM"
+#define DEVICE_REQUEST do_aztcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
+
+#define DEVICE_NAME "SONY-CDU535"
+#define DEVICE_INTR do_cdu535
+#define DEVICE_REQUEST do_cdu535_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
+
+#define DEVICE_NAME "Goldstar R420"
+#define DEVICE_REQUEST do_gscd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CM206_CDROM_MAJOR)
+#define DEVICE_NAME "Philips/LMS cd-rom cm206"
+#define DEVICE_REQUEST do_cm206_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
+
+#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
+#define DEVICE_REQUEST do_optcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
+
+#define DEVICE_NAME "Sanyo H94A CD-ROM"
+#define DEVICE_REQUEST do_sjcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif /* MAJOR_NR == whatever */
+
+#if (MAJOR_NR != SCSI_TAPE_MAJOR)
+#if !defined(IDE_DRIVER)
+
+#ifndef CURRENT
+#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#endif
+
+#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
+
+#ifdef DEVICE_INTR
+static void (*DEVICE_INTR)(void) = NULL;
+#endif
+#ifdef DEVICE_TIMEOUT
+
+#define SET_TIMER \
+((timer_table[DEVICE_TIMEOUT].expires = jiffies + TIMEOUT_VALUE), \
+(timer_active |= 1<<DEVICE_TIMEOUT))
+
+#define CLEAR_TIMER \
+timer_active &= ~(1<<DEVICE_TIMEOUT)
+
+#define SET_INTR(x) \
+if ((DEVICE_INTR = (x)) != NULL) \
+ SET_TIMER; \
+else \
+ CLEAR_TIMER;
+
+#else
+
+#define SET_INTR(x) (DEVICE_INTR = (x))
+
+#endif /* DEVICE_TIMEOUT */
+
+static void (DEVICE_REQUEST)(void);
+
+#ifdef DEVICE_INTR
+#define CLEAR_INTR SET_INTR(NULL)
+#else
+#define CLEAR_INTR
+#endif
+
+#define INIT_REQUEST \
+ if (!CURRENT) {\
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+
+#endif /* !defined(IDE_DRIVER) */
+
+/* end_request() - SCSI devices have their own version */
+/* - IDE drivers have their own copy too */
+
+#if ! SCSI_BLK_MAJOR(MAJOR_NR)
+
+#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
+#else
+
+#ifdef IDE_DRIVER
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
+ struct request *req = hwgroup->rq;
+#else
+static void end_request(int uptodate) {
+ struct request *req = CURRENT;
+#endif /* IDE_DRIVER */
+ struct buffer_head * bh;
+ int nsect;
+
+ req->errors = 0;
+ if (!uptodate) {
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+ if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
+ req->nr_sectors--;
+ req->nr_sectors &= ~(nsect - 1);
+ req->sector += nsect;
+ req->sector &= ~(nsect - 1);
+ }
+ }
+
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+
+ /*
+ * This is our 'MD IO has finished' event handler.
+ * note that b_state should be cached in a register
+ * anyways, so the overhead of this checking is almost
+ * zero. But anyways .. we never get OO for free :)
+ */
+ if (test_bit(BH_MD, &bh->b_state)) {
+ struct md_personality * pers=(struct md_personality *)bh->personality;
+ pers->end_request(bh,uptodate);
+ }
+ /*
+ * the normal (nonmirrored and no RAID5) case:
+ */
+ else {
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ }
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ return;
+ }
+ }
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+#endif
+#ifdef IDE_DRIVER
+ blk_dev[MAJOR(req->rq_dev)].current_request = req->next;
+ hwgroup->rq = NULL;
+#else
+ DEVICE_OFF(req->rq_dev);
+ CURRENT = req->next;
+#endif /* IDE_DRIVER */
+ if (req->sem != NULL)
+ up(req->sem);
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
+#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
+#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */
+
+#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
+
+#endif /* _BLK_H */
diff --git a/linux/src/include/linux/blkdev.h b/linux/src/include/linux/blkdev.h
new file mode 100644
index 0000000..5bfc84e
--- /dev/null
+++ b/linux/src/include/linux/blkdev.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+
+/*
+ * Ok, this is an expanded form so that we can use the same
+ * request for paging requests when that is implemented. In
+ * paging, 'bh' is NULL, and the semaphore is used to wait
+ * for read/write completion.
+ */
+struct request {
+ volatile int rq_status; /* should split this into a few status bits */
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+ kdev_t rq_dev;
+ int cmd; /* READ or WRITE */
+ int errors;
+ unsigned long sector;
+ unsigned long nr_sectors;
+ unsigned long current_nr_sectors;
+ char * buffer;
+ struct semaphore * sem;
+ struct buffer_head * bh;
+ struct buffer_head * bhtail;
+ struct request * next;
+};
+
+struct blk_dev_struct {
+ void (*request_fn)(void);
+ struct request * current_request;
+ struct request plug;
+ struct tq_struct plug_tq;
+};
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern struct wait_queue * wait_for_request;
+extern void resetup_one_dev(struct gendisk *dev, int drive);
+extern void unplug_device(void * data);
+extern void make_request(int major,int rw, struct buffer_head * bh);
+
+/* md needs this function to remap requests */
+extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
+extern int md_make_request (int minor, int rw, struct buffer_head * bh);
+extern int md_error (kdev_t mddev, kdev_t rdev);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * hardsect_size[MAX_BLKDEV];
+
+#endif
diff --git a/linux/src/include/linux/cdrom.h b/linux/src/include/linux/cdrom.h
new file mode 100644
index 0000000..022d6e1
--- /dev/null
+++ b/linux/src/include/linux/cdrom.h
@@ -0,0 +1,453 @@
+/*
+ * -- <linux/cdrom.h>
+ * general (not only SCSI) header library for linux CDROM drivers
+ * (C) 1992 David Giller rafetmad@oxy.edu
+ * 1994, 1995 Eberhard Moenkeberg emoenke@gwdg.de
+ *
+ */
+
+#ifndef _LINUX_CDROM_H
+#define _LINUX_CDROM_H
+
+/*
+ * some fix numbers
+ */
+#define CD_MINS 74 /* max. minutes per CD, not really a limit */
+#define CD_SECS 60 /* seconds per minute */
+#define CD_FRAMES 75 /* frames per second */
+
+#define CD_SYNC_SIZE 12 /* 12 sync bytes per raw data frame, not transferred by the drive */
+#define CD_HEAD_SIZE 4 /* header (address) bytes per raw data frame */
+#define CD_SUBHEAD_SIZE 8 /* subheader bytes per raw XA data frame */
+#define CD_XA_HEAD (CD_HEAD_SIZE+CD_SUBHEAD_SIZE) /* "before data" part of raw XA frame */
+#define CD_XA_SYNC_HEAD (CD_SYNC_SIZE+CD_XA_HEAD)/* sync bytes + header of XA frame */
+
+#define CD_FRAMESIZE 2048 /* bytes per frame, "cooked" mode */
+#define CD_FRAMESIZE_RAW 2352 /* bytes per frame, "raw" mode */
+/* most drives don't deliver everything: */
+#define CD_FRAMESIZE_RAW1 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE) /* 2340 */
+#define CD_FRAMESIZE_RAW0 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE) /* 2336 */
+/* Optics drive also has a 'read all' mode: */
+#define CD_FRAMESIZE_RAWER 2646 /* bytes per frame */
+
+#define CD_EDC_SIZE 4 /* bytes EDC per most raw data frame types */
+#define CD_ZERO_SIZE 8 /* bytes zero per yellow book mode 1 frame */
+#define CD_ECC_SIZE 276 /* bytes ECC per most raw data frame types */
+#define CD_XA_TAIL (CD_EDC_SIZE+CD_ECC_SIZE) /* "after data" part of raw XA frame */
+
+#define CD_FRAMESIZE_SUB 96 /* subchannel data "frame" size */
+#define CD_MSF_OFFSET 150 /* MSF numbering offset of first frame */
+
+#define CD_CHUNK_SIZE 24 /* lowest-level "data bytes piece" */
+#define CD_NUM_OF_CHUNKS 98 /* chunks per frame */
+
+#define CD_FRAMESIZE_XA CD_FRAMESIZE_RAW1 /* obsolete name */
+#define CD_BLOCK_OFFSET CD_MSF_OFFSET /* obsolete name */
+
+/*
+ * the raw frame layout:
+ *
+ * - audio (red): | audio_sample_bytes |
+ * | 2352 |
+ *
+ * - data (yellow, mode1): | sync - head - data - EDC - zero - ECC |
+ * | 12 - 4 - 2048 - 4 - 8 - 276 |
+ *
+ * - data (yellow, mode2): | sync - head - data |
+ * | 12 - 4 - 2336 |
+ *
+ * - XA data (green, mode2 form1): | sync - head - sub - data - EDC - ECC |
+ * | 12 - 4 - 8 - 2048 - 4 - 276 |
+ *
+ * - XA data (green, mode2 form2): | sync - head - sub - data - EDC |
+ * | 12 - 4 - 8 - 2324 - 4 |
+ */
+
+/*
+ * CDROM IOCTL structures
+ */
+
+struct cdrom_blk
+{
+ unsigned from;
+ unsigned short len;
+};
+
+
+struct cdrom_msf
+{
+ u_char cdmsf_min0; /* start minute */
+ u_char cdmsf_sec0; /* start second */
+ u_char cdmsf_frame0; /* start frame */
+ u_char cdmsf_min1; /* end minute */
+ u_char cdmsf_sec1; /* end second */
+ u_char cdmsf_frame1; /* end frame */
+};
+
+struct cdrom_ti
+{
+ u_char cdti_trk0; /* start track */
+ u_char cdti_ind0; /* start index */
+ u_char cdti_trk1; /* end track */
+ u_char cdti_ind1; /* end index */
+};
+
+struct cdrom_tochdr
+{
+ u_char cdth_trk0; /* start track */
+ u_char cdth_trk1; /* end track */
+};
+
+struct cdrom_msf0 /* address in MSF format */
+{
+ u_char minute;
+ u_char second;
+ u_char frame;
+};
+
+union cdrom_addr /* address in either MSF or logical format */
+{
+ struct cdrom_msf0 msf;
+ int lba;
+};
+
+struct cdrom_tocentry
+{
+ u_char cdte_track;
+ u_char cdte_adr :4;
+ u_char cdte_ctrl :4;
+ u_char cdte_format;
+ union cdrom_addr cdte_addr;
+ u_char cdte_datamode;
+};
+
+/*
+ * CD-ROM address types (cdrom_tocentry.cdte_format)
+ */
+#define CDROM_LBA 0x01 /* "logical block": first frame is #0 */
+#define CDROM_MSF 0x02 /* "minute-second-frame": binary, not bcd here! */
+
+/*
+ * bit to tell whether track is data or audio (cdrom_tocentry.cdte_ctrl)
+ */
+#define CDROM_DATA_TRACK 0x04
+
+/*
+ * The leadout track is always 0xAA, regardless of # of tracks on disc
+ */
+#define CDROM_LEADOUT 0xAA
+
+struct cdrom_subchnl
+{
+ u_char cdsc_format;
+ u_char cdsc_audiostatus;
+ u_char cdsc_adr: 4;
+ u_char cdsc_ctrl: 4;
+ u_char cdsc_trk;
+ u_char cdsc_ind;
+ union cdrom_addr cdsc_absaddr;
+ union cdrom_addr cdsc_reladdr;
+};
+
+struct cdrom_mcn {
+ u_char medium_catalog_number[14]; /* 13 ASCII digits, null-terminated */
+};
+
+/*
+ * audio states (from SCSI-2, but seen with other drives, too)
+ */
+#define CDROM_AUDIO_INVALID 0x00 /* audio status not supported */
+#define CDROM_AUDIO_PLAY 0x11 /* audio play operation in progress */
+#define CDROM_AUDIO_PAUSED 0x12 /* audio play operation paused */
+#define CDROM_AUDIO_COMPLETED 0x13 /* audio play successfully completed */
+#define CDROM_AUDIO_ERROR 0x14 /* audio play stopped due to error */
+#define CDROM_AUDIO_NO_STATUS 0x15 /* no current audio status to return */
+
+struct cdrom_volctrl
+{
+ u_char channel0;
+ u_char channel1;
+ u_char channel2;
+ u_char channel3;
+};
+
+struct cdrom_read
+{
+ int cdread_lba;
+ caddr_t cdread_bufaddr;
+ int cdread_buflen;
+};
+
+/*
+ * extensions for transferring audio frames
+ * currently used by sbpcd.c, cdu31a.c, ide-cd.c
+ */
+struct cdrom_read_audio
+{
+ union cdrom_addr addr; /* frame address */
+ u_char addr_format; /* CDROM_LBA or CDROM_MSF */
+ int nframes; /* number of 2352-byte-frames to read at once, limited by the drivers */
+ u_char *buf; /* frame buffer (size: nframes*2352 bytes) */
+};
+
+/*
+ * this has to be the "arg" of the CDROMMULTISESSION ioctl
+ * for obtaining multi session info.
+ * The returned "addr" is valid only if "xa_flag" is true.
+ */
+struct cdrom_multisession
+{
+ union cdrom_addr addr; /* frame address: start-of-last-session (not the new "frame 16"!)*/
+ u_char xa_flag; /* 1: "is XA disk" */
+ u_char addr_format; /* CDROM_LBA or CDROM_MSF */
+};
+
+#ifdef FIVETWELVE
+#define CDROM_MODE1_SIZE 512
+#else
+#define CDROM_MODE1_SIZE 2048
+#endif /* FIVETWELVE */
+#define CDROM_MODE2_SIZE 2336
+
+/*
+ * CD-ROM IOCTL commands
+ * For IOCTL calls, we will commandeer byte 0x53, or 'S'.
+ */
+
+#define CDROMPAUSE 0x5301
+#define CDROMRESUME 0x5302
+#define CDROMPLAYMSF 0x5303 /* (struct cdrom_msf) */
+#define CDROMPLAYTRKIND 0x5304 /* (struct cdrom_ti) */
+
+#define CDROMREADTOCHDR 0x5305 /* (struct cdrom_tochdr) */
+#define CDROMREADTOCENTRY 0x5306 /* (struct cdrom_tocentry) */
+
+#define CDROMSTOP 0x5307 /* stop the drive motor */
+#define CDROMSTART 0x5308 /* turn the motor on */
+
+#define CDROMEJECT 0x5309 /* eject CD-ROM media */
+
+#define CDROMVOLCTRL 0x530a /* (struct cdrom_volctrl) */
+
+#define CDROMSUBCHNL 0x530b /* (struct cdrom_subchnl) */
+
+#define CDROMREADMODE2 0x530c /* (struct cdrom_read) */
+ /* read type-2 data */
+
+#define CDROMREADMODE1 0x530d /* (struct cdrom_read) */
+ /* read type-1 data */
+
+#define CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */
+
+/*
+ * enable (1) / disable (0) auto-ejecting
+ */
+#define CDROMEJECT_SW 0x530f /* arg: 0 or 1 */
+
+/*
+ * obtain the start-of-last-session address of multi session disks
+ */
+#define CDROMMULTISESSION 0x5310 /* (struct cdrom_multisession) */
+
+/*
+ * obtain the "universal product code" number
+ * (only some data disks have it coded)
+ */
+#define CDROM_GET_UPC 0x5311 /* 8 bytes returned */
+
+#define CDROMRESET 0x5312 /* hard-reset the drive */
+#define CDROMVOLREAD 0x5313 /* let the drive tell its volume setting */
+ /* (struct cdrom_volctrl) */
+
+/*
+ * these ioctls are used in aztcd.c and optcd.c
+ */
+#define CDROMREADRAW 0x5314 /* read data in raw mode */
+#define CDROMREADCOOKED 0x5315 /* read data in cooked mode */
+#define CDROMSEEK 0x5316 /* seek msf address */
+
+/*
+ * for playing audio in logical block addressing mode
+ */
+#define CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */
+
+/*
+ * these ioctls are used in optcd.c
+ */
+#define CDROMREADALL 0x5318 /* read all 2646 bytes */
+#define CDROMCLOSETRAY 0x5319 /* pendant of CDROMEJECT */
+
+
+/*
+ * For controlling a changer. (Used by ATAPI driver.)
+ * This ioctl is deprecated in favor of CDROM_SELECT_DISC from
+ * ucdrom.h. It will probably be deleted during the 2.1 kernel series.
+ */
+#define CDROMLOADFROMSLOT 0x531a /* LOAD disk from slot*/
+
+
+/*
+ * CD-ROM-specific SCSI command opcodes
+ */
+
+/*
+ * Group 2 (10-byte). All of these are called 'optional' by SCSI-II.
+ */
+#define SCMD_READ_TOC 0x43 /* read table of contents */
+#define SCMD_PLAYAUDIO_MSF 0x47 /* play data at time offset */
+#define SCMD_PLAYAUDIO_TI 0x48 /* play data at track/index */
+#define SCMD_PAUSE_RESUME 0x4B /* pause/resume audio */
+#define SCMD_READ_SUBCHANNEL 0x42 /* read SC info on playing disc */
+#define SCMD_PLAYAUDIO10 0x45 /* play data at logical block */
+#define SCMD_READ_HEADER 0x44 /* read TOC header */
+
+/*
+ * Group 5
+ */
+#define SCMD_PLAYAUDIO12 0xA5 /* play data at logical block */
+#define SCMD_PLAYTRACK_REL12 0xA9 /* play track at relative offset */
+
+/*
+ * Group 6 Commands
+ */
+#define SCMD_CD_PLAYBACK_CONTROL 0xC9 /* Sony vendor-specific audio */
+#define SCMD_CD_PLAYBACK_STATUS 0xC4 /* control opcodes */
+
+/*
+ * CD-ROM capacity structure.
+ */
+struct scsi_capacity
+{
+ u_long capacity;
+ u_long lbasize;
+};
+
+/*
+ * CD-ROM MODE_SENSE/MODE_SELECT parameters
+ */
+#define ERR_RECOVERY_PARMS 0x01
+#define DISCO_RECO_PARMS 0x02
+#define FORMAT_PARMS 0x03
+#define GEOMETRY_PARMS 0x04
+#define CERTIFICATION_PARMS 0x06
+#define CACHE_PARMS 0x38
+
+/*
+ * standard mode-select header prepended to all mode-select commands
+ */
+struct ccs_modesel_head
+{
+ u_char _r1; /* reserved */
+ u_char medium; /* device-specific medium type */
+ u_char _r2; /* reserved */
+ u_char block_desc_length; /* block descriptor length */
+ u_char density; /* device-specific density code */
+ u_char number_blocks_hi; /* number of blocks in this block desc */
+ u_char number_blocks_med;
+ u_char number_blocks_lo;
+ u_char _r3;
+ u_char block_length_hi; /* block length for blocks in this desc */
+ u_short block_length;
+};
+
+/*
+ * error recovery parameters
+ */
+struct ccs_err_recovery
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char awre : 1; /* auto write realloc enabled */
+ u_char arre : 1; /* auto read realloc enabled */
+ u_char tb : 1; /* transfer block */
+ u_char rc : 1; /* read continuous */
+ u_char eec : 1; /* enable early correction */
+ u_char per : 1; /* post error */
+ u_char dte : 1; /* disable transfer on error */
+ u_char dcr : 1; /* disable correction */
+ u_char retry_count; /* error retry count */
+ u_char correction_span; /* largest recov. to be attempted, bits */
+ u_char head_offset_count; /* head offset (2's C) for each retry */
+ u_char strobe_offset_count; /* data strobe */
+ u_char recovery_time_limit; /* time limit on recovery attempts */
+};
+
+/*
+ * disco/reco parameters
+ */
+struct ccs_disco_reco
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char buffer_full_ratio; /* write buffer reconnect threshold */
+ u_char buffer_empty_ratio; /* read */
+ u_short bus_inactivity_limit; /* limit on bus inactivity time */
+ u_short disconnect_time_limit; /* minimum disconnect time */
+ u_short connect_time_limit; /* minimum connect time */
+ u_short _r2; /* reserved */
+};
+
+/*
+ * drive geometry parameters
+ */
+struct ccs_geometry
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char cyl_ub; /* #cyls */
+ u_char cyl_mb;
+ u_char cyl_lb;
+ u_char heads; /* #heads */
+ u_char precomp_cyl_ub; /* precomp start */
+ u_char precomp_cyl_mb;
+ u_char precomp_cyl_lb;
+ u_char current_cyl_ub; /* reduced current start */
+ u_char current_cyl_mb;
+ u_char current_cyl_lb;
+ u_short step_rate; /* stepping motor rate */
+ u_char landing_cyl_ub; /* landing zone */
+ u_char landing_cyl_mb;
+ u_char landing_cyl_lb;
+ u_char _r2;
+ u_char _r3;
+ u_char _r4;
+};
+
+/*
+ * cache parameters
+ */
+struct ccs_cache
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char mode; /* cache control byte */
+ u_char threshold; /* prefetch threshold */
+ u_char max_prefetch; /* maximum prefetch size */
+ u_char max_multiplier; /* maximum prefetch multiplier */
+ u_char min_prefetch; /* minimum prefetch size */
+ u_char min_multiplier; /* minimum prefetch multiplier */
+ u_char _r2[8];
+};
+
+#endif /* _LINUX_CDROM_H */
+/*==========================================================================*/
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 8
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -8
+ * c-argdecl-indent: 8
+ * c-label-offset: -8
+ * c-continued-statement-offset: 8
+ * c-continued-brace-offset: 0
+ * End:
+ */
diff --git a/linux/src/include/linux/compatmac.h b/linux/src/include/linux/compatmac.h
new file mode 100644
index 0000000..9537070
--- /dev/null
+++ b/linux/src/include/linux/compatmac.h
@@ -0,0 +1,153 @@
+ /*
+ * This header tries to allow you to write 2.3-compatible drivers,
+ * but (using this header) still allows you to run them on 2.2 and
+ * 2.0 kernels.
+ *
+ * Sometimes, a #define replaces a "construct" that older kernels
+ * had. For example,
+ *
+ * DECLARE_MUTEX(name);
+ *
+ * replaces the older
+ *
+ * struct semaphore name = MUTEX;
+ *
+ * This file then declares the DECLARE_MUTEX macro to compile into the
+ * older version.
+ *
+ * In some cases, a macro or function changes the number of arguments.
+ * In that case, there is nothing we can do except define an access
+ * macro that provides the same functionality on both versions of Linux.
+ *
+ * This is the case for example with the "get_user" macro 2.0 kernels use:
+ *
+ * a = get_user (b);
+ *
+ * while newer kernels use
+ *
+ * get_user (a,b);
+ *
+ * This is unfortunate. We therefore define "Get_user (a,b)" which looks
+ * almost the same as the 2.2+ construct, and translates into the
+ * appropriate sequence for earlier constructs.
+ *
+ * Supported by this file are the 2.0 kernels, 2.2 kernels, and the
+ * most recent 2.3 kernel. 2.3 support will be dropped as soon when 2.4
+ * comes out. 2.0 support may someday be dropped. But then again, maybe
+ * not.
+ *
+ * I'll try to maintain this, provided that Linus agrees with the setup.
+ * Feel free to mail updates or suggestions.
+ *
+ * -- R.E.Wolff@BitWizard.nl
+ *
+ */
+
+#ifndef COMPATMAC_H
+#define COMPATMAC_H
+
+#include <linux/version.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE < 0x020100 /* Less than 2.1.0 */
+#define TWO_ZERO
+#else
+#if LINUX_VERSION_CODE < 0x020200 /* less than 2.2.x */
+#warning "Please use a 2.2.x kernel. "
+#else
+#if LINUX_VERSION_CODE < 0x020300 /* less than 2.3.x */
+#define TWO_TWO
+#else
+#define TWO_THREE
+#endif
+#endif
+#endif
+
+#ifdef TWO_ZERO
+
+/* Here is the section that makes the 2.2 compatible driver source
+ work for 2.0 too! We mostly try to adopt the "new thingies" from 2.2,
+ and provide for compatibility stuff here if possible. */
+
+/* Some 200 days (on intel) */
+#define MAX_SCHEDULE_TIMEOUT ((long)(~0UL>>1))
+
+#include <linux/bios32.h>
+
+#define Get_user(a,b) a = get_user(b)
+#define Put_user(a,b) 0,put_user(a,b)
+#define copy_to_user(a,b,c) memcpy_tofs(a,b,c)
+
+static inline int copy_from_user(void *to,const void *from, int c)
+{
+ memcpy_fromfs(to, from, c);
+ return 0;
+}
+
+#define pci_present pcibios_present
+#define pci_read_config_word pcibios_read_config_word
+#define pci_read_config_dword pcibios_read_config_dword
+
+static inline unsigned char get_irq (unsigned char bus, unsigned char fn)
+{
+ unsigned char t;
+ pcibios_read_config_byte (bus, fn, PCI_INTERRUPT_LINE, &t);
+ return t;
+}
+
+static inline void *ioremap(unsigned long base, long length)
+{
+ if (base < 0x100000) return phys_to_virt(base);
+ return vremap (base, length);
+}
+
+#define my_iounmap(x, b) (((long)x<(long)phys_to_virt(0x100000))?0:vfree ((void*)x))
+
+#define capable(x) suser()
+
+#define queue_task queue_task_irq_off
+#define tty_flip_buffer_push(tty) queue_task(&tty->flip.tqueue, &tq_timer)
+#define signal_pending(current) (current->signal & ~current->blocked)
+#define schedule_timeout(to) do {current->timeout = jiffies + (to);schedule ();} while (0)
+#define time_after(t1,t2) (((long)t1-t2) > 0)
+
+
+//#define test_and_set_bit(nr, addr) set_bit(nr, addr)
+//#define test_and_clear_bit(nr, addr) clear_bit(nr, addr)
+
+/* Not yet implemented on 2.0 */
+#define ASYNC_SPD_SHI -1
+#define ASYNC_SPD_WARP -1
+
+
+/* Ugly hack: the driver_name doesn't exist in 2.0.x . So we define it
+ to the "name" field that does exist. As long as the assignments are
+ done in the right order, there is nothing to worry about. */
+#define driver_name name
+
+/* Should be in a header somewhere. They are in tty.h on 2.2 */
+#define TTY_HW_COOK_OUT 14 /* Flag to tell ntty what we can handle */
+#define TTY_HW_COOK_IN 15 /* in hardware - output and input */
+
+/* The return type of a "close" routine. */
+#define INT void
+#define NO_ERROR /* Nothing */
+
+#else
+
+/* The 2.2.x compatibility section. */
+#include <asm/uaccess.h>
+
+
+#define Get_user(a,b) get_user(a,b)
+#define Put_user(a,b) put_user(a,b)
+#define get_irq(pdev) pdev->irq
+
+#define INT int
+#define NO_ERROR 0
+
+#define my_iounmap(x,b) (iounmap((char *)(b)))
+
+#endif
+
+#endif
diff --git a/linux/src/include/linux/compiler-gcc.h b/linux/src/include/linux/compiler-gcc.h
new file mode 100644
index 0000000..d9426df
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc.h
@@ -0,0 +1,112 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#ifndef barrier
+#define barrier() __asm__ __volatile__("": : :"memory")
+#endif /* barrier */
+
+/*
+ * This macro obfuscates arithmetic on a variable address so that gcc
+ * shouldn't recognize the original var, and make assumptions about it.
+ *
+ * This is needed because the C standard makes it undefined to do
+ * pointer arithmetic on "objects" outside their boundaries and the
+ * gcc optimizers assume this is the case. In particular they
+ * assume such arithmetic does not wrap.
+ *
+ * A miscompilation has been observed because of this on PPC.
+ * To work around it we hide the relationship of the pointer and the object
+ * using this macro.
+ *
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+#define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); })
+
+#ifdef __CHECKER__
+#define __must_be_array(arr) 0
+#else
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#endif
+
+/*
+ * Force always-inline if the user requests it so via the .config,
+ * or if gcc is too old:
+ */
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
+# define inline inline __attribute__((always_inline))
+# define __inline__ __inline__ __attribute__((always_inline))
+# define __inline __inline __attribute__((always_inline))
+#endif
+
+#define __deprecated __attribute__((deprecated))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked) to trace
+ * naked functions because then mcount is called without stack and frame pointer
+ * being set up and there is no chance to restore the lr register to the value
+ * before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling conventions,
+ * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
+ * this, so we must do so ourselves. See GCC PR44290.
+ */
+#define __naked __attribute__((naked)) noinline __noclone notrace
+
+#define __noreturn __attribute__((noreturn))
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
+ */
+#define __pure __attribute__((pure))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __printf(a,b) __attribute__((format(printf,a,b)))
+#define noinline __attribute__((noinline))
+#define __attribute_const__ __attribute__((__const__))
+#define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
+
+#define __gcc_header(x) #x
+#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+#define gcc_header(x) _gcc_header(x)
+#if __GNUC__ < 5
+#include gcc_header(__GNUC__)
+#else
+#include gcc_header(5)
+#endif
+
+#if !defined(__noclone)
+#define __noclone /* not needed */
+#endif
+
+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
+
+#define __always_inline inline __attribute__((always_inline))
diff --git a/linux/src/include/linux/compiler-gcc3.h b/linux/src/include/linux/compiler-gcc3.h
new file mode 100644
index 0000000..37d4124
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc3.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#if __GNUC_MINOR__ < 2
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if __GNUC_MINOR__ >= 3
+# define __used __attribute__((__used__))
+#else
+# define __used __attribute__((__unused__))
+#endif
+
+#if __GNUC_MINOR__ >= 4
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if __GNUC_MINOR__ < 4
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
diff --git a/linux/src/include/linux/compiler-gcc4.h b/linux/src/include/linux/compiler-gcc4.h
new file mode 100644
index 0000000..dfadc96
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc4.h
@@ -0,0 +1,57 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+
+#if __GNUC_MINOR__ >= 3
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+
+#if __GNUC_MINOR__ >= 5
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+#endif
+#endif
+
+#if __GNUC_MINOR__ > 0
+#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__)
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#define __compiletime_error(message) __attribute__((error(message)))
+#endif
diff --git a/linux/src/include/linux/compiler-gcc5.h b/linux/src/include/linux/compiler-gcc5.h
new file mode 100644
index 0000000..efee493
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc5.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/linux/src/include/linux/compiler.h b/linux/src/include/linux/compiler.h
new file mode 100644
index 0000000..eb3dd94
--- /dev/null
+++ b/linux/src/include/linux/compiler.h
@@ -0,0 +1,315 @@
+#ifndef __LINUX_COMPILER_H
+#define __LINUX_COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+# define __kernel __attribute__((address_space(0)))
+# define __safe __attribute__((safe))
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __iomem __attribute__((noderef, address_space(2)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu __attribute__((noderef, address_space(4)))
+#else
+# define __rcu
+#endif
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+#else
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __nocast
+# define __iomem
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+# define __builtin_warning(x, y...) (1)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+# define __percpu
+# define __rcu
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#define notrace __attribute__((no_instrument_function))
+
+/* Intel compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __INTEL_COMPILER
+# include <linux/compiler-intel.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros required for kernel
+ * build go below this comment. Actual compiler/compiler version
+ * specific implementations come from the above header files
+ */
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x) __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = likely_notrace(x); \
+ ftrace_likely_update(&______f, ______r, expect); \
+ ______r; \
+ })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same. This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+ if (__builtin_constant_p((cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = !!(cond); \
+ ______f.miss_hit[______r]++; \
+ ______r; \
+ }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# ifndef likely
+# define likely(x) __builtin_expect(!!(x), 1)
+# endif /* likely */
+# ifndef unlikely
+# define unlikely(x) __builtin_expect(!!(x), 0)
+# endif /* unlikely */
+#endif
+
+/* Optimization barrier */
+#ifndef barrier
+# define barrier() __memory_barrier()
+#endif
+
+/* Unreachable code */
+#ifndef unreachable
+# define unreachable() do { } while (1)
+#endif
+
+#ifndef RELOC_HIDE
+# define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __ptr = (unsigned long) (ptr); \
+ (typeof(ptr)) (__ptr + (off)); })
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
+ * warning for each use, in hopes of speeding the functions removal.
+ * Usage is:
+ * int __deprecated foo(void)
+ */
+#ifndef __deprecated
+# define __deprecated /* unimplemented */
+#endif
+
+#ifdef MODULE
+#define __deprecated_for_modules __deprecated
+#else
+#define __deprecated_for_modules
+#endif
+
+#ifndef __must_check
+#define __must_check
+#endif
+
+#ifndef CONFIG_ENABLE_MUST_CHECK
+#undef __must_check
+#define __must_check
+#endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
+
+/*
+ * Allow us to avoid 'defined but not used' warnings on functions and data,
+ * as well as force them to be emitted to the assembly file.
+ *
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.4, static data not so
+ * marked will not be elided, but this may change in a future gcc version.
+ *
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
+ * In prior versions of gcc, such functions and data would be emitted, but
+ * would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
+ */
+#ifndef __used
+# define __used /* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused /* unimplemented */
+#endif
+
+#ifndef __always_unused
+# define __always_unused /* unimplemented */
+#endif
+
+#ifndef noinline
+#define noinline
+#endif
+
+/*
+ * Rather then using noinline to prevent stack consumption, use
+ * noinline_for_stack instead. For documentaiton reasons.
+ */
+#define noinline_for_stack noinline
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions do not examine any values except their arguments,
+ * and have no effects except the return value. Basically this is
+ * just slightly more strict class than the `pure' attribute above,
+ * since function is not allowed to read global memory.
+ *
+ * Note that a function that has pointer arguments and examines the
+ * data pointed to must _not_ be declared `const'. Likewise, a
+ * function that calls a non-`const' function usually must not be
+ * `const'. It does not make sense for a `const' function to return
+ * `void'.
+ */
+#ifndef __attribute_const__
+# define __attribute_const__ /* unimplemented */
+#endif
+
+/*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+ */
+
+#ifndef __cold
+#define __cold
+#endif
+
+/* Simple shorthand for a section definition */
+#ifndef __section
+# define __section(S) __attribute__ ((__section__(#S)))
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/* Compile time object size, -1 for unknown */
+#ifndef __compiletime_object_size
+# define __compiletime_object_size(obj) -1
+#endif
+#ifndef __compiletime_warning
+# define __compiletime_warning(message)
+#endif
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
+/*
+ * Prevent the compiler from merging or refetching accesses. The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering. One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time. Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+#endif /* __LINUX_COMPILER_H */
diff --git a/linux/src/include/linux/config.h b/linux/src/include/linux/config.h
new file mode 100644
index 0000000..da47f8c
--- /dev/null
+++ b/linux/src/include/linux/config.h
@@ -0,0 +1,43 @@
+#ifndef _LINUX_CONFIG_H
+#define _LINUX_CONFIG_H
+
+#include <linux/autoconf.h>
+
+/*
+ * Defines for what uname() should return
+ */
+#ifndef UTS_SYSNAME
+#define UTS_SYSNAME "Linux"
+#endif
+
+#ifndef UTS_MACHINE
+#define UTS_MACHINE "unknown"
+#endif
+
+#ifndef UTS_NODENAME
+#define UTS_NODENAME "(none)" /* set by sethostname() */
+#endif
+
+#ifndef UTS_DOMAINNAME
+#define UTS_DOMAINNAME "(none)" /* set by setdomainname() */
+#endif
+
+/*
+ * The definitions for UTS_RELEASE and UTS_VERSION are now defined
+ * in linux/version.h, and should only be used by linux/version.c
+ */
+
+/* Shouldn't these be defined somewhere in a i386 definition? */
+
+/* Don't touch these, unless you really know what you're doing. */
+#define DEF_INITSEG 0x9000
+#define DEF_SYSSEG 0x1000
+#define DEF_SETUPSEG 0x9020
+#define DEF_SYSSIZE 0x7F00
+
+/* internal svga startup constants */
+#define NORMAL_VGA 0xffff /* 80x25 mode */
+#define EXTENDED_VGA 0xfffe /* 80x50 mode */
+#define ASK_VGA 0xfffd /* ask for it at bootup */
+
+#endif
diff --git a/linux/src/include/linux/ctype.h b/linux/src/include/linux/ctype.h
new file mode 100644
index 0000000..8acfe31
--- /dev/null
+++ b/linux/src/include/linux/ctype.h
@@ -0,0 +1,64 @@
+#ifndef _LINUX_CTYPE_H
+#define _LINUX_CTYPE_H
+
+/*
+ * NOTE! This ctype does not handle EOF like the standard C
+ * library is required to.
+ */
+
+#define _U 0x01 /* upper */
+#define _L 0x02 /* lower */
+#define _D 0x04 /* digit */
+#define _C 0x08 /* cntrl */
+#define _P 0x10 /* punct */
+#define _S 0x20 /* white space (space/lf/tab) */
+#define _X 0x40 /* hex digit */
+#define _SP 0x80 /* hard space (0x20) */
+
+extern const unsigned char _ctype[];
+
+#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
+
+#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
+#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
+#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
+#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
+#define islower(c) ((__ismask(c)&(_L)) != 0)
+#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
+#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
+#define isspace(c) ((__ismask(c)&(_S)) != 0)
+#define isupper(c) ((__ismask(c)&(_U)) != 0)
+#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
+
+#define isascii(c) (((unsigned char)(c))<=0x7f)
+#define toascii(c) (((unsigned char)(c))&0x7f)
+
+static inline unsigned char __tolower(unsigned char c)
+{
+ if (isupper(c))
+ c -= 'A'-'a';
+ return c;
+}
+
+static inline unsigned char __toupper(unsigned char c)
+{
+ if (islower(c))
+ c -= 'a'-'A';
+ return c;
+}
+
+#define tolower(c) __tolower(c)
+#define toupper(c) __toupper(c)
+
+/*
+ * Fast implementation of tolower() for internal usage. Do not use in your
+ * code.
+ */
+static inline char _tolower(const char c)
+{
+ return c | 0x20;
+}
+
+#endif
diff --git a/linux/src/include/linux/delay.h b/linux/src/include/linux/delay.h
new file mode 100644
index 0000000..50b5d0b
--- /dev/null
+++ b/linux/src/include/linux/delay.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_DELAY_H
+#define _LINUX_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+
+extern unsigned long loops_per_sec;
+
+#include <asm/delay.h>
+
+#endif /* defined(_LINUX_DELAY_H) */
diff --git a/linux/src/include/linux/errno.h b/linux/src/include/linux/errno.h
new file mode 100644
index 0000000..ac21284
--- /dev/null
+++ b/linux/src/include/linux/errno.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_ERRNO_H
+#define _LINUX_ERRNO_H
+
+#include <asm/errno.h>
+
+#ifdef __KERNEL__
+
+/* Should never be seen by user programs */
+#define ERESTARTSYS 512
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514 /* restart if no handler.. */
+#define ENOIOCTLCMD 515 /* No ioctl command */
+
+#endif
+
+#endif
diff --git a/linux/src/include/linux/etherdevice.h b/linux/src/include/linux/etherdevice.h
new file mode 100644
index 0000000..9f8b97c
--- /dev/null
+++ b/linux/src/include/linux/etherdevice.h
@@ -0,0 +1,46 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_ETHERDEVICE_H
+#define _LINUX_ETHERDEVICE_H
+
+
+#include <linux/if_ether.h>
+
+#ifdef __KERNEL__
+extern int eth_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int eth_rebuild_header(void *buff, struct device *dev,
+ unsigned long dst, struct sk_buff *skb);
+extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev);
+extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev,
+ unsigned short htype, __u32 daddr);
+extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+extern void eth_copy_and_sum(struct sk_buff *dest,
+ unsigned char *src, int length, int base);
+extern struct device * init_etherdev(struct device *, int);
+
+#endif
+
+#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/linux/src/include/linux/fcntl.h b/linux/src/include/linux/fcntl.h
new file mode 100644
index 0000000..9de3512
--- /dev/null
+++ b/linux/src/include/linux/fcntl.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_FCNTL_H
+#define _LINUX_FCNTL_H
+
+#include <asm/fcntl.h>
+
+#endif
diff --git a/linux/src/include/linux/fd.h b/linux/src/include/linux/fd.h
new file mode 100644
index 0000000..a05cf39
--- /dev/null
+++ b/linux/src/include/linux/fd.h
@@ -0,0 +1,377 @@
+#ifndef _LINUX_FD_H
+#define _LINUX_FD_H
+
+#include <linux/ioctl.h>
+
+/* New file layout: Now the ioctl definitions immediately follow the
+ * definitions of the structures that they use */
+
+/*
+ * Geometry
+ */
+struct floppy_struct {
+ unsigned int size, /* nr of sectors total */
+ sect, /* sectors per track */
+ head, /* nr of heads */
+ track, /* nr of tracks */
+ stretch; /* !=0 means double track steps */
+#define FD_STRETCH 1
+#define FD_SWAPSIDES 2
+
+ unsigned char gap, /* gap1 size */
+
+ rate, /* data rate. |= 0x40 for perpendicular */
+#define FD_2M 0x4
+#define FD_SIZECODEMASK 0x38
+#define FD_SIZECODE(floppy) (((((floppy)->rate&FD_SIZECODEMASK)>> 3)+ 2) %8)
+#define FD_SECTSIZE(floppy) ( (floppy)->rate & FD_2M ? \
+ 512 : 128 << FD_SIZECODE(floppy) )
+#define FD_PERP 0x40
+
+ spec1, /* stepping rate, head unload time */
+ fmt_gap; /* gap2 size */
+ const char * name; /* used only for predefined formats */
+};
+
+
+/* commands needing write access have 0x40 set */
+/* commands needing super user access have 0x80 set */
+
+#define FDCLRPRM _IO(2, 0x41)
+/* clear user-defined parameters */
+
+#define FDSETPRM _IOW(2, 0x42, struct floppy_struct)
+#define FDSETMEDIAPRM FDSETPRM
+/* set user-defined parameters for current media */
+
+#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct)
+#define FDGETPRM _IOR(2, 0x04, struct floppy_struct)
+#define FDDEFMEDIAPRM FDDEFPRM
+#define FDGETMEDIAPRM FDGETPRM
+/* set/get disk parameters */
+
+
+#define FDMSGON _IO(2,0x45)
+#define FDMSGOFF _IO(2,0x46)
+/* issue/don't issue kernel messages on media type change */
+
+
+/*
+ * Formatting (obsolete)
+ */
+#define FD_FILL_BYTE 0xF6 /* format fill byte. */
+
+struct format_descr {
+ unsigned int device,head,track;
+};
+
+#define FDFMTBEG _IO(2,0x47)
+/* begin formatting a disk */
+#define FDFMTTRK _IOW(2,0x48, struct format_descr)
+/* format the specified track */
+#define FDFMTEND _IO(2,0x49)
+/* end formatting a disk */
+
+
+/*
+ * Error thresholds
+ */
+struct floppy_max_errors {
+ unsigned int
+ abort, /* number of errors to be reached before aborting */
+ read_track, /* maximal number of errors permitted to read an
+ * entire track at once */
+ reset, /* maximal number of errors before a reset is tried */
+ recal, /* maximal number of errors before a recalibrate is
+ * tried */
+
+ /*
+ * Threshold for reporting FDC errors to the console.
+ * Setting this to zero may flood your screen when using
+ * ultra cheap floppies ;-)
+ */
+ reporting;
+
+};
+
+#define FDSETEMSGTRESH _IO(2,0x4a)
+/* set fdc error reporting threshold */
+
+#define FDFLUSH _IO(2,0x4b)
+/* flush buffers for media; either for verifying media, or for
+ * handling a media change without closing the file descriptor */
+
+#define FDSETMAXERRS _IOW(2, 0x4c, struct floppy_max_errors)
+#define FDGETMAXERRS _IOR(2, 0x0e, struct floppy_max_errors)
+/* set/get abortion and read_track threshold. See also floppy_drive_params
+ * structure */
+
+
+typedef char floppy_drive_name[16];
+#define FDGETDRVTYP _IOR(2, 0x0f, floppy_drive_name)
+/* get drive type: 5 1/4 or 3 1/2 */
+
+
+/*
+ * Drive parameters (user modifiable)
+ */
+struct floppy_drive_params {
+ char cmos; /* cmos type */
+
+ /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms
+ * etc) and ND is set means no DMA. Hardcoded to 6 (HLD=6ms, use DMA).
+ */
+ unsigned long max_dtr; /* Step rate, usec */
+ unsigned long hlt; /* Head load/settle time, msec */
+ unsigned long hut; /* Head unload time (remnant of
+ * 8" drives) */
+ unsigned long srt; /* Step rate, usec */
+
+ unsigned long spinup; /* time needed for spinup (expressed
+ * in jiffies) */
+ unsigned long spindown; /* timeout needed for spindown */
+ unsigned char spindown_offset; /* decides in which position the disk
+ * will stop */
+ unsigned char select_delay; /* delay to wait after select */
+ unsigned char rps; /* rotations per second */
+ unsigned char tracks; /* maximum number of tracks */
+ unsigned long timeout; /* timeout for interrupt requests */
+
+ unsigned char interleave_sect; /* if there are more sectors, use
+ * interleave */
+
+ struct floppy_max_errors max_errors;
+
+ char flags; /* various flags, including ftd_msg */
+/*
+ * Announce successful media type detection and media information loss after
+ * disk changes.
+ * Also used to enable/disable printing of overrun warnings.
+ */
+
+#define FTD_MSG 0x10
+#define FD_BROKEN_DCL 0x20
+#define FD_DEBUG 0x02
+#define FD_SILENT_DCL_CLEAR 0x4
+#define FD_INVERTED_DCL 0x80
+
+ char read_track; /* use readtrack during probing? */
+
+/*
+ * Auto-detection. Each drive type has eight formats which are
+ * used in succession to try to read the disk. If the FDC cannot lock onto
+ * the disk, the next format is tried. This uses the variable 'probing'.
+ */
+ short autodetect[8]; /* autodetected formats */
+
+ int checkfreq; /* how often should the drive be checked for disk
+ * changes */
+ int native_format; /* native format of this drive */
+};
+
+enum {
+ FD_NEED_TWADDLE_BIT, /* more magic */
+ FD_VERIFY_BIT, /* inquire for write protection */
+ FD_DISK_NEWCHANGE_BIT, /* change detected, and no action undertaken yet
+ * to clear media change status */
+ FD_UNUSED_BIT,
+ FD_DISK_CHANGED_BIT, /* disk has been changed since last i/o */
+ FD_DISK_WRITABLE_BIT /* disk is writable */
+};
+
+#define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
+#define FDGETDRVPRM _IOR(2, 0x11, struct floppy_drive_params)
+/* set/get drive parameters */
+
+
+/*
+ * Current drive state (not directly modifiable by user, readonly)
+ */
+struct floppy_drive_struct {
+ signed char flags;
+/* values for these flags */
+#define FD_NEED_TWADDLE (1 << FD_NEED_TWADDLE_BIT)
+#define FD_VERIFY (1 << FD_VERIFY_BIT)
+#define FD_DISK_NEWCHANGE (1 << FD_DISK_NEWCHANGE_BIT)
+#define FD_DISK_CHANGED (1 << FD_DISK_CHANGED_BIT)
+#define FD_DISK_WRITABLE (1 << FD_DISK_WRITABLE_BIT)
+
+ unsigned long spinup_date;
+ unsigned long select_date;
+ unsigned long first_read_date;
+ short probed_format;
+ short track; /* current track */
+ short maxblock; /* id of highest block read */
+ short maxtrack; /* id of highest half track read */
+ int generation; /* how many diskchanges? */
+
+/*
+ * (User-provided) media information is _not_ discarded after a media change
+ * if the corresponding keep_data flag is non-zero. Positive values are
+ * decremented after each probe.
+ */
+ int keep_data;
+
+ /* Prevent "aliased" accesses. */
+ int fd_ref;
+ int fd_device;
+ int last_checked; /* when was the drive last checked for a disk
+ * change? */
+
+ char *dmabuf;
+ int bufblocks;
+};
+
+#define FDGETDRVSTAT _IOR(2, 0x12, struct floppy_drive_struct)
+#define FDPOLLDRVSTAT _IOR(2, 0x13, struct floppy_drive_struct)
+/* get drive state: GET returns the cached state, POLL polls for new state */
+
+
+/*
+ * reset FDC
+ */
+enum reset_mode {
+ FD_RESET_IF_NEEDED, /* reset only if the reset flags is set */
+ FD_RESET_IF_RAWCMD, /* obsolete */
+ FD_RESET_ALWAYS /* reset always */
+};
+#define FDRESET _IO(2, 0x54)
+
+
+/*
+ * FDC state
+ */
+struct floppy_fdc_state {
+ int spec1; /* spec1 value last used */
+ int spec2; /* spec2 value last used */
+ int dtr;
+ unsigned char version; /* FDC version code */
+ unsigned char dor;
+ int address; /* io address */
+ unsigned int rawcmd:2;
+ unsigned int reset:1;
+ unsigned int need_configure:1;
+ unsigned int perp_mode:2;
+ unsigned int has_fifo:1;
+ unsigned int driver_version; /* version code for floppy driver */
+#define FD_DRIVER_VERSION 0x100
+/* user programs using the floppy API should use floppy_fdc_state to
+ * get the version number of the floppy driver that they are running
+ * on. If this version number is bigger than the one compiled into the
+ * user program (the FD_DRIVER_VERSION define), it should be prepared
+ * to handle bigger structures
+ */
+
+ unsigned char track[4];
+ /* Position of the heads of the 4 units attached to this FDC,
+ * as stored on the FDC. In the future, the position as stored
+ * on the FDC might not agree with the actual physical
+ * position of these drive heads. By allowing such
+ * disagreement, it will be possible to reset the FDC without
+ * incurring the expensive cost of repositioning all heads.
+ * Right now, these positions are hard wired to 0. */
+
+};
+
+#define FDGETFDCSTAT _IOR(2, 0x15, struct floppy_fdc_state)
+
+
+/*
+ * Asynchronous Write error tracking
+ */
+struct floppy_write_errors {
+ /* Write error logging.
+ *
+ * These fields can be cleared with the FDWERRORCLR ioctl.
+ * Only writes that were attempted but failed due to a physical media
+ * error are logged. write(2) calls that fail and return an error code
+ * to the user process are not counted.
+ */
+
+ unsigned int write_errors; /* number of physical write errors
+ * encountered */
+
+ /* position of first and last write errors */
+ unsigned long first_error_sector;
+ int first_error_generation;
+ unsigned long last_error_sector;
+ int last_error_generation;
+
+ unsigned int badness; /* highest retry count for a read or write
+ * operation */
+};
+
+#define FDWERRORCLR _IO(2, 0x56)
+/* clear write error and badness information */
+#define FDWERRORGET _IOR(2, 0x17, struct floppy_write_errors)
+/* get write error and badness information */
+
+
+/*
+ * Raw commands
+ */
+/* new interface flag: now we can do them in batches */
+#define FDHAVEBATCHEDRAWCMD
+
+struct floppy_raw_cmd {
+ unsigned int flags;
+#define FD_RAW_READ 1
+#define FD_RAW_WRITE 2
+#define FD_RAW_NO_MOTOR 4
+#define FD_RAW_DISK_CHANGE 4 /* out: disk change flag was set */
+#define FD_RAW_INTR 8 /* wait for an interrupt */
+#define FD_RAW_SPIN 0x10 /* spin up the disk for this command */
+#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command
+ * completion */
+#define FD_RAW_NEED_DISK 0x40 /* this command needs a disk to be present */
+#define FD_RAW_NEED_SEEK 0x80 /* this command uses an implied seek (soft) */
+
+/* more "in" flags */
+#define FD_RAW_MORE 0x100 /* more records follow */
+#define FD_RAW_STOP_IF_FAILURE 0x200 /* stop if we encounter a failure */
+#define FD_RAW_STOP_IF_SUCCESS 0x400 /* stop if command successful */
+#define FD_RAW_SOFTFAILURE 0x800 /* consider the return value for failure
+ * detection too */
+
+/* more "out" flags */
+#define FD_RAW_FAILURE 0x10000 /* command sent to fdc, fdc returned error */
+#define FD_RAW_HARDFAILURE 0x20000 /* fdc had to be reset, or timed out */
+
+ void *data;
+ char *kernel_data; /* location of data buffer in the kernel */
+ struct floppy_raw_cmd *next; /* used for chaining of raw cmd's
+ * within the kernel */
+ long length; /* in: length of dma transfer. out: remaining bytes */
+ long phys_length; /* physical length, if different from dma length */
+ int buffer_length; /* length of allocated buffer */
+
+ unsigned char rate;
+ unsigned char cmd_count;
+ unsigned char cmd[16];
+ unsigned char reply_count;
+ unsigned char reply[16];
+ int track;
+ int resultcode;
+
+ int reserved1;
+ int reserved2;
+};
+
+#define FDRAWCMD _IO(2, 0x58)
+/* send a raw command to the fdc. Structure size not included, because of
+ * batches */
+
+#define FDTWADDLE _IO(2, 0x59)
+/* flicker motor-on bit before reading a sector. Experimental */
+
+
+#define FDEJECT _IO(2, 0x5a)
+/* eject the disk */
+
+
+#ifdef __KERNEL__
+/* eject the boot floppy (if we need the drive for a different root floppy) */
+void floppy_eject(void);
+#endif
+
+#endif
diff --git a/linux/src/include/linux/fddidevice.h b/linux/src/include/linux/fddidevice.h
new file mode 100644
index 0000000..bb0b298
--- /dev/null
+++ b/linux/src/include/linux/fddidevice.h
@@ -0,0 +1,42 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the FDDI handlers.
+ *
+ * Version: @(#)fddidevice.h 1.0.0 08/12/96
+ *
+ * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * fddidevice.h is based on previous trdevice.h work by
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_FDDIDEVICE_H
+#define _LINUX_FDDIDEVICE_H
+
+#include <linux/if_fddi.h>
+
+#ifdef __KERNEL__
+extern int fddi_header(struct sk_buff *skb,
+ struct device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+extern int fddi_rebuild_header(void *buff,
+ struct device *dev,
+ unsigned long dest,
+ struct sk_buff *skb);
+extern unsigned short fddi_type_trans(struct sk_buff *skb,
+ struct device *dev);
+#endif
+
+#endif /* _LINUX_FDDIDEVICE_H */
diff --git a/linux/src/include/linux/fdreg.h b/linux/src/include/linux/fdreg.h
new file mode 100644
index 0000000..1d9026e
--- /dev/null
+++ b/linux/src/include/linux/fdreg.h
@@ -0,0 +1,143 @@
+#ifndef _LINUX_FDREG_H
+#define _LINUX_FDREG_H
+/*
+ * This file contains some defines for the floppy disk controller.
+ * Various sources. Mostly "IBM Microcomputers: A Programmers
+ * Handbook", Sanches and Canton.
+ */
+
+#ifdef FDPATCHES
+
+#define FD_IOPORT fdc_state[fdc].address
+
+/* Fd controller regs. S&C, about page 340 */
+#define FD_STATUS (4 + FD_IOPORT )
+#define FD_DATA (5 + FD_IOPORT )
+
+/* Digital Output Register */
+#define FD_DOR (2 + FD_IOPORT )
+
+/* Digital Input Register (read) */
+#define FD_DIR (7 + FD_IOPORT )
+
+/* Diskette Control Register (write)*/
+#define FD_DCR (7 + FD_IOPORT )
+
+#else
+
+#define FD_STATUS 0x3f4
+#define FD_DATA 0x3f5
+#define FD_DOR 0x3f2 /* Digital Output Register */
+#define FD_DIR 0x3f7 /* Digital Input Register (read) */
+#define FD_DCR 0x3f7 /* Diskette Control Register (write)*/
+
+#endif
+
+/* Bits of main status register */
+#define STATUS_BUSYMASK 0x0F /* drive busy mask */
+#define STATUS_BUSY 0x10 /* FDC busy */
+#define STATUS_DMA 0x20 /* 0- DMA mode */
+#define STATUS_DIR 0x40 /* 0- cpu->fdc */
+#define STATUS_READY 0x80 /* Data reg ready */
+
+/* Bits of FD_ST0 */
+#define ST0_DS 0x03 /* drive select mask */
+#define ST0_HA 0x04 /* Head (Address) */
+#define ST0_NR 0x08 /* Not Ready */
+#define ST0_ECE 0x10 /* Equipment check error */
+#define ST0_SE 0x20 /* Seek end */
+#define ST0_INTR 0xC0 /* Interrupt code mask */
+
+/* Bits of FD_ST1 */
+#define ST1_MAM 0x01 /* Missing Address Mark */
+#define ST1_WP 0x02 /* Write Protect */
+#define ST1_ND 0x04 /* No Data - unreadable */
+#define ST1_OR 0x10 /* OverRun */
+#define ST1_CRC 0x20 /* CRC error in data or addr */
+#define ST1_EOC 0x80 /* End Of Cylinder */
+
+/* Bits of FD_ST2 */
+#define ST2_MAM 0x01 /* Missing Address Mark (again) */
+#define ST2_BC 0x02 /* Bad Cylinder */
+#define ST2_SNS 0x04 /* Scan Not Satisfied */
+#define ST2_SEH 0x08 /* Scan Equal Hit */
+#define ST2_WC 0x10 /* Wrong Cylinder */
+#define ST2_CRC 0x20 /* CRC error in data field */
+#define ST2_CM 0x40 /* Control Mark = deleted */
+
+/* Bits of FD_ST3 */
+#define ST3_HA 0x04 /* Head (Address) */
+#define ST3_DS 0x08 /* drive is double-sided */
+#define ST3_TZ 0x10 /* Track Zero signal (1=track 0) */
+#define ST3_RY 0x20 /* drive is ready */
+#define ST3_WP 0x40 /* Write Protect */
+#define ST3_FT 0x80 /* Drive Fault */
+
+/* Values for FD_COMMAND */
+#define FD_RECALIBRATE 0x07 /* move to track 0 */
+#define FD_SEEK 0x0F /* seek track */
+#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
+#define FD_WRITE 0xC5 /* write with MT, MFM */
+#define FD_SENSEI 0x08 /* Sense Interrupt Status */
+#define FD_SPECIFY 0x03 /* specify HUT etc */
+#define FD_FORMAT 0x4D /* format one track */
+#define FD_VERSION 0x10 /* get version code */
+#define FD_CONFIGURE 0x13 /* configure FIFO operation */
+#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
+#define FD_GETSTATUS 0x04 /* read ST3 */
+#define FD_DUMPREGS 0x0E /* dump the contents of the fdc regs */
+#define FD_READID 0xEA /* prints the header of a sector */
+#define FD_UNLOCK 0x14 /* Fifo config unlock */
+#define FD_LOCK 0x94 /* Fifo config lock */
+#define FD_RSEEK_OUT 0x8f /* seek out (i.e. to lower tracks) */
+#define FD_RSEEK_IN 0xcf /* seek in (i.e. to higher tracks) */
+
+/* the following commands are new in the 82078. They are not used in the
+ * floppy driver, except the first three. These commands may be useful for apps
+ * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at
+ * http://www-techdoc.intel.com/docs/periph/fd_contr/datasheets/ */
+
+#define FD_PARTID 0x18 /* part id ("extended" version cmd) */
+#define FD_SAVE 0x2e /* save fdc regs for later restore */
+#define FD_DRIVESPEC 0x8e /* drive specification: Access to the
+ * 2 Mbps data transfer rate for tape
+ * drives */
+
+#define FD_RESTORE 0x4e /* later restore */
+#define FD_POWERDOWN 0x27 /* configure FDC's powersave features */
+#define FD_FORMAT_N_WRITE 0xef /* format and write in one go. */
+#define FD_OPTION 0x33 /* ISO format (which is a clean way to
+ * pack more sectors on a track) */
+
+/* DMA commands */
+#define DMA_READ 0x46
+#define DMA_WRITE 0x4A
+
+/* FDC version return types */
+#define FDC_NONE 0x00
+#define FDC_UNKNOWN 0x10 /* DO NOT USE THIS TYPE EXCEPT IF IDENTIFICATION
+ FAILS EARLY */
+#define FDC_8272A 0x20 /* Intel 8272a, NEC 765 */
+#define FDC_765ED 0x30 /* Non-Intel 1MB-compatible FDC, can't detect */
+#define FDC_82072 0x40 /* Intel 82072; 8272a + FIFO + DUMPREGS */
+#define FDC_82072A 0x45 /* 82072A (on Sparcs) */
+#define FDC_82077_ORIG 0x51 /* Original version of 82077AA, sans LOCK */
+#define FDC_82077 0x52 /* 82077AA-1 */
+#define FDC_82078_UNKN 0x5f /* Unknown 82078 variant */
+#define FDC_82078 0x60 /* 44pin 82078 or 64pin 82078SL */
+#define FDC_82078_1 0x61 /* 82078-1 (2Mbps fdc) */
+#define FDC_S82078B 0x62 /* S82078B (first seen on Adaptec AVA-2825 VLB
+ * SCSI/EIDE/Floppy controller) */
+#define FDC_87306 0x63 /* National Semiconductor PC 87306 */
+
+/*
+ * Beware: the fdc type list is roughly sorted by increasing features.
+ * Presence of features is tested by comparing the FDC version id with the
+ * "oldest" version that has the needed feature.
+ * If during FDC detection, an obscure test fails late in the sequence, don't
+ * assign FDC_UNKNOWN. Else the FDC will be treated as a dumb 8272a, or worse.
+ * This is especially true if the tests are unneeded.
+ */
+
+#define FD_RESET_DELAY 20
+#endif
diff --git a/linux/src/include/linux/fs.h b/linux/src/include/linux/fs.h
new file mode 100644
index 0000000..b698b3f
--- /dev/null
+++ b/linux/src/include/linux/fs.h
@@ -0,0 +1,728 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table
+ * structures etc.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/limits.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/vfs.h>
+#include <linux/net.h>
+#include <linux/kdev_t.h>
+#include <linux/ioctl.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but I'll fix
+ * that later. Anyway, now the file code is no longer dependent
+ * on bitmaps in unsigned longs, but uses the new fd_set structure..
+ *
+ * Some programs (notably those using select()) may have to be
+ * recompiled to take full advantage of the new limits..
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define NR_OPEN 256
+
+#define NR_SUPER 64
+#define BLOCK_SIZE 1024
+#define BLOCK_SIZE_BITS 10
+
+/* And dynamically-tunable limits and defaults: */
+extern int max_inodes, nr_inodes;
+extern int max_files, nr_files;
+#define NR_INODE 3072 /* this should be bigger than NR_FILE */
+#define NR_FILE 1024 /* this can well be larger on a larger system */
+
+#define MAY_EXEC 1
+#define MAY_WRITE 2
+#define MAY_READ 4
+
+#define FMODE_READ 1
+#define FMODE_WRITE 2
+
+#define READ 0
+#define WRITE 1
+#define READA 2 /* read-ahead - don't block if no resources */
+#define WRITEA 3 /* write-ahead - don't block if no resources */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define NIL_FILP ((struct file *)0)
+#define SEL_IN 1
+#define SEL_OUT 2
+#define SEL_EX 4
+
+/*
+ * These are the fs-independent mount-flags: up to 16 flags are supported
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define S_WRITE 128 /* Write on file/directory/symlink */
+#define S_APPEND 256 /* Append-only file */
+#define S_IMMUTABLE 512 /* Immutable file */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define S_BAD_INODE 2048 /* Marker for unreadable inodes */
+#define S_ZERO_WR 4096 /* Device accepts 0 length writes */
+
+/*
+ * Flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME)
+
+/*
+ * Magic mount flag number. Has to be or-ed to the flag values.
+ */
+#define MS_MGC_VAL 0xC0ED0000 /* magic flag number to indicate "new" flags */
+#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */
+
+/*
+ * Note that read-only etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ */
+#define IS_RDONLY(inode) (((inode)->i_sb) && ((inode)->i_sb->s_flags & MS_RDONLY))
+#define IS_NOSUID(inode) ((inode)->i_flags & MS_NOSUID)
+#define IS_NODEV(inode) ((inode)->i_flags & MS_NODEV)
+#define IS_NOEXEC(inode) ((inode)->i_flags & MS_NOEXEC)
+#define IS_SYNC(inode) ((inode)->i_flags & MS_SYNCHRONOUS)
+#define IS_MANDLOCK(inode) ((inode)->i_flags & MS_MANDLOCK)
+
+#define IS_WRITABLE(inode) ((inode)->i_flags & S_WRITE)
+#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+#define IS_NOATIME(inode) ((inode)->i_flags & MS_NOATIME)
+#define IS_ZERO_WR(inode) ((inode)->i_flags & S_ZERO_WR)
+
+#define UPDATE_ATIME(inode) \
+ if (!IS_NOATIME(inode) && !IS_RDONLY(inode)) { \
+ inode->i_atime = CURRENT_TIME; \
+ inode->i_dirt = 1; \
+ }
+
+/* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
+#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96) /* return device size */
+#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */
+#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+
+#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+#define FIBMAP _IO(0x00,1) /* bmap access */
+#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+
+#ifdef __KERNEL__
+
+#include <asm/semaphore.h>
+#include <asm/bitops.h>
+
+extern void buffer_init(void);
+extern unsigned long inode_init(unsigned long start, unsigned long end);
+extern unsigned long file_table_init(unsigned long start, unsigned long end);
+extern unsigned long name_cache_init(unsigned long start, unsigned long end);
+
+typedef char buffer_block[BLOCK_SIZE];
+
+/* bh state bits */
+#define BH_Uptodate 0 /* 1 if the buffer contains valid data */
+#define BH_Dirty 1 /* 1 if the buffer is dirty */
+#define BH_Lock 2 /* 1 if the buffer is locked */
+#define BH_Req 3 /* 0 if the buffer has been invalidated */
+#define BH_Touched 4 /* 1 if the buffer has been touched (aging) */
+#define BH_Has_aged 5 /* 1 if the buffer has been aged (aging) */
+#define BH_Protected 6 /* 1 if the buffer is protected */
+#define BH_FreeOnIO 7 /* 1 to discard the buffer_head after IO */
+#define BH_MD 8 /* 1 if the buffer is an MD request */
+
+/*
+ * Try to keep the most commonly used fields in single cache lines (16
+ * bytes) to improve performance. This ordering should be
+ * particularly beneficial on 32-bit processors.
+ *
+ * We use the first 16 bytes for the data which is used in searches
+ * over the block hash lists (ie. getblk(), find_buffer() and
+ * friends).
+ *
+ * The second 16 bytes we use for lru buffer scans, as used by
+ * sync_buffers() and refill_freelist(). -- sct
+ */
+struct buffer_head {
+ /* First cache line: */
+ unsigned long b_blocknr; /* block number */
+ kdev_t b_dev; /* device (B_FREE = free) */
+ kdev_t b_rdev; /* Real device */
+ unsigned long b_rsector; /* Real buffer location on disk */
+ struct buffer_head * b_next; /* Hash queue list */
+ struct buffer_head * b_this_page; /* circular list of buffers in one page */
+
+ /* Second cache line: */
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head * b_next_free;
+ unsigned int b_count; /* users using this block */
+ unsigned long b_size; /* block size */
+
+ /* Non-performance-critical data follows. */
+ char * b_data; /* pointer to data block (1024 bytes) */
+ unsigned int b_list; /* List that this buffer appears */
+ unsigned long b_flushtime; /* Time when this (dirty) buffer
+ * should be written */
+ unsigned long b_lru_time; /* Time when this buffer was
+ * last used. */
+ struct wait_queue * b_wait;
+ struct buffer_head * b_prev; /* doubly linked list of hash-queue */
+ struct buffer_head * b_prev_free; /* doubly linked list of buffers */
+ struct buffer_head * b_reqnext; /* request queue */
+
+/*
+ * Some MD stuff like RAID5 needs special event handlers and
+ * special private buffer_head fields:
+ */
+ void * personality;
+ void * private_bh;
+};
+
+static inline int buffer_uptodate(struct buffer_head * bh)
+{
+ return test_bit(BH_Uptodate, &bh->b_state);
+}
+
+static inline int buffer_dirty(struct buffer_head * bh)
+{
+ return test_bit(BH_Dirty, &bh->b_state);
+}
+
+static inline int buffer_locked(struct buffer_head * bh)
+{
+ return test_bit(BH_Lock, &bh->b_state);
+}
+
+static inline int buffer_req(struct buffer_head * bh)
+{
+ return test_bit(BH_Req, &bh->b_state);
+}
+
+static inline int buffer_touched(struct buffer_head * bh)
+{
+ return test_bit(BH_Touched, &bh->b_state);
+}
+
+static inline int buffer_has_aged(struct buffer_head * bh)
+{
+ return test_bit(BH_Has_aged, &bh->b_state);
+}
+
+static inline int buffer_protected(struct buffer_head * bh)
+{
+ return test_bit(BH_Protected, &bh->b_state);
+}
+
+#include <linux/pipe_fs_i.h>
+#include <linux/minix_fs_i.h>
+#include <linux/ext_fs_i.h>
+#include <linux/ext2_fs_i.h>
+#include <linux/hpfs_fs_i.h>
+#include <linux/msdos_fs_i.h>
+#include <linux/umsdos_fs_i.h>
+#include <linux/iso_fs_i.h>
+#include <linux/nfs_fs_i.h>
+#include <linux/xia_fs_i.h>
+#include <linux/sysv_fs_i.h>
+#include <linux/affs_fs_i.h>
+#include <linux/ufs_fs_i.h>
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+#define ATTR_ATIME_SET 128
+#define ATTR_MTIME_SET 256
+#define ATTR_FORCE 512 /* Not a change, but a change it */
+
+/*
+ * This is the Inode Attributes structure, used for notify_change(). It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about. Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ off_t ia_size;
+ time_t ia_atime;
+ time_t ia_mtime;
+ time_t ia_ctime;
+};
+
+#include <linux/quota.h>
+
+struct inode {
+ kdev_t i_dev;
+ unsigned long i_ino;
+ umode_t i_mode;
+ nlink_t i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ kdev_t i_rdev;
+ off_t i_size;
+ time_t i_atime;
+ time_t i_mtime;
+ time_t i_ctime;
+ unsigned long i_blksize;
+ unsigned long i_blocks;
+ unsigned long i_version;
+ unsigned long i_nrpages;
+ struct semaphore i_sem;
+ struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct wait_queue *i_wait;
+ struct file_lock *i_flock;
+ struct vm_area_struct *i_mmap;
+ struct page *i_pages;
+ struct dquot *i_dquot[MAXQUOTAS];
+ struct inode *i_next, *i_prev;
+ struct inode *i_hash_next, *i_hash_prev;
+ struct inode *i_bound_to, *i_bound_by;
+ struct inode *i_mount;
+ unsigned long i_count; /* needs to be > (address_space * tasks)>>pagebits */
+ unsigned short i_flags;
+ unsigned short i_writecount;
+ unsigned char i_lock;
+ unsigned char i_dirt;
+ unsigned char i_pipe;
+ unsigned char i_sock;
+ unsigned char i_seek;
+ unsigned char i_update;
+ unsigned char i_condemned;
+ union {
+ struct pipe_inode_info pipe_i;
+ struct minix_inode_info minix_i;
+ struct ext_inode_info ext_i;
+ struct ext2_inode_info ext2_i;
+ struct hpfs_inode_info hpfs_i;
+ struct msdos_inode_info msdos_i;
+ struct umsdos_inode_info umsdos_i;
+ struct iso_inode_info isofs_i;
+ struct nfs_inode_info nfs_i;
+ struct xiafs_inode_info xiafs_i;
+ struct sysv_inode_info sysv_i;
+ struct affs_inode_info affs_i;
+ struct ufs_inode_info ufs_i;
+ struct socket socket_i;
+ void * generic_ip;
+ } u;
+};
+
+struct fown_struct {
+ int pid; /* pid or -pgrp where SIGIO should be sent */
+ uid_t uid, euid; /* uid/euid of process setting the owner */
+};
+
+struct file {
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ unsigned short f_count;
+ unsigned long f_reada, f_ramax, f_raend, f_ralen, f_rawin;
+ struct file *f_next, *f_prev;
+ struct fown_struct f_owner;
+ struct inode * f_inode;
+ struct file_operations * f_op;
+ unsigned long f_version;
+ void *private_data; /* needed for tty driver, and maybe others */
+};
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_BROKEN 4 /* broken flock() emulation */
+#define FL_ACCESS 8 /* for processes suspended by mandatory locking */
+
+struct file_lock {
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_nextlink; /* doubly linked list of all locks */
+ struct file_lock *fl_prevlink; /* used to simplify lock removal */
+ struct file_lock *fl_nextblock; /* circular list of blocked processes */
+ struct file_lock *fl_prevblock;
+ struct task_struct *fl_owner;
+ struct wait_queue *fl_wait;
+ struct file *fl_file;
+ unsigned char fl_flags;
+ unsigned char fl_type;
+ off_t fl_start;
+ off_t fl_end;
+};
+
+#include <linux/fcntl.h>
+
+extern int fcntl_getlk(unsigned int fd, struct flock *l);
+extern int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l);
+extern void locks_remove_locks(struct task_struct *task, struct file *filp);
+
+#include <linux/stat.h>
+
+#define FLOCK_VERIFY_READ 1
+#define FLOCK_VERIFY_WRITE 2
+
+extern int locks_mandatory_locked(struct inode *inode);
+extern int locks_mandatory_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count);
+
+extern inline int locks_verify_locked(struct inode *inode)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_locked(inode));
+ return (0);
+}
+extern inline int locks_verify_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_area(read_write, inode, filp, offset,
+ count));
+ return (0);
+}
+
+struct fasync_struct {
+ int magic;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+extern int fasync_helper(struct inode *, struct file *, int, struct fasync_struct **);
+
+#include <linux/minix_fs_sb.h>
+#include <linux/ext_fs_sb.h>
+#include <linux/ext2_fs_sb.h>
+#include <linux/hpfs_fs_sb.h>
+#include <linux/msdos_fs_sb.h>
+#include <linux/iso_fs_sb.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/xia_fs_sb.h>
+#include <linux/sysv_fs_sb.h>
+#include <linux/affs_fs_sb.h>
+#include <linux/ufs_fs_sb.h>
+
+struct super_block {
+ kdev_t s_dev;
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_lock;
+ unsigned char s_rd_only;
+ unsigned char s_dirt;
+ struct file_system_type *s_type;
+ struct super_operations *s_op;
+ struct dquot_operations *dq_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ unsigned long s_time;
+ struct inode * s_covered;
+ struct inode * s_mounted;
+ struct wait_queue * s_wait;
+ union {
+ struct minix_sb_info minix_sb;
+ struct ext_sb_info ext_sb;
+ struct ext2_sb_info ext2_sb;
+ struct hpfs_sb_info hpfs_sb;
+ struct msdos_sb_info msdos_sb;
+ struct isofs_sb_info isofs_sb;
+ struct nfs_sb_info nfs_sb;
+ struct xiafs_sb_info xiafs_sb;
+ struct sysv_sb_info sysv_sb;
+ struct affs_sb_info affs_sb;
+ struct ufs_sb_info ufs_sb;
+ void *generic_sbp;
+ } u;
+};
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+typedef int (*filldir_t)(void *, const char *, int, off_t, ino_t);
+
+struct file_operations {
+ int (*lseek) (struct inode *, struct file *, off_t, int);
+ int (*read) (struct inode *, struct file *, char *, int);
+ int (*write) (struct inode *, struct file *, const char *, int);
+ int (*readdir) (struct inode *, struct file *, void *, filldir_t);
+ int (*select) (struct inode *, struct file *, int, select_table *);
+ int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct inode *, struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ void (*release) (struct inode *, struct file *);
+ int (*fsync) (struct inode *, struct file *);
+ int (*fasync) (struct inode *, struct file *, int);
+ int (*check_media_change) (kdev_t dev);
+ int (*revalidate) (kdev_t dev);
+};
+
+struct inode_operations {
+ struct file_operations * default_file_ops;
+ int (*create) (struct inode *,const char *,int,int,struct inode **);
+ int (*lookup) (struct inode *,const char *,int,struct inode **);
+ int (*link) (struct inode *,struct inode *,const char *,int);
+ int (*unlink) (struct inode *,const char *,int);
+ int (*symlink) (struct inode *,const char *,int,const char *);
+ int (*mkdir) (struct inode *,const char *,int,int);
+ int (*rmdir) (struct inode *,const char *,int);
+ int (*mknod) (struct inode *,const char *,int,int,int);
+ int (*rename) (struct inode *,const char *,int,struct inode *,const char *,int, int);
+ int (*readlink) (struct inode *,char *,int);
+ int (*follow_link) (struct inode *,struct inode *,int,int,struct inode **);
+ int (*readpage) (struct inode *, struct page *);
+ int (*writepage) (struct inode *, struct page *);
+ int (*bmap) (struct inode *,int);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
+ int (*smap) (struct inode *,int);
+};
+
+struct super_operations {
+ void (*read_inode) (struct inode *);
+ int (*notify_change) (struct inode *, struct iattr *);
+ void (*write_inode) (struct inode *);
+ void (*put_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
+ void (*statfs) (struct super_block *, struct statfs *, int);
+ int (*remount_fs) (struct super_block *, int *, char *);
+};
+
+struct dquot_operations {
+ void (*initialize) (struct inode *, short);
+ void (*drop) (struct inode *);
+ int (*alloc_block) (const struct inode *, unsigned long);
+ int (*alloc_inode) (const struct inode *, unsigned long);
+ void (*free_block) (const struct inode *, unsigned long);
+ void (*free_inode) (const struct inode *, unsigned long);
+ int (*transfer) (struct inode *, struct iattr *, char);
+};
+
+struct file_system_type {
+ struct super_block *(*read_super) (struct super_block *, void *, int);
+ const char *name;
+ int requires_dev;
+ struct file_system_type * next;
+};
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+
+asmlinkage int sys_open(const char *, int, int);
+asmlinkage int sys_close(unsigned int); /* yes, it's really unsigned */
+asmlinkage int sys_read(unsigned int, char *, int);
+
+extern void kill_fasync(struct fasync_struct *fa, int sig);
+
+extern int getname(const char * filename, char **result);
+extern void putname(char * name);
+extern int do_truncate(struct inode *, unsigned long);
+extern int register_blkdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_blkdev(unsigned int major, const char * name);
+extern int blkdev_open(struct inode * inode, struct file * filp);
+extern void blkdev_release (struct inode * inode);
+extern struct file_operations def_blk_fops;
+extern struct inode_operations blkdev_inode_operations;
+
+extern int register_chrdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_chrdev(unsigned int major, const char * name);
+extern int chrdev_open(struct inode * inode, struct file * filp);
+extern struct file_operations def_chr_fops;
+extern struct inode_operations chrdev_inode_operations;
+
+extern void init_fifo(struct inode * inode);
+extern struct inode_operations fifo_inode_operations;
+
+extern struct file_operations connecting_fifo_fops;
+extern struct file_operations read_fifo_fops;
+extern struct file_operations write_fifo_fops;
+extern struct file_operations rdwr_fifo_fops;
+extern struct file_operations read_pipe_fops;
+extern struct file_operations write_pipe_fops;
+extern struct file_operations rdwr_pipe_fops;
+
+extern struct file_system_type *get_fs_type(const char *name);
+
+extern int fs_may_mount(kdev_t dev);
+extern int fs_may_umount(kdev_t dev, struct inode * mount_root);
+extern int fs_may_remount_ro(kdev_t dev);
+
+extern struct file *first_file;
+extern struct super_block super_blocks[NR_SUPER];
+
+extern void refile_buffer(struct buffer_head * buf);
+extern void set_writetime(struct buffer_head * buf, int flag);
+extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**, int);
+
+extern int nr_buffers;
+extern int buffermem;
+extern int nr_buffer_heads;
+
+#define BUF_CLEAN 0
+#define BUF_LOCKED 1 /* Buffers scheduled for write */
+#define BUF_LOCKED1 2 /* Supers, inodes */
+#define BUF_DIRTY 3 /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST 4
+
+void mark_buffer_uptodate(struct buffer_head * bh, int on);
+
+extern inline void mark_buffer_clean(struct buffer_head * bh)
+{
+ if (clear_bit(BH_Dirty, &bh->b_state)) {
+ if (bh->b_list == BUF_DIRTY)
+ refile_buffer(bh);
+ }
+}
+
+extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
+{
+ if (!set_bit(BH_Dirty, &bh->b_state)) {
+ set_writetime(bh, flag);
+ if (bh->b_list != BUF_DIRTY)
+ refile_buffer(bh);
+ }
+}
+
+extern int check_disk_change(kdev_t dev);
+extern void invalidate_inodes(kdev_t dev);
+extern void invalidate_inode_pages(struct inode *);
+extern void invalidate_buffers(kdev_t dev);
+extern int floppy_is_wp(int minor);
+extern void sync_inodes(kdev_t dev);
+extern void sync_dev(kdev_t dev);
+extern int fsync_dev(kdev_t dev);
+extern void sync_supers(kdev_t dev);
+extern int bmap(struct inode * inode,int block);
+extern int notify_change(struct inode *, struct iattr *);
+extern int namei(const char * pathname, struct inode ** res_inode);
+extern int lnamei(const char * pathname, struct inode ** res_inode);
+extern int permission(struct inode * inode,int mask);
+extern int get_write_access(struct inode *inode);
+extern void put_write_access(struct inode *inode);
+extern int open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base);
+extern int do_mknod(const char * filename, int mode, dev_t dev);
+extern int do_pipe(int *);
+extern void iput(struct inode * inode);
+extern struct inode * __iget(struct super_block * sb,int nr,int crsmnt);
+extern struct inode * get_empty_inode(void);
+extern void insert_inode_hash(struct inode *);
+extern void clear_inode(struct inode *);
+extern struct inode * get_pipe_inode(void);
+extern void make_bad_inode(struct inode *);
+extern int get_unused_fd(void);
+extern void put_unused_fd(int);
+extern struct file * get_empty_filp(void);
+extern int close_fp(struct file *filp);
+extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size);
+extern struct buffer_head * getblk(kdev_t dev, int block, int size);
+extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[]);
+extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer);
+extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer);
+extern int is_read_only(kdev_t dev);
+extern void __brelse(struct buffer_head *buf);
+extern inline void brelse(struct buffer_head *buf)
+{
+ if (buf)
+ __brelse(buf);
+}
+extern void __bforget(struct buffer_head *buf);
+extern inline void bforget(struct buffer_head *buf)
+{
+ if (buf)
+ __bforget(buf);
+}
+extern void set_blocksize(kdev_t dev, int size);
+extern struct buffer_head * bread(kdev_t dev, int block, int size);
+extern struct buffer_head * breada(kdev_t dev,int block, int size,
+ unsigned int pos, unsigned int filesize);
+
+extern int generic_readpage(struct inode *, struct page *);
+extern int generic_file_read(struct inode *, struct file *, char *, int);
+extern int generic_file_mmap(struct inode *, struct file *, struct vm_area_struct *);
+extern int brw_page(int, struct page *, kdev_t, int [], int, int);
+
+extern void put_super(kdev_t dev);
+unsigned long generate_cluster(kdev_t dev, int b[], int size);
+extern kdev_t ROOT_DEV;
+
+extern void show_buffers(void);
+extern void mount_root(void);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern kdev_t real_root_dev;
+extern int change_root(kdev_t new_root_dev,const char *put_old);
+#endif
+
+extern int char_read(struct inode *, struct file *, char *, int);
+extern int block_read(struct inode *, struct file *, char *, int);
+extern int read_ahead[];
+
+extern int char_write(struct inode *, struct file *, const char *, int);
+extern int block_write(struct inode *, struct file *, const char *, int);
+
+extern int block_fsync(struct inode *, struct file *);
+extern int file_fsync(struct inode *, struct file *);
+
+extern void dcache_add(struct inode *, const char *, int, unsigned long);
+extern int dcache_lookup(struct inode *, const char *, int, unsigned long *);
+
+extern int inode_change_ok(struct inode *, struct iattr *);
+extern void inode_setattr(struct inode *, struct iattr *);
+
+extern inline struct inode * iget(struct super_block * sb,int nr)
+{
+ return __iget(sb, nr, 1);
+}
+
+/* kludge to get SCSI modules working */
+#include <linux/minix_fs.h>
+#include <linux/minix_fs_sb.h>
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/linux/genhd.h b/linux/src/include/linux/genhd.h
new file mode 100644
index 0000000..dafeff7
--- /dev/null
+++ b/linux/src/include/linux/genhd.h
@@ -0,0 +1,136 @@
+#ifndef _LINUX_GENHD_H
+#define _LINUX_GENHD_H
+
+/*
+ * genhd.h Copyright (C) 1992 Drew Eckhardt
+ * Generic hard disk header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#include <linux/config.h>
+
+#define CONFIG_MSDOS_PARTITION 1
+
+#ifdef __alpha__
+#define CONFIG_OSF_PARTITION 1
+#endif
+
+#if defined(__sparc__) || defined(CONFIG_SMD_DISKLABEL)
+#define CONFIG_SUN_PARTITION 1
+#endif
+
+/* These three have identical behaviour; use the second one if DOS fdisk gets
+ confused about extended/logical partitions starting past cylinder 1023. */
+#define DOS_EXTENDED_PARTITION 5
+#define LINUX_EXTENDED_PARTITION 0x85
+#define WIN98_EXTENDED_PARTITION 0x0f
+
+#define DM6_PARTITION 0x54 /* has DDO: use xlated geom & offset */
+#define EZD_PARTITION 0x55 /* EZ-DRIVE: same as DM6 (we think) */
+#define DM6_AUX1PARTITION 0x51 /* no DDO: use xlated geom */
+#define DM6_AUX3PARTITION 0x53 /* no DDO: use xlated geom */
+
+struct partition {
+ unsigned char boot_ind; /* 0x80 - active */
+ unsigned char head; /* starting head */
+ unsigned char sector; /* starting sector */
+ unsigned char cyl; /* starting cylinder */
+ unsigned char sys_ind; /* What partition type */
+ unsigned char end_head; /* end head */
+ unsigned char end_sector; /* end sector */
+ unsigned char end_cyl; /* end cylinder */
+ unsigned int start_sect; /* starting sector counting from 0 */
+ unsigned int nr_sects; /* nr of sectors in partition */
+} __attribute((packed)); /* Give a polite hint to egcs/alpha to generate
+ unaligned operations */
+
+struct hd_struct {
+ long start_sect;
+ long nr_sects;
+};
+
+struct gendisk {
+ int major; /* major number of driver */
+ const char *major_name; /* name of major driver */
+ int minor_shift; /* number of times minor is shifted to
+ get real minor */
+ int max_p; /* maximum partitions per device */
+ int max_nr; /* maximum number of real devices */
+
+ void (*init)(struct gendisk *); /* Initialization called before we do our thing */
+ struct hd_struct *part; /* partition table */
+ int *sizes; /* device size in blocks, copied to blk_size[] */
+ int nr_real; /* number of real devices */
+
+ void *real_devices; /* internal use */
+ struct gendisk *next;
+};
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
+ */
+
+#define BSD_PARTITION 0xa5 /* Partition ID */
+
+#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
+#define BSD_MAXPARTITIONS 8
+#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
+struct bsd_disklabel {
+ __u32 d_magic; /* the magic number */
+ __s16 d_type; /* drive type */
+ __s16 d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ char d_packname[16]; /* pack identifier */
+ __u32 d_secsize; /* # of bytes per sector */
+ __u32 d_nsectors; /* # of data sectors per track */
+ __u32 d_ntracks; /* # of tracks per cylinder */
+ __u32 d_ncylinders; /* # of data cylinders per unit */
+ __u32 d_secpercyl; /* # of data sectors per cylinder */
+ __u32 d_secperunit; /* # of data sectors per unit */
+ __u16 d_sparespertrack; /* # of spare sectors per track */
+ __u16 d_sparespercyl; /* # of spare sectors per cylinder */
+ __u32 d_acylinders; /* # of alt. cylinders per unit */
+ __u16 d_rpm; /* rotational speed */
+ __u16 d_interleave; /* hardware sector interleave */
+ __u16 d_trackskew; /* sector 0 skew, per track */
+ __u16 d_cylskew; /* sector 0 skew, per cylinder */
+ __u32 d_headswitch; /* head switch time, usec */
+ __u32 d_trkseek; /* track-to-track seek, usec */
+ __u32 d_flags; /* generic flags */
+#define NDDATA 5
+ __u32 d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ __u32 d_spare[NSPARE]; /* reserved for future use */
+ __u32 d_magic2; /* the magic number (again) */
+ __u16 d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ __u16 d_npartitions; /* number of partitions in following */
+ __u32 d_bbsize; /* size of boot area at sn0, bytes */
+ __u32 d_sbsize; /* max size of fs superblock, bytes */
+ struct bsd_partition { /* the partition table */
+ __u32 p_size; /* number of sectors in partition */
+ __u32 p_offset; /* starting sector */
+ __u32 p_fsize; /* filesystem basic fragment size */
+ __u8 p_fstype; /* filesystem type, see below */
+ __u8 p_frag; /* filesystem fragments per block */
+ __u16 p_cpg; /* filesystem cylinders per group */
+ } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
+};
+
+#endif /* CONFIG_BSD_DISKLABEL */
+
+extern struct gendisk *gendisk_head; /* linked list of disks */
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf);
+
+#endif
diff --git a/linux/src/include/linux/hdreg.h b/linux/src/include/linux/hdreg.h
new file mode 100644
index 0000000..4a388c5
--- /dev/null
+++ b/linux/src/include/linux/hdreg.h
@@ -0,0 +1,240 @@
+#ifndef _LINUX_HDREG_H
+#define _LINUX_HDREG_H
+
+/*
+ * This file contains some defines for the AT-hd-controller.
+ * Various sources.
+ */
+
+#define HD_IRQ 14 /* the standard disk interrupt */
+
+/* ide.c has its own port definitions in "ide.h" */
+
+/* Hd controller regs. Ref: IBM AT Bios-listing */
+#define HD_DATA 0x1f0 /* _CTL when writing */
+#define HD_ERROR 0x1f1 /* see err-bits */
+#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
+#define HD_SECTOR 0x1f3 /* starting sector */
+#define HD_LCYL 0x1f4 /* starting cylinder */
+#define HD_HCYL 0x1f5 /* high byte of starting cyl */
+#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
+#define HD_STATUS 0x1f7 /* see status-bits */
+#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
+#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
+#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
+
+#define HD_CMD 0x3f6 /* used for resets */
+#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
+
+/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
+
+/* Bits of HD_STATUS */
+#define ERR_STAT 0x01
+#define INDEX_STAT 0x02
+#define ECC_STAT 0x04 /* Corrected error */
+#define DRQ_STAT 0x08
+#define SEEK_STAT 0x10
+#define WRERR_STAT 0x20
+#define READY_STAT 0x40
+#define BUSY_STAT 0x80
+
+/* Values for HD_COMMAND */
+#define WIN_RESTORE 0x10
+#define WIN_READ 0x20
+#define WIN_WRITE 0x30
+#define WIN_VERIFY 0x40
+#define WIN_FORMAT 0x50
+#define WIN_INIT 0x60
+#define WIN_SEEK 0x70
+#define WIN_DIAGNOSE 0x90
+#define WIN_SPECIFY 0x91 /* set drive geometry translation */
+#define WIN_SETIDLE1 0xE3
+#define WIN_SETIDLE2 0x97
+
+#define WIN_DOORLOCK 0xde /* lock door on removable drives */
+#define WIN_DOORUNLOCK 0xdf /* unlock door on removable drives */
+#define WIN_ACKMC 0xdb /* acknowledge media change */
+
+#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode */
+#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */
+#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */
+#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */
+#define WIN_SETFEATURES 0xEF /* set special drive features */
+#define WIN_READDMA 0xc8 /* read sectors using DMA transfers */
+#define WIN_WRITEDMA 0xca /* write sectors using DMA transfers */
+#define WIN_READDMA_EXT 0x25 /* read sectors using LBA48 DMA transfers */
+#define WIN_WRITEDMA_EXT 0x35 /* write sectors using LBA48 DMA transfers */
+
+/* Additional drive command codes used by ATAPI devices. */
+#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */
+#define WIN_SRST 0x08 /* ATAPI soft reset command */
+#define WIN_PACKETCMD 0xa0 /* Send a packet command. */
+
+/* Non-standard commands */
+#define EXABYTE_ENABLE_NEST 0xf0
+
+/* Bits for HD_ERROR */
+#define MARK_ERR 0x01 /* Bad address mark */
+#define TRK0_ERR 0x02 /* couldn't find track 0 */
+#define ABRT_ERR 0x04 /* Command aborted */
+#define ID_ERR 0x10 /* ID field not found */
+#define MC_ERR 0x20 /* media changed */
+#define ECC_ERR 0x40 /* Uncorrectable ECC error */
+#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
+#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
+
+struct hd_geometry {
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ unsigned long start;
+};
+
+/* hd/ide ctl's that pass (arg) ptrs to user space are numbered 0x030n/0x031n */
+#define HDIO_GETGEO 0x0301 /* get device geometry */
+#define HDIO_GET_UNMASKINTR 0x0302 /* get current unmask setting */
+#define HDIO_GET_MULTCOUNT 0x0304 /* get current IDE blockmode setting */
+#define HDIO_OBSOLETE_IDENTITY 0x0307 /* OBSOLETE, DO NOT USE: returns 142 bytes */
+#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */
+#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */
+#define HDIO_GET_NOWERR 0x030a /* get ignore-write-error flag */
+#define HDIO_GET_DMA 0x030b /* get use-dma flag */
+#define HDIO_GET_IDENTITY 0x030d /* get IDE identification info */
+#define HDIO_DRIVE_CMD 0x031f /* execute a special drive command */
+
+/* hd/ide ctl's that pass (arg) non-ptr values are numbered 0x032n/0x033n */
+#define HDIO_SET_MULTCOUNT 0x0321 /* change IDE blockmode */
+#define HDIO_SET_UNMASKINTR 0x0322 /* permit other irqs during I/O */
+#define HDIO_SET_KEEPSETTINGS 0x0323 /* keep ioctl settings on reset */
+#define HDIO_SET_32BIT 0x0324 /* change io_32bit flags */
+#define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */
+#define HDIO_SET_DMA 0x0326 /* change use-dma flag */
+#define HDIO_SET_PIO_MODE 0x0327 /* reconfig interface to new speed */
+
+/* structure returned by HDIO_GET_IDENTITY, as per ANSI ATA2 rev.2f spec */
+struct hd_driveid {
+ unsigned short config; /* lots of obsolete bit flags */
+ unsigned short cyls; /* "physical" cyls */
+ unsigned short reserved2; /* reserved (word 2) */
+ unsigned short heads; /* "physical" heads */
+ unsigned short track_bytes; /* unformatted bytes per track */
+ unsigned short sector_bytes; /* unformatted bytes per sector */
+ unsigned short sectors; /* "physical" sectors per track */
+ unsigned short vendor0; /* vendor unique */
+ unsigned short vendor1; /* vendor unique */
+ unsigned short vendor2; /* vendor unique */
+ unsigned char serial_no[20]; /* 0 = not_specified */
+ unsigned short buf_type;
+ unsigned short buf_size; /* 512 byte increments; 0 = not_specified */
+ unsigned short ecc_bytes; /* for r/w long cmds; 0 = not_specified */
+ unsigned char fw_rev[8]; /* 0 = not_specified */
+ unsigned char model[40]; /* 0 = not_specified */
+ unsigned char max_multsect; /* 0=not_implemented */
+ unsigned char vendor3; /* vendor unique */
+ unsigned short dword_io; /* 0=not_implemented; 1=implemented */
+ unsigned char vendor4; /* vendor unique */
+ unsigned char capability; /* bits 0:DMA 1:LBA 2:IORDYsw 3:IORDYsup*/
+ unsigned short reserved50; /* reserved (word 50) */
+ unsigned char vendor5; /* vendor unique */
+ unsigned char tPIO; /* 0=slow, 1=medium, 2=fast */
+ unsigned char vendor6; /* vendor unique */
+ unsigned char tDMA; /* 0=slow, 1=medium, 2=fast */
+ unsigned short field_valid; /* bits 0:cur_ok 1:eide_ok */
+ unsigned short cur_cyls; /* logical cylinders */
+ unsigned short cur_heads; /* logical heads */
+ unsigned short cur_sectors; /* logical sectors per track */
+ unsigned short cur_capacity0; /* logical total sectors on drive */
+ unsigned short cur_capacity1; /* (2 words, misaligned int) */
+ unsigned char multsect; /* current multiple sector count */
+ unsigned char multsect_valid; /* when (bit0==1) multsect is ok */
+ unsigned int lba_capacity; /* total number of sectors */
+ unsigned short dma_1word; /* single-word dma info */
+ unsigned short dma_mword; /* multiple-word dma info */
+ unsigned short eide_pio_modes; /* bits 0:mode3 1:mode4 */
+ unsigned short eide_dma_min; /* min mword dma cycle time (ns) */
+ unsigned short eide_dma_time; /* recommended mword dma cycle time (ns) */
+ unsigned short eide_pio; /* min cycle time (ns), no IORDY */
+ unsigned short eide_pio_iordy; /* min cycle time (ns), with IORDY */
+ unsigned short word69;
+ unsigned short word70;
+ /* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
+ unsigned short word71;
+ unsigned short word72;
+ unsigned short word73;
+ unsigned short word74;
+ unsigned short word75;
+ unsigned short word76;
+ unsigned short word77;
+ unsigned short word78;
+ unsigned short word79;
+ unsigned short word80;
+ unsigned short word81;
+ unsigned short command_sets; /* bits 0:Smart 1:Security 2:Removable 3:PM */
+ unsigned short command_set_2; /* bits 14:Smart Enabled 13:0 zero */
+ unsigned short word84;
+ unsigned short word85;
+ unsigned short word86;
+ unsigned short word87;
+ unsigned short dma_ultra;
+ unsigned short word89; /* reserved (word 89) */
+ unsigned short word90; /* reserved (word 90) */
+ unsigned short word91; /* reserved (word 91) */
+ unsigned short word92; /* reserved (word 92) */
+ unsigned short word93; /* reserved (word 93) */
+ unsigned short word94; /* reserved (word 94) */
+ unsigned short word95; /* reserved (word 95) */
+ unsigned short word96; /* reserved (word 96) */
+ unsigned short word97; /* reserved (word 97) */
+ unsigned short word98; /* reserved (word 98) */
+ unsigned short word99; /* reserved (word 99) */
+ unsigned long long lba_capacity_2; /* 48-bit total number of sectors */
+ unsigned short word104; /* reserved (word 104) */
+ unsigned short word105; /* reserved (word 105) */
+ unsigned short word106; /* reserved (word 106) */
+ unsigned short word107; /* reserved (word 107) */
+ unsigned short word108; /* reserved (word 108) */
+ unsigned short word109; /* reserved (word 109) */
+ unsigned short word110; /* reserved (word 110) */
+ unsigned short word111; /* reserved (word 111) */
+ unsigned short word112; /* reserved (word 112) */
+ unsigned short word113; /* reserved (word 113) */
+ unsigned short word114; /* reserved (word 114) */
+ unsigned short word115; /* reserved (word 115) */
+ unsigned short word116; /* reserved (word 116) */
+ unsigned short word117; /* reserved (word 117) */
+ unsigned short word118; /* reserved (word 118) */
+ unsigned short word119; /* reserved (word 119) */
+ unsigned short word120; /* reserved (word 120) */
+ unsigned short word121; /* reserved (word 121) */
+ unsigned short word122; /* reserved (word 122) */
+ unsigned short word123; /* reserved (word 123) */
+ unsigned short word124; /* reserved (word 124) */
+ unsigned short word125; /* reserved (word 125) */
+ unsigned short word126; /* reserved (word 126) */
+ unsigned short word127; /* reserved (word 127) */
+ unsigned short security; /* bits 0:support 1:enabled 2:locked 3:frozen */
+ unsigned short reserved[127];
+};
+
+#ifdef __KERNEL__
+/*
+ * These routines are used for kernel command line parameters from main.c:
+ */
+#include <linux/config.h>
+
+#ifdef CONFIG_BLK_DEV_HD
+void hd_setup(char *, int *);
+#endif /* CONFIG_BLK_DEV_HD */
+#ifdef CONFIG_BLK_DEV_IDE
+void ide_setup(char *);
+
+#ifdef CONFIG_BLK_DEV_IDE_PCMCIA
+int ide_register(int io_port, int ctl_port, int irq);
+void ide_unregister(unsigned int);
+#endif /* CONFIG_BLK_DEV_IDE_PCMCIA */
+
+#endif /* CONFIG_BLK_DEV_IDE */
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_HDREG_H */
diff --git a/linux/src/include/linux/head.h b/linux/src/include/linux/head.h
new file mode 100644
index 0000000..c501f21
--- /dev/null
+++ b/linux/src/include/linux/head.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_HEAD_H
+#define _LINUX_HEAD_H
+
+typedef struct desc_struct {
+ unsigned long a,b;
+} desc_table[256];
+
+/* XXX Linux code shouldn't use idt/gdt directly */
+/* extern desc_table idt,gdt; */
+
+#define GDT_NUL 0
+#define GDT_CODE 1
+#define GDT_DATA 2
+#define GDT_TMP 3
+
+#define LDT_NUL 0
+#define LDT_CODE 1
+#define LDT_DATA 2
+
+#endif
diff --git a/linux/src/include/linux/icmp.h b/linux/src/include/linux/icmp.h
new file mode 100644
index 0000000..611c41d
--- /dev/null
+++ b/linux/src/include/linux/icmp.h
@@ -0,0 +1,85 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP protocol.
+ *
+ * Version: @(#)icmp.h 1.0.3 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_ICMP_H
+#define _LINUX_ICMP_H
+
+#define ICMP_ECHOREPLY 0 /* Echo Reply */
+#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */
+#define ICMP_SOURCE_QUENCH 4 /* Source Quench */
+#define ICMP_REDIRECT 5 /* Redirect (change route) */
+#define ICMP_ECHO 8 /* Echo Request */
+#define ICMP_TIME_EXCEEDED 11 /* Time Exceeded */
+#define ICMP_PARAMETERPROB 12 /* Parameter Problem */
+#define ICMP_TIMESTAMP 13 /* Timestamp Request */
+#define ICMP_TIMESTAMPREPLY 14 /* Timestamp Reply */
+#define ICMP_INFO_REQUEST 15 /* Information Request */
+#define ICMP_INFO_REPLY 16 /* Information Reply */
+#define ICMP_ADDRESS 17 /* Address Mask Request */
+#define ICMP_ADDRESSREPLY 18 /* Address Mask Reply */
+
+
+/* Codes for UNREACH. */
+#define ICMP_NET_UNREACH 0 /* Network Unreachable */
+#define ICMP_HOST_UNREACH 1 /* Host Unreachable */
+#define ICMP_PROT_UNREACH 2 /* Protocol Unreachable */
+#define ICMP_PORT_UNREACH 3 /* Port Unreachable */
+#define ICMP_FRAG_NEEDED 4 /* Fragmentation Needed/DF set */
+#define ICMP_SR_FAILED 5 /* Source Route failed */
+#define ICMP_NET_UNKNOWN 6
+#define ICMP_HOST_UNKNOWN 7
+#define ICMP_HOST_ISOLATED 8
+#define ICMP_NET_ANO 9
+#define ICMP_HOST_ANO 10
+#define ICMP_NET_UNR_TOS 11
+#define ICMP_HOST_UNR_TOS 12
+#define ICMP_PKT_FILTERED 13 /* Packet filtered */
+#define ICMP_PREC_VIOLATION 14 /* Precedence violation */
+#define ICMP_PREC_CUTOFF 15 /* Precedence cut off */
+#define NR_ICMP_UNREACH 15 /* instead of hardcoding immediate value */
+
+/* Codes for REDIRECT. */
+#define ICMP_REDIR_NET 0 /* Redirect Net */
+#define ICMP_REDIR_HOST 1 /* Redirect Host */
+#define ICMP_REDIR_NETTOS 2 /* Redirect Net for TOS */
+#define ICMP_REDIR_HOSTTOS 3 /* Redirect Host for TOS */
+
+/* Codes for TIME_EXCEEDED. */
+#define ICMP_EXC_TTL 0 /* TTL count exceeded */
+#define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */
+
+
+struct icmphdr {
+ __u8 type;
+ __u8 code;
+ __u16 checksum;
+ union {
+ struct {
+ __u16 id;
+ __u16 sequence;
+ } echo;
+ __u32 gateway;
+ } un;
+};
+
+
+struct icmp_err {
+ int errno;
+ unsigned fatal:1;
+};
+
+
+#endif /* _LINUX_ICMP_H */
diff --git a/linux/src/include/linux/if.h b/linux/src/include/linux/if.h
new file mode 100644
index 0000000..7dee13a
--- /dev/null
+++ b/linux/src/include/linux/if.h
@@ -0,0 +1,155 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the INET interface module.
+ *
+ * Version: @(#)if.h 1.0.2 04/18/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+
+/* Standard interface flags. */
+#define IFF_UP 0x1 /* interface is up */
+#define IFF_BROADCAST 0x2 /* broadcast address valid */
+#define IFF_DEBUG 0x4 /* turn on debugging */
+#define IFF_LOOPBACK 0x8 /* is a loopback net */
+#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
+#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_NOARP 0x80 /* no ARP protocol */
+#define IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define IFF_MASTER 0x400 /* master of a load balancer */
+#define IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define IFF_MULTICAST 0x1000 /* Supports multicast */
+#define IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address
+ * families, are allocated and attached when an address is set,
+ * and are linked together so all addresses for an interface can
+ * be located.
+ */
+
+struct ifaddr
+{
+ struct sockaddr ifa_addr; /* address of interface */
+ union {
+ struct sockaddr ifu_broadaddr;
+ struct sockaddr ifu_dstaddr;
+ } ifa_ifu;
+ struct iface *ifa_ifp; /* back-pointer to interface */
+ struct ifaddr *ifa_next; /* next address for interface */
+};
+
+#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */
+#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */
+
+/*
+ * Device mapping structure. I'd just gone off and designed a
+ * beautiful scheme using only loadable modules with arguments
+ * for driver options and along come the PCMCIA people 8)
+ *
+ * Ah well. The get() side of this is good for WDSETUP, and it'll
+ * be handy for debugging things. The set side is fine for now and
+ * being very small might be worth keeping for clean configuration.
+ */
+
+struct ifmap
+{
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+ /* 3 bytes spare */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+
+struct ifreq
+{
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_map ifr_ifru.ifru_map /* device map */
+#define ifr_slave ifr_ifru.ifru_slave /* slave device */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for machine (useful for programs which
+ * must know all networks accessible).
+ */
+
+struct ifconf
+{
+ int ifc_len; /* size of buffer */
+ union
+ {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
+
+#endif /* _LINUX_IF_H */
diff --git a/linux/src/include/linux/if_arp.h b/linux/src/include/linux/if_arp.h
new file mode 100644
index 0000000..6104ee2
--- /dev/null
+++ b/linux/src/include/linux/if_arp.h
@@ -0,0 +1,130 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ARP (RFC 826) protocol.
+ *
+ * Version: @(#)if_arp.h 1.0.2 08/12/96
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source.
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche,
+ * Jonathan Layes, <layes@loran.com>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_ARP_H
+#define _LINUX_IF_ARP_H
+
+#include <linux/netdevice.h>
+
+/* ARP protocol HARDWARE identifiers. */
+#define ARPHRD_NETROM 0 /* from KA9Q: NET/ROM pseudo */
+#define ARPHRD_ETHER 1 /* Ethernet 10Mbps */
+#define ARPHRD_EETHER 2 /* Experimental Ethernet */
+#define ARPHRD_AX25 3 /* AX.25 Level 2 */
+#define ARPHRD_PRONET 4 /* PROnet token ring */
+#define ARPHRD_CHAOS 5 /* Chaosnet */
+#define ARPHRD_IEEE802 6 /* IEEE 802.2 Ethernet/TR/TB */
+#define ARPHRD_ARCNET 7 /* ARCnet */
+#define ARPHRD_APPLETLK 8 /* APPLEtalk */
+#define ARPHRD_DLCI 15 /* Frame Relay DLCI */
+#define ARPHRD_METRICOM 23 /* Metricom STRIP (new IANA id) */
+
+/* Dummy types for non ARP hardware */
+#define ARPHRD_SLIP 256
+#define ARPHRD_CSLIP 257
+#define ARPHRD_SLIP6 258
+#define ARPHRD_CSLIP6 259
+#define ARPHRD_RSRVD 260 /* Notional KISS type */
+#define ARPHRD_ADAPT 264
+#define ARPHRD_ROSE 270
+#define ARPHRD_PPP 512
+
+#define ARPHRD_TUNNEL 768 /* IPIP tunnel */
+#define ARPHRD_TUNNEL6 769 /* IPIP6 tunnel */
+#define ARPHRD_FRAD 770 /* Frame Relay Access Device */
+#define ARPHRD_SKIP 771 /* SKIP vif */
+#define ARPHRD_LOOPBACK 772 /* Loopback device */
+#define ARPHRD_LOCALTLK 773 /* Localtalk device */
+#define ARPHRD_FDDI 774 /* Fiber Distributed Data Interface */
+
+/* ARP protocol opcodes. */
+#define ARPOP_REQUEST 1 /* ARP request */
+#define ARPOP_REPLY 2 /* ARP reply */
+#define ARPOP_RREQUEST 3 /* RARP request */
+#define ARPOP_RREPLY 4 /* RARP reply */
+
+
+/* ARP ioctl request. */
+struct arpreq {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ char arp_dev[16];
+};
+
+struct arpreq_old {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+};
+
+/* ARP Flag values. */
+#define ATF_COM 0x02 /* completed entry (ha valid) */
+#define ATF_PERM 0x04 /* permanent entry */
+#define ATF_PUBL 0x08 /* publish entry */
+#define ATF_USETRAILERS 0x10 /* has requested trailers */
+#define ATF_NETMASK 0x20 /* want to use a netmask (only
+ for proxy entries) */
+
+/*
+ * This structure defines an ethernet arp header.
+ */
+
+struct arphdr
+{
+ unsigned short ar_hrd; /* format of hardware address */
+ unsigned short ar_pro; /* format of protocol address */
+ unsigned char ar_hln; /* length of hardware address */
+ unsigned char ar_pln; /* length of protocol address */
+ unsigned short ar_op; /* ARP opcode (command) */
+
+#if 0
+ /*
+ * Ethernet looks like this : This bit is variable sized however...
+ */
+ unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
+ unsigned char ar_sip[4]; /* sender IP address */
+ unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
+ unsigned char ar_tip[4]; /* target IP address */
+#endif
+
+};
+
+/* Support for the user space arp daemon, arpd */
+
+#define ARPD_UPDATE 0x01
+#define ARPD_LOOKUP 0x02
+#define ARPD_FLUSH 0x03
+
+struct arpd_request
+{
+ unsigned short req; /* request type */
+ __u32 ip; /* ip address of entry */
+ unsigned long dev; /* Device entry is tied to */
+ unsigned long stamp;
+ unsigned long updated;
+ unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
+};
+
+#endif /* _LINUX_IF_ARP_H */
diff --git a/linux/src/include/linux/if_ether.h b/linux/src/include/linux/if_ether.h
new file mode 100644
index 0000000..dd09d83
--- /dev/null
+++ b/linux/src/include/linux/if_ether.h
@@ -0,0 +1,119 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Ethernet IEEE 802.3 interface.
+ *
+ * Version: @(#)if_ether.h 1.0.1a 02/08/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@cymru.net>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_ETHER_H
+#define _LINUX_IF_ETHER_H
+
+/*
+ * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
+ * and FCS/CRC (frame check sequence).
+ */
+
+#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+
+/*
+ * These are the defined Ethernet Protocol ID's.
+ */
+
+#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
+#define ETH_P_ECHO 0x0200 /* Ethernet Echo packet */
+#define ETH_P_PUP 0x0400 /* Xerox PUP packet */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_X25 0x0805 /* CCITT X.25 */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
+#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
+#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
+#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
+#define ETH_P_LAT 0x6004 /* DEC LAT */
+#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
+#define ETH_P_CUST 0x6006 /* DEC Customer use */
+#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+#define ETH_P_ATALK 0x809B /* Appletalk DDP */
+#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
+#define ETH_P_IPX 0x8137 /* IPX over DIX */
+#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
+
+/*
+ * Non DIX types. Won't clash for 1500 types.
+ */
+
+#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
+#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
+#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
+#define ETH_P_802_2 0x0004 /* 802.2 frames */
+#define ETH_P_SNAP 0x0005 /* Internal only */
+#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
+#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
+#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
+#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
+#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
+#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
+
+/*
+ * This is an Ethernet frame header.
+ */
+
+struct ethhdr
+{
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned short h_proto; /* packet type ID field */
+};
+
+/*
+ * Ethernet statistics collection data.
+ */
+
+struct enet_statistics
+{
+ int rx_packets; /* total packets received */
+ int tx_packets; /* total packets transmitted */
+ int rx_errors; /* bad packets received */
+ int tx_errors; /* packet transmit problems */
+ int rx_dropped; /* no space in linux buffers */
+ int tx_dropped; /* no space available in linux */
+ int multicast; /* multicast packets received */
+ int collisions;
+
+ /* detailed rx_errors: */
+ int rx_length_errors;
+ int rx_over_errors; /* receiver ring buff overflow */
+ int rx_crc_errors; /* recved pkt with crc error */
+ int rx_frame_errors; /* recv'd frame alignment error */
+ int rx_fifo_errors; /* recv'r fifo overrun */
+ int rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ int tx_aborted_errors;
+ int tx_carrier_errors;
+ int tx_fifo_errors;
+ int tx_heartbeat_errors;
+ int tx_window_errors;
+};
+
+
+#endif /* _LINUX_IF_ETHER_H */
diff --git a/linux/src/include/linux/if_fddi.h b/linux/src/include/linux/if_fddi.h
new file mode 100644
index 0000000..6db6745
--- /dev/null
+++ b/linux/src/include/linux/if_fddi.h
@@ -0,0 +1,202 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ANSI FDDI interface.
+ *
+ * Version: @(#)if_fddi.h 1.0.1 09/16/96
+ *
+ * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * if_fddi.h is based on previous if_ether.h and if_tr.h work by
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@cymru.net>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_FDDI_H
+#define _LINUX_IF_FDDI_H
+
+/*
+ * Define max and min legal sizes. The frame sizes do not include
+ * 4 byte FCS/CRC (frame check sequence).
+ */
+#define FDDI_K_ALEN 6 /* Octets in one FDDI address */
+#define FDDI_K_8022_HLEN 16 /* Total octets in 802.2 header */
+#define FDDI_K_SNAP_HLEN 21 /* Total octets in 802.2 SNAP header */
+#define FDDI_K_8022_ZLEN 16 /* Min octets in 802.2 frame sans FCS */
+#define FDDI_K_SNAP_ZLEN 21 /* Min octets in 802.2 SNAP frame sans FCS */
+#define FDDI_K_8022_DLEN 4475 /* Max octets in 802.2 payload */
+#define FDDI_K_SNAP_DLEN 4470 /* Max octets in 802.2 SNAP payload */
+#define FDDI_K_LLC_ZLEN 13 /* Min octets in LLC frame sans FCS */
+#define FDDI_K_LLC_LEN 4491 /* Max octets in LLC frame sans FCS */
+
+/* Define FDDI Frame Control (FC) Byte values */
+#define FDDI_FC_K_VOID 0x00
+#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
+#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0
+#define FDDI_FC_K_SMT_MIN 0x41
+#define FDDI_FC_K_SMT_MAX 0x4F
+#define FDDI_FC_K_MAC_MIN 0xC1
+#define FDDI_FC_K_MAC_MAX 0xCF
+#define FDDI_FC_K_ASYNC_LLC_MIN 0x50
+#define FDDI_FC_K_ASYNC_LLC_DEF 0x54
+#define FDDI_FC_K_ASYNC_LLC_MAX 0x5F
+#define FDDI_FC_K_SYNC_LLC_MIN 0xD0
+#define FDDI_FC_K_SYNC_LLC_MAX 0xD7
+#define FDDI_FC_K_IMPLEMENTOR_MIN 0x60
+#define FDDI_FC_K_IMPLEMENTOR_MAX 0x6F
+#define FDDI_FC_K_RESERVED_MIN 0x70
+#define FDDI_FC_K_RESERVED_MAX 0x7F
+
+/* Define LLC and SNAP constants */
+#define FDDI_EXTENDED_SAP 0xAA
+#define FDDI_UI_CMD 0x03
+
+/* Define 802.2 Type 1 header */
+struct fddi_8022_1_hdr
+ {
+ __u8 dsap; /* destination service access point */
+ __u8 ssap; /* source service access point */
+ __u8 ctrl; /* control byte #1 */
+ } __attribute__ ((packed));
+
+/* Define 802.2 Type 2 header */
+struct fddi_8022_2_hdr
+ {
+ __u8 dsap; /* destination service access point */
+ __u8 ssap; /* source service access point */
+ __u8 ctrl_1; /* control byte #1 */
+ __u8 ctrl_2; /* control byte #2 */
+ } __attribute__ ((packed));
+
+/* Define 802.2 SNAP header */
+#define FDDI_K_OUI_LEN 3
+struct fddi_snap_hdr
+ {
+ __u8 dsap; /* always 0xAA */
+ __u8 ssap; /* always 0xAA */
+ __u8 ctrl; /* always 0x03 */
+ __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */
+ __u16 ethertype; /* packet type ID field */
+ } __attribute__ ((packed));
+
+/* Define FDDI LLC frame header */
+struct fddihdr
+ {
+ __u8 fc; /* frame control */
+ __u8 daddr[FDDI_K_ALEN]; /* destination address */
+ __u8 saddr[FDDI_K_ALEN]; /* source address */
+ union
+ {
+ struct fddi_8022_1_hdr llc_8022_1;
+ struct fddi_8022_2_hdr llc_8022_2;
+ struct fddi_snap_hdr llc_snap;
+ } hdr;
+ } __attribute__ ((packed));
+
+/* Define FDDI statistics structure */
+struct fddi_statistics
+ {
+ __u32 rx_packets; /* total packets received */
+ __u32 tx_packets; /* total packets transmitted */
+ __u32 rx_errors; /* bad packets received */
+ __u32 tx_errors; /* packet transmit problems */
+ __u32 rx_dropped; /* no space in linux buffers */
+ __u32 tx_dropped; /* no space available in linux */
+ __u32 multicast; /* multicast packets received */
+ __u32 transmit_collision; /* always 0 for FDDI */
+
+ /* Detailed FDDI statistics. Adopted from RFC 1512 */
+
+ __u8 smt_station_id[8];
+ __u32 smt_op_version_id;
+ __u32 smt_hi_version_id;
+ __u32 smt_lo_version_id;
+ __u8 smt_user_data[32];
+ __u32 smt_mib_version_id;
+ __u32 smt_mac_cts;
+ __u32 smt_non_master_cts;
+ __u32 smt_master_cts;
+ __u32 smt_available_paths;
+ __u32 smt_config_capabilities;
+ __u32 smt_config_policy;
+ __u32 smt_connection_policy;
+ __u32 smt_t_notify;
+ __u32 smt_stat_rpt_policy;
+ __u32 smt_trace_max_expiration;
+ __u32 smt_bypass_present;
+ __u32 smt_ecm_state;
+ __u32 smt_cf_state;
+ __u32 smt_remote_disconnect_flag;
+ __u32 smt_station_status;
+ __u32 smt_peer_wrap_flag;
+ __u32 smt_time_stamp;
+ __u32 smt_transition_time_stamp;
+ __u32 mac_frame_status_functions;
+ __u32 mac_t_max_capability;
+ __u32 mac_tvx_capability;
+ __u32 mac_available_paths;
+ __u32 mac_current_path;
+ __u8 mac_upstream_nbr[FDDI_K_ALEN];
+ __u8 mac_downstream_nbr[FDDI_K_ALEN];
+ __u8 mac_old_upstream_nbr[FDDI_K_ALEN];
+ __u8 mac_old_downstream_nbr[FDDI_K_ALEN];
+ __u32 mac_dup_address_test;
+ __u32 mac_requested_paths;
+ __u32 mac_downstream_port_type;
+ __u8 mac_smt_address[FDDI_K_ALEN];
+ __u32 mac_t_req;
+ __u32 mac_t_neg;
+ __u32 mac_t_max;
+ __u32 mac_tvx_value;
+ __u32 mac_frame_cts;
+ __u32 mac_copied_cts;
+ __u32 mac_transmit_cts;
+ __u32 mac_error_cts;
+ __u32 mac_lost_cts;
+ __u32 mac_frame_error_threshold;
+ __u32 mac_frame_error_ratio;
+ __u32 mac_rmt_state;
+ __u32 mac_da_flag;
+ __u32 mac_una_da_flag;
+ __u32 mac_frame_error_flag;
+ __u32 mac_ma_unitdata_available;
+ __u32 mac_hardware_present;
+ __u32 mac_ma_unitdata_enable;
+ __u32 path_tvx_lower_bound;
+ __u32 path_t_max_lower_bound;
+ __u32 path_max_t_req;
+ __u32 path_configuration[8];
+ __u32 port_my_type[2];
+ __u32 port_neighbor_type[2];
+ __u32 port_connection_policies[2];
+ __u32 port_mac_indicated[2];
+ __u32 port_current_path[2];
+ __u8 port_requested_paths[3*2];
+ __u32 port_mac_placement[2];
+ __u32 port_available_paths[2];
+ __u32 port_pmd_class[2];
+ __u32 port_connection_capabilities[2];
+ __u32 port_bs_flag[2];
+ __u32 port_lct_fail_cts[2];
+ __u32 port_ler_estimate[2];
+ __u32 port_lem_reject_cts[2];
+ __u32 port_lem_cts[2];
+ __u32 port_ler_cutoff[2];
+ __u32 port_ler_alarm[2];
+ __u32 port_connect_state[2];
+ __u32 port_pcm_state[2];
+ __u32 port_pc_withhold[2];
+ __u32 port_ler_flag[2];
+ __u32 port_hardware_present[2];
+ };
+
+#endif /* _LINUX_IF_FDDI_H */
diff --git a/linux/src/include/linux/if_tr.h b/linux/src/include/linux/if_tr.h
new file mode 100644
index 0000000..545f1b7
--- /dev/null
+++ b/linux/src/include/linux/if_tr.h
@@ -0,0 +1,102 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Token-Ring IEEE 802.5 interface.
+ *
+ * Version: @(#)if_tr.h 0.0 07/11/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_TR_H
+#define _LINUX_IF_TR_H
+
+
+/* IEEE 802.5 Token-Ring magic constants. The frame sizes omit the preamble
+ and FCS/CRC (frame check sequence). */
+#define TR_ALEN 6 /* Octets in one ethernet addr */
+#define TR_HLEN (sizeof(struct trh_hdr)+sizeof(struct trllc))
+#define AC 0x10
+#define LLC_FRAME 0x40
+#if 0
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+#endif
+
+
+/* These are some defined Ethernet Protocol ID's. */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+
+/* LLC and SNAP constants */
+#define EXTENDED_SAP 0xAA
+#define UI_CMD 0x03
+
+/* This is an Token-Ring frame header. */
+struct trh_hdr {
+ __u8 ac; /* access control field */
+ __u8 fc; /* frame control field */
+ __u8 daddr[TR_ALEN]; /* destination address */
+ __u8 saddr[TR_ALEN]; /* source address */
+ __u16 rcf; /* route control field */
+ __u16 rseg[8]; /* routing registers */
+};
+
+/* This is an Token-Ring LLC structure */
+struct trllc {
+ __u8 dsap; /* destination SAP */
+ __u8 ssap; /* source SAP */
+ __u8 llc; /* LLC control field */
+ __u8 protid[3]; /* protocol id */
+ __u16 ethertype; /* ether type field */
+};
+
+/* Token-Ring statistics collection data. */
+struct tr_statistics {
+ int rx_packets; /* total packets received */
+ int tx_packets; /* total packets transmitted */
+ int rx_errors; /* bad packets received */
+ int tx_errors; /* packet transmit problems */
+ int rx_dropped; /* no space in linux buffers */
+ int tx_dropped; /* no space available in linux */
+ int multicast; /* multicast packets received */
+ int transmit_collision;
+
+ /* detailed Token-Ring errors. See IBM Token-Ring Network
+ Architecture for more info */
+
+ int line_errors;
+ int internal_errors;
+ int burst_errors;
+ int A_C_errors;
+ int abort_delimiters;
+ int lost_frames;
+ int recv_congest_count;
+ int frame_copied_errors;
+ int frequency_errors;
+ int token_errors;
+ int dummy1;
+};
+
+/* source routing stuff */
+
+#define TR_RII 0x80
+#define TR_RCF_DIR_BIT 0x80
+#define TR_RCF_LEN_MASK 0x1f00
+#define TR_RCF_BROADCAST 0x8000
+#define TR_RCF_LIMITED_BROADCAST 0xA000
+#define TR_RCF_FRAME2K 0x20
+#define TR_RCF_BROADCAST_MASK 0xC000
+
+#endif /* _LINUX_IF_TR_H */
diff --git a/linux/src/include/linux/igmp.h b/linux/src/include/linux/igmp.h
new file mode 100644
index 0000000..82569a6
--- /dev/null
+++ b/linux/src/include/linux/igmp.h
@@ -0,0 +1,119 @@
+/*
+ * Linux NET3: Internet Group Management Protocol [IGMP]
+ *
+ * Authors:
+ * Alan Cox <Alan.Cox@linux.org>
+ *
+ * Extended to talk the BSD extended IGMP protocol of mrouted 3.6
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_IGMP_H
+#define _LINUX_IGMP_H
+
+/*
+ * IGMP protocol structures
+ */
+
+/*
+ * Header in on cable format
+ */
+
+struct igmphdr
+{
+ __u8 type;
+ __u8 code; /* For newer IGMP */
+ __u16 csum;
+ __u32 group;
+};
+
+#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */
+#define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */
+#define IGMP_DVMRP 0x13 /* DVMRP routing */
+#define IGMP_PIM 0x14 /* PIM routing */
+#define IGMP_TRACE 0x15 /* CISCO trace */
+#define IGMP_HOST_NEW_MEMBERSHIP_REPORT 0x16 /* New version of 0x11 */
+#define IGMP_HOST_LEAVE_MESSAGE 0x17 /* An extra BSD seems to send */
+
+#define IGMP_MTRACE_RESP 0x1e
+#define IGMP_MTRACE 0x1f
+
+
+/*
+ * Use the BSD names for these for compatibility
+ */
+
+#define IGMP_DELAYING_MEMBER 0x01
+#define IGMP_IDLE_MEMBER 0x02
+#define IGMP_LAZY_MEMBER 0x03
+#define IGMP_SLEEPING_MEMBER 0x04
+#define IGMP_AWAKENING_MEMBER 0x05
+
+#define IGMP_OLD_ROUTER 0x00
+#define IGMP_NEW_ROUTER 0x01
+
+#define IGMP_MINLEN 8
+
+#define IGMP_MAX_HOST_REPORT_DELAY 10 /* max delay for response to */
+ /* query (in seconds) */
+
+#define IGMP_TIMER_SCALE 10 /* denotes that the igmphdr->timer field */
+ /* specifies time in 10th of seconds */
+
+#define IGMP_AGE_THRESHOLD 540 /* If this host don't hear any IGMP V1 */
+ /* message in this period of time, */
+ /* revert to IGMP v2 router. */
+
+#define IGMP_ALL_HOSTS htonl(0xE0000001L)
+#define IGMP_ALL_ROUTER htonl(0xE0000002L)
+#define IGMP_LOCAL_GROUP htonl(0xE0000000L)
+#define IGMP_LOCAL_GROUP_MASK htonl(0xFFFFFF00L)
+
+/*
+ * struct for keeping the multicast list in
+ */
+
+#ifdef __KERNEL__
+struct ip_mc_socklist
+{
+ unsigned long multiaddr[IP_MAX_MEMBERSHIPS]; /* This is a speed trade off */
+ struct device *multidev[IP_MAX_MEMBERSHIPS];
+};
+
+struct ip_mc_list
+{
+ struct device *interface;
+ unsigned long multiaddr;
+ struct ip_mc_list *next;
+ struct timer_list timer;
+ short tm_running;
+ short reporter;
+ int users;
+};
+
+struct ip_router_info
+{
+ struct device *dev;
+ int type; /* type of router which is querier on this interface */
+ int time; /* # of slow timeouts since last old query */
+ struct timer_list timer;
+ struct ip_router_info *next;
+};
+
+extern struct ip_mc_list *ip_mc_head;
+
+
+extern int igmp_rcv(struct sk_buff *, struct device *, struct options *, __u32, unsigned short,
+ __u32, int , struct inet_protocol *);
+extern void ip_mc_drop_device(struct device *dev);
+extern int ip_mc_join_group(struct sock *sk, struct device *dev, unsigned long addr);
+extern int ip_mc_leave_group(struct sock *sk, struct device *dev,unsigned long addr);
+extern void ip_mc_drop_socket(struct sock *sk);
+extern void ip_mr_init(void);
+#endif
+#endif
diff --git a/linux/src/include/linux/in.h b/linux/src/include/linux/in.h
new file mode 100644
index 0000000..b2a44f5
--- /dev/null
+++ b/linux/src/include/linux/in.h
@@ -0,0 +1,149 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the Internet Protocol.
+ *
+ * Version: @(#)in.h 1.0.1 04/21/93
+ *
+ * Authors: Original taken from the GNU Project <netinet/in.h> file.
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IN_H
+#define _LINUX_IN_H
+
+#include <linux/types.h>
+
+/* Standard well-defined IP protocols. */
+enum {
+ IPPROTO_IP = 0, /* Dummy protocol for TCP */
+ IPPROTO_ICMP = 1, /* Internet Control Message Protocol */
+ IPPROTO_IGMP = 2, /* Internet Group Management Protocol */
+ IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+ IPPROTO_TCP = 6, /* Transmission Control Protocol */
+ IPPROTO_EGP = 8, /* Exterior Gateway Protocol */
+ IPPROTO_PUP = 12, /* PUP protocol */
+ IPPROTO_UDP = 17, /* User Datagram Protocol */
+ IPPROTO_IDP = 22, /* XNS IDP protocol */
+
+ IPPROTO_RAW = 255, /* Raw IP packets */
+ IPPROTO_MAX
+};
+
+
+/* Internet address. */
+struct in_addr {
+ __u32 s_addr;
+};
+
+/* Request struct for multicast socket ops */
+
+struct ip_mreq
+{
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+
+/* Structure describing an Internet (IP) socket address. */
+#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
+struct sockaddr_in {
+ short int sin_family; /* Address family */
+ unsigned short int sin_port; /* Port number */
+ struct in_addr sin_addr; /* Internet address */
+
+ /* Pad to size of `struct sockaddr'. */
+ unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) -
+ sizeof(unsigned short int) - sizeof(struct in_addr)];
+};
+#define sin_zero __pad /* for BSD UNIX comp. -FvK */
+
+
+/*
+ * Definitions of the bits in an Internet address integer.
+ * On subnets, host and network parts are found according
+ * to the subnet mask, not these masks.
+ */
+#define IN_CLASSA(a) ((((long int) (a)) & 0x80000000) == 0)
+#define IN_CLASSA_NET 0xff000000
+#define IN_CLASSA_NSHIFT 24
+#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET)
+#define IN_CLASSA_MAX 128
+
+#define IN_CLASSB(a) ((((long int) (a)) & 0xc0000000) == 0x80000000)
+#define IN_CLASSB_NET 0xffff0000
+#define IN_CLASSB_NSHIFT 16
+#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET)
+#define IN_CLASSB_MAX 65536
+
+#define IN_CLASSC(a) ((((long int) (a)) & 0xe0000000) == 0xc0000000)
+#define IN_CLASSC_NET 0xffffff00
+#define IN_CLASSC_NSHIFT 8
+#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET)
+
+#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
+#define IN_MULTICAST(a) IN_CLASSD(a)
+#define IN_MULTICAST_NET 0xF0000000
+
+#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xe0000000) == 0xe0000000)
+#define IN_BADCLASS(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+
+/* Address to accept any incoming messages. */
+#define INADDR_ANY ((unsigned long int) 0x00000000)
+
+/* Address to send to all hosts. */
+#define INADDR_BROADCAST ((unsigned long int) 0xffffffff)
+
+/* Address indicating an error return. */
+#define INADDR_NONE ((unsigned long int) 0xffffffff)
+
+/* Network number for local host loopback. */
+#define IN_LOOPBACKNET 127
+
+/* Address to loopback in software to local host. */
+#define INADDR_LOOPBACK 0x7f000001 /* 127.0.0.1 */
+#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
+
+/* Defines for Multicast INADDR */
+#define INADDR_UNSPEC_GROUP 0xe0000000 /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001 /* 224.0.0.1 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ff /* 224.0.0.255 */
+
+/* <asm/byteorder.h> contains the htonl type stuff.. */
+
+#include <asm/byteorder.h>
+
+/* Some random defines to make it easier in the kernel.. */
+#ifdef __KERNEL__
+
+#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+#define MULTICAST(x) (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
+
+#endif
+
+/*
+ * IPv6 definitions as we start to include them. This is just
+ * a beginning -- don't get excited 8)
+ */
+
+struct in_addr6
+{
+ unsigned char s6_addr[16];
+};
+
+struct sockaddr_in6
+{
+ unsigned short sin6_family;
+ unsigned short sin6_port;
+ unsigned long sin6_flowinfo;
+ struct in_addr6 sin6_addr;
+};
+
+
+#endif /* _LINUX_IN_H */
diff --git a/linux/src/include/linux/inet.h b/linux/src/include/linux/inet.h
new file mode 100644
index 0000000..0d0fbd6
--- /dev/null
+++ b/linux/src/include/linux/inet.h
@@ -0,0 +1,52 @@
+/*
+ * Swansea University Computer Society NET3
+ *
+ * This work is derived from NET2Debugged, which is in turn derived
+ * from NET2D which was written by:
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This work was derived from Ross Biro's inspirational work
+ * for the LINUX operating system. His version numbers were:
+ *
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_INET_H
+#define _LINUX_INET_H
+
+#ifdef __KERNEL__
+
+extern void inet_proto_init(struct net_proto *pro);
+extern char *in_ntoa(unsigned long in);
+extern unsigned long in_aton(const char *str);
+
+#endif
+#endif /* _LINUX_INET_H */
diff --git a/linux/src/include/linux/init.h b/linux/src/include/linux/init.h
new file mode 100644
index 0000000..d4798b2
--- /dev/null
+++ b/linux/src/include/linux/init.h
@@ -0,0 +1,30 @@
+#ifndef _COMPAT_INIT_H
+#define _COMPAT_INIT_H
+
+#include <linux/compiler.h>
+
+#ifdef MODULE
+#define __exitused
+#else
+#define __exitused __used
+#endif
+
+#define __init
+#define __initdata
+#define __exit __exitused __cold notrace
+#define __exitdata
+#define __devinit
+#define __devinitdata
+#define __devexit
+#define __devexitdata
+
+#ifndef module_init
+#define module_init(x)
+#define module_exit(x)
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p(x) (x)
+#endif
+
+#endif /* _COMPAT_INIT_H */
diff --git a/linux/src/include/linux/interrupt.h b/linux/src/include/linux/interrupt.h
new file mode 100644
index 0000000..0224475
--- /dev/null
+++ b/linux/src/include/linux/interrupt.h
@@ -0,0 +1,120 @@
+/* interrupt.h */
+#ifndef _LINUX_INTERRUPT_H
+#define _LINUX_INTERRUPT_H
+
+#include <linux/kernel.h>
+#include <asm/bitops.h>
+
+struct irqaction {
+ void (*handler)(int, void *, struct pt_regs *);
+ unsigned long flags;
+ unsigned long mask;
+ const char *name;
+ void *dev_id;
+ struct irqaction *next;
+};
+
+extern unsigned int intr_count;
+
+extern int bh_mask_count[32];
+extern unsigned int bh_active;
+extern unsigned int bh_mask;
+extern void (*bh_base[32])(void);
+
+asmlinkage void do_bottom_half(void);
+
+/* Who gets which entry in bh_base. Things which will occur most often
+ should come first - in which case NET should be up the top with SERIAL/TQUEUE! */
+
+enum {
+ TIMER_BH = 0,
+ CONSOLE_BH,
+ TQUEUE_BH,
+ DIGI_BH,
+ SERIAL_BH,
+ RISCOM8_BH,
+ SPECIALIX_BH,
+ BAYCOM_BH,
+ NET_BH,
+ IMMEDIATE_BH,
+ KEYBOARD_BH,
+ CYCLADES_BH,
+ CM206_BH,
+ ISICOM_BH
+};
+
+static inline void init_bh(int nr, void (*routine)(void))
+{
+ bh_base[nr] = routine;
+ bh_mask_count[nr] = 0;
+ bh_mask |= 1 << nr;
+}
+
+static inline void mark_bh(int nr)
+{
+ set_bit(nr, &bh_active);
+}
+
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+static inline void disable_bh(int nr)
+{
+ bh_mask &= ~(1 << nr);
+ bh_mask_count[nr]++;
+}
+
+static inline void enable_bh(int nr)
+{
+ if (!--bh_mask_count[nr])
+ bh_mask |= 1 << nr;
+}
+
+/*
+ * start_bh_atomic/end_bh_atomic also nest
+ * naturally by using a counter
+ */
+static inline void start_bh_atomic(void)
+{
+ intr_count++;
+ barrier();
+}
+
+static inline void end_bh_atomic(void)
+{
+ barrier();
+ intr_count--;
+}
+
+/*
+ * Autoprobing for irqs:
+ *
+ * probe_irq_on() and probe_irq_off() provide robust primitives
+ * for accurate IRQ probing during kernel initialization. They are
+ * reasonably simple to use, are not "fooled" by spurious interrupts,
+ * and, unlike other attempts at IRQ probing, they do not get hung on
+ * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
+ *
+ * For reasonably foolproof probing, use them as follows:
+ *
+ * 1. clear and/or mask the device's internal interrupt.
+ * 2. sti();
+ * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
+ * 4. enable the device and cause it to trigger an interrupt.
+ * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
+ * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
+ * 7. service the device to clear its pending interrupt.
+ * 8. loop again if paranoia is required.
+ *
+ * probe_irq_on() returns a mask of allocated irq's.
+ *
+ * probe_irq_off() takes the mask as a parameter,
+ * and returns the irq number which occurred,
+ * or zero if none occurred, or a negative irq number
+ * if more than one irq occurred.
+ */
+extern unsigned long probe_irq_on(void); /* returns 0 on failure */
+extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
+
+#endif
diff --git a/linux/src/include/linux/ioctl.h b/linux/src/include/linux/ioctl.h
new file mode 100644
index 0000000..aa91eb3
--- /dev/null
+++ b/linux/src/include/linux/ioctl.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_IOCTL_H
+#define _LINUX_IOCTL_H
+
+#include <asm/ioctl.h>
+
+#endif /* _LINUX_IOCTL_H */
+
diff --git a/linux/src/include/linux/ioport.h b/linux/src/include/linux/ioport.h
new file mode 100644
index 0000000..293b468
--- /dev/null
+++ b/linux/src/include/linux/ioport.h
@@ -0,0 +1,31 @@
+/*
+ * portio.h Definitions of routines for detecting, reserving and
+ * allocating system resources.
+ *
+ * Version: 0.01 8/30/93
+ *
+ * Author: Donald Becker (becker@super.org)
+ */
+
+#ifndef _LINUX_PORTIO_H
+#define _LINUX_PORTIO_H
+
+#define HAVE_PORTRESERVE
+/*
+ * Call check_region() before probing for your hardware.
+ * Once you have found your hardware, register it with request_region().
+ * If you unload the driver, use release_region to free ports.
+ */
+extern void reserve_setup(char *str, int *ints);
+extern int check_region(unsigned int from, unsigned int extent);
+extern void request_region(unsigned int from, unsigned int extent,const char *name);
+extern void release_region(unsigned int from, unsigned int extent);
+extern int get_ioport_list(char *);
+
+
+#define HAVE_AUTOIRQ
+extern void *irq2dev_map[]; /* Use only if you own the IRQ. */
+extern int autoirq_setup(int waittime);
+extern int autoirq_report(int waittime);
+
+#endif /* _LINUX_PORTIO_H */
diff --git a/linux/src/include/linux/ip.h b/linux/src/include/linux/ip.h
new file mode 100644
index 0000000..6bbe740
--- /dev/null
+++ b/linux/src/include/linux/ip.h
@@ -0,0 +1,112 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP protocol.
+ *
+ * Version: @(#)ip.h 1.0.2 04/28/93
+ *
+ * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IP_H
+#define _LINUX_IP_H
+#include <asm/byteorder.h>
+
+#define IPOPT_END 0
+#define IPOPT_NOOP 1
+#define IPOPT_SEC 130
+#define IPOPT_LSRR 131
+#define IPOPT_SSRR 137
+#define IPOPT_RR 7
+#define IPOPT_SID 136
+#define IPOPT_TIMESTAMP 68
+
+
+#define MAXTTL 255
+
+struct timestamp {
+ __u8 len;
+ __u8 ptr;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 flags:4,
+ overflow:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 overflow:4,
+ flags:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u32 data[9];
+};
+
+
+#define MAX_ROUTE 16
+
+struct route {
+ char route_size;
+ char pointer;
+ unsigned long route[MAX_ROUTE];
+};
+
+#define IPOPT_OPTVAL 0
+#define IPOPT_OLEN 1
+#define IPOPT_OFFSET 2
+#define IPOPT_MINOFF 4
+#define MAX_IPOPTLEN 40
+#define IPOPT_NOP IPOPT_NOOP
+#define IPOPT_EOL IPOPT_END
+#define IPOPT_TS IPOPT_TIMESTAMP
+
+#define IPOPT_TS_TSONLY 0 /* timestamps only */
+#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
+#define IPOPT_TS_PRESPEC 3 /* specified modules only */
+
+struct options {
+ __u32 faddr; /* Saved first hop address */
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_setbyuser:1, /* Set by setsockopt? */
+ is_data:1, /* Options in __data, rather than skb */
+ is_strictroute:1, /* Strict source route */
+ srr_is_hit:1, /* Packet destination addr was our one */
+ is_changed:1, /* IP checksum no longer valid */
+ rr_needaddr:1, /* Need to record addr of outgoing dev */
+ ts_needtime:1, /* Need to record timestamp */
+ ts_needaddr:1; /* Need to record addr of outgoing dev */
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __pad3;
+ unsigned char __data[0];
+};
+
+struct iphdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ ihl:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 tos;
+ __u16 tot_len;
+ __u16 id;
+ __u16 frag_off;
+ __u8 ttl;
+ __u8 protocol;
+ __u16 check;
+ __u32 saddr;
+ __u32 daddr;
+ /*The options start here. */
+};
+
+#endif /* _LINUX_IP_H */
diff --git a/linux/src/include/linux/ipc.h b/linux/src/include/linux/ipc.h
new file mode 100644
index 0000000..3878e02
--- /dev/null
+++ b/linux/src/include/linux/ipc.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_IPC_H
+#define _LINUX_IPC_H
+#include <linux/types.h>
+
+typedef int key_t; /* should go in <types.h> type for IPC key */
+#define IPC_PRIVATE ((key_t) 0)
+
+struct ipc_perm
+{
+ key_t key;
+ ushort uid; /* owner euid and egid */
+ ushort gid;
+ ushort cuid; /* creator euid and egid */
+ ushort cgid;
+ ushort mode; /* access modes see mode flags below */
+ ushort seq; /* sequence number */
+};
+
+
+/* resource get request flags */
+#define IPC_CREAT 00001000 /* create if key is nonexistent */
+#define IPC_EXCL 00002000 /* fail if key exists */
+#define IPC_NOWAIT 00004000 /* return error on wait */
+
+
+/*
+ * Control commands used with semctl, msgctl and shmctl
+ * see also specific commands in sem.h, msg.h and shm.h
+ */
+#define IPC_RMID 0 /* remove resource */
+#define IPC_SET 1 /* set ipc_perm options */
+#define IPC_STAT 2 /* get ipc_perm options */
+#define IPC_INFO 3 /* see ipcs */
+
+#ifdef __KERNEL__
+
+/* special shmsegs[id], msgque[id] or semary[id] values */
+#define IPC_UNUSED ((void *) -1)
+#define IPC_NOID ((void *) -2) /* being allocated/destroyed */
+
+/*
+ * These are used to wrap system calls. See ipc/util.c.
+ */
+struct ipc_kludge {
+ struct msgbuf *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+#define IPCCALL(version,op) ((version)<<16 | (op))
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_IPC_H */
+
+
diff --git a/linux/src/include/linux/ipx.h b/linux/src/include/linux/ipx.h
new file mode 100644
index 0000000..6fb26f7
--- /dev/null
+++ b/linux/src/include/linux/ipx.h
@@ -0,0 +1,80 @@
+#ifndef _IPX_H_
+#define _IPX_H_
+#include <linux/sockios.h>
+#define IPX_NODE_LEN 6
+#define IPX_MTU 576
+
+struct sockaddr_ipx
+{
+ short sipx_family;
+ short sipx_port;
+ unsigned long sipx_network;
+ unsigned char sipx_node[IPX_NODE_LEN];
+ unsigned char sipx_type;
+ unsigned char sipx_zero; /* 16 byte fill */
+};
+
+/*
+ * So we can fit the extra info for SIOCSIFADDR into the address nicely
+ */
+
+#define sipx_special sipx_port
+#define sipx_action sipx_zero
+#define IPX_DLTITF 0
+#define IPX_CRTITF 1
+
+typedef struct ipx_route_definition
+{
+ unsigned long ipx_network;
+ unsigned long ipx_router_network;
+ unsigned char ipx_router_node[IPX_NODE_LEN];
+} ipx_route_definition;
+
+typedef struct ipx_interface_definition
+{
+ unsigned long ipx_network;
+ unsigned char ipx_device[16];
+ unsigned char ipx_dlink_type;
+#define IPX_FRAME_NONE 0
+#define IPX_FRAME_SNAP 1
+#define IPX_FRAME_8022 2
+#define IPX_FRAME_ETHERII 3
+#define IPX_FRAME_8023 4
+#define IPX_FRAME_TR_8022 5
+ unsigned char ipx_special;
+#define IPX_SPECIAL_NONE 0
+#define IPX_PRIMARY 1
+#define IPX_INTERNAL 2
+ unsigned char ipx_node[IPX_NODE_LEN];
+} ipx_interface_definition;
+
+typedef struct ipx_config_data
+{
+ unsigned char ipxcfg_auto_select_primary;
+ unsigned char ipxcfg_auto_create_interfaces;
+} ipx_config_data;
+
+/*
+ * OLD Route Definition for backward compatibility.
+ */
+
+struct ipx_route_def
+{
+ unsigned long ipx_network;
+ unsigned long ipx_router_network;
+#define IPX_ROUTE_NO_ROUTER 0
+ unsigned char ipx_router_node[IPX_NODE_LEN];
+ unsigned char ipx_device[16];
+ unsigned short ipx_flags;
+#define IPX_RT_SNAP 8
+#define IPX_RT_8022 4
+#define IPX_RT_BLUEBOOK 2
+#define IPX_RT_ROUTED 1
+};
+
+#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE)
+#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE+1)
+#define SIOCIPXCFGDATA (SIOCPROTOPRIVATE+2)
+#define SIOCIPXNCPCONN (SIOCPROTOPRIVATE+3)
+#endif
+
diff --git a/linux/src/include/linux/kcomp.h b/linux/src/include/linux/kcomp.h
new file mode 100644
index 0000000..5e06d7e
--- /dev/null
+++ b/linux/src/include/linux/kcomp.h
@@ -0,0 +1,52 @@
+/*
+ * Kernel compatibility glue to allow USB compile on 2.2.x kernels
+ */
+
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/pagemap.h>
+
+#define pci_enable_device(x) 0
+
+#define page_address(x) (x | PAGE_OFFSET)
+
+#define TTY_DRIVER_NO_DEVFS 0
+
+#define net_device device
+#define dev_kfree_skb_irq(a) dev_kfree_skb(a, FREE_WRITE)
+#define netif_wake_queue(dev) do { clear_bit(0, &dev->tbusy); mark_bh(NET_BH); } while(0)
+#define netif_stop_queue(dev) test_and_set_bit(0, &dev->tbusy)
+#define netif_start_queue(dev) do { dev->tbusy = 0; dev->interrupt = 0; dev->start = 1; } while (0)
+#define netif_queue_stopped(dev) dev->tbusy
+#define netif_running(dev) dev->start
+
+/* hot-(un)plugging stuff */
+static inline int netif_device_present(struct net_device *dev)
+{
+ return test_bit(0, &dev->start);
+}
+
+static inline void netif_device_detach(struct net_device *dev)
+{
+ if ( test_and_clear_bit(0, &dev->start) )
+ netif_stop_queue(dev);
+}
+
+static inline void netif_device_attach(struct net_device *dev)
+{
+ if ( !test_and_set_bit(0, &dev->start) )
+ netif_wake_queue(dev);
+}
+
+#define NET_XMIT_SUCCESS 0
+#define NET_XMIT_DROP 1
+#define NET_XMIT_CN 2
+
+#define IORESOURCE_IO 1
+#define pci_resource_start(dev,bar) \
+(((dev)->base_address[(bar)] & PCI_BASE_ADDRESS_SPACE) ? \
+ ((dev)->base_address[(bar)] & PCI_BASE_ADDRESS_IO_MASK) : \
+ ((dev)->base_address[(bar)] & PCI_BASE_ADDRESS_MEM_MASK))
+#define pci_resource_flags(dev, i) (dev->base_address[i] & IORESOURCE_IO)
+
diff --git a/linux/src/include/linux/kdev_t.h b/linux/src/include/linux/kdev_t.h
new file mode 100644
index 0000000..0497ea8
--- /dev/null
+++ b/linux/src/include/linux/kdev_t.h
@@ -0,0 +1,114 @@
+#ifndef _LINUX_KDEV_T_H
+#define _LINUX_KDEV_T_H
+#ifdef __KERNEL__
+/*
+As a preparation for the introduction of larger device numbers,
+we introduce a type kdev_t to hold them. No information about
+this type is known outside of this include file.
+
+Objects of type kdev_t designate a device. Outside of the kernel
+the corresponding things are objects of type dev_t - usually an
+integral type with the device major and minor in the high and low
+bits, respectively. Conversion is done by
+
+extern kdev_t to_kdev_t(int);
+
+It is up to the various file systems to decide how objects of type
+dev_t are stored on disk.
+The only other point of contact between kernel and outside world
+are the system calls stat and mknod, new versions of which will
+eventually have to be used in libc.
+
+[Unfortunately, the floppy control ioctls fail to hide the internal
+kernel structures, and the fd_device field of a struct floppy_drive_struct
+is user-visible. So, it remains a dev_t for the moment, with some ugly
+conversions in floppy.c.]
+
+Inside the kernel, we aim for a kdev_t type that is a pointer
+to a structure with information about the device (like major,
+minor, size, blocksize, sectorsize, name, read-only flag,
+struct file_operations etc.).
+
+However, for the time being we let kdev_t be almost the same as dev_t:
+
+typedef struct { unsigned short major, minor; } kdev_t;
+
+Admissible operations on an object of type kdev_t:
+- passing it along
+- comparing it for equality with another such object
+- storing it in ROOT_DEV, inode->i_dev, inode->i_rdev, sb->s_dev,
+ bh->b_dev, req->rq_dev, de->dc_dev, tty->device
+- using its bit pattern as argument in a hash function
+- finding its major and minor
+- complaining about it
+
+An object of type kdev_t is created only by the function MKDEV(),
+with the single exception of the constant 0 (no device).
+
+Right now the other information mentioned above is usually found
+in static arrays indexed by major or major,minor.
+
+An obstacle to immediately using
+ typedef struct { ... (* lots of information *) } *kdev_t
+is the case of mknod used to create a block device that the
+kernel doesn't know about at present (but first learns about
+when some module is inserted).
+
+aeb - 950811
+*/
+
+/* Since MINOR(dev) is used as index in static arrays,
+ the kernel is not quite ready yet for larger minors.
+ However, everything runs fine with an arbitrary kdev_t type. */
+
+#define MINORBITS 8
+#define MINORMASK ((1<<MINORBITS) - 1)
+
+typedef unsigned short kdev_t;
+
+#define MAJOR(dev) ((dev) >> MINORBITS)
+#define MINOR(dev) ((dev) & MINORMASK)
+#define HASHDEV(dev) (dev)
+#define NODEV 0
+#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
+#define B_FREE 0xffff /* yuk */
+
+extern char * kdevname(kdev_t); /* note: returns pointer to static data! */
+
+/*
+As long as device numbers in the outside world have 16 bits only,
+we use these conversions.
+*/
+
+static inline unsigned int kdev_t_to_nr(kdev_t dev) {
+ return (MAJOR(dev)<<8) | MINOR(dev);
+}
+
+static inline kdev_t to_kdev_t(int dev)
+{
+ int major, minor;
+#if 0
+ major = (dev >> 16);
+ if (!major) {
+ major = (dev >> 8);
+ minor = (dev & 0xff);
+ } else
+ minor = (dev & 0xffff);
+#else
+ major = (dev >> 8);
+ minor = (dev & 0xff);
+#endif
+ return MKDEV(major, minor);
+}
+
+#else /* __KERNEL__ */
+
+/*
+Some programs want their definitions of MAJOR and MINOR and MKDEV
+from the kernel sources. These must be the externally visible ones.
+*/
+#define MAJOR(dev) ((dev)>>8)
+#define MINOR(dev) ((dev) & 0xff)
+#define MKDEV(ma,mi) ((ma)<<8 | (mi))
+#endif /* __KERNEL__ */
+#endif
diff --git a/linux/src/include/linux/kernel.h b/linux/src/include/linux/kernel.h
new file mode 100644
index 0000000..e05912b
--- /dev/null
+++ b/linux/src/include/linux/kernel.h
@@ -0,0 +1,97 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often-used function prototypes etc
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+
+/* Optimization barrier */
+#define barrier() __asm__("": : :"memory")
+
+#define INT_MAX ((int)(~0U>>1))
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define ULONG_MAX (~0UL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+
+# define NORET_TYPE /**/
+# define ATTRIB_NORET __attribute__((noreturn))
+# define NORET_AND noreturn,
+
+extern void math_error(void);
+NORET_TYPE void panic(const char * fmt, ...)
+ __attribute__ ((NORET_AND format (printf, 1, 2)));
+NORET_TYPE void do_exit(long error_code)
+ ATTRIB_NORET;
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern int sprintf(char * buf, const char * fmt, ...);
+extern int vsprintf(char *buf, const char *, va_list);
+
+extern int session_of_pgrp(int pgrp);
+
+extern int kill_proc(int pid, int sig, int priv);
+extern int kill_pg(int pgrp, int sig, int priv);
+extern int kill_sl(int sess, int sig, int priv);
+
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+#if DEBUG
+#define pr_debug(fmt,arg...) \
+ printk(KERN_DEBUG fmt,##arg)
+#else
+#define pr_debug(fmt,arg...) \
+ do { } while (0)
+#endif
+
+#define pr_info(fmt,arg...) \
+ printk(KERN_INFO fmt,##arg)
+
+/*
+ * "suser()" checks against the effective user id, while "fsuser()"
+ * is used for file permission checking and checks against the fsuid..
+ */
+#define fsuser() (current->fsuid == 0)
+
+/*
+ * Display an IP address in readable format.
+ */
+
+#define NIPQUAD(addr) \
+ (((addr) >> 0) & 0xff), \
+ (((addr) >> 8) & 0xff), \
+ (((addr) >> 16) & 0xff), \
+ (((addr) >> 24) & 0xff)
+
+#endif /* __KERNEL__ */
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ long uptime; /* Seconds since boot */
+ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
+ unsigned long totalram; /* Total usable main memory size */
+ unsigned long freeram; /* Available memory size */
+ unsigned long sharedram; /* Amount of shared memory */
+ unsigned long bufferram; /* Memory used by buffers */
+ unsigned long totalswap; /* Total swap space size */
+ unsigned long freeswap; /* swap space still available */
+ unsigned short procs; /* Number of current processes */
+ char _f[22]; /* Pads structure to 64 bytes */
+};
+
+#endif
diff --git a/linux/src/include/linux/kernel_stat.h b/linux/src/include/linux/kernel_stat.h
new file mode 100644
index 0000000..1966490
--- /dev/null
+++ b/linux/src/include/linux/kernel_stat.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_KERNEL_STAT_H
+#define _LINUX_KERNEL_STAT_H
+
+#include <asm/irq.h>
+
+/*
+ * 'kernel_stat.h' contains the definitions needed for doing
+ * some kernel statistics (cpu usage, context switches ...),
+ * used by rstatd/perfmeter
+ */
+
+#define DK_NDRIVE 4
+
+struct kernel_stat {
+ unsigned int cpu_user, cpu_nice, cpu_system;
+ unsigned int dk_drive[DK_NDRIVE];
+ unsigned int dk_drive_rio[DK_NDRIVE];
+ unsigned int dk_drive_wio[DK_NDRIVE];
+ unsigned int dk_drive_rblk[DK_NDRIVE];
+ unsigned int dk_drive_wblk[DK_NDRIVE];
+ unsigned int pgpgin, pgpgout;
+ unsigned int pswpin, pswpout;
+ unsigned int interrupts[NR_IRQS];
+ unsigned int ipackets, opackets;
+ unsigned int ierrors, oerrors;
+ unsigned int collisions;
+ unsigned int context_swtch;
+};
+
+extern struct kernel_stat kstat;
+
+#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/linux/src/include/linux/limits.h b/linux/src/include/linux/limits.h
new file mode 100644
index 0000000..d0f300c
--- /dev/null
+++ b/linux/src/include/linux/limits.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_LIMITS_H
+#define _LINUX_LIMITS_H
+
+#define NR_OPEN 256
+
+#define NGROUPS_MAX 32 /* supplemental group IDs are available */
+#define ARG_MAX 131072 /* # bytes of args + environ for exec() */
+#define CHILD_MAX 999 /* no limit :-) */
+#define OPEN_MAX 256 /* # open files a process may have */
+#define LINK_MAX 127 /* # links a file may have */
+#define MAX_CANON 255 /* size of the canonical input queue */
+#define MAX_INPUT 255 /* size of the type-ahead buffer */
+#define NAME_MAX 255 /* # chars in a file name */
+#define PATH_MAX 1024 /* # chars in a path name */
+#define PIPE_BUF 4096 /* # bytes in atomic write to a pipe */
+
+#endif
diff --git a/linux/src/include/linux/linkage.h b/linux/src/include/linux/linkage.h
new file mode 100644
index 0000000..c8a7a49
--- /dev/null
+++ b/linux/src/include/linux/linkage.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_LINKAGE_H
+#define _LINUX_LINKAGE_H
+
+#ifdef __cplusplus
+#define asmlinkage extern "C"
+#else
+#define asmlinkage
+#endif
+
+#ifdef __ELF__
+#define SYMBOL_NAME_STR(X) #X
+#define SYMBOL_NAME(X) X
+#ifdef __STDC__
+#define SYMBOL_NAME_LABEL(X) X##:
+#else
+#define SYMBOL_NAME_LABEL(X) X/**/:
+#endif
+#else
+#define SYMBOL_NAME_STR(X) "_"#X
+#ifdef __STDC__
+#define SYMBOL_NAME(X) _##X
+#define SYMBOL_NAME_LABEL(X) _##X##:
+#else
+#define SYMBOL_NAME(X) _/**/X
+#define SYMBOL_NAME_LABEL(X) _/**/X/**/:
+#endif
+#endif
+
+#if !defined(__i486__) && !defined(__i586__)
+#ifdef __ELF__
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#else /* __ELF__ */
+#define __ALIGN .align 2,0x90
+#define __ALIGN_STR ".align 2,0x90"
+#endif /* __ELF__ */
+#else /* __i486__/__i586__ */
+#ifdef __ELF__
+#define __ALIGN .align 16,0x90
+#define __ALIGN_STR ".align 16,0x90"
+#else /* __ELF__ */
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#endif /* __ELF__ */
+#endif /* __i486__/__i586__ */
+
+#ifdef __ASSEMBLY__
+
+#define ALIGN __ALIGN
+#define ALIGN_STRING __ALIGN_STRING
+
+#define ENTRY(name) \
+ .globl SYMBOL_NAME(name); \
+ ALIGN; \
+ SYMBOL_NAME_LABEL(name)
+
+#endif
+
+#endif
diff --git a/linux/src/include/linux/list.h b/linux/src/include/linux/list.h
new file mode 100644
index 0000000..27a6ff4
--- /dev/null
+++ b/linux/src/include/linux/list.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_LIST_H
+#define _LINUX_LIST_H
+
+#ifdef __KERNEL__
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = { &name, &name }
+
+#define INIT_LIST_HEAD(ptr) do { \
+ (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_add(struct list_head * new,
+ struct list_head * prev,
+ struct list_head * next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/*
+ * Insert a new entry after the specified head..
+ */
+static __inline__ void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/*
+ * Insert a new entry at the tail
+ */
+static __inline__ void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_del(struct list_head * prev,
+ struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+static __inline__ void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static __inline__ int list_empty(struct list_head *head)
+{
+ return head->next == head;
+}
+
+/*
+ * Splice in "list" into "head"
+ */
+static __inline__ void list_splice(struct list_head *list, struct list_head *head)
+{
+ struct list_head *first = list->next;
+
+ if (first != list) {
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ first->prev = head;
+ head->next = first;
+
+ last->next = at;
+ at->prev = last;
+ }
+}
+
+#define list_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/linux/locks.h b/linux/src/include/linux/locks.h
new file mode 100644
index 0000000..9735bc6
--- /dev/null
+++ b/linux/src/include/linux/locks.h
@@ -0,0 +1,65 @@
+#ifndef _LINUX_LOCKS_H
+#define _LINUX_LOCKS_H
+
+#ifndef _LINUX_MM_H
+#include <linux/mm.h>
+#endif
+#ifndef _LINUX_PAGEMAP_H
+#include <linux/pagemap.h>
+#endif
+
+/*
+ * Unlocked, temporary IO buffer_heads gets moved to the reuse_list
+ * once their page becomes unlocked.
+ */
+extern struct buffer_head *reuse_list;
+
+/*
+ * Buffer cache locking - note that interrupts may only unlock, not
+ * lock buffers.
+ */
+extern void __wait_on_buffer(struct buffer_head *);
+
+extern inline void wait_on_buffer(struct buffer_head * bh)
+{
+ if (test_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+extern inline void lock_buffer(struct buffer_head * bh)
+{
+ while (set_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+void unlock_buffer(struct buffer_head *);
+
+
+/*
+ * super-block locking. Again, interrupts may only unlock
+ * a super-block (although even this isn't done right now.
+ * nfs may need it).
+ */
+extern void __wait_on_super(struct super_block *);
+
+extern inline void wait_on_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+}
+
+extern inline void lock_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+ sb->s_lock = 1;
+}
+
+extern inline void unlock_super(struct super_block * sb)
+{
+ sb->s_lock = 0;
+ wake_up(&sb->s_wait);
+}
+
+#endif /* _LINUX_LOCKS_H */
+
diff --git a/linux/src/include/linux/major.h b/linux/src/include/linux/major.h
new file mode 100644
index 0000000..97d9926
--- /dev/null
+++ b/linux/src/include/linux/major.h
@@ -0,0 +1,88 @@
+#ifndef _LINUX_MAJOR_H
+#define _LINUX_MAJOR_H
+
+/*
+ * This file has definitions for major device numbers.
+ * For the device number assignments, see Documentation/devices.txt.
+ */
+
+/* limits */
+
+#define MAX_CHRDEV 128
+#define MAX_BLKDEV 128
+
+#define UNNAMED_MAJOR 0
+#define MEM_MAJOR 1
+#define RAMDISK_MAJOR 1
+#define FLOPPY_MAJOR 2
+#define PTY_MASTER_MAJOR 2
+#define IDE0_MAJOR 3
+#define PTY_SLAVE_MAJOR 3
+#define HD_MAJOR IDE0_MAJOR
+#define TTY_MAJOR 4
+#define TTYAUX_MAJOR 5
+#define LP_MAJOR 6
+#define VCS_MAJOR 7
+#define LOOP_MAJOR 7
+#define SCSI_DISK_MAJOR 8
+#define SCSI_TAPE_MAJOR 9
+#define MD_MAJOR 9
+#define MISC_MAJOR 10
+#define SCSI_CDROM_MAJOR 11
+#define QIC02_TAPE_MAJOR 12
+#define XT_DISK_MAJOR 13
+#define SOUND_MAJOR 14
+#define CDU31A_CDROM_MAJOR 15
+#define JOYSTICK_MAJOR 15
+#define GOLDSTAR_CDROM_MAJOR 16
+#define OPTICS_CDROM_MAJOR 17
+#define SANYO_CDROM_MAJOR 18
+#define CYCLADES_MAJOR 19
+#define CYCLADESAUX_MAJOR 20
+#define MITSUMI_X_CDROM_MAJOR 20
+#define SCSI_GENERIC_MAJOR 21
+#define Z8530_MAJOR 34
+#define DIGI_MAJOR 23
+#define IDE1_MAJOR 22
+#define DIGICU_MAJOR 22
+#define MITSUMI_CDROM_MAJOR 23
+#define CDU535_CDROM_MAJOR 24
+#define STL_SERIALMAJOR 24
+#define MATSUSHITA_CDROM_MAJOR 25
+#define STL_CALLOUTMAJOR 25
+#define MATSUSHITA_CDROM2_MAJOR 26
+#define QIC117_TAPE_MAJOR 27
+#define MATSUSHITA_CDROM3_MAJOR 27
+#define MATSUSHITA_CDROM4_MAJOR 28
+#define STL_SIOMEMMAJOR 28
+#define ACSI_MAJOR 28
+#define AZTECH_CDROM_MAJOR 29
+#define GRAPHDEV_MAJOR 29 /* SparcLinux & Linux/68k /dev/fb */
+#define CM206_CDROM_MAJOR 32
+#define IDE2_MAJOR 33
+#define IDE3_MAJOR 34
+#define NETLINK_MAJOR 36
+#define IDETAPE_MAJOR 37
+#define Z2RAM_MAJOR 37
+#define RISCOM8_NORMAL_MAJOR 48
+#define RISCOM8_CALLOUT_MAJOR 49
+#define MKISS_MAJOR 55
+#define APBLOCK_MAJOR 60 /* AP1000 Block device */
+#define DDV_MAJOR 61 /* AP1000 DDV block device */
+
+#define SPECIALIX_NORMAL_MAJOR 75
+#define SPECIALIX_CALLOUT_MAJOR 76
+
+/*
+ * Tests for SCSI devices.
+ */
+
+#define SCSI_BLK_MAJOR(M) \
+ ((M) == SCSI_DISK_MAJOR \
+ || (M) == SCSI_CDROM_MAJOR)
+
+static __inline__ int scsi_blk_major(int m) {
+ return SCSI_BLK_MAJOR(m);
+}
+
+#endif
diff --git a/linux/src/include/linux/malloc.h b/linux/src/include/linux/malloc.h
new file mode 100644
index 0000000..0ef0857
--- /dev/null
+++ b/linux/src/include/linux/malloc.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_MALLOC_H
+#define _LINUX_MALLOC_H
+
+#include <linux/mm.h>
+
+void * kmalloc(unsigned int size, int priority);
+void kfree(void * obj);
+
+#define kfree_s(a,b) kfree(a)
+
+#endif /* _LINUX_MALLOC_H */
diff --git a/linux/src/include/linux/mc146818rtc.h b/linux/src/include/linux/mc146818rtc.h
new file mode 100644
index 0000000..0a2efb6
--- /dev/null
+++ b/linux/src/include/linux/mc146818rtc.h
@@ -0,0 +1,149 @@
+/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
+ * Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993
+ * derived from Data Sheet, Copyright Motorola 1984 (!).
+ * It was written to be part of the Linux operating system.
+ */
+/* permission is hereby granted to copy, modify and redistribute this code
+ * in terms of the GNU Library General Public License, Version 2 or later,
+ * at your option.
+ */
+
+#ifndef _MC146818RTC_H
+#define _MC146818RTC_H
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1
+#endif
+
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
+
+/**********************************************************************
+ * register summary
+ **********************************************************************/
+#define RTC_SECONDS 0
+#define RTC_SECONDS_ALARM 1
+#define RTC_MINUTES 2
+#define RTC_MINUTES_ALARM 3
+#define RTC_HOURS 4
+#define RTC_HOURS_ALARM 5
+/* RTC_*_alarm is always true if 2 MSBs are set */
+# define RTC_ALARM_DONT_CARE 0xC0
+
+#define RTC_DAY_OF_WEEK 6
+#define RTC_DAY_OF_MONTH 7
+#define RTC_MONTH 8
+#define RTC_YEAR 9
+
+/* control registers - Moto names
+ */
+#define RTC_REG_A 10
+#define RTC_REG_B 11
+#define RTC_REG_C 12
+#define RTC_REG_D 13
+
+/**********************************************************************
+ * register details
+ **********************************************************************/
+#define RTC_FREQ_SELECT RTC_REG_A
+
+/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus,
+ * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete,
+ * totalling to a max high interval of 2.228 ms.
+ */
+# define RTC_UIP 0x80
+# define RTC_DIV_CTL 0x70
+ /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */
+# define RTC_REF_CLCK_4MHZ 0x00
+# define RTC_REF_CLCK_1MHZ 0x10
+# define RTC_REF_CLCK_32KHZ 0x20
+ /* 2 values for divider stage reset, others for "testing purposes only" */
+# define RTC_DIV_RESET1 0x60
+# define RTC_DIV_RESET2 0x70
+ /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
+# define RTC_RATE_SELECT 0x0F
+
+/**********************************************************************/
+#define RTC_CONTROL RTC_REG_B
+# define RTC_SET 0x80 /* disable updates for clock setting */
+# define RTC_PIE 0x40 /* periodic interrupt enable */
+# define RTC_AIE 0x20 /* alarm interrupt enable */
+# define RTC_UIE 0x10 /* update-finished interrupt enable */
+# define RTC_SQWE 0x08 /* enable square-wave output */
+# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+# define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
+
+/**********************************************************************/
+#define RTC_INTR_FLAGS RTC_REG_C
+/* caution - cleared by read */
+# define RTC_IRQF 0x80 /* any of the following 3 is active */
+# define RTC_PF 0x40
+# define RTC_AF 0x20
+# define RTC_UF 0x10
+
+/**********************************************************************/
+#define RTC_VALID RTC_REG_D
+# define RTC_VRT 0x80 /* valid RAM and time */
+/**********************************************************************/
+
+/* example: !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+ * determines if the following two #defines are needed
+ */
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+/*
+ * The struct used to pass data via the following ioctl. Similar to the
+ * struct tm in <time.h>, but it needs to be here so that the kernel
+ * source is self contained, allowing cross-compiles, etc. etc.
+ */
+
+struct rtc_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+};
+
+/*
+ * ioctl calls that are permitted to the /dev/rtc interface, if
+ * CONFIG_RTC was enabled.
+ */
+
+#define RTC_AIE_ON _IO('p', 0x01) /* Alarm int. enable on */
+#define RTC_AIE_OFF _IO('p', 0x02) /* ... off */
+#define RTC_UIE_ON _IO('p', 0x03) /* Update int. enable on */
+#define RTC_UIE_OFF _IO('p', 0x04) /* ... off */
+#define RTC_PIE_ON _IO('p', 0x05) /* Periodic int. enable on */
+#define RTC_PIE_OFF _IO('p', 0x06) /* ... off */
+
+#define RTC_ALM_SET _IOW('p', 0x07, struct rtc_time) /* Set alarm time */
+#define RTC_ALM_READ _IOR('p', 0x08, struct rtc_time) /* Read alarm time */
+#define RTC_RD_TIME _IOR('p', 0x09, struct rtc_time) /* Read RTC time */
+#define RTC_SET_TIME _IOW('p', 0x0a, struct rtc_time) /* Set RTC time */
+#define RTC_IRQP_READ _IOR('p', 0x0b, unsigned long) /* Read IRQ rate */
+#define RTC_IRQP_SET _IOW('p', 0x0c, unsigned long) /* Set IRQ rate */
+#define RTC_EPOCH_READ _IOR('p', 0x0d, unsigned long) /* Read epoch */
+#define RTC_EPOCH_SET _IOW('p', 0x0e, unsigned long) /* Set epoch */
+
+
+#endif /* _MC146818RTC_H */
diff --git a/linux/src/include/linux/md.h b/linux/src/include/linux/md.h
new file mode 100644
index 0000000..413beb7
--- /dev/null
+++ b/linux/src/include/linux/md.h
@@ -0,0 +1,275 @@
+/*
+ md.h : Multiple Devices driver for Linux
+ Copyright (C) 1994-96 Marc ZYNGIER
+ <zyngier@ufr-info-p7.ibp.fr> or
+ <maz@gloups.fdn.fr>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_H
+#define _MD_H
+
+#include <asm/segment.h>
+#include <linux/major.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * Different major versions are not compatible.
+ * Different minor versions are only downward compatible.
+ * Different patchlevel versions are downward and upward compatible.
+ */
+#define MD_MAJOR_VERSION 0
+#define MD_MINOR_VERSION 36
+#define MD_PATCHLEVEL_VERSION 3
+
+/* ioctls */
+#define REGISTER_DEV _IO (MD_MAJOR, 1)
+#define START_MD _IO (MD_MAJOR, 2)
+#define STOP_MD _IO (MD_MAJOR, 3)
+
+/*
+ personalities :
+ Byte 0 : Chunk size factor
+ Byte 1 : Fault tolerance count for each physical device
+ ( 0 means no fault tolerance,
+ 0xFF means always tolerate faults), not used by now.
+ Byte 2 : Personality
+ Byte 3 : Reserved.
+ */
+
+#define FAULT_SHIFT 8
+#define PERSONALITY_SHIFT 16
+
+#define FACTOR_MASK 0x000000FFUL
+#define FAULT_MASK 0x0000FF00UL
+#define PERSONALITY_MASK 0x00FF0000UL
+
+#define MD_RESERVED 0 /* Not used by now */
+#define LINEAR (1UL << PERSONALITY_SHIFT)
+#define STRIPED (2UL << PERSONALITY_SHIFT)
+#define RAID0 STRIPED
+#define RAID1 (3UL << PERSONALITY_SHIFT)
+#define RAID5 (4UL << PERSONALITY_SHIFT)
+#define MAX_PERSONALITY 5
+
+/*
+ * MD superblock.
+ *
+ * The MD superblock maintains some statistics on each MD configuration.
+ * Each real device in the MD set contains it near the end of the device.
+ * Some of the ideas are copied from the ext2fs implementation.
+ *
+ * We currently use 4096 bytes as follows:
+ *
+ * word offset function
+ *
+ * 0 - 31 Constant generic MD device information.
+ * 32 - 63 Generic state information.
+ * 64 - 127 Personality specific information.
+ * 128 - 511 12 32-words descriptors of the disks in the raid set.
+ * 512 - 911 Reserved.
+ * 912 - 1023 Disk specific descriptor.
+ */
+
+/*
+ * If x is the real device size in bytes, we return an apparent size of:
+ *
+ * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
+ *
+ * and place the 4kB superblock at offset y.
+ */
+#define MD_RESERVED_BYTES (64 * 1024)
+#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
+#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE)
+
+#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
+#define MD_NEW_SIZE_BLOCKS(x) ((x & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS)
+
+#define MD_SB_BYTES 4096
+#define MD_SB_WORDS (MD_SB_BYTES / 4)
+#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE)
+#define MD_SB_SECTORS (MD_SB_BYTES / 512)
+
+/*
+ * The following are counted in 32-bit words
+ */
+#define MD_SB_GENERIC_OFFSET 0
+#define MD_SB_PERSONALITY_OFFSET 64
+#define MD_SB_DISKS_OFFSET 128
+#define MD_SB_DESCRIPTOR_OFFSET 992
+
+#define MD_SB_GENERIC_CONSTANT_WORDS 32
+#define MD_SB_GENERIC_STATE_WORDS 32
+#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
+#define MD_SB_PERSONALITY_WORDS 64
+#define MD_SB_DISKS_WORDS 384
+#define MD_SB_DESCRIPTOR_WORDS 32
+#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
+#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
+#define MD_SB_DISKS (MD_SB_DISKS_WORDS / MD_SB_DESCRIPTOR_WORDS)
+
+/*
+ * Device "operational" state bits
+ */
+#define MD_FAULTY_DEVICE 0 /* Device is faulty / operational */
+#define MD_ACTIVE_DEVICE 1 /* Device is a part or the raid set / spare disk */
+#define MD_SYNC_DEVICE 2 /* Device is in sync with the raid set */
+
+typedef struct md_device_descriptor_s {
+ __u32 number; /* 0 Device number in the entire set */
+ __u32 major; /* 1 Device major number */
+ __u32 minor; /* 2 Device minor number */
+ __u32 raid_disk; /* 3 The role of the device in the raid set */
+ __u32 state; /* 4 Operational state */
+ __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
+} md_descriptor_t;
+
+#define MD_SB_MAGIC 0xa92b4efc
+
+/*
+ * Superblock state bits
+ */
+#define MD_SB_CLEAN 0
+#define MD_SB_ERRORS 1
+
+typedef struct md_superblock_s {
+
+ /*
+ * Constant generic information
+ */
+ __u32 md_magic; /* 0 MD identifier */
+ __u32 major_version; /* 1 major version to which the set conforms */
+ __u32 minor_version; /* 2 minor version to which the set conforms */
+ __u32 patch_version; /* 3 patchlevel version to which the set conforms */
+ __u32 gvalid_words; /* 4 Number of non-reserved words in this section */
+ __u32 set_magic; /* 5 Raid set identifier */
+ __u32 ctime; /* 6 Creation time */
+ __u32 level; /* 7 Raid personality (mirroring, raid5, ...) */
+ __u32 size; /* 8 Apparent size of each individual disk, in kB */
+ __u32 nr_disks; /* 9 Number of total disks in the raid set */
+ __u32 raid_disks; /* 10 Number of disks in a fully functional raid set */
+ __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 11];
+
+ /*
+ * Generic state information
+ */
+ __u32 utime; /* 0 Superblock update time */
+ __u32 state; /* 1 State bits (clean, ...) */
+ __u32 active_disks; /* 2 Number of currently active disks (some non-faulty disks might not be in sync) */
+ __u32 working_disks; /* 3 Number of working disks */
+ __u32 failed_disks; /* 4 Number of failed disks */
+ __u32 spare_disks; /* 5 Number of spare disks */
+ __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 6];
+
+ /*
+ * Personality information
+ */
+ __u32 parity_algorithm;
+ __u32 chunk_size;
+ __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 2];
+
+ /*
+ * Disks information
+ */
+ md_descriptor_t disks[MD_SB_DISKS];
+
+ /*
+ * Reserved
+ */
+ __u32 reserved[MD_SB_RESERVED_WORDS];
+
+ /*
+ * Active descriptor
+ */
+ md_descriptor_t descriptor;
+} md_superblock_t;
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+
+#define MAX_REAL 8 /* Max number of physical dev per md dev */
+#define MAX_MD_DEV 4 /* Max number of md dev */
+#define MAX_MD_THREADS 2 /* Max number of kernel threads */
+
+#define FACTOR(a) ((a)->repartition & FACTOR_MASK)
+#define MAX_FAULT(a) (((a)->repartition & FAULT_MASK)>>8)
+#define PERSONALITY(a) ((a)->repartition & PERSONALITY_MASK)
+
+#define FACTOR_SHIFT(a) (PAGE_SHIFT + (a) - 10)
+
+struct real_dev
+{
+ kdev_t dev; /* Device number */
+ int size; /* Device size (in blocks) */
+ int offset; /* Real device offset (in blocks) in md dev
+ (only used in linear mode) */
+ struct inode *inode; /* Lock inode */
+ md_superblock_t *sb;
+ u32 sb_offset;
+};
+
+struct md_dev;
+
+struct md_personality
+{
+ char *name;
+ int (*map)(struct md_dev *md_dev, kdev_t *rdev,
+ unsigned long *rsector, unsigned long size);
+ int (*make_request)(struct md_dev *md_dev, int rw, struct buffer_head * bh);
+ void (*end_request)(struct buffer_head * bh, int uptodate);
+ int (*run)(int minor, struct md_dev *md_dev);
+ int (*stop)(int minor, struct md_dev *md_dev);
+ int (*status)(char *page, int minor, struct md_dev *md_dev);
+ int (*ioctl)(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+ int max_invalid_dev;
+ int (*error_handler)(struct md_dev *md_dev, kdev_t dev);
+};
+
+struct md_dev
+{
+ struct real_dev devices[MAX_REAL];
+ struct md_personality *pers;
+ md_superblock_t *sb;
+ int sb_dirty;
+ int repartition;
+ int busy;
+ int nb_dev;
+ void *private;
+};
+
+struct md_thread {
+ void (*run) (void *data);
+ void *data;
+ struct wait_queue *wqueue;
+ __u32 flags;
+};
+
+#define THREAD_WAKEUP 0
+
+extern struct md_dev md_dev[MAX_MD_DEV];
+extern int md_size[MAX_MD_DEV];
+
+extern char *partition_name (kdev_t dev);
+
+extern int register_md_personality (int p_num, struct md_personality *p);
+extern int unregister_md_personality (int p_num);
+extern struct md_thread *md_register_thread (void (*run) (void *data), void *data);
+extern void md_unregister_thread (struct md_thread *thread);
+extern void md_wakeup_thread(struct md_thread *thread);
+extern int md_update_sb (int minor);
+
+#endif /* __KERNEL__ */
+#endif /* _MD_H */
diff --git a/linux/src/include/linux/mm.h b/linux/src/include/linux/mm.h
new file mode 100644
index 0000000..39522dd
--- /dev/null
+++ b/linux/src/include/linux/mm.h
@@ -0,0 +1,375 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+extern unsigned long high_memory;
+
+#include <asm/page.h>
+#include <asm/atomic.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+extern int verify_area(int, const void *, unsigned long);
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a memory VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* VM area parameters */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ pgprot_t vm_page_prot;
+ unsigned short vm_flags;
+/* AVL tree of VM areas per task, sorted by address */
+ short vm_avl_height;
+ struct vm_area_struct * vm_avl_left;
+ struct vm_area_struct * vm_avl_right;
+/* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct * vm_next;
+/* for areas with inode, the circular list inode->i_mmap */
+/* for shm areas, the circular list of attaches */
+/* otherwise unused */
+ struct vm_area_struct * vm_next_share;
+ struct vm_area_struct * vm_prev_share;
+/* more */
+ struct vm_operations_struct * vm_ops;
+ unsigned long vm_offset;
+ struct inode * vm_inode;
+ unsigned long vm_pte; /* shared mem */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x0001 /* currently active flags */
+#define VM_WRITE 0x0002
+#define VM_EXEC 0x0004
+#define VM_SHARED 0x0008
+
+#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x0020
+#define VM_MAYEXEC 0x0040
+#define VM_MAYSHARE 0x0080
+
+#define VM_GROWSDOWN 0x0100 /* general info on the segment */
+#define VM_GROWSUP 0x0200
+#define VM_SHM 0x0400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x1000
+#define VM_LOCKED 0x2000
+
+#define VM_STACK_FLAGS 0x0177
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
+ void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
+ int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
+ void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
+ unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
+ unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
+ unsigned long page);
+ int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
+ pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
+};
+
+/*
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in page cache lookup, the second line
+ * is used for linear searches (eg. clock algorithm scans).
+ */
+typedef struct page {
+ /* these must be first (free area handling) */
+ struct page *next;
+ struct page *prev;
+ struct inode *inode;
+ unsigned long offset;
+ struct page *next_hash;
+ atomic_t count;
+ unsigned flags; /* atomic flags, some possibly updated asynchronously */
+ unsigned dirty:16,
+ age:8;
+ struct wait_queue *wait;
+ struct page *prev_hash;
+ struct buffer_head * buffers;
+ unsigned long swap_unlock_entry;
+ unsigned long map_nr; /* page->map_nr == page - mem_map */
+} mem_map_t;
+
+/* Page flag bit values */
+#define PG_locked 0
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+#define PG_free_after 4
+#define PG_decr_after 5
+#define PG_swap_unlock_after 6
+#define PG_DMA 7
+#define PG_reserved 31
+
+/* Make it prettier to test the above... */
+#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
+#define PageError(page) (test_bit(PG_error, &(page)->flags))
+#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
+#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
+#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
+#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
+#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
+#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
+#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
+#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
+
+/*
+ * page->reserved denotes a page which must never be accessed (which
+ * may not even be present).
+ *
+ * page->dma is set for those pages which lie in the range of
+ * physical addresses capable of carrying DMA transfers.
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used for kmalloc() or anyone else who does a
+ * get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->inode is the inode, and page->offset is the file offset
+ * of the page (not necessarily a multiple of PAGE_SIZE).
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page.
+ *
+ * All pages belonging to an inode make up a doubly linked list
+ * inode->i_pages, using the fields page->next and page->prev. (These
+ * fields are also used for freelist management when page->count==0.)
+ * There is also a hash table mapping (inode,offset) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->prev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, page->locked is true. This bit is set before I/O
+ * and reset when I/O completes. page->wait is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * page->uptodate tells whether the page's contents is valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ * When a write completes, and page->free_after is true, the page is
+ * freed without any further delay.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * page->referenced bit, which is set any time the system accesses
+ * that page through the (inode,offset) hash table.
+ * There is also the page->age counter, which implements a linear
+ * decay (why not an exponential decay?), see swapctl.h.
+ */
+
+extern mem_map_t * mem_map;
+
+/*
+ * This is timing-critical - most of the time in getting a new page
+ * goes to clearing the page. If you want a page without the clearing
+ * overhead, just use __get_free_page() directly..
+ */
+#define __get_free_page(priority) __get_free_pages((priority),0,0)
+#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
+extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
+
+extern inline unsigned long get_free_page(int priority)
+{
+ unsigned long page;
+
+ page = __get_free_page(priority);
+ if (page)
+ memset((void *) page, 0, PAGE_SIZE);
+ return page;
+}
+
+/* memory.c & swap.c*/
+
+#define free_page(addr) free_pages((addr),0)
+extern void free_pages(unsigned long addr, unsigned long order);
+extern void __free_page(struct page *);
+
+extern void show_free_areas(void);
+extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
+ unsigned long address);
+
+extern void free_page_tables(struct mm_struct * mm);
+extern void clear_page_tables(struct task_struct * tsk);
+extern int new_page_tables(struct task_struct * tsk);
+extern int copy_page_tables(struct task_struct * to);
+
+extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern void vmtruncate(struct inode * inode, unsigned long offset);
+extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
+extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+
+extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
+extern void mem_init(unsigned long start_mem, unsigned long end_mem);
+extern void show_mem(void);
+extern void oom(struct task_struct * tsk);
+extern void si_meminfo(struct sysinfo * val);
+
+/* vmalloc.c */
+
+extern void * vmalloc(unsigned long size);
+extern void * vremap(unsigned long offset, unsigned long size);
+extern void vfree(void * addr);
+extern int vread(char *buf, char *addr, int count);
+
+/* mmap.c */
+extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long off);
+extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
+extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void remove_shared_vm_struct(struct vm_area_struct *);
+extern void build_mmap_avl(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+extern int do_munmap(unsigned long, size_t);
+extern unsigned long get_unmapped_area(unsigned long, unsigned long);
+
+/* filemap.c */
+extern unsigned long page_unuse(unsigned long);
+extern int shrink_mmap(int, int, int);
+extern void truncate_inode_pages(struct inode *, unsigned long);
+
+#define GFP_BUFFER 0x00
+#define GFP_ATOMIC 0x01
+#define GFP_USER 0x02
+#define GFP_KERNEL 0x03
+#define GFP_NOBUFFER 0x04
+#define GFP_NFS 0x05
+#define GFP_IO 0x06
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA 0x80
+
+#define GFP_LEVEL_MASK 0xf
+
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ address &= PAGE_MASK;
+ grow = vma->vm_start - address;
+ if (vma->vm_end - address
+ > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
+ (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
+ > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+ vma->vm_start = address;
+ vma->vm_offset -= grow;
+ vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
+ return 0;
+}
+
+#define avl_empty (struct vm_area_struct *) NULL
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct * result = NULL;
+
+ if (mm) {
+ struct vm_area_struct * tree = mm->mmap_avl;
+ for (;;) {
+ if (tree == avl_empty)
+ break;
+ if (tree->vm_end > addr) {
+ result = tree;
+ if (tree->vm_start <= addr)
+ break;
+ tree = tree->vm_avl_left;
+ } else
+ tree = tree->vm_avl_right;
+ }
+ }
+ return result;
+}
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma;
+
+ vma = find_vma(mm,start_addr);
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/linux/module.h b/linux/src/include/linux/module.h
new file mode 100644
index 0000000..acc2540
--- /dev/null
+++ b/linux/src/include/linux/module.h
@@ -0,0 +1,116 @@
+/*
+ * Dynamic loading of modules into the kernel.
+ *
+ * Modified by Bjorn Ekwall <bj0rn@blox.se>
+ */
+
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+
+#ifdef __GENKSYMS__
+# define _set_ver(sym,vers) sym
+# undef MODVERSIONS
+# define MODVERSIONS
+#else /* ! __GENKSYMS__ */
+# if defined(MODVERSIONS) && !defined(MODULE) && defined(EXPORT_SYMTAB)
+# define _set_ver(sym,vers) sym
+# include <linux/modversions.h>
+# endif
+#endif /* __GENKSYMS__ */
+
+/* values of module.state */
+#define MOD_UNINITIALIZED 0
+#define MOD_RUNNING 1
+#define MOD_DELETED 2
+
+/* maximum length of module name */
+#define MOD_MAX_NAME 64
+
+/* magic marker for modules inserted from kerneld, to be auto-reaped */
+#define MOD_AUTOCLEAN 0x40000000 /* big enough, but no sign problems... */
+#define MOD_VISITED 0x20000000 /* Thanks Jacques! */
+
+/* maximum length of symbol name */
+#define SYM_MAX_NAME 60
+
+struct kernel_sym { /* sent to "insmod" */
+ unsigned long value; /* value of symbol */
+ char name[SYM_MAX_NAME]; /* name of symbol */
+};
+
+struct module_ref {
+ struct module *module;
+ struct module_ref *next;
+};
+
+struct internal_symbol {
+ void *addr;
+ const char *name;
+};
+
+struct symbol_table { /* received from "insmod" */
+ int size; /* total, including string table!!! */
+ int n_symbols;
+ int n_refs;
+ struct internal_symbol symbol[0]; /* actual size defined by n_symbols */
+ struct module_ref ref[0]; /* actual size defined by n_refs */
+};
+/*
+ * Note: The string table follows immediately after the symbol table in memory!
+ */
+
+struct module {
+ struct module *next;
+ struct module_ref *ref; /* the list of modules that refer to me */
+ struct symbol_table *symtab;
+ const char *name;
+ int size; /* size of module in pages */
+ void *addr; /* address of module */
+ int state;
+ void (*cleanup)(void); /* cleanup routine */
+};
+
+struct mod_routines {
+ int (*init)(void); /* initialization routine */
+ void (*cleanup)(void); /* cleanup routine */
+};
+
+/*
+ * The first word of the module contains the use count.
+ */
+#define GET_USE_COUNT(module) (* (long *) (module)->addr)
+/*
+ * define the count variable, and usage macros.
+ */
+
+#ifdef MODULE
+
+extern long mod_use_count_;
+#define MOD_INC_USE_COUNT (mod_use_count_++, mod_use_count_ |= MOD_VISITED)
+#define MOD_DEC_USE_COUNT (mod_use_count_--, mod_use_count_ |= MOD_VISITED)
+#define MOD_IN_USE ((mod_use_count_ & ~(MOD_AUTOCLEAN | MOD_VISITED)) != 0)
+
+#ifndef __NO_VERSION__
+#include <linux/version.h>
+char kernel_version[]=UTS_RELEASE;
+#endif
+
+#if defined(MODVERSIONS) && !defined(__GENKSYMS__)
+int Using_Versions; /* gcc will handle this global (used as a flag) correctly */
+#endif
+
+#else
+
+#define EXPORT_SYMBOL(sym)
+
+#define MOD_INC_USE_COUNT do { } while (0)
+#define MOD_DEC_USE_COUNT do { } while (0)
+#define MOD_IN_USE 1
+#define SET_MODULE_OWNER(dev) do{ } while(0)
+
+#endif
+
+/* insert new symbol table */
+#define register_symtab(symtab)
+
+#endif
diff --git a/linux/src/include/linux/mount.h b/linux/src/include/linux/mount.h
new file mode 100644
index 0000000..a2f941b
--- /dev/null
+++ b/linux/src/include/linux/mount.h
@@ -0,0 +1,30 @@
+/*
+ *
+ * Definitions for mount interface. This describes the in the kernel build
+ * linkedlist with mounted filesystems.
+ *
+ * Author: Marco van Wieringen <mvw@mcs.ow.nl> <mvw@tnix.net> <mvw@cistron.nl>
+ *
+ * Version: $Id: mount.h,v 1.1 1999/04/26 05:56:47 tb Exp $
+ *
+ */
+#ifndef _LINUX_MOUNT_H
+#define _LINUX_MOUNT_H
+
+struct vfsmount
+{
+ kdev_t mnt_dev; /* Device this applies to */
+ char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
+ char *mnt_dirname; /* Name of directory mounted on */
+ unsigned int mnt_flags; /* Flags of this device */
+ struct semaphore mnt_sem; /* lock device while I/O in progress */
+ struct super_block *mnt_sb; /* pointer to superblock */
+ struct file *mnt_quotas[MAXQUOTAS]; /* fp's to quotafiles */
+ time_t mnt_iexp[MAXQUOTAS]; /* expiretime for inodes */
+ time_t mnt_bexp[MAXQUOTAS]; /* expiretime for blocks */
+ struct vfsmount *mnt_next; /* pointer to next in linkedlist */
+};
+
+struct vfsmount *lookup_vfsmnt(kdev_t dev);
+
+#endif /* _LINUX_MOUNT_H */
diff --git a/linux/src/include/linux/net.h b/linux/src/include/linux/net.h
new file mode 100644
index 0000000..a338a8e
--- /dev/null
+++ b/linux/src/include/linux/net.h
@@ -0,0 +1,130 @@
+/*
+ * NET An implementation of the SOCKET network access protocol.
+ * This is the master header file for the Linux NET layer,
+ * or, in plain English: the networking handling part of the
+ * kernel.
+ *
+ * Version: @(#)net.h 1.0.3 05/25/93
+ *
+ * Authors: Orest Zborowski, <obz@Kodak.COM>
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NET_H
+#define _LINUX_NET_H
+
+
+#include <linux/wait.h>
+#include <linux/socket.h>
+
+#define NPROTO 16 /* should be enough for now.. */
+
+
+#define SYS_SOCKET 1 /* sys_socket(2) */
+#define SYS_BIND 2 /* sys_bind(2) */
+#define SYS_CONNECT 3 /* sys_connect(2) */
+#define SYS_LISTEN 4 /* sys_listen(2) */
+#define SYS_ACCEPT 5 /* sys_accept(2) */
+#define SYS_GETSOCKNAME 6 /* sys_getsockname(2) */
+#define SYS_GETPEERNAME 7 /* sys_getpeername(2) */
+#define SYS_SOCKETPAIR 8 /* sys_socketpair(2) */
+#define SYS_SEND 9 /* sys_send(2) */
+#define SYS_RECV 10 /* sys_recv(2) */
+#define SYS_SENDTO 11 /* sys_sendto(2) */
+#define SYS_RECVFROM 12 /* sys_recvfrom(2) */
+#define SYS_SHUTDOWN 13 /* sys_shutdown(2) */
+#define SYS_SETSOCKOPT 14 /* sys_setsockopt(2) */
+#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
+#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
+#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
+
+
+typedef enum {
+ SS_FREE = 0, /* not allocated */
+ SS_UNCONNECTED, /* unconnected to any socket */
+ SS_CONNECTING, /* in process of connecting */
+ SS_CONNECTED, /* connected to socket */
+ SS_DISCONNECTING /* in process of disconnecting */
+} socket_state;
+
+#define SO_ACCEPTCON (1<<16) /* performed a listen */
+#define SO_WAITDATA (1<<17) /* wait data to read */
+#define SO_NOSPACE (1<<18) /* no space to write */
+
+#ifdef __KERNEL__
+/*
+ * Internal representation of a socket. not all the fields are used by
+ * all configurations:
+ *
+ * server client
+ * conn client connected to server connected to
+ * iconn list of clients -unused-
+ * awaiting connections
+ * wait sleep for clients, sleep for connection,
+ * sleep for i/o sleep for i/o
+ */
+struct socket {
+ short type; /* SOCK_STREAM, ... */
+ socket_state state;
+ long flags;
+ struct proto_ops *ops; /* protocols do most everything */
+ void *data; /* protocol data */
+ struct socket *conn; /* server socket connected to */
+ struct socket *iconn; /* incomplete client conn.s */
+ struct socket *next;
+ struct wait_queue **wait; /* ptr to place to wait on */
+ struct inode *inode;
+ struct fasync_struct *fasync_list; /* Asynchronous wake up list */
+ struct file *file; /* File back pointer for gc */
+};
+
+#define SOCK_INODE(S) ((S)->inode)
+
+struct proto_ops {
+ int family;
+
+ int (*create) (struct socket *sock, int protocol);
+ int (*dup) (struct socket *newsock, struct socket *oldsock);
+ int (*release) (struct socket *sock, struct socket *peer);
+ int (*bind) (struct socket *sock, struct sockaddr *umyaddr,
+ int sockaddr_len);
+ int (*connect) (struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags);
+ int (*socketpair) (struct socket *sock1, struct socket *sock2);
+ int (*accept) (struct socket *sock, struct socket *newsock,
+ int flags);
+ int (*getname) (struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer);
+ int (*select) (struct socket *sock, int sel_type,
+ select_table *wait);
+ int (*ioctl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ int (*listen) (struct socket *sock, int len);
+ int (*shutdown) (struct socket *sock, int flags);
+ int (*setsockopt) (struct socket *sock, int level, int optname,
+ char *optval, int optlen);
+ int (*getsockopt) (struct socket *sock, int level, int optname,
+ char *optval, int *optlen);
+ int (*fcntl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ int (*sendmsg) (struct socket *sock, struct msghdr *m, int total_len, int nonblock, int flags);
+ int (*recvmsg) (struct socket *sock, struct msghdr *m, int total_len, int nonblock, int flags, int *addr_len);
+};
+
+struct net_proto {
+ const char *name; /* Protocol name */
+ void (*init_func)(struct net_proto *); /* Bootstrap */
+};
+
+extern int sock_wake_async(struct socket *sock, int how);
+extern int sock_register(int family, struct proto_ops *ops);
+extern int sock_unregister(int family);
+extern struct socket *sock_alloc(void);
+extern void sock_release(struct socket *sock);
+#endif /* __KERNEL__ */
+#endif /* _LINUX_NET_H */
diff --git a/linux/src/include/linux/netdevice.h b/linux/src/include/linux/netdevice.h
new file mode 100644
index 0000000..5de278a
--- /dev/null
+++ b/linux/src/include/linux/netdevice.h
@@ -0,0 +1,313 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.11 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@super.org>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ * Added extern for fddi_setup()
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <linux/config.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+
+/* for future expansion when we will have different priorities. */
+#define DEV_NUMBUFFS 3
+#define MAX_ADDR_LEN 7
+#ifndef CONFIG_AX25
+#ifndef CONFIG_AX25_MODULE
+#ifndef CONFIG_TR
+#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE)
+#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
+#else
+#define MAX_HEADER 80 /* We need to allow for having tunnel headers */
+#endif /* IPIP */
+#else
+#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
+#endif /* TR */
+#else
+#define MAX_HEADER 96 /* AX.25 + NET/ROM module*/
+#endif /* AX.25 module */
+#else
+#define MAX_HEADER 96 /* AX.25 + NET/ROM */
+#endif /* AX.25 */
+
+#define IS_MYADDR 1 /* address is (one of) our own */
+#define IS_LOOPBACK 2 /* address is for LOOPBACK */
+#define IS_BROADCAST 3 /* address is a valid broadcast */
+#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
+#define IS_MULTICAST 5 /* Multicast IP address */
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+
+/*
+ * We tag multicasts with these structures.
+ */
+
+struct dev_mc_list
+{
+ struct dev_mc_list *next;
+ char dmi_addr[MAX_ADDR_LEN];
+ unsigned short dmi_addrlen;
+ unsigned short dmi_users;
+};
+
+struct hh_cache
+{
+ struct hh_cache *hh_next;
+ void *hh_arp; /* Opaque pointer, used by
+ * any address resolution module,
+ * not only ARP.
+ */
+ int hh_refcnt; /* number of users */
+ unsigned short hh_type; /* protocol identifier, f.e ETH_P_IP */
+ char hh_uptodate; /* hh_data is valid */
+ char hh_data[16]; /* cached hardware header */
+};
+
+/*
+ * The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ */
+struct device
+{
+
+ /*
+ * This is the first field of the "visible" part of this structure
+ * (i.e. as seen by users in the "Space.c" file). It is the name
+ * the interface.
+ */
+ char *name;
+
+ /* I/O specific fields - FIXME: Merge these and struct ifmap into one */
+ unsigned long rmem_end; /* shmem "recv" end */
+ unsigned long rmem_start; /* shmem "recv" start */
+ unsigned long mem_end; /* shared mem end */
+ unsigned long mem_start; /* shared mem start */
+ unsigned long base_addr; /* device I/O address */
+ unsigned char irq; /* device IRQ number */
+
+ /* Low-level status flags. */
+ volatile unsigned char start, /* start an operation */
+ interrupt; /* interrupt arrived */
+ unsigned long tbusy; /* transmitter busy must be long for bitops */
+
+ struct device *next;
+
+ /* The device initialization function. Called only once. */
+ int (*init)(struct device *dev);
+
+ /* Some hardware also needs these fields, but they are not part of the
+ usual set specified in Space.c. */
+ unsigned char if_port; /* Selectable AUI, TP,..*/
+ unsigned char dma; /* DMA channel */
+
+ struct enet_statistics* (*get_stats)(struct device *dev);
+
+ /*
+ * This marks the end of the "visible" part of the structure. All
+ * fields hereafter are internal to the system, and may change at
+ * will (read: may be cleaned up at will).
+ */
+
+ /* These may be needed for future network-power-down code. */
+ unsigned long trans_start; /* Time (in jiffies) of last Tx */
+ unsigned long last_rx; /* Time of last Rx */
+
+ unsigned short flags; /* interface flags (a la BSD) */
+ unsigned short family; /* address family ID (AF_INET) */
+ unsigned short metric; /* routing metric (not used) */
+ unsigned short mtu; /* interface MTU value */
+ unsigned short type; /* interface hardware type */
+ unsigned short hard_header_len; /* hardware hdr length */
+ void *priv; /* pointer to private data */
+
+ /* Interface address info. */
+ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+ unsigned char pad; /* make dev_addr aligned to 8 bytes */
+ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
+ unsigned char addr_len; /* hardware address length */
+ unsigned long pa_addr; /* protocol address */
+ unsigned long pa_brdaddr; /* protocol broadcast addr */
+ unsigned long pa_dstaddr; /* protocol P-P other side addr */
+ unsigned long pa_mask; /* protocol netmask */
+ unsigned short pa_alen; /* protocol address length */
+
+ struct dev_mc_list *mc_list; /* Multicast mac addresses */
+ int mc_count; /* Number of installed mcasts */
+
+ struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */
+ __u32 tx_queue_len; /* Max frames per queue allowed */
+
+ /* For load balancing driver pair support */
+
+ unsigned long pkt_queue; /* Packets queued */
+ struct device *slave; /* Slave device */
+ struct net_alias_info *alias_info; /* main dev alias info */
+ struct net_alias *my_alias; /* alias devs */
+
+ /* Pointer to the interface buffers. */
+ struct sk_buff_head buffs[DEV_NUMBUFFS];
+
+ /* Pointers to interface service routines. */
+ int (*open)(struct device *dev);
+ int (*stop)(struct device *dev);
+ int (*hard_start_xmit) (struct sk_buff *skb,
+ struct device *dev);
+ int (*hard_header) (struct sk_buff *skb,
+ struct device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+ int (*rebuild_header)(void *eth, struct device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+#define HAVE_MULTICAST
+ void (*set_multicast_list)(struct device *dev);
+#define HAVE_SET_MAC_ADDR
+ int (*set_mac_address)(struct device *dev, void *addr);
+#define HAVE_PRIVATE_IOCTL
+ int (*do_ioctl)(struct device *dev, struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+ int (*set_config)(struct device *dev, struct ifmap *map);
+#define HAVE_HEADER_CACHE
+ void (*header_cache_bind)(struct hh_cache **hhp, struct device *dev, unsigned short htype, __u32 daddr);
+ void (*header_cache_update)(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+#define HAVE_CHANGE_MTU
+ int (*change_mtu)(struct device *dev, int new_mtu);
+
+ struct iw_statistics* (*get_wireless_stats)(struct device *dev);
+};
+
+
+struct packet_type {
+ unsigned short type; /* This is really htons(ether_type). */
+ struct device * dev;
+ int (*func) (struct sk_buff *, struct device *,
+ struct packet_type *);
+ void *data;
+ struct packet_type *next;
+};
+
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+
+/* Used by dev_rint */
+#define IN_SKBUFF 1
+
+extern volatile unsigned long in_bh;
+
+extern struct device loopback_dev;
+extern struct device *dev_base;
+extern struct packet_type *ptype_base[16];
+
+
+extern int ip_addr_match(unsigned long addr1, unsigned long addr2);
+extern int ip_chk_addr(unsigned long addr);
+extern struct device *ip_dev_bynet(unsigned long daddr, unsigned long mask);
+extern unsigned long ip_my_addr(void);
+extern unsigned long ip_get_mask(unsigned long addr);
+extern struct device *ip_dev_find(unsigned long addr);
+extern struct device *dev_getbytype(unsigned short type);
+
+extern void dev_add_pack(struct packet_type *pt);
+extern void dev_remove_pack(struct packet_type *pt);
+extern struct device *dev_get(const char *name);
+extern int dev_open(struct device *dev);
+extern int dev_close(struct device *dev);
+extern void dev_queue_xmit(struct sk_buff *skb, struct device *dev,
+ int pri);
+
+#define HAVE_NETIF_RX 1
+extern void netif_rx(struct sk_buff *skb);
+extern void net_bh(void);
+extern void dev_tint(struct device *dev);
+extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int dev_ioctl(unsigned int cmd, void *);
+
+extern void dev_init(void);
+
+/* Locking protection for page faults during outputs to devices unloaded during the fault */
+
+extern int dev_lockct;
+
+/*
+ * These two don't currently need to be interrupt-safe
+ * but they may do soon. Do it properly anyway.
+ */
+
+extern __inline__ void dev_lock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct++;
+ restore_flags(flags);
+}
+
+extern __inline__ void dev_unlock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct--;
+ restore_flags(flags);
+}
+
+/*
+ * This almost never occurs, isn't in performance critical paths
+ * and we can thus be relaxed about it
+ */
+
+extern __inline__ void dev_lock_wait(void)
+{
+ while(dev_lockct)
+ schedule();
+}
+
+
+/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+
+extern void ether_setup(struct device *dev);
+extern void tr_setup(struct device *dev);
+extern void fddi_setup(struct device *dev);
+extern int ether_config(struct device *dev, struct ifmap *map);
+/* Support for loadable net-drivers */
+extern int register_netdev(struct device *dev);
+extern void unregister_netdev(struct device *dev);
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+/* Functions used for multicast support */
+extern void dev_mc_upload(struct device *dev);
+extern void dev_mc_delete(struct device *dev, void *addr, int alen, int all);
+extern void dev_mc_add(struct device *dev, void *addr, int alen, int newonly);
+extern void dev_mc_discard(struct device *dev);
+/* This is the wrong place but it'll do for the moment */
+extern void ip_mc_allhost(struct device *dev);
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DEV_H */
diff --git a/linux/src/include/linux/netrom.h b/linux/src/include/linux/netrom.h
new file mode 100644
index 0000000..6939b32
--- /dev/null
+++ b/linux/src/include/linux/netrom.h
@@ -0,0 +1,34 @@
+/*
+ * These are the public elements of the Linux kernel NET/ROM implementation.
+ * For kernel AX.25 see the file ax25.h. This file requires ax25.h for the
+ * definition of the ax25_address structure.
+ */
+
+#ifndef NETROM_KERNEL_H
+#define NETROM_KERNEL_H
+
+#define NETROM_MTU 236
+
+#define NETROM_T1 1
+#define NETROM_T2 2
+#define NETROM_N2 3
+#define NETROM_T4 6
+#define NETROM_IDLE 7
+
+#define SIOCNRDECOBS (SIOCPROTOPRIVATE+2)
+
+struct nr_route_struct {
+#define NETROM_NEIGH 0
+#define NETROM_NODE 1
+ int type;
+ ax25_address callsign;
+ char device[16];
+ unsigned int quality;
+ char mnemonic[7];
+ ax25_address neighbour;
+ unsigned int obs_count;
+ unsigned int ndigis;
+ ax25_address digipeaters[AX25_MAX_DIGIS];
+};
+
+#endif
diff --git a/linux/src/include/linux/notifier.h b/linux/src/include/linux/notifier.h
new file mode 100644
index 0000000..b3c9ccf
--- /dev/null
+++ b/linux/src/include/linux/notifier.h
@@ -0,0 +1,96 @@
+/*
+ * Routines to manage notifier chains for passing status changes to any
+ * interested routines. We need this instead of hard coded call lists so
+ * that modules can poke their nose into the innards. The network devices
+ * needed them so here they are for the rest of you.
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+
+struct notifier_block
+{
+ int (*notifier_call)(struct notifier_block *this, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+};
+
+
+#ifdef __KERNEL__
+
+#define NOTIFY_DONE 0x0000 /* Don't care */
+#define NOTIFY_OK 0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+
+extern __inline__ int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+{
+ while(*list)
+ {
+ if(n->priority > (*list)->priority)
+ break;
+ list= &((*list)->next);
+ }
+ n->next = *list;
+ *list=n;
+ return 0;
+}
+
+/*
+ * Warning to any non GPL module writers out there.. these functions are
+ * GPL'd
+ */
+
+extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+{
+ while((*nl)!=NULL)
+ {
+ if((*nl)==n)
+ {
+ *nl=n->next;
+ return 0;
+ }
+ nl=&((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+/*
+ * This is one of these things that is generally shorter inline
+ */
+
+extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+{
+ int ret=NOTIFY_DONE;
+ struct notifier_block *nb = *n;
+ while(nb)
+ {
+ ret=nb->notifier_call(nb,val,v);
+ if(ret&NOTIFY_STOP_MASK)
+ return ret;
+ nb=nb->next;
+ }
+ return ret;
+}
+
+
+/*
+ * Declared notifiers so far. I can imagine quite a few more chains
+ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+
+/* netdevice notifier chain */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+ detected a hardware crash and restarted
+ - we can use this eg to kick tcp sessions
+ once done */
+#endif
+#endif
diff --git a/linux/src/include/linux/pagemap.h b/linux/src/include/linux/pagemap.h
new file mode 100644
index 0000000..ac85c78
--- /dev/null
+++ b/linux/src/include/linux/pagemap.h
@@ -0,0 +1,146 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+#include <asm/system.h>
+
+/*
+ * Page-mapping primitive inline functions
+ *
+ * Copyright 1995 Linus Torvalds
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/swapctl.h>
+
+static inline unsigned long page_address(struct page * page)
+{
+ return PAGE_OFFSET + PAGE_SIZE * page->map_nr;
+}
+
+#define PAGE_HASH_BITS 11
+#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
+
+#define PAGE_AGE_VALUE ((PAGE_INITIAL_AGE)+(PAGE_ADVANCE))
+
+extern unsigned long page_cache_size; /* # of pages currently in the hash table */
+extern struct page * page_hash_table[PAGE_HASH_SIZE];
+
+/*
+ * We use a power-of-two hash table to avoid a modulus,
+ * and get a reasonable hash by knowing roughly how the
+ * inode pointer and offsets are distributed (ie, we
+ * roughly know which bits are "significant")
+ */
+static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
+{
+#define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
+#define o (offset >> PAGE_SHIFT)
+#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
+ return s(i+o) & (PAGE_HASH_SIZE-1);
+#undef i
+#undef o
+#undef s
+}
+
+#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset))
+
+static inline struct page * __find_page(struct inode * inode, unsigned long offset, struct page *page)
+{
+ goto inside;
+ for (;;) {
+ page = page->next_hash;
+inside:
+ if (!page)
+ goto not_found;
+ if (page->inode != inode)
+ continue;
+ if (page->offset == offset)
+ break;
+ }
+ /* Found the page. */
+ atomic_inc(&page->count);
+ set_bit(PG_referenced, &page->flags);
+not_found:
+ return page;
+}
+
+static inline struct page *find_page(struct inode * inode, unsigned long offset)
+{
+ return __find_page(inode, offset, *page_hash(inode, offset));
+}
+
+static inline void remove_page_from_hash_queue(struct page * page)
+{
+ struct page **p;
+ struct page *next_hash, *prev_hash;
+
+ next_hash = page->next_hash;
+ prev_hash = page->prev_hash;
+ page->next_hash = NULL;
+ page->prev_hash = NULL;
+ if (next_hash)
+ next_hash->prev_hash = prev_hash;
+ if (prev_hash)
+ prev_hash->next_hash = next_hash;
+ p = page_hash(page->inode,page->offset);
+ if (*p == page)
+ *p = next_hash;
+ page_cache_size--;
+}
+
+static inline void __add_page_to_hash_queue(struct page * page, struct page **p)
+{
+ page_cache_size++;
+ set_bit(PG_referenced, &page->flags);
+ page->age = PAGE_AGE_VALUE;
+ page->prev_hash = NULL;
+ if ((page->next_hash = *p) != NULL)
+ page->next_hash->prev_hash = page;
+ *p = page;
+}
+
+static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
+{
+ __add_page_to_hash_queue(page, page_hash(inode,offset));
+}
+
+
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+ struct inode * inode = page->inode;
+
+ page->inode = NULL;
+ inode->i_nrpages--;
+ if (inode->i_pages == page)
+ inode->i_pages = page->next;
+ if (page->next)
+ page->next->prev = page->prev;
+ if (page->prev)
+ page->prev->next = page->next;
+ page->next = NULL;
+ page->prev = NULL;
+}
+
+static inline void add_page_to_inode_queue(struct inode * inode, struct page * page)
+{
+ struct page **p = &inode->i_pages;
+
+ inode->i_nrpages++;
+ page->inode = inode;
+ page->prev = NULL;
+ if ((page->next = *p) != NULL)
+ page->next->prev = page;
+ *p = page;
+}
+
+extern void __wait_on_page(struct page *);
+static inline void wait_on_page(struct page * page)
+{
+ if (PageLocked(page))
+ __wait_on_page(page);
+}
+
+extern void update_vm_cache(struct inode *, unsigned long, const char *, int);
+
+#endif
diff --git a/linux/src/include/linux/param.h b/linux/src/include/linux/param.h
new file mode 100644
index 0000000..092e92f
--- /dev/null
+++ b/linux/src/include/linux/param.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_PARAM_H
+#define _LINUX_PARAM_H
+
+#include <asm/param.h>
+
+#endif
diff --git a/linux/src/include/linux/pci.h b/linux/src/include/linux/pci.h
new file mode 100644
index 0000000..8aad3d5
--- /dev/null
+++ b/linux/src/include/linux/pci.h
@@ -0,0 +1,1116 @@
+/*
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ */
+
+
+
+/* PROCEDURE TO REPORT NEW PCI DEVICES
+ * We are trying to collect information on new PCI devices, using
+ * the standard PCI identification procedure. If some warning is
+ * displayed at boot time, please report
+ * - /proc/pci
+ * - your exact hardware description. Try to find out
+ * which device is unknown. It may be you mainboard chipset.
+ * PCI-CPU bridge or PCI-ISA bridge.
+ * - If you can't find the actual information in your hardware
+ * booklet, try to read the references of the chip on the board.
+ * - Send all that to linux-pcisupport@cck.uni-kl.de
+ * and I'll add your device to the list as soon as possible
+ *
+ * BEFORE you send a mail, please check the latest linux releases
+ * to be sure it has not been recently added.
+ *
+ * Thanks
+ * Jens Maurer
+ */
+
+
+
+#ifndef LINUX_PCI_H
+#define LINUX_PCI_H
+
+/*
+ * Under PCI, each device has 256 bytes of configuration address space,
+ * of which the first 64 bytes are standardized as follows:
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_DEVICE_ID 0x02 /* 16 bits */
+#define PCI_COMMAND 0x04 /* 16 bits */
+#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
+#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
+#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
+#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
+#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */
+#define PCI_STATUS_UDF 0x40 /* Support User Definable Features */
+
+#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
+#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
+#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
+#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_MEDIUM 0x200
+#define PCI_STATUS_DEVSEL_SLOW 0x400
+#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
+#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
+#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
+#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
+#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8
+ revision */
+#define PCI_REVISION_ID 0x08 /* Revision ID */
+#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
+#define PCI_CLASS_DEVICE 0x0a /* Device class */
+
+#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
+#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_BIST 0x0f /* 8 bits */
+#define PCI_BIST_CODE_MASK 0x0f /* Return result */
+#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
+#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
+
+/*
+ * Base addresses specify locations in memory or I/O space.
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
+ * 1 bits are decoded.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits */
+#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits */
+#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
+#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
+#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
+#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
+#define PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
+#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
+#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
+#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define PCI_BASE_ADDRESS_MEM_MASK (~0x0f)
+#define PCI_BASE_ADDRESS_IO_MASK (~0x03)
+/* bit 1 is reserved if address_space = 1 */
+
+#define PCI_CARDBUS_CIS 0x28
+#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
+#define PCI_SUBSYSTEM_ID 0x2e
+#define PCI_ROM_ADDRESS 0x30 /* 32 bits */
+#define PCI_ROM_ADDRESS_ENABLE 0x01 /* Write 1 to enable ROM,
+ bits 31..11 are address,
+ 10..2 are reserved */
+/* 0x34-0x3b are reserved */
+#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
+#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
+#define PCI_MIN_GNT 0x3e /* 8 bits */
+#define PCI_MAX_LAT 0x3f /* 8 bits */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_SATA 0x0106
+#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a01
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+
+#define PCI_CLASS_OTHERS 0xff
+
+/*
+ * Vendor and card ID's: sort these numerically according to vendor
+ * (and according to card ID within vendor). Send all updates to
+ * <linux-pcisupport@cck.uni-kl.de>.
+ */
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_1280 0x3033
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX 0x4000
+#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+#define PCI_DEVICE_ID_NCR_53C860 0x0006
+#define PCI_DEVICE_ID_NCR_53C896 0x000b
+#define PCI_DEVICE_ID_NCR_53C895 0x000c
+#define PCI_DEVICE_ID_NCR_53C885 0x000d
+#define PCI_DEVICE_ID_NCR_53C875 0x000f
+#define PCI_DEVICE_ID_NCR_53C875J 0x008f
+
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_215GB 0x4742
+#define PCI_DEVICE_ID_ATI_215GD 0x4744
+#define PCI_DEVICE_ID_ATI_215GI 0x4749
+#define PCI_DEVICE_ID_ATI_215GP 0x4750
+#define PCI_DEVICE_ID_ATI_215GQ 0x4751
+#define PCI_DEVICE_ID_ATI_215GT 0x4754
+#define PCI_DEVICE_ID_ATI_215GTB 0x4755
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+#define PCI_DEVICE_ID_ATI_215LG 0x4c47
+#define PCI_DEVICE_ID_ATI_264LT 0x4c54
+#define PCI_DEVICE_ID_ATI_264VT 0x5654
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+#define PCI_DEVICE_ID_VLSI_82C541 0x000c
+#define PCI_DEVICE_ID_VLSI_82C543 0x000d
+#define PCI_DEVICE_ID_VLSI_82C532 0x0101
+#define PCI_DEVICE_ID_VLSI_82C534 0x0102
+#define PCI_DEVICE_ID_VLSI_82C535 0x0104
+#define PCI_DEVICE_ID_VLSI_82C147 0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87415 0x0002
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_TGA2 0x000D
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+#define PCI_DEVICE_ID_DEC_21142 0x0019
+#define PCI_DEVICE_ID_DEC_21052 0x0021
+#define PCI_DEVICE_ID_DEC_21150 0x0022
+#define PCI_DEVICE_ID_DEC_21152 0x0024
+#define PCI_DEVICE_ID_DEC_21154 0x0026
+#define PCI_DEVICE_ID_DEC_21285 0x1065
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#define PCI_DEVICE_ID_CIRRUS_7542 0x1200
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+#define PCI_DEVICE_ID_CIRRUS_7541 0x1204
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a
+#define PCI_DEVICE_ID_IBM_TR 0x0018
+#define PCI_DEVICE_ID_IBM_82G2675 0x001d
+#define PCI_DEVICE_ID_IBM_MCA 0x0020
+#define PCI_DEVICE_ID_IBM_82351 0x0022
+#define PCI_DEVICE_ID_IBM_SERVERAID 0x002e
+#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
+#define PCI_DEVICE_ID_IBM_3780IDSP 0x007d
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_7197 0x3296
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MYS 0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
+#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
+#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000
+#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+#define PCI_DEVICE_ID_CT_65548 0x00dc
+#define PCI_DEVICE_ID_CT_65550 0x00e0
+#define PCI_DEVICE_ID_CT_65554 0x00e4
+#define PCI_DEVICE_ID_CT_65555 0x00e5
+
+#define PCI_VENDOR_ID_MIRO 0x1031
+#define PCI_DEVICE_ID_MIRO_36050 0x5601
+
+#define PCI_VENDOR_ID_NEC 0x1033
+#define PCI_DEVICE_ID_NEC_PCX2 0x0046
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_ACPI 0x0009
+#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
+#define PCI_DEVICE_ID_SI_6205 0x0205
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_601 0x0601
+#define PCI_DEVICE_ID_SI_5107 0x5107
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+#define PCI_DEVICE_ID_SI_5571 0x5571
+#define PCI_DEVICE_ID_SI_5591 0x5591
+#define PCI_DEVICE_ID_SI_5597 0x5597
+#define PCI_DEVICE_ID_SI_7001 0x7001
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_0 0x3000
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_92C178 0xc178
+#define PCI_DEVICE_ID_OPTI_82C557 0xc557
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C700 0xc700
+#define PCI_DEVICE_ID_OPTI_82C701 0xc701
+#define PCI_DEVICE_ID_OPTI_82C814 0xc814
+#define PCI_DEVICE_ID_OPTI_82C822 0xc822
+#define PCI_DEVICE_ID_OPTI_82C825 0xd568
+
+#define PCI_VENDOR_ID_SGS 0x104a
+#define PCI_DEVICE_ID_SGS_2000 0x0008
+#define PCI_DEVICE_ID_SGS_1764 0x0009
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
+
+#define PCI_VENDOR_ID_TI 0x104c
+#define PCI_DEVICE_ID_TI_TVP4010 0x3d04
+#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
+#define PCI_DEVICE_ID_TI_PCI1130 0xac12
+#define PCI_DEVICE_ID_TI_PCI1131 0xac15
+#define PCI_DEVICE_ID_TI_PCI1250 0xac16
+
+#define PCI_VENDOR_ID_OAK 0x104e
+#define PCI_DEVICE_ID_OAK_OTI107 0x0107
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2 0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940 0x0940
+
+#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
+#define PCI_DEVICE_ID_PROMISE_20262 0x4d38
+#define PCI_DEVICE_ID_PROMISE_5300 0x5300
+
+#define PCI_VENDOR_ID_N9 0x105d
+#define PCI_DEVICE_ID_N9_I128 0x2309
+#define PCI_DEVICE_ID_N9_I128_2 0x2339
+#define PCI_DEVICE_ID_N9_I128_T2R 0x493d
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8891A 0x0891
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+#define PCI_DEVICE_ID_UMC_UM8881F 0x8881
+#define PCI_DEVICE_ID_UMC_UM8886F 0x8886
+#define PCI_DEVICE_ID_UMC_UM9017F 0x9017
+#define PCI_DEVICE_ID_UMC_UM8886N 0xe886
+#define PCI_DEVICE_ID_UMC_UM8891N 0xe891
+
+#define PCI_VENDOR_ID_X 0x1061
+#define PCI_DEVICE_ID_X_AGX016 0x0001
+
+#define PCI_VENDOR_ID_PICOP 0x1066
+#define PCI_DEVICE_ID_PICOP_PT86C52X 0x0001
+#define PCI_DEVICE_ID_PICOP_PT80C524 0x8002
+
+#define PCI_VENDOR_ID_MYLEX 0x1069
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V2 0x0001
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V3 0x0002
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V4 0x0010
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V5 0x0020
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
+#define PCI_DEVICE_ID_APPLE_GC 0x0002
+#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
+
+#define PCI_VENDOR_ID_NEXGEN 0x1074
+#define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022
+
+#define PCI_VENDOR_ID_CYRIX 0x1078
+#define PCI_DEVICE_ID_CYRIX_5510 0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
+#define PCI_DEVICE_ID_CYRIX_5520 0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_SMI 0x0101
+#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
+
+#define PCI_VENDOR_ID_LEADTEK 0x107d
+#define PCI_DEVICE_ID_LEADTEK_805 0x0000
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C599 0x0600
+#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
+
+#define PCI_VENDOR_ID_FOREX 0x1083
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+#define PCI_DEVICE_ID_OLICOM_OC3136 0x0001
+#define PCI_DEVICE_ID_OLICOM_OC2315 0x0011
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#define PCI_DEVICE_ID_OLICOM_OC6151 0x0021
+
+#define PCI_VENDOR_ID_SUN 0x108e
+#define PCI_DEVICE_ID_SUN_EBUS 0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
+#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
+#define PCI_DEVICE_ID_SUN_PBM 0x8000
+#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_640 0x0640
+#define PCI_DEVICE_ID_CMD_643 0x0643
+#define PCI_DEVICE_ID_CMD_646 0x0646
+#define PCI_DEVICE_ID_CMD_670 0x0670
+
+#define PCI_VENDOR_ID_VISION 0x1098
+#define PCI_DEVICE_ID_VISION_QD8500 0x0001
+#define PCI_DEVICE_ID_VISION_QD8580 0x0002
+
+#define PCI_VENDOR_ID_BROOKTREE 0x109e
+#define PCI_DEVICE_ID_BROOKTREE_848 0x0350
+#define PCI_DEVICE_ID_BROOKTREE_849A 0x0351
+#define PCI_DEVICE_ID_BROOKTREE_8474 0x8474
+
+#define PCI_VENDOR_ID_SIERRA 0x10a8
+#define PCI_DEVICE_ID_SIERRA_STB 0x0000
+
+#define PCI_VENDOR_ID_ACC 0x10aa
+#define PCI_DEVICE_ID_ACC_2056 0x0000
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_83769 0x0001
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
+
+#define PCI_VENDOR_ID_DATABOOK 0x10b3
+#define PCI_DEVICE_ID_DATABOOK_87144 0xb106
+
+#define PCI_VENDOR_ID_PLX 0x10b5
+#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9080 0x9080
+
+#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103
+
+#define PCI_VENDOR_ID_MADGE 0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2 0x0002
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C339 0x3390
+#define PCI_DEVICE_ID_3COM_3C590 0x5900
+#define PCI_DEVICE_ID_3COM_3C595TX 0x5950
+#define PCI_DEVICE_ID_3COM_3C595T4 0x5951
+#define PCI_DEVICE_ID_3COM_3C595MII 0x5952
+#define PCI_DEVICE_ID_3COM_3C900TPO 0x9000
+#define PCI_DEVICE_ID_3COM_3C900COMBO 0x9001
+#define PCI_DEVICE_ID_3COM_3C905TX 0x9050
+#define PCI_DEVICE_ID_3COM_3C905T4 0x9051
+#define PCI_DEVICE_ID_3COM_3C905B_TX 0x9055
+
+#define PCI_VENDOR_ID_SMC 0x10b8
+#define PCI_DEVICE_ID_SMC_EPIC100 0x0005
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1445 0x1445
+#define PCI_DEVICE_ID_AL_M1449 0x1449
+#define PCI_DEVICE_ID_AL_M1451 0x1451
+#define PCI_DEVICE_ID_AL_M1461 0x1461
+#define PCI_DEVICE_ID_AL_M1489 0x1489
+#define PCI_DEVICE_ID_AL_M1511 0x1511
+#define PCI_DEVICE_ID_AL_M1513 0x1513
+#define PCI_DEVICE_ID_AL_M1521 0x1521
+#define PCI_DEVICE_ID_AL_M1523 0x1523
+#define PCI_DEVICE_ID_AL_M1531 0x1531
+#define PCI_DEVICE_ID_AL_M1533 0x1533
+#define PCI_DEVICE_ID_AL_M1541 0x1541
+#define PCI_DEVICE_ID_AL_M1543 0x1543
+#define PCI_DEVICE_ID_AL_M3307 0x3307
+#define PCI_DEVICE_ID_AL_M4803 0x5215
+#define PCI_DEVICE_ID_AL_M5219 0x5219
+#define PCI_DEVICE_ID_AL_M5229 0x5229
+#define PCI_DEVICE_ID_AL_M5237 0x5237
+#define PCI_DEVICE_ID_AL_M7101 0x7101
+
+#define PCI_VENDOR_ID_MITSUBISHI 0x10ba
+
+#define PCI_VENDOR_ID_SURECOM 0x10bd
+#define PCI_DEVICE_ID_SURECOM_NE34 0x0e34
+
+#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2070 0x0001
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128V 0x0002
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZV 0x0003
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004
+
+#define PCI_VENDOR_ID_ASP 0x10cd
+#define PCI_DEVICE_ID_ASP_ABP940 0x1200
+#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
+#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
+
+#define PCI_VENDOR_ID_MACRONIX 0x10d9
+#define PCI_DEVICE_ID_MACRONIX_MX98713 0x0512
+#define PCI_DEVICE_ID_MACRONIX_MX987x5 0x0531
+
+#define PCI_VENDOR_ID_CERN 0x10dc
+#define PCI_DEVICE_ID_CERN_SPSB_PMC 0x0001
+#define PCI_DEVICE_ID_CERN_SPSB_PCI 0x0002
+#define PCI_DEVICE_ID_CERN_HIPPI_DST 0x0021
+#define PCI_DEVICE_ID_CERN_HIPPI_SRC 0x0022
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_8849 0x8849
+
+#define PCI_VENDOR_ID_TEKRAM2 0x10e1
+#define PCI_DEVICE_ID_TEKRAM2_690c 0x690c
+
+#define PCI_VENDOR_ID_TUNDRA 0x10e3
+#define PCI_DEVICE_ID_TUNDRA_CA91C042 0x0000
+
+#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_DEVICE_ID_AMCC_MYRINET 0x8043
+#define PCI_DEVICE_ID_AMCC_S5933 0x807d
+#define PCI_DEVICE_ID_AMCC_S5933_HEPC3 0x809c
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1680 0x1680
+#define PCI_DEVICE_ID_INTERG_1682 0x1682
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8029 0x8029
+#define PCI_DEVICE_ID_REALTEK_8129 0x8129
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+
+#define PCI_VENDOR_ID_TRUEVISION 0x10fa
+#define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c
+
+#define PCI_VENDOR_ID_INIT 0x1101
+#define PCI_DEVICE_ID_INIT_320P 0x9100
+#define PCI_DEVICE_ID_INIT_360P 0x9500
+
+#define PCI_VENDOR_ID_TTI 0x1103
+#define PCI_DEVICE_ID_TTI_HPT343 0x0003
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_82C505 0x0505
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_82C585 0x0585
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#define PCI_DEVICE_ID_VIA_82C595 0x0595
+#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
+#define PCI_DEVICE_ID_VIA_82C598_0 0x0598
+#define PCI_DEVICE_ID_VIA_82C926 0x0926
+#define PCI_DEVICE_ID_VIA_82C416 0x1571
+#define PCI_DEVICE_ID_VIA_82C595_97 0x1595
+#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
+#define PCI_DEVICE_ID_VIA_86C100A 0x6100
+#define PCI_DEVICE_ID_VIA_82C597_1 0x8597
+#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
+
+#define PCI_VENDOR_ID_SMC2 0x1113
+#define PCI_DEVICE_ID_SMC2_1211TX 0x1211
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x0110
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x0111
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x0112
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x0113
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x0114
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x0115
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x0120
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x0121
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x0122
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x0123
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x0124
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x0125
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200PC 0x0210
+#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
+
+#define PCI_VENDOR_ID_IMAGINGTECH 0x112f
+#define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000
+
+#define PCI_VENDOR_ID_PHILIPS 0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
+
+#define PCI_VENDOR_ID_CYCLONE 0x113c
+#define PCI_DEVICE_ID_CYCLONE_SDK 0x0001
+
+#define PCI_VENDOR_ID_ALLIANCE 0x1142
+#define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210
+#define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422
+#define PCI_DEVICE_ID_ALLIANCE_AT24 0x6424
+#define PCI_DEVICE_ID_ALLIANCE_AT3D 0x643d
+
+#define PCI_VENDOR_ID_VMIC 0x114a
+#define PCI_DEVICE_ID_VMIC_VME 0x7587
+
+#define PCI_VENDOR_ID_DIGI 0x114f
+#define PCI_DEVICE_ID_DIGI_EPC 0x0002
+#define PCI_DEVICE_ID_DIGI_RIGHTSWITCH 0x0003
+#define PCI_DEVICE_ID_DIGI_XEM 0x0004
+#define PCI_DEVICE_ID_DIGI_XR 0x0005
+#define PCI_DEVICE_ID_DIGI_CX 0x0006
+#define PCI_DEVICE_ID_DIGI_XRJ 0x0009
+#define PCI_DEVICE_ID_DIGI_EPCJ 0x000a
+#define PCI_DEVICE_ID_DIGI_XR_920 0x0027
+
+#define PCI_VENDOR_ID_MUTECH 0x1159
+#define PCI_DEVICE_ID_MUTECH_MV1000 0x0001
+
+#define PCI_VENDOR_ID_RENDITION 0x1163
+#define PCI_DEVICE_ID_RENDITION_VERITE 0x0001
+#define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000
+
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_DEVICE_ID_TOSHIBA_601 0x0601
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+
+#define PCI_VENDOR_ID_RICOH 0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+
+#define PCI_VENDOR_ID_ARTOP 0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+#define PCI_VENDOR_ID_OMEGA 0x119b
+#define PCI_DEVICE_ID_OMEGA_82C092G 0x1221
+
+#define PCI_VENDOR_ID_LITEON 0x11ad
+#define PCI_DEVICE_ID_LITEON_LNE100TX 0x0002
+
+#define PCI_VENDOR_ID_NP 0x11bc
+#define PCI_DEVICE_ID_NP_PCI_FDDI 0x0001
+
+#define PCI_VENDOR_ID_ATT 0x11c1
+#define PCI_DEVICE_ID_ATT_L56XMF 0x0440
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000
+#define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000
+#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
+
+#define PCI_VENDOR_ID_AURAVISION 0x11d1
+#define PCI_DEVICE_ID_AURAVISION_VXP524 0x01f7
+
+#define PCI_VENDOR_ID_IKON 0x11d5
+#define PCI_DEVICE_ID_IKON_10115 0x0115
+#define PCI_DEVICE_ID_IKON_10117 0x0117
+
+#define PCI_VENDOR_ID_ZORAN 0x11de
+#define PCI_DEVICE_ID_ZORAN_36057 0x6057
+#define PCI_DEVICE_ID_ZORAN_36120 0x6120
+
+#define PCI_VENDOR_ID_KINETIC 0x11f4
+#define PCI_DEVICE_ID_KINETIC_2915 0x2915
+
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+#define PCI_DEVICE_ID_COMPEX_RL2000 0x1401
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP8OCTA 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP32INTF 0x0004
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
+#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
+#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
+#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
+#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
+
+#define PCI_VENDOR_ID_ESSENTIAL 0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
+
+#define PCI_VENDOR_ID_O2 0x1217
+#define PCI_DEVICE_ID_O2_6832 0x6832
+
+#define PCI_VENDOR_ID_3DFX 0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
+
+#define PCI_VENDOR_ID_SIGMADES 0x1236
+#define PCI_DEVICE_ID_SIGMADES_6425 0x6401
+
+#define PCI_VENDOR_ID_CCUBE 0x123f
+
+#define PCI_VENDOR_ID_DIPIX 0x1246
+
+#define PCI_VENDOR_ID_STALLION 0x124d
+#define PCI_DEVICE_ID_STALLION_ECHPCI832 0x0000
+#define PCI_DEVICE_ID_STALLION_ECHPCI864 0x0002
+#define PCI_DEVICE_ID_STALLION_EIOPCI 0x0003
+
+#define PCI_VENDOR_ID_OPTIBASE 0x1255
+#define PCI_DEVICE_ID_OPTIBASE_FORGE 0x1110
+#define PCI_DEVICE_ID_OPTIBASE_FUSION 0x1210
+#define PCI_DEVICE_ID_OPTIBASE_VPLEX 0x2110
+#define PCI_DEVICE_ID_OPTIBASE_VPLEXCC 0x2120
+#define PCI_DEVICE_ID_OPTIBASE_VQUEST 0x2130
+
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_88140 0x1400
+
+#define PCI_VENDOR_ID_SATSAGEM 0x1267
+#define PCI_DEVICE_ID_SATSAGEM_PCR2101 0x5352
+#define PCI_DEVICE_ID_SATSAGEM_TELSATTURBO 0x5a4b
+
+#define PCI_VENDOR_ID_ENSONIQ 0x1274
+#define PCI_DEVICE_ID_ENSONIQ_AUDIOPCI 0x5000
+
+#define PCI_VENDOR_ID_PICTUREL 0x12c5
+#define PCI_DEVICE_ID_PICTUREL_PCIVST 0x0081
+
+#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_VENDOR_ID_CBOARDS 0x1307
+#define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
+
+#define PCI_VENDOR_ID_SYMPHONY 0x1c1c
+#define PCI_DEVICE_ID_SYMPHONY_101 0x0001
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_3DLABS 0x3d3d
+#define PCI_DEVICE_ID_3DLABS_300SX 0x0001
+#define PCI_DEVICE_ID_3DLABS_500TX 0x0002
+#define PCI_DEVICE_ID_3DLABS_DELTA 0x0003
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA 0x0004
+#define PCI_DEVICE_ID_3DLABS_MX 0x0006
+
+#define PCI_VENDOR_ID_AVANCE 0x4005
+#define PCI_DEVICE_ID_AVANCE_ALG2064 0x2064
+#define PCI_DEVICE_ID_AVANCE_2302 0x2302
+
+#define PCI_VENDOR_ID_NETVIN 0x4a14
+#define PCI_DEVICE_ID_NETVIN_NV5000SC 0x5000
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_PLATO_PXS 0x0551
+#define PCI_DEVICE_ID_S3_ViRGE 0x5631
+#define PCI_DEVICE_ID_S3_TRIO 0x8811
+#define PCI_DEVICE_ID_S3_AURORA64VP 0x8812
+#define PCI_DEVICE_ID_S3_TRIO64UVP 0x8814
+#define PCI_DEVICE_ID_S3_ViRGE_VX 0x883d
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_928 0x88b0
+#define PCI_DEVICE_ID_S3_864_1 0x88c0
+#define PCI_DEVICE_ID_S3_864_2 0x88c1
+#define PCI_DEVICE_ID_S3_964_1 0x88d0
+#define PCI_DEVICE_ID_S3_964_2 0x88d1
+#define PCI_DEVICE_ID_S3_968 0x88f0
+#define PCI_DEVICE_ID_S3_TRIO64V2 0x8901
+#define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902
+#define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01
+#define PCI_DEVICE_ID_S3_ViRGE_GX2 0x8a10
+#define PCI_DEVICE_ID_S3_ViRGE_MX 0x8c01
+#define PCI_DEVICE_ID_S3_ViRGE_MXP 0x8c02
+#define PCI_DEVICE_ID_S3_ViRGE_MXPMV 0x8c03
+#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
+
+#define PCI_VENDOR_ID_DCI 0x6666
+#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82430 0x0486
+#define PCI_DEVICE_ID_INTEL_82434 0x04a3
+#define PCI_DEVICE_ID_INTEL_I960 0x0960
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
+#define PCI_DEVICE_ID_INTEL_7116 0x1223
+#define PCI_DEVICE_ID_INTEL_82596 0x1226
+#define PCI_DEVICE_ID_INTEL_82865 0x1227
+#define PCI_DEVICE_ID_INTEL_82557 0x1229
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371_1 0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
+#define PCI_DEVICE_ID_INTEL_82437MX 0x1235
+#define PCI_DEVICE_ID_INTEL_82441 0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
+#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
+#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
+#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71A0
+#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71A1
+#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71A2
+#define PCI_DEVICE_ID_INTEL_P6 0x84c4
+#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
+
+#define PCI_VENDOR_ID_KTI 0x8e2e
+#define PCI_DEVICE_ID_KTI_ET32P2 0x3000
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
+#define PCI_DEVICE_ID_ADAPTEC_5800 0x5800
+#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878
+#define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78
+
+#define PCI_VENDOR_ID_ADAPTEC2 0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051
+#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf
+
+#define PCI_VENDOR_ID_ATRONICS 0x907f
+#define PCI_DEVICE_ID_ATRONICS_2015 0x2015
+
+#define PCI_VENDOR_ID_HOLTEK 0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+
+#define PCI_VENDOR_ID_TIGERJET 0xe159
+#define PCI_DEVICE_ID_TIGERJET_300 0x0001
+
+#define PCI_VENDOR_ID_ARK 0xedd8
+#define PCI_DEVICE_ID_ARK_STING 0xa091
+#define PCI_DEVICE_ID_ARK_STINGARK 0xa099
+#define PCI_DEVICE_ID_ARK_2000MT 0xa0a1
+
+#ifdef __KERNEL__
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ */
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+
+/*
+ * There is one pci_dev structure for each slot-number/function-number
+ * combination:
+ */
+struct pci_dev {
+ struct pci_bus *bus; /* bus this device is on */
+ struct pci_dev *sibling; /* next device on this bus */
+ struct pci_dev *next; /* chain of all devices */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor;
+ unsigned short device;
+ unsigned int class; /* 3 bytes: (base,sub,prog-if) */
+ unsigned int master : 1; /* set if device is master capable */
+ /*
+ * In theory, the irq level can be read from configuration
+ * space and all would be fine. However, old PCI chips don't
+ * support these registers and return 0 instead. For example,
+ * the Vision864-P rev 0 chip can uses INTA, but returns 0 in
+ * the interrupt line and pin registers. pci_init()
+ * initializes this field with the value at PCI_INTERRUPT_LINE
+ * and it is the job of pcibios_fixup() to change it if
+ * necessary. The field must not be 0 unless the device
+ * cannot generate interrupts at all.
+ */
+ unsigned char irq; /* irq generated by this device */
+};
+
+struct pci_bus {
+ struct pci_bus *parent; /* parent bus this bridge is on */
+ struct pci_bus *children; /* chain of P2P bridges on this bus */
+ struct pci_bus *next; /* chain of all PCI buses */
+
+ struct pci_dev *self; /* bridge device as seen by parent */
+ struct pci_dev *devices; /* devices behind this bridge */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned char number; /* bus number */
+ unsigned char primary; /* number of primary bridge */
+ unsigned char secondary; /* number of secondary bridge */
+ unsigned char subordinate; /* max number of subordinate buses */
+};
+
+/*
+ * This is used to map a vendor-id/device-id pair into device-specific
+ * information.
+ */
+struct pci_dev_info {
+ unsigned short vendor; /* vendor id */
+ unsigned short device; /* device id */
+
+ const char *name; /* device name */
+ unsigned char bridge_type; /* bridge type or 0xff */
+};
+
+extern struct pci_bus pci_root; /* root bus */
+extern struct pci_dev *pci_devices; /* list of all devices */
+
+
+extern unsigned long pci_init (unsigned long mem_start, unsigned long mem_end);
+
+extern struct pci_dev_info *pci_lookup_dev (unsigned int vendor,
+ unsigned int dev);
+extern const char *pci_strclass (unsigned int class);
+extern const char *pci_strvendor (unsigned int vendor);
+extern const char *pci_strdev (unsigned int vendor, unsigned int device);
+
+extern int get_pci_list (char *buf);
+
+#endif /* __KERNEL__ */
+#endif /* LINUX_PCI_H */
diff --git a/linux/src/include/linux/personality.h b/linux/src/include/linux/personality.h
new file mode 100644
index 0000000..aa09f46
--- /dev/null
+++ b/linux/src/include/linux/personality.h
@@ -0,0 +1,55 @@
+#ifndef _PERSONALITY_H
+#define _PERSONALITY_H
+
+#include <linux/linkage.h>
+#include <linux/ptrace.h>
+
+
+/* Flags for bug emulation. These occupy the top three bytes. */
+#define STICKY_TIMEOUTS 0x4000000
+#define WHOLE_SECONDS 0x2000000
+#define ADDR_MAX_32BIT 0x1000000
+#define ADDR_MAX_31BIT 0x0800000
+
+/* Personality types. These go in the low byte. Avoid using the top bit,
+ * it will conflict with error returns.
+ */
+#define PER_MASK (0x00ff)
+#define PER_LINUX (0x0000)
+#define PER_LINUX_32BIT (PER_LINUX | ADDR_MAX_32BIT)
+#define PER_LINUX_EM86 (PER_LINUX | ADDR_MAX_31BIT)
+#define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
+#define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
+#define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
+#define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
+#define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
+#define PER_BSD (0x0006)
+#define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
+
+/* Prototype for an lcall7 syscall handler. */
+typedef void (*lcall7_func)(struct pt_regs *);
+
+
+/* Description of an execution domain - personality range supported,
+ * lcall7 syscall handler, start up / shut down functions etc.
+ * N.B. The name and lcall7 handler must be where they are since the
+ * offset of the handler is hard coded in kernel/sys_call.S.
+ */
+struct exec_domain {
+ const char *name;
+ lcall7_func handler;
+ unsigned char pers_low, pers_high;
+ unsigned long * signal_map;
+ unsigned long * signal_invmap;
+ long *use_count;
+ struct exec_domain *next;
+};
+
+extern struct exec_domain default_exec_domain;
+
+extern struct exec_domain *lookup_exec_domain(unsigned long personality);
+extern int register_exec_domain(struct exec_domain *it);
+extern int unregister_exec_domain(struct exec_domain *it);
+asmlinkage int sys_personality(unsigned long personality);
+
+#endif /* _PERSONALITY_H */
diff --git a/linux/src/include/linux/posix_types.h b/linux/src/include/linux/posix_types.h
new file mode 100644
index 0000000..d3ceb0b
--- /dev/null
+++ b/linux/src/include/linux/posix_types.h
@@ -0,0 +1,50 @@
+#ifndef _LINUX_POSIX_TYPES_H
+#define _LINUX_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+#ifndef NULL
+# define NULL ((void *) 0)
+#endif
+
+/*
+ * This allows for 1024 file descriptors: if NR_OPEN is ever grown
+ * beyond that you'll have to change this too. But 1024 fd's seem to be
+ * enough even for such "real" unices like OSF/1, so hopefully this is
+ * one limit that doesn't have to be changed [again].
+ *
+ * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
+ * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
+ * place for them. Solved by having dummy defines in <sys/time.h>.
+ */
+
+/*
+ * Those macros may have been defined in <gnu/types.h>. But we always
+ * use the ones here.
+ */
+#undef __NFDBITS
+#define __NFDBITS (8 * sizeof(unsigned long))
+
+#undef __FD_SETSIZE
+#define __FD_SETSIZE 1024
+
+#undef __FDSET_LONGS
+#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
+
+#undef __FDELT
+#define __FDELT(d) ((d) / __NFDBITS)
+
+#undef __FDMASK
+#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
+
+typedef struct {
+ unsigned long fds_bits [__FDSET_LONGS];
+} __kernel_fd_set;
+
+#include <asm/posix_types.h>
+
+#endif /* _LINUX_POSIX_TYPES_H */
diff --git a/linux/src/include/linux/proc_fs.h b/linux/src/include/linux/proc_fs.h
new file mode 100644
index 0000000..bbb74dd
--- /dev/null
+++ b/linux/src/include/linux/proc_fs.h
@@ -0,0 +1,292 @@
+#ifndef _LINUX_PROC_FS_H
+#define _LINUX_PROC_FS_H
+
+#include <linux/fs.h>
+#include <linux/malloc.h>
+
+/*
+ * The proc filesystem constants/structures
+ */
+
+/*
+ * We always define these enumerators
+ */
+
+enum root_directory_inos {
+ PROC_ROOT_INO = 1,
+ PROC_LOADAVG,
+ PROC_UPTIME,
+ PROC_MEMINFO,
+ PROC_KMSG,
+ PROC_VERSION,
+ PROC_CPUINFO,
+ PROC_PCI,
+ PROC_SELF, /* will change inode # */
+ PROC_NET,
+ PROC_SCSI,
+ PROC_MALLOC,
+ PROC_KCORE,
+ PROC_MODULES,
+ PROC_STAT,
+ PROC_DEVICES,
+ PROC_INTERRUPTS,
+ PROC_FILESYSTEMS,
+ PROC_KSYMS,
+ PROC_DMA,
+ PROC_IOPORTS,
+#ifdef __SMP_PROF__
+ PROC_SMP_PROF,
+#endif
+ PROC_PROFILE, /* whether enabled or not */
+ PROC_CMDLINE,
+ PROC_SYS,
+ PROC_MTAB,
+ PROC_MD,
+ PROC_RTC,
+ PROC_LOCKS
+};
+
+enum pid_directory_inos {
+ PROC_PID_INO = 2,
+ PROC_PID_STATUS,
+ PROC_PID_MEM,
+ PROC_PID_CWD,
+ PROC_PID_ROOT,
+ PROC_PID_EXE,
+ PROC_PID_FD,
+ PROC_PID_ENVIRON,
+ PROC_PID_CMDLINE,
+ PROC_PID_STAT,
+ PROC_PID_STATM,
+ PROC_PID_MAPS
+};
+
+enum pid_subdirectory_inos {
+ PROC_PID_FD_DIR = 1
+};
+
+enum net_directory_inos {
+ PROC_NET_UNIX = 128,
+ PROC_NET_ARP,
+ PROC_NET_ROUTE,
+ PROC_NET_DEV,
+ PROC_NET_RAW,
+ PROC_NET_TCP,
+ PROC_NET_UDP,
+ PROC_NET_SNMP,
+ PROC_NET_RARP,
+ PROC_NET_IGMP,
+ PROC_NET_IPMR_VIF,
+ PROC_NET_IPMR_MFC,
+ PROC_NET_IPFWFWD,
+ PROC_NET_IPFWIN,
+ PROC_NET_IPFWOUT,
+ PROC_NET_IPACCT,
+ PROC_NET_IPMSQHST,
+ PROC_NET_WIRELESS,
+ PROC_NET_IPX_INTERFACE,
+ PROC_NET_IPX_ROUTE,
+ PROC_NET_IPX,
+ PROC_NET_ATALK,
+ PROC_NET_AT_ROUTE,
+ PROC_NET_ATIF,
+ PROC_NET_AX25_ROUTE,
+ PROC_NET_AX25,
+ PROC_NET_AX25_CALLS,
+ PROC_NET_NR_NODES,
+ PROC_NET_NR_NEIGH,
+ PROC_NET_NR,
+ PROC_NET_SOCKSTAT,
+ PROC_NET_RTCACHE,
+ PROC_NET_AX25_BPQETHER,
+ PROC_NET_ALIAS_TYPES,
+ PROC_NET_ALIASES,
+ PROC_NET_IP_MASQ_APP,
+ PROC_NET_STRIP_STATUS,
+ PROC_NET_STRIP_TRACE,
+ PROC_NET_IPAUTOFW,
+ PROC_NET_RS_NODES,
+ PROC_NET_RS_NEIGH,
+ PROC_NET_RS_ROUTES,
+ PROC_NET_RS,
+ PROC_NET_Z8530,
+ PROC_NET_LAST
+};
+
+enum scsi_directory_inos {
+ PROC_SCSI_SCSI = 256,
+ PROC_SCSI_ADVANSYS,
+ PROC_SCSI_EATA,
+ PROC_SCSI_EATA_PIO,
+ PROC_SCSI_AHA152X,
+ PROC_SCSI_AHA1542,
+ PROC_SCSI_AHA1740,
+ PROC_SCSI_AIC7XXX,
+ PROC_SCSI_BUSLOGIC,
+ PROC_SCSI_U14_34F,
+ PROC_SCSI_FDOMAIN,
+ PROC_SCSI_GENERIC_NCR5380,
+ PROC_SCSI_IN2000,
+ PROC_SCSI_PAS16,
+ PROC_SCSI_QLOGICFAS,
+ PROC_SCSI_QLOGICISP,
+ PROC_SCSI_SEAGATE,
+ PROC_SCSI_T128,
+ PROC_SCSI_DC390T,
+ PROC_SCSI_NCR53C7xx,
+ PROC_SCSI_NCR53C8XX,
+ PROC_SCSI_ULTRASTOR,
+ PROC_SCSI_7000FASST,
+ PROC_SCSI_EATA2X,
+ PROC_SCSI_AM53C974,
+ PROC_SCSI_SSC,
+ PROC_SCSI_NCR53C406A,
+ PROC_SCSI_MEGARAID,
+ PROC_SCSI_PPA,
+ PROC_SCSI_ESP,
+ PROC_SCSI_A3000,
+ PROC_SCSI_A2091,
+ PROC_SCSI_GVP11,
+ PROC_SCSI_ATARI,
+ PROC_SCSI_GDTH,
+ PROC_SCSI_IDESCSI,
+ PROC_SCSI_SCSI_DEBUG,
+ PROC_SCSI_NOT_PRESENT,
+ PROC_SCSI_FILE, /* I'm assuming here that we */
+ PROC_SCSI_LAST = (PROC_SCSI_FILE + 16) /* won't ever see more than */
+}; /* 16 HBAs in one machine */
+
+/* Finally, the dynamically allocatable proc entries are reserved: */
+
+#define PROC_DYNAMIC_FIRST 4096
+#define PROC_NDYNAMIC 4096
+
+#define PROC_SUPER_MAGIC 0x9fa0
+
+/*
+ * This is not completely implemented yet. The idea is to
+ * create a in-memory tree (like the actual /proc filesystem
+ * tree) of these proc_dir_entries, so that we can dynamically
+ * add new files to /proc.
+ *
+ * The "next" pointer creates a linked list of one /proc directory,
+ * while parent/subdir create the directory structure (every
+ * /proc file has a parent, but "subdir" is NULL for all
+ * non-directory entries).
+ *
+ * "get_info" is called at "read", while "fill_inode" is used to
+ * fill in file type/protection/owner information specific to the
+ * particular /proc file.
+ */
+struct proc_dir_entry {
+ unsigned short low_ino;
+ unsigned short namelen;
+ const char *name;
+ mode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ unsigned long size;
+ struct inode_operations * ops;
+ int (*get_info)(char *, char **, off_t, int, int);
+ void (*fill_inode)(struct inode *);
+ struct proc_dir_entry *next, *parent, *subdir;
+ void *data;
+};
+
+extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+extern struct proc_dir_entry proc_root;
+extern struct proc_dir_entry proc_net;
+extern struct proc_dir_entry proc_scsi;
+extern struct proc_dir_entry proc_sys;
+extern struct proc_dir_entry proc_pid;
+extern struct proc_dir_entry proc_pid_fd;
+
+extern struct inode_operations proc_scsi_inode_operations;
+
+extern void proc_root_init(void);
+extern void proc_base_init(void);
+extern void proc_net_init(void);
+
+extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *);
+extern int proc_register_dynamic(struct proc_dir_entry *,
+ struct proc_dir_entry *);
+extern int proc_unregister(struct proc_dir_entry *, int);
+
+static inline int proc_net_register(struct proc_dir_entry * x)
+{
+ return proc_register(&proc_net, x);
+}
+
+static inline int proc_net_unregister(int x)
+{
+ return proc_unregister(&proc_net, x);
+}
+
+static inline int proc_scsi_register(struct proc_dir_entry *driver,
+ struct proc_dir_entry *x)
+{
+ x->ops = &proc_scsi_inode_operations;
+ if(x->low_ino < PROC_SCSI_FILE){
+ return(proc_register(&proc_scsi, x));
+ }else{
+ return(proc_register(driver, x));
+ }
+}
+
+static inline int proc_scsi_unregister(struct proc_dir_entry *driver, int x)
+{
+ extern void scsi_init_free(char *ptr, unsigned int size);
+
+ if(x <= PROC_SCSI_FILE)
+ return(proc_unregister(&proc_scsi, x));
+ else {
+ struct proc_dir_entry **p = &driver->subdir, *dp;
+ int ret;
+
+ while ((dp = *p) != NULL) {
+ if (dp->low_ino == x)
+ break;
+ p = &dp->next;
+ }
+ ret = proc_unregister(driver, x);
+ scsi_init_free((char *) dp, sizeof(struct proc_dir_entry) + 4);
+ return(ret);
+ }
+}
+
+extern struct super_block *proc_read_super(struct super_block *,void *,int);
+extern int init_proc_fs(void);
+extern struct inode * proc_get_inode(struct super_block *, int, struct proc_dir_entry *);
+extern void proc_statfs(struct super_block *, struct statfs *, int);
+extern void proc_read_inode(struct inode *);
+extern void proc_write_inode(struct inode *);
+extern int proc_match(int, const char *, struct proc_dir_entry *);
+
+/*
+ * These are generic /proc routines that use the internal
+ * "struct proc_dir_entry" tree to traverse the filesystem.
+ *
+ * The /proc root directory has extended versions to take care
+ * of the /proc/<pid> subdirectories.
+ */
+extern int proc_readdir(struct inode *, struct file *, void *, filldir_t);
+extern int proc_lookup(struct inode *, const char *, int, struct inode **);
+
+extern struct inode_operations proc_dir_inode_operations;
+extern struct inode_operations proc_net_inode_operations;
+extern struct inode_operations proc_netdir_inode_operations;
+extern struct inode_operations proc_scsi_inode_operations;
+extern struct inode_operations proc_mem_inode_operations;
+extern struct inode_operations proc_sys_inode_operations;
+extern struct inode_operations proc_array_inode_operations;
+extern struct inode_operations proc_arraylong_inode_operations;
+extern struct inode_operations proc_kcore_inode_operations;
+extern struct inode_operations proc_profile_inode_operations;
+extern struct inode_operations proc_kmsg_inode_operations;
+extern struct inode_operations proc_link_inode_operations;
+extern struct inode_operations proc_fd_inode_operations;
+
+#endif
diff --git a/linux/src/include/linux/ptrace.h b/linux/src/include/linux/ptrace.h
new file mode 100644
index 0000000..0a02879
--- /dev/null
+++ b/linux/src/include/linux/ptrace.h
@@ -0,0 +1,26 @@
+#ifndef _LINUX_PTRACE_H
+#define _LINUX_PTRACE_H
+/* ptrace.h */
+/* structs and defines to help the user use the ptrace system call. */
+
+/* has the defines to get at the registers. */
+
+#define PTRACE_TRACEME 0
+#define PTRACE_PEEKTEXT 1
+#define PTRACE_PEEKDATA 2
+#define PTRACE_PEEKUSR 3
+#define PTRACE_POKETEXT 4
+#define PTRACE_POKEDATA 5
+#define PTRACE_POKEUSR 6
+#define PTRACE_CONT 7
+#define PTRACE_KILL 8
+#define PTRACE_SINGLESTEP 9
+
+#define PTRACE_ATTACH 0x10
+#define PTRACE_DETACH 0x11
+
+#define PTRACE_SYSCALL 24
+
+#include <asm/ptrace.h>
+
+#endif
diff --git a/linux/src/include/linux/quota.h b/linux/src/include/linux/quota.h
new file mode 100644
index 0000000..5a394c5
--- /dev/null
+++ b/linux/src/include/linux/quota.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Robert Elz at The University of Melbourne.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Version: $Id: quota.h,v 1.1 1999/04/26 05:56:57 tb Exp $
+ */
+
+#ifndef _LINUX_QUOTA_
+#define _LINUX_QUOTA_
+
+#include <linux/errno.h>
+
+/*
+ * Convert diskblocks to blocks and the other way around.
+ * currently only to fool the BSD source. :-)
+ */
+#define dbtob(num) (num << 10)
+#define btodb(num) (num >> 10)
+
+/*
+ * Convert count of filesystem blocks to diskquota blocks, meant
+ * for filesystems where i_blksize != BLOCK_SIZE
+ */
+#define fs_to_dq_blocks(num, blksize) (((num) * (blksize)) / BLOCK_SIZE)
+
+/*
+ * Definitions for disk quotas imposed on the average user
+ * (big brother finally hits Linux).
+ *
+ * The following constants define the amount of time given a user
+ * before the soft limits are treated as hard limits (usually resulting
+ * in an allocation failure). The timer is started when the user crosses
+ * their soft limit, it is reset when they go below their soft limit.
+ */
+#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
+#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */
+
+#define MAXQUOTAS 2
+#define USRQUOTA 0 /* element used for user quotas */
+#define GRPQUOTA 1 /* element used for group quotas */
+
+/*
+ * Definitions for the default names of the quotas files.
+ */
+#define INITQFNAMES { \
+ "user", /* USRQUOTA */ \
+ "group", /* GRPQUOTA */ \
+ "undefined", \
+};
+
+#define QUOTAFILENAME "quota"
+#define QUOTAGROUP "staff"
+
+#define NR_DQHASH 43 /* Just an arbitrary number any suggestions ? */
+#define NR_DQUOTS 256 /* Number of quotas active at one time */
+
+/*
+ * Command definitions for the 'quotactl' system call.
+ * The commands are broken into a main command defined below
+ * and a subcommand that is used to convey the type of
+ * quota that is being manipulated (see above).
+ */
+#define SUBCMDMASK 0x00ff
+#define SUBCMDSHIFT 8
+#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK))
+
+#define Q_QUOTAON 0x0100 /* enable quotas */
+#define Q_QUOTAOFF 0x0200 /* disable quotas */
+#define Q_GETQUOTA 0x0300 /* get limits and usage */
+#define Q_SETQUOTA 0x0400 /* set limits and usage */
+#define Q_SETUSE 0x0500 /* set usage */
+#define Q_SYNC 0x0600 /* sync disk copy of a filesystems quotas */
+#define Q_SETQLIM 0x0700 /* set limits */
+#define Q_GETSTATS 0x0800 /* get collected stats */
+
+/*
+ * The following structure defines the format of the disk quota file
+ * (as it appears on disk) - the file is an array of these structures
+ * indexed by user or group number.
+ */
+struct dqblk {
+ __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */
+ __u32 dqb_bsoftlimit; /* preferred limit on disk blks */
+ __u32 dqb_curblocks; /* current block count */
+ __u32 dqb_ihardlimit; /* maximum # allocated inodes */
+ __u32 dqb_isoftlimit; /* preferred inode limit */
+ __u32 dqb_curinodes; /* current # allocated inodes */
+ time_t dqb_btime; /* time limit for excessive disk use */
+ time_t dqb_itime; /* time limit for excessive files */
+};
+
+/*
+ * Shorthand notation.
+ */
+#define dq_bhardlimit dq_dqb.dqb_bhardlimit
+#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit
+#define dq_curblocks dq_dqb.dqb_curblocks
+#define dq_ihardlimit dq_dqb.dqb_ihardlimit
+#define dq_isoftlimit dq_dqb.dqb_isoftlimit
+#define dq_curinodes dq_dqb.dqb_curinodes
+#define dq_btime dq_dqb.dqb_btime
+#define dq_itime dq_dqb.dqb_itime
+
+#define dqoff(UID) ((off_t)((UID) * sizeof (struct dqblk)))
+
+struct dqstats {
+ __u32 lookups;
+ __u32 drops;
+ __u32 reads;
+ __u32 writes;
+ __u32 cache_hits;
+ __u32 pages_allocated;
+ __u32 allocated_dquots;
+ __u32 free_dquots;
+ __u32 syncs;
+};
+
+#ifdef __KERNEL__
+
+#include <linux/mount.h>
+
+/*
+ * Maximum length of a message generated in the quota system,
+ * that needs to be kicked onto the tty.
+ */
+#define MAX_QUOTA_MESSAGE 75
+
+#define DQ_LOCKED 0x01 /* locked for update */
+#define DQ_WANT 0x02 /* wanted for update */
+#define DQ_MOD 0x04 /* dquot modified since read */
+#define DQ_BLKS 0x10 /* uid/gid has been warned about blk limit */
+#define DQ_INODES 0x20 /* uid/gid has been warned about inode limit */
+#define DQ_FAKE 0x40 /* no limits only usage */
+
+struct dquot {
+ unsigned int dq_id; /* id this applies to (uid, gid) */
+ short dq_type; /* type of quota */
+ kdev_t dq_dev; /* Device this applies to */
+ short dq_flags; /* see DQ_* */
+ short dq_count; /* reference count */
+ short dq_locknest; /* lock nesting */
+ struct task_struct *dq_lockproc; /* process holding the lock */
+ struct vfsmount *dq_mnt; /* vfsmountpoint this applies to */
+ struct dqblk dq_dqb; /* diskquota usage */
+ struct wait_queue *dq_wait; /* pointer to waitqueue */
+ struct dquot *dq_prev; /* pointer to prev dquot */
+ struct dquot *dq_next; /* pointer to next dquot */
+ struct dquot *dq_hash_prev; /* pointer to prev dquot */
+ struct dquot *dq_hash_next; /* pointer to next dquot */
+};
+
+#define NODQUOT (struct dquot *)NULL
+
+/*
+ * Flags used for set_dqblk.
+ */
+#define QUOTA_SYSCALL 0x01
+#define SET_QUOTA 0x02
+#define SET_USE 0x04
+#define SET_QLIMIT 0x08
+
+#define QUOTA_OK 0
+#define NO_QUOTA 1
+
+/*
+ * declaration of quota_function calls in kernel.
+ */
+
+extern void dquot_initialize(struct inode *inode, short type);
+extern void dquot_drop(struct inode *inode);
+extern int dquot_alloc_block(const struct inode *inode, unsigned long number);
+extern int dquot_alloc_inode(const struct inode *inode, unsigned long number);
+extern void dquot_free_block(const struct inode *inode, unsigned long number);
+extern void dquot_free_inode(const struct inode *inode, unsigned long number);
+extern int dquot_transfer(struct inode *inode, struct iattr *iattr, char direction);
+
+extern void invalidate_dquots(kdev_t dev, short type);
+extern int quota_off(kdev_t dev, short type);
+extern int sync_dquots(kdev_t dev, short type);
+
+#else
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+int quotactl __P ((int, const char *, int, caddr_t));
+__END_DECLS
+
+#endif /* __KERNEL__ */
+#endif /* _QUOTA_ */
diff --git a/linux/src/include/linux/random.h b/linux/src/include/linux/random.h
new file mode 100644
index 0000000..b47367a
--- /dev/null
+++ b/linux/src/include/linux/random.h
@@ -0,0 +1,70 @@
+/*
+ * include/linux/random.h
+ *
+ * Include file for the random number generator.
+ */
+
+#ifndef _LINUX_RANDOM_H
+#define _LINUX_RANDOM_H
+
+#include <linux/ioctl.h>
+
+/* ioctl()'s for the random number generator */
+
+/* Get the entropy count. */
+#define RNDGETENTCNT _IOR( 'R', 0x00, int )
+
+/* Add to (or subtract from) the entropy count. (Superuser only.) */
+#define RNDADDTOENTCNT _IOW( 'R', 0x01, int )
+
+/* Get the contents of the entropy pool. (Superuser only.) */
+#define RNDGETPOOL _IOR( 'R', 0x02, int [2] )
+
+/*
+ * Write bytes into the entropy pool and add to the entropy count.
+ * (Superuser only.)
+ */
+#define RNDADDENTROPY _IOW( 'R', 0x03, int [2] )
+
+/* Clear entropy count to 0. (Superuser only.) */
+#define RNDZAPENTCNT _IO( 'R', 0x04 )
+
+/* Clear the entropy pool and associated counters. (Superuser only.) */
+#define RNDCLEARPOOL _IO( 'R', 0x06 )
+
+struct rand_pool_info {
+ int entropy_count;
+ int buf_size;
+ __u32 buf[0];
+};
+
+/* Exported functions */
+
+#ifdef __KERNEL__
+
+extern void rand_initialize(void);
+extern void rand_initialize_irq(int irq);
+extern void rand_initialize_blkdev(int irq, int mode);
+
+extern void add_keyboard_randomness(unsigned char scancode);
+extern void add_mouse_randomness(__u32 mouse_data);
+extern void add_interrupt_randomness(int irq);
+extern void add_blkdev_randomness(int major);
+
+extern void get_random_bytes(void *buf, int nbytes);
+
+extern __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
+ __u16 sport, __u16 dport);
+__u32 secure_tcp_probe_number(__u32 saddr, __u32 daddr,
+ __u16 sport, __u16 dport, __u32 sseq, int validate);
+
+__u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr,
+ __u16 sport, __u16 dport, __u32 sseq, __u32 count);
+
+#ifndef MODULE
+extern struct file_operations random_fops, urandom_fops;
+#endif
+
+#endif /* __KERNEL___ */
+
+#endif /* _LINUX_RANDOM_H */
diff --git a/linux/src/include/linux/resource.h b/linux/src/include/linux/resource.h
new file mode 100644
index 0000000..f3bffbd
--- /dev/null
+++ b/linux/src/include/linux/resource.h
@@ -0,0 +1,60 @@
+#ifndef _LINUX_RESOURCE_H
+#define _LINUX_RESOURCE_H
+
+#include <linux/time.h>
+
+/*
+ * Resource control/accounting header file for linux
+ */
+
+/*
+ * Definition of struct rusage taken from BSD 4.3 Reno
+ *
+ * We don't support all of these yet, but we might as well have them....
+ * Otherwise, each time we add new items, programs which depend on this
+ * structure will lose. This reduces the chances of that happening.
+ */
+#define RUSAGE_SELF 0
+#define RUSAGE_CHILDREN (-1)
+#define RUSAGE_BOTH (-2) /* sys_wait4() uses this */
+
+struct rusage {
+ struct timeval ru_utime; /* user time used */
+ struct timeval ru_stime; /* system time used */
+ long ru_maxrss; /* maximum resident set size */
+ long ru_ixrss; /* integral shared memory size */
+ long ru_idrss; /* integral unshared data size */
+ long ru_isrss; /* integral unshared stack size */
+ long ru_minflt; /* page reclaims */
+ long ru_majflt; /* page faults */
+ long ru_nswap; /* swaps */
+ long ru_inblock; /* block input operations */
+ long ru_oublock; /* block output operations */
+ long ru_msgsnd; /* messages sent */
+ long ru_msgrcv; /* messages received */
+ long ru_nsignals; /* signals received */
+ long ru_nvcsw; /* voluntary context switches */
+ long ru_nivcsw; /* involuntary " */
+};
+
+#define RLIM_INFINITY ((long)(~0UL>>1))
+
+struct rlimit {
+ long rlim_cur;
+ long rlim_max;
+};
+
+#define PRIO_MIN (-20)
+#define PRIO_MAX 20
+
+#define PRIO_PROCESS 0
+#define PRIO_PGRP 1
+#define PRIO_USER 2
+
+/*
+ * Due to binary compatibility, the actual resource numbers
+ * may be different for different linux versions..
+ */
+#include <asm/resource.h>
+
+#endif
diff --git a/linux/src/include/linux/rose.h b/linux/src/include/linux/rose.h
new file mode 100644
index 0000000..9fb1efc
--- /dev/null
+++ b/linux/src/include/linux/rose.h
@@ -0,0 +1,88 @@
+/*
+ * These are the public elements of the Linux kernel Rose implementation.
+ * For kernel AX.25 see the file ax25.h. This file requires ax25.h for the
+ * definition of the ax25_address structure.
+ */
+
+#ifndef ROSE_KERNEL_H
+#define ROSE_KERNEL_H
+
+#define ROSE_MTU 251
+
+#define ROSE_MAX_DIGIS 6
+
+#define ROSE_DEFER 1
+#define ROSE_T1 2
+#define ROSE_T2 3
+#define ROSE_T3 4
+#define ROSE_IDLE 5
+#define ROSE_QBITINCL 6
+#define ROSE_HOLDBACK 7
+
+#define SIOCRSGCAUSE (SIOCPROTOPRIVATE+0)
+#define SIOCRSSCAUSE (SIOCPROTOPRIVATE+1)
+#define SIOCRSL2CALL (SIOCPROTOPRIVATE+2)
+#define SIOCRSSL2CALL (SIOCPROTOPRIVATE+2)
+#define SIOCRSACCEPT (SIOCPROTOPRIVATE+3)
+#define SIOCRSCLRRT (SIOCPROTOPRIVATE+4)
+#define SIOCRSGL2CALL (SIOCPROTOPRIVATE+5)
+#define SIOCRSGFACILITIES (SIOCPROTOPRIVATE+6)
+
+#define ROSE_DTE_ORIGINATED 0x00
+#define ROSE_NUMBER_BUSY 0x01
+#define ROSE_INVALID_FACILITY 0x03
+#define ROSE_NETWORK_CONGESTION 0x05
+#define ROSE_OUT_OF_ORDER 0x09
+#define ROSE_ACCESS_BARRED 0x0B
+#define ROSE_NOT_OBTAINABLE 0x0D
+#define ROSE_REMOTE_PROCEDURE 0x11
+#define ROSE_LOCAL_PROCEDURE 0x13
+#define ROSE_SHIP_ABSENT 0x39
+
+typedef struct {
+ char rose_addr[5];
+} rose_address;
+
+struct sockaddr_rose {
+ unsigned short srose_family;
+ rose_address srose_addr;
+ ax25_address srose_call;
+ unsigned int srose_ndigis;
+ ax25_address srose_digi;
+};
+
+struct full_sockaddr_rose {
+ unsigned short srose_family;
+ rose_address srose_addr;
+ ax25_address srose_call;
+ unsigned int srose_ndigis;
+ ax25_address srose_digis[ROSE_MAX_DIGIS];
+};
+
+struct rose_route_struct {
+ rose_address address;
+ unsigned short mask;
+ ax25_address neighbour;
+ char device[16];
+ unsigned char ndigis;
+ ax25_address digipeaters[AX25_MAX_DIGIS];
+};
+
+struct rose_cause_struct {
+ unsigned char cause;
+ unsigned char diagnostic;
+};
+
+struct rose_facilities_struct {
+ rose_address source_addr, dest_addr;
+ ax25_address source_call, dest_call;
+ unsigned char source_ndigis, dest_ndigis;
+ ax25_address source_digis[ROSE_MAX_DIGIS];
+ ax25_address dest_digis[ROSE_MAX_DIGIS];
+ unsigned int rand;
+ rose_address fail_addr;
+ ax25_address fail_call;
+};
+
+
+#endif
diff --git a/linux/src/include/linux/route.h b/linux/src/include/linux/route.h
new file mode 100644
index 0000000..8f210b6
--- /dev/null
+++ b/linux/src/include/linux/route.h
@@ -0,0 +1,79 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the IP router interface.
+ *
+ * Version: @(#)route.h 1.0.3 05/27/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * for the purposes of compatibility only.
+ *
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_ROUTE_H
+#define _LINUX_ROUTE_H
+
+#include <linux/if.h>
+
+
+/* This structure gets passed by the SIOCADDRT and SIOCDELRT calls. */
+struct rtentry
+{
+ unsigned long rt_hash; /* hash key for lookups */
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ short rt_flags;
+ short rt_refcnt;
+ unsigned long rt_use;
+ struct ifnet *rt_ifp;
+ short rt_metric; /* +1 for binary compatibility! */
+ char *rt_dev; /* forcing the device at add */
+ unsigned long rt_mss; /* per route MTU/Window */
+ unsigned long rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+};
+
+
+#define RTF_UP 0x0001 /* route usable */
+#define RTF_GATEWAY 0x0002 /* destination is a gateway */
+#define RTF_HOST 0x0004 /* host entry (net otherwise) */
+#define RTF_REINSTATE 0x0008 /* reinstate route after tmout */
+#define RTF_DYNAMIC 0x0010 /* created dyn. (by redirect) */
+#define RTF_MODIFIED 0x0020 /* modified dyn. (by redirect) */
+#define RTF_MSS 0x0040 /* specific MSS for this route */
+#define RTF_WINDOW 0x0080 /* per route window clamping */
+#define RTF_IRTT 0x0100 /* Initial round trip time */
+#define RTF_REJECT 0x0200 /* Reject route */
+#define RTF_NOTCACHED 0x0400 /* this route isn't cached */
+
+/*
+ * This structure is passed from the kernel to user space by netlink
+ * routing/device announcements
+ */
+
+struct netlink_rtinfo
+{
+ unsigned long rtmsg_type;
+ struct sockaddr rtmsg_dst;
+ struct sockaddr rtmsg_gateway;
+ struct sockaddr rtmsg_genmask;
+ short rtmsg_flags;
+ short rtmsg_metric;
+ char rtmsg_device[16];
+};
+
+#define RTMSG_NEWROUTE 0x01
+#define RTMSG_DELROUTE 0x02
+#define RTMSG_NEWDEVICE 0x11
+#define RTMSG_DELDEVICE 0x12
+
+#endif /* _LINUX_ROUTE_H */
+
diff --git a/linux/src/include/linux/sched.h b/linux/src/include/linux/sched.h
new file mode 100644
index 0000000..523d4c4
--- /dev/null
+++ b/linux/src/include/linux/sched.h
@@ -0,0 +1,496 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+/*
+ * define DEBUG if you want the wait-queues to have some extra
+ * debugging code. It's not normally used, but might catch some
+ * wait-queue coding errors.
+ *
+ * #define DEBUG
+ */
+
+#include <asm/param.h> /* for HZ */
+
+extern unsigned long event;
+
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/tasks.h>
+#include <linux/kernel.h>
+
+#include <asm/system.h>
+#include <asm/semaphore.h>
+#include <asm/page.h>
+
+#include <linux/smp.h>
+#include <linux/tty.h>
+#include <linux/sem.h>
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+#define CLONE_VM 0x00000100 /* set if VM shared between processes */
+#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
+#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
+#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */
+#define CLONE_PID 0x00001000 /* set if pid shared */
+
+/*
+ * These are the constant used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
+
+#define CT_TO_SECS(x) ((x) / HZ)
+#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
+
+extern int nr_running, nr_tasks;
+extern int last_pid;
+
+#define FIRST_TASK task[0]
+#define LAST_TASK task[NR_TASKS-1]
+
+#include <linux/head.h>
+#include <linux/fs.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+
+#include <asm/processor.h>
+
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define TASK_ZOMBIE 3
+#define TASK_STOPPED 4
+#define TASK_SWAPPING 5
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+
+struct sched_param {
+ int sched_priority;
+};
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __KERNEL__
+
+extern void sched_init(void);
+extern void show_state(void);
+extern void trap_init(void);
+
+asmlinkage void schedule(void);
+
+/* Open file table structure */
+struct files_struct {
+ int count;
+ fd_set close_on_exec;
+ fd_set open_fds;
+ struct file * fd[NR_OPEN];
+};
+
+#define INIT_FILES { \
+ 1, \
+ { { 0, } }, \
+ { { 0, } }, \
+ { NULL, } \
+}
+
+struct fs_struct {
+ int count;
+ unsigned short umask;
+ struct inode * root, * pwd;
+};
+
+#define INIT_FS { \
+ 1, \
+ 0022, \
+ NULL, NULL \
+}
+
+struct mm_struct {
+ int count;
+ pgd_t * pgd;
+ unsigned long context;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack, start_mmap;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+ struct vm_area_struct * mmap;
+ struct vm_area_struct * mmap_avl;
+ struct semaphore mmap_sem;
+};
+
+#define INIT_MM { \
+ 1, \
+ swapper_pg_dir, \
+ 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, \
+ 0, \
+ &init_mmap, &init_mmap, MUTEX }
+
+struct signal_struct {
+ int count;
+ struct sigaction action[32];
+};
+
+#define INIT_SIGNALS { \
+ 1, \
+ { {0,}, } }
+
+struct task_struct {
+/* these are hardcoded - don't touch */
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ long counter;
+ long priority;
+ unsigned long signal;
+ unsigned long blocked; /* bitmap of masked signals */
+ unsigned long flags; /* per process flags, defined below */
+ int errno;
+ long debugreg[8]; /* Hardware debugging registers */
+ struct exec_domain *exec_domain;
+/* various fields */
+ struct linux_binfmt *binfmt;
+ struct task_struct *next_task, *prev_task;
+ struct task_struct *next_run, *prev_run;
+ unsigned long saved_kernel_stack;
+ unsigned long kernel_stack_page;
+ int exit_code, exit_signal;
+ /* ??? */
+ unsigned long personality;
+ int dumpable:1;
+ int did_exec:1;
+ /* shouldn't this be pid_t? */
+ int pid;
+ int pgrp;
+ int tty_old_pgrp;
+ int session;
+ /* boolean value for session group leader */
+ int leader;
+ int groups[NGROUPS];
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->p_pptr->pid)
+ */
+ struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
+ struct wait_queue *wait_chldexit; /* for wait4() */
+ unsigned short uid,euid,suid,fsuid;
+ unsigned short gid,egid,sgid,fsgid;
+ unsigned long timeout, policy, rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+ struct timer_list real_timer;
+ long utime, stime, cutime, cstime, start_time;
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
+ int swappable:1;
+ unsigned long swap_address;
+ unsigned long old_maj_flt; /* old value of maj_flt */
+ unsigned long dec_flt; /* page fault count of the last time */
+ unsigned long swap_cnt; /* number of pages to swap on next pass */
+/* limits */
+ struct rlimit rlim[RLIM_NLIMITS];
+ unsigned short used_math;
+ char comm[16];
+/* file system info */
+ int link_count;
+ struct tty_struct *tty; /* NULL if no tty */
+/* ipc stuff */
+ struct sem_undo *semundo;
+ struct sem_queue *semsleeping;
+/* ldt for this task - used by Wine. If NULL, default_ldt is used */
+ struct desc_struct *ldt;
+/* tss for this task */
+ struct thread_struct tss;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* memory management info */
+ struct mm_struct *mm;
+/* signal handlers */
+ struct signal_struct *sig;
+#ifdef __SMP__
+ int processor;
+ int last_processor;
+ int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+#endif
+};
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+#define PF_PTRACED 0x00000010 /* set if ptrace (0) has been called. */
+#define PF_TRACESYS 0x00000020 /* tracing system calls */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+
+#define PF_STARTING 0x00000002 /* being created */
+#define PF_EXITING 0x00000004 /* getting shut down */
+
+#define PF_USEDFPU 0x00100000 /* Process used the FPU this quantum (SMP only) */
+#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k) */
+
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed.. 8MB seems reasonable.
+ */
+#define _STK_LIM (8*1024*1024)
+
+#define DEF_PRIORITY (20*HZ/100) /* 200 ms time slices */
+
+/*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK \
+/* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \
+/* debugregs */ { 0, }, \
+/* exec domain */&default_exec_domain, \
+/* binfmt */ NULL, \
+/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* stack */ 0,(unsigned long) &init_kernel_stack, \
+/* ec,brk... */ 0,0,0,0,0, \
+/* pid etc.. */ 0,0,0,0,0, \
+/* suppl grps*/ {NOGROUP,}, \
+/* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \
+/* uid etc */ 0,0,0,0,0,0,0,0, \
+/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \
+/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
+/* utime */ 0,0,0,0,0, \
+/* flt */ 0,0,0,0,0,0, \
+/* swp */ 0,0,0,0,0, \
+/* rlimits */ INIT_RLIMITS, \
+/* math */ 0, \
+/* comm */ "swapper", \
+/* fs info */ 0,NULL, \
+/* ipc */ NULL, NULL, \
+/* ldt */ NULL, \
+/* tss */ INIT_TSS, \
+/* fs */ &init_fs, \
+/* files */ &init_files, \
+/* mm */ &init_mm, \
+/* signals */ &init_signals, \
+}
+
+extern struct mm_struct init_mm;
+extern struct task_struct init_task;
+extern struct task_struct *task[NR_TASKS];
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *current_set[NR_CPUS];
+/*
+ * On a single processor system this comes out as current_set[0] when cpp
+ * has finished with it, which gcc will optimise away.
+ */
+#define current (0+current_set[smp_processor_id()]) /* Current on this processor */
+extern unsigned long volatile jiffies;
+extern unsigned long itimer_ticks;
+extern unsigned long itimer_next;
+extern struct timeval xtime;
+extern int need_resched;
+extern void do_timer(struct pt_regs *);
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+extern int securelevel; /* system security level */
+
+#define CURRENT_TIME (xtime.tv_sec)
+
+extern void sleep_on(struct wait_queue ** p);
+extern void interruptible_sleep_on(struct wait_queue ** p);
+extern void wake_up(struct wait_queue ** p);
+extern void wake_up_interruptible(struct wait_queue ** p);
+extern void wake_up_process(struct task_struct * tsk);
+
+extern void notify_parent(struct task_struct * tsk, int signal);
+extern void force_sig(unsigned long sig,struct task_struct * p);
+extern int send_sig(unsigned long sig,struct task_struct * p,int priv);
+extern int in_group_p(gid_t grp);
+
+extern int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id);
+extern void free_irq(unsigned int irq, void *dev_id);
+
+/*
+ * This has now become a routine instead of a macro, it sets a flag if
+ * it returns true (to do BSD-style accounting where the process is flagged
+ * if it uses root privs). The implication of this is that you should do
+ * normal permissions checks first, and check suser() last.
+ */
+extern inline int suser(void)
+{
+ if (current->euid == 0) {
+ /* remember that root privileges were used (BSD-style accounting) */
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+
+extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_mm(struct task_struct *);
+extern void exit_fs(struct task_struct *);
+extern void exit_files(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void release_thread(struct task_struct *);
+
+extern int do_execve(char *, char **, char **, struct pt_regs *);
+extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
+
+
+/* See if we have a valid user level fd.
+ * If it makes sense, return the file structure it references.
+ * Otherwise return NULL.
+ */
+extern inline struct file *file_from_fd(const unsigned int fd)
+{
+
+ /* fd is unsigned, so a single upper-bound check covers negatives too */
+ if (fd >= NR_OPEN)
+ return NULL;
+ /* either valid or null */
+ return current->files->fd[fd];
+}
+
+/*
+ * The wait-queues are circular lists, and you have to be *very* sure
+ * to keep them correct. Use only these two functions to add/remove
+ * entries in the queues.
+ */
+/*
+ * Link 'wait' in at the front of the circular wait queue rooted at *p.
+ * Non-atomic: add_wait_queue() below wraps this in cli()/restore_flags().
+ */
+extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue *head = *p;
+ /* empty-queue case: point back at the head sentinel
+    (WAIT_QUEUE_HEAD — presumably defined elsewhere in this tree) */
+ struct wait_queue *next = WAIT_QUEUE_HEAD(p);
+
+ if (head)
+ next = head;
+ *p = wait;
+ wait->next = next;
+}
+
+/* Interrupt-safe wrapper: masks interrupts around __add_wait_queue(). */
+extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __add_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+/*
+ * Unlink 'wait' from its circular queue: walk the ring starting at
+ * wait->next until wait's predecessor is found, then splice 'wait' out.
+ * NOTE(review): 'p' is unused here — kept for symmetry with
+ * __add_wait_queue().  Non-atomic; see remove_wait_queue() below.
+ */
+extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue * next = wait->next;
+ struct wait_queue * head = next;
+
+ for (;;) {
+ struct wait_queue * nextlist = head->next;
+ if (nextlist == wait)
+ break;
+ head = nextlist;
+ }
+ head->next = next;
+}
+
+/* Interrupt-safe wrapper: masks interrupts around __remove_wait_queue(). */
+extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __remove_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+/*
+ * Register 'current' on a wait queue on behalf of select(): take the
+ * next free entry of the caller's select_table, record the queue
+ * address, and enqueue the entry's embedded wait_queue node.
+ * Silently does nothing if either pointer is NULL or the table is full.
+ */
+extern inline void select_wait(struct wait_queue ** wait_address, select_table * p)
+{
+ struct select_table_entry * entry;
+
+ if (!p || !wait_address)
+ return;
+ if (p->nr >= __MAX_SELECT_TABLE_ENTRIES)
+ return;
+ entry = p->entry + p->nr;
+ entry->wait_address = wait_address;
+ entry->wait.task = current;
+ entry->wait.next = NULL;
+ add_wait_queue(wait_address,&entry->wait);
+ p->nr++;
+}
+
+#define REMOVE_LINKS(p) do { unsigned long flags; \
+ save_flags(flags) ; cli(); \
+ (p)->next_task->prev_task = (p)->prev_task; \
+ (p)->prev_task->next_task = (p)->next_task; \
+ restore_flags(flags); \
+ if ((p)->p_osptr) \
+ (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
+ if ((p)->p_ysptr) \
+ (p)->p_ysptr->p_osptr = (p)->p_osptr; \
+ else \
+ (p)->p_pptr->p_cptr = (p)->p_osptr; \
+ } while (0)
+
+#define SET_LINKS(p) do { unsigned long flags; \
+ save_flags(flags); cli(); \
+ (p)->next_task = &init_task; \
+ (p)->prev_task = init_task.prev_task; \
+ init_task.prev_task->next_task = (p); \
+ init_task.prev_task = (p); \
+ restore_flags(flags); \
+ (p)->p_ysptr = NULL; \
+ if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
+ (p)->p_osptr->p_ysptr = p; \
+ (p)->p_pptr->p_cptr = p; \
+ } while (0)
+
+#define for_each_task(p) \
+ for (p = &init_task ; (p = p->next_task) != &init_task ; )
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/linux/sem.h b/linux/src/include/linux/sem.h
new file mode 100644
index 0000000..0eb1d02
--- /dev/null
+++ b/linux/src/include/linux/sem.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_SEM_H
+#define _LINUX_SEM_H
+#include <linux/ipc.h>
+
+/* semop flags */
+#define SEM_UNDO 0x1000 /* undo the operation on exit */
+
+/* semctl Command Definitions. */
+#define GETPID 11 /* get sempid */
+#define GETVAL 12 /* get semval */
+#define GETALL 13 /* get all semval's */
+#define GETNCNT 14 /* get semncnt */
+#define GETZCNT 15 /* get semzcnt */
+#define SETVAL 16 /* set semval */
+#define SETALL 17 /* set all semval's */
+
+/* One semid data structure for each set of semaphores in the system. */
+struct semid_ds {
+ struct ipc_perm sem_perm; /* permissions .. see ipc.h */
+ time_t sem_otime; /* last semop time */
+ time_t sem_ctime; /* last change time */
+ struct sem *sem_base; /* ptr to first semaphore in array */
+ struct sem_queue *sem_pending; /* pending operations to be processed */
+ struct sem_queue **sem_pending_last; /* last pending operation */
+ struct sem_undo *undo; /* undo requests on this array */
+ ushort sem_nsems; /* no. of semaphores in array */
+};
+
+/* semop system calls takes an array of these. */
+struct sembuf {
+ ushort sem_num; /* semaphore index in array */
+ short sem_op; /* semaphore operation */
+ short sem_flg; /* operation flags */
+};
+
+/* arg for semctl system calls. */
+union semun {
+ int val; /* value for SETVAL */
+ struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */
+ ushort *array; /* array for GETALL & SETALL */
+ struct seminfo *__buf; /* buffer for IPC_INFO */
+ void *__pad;
+};
+
+struct seminfo {
+ int semmap;
+ int semmni;
+ int semmns;
+ int semmnu;
+ int semmsl;
+ int semopm;
+ int semume;
+ int semusz;
+ int semvmx;
+ int semaem;
+};
+
+#define SEMMNI 128 /* ? max # of semaphore identifiers */
+#define SEMMSL 32 /* <= 512 max num of semaphores per id */
+#define SEMMNS (SEMMNI*SEMMSL) /* ? max # of semaphores in system */
+#define SEMOPM 32 /* ~ 100 max num of ops per semop call */
+#define SEMVMX 32767 /* semaphore maximum value */
+
+/* unused */
+#define SEMUME SEMOPM /* max num of undo entries per process */
+#define SEMMNU SEMMNS /* num of undo structures system wide */
+#define SEMAEM (SEMVMX >> 1) /* adjust on exit max value */
+#define SEMMAP SEMMNS /* # of entries in semaphore map */
+#define SEMUSZ 20 /* sizeof struct sem_undo */
+
+#ifdef __KERNEL__
+
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+ short semval; /* current value */
+ short sempid; /* pid of last operation */
+};
+
+/* ipcs ctl cmds */
+#define SEM_STAT 18
+#define SEM_INFO 19
+
+/* One queue for each semaphore set in the system. */
+struct sem_queue {
+ struct sem_queue * next; /* next entry in the queue */
+ struct sem_queue ** prev; /* previous entry in the queue, *(q->prev) == q */
+ struct wait_queue * sleeper; /* sleeping process */
+ struct sem_undo * undo; /* undo structure */
+ int pid; /* process id of requesting process */
+ int status; /* completion status of operation */
+ struct semid_ds * sma; /* semaphore array for operations */
+ struct sembuf * sops; /* array of pending operations */
+ int nsops; /* number of operations */
+};
+
+/* Each task has a list of undo requests. They are executed automatically
+ * when the process exits.
+ */
+struct sem_undo {
+ struct sem_undo * proc_next; /* next entry on this process */
+ struct sem_undo * id_next; /* next entry on this semaphore set */
+ int semid; /* semaphore set identifier */
+ short * semadj; /* array of adjustments, one per semaphore */
+};
+
+asmlinkage int sys_semget (key_t key, int nsems, int semflg);
+asmlinkage int sys_semop (int semid, struct sembuf *sops, unsigned nsops);
+asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SEM_H */
diff --git a/linux/src/include/linux/signal.h b/linux/src/include/linux/signal.h
new file mode 100644
index 0000000..9d1afa9
--- /dev/null
+++ b/linux/src/include/linux/signal.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SIGNAL_H
+#define _LINUX_SIGNAL_H
+
+#include <asm/signal.h>
+
+#endif
diff --git a/linux/src/include/linux/skbuff.h b/linux/src/include/linux/skbuff.h
new file mode 100644
index 0000000..e4c77b4
--- /dev/null
+++ b/linux/src/include/linux/skbuff.h
@@ -0,0 +1,467 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/time.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+
+#define CONFIG_SKB_CHECK 0
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+
+
+#define FREE_READ 1
+#define FREE_WRITE 0
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+struct sk_buff_head
+{
+ struct sk_buff * next;
+ struct sk_buff * prev;
+ __u32 qlen; /* Must be same length as a pointer
+ for using debugging */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+};
+
+
+struct sk_buff
+{
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+ struct sk_buff_head * list; /* List we are on */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+ struct sk_buff *link3; /* Link for IP protocol level buffer chains */
+ struct sock *sk; /* Socket we are owned by */
+ unsigned long when; /* used to compute rtt's */
+ struct timeval stamp; /* Time we arrived */
+ struct device *dev; /* Device we arrived on/are leaving by */
+ union
+ {
+ struct tcphdr *th;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ unsigned char *raw;
+ /* for passing file handles in a unix domain socket */
+ void *filp;
+ } h;
+
+ union
+ {
+ /* As yet incomplete physical layer views */
+ unsigned char *raw;
+ struct ethhdr *ethernet;
+ } mac;
+
+ struct iphdr *ip_hdr; /* For IPPROTO_RAW */
+ unsigned long len; /* Length of actual data */
+ unsigned long csum; /* Checksum */
+ __u32 saddr; /* IP source address */
+ __u32 daddr; /* IP target address */
+ __u32 raddr; /* IP next hop address */
+ __u32 seq; /* TCP sequence number */
+ __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
+ __u32 ack_seq; /* TCP ack sequence number */
+ unsigned char proto_priv[16]; /* Protocol private data */
+ volatile char acked, /* Are we acked ? */
+ used, /* Are we in use ? */
+ free, /* How to free this buffer */
+ arp; /* Has IP/ARP resolution finished */
+ unsigned char tries, /* Times tried */
+ lock, /* Are we locked ? */
+ localroute, /* Local routing asserted for this frame */
+ pkt_type, /* Packet class */
+ pkt_bridged, /* Tracker for bridging */
+ ip_summed; /* Driver fed us an IP checksum */
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+ unsigned short users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned int truesize; /* Buffer size */
+
+ atomic_t count; /* reference count */
+ struct sk_buff *data_skb; /* Link to the actual data skb */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+ __u16 redirport; /* Redirect port */
+
+ /*
+ * Keep this at the end then we wont break stuff.
+ */
+#if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
+ __u32 shapelatency; /* Latency on frame */
+ __u32 shapeclock; /* Time it should go out */
+ __u32 shapelen; /* Frame length in clocks */
+ __u32 shapestamp; /* Stamp for shaper */
+ __u16 shapepend; /* Pending */
+#endif
+};
+
+#ifdef CONFIG_SKB_LARGE
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+#else
+#define SK_WMEM_MAX 32767
+#define SK_RMEM_MAX 32767
+#endif
+
+#if CONFIG_SKB_CHECK
+#define SK_FREED_SKB 0x0DE2C0DE
+#define SK_GOOD_SKB 0xDEC0DED1
+#define SK_HEAD_SKB 0x12231298
+#endif
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/malloc.h>
+
+#include <asm/system.h>
+
+#if 0
+extern void print_skb(struct sk_buff *);
+#endif
+extern void kfree_skb(struct sk_buff *skb, int rw);
+extern void skb_queue_head_init(struct sk_buff_head *list);
+extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
+extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
+extern struct sk_buff * skb_dequeue(struct sk_buff_head *list);
+extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_append(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_unlink(struct sk_buff *buf);
+extern __u32 skb_queue_len(struct sk_buff_head *list);
+extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * dev_alloc_skb(unsigned int size);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
+extern void skb_device_lock(struct sk_buff *skb);
+extern void skb_device_unlock(struct sk_buff *skb);
+extern void dev_kfree_skb(struct sk_buff *skb, int mode);
+extern int skb_device_locked(struct sk_buff *skb);
+extern unsigned char * skb_put(struct sk_buff *skb, int len);
+extern unsigned char * skb_push(struct sk_buff *skb, int len);
+extern unsigned char * skb_pull(struct sk_buff *skb, int len);
+extern int skb_headroom(struct sk_buff *skb);
+extern int skb_tailroom(struct sk_buff *skb);
+extern void skb_reserve(struct sk_buff *skb, int len);
+extern void skb_trim(struct sk_buff *skb, int len);
+
+/* Empty when the head's next pointer refers back to the head itself. */
+extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/*
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. For an interrupt
+ * type system cli() peek the buffer copy the data and sti();
+ */
+extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ /* the head sentinel is part of its own ring: if the first node is
+    the head again, the queue is empty and we return NULL */
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/*
+ * Return the length of an sk_buff queue
+ */
+
+/* O(1): the queue head caches its element count in qlen. */
+extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+#if CONFIG_SKB_CHECK
+extern int skb_check(struct sk_buff *skb,int,int, char *);
+#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__)
+#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__)
+#else
+#define IS_SKB(skb)
+#define IS_SKB_HEAD(skb)
+
+extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ /* splice newsk in between the head sentinel and the old first node */
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+/* Interrupt-safe wrapper around __skb_queue_head(). */
+extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_head(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+
+extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ /* splice newsk in between the old last node and the head sentinel */
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+/* Interrupt-safe wrapper around __skb_queue_tail(). */
+extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_tail(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list.
+ */
+
+extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ save_flags(flags);
+ cli();
+ result = __skb_dequeue(list);
+ restore_flags(flags);
+ return result;
+}
+
+/*
+ * Insert a packet on a list.
+ */
+
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/*
+ * Place a packet before a given packet in a list
+ */
+extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old->prev, old, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old, old->next, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_.
+ */
+
+extern __inline__ void skb_unlink(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if(skb->list)
+ __skb_unlink(skb, skb->list);
+ restore_flags(flags);
+}
+
+/*
+ * Add data to an sk_buff
+ */
+
+extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end)
+ {
+ panic("skput:over: %d", len);
+ }
+ return tmp;
+}
+
+extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head)
+ {
+ panic("skpush:under: %d", len);
+ }
+ return skb->data;
+}
+
+extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len)
+{
+ if(len > skb->len)
+ return NULL;
+ skb->data+=len;
+ skb->len-=len;
+ return skb->data;
+}
+
+extern __inline__ int skb_headroom(struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+extern __inline__ int skb_tailroom(struct sk_buff *skb)
+{
+ return skb->end-skb->tail;
+}
+
+extern __inline__ void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+
+extern __inline__ void skb_trim(struct sk_buff *skb, int len)
+{
+ if(skb->len>len)
+ {
+ skb->len=len;
+ skb->tail=skb->data+len;
+ }
+}
+
+#endif
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern int datagram_select(struct sock *sk, int sel_type, select_table *wait);
+extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
+extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux/src/include/linux/smp.h b/linux/src/include/linux/smp.h
new file mode 100644
index 0000000..72984f1
--- /dev/null
+++ b/linux/src/include/linux/smp.h
@@ -0,0 +1,54 @@
+#ifndef __LINUX_SMP_H
+#define __LINUX_SMP_H
+
+/*
+ * Generic SMP support
+ * Alan Cox. <alan@cymru.net>
+ */
+
+#ifdef __SMP__
+#include <asm/smp.h>
+
+extern void smp_message_pass(int target, int msg, unsigned long data, int wait);
+extern void smp_boot_cpus(void); /* Boot processor call to load the other CPU's */
+extern void smp_callin(void); /* Processor call in. Must hold processors until .. */
+extern void smp_commence(void); /* Multiprocessors may now schedule */
+extern int smp_num_cpus;
+extern int smp_threads_ready; /* True once the per process idle is forked */
+#ifdef __SMP_PROF__
+extern volatile unsigned long smp_spins[NR_CPUS]; /* count of interrupt spins */
+extern volatile unsigned long smp_spins_sys_idle[]; /* count of idle spins */
+extern volatile unsigned long smp_spins_syscall[]; /* count of syscall spins */
+extern volatile unsigned long smp_spins_syscall_cur[]; /* count of syscall spins for the current
+ call */
+extern volatile unsigned long smp_idle_count[1+NR_CPUS];/* count idle ticks */
+extern volatile unsigned long smp_idle_map; /* map with idle cpus */
+#else
+extern volatile unsigned long smp_spins;
+#endif
+
+
+extern volatile unsigned long smp_msg_data;
+extern volatile int smp_src_cpu;
+extern volatile int smp_msg_id;
+
+#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
+#define MSG_ALL 0x8001
+
+#define MSG_INVALIDATE_TLB 0x0001 /* Remote processor TLB invalidate */
+#define MSG_STOP_CPU 0x0002 /* Sent to shut down slave CPU's when rebooting */
+#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU */
+
+#else
+
+/*
+ * These macros fold the SMP functionality into a single CPU system
+ */
+
+#define smp_num_cpus 1
+#define smp_processor_id() 0
+#define smp_message_pass(t,m,d,w)
+#define smp_threads_ready 1
+#define kernel_lock()
+#endif
+#endif
diff --git a/linux/src/include/linux/socket.h b/linux/src/include/linux/socket.h
new file mode 100644
index 0000000..96c04ec
--- /dev/null
+++ b/linux/src/include/linux/socket.h
@@ -0,0 +1,147 @@
+#ifndef _LINUX_SOCKET_H
+#define _LINUX_SOCKET_H
+
+#include <asm/socket.h> /* arch-dependent defines */
+#include <linux/sockios.h> /* the SIOCxxx I/O controls */
+#include <linux/uio.h> /* iovec support */
+
+struct sockaddr
+{
+ unsigned short sa_family; /* address family, AF_xxx */
+ char sa_data[14]; /* 14 bytes of protocol address */
+};
+
+struct linger {
+ int l_onoff; /* Linger active */
+ int l_linger; /* How long to linger for */
+};
+
+/*
+ * As we do 4.4BSD message passing we use a 4.4BSD message passing
+ * system, not 4.3. Thus msg_accrights(len) are now missing. They
+ * belong in an obscure libc emulation or the bin.
+ */
+
+struct msghdr
+{
+ void * msg_name; /* Socket name */
+ int msg_namelen; /* Length of name */
+ struct iovec * msg_iov; /* Data blocks */
+ int msg_iovlen; /* Number of blocks */
+ void * msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
+ int msg_controllen; /* Length of rights list */
+ int msg_flags; /* 4.4 BSD item we don't use */
+};
+
+/* Control Messages */
+
+#define SCM_RIGHTS 1
+
+/* Socket types. */
+#define SOCK_STREAM 1 /* stream (connection) socket */
+#define SOCK_DGRAM 2 /* datagram (conn.less) socket */
+#define SOCK_RAW 3 /* raw socket */
+#define SOCK_RDM 4 /* reliably-delivered message */
+#define SOCK_SEQPACKET 5 /* sequential packet socket */
+#define SOCK_PACKET 10 /* linux specific way of */
+ /* getting packets at the dev */
+ /* level. For writing rarp and */
+ /* other similar things on the */
+ /* user level. */
+
+/* Supported address families. */
+#define AF_UNSPEC 0
+#define AF_UNIX 1 /* Unix domain sockets */
+#define AF_INET 2 /* Internet IP Protocol */
+#define AF_AX25 3 /* Amateur Radio AX.25 */
+#define AF_IPX 4 /* Novell IPX */
+#define AF_APPLETALK 5 /* Appletalk DDP */
+#define AF_NETROM 6 /* Amateur radio NetROM */
+#define AF_BRIDGE 7 /* Multiprotocol bridge */
+#define AF_AAL5 8 /* Reserved for Werner's ATM */
+#define AF_X25 9 /* Reserved for X.25 project */
+#ifdef LINUX_2_1_X
+#define AF_INET6 10 /* IP version 6 */
+#endif
+#define AF_ROSE 11 /* Amateur Radio X.25 PLP */
+#define AF_MAX 13 /* For now.. */
+#define AF_PACKET 17 /* Forward compat hook */
+
+/* Protocol families, same as address families. */
+#define PF_UNSPEC AF_UNSPEC
+#define PF_UNIX AF_UNIX
+#define PF_INET AF_INET
+#define PF_AX25 AF_AX25
+#define PF_IPX AF_IPX
+#define PF_APPLETALK AF_APPLETALK
+#define PF_NETROM AF_NETROM
+#define PF_BRIDGE AF_BRIDGE
+#define PF_AAL5 AF_AAL5
+#define PF_X25 AF_X25
+#ifdef LINUX_2_1_X
+#define PF_INET6 AF_INET6
+#endif
+#define PF_ROSE AF_ROSE
+#define PF_MAX AF_MAX
+#define PF_PACKET AF_PACKET
+/* Maximum queue length specifiable by listen. */
+#define SOMAXCONN 128
+
+/* Flags we can use with send/ and recv. */
+#define MSG_OOB 1
+#define MSG_PEEK 2
+#define MSG_DONTROUTE 4
+/*#define MSG_CTRUNC 8 - We need to support this for BSD oddments */
+#define MSG_PROXY 16 /* Supply or ask second address. */
+
+/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
+#define SOL_IP 0
+#define SOL_IPX 256
+#define SOL_AX25 257
+#define SOL_ATALK 258
+#define SOL_NETROM 259
+#define SOL_ROSE 260
+#define SOL_TCP 6
+#define SOL_UDP 17
+
+/* IP options */
+#define IP_TOS 1
+#define IPTOS_LOWDELAY 0x10
+#define IPTOS_THROUGHPUT 0x08
+#define IPTOS_RELIABILITY 0x04
+#define IPTOS_MINCOST 0x02
+#define IP_TTL 2
+#define IP_HDRINCL 3
+#define IP_OPTIONS 4
+
+#define IP_MULTICAST_IF 32
+#define IP_MULTICAST_TTL 33
+#define IP_MULTICAST_LOOP 34
+#define IP_ADD_MEMBERSHIP 35
+#define IP_DROP_MEMBERSHIP 36
+
+/* These need to appear somewhere around here */
+#define IP_DEFAULT_MULTICAST_TTL 1
+#define IP_DEFAULT_MULTICAST_LOOP 1
+#define IP_MAX_MEMBERSHIPS 20
+
+/* IPX options */
+#define IPX_TYPE 1
+
+/* TCP options - this way around because someone left a set in the c library includes */
+#define TCP_NODELAY 1
+#define TCP_MAXSEG 2
+
+/* The various priorities. */
+#define SOPRI_INTERACTIVE 0
+#define SOPRI_NORMAL 1
+#define SOPRI_BACKGROUND 2
+
+#ifdef __KERNEL__
+extern void memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode);
+extern void memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
+extern int move_addr_to_user(void *kaddr, int klen, void *uaddr, int *ulen);
+extern int move_addr_to_kernel(void *uaddr, int ulen, void *kaddr);
+#endif
+#endif /* _LINUX_SOCKET_H */
diff --git a/linux/src/include/linux/sockios.h b/linux/src/include/linux/sockios.h
new file mode 100644
index 0000000..12a8ae4
--- /dev/null
+++ b/linux/src/include/linux/sockios.h
@@ -0,0 +1,98 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the socket-level I/O control calls.
+ *
+ * Version: @(#)sockios.h 1.0.2 03/09/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_SOCKIOS_H
+#define _LINUX_SOCKIOS_H
+
+#include <asm/sockios.h>
+
+/* Routing table calls. */
+#define SIOCADDRT 0x890B /* add routing table entry */
+#define SIOCDELRT 0x890C /* delete routing table entry */
+
+/* Socket configuration controls. */
+#define SIOCGIFNAME 0x8910 /* get iface name */
+#define SIOCSIFLINK 0x8911 /* set iface channel */
+#define SIOCGIFCONF 0x8912 /* get iface list */
+#define SIOCGIFFLAGS 0x8913 /* get flags */
+#define SIOCSIFFLAGS 0x8914 /* set flags */
+#define SIOCGIFADDR 0x8915 /* get PA address */
+#define SIOCSIFADDR 0x8916 /* set PA address */
+#define SIOCGIFDSTADDR 0x8917 /* get remote PA address */
+#define SIOCSIFDSTADDR 0x8918 /* set remote PA address */
+#define SIOCGIFBRDADDR 0x8919 /* get broadcast PA address */
+#define SIOCSIFBRDADDR 0x891a /* set broadcast PA address */
+#define SIOCGIFNETMASK 0x891b /* get network PA mask */
+#define SIOCSIFNETMASK 0x891c /* set network PA mask */
+#define SIOCGIFMETRIC 0x891d /* get metric */
+#define SIOCSIFMETRIC 0x891e /* set metric */
+#define SIOCGIFMEM 0x891f /* get memory address (BSD) */
+#define SIOCSIFMEM 0x8920 /* set memory address (BSD) */
+#define SIOCGIFMTU 0x8921 /* get MTU size */
+#define SIOCSIFMTU 0x8922 /* set MTU size */
+#define SIOCSIFHWADDR 0x8924 /* set hardware address (NI) */
+#define SIOCGIFENCAP 0x8925 /* get/set slip encapsulation */
+#define SIOCSIFENCAP 0x8926
+#define SIOCGIFHWADDR 0x8927 /* Get hardware address */
+#define SIOCGIFSLAVE 0x8929 /* Driver slaving support */
+#define SIOCSIFSLAVE 0x8930
+#define SIOCADDMULTI 0x8931 /* Multicast address lists */
+#define SIOCDELMULTI 0x8932
+
+#define SIOCGIFBR 0x8940 /* Bridging support */
+#define SIOCSIFBR 0x8941 /* Set bridging options */
+
+/* ARP cache control calls. */
+#define OLD_SIOCDARP 0x8950 /* old delete ARP table entry */
+#define OLD_SIOCGARP 0x8951 /* old get ARP table entry */
+#define OLD_SIOCSARP 0x8952 /* old set ARP table entry */
+#define SIOCDARP 0x8953 /* delete ARP table entry */
+#define SIOCGARP 0x8954 /* get ARP table entry */
+#define SIOCSARP 0x8955 /* set ARP table entry */
+
+/* RARP cache control calls. */
+#define SIOCDRARP 0x8960 /* delete RARP table entry */
+#define SIOCGRARP 0x8961 /* get RARP table entry */
+#define SIOCSRARP 0x8962 /* set RARP table entry */
+
+/* Driver configuration calls */
+
+#define SIOCGIFMAP 0x8970 /* Get device parameters */
+#define SIOCSIFMAP 0x8971 /* Set device parameters */
+
+/* DLCI configuration calls */
+
+#define SIOCADDDLCI 0x8980 /* Create new DLCI device */
+#define SIOCDELDLCI 0x8981 /* Delete DLCI device */
+
+/* Device private ioctl calls */
+
+/*
+ * These 16 ioctls are available to devices via the do_ioctl() device
+ * vector. Each device should include this file and redefine these names
+ * as their own. Because these are device dependent it is a good idea
+ * _NOT_ to issue them to random objects and hope.
+ */
+
+#define SIOCDEVPRIVATE 0x89F0 /* to 89FF */
+
+/*
+ * These 16 ioctl calls are protocol private
+ */
+
+#define SIOCPROTOPRIVATE 0x89E0 /* to 89EF */
+#endif /* _LINUX_SOCKIOS_H */
diff --git a/linux/src/include/linux/spinlock.h b/linux/src/include/linux/spinlock.h
new file mode 100644
index 0000000..790ac18
--- /dev/null
+++ b/linux/src/include/linux/spinlock.h
@@ -0,0 +1,4 @@
+#ifndef __LINUX_SPINLOCK_H
+#define __LINUX_SPINLOCK_H
+#include <asm/spinlock.h>
+#endif
diff --git a/linux/src/include/linux/stat.h b/linux/src/include/linux/stat.h
new file mode 100644
index 0000000..d86b164
--- /dev/null
+++ b/linux/src/include/linux/stat.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_STAT_H
+#define _LINUX_STAT_H
+
+#ifdef __KERNEL__
+
+#include <asm/stat.h>
+
+#endif
+
+#define S_IFMT 00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK 0120000
+#define S_IFREG 0100000
+#define S_IFBLK 0060000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFIFO 0010000
+#define S_ISUID 0004000
+#define S_ISGID 0002000
+#define S_ISVTX 0001000
+
+#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#ifdef __KERNEL__
+#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
+#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
+#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
+#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
+#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
+#endif
+
+#endif
diff --git a/linux/src/include/linux/stddef.h b/linux/src/include/linux/stddef.h
new file mode 100644
index 0000000..488d49c
--- /dev/null
+++ b/linux/src/include/linux/stddef.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef unsigned long size_t;
+#endif
+
+#undef NULL
+#define NULL ((void *)0)
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+#endif
diff --git a/linux/src/include/linux/string.h b/linux/src/include/linux/string.h
new file mode 100644
index 0000000..62ff880
--- /dev/null
+++ b/linux/src/include/linux/string.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_STRING_H_
+#define _LINUX_STRING_H_
+
+#include <linux/types.h> /* for size_t */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char * ___strtok;
+#if 0
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *, __kernel_size_t);
+extern char * strcat(char *, const char *);
+extern char * strncat(char *, const char *, __kernel_size_t);
+extern char * strchr(const char *,int);
+extern char * strrchr(const char *,int);
+#endif
+extern char * strpbrk(const char *,const char *);
+extern char * strtok(char *,const char *);
+extern char * strstr(const char *,const char *);
+#if 0
+extern __kernel_size_t strlen(const char *);
+extern __kernel_size_t strnlen(const char *,__kernel_size_t);
+#endif
+extern __kernel_size_t strspn(const char *,const char *);
+#if 0
+extern int strcmp(const char *,const char *);
+extern int strncmp(const char *,const char *,__kernel_size_t);
+#endif
+
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+#if 0
+extern void * memmove(void *,const void *,__kernel_size_t);
+extern void * memscan(void *,int,__kernel_size_t);
+#endif
+extern int memcmp(const void *,const void *,__kernel_size_t);
+
+/*
+ * Include machine specific inline routines
+ */
+#include <asm/string.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LINUX_STRING_H_ */
diff --git a/linux/src/include/linux/symtab_begin.h b/linux/src/include/linux/symtab_begin.h
new file mode 100644
index 0000000..65a8700
--- /dev/null
+++ b/linux/src/include/linux/symtab_begin.h
@@ -0,0 +1,45 @@
+#include <linux/linkage.h>
+
+#ifdef MODVERSIONS
+# undef _set_ver
+# undef X
+/*
+ * These two macros _will_ get enough arguments from the X* macros
+ * since "sym" expands to "symaddr, symstr" from the #define in *.ver
+ */
+# define _basic_version(symaddr,symstr) symaddr, symstr
+# define _alias_version(really,symaddr,symstr) (void *) & really , symstr
+
+# ifndef __GENKSYMS__
+# ifdef MODULE
+# define _set_ver(sym,ver) \
+ (void *) & sym ## _R ## ver, SYMBOL_NAME_STR(sym) "_R" #ver
+# else /* !MODULE */
+# define _set_ver(sym,ver) \
+ (void *) & sym, SYMBOL_NAME_STR(sym) "_R" #ver
+# endif /* !MODULE */
+# define X(sym) { _basic_version(sym) }
+/*
+ * For _really_ stacked modules:
+ *
+ * Use "Xalias(local_symbol, symbol_from_other_module)"
+ * to make subsequent modules really use "local_symbol"
+ * when they think that they are using "symbol_from_other_module"
+ *
+ * The "aliasing" module can still use "symbol_from_other_module",
+ * but can now replace and/or modify the behaviour of that symbol.
+ */
+# define Xalias(really,sym) { _alias_version(really,sym) }
+# endif /* !__GENKSYMS__ */
+#else /* !MODVERSIONS */
+# define X(sym) { (void *) & sym, SYMBOL_NAME_STR(sym)}
+# define Xalias(really,sym) { (void *) & really, SYMBOL_NAME_STR(sym)}
+#endif /* MODVERSIONS */
+/*
+ * Some symbols always need to be unversioned. This includes
+ * compiler generated calls to functions.
+ */
+#define XNOVERS(sym) { (void *) & sym, SYMBOL_NAME_STR(sym)}
+
+#define EMPTY {0,0}
+ 0, 0, 0, {
diff --git a/linux/src/include/linux/symtab_end.h b/linux/src/include/linux/symtab_end.h
new file mode 100644
index 0000000..91b92e2
--- /dev/null
+++ b/linux/src/include/linux/symtab_end.h
@@ -0,0 +1,15 @@
+#ifdef MODVERSIONS
+#undef _set_ver
+#if defined(MODULE) && !defined(__GENKSYMS__)
+#define _set_ver(sym,vers) sym ## _R ## vers
+#else
+#define _set_ver(a,b) a
+#endif
+#endif /* MODVERSIONS */
+#undef X
+#undef EMPTY
+ /* mark end of table, last entry above ended with a comma! */
+ { (void *)0, (char *)0 }
+ },
+ /* no module refs, insmod will take care of that instead! */
+ { { (struct module *)0, (struct module_ref *)0 } }
diff --git a/linux/src/include/linux/tasks.h b/linux/src/include/linux/tasks.h
new file mode 100644
index 0000000..466560e
--- /dev/null
+++ b/linux/src/include/linux/tasks.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_TASKS_H
+#define _LINUX_TASKS_H
+
+/*
+ * This is the maximum nr of tasks - change it if you need to
+ */
+
+#define NR_CPUS NCPUS /* Max processors that can be running in SMP */
+
+#define NR_TASKS 512
+
+#define NO_PROC_ID -1
+
+#define MAX_TASKS_PER_USER (NR_TASKS/2)
+#define MIN_TASKS_LEFT_FOR_ROOT 4
+
+#endif
diff --git a/linux/src/include/linux/tcp.h b/linux/src/include/linux/tcp.h
new file mode 100644
index 0000000..ae6a063
--- /dev/null
+++ b/linux/src/include/linux/tcp.h
@@ -0,0 +1,71 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP protocol.
+ *
+ * Version: @(#)tcp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_TCP_H
+#define _LINUX_TCP_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+struct tcphdr {
+ __u16 source;
+ __u16 dest;
+ __u32 seq;
+ __u32 ack_seq;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 res1:4,
+ doff:4,
+ fin:1,
+ syn:1,
+ rst:1,
+ psh:1,
+ ack:1,
+ urg:1,
+ res2:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 doff:4,
+ res1:4,
+ res2:2,
+ urg:1,
+ ack:1,
+ psh:1,
+ rst:1,
+ syn:1,
+ fin:1;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __u16 window;
+ __u16 check;
+ __u16 urg_ptr;
+};
+
+
+enum {
+ TCP_ESTABLISHED = 1,
+ TCP_SYN_SENT,
+ TCP_SYN_RECV,
+ TCP_FIN_WAIT1,
+ TCP_FIN_WAIT2,
+ TCP_TIME_WAIT,
+ TCP_CLOSE,
+ TCP_CLOSE_WAIT,
+ TCP_LAST_ACK,
+ TCP_LISTEN,
+ TCP_CLOSING /* now a valid state */
+};
+
+#endif /* _LINUX_TCP_H */
diff --git a/linux/src/include/linux/termios.h b/linux/src/include/linux/termios.h
new file mode 100644
index 0000000..4786628
--- /dev/null
+++ b/linux/src/include/linux/termios.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_TERMIOS_H
+#define _LINUX_TERMIOS_H
+
+#include <linux/types.h>
+#include <asm/termios.h>
+
+#endif
diff --git a/linux/src/include/linux/time.h b/linux/src/include/linux/time.h
new file mode 100644
index 0000000..d929c9c
--- /dev/null
+++ b/linux/src/include/linux/time.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_TIME_H
+#define _LINUX_TIME_H
+
+#ifndef _STRUCT_TIMESPEC
+#define _STRUCT_TIMESPEC
+struct timespec {
+ long tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+#endif /* _STRUCT_TIMESPEC */
+
+struct timeval {
+ int tv_sec; /* seconds */
+ int tv_usec; /* microseconds */
+};
+
+struct timezone {
+ int tz_minuteswest; /* minutes west of Greenwich */
+ int tz_dsttime; /* type of dst correction */
+};
+
+#define NFDBITS __NFDBITS
+
+#ifdef __KERNEL__
+void do_gettimeofday(struct timeval *tv);
+void do_settimeofday(struct timeval *tv);
+#endif
+
+#define FD_SETSIZE __FD_SETSIZE
+#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
+#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp)
+#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp)
+#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp)
+
+/*
+ * Names of the interval timers, and structure
+ * defining a timer setting.
+ */
+#define ITIMER_REAL 0
+#define ITIMER_VIRTUAL 1
+#define ITIMER_PROF 2
+
+struct itimerspec {
+ struct timespec it_interval; /* timer period */
+ struct timespec it_value; /* timer expiration */
+};
+
+struct itimerval {
+ struct timeval it_interval; /* timer interval */
+ struct timeval it_value; /* current value */
+};
+
+#endif
diff --git a/linux/src/include/linux/timer.h b/linux/src/include/linux/timer.h
new file mode 100644
index 0000000..b922d0d
--- /dev/null
+++ b/linux/src/include/linux/timer.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_TIMER_H
+#define _LINUX_TIMER_H
+
+/*
+ * DON'T CHANGE THESE!! Most of them are hardcoded into some assembly language
+ * as well as being defined here.
+ */
+
+/*
+ * The timers are:
+ *
+ * BLANK_TIMER console screen-saver timer
+ *
+ * BEEP_TIMER console beep timer
+ *
+ * RS_TIMER timer for the RS-232 ports
+ *
+ * SWAP_TIMER timer for the background pageout daemon
+ *
+ * HD_TIMER harddisk timer
+ *
+ * HD_TIMER2 (atdisk2 patches)
+ *
+ * FLOPPY_TIMER floppy disk timer (not used right now)
+ *
+ * SCSI_TIMER scsi.c timeout timer
+ *
+ * NET_TIMER tcp/ip timeout timer
+ *
+ * COPRO_TIMER 387 timeout for buggy hardware..
+ *
+ * QIC02_TAPE_TIMER timer for QIC-02 tape driver (it's not hardcoded)
+ *
+ * MCD_TIMER Mitsumi CD-ROM Timer
+ *
+ * GSCD_TIMER Goldstar CD-ROM Timer
+ *
+ */
+
+#define BLANK_TIMER 0
+#define BEEP_TIMER 1
+#define RS_TIMER 2
+#define SWAP_TIMER 3
+
+#define HD_TIMER 16
+#define FLOPPY_TIMER 17
+#define SCSI_TIMER 18
+#define NET_TIMER 19
+#define SOUND_TIMER 20
+#define COPRO_TIMER 21
+
+#define QIC02_TAPE_TIMER 22 /* hhb */
+#define MCD_TIMER 23
+
+#define HD_TIMER2 24
+#define GSCD_TIMER 25
+
+#define DIGI_TIMER 29
+
+struct timer_struct {
+ unsigned long expires;
+ void (*fn)(void);
+};
+
+extern unsigned long timer_active;
+extern struct timer_struct timer_table[32];
+
+/*
+ * This is completely separate from the above, and is the
+ * "new and improved" way of handling timers more dynamically.
+ * Hopefully efficient and general enough for most things.
+ *
+ * The "hardcoded" timers above are still useful for well-
+ * defined problems, but the timer-list is probably better
+ * when you need multiple outstanding timers or similar.
+ *
+ * The "data" field is in case you want to use the same
+ * timeout function for several timeouts. You can use this
+ * to distinguish between the different invocations.
+ */
+struct timer_list {
+ struct timer_list *next;
+ struct timer_list *prev;
+ unsigned long expires;
+ unsigned long data;
+ void (*function)(unsigned long);
+};
+
+extern void add_timer(struct timer_list * timer);
+extern int del_timer(struct timer_list * timer);
+
+extern void it_real_fn(unsigned long);
+
+extern inline void init_timer(struct timer_list * timer)
+{
+ timer->next = NULL;
+ timer->prev = NULL;
+}
+
+#endif
diff --git a/linux/src/include/linux/tqueue.h b/linux/src/include/linux/tqueue.h
new file mode 100644
index 0000000..d38e1df
--- /dev/null
+++ b/linux/src/include/linux/tqueue.h
@@ -0,0 +1,143 @@
+/*
+ * tqueue.h --- task queue handling for Linux.
+ *
+ * Mostly based on a proposed bottom-half replacement code written by
+ * Kai Petzke, wpp@marie.physik.tu-berlin.de.
+ *
+ * Modified for use in the Linux kernel by Theodore Ts'o,
+ * tytso@mit.edu. Any bugs are my fault, not Kai's.
+ *
+ * The original comment follows below.
+ */
+
+#ifndef _LINUX_TQUEUE_H
+#define _LINUX_TQUEUE_H
+
+#include <asm/bitops.h>
+#include <asm/system.h>
+
+/*
+ * New proposed "bottom half" handlers:
+ * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
+ *
+ * Advantages:
+ * - Bottom halves are implemented as a linked list. You can have as many
+ * of them as you want.
+ * - No more scanning of a bit field is required upon call of a bottom half.
+ * - Support for chained bottom half lists. The run_task_queue() function can be
+ * used as a bottom half handler. This is for example useful for bottom
+ * halfs, which want to be delayed until the next clock tick.
+ *
+ * Problems:
+ * - The queue_task_irq() inline function is only atomic with respect to itself.
+ * Problems can occur, when queue_task_irq() is called from a normal system
+ * call, and an interrupt comes in. No problems occur, when queue_task_irq()
+ * is called from an interrupt or bottom half, and interrupted, as run_task_queue()
+ * will not be executed/continued before the last interrupt returns. If in
+ * doubt, use queue_task(), not queue_task_irq().
+ * - Bottom halves are called in the reverse order that they were linked into
+ * the list.
+ */
+
+struct tq_struct {
+ struct tq_struct *next; /* linked list of active bh's */
+ int sync; /* must be initialized to zero */
+ void (*routine)(void *); /* function to call */
+ void *data; /* argument to function */
+};
+
+typedef struct tq_struct * task_queue;
+
+#define DECLARE_TASK_QUEUE(q) task_queue q = NULL
+
+extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
+
+/*
+ * To implement your own list of active bottom halfs, use the following
+ * two definitions:
+ *
+ * struct tq_struct *my_bh = NULL;
+ * struct tq_struct run_my_bh = {
+ * 0, 0, (void *)(void *) run_task_queue, &my_bh
+ * };
+ *
+ * To activate a bottom half on your list, use:
+ *
+ * queue_task(tq_pointer, &my_bh);
+ *
+ * To run the bottom halves on your list, put them on the immediate list by:
+ *
+ * queue_task(&run_my_bh, &tq_immediate);
+ *
+ * This allows you to do deferred processing. For example, you could
+ * have a bottom half list tq_timer, which is marked active by the timer
+ * interrupt.
+ */
+
+/*
+ * queue_task_irq: put the bottom half handler "bh_pointer" on the list
+ * "bh_list". You may call this function only from an interrupt
+ * handler or a bottom half handler.
+ */
+static __inline__ void queue_task_irq(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!set_bit(0,&bh_pointer->sync)) {
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ }
+}
+
+/*
+ * queue_task_irq_off: put the bottom half handler "bh_pointer" on the list
+ * "bh_list". You may call this function only when interrupts are off.
+ */
+static __inline__ void queue_task_irq_off(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!(bh_pointer->sync & 1)) {
+ bh_pointer->sync = 1;
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ }
+}
+
+
+/*
+ * queue_task: as queue_task_irq, but can be called from anywhere.
+ */
+static __inline__ void queue_task(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!set_bit(0,&bh_pointer->sync)) {
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ restore_flags(flags);
+ }
+}
+
+/*
+ * Call all "bottom halfs" on a given list.
+ */
+static __inline__ void run_task_queue(task_queue *list)
+{
+ struct tq_struct *p;
+
+ p = xchg(list,NULL);
+ while (p) {
+ void *arg;
+ void (*f) (void *);
+ struct tq_struct *save_p;
+ arg = p -> data;
+ f = p -> routine;
+ save_p = p;
+ p = p -> next;
+ save_p -> sync = 0;
+ (*f)(arg);
+ }
+}
+
+#endif /* _LINUX_TQUEUE_H */
diff --git a/linux/src/include/linux/trdevice.h b/linux/src/include/linux/trdevice.h
new file mode 100644
index 0000000..9680176
--- /dev/null
+++ b/linux/src/include/linux/trdevice.h
@@ -0,0 +1,40 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_TRDEVICE_H
+#define _LINUX_TRDEVICE_H
+
+
+#include <linux/if_tr.h>
+
+#ifdef __KERNEL__
+extern int tr_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int tr_rebuild_header(void *buff, struct device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+extern unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev);
+
+#endif
+
+#endif /* _LINUX_TRDEVICE_H */
diff --git a/linux/src/include/linux/tty.h b/linux/src/include/linux/tty.h
new file mode 100644
index 0000000..6fe00af
--- /dev/null
+++ b/linux/src/include/linux/tty.h
@@ -0,0 +1,351 @@
+#ifndef _LINUX_TTY_H
+#define _LINUX_TTY_H
+
+/*
+ * 'tty.h' defines some structures used by tty_io.c and some defines.
+ */
+
+/*
+ * These constants are also useful for user-level apps (e.g., VC
+ * resizing).
+ */
+#define MIN_NR_CONSOLES 1 /* must be at least 1 */
+#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
+#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
+ /* Note: the ioctl VT_GETSTATE does not work for
+ consoles 16 and higher (since it returns a short) */
+
+#ifdef __KERNEL__
+#include <linux/fs.h>
+#include <linux/termios.h>
+#include <linux/tqueue.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_ldisc.h>
+
+#include <asm/system.h>
+
+
+/*
+ * Note: don't mess with NR_PTYS until you understand the tty minor
+ * number allocation game...
+ * (Note: the *_driver.minor_start values 1, 64, 128, 192 are
+ * hardcoded at present.)
+ */
+#define NR_PTYS 256
+#define NR_LDISCS 16
+
+/*
+ * These are set up by the setup-routine at boot-time:
+ */
+
+struct screen_info {
+ unsigned char orig_x;
+ unsigned char orig_y;
+ unsigned char unused1[2];
+ unsigned short orig_video_page;
+ unsigned char orig_video_mode;
+ unsigned char orig_video_cols;
+ unsigned short unused2;
+ unsigned short orig_video_ega_bx;
+ unsigned short unused3;
+ unsigned char orig_video_lines;
+ unsigned char orig_video_isVGA;
+ unsigned short orig_video_points;
+};
+
+extern struct screen_info screen_info;
+
+#define ORIG_X (screen_info.orig_x)
+#define ORIG_Y (screen_info.orig_y)
+#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
+#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
+#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
+#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
+#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
+#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
+
+#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+#define VIDEO_TYPE_CGA 0x11 /* CGA Display */
+#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */
+#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */
+#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */
+
+#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */
+
+#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */
+
+/*
+ * This character is the same as _POSIX_VDISABLE: it cannot be used as
+ * a c_cc[] character, but indicates that a particular special character
+ * isn't in use (eg VINTR has no character etc)
+ */
+#define __DISABLED_CHAR '\0'
+
+/*
+ * This is the flip buffer used for the tty driver. The buffer is
+ * located in the tty structure, and is used as a high speed interface
+ * between the tty driver and the tty line discipline.
+ */
+#define TTY_FLIPBUF_SIZE 512
+
+struct tty_flip_buffer {
+ struct tq_struct tqueue;
+ unsigned char char_buf[2*TTY_FLIPBUF_SIZE];
+ char flag_buf[2*TTY_FLIPBUF_SIZE];
+ char *char_buf_ptr;
+ unsigned char *flag_buf_ptr;
+ int count;
+ int buf_num;
+};
+
+/*
+ * When a break, frame error, or parity error happens, these codes are
+ * stuffed into the flags buffer.
+ */
+#define TTY_NORMAL 0
+#define TTY_BREAK 1
+#define TTY_FRAME 2
+#define TTY_PARITY 3
+#define TTY_OVERRUN 4
+
+#define INTR_CHAR(tty) ((tty)->termios->c_cc[VINTR])
+#define QUIT_CHAR(tty) ((tty)->termios->c_cc[VQUIT])
+#define ERASE_CHAR(tty) ((tty)->termios->c_cc[VERASE])
+#define KILL_CHAR(tty) ((tty)->termios->c_cc[VKILL])
+#define EOF_CHAR(tty) ((tty)->termios->c_cc[VEOF])
+#define TIME_CHAR(tty) ((tty)->termios->c_cc[VTIME])
+#define MIN_CHAR(tty) ((tty)->termios->c_cc[VMIN])
+#define SWTC_CHAR(tty) ((tty)->termios->c_cc[VSWTC])
+#define START_CHAR(tty) ((tty)->termios->c_cc[VSTART])
+#define STOP_CHAR(tty) ((tty)->termios->c_cc[VSTOP])
+#define SUSP_CHAR(tty) ((tty)->termios->c_cc[VSUSP])
+#define EOL_CHAR(tty) ((tty)->termios->c_cc[VEOL])
+#define REPRINT_CHAR(tty) ((tty)->termios->c_cc[VREPRINT])
+#define DISCARD_CHAR(tty) ((tty)->termios->c_cc[VDISCARD])
+#define WERASE_CHAR(tty) ((tty)->termios->c_cc[VWERASE])
+#define LNEXT_CHAR(tty) ((tty)->termios->c_cc[VLNEXT])
+#define EOL2_CHAR(tty) ((tty)->termios->c_cc[VEOL2])
+
+#define _I_FLAG(tty,f) ((tty)->termios->c_iflag & (f))
+#define _O_FLAG(tty,f) ((tty)->termios->c_oflag & (f))
+#define _C_FLAG(tty,f) ((tty)->termios->c_cflag & (f))
+#define _L_FLAG(tty,f) ((tty)->termios->c_lflag & (f))
+
+#define I_IGNBRK(tty) _I_FLAG((tty),IGNBRK)
+#define I_BRKINT(tty) _I_FLAG((tty),BRKINT)
+#define I_IGNPAR(tty) _I_FLAG((tty),IGNPAR)
+#define I_PARMRK(tty) _I_FLAG((tty),PARMRK)
+#define I_INPCK(tty) _I_FLAG((tty),INPCK)
+#define I_ISTRIP(tty) _I_FLAG((tty),ISTRIP)
+#define I_INLCR(tty) _I_FLAG((tty),INLCR)
+#define I_IGNCR(tty) _I_FLAG((tty),IGNCR)
+#define I_ICRNL(tty) _I_FLAG((tty),ICRNL)
+#define I_IUCLC(tty) _I_FLAG((tty),IUCLC)
+#define I_IXON(tty) _I_FLAG((tty),IXON)
+#define I_IXANY(tty) _I_FLAG((tty),IXANY)
+#define I_IXOFF(tty) _I_FLAG((tty),IXOFF)
+#define I_IMAXBEL(tty) _I_FLAG((tty),IMAXBEL)
+
+#define O_OPOST(tty) _O_FLAG((tty),OPOST)
+#define O_OLCUC(tty) _O_FLAG((tty),OLCUC)
+#define O_ONLCR(tty) _O_FLAG((tty),ONLCR)
+#define O_OCRNL(tty) _O_FLAG((tty),OCRNL)
+#define O_ONOCR(tty) _O_FLAG((tty),ONOCR)
+#define O_ONLRET(tty) _O_FLAG((tty),ONLRET)
+#define O_OFILL(tty) _O_FLAG((tty),OFILL)
+#define O_OFDEL(tty) _O_FLAG((tty),OFDEL)
+#define O_NLDLY(tty) _O_FLAG((tty),NLDLY)
+#define O_CRDLY(tty) _O_FLAG((tty),CRDLY)
+#define O_TABDLY(tty) _O_FLAG((tty),TABDLY)
+#define O_BSDLY(tty) _O_FLAG((tty),BSDLY)
+#define O_VTDLY(tty) _O_FLAG((tty),VTDLY)
+#define O_FFDLY(tty) _O_FLAG((tty),FFDLY)
+
+#define C_BAUD(tty) _C_FLAG((tty),CBAUD)
+#define C_CSIZE(tty) _C_FLAG((tty),CSIZE)
+#define C_CSTOPB(tty) _C_FLAG((tty),CSTOPB)
+#define C_CREAD(tty) _C_FLAG((tty),CREAD)
+#define C_PARENB(tty) _C_FLAG((tty),PARENB)
+#define C_PARODD(tty) _C_FLAG((tty),PARODD)
+#define C_HUPCL(tty) _C_FLAG((tty),HUPCL)
+#define C_CLOCAL(tty) _C_FLAG((tty),CLOCAL)
+#define C_CIBAUD(tty) _C_FLAG((tty),CIBAUD)
+#define C_CRTSCTS(tty) _C_FLAG((tty),CRTSCTS)
+
+#define L_ISIG(tty) _L_FLAG((tty),ISIG)
+#define L_ICANON(tty) _L_FLAG((tty),ICANON)
+#define L_XCASE(tty) _L_FLAG((tty),XCASE)
+#define L_ECHO(tty) _L_FLAG((tty),ECHO)
+#define L_ECHOE(tty) _L_FLAG((tty),ECHOE)
+#define L_ECHOK(tty) _L_FLAG((tty),ECHOK)
+#define L_ECHONL(tty) _L_FLAG((tty),ECHONL)
+#define L_NOFLSH(tty) _L_FLAG((tty),NOFLSH)
+#define L_TOSTOP(tty) _L_FLAG((tty),TOSTOP)
+#define L_ECHOCTL(tty) _L_FLAG((tty),ECHOCTL)
+#define L_ECHOPRT(tty) _L_FLAG((tty),ECHOPRT)
+#define L_ECHOKE(tty) _L_FLAG((tty),ECHOKE)
+#define L_FLUSHO(tty) _L_FLAG((tty),FLUSHO)
+#define L_PENDIN(tty) _L_FLAG((tty),PENDIN)
+#define L_IEXTEN(tty) _L_FLAG((tty),IEXTEN)
+
+/*
+ * Where all of the state associated with a tty is kept while the tty
+ * is open. Since the termios state should be kept even if the tty
+ * has been closed --- for things like the baud rate, etc --- it is
+ * not stored here, but rather a pointer to the real state is stored
+ * here. Possibly the winsize structure should have the same
+ * treatment, but (1) the default 80x24 is usually right and (2) it's
+ * most often used by a windowing system, which will set the correct
+ * size each time the window is created or resized anyway.
+ * IMPORTANT: since this structure is dynamically allocated, it must
+ * be no larger than 4096 bytes. Changing TTY_BUF_SIZE will change
+ * the size of this structure, and it needs to be done with care.
+ * - TYT, 9/14/92
+ */
+struct tty_struct {
+ int magic;
+ struct tty_driver driver;
+ struct tty_ldisc ldisc;
+ struct termios *termios, *termios_locked;
+ int pgrp;
+ int session;
+ kdev_t device;
+ unsigned long flags;
+ int count;
+ struct winsize winsize;
+ unsigned char stopped:1, hw_stopped:1, packet:1;
+ unsigned char ctrl_status;
+
+ struct tty_struct *link;
+ struct fasync_struct *fasync;
+ struct tty_flip_buffer flip;
+ int max_flip_cnt;
+ struct wait_queue *write_wait;
+ struct wait_queue *read_wait;
+ void *disc_data;
+ void *driver_data;
+
+#define N_TTY_BUF_SIZE 4096
+
+ /*
+ * The following is data for the N_TTY line discipline. For
+ * historical reasons, this is included in the tty structure.
+ */
+ unsigned int column;
+ unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
+ unsigned char closing:1;
+ unsigned short minimum_to_wake;
+ unsigned overrun_time;
+ int num_overrun;
+ unsigned long process_char_map[256/(8*sizeof(unsigned long))];
+ char *read_buf;
+ int read_head;
+ int read_tail;
+ int read_cnt;
+ unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))];
+ int canon_data;
+ unsigned long canon_head;
+ unsigned int canon_column;
+};
+
+/* tty magic number */
+#define TTY_MAGIC 0x5401
+
+/*
+ * These bits are used in the flags field of the tty structure.
+ *
+ * So that interrupts won't be able to mess up the queues,
+ * copy_to_cooked must be atomic with respect to itself, as must
+ * tty->write. Thus, you must use the inline functions set_bit() and
+ * clear_bit() to make things atomic.
+ */
+#define TTY_THROTTLED 0
+#define TTY_IO_ERROR 1
+#define TTY_OTHER_CLOSED 2
+#define TTY_EXCLUSIVE 3
+#define TTY_DEBUG 4
+#define TTY_DO_WRITE_WAKEUP 5
+#define TTY_PUSH 6
+#define TTY_CLOSING 7
+
+#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
+
+extern void tty_write_flush(struct tty_struct *);
+
+extern struct termios tty_std_termios;
+extern struct tty_struct * redirect;
+extern struct tty_ldisc ldiscs[];
+extern int fg_console, last_console, want_console;
+
+extern int kmsg_redirect;
+extern struct wait_queue * keypress_wait;
+
+extern unsigned long con_init(unsigned long);
+
+extern int rs_init(void);
+extern int lp_init(void);
+extern int pty_init(void);
+extern int tty_init(void);
+extern int pcxe_init(void);
+extern int vcs_init(void);
+extern int cy_init(void);
+extern int stl_init(void);
+extern int stli_init(void);
+extern int riscom8_init(void);
+extern int specialix_init(void);
+extern int baycom_init(void);
+
+extern int tty_paranoia_check(struct tty_struct *tty, kdev_t device,
+ const char *routine);
+extern char *_tty_name(struct tty_struct *tty, char *buf);
+extern char *tty_name(struct tty_struct *tty);
+extern void tty_wait_until_sent(struct tty_struct * tty, int timeout);
+extern int tty_check_change(struct tty_struct * tty);
+extern void stop_tty(struct tty_struct * tty);
+extern void start_tty(struct tty_struct * tty);
+extern int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc);
+extern int tty_register_driver(struct tty_driver *driver);
+extern int tty_unregister_driver(struct tty_driver *driver);
+extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
+ int buflen);
+extern void tty_write_message(struct tty_struct *tty, char *msg);
+
+extern int is_orphaned_pgrp(int pgrp);
+extern int is_ignored(int sig);
+extern int tty_signal(int sig, struct tty_struct *tty);
+extern void tty_hangup(struct tty_struct * tty);
+extern void tty_vhangup(struct tty_struct * tty);
+extern void tty_unhangup(struct file *filp);
+extern int tty_hung_up_p(struct file * filp);
+extern void do_SAK(struct tty_struct *tty);
+extern void disassociate_ctty(int priv);
+
+/* n_tty.c */
+extern struct tty_ldisc tty_ldisc_N_TTY;
+
+/* tty_ioctl.c */
+extern int n_tty_ioctl(struct tty_struct * tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+
+/* serial.c */
+
+extern int rs_open(struct tty_struct * tty, struct file * filp);
+
+/* pty.c */
+
+extern int pty_open(struct tty_struct * tty, struct file * filp);
+extern int pcxe_open(struct tty_struct *tty, struct file *filp);
+
+/* console.c */
+
+extern int con_open(struct tty_struct * tty, struct file * filp);
+extern void update_screen(int new_console);
+extern void console_print(const char *);
+
+/* vt.c */
+
+extern int vt_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/linux/src/include/linux/tty_driver.h b/linux/src/include/linux/tty_driver.h
new file mode 100644
index 0000000..3468fa2
--- /dev/null
+++ b/linux/src/include/linux/tty_driver.h
@@ -0,0 +1,189 @@
+#ifndef _LINUX_TTY_DRIVER_H
+#define _LINUX_TTY_DRIVER_H
+
+/*
+ * This structure defines the interface between the low-level tty
+ * driver and the tty routines. The following routines can be
+ * defined; unless noted otherwise, they are optional, and can be
+ * filled in with a null pointer.
+ *
+ * int (*open)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is opened.
+ * This routine is mandatory; if this routine is not filled in,
+ * the attempted open will fail with ENODEV.
+ *
+ * void (*close)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is closed.
+ *
+ * int (*write)(struct tty_struct * tty, int from_user,
+ * const unsigned char *buf, int count);
+ *
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device. The characters may come from
+ * user space or kernel space. This routine will return the
+ * number of characters actually accepted for writing. This
+ * routine is mandatory.
+ *
+ * void (*put_char)(struct tty_struct *tty, unsigned char ch);
+ *
+ * This routine is called by the kernel to write a single
+ * character to the tty device. If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver. If there is no room
+ * in the queue, the character is ignored.
+ *
+ * void (*flush_chars)(struct tty_struct *tty);
+ *
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().
+ *
+ * int (*write_room)(struct tty_struct *tty);
+ *
+ * This routine returns the numbers of characters the tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if the output flow
+ * control is asserted.
+ *
+ * int (*ioctl)(struct tty_struct *tty, struct file * file,
+ * unsigned int cmd, unsigned long arg);
+ *
+ * This routine allows the tty driver to implement
+ * device-specific ioctl's. If the ioctl number passed in cmd
+ * is not recognized by the driver, it should return ENOIOCTLCMD.
+ *
+ * void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ *
+ * This routine allows the tty driver to be notified when
+ * device's termios settings have changed. Note that a
+ * well-designed tty driver should be prepared to accept the case
+ * where old == NULL, and try to do something rational.
+ *
+ * void (*set_ldisc)(struct tty_struct *tty);
+ *
+ * This routine allows the tty driver to be notified when the
+ * device's termios settings have changed.
+ *
+ * void (*throttle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that input buffers for
+ * the line discipline are close to full, and it should somehow
+ * signal that no more characters should be sent to the tty.
+ *
+ * void (*unthrottle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that it should signal
+ * that characters can now be sent to the tty without fear of
+ * overrunning the input buffers of the line disciplines.
+ *
+ * void (*stop)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should stop
+ * outputting characters to the tty device.
+ *
+ * void (*start)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should resume sending
+ * characters to the tty device.
+ *
+ * void (*hangup)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should hangup the
+ * tty device.
+ *
+ */
+
+#include <linux/fs.h>
+
+struct tty_driver {
+ int magic; /* magic number for this structure */
+ const char *name;
+ int name_base; /* offset of printed name */
+ short major; /* major device number */
+ short minor_start; /* start of minor device number*/
+ short num; /* number of devices */
+ short type; /* type of tty driver */
+ short subtype; /* subtype of tty driver */
+ struct termios init_termios; /* Initial termios */
+ int flags; /* tty driver flags */
+ int *refcount; /* for loadable tty drivers */
+ struct tty_driver *other; /* only used for the PTY driver */
+
+ /*
+ * Pointer to the tty data structures
+ */
+ struct tty_struct **table;
+ struct termios **termios;
+ struct termios **termios_locked;
+
+ /*
+ * Interface routines from the upper tty layer to the tty
+ * driver.
+ */
+ int (*open)(struct tty_struct * tty, struct file * filp);
+ void (*close)(struct tty_struct * tty, struct file * filp);
+ int (*write)(struct tty_struct * tty, int from_user,
+ const unsigned char *buf, int count);
+ void (*put_char)(struct tty_struct *tty, unsigned char ch);
+ void (*flush_chars)(struct tty_struct *tty);
+ int (*write_room)(struct tty_struct *tty);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ int (*ioctl)(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ void (*throttle)(struct tty_struct * tty);
+ void (*unthrottle)(struct tty_struct * tty);
+ void (*stop)(struct tty_struct *tty);
+ void (*start)(struct tty_struct *tty);
+ void (*hangup)(struct tty_struct *tty);
+ void (*flush_buffer)(struct tty_struct *tty);
+ void (*set_ldisc)(struct tty_struct *tty);
+
+ /*
+ * linked list pointers
+ */
+ struct tty_driver *next;
+ struct tty_driver *prev;
+};
+
+/* tty driver magic number */
+#define TTY_DRIVER_MAGIC 0x5402
+
+/*
+ * tty driver flags
+ *
+ * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the
+ * termios setting when the last process has closed the device.
+ * Used for PTY's, in particular.
+ *
+ * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
+ * guarantee never to set any special character handling
+ * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
+ * !INPCK)). That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the
+ * line driver, it won't do so. This allows the line driver to
+ * optimize for this case if this flag is set. (Note that there
+ * is also a promise, if the above case is true, not to signal
+ * overruns, either.)
+ */
+#define TTY_DRIVER_INSTALLED 0x0001
+#define TTY_DRIVER_RESET_TERMIOS 0x0002
+#define TTY_DRIVER_REAL_RAW 0x0004
+
+/* tty driver types */
+#define TTY_DRIVER_TYPE_SYSTEM 0x0001
+#define TTY_DRIVER_TYPE_CONSOLE 0x0002
+#define TTY_DRIVER_TYPE_SERIAL 0x0003
+#define TTY_DRIVER_TYPE_PTY 0x0004
+#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
+
+/* system subtypes (magic, used by tty_io.c) */
+#define SYSTEM_TYPE_TTY 0x0001
+#define SYSTEM_TYPE_CONSOLE 0x0002
+
+/* pty subtypes (magic, used by tty_io.c) */
+#define PTY_TYPE_MASTER 0x0001
+#define PTY_TYPE_SLAVE 0x0002
+
+#endif /* #ifdef _LINUX_TTY_DRIVER_H */
diff --git a/linux/src/include/linux/tty_ldisc.h b/linux/src/include/linux/tty_ldisc.h
new file mode 100644
index 0000000..87b54ca
--- /dev/null
+++ b/linux/src/include/linux/tty_ldisc.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_TTY_LDISC_H
+#define _LINUX_TTY_LDISC_H
+
+/*
+ * Definitions for the tty line discipline
+ */
+
+#include <linux/fs.h>
+#include <linux/wait.h>
+
+struct tty_ldisc {
+ int magic;
+ int num;
+ int flags;
+ /*
+ * The following routines are called from above.
+ */
+ int (*open)(struct tty_struct *);
+ void (*close)(struct tty_struct *);
+ void (*flush_buffer)(struct tty_struct *tty);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ int (*read)(struct tty_struct * tty, struct file * file,
+ unsigned char * buf, unsigned int nr);
+ int (*write)(struct tty_struct * tty, struct file * file,
+ const unsigned char * buf, unsigned int nr);
+ int (*ioctl)(struct tty_struct * tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ int (*select)(struct tty_struct * tty, struct inode * inode,
+ struct file * file, int sel_type,
+ struct select_table_struct *wait);
+
+ /*
+ * The following routines are called from below.
+ */
+ void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+ char *fp, int count);
+ int (*receive_room)(struct tty_struct *);
+ void (*write_wakeup)(struct tty_struct *);
+};
+
+#define TTY_LDISC_MAGIC 0x5403
+
+#define LDISC_FLAG_DEFINED 0x00000001
+
+#endif /* _LINUX_TTY_LDISC_H */
diff --git a/linux/src/include/linux/types.h b/linux/src/include/linux/types.h
new file mode 100644
index 0000000..0b9999f
--- /dev/null
+++ b/linux/src/include/linux/types.h
@@ -0,0 +1,96 @@
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+#ifdef __i386__
+#if defined(__KERNEL__) && !defined(STDC_HEADERS)
+#if ((__GNUC_MINOR__ >= 8) || (__GNUC_MAJOR >=3))
+#warning "This code is tested with gcc 2.7.2.x only. Using egcs/gcc 2.8.x needs"
+#warning "additional patches that have not been sufficiently tested to include by"
+#warning "default."
+#warning "See http://www.suse.de/~florian/kernel+egcs.html for more information"
+#error "Remove this if you have applied the gcc 2.8/egcs patches and wish to use them"
+#endif
+#endif
+#endif
+
+#include <linux/posix_types.h>
+#include <asm/types.h>
+
+#ifndef __KERNEL_STRICT_NAMES
+
+typedef __kernel_fd_set fd_set;
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+typedef __kernel_off_t off_t;
+typedef __kernel_pid_t pid_t;
+typedef __kernel_uid_t uid_t;
+typedef __kernel_gid_t gid_t;
+typedef __kernel_daddr_t daddr_t;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __kernel_loff_t loff_t;
+#endif
+
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef __kernel_size_t size_t;
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef __kernel_ssize_t ssize_t;
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+typedef __kernel_time_t time_t;
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+
+#ifndef _CADDR_T
+#define _CADDR_T
+typedef __kernel_caddr_t caddr_t;
+#endif
+
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#endif /* __KERNEL_STRICT_NAMES */
+
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+#endif /* _LINUX_TYPES_H */
diff --git a/linux/src/include/linux/ucdrom.h b/linux/src/include/linux/ucdrom.h
new file mode 100644
index 0000000..3d8b358
--- /dev/null
+++ b/linux/src/include/linux/ucdrom.h
@@ -0,0 +1,96 @@
+/* ucdrom.h. Uniform cdrom data structures for cdrom.c. -*- linux-c -*-
+ Copyright (c) 1996 David van Leeuwen.
+ */
+
+#ifndef LINUX_UCDROM_H
+#define LINUX_UCDROM_H
+
+#ifdef __KERNEL__
+struct cdrom_device_ops {
+/* routines */
+ int (*open) (kdev_t, int);
+ void (*release) (kdev_t);
+ int (*open_files) (kdev_t); /* number of open files */
+ int (*drive_status) (kdev_t);
+ int (*disc_status) (kdev_t);
+ int (*media_changed) (kdev_t);
+ int (*tray_move) (kdev_t, int);
+ int (*lock_door) (kdev_t, int);
+ int (*select_speed) (kdev_t, int);
+ int (*select_disc) (kdev_t, int);
+ int (*get_last_session) (kdev_t, struct cdrom_multisession *);
+ int (*get_mcn) (kdev_t, struct cdrom_mcn *);
+ int (*reset) (kdev_t dev); /* hard reset device */
+ int (*audio_ioctl) (kdev_t, unsigned int, void *); /* play stuff */
+ int (*dev_ioctl) (kdev_t, unsigned int, unsigned long); /* dev-specific */
+/* specifications */
+ const int capability; /* capability flags */
+ int mask; /* mask of capability: disables them */
+ const int speed; /* maximum speed for reading data */
+ const int minors; /* number of minor devs supported */
+ const int capacity; /* number of discs in jukebox */
+/* device-related storage */
+ int options; /* options flags */
+ long mc_flags; /* media change buffer flags (2*16) */
+};
+#endif
+
+/* capability flags */
+#define CDC_CLOSE_TRAY 0x1 /* caddy systems _can't_ close */
+#define CDC_OPEN_TRAY 0x2 /* but _can_ eject. */
+#define CDC_LOCK 0x4 /* disable manual eject */
+#define CDC_SELECT_SPEED 0x8 /* programmable speed */
+#define CDC_SELECT_DISC 0x10 /* select disc from juke-box */
+#define CDC_MULTI_SESSION 0x20 /* read sessions>1 */
+#define CDC_MCN 0x40 /* Medium Catalog Number */
+#define CDC_MEDIA_CHANGED 0x80 /* media changed */
+#define CDC_PLAY_AUDIO 0x100 /* audio functions */
+
+/* drive status possibilities */
+#define CDS_NO_INFO 0 /* if not implemented */
+#define CDS_NO_DISC 1
+#define CDS_TRAY_OPEN 2
+#define CDS_DRIVE_NOT_READY 3
+#define CDS_DISC_OK 4
+
+/* disc status possibilities, other than CDS_NO_DISC */
+#define CDS_AUDIO 100
+#define CDS_DATA_1 101
+#define CDS_DATA_2 102
+#define CDS_XA_2_1 103
+#define CDS_XA_2_2 104
+
+/* User-configurable behavior options */
+#define CDO_AUTO_CLOSE 0x1 /* close tray on first open() */
+#define CDO_AUTO_EJECT 0x2 /* open tray on last release() */
+#define CDO_USE_FFLAGS 0x4 /* use O_NONBLOCK information on open */
+#define CDO_LOCK 0x8 /* lock tray on open files */
+#define CDO_CHECK_TYPE 0x10 /* check type on open for data */
+
+/* Some more ioctls to control these options */
+#define CDROM_SET_OPTIONS 0x5320
+#define CDROM_CLEAR_OPTIONS 0x5321
+#define CDROM_SELECT_SPEED 0x5322 /* head-speed */
+#define CDROM_SELECT_DISC 0x5323 /* for juke-boxes */
+#define CDROM_MEDIA_CHANGED 0x5325
+#define CDROM_DRIVE_STATUS 0x5326 /* tray position, etc. */
+#define CDROM_DISC_STATUS 0x5327 /* disc type etc. */
+
+/* Rename an old ioctl */
+#define CDROM_GET_MCN CDROM_GET_UPC /* medium catalog number */
+
+#ifdef __KERNEL__
+/* the general file operations structure: */
+extern struct file_operations cdrom_fops;
+
+extern int register_cdrom(int major, char *name,
+ struct cdrom_device_ops *cdo);
+extern int unregister_cdrom(int major, char *name);
+#endif
+
+#endif /* LINUX_UCDROM_H */
+/*
+ * Local variables:
+ * comment-column: 40
+ * End:
+ */
diff --git a/linux/src/include/linux/udp.h b/linux/src/include/linux/udp.h
new file mode 100644
index 0000000..471301a
--- /dev/null
+++ b/linux/src/include/linux/udp.h
@@ -0,0 +1,29 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP protocol.
+ *
+ * Version: @(#)udp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_UDP_H
+#define _LINUX_UDP_H
+
+
+struct udphdr {
+ unsigned short source;
+ unsigned short dest;
+ unsigned short len;
+ unsigned short check;
+};
+
+
+#endif /* _LINUX_UDP_H */
diff --git a/linux/src/include/linux/uio.h b/linux/src/include/linux/uio.h
new file mode 100644
index 0000000..8027bc8
--- /dev/null
+++ b/linux/src/include/linux/uio.h
@@ -0,0 +1,26 @@
+#ifndef __LINUX_UIO_H
+#define __LINUX_UIO_H
+
+/*
+ * Berkeley style UIO structures - Alan Cox 1994.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+/* A word of warning: Our uio structure will clash with the C library one (which is now obsolete). Remove the C
+ library one from sys/uio.h if you have a very old library set */
+
+struct iovec
+{
+ void *iov_base; /* BSD uses caddr_t (same thing in effect) */
+ int iov_len;
+};
+
+#define UIO_MAXIOV 16 /* Maximum iovec's in one operation
+ 16 matches BSD */
+
+#endif
diff --git a/linux/src/include/linux/unistd.h b/linux/src/include/linux/unistd.h
new file mode 100644
index 0000000..10ed983
--- /dev/null
+++ b/linux/src/include/linux/unistd.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_UNISTD_H_
+#define _LINUX_UNISTD_H_
+
+extern int errno;
+
+/*
+ * Include machine specific syscallX macros
+ */
+#include <asm/unistd.h>
+
+#endif /* _LINUX_UNISTD_H_ */
diff --git a/linux/src/include/linux/utsname.h b/linux/src/include/linux/utsname.h
new file mode 100644
index 0000000..7aef28f
--- /dev/null
+++ b/linux/src/include/linux/utsname.h
@@ -0,0 +1,35 @@
+#ifndef _LINUX_UTSNAME_H
+#define _LINUX_UTSNAME_H
+
+#define __OLD_UTS_LEN 8
+
+struct oldold_utsname {
+ char sysname[9];
+ char nodename[9];
+ char release[9];
+ char version[9];
+ char machine[9];
+};
+
+#define __NEW_UTS_LEN 64
+
+struct old_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+};
+
+struct new_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+ char domainname[65];
+};
+
+extern struct new_utsname system_utsname;
+
+#endif
diff --git a/linux/src/include/linux/version.h b/linux/src/include/linux/version.h
new file mode 100644
index 0000000..1a8bd9f
--- /dev/null
+++ b/linux/src/include/linux/version.h
@@ -0,0 +1,2 @@
+#define UTS_RELEASE "2.0.36"
+#define LINUX_VERSION_CODE 131108
diff --git a/linux/src/include/linux/vfs.h b/linux/src/include/linux/vfs.h
new file mode 100644
index 0000000..b3a5865
--- /dev/null
+++ b/linux/src/include/linux/vfs.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_VFS_H
+#define _LINUX_VFS_H
+
+#include <asm/statfs.h>
+
+#endif
diff --git a/linux/src/include/linux/wait.h b/linux/src/include/linux/wait.h
new file mode 100644
index 0000000..96de4aa
--- /dev/null
+++ b/linux/src/include/linux/wait.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_WAIT_H
+#define _LINUX_WAIT_H
+
+#define WNOHANG 0x00000001
+#define WUNTRACED 0x00000002
+
+#define __WALL 0x40000000 /* Wait on all children, regardless of type */
+#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+struct wait_queue {
+ struct task_struct * task;
+ struct wait_queue * next;
+};
+
+typedef struct wait_queue wait_queue_t;
+typedef struct wait_queue *wait_queue_head_t;
+
+#define WAIT_QUEUE_HEAD(x) ((struct wait_queue *)((x)-1))
+#define DECLARE_WAITQUEUE(wait, current) struct wait_queue wait = { current, NULL }
+#define DECLARE_WAIT_QUEUE_HEAD(wait) wait_queue_head_t wait
+#define init_waitqueue_head(x) *(x)=NULL
+#define init_waitqueue_entry(q,p) ((q)->task)=(p)
+
+static inline void init_waitqueue(struct wait_queue **q)
+{
+ *q = WAIT_QUEUE_HEAD(q);
+}
+
+static inline int waitqueue_active(struct wait_queue **q)
+{
+ struct wait_queue *head = *q;
+ return head && head != WAIT_QUEUE_HEAD(q);
+}
+
+struct select_table_entry {
+ struct wait_queue wait;
+ struct wait_queue ** wait_address;
+};
+
+typedef struct select_table_struct {
+ int nr;
+ struct select_table_entry * entry;
+} select_table;
+
+#define __MAX_SELECT_TABLE_ENTRIES (4096 / sizeof (struct select_table_entry))
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/linux/wireless.h b/linux/src/include/linux/wireless.h
new file mode 100644
index 0000000..c552ff2
--- /dev/null
+++ b/linux/src/include/linux/wireless.h
@@ -0,0 +1,479 @@
+/*
+ * This file define a set of standard wireless extensions
+ *
+ * Version : 9 16.10.99
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ */
+
+#ifndef _LINUX_WIRELESS_H
+#define _LINUX_WIRELESS_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * Basically, the wireless extensions are for now a set of standard ioctl
+ * call + /proc/net/wireless
+ *
+ * The entry /proc/net/wireless give statistics and information on the
+ * driver.
+ * This is better than having each driver having its entry because
+ * its centralised and we may remove the driver module safely.
+ *
+ * Ioctl are used to configure the driver and issue commands. This is
+ * better than command line options of insmod because we may want to
+ * change dynamically (while the driver is running) some parameters.
+ *
+ * The ioctl mechanisms are copied from standard devices ioctl.
+ * We have the list of commands plus a structure describing the
+ * data exchanged...
+ * Note that to add these ioctl, I was obliged to modify :
+ * net/core/dev.c (two place + add include)
+ * net/ipv4/af_inet.c (one place + add include)
+ *
+ * /proc/net/wireless is a copy of /proc/net/dev.
+ * We have a structure for data passed from the driver to /proc/net/wireless
+ * To add this, I've modified :
+ * net/core/dev.c (two other places)
+ * include/linux/netdevice.h (one place)
+ * include/linux/proc_fs.h (one place)
+ *
+ * Do not add here things that are redundant with other mechanisms
+ * (drivers init, ifconfig, /proc/net/dev, ...) and which are not
+ * wireless specific.
+ *
+ * These wireless extensions are not magic : each driver has to provide
+ * support for them...
+ *
+ * IMPORTANT NOTE : As everything in the kernel, this is very much a
+ * work in progress. Contact me if you have ideas of improvements...
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+#include <linux/if.h> /* for IFNAMSIZ and co... */
+
+/**************************** CONSTANTS ****************************/
+
+/* --------------------------- VERSION --------------------------- */
+/*
+ * This constant is used to know the availability of the wireless
+ * extensions and to know which version of wireless extensions it is
+ * (there is some stuff that will be added in the future...)
+ * I just plan to increment with each new version.
+ */
+#define WIRELESS_EXT 10
+
+/*
+ * Changes :
+ *
+ * V2 to V3
+ * --------
+ * Alan Cox start some incompatibles changes. I've integrated a bit more.
+ * - Encryption renamed to Encode to avoid US regulation problems
+ * - Frequency changed from float to struct to avoid problems on old 386
+ *
+ * V3 to V4
+ * --------
+ * - Add sensitivity
+ *
+ * V4 to V5
+ * --------
+ * - Missing encoding definitions in range
+ * - Access points stuff
+ *
+ * V5 to V6
+ * --------
+ * - 802.11 support (ESSID ioctls)
+ *
+ * V6 to V7
+ * --------
+ * - define IW_ESSID_MAX_SIZE and IW_MAX_AP
+ *
+ * V7 to V8
+ * --------
+ * - Changed my e-mail address
+ * - More 802.11 support (nickname, rate, rts, frag)
+ * - List index in frequencies
+ *
+ * V8 to V9
+ * --------
+ * - Support for 'mode of operation' (ad-hoc, managed...)
+ * - Support for unicast and multicast power saving
+ * - Change encoding to support larger tokens (>64 bits)
+ * - Updated iw_params (disable, flags) and use it for NWID
+ * - Extracted iw_point from iwreq for clarity
+ *
+ * V9 to V10
+ * ---------
+ * - Add PM capability to range structure
+ * - Add PM modifier : MAX/MIN/RELATIVE
+ * - Add encoding option : IW_ENCODE_NOKEY
+ * - Add TxPower ioctls (work like TxRate)
+ */
+
+/* -------------------------- IOCTL LIST -------------------------- */
+
+/* Basic operations */
+#define SIOCSIWNAME 0x8B00 /* Unused */
+#define SIOCGIWNAME 0x8B01 /* get name == wireless protocol */
+#define SIOCSIWNWID 0x8B02 /* set network id (the cell) */
+#define SIOCGIWNWID 0x8B03 /* get network id */
+#define SIOCSIWFREQ 0x8B04 /* set channel/frequency (Hz) */
+#define SIOCGIWFREQ 0x8B05 /* get channel/frequency (Hz) */
+#define SIOCSIWMODE 0x8B06 /* set operation mode */
+#define SIOCGIWMODE 0x8B07 /* get operation mode */
+#define SIOCSIWSENS 0x8B08 /* set sensitivity (dBm) */
+#define SIOCGIWSENS 0x8B09 /* get sensitivity (dBm) */
+
+/* Informative stuff */
+#define SIOCSIWRANGE 0x8B0A /* Unused */
+#define SIOCGIWRANGE 0x8B0B /* Get range of parameters */
+#define SIOCSIWPRIV 0x8B0C /* Unused */
+#define SIOCGIWPRIV 0x8B0D /* get private ioctl interface info */
+
+/* Mobile IP support */
+#define SIOCSIWSPY 0x8B10 /* set spy addresses */
+#define SIOCGIWSPY 0x8B11 /* get spy info (quality of link) */
+
+/* Access Point manipulation */
+#define SIOCSIWAP 0x8B14 /* set access point MAC addresses */
+#define SIOCGIWAP 0x8B15 /* get access point MAC addresses */
+#define SIOCGIWAPLIST 0x8B17 /* get list of access point in range */
+
+/* 802.11 specific support */
+#define SIOCSIWESSID 0x8B1A /* set ESSID (network name) */
+#define SIOCGIWESSID 0x8B1B /* get ESSID */
+#define SIOCSIWNICKN 0x8B1C /* set node name/nickname */
+#define SIOCGIWNICKN 0x8B1D /* get node name/nickname */
+/* As the ESSID and NICKN are strings up to 32 bytes long, it doesn't fit
+ * within the 'iwreq' structure, so we need to use the 'data' member to
+ * point to a string in user space, like it is done for RANGE...
+ * The "flags" member indicate if the ESSID is active or not (promiscuous).
+ */
+
+/* Other parameters useful in 802.11 and some other devices */
+#define SIOCSIWRATE 0x8B20 /* set default bit rate (bps) */
+#define SIOCGIWRATE 0x8B21 /* get default bit rate (bps) */
+#define SIOCSIWRTS 0x8B22 /* set RTS/CTS threshold (bytes) */
+#define SIOCGIWRTS 0x8B23 /* get RTS/CTS threshold (bytes) */
+#define SIOCSIWFRAG 0x8B24 /* set fragmentation thr (bytes) */
+#define SIOCGIWFRAG 0x8B25 /* get fragmentation thr (bytes) */
+#define SIOCSIWTXPOW 0x8B26 /* set transmit power (dBm) */
+#define SIOCGIWTXPOW 0x8B27 /* get transmit power (dBm) */
+
+/* Encoding stuff (scrambling, hardware security, WEP...) */
+#define SIOCSIWENCODE 0x8B2A /* set encoding token & mode */
+#define SIOCGIWENCODE 0x8B2B /* get encoding token & mode */
+/* Power saving stuff (power management, unicast and multicast) */
+#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */
+#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */
+
+/* ------------------------- IOCTL STUFF ------------------------- */
+
+/* The first and the last (range) */
+#define SIOCIWFIRST 0x8B00
+#define SIOCIWLAST 0x8B30
+
+/* Even : get (world access), odd : set (root access) */
+#define IW_IS_SET(cmd) (!((cmd) & 0x1))
+#define IW_IS_GET(cmd) ((cmd) & 0x1)
+
+/* ------------------------- PRIVATE INFO ------------------------- */
+/*
+ * The following is used with SIOCGIWPRIV. It allows a driver to define
+ * the interface (name, type of data) for its private ioctl.
+ * Privates ioctl are SIOCDEVPRIVATE -> SIOCDEVPRIVATE + 0xF
+ */
+
+#define IW_PRIV_TYPE_MASK 0x7000 /* Type of arguments */
+#define IW_PRIV_TYPE_NONE 0x0000
+#define IW_PRIV_TYPE_BYTE 0x1000 /* Char as number */
+#define IW_PRIV_TYPE_CHAR 0x2000 /* Char as character */
+#define IW_PRIV_TYPE_INT 0x4000 /* 32 bits int */
+#define IW_PRIV_TYPE_FLOAT 0x5000
+
+#define IW_PRIV_SIZE_FIXED 0x0800 /* Variable or fixed number of args */
+
+#define IW_PRIV_SIZE_MASK 0x07FF /* Max number of those args */
+
+/*
+ * Note : if the number of args is fixed and the size < 16 octets,
+ * instead of passing a pointer we will put args in the iwreq struct...
+ */
+
+/* ----------------------- OTHER CONSTANTS ----------------------- */
+
+/* Maximum frequencies in the range struct */
+#define IW_MAX_FREQUENCIES 16
+/* Note : if you have something like 80 frequencies,
+ * don't increase this constant and don't fill the frequency list.
+ * The user will be able to set by channel anyway... */
+
+/* Maximum bit rates in the range struct */
+#define IW_MAX_BITRATES 8
+
+/* Maximum tx powers in the range struct */
+#define IW_MAX_TXPOWER 8
+
+/* Maximum of address that you may set with SPY */
+#define IW_MAX_SPY 8
+
+/* Maximum of address that you may get in the
+ list of access points in range */
+#define IW_MAX_AP 8
+
+/* Maximum size of the ESSID and NICKN strings */
+#define IW_ESSID_MAX_SIZE 32
+
+/* Modes of operation */
+#define IW_MODE_AUTO 0 /* Let the driver decide */
+#define IW_MODE_ADHOC 1 /* Single cell network */
+#define IW_MODE_INFRA 2 /* Multi cell network, roaming, ... */
+#define IW_MODE_MASTER 3 /* Synchronisation master or Access Point */
+#define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */
+#define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */
+
+/* Maximum number of size of encoding token available
+ * they are listed in the range structure */
+#define IW_MAX_ENCODING_SIZES 8
+
+/* Maximum size of the encoding token in bytes */
+#define IW_ENCODING_TOKEN_MAX 32 /* 256 bits (for now) */
+
+/* Flags for encoding (along with the token) */
+#define IW_ENCODE_INDEX 0x00FF /* Token index (if needed) */
+#define IW_ENCODE_FLAGS 0xFF00 /* Flags defined below */
+#define IW_ENCODE_MODE 0xF000 /* Modes defined below */
+#define IW_ENCODE_DISABLED 0x8000 /* Encoding disabled */
+#define IW_ENCODE_ENABLED 0x0000 /* Encoding enabled */
+#define IW_ENCODE_RESTRICTED 0x4000 /* Refuse non-encoded packets */
+#define IW_ENCODE_OPEN 0x2000 /* Accept non-encoded packets */
+#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
+
+/* Power management flags available (along with the value, if any) */
+#define IW_POWER_ON 0x0000 /* No details... */
+#define IW_POWER_TYPE 0xF000 /* Type of parameter */
+#define IW_POWER_PERIOD 0x1000 /* Value is a period/duration of */
+#define IW_POWER_TIMEOUT 0x2000 /* Value is a timeout (to go asleep) */
+#define IW_POWER_MODE 0x0F00 /* Power Management mode */
+#define IW_POWER_UNICAST_R 0x0100 /* Receive only unicast messages */
+#define IW_POWER_MULTICAST_R 0x0200 /* Receive only multicast messages */
+#define IW_POWER_ALL_R 0x0300 /* Receive all messages though PM */
+#define IW_POWER_FORCE_S 0x0400 /* Force PM procedure for sending unicast */
+#define IW_POWER_REPEATER 0x0800 /* Repeat broadcast messages in PM period */
+#define IW_POWER_MODIFIER 0x000F /* Modify a parameter */
+#define IW_POWER_MIN 0x0001 /* Value is a minimum */
+#define IW_POWER_MAX 0x0002 /* Value is a maximum */
+#define IW_POWER_RELATIVE 0x0004 /* Value is not in seconds/ms/us */
+
+/* Transmit Power flags available */
+#define IW_TXPOW_DBM 0x0000 /* Value is in dBm */
+#define IW_TXPOW_MWATT 0x0001 /* Value is in mW */
+
+/****************************** TYPES ******************************/
+
+/* --------------------------- SUBTYPES --------------------------- */
+/*
+ * Generic format for most parameters that fit in an int
+ */
+struct iw_param
+{
+ __s32 value; /* The value of the parameter itself */
+ __u8 fixed; /* Hardware should not use auto select */
+ __u8 disabled; /* Disable the feature */
+ __u16 flags; /* Various specific flags (if any) */
+};
+
+/*
+ * For all data larger than 16 octets, we need to use a
+ * pointer to memory allocated in user space.
+ */
+struct iw_point
+{
+ caddr_t pointer; /* Pointer to the data (in user space) */
+ __u16 length; /* number of fields or size in bytes */
+ __u16 flags; /* Optional params */
+};
+
+/*
+ * A frequency
+ * For numbers lower than 10^9, we encode the number in 'm' and
+ * set 'e' to 0
+ * For number greater than 10^9, we divide it by the lowest power
+ * of 10 to get 'm' lower than 10^9, with 'm'= f / (10^'e')...
+ * The power of 10 is in 'e', the result of the division is in 'm'.
+ */
+struct iw_freq
+{
+ __u32 m; /* Mantissa */
+ __u16 e; /* Exponent */
+ __u8 i; /* List index (when in range struct) */
+};
+
+/*
+ * Quality of the link
+ */
+struct iw_quality
+{
+ __u8 qual; /* link quality (%retries, SNR or better...) */
+ __u8 level; /* signal level */
+ __u8 noise; /* noise level */
+ __u8 updated; /* Flags to know if updated */
+};
+
+/*
+ * Packet discarded in the wireless adapter due to
+ * "wireless" specific problems...
+ */
+struct iw_discarded
+{
+ __u32 nwid; /* Wrong nwid */
+ __u32 code; /* Unable to code/decode */
+ __u32 misc; /* Others cases */
+};
+
+/* ------------------------ WIRELESS STATS ------------------------ */
+/*
+ * Wireless statistics (used for /proc/net/wireless)
+ */
+struct iw_statistics
+{
+ __u16 status; /* Status
+ * - device dependent for now */
+
+ struct iw_quality qual; /* Quality of the link
+ * (instant/mean/max) */
+ struct iw_discarded discard; /* Packet discarded counts */
+};
+
+/* ------------------------ IOCTL REQUEST ------------------------ */
+/*
+ * The structure to exchange data for ioctl.
+ * This structure is the same as 'struct ifreq', but (re)defined for
+ * convenience...
+ *
+ * Note that it should fit on the same memory footprint !
+ * You should check this when increasing the above structures (16 octets)
+ * 16 octets = 128 bits. Warning, pointers might be 64 bits wide...
+ */
+struct iwreq
+{
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "eth0" */
+ } ifr_ifrn;
+
+ /* Data part */
+ union
+ {
+ /* Config - generic */
+ char name[IFNAMSIZ];
+ /* Name : used to verify the presence of wireless extensions.
+ * Name of the protocol/provider... */
+
+ struct iw_point essid; /* Extended network name */
+ struct iw_param nwid; /* network id (or domain - the cell) */
+ struct iw_freq freq; /* frequency or channel :
+ * 0-1000 = channel
+ * > 1000 = frequency in Hz */
+
+ struct iw_param sens; /* signal level threshold */
+ struct iw_param bitrate; /* default bit rate */
+ struct iw_param txpower; /* default transmit power */
+ struct iw_param rts; /* RTS threshold threshold */
+ struct iw_param frag; /* Fragmentation threshold */
+ __u32 mode; /* Operation mode */
+
+ struct iw_point encoding; /* Encoding stuff : tokens */
+ struct iw_param power; /* PM duration/timeout */
+
+ struct sockaddr ap_addr; /* Access point address */
+
+ struct iw_point data; /* Other large parameters */
+ } u;
+};
+
+/* -------------------------- IOCTL DATA -------------------------- */
+/*
+ * For those ioctl which want to exchange more data than what could
+ * fit in the above structure...
+ */
+
+/*
+ * Range of parameters
+ */
+
+struct iw_range
+{
+ /* Informative stuff (to choose between different interface) */
+ __u32 throughput; /* To give an idea... */
+ /* In theory this value should be the maximum benchmarked
+ * TCP/IP throughput, because with most of these devices the
+ * bit rate is meaningless (overhead and co) to estimate how
+ * fast the connection will go and pick the fastest one.
+ * I suggest people to play with Netperf or any benchmark...
+ */
+
+ /* NWID (or domain id) */
+ __u32 min_nwid; /* Minimal NWID we are able to set */
+ __u32 max_nwid; /* Maximal NWID we are able to set */
+
+ /* Frequency */
+ __u16 num_channels; /* Number of channels [0; num - 1] */
+ __u8 num_frequency; /* Number of entry in the list */
+ struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */
+ /* Note : this frequency list doesn't need to fit channel numbers */
+
+ /* signal level threshold range */
+ __s32 sensitivity;
+
+ /* Quality of link & SNR stuff */
+ struct iw_quality max_qual; /* Quality of the link */
+
+ /* Rates */
+ __u8 num_bitrates; /* Number of entries in the list */
+ __s32 bitrate[IW_MAX_BITRATES]; /* list, in bps */
+
+ /* RTS threshold */
+ __s32 min_rts; /* Minimal RTS threshold */
+ __s32 max_rts; /* Maximal RTS threshold */
+
+ /* Frag threshold */
+ __s32 min_frag; /* Minimal frag threshold */
+ __s32 max_frag; /* Maximal frag threshold */
+
+ /* Power Management duration & timeout */
+ __s32 min_pmp; /* Minimal PM period */
+ __s32 max_pmp; /* Maximal PM period */
+ __s32 min_pmt; /* Minimal PM timeout */
+ __s32 max_pmt; /* Maximal PM timeout */
+ __u16 pmp_flags; /* How to decode max/min PM period */
+ __u16 pmt_flags; /* How to decode max/min PM timeout */
+ __u16 pm_capa; /* What PM options are supported */
+
+ /* Encoder stuff */
+ __u16 encoding_size[IW_MAX_ENCODING_SIZES]; /* Different token sizes */
+ __u8 num_encoding_sizes; /* Number of entry in the list */
+ __u8 max_encoding_tokens; /* Max number of tokens */
+
+ /* Transmit power */
+ __u16 txpower_capa; /* What options are supported */
+ __u8 num_txpower; /* Number of entries in the list */
+ __s32 txpower[IW_MAX_TXPOWER]; /* list, in bps */
+};
+
+/*
+ * Private ioctl interface information
+ */
+
+struct iw_priv_args
+{
+ __u32 cmd; /* Number of the ioctl to issue */
+ __u16 set_args; /* Type and number of args */
+ __u16 get_args; /* Type and number of args */
+ char name[IFNAMSIZ]; /* Name of the extension */
+};
+
+#endif /* _LINUX_WIRELESS_H */
diff --git a/linux/src/include/net/af_unix.h b/linux/src/include/net/af_unix.h
new file mode 100644
index 0000000..86b0e1e
--- /dev/null
+++ b/linux/src/include/net/af_unix.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_NET_AFUNIX_H
+#define __LINUX_NET_AFUNIX_H
+extern void unix_proto_init(struct net_proto *pro);
+extern struct proto_ops unix_proto_ops;
+extern void unix_inflight(struct file *fp);
+extern void unix_notinflight(struct file *fp);
+typedef struct sock unix_socket;
+extern void unix_gc(void);
+
+extern unix_socket *unix_socket_list;
+
+#define UNIX_MAX_FD 8
+
+#endif
diff --git a/linux/src/include/net/arp.h b/linux/src/include/net/arp.h
new file mode 100644
index 0000000..db7a29c
--- /dev/null
+++ b/linux/src/include/net/arp.h
@@ -0,0 +1,17 @@
+/* linux/net/inet/arp.h */
+#ifndef _ARP_H
+#define _ARP_H
+
+extern void arp_init(void);
+extern int arp_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern int arp_query(unsigned char *haddr, u32 paddr, struct device *dev);
+extern int arp_find(unsigned char *haddr, u32 paddr,
+ struct device *dev, u32 saddr, struct sk_buff *skb);
+extern int arp_ioctl(unsigned int cmd, void *arg);
+extern void arp_send(int type, int ptype, u32 dest_ip,
+ struct device *dev, u32 src_ip,
+ unsigned char *dest_hw, unsigned char *src_hw, unsigned char *th);
+extern int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short type, __u32 daddr);
+extern int arp_update_cache(struct hh_cache * hh);
+#endif /* _ARP_H */
diff --git a/linux/src/include/net/atalkcall.h b/linux/src/include/net/atalkcall.h
new file mode 100644
index 0000000..726e33c
--- /dev/null
+++ b/linux/src/include/net/atalkcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void atalk_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/ax25.h b/linux/src/include/net/ax25.h
new file mode 100644
index 0000000..fce33cf
--- /dev/null
+++ b/linux/src/include/net/ax25.h
@@ -0,0 +1,292 @@
+/*
+ * Declarations of AX.25 type objects.
+ *
+ * Alan Cox (GW4PTS) 10/11/93
+ */
+
+#ifndef _AX25_H
+#define _AX25_H
+#include <linux/ax25.h>
+
+#define AX25_SLOWHZ 10 /* Run timing at 1/10 second - gives us better resolution for 56kbit links */
+
+#define AX25_T1CLAMPLO (1 * AX25_SLOWHZ) /* If defined, clamp at 1 second **/
+#define AX25_T1CLAMPHI (30 * AX25_SLOWHZ) /* If defined, clamp at 30 seconds **/
+
+#define AX25_BPQ_HEADER_LEN 16
+#define AX25_KISS_HEADER_LEN 1
+
+#define AX25_HEADER_LEN 17
+#define AX25_ADDR_LEN 7
+#define AX25_DIGI_HEADER_LEN (AX25_MAX_DIGIS * AX25_ADDR_LEN)
+#define AX25_MAX_HEADER_LEN (AX25_HEADER_LEN + AX25_DIGI_HEADER_LEN)
+
+/* AX.25 Protocol IDs */
+#define AX25_P_ROSE 0x01
+#define AX25_P_IP 0xCC
+#define AX25_P_ARP 0xCD
+#define AX25_P_TEXT 0xF0
+#define AX25_P_NETROM 0xCF
+#define AX25_P_SEGMENT 0x08
+
+/* AX.25 Segment control values */
+#define AX25_SEG_REM 0x7F
+#define AX25_SEG_FIRST 0x80
+
+#define AX25_CBIT 0x80 /* Command/Response bit */
+#define AX25_EBIT 0x01 /* HDLC Address Extension bit */
+#define AX25_HBIT 0x80 /* Has been repeated bit */
+
+#define AX25_SSSID_SPARE 0x60 /* Unused bits in SSID for standard AX.25 */
+#define AX25_ESSID_SPARE 0x20 /* Unused bits in SSID for extended AX.25 */
+#define AX25_DAMA_FLAG 0x20 /* Well, it is *NOT* unused! (dl1bke 951121 */
+
+#define AX25_COND_ACK_PENDING 0x01
+#define AX25_COND_REJECT 0x02
+#define AX25_COND_PEER_RX_BUSY 0x04
+#define AX25_COND_OWN_RX_BUSY 0x08
+
+#ifndef _LINUX_NETDEVICE_H
+#include <linux/netdevice.h>
+#endif
+
+/*
+ * These headers are taken from the KA9Q package by Phil Karn. These specific
+ * files have been placed under the GPL (not the whole package) by Phil.
+ *
+ *
+ * Copyright 1991 Phil Karn, KA9Q
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 dated June, 1991.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave., Cambridge, MA 02139, USA.
+ */
+
+/* Upper sub-layer (LAPB) definitions */
+
+/* Control field templates */
+#define AX25_I 0x00 /* Information frames */
+#define AX25_S 0x01 /* Supervisory frames */
+#define AX25_RR 0x01 /* Receiver ready */
+#define AX25_RNR 0x05 /* Receiver not ready */
+#define AX25_REJ 0x09 /* Reject */
+#define AX25_U 0x03 /* Unnumbered frames */
+#define AX25_SABM 0x2f /* Set Asynchronous Balanced Mode */
+#define AX25_SABME 0x6f /* Set Asynchronous Balanced Mode Extended */
+#define AX25_DISC 0x43 /* Disconnect */
+#define AX25_DM 0x0f /* Disconnected mode */
+#define AX25_UA 0x63 /* Unnumbered acknowledge */
+#define AX25_FRMR 0x87 /* Frame reject */
+#define AX25_UI 0x03 /* Unnumbered information */
+
+#define AX25_PF 0x10 /* Poll/final bit for standard AX.25 */
+#define AX25_EPF 0x01 /* Poll/final bit for extended AX.25 */
+
+#define AX25_ILLEGAL 0x100 /* Impossible to be a real frame type */
+
+#define AX25_POLLOFF 0
+#define AX25_POLLON 1
+
+/* AX25 L2 C-bit */
+#define AX25_COMMAND 1
+#define AX25_RESPONSE 2
+
+/* Define Link State constants. */
+
+enum {
+ AX25_STATE_0,
+ AX25_STATE_1,
+ AX25_STATE_2,
+ AX25_STATE_3,
+ AX25_STATE_4
+};
+
+#define AX25_MAX_DEVICES 20 /* Max No of AX.25 devices */
+
+#define AX25_MODULUS 8 /* Standard AX.25 modulus */
+#define AX25_EMODULUS 128 /* Extended AX.25 modulus */
+
+enum {
+ AX25_VALUES_IPDEFMODE, /* 0=DG 1=VC */
+ AX25_VALUES_AXDEFMODE, /* 0=Normal 1=Extended Seq Nos */
+ AX25_VALUES_BACKOFF, /* 0=None 1=Linear 2=Exponential */
+ AX25_VALUES_CONMODE, /* Allow connected modes - 0=No 1=no "PID text" 2=all PIDs */
+ AX25_VALUES_WINDOW, /* Default window size for standard AX.25 */
+ AX25_VALUES_EWINDOW, /* Default window size for extended AX.25 */
+ AX25_VALUES_T1, /* Default T1 timeout value */
+ AX25_VALUES_T2, /* Default T2 timeout value */
+ AX25_VALUES_T3, /* Default T3 timeout value */
+ AX25_VALUES_IDLE, /* Connected mode idle timer */
+ AX25_VALUES_N2, /* Default N2 value */
+ AX25_VALUES_PACLEN, /* AX.25 MTU */
+ AX25_MAX_VALUES /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */
+};
+
+#define AX25_DEF_IPDEFMODE 0 /* Datagram */
+#define AX25_DEF_AXDEFMODE 0 /* Normal */
+#define AX25_DEF_BACKOFF 1 /* Linear backoff */
+#define AX25_DEF_CONMODE 2 /* Connected mode allowed */
+#define AX25_DEF_WINDOW 2 /* Window=2 */
+#define AX25_DEF_EWINDOW 32 /* Module-128 Window=32 */
+#define AX25_DEF_T1 (10 * AX25_SLOWHZ) /* T1=10s */
+#define AX25_DEF_T2 (3 * AX25_SLOWHZ) /* T2=3s */
+#define AX25_DEF_T3 (300 * AX25_SLOWHZ) /* T3=300s */
+#define AX25_DEF_N2 10 /* N2=10 */
+#define AX25_DEF_IDLE (0 * 60 * AX25_SLOWHZ) /* Idle=None */
+#define AX25_DEF_PACLEN 256 /* Paclen=256 */
+
+typedef struct ax25_uid_assoc {
+ struct ax25_uid_assoc *next;
+ uid_t uid;
+ ax25_address call;
+} ax25_uid_assoc;
+
+typedef struct {
+ ax25_address calls[AX25_MAX_DIGIS];
+ unsigned char repeated[AX25_MAX_DIGIS];
+ unsigned char ndigi;
+ char lastrepeat;
+} ax25_digi;
+
+typedef struct ax25_cb {
+ struct ax25_cb *next;
+ ax25_address source_addr, dest_addr;
+ struct device *device;
+ unsigned char dama_slave, iamdigi;
+ unsigned char state, modulus, pidincl;
+ unsigned short vs, vr, va;
+ unsigned char condition, backoff;
+ unsigned char n2, n2count;
+ unsigned short t1, t2, t3, idle, rtt;
+ unsigned short t1timer, t2timer, t3timer, idletimer;
+ unsigned short paclen;
+ unsigned short fragno, fraglen;
+ ax25_digi *digipeat;
+ struct sk_buff_head write_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head frag_queue;
+ unsigned char window;
+ struct timer_list timer;
+ struct sock *sk; /* Backlink to socket */
+} ax25_cb;
+
+#ifndef _LINUX_SYSCTL_H
+#include <linux/sysctl.h>
+#endif
+
+struct ax25_dev {
+ char name[20];
+ struct device *dev;
+ struct device *forward;
+ struct ctl_table systable[AX25_MAX_VALUES+1];
+ int values[AX25_MAX_VALUES];
+};
+
+/* af_ax25.c */
+extern ax25_address null_ax25_address;
+extern char *ax2asc(ax25_address *);
+extern ax25_address *asc2ax(char *);
+extern int ax25cmp(ax25_address *, ax25_address *);
+extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct device *);
+extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct device *);
+extern void ax25_destroy_socket(ax25_cb *);
+extern struct device *ax25rtr_get_dev(ax25_address *);
+extern int ax25_encapsulate(struct sk_buff *, struct device *, unsigned short,
+ void *, void *, unsigned int);
+extern int ax25_rebuild_header(void *, struct device *, unsigned long, struct sk_buff *);
+extern ax25_uid_assoc *ax25_uid_list;
+extern int ax25_uid_policy;
+extern ax25_address *ax25_findbyuid(uid_t);
+extern void ax25_queue_xmit(struct sk_buff *, struct device *, int);
+extern int ax25_dev_is_dama_slave(struct device *); /* dl1bke 951121 */
+
+#include <net/ax25call.h>
+
+/* ax25_in.c */
+extern int ax25_process_rx_frame(ax25_cb *, struct sk_buff *, int, int);
+
+/* ax25_out.c */
+extern void ax25_output(ax25_cb *, int, struct sk_buff *);
+extern void ax25_kick(ax25_cb *);
+extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+extern void ax25_nr_error_recovery(ax25_cb *);
+extern void ax25_establish_data_link(ax25_cb *);
+extern void ax25_transmit_enquiry(ax25_cb *);
+extern void ax25_enquiry_response(ax25_cb *);
+extern void ax25_timeout_response(ax25_cb *);
+extern void ax25_check_iframes_acked(ax25_cb *, unsigned short);
+extern void dama_enquiry_response(ax25_cb *); /* dl1bke 960114 */
+extern void dama_establish_data_link(ax25_cb *);
+
+/* ax25_route.c */
+extern struct ax25_dev ax25_device[];
+extern int ax25_rt_get_info(char *, char **, off_t, int, int);
+extern int ax25_cs_get_info(char *, char **, off_t, int, int);
+extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
+extern ax25_digi *ax25_rt_find_path(ax25_address *, struct device *);
+extern void ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
+extern void ax25_rt_device_down(struct device *);
+extern int ax25_rt_ioctl(unsigned int, void *);
+extern char ax25_rt_mode_get(ax25_address *, struct device *);
+extern int ax25_dev_get_value(struct device *, int);
+extern void ax25_dev_device_up(struct device *);
+extern void ax25_dev_device_down(struct device *);
+extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+extern struct device *ax25_fwd_dev(struct device *);
+extern void ax25_rt_free(void);
+
+/* ax25_subr.c */
+extern void ax25_clear_queues(ax25_cb *);
+extern void ax25_frames_acked(ax25_cb *, unsigned short);
+extern void ax25_requeue_frames(ax25_cb *);
+extern int ax25_validate_nr(ax25_cb *, unsigned short);
+extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+extern void ax25_send_control(ax25_cb *, int, int, int);
+extern unsigned short ax25_calculate_t1(ax25_cb *);
+extern void ax25_calculate_rtt(ax25_cb *);
+extern unsigned char *ax25_parse_addr(unsigned char *, int, ax25_address *,
+ ax25_address *, ax25_digi *, int *, int *); /* dl1bke 951121 */
+extern int build_ax25_addr(unsigned char *, ax25_address *, ax25_address *,
+ ax25_digi *, int, int);
+extern int size_ax25_addr(ax25_digi *);
+extern void ax25_digi_invert(ax25_digi *, ax25_digi *);
+extern void ax25_return_dm(struct device *, ax25_address *, ax25_address *, ax25_digi *);
+extern void ax25_dama_on(ax25_cb *); /* dl1bke 951121 */
+extern void ax25_dama_off(ax25_cb *); /* dl1bke 951121 */
+extern void ax25_disconnect(ax25_cb *, int);
+
+/* ax25_timer.c */
+extern void ax25_set_timer(ax25_cb *);
+extern void ax25_t1_timeout(ax25_cb *);
+extern void ax25_link_failed(ax25_cb *, int);
+extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+extern int ax25_listen_mine(ax25_address *, struct device *);
+
+/* sysctl_net_ax25.c */
+extern void ax25_register_sysctl(void);
+extern void ax25_unregister_sysctl(void);
+
+/* ... */
+
+extern ax25_cb *volatile ax25_list;
+
+/* support routines for modules that use AX.25, in ax25_timer.c */
+extern int ax25_protocol_register(unsigned int, int (*)(struct sk_buff *, ax25_cb *));
+extern void ax25_protocol_release(unsigned int);
+extern int ax25_linkfail_register(void (*)(ax25_cb *, int));
+extern void ax25_linkfail_release(void (*)(ax25_cb *, int));
+extern int ax25_listen_register(ax25_address *, struct device *);
+extern void ax25_listen_release(ax25_address *, struct device *);
+extern int ax25_protocol_is_registered(unsigned int);
+
+#endif
diff --git a/linux/src/include/net/ax25call.h b/linux/src/include/net/ax25call.h
new file mode 100644
index 0000000..68b8a70
--- /dev/null
+++ b/linux/src/include/net/ax25call.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void ax25_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/br.h b/linux/src/include/net/br.h
new file mode 100644
index 0000000..f1d6678
--- /dev/null
+++ b/linux/src/include/net/br.h
@@ -0,0 +1,270 @@
+/*
+ * Constants and structure definitions for the bridging code
+ */
+
+#if !defined(One)
+#define Zero 0
+#define One 1
+#endif /* !defined(One) */
+
+#if !defined(TRUE)
+#define FALSE 0
+#define TRUE 1
+#endif /* !defined(TRUE) */
+
+/** port states. **/
+#define Disabled 0 /* (4.4.5) */
+#define Listening 1 /* (4.4.2) */
+#define Learning 2 /* (4.4.3) */
+#define Forwarding 3 /* (4.4.4) */
+#define Blocking 4 /* (4.4.1) */
+
+#define No_of_ports 8
+/* arbitrary choice, to allow the code below to compile */
+
+#define All_ports (No_of_ports + 1)
+
+/*
+ * We time out our entries in the FDB after this many seconds.
+ */
+#define FDB_TIMEOUT 300
+
+/*
+ * the following defines are the initial values used when the
+ * bridge is booted. These may be overridden when this bridge is
+ * not the root bridge. These are the recommended default values
+ * from the 802.1d specification.
+ */
+#define BRIDGE_MAX_AGE 20
+#define BRIDGE_HELLO_TIME 2
+#define BRIDGE_FORWARD_DELAY 15
+#define HOLD_TIME 1
+
+#define Default_path_cost 10
+
+/*
+ * minimum increment possible to avoid underestimating age, allows for BPDU
+ * transmission time
+ */
+#define Message_age_increment 1
+
+#define No_port 0
+/*
+ * reserved value for Bridge's root port parameter indicating no root port,
+ * used when Bridge is the root - also used to indicate the source when
+ * a frame is being generated by a higher layer protocol on this host
+ */
+
+/** Configuration BPDU Parameters (4.5.1) **/
+
+typedef struct {
+ union {
+ struct {
+ unsigned short priority; /* bridge priority: most-significant part of the ID */
+ unsigned char ula[6]; /* bridge MAC address (the "unique LAN address") */
+ } p_u;
+ unsigned int id[2]; /* same 8 bytes viewed as two 32-bit words for comparison */
+ } bi;
+} bridge_id_t; /* 802.1d bridge identifier: 2-byte priority + 6-byte MAC */
+
+#define BRIDGE_PRIORITY bi.p_u.priority
+#define BRIDGE_ID_ULA bi.p_u.ula
+#define BRIDGE_ID bi.id
+
+typedef struct {
+ unsigned short protocol_id; /* spanning tree protocol identifier */
+ unsigned char protocol_version_id; /* protocol version */
+ unsigned char type; /* BPDU_TYPE_CONFIG for this structure */
+ unsigned char flags; /* TOPOLOGY_CHANGE / TOPOLOGY_CHANGE_ACK bits below */
+#define TOPOLOGY_CHANGE 0x01
+#define TOPOLOGY_CHANGE_ACK 0x80
+ bridge_id_t root_id; /* (4.5.1.1) */
+ unsigned int root_path_cost; /* (4.5.1.2) */
+ bridge_id_t bridge_id; /* (4.5.1.3) */
+ unsigned short port_id; /* (4.5.1.4) */
+ unsigned short message_age; /* (4.5.1.5) */
+ unsigned short max_age; /* (4.5.1.6) */
+ unsigned short hello_time; /* (4.5.1.7) */
+ unsigned short forward_delay; /* (4.5.1.8) */
+} Config_bpdu; /* Configuration BPDU wire format (4.5.1) */
+
+
+/** Topology Change Notification BPDU Parameters (4.5.2) **/
+
+typedef struct {
+ unsigned short protocol_id; /* spanning tree protocol identifier */
+ unsigned char protocol_version_id; /* protocol version */
+ unsigned char type; /* BPDU_TYPE_TOPO_CHANGE for this structure */
+} Tcn_bpdu; /* Topology Change Notification BPDU wire format (4.5.2) */
+
+#define BPDU_TYPE_CONFIG 0
+#define BPDU_TYPE_TOPO_CHANGE 128
+
+/** Bridge Parameters (4.5.3) **/
+typedef struct {
+ bridge_id_t designated_root; /* (4.5.3.1) */
+ unsigned int root_path_cost; /* (4.5.3.2) */
+ unsigned int root_port; /* (4.5.3.3) */
+ unsigned short max_age; /* (4.5.3.4) */
+ unsigned short hello_time; /* (4.5.3.5) */
+ unsigned short forward_delay; /* (4.5.3.6) */
+ bridge_id_t bridge_id; /* (4.5.3.7) */
+ unsigned short bridge_max_age; /* (4.5.3.8) */
+ unsigned short bridge_hello_time; /* (4.5.3.9) */
+ unsigned short bridge_forward_delay; /* (4.5.3.10) */
+ unsigned int topology_change_detected; /* (4.5.3.11) */
+ unsigned int topology_change; /* (4.5.3.12) */
+ unsigned short topology_change_time; /* (4.5.3.13) */
+ unsigned short hold_time; /* (4.5.3.14) */
+ unsigned int top_change; /* NOTE(review): seems to duplicate topology_change — confirm usage in br.c */
+ unsigned int top_change_detected; /* NOTE(review): seems to duplicate topology_change_detected — confirm */
+} Bridge_data; /* bridge-wide spanning tree parameters (4.5.3) */
+
+/** Port Parameters (4.5.5) **/
+typedef struct {
+ unsigned short port_id; /* (4.5.5.1) */
+ unsigned int state; /* (4.5.5.2) */
+ unsigned int path_cost; /* (4.5.5.3) */
+ bridge_id_t designated_root; /* (4.5.5.4) */
+ unsigned int designated_cost; /* (4.5.5.5) */
+ bridge_id_t designated_bridge; /* (4.5.5.6) */
+ unsigned short designated_port; /* (4.5.5.7) */
+ unsigned int top_change_ack; /* (4.5.5.8) */
+ unsigned int config_pending; /* (4.5.5.9) */
+ struct device *dev; /* network device backing this bridge port */
+ struct fdb *fdb; /* head of per port fdb chain */
+} Port_data; /* per-port spanning tree parameters (4.5.5) */
+
+
+
+/** types to support timers for this pseudo-implementation. **/
+typedef struct {
+ unsigned int active; /* timer in use. */
+ unsigned int value; /* current value of timer,
+ * counting up. */
+} Timer; /* polled soft timer: active flag plus up-counting tick value */
+
+struct fdb {
+ unsigned char ula[6]; /* learned station MAC address */
+ unsigned char pad[2]; /* pad address to a 4-byte boundary */
+ unsigned short port; /* port this address was learned on */
+ unsigned int timer; /* aging value checked against FDB_TIMEOUT — presumably last-seen time; confirm in br.c */
+ unsigned int flags; /* FDB_ENT_VALID below */
+#define FDB_ENT_VALID 0x01
+/* AVL tree of all addresses, sorted by address */
+ short fdb_avl_height;
+ struct fdb *fdb_avl_left;
+ struct fdb *fdb_avl_right;
+/* linked list of addresses for each port */
+ struct fdb *fdb_next;
+}; /* forwarding database entry: one learned MAC address */
+
+#define IS_BRIDGED 0x2e
+
+struct br_stat {
+ unsigned int flags; /* BR_UP / BR_DEBUG, defined below */
+ Bridge_data bridge_data; /* bridge-wide spanning tree state */
+ Port_data port_data[No_of_ports]; /* per-port spanning tree state */
+}; /* overall bridge status, exported as br_stats */
+
+/* defined flags for br_stat.flags */
+#define BR_UP 0x0001 /* bridging enabled */
+#define BR_DEBUG 0x0002 /* debugging enabled */
+
+struct br_cf {
+ unsigned int cmd; /* BRCMD_* code, defined below */
+ unsigned int arg1; /* first command argument (often a port number) */
+ unsigned int arg2; /* second command argument (priority/cost where used) */
+}; /* bridge configuration request — presumably passed to br_ioctl(); confirm */
+
+/* defined cmds */
+#define BRCMD_BRIDGE_ENABLE 1
+#define BRCMD_BRIDGE_DISABLE 2
+#define BRCMD_PORT_ENABLE 3 /* arg1 = port */
+#define BRCMD_PORT_DISABLE 4 /* arg1 = port */
+#define BRCMD_SET_BRIDGE_PRIORITY 5 /* arg1 = priority */
+#define BRCMD_SET_PORT_PRIORITY 6 /* arg1 = port, arg2 = priority */
+#define BRCMD_SET_PATH_COST 7 /* arg1 = port, arg2 = cost */
+#define BRCMD_DISPLAY_FDB 8 /* arg1 = port */
+#define BRCMD_ENABLE_DEBUG 9
+#define BRCMD_DISABLE_DEBUG 10
+
+/* prototypes of all bridging functions... */
+
+void transmit_config(int port_no);
+int root_bridge(void);
+int supersedes_port_info(int port_no, Config_bpdu *config);
+void record_config_information(int port_no, Config_bpdu *config);
+void record_config_timeout_values(Config_bpdu *config);
+void config_bpdu_generation(void);
+int designated_port(int port_no);
+void reply(int port_no);
+void transmit_tcn(void);
+void configuration_update(void);
+void root_selection(void);
+void designated_port_selection(void);
+void become_designated_port(int port_no);
+void port_state_selection(void);
+void make_forwarding(int port_no);
+void topology_change_detection(void);
+void topology_change_acknowledged(void);
+void acknowledge_topology_change(int port_no);
+void make_blocking(int port_no);
+void set_port_state(int port_no, int state);
+void received_config_bpdu(int port_no, Config_bpdu *config);
+void received_tcn_bpdu(int port_no, Tcn_bpdu *tcn);
+void hello_timer_expiry(void);
+void message_age_timer_expiry(int port_no);
+void forward_delay_timer_expiry(int port_no);
+int designated_for_some_port(void);
+void tcn_timer_expiry(void);
+void topology_change_timer_expiry(void);
+void hold_timer_expiry(int port_no);
+void br_init(void);
+void br_init_port(int port_no);
+void enable_port(int port_no);
+void disable_port(int port_no);
+void set_bridge_priority(bridge_id_t *new_bridge_id);
+void set_port_priority(int port_no, unsigned short new_port_id);
+void set_path_cost(int port_no, unsigned short path_cost);
+void start_hello_timer(void);
+void stop_hello_timer(void);
+int hello_timer_expired(void);
+void start_tcn_timer(void);
+void stop_tcn_timer(void);
+int tcn_timer_expired(void);
+void start_topology_change_timer(void);
+void stop_topology_change_timer(void);
+int topology_change_timer_expired(void);
+void start_message_age_timer(int port_no, unsigned short message_age);
+void stop_message_age_timer(int port_no);
+int message_age_timer_expired(int port_no);
+void start_forward_delay_timer(int port_no);
+void stop_forward_delay_timer(int port_no);
+int forward_delay_timer_expired(int port_no);
+void start_hold_timer(int port_no);
+void stop_hold_timer(int port_no);
+int hold_timer_expired(int port_no);
+
+struct fdb *br_avl_find_addr(unsigned char addr[6]);
+int br_avl_insert (struct fdb * new_node);
+int br_avl_remove (struct fdb * node_to_delete);
+
+int send_tcn_bpdu(int port_no, Tcn_bpdu *bpdu);
+int send_config_bpdu(int port_no, Config_bpdu *config_bpdu);
+int find_port(struct device *dev);
+int br_flood(struct sk_buff *skb, int port);
+int br_drop(struct sk_buff *skb);
+int br_learn(struct sk_buff *skb, int port); /* 3.8 */
+
+int br_receive_frame(struct sk_buff *skb); /* 3.5 */
+int br_tx_frame(struct sk_buff *skb);
+int br_ioctl(unsigned int cmd, void *arg);
+
+void free_fdb(struct fdb *);
+struct fdb *get_fdb(void);
+
+/* externs */
+
+extern struct br_stat br_stats;
+
diff --git a/linux/src/include/net/checksum.h b/linux/src/include/net/checksum.h
new file mode 100644
index 0000000..aee4fd4
--- /dev/null
+++ b/linux/src/include/net/checksum.h
@@ -0,0 +1,25 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Checksumming functions for IP, TCP, UDP and so on
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Borrows very liberally from tcp.c and ip.c, see those
+ * files for more names.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _CHECKSUM_H
+#define _CHECKSUM_H
+
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <asm/checksum.h>
+
+#endif
diff --git a/linux/src/include/net/datalink.h b/linux/src/include/net/datalink.h
new file mode 100644
index 0000000..44e5699
--- /dev/null
+++ b/linux/src/include/net/datalink.h
@@ -0,0 +1,16 @@
+#ifndef _NET_INET_DATALINK_H_
+#define _NET_INET_DATALINK_H_
+
+struct datalink_proto {
+ unsigned short type_len; /* number of significant bytes in type[] */
+ unsigned char type[8]; /* protocol discriminator bytes — presumably SNAP/SAP ID; confirm against callers */
+ const char *string_name; /* human-readable protocol name */
+ unsigned short header_length; /* size of the datalink header in bytes */
+ int (*rcvfunc)(struct sk_buff *, struct device *,
+ struct packet_type *); /* receive handler for this datalink type */
+ void (*datalink_header)(struct datalink_proto *, struct sk_buff *,
+ unsigned char *); /* builds the datalink header on an outgoing skb */
+ struct datalink_proto *next; /* next registered datalink protocol */
+};
+
+#endif
diff --git a/linux/src/include/net/gc.h b/linux/src/include/net/gc.h
new file mode 100644
index 0000000..0b28c09
--- /dev/null
+++ b/linux/src/include/net/gc.h
@@ -0,0 +1,46 @@
+/*
+ * Interface routines assumed by gc()
+ *
+ * Copyright (C) Barak A. Pearlmutter.
+ * Released under the GPL version 2 or later.
+ *
+ */
+
+typedef struct object *pobj; /* pointer to a guy of the type we gc */
+
+/*
+ * How to mark and unmark objects
+ */
+
+extern void gc_mark(pobj);
+extern void gc_unmark(pobj);
+extern int gc_marked(pobj);
+
+/*
+ * How to count and access an object's children
+ */
+
+extern int n_children(pobj); /* how many children */
+extern pobj child_n(pobj, int); /* child i, numbered 0..n-1 */
+
+/*
+ * How to access the root set
+ */
+
+extern int root_size(void); /* number of things in root set */
+extern pobj root_elt(int); /* element i of root set, numbered 0..n-1 */
+
+/*
+ * How to access the free list
+ */
+
+extern void clear_freelist(void);
+extern void add_to_free_list(pobj);
+
+/*
+ * How to iterate through all objects in memory
+ */
+
+extern int N_OBJS;
+extern pobj obj_number(int);
+
diff --git a/linux/src/include/net/icmp.h b/linux/src/include/net/icmp.h
new file mode 100644
index 0000000..fa770d2
--- /dev/null
+++ b/linux/src/include/net/icmp.h
@@ -0,0 +1,43 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP module.
+ *
+ * Version: @(#)icmp.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ICMP_H
+#define _ICMP_H
+
+#include <linux/icmp.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/protocol.h>
+
+extern struct icmp_err icmp_err_convert[];
+extern struct icmp_mib icmp_statistics;
+
+extern void icmp_send(struct sk_buff *skb_in, int type, int code,
+ unsigned long info, struct device *dev);
+extern int icmp_rcv(struct sk_buff *skb1, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+extern int icmp_ioctl(struct sock *sk, int cmd,
+ unsigned long arg);
+extern void icmp_init(struct proto_ops *ops);
+
+/* CONFIG_IP_TRANSPARENT_PROXY */
+extern int icmp_chkaddr(struct sk_buff *skb);
+
+#endif /* _ICMP_H */
diff --git a/linux/src/include/net/ip.h b/linux/src/include/net/ip.h
new file mode 100644
index 0000000..5437b3d
--- /dev/null
+++ b/linux/src/include/net/ip.h
@@ -0,0 +1,159 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP module.
+ *
+ * Version: @(#)ip.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _IP_H
+#define _IP_H
+
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/route.h>
+
+#ifndef _SNMP_H
+#include <net/snmp.h>
+#endif
+
+#include <net/sock.h> /* struct sock */
+
+/* IP flags. */
+#define IP_CE 0x8000 /* Flag: "Congestion" */
+#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
+#define IP_MF 0x2000 /* Flag: "More Fragments" */
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+
+#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
+
+#ifdef CONFIG_IP_MULTICAST
+extern void ip_mc_dropsocket(struct sock *);
+extern void ip_mc_dropdevice(struct device *dev);
+extern int ip_mc_procinfo(char *, char **, off_t, int, int);
+#endif
+
+#include <net/ip_forward.h>
+
+/* Describe an IP fragment. */
+struct ipfrag
+{
+ int offset; /* offset of fragment in IP datagram */
+ int end; /* last byte of data in datagram */
+ int len; /* length of this fragment */
+ struct sk_buff *skb; /* complete received fragment */
+ unsigned char *ptr; /* pointer into real fragment data */
+ struct ipfrag *next; /* linked list pointers */
+ struct ipfrag *prev;
+}; /* one received fragment of a datagram under reassembly */
+
+/*
+ * Describe an entry in the "incomplete datagrams" queue.
+ */
+
+struct ipq
+{
+ unsigned char *mac; /* pointer to MAC header */
+ struct iphdr *iph; /* pointer to IP header */
+ int len; /* total length of original datagram */
+ short ihlen; /* length of the IP header */
+ short maclen; /* length of the MAC header */
+ struct timer_list timer; /* when will this queue expire? */
+ struct ipfrag *fragments; /* linked list of received fragments */
+ struct ipq *next; /* linked list pointers */
+ struct ipq *prev;
+ struct device *dev; /* Device - for icmp replies */
+}; /* reassembly context for one incomplete datagram */
+
+/*
+ * Functions provided by ip.c
+ */
+
+extern void ip_print(const struct iphdr *ip);
+extern int ip_ioctl(struct sock *sk, int cmd, unsigned long arg);
+extern void ip_route_check(__u32 daddr);
+extern int ip_send(struct rtable *rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr);
+extern int ip_build_header(struct sk_buff *skb,
+ __u32 saddr,
+ __u32 daddr,
+ struct device **dev, int type,
+ struct options *opt, int len,
+ int tos,int ttl,struct rtable **rp);
+extern int ip_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern int ip_options_echo(struct options * dopt, struct options * sopt,
+ __u32 daddr, __u32 saddr,
+ struct sk_buff * skb);
+extern int ip_options_compile(struct options * opt, struct sk_buff * skb);
+extern void ip_send_check(struct iphdr *ip);
+extern int ip_id_count;
+extern void ip_queue_xmit(struct sock *sk,
+ struct device *dev, struct sk_buff *skb,
+ int free);
+extern void ip_init(void);
+extern int ip_build_xmit(struct sock *sk,
+ void getfrag (const void *,
+ __u32,
+ char *,
+ unsigned int,
+ unsigned int),
+ const void *frag,
+ unsigned short int length,
+ __u32 daddr,
+ __u32 saddr,
+ struct options * opt,
+ int flags,
+ int type,
+ int noblock);
+
+extern struct ip_mib ip_statistics;
+
+extern int sysctl_ip_dynaddr;
+int ip_rewrite_addrs(struct sock *sk, struct sk_buff *skb, struct device *dev);
+
+/*
+ * Functions provided by ip_fragment.o
+ */
+
+struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev);
+void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag);
+
+/*
+ * Functions provided by ip_forward.c
+ */
+
+extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, __u32 target_addr);
+extern int sysctl_ip_forward;
+
+
+/*
+ * Functions provided by ip_options.c
+ */
+
+extern void ip_options_build(struct sk_buff *skb, struct options *opt, __u32 daddr, __u32 saddr, int is_frag);
+extern int ip_options_echo(struct options *dopt, struct options *sopt, __u32 daddr, __u32 saddr, struct sk_buff *skb);
+extern void ip_options_fragment(struct sk_buff *skb);
+extern int ip_options_compile(struct options *opt, struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_sockglue.c
+ */
+
+extern int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen);
+extern int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen);
+
+#endif /* _IP_H */
diff --git a/linux/src/include/net/ip_alias.h b/linux/src/include/net/ip_alias.h
new file mode 100644
index 0000000..ee9aa33
--- /dev/null
+++ b/linux/src/include/net/ip_alias.h
@@ -0,0 +1,23 @@
+/*
+ * IP_ALIAS (AF_INET) aliasing definitions.
+ *
+ *
+ * Version: @(#)ip_alias.h 0.50 4/20/97
+ *
+ * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _IP_ALIAS_H
+#define _IP_ALIAS_H
+
+extern int ip_alias_init(void);
+extern int ip_alias_done(void);
+
+#endif /* _IP_ALIAS_H */
diff --git a/linux/src/include/net/ip_forward.h b/linux/src/include/net/ip_forward.h
new file mode 100644
index 0000000..f1b532f
--- /dev/null
+++ b/linux/src/include/net/ip_forward.h
@@ -0,0 +1,11 @@
+#ifndef __NET_IP_FORWARD_H
+#define __NET_IP_FORWARD_H
+
+#define IPFWD_FRAGMENT 1
+#define IPFWD_LASTFRAG 2
+#define IPFWD_MASQUERADED 4
+#define IPFWD_MULTICASTING 8
+#define IPFWD_MULTITUNNEL 0x10
+#define IPFWD_NOTTLDEC 0x20
+
+#endif
diff --git a/linux/src/include/net/ip_masq.h b/linux/src/include/net/ip_masq.h
new file mode 100644
index 0000000..ccfb646
--- /dev/null
+++ b/linux/src/include/net/ip_masq.h
@@ -0,0 +1,205 @@
+/*
+ * IP masquerading functionality definitions
+ */
+
+#ifndef _IP_MASQ_H
+#define _IP_MASQ_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/config.h>
+
+/*
+ * This define affects the number of ports that can be handled
+ * by each of the protocol helper modules.
+ */
+#define MAX_MASQ_APP_PORTS 12
+
+/*
+ * Linux ports don't normally get allocated above 32K.
+ * I used an extra 4K port-space
+ */
+
+#define PORT_MASQ_BEGIN 61000
+#define PORT_MASQ_END (PORT_MASQ_BEGIN+4096)
+
+/*
+ * Default timeouts for masquerade functions. The control channels now
+ * expire the same as TCP channels (other than being updated by
+ * packets on their associated data channels).
+ */
+#define MASQUERADE_EXPIRE_TCP 15*60*HZ
+#define MASQUERADE_EXPIRE_TCP_FIN 2*60*HZ
+#define MASQUERADE_EXPIRE_UDP 5*60*HZ
+/*
+ * ICMP can no longer be modified on the fly using an ioctl - this
+ * define is the only way to change the timeouts
+ */
+#define MASQUERADE_EXPIRE_ICMP 125*HZ
+
+#define IP_AUTOFW_EXPIRE 15*HZ
+
+#define IP_MASQ_F_OUT_SEQ 0x01 /* must do output seq adjust */
+#define IP_MASQ_F_IN_SEQ 0x02 /* must do input seq adjust */
+#define IP_MASQ_F_NO_DPORT 0x04 /* no dport set yet */
+#define IP_MASQ_F_NO_DADDR 0x08 /* no daddr yet */
+#define IP_MASQ_F_HASHED 0x10 /* hashed entry */
+#define IP_MASQ_F_SAW_RST 0x20 /* tcp rst pkt seen */
+#define IP_MASQ_F_SAW_FIN_IN 0x40 /* tcp fin pkt seen incoming */
+#define IP_MASQ_F_SAW_FIN_OUT 0x80 /* tcp fin pkt seen outgoing */
+#define IP_MASQ_F_SAW_FIN (IP_MASQ_F_SAW_FIN_IN | \
+ IP_MASQ_F_SAW_FIN_OUT)
+ /* tcp fin pkts seen */
+#define IP_MASQ_F_CONTROL 0x100 /* this is a control channel */
+#define IP_MASQ_F_NO_SPORT 0x200 /* no sport set yet */
+#define IP_MASQ_F_FTP_PASV 0x400 /* ftp PASV command just issued */
+#define IP_MASQ_F_NO_REPLY 0x800 /* no reply yet from outside */
+#define IP_MASQ_F_AFW_PORT 0x1000
+
+#ifdef __KERNEL__
+
+/*
+ * Delta seq. info structure
+ * Each MASQ struct has 2 (output AND input seq. changes).
+ */
+
+struct ip_masq_seq {
+ __u32 init_seq; /* Add delta from this seq */
+ short delta; /* Delta in sequence numbers */
+ short previous_delta; /* Delta in sequence numbers before last resized pkt */
+}; /* TCP sequence-number adjustment state (one each for in/out direction) */
+
+/*
+ * MASQ structure allocated for each masqueraded association
+ */
+struct ip_masq {
+ struct ip_masq *m_link, *s_link; /* hashed link ptrs */
+ struct timer_list timer; /* Expiration timer */
+ __u16 protocol; /* Which protocol are we talking? */
+ __u16 sport, dport, mport; /* src, dst & masq ports */
+ __u32 saddr, daddr, maddr; /* src, dst & masq addresses */
+ struct ip_masq_seq out_seq, in_seq; /* seq-number fixups per direction */
+ struct ip_masq_app *app; /* bound ip_masq_app object */
+ void *app_data; /* Application private data */
+ unsigned flags; /* status flags (IP_MASQ_F_*) */
+ struct ip_masq *control; /* Corresponding control connection */
+}; /* one masqueraded association */
+
+/*
+ * timeout values
+ */
+
+struct ip_fw_masq {
+ int tcp_timeout; /* established TCP expiry (jiffies, cf. MASQUERADE_EXPIRE_TCP) */
+ int tcp_fin_timeout; /* TCP-after-FIN expiry (jiffies) */
+ int udp_timeout; /* UDP expiry (jiffies) */
+}; /* configurable masquerade timeout values, see ip_masq_expire */
+
+extern struct ip_fw_masq *ip_masq_expire;
+
+/*
+ * [0]: UDP free_ports
+ * [1]: TCP free_ports
+ * [2]: ICMP free ids
+ */
+
+extern int ip_masq_free_ports[3];
+
+/*
+ * ip_masq initializer (registers symbols and /proc/net entries)
+ */
+extern int ip_masq_init(void);
+
+/*
+ * functions called from ip layer
+ */
+extern int ip_fw_masquerade(struct sk_buff **, struct device *);
+extern int ip_fw_masq_icmp(struct sk_buff **, struct device *);
+extern int ip_fw_demasquerade(struct sk_buff **, struct device *);
+
+/*
+ * ip_masq obj creation/deletion functions.
+ */
+extern struct ip_masq *ip_masq_new(struct device *dev, int proto, __u32 saddr, __u16 sport, __u32 daddr, __u16 dport, unsigned flags);
+extern void ip_masq_set_expire(struct ip_masq *ms, unsigned long tout);
+
+#ifdef CONFIG_IP_MASQUERADE_IPAUTOFW
+extern void ip_autofw_expire(unsigned long data);
+#endif
+
+/*
+ *
+ * IP_MASQ_APP: IP application masquerading definitions
+ *
+ */
+
+struct ip_masq_app
+{
+ struct ip_masq_app *next; /* next registered application proxy */
+ char *name; /* name of application proxy */
+ unsigned type; /* type = proto<<16 | port (host byte order)*/
+ int n_attach; /* count of ip_masq entries bound to this app — presumably; confirm */
+ int (*masq_init_1) /* ip_masq initializer */
+ (struct ip_masq_app *, struct ip_masq *);
+ int (*masq_done_1) /* ip_masq fin. */
+ (struct ip_masq_app *, struct ip_masq *);
+ int (*pkt_out) /* output (masquerading) hook */
+ (struct ip_masq_app *, struct ip_masq *, struct sk_buff **, struct device *);
+ int (*pkt_in) /* input (demasq) hook */
+ (struct ip_masq_app *, struct ip_masq *, struct sk_buff **, struct device *);
+}; /* per-protocol application helper for masquerading */
+
+/*
+ * ip_masq_app initializer
+ */
+extern int ip_masq_app_init(void);
+
+/*
+ * ip_masq_app object registration functions (port: host byte order)
+ */
+extern int register_ip_masq_app(struct ip_masq_app *mapp, unsigned short proto, __u16 port);
+extern int unregister_ip_masq_app(struct ip_masq_app *mapp);
+
+/*
+ * get ip_masq_app obj by proto,port(net_byte_order)
+ */
+extern struct ip_masq_app * ip_masq_app_get(unsigned short proto, __u16 port);
+
+/*
+ * ip_masq TO ip_masq_app (un)binding functions.
+ */
+extern struct ip_masq_app * ip_masq_bind_app(struct ip_masq *ms);
+extern int ip_masq_unbind_app(struct ip_masq *ms);
+
+/*
+ * output and input app. masquerading hooks.
+ *
+ */
+extern int ip_masq_app_pkt_out(struct ip_masq *, struct sk_buff **skb_p, struct device *dev);
+extern int ip_masq_app_pkt_in(struct ip_masq *, struct sk_buff **skb_p, struct device *dev);
+
+/*
+ * service routine(s).
+ */
+extern struct ip_masq * ip_masq_out_get_2(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
+extern struct ip_masq * ip_masq_in_get_2(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
+
+/*
+ * /proc/net entry
+ */
+extern int ip_masq_app_getinfo(char *buffer, char **start, off_t offset, int length, int dummy);
+
+/*
+ * skb_replace function used by "client" modules to replace
+ * a segment of skb.
+ */
+extern struct sk_buff * ip_masq_skb_replace(struct sk_buff *skb, int pri, char *o_buf, int o_len, char *n_buf, int n_len);
+
+#ifdef CONFIG_IP_MASQUERADE_IPAUTOFW
+extern struct ip_autofw * ip_autofw_hosts;
+#endif /* CONFIG_IP_MASQUERADE_IPAUTOFW */
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_MASQ_H */
diff --git a/linux/src/include/net/ipip.h b/linux/src/include/net/ipip.h
new file mode 100644
index 0000000..bba1492
--- /dev/null
+++ b/linux/src/include/net/ipip.h
@@ -0,0 +1,4 @@
+extern int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
+ __u32 daddr, unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+
diff --git a/linux/src/include/net/ipx.h b/linux/src/include/net/ipx.h
new file mode 100644
index 0000000..13d3dbb
--- /dev/null
+++ b/linux/src/include/net/ipx.h
@@ -0,0 +1,88 @@
+
+/*
+ * The following information is in its entirety obtained from:
+ *
+ * Novell 'IPX Router Specification' Version 1.10
+ * Part No. 107-000029-001
+ *
+ * Which is available from ftp.novell.com
+ */
+
+#ifndef _NET_INET_IPX_H_
+#define _NET_INET_IPX_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/datalink.h>
+#include <linux/ipx.h>
+
+/* #define CONFIG_IPX_INTERN 1 */
+
+typedef struct
+{
+ unsigned long net; /* IPX network number */
+ unsigned char node[IPX_NODE_LEN]; /* node (hardware) address */
+ unsigned short sock; /* IPX socket number */
+} ipx_address; /* full IPX network.node.socket address */
+
+#define ipx_broadcast_node "\377\377\377\377\377\377"
+#define ipx_this_node "\0\0\0\0\0\0"
+
+typedef struct ipx_packet
+{
+ unsigned short ipx_checksum; /* usually IPX_NO_CHECKSUM */
+#define IPX_NO_CHECKSUM 0xFFFF
+ unsigned short ipx_pktsize; /* total packet length including this header — confirm byte order at use sites */
+ unsigned char ipx_tctrl; /* transport control — hop count per the Novell spec; confirm */
+ unsigned char ipx_type; /* packet type, IPX_TYPE_* below */
+#define IPX_TYPE_UNKNOWN 0x00
+#define IPX_TYPE_RIP 0x01 /* may also be 0 */
+#define IPX_TYPE_SAP 0x04 /* may also be 0 */
+#define IPX_TYPE_SPX 0x05 /* Not yet implemented */
+#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
+#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast [Not supported] */
+ ipx_address ipx_dest __attribute__ ((packed)); /* destination address */
+ ipx_address ipx_source __attribute__ ((packed)); /* source address */
+} ipx_packet; /* on-the-wire IPX header */
+
+
+typedef struct sock ipx_socket;
+
+#include <net/ipxcall.h>
+extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt);
+extern void ipxrtr_device_down(struct device *dev);
+
+typedef struct ipx_interface {
+ /* IPX address */
+ unsigned long if_netnum; /* IPX network number of this interface */
+ unsigned char if_node[IPX_NODE_LEN]; /* node address used on this interface */
+
+ /* physical device info */
+ struct device *if_dev; /* underlying network device */
+ struct datalink_proto *if_dlink; /* datalink framing used (802.2/802.3/SNAP/EtherII) — confirm */
+ unsigned short if_dlink_type; /* datalink frame type identifier */
+
+ /* socket support */
+ unsigned short if_sknum; /* next socket number to try — presumably; confirm */
+ ipx_socket *if_sklist; /* sockets bound to this interface */
+
+ /* administrative overhead */
+ int if_ipx_offset; /* bytes of headroom needed before the IPX header */
+ unsigned char if_internal; /* nonzero for the internal network interface */
+ unsigned char if_primary; /* nonzero if this is the primary interface */
+
+ struct ipx_interface *if_next; /* next interface in the list */
+} ipx_interface;
+
+typedef struct ipx_route {
+ unsigned long ir_net; /* destination IPX network number */
+ ipx_interface *ir_intrfc; /* interface used to reach it */
+ unsigned char ir_routed; /* nonzero when reached via a router rather than directly — confirm */
+ unsigned char ir_router_node[IPX_NODE_LEN]; /* next-hop router node address */
+ struct ipx_route *ir_next; /* next entry in the routing table */
+} ipx_route; /* one IPX routing table entry */
+
+#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
+#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
+
+#endif
diff --git a/linux/src/include/net/ipxcall.h b/linux/src/include/net/ipxcall.h
new file mode 100644
index 0000000..eb5bd2b
--- /dev/null
+++ b/linux/src/include/net/ipxcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void ipx_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/netlink.h b/linux/src/include/net/netlink.h
new file mode 100644
index 0000000..0d7cf3f
--- /dev/null
+++ b/linux/src/include/net/netlink.h
@@ -0,0 +1,32 @@
+#ifndef __NET_NETLINK_H
+#define __NET_NETLINK_H
+
+#define NET_MAJOR 36 /* Major 18 is reserved for networking */
+#define MAX_LINKS 11 /* 18,0 for route updates, 18,1 for SKIP, 18,2 debug tap 18,3 PPP reserved */
+ /* 4-7 are psi0-psi3 8 is arpd 9 is ppp */
+ /* 10 is for IPSEC <John Ioannidis> */
+#define MAX_QBYTES 32768 /* Maximum bytes in the queue */
+
+#include <linux/config.h>
+
+extern int netlink_attach(int unit, int (*function)(struct sk_buff *skb));
+extern int netlink_donothing(struct sk_buff *skb);
+extern void netlink_detach(int unit);
+extern int netlink_post(int unit, struct sk_buff *skb);
+extern int init_netlink(void);
+
+#define NETLINK_ROUTE 0 /* Routing/device hook */
+#define NETLINK_SKIP 1 /* Reserved for ENskip */
+#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
+#define NETLINK_FIREWALL 3 /* Firewalling hook */
+#define NETLINK_PSI 4 /* PSI devices - 4 to 7 */
+#define NETLINK_ARPD 8
+#define NETLINK_NET_PPP 9 /* Non tty PPP devices */
+#define NETLINK_IPSEC 10 /* IPSEC */
+
+#ifdef CONFIG_RTNETLINK
+extern void ip_netlink_msg(unsigned long, __u32, __u32, __u32, short, short, char *);
+#else
+#define ip_netlink_msg(a,b,c,d,e,f,g)
+#endif
+#endif
diff --git a/linux/src/include/net/netrom.h b/linux/src/include/net/netrom.h
new file mode 100644
index 0000000..0920f8d
--- /dev/null
+++ b/linux/src/include/net/netrom.h
@@ -0,0 +1,166 @@
+/*
+ * Declarations of NET/ROM type objects.
+ *
+ * Jonathan Naylor G4KLX 9/4/95
+ */
+
+#ifndef _NETROM_H
+#define _NETROM_H
+#include <linux/netrom.h>
+
+#define NR_SLOWHZ 10 /* Run timing at 1/10 second */
+
+#define NR_NETWORK_LEN 15
+#define NR_TRANSPORT_LEN 5
+
+#define NR_PROTO_IP 0x0C
+
+#define NR_PROTOEXT 0x00
+#define NR_CONNREQ 0x01
+#define NR_CONNACK 0x02
+#define NR_DISCREQ 0x03
+#define NR_DISCACK 0x04
+#define NR_INFO 0x05
+#define NR_INFOACK 0x06
+
+#define NR_CHOKE_FLAG 0x80
+#define NR_NAK_FLAG 0x40
+#define NR_MORE_FLAG 0x20
+
+/* Define Link State constants. */
+
+#define NR_STATE_0 0
+#define NR_STATE_1 1
+#define NR_STATE_2 2
+#define NR_STATE_3 3
+
+#define NR_COND_ACK_PENDING 0x01
+#define NR_COND_REJECT 0x02
+#define NR_COND_PEER_RX_BUSY 0x04
+#define NR_COND_OWN_RX_BUSY 0x08
+
+#define NR_DEFAULT_T1 (120 * NR_SLOWHZ) /* Outstanding frames - 120 seconds */
+#define NR_DEFAULT_T2 (5 * NR_SLOWHZ) /* Response delay - 5 seconds */
+#define NR_DEFAULT_N2 3 /* Number of Retries - 3 */
+#define NR_DEFAULT_T4 (180 * NR_SLOWHZ) /* Busy Delay - 180 seconds */
+#define NR_DEFAULT_WINDOW 4 /* Default Window Size - 4 */
+#define NR_DEFAULT_OBS 6 /* Default Obsolescence Count - 6 */
+#define NR_DEFAULT_QUAL 10 /* Default Neighbour Quality - 10 */
+#define NR_DEFAULT_TTL 16 /* Default Time To Live - 16 */
+#define NR_DEFAULT_ROUTING 1 /* Is routing enabled ? */
+#define NR_DEFAULT_FAILS 2 /* Link fails until route fails */
+
+#define NR_MODULUS 256
+#define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */
+#define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */
+
+typedef struct {
+ ax25_address user_addr, source_addr, dest_addr;
+ struct device *device;
+ unsigned char my_index, my_id;
+ unsigned char your_index, your_id;
+ unsigned char state, condition, bpqext, window;
+ unsigned short vs, vr, va, vl;
+ unsigned char n2, n2count;
+ unsigned short t1, t2, t4;
+ unsigned short t1timer, t2timer, t4timer;
+ unsigned short fraglen;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head frag_queue;
+ struct sock *sk; /* Backlink to socket */
+} nr_cb;
+
+struct nr_neigh {
+ struct nr_neigh *next;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct device *dev;
+ unsigned char quality;
+ unsigned char locked;
+ unsigned short count;
+ unsigned int number;
+ unsigned char failed;
+};
+
+struct nr_route {
+ unsigned char quality;
+ unsigned char obs_count;
+ struct nr_neigh *neighbour;
+};
+
+struct nr_node {
+ struct nr_node *next;
+ ax25_address callsign;
+ char mnemonic[7];
+ unsigned char which;
+ unsigned char count;
+ struct nr_route routes[3];
+};
+
+/* af_netrom.c */
+extern int sysctl_netrom_default_path_quality;
+extern int sysctl_netrom_obsolescence_count_initialiser;
+extern int sysctl_netrom_network_ttl_initialiser;
+extern int sysctl_netrom_transport_timeout;
+extern int sysctl_netrom_transport_maximum_tries;
+extern int sysctl_netrom_transport_acknowledge_delay;
+extern int sysctl_netrom_transport_busy_delay;
+extern int sysctl_netrom_transport_requested_window_size;
+extern int sysctl_netrom_routing_control;
+extern int sysctl_netrom_link_fails_count;
+extern int nr_rx_frame(struct sk_buff *, struct device *);
+extern void nr_destroy_socket(struct sock *);
+
+/* nr_dev.c */
+extern int nr_rx_ip(struct sk_buff *, struct device *);
+extern int nr_init(struct device *);
+
+#include <net/nrcall.h>
+
+/* nr_in.c */
+extern int nr_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* nr_out.c */
+extern void nr_output(struct sock *, struct sk_buff *);
+extern void nr_send_nak_frame(struct sock *);
+extern void nr_kick(struct sock *);
+extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
+extern void nr_establish_data_link(struct sock *);
+extern void nr_enquiry_response(struct sock *);
+extern void nr_check_iframes_acked(struct sock *, unsigned short);
+
+/* nr_route.c */
+extern void nr_rt_device_down(struct device *);
+extern struct device *nr_dev_first(void);
+extern struct device *nr_dev_get(ax25_address *);
+extern int nr_rt_ioctl(unsigned int, void *);
+extern void nr_link_failed(ax25_cb *, int);
+extern int nr_route_frame(struct sk_buff *, ax25_cb *);
+extern int nr_nodes_get_info(char *, char **, off_t, int, int);
+extern int nr_neigh_get_info(char *, char **, off_t, int, int);
+extern void nr_rt_free(void);
+
+/* nr_subr.c */
+extern void nr_clear_queues(struct sock *);
+extern void nr_frames_acked(struct sock *, unsigned short);
+extern void nr_requeue_frames(struct sock *);
+extern int nr_validate_nr(struct sock *, unsigned short);
+extern int nr_in_rx_window(struct sock *, unsigned short);
+extern void nr_write_internal(struct sock *, int);
+extern void nr_transmit_dm(struct sk_buff *, int);
+
+/* nr_timer.c */
+extern void nr_set_timer(struct sock *);
+
+/* sysctl_net_netrom.c */
+extern void nr_register_sysctl(void);
+extern void nr_unregister_sysctl(void);
+
+/* nr_loopback.c */
+extern void nr_loopback_init(void);
+extern void nr_loopback_clear(void);
+extern int nr_loopback_queue(struct sk_buff *);
+
+#endif
diff --git a/linux/src/include/net/nrcall.h b/linux/src/include/net/nrcall.h
new file mode 100644
index 0000000..09ee699
--- /dev/null
+++ b/linux/src/include/net/nrcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void nr_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/p8022.h b/linux/src/include/net/p8022.h
new file mode 100644
index 0000000..03d7c3d
--- /dev/null
+++ b/linux/src/include/net/p8022.h
@@ -0,0 +1,7 @@
+#ifndef _NET_P8022_H
+#define _NET_P8022_H
+
+extern struct datalink_proto *register_8022_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+extern void unregister_8022_client(unsigned char type);
+
+#endif
diff --git a/linux/src/include/net/p8022call.h b/linux/src/include/net/p8022call.h
new file mode 100644
index 0000000..14f0c2c
--- /dev/null
+++ b/linux/src/include/net/p8022call.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void p8022_proto_init(struct net_proto *);
diff --git a/linux/src/include/net/p8022tr.h b/linux/src/include/net/p8022tr.h
new file mode 100644
index 0000000..f4231ec
--- /dev/null
+++ b/linux/src/include/net/p8022tr.h
@@ -0,0 +1,8 @@
+#ifndef _NET_P8022TR_H
+#define _NET_P8022TR_H
+
+extern struct datalink_proto *register_8022tr_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+extern void unregister_8022tr_client(unsigned char type);
+
+#endif
+
diff --git a/linux/src/include/net/p8022trcall.h b/linux/src/include/net/p8022trcall.h
new file mode 100644
index 0000000..3ce6f3c
--- /dev/null
+++ b/linux/src/include/net/p8022trcall.h
@@ -0,0 +1,3 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void p8022tr_proto_init(struct net_proto *);
+
diff --git a/linux/src/include/net/protocol.h b/linux/src/include/net/protocol.h
new file mode 100644
index 0000000..c21f845
--- /dev/null
+++ b/linux/src/include/net/protocol.h
@@ -0,0 +1,55 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the protocol dispatcher.
+ *
+ * Version: @(#)protocol.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Changes:
+ * Alan Cox : Added a name field and a frag handler
+ * field for later.
+ * Alan Cox : Cleaned up, and sorted types.
+ */
+
+#ifndef _PROTOCOL_H
+#define _PROTOCOL_H
+
+#define MAX_INET_PROTOS 32 /* Must be a power of 2 */
+
+
+/* This is used to register protocols. */
+struct inet_protocol {
+ int (*handler)(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+ void (*err_handler)(int type, int code, unsigned char *buff,
+ __u32 daddr,
+ __u32 saddr,
+ struct inet_protocol *protocol, int len);
+ struct inet_protocol *next;
+ unsigned char protocol;
+ unsigned char copy:1;
+ void *data;
+ const char *name;
+};
+
+
+extern struct inet_protocol *inet_protocol_base;
+extern struct inet_protocol *inet_protos[MAX_INET_PROTOS];
+
+
+extern void inet_add_protocol(struct inet_protocol *prot);
+extern int inet_del_protocol(struct inet_protocol *prot);
+
+
+#endif /* _PROTOCOL_H */
diff --git a/linux/src/include/net/psnap.h b/linux/src/include/net/psnap.h
new file mode 100644
index 0000000..49a68f7
--- /dev/null
+++ b/linux/src/include/net/psnap.h
@@ -0,0 +1,7 @@
+#ifndef _NET_PSNAP_H
+#define _NET_PSNAP_H
+
+extern struct datalink_proto *register_snap_client(unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+extern void unregister_snap_client(unsigned char *desc);
+
+#endif
diff --git a/linux/src/include/net/psnapcall.h b/linux/src/include/net/psnapcall.h
new file mode 100644
index 0000000..9da5763
--- /dev/null
+++ b/linux/src/include/net/psnapcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void snap_proto_init(struct net_proto *);
diff --git a/linux/src/include/net/rarp.h b/linux/src/include/net/rarp.h
new file mode 100644
index 0000000..7bfb08e
--- /dev/null
+++ b/linux/src/include/net/rarp.h
@@ -0,0 +1,12 @@
+/* linux/net/inet/rarp.h */
+#ifndef _RARP_H
+#define _RARP_H
+
+extern int rarp_ioctl(unsigned int cmd, void *arg);
+extern int rarp_get_info(char *buffer,
+ char **start,
+ off_t offset,
+ int length,
+ int dummy);
+#endif /* _RARP_H */
+
diff --git a/linux/src/include/net/raw.h b/linux/src/include/net/raw.h
new file mode 100644
index 0000000..5b2f97f
--- /dev/null
+++ b/linux/src/include/net/raw.h
@@ -0,0 +1,44 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the RAW-IP module.
+ *
+ * Version: @(#)raw.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _RAW_H
+#define _RAW_H
+
+
+extern struct proto raw_prot;
+
+
+extern void raw_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32 saddr, struct inet_protocol *protocol);
+extern int raw_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int raw_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+extern int raw_rcv(struct sock *, struct sk_buff *, struct device *,
+ __u32, __u32);
+
+/* Note: v4 ICMP wants to get at this stuff, if you change the
+ * hashing mechanism, make sure you update icmp.c as well.
+ */
+#define RAWV4_HTABLE_SIZE MAX_INET_PROTOS
+extern struct sock *raw_v4_htable[RAWV4_HTABLE_SIZE];
+
+
+extern struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
+ unsigned long raddr, unsigned long laddr);
+
+#endif /* _RAW_H */
diff --git a/linux/src/include/net/rose.h b/linux/src/include/net/rose.h
new file mode 100644
index 0000000..e868e51
--- /dev/null
+++ b/linux/src/include/net/rose.h
@@ -0,0 +1,233 @@
+/*
+ * Declarations of Rose type objects.
+ *
+ * Jonathan Naylor G4KLX 25/8/96
+ */
+
+#ifndef _ROSE_H
+#define _ROSE_H
+#include <linux/rose.h>
+
+#define ROSE_SLOWHZ 10 /* Run timing at 1/10 second */
+
+#define ROSE_ADDR_LEN 5
+
+#define ROSE_MIN_LEN 3
+
+#define ROSE_GFI 0x10
+#define ROSE_Q_BIT 0x80
+#define ROSE_D_BIT 0x40
+#define ROSE_M_BIT 0x10
+#define M_BIT 0x10
+
+#define ROSE_CALL_REQUEST 0x0B
+#define ROSE_CALL_ACCEPTED 0x0F
+#define ROSE_CLEAR_REQUEST 0x13
+#define ROSE_CLEAR_CONFIRMATION 0x17
+#define ROSE_DATA 0x00
+#define ROSE_INTERRUPT 0x23
+#define ROSE_INTERRUPT_CONFIRMATION 0x27
+#define ROSE_RR 0x01
+#define ROSE_RNR 0x05
+#define ROSE_REJ 0x09
+#define ROSE_RESET_REQUEST 0x1B
+#define ROSE_RESET_CONFIRMATION 0x1F
+#define ROSE_REGISTRATION_REQUEST 0xF3
+#define ROSE_REGISTRATION_CONFIRMATION 0xF7
+#define ROSE_RESTART_REQUEST 0xFB
+#define ROSE_RESTART_CONFIRMATION 0xFF
+#define ROSE_DIAGNOSTIC 0xF1
+#define ROSE_ILLEGAL 0xFD
+
+/* Define Link State constants. */
+
+#define ROSE_STATE_0 0 /* Ready */
+#define ROSE_STATE_1 1 /* Awaiting Call Accepted */
+#define ROSE_STATE_2 2 /* Awaiting Clear Confirmation */
+#define ROSE_STATE_3 3 /* Data Transfer */
+#define ROSE_STATE_4 4 /* Awaiting Reset Confirmation */
+#define ROSE_STATE_5 5 /* Deferred Call Acceptance */
+
+#define ROSE_DEFAULT_T0 (180 * ROSE_SLOWHZ) /* Default T10 T20 value */
+#define ROSE_DEFAULT_T1 (200 * ROSE_SLOWHZ) /* Default T11 T21 value */
+#define ROSE_DEFAULT_T2 (180 * ROSE_SLOWHZ) /* Default T12 T22 value */
+#define ROSE_DEFAULT_T3 (180 * ROSE_SLOWHZ) /* Default T13 T23 value */
+#define ROSE_DEFAULT_HB (5 * ROSE_SLOWHZ) /* Default Holdback value */
+#define ROSE_DEFAULT_ROUTING 1 /* Default routing flag */
+#define ROSE_DEFAULT_FAIL_TIMEOUT (120 * ROSE_SLOWHZ) /* Time until link considered usable */
+#define ROSE_DEFAULT_MAXVC 50 /* Maximum number of VCs per neighbour */
+#define ROSE_DEFAULT_WINDOW_SIZE 7 /* Default window value */
+
+#define ROSE_MODULUS 8
+#define ROSE_MAX_PACKET_SIZE 251 /* Maximum Packet Size */
+
+#define ROSE_MAX_WINDOW_LEN ((AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 300) * 7)
+
+#define ROSE_COND_ACK_PENDING 0x01
+#define ROSE_COND_PEER_RX_BUSY 0x02
+#define ROSE_COND_OWN_RX_BUSY 0x04
+
+#define FAC_NATIONAL 0x00
+#define FAC_CCITT 0x0F
+
+#define FAC_NATIONAL_RAND 0x7F
+#define FAC_NATIONAL_FLAGS 0x3F
+#define FAC_NATIONAL_DEST_DIGI 0xE9
+#define FAC_NATIONAL_SRC_DIGI 0xEB
+#define FAC_NATIONAL_FAIL_CALL 0xED
+#define FAC_NATIONAL_FAIL_ADD 0xEE
+#define FAC_NATIONAL_DIGIS 0xEF
+
+#define FAC_CCITT_DEST_NSAP 0xC9
+#define FAC_CCITT_SRC_NSAP 0xCB
+
+struct rose_neigh {
+ struct rose_neigh *next;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct device *dev;
+ unsigned short count;
+ unsigned short use;
+ unsigned int number;
+ char restarted;
+ char dce_mode;
+ struct sk_buff_head queue;
+ unsigned short t0timer, ftimer;
+ struct timer_list timer;
+};
+
+#define ROSE_MAX_ALTERNATE 3
+struct rose_node {
+ struct rose_node *next;
+ rose_address address;
+ unsigned short mask;
+ unsigned char count;
+ struct rose_neigh *neighbour[ROSE_MAX_ALTERNATE];
+};
+
+typedef struct {
+ unsigned int lci;
+ struct rose_neigh *neigh;
+ unsigned short vs, vr, va, vl;
+ unsigned short pending;
+ unsigned char state, condition;
+ struct timer_list timer;
+} rose_tr;
+
+struct rose_route {
+ struct rose_route *next;
+ rose_address src_addr, dest_addr;
+ ax25_address src_call, dest_call;
+ rose_tr tr1, tr2;
+ unsigned int rand;
+};
+
+typedef struct {
+ rose_address source_addr, dest_addr;
+ ax25_address source_call, dest_call;
+ unsigned char source_ndigis, dest_ndigis;
+ ax25_address source_digis[ROSE_MAX_DIGIS];
+ ax25_address dest_digis[ROSE_MAX_DIGIS];
+ struct rose_neigh *neighbour;
+ struct device *device;
+ unsigned int lci, rand;
+ unsigned char state, condition, qbitincl, defer;
+ unsigned char cause, diagnostic;
+ unsigned short vs, vr, va, vl;
+ unsigned short timer;
+ unsigned short t1, t2, t3, hb;
+#ifdef M_BIT
+ unsigned short fraglen;
+ struct sk_buff_head frag_queue;
+#endif
+ struct sk_buff_head ack_queue;
+ struct rose_facilities_struct facilities;
+ struct sock *sk; /* Backlink to socket */
+} rose_cb;
+
+/* af_rose.c */
+extern ax25_address rose_callsign;
+extern int sysctl_rose_restart_request_timeout;
+extern int sysctl_rose_call_request_timeout;
+extern int sysctl_rose_reset_request_timeout;
+extern int sysctl_rose_clear_request_timeout;
+extern int sysctl_rose_ack_hold_back_timeout;
+extern int sysctl_rose_routing_control;
+extern int sysctl_rose_link_fail_timeout;
+extern int sysctl_rose_maximum_vcs;
+extern int sysctl_rose_window_size;
+extern int rosecmp(rose_address *, rose_address *);
+extern int rosecmpm(rose_address *, rose_address *, unsigned short);
+extern char *rose2asc(rose_address *);
+extern void rose_kill_by_neigh(struct rose_neigh *);
+extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+extern unsigned int rose_new_lci(struct rose_neigh *);
+extern int rose_rx_call_request(struct sk_buff *, struct device *, struct rose_neigh *, unsigned int);
+extern void rose_destroy_socket(struct sock *);
+
+/* rose_dev.c */
+extern int rose_rx_ip(struct sk_buff *, struct device *);
+extern int rose_init(struct device *);
+
+#include <net/rosecall.h>
+
+/* rose_in.c */
+extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* rose_link.c */
+extern void rose_link_set_timer(struct rose_neigh *);
+extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
+extern void rose_transmit_restart_request(struct rose_neigh *);
+extern void rose_transmit_restart_confirmation(struct rose_neigh *);
+extern void rose_transmit_diagnostic(struct rose_neigh *, unsigned char);
+extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
+extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+
+/* rose_loopback.c */
+extern void rose_loopback_init(void);
+extern void rose_loopback_clear(void);
+extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+
+/* rose_out.c */
+extern void rose_kick(struct sock *);
+extern void rose_enquiry_response(struct sock *);
+
+/* rose_route.c */
+extern void rose_rt_device_down(struct device *);
+extern void rose_link_device_down(struct device *);
+extern void rose_clean_neighbour(struct rose_neigh *);
+extern struct device *rose_dev_first(void);
+extern struct device *rose_dev_get(rose_address *);
+extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+extern struct device *rose_ax25_dev_get(char *);
+extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *);
+extern int rose_rt_ioctl(unsigned int, void *);
+extern void rose_link_failed(ax25_cb *, int);
+extern int rose_route_frame(struct sk_buff *, ax25_cb *);
+extern int rose_nodes_get_info(char *, char **, off_t, int, int);
+extern int rose_neigh_get_info(char *, char **, off_t, int, int);
+extern int rose_routes_get_info(char *, char **, off_t, int, int);
+extern void rose_rt_free(void);
+
+/* rose_subr.c */
+extern void rose_clear_queues(struct sock *);
+extern void rose_frames_acked(struct sock *, unsigned short);
+extern int rose_validate_nr(struct sock *, unsigned short);
+extern void rose_write_internal(struct sock *, int);
+extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *);
+extern int rose_create_facilities(unsigned char *, rose_cb *);
+
+/* rose_timer.c */
+extern void rose_set_timer(struct sock *);
+
+/* sysctl_net_rose.c */
+extern void rose_register_sysctl(void);
+extern void rose_unregister_sysctl(void);
+
+/* rose_transit.c */
+void rose_transit(struct sk_buff *, rose_tr *, rose_tr *);
+void rose_init_transit(rose_tr *, unsigned int, struct rose_neigh *);
+
+#endif
diff --git a/linux/src/include/net/rosecall.h b/linux/src/include/net/rosecall.h
new file mode 100644
index 0000000..5bbe69c
--- /dev/null
+++ b/linux/src/include/net/rosecall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void rose_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/route.h b/linux/src/include/net/route.h
new file mode 100644
index 0000000..2af1a41
--- /dev/null
+++ b/linux/src/include/net/route.h
@@ -0,0 +1,189 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP router.
+ *
+ * Version: @(#)route.h 1.0.4 05/27/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Fixes:
+ * Alan Cox : Reformatted. Added ip_rt_local()
+ * Alan Cox : Support for TCP parameters.
+ * Alexey Kuznetsov: Major changes for new routing code.
+ * Elliot Poger : Added support for SO_BINDTODEVICE.
+ * Wolfgang Walter,
+ * Daniel Ryde,
+ * Ingo Molinar : fixed bug in ip_rt_put introduced
+ * by SO_BINDTODEVICE support causing
+ * a memory leak
+ *
+ * FIXME:
+ * Make atomic ops more generic and hide them in asm/...
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ROUTE_H
+#define _ROUTE_H
+
+#include <linux/config.h>
+
+/*
+ * 0 - no debugging messages
+ * 1 - rare events and bugs situations (default)
+ * 2 - trace mode.
+ */
+#define RT_CACHE_DEBUG 0
+
+#define RT_HASH_DIVISOR 256
+#define RT_CACHE_SIZE_MAX 256
+
+#define RTZ_HASH_DIVISOR 256
+
+#if RT_CACHE_DEBUG >= 2
+#define RTZ_HASHING_LIMIT 0
+#else
+#define RTZ_HASHING_LIMIT 16
+#endif
+
+/*
+ * Maximal time to live for unused entry.
+ */
+#define RT_CACHE_TIMEOUT (HZ*300)
+
+/*
+ * Prevents LRU trashing, entries considered equivalent,
+ * if the difference between last use times is less than this number.
+ */
+#define RT_CACHE_BUBBLE_THRESHOLD (HZ*5)
+
+#include <linux/route.h>
+
+#ifdef __KERNEL__
+#define RTF_LOCAL 0x8000
+#endif
+
+struct rtable
+{
+ struct rtable *rt_next;
+ __u32 rt_dst;
+ __u32 rt_src;
+ __u32 rt_gateway;
+ atomic_t rt_refcnt;
+ atomic_t rt_use;
+ unsigned long rt_window;
+ atomic_t rt_lastuse;
+ struct hh_cache *rt_hh;
+ struct device *rt_dev;
+ unsigned short rt_flags;
+ unsigned short rt_mtu;
+ unsigned short rt_irtt;
+ unsigned char rt_tos;
+};
+
+extern void ip_rt_flush(struct device *dev);
+extern void ip_rt_update(int event, struct device *dev);
+extern void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev);
+extern struct rtable *ip_rt_slow_route(__u32 daddr, int local, struct device *dev);
+extern struct device *ip_rt_dev(__u32 addr);
+extern int rt_get_info(char * buffer, char **start, off_t offset, int length, int dummy);
+extern int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int ip_rt_ioctl(unsigned int cmd, void *arg);
+extern int ip_rt_new(struct rtentry *rt);
+extern int ip_rt_kill(struct rtentry *rt);
+extern void ip_rt_check_expire(void);
+extern void ip_rt_advice(struct rtable **rp, int advice);
+
+extern void ip_rt_run_bh(void);
+extern atomic_t ip_rt_lock;
+extern unsigned ip_rt_bh_mask;
+extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];
+extern void rt_free(struct rtable * rt);
+
+static __inline__ void ip_rt_fast_lock(void)
+{
+ atomic_inc(&ip_rt_lock);
+}
+
+static __inline__ void ip_rt_fast_unlock(void)
+{
+ atomic_dec(&ip_rt_lock);
+}
+
+static __inline__ void ip_rt_unlock(void)
+{
+ if (atomic_dec_and_test(&ip_rt_lock) && ip_rt_bh_mask)
+ ip_rt_run_bh();
+}
+
+static __inline__ unsigned ip_rt_hash_code(__u32 addr)
+{
+ unsigned tmp = addr + (addr>>16);
+ return (tmp + (tmp>>8)) & 0xFF;
+}
+
+
+static __inline__ void ip_rt_put(struct rtable * rt)
+#ifndef MODULE
+{
+ /* If this rtable entry is not in the cache, we'd better free
+ * it once the refcnt goes to zero, because nobody else will.
+ */
+ if (rt&&atomic_dec_and_test(&rt->rt_refcnt)&&(rt->rt_flags&RTF_NOTCACHED))
+ rt_free(rt);
+}
+#else
+;
+#endif
+
+#ifdef CONFIG_KERNELD
+static struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev);
+#else
+static __inline__ struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev)
+#ifndef MODULE
+{
+ struct rtable * rth;
+
+ ip_rt_fast_lock();
+
+ for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
+ {
+ /* If an interface is specified, make sure this route points to it. */
+ if ( (rth->rt_dst == daddr) && ((dev==NULL) || (dev==rth->rt_dev)) )
+ {
+ rth->rt_lastuse = jiffies;
+ atomic_inc(&rth->rt_use);
+ atomic_inc(&rth->rt_refcnt);
+ ip_rt_unlock();
+ return rth;
+ }
+ }
+ return ip_rt_slow_route (daddr, local, dev);
+}
+#else
+;
+#endif
+#endif
+
+static __inline__ struct rtable * ip_check_route(struct rtable ** rp, __u32 daddr,
+ int local, struct device *dev)
+{
+ struct rtable * rt = *rp;
+
+ if (!rt || rt->rt_dst != daddr || !(rt->rt_flags&RTF_UP) || (dev!=NULL)
+ || ((local==1)^((rt->rt_flags&RTF_LOCAL) != 0)))
+ {
+ ip_rt_put(rt);
+ rt = ip_rt_route(daddr, local, dev);
+ *rp = rt;
+ }
+ return rt;
+}
+
+
+#endif /* _ROUTE_H */
diff --git a/linux/src/include/net/slhc.h b/linux/src/include/net/slhc.h
new file mode 100644
index 0000000..c7b39db
--- /dev/null
+++ b/linux/src/include/net/slhc.h
@@ -0,0 +1,6 @@
+#ifndef __NET_SLHC_H
+#define __NET_SLHC_H
+
+extern void slhc_install(void);
+
+#endif
diff --git a/linux/src/include/net/slhc_vj.h b/linux/src/include/net/slhc_vj.h
new file mode 100644
index 0000000..471cf71
--- /dev/null
+++ b/linux/src/include/net/slhc_vj.h
@@ -0,0 +1,187 @@
+#ifndef _SLHC_H
+#define _SLHC_H
+/*
+ * Definitions for tcp compression routines.
+ *
+ * $Header: cvs/gnumach/linux/src/include/net/Attic/slhc_vj.h,v 1.1 1999/04/26 05:57:54 tb Exp $
+ *
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ *
+ *
+ * modified for KA9Q Internet Software Package by
+ * Katie Stevens (dkstevens@ucdavis.edu)
+ * University of California, Davis
+ * Computing Services
+ * - 01-31-90 initial adaptation
+ *
+ * - Feb 1991 Bill_Simpson@um.cc.umich.edu
+ * variable number of conversation slots
+ * allow zero or one slots
+ * separate routines
+ * status display
+ */
+
+/*
+ * Compressed packet format:
+ *
+ * The first octet contains the packet type (top 3 bits), TCP
+ * 'push' bit, and flags that indicate which of the 4 TCP sequence
+ * numbers have changed (bottom 5 bits). The next octet is a
+ * conversation number that associates a saved IP/TCP header with
+ * the compressed packet. The next two octets are the TCP checksum
+ * from the original datagram. The next 0 to 15 octets are
+ * sequence number changes, one change per bit set in the header
+ * (there may be no changes and there are two special cases where
+ * the receiver implicitly knows what changed -- see below).
+ *
+ * There are 5 numbers which can change (they are always inserted
+ * in the following order): TCP urgent pointer, window,
+ * acknowledgment, sequence number and IP ID. (The urgent pointer
+ * is different from the others in that its value is sent, not the
+ * change in value.) Since typical use of SLIP links is biased
+ * toward small packets (see comments on MTU/MSS below), changes
+ * use a variable length coding with one octet for numbers in the
+ * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the
+ * range 256 - 65535 or 0. (If the change in sequence number or
+ * ack is more than 65535, an uncompressed packet is sent.)
+ */
+
+/*
+ * Packet types (must not conflict with IP protocol version)
+ *
+ * The top nibble of the first octet is the packet type. There are
+ * three possible types: IP (not proto TCP or tcp with one of the
+ * control flags set); uncompressed TCP (a normal IP/TCP packet but
+ * with the 8-bit protocol field replaced by an 8-bit connection id --
+ * this type of packet syncs the sender & receiver); and compressed
+ * TCP (described above).
+ *
+ * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and
+ * is logically part of the 4-bit "changes" field that follows. Top
+ * three bits are actual packet type. For backward compatibility
+ * and in the interest of conserving bits, numbers are chosen so the
+ * IP protocol version number (4) which normally appears in this nibble
+ * means "IP packet".
+ */
+
+/* SLIP compression masks for len/vers byte */
+#define SL_TYPE_IP 0x40
+#define SL_TYPE_UNCOMPRESSED_TCP 0x70
+#define SL_TYPE_COMPRESSED_TCP 0x80
+#define SL_TYPE_ERROR 0x00
+
+/* Bits in first octet of compressed packet */
+#define NEW_C 0x40 /* flag bits for what changed in a packet */
+#define NEW_I 0x20
+#define NEW_S 0x08
+#define NEW_A 0x04
+#define NEW_W 0x02
+#define NEW_U 0x01
+
+/* reserved, special-case values of above */
+#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */
+#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */
+#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U)
+
+#define TCP_PUSH_BIT 0x10
+
+/*
+ * data type and sizes conversion assumptions:
+ *
+ * VJ code KA9Q style generic
+ * u_char byte_t unsigned char 8 bits
+ * u_short int16 unsigned short 16 bits
+ * u_int int16 unsigned short 16 bits
+ * u_long unsigned long unsigned long 32 bits
+ * int int32 long 32 bits
+ */
+
+typedef unsigned char byte_t;
+typedef unsigned long int32;
+
+/*
+ * "state" data for each active tcp conversation on the wire. This is
+ * basically a copy of the entire IP/TCP header from the last packet
+ * we saw from the conversation together with a small identifier
+ * the transmit & receive ends of the line use to locate saved header.
+ */
+struct cstate {
+ byte_t cs_this; /* connection id number (xmit) */
+ struct cstate *next; /* next in ring (xmit) */
+ struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
+ struct tcphdr cs_tcp;
+ unsigned char cs_ipopt[64];
+ unsigned char cs_tcpopt[64];
+ int cs_hsize;
+};
+#define NULLSLSTATE (struct cstate *)0
+
+/*
+ * all the state data for one serial line (we need one of these per line).
+ */
+struct slcompress {
+ struct cstate *tstate; /* transmit connection states (array)*/
+ struct cstate *rstate; /* receive connection states (array)*/
+
+ byte_t tslot_limit; /* highest transmit slot id (0-l)*/
+ byte_t rslot_limit; /* highest receive slot id (0-l)*/
+
+ byte_t xmit_oldest; /* oldest xmit in ring */
+ byte_t xmit_current; /* most recent xmit id */
+ byte_t recv_current; /* most recent rcvd id */
+
+ byte_t flags;
+#define SLF_TOSS 0x01 /* tossing rcvd frames until id received */
+
+ int32 sls_o_nontcp; /* outbound non-TCP packets */
+ int32 sls_o_tcp; /* outbound TCP packets */
+ int32 sls_o_uncompressed; /* outbound uncompressed packets */
+ int32 sls_o_compressed; /* outbound compressed packets */
+ int32 sls_o_searches; /* searches for connection state */
+ int32 sls_o_misses; /* times couldn't find conn. state */
+
+ int32 sls_i_uncompressed; /* inbound uncompressed packets */
+ int32 sls_i_compressed; /* inbound compressed packets */
+ int32 sls_i_error; /* inbound error packets */
+ int32 sls_i_tossed; /* inbound packets tossed because of error */
+
+ int32 sls_i_runt;
+ int32 sls_i_badcheck;
+};
+#define NULLSLCOMPR (struct slcompress *)0
+
+#define __ARGS(x) x
+
+/* In slhc.c: */
+struct slcompress *slhc_init __ARGS((int rslots, int tslots));
+void slhc_free __ARGS((struct slcompress *comp));
+
+int slhc_compress __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize, unsigned char *ocp, unsigned char **cpp,
+ int compress_cid));
+int slhc_uncompress __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize));
+int slhc_remember __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize));
+int slhc_toss __ARGS((struct slcompress *comp));
+
+void slhc_i_status __ARGS((struct slcompress *comp));
+void slhc_o_status __ARGS((struct slcompress *comp));
+
+#endif /* _SLHC_H */
diff --git a/linux/src/include/net/snmp.h b/linux/src/include/net/snmp.h
new file mode 100644
index 0000000..552292b
--- /dev/null
+++ b/linux/src/include/net/snmp.h
@@ -0,0 +1,107 @@
+/*
+ *
+ * SNMP MIB entries for the IP subsystem.
+ *
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ *
+ * We don't choose to implement SNMP in the kernel (this would
+ * be silly as SNMP is a pain in the backside in places). We do
+ * however need to collect the MIB statistics and export them
+ * out of /proc (eventually)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _SNMP_H
+#define _SNMP_H
+
+/*
+ * We use all unsigned longs. Linux will soon be so reliable that even these
+ * will rapidly get too small 8-). Seriously consider the IpInReceives count
+ * on the 20Gb/s + networks people expect in a few years time!
+ */
+
+struct ip_mib	/* SNMP "ip" group counters (one unsigned long each; see file comment) */
+{
+	unsigned long IpForwarding;
+	unsigned long IpDefaultTTL;
+	unsigned long IpInReceives;
+	unsigned long IpInHdrErrors;
+	unsigned long IpInAddrErrors;
+	unsigned long IpForwDatagrams;
+	unsigned long IpInUnknownProtos;
+	unsigned long IpInDiscards;
+	unsigned long IpInDelivers;
+	unsigned long IpOutRequests;
+	unsigned long IpOutDiscards;
+	unsigned long IpOutNoRoutes;
+	unsigned long IpReasmTimeout;
+	unsigned long IpReasmReqds;
+	unsigned long IpReasmOKs;
+	unsigned long IpReasmFails;
+	unsigned long IpFragOKs;
+	unsigned long IpFragFails;
+	unsigned long IpFragCreates;
+};
+
+
+struct icmp_mib	/* SNMP "icmp" group counters, In*/Out* per message type */
+{
+	unsigned long IcmpInMsgs;
+	unsigned long IcmpInErrors;
+	unsigned long IcmpInDestUnreachs;
+	unsigned long IcmpInTimeExcds;
+	unsigned long IcmpInParmProbs;
+	unsigned long IcmpInSrcQuenchs;
+	unsigned long IcmpInRedirects;
+	unsigned long IcmpInEchos;
+	unsigned long IcmpInEchoReps;
+	unsigned long IcmpInTimestamps;
+	unsigned long IcmpInTimestampReps;
+	unsigned long IcmpInAddrMasks;
+	unsigned long IcmpInAddrMaskReps;
+	unsigned long IcmpOutMsgs;
+	unsigned long IcmpOutErrors;
+	unsigned long IcmpOutDestUnreachs;
+	unsigned long IcmpOutTimeExcds;
+	unsigned long IcmpOutParmProbs;
+	unsigned long IcmpOutSrcQuenchs;
+	unsigned long IcmpOutRedirects;
+	unsigned long IcmpOutEchos;
+	unsigned long IcmpOutEchoReps;
+	unsigned long IcmpOutTimestamps;
+	unsigned long IcmpOutTimestampReps;
+	unsigned long IcmpOutAddrMasks;
+	unsigned long IcmpOutAddrMaskReps;
+};
+
+struct tcp_mib	/* SNMP "tcp" group counters (TcpCurrEstab is maintained in tcp_set_state) */
+{
+	unsigned long TcpRtoAlgorithm;
+	unsigned long TcpRtoMin;
+	unsigned long TcpRtoMax;
+	unsigned long TcpMaxConn;
+	unsigned long TcpActiveOpens;
+	unsigned long TcpPassiveOpens;
+	unsigned long TcpAttemptFails;
+	unsigned long TcpEstabResets;
+	unsigned long TcpCurrEstab;
+	unsigned long TcpInSegs;
+	unsigned long TcpOutSegs;
+	unsigned long TcpRetransSegs;
+};
+
+struct udp_mib	/* SNMP "udp" group counters */
+{
+	unsigned long UdpInDatagrams;
+	unsigned long UdpNoPorts;
+	unsigned long UdpInErrors;
+	unsigned long UdpOutDatagrams;
+};
+
+
+#endif
diff --git a/linux/src/include/net/sock.h b/linux/src/include/net/sock.h
new file mode 100644
index 0000000..25a9044
--- /dev/null
+++ b/linux/src/include/net/sock.h
@@ -0,0 +1,613 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the AF_INET socket handler.
+ *
+ * Version: @(#)sock.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Florian La Roche <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : Volatiles in skbuff pointers. See
+ * skbuff comments. May be overdone,
+ * better to prove they can be removed
+ * than the reverse.
+ * Alan Cox : Added a zapped field for tcp to note
+ * a socket is reset and must stay shut up
+ * Alan Cox : New fields for options
+ * Pauline Middelink : identd support
+ * Alan Cox : Eliminate low level recv/recvfrom
+ * David S. Miller : New socket lookup architecture for ISS.
+ * Elliot Poger : New field for SO_BINDTODEVICE option.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _SOCK_H
+#define _SOCK_H
+
+#include <linux/timer.h>
+#include <linux/ip.h> /* struct options */
+#include <linux/in.h> /* struct sockaddr_in */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/config.h>
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <net/protocol.h> /* struct inet_protocol */
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#include <net/ax25.h>
+#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#include <net/netrom.h>
+#endif
+#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
+#include <net/rose.h>
+#endif
+#endif
+
+#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
+#include <net/ipx.h>
+#endif
+
+#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
+#include <linux/atalk.h>
+#endif
+
+#include <linux/igmp.h>
+
+#include <asm/atomic.h>
+
+/*
+ * The AF_UNIX specific socket options
+ */
+
+struct unix_opt	/* AF_UNIX private state, lives in sock.protinfo.af_unix */
+{
+	int 			family;	/* address family (AF_UNIX) */
+	char *			name;	/* bound filesystem path, if any -- TODO confirm */
+	int  			locks;
+	struct inode *		inode;	/* inode of bound socket file */
+	struct semaphore	readsem;	/* serializes readers */
+	struct sock *		other;	/* peer socket of a connected pair */
+	int 			marksweep;	/* GC mark bit (MARKED) for in-flight fd passing */
+#define MARKED			1
+	int 			inflight;	/* count of this socket's fds in flight */
+};
+
+/*
+ * IP packet socket options
+ */
+
+struct inet_packet_opt	/* packet-socket private state (sock.protinfo.af_packet) */
+{
+	struct notifier_block	notifier;		/* Used when bound */
+	struct device		*bound_dev;	/* device this socket is bound to */
+	unsigned long		dev_stamp;
+	struct packet_type	*prot_hook;	/* rx hook registered with the device layer */
+	char			device_name[15];	/* NB: 15 bytes -- check against the dev name limit */
+};
+
+/*
+ * Once the IPX ncpd patches are in these are going into protinfo
+ */
+
+#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
+struct ipx_opt	/* IPX private state (sock.protinfo.af_ipx) */
+{
+	ipx_address		dest_addr;	/* default/connected destination */
+	ipx_interface		*intrfc;	/* interface the socket is attached to */
+	unsigned short		port;	/* local IPX port */
+#ifdef CONFIG_IPX_INTERN
+	unsigned char           node[IPX_NODE_LEN];	/* internal-net node address */
+#endif
+	unsigned short		type;	/* IPX packet type field */
+/*
+ * To handle asynchronous messages from the NetWare server, we have to
+ * know the connection this socket belongs to.
+ */
+	struct ncp_server       *ncp_server;
+/*
+ * To handle special ncp connection-handling sockets for mars_nwe,
+ * the connection number must be stored in the socket.
+ */
+	unsigned short		ipx_ncp_conn;
+};
+#endif
+
+#ifdef CONFIG_NUTCP	/* "new TCP" state block -- compiled only with CONFIG_NUTCP */
+struct tcp_opt
+{
+/*
+ * RFC793 variables by their proper names. This means you can
+ * read the code and the spec side by side (and laugh ...)
+ * See RFC793 and RFC1122. The RFC writes these in capitals.
+ */
+ 	__u32	rcv_nxt;	/* What we want to receive next 	*/
+ 	__u32	rcv_up;		/* The urgent point (may not be valid) 	*/
+ 	__u32	rcv_wnd;	/* Current receiver window		*/
+ 	__u32	snd_nxt;	/* Next sequence we send		*/
+ 	__u32	snd_una;	/* First byte we want an ack for	*/
+	__u32	snd_up;		/* Outgoing urgent pointer		*/
+	__u32	snd_wl1;	/* Sequence for window update		*/
+	__u32	snd_wl2;	/* Ack sequence for update		*/
+/*
+ *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
+ */
+ 	__u32	snd_cwnd;	/* Sending congestion window		*/
+ 	__u32	snd_ssthresh;	/* Slow start size threshold		*/
+/*
+ *	Timers used by the TCP protocol layer
+ */
+ 	struct timer_list	delack_timer;		/* Ack delay 	*/
+ 	struct timer_list	idle_timer;		/* Idle watch 	*/
+ 	struct timer_list	completion_timer;	/* Up/Down timer */
+ 	struct timer_list	probe_timer;		/* Probes	*/
+ 	struct timer_list	retransmit_timer;	/* Resend (no ack) */
+};
+#endif
+
+/*
+ * This structure really needs to be cleaned up.
+ * Most of it is for TCP, and not used by any of
+ * the other protocols.
+ */
+struct sock	/* per-socket state; mostly TCP fields (see comment above) */
+{
+	/* This must be first: list head in struct proto is cast to struct sock *. */
+	struct sock		*sklist_next;	/* protocol-global list (add_to_prot_sklist) */
+	struct sock		*sklist_prev;
+
+	struct options		*opt;	/* IP options */
+	atomic_t		wmem_alloc;	/* bytes charged for write buffers */
+	atomic_t		rmem_alloc;	/* skb truesize queued for rx (sock_queue_rcv_skb) */
+	unsigned long		allocation;		/* Allocation mode */
+	__u32			write_seq;
+	__u32			sent_seq;
+	__u32			acked_seq;
+	__u32			copied_seq;
+	__u32			rcv_ack_seq;
+	unsigned short		rcv_ack_cnt;		/* count of same ack */
+	__u32			window_seq;
+	__u32			fin_seq;
+	__u32			urg_seq;
+	__u32			urg_data;
+	__u32			syn_seq;
+	int			users;			/* user count: lock nesting depth (lock_sock/release_sock) */
+	/*
+	 *	Not all are volatile, but some are, so we
+	 * 	might as well say they all are.
+	 */
+	volatile char		dead,	/* socket is being destroyed */
+				urginline,	/* urgent data delivered inline */
+				intr,
+				blog,
+				done,
+				reuse,
+				keepopen,
+				linger,
+				delay_acks,
+				destroy,
+				ack_timed,
+				no_check,
+				zapped,	/* In ax25 & ipx means not linked */
+				broadcast,
+				nonagle,	/* Nagle algorithm disabled */
+				bsdism;
+	struct device		* bound_device;	/* SO_BINDTODEVICE target */
+	unsigned long	        lingertime;	/* SO_LINGER timeout */
+	int			proc;	/* owning process (identd support) -- TODO confirm */
+
+	struct sock		*next;	/* per-protocol hash chain */
+	struct sock		**pprev;
+	struct sock		*bind_next;	/* bound-port hash chain (tcp_sk_bindify) */
+	struct sock		**bind_pprev;
+	struct sock		*pair;
+	int			hashent;	/* cached hash bucket index */
+	struct sock		*prev;
+	struct sk_buff		* volatile send_head;	/* unacked transmit queue */
+	struct sk_buff		* volatile send_next;
+	struct sk_buff		* volatile send_tail;
+	struct sk_buff_head	back_log;	/* packets deferred while socket locked */
+	struct sk_buff		*partial;	/* partially filled outgoing skb */
+	struct timer_list	partial_timer;
+	long			retransmits;
+	struct sk_buff_head	write_queue,
+				receive_queue;
+	struct proto		*prot;	/* protocol operations table */
+	struct wait_queue	**sleep;
+	__u32			daddr;	/* foreign address */
+	__u32			saddr;		/* Sending source */
+	__u32			rcv_saddr;	/* Bound address */
+	unsigned short		max_unacked;
+	unsigned short		window;	/* window currently offered to peer */
+	__u32                	lastwin_seq;    /* sequence number when we last updated the window we offer */
+	__u32			high_seq;	/* sequence number when we did current fast retransmit */
+	volatile unsigned long  ato;            /* ack timeout */
+	volatile unsigned long 	lrcvtime;       /* jiffies at last data rcv */
+	volatile unsigned long 	idletime;       /* jiffies at last rcv */
+	unsigned int		bytes_rcv;
+/*
+ *	mss is min(mtu, max_window)
+ */
+	unsigned short		mtu;       /* mss negotiated in the syn's */
+	volatile unsigned short	mss;       /* current eff. mss - can change */
+	volatile unsigned short	user_mss;  /* mss requested by user in ioctl */
+	volatile unsigned short	max_window;
+	unsigned long 		window_clamp;
+	unsigned int		ssthresh;	/* slow-start threshold */
+	unsigned short		num;	/* local port number (see tcp_sk_bhashfn) */
+	volatile unsigned short	cong_window;	/* congestion window */
+	volatile unsigned short	cong_count;
+	volatile unsigned short	packets_out;	/* segments in flight */
+	volatile unsigned short	shutdown;	/* RCV_SHUTDOWN | SEND_SHUTDOWN */
+	volatile unsigned long	rtt;	/* smoothed round-trip time */
+	volatile unsigned long	mdev;	/* rtt mean deviation */
+	volatile unsigned long	rto;	/* retransmit timeout */
+
+/*
+ *	currently backoff isn't used, but I'm maintaining it in case
+ *	we want to go back to a backoff formula that needs it
+ */
+
+	volatile unsigned short	backoff;	/* exponential backoff counter (unused, see above) */
+	int			err, err_soft;	/* Soft holds errors that don't
+						   cause failure but are the cause
+						   of a persistent failure not just
+						   'timed out' */
+	unsigned char		protocol;	/* IP protocol number */
+	volatile unsigned char	state;	/* TCP_* state (change via tcp_set_state) */
+	unsigned short		ack_backlog;
+	unsigned char		priority;
+	unsigned char		debug;	/* SO_DEBUG flag */
+	int			rcvbuf;	/* receive buffer limit (sock_queue_rcv_skb) */
+	int			sndbuf;	/* send buffer limit */
+	unsigned short		type;	/* socket type (SOCK_STREAM etc.) */
+	unsigned char		localroute;	/* Route locally only */
+
+/*
+ *	This is where all the private (optional) areas that don't
+ *	overlap will eventually live.
+ */
+
+	union
+	{
+	  	struct unix_opt	af_unix;
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+		ax25_cb		*ax25;
+#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+		nr_cb		*nr;
+#endif
+#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
+		rose_cb		*rose;
+#endif
+#endif
+#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
+		struct atalk_sock	af_at;
+#endif
+#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
+		struct ipx_opt		af_ipx;
+#endif
+#ifdef CONFIG_INET
+		struct inet_packet_opt  af_packet;
+#ifdef CONFIG_NUTCP		
+		struct tcp_opt		af_tcp;
+#endif		
+#endif
+	} protinfo;  		
+
+/* 
+ *	IP 'private area' or will be eventually 
+ */
+	int			ip_ttl;		/* TTL setting */
+	int			ip_tos;		/* TOS */
+	struct tcphdr		dummy_th;	/* template header; dummy_th.dest used in tcp_sk_hashfn */
+	struct timer_list	keepalive_timer;	/* TCP keepalive hack */
+	struct timer_list	retransmit_timer;	/* TCP retransmit timer */
+	struct timer_list	delack_timer;		/* TCP delayed ack timer */
+	int			ip_xmit_timeout;	/* Why the timeout is running */
+	struct rtable		*ip_route_cache;	/* Cached output route */
+	unsigned char		ip_hdrincl;		/* Include headers ? */
+#ifdef CONFIG_IP_MULTICAST  
+	int			ip_mc_ttl;		/* Multicasting TTL */
+	int			ip_mc_loop;		/* Loopback */
+	char			ip_mc_name[MAX_ADDR_LEN];/* Multicast device name */
+	struct ip_mc_socklist	*ip_mc_list;		/* Group array */
+#endif  
+
+/*
+ *	This part is used for the timeout functions (timer.c). 
+ */
+ 
+	int			timeout;	/* What are we waiting for? */
+	struct timer_list	timer;		/* This is the TIME_WAIT/receive timer
+					 	 * when we are doing IP
+					 	 */
+	struct timeval		stamp;	/* timestamp of last packet (SIOCGSTAMP) -- TODO confirm */
+
+	/*
+	 *	Identd 
+	 */
+	 
+	struct socket		*socket;	/* back-pointer to the generic socket */
+	
+	/*
+	 *	Callbacks 
+	 */
+	 
+	void			(*state_change)(struct sock *sk);	/* invoked on state transitions */
+	void			(*data_ready)(struct sock *sk,int bytes);	/* data arrived (sock_queue_rcv_skb) */
+	void			(*write_space)(struct sock *sk);	/* output space available */
+	void			(*error_report)(struct sock *sk);	/* asynchronous error */
+
+  	/*
+  	 *	Moved solely for 2.0 to keep binary module compatibility stuff straight.  
+  	 */
+  	 
+	unsigned short		max_ack_backlog;	/* listen() backlog limit */
+	struct sock		*listening;	/* listening parent of this (embryonic) socket */
+};
+
+/*
+ * IP protocol blocks we attach to sockets.
+ */
+
+struct proto	/* per-protocol operations table attached via sk->prot */
+{
+	/* These must be first: sklist code casts struct proto * to struct sock *
+	 * so the proto itself serves as the list head (see add_to_prot_sklist). */
+	struct sock		*sklist_next;
+	struct sock		*sklist_prev;
+
+	void			(*close)(struct sock *sk, unsigned long timeout);
+	int			(*build_header)(struct sk_buff *skb,
+					__u32 saddr,
+					__u32 daddr,
+					struct device **dev, int type,
+					struct options *opt, int len,
+					int tos, int ttl, struct rtable ** rp);
+	int			(*connect)(struct sock *sk,
+				        struct sockaddr_in *usin, int addr_len);
+	struct sock *		(*accept) (struct sock *sk, int flags);
+	void			(*queue_xmit)(struct sock *sk,
+				        struct device *dev, struct sk_buff *skb,
+				        int free);
+	void			(*retransmit)(struct sock *sk, int all);
+	void			(*write_wakeup)(struct sock *sk);
+	void			(*read_wakeup)(struct sock *sk);
+	int			(*rcv)(struct sk_buff *buff, struct device *dev,
+				        struct options *opt, __u32 daddr,
+				        unsigned short len, __u32 saddr,
+				        int redo, struct inet_protocol *protocol);
+	int			(*select)(struct sock *sk, int which,
+					select_table *wait);
+	int			(*ioctl)(struct sock *sk, int cmd,
+					unsigned long arg);
+	int			(*init)(struct sock *sk);
+	void			(*shutdown)(struct sock *sk, int how);
+	int			(*setsockopt)(struct sock *sk, int level, int optname,
+					char *optval, int optlen);
+	int			(*getsockopt)(struct sock *sk, int level, int optname,
+					char *optval, int *option);
+	int			(*sendmsg)(struct sock *sk, struct msghdr *msg, int len,
+					int noblock, int flags);
+	int			(*recvmsg)(struct sock *sk, struct msghdr *msg, int len,
+					int noblock, int flags, int *addr_len);
+	int			(*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+	/* Keeping track of sk's, looking them up, and port selection methods. */
+	void			(*hash)(struct sock *sk);
+	void			(*unhash)(struct sock *sk);
+	void			(*rehash)(struct sock *sk);
+	unsigned short		(*good_socknum)(void);	/* pick an unused local port */
+	int			(*verify_bind)(struct sock *sk, unsigned short snum);
+
+	unsigned short		max_header;	/* worst-case header space to reserve */
+	unsigned long		retransmits;
+	char			name[32];	/* protocol name, e.g. for /proc */
+	int			inuse, highestinuse;	/* current / high-water socket counts */
+};
+
+#define TIME_WRITE 1
+#define TIME_CLOSE 2
+#define TIME_KEEPOPEN 3
+#define TIME_DESTROY 4
+#define TIME_DONE 5 /* Used to absorb those last few packets */
+#define TIME_PROBE0 6
+
+/*
+ * About 10 seconds
+ */
+
+#define SOCK_DESTROY_TIME (10*HZ)
+
+/*
+ * Sockets 0-1023 can't be bound to unless you are superuser
+ */
+
+#define PROT_SOCK 1024
+
+#define SHUTDOWN_MASK 3
+#define RCV_SHUTDOWN 1
+#define SEND_SHUTDOWN 2
+
+/* Per-protocol hash table implementations use this to make sure
+ * nothing changes.
+ */
+#define SOCKHASH_LOCK() start_bh_atomic()
+#define SOCKHASH_UNLOCK() end_bh_atomic()
+
+/* Some things in the kernel just want to walk a protocol's
+ * entire socket list in one pass, thus...
+ */
+static __inline__ void add_to_prot_sklist(struct sock *sk)	/* link sk onto its protocol's global list; no-op if already linked */
+{
+	SOCKHASH_LOCK();
+	if(!sk->sklist_next) {	/* NULL next pointer == not on any list */
+		struct proto *p = sk->prot;
+
+		sk->sklist_prev = (struct sock *) p;	/* proto head doubles as a list node */
+		sk->sklist_next = p->sklist_next;
+		p->sklist_next->sklist_prev = sk;
+		p->sklist_next = sk;	/* insert at head */
+
+		/* Charge the protocol. */
+		sk->prot->inuse += 1;
+		if(sk->prot->highestinuse < sk->prot->inuse)
+			sk->prot->highestinuse = sk->prot->inuse;	/* track high-water mark */
+	}
+	SOCKHASH_UNLOCK();
+}
+
+static __inline__ void del_from_prot_sklist(struct sock *sk)	/* unlink sk from its protocol's global list; safe if not linked */
+{
+	SOCKHASH_LOCK();
+	if(sk->sklist_next) {	/* only if actually on the list */
+		sk->sklist_next->sklist_prev = sk->sklist_prev;
+		sk->sklist_prev->sklist_next = sk->sklist_next;
+		sk->sklist_next = NULL;	/* mark as unlinked for add_to_prot_sklist */
+		sk->prot->inuse--;	/* uncharge the protocol */
+	}
+	SOCKHASH_UNLOCK();
+}
+
+/*
+ * Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * Note the 'barrier()' calls: gcc may not move a lock
+ * "downwards" or a unlock "upwards" when optimizing.
+ */
+extern void __release_sock(struct sock *sk);
+
+static inline void lock_sock(struct sock *sk)	/* enter locked state (nestable): see comment above on semantics */
+{
+#if 0
+/* debugging code: the test isn't even 100% correct, but it can catch bugs */
+/* Note that a double lock is ok in theory - it's just _usually_ a bug */
+	if (sk->users) {
+		__label__ here;
+		printk("double lock on socket at %p\n", &&here);
+here:	/* NB: dead (#if 0) code; a label directly before '}' is not valid C */
+	}
+#endif
+	sk->users++;	/* nesting count, undone by release_sock */
+	barrier();	/* stop the compiler moving the "locked" work above the count bump */
+}
+
+static inline void release_sock(struct sock *sk)	/* leave locked state; run deferred work when last nesting level drops */
+{
+	barrier();	/* stop the compiler sinking "locked" work below the unlock */
+#if 0
+/* debugging code: remove me when ok */
+	if (sk->users == 0) {
+		__label__ here;
+		sk->users = 1;
+		printk("trying to unlock unlocked socket at %p\n", &&here);
+here:	/* NB: dead (#if 0) code; a label directly before '}' is not valid C */
+	}
+#endif
+	if ((sk->users = sk->users-1) == 0)	/* decrement-and-test in one expression */
+		__release_sock(sk);	/* process the backlog accumulated while locked */
+}
+
+
+extern struct sock * sk_alloc(int priority);
+extern void sk_free(struct sock *sk);
+extern void destroy_sock(struct sock *sk);
+
+extern struct sk_buff *sock_wmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern struct sk_buff *sock_rmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern void sock_wfree(struct sock *sk,
+ struct sk_buff *skb);
+extern void sock_rfree(struct sock *sk,
+ struct sk_buff *skb);
+extern unsigned long sock_rspace(struct sock *sk);
+extern unsigned long sock_wspace(struct sock *sk);
+
+extern int sock_setsockopt(struct sock *sk, int level,
+ int op, char *optval,
+ int optlen);
+
+extern int sock_getsockopt(struct sock *sk, int level,
+ int op, char *optval,
+ int *optlen);
+extern struct sk_buff *sock_alloc_send_skb(struct sock *skb,
+ unsigned long size,
+ unsigned long fallback,
+ int noblock,
+ int *errcode);
+
+/*
+ * Queue a received datagram if it will fit. Stream and sequenced
+ * protocols can't normally use this as they need to fit buffers in
+ * and play with them.
+ *
+ * Inlined as it's very short and called for pretty much every
+ * packet ever received.
+ */
+
+static __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)	/* queue a datagram if rcvbuf allows; 0 or -ENOMEM */
+{
+	if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)	/* receive-buffer quota check */
+		return -ENOMEM;
+	atomic_add(skb->truesize, &sk->rmem_alloc);	/* charge the socket for the buffer */
+	skb->sk=sk;	/* record owner (presumably so sock_rfree can uncharge later) */
+	skb_queue_tail(&sk->receive_queue,skb);
+	if (!sk->dead)
+		sk->data_ready(sk,skb->len);	/* wake the reader callback */
+	return 0;
+}
+
+static __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)	/* as sock_queue_rcv_skb, but non-atomic __skb_queue_tail (caller provides exclusion?) */
+{
+	if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)	/* receive-buffer quota check */
+		return -ENOMEM;
+	atomic_add(skb->truesize, &sk->rmem_alloc);	/* charge the socket for the buffer */
+	skb->sk=sk;	/* record owner for later accounting */
+	__skb_queue_tail(&sk->receive_queue,skb);	/* unlocked queue variant */
+	if (!sk->dead)
+		sk->data_ready(sk,skb->len);	/* wake the reader callback */
+	return 0;
+}
+
+/*
+ * Recover an error report and clear atomically
+ */
+
+static __inline__ int sock_error(struct sock *sk)	/* fetch and clear sk->err atomically; returns 0 or a negative errno */
+{
+	int err=xchg(&sk->err,0);	/* xchg makes read+clear a single atomic op */
+	return -err;
+}
+
+/*
+ * Declarations from timer.c
+ */
+
+extern struct sock *timer_base;
+
+extern void delete_timer (struct sock *);
+extern void reset_timer (struct sock *, int, unsigned long);
+extern void net_timer (unsigned long);
+
+
+/*
+ * Enable debug/info messages
+ */
+
+#define NETDEBUG(x) do { } while (0)
+
+#endif /* _SOCK_H */
diff --git a/linux/src/include/net/spx.h b/linux/src/include/net/spx.h
new file mode 100644
index 0000000..3e9b1d1
--- /dev/null
+++ b/linux/src/include/net/spx.h
@@ -0,0 +1,38 @@
+#ifndef __NET_SPX_H
+#define __NET_SPX_H
+
+/*
+ * Internal definitions for the SPX protocol.
+ */
+
+/*
+ * The SPX header following an IPX header.
+ */
+
+struct spxhdr	/* SPX header, immediately following the IPX header on the wire */
+{	
+	__u8 cctl;	/* connection-control flag bits, below */
+#define CCTL_SPXII_XHD	0x01	/* SPX2 extended header */
+#define	CCTL_SPX_UNKNOWN	0x02	/* Unknown (unused ??) */
+#define CCTL_SPXII_NEG	0x04	/* Negotiate size */
+#define	CCTL_SPXII	0x08	/* Set for SPX2 */
+#define CCTL_EOM	0x10	/* End of message marker */
+#define CCTL_URG	0x20	/* Urgent marker in SPP (not used in SPX?) */
+#define CCTL_ACK	0x40	/* Send me an ACK */
+#define CCTL_CTL	0x80	/* Control message */
+	__u8 dtype;	/* datastream type */
+#define SPX_DTYPE_ECONN	0xFE	/* Finished */
+#define SPX_DTYPE_ECACK	0xFF	/* Ok */
+	__u16 sconn;	/* Connection ID */
+	__u16 dconn;	/* Connection ID */
+	__u16 sequence;	/* send sequence number */
+	__u16 ackseq;	/* acknowledged sequence number */
+	__u16 allocseq;	/* flow-control allocation number */
+};
+
+#define IPXTYPE_SPX 5
+
+
+
+
+#endif
diff --git a/linux/src/include/net/tcp.h b/linux/src/include/net/tcp.h
new file mode 100644
index 0000000..b2534ba
--- /dev/null
+++ b/linux/src/include/net/tcp.h
@@ -0,0 +1,374 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP module.
+ *
+ * Version: @(#)tcp.h 1.0.5 05/23/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _TCP_H
+#define _TCP_H
+
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+/* This is for all connections with a full identity, no wildcards. */
+#define TCP_HTABLE_SIZE 256
+
+/* This is for listening sockets, thus all sockets which possess wildcards. */
+#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
+
+/* This is for all sockets, to keep track of the local port allocations. */
+#define TCP_BHTABLE_SIZE 64
+
+/* tcp_ipv4.c: These need to be shared by v4 and v6 because the lookup
+ * and hashing code needs to work with different AF's yet
+ * the port space is shared.
+ */
+extern struct sock *tcp_established_hash[TCP_HTABLE_SIZE];
+extern struct sock *tcp_listening_hash[TCP_LHTABLE_SIZE];
+extern struct sock *tcp_bound_hash[TCP_BHTABLE_SIZE];
+
+/* These are AF independent (IPv4 and IPv6 share the port space; see above). */
+static __inline__ int tcp_bhashfn(__u16 lport)	/* local port -> bound-hash bucket */
+{
+	return (lport ^ (lport >> 7)) & (TCP_BHTABLE_SIZE-1);
+}
+
+/* Find the next port that hashes h that is larger than lport.
+ * If you change the hash, change this function to match, or you will
+ * break TCP port selection. This function must also NOT wrap around
+ * when the next number exceeds the largest possible port (2^16-1).
+ */
+static __inline__ int tcp_bhashnext(__u16 lport, __u16 h)
+{
+        __u32 s;	/* don't change this to a smaller type! (must exceed 65535 without wrapping) */
+
+        s = (lport ^ (h ^ tcp_bhashfn(lport)));	/* flip the hash bits to land in bucket h */
+        if (s > lport)
+                return s;
+        s = lport + TCP_BHTABLE_SIZE;	/* step forward one table span, then re-aim at bucket h */
+        return (s ^ (h ^ tcp_bhashfn(s)));
+}
+
+static __inline__ int tcp_sk_bhashfn(struct sock *sk)	/* bound-hash bucket for a socket's local port */
+{
+	__u16 lport = sk->num;
+	return tcp_bhashfn(lport);
+}
+
+/* These can have wildcards, don't try too hard. 
+ * XXX deal with thousands of IP aliases for listening ports later
+ */
+static __inline__ int tcp_lhashfn(unsigned short num)	/* listening-hash bucket: port mod table size */
+{
+	return num & (TCP_LHTABLE_SIZE - 1);
+}
+
+static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)	/* listening-hash bucket for a socket */
+{
+	return tcp_lhashfn(sk->num);
+}
+
+/* This is IPv4 specific. */
+static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
+				 __u32 faddr, __u16 fport)	/* established-hash bucket from the 4-tuple */
+{
+	return ((laddr ^ lport) ^ (faddr ^ fport)) & (TCP_HTABLE_SIZE - 1);
+}
+
+static __inline__ int tcp_sk_hashfn(struct sock *sk)	/* established-hash bucket for a connected socket */
+{
+	__u32 laddr = sk->rcv_saddr;
+	__u16 lport = sk->num;
+	__u32 faddr = sk->daddr;
+	__u16 fport = sk->dummy_th.dest;	/* foreign port lives in the template header */
+
+	return tcp_hashfn(laddr, lport, faddr, fport);
+}
+
+/* Only those holding the sockhash lock call these two things here.
+ * Note the slightly gross overloading of sk->prev, AF_UNIX is the
+ * only other main benefactor of that member of SK, so who cares.
+ */
+static __inline__ void tcp_sk_bindify(struct sock *sk)	/* insert sk at the head of its bound-port hash chain (caller holds sockhash lock) */
+{
+	int hashent = tcp_sk_bhashfn(sk);
+	struct sock **htable = &tcp_bound_hash[hashent];
+
+	if((sk->bind_next = *htable) != NULL)
+		(*htable)->bind_pprev = &sk->bind_next;	/* fix old head's back-pointer */
+	*htable = sk;
+	sk->bind_pprev = htable;	/* back-pointer to whatever points at us */
+}
+
+static __inline__ void tcp_sk_unbindify(struct sock *sk)	/* unlink sk from its bound-port chain via the pprev back-pointer */
+{
+	if(sk->bind_next)
+		sk->bind_next->bind_pprev = sk->bind_pprev;
+	*(sk->bind_pprev) = sk->bind_next;
+}
+
+/*
+ * 40 is maximal IP options size
+ * 4 is TCP option size (MSS)
+ */
+#define MAX_SYN_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + 4 + MAX_HEADER + 15)
+#define MAX_FIN_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + MAX_HEADER + 15)
+#define MAX_ACK_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + MAX_HEADER + 15)
+#define MAX_RESET_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + MAX_HEADER + 15)
+
+#define MAX_WINDOW 32767 /* Never offer a window over 32767 without using
+ window scaling (not yet supported). Some poor
+ stacks do signed 16bit maths! */
+#define MIN_WINDOW 2048
+#define MAX_ACK_BACKLOG 2
+#define MAX_DUP_ACKS 3
+#define MIN_WRITE_SPACE 2048
+#define TCP_WINDOW_DIFF 2048
+
+/* urg_data states */
+#define URG_VALID 0x0100
+#define URG_NOTYET 0x0200
+#define URG_READ 0x0400
+
+#define TCP_RETR1 7 /*
+ * This is how many retries it does before it
+ * tries to figure out if the gateway is
+ * down.
+ */
+
+#define TCP_RETR2 15 /*
+ * This should take at least
+ * 90 minutes to time out.
+ */
+
+#define TCP_TIMEOUT_LEN (15*60*HZ) /* should be about 15 mins */
+#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully
+ * close the socket, about 60 seconds */
+#define TCP_FIN_TIMEOUT (3*60*HZ) /* BSD style FIN_WAIT2 deadlock breaker */
+#define TCP_ACK_TIME (3*HZ) /* time to delay before sending an ACK */
+#define TCP_DONE_TIME (5*HZ/2)/* maximum time to wait before actually
+ * destroying a socket */
+#define TCP_WRITE_TIME (30*HZ) /* initial time to wait for an ACK,
+ * after last transmit */
+#define TCP_TIMEOUT_INIT (3*HZ) /* RFC 1122 initial timeout value */
+#define TCP_SYN_RETRIES 5 /* number of times to retry opening a
+ * connection (TCP_RETR2-....) */
+#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
+ * I've got something to write and
+ * there is no window */
+
+#define TCP_NO_CHECK 0 /* turn to one if you want the default
+ * to be no checksum */
+
+
+/*
+ * TCP option
+ */
+
+#define TCPOPT_NOP 1 /* Padding */
+#define TCPOPT_EOL 0 /* End of options */
+#define TCPOPT_MSS 2 /* Segment size negotiating */
+/*
+ * We don't use these yet, but they are for PAWS and big windows
+ */
+#define TCPOPT_WINDOW 3 /* Window scaling */
+#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
+
+
+/*
+ * The next routines deal with comparing 32 bit unsigned ints
+ * and worry about wraparound (automatic with unsigned arithmetic).
+ */
+
+extern __inline int before(__u32 seq1, __u32 seq2)	/* true if seq1 precedes seq2 modulo 2^32 */
+{
+        return (__s32)(seq1-seq2) < 0;	/* signed view of unsigned difference handles wraparound */
+}
+
+extern __inline int after(__u32 seq1, __u32 seq2)	/* true if seq1 follows seq2 modulo 2^32 */
+{
+	return (__s32)(seq2-seq1) < 0;
+}
+
+
+/* is s2<=s1<=s3 ? */
+extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)	/* inclusive range test, wraparound-safe */
+{
+	return (after(seq1+1, seq2) && before(seq1, seq3+1));	/* +1 turns strict compares into <= */
+}
+
+static __inline__ int min(unsigned int a, unsigned int b)	/* unsigned compare; note int return */
+{
+	if (a > b)
+		a = b;
+	return a;
+}
+
+static __inline__ int max(unsigned int a, unsigned int b)	/* unsigned compare; note int return */
+{
+	if (a < b)
+		a = b;
+	return a;
+}
+
+extern struct proto tcp_prot;
+extern struct tcp_mib tcp_statistics;
+
+extern unsigned short tcp_good_socknum(void);
+
+extern void tcp_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32, struct inet_protocol *protocol, int len);
+extern void tcp_shutdown (struct sock *sk, int how);
+extern int tcp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr, int redo,
+ struct inet_protocol *protocol);
+
+extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+extern void tcp_v4_unhash(struct sock *sk);
+
+extern void tcp_read_wakeup(struct sock *);
+extern void tcp_write_xmit(struct sock *);
+extern void tcp_time_wait(struct sock *);
+extern void tcp_retransmit(struct sock *, int);
+extern void tcp_do_retransmit(struct sock *, int);
+extern void tcp_send_check(struct tcphdr *th, unsigned long saddr,
+ unsigned long daddr, int len, struct sk_buff *skb);
+
+/* tcp_output.c */
+
+extern void tcp_send_probe0(struct sock *);
+extern void tcp_send_partial(struct sock *);
+extern void tcp_write_wakeup(struct sock *);
+extern void tcp_send_fin(struct sock *sk);
+extern void tcp_send_synack(struct sock *, struct sock *, struct sk_buff *, int);
+extern void tcp_send_skb(struct sock *, struct sk_buff *);
+extern void tcp_send_ack(struct sock *sk);
+extern void tcp_send_delayed_ack(struct sock *sk, int max_timeout, unsigned long timeout);
+extern void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
+ struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl);
+
+extern void tcp_enqueue_partial(struct sk_buff *, struct sock *);
+extern struct sk_buff * tcp_dequeue_partial(struct sock *);
+extern void tcp_shrink_skb(struct sock *,struct sk_buff *,u32);
+
+/* CONFIG_IP_TRANSPARENT_PROXY */
+extern int tcp_chkaddr(struct sk_buff *);
+
+/* tcp_timer.c */
+#define tcp_reset_msl_timer(x,y,z) reset_timer(x,y,z)
+extern void tcp_reset_xmit_timer(struct sock *, int, unsigned long);
+extern void tcp_delack_timer(unsigned long);
+extern void tcp_retransmit_timer(unsigned long);
+
+static __inline__ int tcp_old_window(struct sock * sk)	/* window still open under the last offer we made */
+{
+	return sk->window - (sk->acked_seq - sk->lastwin_seq);	/* offered window minus data consumed since the offer */
+}
+
+extern int tcp_new_window(struct sock *);
+
+/*
+ * Return true if we should raise the window when we
+ * have cleaned up the receive queue. We don't want to
+ * do this normally, only if it makes sense to avoid
+ * zero window probes..
+ * 
+ * We do this only if we can raise the window noticeably.
+ */
+static __inline__ int tcp_raise_window(struct sock * sk)	/* worth advertising a bigger window now? */
+{
+	int new = tcp_new_window(sk);
+	return new && (new >= 2*tcp_old_window(sk));	/* only if at least double the old offer */
+}
+
+static __inline__ unsigned short tcp_select_window(struct sock *sk)	/* window value to put in an outgoing segment */
+{
+	int window = tcp_new_window(sk);	
+	int oldwin = tcp_old_window(sk);
+
+	/* Don't allow a shrinking window */
+	if (window > oldwin) {	/* grow only; record the new offer */
+		sk->window = window;
+		sk->lastwin_seq = sk->acked_seq;
+		oldwin = window;
+	}
+	return oldwin;
+}
+
+/*
+ * List all states of a TCP socket that can be viewed as a "connected"
+ * state. This now includes TCP_SYN_RECV, although I am not yet fully
+ * convinced that this is the solution for the 'getpeername(2)'
+ * problem. Thanks to Stephen A. Wood <saw@cebaf.gov> -FvK
+ */
+
+extern __inline const int tcp_connected(const int state)	/* true for any state viewed as "connected" (see comment above) */
+{
+	return(state == TCP_ESTABLISHED || state == TCP_CLOSE_WAIT ||
+	       state == TCP_FIN_WAIT1   || state == TCP_FIN_WAIT2 ||
+	       state == TCP_SYN_RECV);	/* SYN_RECV included for getpeername(2), per note above */
+}
+
+/*
+ * Calculate(/check) TCP checksum
+ */
+static __inline__ u16 tcp_check(struct tcphdr *th, int len,
+	unsigned long saddr, unsigned long daddr, unsigned long base)	/* TCP checksum over pseudo-header + partial sum 'base' */
+{
+	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);	/* th unused here; kept for call-site symmetry? */
+}
+
+#undef STATE_TRACE	/* define to enable per-socket state-transition printk tracing */
+
+#ifdef STATE_TRACE
+static char *statename[]={	/* indexed by TCP_* state number */
+	"Unused","Established","Syn Sent","Syn Recv",
+	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
+	"Close Wait","Last ACK","Listen","Closing"
+};
+#endif
+
+static __inline__ void tcp_set_state(struct sock *sk, int state)	/* change sk->state; keeps TcpCurrEstab MIB count and hash tables consistent */
+{
+	int oldstate = sk->state;
+
+	sk->state = state;
+
+#ifdef STATE_TRACE
+	if(sk->debug)
+		printk("TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
+#endif	
+
+	switch (state) {
+	case TCP_ESTABLISHED:
+		if (oldstate != TCP_ESTABLISHED) {
+			tcp_statistics.TcpCurrEstab++;	/* count new established connection */
+		}
+		break;
+
+	case TCP_CLOSE:
+		/* Preserve the invariant */
+		tcp_v4_unhash(sk);	/* closed sockets must not sit in the lookup tables */
+		/* Should be about 2 rtt's */
+		reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME));
+		/* fall through */
+	default:
+		if (oldstate==TCP_ESTABLISHED)
+			tcp_statistics.TcpCurrEstab--;	/* leaving established decrements the gauge */
+	}
+}
+
+#endif /* _TCP_H */
diff --git a/linux/src/include/net/udp.h b/linux/src/include/net/udp.h
new file mode 100644
index 0000000..d2c7476
--- /dev/null
+++ b/linux/src/include/net/udp.h
@@ -0,0 +1,63 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP module.
+ *
+ * Version: @(#)udp.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : Turned on udp checksums. I don't want to
+ * chase 'memory corruption' bugs that aren't!
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UDP_H
+#define _UDP_H
+
+#include <linux/udp.h>
+
+#define UDP_HTABLE_SIZE 128
+
+/* udp.c: This needs to be shared by v4 and v6 because the lookup
+ * and hashing code needs to work with different AF's yet
+ * the port space is shared.
+ */
+extern struct sock *udp_hash[UDP_HTABLE_SIZE];
+
+extern unsigned short udp_good_socknum(void);
+
+#define UDP_NO_CHECK 0
+
+
+extern struct proto udp_prot;
+
+
+extern void udp_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32 saddr, struct inet_protocol *protocol, int len);
+extern void udp_send_check(struct udphdr *uh, __u32 saddr,
+ __u32 daddr, int len, struct sock *sk);
+extern int udp_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int udp_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+extern int udp_connect(struct sock *sk,
+ struct sockaddr_in *usin, int addr_len);
+extern int udp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr, int redo,
+ struct inet_protocol *protocol);
+extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+/* CONFIG_IP_TRANSPARENT_PROXY */
+extern int udp_chkaddr(struct sk_buff *skb);
+
+#endif /* _UDP_H */
diff --git a/linux/src/include/scsi/scsi.h b/linux/src/include/scsi/scsi.h
new file mode 100644
index 0000000..cd44f52
--- /dev/null
+++ b/linux/src/include/scsi/scsi.h
@@ -0,0 +1,205 @@
+#ifndef _LINUX_SCSI_H
+#define _LINUX_SCSI_H
+
+/*
+ * This header file contains public constants and structures used by
+ * the scsi code for linux.
+ */
+
+/*
+ $Header: cvs/gnumach/linux/src/include/scsi/Attic/scsi.h,v 1.1 1999/04/26 05:58:14 tb Exp $
+
+ For documentation on the OPCODES, MESSAGES, and SENSE values,
+ please consult the SCSI standard.
+
+*/
+
+/*
+ * SCSI opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REZERO_UNIT 0x01
+#define REQUEST_SENSE 0x03
+#define FORMAT_UNIT 0x04
+#define READ_BLOCK_LIMITS 0x05
+#define REASSIGN_BLOCKS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define SEEK_6 0x0b
+#define READ_REVERSE 0x0f
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define INQUIRY 0x12
+#define RECOVER_BUFFERED_DATA 0x14
+#define MODE_SELECT 0x15
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define COPY 0x18
+#define ERASE 0x19
+#define MODE_SENSE 0x1a
+#define START_STOP 0x1b
+#define RECEIVE_DIAGNOSTIC 0x1c
+#define SEND_DIAGNOSTIC 0x1d
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+
+#define SET_WINDOW 0x24
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define SEEK_10 0x2b
+#define WRITE_VERIFY 0x2e
+#define VERIFY 0x2f
+#define SEARCH_HIGH 0x30
+#define SEARCH_EQUAL 0x31
+#define SEARCH_LOW 0x32
+#define SET_LIMITS 0x33
+#define PRE_FETCH 0x34
+#define READ_POSITION 0x34
+#define SYNCHRONIZE_CACHE 0x35
+#define LOCK_UNLOCK_CACHE 0x36
+#define READ_DEFECT_DATA 0x37
+#define MEDIUM_SCAN 0x38
+#define COMPARE 0x39
+#define COPY_VERIFY 0x3a
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define UPDATE_BLOCK 0x3d
+#define READ_LONG 0x3e
+#define WRITE_LONG 0x3f
+#define CHANGE_DEFINITION 0x40
+#define WRITE_SAME 0x41
+#define READ_TOC 0x43
+#define LOG_SELECT 0x4c
+#define LOG_SENSE 0x4d
+#define MODE_SELECT_10 0x55
+#define MODE_SENSE_10 0x5a
+#define MOVE_MEDIUM 0xa5
+#define READ_12 0xa8
+#define WRITE_12 0xaa
+#define WRITE_VERIFY_12 0xae
+#define SEARCH_HIGH_12 0xb0
+#define SEARCH_EQUAL_12 0xb1
+#define SEARCH_LOW_12 0xb2
+#define READ_ELEMENT_STATUS 0xb8
+#define SEND_VOLUME_TAG 0xb6
+#define WRITE_LONG_2 0xea
+
+/*
+ * Status codes
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+
+#define STATUS_MASK 0x3e
+
+/*
+ * SENSE KEYS
+ */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define BLANK_CHECK 0x08
+#define COPY_ABORTED 0x0a
+#define ABORTED_COMMAND 0x0b
+#define VOLUME_OVERFLOW 0x0d
+#define MISCOMPARE 0x0e
+
+
+/*
+ * DEVICE TYPES
+ */
+
+#define TYPE_DISK 0x00
+#define TYPE_TAPE 0x01
+#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
+#define TYPE_WORM 0x04 /* Treated as ROM by our system */
+#define TYPE_ROM 0x05
+#define TYPE_SCANNER 0x06
+#define TYPE_MOD 0x07 /* Magneto-optical disk -
+ * - treated as TYPE_DISK */
+#define TYPE_MEDIUM_CHANGER 0x08
+#define TYPE_NO_LUN 0x7f
+
+
+/*
+ * MESSAGE CODES
+ */
+
+#define COMMAND_COMPLETE 0x00
+#define EXTENDED_MESSAGE 0x01
+#define EXTENDED_MODIFY_DATA_POINTER 0x00
+#define EXTENDED_SDTR 0x01
+#define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */
+#define EXTENDED_WDTR 0x03
+#define SAVE_POINTERS 0x02
+#define RESTORE_POINTERS 0x03
+#define DISCONNECT 0x04
+#define INITIATOR_ERROR 0x05
+#define ABORT 0x06
+#define MESSAGE_REJECT 0x07
+#define NOP 0x08
+#define MSG_PARITY_ERROR 0x09
+#define LINKED_CMD_COMPLETE 0x0a
+#define LINKED_FLG_CMD_COMPLETE 0x0b
+#define BUS_DEVICE_RESET 0x0c
+
+#define INITIATE_RECOVERY 0x0f /* SCSI-II only */
+#define RELEASE_RECOVERY 0x10 /* SCSI-II only */
+
+#define SIMPLE_QUEUE_TAG 0x20
+#define HEAD_OF_QUEUE_TAG 0x21
+#define ORDERED_QUEUE_TAG 0x22
+
+/*
+ * Here are some scsi specific ioctl commands which are sometimes useful.
+ */
+/* These are a few other constants only used by scsi devices */
+
+#define SCSI_IOCTL_GET_IDLUN 0x5382
+
+/* Used to turn on and off tagged queuing for scsi devices */
+
+#define SCSI_IOCTL_TAGGED_ENABLE 0x5383
+#define SCSI_IOCTL_TAGGED_DISABLE 0x5384
+
+/* Used to obtain the host number of a device. */
+#define SCSI_IOCTL_PROBE_HOST 0x5385
+
+/* Used to get the bus number for a device */
+#define SCSI_IOCTL_GET_BUS_NUMBER 0x5386
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
+#endif
diff --git a/linux/src/include/scsi/scsi_ioctl.h b/linux/src/include/scsi/scsi_ioctl.h
new file mode 100644
index 0000000..a90abf6
--- /dev/null
+++ b/linux/src/include/scsi/scsi_ioctl.h
@@ -0,0 +1,28 @@
+#ifndef _SCSI_IOCTL_H
+#define _SCSI_IOCTL_H
+
+#define SCSI_IOCTL_SEND_COMMAND 1
+#define SCSI_IOCTL_TEST_UNIT_READY 2
+#define SCSI_IOCTL_BENCHMARK_COMMAND 3
+#define SCSI_IOCTL_SYNC 4 /* Request synchronous parameters */
+#define SCSI_IOCTL_START_UNIT 5
+#define SCSI_IOCTL_STOP_UNIT 6
+/* The door lock/unlock constants are compatible with Sun constants for
+ the cdrom */
+#define SCSI_IOCTL_DOORLOCK 0x5380 /* lock the eject mechanism */
+#define SCSI_IOCTL_DOORUNLOCK 0x5381 /* unlock the mechanism */
+
+#define SCSI_REMOVAL_PREVENT 1
+#define SCSI_REMOVAL_ALLOW 0
+
+#ifdef __KERNEL__
+
+extern int scsi_ioctl (Scsi_Device *dev, int cmd, void *arg);
+extern int kernel_scsi_ioctl (Scsi_Device *dev, int cmd, void *arg);
+extern int scsi_ioctl_send_command(Scsi_Device *dev, void *buffer);
+
+#endif
+
+#endif
+
+
diff --git a/linux/src/include/scsi/scsicam.h b/linux/src/include/scsi/scsicam.h
new file mode 100644
index 0000000..954e140
--- /dev/null
+++ b/linux/src/include/scsi/scsicam.h
@@ -0,0 +1,17 @@
+/*
+ * scsicam.h - SCSI CAM support functions, use for HDIO_GETGEO, etc.
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult the SCSI-CAM draft.
+ */
+
+#ifndef SCSICAM_H
+#define SCSICAM_H
+#include <linux/kdev_t.h>
+extern int scsicam_bios_param (Disk *disk, kdev_t dev, int *ip);
+#endif /* def SCSICAM_H */
diff --git a/linux/src/init/main.c b/linux/src/init/main.c
new file mode 100644
index 0000000..1aa15b9
--- /dev/null
+++ b/linux/src/init/main.c
@@ -0,0 +1,1135 @@
+/*
+ * linux/init/main.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * GK 2/5/95 - Changed to support mounting root fs via NFS
+ * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
+ * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/head.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/utsname.h>
+#include <linux/ioport.h>
+#include <linux/hdreg.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/blk.h>
+#ifdef CONFIG_ROOT_NFS
+#include <linux/nfs_fs.h>
+#endif
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#include <asm/bugs.h>
+
+#include <linux/dev/glue/glue.h>
+
+/*
+ * Versions of gcc older than that listed below may actually compile
+ * and link okay, but the end product can have subtle run time bugs.
+ * To avoid associated bogus bug reports, we flatly refuse to compile
+ * with a gcc that is known to be too old from the very beginning.
+ */
+#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 6)
+#error sorry, your GCC is too old. It builds incorrect kernels.
+#endif
+
+extern char _stext, _etext;
+extern const char *linux_banner;
+
+static char printbuf[1024];
+
+extern int console_loglevel;
+
+static int init(void *);
+extern int bdflush(void *);
+extern int kswapd(void *);
+extern void kswapd_setup(void);
+
+extern void init_modules(void);
+extern long console_init(long, long);
+extern long kmalloc_init(long,long);
+extern void sock_init(void);
+extern unsigned long pci_init(unsigned long, unsigned long);
+extern void sysctl_init(void);
+
+extern void no_scroll(char *str, int *ints);
+extern void swap_setup(char *str, int *ints);
+extern void buff_setup(char *str, int *ints);
+extern void panic_setup(char *str, int *ints);
+extern void bmouse_setup(char *str, int *ints);
+extern void msmouse_setup(char *str, int *ints);
+extern void lp_setup(char *str, int *ints);
+extern void eth_setup(char *str, int *ints);
+extern void xd_setup(char *str, int *ints);
+extern void xd_manual_geo_init(char *str, int *ints);
+extern void floppy_setup(char *str, int *ints);
+extern void st_setup(char *str, int *ints);
+extern void st0x_setup(char *str, int *ints);
+extern void advansys_setup(char *str, int *ints);
+extern void tmc8xx_setup(char *str, int *ints);
+extern void t128_setup(char *str, int *ints);
+extern void pas16_setup(char *str, int *ints);
+extern void generic_NCR5380_setup(char *str, int *intr);
+extern void generic_NCR53C400_setup(char *str, int *intr);
+extern void aha152x_setup(char *str, int *ints);
+extern void aha1542_setup(char *str, int *ints);
+extern void gdth_setup(char *str, int *ints);
+extern void aic7xxx_setup(char *str, int *ints);
+extern void AM53C974_setup(char *str, int *ints);
+extern void BusLogic_Setup(char *str, int *ints);
+extern void ncr53c8xx_setup(char *str, int *ints);
+extern void eata2x_setup(char *str, int *ints);
+extern void u14_34f_setup(char *str, int *ints);
+extern void fdomain_setup(char *str, int *ints);
+extern void in2000_setup(char *str, int *ints);
+extern void NCR53c406a_setup(char *str, int *ints);
+extern void wd7000_setup(char *str, int *ints);
+extern void ppa_setup(char *str, int *ints);
+extern void scsi_luns_setup(char *str, int *ints);
+extern void sound_setup(char *str, int *ints);
+extern void apm_setup(char *str, int *ints);
+extern void reboot_setup(char *str, int *ints);
+#ifdef CONFIG_CDU31A
+extern void cdu31a_setup(char *str, int *ints);
+#endif CONFIG_CDU31A
+#ifdef CONFIG_MCD
+extern void mcd_setup(char *str, int *ints);
+#endif CONFIG_MCD
+#ifdef CONFIG_MCDX
+extern void mcdx_setup(char *str, int *ints);
+#endif CONFIG_MCDX
+#ifdef CONFIG_SBPCD
+extern void sbpcd_setup(char *str, int *ints);
+#endif CONFIG_SBPCD
+#ifdef CONFIG_AZTCD
+extern void aztcd_setup(char *str, int *ints);
+#endif CONFIG_AZTCD
+#ifdef CONFIG_CDU535
+extern void sonycd535_setup(char *str, int *ints);
+#endif CONFIG_CDU535
+#ifdef CONFIG_GSCD
+extern void gscd_setup(char *str, int *ints);
+#endif CONFIG_GSCD
+#ifdef CONFIG_CM206
+extern void cm206_setup(char *str, int *ints);
+#endif CONFIG_CM206
+#ifdef CONFIG_OPTCD
+extern void optcd_setup(char *str, int *ints);
+#endif CONFIG_OPTCD
+#ifdef CONFIG_SJCD
+extern void sjcd_setup(char *str, int *ints);
+#endif CONFIG_SJCD
+#ifdef CONFIG_ISP16_CDI
+extern void isp16_setup(char *str, int *ints);
+#endif CONFIG_ISP16_CDI
+#ifdef CONFIG_BLK_DEV_RAM
+static void ramdisk_start_setup(char *str, int *ints);
+static void load_ramdisk(char *str, int *ints);
+static void prompt_ramdisk(char *str, int *ints);
+static void ramdisk_size(char *str, int *ints);
+#ifdef CONFIG_BLK_DEV_INITRD
+static void no_initrd(char *s,int *ints);
+#endif
+#endif CONFIG_BLK_DEV_RAM
+#ifdef CONFIG_ISDN_DRV_ICN
+extern void icn_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_ISDN_DRV_HISAX
+extern void HiSax_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_ISDN_DRV_PCBIT
+extern void pcbit_setup(char *str, int *ints);
+#endif
+
+#ifdef CONFIG_ATARIMOUSE
+extern void atari_mouse_setup (char *str, int *ints);
+#endif
+#ifdef CONFIG_DMASOUND
+extern void dmasound_setup (char *str, int *ints);
+#endif
+#ifdef CONFIG_ATARI_SCSI
+extern void atari_scsi_setup (char *str, int *ints);
+#endif
+extern void wd33c93_setup (char *str, int *ints);
+extern void gvp11_setup (char *str, int *ints);
+
+#ifdef CONFIG_CYCLADES
+extern void cy_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_DIGI
+extern void pcxx_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_RISCOM8
+extern void riscom8_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_SPECIALIX
+extern void specialix_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_BAYCOM
+extern void baycom_setup(char *str, int *ints);
+#endif
+
+#ifdef CONFIG_PARIDE_PD
+extern void pd_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PF
+extern void pf_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PT
+extern void pt_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PG
+extern void pg_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PCD
+extern void pcd_setup(char *str, int *ints);
+#endif
+
+#if defined(CONFIG_SYSVIPC) || defined(CONFIG_KERNELD)
+extern void ipc_init(void);
+#endif
+
+/*
+ * Boot command-line arguments
+ */
+#define MAX_INIT_ARGS 8
+#define MAX_INIT_ENVS 8
+
+extern void time_init(void);
+
+static unsigned long memory_start = 0;
+static unsigned long memory_end = 0;
+
+int rows, cols;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_size; /* Size of the ramdisk(s) */
+extern int rd_image_start; /* starting block # of image */
+#ifdef CONFIG_BLK_DEV_INITRD
+kdev_t real_root_dev;
+#endif
+#endif
+
+int root_mountflags = MS_RDONLY;
+char *execute_command = 0;
+
+#ifdef CONFIG_ROOT_NFS
+char nfs_root_name[NFS_ROOT_NAME_LEN] = { "default" };
+char nfs_root_addrs[NFS_ROOT_ADDRS_LEN] = { "" };
+#endif
+
+extern void dquot_init(void);
+
+static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+static char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+
+static char * argv_rc[] = { "/bin/sh", NULL };
+static char * envp_rc[] = { "HOME=/", "TERM=linux", NULL };
+
+static char * argv[] = { "-/bin/sh",NULL };
+static char * envp[] = { "HOME=/usr/root", "TERM=linux", NULL };
+
+char *get_options(char *str, int *ints)
+{
+ char *cur = str;
+ int i=1;
+
+ while (cur && isdigit(*cur) && i <= 10) {
+ ints[i++] = simple_strtoul(cur,NULL,0);
+ if ((cur = strchr(cur,',')) != NULL)
+ cur++;
+ }
+ ints[0] = i-1;
+ return(cur);
+}
+
+static void profile_setup(char *str, int *ints)
+{
+ if (ints[0] > 0)
+ prof_shift = (unsigned long) ints[1];
+ else
+#ifdef CONFIG_PROFILE_SHIFT
+ prof_shift = CONFIG_PROFILE_SHIFT;
+#else
+ prof_shift = 2;
+#endif
+}
+
+struct kernel_param {
+ const char *str;
+ void (*setup_func)(char *, int *);
+} ;
+
+struct kernel_param bootsetups[] = {
+ { "reserve=", reserve_setup },
+ { "profile=", profile_setup },
+#ifdef CONFIG_BLK_DEV_RAM
+ { "ramdisk_start=", ramdisk_start_setup },
+ { "load_ramdisk=", load_ramdisk },
+ { "prompt_ramdisk=", prompt_ramdisk },
+ { "ramdisk=", ramdisk_size },
+ { "ramdisk_size=", ramdisk_size },
+#ifdef CONFIG_BLK_DEV_INITRD
+ { "noinitrd", no_initrd },
+#endif
+#endif
+ { "swap=", swap_setup },
+ { "buff=", buff_setup },
+ { "panic=", panic_setup },
+ { "no-scroll", no_scroll },
+#ifdef CONFIG_BUGi386
+ { "no-hlt", no_halt },
+ { "no387", no_387 },
+ { "reboot=", reboot_setup },
+#endif
+#ifdef CONFIG_INET
+ { "ether=", eth_setup },
+#endif
+#ifdef CONFIG_PRINTER
+ { "lp=", lp_setup },
+#endif
+#ifdef CONFIG_SCSI
+ { "max_scsi_luns=", scsi_luns_setup },
+#endif
+#ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+#endif
+#if defined(CONFIG_BLK_DEV_HD)
+ { "hd=", hd_setup },
+#endif
+#ifdef CONFIG_CHR_DEV_ST
+ { "st=", st_setup },
+#endif
+#ifdef CONFIG_BUSMOUSE
+ { "bmouse=", bmouse_setup },
+#endif
+#ifdef CONFIG_MS_BUSMOUSE
+ { "msmouse=", msmouse_setup },
+#endif
+#ifdef CONFIG_SCSI_SEAGATE
+ { "st0x=", st0x_setup },
+ { "tmc8xx=", tmc8xx_setup },
+#endif
+#ifdef CONFIG_SCSI_T128
+ { "t128=", t128_setup },
+#endif
+#ifdef CONFIG_SCSI_PAS16
+ { "pas16=", pas16_setup },
+#endif
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+ { "ncr5380=", generic_NCR5380_setup },
+ { "ncr53c400=", generic_NCR53C400_setup },
+#endif
+#ifdef CONFIG_SCSI_AHA152X
+ { "aha152x=", aha152x_setup},
+#endif
+#ifdef CONFIG_SCSI_AHA1542
+ { "aha1542=", aha1542_setup},
+#endif
+#ifdef CONFIG_SCSI_GDTH
+ { "gdth=", gdth_setup},
+#endif
+#ifdef CONFIG_SCSI_AIC7XXX
+ { "aic7xxx=", aic7xxx_setup},
+#endif
+#ifdef CONFIG_SCSI_BUSLOGIC
+ { "BusLogic=", BusLogic_Setup},
+#endif
+#ifdef CONFIG_SCSI_NCR53C8XX
+ { "ncr53c8xx=", ncr53c8xx_setup},
+#endif
+#ifdef CONFIG_SCSI_EATA
+ { "eata=", eata2x_setup},
+#endif
+#ifdef CONFIG_SCSI_U14_34F
+ { "u14-34f=", u14_34f_setup},
+#endif
+#ifdef CONFIG_SCSI_AM53C974
+ { "AM53C974=", AM53C974_setup},
+#endif
+#ifdef CONFIG_SCSI_NCR53C406A
+ { "ncr53c406a=", NCR53c406a_setup},
+#endif
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+ { "fdomain=", fdomain_setup},
+#endif
+#ifdef CONFIG_SCSI_IN2000
+ { "in2000=", in2000_setup},
+#endif
+#ifdef CONFIG_SCSI_7000FASST
+ { "wd7000=", wd7000_setup},
+#endif
+#ifdef CONFIG_SCSI_PPA
+ { "ppa=", ppa_setup },
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+ { "xd=", xd_setup },
+ { "xd_geo=", xd_manual_geo_init },
+#endif
+#ifdef CONFIG_BLK_DEV_FD
+ { "floppy=", floppy_setup },
+#endif
+#ifdef CONFIG_CDU31A
+ { "cdu31a=", cdu31a_setup },
+#endif CONFIG_CDU31A
+#ifdef CONFIG_MCD
+ { "mcd=", mcd_setup },
+#endif CONFIG_MCD
+#ifdef CONFIG_MCDX
+ { "mcdx=", mcdx_setup },
+#endif CONFIG_MCDX
+#ifdef CONFIG_SBPCD
+ { "sbpcd=", sbpcd_setup },
+#endif CONFIG_SBPCD
+#ifdef CONFIG_AZTCD
+ { "aztcd=", aztcd_setup },
+#endif CONFIG_AZTCD
+#ifdef CONFIG_CDU535
+ { "sonycd535=", sonycd535_setup },
+#endif CONFIG_CDU535
+#ifdef CONFIG_GSCD
+ { "gscd=", gscd_setup },
+#endif CONFIG_GSCD
+#ifdef CONFIG_CM206
+ { "cm206=", cm206_setup },
+#endif CONFIG_CM206
+#ifdef CONFIG_OPTCD
+ { "optcd=", optcd_setup },
+#endif CONFIG_OPTCD
+#ifdef CONFIG_SJCD
+ { "sjcd=", sjcd_setup },
+#endif CONFIG_SJCD
+#ifdef CONFIG_ISP16_CDI
+ { "isp16=", isp16_setup },
+#endif CONFIG_ISP16_CDI
+#ifdef CONFIG_SOUND
+ { "sound=", sound_setup },
+#endif
+#ifdef CONFIG_ISDN_DRV_ICN
+ { "icn=", icn_setup },
+#endif
+#ifdef CONFIG_ISDN_DRV_HISAX
+ { "hisax=", HiSax_setup },
+ { "HiSax=", HiSax_setup },
+#endif
+#ifdef CONFIG_ISDN_DRV_PCBIT
+ { "pcbit=", pcbit_setup },
+#endif
+#ifdef CONFIG_ATARIMOUSE
+ { "atamouse=", atari_mouse_setup },
+#endif
+#ifdef CONFIG_DMASOUND
+ { "dmasound=", dmasound_setup },
+#endif
+#ifdef CONFIG_ATARI_SCSI
+ { "atascsi=", atari_scsi_setup },
+#endif
+#if defined(CONFIG_A3000_SCSI) || defined(CONFIG_A2091_SCSI) \
+ || defined(CONFIG_GVP11_SCSI)
+ { "wd33c93=", wd33c93_setup },
+#endif
+#if defined(CONFIG_GVP11_SCSI)
+ { "gvp11=", gvp11_setup },
+#endif
+#ifdef CONFIG_CYCLADES
+ { "cyclades=", cy_setup },
+#endif
+#ifdef CONFIG_DIGI
+ { "digi=", pcxx_setup },
+#endif
+#ifdef CONFIG_RISCOM8
+ { "riscom8=", riscom8_setup },
+#endif
+#ifdef CONFIG_SPECIALIX
+ { "specialix=", specialix_setup },
+#endif
+#ifdef CONFIG_BAYCOM
+ { "baycom=", baycom_setup },
+#endif
+#ifdef CONFIG_APM
+ { "apm=", apm_setup },
+#endif
+ { 0, 0 }
+};
+
+static struct kernel_param raw_params[] = {
+
+#ifdef CONFIG_PARIDE_PD
+ { "pd.", pd_setup },
+#endif
+#ifdef CONFIG_PARIDE_PCD
+ { "pcd.", pcd_setup },
+#endif
+#ifdef CONFIG_PARIDE_PF
+ { "pf.", pf_setup },
+#endif
+#ifdef CONFIG_PARIDE_PT
+ { "pt.", pt_setup },
+#endif
+#ifdef CONFIG_PARIDE_PG
+ { "pg.", pg_setup },
+#endif
+ { 0, 0 }
+} ;
+
+
+#ifdef CONFIG_BLK_DEV_RAM
+static void ramdisk_start_setup(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_image_start = ints[1];
+}
+
+static void load_ramdisk(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_doload = ints[1] & 3;
+}
+
+static void prompt_ramdisk(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_prompt = ints[1] & 1;
+}
+
+static void ramdisk_size(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_size = ints[1];
+}
+
+#endif
+
+static int checksetup(char *line)
+{
+ int i = 0;
+ int ints[11];
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /* ide driver needs the basic string, rather than pre-processed values */
+ if (!strncmp(line,"ide",3) || (!strncmp(line,"hd",2) && line[2] != '=')) {
+ ide_setup(line);
+ return 1;
+ }
+#endif
+ while (bootsetups[i].str) {
+ int n = strlen(bootsetups[i].str);
+ if (!strncmp(line,bootsetups[i].str,n)) {
+ bootsetups[i].setup_func(get_options(line+n,ints), ints);
+ return 1;
+ }
+ i++;
+ }
+
+ for (i=0; raw_params[i].str; i++) {
+ int n = strlen(raw_params[i].str);
+ if (!strncmp(line,raw_params[i].str,n)) {
+ raw_params[i].setup_func(line+n, NULL);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* this should be approx 2 Bo*oMips to start (note initial shift), and will
+ still work even if initially too large, it will just take slightly longer */
+unsigned long loops_per_sec = (1<<12);
+
+#if defined(__SMP__) && defined(__i386__)
+unsigned long smp_loops_per_tick = 1000000;
+#endif
+
+/* This is the number of bits of precision for the loops_per_second. Each
+ bit takes on average 1.5/HZ seconds. This (like the original) is a little
+ better than 1% */
+#define LPS_PREC 8
+
+void calibrate_delay(void)
+{
+ int ticks;
+ int loopbit;
+ int lps_precision = LPS_PREC;
+
+ loops_per_sec = (1<<12);
+
+ printk("Calibrating delay loop.. ");
+ while (loops_per_sec <<= 1) {
+ /* wait for "start of" clock tick */
+ ticks = jiffies;
+ while (ticks == jiffies)
+ /* nothing */;
+ /* Go .. */
+ ticks = jiffies;
+ __delay(loops_per_sec);
+ ticks = jiffies - ticks;
+ if (ticks)
+ break;
+ }
+
+/* Do a binary approximation to get loops_per_second set to equal one clock
+ (up to lps_precision bits) */
+ loops_per_sec >>= 1;
+ loopbit = loops_per_sec;
+ while ( lps_precision-- && (loopbit >>= 1) ) {
+ loops_per_sec |= loopbit;
+ ticks = jiffies;
+ while (ticks == jiffies);
+ ticks = jiffies;
+ __delay(loops_per_sec);
+ if (jiffies != ticks) /* longer than 1 tick */
+ loops_per_sec &= ~loopbit;
+ }
+
+/* finally, adjust loops per second in terms of seconds instead of clocks */
+ loops_per_sec *= HZ;
+/* Round the value and print it */
+ printk("ok - %lu.%02lu BogoMIPS\n",
+ (loops_per_sec+2500)/500000,
+ ((loops_per_sec+2500)/5000) % 100);
+
+#if defined(__SMP__) && defined(__i386__)
+ smp_loops_per_tick = loops_per_sec / 400;
+#endif
+}
+
+static void parse_root_dev(char * line)
+{
+ int base = 0;
+ static struct dev_name_struct {
+ const char *name;
+ const int num;
+ } devices[] = {
+ { "nfs", 0x00ff },
+ { "loop", 0x0700 },
+ { "hda", 0x0300 },
+ { "hdb", 0x0340 },
+ { "hdc", 0x1600 },
+ { "hdd", 0x1640 },
+ { "hde", 0x2100 },
+ { "hdf", 0x2140 },
+ { "hdg", 0x2200 },
+ { "hdh", 0x2240 },
+ { "sda", 0x0800 },
+ { "sdb", 0x0810 },
+ { "sdc", 0x0820 },
+ { "sdd", 0x0830 },
+ { "sde", 0x0840 },
+ { "sdf", 0x0850 },
+ { "sdg", 0x0860 },
+ { "sdh", 0x0870 },
+ { "sdi", 0x0880 },
+ { "sdj", 0x0890 },
+ { "sdk", 0x08a0 },
+ { "sdl", 0x08b0 },
+ { "sdm", 0x08c0 },
+ { "sdn", 0x08d0 },
+ { "sdo", 0x08e0 },
+ { "sdp", 0x08f0 },
+ { "fd", 0x0200 },
+ { "xda", 0x0d00 },
+ { "xdb", 0x0d40 },
+ { "ram", 0x0100 },
+ { "scd", 0x0b00 },
+ { "mcd", 0x1700 },
+ { "cdu535", 0x1800 },
+ { "aztcd", 0x1d00 },
+ { "cm206cd", 0x2000 },
+ { "gscd", 0x1000 },
+ { "sbpcd", 0x1900 },
+ { "sonycd", 0x1800 },
+#ifdef CONFIG_PARIDE_PD
+ { "pda", 0x2d00 },
+ { "pdb", 0x2d10 },
+ { "pdc", 0x2d20 },
+ { "pdd", 0x2d30 },
+#endif
+#ifdef CONFIG_PARIDE_PCD
+ { "pcd", 0x2e00 },
+#endif
+#ifdef CONFIG_PARIDE_PF
+ { "pf", 0x2f00 },
+#endif
+ { NULL, 0 }
+ };
+
+ if (strncmp(line,"/dev/",5) == 0) {
+ struct dev_name_struct *dev = devices;
+ line += 5;
+ do {
+ int len = strlen(dev->name);
+ if (strncmp(line,dev->name,len) == 0) {
+ line += len;
+ base = dev->num;
+ break;
+ }
+ dev++;
+ } while (dev->name);
+ }
+ ROOT_DEV = to_kdev_t(base + simple_strtoul(line,NULL,base?10:16));
+}
+
+/*
+ * This is a simple kernel command line parsing function: it parses
+ * the command line, and fills in the arguments/environment to init
+ * as appropriate. Any cmd-line option is taken to be an environment
+ * variable if it contains the character '='.
+ *
+ *
+ * This routine also checks for options meant for the kernel.
+ * These options are not given to init - they are for internal kernel use only.
+ */
+static void parse_options(char *line)
+{
+ char *next;
+ int args, envs;
+
+ if (!*line)
+ return;
+ args = 0;
+ envs = 1; /* TERM is set to 'linux' by default */
+ next = line;
+ while ((line = next) != NULL) {
+ if ((next = strchr(line,' ')) != NULL)
+ *next++ = 0;
+ /*
+ * check for kernel options first..
+ */
+ if (!strncmp(line,"root=",5)) {
+ parse_root_dev(line+5);
+ continue;
+ }
+#ifdef CONFIG_ROOT_NFS
+ if (!strncmp(line, "nfsroot=", 8)) {
+ int n;
+ line += 8;
+ ROOT_DEV = MKDEV(UNNAMED_MAJOR, 255);
+ if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
+ strncpy(nfs_root_name, line, sizeof(nfs_root_name));
+ nfs_root_name[sizeof(nfs_root_name)-1] = '\0';
+ continue;
+ }
+ n = strlen(line) + strlen(NFS_ROOT);
+ if (n >= sizeof(nfs_root_name))
+ line[sizeof(nfs_root_name) - strlen(NFS_ROOT) - 1] = '\0';
+ sprintf(nfs_root_name, NFS_ROOT, line);
+ continue;
+ }
+ if (!strncmp(line, "nfsaddrs=", 9)) {
+ line += 9;
+ strncpy(nfs_root_addrs, line, sizeof(nfs_root_addrs));
+ nfs_root_addrs[sizeof(nfs_root_addrs)-1] = '\0';
+ continue;
+ }
+#endif
+ if (!strcmp(line,"ro")) {
+ root_mountflags |= MS_RDONLY;
+ continue;
+ }
+ if (!strcmp(line,"rw")) {
+ root_mountflags &= ~MS_RDONLY;
+ continue;
+ }
+ if (!strcmp(line,"debug")) {
+ console_loglevel = 10;
+ continue;
+ }
+ if (!strncmp(line,"init=",5)) {
+ line += 5;
+ execute_command = line;
+ continue;
+ }
+ if (checksetup(line))
+ continue;
+ /*
+ * Then check if it's an environment variable or
+ * an option.
+ */
+ if (strchr(line,'=')) {
+ if (envs >= MAX_INIT_ENVS)
+ break;
+ envp_init[++envs] = line;
+ } else {
+ if (args >= MAX_INIT_ARGS)
+ break;
+ argv_init[++args] = line;
+ }
+ }
+ argv_init[args+1] = NULL;
+ envp_init[envs+1] = NULL;
+}
+
+
+extern void setup_arch(char **, unsigned long *, unsigned long *);
+extern void arch_syms_export(void);
+
+#ifndef __SMP__
+
+/*
+ * Uniprocessor idle thread
+ */
+
+int cpu_idle(void *unused)
+{
+ for(;;)
+ idle();
+}
+
+#else
+
+/*
+ * Multiprocessor idle thread is in arch/...
+ */
+
+extern int cpu_idle(void * unused);
+
+/*
+ * Activate a secondary processor.
+ */
+
+asmlinkage void start_secondary(void)
+{
+ trap_init();
+ init_IRQ();
+ smp_callin();
+ cpu_idle(NULL);
+}
+
+
+
+/*
+ * Called by CPU#0 to activate the rest.
+ */
+
+static void smp_init(void)
+{
+ int i, j;
+ smp_boot_cpus();
+
+ /*
+ * Create the slave init tasks as sharing pid 0.
+ *
+ * This should only happen if we have virtual CPU numbers
+ * higher than 0.
+ */
+
+ for (i=1; i<smp_num_cpus; i++)
+ {
+ struct task_struct *n, *p;
+
+ j = cpu_logical_map[i];
+ /*
+ * We use kernel_thread for the idlers which are
+ * unlocked tasks running in kernel space.
+ */
+ kernel_thread(cpu_idle, NULL, CLONE_PID);
+ /*
+ * Don't assume linear processor numbering
+ */
+ current_set[j]=task[i];
+ current_set[j]->processor=j;
+ cli();
+ n = task[i]->next_run;
+ p = task[i]->prev_run;
+ nr_running--;
+ n->prev_run = p;
+ p->next_run = n;
+ task[i]->next_run = task[i]->prev_run = task[i];
+ sti();
+ }
+}
+
+/*
+ * The autoprobe routines assume CPU#0 on the i386
+ * so we don't actually set the game in motion until
+ * they are finished.
+ */
+
+static void smp_begin(void)
+{
+ smp_threads_ready=1;
+ smp_commence();
+}
+
+#endif
+
+/*
+ * Activate the first processor.
+ */
+
+/*
+ * C entry point of the kernel.  On SMP every CPU enters here; only
+ * the first one performs the full initialization below, the others
+ * branch into start_secondary().  Initializes each subsystem in
+ * order, spawns the init thread, then becomes the idle task.
+ */
+asmlinkage void start_kernel(void)
+{
+ char * command_line;
+
+/*
+ * This little check will move.
+ */
+
+#ifdef __SMP__
+ static int first_cpu=1;
+
+ if(!first_cpu)
+ start_secondary();
+ first_cpu=0;
+
+#endif
+/*
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
+ setup_arch(&command_line, &memory_start, &memory_end);
+ memory_start = paging_init(memory_start,memory_end);
+ trap_init();
+ init_IRQ();
+ sched_init();
+ time_init();
+ parse_options(command_line);
+#ifdef CONFIG_MODULES
+ init_modules();
+#endif
+#ifdef CONFIG_PROFILE
+ if (!prof_shift)
+#ifdef CONFIG_PROFILE_SHIFT
+ prof_shift = CONFIG_PROFILE_SHIFT;
+#else
+ prof_shift = 2;
+#endif
+#endif
+ /* Carve the kernel profiling buffer out of boot memory. */
+ if (prof_shift) {
+ prof_buffer = (unsigned int *) memory_start;
+ /* only text is profiled */
+ prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
+ prof_len >>= prof_shift;
+ memory_start += prof_len * sizeof(unsigned int);
+ memset(prof_buffer, 0, prof_len * sizeof(unsigned int));
+ }
+ /* Each *_init() consumes boot memory and returns the new start. */
+ memory_start = console_init(memory_start,memory_end);
+#ifdef CONFIG_PCI
+ memory_start = pci_init(memory_start,memory_end);
+#endif
+ memory_start = kmalloc_init(memory_start,memory_end);
+ sti();
+ calibrate_delay();
+ memory_start = inode_init(memory_start,memory_end);
+ memory_start = file_table_init(memory_start,memory_end);
+ memory_start = name_cache_init(memory_start,memory_end);
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* If the allocations above grew into the initrd image, drop the
+  * initrd rather than use corrupted data. */
+ if (initrd_start && initrd_start < memory_start) {
+ printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
+ "disabling it.\n",initrd_start,memory_start);
+ initrd_start = 0;
+ }
+#endif
+ mem_init(memory_start,memory_end);
+ buffer_init();
+ sock_init();
+#if defined(CONFIG_SYSVIPC) || defined(CONFIG_KERNELD)
+ ipc_init();
+#endif
+ dquot_init();
+ arch_syms_export();
+ sti();
+ check_bugs();
+
+#if defined(CONFIG_MTRR) && defined(__SMP__)
+ init_mtrr_config();
+#endif
+
+
+ printk(linux_banner);
+#ifdef __SMP__
+ smp_init();
+#endif
+ sysctl_init();
+ /*
+ * We count on the initial thread going ok
+ * Like idlers init is an unlocked kernel thread, which will
+ * make syscalls (and thus be locked).
+ */
+ kernel_thread(init, NULL, 0);
+/*
+ * task[0] is meant to be used as an "idle" task: it may not sleep, but
+ * it might do some general things like count free pages or it could be
+ * used to implement a reasonable LRU algorithm for the paging routines:
+ * anything that can be useful, but shouldn't take time from the real
+ * processes.
+ *
+ * Right now task[0] just does a infinite idle loop.
+ */
+ cpu_idle(NULL);
+}
+
+/*
+ * User-mode printf for the init process: formats into the file-level
+ * printbuf (declared outside this hunk) and writes the result to
+ * fd 1 through the write() syscall wrapper.
+ * Returns the character count produced by vsprintf().
+ */
+static int printf(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ write(1,printbuf,i=vsprintf(printbuf, fmt, args));
+ va_end(args);
+ return i;
+}
+
+/*
+ * Child body: run "/bin/sh" on the rc script.  fd 0 is closed first,
+ * so a successful open() of the script returns 0 (the lowest free
+ * fd) and any non-zero result means failure.  argv_rc/envp_rc are
+ * file-level tables not visible in this hunk.
+ */
+static int do_rc(void * rc)
+{
+ close(0);
+ if (open(rc,O_RDONLY,0))
+ return -1;
+ return execve("/bin/sh", argv_rc, envp_rc);
+}
+
+/*
+ * Child body: start a shell in a fresh session with stdin, stdout
+ * and stderr all attached to /dev/tty1 (open gets fd 0, the two
+ * dup(0) calls fill fds 1 and 2).
+ */
+static int do_shell(void * shell)
+{
+ close(0);close(1);close(2);
+ setsid();
+ (void) open("/dev/tty1",O_RDWR,0);
+ (void) dup(0);
+ (void) dup(0);
+ return execve(shell, argv, envp);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Child body for the initrd case: exec the ramdisk's /linuxrc with
+ * fds 0-2 attached to /dev/tty1, analogous to do_shell().
+ */
+static int do_linuxrc(void * shell)
+{
+ static char *argv[] = { "linuxrc", NULL, };
+
+ close(0);close(1);close(2);
+ setsid();
+ (void) open("/dev/tty1",O_RDWR,0);
+ (void) dup(0);
+ (void) dup(0);
+ return execve(shell, argv, envp_init);
+}
+
+/* Boot-option handler: suppress mounting of the initrd image. */
+static void no_initrd(char *s,int *ints)
+{
+ mount_initrd = 0;
+}
+#endif
+
+/*
+ * Body of the first kernel thread: starts the kernel daemons
+ * (bdflush, kswapd), performs the optional initrd hand-over, opens
+ * the initial console, and execs the user-space init program,
+ * falling back to /etc/rc plus an endless shell-respawn loop.
+ * Returns -1 only if even the fallback shell cannot be started.
+ */
+static int init(void * unused)
+{
+ int pid,i;
+#ifdef CONFIG_BLK_DEV_INITRD
+ int real_root_mountflags;
+#endif
+
+ /* Launch bdflush from here, instead of the old syscall way. */
+ kernel_thread(bdflush, NULL, 0);
+ /* Start the background pageout daemon. */
+ kswapd_setup();
+ kernel_thread(kswapd, NULL, 0);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Remember the real root so change_root() can restore it after
+  * /linuxrc has run; the initrd is mounted read-write meanwhile. */
+ real_root_dev = ROOT_DEV;
+ real_root_mountflags = root_mountflags;
+ if (initrd_start && mount_initrd) root_mountflags &= ~MS_RDONLY;
+ else mount_initrd =0;
+#endif
+ /* NOTE(review): setup() is defined elsewhere; presumably it probes
+  * devices and mounts the root fs here -- confirm at its definition. */
+ setup();
+
+#ifdef __SMP__
+ /*
+ * With the devices probed and setup we can
+ * now enter SMP mode.
+ */
+
+ smp_begin();
+#endif
+
+ #ifdef CONFIG_UMSDOS_FS
+ {
+ /*
+ When mounting a umsdos fs as root, we detect
+ the pseudo_root (/linux) and initialise it here.
+ pseudo_root is defined in fs/umsdos/inode.c
+ */
+ extern struct inode *pseudo_root;
+ if (pseudo_root != NULL){
+ current->fs->root = pseudo_root;
+ current->fs->pwd = pseudo_root;
+ }
+ }
+ #endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ root_mountflags = real_root_mountflags;
+ if (mount_initrd && ROOT_DEV != real_root_dev && ROOT_DEV == MKDEV(RAMDISK_MAJOR,0)) {
+ int error;
+
+ /* Run /linuxrc, wait for it, then switch to the real root. */
+ pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
+ if (pid>0)
+ while (pid != wait(&i));
+ if (real_root_dev != MKDEV(RAMDISK_MAJOR, 0)) {
+ error = change_root(real_root_dev,"/initrd");
+ if (error)
+ printk(KERN_ERR "Change root to /initrd: "
+ "error %d\n",error);
+ }
+ }
+#endif
+
+ /*
+ * This keeps serial console MUCH cleaner, but does assume
+ * the console driver checks there really is a video device
+ * attached (Sparc effectively does).
+ */
+
+ if ((open("/dev/tty1",O_RDWR,0) < 0) &&
+ (open("/dev/ttyS0",O_RDWR,0) < 0))
+ printk("Unable to open an initial console.\n");
+
+ (void) dup(0);
+ (void) dup(0);
+
+ if (!execute_command) {
+ execve("/etc/init",argv_init,envp_init);
+ execve("/bin/init",argv_init,envp_init);
+ execve("/sbin/init",argv_init,envp_init);
+ /* if this fails, fall through to original stuff */
+
+ pid = kernel_thread(do_rc, "/etc/rc", SIGCHLD);
+ if (pid>0)
+ while (pid != wait(&i))
+ /* nothing */;
+ }
+
+ /* Last resort: keep respawning a shell forever. */
+ while (1) {
+ pid = kernel_thread(do_shell,
+ execute_command ? execute_command : "/bin/sh",
+ SIGCHLD);
+ if (pid < 0) {
+ printf("Fork failed in init\n\r");
+ continue;
+ }
+ while (1)
+ if (pid == wait(&i))
+ break;
+ printf("\n\rchild %d died with code %04x\n\r",pid,i);
+ sync();
+ }
+ return -1;
+}
diff --git a/linux/src/init/version.c b/linux/src/init/version.c
new file mode 100644
index 0000000..0196fdf
--- /dev/null
+++ b/linux/src/init/version.c
@@ -0,0 +1,30 @@
+/*
+ * linux/version.c
+ *
+ * Copyright (C) 1992 Theodore Ts'o
+ *
+ * May be freely distributed as part of Linux.
+ */
+
+#include <linux/config.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/compile.h>
+
+/* make the "checkconfig" script happy: we really need to include config.h */
+#ifdef CONFIG_BOGUS
+#endif
+
+/* Build the symbol name Version_<LINUX_VERSION_CODE>; external code
+ * can link against it to verify the kernel version it was built for. */
+#define version(a) Version_ ## a
+#define version_string(a) version(a)
+
+int version_string(LINUX_VERSION_CODE) = 0;
+
+/* Data returned by uname(2); all values are compile-time constants
+ * from <linux/version.h> and <linux/compile.h>. */
+struct new_utsname system_utsname = {
+ UTS_SYSNAME, UTS_NODENAME, UTS_RELEASE, UTS_VERSION,
+ UTS_MACHINE, UTS_DOMAINNAME
+};
+
+/* Boot banner printed from start_kernel(). */
+const char *linux_banner =
+ "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
diff --git a/linux/src/kernel/dma.c b/linux/src/kernel/dma.c
new file mode 100644
index 0000000..724d8ca
--- /dev/null
+++ b/linux/src/kernel/dma.c
@@ -0,0 +1,99 @@
+/* $Id: dma.c,v 1.1 1999/04/26 05:58:29 tb Exp $
+ * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
+ *
+ * Written by Hennus Bergman, 1992.
+ *
+ * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
+ * In the previous version the reported device could end up being wrong,
+ * if a device requested a DMA channel that was already in use.
+ * [It also happened to remove the sizeof(char *) == sizeof(int)
+ * assumption introduced because of those /proc/dma patches. -- Hennus]
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+
+
+/* A note on resource allocation:
+ *
+ * All drivers needing DMA channels, should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
+ * When releasing them, first release the DMA, then release the IRQ.
+ * If you don't, you may cause allocation requests to fail unnecessarily.
+ * This doesn't really matter now, but it will once we get real semaphores
+ * in the kernel.
+ */
+
+
+
+/* Channel n is busy iff dma_chan_busy[n].lock != 0.
+ * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
+ * DMA4 is reserved for cascading.
+ */
+
+struct dma_chan {
+ int lock; /* non-zero while the channel is allocated */
+ const char *device_id; /* owner's name, reported in /proc/dma */
+};
+
+/* Channel 4 is pre-allocated: it cascades the second controller. */
+static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 1, "cascade" },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 }
+};
+
+/* Generate the /proc/dma report: one "NN: name" line per busy
+ * channel.  Returns the number of characters written to buf. */
+int get_dma_list(char *buf)
+{
+ int i, len = 0;
+
+ for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
+ if (dma_chan_busy[i].lock) {
+ len += sprintf(buf+len, "%2d: %s\n",
+ i,
+ dma_chan_busy[i].device_id);
+ }
+ }
+ return len;
+} /* get_dma_list */
+
+
+/*
+ * Claim DMA channel dmanr for device_id.  xchg() makes the
+ * test-and-set of the busy flag atomic, so no extra locking is
+ * needed.  Returns 0 on success, -EINVAL for a bad channel number,
+ * -EBUSY if the channel is already taken.
+ */
+int request_dma(unsigned int dmanr, const char * device_id)
+{
+ if (dmanr >= MAX_DMA_CHANNELS)
+ return -EINVAL;
+
+ if (xchg(&dma_chan_busy[dmanr].lock, 1) != 0)
+ return -EBUSY;
+
+ dma_chan_busy[dmanr].device_id = device_id;
+
+ /* old flag was 0, now contains 1 to indicate busy */
+ return 0;
+} /* request_dma */
+
+
+/*
+ * Release a DMA channel previously claimed with request_dma().
+ * Complains (but does not crash) if the channel number is out of
+ * range or the channel was not actually busy.
+ */
+void free_dma(unsigned int dmanr)
+{
+ if (dmanr >= MAX_DMA_CHANNELS) {
+ /* %u, not %d: dmanr is unsigned int. */
+ printk("Trying to free DMA%u\n", dmanr);
+ return;
+ }
+
+ /* Atomically clear the busy flag; a previous value of 0 means
+  * the caller is freeing a channel it never owned. */
+ if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
+ printk("Trying to free free DMA%u\n", dmanr);
+ return;
+ }
+
+} /* free_dma */
diff --git a/linux/src/kernel/printk.c b/linux/src/kernel/printk.c
new file mode 100644
index 0000000..da8ffca
--- /dev/null
+++ b/linux/src/kernel/printk.c
@@ -0,0 +1,253 @@
+/*
+ * linux/kernel/printk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Modified to make sys_syslog() more flexible: added commands to
+ * return the last 4k of kernel messages, regardless of whether
+ * they've been read or not. Added option to suppress kernel printk's
+ * to the console. Added hook for sending the console messages
+ * elsewhere, in preparation for a serial line console (someday).
+ * Ted Ts'o, 2/11/93.
+ */
+
+#include <stdarg.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+#define LOG_BUF_LEN 8192
+
+static char buf[1024];
+
+extern void console_print(const char *);
+
+/* printk's without a loglevel use this.. */
+#define DEFAULT_MESSAGE_LOGLEVEL 4 /* KERN_WARNING */
+
+/* We show everything that is MORE important than this.. */
+#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
+#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
+
+unsigned long log_size = 0;
+struct wait_queue * log_wait = NULL;
+int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+
+static void (*console_print_proc)(const char *) = 0;
+static char log_buf[LOG_BUF_LEN];
+static unsigned long log_start = 0;
+static unsigned long logged_chars = 0;
+
+/*
+ * Commands to sys_syslog:
+ *
+ * 0 -- Close the log. Currently a NOP.
+ * 1 -- Open the log. Currently a NOP.
+ * 2 -- Read from the log.
+ * 3 -- Read up to the last 4k of messages in the ring buffer.
+ * 4 -- Read and clear last 4k of messages in the ring buffer
+ * 5 -- Clear ring buffer.
+ * 6 -- Disable printk's to console
+ * 7 -- Enable printk's to console
+ * 8 -- Set level of messages printed to console
+ */
+asmlinkage int sys_syslog(int type, char * buf, int len)
+{
+ unsigned long i, j, count;
+ int do_clear = 0;
+ char c;
+ int error;
+
+ /* Type 3 (non-destructive read of recent messages) is the only
+  * command permitted without superuser rights. */
+ if ((type != 3) && !suser())
+ return -EPERM;
+ switch (type) {
+ case 0: /* Close log */
+ return 0;
+ case 1: /* Open log */
+ return 0;
+ case 2: /* Read from log */
+ if (!buf || len < 0)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ error = verify_area(VERIFY_WRITE,buf,len);
+ if (error)
+ return error;
+ /* cli() guards log_start/log_size against printk() from
+  * interrupt context; block until a message arrives. */
+ cli();
+ while (!log_size) {
+ if (current->signal & ~current->blocked) {
+ sti();
+ return -ERESTARTSYS;
+ }
+ interruptible_sleep_on(&log_wait);
+ }
+ i = 0;
+ /* Consume characters from the ring, re-enabling interrupts
+  * around the (possibly faulting) user-space copy. */
+ while (log_size && i < len) {
+ c = *((char *) log_buf+log_start);
+ log_start++;
+ log_size--;
+ log_start &= LOG_BUF_LEN-1;
+ sti();
+ put_user(c,buf);
+ buf++;
+ i++;
+ cli();
+ }
+ sti();
+ return i;
+ case 4: /* Read/clear last kernel messages */
+ do_clear = 1;
+ /* FALL THRU */
+ case 3: /* Read last kernel messages */
+ if (!buf || len < 0)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ error = verify_area(VERIFY_WRITE,buf,len);
+ if (error)
+ return error;
+ /* Copy out the newest min(len, LOG_BUF_LEN, logged_chars)
+  * characters without consuming them. */
+ count = len;
+ if (count > LOG_BUF_LEN)
+ count = LOG_BUF_LEN;
+ if (count > logged_chars)
+ count = logged_chars;
+ j = log_start + log_size - count;
+ for (i = 0; i < count; i++) {
+ c = *((char *) log_buf+(j++ & (LOG_BUF_LEN-1)));
+ put_user(c, buf++);
+ }
+ if (do_clear)
+ logged_chars = 0;
+ return i;
+ case 5: /* Clear ring buffer */
+ logged_chars = 0;
+ return 0;
+ case 6: /* Disable logging to console */
+ console_loglevel = MINIMUM_CONSOLE_LOGLEVEL;
+ return 0;
+ case 7: /* Enable logging to console */
+ console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+ return 0;
+ case 8: /* Set console loglevel (len carries the level, 1..8) */
+ if (len < 1 || len > 8)
+ return -EINVAL;
+ if (len < MINIMUM_CONSOLE_LOGLEVEL)
+ len = MINIMUM_CONSOLE_LOGLEVEL;
+ console_loglevel = len;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+
+/*
+ * Kernel printf: formats into the static "buf" (3 bytes reserved in
+ * front for a "<N>" loglevel prefix), appends each line to the log
+ * ring buffer, and hands lines below console_loglevel to the
+ * registered console driver.  Interrupts are disabled throughout to
+ * protect the static buffer and the ring indices.
+ */
+asmlinkage int printk(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+ char *msg, *p, *buf_end;
+ /* NOTE(review): the "< 0" tests below rely on plain char being
+  * signed (true on i386) -- confirm for other targets. */
+ static char msg_level = -1;
+ long flags;
+
+ save_flags(flags);
+ cli();
+ va_start(args, fmt);
+ i = vsprintf(buf + 3, fmt, args); /* hopefully i < sizeof(buf)-4 */
+ buf_end = buf + 3 + i;
+ va_end(args);
+ for (p = buf + 3; p < buf_end; p++) {
+ msg = p;
+ /* At the start of a line: parse the "<N>" prefix, or back up
+  * three bytes into the reserved space and synthesize one. */
+ if (msg_level < 0) {
+ if (
+ p[0] != '<' ||
+ p[1] < '0' ||
+ p[1] > '7' ||
+ p[2] != '>'
+ ) {
+ p -= 3;
+ p[0] = '<';
+ p[1] = DEFAULT_MESSAGE_LOGLEVEL + '0';
+ p[2] = '>';
+ } else
+ msg += 3;
+ msg_level = p[1] - '0';
+ }
+ /* Copy the line (prefix included) into the ring buffer,
+  * dropping the oldest characters once it is full. */
+ for (; p < buf_end; p++) {
+ log_buf[(log_start+log_size) & (LOG_BUF_LEN-1)] = *p;
+ if (log_size < LOG_BUF_LEN)
+ log_size++;
+ else {
+ log_start++;
+ log_start &= LOG_BUF_LEN-1;
+ }
+ logged_chars++;
+ if (*p == '\n')
+ break;
+ }
+ if (msg_level < console_loglevel && console_print_proc) {
+ /* Temporarily NUL-terminate the line for the console. */
+ char tmp = p[1];
+ p[1] = '\0';
+ (*console_print_proc)(msg);
+ p[1] = tmp;
+ }
+ if (*p == '\n')
+ msg_level = -1;
+ }
+ restore_flags(flags);
+ wake_up_interruptible(&log_wait);
+ return i;
+}
+
+/*
+ * The console driver calls this routine during kernel initialization
+ * to register the console printing procedure with printk() and to
+ * print any messages that were printed by the kernel before the
+ * console driver was initialized.
+ */
+void register_console(void (*proc)(const char *))
+{
+ int i,j;
+ int p = log_start;
+ char buf[16];
+ /* NOTE(review): like printk(), assumes plain char is signed. */
+ char msg_level = -1;
+ char *q;
+
+ console_print_proc = proc;
+
+ /* Replay the buffered log through the new console in chunks of
+  * at most sizeof(buf)-1 characters, stripping the "<N>" prefix
+  * at the start of each logical line and filtering by loglevel. */
+ for (i=0,j=0; i < log_size; i++) {
+ buf[j++] = log_buf[p];
+ p++; p &= LOG_BUF_LEN-1;
+ if (buf[j-1] != '\n' && i < log_size - 1 && j < sizeof(buf)-1)
+ continue;
+ buf[j] = 0;
+ q = buf;
+ if (msg_level < 0) {
+ msg_level = buf[1] - '0';
+ q = buf + 3;
+ }
+ if (msg_level < console_loglevel)
+ (*proc)(q);
+ if (buf[j-1] == '\n')
+ msg_level = -1;
+ j = 0;
+ }
+}
+
+/*
+ * Write a message to a certain tty, not just the console. This is used for
+ * messages that need to be redirected to a specific tty.
+ * We don't put it into the syslog queue right now maybe in the future if
+ * really needed.
+ */
+void tty_write_message(struct tty_struct *tty, char *msg)
+{
+ /* Write straight through the tty driver; nothing is logged. */
+ if (tty && tty->driver.write)
+ tty->driver.write(tty, 0, msg, strlen(msg));
+ return;
+}
diff --git a/linux/src/kernel/resource.c b/linux/src/kernel/resource.c
new file mode 100644
index 0000000..7d7a4ad
--- /dev/null
+++ b/linux/src/kernel/resource.c
@@ -0,0 +1,129 @@
+/*
+ * linux/kernel/resource.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * David Hinds
+ *
+ * Kernel io-region resource management
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+#define IOTABLE_SIZE 128
+
+typedef struct resource_entry_t {
+ u_long from, num;
+ const char *name;
+ struct resource_entry_t *next;
+} resource_entry_t;
+
+static resource_entry_t iolist = { 0, 0, "", NULL };
+
+static resource_entry_t iotable[IOTABLE_SIZE];
+
+/*
+ * This generates the report for /proc/ioports
+ */
+int get_ioport_list(char *buf)
+{
+ resource_entry_t *p;
+ int len = 0;
+
+ /* Stop well before the 4K /proc page is full. */
+ for (p = iolist.next; (p) && (len < 4000); p = p->next)
+ len += sprintf(buf+len, "%04lx-%04lx : %s\n",
+ p->from, p->from+p->num-1, p->name);
+ if (p)
+ len += sprintf(buf+len, "4K limit reached!\n");
+ return len;
+}
+
+/*
+ * The workhorse function: find where to put a new entry
+ */
+static resource_entry_t *find_gap(resource_entry_t *root,
+ u_long from, u_long num)
+{
+ unsigned long flags;
+ resource_entry_t *p;
+
+ /* Reject a range whose end wraps around the address space. */
+ if (from > from+num-1)
+ return NULL;
+ save_flags(flags);
+ cli();
+ /* Walk the sorted list: return the entry after which the new
+  * range fits, or NULL if it overlaps an existing region. */
+ for (p = root; ; p = p->next) {
+ if ((p != root) && (p->from+p->num-1 >= from)) {
+ p = NULL;
+ break;
+ }
+ if ((p->next == NULL) || (p->next->from > from+num-1))
+ break;
+ }
+ restore_flags(flags);
+ return p;
+}
+
+/*
+ * Call this from the device driver to register the ioport region.
+ */
+void request_region(unsigned int from, unsigned int num, const char *name)
+{
+ resource_entry_t *p;
+ int i;
+
+ /* Grab a free static slot (num == 0 marks a slot unused). */
+ for (i = 0; i < IOTABLE_SIZE; i++)
+ if (iotable[i].num == 0)
+ break;
+ if (i == IOTABLE_SIZE)
+ printk("warning: ioport table is full\n");
+ else {
+ /* An overlapping request is silently ignored here; callers
+  * are expected to have used check_region() first. */
+ p = find_gap(&iolist, from, num);
+ if (p == NULL)
+ return;
+ iotable[i].name = name;
+ iotable[i].from = from;
+ iotable[i].num = num;
+ iotable[i].next = p->next;
+ p->next = &iotable[i];
+ return;
+ }
+}
+
+/*
+ * Call this when the device driver is unloaded
+ */
+void release_region(unsigned int from, unsigned int num)
+{
+ resource_entry_t *p, *q;
+
+ /* Unlink the exactly-matching entry and mark its slot free
+  * (num = 0); a non-matching range is silently ignored. */
+ for (p = &iolist; ; p = q) {
+ q = p->next;
+ if (q == NULL)
+ break;
+ if ((q->from == from) && (q->num == num)) {
+ q->num = 0;
+ p->next = q->next;
+ return;
+ }
+ }
+}
+
+/*
+ * Call this to check the ioport region before probing
+ */
+int check_region(unsigned int from, unsigned int num)
+{
+ /* -EBUSY if [from, from+num-1] overlaps a registered region. */
+ return (find_gap(&iolist, from, num) == NULL) ? -EBUSY : 0;
+}
+
+/* Called from init/main.c to reserve IO ports. */
+void reserve_setup(char *str, int *ints)
+{
+ int i;
+
+ /* ints[0] holds the count; the rest are (start, extent) pairs. */
+ for (i = 1; i < ints[0]; i += 2)
+ request_region(ints[i], ints[i+1], "reserved");
+}
diff --git a/linux/src/kernel/sched.c b/linux/src/kernel/sched.c
new file mode 100644
index 0000000..0904f59
--- /dev/null
+++ b/linux/src/kernel/sched.c
@@ -0,0 +1,1747 @@
+/*
+ * linux/kernel/sched.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1996-04-21 Modified by Ulrich Windl to make NTP work
+ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
+ * make semaphores SMP safe
+ * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+
+/*
+ * 'sched.c' is the main kernel file. It contains scheduling primitives
+ * (sleep_on, wakeup, schedule etc) as well as a number of simple system
+ * call functions (type getpid()), which just extract a field from
+ * current-task
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/fdreg.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tqueue.h>
+#include <linux/resource.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+
+#include <linux/timex.h>
+
+/*
+ * kernel variables
+ */
+
+int securelevel = 0; /* system security level */
+
+long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
+volatile struct timeval xtime; /* The current time */
+int tickadj = 500/HZ ? 500/HZ : 1; /* microsecs */
+
+DECLARE_TASK_QUEUE(tq_timer);
+DECLARE_TASK_QUEUE(tq_immediate);
+DECLARE_TASK_QUEUE(tq_scheduler);
+
+/*
+ * phase-lock loop variables
+ */
+/* TIME_ERROR prevents overwriting the CMOS clock */
+int time_state = TIME_ERROR; /* clock synchronization status */
+int time_status = STA_UNSYNC; /* clock status bits */
+long time_offset = 0; /* time adjustment (us) */
+long time_constant = 2; /* pll time constant */
+long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
+long time_precision = 1; /* clock precision (us) */
+long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
+long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
+long time_phase = 0; /* phase offset (scaled us) */
+long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC; /* frequency offset (scaled ppm) */
+long time_adj = 0; /* tick adjust (scaled 1 / HZ) */
+long time_reftime = 0; /* time at last adjustment (s) */
+
+long time_adjust = 0;
+long time_adjust_step = 0;
+
+int need_resched = 0;
+unsigned long event = 0;
+
+extern int _setitimer(int, struct itimerval *, struct itimerval *);
+unsigned int * prof_buffer = NULL;
+unsigned long prof_len = 0;
+unsigned long prof_shift = 0;
+
+#define _S(nr) (1<<((nr)-1))
+
+extern void mem_use(void);
+extern unsigned long get_wchan(struct task_struct *);
+
+static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
+unsigned long init_user_stack[1024] = { STACK_MAGIC, };
+static struct vm_area_struct init_mmap = INIT_MMAP;
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+
+struct mm_struct init_mm = INIT_MM;
+struct task_struct init_task = INIT_TASK;
+
+unsigned long volatile jiffies=0;
+
+struct task_struct *current_set[NR_CPUS];
+struct task_struct *last_task_used_math = NULL;
+
+struct task_struct * task[NR_TASKS] = {&init_task, };
+
+struct kernel_stat kstat = { 0 };
+
+/*
+ * Link p onto the back of the circular run-queue threaded through
+ * init_task.  Sets need_resched when p should preempt the current
+ * process; on SMP, also kicks an idle CPU with a reschedule message.
+ * The SMP path requires the caller to have interrupts disabled.
+ */
+static inline void add_to_runqueue(struct task_struct * p)
+{
+#ifdef __SMP__
+ int cpu=smp_processor_id();
+#endif
+#if 1 /* sanity tests */
+ if (p->next_run || p->prev_run) {
+ printk("task already on run-queue\n");
+ return;
+ }
+#endif
+ if (p->policy != SCHED_OTHER || p->counter > current->counter + 3)
+ need_resched = 1;
+ nr_running++;
+ (p->prev_run = init_task.prev_run)->next_run = p;
+ p->next_run = &init_task;
+ init_task.prev_run = p;
+#ifdef __SMP__
+ /* this is safe only if called with cli()*/
+ /* NOTE(review): bit 31 of smp_process_available acts as a
+  * hand-rolled spinlock; while spinning, this CPU services any
+  * pending TLB-invalidate request to avoid a deadlock. */
+ while(set_bit(31,&smp_process_available))
+ {
+ while(test_bit(31,&smp_process_available))
+ {
+ if(clear_bit(cpu,&smp_invalidate_needed))
+ {
+ local_flush_tlb();
+ set_bit(cpu,&cpu_callin_map[0]);
+ }
+ }
+ }
+ smp_process_available++;
+ clear_bit(31,&smp_process_available);
+ /* Wake one idle CPU (running its pid-0 idle task), if any. */
+ if ((0!=p->pid) && smp_threads_ready)
+ {
+ int i;
+ for (i=0;i<smp_num_cpus;i++)
+ {
+ if (0==current_set[cpu_logical_map[i]]->pid)
+ {
+ smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
+ break;
+ }
+ }
+ }
+#endif
+}
+
+/*
+ * Unlink p from the circular run-queue.  The idle task (init_task)
+ * is never removed; attempts to do so are reported (at most five
+ * times) and ignored.
+ */
+static inline void del_from_runqueue(struct task_struct * p)
+{
+ struct task_struct *next = p->next_run;
+ struct task_struct *prev = p->prev_run;
+
+#if 1 /* sanity tests */
+ if (!next || !prev) {
+ printk("task not on run-queue\n");
+ return;
+ }
+#endif
+ if (p == &init_task) {
+ static int nr = 0;
+ /* Rate-limit the complaint to the first five occurrences. */
+ if (nr < 5) {
+ nr++;
+ printk("idle task may not sleep\n");
+ }
+ return;
+ }
+ nr_running--;
+ next->prev_run = prev;
+ prev->next_run = next;
+ p->next_run = NULL;
+ p->prev_run = NULL;
+}
+
+/*
+ * Move p to the back of the run-queue (just before init_task),
+ * e.g. when a SCHED_RR process has used up its time slice.
+ */
+static inline void move_last_runqueue(struct task_struct * p)
+{
+ struct task_struct *next = p->next_run;
+ struct task_struct *prev = p->prev_run;
+
+ /* remove from list */
+ next->prev_run = prev;
+ prev->next_run = next;
+ /* add back to list */
+ p->next_run = &init_task;
+ prev = init_task.prev_run;
+ init_task.prev_run = p;
+ p->prev_run = prev;
+ prev->next_run = p;
+}
+
+/*
+ * Wake up a process. Put it on the run-queue if it's not
+ * already there. The "current" process is always on the
+ * run-queue (except when the actual re-schedule is in
+ * progress), and as such you're allowed to do the simpler
+ * "current->state = TASK_RUNNING" to mark yourself runnable
+ * without the overhead of this.
+ */
+inline void wake_up_process(struct task_struct * p)
+{
+ unsigned long flags;
+
+ /* Interrupts off: the run-queue is also touched from irq level. */
+ save_flags(flags);
+ cli();
+ p->state = TASK_RUNNING;
+ if (!p->next_run)
+ add_to_runqueue(p);
+ restore_flags(flags);
+}
+
+/* Timer callback: wake the process whose sleep timeout expired.
+ * __data carries the task pointer (see schedule()'s timer setup). */
+static void process_timeout(unsigned long __data)
+{
+ struct task_struct * p = (struct task_struct *) __data;
+
+ p->timeout = 0;
+ wake_up_process(p);
+}
+
+/*
+ * This is the function that decides how desirable a process is..
+ * You can weigh different processes against each other depending
+ * on what CPU they've run on lately etc to try to handle cache
+ * and TLB miss penalties.
+ *
+ * Return values:
+ * -1000: never select this
+ * 0: out of time, recalculate counters (but it might still be
+ * selected)
+ * +ve: "goodness" value (the larger, the better)
+ * +1000: realtime process, select this.
+ */
+static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
+{
+ int weight;
+
+#ifdef __SMP__
+ /* We are not permitted to run a task someone else is running */
+ if (p->processor != NO_PROC_ID)
+ return -1000;
+#ifdef PAST_2_0
+ /* This process is locked to a processor group.
+  * (Fix: the original condition was missing its closing
+  * parenthesis and could not compile with PAST_2_0 defined.) */
+ if (p->processor_mask && !(p->processor_mask & (1<<this_cpu)))
+ return -1000;
+#endif
+#endif
+
+ /*
+ * Realtime process, select the first one on the
+ * runqueue (taking priorities within processes
+ * into account).
+ */
+ if (p->policy != SCHED_OTHER)
+ return 1000 + p->rt_priority;
+
+ /*
+ * Give the process a first-approximation goodness value
+ * according to the number of clock-ticks it has left.
+ *
+ * Don't do any other calculations if the time slice is
+ * over..
+ */
+ weight = p->counter;
+ if (weight) {
+
+#ifdef __SMP__
+ /* Give a largish advantage to the same processor... */
+ /* (this is equivalent to penalizing other processors) */
+ if (p->last_processor == this_cpu)
+ weight += PROC_CHANGE_PENALTY;
+#endif
+
+ /* .. and a slight advantage to the current process */
+ if (p == prev)
+ weight += 1;
+ }
+
+ return weight;
+}
+
+
+/*
+ The following allow_interrupts function is used to workaround a rare but
+ nasty deadlock situation that is possible for 2.0.x Intel SMP because it uses
+ a single kernel lock and interrupts are only routed to the boot CPU. There
+ are two deadlock scenarios this code protects against.
+
+ The first scenario is that if a CPU other than the boot CPU holds the kernel
+ lock and needs to wait for an operation to complete that itself requires an
+ interrupt, there is a deadlock since the boot CPU may be able to accept the
+ interrupt but will not be able to acquire the kernel lock to process it.
+
+ The workaround for this deadlock requires adding calls to allow_interrupts to
+ places where this deadlock is possible. These places are known to be present
+ in buffer.c and keyboard.c. It is also possible that there are other such
+ places which have not been identified yet. In order to break the deadlock,
+ the code in allow_interrupts temporarily yields the kernel lock directly to
+ the boot CPU to allow the interrupt to be processed. The boot CPU interrupt
+ entry code indicates that it is spinning waiting for the kernel lock by
+ setting the smp_blocked_interrupt_pending variable. This code notices that
+ and manipulates the active_kernel_processor variable to yield the kernel lock
+ without ever clearing it. When the interrupt has been processed, the
+ saved_active_kernel_processor variable contains the value for the interrupt
+ exit code to restore, either the APICID of the CPU that granted it the kernel
+ lock, or NO_PROC_ID in the normal case where no yielding occurred. Restoring
+ active_kernel_processor from saved_active_kernel_processor returns the kernel
+ lock back to the CPU that yielded it.
+
+ The second form of deadlock is even more insidious. Suppose the boot CPU
+ takes a page fault and then the previous scenario ensues. In this case, the
+ boot CPU would spin with interrupts disabled waiting to acquire the kernel
+ lock. To resolve this deadlock, the kernel lock acquisition code must enable
+ interrupts briefly so that the pending interrupt can be handled as in the
+ case above.
+
+ An additional form of deadlock is where kernel code running on a non-boot CPU
+ waits for the jiffies variable to be incremented. This deadlock is avoided
+ by having the spin loops in ENTER_KERNEL increment jiffies approximately
+ every 10 milliseconds. Finally, if approximately 60 seconds elapse waiting
+ for the kernel lock, a message will be printed if possible to indicate that a
+ deadlock has been detected.
+
+ Leonard N. Zubkoff
+ 4 August 1997
+*/
+
+#if defined(__SMP__) && defined(__i386__)
+
+/* Set by the boot CPU's interrupt entry while it spins waiting for
+ * the kernel lock (see the long explanation above). */
+volatile unsigned char smp_blocked_interrupt_pending = 0;
+
+/* APICID to restore into active_kernel_processor on interrupt exit;
+ * NO_PROC_ID while no yielding is in progress. */
+volatile unsigned char saved_active_kernel_processor = NO_PROC_ID;
+
+/*
+ * Temporarily yield the kernel lock to the boot CPU so it can take
+ * a pending interrupt, then spin until the lock is handed back.
+ */
+void allow_interrupts(void)
+{
+ if (smp_processor_id() == boot_cpu_id) return;
+ if (smp_blocked_interrupt_pending)
+ {
+ unsigned long saved_kernel_counter;
+ long timeout_counter;
+ saved_active_kernel_processor = active_kernel_processor;
+ saved_kernel_counter = kernel_counter;
+ kernel_counter = 0;
+ active_kernel_processor = boot_cpu_id;
+ /* 6000000 iterations of udelay(10) ~= 60 seconds. */
+ timeout_counter = 6000000;
+ while (active_kernel_processor != saved_active_kernel_processor &&
+ --timeout_counter >= 0)
+ {
+ udelay(10);
+ barrier();
+ }
+ if (timeout_counter < 0)
+ panic("FORWARDED INTERRUPT TIMEOUT (AKP = %d, Saved AKP = %d)\n",
+ active_kernel_processor, saved_active_kernel_processor);
+ kernel_counter = saved_kernel_counter;
+ saved_active_kernel_processor = NO_PROC_ID;
+ }
+}
+
+#else
+
+/* Uniprocessor / non-i386 build: nothing to do. */
+void allow_interrupts(void) {}
+
+#endif
+
+
+/*
+ * 'schedule()' is the scheduler function. It's a very simple and nice
+ * scheduler: it's not perfect, but certainly works for most things.
+ *
+ * The goto is "interesting".
+ *
+ * NOTE!! Task 0 is the 'idle' task, which gets called when no other
+ * tasks can run. It can not be killed, and it cannot sleep. The 'state'
+ * information in task[0] is never used.
+ */
+asmlinkage void schedule(void)
+{
+ int c;
+ struct task_struct * p;
+ struct task_struct * prev, * next;
+ unsigned long timeout = 0;
+ int this_cpu=smp_processor_id();
+
+/* check alarm, wake up any interruptible tasks that have got a signal */
+
+ allow_interrupts();
+
+ if (intr_count)
+ goto scheduling_in_interrupt;
+
+ /* Run pending bottom halves before picking a new process. */
+ if (bh_active & bh_mask) {
+ intr_count = 1;
+ do_bottom_half();
+ intr_count = 0;
+ }
+
+ run_task_queue(&tq_scheduler);
+
+ need_resched = 0;
+ prev = current;
+ cli();
+ /* move an exhausted RR process to be last.. */
+ if (!prev->counter && prev->policy == SCHED_RR) {
+ prev->counter = prev->priority;
+ move_last_runqueue(prev);
+ }
+ switch (prev->state) {
+ case TASK_INTERRUPTIBLE:
+ if (prev->signal & ~prev->blocked)
+ goto makerunnable;
+ timeout = prev->timeout;
+ if (timeout && (timeout <= jiffies)) {
+ prev->timeout = 0;
+ timeout = 0;
+ makerunnable:
+ prev->state = TASK_RUNNING;
+ break;
+ }
+ /* fallthrough: still sleeping, leave the run-queue */
+ default:
+ del_from_runqueue(prev);
+ /* fallthrough: TASK_RUNNING stays on the run-queue */
+ case TASK_RUNNING:
+ }
+ p = init_task.next_run;
+ sti();
+
+#ifdef __SMP__
+ /*
+ * This is safe as we do not permit re-entry of schedule()
+ */
+ prev->processor = NO_PROC_ID;
+#define idle_task (task[cpu_number_map[this_cpu]])
+#else
+#define idle_task (&init_task)
+#endif
+
+/*
+ * Note! there may appear new tasks on the run-queue during this, as
+ * interrupts are enabled. However, they will be put on front of the
+ * list, so our list starting at "p" is essentially fixed.
+ */
+/* this is the scheduler proper: */
+ c = -1000;
+ next = idle_task;
+ while (p != &init_task) {
+ int weight = goodness(p, prev, this_cpu);
+ if (weight > c)
+ c = weight, next = p;
+ p = p->next_run;
+ }
+
+ /* if all runnable processes have "counter == 0", re-calculate counters */
+ if (!c) {
+ for_each_task(p)
+ p->counter = (p->counter >> 1) + p->priority;
+ }
+#ifdef __SMP__
+ /*
+ * Allocate process to CPU
+ */
+
+ next->processor = this_cpu;
+ next->last_processor = this_cpu;
+#endif
+#ifdef __SMP_PROF__
+ /* mark processor running an idle thread */
+ if (0==next->pid)
+ set_bit(this_cpu,&smp_idle_map);
+ else
+ clear_bit(this_cpu,&smp_idle_map);
+#endif
+ if (prev != next) {
+ struct timer_list timer;
+
+ kstat.context_swtch++;
+ /* Arm a one-shot timer so a timed sleep is woken by
+  * process_timeout(); it is removed again after the switch. */
+ if (timeout) {
+ init_timer(&timer);
+ timer.expires = timeout;
+ timer.data = (unsigned long) prev;
+ timer.function = process_timeout;
+ add_timer(&timer);
+ }
+ get_mmu_context(next);
+ switch_to(prev,next);
+ if (timeout)
+ del_timer(&timer);
+ }
+ return;
+
+scheduling_in_interrupt:
+ printk("Aiee: scheduling in interrupt %p\n",
+ __builtin_return_address(0));
+}
+
+#ifndef __alpha__
+
+/*
+ * For backwards compatibility? This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+asmlinkage int sys_pause(void)
+{
+ /* Sleep until a signal arrives. */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
+
+#endif
+
+/*
+ * wake_up doesn't wake up stopped processes - they have to be awakened
+ * with signals or similar.
+ *
+ * Note that this doesn't need cli-sti pairs: interrupts may not change
+ * the wait-queue structures directly, but only call wake_up() to wake
+ * a process. The process itself must remove the queue once it has woken.
+ */
+void wake_up(struct wait_queue **q)
+{
+ struct wait_queue *next;
+ struct wait_queue *head;
+
+ if (!q || !(next = *q))
+ return;
+ /* The queue is circular: stop when we are back at the head. */
+ head = WAIT_QUEUE_HEAD(q);
+ while (next != head) {
+ struct task_struct *p = next->task;
+ next = next->next;
+ if (p != NULL) {
+ if ((p->state == TASK_UNINTERRUPTIBLE) ||
+ (p->state == TASK_INTERRUPTIBLE))
+ wake_up_process(p);
+ }
+ /* A NULL link means the queue is corrupt: report and stop. */
+ if (!next)
+ goto bad;
+ }
+ return;
+bad:
+ printk("wait_queue is bad (eip = %p)\n",
+ __builtin_return_address(0));
+ printk(" q = %p\n",q);
+ printk(" *q = %p\n",*q);
+}
+
+void wake_up_interruptible(struct wait_queue **q)
+{
+ struct wait_queue *next;
+ struct wait_queue *head;
+
+ if (!q || !(next = *q))
+ return;
+ head = WAIT_QUEUE_HEAD(q);
+ while (next != head) {
+ struct task_struct *p = next->task;
+ next = next->next;
+ if (p != NULL) {
+ if (p->state == TASK_INTERRUPTIBLE)
+ wake_up_process(p);
+ }
+ if (!next)
+ goto bad;
+ }
+ return;
+bad:
+ printk("wait_queue is bad (eip = %p)\n",
+ __builtin_return_address(0));
+ printk(" q = %p\n",q);
+ printk(" *q = %p\n",*q);
+}
+
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * This routine must execute atomically.
+ */
+static inline int waking_non_zero(struct semaphore *sem)
+{
+ int ret ;
+ long flags ;
+
+ get_buzz_lock(&sem->lock) ;
+ save_flags(flags) ;
+ cli() ;
+
+ if ((ret = (sem->waking > 0)))
+ sem->waking-- ;
+
+ restore_flags(flags) ;
+ give_buzz_lock(&sem->lock) ;
+ return(ret) ;
+}
+
+/*
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+ atomic_inc(&sem->waking) ;
+ wake_up(&sem->wait);
+}
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+int __do_down(struct semaphore * sem, int task_state)
+{
+ struct task_struct *tsk = current;
+ struct wait_queue wait = { tsk, NULL };
+ int ret = 0 ;
+
+ tsk->state = task_state;
+ add_wait_queue(&sem->wait, &wait);
+
+ /*
+ * Ok, we're set up. sem->count is known to be less than zero
+ * so we must wait.
+ *
+ * We can let go of the lock for purposes of waiting.
+ * We re-acquire it after awaking so as to protect
+ * all semaphore operations.
+ *
+ * If "up()" is called before we call waking_non_zero() then
+ * we will catch it right away. If it is called later then
+ * we will have to go through a wakeup cycle to catch it.
+ *
+ * Multiple waiters contend for the semaphore lock to see
+ * who gets to gate through and who has to wait some more.
+ */
+ for (;;)
+ {
+ if (waking_non_zero(sem)) /* are we waking up? */
+ break ; /* yes, exit loop */
+
+ if ( task_state == TASK_INTERRUPTIBLE
+ && (tsk->signal & ~tsk->blocked) /* signalled */
+ )
+ {
+ ret = -EINTR ; /* interrupted */
+ atomic_inc(&sem->count) ; /* give up on down operation */
+ break ;
+ }
+
+ schedule();
+ tsk->state = task_state;
+ }
+
+ tsk->state = TASK_RUNNING;
+ remove_wait_queue(&sem->wait, &wait);
+ return(ret) ;
+
+} /* __do_down */
+
+void __down(struct semaphore * sem)
+{
+ __do_down(sem,TASK_UNINTERRUPTIBLE) ;
+}
+
+int __down_interruptible(struct semaphore * sem)
+{
+ return(__do_down(sem,TASK_INTERRUPTIBLE)) ;
+}
+
+
+static inline void __sleep_on(struct wait_queue **p, int state)
+{
+ unsigned long flags;
+ struct wait_queue wait = { current, NULL };
+
+ if (!p)
+ return;
+ if (current == task[0])
+ panic("task[0] trying to sleep");
+ current->state = state;
+ save_flags(flags);
+ cli();
+ __add_wait_queue(p, &wait);
+ sti();
+ schedule();
+ cli();
+ __remove_wait_queue(p, &wait);
+ restore_flags(flags);
+}
+
+void interruptible_sleep_on(struct wait_queue **p)
+{
+ __sleep_on(p,TASK_INTERRUPTIBLE);
+}
+
+void sleep_on(struct wait_queue **p)
+{
+ __sleep_on(p,TASK_UNINTERRUPTIBLE);
+}
+
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+#define SLOW_BUT_DEBUGGING_TIMERS 0
+
+struct timer_vec {
+ int index;
+ struct timer_list *vec[TVN_SIZE];
+};
+
+struct timer_vec_root {
+ int index;
+ struct timer_list *vec[TVR_SIZE];
+};
+
+static struct timer_vec tv5 = { 0 };
+static struct timer_vec tv4 = { 0 };
+static struct timer_vec tv3 = { 0 };
+static struct timer_vec tv2 = { 0 };
+static struct timer_vec_root tv1 = { 0 };
+
+static struct timer_vec * const tvecs[] = {
+ (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
+};
+
+#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+
+static unsigned long timer_jiffies = 0;
+
+static inline void insert_timer(struct timer_list *timer,
+ struct timer_list **vec, int idx)
+{
+ if ((timer->next = vec[idx]))
+ vec[idx]->prev = timer;
+ vec[idx] = timer;
+ timer->prev = (struct timer_list *)&vec[idx];
+}
+
+static inline void internal_add_timer(struct timer_list *timer)
+{
+ /*
+ * must be cli-ed when calling this
+ */
+ unsigned long expires = timer->expires;
+ unsigned long idx = expires - timer_jiffies;
+
+ if (idx < TVR_SIZE) {
+ int i = expires & TVR_MASK;
+ insert_timer(timer, tv1.vec, i);
+ } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
+ int i = (expires >> TVR_BITS) & TVN_MASK;
+ insert_timer(timer, tv2.vec, i);
+ } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv3.vec, i);
+ } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv4.vec, i);
+ } else if (expires < timer_jiffies) {
+ /* can happen if you add a timer with expires == jiffies,
+ * or you set a timer to go off in the past
+ */
+ insert_timer(timer, tv1.vec, tv1.index);
+ } else if (idx < 0xffffffffUL) {
+ int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv5.vec, i);
+ } else {
+ /* Can only get here on architectures with 64-bit jiffies */
+ timer->next = timer->prev = timer;
+ }
+}
+
+void add_timer(struct timer_list *timer)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+#if SLOW_BUT_DEBUGGING_TIMERS
+ if (timer->next || timer->prev) {
+ printk("add_timer() called with non-zero list from %p\n",
+ __builtin_return_address(0));
+ goto out;
+ }
+#endif
+ internal_add_timer(timer);
+#if SLOW_BUT_DEBUGGING_TIMERS
+out:
+#endif
+ restore_flags(flags);
+}
+
+static inline int detach_timer(struct timer_list *timer)
+{
+ int ret = 0;
+ struct timer_list *next, *prev;
+ next = timer->next;
+ prev = timer->prev;
+ if (next) {
+ next->prev = prev;
+ }
+ if (prev) {
+ ret = 1;
+ prev->next = next;
+ }
+ return ret;
+}
+
+
+int del_timer(struct timer_list * timer)
+{
+ int ret;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ ret = detach_timer(timer);
+ timer->next = timer->prev = 0;
+ restore_flags(flags);
+ return ret;
+}
+
+static inline void cascade_timers(struct timer_vec *tv)
+{
+ /* cascade all the timers from tv up one level */
+ struct timer_list *timer;
+ timer = tv->vec[tv->index];
+ /*
+ * We are removing _all_ timers from the list, so we don't have to
+ * detach them individually, just clear the list afterwards.
+ */
+ while (timer) {
+ struct timer_list *tmp = timer;
+ timer = timer->next;
+ internal_add_timer(tmp);
+ }
+ tv->vec[tv->index] = NULL;
+ tv->index = (tv->index + 1) & TVN_MASK;
+}
+
+static inline void run_timer_list(void)
+{
+ cli();
+ while ((long)(jiffies - timer_jiffies) >= 0) {
+ struct timer_list *timer;
+ if (!tv1.index) {
+ int n = 1;
+ do {
+ cascade_timers(tvecs[n]);
+ } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+ }
+ while ((timer = tv1.vec[tv1.index])) {
+ void (*fn)(unsigned long) = timer->function;
+ unsigned long data = timer->data;
+ detach_timer(timer);
+ timer->next = timer->prev = NULL;
+ sti();
+ fn(data);
+ cli();
+ }
+ ++timer_jiffies;
+ tv1.index = (tv1.index + 1) & TVR_MASK;
+ }
+ sti();
+}
+
+static inline void run_old_timers(void)
+{
+ struct timer_struct *tp;
+ unsigned long mask;
+
+ for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
+ if (mask > timer_active)
+ break;
+ if (!(mask & timer_active))
+ continue;
+ if (tp->expires > jiffies)
+ continue;
+ timer_active &= ~mask;
+ tp->fn();
+ sti();
+ }
+}
+
+void tqueue_bh(void)
+{
+ run_task_queue(&tq_timer);
+}
+
+void immediate_bh(void)
+{
+ run_task_queue(&tq_immediate);
+}
+
+unsigned long timer_active = 0;
+struct timer_struct timer_table[32];
+
+/*
+ * Hmm.. Changed this, as the GNU make sources (load.c) seem to
+ * imply that avenrun[] is the standard name for this kind of thing.
+ * Nothing else seems to be standardized: the fractional size etc
+ * all seem to differ on different machines.
+ */
+unsigned long avenrun[3] = { 0,0,0 };
+
+/*
+ * Nr of active tasks - counted in fixed-point numbers
+ */
+static unsigned long count_active_tasks(void)
+{
+ struct task_struct **p;
+ unsigned long nr = 0;
+
+ for(p = &LAST_TASK; p > &FIRST_TASK; --p)
+ if (*p && ((*p)->state == TASK_RUNNING ||
+ (*p)->state == TASK_UNINTERRUPTIBLE ||
+ (*p)->state == TASK_SWAPPING))
+ nr += FIXED_1;
+#ifdef __SMP__
+ nr-=(smp_num_cpus-1)*FIXED_1;
+#endif
+ return nr;
+}
+
+static inline void calc_load(unsigned long ticks)
+{
+ unsigned long active_tasks; /* fixed-point */
+ static int count = LOAD_FREQ;
+
+ count -= ticks;
+ if (count < 0) {
+ count += LOAD_FREQ;
+ active_tasks = count_active_tasks();
+ CALC_LOAD(avenrun[0], EXP_1, active_tasks);
+ CALC_LOAD(avenrun[1], EXP_5, active_tasks);
+ CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+ }
+}
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ */
+static void second_overflow(void)
+{
+ long ltemp;
+
+ /* Bump the maxerror field */
+ time_maxerror += time_tolerance >> SHIFT_USEC;
+ if ( time_maxerror > NTP_PHASE_LIMIT ) {
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_state = TIME_ERROR; /* p. 17, sect. 4.3, (b) */
+ time_status |= STA_UNSYNC;
+ }
+
+ /*
+ * Leap second processing. If in leap-insert state at
+ * the end of the day, the system clock is set back one
+ * second; if in leap-delete state, the system clock is
+ * set ahead one second. The microtime() routine or
+ * external clock driver will ensure that reported time
+ * is always monotonic. The ugly divides should be
+ * replaced.
+ */
+ switch (time_state) {
+
+ case TIME_OK:
+ if (time_status & STA_INS)
+ time_state = TIME_INS;
+ else if (time_status & STA_DEL)
+ time_state = TIME_DEL;
+ break;
+
+ case TIME_INS:
+ if (xtime.tv_sec % 86400 == 0) {
+ xtime.tv_sec--;
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+ }
+ break;
+
+ case TIME_DEL:
+ if ((xtime.tv_sec + 1) % 86400 == 0) {
+ xtime.tv_sec++;
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+ }
+ break;
+
+ case TIME_OOP:
+ time_state = TIME_WAIT;
+ break;
+
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ }
+
+ /*
+ * Compute the phase adjustment for the next second. In
+ * PLL mode, the offset is reduced by a fixed factor
+ * times the time constant. In FLL mode the offset is
+ * used directly. In either mode, the maximum phase
+ * adjustment for each second is clamped so as to spread
+ * the adjustment over not more than the number of
+ * seconds between updates.
+ */
+ if (time_offset < 0) {
+ ltemp = -time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset += ltemp;
+ time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ } else {
+ ltemp = time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset -= ltemp;
+ time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ }
+
+ /*
+ * Compute the frequency estimate and additional phase
+ * adjustment due to frequency error for the next
+ * second. When the PPS signal is engaged, gnaw on the
+ * watchdog counter and update the frequency computed by
+ * the pll and the PPS signal.
+ */
+ pps_valid++;
+ if (pps_valid == PPS_VALID) { /* PPS signal lost */
+ pps_jitter = MAXTIME;
+ pps_stabil = MAXFREQ;
+ time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+ STA_PPSWANDER | STA_PPSERROR);
+ }
+ ltemp = time_freq + pps_freq;
+ if (ltemp < 0)
+ time_adj -= -ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+ else
+ time_adj += ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+
+#if HZ == 100
+ /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
+ * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
+ */
+ if (time_adj < 0)
+ time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
+ else
+ time_adj += (time_adj >> 2) + (time_adj >> 5);
+#endif
+}
+
+/* in the NTP reference this is called "hardclock()" */
+static void update_wall_time_one_tick(void)
+{
+ if ( (time_adjust_step = time_adjust) != 0 ) {
+ /* We are doing an adjtime thing.
+ *
+ * Prepare time_adjust_step to be within bounds.
+ * Note that a positive time_adjust means we want the clock
+ * to run faster.
+ *
+ * Limit the amount of the step to be in the range
+ * -tickadj .. +tickadj
+ */
+ if (time_adjust > tickadj)
+ time_adjust_step = tickadj;
+ else if (time_adjust < -tickadj)
+ time_adjust_step = -tickadj;
+
+ /* Reduce by this step the amount of time left */
+ time_adjust -= time_adjust_step;
+ }
+ xtime.tv_usec += tick + time_adjust_step;
+ /*
+ * Advance the phase, once it gets to one microsecond, then
+ * advance the tick more.
+ */
+ time_phase += time_adj;
+ if (time_phase <= -FINEUSEC) {
+ long ltemp = -time_phase >> SHIFT_SCALE;
+ time_phase += ltemp << SHIFT_SCALE;
+ xtime.tv_usec -= ltemp;
+ }
+ else if (time_phase >= FINEUSEC) {
+ long ltemp = time_phase >> SHIFT_SCALE;
+ time_phase -= ltemp << SHIFT_SCALE;
+ xtime.tv_usec += ltemp;
+ }
+}
+
+/*
+ * Using a loop looks inefficient, but "ticks" is
+ * usually just one (we shouldn't be losing ticks,
+ * we're doing it this way mainly for interrupt
+ * latency reasons, not because we think we'll
+ * have lots of lost timer ticks)
+ */
+static void update_wall_time(unsigned long ticks)
+{
+ do {
+ ticks--;
+ update_wall_time_one_tick();
+ } while (ticks);
+
+ if (xtime.tv_usec >= 1000000) {
+ xtime.tv_usec -= 1000000;
+ xtime.tv_sec++;
+ second_overflow();
+ }
+}
+
+static inline void do_process_times(struct task_struct *p,
+ unsigned long user, unsigned long system)
+{
+ long psecs;
+
+ p->utime += user;
+ p->stime += system;
+
+ psecs = (p->stime + p->utime) / HZ;
+ if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
+ /* Send SIGXCPU every second.. */
+ if (psecs * HZ == p->stime + p->utime)
+ send_sig(SIGXCPU, p, 1);
+ /* and SIGKILL when we go over max.. */
+ if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
+ send_sig(SIGKILL, p, 1);
+ }
+}
+
+static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
+{
+ unsigned long it_virt = p->it_virt_value;
+
+ if (it_virt) {
+ if (it_virt <= ticks) {
+ it_virt = ticks + p->it_virt_incr;
+ send_sig(SIGVTALRM, p, 1);
+ }
+ p->it_virt_value = it_virt - ticks;
+ }
+}
+
+static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
+{
+ unsigned long it_prof = p->it_prof_value;
+
+ if (it_prof) {
+ if (it_prof <= ticks) {
+ it_prof = ticks + p->it_prof_incr;
+ send_sig(SIGPROF, p, 1);
+ }
+ p->it_prof_value = it_prof - ticks;
+ }
+}
+
+static __inline__ void update_one_process(struct task_struct *p,
+ unsigned long ticks, unsigned long user, unsigned long system)
+{
+ do_process_times(p, user, system);
+ do_it_virt(p, user);
+ do_it_prof(p, ticks);
+}
+
+static void update_process_times(unsigned long ticks, unsigned long system)
+{
+#ifndef __SMP__
+ struct task_struct * p = current;
+ unsigned long user = ticks - system;
+ if (p->pid) {
+ p->counter -= ticks;
+ if (p->counter < 0) {
+ p->counter = 0;
+ need_resched = 1;
+ }
+ if (p->priority < DEF_PRIORITY)
+ kstat.cpu_nice += user;
+ else
+ kstat.cpu_user += user;
+ kstat.cpu_system += system;
+ }
+ update_one_process(p, ticks, user, system);
+#else
+ int cpu,j;
+ cpu = smp_processor_id();
+ for (j=0;j<smp_num_cpus;j++)
+ {
+ int i = cpu_logical_map[j];
+ struct task_struct *p;
+
+#ifdef __SMP_PROF__
+ if (test_bit(i,&smp_idle_map))
+ smp_idle_count[i]++;
+#endif
+ p = current_set[i];
+ /*
+ * Do we have a real process?
+ */
+ if (p->pid) {
+ /* assume user-mode process */
+ unsigned long utime = ticks;
+ unsigned long stime = 0;
+ if (cpu == i) {
+ utime = ticks-system;
+ stime = system;
+ } else if (smp_proc_in_lock[j]) {
+ utime = 0;
+ stime = ticks;
+ }
+ update_one_process(p, ticks, utime, stime);
+
+ if (p->priority < DEF_PRIORITY)
+ kstat.cpu_nice += utime;
+ else
+ kstat.cpu_user += utime;
+ kstat.cpu_system += stime;
+
+ p->counter -= ticks;
+ if (p->counter >= 0)
+ continue;
+ p->counter = 0;
+ } else {
+ /*
+ * Idle processor found, do we have anything
+ * we could run?
+ */
+ if (!(0x7fffffff & smp_process_available))
+ continue;
+ }
+ /* Ok, we should reschedule, do the magic */
+ if (i==cpu)
+ need_resched = 1;
+ else
+ smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
+ }
+#endif
+}
+
+static unsigned long lost_ticks = 0;
+static unsigned long lost_ticks_system = 0;
+
+static inline void update_times(void)
+{
+ unsigned long ticks;
+
+ ticks = xchg(&lost_ticks, 0);
+
+ if (ticks) {
+ unsigned long system;
+
+ system = xchg(&lost_ticks_system, 0);
+ calc_load(ticks);
+ update_wall_time(ticks);
+ update_process_times(ticks, system);
+ }
+}
+
+static void timer_bh(void)
+{
+ update_times();
+ run_old_timers();
+ run_timer_list();
+}
+
+void do_timer(struct pt_regs * regs)
+{
+ (*(unsigned long *)&jiffies)++;
+ lost_ticks++;
+ mark_bh(TIMER_BH);
+ if (!user_mode(regs)) {
+ lost_ticks_system++;
+ if (prof_buffer && current->pid) {
+ extern int _stext;
+ unsigned long ip = instruction_pointer(regs);
+ ip -= (unsigned long) &_stext;
+ ip >>= prof_shift;
+ if (ip < prof_len)
+ prof_buffer[ip]++;
+ }
+ }
+ if (tq_timer)
+ mark_bh(TQUEUE_BH);
+}
+
+#ifndef __alpha__
+
+/*
+ * For backwards compatibility? This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+asmlinkage unsigned int sys_alarm(unsigned int seconds)
+{
+ struct itimerval it_new, it_old;
+ unsigned int oldalarm;
+
+ it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+ it_new.it_value.tv_sec = seconds;
+ it_new.it_value.tv_usec = 0;
+ _setitimer(ITIMER_REAL, &it_new, &it_old);
+ oldalarm = it_old.it_value.tv_sec;
+ /* ehhh.. We can't return 0 if we have an alarm pending.. */
+ /* And we'd better return too much than too little anyway */
+ if (it_old.it_value.tv_usec)
+ oldalarm++;
+ return oldalarm;
+}
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
+ * should be moved into arch/i386 instead?
+ */
+asmlinkage int sys_getpid(void)
+{
+ return current->pid;
+}
+
+asmlinkage int sys_getppid(void)
+{
+ return current->p_opptr->pid;
+}
+
+asmlinkage int sys_getuid(void)
+{
+ return current->uid;
+}
+
+asmlinkage int sys_geteuid(void)
+{
+ return current->euid;
+}
+
+asmlinkage int sys_getgid(void)
+{
+ return current->gid;
+}
+
+asmlinkage int sys_getegid(void)
+{
+ return current->egid;
+}
+
+/*
+ * This has been replaced by sys_setpriority. Maybe it should be
+ * moved into the arch dependent tree for those ports that require
+ * it for backward compatibility?
+ */
+asmlinkage int sys_nice(int increment)
+{
+ unsigned long newprio;
+ int increase = 0;
+
+ newprio = increment;
+ if (increment < 0) {
+ if (!suser())
+ return -EPERM;
+ newprio = -increment;
+ increase = 1;
+ }
+ if (newprio > 40)
+ newprio = 40;
+ /*
+ * do a "normalization" of the priority (traditionally
+ * unix nice values are -20..20, linux doesn't really
+ * use that kind of thing, but uses the length of the
+ * timeslice instead (default 150 msec). The rounding is
+ * why we want to avoid negative values.
+ */
+ newprio = (newprio * DEF_PRIORITY + 10) / 20;
+ increment = newprio;
+ if (increase)
+ increment = -increment;
+ newprio = current->priority - increment;
+ if ((signed) newprio < 1)
+ newprio = 1;
+ if (newprio > DEF_PRIORITY*2)
+ newprio = DEF_PRIORITY*2;
+ current->priority = newprio;
+ return 0;
+}
+
+#endif
+
+static struct task_struct *find_process_by_pid(pid_t pid) {
+ struct task_struct *p, *q;
+
+ if (pid == 0)
+ p = current;
+ else {
+ p = 0;
+ for_each_task(q) {
+ if (q && q->pid == pid) {
+ p = q;
+ break;
+ }
+ }
+ }
+ return p;
+}
+
+static int setscheduler(pid_t pid, int policy,
+ struct sched_param *param)
+{
+ int error;
+ struct sched_param lp;
+ struct task_struct *p;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+
+ error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
+ if (error)
+ return error;
+ memcpy_fromfs(&lp, param, sizeof(struct sched_param));
+
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ if (policy < 0)
+ policy = p->policy;
+ else if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ policy != SCHED_OTHER)
+ return -EINVAL;
+
+ /*
+ * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
+ * priority for SCHED_OTHER is 0.
+ */
+ if (lp.sched_priority < 0 || lp.sched_priority > 99)
+ return -EINVAL;
+ if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
+ return -EINVAL;
+
+ if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
+ return -EPERM;
+ if ((current->euid != p->euid) && (current->euid != p->uid) &&
+ !suser())
+ return -EPERM;
+
+ p->policy = policy;
+ p->rt_priority = lp.sched_priority;
+ cli();
+ if (p->next_run)
+ move_last_runqueue(p);
+ sti();
+ need_resched = 1;
+ return 0;
+}
+
+asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
+ struct sched_param *param)
+{
+ return setscheduler(pid, policy, param);
+}
+
+asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
+{
+ return setscheduler(pid, -1, param);
+}
+
+asmlinkage int sys_sched_getscheduler(pid_t pid)
+{
+ struct task_struct *p;
+
+ if (pid < 0)
+ return -EINVAL;
+
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ return p->policy;
+}
+
+asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
+{
+ int error;
+ struct task_struct *p;
+ struct sched_param lp;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+
+ error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
+ if (error)
+ return error;
+
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ lp.sched_priority = p->rt_priority;
+ memcpy_tofs(param, &lp, sizeof(struct sched_param));
+
+ return 0;
+}
+
+asmlinkage int sys_sched_yield(void)
+{
+ cli();
+ move_last_runqueue(current);
+ current->counter = 0;
+ need_resched = 1;
+ sti();
+ return 0;
+}
+
+asmlinkage int sys_sched_get_priority_max(int policy)
+{
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ return 99;
+ case SCHED_OTHER:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+asmlinkage int sys_sched_get_priority_min(int policy)
+{
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ return 1;
+ case SCHED_OTHER:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
+{
+ int error;
+ struct timespec t;
+
+ error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
+ if (error)
+ return error;
+
+ /* Values taken from 2.1.38 */
+ t.tv_sec = 0;
+ t.tv_nsec = 150000; /* is this right for non-intel architecture too?*/
+ memcpy_tofs(interval, &t, sizeof(struct timespec));
+
+ return 0;
+}
+
+/*
+ * change timespec to jiffies, trying to avoid the
+ * most obvious overflows..
+ */
+static unsigned long timespectojiffies(struct timespec *value)
+{
+ unsigned long sec = (unsigned) value->tv_sec;
+ long nsec = value->tv_nsec;
+
+ if (sec > (LONG_MAX / HZ))
+ return LONG_MAX;
+ nsec += 1000000000L / HZ - 1;
+ nsec /= 1000000000L / HZ;
+ return HZ * sec + nsec;
+}
+
+static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
+{
+ value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+ return;
+}
+
+asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
+{
+ int error;
+ struct timespec t;
+ unsigned long expire;
+
+ error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
+ if (error)
+ return error;
+ memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
+ if (rmtp) {
+ error = verify_area(VERIFY_WRITE, rmtp,
+ sizeof(struct timespec));
+ if (error)
+ return error;
+ }
+
+ if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
+ return -EINVAL;
+
+ if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
+ current->policy != SCHED_OTHER) {
+ /*
+ * Short delay requests up to 2 ms will be handled with
+ * high precision by a busy wait for all real-time processes.
+ */
+ udelay((t.tv_nsec + 999) / 1000);
+ return 0;
+ }
+
+ expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
+ current->timeout = expire;
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+
+ if (expire > jiffies) {
+ if (rmtp) {
+ jiffiestotimespec(expire - jiffies -
+ (expire > jiffies + 1), &t);
+ memcpy_tofs(rmtp, &t, sizeof(struct timespec));
+ }
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static void show_task(int nr,struct task_struct * p)
+{
+ unsigned long free;
+ static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
+
+ printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
+ if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
+ printk(stat_nam[p->state]);
+ else
+ printk(" ");
+#if ((~0UL) == 0xffffffff)
+ if (p == current)
+ printk(" current ");
+ else
+ printk(" %08lX ", thread_saved_pc(&p->tss));
+ printk("%08lX ", get_wchan(p));
+#else
+ if (p == current)
+ printk(" current task ");
+ else
+ printk(" %016lx ", thread_saved_pc(&p->tss));
+ printk("%08lX ", get_wchan(p) & 0xffffffffL);
+#endif
+ for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
+ if (((unsigned long *)p->kernel_stack_page)[free])
+ break;
+ }
+ printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
+ if (p->p_cptr)
+ printk("%5d ", p->p_cptr->pid);
+ else
+ printk(" ");
+ if (p->p_ysptr)
+ printk("%7d", p->p_ysptr->pid);
+ else
+ printk(" ");
+ if (p->p_osptr)
+ printk(" %5d\n", p->p_osptr->pid);
+ else
+ printk("\n");
+}
+
+void show_state(void)
+{
+ int i;
+
+#if ((~0UL) == 0xffffffff)
+ printk("\n"
+ " free sibling\n");
+ printk(" task PC wchan stack pid father child younger older\n");
+#else
+ printk("\n"
+ " free sibling\n");
+ printk(" task PC wchan stack pid father child younger older\n");
+#endif
+ for (i=0 ; i<NR_TASKS ; i++)
+ if (task[i])
+ show_task(i,task[i]);
+}
+
+void sched_init(void)
+{
+ /*
+ * We have to do a little magic to get the first
+ * process right in SMP mode.
+ */
+ int cpu=smp_processor_id();
+#ifndef __SMP__
+ current_set[cpu]=&init_task;
+#else
+ init_task.processor=cpu;
+ for(cpu = 0; cpu < NR_CPUS; cpu++)
+ current_set[cpu] = &init_task;
+#endif
+ init_bh(TIMER_BH, timer_bh);
+ init_bh(TQUEUE_BH, tqueue_bh);
+ init_bh(IMMEDIATE_BH, immediate_bh);
+}
diff --git a/linux/src/kernel/softirq.c b/linux/src/kernel/softirq.c
new file mode 100644
index 0000000..32038b1
--- /dev/null
+++ b/linux/src/kernel/softirq.c
@@ -0,0 +1,54 @@
+/*
+ * linux/kernel/softirq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ *
+ * do_bottom_half() runs at normal kernel priority: all interrupts
+ * enabled. do_bottom_half() is atomic with respect to itself: a
+ * bottom_half handler need not be re-entrant.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+
+unsigned int intr_count = 0;
+
+int bh_mask_count[32];
+unsigned int bh_active = 0;
+unsigned int bh_mask = 0;
+void (*bh_base[32])(void);
+
+
+asmlinkage void do_bottom_half(void)
+{
+ unsigned int active;
+ unsigned int mask, left;
+ void (**bh)(void);
+
+ sti();
+ bh = bh_base;
+ active = bh_active & bh_mask;
+ for (mask = 1, left = ~0 ; left & active ; bh++,mask += mask,left += left) {
+ if (mask & active) {
+ void (*fn)(void);
+ bh_active &= ~mask;
+ fn = *bh;
+ if (!fn)
+ goto bad_bh;
+ fn();
+ }
+ }
+ return;
+bad_bh:
+ printk ("irq.c:bad bottom half entry %08lx\n", mask);
+}
diff --git a/linux/src/lib/ctype.c b/linux/src/lib/ctype.c
new file mode 100644
index 0000000..26baa62
--- /dev/null
+++ b/linux/src/lib/ctype.c
@@ -0,0 +1,36 @@
+/*
+ * linux/lib/ctype.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+
+EXPORT_SYMBOL(_ctype);
diff --git a/linux/src/lib/vsprintf.c b/linux/src/lib/vsprintf.c
new file mode 100644
index 0000000..8f813c6
--- /dev/null
+++ b/linux/src/lib/vsprintf.c
@@ -0,0 +1,306 @@
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
+{
+ unsigned long result = 0,value;
+
+ if (!base) {
+ base = 10;
+ if (*cp == '0') {
+ base = 8;
+ cp++;
+ if ((*cp == 'x') && isxdigit(cp[1])) {
+ cp++;
+ base = 16;
+ }
+ }
+ }
+ while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp)
+ ? toupper(*cp) : *cp)-'A'+10) < base) {
+ result = result*base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+ return result;
+}
+
+/* we use this so that we can do without the ctype library */
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (is_digit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) n) % (unsigned) base; \
+n = ((unsigned long) n) / (unsigned) base; \
+__res; })
+
+static char * number(char * str, long num, int base, int size, int precision
+ ,int type)
+{
+ char c,sign,tmp[66];
+ const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(ZEROPAD+LEFT)))
+ while(size-->0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+ if (type & SPECIAL)
+ if (base==8)
+ *str++ = '0';
+ else if (base==16) {
+ *str++ = '0';
+ *str++ = digits[33];
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ *str++ = c;
+ while (i < precision--)
+ *str++ = '0';
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long num;
+ int i, base;
+ char * str;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars for from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+
+ for (str=buf ; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= LEFT; goto repeat;
+ case '+': flags |= PLUS; goto repeat;
+ case ' ': flags |= SPACE; goto repeat;
+ case '#': flags |= SPECIAL; goto repeat;
+ case '0': flags |= ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (is_digit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (is_digit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ *str++ = ' ';
+ *str++ = (unsigned char) va_arg(args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ *str++ = ' ';
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ str = number(str,
+ (unsigned long) va_arg(args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (*fmt != '%')
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'l')
+ num = va_arg(args, unsigned long);
+ else if (qualifier == 'h')
+ if (flags & SIGN)
+ num = va_arg(args, short);
+ else
+ num = va_arg(args, unsigned short);
+ else if (flags & SIGN)
+ num = va_arg(args, int);
+ else
+ num = va_arg(args, unsigned int);
+ str = number(str, num, base, field_width, precision, flags);
+ }
+ *str = '\0';
+ return str-buf;
+}
+
+int sprintf(char * buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i=vsprintf(buf,fmt,args);
+ va_end(args);
+ return i;
+}
+
diff --git a/linux/src/net/core/dev.c b/linux/src/net/core/dev.c
new file mode 100644
index 0000000..4e46f9f
--- /dev/null
+++ b/linux/src/net/core/dev.c
@@ -0,0 +1,1629 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dhinds@allegro.stanford.edu>
+ *
+ * Changes:
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant stunts to
+ * keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
+ * a function call a packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close changes.
+ * Rudi Cilibrasi : Pass the right thing to set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to make it work out
+ * on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
+ * is no device open function.
+ * Lawrence V. Stefani : Changed set MTU ioctl to not assume
+ * min MTU of 68 bytes for devices
+ * that have change MTU functions.
+ *
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/slhc.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <net/br.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h>
+#endif /* CONFIG_NET_RADIO */
+
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ */
+
+struct packet_type *ptype_base[16];
+struct packet_type *ptype_all = NULL; /* Taps */
+
+/*
+ * Device list lock
+ */
+
+int dev_lockct=0;
+
+/*
+ * Our notifier list
+ */
+
+struct notifier_block *netdev_chain=NULL;
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the bottom half handler.
+ */
+
+static struct sk_buff_head backlog;
+
+/*
+ * We don't overdo the queue or we will thrash memory badly.
+ */
+
+static int backlog_size = 0;
+
+/*
+ * Return the lesser of the two values.
+ */
+
+static __inline__ unsigned long min(unsigned long a, unsigned long b)
+{
+ return (a < b)? a : b;
+}
+
+
+/******************************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************************/
+
+/*
+ * For efficiency
+ */
+
+static int dev_nit=0;
+
+/*
+ * Add a protocol ID to the list. Now that the input handler is
+ * smarter we can dispense with all the messy stuff that used to be
+ * here.
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ int hash;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit++;
+ pt->next=ptype_all;
+ ptype_all=pt;
+ }
+ else
+ {
+ hash=ntohs(pt->type)&15;
+ pt->next = ptype_base[hash];
+ ptype_base[hash] = pt;
+ }
+}
+
+
+/*
+ * Remove a protocol ID from the list.
+ */
+
+void dev_remove_pack(struct packet_type *pt)
+{
+ struct packet_type **pt1;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit--;
+ pt1=&ptype_all;
+ }
+ else
+ pt1=&ptype_base[ntohs(pt->type)&15];
+ for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
+ {
+ if(pt==(*pt1))
+ {
+ *pt1=pt->next;
+ return;
+ }
+ }
+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+}
+
+/*****************************************************************************************
+
+ Device Interface Subroutines
+
+******************************************************************************************/
+
+/*
+ * Find an interface by name.
+ */
+
+struct device *dev_get(const char *name)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (strcmp(dev->name, name) == 0)
+ return(dev);
+ }
+ return NULL;
+}
+
+/*
+ * Find and possibly load an interface.
+ */
+
+#ifdef CONFIG_KERNELD
+
+extern __inline__ void dev_load(const char *name)
+{
+ if(!dev_get(name) && suser()) {
+#ifdef CONFIG_NET_ALIAS
+ const char *sptr;
+
+ for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break;
+ if (!(*sptr && *(sptr+1)))
+#endif
+ request_module(name);
+ }
+}
+
+#endif
+
+/*
+ * Prepare an interface for use.
+ */
+
+int dev_open(struct device *dev)
+{
+ int ret = -ENODEV;
+
+ /*
+ * Call device private open method
+ */
+ if (dev->open)
+ ret = dev->open(dev);
+
+ /*
+ * If it went open OK then set the flags
+ */
+
+ if (ret == 0)
+ {
+ dev->flags |= (IFF_UP | IFF_RUNNING);
+ /*
+ * Initialise multicasting status
+ */
+ dev_mc_upload(dev);
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ }
+ return(ret);
+}
+
+
+/*
+ * Completely shutdown an interface.
+ */
+
+int dev_close(struct device *dev)
+{
+ int ct=0;
+
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ */
+
+ if ((dev->flags & IFF_UP) && dev->stop)
+ dev->stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags&=~(IFF_UP|IFF_RUNNING);
+
+ /*
+ * Tell people we are going down
+ */
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+ /*
+ * Flush the multicast chain
+ */
+ dev_mc_discard(dev);
+
+ /*
+ * Purge any queued packets when we down the link
+ */
+ while(ct<DEV_NUMBUFFS)
+ {
+ struct sk_buff *skb;
+ while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
+ if(skb->free)
+ kfree_skb(skb,FREE_WRITE);
+ ct++;
+ }
+ return(0);
+}
+
+
+/*
+ * Device change register/unregister. These are not inline or static
+ * as we export them to the world.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_register(&netdev_chain, nb);
+}
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&netdev_chain,nb);
+}
+
+/*
+ * Send (or queue for sending) a packet.
+ *
+ * IMPORTANT: When this is called to resend frames. The caller MUST
+ * already have locked the sk_buff. Apart from that we do the
+ * rest of the magic.
+ */
+
+static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ unsigned long flags;
+ struct sk_buff_head *list;
+ int retransmission = 0; /* used to say if the packet should go */
+ /* at the front or the back of the */
+ /* queue - front is a retransmit try */
+
+ if(pri>=0 && !skb_device_locked(skb))
+ skb_device_lock(skb); /* Shove a lock on the frame */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb->dev = dev;
+
+ /*
+ * Negative priority is used to flag a frame that is being pulled from the
+ * queue front as a retransmit attempt. It therefore goes back on the queue
+ * start on a failure.
+ */
+
+ if (pri < 0)
+ {
+ pri = -pri-1;
+ retransmission = 1;
+ }
+
+#ifdef CONFIG_NET_DEBUG
+ if (pri >= DEV_NUMBUFFS)
+ {
+ printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
+ pri = 1;
+ }
+#endif
+
+ /*
+ * If the address has not been resolved. Call the device header rebuilder.
+ * This can cover all protocols and technically not just ARP either.
+ */
+
+ if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
+ return;
+ }
+
+ /*
+ *
+ * If dev is an alias, switch to its main device.
+ * "arp" resolution has been made with alias device, so
+ * arp entries refer to alias, not main.
+ *
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ skb->dev = dev = net_alias_dev_tx(dev);
+#endif
+
+ /*
+ * If we are bridging and this is directly generated output
+ * pass the frame via the bridge.
+ */
+
+#ifdef CONFIG_BRIDGE
+ if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
+ {
+ if(br_tx_frame(skb))
+ return;
+ }
+#endif
+
+ list = dev->buffs + pri;
+
+ save_flags(flags);
+ /* if this isn't a retransmission, use the first packet instead... */
+ if (!retransmission) {
+ if (skb_queue_len(list)) {
+ /* avoid overrunning the device queue.. */
+ if (skb_queue_len(list) > dev->tx_queue_len) {
+ dev_kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+ }
+
+ /* copy outgoing packets to any sniffer packet handlers */
+ if (dev_nit) {
+ struct packet_type *ptype;
+ skb->stamp=xtime;
+ for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
+ {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ ((struct sock *)ptype->data != skb->sk))
+ {
+ struct sk_buff *skb2;
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+ break;
+ /* FIXME?: Wrong when the hard_header_len
+ * is an upper bound. Is this even
+ * used anywhere?
+ */
+ skb2->h.raw = skb2->data + dev->hard_header_len;
+ /* On soft header devices we
+ * yank the header before mac.raw
+ * back off. This is set by
+ * dev->hard_header().
+ */
+ if (dev->flags&IFF_SOFTHEADERS)
+ skb_pull(skb2,skb2->mac.raw-skb2->data);
+ skb2->mac.raw = skb2->data;
+ ptype->func(skb2, skb->dev, ptype);
+ }
+ }
+ }
+
+ if (skb_queue_len(list)) {
+ cli();
+ skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
+ __skb_queue_tail(list, skb);
+ skb = __skb_dequeue(list);
+ skb_device_lock(skb); /* New buffer needs locking down */
+ restore_flags(flags);
+ }
+ }
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ /*
+ * Packet is now solely the responsibility of the driver
+ */
+ return;
+ }
+
+ /*
+ * Transmission failed, put skb back into a list. Once on the list it's safe and
+ * no longer device locked (it can be freed safely from the device queue)
+ */
+ cli();
+ skb_device_unlock(skb);
+ __skb_queue_head(list,skb);
+ restore_flags(flags);
+}
+
+void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ start_bh_atomic();
+ do_dev_queue_xmit(skb, dev, pri);
+ end_bh_atomic();
+}
+
+/*
+ * Receive a packet from a device driver and queue it for the upper
+ * (protocol) levels. It always succeeds. This is the recommended
+ * interface to use.
+ */
+
+void netif_rx(struct sk_buff *skb)
+{
+ static int dropping = 0;
+
+ /*
+ * Any received buffers are un-owned and should be discarded
+ * when freed. These will be updated later as the frames get
+ * owners.
+ */
+
+ skb->sk = NULL;
+ skb->free = 1;
+ if(skb->stamp.tv_sec==0)
+ skb->stamp = xtime;
+
+ /*
+ * Check that we aren't overdoing things.
+ */
+
+ if (!backlog_size)
+ dropping = 0;
+ else if (backlog_size > 300)
+ dropping = 1;
+
+ if (dropping)
+ {
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Add it to the "backlog" queue.
+ */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb_queue_tail(&backlog,skb);
+ backlog_size++;
+
+ /*
+ * If any packet arrived, mark it for processing after the
+ * hardware interrupt returns.
+ */
+
+ mark_bh(NET_BH);
+ return;
+}
+
+/*
+ * This routine causes all interfaces to try to send some data.
+ */
+
+static void dev_transmit(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags != 0 && !dev->tbusy) {
+ /*
+ * Kick the device
+ */
+ dev_tint(dev);
+ }
+ }
+}
+
+
+/**********************************************************************************
+
+ Receive Queue Processor
+
+***********************************************************************************/
+
+/*
+ * When we are called the queue is ready to grab, the interrupts are
+ * on and hardware can interrupt and queue to the receive queue as we
+ * run with no problems.
+ * This is run as a bottom half after an interrupt handler that does
+ * mark_bh(NET_BH);
+ */
+
+void net_bh(void)
+{
+ struct packet_type *ptype;
+ struct packet_type *pt_prev;
+ unsigned short type;
+
+ /*
+ * Can we send anything now? We want to clear the
+ * decks for any more sends that get done as we
+ * process the input. This also minimises the
+ * latency on a transmit interrupt bh.
+ */
+
+ dev_transmit();
+
+ /*
+ * Any data left to process. This may occur because a
+ * mark_bh() is done after we empty the queue including
+ * that from the device which does a mark_bh() just after
+ */
+
+ /*
+ * While the queue is not empty..
+ *
+ * Note that the queue never shrinks due to
+ * an interrupt, so we can do this test without
+ * disabling interrupts.
+ */
+
+ while (!skb_queue_empty(&backlog)) {
+ struct sk_buff * skb = backlog.next;
+
+ /*
+ * We have a packet. Therefore the queue has shrunk
+ */
+ cli();
+ __skb_unlink(skb, &backlog);
+ backlog_size--;
+ sti();
+
+
+#ifdef CONFIG_BRIDGE
+
+ /*
+ * If we are bridging then pass the frame up to the
+ * bridging code. If it is bridged then move on
+ */
+
+ if (br_stats.flags & BR_UP)
+ {
+ /*
+ * We pass the bridge a complete frame. This means
+ * recovering the MAC header first.
+ */
+
+ int offset=skb->data-skb->mac.raw;
+ cli();
+ skb_push(skb,offset); /* Put header back on for bridge */
+ if(br_receive_frame(skb))
+ {
+ sti();
+ continue;
+ }
+ /*
+ * Pull the MAC header off for the copy going to
+ * the upper layers.
+ */
+ skb_pull(skb,offset);
+ sti();
+ }
+#endif
+
+ /*
+ * Bump the pointer to the next structure.
+ *
+ * On entry to the protocol layer. skb->data and
+ * skb->h.raw point to the MAC and encapsulated data
+ */
+
+ skb->h.raw = skb->data;
+
+ /*
+ * Fetch the packet protocol ID.
+ */
+
+ type = skb->protocol;
+
+ /*
+ * We got a packet ID. Now loop over the "known protocols"
+ * list. There are two lists. The ptype_all list of taps (normally empty)
+ * and the main protocol list which is hashed perfectly for normal protocols.
+ */
+
+ pt_prev = NULL;
+ for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
+ {
+ if(!ptype->dev || ptype->dev == skb->dev) {
+ if(pt_prev) {
+ struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
+ if(skb2)
+ pt_prev->func(skb2,skb->dev, pt_prev);
+ }
+ pt_prev=ptype;
+ }
+ }
+
+ for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
+ {
+ /*
+ * We already have a match queued. Deliver
+ * to it and then remember the new match
+ */
+ if(pt_prev)
+ {
+ struct sk_buff *skb2;
+
+ skb2=skb_clone(skb, GFP_ATOMIC);
+
+ /*
+ * Kick the protocol handler. This should be fast
+ * and efficient code.
+ */
+
+ if(skb2)
+ pt_prev->func(skb2, skb->dev, pt_prev);
+ }
+ /* Remember the current last to do */
+ pt_prev=ptype;
+ }
+ } /* End of protocol list loop */
+
+ /*
+ * Is there a last item to send to ?
+ */
+
+ if(pt_prev)
+ pt_prev->func(skb, skb->dev, pt_prev);
+ /*
+ * Has an unknown packet has been received ?
+ */
+
+ else
+ kfree_skb(skb, FREE_WRITE);
+ /*
+ * Again, see if we can transmit anything now.
+ * [Ought to take this out judging by tests it slows
+ * us down not speeds us up]
+ */
+#ifdef XMIT_EVERY
+ dev_transmit();
+#endif
+ } /* End of queue loop */
+
+ /*
+ * We have emptied the queue
+ */
+
+ /*
+ * One last output flush.
+ */
+
+#ifdef XMIT_AFTER
+ dev_transmit();
+#endif
+}
+
+
+/*
+ * This routine is called when an device driver (i.e. an
+ * interface) is ready to transmit a packet.
+ */
+
+void dev_tint(struct device *dev)
+{
+ int i;
+ unsigned long flags;
+ struct sk_buff_head * head;
+
+ /*
+ * aliases do not transmit (for now :) )
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev)) return;
+#endif
+ head = dev->buffs;
+ save_flags(flags);
+ cli();
+
+ /*
+ * Work the queues in priority order
+ */
+ for(i = 0;i < DEV_NUMBUFFS; i++,head++)
+ {
+
+ while (!skb_queue_empty(head)) {
+ struct sk_buff *skb;
+
+ skb = head->next;
+ __skb_unlink(skb, head);
+ /*
+ * Stop anyone freeing the buffer while we retransmit it
+ */
+ skb_device_lock(skb);
+ restore_flags(flags);
+ /*
+ * Feed them to the output stage and if it fails
+ * indicate they re-queue at the front.
+ */
+ do_dev_queue_xmit(skb,dev,-i - 1);
+ /*
+ * If we can take no more then stop here.
+ */
+ if (dev->tbusy)
+ return;
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size shortly, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char *arg)
+{
+ struct ifconf ifc;
+ struct ifreq ifr;
+ struct device *dev;
+ char *pos;
+ int len;
+ int err;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
+ if(err)
+ return err;
+ memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
+ len = ifc.ifc_len;
+ pos = ifc.ifc_buf;
+
+ /*
+ * We now walk the device list filling each active device
+ * into the array.
+ */
+
+ err=verify_area(VERIFY_WRITE,pos,len);
+ if(err)
+ return err;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
+ continue;
+ /*
+ * Have we run out of space here ?
+ */
+
+ if (len < sizeof(struct ifreq))
+ break;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ strcpy(ifr.ifr_name, dev->name);
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+
+
+ /*
+ * Write this block to the caller's space.
+ */
+
+ memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
+ pos += sizeof(struct ifreq);
+ len -= sizeof(struct ifreq);
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+
+ ifc.ifc_len = (pos - ifc.ifc_buf);
+ ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
+ memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
+
+ /*
+ * Report how much was filled in
+ */
+
+ return(pos - arg);
+}
+
+
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+
+#ifdef CONFIG_PROC_FS
+static int sprintf_stats(char *buffer, struct device *dev)
+{
+ struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
+ int size;
+
+ if (stats)
+ size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
+ dev->name,
+ stats->rx_packets, stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors
+ + stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors + stats->tx_aborted_errors
+ + stats->tx_window_errors + stats->tx_heartbeat_errors);
+ else
+ size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ return size;
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
+ * to create /proc/net/dev
+ */
+
+int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+
+ struct device *dev;
+
+
+ size = sprintf(buffer, "Inter-| Receive | Transmit\n"
+ " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Print one entry of /proc/net/wireless
+ * This is a clone of /proc/net/dev (just above)
+ */
+static int
+sprintf_wireless_stats(char * buffer,
+ struct device * dev)
+{
+ /* Get stats from the driver */
+ struct iw_statistics *stats = (dev->get_wireless_stats ?
+ dev->get_wireless_stats(dev) :
+ (struct iw_statistics *) NULL);
+ int size;
+
+ if(stats != (struct iw_statistics *) NULL)
+ size = sprintf(buffer,
+ "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n",
+ dev->name,
+ stats->status,
+ stats->qual.qual,
+ stats->qual.updated & 1 ? '.' : ' ',
+ stats->qual.level,
+ stats->qual.updated & 2 ? '.' : ' ',
+ stats->qual.noise,
+ stats->qual.updated & 3 ? '.' : ' ',
+ stats->discard.nwid,
+ stats->discard.code,
+ stats->discard.misc);
+ else
+ size = 0;
+
+ return size;
+}
+
+/*
+ * Print info for /proc/net/wireless (print all entries)
+ * This is a clone of /proc/net/dev (just above)
+ */
+int
+dev_get_wireless_info(char * buffer,
+ char ** start,
+ off_t offset,
+ int length,
+ int dummy)
+{
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ int size;
+
+ struct device * dev;
+
+ size = sprintf(buffer,
+ "Inter-|sta| Quality | Discarded packets\n"
+ " face |tus|link level noise| nwid crypt misc\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for(dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_wireless_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos < offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos > offset + length)
+ break;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if(len > length)
+ len = length; /* Ending slop */
+
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+
+/*
+ * This checks bitmasks for the ioctl calls for devices.
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
+
+/*
+ * Perform the SIOCxIFxxx calls.
+ *
+ * The socket layer has seen an ioctl the address family thinks is
+ * for the device. At this point we get invoked to make a decision
+ */
+
+static int dev_ifsioc(void *arg, unsigned int getset)
+{
+ struct ifreq ifr;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+
+ int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
+ if(err)
+ return err;
+
+ memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ /*
+ *
+ * net_alias_dev_get(): dev_get() with added alias naming magic.
+ * only allow alias creation/deletion if (getset==SIOCSIFADDR)
+ *
+ */
+
+#ifdef CONFIG_KERNELD
+ dev_load(ifr.ifr_name);
+#endif
+
+#ifdef CONFIG_NET_ALIAS
+ if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
+ return(err);
+#else
+ if ((dev = dev_get(ifr.ifr_name)) == NULL)
+ return(-ENODEV);
+#endif
+ switch(getset)
+ {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr.ifr_flags = (dev->flags & ~IFF_SOFTHEADERS);
+ goto rarok;
+
+ case SIOCSIFFLAGS: /* Set interface flags */
+ {
+ int old_flags = dev->flags;
+
+ if(securelevel>0)
+ ifr.ifr_flags&=~IFF_PROMISC;
+ /*
+ * We are not allowed to potentially close/unload
+ * a device until we get this lock.
+ */
+
+ dev_lock_wait();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (ifr.ifr_flags & (
+ IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
+ | IFF_MULTICAST)) | (dev->flags & (IFF_SOFTHEADERS|IFF_UP));
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+
+ /*
+ * Have we downed the interface. We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ if ((old_flags^ifr.ifr_flags)&IFF_UP) /* Bit is different ? */
+ {
+ if(old_flags&IFF_UP) /* Gone down */
+ ret=dev_close(dev);
+ else /* Come up */
+ {
+ ret=dev_open(dev);
+ if(ret<0)
+ dev->flags&=~IFF_UP; /* Open failed */
+ }
+ }
+ else
+ ret=0;
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+ }
+ break;
+
+ case SIOCGIFADDR: /* Get interface address (and family) */
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+ }
+ else
+ {
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_port = 0;
+ }
+ goto rarok;
+
+ case SIOCSIFADDR: /* Set interface address (and family) */
+
+ /*
+ * BSDism. SIOCSIFADDR family=AF_UNSPEC sets the
+ * physical address. We can cope with this now.
+ */
+
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel>0)
+ return -EPERM;
+ ret=dev->set_mac_address(dev,&ifr.ifr_addr);
+ }
+ else
+ {
+ u32 new_pa_addr = (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr;
+ u16 new_family = ifr.ifr_addr.sa_family;
+
+ if (new_family == dev->family &&
+ new_pa_addr == dev->pa_addr) {
+ ret =0;
+ break;
+ }
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+
+ /*
+ * if dev is an alias, must rehash to update
+ * address change
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ net_alias_dev_rehash(dev ,&ifr.ifr_addr);
+#endif
+ dev->pa_addr = new_pa_addr;
+ dev->family = new_family;
+
+#ifdef CONFIG_INET
+ /* This is naughty. When net-032e comes out It wants moving into the net032
+ code not the kernel. Till then it can sit here (SIGH) */
+ if (!dev->pa_mask)
+ dev->pa_mask = ip_get_mask(dev->pa_addr);
+#endif
+ if (!dev->pa_brdaddr)
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFBRDADDR: /* Get the broadcast address */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+ dev->pa_brdaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
+ dev->pa_dstaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ {
+ unsigned long mask = (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr;
+ ret = -EINVAL;
+ /*
+ * The mask we set must be legal.
+ */
+ if (bad_mask(mask,0))
+ break;
+ dev->pa_mask = mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
+
+ ifr.ifr_metric = dev->metric;
+ goto rarok;
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
+ dev->metric = ifr.ifr_metric;
+ ret=0;
+ break;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr.ifr_mtu = dev->mtu;
+ goto rarok;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+
+ if (dev->change_mtu)
+ ret = dev->change_mtu(dev, ifr.ifr_mtu);
+ else
+ {
+ /*
+ * MTU must be positive.
+ */
+
+ if(ifr.ifr_mtu<68)
+ return -EINVAL;
+
+ dev->mtu = ifr.ifr_mtu;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
+ do not support it */
+ ret = -EINVAL;
+ break;
+
+ case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
+ ret = -EINVAL;
+ break;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+
+ case SIOCSIFHWADDR:
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel > 0)
+ return -EPERM;
+ if(ifr.ifr_hwaddr.sa_family!=dev->type)
+ return -EINVAL;
+ ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
+ break;
+
+ case SIOCGIFMAP:
+ ifr.ifr_map.mem_start=dev->mem_start;
+ ifr.ifr_map.mem_end=dev->mem_end;
+ ifr.ifr_map.base_addr=dev->base_addr;
+ ifr.ifr_map.irq=dev->irq;
+ ifr.ifr_map.dma=dev->dma;
+ ifr.ifr_map.port=dev->if_port;
+ goto rarok;
+
+ case SIOCSIFMAP:
+ if(dev->set_config==NULL)
+ return -EOPNOTSUPP;
+ return dev->set_config(dev,&ifr.ifr_map);
+
+ case SIOCADDMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
+ return 0;
+
+ case SIOCDELMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
+ return 0;
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if((getset >= SIOCDEVPRIVATE) &&
+ (getset <= (SIOCDEVPRIVATE + 15))) {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ break;
+ }
+
+#ifdef CONFIG_NET_RADIO
+ if((getset >= SIOCIWFIRST) &&
+ (getset <= SIOCIWLAST))
+ {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ /* Perform the ioctl */
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ /* If return args... */
+ if(IW_IS_GET(getset))
+ memcpy_tofs(arg, &ifr,
+ sizeof(struct ifreq));
+ break;
+ }
+#endif /* CONFIG_NET_RADIO */
+
+ ret = -EINVAL;
+ }
+ return(ret);
+/*
+ * The load of calls that return an ifreq and ok (saves memory).
+ */
+rarok:
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ return 0;
+}
+
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+int dev_ioctl(unsigned int cmd, void *arg)
+{
+ switch(cmd)
+ {
+ case SIOCGIFCONF:
+ (void) dev_ifconf((char *) arg);
+ return 0;
+
+ /*
+ * Ioctl calls that can be done by all.
+ */
+
+ case SIOCGIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ return dev_ifsioc(arg, cmd);
+
+ /*
+ * Ioctl calls requiring the power of a superuser
+ */
+
+ case SIOCSIFFLAGS:
+ case SIOCSIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMEM:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMAP:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+
+ default:
+ if((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15))) {
+ return dev_ifsioc(arg, cmd);
+ }
+#ifdef CONFIG_NET_RADIO
+ if((cmd >= SIOCIWFIRST) &&
+ (cmd <= SIOCIWLAST))
+ {
+ if((IW_IS_SET(cmd)) && (!suser()))
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+ }
+#endif /* CONFIG_NET_RADIO */
+ return -EINVAL;
+ }
+}
+
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+extern int lance_init(void);
+extern int pi_init(void);
+extern int pt_init(void);
+extern int bpq_init(void);
+extern void sdla_setup(void);
+extern int dlci_setup(void);
+extern int sm_init(void);
+extern int baycom_init(void);
+
+int net_dev_init(void)
+{
+ struct device *dev, **dp;
+
+ /*
+ * Initialise the packet receive queue.
+ */
+
+ skb_queue_head_init(&backlog);
+
+ /*
+ * The bridge has to be up before the devices
+ */
+
+#ifdef CONFIG_BRIDGE
+ br_init();
+#endif
+
+ /*
+ * This is Very Ugly(tm).
+ *
+ * Some devices want to be initialized early..
+ */
+#if defined(CONFIG_PI)
+ pi_init();
+#endif
+#if defined(CONFIG_PT)
+ pt_init();
+#endif
+#if defined(CONFIG_BPQETHER)
+ bpq_init();
+#endif
+#if defined(CONFIG_DLCI)
+ dlci_setup();
+#endif
+#if defined(CONFIG_SDLA)
+ sdla_setup();
+#endif
+#if defined(CONFIG_BAYCOM)
+ baycom_init();
+#endif
+#if defined(CONFIG_SOUNDMODEM)
+ sm_init();
+#endif
+ /*
+ * SLHC if present needs attaching so other people see it
+ * even if not opened.
+ */
+#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
+ || defined(CONFIG_PPP) \
+ || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
+ slhc_install();
+#endif
+
+ /*
+ * Add the devices.
+ * If the call to dev->init fails, the dev is removed
+ * from the chain disconnecting the device until the
+ * next reboot.
+ */
+
+ dp = &dev_base;
+ while ((dev = *dp) != NULL)
+ {
+ int i;
+ for (i = 0; i < DEV_NUMBUFFS; i++) {
+ skb_queue_head_init(dev->buffs + i);
+ }
+
+ if (dev->init && dev->init(dev))
+ {
+ /*
+ * It failed to come up. Unhook it.
+ */
+ *dp = dev->next;
+ }
+ else
+ {
+ dp = &dev->next;
+ }
+ }
+
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_DEV, 3, "dev",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_info
+ });
+#endif
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_WIRELESS, 8, "wireless",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_wireless_info
+ });
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+ /*
+ * Initialise net_alias engine
+ *
+ * - register net_alias device notifier
+ * - register proc entries: /proc/net/alias_types
+ * /proc/net/aliases
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ net_alias_init();
+#endif
+
+ init_bh(NET_BH, net_bh);
+ return 0;
+}
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 0000000..6e86af1
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1 @@
+/test-mbchk
diff --git a/tests/Makefrag.am b/tests/Makefrag.am
new file mode 100644
index 0000000..faabdf4
--- /dev/null
+++ b/tests/Makefrag.am
@@ -0,0 +1,34 @@
+# Makefile fragment for the test suite.
+
+# Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Tests.
+#
+
+if !PLATFORM_xen
+
+include tests/user-qemu.mk
+
+TESTS += \
+ tests/test-multiboot \
+ $(USER_TESTS)
+
+clean-local: $(USER_TESTS_CLEAN)
+ rm -fr tests/include-mach
+
+endif # !PLATFORM_xen
diff --git a/tests/README b/tests/README
new file mode 100644
index 0000000..3dacc18
--- /dev/null
+++ b/tests/README
@@ -0,0 +1,37 @@
+
+There are some basic tests that can be run with qemu. You can run all the tests with
+
+ $ make check
+
+or selectively with:
+
+ $ make run-hello
+
+Also, you can debug the existing tests, or a new one, by starting on one shell
+
+ $ make debug-hello
+
+and on another shell you can attach with gdb, load the symbols of the
+bootstrap module and break on its _start():
+
+ $ gdb gnumach
+ ...
+ (gdb) target remote :1234
+ ...
+ (gdb) b setup_main
+ Breakpoint 11 at 0xffffffff81019d60: file ../kern/startup.c, line 98.
+ (gdb) c
+ Continuing.
+
+ Breakpoint 11, setup_main () at ../kern/startup.c:98
+ 98 cninit();
+ (gdb) add-symbol-file ../gnumach/build-64/module-task
+ Reading symbols from ../gnumach/build-64/module-task...
+ (gdb) b _start
+ Breakpoint 12 at 0x40324a: _start. (2 locations)
+ (gdb) c
+ Continuing.
+
+ Breakpoint 12, _start () at ../tests/testlib.c:96
+ 96 {
+ (gdb)
diff --git a/tests/configfrag.ac b/tests/configfrag.ac
new file mode 100644
index 0000000..de87cba
--- /dev/null
+++ b/tests/configfrag.ac
@@ -0,0 +1,43 @@
+dnl Configure fragment for the test suite.
+
+dnl Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+
+dnl This program is free software; you can redistribute it and/or modify it
+dnl under the terms of the GNU General Public License as published by the
+dnl Free Software Foundation; either version 2, or (at your option) any later
+dnl version.
+dnl
+dnl This program is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received a copy of the GNU General Public License along
+dnl with this program; if not, write to the Free Software Foundation, Inc.,
+dnl 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Tests.
+#
+
+AC_CONFIG_FILES([tests/test-multiboot], [chmod +x tests/test-multiboot])
+
+
+[if test x"$enable_user32" = xyes ; then
+ ac_miguser=$user32_cpu-gnu-mig
+else
+ ac_miguser=$host_cpu-gnu-mig
+fi]
+
+AC_CHECK_PROG([USER_MIG], [$ac_miguser], [$ac_miguser])
+AC_ARG_VAR([USER_MIG], [Path to the mig tool for user-space tests])
+AC_CHECK_PROG([USER_CC], [$CC], [$CC], [none])
+AC_ARG_VAR([USER_CC], [C compiler command for user-space tests])
+AC_CHECK_PROG([USER_CPP], [$CPP], [$CPP], [none])
+AC_ARG_VAR([USER_CPP], [C preprocessor for user-space tests])
+AC_ARG_VAR([USER_CFLAGS], [C compiler flags for user-space tests])
+AC_ARG_VAR([USER_CPPFLAGS], [C preprocessor flags for user-space tests])
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/tests/grub.cfg.single.template b/tests/grub.cfg.single.template
new file mode 100644
index 0000000..4432be3
--- /dev/null
+++ b/tests/grub.cfg.single.template
@@ -0,0 +1,4 @@
+echo TEST_START_MARKER
+multiboot /boot/gnumach GNUMACHARGS
+module /boot/BOOTMODULE BOOTMODULE '${host-port}' '${device-port}' '$(task-create)' '$(task-resume)'
+boot
diff --git a/tests/include/device/cons.h b/tests/include/device/cons.h
new file mode 100644
index 0000000..f4d8fe1
--- /dev/null
+++ b/tests/include/device/cons.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef CONS_H
+#define CONS_H
+
+#include <mach/machine/vm_types.h>
+
+void cnputc(char c, vm_offset_t cookie);
+static inline int cngetc() { return 0; }
+
+#endif /* CONS_H */
diff --git a/tests/include/kern/printf.h b/tests/include/kern/printf.h
new file mode 120000
index 0000000..c61f3e0
--- /dev/null
+++ b/tests/include/kern/printf.h
@@ -0,0 +1 @@
+../../../kern/printf.h \ No newline at end of file
diff --git a/tests/include/mach/mig_support.h b/tests/include/mach/mig_support.h
new file mode 100644
index 0000000..bf67008
--- /dev/null
+++ b/tests/include/mach/mig_support.h
@@ -0,0 +1,71 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Abstract:
+ * MIG helpers for gnumach tests, mainly copied from glibc
+ *
+ */
+
+#ifndef _MACH_MIG_SUPPORT_H_
+#define _MACH_MIG_SUPPORT_H_
+
+#include <string.h>
+
+#include <mach/message.h>
+#include <mach/mach_types.h>
+
+#include <syscalls.h>
+
+static inline void mig_init(void *_first)
+{}
+
+static inline void mig_allocate(vm_address_t *addr, vm_size_t size)
+{
+ if (syscall_vm_allocate(mach_task_self(), addr, size, 1) != KERN_SUCCESS)
+ *addr = 0;
+}
+static inline void mig_deallocate(vm_address_t addr, vm_size_t size)
+{
+ syscall_vm_deallocate (mach_task_self(), addr, size);
+}
+static inline void mig_dealloc_reply_port(mach_port_t port)
+{}
+static inline void mig_put_reply_port(mach_port_t port)
+{}
+static inline mach_port_t mig_get_reply_port(void)
+{
+ return mach_reply_port();
+}
+static inline void mig_reply_setup(const mach_msg_header_t *_request,
+ mach_msg_header_t *reply)
+{}
+
+static inline vm_size_t mig_strncpy (char *dst, const char *src, vm_size_t len)
+{
+ return dst - strncpy(dst, src, len);
+}
+
+#endif /* not defined(_MACH_MIG_SUPPORT_H_) */
diff --git a/tests/include/syscalls.h b/tests/include/syscalls.h
new file mode 100644
index 0000000..f958154
--- /dev/null
+++ b/tests/include/syscalls.h
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Abstract:
+ * Syscall functions
+ *
+ */
+
+#ifndef _SYSCALLS_
+#define _SYSCALLS_
+
+#include <device/device_types.h>
+#include <mach/message.h>
+
+// TODO: there is probably a better way to define these
+
+#define MACH_SYSCALL0(syscallid, retval, name) \
+ retval name(void) __attribute__((naked));
+
+#define MACH_SYSCALL1(syscallid, retval, name, arg1) \
+ retval name(arg1 a1) __attribute__((naked));
+
+#define MACH_SYSCALL2(syscallid, retval, name, arg1, arg2) \
+ retval name(arg1 a1, arg2 a2) __attribute__((naked));
+
+#define MACH_SYSCALL3(syscallid, retval, name, arg1, arg2, arg3) \
+ retval name(arg1 a1, arg2 a2, arg3 a3) __attribute__((naked));
+
+#define MACH_SYSCALL4(syscallid, retval, name, arg1, arg2, arg3, arg4) \
+ retval name(arg1 a1, arg2 a2, arg3 a3, arg4 a4) __attribute__((naked));
+
+#define MACH_SYSCALL6(syscallid, retval, name, arg1, arg2, arg3, arg4, arg5, arg6) \
+ retval name(arg1 a1, arg2 a2, arg3 a3, arg4 a4, arg5 a5, arg6 a6) __attribute__((naked));
+
+#define MACH_SYSCALL7(syscallid, retval, name, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
+ retval name(arg1 a1, arg2 a2, arg3 a3, arg4 a4, arg5 a5, arg6 a6, arg7 a7) __attribute__((naked));
+
+#define mach_msg mach_msg_trap
+
+MACH_SYSCALL0(26, mach_port_name_t, mach_reply_port)
+MACH_SYSCALL0(27, mach_port_name_t, mach_thread_self)
+MACH_SYSCALL0(28, mach_port_name_t, mach_task_self)
+MACH_SYSCALL0(29, mach_port_name_t, mach_host_self)
+MACH_SYSCALL1(30, void, mach_print, const char*)
+MACH_SYSCALL0(31, kern_return_t, invalid_syscall)
+MACH_SYSCALL4(65, kern_return_t, syscall_vm_allocate, mach_port_t, vm_offset_t*, vm_size_t, boolean_t)
+MACH_SYSCALL3(66, kern_return_t, syscall_vm_deallocate, mach_port_t, vm_offset_t, vm_size_t)
+MACH_SYSCALL3(72, kern_return_t, syscall_mach_port_allocate, mach_port_t, mach_port_right_t, mach_port_t*)
+MACH_SYSCALL2(73, kern_return_t, syscall_mach_port_deallocate, mach_port_t, mach_port_t)
+
+/*
+ todo: swtch_pri swtch ...
+ these seem obsolete: evc_wait
+ evc_wait_clear syscall_device_writev_request
+ syscall_device_write_request ...
+ */
+MACH_SYSCALL6(40, io_return_t, syscall_device_write_request, mach_port_name_t,
+ mach_port_name_t, dev_mode_t, recnum_t, vm_offset_t, vm_size_t)
+
+#endif /* _SYSCALLS_ */
diff --git a/tests/include/testlib.h b/tests/include/testlib.h
new file mode 100644
index 0000000..a3f3a6a
--- /dev/null
+++ b/tests/include/testlib.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef TESTLIB_H
+#define TESTLIB_H
+
+// in freestanding we can only use a few standard headers
+// float.h iso646.h limits.h stdarg.h stdbool.h stddef.h stdint.h
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdbool.h>
+
+#include <string.h> // we shouldn't include this from gcc, but it seems to be ok
+
+#include <mach/mach_types.h>
+#include <kern/printf.h>
+#include <util/atoi.h>
+#include <syscalls.h>
+
+#define ASSERT(cond, msg) do { \
+ if (!(cond)) \
+ { \
+ printf("%s: " #cond " failed: %s\n", \
+ TEST_FAILURE_MARKER, (msg)); \
+ halt(); \
+ } \
+ } while (0)
+
+#define ASSERT_RET(ret, msg) do { \
+ if ((ret) != KERN_SUCCESS) \
+ { \
+ printf("%s %s (0x%x): %s\n", \
+ TEST_FAILURE_MARKER, e2s((ret)), (ret), (msg)); \
+ halt(); \
+ } \
+ } while (0)
+
+#define FAILURE(msg) do { \
+ printf("%s: %s\n", TEST_FAILURE_MARKER, (msg)); \
+ halt(); \
+ } while (0)
+
+
+extern const char* TEST_SUCCESS_MARKER;
+extern const char* TEST_FAILURE_MARKER;
+
+const char* e2s(int err);
+const char* e2s_gnumach(int err);
+void halt();
+int msleep(uint32_t timeout);
+thread_t test_thread_start(task_t task, void(*routine)(void*), void* arg);
+
+mach_port_t host_priv(void);
+mach_port_t device_priv(void);
+
+int main(int argc, char *argv[], int envc, char *envp[]);
+
+#endif /* TESTLIB_H */
diff --git a/tests/include/util/atoi.h b/tests/include/util/atoi.h
new file mode 120000
index 0000000..c32c258
--- /dev/null
+++ b/tests/include/util/atoi.h
@@ -0,0 +1 @@
+../../../util/atoi.h \ No newline at end of file
diff --git a/tests/run-qemu.sh.template b/tests/run-qemu.sh.template
new file mode 100644
index 0000000..aba8d68
--- /dev/null
+++ b/tests/run-qemu.sh.template
@@ -0,0 +1,38 @@
+#!/bin/sh
+# Copyright (C) 2024 Free Software Foundation
+#
+# This program is free software ; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation ; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY ; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with the program ; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+set -e
+
+cmd="QEMU_BIN QEMU_OPTS -cdrom tests/test-TESTNAME.iso"
+log="tests/test-TESTNAME.raw"
+
+echo "temp log $log"
+if which QEMU_BIN >/dev/null ; then
+ if ! timeout -v --foreground --kill-after=3 15s $cmd \
+ | tee $log | sed -n "/TEST_START_MARKER/"',$p' ; then
+ exit 10 # timeout
+ fi
+ if grep -qi 'TEST_FAILURE_MARKER' $log; then
+ exit 99 # error marker found, test explicitly failed
+ fi
+ if ! grep -q 'TEST_SUCCESS_MARKER' $log; then
+ exit 12 # missing reboot marker, maybe the kernel crashed
+ fi
+else
+ echo "skipping, QEMU_BIN not found"
+ exit 77
+fi
diff --git a/tests/start.S b/tests/start.S
new file mode 100644
index 0000000..b795bfb
--- /dev/null
+++ b/tests/start.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+ .global _start
+_start:
+#ifdef __i386__
+ pushl %esp
+ call c_start
+#endif /* __i386__ */
+#ifdef __x86_64__
+ movq %rsp,%rdi
+ callq c_start
+#endif /* __x86_64__ */
diff --git a/tests/syscalls.S b/tests/syscalls.S
new file mode 100644
index 0000000..df9c9bc
--- /dev/null
+++ b/tests/syscalls.S
@@ -0,0 +1,4 @@
+
+ #include <mach/syscall_sw.h>
+
+ kernel_trap(invalid_syscall,-31,0)
diff --git a/tests/test-gsync.c b/tests/test-gsync.c
new file mode 100644
index 0000000..a516065
--- /dev/null
+++ b/tests/test-gsync.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <syscalls.h>
+#include <testlib.h>
+
+#include <mach/machine/vm_param.h>
+#include <mach/std_types.h>
+#include <mach/mach_types.h>
+#include <mach/vm_wire.h>
+
+#include <mach.user.h>
+#include <gnumach.user.h>
+
+/* Gsync flags. */
+#ifndef GSYNC_SHARED
+# define GSYNC_SHARED 0x01
+# define GSYNC_QUAD 0x02
+# define GSYNC_TIMED 0x04
+# define GSYNC_BROADCAST 0x08
+# define GSYNC_MUTATE 0x10
+#endif
+
+static uint32_t single_shared;
+static struct {
+ uint32_t val1;
+ uint32_t val2;
+} single_shared_quad;
+
+static void test_single()
+{
+ int err;
+ single_shared = 0;
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared, 0, 0, 100, GSYNC_TIMED);
+ ASSERT(err == KERN_TIMEDOUT, "gsync_wait did not timeout");
+
+ single_shared = 1;
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared, 0, 0, 100, GSYNC_TIMED);
+ ASSERT(err == KERN_INVALID_ARGUMENT, "gsync_wait on wrong value");
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared, 1, 0, 100, GSYNC_TIMED);
+ ASSERT(err == KERN_TIMEDOUT, "gsync_wait again on correct value did not timeout");
+
+ single_shared_quad.val1 = 1;
+ single_shared_quad.val2 = 2;
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared_quad, 99, 88,
+ 100, GSYNC_TIMED | GSYNC_QUAD);
+ ASSERT(err == KERN_INVALID_ARGUMENT, "gsync_wait on wrong quad value");
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared_quad, 1, 2,
+ 100, GSYNC_TIMED | GSYNC_QUAD);
+ ASSERT(err == KERN_TIMEDOUT, "gsync_wait again on correct value did not timeout");
+
+ err = gsync_wake(mach_task_self(), (vm_offset_t)&single_shared, 0, 0);
+ ASSERT_RET(err, "gsync_wake with nobody waiting");
+
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared, 1, 0, 100, GSYNC_TIMED);
+ ASSERT(err == KERN_TIMEDOUT, "gsync_wait not affected by previous gsync_wake");
+
+ err = gsync_wake(mach_task_self(), (vm_offset_t)&single_shared, 0, GSYNC_BROADCAST);
+ ASSERT_RET(err, "gsync_wake broadcast with nobody waiting");
+
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared, 1, 0, 100, GSYNC_TIMED);
+ ASSERT(err == KERN_TIMEDOUT, "gsync_wait not affected by previous gsync_wake");
+
+ err = gsync_wake(mach_task_self(), (vm_offset_t)&single_shared, 2, GSYNC_MUTATE);
+ ASSERT_RET(err, "gsync_wake nobody + mutate");
+ ASSERT(single_shared == 2, "gsync_wake single_shared did not mutate");
+
+ err = gsync_wake(mach_task_self(), (vm_offset_t)&single_shared, 0, 0);
+ ASSERT_RET(err, "gsync_wake nobody without mutate");
+ err = gsync_wake(mach_task_self(), (vm_offset_t)&single_shared, 0, 0);
+ ASSERT_RET(err, "gsync_wake 3a");
+}
+
+static void single_thread_setter(void *arg)
+{
+ int err;
+ int val = (long)arg;
+
+ /* It should be enough to sleep a bit for our creator to call
+ gsync_wait(). To verify that the test is performed with the
+ correct sequence, we also change the value so if the wait is
+ called after our wake it will fail with KERN_INVALID_ARGUMENT */
+ msleep(100);
+
+ err = gsync_wake(mach_task_self(), (vm_offset_t)&single_shared, val, GSYNC_MUTATE);
+ ASSERT_RET(err, "gsync_wake from thread + mutate");
+
+ thread_terminate(mach_thread_self());
+ FAILURE("thread_terminate");
+}
+
+static void test_single_from_thread()
+{
+ int err;
+ single_shared = 10;
+ test_thread_start(mach_task_self(), single_thread_setter, (void*)11);
+ err = gsync_wait(mach_task_self(), (vm_offset_t)&single_shared, 10, 0, 0, 0);
+ ASSERT_RET(err, "gsync_wait without timeout for wake from another thread");
+ ASSERT(single_shared == 11, "wake didn't mutate");
+}
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ test_single_from_thread();
+ test_single();
+ return 0;
+}
diff --git a/tests/test-hello.c b/tests/test-hello.c
new file mode 100644
index 0000000..0d739c6
--- /dev/null
+++ b/tests/test-hello.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <testlib.h>
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ int ret = printf("hello!!\n");
+ ASSERT_RET(ret, "printf() should return 0 here");
+ return 0;
+}
diff --git a/tests/test-mach_host.c b/tests/test-mach_host.c
new file mode 100644
index 0000000..53f3024
--- /dev/null
+++ b/tests/test-mach_host.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <testlib.h>
+
+#include <mach_host.user.h>
+
+void test_kernel_version()
+{
+ int err;
+ kernel_version_t kver;
+ err = host_get_kernel_version(mach_host_self(), kver);
+ ASSERT_RET(err, "host_kernel_info");
+ printf("kernel version: %s\n", kver);
+}
+
+void test_host_info()
+{
+ int err;
+ mach_msg_type_number_t count;
+ mach_port_t thishost = mach_host_self();
+
+ host_basic_info_data_t binfo;
+ count = HOST_BASIC_INFO_COUNT;
+ err = host_info(thishost, HOST_BASIC_INFO, (host_info_t)&binfo, &count);
+ ASSERT_RET(err, "host_basic_info");
+ ASSERT(count == HOST_BASIC_INFO_COUNT, "");
+ ASSERT(binfo.max_cpus > 0, "no cpu?");
+ ASSERT(binfo.avail_cpus > 0, "no cpu available?");
+ ASSERT(binfo.memory_size > (1024 * 1024), "memory too low");
+
+ const int maxcpus = 255;
+ int proc_slots[maxcpus];
+ count = maxcpus;
+ err = host_info(thishost, HOST_PROCESSOR_SLOTS, (host_info_t)&proc_slots, &count);
+ ASSERT_RET(err, "host_processor_slots");
+ ASSERT((1 <= count) && (count <= maxcpus), "");
+
+ host_sched_info_data_t sinfo;
+ count = HOST_SCHED_INFO_COUNT;
+ err = host_info(thishost, HOST_SCHED_INFO, (host_info_t)&sinfo, &count);
+ ASSERT_RET(err, "host_sched_info");
+ ASSERT(count == HOST_SCHED_INFO_COUNT, "");
+ ASSERT(sinfo.min_timeout < 1000, "timeout unexpectedly high");
+ ASSERT(sinfo.min_quantum < 1000, "quantum unexpectedly high");
+
+ host_load_info_data_t linfo;
+ count = HOST_LOAD_INFO_COUNT;
+ err = host_info(thishost, HOST_LOAD_INFO, (host_info_t)&linfo, &count);
+ ASSERT_RET(err, "host_load_info");
+ ASSERT(count == HOST_LOAD_INFO_COUNT, "");
+ for (int i=0; i<3; i++)
+ {
+ printf("avenrun %d\n", linfo.avenrun[i]);
+ printf("mach_factor %d\n", linfo.mach_factor[i]);
+ }
+}
+
+// TODO processor sets
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ test_kernel_version();
+ test_host_info();
+ return 0;
+}
diff --git a/tests/test-mach_port.c b/tests/test-mach_port.c
new file mode 100644
index 0000000..81a1072
--- /dev/null
+++ b/tests/test-mach_port.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <mach/message.h>
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+
+#include <syscalls.h>
+#include <testlib.h>
+
+#include <mach.user.h>
+#include <mach_port.user.h>
+
+void test_mach_port(void)
+{
+ kern_return_t err;
+
+ mach_port_name_t *namesp;
+ mach_msg_type_number_t namesCnt=0;
+ mach_port_type_t *typesp;
+ mach_msg_type_number_t typesCnt=0;
+ err = mach_port_names(mach_task_self(), &namesp, &namesCnt, &typesp, &typesCnt);
+ ASSERT_RET(err, "mach_port_names");
+ printf("mach_port_names: type/name length: %d %d\n", namesCnt, typesCnt);
+ ASSERT((namesCnt != 0) && (namesCnt == typesCnt),
+ "mach_port_names: wrong type/name length");
+ for (int i=0; i<namesCnt; i++)
+ printf("port name %d type %x\n", namesp[i], typesp[i]);
+
+ /*
+ * test mach_port_type()
+ * use the ports we have already as bootstrap modules
+ * maybe we could do more checks on the bootstrap ports, on other modules
+ */
+ mach_port_type_t pt;
+ err = mach_port_type(mach_task_self(), host_priv(), &pt);
+ ASSERT_RET(err, "mach_port_type host_priv");
+ ASSERT(pt == MACH_PORT_TYPE_SEND, "wrong type for host_priv");
+
+ err = mach_port_type(mach_task_self(), device_priv(), &pt);
+ ASSERT_RET(err, "mach_port_type device_priv");
+ ASSERT(pt == MACH_PORT_TYPE_SEND, "wrong type for device_priv");
+
+ err = mach_port_rename(mach_task_self(), device_priv(), 111);
+ ASSERT_RET(err, "mach_port_rename");
+ // FIXME: it seems we can't restore the old name
+ err = mach_port_rename(mach_task_self(), 111, 112);
+ ASSERT_RET(err, "mach_port_rename restore orig");
+
+ const mach_port_t nrx = 1000, nset = 1001, ndead = 1002;
+ err = mach_port_allocate_name(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, nrx);
+ ASSERT_RET(err, "mach_port_allocate_name rx");
+ err = mach_port_allocate_name(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, nset);
+ ASSERT_RET(err, "mach_port_allocate_name pset");
+ err = mach_port_allocate_name(mach_task_self(), MACH_PORT_RIGHT_DEAD_NAME, ndead);
+ ASSERT_RET(err, "mach_port_allocate_name dead");
+
+ // set to a valid name to check it's really allocated to a new one
+ mach_port_t newname = nrx, oldname = nrx;
+ err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &newname);
+ ASSERT_RET(err, "mach_port_allocate");
+ ASSERT(newname != nrx, "allocated name didn't change");
+
+ oldname = newname;
+ newname = nrx;
+ err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &newname);
+ ASSERT_RET(err, "mach_port_allocate");
+ ASSERT(newname != nrx, "allocated name didn't change");
+ ASSERT(newname != oldname, "allocated name is duplicated");
+
+ oldname = newname;
+ newname = nrx;
+ err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_DEAD_NAME, &newname);
+ ASSERT_RET(err, "mach_port_allocate");
+ ASSERT(newname != nrx, "allocated name didn't change");
+ ASSERT(newname != oldname, "allocated name is duplicated");
+
+ err = mach_port_destroy(mach_task_self(), newname);
+ ASSERT_RET(err, "mach_port_destroy");
+
+ mach_port_urefs_t urefs;
+ err = mach_port_get_refs(mach_task_self(), nrx, MACH_PORT_RIGHT_RECEIVE, &urefs);
+ ASSERT_RET(err, "mach_port_get_refs");
+ ASSERT(urefs == 1, "rx port urefs");
+ err = mach_port_get_refs(mach_task_self(), nset, MACH_PORT_RIGHT_PORT_SET, &urefs);
+ ASSERT_RET(err, "mach_port_get_refs");
+ ASSERT(urefs == 1, "pset port urefs");
+ err = mach_port_get_refs(mach_task_self(), ndead, MACH_PORT_RIGHT_DEAD_NAME, &urefs);
+ ASSERT_RET(err, "mach_port_get_refs");
+ ASSERT(urefs == 1, "dead port urefs");
+
+ err = mach_port_destroy(mach_task_self(), nrx);
+ ASSERT_RET(err, "mach_port_destroy rx");
+ err = mach_port_destroy(mach_task_self(), nset);
+ ASSERT_RET(err, "mach_port_destroy pset");
+ err = mach_port_deallocate(mach_task_self(), ndead);
+ ASSERT_RET(err, "mach_port_deallocate dead");
+
+ // TODO test other rpc
+}
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ test_mach_port();
+ return 0;
+}
diff --git a/tests/test-machmsg.c b/tests/test-machmsg.c
new file mode 100644
index 0000000..60f3f49
--- /dev/null
+++ b/tests/test-machmsg.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <mach/message.h>
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+
+#include <syscalls.h>
+#include <testlib.h>
+
+#include <mach.user.h>
+#include <mach_port.user.h>
+#include <mach_host.user.h>
+
+#define ECHO_MAX_BODY_LEN 256
+
+static uint32_t align(uint32_t val, size_t aln)
+{
+ // we should check aln is a power of 2
+ aln--;
+ return (val + aln) & (~aln);
+}
+
+#define ALIGN_INLINE(val, n) { (val) = align((val), (n)); }
+
+struct echo_params
+{
+ mach_port_t rx_port;
+ mach_msg_size_t rx_size;
+ mach_msg_size_t rx_number;
+};
+
+void echo_thread (void *arg)
+{
+ struct echo_params *params = arg;
+ int err;
+ struct message
+ {
+ mach_msg_header_t header;
+ char body[ECHO_MAX_BODY_LEN];
+ } message;
+
+ printf ("thread echo started\n");
+ for (mach_msg_size_t i=0; i<params->rx_number; i++)
+ {
+ message.header.msgh_local_port = params->rx_port;
+ message.header.msgh_size = sizeof (message);
+
+ err = mach_msg (&message.header,
+ MACH_RCV_MSG,
+ 0, sizeof (message),
+ params->rx_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ ASSERT_RET(err, "mach_msg echo rx");
+ printf("echo rx %d expected %d\n",
+ message.header.msgh_size, params->rx_size);
+ ASSERT(message.header.msgh_size == params->rx_size,
+ "wrong size in echo rx");
+
+ message.header.msgh_local_port = MACH_PORT_NULL;
+ printf ("echo: msgh_id %d\n", message.header.msgh_id);
+
+ err = mach_msg (&message.header,
+ MACH_SEND_MSG,
+ message.header.msgh_size, 0,
+ MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ ASSERT_RET(err, "mach_msg echo tx");
+ }
+ printf ("thread echo stopped\n");
+ thread_terminate (mach_thread_self ());
+ FAILURE("thread_terminate");
+}
+
+#define TEST_ITERATIONS 3
+
+// TODO run_test_iterations
+void
+test_iterations (void)
+{
+ mach_port_t port, receive;
+ int err;
+ struct message
+ {
+ mach_msg_header_t header;
+ mach_msg_type_t type;
+ char data[64];
+ } message;
+
+ err = mach_port_allocate (mach_task_self (),
+ MACH_PORT_RIGHT_RECEIVE, &port);
+ ASSERT_RET(err, "mach_port_allocate");
+
+ err = mach_port_allocate (mach_task_self (),
+ MACH_PORT_RIGHT_RECEIVE, &receive);
+ ASSERT_RET(err, "mach_port_allocate 2");
+
+ struct echo_params params;
+ params.rx_port = port;
+ params.rx_size = sizeof(message.header) + sizeof(message.type) + 5;
+ ALIGN_INLINE(params.rx_size, MACH_MSG_USER_ALIGNMENT);
+ params.rx_number = TEST_ITERATIONS;
+ test_thread_start (mach_task_self (), echo_thread, &params);
+
+ time_value_t start_time;
+ err = host_get_time (mach_host_self (), &start_time);
+ ASSERT_RET(err, "host_get_time");
+
+ /* Send a message down the port */
+ for (int i = 0; i < TEST_ITERATIONS; i++)
+ {
+ struct message message;
+
+ memset (&message, 0, sizeof (message));
+ strcpy (message.data, "ciao");
+ size_t datalen = strlen (message.data) + 1;
+
+ message.header.msgh_bits
+ = MACH_MSGH_BITS (MACH_MSG_TYPE_MAKE_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ message.header.msgh_remote_port = port; /* Request port */
+ message.header.msgh_local_port = receive; /* Reply port */
+ message.header.msgh_id = 123; /* Message id */
+ message.header.msgh_size = sizeof (message.header) + sizeof (message.type) + datalen; /* Message size */
+ ALIGN_INLINE(message.header.msgh_size, 4);
+ message.type.msgt_name = MACH_MSG_TYPE_STRING; /* Parameter type */
+ message.type.msgt_size = 8 * datalen; /* # Bits */
+ message.type.msgt_number = 1; /* Number of elements */
+ message.type.msgt_inline = TRUE; /* Inlined */
+ message.type.msgt_longform = FALSE; /* Shortform */
+ message.type.msgt_deallocate = FALSE; /* Do not deallocate */
+ message.type.msgt_unused = 0; /* = 0 */
+
+ /* Send the message on its way and wait for a reply. */
+ err = mach_msg (&message.header, /* The header */
+ MACH_SEND_MSG | MACH_RCV_MSG, /* Flags */
+ message.header.msgh_size, /* Send size */
+ sizeof (message), /* Max receive Size */
+ receive, /* Receive port */
+ MACH_MSG_TIMEOUT_NONE, /* No timeout */
+ MACH_PORT_NULL); /* No notification */
+ ASSERT_RET(err, "mach_msg txrx");
+ }
+
+ time_value_t stop_time;
+ err = host_get_time (mach_host_self (), &stop_time);
+ ASSERT_RET(err, "host_get_time");
+
+ printf ("start: %d.%06d\n", start_time.seconds, start_time.microseconds);
+ printf ("stop: %d.%06d\n", stop_time.seconds, stop_time.microseconds);
+}
+
+/*
+ Test a specific message type on tx, rx and rx-continue paths
+ We need to be able to create a thread for this, so some rpc must work.
+*/
+void
+run_test_simple(void *msg, mach_msg_size_t msglen, mach_msg_id_t msgid)
+{
+ mach_msg_header_t *head = msg;
+ mach_port_t port, receive;
+ int err;
+
+ err = syscall_mach_port_allocate (mach_task_self (),
+ MACH_PORT_RIGHT_RECEIVE, &port);
+ ASSERT_RET(err, "syscall_mach_port_allocate");
+
+ err = syscall_mach_port_allocate (mach_task_self (),
+ MACH_PORT_RIGHT_RECEIVE, &receive);
+ ASSERT_RET(err, "syscall_mach_port_allocate 2");
+
+ struct echo_params params;
+ params.rx_port = port;
+ params.rx_size = msglen;
+ params.rx_number = 1;
+ test_thread_start (mach_task_self (), echo_thread, &params);
+
+ head->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ head->msgh_remote_port = port;
+ head->msgh_local_port = receive;
+ head->msgh_id = msgid;
+ head->msgh_size = msglen;
+
+ err = mach_msg (msg,
+ MACH_SEND_MSG | MACH_RCV_MSG,
+ msglen,
+ msglen,
+ receive,
+ MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+ ASSERT_RET(err, "mach_msg txrx");
+
+ printf("size in final rx: %d expected %d\n", head->msgh_size, msglen);
+ ASSERT(head->msgh_size == msglen, "wrong size in final rx");
+}
+
+void
+run_test_simple_self(void *msg, mach_msg_size_t msglen, mach_msg_id_t msgid)
+{
+ mach_msg_header_t *head = msg;
+ mach_port_t port, receive;
+ int err;
+
+ err = syscall_mach_port_allocate (mach_task_self (),
+ MACH_PORT_RIGHT_RECEIVE, &port);
+ ASSERT_RET(err, "syscall_mach_port_allocate");
+
+ head->msgh_bits
+ = MACH_MSGH_BITS (MACH_MSG_TYPE_MAKE_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ /* head->msgh_bits */
+ /* = MACH_MSGH_BITS (MACH_MSG_TYPE_MAKE_SEND_ONCE, */
+ /* MACH_MSG_TYPE_COPY_SEND); */
+
+ head->msgh_bits |= MACH_MSGH_BITS_COMPLEX;
+ head->msgh_remote_port = port;
+ head->msgh_local_port = port;
+ head->msgh_id = msgid;
+ head->msgh_size = msglen;
+
+ err = mach_msg (msg,
+ MACH_SEND_MSG | MACH_RCV_MSG,
+ msglen,
+ msglen,
+ port,
+ MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+ ASSERT_RET(err, "mach_msg txrx");
+
+ printf("size in final rx: %d expected %d\n", head->msgh_size, msglen);
+ ASSERT(head->msgh_size == msglen, "wrong size in final rx\n");
+}
+
+
+void test_msg_string(void)
+{
+ struct message
+ {
+ mach_msg_header_t header;
+ mach_msg_type_t type;
+ char data[64];
+ } msg;
+ char *test_strings[] = {"123", "12345", "ciaociao"};
+
+ memset (&msg, 0, sizeof (struct message));
+ strcpy (msg.data, "ciao");
+ size_t datalen = strlen (msg.data) + 1;
+
+ int msgid = 123;
+ int msglen = sizeof (msg.header) + sizeof (msg.type) + datalen;
+ ALIGN_INLINE(msglen, MACH_MSG_USER_ALIGNMENT);
+ msg.type.msgt_name = MACH_MSG_TYPE_STRING;
+ msg.type.msgt_size = 8 * datalen;
+ msg.type.msgt_number = 1;
+ msg.type.msgt_inline = TRUE;
+ msg.type.msgt_longform = FALSE;
+ msg.type.msgt_deallocate = FALSE;
+ msg.type.msgt_unused = 0;
+
+ run_test_simple_self(&msg, msglen, msgid);
+ run_test_simple(&msg, msglen, msgid);
+}
+
+void test_msg_string2(void)
+{
+ struct message
+ {
+ mach_msg_header_t header;
+ mach_msg_type_t type;
+ char data[10];
+ mach_msg_type_t type2;
+ char data2[5];
+ } msg;
+ const int len1 = 10;
+ const int len2 = 5;
+
+ memset (&msg, 0, sizeof (struct message));
+ int msgid = 123;
+ int msglen = sizeof (msg.header) + sizeof (msg.type) + len1;
+ ALIGN_INLINE(msglen, MACH_MSG_USER_ALIGNMENT);
+ msglen += sizeof (msg.type2) + len2;
+ ALIGN_INLINE(msglen, MACH_MSG_USER_ALIGNMENT);
+ msg.type.msgt_name = MACH_MSG_TYPE_STRING;
+ msg.type.msgt_size = 8 * len1;
+ msg.type.msgt_number = 1;
+ msg.type.msgt_inline = TRUE;
+ msg.type.msgt_longform = FALSE;
+ msg.type.msgt_deallocate = FALSE;
+ msg.type.msgt_unused = 0;
+ memset (msg.data, 'c', len1);
+ msg.type2.msgt_name = MACH_MSG_TYPE_CHAR;
+ msg.type2.msgt_size = 8;
+ msg.type2.msgt_number = len2;
+ msg.type2.msgt_inline = TRUE;
+ msg.type2.msgt_longform = FALSE;
+ msg.type2.msgt_deallocate = FALSE;
+ msg.type2.msgt_unused = 0;
+ memset (msg.data2, 'x', len2);
+
+ run_test_simple_self(&msg, msglen, msgid);
+ run_test_simple(&msg, msglen, msgid);
+}
+
+
+void test_msg_ports(void)
+{
+ struct message
+ {
+ mach_msg_header_t head;
+ mach_msg_type_t type;
+ mach_port_t *portp;
+ } msg;
+ mach_port_t msgports[3];
+
+ memset (&msg, 0, sizeof (struct message));
+
+ int msgid = 123;
+ int msglen = sizeof (msg.head) + sizeof (msg.type) + sizeof(msg.portp);
+ msg.type.msgt_name = MACH_MSG_TYPE_MOVE_SEND;
+ msg.type.msgt_size = 8*sizeof(mach_port_t);
+ msg.type.msgt_number = 3;
+ msg.type.msgt_inline = FALSE;
+ msg.type.msgt_longform = FALSE;
+ msg.type.msgt_deallocate = FALSE;
+ msg.type.msgt_unused = 0;
+ msg.portp = msgports;
+ msgports[0] = mach_host_self();
+ msgports[1] = mach_task_self();
+ msgports[2] = mach_thread_self();
+
+ run_test_simple_self(&msg, msglen, msgid);
+ run_test_simple(&msg, msglen, msgid);
+}
+
+void test_msg_emptydesc(void)
+{
+ struct message
+ {
+ mach_msg_header_t header;
+ mach_msg_type_t type_empty;
+ vm_offset_t addr_empty;
+ mach_msg_type_t type;
+ char data[64];
+ } msg;
+
+ memset (&msg, 0, sizeof (struct message));
+ strcpy (msg.data, "ciao");
+ size_t datalen = strlen (msg.data) + 1;
+
+ int msgid = 123;
+ int msglen = sizeof (msg.header);
+ msglen += sizeof (msg.type_empty)+ sizeof (msg.addr_empty);
+ msglen += sizeof (msg.type) + datalen;
+ ALIGN_INLINE(msglen, MACH_MSG_USER_ALIGNMENT);
+ msg.type_empty.msgt_name = MACH_MSG_TYPE_STRING;
+ msg.type_empty.msgt_size = 8;
+ msg.type_empty.msgt_number = 0;
+ msg.type_empty.msgt_inline = FALSE;
+ msg.type_empty.msgt_longform = FALSE;
+ msg.type_empty.msgt_deallocate = FALSE;
+ msg.type_empty.msgt_unused = 0;
+ msg.addr_empty = 0;
+
+ msg.type.msgt_name = MACH_MSG_TYPE_STRING;
+ msg.type.msgt_size = 8 * datalen;
+ msg.type.msgt_number = 1;
+ msg.type.msgt_inline = TRUE;
+ msg.type.msgt_longform = FALSE;
+ msg.type.msgt_deallocate = FALSE;
+ msg.type.msgt_unused = 0;
+
+ run_test_simple_self(&msg, msglen, msgid);
+ run_test_simple(&msg, msglen, msgid);
+}
+
+
+int
+main (int argc, char *argv[], int envc, char *envp[])
+{
+ printf("test_msg_string()\n");
+ test_msg_string();
+ printf("test_msg_string2()\n");
+ test_msg_string2();
+ printf("test_msg_ports()\n");
+ test_msg_ports();
+ printf("test_msg_emptydesc()\n");
+ test_msg_emptydesc();
+ printf("test_iterations()\n");
+ test_iterations();
+ return 0;
+}
diff --git a/tests/test-multiboot.in b/tests/test-multiboot.in
new file mode 100644
index 0000000..20ab330
--- /dev/null
+++ b/tests/test-multiboot.in
@@ -0,0 +1,30 @@
+#!@SHELL@
+
+# Test if the kernel image complies with the multiboot specification.
+
+# Copyright (C) 2023 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+if grub-file --help > /dev/null 2>&1
+then grub-file --is-x86-multiboot gnumach
+else
+ # `grub-file' is not available -- ignore this test.
+ exit 77
+fi
+
+# Local Variables:
+# mode: shell-script
+# End:
diff --git a/tests/test-syscalls.c b/tests/test-syscalls.c
new file mode 100644
index 0000000..be4df8c
--- /dev/null
+++ b/tests/test-syscalls.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <syscalls.h>
+#include <testlib.h>
+
+#include <mach/exception.h>
+#include <mach/mig_errors.h>
+#include <mach/vm_param.h>
+
+#include <mach.user.h>
+#include <mach_port.user.h>
+#include <exc.server.h>
+
+
+static struct {
+ mach_port_t exception_port;
+ mach_port_t thread;
+ mach_port_t task;
+ integer_t exception;
+ integer_t code;
+ integer_t subcode;
+} last_exc;
+kern_return_t catch_exception_raise(mach_port_t exception_port,
+ mach_port_t thread, mach_port_t task,
+ integer_t exception, integer_t code,
+ long_integer_t subcode)
+{
+ printf("received catch_exception_raise(%u %u %u %d %d %ld)\n",
+ exception_port, thread, task, exception, code, subcode);
+ last_exc.exception_port = exception_port;
+ last_exc.thread = thread;
+ last_exc.task = task;
+ last_exc.exception = exception;
+ last_exc.code = code;
+ last_exc.subcode = subcode;
+ return KERN_SUCCESS;
+}
+
+static char simple_request_data[PAGE_SIZE];
+static char simple_reply_data[PAGE_SIZE];
+int simple_msg_server(boolean_t (*demuxer) (mach_msg_header_t *request,
+ mach_msg_header_t *reply),
+ mach_port_t rcv_port_name,
+ int num_msgs)
+{
+ int midx = 0, mok = 0;
+ int ret;
+ mig_reply_header_t *request = (mig_reply_header_t*)simple_request_data;
+ mig_reply_header_t *reply = (mig_reply_header_t*)simple_reply_data;
+ while ((midx - num_msgs) < 0)
+ {
+ ret = mach_msg(&request->Head, MACH_RCV_MSG, 0, PAGE_SIZE,
+ rcv_port_name, 0, MACH_PORT_NULL);
+ switch (ret)
+ {
+ case MACH_MSG_SUCCESS:
+ if ((*demuxer)(&request->Head, &reply->Head))
+ mok++; // TODO send reply
+ else
+ FAILURE("demuxer didn't handle the message");
+ break;
+ default:
+ ASSERT_RET(ret, "receiving in msg_server");
+ break;
+ }
+ midx++;
+ }
+ if (mok != midx)
+ FAILURE("wrong number of message received");
+ return mok != midx;
+}
+
+
+void test_syscall_bad_arg_on_stack(void *arg)
+{
+ /* mach_msg() has 7 arguments, so the last one should be always
+ passed on the stack on x86. Here we make ESP/RSP point to the
+ wrong place to test the access check */
+#ifdef __x86_64__
+ asm volatile("movq $0x123,%rsp;" \
+ "movq $-25,%rax;" \
+ "syscall;" \
+ );
+#else
+ asm volatile("mov $0x123,%esp;" \
+ "mov $-25,%eax;" \
+ "lcall $0x7,$0x0;" \
+ );
+#endif
+ FAILURE("we shouldn't be here!");
+}
+
+void test_bad_syscall_num(void *arg)
+{
+#ifdef __x86_64__
+ asm volatile("movq $0x123456,%rax;" \
+ "syscall;" \
+ );
+#else
+ asm volatile("mov $0x123456,%eax;" \
+ "lcall $0x7,$0x0;" \
+ );
+#endif
+ FAILURE("we shouldn't be here!");
+}
+
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ int err;
+ mach_port_t excp;
+
+ err = mach_port_allocate(mach_task_self (), MACH_PORT_RIGHT_RECEIVE, &excp);
+ ASSERT_RET(err, "creating exception port");
+
+ err = mach_port_insert_right(mach_task_self(), excp, excp,
+ MACH_MSG_TYPE_MAKE_SEND);
+ ASSERT_RET(err, "inserting send right into exception port");
+
+ err = task_set_special_port(mach_task_self(), TASK_EXCEPTION_PORT, excp);
+ ASSERT_RET(err, "setting task exception port");
+
+ /* FIXME: receiving an exception with small size causes GP on 64 bit userspace */
+ /* mig_reply_header_t msg; */
+ /* err = mach_msg(&msg.Head, /\* The header *\/ */
+ /* MACH_RCV_MSG, */
+ /* 0, */
+ /* sizeof (msg), /\* Max receive Size *\/ */
+ /* excp, */
+ /* 1000, */
+ /* MACH_PORT_NULL); */
+
+ // FIXME: maybe MIG should provide this prototype?
+ boolean_t exc_server
+ (mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);
+
+ memset(&last_exc, 0, sizeof(last_exc));
+ test_thread_start(mach_task_self(), test_bad_syscall_num, NULL);
+ ASSERT_RET(simple_msg_server(exc_server, excp, 1), "error in exc server");
+ ASSERT((last_exc.exception == EXC_BAD_INSTRUCTION) && (last_exc.code == EXC_I386_INVOP),
+ "bad exception for test_bad_syscall_num()");
+
+ memset(&last_exc, 0, sizeof(last_exc));
+ test_thread_start(mach_task_self(), test_syscall_bad_arg_on_stack, NULL);
+ ASSERT_RET(simple_msg_server(exc_server, excp, 1), "error in exc server");
+ ASSERT((last_exc.exception == EXC_BAD_ACCESS) && (last_exc.code == KERN_INVALID_ADDRESS),
+ "bad exception for test_syscall_bad_arg_on_stack()");
+
+ return 0;
+}
diff --git a/tests/test-task.c b/tests/test-task.c
new file mode 100644
index 0000000..cbc75e2
--- /dev/null
+++ b/tests/test-task.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <syscalls.h>
+#include <testlib.h>
+
+#include <mach/machine/vm_param.h>
+#include <mach/std_types.h>
+#include <mach/mach_types.h>
+#include <mach/vm_wire.h>
+#include <mach/vm_param.h>
+
+#include <gnumach.user.h>
+#include <mach.user.h>
+
+
+void test_task()
+{
+ mach_port_t ourtask = mach_task_self();
+ mach_msg_type_number_t count;
+ int err;
+
+ struct task_basic_info binfo;
+ count = TASK_BASIC_INFO_COUNT;
+ err = task_info(ourtask, TASK_BASIC_INFO, (task_info_t)&binfo, &count);
+ ASSERT_RET(err, "TASK_BASIC_INFO");
+ ASSERT(binfo.virtual_size > binfo.resident_size, "wrong memory counters");
+
+ struct task_events_info einfo;
+ count = TASK_EVENTS_INFO_COUNT;
+ err = task_info(ourtask, TASK_EVENTS_INFO, (task_info_t)&einfo, &count);
+ ASSERT_RET(err, "TASK_EVENTS_INFO");
+ printf("msgs sent %llu received %llu\n",
+ einfo.messages_sent, einfo.messages_received);
+
+ struct task_thread_times_info ttinfo;
+ count = TASK_THREAD_TIMES_INFO_COUNT;
+ err = task_info(ourtask, TASK_THREAD_TIMES_INFO, (task_info_t)&ttinfo, &count);
+ ASSERT_RET(err, "TASK_THREAD_TIMES_INFO");
+ printf("user time %lld s %lld ns\n",
+ ttinfo.user_time64.seconds, ttinfo.user_time64.nanoseconds);
+}
+
+
+void dummy_thread(void *arg)
+{
+ printf("started dummy thread\n");
+ while (1)
+ ;
+}
+
+void check_threads(thread_t *threads, mach_msg_type_number_t nthreads)
+{
+ for (int tid=0; tid<nthreads; tid++)
+ {
+ struct thread_basic_info tinfo;
+ mach_msg_type_number_t thcount = THREAD_BASIC_INFO_COUNT;
+ int err = thread_info(threads[tid], THREAD_BASIC_INFO, (thread_info_t)&tinfo, &thcount);
+ ASSERT_RET(err, "thread_info");
+ ASSERT(thcount == THREAD_BASIC_INFO_COUNT, "thcount");
+ printf("th%d (port %d):\n", tid, threads[tid]);
+ printf(" user time %d.%06d\n", tinfo.user_time.seconds, tinfo.user_time.microseconds);
+ printf(" system time %d.%06d\n", tinfo.system_time.seconds, tinfo.system_time.microseconds);
+ printf(" cpu usage %d\n", tinfo.cpu_usage);
+ printf(" creation time %d.%06d\n", tinfo.creation_time.seconds, tinfo.creation_time.microseconds);
+ }
+}
+
+static void test_task_threads()
+{
+ thread_t *threads;
+ mach_msg_type_number_t nthreads;
+ int err;
+
+ err = task_threads(mach_task_self(), &threads, &nthreads);
+ ASSERT_RET(err, "task_threads");
+ ASSERT(nthreads == 1, "nthreads");
+ check_threads(threads, nthreads);
+
+ thread_t t1 = test_thread_start(mach_task_self(), dummy_thread, 0);
+
+ thread_t t2 = test_thread_start(mach_task_self(), dummy_thread, 0);
+
+ // let the threads run
+ msleep(100);
+
+ err = task_threads(mach_task_self(), &threads, &nthreads);
+ ASSERT_RET(err, "task_threads");
+ ASSERT(nthreads == 3, "nthreads");
+ check_threads(threads, nthreads);
+
+ err = thread_terminate(t1);
+ ASSERT_RET(err, "thread_terminate");
+ err = thread_terminate(t2);
+ ASSERT_RET(err, "thread_terminate");
+
+ err = task_threads(mach_task_self(), &threads, &nthreads);
+ ASSERT_RET(err, "task_threads");
+ ASSERT(nthreads == 1, "nthreads");
+ check_threads(threads, nthreads);
+}
+
+void test_new_task()
+{
+ int err;
+ task_t newtask;
+ err = task_create(mach_task_self(), 1, &newtask);
+ ASSERT_RET(err, "task_create");
+
+ err = task_suspend(newtask);
+ ASSERT_RET(err, "task_suspend");
+
+ err = task_set_name(newtask, "newtask");
+ ASSERT_RET(err, "task_set_name");
+
+ thread_t *threads;
+ mach_msg_type_number_t nthreads;
+ err = task_threads(newtask, &threads, &nthreads);
+ ASSERT_RET(err, "task_threads");
+ ASSERT(nthreads == 0, "nthreads 0");
+
+ test_thread_start(newtask, dummy_thread, 0);
+
+ err = task_resume(newtask);
+ ASSERT_RET(err, "task_resume");
+
+ msleep(100); // let the thread run a bit
+
+ err = task_threads(newtask, &threads, &nthreads);
+ ASSERT_RET(err, "task_threads");
+ ASSERT(nthreads == 1, "nthreads 1");
+ check_threads(threads, nthreads);
+
+ err = thread_terminate(threads[0]);
+ ASSERT_RET(err, "thread_terminate");
+
+ err = task_terminate(newtask);
+ ASSERT_RET(err, "task_terminate");
+}
+
+void test_errors()
+{
+ int err;
+ err = task_resume(MACH_PORT_NAME_DEAD);
+ ASSERT(err == MACH_SEND_INVALID_DEST, "task DEAD");
+}
+
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ test_task();
+ test_task_threads();
+ test_new_task();
+ test_errors();
+ return 0;
+}
diff --git a/tests/test-threads.c b/tests/test-threads.c
new file mode 100644
index 0000000..06630be
--- /dev/null
+++ b/tests/test-threads.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <stdint.h>
+#include <mach/machine/thread_status.h>
+
+#include <syscalls.h>
+#include <testlib.h>
+
+#include <mach.user.h>
+
+void sleeping_thread(void* arg)
+{
+ printf("starting thread %d\n", arg);
+ for (int i=0; i<100; i++)
+ msleep(50);
+ printf("stopping thread %d\n", arg);
+ thread_terminate(mach_thread_self());
+ FAILURE("thread_terminate");
+}
+
+void test_many(void)
+{
+ for (long tid=0; tid<10; tid++)
+ {
+ test_thread_start(mach_task_self(), sleeping_thread, (void*)tid);
+ }
+ // TODO: wait for thread end notifications
+ msleep(6000);
+}
+
+#ifdef __x86_64__
+void test_fsgs_base_thread(void* tid)
+{
+ int err;
+#if defined(__SEG_FS) && defined(__SEG_GS)
+ long __seg_fs *fs_ptr;
+ long __seg_gs *gs_ptr;
+ long fs_value;
+ long gs_value;
+
+ struct i386_fsgs_base_state state;
+ state.fs_base = (unsigned long)&fs_value;
+ state.gs_base = (unsigned long)&gs_value;
+ err = thread_set_state(mach_thread_self(), i386_FSGS_BASE_STATE,
+ (thread_state_t) &state, i386_FSGS_BASE_STATE_COUNT);
+ ASSERT_RET(err, "thread_set_state");
+
+ fs_value = 0x100 + (long)tid;
+ gs_value = 0x200 + (long)tid;
+
+ msleep(50); // allow the others to set their segment base
+
+ fs_ptr = 0;
+ gs_ptr = 0;
+ long rdvalue = *fs_ptr;
+ printf("FS expected %lx read %lx\n", fs_value, rdvalue);
+ ASSERT(fs_value == rdvalue, "FS base error\n");
+ rdvalue = *gs_ptr;
+ printf("GS expected %lx read %lx\n", gs_value, rdvalue);
+ ASSERT(gs_value == rdvalue, "GS base error\n");
+#else
+#error " missing __SEG_FS and __SEG_GS"
+#endif
+
+ thread_terminate(mach_thread_self());
+ FAILURE("thread_terminate");
+}
+#endif
+
+void test_fsgs_base(void)
+{
+#ifdef __x86_64__
+ int err;
+ for (long tid=0; tid<10; tid++)
+ {
+ test_thread_start(mach_task_self(), test_fsgs_base_thread, (void*)tid);
+ }
+ msleep(1000); // TODO: wait for threads
+#endif
+}
+
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ test_fsgs_base();
+ test_many();
+ return 0;
+}
diff --git a/tests/test-vm.c b/tests/test-vm.c
new file mode 100644
index 0000000..4ece792
--- /dev/null
+++ b/tests/test-vm.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <syscalls.h>
+#include <testlib.h>
+
+#include <mach/machine/vm_param.h>
+#include <mach/std_types.h>
+#include <mach/mach_types.h>
+#include <mach/vm_wire.h>
+#include <mach/vm_param.h>
+
+#include <device.user.h>
+#include <gnumach.user.h>
+#include <mach.user.h>
+#include <mach_port.user.h>
+
+
+static void test_memobj()
+{
+ // this emulates maptime() mapping and reading
+ struct mapped_time_value *mtime;
+ int64_t secs, usecs;
+ mach_port_t device, memobj;
+ int err;
+
+ err = device_open (device_priv(), 0, "time", &device);
+ ASSERT_RET(err, "device_open");
+ err = device_map (device, VM_PROT_READ, 0, sizeof(*mtime), &memobj, 0);
+ ASSERT_RET(err, "device_map");
+ err = mach_port_deallocate (mach_task_self (), device);
+ ASSERT_RET(err, "mach_port_deallocate");
+ mtime = 0;
+ err = vm_map(mach_task_self (), (vm_address_t *)&mtime, sizeof *mtime, 0, 1,
+ memobj, 0, 0, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+ ASSERT_RET(err, "vm_map");
+
+ do
+ {
+ secs = mtime->seconds;
+ __sync_synchronize ();
+ usecs = mtime->microseconds;
+ __sync_synchronize ();
+ }
+ while (secs != mtime->check_seconds);
+ printf("mapped time is %lld.%lld\n",secs, usecs);
+
+ err = mach_port_deallocate (mach_task_self (), memobj);
+ ASSERT_RET(err, "mach_port_deallocate");
+ err = vm_deallocate(mach_task_self(), (vm_address_t)mtime, sizeof(*mtime));
+ ASSERT_RET(err, "vm_deallocate");
+}
+
+static void test_wire()
+{
+ int err = vm_wire_all(host_priv(), mach_task_self(), VM_WIRE_ALL);
+ ASSERT_RET(err, "vm_wire_all-ALL");
+ err = vm_wire_all(host_priv(), mach_task_self(), VM_WIRE_NONE);
+ ASSERT_RET(err, "vm_wire_all-NONE");
+ // TODO check that all memory is actually wired or unwired
+}
+
+int main(int argc, char *argv[], int envc, char *envp[])
+{
+ printf("VM_MIN_ADDRESS=0x%p\n", VM_MIN_ADDRESS);
+ printf("VM_MAX_ADDRESS=0x%p\n", VM_MAX_ADDRESS);
+ test_wire();
+ test_memobj();
+ return 0;
+}
diff --git a/tests/testlib.c b/tests/testlib.c
new file mode 100644
index 0000000..2eaeb59
--- /dev/null
+++ b/tests/testlib.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2024 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <testlib.h>
+
+#include <device/cons.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/mig_errors.h>
+#include <mach/vm_param.h>
+
+#include <mach.user.h>
+#include <mach_host.user.h>
+
+
+static int argc = 0;
+static char *argv_unknown[] = {"unknown", "m1", "123", "456"};
+static char **argv = argv_unknown;
+static char **envp = NULL;
+static int envc = 0;
+
+static mach_port_t host_priv_port = 1;
+static mach_port_t device_master_port = 2;
+
+void cnputc(char c, vm_offset_t cookie)
+{
+ char buf[2] = {c, 0};
+ mach_print(buf);
+}
+
+mach_port_t host_priv(void)
+{
+ return host_priv_port;
+}
+
+mach_port_t device_priv(void)
+{
+ return device_master_port;
+}
+
+void halt()
+{
+ int ret = host_reboot(host_priv_port, 0);
+ ASSERT_RET(ret, "host_reboot() failed!");
+ while (1)
+ ;
+}
+
+int msleep(uint32_t timeout)
+{
+ mach_port_t recv = mach_reply_port();
+ return mach_msg(NULL, MACH_RCV_MSG|MACH_RCV_TIMEOUT|MACH_RCV_INTERRUPT,
+ 0, 0, recv, timeout, MACH_PORT_NULL);
+}
+
+const char* e2s(int err)
+{
+ const char* s = e2s_gnumach(err);
+ if (s != NULL)
+ return s;
+ else
+ switch (err)
+ {
+ default: return "unknown";
+ }
+}
+
+/*
+ * Minimal _start() for test modules, we just take the arguments from the
+ * kernel, call main() and reboot. As in glibc, we expect the argument pointer
+ * as a first argument.
+ */
+void __attribute__((used, retain))
+c_start(void **argptr)
+{
+ intptr_t* argcptr = (intptr_t*)argptr;
+ argc = argcptr[0];
+ argv = (char **) &argcptr[1];
+ envp = &argv[argc + 1];
+ envc = 0;
+
+ while (envp[envc])
+ ++envc;
+
+ mach_atoi(argv[1], &host_priv_port);
+ mach_atoi(argv[2], &device_master_port);
+
+ printf("started %s", argv[0]);
+ for (int i=1; i<argc; i++)
+ {
+ printf(" %s", argv[i]);
+ }
+ printf("\n");
+
+ int ret = main(argc, argv, envc, envp);
+
+ printf("%s: test %s exit code %x\n", TEST_SUCCESS_MARKER, argv[0], ret);
+ halt();
+}
diff --git a/tests/testlib_thread_start.c b/tests/testlib_thread_start.c
new file mode 100644
index 0000000..fa8af0e
--- /dev/null
+++ b/tests/testlib_thread_start.c
@@ -0,0 +1,86 @@
+/*
+ * MIT License
+ *
+ * Copyright (c) 2017 Luc Chabassier
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This small helper was started from
+ * https://github.com/dwarfmaster/mach-ipc/blob/master/minimal_threads/main.c
+ * and then reworked. */
+
+#include <testlib.h>
+#include <mach/vm_param.h>
+#include <mach.user.h>
+
+/* This is just a temporary mapping to set up the stack */
+static long stack_top[PAGE_SIZE/sizeof(long)] __attribute__ ((aligned (PAGE_SIZE)));
+
+thread_t test_thread_start(task_t task, void(*routine)(void*), void* arg) {
+ const vm_size_t stack_size = PAGE_SIZE * 16;
+ kern_return_t ret;
+ vm_address_t stack;
+
+ ret = vm_allocate(task, &stack, stack_size, TRUE);
+ ASSERT_RET(ret, "can't allocate the stack for a new thread");
+
+ ret = vm_protect(task, stack, PAGE_SIZE, FALSE, VM_PROT_NONE);
+ ASSERT_RET(ret, "can't protect the stack from overflows");
+
+ long *top = (long*)((vm_offset_t)stack_top + PAGE_SIZE) - 1;
+#ifdef __i386__
+ *top = (long)arg; /* The argument is passed on the stack on x86_32 */
+ *(top - 1) = 0; /* The return address */
+#elif defined(__x86_64__)
+ *top = 0; /* The return address */
+#endif
+ ret = vm_write(task, stack + stack_size - PAGE_SIZE, (vm_offset_t)stack_top, PAGE_SIZE);
+ ASSERT_RET(ret, "can't initialize the stack for the new thread");
+
+ thread_t thread;
+ ret = thread_create(task, &thread);
+ ASSERT_RET(ret, "thread_create()");
+
+ struct i386_thread_state state;
+ unsigned int count;
+ count = i386_THREAD_STATE_COUNT;
+ ret = thread_get_state(thread, i386_REGS_SEGS_STATE,
+ (thread_state_t) &state, &count);
+ ASSERT_RET(ret, "thread_get_state()");
+
+#ifdef __i386__
+ state.eip = (long) routine;
+ state.uesp = (long) (stack + stack_size - sizeof(long) * 2);
+ state.ebp = 0;
+#elif defined(__x86_64__)
+ state.rip = (long) routine;
+ state.ursp = (long) (stack + stack_size - sizeof(long) * 1);
+ state.rbp = 0;
+ state.rdi = (long)arg;
+#endif
+ ret = thread_set_state(thread, i386_REGS_SEGS_STATE,
+ (thread_state_t) &state, i386_THREAD_STATE_COUNT);
+ ASSERT_RET(ret, "thread_set_state");
+
+ ret = thread_resume(thread);
+ ASSERT_RET(ret, "thread_resume");
+
+ return thread;
+}
diff --git a/tests/user-qemu.mk b/tests/user-qemu.mk
new file mode 100644
index 0000000..fd5ae1a
--- /dev/null
+++ b/tests/user-qemu.mk
@@ -0,0 +1,221 @@
+# Copyright (C) 2024 Free Software Foundation
+
+# This program is free software ; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation ; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY ; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with the program ; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+#
+# MIG stubs generation for user-space tests
+#
+
+MACH_TESTINSTALL = $(builddir)/tests/include-mach
+MACH_TESTINCLUDE = $(MACH_TESTINSTALL)/$(prefix)/include
+
+MIGCOMUSER = $(USER_MIG) -n -cc cat - /dev/null
+MIG_OUTDIR = $(builddir)/tests/mig-out
+MIG_CPPFLAGS = -x c -nostdinc -I$(MACH_TESTINCLUDE)
+
+# FIXME: how can we reliably detect a change on any header and reinstall them?
+$(MACH_TESTINSTALL):
+ mkdir -p $@
+ $(MAKE) install-data DESTDIR=$@
+
+prepare-test: $(MACH_TESTINSTALL)
+
+$(MIG_OUTDIR):
+ mkdir -p $@
+
+define generate_mig_client
+$(MIG_OUTDIR)/$(2).user.c: prepare-test $(MIG_OUTDIR) $(MACH_TESTINCLUDE)/$(1)/$(2).defs
+ $(USER_CPP) $(USER_CPPFLAGS) $(MIG_CPPFLAGS) \
+ -o $(MIG_OUTDIR)/$(2).user.defs \
+ $(MACH_TESTINCLUDE)/$(1)/$(2).defs
+ $(MIGCOMUSER) $(MIGCOMFLAGS) $(MIGCOMUFLAGS) \
+ -user $(MIG_OUTDIR)/$(2).user.c \
+ -header $(MIG_OUTDIR)/$(2).user.h \
+ -list $(MIG_OUTDIR)/$(2).user.msgids \
+ < $(MIG_OUTDIR)/$(2).user.defs
+endef
+
+define generate_mig_server
+$(MIG_OUTDIR)/$(2).server.c: prepare-test $(MIG_OUTDIR) $(srcdir)/include/$(1)/$(2).defs
+ $(USER_CPP) $(USER_CPPFLAGS) $(MIG_CPPFLAGS) \
+ -o $(MIG_OUTDIR)/$(2).server.defs \
+ $(srcdir)/include/$(1)/$(2).defs
+ $(MIGCOMUSER) $(MIGCOMFLAGS) $(MIGCOMUFLAGS) \
+ -server $(MIG_OUTDIR)/$(2).server.c \
+ -header $(MIG_OUTDIR)/$(2).server.h \
+ -list $(MIG_OUTDIR)/$(2).server.msgids \
+ < $(MIG_OUTDIR)/$(2).server.defs
+endef
+
+# These are all the IPC implemented in the kernel, both as a server or as a client.
+# Files are sorted as in
+# find builddir/tests/include-mach/ -name *.defs | grep -v types | sort
+# eval->info for debug of generated rules
+$(eval $(call generate_mig_client,device,device))
+$(eval $(call generate_mig_client,device,device_reply))
+$(eval $(call generate_mig_client,device,device_request))
+$(eval $(call generate_mig_client,mach_debug,mach_debug))
+# default_pager.defs?
+$(eval $(call generate_mig_server,mach,exc))
+# experimental.defs?
+$(eval $(call generate_mig_client,mach,gnumach))
+$(eval $(call generate_mig_client,mach,mach4))
+$(eval $(call generate_mig_client,mach,mach))
+$(eval $(call generate_mig_client,mach,mach_host))
+$(eval $(call generate_mig_client,mach,mach_port))
+# memory_object{_default}.defs?
+# notify.defs?
+$(eval $(call generate_mig_server,mach,task_notify))
+if HOST_ix86
+$(eval $(call generate_mig_client,mach/i386,mach_i386))
+endif
+if HOST_x86_64
+$(eval $(call generate_mig_client,mach/x86_64,mach_i386))
+endif
+
+# NOTE: keep in sync with the rules above
+MIG_GEN_CC = \
+ $(MIG_OUTDIR)/device.user.c \
+ $(MIG_OUTDIR)/device_reply.user.c \
+ $(MIG_OUTDIR)/device_request.user.c \
+ $(MIG_OUTDIR)/mach_debug.user.c \
+ $(MIG_OUTDIR)/exc.server.c \
+ $(MIG_OUTDIR)/gnumach.user.c \
+ $(MIG_OUTDIR)/mach4.user.c \
+ $(MIG_OUTDIR)/mach.user.c \
+ $(MIG_OUTDIR)/mach_host.user.c \
+ $(MIG_OUTDIR)/mach_port.user.c \
+ $(MIG_OUTDIR)/task_notify.server.c \
+ $(MIG_OUTDIR)/mach_i386.user.c
+
+#
+# compilation of user space tests and utilities
+#
+
+TEST_START_MARKER = booting-start-of-test
+TEST_SUCCESS_MARKER = gnumach-test-success-and-reboot
+TEST_FAILURE_MARKER = gnumach-test-failure
+
+TESTCFLAGS = -static -nostartfiles -nolibc \
+ -ffreestanding \
+ -ftrivial-auto-var-init=pattern \
+ -I$(srcdir)/tests/include \
+ -I$(MACH_TESTINCLUDE) \
+ -I$(MIG_OUTDIR) \
+ -ggdb3 \
+ -DMIG_EOPNOTSUPP
+
+SRC_TESTLIB= \
+ $(srcdir)/i386/i386/strings.c \
+ $(srcdir)/kern/printf.c \
+ $(srcdir)/kern/strings.c \
+ $(srcdir)/util/atoi.c \
+ $(srcdir)/tests/syscalls.S \
+ $(srcdir)/tests/start.S \
+ $(srcdir)/tests/testlib.c \
+ $(srcdir)/tests/testlib_thread_start.c \
+ $(builddir)/tests/errlist.c \
+ $(MIG_GEN_CC)
+
+tests/errlist.c: $(addprefix $(srcdir)/include/mach/,message.h kern_return.h mig_errors.h)
+ echo "/* autogenerated file */" >$@
+ echo "#include <mach/message.h>" >>$@
+ echo "#include <mach/kern_return.h>" >>$@
+ echo "#include <mach/mig_errors.h>" >>$@
+ echo "#include <testlib.h>" >>$@
+ echo "#include <stddef.h>" >>$@
+ echo "const char* TEST_SUCCESS_MARKER = \"$(TEST_SUCCESS_MARKER)\";" >>$@
+ echo "const char* TEST_FAILURE_MARKER = \"$(TEST_FAILURE_MARKER)\";" >>$@
+ echo "const char* e2s_gnumach(int err) { switch (err) {" >>$@
+ grep "define[[:space:]]MIG" $(srcdir)/include/mach/mig_errors.h | \
+ awk '{printf " case %s: return \"%s\";\n", $$2, $$2}' >>$@
+ grep "define[[:space:]]KERN" $(srcdir)/include/mach/kern_return.h | \
+ awk '{printf " case %s: return \"%s\";\n", $$2, $$2}' >>$@
+ awk 'f;/MACH_MSG_SUCCESS/{f=1}' $(srcdir)/include/mach/message.h | \
+ grep "define[[:space:]]MACH" | \
+ awk '{printf " case %s: return \"%s\";\n", $$2, $$2}' >>$@
+ echo " default: return NULL;" >>$@
+ echo "}}" >>$@
+
+tests/module-%: $(srcdir)/tests/test-%.c $(SRC_TESTLIB) $(MACH_TESTINSTALL)
+ $(USER_CC) $(USER_CFLAGS) $(TESTCFLAGS) $< $(SRC_TESTLIB) -o $@
+
+#
+# packaging of qemu bootable image and test runner
+#
+
+GNUMACH_ARGS = console=com0
+QEMU_OPTS = -m 2048 -nographic -no-reboot -boot d
+QEMU_GDB_PORT ?= 1234
+
+if HOST_ix86
+QEMU_BIN = qemu-system-i386
+QEMU_OPTS += -cpu pentium3-v1
+endif
+if HOST_x86_64
+QEMU_BIN = qemu-system-x86_64
+QEMU_OPTS += -cpu core2duo-v1
+endif
+
+tests/test-%.iso: tests/module-% gnumach $(srcdir)/tests/grub.cfg.single.template
+ rm -rf $(builddir)/tests/isofiles
+ mkdir -p $(builddir)/tests/isofiles/boot/grub/
+ < $(srcdir)/tests/grub.cfg.single.template \
+ sed -e "s|BOOTMODULE|$(notdir $<)|g" \
+ -e "s/GNUMACHARGS/$(GNUMACH_ARGS)/g" \
+ -e "s/TEST_START_MARKER/$(TEST_START_MARKER)/g" \
+ >$(builddir)/tests/isofiles/boot/grub/grub.cfg
+ cp gnumach $< $(builddir)/tests/isofiles/boot/
+ grub-mkrescue -o $@ $(builddir)/tests/isofiles
+
+tests/test-%: tests/test-%.iso $(srcdir)/tests/run-qemu.sh.template
+ < $(srcdir)/tests/run-qemu.sh.template \
+ sed -e "s|TESTNAME|$(subst tests/test-,,$@)|g" \
+ -e "s/QEMU_OPTS/$(QEMU_OPTS)/g" \
+ -e "s/QEMU_BIN/$(QEMU_BIN)/g" \
+ -e "s/TEST_START_MARKER/$(TEST_START_MARKER)/g" \
+ -e "s/TEST_SUCCESS_MARKER/$(TEST_SUCCESS_MARKER)/g" \
+ -e "s/TEST_FAILURE_MARKER/$(TEST_FAILURE_MARKER)/g" \
+ >$@
+ chmod +x $@
+
+clean-test-%:
+ rm -f tests/test-$*.iso tests/module-$* tests/test-$**
+
+
+USER_TESTS := \
+ tests/test-hello \
+ tests/test-mach_host \
+ tests/test-gsync \
+ tests/test-mach_port \
+ tests/test-vm \
+ tests/test-syscalls \
+ tests/test-machmsg \
+ tests/test-task \
+ tests/test-threads
+
+USER_TESTS_CLEAN = $(subst tests/,clean-,$(USER_TESTS))
+
+#
+# helpers for interactive test run and debug
+#
+
+run-%: tests/test-%
+ $^
+
+# don't reuse the launcher script as the timeout would kill the debug session
+debug-%: tests/test-%.iso
+ $(QEMU_BIN) $(QEMU_OPTS) -cdrom $< -gdb tcp::$(QEMU_GDB_PORT) -S \
+ | sed -n "/$(TEST_START_MARKER)/"',$$p'
diff --git a/util/atoi.c b/util/atoi.c
new file mode 100644
index 0000000..f8c4a70
--- /dev/null
+++ b/util/atoi.c
@@ -0,0 +1,106 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Olivetti Mach Console driver v0.0
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989
+ * All rights reserved.
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <util/atoi.h>
+
+/*
+ * mach_atoi:
+ *
+ * This function converts an ascii string into an integer, and returns
+ * MACH_ATOI_DEFAULT if no integer was found. Note that this is why we
+ * don't use the regular atoi(), because ZERO is ZERO and not the
+ * MACH_ATOI_DEFAULT in all cases.
+ *
+ * input : string
+ * output : a number or possibly MACH_ATOI_DEFAULT, and the count of
+ * characters consumed by the conversion
+ *
+ */
+int
+mach_atoi(const u_char *cp, int *nump)
+{
+ int number;
+ const u_char *original;
+
+ original = cp;
+ for (number = 0; ('0' <= *cp) && (*cp <= '9'); cp++)
+ number = (number * 10) + (*cp - '0');
+ if (original == cp)
+ *nump = MACH_ATOI_DEFAULT;
+ else
+ *nump = number;
+ return(cp - original);
+}
diff --git a/util/atoi.h b/util/atoi.h
new file mode 100644
index 0000000..47adb42
--- /dev/null
+++ b/util/atoi.h
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: atoi.h
+ Description: definitions for mach_atoi
+ Authors: Eugene Kuerner, Adrienne Jardetzky, Mike Kupfer
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _UTIL_ATOI_H_
+#define _UTIL_ATOI_H_
+
+#include <sys/types.h>
+
+#define MACH_ATOI_DEFAULT -1
+extern int mach_atoi (const u_char *, int *);
+
+#endif /* _UTIL_ATOI_H_ */
diff --git a/util/byteorder.c b/util/byteorder.c
new file mode 100644
index 0000000..0629b31
--- /dev/null
+++ b/util/byteorder.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Mach.
+ *
+ * GNU Mach is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "util/byteorder.h"
+
+uint16_t ntohs(uint16_t netshort) {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return __builtin_bswap16(netshort);
+#else
+ return netshort;
+#endif
+}
+
+uint32_t ntohl(uint32_t netlong) {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return __builtin_bswap32(netlong);
+#else
+ return netlong;
+#endif
+}
+
+uint16_t htons(uint16_t hostshort) {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return __builtin_bswap16(hostshort);
+#else
+ return hostshort;
+#endif
+}
+
+uint32_t htonl(uint32_t hostlong) {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return __builtin_bswap32(hostlong);
+#else
+ return hostlong;
+#endif
+}
diff --git a/util/byteorder.h b/util/byteorder.h
new file mode 100644
index 0000000..fbec39e
--- /dev/null
+++ b/util/byteorder.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2022 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Mach.
+ *
+ * GNU Mach is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _UTIL_BYTEORDER_H_
+#define _UTIL_BYTEORDER_H_
+
+#include <stdint.h>
+
+uint16_t ntohs(uint16_t netshort);
+uint32_t ntohl(uint32_t netlong);
+
+uint16_t htons(uint16_t hostshort);
+uint32_t htonl(uint32_t hostlong);
+
+#endif /* _UTIL_BYTEORDER_H_ */
diff --git a/version.c.in b/version.c.in
new file mode 100644
index 0000000..d894d7f
--- /dev/null
+++ b/version.c.in
@@ -0,0 +1,2 @@
+/* @configure_input@ */
+const char version[] = "@PACKAGE_NAME@ @PACKAGE_VERSION@";
diff --git a/version.m4 b/version.m4
new file mode 100644
index 0000000..d6696d6
--- /dev/null
+++ b/version.m4
@@ -0,0 +1,4 @@
+m4_define([AC_PACKAGE_NAME],[GNU Mach])
+m4_define([AC_PACKAGE_VERSION],[1.8])
+m4_define([AC_PACKAGE_BUGREPORT],[bug-hurd@gnu.org])
+m4_define([AC_PACKAGE_TARNAME],[gnumach])
diff --git a/vm/memory_object.c b/vm/memory_object.c
new file mode 100644
index 0000000..1ea5956
--- /dev/null
+++ b/vm/memory_object.c
@@ -0,0 +1,1090 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/memory_object.c
+ * Author: Michael Wayne Young
+ *
+ * External memory management interface control functions.
+ */
+
+/*
+ * Interface dependencies:
+ */
+
+#include <mach/std_types.h> /* For pointer_t */
+#include <mach/mach_types.h>
+
+#include <mach/kern_return.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <mach/memory_object.h>
+#include <mach/boolean.h>
+#include <mach/vm_prot.h>
+#include <mach/message.h>
+
+#include <vm/memory_object_user.user.h>
+#include <vm/memory_object_default.user.h>
+
+/*
+ * Implementation dependencies:
+ */
+#include <vm/memory_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/pmap.h> /* For copy_to_phys, pmap_clear_modify */
+#include <kern/debug.h> /* For panic() */
+#include <kern/thread.h> /* For current_thread() */
+#include <kern/host.h>
+#include <kern/mach.server.h> /* For rpc prototypes */
+#include <vm/vm_kern.h> /* For kernel_map, vm_move */
+#include <vm/vm_map.h> /* For vm_map_pageable */
+#include <ipc/ipc_port.h>
+
+#if MACH_PAGEMAP
+#include <vm/vm_external.h>
+#endif /* MACH_PAGEMAP */
+
+typedef int memory_object_lock_result_t; /* moved from below */
+
+
+ipc_port_t memory_manager_default = IP_NULL;
+def_simple_lock_data(static,memory_manager_default_lock)
+
+/*
+ * Important note:
+ * All of these routines gain a reference to the
+ * object (first argument) as part of the automatic
+ * argument conversion. Explicit deallocation is necessary.
+ */
+
+kern_return_t memory_object_data_supply(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t vm_data_copy,
+ unsigned int data_cnt,
+ vm_prot_t lock_value,
+ boolean_t precious,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
+{
+ kern_return_t result = KERN_SUCCESS;
+ vm_offset_t error_offset = 0;
+ vm_page_t m;
+ vm_page_t data_m;
+ vm_size_t original_length;
+ vm_offset_t original_offset;
+ vm_page_t *page_list;
+ boolean_t was_absent;
+ vm_map_copy_t data_copy = (vm_map_copy_t)vm_data_copy;
+ vm_map_copy_t orig_copy = data_copy;
+
+ /*
+ * Look for bogus arguments
+ */
+
+ if (object == VM_OBJECT_NULL) {
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ if (lock_value & ~VM_PROT_ALL) {
+ vm_object_deallocate(object);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ if ((data_cnt % PAGE_SIZE) != 0) {
+ vm_object_deallocate(object);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /*
+ * Adjust the offset from the memory object to the offset
+ * within the vm_object.
+ */
+
+ original_length = data_cnt;
+ original_offset = offset;
+
+ assert(data_copy->type == VM_MAP_COPY_PAGE_LIST);
+ page_list = &data_copy->cpy_page_list[0];
+
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ offset -= object->paging_offset;
+
+ /*
+ * Loop over copy stealing pages for pagein.
+ */
+
+ for (; data_cnt > 0 ; data_cnt -= PAGE_SIZE, offset += PAGE_SIZE) {
+
+ assert(data_copy->cpy_npages > 0);
+ data_m = *page_list;
+
+ if (data_m == VM_PAGE_NULL || data_m->tabled ||
+ data_m->error || data_m->absent || data_m->fictitious) {
+
+ panic("Data_supply: bad page");
+ }
+
+ /*
+ * Look up target page and check its state.
+ */
+
+retry_lookup:
+ m = vm_page_lookup(object,offset);
+ if (m == VM_PAGE_NULL) {
+ was_absent = FALSE;
+ }
+ else {
+ if (m->absent && m->busy) {
+
+ /*
+ * Page was requested. Free the busy
+ * page waiting for it. Insertion
+ * of new page happens below.
+ */
+
+ VM_PAGE_FREE(m);
+ was_absent = TRUE;
+ }
+ else {
+
+ /*
+ * Have to wait for page that is busy and
+ * not absent. This is probably going to
+ * be an error, but go back and check.
+ */
+ if (m->busy) {
+ PAGE_ASSERT_WAIT(m, FALSE);
+ vm_object_unlock(object);
+ thread_block((void (*)()) 0);
+ vm_object_lock(object);
+ goto retry_lookup;
+ }
+
+ /*
+ * Page already present; error.
+ * This is an error if data is precious.
+ */
+ result = KERN_MEMORY_PRESENT;
+ error_offset = offset + object->paging_offset;
+
+ break;
+ }
+ }
+
+ /*
+ * Ok to pagein page. Target object now has no page
+ * at offset. Set the page parameters, then drop
+ * in new page and set up pageout state. Object is
+ * still locked here.
+ *
+ * Must clear busy bit in page before inserting it.
+ * Ok to skip wakeup logic because nobody else
+ * can possibly know about this page.
+ */
+
+ data_m->busy = FALSE;
+ data_m->dirty = FALSE;
+ pmap_clear_modify(data_m->phys_addr);
+
+ data_m->page_lock = lock_value;
+ data_m->unlock_request = VM_PROT_NONE;
+ data_m->precious = precious;
+
+ vm_page_lock_queues();
+ vm_page_insert(data_m, object, offset);
+
+ if (was_absent)
+ vm_page_activate(data_m);
+ else
+ vm_page_deactivate(data_m);
+
+ vm_page_unlock_queues();
+
+ /*
+ * Null out this page list entry, and advance to next
+ * page.
+ */
+
+ *page_list++ = VM_PAGE_NULL;
+
+ if (--(data_copy->cpy_npages) == 0 &&
+ vm_map_copy_has_cont(data_copy)) {
+ vm_map_copy_t new_copy;
+
+ vm_object_unlock(object);
+
+ vm_map_copy_invoke_cont(data_copy, &new_copy, &result);
+
+ if (result == KERN_SUCCESS) {
+
+ /*
+ * Consume on success requires that
+ * we keep the original vm_map_copy
+ * around in case something fails.
+ * Free the old copy if it's not the original
+ */
+ if (data_copy != orig_copy) {
+ vm_map_copy_discard(data_copy);
+ }
+
+ if ((data_copy = new_copy) != VM_MAP_COPY_NULL)
+ page_list = &data_copy->cpy_page_list[0];
+
+ vm_object_lock(object);
+ }
+ else {
+ vm_object_lock(object);
+ error_offset = offset + object->paging_offset +
+ PAGE_SIZE;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Send reply if one was requested.
+ */
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ if (vm_map_copy_has_cont(data_copy))
+ vm_map_copy_abort_cont(data_copy);
+
+ if (IP_VALID(reply_to)) {
+ memory_object_supply_completed(
+ reply_to, reply_to_type,
+ object->pager_request,
+ original_offset,
+ original_length,
+ result,
+ error_offset);
+ }
+
+ vm_object_deallocate(object);
+
+ /*
+ * Consume on success: The final data copy must be
+	 * discarded if it is not the original. The original
+ * gets discarded only if this routine succeeds.
+ */
+ if (data_copy != orig_copy)
+ vm_map_copy_discard(data_copy);
+ if (result == KERN_SUCCESS)
+ vm_map_copy_discard(orig_copy);
+
+
+ return(result);
+}
+
+kern_return_t memory_object_data_error(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ kern_return_t error_value)
+{
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (size != round_page(size))
+ return(KERN_INVALID_ARGUMENT);
+
+ vm_object_lock(object);
+ offset -= object->paging_offset;
+
+ while (size != 0) {
+ vm_page_t m;
+
+ m = vm_page_lookup(object, offset);
+ if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
+ m->error = TRUE;
+ m->absent = FALSE;
+ vm_object_absent_release(object);
+
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lock_queues();
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+ }
+
+ size -= PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+ return(KERN_SUCCESS);
+}
+
+kern_return_t memory_object_data_unavailable(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size)
+{
+#if MACH_PAGEMAP
+ vm_external_t existence_info = VM_EXTERNAL_NULL;
+#endif /* MACH_PAGEMAP */
+
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (size != round_page(size))
+ return(KERN_INVALID_ARGUMENT);
+
+#if MACH_PAGEMAP
+ if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE) &&
+ (object->existence_info == VM_EXTERNAL_NULL)) {
+ existence_info = vm_external_create(VM_EXTERNAL_SMALL_SIZE);
+ }
+#endif /* MACH_PAGEMAP */
+
+ vm_object_lock(object);
+#if MACH_PAGEMAP
+ if (existence_info != VM_EXTERNAL_NULL) {
+ object->existence_info = existence_info;
+ }
+ if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE)) {
+ vm_object_unlock(object);
+ vm_object_deallocate(object);
+ return(KERN_SUCCESS);
+ }
+#endif /* MACH_PAGEMAP */
+ offset -= object->paging_offset;
+
+ while (size != 0) {
+ vm_page_t m;
+
+ /*
+ * We're looking for pages that are both busy and
+ * absent (waiting to be filled), converting them
+ * to just absent.
+ *
+ * Pages that are just busy can be ignored entirely.
+ */
+
+ m = vm_page_lookup(object, offset);
+ if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lock_queues();
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+ }
+ size -= PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: memory_object_lock_page
+ *
+ * Description:
+ * Perform the appropriate lock operations on the
+ * given page. See the description of
+ * "memory_object_lock_request" for the meanings
+ * of the arguments.
+ *
+ * Returns an indication that the operation
+ * completed, blocked, or that the page must
+ * be cleaned.
+ */
+
+#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
+
+static memory_object_lock_result_t memory_object_lock_page(
+ vm_page_t m,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot)
+{
+ /*
+ * Don't worry about pages for which the kernel
+ * does not have any data.
+ */
+
+ if (m->absent)
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+
+ /*
+ * If we cannot change access to the page,
+ * either because a mapping is in progress
+ * (busy page) or because a mapping has been
+ * wired, then give up.
+ */
+
+ if (m->busy)
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
+
+ assert(!m->fictitious);
+
+ if (m->wire_count != 0) {
+ /*
+ * If no change would take place
+ * anyway, return successfully.
+ *
+ * No change means:
+ * Not flushing AND
+ * No change to page lock [2 checks] AND
+ * Don't need to send page to manager
+ *
+ * Don't need to send page to manager means:
+ * No clean or return request OR (
+ * Page is not dirty [2 checks] AND (
+ * Page is not precious OR
+ * No request to return precious pages ))
+ *
+ * Now isn't that straightforward and obvious ?? ;-)
+ *
+ * XXX This doesn't handle sending a copy of a wired
+ * XXX page to the pager, but that will require some
+ * XXX significant surgery.
+ */
+
+ if (!should_flush &&
+ ((m->page_lock == prot) || (prot == VM_PROT_NO_CHANGE)) &&
+ ((should_return == MEMORY_OBJECT_RETURN_NONE) ||
+ (!m->dirty && !pmap_is_modified(m->phys_addr) &&
+ (!m->precious ||
+ should_return != MEMORY_OBJECT_RETURN_ALL)))) {
+ /*
+ * Restart page unlock requests,
+ * even though no change took place.
+ * [Memory managers may be expecting
+ * to see new requests.]
+ */
+ m->unlock_request = VM_PROT_NONE;
+ PAGE_WAKEUP(m);
+
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+ }
+
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
+ }
+
+ /*
+ * If the page is to be flushed, allow
+ * that to be done as part of the protection.
+ */
+
+ if (should_flush)
+ prot = VM_PROT_ALL;
+
+ /*
+ * Set the page lock.
+ *
+ * If we are decreasing permission, do it now;
+ * let the fault handler take care of increases
+ * (pmap_page_protect may not increase protection).
+ */
+
+ if (prot != VM_PROT_NO_CHANGE) {
+ if ((m->page_lock ^ prot) & prot) {
+ pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot);
+ }
+ m->page_lock = prot;
+
+ /*
+ * Restart any past unlock requests, even if no
+ * change resulted. If the manager explicitly
+ * requested no protection change, then it is assumed
+ * to be remembering past requests.
+ */
+
+ m->unlock_request = VM_PROT_NONE;
+ PAGE_WAKEUP(m);
+ }
+
+ /*
+ * Handle cleaning.
+ */
+
+ if (should_return != MEMORY_OBJECT_RETURN_NONE) {
+ /*
+ * Check whether the page is dirty. If
+ * write permission has not been removed,
+ * this may have unpredictable results.
+ */
+
+ if (!m->dirty)
+ m->dirty = pmap_is_modified(m->phys_addr);
+
+ if (m->dirty || (m->precious &&
+ should_return == MEMORY_OBJECT_RETURN_ALL)) {
+ /*
+ * If we weren't planning
+ * to flush the page anyway,
+ * we may need to remove the
+ * page from the pageout
+ * system and from physical
+ * maps now.
+ */
+
+ vm_page_lock_queues();
+ VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_unlock_queues();
+
+ if (!should_flush)
+ pmap_page_protect(m->phys_addr,
+ VM_PROT_NONE);
+
+ /*
+ * Cleaning a page will cause
+ * it to be flushed.
+ */
+
+ if (m->dirty)
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
+ else
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
+ }
+ }
+
+ /*
+ * Handle flushing
+ */
+
+ if (should_flush) {
+ VM_PAGE_FREE(m);
+ } else {
+ extern boolean_t vm_page_deactivate_hint;
+
+ /*
+ * XXX Make clean but not flush a paging hint,
+ * and deactivate the pages. This is a hack
+ * because it overloads flush/clean with
+ * implementation-dependent meaning. This only
+ * happens to pages that are already clean.
+ */
+
+ if (vm_page_deactivate_hint &&
+ (should_return != MEMORY_OBJECT_RETURN_NONE)) {
+ vm_page_lock_queues();
+ vm_page_deactivate(m);
+ vm_page_unlock_queues();
+ }
+ }
+
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+}
+
+/*
+ * Routine: memory_object_lock_request [user interface]
+ *
+ * Description:
+ * Control use of the data associated with the given
+ * memory object. For each page in the given range,
+ * perform the following operations, in order:
+ * 1) restrict access to the page (disallow
+ * forms specified by "prot");
+ * 2) return data to the manager (if "should_return"
+ * is RETURN_DIRTY and the page is dirty, or
+ * "should_return" is RETURN_ALL and the page
+ * is either dirty or precious); and,
+ * 3) flush the cached copy (if "should_flush"
+ * is asserted).
+ * The set of pages is defined by a starting offset
+ * ("offset") and size ("size"). Only pages with the
+ * same page alignment as the starting offset are
+ * considered.
+ *
+ * A single acknowledgement is sent (to the "reply_to"
+ * port) when these actions are complete. If successful,
+ * the naked send right for reply_to is consumed.
+ */
+
+kern_return_t
+memory_object_lock_request(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
+{
+ vm_page_t m;
+ vm_offset_t original_offset = offset;
+ vm_size_t original_size = size;
+ vm_offset_t paging_offset = 0;
+ vm_object_t new_object = VM_OBJECT_NULL;
+ vm_offset_t new_offset = 0;
+ vm_offset_t last_offset = offset;
+ int page_lock_result;
+ int pageout_action = 0; /* '=0' to quiet lint */
+
+#define DATA_WRITE_MAX 32
+ vm_page_t holding_pages[DATA_WRITE_MAX];
+
+ /*
+ * Check for bogus arguments.
+ */
+ if (object == VM_OBJECT_NULL ||
+ ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE))
+ return (KERN_INVALID_ARGUMENT);
+
+ size = round_page(size);
+
+ /*
+ * Lock the object, and acquire a paging reference to
+ * prevent the memory_object and control ports from
+ * being destroyed.
+ */
+
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ offset -= object->paging_offset;
+
+ /*
+ * To avoid blocking while scanning for pages, save
+ * dirty pages to be cleaned all at once.
+ *
+ * XXXO A similar strategy could be used to limit the
+ * number of times that a scan must be restarted for
+ * other reasons. Those pages that would require blocking
+ * could be temporarily collected in another list, or
+ * their offsets could be recorded in a small array.
+ */
+
+ /*
+ * XXX NOTE: May want to consider converting this to a page list
+ * XXX vm_map_copy interface. Need to understand object
+ * XXX coalescing implications before doing so.
+ */
+
+#define PAGEOUT_PAGES \
+MACRO_BEGIN \
+ vm_map_copy_t copy; \
+ unsigned i; \
+ vm_page_t hp; \
+ \
+ vm_object_unlock(object); \
+ \
+ (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
+ \
+ (void) memory_object_data_return( \
+ object->pager, \
+ object->pager_request, \
+ paging_offset, \
+ (pointer_t) copy, \
+ new_offset, \
+ (pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
+ !should_flush); \
+ \
+ vm_object_lock(object); \
+ \
+ for (i = 0; i < atop(new_offset); i++) { \
+ hp = holding_pages[i]; \
+ if (hp != VM_PAGE_NULL) \
+ VM_PAGE_FREE(hp); \
+ } \
+ \
+ new_object = VM_OBJECT_NULL; \
+MACRO_END
+
+ for (;
+ size != 0;
+ size -= PAGE_SIZE, offset += PAGE_SIZE)
+ {
+ /*
+ * Limit the number of pages to be cleaned at once.
+ */
+ if (new_object != VM_OBJECT_NULL &&
+ new_offset >= PAGE_SIZE * DATA_WRITE_MAX)
+ {
+ PAGEOUT_PAGES;
+ }
+
+ while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+ switch ((page_lock_result = memory_object_lock_page(m,
+ should_return,
+ should_flush,
+ prot)))
+ {
+ case MEMORY_OBJECT_LOCK_RESULT_DONE:
+ /*
+ * End of a cluster of dirty pages.
+ */
+ if (new_object != VM_OBJECT_NULL) {
+ PAGEOUT_PAGES;
+ continue;
+ }
+ break;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
+ /*
+ * Since it is necessary to block,
+ * clean any dirty pages now.
+ */
+ if (new_object != VM_OBJECT_NULL) {
+ PAGEOUT_PAGES;
+ continue;
+ }
+
+ PAGE_ASSERT_WAIT(m, FALSE);
+ vm_object_unlock(object);
+ thread_block((void (*)()) 0);
+ vm_object_lock(object);
+ continue;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
+ /*
+ * The clean and return cases are similar.
+ *
+ * Mark the page busy since we unlock the
+ * object below.
+ */
+ m->busy = TRUE;
+
+ /*
+ * if this would form a discontiguous block,
+ * clean the old pages and start anew.
+ *
+ * NOTE: The first time through here, new_object
+ * is null, hiding the fact that pageout_action
+ * is not initialized.
+ */
+ if (new_object != VM_OBJECT_NULL &&
+ (last_offset != offset ||
+ pageout_action != page_lock_result)) {
+ PAGEOUT_PAGES;
+ }
+
+ vm_object_unlock(object);
+
+ /*
+ * If we have not already allocated an object
+ * for a range of pages to be written, do so
+ * now.
+ */
+ if (new_object == VM_OBJECT_NULL) {
+ new_object = vm_object_allocate(original_size);
+ new_offset = 0;
+ paging_offset = m->offset +
+ object->paging_offset;
+ pageout_action = page_lock_result;
+ }
+
+ /*
+ * Move or copy the dirty page into the
+ * new object.
+ */
+ m = vm_pageout_setup(m,
+ m->offset + object->paging_offset,
+ new_object,
+ new_offset,
+ should_flush);
+
+ /*
+ * Save the holding page if there is one.
+ */
+ holding_pages[atop(new_offset)] = m;
+ new_offset += PAGE_SIZE;
+ last_offset = offset + PAGE_SIZE;
+
+ vm_object_lock(object);
+ break;
+ }
+ break;
+ }
+ }
+
+ /*
+ * We have completed the scan for applicable pages.
+ * Clean any pages that have been saved.
+ */
+ if (new_object != VM_OBJECT_NULL) {
+ PAGEOUT_PAGES;
+ }
+
+ if (IP_VALID(reply_to)) {
+ vm_object_unlock(object);
+
+ /* consumes our naked send-once/send right for reply_to */
+ (void) memory_object_lock_completed(reply_to, reply_to_type,
+ object->pager_request, original_offset, original_size);
+
+ vm_object_lock(object);
+ }
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ vm_object_deallocate(object);
+
+ return (KERN_SUCCESS);
+}
+
+static kern_return_t
+memory_object_set_attributes_common(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
+{
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ /*
+ * Verify the attributes of importance
+ */
+
+ switch(copy_strategy) {
+ case MEMORY_OBJECT_COPY_NONE:
+ case MEMORY_OBJECT_COPY_CALL:
+ case MEMORY_OBJECT_COPY_DELAY:
+ case MEMORY_OBJECT_COPY_TEMPORARY:
+ break;
+ default:
+ vm_object_deallocate(object);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ if (may_cache)
+ may_cache = TRUE;
+
+ vm_object_lock(object);
+
+ /*
+ * Wake up anyone waiting for the ready attribute
+ * to become asserted.
+ */
+
+ if (!object->pager_ready) {
+ vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ }
+
+ /*
+ * Copy the attributes
+ */
+
+ object->can_persist = may_cache;
+ object->pager_ready = TRUE;
+ if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
+ object->temporary = TRUE;
+ } else {
+ object->copy_strategy = copy_strategy;
+ }
+
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * XXX rpd claims that reply_to could be obviated in favor of a client
+ * XXX stub that made change_attributes an RPC. Need investigation.
+ */
+
+kern_return_t memory_object_change_attributes(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
+{
+ kern_return_t result;
+
+ /*
+ * Do the work and throw away our object reference. It
+ * is important that the object reference be deallocated
+ * BEFORE sending the reply. The whole point of the reply
+ * is that it shows up after the terminate message that
+ * may be generated by setting the object uncacheable.
+ *
+ * XXX may_cache may become a tri-valued variable to handle
+ * XXX uncache if not in use.
+ */
+ result = memory_object_set_attributes_common(object, may_cache,
+ copy_strategy);
+
+ if (IP_VALID(reply_to)) {
+
+ /* consumes our naked send-once/send right for reply_to */
+ (void) memory_object_change_completed(reply_to, reply_to_type,
+ may_cache, copy_strategy);
+
+ }
+
+ return(result);
+}
+
+kern_return_t memory_object_ready(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
+{
+ return memory_object_set_attributes_common(object, may_cache,
+ copy_strategy);
+}
+
+kern_return_t memory_object_get_attributes(
+ vm_object_t object,
+ boolean_t *object_ready,
+ boolean_t *may_cache,
+ memory_object_copy_strategy_t *copy_strategy)
+{
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ vm_object_lock(object);
+ *may_cache = object->can_persist;
+ *object_ready = object->pager_ready;
+ *copy_strategy = object->copy_strategy;
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * If successful, consumes the supplied naked send right.
+ */
+kern_return_t vm_set_default_memory_manager(
+ const host_t host,
+ ipc_port_t *default_manager)
+{
+ ipc_port_t current_manager;
+ ipc_port_t new_manager;
+ ipc_port_t returned_manager;
+
+ if (host == HOST_NULL)
+ return(KERN_INVALID_HOST);
+
+ new_manager = *default_manager;
+ simple_lock(&memory_manager_default_lock);
+ current_manager = memory_manager_default;
+
+ if (new_manager == IP_NULL) {
+ /*
+ * Retrieve the current value.
+ */
+
+ returned_manager = ipc_port_copy_send(current_manager);
+ } else {
+ /*
+ * Retrieve the current value,
+ * and replace it with the supplied value.
+ * We consume the supplied naked send right.
+ */
+
+ returned_manager = current_manager;
+ memory_manager_default = new_manager;
+
+ /*
+ * In case anyone's been waiting for a memory
+ * manager to be established, wake them up.
+ */
+
+ thread_wakeup((event_t) &memory_manager_default);
+ }
+
+ simple_unlock(&memory_manager_default_lock);
+
+ *default_manager = returned_manager;
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: memory_manager_default_reference
+ * Purpose:
+ * Returns a naked send right for the default
+ * memory manager. The returned right is always
+ * valid (not IP_NULL or IP_DEAD).
+ */
+
+ipc_port_t memory_manager_default_reference(void)
+{
+ ipc_port_t current_manager;
+
+ simple_lock(&memory_manager_default_lock);
+
+ while (current_manager = ipc_port_copy_send(memory_manager_default),
+ !IP_VALID(current_manager)) {
+ thread_sleep((event_t) &memory_manager_default,
+ simple_lock_addr(memory_manager_default_lock),
+ FALSE);
+ simple_lock(&memory_manager_default_lock);
+ }
+
+ simple_unlock(&memory_manager_default_lock);
+
+ return current_manager;
+}
+
+/*
+ * Routine: memory_manager_default_port
+ * Purpose:
+ * Returns true if the receiver for the port
+ * is the default memory manager.
+ *
+ * This is a hack to let ds_read_done
+ * know when it should keep memory wired.
+ */
+
+boolean_t memory_manager_default_port(const ipc_port_t port)
+{
+ ipc_port_t current;
+ boolean_t result;
+
+ simple_lock(&memory_manager_default_lock);
+ current = memory_manager_default;
+ if (IP_VALID(current)) {
+ /*
+ * There is no point in bothering to lock
+ * both ports, which would be painful to do.
+ * If the receive rights are moving around,
+ * we might be inaccurate.
+ */
+
+ result = port->ip_receiver == current->ip_receiver;
+ } else
+ result = FALSE;
+ simple_unlock(&memory_manager_default_lock);
+
+ return result;
+}
+
+void memory_manager_default_init(void)
+{
+ memory_manager_default = IP_NULL;
+ simple_lock_init(&memory_manager_default_lock);
+}
diff --git a/vm/memory_object.h b/vm/memory_object.h
new file mode 100644
index 0000000..ee0c963
--- /dev/null
+++ b/vm/memory_object.h
@@ -0,0 +1,39 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _VM_MEMORY_OBJECT_H_
+#define _VM_MEMORY_OBJECT_H_
+
+#include <mach/boolean.h>
+#include <ipc/ipc_types.h>
+
+extern ipc_port_t memory_manager_default_reference(void);
+extern boolean_t memory_manager_default_port(ipc_port_t);
+extern void memory_manager_default_init(void);
+
+extern ipc_port_t memory_manager_default;
+
+#endif /* _VM_MEMORY_OBJECT_H_ */
diff --git a/vm/memory_object_default.cli b/vm/memory_object_default.cli
new file mode 100644
index 0000000..998a986
--- /dev/null
+++ b/vm/memory_object_default.cli
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+#define SEQNOS 1
+
+#include <mach/memory_object_default.defs>
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
new file mode 100644
index 0000000..5724349
--- /dev/null
+++ b/vm/memory_object_proxy.c
@@ -0,0 +1,228 @@
+/* memory_object_proxy.c - Proxy memory objects for Mach.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+/* A proxy memory object is a kernel port that can be used like a real
+ memory object in a vm_map call, except that the current and maximum
+ protection are restricted to the proxy object's maximum protection
+ at the time the mapping is established. The kernel port will hold
+ a reference to the real memory object for the life time of the
+ proxy object.
+
+ Note that we don't need to do any reference counting on the proxy
+ object. Our caller will hold a reference to the proxy object when
+ looking it up, and is expected to acquire its own reference to the
+ real memory object if needed before releasing the reference to the
+ proxy object.
+
+ The user provided real memory object and the maximum protection are
+ not checked for validity. The maximum protection is only used as a
+ mask, and the memory object is validated at the time the mapping is
+ established. */
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/notify.h>
+#include <mach/vm_prot.h>
+#include <kern/printf.h>
+#include <kern/slab.h>
+#include <kern/mach4.server.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <vm/memory_object_proxy.h>
+
+/* The cache which holds our proxy memory objects. */
+static struct kmem_cache memory_object_proxy_cache;
+
+/* A proxy wraps a send right to a real memory object, restricting
+   mappings made through it to the window [start, start+len) and to
+   max_protection (see the file header comment above).  */
+struct memory_object_proxy
+{
+  /* Kernel port by which users name the proxy.  */
+  struct ipc_port *port;
+
+  /* Send right to the underlying real memory object; held for the
+     lifetime of the proxy and released on no-senders.  */
+  ipc_port_t object;
+  /* Kernel port on which no-senders notifications arrive.  */
+  ipc_port_t notify;
+  /* Mask applied to current/max protection of mappings.  */
+  vm_prot_t max_protection;
+  /* Window into the underlying object.  */
+  vm_offset_t start;
+  vm_offset_t len;
+};
+typedef struct memory_object_proxy *memory_object_proxy_t;
+
+
+/* One-time initialization: set up the slab cache that backs
+   struct memory_object_proxy allocations.  */
+void
+memory_object_proxy_init (void)
+{
+  kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
+		   sizeof (struct memory_object_proxy), 0, NULL, 0);
+}
+
+/* Lookup a proxy memory object by its port.  Returns 0 if PORT is
+   invalid, inactive, or names some other kind of kernel object; the
+   kotype is checked under the port lock before ip_kobject is
+   interpreted as a proxy.  */
+static memory_object_proxy_t
+memory_object_proxy_port_lookup (ipc_port_t port)
+{
+  memory_object_proxy_t proxy;
+
+  if (!IP_VALID(port))
+    return 0;
+
+  ip_lock (port);
+  if (ip_active (port) && (ip_kotype (port) == IKOT_PAGER_PROXY))
+    proxy = (memory_object_proxy_t) port->ip_kobject;
+  else
+    proxy = 0;
+  ip_unlock (port);
+  return proxy;
+}
+
+
+/* Process a no-sender notification for the proxy memory object
+   port.  Returns TRUE when the notification was consumed (the proxy
+   and both of its kernel ports are destroyed), FALSE otherwise.  */
+boolean_t
+memory_object_proxy_notify (mach_msg_header_t *msg)
+{
+  if (msg->msgh_id == MACH_NOTIFY_NO_SENDERS)
+    {
+      memory_object_proxy_t proxy;
+      mach_no_senders_notification_t *ns;
+
+      ns = (mach_no_senders_notification_t *) msg;
+
+      /* NOTE(review): ip_kobject is read here without verifying that
+	 the port's kotype is IKOT_PAGER_PROXY (contrast
+	 memory_object_proxy_port_lookup) -- confirm notifications can
+	 only be delivered here for proxy ports.  */
+      proxy = (memory_object_proxy_t)
+	((ipc_port_t) ns->not_header.msgh_remote_port)->ip_kobject;
+      if (!proxy)
+	return FALSE;
+      /* Only honor the notification on the dedicated notify port.  */
+      if ((ipc_port_t) ns->not_header.msgh_remote_port != proxy->notify)
+	return FALSE;
+
+      /* Drop the send right to the real object held by the proxy,
+	 then tear down both kernel ports and free the proxy.  */
+      ipc_port_release_send (proxy->object);
+
+      ipc_kobject_set (proxy->port, IKO_NULL, IKOT_NONE);
+      ipc_port_dealloc_kernel (proxy->port);
+      ipc_kobject_set (proxy->notify, IKO_NULL, IKOT_NONE);
+      ipc_port_dealloc_kernel (proxy->notify);
+
+      kmem_cache_free (&memory_object_proxy_cache, (vm_offset_t) proxy);
+
+      return TRUE;
+    }
+
+  printf ("memory_object_proxy_notify: strange notification %d\n",
+	  msg->msgh_id);
+  return FALSE;
+}
+
+
+/* Create a new proxy memory object from [START;START+LEN) in the
+   given OBJECT at OFFSET in the new object with the maximum
+   protection MAX_PROTECTION and return it in *PORT.  */
+kern_return_t
+memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
+			    ipc_port_t *object, natural_t object_count,
+			    rpc_vm_offset_t *offset, natural_t offset_count,
+			    rpc_vm_offset_t *start, natural_t start_count,
+			    rpc_vm_size_t *len, natural_t len_count,
+			    ipc_port_t *port)
+{
+  memory_object_proxy_t proxy;
+  ipc_port_t notify;
+
+  if (space == IS_NULL)
+    return KERN_INVALID_TASK;
+
+  /* All four arrays must describe the same number of objects.  */
+  if (offset_count != object_count || start_count != object_count
+      || len_count != object_count)
+    return KERN_INVALID_ARGUMENT;
+
+  /* FIXME: Support more than one memory object. */
+  if (object_count != 1)
+    return KERN_INVALID_ARGUMENT;
+
+  if (!IP_VALID(object[0]))
+    return KERN_INVALID_NAME;
+
+  /* FIXME: Support a different offset from 0. */
+  if (offset[0] != 0)
+    return KERN_INVALID_ARGUMENT;
+
+  /* Reject a window whose end wraps around the address space.  */
+  if (start[0] + len[0] < start[0])
+    return KERN_INVALID_ARGUMENT;
+
+  /* NOTE(review): the allocation result is used without a null
+     check -- confirm kmem_cache_alloc cannot fail here.  */
+  proxy = (memory_object_proxy_t) kmem_cache_alloc (&memory_object_proxy_cache);
+
+  /* Allocate port, keeping a reference for it. */
+  proxy->port = ipc_port_alloc_kernel ();
+  if (proxy->port == IP_NULL)
+    {
+      kmem_cache_free (&memory_object_proxy_cache, (vm_offset_t) proxy);
+      return KERN_RESOURCE_SHORTAGE;
+    }
+  /* Associate the port with the proxy memory object. */
+  ipc_kobject_set (proxy->port, (ipc_kobject_t) proxy, IKOT_PAGER_PROXY);
+
+  /* Request no-senders notifications on the port. */
+  /* NOTE(review): unlike proxy->port above, this allocation is not
+     checked against IP_NULL -- verify a shortage here is impossible
+     or handled elsewhere.  */
+  proxy->notify = ipc_port_alloc_kernel ();
+  ipc_kobject_set (proxy->notify, (ipc_kobject_t) proxy, IKOT_PAGER_PROXY);
+  notify = ipc_port_make_sonce (proxy->notify);
+  ip_lock (proxy->port);
+  ipc_port_nsrequest (proxy->port, 1, notify, &notify);
+  /* ipc_port_nsrequest consumed the send-once right.  */
+  assert (notify == IP_NULL);
+
+  /* Consumes the port right */
+  proxy->object = object[0];
+  proxy->max_protection = max_protection;
+  proxy->start = start[0];
+  proxy->len = len[0];
+
+  *port = ipc_port_make_send (proxy->port);
+  return KERN_SUCCESS;
+}
+
+/* Lookup the real memory object and maximum protection for the proxy
+   memory object port PORT, for which the caller holds a reference.
+   *OBJECT is only guaranteed to be valid as long as the caller holds
+   the reference to PORT (unless the caller acquires its own reference
+   to it).  If PORT is not a proxy memory object, return
+   KERN_INVALID_ARGUMENT.  */
+kern_return_t
+memory_object_proxy_lookup (ipc_port_t port, ipc_port_t *object,
+			    vm_prot_t *max_protection, vm_offset_t *start,
+			    vm_offset_t *len)
+{
+  memory_object_proxy_t proxy;
+
+  proxy = memory_object_proxy_port_lookup (port);
+  if (!proxy)
+    return KERN_INVALID_ARGUMENT;
+
+  *max_protection = proxy->max_protection;
+  /* Start with the widest possible window; each level of proxy
+     narrows it below.  */
+  *start = 0;
+  *len = (vm_offset_t) ~0;
+
+  /* Proxies may be chained (a proxy of a proxy): walk down until the
+     underlying object is not itself a proxy, composing the start
+     offsets and clipping the length at every level.  */
+  do
+    {
+      *object = proxy->object;
+      if (proxy->len <= *start)
+	*len = 0;
+      else
+	*len = MIN(*len, proxy->len - *start);
+      *start += proxy->start;
+    }
+  while ((proxy = memory_object_proxy_port_lookup (proxy->object)));
+
+  return KERN_SUCCESS;
+}
diff --git a/vm/memory_object_proxy.h b/vm/memory_object_proxy.h
new file mode 100644
index 0000000..8b3f202
--- /dev/null
+++ b/vm/memory_object_proxy.h
@@ -0,0 +1,39 @@
+/* memory_object_proxy.h - Proxy memory objects for Mach.
+ Copyright (C) 2005, 2011 Free Software Foundation, Inc.
+ Written by Marcus Brinkmann.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef _VM_MEMORY_OBJECT_PROXY_H_
+#define _VM_MEMORY_OBJECT_PROXY_H_
+
+#include <ipc/ipc_types.h>
+#include <mach/boolean.h>
+#include <mach/machine/kern_return.h>
+#include <mach/machine/vm_types.h>
+#include <mach/message.h>
+#include <mach/vm_prot.h>
+
+extern void memory_object_proxy_init (void);
+extern boolean_t memory_object_proxy_notify (mach_msg_header_t *msg);
+extern kern_return_t memory_object_proxy_lookup (ipc_port_t port,
+ ipc_port_t *object,
+ vm_prot_t *max_protection,
+ vm_offset_t *start,
+ vm_offset_t *len);
+
+#endif /* _VM_MEMORY_OBJECT_PROXY_H_ */
diff --git a/vm/memory_object_user.cli b/vm/memory_object_user.cli
new file mode 100644
index 0000000..2bba41f
--- /dev/null
+++ b/vm/memory_object_user.cli
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+#define SEQNOS 1
+
+#include <mach/memory_object.defs>
diff --git a/vm/pmap.h b/vm/pmap.h
new file mode 100644
index 0000000..aca9ada
--- /dev/null
+++ b/vm/pmap.h
@@ -0,0 +1,241 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/pmap.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Machine address mapping definitions -- machine-independent
+ * section. [For machine-dependent section, see "machine/pmap.h".]
+ */
+
+#ifndef _VM_PMAP_H_
+#define _VM_PMAP_H_
+
+#include <machine/pmap.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_prot.h>
+#include <mach/boolean.h>
+#include <kern/thread.h>
+
+/*
+ * The following is a description of the interface to the
+ * machine-dependent "physical map" data structure. The module
+ * must provide a "pmap_t" data type that represents the
+ * set of valid virtual-to-physical addresses for one user
+ * address space. [The kernel address space is represented
+ * by a distinguished "pmap_t".] The routines described manage
+ * this type, install and update virtual-to-physical mappings,
+ * and perform operations on physical addresses common to
+ * many address spaces.
+ */
+
+/*
+ * Routines used for initialization.
+ * There is traditionally also a pmap_bootstrap,
+ * used very early by machine-dependent code,
+ * but it is not part of the interface.
+ */
+
+/* During VM initialization, steal a chunk of memory. */
+extern vm_offset_t pmap_steal_memory(vm_size_t);
+/* Initialization, after kernel runs in virtual memory. */
+extern void pmap_init(void);
+
+#ifndef MACHINE_PAGES
+/*
+ * If machine/pmap.h defines MACHINE_PAGES, it must implement
+ * the above functions. The pmap module has complete control.
+ * Otherwise, it must implement
+ * pmap_virtual_space
+ * pmap_init
+ * and vm/vm_resident.c implements pmap_steal_memory using
+ * pmap_virtual_space and pmap_enter.
+ */
+
+/* During VM initialization, report virtual space available for the kernel. */
+extern void pmap_virtual_space(vm_offset_t *, vm_offset_t *);
+#endif /* MACHINE_PAGES */
+
+/*
+ * Routines to manage the physical map data structure.
+ */
+
+/* Create a pmap_t. */
+pmap_t pmap_create(vm_size_t size);
+
+/* Return the kernel's pmap_t. */
+#ifndef pmap_kernel
+extern pmap_t pmap_kernel(void);
+#endif /* pmap_kernel */
+
+/* Gain and release a reference. */
+extern void pmap_reference(pmap_t pmap);
+extern void pmap_destroy(pmap_t pmap);
+
+/* Enter a mapping */
+extern void pmap_enter(pmap_t pmap, vm_offset_t va, phys_addr_t pa,
+ vm_prot_t prot, boolean_t wired);
+
+
+/*
+ * Routines that operate on ranges of virtual addresses.
+ */
+
+/* Remove mappings. */
+void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
+
+/* Change protections. */
+void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot);
+
+/*
+ * Routines to set up hardware state for physical maps to be used.
+ */
+/* Prepare pmap_t to run on a given processor. */
+extern void pmap_activate(pmap_t, thread_t, int);
+/* Release pmap_t from use on processor. */
+extern void pmap_deactivate(pmap_t, thread_t, int);
+
+
+/*
+ * Routines that operate on physical addresses.
+ */
+
+/* Restrict access to page. */
+void pmap_page_protect(phys_addr_t pa, vm_prot_t prot);
+
+/*
+ * Routines to manage reference/modify bits based on
+ * physical addresses, simulating them if not provided
+ * by the hardware.
+ */
+
+/* Clear reference bit */
+void pmap_clear_reference(phys_addr_t pa);
+
+/* Return reference bit */
+#ifndef pmap_is_referenced
+boolean_t pmap_is_referenced(phys_addr_t pa);
+#endif /* pmap_is_referenced */
+
+/* Clear modify bit */
+void pmap_clear_modify(phys_addr_t pa);
+
+/* Return modify bit */
+boolean_t pmap_is_modified(phys_addr_t pa);
+
+/*
+ * Sundry required routines
+ */
+/* Return a virtual-to-physical mapping, if possible. */
+extern phys_addr_t pmap_extract(pmap_t, vm_offset_t);
+/* Perform garbage collection, if any. */
+extern void pmap_collect(pmap_t);
+
+/* Lookup an address. */
+int pmap_whatis(pmap_t, vm_offset_t);
+
+/* Specify pageability. */
+extern void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
+
+/*
+ * Optional routines
+ */
+#ifndef pmap_copy
+/* Copy range of mappings, if desired. */
+extern void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
+ vm_offset_t);
+#endif /* pmap_copy */
+#ifndef pmap_attribute
+/* Get/Set special memory attributes. */
+extern kern_return_t pmap_attribute(void);
+#endif /* pmap_attribute */
+
+/*
+ * Grab a physical page:
+ * the standard memory allocation mechanism
+ * during system initialization.
+ */
+extern vm_offset_t pmap_grab_page (void);
+
+/*
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ */
+extern void pmap_pageable(
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end,
+ boolean_t pageable);
+
+/*
+ * Back-door routine for mapping kernel VM at initialization.
+ * Useful for mapping memory outside the range of direct mapped
+ * physical memory (i.e., devices).
+ */
+extern vm_offset_t pmap_map_bd(
+ vm_offset_t virt,
+ phys_addr_t start,
+ phys_addr_t end,
+ vm_prot_t prot);
+
+/*
+ * Routines defined as macros.
+ */
+#ifndef PMAP_ACTIVATE_USER
+/* Make PMAP current for THREAD on CPU; the kernel pmap is always
+   resident, so only non-kernel pmaps need activation.
+   NOTE(review): brace-block macro, not do { } while (0) -- safe only
+   when used as a full statement; confirm no un-braced if/else use.  */
+#define PMAP_ACTIVATE_USER(pmap, thread, cpu) { \
+	if ((pmap) != kernel_pmap) \
+	    PMAP_ACTIVATE(pmap, thread, cpu); \
+}
+#endif	/* PMAP_ACTIVATE_USER */
+
+#ifndef PMAP_DEACTIVATE_USER
+/* Release PMAP from use by THREAD on CPU (no-op for the kernel pmap,
+   mirroring PMAP_ACTIVATE_USER above).  */
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+	if ((pmap) != kernel_pmap) \
+	    PMAP_DEACTIVATE(pmap, thread, cpu); \
+}
+#endif	/* PMAP_DEACTIVATE_USER */
+
+#ifndef PMAP_ACTIVATE_KERNEL
+/* Activate the kernel pmap on CPU (no associated thread).  */
+#define PMAP_ACTIVATE_KERNEL(cpu)	\
+	PMAP_ACTIVATE(kernel_pmap, THREAD_NULL, cpu)
+#endif	/* PMAP_ACTIVATE_KERNEL */
+
+#ifndef PMAP_DEACTIVATE_KERNEL
+/* Deactivate the kernel pmap on CPU.  */
+#define PMAP_DEACTIVATE_KERNEL(cpu)	\
+	PMAP_DEACTIVATE(kernel_pmap, THREAD_NULL, cpu)
+#endif	/* PMAP_DEACTIVATE_KERNEL */
+
+/*
+ * Exported data structures
+ */
+
+extern pmap_t kernel_pmap; /* The kernel's map */
+
+#endif /* _VM_PMAP_H_ */
diff --git a/vm/vm_debug.c b/vm/vm_debug.c
new file mode 100644
index 0000000..b0dace8
--- /dev/null
+++ b/vm/vm_debug.c
@@ -0,0 +1,548 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_debug.c.
+ * Author: Rich Draves
+ * Date: March, 1990
+ *
+ * Exported kernel calls. See mach_debug/mach_debug.defs.
+ */
+
+#include <string.h>
+
+#include <kern/debug.h>
+#include <kern/thread.h>
+#include <mach/kern_return.h>
+#include <mach/machine/vm_types.h>
+#include <mach/memory_object.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_param.h>
+#include <mach_debug/vm_info.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <kern/mach_debug.server.h>
+#include <kern/task.h>
+#include <kern/host.h>
+#include <kern/printf.h>
+#include <ipc/ipc_port.h>
+
+
+#if MACH_VM_DEBUG
+
+/*
+ *	Routine:	vm_object_real_name
+ *	Purpose:
+ *		Convert a VM object to a name port.
+ *	Conditions:
+ *		Takes object and port locks.
+ *	Returns:
+ *		A naked send right for the object's name port,
+ *		or IP_NULL if the object or its name port is null.
+ */
+
+static ipc_port_t
+vm_object_real_name(vm_object_t object)
+{
+	ipc_port_t port = IP_NULL;
+
+	if (object != VM_OBJECT_NULL) {
+		vm_object_lock(object);
+		/* pager_name may still be IP_NULL; then IP_NULL is
+		   returned unchanged */
+		if (object->pager_name != IP_NULL)
+			port = ipc_port_make_send(object->pager_name);
+		vm_object_unlock(object);
+	}
+
+	return port;
+}
+
+/*
+ *	Routine:	mach_vm_region_info [kernel call]
+ *	Purpose:
+ *		Retrieve information about a VM region,
+ *		including info about the object chain.
+ *	Conditions:
+ *		Nothing locked.
+ *	Returns:
+ *		KERN_SUCCESS		Retrieve region/object info.
+ *		KERN_INVALID_TASK	The map is null.
+ *		KERN_NO_SPACE		There is no entry at/after the address.
+ */
+
+kern_return_t
+mach_vm_region_info(
+	vm_map_t 	map,
+	vm_offset_t 	address,
+	vm_region_info_t	*regionp,
+	ipc_port_t	*portp)
+{
+	vm_map_t cmap;		/* current map in traversal */
+	vm_map_t nmap;		/* next map to look at */
+	vm_map_entry_t entry;	/* entry in current map */
+	vm_object_t object;
+
+	if (map == VM_MAP_NULL)
+		return KERN_INVALID_TASK;
+
+	/* find the entry containing (or following) the address */
+
+	vm_map_lock_read(map);
+	for (cmap = map;;) {
+		/* cmap is read-locked */
+
+		if (!vm_map_lookup_entry(cmap, address, &entry)) {
+			/* no entry contains address; try its successor */
+			entry = entry->vme_next;
+			if (entry == vm_map_to_entry(cmap)) {
+				if (map == cmap) {
+					vm_map_unlock_read(cmap);
+					return KERN_NO_SPACE;
+				}
+
+				/* back out to top-level & skip this submap */
+
+				address = vm_map_max(cmap);
+				vm_map_unlock_read(cmap);
+				vm_map_lock_read(map);
+				cmap = map;
+				continue;
+			}
+		}
+
+		if (entry->is_sub_map) {
+			/* move down to the sub map */
+
+			nmap = entry->object.sub_map;
+			/* lock the submap before dropping the parent so
+			   the entry cannot vanish underneath us */
+			vm_map_lock_read(nmap);
+			vm_map_unlock_read(cmap);
+			cmap = nmap;
+			continue;
+		} else {
+			break;
+		}
+		/*NOTREACHED*/
+	}
+
+
+	assert(entry->vme_start < entry->vme_end);
+
+	regionp->vri_start = entry->vme_start;
+	regionp->vri_end = entry->vme_end;
+
+	/* attributes from the real entry */
+
+	regionp->vri_protection = entry->protection;
+	regionp->vri_max_protection = entry->max_protection;
+	regionp->vri_inheritance = entry->inheritance;
+	regionp->vri_wired_count = !!entry->wired_count; /* Doesn't stack */
+	regionp->vri_user_wired_count = regionp->vri_wired_count; /* Obsolete */
+
+	object = entry->object.vm_object;
+	*portp = vm_object_real_name(object);
+	regionp->vri_object = (vm_offset_t) object;
+	regionp->vri_offset = entry->offset;
+	regionp->vri_needs_copy = entry->needs_copy;
+
+	regionp->vri_sharing = entry->is_shared;
+
+	vm_map_unlock_read(cmap);
+	return KERN_SUCCESS;
+}
+
+/*
+ *	Routine:	mach_vm_object_info [kernel call]
+ *	Purpose:
+ *		Retrieve information about a VM object.
+ *	Conditions:
+ *		Nothing locked.
+ *	Returns:
+ *		KERN_SUCCESS		Retrieved object info.
+ *		KERN_INVALID_ARGUMENT	The object is null.
+ */
+
+kern_return_t
+mach_vm_object_info(
+	vm_object_t 	object,
+	vm_object_info_t	*infop,
+	ipc_port_t	*shadowp,
+	ipc_port_t	*copyp)
+{
+	vm_object_info_t info;
+	vm_object_info_state_t state;
+	ipc_port_t shadow, copy;
+
+	if (object == VM_OBJECT_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/*
+	 *	Because of lock-ordering/deadlock considerations,
+	 *	we can't use vm_object_real_name for the copy object.
+	 */
+
+    retry:
+	vm_object_lock(object);
+	copy = IP_NULL;
+	if (object->copy != VM_OBJECT_NULL) {
+		/* try-lock to avoid deadlock; on failure drop
+		   everything and start over */
+		if (!vm_object_lock_try(object->copy)) {
+			vm_object_unlock(object);
+			simple_lock_pause();	/* wait a bit */
+			goto retry;
+		}
+
+		if (object->copy->pager_name != IP_NULL)
+			copy = ipc_port_make_send(object->copy->pager_name);
+		vm_object_unlock(object->copy);
+	}
+	shadow = vm_object_real_name(object->shadow);
+
+	/* snapshot the scalar fields while the object is locked */
+	info.voi_object = (vm_offset_t) object;
+	info.voi_pagesize = PAGE_SIZE;
+	info.voi_size = object->size;
+	info.voi_ref_count = object->ref_count;
+	info.voi_resident_page_count = object->resident_page_count;
+	info.voi_absent_count = object->absent_count;
+	info.voi_copy = (vm_offset_t) object->copy;
+	info.voi_shadow = (vm_offset_t) object->shadow;
+	info.voi_shadow_offset = object->shadow_offset;
+	info.voi_paging_offset = object->paging_offset;
+	info.voi_copy_strategy = object->copy_strategy;
+	info.voi_last_alloc = object->last_alloc;
+	info.voi_paging_in_progress = object->paging_in_progress;
+
+	/* gather the boolean flags into one state word */
+	state = 0;
+	if (object->pager_created)
+		state |= VOI_STATE_PAGER_CREATED;
+	if (object->pager_initialized)
+		state |= VOI_STATE_PAGER_INITIALIZED;
+	if (object->pager_ready)
+		state |= VOI_STATE_PAGER_READY;
+	if (object->can_persist)
+		state |= VOI_STATE_CAN_PERSIST;
+	if (object->internal)
+		state |= VOI_STATE_INTERNAL;
+	if (object->temporary)
+		state |= VOI_STATE_TEMPORARY;
+	if (object->alive)
+		state |= VOI_STATE_ALIVE;
+	if (object->lock_in_progress)
+		state |= VOI_STATE_LOCK_IN_PROGRESS;
+	if (object->lock_restart)
+		state |= VOI_STATE_LOCK_RESTART;
+	info.voi_state = state;
+	vm_object_unlock(object);
+
+	/* copy out only after all locks are dropped */
+	*infop = info;
+	*shadowp = shadow;
+	*copyp = copy;
+	return KERN_SUCCESS;
+}
+
+/* States in which a page carries no meaningful data; while any of
+   these is set, the pmap modify/reference bits are not consulted.  */
+#define VPI_STATE_NODATA	(VPI_STATE_BUSY|VPI_STATE_FICTITIOUS| \
+				 VPI_STATE_PRIVATE|VPI_STATE_ABSENT)
+
+/*
+ *	Routine:	mach_vm_object_pages/mach_vm_object_pages_phys/ [kernel call]
+ *	Purpose:
+ *		Retrieve information about the pages in a VM object.
+ *	Conditions:
+ *		Nothing locked.  Obeys CountInOut protocol.
+ *	Returns:
+ *		KERN_SUCCESS		Retrieved object info.
+ *		KERN_INVALID_ARGUMENT	The object is null.
+ *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
+ */
+
+static kern_return_t
+_mach_vm_object_pages(
+	vm_object_t 	object,
+	void*		*pagesp,
+	natural_t	*countp,
+	int		phys)
+{
+	vm_size_t size;
+	vm_offset_t addr;
+	void *pages;
+	unsigned int potential, actual, count;
+	vm_page_t p;
+	kern_return_t kr;
+
+	if (object == VM_OBJECT_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* start with in-line memory */
+
+	pages = *pagesp;
+	potential = *countp;
+
+	/* Grow the buffer until it can hold every resident page; the
+	   count may change while the object is unlocked, so re-check
+	   each time around.  */
+	for (size = 0;;) {
+		vm_object_lock(object);
+		actual = object->resident_page_count;
+		if (actual <= potential)
+			break;
+		vm_object_unlock(object);
+
+		/* free only a previously-allocated (too small) buffer */
+		if (pages != *pagesp)
+			kmem_free(ipc_kernel_map, addr, size);
+
+		if (phys)
+			size = round_page(actual * sizeof(vm_page_phys_info_t));
+		else
+			size = round_page(actual * sizeof(vm_page_info_t));
+		kr = kmem_alloc(ipc_kernel_map, &addr, size);
+		if (kr != KERN_SUCCESS)
+			return kr;
+
+		pages = (void *) addr;
+		if (phys)
+			potential = size / sizeof(vm_page_phys_info_t);
+		else
+			potential = size / sizeof(vm_page_info_t);
+	}
+	/* object is locked, we have enough wired memory */
+
+	count = 0;
+	queue_iterate(&object->memq, p, vm_page_t, listq) {
+		vm_page_info_t *info = NULL;
+		vm_page_phys_info_t *info_phys = NULL;
+
+		/* NOTE(review): arithmetic on void * relies on the GNU C
+		   extension that sizeof(void) == 1.  */
+		if (phys)
+			info_phys = pages + count * sizeof(*info_phys);
+		else
+			info = pages + count * sizeof(*info);
+		count++;
+
+		vm_page_info_state_t state = 0;
+
+		if (phys) {
+			info_phys->vpi_offset = p->offset;
+			/* vpi_phys_addr may be narrower than phys_addr */
+			if (p->phys_addr != (typeof(info_phys->vpi_phys_addr)) p->phys_addr)
+				printf("warning: physical address overflow in mach_vm_object_pages!!\n");
+			info_phys->vpi_phys_addr = p->phys_addr;
+			info_phys->vpi_wire_count = p->wire_count;
+			info_phys->vpi_page_lock = p->page_lock;
+			info_phys->vpi_unlock_request = p->unlock_request;
+		} else {
+			info->vpi_offset = p->offset;
+			if (p->phys_addr != (typeof(info->vpi_phys_addr)) p->phys_addr)
+				printf("warning: physical address overflow in mach_vm_object_pages!!\n");
+			info->vpi_phys_addr = p->phys_addr;
+			info->vpi_wire_count = p->wire_count;
+			info->vpi_page_lock = p->page_lock;
+			info->vpi_unlock_request = p->unlock_request;
+		}
+
+		if (p->busy)
+			state |= VPI_STATE_BUSY;
+		if (p->wanted)
+			state |= VPI_STATE_WANTED;
+		if (p->tabled)
+			state |= VPI_STATE_TABLED;
+		if (p->fictitious)
+			state |= VPI_STATE_FICTITIOUS;
+		if (p->private)
+			state |= VPI_STATE_PRIVATE;
+		if (p->absent)
+			state |= VPI_STATE_ABSENT;
+		if (p->error)
+			state |= VPI_STATE_ERROR;
+		if (p->dirty)
+			state |= VPI_STATE_DIRTY;
+		if (p->precious)
+			state |= VPI_STATE_PRECIOUS;
+		if (p->overwriting)
+			state |= VPI_STATE_OVERWRITING;
+
+		/* fold in the hardware modify bit and latch it */
+		if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) &&
+		    pmap_is_modified(p->phys_addr)) {
+			state |= VPI_STATE_DIRTY;
+			p->dirty = TRUE;
+		}
+
+		vm_page_lock_queues();
+		if (p->inactive)
+			state |= VPI_STATE_INACTIVE;
+		if (p->active)
+			state |= VPI_STATE_ACTIVE;
+		if (p->laundry)
+			state |= VPI_STATE_LAUNDRY;
+		if (p->free)
+			state |= VPI_STATE_FREE;
+		if (p->reference)
+			state |= VPI_STATE_REFERENCE;
+
+		/* likewise for the hardware reference bit */
+		if (((state & (VPI_STATE_NODATA|VPI_STATE_REFERENCE)) == 0) &&
+		    pmap_is_referenced(p->phys_addr)) {
+			state |= VPI_STATE_REFERENCE;
+			p->reference = TRUE;
+		}
+		vm_page_unlock_queues();
+
+		if (phys)
+			info_phys->vpi_state = state;
+		else
+			info->vpi_state = state;
+	}
+
+	if (object->resident_page_count != count)
+		panic("mach_vm_object_pages");
+	vm_object_unlock(object);
+
+	if (pages == *pagesp) {
+		/* data fit in-line; nothing to deallocate */
+
+		*countp = actual;
+	} else if (actual == 0) {
+		kmem_free(ipc_kernel_map, addr, size);
+
+		*countp = 0;
+	} else {
+		vm_size_t size_used, rsize_used;
+		vm_map_copy_t copy;
+
+		/* kmem_alloc doesn't zero memory */
+
+		if (phys)
+			size_used = actual * sizeof(vm_page_phys_info_t);
+		else
+			size_used = actual * sizeof(vm_page_info_t);
+		rsize_used = round_page(size_used);
+
+		/* return the unused tail pages, zero the slack */
+		if (rsize_used != size)
+			kmem_free(ipc_kernel_map,
+				  addr + rsize_used, size - rsize_used);
+
+		if (size_used != rsize_used)
+			memset((void *) (addr + size_used), 0,
+			       rsize_used - size_used);
+
+		/* hand the buffer to the caller; consumes the mapping */
+		kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
+				   TRUE, &copy);
+		assert(kr == KERN_SUCCESS);
+
+		*pagesp = (void *) copy;
+		*countp = actual;
+	}
+
+	return KERN_SUCCESS;
+}
+
+/* Exported entry: virtual page info variant of _mach_vm_object_pages.  */
+kern_return_t
+mach_vm_object_pages(
+	vm_object_t 	object,
+	vm_page_info_array_t	*pagesp,
+	natural_t	*countp)
+{
+	return _mach_vm_object_pages(object, (void**) pagesp, countp, 0);
+}
+
+/* Exported entry: physical page info variant of _mach_vm_object_pages.  */
+kern_return_t
+mach_vm_object_pages_phys(
+	vm_object_t 	object,
+	vm_page_phys_info_array_t	*pagesp,
+	natural_t	*countp)
+{
+	return _mach_vm_object_pages(object, (void**) pagesp, countp, 1);
+}
+
+#endif /* MACH_VM_DEBUG */
+
+/*
+ *	Routine:	host_virtual_physical_table_info
+ *	Purpose:
+ *		Return information about the VP table.
+ *	Conditions:
+ *		Nothing locked.  Obeys CountInOut protocol.
+ *	Returns:
+ *		KERN_SUCCESS		Returned information.
+ *		KERN_INVALID_HOST	The host is null.
+ *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
+ */
+
+kern_return_t
+host_virtual_physical_table_info(const host_t host,
+		hash_info_bucket_array_t *infop, natural_t *countp)
+{
+	vm_offset_t addr;
+	vm_size_t size = 0;/* '=0' to quiet gcc warnings */
+	hash_info_bucket_t *info;
+	unsigned int potential, actual;
+	kern_return_t kr;
+
+	if (host == HOST_NULL)
+		return KERN_INVALID_HOST;
+
+	/* start with in-line data */
+
+	info = *infop;
+	potential = *countp;
+
+	/* grow the buffer until vm_page_info reports everything fit */
+	for (;;) {
+		actual = vm_page_info(info, potential);
+		if (actual <= potential)
+			break;
+
+		/* allocate more memory */
+
+		if (info != *infop)
+			kmem_free(ipc_kernel_map, addr, size);
+
+		size = round_page(actual * sizeof *info);
+		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+		if (kr != KERN_SUCCESS)
+			return KERN_RESOURCE_SHORTAGE;
+
+		info = (hash_info_bucket_t *) addr;
+		potential = size/sizeof *info;
+	}
+
+	if (info == *infop) {
+		/* data fit in-line; nothing to deallocate */
+
+		*countp = actual;
+	} else if (actual == 0) {
+		kmem_free(ipc_kernel_map, addr, size);
+
+		*countp = 0;
+	} else {
+		vm_map_copy_t copy;
+		vm_size_t used;
+
+		used = round_page(actual * sizeof *info);
+
+		/* return the unused tail of the allocation */
+		if (used != size)
+			kmem_free(ipc_kernel_map, addr + used, size - used);
+
+		/* hand the buffer to the caller; consumes the mapping */
+		kr = vm_map_copyin(ipc_kernel_map, addr, used,
+				   TRUE, &copy);
+		assert(kr == KERN_SUCCESS);
+
+		*infop = (hash_info_bucket_t *) copy;
+		*countp = actual;
+	}
+
+	return KERN_SUCCESS;
+}
diff --git a/vm/vm_external.c b/vm/vm_external.c
new file mode 100644
index 0000000..99f4b9c
--- /dev/null
+++ b/vm/vm_external.c
@@ -0,0 +1,151 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * This module maintains information about the presence of
+ * pages not in memory. Since an external memory object
+ * must maintain a complete knowledge of its contents, this
+ * information takes the form of hints.
+ */
+
+#include <mach/boolean.h>
+#include <kern/slab.h>
+#include <vm/vm_external.h>
+#include <mach/vm_param.h>
+#include <kern/assert.h>
+#include <string.h>
+
+
+
+boolean_t vm_external_unsafe = FALSE;
+
+struct kmem_cache vm_external_cache;
+
+/*
+ * The implementation uses bit arrays to record whether
+ * a page has been written to external storage. For
+ * convenience, these bit arrays come in two sizes
+ * (measured in bytes).
+ */
+
+#define SMALL_SIZE (VM_EXTERNAL_SMALL_SIZE/8)
+#define LARGE_SIZE (VM_EXTERNAL_LARGE_SIZE/8)
+
+struct kmem_cache vm_object_small_existence_map_cache;
+struct kmem_cache vm_object_large_existence_map_cache;
+
+
+/*
+ * Create a vm_external_t for an object of the given size (bytes),
+ * with an all-zero existence bitmap (no pages known to exist on
+ * backing storage yet).  One of two fixed bitmap sizes is chosen.
+ */
+vm_external_t vm_external_create(vm_offset_t size)
+{
+	vm_external_t	result;
+	vm_size_t	bytes;
+
+	result = (vm_external_t) kmem_cache_alloc(&vm_external_cache);
+	result->existence_map = (char *) 0;
+
+	/* Bytes needed for one bit per page, rounded up. */
+	bytes = (atop(size) + 07) >> 3;
+	if (bytes <= SMALL_SIZE) {
+		result->existence_map =
+		 (char *) kmem_cache_alloc(&vm_object_small_existence_map_cache);
+		result->existence_size = SMALL_SIZE;
+	} else {
+		result->existence_map =
+		 (char *) kmem_cache_alloc(&vm_object_large_existence_map_cache);
+		result->existence_size = LARGE_SIZE;
+	}
+	memset (result->existence_map, 0, result->existence_size);
+	return(result);
+}
+
+/*
+ * Destroy a vm_external_t, releasing its bitmap (if any) to the
+ * appropriately sized cache.  A null argument is a no-op.
+ */
+void vm_external_destroy(vm_external_t e)
+{
+	if (e == VM_EXTERNAL_NULL)
+		return;
+
+	if (e->existence_map != (char *) 0) {
+		/* existence_size tells us which cache the map came from. */
+		if (e->existence_size <= SMALL_SIZE) {
+			kmem_cache_free(&vm_object_small_existence_map_cache,
+					(vm_offset_t) e->existence_map);
+		} else {
+			kmem_cache_free(&vm_object_large_existence_map_cache,
+					(vm_offset_t) e->existence_map);
+		}
+	}
+	kmem_cache_free(&vm_external_cache, (vm_offset_t) e);
+}
+
+/*
+ * Return the recorded state of the page at the given byte offset:
+ * EXISTS if its bit is set, ABSENT if clear, or UNKNOWN when hints
+ * are globally disabled (vm_external_unsafe), there is no map, or
+ * the offset falls beyond the bitmap.
+ */
+vm_external_state_t _vm_external_state_get(const vm_external_t e,
+					   vm_offset_t offset)
+{
+	unsigned
+	int		bit, byte;
+
+	if (vm_external_unsafe ||
+	    (e == VM_EXTERNAL_NULL) ||
+	    (e->existence_map == (char *) 0))
+		return(VM_EXTERNAL_STATE_UNKNOWN);
+
+	/* One bit per page: page index -> (byte, bit-within-byte). */
+	bit = atop(offset);
+	byte = bit >> 3;
+	if (byte >= e->existence_size) return (VM_EXTERNAL_STATE_UNKNOWN);
+	return( (e->existence_map[byte] & (1 << (bit & 07))) ?
+		VM_EXTERNAL_STATE_EXISTS : VM_EXTERNAL_STATE_ABSENT );
+}
+
+/*
+ * Record the state of the page at the given byte offset.  Only
+ * VM_EXTERNAL_STATE_EXISTS transitions are recorded: bits are set
+ * but never cleared, consistent with the "hint" nature of the map.
+ * Out-of-range offsets and missing maps are silently ignored.
+ */
+void vm_external_state_set(
+	vm_external_t	e,
+	vm_offset_t	offset,
+	vm_external_state_t state)
+{
+	unsigned
+	int		bit, byte;
+
+	if ((e == VM_EXTERNAL_NULL) || (e->existence_map == (char *) 0))
+		return;
+
+	if (state != VM_EXTERNAL_STATE_EXISTS)
+		return;
+
+	/* Same page-index-to-bit mapping as _vm_external_state_get. */
+	bit = atop(offset);
+	byte = bit >> 3;
+	if (byte >= e->existence_size) return;
+	e->existence_map[byte] |= (1 << (bit & 07));
+}
+
+/*
+ * One-time module initialization: set up the cache for the
+ * vm_external descriptors and the two fixed-size bitmap caches.
+ */
+void vm_external_module_initialize(void)
+{
+	vm_size_t	size = (vm_size_t) sizeof(struct vm_external);
+
+	kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
+			NULL, 0);
+
+	kmem_cache_init(&vm_object_small_existence_map_cache,
+			"small_existence_map", SMALL_SIZE, 0,
+			NULL, 0);
+
+	kmem_cache_init(&vm_object_large_existence_map_cache,
+			"large_existence_map", LARGE_SIZE, 0,
+			NULL, 0);
+}
diff --git a/vm/vm_external.h b/vm/vm_external.h
new file mode 100644
index 0000000..4e44ddf
--- /dev/null
+++ b/vm/vm_external.h
@@ -0,0 +1,95 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _VM_VM_EXTERNAL_H_
+#define _VM_VM_EXTERNAL_H_
+
+/*
+ * External page management hint technology
+ *
+ * The data structure exported by this module maintains
+ * a (potentially incomplete) map of the pages written
+ * to external storage for a range of virtual memory.
+ */
+
+/*
+ * The data structure representing the state of pages
+ * on external storage.
+ */
+
+typedef struct vm_external {
+	int		existence_size;	/* Size of the following bitmap,
+					 * in bytes (one bit per page).
+					 */
+	char		*existence_map;	/* A bitmap of pages that have
+					 * been written to backing
+					 * storage.
+					 */
+#if 0
+	/* XXX: Currently, existence_count is not used.  I guess it
+	   could be useful to get rid of the map if the count drops to
+	   zero.  */
+	int		existence_count;/* Number of bits turned on in
+					 * existence_map.
+					 */
+#endif
+} *vm_external_t;
+
+#define VM_EXTERNAL_NULL ((vm_external_t) 0)
+
+#define VM_EXTERNAL_SMALL_SIZE 128
+#define VM_EXTERNAL_LARGE_SIZE 8192
+
+/*
+ * The states that may be recorded for a page of external storage.
+ */
+
+typedef int vm_external_state_t;
+#define VM_EXTERNAL_STATE_EXISTS 1
+#define VM_EXTERNAL_STATE_UNKNOWN 2
+#define VM_EXTERNAL_STATE_ABSENT 3
+
+
+/*
+ * Routines exported by this module.
+ */
+
+/* Initialize the module */
+extern void vm_external_module_initialize(void);
+/* Create a vm_external_t */
+extern vm_external_t vm_external_create(vm_offset_t);
+/* Destroy one */
+extern void vm_external_destroy(vm_external_t);
+
+/* Set state of a page. */
+extern void vm_external_state_set(vm_external_t, vm_offset_t,
+ vm_external_state_t);
+/* Retrieve the state for a given page, if known. */
+#define vm_external_state_get(e,offset) (((e) != VM_EXTERNAL_NULL) ? \
+ _vm_external_state_get(e, offset) : \
+ VM_EXTERNAL_STATE_UNKNOWN)
+/* HIDDEN routine */
+extern vm_external_state_t _vm_external_state_get(vm_external_t, vm_offset_t);
+
+#endif /* _VM_VM_EXTERNAL_H_ */
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
new file mode 100644
index 0000000..c6e2800
--- /dev/null
+++ b/vm/vm_fault.c
@@ -0,0 +1,2136 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_fault.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Page fault handling module.
+ */
+
+#include <kern/printf.h>
+#include <vm/vm_fault.h>
+#include <mach/kern_return.h>
+#include <mach/message.h> /* for error codes */
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+#include <mach/vm_statistics.h>
+#include <vm/vm_pageout.h>
+#include <mach/vm_param.h>
+#include <mach/memory_object.h>
+#include <vm/memory_object_user.user.h>
+ /* For memory_object_data_{request,unlock} */
+#include <kern/macros.h>
+#include <kern/slab.h>
+
+#if MACH_PCSAMPLE
+#include <kern/pc_sample.h>
+#endif
+
+
+
+/*
+ * State needed by vm_fault_continue.
+ * This is a little hefty to drop directly
+ * into the thread structure.
+ */
+typedef struct vm_fault_state {
+ struct vm_map *vmf_map;
+ vm_offset_t vmf_vaddr;
+ vm_prot_t vmf_fault_type;
+ boolean_t vmf_change_wiring;
+ vm_fault_continuation_t vmf_continuation;
+ vm_map_version_t vmf_version;
+ boolean_t vmf_wired;
+ struct vm_object *vmf_object;
+ vm_offset_t vmf_offset;
+ vm_prot_t vmf_prot;
+
+ boolean_t vmfp_backoff;
+ struct vm_object *vmfp_object;
+ vm_offset_t vmfp_offset;
+ struct vm_page *vmfp_first_m;
+ vm_prot_t vmfp_access;
+} vm_fault_state_t;
+
+struct kmem_cache vm_fault_state_cache;
+
+int vm_object_absent_max = 50;
+
+boolean_t vm_fault_dirty_handling = FALSE;
+boolean_t vm_fault_interruptible = TRUE;
+
+boolean_t software_reference_bits = TRUE;
+
+#if MACH_KDB
+extern struct db_watchpoint *db_watchpoint_list;
+#endif /* MACH_KDB */
+
+/*
+ * Routine: vm_fault_init
+ * Purpose:
+ * Initialize our private data structures.
+ */
+void vm_fault_init(void)
+{
+	/* Cache for the per-thread state saved by vm_fault_page/vm_fault
+	   across blocking continuations (see vm_fault_state_t above). */
+	kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
+			sizeof(vm_fault_state_t), 0, NULL, 0);
+}
+
+/*
+ * Routine: vm_fault_cleanup
+ * Purpose:
+ * Clean up the result of vm_fault_page.
+ * Results:
+ * The paging reference for "object" is released.
+ * "object" is unlocked.
+ * If "top_page" is not null, "top_page" is
+ * freed and the paging reference for the object
+ * containing it is released.
+ *
+ * In/out conditions:
+ * "object" must be locked.
+ */
+void
+vm_fault_cleanup(
+	vm_object_t	object,
+	vm_page_t	top_page)
+{
+	/* Drop the paging reference donated to vm_fault_page. */
+	vm_object_paging_end(object);
+	vm_object_unlock(object);
+
+	/* If a busy placeholder page was left in the top-level object,
+	   free it and drop that object's paging reference as well. */
+	if (top_page != VM_PAGE_NULL) {
+	    object = top_page->object;
+	    vm_object_lock(object);
+	    VM_PAGE_FREE(top_page);
+	    vm_object_paging_end(object);
+	    vm_object_unlock(object);
+	}
+}
+
+
+#if MACH_PCSAMPLE
+/*
+ * Do PC sampling on current thread, assuming
+ * that it is the thread taking this page fault.
+ *
+ * Must check for THREAD_NULL, since faults
+ * can occur before threads are running.
+ */
+
+#define vm_stat_sample(flavor) \
+ MACRO_BEGIN \
+ thread_t _thread_ = current_thread(); \
+ \
+ if (_thread_ != THREAD_NULL) \
+ take_pc_sample_macro(_thread_, (flavor), 1, 0); \
+ MACRO_END
+
+#else
+#define vm_stat_sample(x)
+#endif /* MACH_PCSAMPLE */
+
+
+
+/*
+ * Routine: vm_fault_page
+ * Purpose:
+ * Find the resident page for the virtual memory
+ * specified by the given virtual memory object
+ * and offset.
+ * Additional arguments:
+ * The required permissions for the page is given
+ * in "fault_type". Desired permissions are included
+ * in "protection".
+ *
+ * If the desired page is known to be resident (for
+ * example, because it was previously wired down), asserting
+ * the "unwiring" parameter will speed the search.
+ *
+ * If the operation can be interrupted (by thread_abort
+ * or thread_terminate), then the "interruptible"
+ * parameter should be asserted.
+ *
+ * Results:
+ * The page containing the proper data is returned
+ * in "result_page".
+ *
+ * In/out conditions:
+ * The source object must be locked and referenced,
+ * and must donate one paging reference. The reference
+ * is not affected. The paging reference and lock are
+ * consumed.
+ *
+ * If the call succeeds, the object in which "result_page"
+ * resides is left locked and holding a paging reference.
+ * If this is not the original object, a busy page in the
+ * original object is returned in "top_page", to prevent other
+ * callers from pursuing this same data, along with a paging
+ * reference for the original object. The "top_page" should
+ * be destroyed when this guarantee is no longer required.
+ * The "result_page" is also left busy. It is not removed
+ * from the pageout queues.
+ */
+vm_fault_return_t vm_fault_page(
+ /* Arguments: */
+ vm_object_t first_object, /* Object to begin search */
+ vm_offset_t first_offset, /* Offset into object */
+ vm_prot_t fault_type, /* What access is requested */
+ boolean_t must_be_resident,/* Must page be resident? */
+ boolean_t interruptible, /* May fault be interrupted? */
+ /* Modifies in place: */
+ vm_prot_t *protection, /* Protection for mapping */
+ /* Returns: */
+ vm_page_t *result_page, /* Page found, if successful */
+ vm_page_t *top_page, /* Page in top object, if
+ * not result_page.
+ */
+ /* More arguments: */
+ boolean_t resume, /* We are restarting. */
+ continuation_t continuation) /* Continuation for blocking. */
+{
+ vm_page_t m;
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_page_t first_m;
+ vm_object_t next_object;
+ vm_object_t copy_object;
+ boolean_t look_for_page;
+ vm_prot_t access_required;
+
+ if (resume) {
+ vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ if (state->vmfp_backoff)
+ goto after_block_and_backoff;
+
+ object = state->vmfp_object;
+ offset = state->vmfp_offset;
+ first_m = state->vmfp_first_m;
+ access_required = state->vmfp_access;
+ goto after_thread_block;
+ }
+
+ vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
+ vm_stat.faults++; /* needs lock XXX */
+ current_task()->faults++;
+
+/*
+ * Recovery actions
+ */
+#define RELEASE_PAGE(m) \
+ MACRO_BEGIN \
+ PAGE_WAKEUP_DONE(m); \
+ vm_page_lock_queues(); \
+ if (!m->active && !m->inactive) \
+ vm_page_activate(m); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+ if (vm_fault_dirty_handling
+#if MACH_KDB
+ /*
+ * If there are watchpoints set, then
+ * we don't want to give away write permission
+ * on a read fault. Make the task write fault,
+ * so that the watchpoint code notices the access.
+ */
+ || db_watchpoint_list
+#endif /* MACH_KDB */
+ ) {
+ /*
+ * If we aren't asking for write permission,
+ * then don't give it away. We're using write
+ * faults to set the dirty bit.
+ */
+ if (!(fault_type & VM_PROT_WRITE))
+ *protection &= ~VM_PROT_WRITE;
+ }
+
+ if (!vm_fault_interruptible)
+ interruptible = FALSE;
+
+ /*
+ * INVARIANTS (through entire routine):
+ *
+ * 1) At all times, we must either have the object
+ * lock or a busy page in some object to prevent
+ * some other thread from trying to bring in
+ * the same page.
+ *
+ * Note that we cannot hold any locks during the
+ * pager access or when waiting for memory, so
+ * we use a busy page then.
+ *
+ * Note also that we aren't as concerned about more than
+ * one thread attempting to memory_object_data_unlock
+ * the same page at once, so we don't hold the page
+ * as busy then, but do record the highest unlock
+ * value so far. [Unlock requests may also be delivered
+ * out of order.]
+ *
+ * 2) To prevent another thread from racing us down the
+ * shadow chain and entering a new page in the top
+ * object before we do, we must keep a busy page in
+ * the top object while following the shadow chain.
+ *
+ * 3) We must increment paging_in_progress on any object
+ * for which we have a busy page, to prevent
+ * vm_object_collapse from removing the busy page
+ * without our noticing.
+ *
+ * 4) We leave busy pages on the pageout queues.
+ * If the pageout daemon comes across a busy page,
+ * it will remove the page from the pageout queues.
+ */
+
+ /*
+ * Search for the page at object/offset.
+ */
+
+ object = first_object;
+ offset = first_offset;
+ first_m = VM_PAGE_NULL;
+ access_required = fault_type;
+
+ /*
+ * See whether this page is resident
+ */
+
+ while (TRUE) {
+ m = vm_page_lookup(object, offset);
+ if (m != VM_PAGE_NULL) {
+ /*
+ * If the page is being brought in,
+ * wait for it and then retry.
+ *
+ * A possible optimization: if the page
+ * is known to be resident, we can ignore
+ * pages that are absent (regardless of
+ * whether they're busy).
+ */
+
+ if (m->busy) {
+ kern_return_t wait_result;
+
+ PAGE_ASSERT_WAIT(m, interruptible);
+ vm_object_unlock(object);
+ if (continuation != thread_no_continuation) {
+ vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ /*
+ * Save variables in case
+ * thread_block discards
+ * our kernel stack.
+ */
+
+ state->vmfp_backoff = FALSE;
+ state->vmfp_object = object;
+ state->vmfp_offset = offset;
+ state->vmfp_first_m = first_m;
+ state->vmfp_access =
+ access_required;
+ state->vmf_prot = *protection;
+
+ counter(c_vm_fault_page_block_busy_user++);
+ thread_block(continuation);
+ } else
+ {
+ counter(c_vm_fault_page_block_busy_kernel++);
+ thread_block((void (*)()) 0);
+ }
+ after_thread_block:
+ wait_result = current_thread()->wait_result;
+ vm_object_lock(object);
+ if (wait_result != THREAD_AWAKENED) {
+ vm_fault_cleanup(object, first_m);
+ if (wait_result == THREAD_RESTART)
+ return(VM_FAULT_RETRY);
+ else
+ return(VM_FAULT_INTERRUPTED);
+ }
+ continue;
+ }
+
+ /*
+ * If the page is in error, give up now.
+ */
+
+ if (m->error) {
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_ERROR);
+ }
+
+ /*
+ * If the page isn't busy, but is absent,
+ * then it was deemed "unavailable".
+ */
+
+ if (m->absent) {
+ /*
+ * Remove the non-existent page (unless it's
+ * in the top object) and move on down to the
+ * next object (if there is one).
+ */
+
+ offset += object->shadow_offset;
+ access_required = VM_PROT_READ;
+ next_object = object->shadow;
+ if (next_object == VM_OBJECT_NULL) {
+ vm_page_t real_m;
+
+ assert(!must_be_resident);
+
+ /*
+ * Absent page at bottom of shadow
+ * chain; zero fill the page we left
+ * busy in the first object, and flush
+ * the absent page. But first we
+ * need to allocate a real page.
+ */
+
+ real_m = vm_page_grab(VM_PAGE_HIGHMEM);
+ if (real_m == VM_PAGE_NULL) {
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ if (object != first_object) {
+ VM_PAGE_FREE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ object = first_object;
+ offset = first_offset;
+ m = first_m;
+ first_m = VM_PAGE_NULL;
+ vm_object_lock(object);
+ }
+
+ VM_PAGE_FREE(m);
+ assert(real_m->busy);
+ vm_page_lock_queues();
+ vm_page_insert(real_m, object, offset);
+ vm_page_unlock_queues();
+ m = real_m;
+
+ /*
+ * Drop the lock while zero filling
+ * page. Then break because this
+ * is the page we wanted. Checking
+ * the page lock is a waste of time;
+ * this page was either absent or
+ * newly allocated -- in both cases
+ * it can't be page locked by a pager.
+ */
+ vm_object_unlock(object);
+
+ vm_page_zero_fill(m);
+
+ vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
+
+ vm_stat.zero_fill_count++;
+ current_task()->zero_fills++;
+ vm_object_lock(object);
+ pmap_clear_modify(m->phys_addr);
+ break;
+ } else {
+ if (must_be_resident) {
+ vm_object_paging_end(object);
+ } else if (object != first_object) {
+ vm_object_paging_end(object);
+ VM_PAGE_FREE(m);
+ } else {
+ first_m = m;
+ m->absent = FALSE;
+ vm_object_absent_release(object);
+ m->busy = TRUE;
+
+ vm_page_lock_queues();
+ VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_unlock_queues();
+ }
+ vm_object_lock(next_object);
+ vm_object_unlock(object);
+ object = next_object;
+ vm_object_paging_begin(object);
+ continue;
+ }
+ }
+
+ /*
+ * If the desired access to this page has
+ * been locked out, request that it be unlocked.
+ */
+
+ if (access_required & m->page_lock) {
+ if ((access_required & m->unlock_request) != access_required) {
+ vm_prot_t new_unlock_request;
+ kern_return_t rc;
+
+ if (!object->pager_ready) {
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ goto block_and_backoff;
+ }
+
+ new_unlock_request = m->unlock_request =
+ (access_required | m->unlock_request);
+ vm_object_unlock(object);
+ if ((rc = memory_object_data_unlock(
+ object->pager,
+ object->pager_request,
+ offset + object->paging_offset,
+ PAGE_SIZE,
+ new_unlock_request))
+ != KERN_SUCCESS) {
+ printf("vm_fault: memory_object_data_unlock failed\n");
+ vm_object_lock(object);
+ vm_fault_cleanup(object, first_m);
+ return((rc == MACH_SEND_INTERRUPTED) ?
+ VM_FAULT_INTERRUPTED :
+ VM_FAULT_MEMORY_ERROR);
+ }
+ vm_object_lock(object);
+ continue;
+ }
+
+ PAGE_ASSERT_WAIT(m, interruptible);
+ goto block_and_backoff;
+ }
+
+ /*
+ * We mark the page busy and leave it on
+ * the pageout queues. If the pageout
+ * deamon comes across it, then it will
+ * remove the page.
+ */
+
+ if (!software_reference_bits) {
+ vm_page_lock_queues();
+ if (m->inactive) {
+ vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
+ vm_stat.reactivations++;
+ current_task()->reactivations++;
+ }
+
+ VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_unlock_queues();
+ }
+
+ assert(!m->busy);
+ m->busy = TRUE;
+ assert(!m->absent);
+ break;
+ }
+
+ look_for_page =
+ (object->pager_created)
+#if MACH_PAGEMAP
+ && (vm_external_state_get(object->existence_info, offset + object->paging_offset) !=
+ VM_EXTERNAL_STATE_ABSENT)
+#endif /* MACH_PAGEMAP */
+ ;
+
+ if ((look_for_page || (object == first_object))
+ && !must_be_resident) {
+ /*
+ * Allocate a new page for this object/offset
+ * pair.
+ */
+
+ m = vm_page_grab_fictitious();
+ if (m == VM_PAGE_NULL) {
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_FICTITIOUS_SHORTAGE);
+ }
+
+ vm_page_lock_queues();
+ vm_page_insert(m, object, offset);
+ vm_page_unlock_queues();
+ }
+
+ if (look_for_page && !must_be_resident) {
+ kern_return_t rc;
+
+ /*
+ * If the memory manager is not ready, we
+ * cannot make requests.
+ */
+ if (!object->pager_ready) {
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ VM_PAGE_FREE(m);
+ goto block_and_backoff;
+ }
+
+ if (object->internal) {
+ /*
+ * Requests to the default pager
+ * must reserve a real page in advance,
+ * because the pager's data-provided
+ * won't block for pages.
+ */
+
+ if (m->fictitious && !vm_page_convert(&m)) {
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+ } else if (object->absent_count >
+ vm_object_absent_max) {
+ /*
+ * If there are too many outstanding page
+ * requests pending on this object, we
+ * wait for them to be resolved now.
+ */
+
+ vm_object_absent_assert_wait(object, interruptible);
+ VM_PAGE_FREE(m);
+ goto block_and_backoff;
+ }
+
+ /*
+ * Indicate that the page is waiting for data
+ * from the memory manager.
+ */
+
+ m->absent = TRUE;
+ object->absent_count++;
+
+ /*
+ * We have a busy page, so we can
+ * release the object lock.
+ */
+ vm_object_unlock(object);
+
+ /*
+ * Call the memory manager to retrieve the data.
+ */
+
+ vm_stat.pageins++;
+ vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
+ current_task()->pageins++;
+
+ if ((rc = memory_object_data_request(object->pager,
+ object->pager_request,
+ m->offset + object->paging_offset,
+ PAGE_SIZE, access_required)) != KERN_SUCCESS) {
+ if (object->pager && rc != MACH_SEND_INTERRUPTED)
+ printf("%s(0x%p, 0x%p, 0x%zx, 0x%x, 0x%x) failed, %x\n",
+ "memory_object_data_request",
+ object->pager,
+ object->pager_request,
+ m->offset + object->paging_offset,
+ PAGE_SIZE, access_required, rc);
+ /*
+ * Don't want to leave a busy page around,
+ * but the data request may have blocked,
+ * so check if it's still there and busy.
+ */
+ vm_object_lock(object);
+ if (m == vm_page_lookup(object,offset) &&
+ m->absent && m->busy)
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+ return((rc == MACH_SEND_INTERRUPTED) ?
+ VM_FAULT_INTERRUPTED :
+ VM_FAULT_MEMORY_ERROR);
+ }
+
+ /*
+ * Retry with same object/offset, since new data may
+ * be in a different page (i.e., m is meaningless at
+ * this point).
+ */
+ vm_object_lock(object);
+ continue;
+ }
+
+ /*
+ * For the XP system, the only case in which we get here is if
+ * object has no pager (or unwiring). If the pager doesn't
+ * have the page this is handled in the m->absent case above
+ * (and if you change things here you should look above).
+ */
+ if (object == first_object)
+ first_m = m;
+ else
+ {
+ assert(m == VM_PAGE_NULL);
+ }
+
+ /*
+ * Move on to the next object. Lock the next
+ * object before unlocking the current one.
+ */
+ access_required = VM_PROT_READ;
+
+ offset += object->shadow_offset;
+ next_object = object->shadow;
+ if (next_object == VM_OBJECT_NULL) {
+ assert(!must_be_resident);
+
+ /*
+ * If there's no object left, fill the page
+ * in the top object with zeros. But first we
+ * need to allocate a real page.
+ */
+
+ if (object != first_object) {
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ object = first_object;
+ offset = first_offset;
+ vm_object_lock(object);
+ }
+
+ m = first_m;
+ assert(m->object == object);
+ first_m = VM_PAGE_NULL;
+
+ if (m->fictitious && !vm_page_convert(&m)) {
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, VM_PAGE_NULL);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ vm_object_unlock(object);
+ vm_page_zero_fill(m);
+ vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
+ vm_stat.zero_fill_count++;
+ current_task()->zero_fills++;
+ vm_object_lock(object);
+ pmap_clear_modify(m->phys_addr);
+ break;
+ }
+ else {
+ vm_object_lock(next_object);
+ if ((object != first_object) || must_be_resident)
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ object = next_object;
+ vm_object_paging_begin(object);
+ }
+ }
+
+ /*
+ * PAGE HAS BEEN FOUND.
+ *
+ * This page (m) is:
+ * busy, so that we can play with it;
+ * not absent, so that nobody else will fill it;
+ * possibly eligible for pageout;
+ *
+ * The top-level page (first_m) is:
+ * VM_PAGE_NULL if the page was found in the
+ * top-level object;
+ * busy, not absent, and ineligible for pageout.
+ *
+ * The current object (object) is locked. A paging
+ * reference is held for the current and top-level
+ * objects.
+ */
+
+ assert(m->busy && !m->absent);
+ assert((first_m == VM_PAGE_NULL) ||
+ (first_m->busy && !first_m->absent &&
+ !first_m->active && !first_m->inactive));
+
+ /*
+ * If the page is being written, but isn't
+ * already owned by the top-level object,
+ * we have to copy it into a new page owned
+ * by the top-level object.
+ */
+
+ if (object != first_object) {
+ /*
+ * We only really need to copy if we
+ * want to write it.
+ */
+
+ if (fault_type & VM_PROT_WRITE) {
+ vm_page_t copy_m;
+
+ assert(!must_be_resident);
+
+ /*
+ * If we try to collapse first_object at this
+ * point, we may deadlock when we try to get
+ * the lock on an intermediate object (since we
+ * have the bottom object locked). We can't
+ * unlock the bottom object, because the page
+ * we found may move (by collapse) if we do.
+ *
+ * Instead, we first copy the page. Then, when
+ * we have no more use for the bottom object,
+ * we unlock it and try to collapse.
+ *
+ * Note that we copy the page even if we didn't
+ * need to... that's the breaks.
+ */
+
+ /*
+ * Allocate a page for the copy
+ */
+ copy_m = vm_page_grab(VM_PAGE_HIGHMEM);
+ if (copy_m == VM_PAGE_NULL) {
+ RELEASE_PAGE(m);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ vm_object_unlock(object);
+ vm_page_copy(m, copy_m);
+ vm_object_lock(object);
+
+ /*
+ * If another map is truly sharing this
+ * page with us, we have to flush all
+ * uses of the original page, since we
+ * can't distinguish those which want the
+ * original from those which need the
+ * new copy.
+ *
+ * XXXO If we know that only one map has
+ * access to this page, then we could
+ * avoid the pmap_page_protect() call.
+ */
+
+ vm_page_lock_queues();
+ vm_page_deactivate(m);
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ vm_page_unlock_queues();
+
+ /*
+ * We no longer need the old page or object.
+ */
+
+ PAGE_WAKEUP_DONE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ vm_stat.cow_faults++;
+ vm_stat_sample(SAMPLED_PC_VM_COW_FAULTS);
+ current_task()->cow_faults++;
+ object = first_object;
+ offset = first_offset;
+
+ vm_object_lock(object);
+ VM_PAGE_FREE(first_m);
+ first_m = VM_PAGE_NULL;
+ assert(copy_m->busy);
+ vm_page_lock_queues();
+ vm_page_insert(copy_m, object, offset);
+ vm_page_unlock_queues();
+ m = copy_m;
+
+ /*
+ * Now that we've gotten the copy out of the
+ * way, let's try to collapse the top object.
+ * But we have to play ugly games with
+ * paging_in_progress to do that...
+ */
+
+ vm_object_paging_end(object);
+ vm_object_collapse(object);
+ vm_object_paging_begin(object);
+ }
+ else {
+ *protection &= (~VM_PROT_WRITE);
+ }
+ }
+
+ /*
+ * Now check whether the page needs to be pushed into the
+ * copy object. The use of asymmetric copy on write for
+ * shared temporary objects means that we may do two copies to
+ * satisfy the fault; one above to get the page from a
+ * shadowed object, and one here to push it into the copy.
+ */
+
+ while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
+ vm_offset_t copy_offset;
+ vm_page_t copy_m;
+
+ /*
+ * If the page is being written, but hasn't been
+ * copied to the copy-object, we have to copy it there.
+ */
+
+ if ((fault_type & VM_PROT_WRITE) == 0) {
+ *protection &= ~VM_PROT_WRITE;
+ break;
+ }
+
+ /*
+ * If the page was guaranteed to be resident,
+ * we must have already performed the copy.
+ */
+
+ if (must_be_resident)
+ break;
+
+ /*
+ * Try to get the lock on the copy_object.
+ */
+ if (!vm_object_lock_try(copy_object)) {
+ vm_object_unlock(object);
+
+ simple_lock_pause(); /* wait a bit */
+
+ vm_object_lock(object);
+ continue;
+ }
+
+ /*
+ * Make another reference to the copy-object,
+ * to keep it from disappearing during the
+ * copy.
+ */
+ assert(copy_object->ref_count > 0);
+ copy_object->ref_count++;
+
+ /*
+ * Does the page exist in the copy?
+ */
+ copy_offset = first_offset - copy_object->shadow_offset;
+ copy_m = vm_page_lookup(copy_object, copy_offset);
+ if (copy_m != VM_PAGE_NULL) {
+ if (copy_m->busy) {
+ /*
+ * If the page is being brought
+ * in, wait for it and then retry.
+ */
+ PAGE_ASSERT_WAIT(copy_m, interruptible);
+ RELEASE_PAGE(m);
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+ vm_object_unlock(copy_object);
+ goto block_and_backoff;
+ }
+ }
+ else {
+ /*
+ * Allocate a page for the copy
+ */
+ copy_m = vm_page_alloc(copy_object, copy_offset);
+ if (copy_m == VM_PAGE_NULL) {
+ RELEASE_PAGE(m);
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+ vm_object_unlock(copy_object);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ /*
+ * Must copy page into copy-object.
+ */
+
+ vm_page_copy(m, copy_m);
+
+ /*
+ * If the old page was in use by any users
+ * of the copy-object, it must be removed
+ * from all pmaps. (We can't know which
+ * pmaps use it.)
+ */
+
+ vm_page_lock_queues();
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ copy_m->dirty = TRUE;
+ vm_page_unlock_queues();
+
+ /*
+ * If there's a pager, then immediately
+ * page out this page, using the "initialize"
+ * option. Else, we use the copy.
+ */
+
+ if (!copy_object->pager_created) {
+ vm_page_lock_queues();
+ vm_page_activate(copy_m);
+ vm_page_unlock_queues();
+ PAGE_WAKEUP_DONE(copy_m);
+ } else {
+ /*
+ * The page is already ready for pageout:
+ * not on pageout queues and busy.
+ * Unlock everything except the
+ * copy_object itself.
+ */
+
+ vm_object_unlock(object);
+
+ /*
+ * Write the page to the copy-object,
+ * flushing it from the kernel.
+ */
+
+ vm_pageout_page(copy_m, TRUE, TRUE);
+
+ /*
+ * Since the pageout may have
+ * temporarily dropped the
+ * copy_object's lock, we
+ * check whether we'll have
+ * to deallocate the hard way.
+ */
+
+ if ((copy_object->shadow != object) ||
+ (copy_object->ref_count == 1)) {
+ vm_object_unlock(copy_object);
+ vm_object_deallocate(copy_object);
+ vm_object_lock(object);
+ continue;
+ }
+
+ /*
+ * Pick back up the old object's
+ * lock. [It is safe to do so,
+ * since it must be deeper in the
+ * object tree.]
+ */
+
+ vm_object_lock(object);
+ }
+
+ /*
+ * Because we're pushing a page upward
+ * in the object tree, we must restart
+ * any faults that are waiting here.
+ * [Note that this is an expansion of
+ * PAGE_WAKEUP that uses the THREAD_RESTART
+ * wait result]. Can't turn off the page's
+ * busy bit because we're not done with it.
+ */
+
+ if (m->wanted) {
+ m->wanted = FALSE;
+ thread_wakeup_with_result((event_t) m,
+ THREAD_RESTART);
+ }
+ }
+
+ /*
+ * The reference count on copy_object must be
+ * at least 2: one for our extra reference,
+ * and at least one from the outside world
+ * (we checked that when we last locked
+ * copy_object).
+ */
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+ vm_object_unlock(copy_object);
+
+ break;
+ }
+
+ *result_page = m;
+ *top_page = first_m;
+
+ /*
+ * If the page can be written, assume that it will be.
+ * [Earlier, we restrict the permission to allow write
+ * access only if the fault so required, so we don't
+ * mark read-only data as dirty.]
+ */
+
+ if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
+ m->dirty = TRUE;
+
+ return(VM_FAULT_SUCCESS);
+
+ block_and_backoff:
+ vm_fault_cleanup(object, first_m);
+
+ if (continuation != thread_no_continuation) {
+ vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ /*
+ * Save variables in case we must restart.
+ */
+
+ state->vmfp_backoff = TRUE;
+ state->vmf_prot = *protection;
+
+ counter(c_vm_fault_page_block_backoff_user++);
+ thread_block(continuation);
+ } else
+ {
+ counter(c_vm_fault_page_block_backoff_kernel++);
+ thread_block((void (*)()) 0);
+ }
+ after_block_and_backoff:
+ if (current_thread()->wait_result == THREAD_AWAKENED)
+ return VM_FAULT_RETRY;
+ else
+ return VM_FAULT_INTERRUPTED;
+
+#undef RELEASE_PAGE
+}
+
+/*
+ * Routine: vm_fault
+ * Purpose:
+ * Handle page faults, including pseudo-faults
+ * used to change the wiring status of pages.
+ * Returns:
+ * If an explicit (expression) continuation is supplied,
+ * then we call the continuation instead of returning.
+ * Implementation:
+ * Explicit continuations make this a little icky,
+ * because it hasn't been rewritten to embrace CPS.
+ * Instead, we have resume arguments for vm_fault and
+ *	vm_fault_page, to let the fault computation continue.
+ *
+ * vm_fault and vm_fault_page save mucho state
+ * in the moral equivalent of a closure. The state
+ * structure is allocated when first entering vm_fault
+ * and deallocated when leaving vm_fault.
+ */
+
+/*
+ * Continuation used when vm_fault blocks (discarding the kernel
+ * stack): re-enters vm_fault with resume == TRUE so that it picks
+ * up the state closure saved in the current thread's ith_other.
+ */
+static void
+vm_fault_continue(void)
+{
+	vm_fault_state_t *state =
+		(vm_fault_state_t *) current_thread()->ith_other;
+
+	(void) vm_fault(state->vmf_map,
+			state->vmf_vaddr,
+			state->vmf_fault_type,
+			state->vmf_change_wiring,
+			TRUE, state->vmf_continuation);
+	/*NOTREACHED*/
+}
+
+kern_return_t vm_fault(
+	vm_map_t	map,
+	vm_offset_t	vaddr,
+	vm_prot_t	fault_type,
+	boolean_t	change_wiring,
+	boolean_t	resume,
+	vm_fault_continuation_t	continuation)
+{
+	vm_map_version_t	version;	/* Map version for verification */
+	boolean_t		wired;		/* Should mapping be wired down? */
+	vm_object_t		object;		/* Top-level object */
+	vm_offset_t		offset;		/* Top-level offset */
+	vm_prot_t		prot;		/* Protection for mapping */
+	vm_object_t		old_copy_object; /* Saved copy object */
+	vm_page_t		result_page;	/* Result of vm_fault_page */
+	vm_page_t		top_page;	/* Placeholder page */
+	kern_return_t		kr;
+
+	vm_page_t		m;	/* Fast access to result_page */
+
+	if (resume) {
+		vm_fault_state_t *state =
+			(vm_fault_state_t *) current_thread()->ith_other;
+
+		/*
+		 * Retrieve cached variables and
+		 * continue vm_fault_page.
+		 */
+
+		object = state->vmf_object;
+		if (object == VM_OBJECT_NULL)
+			goto RetryFault;
+		version = state->vmf_version;
+		wired = state->vmf_wired;
+		offset = state->vmf_offset;
+		prot = state->vmf_prot;
+
+		kr = vm_fault_page(object, offset, fault_type,
+				(change_wiring && !wired), !change_wiring,
+				&prot, &result_page, &top_page,
+				TRUE, vm_fault_continue);
+		goto after_vm_fault_page;
+	}
+
+	if (continuation != vm_fault_no_continuation) {
+		/*
+		 * We will probably need to save state.
+		 */
+
+		char *	state;
+
+	/*
+	 * if this assignment stmt is written as
+	 * 'active_threads[cpu_number()] = kmem_cache_alloc()',
+	 * cpu_number may be evaluated before kmem_cache_alloc;
+	 * if kmem_cache_alloc blocks, cpu_number will be wrong
+	 */
+
+		state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
+		current_thread()->ith_other = state;
+
+	}
+
+    RetryFault: ;
+
+	/*
+	 *	Find the backing store object and offset into
+	 *	it to begin the search.
+	 */
+
+	if ((kr = vm_map_lookup(&map, vaddr, fault_type, &version,
+				&object, &offset,
+				&prot, &wired)) != KERN_SUCCESS) {
+		goto done;
+	}
+
+	/*
+	 *	If the page is wired, we must fault for the current protection
+	 *	value, to avoid further faults.
+	 */
+
+	if (wired)
+		fault_type = prot;
+
+   	/*
+	 *	Make a reference to this object to
+	 *	prevent its disposal while we are messing with
+	 *	it.  Once we have the reference, the map is free
+	 *	to be diddled.  Since objects reference their
+	 *	shadows (and copies), they will stay around as well.
+	 */
+
+	assert(object->ref_count > 0);
+	object->ref_count++;
+	vm_object_paging_begin(object);
+
+	if (continuation != vm_fault_no_continuation) {
+		vm_fault_state_t *state =
+			(vm_fault_state_t *) current_thread()->ith_other;
+
+		/*
+		 * Save variables, in case vm_fault_page discards
+		 * our kernel stack and we have to restart.
+		 */
+
+		state->vmf_map = map;
+		state->vmf_vaddr = vaddr;
+		state->vmf_fault_type = fault_type;
+		state->vmf_change_wiring = change_wiring;
+		state->vmf_continuation = continuation;
+
+		state->vmf_version = version;
+		state->vmf_wired = wired;
+		state->vmf_object = object;
+		state->vmf_offset = offset;
+		state->vmf_prot = prot;
+
+		kr = vm_fault_page(object, offset, fault_type,
+				   (change_wiring && !wired), !change_wiring,
+				   &prot, &result_page, &top_page,
+				   FALSE, vm_fault_continue);
+	} else
+	{
+		kr = vm_fault_page(object, offset, fault_type,
+				   (change_wiring && !wired), !change_wiring,
+				   &prot, &result_page, &top_page,
+				   FALSE, (void (*)()) 0);
+	}
+    after_vm_fault_page:
+
+	/*
+	 *	If we didn't succeed, lose the object reference immediately.
+	 */
+
+	if (kr != VM_FAULT_SUCCESS)
+		vm_object_deallocate(object);
+
+	/*
+	 *	See why we failed, and take corrective action.
+	 */
+
+	switch (kr) {
+		case VM_FAULT_SUCCESS:
+			break;
+		case VM_FAULT_RETRY:
+			goto RetryFault;
+		case VM_FAULT_INTERRUPTED:
+			kr = KERN_SUCCESS;
+			goto done;
+		case VM_FAULT_MEMORY_SHORTAGE:
+			if (continuation != vm_fault_no_continuation) {
+				vm_fault_state_t *state =
+					(vm_fault_state_t *) current_thread()->ith_other;
+
+				/*
+				 * Save variables in case VM_PAGE_WAIT
+				 * discards our kernel stack.
+				 */
+
+				state->vmf_map = map;
+				state->vmf_vaddr = vaddr;
+				state->vmf_fault_type = fault_type;
+				state->vmf_change_wiring = change_wiring;
+				state->vmf_continuation = continuation;
+				state->vmf_object = VM_OBJECT_NULL;
+
+				VM_PAGE_WAIT(vm_fault_continue);
+			} else
+				VM_PAGE_WAIT((void (*)()) 0);
+			goto RetryFault;
+		case VM_FAULT_FICTITIOUS_SHORTAGE:
+			vm_page_more_fictitious();
+			goto RetryFault;
+		case VM_FAULT_MEMORY_ERROR:
+			kr = KERN_MEMORY_ERROR;
+			goto done;
+	}
+
+	m = result_page;
+
+	assert((change_wiring && !wired) ?
+	       (top_page == VM_PAGE_NULL) :
+	       ((top_page == VM_PAGE_NULL) == (m->object == object)));
+
+	/*
+	 *	How to clean up the result of vm_fault_page.  This
+	 *	happens whether the mapping is entered or not.
+	 */
+
+#define UNLOCK_AND_DEALLOCATE				\
+	MACRO_BEGIN					\
+	vm_fault_cleanup(m->object, top_page);		\
+	vm_object_deallocate(object);			\
+	MACRO_END
+
+	/*
+	 *	What to do with the resulting page from vm_fault_page
+	 *	if it doesn't get entered into the physical map:
+	 */
+
+#define RELEASE_PAGE(m)					\
+	MACRO_BEGIN					\
+	PAGE_WAKEUP_DONE(m);				\
+	vm_page_lock_queues();				\
+	if (!m->active && !m->inactive)			\
+		vm_page_activate(m);			\
+	vm_page_unlock_queues();			\
+	MACRO_END
+
+	/*
+	 *	We must verify that the maps have not changed
+	 *	since our last lookup.
+	 */
+
+	old_copy_object = m->object->copy;
+
+	vm_object_unlock(m->object);
+	while (!vm_map_verify(map, &version)) {
+		vm_object_t	retry_object;
+		vm_offset_t	retry_offset;
+		vm_prot_t	retry_prot;
+
+		/*
+		 *	To avoid trying to write_lock the map while another
+		 *	thread has it read_locked (in vm_map_pageable), we
+		 *	do not try for write permission.  If the page is
+		 *	still writable, we will get write permission.  If it
+		 *	is not, or has been marked needs_copy, we enter the
+		 *	mapping without write permission, and will merely
+		 *	take another fault.
+		 */
+		kr = vm_map_lookup(&map, vaddr,
+				   fault_type & ~VM_PROT_WRITE, &version,
+				   &retry_object, &retry_offset, &retry_prot,
+				   &wired);
+
+		if (kr != KERN_SUCCESS) {
+			vm_object_lock(m->object);
+			RELEASE_PAGE(m);
+			UNLOCK_AND_DEALLOCATE;
+			goto done;
+		}
+
+		vm_object_unlock(retry_object);
+		vm_object_lock(m->object);
+
+		if ((retry_object != object) ||
+		    (retry_offset != offset)) {
+			RELEASE_PAGE(m);
+			UNLOCK_AND_DEALLOCATE;
+			goto RetryFault;
+		}
+
+		/*
+		 *	Check whether the protection has changed or the object
+		 *	has been copied while we left the map unlocked.
+		 */
+		prot &= retry_prot;
+		vm_object_unlock(m->object);
+	}
+	vm_object_lock(m->object);
+
+	/*
+	 *	If the copy object changed while the top-level object
+	 *	was unlocked, then we must take away write permission.
+	 */
+
+	if (m->object->copy != old_copy_object)
+		prot &= ~VM_PROT_WRITE;
+
+	/*
+	 *	If we want to wire down this page, but no longer have
+	 *	adequate permissions, we must start all over.
+	 */
+
+	if (wired && (prot != fault_type)) {
+		vm_map_verify_done(map, &version);
+		RELEASE_PAGE(m);
+		UNLOCK_AND_DEALLOCATE;
+		goto RetryFault;
+	}
+
+	/*
+	 *	It's critically important that a wired-down page be faulted
+	 *	only once in each map for which it is wired.
+	 */
+
+	vm_object_unlock(m->object);
+
+	/*
+	 *	Put this page into the physical map.
+	 *	We had to do the unlock above because pmap_enter
+	 *	may cause other faults.  The page may be on
+	 *	the pageout queues.  If the pageout daemon comes
+	 *	across the page, it will remove it from the queues.
+	 */
+
+	PMAP_ENTER(map->pmap, vaddr, m, prot, wired);
+
+	/*
+	 *	If the page is not wired down and isn't already
+	 *	on a pageout queue, then put it where the
+	 *	pageout daemon can find it.
+	 */
+	vm_object_lock(m->object);
+	vm_page_lock_queues();
+	if (change_wiring) {
+		if (wired)
+			vm_page_wire(m);
+		else
+			vm_page_unwire(m);
+	} else if (software_reference_bits) {
+		if (!m->active && !m->inactive)
+			vm_page_activate(m);
+		m->reference = TRUE;
+	} else {
+		vm_page_activate(m);
+	}
+	vm_page_unlock_queues();
+
+	/*
+	 *	Unlock everything, and return
+	 */
+
+	vm_map_verify_done(map, &version);
+	PAGE_WAKEUP_DONE(m);
+	kr = KERN_SUCCESS;
+	UNLOCK_AND_DEALLOCATE;
+
+#undef	UNLOCK_AND_DEALLOCATE
+#undef	RELEASE_PAGE
+
+	/*
+	 *	Common exit: if the caller supplied a continuation, free
+	 *	the saved-state closure and deliver the result through the
+	 *	continuation instead of returning.
+	 */
+    done:
+	if (continuation != vm_fault_no_continuation) {
+		vm_fault_state_t *state =
+			(vm_fault_state_t *) current_thread()->ith_other;
+
+		kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
+		(*continuation)(kr);
+		/*NOTREACHED*/
+	}
+
+	return(kr);
+}
+
+/*
+ * vm_fault_wire:
+ *
+ * Wire down a range of virtual addresses in a map.
+ */
+void vm_fault_wire(
+	vm_map_t	map,
+	vm_map_entry_t	entry)
+{
+
+	vm_offset_t	va;
+	pmap_t		pmap;
+	vm_offset_t	end_addr = entry->vme_end;
+
+	pmap = vm_map_pmap(map);
+
+	/*
+	 *	Inform the physical mapping system that the
+	 *	range of addresses may not fault, so that
+	 *	page tables and such can be locked down as well.
+	 */
+
+	pmap_pageable(pmap, entry->vme_start, end_addr, FALSE);
+
+	/*
+	 *	We simulate a fault to get the page and enter it
+	 *	in the physical map.  Try the fast wiring path first;
+	 *	fall back to the full fault path on any failure.
+	 */
+
+	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
+		if (vm_fault_wire_fast(map, va, entry) != KERN_SUCCESS)
+			/* VM_PROT_NONE + change_wiring=TRUE: wiring pseudo-fault */
+			(void) vm_fault(map, va, VM_PROT_NONE, TRUE,
+					FALSE, (void (*)()) 0);
+	}
+}
+
+/*
+ * vm_fault_unwire:
+ *
+ * Unwire a range of virtual addresses in a map.
+ */
+void vm_fault_unwire(
+	vm_map_t	map,
+	vm_map_entry_t	entry)
+{
+	vm_offset_t	va;
+	pmap_t		pmap;
+	vm_offset_t	end_addr = entry->vme_end;
+	vm_object_t	object;
+
+	pmap = vm_map_pmap(map);
+
+	/* Sub-map entries have no directly associated object. */
+	object = (entry->is_sub_map)
+			? VM_OBJECT_NULL : entry->object.vm_object;
+
+	/*
+	 *	Since the pages are wired down, we must be able to
+	 *	get their mappings from the physical map system.
+	 */
+
+	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
+		pmap_change_wiring(pmap, va, FALSE);
+
+		if (object == VM_OBJECT_NULL) {
+			/*
+			 *	No object: go through the full fault path
+			 *	(as a wiring pseudo-fault) with the map lock
+			 *	held recursively.
+			 */
+			vm_map_lock_set_recursive(map);
+			(void) vm_fault(map, va, VM_PROT_NONE, TRUE,
+					FALSE, (void (*)()) 0);
+			vm_map_lock_clear_recursive(map);
+		} else {
+			vm_prot_t	prot;
+			vm_page_t	result_page;
+			vm_page_t	top_page;
+			vm_fault_return_t result;
+
+			do {
+				prot = VM_PROT_NONE;
+
+				vm_object_lock(object);
+				vm_object_paging_begin(object);
+				result = vm_fault_page(object,
+						entry->offset +
+						  (va - entry->vme_start),
+						VM_PROT_NONE, TRUE,
+						FALSE, &prot,
+						&result_page,
+						&top_page,
+						FALSE, (void (*)()) 0);
+			} while (result == VM_FAULT_RETRY);
+
+			if (result != VM_FAULT_SUCCESS)
+				panic("vm_fault_unwire: failure");
+
+			vm_page_lock_queues();
+			vm_page_unwire(result_page);
+			vm_page_unlock_queues();
+			PAGE_WAKEUP_DONE(result_page);
+
+			vm_fault_cleanup(result_page->object, top_page);
+		}
+	}
+
+	/*
+	 *	Inform the physical mapping system that the range
+	 *	of addresses may fault, so that page tables and
+	 *	such may be unwired themselves.
+	 */
+
+	pmap_pageable(pmap, entry->vme_start, end_addr, TRUE);
+}
+
+/*
+ * vm_fault_wire_fast:
+ *
+ * Handle common case of a wire down page fault at the given address.
+ * If successful, the page is inserted into the associated physical map.
+ * The map entry is passed in to avoid the overhead of a map lookup.
+ *
+ * NOTE: the given address should be truncated to the
+ * proper page address.
+ *
+ * KERN_SUCCESS is returned if the page fault is handled; otherwise,
+ * a standard error specifying why the fault is fatal is returned.
+ *
+ * The map in question must be referenced, and remains so.
+ * Caller has a read lock on the map.
+ *
+ * This is a stripped version of vm_fault() for wiring pages. Anything
+ * other than the common case will return KERN_FAILURE, and the caller
+ * is expected to call vm_fault().
+ */
+kern_return_t vm_fault_wire_fast(
+	vm_map_t	map,
+	vm_offset_t	va,
+	vm_map_entry_t	entry)
+{
+	vm_object_t	object;
+	vm_offset_t	offset;
+	vm_page_t	m;
+	vm_prot_t	prot;
+
+	vm_stat.faults++;		/* needs lock XXX */
+	current_task()->faults++;
+/*
+ *	Recovery actions
+ */
+
+#undef	RELEASE_PAGE
+#define RELEASE_PAGE(m)	{				\
+	PAGE_WAKEUP_DONE(m);				\
+	vm_page_lock_queues();				\
+	vm_page_unwire(m);				\
+	vm_page_unlock_queues();			\
+}
+
+
+#undef	UNLOCK_THINGS
+#define UNLOCK_THINGS	{				\
+	object->paging_in_progress--;			\
+	vm_object_unlock(object);			\
+}
+
+#undef	UNLOCK_AND_DEALLOCATE
+#define UNLOCK_AND_DEALLOCATE	{			\
+	UNLOCK_THINGS;					\
+	vm_object_deallocate(object);			\
+}
+/*
+ *	Give up and have caller do things the hard way.
+ */
+
+#define GIVE_UP {					\
+	UNLOCK_AND_DEALLOCATE;				\
+	return(KERN_FAILURE);				\
+}
+
+
+	/*
+	 *	If this entry is not directly to a vm_object, bail out.
+	 */
+	if (entry->is_sub_map)
+		return(KERN_FAILURE);
+
+	/*
+	 *	Find the backing store object and offset into it.
+	 */
+
+	object = entry->object.vm_object;
+	offset = (va - entry->vme_start) + entry->offset;
+	prot = entry->protection;
+
+	/*
+	 *	Make a reference to this object to prevent its
+	 *	disposal while we are messing with it.
+	 */
+
+	vm_object_lock(object);
+	assert(object->ref_count > 0);
+	object->ref_count++;
+	object->paging_in_progress++;
+
+	/*
+	 *	INVARIANTS (through entire routine):
+	 *
+	 *	1)	At all times, we must either have the object
+	 *		lock or a busy page in some object to prevent
+	 *		some other thread from trying to bring in
+	 *		the same page.
+	 *
+	 *	2)	Once we have a busy page, we must remove it from
+	 *		the pageout queues, so that the pageout daemon
+	 *		will not grab it away.
+	 *
+	 */
+
+	/*
+	 *	Look for page in top-level object.  If it's not there or
+	 *	there's something going on, give up.
+	 */
+	m = vm_page_lookup(object, offset);
+	if ((m == VM_PAGE_NULL) || (m->error) ||
+	    (m->busy) || (m->absent) || (prot & m->page_lock)) {
+		GIVE_UP;
+	}
+
+	/*
+	 *	Wire the page down now.  All bail outs beyond this
+	 *	point must unwire the page.
+	 */
+
+	vm_page_lock_queues();
+	vm_page_wire(m);
+	vm_page_unlock_queues();
+
+	/*
+	 *	Mark page busy for other threads.
+	 */
+	assert(!m->busy);
+	m->busy = TRUE;
+	assert(!m->absent);
+
+	/*
+	 *	Give up if the page is being written and there's a copy object
+	 */
+	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
+		RELEASE_PAGE(m);
+		GIVE_UP;
+	}
+
+	/*
+	 *	Put this page into the physical map.
+	 *	We have to unlock the object because pmap_enter
+	 *	may cause other faults.
+	 */
+	vm_object_unlock(object);
+
+	/* Final TRUE: enter the mapping wired. */
+	PMAP_ENTER(map->pmap, va, m, prot, TRUE);
+
+	/*
+	 *	Must relock object so that paging_in_progress can be cleared.
+	 */
+	vm_object_lock(object);
+
+	/*
+	 *	Unlock everything, and return
+	 */
+
+	PAGE_WAKEUP_DONE(m);
+	UNLOCK_AND_DEALLOCATE;
+
+	return(KERN_SUCCESS);
+
+}
+
+/*
+ * Routine: vm_fault_copy_cleanup
+ * Purpose:
+ * Release a page used by vm_fault_copy.
+ */
+
+static void vm_fault_copy_cleanup(
+	vm_page_t	page,
+	vm_page_t	top_page)
+{
+	vm_object_t	object = page->object;
+
+	vm_object_lock(object);
+	PAGE_WAKEUP_DONE(page);
+	/* Put the page back on a pageout queue if it is on neither. */
+	vm_page_lock_queues();
+	if (!page->active && !page->inactive)
+		vm_page_activate(page);
+	vm_page_unlock_queues();
+	vm_fault_cleanup(object, top_page);
+}
+
+/*
+ * Routine: vm_fault_copy
+ *
+ * Purpose:
+ * Copy pages from one virtual memory object to another --
+ * neither the source nor destination pages need be resident.
+ *
+ * Before actually copying a page, the version associated with
+ *		the destination address map will be verified.
+ *
+ * In/out conditions:
+ * The caller must hold a reference, but not a lock, to
+ * each of the source and destination objects and to the
+ * destination map.
+ *
+ * Results:
+ * Returns KERN_SUCCESS if no errors were encountered in
+ * reading or writing the data. Returns KERN_INTERRUPTED if
+ * the operation was interrupted (only possible if the
+ * "interruptible" argument is asserted). Other return values
+ * indicate a permanent error in copying the data.
+ *
+ * The actual amount of data copied will be returned in the
+ * "copy_size" argument. In the event that the destination map
+ * verification failed, this amount may be less than the amount
+ * requested.
+ */
+kern_return_t vm_fault_copy(
+	vm_object_t	src_object,
+	vm_offset_t	src_offset,
+	vm_size_t	*src_size,		/* INOUT */
+	vm_object_t	dst_object,
+	vm_offset_t	dst_offset,
+	vm_map_t	dst_map,
+	vm_map_version_t *dst_version,
+	boolean_t	interruptible)
+{
+	vm_page_t	result_page;
+	vm_prot_t	prot;
+
+	vm_page_t	src_page;
+	vm_page_t	src_top_page;
+
+	vm_page_t	dst_page;
+	vm_page_t	dst_top_page;
+
+	vm_size_t	amount_done;
+	vm_object_t	old_copy_object;
+
+#define	RETURN(x)					\
+	MACRO_BEGIN					\
+	*src_size = amount_done;			\
+	MACRO_RETURN(x);				\
+	MACRO_END
+
+	amount_done = 0;
+	do { /* while (amount_done != *src_size) */
+
+	    RetrySourceFault: ;
+
+		if (src_object == VM_OBJECT_NULL) {
+			/*
+			 *	No source object.  We will just
+			 *	zero-fill the page in dst_object.
+			 */
+
+			src_page = VM_PAGE_NULL;
+		} else {
+			prot = VM_PROT_READ;
+
+			vm_object_lock(src_object);
+			vm_object_paging_begin(src_object);
+
+			switch (vm_fault_page(src_object, src_offset,
+					VM_PROT_READ, FALSE, interruptible,
+					&prot, &result_page, &src_top_page,
+					FALSE, (void (*)()) 0)) {
+
+				case VM_FAULT_SUCCESS:
+					break;
+				case VM_FAULT_RETRY:
+					goto RetrySourceFault;
+				case VM_FAULT_INTERRUPTED:
+					RETURN(MACH_SEND_INTERRUPTED);
+				case VM_FAULT_MEMORY_SHORTAGE:
+					VM_PAGE_WAIT((void (*)()) 0);
+					goto RetrySourceFault;
+				case VM_FAULT_FICTITIOUS_SHORTAGE:
+					vm_page_more_fictitious();
+					goto RetrySourceFault;
+				case VM_FAULT_MEMORY_ERROR:
+					/* NOTE(review): bare return skips the
+					 * RETURN() update of *src_size — confirm
+					 * callers ignore *src_size on error. */
+					return(KERN_MEMORY_ERROR);
+			}
+
+			src_page = result_page;
+
+			assert((src_top_page == VM_PAGE_NULL) ==
+					(src_page->object == src_object));
+
+			assert ((prot & VM_PROT_READ) != VM_PROT_NONE);
+
+			vm_object_unlock(src_page->object);
+		}
+
+	    RetryDestinationFault: ;
+
+		prot = VM_PROT_WRITE;
+
+		vm_object_lock(dst_object);
+		vm_object_paging_begin(dst_object);
+
+		switch (vm_fault_page(dst_object, dst_offset, VM_PROT_WRITE,
+				FALSE, FALSE /* interruptible */,
+				&prot, &result_page, &dst_top_page,
+				FALSE, (void (*)()) 0)) {
+
+			case VM_FAULT_SUCCESS:
+				break;
+			case VM_FAULT_RETRY:
+				goto RetryDestinationFault;
+			case VM_FAULT_INTERRUPTED:
+				if (src_page != VM_PAGE_NULL)
+					vm_fault_copy_cleanup(src_page,
+							      src_top_page);
+				RETURN(MACH_SEND_INTERRUPTED);
+			case VM_FAULT_MEMORY_SHORTAGE:
+				VM_PAGE_WAIT((void (*)()) 0);
+				goto RetryDestinationFault;
+			case VM_FAULT_FICTITIOUS_SHORTAGE:
+				vm_page_more_fictitious();
+				goto RetryDestinationFault;
+			case VM_FAULT_MEMORY_ERROR:
+				if (src_page != VM_PAGE_NULL)
+					vm_fault_copy_cleanup(src_page,
+							      src_top_page);
+				/* NOTE(review): bare return skips the RETURN()
+				 * update of *src_size — confirm callers ignore
+				 * *src_size on error. */
+				return(KERN_MEMORY_ERROR);
+		}
+		assert ((prot & VM_PROT_WRITE) != VM_PROT_NONE);
+
+		dst_page = result_page;
+
+		old_copy_object = dst_page->object->copy;
+
+		vm_object_unlock(dst_page->object);
+
+		if (!vm_map_verify(dst_map, dst_version)) {
+
+		 BailOut: ;
+
+			if (src_page != VM_PAGE_NULL)
+				vm_fault_copy_cleanup(src_page, src_top_page);
+			vm_fault_copy_cleanup(dst_page, dst_top_page);
+			break;
+		}
+
+
+		vm_object_lock(dst_page->object);
+		if (dst_page->object->copy != old_copy_object) {
+			vm_object_unlock(dst_page->object);
+			vm_map_verify_done(dst_map, dst_version);
+			goto BailOut;
+		}
+		vm_object_unlock(dst_page->object);
+
+		/*
+		 *	Copy the page, and note that it is dirty
+		 *	immediately.
+		 */
+
+		if (src_page == VM_PAGE_NULL)
+			vm_page_zero_fill(dst_page);
+		else
+			vm_page_copy(src_page, dst_page);
+		dst_page->dirty = TRUE;
+
+		/*
+		 *	Unlock everything, and return
+		 */
+
+		vm_map_verify_done(dst_map, dst_version);
+
+		if (src_page != VM_PAGE_NULL)
+			vm_fault_copy_cleanup(src_page, src_top_page);
+		vm_fault_copy_cleanup(dst_page, dst_top_page);
+
+		amount_done += PAGE_SIZE;
+		src_offset += PAGE_SIZE;
+		dst_offset += PAGE_SIZE;
+
+	} while (amount_done != *src_size);
+
+	RETURN(KERN_SUCCESS);
+#undef	RETURN
+
+	/*NOTREACHED*/
+}
+
+
+
+
+
+#ifdef notdef
+
+/*
+ * Routine: vm_fault_page_overwrite
+ *
+ * Description:
+ * A form of vm_fault_page that assumes that the
+ * resulting page will be overwritten in its entirety,
+ * making it unnecessary to obtain the correct *contents*
+ * of the page.
+ *
+ * Implementation:
+ * XXX Untested. Also unused. Eventually, this technology
+ * could be used in vm_fault_copy() to advantage.
+ */
+vm_fault_return_t vm_fault_page_overwrite(
+	vm_object_t	dst_object,
+	vm_offset_t	dst_offset,
+	vm_page_t	*result_page)	/* OUT */
+{
+	vm_page_t	dst_page;
+
+#define	interruptible	FALSE	/* XXX */
+
+	while (TRUE) {
+		/*
+		 *	Look for a page at this offset
+		 */
+
+		while ((dst_page = vm_page_lookup(dst_object, dst_offset))
+				== VM_PAGE_NULL) {
+			/*
+			 *	No page, no problem... just allocate one.
+			 */
+
+			dst_page = vm_page_alloc(dst_object, dst_offset);
+			if (dst_page == VM_PAGE_NULL) {
+				vm_object_unlock(dst_object);
+				VM_PAGE_WAIT((void (*)()) 0);
+				vm_object_lock(dst_object);
+				continue;
+			}
+
+			/*
+			 *	Pretend that the memory manager
+			 *	write-protected the page.
+			 *
+			 *	Note that we will be asking for write
+			 *	permission without asking for the data
+			 *	first.
+			 */
+
+			dst_page->overwriting = TRUE;
+			dst_page->page_lock = VM_PROT_WRITE;
+			dst_page->absent = TRUE;
+			dst_object->absent_count++;
+
+			break;
+
+			/*
+			 *	When we bail out, we might have to throw
+			 *	away the page created here.
+			 *	(#define below is handled at compile time;
+			 *	the break above does not skip it.)
+			 */
+
+#define	DISCARD_PAGE						\
+	MACRO_BEGIN						\
+	vm_object_lock(dst_object);				\
+	dst_page = vm_page_lookup(dst_object, dst_offset);	\
+	if ((dst_page != VM_PAGE_NULL) && dst_page->overwriting)	\
+	   	VM_PAGE_FREE(dst_page);				\
+	vm_object_unlock(dst_object);				\
+	MACRO_END
+		}
+
+		/*
+		 *	If the page is write-protected...
+		 */
+
+		if (dst_page->page_lock & VM_PROT_WRITE) {
+			/*
+			 *	... and an unlock request hasn't been sent
+			 */
+
+			if ( ! (dst_page->unlock_request & VM_PROT_WRITE)) {
+				vm_prot_t	u;
+				kern_return_t	rc;
+
+				/*
+				 *	... then send one now.
+				 */
+
+				if (!dst_object->pager_ready) {
+					vm_object_assert_wait(dst_object,
+						VM_OBJECT_EVENT_PAGER_READY,
+						interruptible);
+					vm_object_unlock(dst_object);
+					thread_block((void (*)()) 0);
+					if (current_thread()->wait_result !=
+					    THREAD_AWAKENED) {
+						DISCARD_PAGE;
+						return(VM_FAULT_INTERRUPTED);
+					}
+					continue;
+				}
+
+				u = dst_page->unlock_request |= VM_PROT_WRITE;
+				vm_object_unlock(dst_object);
+
+				if ((rc = memory_object_data_unlock(
+						dst_object->pager,
+						dst_object->pager_request,
+						dst_offset + dst_object->paging_offset,
+						PAGE_SIZE,
+						u)) != KERN_SUCCESS) {
+					printf("vm_object_overwrite: memory_object_data_unlock failed\n");
+					DISCARD_PAGE;
+					return((rc == MACH_SEND_INTERRUPTED) ?
+						VM_FAULT_INTERRUPTED :
+						VM_FAULT_MEMORY_ERROR);
+				}
+				vm_object_lock(dst_object);
+				continue;
+			}
+
+			/* ... fall through to wait below */
+		} else {
+			/*
+			 *	If the page isn't being used for other
+			 *	purposes, then we're done.
+			 */
+			if ( ! (dst_page->busy || dst_page->absent || dst_page->error) )
+				break;
+		}
+
+		PAGE_ASSERT_WAIT(dst_page, interruptible);
+		vm_object_unlock(dst_object);
+		thread_block((void (*)()) 0);
+		if (current_thread()->wait_result != THREAD_AWAKENED) {
+			DISCARD_PAGE;
+			return(VM_FAULT_INTERRUPTED);
+		}
+	}
+
+	*result_page = dst_page;
+	return(VM_FAULT_SUCCESS);
+
+#undef	interruptible
+#undef	DISCARD_PAGE
+}
+
+#endif /* notdef */
diff --git a/vm/vm_fault.h b/vm/vm_fault.h
new file mode 100644
index 0000000..ae692b1
--- /dev/null
+++ b/vm/vm_fault.h
@@ -0,0 +1,81 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_fault.h
+ *
+ * Page fault handling module declarations.
+ */
+
+#ifndef _VM_VM_FAULT_H_
+#define _VM_VM_FAULT_H_
+
+#include <mach/kern_return.h>
+#include <mach/vm_prot.h>
+#include <vm/vm_map.h>
+#include <vm/vm_types.h>
+
+/*
+ * Page fault handling based on vm_object only.
+ */
+
+typedef kern_return_t vm_fault_return_t;
+#define VM_FAULT_SUCCESS 0
+#define VM_FAULT_RETRY 1
+#define VM_FAULT_INTERRUPTED 2
+#define VM_FAULT_MEMORY_SHORTAGE 3
+#define VM_FAULT_FICTITIOUS_SHORTAGE 4
+#define VM_FAULT_MEMORY_ERROR 5
+
+typedef void (*vm_fault_continuation_t)(kern_return_t);
+#define vm_fault_no_continuation ((vm_fault_continuation_t)0)
+
+extern void vm_fault_init(void);
+extern vm_fault_return_t vm_fault_page(vm_object_t, vm_offset_t, vm_prot_t,
+ boolean_t, boolean_t, vm_prot_t *,
+ vm_page_t *, vm_page_t *, boolean_t,
+ continuation_t);
+
+extern void vm_fault_cleanup(vm_object_t, vm_page_t);
+/*
+ * Page fault handling based on vm_map (or entries therein)
+ */
+
+extern kern_return_t vm_fault(vm_map_t, vm_offset_t, vm_prot_t, boolean_t,
+ boolean_t, vm_fault_continuation_t);
+extern void vm_fault_wire(vm_map_t, vm_map_entry_t);
+extern void vm_fault_unwire(vm_map_t, vm_map_entry_t);
+
+/* Copy pages from one object to another. */
+extern kern_return_t vm_fault_copy(vm_object_t, vm_offset_t, vm_size_t *,
+ vm_object_t, vm_offset_t, vm_map_t,
+ vm_map_version_t *, boolean_t);
+
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry);
+
+#endif /* _VM_VM_FAULT_H_ */
diff --git a/vm/vm_init.c b/vm/vm_init.c
new file mode 100644
index 0000000..593af11
--- /dev/null
+++ b/vm/vm_init.c
@@ -0,0 +1,88 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_init.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Initialize the Virtual Memory subsystem.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <kern/slab.h>
+#include <kern/kalloc.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_init.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+#include <vm/memory_object.h>
+#include <vm/memory_object_proxy.h>
+
+
+/*
+ * vm_mem_bootstrap initializes the virtual memory system.
+ * This is done only by the first cpu up.
+ */
+
+void vm_mem_bootstrap(void)
+{
+	vm_offset_t	start, end;
+
+	/*
+	 *	Initializes resident memory structures.
+	 *	From here on, all physical memory is accounted for,
+	 *	and we use only virtual addresses.
+	 */
+
+	vm_page_bootstrap(&start, &end);
+
+	/*
+	 *	Initialize other VM packages.
+	 *	Ordering matters: the slab/object/map layers must exist
+	 *	before kmem_init hands [start, end) to the kernel map,
+	 *	and the allocators before the fault and page modules.
+	 */
+
+	slab_bootstrap();
+	vm_object_bootstrap();
+	vm_map_init();
+	kmem_init(start, end);
+	pmap_init();
+	slab_init();
+	kalloc_init();
+	vm_fault_init();
+	vm_page_module_init();
+	memory_manager_default_init();
+}
+
+/*
+ * Second-stage VM initialization: finish object setup, initialize
+ * memory-object proxies, and dump resident-page info.
+ */
+void vm_mem_init(void)
+{
+	vm_object_init();
+	memory_object_proxy_init();
+	vm_page_info_all();
+}
diff --git a/vm/vm_init.h b/vm/vm_init.h
new file mode 100644
index 0000000..42ef48b
--- /dev/null
+++ b/vm/vm_init.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
#ifndef _VM_VM_INIT_H_
#define _VM_VM_INIT_H_

/* Second-stage VM initialization (after bootstrap allocators work). */
extern void vm_mem_init(void);
/* First-stage VM bootstrap; done only by the first cpu up. */
extern void vm_mem_bootstrap(void);

#endif /* _VM_VM_INIT_H_ */
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
new file mode 100644
index 0000000..51223d9
--- /dev/null
+++ b/vm/vm_kern.c
@@ -0,0 +1,1099 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_kern.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Kernel memory management.
+ */
+
+#include <string.h>
+
+#include <mach/kern_return.h>
+#include <machine/locore.h>
+#include <machine/vm_param.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/lock.h>
+#include <kern/slab.h>
+#include <kern/thread.h>
+#include <kern/printf.h>
+#include <vm/pmap.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+
+
+/*
+ * Variables exported by this module.
+ */
+
+static struct vm_map kernel_map_store;
+vm_map_t kernel_map = &kernel_map_store;
+vm_map_t kernel_pageable_map;
+
/*
 *	projected_buffer_allocate
 *
 *	Allocate a wired-down buffer shared between kernel and user task.
 *	Fresh, zero-filled memory is allocated.
 *	If persistence is false, this buffer can only be deallocated from
 *	user task using projected_buffer_deallocate, and deallocation
 *	from user task also deallocates the buffer from the kernel map.
 *	projected_buffer_collect is called from vm_map_deallocate to
 *	automatically deallocate projected buffers on task_deallocate.
 *	Sharing with more than one user task is achieved by using
 *	projected_buffer_map for the second and subsequent tasks.
 *	The user is precluded from manipulating the VM entry of this buffer
 *	(i.e. changing protection, inheritance or machine attributes).
 *
 *	map:         user map to receive the buffer; must not be the
 *	             kernel map or VM_MAP_NULL
 *	size:        requested size, rounded up to whole pages
 *	persistence: false means the kernel mapping is reclaimed when
 *	             the last user mapping goes away
 *	kernel_p:    out — kernel virtual address of the buffer
 *	user_p:      out — user virtual address of the buffer
 *	protection:  user protection, also used as max_protection
 *	inheritance: currently only VM_INHERIT_NONE supported
 *
 *	Returns KERN_INVALID_ARGUMENT for a bad map, else the result of
 *	vm_map_find_entry.  Locking: kernel_map and the user map are
 *	locked one at a time, never simultaneously.
 */

kern_return_t
projected_buffer_allocate(
	vm_map_t map,
	vm_size_t size,
	int persistence,
	vm_offset_t *kernel_p,
	vm_offset_t *user_p,
	vm_prot_t protection,
	vm_inherit_t inheritance)  /*Currently only VM_INHERIT_NONE supported*/
{
	vm_object_t object;
	vm_map_entry_t u_entry, k_entry;
	vm_offset_t addr;
	phys_addr_t physical_addr;
	vm_size_t r_size;
	kern_return_t kr;

	if (map == VM_MAP_NULL || map == kernel_map)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Allocate a new object to back both mappings.
	 */

	size = round_page(size);
	object = vm_object_allocate(size);

	/* First carve out the kernel-side mapping. */
	vm_map_lock(kernel_map);
	kr = vm_map_find_entry(kernel_map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &k_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(kernel_map);
		vm_object_deallocate(object);
		return kr;
	}

	k_entry->object.vm_object = object;
	if (!persistence)
		k_entry->projected_on = (vm_map_entry_t) -1;
	/*Mark entry so as to automatically deallocate it when
	  last corresponding user entry is deallocated*/
	vm_map_unlock(kernel_map);
	*kernel_p = addr;

	/* Then the user-side mapping; undo the kernel entry on failure. */
	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &u_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);
		vm_map_lock(kernel_map);
		vm_map_entry_delete(kernel_map, k_entry);
		vm_map_unlock(kernel_map);
		vm_object_deallocate(object);
		return kr;
	}

	/* The object now has one reference per mapping. */
	u_entry->object.vm_object = object;
	vm_object_reference(object);
	u_entry->projected_on = k_entry;
	/*Creates coupling with kernel mapping of the buffer, and
	  also guarantees that user cannot directly manipulate
	  buffer VM entry*/
	u_entry->protection = protection;
	u_entry->max_protection = protection;
	u_entry->inheritance = inheritance;
	vm_map_unlock(map);
	*user_p = addr;

	/*
	 *	Allocate wired-down memory in the object,
	 *	and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(object, 0,
			 *kernel_p, *kernel_p + size,
			 VM_PROT_READ | VM_PROT_WRITE);
	memset((void*) *kernel_p, 0, size);	/*Zero fill*/

	/* Set up physical mappings for user pmap */

	pmap_pageable(map->pmap, *user_p, *user_p + size, FALSE);
	for (r_size = 0; r_size < size; r_size += PAGE_SIZE) {
		/* Mirror each kernel page into the user pmap, wired. */
		physical_addr = pmap_extract(kernel_pmap, *kernel_p + r_size);
		pmap_enter(map->pmap, *user_p + r_size, physical_addr,
			   protection, TRUE);
	}

	return(KERN_SUCCESS);
}
+
+
/*
 *	projected_buffer_map
 *
 *	Map an area of kernel memory onto a task's address space.
 *	No new memory is allocated; the area must previously exist in the
 *	kernel memory map.
 *
 *	map:         user map to receive the mapping (not the kernel map)
 *	kernel_addr: existing kernel virtual address to share
 *	size:        length to map, rounded up to whole pages
 *	user_p:      out — user virtual address of the new mapping
 *	protection:  user protection, also used as max_protection
 *	inheritance: currently only VM_INHERIT_NONE supported
 *
 *	NOTE(review): the kernel_map lookup below is performed without
 *	holding the kernel map lock — confirm callers serialize against
 *	concurrent kernel_map changes.
 */

kern_return_t
projected_buffer_map(
	vm_map_t map,
	vm_offset_t kernel_addr,
	vm_size_t size,
	vm_offset_t *user_p,
	vm_prot_t protection,
	vm_inherit_t inheritance)  /*Currently only VM_INHERIT_NONE supported*/
{
	vm_map_entry_t u_entry, k_entry;
	vm_offset_t user_addr;
	phys_addr_t physical_addr;
	vm_size_t r_size;
	kern_return_t kr;

	/*
	 *	Find entry in kernel map; the whole [kernel_addr,
	 *	kernel_addr + size) range must fall within one entry.
	 */

	size = round_page(size);
	if (map == VM_MAP_NULL || map == kernel_map ||
	    !vm_map_lookup_entry(kernel_map, kernel_addr, &k_entry) ||
	    kernel_addr + size > k_entry->vme_end)
		return(KERN_INVALID_ARGUMENT);


	/*
	 *	Create entry in user task
	 */

	vm_map_lock(map);
	kr = vm_map_find_entry(map, &user_addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &u_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);
		return kr;
	}

	/* Share the kernel entry's backing object (extra reference). */
	u_entry->object.vm_object = k_entry->object.vm_object;
	vm_object_reference(k_entry->object.vm_object);
	u_entry->offset = kernel_addr - k_entry->vme_start + k_entry->offset;
	u_entry->projected_on = k_entry;
	/*Creates coupling with kernel mapping of the buffer, and
	  also guarantees that user cannot directly manipulate
	  buffer VM entry*/
	u_entry->protection = protection;
	u_entry->max_protection = protection;
	u_entry->inheritance = inheritance;
	u_entry->wired_count = k_entry->wired_count;
	vm_map_unlock(map);
	*user_p = user_addr;

	/* Set up physical mappings for user pmap */

	pmap_pageable(map->pmap, user_addr, user_addr + size,
		      !k_entry->wired_count);
	for (r_size = 0; r_size < size; r_size += PAGE_SIZE) {
		/* Wire the user translation only if the kernel one is wired. */
		physical_addr = pmap_extract(kernel_pmap, kernel_addr + r_size);
		pmap_enter(map->pmap, user_addr + r_size, physical_addr,
			   protection, k_entry->wired_count);
	}

	return(KERN_SUCCESS);
}
+
+
/*
 *	projected_buffer_deallocate
 *
 *	Unmap projected buffer from task's address space.
 *	May also unmap buffer from kernel map, if buffer is not
 *	persistent and only the kernel reference remains.
 *
 *	map:        user map holding the buffer (not the kernel map)
 *	start, end: range to remove; must fall within one projected entry
 *
 *	Returns KERN_INVALID_ARGUMENT when the range does not name a
 *	projected-buffer entry in map.
 */

kern_return_t
projected_buffer_deallocate(
     vm_map_t map,
     vm_offset_t start,
     vm_offset_t end)
{
	vm_map_entry_t entry, k_entry;

	if (map == VM_MAP_NULL || map == kernel_map)
		return KERN_INVALID_ARGUMENT;

	vm_map_lock(map);
	if (!vm_map_lookup_entry(map, start, &entry) ||
	    end > entry->vme_end ||
	    /*Check corresponding kernel entry*/
	    (k_entry = entry->projected_on) == 0) {
		vm_map_unlock(map);
		return(KERN_INVALID_ARGUMENT);
	}

	/*Prepare for deallocation: trim the entry down to [start, end)*/
	if (entry->vme_start < start)
		_vm_map_clip_start(&map->hdr, entry, start, 1);
	if (entry->vme_end > end)
		_vm_map_clip_end(&map->hdr, entry, end, 1);
	if (map->first_free == entry)   /*Adjust first_free hint*/
		map->first_free = entry->vme_prev;
	entry->projected_on = 0;        /*Needed to allow deletion*/
	entry->wired_count = 0;         /*Avoid unwire fault*/
	vm_map_entry_delete(map, entry);
	vm_map_unlock(map);

	/*Check if the buffer is not persistent and only the
	  kernel mapping remains, and if so delete it*/
	vm_map_lock(kernel_map);
	if (k_entry->projected_on == (vm_map_entry_t) -1 &&
	    k_entry->object.vm_object->ref_count == 1) {
		if (kernel_map->first_free == k_entry)
			kernel_map->first_free = k_entry->vme_prev;
		k_entry->projected_on = 0;  /*Allow unwire fault*/
		vm_map_entry_delete(kernel_map, k_entry);
	}
	vm_map_unlock(kernel_map);
	return(KERN_SUCCESS);
}
+
+
+/*
+ * projected_buffer_collect
+ *
+ * Unmap all projected buffers from task's address space.
+ */
+
+kern_return_t
+projected_buffer_collect(vm_map_t map)
+{
+ vm_map_entry_t entry, next;
+
+ if (map == VM_MAP_NULL || map == kernel_map)
+ return(KERN_INVALID_ARGUMENT);
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = next) {
+ next = entry->vme_next;
+ if (entry->projected_on != 0)
+ projected_buffer_deallocate(map, entry->vme_start, entry->vme_end);
+ }
+ return(KERN_SUCCESS);
+}
+
+
/*
 *	projected_buffer_in_range
 *
 *	Verifies whether a projected buffer exists in the address range
 *	given.
 *
 *	Returns TRUE iff some entry with a non-zero projected_on link
 *	starts at or before `end` and lies at or after `start`.
 *
 *	NOTE(review): the map is scanned without taking the map lock —
 *	confirm callers hold it or otherwise serialize.
 */

boolean_t
projected_buffer_in_range(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end)
{
	vm_map_entry_t entry;

	if (map == VM_MAP_NULL || map == kernel_map)
		return(FALSE);

	/*Find first entry: on a miss, vm_map_lookup_entry leaves entry
	  at the predecessor, so advance to the first entry at/after start*/
	if (!vm_map_lookup_entry(map, start, &entry))
		entry = entry->vme_next;

	/*Skip non-projected entries inside the range*/
	while (entry != vm_map_to_entry(map) && entry->projected_on == 0 &&
	       entry->vme_start <= end) {
		entry = entry->vme_next;
	}
	return(entry != vm_map_to_entry(map) && entry->vme_start <= end);
}
+
+
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	map:   kernel map or submap to allocate from
 *	addrp: out — start of the allocated, page-rounded region
 *	size:  requested size, rounded up to whole pages
 *
 *	Each allocation gets its own anonymous object (unlike
 *	kmem_alloc_wired, which uses the shared kernel_object).
 *	On first failure the slab caches are drained once
 *	(slab_collect) and the allocation is retried.
 */

kern_return_t
kmem_alloc(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size)
{
	vm_object_t object;
	vm_map_entry_t entry;
	vm_offset_t addr;
	unsigned int attempts;
	kern_return_t kr;

	/*
	 *	Allocate a new object.  We must do this before locking
	 *	the map, lest we risk deadlock with the default pager:
	 *		device_read_alloc uses kmem_alloc,
	 *		which tries to allocate an object,
	 *		which uses kmem_alloc_wired to get memory,
	 *		which blocks for pages.
	 *		then the default pager needs to read a block
	 *		to process a memory_object_data_write,
	 *		and device_read_alloc calls kmem_alloc
	 *		and deadlocks on the map lock.
	 */

	size = round_page(size);
	object = vm_object_allocate(size);

	attempts = 0;

retry:
	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);

		/* Reclaim slab memory once before giving up. */
		if (attempts == 0) {
			attempts++;
			slab_collect();
			goto retry;
		}

		printf_once("no more room for kmem_alloc in %p (%s)\n",
			    map, map->name);
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = 0;

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	/*
	 *	Allocate wired-down memory in the kernel_object,
	 *	for this entry, and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(object, 0,
			 addr, addr + size,
			 VM_PROT_DEFAULT);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = addr;
	return KERN_SUCCESS;
}
+
/*
 *	kmem_valloc:
 *
 *	Allocate addressing space in the kernel's address map
 *	or a submap.  The addressing space does not map anything.
 *
 *	map:   kernel map or submap to allocate from
 *	addrp: out — start of the allocated region
 *	size:  requested size, rounded up to whole pages
 *
 *	The range is backed (lazily) by kernel_object; no pages are
 *	allocated or mapped here — see kmem_alloc_wired for that.
 */

kern_return_t
kmem_valloc(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size)
{
	vm_map_entry_t entry;
	vm_offset_t offset;
	vm_offset_t addr;
	unsigned int attempts;
	kern_return_t kr;

	/*
	 *	Use the kernel object for wired-down kernel pages.
	 *	Assume that no region of the kernel object is
	 *	referenced more than once.  We want vm_map_find_entry
	 *	to extend an existing entry if possible.
	 */

	size = round_page(size);
	attempts = 0;

retry:
	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       kernel_object, &entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);

		/* Reclaim slab memory once before giving up. */
		if (attempts == 0) {
			attempts++;
			slab_collect();
			goto retry;
		}

		printf_once("no more room for kmem_valloc in %p (%s)\n",
			    map, map->name);
		return kr;
	}

	/*
	 *	Since we didn't know where the new region would
	 *	start, we couldn't supply the correct offset into
	 *	the kernel object.  We only initialize the entry
	 *	if we aren't extending an existing entry.
	 */

	offset = addr - VM_MIN_KERNEL_ADDRESS;

	if (entry->object.vm_object == VM_OBJECT_NULL) {
		vm_object_reference(kernel_object);

		entry->object.vm_object = kernel_object;
		entry->offset = offset;
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	/*
	 *	Return the memory, not mapped.
	 */
	*addrp = addr;
	return KERN_SUCCESS;
}
+
+/*
+ * kmem_alloc_wired:
+ *
+ * Allocate wired-down memory in the kernel's address map
+ * or a submap. The memory is not zero-filled.
+ *
+ * The memory is allocated in the kernel_object.
+ * It may not be copied with vm_map_copy.
+ */
+
+kern_return_t
+kmem_alloc_wired(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
+{
+ vm_offset_t offset;
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ kr = kmem_valloc(map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
+
+ /*
+ * Allocate wired-down memory in the kernel_object,
+ * for this entry, and enter it in the kernel pmap.
+ */
+ kmem_alloc_pages(kernel_object, offset,
+ addr, addr + size,
+ VM_PROT_DEFAULT);
+
+ /*
+ * Return the memory, not zeroed.
+ */
+ *addrp = addr;
+ return KERN_SUCCESS;
+}
+
/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2 (enforced by panic below);
 *	the returned address is aligned to that size.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size)
{
	vm_map_entry_t entry;
	vm_offset_t offset;
	vm_offset_t addr;
	unsigned int attempts;
	kern_return_t kr;

	/* Reject sizes that are not a power of two. */
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned");

	/*
	 *	Use the kernel object for wired-down kernel pages.
	 *	Assume that no region of the kernel object is
	 *	referenced more than once.  We want vm_map_find_entry
	 *	to extend an existing entry if possible.
	 */

	size = round_page(size);
	attempts = 0;

retry:
	vm_map_lock(map);
	/* size - 1 is the alignment mask passed to the map search. */
	kr = vm_map_find_entry(map, &addr, size, size - 1,
			       kernel_object, &entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);

		/* Reclaim slab memory once before giving up. */
		if (attempts == 0) {
			attempts++;
			slab_collect();
			goto retry;
		}

		printf_once("no more room for kmem_alloc_aligned in %p (%s)\n",
			    map, map->name);
		return kr;
	}

	/*
	 *	Since we didn't know where the new region would
	 *	start, we couldn't supply the correct offset into
	 *	the kernel object.  We only initialize the entry
	 *	if we aren't extending an existing entry.
	 */

	offset = addr - VM_MIN_KERNEL_ADDRESS;

	if (entry->object.vm_object == VM_OBJECT_NULL) {
		vm_object_reference(kernel_object);

		entry->object.vm_object = kernel_object;
		entry->offset = offset;
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	/*
	 *	Allocate wired-down memory in the kernel_object,
	 *	for this entry, and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(kernel_object, offset,
			 addr, addr + size,
			 VM_PROT_DEFAULT);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = addr;
	return KERN_SUCCESS;
}
+
+/*
+ * kmem_map_aligned_table: map a table or structure in a virtual memory page
+ * Align the table initial address with the page initial address.
+ *
+ * Parameters:
+ * phys_address: physical address, the start address of the table.
+ * size: size of the table.
+ * mode: access mode. VM_PROT_READ for read, VM_PROT_WRITE for write.
+ *
+ * Returns a reference to the virtual address if success, NULL if failure.
+ */
+
+void*
+kmem_map_aligned_table(
+ phys_addr_t phys_address,
+ vm_size_t size,
+ int mode)
+{
+ vm_offset_t virt_addr;
+ kern_return_t ret;
+ phys_addr_t into_page = phys_address % PAGE_SIZE;
+ phys_addr_t nearest_page = phys_address - into_page;
+
+ size += into_page;
+
+ ret = kmem_alloc_wired(kernel_map, &virt_addr,
+ round_page(size));
+
+ if (ret != KERN_SUCCESS)
+ return NULL;
+
+ (void) pmap_map_bd(virt_addr, nearest_page,
+ nearest_page + round_page(size), mode);
+
+ /* XXX remember mapping somewhere so we can free it? */
+
+ return (void *) (virt_addr + into_page);
+}
+
+/*
+ * kmem_alloc_pageable:
+ *
+ * Allocate pageable memory in the kernel's address map.
+ */
+
+kern_return_t
+kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ addr = vm_map_min(map);
+ kr = vm_map_enter(map, &addr, round_page(size),
+ (vm_offset_t) 0, TRUE,
+ VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS) {
+ printf_once("no more room for kmem_alloc_pageable in %p (%s)\n",
+ map, map->name);
+ return kr;
+ }
+
+ *addrp = addr;
+ return KERN_SUCCESS;
+}
+
+/*
+ * kmem_free:
+ *
+ * Release a region of kernel virtual memory allocated
+ * with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
+ * and return the physical pages associated with that region.
+ */
+
+void
+kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
+{
+ kern_return_t kr;
+
+ kr = vm_map_remove(map, trunc_page(addr), round_page(addr + size));
+ if (kr != KERN_SUCCESS)
+ panic("kmem_free");
+}
+
/*
 *	Allocate new wired pages in an object.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 *
 *	object:     backing object; pages are entered at successive
 *	            offsets starting at `offset`
 *	start, end: kernel virtual range to populate (page granular)
 *	protection: protection for the kernel pmap entries
 */
void
kmem_alloc_pages(
	vm_object_t object,
	vm_offset_t offset,
	vm_offset_t start,
	vm_offset_t end,
	vm_prot_t protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
	    vm_page_t	mem;

	    vm_object_lock(object);

	    /*
	     *	Allocate a page; block (object unlocked) until one
	     *	becomes available.
	     */
	    while ((mem = vm_page_alloc(object, offset))
			 == VM_PAGE_NULL) {
		vm_object_unlock(object);
		VM_PAGE_WAIT((void (*)()) 0);
		vm_object_lock(object);
	    }

	    /*
	     *	Wire it down
	     */
	    vm_page_lock_queues();
	    vm_page_wire(mem);
	    vm_page_unlock_queues();
	    vm_object_unlock(object);

	    /*
	     *	Enter it in the kernel pmap
	     */
	    PMAP_ENTER(kernel_pmap, start, mem,
		       protection, TRUE);

	    /* The page was born busy; mark it usable and wake waiters. */
	    vm_object_lock(object);
	    PAGE_WAKEUP_DONE(mem);
	    vm_object_unlock(object);

	    start += PAGE_SIZE;
	    offset += PAGE_SIZE;
	}
}
+
/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 *
 *	Pages must already exist at the given offsets; panics
 *	otherwise.  Each page gains an extra wiring.
 */
void
kmem_remap_pages(
	vm_object_t object,
	vm_offset_t offset,
	vm_offset_t start,
	vm_offset_t end,
	vm_prot_t protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
	    vm_page_t	mem;

	    vm_object_lock(object);

	    /*
	     *	Find a page
	     */
	    if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
		panic("kmem_remap_pages");

	    /*
	     *	Wire it down (again)
	     */
	    vm_page_lock_queues();
	    vm_page_wire(mem);
	    vm_page_unlock_queues();
	    vm_object_unlock(object);

	    /*
	     *	Enter it in the kernel pmap.  The page isn't busy,
	     *	but this shouldn't be a problem because it is wired.
	     */
	    PMAP_ENTER(kernel_pmap, start, mem,
		       protection, TRUE);

	    start += PAGE_SIZE;
	    offset += PAGE_SIZE;
	}
}
+
/*
 *	kmem_submap:
 *
 *	Initializes a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	map		Map to initialize
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *	pageable	Can the region be paged
 *
 *	Panics if the parent has no room or the submap linkage fails;
 *	there is no error return.
 */

void
kmem_submap(
	vm_map_t map,
	vm_map_t parent,
	vm_offset_t *min,
	vm_offset_t *max,
	vm_size_t size)
{
	vm_offset_t addr;
	kern_return_t kr;

	size = round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	addr = vm_map_min(parent);
	kr = vm_map_enter(parent, &addr, size,
			  (vm_offset_t) 0, TRUE,
			  vm_submap_object, (vm_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		panic("kmem_submap");

	/* The submap shares the parent's pmap; take a reference on it. */
	pmap_reference(vm_map_pmap(parent));
	vm_map_setup(map, vm_map_pmap(parent), addr, addr + size);
	kr = vm_map_submap(parent, addr, addr + size, map);
	if (kr != KERN_SUCCESS)
		panic("kmem_submap");

	*min = addr;
	*max = addr + size;
}
+
+/*
+ * kmem_init:
+ *
+ * Initialize the kernel's virtual memory map, taking
+ * into account all memory allocated up to this time.
+ */
+void kmem_init(
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end);
+
+ /*
+ * Reserve virtual memory allocated up to this time.
+ */
+ if (start != VM_MIN_KERNEL_ADDRESS) {
+ kern_return_t rc;
+ vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
+ rc = vm_map_enter(kernel_map,
+ &addr, start - VM_MIN_KERNEL_ADDRESS,
+ (vm_offset_t) 0, TRUE,
+ VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (rc)
+ panic("vm_map_enter failed (%d)\n", rc);
+ }
+}
+
+/*
+ * New and improved IO wiring support.
+ */
+
/*
 *	kmem_io_map_copyout:
 *
 *	Establish temporary mapping in designated map for the memory
 *	passed in.  Memory format must be a page_list vm_map_copy.
 *	Mapping is READ-ONLY.
 *
 *	map:        destination map (must be kernel space; see below)
 *	addr:       out — actual addr of data (offset into first page kept)
 *	alloc_addr: out — page aligned addr of the mapping
 *	alloc_size: out — size actually allocated
 *	copy:       page-list copy object to map
 *	min_size:   map at least this much, even if it takes more than
 *	            one page list
 */

kern_return_t
kmem_io_map_copyout(
     vm_map_t map,
     vm_offset_t *addr,		/* actual addr of data */
     vm_offset_t *alloc_addr,	/* page aligned addr */
     vm_size_t *alloc_size,	/* size allocated */
     vm_map_copy_t copy,
     vm_size_t min_size)	/* Do at least this much */
{
	vm_offset_t	myaddr, offset;
	vm_size_t	mysize, copy_size;
	kern_return_t	ret;
	vm_page_t	*page_list;
	vm_map_copy_t	new_copy;
	int		i;

	assert(copy->type == VM_MAP_COPY_PAGE_LIST);
	assert(min_size != 0);

	/*
	 *	Figure out the size in vm pages.  Both sizes are
	 *	measured from the page boundary below copy->offset.
	 */
	min_size += copy->offset - trunc_page(copy->offset);
	min_size = round_page(min_size);
	mysize = round_page(copy->offset + copy->size) -
		trunc_page(copy->offset);

	/*
	 *	If total size is larger than one page list and
	 *	we don't have to do more than one page list, then
	 *	only do one page list.
	 *
	 * XXX	Could be much smarter about this ... like trimming length
	 * XXX	if we need more than one page list but not all of them.
	 */

	copy_size = ptoa(copy->cpy_npages);
	if (mysize > copy_size && copy_size > min_size)
		mysize = copy_size;

	/*
	 *	Allocate some address space in the map (must be kernel
	 *	space).
	 */
	myaddr = vm_map_min(map);
	ret = vm_map_enter(map, &myaddr, mysize,
			   (vm_offset_t) 0, TRUE,
			   VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
			   VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (ret != KERN_SUCCESS)
		return(ret);

	/*
	 *	Tell the pmap module that this will be wired, and
	 *	enter the mappings.
	 */
	pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE);

	*addr = myaddr + (copy->offset - trunc_page(copy->offset));
	*alloc_addr = myaddr;
	*alloc_size = mysize;

	offset = myaddr;
	page_list = &copy->cpy_page_list[0];
	while (TRUE) {
		/* Enter every page of the current page list, read-only. */
		for ( i = 0; i < copy->cpy_npages; i++, offset += PAGE_SIZE) {
			PMAP_ENTER(vm_map_pmap(map), offset, *page_list,
				   VM_PROT_READ, TRUE);
			page_list++;
		}

		if (offset == (myaddr + mysize))
			break;

		/*
		 *	Onward to the next page_list.  The extend_cont
		 *	leaves the current page list's pages alone;
		 *	they'll be cleaned up at discard.  Reset this
		 *	copy's continuation to discard the next one.
		 */
		vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret);

		if (ret != KERN_SUCCESS) {
			/* Tear down what was mapped so far. */
			kmem_io_map_deallocate(map, myaddr, mysize);
			return(ret);
		}
		copy->cpy_cont = vm_map_copy_discard_cont;
		copy->cpy_cont_args = (vm_map_copyin_args_t)new_copy;
		copy = new_copy;
		page_list = &copy->cpy_page_list[0];
	}

	return(ret);
}
+
/*
 *	kmem_io_map_deallocate:
 *
 *	Get rid of the mapping established by kmem_io_map_copyout.
 *	Assumes that addr and size have been rounded to page boundaries.
 *	(e.g., the alloc_addr and alloc_size returned by kmem_io_map_copyout)
 */

void
kmem_io_map_deallocate(
	vm_map_t map,
	vm_offset_t addr,
	vm_size_t size)
{
	/*
	 *	Remove the mappings.  The explicit pmap_remove is needed
	 *	(before vm_map_remove) for the PMAP_ENTER translations
	 *	installed by kmem_io_map_copyout.
	 */

	pmap_remove(vm_map_pmap(map), addr, addr + size);
	vm_map_remove(map, addr, addr + size);
}
+
+/*
+ * Routine: copyinmap
+ * Purpose:
+ * Like copyin, except that fromaddr is an address
+ * in the specified VM map. This implementation
+ * is incomplete; it handles the current user map
+ * and the kernel map/submaps.
+ */
+
+int copyinmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
+{
+ if (vm_map_pmap(map) == kernel_pmap) {
+ /* assume a correct copy */
+ memcpy(toaddr, fromaddr, length);
+ return 0;
+ }
+
+ if (current_map() == map)
+ return copyin( fromaddr, toaddr, length);
+
+ return 1;
+}
+
+/*
+ * Routine: copyoutmap
+ * Purpose:
+ * Like copyout, except that toaddr is an address
+ * in the specified VM map. This implementation
+ * is incomplete; it handles the current user map
+ * and the kernel map/submaps.
+ */
+
+int copyoutmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
+{
+ if (vm_map_pmap(map) == kernel_pmap) {
+ /* assume a correct copy */
+ memcpy(toaddr, fromaddr, length);
+ return 0;
+ }
+
+ if (current_map() == map)
+ return copyout(fromaddr, toaddr, length);
+
+ return 1;
+}
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
new file mode 100644
index 0000000..13115ff
--- /dev/null
+++ b/vm/vm_kern.h
@@ -0,0 +1,100 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_kern.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Kernel memory management definitions.
+ */
+
#ifndef _VM_VM_KERN_H_
#define _VM_VM_KERN_H_

#include <mach/kern_return.h>
#include <vm/vm_map.h>

/*
 *	Projected buffers: wired memory shared between the kernel
 *	and one or more user tasks (see vm/vm_kern.c).
 */
extern kern_return_t	projected_buffer_allocate(vm_map_t, vm_size_t, int,
						  vm_offset_t *, vm_offset_t *,
						  vm_prot_t, vm_inherit_t);
extern kern_return_t	projected_buffer_deallocate(vm_map_t, vm_offset_t,
						    vm_offset_t);
extern kern_return_t	projected_buffer_map(vm_map_t, vm_offset_t, vm_size_t,
					     vm_offset_t *, vm_prot_t,
					     vm_inherit_t);
extern kern_return_t	projected_buffer_collect(vm_map_t);

/* One-time setup of the kernel map; start/end bound already-used VM. */
extern void		kmem_init(vm_offset_t, vm_offset_t);

/*
 *	Kernel memory allocators.  kmem_alloc uses a private object;
 *	kmem_valloc/kmem_alloc_wired/kmem_alloc_aligned use the shared
 *	kernel_object.  None zero-fill.
 */
extern kern_return_t	kmem_alloc(vm_map_t, vm_offset_t *, vm_size_t);
extern kern_return_t	kmem_alloc_pageable(vm_map_t, vm_offset_t *,
					    vm_size_t);
extern kern_return_t	kmem_valloc(vm_map_t, vm_offset_t *, vm_size_t);
extern kern_return_t	kmem_alloc_wired(vm_map_t, vm_offset_t *, vm_size_t);
extern kern_return_t	kmem_alloc_aligned(vm_map_t, vm_offset_t *, vm_size_t);
/* Map a physical-address table into kernel VM; NULL on failure. */
extern void*		kmem_map_aligned_table(phys_addr_t, vm_size_t, int);

/* Release memory obtained from the allocators above. */
extern void		kmem_free(vm_map_t, vm_offset_t, vm_size_t);

/* Carve a submap out of a parent map (panics on failure). */
extern void		kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
				    vm_offset_t *, vm_size_t);

/* Temporary read-only kernel mappings of page-list vm_map_copy data. */
extern kern_return_t	kmem_io_map_copyout(vm_map_t, vm_offset_t *,
					    vm_offset_t *, vm_size_t *,
					    vm_map_copy_t, vm_size_t);
extern void		kmem_io_map_deallocate(vm_map_t, vm_offset_t,
					       vm_size_t);

/* copyin/copyout variants taking an explicit map; 0 on success. */
extern int
copyinmap (vm_map_t map, char *fromaddr, char *toaddr, int length);

extern int
copyoutmap (vm_map_t map, char *fromaddr, char *toaddr, int length);

extern vm_map_t	kernel_map;
extern vm_map_t	kernel_pageable_map;
extern vm_map_t ipc_kernel_map;

extern boolean_t projected_buffer_in_range(
        vm_map_t map,
        vm_offset_t start,
        vm_offset_t end);

/* Populate [start, end) with new wired pages from `object`. */
extern void kmem_alloc_pages(
        vm_object_t object,
        vm_offset_t offset,
        vm_offset_t start,
        vm_offset_t end,
        vm_prot_t protection);

/* Re-enter existing wired pages of `object` at [start, end). */
extern void kmem_remap_pages(
        vm_object_t object,
        vm_offset_t offset,
        vm_offset_t start,
        vm_offset_t end,
        vm_prot_t protection);

#endif	/* _VM_VM_KERN_H_ */
diff --git a/vm/vm_map.c b/vm/vm_map.c
new file mode 100644
index 0000000..e454bb2
--- /dev/null
+++ b/vm/vm_map.c
@@ -0,0 +1,5237 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_map.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Virtual memory mapping module.
+ */
+
+#include <kern/printf.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/vm_attributes.h>
+#include <mach/vm_param.h>
+#include <mach/vm_wire.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/kalloc.h>
+#include <kern/mach.server.h>
+#include <kern/list.h>
+#include <kern/rbtree.h>
+#include <kern/slab.h>
+#include <kern/mach4.server.h>
+#include <vm/pmap.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_resident.h>
+#include <vm/vm_kern.h>
+#include <vm/memory_object_proxy.h>
+#include <ipc/ipc_port.h>
+#include <string.h>
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#include <vm/vm_print.h>
+#endif /* MACH_KDB */
+
+/*
+ *	Macros to copy a vm_map_entry. We must be careful to correctly
+ *	manage the wired page count. vm_map_entry_copy() creates a new
+ *	map entry to the same memory - the wired count in the new entry
+ *	must be set to zero. vm_map_entry_copy_full() creates a new
+ *	entry that is identical to the old entry. This preserves the
+ *	wire count; it's used for map splitting and cache changing in
+ *	vm_map_copyout.
+ *
+ *	NOTE: both macros evaluate NEW and OLD more than once; pass
+ *	only side-effect-free expressions.
+ */
+#define vm_map_entry_copy(NEW,OLD) \
+MACRO_BEGIN \
+	*(NEW) = *(OLD); \
+	(NEW)->is_shared = FALSE; \
+	(NEW)->needs_wakeup = FALSE; \
+	(NEW)->in_transition = FALSE; \
+	(NEW)->wired_count = 0; \
+	(NEW)->wired_access = VM_PROT_NONE; \
+MACRO_END
+
+/* Verbatim copy: preserves wired_count and all transient state. */
+#define vm_map_entry_copy_full(NEW,OLD)	(*(NEW) = *(OLD))
+
+/*
+ * Virtual memory maps provide for the mapping, protection,
+ * and sharing of virtual memory objects. In addition,
+ * this module provides for an efficient virtual copy of
+ * memory from one map to another.
+ *
+ * Synchronization is required prior to most operations.
+ *
+ * Maps consist of an ordered doubly-linked list of simple
+ * entries; a hint and a red-black tree are used to speed up lookups.
+ *
+ * Sharing maps have been deleted from this version of Mach.
+ * All shared objects are now mapped directly into the respective
+ * maps. This requires a change in the copy on write strategy;
+ * the asymmetric (delayed) strategy is used for shared temporary
+ * objects instead of the symmetric (shadow) strategy. This is
+ * selected by the (new) use_shared_copy bit in the object. See
+ * vm_object_copy_temporary in vm_object.c for details. All maps
+ * are now "top level" maps (either task map, kernel map or submap
+ * of the kernel map).
+ *
+ * Since portions of maps are specified by start/end addresses,
+ * which may not align with existing map entries, all
+ * routines merely "clip" entries to these start/end values.
+ * [That is, an entry is split into two, bordering at a
+ * start or end value.] Note that these clippings may not
+ * always be necessary (as the two resulting entries are then
+ * not changed); however, the clipping is done for convenience.
+ * The entries can later be "glued back together" (coalesced).
+ *
+ * The symmetric (shadow) copy strategy implements virtual copy
+ * by copying VM object references from one map to
+ * another, and then marking both regions as copy-on-write.
+ * It is important to note that only one writeable reference
+ * to a VM object region exists in any map when this strategy
+ * is used -- this means that shadow object creation can be
+ * delayed until a write operation occurs. The asymmetric (delayed)
+ * strategy allows multiple maps to have writeable references to
+ * the same region of a vm object, and hence cannot delay creating
+ * its copy objects. See vm_object_copy_temporary() in vm_object.c.
+ * Copying of permanent objects is completely different; see
+ * vm_object_copy_strategically() in vm_object.c.
+ */
+
+struct kmem_cache	vm_map_cache;		/* cache for vm_map structures */
+struct kmem_cache	vm_map_entry_cache;	/* cache for vm_map_entry structures */
+struct kmem_cache	vm_map_copy_cache;	/* cache for vm_map_copy structures */
+
+/*
+ *	Placeholder object for submap operations.  This object is dropped
+ *	into the range by a call to vm_map_find, and removed when
+ *	vm_map_submap creates the submap.
+ */
+
+/* Statically allocated; initialized by vm_object_init (see vm_map_init). */
+static struct vm_object	vm_submap_object_store;
+vm_object_t		vm_submap_object = &vm_submap_object_store;
+
+/*
+ *	vm_map_init:
+ *
+ *	Initialize the vm_map module.  Must be called before
+ *	any other vm_map routines.
+ *
+ *	Map and entry structures are allocated from caches -- we must
+ *	initialize those caches.
+ *
+ *	There are two caches of interest:
+ *
+ *	vm_map_cache:		used to allocate maps.
+ *	vm_map_entry_cache:	used to allocate map entries.
+ *
+ *	We make sure the map entry cache allocates memory directly from the
+ *	physical allocator to avoid recursion with this module.
+ */
+
+void vm_map_init(void)
+{
+	kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
+			NULL, 0);
+	/*
+	 *	KMEM_CACHE_PHYSMEM: entry allocation must never recurse
+	 *	into vm_map itself, so back this cache with physical
+	 *	pages directly.
+	 */
+	kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
+			sizeof(struct vm_map_entry), 0, NULL,
+			KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_PHYSMEM);
+	kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
+			sizeof(struct vm_map_copy), 0, NULL, 0);
+
+	/*
+	 * Submap object is initialized by vm_object_init.
+	 */
+}
+
+/*
+ *	vm_map_setup:
+ *
+ *	Initialize an already-allocated map structure: empty entry
+ *	list, empty lookup and gap trees, address bounds [min, max),
+ *	a single reference, and the given physical map.  The map's
+ *	locks are initialized; the map is returned unlocked.
+ */
+void vm_map_setup(
+	vm_map_t	map,
+	pmap_t		pmap,
+	vm_offset_t	min,
+	vm_offset_t	max)
+{
+	/* An empty map's list head points at the map header itself. */
+	vm_map_first_entry(map) = vm_map_to_entry(map);
+	vm_map_last_entry(map) = vm_map_to_entry(map);
+	map->hdr.nentries = 0;
+	rbtree_init(&map->hdr.tree);
+	rbtree_init(&map->hdr.gap_tree);
+
+	map->size = 0;
+	map->size_wired = 0;
+	map->ref_count = 1;
+	map->pmap = pmap;
+	map->min_offset = min;
+	map->max_offset = max;
+	map->wiring_required = FALSE;
+	map->wait_for_space = FALSE;
+	map->first_free = vm_map_to_entry(map);
+	map->hint = vm_map_to_entry(map);
+	map->name = NULL;
+	vm_map_lock_init(map);
+	simple_lock_init(&map->ref_lock);
+	simple_lock_init(&map->hint_lock);
+}
+
+/*
+ *	vm_map_create:
+ *
+ *	Creates and returns a new empty VM map with
+ *	the given physical map structure, and having
+ *	the given lower and upper address bounds.
+ */
+vm_map_t vm_map_create(
+	pmap_t		pmap,
+	vm_offset_t	min,
+	vm_offset_t	max)
+{
+	vm_map_t	result;
+
+	result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
+	if (result == VM_MAP_NULL)
+		return VM_MAP_NULL;	/* allocation failed; caller must check */
+
+	vm_map_setup(result, pmap, min, max);
+
+	return(result);
+}
+
+/*
+ *	vm_map_lock:
+ *
+ *	Write-lock the map.  Also grants the current thread temporary
+ *	VM privilege (see below) and bumps the map timestamp, which
+ *	lookup code uses to detect changes across an unlock/relock.
+ */
+void vm_map_lock(struct vm_map *map)
+{
+	lock_write(&map->lock);
+
+	/*
+	 *	XXX Memory allocation may occur while a map is locked,
+	 *	for example when clipping entries. If the system is running
+	 *	low on memory, allocating may block until pages are
+	 *	available. But if a map used by the default pager is
+	 *	kept locked, a deadlock occurs.
+	 *
+	 *	This workaround temporarily elevates the current thread
+	 *	VM privileges to avoid that particular deadlock, and does
+	 *	so regardless of the map for convenience, and because it's
+	 *	currently impossible to predict which map the default pager
+	 *	may depend on.
+	 *
+	 *	This workaround isn't reliable, and only makes exhaustion
+	 *	less likely. In particular pageout may cause lots of data
+	 *	to be passed between the kernel and the pagers, often
+	 *	in the form of large copy maps. Making the minimum
+	 *	number of pages depend on the total number of pages
+	 *	should make exhaustion even less likely.
+	 */
+
+	if (current_thread()) {
+		current_thread()->vm_privilege++;
+		/* Counter must not wrap: lock/unlock pairs must balance. */
+		assert(current_thread()->vm_privilege != 0);
+	}
+
+	map->timestamp++;
+}
+
+/*
+ *	vm_map_unlock:
+ *
+ *	Drop the write lock and the temporary VM privilege taken
+ *	in vm_map_lock.
+ */
+void vm_map_unlock(struct vm_map *map)
+{
+	if (current_thread()) {
+		current_thread()->vm_privilege--;
+	}
+
+	lock_write_done(&map->lock);
+}
+
+/*
+ *	vm_map_entry_create:	[ internal use only ]
+ *
+ *	Allocates a VM map entry for insertion in the
+ *	given map (or map copy).  No fields are filled.
+ *	Panics instead of returning NULL on exhaustion.
+ */
+#define vm_map_entry_create(map) \
+	    _vm_map_entry_create(&(map)->hdr)
+
+#define vm_map_copy_entry_create(copy) \
+	    _vm_map_entry_create(&(copy)->cpy_hdr)
+
+static vm_map_entry_t
+_vm_map_entry_create(const struct vm_map_header *map_header)
+{
+	vm_map_entry_t	entry;
+
+	/*
+	 *	All entries come from a single cache; the header is
+	 *	unused.  Keep the parameter for symmetry with
+	 *	_vm_map_entry_dispose, and discard it explicitly the
+	 *	same way that function does, so -Wunused-parameter
+	 *	stays quiet.
+	 */
+	(void)map_header;
+
+	entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
+	if (entry == VM_MAP_ENTRY_NULL)
+		panic("vm_map_entry_create");
+
+	return(entry);
+}
+
+/*
+ *	vm_map_entry_dispose:	[ internal use only ]
+ *
+ *	Inverse of vm_map_entry_create.
+ */
+#define vm_map_entry_dispose(map, entry) \
+	_vm_map_entry_dispose(&(map)->hdr, (entry))
+
+/*
+ *	The first parameter was formerly named "map" while the
+ *	expansion referenced "copy", silently capturing whichever
+ *	variable named "copy" happened to exist at the call site.
+ *	Name the parameter "copy" so the macro uses its actual
+ *	argument.
+ */
+#define vm_map_copy_entry_dispose(copy, entry) \
+	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
+
+static void
+_vm_map_entry_dispose(const struct vm_map_header *map_header,
+		      vm_map_entry_t entry)
+{
+	(void)map_header;	/* single shared cache; header unused */
+
+	kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
+}
+
+/*
+ *	Red-black tree lookup/insert comparison functions
+ *
+ *	Three-way compare of an address against an entry's range:
+ *	negative below vme_start, zero inside [vme_start, vme_end),
+ *	positive at or above vme_end.
+ */
+static inline int vm_map_entry_cmp_lookup(vm_offset_t addr,
+					  const struct rbtree_node *node)
+{
+	struct vm_map_entry *entry;
+
+	entry = rbtree_entry(node, struct vm_map_entry, tree_node);
+
+	if (addr < entry->vme_start)
+		return -1;
+	else if (addr < entry->vme_end)
+		return 0;
+	else
+		return 1;
+}
+
+/*
+ *	Insert-time comparator: orders entries by start address using
+ *	the lookup comparator above (ranges never overlap).
+ */
+static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
+					  const struct rbtree_node *b)
+{
+	struct vm_map_entry *entry;
+
+	entry = rbtree_entry(a, struct vm_map_entry, tree_node);
+	return vm_map_entry_cmp_lookup(entry->vme_start, b);
+}
+
+/*
+ *	Gap management functions
+ *
+ *	The gap tree orders entries by the size of the free gap that
+ *	follows each of them; equal-size gaps compare equal and are
+ *	chained on a per-node list (see vm_map_gap_insert_single).
+ */
+static inline int vm_map_entry_gap_cmp_lookup(vm_size_t gap_size,
+					      const struct rbtree_node *node)
+{
+	struct vm_map_entry *entry;
+
+	entry = rbtree_entry(node, struct vm_map_entry, gap_node);
+
+	if (gap_size < entry->gap_size)
+		return -1;
+	else if (gap_size == entry->gap_size)
+		return 0;
+	else
+		return 1;
+}
+
+/*
+ *	Insert-time comparator for the gap tree: orders by gap size.
+ */
+static inline int vm_map_entry_gap_cmp_insert(const struct rbtree_node *a,
+					      const struct rbtree_node *b)
+{
+	struct vm_map_entry *entry;
+
+	entry = rbtree_entry(a, struct vm_map_entry, gap_node);
+	return vm_map_entry_gap_cmp_lookup(entry->gap_size, b);
+}
+
+/*
+ *	True unless the entry is actually the map header sentinel,
+ *	which carries no gap of its own.
+ */
+static int
+vm_map_gap_valid(struct vm_map_header *hdr, struct vm_map_entry *entry)
+{
+	return entry != (struct vm_map_entry *)&hdr->links;
+}
+
+/*
+ *	Recompute entry->gap_size: the free space between this entry's
+ *	end and the start of the next entry (or the end of the map's
+ *	address range when this is the last entry).
+ */
+static void
+vm_map_gap_compute(struct vm_map_header *hdr, struct vm_map_entry *entry)
+{
+	struct vm_map_entry *next;
+
+	next = entry->vme_next;
+
+	if (vm_map_gap_valid(hdr, next)) {
+		entry->gap_size = next->vme_start - entry->vme_end;
+	} else {
+		entry->gap_size = hdr->vme_end - entry->vme_end;
+	}
+}
+
+/*
+ *	Recompute this entry's gap and publish it in the gap tree.
+ *	Zero-size gaps are not tracked.  Entries whose gap size equals
+ *	an existing tree node's are chained on that node's gap_list
+ *	instead of the tree; in_gap_tree records which case applies.
+ */
+static void
+vm_map_gap_insert_single(struct vm_map_header *hdr, struct vm_map_entry *entry)
+{
+	struct vm_map_entry *tmp;
+	struct rbtree_node *node;
+	unsigned long slot;	/* written by rbtree_lookup_slot */
+
+	if (!vm_map_gap_valid(hdr, entry)) {
+		return;
+	}
+
+	vm_map_gap_compute(hdr, entry);
+
+	if (entry->gap_size == 0) {
+		return;
+	}
+
+	node = rbtree_lookup_slot(&hdr->gap_tree, entry->gap_size,
+				  vm_map_entry_gap_cmp_lookup, slot);
+
+	if (node == NULL) {
+		/* First gap of this size: become the tree representative. */
+		rbtree_insert_slot(&hdr->gap_tree, slot, &entry->gap_node);
+		list_init(&entry->gap_list);
+		entry->in_gap_tree = 1;
+	} else {
+		/* Same-size gap already in the tree: chain behind it. */
+		tmp = rbtree_entry(node, struct vm_map_entry, gap_node);
+		list_insert_tail(&tmp->gap_list, &entry->gap_list);
+		entry->in_gap_tree = 0;
+	}
+}
+
+/*
+ *	Withdraw this entry's gap from the gap tree.  If the entry was
+ *	the tree representative for its size and other entries share
+ *	that size, promote the first chained entry into the tree in
+ *	its place.
+ */
+static void
+vm_map_gap_remove_single(struct vm_map_header *hdr, struct vm_map_entry *entry)
+{
+	struct vm_map_entry *tmp;
+
+	if (!vm_map_gap_valid(hdr, entry)) {
+		return;
+	}
+
+	if (entry->gap_size == 0) {
+		return;		/* zero gaps were never published */
+	}
+
+	if (!entry->in_gap_tree) {
+		/* Only chained on a representative's list: unchain. */
+		list_remove(&entry->gap_list);
+		return;
+	}
+
+	rbtree_remove(&hdr->gap_tree, &entry->gap_node);
+
+	if (list_empty(&entry->gap_list)) {
+		return;
+	}
+
+	/* Promote a same-size sibling as the new representative. */
+	tmp = list_first_entry(&entry->gap_list, struct vm_map_entry, gap_list);
+	assert(tmp->gap_size == entry->gap_size);
+	list_remove(&tmp->gap_list);
+	list_set_head(&tmp->gap_list, &entry->gap_list);
+	assert(!tmp->in_gap_tree);
+	rbtree_insert(&hdr->gap_tree, &tmp->gap_node,
+		      vm_map_entry_gap_cmp_insert);
+	tmp->in_gap_tree = 1;
+}
+
+/*
+ *	Re-publish an entry's gap after its end (or its successor's
+ *	start) has moved.
+ */
+static void
+vm_map_gap_update(struct vm_map_header *hdr, struct vm_map_entry *entry)
+{
+	vm_map_gap_remove_single(hdr, entry);
+	vm_map_gap_insert_single(hdr, entry);
+}
+
+/*
+ *	Gap bookkeeping for a newly linked entry: the predecessor's
+ *	gap shrinks (recompute it) and the new entry's gap appears.
+ */
+static void
+vm_map_gap_insert(struct vm_map_header *hdr, struct vm_map_entry *entry)
+{
+	vm_map_gap_remove_single(hdr, entry->vme_prev);
+	vm_map_gap_insert_single(hdr, entry->vme_prev);
+	vm_map_gap_insert_single(hdr, entry);
+}
+
+/*
+ *	Gap bookkeeping for an entry being unlinked: drop its gap and
+ *	recompute the predecessor's, which absorbs the freed range.
+ */
+static void
+vm_map_gap_remove(struct vm_map_header *hdr, struct vm_map_entry *entry)
+{
+	vm_map_gap_remove_single(hdr, entry);
+	vm_map_gap_remove_single(hdr, entry->vme_prev);
+	vm_map_gap_insert_single(hdr, entry->vme_prev);
+}
+
+/*
+ *	vm_map_entry_{un,}link:
+ *
+ *	Insert/remove entries from maps (or map copies).
+ *
+ *	The start and end addresses of the entries must be properly set
+ *	before using these macros.
+ *
+ *	The trailing link_gap/unlink_gap flag selects whether gap-tree
+ *	bookkeeping is performed: real maps maintain the gap tree, map
+ *	copies do not.
+ */
+#define vm_map_entry_link(map, after_where, entry)	\
+	_vm_map_entry_link(&(map)->hdr, after_where, entry, 1)
+
+#define vm_map_copy_entry_link(copy, after_where, entry)	\
+	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, entry, 0)
+
+#define _vm_map_entry_link(hdr, after_where, entry, link_gap)	\
+	MACRO_BEGIN						\
+	(hdr)->nentries++;					\
+	(entry)->vme_prev = (after_where);			\
+	(entry)->vme_next = (after_where)->vme_next;		\
+	(entry)->vme_prev->vme_next =				\
+	(entry)->vme_next->vme_prev = (entry);			\
+	rbtree_insert(&(hdr)->tree, &(entry)->tree_node,	\
+		      vm_map_entry_cmp_insert);			\
+	if (link_gap)						\
+		vm_map_gap_insert((hdr), (entry));		\
+	MACRO_END
+
+#define vm_map_entry_unlink(map, entry)		\
+	_vm_map_entry_unlink(&(map)->hdr, entry, 1)
+
+#define vm_map_copy_entry_unlink(copy, entry)	\
+	_vm_map_entry_unlink(&(copy)->cpy_hdr, entry, 0)
+
+#define _vm_map_entry_unlink(hdr, entry, unlink_gap)	\
+	MACRO_BEGIN					\
+	(hdr)->nentries--;				\
+	(entry)->vme_next->vme_prev = (entry)->vme_prev; \
+	(entry)->vme_prev->vme_next = (entry)->vme_next; \
+	rbtree_remove(&(hdr)->tree, &(entry)->tree_node); \
+	if (unlink_gap)					\
+		vm_map_gap_remove((hdr), (entry));	\
+	MACRO_END
+
+/*
+ *	vm_map_reference:
+ *
+ *	Creates another valid reference to the given map.
+ *	VM_MAP_NULL is tolerated and ignored.
+ */
+void vm_map_reference(vm_map_t map)
+{
+	if (map == VM_MAP_NULL)
+		return;
+
+	simple_lock(&map->ref_lock);
+	map->ref_count++;
+	simple_unlock(&map->ref_lock);
+}
+
+/*
+ *	vm_map_deallocate:
+ *
+ *	Removes a reference from the specified map,
+ *	destroying it if no references remain.
+ *	The map should not be locked.
+ */
+void vm_map_deallocate(vm_map_t map)
+{
+	int		c;
+
+	if (map == VM_MAP_NULL)
+		return;
+
+	simple_lock(&map->ref_lock);
+	c = --map->ref_count;
+	simple_unlock(&map->ref_lock);
+
+	if (c > 0) {
+		return;
+	}
+
+	/*
+	 *	Last reference is gone: reclaim projected buffers,
+	 *	tear down every entry, then destroy the pmap and
+	 *	return the map structure to its cache.
+	 */
+	projected_buffer_collect(map);
+	(void) vm_map_delete(map, map->min_offset, map->max_offset);
+
+	pmap_destroy(map->pmap);
+
+	kmem_cache_free(&vm_map_cache, (vm_offset_t) map);
+}
+
+/*
+ *	SAVE_HINT:
+ *
+ *	Saves the specified entry as the hint for
+ *	future lookups.  Performs necessary interlocks.
+ *
+ *	Wrapped in MACRO_BEGIN/MACRO_END (like the other multi-
+ *	statement macros in this file) so the expansion behaves as a
+ *	single statement inside unbraced if/else bodies.
+ */
+#define	SAVE_HINT(map,value) \
+MACRO_BEGIN \
+		simple_lock(&(map)->hint_lock); \
+		(map)->hint = (value); \
+		simple_unlock(&(map)->hint_lock); \
+MACRO_END
+
+/*
+ *	vm_map_lookup_entry:	[ internal use only ]
+ *
+ *	Finds the map entry containing (or
+ *	immediately preceding) the specified address
+ *	in the given map; the entry is returned
+ *	in the "entry" parameter.  The boolean
+ *	result indicates whether the address is
+ *	actually contained in the map.
+ */
+boolean_t vm_map_lookup_entry(
+	vm_map_t	map,
+	vm_offset_t	address,
+	vm_map_entry_t	*entry)		/* OUT */
+{
+	struct rbtree_node	*node;
+	vm_map_entry_t		hint;
+
+	/*
+	 *	First, make a quick check to see if we are already
+	 *	looking at the entry we want (which is often the case).
+	 */
+
+	/* Snapshot the hint under its interlock. */
+	simple_lock(&map->hint_lock);
+	hint = map->hint;
+	simple_unlock(&map->hint_lock);
+
+	if ((hint != vm_map_to_entry(map)) && (address >= hint->vme_start)) {
+		if (address < hint->vme_end) {
+			*entry = hint;
+			return(TRUE);
+		} else {
+			vm_map_entry_t next = hint->vme_next;
+
+			/*
+			 *	Hint precedes the address and its successor
+			 *	lies beyond it: the hint is the predecessor.
+			 */
+			if ((next == vm_map_to_entry(map))
+			    || (address < next->vme_start)) {
+				*entry = hint;
+				return(FALSE);
+			}
+		}
+	}
+
+	/*
+	 *	If the hint didn't help, use the red-black tree.
+	 */
+
+	/* Nearest-left: entry containing address, or closest one below. */
+	node = rbtree_lookup_nearest(&map->hdr.tree, address,
+				     vm_map_entry_cmp_lookup, RBTREE_LEFT);
+
+	if (node == NULL) {
+		*entry = vm_map_to_entry(map);
+		SAVE_HINT(map, *entry);
+		return(FALSE);
+	} else {
+		*entry = rbtree_entry(node, struct vm_map_entry, tree_node);
+		SAVE_HINT(map, *entry);
+		return((address < (*entry)->vme_end) ? TRUE : FALSE);
+	}
+}
+
+/*
+ * Find a range of available space from the specified map.
+ *
+ * If successful, this function returns the map entry immediately preceding
+ * the range, and writes the range address in startp. If the map contains
+ * no entry, the entry returned points to the map header.
+ * Otherwise, NULL is returned.
+ *
+ * If map_locked is true, this function will not wait for more space in case
+ * of failure. Otherwise, the map is locked.
+ *
+ * On every failure path reached after the lock is taken, the map is
+ * returned locked; callers are expected to unlock it.
+ */
+static struct vm_map_entry *
+vm_map_find_entry_anywhere(struct vm_map *map,
+			   vm_size_t size,
+			   vm_offset_t mask,
+			   boolean_t map_locked,
+			   vm_offset_t *startp)
+{
+	struct vm_map_entry *entry;
+	struct rbtree_node *node;
+	vm_size_t max_size;
+	vm_offset_t start, end;
+	vm_offset_t max;
+
+	assert(size != 0);
+
+	max = map->max_offset;
+	if (((mask + 1) & mask) != 0) {
+		/* We have high bits in addition to the low bits */
+
+		int first0 = __builtin_ffs(~mask);	/* First zero after low bits */
+		vm_offset_t lowmask = (1UL << (first0-1)) - 1;	/* low bits */
+		vm_offset_t himask = mask - lowmask;	/* high bits */
+		int second1 = __builtin_ffs(himask);	/* First one after low bits */
+
+		/* High bits bound the address range instead of aligning it. */
+		max = 1UL << (second1-1);
+
+		if (himask + max != 0) {
+			/* high bits do not continue up to the end */
+			printf("invalid mask %zx\n", mask);
+			/*
+			 * NOTE(review): when map_locked is FALSE the map has
+			 * not been locked yet on this path, yet callers such
+			 * as vm_map_enter unlock on a NULL return -- confirm
+			 * the lock balance here.
+			 */
+			return NULL;
+		}
+
+		mask = lowmask;
+	}
+
+	if (!map_locked) {
+		vm_map_lock(map);
+	}
+
+restart:
+	if (map->hdr.nentries == 0) {
+		/* Empty map: allocate at the aligned bottom of the range. */
+		entry = vm_map_to_entry(map);
+		start = (map->min_offset + mask) & ~mask;
+		end = start + size;
+
+		if ((start < map->min_offset) || (end <= start) || (end > max)) {
+			goto error;
+		}
+
+		*startp = start;
+		return entry;
+	}
+
+	/* Fast path: try the cached first-free entry before the gap tree. */
+	entry = map->first_free;
+
+	if (entry != vm_map_to_entry(map)) {
+		start = (entry->vme_end + mask) & ~mask;
+		end = start + size;
+
+		if ((start >= entry->vme_end)
+		    && (end > start)
+		    && (end <= max)
+		    && (end <= (entry->vme_end + entry->gap_size))) {
+			*startp = start;
+			return entry;
+		}
+	}
+
+	/* Worst case: alignment may consume up to "mask" extra bytes. */
+	max_size = size + mask;
+
+	if (max_size < size) {
+		printf("max_size %zd got smaller than size %zd with mask %zd\n",
+		       max_size, size, mask);
+		goto error;
+	}
+
+	/* Smallest gap that can hold max_size (best fit, rightward). */
+	node = rbtree_lookup_nearest(&map->hdr.gap_tree, max_size,
+				     vm_map_entry_gap_cmp_lookup, RBTREE_RIGHT);
+
+	if (node == NULL) {
+		if (map_locked || !map->wait_for_space) {
+			goto error;
+		}
+
+		/* Sleep until space is freed, then retry from scratch. */
+		assert_wait((event_t)map, TRUE);
+		vm_map_unlock(map);
+		thread_block(NULL);
+		vm_map_lock(map);
+		goto restart;
+	}
+
+	entry = rbtree_entry(node, struct vm_map_entry, gap_node);
+	assert(entry->in_gap_tree);
+
+	/* Prefer a chained same-size gap to keep the tree node stable. */
+	if (!list_empty(&entry->gap_list)) {
+		entry = list_last_entry(&entry->gap_list,
+					struct vm_map_entry, gap_list);
+	}
+
+	assert(entry->gap_size >= max_size);
+	start = (entry->vme_end + mask) & ~mask;
+	assert(start >= entry->vme_end);
+	end = start + size;
+	assert(end > start);
+	assert(end <= (entry->vme_end + entry->gap_size));
+	if (end > max) {
+		/* Does not respect the allowed maximum */
+		printf("%zx does not respect %zx\n", end, max);
+		return NULL;
+	}
+	*startp = start;
+	return entry;
+
+error:
+	printf("no more room in %p (%s)\n", map, map->name);
+	return NULL;
+}
+
+/*
+ *	Routine:	vm_map_find_entry
+ *	Purpose:
+ *		Allocate a range in the specified virtual address map,
+ *		returning the entry allocated for that range.
+ *		Used by kmem_alloc, etc.  Returns wired entries.
+ *
+ *		The map must be locked.
+ *
+ *		If an entry is allocated, the object/offset fields
+ *		are initialized to zero.  If an object is supplied,
+ *		then an existing entry may be extended.
+ */
+kern_return_t vm_map_find_entry(
+	vm_map_t	map,
+	vm_offset_t	*address,	/* OUT */
+	vm_size_t	size,
+	vm_offset_t	mask,
+	vm_object_t	object,
+	vm_map_entry_t	*o_entry)	/* OUT */
+{
+	vm_map_entry_t	entry, new_entry;
+	vm_offset_t	start;
+	vm_offset_t	end;
+
+	/* map_locked == TRUE: never sleeps waiting for space. */
+	entry = vm_map_find_entry_anywhere(map, size, mask, TRUE, &start);
+
+	if (entry == NULL) {
+		return KERN_NO_SPACE;
+	}
+
+	end = start + size;
+
+	/*
+	 *	At this point,
+	 *		"start" and "end" should define the endpoints of the
+	 *			available new range, and
+	 *		"entry" should refer to the region before the new
+	 *			range, and
+	 *
+	 *		the map should be locked.
+	 */
+
+	*address = start;
+
+	/*
+	 *	See whether we can avoid creating a new entry by
+	 *	extending one of our neighbors.  [So far, we only attempt to
+	 *	extend from below.]
+	 */
+
+	if ((object != VM_OBJECT_NULL) &&
+	    (entry != vm_map_to_entry(map)) &&
+	    (entry->vme_end == start) &&
+	    (!entry->is_shared) &&
+	    (!entry->is_sub_map) &&
+	    (entry->object.vm_object == object) &&
+	    (entry->needs_copy == FALSE) &&
+	    (entry->inheritance == VM_INHERIT_DEFAULT) &&
+	    (entry->protection == VM_PROT_DEFAULT) &&
+	    (entry->max_protection == VM_PROT_ALL) &&
+	    (entry->wired_count != 0) &&
+	    (entry->projected_on == 0)) {
+		/*
+		 *	Because this is a special case,
+		 *	we don't need to use vm_object_coalesce.
+		 */
+
+		entry->vme_end = end;
+		vm_map_gap_update(&map->hdr, entry);
+		new_entry = entry;
+	} else {
+		new_entry = vm_map_entry_create(map);
+
+		new_entry->vme_start = start;
+		new_entry->vme_end = end;
+
+		new_entry->is_shared = FALSE;
+		new_entry->is_sub_map = FALSE;
+		new_entry->object.vm_object = VM_OBJECT_NULL;
+		new_entry->offset = (vm_offset_t) 0;
+
+		new_entry->needs_copy = FALSE;
+
+		new_entry->inheritance = VM_INHERIT_DEFAULT;
+		new_entry->protection = VM_PROT_DEFAULT;
+		new_entry->max_protection = VM_PROT_ALL;
+		/* Kernel allocations come back wired. */
+		new_entry->wired_count = 1;
+		new_entry->wired_access = VM_PROT_DEFAULT;
+
+		new_entry->in_transition = FALSE;
+		new_entry->needs_wakeup = FALSE;
+		new_entry->projected_on = 0;
+
+		/*
+		 *	Insert the new entry into the list
+		 */
+
+		vm_map_entry_link(map, entry, new_entry);
+	}
+
+	map->size += size;
+
+	/*
+	 *	Update the free space hint and the lookup hint
+	 */
+
+	map->first_free = new_entry;
+	SAVE_HINT(map, new_entry);
+
+	*o_entry = new_entry;
+	return(KERN_SUCCESS);
+}
+
+/* Debug knobs: trace and enable eager pmap pre-loading (off by default). */
+boolean_t vm_map_pmap_enter_print = FALSE;
+boolean_t vm_map_pmap_enter_enable = FALSE;
+
+/*
+ *	Routine:	vm_map_pmap_enter
+ *
+ *	Description:
+ *		Force pages from the specified object to be entered into
+ *		the pmap at the specified address if they are present.
+ *		As soon as a page not found in the object the scan ends.
+ *
+ *	Returns:
+ *		Nothing.
+ *
+ *	In/out conditions:
+ *		The source map should not be locked on entry.
+ */
+static void
+vm_map_pmap_enter(
+	vm_map_t	map,
+	vm_offset_t	addr,
+	vm_offset_t	end_addr,
+	vm_object_t	object,
+	vm_offset_t	offset,
+	vm_prot_t	protection)
+{
+	while (addr < end_addr) {
+		vm_page_t	m;
+
+		vm_object_lock(object);
+		vm_object_paging_begin(object);
+
+		m = vm_page_lookup(object, offset);
+		if (m == VM_PAGE_NULL || m->absent) {
+			/* First missing page ends the scan. */
+			vm_object_paging_end(object);
+			vm_object_unlock(object);
+			return;
+		}
+
+		if (vm_map_pmap_enter_print) {
+			printf("vm_map_pmap_enter:");
+			printf("map: %p, addr: %zx, object: %p, offset: %zx\n",
+			       map, addr, object, offset);
+		}
+
+		/* Mark busy so the page can't change while unlocked. */
+		m->busy = TRUE;
+		vm_object_unlock(object);
+
+		PMAP_ENTER(map->pmap, addr, m,
+			   protection, FALSE);
+
+		vm_object_lock(object);
+		PAGE_WAKEUP_DONE(m);
+		vm_page_lock_queues();
+		if (!m->active && !m->inactive)
+			vm_page_activate(m);
+		vm_page_unlock_queues();
+		vm_object_paging_end(object);
+		vm_object_unlock(object);
+
+		offset += PAGE_SIZE;
+		addr += PAGE_SIZE;
+	}
+}
+
+/*
+ *	Routine:	vm_map_enter
+ *
+ *	Description:
+ *		Allocate a range in the specified virtual address map.
+ *		The resulting range will refer to memory defined by
+ *		the given memory object and offset into that object.
+ *
+ *		Arguments are as defined in the vm_map call.
+ */
+kern_return_t vm_map_enter(
+	vm_map_t	map,
+	vm_offset_t	*address,	/* IN/OUT */
+	vm_size_t	size,
+	vm_offset_t	mask,
+	boolean_t	anywhere,
+	vm_object_t	object,
+	vm_offset_t	offset,
+	boolean_t	needs_copy,
+	vm_prot_t	cur_protection,
+	vm_prot_t	max_protection,
+	vm_inherit_t	inheritance)
+{
+	vm_map_entry_t	entry;
+	vm_map_entry_t	next_entry;
+	vm_offset_t	start;
+	vm_offset_t	end;
+	kern_return_t	result = KERN_SUCCESS;
+
+/* All paths after the map is locked exit through BailOut (unlocks). */
+#define	RETURN(value)	{ result = value; goto BailOut; }
+
+	if (size == 0)
+		return KERN_INVALID_ARGUMENT;
+
+	start = *address;
+
+	if (anywhere) {
+		/* Locks the map and picks a range; see NOTE in that routine. */
+		entry = vm_map_find_entry_anywhere(map, size, mask, FALSE, &start);
+
+		if (entry == NULL) {
+			RETURN(KERN_NO_SPACE);
+		}
+
+		end = start + size;
+		*address = start;
+		next_entry = entry->vme_next;
+	} else {
+		vm_map_entry_t	temp_entry;
+
+		/*
+		 *	Verify that:
+		 *		the address doesn't itself violate
+		 *		the mask requirement.
+		 */
+
+		if ((start & mask) != 0)
+			return(KERN_NO_SPACE);
+
+		vm_map_lock(map);
+
+		/*
+		 *	...	the address is within bounds
+		 */
+
+		end = start + size;
+
+		if ((start < map->min_offset) ||
+		    (end > map->max_offset) ||
+		    (start >= end)) {
+			RETURN(KERN_INVALID_ADDRESS);
+		}
+
+		/*
+		 *	...	the starting address isn't allocated
+		 */
+
+		if (vm_map_lookup_entry(map, start, &temp_entry))
+			RETURN(KERN_NO_SPACE);
+
+		entry = temp_entry;
+		next_entry = entry->vme_next;
+
+		/*
+		 *	...	the next region doesn't overlap the
+		 *		end point.
+		 */
+
+		if ((next_entry != vm_map_to_entry(map)) &&
+		    (next_entry->vme_start < end))
+			RETURN(KERN_NO_SPACE);
+	}
+
+	/*
+	 *	At this point,
+	 *		"start" and "end" should define the endpoints of the
+	 *			available new range, and
+	 *		"entry" should refer to the region before the new
+	 *			range, and
+	 *
+	 *		the map should be locked.
+	 */
+
+	/*
+	 *	See whether we can avoid creating a new entry (and object) by
+	 *	extending one of our neighbors.
+	 */
+
+	if ((entry != vm_map_to_entry(map)) &&
+	    (entry->vme_end == start) &&
+	    (!entry->is_shared) &&
+	    (!entry->is_sub_map) &&
+	    (entry->inheritance == inheritance) &&
+	    (entry->protection == cur_protection) &&
+	    (entry->max_protection == max_protection) &&
+	    (entry->wired_count == 0) &&
+	    (entry->projected_on == 0)) {
+		if (vm_object_coalesce(entry->object.vm_object,
+				object,
+				entry->offset,
+				offset,
+				(vm_size_t)(entry->vme_end - entry->vme_start),
+				size,
+				&entry->object.vm_object,
+				&entry->offset)) {
+
+			/*
+			 *	Coalesced the two objects - can extend
+			 *	the previous map entry to include the
+			 *	new range.
+			 */
+			map->size += size;
+			entry->vme_end = end;
+			vm_map_gap_update(&map->hdr, entry);
+			/*
+			 *	Now that we did, perhaps we could simplify
+			 *	things even further by coalescing the next
+			 *	entry into the one we just extended.
+			 */
+			vm_map_coalesce_entry(map, next_entry);
+			RETURN(KERN_SUCCESS);
+		}
+	}
+	if ((next_entry != vm_map_to_entry(map)) &&
+	    (next_entry->vme_start == end) &&
+	    (!next_entry->is_shared) &&
+	    (!next_entry->is_sub_map) &&
+	    (next_entry->inheritance == inheritance) &&
+	    (next_entry->protection == cur_protection) &&
+	    (next_entry->max_protection == max_protection) &&
+	    (next_entry->wired_count == 0) &&
+	    (next_entry->projected_on == 0)) {
+		if (vm_object_coalesce(object,
+			next_entry->object.vm_object,
+			offset,
+			next_entry->offset,
+			size,
+			(vm_size_t)(next_entry->vme_end - next_entry->vme_start),
+			&next_entry->object.vm_object,
+			&next_entry->offset)) {
+
+			/*
+			 *	Coalesced the two objects - can extend
+			 *	the next map entry to include the
+			 *	new range.
+			 */
+			map->size += size;
+			next_entry->vme_start = start;
+			/*
+			 *	Gap sizes live on the preceding entry,
+			 *	so it is "entry" that must be updated.
+			 */
+			vm_map_gap_update(&map->hdr, entry);
+			/*
+			 *	Now that we did, perhaps we could simplify
+			 *	things even further by coalescing the
+			 *	entry into the previous one.
+			 */
+			vm_map_coalesce_entry(map, next_entry);
+			RETURN(KERN_SUCCESS);
+		}
+	}
+
+	/*
+	 *	Create a new entry
+	 */
+
+	/**/ {
+	vm_map_entry_t	new_entry;
+
+	new_entry = vm_map_entry_create(map);
+
+	new_entry->vme_start = start;
+	new_entry->vme_end = end;
+
+	new_entry->is_shared = FALSE;
+	new_entry->is_sub_map = FALSE;
+	new_entry->object.vm_object = object;
+	new_entry->offset = offset;
+
+	new_entry->needs_copy = needs_copy;
+
+	new_entry->inheritance = inheritance;
+	new_entry->protection = cur_protection;
+	new_entry->max_protection = max_protection;
+	new_entry->wired_count = 0;
+	new_entry->wired_access = VM_PROT_NONE;
+
+	new_entry->in_transition = FALSE;
+	new_entry->needs_wakeup = FALSE;
+	new_entry->projected_on = 0;
+
+	/*
+	 *	Insert the new entry into the list
+	 */
+
+	vm_map_entry_link(map, entry, new_entry);
+	map->size += size;
+
+	/*
+	 *	Update the free space hint and the lookup hint
+	 */
+
+	if ((map->first_free == entry) &&
+	    ((entry == vm_map_to_entry(map) ? map->min_offset : entry->vme_end)
+	     >= new_entry->vme_start))
+		map->first_free = new_entry;
+
+	SAVE_HINT(map, new_entry);
+
+	if (map->wiring_required) {
+		/* Returns with the map read-locked if successful */
+		result = vm_map_pageable(map, start, end, cur_protection, FALSE, FALSE);
+
+		if (result != KERN_SUCCESS) {
+			/*
+			 *	NOTE(review): a wiring failure is reported
+			 *	to the caller as KERN_SUCCESS here --
+			 *	confirm this is intentional.
+			 */
+			RETURN(KERN_SUCCESS);
+		}
+	}
+
+	vm_map_unlock(map);
+
+	/* Optionally pre-load the pmap for small, fixed, direct mappings. */
+	if ((object != VM_OBJECT_NULL) &&
+	    (vm_map_pmap_enter_enable) &&
+	    (!anywhere) &&
+	    (!needs_copy) &&
+	    (size < (128*1024))) {
+		vm_map_pmap_enter(map, start, end,
+				  object, offset, cur_protection);
+	}
+
+	return(result);
+	/**/ }
+
+ BailOut: ;
+
+	vm_map_unlock(map);
+	return(result);
+
+#undef	RETURN
+}
+
+/*
+ *	vm_map_clip_start:	[ internal use only ]
+ *
+ *	Asserts that the given entry begins at or after
+ *	the specified address; if necessary,
+ *	it splits the entry into two.
+ */
+#define vm_map_clip_start(map, entry, startaddr) \
+	MACRO_BEGIN \
+	if ((startaddr) > (entry)->vme_start) \
+		_vm_map_clip_start(&(map)->hdr,(entry),(startaddr),1); \
+	MACRO_END
+
+#define vm_map_copy_clip_start(copy, entry, startaddr) \
+	MACRO_BEGIN \
+	if ((startaddr) > (entry)->vme_start) \
+		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr),0); \
+	MACRO_END
+
+/*
+ *	This routine is called only when it is known that
+ *	the entry must be split.
+ */
+void _vm_map_clip_start(
+	struct vm_map_header	*map_header,
+	vm_map_entry_t		entry,
+	vm_offset_t		start,
+	boolean_t		link_gap)
+{
+	vm_map_entry_t	new_entry;
+
+	/*
+	 *	Split off the front portion --
+	 *	note that we must insert the new
+	 *	entry BEFORE this one, so that
+	 *	this entry has the specified starting
+	 *	address.
+	 */
+
+	new_entry = _vm_map_entry_create(map_header);
+	vm_map_entry_copy_full(new_entry, entry);
+
+	new_entry->vme_end = start;
+	entry->offset += (start - entry->vme_start);
+	entry->vme_start = start;
+
+	_vm_map_entry_link(map_header, entry->vme_prev, new_entry, link_gap);
+
+	/* Both halves now reference the same backing object/submap. */
+	if (entry->is_sub_map)
+		vm_map_reference(new_entry->object.sub_map);
+	else
+		vm_object_reference(new_entry->object.vm_object);
+}
+
+/*
+ * vm_map_clip_end: [ internal use only ]
+ *
+ * Asserts that the given entry ends at or before
+ * the specified address; if necessary,
+ * it splits the entry into two.
+ */
+#define vm_map_clip_end(map, entry, endaddr) \
+ MACRO_BEGIN \
+ if ((endaddr) < (entry)->vme_end) \
+ _vm_map_clip_end(&(map)->hdr,(entry),(endaddr),1); \
+ MACRO_END
+
+/*
+ * vm_map_copy_clip_end: [ internal use only ]
+ *
+ * Same as vm_map_clip_end, but operates on the entry list of a
+ * vm_map_copy object. Passes link_gap 0 where maps pass 1 —
+ * presumably copy headers do not track address gaps; confirm
+ * against _vm_map_entry_link().
+ */
+#define vm_map_copy_clip_end(copy, entry, endaddr) \
+ MACRO_BEGIN \
+ if ((endaddr) < (entry)->vme_end) \
+ _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr),0); \
+ MACRO_END
+
+/*
+ * This routine is called only when it is known that
+ * the entry must be split.
+ *
+ * Splits off the portion of `entry' above `end' into a new entry
+ * inserted just after it, so that on return `entry' itself ends
+ * exactly at `end'.
+ */
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end,
+ boolean_t link_gap)
+{
+ vm_map_entry_t new_entry;
+
+ /*
+ * Create a new entry and insert it
+ * AFTER the specified entry
+ */
+
+ new_entry = _vm_map_entry_create(map_header);
+ vm_map_entry_copy_full(new_entry, entry);
+
+ /* Only entry->vme_end is changed here; entry->vme_start still
+ * holds the original start, so the offset adjustment below is the
+ * size of the retained front portion. */
+ new_entry->vme_start = entry->vme_end = end;
+ new_entry->offset += (end - entry->vme_start);
+
+ _vm_map_entry_link(map_header, entry, new_entry, link_gap);
+
+ /* Both halves now reference the same object (or submap), so take
+ * an extra reference for the new entry. */
+ if (entry->is_sub_map)
+ vm_map_reference(new_entry->object.sub_map);
+ else
+ vm_object_reference(new_entry->object.vm_object);
+}
+
+/*
+ * VM_MAP_RANGE_CHECK: [ internal use only ]
+ *
+ * Asserts that the starting and ending region
+ * addresses fall within the valid range of the map,
+ * clamping them (and forcing start <= end) when they don't.
+ *
+ * Wrapped in MACRO_BEGIN/MACRO_END (do { } while (0)) like the
+ * other multi-statement macros in this file, so the expansion is a
+ * single statement and remains safe inside an unbraced if/else.
+ */
+#define VM_MAP_RANGE_CHECK(map, start, end) \
+ MACRO_BEGIN \
+ if (start < vm_map_min(map)) \
+ start = vm_map_min(map); \
+ if (end > vm_map_max(map)) \
+ end = vm_map_max(map); \
+ if (start > end) \
+ start = end; \
+ MACRO_END
+
+/*
+ * vm_map_submap: [ kernel use only ]
+ *
+ * Mark the given range as handled by a subordinate map.
+ *
+ * This range must have been created with vm_map_find using
+ * the vm_submap_object, and no other operations may have been
+ * performed on this range prior to calling vm_map_submap.
+ *
+ * Only a limited number of operations can be performed
+ * within this range after calling vm_map_submap:
+ * vm_fault
+ * [Don't try vm_map_copyin!]
+ *
+ * To remove a submapping, one must first remove the
+ * range from the superior map, and then destroy the
+ * submap (if desired). [Better yet, don't try it.]
+ *
+ * Returns KERN_SUCCESS if the replacement was made, and
+ * KERN_INVALID_ARGUMENT if the range is not a single untouched
+ * vm_submap_object mapping.
+ */
+kern_return_t vm_map_submap(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_map_t submap)
+{
+ vm_map_entry_t entry;
+ kern_return_t result = KERN_INVALID_ARGUMENT;
+ vm_object_t object;
+
+ vm_map_lock(map);
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ /* Find the first entry of the range and clip it at `start'. */
+ if (vm_map_lookup_entry(map, start, &entry)) {
+ vm_map_clip_start(map, entry, start);
+ }
+ else
+ entry = entry->vme_next;
+
+ vm_map_clip_end(map, entry, end);
+
+ /*
+ * The replacement is only legal when the range is covered by a
+ * single non-submap entry mapping a pristine vm_submap_object:
+ * no resident pages, no copy or shadow chain, and no pager.
+ */
+ if ((entry->vme_start == start) && (entry->vme_end == end) &&
+ (!entry->is_sub_map) &&
+ ((object = entry->object.vm_object) == vm_submap_object) &&
+ (object->resident_page_count == 0) &&
+ (object->copy == VM_OBJECT_NULL) &&
+ (object->shadow == VM_OBJECT_NULL) &&
+ (!object->pager_created)) {
+ /* Swap the placeholder object for the submap reference. */
+ entry->object.vm_object = VM_OBJECT_NULL;
+ vm_object_deallocate(object);
+ entry->is_sub_map = TRUE;
+ vm_map_reference(entry->object.sub_map = submap);
+ result = KERN_SUCCESS;
+ }
+ vm_map_unlock(map);
+
+ return(result);
+}
+
+static void
+vm_map_entry_inc_wired(vm_map_t map, vm_map_entry_t entry)
+{
+ /*
+ * The wired count only tracks whether the entry still needs to
+ * be faulted in: the first wiring raises it from 0 to 1, and any
+ * further wiring saturates it at 2. The wired-size accounting
+ * is charged exactly once, on the 0 -> 1 transition.
+ */
+ if (entry->wired_count <= 1) {
+ if (entry->wired_count == 0)
+ map->size_wired += entry->vme_end - entry->vme_start;
+
+ entry->wired_count++;
+ }
+}
+
+static void
+vm_map_entry_reset_wired(vm_map_t map, vm_map_entry_t entry)
+{
+ /* Drop the wired-size accounting and clear the count; a no-op
+ * for entries that were never wired. */
+ if (entry->wired_count == 0)
+ return;
+
+ map->size_wired -= entry->vme_end - entry->vme_start;
+ entry->wired_count = 0;
+}
+
+/*
+ * vm_map_pageable_scan: scan entries and update wiring as appropriate
+ *
+ * This function is used by the VM system after either the wiring
+ * access or protection of a mapping changes. It scans part or
+ * all the entries of a map, and either wires, unwires, or skips
+ * entries depending on their state.
+ *
+ * The map must be locked. If wiring faults are performed, the lock
+ * is downgraded to a read lock. The caller should always consider
+ * the map read locked on return.
+ */
+static void
+vm_map_pageable_scan(struct vm_map *map,
+ struct vm_map_entry *start,
+ struct vm_map_entry *end)
+{
+ struct vm_map_entry *entry;
+ boolean_t do_wire_faults;
+
+ /*
+ * Pass 1. Update counters and prepare wiring faults.
+ */
+
+ do_wire_faults = FALSE;
+
+ /* The range is [start, end) — `end' itself is not processed. */
+ for (entry = start; entry != end; entry = entry->vme_next) {
+
+ /*
+ * Unwiring.
+ *
+ * Note that unwiring faults can be performed while
+ * holding a write lock on the map. A wiring fault
+ * can only be done with a read lock.
+ */
+
+ if (entry->wired_access == VM_PROT_NONE) {
+ if (entry->wired_count != 0) {
+ vm_map_entry_reset_wired(map, entry);
+ vm_fault_unwire(map, entry);
+ }
+
+ continue;
+ }
+
+ /*
+ * Wiring.
+ */
+
+ if (entry->protection == VM_PROT_NONE) {
+
+ /*
+ * Make sure entries that cannot be accessed
+ * because of their protection aren't wired.
+ */
+
+ if (entry->wired_count == 0) {
+ continue;
+ }
+
+ /*
+ * This normally occurs after changing the protection of
+ * a wired region to VM_PROT_NONE.
+ */
+ vm_map_entry_reset_wired(map, entry);
+ vm_fault_unwire(map, entry);
+ continue;
+ }
+
+ /*
+ * We must do this in two passes:
+ *
+ * 1. Holding the write lock, we create any shadow
+ * or zero-fill objects that need to be created.
+ * Then we increment the wiring count.
+ *
+ * 2. We downgrade to a read lock, and call
+ * vm_fault_wire to fault in the pages for any
+ * newly wired area (wired_count is 1).
+ *
+ * Downgrading to a read lock for vm_fault_wire avoids
+ * a possible deadlock with another thread that may have
+ * faulted on one of the pages to be wired (it would mark
+ * the page busy, blocking us, then in turn block on the
+ * map lock that we hold). Because of problems in the
+ * recursive lock package, we cannot upgrade to a write
+ * lock in vm_map_lookup. Thus, any actions that require
+ * the write lock must be done beforehand. Because we
+ * keep the read lock on the map, the copy-on-write
+ * status of the entries we modify here cannot change.
+ */
+
+ if (entry->wired_count == 0) {
+ /*
+ * Perform actions of vm_map_lookup that need
+ * the write lock on the map: create a shadow
+ * object for a copy-on-write region, or an
+ * object for a zero-fill region.
+ */
+ if (entry->needs_copy &&
+ ((entry->protection & VM_PROT_WRITE) != 0)) {
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ (vm_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->needs_copy = FALSE;
+ }
+
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+ entry->object.vm_object =
+ vm_object_allocate(
+ (vm_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->offset = (vm_offset_t)0;
+ }
+ }
+
+ vm_map_entry_inc_wired(map, entry);
+
+ /* wired_count == 1 marks a newly wired entry for pass 2. */
+ if (entry->wired_count == 1) {
+ do_wire_faults = TRUE;
+ }
+ }
+
+ /*
+ * Pass 2. Trigger wiring faults.
+ */
+
+ if (!do_wire_faults) {
+ return;
+ }
+
+ /*
+ * HACK HACK HACK HACK
+ *
+ * If we are wiring in the kernel map or a submap of it,
+ * unlock the map to avoid deadlocks. We trust that the
+ * kernel threads are well-behaved, and therefore will
+ * not do anything destructive to this region of the map
+ * while we have it unlocked. We cannot trust user threads
+ * to do the same.
+ *
+ * HACK HACK HACK HACK
+ */
+ if (vm_map_pmap(map) == kernel_pmap) {
+ vm_map_unlock(map); /* trust me ... */
+ } else {
+ vm_map_lock_set_recursive(map);
+ vm_map_lock_write_to_read(map);
+ }
+
+ for (entry = start; entry != end; entry = entry->vme_next) {
+ /*
+ * The wiring count can only be 1 if it was
+ * incremented by this function right before
+ * downgrading the lock.
+ */
+ if (entry->wired_count == 1) {
+ /*
+ * XXX This assumes that the faults always succeed.
+ */
+ vm_fault_wire(map, entry);
+ }
+ }
+
+ /* Restore the lock state expected by the caller: write-locked
+ * for the kernel map, read-locked (non-recursive) otherwise. */
+ if (vm_map_pmap(map) == kernel_pmap) {
+ vm_map_lock(map);
+ } else {
+ vm_map_lock_clear_recursive(map);
+ }
+}
+
+/*
+ * vm_map_protect:
+ *
+ * Sets the protection of the specified address
+ * region in the target map. If "set_max" is
+ * specified, the maximum protection is to be set;
+ * otherwise, only the current protection is affected.
+ *
+ * Returns KERN_INVALID_ARGUMENT if the range contains a submap,
+ * and KERN_PROTECTION_FAILURE if new_prot exceeds an entry's
+ * maximum protection.
+ */
+kern_return_t vm_map_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t new_prot,
+ boolean_t set_max)
+{
+ vm_map_entry_t current;
+ vm_map_entry_t entry;
+ vm_map_entry_t next;
+
+ vm_map_lock(map);
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ if (vm_map_lookup_entry(map, start, &entry)) {
+ vm_map_clip_start(map, entry, start);
+ }
+ else
+ entry = entry->vme_next;
+
+ /*
+ * Make a first pass to check for protection
+ * violations.
+ */
+
+ current = entry;
+ while ((current != vm_map_to_entry(map)) &&
+ (current->vme_start < end)) {
+
+ if (current->is_sub_map) {
+ vm_map_unlock(map);
+ return(KERN_INVALID_ARGUMENT);
+ }
+ /* new_prot must be a subset of max_protection, except that
+ * VM_PROT_NOTIFY is always tolerated. */
+ if ((new_prot & (VM_PROT_NOTIFY | current->max_protection))
+ != new_prot) {
+ vm_map_unlock(map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ current = current->vme_next;
+ }
+
+ /*
+ * Go back and fix up protections.
+ * [Note that clipping is not necessary the second time.]
+ */
+
+ current = entry;
+
+ while ((current != vm_map_to_entry(map)) &&
+ (current->vme_start < end)) {
+
+ vm_prot_t old_prot;
+
+ vm_map_clip_end(map, current, end);
+
+ old_prot = current->protection;
+ if (set_max)
+ current->protection =
+ (current->max_protection = new_prot) &
+ old_prot;
+ else
+ current->protection = new_prot;
+
+ /*
+ * Make sure the new protection doesn't conflict
+ * with the desired wired access if any.
+ */
+
+ if ((current->protection != VM_PROT_NONE) &&
+ (current->wired_access != VM_PROT_NONE ||
+ map->wiring_required)) {
+ current->wired_access = current->protection;
+ }
+
+ /*
+ * Update physical map if necessary.
+ */
+
+ if (current->protection != old_prot) {
+ pmap_protect(map->pmap, current->vme_start,
+ current->vme_end,
+ current->protection);
+ }
+
+ /* Grab vme_next first: a successful coalesce presumably
+ * disposes of `current' — see vm_map_coalesce_entry(). */
+ next = current->vme_next;
+ vm_map_coalesce_entry(map, current);
+ current = next;
+ }
+
+ /* Also try to merge the first entry beyond the range.
+ * NOTE(review): if the first entry of the range was coalesced
+ * away in the loop above, `entry' passed to
+ * vm_map_pageable_scan() below would be stale — confirm
+ * vm_map_coalesce_entry() can never merge that first entry. */
+ next = current->vme_next;
+ if (vm_map_coalesce_entry(map, current))
+ current = next;
+
+ /* Returns with the map read-locked if successful */
+ vm_map_pageable_scan(map, entry, current);
+
+ vm_map_unlock(map);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * vm_map_inherit:
+ *
+ * Sets the inheritance of the specified address
+ * range in the target map. Inheritance
+ * affects how the map will be shared with
+ * child maps at the time of vm_map_fork.
+ */
+kern_return_t vm_map_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_inherit_t new_inheritance)
+{
+ vm_map_entry_t entry;
+ vm_map_entry_t temp_entry;
+ vm_map_entry_t next;
+
+ vm_map_lock(map);
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ /* Find the first entry of the range and clip it at `start'. */
+ if (vm_map_lookup_entry(map, start, &temp_entry)) {
+ entry = temp_entry;
+ vm_map_clip_start(map, entry, start);
+ }
+ else
+ entry = temp_entry->vme_next;
+
+ while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
+ vm_map_clip_end(map, entry, end);
+
+ entry->inheritance = new_inheritance;
+
+ /* Grab vme_next first: a successful coalesce presumably
+ * disposes of `entry' — see vm_map_coalesce_entry(). */
+ next = entry->vme_next;
+ vm_map_coalesce_entry(map, entry);
+ entry = next;
+ }
+
+ /* Also try to merge the first entry beyond the range with the
+ * last one inside it. */
+ vm_map_coalesce_entry(map, entry);
+
+ vm_map_unlock(map);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * vm_map_pageable:
+ *
+ * Sets the pageability of the specified address
+ * range in the target map. Regions specified
+ * as not pageable require locked-down physical
+ * memory and physical page maps. access_type indicates
+ * types of accesses that must not generate page faults.
+ * This is checked against protection of memory being locked-down.
+ * access_type of VM_PROT_NONE makes memory pageable.
+ *
+ * If lock_map is TRUE, the map is locked and unlocked
+ * by this function. Otherwise, it is assumed the caller
+ * already holds the lock, in which case the function
+ * returns with the lock downgraded to a read lock if successful.
+ *
+ * If check_range is TRUE, this function fails if it finds
+ * holes or protection mismatches in the specified range.
+ *
+ * A reference must remain to the map throughout the call.
+ */
+
+kern_return_t vm_map_pageable(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t access_type,
+ boolean_t lock_map,
+ boolean_t check_range)
+{
+ vm_map_entry_t entry;
+ vm_map_entry_t start_entry;
+ vm_map_entry_t end_entry;
+
+ if (lock_map) {
+ vm_map_lock(map);
+ }
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ if (!vm_map_lookup_entry(map, start, &start_entry)) {
+ /*
+ * Start address is not in map; this is fatal.
+ */
+ if (lock_map) {
+ vm_map_unlock(map);
+ }
+
+ return KERN_NO_SPACE;
+ }
+
+ /*
+ * Pass 1. Clip entries, check for holes and protection mismatches
+ * if requested.
+ */
+
+ vm_map_clip_start(map, start_entry, start);
+
+ for (entry = start_entry;
+ (entry != vm_map_to_entry(map)) &&
+ (entry->vme_start < end);
+ entry = entry->vme_next) {
+ vm_map_clip_end(map, entry, end);
+
+ /* A hole exists when an entry ends before `end' and its
+ * successor (if any) does not start right where it ends.
+ * Also fail if the entry's protection does not cover the
+ * requested access. */
+ if (check_range &&
+ (((entry->vme_end < end) &&
+ ((entry->vme_next == vm_map_to_entry(map)) ||
+ (entry->vme_next->vme_start > entry->vme_end))) ||
+ ((entry->protection & access_type) != access_type))) {
+ if (lock_map) {
+ vm_map_unlock(map);
+ }
+
+ return KERN_NO_SPACE;
+ }
+ }
+
+ /* First entry past the range — exclusive bound for pass 2. */
+ end_entry = entry;
+
+ /*
+ * Pass 2. Set the desired wired access.
+ */
+
+ for (entry = start_entry; entry != end_entry; entry = entry->vme_next) {
+ entry->wired_access = access_type;
+ }
+
+ /* Returns with the map read-locked */
+ vm_map_pageable_scan(map, start_entry, end_entry);
+
+ if (lock_map) {
+ vm_map_unlock(map);
+ }
+
+ return(KERN_SUCCESS);
+}
+
+/* Update pageability of all the memory currently in the map.
+ * The map must be locked, and protection mismatch will not be checked, see
+ * vm_map_pageable().
+ */
+static kern_return_t
+vm_map_pageable_current(vm_map_t map, vm_prot_t access_type)
+{
+ struct rbtree_node *node;
+ vm_offset_t min_address, max_address;
+
+ /* Span from the lowest entry's start to the highest entry's end.
+ * NOTE(review): rbtree_first()/rbtree_last() look like they would
+ * yield a null node on an empty map — confirm callers never
+ * reach here with an empty entry tree. */
+ node = rbtree_first(&map->hdr.tree);
+ min_address = rbtree_entry(node, struct vm_map_entry,
+ tree_node)->vme_start;
+
+ node = rbtree_last(&map->hdr.tree);
+ max_address = rbtree_entry(node, struct vm_map_entry,
+ tree_node)->vme_end;
+
+ /* Returns with the map read-locked if successful */
+ return vm_map_pageable(map, min_address, max_address,access_type,
+ FALSE, FALSE);
+}
+
+
+/*
+ * vm_map_pageable_all:
+ *
+ * Sets the pageability of an entire map. If the VM_WIRE_CURRENT
+ * flag is set, then all current mappings are locked down. If the
+ * VM_WIRE_FUTURE flag is set, then all mappings created after the
+ * call returns are locked down. If no flags are passed
+ * (i.e. VM_WIRE_NONE), all mappings become pageable again, and
+ * future mappings aren't automatically locked down any more.
+ *
+ * The access type of the mappings match their current protection.
+ * Null mappings (with protection PROT_NONE) are updated to track
+ * that they should be wired in case they become accessible.
+ */
+kern_return_t
+vm_map_pageable_all(struct vm_map *map, vm_wire_t flags)
+{
+ boolean_t saved_wiring;
+ kern_return_t kr;
+
+ /* Reject any flag outside the VM_WIRE_ALL mask. */
+ if ((flags & ~VM_WIRE_ALL) != 0) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vm_map_lock(map);
+
+ /*
+ * VM_WIRE_NONE: stop wiring future mappings and make all
+ * current ones pageable again.
+ */
+ if (flags == VM_WIRE_NONE) {
+ map->wiring_required = FALSE;
+
+ /* Returns with the map read-locked if successful */
+ kr = vm_map_pageable_current(map, VM_PROT_NONE);
+ vm_map_unlock(map);
+ return kr;
+ }
+
+ /* Remember the previous setting so it can be restored if wiring
+ * the current mappings fails. */
+ saved_wiring = map->wiring_required;
+
+ if (flags & VM_WIRE_FUTURE) {
+ map->wiring_required = TRUE;
+ }
+
+ kr = KERN_SUCCESS;
+
+ if (flags & VM_WIRE_CURRENT) {
+ /* Returns with the map read-locked if successful */
+ kr = vm_map_pageable_current(map, VM_PROT_READ | VM_PROT_WRITE);
+
+ if ((kr != KERN_SUCCESS) && (flags & VM_WIRE_FUTURE)) {
+ map->wiring_required = saved_wiring;
+ }
+ }
+
+ vm_map_unlock(map);
+
+ return kr;
+}
+
+/*
+ * vm_map_entry_delete: [ internal use only ]
+ *
+ * Deallocate the given entry from the target map: unwire it,
+ * remove its translations from the pmap, drop its object (or
+ * submap) reference, then unlink and free the entry itself.
+ */
+void vm_map_entry_delete(
+ vm_map_t map,
+ vm_map_entry_t entry)
+{
+ vm_offset_t s, e;
+ vm_size_t size;
+ vm_object_t object;
+ extern vm_object_t kernel_object;
+
+ s = entry->vme_start;
+ e = entry->vme_end;
+ size = e - s;
+
+ /*Check if projected buffer*/
+ if (map != kernel_map && entry->projected_on != 0) {
+ /*Check if projected kernel entry is persistent;
+ may only manipulate directly if it is*/
+ if (entry->projected_on->projected_on == 0)
+ entry->wired_count = 0; /*Avoid unwire fault*/
+ else
+ return;
+ }
+
+ /*
+ * Get the object. Null objects cannot have pmap entries.
+ */
+
+ if ((object = entry->object.vm_object) != VM_OBJECT_NULL) {
+
+ /*
+ * Unwire before removing addresses from the pmap;
+ * otherwise, unwiring will put the entries back in
+ * the pmap.
+ */
+
+ if (entry->wired_count != 0) {
+ vm_map_entry_reset_wired(map, entry);
+ vm_fault_unwire(map, entry);
+ }
+
+ /*
+ * If the object is shared, we must remove
+ * *all* references to this data, since we can't
+ * find all of the physical maps which are sharing
+ * it.
+ */
+
+ if (object == kernel_object) {
+ vm_object_lock(object);
+ vm_object_page_remove(object, entry->offset,
+ entry->offset + size);
+ vm_object_unlock(object);
+ } else if (entry->is_shared) {
+ vm_object_pmap_remove(object,
+ entry->offset,
+ entry->offset + size);
+ } else {
+ /* Private mapping: only this map's pmap can hold
+ * translations for it. */
+ pmap_remove(map->pmap, s, e);
+ /*
+ * If this object has no pager and our
+ * reference to the object is the only
+ * one, we can release the deleted pages
+ * now.
+ */
+ vm_object_lock(object);
+ if ((!object->pager_created) &&
+ (object->ref_count == 1) &&
+ (object->paging_in_progress == 0)) {
+ vm_object_page_remove(object,
+ entry->offset,
+ entry->offset + size);
+ }
+ vm_object_unlock(object);
+ }
+ }
+
+ /*
+ * Deallocate the object only after removing all
+ * pmap entries pointing to its pages.
+ */
+
+ if (entry->is_sub_map)
+ vm_map_deallocate(entry->object.sub_map);
+ else
+ vm_object_deallocate(entry->object.vm_object);
+
+ vm_map_entry_unlink(map, entry);
+ map->size -= size;
+
+ vm_map_entry_dispose(map, entry);
+}
+
+/*
+ * vm_map_delete: [ internal use only ]
+ *
+ * Deallocates the given address range from the target
+ * map. The map lock is held by the caller (see
+ * vm_map_remove for the exported, self-locking form).
+ */
+
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ vm_map_entry_t entry;
+ vm_map_entry_t first_entry;
+
+ /* The kernel map region covering physical memory must never be
+ * unmapped. */
+ if (map->pmap == kernel_pmap && (start < kernel_virtual_start || end > kernel_virtual_end))
+ panic("vm_map_delete(%lx-%lx) falls in physical memory area!\n", (unsigned long) start, (unsigned long) end);
+
+ /*
+ * Find the start of the region, and clip it
+ */
+
+ if (!vm_map_lookup_entry(map, start, &first_entry))
+ entry = first_entry->vme_next;
+ else {
+ entry = first_entry;
+ vm_map_clip_start(map, entry, start);
+
+ /*
+ * Fix the lookup hint now, rather than each
+ * time though the loop.
+ */
+
+ SAVE_HINT(map, entry->vme_prev);
+ }
+
+ /*
+ * Save the free space hint
+ */
+
+ if (map->first_free->vme_start >= start)
+ map->first_free = entry->vme_prev;
+
+ /*
+ * Step through all entries in this region
+ */
+
+ while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
+ vm_map_entry_t next;
+
+ vm_map_clip_end(map, entry, end);
+
+ /*
+ * If the entry is in transition, we must wait
+ * for it to exit that state. It could be clipped
+ * while we leave the map unlocked.
+ */
+ if(entry->in_transition) {
+ /*
+ * Say that we are waiting, and wait for entry.
+ * NOTE(review): vm_map_entry_wait() presumably
+ * drops the map lock, hence the re-lock just
+ * below — confirm.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(map, FALSE);
+ vm_map_lock(map);
+
+ /*
+ * The entry could have been clipped or it
+ * may not exist anymore. look it up again.
+ */
+ if(!vm_map_lookup_entry(map, start, &entry)) {
+ entry = entry->vme_next;
+ }
+ continue;
+ }
+
+ next = entry->vme_next;
+
+ vm_map_entry_delete(map, entry);
+ entry = next;
+ }
+
+ /* Presumably wakes threads waiting for free space in this map. */
+ if (map->wait_for_space)
+ thread_wakeup((event_t) map);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * vm_map_remove:
+ *
+ * Remove the given address range from the target map.
+ * This is the exported form of vm_map_delete: it takes the
+ * map lock, clamps the range, and delegates to vm_map_delete.
+ */
+kern_return_t vm_map_remove(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ kern_return_t kr;
+
+ vm_map_lock(map);
+ VM_MAP_RANGE_CHECK(map, start, end);
+ kr = vm_map_delete(map, start, end);
+ vm_map_unlock(map);
+
+ return kr;
+}
+
+
<doc_update>
+/*
+ * vm_map_copy_steal_pages:
+ *
+ * Steal all the pages from a vm_map_copy page_list by copying ones
+ * that have not already been stolen.
+ */
+static void
+vm_map_copy_steal_pages(vm_map_copy_t copy)
+{
+ vm_page_t m, new_m;
+ int i;
+ vm_object_t object;
+
+ for (i = 0; i < copy->cpy_npages; i++) {
+
+ /*
+ * If the page is not tabled, then it's already stolen.
+ */
+ m = copy->cpy_page_list[i];
+ if (!m->tabled)
+ continue;
+
+ /*
+ * Page was not stolen, get a new
+ * one and do the copy now.
+ */
+ while ((new_m = vm_page_grab(VM_PAGE_HIGHMEM)) == VM_PAGE_NULL) {
+ VM_PAGE_WAIT((void(*)()) 0);
+ }
+
+ vm_page_copy(m, new_m);
+
+ /* Release the original page back to its object: reactivate
+ * it if needed, wake up any waiters, and drop the object's
+ * paging-in-progress reference. */
+ object = m->object;
+ vm_object_lock(object);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+ PAGE_WAKEUP_DONE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ /* The page list now owns the private copy instead. */
+ copy->cpy_page_list[i] = new_m;
+ }
+}
+
+/*
+ * vm_map_copy_page_discard:
+ *
+ * Get rid of the pages in a page_list copy. If the pages are
+ * stolen, they are freed. If the pages are not stolen, they
+ * are unbusied, and associated state is cleaned up.
+ */
+void vm_map_copy_page_discard(vm_map_copy_t copy)
+{
+ /* Consume the page list from the tail; cpy_npages ends at 0. */
+ while (copy->cpy_npages > 0) {
+ vm_page_t m;
+
+ if((m = copy->cpy_page_list[--(copy->cpy_npages)]) !=
+ VM_PAGE_NULL) {
+
+ /*
+ * If it's not in the table, then it's
+ * a stolen page that goes back
+ * to the free list. Else it belongs
+ * to some object, and we hold a
+ * paging reference on that object.
+ */
+ if (!m->tabled) {
+ VM_PAGE_FREE(m);
+ }
+ else {
+ vm_object_t object;
+
+ object = m->object;
+
+ /* Reactivate the page if it is on neither
+ * queue, wake any waiters, and drop the
+ * paging reference. */
+ vm_object_lock(object);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+
+ PAGE_WAKEUP_DONE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ }
+ }
+ }
+}
+
+/*
+ * Routine: vm_map_copy_discard
+ *
+ * Description:
+ * Dispose of a map copy object (returned by
+ * vm_map_copyin).
+ */
+void
+vm_map_copy_discard(vm_map_copy_t copy)
+{
+free_next_copy:
+ if (copy == VM_MAP_COPY_NULL)
+ return;
+
+ switch (copy->type) {
+ case VM_MAP_COPY_ENTRY_LIST:
+ /* Unlink and dispose every entry, dropping its object
+ * reference. */
+ while (vm_map_copy_first_entry(copy) !=
+ vm_map_copy_to_entry(copy)) {
+ vm_map_entry_t entry = vm_map_copy_first_entry(copy);
+
+ vm_map_copy_entry_unlink(copy, entry);
+ vm_object_deallocate(entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, entry);
+ }
+ break;
+ case VM_MAP_COPY_OBJECT:
+ vm_object_deallocate(copy->cpy_object);
+ break;
+ case VM_MAP_COPY_PAGE_LIST:
+
+ /*
+ * To clean this up, we have to unbusy all the pages
+ * and release the paging references in their objects.
+ */
+ if (copy->cpy_npages > 0)
+ vm_map_copy_page_discard(copy);
+
+ /*
+ * If there's a continuation, abort it. The
+ * abort routine releases any storage.
+ */
+ if (vm_map_copy_has_cont(copy)) {
+
+ /*
+ * Special case: recognize
+ * vm_map_copy_discard_cont and optimize
+ * here to avoid tail recursion.
+ */
+ if (copy->cpy_cont == vm_map_copy_discard_cont) {
+ vm_map_copy_t new_copy;
+
+ /* The continuation argument is itself a
+ * copy to discard; free this one and loop
+ * instead of recursing. */
+ new_copy = (vm_map_copy_t) copy->cpy_cont_args;
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
+ copy = new_copy;
+ goto free_next_copy;
+ }
+ else {
+ vm_map_copy_abort_cont(copy);
+ }
+ }
+
+ break;
+ }
+ /* Finally free the copy structure itself. */
+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
+}
+
+/*
+ * Routine: vm_map_copy_copy
+ *
+ * Description:
+ * Move the information in a map copy object to
+ * a new map copy object, leaving the old one
+ * empty.
+ *
+ * This is used by kernel routines that need
+ * to look at out-of-line data (in copyin form)
+ * before deciding whether to return SUCCESS.
+ * If the routine returns FAILURE, the original
+ * copy object will be deallocated; therefore,
+ * these routines must make a copy of the copy
+ * object and leave the original empty so that
+ * deallocation will not fail.
+ */
+vm_map_copy_t
+vm_map_copy_copy(vm_map_copy_t copy)
+{
+ vm_map_copy_t new_copy;
+
+ if (copy == VM_MAP_COPY_NULL)
+ return VM_MAP_COPY_NULL;
+
+ /*
+ * Allocate a new copy object, and copy the information
+ * from the old one into it.
+ */
+
+ new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
+ *new_copy = *copy;
+
+ if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
+ /*
+ * The links in the entry chain must be
+ * changed to point to the new copy object.
+ * (The structure copy above duplicated the header, but
+ * the first and last entries still point back at the
+ * old header.)
+ */
+ vm_map_copy_first_entry(copy)->vme_prev
+ = vm_map_copy_to_entry(new_copy);
+ vm_map_copy_last_entry(copy)->vme_next
+ = vm_map_copy_to_entry(new_copy);
+ }
+
+ /*
+ * Change the old copy object into one that contains
+ * nothing to be deallocated: a null-object copy
+ * (vm_object_deallocate presumably tolerates
+ * VM_OBJECT_NULL — see vm_map_copy_discard).
+ */
+ copy->type = VM_MAP_COPY_OBJECT;
+ copy->cpy_object = VM_OBJECT_NULL;
+
+ /*
+ * Return the new object.
+ */
+ return new_copy;
+}
+
+/*
+ * Routine: vm_map_copy_discard_cont
+ *
+ * Description:
+ * A version of vm_map_copy_discard that can be called
+ * as a continuation from a vm_map_copy page list.
+ */
+kern_return_t vm_map_copy_discard_cont(
+ vm_map_copyin_args_t cont_args,
+ vm_map_copy_t *copy_result) /* OUT */
+{
+ /* The continuation argument is simply the copy to destroy. */
+ vm_map_copy_discard((vm_map_copy_t) cont_args);
+
+ /* A null result pointer means the caller ignores the result. */
+ if (copy_result != (vm_map_copy_t *) 0)
+ *copy_result = VM_MAP_COPY_NULL;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_map_copy_overwrite
+ *
+ * Description:
+ * Copy the memory described by the map copy
+ * object (copy; returned by vm_map_copyin) onto
+ * the specified destination region (dst_map, dst_addr).
+ * The destination must be writeable.
+ *
+ * Unlike vm_map_copyout, this routine actually
+ * writes over previously-mapped memory. If the
+ * previous mapping was to a permanent (user-supplied)
+ * memory object, it is preserved.
+ *
+ * The attributes (protection and inheritance) of the
+ * destination region are preserved.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ *
+ * Implementation notes:
+ * To overwrite temporary virtual memory, it is
+ * sufficient to remove the previous mapping and insert
+ * the new copy. This replacement is done either on
+ * the whole region (if no permanent virtual memory
+ * objects are embedded in the destination region) or
+ * in individual map entries.
+ *
+ * To overwrite permanent virtual memory, it is
+ * necessary to copy each page, as the external
+ * memory management interface currently does not
+ * provide any optimizations.
+ *
+ * Once a page of permanent memory has been overwritten,
+ * it is impossible to interrupt this function; otherwise,
+ * the call would be neither atomic nor location-independent.
+ * The kernel-state portion of a user thread must be
+ * interruptible.
+ *
+ * It may be expensive to forward all requests that might
+ * overwrite permanent memory (vm_write, vm_copy) to
+ * uninterruptible kernel threads. This routine may be
+ * called by interruptible threads; however, success is
+ * not guaranteed -- if the request cannot be performed
+ * atomically and interruptibly, an error indication is
+ * returned.
+ */
+kern_return_t vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
+{
+ vm_size_t size;
+ vm_offset_t start;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+
+ boolean_t contains_permanent_objects = FALSE;
+
+ interruptible = FALSE; /* XXX */
+
+ /*
+ * Check for null copy object.
+ */
+
+ if (copy == VM_MAP_COPY_NULL)
+ return(KERN_SUCCESS);
+
+ /*
+ * Only works for entry lists at the moment. Will
+ * support page lists LATER.
+ */
+
+ assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
+
+ /*
+ * Currently this routine only handles page-aligned
+ * regions. Eventually, it should handle misalignments
+ * by actually copying pages.
+ */
+
+ if (!page_aligned(copy->offset) ||
+ !page_aligned(copy->size) ||
+ !page_aligned(dst_addr))
+ return(KERN_INVALID_ARGUMENT);
+
+ size = copy->size;
+
+ if (size == 0) {
+ vm_map_copy_discard(copy);
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Verify that the destination is all writeable
+ * initially.
+ */
+start_pass_1:
+ vm_map_lock(dst_map);
+ if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ vm_map_clip_start(dst_map, tmp_entry, dst_addr);
+ for (entry = tmp_entry;;) {
+ vm_size_t sub_size = (entry->vme_end - entry->vme_start);
+ vm_map_entry_t next = entry->vme_next;
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * If the entry is in transition, we must wait
+ * for it to exit that state. Anything could happen
+ * when we unlock the map, so start over.
+ */
+ if (entry->in_transition) {
+
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, FALSE);
+
+ goto start_pass_1;
+ }
+
+ if (size <= sub_size)
+ break;
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start != entry->vme_end)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+
+ /*
+ * Check for permanent objects in the destination.
+ */
+
+ if ((entry->object.vm_object != VM_OBJECT_NULL) &&
+ !entry->object.vm_object->temporary)
+ contains_permanent_objects = TRUE;
+
+ size -= sub_size;
+ entry = next;
+ }
+
+ /*
+ * If there are permanent objects in the destination, then
+ * the copy cannot be interrupted.
+ */
+
+ if (interruptible && contains_permanent_objects) {
+ vm_map_unlock(dst_map);
+ return(KERN_FAILURE); /* XXX */
+ }
+
+ /*
+ * XXXO If there are no permanent objects in the destination,
+ * XXXO and the destination map entry is not shared,
+ * XXXO then the map entries can be deleted and replaced
+ * XXXO with those from the copy. The following code is the
+ * XXXO basic idea of what to do, but there are lots of annoying
+ * XXXO little details about getting protection and inheritance
+ * XXXO right. Should add protection, inheritance, and sharing checks
+ * XXXO to the above pass and make sure that no wiring is involved.
+ */
+/*
+ * if (!contains_permanent_objects) {
+ *
+ * *
+ * * Run over copy and adjust entries. Steal code
+ * * from vm_map_copyout() to do this.
+ * *
+ *
+ * tmp_entry = tmp_entry->vme_prev;
+ * vm_map_delete(dst_map, dst_addr, dst_addr + copy->size);
+ * vm_map_copy_insert(dst_map, tmp_entry, copy);
+ *
+ * vm_map_unlock(dst_map);
+ * vm_map_copy_discard(copy);
+ * }
+ */
+ /*
+ *
+ * Make a second pass, overwriting the data
+ * At the beginning of each loop iteration,
+ * the next entry to be overwritten is "tmp_entry"
+ * (initially, the value returned from the lookup above),
+ * and the starting address expected in that entry
+ * is "start".
+ */
+
+ start = dst_addr;
+
+ while (vm_map_copy_first_entry(copy) != vm_map_copy_to_entry(copy)) {
+ vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
+ vm_size_t copy_size = (copy_entry->vme_end - copy_entry->vme_start);
+ vm_object_t object;
+
+ entry = tmp_entry;
+ size = (entry->vme_end - entry->vme_start);
+ /*
+ * Make sure that no holes popped up in the
+ * address map, and that the protection is
+ * still valid, in case the map was unlocked
+ * earlier.
+ */
+
+ if (entry->vme_start != start) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ assert(entry != vm_map_to_entry(dst_map));
+
+ /*
+ * Check protection again
+ */
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * Adjust to source size first
+ */
+
+ if (copy_size < size) {
+ vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
+ size = copy_size;
+ }
+
+ /*
+ * Adjust to destination size
+ */
+
+ if (size < copy_size) {
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + size);
+ copy_size = size;
+ }
+
+ assert((entry->vme_end - entry->vme_start) == size);
+ assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
+ assert((copy_entry->vme_end - copy_entry->vme_start) == size);
+
+ /*
+ * If the destination contains temporary unshared memory,
+ * we can perform the copy by throwing it away and
+ * installing the source data.
+ */
+
+ object = entry->object.vm_object;
+ if (!entry->is_shared &&
+ ((object == VM_OBJECT_NULL) || object->temporary)) {
+ vm_object_t old_object = entry->object.vm_object;
+ vm_offset_t old_offset = entry->offset;
+
+ entry->object = copy_entry->object;
+ entry->offset = copy_entry->offset;
+ entry->needs_copy = copy_entry->needs_copy;
+ vm_map_entry_reset_wired(dst_map, entry);
+
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ vm_object_pmap_protect(
+ old_object,
+ old_offset,
+ size,
+ dst_map->pmap,
+ tmp_entry->vme_start,
+ VM_PROT_NONE);
+
+ vm_object_deallocate(old_object);
+
+ /*
+ * Set up for the next iteration. The map
+ * has not been unlocked, so the next
+ * address should be at the end of this
+ * entry, and the next map entry should be
+ * the one following it.
+ */
+
+ start = tmp_entry->vme_end;
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ vm_map_version_t version;
+ vm_object_t dst_object = entry->object.vm_object;
+ vm_offset_t dst_offset = entry->offset;
+ kern_return_t r;
+
+ /*
+ * Take an object reference, and record
+ * the map version information so that the
+ * map can be safely unlocked.
+ */
+
+ vm_object_reference(dst_object);
+
+ version.main_timestamp = dst_map->timestamp;
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * Copy as much as possible in one pass
+ */
+
+ copy_size = size;
+ r = vm_fault_copy(
+ copy_entry->object.vm_object,
+ copy_entry->offset,
+ &copy_size,
+ dst_object,
+ dst_offset,
+ dst_map,
+ &version,
+ FALSE /* XXX interruptible */ );
+
+ /*
+ * Release the object reference
+ */
+
+ vm_object_deallocate(dst_object);
+
+ /*
+ * If a hard error occurred, return it now
+ */
+
+ if (r != KERN_SUCCESS)
+ return(r);
+
+ if (copy_size != 0) {
+ /*
+ * Dispose of the copied region
+ */
+
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + copy_size);
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+ }
+
+ /*
+ * Pick up in the destination map where we left off.
+ *
+ * Use the version information to avoid a lookup
+ * in the normal case.
+ */
+
+ start += copy_size;
+ vm_map_lock(dst_map);
+ if ((version.main_timestamp + 1) == dst_map->timestamp) {
+ /* We can safely use saved tmp_entry value */
+
+ vm_map_clip_end(dst_map, tmp_entry, start);
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ /* Must do lookup of tmp_entry */
+
+ if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ vm_map_clip_start(dst_map, tmp_entry, start);
+ }
+ }
+
+ }
+ vm_map_unlock(dst_map);
+
+ /*
+ * Throw away the vm_map_copy object
+ */
+ vm_map_copy_discard(copy);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ *	Routine:	vm_map_copy_insert
+ *
+ *	Description:
+ *		Splice every entry of an entry-list copy ("copy")
+ *		into "map", immediately after the entry "where".
+ *	Side effects:
+ *		The copy chain is consumed and its header freed.
+ */
+static void
+vm_map_copy_insert(struct vm_map *map, struct vm_map_entry *where,
+		   struct vm_map_copy *copy)
+{
+	struct vm_map_entry *next;
+
+	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
+
+	while ((next = vm_map_copy_first_entry(copy))
+	       != vm_map_copy_to_entry(copy)) {
+		/*
+		 * TODO Turn copy maps into their own type so they don't
+		 * use any of the tree operations.
+		 */
+		vm_map_copy_entry_unlink(copy, next);
+		vm_map_entry_link(map, where, next);
+		where = next;
+	}
+
+	kmem_cache_free(&vm_map_copy_cache, (vm_offset_t)copy);
+}
+
+/*
+ *	Routine:	vm_map_copyout
+ *
+ *	Description:
+ *		Copy out a copy chain ("copy") into newly-allocated
+ *		space in the destination map.
+ *
+ *		If successful, consumes the copy object.
+ *		Otherwise, the caller is responsible for it.
+ */
+kern_return_t vm_map_copyout(
+	vm_map_t	dst_map,
+	vm_offset_t	*dst_addr,	/* OUT */
+	vm_map_copy_t	copy)
+{
+	vm_size_t	size;
+	vm_size_t	adjustment;
+	vm_offset_t	start;
+	vm_offset_t	vm_copy_start;
+	vm_map_entry_t	last;
+	vm_map_entry_t	entry;
+	kern_return_t	kr;
+
+	/*
+	 *	Check for null copy object.
+	 */
+
+	if (copy == VM_MAP_COPY_NULL) {
+		*dst_addr = 0;
+		return(KERN_SUCCESS);
+	}
+
+	/*
+	 *	Check for special copy object, created
+	 *	by vm_map_copyin_object.
+	 */
+
+	if (copy->type == VM_MAP_COPY_OBJECT) {
+		vm_object_t object = copy->cpy_object;
+		vm_size_t offset = copy->offset;
+		vm_size_t tmp_size = copy->size;
+
+		*dst_addr = 0;
+		kr = vm_map_enter(dst_map, dst_addr, tmp_size,
+				  (vm_offset_t) 0, TRUE,
+				  object, offset, FALSE,
+				  VM_PROT_DEFAULT, VM_PROT_ALL,
+				  VM_INHERIT_DEFAULT);
+		if (kr != KERN_SUCCESS)
+			return(kr);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
+		return(KERN_SUCCESS);
+	}
+
+	if (copy->type == VM_MAP_COPY_PAGE_LIST)
+		return(vm_map_copyout_page_list(dst_map, dst_addr, copy));
+
+	/*
+	 *	Find space for the data
+	 */
+
+	vm_copy_start = trunc_page(copy->offset);
+	size = round_page(copy->offset + copy->size) - vm_copy_start;
+
+	/*
+	 *	The map must be locked before searching for free space:
+	 *	vm_map_find_entry_anywhere expects a locked map (compare
+	 *	vm_map_copyout_page_list), and every exit path below
+	 *	unlocks dst_map.
+	 */
+	vm_map_lock(dst_map);
+
+	last = vm_map_find_entry_anywhere(dst_map, size, 0, FALSE, &start);
+
+	if (last == NULL) {
+		vm_map_unlock(dst_map);
+		return KERN_NO_SPACE;
+	}
+
+	/*
+	 *	Adjust the addresses in the copy chain, and
+	 *	reset the region attributes.
+	 */
+
+	adjustment = start - vm_copy_start;
+	for (entry = vm_map_copy_first_entry(copy);
+	     entry != vm_map_copy_to_entry(copy);
+	     entry = entry->vme_next) {
+		entry->vme_start += adjustment;
+		entry->vme_end += adjustment;
+
+		/*
+		 *	XXX There is no need to update the gap tree here.
+		 *	See vm_map_copy_insert.
+		 */
+
+		entry->inheritance = VM_INHERIT_DEFAULT;
+		entry->protection = VM_PROT_DEFAULT;
+		entry->max_protection = VM_PROT_ALL;
+		entry->projected_on = 0;
+
+		/*
+		 *	If the entry is now wired,
+		 *	map the pages into the destination map.
+		 */
+		if (entry->wired_count != 0) {
+			vm_offset_t	va;
+			vm_offset_t	offset;
+			vm_object_t	object;
+
+			object = entry->object.vm_object;
+			offset = entry->offset;
+			va = entry->vme_start;
+
+			pmap_pageable(dst_map->pmap,
+				      entry->vme_start,
+				      entry->vme_end,
+				      TRUE);
+
+			while (va < entry->vme_end) {
+				vm_page_t	m;
+
+				/*
+				 *	Look up the page in the object.
+				 *	Assert that the page will be found in the
+				 *	top object:
+				 *	either
+				 *		the object was newly created by
+				 *		vm_object_copy_slowly, and has
+				 *		copies of all of the pages from
+				 *		the source object
+				 *	or
+				 *		the object was moved from the old
+				 *		map entry; because the old map
+				 *		entry was wired, all of the pages
+				 *		were in the top-level object.
+				 *		(XXX not true if we wire pages for
+				 *		 reading)
+				 */
+				vm_object_lock(object);
+				vm_object_paging_begin(object);
+
+				m = vm_page_lookup(object, offset);
+				if (m == VM_PAGE_NULL || m->wire_count == 0 ||
+				    m->absent)
+					panic("vm_map_copyout: wiring %p", m);
+
+				m->busy = TRUE;
+				vm_object_unlock(object);
+
+				PMAP_ENTER(dst_map->pmap, va, m,
+					   entry->protection, TRUE);
+
+				vm_object_lock(object);
+				PAGE_WAKEUP_DONE(m);
+				/* the page is wired, so we don't have to activate */
+				vm_object_paging_end(object);
+				vm_object_unlock(object);
+
+				offset += PAGE_SIZE;
+				va += PAGE_SIZE;
+			}
+		}
+	}
+
+	/*
+	 *	Correct the page alignment for the result
+	 */
+
+	*dst_addr = start + (copy->offset - vm_copy_start);
+
+	/*
+	 *	Update the hints and the map size
+	 */
+
+	if (dst_map->first_free == last)
+		dst_map->first_free = vm_map_copy_last_entry(copy);
+	SAVE_HINT(dst_map, vm_map_copy_last_entry(copy));
+
+	dst_map->size += size;
+
+	/*
+	 *	Link in the copy
+	 */
+
+	vm_map_copy_insert(dst_map, last, copy);
+
+	if (dst_map->wiring_required) {
+		/* Returns with the map read-locked if successful */
+		kr = vm_map_pageable(dst_map, start, start + size,
+				     VM_PROT_READ | VM_PROT_WRITE,
+				     FALSE, FALSE);
+
+		if (kr != KERN_SUCCESS) {
+			vm_map_unlock(dst_map);
+			return kr;
+		}
+	}
+
+	vm_map_unlock(dst_map);
+
+	return(KERN_SUCCESS);
+}
+
+/*
+ *
+ *	vm_map_copyout_page_list:
+ *
+ *	Version of vm_map_copyout() for page list vm map copies.
+ *
+ *	Moves the (stolen) pages of "copy" into an object mapped at a
+ *	newly allocated range of dst_map, either by extending the
+ *	preceding map entry or by creating a new entry and object.
+ *	Invokes the copy's continuation, if any, to obtain further
+ *	pages.  Consumes the copy on success.
+ */
+kern_return_t vm_map_copyout_page_list(
+	vm_map_t	dst_map,
+	vm_offset_t	*dst_addr,	/* OUT */
+	vm_map_copy_t	copy)
+{
+	vm_size_t	size;
+	vm_offset_t	start;
+	vm_offset_t	end;
+	vm_offset_t	offset;
+	vm_map_entry_t	last;
+	vm_object_t	object;
+	vm_page_t	*page_list, m;
+	vm_map_entry_t	entry;
+	vm_offset_t	old_last_offset;
+	boolean_t	cont_invoked, needs_wakeup = FALSE;
+	kern_return_t	result = KERN_SUCCESS;
+	vm_map_copy_t	orig_copy;
+	vm_offset_t	dst_offset;
+	boolean_t	must_wire;
+
+	/*
+	 *	Make sure the pages are stolen, because we are
+	 *	going to put them in a new object.  Assume that
+	 *	all pages are identical to first in this regard.
+	 */
+
+	page_list = &copy->cpy_page_list[0];
+	if ((*page_list)->tabled)
+		vm_map_copy_steal_pages(copy);
+
+	/*
+	 *	Find space for the data
+	 */
+
+	size =	round_page(copy->offset + copy->size) -
+		trunc_page(copy->offset);
+
+	vm_map_lock(dst_map);
+
+	last = vm_map_find_entry_anywhere(dst_map, size, 0, TRUE, &start);
+
+	if (last == NULL) {
+		vm_map_unlock(dst_map);
+		return KERN_NO_SPACE;
+	}
+
+	end = start + size;
+
+	/* Honor the map-wide wiring policy for the new range. */
+	must_wire = dst_map->wiring_required;
+
+	/*
+	 *	See whether we can avoid creating a new entry (and object) by
+	 *	extending one of our neighbors.  [So far, we only attempt to
+	 *	extend from below.]
+	 *
+	 *	The code path below here is a bit twisted.  If any of the
+	 *	extension checks fails, we branch to create_object.  If
+	 *	it all works, we fall out the bottom and goto insert_pages.
+	 */
+	if (last == vm_map_to_entry(dst_map) ||
+	    last->vme_end != start ||
+	    last->is_shared != FALSE ||
+	    last->is_sub_map != FALSE ||
+	    last->inheritance != VM_INHERIT_DEFAULT ||
+	    last->protection != VM_PROT_DEFAULT ||
+	    last->max_protection != VM_PROT_ALL ||
+	    (must_wire ? (last->wired_count == 0)
+		       : (last->wired_count != 0))) {
+		goto create_object;
+	}
+
+	/*
+	 *	If this entry needs an object, make one.
+	 */
+	if (last->object.vm_object == VM_OBJECT_NULL) {
+		object = vm_object_allocate(
+			(vm_size_t)(last->vme_end - last->vme_start + size));
+		last->object.vm_object = object;
+		last->offset = 0;
+		vm_object_lock(object);
+	}
+	else {
+		vm_offset_t	prev_offset = last->offset;
+		vm_size_t	prev_size = start - last->vme_start;
+		vm_size_t	new_size;
+
+		/*
+		 *	This is basically vm_object_coalesce.
+		 */
+
+		object = last->object.vm_object;
+		vm_object_lock(object);
+
+		/*
+		 *	Try to collapse the object first
+		 */
+		vm_object_collapse(object);
+
+		/*
+		 *	Can't coalesce if pages not mapped to
+		 *	last may be in use anyway:
+		 *	. more than one reference
+		 *	. paged out
+		 *	. shadows another object
+		 *	. has a copy elsewhere
+		 *	. paging references (pages might be in page-list)
+		 */
+
+		if ((object->ref_count > 1) ||
+		    object->pager_created ||
+		    (object->shadow != VM_OBJECT_NULL) ||
+		    (object->copy != VM_OBJECT_NULL) ||
+		    (object->paging_in_progress != 0)) {
+			vm_object_unlock(object);
+			goto create_object;
+		}
+
+		/*
+		 *	Extend the object if necessary.  Don't have to call
+		 *	vm_object_page_remove because the pages aren't mapped,
+		 *	and vm_page_replace will free up any old ones it encounters.
+		 */
+		new_size = prev_offset + prev_size + size;
+		if (new_size > object->size)
+			object->size = new_size;
+	}
+
+	/*
+	 *	Coalesced the two objects - can extend
+	 *	the previous map entry to include the
+	 *	new range.
+	 */
+	dst_map->size += size;
+	last->vme_end = end;
+	vm_map_gap_update(&dst_map->hdr, last);
+
+	SAVE_HINT(dst_map, last);
+
+	goto insert_pages;
+
+create_object:
+
+	/*
+	 *	Create object
+	 */
+	object = vm_object_allocate(size);
+
+	/*
+	 *	Create entry
+	 */
+
+	entry = vm_map_entry_create(dst_map);
+
+	entry->object.vm_object = object;
+	entry->offset = 0;
+
+	entry->is_shared = FALSE;
+	entry->is_sub_map = FALSE;
+	entry->needs_copy = FALSE;
+	entry->wired_count = 0;
+
+	if (must_wire) {
+		vm_map_entry_inc_wired(dst_map, entry);
+		entry->wired_access = VM_PROT_DEFAULT;
+	} else {
+		entry->wired_access = VM_PROT_NONE;
+	}
+
+	/* in_transition lets us unlock the map safely while filling
+	 * the entry with pages below. */
+	entry->in_transition = TRUE;
+	entry->needs_wakeup = FALSE;
+
+	entry->vme_start = start;
+	entry->vme_end = start + size;
+
+	entry->inheritance = VM_INHERIT_DEFAULT;
+	entry->protection = VM_PROT_DEFAULT;
+	entry->max_protection = VM_PROT_ALL;
+	entry->projected_on = 0;
+
+	vm_object_lock(object);
+
+	/*
+	 *	Update the hints and the map size
+	 */
+	if (dst_map->first_free == last) {
+		dst_map->first_free = entry;
+	}
+	SAVE_HINT(dst_map, entry);
+	dst_map->size += size;
+
+	/*
+	 *	Link in the entry
+	 */
+	vm_map_entry_link(dst_map, last, entry);
+	last = entry;
+
+	/*
+	 *	Transfer pages into new object.
+	 *	Scan page list in vm_map_copy.
+	 */
+insert_pages:
+	/* Offset of the start of the data within its first page. */
+	dst_offset = copy->offset & PAGE_MASK;
+	cont_invoked = FALSE;
+	orig_copy = copy;
+	last->in_transition = TRUE;
+	old_last_offset = last->offset
+		+ (start - last->vme_start);
+
+	vm_page_lock_queues();
+
+	for (offset = 0; offset < size; offset += PAGE_SIZE) {
+		m = *page_list;
+		assert(m && !m->tabled);
+
+		/*
+		 *	Must clear busy bit in page before inserting it.
+		 *	Ok to skip wakeup logic because nobody else
+		 *	can possibly know about this page.
+		 *	The page is dirty in its new object.
+		 */
+
+		assert(!m->wanted);
+
+		m->busy = FALSE;
+		m->dirty = TRUE;
+		vm_page_replace(m, object, old_last_offset + offset);
+		if (must_wire) {
+			vm_page_wire(m);
+			PMAP_ENTER(dst_map->pmap,
+				   last->vme_start + m->offset - last->offset,
+				   m, last->protection, TRUE);
+		} else {
+			vm_page_activate(m);
+		}
+
+		*page_list++ = VM_PAGE_NULL;
+		if (--(copy->cpy_npages) == 0 &&
+		    vm_map_copy_has_cont(copy)) {
+			vm_map_copy_t	new_copy;
+
+			/*
+			 *	Ok to unlock map because entry is
+			 *	marked in_transition.
+			 */
+			cont_invoked = TRUE;
+			vm_page_unlock_queues();
+			vm_object_unlock(object);
+			vm_map_unlock(dst_map);
+			vm_map_copy_invoke_cont(copy, &new_copy, &result);
+
+			if (result == KERN_SUCCESS) {
+
+				/*
+				 *	If we got back a copy with real pages,
+				 *	steal them now.  Either all of the
+				 *	pages in the list are tabled or none
+				 *	of them are; mixtures are not possible.
+				 *
+				 *	Save original copy for consume on
+				 *	success logic at end of routine.
+				 */
+				if (copy != orig_copy)
+					vm_map_copy_discard(copy);
+
+				if ((copy = new_copy) != VM_MAP_COPY_NULL) {
+					page_list = &copy->cpy_page_list[0];
+					if ((*page_list)->tabled)
+						vm_map_copy_steal_pages(copy);
+				}
+			}
+			else {
+				/*
+				 *	Continuation failed.
+				 */
+				vm_map_lock(dst_map);
+				goto error;
+			}
+
+			vm_map_lock(dst_map);
+			vm_object_lock(object);
+			vm_page_lock_queues();
+		}
+	}
+
+	vm_page_unlock_queues();
+	vm_object_unlock(object);
+
+	*dst_addr = start + dst_offset;
+
+	/*
+	 *	Clear the in transition bits.  This is easy if we
+	 *	didn't have a continuation.
+	 */
+error:
+	/* Reached both by success fall-through and by continuation
+	 * failure; "result" distinguishes the two cases below. */
+	if (!cont_invoked) {
+		/*
+		 *	We didn't unlock the map, so nobody could
+		 *	be waiting.
+		 */
+		last->in_transition = FALSE;
+		assert(!last->needs_wakeup);
+		needs_wakeup = FALSE;
+	}
+	else {
+		if (!vm_map_lookup_entry(dst_map, start, &entry))
+			panic("vm_map_copyout_page_list: missing entry");
+
+		/*
+		 *	Clear transition bit for all constituent entries that
+		 *	were in the original entry.  Also check for waiters.
+		 */
+		while((entry != vm_map_to_entry(dst_map)) &&
+		      (entry->vme_start < end)) {
+			assert(entry->in_transition);
+			entry->in_transition = FALSE;
+			if(entry->needs_wakeup) {
+				entry->needs_wakeup = FALSE;
+				needs_wakeup = TRUE;
+			}
+			entry = entry->vme_next;
+		}
+	}
+
+	if (result != KERN_SUCCESS)
+		vm_map_delete(dst_map, start, end);
+
+	vm_map_unlock(dst_map);
+
+	if (needs_wakeup)
+		vm_map_entry_wakeup(dst_map);
+
+	/*
+	 *	Consume on success logic.
+	 */
+	if (copy != orig_copy) {
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
+	}
+	if (result == KERN_SUCCESS) {
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy);
+	}
+
+	return(result);
+}
+
+/*
+ *	Routine:	vm_map_copyin
+ *
+ *	Description:
+ *		Copy the specified region (src_addr, len) from the
+ *		source address space (src_map), possibly removing
+ *		the region from the source address space (src_destroy).
+ *
+ *	Returns:
+ *		A vm_map_copy_t object (copy_result), suitable for
+ *		insertion into another address space (using vm_map_copyout),
+ *		copying over another address space region (using
+ *		vm_map_copy_overwrite).  If the copy is unused, it
+ *		should be destroyed (using vm_map_copy_discard).
+ *
+ *	In/out conditions:
+ *		The source map should not be locked on entry.
+ */
+kern_return_t vm_map_copyin(
+	vm_map_t	src_map,
+	vm_offset_t	src_addr,
+	vm_size_t	len,
+	boolean_t	src_destroy,
+	vm_map_copy_t	*copy_result)	/* OUT */
+{
+	vm_map_entry_t	tmp_entry;	/* Result of last map lookup --
+					 * in multi-level lookup, this
+					 * entry contains the actual
+					 * vm_object/offset.
+					 */
+
+	vm_offset_t	src_start;	/* Start of current entry --
+					 * where copy is taking place now
+					 */
+	vm_offset_t	src_end;	/* End of entire region to be
+					 * copied */
+
+	vm_map_copy_t	copy;		/* Resulting copy */
+
+	/*
+	 *	Check for copies of zero bytes.
+	 */
+
+	if (len == 0) {
+		*copy_result = VM_MAP_COPY_NULL;
+		return(KERN_SUCCESS);
+	}
+
+	/*
+	 *	Check that the end address doesn't overflow
+	 */
+
+	if ((src_addr + len) <= src_addr) {
+		return KERN_INVALID_ADDRESS;
+	}
+
+	/*
+	 *	Compute start and end of region
+	 */
+
+	src_start = trunc_page(src_addr);
+	src_end = round_page(src_addr + len);
+
+	/*
+	 *	XXX VM maps shouldn't end at maximum address
+	 */
+
+	if (src_end == 0) {
+		return KERN_INVALID_ADDRESS;
+	}
+
+	/*
+	 *	Allocate a header element for the list.
+	 *
+	 *	Use the start and end in the header to
+	 *	remember the endpoints prior to rounding.
+	 */
+
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
+	vm_map_copy_first_entry(copy) =
+	 vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
+	copy->type = VM_MAP_COPY_ENTRY_LIST;
+	copy->cpy_hdr.nentries = 0;
+	rbtree_init(&copy->cpy_hdr.tree);
+	rbtree_init(&copy->cpy_hdr.gap_tree);
+
+	copy->offset = src_addr;
+	copy->size = len;
+
+	/*
+	 *	Common failure exit: unlock the source map, discard
+	 *	the partially-built copy, and return the given code.
+	 */
+#define	RETURN(x)						\
+	MACRO_BEGIN						\
+	vm_map_unlock(src_map);					\
+	vm_map_copy_discard(copy);				\
+	MACRO_RETURN(x);					\
+	MACRO_END
+
+	/*
+	 *	Find the beginning of the region.
+	 */
+
+	vm_map_lock(src_map);
+
+	if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
+		RETURN(KERN_INVALID_ADDRESS);
+	vm_map_clip_start(src_map, tmp_entry, src_start);
+
+	/*
+	 *	Go through entries until we get to the end.
+	 */
+
+	while (TRUE) {
+		vm_map_entry_t	src_entry = tmp_entry;	/* Top-level entry */
+		vm_size_t	src_size;		/* Size of source
+							 * map entry (in both
+							 * maps)
+							 */
+
+		vm_object_t	src_object;		/* Object to copy */
+		vm_offset_t	src_offset;
+
+		boolean_t	src_needs_copy;		/* Should source map
+							 * be made read-only
+							 * for copy-on-write?
+							 */
+
+		vm_map_entry_t	new_entry;		/* Map entry for copy */
+		boolean_t	new_entry_needs_copy;	/* Will new entry be COW? */
+
+		boolean_t	was_wired;		/* Was source wired? */
+		vm_map_version_t version;		/* Version before locks
+							 * dropped to make copy
+							 */
+
+		/*
+		 *	Verify that the region can be read.
+		 */
+
+		if (! (src_entry->protection & VM_PROT_READ))
+			RETURN(KERN_PROTECTION_FAILURE);
+
+		/*
+		 *	Clip against the endpoints of the entire region.
+		 */
+
+		vm_map_clip_end(src_map, src_entry, src_end);
+
+		src_size = src_entry->vme_end - src_start;
+		src_object = src_entry->object.vm_object;
+		src_offset = src_entry->offset;
+		was_wired = (src_entry->wired_count != 0);
+
+		/*
+		 *	Create a new address map entry to
+		 *	hold the result.  Fill in the fields from
+		 *	the appropriate source entries.
+		 */
+
+		new_entry = vm_map_copy_entry_create(copy);
+		vm_map_entry_copy(new_entry, src_entry);
+
+		/*
+		 *	Attempt non-blocking copy-on-write optimizations.
+		 */
+
+		if (src_destroy &&
+		    (src_object == VM_OBJECT_NULL ||
+		     (src_object->temporary && !src_object->use_shared_copy)))
+		{
+		    /*
+		     *	If we are destroying the source, and the object
+		     *	is temporary, and not shared writable,
+		     *	we can move the object reference
+		     *	from the source to the copy.  The copy is
+		     *	copy-on-write only if the source is.
+		     *	We make another reference to the object, because
+		     *	destroying the source entry will deallocate it.
+		     */
+		    vm_object_reference(src_object);
+
+		    /*
+		     *	Copy is always unwired.  vm_map_entry_copy
+		     *	set its wired count to zero.
+		     */
+
+		    goto CopySuccessful;
+		}
+
+		if (!was_wired &&
+		    vm_object_copy_temporary(
+				&new_entry->object.vm_object,
+				&new_entry->offset,
+				&src_needs_copy,
+				&new_entry_needs_copy)) {
+
+			new_entry->needs_copy = new_entry_needs_copy;
+
+			/*
+			 *	Handle copy-on-write obligations
+			 */
+
+			if (src_needs_copy && !tmp_entry->needs_copy) {
+				vm_object_pmap_protect(
+					src_object,
+					src_offset,
+					src_size,
+					(src_entry->is_shared ? PMAP_NULL
+						: src_map->pmap),
+					src_entry->vme_start,
+					src_entry->protection &
+						~VM_PROT_WRITE);
+
+				tmp_entry->needs_copy = TRUE;
+			}
+
+			/*
+			 *	The map has never been unlocked, so it's safe to
+			 *	move to the next entry rather than doing another
+			 *	lookup.
+			 */
+
+			goto CopySuccessful;
+		}
+
+		new_entry->needs_copy = FALSE;
+
+		/*
+		 *	Take an object reference, so that we may
+		 *	release the map lock(s).
+		 */
+
+		assert(src_object != VM_OBJECT_NULL);
+		vm_object_reference(src_object);
+
+		/*
+		 *	Record the timestamp for later verification.
+		 *	Unlock the map.
+		 */
+
+		version.main_timestamp = src_map->timestamp;
+		vm_map_unlock(src_map);
+
+		/*
+		 *	Perform the copy
+		 */
+
+		if (was_wired) {
+			vm_object_lock(src_object);
+			(void) vm_object_copy_slowly(
+					src_object,
+					src_offset,
+					src_size,
+					FALSE,
+					&new_entry->object.vm_object);
+			new_entry->offset = 0;
+			new_entry->needs_copy = FALSE;
+		} else {
+			kern_return_t	result;
+
+			result = vm_object_copy_strategically(src_object,
+				src_offset,
+				src_size,
+				&new_entry->object.vm_object,
+				&new_entry->offset,
+				&new_entry_needs_copy);
+
+			new_entry->needs_copy = new_entry_needs_copy;
+
+
+			if (result != KERN_SUCCESS) {
+				vm_map_copy_entry_dispose(copy, new_entry);
+
+				vm_map_lock(src_map);
+				RETURN(result);
+			}
+
+		}
+
+		/*
+		 *	Throw away the extra reference
+		 */
+
+		vm_object_deallocate(src_object);
+
+		/*
+		 *	Verify that the map has not substantially
+		 *	changed while the copy was being made.
+		 */
+
+		vm_map_lock(src_map);	/* Increments timestamp once! */
+
+		if ((version.main_timestamp + 1) == src_map->timestamp)
+			goto CopySuccessful;
+
+		/*
+		 *	Simple version comparison failed.
+		 *
+		 *	Retry the lookup and verify that the
+		 *	same object/offset are still present.
+		 *
+		 *	[Note: a memory manager that colludes with
+		 *	the calling task can detect that we have
+		 *	cheated.  While the map was unlocked, the
+		 *	mapping could have been changed and restored.]
+		 */
+
+		if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
+			vm_map_copy_entry_dispose(copy, new_entry);
+			RETURN(KERN_INVALID_ADDRESS);
+		}
+
+		src_entry = tmp_entry;
+		vm_map_clip_start(src_map, src_entry, src_start);
+
+		if ((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE)
+			goto VerificationFailed;
+
+		/*
+		 *	The source entry may have shrunk while the map was
+		 *	unlocked; trim the new entry to match.
+		 */
+		if (src_entry->vme_end < new_entry->vme_end)
+			src_size = (new_entry->vme_end = src_entry->vme_end) - src_start;
+
+		if ((src_entry->object.vm_object != src_object) ||
+		    (src_entry->offset != src_offset) ) {
+
+			/*
+			 *	Verification failed.
+			 *
+			 *	Start over with this top-level entry.
+			 */
+
+		 VerificationFailed: ;
+
+			vm_object_deallocate(new_entry->object.vm_object);
+			vm_map_copy_entry_dispose(copy, new_entry);
+			tmp_entry = src_entry;
+			continue;
+		}
+
+		/*
+		 *	Verification succeeded.
+		 */
+
+	 CopySuccessful: ;
+
+		/*
+		 *	Link in the new copy entry.
+		 */
+
+		vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
+				       new_entry);
+
+		/*
+		 *	Determine whether the entire region
+		 *	has been copied.
+		 */
+		src_start = new_entry->vme_end;
+		/* NOTE(review): src_end == 0 was already rejected above, so
+		 * the (src_end != 0) test looks like a wrap-around guard
+		 * kept for safety -- confirm before removing. */
+		if ((src_start >= src_end) && (src_end != 0))
+			break;
+
+		/*
+		 *	Verify that there are no gaps in the region
+		 */
+
+		tmp_entry = src_entry->vme_next;
+		if (tmp_entry->vme_start != src_start)
+			RETURN(KERN_INVALID_ADDRESS);
+	}
+
+	/*
+	 *	If the source should be destroyed, do it now, since the
+	 *	copy was successful.
+	 */
+	if (src_destroy)
+	    (void) vm_map_delete(src_map, trunc_page(src_addr), src_end);
+
+	vm_map_unlock(src_map);
+
+	*copy_result = copy;
+	return(KERN_SUCCESS);
+
+#undef	RETURN
+}
+
+/*
+ *	vm_map_copyin_object:
+ *
+ *	Wrap an object in a copy object.
+ *	Our caller donates an object reference; the copy
+ *	structure takes ownership of it.
+ */
+
+kern_return_t vm_map_copyin_object(
+	vm_object_t	object,
+	vm_offset_t	offset,		/* offset of region in object */
+	vm_size_t	size,		/* size of region in object */
+	vm_map_copy_t	*copy_result)	/* OUT */
+{
+	vm_map_copy_t	result;		/* Copy being built */
+
+	/*
+	 *	Object-type copies carry the object directly and
+	 *	have no entry list at all; the null entry links
+	 *	distinguish them from entry-list copies.
+	 */
+
+	result = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
+	result->type = VM_MAP_COPY_OBJECT;
+	result->cpy_object = object;
+	result->offset = offset;
+	result->size = size;
+	vm_map_copy_first_entry(result) = VM_MAP_ENTRY_NULL;
+	vm_map_copy_last_entry(result) = VM_MAP_ENTRY_NULL;
+
+	*copy_result = result;
+	return(KERN_SUCCESS);
+}
+
+/*
+ *	vm_map_copyin_page_list_cont:
+ *
+ *	Continuation routine for vm_map_copyin_page_list.
+ *
+ *	When vm_map_copyin_page_list cannot fit the whole range into
+ *	one page list, it records its progress in a continuation.
+ *	Once the consumer has used up the first page list, invoking
+ *	the continuation lands here to produce the next batch (or to
+ *	finish a pending source destroy).  A null copy_result means
+ *	the continuation is being aborted.  To avoid deadlocks, the
+ *	pages of the initial list are released before the
+ *	continuation is invoked.
+ *
+ *	NOTE: This is not the same sort of continuation used by
+ *	the scheduler.
+ */
+
+static kern_return_t vm_map_copyin_page_list_cont(
+	vm_map_copyin_args_t	cont_args,
+	vm_map_copy_t		*copy_result)	/* OUT */
+{
+	kern_return_t	ret = 0; /* '=0' to quiet gcc warnings */
+	boolean_t	aborting, destroy_src, destroy_only;
+
+	/*
+	 *	Classify the call: an abort (no result wanted), a
+	 *	pending source destroy, or nothing left but destruction.
+	 */
+	aborting = (copy_result == (vm_map_copy_t *) 0);
+	destroy_src = (cont_args->destroy_len != (vm_size_t) 0);
+	destroy_only = (cont_args->src_len == (vm_size_t) 0);
+
+	if (aborting || destroy_only) {
+		/* No more copying to do; at most tear down the source. */
+		if (destroy_src)
+			ret = vm_map_remove(cont_args->map,
+			    cont_args->destroy_addr,
+			    cont_args->destroy_addr + cont_args->destroy_len);
+		if (!aborting)
+			*copy_result = VM_MAP_COPY_NULL;
+	} else {
+		ret = vm_map_copyin_page_list(cont_args->map,
+		    cont_args->src_addr, cont_args->src_len, destroy_src,
+		    cont_args->steal_pages, copy_result, TRUE);
+
+		if (destroy_src && !cont_args->steal_pages &&
+		    vm_map_copy_has_cont(*copy_result)) {
+			vm_map_copyin_args_t	new_args;
+			/*
+			 *	Hand the pending destroy range on to
+			 *	the freshly created continuation.
+			 */
+			new_args = (vm_map_copyin_args_t)
+				(*copy_result)->cpy_cont_args;
+			new_args->destroy_addr = cont_args->destroy_addr;
+			new_args->destroy_len = cont_args->destroy_len;
+		}
+	}
+
+	/* This continuation is used up: drop its map reference and free it. */
+	vm_map_deallocate(cont_args->map);
+	kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t));
+
+	return(ret);
+}
+
+/*
+ * vm_map_copyin_page_list:
+ *
+ * This is a variant of vm_map_copyin that copies in a list of pages.
+ * If steal_pages is TRUE, the pages are only in the returned list.
+ * If steal_pages is FALSE, the pages are busy and still in their
+ * objects. A continuation may be returned if not all the pages fit:
+ * the recipient of this copy_result must be prepared to deal with it.
+ */
+
+kern_return_t vm_map_copyin_page_list(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ boolean_t steal_pages,
+ vm_map_copy_t *copy_result, /* OUT */
+ boolean_t is_cont)
+{
+ vm_map_entry_t src_entry;
+ vm_page_t m;
+ vm_offset_t src_start;
+ vm_offset_t src_end;
+ vm_size_t src_size;
+ vm_object_t src_object;
+ vm_offset_t src_offset;
+ vm_offset_t src_last_offset;
+ vm_map_copy_t copy; /* Resulting copy */
+ kern_return_t result = KERN_SUCCESS;
+ boolean_t need_map_lookup;
+ vm_map_copyin_args_t cont_args;
+
+ /*
+ * If steal_pages is FALSE, this leaves busy pages in
+ * the object. A continuation must be used if src_destroy
+ * is true in this case (!steal_pages && src_destroy).
+ *
+ * XXX Still have a more general problem of what happens
+ * XXX if the same page occurs twice in a list. Deadlock
+ * XXX can happen if vm_fault_page was called. A
+ * XXX possible solution is to use a continuation if vm_fault_page
+ * XXX is called and we cross a map entry boundary.
+ */
+
+ /*
+ * Check for copies of zero bytes.
+ */
+
+ if (len == 0) {
+ *copy_result = VM_MAP_COPY_NULL;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Check that the end address doesn't overflow
+ */
+
+ if ((src_addr + len) <= src_addr) {
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /*
+ * Compute start and end of region
+ */
+
+ src_start = trunc_page(src_addr);
+ src_end = round_page(src_addr + len);
+
+ /*
+ * XXX VM maps shouldn't end at maximum address
+ */
+
+ if (src_end == 0) {
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /*
+ * Allocate a header element for the page list.
+ *
+ * Record original offset and size, as caller may not
+ * be page-aligned.
+ */
+
+ copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
+ copy->type = VM_MAP_COPY_PAGE_LIST;
+ copy->cpy_npages = 0;
+ copy->offset = src_addr;
+ copy->size = len;
+ copy->cpy_cont = ((kern_return_t (*)()) 0);
+ copy->cpy_cont_args = VM_MAP_COPYIN_ARGS_NULL;
+
+ /*
+ * Find the beginning of the region.
+ */
+
+do_map_lookup:
+
+ vm_map_lock(src_map);
+
+ if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) {
+ result = KERN_INVALID_ADDRESS;
+ goto error;
+ }
+ need_map_lookup = FALSE;
+
+ /*
+ * Go through entries until we get to the end.
+ */
+
+ while (TRUE) {
+
+ if (! (src_entry->protection & VM_PROT_READ)) {
+ result = KERN_PROTECTION_FAILURE;
+ goto error;
+ }
+
+ if (src_end > src_entry->vme_end)
+ src_size = src_entry->vme_end - src_start;
+ else
+ src_size = src_end - src_start;
+
+ src_object = src_entry->object.vm_object;
+ src_offset = src_entry->offset +
+ (src_start - src_entry->vme_start);
+
+ /*
+ * If src_object is NULL, allocate it now;
+ * we're going to fault on it shortly.
+ */
+ if (src_object == VM_OBJECT_NULL) {
+ src_object = vm_object_allocate((vm_size_t)
+ src_entry->vme_end -
+ src_entry->vme_start);
+ src_entry->object.vm_object = src_object;
+ }
+
+ /*
+ * Iterate over pages. Fault in ones that aren't present.
+ */
+ src_last_offset = src_offset + src_size;
+ for (; (src_offset < src_last_offset && !need_map_lookup);
+ src_offset += PAGE_SIZE, src_start += PAGE_SIZE) {
+
+ if (copy->cpy_npages == VM_MAP_COPY_PAGE_LIST_MAX) {
+make_continuation:
+ /*
+ * At this point we have the max number of
+ * pages busy for this thread that we're
+ * willing to allow. Stop here and record
+ * arguments for the remainder. Note:
+ * this means that this routine isn't atomic,
+ * but that's the breaks. Note that only
+ * the first vm_map_copy_t that comes back
+ * from this routine has the right offset
+ * and size; those from continuations are
+ * page rounded, and short by the amount
+ * already done.
+ *
+ * Reset src_end so the src_destroy
+ * code at the bottom doesn't do
+ * something stupid.
+ */
+
+ cont_args = (vm_map_copyin_args_t)
+ kalloc(sizeof(vm_map_copyin_args_data_t));
+ cont_args->map = src_map;
+ vm_map_reference(src_map);
+ cont_args->src_addr = src_start;
+ cont_args->src_len = len - (src_start - src_addr);
+ if (src_destroy) {
+ cont_args->destroy_addr = cont_args->src_addr;
+ cont_args->destroy_len = cont_args->src_len;
+ }
+ else {
+ cont_args->destroy_addr = (vm_offset_t) 0;
+ cont_args->destroy_len = (vm_offset_t) 0;
+ }
+ cont_args->steal_pages = steal_pages;
+
+ copy->cpy_cont_args = cont_args;
+ copy->cpy_cont = vm_map_copyin_page_list_cont;
+
+ src_end = src_start;
+ vm_map_clip_end(src_map, src_entry, src_end);
+ break;
+ }
+
+ /*
+ * Try to find the page of data.
+ */
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ if (((m = vm_page_lookup(src_object, src_offset)) !=
+ VM_PAGE_NULL) && !m->busy && !m->fictitious &&
+ !m->absent && !m->error) {
+
+ /*
+ * This is the page. Mark it busy
+ * and keep the paging reference on
+ * the object whilst we do our thing.
+ */
+ m->busy = TRUE;
+
+ /*
+ * Also write-protect the page, so
+ * that the map`s owner cannot change
+ * the data. The busy bit will prevent
+ * faults on the page from succeeding
+ * until the copy is released; after
+ * that, the page can be re-entered
+ * as writable, since we didn`t alter
+ * the map entry. This scheme is a
+ * cheap copy-on-write.
+ *
+ * Don`t forget the protection and
+ * the page_lock value!
+ *
+ * If the source is being destroyed
+ * AND not shared writable, we don`t
+ * have to protect the page, since
+ * we will destroy the (only)
+ * writable mapping later.
+ */
+ if (!src_destroy ||
+ src_object->use_shared_copy)
+ {
+ pmap_page_protect(m->phys_addr,
+ src_entry->protection
+ & ~m->page_lock
+ & ~VM_PROT_WRITE);
+ }
+
+ }
+ else {
+ vm_prot_t result_prot;
+ vm_page_t top_page;
+ kern_return_t kr;
+
+ /*
+ * Have to fault the page in; must
+ * unlock the map to do so. While
+ * the map is unlocked, anything
+ * can happen, we must lookup the
+ * map entry before continuing.
+ */
+ vm_map_unlock(src_map);
+ need_map_lookup = TRUE;
+retry:
+ result_prot = VM_PROT_READ;
+
+ kr = vm_fault_page(src_object, src_offset,
+ VM_PROT_READ, FALSE, FALSE,
+ &result_prot, &m, &top_page,
+ FALSE, (void (*)()) 0);
+ /*
+ * Cope with what happened.
+ */
+ switch (kr) {
+ case VM_FAULT_SUCCESS:
+ break;
+ case VM_FAULT_INTERRUPTED: /* ??? */
+ case VM_FAULT_RETRY:
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ goto retry;
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ goto retry;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ goto retry;
+ case VM_FAULT_MEMORY_ERROR:
+ /*
+ * Something broke. If this
+ * is a continuation, return
+ * a partial result if possible,
+ * else fail the whole thing.
+ * In the continuation case, the
+ * next continuation call will
+ * get this error if it persists.
+ */
+ vm_map_lock(src_map);
+ if (is_cont &&
+ copy->cpy_npages != 0)
+ goto make_continuation;
+
+ result = KERN_MEMORY_ERROR;
+ goto error;
+ }
+
+ if (top_page != VM_PAGE_NULL) {
+ vm_object_lock(src_object);
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(src_object);
+ vm_object_unlock(src_object);
+ }
+
+ /*
+ * We do not need to write-protect
+ * the page, since it cannot have
+ * been in the pmap (and we did not
+ * enter it above). The busy bit
+ * will protect the page from being
+ * entered as writable until it is
+ * unlocked.
+ */
+
+ }
+
+ /*
+ * The page is busy, its object is locked, and
+ * we have a paging reference on it. Either
+ * the map is locked, or need_map_lookup is
+ * TRUE.
+ *
+ * Put the page in the page list.
+ */
+ copy->cpy_page_list[copy->cpy_npages++] = m;
+ vm_object_unlock(m->object);
+ }
+
+ /*
+ * DETERMINE whether the entire region
+ * has been copied.
+ */
+ if (src_start >= src_end && src_end != 0) {
+ if (need_map_lookup)
+ vm_map_lock(src_map);
+ break;
+ }
+
+ /*
+ * If need_map_lookup is TRUE, have to start over with
+ * another map lookup. Note that we dropped the map
+ * lock (to call vm_fault_page) above only in this case.
+ */
+ if (need_map_lookup)
+ goto do_map_lookup;
+
+ /*
+ * Verify that there are no gaps in the region
+ */
+
+ src_start = src_entry->vme_end;
+ src_entry = src_entry->vme_next;
+ if (src_entry->vme_start != src_start) {
+ result = KERN_INVALID_ADDRESS;
+ goto error;
+ }
+ }
+
+ /*
+ * If steal_pages is true, make sure all
+ * pages in the copy are not in any object
+ * We try to remove them from the original
+ * object, but we may have to copy them.
+ *
+ * At this point every page in the list is busy
+ * and holds a paging reference to its object.
+ * When we're done stealing, every page is busy,
+ * and in no object (m->tabled == FALSE).
+ */
+ src_start = trunc_page(src_addr);
+ if (steal_pages) {
+ int i;
+ vm_offset_t unwire_end;
+
+ unwire_end = src_start;
+ for (i = 0; i < copy->cpy_npages; i++) {
+
+ /*
+ * Remove the page from its object if it
+ * can be stolen. It can be stolen if:
+ *
+ * (1) The source is being destroyed,
+ * the object is temporary, and
+ * not shared.
+ * (2) The page is not precious.
+ *
+ * The not shared check consists of two
+ * parts: (a) there are no objects that
+ * shadow this object. (b) it is not the
+ * object in any shared map entries (i.e.,
+ * use_shared_copy is not set).
+ *
+ * The first check (a) means that we can't
+ * steal pages from objects that are not
+ * at the top of their shadow chains. This
+ * should not be a frequent occurrence.
+ *
+ * Stealing wired pages requires telling the
+ * pmap module to let go of them.
+ *
+ * NOTE: stealing clean pages from objects
+ * whose mappings survive requires a call to
+ * the pmap module. Maybe later.
+ */
+ m = copy->cpy_page_list[i];
+ src_object = m->object;
+ vm_object_lock(src_object);
+
+ if (src_destroy &&
+ src_object->temporary &&
+ (!src_object->shadowed) &&
+ (!src_object->use_shared_copy) &&
+ !m->precious) {
+ vm_offset_t page_vaddr;
+
+ page_vaddr = src_start + (i * PAGE_SIZE);
+ if (m->wire_count > 0) {
+
+ assert(m->wire_count == 1);
+ /*
+ * In order to steal a wired
+ * page, we have to unwire it
+ * first. We do this inline
+ * here because we have the page.
+ *
+ * Step 1: Unwire the map entry.
+ * Also tell the pmap module
+ * that this piece of the
+ * pmap is pageable.
+ */
+ vm_object_unlock(src_object);
+ if (page_vaddr >= unwire_end) {
+ if (!vm_map_lookup_entry(src_map,
+ page_vaddr, &src_entry))
+ panic("vm_map_copyin_page_list: missing wired map entry");
+
+ vm_map_clip_start(src_map, src_entry,
+ page_vaddr);
+ vm_map_clip_end(src_map, src_entry,
+ src_start + src_size);
+
+ assert(src_entry->wired_count > 0);
+ vm_map_entry_reset_wired(src_map, src_entry);
+ unwire_end = src_entry->vme_end;
+ pmap_pageable(vm_map_pmap(src_map),
+ page_vaddr, unwire_end, TRUE);
+ }
+
+ /*
+ * Step 2: Unwire the page.
+ * pmap_remove handles this for us.
+ */
+ vm_object_lock(src_object);
+ }
+
+ /*
+ * Don't need to remove the mapping;
+ * vm_map_delete will handle it.
+ *
+ * Steal the page. Setting the wire count
+ * to zero is vm_page_unwire without
+ * activating the page.
+ */
+ vm_page_lock_queues();
+ vm_page_remove(m);
+ if (m->wire_count > 0) {
+ m->wire_count = 0;
+ vm_page_wire_count--;
+ } else {
+ VM_PAGE_QUEUES_REMOVE(m);
+ }
+ vm_page_unlock_queues();
+ }
+ else {
+ /*
+ * Have to copy this page. Have to
+ * unlock the map while copying,
+ * hence no further page stealing.
+ * Hence just copy all the pages.
+ * Unlock the map while copying;
+ * This means no further page stealing.
+ */
+ vm_object_unlock(src_object);
+ vm_map_unlock(src_map);
+
+ vm_map_copy_steal_pages(copy);
+
+ vm_map_lock(src_map);
+ break;
+ }
+
+ vm_object_paging_end(src_object);
+ vm_object_unlock(src_object);
+ }
+
+ /*
+ * If the source should be destroyed, do it now, since the
+ * copy was successful.
+ */
+
+ if (src_destroy) {
+ (void) vm_map_delete(src_map, src_start, src_end);
+ }
+ }
+ else {
+ /*
+ * !steal_pages leaves busy pages in the map.
+ * This will cause src_destroy to hang. Use
+ * a continuation to prevent this.
+ */
+ if (src_destroy && !vm_map_copy_has_cont(copy)) {
+ cont_args = (vm_map_copyin_args_t)
+ kalloc(sizeof(vm_map_copyin_args_data_t));
+ vm_map_reference(src_map);
+ cont_args->map = src_map;
+ cont_args->src_addr = (vm_offset_t) 0;
+ cont_args->src_len = (vm_size_t) 0;
+ cont_args->destroy_addr = src_start;
+ cont_args->destroy_len = src_end - src_start;
+ cont_args->steal_pages = FALSE;
+
+ copy->cpy_cont_args = cont_args;
+ copy->cpy_cont = vm_map_copyin_page_list_cont;
+ }
+
+ }
+
+ vm_map_unlock(src_map);
+
+ *copy_result = copy;
+ return(result);
+
+error:
+ vm_map_unlock(src_map);
+ vm_map_copy_discard(copy);
+ return(result);
+}
+
+/*
+ * vm_map_fork:
+ *
+ * Create and return a new map based on the old
+ * map, according to the inheritance values on the
+ * regions in that map.
+ *
+ * The source map must not be locked.
+ */
+vm_map_t vm_map_fork(vm_map_t old_map)
+{
+	vm_map_t	new_map;
+	vm_map_entry_t	old_entry;
+	vm_map_entry_t	new_entry;
+	pmap_t		new_pmap = pmap_create((vm_size_t) 0);
+	vm_size_t	new_size = 0;	/* bytes actually inherited by the child */
+	vm_size_t	entry_size;
+	vm_object_t	object;
+
+	if (new_pmap == PMAP_NULL)
+		return VM_MAP_NULL;
+
+	vm_map_lock(old_map);
+
+	new_map = vm_map_create(new_pmap,
+			old_map->min_offset,
+			old_map->max_offset);
+	if (new_map == VM_MAP_NULL) {
+		/*
+		 * Bug fix: release the old map's lock on this error
+		 * path.  It was previously leaked, leaving old_map
+		 * locked forever after a failed fork.
+		 */
+		vm_map_unlock(old_map);
+		pmap_destroy(new_pmap);
+		return VM_MAP_NULL;
+	}
+
+	for (
+	    old_entry = vm_map_first_entry(old_map);
+	    old_entry != vm_map_to_entry(old_map);
+	    ) {
+		if (old_entry->is_sub_map)
+			panic("vm_map_fork: encountered a submap");
+
+		entry_size = (old_entry->vme_end - old_entry->vme_start);
+
+		switch (old_entry->inheritance) {
+		case VM_INHERIT_NONE:
+			/*
+			 * The entry is simply not passed on to the child.
+			 */
+			break;
+
+		case VM_INHERIT_SHARE:
+			/*
+			 * New sharing code.  New map entry
+			 * references original object.  Temporary
+			 * objects use asynchronous copy algorithm for
+			 * future copies.  First make sure we have
+			 * the right object.  If we need a shadow,
+			 * or someone else already has one, then
+			 * make a new shadow and share it.
+			 */
+
+			object = old_entry->object.vm_object;
+			if (object == VM_OBJECT_NULL) {
+				object = vm_object_allocate(
+					(vm_size_t)(old_entry->vme_end -
+						    old_entry->vme_start));
+				old_entry->offset = 0;
+				old_entry->object.vm_object = object;
+				assert(!old_entry->needs_copy);
+			}
+			else if (old_entry->needs_copy || object->shadowed ||
+			    (object->temporary && !old_entry->is_shared &&
+			     object->size > (vm_size_t)(old_entry->vme_end -
+						old_entry->vme_start))) {
+
+				assert(object->temporary);
+				assert(!(object->shadowed && old_entry->is_shared));
+				vm_object_shadow(
+					&old_entry->object.vm_object,
+					&old_entry->offset,
+					(vm_size_t) (old_entry->vme_end -
+						     old_entry->vme_start));
+
+				/*
+				 * If we're making a shadow for other than
+				 * copy on write reasons, then we have
+				 * to remove write permission.
+				 */
+
+				if (!old_entry->needs_copy &&
+				    (old_entry->protection & VM_PROT_WRITE)) {
+					pmap_protect(vm_map_pmap(old_map),
+						     old_entry->vme_start,
+						     old_entry->vme_end,
+						     old_entry->protection &
+						     ~VM_PROT_WRITE);
+				}
+				old_entry->needs_copy = FALSE;
+				object = old_entry->object.vm_object;
+			}
+
+			/*
+			 * Set use_shared_copy to indicate that
+			 * object must use shared (delayed) copy-on
+			 * write.  This is ignored for permanent objects.
+			 * Bump the reference count for the new entry
+			 */
+
+			vm_object_lock(object);
+			object->use_shared_copy = TRUE;
+			object->ref_count++;
+			vm_object_unlock(object);
+
+			new_entry = vm_map_entry_create(new_map);
+
+			if (old_entry->projected_on != 0) {
+				/*
+				 * If entry is projected buffer, clone the
+				 * entry exactly.
+				 */
+
+				vm_map_entry_copy_full(new_entry, old_entry);
+
+			} else {
+				/*
+				 * Clone the entry, using object ref from above.
+				 * Mark both entries as shared.
+				 */
+
+				vm_map_entry_copy(new_entry, old_entry);
+				old_entry->is_shared = TRUE;
+				new_entry->is_shared = TRUE;
+			}
+
+			/*
+			 * Insert the entry into the new map -- we
+			 * know we're inserting at the end of the new
+			 * map.
+			 */
+
+			vm_map_entry_link(
+				new_map,
+				vm_map_last_entry(new_map),
+				new_entry);
+
+			/*
+			 * Update the physical map
+			 */
+
+			pmap_copy(new_map->pmap, old_map->pmap,
+				new_entry->vme_start,
+				entry_size,
+				old_entry->vme_start);
+
+			new_size += entry_size;
+			break;
+
+		case VM_INHERIT_COPY:
+			/*
+			 * Wired entries cannot take the fast symmetric
+			 * copy-on-write path below; they fall through to
+			 * the vm_map_copyin path in the inner block.
+			 */
+			if (old_entry->wired_count == 0) {
+				boolean_t	src_needs_copy;
+				boolean_t	new_entry_needs_copy;
+
+				new_entry = vm_map_entry_create(new_map);
+				vm_map_entry_copy(new_entry, old_entry);
+
+				if (vm_object_copy_temporary(
+					&new_entry->object.vm_object,
+					&new_entry->offset,
+					&src_needs_copy,
+					&new_entry_needs_copy)) {
+
+					/*
+					 * Handle copy-on-write obligations
+					 */
+
+					if (src_needs_copy && !old_entry->needs_copy) {
+						vm_object_pmap_protect(
+							old_entry->object.vm_object,
+							old_entry->offset,
+							entry_size,
+							(old_entry->is_shared ?
+								PMAP_NULL :
+								old_map->pmap),
+							old_entry->vme_start,
+							old_entry->protection &
+								~VM_PROT_WRITE);
+
+						old_entry->needs_copy = TRUE;
+					}
+
+					new_entry->needs_copy = new_entry_needs_copy;
+
+					/*
+					 * Insert the entry at the end
+					 * of the map.
+					 */
+
+					vm_map_entry_link(new_map,
+						vm_map_last_entry(new_map),
+						new_entry);
+
+					new_size += entry_size;
+					break;
+				}
+
+				/* Fast path failed; discard the tentative entry. */
+				vm_map_entry_dispose(new_map, new_entry);
+			}
+
+			/* INNER BLOCK (copy cannot be optimized) */ {
+
+			vm_offset_t	start = old_entry->vme_start;
+			vm_map_copy_t	copy;
+			vm_map_entry_t	last = vm_map_last_entry(new_map);
+
+			vm_map_unlock(old_map);
+			if (vm_map_copyin(old_map,
+					  start,
+					  entry_size,
+					  FALSE,
+					  &copy)
+			    != KERN_SUCCESS) {
+				vm_map_lock(old_map);
+				if (!vm_map_lookup_entry(old_map, start, &last))
+					last = last->vme_next;
+				old_entry = last;
+				/*
+				 * For some error returns, want to
+				 * skip to the next element.
+				 */
+
+				continue;
+			}
+
+			/*
+			 * Insert the copy into the new map
+			 */
+
+			vm_map_copy_insert(new_map, last, copy);
+			new_size += entry_size;
+
+			/*
+			 * Pick up the traversal at the end of
+			 * the copied region.  The map was unlocked
+			 * while copying, so old_entry may be stale:
+			 * re-lookup instead of trusting it.
+			 */
+
+			vm_map_lock(old_map);
+			start += entry_size;
+			if (!vm_map_lookup_entry(old_map, start, &last))
+				last = last->vme_next;
+			else
+				vm_map_clip_start(old_map, last, start);
+			old_entry = last;
+
+			continue;
+			/* INNER BLOCK (copy cannot be optimized) */ }
+		}
+		/* Advance (SHARE and NONE cases; COPY uses continue above). */
+		old_entry = old_entry->vme_next;
+	}
+
+	new_map->size = new_size;
+	vm_map_unlock(old_map);
+
+	return(new_map);
+}
+
+/*
+ * vm_map_lookup:
+ *
+ * Finds the VM object, offset, and
+ * protection for a given virtual address in the
+ * specified map, assuming a page fault of the
+ * type specified.
+ *
+ * Returns the (object, offset, protection) for
+ * this address, whether it is wired down, and whether
+ * this map has the only reference to the data in question.
+ * In order to later verify this lookup, a "version"
+ * is returned.
+ *
+ * The map should not be locked; it will not be
+ * locked on exit. In order to guarantee the
+ * existence of the returned object, it is returned
+ * locked.
+ *
+ * If a lookup is requested with "write protection"
+ * specified, the map may be changed to perform virtual
+ * copying operations, although the data referenced will
+ * remain the same.
+ */
+kern_return_t vm_map_lookup(
+ vm_map_t *var_map, /* IN/OUT */
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+
+ vm_map_version_t *out_version, /* OUT */
+ vm_object_t *object, /* OUT */
+ vm_offset_t *offset, /* OUT */
+ vm_prot_t *out_prot, /* OUT */
+ boolean_t *wired) /* OUT */
+{
+ vm_map_entry_t entry;
+ vm_map_t map = *var_map;
+ vm_prot_t prot;
+
+ RetryLookup: ;
+
+ /*
+ * Lookup the faulting address.
+ */
+
+ vm_map_lock_read(map);
+
+/*
+ * RETURN drops the map's read lock before returning; every exit
+ * from this function below goes through it.
+ */
+#define RETURN(why) \
+ { \
+ vm_map_unlock_read(map); \
+ return(why); \
+ }
+
+ /*
+ * If the map has an interesting hint, try it before calling
+ * full blown lookup routine.
+ */
+
+ simple_lock(&map->hint_lock);
+ entry = map->hint;
+ simple_unlock(&map->hint_lock);
+
+ if ((entry == vm_map_to_entry(map)) ||
+ (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
+ vm_map_entry_t tmp_entry;
+
+ /*
+ * Entry was either not a valid hint, or the vaddr
+ * was not contained in the entry, so do a full lookup.
+ */
+ if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
+ RETURN(KERN_INVALID_ADDRESS);
+
+ entry = tmp_entry;
+ }
+
+ /*
+ * Handle submaps: descend into the submap and restart the
+ * lookup there (the caller's *var_map is updated).
+ */
+
+ if (entry->is_sub_map) {
+ vm_map_t old_map = map;
+
+ *var_map = map = entry->object.sub_map;
+ vm_map_unlock_read(old_map);
+ goto RetryLookup;
+ }
+
+ /*
+ * Check whether this task is allowed to have
+ * this page.
+ */
+
+ prot = entry->protection;
+
+ if ((fault_type & (prot)) != fault_type) {
+ if ((prot & VM_PROT_NOTIFY) && (fault_type & VM_PROT_WRITE)) {
+ RETURN(KERN_WRITE_PROTECTION_FAILURE);
+ } else {
+ RETURN(KERN_PROTECTION_FAILURE);
+ }
+ }
+
+ /*
+ * If this page is not pageable, we have to get
+ * it for all possible accesses.
+ * (The assignment to *wired inside the condition is intentional.)
+ */
+
+ if ((*wired = (entry->wired_count != 0)))
+ prot = fault_type = entry->protection;
+
+ /*
+ * If the entry was copy-on-write, we either ...
+ */
+
+ if (entry->needs_copy) {
+ /*
+ * If we want to write the page, we may as well
+ * handle that now since we've got the map locked.
+ *
+ * If we don't need to write the page, we just
+ * demote the permissions allowed.
+ */
+
+ if (fault_type & VM_PROT_WRITE) {
+ /*
+ * Make a new object, and place it in the
+ * object chain. Note that no new references
+ * have appeared -- one just moved from the
+ * map to the new object.
+ */
+
+ if (vm_map_lock_read_to_write(map)) {
+ goto RetryLookup;
+ }
+ /* Invalidate outstanding versions (see vm_map_verify). */
+ map->timestamp++;
+
+ vm_object_shadow(
+ &entry->object.vm_object,
+ &entry->offset,
+ (vm_size_t) (entry->vme_end - entry->vme_start));
+
+ entry->needs_copy = FALSE;
+
+ vm_map_lock_write_to_read(map);
+ }
+ else {
+ /*
+ * We're attempting to read a copy-on-write
+ * page -- don't allow writes.
+ */
+
+ prot &= (~VM_PROT_WRITE);
+ }
+ }
+
+ /*
+ * Create an object if necessary.
+ */
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+
+ if (vm_map_lock_read_to_write(map)) {
+ goto RetryLookup;
+ }
+
+ entry->object.vm_object = vm_object_allocate(
+ (vm_size_t)(entry->vme_end - entry->vme_start));
+ entry->offset = 0;
+ vm_map_lock_write_to_read(map);
+ }
+
+ /*
+ * Return the object/offset from this entry. If the entry
+ * was copy-on-write or empty, it has been fixed up. Also
+ * return the protection.
+ */
+
+ *offset = (vaddr - entry->vme_start) + entry->offset;
+ *object = entry->object.vm_object;
+ *out_prot = prot;
+
+ /*
+ * Lock the object to prevent it from disappearing
+ * (it is returned locked to the caller).
+ */
+
+ vm_object_lock(*object);
+
+ /*
+ * Save the version number and unlock the map.
+ */
+
+ out_version->main_timestamp = map->timestamp;
+
+ RETURN(KERN_SUCCESS);
+
+#undef RETURN
+}
+
+/*
+ * vm_map_verify:
+ *
+ * Check that the map has not been modified since the
+ * recorded version. On success the map is left read-locked
+ * until vm_map_verify_done() is called; on failure the read
+ * lock is dropped before returning.
+ */
+boolean_t vm_map_verify(
+	vm_map_t	map,
+	vm_map_version_t *version)	/* REF */
+{
+	boolean_t	unchanged;
+
+	vm_map_lock_read(map);
+	unchanged = (map->timestamp == version->main_timestamp);
+	if (!unchanged)
+		vm_map_unlock_read(map);
+
+	return unchanged;
+}
+
+/*
+ * vm_map_verify_done:
+ *
+ * Releases locks acquired by a vm_map_verify.
+ *
+ * This is now a macro in vm/vm_map.h. It does a
+ * vm_map_unlock_read on the map.
+ */
+
+/*
+ * vm_region:
+ *
+ * User call to obtain information about a region in
+ * a task's address map. *address is rounded to the start
+ * of the entry containing it (or of the next entry, if the
+ * address itself is unmapped).
+ */
+
+kern_return_t vm_region(
+	vm_map_t	map,
+	vm_offset_t	*address,		/* IN/OUT */
+	vm_size_t	*size,			/* OUT */
+	vm_prot_t	*protection,		/* OUT */
+	vm_prot_t	*max_protection,	/* OUT */
+	vm_inherit_t	*inheritance,		/* OUT */
+	boolean_t	*is_shared,		/* OUT */
+	ipc_port_t	*object_name,		/* OUT */
+	vm_offset_t	*offset_in_object)	/* OUT */
+{
+	vm_map_entry_t	lookup_entry;
+	vm_map_entry_t	region;
+
+	if (map == VM_MAP_NULL)
+		return(KERN_INVALID_ARGUMENT);
+
+	vm_map_lock_read(map);
+
+	if (vm_map_lookup_entry(map, *address, &lookup_entry)) {
+		region = lookup_entry;
+	} else {
+		/* Nothing maps the address; report the next entry, if any. */
+		region = lookup_entry->vme_next;
+		if (region == vm_map_to_entry(map)) {
+			vm_map_unlock_read(map);
+			return(KERN_NO_SPACE);
+		}
+	}
+
+	*address = region->vme_start;
+	*size = region->vme_end - region->vme_start;
+	*protection = region->protection;
+	*max_protection = region->max_protection;
+	*inheritance = region->inheritance;
+	*offset_in_object = region->offset;
+
+	if (region->is_sub_map) {
+		/* Submaps have no single backing object to name. */
+		*is_shared = FALSE;
+		*object_name = IP_NULL;
+	} else {
+		*is_shared = region->is_shared;
+		*object_name = vm_object_name(region->object.vm_object);
+	}
+
+	vm_map_unlock_read(map);
+
+	return(KERN_SUCCESS);
+}
+
+/*
+ * vm_region_create_proxy:
+ *
+ * Gets a proxy to the region that ADDRESS belongs to, starting at the
+ * region start, with MAX_PROTECTION and LEN limited by the region ones,
+ * and returns it in *PORT.
+ */
+kern_return_t
+vm_region_create_proxy (task_t task, vm_address_t address,
+ vm_prot_t max_protection, vm_size_t len,
+ ipc_port_t *port)
+{
+ kern_return_t ret;
+ vm_map_entry_t entry, tmp_entry;
+ vm_object_t object;
+ rpc_vm_offset_t rpc_offset, rpc_start;
+ rpc_vm_size_t rpc_len = (rpc_vm_size_t) len;
+ ipc_port_t pager;
+
+ if (task == TASK_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ vm_map_lock_read(task->map);
+ if (!vm_map_lookup_entry(task->map, address, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(task->map)) {
+ vm_map_unlock_read(task->map);
+ return(KERN_NO_SPACE);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ /* Proxies cannot be built on submaps. */
+ if (entry->is_sub_map) {
+ vm_map_unlock_read(task->map);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /* Limit the allowed protection and range to the entry ones */
+ /*
+ * NOTE(review): this bounds LEN by the full entry size, not by
+ * the room remaining after ADDRESS within the entry, so
+ * address+len may extend past vme_end -- confirm intended.
+ */
+ if (len > entry->vme_end - entry->vme_start) {
+ vm_map_unlock_read(task->map);
+ return(KERN_INVALID_ARGUMENT);
+ }
+ max_protection &= entry->max_protection;
+
+ object = entry->object.vm_object;
+ vm_object_lock(object);
+ /* Create a pager in case this is an internal object that does
+ not yet have one. */
+ vm_object_pager_create(object);
+ /* Take our own send right; it is consumed below on success. */
+ pager = ipc_port_copy_send(object->pager);
+ vm_object_unlock(object);
+
+ /*
+ * rpc_start is the offset of ADDRESS within the backing object;
+ * rpc_offset is presumably the proxy-relative offset -- confirm
+ * against the memory_object_create_proxy signature.
+ */
+ rpc_start = (address - entry->vme_start) + entry->offset;
+ rpc_offset = 0;
+
+ vm_map_unlock_read(task->map);
+
+ ret = memory_object_create_proxy(task->itk_space, max_protection,
+ &pager, 1,
+ &rpc_offset, 1,
+ &rpc_start, 1,
+ &rpc_len, 1, port);
+ /* On failure, drop the send right we obtained above. */
+ if (ret)
+ ipc_port_release_send(pager);
+
+ return ret;
+}
+
+/*
+ * Routine:	vm_map_coalesce_entry
+ * Purpose:
+ *	Try to merge ENTRY into the entry immediately before it.
+ * Conditions:
+ *	The map is locked.  If the merge succeeds, ENTRY has been
+ *	unlinked and destroyed by the call.
+ * Returns:
+ *	TRUE iff the entries were coalesced.
+ */
+boolean_t
+vm_map_coalesce_entry(
+	vm_map_t	map,
+	vm_map_entry_t	entry)
+{
+	vm_map_entry_t	prev = entry->vme_prev;
+
+	/*
+	 * The two entries must be real (not the header), adjacent,
+	 * individually mergeable, and identical in every attribute.
+	 */
+	if (entry == vm_map_to_entry(map) || prev == vm_map_to_entry(map))
+		return FALSE;
+	if (prev->vme_end != entry->vme_start)
+		return FALSE;
+	if (prev->is_shared || entry->is_shared ||
+	    prev->is_sub_map || entry->is_sub_map ||
+	    prev->in_transition || entry->in_transition)
+		return FALSE;
+	if (prev->inheritance != entry->inheritance ||
+	    prev->protection != entry->protection ||
+	    prev->max_protection != entry->max_protection ||
+	    prev->needs_copy != entry->needs_copy ||
+	    prev->wired_count != entry->wired_count)
+		return FALSE;
+	if (prev->projected_on != 0 || entry->projected_on != 0)
+		return FALSE;
+
+	assert(prev->gap_size == 0);
+
+	/*
+	 * Let the object layer decide whether the two backing
+	 * objects can be combined into one.
+	 */
+	if (!vm_object_coalesce(prev->object.vm_object,
+		entry->object.vm_object,
+		prev->offset,
+		entry->offset,
+		prev->vme_end - prev->vme_start,
+		entry->vme_end - entry->vme_start,
+		&prev->object.vm_object,
+		&prev->offset))
+		return FALSE;
+
+	/*
+	 * Keep the map's hints from pointing at the doomed entry.
+	 */
+	if (map->hint == entry)
+		SAVE_HINT(map, prev);
+	if (map->first_free == entry)
+		map->first_free = prev;
+
+	/*
+	 * Absorb the range into the previous entry and discard
+	 * ENTRY; wirings, the pmap, and map->size are deliberately
+	 * left untouched.
+	 */
+	prev->vme_end = entry->vme_end;
+	vm_map_entry_unlink(map, entry);
+	vm_map_entry_dispose(map, entry);
+
+	return TRUE;
+}
+
+
+
+/*
+ * Routine:	vm_map_machine_attribute
+ * Purpose:
+ *	Provide machine-specific attributes to mappings,
+ *	such as cachability etc. for machines that provide
+ *	them.  NUMA architectures and machines with big/strange
+ *	caches will use this.
+ * Note:
+ *	Only locking and range checking happen here; everything
+ *	else is delegated to the pmap module, which keeps any
+ *	non-volatile state it needs itself.
+ */
+kern_return_t vm_map_machine_attribute(
+	vm_map_t	map,
+	vm_offset_t	address,
+	vm_size_t	size,
+	vm_machine_attribute_t	attribute,
+	vm_machine_attribute_val_t* value)	/* IN/OUT */
+{
+	kern_return_t	result;
+
+	/* Reject ranges that fall outside the map's bounds. */
+	if (address < vm_map_min(map) ||
+	    (address + size) > vm_map_max(map))
+		return KERN_INVALID_ARGUMENT;
+
+	vm_map_lock(map);
+	result = pmap_attribute(map->pmap, address, size, attribute, value);
+	vm_map_unlock(map);
+
+	return result;
+}
+
+/*
+ * Routine:	vm_map_msync
+ * Purpose:
+ *	Synchronize out pages of the given map out to their memory
+ *	manager, if any.  Currently unimplemented: any non-trivial
+ *	request fails with KERN_INVALID_ARGUMENT.
+ */
+kern_return_t vm_map_msync(
+	vm_map_t	map,
+	vm_offset_t	address,
+	vm_size_t	size,
+	vm_sync_t	sync_flags)
+{
+	const vm_sync_t	exclusive =
+		VM_SYNC_ASYNCHRONOUS | VM_SYNC_SYNCHRONOUS;
+
+	if (map == VM_MAP_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* Asynchronous and synchronous sync are mutually exclusive. */
+	if ((sync_flags & exclusive) == exclusive)
+		return KERN_INVALID_ARGUMENT;
+
+	/* Expand the request to whole-page boundaries. */
+	size = round_page(address + size) - trunc_page(address);
+	address = trunc_page(address);
+
+	if (size == 0)
+		return KERN_SUCCESS;
+
+	/* TODO */
+
+	return KERN_INVALID_ARGUMENT;
+}
+
+
+
+#if MACH_KDB
+
+#define printf kdbprintf
+
+/*
+ * vm_map_print: [ debug ]
+ */
+void vm_map_print(db_expr_t addr, boolean_t have_addr, db_expr_t count, const char *modif)
+{
+ vm_map_t map;
+ vm_map_entry_t entry;
+
+ /* With no address argument, dump the current task's map. */
+ if (!have_addr)
+ map = current_thread()->task->map;
+ else
+ map = (vm_map_t)addr;
+
+ iprintf("Map 0x%X: name=\"%s\", pmap=0x%X,",
+ (vm_offset_t) map, map->name, (vm_offset_t) (map->pmap));
+ printf("ref=%d,nentries=%d\n", map->ref_count, map->hdr.nentries);
+ printf("size=%lu,resident:%lu,wired=%lu\n", map->size,
+ pmap_resident_count(map->pmap) * PAGE_SIZE, map->size_wired);
+ printf("version=%d\n", map->timestamp);
+ /* `indent` is the ddb-global indentation level used by iprintf. */
+ indent += 1;
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ static char *inheritance_name[3] = { "share", "copy", "none"};
+
+ iprintf("map entry 0x%X: ", (vm_offset_t) entry);
+ printf("start=0x%X, end=0x%X\n",
+ (vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
+ iprintf("prot=%X/%X/%s, ",
+ entry->protection,
+ entry->max_protection,
+ inheritance_name[entry->inheritance]);
+ if (entry->wired_count != 0) {
+ printf("wired, ");
+ }
+ if (entry->in_transition) {
+ printf("in transition");
+ if (entry->needs_wakeup)
+ printf("(wake request)");
+ printf(", ");
+ }
+ if (entry->is_sub_map) {
+ printf("submap=0x%X, offset=0x%X\n",
+ (vm_offset_t) entry->object.sub_map,
+ (vm_offset_t) entry->offset);
+ } else {
+ printf("object=0x%X, offset=0x%X",
+ (vm_offset_t) entry->object.vm_object,
+ (vm_offset_t) entry->offset);
+ if (entry->is_shared)
+ printf(", shared");
+ if (entry->needs_copy)
+ printf(", copy needed");
+ printf("\n");
+
+ /* Only dump an object the first time it appears in the map. */
+ if ((entry->vme_prev == vm_map_to_entry(map)) ||
+ (entry->vme_prev->object.vm_object != entry->object.vm_object)) {
+ indent += 1;
+ vm_object_print(entry->object.vm_object);
+ indent -= 1;
+ }
+ }
+ }
+ indent -= 1;
+}
+
+/*
+ * Routine: vm_map_copy_print
+ * Purpose:
+ * Pretty-print a copy object for ddb.
+ */
+
+void vm_map_copy_print(const vm_map_copy_t copy)
+{
+ int i, npages;
+
+ printf("copy object 0x%x\n", copy);
+
+ /* `indent` is the ddb-global indentation level used by iprintf. */
+ indent += 1;
+
+ iprintf("type=%d", copy->type);
+ switch (copy->type) {
+ case VM_MAP_COPY_ENTRY_LIST:
+ printf("[entry_list]");
+ break;
+
+ case VM_MAP_COPY_OBJECT:
+ printf("[object]");
+ break;
+
+ case VM_MAP_COPY_PAGE_LIST:
+ printf("[page_list]");
+ break;
+
+ default:
+ printf("[bad type]");
+ break;
+ }
+ printf(", offset=0x%x", copy->offset);
+ printf(", size=0x%x\n", copy->size);
+
+ /* Type-specific detail. */
+ switch (copy->type) {
+ case VM_MAP_COPY_ENTRY_LIST:
+ /* XXX add stuff here */
+ break;
+
+ case VM_MAP_COPY_OBJECT:
+ iprintf("object=0x%x\n", copy->cpy_object);
+ break;
+
+ case VM_MAP_COPY_PAGE_LIST:
+ iprintf("npages=%d", copy->cpy_npages);
+ printf(", cont=%x", copy->cpy_cont);
+ printf(", cont_args=%x\n", copy->cpy_cont_args);
+ /*
+ * Clamp the count to [0, VM_MAP_COPY_PAGE_LIST_MAX] so a
+ * corrupt copy object cannot make us read past the array.
+ */
+ if (copy->cpy_npages < 0) {
+ npages = 0;
+ } else if (copy->cpy_npages > VM_MAP_COPY_PAGE_LIST_MAX) {
+ npages = VM_MAP_COPY_PAGE_LIST_MAX;
+ } else {
+ npages = copy->cpy_npages;
+ }
+ iprintf("copy->cpy_page_list[0..%d] = {", npages);
+ for (i = 0; i < npages - 1; i++) {
+ printf("0x%x, ", copy->cpy_page_list[i]);
+ }
+ if (npages > 0) {
+ printf("0x%x", copy->cpy_page_list[npages - 1]);
+ }
+ printf("}\n");
+ break;
+ }
+
+ indent -= 1;
+}
+#endif /* MACH_KDB */
diff --git a/vm/vm_map.h b/vm/vm_map.h
new file mode 100644
index 0000000..a4949e4
--- /dev/null
+++ b/vm/vm_map.h
@@ -0,0 +1,585 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_map.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Virtual memory map module definitions.
+ *
+ * Contributors:
+ * avie, dlb, mwyoung
+ */
+
+#ifndef _VM_VM_MAP_H_
+#define _VM_VM_MAP_H_
+
+#include <mach/kern_return.h>
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_attributes.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_wire.h>
+#include <mach/vm_sync.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_types.h>
+#include <kern/list.h>
+#include <kern/lock.h>
+#include <kern/rbtree.h>
+#include <kern/macros.h>
+
+/* TODO: make it dynamic */
+#define KENTRY_DATA_SIZE (256*PAGE_SIZE)
+
+/*
+ * Types defined:
+ *
+ * vm_map_entry_t an entry in an address map.
+ * vm_map_version_t a timestamp of a map, for use with vm_map_lookup
+ * vm_map_copy_t represents memory copied from an address map,
+ * used for inter-map copy operations
+ */
+
+/*
+ * Type: vm_map_object_t [internal use only]
+ *
+ * Description:
+ * The target of an address mapping, either a virtual
+ * memory object or a sub map (of the kernel map).
+ */
+typedef union vm_map_object {
+	struct vm_object	*vm_object;	/* vm object (valid when the
+						   entry's is_sub_map is clear) */
+	struct vm_map		*sub_map;	/* belongs to another map */
+} vm_map_object_t;
+
+/*
+ * Type: vm_map_entry_t [internal use only]
+ *
+ * Description:
+ * A single mapping within an address map.
+ *
+ * Implementation:
+ * Address map entries consist of start and end addresses,
+ * a VM object (or sub map) and offset into that object,
+ * and user-exported inheritance and protection information.
+ * Control information for virtual copy operations is also
+ * stored in the address map entry.
+ */
+struct vm_map_links {
+	struct vm_map_entry	*prev;		/* previous entry */
+	struct vm_map_entry	*next;		/* next entry */
+	vm_offset_t		start;		/* start address */
+	vm_offset_t		end;		/* end address */
+};
+
+struct vm_map_entry {
+	struct vm_map_links	links;		/* links to other entries */
+#define vme_prev		links.prev
+#define vme_next		links.next
+#define vme_start		links.start
+#define vme_end			links.end
+	struct rbtree_node	tree_node;	/* links to other entries in tree */
+	struct rbtree_node	gap_node;	/* links to other entries in gap tree */
+	struct list		gap_list;	/* links to other entries with
+						   the same gap size */
+	vm_size_t		gap_size;	/* size of available memory
+						   following this entry */
+	union vm_map_object	object;		/* object or submap mapped here
+						   (see is_sub_map below) */
+	vm_offset_t		offset;		/* offset into object */
+	unsigned int
+	/* boolean_t */		in_gap_tree:1,	/* entry is in the gap tree if true,
+						   or linked to other entries with
+						   the same gap size if false */
+	/* boolean_t */		is_shared:1,	/* region is shared */
+	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
+	/* boolean_t */		in_transition:1, /* Entry being changed */
+	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
+	/* Only used when object is a vm_object: */
+	/* boolean_t */		needs_copy:1;	/* does object need to be copied */
+
+	/* Only in task maps: */
+	vm_prot_t		protection;	/* protection code */
+	vm_prot_t		max_protection;	/* maximum protection */
+	vm_inherit_t		inheritance;	/* inheritance */
+	unsigned short		wired_count;	/* can be paged if = 0 */
+	vm_prot_t		wired_access;	/* wiring access types, as accepted
+						   by vm_map_pageable; used on wiring
+						   scans when protection != VM_PROT_NONE */
+	struct vm_map_entry	*projected_on;	/* 0 for normal map entry
+			or persistent kernel map projected buffer entry;
+			-1 for non-persistent kernel map projected buffer entry;
+			pointer to corresponding kernel map entry for user map
+			projected buffer entry */
+};
+
+typedef struct vm_map_entry	*vm_map_entry_t;
+
+#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)
+
+/*
+ * Type: struct vm_map_header
+ *
+ * Description:
+ * Header for a vm_map and a vm_map_copy.
+ */
+struct vm_map_header {
+	struct vm_map_links	links;		/* first/last entry links;
+						   links.start/end double as the
+						   map's min and max addresses
+						   (see min_offset/max_offset) */
+	struct rbtree		tree;		/* Sorted tree of entries */
+	struct rbtree		gap_tree;	/* Sorted tree of gap lists
+						   for allocations */
+	int			nentries;	/* Number of entries */
+};
+
+/*
+ * Type: vm_map_t [exported; contents invisible]
+ *
+ * Description:
+ * An address map -- a directory relating valid
+ * regions of a task's address space to the corresponding
+ * virtual memory objects.
+ *
+ * Implementation:
+ * Maps are doubly-linked lists of map entries, sorted
+ * by address. They're also contained in a red-black tree.
+ * One hint is used to start searches again at the last
+ * successful search, insertion, or removal. If the hint
+ * lookup failed (i.e. the hint didn't refer to the requested
+ * entry), a BST lookup is performed. Another hint is used to
+ * quickly find free space.
+ */
+struct vm_map {
+	lock_data_t		lock;		/* Lock for map data */
+	struct vm_map_header	hdr;		/* Map entry header */
+#define min_offset		hdr.links.start	/* start of range */
+#define max_offset		hdr.links.end	/* end of range */
+	pmap_t			pmap;		/* Physical map */
+	vm_size_t		size;		/* virtual size */
+	vm_size_t		size_wired;	/* wired size */
+	int			ref_count;	/* Reference count */
+	decl_simple_lock_data(,	ref_lock)	/* Lock for ref_count field */
+	vm_map_entry_t		hint;		/* hint for quick lookups */
+	decl_simple_lock_data(,	hint_lock)	/* lock for hint storage */
+	vm_map_entry_t		first_free;	/* First free space hint */
+
+	/* Flags */
+	unsigned int		wait_for_space:1, /* Should callers wait
+						     for space? */
+	/* boolean_t */		wiring_required:1; /* New mappings are wired? */
+
+	unsigned int		timestamp;	/* Version number (see
+						   vm_map_version_t) */
+
+	const char		*name;		/* Associated name */
+};
+
+/* The header links double as a sentinel "entry" terminating the list. */
+#define	vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
+#define	vm_map_first_entry(map)	((map)->hdr.links.next)
+#define	vm_map_last_entry(map)	((map)->hdr.links.prev)
+
+/*
+ * Type: vm_map_version_t [exported; contents invisible]
+ *
+ * Description:
+ * Map versions may be used to quickly validate a previous
+ * lookup operation.
+ *
+ * Usage note:
+ * Because they are bulky objects, map versions are usually
+ * passed by reference.
+ *
+ * Implementation:
+ * Just a timestamp for the main map.
+ */
+typedef struct vm_map_version {
+	unsigned int	main_timestamp;	/* the main map's timestamp at
+					   lookup time */
+} vm_map_version_t;
+
+/*
+ * Type: vm_map_copy_t [exported; contents invisible]
+ *
+ * Description:
+ * A map copy object represents a region of virtual memory
+ * that has been copied from an address map but is still
+ * in transit.
+ *
+ * A map copy object may only be used by a single thread
+ * at a time.
+ *
+ * Implementation:
+ * There are three formats for map copy objects.
+ * The first is very similar to the main
+ * address map in structure, and as a result, some
+ * of the internal maintenance functions/macros can
+ * be used with either address maps or map copy objects.
+ *
+ * The map copy object contains a header links
+ * entry onto which the other entries that represent
+ * the region are chained.
+ *
+ * The second format is a single vm object. This is used
+ * primarily in the pageout path. The third format is a
+ * list of vm pages. An optional continuation provides
+ * a hook to be called to obtain more of the memory,
+ * or perform other operations. The continuation takes 3
+ * arguments, a saved arg buffer, a pointer to a new vm_map_copy
+ * (returned) and an abort flag (abort if TRUE).
+ */
+
+#define VM_MAP_COPY_PAGE_LIST_MAX	64
+
+struct vm_map_copy;
+struct vm_map_copyin_args_data;
+/*
+ * Continuation callback: given the saved arguments, produce the next
+ * vm_map_copy; a null copy pointer requests an abort (see
+ * vm_map_copy_abort_cont below).
+ */
+typedef kern_return_t (*vm_map_copy_cont_fn)(struct vm_map_copyin_args_data*, struct vm_map_copy**);
+
+typedef struct vm_map_copy {
+	int		type;	/* which member of c_u is valid */
+#define VM_MAP_COPY_ENTRY_LIST	1
+#define VM_MAP_COPY_OBJECT	2
+#define VM_MAP_COPY_PAGE_LIST	3
+	vm_offset_t	offset;
+	vm_size_t	size;
+	union {
+	    struct vm_map_header	hdr;	/* ENTRY_LIST */
+	    struct {				/* OBJECT */
+		vm_object_t	object;
+	    } c_o;
+	    struct {				/* PAGE_LIST */
+		vm_page_t	page_list[VM_MAP_COPY_PAGE_LIST_MAX];
+		int		npages;
+		vm_map_copy_cont_fn	cont;
+		struct vm_map_copyin_args_data*	cont_args;
+	    } c_p;
+	} c_u;
+} *vm_map_copy_t;
+
+/* Shorthands for the union members of struct vm_map_copy. */
+#define cpy_hdr			c_u.hdr
+
+#define cpy_object		c_u.c_o.object
+
+#define cpy_page_list		c_u.c_p.page_list
+#define cpy_npages		c_u.c_p.npages
+#define cpy_cont		c_u.c_p.cont
+#define cpy_cont_args		c_u.c_p.cont_args
+
+#define VM_MAP_COPY_NULL	((vm_map_copy_t) 0)
+
+/*
+ * Useful macros for entry list copy objects
+ */
+
+#define vm_map_copy_to_entry(copy) \
+ ((struct vm_map_entry *) &(copy)->cpy_hdr.links)
+#define vm_map_copy_first_entry(copy) \
+ ((copy)->cpy_hdr.links.next)
+#define vm_map_copy_last_entry(copy) \
+ ((copy)->cpy_hdr.links.prev)
+
+/*
+ * Continuation macros for page list copy objects
+ */
+
+/*
+ * Discard old_copy's pages, then call its continuation to obtain
+ * new_copy; the continuation is cleared so it cannot run twice.
+ */
+#define vm_map_copy_invoke_cont(old_copy, new_copy, result)		\
+MACRO_BEGIN								\
+	vm_map_copy_page_discard(old_copy);				\
+	*result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args,	\
+					    new_copy);			\
+	(old_copy)->cpy_cont = (kern_return_t (*)()) 0;			\
+MACRO_END
+
+/* Like vm_map_copy_invoke_cont, but keeps old_copy's pages. */
+#define vm_map_copy_invoke_extend_cont(old_copy, new_copy, result)	\
+MACRO_BEGIN								\
+	*result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args,	\
+					    new_copy);			\
+	(old_copy)->cpy_cont = (kern_return_t (*)()) 0;			\
+MACRO_END
+
+/*
+ * Abort: discard the pages and signal the continuation with a null
+ * copy pointer so it can release its saved state.
+ */
+#define vm_map_copy_abort_cont(old_copy)				\
+MACRO_BEGIN								\
+	vm_map_copy_page_discard(old_copy);				\
+	(*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args,		\
+				  (vm_map_copy_t *) 0);			\
+	(old_copy)->cpy_cont = (kern_return_t (*)()) 0;			\
+	(old_copy)->cpy_cont_args = VM_MAP_COPYIN_ARGS_NULL;		\
+MACRO_END
+
+/* TRUE while a continuation remains to be invoked (or aborted). */
+#define vm_map_copy_has_cont(copy)					\
+    (((copy)->cpy_cont) != (kern_return_t (*)()) 0)
+
+/*
+ * Continuation structures for vm_map_copyin_page_list.
+ */
+
+/* Saved arguments for vm_map_copyin_page_list continuations. */
+typedef struct vm_map_copyin_args_data {
+	vm_map_t	map;		/* source map */
+	vm_offset_t	src_addr;	/* source region — presumably the
+					   portion still to be copied;
+					   TODO confirm against vm_map.c */
+	vm_size_t	src_len;
+	vm_offset_t	destroy_addr;
+	vm_size_t	destroy_len;
+	boolean_t	steal_pages;
+} vm_map_copyin_args_data_t, *vm_map_copyin_args_t;
+
+#define	VM_MAP_COPYIN_ARGS_NULL	((vm_map_copyin_args_t) 0)
+
+/*
+ * Macros: vm_map_lock, etc. [internal use only]
+ * Description:
+ * Perform locking on the data portion of a map.
+ */
+
+#define vm_map_lock_init(map)			\
+MACRO_BEGIN					\
+	lock_init(&(map)->lock, TRUE);		\
+	(map)->timestamp = 0;			\
+MACRO_END
+
+/* Exclusive (write) locking; out-of-line functions. */
+void vm_map_lock(struct vm_map *map);
+void vm_map_unlock(struct vm_map *map);
+
+/* Shared (read) locking. */
+#define vm_map_lock_read(map)	lock_read(&(map)->lock)
+#define vm_map_unlock_read(map)	lock_read_done(&(map)->lock)
+#define vm_map_lock_write_to_read(map) \
+		lock_write_to_read(&(map)->lock)
+/* Upgrading to a write lock bumps the map's timestamp (version number). */
+#define vm_map_lock_read_to_write(map) \
+		(lock_read_to_write(&(map)->lock) || (((map)->timestamp++), 0))
+#define vm_map_lock_set_recursive(map) \
+		lock_set_recursive(&(map)->lock)
+#define vm_map_lock_clear_recursive(map) \
+		lock_clear_recursive(&(map)->lock)
+
+/*
+ * Exported procedures that operate on vm_map_t.
+ */
+
+/* Initialize the module */
+extern void vm_map_init(void);
+
+/* Initialize an empty map */
+extern void vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
+/* Create an empty map */
+extern vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
+/* Create a map in the image of an existing map */
+extern vm_map_t vm_map_fork(vm_map_t);
+
+/* Gain a reference to an existing map */
+extern void vm_map_reference(vm_map_t);
+/* Lose a reference */
+extern void vm_map_deallocate(vm_map_t);
+
+/* Enter a mapping */
+extern kern_return_t vm_map_enter(vm_map_t, vm_offset_t *, vm_size_t,
+ vm_offset_t, boolean_t, vm_object_t,
+ vm_offset_t, boolean_t, vm_prot_t,
+ vm_prot_t, vm_inherit_t);
+/* Enter a mapping primitive */
+extern kern_return_t vm_map_find_entry(vm_map_t, vm_offset_t *, vm_size_t,
+ vm_offset_t, vm_object_t,
+ vm_map_entry_t *);
+/* Deallocate a region */
+extern kern_return_t vm_map_remove(vm_map_t, vm_offset_t, vm_offset_t);
+/* Change protection */
+extern kern_return_t vm_map_protect(vm_map_t, vm_offset_t, vm_offset_t,
+ vm_prot_t, boolean_t);
+/* Change inheritance */
+extern kern_return_t vm_map_inherit(vm_map_t, vm_offset_t, vm_offset_t,
+ vm_inherit_t);
+
+/* Look up an address */
+extern kern_return_t vm_map_lookup(vm_map_t *, vm_offset_t, vm_prot_t,
+ vm_map_version_t *, vm_object_t *,
+ vm_offset_t *, vm_prot_t *, boolean_t *);
+/* Find a map entry */
+extern boolean_t vm_map_lookup_entry(vm_map_t, vm_offset_t,
+ vm_map_entry_t *);
+/* Verify that a previous lookup is still valid */
+extern boolean_t vm_map_verify(vm_map_t, vm_map_version_t *);
+/* vm_map_verify_done is now a macro -- see below */
+/* Make a copy of a region */
+extern kern_return_t vm_map_copyin(vm_map_t, vm_offset_t, vm_size_t,
+ boolean_t, vm_map_copy_t *);
+/* Make a copy of a region using a page list copy */
+extern kern_return_t vm_map_copyin_page_list(vm_map_t, vm_offset_t,
+ vm_size_t, boolean_t,
+ boolean_t, vm_map_copy_t *,
+ boolean_t);
+/* Place a copy into a map */
+extern kern_return_t vm_map_copyout(vm_map_t, vm_offset_t *, vm_map_copy_t);
+/* Overwrite existing memory with a copy */
+extern kern_return_t vm_map_copy_overwrite(vm_map_t, vm_offset_t,
+ vm_map_copy_t, boolean_t);
+/* Discard a copy without using it */
+extern void vm_map_copy_discard(vm_map_copy_t);
+extern void vm_map_copy_page_discard(vm_map_copy_t);
+extern vm_map_copy_t vm_map_copy_copy(vm_map_copy_t);
+/* Page list continuation version of previous */
+extern kern_return_t vm_map_copy_discard_cont(vm_map_copyin_args_t,
+ vm_map_copy_t *);
+
+extern boolean_t vm_map_coalesce_entry(vm_map_t, vm_map_entry_t);
+
+/* Add or remove machine-dependent attributes from map regions */
+extern kern_return_t vm_map_machine_attribute(vm_map_t, vm_offset_t,
+ vm_size_t,
+ vm_machine_attribute_t,
+ vm_machine_attribute_val_t *);
+
+extern kern_return_t vm_map_msync(vm_map_t,
+ vm_offset_t, vm_size_t, vm_sync_t);
+
+/* Delete entry from map */
+extern void vm_map_entry_delete(vm_map_t, vm_map_entry_t);
+
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end);
+
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy);
+
+void vm_map_copy_page_discard (vm_map_copy_t copy);
+
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry); /* OUT */
+
+/*
+ * Attach a name to the map. NOTE(review): the name appears intended
+ * for debugging/diagnostic output — confirm with callers; the string
+ * is not copied, so it must outlive the map.
+ */
+static inline void vm_map_set_name(vm_map_t map, const char *name)
+{
+	map->name = name;
+}
+
+
+/*
+ * Functions implemented as macros
+ */
+#define vm_map_min(map) ((map)->min_offset)
+ /* Lowest valid address in
+ * a map */
+
+#define vm_map_max(map) ((map)->max_offset)
+ /* Highest valid address */
+
+#define vm_map_pmap(map) ((map)->pmap)
+ /* Physical map associated
+ * with this address map */
+
+#define vm_map_verify_done(map, version) (vm_map_unlock_read(map))
+ /* Operation that required
+ * a verified lookup is
+ * now complete */
+/*
+ * Pageability functions.
+ */
+extern kern_return_t vm_map_pageable(vm_map_t, vm_offset_t, vm_offset_t,
+ vm_prot_t, boolean_t, boolean_t);
+
+extern kern_return_t vm_map_pageable_all(vm_map_t, vm_wire_t);
+
+/*
+ * Submap object. Must be used to create memory to be put
+ * in a submap by vm_map_submap.
+ */
+extern vm_object_t vm_submap_object;
+
+/*
+ * vm_map_copyin_object:
+ *
+ * Create a copy object from an object.
+ * Our caller donates an object reference.
+ */
+extern kern_return_t vm_map_copyin_object(
+ vm_object_t object,
+ vm_offset_t offset, /* offset of region in object */
+ vm_size_t size, /* size of region in object */
+ vm_map_copy_t *copy_result); /* OUT */
+
+/*
+ * vm_map_submap: [ kernel use only ]
+ *
+ * Mark the given range as handled by a subordinate map.
+ *
+ * This range must have been created with vm_map_find using
+ * the vm_submap_object, and no other operations may have been
+ * performed on this range prior to calling vm_map_submap.
+ *
+ * Only a limited number of operations can be performed
+ *	within this range after calling vm_map_submap:
+ * vm_fault
+ * [Don't try vm_map_copyin!]
+ *
+ * To remove a submapping, one must first remove the
+ * range from the superior map, and then destroy the
+ * submap (if desired). [Better yet, don't try it.]
+ */
+extern kern_return_t vm_map_submap(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_map_t submap);
+
+/*
+ * Wait and wakeup macros for in_transition map entries.
+ */
+#define vm_map_entry_wait(map, interruptible) \
+ MACRO_BEGIN \
+ assert_wait((event_t)&(map)->hdr, interruptible); \
+ vm_map_unlock(map); \
+ thread_block((void (*)()) 0); \
+ MACRO_END
+
+#define vm_map_entry_wakeup(map) thread_wakeup((event_t)&(map)->hdr)
+
+/*
+ * This routine is called only when it is known that
+ * the entry must be split.
+ */
+extern void _vm_map_clip_start(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t start,
+ boolean_t link_gap);
+
+/*
+ * vm_map_clip_end: [ internal use only ]
+ *
+ * Asserts that the given entry ends at or before
+ * the specified address; if necessary,
+ * it splits the entry into two.
+ */
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end,
+ boolean_t link_gap);
+
+#endif /* _VM_VM_MAP_H_ */
diff --git a/vm/vm_object.c b/vm/vm_object.c
new file mode 100644
index 0000000..c238cce
--- /dev/null
+++ b/vm/vm_object.c
@@ -0,0 +1,2994 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_object.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Virtual memory object module.
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/memory_object.h>
+#include <vm/memory_object_default.user.h>
+#include <vm/memory_object_user.user.h>
+#include <machine/vm_param.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/mach.server.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/xpr.h>
+#include <kern/slab.h>
+#include <vm/memory_object.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#endif /* MACH_KDB */
+
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name); /* forward */
+
+/*
+ * Virtual memory objects maintain the actual data
+ * associated with allocated virtual memory. A given
+ * page of memory exists within exactly one object.
+ *
+ * An object is only deallocated when all "references"
+ * are given up. Only one "reference" to a given
+ * region of an object should be writeable.
+ *
+ * Associated with each object is a list of all resident
+ * memory pages belonging to that object; this list is
+ * maintained by the "vm_page" module, but locked by the object's
+ * lock.
+ *
+ * Each object also records the memory object port
+ * that is used by the kernel to request and write
+ * back data (the memory object port, field "pager"),
+ * and the ports provided to the memory manager, the server that
+ * manages that data, to return data and control its
+ * use (the memory object control port, field "pager_request")
+ * and for naming (the memory object name port, field "pager_name").
+ *
+ * Virtual memory objects are allocated to provide
+ * zero-filled memory (vm_allocate) or map a user-defined
+ * memory object into a virtual address space (vm_map).
+ *
+ * Virtual memory objects that refer to a user-defined
+ * memory object are called "permanent", because all changes
+ * made in virtual memory are reflected back to the
+ * memory manager, which may then store it permanently.
+ * Other virtual memory objects are called "temporary",
+ * meaning that changes need be written back only when
+ * necessary to reclaim pages, and that storage associated
+ * with the object can be discarded once it is no longer
+ * mapped.
+ *
+ * A permanent memory object may be mapped into more
+ * than one virtual address space. Moreover, two threads
+ * may attempt to make the first mapping of a memory
+ * object concurrently. Only one thread is allowed to
+ *	complete this mapping; all others wait until the
+ *	"pager_initialized" field is asserted, indicating
+ * that the first thread has initialized all of the
+ * necessary fields in the virtual memory object structure.
+ *
+ * The kernel relies on a *default memory manager* to
+ * provide backing storage for the zero-filled virtual
+ * memory objects. The memory object ports associated
+ * with these temporary virtual memory objects are only
+ * generated and passed to the default memory manager
+ * when it becomes necessary. Virtual memory objects
+ * that depend on the default memory manager are called
+ * "internal". The "pager_created" field is provided to
+ * indicate whether these ports have ever been allocated.
+ *
+ * The kernel may also create virtual memory objects to
+ * hold changed pages after a copy-on-write operation.
+ * In this case, the virtual memory object (and its
+ * backing storage -- its memory object) only contain
+ * those pages that have been changed. The "shadow"
+ * field refers to the virtual memory object that contains
+ * the remainder of the contents. The "shadow_offset"
+ * field indicates where in the "shadow" these contents begin.
+ * The "copy" field refers to a virtual memory object
+ * to which changed pages must be copied before changing
+ * this object, in order to implement another form
+ * of copy-on-write optimization.
+ *
+ * The virtual memory object structure also records
+ * the attributes associated with its memory object.
+ * The "pager_ready", "can_persist" and "copy_strategy"
+ * fields represent those attributes. The "cached_list"
+ * field is used in the implementation of the persistence
+ * attribute.
+ *
+ * ZZZ Continue this comment.
+ */
+
+struct kmem_cache vm_object_cache; /* vm backing store cache */
+
+/*
+ * All wired-down kernel memory belongs to a single virtual
+ * memory object (kernel_object) to avoid wasting data structures.
+ */
+static struct vm_object kernel_object_store;
+vm_object_t kernel_object = &kernel_object_store;
+
+/*
+ * Virtual memory objects that are not referenced by
+ * any address maps, but that are allowed to persist
+ * (an attribute specified by the associated memory manager),
+ * are kept in a queue (vm_object_cached_list).
+ *
+ * When an object from this queue is referenced again,
+ * for example to make another address space mapping,
+ * it must be removed from the queue. That is, the
+ * queue contains *only* objects with zero references.
+ *
+ * The kernel may choose to terminate objects from this
+ * queue in order to reclaim storage. The current policy
+ * is to let memory pressure dynamically adjust the number
+ * of unreferenced objects. The pageout daemon attempts to
+ * collect objects after removing pages from them.
+ *
+ * A simple lock (accessed by routines
+ * vm_object_cache_{lock,lock_try,unlock}) governs the
+ * object cache. It must be held when objects are
+ * added to or removed from the cache (in vm_object_terminate).
+ * The routines that acquire a reference to a virtual
+ * memory object based on one of the memory object ports
+ * must also lock the cache.
+ *
+ * Ideally, the object cache should be more isolated
+ * from the reference mechanism, so that the lock need
+ * not be held to make simple references.
+ */
+queue_head_t vm_object_cached_list;
+
+def_simple_lock_data(static,vm_object_cached_lock_data)
+
+#define vm_object_cache_lock() \
+ simple_lock(&vm_object_cached_lock_data)
+#define vm_object_cache_lock_try() \
+ simple_lock_try(&vm_object_cached_lock_data)
+#define vm_object_cache_unlock() \
+ simple_unlock(&vm_object_cached_lock_data)
+
+/*
+ * Number of physical pages referenced by cached objects.
+ * This counter is protected by its own lock to work around
+ * lock ordering issues.
+ */
+int vm_object_cached_pages;
+
+def_simple_lock_data(static,vm_object_cached_pages_lock_data)
+
+/*
+ * Virtual memory objects are initialized from
+ * a template (see vm_object_allocate).
+ *
+ * When adding a new field to the virtual memory
+ * object structure, be sure to add initialization
+ * (see vm_object_init).
+ */
+struct vm_object vm_object_template;
+
+/*
+ * vm_object_allocate:
+ *
+ * Returns a new object with the given size.
+ */
+
+static void _vm_object_setup(
+	vm_object_t	object,
+	vm_size_t	size)
+{
+	/*
+	 * Copy the template first: it supplies default values for every
+	 * field.  The queue and the lock are per-object state, so they
+	 * must be (re)initialized after the template copy.
+	 */
+	*object = vm_object_template;
+	queue_init(&object->memq);
+	vm_object_lock_init(object);
+	object->size = size;
+}
+
+static vm_object_t _vm_object_allocate(
+	vm_size_t	size)
+{
+	vm_object_t new_object;
+
+	/* Carve a new object out of the cache and give it the
+	   template defaults; returns 0 when the cache is exhausted. */
+	new_object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
+	if (new_object != 0)
+		_vm_object_setup(new_object, size);
+
+	return new_object;
+}
+
+vm_object_t vm_object_allocate(
+	vm_size_t	size)
+{
+	vm_object_t new_object;
+	ipc_port_t name_port;
+
+	/* Allocation failure at this level is fatal. */
+	new_object = _vm_object_allocate(size);
+	if (new_object == 0)
+		panic("vm_object_allocate");
+
+	/* Give the object its naming port. */
+	name_port = ipc_port_alloc_kernel();
+	if (name_port == IP_NULL)
+		panic("vm_object_allocate");
+	new_object->pager_name = name_port;
+	ipc_kobject_set(name_port, (ipc_kobject_t) new_object,
+			IKOT_PAGING_NAME);
+
+	return new_object;
+}
+
+/*
+ * vm_object_bootstrap:
+ *
+ * Initialize the VM objects module.
+ */
+void vm_object_bootstrap(void)
+{
+	kmem_cache_init(&vm_object_cache, "vm_object",
+			sizeof(struct vm_object), 0, NULL, 0);
+
+	queue_init(&vm_object_cached_list);
+	simple_lock_init(&vm_object_cached_lock_data);
+
+	/*
+	 *	Fill in a template object, for quick initialization.
+	 *	Keep in sync with struct vm_object: every field not set
+	 *	up in _vm_object_setup must get its default value here.
+	 */
+
+	vm_object_template.ref_count = 1;
+	vm_object_template.size = 0;
+	vm_object_template.resident_page_count = 0;
+	vm_object_template.copy = VM_OBJECT_NULL;
+	vm_object_template.shadow = VM_OBJECT_NULL;
+	vm_object_template.shadow_offset = (vm_offset_t) 0;
+
+	vm_object_template.pager = IP_NULL;
+	vm_object_template.paging_offset = 0;
+	vm_object_template.pager_request = PAGER_REQUEST_NULL;
+	vm_object_template.pager_name = IP_NULL;
+
+	vm_object_template.pager_created = FALSE;
+	vm_object_template.pager_initialized = FALSE;
+	vm_object_template.pager_ready = FALSE;
+
+	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_NONE;
+		/* ignored if temporary, will be reset before
+		 * permanent object becomes ready */
+	vm_object_template.use_shared_copy = FALSE;
+	vm_object_template.shadowed = FALSE;
+
+	vm_object_template.absent_count = 0;
+	vm_object_template.all_wanted = 0; /* all bits FALSE */
+
+	vm_object_template.paging_in_progress = 0;
+	vm_object_template.used_for_pageout = FALSE;
+	vm_object_template.can_persist = FALSE;
+	vm_object_template.cached = FALSE;
+	vm_object_template.internal = TRUE;
+	vm_object_template.temporary = TRUE;
+	vm_object_template.alive = TRUE;
+	vm_object_template.lock_in_progress = FALSE;
+	vm_object_template.lock_restart = FALSE;
+	vm_object_template.last_alloc = (vm_offset_t) 0;
+
+#if	MACH_PAGEMAP
+	vm_object_template.existence_info = VM_EXTERNAL_NULL;
+#endif	/* MACH_PAGEMAP */
+
+	/*
+	 *	Initialize the "kernel object"
+	 */
+
+	_vm_object_setup(kernel_object,
+		VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
+
+	/*
+	 *	Initialize the "submap object".  Make it as large as the
+	 *	kernel object so that no limit is imposed on submap sizes.
+	 */
+
+	_vm_object_setup(vm_submap_object,
+		VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
+
+#if	MACH_PAGEMAP
+	vm_external_module_initialize();
+#endif	/* MACH_PAGEMAP */
+}
+
+void vm_object_init(void)
+{
+	ipc_port_t port;
+
+	/*
+	 * Finish initializing the kernel object: give it a name port.
+	 * The submap object doesn't need one.
+	 */
+	port = ipc_port_alloc_kernel();
+	kernel_object->pager_name = port;
+	ipc_kobject_set(port, (ipc_kobject_t) kernel_object,
+			IKOT_PAGING_NAME);
+}
+
+/*
+ * Object cache management functions.
+ *
+ * Both the cache and the object must be locked
+ * before calling these functions.
+ */
+
+static void vm_object_cache_add(
+	vm_object_t	object)
+{
+	/* Caller must hold both the cache lock and the object lock. */
+	assert(!object->cached);
+	queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
+	object->cached = TRUE;
+}
+
+static void vm_object_cache_remove(
+	vm_object_t	object)
+{
+	/* Caller must hold both the cache lock and the object lock. */
+	assert(object->cached);
+	queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
+	object->cached = FALSE;
+}
+
+void vm_object_collect(
+	vm_object_t	object)
+{
+	/*
+	 * Entered with the object locked.  The object lock is dropped
+	 * so the cache lock can be taken first, then re-taken, to
+	 * respect the cache-before-object lock ordering.
+	 */
+	vm_object_unlock(object);
+
+	/*
+	 *	The cache lock must be acquired in the proper order.
+	 */
+
+	vm_object_cache_lock();
+	vm_object_lock(object);
+
+	/*
+	 *	If the object was referenced while the lock was
+	 *	dropped, cancel the termination.
+	 */
+
+	if (!vm_object_collectable(object)) {
+		vm_object_unlock(object);
+		vm_object_cache_unlock();
+		return;
+	}
+
+	vm_object_cache_remove(object);
+	/* vm_object_terminate releases the cache lock and destroys the
+	   object, consuming the object lock as well. */
+	vm_object_terminate(object);
+}
+
+/*
+ * vm_object_reference:
+ *
+ * Gets another reference to the given object.
+ */
+void vm_object_reference(
+	vm_object_t	object)
+{
+	/* Bump the reference count under the object lock;
+	   a null object is silently ignored. */
+	if (object != VM_OBJECT_NULL) {
+		vm_object_lock(object);
+		assert(object->ref_count > 0);
+		object->ref_count++;
+		vm_object_unlock(object);
+	}
+}
+
+/*
+ * vm_object_deallocate:
+ *
+ * Release a reference to the specified object,
+ * gained either through a vm_object_allocate
+ * or a vm_object_reference call. When all references
+ * are gone, storage associated with this object
+ * may be relinquished.
+ *
+ * No object may be locked.
+ */
+void vm_object_deallocate(
+	vm_object_t	object)
+{
+	vm_object_t	temp;
+
+	/*
+	 * NOTE: the loop walks down the shadow chain iteratively
+	 * (rather than recursing) to release each shadow reference.
+	 */
+	while (object != VM_OBJECT_NULL) {
+
+		/*
+		 *	The cache holds a reference (uncounted) to
+		 *	the object; we must lock it before removing
+		 *	the object.
+		 */
+
+		vm_object_cache_lock();
+
+		/*
+		 *	Lose the reference
+		 */
+		vm_object_lock(object);
+		if (--(object->ref_count) > 0) {
+
+			/*
+			 *	If there are still references, then
+			 *	we are done.
+			 */
+			vm_object_unlock(object);
+			vm_object_cache_unlock();
+			return;
+		}
+
+		/*
+		 *	See whether this object can persist.  If so, enter
+		 *	it in the cache.  (Only zero-reference objects live
+		 *	on the cached list.)
+		 */
+		if (object->can_persist && (object->resident_page_count > 0)) {
+			vm_object_cache_add(object);
+			vm_object_cache_unlock();
+			vm_object_unlock(object);
+			return;
+		}
+
+		if (object->pager_created &&
+		    !object->pager_initialized) {
+
+			/*
+			 *	Have to wait for initialization.
+			 *	Put reference back and retry
+			 *	when it's initialized.
+			 */
+
+			object->ref_count++;
+			vm_object_assert_wait(object,
+				VM_OBJECT_EVENT_INITIALIZED, FALSE);
+			vm_object_unlock(object);
+			vm_object_cache_unlock();
+			thread_block((void (*)()) 0);
+			continue;
+		}
+
+		/*
+		 *	Take the reference to the shadow object
+		 *	out of the object to be destroyed.
+		 */
+
+		temp = object->shadow;
+
+		/*
+		 *	Destroy the object; the cache lock will
+		 *	be released in the process.
+		 */
+
+		vm_object_terminate(object);
+
+		/*
+		 *	Deallocate the reference to the shadow
+		 *	by continuing the loop with that object
+		 *	in place of the original.
+		 */
+
+		object = temp;
+	}
+}
+
+/*
+ * Routine: vm_object_terminate
+ * Purpose:
+ * Free all resources associated with a vm_object.
+ * In/out conditions:
+ * Upon entry, the object and the cache must be locked,
+ * and the object must have no references.
+ *
+ * The shadow object reference is left alone.
+ *
+ * Upon exit, the cache will be unlocked, and the
+ * object will cease to exist.
+ */
+void vm_object_terminate(
+ vm_object_t object)
+{
+ vm_page_t p;
+ vm_object_t shadow_object;
+
+ /*
+ * Make sure the object isn't already being terminated
+ */
+
+ assert(object->alive);
+ object->alive = FALSE;
+
+ /*
+ * Make sure no one can look us up now.
+ */
+
+ vm_object_remove(object);
+ vm_object_cache_unlock();
+
+ /*
+ * Detach the object from its shadow if we are the shadow's
+ * copy.
+ */
+ if ((shadow_object = object->shadow) != VM_OBJECT_NULL) {
+ vm_object_lock(shadow_object);
+ assert((shadow_object->copy == object) ||
+ (shadow_object->copy == VM_OBJECT_NULL));
+ shadow_object->copy = VM_OBJECT_NULL;
+ vm_object_unlock(shadow_object);
+ }
+
+ /*
+ * The pageout daemon might be playing with our pages.
+ * Now that the object is dead, it won't touch any more
+ * pages, but some pages might already be on their way out.
+ * Hence, we wait until the active paging activities have ceased.
+ */
+
+ vm_object_paging_wait(object, FALSE);
+
+ /*
+ * Clean or free the pages, as appropriate.
+ * It is possible for us to find busy/absent pages,
+ * if some faults on this object were aborted.
+ *
+ * Temporary or pagerless objects have no backing store
+ * to preserve, so their pages are simply freed. Otherwise
+ * dirty/precious pages must be pushed to the pager first.
+ */
+
+ if ((object->temporary) || (object->pager == IP_NULL)) {
+ while (!queue_empty(&object->memq)) {
+ p = (vm_page_t) queue_first(&object->memq);
+
+ VM_PAGE_CHECK(p);
+
+ VM_PAGE_FREE(p);
+ }
+ } else while (!queue_empty(&object->memq)) {
+ p = (vm_page_t) queue_first(&object->memq);
+
+ VM_PAGE_CHECK(p);
+
+ vm_page_lock_queues();
+ VM_PAGE_QUEUES_REMOVE(p);
+ vm_page_unlock_queues();
+
+ if (p->absent || p->private) {
+
+ /*
+ * For private pages, VM_PAGE_FREE just
+ * leaves the page structure around for
+ * its owner to clean up. For absent
+ * pages, the structure is returned to
+ * the appropriate pool.
+ */
+
+ goto free_page;
+ }
+
+ if (!p->dirty)
+ p->dirty = pmap_is_modified(p->phys_addr);
+
+ if (p->dirty || p->precious) {
+ p->busy = TRUE;
+ vm_pageout_page(p, FALSE, TRUE); /* flush page */
+ } else {
+ free_page:
+ VM_PAGE_FREE(p);
+ }
+ }
+
+ assert(object->ref_count == 0);
+ assert(object->paging_in_progress == 0);
+ assert(!object->cached);
+
+ /* Keep the global count of externally-backed objects in sync. */
+ if (!object->internal) {
+ assert(object->resident_page_count == 0);
+
+ vm_page_lock_queues();
+ vm_object_external_count--;
+ vm_page_unlock_queues();
+ }
+
+ /*
+ * Throw away port rights... note that they may
+ * already have been thrown away (by vm_object_destroy
+ * or memory_object_destroy).
+ *
+ * Instead of destroying the control and name ports,
+ * we send all rights off to the memory manager instead,
+ * using memory_object_terminate.
+ */
+
+ vm_object_unlock(object);
+
+ if (object->pager != IP_NULL) {
+ /* consumes our rights for pager, pager_request, pager_name */
+ memory_object_release(object->pager,
+ object->pager_request,
+ object->pager_name);
+ } else if (object->pager_name != IP_NULL) {
+ /* consumes our right for pager_name */
+ ipc_port_dealloc_kernel(object->pager_name);
+ }
+
+#if MACH_PAGEMAP
+ vm_external_destroy(object->existence_info);
+#endif /* MACH_PAGEMAP */
+
+ /*
+ * Free the space for the object.
+ */
+
+ kmem_cache_free(&vm_object_cache, (vm_offset_t) object);
+}
+
+/*
+ * Routine: vm_object_pager_wakeup
+ * Purpose: Wake up anyone waiting for IKOT_PAGER_TERMINATING
+ */
+
+void
+vm_object_pager_wakeup(
+ ipc_port_t pager)
+{
+ boolean_t someone_waiting;
+
+ /*
+ * If anyone was waiting for the memory_object_terminate
+ * to be queued, wake them up now.
+ *
+ * A non-null kobject field on the terminating pager port
+ * doubles as the "someone is waiting" flag; clearing it
+ * (below) ensures the wakeup happens at most once.
+ */
+ vm_object_cache_lock();
+ assert(ip_kotype(pager) == IKOT_PAGER_TERMINATING);
+ someone_waiting = (pager->ip_kobject != IKO_NULL);
+ if (ip_active(pager))
+ ipc_kobject_set(pager, IKO_NULL, IKOT_NONE);
+ vm_object_cache_unlock();
+ if (someone_waiting) {
+ thread_wakeup((event_t) pager);
+ }
+}
+
+/*
+ * Routine: memory_object_release
+ * Purpose: Terminate the pager and release port rights,
+ * just like memory_object_terminate, except
+ * that we wake up anyone blocked in vm_object_enter
+ * waiting for termination message to be queued
+ * before calling memory_object_init.
+ *
+ * Consumes the caller's rights for pager, pager_request,
+ * and pager_name (they are passed to the terminate call).
+ */
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name)
+{
+
+ /*
+ * Keep a reference to pager port;
+ * the terminate might otherwise release all references.
+ */
+ ip_reference(pager);
+
+ /*
+ * Terminate the pager.
+ */
+ (void) memory_object_terminate(pager, pager_request, pager_name);
+
+ /*
+ * Wakeup anyone waiting for this terminate
+ */
+ vm_object_pager_wakeup(pager);
+
+ /*
+ * Release reference to pager port.
+ */
+ ip_release(pager);
+}
+
+/*
+ * Routine: vm_object_abort_activity [internal use only]
+ * Purpose:
+ * Abort paging requests pending on this object.
+ * In/out conditions:
+ * The object is locked on entry and exit.
+ */
+static void vm_object_abort_activity(
+ vm_object_t object)
+{
+ vm_page_t p;
+ vm_page_t next;
+
+ /*
+ * Abort all activity that would be waiting
+ * for a result on this memory object.
+ *
+ * We could also choose to destroy all pages
+ * that we have in memory for this object, but
+ * we don't.
+ */
+
+ p = (vm_page_t) queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t) p)) {
+ /* Save the successor first: p may be freed below. */
+ next = (vm_page_t) queue_next(&p->listq);
+
+ /*
+ * If it's being paged in, destroy it.
+ * If an unlock has been requested, start it again.
+ */
+
+ if (p->busy && p->absent) {
+ VM_PAGE_FREE(p);
+ }
+ else {
+ if (p->unlock_request != VM_PROT_NONE)
+ p->unlock_request = VM_PROT_NONE;
+ PAGE_WAKEUP(p);
+ }
+
+ p = next;
+ }
+
+ /*
+ * Wake up threads waiting for the memory object to
+ * become ready.
+ */
+
+ object->pager_ready = TRUE;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+}
+
+/*
+ * Routine: memory_object_destroy [user interface]
+ * Purpose:
+ * Shut down a memory object, despite the
+ * presence of address map (or other) references
+ * to the vm_object.
+ * Note:
+ * This routine may be called either from the user interface,
+ * or from port destruction handling (via vm_object_destroy).
+ *
+ * The "reason" argument is currently unused.
+ * Consumes the reference donated by the caller.
+ */
+kern_return_t memory_object_destroy(
+ vm_object_t object,
+ kern_return_t reason)
+{
+ ipc_port_t old_object, old_name;
+ pager_request_t old_control;
+
+ if (object == VM_OBJECT_NULL)
+ return KERN_SUCCESS;
+
+ /*
+ * Remove the port associations immediately.
+ *
+ * This will prevent the memory manager from further
+ * meddling. [If it wanted to flush data or make
+ * other changes, it should have done so before performing
+ * the destroy call.]
+ */
+
+ vm_object_cache_lock();
+ vm_object_lock(object);
+ vm_object_remove(object);
+ object->can_persist = FALSE;
+ vm_object_cache_unlock();
+
+ /*
+ * Rip out the ports from the vm_object now... this
+ * will prevent new memory_object calls from succeeding.
+ */
+
+ old_object = object->pager;
+ object->pager = IP_NULL;
+
+ old_control = object->pager_request;
+ object->pager_request = PAGER_REQUEST_NULL;
+
+ old_name = object->pager_name;
+ object->pager_name = IP_NULL;
+
+
+ /*
+ * Wait for existing paging activity (that might
+ * have the old ports) to subside.
+ */
+
+ vm_object_paging_wait(object, FALSE);
+ vm_object_unlock(object);
+
+ /*
+ * Shut down the ports now.
+ *
+ * [Paging operations may be proceeding concurrently --
+ * they'll get the null values established above.]
+ */
+
+ if (old_object != IP_NULL) {
+ /* consumes our rights for object, control, name */
+ memory_object_release(old_object, old_control,
+ old_name);
+ } else if (old_name != IP_NULL) {
+ /*
+ * Consumes our right for name. Note that we must
+ * deallocate the saved old_name: object->pager_name
+ * was already set to IP_NULL above, so using it here
+ * would leak the name port right.
+ */
+ ipc_port_dealloc_kernel(old_name);
+ }
+
+ /*
+ * Lose the reference that was donated for this routine
+ */
+
+ vm_object_deallocate(object);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_object_pmap_protect
+ *
+ * Purpose:
+ * Reduces the permission for all physical
+ * pages in the specified object range.
+ *
+ * If removing write permission only, it is
+ * sufficient to protect only the pages in
+ * the top-level object; only those pages may
+ * have write permission.
+ *
+ * If removing all access, we must follow the
+ * shadow chain from the top-level object to
+ * remove access to all pages in shadowed objects.
+ *
+ * The object must *not* be locked. The object must
+ * be temporary/internal.
+ *
+ * If pmap is not NULL, this routine assumes that
+ * the only mappings for the pages are in that
+ * pmap.
+ */
+/* Debug knob: force the per-page protection path even when a pmap is given. */
+boolean_t vm_object_pmap_protect_by_page = FALSE;
+
+void vm_object_pmap_protect(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ pmap_t pmap,
+ vm_offset_t pmap_start,
+ vm_prot_t prot)
+{
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(object);
+
+ assert(object->temporary && object->internal);
+
+ while (TRUE) {
+ /*
+ * If more than half the pages in the range are resident
+ * and all mappings are known to be in one pmap, a single
+ * pmap_protect over the whole range is cheaper than
+ * per-page calls.
+ */
+ if (object->resident_page_count > atop(size) / 2 &&
+ pmap != PMAP_NULL) {
+ vm_object_unlock(object);
+ pmap_protect(pmap, pmap_start, pmap_start + size, prot);
+ return;
+ }
+
+ {
+ vm_page_t p;
+ vm_offset_t end;
+
+ end = offset + size;
+
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious &&
+ (offset <= p->offset) &&
+ (p->offset < end)) {
+ if ((pmap == PMAP_NULL) ||
+ vm_object_pmap_protect_by_page) {
+ pmap_page_protect(p->phys_addr,
+ prot & ~p->page_lock);
+ } else {
+ vm_offset_t start =
+ pmap_start +
+ (p->offset - offset);
+
+ pmap_protect(pmap,
+ start,
+ start + PAGE_SIZE,
+ prot);
+ }
+ }
+ }
+ }
+
+ if (prot == VM_PROT_NONE) {
+ /*
+ * Must follow shadow chain to remove access
+ * to pages in shadowed objects.
+ */
+ vm_object_t next_object;
+
+ next_object = object->shadow;
+ if (next_object != VM_OBJECT_NULL) {
+ offset += object->shadow_offset;
+ vm_object_lock(next_object);
+ vm_object_unlock(object);
+ object = next_object;
+ }
+ else {
+ /*
+ * End of chain - we are done.
+ */
+ break;
+ }
+ }
+ else {
+ /*
+ * Pages in shadowed objects may never have
+ * write permission - we may stop here.
+ */
+ break;
+ }
+ }
+
+ vm_object_unlock(object);
+}
+
+/*
+ * vm_object_pmap_remove:
+ *
+ * Strip every physical page in the given range of the
+ * object out of all physical maps.
+ *
+ * The object must *not* be locked on entry.
+ */
+void vm_object_pmap_remove(
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ vm_page_t page;
+
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(object);
+ page = (vm_page_t) queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t) page)) {
+ /* Fictitious pages have no real frame to unmap. */
+ if (!page->fictitious &&
+ (start <= page->offset) &&
+ (page->offset < end))
+ pmap_page_protect(page->phys_addr, VM_PROT_NONE);
+ page = (vm_page_t) queue_next(&page->listq);
+ }
+ vm_object_unlock(object);
+}
+
+/*
+ * Routine: vm_object_copy_slowly
+ *
+ * Description:
+ * Copy the specified range of the source
+ * virtual memory object without using
+ * protection-based optimizations (such
+ * as copy-on-write). The pages in the
+ * region are actually copied.
+ *
+ * In/out conditions:
+ * The caller must hold a reference and a lock
+ * for the source virtual memory object. The source
+ * object will be returned *unlocked*.
+ *
+ * Results:
+ * If the copy is completed successfully, KERN_SUCCESS is
+ * returned. If the caller asserted the interruptible
+ * argument, and an interruption occurred while waiting
+ * for a user-generated event, MACH_SEND_INTERRUPTED is
+ * returned. Other values may be returned to indicate
+ * hard errors during the copy operation.
+ *
+ * A new virtual memory object is returned in a
+ * parameter (_result_object). The contents of this
+ * new object, starting at a zero offset, are a copy
+ * of the source memory region. In the event of
+ * an error, this parameter will contain the value
+ * VM_OBJECT_NULL.
+ */
+kern_return_t vm_object_copy_slowly(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ boolean_t interruptible,
+ vm_object_t *_result_object) /* OUT */
+{
+ vm_object_t new_object;
+ vm_offset_t new_offset;
+
+ /* An empty copy request is rejected outright. */
+ if (size == 0) {
+ vm_object_unlock(src_object);
+ *_result_object = VM_OBJECT_NULL;
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Prevent destruction of the source object while we copy.
+ */
+
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ vm_object_unlock(src_object);
+
+ /*
+ * Create a new object to hold the copied pages.
+ * A few notes:
+ * We fill the new object starting at offset 0,
+ * regardless of the input offset.
+ * We don't bother to lock the new object within
+ * this routine, since we have the only reference.
+ */
+
+ new_object = vm_object_allocate(size);
+ new_offset = 0;
+
+ assert(size == trunc_page(size)); /* Will the loop terminate? */
+
+ for ( ;
+ size != 0 ;
+ src_offset += PAGE_SIZE, new_offset += PAGE_SIZE, size -= PAGE_SIZE
+ ) {
+ vm_page_t new_page;
+ vm_fault_return_t result;
+
+ /* Block until a destination page can be allocated. */
+ while ((new_page = vm_page_alloc(new_object, new_offset))
+ == VM_PAGE_NULL) {
+ VM_PAGE_WAIT((void (*)()) 0);
+ }
+
+ /*
+ * Fault in the source page, retrying on transient
+ * conditions until the fault either succeeds or
+ * fails hard.
+ */
+ do {
+ vm_prot_t prot = VM_PROT_READ;
+ vm_page_t _result_page;
+ vm_page_t top_page;
+ vm_page_t result_page;
+
+ vm_object_lock(src_object);
+ src_object->paging_in_progress++;
+
+ result = vm_fault_page(src_object, src_offset,
+ VM_PROT_READ, FALSE, interruptible,
+ &prot, &_result_page, &top_page,
+ FALSE, (void (*)()) 0);
+
+ switch(result) {
+ case VM_FAULT_SUCCESS:
+ result_page = _result_page;
+
+ /*
+ * We don't need to hold the object
+ * lock -- the busy page will be enough.
+ * [We don't care about picking up any
+ * new modifications.]
+ *
+ * Copy the page to the new object.
+ *
+ * POLICY DECISION:
+ * If result_page is clean,
+ * we could steal it instead
+ * of copying.
+ */
+
+ vm_object_unlock(result_page->object);
+ vm_page_copy(result_page, new_page);
+
+ /*
+ * Let go of both pages (make them
+ * not busy, perform wakeup, activate).
+ */
+
+ new_page->busy = FALSE;
+ new_page->dirty = TRUE;
+ vm_object_lock(result_page->object);
+ PAGE_WAKEUP_DONE(result_page);
+
+ vm_page_lock_queues();
+ if (!result_page->active &&
+ !result_page->inactive)
+ vm_page_activate(result_page);
+ vm_page_activate(new_page);
+ vm_page_unlock_queues();
+
+ /*
+ * Release paging references and
+ * top-level placeholder page, if any.
+ */
+
+ vm_fault_cleanup(result_page->object,
+ top_page);
+
+ break;
+
+ case VM_FAULT_RETRY:
+ break;
+
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ break;
+
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ break;
+
+ case VM_FAULT_INTERRUPTED:
+ vm_page_free(new_page);
+ vm_object_deallocate(new_object);
+ vm_object_deallocate(src_object);
+ *_result_object = VM_OBJECT_NULL;
+ return MACH_SEND_INTERRUPTED;
+
+ case VM_FAULT_MEMORY_ERROR:
+ /*
+ * A policy choice:
+ * (a) ignore pages that we can't
+ * copy
+ * (b) return the null object if
+ * any page fails [chosen]
+ */
+
+ vm_page_free(new_page);
+ vm_object_deallocate(new_object);
+ vm_object_deallocate(src_object);
+ *_result_object = VM_OBJECT_NULL;
+ return KERN_MEMORY_ERROR;
+ }
+ } while (result != VM_FAULT_SUCCESS);
+ }
+
+ /*
+ * Lose the extra reference, and return our object.
+ */
+
+ vm_object_deallocate(src_object);
+ *_result_object = new_object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_object_copy_temporary
+ *
+ * Purpose:
+ * Copy the specified range of the source virtual
+ * memory object, if it can be done without blocking.
+ *
+ * Results:
+ * If the copy is successful, the copy is returned in
+ * the arguments; otherwise, the arguments are not
+ * affected.
+ *
+ * Returns TRUE if the copy was performed (and the
+ * *_src_needs_copy / *_dst_needs_copy flags are valid),
+ * FALSE if the caller must fall back to another strategy.
+ *
+ * In/out conditions:
+ * The object should be unlocked on entry and exit.
+ */
+
+boolean_t vm_object_copy_temporary(
+ vm_object_t *_object, /* INOUT */
+ vm_offset_t *_offset, /* INOUT */
+ boolean_t *_src_needs_copy, /* OUT */
+ boolean_t *_dst_needs_copy) /* OUT */
+{
+ vm_object_t object = *_object;
+
+ /* Copying the null object is trivially done. */
+ if (object == VM_OBJECT_NULL) {
+ *_src_needs_copy = FALSE;
+ *_dst_needs_copy = FALSE;
+ return TRUE;
+ }
+
+ /*
+ * If the object is temporary, we can perform
+ * a symmetric copy-on-write without asking.
+ */
+
+ vm_object_lock(object);
+ if (object->temporary) {
+
+ /*
+ * Shared objects use delayed copy
+ */
+ if (object->use_shared_copy) {
+
+ /*
+ * Asymmetric copy strategy. Destination
+ * must be copied (to allow copy object reuse).
+ * Source is unaffected.
+ */
+ vm_object_unlock(object);
+ object = vm_object_copy_delayed(object);
+ *_object = object;
+ *_src_needs_copy = FALSE;
+ *_dst_needs_copy = TRUE;
+ return TRUE;
+ }
+
+ /*
+ * Make another reference to the object.
+ *
+ * Leave object/offset unchanged.
+ */
+
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ object->shadowed = TRUE;
+ vm_object_unlock(object);
+
+ /*
+ * Both source and destination must make
+ * shadows, and the source must be made
+ * read-only if not already.
+ */
+
+ *_src_needs_copy = TRUE;
+ *_dst_needs_copy = TRUE;
+ return TRUE;
+ }
+
+ if (object->pager_ready &&
+ (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY)) {
+ /* XXX Do something intelligent (see temporary code above) */
+ }
+ vm_object_unlock(object);
+
+ return FALSE;
+}
+
+/*
+ * Routine: vm_object_copy_call [internal]
+ *
+ * Description:
+ * Copy the specified (src_offset, size) portion
+ * of the source object (src_object), using the
+ * user-managed copy algorithm.
+ *
+ * In/out conditions:
+ * The source object must be locked on entry. It
+ * will be *unlocked* on exit.
+ *
+ * Results:
+ * If the copy is successful, KERN_SUCCESS is returned.
+ * This routine is interruptible; if a wait for
+ * a user-generated event is interrupted, MACH_SEND_INTERRUPTED
+ * is returned. Other return values indicate hard errors
+ * in creating the user-managed memory object for the copy.
+ *
+ * A new object that represents the copied virtual
+ * memory is returned in a parameter (*_result_object).
+ * If the return value indicates an error, this parameter
+ * is not valid.
+ */
+static kern_return_t vm_object_copy_call(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ vm_object_t *_result_object) /* OUT */
+{
+ vm_offset_t src_end = src_offset + size;
+ ipc_port_t new_memory_object;
+ vm_object_t new_object;
+ vm_page_t p;
+
+ /*
+ * Create a memory object port to be associated
+ * with this new vm_object.
+ *
+ * Since the kernel has the only rights to this
+ * port, we need not hold the cache lock.
+ *
+ * Since we have the only object reference, we
+ * need not be worried about collapse operations.
+ *
+ */
+
+ new_memory_object = ipc_port_alloc_kernel();
+ if (new_memory_object == IP_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * Set the backing object for the new
+ * temporary object.
+ */
+
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ vm_object_paging_begin(src_object);
+ vm_object_unlock(src_object);
+
+ /* we hold a naked receive right for new_memory_object */
+ (void) ipc_port_make_send(new_memory_object);
+ /* now we also hold a naked send right for new_memory_object */
+
+ /*
+ * Let the memory manager know that a copy operation
+ * is in progress. Note that we're using the old
+ * memory object's ports (for which we're holding
+ * a paging reference)... the memory manager cannot
+ * yet affect the new memory object.
+ */
+
+ (void) memory_object_copy(src_object->pager,
+ src_object->pager_request,
+ src_offset, size,
+ new_memory_object);
+ /* no longer hold the naked receive right for new_memory_object */
+
+ vm_object_lock(src_object);
+ vm_object_paging_end(src_object);
+
+ /*
+ * Remove write access from all of the pages of
+ * the old memory object that we can.
+ */
+
+ queue_iterate(&src_object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious &&
+ (src_offset <= p->offset) &&
+ (p->offset < src_end) &&
+ !(p->page_lock & VM_PROT_WRITE)) {
+ p->page_lock |= VM_PROT_WRITE;
+ pmap_page_protect(p->phys_addr, VM_PROT_ALL & ~p->page_lock);
+ }
+ }
+
+ vm_object_unlock(src_object);
+
+ /*
+ * Initialize the rest of the paging stuff
+ */
+
+ new_object = vm_object_enter(new_memory_object, size, FALSE);
+ assert(new_object);
+ new_object->shadow = src_object;
+ new_object->shadow_offset = src_offset;
+
+ /*
+ * Drop the reference for new_memory_object taken above.
+ */
+
+ ipc_port_release_send(new_memory_object);
+ /* no longer hold the naked send right for new_memory_object */
+
+ *_result_object = new_object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_object_copy_delayed [internal]
+ *
+ * Description:
+ * Copy the specified virtual memory object, using
+ * the asymmetric copy-on-write algorithm.
+ *
+ * In/out conditions:
+ * The object must be unlocked on entry.
+ *
+ * This routine will not block waiting for user-generated
+ * events. It is not interruptible.
+ */
+vm_object_t vm_object_copy_delayed(
+ vm_object_t src_object)
+{
+ vm_object_t new_copy;
+ vm_object_t old_copy;
+ vm_page_t p;
+
+ /*
+ * The user-level memory manager wants to see
+ * all of the changes to this object, but it
+ * has promised not to make any changes on its own.
+ *
+ * Perform an asymmetric copy-on-write, as follows:
+ * Create a new object, called a "copy object"
+ * to hold pages modified by the new mapping
+ * (i.e., the copy, not the original mapping).
+ * Record the original object as the backing
+ * object for the copy object. If the
+ * original mapping does not change a page,
+ * it may be used read-only by the copy.
+ * Record the copy object in the original
+ * object. When the original mapping causes
+ * a page to be modified, it must be copied
+ * to a new page that is "pushed" to the
+ * copy object.
+ * Mark the new mapping (the copy object)
+ * copy-on-write. This makes the copy
+ * object itself read-only, allowing it
+ * to be reused if the original mapping
+ * makes no changes, and simplifying the
+ * synchronization required in the "push"
+ * operation described above.
+ *
+ * The copy-on-write is said to be asymmetric because
+ * the original object is *not* marked copy-on-write.
+ * A copied page is pushed to the copy object, regardless
+ * which party attempted to modify the page.
+ *
+ * Repeated asymmetric copy operations may be done.
+ * If the original object has not been changed since
+ * the last copy, its copy object can be reused.
+ * Otherwise, a new copy object can be inserted
+ * between the original object and its previous
+ * copy object. Since any copy object is read-only,
+ * this cannot affect the contents of the previous copy
+ * object.
+ *
+ * Note that a copy object is higher in the object
+ * tree than the original object; therefore, use of
+ * the copy object recorded in the original object
+ * must be done carefully, to avoid deadlock.
+ */
+
+ /*
+ * Allocate a new copy object before locking, even
+ * though we may not need it later.
+ */
+
+ new_copy = vm_object_allocate(src_object->size);
+
+ vm_object_lock(src_object);
+
+ /*
+ * See whether we can reuse the result of a previous
+ * copy operation.
+ */
+ Retry:
+ old_copy = src_object->copy;
+ if (old_copy != VM_OBJECT_NULL) {
+ /*
+ * Try to get the locks (out of order).
+ * On failure, back off completely and retry,
+ * to avoid deadlocking against the normal
+ * lock ordering.
+ */
+ if (!vm_object_lock_try(old_copy)) {
+ vm_object_unlock(src_object);
+
+ simple_lock_pause(); /* wait a bit */
+
+ vm_object_lock(src_object);
+ goto Retry;
+ }
+
+ /*
+ * Determine whether the old copy object has
+ * been modified.
+ */
+
+ if (old_copy->resident_page_count == 0 &&
+ !old_copy->pager_created) {
+ /*
+ * It has not been modified.
+ *
+ * Return another reference to
+ * the existing copy-object.
+ */
+ assert(old_copy->ref_count > 0);
+ old_copy->ref_count++;
+ vm_object_unlock(old_copy);
+ vm_object_unlock(src_object);
+
+ /* The speculatively allocated object is not needed. */
+ vm_object_deallocate(new_copy);
+
+ return old_copy;
+ }
+
+ /*
+ * The copy-object is always made large enough to
+ * completely shadow the original object, since
+ * it may have several users who want to shadow
+ * the original object at different points.
+ */
+
+ assert((old_copy->shadow == src_object) &&
+ (old_copy->shadow_offset == (vm_offset_t) 0));
+
+ /*
+ * Make the old copy-object shadow the new one.
+ * It will receive no more pages from the original
+ * object.
+ */
+
+ src_object->ref_count--; /* remove ref. from old_copy */
+ assert(src_object->ref_count > 0);
+ old_copy->shadow = new_copy;
+ assert(new_copy->ref_count > 0);
+ new_copy->ref_count++;
+ vm_object_unlock(old_copy); /* done with old_copy */
+ }
+
+ /*
+ * Point the new copy at the existing object.
+ */
+
+ new_copy->shadow = src_object;
+ new_copy->shadow_offset = 0;
+ new_copy->shadowed = TRUE; /* caller must set needs_copy */
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ src_object->copy = new_copy;
+
+ /*
+ * Mark all pages of the existing object copy-on-write.
+ * This object may have a shadow chain below it, but
+ * those pages will already be marked copy-on-write.
+ */
+
+ queue_iterate(&src_object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious)
+ pmap_page_protect(p->phys_addr,
+ (VM_PROT_ALL & ~VM_PROT_WRITE &
+ ~p->page_lock));
+ }
+
+ vm_object_unlock(src_object);
+
+ return new_copy;
+}
+
+/*
+ * Routine: vm_object_copy_strategically
+ *
+ * Purpose:
+ * Perform a copy according to the source object's
+ * declared strategy. This operation may block,
+ * and may be interrupted.
+ *
+ * On success, *dst_object/*dst_offset describe the copy
+ * and *dst_needs_copy tells the caller whether the
+ * destination must still be marked copy-on-write.
+ */
+kern_return_t vm_object_copy_strategically(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ vm_object_t *dst_object, /* OUT */
+ vm_offset_t *dst_offset, /* OUT */
+ boolean_t *dst_needs_copy) /* OUT */
+{
+ kern_return_t result = KERN_SUCCESS; /* to quiet gcc warnings */
+ boolean_t interruptible = TRUE; /* XXX */
+
+ assert(src_object != VM_OBJECT_NULL);
+
+ vm_object_lock(src_object);
+
+ /* XXX assert(!src_object->temporary); JSB FIXME */
+
+ /*
+ * The copy strategy is only valid if the memory manager
+ * is "ready".
+ */
+
+ while (!src_object->pager_ready) {
+ vm_object_wait( src_object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ /* vm_object_wait dropped the lock; check for interruption. */
+ if (interruptible &&
+ (current_thread()->wait_result != THREAD_AWAKENED)) {
+ *dst_object = VM_OBJECT_NULL;
+ *dst_offset = 0;
+ *dst_needs_copy = FALSE;
+ return MACH_SEND_INTERRUPTED;
+ }
+ vm_object_lock(src_object);
+ }
+
+ /*
+ * The object may be temporary (even though it is external).
+ * If so, do a symmetric copy.
+ */
+
+ if (src_object->temporary) {
+ /*
+ * XXX
+ * This does not count as intelligent!
+ * This buys us the object->temporary optimizations,
+ * but we aren't using a symmetric copy,
+ * which may confuse the vm code. The correct thing
+ * to do here is to figure out what to call to get
+ * a temporary shadowing set up.
+ */
+ src_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+
+ /*
+ * The object is permanent. Use the appropriate copy strategy.
+ * (All defined strategies are covered below, so "result"
+ * is always set before returning.)
+ */
+
+ switch (src_object->copy_strategy) {
+ case MEMORY_OBJECT_COPY_NONE:
+ if ((result = vm_object_copy_slowly(
+ src_object,
+ src_offset,
+ size,
+ interruptible,
+ dst_object))
+ == KERN_SUCCESS) {
+ *dst_offset = 0;
+ *dst_needs_copy = FALSE;
+ }
+ break;
+
+ case MEMORY_OBJECT_COPY_CALL:
+ if ((result = vm_object_copy_call(
+ src_object,
+ src_offset,
+ size,
+ dst_object))
+ == KERN_SUCCESS) {
+ *dst_offset = 0;
+ *dst_needs_copy = FALSE;
+ }
+ break;
+
+ case MEMORY_OBJECT_COPY_DELAY:
+ vm_object_unlock(src_object);
+ *dst_object = vm_object_copy_delayed(src_object);
+ *dst_offset = src_offset;
+ *dst_needs_copy = TRUE;
+
+ result = KERN_SUCCESS;
+ break;
+ }
+
+ return result;
+}
+
+/*
+ * vm_object_shadow:
+ *
+ * Create a new object backed by the given range of an
+ * existing object. The caller's reference to the source
+ * is handed to the new object, so the net reference
+ * count of the source is unchanged.
+ *
+ * The new object and the offset into it are returned
+ * through the in/out parameters.
+ */
+
+void vm_object_shadow(
+ vm_object_t *object, /* IN/OUT */
+ vm_offset_t *offset, /* IN/OUT */
+ vm_size_t length)
+{
+ vm_object_t backing = *object;
+ vm_object_t shadow;
+
+ /*
+ * Allocate the shadow object; this must not fail.
+ */
+ shadow = vm_object_allocate(length);
+ if (shadow == VM_OBJECT_NULL)
+ panic("vm_object_shadow: no object for shadowing");
+
+ /*
+ * The shadow takes over the caller's reference to the
+ * backing object, and remembers where in the backing
+ * object its data starts.
+ */
+ shadow->shadow = backing;
+ shadow->shadow_offset = *offset;
+
+ /*
+ * Hand the shadow back to the caller; data in the new
+ * object begins at offset 0.
+ */
+ *offset = 0;
+ *object = shadow;
+}
+
+/*
+ * The relationship between vm_object structures and
+ * the memory_object ports requires careful synchronization.
+ *
+ * All associations are created by vm_object_enter. All three
+ * port fields are filled in, as follows:
+ * pager: the memory_object port itself, supplied by
+ * the user requesting a mapping (or the kernel,
+ * when initializing internal objects); the
+ * kernel simulates holding send rights by keeping
+ * a port reference;
+ * pager_request:
+ * pager_name:
+ * the memory object control and name ports,
+ * created by the kernel; the kernel holds
+ * receive (and ownership) rights to these
+ * ports, but no other references.
+ * All of the ports are referenced by their global names.
+ *
+ * When initialization is complete, the "initialized" field
+ * is asserted. Other mappings using a particular memory object,
+ * and any references to the vm_object gained through the
+ * port association must wait for this initialization to occur.
+ *
+ * In order to allow the memory manager to set attributes before
+ * requests (notably virtual copy operations, but also data or
+ * unlock requests) are made, a "ready" attribute is made available.
+ * Only the memory manager may affect the value of this attribute.
+ * Its value does not affect critical kernel functions, such as
+ * internal object initialization or destruction. [Furthermore,
+ * memory objects created by the kernel are assumed to be ready
+ * immediately; the default memory manager need not explicitly
+ * set the "ready" attribute.]
+ *
+ * [Both the "initialized" and "ready" attribute wait conditions
+ * use the "pager" field as the wait event.]
+ *
+ * The port associations can be broken down by any of the
+ * following routines:
+ * vm_object_terminate:
+ * No references to the vm_object remain, and
+ * the object cannot (or will not) be cached.
+ * This is the normal case, and is done even
+ * though one of the other cases has already been
+ * done.
+ * vm_object_destroy:
+ * The memory_object port has been destroyed,
+ * meaning that the kernel cannot flush dirty
+ * pages or request new data or unlock existing
+ * data.
+ * memory_object_destroy:
+ * The memory manager has requested that the
+ * kernel relinquish rights to the memory object
+ * port. [The memory manager may not want to
+ * destroy the port, but may wish to refuse or
+ * tear down existing memory mappings.]
+ * Each routine that breaks an association must break all of
+ * them at once. At some later time, that routine must clear
+ * the vm_object port fields and release the port rights.
+ * [Furthermore, each routine must cope with the simultaneous
+ * or previous operations of the others.]
+ *
+ * In addition to the lock on the object, the vm_object_cache_lock
+ * governs the port associations. References gained through the
+ * port association require use of the cache lock.
+ *
+ * Because the port fields may be cleared spontaneously, they
+ * cannot be used to determine whether a memory object has
+ * ever been associated with a particular vm_object. [This
+ * knowledge is important to the shadow object mechanism.]
+ * For this reason, an additional "created" attribute is
+ * provided.
+ *
+ * During various paging operations, the port values found in the
+ * vm_object must be valid. To prevent these port rights from being
+ * released, and to prevent the port associations from changing
+ * (other than being removed, i.e., made null), routines may use
+ * the vm_object_paging_begin/end routines [actually, macros].
+ * The implementation uses the "paging_in_progress" and "wanted" fields.
+ * [Operations that alter the validity of the port values include the
+ * termination routines and vm_object_collapse.]
+ */
+
+/*
+ *	Routine:	vm_object_lookup
+ *	Purpose:
+ *		Convert a paging-request (control) port into the
+ *		vm_object it names.  Returns VM_OBJECT_NULL unless
+ *		the port is valid, active, and of kernel-object type
+ *		IKOT_PAGING_REQUEST.
+ *		On success, the caller gains one reference on the
+ *		object; if the object was sitting in the cache
+ *		(ref_count == 0) it is removed from the cache first.
+ *	Locking:
+ *		Takes ip_lock(port), then the object cache lock,
+ *		then the object lock, in that order.
+ */
+vm_object_t vm_object_lookup(
+	ipc_port_t port)
+{
+	vm_object_t object = VM_OBJECT_NULL;
+
+	if (IP_VALID(port)) {
+		ip_lock(port);
+		if (ip_active(port) &&
+		    (ip_kotype(port) == IKOT_PAGING_REQUEST)) {
+			vm_object_cache_lock();
+			object = (vm_object_t) port->ip_kobject;
+			vm_object_lock(object);
+
+			assert(object->alive);
+
+			/*
+			 * A cached object holds no references; revive
+			 * it by pulling it out of the cache before
+			 * handing out the new reference.
+			 */
+			if (object->ref_count == 0)
+				vm_object_cache_remove(object);
+
+			object->ref_count++;
+			vm_object_unlock(object);
+			vm_object_cache_unlock();
+		}
+		ip_unlock(port);
+	}
+
+	return object;
+}
+
+/*
+ *	Routine:	vm_object_lookup_name
+ *	Purpose:
+ *		Same as vm_object_lookup, but converts a "name" port
+ *		(kernel-object type IKOT_PAGING_NAME) instead of a
+ *		control port.  Returns VM_OBJECT_NULL if the port is
+ *		invalid, dead, or of any other type; otherwise the
+ *		caller gains one reference on the object.
+ */
+vm_object_t vm_object_lookup_name(
+	ipc_port_t port)
+{
+	vm_object_t object = VM_OBJECT_NULL;
+
+	if (IP_VALID(port)) {
+		ip_lock(port);
+		if (ip_active(port) &&
+		    (ip_kotype(port) == IKOT_PAGING_NAME)) {
+			vm_object_cache_lock();
+			object = (vm_object_t) port->ip_kobject;
+			vm_object_lock(object);
+
+			assert(object->alive);
+
+			/* revive a cached (ref_count == 0) object */
+			if (object->ref_count == 0)
+				vm_object_cache_remove(object);
+
+			object->ref_count++;
+			vm_object_unlock(object);
+			vm_object_cache_unlock();
+		}
+		ip_unlock(port);
+	}
+
+	return object;
+}
+
+/*
+ *	Routine:	vm_object_destroy
+ *	Purpose:
+ *		Handle the death of a memory_object (pager) port:
+ *		break all port associations of the backing vm_object,
+ *		release the kernel's rights to those ports, restart
+ *		any page requests that were waiting on the pager, and
+ *		drop the reference taken here.
+ *		Does nothing if the port no longer names a pager.
+ */
+void vm_object_destroy(
+	ipc_port_t pager)
+{
+	vm_object_t object;
+	pager_request_t old_request;
+	ipc_port_t old_name;
+
+	/*
+	 * Perform essentially the same operations as in vm_object_lookup,
+	 * except that this time we look up based on the memory_object
+	 * port, not the control port.
+	 */
+	vm_object_cache_lock();
+	if (ip_kotype(pager) != IKOT_PAGER) {
+		vm_object_cache_unlock();
+		return;
+	}
+
+	object = (vm_object_t) pager->ip_kobject;
+	vm_object_lock(object);
+	if (object->ref_count == 0)
+		vm_object_cache_remove(object);
+	object->ref_count++;
+
+	/* the pager is gone, so the object can never be cached again */
+	object->can_persist = FALSE;
+
+	assert(object->pager == pager);
+
+	/*
+	 * Remove the port associations.
+	 *
+	 * Note that the memory_object itself is dead, so
+	 * we don't bother with it.
+	 */
+
+	object->pager = IP_NULL;
+	vm_object_remove(object);
+
+	old_request = object->pager_request;
+	object->pager_request = PAGER_REQUEST_NULL;
+
+	old_name = object->pager_name;
+	object->pager_name = IP_NULL;
+
+	vm_object_unlock(object);
+	vm_object_cache_unlock();
+
+	/*
+	 * Clean up the port references.  Note that there's no
+	 * point in trying the memory_object_terminate call
+	 * because the memory_object itself is dead.
+	 */
+
+	ipc_port_release_send(pager);
+	if (old_request != IP_NULL)
+		ipc_port_dealloc_kernel(old_request);
+	if (old_name != IP_NULL)
+		ipc_port_dealloc_kernel(old_name);
+
+	/*
+	 * Restart pending page requests
+	 *
+	 * NOTE(review): called without the object lock held;
+	 * presumably safe because we still hold the reference
+	 * taken above -- confirm against vm_object_abort_activity's
+	 * locking requirements.
+	 */
+
+	vm_object_abort_activity(object);
+
+	/*
+	 * Lose the object reference.
+	 */
+
+	vm_object_deallocate(object);
+}
+
+/*
+ *	Routine:	vm_object_enter
+ *	Purpose:
+ *		Find a VM object corresponding to the given
+ *		pager; if no such object exists, create one,
+ *		and initialize the pager.
+ *	Returns:
+ *		A referenced object, or VM_OBJECT_NULL if the pager
+ *		port is associated with some non-object kernel type.
+ *		If the pager port is invalid, a fresh anonymous
+ *		object of the given size is returned instead.
+ */
+vm_object_t vm_object_enter(
+	ipc_port_t pager,
+	vm_size_t size,
+	boolean_t internal)
+{
+	vm_object_t object;
+	vm_object_t new_object;
+	boolean_t must_init;
+	ipc_kobject_type_t po;
+
+restart:
+	/* no pager: hand back plain anonymous (zero-fill) memory */
+	if (!IP_VALID(pager))
+		return vm_object_allocate(size);
+
+	new_object = VM_OBJECT_NULL;
+	must_init = FALSE;
+
+	/*
+	 * Look for an object associated with this port.
+	 */
+
+	vm_object_cache_lock();
+	for (;;) {
+		po = ip_kotype(pager);
+
+		/*
+		 * If a previous object is being terminated,
+		 * we must wait for the termination message
+		 * to be queued.
+		 *
+		 * We set kobject to a non-null value to let the
+		 * terminator know that someone is waiting.
+		 * Among the possibilities is that the port
+		 * could die while we're waiting.  Must restart
+		 * instead of continuing the loop.
+		 */
+
+		if (po == IKOT_PAGER_TERMINATING) {
+			pager->ip_kobject = (ipc_kobject_t) pager;
+			assert_wait((event_t) pager, FALSE);
+			vm_object_cache_unlock();
+			thread_block((void (*)()) 0);
+			goto restart;
+		}
+
+		/*
+		 * Bail if there is already a kobject associated
+		 * with the pager port.
+		 */
+		if (po != IKOT_NONE) {
+			break;
+		}
+
+		/*
+		 * We must unlock to create a new object;
+		 * if we do so, we must try the lookup again.
+		 */
+
+		if (new_object == VM_OBJECT_NULL) {
+			vm_object_cache_unlock();
+			new_object = vm_object_allocate(size);
+			vm_object_cache_lock();
+		} else {
+			/*
+			 * Lookup failed twice, and we have something
+			 * to insert; set the object.
+			 */
+
+			ipc_kobject_set(pager,
+					(ipc_kobject_t) new_object,
+					IKOT_PAGER);
+			new_object = VM_OBJECT_NULL;
+			must_init = TRUE;
+		}
+	}
+
+	/* internal objects are always initialized by their creator */
+	if (internal)
+		must_init = TRUE;
+
+	/*
+	 * It's only good if it's a VM object!
+	 */
+
+	object = (po == IKOT_PAGER) ? (vm_object_t) pager->ip_kobject
+				    : VM_OBJECT_NULL;
+
+	if ((object != VM_OBJECT_NULL) && !must_init) {
+		vm_object_lock(object);
+		if (object->ref_count == 0)
+			vm_object_cache_remove(object);
+		object->ref_count++;
+		vm_object_unlock(object);
+
+		vm_stat.hits++;
+	}
+	assert((object == VM_OBJECT_NULL) || (object->ref_count > 0) ||
+		((object->paging_in_progress != 0) && internal));
+
+	vm_stat.lookups++;
+
+	vm_object_cache_unlock();
+
+	/*
+	 * If we raced to create a vm_object but lost, let's
+	 * throw away ours.
+	 */
+
+	if (new_object != VM_OBJECT_NULL)
+		vm_object_deallocate(new_object);
+
+	if (object == VM_OBJECT_NULL)
+		return(object);
+
+	if (must_init) {
+		/*
+		 * Copy the naked send right we were given.
+		 */
+
+		pager = ipc_port_copy_send(pager);
+		if (!IP_VALID(pager))
+			panic("vm_object_enter: port died"); /* XXX */
+
+		object->pager_created = TRUE;
+		object->pager = pager;
+
+		/*
+		 * Allocate request port.
+		 */
+
+		object->pager_request = ipc_port_alloc_kernel();
+		if (object->pager_request == IP_NULL)
+			panic("vm_object_enter: pager request alloc");
+
+		ipc_kobject_set(object->pager_request,
+				(ipc_kobject_t) object,
+				IKOT_PAGING_REQUEST);
+
+		/*
+		 * Let the pager know we're using it.
+		 *
+		 * NOTE(review): object->pager_name is passed below but
+		 * is never allocated in this routine -- presumably it
+		 * is IP_NULL (or set elsewhere) at this point; confirm
+		 * against vm_object_allocate and the name-port setup
+		 * path.
+		 */
+
+		if (internal) {
+			/* acquire a naked send right for the DMM */
+			ipc_port_t DMM = memory_manager_default_reference();
+
+			/* mark the object internal */
+			object->internal = TRUE;
+			assert(object->temporary);
+
+			/* default-pager objects are ready immediately */
+			object->pager_ready = TRUE;
+
+			/* consumes the naked send right for DMM */
+			(void) memory_object_create(DMM,
+				pager,
+				object->size,
+				object->pager_request,
+				object->pager_name,
+				PAGE_SIZE);
+		} else {
+			/* the object is external and not temporary */
+			object->internal = FALSE;
+			object->temporary = FALSE;
+
+			assert(object->resident_page_count == 0);
+			vm_object_external_count++;
+
+			/* user pager objects are not ready until marked so */
+			object->pager_ready = FALSE;
+
+			(void) memory_object_init(pager,
+				object->pager_request,
+				object->pager_name,
+				PAGE_SIZE);
+
+		}
+
+		vm_object_lock(object);
+		object->pager_initialized = TRUE;
+
+		vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
+	} else {
+		vm_object_lock(object);
+	}
+	/*
+	 * [At this point, the object must be locked]
+	 */
+
+	/*
+	 * Wait for the work above to be done by the first
+	 * thread to map this object.
+	 */
+
+	while (!object->pager_initialized) {
+		vm_object_wait(	object,
+				VM_OBJECT_EVENT_INITIALIZED,
+				FALSE);
+		vm_object_lock(object);
+	}
+	vm_object_unlock(object);
+
+	return object;
+}
+
+/*
+ *	Routine:	vm_object_pager_create
+ *	Purpose:
+ *		Create a memory object for an internal object.
+ *	In/out conditions:
+ *		The object is locked on entry and exit;
+ *		it may be unlocked within this call.
+ *	Limitations:
+ *		Only one thread may be performing a
+ *		vm_object_pager_create on an object at
+ *		a time.  Presumably, only the pageout
+ *		daemon will be using this routine.
+ */
+void vm_object_pager_create(
+	vm_object_t object)
+{
+	ipc_port_t pager;
+
+	if (object->pager_created) {
+		/*
+		 * Someone else got to it first...
+		 * wait for them to finish initializing
+		 *
+		 * NOTE(review): the loop tests pager_initialized but
+		 * waits on the PAGER_READY event; both wait conditions
+		 * use the "pager" field as the wait event (see the
+		 * comment block near the top of this file), so the
+		 * wakeup is presumably shared -- confirm.
+		 */
+
+		while (!object->pager_initialized) {
+			vm_object_wait(	object,
+					VM_OBJECT_EVENT_PAGER_READY,
+					FALSE);
+			vm_object_lock(object);
+		}
+		/* returns with the object still locked, per contract */
+		return;
+	}
+
+	/*
+	 * Indicate that a memory object has been assigned
+	 * before dropping the lock, to prevent a race.
+	 */
+
+	object->pager_created = TRUE;
+
+	/*
+	 * Prevent collapse or termination by
+	 * holding a paging reference
+	 */
+
+	vm_object_paging_begin(object);
+	vm_object_unlock(object);
+
+#if	MACH_PAGEMAP
+	object->existence_info = vm_external_create(
+					object->size +
+					object->paging_offset);
+	assert((object->size + object->paging_offset) >=
+		object->size);
+#endif	/* MACH_PAGEMAP */
+
+	/*
+	 * Create the pager, and associate with it
+	 * this object.
+	 *
+	 * Note that we only make the port association
+	 * so that vm_object_enter can properly look up
+	 * the object to complete the initialization...
+	 * we do not expect any user to ever map this
+	 * object.
+	 *
+	 * Since the kernel has the only rights to the
+	 * port, it's safe to install the association
+	 * without holding the cache lock.
+	 */
+
+	pager = ipc_port_alloc_kernel();
+	if (pager == IP_NULL)
+		panic("vm_object_pager_create: allocate pager port");
+
+	(void) ipc_port_make_send(pager);
+	ipc_kobject_set(pager, (ipc_kobject_t) object, IKOT_PAGER);
+
+	/*
+	 * Initialize the rest of the paging stuff
+	 */
+
+	if (vm_object_enter(pager, object->size, TRUE) != object)
+		panic("vm_object_pager_create: mismatch");
+
+	/*
+	 * Drop the naked send right taken above.
+	 */
+
+	ipc_port_release_send(pager);
+
+	/*
+	 * Release the paging reference
+	 */
+
+	vm_object_lock(object);
+	vm_object_paging_end(object);
+}
+
+/*
+ *	Routine:	vm_object_remove
+ *	Purpose:
+ *		Eliminate the pager/object association
+ *		for this pager: detach the kernel-object binding
+ *		from the pager, request, and name ports.  The pager
+ *		port is marked IKOT_PAGER_TERMINATING so concurrent
+ *		vm_object_enter callers know to wait and restart.
+ *	Conditions:
+ *		The object cache must be locked.
+ */
+void vm_object_remove(
+	vm_object_t object)
+{
+	ipc_port_t port;
+
+	if ((port = object->pager) != IP_NULL) {
+		if (ip_kotype(port) == IKOT_PAGER)
+			ipc_kobject_set(port, IKO_NULL,
+					IKOT_PAGER_TERMINATING);
+		else if (ip_kotype(port) != IKOT_NONE)
+			panic("vm_object_remove: bad object port");
+	}
+	if ((port = object->pager_request) != IP_NULL) {
+		if (ip_kotype(port) == IKOT_PAGING_REQUEST)
+			ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
+		else if (ip_kotype(port) != IKOT_NONE)
+			panic("vm_object_remove: bad request port");
+	}
+	if ((port = object->pager_name) != IP_NULL) {
+		if (ip_kotype(port) == IKOT_PAGING_NAME)
+			ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
+		else if (ip_kotype(port) != IKOT_NONE)
+			panic("vm_object_remove: bad name port");
+	}
+}
+
+/*
+ *	Global variables for vm_object_collapse():
+ *
+ *		Counts for normal collapses and bypasses.
+ *		Debugging variables, to watch or disable collapse.
+ */
+long	object_collapses = 0;	/* full collapses performed */
+long	object_bypasses  = 0;	/* shadow-chain bypasses performed */
+
+int		vm_object_collapse_debug = 0;	/* >0: trace; >2: drop to debugger */
+boolean_t	vm_object_collapse_allowed = TRUE;	/* master switch */
+boolean_t	vm_object_collapse_bypass_allowed = TRUE;	/* allow bypass path */
+
+/*
+ *	vm_object_collapse:
+ *
+ *	Collapse an object with the object backing it.
+ *	Pages in the backing object are moved into the
+ *	parent, and the backing object is deallocated.
+ *
+ *	Requires that the object be locked and the page
+ *	queues be unlocked.  May unlock/relock the object,
+ *	so the caller should hold a reference for the object.
+ *
+ *	Two outcomes per iteration:
+ *	  - collapse: the backing object has exactly one reference
+ *	    (ours); absorb its pages and pager state, then free it.
+ *	  - bypass: the backing object is shared, but every page of
+ *	    ours is already resident; re-shadow its own backing
+ *	    object and drop one reference.
+ *	Either way the loop retries with the new backing object.
+ */
+void vm_object_collapse(
+	vm_object_t object)
+{
+	vm_object_t backing_object;
+	vm_offset_t backing_offset;
+	vm_size_t size;
+	vm_offset_t new_offset;
+	vm_page_t p, pp;
+	ipc_port_t old_name_port;
+
+	if (!vm_object_collapse_allowed)
+		return;
+
+	while (TRUE) {
+		/*
+		 * Verify that the conditions are right for collapse:
+		 *
+		 * The object exists and no pages in it are currently
+		 * being paged out (or have ever been paged out).
+		 *
+		 * This check is probably overkill -- if a memory
+		 * object has not been created, the fault handler
+		 * shouldn't release the object lock while paging
+		 * is in progress or absent pages exist.
+		 */
+		if (object == VM_OBJECT_NULL ||
+		    object->pager_created ||
+		    object->paging_in_progress != 0 ||
+		    object->absent_count != 0)
+			return;
+
+		/*
+		 * There is a backing object, and
+		 */
+
+		if ((backing_object = object->shadow) == VM_OBJECT_NULL)
+			return;
+
+		vm_object_lock(backing_object);
+		/*
+		 * ...
+		 * The backing object is not read_only,
+		 * and no pages in the backing object are
+		 * currently being paged out.
+		 * The backing object is internal.
+		 *
+		 * XXX It may be sufficient for the backing
+		 * XXX object to be temporary.
+		 */
+
+		if (!backing_object->internal ||
+		    backing_object->paging_in_progress != 0) {
+			vm_object_unlock(backing_object);
+			return;
+		}
+
+		/*
+		 * The backing object can't be a copy-object:
+		 * the shadow_offset for the copy-object must stay
+		 * as 0.  Furthermore (for the 'we have all the
+		 * pages' case), if we bypass backing_object and
+		 * just shadow the next object in the chain, old
+		 * pages from that object would then have to be copied
+		 * BOTH into the (former) backing_object and into the
+		 * parent object.
+		 */
+		if (backing_object->shadow != VM_OBJECT_NULL &&
+		    backing_object->shadow->copy != VM_OBJECT_NULL) {
+			vm_object_unlock(backing_object);
+			return;
+		}
+
+		/*
+		 * We know that we can either collapse the backing
+		 * object (if the parent is the only reference to
+		 * it) or (perhaps) remove the parent's reference
+		 * to it.
+		 */
+
+		backing_offset = object->shadow_offset;
+		size = object->size;
+
+		/*
+		 * If there is exactly one reference to the backing
+		 * object, we can collapse it into the parent.
+		 */
+
+		if (backing_object->ref_count == 1) {
+			/* try-lock: avoid a lock-order deadlock with
+			   lookup paths that take the cache lock first */
+			if (!vm_object_cache_lock_try()) {
+				vm_object_unlock(backing_object);
+				return;
+			}
+
+			/*
+			 * We can collapse the backing object.
+			 *
+			 * Move all in-memory pages from backing_object
+			 * to the parent.  Pages that have been paged out
+			 * will be overwritten by any of the parent's
+			 * pages that shadow them.
+			 */
+
+			while (!queue_empty(&backing_object->memq)) {
+
+				p = (vm_page_t)
+					queue_first(&backing_object->memq);
+
+				new_offset = (p->offset - backing_offset);
+
+				assert(!p->busy || p->absent);
+
+				/*
+				 * If the parent has a page here, or if
+				 * this page falls outside the parent,
+				 * dispose of it.
+				 *
+				 * Otherwise, move it as planned.
+				 */
+
+				if (p->offset < backing_offset ||
+				    new_offset >= size) {
+					VM_PAGE_FREE(p);
+				} else {
+				    pp = vm_page_lookup(object, new_offset);
+				    if (pp != VM_PAGE_NULL && !pp->absent) {
+					/*
+					 * Parent object has a real page.
+					 * Throw away the backing object's
+					 * page.
+					 */
+					VM_PAGE_FREE(p);
+				    }
+				    else {
+					assert(pp == VM_PAGE_NULL || !
+					       "vm_object_collapse: bad case");
+
+					/*
+					 * Parent now has no page.
+					 * Move the backing object's page up.
+					 */
+					vm_page_rename(p, object, new_offset);
+				    }
+				}
+			}
+
+			/*
+			 * Move the pager from backing_object to object.
+			 *
+			 * XXX We're only using part of the paging space
+			 * for keeps now... we ought to discard the
+			 * unused portion.
+			 */
+
+			switch (vm_object_collapse_debug) {
+			    case 0:
+			    	break;
+			    case 1:
+				if ((backing_object->pager == IP_NULL) &&
+				    (backing_object->pager_request ==
+				     PAGER_REQUEST_NULL))
+				    break;
+				/* Fall through to... */
+
+			    default:
+				printf("vm_object_collapse: %p (pager %p, request %p) up to %p\n",
+					backing_object, backing_object->pager, backing_object->pager_request,
+					object);
+				if (vm_object_collapse_debug > 2)
+					SoftDebugger("vm_object_collapse");
+			}
+
+			/* adopt the backing object's pager ports and
+			   re-point their kernel-object bindings at us */
+			object->pager = backing_object->pager;
+			if (object->pager != IP_NULL)
+				ipc_kobject_set(object->pager,
+						(ipc_kobject_t) object,
+						IKOT_PAGER);
+			object->pager_initialized = backing_object->pager_initialized;
+			object->pager_ready = backing_object->pager_ready;
+			object->pager_created = backing_object->pager_created;
+
+			object->pager_request = backing_object->pager_request;
+			if (object->pager_request != IP_NULL)
+				ipc_kobject_set(object->pager_request,
+						(ipc_kobject_t) object,
+						IKOT_PAGING_REQUEST);
+			/* our own name port is superseded; detach it now,
+			   deallocate it after dropping the locks below */
+			old_name_port = object->pager_name;
+			if (old_name_port != IP_NULL)
+				ipc_kobject_set(old_name_port,
+						IKO_NULL, IKOT_NONE);
+			object->pager_name = backing_object->pager_name;
+			if (object->pager_name != IP_NULL)
+				ipc_kobject_set(object->pager_name,
+						(ipc_kobject_t) object,
+						IKOT_PAGING_NAME);
+
+			vm_object_cache_unlock();
+
+			/*
+			 * If there is no pager, leave paging-offset alone.
+			 */
+			if (object->pager != IP_NULL)
+				object->paging_offset =
+					backing_object->paging_offset +
+						backing_offset;
+
+#if	MACH_PAGEMAP
+			assert(object->existence_info == VM_EXTERNAL_NULL);
+			object->existence_info = backing_object->existence_info;
+#endif	/* MACH_PAGEMAP */
+
+			/*
+			 * Object now shadows whatever backing_object did.
+			 * Note that the reference to backing_object->shadow
+			 * moves from within backing_object to within object.
+			 */
+
+			object->shadow = backing_object->shadow;
+			object->shadow_offset += backing_object->shadow_offset;
+			if (object->shadow != VM_OBJECT_NULL &&
+			    object->shadow->copy != VM_OBJECT_NULL) {
+				panic("vm_object_collapse: we collapsed a copy-object!");
+			}
+			/*
+			 * Discard backing_object.
+			 *
+			 * Since the backing object has no pages, no
+			 * pager left, and no object references within it,
+			 * all that is necessary is to dispose of it.
+			 */
+
+			assert(
+				(backing_object->ref_count == 1) &&
+				(backing_object->resident_page_count == 0) &&
+				(backing_object->paging_in_progress == 0)
+			);
+
+			assert(backing_object->alive);
+			assert(!backing_object->cached);
+			backing_object->alive = FALSE;
+			vm_object_unlock(backing_object);
+
+			vm_object_unlock(object);
+			if (old_name_port != IP_NULL)
+				ipc_port_dealloc_kernel(old_name_port);
+			kmem_cache_free(&vm_object_cache, (vm_offset_t) backing_object);
+			vm_object_lock(object);
+
+			object_collapses++;
+		}
+		else {
+			if (!vm_object_collapse_bypass_allowed) {
+				vm_object_unlock(backing_object);
+				return;
+			}
+
+			/*
+			 * If all of the pages in the backing object are
+			 * shadowed by the parent object, the parent
+			 * object no longer has to shadow the backing
+			 * object; it can shadow the next one in the
+			 * chain.
+			 *
+			 * The backing object must not be paged out - we'd
+			 * have to check all of the paged-out pages, as
+			 * well.
+			 */
+
+			if (backing_object->pager_created) {
+				vm_object_unlock(backing_object);
+				return;
+			}
+
+			/*
+			 * Should have a check for a 'small' number
+			 * of pages here.
+			 */
+
+			queue_iterate(&backing_object->memq, p,
+				      vm_page_t, listq)
+			{
+				new_offset = (p->offset - backing_offset);
+
+				/*
+				 * If the parent has a page here, or if
+				 * this page falls outside the parent,
+				 * keep going.
+				 *
+				 * Otherwise, the backing_object must be
+				 * left in the chain.
+				 */
+
+				if (p->offset >= backing_offset &&
+				    new_offset <= size &&
+				    (pp = vm_page_lookup(object, new_offset))
+				      == VM_PAGE_NULL) {
+					/*
+					 * Page still needed.
+					 * Can't go any further.
+					 */
+					vm_object_unlock(backing_object);
+					return;
+				}
+			}
+
+			/*
+			 * Make the parent shadow the next object
+			 * in the chain.  Deallocating backing_object
+			 * will not remove it, since its reference
+			 * count is at least 2.
+			 */
+
+			vm_object_reference(object->shadow = backing_object->shadow);
+			object->shadow_offset += backing_object->shadow_offset;
+
+			/*
+			 * Backing object might have had a copy pointer
+			 * to us.  If it did, clear it.
+			 */
+			if (backing_object->copy == object)
+				backing_object->copy = VM_OBJECT_NULL;
+
+			/*
+			 * Drop the reference count on backing_object.
+			 * Since its ref_count was at least 2, it
+			 * will not vanish; so we don't need to call
+			 * vm_object_deallocate.
+			 */
+			backing_object->ref_count--;
+			assert(backing_object->ref_count > 0);
+			vm_object_unlock(backing_object);
+
+			object_bypasses ++;
+
+		}
+
+		/*
+		 * Try again with this object's new backing object.
+		 */
+	}
+}
+
+/*
+ *	Routine:	vm_object_page_remove: [internal]
+ *	Purpose:
+ *		Removes all physical pages in the specified
+ *		object range from the object's list of pages.
+ *		Non-fictitious pages are first removed from all
+ *		pmaps (pmap_page_protect with VM_PROT_NONE).
+ *
+ *	In/out conditions:
+ *		The object must be locked.
+ */
+unsigned int vm_object_page_remove_lookup = 0;	/* stat: lookup strategy used */
+unsigned int vm_object_page_remove_iterate = 0;	/* stat: iterate strategy used */
+
+void vm_object_page_remove(
+	vm_object_t object,
+	vm_offset_t start,
+	vm_offset_t end)
+{
+	vm_page_t p, next;
+
+	/*
+	 * One and two page removals are most popular.
+	 * The factor of 16 here is somewhat arbitrary.
+	 * It balances vm_object_lookup vs iteration.
+	 */
+
+	if (atop(end - start) < object->resident_page_count/16) {
+		/* small range: hash-lookup each page in the range */
+		vm_object_page_remove_lookup++;
+
+		for (; start < end; start += PAGE_SIZE) {
+			p = vm_page_lookup(object, start);
+			if (p != VM_PAGE_NULL) {
+				if (!p->fictitious)
+					pmap_page_protect(p->phys_addr,
+							  VM_PROT_NONE);
+				VM_PAGE_FREE(p);
+			}
+		}
+	} else {
+		/* large range: walk the resident-page list instead */
+		vm_object_page_remove_iterate++;
+
+		p = (vm_page_t) queue_first(&object->memq);
+		while (!queue_end(&object->memq, (queue_entry_t) p)) {
+			next = (vm_page_t) queue_next(&p->listq);
+			if ((start <= p->offset) && (p->offset < end)) {
+				if (!p->fictitious)
+					pmap_page_protect(p->phys_addr,
+							  VM_PROT_NONE);
+				VM_PAGE_FREE(p);
+			}
+			p = next;
+		}
+	}
+}
+
+/*
+ *	Routine:	vm_object_coalesce
+ *	Purpose:
+ *		Tries to coalesce two objects backing up adjoining
+ *		regions of memory into a single object.
+ *
+ *	NOTE:	Only works at the moment if one of the objects
+ *		is NULL or if the objects are the same - otherwise,
+ *		which object do we lock first?
+ *	Returns:
+ *		TRUE	if objects have been coalesced.
+ *		FALSE	the objects could not be coalesced.
+ *	Parameters:
+ *		prev_object	First object to coalesce
+ *		prev_offset	Offset into prev_object
+ *		next_object	Second object into coalesce
+ *		next_offset	Offset into next_object
+ *
+ *		prev_size	Size of reference to prev_object
+ *		next_size	Size of reference to next_object
+ *
+ *		new_object	Resulting colesced object
+ *		new_offset	Offset into the resulting object
+ *	Conditions:
+ *		The objects must *not* be locked.
+ *
+ *		If the objects are coalesced successfully, the caller's
+ *		references for both objects are consumed, and the caller
+ *		gains a reference for the new object.
+ */
+
+boolean_t vm_object_coalesce(
+	vm_object_t prev_object,
+	vm_object_t next_object,
+	vm_offset_t prev_offset,
+	vm_offset_t next_offset,
+	vm_size_t prev_size,
+	vm_size_t next_size,
+	vm_object_t *new_object,	/* OUT */
+	vm_offset_t *new_offset)	/* OUT */
+{
+	vm_object_t object;
+	vm_size_t newsize;
+
+	if (prev_object == next_object) {
+		/*
+		 * If neither object actually exists,
+		 * the offsets don't matter.
+		 */
+		if (prev_object == VM_OBJECT_NULL) {
+			*new_object = VM_OBJECT_NULL;
+			*new_offset = 0;
+			return TRUE;
+		}
+
+		/* same object on both sides: mergeable only if the
+		   two ranges are exactly adjacent in object space */
+		if (prev_offset + prev_size == next_offset) {
+			*new_object = prev_object;
+			*new_offset = prev_offset;
+			/*
+			 * Deallocate one of the two references.
+			 */
+			vm_object_deallocate(prev_object);
+			return TRUE;
+		}
+
+		return FALSE;
+	}
+
+	if (next_object != VM_OBJECT_NULL) {
+		/*
+		 * Don't know how to merge two different
+		 * objects yet.
+		 */
+		if (prev_object != VM_OBJECT_NULL)
+			return FALSE;
+
+		object = next_object;
+	} else {
+		object = prev_object;
+	}
+
+	vm_object_lock(object);
+
+	/*
+	 * Try to collapse the object first
+	 */
+	vm_object_collapse(object);
+
+	/*
+	 * Can't coalesce if pages not mapped to
+	 * the object may be in use anyway:
+	 *	. more than one reference
+	 *	. paged out
+	 *	. shadows another object
+	 *	. has a copy elsewhere
+	 *	. paging references (pages might be in page-list)
+	 */
+
+	if ((object->ref_count > 1) ||
+	    object->pager_created ||
+	    object->used_for_pageout ||
+	    (object->shadow != VM_OBJECT_NULL) ||
+	    (object->copy != VM_OBJECT_NULL) ||
+	    (object->paging_in_progress != 0)) {
+		vm_object_unlock(object);
+		return FALSE;
+	}
+
+	if (object == prev_object) {
+		/*
+		 * Remove any pages that may still be in
+		 * the object from a previous deallocation.
+		 */
+		vm_object_page_remove(object,
+				      prev_offset + prev_size,
+				      prev_offset + prev_size + next_size);
+		/*
+		 * Extend the object if necessary.
+		 */
+		newsize = prev_offset + prev_size + next_size;
+		if (newsize > object->size)
+			object->size = newsize;
+
+		*new_offset = prev_offset;
+	} else {
+		/*
+		 * Check if we have enough space in the object
+		 * offset space to insert the new mapping before
+		 * the existing one.
+		 */
+		if (next_offset < prev_size) {
+			vm_object_unlock(object);
+			return FALSE;
+		}
+		/*
+		 * Remove any pages that may still be in
+		 * the object from a previous deallocation.
+		 */
+		vm_object_page_remove(object,
+				      next_offset - prev_size,
+				      next_offset);
+
+		*new_offset = next_offset - prev_size;
+	}
+
+	vm_object_unlock(object);
+	*new_object = object;
+	return TRUE;
+}
+
+/*
+ *	Routine:	vm_object_request_object
+ *	Purpose:
+ *		Thin alias for vm_object_lookup: convert a
+ *		paging-request port into a referenced vm_object.
+ */
+vm_object_t vm_object_request_object(
+	ipc_port_t p)
+{
+	return vm_object_lookup(p);
+}
+
+/*
+ *	Routine:	vm_object_name
+ *	Purpose:
+ *		Returns a naked send right to the "name" port associated
+ *		with this object, or IP_NULL if the object is null or
+ *		has no name port.  Follows the shadow chain to its end
+ *		first, so the name returned is that of the bottom-most
+ *		object.
+ */
+ipc_port_t vm_object_name(
+	vm_object_t object)
+{
+	ipc_port_t p;
+
+	if (object == VM_OBJECT_NULL)
+		return IP_NULL;
+
+	vm_object_lock(object);
+
+	/*
+	 * Walk to the bottom of the shadow chain, hand-over-hand:
+	 * lock the next object before releasing the current one so
+	 * the chain cannot change underneath us.
+	 */
+	while (object->shadow != VM_OBJECT_NULL) {
+		vm_object_t new_object = object->shadow;
+		vm_object_lock(new_object);
+		vm_object_unlock(object);
+		object = new_object;
+	}
+
+	p = object->pager_name;
+	if (p != IP_NULL)
+		p = ipc_port_make_send(p);
+	vm_object_unlock(object);
+
+	return p;
+}
+
+/*
+ *	Attach a set of physical pages to an object, so that they can
+ *	be mapped by mapping the object.  Typically used to map IO memory.
+ *
+ *	The mapping function and its private data are used to obtain the
+ *	physical addresses for each page to be mapped.
+ *
+ *	Returns KERN_NO_ACCESS if map_fn yields the fictitious page
+ *	address for any offset in the range; KERN_SUCCESS otherwise.
+ *	Pages are inserted wired (wire_count = 1) and marked private
+ *	so the VM system never frees the underlying physical memory.
+ */
+kern_return_t
+vm_object_page_map(
+	vm_object_t	object,
+	vm_offset_t	offset,
+	vm_size_t	size,
+	phys_addr_t	(*map_fn)(void *, vm_offset_t),
+	void *		map_fn_data)	/* private to map_fn */
+{
+	int	num_pages;
+	int	i;
+	vm_page_t	m;
+	vm_page_t	old_page;
+	phys_addr_t	addr;
+
+	num_pages = atop(size);
+
+	for (i = 0; i < num_pages; i++, offset += PAGE_SIZE) {
+
+	    addr = (*map_fn)(map_fn_data, offset);
+	    if (addr == vm_page_fictitious_addr)
+		return KERN_NO_ACCESS;
+
+	    /* fictitious page frames may be temporarily exhausted;
+	       vm_page_more_fictitious replenishes the pool */
+	    while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
+		vm_page_more_fictitious();
+
+	    vm_object_lock(object);
+	    /* replace any page already at this offset */
+	    if ((old_page = vm_page_lookup(object, offset))
+			!= VM_PAGE_NULL)
+	    {
+		VM_PAGE_FREE(old_page);
+	    }
+
+	    vm_page_init(m);
+	    m->phys_addr = addr;
+	    m->private = TRUE;		/* don't free page */
+	    m->wire_count = 1;
+	    vm_page_lock_queues();
+	    vm_page_insert(m, object, offset);
+	    vm_page_unlock_queues();
+
+	    PAGE_WAKEUP_DONE(m);
+	    vm_object_unlock(object);
+	}
+	return KERN_SUCCESS;
+}
+
+
+#if MACH_KDB
+#include <vm/vm_print.h>
+#define printf kdbprintf
+
+boolean_t vm_object_print_pages = FALSE;
+
+/*
+ *	vm_object_print:	[ debug ]
+ *
+ *	Dump the state of a vm_object to the kernel debugger console:
+ *	size/reference counts, pager port state, shadow/copy links,
+ *	and (when vm_object_print_pages is set) every resident page.
+ *
+ *	NOTE(review): resident_page_count is printed with %lu while
+ *	absent_count/paging_in_progress use %d, and copy_strategy is
+ *	cast to vm_offset_t for a %d format -- presumably benign on
+ *	the supported targets, but confirm the field types match.
+ */
+void vm_object_print(
+	vm_object_t object)
+{
+	vm_page_t p;
+
+	int count;
+
+	if (object == VM_OBJECT_NULL)
+		return;
+
+	iprintf("Object 0x%X: size=0x%X, %d references",
+		(vm_offset_t) object, (vm_offset_t) object->size,
+		object->ref_count);
+	printf("\n");
+	iprintf("%lu resident pages,", object->resident_page_count);
+	printf(" %d absent pages,", object->absent_count);
+	printf(" %d paging ops\n", object->paging_in_progress);
+	indent += 1;
+	iprintf("memory object=0x%X (offset=0x%X),",
+		(vm_offset_t) object->pager, (vm_offset_t) object->paging_offset);
+	printf("control=0x%X, name=0x%X\n",
+		(vm_offset_t) object->pager_request, (vm_offset_t) object->pager_name);
+	iprintf("%s%s",
+		object->pager_ready ? " ready" : "",
+		object->pager_created ? " created" : "");
+	printf("%s,%s ",
+		object->pager_initialized ? "" : "uninitialized",
+		object->temporary ? "temporary" : "permanent");
+	printf("%s%s,",
+		object->internal ? "internal" : "external",
+		object->can_persist ? " cacheable" : "");
+	printf("copy_strategy=%d\n", (vm_offset_t)object->copy_strategy);
+	iprintf("shadow=0x%X (offset=0x%X),",
+		(vm_offset_t) object->shadow, (vm_offset_t) object->shadow_offset);
+	printf("copy=0x%X\n", (vm_offset_t) object->copy);
+
+	indent += 1;
+
+	if (vm_object_print_pages) {
+		count = 0;
+		p = (vm_page_t) queue_first(&object->memq);
+		while (!queue_end(&object->memq, (queue_entry_t) p)) {
+			if (count == 0) iprintf("memory:=");
+			else if (count == 4) {printf("\n"); iprintf(" ..."); count = 0;}
+			else printf(",");
+			count++;
+
+			printf("(off=0x%X,page=0x%X)", p->offset, (vm_offset_t) p);
+			p = (vm_page_t) queue_next(&p->listq);
+		}
+		if (count != 0)
+			printf("\n");
+	}
+	indent -= 2;
+}
+
+#endif /* MACH_KDB */
diff --git a/vm/vm_object.h b/vm/vm_object.h
new file mode 100644
index 0000000..9c17541
--- /dev/null
+++ b/vm/vm_object.h
@@ -0,0 +1,415 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_object.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Virtual memory object module definitions.
+ */
+
+#ifndef _VM_VM_OBJECT_H_
+#define _VM_VM_OBJECT_H_
+
+#include <sys/types.h>
+#include <mach/kern_return.h>
+#include <mach/boolean.h>
+#include <mach/memory_object.h>
+#include <mach/port.h>
+#include <mach/vm_prot.h>
+#include <mach/machine/vm_types.h>
+#include <kern/queue.h>
+#include <kern/lock.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/macros.h>
+#include <vm/pmap.h>
+#include <ipc/ipc_types.h>
+
+#if MACH_PAGEMAP
+#include <vm/vm_external.h>
+#endif /* MACH_PAGEMAP */
+
+typedef struct ipc_port * pager_request_t;
+#define PAGER_REQUEST_NULL ((pager_request_t) 0)
+
+/*
+ * We use "struct ipc_port *" instead of "ipc_port_t"
+ * to avoid include file circularities.
+ */
+
+struct vm_object {
+ queue_head_t memq; /* Resident memory */
+ decl_simple_lock_data(, Lock) /* Synchronization */
+#if VM_OBJECT_DEBUG
+ thread_t LockHolder; /* Thread holding Lock */
+#endif /* VM_OBJECT_DEBUG */
+ vm_size_t size; /* Object size (only valid
+ * if internal)
+ */
+
+ int ref_count; /* Number of references */
+ unsigned long resident_page_count;
+ /* number of resident pages */
+
+ struct vm_object *copy; /* Object that should receive
+ * a copy of my changed pages
+ */
+ struct vm_object *shadow; /* My shadow */
+ vm_offset_t shadow_offset; /* Offset into shadow */
+
+ struct ipc_port *pager; /* Where to get data */
+ vm_offset_t paging_offset; /* Offset into memory object */
+ pager_request_t pager_request; /* Where data comes back */
+ struct ipc_port *pager_name; /* How to identify region */
+
+ memory_object_copy_strategy_t
+ copy_strategy; /* How to handle data copy */
+
+ unsigned int
+ absent_count; /* The number of pages that
+ * have been requested but
+ * not filled. That is, the
+ * number of pages for which
+ * the "absent" attribute is
+ * asserted.
+ */
+
+ unsigned int /* boolean_t array */
+ all_wanted; /* Bit array of "want to be
+ * awakened" notations. See
+ * VM_OBJECT_EVENT_* items
+ * below
+ */
+
+ unsigned int
+ paging_in_progress:16,
+ /* The memory object ports are
+ * being used (e.g., for pagein
+ * or pageout) -- don't change any
+ * of these fields (i.e., don't
+ * collapse, destroy or terminate)
+ */
+ /* boolean_t */ used_for_pageout:1,/* The object carries data sent to
+ * a memory manager, which signals
+ * it's done by releasing memory.
+ * This flag prevents coalescing so
+ * that unmapping memory immediately
+ * results in object termination.
+ */
+ /* boolean_t */ pager_created:1,/* Has pager ever been created? */
+ /* boolean_t */ pager_initialized:1,/* Are fields ready to use? */
+ /* boolean_t */ pager_ready:1, /* Will manager take requests? */
+
+ /* boolean_t */ can_persist:1, /* The kernel may keep the data
+ * for this object (and rights to
+ * the memory object) after all
+ * address map references are
+ * deallocated?
+ */
+ /* boolean_t */ internal:1, /* Created by the kernel (and
+ * therefore, managed by the
+ * default memory manger)
+ */
+ /* boolean_t */ temporary:1, /* Permanent objects may be changed
+ * externally by the memory manager,
+ * and changes made in memory must
+ * be reflected back to the memory
+ * manager. Temporary objects lack
+ * both of these characteristics.
+ */
+ /* boolean_t */ alive:1, /* Not yet terminated (debug) */
+ /* boolean_t */ lock_in_progress : 1,
+ /* Is a multi-page lock
+ * request in progress?
+ */
+ /* boolean_t */ lock_restart : 1,
+ /* Should lock request in
+ * progress restart search?
+ */
+ /* boolean_t */ use_shared_copy : 1,/* Use shared (i.e.,
+ * delayed) copy on write */
+ /* boolean_t */ shadowed: 1, /* Shadow may exist */
+
+ /* boolean_t */ cached: 1; /* Object is cached */
+ queue_chain_t cached_list; /* Attachment point for the list
+ * of objects cached as a result
+ * of their can_persist value
+ */
+ vm_offset_t last_alloc; /* last allocation offset */
+#if MACH_PAGEMAP
+ vm_external_t existence_info;
+#endif /* MACH_PAGEMAP */
+};
+
+extern
+vm_object_t kernel_object; /* the single kernel object */
+
+/*
+ * Declare procedures that operate on VM objects.
+ */
+
+extern void vm_object_bootstrap(void);
+extern void vm_object_init(void);
+extern void vm_object_collect(vm_object_t);
+extern void vm_object_terminate(vm_object_t);
+extern vm_object_t vm_object_allocate(vm_size_t);
+extern void vm_object_reference(vm_object_t);
+extern void vm_object_deallocate(vm_object_t);
+extern void vm_object_pmap_protect(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ pmap_t pmap,
+ vm_offset_t pmap_start,
+ vm_prot_t prot);
+extern void vm_object_pmap_remove(
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end);
+extern void vm_object_page_remove(
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end);
+extern void vm_object_shadow(
+ vm_object_t *object, /* in/out */
+ vm_offset_t *offset, /* in/out */
+ vm_size_t length);
+extern void vm_object_collapse(vm_object_t);
+extern vm_object_t vm_object_lookup(struct ipc_port *);
+extern vm_object_t vm_object_lookup_name(struct ipc_port *);
+extern struct ipc_port *vm_object_name(vm_object_t);
+extern void vm_object_remove(vm_object_t);
+
+extern boolean_t vm_object_copy_temporary(
+ vm_object_t *_object, /* in/out */
+ vm_offset_t *_offset, /* in/out */
+ boolean_t *_src_needs_copy, /* out */
+ boolean_t *_dst_needs_copy); /* out */
+extern kern_return_t vm_object_copy_strategically(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ vm_object_t *dst_object, /* out */
+ vm_offset_t *dst_offset, /* out */
+ boolean_t *dst_needs_copy); /* out */
+extern kern_return_t vm_object_copy_slowly(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ boolean_t interruptible,
+ vm_object_t *_result_object); /* out */
+
+extern vm_object_t vm_object_enter(
+ struct ipc_port *pager,
+ vm_size_t size,
+ boolean_t internal);
+extern void vm_object_pager_create(
+ vm_object_t object);
+extern void vm_object_destroy(
+ struct ipc_port *pager);
+
+extern kern_return_t vm_object_page_map(
+ vm_object_t,
+ vm_offset_t,
+ vm_size_t,
+ phys_addr_t (*)(void *, vm_offset_t),
+ void *);
+
+extern vm_object_t vm_object_request_object(struct ipc_port *);
+
+extern boolean_t vm_object_coalesce(
+ vm_object_t prev_object,
+ vm_object_t next_object,
+ vm_offset_t prev_offset,
+ vm_offset_t next_offset,
+ vm_size_t prev_size,
+ vm_size_t next_size,
+ vm_object_t *new_object, /* OUT */
+ vm_offset_t *new_offset); /* OUT */
+
+extern void vm_object_pager_wakeup(ipc_port_t pager);
+
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name);
+
+void vm_object_deactivate_pages(vm_object_t);
+
+vm_object_t vm_object_copy_delayed(
+ vm_object_t src_object);
+
+/*
+ * Event waiting handling
+ */
+
+#define VM_OBJECT_EVENT_INITIALIZED 0
+#define VM_OBJECT_EVENT_PAGER_READY 1
+#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS 2
+#define VM_OBJECT_EVENT_ABSENT_COUNT 3
+#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS 4
+
+#define vm_object_wait(object, event, interruptible) \
+ MACRO_BEGIN \
+ (object)->all_wanted |= 1 << (event); \
+ vm_object_sleep(((vm_offset_t) object) + (event), \
+ (object), \
+ (interruptible)); \
+ MACRO_END
+
+#define vm_object_assert_wait(object, event, interruptible) \
+ MACRO_BEGIN \
+ (object)->all_wanted |= 1 << (event); \
+ assert_wait((event_t)(((vm_offset_t) object) + (event)), (interruptible)); \
+ MACRO_END
+
+#define vm_object_wakeup(object, event) \
+ MACRO_BEGIN \
+ if ((object)->all_wanted & (1 << (event))) \
+ thread_wakeup((event_t)(((vm_offset_t) object) + (event))); \
+ (object)->all_wanted &= ~(1 << (event)); \
+ MACRO_END
+
+/*
+ * Routines implemented as macros
+ */
+
+#define vm_object_collectable(object) \
+ (((object)->ref_count == 0) \
+ && ((object)->resident_page_count == 0))
+
+#define vm_object_paging_begin(object) \
+ ((object)->paging_in_progress++)
+
+#define vm_object_paging_end(object) \
+ MACRO_BEGIN \
+ assert((object)->paging_in_progress != 0); \
+ if (--(object)->paging_in_progress == 0) { \
+ vm_object_wakeup(object, \
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
+ } \
+ MACRO_END
+
+#define vm_object_paging_wait(object, interruptible) \
+ MACRO_BEGIN \
+ while ((object)->paging_in_progress != 0) { \
+ vm_object_wait( (object), \
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
+ (interruptible)); \
+ vm_object_lock(object); \
+ \
+ /*XXX if ((interruptible) && */ \
+ /*XXX (current_thread()->wait_result != THREAD_AWAKENED))*/ \
+ /*XXX break; */ \
+ } \
+ MACRO_END
+
+#define vm_object_absent_assert_wait(object, interruptible) \
+ MACRO_BEGIN \
+ vm_object_assert_wait( (object), \
+ VM_OBJECT_EVENT_ABSENT_COUNT, \
+ (interruptible)); \
+ MACRO_END
+
+
+#define vm_object_absent_release(object) \
+ MACRO_BEGIN \
+ (object)->absent_count--; \
+ vm_object_wakeup((object), \
+ VM_OBJECT_EVENT_ABSENT_COUNT); \
+ MACRO_END
+
+/*
+ * Object locking macros (with and without debugging)
+ */
+
+#if VM_OBJECT_DEBUG
+#define vm_object_lock_init(object) \
+MACRO_BEGIN \
+ simple_lock_init(&(object)->Lock); \
+ (object)->LockHolder = 0; \
+MACRO_END
+#define vm_object_lock(object) \
+MACRO_BEGIN \
+ simple_lock(&(object)->Lock); \
+ (object)->LockHolder = current_thread(); \
+MACRO_END
+#define vm_object_unlock(object) \
+MACRO_BEGIN \
+ if ((object)->LockHolder != current_thread()) \
+ panic("vm_object_unlock 0x%x", (object)); \
+ (object)->LockHolder = 0; \
+ simple_unlock(&(object)->Lock); \
+MACRO_END
+#define vm_object_lock_try(object) \
+ (simple_lock_try(&(object)->Lock) \
+ ? ( ((object)->LockHolder = current_thread()) , TRUE) \
+ : FALSE)
+#define vm_object_sleep(event, object, interruptible) \
+MACRO_BEGIN \
+ if ((object)->LockHolder != current_thread()) \
+ panic("vm_object_sleep %#x", (object)); \
+ (object)->LockHolder = 0; \
+ thread_sleep((event_t)(event), simple_lock_addr((object)->Lock), \
+ (interruptible)); \
+MACRO_END
+#define vm_object_lock_taken(object) \
+ ((object)->LockHolder == current_thread())
+#else /* VM_OBJECT_DEBUG */
+#define vm_object_lock_init(object) simple_lock_init(&(object)->Lock)
+#define vm_object_lock(object) simple_lock(&(object)->Lock)
+#define vm_object_unlock(object) simple_unlock(&(object)->Lock)
+#define vm_object_lock_try(object) simple_lock_try(&(object)->Lock)
+#define vm_object_sleep(event, object, interruptible) \
+ thread_sleep((event_t)(event), simple_lock_addr((object)->Lock), \
+ (interruptible))
+#define vm_object_lock_taken(object) simple_lock_taken(&(object)->Lock)
+#endif /* VM_OBJECT_DEBUG */
+
+/*
+ * Page cache accounting.
+ *
+ * The page queues must be locked when changing these counters.
+ */
+extern int vm_object_external_count;
+extern int vm_object_external_pages;
+
+/* Add a reference to a locked VM object. */
+static inline int
+vm_object_reference_locked (vm_object_t obj)
+{
+ return (++obj->ref_count);
+}
+
+/* Remove a reference from a locked VM object. */
+static inline int
+vm_object_unreference_locked (vm_object_t obj)
+{
+ return (--obj->ref_count);
+}
+
+#endif /* _VM_VM_OBJECT_H_ */
diff --git a/vm/vm_page.c b/vm/vm_page.c
new file mode 100644
index 0000000..04decbb
--- /dev/null
+++ b/vm/vm_page.c
@@ -0,0 +1,2164 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This implementation uses the binary buddy system to manage its heap.
+ * Descriptions of the buddy system can be found in the following works :
+ * - "UNIX Internals: The New Frontiers", by Uresh Vahalia.
+ * - "Dynamic Storage Allocation: A Survey and Critical Review",
+ * by Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles.
+ *
+ * In addition, this allocator uses per-CPU pools of pages for order 0
+ * (i.e. single page) allocations. These pools act as caches (but are named
+ * differently to avoid confusion with CPU caches) that reduce contention on
+ * multiprocessor systems. When a pool is empty and cannot provide a page,
+ * it is filled by transferring multiple pages from the backend buddy system.
+ * The symmetric case is handled likewise.
+ *
+ * TODO Limit number of dirty pages, block allocations above a top limit.
+ */
+
+#include <string.h>
+#include <kern/assert.h>
+#include <kern/counters.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/list.h>
+#include <kern/lock.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <mach/vm_param.h>
+#include <machine/pmap.h>
+#include <sys/types.h>
+#include <vm/memory_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+#define DEBUG 0
+
+#define __init
+#define __initdata
+#define __read_mostly
+
+#define thread_pin()
+#define thread_unpin()
+
+/*
+ * Number of free block lists per segment.
+ */
+#define VM_PAGE_NR_FREE_LISTS 11
+
+/*
+ * The size of a CPU pool is computed by dividing the number of pages in its
+ * containing segment by this value.
+ */
+#define VM_PAGE_CPU_POOL_RATIO 1024
+
+/*
+ * Maximum number of pages in a CPU pool.
+ */
+#define VM_PAGE_CPU_POOL_MAX_SIZE 128
+
+/*
+ * The transfer size of a CPU pool is computed by dividing the pool size by
+ * this value.
+ */
+#define VM_PAGE_CPU_POOL_TRANSFER_RATIO 2
+
+/*
+ * Per-processor cache of pages.
+ */
+struct vm_page_cpu_pool {
+ simple_lock_data_t lock;
+ int size;
+ int transfer_size;
+ int nr_pages;
+ struct list pages;
+} __aligned(CPU_L1_SIZE);
+
+/*
+ * Special order value for pages that aren't in a free list. Such pages are
+ * either allocated, or part of a free block of pages but not the head page.
+ */
+#define VM_PAGE_ORDER_UNLISTED (VM_PAGE_NR_FREE_LISTS + 1)
+
+/*
+ * Doubly-linked list of free blocks.
+ */
+struct vm_page_free_list {
+ unsigned long size;
+ struct list blocks;
+};
+
+/*
+ * XXX Because of a potential deadlock involving the default pager (see
+ * vm_map_lock()), it's currently impossible to reliably determine the
+ * minimum number of free pages required for successful pageout. Since
+ * that process is dependent on the amount of physical memory, we scale
+ * the minimum number of free pages from it, in the hope that memory
+ * exhaustion happens as rarely as possible...
+ */
+
+/*
+ * Ratio used to compute the minimum number of pages in a segment.
+ */
+#define VM_PAGE_SEG_THRESHOLD_MIN_NUM 5
+#define VM_PAGE_SEG_THRESHOLD_MIN_DENOM 100
+
+/*
+ * Number of pages reserved for privileged allocations in a segment.
+ */
+#define VM_PAGE_SEG_THRESHOLD_MIN 500
+
+/*
+ * Ratio used to compute the threshold below which pageout is started.
+ */
+#define VM_PAGE_SEG_THRESHOLD_LOW_NUM 6
+#define VM_PAGE_SEG_THRESHOLD_LOW_DENOM 100
+
+/*
+ * Minimum value the low threshold can have for a segment.
+ */
+#define VM_PAGE_SEG_THRESHOLD_LOW 600
+
+#if VM_PAGE_SEG_THRESHOLD_LOW <= VM_PAGE_SEG_THRESHOLD_MIN
+#error VM_PAGE_SEG_THRESHOLD_LOW invalid
+#endif /* VM_PAGE_SEG_THRESHOLD_LOW >= VM_PAGE_SEG_THRESHOLD_MIN */
+
+/*
+ * Ratio used to compute the threshold above which pageout is stopped.
+ */
+#define VM_PAGE_SEG_THRESHOLD_HIGH_NUM 10
+#define VM_PAGE_SEG_THRESHOLD_HIGH_DENOM 100
+
+/*
+ * Minimum value the high threshold can have for a segment.
+ */
+#define VM_PAGE_SEG_THRESHOLD_HIGH 1000
+
+#if VM_PAGE_SEG_THRESHOLD_HIGH <= VM_PAGE_SEG_THRESHOLD_LOW
+#error VM_PAGE_SEG_THRESHOLD_HIGH invalid
+#endif /* VM_PAGE_SEG_THRESHOLD_HIGH <= VM_PAGE_SEG_THRESHOLD_LOW */
+
+/*
+ * Minimum number of pages allowed for a segment.
+ */
+#define VM_PAGE_SEG_MIN_PAGES 2000
+
+#if VM_PAGE_SEG_MIN_PAGES <= VM_PAGE_SEG_THRESHOLD_HIGH
+#error VM_PAGE_SEG_MIN_PAGES invalid
+#endif /* VM_PAGE_SEG_MIN_PAGES <= VM_PAGE_SEG_THRESHOLD_HIGH */
+
+/*
+ * Ratio used to compute the threshold of active pages beyond which
+ * to refill the inactive queue.
+ */
+#define VM_PAGE_HIGH_ACTIVE_PAGE_NUM 1
+#define VM_PAGE_HIGH_ACTIVE_PAGE_DENOM 3
+
+/*
+ * Page cache queue.
+ *
+ * XXX The current implementation hardcodes a preference to evict external
+ * pages first and keep internal ones as much as possible. This is because
+ * the Hurd default pager implementation suffers from bugs that can easily
+ * cause the system to freeze.
+ */
+struct vm_page_queue {
+ struct list internal_pages;
+ struct list external_pages;
+};
+
+/*
+ * Segment name buffer size.
+ */
+#define VM_PAGE_NAME_SIZE 16
+
+/*
+ * Segment of contiguous memory.
+ *
+ * XXX Per-segment locking is probably useless, since one or both of the
+ * page queues lock and the free page queue lock is held on any access.
+ * However it should first be made clear which lock protects access to
+ * which members of a segment.
+ */
+struct vm_page_seg {
+ struct vm_page_cpu_pool cpu_pools[NCPUS];
+
+ phys_addr_t start;
+ phys_addr_t end;
+ struct vm_page *pages;
+ struct vm_page *pages_end;
+ simple_lock_data_t lock;
+ struct vm_page_free_list free_lists[VM_PAGE_NR_FREE_LISTS];
+ unsigned long nr_free_pages;
+
+ /* Free memory thresholds */
+ unsigned long min_free_pages; /* Privileged allocations only */
+ unsigned long low_free_pages; /* Pageout daemon starts scanning */
+ unsigned long high_free_pages; /* Pageout daemon stops scanning,
+ unprivileged allocations resume */
+
+ /* Page cache related data */
+ struct vm_page_queue active_pages;
+ unsigned long nr_active_pages;
+ unsigned long high_active_pages;
+ struct vm_page_queue inactive_pages;
+ unsigned long nr_inactive_pages;
+};
+
+/*
+ * Bootstrap information about a segment.
+ */
+struct vm_page_boot_seg {
+ phys_addr_t start;
+ phys_addr_t end;
+ boolean_t heap_present;
+ phys_addr_t avail_start;
+ phys_addr_t avail_end;
+};
+
+static int vm_page_is_ready __read_mostly;
+
+/*
+ * Segment table.
+ *
+ * The system supports a maximum of 4 segments :
+ * - DMA: suitable for DMA
+ * - DMA32: suitable for DMA when devices support 32-bits addressing
+ * - DIRECTMAP: direct physical mapping, allows direct access from
+ * the kernel with a simple offset translation
+ * - HIGHMEM: must be mapped before it can be accessed
+ *
+ * Segments are ordered by priority, 0 being the lowest priority. Their
+ * relative priorities are DMA < DMA32 < DIRECTMAP < HIGHMEM or
+ * DMA < DIRECTMAP < DMA32 < HIGHMEM.
+ * Some segments may actually be aliases for others, e.g. if DMA is always
+ * possible from the direct physical mapping, DMA and DMA32 are aliases for
+ * DIRECTMAP, in which case the segment table contains DIRECTMAP and HIGHMEM
+ * only.
+ */
+static struct vm_page_seg vm_page_segs[VM_PAGE_MAX_SEGS];
+
+/*
+ * Bootstrap segment table.
+ */
+static struct vm_page_boot_seg vm_page_boot_segs[VM_PAGE_MAX_SEGS] __initdata;
+
+/*
+ * Number of loaded segments.
+ */
+static unsigned int vm_page_segs_size __read_mostly;
+
+/*
+ * If true, unprivileged allocations are blocked, disregarding any other
+ * condition.
+ *
+ * This variable is also used to resume clients once pages are available.
+ *
+ * The free page queue lock must be held when accessing this variable.
+ */
+static boolean_t vm_page_alloc_paused;
+
+static void __init
+vm_page_init_pa(struct vm_page *page, unsigned short seg_index, phys_addr_t pa)
+{
+ memset(page, 0, sizeof(*page));
+ vm_page_init(page); /* vm_resident members */
+ page->type = VM_PT_RESERVED;
+ page->seg_index = seg_index;
+ page->order = VM_PAGE_ORDER_UNLISTED;
+ page->priv = NULL;
+ page->phys_addr = pa;
+}
+
+void
+vm_page_set_type(struct vm_page *page, unsigned int order, unsigned short type)
+{
+ unsigned int i, nr_pages;
+
+ nr_pages = 1 << order;
+
+ for (i = 0; i < nr_pages; i++)
+ page[i].type = type;
+}
+
+static boolean_t
+vm_page_pageable(const struct vm_page *page)
+{
+ return (page->object != NULL)
+ && (page->wire_count == 0)
+ && (page->active || page->inactive);
+}
+
+static boolean_t
+vm_page_can_move(const struct vm_page *page)
+{
+ /*
+ * This function is called on pages pulled from the page queues,
+ * implying they're pageable, which is why the wire count isn't
+ * checked here.
+ */
+
+ return !page->busy
+ && !page->wanted
+ && !page->absent
+ && page->object->alive;
+}
+
+static void
+vm_page_remove_mappings(struct vm_page *page)
+{
+ page->busy = TRUE;
+ pmap_page_protect(page->phys_addr, VM_PROT_NONE);
+
+ if (!page->dirty) {
+ page->dirty = pmap_is_modified(page->phys_addr);
+ }
+}
+
+static void __init
+vm_page_free_list_init(struct vm_page_free_list *free_list)
+{
+ free_list->size = 0;
+ list_init(&free_list->blocks);
+}
+
+static inline void
+vm_page_free_list_insert(struct vm_page_free_list *free_list,
+ struct vm_page *page)
+{
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+
+ free_list->size++;
+ list_insert_head(&free_list->blocks, &page->node);
+}
+
+static inline void
+vm_page_free_list_remove(struct vm_page_free_list *free_list,
+ struct vm_page *page)
+{
+ assert(page->order != VM_PAGE_ORDER_UNLISTED);
+
+ free_list->size--;
+ list_remove(&page->node);
+}
+
+static struct vm_page *
+vm_page_seg_alloc_from_buddy(struct vm_page_seg *seg, unsigned int order)
+{
+ struct vm_page_free_list *free_list = free_list;
+ struct vm_page *page, *buddy;
+ unsigned int i;
+
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ if (vm_page_alloc_paused && current_thread()
+ && !current_thread()->vm_privilege) {
+ return NULL;
+ } else if (seg->nr_free_pages <= seg->low_free_pages) {
+ vm_pageout_start();
+
+ if ((seg->nr_free_pages <= seg->min_free_pages)
+ && current_thread() && !current_thread()->vm_privilege) {
+ vm_page_alloc_paused = TRUE;
+ return NULL;
+ }
+ }
+
+ for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
+ free_list = &seg->free_lists[i];
+
+ if (free_list->size != 0)
+ break;
+ }
+
+ if (i == VM_PAGE_NR_FREE_LISTS)
+ return NULL;
+
+ page = list_first_entry(&free_list->blocks, struct vm_page, node);
+ vm_page_free_list_remove(free_list, page);
+ page->order = VM_PAGE_ORDER_UNLISTED;
+
+ while (i > order) {
+ i--;
+ buddy = &page[1 << i];
+ vm_page_free_list_insert(&seg->free_lists[i], buddy);
+ buddy->order = i;
+ }
+
+ seg->nr_free_pages -= (1 << order);
+
+ if (seg->nr_free_pages < seg->min_free_pages) {
+ vm_page_alloc_paused = TRUE;
+ }
+
+ return page;
+}
+
/*
 * Release a block of 1 << order pages to a segment's buddy free lists,
 * coalescing it with its free buddies into the largest possible block.
 *
 * The segment lock must be held by the caller.
 */
static void
vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
                          unsigned int order)
{
    struct vm_page *buddy;
    phys_addr_t pa, buddy_pa;
    unsigned int nr_pages;

    assert(page >= seg->pages);
    assert(page < seg->pages_end);
    assert(page->order == VM_PAGE_ORDER_UNLISTED);
    assert(order < VM_PAGE_NR_FREE_LISTS);

    nr_pages = (1 << order);
    pa = page->phys_addr;

    while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
        /* A block's buddy differs only in the bit selecting `order'. */
        buddy_pa = pa ^ vm_page_ptoa(1ULL << order);

        /* Stop if the buddy lies outside this segment. */
        if ((buddy_pa < seg->start) || (buddy_pa >= seg->end))
            break;

        buddy = &seg->pages[vm_page_atop(buddy_pa - seg->start)];

        /* The buddy must itself be the head of a free block of the
         * same order, otherwise no merge is possible. */
        if (buddy->order != order)
            break;

        /* Merge: pull the buddy off its list and double the block,
         * realigning `page' to the merged block's head. */
        vm_page_free_list_remove(&seg->free_lists[order], buddy);
        buddy->order = VM_PAGE_ORDER_UNLISTED;
        order++;
        pa &= -vm_page_ptoa(1ULL << order);
        page = &seg->pages[vm_page_atop(pa - seg->start)];
    }

    vm_page_free_list_insert(&seg->free_lists[order], page);
    page->order = order;
    seg->nr_free_pages += nr_pages;
}
+
+static void __init
+vm_page_cpu_pool_init(struct vm_page_cpu_pool *cpu_pool, int size)
+{
+ simple_lock_init(&cpu_pool->lock);
+ cpu_pool->size = size;
+ cpu_pool->transfer_size = (size + VM_PAGE_CPU_POOL_TRANSFER_RATIO - 1)
+ / VM_PAGE_CPU_POOL_TRANSFER_RATIO;
+ cpu_pool->nr_pages = 0;
+ list_init(&cpu_pool->pages);
+}
+
+static inline struct vm_page_cpu_pool *
+vm_page_cpu_pool_get(struct vm_page_seg *seg)
+{
+ return &seg->cpu_pools[cpu_number()];
+}
+
+static inline struct vm_page *
+vm_page_cpu_pool_pop(struct vm_page_cpu_pool *cpu_pool)
+{
+ struct vm_page *page;
+
+ assert(cpu_pool->nr_pages != 0);
+ cpu_pool->nr_pages--;
+ page = list_first_entry(&cpu_pool->pages, struct vm_page, node);
+ list_remove(&page->node);
+ return page;
+}
+
+static inline void
+vm_page_cpu_pool_push(struct vm_page_cpu_pool *cpu_pool, struct vm_page *page)
+{
+ assert(cpu_pool->nr_pages < cpu_pool->size);
+ cpu_pool->nr_pages++;
+ list_insert_head(&cpu_pool->pages, &page->node);
+}
+
+static int
+vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
+{
+ struct vm_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == 0);
+
+ simple_lock(&seg->lock);
+
+ for (i = 0; i < cpu_pool->transfer_size; i++) {
+ page = vm_page_seg_alloc_from_buddy(seg, 0);
+
+ if (page == NULL)
+ break;
+
+ vm_page_cpu_pool_push(cpu_pool, page);
+ }
+
+ simple_unlock(&seg->lock);
+
+ return i;
+}
+
+static void
+vm_page_cpu_pool_drain(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
+{
+ struct vm_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == cpu_pool->size);
+
+ simple_lock(&seg->lock);
+
+ for (i = cpu_pool->transfer_size; i > 0; i--) {
+ page = vm_page_cpu_pool_pop(cpu_pool);
+ vm_page_seg_free_to_buddy(seg, page, 0);
+ }
+
+ simple_unlock(&seg->lock);
+}
+
+static void
+vm_page_queue_init(struct vm_page_queue *queue)
+{
+ list_init(&queue->internal_pages);
+ list_init(&queue->external_pages);
+}
+
+static void
+vm_page_queue_push(struct vm_page_queue *queue, struct vm_page *page)
+{
+ if (page->external) {
+ list_insert_tail(&queue->external_pages, &page->node);
+ } else {
+ list_insert_tail(&queue->internal_pages, &page->node);
+ }
+}
+
+static void
+vm_page_queue_remove(struct vm_page_queue *queue, struct vm_page *page)
+{
+ (void)queue;
+ list_remove(&page->node);
+}
+
+static struct vm_page *
+vm_page_queue_first(struct vm_page_queue *queue, boolean_t external_only)
+{
+ struct vm_page *page;
+
+ if (!list_empty(&queue->external_pages)) {
+ page = list_first_entry(&queue->external_pages, struct vm_page, node);
+ return page;
+ }
+
+ if (!external_only && !list_empty(&queue->internal_pages)) {
+ page = list_first_entry(&queue->internal_pages, struct vm_page, node);
+ return page;
+ }
+
+ return NULL;
+}
+
+static struct vm_page_seg *
+vm_page_seg_get(unsigned short index)
+{
+ assert(index < vm_page_segs_size);
+ return &vm_page_segs[index];
+}
+
+static unsigned int
+vm_page_seg_index(const struct vm_page_seg *seg)
+{
+ unsigned int index;
+
+ index = seg - vm_page_segs;
+ assert(index < vm_page_segs_size);
+ return index;
+}
+
+static phys_addr_t __init
+vm_page_seg_size(struct vm_page_seg *seg)
+{
+ return seg->end - seg->start;
+}
+
+static int __init
+vm_page_seg_compute_pool_size(struct vm_page_seg *seg)
+{
+ phys_addr_t size;
+
+ size = vm_page_atop(vm_page_seg_size(seg)) / VM_PAGE_CPU_POOL_RATIO;
+
+ if (size == 0)
+ size = 1;
+ else if (size > VM_PAGE_CPU_POOL_MAX_SIZE)
+ size = VM_PAGE_CPU_POOL_MAX_SIZE;
+
+ return size;
+}
+
+static void __init
+vm_page_seg_compute_pageout_thresholds(struct vm_page_seg *seg)
+{
+ unsigned long nr_pages;
+
+ nr_pages = vm_page_atop(vm_page_seg_size(seg));
+
+ if (nr_pages < VM_PAGE_SEG_MIN_PAGES) {
+ panic("vm_page: segment too small");
+ }
+
+ seg->min_free_pages = nr_pages * VM_PAGE_SEG_THRESHOLD_MIN_NUM
+ / VM_PAGE_SEG_THRESHOLD_MIN_DENOM;
+
+ if (seg->min_free_pages < VM_PAGE_SEG_THRESHOLD_MIN) {
+ seg->min_free_pages = VM_PAGE_SEG_THRESHOLD_MIN;
+ }
+
+ seg->low_free_pages = nr_pages * VM_PAGE_SEG_THRESHOLD_LOW_NUM
+ / VM_PAGE_SEG_THRESHOLD_LOW_DENOM;
+
+ if (seg->low_free_pages < VM_PAGE_SEG_THRESHOLD_LOW) {
+ seg->low_free_pages = VM_PAGE_SEG_THRESHOLD_LOW;
+ }
+
+ seg->high_free_pages = nr_pages * VM_PAGE_SEG_THRESHOLD_HIGH_NUM
+ / VM_PAGE_SEG_THRESHOLD_HIGH_DENOM;
+
+ if (seg->high_free_pages < VM_PAGE_SEG_THRESHOLD_HIGH) {
+ seg->high_free_pages = VM_PAGE_SEG_THRESHOLD_HIGH;
+ }
+}
+
/*
 * Initialize a segment covering physical range [start, end), using the
 * preallocated `pages' array for its page descriptors.
 *
 * Every descriptor starts out VM_PT_RESERVED and unlisted; usable
 * ranges are handed to the allocator later during bootstrap.
 */
static void __init
vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
                 struct vm_page *pages)
{
    phys_addr_t pa;
    int pool_size;
    unsigned int i;

    /* Start and end must be set before the segment size is computed. */
    seg->start = start;
    seg->end = end;
    pool_size = vm_page_seg_compute_pool_size(seg);

    for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++)
        vm_page_cpu_pool_init(&seg->cpu_pools[i], pool_size);

    seg->pages = pages;
    seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
    simple_lock_init(&seg->lock);

    for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
        vm_page_free_list_init(&seg->free_lists[i]);

    seg->nr_free_pages = 0;

    vm_page_seg_compute_pageout_thresholds(seg);

    vm_page_queue_init(&seg->active_pages);
    seg->nr_active_pages = 0;
    vm_page_queue_init(&seg->inactive_pages);
    seg->nr_inactive_pages = 0;

    /* i is reused as the segment index stored in every descriptor. */
    i = vm_page_seg_index(seg);

    for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
        vm_page_init_pa(&pages[vm_page_atop(pa - seg->start)], i, pa);
}
+
/*
 * Allocate a block of 1 << order contiguous pages from a segment and
 * tag them with `type'. Order-0 requests go through the calling CPU's
 * pool; larger requests fall through to the buddy allocator. Returns
 * NULL when the segment cannot satisfy the request.
 */
static struct vm_page *
vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
                  unsigned short type)
{
    struct vm_page_cpu_pool *cpu_pool;
    struct vm_page *page;
    int filled;

    assert(order < VM_PAGE_NR_FREE_LISTS);

    if (order == 0) {
        /* Pin to the current CPU so the pool pointer stays valid. */
        thread_pin();
        cpu_pool = vm_page_cpu_pool_get(seg);
        simple_lock(&cpu_pool->lock);

        /* Empty pool: refill it in one batch from the buddy system. */
        if (cpu_pool->nr_pages == 0) {
            filled = vm_page_cpu_pool_fill(cpu_pool, seg);

            if (!filled) {
                simple_unlock(&cpu_pool->lock);
                thread_unpin();
                return NULL;
            }
        }

        page = vm_page_cpu_pool_pop(cpu_pool);
        simple_unlock(&cpu_pool->lock);
        thread_unpin();
    } else {
        simple_lock(&seg->lock);
        page = vm_page_seg_alloc_from_buddy(seg, order);
        simple_unlock(&seg->lock);

        if (page == NULL)
            return NULL;
    }

    assert(page->type == VM_PT_FREE);
    vm_page_set_type(page, order, type);
    return page;
}
+
/*
 * Release a block of 1 << order pages previously obtained through
 * vm_page_seg_alloc. Order-0 frees go to the calling CPU's pool,
 * draining it to the buddy system first when full; larger blocks go
 * straight back to the buddy free lists.
 */
static void
vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
                 unsigned int order)
{
    struct vm_page_cpu_pool *cpu_pool;

    assert(page->type != VM_PT_FREE);
    assert(order < VM_PAGE_NR_FREE_LISTS);

    vm_page_set_type(page, order, VM_PT_FREE);

    if (order == 0) {
        /* Pin to the current CPU so the pool pointer stays valid. */
        thread_pin();
        cpu_pool = vm_page_cpu_pool_get(seg);
        simple_lock(&cpu_pool->lock);

        /* Make room in the pool before pushing the freed page. */
        if (cpu_pool->nr_pages == cpu_pool->size)
            vm_page_cpu_pool_drain(cpu_pool, seg);

        vm_page_cpu_pool_push(cpu_pool, page);
        simple_unlock(&cpu_pool->lock);
        thread_unpin();
    } else {
        simple_lock(&seg->lock);
        vm_page_seg_free_to_buddy(seg, page, order);
        simple_unlock(&seg->lock);
    }
}
+
+/*
+ * Append a page to the segment's active queue and update the segment
+ * and global active counters. The page must belong to an object and
+ * must not already be free, listed, or queued. Callers hold the
+ * segment lock and the page queues lock.
+ */
+static void
+vm_page_seg_add_active_page(struct vm_page_seg *seg, struct vm_page *page)
+{
+ assert(page->object != NULL);
+ assert(page->seg_index == vm_page_seg_index(seg));
+ assert(page->type != VM_PT_FREE);
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+ assert(!page->free && !page->active && !page->inactive);
+ page->active = TRUE;
+ page->reference = TRUE;
+ vm_page_queue_push(&seg->active_pages, page);
+ seg->nr_active_pages++;
+ vm_page_active_count++;
+}
+
+/*
+ * Remove a page from the segment's active queue, updating the segment
+ * and global active counters. Counterpart of
+ * vm_page_seg_add_active_page; same locking requirements.
+ */
+static void
+vm_page_seg_remove_active_page(struct vm_page_seg *seg, struct vm_page *page)
+{
+ assert(page->object != NULL);
+ assert(page->seg_index == vm_page_seg_index(seg));
+ assert(page->type != VM_PT_FREE);
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+ assert(!page->free && page->active && !page->inactive);
+ page->active = FALSE;
+ vm_page_queue_remove(&seg->active_pages, page);
+ seg->nr_active_pages--;
+ vm_page_active_count--;
+}
+
+/*
+ * Append a page to the segment's inactive queue and update the
+ * segment and global inactive counters. The page must belong to an
+ * object and must not already be free, listed, or queued.
+ */
+static void
+vm_page_seg_add_inactive_page(struct vm_page_seg *seg, struct vm_page *page)
+{
+ assert(page->object != NULL);
+ assert(page->seg_index == vm_page_seg_index(seg));
+ assert(page->type != VM_PT_FREE);
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+ assert(!page->free && !page->active && !page->inactive);
+ page->inactive = TRUE;
+ vm_page_queue_push(&seg->inactive_pages, page);
+ seg->nr_inactive_pages++;
+ vm_page_inactive_count++;
+}
+
+/*
+ * Remove a page from the segment's inactive queue, updating the
+ * segment and global inactive counters. Counterpart of
+ * vm_page_seg_add_inactive_page.
+ */
+static void
+vm_page_seg_remove_inactive_page(struct vm_page_seg *seg, struct vm_page *page)
+{
+ assert(page->object != NULL);
+ assert(page->seg_index == vm_page_seg_index(seg));
+ assert(page->type != VM_PT_FREE);
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+ assert(!page->free && !page->active && page->inactive);
+ page->inactive = FALSE;
+ vm_page_queue_remove(&seg->inactive_pages, page);
+ seg->nr_inactive_pages--;
+ vm_page_inactive_count--;
+}
+
+/*
+ * Attempt to pull an active page.
+ *
+ * If successful, the object containing the page is locked.
+ *
+ * The queue is scanned with a trylock on each page's object: pages
+ * whose object can't be locked, or that can't move, are re-queued at
+ * the tail and skipped. "first" remembers the first page examined so
+ * the loop stops after one full pass over the queue.
+ */
+static struct vm_page *
+vm_page_seg_pull_active_page(struct vm_page_seg *seg, boolean_t external_only)
+{
+ struct vm_page *page, *first;
+ boolean_t locked;
+
+ first = NULL;
+
+ for (;;) {
+ page = vm_page_queue_first(&seg->active_pages, external_only);
+
+ if (page == NULL) {
+ break;
+ } else if (first == NULL) {
+ first = page;
+ } else if (first == page) {
+ /* Wrapped around: every candidate was rejected */
+ break;
+ }
+
+ vm_page_seg_remove_active_page(seg, page);
+ locked = vm_object_lock_try(page->object);
+
+ if (!locked) {
+ /* Re-queue and try the next page */
+ vm_page_seg_add_active_page(seg, page);
+ continue;
+ }
+
+ if (!vm_page_can_move(page)) {
+ vm_page_seg_add_active_page(seg, page);
+ vm_object_unlock(page->object);
+ continue;
+ }
+
+ return page;
+ }
+
+ return NULL;
+}
+
+/*
+ * Attempt to pull an inactive page.
+ *
+ * If successful, the object containing the page is locked.
+ *
+ * XXX See vm_page_seg_pull_active_page (duplicated code).
+ */
+static struct vm_page *
+vm_page_seg_pull_inactive_page(struct vm_page_seg *seg, boolean_t external_only)
+{
+ struct vm_page *page, *first;
+ boolean_t locked;
+
+ first = NULL;
+
+ for (;;) {
+ page = vm_page_queue_first(&seg->inactive_pages, external_only);
+
+ if (page == NULL) {
+ break;
+ } else if (first == NULL) {
+ first = page;
+ } else if (first == page) {
+ break;
+ }
+
+ vm_page_seg_remove_inactive_page(seg, page);
+ locked = vm_object_lock_try(page->object);
+
+ if (!locked) {
+ vm_page_seg_add_inactive_page(seg, page);
+ continue;
+ }
+
+ if (!vm_page_can_move(page)) {
+ vm_page_seg_add_inactive_page(seg, page);
+ vm_object_unlock(page->object);
+ continue;
+ }
+
+ return page;
+ }
+
+ return NULL;
+}
+
+/*
+ * Attempt to pull a page cache page.
+ *
+ * If successful, the object containing the page is locked.
+ *
+ * Inactive pages are tried first; *was_active reports which queue the
+ * page came from.
+ */
+static struct vm_page *
+vm_page_seg_pull_cache_page(struct vm_page_seg *seg,
+ boolean_t external_only,
+ boolean_t *was_active)
+{
+ struct vm_page *page;
+
+ page = vm_page_seg_pull_inactive_page(seg, external_only);
+
+ if (page != NULL) {
+ *was_active = FALSE;
+ return page;
+ }
+
+ page = vm_page_seg_pull_active_page(seg, external_only);
+
+ if (page != NULL) {
+ *was_active = TRUE;
+ return page;
+ }
+
+ return NULL;
+}
+
+/*
+ * True if the segment has more free pages than its high watermark,
+ * i.e. it can spare a page for balancing.
+ */
+static boolean_t
+vm_page_seg_page_available(const struct vm_page_seg *seg)
+{
+ return (seg->nr_free_pages > seg->high_free_pages);
+}
+
+/*
+ * True if the segment is usable for unprivileged allocations: either
+ * it has nothing left to page out, or its free pages are at or above
+ * the high watermark.
+ */
+static boolean_t
+vm_page_seg_usable(const struct vm_page_seg *seg)
+{
+ if ((seg->nr_active_pages + seg->nr_inactive_pages) == 0) {
+ /* Nothing to page out, assume segment is usable */
+ return TRUE;
+ }
+
+ return (seg->nr_free_pages >= seg->high_free_pages);
+}
+
+/*
+ * Lock two distinct segments in a globally consistent order (by
+ * address) to avoid lock-ordering deadlocks.
+ */
+static void
+vm_page_seg_double_lock(struct vm_page_seg *seg1, struct vm_page_seg *seg2)
+{
+ assert(seg1 != seg2);
+
+ if (seg1 < seg2) {
+ simple_lock(&seg1->lock);
+ simple_lock(&seg2->lock);
+ } else {
+ simple_lock(&seg2->lock);
+ simple_lock(&seg1->lock);
+ }
+}
+
+/*
+ * Release the locks taken by vm_page_seg_double_lock. Unlock order
+ * doesn't matter.
+ */
+static void
+vm_page_seg_double_unlock(struct vm_page_seg *seg1, struct vm_page_seg *seg2)
+{
+ simple_unlock(&seg1->lock);
+ simple_unlock(&seg2->lock);
+}
+
+/*
+ * Attempt to balance a segment by moving one page to another segment.
+ *
+ * Return TRUE if a page was actually moved.
+ *
+ * A cache page is pulled from seg, its contents copied into a page
+ * freshly allocated from remote_seg, the source page freed, and the
+ * copy re-inserted into the same object at the same offset and placed
+ * back on the queue it came from.
+ */
+static boolean_t
+vm_page_seg_balance_page(struct vm_page_seg *seg,
+ struct vm_page_seg *remote_seg)
+{
+ struct vm_page *src, *dest;
+ vm_object_t object;
+ vm_offset_t offset;
+ boolean_t was_active;
+
+ vm_page_lock_queues();
+ simple_lock(&vm_page_queue_free_lock);
+ vm_page_seg_double_lock(seg, remote_seg);
+
+ /* Only move when seg is under pressure and remote_seg has slack */
+ if (vm_page_seg_usable(seg)
+ || !vm_page_seg_page_available(remote_seg)) {
+ goto error;
+ }
+
+ /* On success, src's object is returned locked */
+ src = vm_page_seg_pull_cache_page(seg, FALSE, &was_active);
+
+ if (src == NULL) {
+ goto error;
+ }
+
+ assert(src->object != NULL);
+ assert(!src->fictitious && !src->private);
+ assert(src->wire_count == 0);
+ assert(src->type != VM_PT_FREE);
+ assert(src->order == VM_PAGE_ORDER_UNLISTED);
+
+ /* Can't fail: page_available(remote_seg) was just checked */
+ dest = vm_page_seg_alloc_from_buddy(remote_seg, 0);
+ assert(dest != NULL);
+
+ vm_page_seg_double_unlock(seg, remote_seg);
+ simple_unlock(&vm_page_queue_free_lock);
+
+ /* Preserve the hardware reference bit before unmapping */
+ if (!was_active && !src->reference && pmap_is_referenced(src->phys_addr)) {
+ src->reference = TRUE;
+ }
+
+ object = src->object;
+ offset = src->offset;
+ vm_page_remove(src);
+
+ vm_page_remove_mappings(src);
+
+ vm_page_set_type(dest, 0, src->type);
+ memcpy(&dest->vm_page_header, &src->vm_page_header,
+ VM_PAGE_BODY_SIZE);
+ vm_page_copy(src, dest);
+
+ if (!src->dirty) {
+ pmap_clear_modify(dest->phys_addr);
+ }
+
+ dest->busy = FALSE;
+
+ /* Return the source page to its segment's buddy lists */
+ simple_lock(&vm_page_queue_free_lock);
+ vm_page_init(src);
+ src->free = TRUE;
+ simple_lock(&seg->lock);
+ vm_page_set_type(src, 0, VM_PT_FREE);
+ vm_page_seg_free_to_buddy(seg, src, 0);
+ simple_unlock(&seg->lock);
+ simple_unlock(&vm_page_queue_free_lock);
+
+ vm_object_lock(object);
+ vm_page_insert(dest, object, offset);
+ vm_object_unlock(object);
+
+ /* Re-queue the copy where the original came from */
+ if (was_active) {
+ vm_page_activate(dest);
+ } else {
+ vm_page_deactivate(dest);
+ }
+
+ vm_page_unlock_queues();
+
+ return TRUE;
+
+error:
+ vm_page_seg_double_unlock(seg, remote_seg);
+ simple_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ return FALSE;
+}
+
+/*
+ * Try to move one page out of the given segment into any other
+ * segment, lowest priority segments first. Returns TRUE if a page
+ * was moved.
+ */
+static boolean_t
+vm_page_seg_balance(struct vm_page_seg *seg)
+{
+ struct vm_page_seg *remote_seg;
+ unsigned int i;
+ boolean_t balanced;
+
+ /*
+ * It's important here that pages are moved to lower priority
+ * segments first.
+ */
+
+ /* Downward loop: i wraps past 0 to a huge value, ending the loop */
+ for (i = vm_page_segs_size - 1; i < vm_page_segs_size; i--) {
+ remote_seg = vm_page_seg_get(i);
+
+ if (remote_seg == seg) {
+ continue;
+ }
+
+ balanced = vm_page_seg_balance_page(seg, remote_seg);
+
+ if (balanced) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/*
+ * Try to evict one cache page from the given segment.
+ *
+ * Clean, non-precious pages are reclaimed (freed) immediately; dirty
+ * or precious pages are handed to vm_pageout_page. When memory is
+ * critically low (alloc_paused) and the page belongs to an external,
+ * non-default pager, the page is marked for double paging and the
+ * function restarts to push it out through the default pager.
+ *
+ * Returns TRUE if a page was evicted or queued for pageout.
+ */
+static boolean_t
+vm_page_seg_evict(struct vm_page_seg *seg, boolean_t external_only,
+ boolean_t alloc_paused)
+{
+ struct vm_page *page;
+ boolean_t reclaim, double_paging;
+ vm_object_t object;
+ boolean_t was_active;
+
+ page = NULL;
+ object = NULL;
+ double_paging = FALSE;
+
+restart:
+ vm_page_lock_queues();
+ simple_lock(&seg->lock);
+
+ if (page != NULL) {
+ /* Second pass of a double paging: keep working on this page */
+ vm_object_lock(page->object);
+ } else {
+ /* On success, the page's object is returned locked */
+ page = vm_page_seg_pull_cache_page(seg, external_only, &was_active);
+
+ if (page == NULL) {
+ goto out;
+ }
+ }
+
+ assert(page->object != NULL);
+ assert(!page->fictitious && !page->private);
+ assert(page->wire_count == 0);
+ assert(page->type != VM_PT_FREE);
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+
+ object = page->object;
+
+ /* Recently referenced inactive pages are reactivated, not evicted */
+ if (!was_active
+ && (page->reference || pmap_is_referenced(page->phys_addr))) {
+ vm_page_seg_add_active_page(seg, page);
+ simple_unlock(&seg->lock);
+ vm_object_unlock(object);
+ vm_stat.reactivations++;
+ current_task()->reactivations++;
+ vm_page_unlock_queues();
+ page = NULL;
+ goto restart;
+ }
+
+ vm_page_remove_mappings(page);
+
+ if (!page->dirty && !page->precious) {
+ reclaim = TRUE;
+ goto out;
+ }
+
+ reclaim = FALSE;
+
+ /*
+ * If we are very low on memory, then we can't rely on an external
+ * pager to clean a dirty page, because external pagers are not
+ * vm-privileged.
+ *
+ * The laundry bit tells vm_pageout_setup not to do any special
+ * processing of this page since it's immediately going to be
+ * double paged out to the default pager. The laundry bit is
+ * reset and the page is inserted into an internal object by
+ * vm_pageout_setup before the second double paging pass.
+ *
+ * There is one important special case: the default pager can
+ * back external memory objects. When receiving the first
+ * pageout request, where the page is no longer present, a
+ * fault could occur, during which the map would be locked.
+ * This fault would cause a new paging request to the default
+ * pager. Receiving that request would deadlock when trying to
+ * lock the map again. Instead, the page isn't double paged
+ * and vm_pageout_setup wires the page down, trusting the
+ * default pager as for internal pages.
+ */
+
+ assert(!page->laundry);
+ assert(!(double_paging && page->external));
+
+ if (object->internal || !alloc_paused ||
+ memory_manager_default_port(object->pager)) {
+ double_paging = FALSE;
+ } else {
+ double_paging = page->laundry = TRUE;
+ }
+
+out:
+ simple_unlock(&seg->lock);
+
+ /* object == NULL means no candidate page was found */
+ if (object == NULL) {
+ vm_page_unlock_queues();
+ return FALSE;
+ }
+
+ if (reclaim) {
+ vm_page_free(page);
+ vm_page_unlock_queues();
+
+ if (vm_object_collectable(object)) {
+ vm_object_collect(object);
+ } else {
+ vm_object_unlock(object);
+ }
+
+ return TRUE;
+ }
+
+ vm_page_unlock_queues();
+
+ /*
+ * If there is no memory object for the page, create one and hand it
+ * to the default pager. First try to collapse, so we don't create
+ * one unnecessarily.
+ */
+
+ if (!object->pager_initialized) {
+ vm_object_collapse(object);
+ }
+
+ if (!object->pager_initialized) {
+ vm_object_pager_create(object);
+ }
+
+ if (!object->pager_initialized) {
+ panic("vm_page_seg_evict");
+ }
+
+ vm_pageout_page(page, FALSE, TRUE); /* flush it */
+ vm_object_unlock(object);
+
+ if (double_paging) {
+ goto restart;
+ }
+
+ return TRUE;
+}
+
+/*
+ * Recompute the segment's active-page high watermark as a fixed
+ * fraction (NUM/DENOM) of the total pageable (active + inactive)
+ * pages.
+ */
+static void
+vm_page_seg_compute_high_active_page(struct vm_page_seg *seg)
+{
+ unsigned long nr_pages;
+
+ nr_pages = seg->nr_active_pages + seg->nr_inactive_pages;
+ seg->high_active_pages = nr_pages * VM_PAGE_HIGH_ACTIVE_PAGE_NUM
+ / VM_PAGE_HIGH_ACTIVE_PAGE_DENOM;
+}
+
+/*
+ * Move pages from the active to the inactive queue until the active
+ * queue shrinks to its high watermark. Moved pages get their
+ * reference state cleared so later reference checks are meaningful.
+ */
+static void
+vm_page_seg_refill_inactive(struct vm_page_seg *seg)
+{
+ struct vm_page *page;
+
+ simple_lock(&seg->lock);
+
+ vm_page_seg_compute_high_active_page(seg);
+
+ while (seg->nr_active_pages > seg->high_active_pages) {
+ /* On success, the page's object is returned locked */
+ page = vm_page_seg_pull_active_page(seg, FALSE);
+
+ if (page == NULL) {
+ break;
+ }
+
+ page->reference = FALSE;
+ pmap_clear_reference(page->phys_addr);
+ vm_page_seg_add_inactive_page(seg, page);
+ vm_object_unlock(page->object);
+ }
+
+ simple_unlock(&seg->lock);
+}
+
+/*
+ * Register a physical memory range for the given boot segment at
+ * early boot. Both addresses must be page-aligned. Segments are
+ * expected to be loaded in index order (vm_page_segs_size counts
+ * loaded segments).
+ */
+void __init
+vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end)
+{
+ struct vm_page_boot_seg *seg;
+
+ assert(seg_index < ARRAY_SIZE(vm_page_boot_segs));
+ assert(vm_page_aligned(start));
+ assert(vm_page_aligned(end));
+ assert(start < end);
+ assert(vm_page_segs_size < ARRAY_SIZE(vm_page_boot_segs));
+
+ seg = &vm_page_boot_segs[seg_index];
+ seg->start = start;
+ seg->end = end;
+ seg->heap_present = FALSE;
+
+#if DEBUG
+ printf("vm_page: load: %s: %llx:%llx\n",
+ vm_page_seg_name(seg_index),
+ (unsigned long long)start, (unsigned long long)end);
+#endif
+
+ vm_page_segs_size++;
+}
+
+/*
+ * Declare the heap (allocatable) sub-range of a previously loaded
+ * boot segment. The range must lie within the segment's bounds.
+ */
+void
+vm_page_load_heap(unsigned int seg_index, phys_addr_t start, phys_addr_t end)
+{
+ struct vm_page_boot_seg *seg;
+
+ assert(seg_index < ARRAY_SIZE(vm_page_boot_segs));
+ assert(vm_page_aligned(start));
+ assert(vm_page_aligned(end));
+
+ seg = &vm_page_boot_segs[seg_index];
+
+ assert(seg->start <= start);
+ assert(end <= seg-> end);
+
+ seg->avail_start = start;
+ seg->avail_end = end;
+ seg->heap_present = TRUE;
+
+#if DEBUG
+ printf("vm_page: heap: %s: %llx:%llx\n",
+ vm_page_seg_name(seg_index),
+ (unsigned long long)start, (unsigned long long)end);
+#endif
+}
+
+/*
+ * Report whether vm_page_setup has completed and the page table is
+ * usable.
+ */
+int
+vm_page_ready(void)
+{
+ return vm_page_is_ready;
+}
+
+/*
+ * Map an allocation selector to the highest segment index allowed for
+ * it, clamped to the number of segments actually present.
+ */
+static unsigned int
+vm_page_select_alloc_seg(unsigned int selector)
+{
+ unsigned int seg_index;
+
+ switch (selector) {
+ case VM_PAGE_SEL_DMA:
+ seg_index = VM_PAGE_SEG_DMA;
+ break;
+ case VM_PAGE_SEL_DMA32:
+ seg_index = VM_PAGE_SEG_DMA32;
+ break;
+ case VM_PAGE_SEL_DIRECTMAP:
+ seg_index = VM_PAGE_SEG_DIRECTMAP;
+ break;
+ case VM_PAGE_SEL_HIGHMEM:
+ seg_index = VM_PAGE_SEG_HIGHMEM;
+ break;
+ default:
+ panic("vm_page: invalid selector");
+ }
+
+ return MIN(vm_page_segs_size - 1, seg_index);
+}
+
+/* A boot segment is considered loaded once its end address is set */
+static int __init
+vm_page_boot_seg_loaded(const struct vm_page_boot_seg *seg)
+{
+ return (seg->end != 0);
+}
+
+/*
+ * Sanity-check the boot segment table: at least one segment must be
+ * loaded, and loaded segments must occupy exactly the first
+ * vm_page_segs_size slots.
+ */
+static void __init
+vm_page_check_boot_segs(void)
+{
+ unsigned int i;
+ int expect_loaded;
+
+ if (vm_page_segs_size == 0)
+ panic("vm_page: no physical memory loaded");
+
+ for (i = 0; i < ARRAY_SIZE(vm_page_boot_segs); i++) {
+ expect_loaded = (i < vm_page_segs_size);
+
+ if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded)
+ continue;
+
+ panic("vm_page: invalid boot segment table");
+ }
+}
+
+/* Total size of a boot segment, in bytes */
+static phys_addr_t __init
+vm_page_boot_seg_size(struct vm_page_boot_seg *seg)
+{
+ return seg->end - seg->start;
+}
+
+/* Remaining allocatable (heap) size of a boot segment, in bytes */
+static phys_addr_t __init
+vm_page_boot_seg_avail_size(struct vm_page_boot_seg *seg)
+{
+ return seg->avail_end - seg->avail_start;
+}
+
+/*
+ * Early-boot physical memory allocator, used before vm_page_setup.
+ *
+ * Carves the (page-rounded) request out of the first boot segment
+ * that can hold it, scanning from the direct-mapped segment downward.
+ * Panics if no segment has enough room.
+ */
+phys_addr_t __init
+vm_page_bootalloc(size_t size)
+{
+ struct vm_page_boot_seg *seg;
+ phys_addr_t pa;
+ unsigned int i;
+
+ /* Downward scan: unsigned i wraps past 0, terminating the loop */
+ for (i = vm_page_select_alloc_seg(VM_PAGE_SEL_DIRECTMAP);
+ i < vm_page_segs_size;
+ i--) {
+ seg = &vm_page_boot_segs[i];
+
+ if (size <= vm_page_boot_seg_avail_size(seg)) {
+ pa = seg->avail_start;
+ seg->avail_start += vm_page_round(size);
+ return pa;
+ }
+ }
+
+ panic("vm_page: no physical memory available");
+}
+
+/*
+ * Build the physical page table and initialize the segments from the
+ * boot segments, then release all heap pages to the buddy free lists.
+ * Pages backing the page table itself are marked VM_PT_TABLE. Must
+ * run once at boot; sets vm_page_is_ready on completion.
+ */
+void __init
+vm_page_setup(void)
+{
+ struct vm_page_boot_seg *boot_seg;
+ struct vm_page_seg *seg;
+ struct vm_page *table, *page, *end;
+ size_t nr_pages, table_size;
+ unsigned long va;
+ unsigned int i;
+ phys_addr_t pa;
+
+ vm_page_check_boot_segs();
+
+ /*
+ * Compute the page table size.
+ */
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++)
+ nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+
+ table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
+ printf("vm_page: page table size: %lu entries (%luk)\n", nr_pages,
+ table_size >> 10);
+ table = (struct vm_page *)pmap_steal_memory(table_size);
+ va = (unsigned long)table;
+
+ /*
+ * Initialize the segments, associating them to the page table. When
+ * the segments are initialized, all their pages are set allocated.
+ * Pages are then released, which populates the free lists.
+ */
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+ boot_seg = &vm_page_boot_segs[i];
+ vm_page_seg_init(seg, boot_seg->start, boot_seg->end, table);
+ page = seg->pages + vm_page_atop(boot_seg->avail_start
+ - boot_seg->start);
+ end = seg->pages + vm_page_atop(boot_seg->avail_end
+ - boot_seg->start);
+
+ /* Only the heap range [avail_start, avail_end) is freed */
+ while (page < end) {
+ page->type = VM_PT_FREE;
+ vm_page_seg_free_to_buddy(seg, page, 0);
+ page++;
+ }
+
+ table += vm_page_atop(vm_page_seg_size(seg));
+ }
+
+ /* Retype the pages that hold the page table itself */
+ while (va < (unsigned long)table) {
+ pa = pmap_extract(kernel_pmap, va);
+ page = vm_page_lookup_pa(pa);
+ assert((page != NULL) && (page->type == VM_PT_RESERVED));
+ page->type = VM_PT_TABLE;
+ va += PAGE_SIZE;
+ }
+
+ vm_page_is_ready = 1;
+}
+
+/*
+ * Hand a reserved page over to the allocator, making it available
+ * through the buddy free lists.
+ */
+void __init
+vm_page_manage(struct vm_page *page)
+{
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+ assert(page->type == VM_PT_RESERVED);
+
+ vm_page_set_type(page, 0, VM_PT_FREE);
+ vm_page_seg_free_to_buddy(&vm_page_segs[page->seg_index], page, 0);
+}
+
+/*
+ * Return the vm_page structure for a physical address, or NULL if the
+ * address falls outside every segment.
+ */
+struct vm_page *
+vm_page_lookup_pa(phys_addr_t pa)
+{
+ struct vm_page_seg *seg;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+
+ if ((pa >= seg->start) && (pa < seg->end))
+ return &seg->pages[vm_page_atop(pa - seg->start)];
+ }
+
+ return NULL;
+}
+
+/*
+ * Return the segment whose physical range contains the given page's
+ * address, or NULL if none does.
+ */
+static struct vm_page_seg *
+vm_page_lookup_seg(const struct vm_page *page)
+{
+ struct vm_page_seg *seg;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+
+ if ((page->phys_addr >= seg->start) && (page->phys_addr < seg->end)) {
+ return seg;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Sanity-check the invariants of a vm_page structure, panicking on
+ * any violation: fictitious pages must carry the fictitious address
+ * and can't be private; real pages must either be private or belong
+ * to a segment matching their seg_index; private pages (and the real
+ * pages underlying them) must not be pageable or free.
+ */
+void vm_page_check(const struct vm_page *page)
+{
+ if (page->fictitious) {
+ if (page->private) {
+ panic("vm_page: page both fictitious and private");
+ }
+
+ if (page->phys_addr != vm_page_fictitious_addr) {
+ panic("vm_page: invalid fictitious page");
+ }
+ } else {
+ struct vm_page_seg *seg;
+
+ if (page->phys_addr == vm_page_fictitious_addr) {
+ panic("vm_page: real page has fictitious address");
+ }
+
+ seg = vm_page_lookup_seg(page);
+
+ if (seg == NULL) {
+ if (!page->private) {
+ panic("vm_page: page claims it's managed but not in any segment");
+ }
+ } else {
+ if (page->private) {
+ struct vm_page *real_page;
+
+ if (vm_page_pageable(page)) {
+ panic("vm_page: private page is pageable");
+ }
+
+ real_page = vm_page_lookup_pa(page->phys_addr);
+
+ if (vm_page_pageable(real_page)) {
+ panic("vm_page: page underlying private page is pageable");
+ }
+
+ /* Fixed typo in panic message ("pagei" -> "page") */
+ if ((real_page->type == VM_PT_FREE)
+ || (real_page->order != VM_PAGE_ORDER_UNLISTED)) {
+ panic("vm_page: page underlying private page is free");
+ }
+ } else {
+ unsigned int index;
+
+ index = vm_page_seg_index(seg);
+
+ if (index != page->seg_index) {
+ panic("vm_page: page segment mismatch");
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Allocate a block of 2^order physical pages, trying segments from
+ * the selector's highest allowed segment downward. Returns NULL on
+ * failure for ordinary threads; panics if the failing context has no
+ * current thread yet or is vm-privileged, since such allocations must
+ * not fail.
+ */
+struct vm_page *
+vm_page_alloc_pa(unsigned int order, unsigned int selector, unsigned short type)
+{
+ struct vm_page *page;
+ unsigned int i;
+
+ /* Downward scan: unsigned i wraps past 0, terminating the loop */
+ for (i = vm_page_select_alloc_seg(selector); i < vm_page_segs_size; i--) {
+ page = vm_page_seg_alloc(&vm_page_segs[i], order, type);
+
+ if (page != NULL)
+ return page;
+ }
+
+ if (!current_thread() || current_thread()->vm_privilege)
+ panic("vm_page: privileged thread unable to allocate page");
+
+ return NULL;
+}
+
+/*
+ * Free a block of 2^order pages back to the segment recorded in the
+ * page structure.
+ */
+void
+vm_page_free_pa(struct vm_page *page, unsigned int order)
+{
+ assert(page != NULL);
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+
+ vm_page_seg_free(&vm_page_segs[page->seg_index], page, order);
+}
+
+/*
+ * Return a human-readable name for a segment index. Panics on an
+ * invalid index.
+ */
+const char *
+vm_page_seg_name(unsigned int seg_index)
+{
+ /* Don't use a switch statement since segments can be aliased */
+ if (seg_index == VM_PAGE_SEG_HIGHMEM)
+ return "HIGHMEM";
+ else if (seg_index == VM_PAGE_SEG_DIRECTMAP)
+ return "DIRECTMAP";
+ else if (seg_index == VM_PAGE_SEG_DMA32)
+ return "DMA32";
+ else if (seg_index == VM_PAGE_SEG_DMA)
+ return "DMA";
+ else
+ panic("vm_page: invalid segment index");
+}
+
+/*
+ * Print per-segment page counts and free-page watermarks to the
+ * console.
+ */
+void
+vm_page_info_all(void)
+{
+ struct vm_page_seg *seg;
+ unsigned long pages;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+ pages = (unsigned long)(seg->pages_end - seg->pages);
+ printf("vm_page: %s: pages: %lu (%luM), free: %lu (%luM)\n",
+ vm_page_seg_name(i), pages, pages >> (20 - PAGE_SHIFT),
+ seg->nr_free_pages, seg->nr_free_pages >> (20 - PAGE_SHIFT));
+ printf("vm_page: %s: min:%lu low:%lu high:%lu\n",
+ vm_page_seg_name(vm_page_seg_index(seg)),
+ seg->min_free_pages, seg->low_free_pages, seg->high_free_pages);
+ }
+}
+
+/*
+ * Return the end physical address of the highest segment allowed by
+ * the given selector.
+ */
+phys_addr_t
+vm_page_seg_end(unsigned int selector)
+{
+ return vm_page_segs[vm_page_select_alloc_seg(selector)].end;
+}
+
+/*
+ * Number of page table entries needed, computed from the boot
+ * segments (used before vm_page_setup completes).
+ */
+static unsigned long
+vm_page_boot_table_size(void)
+{
+ unsigned long nr_pages;
+ unsigned int i;
+
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+ }
+
+ return nr_pages;
+}
+
+/*
+ * Total number of page table entries across all segments. Falls back
+ * to the boot segment table before vm_page_setup has run.
+ */
+unsigned long
+vm_page_table_size(void)
+{
+ unsigned long nr_pages;
+ unsigned int i;
+
+ if (!vm_page_is_ready) {
+ return vm_page_boot_table_size();
+ }
+
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ nr_pages += vm_page_atop(vm_page_seg_size(&vm_page_segs[i]));
+ }
+
+ return nr_pages;
+}
+
+/*
+ * Return the global page table index of the page containing the given
+ * physical address, counting pages of all preceding segments. Panics
+ * if the address belongs to no segment.
+ */
+unsigned long
+vm_page_table_index(phys_addr_t pa)
+{
+ struct vm_page_seg *seg;
+ unsigned long index;
+ unsigned int i;
+
+ index = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+
+ if ((pa >= seg->start) && (pa < seg->end)) {
+ return index + vm_page_atop(pa - seg->start);
+ }
+
+ index += vm_page_atop(vm_page_seg_size(seg));
+ }
+
+ panic("vm_page: invalid physical address");
+}
+
+/* Total managed physical memory, in bytes */
+phys_addr_t
+vm_page_mem_size(void)
+{
+ phys_addr_t total;
+ unsigned int i;
+
+ total = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ total += vm_page_seg_size(&vm_page_segs[i]);
+ }
+
+ return total;
+}
+
+/*
+ * Total number of free pages across all segments. Read without
+ * taking the segment locks, so the result is approximate.
+ */
+unsigned long
+vm_page_mem_free(void)
+{
+ unsigned long total;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ total += vm_page_segs[i].nr_free_pages;
+ }
+
+ return total;
+}
+
+/*
+ * Mark this page as wired down by yet another map, removing it
+ * from paging queues as necessary.
+ *
+ * The page's object and the page queues must be locked.
+ */
+void
+vm_page_wire(struct vm_page *page)
+{
+ VM_PAGE_CHECK(page);
+
+ if (page->wire_count == 0) {
+ /* First wiring: take the page off the paging queues */
+ vm_page_queues_remove(page);
+
+ if (!page->private && !page->fictitious) {
+ vm_page_wire_count++;
+ }
+ }
+
+ page->wire_count++;
+}
+
+/*
+ * Release one wiring of this page, potentially enabling it to be paged again.
+ *
+ * The page's object and the page queues must be locked.
+ */
+void
+vm_page_unwire(struct vm_page *page)
+{
+ struct vm_page_seg *seg;
+
+ VM_PAGE_CHECK(page);
+
+ assert(page->wire_count != 0);
+ page->wire_count--;
+
+ /* Only the last unwire of a real, managed page re-queues it */
+ if ((page->wire_count != 0)
+ || page->fictitious
+ || page->private) {
+ return;
+ }
+
+ seg = vm_page_seg_get(page->seg_index);
+
+ simple_lock(&seg->lock);
+ vm_page_seg_add_active_page(seg, page);
+ simple_unlock(&seg->lock);
+
+ vm_page_wire_count--;
+}
+
+/*
+ * Returns the given page to the inactive list, indicating that
+ * no physical maps have access to this page.
+ * [Used by the physical mapping system.]
+ *
+ * The page queues must be locked.
+ */
+void
+vm_page_deactivate(struct vm_page *page)
+{
+ struct vm_page_seg *seg;
+
+ VM_PAGE_CHECK(page);
+
+ /*
+ * This page is no longer very interesting. If it was
+ * interesting (active or inactive/referenced), then we
+ * clear the reference bit and (re)enter it in the
+ * inactive queue. Note wired pages should not have
+ * their reference bit cleared.
+ */
+
+ if (page->active || (page->inactive && page->reference)) {
+ if (!page->fictitious && !page->private && !page->absent) {
+ pmap_clear_reference(page->phys_addr);
+ }
+
+ page->reference = FALSE;
+ vm_page_queues_remove(page);
+ }
+
+ /* Queue only unwired, real, managed pages not already inactive */
+ if ((page->wire_count == 0) && !page->fictitious
+ && !page->private && !page->inactive) {
+ seg = vm_page_seg_get(page->seg_index);
+
+ simple_lock(&seg->lock);
+ vm_page_seg_add_inactive_page(seg, page);
+ simple_unlock(&seg->lock);
+ }
+}
+
+/*
+ * Put the specified page on the active list (if appropriate).
+ *
+ * The page queues must be locked.
+ */
+void
+vm_page_activate(struct vm_page *page)
+{
+ struct vm_page_seg *seg;
+
+ VM_PAGE_CHECK(page);
+
+ /*
+ * Unconditionally remove so that, even if the page was already
+ * active, it gets back to the end of the active queue.
+ */
+ vm_page_queues_remove(page);
+
+ if ((page->wire_count == 0) && !page->fictitious && !page->private) {
+ seg = vm_page_seg_get(page->seg_index);
+
+ /* Can't happen: the page was just removed from the queues */
+ if (page->active)
+ panic("vm_page_activate: already active");
+
+ simple_lock(&seg->lock);
+ vm_page_seg_add_active_page(seg, page);
+ simple_unlock(&seg->lock);
+ }
+}
+
+/*
+ * Remove a page from whichever paging queue (active or inactive) it
+ * is currently on, if any. The page queues must be locked.
+ */
+void
+vm_page_queues_remove(struct vm_page *page)
+{
+ assert(!page->active || !page->inactive);
+
+ if (!page->active && !page->inactive) {
+ return;
+ }
+
+ seg = vm_page_seg_get(page->seg_index);
+
+ simple_lock(&seg->lock);
+
+ if (page->active) {
+ vm_page_seg_remove_active_page(seg, page);
+ } else {
+ vm_page_seg_remove_inactive_page(seg, page);
+ }
+
+ simple_unlock(&seg->lock);
+}
+
+/*
+ * Check whether segments are all usable for unprivileged allocations.
+ *
+ * If all segments are usable, resume pending unprivileged allocations
+ * and return TRUE.
+ *
+ * This function acquires vm_page_queue_free_lock, which is held on
+ * return (in both the TRUE and FALSE cases).
+ */
+static boolean_t
+vm_page_check_usable(void)
+{
+ struct vm_page_seg *seg;
+ boolean_t usable;
+ unsigned int i;
+
+ simple_lock(&vm_page_queue_free_lock);
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = vm_page_seg_get(i);
+
+ simple_lock(&seg->lock);
+ usable = vm_page_seg_usable(seg);
+ simple_unlock(&seg->lock);
+
+ if (!usable) {
+ return FALSE;
+ }
+ }
+
+ /* All segments usable: wake threads paused in vm_page_wait */
+ vm_page_external_laundry_count = -1;
+ vm_page_alloc_paused = FALSE;
+ thread_wakeup(&vm_page_alloc_paused);
+ return TRUE;
+}
+
+/*
+ * True if at least one segment has free pages above its high
+ * watermark, i.e. balancing could move a page somewhere.
+ */
+static boolean_t
+vm_page_may_balance(void)
+{
+ struct vm_page_seg *seg;
+ boolean_t page_available;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = vm_page_seg_get(i);
+
+ simple_lock(&seg->lock);
+ page_available = vm_page_seg_page_available(seg);
+ simple_unlock(&seg->lock);
+
+ if (page_available) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/*
+ * Try to move one page between segments. Returns TRUE if a page was
+ * moved.
+ */
+static boolean_t
+vm_page_balance_once(void)
+{
+ boolean_t balanced;
+ unsigned int i;
+
+ /*
+ * It's important here that pages are moved from higher priority
+ * segments first.
+ */
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ balanced = vm_page_seg_balance(vm_page_seg_get(i));
+
+ if (balanced) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/*
+ * Move pages between segments while it is both possible and useful,
+ * then report (via vm_page_check_usable, which returns holding
+ * vm_page_queue_free_lock) whether all segments are usable.
+ */
+boolean_t
+vm_page_balance(void)
+{
+ boolean_t balanced;
+
+ while (vm_page_may_balance()) {
+ balanced = vm_page_balance_once();
+
+ if (!balanced) {
+ break;
+ }
+ }
+
+ return vm_page_check_usable();
+}
+
+/*
+ * Try to evict one page from any segment. Returns TRUE if a page was
+ * evicted or queued for pageout.
+ */
+static boolean_t
+vm_page_evict_once(boolean_t external_only, boolean_t alloc_paused)
+{
+ boolean_t evicted;
+ unsigned int i;
+
+ /*
+ * It's important here that pages are evicted from lower priority
+ * segments first.
+ */
+
+ /* Downward loop: unsigned i wraps past 0, terminating the loop */
+ for (i = vm_page_segs_size - 1; i < vm_page_segs_size; i--) {
+ evicted = vm_page_seg_evict(vm_page_seg_get(i),
+ external_only, alloc_paused);
+
+ if (evicted) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/* Throttle limits for a single vm_page_evict pass */
+#define VM_PAGE_MAX_LAUNDRY 5
+#define VM_PAGE_MAX_EVICTIONS 5
+
+/*
+ * Evict up to VM_PAGE_MAX_EVICTIONS pages, starting with external
+ * pages only and falling back to internal ones if nothing could be
+ * evicted. Returns through vm_page_check_usable (which returns
+ * holding vm_page_queue_free_lock). *should_wait tells the caller
+ * whether to sleep before the next pass; it is cleared when clean
+ * pages were freed without any pageout being queued, so another scan
+ * can start immediately.
+ */
+boolean_t
+vm_page_evict(boolean_t *should_wait)
+{
+ boolean_t pause, evicted, external_only, alloc_paused;
+ unsigned int i;
+
+ *should_wait = TRUE;
+ external_only = TRUE;
+
+ simple_lock(&vm_page_queue_free_lock);
+ vm_page_external_laundry_count = 0;
+ alloc_paused = vm_page_alloc_paused;
+ simple_unlock(&vm_page_queue_free_lock);
+
+again:
+ vm_page_lock_queues();
+ pause = (vm_page_laundry_count >= VM_PAGE_MAX_LAUNDRY);
+ vm_page_unlock_queues();
+
+ /* Too much laundry in flight: back off, free lock held on return */
+ if (pause) {
+ simple_lock(&vm_page_queue_free_lock);
+ return FALSE;
+ }
+
+ for (i = 0; i < VM_PAGE_MAX_EVICTIONS; i++) {
+ evicted = vm_page_evict_once(external_only, alloc_paused);
+
+ if (!evicted) {
+ break;
+ }
+ }
+
+ simple_lock(&vm_page_queue_free_lock);
+
+ /*
+ * Keep in mind eviction may not cause pageouts, since non-precious
+ * clean pages are simply released.
+ */
+ if ((vm_page_laundry_count == 0) && (vm_page_external_laundry_count == 0)) {
+ /*
+ * No pageout, but some clean pages were freed. Start a complete
+ * scan again without waiting.
+ */
+ if (evicted) {
+ *should_wait = FALSE;
+ return FALSE;
+ }
+
+ /*
+ * Eviction failed, consider pages from internal objects on the
+ * next attempt.
+ */
+ if (external_only) {
+ simple_unlock(&vm_page_queue_free_lock);
+ external_only = FALSE;
+ goto again;
+ }
+
+ /*
+ * TODO Find out what could cause this and how to deal with it.
+ * This will likely require an out-of-memory killer.
+ */
+
+ {
+ static boolean_t warned = FALSE;
+
+ if (!warned) {
+ printf("vm_page warning: unable to recycle any page\n");
+ /* NOTE(review): TRUE would match the boolean_t style used above */
+ warned = 1;
+ }
+ }
+ }
+
+ simple_unlock(&vm_page_queue_free_lock);
+
+ return vm_page_check_usable();
+}
+
+/*
+ * Refill the inactive queues of all segments from their active
+ * queues, under the page queues lock.
+ */
+void
+vm_page_refill_inactive(void)
+{
+ unsigned int i;
+
+ vm_page_lock_queues();
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ vm_page_seg_refill_inactive(vm_page_seg_get(i));
+ }
+
+ vm_page_unlock_queues();
+}
+
+/*
+ * Block an unprivileged thread until paused allocations are resumed
+ * (vm_page_alloc_paused cleared by vm_page_check_usable). Returns
+ * immediately if allocations are not currently paused. An optional
+ * continuation is invoked when the thread blocks.
+ */
+void
+vm_page_wait(void (*continuation)(void))
+{
+ assert(!current_thread()->vm_privilege);
+
+ simple_lock(&vm_page_queue_free_lock);
+
+ if (!vm_page_alloc_paused) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return;
+ }
+
+ assert_wait(&vm_page_alloc_paused, FALSE);
+
+ simple_unlock(&vm_page_queue_free_lock);
+
+ if (continuation != 0) {
+ counter(c_vm_page_wait_block_user++);
+ thread_block(continuation);
+ } else {
+ counter(c_vm_page_wait_block_kernel++);
+ thread_block((void (*)(void)) 0);
+ }
+}
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#define PAGES_PER_MB ((1<<20) / PAGE_SIZE)
+/*
+ * Kernel debugger command: display global and per-segment VM
+ * statistics. Page counts are reported in megabytes.
+ *
+ * Fixed two invalid printf conversions: a bare '%' before '\n' is an
+ * undefined conversion specification; the hit ratio now uses "%%" to
+ * print a literal percent sign, and cached_memobjs is printed as a
+ * plain count. Also guards the hit ratio against division by zero
+ * when no object lookups have happened yet.
+ */
+void db_show_vmstat(void)
+{
+ integer_t free_count = vm_page_mem_free();
+ unsigned i;
+
+ db_printf("%-20s %10uM\n", "size:",
+ (free_count + vm_page_active_count +
+ vm_page_inactive_count + vm_page_wire_count)
+ / PAGES_PER_MB);
+
+ db_printf("%-20s %10uM\n", "free:",
+ free_count / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "active:",
+ vm_page_active_count / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "inactive:",
+ vm_page_inactive_count / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "wired:",
+ vm_page_wire_count / PAGES_PER_MB);
+
+ db_printf("%-20s %10uM\n", "zero filled:",
+ vm_stat.zero_fill_count / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "reactivated:",
+ vm_stat.reactivations / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "pageins:",
+ vm_stat.pageins / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "pageouts:",
+ vm_stat.pageouts / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "page faults:",
+ vm_stat.faults / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "cow faults:",
+ vm_stat.cow_faults / PAGES_PER_MB);
+
+ /* Avoid division by zero before any object lookup happened */
+ if (vm_stat.lookups != 0)
+ db_printf("%-20s %10u%%\n", "memobj hit ratio:",
+ (vm_stat.hits * 100) / vm_stat.lookups);
+
+ db_printf("%-20s %10u\n", "cached_memobjs",
+ vm_object_external_count);
+ db_printf("%-20s %10uM\n", "cache",
+ vm_object_external_pages / PAGES_PER_MB);
+
+ for (i = 0; i < vm_page_segs_size; i++)
+ {
+ db_printf("\nSegment %s:\n", vm_page_seg_name(i));
+ db_printf("%-20s %10uM\n", "size:",
+ vm_page_seg_size(&vm_page_segs[i]) >> 20);
+ db_printf("%-20s %10uM\n", "free:",
+ vm_page_segs[i].nr_free_pages / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "min_free:",
+ vm_page_segs[i].min_free_pages / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "low_free:",
+ vm_page_segs[i].low_free_pages / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "high_free:",
+ vm_page_segs[i].high_free_pages / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "active:",
+ vm_page_segs[i].nr_active_pages / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "high active:",
+ vm_page_segs[i].high_active_pages / PAGES_PER_MB);
+ db_printf("%-20s %10uM\n", "inactive:",
+ vm_page_segs[i].nr_inactive_pages / PAGES_PER_MB);
+ }
+}
+#endif /* MACH_KDB */
diff --git a/vm/vm_page.h b/vm/vm_page.h
new file mode 100644
index 0000000..3be75f1
--- /dev/null
+++ b/vm/vm_page.h
@@ -0,0 +1,567 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_page.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Resident memory system definitions.
+ */
+
+#ifndef _VM_VM_PAGE_H_
+#define _VM_VM_PAGE_H_
+
+#include <mach/boolean.h>
+#include <mach/vm_prot.h>
+#include <machine/vm_param.h>
+#include <vm/vm_object.h>
+#include <vm/vm_types.h>
+#include <kern/queue.h>
+#include <kern/list.h>
+#include <kern/lock.h>
+#include <kern/log2.h>
+
+#include <kern/macros.h>
+#include <kern/sched_prim.h> /* definitions of wait/wakeup */
+
+#if MACH_VM_DEBUG
+#include <mach_debug/hash_info.h>
+#endif
+
+/*
+ * Management of resident (logical) pages.
+ *
+ * A small structure is kept for each resident
+ * page, indexed by page number. Each structure
+ * is an element of several lists:
+ *
+ * A hash table bucket used to quickly
+ * perform object/offset lookups
+ *
+ * A list of all pages for a given object,
+ * so they can be quickly deactivated at
+ * time of deallocation.
+ *
+ * An ordered list of pages due for pageout.
+ *
+ * In addition, the structure contains the object
+ * and offset to which this page belongs (for pageout),
+ * and sundry status bits.
+ *
+ * Fields in this structure are locked either by the lock on the
+ * object that the page belongs to (O) or by the lock on the page
+ * queues (P). [Some fields require that both locks be held to
+ * change that field; holding either lock is sufficient to read.]
+ */
+
+struct vm_page {
+ struct list node; /* page queues or free list (P) */
+ void *priv;
+
+ /*
+ * This member is used throughout the code and may only change for
+ * fictitious pages.
+ */
+ phys_addr_t phys_addr;
+
+ queue_chain_t listq; /* all pages in same object (O) */
+ struct vm_page *next; /* VP bucket link (O) */
+
+ /* We use an empty struct as the delimiter. */
+ struct {} vm_page_header;
+
+ vm_object_t object; /* which object am I in (O,P) */
+ vm_offset_t offset; /* offset into that object (O,P) */
+
+ unsigned int wire_count:15, /* how many wired down maps use me?
+ (O&P) */
+ /* boolean_t */ inactive:1, /* page is in inactive list (P) */
+ active:1, /* page is in active list (P) */
+ laundry:1, /* page is being cleaned now (P)*/
+ external_laundry:1, /* same as laundry for external pagers (P)*/
+ free:1, /* page is on free list (P) */
+ reference:1, /* page has been used (P) */
+ external:1, /* page in external object (P) */
+ busy:1, /* page is in transit (O) */
+ wanted:1, /* someone is waiting for page (O) */
+ tabled:1, /* page is in VP table (O) */
+ fictitious:1, /* Physical page doesn't exist (O) */
+ private:1, /* Page should not be returned to
+ * the free list (O) */
+ absent:1, /* Data has been requested, but is
+ * not yet available (O) */
+ error:1, /* Data manager was unable to provide
+ * data due to error (O) */
+ dirty:1, /* Page must be cleaned (O) */
+ precious:1, /* Page is precious; data must be
+ * returned even if clean (O) */
+ overwriting:1; /* Request to unlock has been made
+ * without having data. (O)
+ * [See vm_object_overwrite] */
+
+ vm_prot_t page_lock:3; /* Uses prohibited by data manager (O) */
+ vm_prot_t unlock_request:3; /* Outstanding unlock request (O) */
+
+ struct {} vm_page_footer;
+
+ unsigned short type:2;
+ unsigned short seg_index:2;
+ unsigned short order:4;
+};
+
+#define VM_PAGE_BODY_SIZE \
+ (offsetof(struct vm_page, vm_page_footer) \
+ - offsetof(struct vm_page, vm_page_header))
+
+/*
+ * For debugging, this macro can be defined to perform
+ * some useful check on a page structure.
+ */
+
+#define VM_PAGE_CHECK(mem) vm_page_check(mem)
+
+void vm_page_check(const struct vm_page *page);
+
+/*
+ * Each pageable resident page falls into one of three lists:
+ *
+ * free
+ * Available for allocation now.
+ * inactive
+ * Not referenced in any map, but still has an
+ * object/offset-page mapping, and may be dirty.
+ * This is the list of pages that should be
+ * paged out next.
+ * active
+ * A list of pages which have been placed in
+ * at least one physical map. This list is
+ * ordered, in LRU-like fashion.
+ */
+
+#define VM_PAGE_DMA 0x01
+#if defined(VM_PAGE_DMA32_LIMIT) && VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT
+#define VM_PAGE_DIRECTMAP 0x02
+#define VM_PAGE_DMA32 0x04
+#else
+#define VM_PAGE_DMA32 0x02
+#define VM_PAGE_DIRECTMAP 0x04
+#endif
+#define VM_PAGE_HIGHMEM 0x08
+
+extern
+int vm_page_fictitious_count;/* How many fictitious pages are free? */
+extern
+int vm_page_active_count; /* How many pages are active? */
+extern
+int vm_page_inactive_count; /* How many pages are inactive? */
+extern
+int vm_page_wire_count; /* How many pages are wired? */
+extern
+int vm_page_laundry_count; /* How many pages being laundered? */
+extern
+int vm_page_external_laundry_count; /* How many external pages being paged out? */
+
+decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
+ page queues */
+decl_simple_lock_data(extern,vm_page_queue_free_lock)
+ /* lock on free page queue */
+
+extern phys_addr_t vm_page_fictitious_addr;
+ /* (fake) phys_addr of fictitious pages */
+
+extern void vm_page_bootstrap(
+ vm_offset_t *startp,
+ vm_offset_t *endp);
+extern void vm_page_module_init(void);
+
+extern vm_page_t vm_page_lookup(
+ vm_object_t object,
+ vm_offset_t offset);
+extern vm_page_t vm_page_grab_fictitious(void);
+extern boolean_t vm_page_convert(vm_page_t *);
+extern void vm_page_more_fictitious(void);
+extern vm_page_t vm_page_grab(unsigned flags);
+extern void vm_page_release(vm_page_t, boolean_t, boolean_t);
+extern phys_addr_t vm_page_grab_phys_addr(void);
+extern vm_page_t vm_page_grab_contig(vm_size_t, unsigned int);
+extern void vm_page_free_contig(vm_page_t, vm_size_t);
+extern void vm_page_wait(void (*)(void));
+extern vm_page_t vm_page_alloc(
+ vm_object_t object,
+ vm_offset_t offset);
+extern void vm_page_init(
+ vm_page_t mem);
+extern void vm_page_free(vm_page_t);
+extern void vm_page_activate(vm_page_t);
+extern void vm_page_deactivate(vm_page_t);
+extern void vm_page_rename(
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_offset_t new_offset);
+extern void vm_page_insert(
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset);
+extern void vm_page_remove(
+ vm_page_t mem);
+
+extern void vm_page_zero_fill(vm_page_t);
+extern void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
+
+extern void vm_page_wire(vm_page_t);
+extern void vm_page_unwire(vm_page_t);
+
+#if MACH_VM_DEBUG
+extern unsigned int vm_page_info(
+ hash_info_bucket_t *info,
+ unsigned int count);
+#endif
+
+/*
+ * Functions implemented as macros
+ */
+
+#define PAGE_ASSERT_WAIT(m, interruptible) \
+ MACRO_BEGIN \
+ (m)->wanted = TRUE; \
+ assert_wait((event_t) (m), (interruptible)); \
+ MACRO_END
+
+#define PAGE_WAKEUP_DONE(m) \
+ MACRO_BEGIN \
+ (m)->busy = FALSE; \
+ if ((m)->wanted) { \
+ (m)->wanted = FALSE; \
+ thread_wakeup(((event_t) m)); \
+ } \
+ MACRO_END
+
+#define PAGE_WAKEUP(m) \
+ MACRO_BEGIN \
+ if ((m)->wanted) { \
+ (m)->wanted = FALSE; \
+ thread_wakeup((event_t) (m)); \
+ } \
+ MACRO_END
+
+#define VM_PAGE_FREE(p) \
+ MACRO_BEGIN \
+ vm_page_lock_queues(); \
+ vm_page_free(p); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+/*
+ * Macro to be used in place of pmap_enter()
+ */
+
+#define PMAP_ENTER(pmap, virtual_address, page, protection, wired) \
+ MACRO_BEGIN \
+ pmap_enter( \
+ (pmap), \
+ (virtual_address), \
+ (page)->phys_addr, \
+ (protection) & ~(page)->page_lock, \
+ (wired) \
+ ); \
+ MACRO_END
+
+#define VM_PAGE_WAIT(continuation) vm_page_wait(continuation)
+
+#define vm_page_lock_queues() simple_lock(&vm_page_queue_lock)
+#define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
+
+#define VM_PAGE_QUEUES_REMOVE(mem) vm_page_queues_remove(mem)
+
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Physical page management.
+ */
+
+/*
+ * Address/page conversion and rounding macros (not inline functions to
+ * be easily usable on both virtual and physical addresses, which may not
+ * have the same type size).
+ */
+#define vm_page_atop(addr) ((addr) >> PAGE_SHIFT)
+#define vm_page_ptoa(page) ((page) << PAGE_SHIFT)
+#define vm_page_trunc(addr) P2ALIGN(addr, PAGE_SIZE)
+#define vm_page_round(addr) P2ROUND(addr, PAGE_SIZE)
+#define vm_page_aligned(addr) P2ALIGNED(addr, PAGE_SIZE)
+
+/*
+ * Segment selectors.
+ *
+ * Selector-to-segment-list translation table :
+ * DMA DMA
+ * if 32bit PAE
+ * DIRECTMAP DMA32 DMA
+ * DMA32 DMA32 DIRECTMAP DMA
+ * HIGHMEM HIGHMEM DMA32 DIRECTMAP DMA
+ * else
+ * DMA32 DMA32 DMA
+ * DIRECTMAP DIRECTMAP DMA32 DMA
+ * HIGHMEM HIGHMEM DIRECTMAP DMA32 DMA
+ * endif
+ */
+#define VM_PAGE_SEL_DMA 0
+#if defined(VM_PAGE_DMA32_LIMIT) && VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT
+#define VM_PAGE_SEL_DIRECTMAP 1
+#define VM_PAGE_SEL_DMA32 2
+#else
+#define VM_PAGE_SEL_DMA32 1
+#define VM_PAGE_SEL_DIRECTMAP 2
+#endif
+#define VM_PAGE_SEL_HIGHMEM 3
+
+/*
+ * Page usage types.
+ */
+#define VM_PT_FREE 0 /* Page unused */
+#define VM_PT_RESERVED 1 /* Page reserved at boot time */
+#define VM_PT_TABLE 2 /* Page is part of the page table */
+#define VM_PT_KERNEL 3 /* Type for generic kernel allocations */
+
+static inline unsigned short
+vm_page_type(const struct vm_page *page)
+{
+ return page->type;
+}
+
+void vm_page_set_type(struct vm_page *page, unsigned int order,
+ unsigned short type);
+
+static inline unsigned int
+vm_page_order(size_t size)
+{
+ return iorder2(vm_page_atop(vm_page_round(size)));
+}
+
+static inline phys_addr_t
+vm_page_to_pa(const struct vm_page *page)
+{
+ return page->phys_addr;
+}
+
+/*
+ * Associate private data with a page.
+ */
+static inline void
+vm_page_set_priv(struct vm_page *page, void *priv)
+{
+ page->priv = priv;
+}
+
+static inline void *
+vm_page_get_priv(const struct vm_page *page)
+{
+ return page->priv;
+}
+
+/*
+ * Load physical memory into the vm_page module at boot time.
+ *
+ * All addresses must be page-aligned. Segments can be loaded in any order.
+ */
+void vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end);
+
+/*
+ * Load available physical memory into the vm_page module at boot time.
+ *
+ * The segment referred to must have been loaded with vm_page_load
+ * before loading its heap.
+ */
+void vm_page_load_heap(unsigned int seg_index, phys_addr_t start,
+ phys_addr_t end);
+
+/*
+ * Return true if the vm_page module is completely initialized, false
+ * otherwise, in which case only vm_page_bootalloc() can be used for
+ * allocations.
+ */
+int vm_page_ready(void);
+
+/*
+ * Early allocation function.
+ *
+ * This function is used by the vm_resident module to implement
+ * pmap_steal_memory. It can be used after physical segments have been loaded
+ * and before the vm_page module is initialized.
+ */
+phys_addr_t vm_page_bootalloc(size_t size);
+
+/*
+ * Set up the vm_page module.
+ *
+ * Architecture-specific code must have loaded segments before calling this
+ * function. Segments must comply with the selector-to-segment-list table,
+ * e.g. HIGHMEM is loaded if and only if DIRECTMAP, DMA32 and DMA are loaded,
+ * notwithstanding segment aliasing.
+ *
+ * Once this function returns, the vm_page module is ready, and normal
+ * allocation functions can be used.
+ */
+void vm_page_setup(void);
+
+/*
+ * Make the given page managed by the vm_page module.
+ *
+ * If additional memory can be made usable after the VM system is initialized,
+ * it should be reported through this function.
+ */
+void vm_page_manage(struct vm_page *page);
+
+/*
+ * Return the page descriptor for the given physical address.
+ */
+struct vm_page * vm_page_lookup_pa(phys_addr_t pa);
+
+/*
+ * Allocate a block of 2^order physical pages.
+ *
+ * The selector is used to determine the segments from which allocation can
+ * be attempted.
+ *
+ * This function should only be used by the vm_resident module.
+ */
+struct vm_page * vm_page_alloc_pa(unsigned int order, unsigned int selector,
+ unsigned short type);
+
+/*
+ * Release a block of 2^order physical pages.
+ *
+ * This function should only be used by the vm_resident module.
+ */
+void vm_page_free_pa(struct vm_page *page, unsigned int order);
+
+/*
+ * Return the name of the given segment.
+ */
+const char * vm_page_seg_name(unsigned int seg_index);
+
+/*
+ * Display internal information about the module.
+ */
+void vm_page_info_all(void);
+
+/*
+ * Return the maximum physical address for a given segment selector.
+ */
+phys_addr_t vm_page_seg_end(unsigned int selector);
+
+/*
+ * Return the total number of physical pages.
+ */
+unsigned long vm_page_table_size(void);
+
+/*
+ * Return the index of a page in the page table.
+ */
+unsigned long vm_page_table_index(phys_addr_t pa);
+
+/*
+ * Return the total amount of physical memory.
+ */
+phys_addr_t vm_page_mem_size(void);
+
+/*
+ * Return the amount of free (unused) pages.
+ *
+ * XXX This currently relies on the kernel being non preemptible and
+ * uniprocessor.
+ */
+unsigned long vm_page_mem_free(void);
+
+/*
+ * Remove the given page from any page queue it might be in.
+ */
+void vm_page_queues_remove(struct vm_page *page);
+
+/*
+ * Balance physical pages among segments.
+ *
+ * This function should be called first by the pageout daemon
+ * on memory pressure, since it may be unnecessary to perform any
+ * other operation, let alone shrink caches, if balancing is
+ * enough to make enough free pages.
+ *
+ * Return TRUE if balancing made enough free pages for unprivileged
+ * allocations to succeed, in which case pending allocations are resumed.
+ *
+ * This function acquires vm_page_queue_free_lock, which is held on return.
+ */
+boolean_t vm_page_balance(void);
+
+/*
+ * Evict physical pages.
+ *
+ * This function should be called by the pageout daemon after balancing
+ * the segments and shrinking kernel caches.
+ *
+ * Return TRUE if eviction made enough free pages for unprivileged
+ * allocations to succeed, in which case pending allocations are resumed.
+ *
+ * Otherwise, report whether the pageout daemon should wait (some pages
+ * have been paged out) or not (only clean pages have been released).
+ *
+ * This function acquires vm_page_queue_free_lock, which is held on return.
+ */
+boolean_t vm_page_evict(boolean_t *should_wait);
+
+/*
+ * Turn active pages into inactive ones for second-chance LRU
+ * approximation.
+ *
+ * This function should be called by the pageout daemon on memory pressure,
+ * i.e. right before evicting pages.
+ *
+ * XXX This is probably not the best strategy, compared to keeping the
+ * active/inactive ratio in check at all times, but this means less
+ * frequent refills.
+ */
+void vm_page_refill_inactive(void);
+
+/*
+ * Print vmstat information
+ */
+void db_show_vmstat(void);
+
+#endif /* _VM_VM_PAGE_H_ */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
new file mode 100644
index 0000000..e2f4cf2
--- /dev/null
+++ b/vm/vm_pageout.c
@@ -0,0 +1,515 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_pageout.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * The proverbial page-out daemon.
+ */
+
+#include <device/net_io.h>
+#include <mach/mach_types.h>
+#include <mach/memory_object.h>
+#include <vm/memory_object_default.user.h>
+#include <vm/memory_object_user.user.h>
+#include <mach/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/slab.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/printf.h>
+#include <vm/memory_object.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <machine/locore.h>
+
+#define DEBUG 0
+
+/*
+ * Maximum delay, in milliseconds, between two pageout scans.
+ */
+#define VM_PAGEOUT_TIMEOUT 50
+
+/*
+ * Event placeholder for pageout requests, synchronized with
+ * the free page queue lock.
+ */
+static int vm_pageout_requested;
+
+/*
+ * Event placeholder for pageout throttling, synchronized with
+ * the free page queue lock.
+ */
+static int vm_pageout_continue;
+
+/*
+ * Routine: vm_pageout_setup
+ * Purpose:
+ * Set up a page for pageout.
+ *
+ * Move or copy the page to a new object, as part
+ * of which it will be sent to its memory manager
+ * in a memory_object_data_return or memory_object_initialize
+ * message.
+ *
+ * The "paging_offset" argument specifies the offset
+ * of the page within its external memory object.
+ *
+ * The "new_object" and "new_offset" arguments
+ * indicate where the page should be moved.
+ *
+ * The "flush" argument specifies whether the page
+ * should be flushed from its object. If not, a
+ * copy of the page is moved to the new object.
+ *
+ * In/Out conditions:
+ * The page in question must not be on any pageout queues,
+ * and must be busy. The object to which it belongs
+ * must be unlocked, and the caller must hold a paging
+ * reference to it. The new_object must not be locked.
+ *
+ * If the page is flushed from its original object,
+ * this routine returns a pointer to a place-holder page,
+ * inserted at the same offset, to block out-of-order
+ * requests for the page. The place-holder page must
+ * be freed after the data_return or initialize message
+ * has been sent. If the page is copied,
+ * the holding page is VM_PAGE_NULL.
+ *
+ * The original page is put on a paging queue and marked
+ * not busy on exit.
+ */
+vm_page_t
+vm_pageout_setup(
+ vm_page_t m,
+ vm_offset_t paging_offset,
+ vm_object_t new_object,
+ vm_offset_t new_offset,
+ boolean_t flush)
+{
+ vm_object_t old_object = m->object;
+ vm_page_t holding_page = 0; /*'=0'to quiet gcc warnings*/
+ vm_page_t new_m;
+
+ assert(m->busy && !m->absent && !m->fictitious);
+
+ /*
+ * If we are not flushing the page, allocate a
+ * page in the object.
+ */
+ if (!flush) {
+ for (;;) {
+ vm_object_lock(new_object);
+ new_m = vm_page_alloc(new_object, new_offset);
+ vm_object_unlock(new_object);
+
+ if (new_m != VM_PAGE_NULL) {
+ break;
+ }
+
+ VM_PAGE_WAIT(NULL);
+ }
+ }
+
+ if (flush) {
+ /*
+ * Create a place-holder page where the old one was,
+ * to prevent anyone from attempting to page in this
+ * page while we`re unlocked.
+ */
+ while ((holding_page = vm_page_grab_fictitious())
+ == VM_PAGE_NULL)
+ vm_page_more_fictitious();
+
+ vm_object_lock(old_object);
+ vm_page_lock_queues();
+ vm_page_remove(m);
+ vm_page_unlock_queues();
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lock_queues();
+ vm_page_insert(holding_page, old_object, m->offset);
+ vm_page_unlock_queues();
+
+ /*
+ * Record that this page has been written out
+ */
+#if MACH_PAGEMAP
+ vm_external_state_set(old_object->existence_info,
+ paging_offset,
+ VM_EXTERNAL_STATE_EXISTS);
+#endif /* MACH_PAGEMAP */
+
+ vm_object_unlock(old_object);
+
+ vm_object_lock(new_object);
+
+ /*
+ * Move this page into the new object
+ */
+
+ vm_page_lock_queues();
+ vm_page_insert(m, new_object, new_offset);
+ vm_page_unlock_queues();
+
+ m->dirty = TRUE;
+ m->precious = FALSE;
+ m->page_lock = VM_PROT_NONE;
+ m->unlock_request = VM_PROT_NONE;
+ }
+ else {
+ /*
+ * Copy the data into the new page,
+ * and mark the new page as clean.
+ */
+ vm_page_copy(m, new_m);
+
+ vm_object_lock(old_object);
+ m->dirty = FALSE;
+ pmap_clear_modify(m->phys_addr);
+
+ /*
+ * Deactivate old page.
+ */
+ vm_page_lock_queues();
+ vm_page_deactivate(m);
+ vm_page_unlock_queues();
+
+ PAGE_WAKEUP_DONE(m);
+
+ /*
+ * Record that this page has been written out
+ */
+
+#if MACH_PAGEMAP
+ vm_external_state_set(old_object->existence_info,
+ paging_offset,
+ VM_EXTERNAL_STATE_EXISTS);
+#endif /* MACH_PAGEMAP */
+
+ vm_object_unlock(old_object);
+
+ vm_object_lock(new_object);
+
+ /*
+ * Use the new page below.
+ */
+ m = new_m;
+ m->dirty = TRUE;
+ assert(!m->precious);
+ PAGE_WAKEUP_DONE(m);
+ }
+
+ /*
+ * Make the old page eligible for replacement again; if a
+ * user-supplied memory manager fails to release the page,
+ * it will be paged out again to the default memory manager.
+ *
+ * Note that pages written to the default memory manager
+ * must be wired down -- in return, it guarantees to free
+ * this page, rather than reusing it.
+ */
+
+ vm_page_lock_queues();
+ vm_stat.pageouts++;
+ if (m->laundry) {
+
+ /*
+ * The caller is telling us that it is going to
+ * immediately double page this page to the default
+ * pager.
+ */
+
+ assert(!old_object->internal);
+ m->laundry = FALSE;
+ } else if (old_object->internal ||
+ memory_manager_default_port(old_object->pager)) {
+ m->laundry = TRUE;
+ vm_page_laundry_count++;
+
+ vm_page_wire(m);
+ } else {
+ m->external_laundry = TRUE;
+
+ /*
+ * If vm_page_external_laundry_count is negative,
+ * the pageout daemon isn't expecting to be
+ * notified.
+ */
+
+ if (vm_page_external_laundry_count >= 0) {
+ vm_page_external_laundry_count++;
+ }
+
+ vm_page_activate(m);
+ }
+ vm_page_unlock_queues();
+
+ /*
+ * Since IPC operations may block, we drop locks now.
+ * [The placeholder page is busy, and we still have
+ * paging_in_progress incremented.]
+ */
+
+ vm_object_unlock(new_object);
+
+ /*
+ * Return the placeholder page to simplify cleanup.
+ */
+ return (flush ? holding_page : VM_PAGE_NULL);
+}
+
+/*
+ * Routine: vm_pageout_page
+ * Purpose:
+ * Causes the specified page to be written back to
+ * the appropriate memory object.
+ *
+ * The "initial" argument specifies whether this
+ * data is an initialization only, and should use
+ * memory_object_data_initialize instead of
+ * memory_object_data_return.
+ *
+ * The "flush" argument specifies whether the page
+ * should be flushed from the object. If not, a
+ * copy of the data is sent to the memory object.
+ *
+ * In/out conditions:
+ * The page in question must not be on any pageout queues.
+ * The object to which it belongs must be locked.
+ * Implementation:
+ * Move this page to a completely new object, if flushing;
+ * copy to a new page in a new object, if not.
+ */
+void
+vm_pageout_page(
+ vm_page_t m,
+ boolean_t initial,
+ boolean_t flush)
+{
+ vm_map_copy_t copy;
+ vm_object_t old_object;
+ vm_object_t new_object;
+ vm_page_t holding_page;
+ vm_offset_t paging_offset;
+ kern_return_t rc;
+ boolean_t precious_clean;
+
+ assert(m->busy);
+
+ /*
+ * Cleaning but not flushing a clean precious page is a
+ * no-op. Remember whether page is clean and precious now
+ * because vm_pageout_setup will mark it dirty and not precious.
+ *
+ * XXX Check if precious_clean && !flush can really happen.
+ */
+ precious_clean = (!m->dirty) && m->precious;
+ if (precious_clean && !flush) {
+ PAGE_WAKEUP_DONE(m);
+ return;
+ }
+
+ /*
+ * Verify that we really want to clean this page.
+ */
+ if (m->absent || m->error || (!m->dirty && !m->precious)) {
+ VM_PAGE_FREE(m);
+ return;
+ }
+
+ /*
+ * Create a paging reference to let us play with the object.
+ */
+ old_object = m->object;
+ paging_offset = m->offset + old_object->paging_offset;
+ vm_object_paging_begin(old_object);
+ vm_object_unlock(old_object);
+
+ /*
+ * Allocate a new object into which we can put the page.
+ */
+ new_object = vm_object_allocate(PAGE_SIZE);
+ new_object->used_for_pageout = TRUE;
+
+ /*
+ * Move the page into the new object.
+ */
+ holding_page = vm_pageout_setup(m,
+ paging_offset,
+ new_object,
+ 0, /* new offset */
+ flush); /* flush */
+
+ rc = vm_map_copyin_object(new_object, 0, PAGE_SIZE, &copy);
+ assert(rc == KERN_SUCCESS);
+
+ if (initial) {
+ rc = memory_object_data_initialize(
+ old_object->pager,
+ old_object->pager_request,
+ paging_offset, (pointer_t) copy, PAGE_SIZE);
+ }
+ else {
+ rc = memory_object_data_return(
+ old_object->pager,
+ old_object->pager_request,
+ paging_offset, (pointer_t) copy, PAGE_SIZE,
+ !precious_clean, !flush);
+ }
+
+ if (rc != KERN_SUCCESS)
+ vm_map_copy_discard(copy);
+
+ /*
+ * Clean up.
+ */
+ vm_object_lock(old_object);
+ if (holding_page != VM_PAGE_NULL)
+ VM_PAGE_FREE(holding_page);
+ vm_object_paging_end(old_object);
+}
+
+/*
+ * vm_pageout_scan does the dirty work for the pageout daemon.
+ *
+ * Return TRUE if the pageout daemon is done for now, FALSE otherwise,
+ * in which case should_wait indicates whether the pageout daemon
+ * should wait to allow pagers to keep up.
+ *
+ * It returns with vm_page_queue_free_lock held.
+ */
+
+static boolean_t vm_pageout_scan(boolean_t *should_wait)
+{
+ boolean_t done;
+
+ /*
+ * Try balancing pages among segments first, since this
+ * may be enough to resume unprivileged allocations.
+ */
+
+ /* This function returns with vm_page_queue_free_lock held */
+ done = vm_page_balance();
+
+ if (done) {
+ return TRUE;
+ }
+
+ simple_unlock(&vm_page_queue_free_lock);
+
+ /*
+ * Balancing is not enough. Shrink caches and scan pages
+ * for eviction.
+ */
+
+ stack_collect();
+ net_kmsg_collect();
+ consider_task_collect();
+ if (0) /* XXX: pcb_collect doesn't do anything yet, so it is
+ pointless to call consider_thread_collect. */
+ consider_thread_collect();
+
+ /*
+ * slab_collect should be last, because the other operations
+ * might return memory to caches.
+ */
+ slab_collect();
+
+ vm_page_refill_inactive();
+
+ /* This function returns with vm_page_queue_free_lock held */
+ return vm_page_evict(should_wait);
+}
+
+void vm_pageout(void)
+{
+ boolean_t done, should_wait;
+
+ current_thread()->vm_privilege = 1;
+ stack_privilege(current_thread());
+ thread_set_own_priority(0);
+
+ for (;;) {
+ done = vm_pageout_scan(&should_wait);
+ /* we hold vm_page_queue_free_lock now */
+
+ if (done) {
+ thread_sleep(&vm_pageout_requested,
+ simple_lock_addr(vm_page_queue_free_lock),
+ FALSE);
+ } else if (should_wait) {
+ assert_wait(&vm_pageout_continue, FALSE);
+ thread_set_timeout(VM_PAGEOUT_TIMEOUT * hz / 1000);
+ simple_unlock(&vm_page_queue_free_lock);
+ thread_block(NULL);
+
+#if DEBUG
+ if (current_thread()->wait_result != THREAD_AWAKENED) {
+ printf("vm_pageout: timeout,"
+ " vm_page_laundry_count:%d"
+ " vm_page_external_laundry_count:%d\n",
+ vm_page_laundry_count,
+ vm_page_external_laundry_count);
+ }
+#endif
+ } else {
+ simple_unlock(&vm_page_queue_free_lock);
+ }
+ }
+}
+
+/*
+ * Start pageout
+ *
+ * The free page queue lock must be held before calling this function.
+ */
+void vm_pageout_start(void)
+{
+ if (!current_thread())
+ return;
+
+ thread_wakeup_one(&vm_pageout_requested);
+}
+
+/*
+ * Resume pageout
+ *
+ * The free page queue lock must be held before calling this function.
+ */
+void vm_pageout_resume(void)
+{
+ thread_wakeup_one(&vm_pageout_continue);
+}
diff --git a/vm/vm_pageout.h b/vm/vm_pageout.h
new file mode 100644
index 0000000..6ddd821
--- /dev/null
+++ b/vm/vm_pageout.h
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_pageout.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Declarations for the pageout daemon interface.
+ */
+
+#ifndef _VM_VM_PAGEOUT_H_
+#define _VM_VM_PAGEOUT_H_
+
+#include <vm/vm_page.h>
+
+/*
+ * Exported routines.
+ */
+
+extern vm_page_t vm_pageout_setup(vm_page_t, vm_offset_t, vm_object_t,
+ vm_offset_t, boolean_t);
+extern void vm_pageout_page(vm_page_t, boolean_t, boolean_t);
+
+extern void vm_pageout(void) __attribute__((noreturn));
+
+extern void vm_pageout_start(void);
+
+extern void vm_pageout_resume(void);
+
+#endif /* _VM_VM_PAGEOUT_H_ */
diff --git a/vm/vm_print.h b/vm/vm_print.h
new file mode 100644
index 0000000..8a36d75
--- /dev/null
+++ b/vm/vm_print.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VM_PRINT_H
+#define VM_PRINT_H
+
+#include <vm/vm_map.h>
+#include <machine/db_machdep.h>
+
+/* Debugging: print a map */
+extern void vm_map_print(db_expr_t addr, boolean_t have_addr,
+ db_expr_t count, const char *modif);
+
+/* Pretty-print a copy object for ddb. */
+extern void vm_map_copy_print(const vm_map_copy_t);
+
+#include <vm/vm_object.h>
+
+extern void vm_object_print(vm_object_t);
+
+#include <vm/vm_page.h>
+
+extern void vm_page_print(const vm_page_t);
+
+#endif /* VM_PRINT_H */
+
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
new file mode 100644
index 0000000..3f0cc90
--- /dev/null
+++ b/vm/vm_resident.c
@@ -0,0 +1,1116 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_resident.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Resident memory management module.
+ */
+
+#include <kern/printf.h>
+#include <string.h>
+
+#include <mach/vm_prot.h>
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/list.h>
+#include <kern/sched_prim.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <mach/vm_statistics.h>
+#include <machine/vm_param.h>
+#include <kern/xpr.h>
+#include <kern/slab.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_resident.h>
+
+#if MACH_VM_DEBUG
+#include <mach/kern_return.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_user.h>
+#endif
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#include <vm/vm_print.h>
+#endif /* MACH_KDB */
+
+
+/*
+ * Associated with each page of user-allocatable memory is a
+ * page structure.
+ */
+
+/*
+ * These variables record the values returned by vm_page_bootstrap,
+ * for debugging purposes. The implementation of pmap_steal_memory
+ * here also uses them internally.
+ */
+
+vm_offset_t virtual_space_start;
+vm_offset_t virtual_space_end;
+
+/*
+ * The vm_page_lookup() routine, which provides for fast
+ * (virtual memory object, offset) to page lookup, employs
+ * the following hash table. The vm_page_{insert,remove}
+ * routines install and remove associations in the table.
+ * [This table is often called the virtual-to-physical,
+ * or VP, table.]
+ */
+typedef struct {
+ decl_simple_lock_data(,lock)
+ vm_page_t pages;
+} vm_page_bucket_t;
+
+vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
+unsigned long vm_page_bucket_count = 0; /* How big is array? */
+unsigned long vm_page_hash_mask; /* Mask for hash function */
+
+static struct list vm_page_queue_fictitious;
+def_simple_lock_data(,vm_page_queue_free_lock)
+int vm_page_fictitious_count;
+int vm_object_external_count;
+int vm_object_external_pages;
+
+/*
+ * Occasionally, the virtual memory system uses
+ * resident page structures that do not refer to
+ * real pages, for example to leave a page with
+ * important state information in the VP table.
+ *
+ * These page structures are allocated the way
+ * most other kernel structures are.
+ */
+struct kmem_cache vm_page_cache;
+
+/*
+ * Fictitious pages don't have a physical address,
+ * but we must initialize phys_addr to something.
+ * For debugging, this should be a strange value
+ * that the pmap module can recognize in assertions.
+ */
+phys_addr_t vm_page_fictitious_addr = (phys_addr_t) -1;
+
+/*
+ * Resident page structures are also chained on
+ * queues that are used by the page replacement
+ * system (pageout daemon). These queues are
+ * defined here, but are shared by the pageout
+ * module.
+ */
+def_simple_lock_data(,vm_page_queue_lock)
+int vm_page_active_count;
+int vm_page_inactive_count;
+int vm_page_wire_count;
+
+/*
+ * Several page replacement parameters are also
+ * shared with this module, so that page allocation
+ * (done here in vm_page_alloc) can trigger the
+ * pageout daemon.
+ */
+int vm_page_laundry_count = 0;
+int vm_page_external_laundry_count = 0;
+
+
+/*
+ * The VM system has a couple of heuristics for deciding
+ * that pages are "uninteresting" and should be placed
+ * on the inactive queue as likely candidates for replacement.
+ * These variables let the heuristics be controlled at run-time
+ * to make experimentation easier.
+ */
+
+boolean_t vm_page_deactivate_behind = TRUE;
+boolean_t vm_page_deactivate_hint = TRUE;
+
+/*
+ * vm_page_bootstrap:
+ *
+ * Initializes the resident memory module.
+ *
+ * Allocates memory for the page cells, and
+ * for the object/offset-to-page hash table headers.
+ * Each page cell is initialized and placed on the free list.
+ * Returns the range of available kernel virtual memory.
+ */
+
+void vm_page_bootstrap(
+ vm_offset_t *startp,
+ vm_offset_t *endp)
+{
+ unsigned long i; /* bucket index: same type as vm_page_bucket_count (avoids signed/unsigned compare and LP64 truncation) */
+
+ /*
+ * Initialize the page queues.
+ */
+
+ simple_lock_init(&vm_page_queue_free_lock);
+ simple_lock_init(&vm_page_queue_lock);
+
+ list_init(&vm_page_queue_fictitious);
+
+ /*
+ * Allocate (and initialize) the virtual-to-physical
+ * table hash buckets.
+ *
+ * The number of buckets should be a power of two to
+ * get a good hash function. The following computation
+ * chooses the first power of two that is greater
+ * than the number of physical pages in the system.
+ */
+
+ if (vm_page_bucket_count == 0) {
+ unsigned long npages = vm_page_table_size();
+
+ vm_page_bucket_count = 1;
+ while (vm_page_bucket_count < npages)
+ vm_page_bucket_count <<= 1;
+ }
+
+ vm_page_hash_mask = vm_page_bucket_count - 1;
+
+ if (vm_page_hash_mask & vm_page_bucket_count)
+ printf("vm_page_bootstrap: WARNING -- strange page hash\n");
+
+ vm_page_buckets = (vm_page_bucket_t *)
+ pmap_steal_memory(vm_page_bucket_count *
+ sizeof(vm_page_bucket_t));
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
+
+ bucket->pages = VM_PAGE_NULL;
+ simple_lock_init(&bucket->lock);
+ }
+
+ vm_page_setup();
+
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+
+ *startp = virtual_space_start;
+ *endp = virtual_space_end;
+}
+
+#ifndef MACHINE_PAGES
+/*
+ * We implement pmap_steal_memory with the help
+ * of two simpler functions, pmap_virtual_space and vm_page_bootalloc.
+ */
+
+vm_offset_t pmap_steal_memory(
+ vm_size_t size)
+{
+ vm_offset_t addr, vaddr;
+ phys_addr_t paddr;
+
+ size = round_page(size);
+
+ /*
+ * If this is the first call to pmap_steal_memory,
+ * we have to initialize ourself.
+ */
+
+ if (virtual_space_start == virtual_space_end) {
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+
+ /*
+ * The initial values must be aligned properly, and
+ * we don't trust the pmap module to do it right.
+ */
+
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+ }
+
+ /*
+ * Allocate virtual memory for this request.
+ */
+
+ addr = virtual_space_start;
+ virtual_space_start += size;
+
+ /*
+ * Allocate and map physical pages to back new virtual pages.
+ */
+
+ for (vaddr = round_page(addr);
+ vaddr < addr + size;
+ vaddr += PAGE_SIZE) {
+ paddr = vm_page_bootalloc(PAGE_SIZE);
+
+ /*
+ * XXX Logically, these mappings should be wired,
+ * but some pmap modules barf if they are.
+ */
+
+ pmap_enter(kernel_pmap, vaddr, paddr,
+ VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ }
+
+ return addr;
+}
+#endif /* MACHINE_PAGES */
+
+/*
+ * Routine: vm_page_module_init
+ * Purpose:
+ * Second initialization pass, to be done after
+ * the basic VM system is ready.
+ */
+void vm_page_module_init(void)
+{
+ kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
+ NULL, 0);
+}
+
+/*
+ * vm_page_hash:
+ *
+ * Distributes the object/offset key pair among hash buckets.
+ *
+ * NOTE: To get a good hash function, the bucket count should
+ * be a power of two.
+ */
+#define vm_page_hash(object, offset) \
+ (((unsigned int)(vm_offset_t)object + (unsigned int)atop(offset)) \
+ & vm_page_hash_mask)
+
+/*
+ * vm_page_insert: [ internal use only ]
+ *
+ * Inserts the given mem entry into the object/offset-page
+ * table and object list.
+ *
+ * The object and page must be locked.
+ * The free page queue must not be locked.
+ */
+
+void vm_page_insert(
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
+{
+ vm_page_bucket_t *bucket;
+
+ VM_PAGE_CHECK(mem);
+
+ assert(!mem->active && !mem->inactive);
+ assert(!mem->external);
+
+ if (!object->internal) {
+ mem->external = TRUE;
+ vm_object_external_pages++;
+ }
+
+ if (mem->tabled)
+ panic("vm_page_insert");
+
+ /*
+ * Record the object/offset pair in this page
+ */
+
+ mem->object = object;
+ mem->offset = offset;
+
+ /*
+ * Insert it into the object/offset hash table
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+ simple_lock(&bucket->lock);
+ mem->next = bucket->pages;
+ bucket->pages = mem;
+ simple_unlock(&bucket->lock);
+
+ /*
+ * Now link into the object's list of backed pages.
+ */
+
+ queue_enter(&object->memq, mem, vm_page_t, listq);
+ mem->tabled = TRUE;
+
+ /*
+ * Show that the object has one more resident page.
+ */
+
+ object->resident_page_count++;
+ assert(object->resident_page_count != 0);
+
+ /*
+ * Detect sequential access and inactivate previous page.
+ * We ignore busy pages.
+ */
+
+ if (vm_page_deactivate_behind &&
+ (offset == object->last_alloc + PAGE_SIZE)) {
+ vm_page_t last_mem;
+
+ last_mem = vm_page_lookup(object, object->last_alloc);
+ if ((last_mem != VM_PAGE_NULL) && !last_mem->busy)
+ vm_page_deactivate(last_mem);
+ }
+ object->last_alloc = offset;
+}
+
+/*
+ * vm_page_replace:
+ *
+ * Exactly like vm_page_insert, except that we first
+ * remove any existing page at the given offset in object
+ * and we don't do deactivate-behind.
+ *
+ * The object and page must be locked.
+ * The free page queue must not be locked.
+ */
+
+void vm_page_replace(
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
+{
+ vm_page_bucket_t *bucket;
+
+ VM_PAGE_CHECK(mem);
+
+ assert(!mem->active && !mem->inactive);
+ assert(!mem->external);
+
+ if (!object->internal) {
+ mem->external = TRUE;
+ vm_object_external_pages++;
+ }
+
+ if (mem->tabled)
+ panic("vm_page_replace");
+
+ /*
+ * Record the object/offset pair in this page
+ */
+
+ mem->object = object;
+ mem->offset = offset;
+
+ /*
+ * Insert it into the object/offset hash table,
+ * replacing any page that might have been there.
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+ simple_lock(&bucket->lock);
+ if (bucket->pages) {
+ vm_page_t *mp = &bucket->pages;
+ vm_page_t m = *mp;
+ do {
+ if (m->object == object && m->offset == offset) {
+ /*
+ * Remove page from bucket and from object,
+ * and return it to the free list.
+ */
+ *mp = m->next;
+ queue_remove(&object->memq, m, vm_page_t,
+ listq);
+ m->tabled = FALSE;
+ object->resident_page_count--;
+ VM_PAGE_QUEUES_REMOVE(m);
+
+ if (m->external) {
+ m->external = FALSE;
+ vm_object_external_pages--;
+ }
+
+ /*
+ * Return page to the free list.
+ * Note the page is not tabled now, so this
+ * won't self-deadlock on the bucket lock.
+ */
+
+ vm_page_free(m);
+ break;
+ }
+ mp = &m->next;
+ } while ((m = *mp) != 0);
+ mem->next = bucket->pages;
+ } else {
+ mem->next = VM_PAGE_NULL;
+ }
+ bucket->pages = mem;
+ simple_unlock(&bucket->lock);
+
+ /*
+ * Now link into the object's list of backed pages.
+ */
+
+ queue_enter(&object->memq, mem, vm_page_t, listq);
+ mem->tabled = TRUE;
+
+ /*
+ * And show that the object has one more resident
+ * page.
+ */
+
+ object->resident_page_count++;
+ assert(object->resident_page_count != 0);
+}
+
+/*
+ * vm_page_remove: [ internal use only ]
+ *
+ * Removes the given mem entry from the object/offset-page
+ * table, the object page list, and the page queues.
+ *
+ * The object and page must be locked.
+ * The free page queue must not be locked.
+ */
+
+void vm_page_remove(
+ vm_page_t mem)
+{
+ vm_page_bucket_t *bucket;
+ vm_page_t this;
+
+ assert(mem->tabled);
+ VM_PAGE_CHECK(mem);
+
+ /*
+ * Remove from the object/offset hash table
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
+ simple_lock(&bucket->lock);
+ if ((this = bucket->pages) == mem) {
+ /* optimize for common case */
+
+ bucket->pages = mem->next;
+ } else {
+ vm_page_t *prev;
+
+ for (prev = &this->next;
+ (this = *prev) != mem;
+ prev = &this->next)
+ continue;
+ *prev = this->next;
+ }
+ simple_unlock(&bucket->lock);
+
+ /*
+ * Now remove from the object's list of backed pages.
+ */
+
+ queue_remove(&mem->object->memq, mem, vm_page_t, listq);
+
+ /*
+ * And show that the object has one fewer resident
+ * page.
+ */
+
+ mem->object->resident_page_count--;
+
+ mem->tabled = FALSE;
+
+ VM_PAGE_QUEUES_REMOVE(mem);
+
+ if (mem->external) {
+ mem->external = FALSE;
+ vm_object_external_pages--;
+ }
+}
+
+/*
+ * vm_page_lookup:
+ *
+ * Returns the page associated with the object/offset
+ * pair specified; if none is found, VM_PAGE_NULL is returned.
+ *
+ * The object must be locked. No side effects.
+ */
+
+vm_page_t vm_page_lookup(
+ vm_object_t object,
+ vm_offset_t offset)
+{
+ vm_page_t mem;
+ vm_page_bucket_t *bucket;
+
+ /*
+ * Search the hash table for this object/offset pair
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+
+ simple_lock(&bucket->lock);
+ for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
+ VM_PAGE_CHECK(mem);
+ if ((mem->object == object) && (mem->offset == offset))
+ break;
+ }
+ simple_unlock(&bucket->lock);
+ return mem;
+}
+
+/*
+ * vm_page_rename:
+ *
+ * Move the given memory entry from its
+ * current object to the specified target object/offset.
+ *
+ * The object must be locked.
+ */
+void vm_page_rename(
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_offset_t new_offset)
+{
+ /*
+ * Changes to mem->object require the page lock because
+ * the pageout daemon uses that lock to get the object.
+ */
+
+ vm_page_lock_queues();
+ vm_page_remove(mem);
+ vm_page_insert(mem, new_object, new_offset);
+ vm_page_unlock_queues();
+}
+
+static void vm_page_init_template(vm_page_t m)
+{
+ m->object = VM_OBJECT_NULL; /* reset later */
+ m->offset = 0; /* reset later */
+ m->wire_count = 0;
+
+ m->inactive = FALSE;
+ m->active = FALSE;
+ m->laundry = FALSE;
+ m->external_laundry = FALSE;
+ m->free = FALSE;
+ m->external = FALSE;
+
+ m->busy = TRUE;
+ m->wanted = FALSE;
+ m->tabled = FALSE;
+ m->fictitious = FALSE;
+ m->private = FALSE;
+ m->absent = FALSE;
+ m->error = FALSE;
+ m->dirty = FALSE;
+ m->precious = FALSE;
+ m->reference = FALSE;
+
+ m->page_lock = VM_PROT_NONE;
+ m->unlock_request = VM_PROT_NONE;
+}
+
+/*
+ * vm_page_init:
+ *
+ * Initialize the fields in a new page.
+ * This takes a structure with random values and initializes it
+ * so that it can be given to vm_page_release or vm_page_insert.
+ */
+void vm_page_init(
+ vm_page_t mem)
+{
+ vm_page_init_template(mem);
+}
+
+/*
+ * vm_page_grab_fictitious:
+ *
+ * Remove a fictitious page from the free list.
+ * Returns VM_PAGE_NULL if there are no free pages.
+ */
+
+vm_page_t vm_page_grab_fictitious(void)
+{
+ vm_page_t m;
+
+ simple_lock(&vm_page_queue_free_lock);
+ if (list_empty(&vm_page_queue_fictitious)) {
+ m = VM_PAGE_NULL;
+ } else {
+ m = list_first_entry(&vm_page_queue_fictitious,
+ struct vm_page, node);
+ assert(m->fictitious);
+ list_remove(&m->node);
+ m->free = FALSE;
+ vm_page_fictitious_count--;
+ }
+ simple_unlock(&vm_page_queue_free_lock);
+
+ return m;
+}
+
+/*
+ * vm_page_release_fictitious:
+ *
+ * Release a fictitious page to the free list.
+ */
+
+static void vm_page_release_fictitious(
+ vm_page_t m)
+{
+ simple_lock(&vm_page_queue_free_lock);
+ if (m->free)
+ panic("vm_page_release_fictitious");
+ m->free = TRUE;
+ list_insert_head(&vm_page_queue_fictitious, &m->node);
+ vm_page_fictitious_count++;
+ simple_unlock(&vm_page_queue_free_lock);
+}
+
+/*
+ * vm_page_more_fictitious:
+ *
+ * Add more fictitious pages to the free list.
+ * Allowed to block.
+ */
+
+int vm_page_fictitious_quantum = 5;
+
+void vm_page_more_fictitious(void)
+{
+ vm_page_t m;
+ int i;
+
+ for (i = 0; i < vm_page_fictitious_quantum; i++) {
+ m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
+ if (m == VM_PAGE_NULL)
+ panic("vm_page_more_fictitious");
+
+ vm_page_init(m);
+ m->phys_addr = vm_page_fictitious_addr;
+ m->fictitious = TRUE;
+ vm_page_release_fictitious(m);
+ }
+}
+
+/*
+ * vm_page_convert:
+ *
+ * Attempt to convert a fictitious page into a real page.
+ *
+ * The object referenced by *MP must be locked.
+ */
+
+boolean_t vm_page_convert(struct vm_page **mp)
+{
+ struct vm_page *real_m, *fict_m;
+ vm_object_t object;
+ vm_offset_t offset;
+
+ fict_m = *mp;
+
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
+ assert(!fict_m->active);
+ assert(!fict_m->inactive);
+
+ real_m = vm_page_grab(VM_PAGE_HIGHMEM);
+ if (real_m == VM_PAGE_NULL)
+ return FALSE;
+
+ object = fict_m->object;
+ offset = fict_m->offset;
+ vm_page_remove(fict_m);
+
+ memcpy(&real_m->vm_page_header,
+ &fict_m->vm_page_header,
+ VM_PAGE_BODY_SIZE);
+ real_m->fictitious = FALSE;
+
+ vm_page_insert(real_m, object, offset);
+
+ assert(real_m->phys_addr != vm_page_fictitious_addr);
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
+
+ vm_page_release_fictitious(fict_m);
+ *mp = real_m;
+ return TRUE;
+}
+
+/*
+ * vm_page_grab:
+ *
+ * Remove a page from the free list.
+ * Returns VM_PAGE_NULL if the free list is too small.
+ *
+ * FLAGS specify which constraint should be enforced for the allocated
+ * addresses.
+ */
+
+vm_page_t vm_page_grab(unsigned flags)
+{
+ unsigned selector;
+ vm_page_t mem;
+
+ if (flags & VM_PAGE_HIGHMEM)
+ selector = VM_PAGE_SEL_HIGHMEM;
+#if defined(VM_PAGE_DMA32_LIMIT) && VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT
+ else if (flags & VM_PAGE_DMA32)
+ selector = VM_PAGE_SEL_DMA32;
+#endif
+ else if (flags & VM_PAGE_DIRECTMAP)
+ selector = VM_PAGE_SEL_DIRECTMAP;
+#if defined(VM_PAGE_DMA32_LIMIT) && VM_PAGE_DMA32_LIMIT <= VM_PAGE_DIRECTMAP_LIMIT
+ else if (flags & VM_PAGE_DMA32)
+ selector = VM_PAGE_SEL_DMA32;
+#endif
+ else
+ selector = VM_PAGE_SEL_DMA;
+
+ simple_lock(&vm_page_queue_free_lock);
+
+ /*
+ * XXX Mach has many modules that merely assume memory is
+ * directly mapped in kernel space. Instead of updating all
+ * users, we assume those which need specific physical memory
+ * properties will wire down their pages, either because
+ * they can't be paged (not part of an object), or with
+ * explicit VM calls. The strategy is then to let memory
+ * pressure balance the physical segments with pageable pages.
+ */
+ mem = vm_page_alloc_pa(0, selector, VM_PT_KERNEL);
+
+ if (mem == NULL) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return NULL;
+ }
+
+ mem->free = FALSE;
+ simple_unlock(&vm_page_queue_free_lock);
+
+ return mem;
+}
+
+phys_addr_t vm_page_grab_phys_addr(void)
+{
+ vm_page_t p = vm_page_grab(VM_PAGE_DIRECTMAP);
+ if (p == VM_PAGE_NULL)
+ return -1;
+ else
+ return p->phys_addr;
+}
+
+/*
+ * vm_page_release:
+ *
+ * Return a page to the free list.
+ */
+
+void vm_page_release(
+ vm_page_t mem,
+ boolean_t laundry,
+ boolean_t external_laundry)
+{
+ simple_lock(&vm_page_queue_free_lock);
+ if (mem->free)
+ panic("vm_page_release");
+ mem->free = TRUE;
+ vm_page_free_pa(mem, 0);
+ if (laundry) {
+ vm_page_laundry_count--;
+
+ if (vm_page_laundry_count == 0) {
+ vm_pageout_resume();
+ }
+ }
+ if (external_laundry) {
+
+ /*
+ * If vm_page_external_laundry_count is negative,
+ * the pageout daemon isn't expecting to be
+ * notified.
+ */
+
+ if (vm_page_external_laundry_count > 0) {
+ vm_page_external_laundry_count--;
+
+ if (vm_page_external_laundry_count == 0) {
+ vm_pageout_resume();
+ }
+ }
+ }
+
+ simple_unlock(&vm_page_queue_free_lock);
+}
+
+/*
+ * vm_page_grab_contig:
+ *
+ * Remove a block of contiguous pages from the free list.
+ * Returns VM_PAGE_NULL if the request fails.
+ */
+
+vm_page_t vm_page_grab_contig(
+ vm_size_t size,
+ unsigned int selector)
+{
+ unsigned int i, order, nr_pages;
+ vm_page_t mem;
+
+ order = vm_page_order(size);
+ nr_pages = 1 << order;
+
+ simple_lock(&vm_page_queue_free_lock);
+
+ /* TODO Allow caller to pass type */
+ mem = vm_page_alloc_pa(order, selector, VM_PT_KERNEL);
+
+ if (mem == NULL) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return NULL;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ mem[i].free = FALSE;
+ }
+
+ simple_unlock(&vm_page_queue_free_lock);
+
+ return mem;
+}
+
+/*
+ * vm_page_free_contig:
+ *
+ * Return a block of contiguous pages to the free list.
+ */
+
+void vm_page_free_contig(vm_page_t mem, vm_size_t size)
+{
+ unsigned int i, order, nr_pages;
+
+ order = vm_page_order(size);
+ nr_pages = 1 << order;
+
+ simple_lock(&vm_page_queue_free_lock);
+
+ for (i = 0; i < nr_pages; i++) {
+ if (mem[i].free)
+ panic("vm_page_free_contig");
+
+ mem[i].free = TRUE;
+ }
+
+ vm_page_free_pa(mem, order);
+
+ simple_unlock(&vm_page_queue_free_lock);
+}
+
+/*
+ * vm_page_alloc:
+ *
+ * Allocate and return a memory cell associated
+ * with this VM object/offset pair.
+ *
+ * Object must be locked.
+ */
+
+vm_page_t vm_page_alloc(
+ vm_object_t object,
+ vm_offset_t offset)
+{
+ vm_page_t mem;
+
+ mem = vm_page_grab(VM_PAGE_HIGHMEM);
+ if (mem == VM_PAGE_NULL)
+ return VM_PAGE_NULL;
+
+ vm_page_lock_queues();
+ vm_page_insert(mem, object, offset);
+ vm_page_unlock_queues();
+
+ return mem;
+}
+
+/*
+ * vm_page_free:
+ *
+ * Returns the given page to the free list,
+ * disassociating it with any VM object.
+ *
+ * Object and page queues must be locked prior to entry.
+ */
+void vm_page_free(
+ vm_page_t mem)
+{
+ if (mem->free)
+ panic("vm_page_free");
+
+ if (mem->tabled) {
+ vm_page_remove(mem);
+ }
+
+ assert(!mem->active && !mem->inactive);
+
+ if (mem->wire_count != 0) {
+ if (!mem->private && !mem->fictitious)
+ vm_page_wire_count--;
+ mem->wire_count = 0;
+ }
+
+ PAGE_WAKEUP_DONE(mem);
+
+ if (mem->absent)
+ vm_object_absent_release(mem->object);
+
+ /*
+ * XXX The calls to vm_page_init here are
+ * really overkill.
+ */
+
+ if (mem->private || mem->fictitious) {
+ vm_page_init(mem);
+ mem->phys_addr = vm_page_fictitious_addr;
+ mem->fictitious = TRUE;
+ vm_page_release_fictitious(mem);
+ } else {
+ boolean_t laundry = mem->laundry;
+ boolean_t external_laundry = mem->external_laundry;
+ vm_page_init(mem);
+ vm_page_release(mem, laundry, external_laundry);
+ }
+}
+
+/*
+ * vm_page_zero_fill:
+ *
+ * Zero-fill the specified page.
+ */
+void vm_page_zero_fill(
+ vm_page_t m)
+{
+ VM_PAGE_CHECK(m);
+
+ pmap_zero_page(m->phys_addr);
+}
+
+/*
+ * vm_page_copy:
+ *
+ * Copy one page to another
+ */
+
+void vm_page_copy(
+ vm_page_t src_m,
+ vm_page_t dest_m)
+{
+ VM_PAGE_CHECK(src_m);
+ VM_PAGE_CHECK(dest_m);
+
+ pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
+}
+
+#if MACH_VM_DEBUG
+/*
+ * Routine: vm_page_info
+ * Purpose:
+ * Return information about the global VP table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+vm_page_info(
+ hash_info_bucket_t *info,
+ unsigned int count)
+{
+ unsigned int i; /* bucket index: same type as count (avoids signed/unsigned compare) */
+
+ if (vm_page_bucket_count < count)
+ count = vm_page_bucket_count;
+
+ for (i = 0; i < count; i++) {
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ unsigned int bucket_count = 0;
+ vm_page_t m;
+
+ simple_lock(&bucket->lock);
+ for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+ bucket_count++;
+ simple_unlock(&bucket->lock);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ return vm_page_bucket_count;
+}
+#endif /* MACH_VM_DEBUG */
+
+
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: vm_page_print [exported]
+ */
+void vm_page_print(const vm_page_t p)
+{
+ iprintf("Page 0x%X: object 0x%X,", (vm_offset_t) p, (vm_offset_t) p->object);
+ printf(" offset 0x%X,", p->offset); /* comma: separate offset from wire_count */
+ printf(" wire_count %d,", p->wire_count);
+ printf(" %s",
+ (p->active ? "active" : (p->inactive ? "inactive" : "loose")));
+ printf("%s",
+ (p->free ? " free" : ""));
+ printf("%s",
+ (p->laundry ? " laundry" : ""));
+ printf(" %s",
+ (p->dirty ? "dirty" : "clean"));
+ printf("%s",
+ (p->busy ? " busy" : ""));
+ printf("%s",
+ (p->absent ? " absent" : ""));
+ printf("%s",
+ (p->error ? " error" : ""));
+ printf("%s",
+ (p->fictitious ? " fictitious" : ""));
+ printf("%s",
+ (p->private ? " private" : ""));
+ printf("%s",
+ (p->wanted ? " wanted" : ""));
+ printf("%s,",
+ (p->tabled ? "" : " not_tabled"));
+ printf(" phys_addr = 0x%X, lock = 0x%X, unlock_request = 0x%X\n",
+ p->phys_addr, /* NOTE(review): %X may truncate a wide phys_addr_t — ddb-only output */
+ (vm_offset_t) p->page_lock,
+ (vm_offset_t) p->unlock_request);
+}
+#endif /* MACH_KDB */
diff --git a/vm/vm_resident.h b/vm/vm_resident.h
new file mode 100644
index 0000000..e8bf681
--- /dev/null
+++ b/vm/vm_resident.h
@@ -0,0 +1,45 @@
+/*
+ * Resident memory management module functions.
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Resident memory management module functions.
+ *
+ */
+
+#ifndef _VM_RESIDENT_H_
+#define _VM_RESIDENT_H_
+
+#include <mach/std_types.h>
+
+/*
+ * vm_page_replace:
+ *
+ * Exactly like vm_page_insert, except that we first
+ * remove any existing page at the given offset in object
+ * and we don't do deactivate-behind.
+ *
+ * The object and page must be locked.
+ */
+extern void vm_page_replace (
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset);
+
+#endif /* _VM_RESIDENT_H_ */
diff --git a/vm/vm_types.h b/vm/vm_types.h
new file mode 100644
index 0000000..f64ebee
--- /dev/null
+++ b/vm/vm_types.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Written by Thomas Schwinge.
+ */
+
+#ifndef VM_VM_TYPES_H
+#define VM_VM_TYPES_H
+
+/*
+ * Types defined:
+ *
+ * vm_map_t the high-level address map data structure.
+ * vm_object_t Virtual memory object.
+ * vm_page_t See `vm/vm_page.h'.
+ */
+
+typedef struct vm_map *vm_map_t;
+#define VM_MAP_NULL ((vm_map_t) 0)
+
+typedef struct vm_object *vm_object_t;
+#define VM_OBJECT_NULL ((vm_object_t) 0)
+
+typedef struct vm_page *vm_page_t;
+#define VM_PAGE_NULL ((vm_page_t) 0)
+
+
+#endif /* VM_VM_TYPES_H */
diff --git a/vm/vm_user.c b/vm/vm_user.c
new file mode 100644
index 0000000..868230a
--- /dev/null
+++ b/vm/vm_user.c
@@ -0,0 +1,803 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_user.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * User-exported virtual memory functions.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mach_types.h> /* to get vm_address_t */
+#include <mach/memory_object.h>
+#include <mach/std_types.h> /* to get pointer_t */
+#include <mach/vm_attributes.h>
+#include <mach/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <mach/vm_cache_statistics.h>
+#include <mach/vm_sync.h>
+#include <kern/gnumach.server.h>
+#include <kern/host.h>
+#include <kern/mach.server.h>
+#include <kern/mach_host.server.h>
+#include <kern/task.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/memory_object_proxy.h>
+#include <vm/vm_page.h>
+
+
+
+vm_statistics_data_t vm_stat;
+
+/*
+ * vm_allocate allocates "zero fill" memory in the specified
+ * map.
+ */
+kern_return_t vm_allocate(
+ vm_map_t map,
+ vm_offset_t *addr,
+ vm_size_t size,
+ boolean_t anywhere)
+{
+ kern_return_t result;
+
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+ if (size == 0) {
+ *addr = 0;
+ return(KERN_SUCCESS);
+ }
+
+ if (anywhere)
+ *addr = vm_map_min(map);
+ else
+ *addr = trunc_page(*addr);
+ size = round_page(size);
+
+ result = vm_map_enter(
+ map,
+ addr,
+ size,
+ (vm_offset_t)0,
+ anywhere,
+ VM_OBJECT_NULL,
+ (vm_offset_t)0,
+ FALSE,
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+
+ return(result);
+}
+
+/*
+ * vm_deallocate deallocates the specified range of addresses in the
+ * specified address map.
+ */
+kern_return_t vm_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size)
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (size == (vm_offset_t) 0)
+ return(KERN_SUCCESS);
+
+ return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
+}
+
+/*
+ * vm_inherit sets the inheritance of the specified range in the
+ * specified map.
+ */
+kern_return_t vm_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_inherit_t new_inheritance)
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ switch (new_inheritance) {
+ case VM_INHERIT_NONE:
+ case VM_INHERIT_COPY:
+ case VM_INHERIT_SHARE:
+ break;
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, start, start+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ return(vm_map_inherit(map,
+ trunc_page(start),
+ round_page(start+size),
+ new_inheritance));
+}
+
+/*
+ * vm_protect sets the protection of the specified range in the
+ * specified map.
+ */
+
+kern_return_t vm_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ boolean_t set_maximum,
+ vm_prot_t new_protection)
+{
+ if ((map == VM_MAP_NULL) ||
+ (new_protection & ~(VM_PROT_ALL|VM_PROT_NOTIFY)))
+ return(KERN_INVALID_ARGUMENT);
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, start, start+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ return(vm_map_protect(map,
+ trunc_page(start),
+ round_page(start+size),
+ new_protection,
+ set_maximum));
+}
+
+kern_return_t vm_statistics(
+ vm_map_t map,
+ vm_statistics_data_t *stat)
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ *stat = vm_stat;
+
+ stat->pagesize = PAGE_SIZE;
+ stat->free_count = vm_page_mem_free();
+ stat->active_count = vm_page_active_count;
+ stat->inactive_count = vm_page_inactive_count;
+ stat->wire_count = vm_page_wire_count;
+
+ return(KERN_SUCCESS);
+}
+
+kern_return_t vm_cache_statistics(
+ vm_map_t map,
+ vm_cache_statistics_data_t *stats)
+{
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ stats->cache_object_count = vm_object_external_count;
+ stats->cache_count = vm_object_external_pages;
+
+ /* XXX Not implemented yet */
+ stats->active_tmp_count = 0;
+ stats->inactive_tmp_count = 0;
+ stats->active_perm_count = 0;
+ stats->inactive_perm_count = 0;
+ stats->dirty_count = 0;
+ stats->laundry_count = 0;
+ stats->writeback_count = 0;
+ stats->slab_count = 0;
+ stats->slab_reclaim_count = 0;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Handle machine-specific attributes for a mapping, such
+ * as cachability, migrability, etc.
+ */
+kern_return_t vm_machine_attribute(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, address, address+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ return vm_map_machine_attribute(map, address, size, attribute, value);
+}
+
+kern_return_t vm_read(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ pointer_t *data,
+ mach_msg_type_number_t *data_size)
+{
+ kern_return_t error;
+ vm_map_copy_t ipc_address;
+
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if ((error = vm_map_copyin(map,
+ address,
+ size,
+ FALSE, /* src_destroy */
+ &ipc_address)) == KERN_SUCCESS) {
+ *data = (pointer_t) ipc_address;
+ *data_size = size;
+ }
+ return(error);
+}
+
+kern_return_t vm_write(
+ vm_map_t map,
+ vm_address_t address,
+ pointer_t data,
+ mach_msg_type_number_t size)
+{
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
+ FALSE /* interruptible XXX */);
+}
+
+kern_return_t vm_copy(
+ vm_map_t map,
+ vm_address_t source_address,
+ vm_size_t size,
+ vm_address_t dest_address)
+{
+ vm_map_copy_t copy;
+ kern_return_t kr;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = vm_map_copyin(map, source_address, size,
+ FALSE, &copy);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ kr = vm_map_copy_overwrite(map, dest_address, copy,
+ FALSE /* interruptible XXX */);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return kr;
+ }
+
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * Routine: vm_map
+ */
+kern_return_t vm_map(
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ ipc_port_t memory_object,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_object_t object;
+ kern_return_t result;
+
+ if ((target_map == VM_MAP_NULL) ||
+ (cur_protection & ~VM_PROT_ALL) ||
+ (max_protection & ~VM_PROT_ALL))
+ return(KERN_INVALID_ARGUMENT);
+
+ switch (inheritance) {
+ case VM_INHERIT_NONE:
+ case VM_INHERIT_COPY:
+ case VM_INHERIT_SHARE:
+ break;
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ if (size == 0)
+ return KERN_INVALID_ARGUMENT;
+
+#ifdef USER32
+ if (mask & 0x80000000)
+ mask |= 0xffffffff00000000;
+#endif
+
+ *address = trunc_page(*address);
+ size = round_page(size);
+
+ if (!IP_VALID(memory_object)) {
+ object = VM_OBJECT_NULL;
+ offset = 0;
+ copy = FALSE;
+ } else if ((object = vm_object_enter(memory_object, size, FALSE))
+ == VM_OBJECT_NULL)
+ {
+ ipc_port_t real_memobj;
+ vm_prot_t prot;
+ vm_offset_t start;
+ vm_offset_t len;
+
+ result = memory_object_proxy_lookup (memory_object, &real_memobj,
+ &prot, &start, &len);
+ if (result != KERN_SUCCESS)
+ return result;
+
+ if (!copy)
+ {
+ /* Reduce the allowed access to the memory object. */
+ max_protection &= prot;
+ cur_protection &= prot;
+ }
+ else
+ {
+ /* Disallow making a copy unless the proxy allows reading. */
+ if (!(prot & VM_PROT_READ))
+ return KERN_PROTECTION_FAILURE;
+ }
+
+ /* Reduce the allowed range */
+ if ((start + offset + size) > (start + len))
+ return KERN_INVALID_ARGUMENT;
+
+ offset += start;
+
+ if ((object = vm_object_enter(real_memobj, size, FALSE))
+ == VM_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Perform the copy if requested
+ */
+
+ if (copy) {
+ vm_object_t new_object;
+ vm_offset_t new_offset;
+
+ result = vm_object_copy_strategically(object, offset, size,
+ &new_object, &new_offset,
+ &copy);
+
+ /*
+ * Throw away the reference to the
+ * original object, as it won't be mapped.
+ */
+
+ vm_object_deallocate(object);
+
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ object = new_object;
+ offset = new_offset;
+ }
+
+ if ((result = vm_map_enter(target_map,
+ address, size, mask, anywhere,
+ object, offset,
+ copy,
+ cur_protection, max_protection, inheritance
+ )) != KERN_SUCCESS)
+ vm_object_deallocate(object);
+ return(result);
+}
+
+/*
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+kern_return_t vm_wire(const ipc_port_t port,
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_prot_t access)
+{
+ boolean_t priv;
+
+ if (!IP_VALID(port))
+ return KERN_INVALID_HOST;
+
+ ip_lock(port);
+ if (!ip_active(port) ||
+ (ip_kotype(port) != IKOT_HOST_PRIV
+ && ip_kotype(port) != IKOT_HOST))
+ {
+ ip_unlock(port);
+ return KERN_INVALID_HOST;
+ }
+
+ priv = ip_kotype(port) == IKOT_HOST_PRIV;
+ ip_unlock(port);
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ if (access & ~VM_PROT_ALL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, start, start+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ /* TODO: make it tunable */
+ if (!priv && access != VM_PROT_NONE && map->size_wired + size > (8<<20))
+ return KERN_NO_ACCESS;
+
+ return vm_map_pageable(map, trunc_page(start), round_page(start+size),
+ access, TRUE, TRUE);
+}
+
+kern_return_t vm_wire_all(const ipc_port_t port, vm_map_t map, vm_wire_t flags)
+{
+ if (!IP_VALID(port))
+ return KERN_INVALID_HOST;
+
+ ip_lock(port);
+
+ if (!ip_active(port)
+ || (ip_kotype(port) != IKOT_HOST_PRIV)) {
+ ip_unlock(port);
+ return KERN_INVALID_HOST;
+ }
+
+ ip_unlock(port);
+
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if (flags & ~VM_WIRE_ALL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, map->min_offset, map->max_offset)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return vm_map_pageable_all(map, flags);
+}
+
+/*
+ * vm_object_sync synchronizes out pages from the memory object to its
+ * memory manager, if any.
+ */
+kern_return_t vm_object_sync(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ boolean_t should_flush,
+ boolean_t should_return,
+ boolean_t should_iosync)
+{
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /* FIXME: we should rather introduce an internal function, e.g.
+ vm_object_update, rather than calling memory_object_lock_request. */
+ vm_object_reference(object);
+
+ /* This is already always synchronous for now. */
+ (void) should_iosync;
+
+ size = round_page(offset + size) - trunc_page(offset);
+ offset = trunc_page(offset);
+
+ return memory_object_lock_request(object, offset, size,
+ should_return ?
+ MEMORY_OBJECT_RETURN_ALL :
+ MEMORY_OBJECT_RETURN_NONE,
+ should_flush,
+ VM_PROT_NO_CHANGE,
+ NULL, 0);
+}
+
+/*
+ * vm_msync synchronizes out pages from the map to their memory manager,
+ * if any.
+ */
+kern_return_t vm_msync(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_sync_t sync_flags)
+{
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ return vm_map_msync(map, (vm_offset_t) address, size, sync_flags);
+}
+
+/*
+ * vm_allocate_contiguous allocates "zero fill" physical memory and maps
+ * it into the specified map.
+ */
+/* TODO: respect physical alignment (palign)
+ * and minimum physical address (pmin)
+ */
+kern_return_t vm_allocate_contiguous(
+ host_t host_priv,
+ vm_map_t map,
+ vm_address_t *result_vaddr,
+ rpc_phys_addr_t *result_paddr,
+ vm_size_t size,
+ rpc_phys_addr_t pmin,
+ rpc_phys_addr_t pmax,
+ rpc_phys_addr_t palign)
+{
+ vm_size_t alloc_size;
+ unsigned int npages;
+ unsigned int i;
+ unsigned int order;
+ unsigned int selector;
+ vm_page_t pages;
+ vm_object_t object;
+ kern_return_t kr;
+ vm_address_t vaddr;
+
+ if (host_priv == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ /* FIXME */
+ if (pmin != 0)
+ return KERN_INVALID_ARGUMENT;
+
+ if (palign == 0)
+ palign = PAGE_SIZE;
+
+ /* FIXME: Allows some small alignments less than page size */
+ if ((palign < PAGE_SIZE) && (PAGE_SIZE % palign == 0))
+ palign = PAGE_SIZE;
+
+ /* FIXME */
+ if (palign != PAGE_SIZE)
+ return KERN_INVALID_ARGUMENT;
+
+ selector = VM_PAGE_SEL_DMA;
+ if (pmax > VM_PAGE_DMA_LIMIT)
+#ifdef VM_PAGE_DMA32_LIMIT
+#if VM_PAGE_DMA32_LIMIT < VM_PAGE_DIRECTMAP_LIMIT
+ if (pmax <= VM_PAGE_DMA32_LIMIT)
+ selector = VM_PAGE_SEL_DMA32;
+ if (pmax > VM_PAGE_DMA32_LIMIT)
+#endif
+#endif
+ if (pmax <= VM_PAGE_DIRECTMAP_LIMIT)
+ selector = VM_PAGE_SEL_DIRECTMAP;
+ if (pmax > VM_PAGE_DIRECTMAP_LIMIT)
+#ifdef VM_PAGE_DMA32_LIMIT
+#if VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT
+ if (pmax <= VM_PAGE_DMA32_LIMIT)
+ selector = VM_PAGE_SEL_DMA32;
+ if (pmax > VM_PAGE_DMA32_LIMIT)
+#endif
+#endif
+ if (pmax <= VM_PAGE_HIGHMEM_LIMIT)
+ selector = VM_PAGE_SEL_HIGHMEM;
+
+ size = vm_page_round(size);
+
+ if (size == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ object = vm_object_allocate(size);
+
+ if (object == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * XXX The page allocator returns blocks with a power-of-two size.
+ * The requested size may not be a power-of-two, requiring some
+ * work to release back the pages that aren't needed.
+ */
+ order = vm_page_order(size);
+ alloc_size = (1 << (order + PAGE_SHIFT));
+ npages = vm_page_atop(alloc_size);
+
+ pages = vm_page_grab_contig(alloc_size, selector);
+
+ if (pages == NULL) {
+ vm_object_deallocate(object);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ vm_object_lock(object);
+ vm_page_lock_queues();
+
+ for (i = 0; i < vm_page_atop(size); i++) {
+ /*
+ * XXX We can safely handle contiguous pages as an array,
+ * but this relies on knowing the implementation of the
+ * page allocator.
+ */
+ pages[i].busy = FALSE;
+ vm_page_insert(&pages[i], object, vm_page_ptoa(i));
+ vm_page_wire(&pages[i]);
+ }
+
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
+ for (i = vm_page_atop(size); i < npages; i++) {
+ vm_page_release(&pages[i], FALSE, FALSE);
+ }
+
+ vaddr = 0;
+ kr = vm_map_enter(map, &vaddr, size, 0, TRUE, object, 0, FALSE,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
+
+ if (kr != KERN_SUCCESS) {
+ vm_object_deallocate(object);
+ return kr;
+ }
+
+ kr = vm_map_pageable(map, vaddr, vaddr + size,
+ VM_PROT_READ | VM_PROT_WRITE,
+ TRUE, TRUE);
+
+ if (kr != KERN_SUCCESS) {
+ vm_map_remove(map, vaddr, vaddr + size);
+ return kr;
+ }
+
+ *result_vaddr = vaddr;
+ *result_paddr = pages->phys_addr;
+
+ assert(*result_paddr >= pmin);
+ assert(*result_paddr + size <= pmax);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_pages_phys returns information about a region of memory
+ */
+kern_return_t vm_pages_phys(
+ host_t host,
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ rpc_phys_addr_array_t *pagespp,
+ mach_msg_type_number_t *countp)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!page_aligned(address))
+ return KERN_INVALID_ARGUMENT;
+ if (!page_aligned(size))
+ return KERN_INVALID_ARGUMENT;
+
+ mach_msg_type_number_t count = atop(size), cur;
+ rpc_phys_addr_array_t pagesp = *pagespp;
+ kern_return_t kr;
+
+ if (*countp < count) {
+ vm_offset_t allocated;
+ /* Avoid faults while we keep vm locks */
+ kr = kmem_alloc(ipc_kernel_map, &allocated,
+ count * sizeof(pagesp[0]));
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ pagesp = (rpc_phys_addr_array_t) allocated;
+ }
+
+ for (cur = 0; cur < count; cur++) {
+ vm_map_t cmap; /* current map in traversal */
+ rpc_phys_addr_t paddr;
+ vm_map_entry_t entry; /* entry in current map */
+
+ /* find the entry containing (or following) the address */
+ vm_map_lock_read(map);
+ for (cmap = map;;) {
+ /* cmap is read-locked */
+
+ if (!vm_map_lookup_entry(cmap, address, &entry)) {
+ entry = VM_MAP_ENTRY_NULL;
+ break;
+ }
+
+ if (entry->is_sub_map) {
+ /* move down to the sub map */
+
+ vm_map_t nmap = entry->object.sub_map;
+ vm_map_lock_read(nmap);
+ vm_map_unlock_read(cmap);
+ cmap = nmap;
+ continue;
+ } else {
+ /* Found it */
+ break;
+ }
+ /*NOTREACHED*/
+ }
+
+ paddr = 0;
+ if (entry) {
+ vm_offset_t offset = address - entry->vme_start + entry->offset;
+ vm_object_t object = entry->object.vm_object;
+
+ if (object) {
+ vm_object_lock(object);
+ vm_page_t page = vm_page_lookup(object, offset);
+ if (page) {
+ if (page->phys_addr != (typeof(pagesp[cur])) page->phys_addr)
+ printf("warning: physical address overflow in vm_pages_phys!!\n");
+ else
+ paddr = page->phys_addr;
+ }
+ vm_object_unlock(object);
+ }
+ }
+ vm_map_unlock_read(cmap);
+ pagesp[cur] = paddr;
+
+ address += PAGE_SIZE;
+ }
+
+ if (pagesp != *pagespp) {
+ vm_map_copy_t copy;
+ kr = vm_map_copyin(ipc_kernel_map, (vm_offset_t) pagesp,
+ count * sizeof(pagesp[0]), TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+ *pagespp = (rpc_phys_addr_array_t) copy;
+ }
+
+ *countp = count;
+
+ return KERN_SUCCESS;
+}
diff --git a/vm/vm_user.h b/vm/vm_user.h
new file mode 100644
index 0000000..c6f20a8
--- /dev/null
+++ b/vm/vm_user.h
@@ -0,0 +1,60 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_user.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1986
+ *
+ * Declarations of user-visible virtual address space
+ * management functionality.
+ */
+
+#ifndef _VM_VM_USER_H_
+#define _VM_VM_USER_H_
+
+#include <mach/kern_return.h>
+#include <mach/std_types.h>
+#include <mach/mach_types.h>
+
+extern kern_return_t vm_allocate(vm_map_t, vm_offset_t *, vm_size_t,
+ boolean_t);
+extern kern_return_t vm_deallocate(vm_map_t, vm_offset_t, vm_size_t);
+extern kern_return_t vm_inherit(vm_map_t, vm_offset_t, vm_size_t,
+ vm_inherit_t);
+extern kern_return_t vm_protect(vm_map_t, vm_offset_t, vm_size_t, boolean_t,
+ vm_prot_t);
+extern kern_return_t vm_statistics(vm_map_t, vm_statistics_data_t *);
+extern kern_return_t vm_cache_statistics(vm_map_t, vm_cache_statistics_data_t *);
+extern kern_return_t vm_read(vm_map_t, vm_address_t, vm_size_t, pointer_t *,
+ vm_size_t *);
+extern kern_return_t vm_write(vm_map_t, vm_address_t, pointer_t, vm_size_t);
+extern kern_return_t vm_copy(vm_map_t, vm_address_t, vm_size_t,
+ vm_address_t);
+extern kern_return_t vm_map(vm_map_t, vm_offset_t *, vm_size_t, vm_offset_t,
+ boolean_t, ipc_port_t, vm_offset_t, boolean_t,
+ vm_prot_t, vm_prot_t, vm_inherit_t);
+
+#endif /* _VM_VM_USER_H_ */
diff --git a/x86_64/Makefrag.am b/x86_64/Makefrag.am
new file mode 100644
index 0000000..b0bc45c
--- /dev/null
+++ b/x86_64/Makefrag.am
@@ -0,0 +1,245 @@
+# Makefile fragment for x86_64.
+
+# Copyright (C) 1997, 1999, 2006, 2007 Free Software Foundation, Inc.
+
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+# "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+# LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+# USE OF THIS SOFTWARE.
+
+#
+# Building a distribution.
+#
+EXTRA_DIST += \
+ x86_64/ldscript \
+ x86_64/x86_64 \
+ x86_64/include/mach/x86_64
+
+if HOST_x86_64
+
+#
+# Source files for any x86_64 kernel.
+#
+
+libkernel_a_SOURCES += \
+ i386/i386at/acpi_parse_apic.h \
+ i386/i386at/acpi_parse_apic.c \
+ i386/i386at/autoconf.c \
+ i386/i386at/autoconf.h \
+ i386/i386at/biosmem.c \
+ i386/i386at/biosmem.h \
+ i386/i386at/conf.c \
+ i386/i386at/cons_conf.c \
+ i386/i386at/elf.h \
+ i386/i386at/idt.h \
+ i386/i386at/model_dep.c \
+ i386/i386at/model_dep.h \
+ i386/include/mach/sa/stdarg.h
+
+if PLATFORM_at
+libkernel_a_SOURCES += \
+ x86_64/boothdr.S \
+ i386/i386at/com.c \
+ i386/i386at/com.h \
+ i386/i386at/comreg.h \
+ i386/i386at/cram.h \
+ i386/i386at/disk.h \
+ i386/i386at/i8250.h \
+ i386/i386at/immc.c \
+ i386/i386at/int_init.c \
+ i386/i386at/int_init.h \
+ x86_64/interrupt.S \
+ i386/i386at/kd.c \
+ i386/i386at/kd.h \
+ i386/i386at/kd_event.c \
+ i386/i386at/kd_event.h \
+ i386/i386at/kd_queue.c \
+ i386/i386at/kd_queue.h \
+ i386/i386at/kd_mouse.c \
+ i386/i386at/kd_mouse.h \
+ x86_64/kdasm.S \
+ i386/i386at/kdsoft.h \
+ i386/i386at/mem.c \
+ i386/i386at/mem.h \
+ i386/i386at/rtc.c \
+ i386/i386at/rtc.h
+endif
+
+#
+# `lpr' device support.
+#
+
+if enable_lpr
+libkernel_a_SOURCES += \
+ i386/i386at/lpr.c \
+ i386/i386at/lpr.h
+endif
+
+
+#
+# Further source files for any x86_64 kernel.
+#
+
+libkernel_a_SOURCES += \
+ i386/i386/percpu.h \
+ i386/i386/percpu.c \
+ x86_64/cswitch.S \
+ x86_64/copy_user.c \
+ x86_64/debug_trace.S \
+ x86_64/idt_inittab.S \
+ x86_64/locore.S \
+ x86_64/spl.S
+
+if PLATFORM_at
+libkernel_a_SOURCES += \
+ i386/i386/apic.h \
+ i386/i386/apic.c \
+ i386/i386/hardclock.c \
+ i386/i386/hardclock.h \
+ i386/i386/irq.c \
+ i386/i386/irq.h \
+ i386/i386/msr.h \
+ i386/i386/pit.c \
+ i386/i386/pit.h
+
+if enable_apic
+libkernel_a_SOURCES += \
+ i386/i386at/ioapic.c
+else
+libkernel_a_SOURCES += \
+ i386/i386/pic.c \
+ i386/i386/pic.h \
+ i386/i386at/pic_isa.c
+endif
+endif
+
+#
+# KDB support.
+#
+
+if enable_kdb
+libkernel_a_SOURCES += \
+ x86_64/_setjmp.S
+endif
+
+
+#
+# Files from the generic sources that we want.
+#
+
+libkernel_a_SOURCES += \
+ chips/busses.c \
+ chips/busses.h \
+ device/cirbuf.c
+
+#
+# Automatically generated source files.
+#
+# See Makerules.mig.am.
+#
+
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ i386/i386/mach_i386.server.defs.c
+nodist_libkernel_a_SOURCES += \
+ i386/i386/mach_i386.server.h \
+ i386/i386/mach_i386.server.c \
+ i386/i386/mach_i386.server.msgids
+# i386/i386/mach_i386.server.defs
+
+nodist_libkernel_a_SOURCES += \
+ i386/i386/i386asm.h
+
+#
+# Architecture specialities.
+#
+
+AM_CPPFLAGS += \
+ -I$(top_srcdir)/i386 \
+ -I$(top_srcdir)/i386/i386 \
+ -I$(top_srcdir)/i386/include/mach/sa
+
+AM_CFLAGS += \
+ -mno-red-zone \
+ -mcmodel=kernel \
+ -mno-3dnow \
+ -mno-mmx \
+ -mno-sse \
+ -mno-sse2
+
+#
+# Installation.
+#
+
+include_mach_x86_64dir = $(includedir)/mach/x86_64
+include_mach_x86_64_HEADERS = \
+ i386/include/mach/i386/asm.h \
+ i386/include/mach/i386/boolean.h \
+ i386/include/mach/i386/eflags.h \
+ i386/include/mach/i386/exception.h \
+ i386/include/mach/i386/fp_reg.h \
+ i386/include/mach/i386/ioccom.h \
+ i386/include/mach/i386/kern_return.h \
+ i386/include/mach/i386/mach_i386.defs \
+ i386/include/mach/i386/mach_i386_types.h \
+ i386/include/mach/i386/machine_types.defs \
+ i386/include/mach/i386/multiboot.h \
+ i386/include/mach/i386/thread_status.h \
+ i386/include/mach/i386/trap.h \
+ i386/include/mach/i386/vm_param.h \
+ i386/include/mach/i386/vm_types.h
+
+if enable_user32
+include_mach_x86_64_HEADERS += i386/include/mach/i386/syscall_sw.h
+else
+include_mach_x86_64_HEADERS += x86_64/include/syscall_sw.h
+endif
+
+#
+# Platform specific parts.
+#
+
+KERNEL_MAP_BASE=0xffffffff80000000
+
+if PLATFORM_at
+# For now simply keep all the kernel virtual space in the last 2G.
+# We could use a more elaborate scheme if needed (e.g. reserving a
+# larger area for directmap or the kernel heap); I think only the
+# text/bss/data sections need to be placed here because of
+# -mcmodel=kernel
+gnumach_LINKFLAGS += \
+ --defsym _START_MAP=$(_START_MAP) \
+ --defsym _START=$(_START_MAP) \
+ --defsym KERNEL_MAP_SHIFT=$(KERNEL_MAP_BASE) \
+ -z max-page-size=0x1000 \
+ -T '$(srcdir)'/x86_64/ldscript
+
+AM_CCASFLAGS += \
+ -Ii386
+endif
+
+if PLATFORM_xen
+libkernel_a_SOURCES += \
+ x86_64/xen_locore.S \
+ x86_64/xen_boothdr.S \
+ i386/xen/xen.c \
+ i386/i386/xen.h
+
+gnumach_LINKFLAGS += \
+ --defsym _START_MAP=$(KERNEL_MAP_BASE) \
+ --defsym _START=$(KERNEL_MAP_BASE) \
+ --defsym KERNEL_MAP_SHIFT=0 \
+ -T '$(srcdir)'/x86_64/ldscript
+endif
+
+AM_CFLAGS += -D_START_MAP=$(_START_MAP) \
+ -DKERNEL_MAP_BASE=$(KERNEL_MAP_BASE)
+AM_CCASFLAGS += -D_START_MAP=$(_START_MAP) \
+ -DKERNEL_MAP_BASE=$(KERNEL_MAP_BASE)
+
+endif # HOST_x86_64
diff --git a/x86_64/_setjmp.S b/x86_64/_setjmp.S
new file mode 100644
index 0000000..5714f43
--- /dev/null
+++ b/x86_64/_setjmp.S
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * C library -- _setjmp, _longjmp
+ *
+ * _longjmp(a,v)
+ * will generate a "return(v)" from
+ * the last call to
+ * _setjmp(a)
+ * by restoring registers from the stack,
+ * The previous signal state is NOT restored.
+ *
+ */
+
+#include <mach/machine/asm.h>
+
+ENTRY(_setjmp)
+ movq %rbx,0(%rdi)
+ movq %rbp,8(%rdi) /* save frame pointer of caller */
+ movq %r12,16(%rdi)
+ movq %r13,24(%rdi)
+ movq %r14,32(%rdi)
+ movq %r15,40(%rdi)
+ popq %rdx
+ movq %rsp,48(%rdi) /* save stack pointer of caller */
+ movq %rdx,56(%rdi) /* save pc of caller */
+ xorq %rax,%rax
+ jmp *%rdx
+
+ENTRY(_longjmp)
+ movq %rsi,%rax /* return(v) */
+ movq 0(%rdi),%rbx
+ movq 8(%rdi),%rbp
+ movq 16(%rdi),%r12
+ movq 24(%rdi),%r13
+ movq 32(%rdi),%r14
+ movq 40(%rdi),%r15
+ movq 48(%rdi),%rsp
+ orq %rax,%rax
+ jnz 0f
+ incq %rax
+0: jmp *56(%rdi) /* done, return.... */
diff --git a/x86_64/boothdr.S b/x86_64/boothdr.S
new file mode 100644
index 0000000..0ab9bd5
--- /dev/null
+++ b/x86_64/boothdr.S
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2022 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/i386asm.h>
+#include <i386/i386/proc_reg.h>
+#include <i386/i386/seg.h>
+ /*
+ * This section will be put first into .boot. See also x86_64/ldscript.
+ */
+ .section .boot.text,"ax"
+ /* We should never be entered this way. */
+ .globl boot_start
+boot_start:
+
+ .code32
+ jmp boot_entry
+
+ /* MultiBoot header - see multiboot.h. */
+#define MULTIBOOT_MAGIC 0x1BADB002
+#define MULTIBOOT_FLAGS 0x00000003
+ P2ALIGN(2)
+boot_hdr:
+ .long MULTIBOOT_MAGIC
+ .long MULTIBOOT_FLAGS
+ /*
+ * The next item here is the checksum.
+ * XX this works OK until we need at least the 30th bit.
+ */
+ .long - (MULTIBOOT_MAGIC+MULTIBOOT_FLAGS)
+
+ .global _start
+_start:
+boot_entry:
+ /*
+ * Prepare minimal page mapping to jump to 64 bit and to C code.
+ * The first 4GB is identity mapped, and the first 2GB are re-mapped
+ * to high addresses at KERNEL_MAP_BASE
+ */
+
+ movl $p3table,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p4table)
+ /*
+ * Fill 4 entries in L3 table to cover the whole 32-bit 4GB address
+ * space. Part of it might be remapped later if the kernel is mapped
+ * below 4G.
+ */
+ movl $p2table,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3table)
+ movl $p2table1,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3table + 8)
+ movl $p2table2,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3table + 16)
+ movl $p2table3,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3table + 24)
+ /* point each page table level two entry to a page */
+ mov $0,%ecx
+.map_p2_table:
+ mov $0x200000,%eax // 2MiB page, should be always available
+ mul %ecx
+ or $(PTE_V|PTE_W|PTE_S),%eax // enable 2MiB page instead of 4k
+ mov %eax,p2table(,%ecx,8)
+ inc %ecx
+ cmp $2048,%ecx // 512 entries per table, map 4 L2 tables
+ jne .map_p2_table
+
+ /*
+	 * KERNEL_MAP_BASE must be aligned to 2GB.
+	 * Depending on kernel starting address, we might need to add another
+	 * entry in the L4 table (controlling 512 GB chunks). In any case, we
+	 * add two entries in L3 table to make sure we map 2GB for the kernel.
+	 * Note that this may override part of the mapping created above.
+ */
+.kernel_map:
+#if KERNEL_MAP_BASE >= (1U << 39)
+ movl $p3ktable,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p4table + (8 * ((KERNEL_MAP_BASE >> 39) & 0x1FF))) // select 512G block
+ movl $p2ktable1,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3ktable + (8 * ((KERNEL_MAP_BASE >> 30) & 0x1FF) )) // select first 1G block
+ movl $p2ktable2,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3ktable + (8 * (((KERNEL_MAP_BASE >> 30) & 0x1FF) + 1) )) // select second 1G block
+#else
+ movl $p2ktable1,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3table + (8 * ((KERNEL_MAP_BASE >> 30) & 0x1FF) )) // select first 1G block
+ movl $p2ktable2,%eax
+ or $(PTE_V|PTE_W),%eax
+ movl %eax,(p3table + (8 * (((KERNEL_MAP_BASE >> 30) & 0x1FF) + 1) )) // select second 1G block
+#endif
+
+ mov $0,%ecx
+.map_p2k_table:
+ mov $0x200000,%eax // 2MiB page, should be always available
+ mul %ecx
+ or $(PTE_V|PTE_W|PTE_S),%eax // enable 2MiB page instead of 4K
+ mov %eax,p2ktable1(,%ecx,8)
+ inc %ecx
+ cmp $1024,%ecx // 512 entries per table, map 2 L2 tables
+ jne .map_p2k_table
+
+switch64:
+ /*
+ * Jump to 64 bit mode, we have to
+ * - enable PAE
+ * - enable long mode
+ * - enable paging and load the tables filled above in CR3
+ * - jump to a 64-bit code segment
+ */
+ mov %cr4,%eax
+ or $CR4_PAE,%eax
+ mov %eax,%cr4
+ mov $0xC0000080,%ecx // select EFER register
+ rdmsr
+ or $(1 << 8),%eax // long mode enable bit
+ wrmsr
+ mov $p4table,%eax
+ mov %eax,%cr3
+ mov %cr0,%eax
+ or $CR0_PG,%eax
+ or $CR0_WP,%eax
+ mov %eax,%cr0
+
+ lgdt gdt64pointer
+ movw $0,%ax
+ movw %ax,%fs
+ movw %ax,%gs
+ movw $16,%ax
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%ss
+ ljmp $8,$boot_entry64
+
+ .code64
+
+boot_entry64:
+ /* Switch to our own interrupt stack. */
+ movq $solid_intstack+INTSTACK_SIZE-16, %rax
+ andq $(~15),%rax
+ movq %rax,%rsp
+
+ /* Reset EFLAGS to a known state. */
+ pushq $0
+ popf
+ /* save multiboot info for later */
+ movq %rbx,%r8
+
+ /* Fix ifunc entries */
+ movq $__rela_iplt_start,%rsi
+ movq $__rela_iplt_end,%rdi
+iplt_cont:
+ cmpq %rdi,%rsi
+ jae iplt_done
+ movq (%rsi),%rbx /* r_offset */
+ movb 4(%rsi),%al /* info */
+ cmpb $42,%al /* IRELATIVE */
+ jnz iplt_next
+ call *(%ebx) /* call ifunc */
+ movq %rax,(%rbx) /* fixed address */
+iplt_next:
+ addq $8,%rsi
+ jmp iplt_cont
+iplt_done:
+
+ /* restore multiboot info */
+ movq %r8,%rdi
+ /* Jump into C code. */
+ call EXT(c_boot_entry)
+ /* not reached */
+ nop
+
+ .code32
+ .section .boot.data
+ .align 4096
+#define SEG_ACCESS_OFS 40
+#define SEG_GRANULARITY_OFS 52
+gdt64:
+ .quad 0
+gdt64code:
+ .quad (ACC_P << SEG_ACCESS_OFS) | (ACC_CODE_R << SEG_ACCESS_OFS) | (SZ_64 << SEG_GRANULARITY_OFS)
+gdt64data:
+ .quad (ACC_P << SEG_ACCESS_OFS) | (ACC_DATA_W << SEG_ACCESS_OFS)
+gdt64end:
+ .skip (4096 - (gdt64end - gdt64))
+gdt64pointer:
+ .word gdt64end - gdt64 - 1
+ .quad gdt64
+
+ .section .boot.data
+ .align 4096
+p4table: .space 4096
+p3table: .space 4096
+p2table: .space 4096
+p2table1: .space 4096
+p2table2: .space 4096
+p2table3: .space 4096
+p3ktable: .space 4096
+p2ktable1: .space 4096
+p2ktable2: .space 4096
diff --git a/x86_64/configfrag.ac b/x86_64/configfrag.ac
new file mode 100644
index 0000000..f119a9a
--- /dev/null
+++ b/x86_64/configfrag.ac
@@ -0,0 +1,63 @@
+dnl Configure fragment for x86_64.
+
+dnl Copyright (C) 1999, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+#
+# Definitions.
+#
+
+[case $host_cpu in
+ x86_64)]
+ AM_CONDITIONAL([HOST_x86_64], [true])
+
+ # Some of the x86_64-specific code checks for these.
+ AC_DEFINE([__ELF__], [1], [__ELF__])
+
+ # Determines the size of the CPU cache line.
+ AC_DEFINE([CPU_L1_SHIFT], [6], [CPU_L1_SHIFT])
+
+ [if test x"$enable_user32" = xyes ; then
+ user32_cpu=i686
+ fi]
+
+ [# Does the architecture provide machine-specific interfaces?
+ mach_machine_routines=1
+
+ enable_pae=yes;;
+ *)]
+ AM_CONDITIONAL([HOST_x86_64], [false])[;;
+esac
+
+case $host_platform in
+ at)]
+ AM_CONDITIONAL([PLATFORM_at], [true])[;;
+ *)]
+ AM_CONDITIONAL([PLATFORM_at], [false])[;;
+esac]
+
+[case $host_platform:$host_cpu in
+ at:x86_64)]
+ # should be 4, but we do not support shared IRQ for these
+ ncom=2
+ nlpr=1
+ AC_DEFINE([ATX86_64], [1], [ATX86_64])[;;
+ xen:x86_64)]
+ AC_DEFINE([ATX86_64], [1], [ATX86_64])[;;
+ *)
+ :;;
+esac]
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/x86_64/copy_user.c b/x86_64/copy_user.c
new file mode 100644
index 0000000..c6e125d
--- /dev/null
+++ b/x86_64/copy_user.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (C) 2023 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include <kern/debug.h>
+#include <mach/boolean.h>
+
+#include <copy_user.h>
+
+
+/* Mach field descriptors measure size in bits */
+#define descsize_to_bytes(n) (n / 8)
+#define bytes_to_descsize(n) (n * 8)
+
+#ifdef USER32
+/* Versions of mach_msg_type_t and mach_msg_type_long that are expected from the 32 bit userland. */
+typedef struct {
+ unsigned int msgt_name : 8,
+ msgt_size : 8,
+ msgt_number : 12,
+ msgt_inline : 1,
+ msgt_longform : 1,
+ msgt_deallocate : 1,
+ msgt_unused : 1;
+} mach_msg_user_type_t;
+_Static_assert(sizeof(mach_msg_user_type_t) == 4);
+
+typedef struct {
+ mach_msg_user_type_t msgtl_header;
+ unsigned short msgtl_name;
+ unsigned short msgtl_size;
+ natural_t msgtl_number;
+} mach_msg_user_type_long_t;
+_Static_assert(sizeof(mach_msg_user_type_long_t) == 12);
+#else
+typedef mach_msg_type_t mach_msg_user_type_t;
+typedef mach_msg_type_long_t mach_msg_user_type_long_t;
+#endif /* USER32 */
+
+/*
+* Helper to unpack the relevant fields of a msg type; the fields are different
+* depending on whether it is long form or not.
+*/
+static inline void unpack_msg_type(vm_offset_t addr,
+ mach_msg_type_name_t *name,
+ mach_msg_type_size_t *size,
+ mach_msg_type_number_t *number,
+ boolean_t *is_inline,
+ vm_size_t *user_amount,
+ vm_size_t *kernel_amount)
+{
+ mach_msg_type_t* kmt = (mach_msg_type_t*)addr;
+ *is_inline = kmt->msgt_inline;
+ if (kmt->msgt_longform)
+ {
+ mach_msg_type_long_t* kmtl = (mach_msg_type_long_t*)addr;
+ *name = kmtl->msgtl_name;
+ *size = kmtl->msgtl_size;
+ *number = kmtl->msgtl_number;
+ *kernel_amount = sizeof(mach_msg_type_long_t);
+ *user_amount = sizeof(mach_msg_user_type_long_t);
+ }
+ else
+ {
+ *name = kmt->msgt_name;
+ *size = kmt->msgt_size;
+ *number = kmt->msgt_number;
+ *kernel_amount = sizeof(mach_msg_type_t);
+ *user_amount = sizeof(mach_msg_user_type_t);
+ }
+}
+
+#ifdef USER32
+static inline void mach_msg_user_type_to_kernel(const mach_msg_user_type_t *u,
+ mach_msg_type_t* k) {
+ k->msgt_name = u->msgt_name;
+ k->msgt_size = u->msgt_size;
+ k->msgt_number = u->msgt_number;
+ k->msgt_inline = u->msgt_inline;
+ k->msgt_longform = u->msgt_longform;
+ k->msgt_deallocate = u->msgt_deallocate;
+ k->msgt_unused = 0;
+}
+
+static inline void mach_msg_user_type_to_kernel_long(const mach_msg_user_type_long_t *u,
+ mach_msg_type_long_t* k) {
+ const mach_msg_type_long_t kernel = {
+ .msgtl_header = {
+ .msgt_name = u->msgtl_name,
+ .msgt_size = u->msgtl_size,
+ .msgt_number = u->msgtl_number,
+ .msgt_inline = u->msgtl_header.msgt_inline,
+ .msgt_longform = u->msgtl_header.msgt_longform,
+ .msgt_deallocate = u->msgtl_header.msgt_deallocate,
+ .msgt_unused = 0
+ }
+ };
+ *k = kernel;
+}
+
+static inline void mach_msg_kernel_type_to_user(const mach_msg_type_t *k,
+ mach_msg_user_type_t *u) {
+ u->msgt_name = k->msgt_name;
+ u->msgt_size = k->msgt_size;
+ u->msgt_number = k->msgt_number;
+ u->msgt_inline = k->msgt_inline;
+ u->msgt_longform = k->msgt_longform;
+ u->msgt_deallocate = k->msgt_deallocate;
+ u->msgt_unused = 0;
+}
+
+static inline void mach_msg_kernel_type_to_user_long(const mach_msg_type_long_t *k,
+ mach_msg_user_type_long_t *u) {
+ const mach_msg_user_type_long_t user = {
+ .msgtl_header = {
+ .msgt_name = 0,
+ .msgt_size = 0,
+ .msgt_number = 0,
+ .msgt_inline = k->msgtl_header.msgt_inline,
+ .msgt_longform = k->msgtl_header.msgt_longform,
+ .msgt_deallocate = k->msgtl_header.msgt_deallocate,
+ .msgt_unused = 0
+ },
+ .msgtl_name = k->msgtl_header.msgt_name,
+ .msgtl_size = k->msgtl_header.msgt_size,
+ .msgtl_number = k->msgtl_header.msgt_number
+ };
+ *u = user;
+}
+#endif
+
+static inline int copyin_mach_msg_type(const rpc_vm_offset_t *uaddr, mach_msg_type_t *kaddr) {
+#ifdef USER32
+ mach_msg_user_type_t user;
+ int ret = copyin(uaddr, &user, sizeof(mach_msg_user_type_t));
+ if (ret) {
+ return ret;
+ }
+ mach_msg_user_type_to_kernel(&user, kaddr);
+ return 0;
+#else
+ return copyin(uaddr, kaddr, sizeof(mach_msg_type_t));
+#endif
+}
+
+static inline int copyout_mach_msg_type(const mach_msg_type_t *kaddr, rpc_vm_offset_t *uaddr) {
+#ifdef USER32
+ mach_msg_user_type_t user;
+ mach_msg_kernel_type_to_user(kaddr, &user);
+ return copyout(&user, uaddr, sizeof(mach_msg_user_type_t));
+#else
+ return copyout(kaddr, uaddr, sizeof(mach_msg_type_t));
+#endif
+}
+
+static inline int copyin_mach_msg_type_long(const rpc_vm_offset_t *uaddr, mach_msg_type_long_t *kaddr) {
+#ifdef USER32
+ mach_msg_user_type_long_t user;
+ int ret = copyin(uaddr, &user, sizeof(mach_msg_user_type_long_t));
+ if (ret)
+ return ret;
+ mach_msg_user_type_to_kernel_long(&user, kaddr);
+ return 0;
+#else
+ return copyin(uaddr, kaddr, sizeof(mach_msg_type_long_t));
+#endif
+}
+
+static inline int copyout_mach_msg_type_long(const mach_msg_type_long_t *kaddr, rpc_vm_offset_t *uaddr) {
+#ifdef USER32
+ mach_msg_user_type_long_t user;
+ mach_msg_kernel_type_to_user_long(kaddr, &user);
+ return copyout(&user, uaddr, sizeof(mach_msg_user_type_long_t));
+#else
+ return copyout(kaddr, uaddr, sizeof(mach_msg_type_long_t));
+#endif
+}
+
+/* Optimized version of unpack_msg_type(), including proper copyin() */
+static inline int copyin_unpack_msg_type(vm_offset_t uaddr,
+ vm_offset_t kaddr,
+ mach_msg_type_name_t *name,
+ mach_msg_type_size_t *size,
+ mach_msg_type_number_t *number,
+ boolean_t *is_inline,
+ vm_size_t *user_amount,
+ vm_size_t *kernel_amount)
+{
+ mach_msg_type_t *kmt = (mach_msg_type_t*)kaddr;
+ if (copyin_mach_msg_type((void *)uaddr, kmt))
+ return 1;
+ *is_inline = kmt->msgt_inline;
+ if (kmt->msgt_longform)
+ {
+ mach_msg_type_long_t* kmtl = (mach_msg_type_long_t*)kaddr;
+ if (copyin_mach_msg_type_long((void *)uaddr, kmtl))
+ return 1;
+ *name = kmtl->msgtl_name;
+ *size = kmtl->msgtl_size;
+ *number = kmtl->msgtl_number;
+ *user_amount = sizeof(mach_msg_user_type_long_t);
+ *kernel_amount = sizeof(mach_msg_type_long_t);
+ }
+ else
+ {
+ *name = kmt->msgt_name;
+ *size = kmt->msgt_size;
+ *number = kmt->msgt_number;
+ *user_amount = sizeof(mach_msg_user_type_t);
+ *kernel_amount = sizeof(mach_msg_type_t);
+ }
+ return 0;
+}
+
+/*
+ * The msg type has a different size field depending on whether it is long or not,
+ * and we also need to convert from bytes to bits
+ */
+static inline void adjust_msg_type_size(vm_offset_t addr, int amount)
+{
+ mach_msg_type_t* kmt = (mach_msg_type_t*)addr;
+ if (kmt->msgt_longform)
+ {
+ mach_msg_type_long_t* kmtl = (mach_msg_type_long_t*)addr;
+ kmtl->msgtl_size += bytes_to_descsize(amount);
+ }
+ else
+ {
+ kmt->msgt_size += bytes_to_descsize(amount);
+ }
+}
+
+/* Optimized version of unpack_msg_type(), including proper copyout() */
+static inline int copyout_unpack_msg_type(vm_offset_t kaddr,
+ vm_offset_t uaddr,
+ mach_msg_type_name_t *name,
+ mach_msg_type_size_t *size,
+ mach_msg_type_number_t *number,
+ boolean_t *is_inline,
+ vm_size_t *user_amount,
+ vm_size_t *kernel_amount)
+{
+ mach_msg_type_t *kmt = (mach_msg_type_t*)kaddr;
+ *is_inline = kmt->msgt_inline;
+ if (kmt->msgt_longform)
+ {
+ mach_msg_type_long_t* kmtl = (mach_msg_type_long_t*)kaddr;
+ mach_msg_type_size_t orig_size = kmtl->msgtl_size;
+ int ret;
+
+ if (MACH_MSG_TYPE_PORT_ANY(kmtl->msgtl_name)) {
+#ifdef USER32
+ kmtl->msgtl_size = bytes_to_descsize(sizeof(mach_port_name_t));
+#else
+ /* 64 bit ABI uses mach_port_name_inlined_t for inlined ports. */
+ if (!kmt->msgt_inline)
+ kmtl->msgtl_size = bytes_to_descsize(sizeof(mach_port_name_t));
+#endif
+ }
+ ret = copyout_mach_msg_type_long(kmtl, (void*)uaddr);
+ kmtl->msgtl_size = orig_size;
+ if (ret)
+ return 1;
+
+ *name = kmtl->msgtl_name;
+ *size = kmtl->msgtl_size;
+ *number = kmtl->msgtl_number;
+ *user_amount = sizeof(mach_msg_user_type_long_t);
+ *kernel_amount = sizeof(mach_msg_type_long_t);
+ }
+ else
+ {
+ mach_msg_type_size_t orig_size = kmt->msgt_size;
+ int ret;
+
+ if (MACH_MSG_TYPE_PORT_ANY(kmt->msgt_name)) {
+#ifdef USER32
+ kmt->msgt_size = bytes_to_descsize(sizeof(mach_port_name_t));
+#else
+ /* 64 bit ABI uses mach_port_name_inlined_t for inlined ports. */
+ if (!kmt->msgt_inline)
+ kmt->msgt_size = bytes_to_descsize(sizeof(mach_port_name_t));
+#endif
+ }
+ ret = copyout_mach_msg_type(kmt, (void *)uaddr);
+ kmt->msgt_size = orig_size;
+ if (ret)
+ return 1;
+
+ *name = kmt->msgt_name;
+ *size = kmt->msgt_size;
+ *number = kmt->msgt_number;
+ *user_amount = sizeof(mach_msg_user_type_t);
+ *kernel_amount = sizeof(mach_msg_type_t);
+ }
+ return 0;
+}
+
+#ifdef USER32
+/*
+ * Compute the user-space size of a message still in the kernel when processing
+ * messages from 32bit userland.
+ * The message may be originating from userspace (in which case we could
+ * optimize this by keeping the usize around) or from kernel space (we could
+ * optimize if the message structure is fixed and known in advance).
+ * For now just handle the most general case, iterating over the msg body.
+ */
+size_t msg_usize(const mach_msg_header_t *kmsg)
+{
+ size_t ksize = kmsg->msgh_size;
+ size_t usize = sizeof(mach_msg_user_header_t);
+ if (ksize > sizeof(mach_msg_header_t))
+ {
+      // iterate over the body to compute the user-space message size
+ vm_offset_t saddr, eaddr;
+ saddr = (vm_offset_t)(kmsg + 1);
+ eaddr = saddr + ksize - sizeof(mach_msg_header_t);
+ while (saddr < (eaddr - sizeof(mach_msg_type_t)))
+ {
+ vm_size_t user_amount, kernel_amount;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline;
+ unpack_msg_type(saddr, &name, &size, &number, &is_inline, &user_amount, &kernel_amount);
+ saddr += kernel_amount;
+ saddr = mach_msg_kernel_align(saddr);
+ usize += user_amount;
+ usize = mach_msg_user_align(usize);
+
+ if (is_inline)
+ {
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ {
+ const vm_size_t length = sizeof(mach_port_t) * number;
+ saddr += length;
+ usize += sizeof(mach_port_name_t) * number;
+ }
+ else
+ {
+ size_t n = descsize_to_bytes(size);
+ saddr += n*number;
+ usize += n*number;
+ }
+ }
+ else
+ {
+ // advance one pointer
+ saddr += sizeof(vm_offset_t);
+ usize += sizeof(rpc_vm_offset_t);
+ }
+ saddr = mach_msg_kernel_align(saddr);
+ usize = mach_msg_user_align(usize);
+ }
+ }
+ return usize;
+}
+#endif /* USER32 */
+
+/*
+ * Expand the msg header and, if required, the msg body (ports, pointers)
+ *
+ * To not make the code too complicated, we use the fact that some fields of
+ * mach_msg_header have the same size in the kernel and user variant (basically
+ * all fields except ports and addresses)
+*/
+int copyinmsg (const void *userbuf, void *kernelbuf, const size_t usize, const size_t ksize)
+{
+ const mach_msg_user_header_t *umsg = userbuf;
+ mach_msg_header_t *kmsg = kernelbuf;
+
+#ifdef USER32
+ if (copyin(&umsg->msgh_bits, &kmsg->msgh_bits, sizeof(kmsg->msgh_bits)))
+ return 1;
+ /* kmsg->msgh_size is filled in later */
+ if (copyin_port(&umsg->msgh_remote_port, &kmsg->msgh_remote_port))
+ return 1;
+ if (copyin_port(&umsg->msgh_local_port, &kmsg->msgh_local_port))
+ return 1;
+ if (copyin(&umsg->msgh_seqno, &kmsg->msgh_seqno,
+ sizeof(kmsg->msgh_seqno) + sizeof(kmsg->msgh_id)))
+ return 1;
+#else
+ /* The 64 bit interface ensures the header is the same size, so it does not need any resizing. */
+ _Static_assert(sizeof(mach_msg_header_t) == sizeof(mach_msg_user_header_t),
+ "mach_msg_header_t and mach_msg_user_header_t expected to be of the same size");
+ if (copyin(umsg, kmsg, sizeof(mach_msg_header_t)))
+ return 1;
+ kmsg->msgh_remote_port &= 0xFFFFFFFF; // FIXME: still have port names here
+ kmsg->msgh_local_port &= 0xFFFFFFFF; // also, this assumes little-endian
+#endif
+
+ vm_offset_t usaddr, ueaddr, ksaddr;
+ ksaddr = (vm_offset_t)(kmsg + 1);
+ usaddr = (vm_offset_t)(umsg + 1);
+ ueaddr = (vm_offset_t)umsg + usize;
+
+ _Static_assert(!mach_msg_user_is_misaligned(sizeof(mach_msg_user_header_t)),
+ "mach_msg_user_header_t needs to be MACH_MSG_USER_ALIGNMENT aligned.");
+
+ if (usize > sizeof(mach_msg_user_header_t))
+ {
+      /* check we have at least space for an empty descriptor */
+ while (usaddr <= (ueaddr - sizeof(mach_msg_user_type_t)))
+ {
+ vm_size_t user_amount, kernel_amount;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline;
+ if (copyin_unpack_msg_type(usaddr, ksaddr, &name, &size, &number,
+ &is_inline, &user_amount, &kernel_amount))
+ return 1;
+
+ // keep a reference to the current field descriptor, we
+ // might need to adjust it later depending on the type
+ vm_offset_t ktaddr = ksaddr;
+ usaddr += user_amount;
+ usaddr = mach_msg_user_align(usaddr);
+ ksaddr += kernel_amount;
+ ksaddr = mach_msg_kernel_align(ksaddr);
+
+ if (is_inline)
+ {
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ {
+#ifdef USER32
+ if (size != bytes_to_descsize(sizeof(mach_port_name_t)))
+ return 1;
+ if ((usaddr + sizeof(mach_port_name_t)*number) > ueaddr)
+ return 1;
+ adjust_msg_type_size(ktaddr, sizeof(mach_port_t) - sizeof(mach_port_name_t));
+ for (int i=0; i<number; i++)
+ {
+ if (copyin_port((mach_port_name_t*)usaddr, (mach_port_t*)ksaddr))
+ return 1;
+ ksaddr += sizeof(mach_port_t);
+ usaddr += sizeof(mach_port_name_t);
+ }
+#else
+ if (size != bytes_to_descsize(sizeof(mach_port_name_inlined_t)))
+ return 1;
+ const vm_size_t length = number * sizeof(mach_port_name_inlined_t);
+ if ((usaddr + length) > ueaddr)
+ return 1;
+ if (copyin((void*)usaddr, (void*)ksaddr, length))
+ return 1;
+ usaddr += length;
+ ksaddr += length;
+#endif
+ }
+ else
+ {
+ // type that doesn't need change
+ size_t n = descsize_to_bytes(size);
+ if ((usaddr + n*number) > ueaddr)
+ return 1;
+ if (copyin((void*)usaddr, (void*)ksaddr, n*number))
+ return 1;
+ usaddr += n*number;
+ ksaddr += n*number;
+ }
+ }
+ else
+ {
+ if ((usaddr + sizeof(rpc_vm_offset_t)) > ueaddr)
+ return 1;
+
+ /* out-of-line port arrays are always arrays of mach_port_name_t (4 bytes)
+ * and are expanded in ipc_kmsg_copyin_body() */
+ if (MACH_MSG_TYPE_PORT_ANY(name)) {
+ if (size != bytes_to_descsize(sizeof(mach_port_name_t)))
+ return 1;
+ adjust_msg_type_size(ktaddr, sizeof(mach_port_t) - sizeof(mach_port_name_t));
+ }
+
+ if (copyin_address((rpc_vm_offset_t*)usaddr, (vm_offset_t*)ksaddr))
+ return 1;
+ // Advance one pointer.
+ ksaddr += sizeof(vm_offset_t);
+ usaddr += sizeof(rpc_vm_offset_t);
+ }
+ // Note that we have to align because mach_port_name_t might not align
+ // with the required user alignment.
+ usaddr = mach_msg_user_align(usaddr);
+ ksaddr = mach_msg_kernel_align(ksaddr);
+ }
+ }
+
+ kmsg->msgh_size = sizeof(mach_msg_header_t) + ksaddr - (vm_offset_t)(kmsg + 1);
+ assert(kmsg->msgh_size <= ksize);
+#ifndef USER32
+ if (kmsg->msgh_size != usize)
+ return 1;
+#endif
+ return 0;
+}
+
+int copyoutmsg (const void *kernelbuf, void *userbuf, const size_t ksize)
+{
+ const mach_msg_header_t *kmsg = kernelbuf;
+ mach_msg_user_header_t *umsg = userbuf;
+#ifdef USER32
+ if (copyout(&kmsg->msgh_bits, &umsg->msgh_bits, sizeof(kmsg->msgh_bits)))
+ return 1;
+ /* umsg->msgh_size is filled in later */
+ if (copyout_port(&kmsg->msgh_remote_port, &umsg->msgh_remote_port))
+ return 1;
+ if (copyout_port(&kmsg->msgh_local_port, &umsg->msgh_local_port))
+ return 1;
+ if (copyout(&kmsg->msgh_seqno, &umsg->msgh_seqno,
+ sizeof(kmsg->msgh_seqno) + sizeof(kmsg->msgh_id)))
+ return 1;
+#else
+ if (copyout(kmsg, umsg, sizeof(mach_msg_header_t)))
+ return 1;
+#endif /* USER32 */
+
+ vm_offset_t ksaddr, keaddr, usaddr;
+ ksaddr = (vm_offset_t)(kmsg + 1);
+ usaddr = (vm_offset_t)(umsg + 1);
+ keaddr = ksaddr + ksize - sizeof(mach_msg_header_t);
+
+ if (ksize > sizeof(mach_msg_header_t))
+ {
+ while (ksaddr < keaddr)
+ {
+ vm_size_t user_amount, kernel_amount;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline;
+ if (copyout_unpack_msg_type(ksaddr, usaddr, &name, &size, &number,
+ &is_inline, &user_amount, &kernel_amount))
+ return 1;
+ usaddr += user_amount;
+ usaddr = mach_msg_user_align(usaddr);
+ ksaddr += kernel_amount;
+ ksaddr = mach_msg_kernel_align(ksaddr);
+
+ if (is_inline)
+ {
+ if (MACH_MSG_TYPE_PORT_ANY(name))
+ {
+#ifdef USER32
+ for (int i=0; i<number; i++)
+ {
+ if (copyout_port((mach_port_t*)ksaddr, (mach_port_name_t*)usaddr))
+ return 1;
+ ksaddr += sizeof(mach_port_t);
+ usaddr += sizeof(mach_port_name_t);
+ }
+#else
+ if (size != bytes_to_descsize(sizeof(mach_port_name_inlined_t)))
+ return 1;
+ const vm_size_t length = number * sizeof(mach_port_name_inlined_t);
+ if (copyout((void*)ksaddr, (void*)usaddr, length))
+ return 1;
+ ksaddr += length;
+ usaddr += length;
+#endif
+ }
+ else
+ {
+ size_t n = descsize_to_bytes(size);
+ if (copyout((void*)ksaddr, (void*)usaddr, n*number))
+ return 1;
+ usaddr += n*number;
+ ksaddr += n*number;
+ }
+ }
+ else
+ {
+ if (copyout_address((vm_offset_t*)ksaddr, (rpc_vm_offset_t*)usaddr))
+ return 1;
+ // advance one pointer
+ ksaddr += sizeof(vm_offset_t);
+ usaddr += sizeof(rpc_vm_offset_t);
+ }
+ usaddr = mach_msg_user_align(usaddr);
+ ksaddr = mach_msg_kernel_align(ksaddr);
+ }
+ }
+
+ mach_msg_size_t usize;
+ usize = sizeof(mach_msg_user_header_t) + usaddr - (vm_offset_t)(umsg + 1);
+ if (copyout(&usize, &umsg->msgh_size, sizeof(umsg->msgh_size)))
+ return 1;
+#ifndef USER32
+ if (usize != ksize)
+ return 1;
+#endif
+
+ return 0;
+
+}
diff --git a/x86_64/cswitch.S b/x86_64/cswitch.S
new file mode 100644
index 0000000..9c4640f
--- /dev/null
+++ b/x86_64/cswitch.S
@@ -0,0 +1,148 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/i386/proc_reg.h>
+#include <i386/i386/i386asm.h>
+#include <i386/i386/cpu_number.h>
+
+/*
+ * Context switch routines for x86_64.
+ */
+
+ENTRY(Load_context)
+ movq S_ARG0,%rcx /* get thread */
+ movq TH_KERNEL_STACK(%rcx),%rcx /* get kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%rcx),%rdx
+ /* point to stack top */
+ CPU_NUMBER(%eax)
+ movq %rcx,MY(ACTIVE_STACK) /* store stack address */
+ movq %rdx,CX(EXT(kernel_stack),%rax) /* store stack top */
+
+/* XXX complete */
+
+ movq KSS_ESP(%rcx),%rsp /* switch stacks */
+ movq KSS_EBP(%rcx),%rbp /* restore registers */
+ movq KSS_EBX(%rcx),%rbx
+ movq KSS_R12(%rcx),%r12
+ movq KSS_R13(%rcx),%r13
+ movq KSS_R14(%rcx),%r14
+ movq KSS_R15(%rcx),%r15
+ xorq %rax,%rax /* return zero (no old thread) */
+ jmp *KSS_EIP(%rcx) /* resume thread */
+
+/*
+ * This really only has to save registers
+ * when there is no explicit continuation.
+ */
+
+ENTRY(Switch_context)
+ movq MY(ACTIVE_STACK),%rcx /* get old kernel stack */
+
+ movq %r12,KSS_R12(%rcx) /* save registers */
+ movq %r13,KSS_R13(%rcx)
+ movq %r14,KSS_R14(%rcx)
+ movq %r15,KSS_R15(%rcx)
+ movq %rbx,KSS_EBX(%rcx)
+ movq %rbp,KSS_EBP(%rcx)
+ popq KSS_EIP(%rcx) /* save return PC */
+ movq %rsp,KSS_ESP(%rcx) /* save SP */
+
+ movq S_ARG0,%rax /* get old thread */
+ movq %rcx,TH_KERNEL_STACK(%rax) /* save old stack */
+ movq S_ARG1,%rbx /* get continuation */
+ movq %rbx,TH_SWAP_FUNC(%rax) /* save continuation */
+
+ movq S_ARG2,%rsi /* get new thread */
+
+ movq TH_KERNEL_STACK(%rsi),%rcx /* get its kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%rcx),%rbx
+ /* point to stack top */
+
+ CPU_NUMBER(%eax)
+ movq %rsi,MY(ACTIVE_THREAD) /* new thread is active */
+ movq %rcx,MY(ACTIVE_STACK) /* set current stack */
+ movq %rbx,CX(EXT(kernel_stack),%rax) /* set stack top */
+
+ movq KSS_ESP(%rcx),%rsp /* switch stacks */
+ movq KSS_EBP(%rcx),%rbp /* restore registers */
+ movq KSS_EBX(%rcx),%rbx
+ movq KSS_R12(%rcx),%r12
+ movq KSS_R13(%rcx),%r13
+ movq KSS_R14(%rcx),%r14
+ movq KSS_R15(%rcx),%r15
+ jmp *KSS_EIP(%rcx) /* return old thread */
+
+ENTRY(Thread_continue)
+ movq %rax,%rdi /* push the thread argument */
+ xorq %rbp,%rbp /* zero frame pointer */
+ call *%rbx /* call real continuation */
+
+#if NCPUS > 1
+/*
+ * void switch_to_shutdown_context(thread_t thread,
+ * void (*routine)(processor_t),
+ * processor_t processor)
+ *
+ * saves the kernel context of the thread,
+ * switches to the interrupt stack,
+ * continues the thread (with thread_continue),
+ * then runs routine on the interrupt stack.
+ *
+ * Assumes that the thread is a kernel thread (thus
+ * has no FPU state)
+ */
+ENTRY(switch_to_shutdown_context)
+ud2
+ movq MY(ACTIVE_STACK),%rcx /* get old kernel stack */
+ movq %r12,KSS_R12(%rcx) /* save registers */
+ movq %r13,KSS_R13(%rcx)
+ movq %r14,KSS_R14(%rcx)
+ movq %r15,KSS_R15(%rcx)
+ movq %rbx,KSS_EBX(%rcx)
+ movq %rbp,KSS_EBP(%rcx)
+ popq KSS_EIP(%rcx) /* save return PC */
+ movq %rsp,KSS_ESP(%rcx) /* save SP */
+
+ movq S_ARG0,%rax /* get old thread */
+ movq %rcx,TH_KERNEL_STACK(%rax) /* save old stack */
+ movq $0,TH_SWAP_FUNC(%rax) /* clear continuation */
+ movq S_ARG1,%rbx /* get routine to run next */
+ movq S_ARG2,%rsi /* get its argument */
+
+ CPU_NUMBER(%ecx)
+ movq CX(EXT(int_stack_base),%rcx),%rcx /* point to its interrupt stack */
+ lea INTSTACK_SIZE(%rcx),%rsp /* switch to it (top) */
+
+ movq %rax,%rdi /* push thread */
+ call EXT(thread_dispatch) /* reschedule thread */
+
+ movq %rsi,%rdi /* push argument */
+ call *%rbx /* call routine to run */
+ hlt /* (should never return) */
+
+#endif /* NCPUS > 1 */
diff --git a/x86_64/debug_trace.S b/x86_64/debug_trace.S
new file mode 100644
index 0000000..7bed5cc
--- /dev/null
+++ b/x86_64/debug_trace.S
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifdef DEBUG
+
+#include <mach/machine/asm.h>
+#include <i386/xen.h>
+
+#include "debug.h"
+
+ .text
+ENTRY(_debug_trace)
+ pushf
+ cli
+ pushq %rax
+ pushq %rbx
+ .byte 0x36 /* SS: bug in gas? */
+ movl %ss:EXT(debug_trace_pos),%eax
+ movq S_ARG0,%rbx
+ movq %rbx,%ss:EXT(debug_trace_buf)(,%eax,16)
+ movl S_ARG1,%ebx
+ movl %ebx,%ss:EXT(debug_trace_buf)+8(,%eax,16)
+ incl %eax
+ andl $DEBUG_TRACE_LEN-1,%eax
+ .byte 0x36 /* SS: bug in gas? */
+ movl %eax,%ss:EXT(debug_trace_pos)
+ popq %rbx
+ popq %rax
+ popf
+ ret
+
+#endif /* DEBUG */
+
+/* XXX gas bug? need at least one symbol... */
+foo:
+
diff --git a/x86_64/idt_inittab.S b/x86_64/idt_inittab.S
new file mode 100644
index 0000000..3a205ae
--- /dev/null
+++ b/x86_64/idt_inittab.S
@@ -0,0 +1,148 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include <mach/machine/asm.h>
+
+#include <i386/i386/seg.h>
+#include <i386/i386/i386asm.h>
+
+#include <xen/public/arch-x86/xen-x86_64.h>
+
+/* We'll be using macros to fill in a table in data hunk 2
+ while writing trap entrypoint routines at the same time.
+ Here's the header that comes before everything else. */
+ .data 2
+ENTRY(idt_inittab)
+ .text
+
+/*
+ * Interrupt descriptor table and code vectors for it.
+ */
+#ifdef MACH_PV_DESCRIPTORS
+#define IDT_ENTRY(n,entry,type,ist) \
+ .data 2 ;\
+ .byte n ;\
+ .byte (((type)&ACC_PL)>>5)|((((type)&(ACC_TYPE|ACC_A))==ACC_INTR_GATE)<<2) ;\
+ .word FLAT_KERNEL_CS ;\
+ .word ist ;\
+ .word 0 /*pad*/ ;\
+ .quad entry ;\
+ .text
+#else /* MACH_PV_DESCRIPTORS */
+#define IDT_ENTRY(n,entry,type,ist) \
+ .data 2 ;\
+ .quad entry ;\
+ .word n ;\
+ .word type ;\
+ .word ist ;\
+ .word 0 /*pad*/ ;\
+ .text
+#endif /* MACH_PV_DESCRIPTORS */
+
+/*
+ * No error code. Clear error code and push trap number.
+ */
+#define EXCEPTION(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE, 0);\
+ENTRY(name) ;\
+ INT_FIX ;\
+ pushq $(0) ;\
+ pushq $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * User-accessible exception. Otherwise, same as above.
+ */
+#define EXCEP_USR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_U|ACC_TRAP_GATE, 0);\
+ENTRY(name) ;\
+ INT_FIX ;\
+ pushq $(0) ;\
+ pushq $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Error code has been pushed. Just push trap number.
+ */
+#define EXCEP_ERR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_INTR_GATE, 0);\
+ENTRY(name) ;\
+ INT_FIX ;\
+ pushq $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Special interrupt code: dispatches to a unique entrypoint,
+ * not defined automatically here.
+ */
+#define EXCEP_SPC(n,name, ist) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE, ist)
+
+
+EXCEPTION(0x00,t_zero_div)
+EXCEP_SPC(0x01,t_debug, 0)
+/* skip NMI interrupt - let more specific code figure that out. */
+EXCEP_USR(0x03,t_int3)
+EXCEP_USR(0x04,t_into)
+EXCEP_USR(0x05,t_bounds)
+EXCEPTION(0x06,t_invop)
+EXCEPTION(0x07,t_nofpu)
+EXCEP_SPC(0x08,t_dbl_fault, 1)
+EXCEPTION(0x09,a_fpu_over)
+EXCEPTION(0x0a,a_inv_tss)
+EXCEP_SPC(0x0b,t_segnp, 0)
+EXCEP_ERR(0x0c,t_stack_fault)
+EXCEP_SPC(0x0d,t_gen_prot, 0)
+EXCEP_SPC(0x0e,t_page_fault, 0)
+#ifdef MACH_PV_DESCRIPTORS
+EXCEP_ERR(0x0f,t_trap_0f)
+#else
+EXCEPTION(0x0f,t_trap_0f)
+#endif
+EXCEPTION(0x10,t_fpu_err)
+EXCEPTION(0x11,t_trap_11)
+EXCEPTION(0x12,t_trap_12)
+EXCEPTION(0x13,t_trap_13)
+EXCEPTION(0x14,t_trap_14)
+EXCEPTION(0x15,t_trap_15)
+EXCEPTION(0x16,t_trap_16)
+EXCEPTION(0x17,t_trap_17)
+EXCEPTION(0x18,t_trap_18)
+EXCEPTION(0x19,t_trap_19)
+EXCEPTION(0x1a,t_trap_1a)
+EXCEPTION(0x1b,t_trap_1b)
+EXCEPTION(0x1c,t_trap_1c)
+EXCEPTION(0x1d,t_trap_1d)
+EXCEPTION(0x1e,t_trap_1e)
+EXCEPTION(0x1f,t_trap_1f)
+
+/* Terminator */
+ .data 2
+ .long 0
+#ifdef MACH_PV_DESCRIPTORS
+ .long 0
+ .quad 0
+#endif /* MACH_PV_DESCRIPTORS */
+
diff --git a/x86_64/include/mach/x86_64 b/x86_64/include/mach/x86_64
new file mode 120000
index 0000000..698e9fb
--- /dev/null
+++ b/x86_64/include/mach/x86_64
@@ -0,0 +1 @@
+../../../i386/include/mach/i386 \ No newline at end of file
diff --git a/x86_64/include/syscall_sw.h b/x86_64/include/syscall_sw.h
new file mode 100644
index 0000000..4e03f28
--- /dev/null
+++ b/x86_64/include/syscall_sw.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_X86_64_SYSCALL_SW_H_
+#define _MACH_X86_64_SYSCALL_SW_H_
+
+#include <mach/machine/asm.h>
+
+#define kernel_trap(trap_name,trap_number,number_args) \
+ENTRY(trap_name) \
+ movq $ trap_number,%rax; \
+ movq %rcx,%r10; \
+ syscall; \
+ ret; \
+END(trap_name)
+
+#endif /* _MACH_X86_64_SYSCALL_SW_H_ */
diff --git a/x86_64/interrupt.S b/x86_64/interrupt.S
new file mode 100644
index 0000000..6fb7772
--- /dev/null
+++ b/x86_64/interrupt.S
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 1995 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/ipl.h>
+#ifdef APIC
+# include <i386/apic.h>
+#else
+# include <i386/pic.h>
+#endif
+#include <i386/i386asm.h>
+
+#define READ_ISR (OCW_TEMPLATE|READ_NEXT_RD|READ_IS_ONRD)
+
+/*
+ * Generic interrupt handler.
+ *
+ * On entry, %eax contains the irq number.
+ *
+ * Note: kdb_kintr needs to know our stack usage
+ */
+
+#define S_REGS 24(%rsp)
+#define S_RET 16(%rsp)
+#define S_IRQ 8(%rsp)
+#define S_IPL 0(%rsp)
+
+ENTRY(interrupt)
+#ifdef APIC
+ cmpl $255,%eax /* was this a spurious intr? */
+ jne 1f
+ ret /* if so, just return */
+1:
+#endif
+ subq $16,%rsp /* Two local variables */
+ movl %eax,S_IRQ /* save irq number */
+
+ call spl7 /* set ipl */
+ movl %eax,S_IPL /* save previous ipl */
+
+ movl S_IRQ,%ecx /* restore irq number */
+
+#if NCPUS > 1
+ cmpl $CALL_PMAP_UPDATE,%ecx /* was this a SMP pmap_update request? */
+ je _call_single
+
+ cmpl $CALL_AST_CHECK,%ecx /* was this a SMP remote -> local ast request? */
+ je _call_local_ast
+#endif
+
+#ifndef APIC
+ movl $1,%eax
+ shll %cl,%eax /* get corresponding IRQ mask */
+ orl EXT(curr_pic_mask),%eax /* add current mask */
+
+ cmpl $8,%ecx /* do we need to ack slave? */
+ jl 1f /* no, only master */
+
+ /* EOI on slave */
+ movb %ah,%al
+ outb %al,$(PIC_SLAVE_OCW) /* mask slave out */
+
+ movb $(SPECIFIC_EOI),%al /* specific EOI for this irq */
+ andb $7,%cl /* irq number for the slave */
+ orb %cl,%al /* combine them */
+ outb %al,$(PIC_SLAVE_ICW) /* ack interrupt to slave */
+
+ movb $(SPECIFIC_EOI + I_AM_SLAVE_2),%al /* specific master EOI for cascaded slave */
+ outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+
+ movl EXT(curr_pic_mask),%eax /* restore original mask */
+ movb %ah,%al
+ outb %al,$(PIC_SLAVE_OCW) /* unmask slave */
+ jmp 2f
+
+1:
+ /* EOI on master */
+ outb %al,$(PIC_MASTER_OCW) /* mask master out */
+
+ movb $(SPECIFIC_EOI),%al /* specific EOI for this irq */
+ orb %cl,%al /* combine with irq number */
+ outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+
+ movl EXT(curr_pic_mask),%eax /* restore original mask */
+ outb %al,$(PIC_MASTER_OCW) /* unmask master */
+2:
+#else
+ movl %ecx,%edi /* load irq number as 1st arg */
+ call EXT(ioapic_irq_eoi) /* ioapic irq specific EOI */
+#endif
+
+ ;
+ movq S_IPL,S_ARG1 /* previous ipl as 2nd arg */
+
+ ;
+	movq	S_RET,S_ARG2		/* return address as 3rd arg */
+
+ ;
+ movq S_REGS,S_ARG3 /* address of interrupted registers as 4th arg */
+
+ movl S_IRQ,%eax /* copy irq number */
+ shll $2,%eax /* irq * 4 */
+ movl EXT(iunit)(%rax),%edi /* get device unit number as 1st arg */
+
+ shll $1,%eax /* irq * 8 */
+ call *EXT(ivect)(%rax) /* call interrupt handler */
+
+_completed:
+ movl S_IPL,%edi /* restore previous ipl */
+ call splx_cli /* restore previous ipl */
+
+ addq $16,%rsp /* pop local variables */
+ ret
+
+#if NCPUS > 1
+_call_single:
+ call EXT(lapic_eoi) /* lapic EOI before the handler to allow extra update */
+ call EXT(pmap_update_interrupt)
+ jmp _completed
+
+_call_local_ast:
+ call EXT(lapic_eoi) /* lapic EOI */
+ call EXT(ast_check) /* AST check on this cpu */
+ jmp _completed
+
+#endif
+END(interrupt)
diff --git a/x86_64/kdasm.S b/x86_64/kdasm.S
new file mode 100644
index 0000000..e1acf39
--- /dev/null
+++ b/x86_64/kdasm.S
@@ -0,0 +1,133 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Some inline code to speed up major block copies to and from the
+ * screen buffer.
+ *
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ * All rights reserved.
+ *
+ * orc!eugene 28 Oct 1988
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* $ Header: $ */
+
+
+#include <mach/machine/asm.h>
+
+/*
+ * Function: kd_slmwd()
+ *
+ * This function "slams" a word (char/attr) into the screen memory using
+ * a block fill operation on the 386.
+ *
+ */
+
+#define start B_ARG0
+#define count B_ARG1
+#define value %dx // B_ARG2
+
+ENTRY(kd_slmwd)
+ pushq %rbp
+ movq %rsp, %rbp
+
+ # start already in %rdi
+ movq count, %rcx
+ movw value, %ax
+ cld
+ rep
+ stosw
+
+ leave
+ ret
+#undef start
+#undef count
+#undef value
+
+/*
+ * "slam up"
+ */
+
+#define from B_ARG0
+#define to B_ARG1
+#define count %edx // B_ARG2
+ENTRY(kd_slmscu)
+ pushq %rbp
+ movq %rsp, %rbp
+
+ xchgq %rsi, %rdi
+ movl count, %ecx
+ cmpq %rdi, %rsi
+ cld
+ rep
+ movsw
+
+ leave
+ ret
+
+/*
+ * "slam down"
+ */
+ENTRY(kd_slmscd)
+ pushq %rbp
+ movq %rsp, %rbp
+
+ xchgq %rsi, %rdi
+ movl count, %ecx
+ cmpq %rdi, %rsi
+ std
+ rep
+ movsw
+ cld
+
+ leave
+ ret
+#undef from
+#undef to
+#undef count
diff --git a/x86_64/ldscript b/x86_64/ldscript
new file mode 100644
index 0000000..67703b4
--- /dev/null
+++ b/x86_64/ldscript
@@ -0,0 +1,227 @@
+/* Default linker script, for normal executables */
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64",
+ "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(boot_start)
+SECTIONS
+{
+ /*
+ * There are specific requirements about entry points, so we have it
+ * configurable via `_START': `.text' will begin there and `.text.start' will
+ * be first in there. See also `i386/i386at/boothdr.S' and
+ * `gnumach_LINKFLAGS' in `i386/Makefrag.am'.
+ */
+
+ . = _START;
+ .boot : AT(_START_MAP)
+ {
+ *(.boot.text)
+ *(.boot.data)
+ } =0x90909090
+
+ . += KERNEL_MAP_SHIFT;
+ _start = .;
+ .text : AT(((ADDR(.text)) - KERNEL_MAP_SHIFT))
+ {
+ *(.text*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ *(.text.unlikely .text.*_unlikely)
+ KEEP (*(.text.*personality*))
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ } =0x90909090
+ .init : AT(((ADDR(.init)) - KERNEL_MAP_SHIFT))
+ {
+ KEEP (*(.init))
+ } =0x90909090
+ .fini : AT(((ADDR(.fini)) - KERNEL_MAP_SHIFT))
+ {
+ KEEP (*(.fini))
+ } =0x90909090
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = .);
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rela.init : { *(.rela.init) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rela.fini : { *(.rela.fini) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rela.got : { *(.rela.got) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rela.ldata : { *(.rela.ldata .rela.ldata.* .rela.gnu.linkonce.l.*) }
+ .rela.lbss : { *(.rela.lbss .rela.lbss.* .rela.gnu.linkonce.lb.*) }
+ .rela.lrodata : { *(.rela.lrodata .rela.lrodata.* .rela.gnu.linkonce.lr.*) }
+ .rela.ifunc : { *(.rela.ifunc) }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .plt : { *(.plt) *(.iplt) }
+ .rodata : AT(((ADDR(.rodata)) - KERNEL_MAP_SHIFT)) { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (24, .);
+ .got.plt : { *(.got.plt) *(.igot.plt) }
+ .data : AT(((ADDR(.data)) - KERNEL_MAP_SHIFT))
+ {
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ __bss_start = .;
+ .bss : AT(((ADDR(.bss)) - KERNEL_MAP_SHIFT))
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections.
+ FIXME: Why do we need it? When there is no .bss section, we don't
+ pad the .data section. */
+ . = ALIGN(. != 0 ? 64 / 8 : 1);
+ }
+ .lbss :
+ {
+ *(.dynlbss)
+ *(.lbss .lbss.* .gnu.linkonce.lb.*)
+ *(LARGE_COMMON)
+ }
+ . = ALIGN(64 / 8);
+ .lrodata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
+ {
+ *(.lrodata .lrodata.* .gnu.linkonce.lr.*)
+ }
+ .ldata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
+ {
+ *(.ldata .ldata.* .gnu.linkonce.l.*)
+ . = ALIGN(. != 0 ? 64 / 8 : 1);
+ }
+ . = ALIGN(64 / 8);
+ _end = .; PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
+}
diff --git a/x86_64/locore.S b/x86_64/locore.S
new file mode 100644
index 0000000..25dc15d
--- /dev/null
+++ b/x86_64/locore.S
@@ -0,0 +1,1640 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the nema IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+#include <i386/i386/proc_reg.h>
+#include <i386/i386/trap.h>
+#include <i386/i386/seg.h>
+#include <i386/i386/gdt.h>
+#include <i386/i386/ldt.h>
+#include <i386/i386/msr.h>
+#include <i386/i386/i386asm.h>
+#include <i386/i386/cpu_number.h>
+#include <i386/i386/xen.h>
+
+
+/*
+ * Helpers for thread state as saved in the pcb area, during trap or irq handling
+ */
+#define pusha \
+ pushq %rax ;\
+ pushq %rcx ;\
+ pushq %rdx ;\
+ pushq %rbx ;\
+ subq $8,%rsp ;\
+ pushq %rbp ;\
+ pushq %rsi ;\
+ pushq %rdi ;\
+ pushq %r8 ;\
+ pushq %r9 ;\
+ pushq %r10 ;\
+ pushq %r11 ;\
+ pushq %r12 ;\
+ pushq %r13 ;\
+ pushq %r14 ;\
+ pushq %r15
+
+#define popa \
+ popq %r15 ;\
+ popq %r14 ;\
+ popq %r13 ;\
+ popq %r12 ;\
+ popq %r11 ;\
+ popq %r10 ;\
+ popq %r9 ;\
+ popq %r8 ;\
+ popq %rdi ;\
+ popq %rsi ;\
+ popq %rbp ;\
+ addq $8,%rsp ;\
+ popq %rbx ;\
+ popq %rdx ;\
+ popq %rcx ;\
+ popq %rax
+
+#define PUSH_REGS_ISR \
+ pushq %rcx ;\
+ pushq %rdx ;\
+ pushq %rsi ;\
+ pushq %rdi ;\
+ pushq %r8 ;\
+ pushq %r9 ;\
+ pushq %r10 ;\
+ pushq %r11
+
+#define PUSH_AREGS_ISR \
+ pushq %rax ;\
+ PUSH_REGS_ISR
+
+
+#define POP_REGS_ISR \
+ popq %r11 ;\
+ popq %r10 ;\
+ popq %r9 ;\
+ popq %r8 ;\
+ popq %rdi ;\
+ popq %rsi ;\
+ popq %rdx ;\
+ popq %rcx
+
+#define POP_AREGS_ISR \
+ POP_REGS_ISR ;\
+ popq %rax
+
+/*
+ * Note that we have to load the kernel segment registers even if this
+ * is a trap from the kernel, because the kernel uses user segment
+ * registers for copyin/copyout.
+ * (XXX Would it be smarter just to use fs or gs for that?)
+ */
+#ifdef USER32
+#define PUSH_SEGMENTS(reg) \
+ movq %ds,reg ;\
+ pushq reg ;\
+ movq %es,reg ;\
+ pushq reg ;\
+ pushq %fs ;\
+ pushq %gs
+#else
+#define PUSH_SEGMENTS(reg)
+#endif
+
+#ifdef USER32
+#define POP_SEGMENTS(reg) \
+ popq %gs ;\
+ popq %fs ;\
+ popq reg ;\
+ movq reg,%es ;\
+ popq reg ;\
+ movq reg,%ds
+#else
+#define POP_SEGMENTS(reg)
+#endif
+
+#ifdef USER32
+#define PUSH_SEGMENTS_ISR(reg) \
+ movq %ds,reg ;\
+ pushq reg ;\
+ movq %es,reg ;\
+ pushq reg ;\
+ pushq %fs ;\
+ pushq %gs
+#else
+#define PUSH_SEGMENTS_ISR(reg)
+#endif
+
+#ifdef USER32
+#define POP_SEGMENTS_ISR(reg) \
+ popq %gs ;\
+ popq %fs ;\
+ popq reg ;\
+ movq reg,%es ;\
+ popq reg ;\
+ movq reg,%ds
+#else
+#define POP_SEGMENTS_ISR(reg)
+#endif
+
+#ifdef USER32
+#define SET_KERNEL_SEGMENTS(reg) \
+ mov %ss,reg /* switch to kernel segments */ ;\
+ mov reg,%ds /* (same as kernel stack segment) */ ;\
+ mov reg,%es ;\
+ mov reg,%fs ;\
+ mov $(PERCPU_DS),reg ;\
+ mov reg,%gs
+#else
+#define SET_KERNEL_SEGMENTS(reg)
+#endif
+
+/*
+ * Fault recovery.
+ */
+#define RECOVER_TABLE_START \
+ .text 2 ;\
+DATA(recover_table) ;\
+ .text
+
+#define RECOVER(addr) \
+ .text 2 ;\
+ .quad 9f ;\
+ .quad addr ;\
+ .text ;\
+9:
+
+#define RECOVER_TABLE_END \
+ .text 2 ;\
+ .globl EXT(recover_table_end) ;\
+LEXT(recover_table_end) ;\
+ .text
+
+/*
+ * Retry table for certain successful faults.
+ */
+#define RETRY_TABLE_START \
+ .text 3 ;\
+DATA(retry_table) ;\
+ .text
+
+#define RETRY(addr) \
+ .text 3 ;\
+ .quad 9f ;\
+ .quad addr ;\
+ .text ;\
+9:
+
+#define RETRY_TABLE_END \
+ .text 3 ;\
+ .globl EXT(retry_table_end) ;\
+LEXT(retry_table_end) ;\
+ .text
+
+/*
+ * Allocate recovery and retry tables.
+ */
+ RECOVER_TABLE_START
+ RETRY_TABLE_START
+
+/*
+ * Timing routines.
+ */
+#if STAT_TIME
+
+#define TIME_TRAP_UENTRY
+#define TIME_TRAP_SENTRY
+#define TIME_TRAP_UEXIT
+#define TIME_INT_ENTRY
+#define TIME_INT_EXIT
+
+#else /* microsecond timing */
+
+/*
+ * Microsecond timing.
+ * Assumes a free-running microsecond counter.
+ * no TIMER_MAX check needed.
+ */
+
+/*
+ * There is only one current time-stamp per CPU, since only
+ * the time-stamp in the current timer is used.
+ * To save time, we allocate the current time-stamps here.
+ */
+ .comm EXT(current_tstamp), 4*NCPUS
+
+/*
+ * Update time on user trap entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %eax, %ebx, %ecx.
+ */
+#define TIME_TRAP_UENTRY \
+ pushf /* Save flags */ ;\
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(EXT(current_timer),%rdx) /* make it current */ ;\
+ popf /* allow interrupts */
+
+/*
+ * Update time on system call entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ * Same as TIME_TRAP_UENTRY, but preserves %eax.
+ */
+#define TIME_TRAP_SENTRY \
+ pushf /* Save flags */ ;\
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ pushq %rax /* save %rax */ ;\
+ call timer_normalize /* normalize timer */ ;\
+ popq %rax /* restore %rax */ ;\
+0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(EXT(current_timer),%rdx) /* make it current */ ;\
+ popf /* allow interrupts */
+
+/*
+ * update time on user trap exit.
+ * 10 instructions.
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ */
+#define TIME_TRAP_UEXIT \
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* skip if no overflow, */ ;\
+ call timer_normalize /* else normalize timer */ ;\
+0: addl $(TH_USER_TIMER-TH_SYSTEM_TIMER),%ecx ;\
+ /* switch to user timer */;\
+ movl %ecx,CX(EXT(current_timer),%rdx) /* make it current */
+
+/*
+ * update time on interrupt entry.
+ * 9 instructions.
+ * Assumes CPU number in %edx.
+ * Leaves old timer in %ebx.
+ * Uses %ecx.
+ */
+#define TIME_INT_ENTRY \
+ movl VA_ETC,%ecx /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%rdx),%ebx /* get old time stamp */;\
+ movl %ecx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
+ subl %ebx,%ecx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%rdx),%ebx /* get current timer */ ;\
+ addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\
+ leal CX(0,%rdx),%ecx /* NOTE(review): result is overwritten by the next lea - appears dead; confirm and drop */ ;\
+ lea CX(EXT(kernel_timer),%rdx),%ecx /* get interrupt timer*/;\
+ movl %ecx,CX(EXT(current_timer),%rdx) /* set timer */
+
+/*
+ * update time on interrupt exit.
+ * 11 instructions
+ * Assumes CPU number in %edx, old timer in %ebx.
+ * Uses %eax, %ecx.
+ */
+#define TIME_INT_EXIT \
+ movl VA_ETC,%eax /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %eax,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
+ subl %ecx,%eax /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* skip if no overflow, */ ;\
+ call timer_normalize /* else normalize timer */ ;\
+0: testb $0x80,LOW_BITS+3(%ebx) /* old timer overflow? */;\
+ jz 0f /* skip if old timer did not overflow, */ ;\
+ movl %ebx,%ecx /* else get old timer */ ;\
+ call timer_normalize /* and normalize it */ ;\
+0: movl %ebx,CX(EXT(current_timer),%rdx) /* set timer */
+
+
+/*
+ * Normalize timer in ecx.
+ * Preserves edx; clobbers eax.
+ */
+ .align 2
+timer_high_unit:
+ .long TIMER_HIGH_UNIT /* div has no immediate opnd */
+
+timer_normalize:
+ pushq %rdx /* save register */
+ xorl %edx,%edx /* clear divisor high */
+ movl LOW_BITS(%ecx),%eax /* get divisor low */
+ divl timer_high_unit,%eax /* quotient in eax */
+ /* remainder in edx */
+ addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
+ movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */
+ addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */
+ popq %rdx /* restore register */
+ ret
+
+/*
+ * Switch to a new timer.
+ */
+ENTRY(timer_switch)
+ CPU_NUMBER(%edx) /* get this CPU */
+ /* Charge the elapsed time to the outgoing timer, then install the
+ * new one. Fix: the original loaded new stamp into %ecx and old
+ * into %eax, then did subl %ecx,%eax = old-new, i.e. a NEGATIVE
+ * elapsed time, inconsistent with TIME_TRAP_UEXIT/TIME_INT_ENTRY/
+ * TIME_INT_EXIT which all compute new-old. Register roles are
+ * swapped here so elapsed = new-old lands in %eax. */
+ movl VA_ETC,%eax /* get new time stamp */
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */
+ movl %eax,CX(EXT(current_tstamp),%rdx) /* set new time stamp */
+ subl %ecx,%eax /* elapsed = new - old */
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */
+ jns 0f /* skip if no overflow, */
+ call timer_normalize /* else normalize timer */
+0:
+ movl S_ARG0,%ecx /* get new timer */
+ movl %ecx,CX(EXT(current_timer),%rdx) /* set timer */
+ ret
+
+/*
+ * Initialize the first timer for a CPU.
+ */
+ENTRY(start_timer)
+ CPU_NUMBER(%edx) /* get this CPU */
+ movl VA_ETC,%ecx /* get timer */
+ movl %ecx,CX(EXT(current_tstamp),%rdx) /* set initial time stamp */
+ movl S_ARG0,%ecx /* get timer */
+ movl %ecx,CX(EXT(current_timer),%rdx) /* set initial timer */
+ /* no elapsed time is charged here; this only seeds the stamp */
+ ret
+
+#endif /* accurate timing */
+
+/* */
+
+/*
+ * Trap/interrupt entry points.
+ *
+ * All traps must create the i386_saved_state struct on the stack on
+ * entry. Note that:
+ * - CR2 is only used if the trap is a page fault
+ * - user_rsp/user_ss are only used if entering from user space
+ * - v86_regs are used only from V86 threads
+ * (TODO check if V86 is still used with USER32)
+ *
+ * Depending the CPL before entry, the stack might be switched or not;
+ * if entering from user-space the CPU loads TSS->RSP0 in RSP,
+ * otherwise RSP is unchanged. After this, the cpu pushes
+ * SS/RSP/RFLAFS/CS/RIP and optionally ErrorCode and executes the handler.
+ */
+
+/* Try to save/show some information when a double fault happens
+ * We can't recover to a working state, so if we have a debugger wait for it,
+ * otherwise reset */
+ENTRY(t_dbl_fault)
+ INT_FIX
+ cli /* disable interrupts that might corrupt the state*/
+ pusha /* NOTE: presumably gnumach's x86_64 push-all macro, not the i386 insn - confirm */
+ movq %cr2,%rax
+ movq %rax,R_CR2-R_R15(%rsp) /* CR2 might contain the faulting address */
+ subq $48,%rsp // FIXME remove when segments are cleaned up
+ movq %rsp,%rdi /* pass the saved state */
+ call handle_double_fault
+ jmp cpu_shutdown /* reset */
+END(t_dbl_fault)
+
+/*
+ * General protection or segment-not-present fault.
+ * Check for a GP/NP fault in the kernel_return
+ * sequence; if there, report it as a GP/NP fault on the user's instruction.
+ *
+ * rsp-> 0: trap code (NP or GP)
+ * 8: segment number in error
+ * 16 eip
+ * 24 cs
+ * 32 eflags
+ * 40 old registers (trap is from kernel)
+ */
+ENTRY(t_gen_prot)
+ INT_FIX
+ pushq $(T_GENERAL_PROTECTION) /* indicate fault type */
+ jmp trap_check_kernel_exit /* check for kernel exit sequence */
+
+ENTRY(t_segnp)
+ INT_FIX
+ pushq $(T_SEGMENT_NOT_PRESENT)
+ /* indicate fault type */
+
+trap_check_kernel_exit:
+#ifdef USER32
+ testq $(EFL_VM),32(%rsp) /* is trap from V86 mode? */
+ jnz EXT(alltraps) /* isn`t kernel trap if so */
+#endif
+ /* Note: handling KERNEL_RING value by hand */
+ testq $2,24(%rsp) /* is trap from kernel mode? */
+ jnz EXT(alltraps) /* from user: ordinary trap */
+ /* check for the kernel exit sequence */
+ cmpq $_kret_iret,16(%rsp) /* on IRET? */
+ je fault_iret
+#ifdef USER32
+ cmpq $_kret_popl_ds,16(%rsp) /* popping DS? */
+ je fault_popl_ds
+ cmpq $_kret_popl_es,16(%rsp) /* popping ES? */
+ je fault_popl_es
+ cmpq $_kret_popl_fs,16(%rsp) /* popping FS? */
+ je fault_popl_fs
+ cmpq $_kret_popl_gs,16(%rsp) /* popping GS? */
+ je fault_popl_gs
+#endif
+take_fault: /* if none of the above: */
+ jmp EXT(alltraps) /* treat as normal trap. */
+
+/*
+ * GP/NP fault on IRET: CS or SS is in error.
+ * All registers contain the user's values.
+ *
+ * on SP is
+ * 0 trap number
+ * 8 errcode
+ * 16 eip
+ * 24 cs --> trapno
+ * 32 efl --> errcode
+ * 40 user eip
+ * 48 user cs
+ * 56 user eflags
+ * 64 user rsp
+ * 72 user ss
+ */
+fault_iret:
+ movq %rax,16(%rsp) /* save eax (we don`t need saved eip) */
+ popq %rax /* get trap number */
+ movq %rax,24-8(%rsp) /* put in user trap number */
+ popq %rax /* get error code */
+ movq %rax,32-16(%rsp) /* put in user errcode */
+ popq %rax /* restore eax */
+ jmp EXT(alltraps) /* take fault */
+
+#ifdef USER32
+/*
+ * Fault restoring a segment register. The user's registers are still
+ * saved on the stack. The offending segment register has not been
+ * popped.
+ */
+fault_popl_ds:
+ popq %rax /* get trap number */
+ popq %rdx /* get error code */
+ addq $24,%rsp /* pop stack to user regs */
+ jmp push_es /* (DS on top of stack) */
+fault_popl_es:
+ popq %rax /* get trap number */
+ popq %rdx /* get error code */
+ addq $24,%rsp /* pop stack to user regs */
+ jmp push_fs /* (ES on top of stack) */
+fault_popl_fs:
+ popq %rax /* get trap number */
+ popq %rdx /* get error code */
+ addq $24,%rsp /* pop stack to user regs */
+ jmp push_gs /* (FS on top of stack) */
+fault_popl_gs:
+ popq %rax /* get trap number */
+ popq %rdx /* get error code */
+ addq $24,%rsp /* pop stack to user regs */
+ jmp push_segregs /* (GS on top of stack) */
+
+push_es:
+ movq %es,%rcx /* (push %es is invalid in 64-bit mode) */
+ pushq %rcx /* restore es, */
+push_fs:
+ pushq %fs /* restore fs, */
+push_gs:
+ pushq %gs /* restore gs. */
+push_gsbase:
+ pushq $0
+ pushq $0
+#endif
+push_segregs:
+ movq %rax,R_TRAPNO(%rsp) /* set trap number */
+ movq %rdx,R_ERR(%rsp) /* set error code */
+ jmp trap_set_segs /* take trap */
+
+/*
+ * Debug trap. Check for single-stepping across system call into
+ * kernel. If this is the case, taking the debug trap has turned
+ * off single-stepping - save the flags register with the trace
+ * bit set.
+ */
+ENTRY(t_debug)
+ INT_FIX
+#ifdef USER32
+ testq $(EFL_VM),16(%rsp) /* is trap from V86 mode? */
+ jnz 0f /* isn`t kernel trap if so */
+#endif
+ /* Note: handling KERNEL_RING value by hand */
+ testq $2,8(%rsp) /* is trap from kernel mode? */
+ jnz 0f /* from user: normal debug trap */
+#ifdef USER32
+ cmpq $syscall_entry,(%rsp) /* system call entry? */
+ jne 0f /* not syscall entry: normal debug trap */
+ /* flags are sitting where syscall */
+ /* wants them */
+ addq $32,%rsp /* remove eip/cs */
+ jmp syscall_entry_2 /* continue system call entry */
+#else
+ // TODO: implement the 64-bit case
+ ud2
+#endif
+0: pushq $0 /* otherwise: */
+ pushq $(T_DEBUG) /* handle as normal */
+ jmp EXT(alltraps) /* debug fault */
+
+/*
+ * Page fault traps save cr2.
+ */
+ENTRY(t_page_fault)
+ INT_FIX
+ pushq $(T_PAGE_FAULT) /* mark a page fault trap */
+ pusha /* save the general registers */
+#ifdef MACH_XEN
+ movq %ss:hyp_shared_info+CR2,%rax
+#else /* MACH_XEN */
+ movq %cr2,%rax /* get the faulting address */
+#endif /* MACH_XEN */
+ movq %rax,R_CR2-R_R15(%rsp) /* save in rsp save slot */
+ jmp trap_push_segs /* continue fault */
+
+/*
+ * All 'exceptions' enter here with:
+ * rsp-> trap number
+ * error code
+ * old eip
+ * old cs
+ * old eflags
+ * old rsp if trapped from user
+ * old ss if trapped from user
+ */
+ENTRY(alltraps)
+ pusha /* save the general registers */
+trap_push_segs:
+ PUSH_SEGMENTS(%rax) /* and the segment registers */
+ SET_KERNEL_SEGMENTS(%rax) /* switch to kernel data segment */
+trap_set_segs:
+ cld /* clear direction flag */
+#ifdef USER32
+ testl $(EFL_VM),R_EFLAGS(%rsp) /* in V86 mode? */
+ jnz trap_from_user /* user mode trap if so */
+#endif
+ /* Note: handling KERNEL_RING value by hand */
+ testb $2,R_CS(%rsp) /* user mode trap? */
+ jz trap_from_kernel /* kernel trap if not */
+trap_from_user:
+
+ CPU_NUMBER(%edx)
+ TIME_TRAP_UENTRY
+
+ movq CX(EXT(kernel_stack),%rdx),%rbx
+ xchgq %rbx,%rsp /* switch to kernel stack */
+ /* user regs pointer already set */
+_take_trap:
+ movq %rbx,%rdi /* pass register save area to trap */
+ call EXT(user_trap) /* call user trap routine */
+#ifdef USER32
+ orq %rax,%rax /* emulated syscall? */
+ jz 1f /* no, just return */
+ movq R_EAX(%rbx),%rax /* yes, get syscall number */
+ jmp syscall_entry_3 /* and emulate it */
+#endif
+1:
+ movq (%rsp),%rsp /* switch back to PCB stack */
+
+/*
+ * Return from trap or system call, checking for ASTs.
+ * On PCB stack.
+ */
+
+_return_from_trap:
+ CPU_NUMBER(%edx)
+ cmpl $0,CX(EXT(need_ast),%rdx)
+ jz _return_to_user /* if we need an AST: */
+
+ movq CX(EXT(kernel_stack),%rdx),%rsp
+ /* switch to kernel stack */
+ call EXT(i386_astintr) /* take the AST */
+ popq %rsp /* switch back to PCB stack */
+ jmp _return_from_trap /* and check again (rare) */
+ /* ASTs after this point will */
+ /* have to wait */
+
+_return_to_user:
+ TIME_TRAP_UEXIT
+
+/*
+ * Return from kernel mode to interrupted thread.
+ */
+
+_return_from_kernel:
+#ifdef USER32
+_kret_popl_gs:
+ popq %gs /* restore segment registers */
+_kret_popl_fs:
+ popq %fs
+_kret_popl_es:
+ popq %rax
+ movq %rax,%es
+_kret_popl_ds:
+ popq %rax
+ movq %rax,%ds
+#endif
+ popa /* restore general registers */
+ addq $16,%rsp /* discard trap number and error code */
+_kret_iret:
+ iretq /* return from interrupt */
+
+
+/*
+ * Trap from kernel mode. No need to switch stacks.
+ */
+trap_from_kernel:
+#if MACH_KDB || MACH_TTD
+ movq %rsp,%rbx /* save current stack */
+ movq %rsp,%rdx /* on an interrupt stack? */
+
+ CPU_NUMBER(%ecx)
+ and $(~(INTSTACK_SIZE-1)),%rdx /* round down to stack base */
+ cmpq CX(EXT(int_stack_base),%rcx),%rdx
+ je 1f /* OK if so */
+
+ movl %ecx,%edx /* cpu number into %edx for CX() */
+ cmpq CX(EXT(kernel_stack),%rdx),%rsp
+ /* already on kernel stack? */
+ ja 0f /* above its top: switch - TODO confirm */
+ cmpq MY(ACTIVE_STACK),%rsp
+ ja 1f /* switch if not */
+0:
+ movq CX(EXT(kernel_stack),%rdx),%rsp
+1:
+ pushq %rbx /* save old stack */
+ movq %rbx,%rdi /* pass as parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+
+ popq %rsp /* return to old stack */
+#else /* MACH_KDB || MACH_TTD */
+
+ movq %rsp,%rdi /* pass parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+
+#endif /* MACH_KDB || MACH_TTD */
+
+ jmp _return_from_kernel
+
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from an exception.
+ */
+
+ENTRY(thread_exception_return)
+ENTRY(thread_bootstrap_return)
+ movq %rsp,%rcx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%rcx /* round up to stack top */
+ movq -7-IKS_SIZE(%rcx),%rsp /* switch back to PCB stack */
+ jmp _return_from_trap
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from a syscall.
+ * Takes the syscall's return code as an argument.
+ */
+
+ENTRY(thread_syscall_return)
+ movq S_ARG0,%rax /* get return value */
+ movq %rsp,%rcx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%rcx /* round up to stack top */
+ movq -7-IKS_SIZE(%rcx),%rsp /* switch back to PCB stack */
+ movq %rax,R_EAX(%rsp) /* save return value */
+ jmp _return_from_trap
+
+ENTRY(call_continuation)
+ movq S_ARG0,%rax /* get continuation */
+ movq %rsp,%rcx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%rcx /* round up to stack top */
+ addq $(-7-IKS_SIZE),%rcx
+ movq %rcx,%rsp /* pop the stack */
+ xorq %rbp,%rbp /* zero frame pointer */
+ pushq $0 /* Dummy return address */
+ jmp *%rax /* goto continuation */
+
+/* IOAPIC has 24 interrupts, put spurious in the same array */
+
+#define INTERRUPT(n) \
+ .data 2 ;\
+ .quad 0f /* record stub address in int_entry_table */ ;\
+ .text ;\
+ P2ALIGN(TEXT_ALIGN) ;\
+0: ;\
+ INT_FIX ;\
+ pushq %rax ;\
+ movq $(n),%rax /* interrupt number for all_intrs */ ;\
+ jmp EXT(all_intrs)
+
+ .data 2
+DATA(int_entry_table)
+ .text
+/* Legacy APIC interrupts or PIC interrupts */
+INTERRUPT(0)
+INTERRUPT(1)
+INTERRUPT(2)
+INTERRUPT(3)
+INTERRUPT(4)
+INTERRUPT(5)
+INTERRUPT(6)
+INTERRUPT(7)
+INTERRUPT(8)
+INTERRUPT(9)
+INTERRUPT(10)
+INTERRUPT(11)
+INTERRUPT(12)
+INTERRUPT(13)
+INTERRUPT(14)
+INTERRUPT(15)
+#ifdef APIC
+/* APIC PCI interrupts PIRQ A-H */
+INTERRUPT(16)
+INTERRUPT(17)
+INTERRUPT(18)
+INTERRUPT(19)
+INTERRUPT(20)
+INTERRUPT(21)
+INTERRUPT(22)
+INTERRUPT(23)
+#endif
+#if NCPUS > 1
+INTERRUPT(CALL_AST_CHECK)
+INTERRUPT(CALL_PMAP_UPDATE)
+#endif
+#ifdef APIC
+/* Spurious interrupt, set irq number to vect number */
+INTERRUPT(255)
+#endif
+
+/* XXX handle NMI - at least print a warning like Linux does. */
+
+/*
+ * All interrupts enter here. The cpu might have loaded a new RSP,
+ * depending on the previous CPL, as in alltraps.
+ * Old %eax on stack, interrupt number in %eax; we need to fill the remaining
+ * fields of struct i386_interrupt_state, which might be in the pcb or in the
+ * interrupt stack.
+ */
+ENTRY(all_intrs)
+ PUSH_REGS_ISR /* save registers */
+ cld /* clear direction flag */
+
+ PUSH_SEGMENTS_ISR(%rdx) /* save segment registers */
+
+ CPU_NUMBER_NO_GS(%ecx)
+ movq %rsp,%rdx /* on an interrupt stack? */
+ and $(~(INTSTACK_SIZE-1)),%rdx
+ cmpq %ss:CX(EXT(int_stack_base),%rcx),%rdx
+ je int_from_intstack /* already on it: nested interrupt */
+
+ SET_KERNEL_SEGMENTS(%rdx) /* switch to kernel segments */
+
+ CPU_NUMBER(%edx)
+
+ movq CX(EXT(int_stack_top),%rdx),%rcx
+
+ xchgq %rcx,%rsp /* switch to interrupt stack */
+
+#if STAT_TIME
+ pushq %rcx /* save pointer to old stack */
+#else
+ pushq %rbx /* save %ebx - out of the way */
+ /* so stack looks the same */
+ pushq %rcx /* save pointer to old stack */
+ TIME_INT_ENTRY /* do timing */
+#endif
+
+#ifdef MACH_LDEBUG
+ incl CX(EXT(in_interrupt),%rdx)
+#endif
+
+ call EXT(interrupt) /* call generic interrupt routine */
+ .globl EXT(return_to_iret) /* ( label for kdb_kintr and hardclock */
+LEXT(return_to_iret) /* to find the return from calling interrupt) */
+
+ CPU_NUMBER(%edx)
+#ifdef MACH_LDEBUG
+ decl CX(EXT(in_interrupt),%rdx)
+#endif
+
+#if STAT_TIME
+#else
+ TIME_INT_EXIT /* do timing */
+ movq 8(%rsp),%rbx /* restore the extra reg we saved */
+#endif
+
+ popq %rsp /* switch back to old stack */
+
+#ifdef USER32
+ testl $(EFL_VM),I_EFL(%rsp) /* if in V86 */
+ jnz 0f /* or */
+#endif
+ /* Note: handling KERNEL_RING value by hand */
+ testb $2,I_CS(%rsp) /* user mode, */
+ jz 1f /* check for ASTs */
+0:
+ cmpq $0,CX(EXT(need_ast),%rdx)
+ jnz ast_from_interrupt /* take it if so */
+1:
+ POP_SEGMENTS_ISR(%rdx) /* restore segment regs */
+ POP_AREGS_ISR /* restore registers */
+
+ iretq /* return to caller */
+
+int_from_intstack:
+ CPU_NUMBER_NO_GS(%edx)
+ cmpq CX(EXT(int_stack_base),%rdx),%rsp /* seemingly looping? */
+ jb stack_overflowed /* below base: overflow */
+ call EXT(interrupt) /* call interrupt routine */
+_return_to_iret_i: /* ( label for kdb_kintr) */
+ POP_SEGMENTS_ISR(%rdx)
+ POP_AREGS_ISR /* restore registers */
+ /* no ASTs */
+
+ iretq
+
+stack_overflowed:
+ ud2 /* interrupt stack exhausted: die loudly */
+
+/*
+ * Take an AST from an interrupt.
+ * On PCB stack.
+ * sp-> gs -> edx
+ * fs -> ecx
+ * es -> eax
+ * ds -> trapno
+ * edx -> code
+ * ecx
+ * eax
+ * eip
+ * cs
+ * efl
+ * rsp
+ * ss
+ */
+ast_from_interrupt:
+ POP_SEGMENTS_ISR(%rdx) /* restore all registers ... */
+ POP_AREGS_ISR
+ pushq $0 /* zero code */
+ pushq $0 /* zero trap number */
+ pusha /* save general registers */
+ PUSH_SEGMENTS_ISR(%rdx) /* save segment registers */
+ SET_KERNEL_SEGMENTS(%rdx) /* switch to kernel segments */
+ CPU_NUMBER(%edx)
+ TIME_TRAP_UENTRY
+
+ movq CX(EXT(kernel_stack),%rdx),%rsp
+ /* switch to kernel stack */
+ call EXT(i386_astintr) /* take the AST */
+ popq %rsp /* back to PCB stack */
+ jmp _return_from_trap /* return */
+
+#if MACH_KDB
+/*
+ * kdb_kintr: enter kdb from keyboard interrupt.
+ * Chase down the stack frames until we find one whose return
+ * address is the interrupt handler. At that point, we have:
+ *
+ * frame-> saved %rbp
+ * return address in interrupt handler
+ * saved SPL
+ * saved IRQ
+ * return address == return_to_iret_i
+ * saved %r11
+ * saved %r10
+ * saved %r9
+ * saved %r8
+ * saved %rdx
+ * saved %rcx
+ * saved %rax
+ * saved %rip
+ * saved %cs
+ * saved %rfl
+ *
+ * OR:
+ * frame-> saved %rbp
+ * return address in interrupt handler
+ * return address == return_to_iret
+ * pointer to save area on old stack
+ * [ saved %ebx, if accurate timing ]
+ *
+ * old stack: saved %gs
+ * saved %fs
+ * saved %es
+ * saved %ds
+ * saved %r11
+ * saved %r10
+ * saved %r9
+ * saved %r8
+ * saved %rdi
+ * saved %rsi
+ * saved %rdx
+ * saved %rcx
+ * saved %eax
+ * saved %rip
+ * saved %cs
+ * saved %rfl
+ *
+ * Call kdb, passing it that register save area.
+ */
+
+#define RET_OFFSET 32 /* offset of return address within a frame */
+
+
+ENTRY(kdb_kintr)
+ movq %rbp,%rax /* save caller`s frame pointer */
+ movq $EXT(return_to_iret),%rcx /* interrupt return address 1 */
+ movq $_return_to_iret_i,%rdx /* interrupt return address 2 */
+
+0: cmpq RET_OFFSET(%rax),%rcx /* does this frame return to */
+ /* interrupt handler (1)? */
+ je 1f
+ cmpq RET_OFFSET(%rax),%rdx /* interrupt handler (2)? */
+ je 2f /* if not: */
+ movq (%rax),%rax /* try next frame */
+ testq %rax,%rax /* stop at NULL frame link */
+ jnz 0b
+ ud2 /* oops, didn't find frame, fix me :/ */
+
+1: movq $kdb_from_iret,RET_OFFSET(%rax) /* redirect the return address */
+ ret /* returns to kernel/user stack */
+
+2: movq $kdb_from_iret_i,RET_OFFSET(%rax)
+ /* returns to interrupt stack */
+ ret
+
+/*
+ * On return from keyboard interrupt, we will execute
+ * kdb_from_iret_i
+ * if returning to an interrupt on the interrupt stack
+ * kdb_from_iret
+ * if returning to an interrupt on the user or kernel stack
+ */
+kdb_from_iret:
+ /* save regs in known locations */
+#if STAT_TIME
+ pushq %rbx /* caller`s %ebx is in reg */
+#else
+ movq 8(%rsp),%rax /* get caller`s %ebx */
+ pushq %rax /* push on stack */
+#endif
+ pushq %rbp
+ movq %rsp,%rdi /* pass regs */
+ call EXT(kdb_kentry) /* to kdb */
+ popq %rbp
+#if STAT_TIME
+ popq %rbx
+#else
+ popq %rax
+ movq %rax,8(%rsp)
+#endif
+ jmp EXT(return_to_iret) /* normal interrupt return */
+
+kdb_from_iret_i: /* on interrupt stack */
+ pop %rdx /* restore saved registers */
+ pop %rcx
+ pop %rax
+ pushq $0 /* zero error code */
+ pushq $0 /* zero trap number */
+ pusha /* save general registers */
+ PUSH_SEGMENTS(%rdx) /* save segment registers */
+ movq %rsp,%rdx /* pass regs, */
+ movq $0,%rsi /* code, */
+ movq $-1,%rdi /* type to kdb */
+ call EXT(kdb_trap)
+ POP_SEGMENTS(%rdx) /* restore segment registers */
+ popa /* restore general registers */
+ addq $16,%rsp /* discard trapno/errcode */
+
+// TODO: test it before dropping ud2
+movq (%rsp),%rax
+ud2
+ iretq
+
+#endif /* MACH_KDB */
+
+#if MACH_TTD
+/*
+ * Same code as that above for the keyboard entry into kdb.
+ */
+ENTRY(kttd_intr)
+// TODO: test it before dropping ud2
+ud2
+ movq %rbp,%rax /* save caller`s frame pointer */
+ movq $EXT(return_to_iret),%rcx /* interrupt return address 1 */
+ movq $_return_to_iret_i,%rdx /* interrupt return address 2 */
+
+0: cmpq 32(%rax),%rcx /* does this frame return to */
+ /* interrupt handler (1)? */
+ je 1f
+ cmpq 32(%rax),%rdx /* interrupt handler (2)? */
+ je 2f /* if not: */
+ movq (%rax),%rax /* try next frame */
+ jmp 0b /* NOTE(review): no NULL-frame check, unlike kdb_kintr */
+
+1: movq $ttd_from_iret,32(%rax) /* returns to kernel/user stack */
+ ret
+
+2: movq $ttd_from_iret_i,32(%rax)
+ /* returns to interrupt stack */
+ ret
+
+/*
+ * On return from keyboard interrupt, we will execute
+ * ttd_from_iret_i
+ * if returning to an interrupt on the interrupt stack
+ * ttd_from_iret
+ * if returning to an interrupt on the user or kernel stack
+ */
+ttd_from_iret:
+ /* save regs in known locations */
+#if STAT_TIME
+ pushq %rbx /* caller`s %ebx is in reg */
+#else
+ movq 8(%rsp),%rax /* get caller`s %ebx */
+ pushq %rax /* push on stack */
+#endif
+ pushq %rbp
+ pushq %rsi
+ pushq %rdi
+ movq %rsp,%rdi /* pass regs */
+ call _kttd_netentry /* to ttd */
+ popq %rdi /* restore registers */
+ popq %rsi
+ popq %rbp
+#if STAT_TIME
+ popq %rbx
+#else
+ popq %rax
+ movq %rax,8(%rsp)
+#endif
+ jmp EXT(return_to_iret) /* normal interrupt return */
+
+ttd_from_iret_i: /* on interrupt stack */
+ pop %rdx /* restore saved registers */
+ pop %rcx
+ pop %rax
+ pushq $0 /* zero error code */
+ pushq $0 /* zero trap number */
+ pusha /* save general registers */
+ PUSH_SEGMENTS_ISR(%rdx) /* save segment registers */
+ ud2 // TEST it
+ movq %rsp,%rdx /* pass regs, */
+ movq $0,%rsi /* code, */
+ movq $-1,%rdi /* type to kdb */
+ call _kttd_trap
+ POP_SEGMENTS_ISR(%rdx) /* restore segment registers */
+ popa /* restore general registers */
+ addq $16,%rsp /* discard trapno/errcode */
+
+// TODO: test it before dropping ud2
+movq (%rsp),%rax
+ud2
+ iretq
+
+#endif /* MACH_TTD */
+
+#ifdef USER32
+/*
+ * System call enters through a call gate. Flags are not saved -
+ * we must shuffle stack to look like trap save area.
+ *
+ * rsp-> old eip
+ * old cs
+ * old rsp
+ * old ss
+ *
+ * eax contains system call number.
+ */
+ENTRY(syscall)
+syscall_entry:
+ pushf /* save flags as soon as possible */
+syscall_entry_2:
+ cld /* clear direction flag */
+
+ pushq %rax /* save system call number */
+ pushq $0 /* clear trap number slot */
+
+ pusha /* save the general registers */
+ PUSH_SEGMENTS(%rdx) /* and the segment registers */
+ SET_KERNEL_SEGMENTS(%rdx) /* switch to kernel data segment */
+
+/*
+ * Shuffle eflags,eip,cs into proper places
+ */
+
+ movq R_EIP(%rsp),%rbx /* eflags are in EIP slot */
+ movq R_CS(%rsp),%rcx /* eip is in CS slot */
+ movq R_EFLAGS(%rsp),%rdx /* cs is in EFLAGS slot */
+ movq %rcx,R_EIP(%rsp) /* fix eip */
+ movq %rdx,R_CS(%rsp) /* fix cs */
+ movq %rbx,R_EFLAGS(%rsp) /* fix eflags */
+
+ CPU_NUMBER_NO_STACK(%edx)
+ TIME_TRAP_SENTRY
+
+ movq CX(EXT(kernel_stack),%rdx),%rbx
+ /* get current kernel stack */
+ xchgq %rbx,%rsp /* switch stacks - %ebx points to */
+ /* user registers. */
+ /* user regs pointer already set */
+
+/*
+ * Check for MACH or emulated system call
+ */
+syscall_entry_3:
+ movq MY(ACTIVE_THREAD),%rdx
+ /* point to current thread */
+ movq TH_TASK(%rdx),%rdx /* point to task */
+ movq TASK_EMUL(%rdx),%rdx /* get emulation vector */
+ orq %rdx,%rdx /* if none, */
+ je syscall_native /* do native system call */
+ movq %rax,%rcx /* copy system call number */
+ subq DISP_MIN(%rdx),%rcx /* get displacement into syscall */
+ /* vector table */
+ jl syscall_native /* too low - native system call */
+ cmpq DISP_COUNT(%rdx),%rcx /* check range */
+ jnl syscall_native /* too high - native system call */
+ movq DISP_VECTOR(%rdx,%rcx,4),%rdx
+ /* get the emulation vector */
+ orq %rdx,%rdx /* emulated system call if not zero */
+ jnz syscall_emul
+
+/*
+ * Native system call.
+ */
+syscall_native:
+ negl %eax /* get system call number */
+ jl mach_call_range /* out of range if it was positive */
+ cmpl EXT(mach_trap_count),%eax /* check system call table bounds */
+ jg mach_call_range /* error if out of range */
+#if 0 /* debug hack to show the syscall number on the screen */
+ movb %al,%dl
+ shrb $4,%dl
+ orb $0x30,%dl
+ movb $0x0f,%dh
+ movw %dx,0xb800a
+ movb %al,%dl
+ andb $0xf,%dl
+ orb $0x30,%dl
+ movb $0xf,%dh
+ movw %dx,0xb800c
+#endif
+ shll $5,%eax /* manual indexing of mach_trap_t */
+ xorq %r10,%r10 /* clear before loading arg count */
+ mov EXT(mach_trap_table)(%rax),%r10
+ /* get number of arguments */
+ andq %r10,%r10
+ jz mach_call_call /* skip argument copy if none */
+
+ movq $USER_DS,%rdx /* use user data segment for accesses */
+ mov %dx,%fs
+ movq %rsp,%r11 /* save kernel ESP for error recovery */
+
+ movq R_UESP(%rbx),%rbp /* get user stack pointer */
+ addq $4,%rbp /* Skip user return address */
+
+#define PARAM(reg,ereg) \
+ xorq %reg,%reg ;\
+ RECOVER(mach_call_addr_push) \
+ movl %fs:(%rbp),%ereg /* fetch next parameter */ ;\
+ addq $4,%rbp ;\
+ dec %r10 ;\
+ jz mach_call_call
+
+ PARAM(rdi,edi) /* 1st parameter */
+ PARAM(rsi,esi) /* 2nd parameter */
+ PARAM(rdx,edx) /* 3rd parameter */
+ PARAM(rcx,ecx) /* 4th parameter */
+ PARAM(r8,r8d) /* 5th parameter */
+ PARAM(r9,r9d) /* 6th parameter */
+
+ lea (%rbp,%r10,4),%rbp /* point past last argument */
+ xorq %r12,%r12
+
+0: subq $4,%rbp
+ RECOVER(mach_call_addr_push)
+ movl %fs:(%rbp),%r12d
+ pushq %r12 /* push argument on stack */
+ dec %r10
+ jnz 0b /* loop for all arguments */
+
+mach_call_call:
+
+#ifdef DEBUG
+ testb $0xff,EXT(syscall_trace)
+ jz 0f
+ movq %rax,%rdi
+ call EXT(syscall_trace_print)
+ /* will return with syscallofs still (or again) in eax */
+0:
+#endif /* DEBUG */
+ call *EXT(mach_trap_table)+8(%rax) /* call procedure */
+ movq %rsp,%rcx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%rcx /* round up to stack top */
+ movq -7-IKS_SIZE(%rcx),%rsp /* switch back to PCB stack */
+ movq %rax,R_EAX(%rsp) /* save return value */
+ jmp _return_from_trap /* return to user */
+
+/*
+ * Address out of range. Change to page fault.
+ * %rbp holds failing address.
+ */
+mach_call_addr_push:
+ movq %r11,%rsp /* clean parameters from stack */
+mach_call_addr:
+ movq %rbp,R_CR2(%rbx) /* set fault address */
+ movq $(T_PAGE_FAULT),R_TRAPNO(%rbx)
+ /* set page-fault trap */
+ movq $(T_PF_USER),R_ERR(%rbx)
+ /* set error code - read user space */
+ jmp _take_trap /* treat as a trap */
+
+/*
+ * System call out of range. Treat as invalid-instruction trap.
+ * (? general protection?)
+ */
+mach_call_range:
+ movq $(T_INVALID_OPCODE),R_TRAPNO(%rbx)
+ /* set invalid-operation trap */
+ movq $0,R_ERR(%rbx) /* clear error code */
+ jmp _take_trap /* treat as a trap */
+
+/*
+ * User space emulation of system calls.
+ * edx - user address to handle syscall
+ *
+ * User stack will become:
+ * ursp-> eflags
+ * eip
+ * eax still contains syscall number.
+ */
+syscall_emul:
+ movq $USER_DS,%rdi /* use user data segment for accesses */
+ mov %di,%fs
+
+/* XXX what about write-protected pages? */
+ movq R_UESP(%rbx),%rdi /* get user stack pointer */
+ subq $16,%rdi /* push space for new arguments */
+ movq R_EFLAGS(%rbx),%rax /* move flags */
+ RECOVER(syscall_addr)
+ movl %eax,%fs:0(%rdi) /* to user stack */
+ movl R_EIP(%rbx),%eax /* move eip */
+ RECOVER(syscall_addr)
+ movl %eax,%fs:4(%rdi) /* to user stack */
+ movq %rdi,R_UESP(%rbx) /* set new user stack pointer */
+ movq %rdx,R_EIP(%rbx) /* change return address to trap */
+ movq %rbx,%rsp /* back to PCB stack */
+// TODO: test it before dropping ud2
+ud2
+ jmp _return_from_trap /* return to user */
+
+/*
+ * Address error - address is in %edi.
+ */
+syscall_addr:
+ movq %rdi,R_CR2(%rbx) /* set fault address */
+ movq $(T_PAGE_FAULT),R_TRAPNO(%rbx)
+ /* set page-fault trap */
+ movq $(T_PF_USER),R_ERR(%rbx)
+ /* set error code - read user space */
+ jmp _take_trap /* treat as a trap */
+END(syscall)
+
+#else /* USER32 */
+
+/* Entry point for 64-bit syscalls.
+ * On entry we're still on the user stack, so better not use it. Instead we
+ * save the thread state immediately in thread->pcb->iss, then try to invoke
+ * the syscall.
+ * Note: emulated syscalls seem to not be used anymore in GNU/Hurd, so they
+ * are not handled here.
+ * TODO:
+ - for now we assume the return address is canonical, but apparently there
+ can be cases where it's not (see how Linux handles this). Does it apply
+ here?
+ - check that the case where a task is suspended, and later returns via
+ iretq from return_from_trap, works fine in all combinations
+ */
+ENTRY(syscall64)
+ /* RFLAGS[32:63] are reserved, so combine syscall num (32 bit) and
+ * eflags in RAX to allow using r11 as temporary register
+ */
+ shlq $32,%r11
+ shlq $32,%rax /* make sure bits 32:63 of %rax are zero */
+ shrq $32,%rax
+ or %r11,%rax
+
+ /* Save thread state in pcb->iss, as on exception entry.
+ * Since this is triggered synchronously from userspace, we could
+ * save only the callee-preserved status according to the C ABI,
+ * plus RIP and EFLAGS for sysret
+ */
+ movq MY(ACTIVE_THREAD),%r11 /* point to current thread */
+ movq TH_PCB(%r11),%r11 /* point to pcb */
+ addq $ PCB_ISS,%r11 /* point to saved state */
+
+ mov %rsp,R_UESP(%r11) /* callee-preserved register */
+ mov %rcx,R_EIP(%r11) /* syscall places user RIP in RCX */
+ mov %rbx,R_EBX(%r11) /* callee-preserved register */
+ mov %rax,%rbx /* Now we can unpack eflags again */
+ shr $32,%rbx
+ mov %rbx,R_EFLAGS(%r11) /* ... and save them in pcb as well */
+ mov %rbp,R_EBP(%r11) /* callee-preserved register */
+ mov %r12,R_R12(%r11) /* callee-preserved register */
+ mov %r13,R_R13(%r11) /* callee-preserved register */
+ mov %r14,R_R14(%r11) /* callee-preserved register */
+ mov %r15,R_R15(%r11) /* callee-preserved register */
+
+ /* Save syscall number and args for SYSCALL_EXAMINE/MSG_EXAMINE in glibc.
+ * Note: syscall number is only 32 bit, in EAX, so we sign-extend it in
+ * RAX to mask the EFLAGS bits.
+ */
+ cdqe /* sign-extend EAX in RAX */
+ mov %rax,R_EAX(%r11) /* syscall number */
+ mov %rdi,R_EDI(%r11) /* syscall arg0 */
+ mov %rsi,R_ESI(%r11) /* syscall arg1 */
+ mov %rdx,R_EDX(%r11) /* syscall arg2 */
+ mov %r10,R_R10(%r11) /* syscall arg3 */
+ mov %r8,R_R8(%r11) /* syscall arg4 */
+ mov %r9,R_R9(%r11) /* syscall arg5 */
+
+ mov %r11,%rbx /* prepare for error handling */
+ mov %r10,%rcx /* fix arg3 location according to C ABI */
+
+ /* switch to kernel stack, then we can enable interrupts */
+ CPU_NUMBER_NO_STACK(%r11d)
+ movq CX(EXT(kernel_stack),%r11),%rsp
+ sti
+
+ /* Now we have saved state and args 1-6 are in place.
+ * Before invoking the syscall we do some bound checking and,
+ * if we have more that 6 arguments, we need to copy the
+ * remaining ones to the kernel stack, handling page faults when
+ * accessing the user stack.
+ */
+ negl %eax /* get system call number */
+ jl _syscall64_range /* out of range if it was positive */
+ cmpl EXT(mach_trap_count),%eax /* check system call table bounds */
+ jg _syscall64_range /* error if out of range */
+ shll $5,%eax /* manual indexing of mach_trap_t */
+
+ /* check if we need to place some arguments on the stack */
+_syscall64_args_stack:
+ mov EXT(mach_trap_table)(%rax),%r10 /* get number of arguments */
+ subq $6,%r10 /* the first 6 args are already in place */
+ jle _syscall64_call /* skip argument copy if num args <= 6 */
+
+ movq R_UESP(%rbx),%r11 /* get user stack pointer */
+ addq $8,%r11 /* Skip user return address */
+
+ lea (%r11,%r10,8),%r11 /* point past last argument */
+
+0: subq $8,%r11
+ RECOVER(_syscall64_addr_push)
+ mov (%r11),%r12
+ pushq %r12 /* push argument on stack */
+ dec %r10
+ jnz 0b /* loop for all remaining arguments */
+
+_syscall64_call:
+ call *EXT(mach_trap_table)+8(%rax) /* call procedure */
+
+_syscall64_check_for_ast:
+ /* Check for ast. */
+ CPU_NUMBER_NO_GS(%r11d)
+ cmpl $0,CX(EXT(need_ast),%r11)
+ jz _syscall64_restore_state
+
+ /* Save the syscall return value, both on our stack, for the case
+ * i386_astintr returns normally, and in the PCB stack, in case it
+ * instead calls thread_block(thread_exception_return).
+ */
+ pushq %rax /* save the return value on our stack */
+ pushq $0 /* dummy value to keep the stack aligned */
+
+ /* Find the PCB stack. */
+ movq %rsp,%rcx
+ or $(KERNEL_STACK_SIZE-1),%rcx /* round up to stack top */
+ movq -7-IKS_SIZE(%rcx),%rcx
+
+ movq %rax,R_EAX(%rcx) /* save the return value in the PCB stack */
+ call EXT(i386_astintr)
+ popq %rax /* discard alignment dummy */
+ popq %rax /* restore the return value */
+ jmp _syscall64_check_for_ast /* check again */
+
+_syscall64_restore_state:
+ /* Restore thread state and return to user using sysret. */
+ cli /* block interrupts when using the user stack in kernel space */
+ movq MY(ACTIVE_THREAD),%r11 /* point to current thread */
+ movq TH_PCB(%r11),%r11 /* point to pcb */
+ addq $ PCB_ISS,%r11 /* point to saved state */
+
+ /* Restore syscall args. Note: we can't restore the syscall number in
+ * RAX because it needs to hold the return value.*/
+ mov R_EDI(%r11),%rdi /* syscall arg0 */
+ mov R_ESI(%r11),%rsi /* syscall arg1 */
+ mov R_EDX(%r11),%rdx /* syscall arg2 */
+ mov R_R10(%r11),%r10 /* syscall arg3 */
+ mov R_R8(%r11),%r8 /* syscall arg4 */
+ mov R_R9(%r11),%r9 /* syscall arg5 */
+
+ mov R_UESP(%r11),%rsp /* callee-preserved register,
+ * also switch back to user stack */
+ mov R_EIP(%r11),%rcx /* sysret convention */
+ mov R_EBX(%r11),%rbx /* callee-preserved register */
+ mov R_EBP(%r11),%rbp /* callee-preserved register */
+ mov R_R12(%r11),%r12 /* callee-preserved register */
+ mov R_R13(%r11),%r13 /* callee-preserved register */
+ mov R_R14(%r11),%r14 /* callee-preserved register */
+ mov R_R15(%r11),%r15 /* callee-preserved register */
+ mov R_EFLAGS(%r11),%r11 /* sysret convention */
+
+ sysretq /* fast return to user-space, the thread didn't block */
+
+/* Error handling fragments, from here we jump directly to the trap handler */
+_syscall64_addr_push:
+ movq %r11,R_CR2(%rbx) /* set fault address */
+ movq $(T_PAGE_FAULT),R_TRAPNO(%rbx) /* set page-fault trap */
+ movq $(T_PF_USER),R_ERR(%rbx) /* set error code - read user space */
+ jmp _take_trap /* treat as a trap */
+
+_syscall64_range:
+ movq $(T_INVALID_OPCODE),R_TRAPNO(%rbx)
+ /* set invalid-operation trap */
+ movq $0,R_ERR(%rbx) /* clear error code */
+ jmp _take_trap /* treat as a trap */
+
+END(syscall64)
+#endif /* USER32 */
+
+ .data
+DATA(cpu_features)
+DATA(cpu_features_edx)
+ .long 0
+DATA(cpu_features_ecx)
+ .long 0
+ .text
+
+/* Discover what kind of cpu we have; return the family number
+ (3, 4, 5, 6, for 386, 486, 586, 686 respectively). */
+ENTRY(discover_x86_cpu_type)
+ /* We are a modern enough processor to have the CPUID instruction;
+ use it to find out what we are. */
+ movl $1,%eax /* Fetch CPU type info ... */
+ cpuid /* ... into eax */
+ movl %ecx,cpu_features_ecx /* Keep a copy */
+ movl %edx,cpu_features_edx /* Keep a copy */
+ shrl $8,%eax /* Slide family bits down */
+ andl $15,%eax /* And select them */
+ ret /* And return */
+
+
+/* */
+/*
+ * Utility routines.
+ */
+
+ENTRY(copyin)
+ /* C ABI: %rdi=user src, %rsi=kernel dst, %rdx=count.
+ * movs wants src in %rsi and dst in %rdi, hence the exchange.
+ * Returns 0 on success, 1 on fault. */
+ xchgq %rsi,%rdi /* Get user source and kernel destination */
+
+copyin_remainder:
+ /*cld*/ /* count up: default mode in all GCC code */
+ movq %rdx,%rcx /* move by longwords first */
+ shrq $3,%rcx
+ RECOVER(copyin_fail)
+ rep
+ movsq /* move longwords */
+ movq %rdx,%rcx /* now move remaining bytes */
+ andq $7,%rcx
+ RECOVER(copyin_fail)
+ rep
+ movsb
+ xorq %rax,%rax /* return 0 for success */
+
+copyin_ret:
+ ret /* and return */
+
+copyin_fail:
+ movq $1,%rax /* return 1 for failure */
+ jmp copyin_ret /* pop frame and return */
+
+bogus:
+ ud2 /* unreachable trap stub */
+
+ENTRY(copyout)
+ /* Same convention as copyin, opposite direction. */
+ xchgq %rsi,%rdi /* Get user source and kernel destination */
+
+copyout_remainder:
+ movq %rdx,%rax /* use count */
+ /*cld*/ /* count up: always this way in GCC code */
+ movq %rax,%rcx /* move by longwords first */
+ shrq $3,%rcx
+ RECOVER(copyout_fail)
+ rep
+ movsq
+ movq %rax,%rcx /* now move remaining bytes */
+ andq $7,%rcx
+ RECOVER(copyout_fail)
+ rep
+ movsb /* move */
+ xorq %rax,%rax /* return 0 for success */
+
+copyout_ret:
+ ret /* and return */
+
+copyout_fail:
+ movq $1,%rax /* return 1 for failure */
+ jmp copyout_ret /* pop frame and return */
+
+/*
+ * int inst_fetch(int eip, int cs);
+ *
+ * Fetch instruction byte. Return -1 if invalid address.
+ */
+ENTRY(inst_fetch)
+ movq S_ARG1, %rax /* get segment */
+ movw %ax,%fs /* into FS */
+ movq S_ARG0, %rax /* get offset */
+ RETRY(EXT(inst_fetch)) /* re-load FS on retry */
+ RECOVER(_inst_fetch_fault)
+ movzbq %fs:(%rax),%rax /* load instruction byte */
+ ret
+
+_inst_fetch_fault:
+ movq $-1,%rax /* return -1 if error */
+ ret
+
+
+/*
+ * Done with recovery and retry tables.
+ */
+ RECOVER_TABLE_END
+ RETRY_TABLE_END
+
+
+
+/*
+ * cpu_shutdown()
+ * Force reboot
+ */
+null_idt:
+ .space 8 * 32 /* 32 empty descriptors */
+
+null_idtr:
+ .word 8 * 32 - 1
+ .quad null_idt
+
+Entry(cpu_shutdown)
+ /* With an empty IDT, the #DE below escalates to a triple
+ * fault, which resets the machine. */
+ lidt null_idtr /* disable the interrupt handler */
+ xor %rcx,%rcx /* generate a divide by zero */
+ div %rcx,%rax /* reboot now */
+ ret /* this will "never" be executed */
diff --git a/x86_64/spl.S b/x86_64/spl.S
new file mode 100644
index 0000000..80c65c1
--- /dev/null
+++ b/x86_64/spl.S
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 1995 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * spl routines for the i386at.
+ */
+
+#include <mach/machine/asm.h>
+#include <i386/i386/ipl.h>
+#include <i386/i386/i386asm.h>
+#include <i386/i386/xen.h>
+#include <i386/cpu_number.h>
+
+#if NCPUS > 1
+/*
+ * Full memory barrier: a LOCKed read-modify-write of the stack top
+ * serializes prior loads and stores (cheaper than mfence on most
+ * CPUs).  Address through the full 64-bit stack pointer: using %esp
+ * in 64-bit code adds a 32-bit address-size override and truncates
+ * the (high, canonical) kernel stack address.
+ */
+#define mb lock; addl $0,(%rsp)
+#else
+#define mb
+#endif
+
+/*
+ * Program XEN evt masks from %eax.
+ *
+ * Atomically swap the new event mask into the shared-info page; if
+ * any event that just became unmasked is already pending, set the
+ * per-CPU pending selector and pending flag so the upcall will be
+ * delivered.  Clobbers %eax (receives the old mask); %rbx preserved.
+ */
+#define XEN_SETMASK() \
+	pushq %rbx; \
+	movl %eax,%ebx; \
+	xchgl %eax,hyp_shared_info+EVTMASK; \
+	notl %ebx; \
+	andl %eax,%ebx; /* Get unmasked events */ \
+	testl hyp_shared_info+PENDING, %ebx; \
+	popq %rbx; \
+	jz 9f; /* Check whether there was some pending */ \
+lock orl $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \
+	movb $1,hyp_shared_info+CPU_PENDING; \
+9:
+
+/*
+ * spl0() -- drop interrupt priority to level 0.
+ * First drains pending soft interrupts (Linux bottom halves when
+ * LINUX_DEV, softclock callouts) at spl1 with hard interrupts
+ * disabled, then lowers curr_ipl[cpu] to SPL0 (reprogramming the Xen
+ * event mask under MACH_XEN), enables interrupts, and returns the
+ * ipl that was current on entry.
+ */
+ENTRY(spl0)
+	mb;
+	CPU_NUMBER(%edx)
+	movl CX(EXT(curr_ipl),%rdx),%eax /* save current ipl */
+	pushq %rax
+	cli /* disable interrupts */
+#ifdef LINUX_DEV
+	movl EXT(bh_active),%eax
+	/* get pending mask */
+	andl EXT(bh_mask),%eax /* any pending unmasked interrupts? */
+	jz 1f /* no, skip */
+	call EXT(spl1) /* block further interrupts */
+	incl EXT(intr_count) /* set interrupt flag */
+	call EXT(linux_soft_intr) /* go handle interrupt */
+	decl EXT(intr_count) /* decrement interrupt flag */
+	cli /* disable interrupts */
+1:
+#endif
+	cmpl $0,softclkpending /* softclock pending? */
+	je 1f /* no, skip */
+	movl $0,softclkpending /* clear flag */
+	call EXT(spl1) /* block further interrupts */
+#ifdef LINUX_DEV
+	incl EXT(intr_count) /* set interrupt flag */
+#endif
+	call EXT(softclock) /* go handle interrupt */
+#ifdef LINUX_DEV
+	decl EXT(intr_count) /* decrement interrupt flag */
+#endif
+	cli /* disable interrupts */
+1:
+	CPU_NUMBER(%edx)
+	cmpl $(SPL0),CX(EXT(curr_ipl),%rdx) /* are we at spl0? */
+	je 1f /* yes, all done */
+	movl $(SPL0),CX(EXT(curr_ipl),%rdx) /* set ipl */
+#ifdef MACH_XEN
+	movl EXT(int_mask)+SPL0*4,%eax
+	/* get xen mask */
+	XEN_SETMASK() /* program xen evts */
+#endif
+1:
+	sti /* enable interrupts */
+	popq %rax /* return previous mask */
+	ret
+
+
+/*
+ * Historically, SETIPL(level) was called
+ * for spl levels 1-6, now we have combined
+ * all the intermediate levels into the highest level
+ * such that interrupts are either on or off,
+ * since modern hardware can handle it.
+ * This simplifies the interrupt handling
+ * especially for the linux drivers.
+ *
+ * Every entry point below therefore raises to SPL7 (interrupts
+ * masked via cli) and returns the previous ipl in %eax.
+ */
+Entry(splsoftclock)
+ENTRY(spl1)
+ENTRY(spl2)
+ENTRY(spl3)
+Entry(splnet)
+Entry(splhdw)
+ENTRY(spl4)
+Entry(splbio)
+Entry(spldcm)
+ENTRY(spl5)
+Entry(spltty)
+Entry(splimp)
+Entry(splvm)
+ENTRY(spl6)
+Entry(splclock)
+Entry(splsched)
+Entry(splhigh)
+Entry(splhi)
+ENTRY(spl7)
+	mb;
+	/* just clear IF */
+	cli
+	CPU_NUMBER(%edx)
+	movl $SPL7,%eax
+	xchgl CX(EXT(curr_ipl),%rdx),%eax
+	ret
+
+/*
+ * splx(ipl) -- restore a previously saved interrupt priority level
+ * and return the level that was current on entry.  Lowering to SPL0
+ * is delegated to spl0(), other changes to spl(); restoring SPL7
+ * leaves interrupts disabled.  With kernel debugging enabled (and
+ * not under Xen), traps via int3 if IF was somehow re-enabled while
+ * running at SPL7.
+ */
+ENTRY(splx)
+	movq S_ARG0,%rdx /* get ipl */
+	CPU_NUMBER(%eax)
+#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
+	/* First make sure that if we're exitting from ipl7, IF is still cleared */
+	cmpl $SPL7,CX(EXT(curr_ipl),%rax) /* from ipl7? */
+	jne 0f
+	pushfq
+	popq %rax
+	testl $0x200,%eax /* IF? */
+	jz 0f
+	int3 /* Oops, interrupts got enabled?! */
+
+0:
+#endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
+	testl %edx,%edx /* spl0? */
+	jz EXT(spl0) /* yes, handle specially */
+	CPU_NUMBER(%eax)
+	cmpl CX(EXT(curr_ipl),%rax),%edx /* same ipl as current? */
+	jne spl /* no */
+	cmpl $SPL7,%edx /* spl7? */
+	je 1f /* to ipl7, don't enable interrupts */
+	sti /* ensure interrupts are enabled */
+1:
+	movl %edx,%eax /* return previous ipl */
+	ret
+
+/*
+ * Like splx() but returns with interrupts disabled and does
+ * not return the previous ipl. This should only be called
+ * when returning from an interrupt.
+ *
+ * When lowering to SPL0 it first drains pending soft interrupts
+ * (Linux bottom halves, softclock) at spl1, exactly as spl0() does.
+ */
+	.align TEXT_ALIGN
+	.globl splx_cli
+splx_cli:
+	movq S_ARG0,%rdx /* get ipl */
+	cli /* disable interrupts */
+	testl %edx,%edx /* spl0? */
+	jnz 2f /* no, skip */
+#ifdef LINUX_DEV
+	movl EXT(bh_active),%eax
+	/* get pending mask */
+	andl EXT(bh_mask),%eax /* any pending unmasked interrupts? */
+	jz 1f /* no, skip */
+	call EXT(spl1) /* block further interrupts */
+	incl EXT(intr_count) /* set interrupt flag */
+	call EXT(linux_soft_intr) /* go handle interrupt */
+	decl EXT(intr_count) /* decrement interrupt flag */
+	cli /* disable interrupts */
+1:
+#endif
+	cmpl $0,softclkpending /* softclock pending? */
+	je 1f /* no, skip */
+	movl $0,softclkpending /* clear flag */
+	call EXT(spl1) /* block further interrupts */
+#ifdef LINUX_DEV
+	incl EXT(intr_count) /* set interrupt flag */
+#endif
+	call EXT(softclock) /* go handle interrupt */
+#ifdef LINUX_DEV
+	decl EXT(intr_count) /* decrement interrupt flag */
+#endif
+	cli /* disable interrupts */
+1:
+	xorl %edx,%edx /* edx = ipl 0 */
+2:
+	CPU_NUMBER(%eax)
+	cmpl CX(EXT(curr_ipl),%rax),%edx /* same ipl as current? */
+	je 1f /* yes, all done */
+	movl %edx,CX(EXT(curr_ipl),%rax) /* set ipl */
+#ifdef MACH_XEN
+	movl EXT(int_mask),%eax
+	movl (%eax,%edx,4),%eax
+	/* get int mask */
+	XEN_SETMASK() /* program xen evts with new mask */
+#endif
+1:
+	ret
+
+/*
+ * NOTE: This routine must *not* use %ecx, otherwise
+ * the interrupt code will break.
+ *
+ * Internal level-switch helper: callers (splx) arrive with the
+ * target ipl in %edx; SPL7 is diverted to spl7.  Returns the
+ * previous ipl in %eax with interrupts re-enabled.
+ */
+	.align TEXT_ALIGN
+	.globl spl
+spl:
+	CPU_NUMBER(%eax)
+#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
+	/* First make sure that if we're exitting from ipl7, IF is still cleared */
+	cmpl $SPL7,CX(EXT(curr_ipl),%rax) /* from ipl7? */
+	jne 0f
+	pushfq
+	popq %rax
+	testl $0x200,%eax /* IF? */
+	jz 0f
+	int3 /* Oops, interrupts got enabled?! */
+
+0:
+#endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
+	cmpl $SPL7,%edx /* spl7? */
+	je EXT(spl7) /* yes, handle specially */
+#ifdef MACH_XEN
+	movl EXT(int_mask),%eax
+	movl (%eax,%edx,4),%eax
+	/* get int mask */
+#endif
+	cli /* disable interrupts */
+	CPU_NUMBER(%eax)
+	xchgl CX(EXT(curr_ipl),%rax),%edx /* set ipl */
+#ifdef MACH_XEN
+	XEN_SETMASK() /* program PICs with new mask */
+#endif
+	sti /* enable interrupts */
+	movl %edx,%eax /* return previous ipl */
+	ret
+
+/*
+ * sploff() -- save RFLAGS and disable interrupts.
+ * Returns the saved flags in %rax, for a later splon().
+ */
+ENTRY(sploff)
+	pushfq
+	popq %rax
+	cli
+	ret
+
+/*
+ * splon(flags) -- restore the RFLAGS value saved by sploff(),
+ * re-enabling interrupts if they were enabled then.
+ */
+ENTRY(splon)
+	pushq S_ARG0
+	popfq
+	ret
+
+	.data
+	.align DATA_ALIGN
+/* Non-zero when a softclock run has been requested; consumed (and
+ * cleared) by spl0 and splx_cli in this file. */
+softclkpending:
+	.long 0
+	.text
+
+/* Request that softclock() be run the next time the ipl drops to 0. */
+ENTRY(setsoftclock)
+	incl softclkpending
+	ret
diff --git a/x86_64/x86_64 b/x86_64/x86_64
new file mode 120000
index 0000000..ee8aacf
--- /dev/null
+++ b/x86_64/x86_64
@@ -0,0 +1 @@
+../i386/i386 \ No newline at end of file
diff --git a/x86_64/xen_boothdr.S b/x86_64/xen_boothdr.S
new file mode 100644
index 0000000..da40a5c
--- /dev/null
+++ b/x86_64/xen_boothdr.S
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2006-2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <xen/public/elfnote.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=GNU Mach"
+ .ascii ",GUEST_VERSION=1.3"
+ .ascii ",XEN_VER=xen-3.0"
+ .ascii ",VIRT_BASE=0x40000000"
+ .ascii ",ELF_PADDR_OFFSET=0x40000000"
+ .ascii ",HYPERCALL_PAGE=0x2"
+ .ascii ",LOADER=generic"
+#ifndef MACH_PSEUDO_PHYS
+ .ascii ",FEATURES=!auto_translated_physmap"
+#endif
+#ifndef MACH_PV_PAGETABLES
+ .ascii "|!writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
+#ifndef MACH_PV_DESCRIPTORS
+ .ascii "|!writable_descriptor_tables"
+#endif /* MACH_PV_DESCRIPTORS */
+ .byte 0
+
+/* Macro taken from linux/include/linux/elfnote.h */
+#define ELFNOTE(name, type, desctype, descdata) \
+.pushsection .note.name ; \
+ .align 4 ; \
+ .long 2f - 1f /* namesz */ ; \
+ .long 4f - 3f /* descsz */ ; \
+ .long type ; \
+1:.asciz "name" ; \
+2:.align 4 ; \
+3:desctype descdata ; \
+4:.align 4 ; \
+.popsection ;
+
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "GNU Mach")
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "1.3")
+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad, _START)
+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, _START)
+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, start)
+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypcalls)
+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, ""
+#ifndef MACH_PSEUDO_PHYS
+ "!auto_translated_physmap"
+#endif
+#ifndef MACH_PV_PAGETABLES
+ "|!writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
+#ifndef MACH_PV_DESCRIPTORS
+ "|!writable_descriptor_tables"
+#endif /* MACH_PV_DESCRIPTORS */
+ )
+
+#include <mach/machine/asm.h>
+
+#include <i386/i386/i386asm.h>
+
+	.text
+	.globl gdt, ldt
+	.globl start, _start, gdt
+start:
+_start:
+
+	/* Switch to our own interrupt stack. */
+	movq $(_intstack+INTSTACK_SIZE),%rax
+	andq $(~15),%rax
+	movq %rax,%rsp
+
+	/* Reset EFLAGS to a known state. */
+	pushq $0
+	popf
+
+	/* Push the start_info pointer to be the argument. */
+	movabs $KERNELBASE,%rax
+	subq %rax,%rsi
+	movq %rsi,%r8
+
+	/* Fix ifunc entries.  On x86_64 the .rela.iplt section holds
+	 * 24-byte Elf64_Rela records (r_offset, r_info, r_addend); for
+	 * each R_X86_64_IRELATIVE entry, call the resolver found in
+	 * r_addend and store its result at r_offset.  (The previous
+	 * code used Elf32_Rel offsets and R_386_IRELATIVE, and called
+	 * through the truncated 32-bit %ebx.) */
+	movq $__rela_iplt_start,%rsi
+	movq $__rela_iplt_end,%rdi
+iplt_cont:
+	cmpq %rdi,%rsi
+	jae iplt_done
+	movq (%rsi),%rbx /* r_offset */
+	movl 8(%rsi),%eax /* low 32 bits of r_info: relocation type */
+	cmpl $37,%eax /* R_X86_64_IRELATIVE */
+	jnz iplt_next
+	call *16(%rsi) /* call ifunc resolver (address in r_addend) */
+	movq %rax,(%rbx) /* patch in the resolved address */
+iplt_next:
+	addq $24,%rsi /* sizeof(Elf64_Rela) */
+	jmp iplt_cont
+iplt_done:
+
+	movq %r8,%rdi
+	/* Jump into C code. */
+	call EXT(c_boot_entry)
+
+/* Those need to be aligned on page boundaries. */
+.global hyp_shared_info, hypcalls
+
+ .org (start + 0x1000)
+hyp_shared_info:
+ .org hyp_shared_info + 0x1000
+
+/* Labels just for debuggers */
+#define hypcall(name, n) \
+ .org hypcalls + n*32 ; \
+.globl __hyp_##name ; \
+__hyp_##name:
+
+hypcalls:
+ hypcall(set_trap_table, 0)
+ hypcall(mmu_update, 1)
+ hypcall(set_gdt, 2)
+ hypcall(stack_switch, 3)
+ hypcall(set_callbacks, 4)
+ hypcall(fpu_taskswitch, 5)
+ hypcall(sched_op_compat, 6)
+ hypcall(platform_op, 7)
+ hypcall(set_debugreg, 8)
+ hypcall(get_debugreg, 9)
+ hypcall(update_descriptor, 10)
+ hypcall(memory_op, 12)
+ hypcall(multicall, 13)
+ hypcall(update_va_mapping, 14)
+ hypcall(set_timer_op, 15)
+ hypcall(event_channel_op_compat, 16)
+ hypcall(xen_version, 17)
+ hypcall(console_io, 18)
+ hypcall(physdev_op_compat, 19)
+ hypcall(grant_table_op, 20)
+ hypcall(vm_assist, 21)
+ hypcall(update_va_mapping_otherdomain, 22)
+ hypcall(iret, 23)
+ hypcall(vcpu_op, 24)
+ hypcall(set_segment_base, 25)
+ hypcall(mmuext_op, 26)
+ hypcall(acm_op, 27)
+ hypcall(nmi_op, 28)
+ hypcall(sched_op, 29)
+ hypcall(callback_op, 30)
+ hypcall(xenoprof_op, 31)
+ hypcall(event_channel_op, 32)
+ hypcall(physdev_op, 33)
+ hypcall(hvm_op, 34)
+ hypcall(sysctl, 35)
+ hypcall(domctl, 36)
+ hypcall(kexec_op, 37)
+
+ hypcall(arch_0, 48)
+ hypcall(arch_1, 49)
+ hypcall(arch_2, 50)
+ hypcall(arch_3, 51)
+ hypcall(arch_4, 52)
+ hypcall(arch_5, 53)
+ hypcall(arch_6, 54)
+ hypcall(arch_7, 55)
+
+ .org hypcalls + 0x1000
+
+gdt:
+ .org gdt + 0x1000
+
+ldt:
+ .org ldt + 0x1000
+
+stack:
+ .comm _intstack,INTSTACK_SIZE
+ .comm _eintstack,0
+
diff --git a/x86_64/xen_locore.S b/x86_64/xen_locore.S
new file mode 100644
index 0000000..967c890
--- /dev/null
+++ b/x86_64/xen_locore.S
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/i386/i386asm.h>
+#include <i386/i386/cpu_number.h>
+#include <i386/i386/xen.h>
+
+	.data 2
+/* Non-zero while a Xen event handler is running; hyp_sti checks it
+ * to avoid re-entering the dispatch loop. */
+int_active:
+	.long 0
+
+
+	.text
+	.globl hyp_callback, hyp_failsafe_callback
+	P2ALIGN(TEXT_ALIGN)
+/*
+ * Xen event-channel upcall entry: pop the %rcx/%r11 slots from the
+ * hypervisor-built frame, save %rax, and join the common interrupt
+ * path.
+ */
+hyp_callback:
+	popq %rcx
+	popq %r11
+	pushq %rax
+	jmp EXT(all_intrs)
+
+/*
+ * Run the generic Xen event dispatcher with int_active set, so that
+ * hyp_sti (below) knows a handler is already on the stack; enables
+ * interrupts before returning.
+ */
+ENTRY(interrupt)
+	incl int_active /* currently handling interrupts */
+	call EXT(hyp_c_callback) /* call generic interrupt routine */
+	decl int_active /* stopped handling interrupts */
+	sti
+	ret
+
+/* FIXME: if we're _very_ unlucky, we may be re-interrupted, filling stack
+ *
+ * Far from trivial, see mini-os. That said, maybe we could just, before poping
+ * everything (which is _not_ destructive), save sp into a known place and use
+ * it+jmp back?
+ *
+ * Mmm, there seems to be an iret hypcall that does exactly what we want:
+ * perform iret, and if IF is set, clear the interrupt mask.
+ */
+
+/* Pfff, we have to check pending interrupts ourselves. Some other DomUs just make an hypercall for retriggering the irq. Not sure it's really easier/faster */
+/*
+ * hyp_sti() -- Xen analogue of "sti": clear the per-CPU event mask
+ * in the shared info page and, unless a handler is already active
+ * (int_active), re-mask and dispatch any events that became pending
+ * while masked, looping until none remain.
+ */
+ENTRY(hyp_sti)
+	pushq %rbp
+	movq %rsp, %rbp
+_hyp_sti:
+	movb $0,hyp_shared_info+CPU_CLI /* Enable interrupts */
+	cmpl $0,int_active /* Check whether we were already checking pending interrupts */
+	jz 0f
+	popq %rbp
+	ret /* Already active, just return */
+0:
+	/* Not active, check pending interrupts by hand */
+	/* no memory barrier needed on x86 */
+	cmpb $0,hyp_shared_info+CPU_PENDING
+	jne 0f
+	popq %rbp
+	ret
+0:
+	movb $0xff,hyp_shared_info+CPU_CLI
+1:
+	pushq %rax
+	pushq %rcx
+	pushq %rdx
+	pushq %rdi
+	pushq %rsi
+	pushq %r8
+	pushq %r9
+	pushq %r10
+	pushq %r11
+	incl int_active /* currently handling interrupts */
+
+	xorq %rdi,%rdi
+	xorq %rsi,%rsi
+	call EXT(hyp_c_callback)
+
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rsi
+	popq %rdi
+	popq %rdx
+	popq %rcx
+	popq %rax
+	decl int_active /* stopped handling interrupts */
+	cmpb $0,hyp_shared_info+CPU_PENDING
+	jne 1b
+	jmp _hyp_sti
+
+/* Hypervisor failed to reload segments. Dump them. */
+/* NOTE(review): the ud2 at the entry traps immediately, so the
+ * segment-recovery code below it is currently unreachable debug
+ * scaffolding (see the TODO/FIXME markers). */
+hyp_failsafe_callback:
+ud2
+#if 1
+/* TODO: FIXME */
+	/* load sane segments */
+	mov %ss, %ax
+#if 0
+	mov %ax, %ds
+	mov %ax, %es
+#endif
+	mov %ax, %fs
+	mov %ax, %gs
+	movq %rsp, %rdi
+	call EXT(hyp_failsafe_c_callback)
+#else
+	popq %rdx
+	movq %rdx,%ds
+	popq %rdx
+	movq %rdx,%es
+	popq %fs
+	popq %gs
+
+movq (%rsp),%rax
+ud2
+	iretq
+#endif
+
+/*
+ * Replacement for iretq under Xen PV.  Returns whose saved CS has an
+ * RPL below 2 are performed directly, forcing ring-3 RPL bits onto
+ * the saved CS and SS (x86_64 PV has no ring 1); returns to user
+ * context must instead go through the hypervisor's iret path.
+ */
+#undef iretq
+ENTRY(hyp_iretq)
+	testb $2,1*8(%rsp)
+	jnz slow
+	/* There is no ring1 on x86_64, we have to force ring 3 */
+	orb $3,1*8(%rsp)
+	orb $3,4*8(%rsp)
+	iretq
+
+slow:
+/* There is no ring 1/2 on x86_64, so going back to user needs to go through
+ * hypervisor */
+	pushq $0
+	jmp __hyp_iret
diff --git a/xen/Makefrag.am b/xen/Makefrag.am
new file mode 100644
index 0000000..61eb475
--- /dev/null
+++ b/xen/Makefrag.am
@@ -0,0 +1,83 @@
+# Makefile fragment for the Xen platform.
+
+# Copyright (C) 2007 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Xen support.
+#
+
+libkernel_a_SOURCES += \
+ xen/public/arch-x86_32.h \
+ xen/public/arch-x86_64.h \
+ xen/public/arch-x86/xen.h \
+ xen/public/arch-x86/xen-mca.h \
+ xen/public/arch-x86/xen-x86_32.h \
+ xen/public/arch-x86/xen-x86_64.h \
+ xen/public/callback.h \
+ xen/public/COPYING \
+ xen/public/dom0_ops.h \
+ xen/public/domctl.h \
+ xen/public/elfnote.h \
+ xen/public/elfstructs.h \
+ xen/public/event_channel.h \
+ xen/public/features.h \
+ xen/public/grant_table.h \
+ xen/public/io/blkif.h \
+ xen/public/io/console.h \
+ xen/public/io/fbif.h \
+ xen/public/io/fsif.h \
+ xen/public/io/kbdif.h \
+ xen/public/io/netif.h \
+ xen/public/io/pciif.h \
+ xen/public/io/protocols.h \
+ xen/public/io/ring.h \
+ xen/public/io/tpmif.h \
+ xen/public/io/xenbus.h \
+ xen/public/io/xs_wire.h \
+ xen/public/kexec.h \
+ xen/public/libelf.h \
+ xen/public/memory.h \
+ xen/public/nmi.h \
+ xen/public/physdev.h \
+ xen/public/platform.h \
+ xen/public/sched.h \
+ xen/public/sysctl.h \
+ xen/public/trace.h \
+ xen/public/vcpu.h \
+ xen/public/version.h \
+ xen/public/xencomm.h \
+ xen/public/xen-compat.h \
+ xen/public/xen.h \
+ xen/public/xenoprof.h \
+ xen/block.c \
+ xen/block.h \
+ xen/console.c \
+ xen/console.h \
+ xen/evt.c \
+ xen/evt.h \
+ xen/grant.c \
+ xen/grant.h \
+ xen/net.c \
+ xen/net.h \
+ xen/ring.c \
+ xen/ring.h \
+ xen/store.c \
+ xen/store.h \
+ xen/time.c \
+ xen/time.h \
+ xen/xen.c \
+ xen/xen.h
diff --git a/xen/block.c b/xen/block.c
new file mode 100644
index 0000000..7e9db26
--- /dev/null
+++ b/xen/block.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <mach/mig_errors.h>
+#include <kern/kalloc.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/disk_status.h>
+#include <device/device_reply.user.h>
+#include <device/device_emul.h>
+#include <device/ds_routines.h>
+#include <xen/public/io/blkif.h>
+#include <xen/evt.h>
+#include <string.h>
+#include <util/atoi.h>
+#include "store.h"
+#include "block.h"
+#include "grant.h"
+#include "ring.h"
+#include "xen.h"
+
+/* Hypervisor part */
+
+struct block_data {
+ struct device device;
+ char *name;
+ int open_count;
+ char *backend;
+ domid_t domid;
+ char *vbd;
+ int handle;
+ unsigned info;
+ dev_mode_t mode;
+ unsigned sector_size;
+ unsigned long nr_sectors;
+ ipc_port_t port;
+ blkif_front_ring_t ring;
+ evtchn_port_t evt;
+ simple_lock_data_t lock;
+ simple_lock_data_t pushlock;
+};
+
+static int n_vbds;
+static struct block_data *vbd_data;
+
+struct device_emulation_ops hyp_block_emulation_ops;
+
+/*
+ * Ring interrupt handler for VBD 'unit': consume every pending blkif
+ * response, store D_IO_ERROR through the io_return_t pointer carried
+ * in the request id on failure, then wake the thread waiting on that
+ * request and one thread possibly waiting for ring space.
+ */
+static void hyp_block_intr(int unit) {
+	struct block_data *bd = &vbd_data[unit];
+	blkif_response_t *rsp;
+	int more;
+	io_return_t *err;
+
+	simple_lock(&bd->lock);
+	more = RING_HAS_UNCONSUMED_RESPONSES(&bd->ring);
+	while (more) {
+		rmb(); /* make sure we see responses */
+		rsp = RING_GET_RESPONSE(&bd->ring, bd->ring.rsp_cons++);
+		err = (void *) (unsigned long) rsp->id;
+		switch (rsp->status) {
+		case BLKIF_RSP_ERROR:
+			*err = D_IO_ERROR;
+			break;
+		case BLKIF_RSP_OKAY:
+			break;
+		default:
+			printf("Unrecognized blkif status %d\n", rsp->status);
+			goto drop;
+		}
+		thread_wakeup(err);
+drop:
+		thread_wakeup_one(bd);
+		RING_FINAL_CHECK_FOR_RESPONSES(&bd->ring, more);
+	}
+	simple_unlock(&bd->lock);
+}
+
+#define VBD_PATH "device/vbd"
+/*
+ * Probe the Xen store for virtual block devices and set each one up:
+ * decode its "virtual-device" number into a conventional name
+ * (hd/sd/xvd plus disk and partition), share a request ring page and
+ * an event channel with the backend domain, wait for the backend to
+ * reach XenbusStateConnected, then record sector count/size, info
+ * flags and access mode.  Entries that fail early keep
+ * open_count == -2 so device_open() refuses them.
+ */
+void hyp_block_init(void) {
+	char **vbds, **vbd;
+	char *c;
+	int i, disk, partition;
+	int n;
+	int grant;
+	char port_name[10];
+	char *prefix;
+	char device_name[32];
+	domid_t domid;
+	evtchn_port_t evt;
+	hyp_store_transaction_t t;
+	phys_addr_t addr;
+	struct block_data *bd;
+	blkif_sring_t *ring;
+
+	vbds = hyp_store_ls(0, 1, VBD_PATH);
+	if (!vbds) {
+		printf("hd: No block device (%s). Hoping you don't need any\n", hyp_store_error);
+		n_vbds = 0;
+		return;
+	}
+
+	n = 0;
+	for (vbd = vbds; *vbd; vbd++)
+		n++;
+
+	vbd_data = (void*) kalloc(n * sizeof(*vbd_data));
+	if (!vbd_data) {
+		printf("hd: No memory room for VBD\n");
+		n_vbds = 0;
+		return;
+	}
+	n_vbds = n;
+
+	for (n = 0; n < n_vbds; n++) {
+		bd = &vbd_data[n];
+		mach_atoi((u_char *) vbds[n], &bd->handle);
+		if (bd->handle == MACH_ATOI_DEFAULT)
+			continue;
+
+		bd->open_count = -2;
+		bd->vbd = vbds[n];
+
+		/* Get virtual number. */
+		i = hyp_store_read_int(0, 5, VBD_PATH, "/", vbds[n], "/", "virtual-device");
+		if (i == -1)
+			/* Fixed message: was "couldn't virtual device". */
+			panic("hd: couldn't read virtual device of VBD %s\n",vbds[n]);
+		if ((i >> 28) == 1) {
+			/* xvd, new format */
+			prefix = "xvd";
+			disk = (i >> 8) & ((1 << 20) - 1);
+			partition = i & ((1 << 8) - 1);
+		} else if ((i >> 8) == 202) {
+			/* xvd, old format */
+			prefix = "xvd";
+			disk = (i >> 4) & ((1 << 4) - 1);
+			partition = i & ((1 << 4) - 1);
+		} else if ((i >> 8) == 8) {
+			/* SCSI */
+			prefix = "sd";
+			disk = (i >> 4) & ((1 << 4) - 1);
+			partition = i & ((1 << 4) - 1);
+		} else if ((i >> 8) == 3) {
+			/* IDE primary */
+			prefix = "hd";
+			disk = (i >> 6) & ((1 << 2) - 1);
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 22) {
+			/* IDE secondary */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 2;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 33) {
+			/* IDE 3 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 4;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 34) {
+			/* IDE 4 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 6;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 56) {
+			/* IDE 5 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 8;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 57) {
+			/* IDE 6 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 10;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 88) {
+			/* IDE 7 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 12;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 89) {
+			/* IDE 8 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 14;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 90) {
+			/* IDE 9 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 16;
+			partition = i & ((1 << 6) - 1);
+		} else if ((i >> 8) == 91) {
+			/* IDE 10 */
+			prefix = "hd";
+			disk = ((i >> 6) & ((1 << 2) - 1)) + 18;
+			partition = i & ((1 << 6) - 1);
+		} else {
+			printf("unsupported VBD number %d\n", i);
+			continue;
+		}
+		if (partition)
+			sprintf(device_name, "%s%ds%d", prefix, disk, partition);
+		else
+			sprintf(device_name, "%s%d", prefix, disk);
+		bd->name = (char*) kalloc(strlen(device_name) + 1);
+		strcpy(bd->name, device_name);
+
+		/* Get domain id of backend driver. */
+		i = hyp_store_read_int(0, 5, VBD_PATH, "/", vbds[n], "/", "backend-id");
+		if (i == -1)
+			panic("%s: couldn't read backend domid (%s)", device_name, hyp_store_error);
+		bd->domid = domid = i;
+
+		do {
+			t = hyp_store_transaction_start();
+
+			/* Get a page for ring */
+			if ((addr = vm_page_grab_phys_addr()) == -1)
+				panic("%s: couldn't allocate space for store ring\n", device_name);
+			ring = (void*) phystokv(addr);
+			SHARED_RING_INIT(ring);
+			FRONT_RING_INIT(&bd->ring, ring, PAGE_SIZE);
+			grant = hyp_grant_give(domid, atop(addr), 0);
+
+			/* and give it to backend. */
+			i = sprintf(port_name, "%d", grant);
+			c = hyp_store_write(t, port_name, 5, VBD_PATH, "/", vbds[n], "/", "ring-ref");
+			if (!c)
+				panic("%s: couldn't store ring reference (%s)", device_name, hyp_store_error);
+			kfree((vm_offset_t) c, strlen(c)+1);
+
+			/* Allocate an event channel and give it to backend. */
+			bd->evt = evt = hyp_event_channel_alloc(domid);
+			hyp_evt_handler(evt, hyp_block_intr, n, SPL7);
+			i = sprintf(port_name, "%u", evt);
+			c = hyp_store_write(t, port_name, 5, VBD_PATH, "/", vbds[n], "/", "event-channel");
+			if (!c)
+				panic("%s: couldn't store event channel (%s)", device_name, hyp_store_error);
+			kfree((vm_offset_t) c, strlen(c)+1);
+			c = hyp_store_write(t, hyp_store_state_initialized, 5, VBD_PATH, "/", vbds[n], "/", "state");
+			if (!c)
+				panic("%s: couldn't store state (%s)", device_name, hyp_store_error);
+			kfree((vm_offset_t) c, strlen(c)+1);
+		} while (!hyp_store_transaction_stop(t));
+		/* TODO randomly wait? */
+
+		c = hyp_store_read(0, 5, VBD_PATH, "/", vbds[n], "/", "backend");
+		if (!c)
+			panic("%s: couldn't get path to backend (%s)", device_name, hyp_store_error);
+		bd->backend = c;
+
+		while(1) {
+			i = hyp_store_read_int(0, 3, bd->backend, "/", "state");
+			if (i == MACH_ATOI_DEFAULT)
+				panic("can't read state from %s", bd->backend);
+			if (i == XenbusStateConnected)
+				break;
+			hyp_yield();
+		}
+
+		i = hyp_store_read_int(0, 3, bd->backend, "/", "sectors");
+		if (i == -1)
+			panic("%s: couldn't get number of sectors (%s)", device_name, hyp_store_error);
+		bd->nr_sectors = i;
+
+		i = hyp_store_read_int(0, 3, bd->backend, "/", "sector-size");
+		if (i == -1)
+			panic("%s: couldn't get sector size (%s)", device_name, hyp_store_error);
+		if (i & ~(2*(i-1)+1))
+			panic("sector size %d is not a power of 2\n", i);
+		if (i > PAGE_SIZE || PAGE_SIZE % i != 0)
+			panic("%s: couldn't handle sector size %d with pages of size %d\n", device_name, i, PAGE_SIZE);
+		bd->sector_size = i;
+
+		i = hyp_store_read_int(0, 3, bd->backend, "/", "info");
+		if (i == -1)
+			panic("%s: couldn't get info (%s)", device_name, hyp_store_error);
+		bd->info = i;
+
+		c = hyp_store_read(0, 3, bd->backend, "/", "mode");
+		if (!c)
+			panic("%s: couldn't get backend's mode (%s)", device_name, hyp_store_error);
+		if ((c[0] == 'w') && !(bd->info & VDISK_READONLY))
+			bd->mode = D_READ|D_WRITE;
+		else
+			bd->mode = D_READ;
+
+		c = hyp_store_read(0, 3, bd->backend, "/", "params");
+		if (!c)
+			panic("%s: couldn't get backend's real device (%s)", device_name, hyp_store_error);
+
+		/* TODO: change suffix */
+		printf("%s: dom%d's VBD %s (%s,%c%s) %ldMB\n", device_name, domid,
+			vbds[n], c, bd->mode & D_WRITE ? 'w' : 'r',
+			bd->info & VDISK_CDROM ? ", cdrom" : "",
+			bd->nr_sectors / ((1<<20) / 512));
+		kfree((vm_offset_t) c, strlen(c)+1);
+
+		c = hyp_store_write(0, hyp_store_state_connected, 5, VBD_PATH, "/", bd->vbd, "/", "state");
+		if (!c)
+			panic("couldn't store state for %s (%s)", device_name, hyp_store_error);
+		kfree((vm_offset_t) c, strlen(c)+1);
+
+		bd->open_count = -1;
+		bd->device.emul_ops = &hyp_block_emulation_ops;
+		bd->device.emul_data = bd;
+		simple_lock_init(&bd->lock);
+		simple_lock_init(&bd->pushlock);
+	}
+}
+
+/*
+ * Convert a device pointer into a send right on its kernel port.
+ * A null device yields IP_NULL.
+ */
+static ipc_port_t
+dev_to_port(void *d)
+{
+	struct block_data *bd = d;
+
+	return bd ? ipc_port_make_send(bd->port) : IP_NULL;
+}
+
+/*
+ * Drop one open reference on the device; on the final close, detach
+ * the kobject from the port and free the kernel port.  Panics if
+ * closed more times than opened.  Always returns 0.
+ */
+static int
+device_close(void *devp)
+{
+	struct block_data *bd = devp;
+	if (--bd->open_count < 0)
+		panic("too many closes on %s", bd->name);
+	printf("close, %s count %d\n", bd->name, bd->open_count);
+	if (bd->open_count)
+		return 0;
+	ipc_kobject_set(bd->port, IKO_NULL, IKOT_NONE);
+	ipc_port_dealloc_kernel(bd->port);
+	return 0;
+}
+
+/*
+ * Open the VBD named 'name'.  The first open allocates the device's
+ * kernel port, binds it to the device kobject and requests a
+ * no-senders notification; subsequent opens just bump the count.
+ * Devices whose init failed (open_count == -2) are refused.  The
+ * reply is sent through ds_device_open_reply() and MIG_NO_REPLY is
+ * returned, so the caller must not send its own reply.
+ */
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+	     dev_mode_t mode, const char *name, device_t *devp /* out */)
+{
+  int i;
+  ipc_port_t port, notify;
+  struct block_data *bd;
+
+  for (i = 0; i < n_vbds; i++)
+    if (!strcmp(name, vbd_data[i].name))
+      break;
+
+  if (i == n_vbds)
+    return D_NO_SUCH_DEVICE;
+
+  bd = &vbd_data[i];
+  if (bd->open_count == -2)
+    /* couldn't be initialized */
+    return D_NO_SUCH_DEVICE;
+
+  if ((mode & D_WRITE) && !(bd->mode & D_WRITE))
+    return D_READ_ONLY;
+
+  if (bd->open_count >= 0) {
+    *devp = &bd->device ;
+    bd->open_count++ ;
+    printf("re-open, %s count %d\n", bd->name, bd->open_count);
+    return D_SUCCESS;
+  }
+
+  bd->open_count = 1;
+  printf("%s count %d\n", bd->name, bd->open_count);
+
+  port = ipc_port_alloc_kernel();
+  if (port == IP_NULL) {
+    device_close(bd);
+    return KERN_RESOURCE_SHORTAGE;
+  }
+  bd->port = port;
+
+  *devp = &bd->device;
+
+  ipc_kobject_set (port, (ipc_kobject_t) &bd->device, IKOT_DEVICE);
+
+  notify = ipc_port_make_sonce (bd->port);
+  ip_lock (bd->port);
+  ipc_port_nsrequest (bd->port, 1, notify, &notify);
+  assert (notify == IP_NULL);
+
+  if (IP_VALID (reply_port))
+    ds_device_open_reply (reply_port, reply_port_type, D_SUCCESS, port);
+  else
+    device_close(bd);
+  return MIG_NO_REPLY;
+}
+
+/*
+ * Read 'count' bytes starting at device sector 'bn'.  Pages are
+ * grabbed from the directmap, filled via blkif READ requests (up to
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST pages per request, each granted to
+ * the backend for the duration), inserted into a fresh VM object and
+ * handed back as an out-of-line vm_map_copy_t in *data.  The caller
+ * sleeps on a stack-resident io_return_t whose address travels in
+ * the request id; hyp_block_intr() records the status there and
+ * wakes us.  The tail of the last page beyond the requested bytes is
+ * zeroed.
+ */
+static io_return_t
+device_read (void *d, ipc_port_t reply_port,
+	     mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+	     recnum_t bn, int count, io_buf_ptr_t *data,
+	     unsigned *bytes_read)
+{
+  int resid, amt;
+  io_return_t err = 0;
+  vm_page_t pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+  grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+  int nbpages;
+  vm_map_copy_t copy;
+  vm_offset_t offset, alloc_offset, o;
+  vm_object_t object;
+  vm_page_t m;
+  vm_size_t len, size;
+  struct block_data *bd = d;
+  struct blkif_request *req;
+
+  *data = 0;
+  *bytes_read = 0;
+
+  if (count < 0)
+    return D_INVALID_SIZE;
+  if (count == 0)
+    return 0;
+
+  /* Allocate an object to hold the data.  */
+  size = round_page (count);
+  object = vm_object_allocate (size);
+  if (! object)
+    {
+      err = D_NO_MEMORY;
+      goto out;
+    }
+  alloc_offset = offset = 0;
+  resid = count;
+
+  while (resid && !err)
+    {
+      unsigned reqn;
+      int i;
+      int last_sect;
+
+      nbpages = 0;
+
+      /* Determine size of I/O this time around.  */
+      len = round_page(offset + resid) - offset;
+      if (len > PAGE_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+	len = PAGE_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+      /* Allocate pages.  */
+      while (alloc_offset < offset + len)
+	{
+	  while ((m = vm_page_grab (VM_PAGE_DIRECTMAP)) == 0)
+	    VM_PAGE_WAIT (0);
+	  assert (! m->active && ! m->inactive);
+	  m->busy = TRUE;
+	  assert(nbpages < BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	  pages[nbpages++] = m;
+	  alloc_offset += PAGE_SIZE;
+	}
+
+      /* Do the read.  */
+      amt = len;
+      if (amt > resid)
+	amt = resid;
+
+      /* allocate a request; sleep until the ring has room */
+      spl_t spl = splsched();
+      while(1) {
+	simple_lock(&bd->lock);
+	if (!RING_FULL(&bd->ring))
+	  break;
+	thread_sleep(bd, &bd->lock, FALSE);
+      }
+      mb();
+      reqn = bd->ring.req_prod_pvt++;;
+      simple_lock(&bd->pushlock);
+      simple_unlock(&bd->lock);
+      (void) splx(spl);
+
+      req = RING_GET_REQUEST(&bd->ring, reqn);
+      req->operation = BLKIF_OP_READ;
+      req->nr_segments = nbpages;
+      req->handle = bd->handle;
+      req->id = (uint64_t) (unsigned long) &err; /* pointer on the stack */
+      req->sector_number = bn + offset / 512;
+      for (i = 0; i < nbpages; i++) {
+	req->seg[i].gref = gref[i] = hyp_grant_give(bd->domid, atop(pages[i]->phys_addr), 0);
+	req->seg[i].first_sect = 0;
+	req->seg[i].last_sect = PAGE_SIZE/512 - 1;
+      }
+      last_sect = ((amt - 1) & PAGE_MASK) / 512;
+      req->seg[nbpages-1].last_sect = last_sect;
+
+      /* zero the unread tail of the final page */
+      memset((void*) phystokv(pages[nbpages-1]->phys_addr
+		+ (last_sect + 1) * 512),
+	      0, PAGE_SIZE - (last_sect + 1) * 512);
+
+      /* no need for a lock: as long as the request is not pushed, the event won't be triggered */
+      assert_wait((event_t) &err, FALSE);
+
+      int notify;
+      wmb(); /* make sure it sees requests */
+      RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bd->ring, notify);
+      if (notify)
+	hyp_event_channel_send(bd->evt);
+      simple_unlock(&bd->pushlock);
+
+      thread_block(NULL);
+
+      if (err)
+	printf("error reading %d bytes at sector %ld\n", amt,
+	       bn + offset / 512);
+
+      for (i = 0; i < nbpages; i++)
+	hyp_grant_takeback(gref[i]);
+
+      /* Compute number of pages to insert in object.  */
+      o = offset;
+
+      resid -= amt;
+      if (resid == 0)
+	offset = o + len;
+      else
+	offset += amt;
+
+      /* Add pages to the object.  */
+      vm_object_lock (object);
+      for (i = 0; i < nbpages; i++)
+	{
+	  m = pages[i];
+	  assert (m->busy);
+	  vm_page_lock_queues ();
+	  PAGE_WAKEUP_DONE (m);
+	  m->dirty = TRUE;
+	  vm_page_insert (m, object, o);
+	  vm_page_unlock_queues ();
+	  o += PAGE_SIZE;
+	}
+      vm_object_unlock (object);
+    }
+
+out:
+  if (! err)
+    err = vm_map_copyin_object (object, 0, round_page (count), &copy);
+  if (! err)
+    {
+      *data = (io_buf_ptr_t) copy;
+      *bytes_read = count - resid;
+    }
+  else
+    vm_object_deallocate (object);
+  return err;
+}
+
+static io_return_t
+device_write(void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ io_return_t err = 0;
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ vm_offset_t buffer = 0;
+ char *map_data;
+ vm_offset_t map_addr;
+ vm_size_t map_size;
+ unsigned copy_npages = atop(round_page(count));
+ phys_addr_t phys_addrs[copy_npages];
+ struct block_data *bd = d;
+ blkif_request_t *req;
+ grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned reqn, size;
+ unsigned i, nbpages, j;
+ kern_return_t kr;
+
+ if (!(bd->mode & D_WRITE))
+ return D_READ_ONLY;
+
+ if (count == 0) {
+ vm_map_copy_discard(copy);
+ return 0;
+ }
+
+ if (count % bd->sector_size)
+ return D_INVALID_SIZE;
+
+ if (count > copy->size)
+ return D_INVALID_SIZE;
+
+ /* XXX The underlying physical pages of the mapping could be highmem,
+ for which drivers require the use of a bounce buffer. */
+ kr = kmem_alloc(device_io_map, &buffer, count);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ kr = kmem_io_map_copyout(device_io_map, (vm_offset_t *)&map_data,
+ &map_addr, &map_size, copy, count);
+ if (kr != KERN_SUCCESS) {
+ kmem_free(device_io_map, buffer, count);
+ return kr;
+ }
+
+ memcpy((void *)buffer, map_data, count);
+ kmem_io_map_deallocate(device_io_map, map_addr, map_size);
+
+ for (i = 0; i < copy_npages; i++)
+ phys_addrs[i] = kvtophys(buffer + ptoa(i));
+
+ for (i=0; i<copy_npages; i+=nbpages) {
+
+ nbpages = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ if (nbpages > copy_npages-i)
+ nbpages = copy_npages-i;
+
+ /* allocate a request */
+ spl_t spl = splsched();
+ while(1) {
+ simple_lock(&bd->lock);
+ if (!RING_FULL(&bd->ring))
+ break;
+ thread_sleep(bd, &bd->lock, FALSE);
+ }
+ mb();
+ reqn = bd->ring.req_prod_pvt++;;
+ simple_lock(&bd->pushlock);
+ simple_unlock(&bd->lock);
+ (void) splx(spl);
+
+ req = RING_GET_REQUEST(&bd->ring, reqn);
+ req->operation = BLKIF_OP_WRITE;
+ req->nr_segments = nbpages;
+ req->handle = bd->handle;
+ req->id = (uint64_t) (unsigned long) &err; /* pointer on the stack */
+ req->sector_number = bn + i*PAGE_SIZE / 512;
+
+ for (j = 0; j < nbpages; j++) {
+ req->seg[j].gref = gref[j] = hyp_grant_give(bd->domid, atop(phys_addrs[i + j]), 1);
+ req->seg[j].first_sect = 0;
+ size = PAGE_SIZE;
+ if ((i + j + 1) * PAGE_SIZE > count)
+ size = count - (i + j) * PAGE_SIZE;
+ req->seg[j].last_sect = size/512 - 1;
+ }
+
+ /* no need for a lock: as long as the request is not pushed, the event won't be triggered */
+ assert_wait((event_t) &err, FALSE);
+
+ int notify;
+ wmb(); /* make sure it sees requests */
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bd->ring, notify);
+ if (notify)
+ hyp_event_channel_send(bd->evt);
+ simple_unlock(&bd->pushlock);
+
+ thread_block(NULL);
+
+ for (j = 0; j < nbpages; j++)
+ hyp_grant_takeback(gref[j]);
+
+ if (err) {
+ printf("error writing %u bytes at sector %ld\n", count, bn);
+ break;
+ }
+ }
+
+ if (buffer)
+ kmem_free(device_io_map, buffer, count);
+
+ vm_map_copy_discard (copy);
+
+ if (!err)
+ *bytes_written = count;
+
+ if (IP_VALID(reply_port))
+ ds_device_write_reply (reply_port, reply_port_type, err, count);
+
+ return MIG_NO_REPLY;
+}
+
+static io_return_t
+device_get_status(void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ struct block_data *bd = d;
+
+ switch (flavor)
+ {
+ case DEV_GET_SIZE:
+ status[DEV_GET_SIZE_DEVICE_SIZE] = (unsigned long long) bd->nr_sectors * 512;
+ status[DEV_GET_SIZE_RECORD_SIZE] = bd->sector_size;
+ *status_count = DEV_GET_SIZE_COUNT;
+ break;
+ case DEV_GET_RECORDS:
+ status[DEV_GET_RECORDS_DEVICE_RECORDS] = ((unsigned long long) bd->nr_sectors * 512) / bd->sector_size;
+ status[DEV_GET_RECORDS_RECORD_SIZE] = bd->sector_size;
+ *status_count = DEV_GET_RECORDS_COUNT;
+ break;
+ default:
+ printf("TODO: block_%s(%d)\n", __func__, flavor);
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+struct device_emulation_ops hyp_block_emulation_ops = {
+ NULL, /* dereference */
+ NULL, /* deallocate */
+ dev_to_port,
+ device_open,
+ device_close,
+ device_write,
+ NULL, /* write_inband */
+ device_read,
+ NULL, /* read_inband */
+ NULL, /* set_status */
+ device_get_status,
+ NULL, /* set_filter */
+ NULL, /* map */
+ NULL, /* no_senders */
+ NULL, /* write_trap */
+ NULL, /* writev_trap */
+};
diff --git a/xen/block.h b/xen/block.h
new file mode 100644
index 0000000..8248885
--- /dev/null
+++ b/xen/block.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_BLOCK_H
+#define XEN_BLOCK_H
+
+void hyp_block_init(void);
+
+#endif /* XEN_BLOCK_H */
diff --git a/xen/configfrag.ac b/xen/configfrag.ac
new file mode 100644
index 0000000..3745a31
--- /dev/null
+++ b/xen/configfrag.ac
@@ -0,0 +1,76 @@
+dnl Configure fragment for the Xen platform.
+
+dnl Copyright (C) 2007 Free Software Foundation, Inc.
+
+dnl This program is free software; you can redistribute it and/or modify it
+dnl under the terms of the GNU General Public License as published by the
+dnl Free Software Foundation; either version 2, or (at your option) any later
+dnl version.
+dnl
+dnl This program is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received a copy of the GNU General Public License along
+dnl with this program; if not, write to the Free Software Foundation, Inc.,
+dnl 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Xen platform.
+#
+
+[if [ "$host_platform" = xen ]; then]
+ AC_DEFINE([MACH_XEN], [], [build a MachXen kernel])
+ AC_DEFINE([MACH_HYP], [], [be a hypervisor guest])
+ AM_CONDITIONAL([PLATFORM_xen], [true])
+
+dnl These are experimental
+
+ AC_ARG_ENABLE([pseudo-phys],
+ AS_HELP_STRING([--disable-pseudo-phys], [Pseudo physical pages support]))
+ [if [ x"$enable_pseudo_phys" = xno ]; then]
+ AM_CONDITIONAL([enable_pseudo_phys], [false])
+ [else]
+ AC_DEFINE([MACH_PSEUDO_PHYS], [], [Enable pseudo physical memory support])
+ AM_CONDITIONAL([enable_pseudo_phys], [true])
+ [fi]
+
+ AC_ARG_ENABLE([pv-pagetables],
+ AS_HELP_STRING([--disable-pv-pagetables], [Paravirtualized page tables support]))
+ [if [ x"$enable_pv_pagetables" = xno ]; then]
+ AM_CONDITIONAL([enable_pv_pagetables], [false])
+ [else]
+ AC_DEFINE([MACH_PV_PAGETABLES], [], [Enable paravirtualized page tables support])
+ AM_CONDITIONAL([enable_pv_pagetables], [true])
+ [fi]
+
+ AC_ARG_ENABLE([pv-descriptors],
+ AS_HELP_STRING([--disable-pv-descriptors], [Paravirtualized segment descriptors support]))
+ [if [ x"$enable_pv_descriptors" = xno ]; then]
+ AM_CONDITIONAL([enable_pv_descriptors], [false])
+ [else]
+ AC_DEFINE([MACH_PV_DESCRIPTORS], [], [Enable paravirtualized segment descriptors support])
+ AM_CONDITIONAL([enable_pv_descriptors], [true])
+ [fi]
+
+ AC_ARG_ENABLE([ring1],
+ AS_HELP_STRING([--disable-ring1], [ring1 kernel support]))
+ [if [ x"$enable_ring1" = xno ]; then]
+ AM_CONDITIONAL([enable_ring1], [false])
+ [else]
+ AC_DEFINE([MACH_RING1], [], [Enable ring1 kernel support])
+ AM_CONDITIONAL([enable_ring1], [true])
+ [fi]
+
+[else]
+ AM_CONDITIONAL([PLATFORM_xen], [false])
+ AM_CONDITIONAL([enable_pseudo_phys], [false])
+ AM_CONDITIONAL([enable_pv_pagetables], [false])
+ AM_CONDITIONAL([enable_pv_descriptors], [false])
+ AM_CONDITIONAL([enable_ring1], [false])
+[fi]
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/xen/console.c b/xen/console.c
new file mode 100644
index 0000000..9ceb6dd
--- /dev/null
+++ b/xen/console.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2006-2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <i386at/kd.h>
+#include <sys/types.h>
+#include <device/tty.h>
+#include <device/cons.h>
+#include <machine/pmap.h>
+#include <machine/machspl.h>
+#include <xen/public/io/console.h>
+#include "console.h"
+#include "ring.h"
+#include "evt.h"
+
+/* Hypervisor part */
+
+def_simple_lock_irq_data(static, outlock);
+def_simple_lock_irq_data(static, inlock);
+static struct xencons_interface *console;
+static int kd_pollc;
+int kb_mode; /* XXX: actually don't care. */
+
+static int hypputc(int c)
+{
+ if (!console) {
+ char d = c;
+ hyp_console_io(CONSOLEIO_write, 1, kvtolin(&d));
+ } else {
+ spl_t spl;
+ static int complain;
+ spl = simple_lock_irq(&outlock);
+ while (hyp_ring_smash(console->out, console->out_prod, console->out_cons)) {
+ if (!complain) {
+ complain = 1;
+ hyp_console_put("ring smash\n");
+ }
+ /* TODO: are we allowed to sleep in putc? */
+ hyp_yield();
+ }
+ hyp_ring_cell(console->out, console->out_prod) = c;
+ wmb();
+ console->out_prod++;
+ hyp_event_channel_send(boot_info.console_evtchn);
+ simple_unlock_irq(spl, &outlock);
+ }
+ return 0;
+}
+
+int hypcnputc(dev_t dev, int c)
+{
+ return hypputc(c);
+}
+
+/* get char by polling, used by debugger */
+int hypcngetc(dev_t dev, int wait)
+{
+ int ret;
+ if (wait)
+ while (console->in_prod == console->in_cons)
+ hyp_yield();
+ else
+ if (console->in_prod == console->in_cons)
+ return -1;
+ ret = hyp_ring_cell(console->in, console->in_cons);
+ mb();
+ console->in_cons++;
+ hyp_event_channel_send(boot_info.console_evtchn);
+ return ret;
+}
+
+void cnpollc(boolean_t on) {
+ if (on) {
+ kd_pollc++;
+ } else {
+ --kd_pollc;
+ }
+}
+
+void kd_setleds1(u_char val)
+{
+ /* Can't do this. */
+}
+
+/* Mach part */
+
+struct tty hypcn_tty;
+
+static void hypcnintr(int unit, spl_t spl, void *ret_addr, void *regs) {
+ struct tty *tp = &hypcn_tty;
+ if (kd_pollc)
+ return;
+ simple_lock_nocheck(&inlock.slock);
+ while (console->in_prod != console->in_cons) {
+ int c = hyp_ring_cell(console->in, console->in_cons);
+ mb();
+ console->in_cons++;
+#if MACH_KDB
+ if (c == (char)0xA3) {
+ printf("pound pressed\n");
+ kdb_kintr();
+ continue;
+ }
+#endif /* MACH_KDB */
+ if ((tp->t_state & (TS_ISOPEN|TS_WOPEN)))
+ (*linesw[tp->t_line].l_rint)(c, tp);
+ }
+ hyp_event_channel_send(boot_info.console_evtchn);
+ simple_unlock_nocheck(&inlock.slock);
+}
+
+int hypcnread(dev_t dev, io_req_t ior)
+{
+ struct tty *tp = &hypcn_tty;
+ tp->t_state |= TS_CARR_ON;
+ return char_read(tp, ior);
+}
+
+int hypcnwrite(dev_t dev, io_req_t ior)
+{
+ return char_write(&hypcn_tty, ior);
+}
+
+static void hypcnstart(struct tty *tp)
+{
+ spl_t o_pri;
+ int ch;
+ unsigned char c;
+
+ if (tp->t_state & TS_TTSTOP)
+ return;
+ while (1) {
+ tp->t_state &= ~TS_BUSY;
+ if (tp->t_state & TS_TTSTOP)
+ break;
+ if ((tp->t_outq.c_cc <= 0) || (ch = getc(&tp->t_outq)) == -1)
+ break;
+ c = ch;
+ o_pri = splsoftclock();
+ hypputc(c);
+ splx(o_pri);
+ }
+ if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
+ tt_write_wakeup(tp);
+ }
+}
+
+static void hypcnstop(struct tty *t, int n)
+{
+}
+
+io_return_t hypcngetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count)
+{
+ return tty_get_status(&hypcn_tty, flavor, data, count);
+}
+
+io_return_t hypcnsetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count)
+{
+ return tty_set_status(&hypcn_tty, flavor, data, count);
+}
+
+int hypcnportdeath(dev_t dev, mach_port_t port)
+{
+ return tty_portdeath(&hypcn_tty, (ipc_port_t) port);
+}
+
+int hypcnopen(dev_t dev, int flag, io_req_t ior)
+{
+ struct tty *tp = &hypcn_tty;
+ spl_t o_pri;
+
+ o_pri = simple_lock_irq(&tp->t_lock);
+ if (!(tp->t_state & (TS_ISOPEN|TS_WOPEN))) {
+ /* XXX ttychars allocates memory */
+ simple_unlock_nocheck(&tp->t_lock.slock);
+ ttychars(tp);
+ simple_lock_nocheck(&tp->t_lock.slock);
+ tp->t_oproc = hypcnstart;
+ tp->t_stop = hypcnstop;
+ tp->t_ospeed = tp->t_ispeed = B115200;
+ tp->t_flags = ODDP|EVENP|ECHO|CRMOD|XTABS|LITOUT;
+ }
+ tp->t_state |= TS_CARR_ON;
+ simple_unlock_irq(o_pri, &tp->t_lock);
+ return (char_open(dev, tp, flag, ior));
+}
+
+void hypcnclose(dev_t dev, int flag)
+{
+ struct tty *tp = &hypcn_tty;
+ spl_t s;
+ s = simple_lock_irq(&tp->t_lock);
+ ttyclose(tp);
+ simple_unlock_irq(s, &tp->t_lock);
+}
+
+int hypcnprobe(struct consdev *cp)
+{
+ cp->cn_dev = makedev(0, 0);
+ cp->cn_pri = CN_INTERNAL;
+ return 0;
+}
+
+int hypcninit(struct consdev *cp)
+{
+ if (console)
+ return 0;
+ simple_lock_init_irq(&outlock);
+ simple_lock_init_irq(&inlock);
+ console = (void*) mfn_to_kv(boot_info.console_mfn);
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readwrite(console);
+#endif /* MACH_PV_PAGETABLES */
+ hyp_evt_handler(boot_info.console_evtchn, (interrupt_handler_fn)hypcnintr, 0, SPL6);
+ return 0;
+}
diff --git a/xen/console.h b/xen/console.h
new file mode 100644
index 0000000..4a3c541
--- /dev/null
+++ b/xen/console.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_CONSOLE_H
+#define XEN_CONSOLE_H
+#include <machine/xen.h>
+#include <string.h>
+
+#include <mach/port.h>
+#include <device/cons.h>
+#include <device/io_req.h>
+
+static inline void hyp_console_write(const char *str, int len)
+{
+ hyp_console_io (CONSOLEIO_write, len, kvtolin(str));
+}
+
+#define hyp_console_put(str) ({ \
+ const char *__str = (void*) (str); \
+ hyp_console_write (__str, strlen (__str)); \
+})
+
+extern void hyp_console_init(void);
+
+extern int hypcnputc(dev_t dev, int c);
+extern int hypcngetc(dev_t dev, int wait);
+extern int hypcnprobe(struct consdev *cp);
+extern int hypcninit(struct consdev *cp);
+
+extern int hypcnopen(dev_t dev, int flag, io_req_t ior);
+extern int hypcnread(dev_t dev, io_req_t ior);
+extern int hypcnwrite(dev_t dev, io_req_t ior);
+extern void hypcnclose(dev_t dev, int flag);
+extern io_return_t hypcngetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count);
+extern io_return_t hypcnsetstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count);
+extern int hypcnportdeath(dev_t dev, mach_port_t port);
+
+#endif /* XEN_CONSOLE_H */
diff --git a/xen/evt.c b/xen/evt.c
new file mode 100644
index 0000000..7296ae4
--- /dev/null
+++ b/xen/evt.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2007-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+#include <mach/xen.h>
+#include <machine/xen.h>
+#include <machine/ipl.h>
+#include <machine/gdt.h>
+#include <xen/console.h>
+#include "evt.h"
+
+#define NEVNT (sizeof(unsigned long) * sizeof(unsigned long) * 8)
+int int_mask[NSPL];
+
+spl_t curr_ipl[NCPUS];
+int spl_init = 0;
+
+interrupt_handler_fn ivect[NEVNT];
+int intpri[NEVNT];
+int iunit[NEVNT];
+
+void hyp_c_callback(void *ret_addr, void *regs)
+{
+ int i, j, n;
+ int cpu = 0;
+ unsigned long pending_sel;
+
+ hyp_shared_info.vcpu_info[cpu].evtchn_upcall_pending = 0;
+ /* no need for a barrier on x86, xchg is already one */
+#if !(defined(__i386__) || defined(__x86_64__))
+ wmb();
+#endif
+ while ((pending_sel = xchgl(&hyp_shared_info.vcpu_info[cpu].evtchn_pending_sel, 0))) {
+
+ for (i = 0; pending_sel; i++, pending_sel >>= 1) {
+ unsigned long pending;
+
+ if (!(pending_sel & 1))
+ continue;
+
+ while ((pending = (hyp_shared_info.evtchn_pending[i] & ~hyp_shared_info.evtchn_mask[i]))) {
+
+ n = i * sizeof(unsigned long);
+ for (j = 0; pending; j++, n++, pending >>= 1) {
+ if (!(pending & 1))
+ continue;
+
+ if (ivect[n]) {
+ spl_t spl = splx(intpri[n]);
+ asm ("lock; and %1,%0":"=m"(hyp_shared_info.evtchn_pending[i]):"r"(~(1UL<<j)));
+ ((void(*)(int, int, const char*, struct i386_interrupt_state*))(ivect[n]))(iunit[n], spl, ret_addr, regs);
+ splx_cli(spl);
+ } else {
+ printf("warning: lost unbound event %d\n", n);
+ asm ("lock; and %1,%0":"=m"(hyp_shared_info.evtchn_pending[i]):"r"(~(1UL<<j)));
+ }
+ }
+ }
+ }
+ }
+}
+
+void form_int_mask(void)
+{
+ unsigned int j, bit, mask;
+ int i;
+
+ for (i=SPL0; i < NSPL; i++) {
+ for (j=0x00, bit=0x01, mask = 0; j < NEVNT; j++, bit<<=1)
+ if (intpri[j] <= i)
+ mask |= bit;
+ int_mask[i] = mask;
+ }
+}
+
+extern void hyp_callback(void);
+extern void hyp_failsafe_callback(void);
+
+void hyp_intrinit(void) {
+ int i;
+
+ form_int_mask();
+ for (i = 0; i < NCPUS; i++)
+ curr_ipl[i] = SPLHI;
+ hyp_shared_info.evtchn_mask[0] = int_mask[SPLHI];
+#ifdef __i386__
+ hyp_set_callbacks(KERNEL_CS, hyp_callback,
+ KERNEL_CS, hyp_failsafe_callback);
+#endif
+#ifdef __x86_64__
+ hyp_set_callbacks(hyp_callback, hyp_failsafe_callback, NULL);
+#endif
+}
+
+void hyp_evt_handler(evtchn_port_t port, interrupt_handler_fn handler, int unit, spl_t spl) {
+ if (port > NEVNT)
+ panic("event channel port %d > %d not supported\n", port, (int) NEVNT);
+ intpri[port] = spl;
+ iunit[port] = unit;
+ form_int_mask();
+ wmb();
+ ivect[port] = handler;
+}
diff --git a/xen/evt.h b/xen/evt.h
new file mode 100644
index 0000000..a73733e
--- /dev/null
+++ b/xen/evt.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_EVT_H
+#define XEN_EVT_H
+
+#include <machine/spl.h>
+
+void hyp_intrinit(void);
+void form_int_mask(void);
+void hyp_evt_handler(evtchn_port_t port, interrupt_handler_fn handler, int unit, spl_t spl);
+void hyp_c_callback(void *ret_addr, void *regs);
+
+#endif /* XEN_EVT_H */
diff --git a/xen/grant.c b/xen/grant.c
new file mode 100644
index 0000000..84758cf
--- /dev/null
+++ b/xen/grant.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <machine/model_dep.h>
+#include <sys/types.h>
+#include <mach/vm_param.h>
+#include <machine/spl.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include "grant.h"
+
+#define NR_RESERVED_ENTRIES 8
+#define NR_GRANT_PAGES 8
+
+def_simple_lock_data(static,lock);
+static struct grant_entry *grants;
+static vm_map_entry_t grants_map_entry;
+static int last_grant = NR_RESERVED_ENTRIES;
+
+static grant_ref_t free_grants = -1;
+
+static grant_ref_t grant_alloc(void) {
+ grant_ref_t grant;
+ if (free_grants != -1) {
+ grant = free_grants;
+ free_grants = grants[grant].frame;
+ } else {
+ grant = last_grant++;
+ if (grant == (NR_GRANT_PAGES * PAGE_SIZE)/sizeof(*grants))
+ panic("not enough grant entries, increase NR_GRANT_PAGES");
+ }
+ return grant;
+}
+
+static void grant_free(grant_ref_t grant) {
+ grants[grant].frame = free_grants;
+ free_grants = grant;
+}
+
+static grant_ref_t grant_set(domid_t domid, unsigned long mfn, uint16_t flags) {
+ spl_t spl = splhigh();
+ simple_lock(&lock);
+
+ grant_ref_t grant = grant_alloc();
+ grants[grant].domid = domid;
+ grants[grant].frame = mfn;
+ wmb();
+ grants[grant].flags = flags;
+
+ simple_unlock(&lock);
+ splx(spl);
+ return grant;
+}
+
+grant_ref_t hyp_grant_give(domid_t domid, unsigned long frame, int readonly) {
+ return grant_set(domid, pfn_to_mfn(frame),
+ GTF_permit_access | (readonly ? GTF_readonly : 0));
+}
+
+grant_ref_t hyp_grant_accept_transfer(domid_t domid, unsigned long frame) {
+ return grant_set(domid, frame, GTF_accept_transfer);
+}
+
+unsigned long hyp_grant_finish_transfer(grant_ref_t grant) {
+ unsigned long frame;
+ spl_t spl = splhigh();
+ simple_lock(&lock);
+
+ if (!(grants[grant].flags & GTF_transfer_committed))
+ panic("grant transfer %x not committed\n", grant);
+ while (!(grants[grant].flags & GTF_transfer_completed))
+ machine_relax();
+ rmb();
+ frame = grants[grant].frame;
+ grant_free(grant);
+
+ simple_unlock(&lock);
+ splx(spl);
+ return frame;
+}
+
+void hyp_grant_takeback(grant_ref_t grant) {
+ spl_t spl = splhigh();
+ simple_lock(&lock);
+
+ if (grants[grant].flags & (GTF_reading|GTF_writing))
+ panic("grant %d still in use (%x)\n", grant, grants[grant].flags);
+
+ /* Note: this is not safe, a cmpxchg is needed, see grant_table.h */
+ grants[grant].flags = 0;
+ wmb();
+
+ grant_free(grant);
+
+ simple_unlock(&lock);
+ splx(spl);
+}
+
+void *hyp_grant_address(grant_ref_t grant) {
+ return &grants[grant];
+}
+
+void hyp_grant_init(void) {
+ struct gnttab_setup_table setup;
+ unsigned long frame[NR_GRANT_PAGES];
+ long ret;
+ int i;
+ vm_offset_t addr;
+
+ setup.dom = DOMID_SELF;
+ setup.nr_frames = NR_GRANT_PAGES;
+ setup.frame_list = (void*) kvtolin(frame);
+
+ ret = hyp_grant_table_op(GNTTABOP_setup_table, kvtolin(&setup), 1);
+ if (ret)
+ panic("setup grant table error %ld", ret);
+ if (setup.status)
+ panic("setup grant table: %d\n", setup.status);
+
+ simple_lock_init(&lock);
+ vm_map_find_entry(kernel_map, &addr, NR_GRANT_PAGES * PAGE_SIZE,
+ (vm_offset_t) 0, kernel_object, &grants_map_entry);
+ grants = (void*) addr;
+
+ for (i = 0; i < NR_GRANT_PAGES; i++)
+ pmap_map_mfn((void *)grants + i * PAGE_SIZE, frame[i]);
+}
diff --git a/xen/grant.h b/xen/grant.h
new file mode 100644
index 0000000..30ce344
--- /dev/null
+++ b/xen/grant.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_GRANT_H
+#define XEN_GRANT_H
+#include <sys/types.h>
+#include <machine/xen.h>
+#include <xen/public/xen.h>
+#include <xen/public/grant_table.h>
+
+void hyp_grant_init(void);
+grant_ref_t hyp_grant_give(domid_t domid, unsigned long frame_nr, int readonly);
+void hyp_grant_takeback(grant_ref_t grant);
+grant_ref_t hyp_grant_accept_transfer(domid_t domid, unsigned long frame_nr);
+unsigned long hyp_grant_finish_transfer(grant_ref_t grant);
+void *hyp_grant_address(grant_ref_t grant);
+
+#endif /* XEN_GRANT_H */
diff --git a/xen/net.c b/xen/net.c
new file mode 100644
index 0000000..b72844d
--- /dev/null
+++ b/xen/net.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <mach/mig_errors.h>
+#include <kern/kalloc.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <vm/vm_kern.h>
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_io.h>
+#include <device/device_reply.user.h>
+#include <device/device_emul.h>
+#include <device/ds_routines.h>
+#include <device/subrs.h>
+#include <intel/pmap.h>
+#include <xen/public/io/netif.h>
+#include <xen/public/memory.h>
+#include <string.h>
+#include <util/atoi.h>
+#include <util/byteorder.h>
+#include "evt.h"
+#include "store.h"
+#include "net.h"
+#include "grant.h"
+#include "ring.h"
+#include "time.h"
+#include "xen.h"
+
+/* Hypervisor part */
+
+#define ADDRESS_SIZE 6
+#define WINDOW __CONST_RING_SIZE(netif_rx, PAGE_SIZE)
+
+struct net_data {
+ struct device device;
+ struct ifnet ifnet;
+ int open_count;
+ char *backend;
+ domid_t domid;
+ char *vif;
+ u_char address[ADDRESS_SIZE];
+ int handle;
+ ipc_port_t port;
+ netif_tx_front_ring_t tx;
+ netif_rx_front_ring_t rx;
+ void *rx_buf[WINDOW];
+ grant_ref_t rx_buf_gnt[WINDOW];
+ unsigned long rx_buf_pfn[WINDOW];
+ int rx_copy;
+ evtchn_port_t evt;
+ simple_lock_data_t lock;
+ simple_lock_data_t pushlock;
+};
+
+static int n_vifs;
+static struct net_data *vif_data;
+
+struct device_emulation_ops hyp_net_emulation_ops;
+
+static int hextoi(char *cp, int *nump)
+{
+ int number;
+ char *original;
+ char c;
+
+ original = cp;
+ for (number = 0, c = *cp | 0x20; (('0' <= c) && (c <= '9')) || (('a' <= c) && (c <= 'f')); c = *(++cp)) {
+ number *= 16;
+ if (c <= '9')
+ number += c - '0';
+ else
+ number += c - 'a' + 10;
+ }
+ if (original == cp)
+ *nump = -1;
+ else
+ *nump = number;
+ return(cp - original);
+}
+
+static void enqueue_rx_buf(struct net_data *nd, int number) {
+ unsigned reqn = nd->rx.req_prod_pvt++;
+ netif_rx_request_t *req = RING_GET_REQUEST(&nd->rx, reqn);
+ grant_ref_t gref;
+
+ assert(number < WINDOW);
+
+ req->id = number;
+ if (nd->rx_copy) {
+ /* Let domD write the data */
+ gref = hyp_grant_give(nd->domid, nd->rx_buf_pfn[number], 0);
+ } else {
+ /* Accept pages from domD */
+ gref = hyp_grant_accept_transfer(nd->domid, nd->rx_buf_pfn[number]);
+ /* give back page */
+ hyp_free_page(nd->rx_buf_pfn[number], nd->rx_buf[number]);
+ }
+
+ req->gref = nd->rx_buf_gnt[number] = gref;
+}
+
+static int recompute_checksum(void *data, int len) {
+ uint16_t *header16 = data;
+ uint8_t *header8 = data;
+ unsigned length, i;
+ uint32_t checksum = 0;
+
+ /* IPv4 header length */
+ length = (header8[0] & 0xf) * 4;
+ if (length < 20)
+ /* Too small for an IP header */
+ return -1;
+ if (length > len)
+ /* Does not fit in the ethernet frame */
+ return -1;
+
+ /* Compute IP header checksum */
+ header16[5] = 0;
+ for (i = 0; i < length/2; i++)
+ checksum += ntohs(header16[i]);
+
+ while (checksum >> 16)
+ checksum = (checksum & 0xffff) + (checksum >> 16);
+
+ header16[5] = htons(~checksum);
+
+ if (header8[9] == 6) {
+ /* Need to fix TCP checksum as well */
+ uint16_t *tcp_header16 = header16 + length/2;
+ uint8_t *tcp_header8 = header8 + length;
+ unsigned tcp_length = ntohs(header16[1]) - length;
+
+ /* Pseudo IP header */
+ checksum = ntohs(header16[6]) + ntohs(header16[7]) +
+ ntohs(header16[8]) + ntohs(header16[9]) +
+ header8[9] + tcp_length;
+
+ tcp_header16[8] = 0;
+ for (i = 0; i < tcp_length / 2; i++)
+ checksum += ntohs(tcp_header16[i]);
+ if (tcp_length & 1)
+ checksum += tcp_header8[tcp_length-1] << 8;
+
+ while (checksum >> 16)
+ checksum = (checksum & 0xffff) + (checksum >> 16);
+
+ tcp_header16[8] = htons(~checksum);
+ } else if (header8[9] == 17) {
+ /* Drop any bogus checksum */
+ uint16_t *udp_header16 = header16 + length/2;
+ udp_header16[3] = 0;
+ }
+
+ return 0;
+}
+
+static void hyp_net_intr(int unit) {
+ ipc_kmsg_t kmsg;
+ struct ether_header *eh;
+ struct packet_header *ph;
+ netif_rx_response_t *rx_rsp;
+ netif_tx_response_t *tx_rsp;
+ void *data;
+ int len, more;
+ struct net_data *nd = &vif_data[unit];
+
+ simple_lock(&nd->lock);
+ if ((nd->rx.sring->rsp_prod - nd->rx.rsp_cons) >= (WINDOW*3)/4)
+ printf("window %ld a bit small!\n", (long) WINDOW);
+
+ more = RING_HAS_UNCONSUMED_RESPONSES(&nd->rx);
+ while (more) {
+ rmb(); /* make sure we see responses */
+ rx_rsp = RING_GET_RESPONSE(&nd->rx, nd->rx.rsp_cons++);
+
+ unsigned number = rx_rsp->id;
+ assert(number < WINDOW);
+ if (nd->rx_copy) {
+ hyp_grant_takeback(nd->rx_buf_gnt[number]);
+ } else {
+ unsigned long mfn = hyp_grant_finish_transfer(nd->rx_buf_gnt[number]);
+#ifdef MACH_PSEUDO_PHYS
+ mfn_list[nd->rx_buf_pfn[number]] = mfn;
+#endif /* MACH_PSEUDO_PHYS */
+ pmap_map_mfn(nd->rx_buf[number], mfn);
+ }
+
+ kmsg = net_kmsg_get();
+ if (!kmsg)
+ /* gasp! Drop */
+ goto drop;
+
+ if (rx_rsp->status <= 0)
+ switch (rx_rsp->status) {
+ case NETIF_RSP_DROPPED:
+ printf("Packet dropped\n");
+ goto drop_kmsg;
+ case NETIF_RSP_ERROR:
+ printf("Packet error\n");
+ goto drop_kmsg;
+ case 0:
+ printf("nul packet\n");
+ goto drop_kmsg;
+ default:
+ printf("Unknown error %d\n", rx_rsp->status);
+ goto drop_kmsg;
+ }
+
+ data = nd->rx_buf[number] + rx_rsp->offset;
+ len = rx_rsp->status;
+
+ if (rx_rsp->flags & NETRXF_csum_blank) {
+ struct ether_header *ether = data;
+
+ if (!(rx_rsp->flags & NETRXF_data_validated)) {
+ printf("packet with no checksum and not validated, dropping it\n");
+ goto drop_kmsg;
+ }
+
+ /* TODO: rather tell pfinet to ignore checksums */
+
+ if (ntohs(ether->ether_type) != 0x0800) {
+ printf("packet with no checksum and not IPv4, dropping it\n");
+ goto drop_kmsg;
+ }
+
+ if (recompute_checksum(data + sizeof(*ether), len - sizeof(*ether)))
+ goto drop_kmsg;
+ }
+
+ eh = (void*) (net_kmsg(kmsg)->header);
+ ph = (void*) (net_kmsg(kmsg)->packet);
+ memcpy(eh, data, sizeof (struct ether_header));
+ memcpy(ph + 1, data + sizeof (struct ether_header), len - sizeof(struct ether_header));
+ RING_FINAL_CHECK_FOR_RESPONSES(&nd->rx, more);
+ enqueue_rx_buf(nd, number);
+ ph->type = eh->ether_type;
+ ph->length = len - sizeof(struct ether_header) + sizeof (struct packet_header);
+
+ net_kmsg(kmsg)->sent = FALSE; /* Mark packet as received. */
+
+ net_packet(&nd->ifnet, kmsg, ph->length, ethernet_priority(kmsg));
+ continue;
+
+drop_kmsg:
+ net_kmsg_put(kmsg);
+drop:
+ RING_FINAL_CHECK_FOR_RESPONSES(&nd->rx, more);
+ enqueue_rx_buf(nd, number);
+ }
+
+ /* commit new requests */
+ int notify;
+ wmb(); /* make sure it sees requests */
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&nd->rx, notify);
+ if (notify)
+ hyp_event_channel_send(nd->evt);
+
+ /* Now the tx side */
+ more = RING_HAS_UNCONSUMED_RESPONSES(&nd->tx);
+ spl_t s = splsched ();
+ while (more) {
+ rmb(); /* make sure we see responses */
+ tx_rsp = RING_GET_RESPONSE(&nd->tx, nd->tx.rsp_cons++);
+ switch (tx_rsp->status) {
+ case NETIF_RSP_DROPPED:
+ printf("Packet dropped\n");
+ break;
+ case NETIF_RSP_ERROR:
+ printf("Packet error\n");
+ break;
+ case NETIF_RSP_OKAY:
+ break;
+ default:
+ printf("Unknown error %d\n", tx_rsp->status);
+ break;
+ }
+ thread_wakeup((event_t) hyp_grant_address(tx_rsp->id));
+ thread_wakeup_one(nd);
+ RING_FINAL_CHECK_FOR_RESPONSES(&nd->tx, more);
+ }
+ splx(s);
+
+ simple_unlock(&nd->lock);
+}
+
+#define VIF_PATH "device/vif"
+void hyp_net_init(void) {
+ char **vifs, **vif;
+ char *c;
+ int i;
+ int n;
+ int grant;
+ char port_name[10];
+ domid_t domid;
+ evtchn_port_t evt;
+ hyp_store_transaction_t t;
+ phys_addr_t addr;
+ struct net_data *nd;
+ struct ifnet *ifp;
+ netif_tx_sring_t *tx_ring;
+ netif_rx_sring_t *rx_ring;
+
+ vifs = hyp_store_ls(0, 1, VIF_PATH);
+ if (!vifs) {
+ printf("eth: No net device (%s). Hoping you don't need any\n", hyp_store_error);
+ n_vifs = 0;
+ return;
+ }
+
+ n = 0;
+ for (vif = vifs; *vif; vif++)
+ n++;
+
+ vif_data = (void*) kalloc(n * sizeof(*vif_data));
+ if (!vif_data) {
+ printf("eth: No memory room for VIF\n");
+ n_vifs = 0;
+ return;
+ }
+ n_vifs = n;
+
+ for (n = 0; n < n_vifs; n++) {
+ nd = &vif_data[n];
+ mach_atoi((u_char *) vifs[n], &nd->handle);
+ if (nd->handle == MACH_ATOI_DEFAULT)
+ continue;
+
+ nd->open_count = -2;
+ nd->vif = vifs[n];
+
+ /* Get domain id of backend driver. */
+ i = hyp_store_read_int(0, 5, VIF_PATH, "/", vifs[n], "/", "backend-id");
+ if (i == -1)
+ panic("eth: couldn't read backend domid of VIF %s (%s)", vifs[n], hyp_store_error);
+ nd->domid = domid = i;
+
+ do {
+ t = hyp_store_transaction_start();
+
+ /* Get a page for tx_ring */
+ if ((addr = vm_page_grab_phys_addr()) == -1)
+ panic("eth: couldn't allocate space for store tx_ring");
+ tx_ring = (void*) phystokv(addr);
+ SHARED_RING_INIT(tx_ring);
+ FRONT_RING_INIT(&nd->tx, tx_ring, PAGE_SIZE);
+ grant = hyp_grant_give(domid, atop(addr), 0);
+
+ /* and give it to backend. */
+ i = sprintf(port_name, "%d", grant);
+ c = hyp_store_write(t, port_name, 5, VIF_PATH, "/", vifs[n], "/", "tx-ring-ref");
+ if (!c)
+ panic("eth: couldn't store tx_ring reference for VIF %s (%s)", vifs[n], hyp_store_error);
+ kfree((vm_offset_t) c, strlen(c)+1);
+
+ /* Get a page for rx_ring */
+ if ((addr = vm_page_grab_phys_addr()) == -1)
+ panic("eth: couldn't allocate space for store rx_ring");
+ rx_ring = (void*) phystokv(addr);
+ SHARED_RING_INIT(rx_ring);
+ FRONT_RING_INIT(&nd->rx, rx_ring, PAGE_SIZE);
+ grant = hyp_grant_give(domid, atop(addr), 0);
+
+ /* and give it to backend. */
+ i = sprintf(port_name, "%d", grant);
+ c = hyp_store_write(t, port_name, 5, VIF_PATH, "/", vifs[n], "/", "rx-ring-ref");
+ if (!c)
+ panic("eth: couldn't store rx_ring reference for VIF %s (%s)", vifs[n], hyp_store_error);
+ kfree((vm_offset_t) c, strlen(c)+1);
+
+ /* tell we need csums. */
+ c = hyp_store_write(t, "1", 5, VIF_PATH, "/", vifs[n], "/", "feature-no-csum-offload");
+ if (!c)
+ panic("eth: couldn't store feature-no-csum-offload reference for VIF %s (%s)", vifs[n], hyp_store_error);
+ kfree((vm_offset_t) c, strlen(c)+1);
+
+ /* Allocate an event channel and give it to backend. */
+ nd->evt = evt = hyp_event_channel_alloc(domid);
+ i = sprintf(port_name, "%u", evt);
+ c = hyp_store_write(t, port_name, 5, VIF_PATH, "/", vifs[n], "/", "event-channel");
+ if (!c)
+ panic("eth: couldn't store event channel for VIF %s (%s)", vifs[n], hyp_store_error);
+ kfree((vm_offset_t) c, strlen(c)+1);
+ c = hyp_store_write(t, hyp_store_state_initialized, 5, VIF_PATH, "/", vifs[n], "/", "state");
+ if (!c)
+ panic("eth: couldn't store state for VIF %s (%s)", vifs[n], hyp_store_error);
+ kfree((vm_offset_t) c, strlen(c)+1);
+ } while ((!hyp_store_transaction_stop(t)));
+ /* TODO randomly wait? */
+
+ c = hyp_store_read(0, 5, VIF_PATH, "/", vifs[n], "/", "backend");
+ if (!c)
+ panic("eth: couldn't get path to VIF %s backend (%s)", vifs[n], hyp_store_error);
+ nd->backend = c;
+
+ while(1) {
+ i = hyp_store_read_int(0, 3, nd->backend, "/", "state");
+ if (i == MACH_ATOI_DEFAULT)
+ panic("can't read state from %s", nd->backend);
+ if (i == XenbusStateInitWait)
+ break;
+ hyp_yield();
+ }
+
+ c = hyp_store_read(0, 3, nd->backend, "/", "mac");
+ if (!c)
+ panic("eth: couldn't get VIF %s's mac (%s)", vifs[n], hyp_store_error);
+
+ for (i=0; ; i++) {
+ int val;
+ hextoi(&c[3*i], &val);
+ if (val == -1)
+ panic("eth: couldn't understand %dth number of VIF %s's mac %s", i, vifs[n], c);
+ nd->address[i] = val;
+ if (i==ADDRESS_SIZE-1)
+ break;
+ if (c[3*i+2] != ':')
+ panic("eth: couldn't understand %dth separator of VIF %s's mac %s", i, vifs[n], c);
+ }
+ kfree((vm_offset_t) c, strlen(c)+1);
+
+ printf("eth%d: dom%d's VIF %s ", n, domid, vifs[n]);
+ for (i=0; ; i++) {
+ printf("%02x", nd->address[i]);
+ if (i==ADDRESS_SIZE-1)
+ break;
+ printf(":");
+ }
+ printf("\n");
+
+ nd->rx_copy = hyp_store_read_int(0, 3, nd->backend, "/", "feature-rx-copy");
+ if (nd->rx_copy == 1) {
+ c = hyp_store_write(0, "1", 5, VIF_PATH, "/", vifs[n], "/", "request-rx-copy");
+ if (!c)
+ panic("eth: couldn't request rx copy feature for VIF %s (%s)", vifs[n], hyp_store_error);
+ } else
+ nd->rx_copy = 0;
+
+ c = hyp_store_write(0, hyp_store_state_connected, 5, VIF_PATH, "/", nd->vif, "/", "state");
+ if (!c)
+ panic("couldn't store state for eth%d (%s)", (int) (nd - vif_data), hyp_store_error);
+ kfree((vm_offset_t) c, strlen(c)+1);
+
+ while(1) {
+ i = hyp_store_read_int(0, 3, nd->backend, "/", "state");
+ if (i == MACH_ATOI_DEFAULT)
+ panic("can't read state from %s", nd->backend);
+ if (i == XenbusStateConnected)
+ break;
+ hyp_yield();
+ }
+
+ /* Get a page for packet reception */
+ for (i= 0; i<WINDOW; i++) {
+ if ((addr = vm_page_grab_phys_addr()) == -1)
+ panic("eth: couldn't allocate space for rx buffers");
+ nd->rx_buf[i] = (void*)phystokv(addr);
+ nd->rx_buf_pfn[i] = atop(addr);
+ if (!nd->rx_copy) {
+ if (hyp_do_update_va_mapping(kvtolin(nd->rx_buf[i]), 0, UVMF_INVLPG|UVMF_ALL))
+ panic("eth: couldn't clear rx kv buf %d at %llx", i, addr);
+ }
+ /* and enqueue it to backend. */
+ enqueue_rx_buf(nd, i);
+ }
+ int notify;
+ wmb(); /* make sure it sees requests */
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&nd->rx, notify);
+ if (notify)
+ hyp_event_channel_send(nd->evt);
+
+
+ nd->open_count = -1;
+ nd->device.emul_ops = &hyp_net_emulation_ops;
+ nd->device.emul_data = nd;
+ simple_lock_init(&nd->lock);
+ simple_lock_init(&nd->pushlock);
+
+ ifp = &nd->ifnet;
+ ifp->if_unit = n;
+ ifp->if_flags = IFF_UP | IFF_RUNNING;
+ ifp->if_header_size = 14;
+ ifp->if_header_format = HDR_ETHERNET;
+ /* Set to the maximum that we can handle in device_write. */
+ ifp->if_mtu = PAGE_SIZE - ifp->if_header_size;
+ ifp->if_address_size = ADDRESS_SIZE;
+ ifp->if_address = (void*) nd->address;
+ if_init_queues (ifp);
+
+ /* Now we can start receiving */
+ hyp_evt_handler(evt, hyp_net_intr, n, SPL6);
+ }
+}
+
+static ipc_port_t
+dev_to_port(void *d)
+{
+ struct net_data *b = d;
+ if (!d)
+ return IP_NULL;
+ return ipc_port_make_send(b->port);
+}
+
+static int
+device_close(void *devp)
+{
+ struct net_data *nd = devp;
+ if (--nd->open_count < 0)
+ panic("too many closes on eth%d", (int) (nd - vif_data));
+ printf("close, eth%d count %d\n", (int) (nd - vif_data), nd->open_count);
+ if (nd->open_count)
+ return 0;
+ ipc_kobject_set(nd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel(nd->port);
+ return 0;
+}
+
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, const char *name, device_t *devp /* out */)
+{
+ int i, n;
+ ipc_port_t port, notify;
+ struct net_data *nd;
+
+ if (name[0] != 'e' || name[1] != 't' || name[2] != 'h' || name[3] < '0' || name[3] > '9')
+ return D_NO_SUCH_DEVICE;
+ i = mach_atoi((u_char *) &name[3], &n);
+ if (n == MACH_ATOI_DEFAULT)
+ return D_NO_SUCH_DEVICE;
+ if (name[3 + i])
+ return D_NO_SUCH_DEVICE;
+ if (n >= n_vifs)
+ return D_NO_SUCH_DEVICE;
+ nd = &vif_data[n];
+ if (nd->open_count == -2)
+ /* couldn't be initialized */
+ return D_NO_SUCH_DEVICE;
+
+ if (nd->open_count >= 0) {
+ *devp = &nd->device ;
+ nd->open_count++ ;
+ printf("re-open, eth%d count %d\n", (int) (nd - vif_data), nd->open_count);
+ return D_SUCCESS;
+ }
+
+ nd->open_count = 1;
+ printf("eth%d count %d\n", (int) (nd - vif_data), nd->open_count);
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL) {
+ device_close (nd);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ nd->port = port;
+
+ *devp = &nd->device;
+
+ ipc_kobject_set (port, (ipc_kobject_t) &nd->device, IKOT_DEVICE);
+
+ notify = ipc_port_make_sonce (nd->port);
+ ip_lock (nd->port);
+ ipc_port_nsrequest (nd->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+
+ if (IP_VALID (reply_port))
+ ds_device_open_reply (reply_port, reply_port_type, D_SUCCESS, dev_to_port(nd));
+ else
+ device_close(nd);
+ return MIG_NO_REPLY;
+}
+
+static io_return_t
+device_write(void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ grant_ref_t gref;
+ struct net_data *nd = d;
+ struct ifnet *ifp = &nd->ifnet;
+ netif_tx_request_t *req;
+ unsigned reqn;
+ vm_offset_t buffer;
+ char *map_data;
+ vm_offset_t map_addr;
+ vm_size_t map_size;
+ kern_return_t kr;
+
+ /* The maximum that we can handle. */
+ assert(ifp->if_header_size + ifp->if_mtu <= PAGE_SIZE);
+
+ if (count < ifp->if_header_size ||
+ count > ifp->if_header_size + ifp->if_mtu)
+ return D_INVALID_SIZE;
+
+ assert(copy->type == VM_MAP_COPY_PAGE_LIST);
+
+ assert(copy->cpy_npages <= 2);
+ assert(copy->cpy_npages >= 1);
+
+ kr = kmem_alloc(device_io_map, &buffer, count);
+
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ kr = kmem_io_map_copyout(device_io_map, (vm_offset_t *)&map_data,
+ &map_addr, &map_size, copy, count);
+
+ if (kr != KERN_SUCCESS) {
+ kmem_free(device_io_map, buffer, count);
+ return kr;
+ }
+
+ memcpy((void *)buffer, map_data, count);
+ kmem_io_map_deallocate(device_io_map, map_addr, map_size);
+
+ /* allocate a request */
+ spl_t spl = splimp();
+ while (1) {
+ simple_lock(&nd->lock);
+ if (!RING_FULL(&nd->tx))
+ break;
+ thread_sleep(nd, &nd->lock, FALSE);
+ }
+ mb();
+ reqn = nd->tx.req_prod_pvt++;
+ simple_lock(&nd->pushlock);
+ simple_unlock(&nd->lock);
+ (void) splx(spl);
+
+ req = RING_GET_REQUEST(&nd->tx, reqn);
+ req->gref = gref = hyp_grant_give(nd->domid, atop(kvtophys(buffer)), 1);
+ req->offset = 0;
+ req->flags = 0;
+ req->id = gref;
+ req->size = count;
+
+ assert_wait(hyp_grant_address(gref), FALSE);
+
+ int notify;
+ wmb(); /* make sure it sees requests */
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&nd->tx, notify);
+ if (notify)
+ hyp_event_channel_send(nd->evt);
+ simple_unlock(&nd->pushlock);
+
+ thread_block(NULL);
+
+ hyp_grant_takeback(gref);
+
+ /* Send packet to filters. */
+ {
+ struct packet_header *packet;
+ struct ether_header *header;
+ ipc_kmsg_t kmsg;
+
+ kmsg = net_kmsg_get ();
+
+ if (kmsg != IKM_NULL)
+ {
+ /* Suitable for Ethernet only. */
+ header = (struct ether_header *) (net_kmsg (kmsg)->header);
+ packet = (struct packet_header *) (net_kmsg (kmsg)->packet);
+ memcpy (header, (void*)buffer, sizeof (struct ether_header));
+
+ /* packet is prefixed with a struct packet_header,
+ see include/device/net_status.h. */
+ memcpy (packet + 1, (void*)buffer + sizeof (struct ether_header),
+ count - sizeof (struct ether_header));
+ packet->length = count - sizeof (struct ether_header)
+ + sizeof (struct packet_header);
+ packet->type = header->ether_type;
+ net_kmsg (kmsg)->sent = TRUE; /* Mark packet as sent. */
+ spl_t s = splimp ();
+ net_packet (&nd->ifnet, kmsg, packet->length,
+ ethernet_priority (kmsg));
+ splx (s);
+ }
+ }
+
+ kmem_free(device_io_map, buffer, count);
+
+ vm_map_copy_discard (copy);
+
+ *bytes_written = count;
+
+ if (IP_VALID(reply_port))
+ ds_device_write_reply (reply_port, reply_port_type, 0, count);
+
+ return MIG_NO_REPLY;
+}
+
+static io_return_t
+device_get_status(void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ struct net_data *nd = d;
+
+ return net_getstat (&nd->ifnet, flavor, status, status_count);
+}
+
+static io_return_t
+device_set_status(void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t count)
+{
+ struct net_data *nd = d;
+
+ switch (flavor)
+ {
+ default:
+ printf("TODO: net_%s(%p, 0x%x)\n", __func__, nd, flavor);
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_set_filter(void *d, ipc_port_t port, int priority,
+ filter_t * filter, unsigned filter_count)
+{
+ struct net_data *nd = d;
+
+ if (!nd)
+ return D_NO_SUCH_DEVICE;
+
+ return net_set_filter (&nd->ifnet, port, priority, filter, filter_count);
+}
+
+struct device_emulation_ops hyp_net_emulation_ops = {
+ NULL, /* dereference */
+ NULL, /* deallocate */
+ dev_to_port,
+ device_open,
+ device_close,
+ device_write,
+ NULL, /* write_inband */
+ NULL,
+ NULL, /* read_inband */
+ device_set_status, /* set_status */
+ device_get_status,
+ device_set_filter, /* set_filter */
+ NULL, /* map */
+ NULL, /* no_senders */
+ NULL, /* write_trap */
+ NULL, /* writev_trap */
+};
diff --git a/xen/net.h b/xen/net.h
new file mode 100644
index 0000000..836e75f
--- /dev/null
+++ b/xen/net.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_NET_H
+#define XEN_NET_H
+
+void hyp_net_init(void);
+
+#endif /* XEN_NET_H */
diff --git a/xen/public/COPYING b/xen/public/COPYING
new file mode 100644
index 0000000..ffc6d61
--- /dev/null
+++ b/xen/public/COPYING
@@ -0,0 +1,38 @@
+XEN NOTICE
+==========
+
+This copyright applies to all files within this subdirectory and its
+subdirectories:
+ include/public/*.h
+ include/public/hvm/*.h
+ include/public/io/*.h
+
+The intention is that these files can be freely copied into the source
+tree of an operating system when porting that OS to run on Xen. Doing
+so does *not* cause the OS to become subject to the terms of the GPL.
+
+All other files in the Xen source distribution are covered by version
+2 of the GNU General Public License except where explicitly stated
+otherwise within individual source files.
+
+ -- Keir Fraser (on behalf of the Xen team)
+
+=====================================================================
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/xen/public/arch-x86/xen-mca.h b/xen/public/arch-x86/xen-mca.h
new file mode 100644
index 0000000..103d41f
--- /dev/null
+++ b/xen/public/arch-x86/xen-mca.h
@@ -0,0 +1,279 @@
+/******************************************************************************
+ * arch-x86/mca.h
+ *
+ * Contributed by Advanced Micro Devices, Inc.
+ * Author: Christoph Egger <Christoph.Egger@amd.com>
+ *
+ * Guest OS machine check interface to x86 Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* Full MCA functionality has the following Usecases from the guest side:
+ *
+ * Must have's:
+ * 1. Dom0 and DomU register machine check trap callback handlers
+ * (already done via "set_trap_table" hypercall)
+ * 2. Dom0 registers machine check event callback handler
+ * (doable via EVTCHNOP_bind_virq)
+ * 3. Dom0 and DomU fetches machine check data
+ * 4. Dom0 wants Xen to notify a DomU
+ * 5. Dom0 gets DomU ID from physical address
+ * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy")
+ *
+ * Nice to have's:
+ * 7. Dom0 wants Xen to deactivate a physical CPU
+ * This is better done as separate task, physical CPU hotplugging,
+ * and hypercall(s) should be sysctl's
+ * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to
+ * move a DomU (or Dom0 itself) away from a malicious page
+ * producing correctable errors.
+ * 9. offlining physical page:
+ * Xen free's and never re-uses a certain physical page.
+ * 10. Testfacility: Allow Dom0 to write values into machine check MSR's
+ * and tell Xen to trigger a machine check
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__
+#define __XEN_PUBLIC_ARCH_X86_MCA_H__
+
+/* Hypercall */
+#define __HYPERVISOR_mca __HYPERVISOR_arch_0
+
+#define XEN_MCA_INTERFACE_VERSION 0x03000001
+
+/* IN: Dom0 calls hypercall from MC event handler. */
+#define XEN_MC_CORRECTABLE 0x0
+/* IN: Dom0/DomU calls hypercall from MC trap handler. */
+#define XEN_MC_TRAP 0x1
+/* XEN_MC_CORRECTABLE and XEN_MC_TRAP are mutually exclusive. */
+
+/* OUT: All is ok */
+#define XEN_MC_OK 0x0
+/* OUT: Domain could not fetch data. */
+#define XEN_MC_FETCHFAILED 0x1
+/* OUT: There was no machine check data to fetch. */
+#define XEN_MC_NODATA 0x2
+/* OUT: Between notification time and this hypercall an other
+ * (most likely) correctable error happened. The fetched data,
+ * does not match the original machine check data. */
+#define XEN_MC_NOMATCH 0x4
+
+/* OUT: DomU did not register MC NMI handler. Try something else. */
+#define XEN_MC_CANNOTHANDLE 0x8
+/* OUT: Notifying DomU failed. Retry later or try something else. */
+#define XEN_MC_NOTDELIVERED 0x10
+/* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */
+
+
+#ifndef __ASSEMBLY__
+
+#define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */
+
+/*
+ * Machine Check Architecure:
+ * structs are read-only and used to report all kinds of
+ * correctable and uncorrectable errors detected by the HW.
+ * Dom0 and DomU: register a handler to get notified.
+ * Dom0 only: Correctable errors are reported via VIRQ_MCA
+ * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers
+ */
+#define MC_TYPE_GLOBAL 0
+#define MC_TYPE_BANK 1
+#define MC_TYPE_EXTENDED 2
+
+struct mcinfo_common {
+ uint16_t type; /* structure type */
+ uint16_t size; /* size of this struct in bytes */
+};
+
+
+#define MC_FLAG_CORRECTABLE (1 << 0)
+#define MC_FLAG_UNCORRECTABLE (1 << 1)
+
+/* contains global x86 mc information */
+struct mcinfo_global {
+ struct mcinfo_common common;
+
+ /* running domain at the time in error (most likely the impacted one) */
+ uint16_t mc_domid;
+ uint32_t mc_socketid; /* physical socket of the physical core */
+ uint16_t mc_coreid; /* physical impacted core */
+ uint16_t mc_core_threadid; /* core thread of physical core */
+ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
+ uint64_t mc_gstatus; /* global status */
+ uint32_t mc_flags;
+};
+
+/* contains bank local x86 mc information */
+struct mcinfo_bank {
+ struct mcinfo_common common;
+
+ uint16_t mc_bank; /* bank nr */
+ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0
+ * and if mc_addr is valid. Never valid on DomU. */
+ uint64_t mc_status; /* bank status */
+ uint64_t mc_addr; /* bank address, only valid
+ * if addr bit is set in mc_status */
+ uint64_t mc_misc;
+};
+
+
+struct mcinfo_msr {
+ uint64_t reg; /* MSR */
+ uint64_t value; /* MSR value */
+};
+
+/* contains mc information from other
+ * or additional mc MSRs */
+struct mcinfo_extended {
+ struct mcinfo_common common;
+
+ /* You can fill up to five registers.
+ * If you need more, then use this structure
+ * multiple times. */
+
+ uint32_t mc_msrs; /* Number of msr with valid values. */
+ struct mcinfo_msr mc_msr[5];
+};
+
+#define MCINFO_HYPERCALLSIZE 1024
+#define MCINFO_MAXSIZE 768
+
+struct mc_info {
+ /* Number of mcinfo_* entries in mi_data */
+ uint32_t mi_nentries;
+
+ uint8_t mi_data[MCINFO_MAXSIZE - sizeof(uint32_t)];
+};
+typedef struct mc_info mc_info_t;
+
+
+
+/*
+ * OS's should use these instead of writing their own lookup function
+ * each with its own bugs and drawbacks.
+ * We use macros instead of static inline functions to allow guests
+ * to include this header in assembly files (*.S).
+ */
+/* Prototype:
+ * uint32_t x86_mcinfo_nentries(struct mc_info *mi);
+ */
+#define x86_mcinfo_nentries(_mi) \
+ (_mi)->mi_nentries
+/* Prototype:
+ * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi);
+ */
+#define x86_mcinfo_first(_mi) \
+ (struct mcinfo_common *)((_mi)->mi_data)
+/* Prototype:
+ * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic);
+ */
+#define x86_mcinfo_next(_mic) \
+ (struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)
+
+/* Prototype:
+ * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type);
+ */
+#define x86_mcinfo_lookup(_ret, _mi, _type) \
+ do { \
+ uint32_t found, i; \
+ struct mcinfo_common *_mic; \
+ \
+ found = 0; \
+ (_ret) = NULL; \
+ if (_mi == NULL) break; \
+ _mic = x86_mcinfo_first(_mi); \
+ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \
+ if (_mic->type == (_type)) { \
+ found = 1; \
+ break; \
+ } \
+ _mic = x86_mcinfo_next(_mic); \
+ } \
+ (_ret) = found ? _mic : NULL; \
+ } while (0)
+
+
+/* Usecase 1
+ * Register machine check trap callback handler
+ * (already done via "set_trap_table" hypercall)
+ */
+
+/* Usecase 2
+ * Dom0 registers machine check event callback handler
+ * done by EVTCHNOP_bind_virq
+ */
+
+/* Usecase 3
+ * Fetch machine check data from hypervisor.
+ * Note, this hypercall is special, because both Dom0 and DomU must use this.
+ */
+#define XEN_MC_fetch 1
+struct xen_mc_fetch {
+ /* IN/OUT variables. */
+ uint32_t flags;
+
+/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
+/* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */
+
+ /* OUT variables. */
+ uint32_t fetch_idx; /* only useful for Dom0 for the notify hypercall */
+ struct mc_info mc_info;
+};
+typedef struct xen_mc_fetch xen_mc_fetch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t);
+
+
+/* Usecase 4
+ * This tells the hypervisor to notify a DomU about the machine check error
+ */
+#define XEN_MC_notifydomain 2
+struct xen_mc_notifydomain {
+ /* IN variables. */
+ uint16_t mc_domid; /* The unprivileged domain to notify. */
+ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify.
+ * Usually echo'd value from the fetch hypercall. */
+ uint32_t fetch_idx; /* echo'd value from the fetch hypercall. */
+
+ /* IN/OUT variables. */
+ uint32_t flags;
+
+/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
+/* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */
+};
+typedef struct xen_mc_notifydomain xen_mc_notifydomain_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t);
+
+
+struct xen_mc {
+ uint32_t cmd;
+ uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */
+ union {
+ struct xen_mc_fetch mc_fetch;
+ struct xen_mc_notifydomain mc_notifydomain;
+ uint8_t pad[MCINFO_HYPERCALLSIZE];
+ } u;
+};
+typedef struct xen_mc xen_mc_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mc_t);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */
diff --git a/xen/public/arch-x86/xen-x86_32.h b/xen/public/arch-x86/xen-x86_32.h
new file mode 100644
index 0000000..7cb6a01
--- /dev/null
+++ b/xen/public/arch-x86/xen-x86_32.h
@@ -0,0 +1,180 @@
+/******************************************************************************
+ * xen-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004-2007, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
+
+/*
+ * Hypercall interface:
+ * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
+ * Output: %eax
+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
+ * call hypercall_page + hypercall-number * 32
+ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
+ */
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030203
+/*
+ * Legacy hypercall interface:
+ * As above, except the entry sequence to the hypervisor is:
+ * mov $hypercall-number*32,%eax ; int $0x82
+ */
+#define TRAP_INSTR "int $0x82"
+#endif
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
+
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
+#define FLAT_USER_CS FLAT_RING3_CS
+#define FLAT_USER_DS FLAT_RING3_DS
+#define FLAT_USER_SS FLAT_RING3_SS
+
+#define __HYPERVISOR_VIRT_START_PAE 0xF5800000
+#define __MACH2PHYS_VIRT_START_PAE 0xF5800000
+#define __MACH2PHYS_VIRT_END_PAE 0xF6800000
+#define HYPERVISOR_VIRT_START_PAE \
+ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_START_PAE \
+ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_END_PAE \
+ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
+
+/* Non-PAE bounds are obsolete. */
+#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
+#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
+#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
+#define HYPERVISOR_VIRT_START_NONPAE \
+ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
+#define MACH2PHYS_VIRT_START_NONPAE \
+ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
+#define MACH2PHYS_VIRT_END_NONPAE \
+ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
+
+#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
+#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
+#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#endif
+
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
+#endif
+
+/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#undef ___DEFINE_XEN_GUEST_HANDLE
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } \
+ __guest_handle_ ## name; \
+ typedef struct { union { type *p; uint64_aligned_t q; }; } \
+ __guest_handle_64_ ## name
+#undef set_xen_guest_handle
+#define set_xen_guest_handle(hnd, val) \
+ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
+ (hnd).p = val; \
+ } while ( 0 )
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
+#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
+#endif
+
+#ifndef __ASSEMBLY__
+
+struct cpu_user_regs {
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t ebp;
+ uint32_t eax;
+ uint16_t error_code; /* private */
+ uint16_t entry_vector; /* private */
+ uint32_t eip;
+ uint16_t cs;
+ uint8_t saved_upcall_mask;
+ uint8_t _pad0;
+ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
+ uint32_t esp;
+ uint16_t ss, _pad1;
+ uint16_t es, _pad2;
+ uint16_t ds, _pad3;
+ uint16_t fs, _pad4;
+ uint16_t gs, _pad5;
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct xen_callback {
+ unsigned long cs;
+ unsigned long eip;
+};
+typedef struct xen_callback xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/arch-x86/xen-x86_64.h b/xen/public/arch-x86/xen-x86_64.h
new file mode 100644
index 0000000..1e54cf9
--- /dev/null
+++ b/xen/public/arch-x86/xen-x86_64.h
@@ -0,0 +1,212 @@
+/******************************************************************************
+ * xen-x86_64.h
+ *
+ * Guest OS interface to x86 64-bit Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
+
+/*
+ * Hypercall interface:
+ * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
+ * Output: %rax
+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
+ * call hypercall_page + hypercall-number * 32
+ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
+ */
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030203
+/*
+ * Legacy hypercall interface:
+ * As above, except the entry sequence to the hypervisor is:
+ * mov $hypercall-number*32,%eax ; syscall
+ * Clobbered: %rcx, %r11, argument registers (as above)
+ */
+#define TRAP_INSTR "syscall"
+#endif
+
+/*
+ * 64-bit segment selectors
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+
+#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
+#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
+#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
+#define FLAT_RING3_DS64 0x0000 /* NULL selector */
+#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
+#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
+
+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
+
+#define FLAT_USER_DS64 FLAT_RING3_DS64
+#define FLAT_USER_DS32 FLAT_RING3_DS32
+#define FLAT_USER_DS FLAT_USER_DS64
+#define FLAT_USER_CS64 FLAT_RING3_CS64
+#define FLAT_USER_CS32 FLAT_RING3_CS32
+#define FLAT_USER_CS FLAT_USER_CS64
+#define FLAT_USER_SS64 FLAT_RING3_SS64
+#define FLAT_USER_SS32 FLAT_RING3_SS32
+#define FLAT_USER_SS FLAT_USER_SS64
+
+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
+#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
+#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
+#endif
+
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+/*
+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
+ * @which == SEGBASE_* ; @base == 64-bit base address
+ * Returns 0 on success.
+ */
+#define SEGBASE_FS 0
+#define SEGBASE_GS_USER 1
+#define SEGBASE_GS_KERNEL 2
+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
+
+/*
+ * int HYPERVISOR_iret(void)
+ * All arguments are on the kernel stack, in the following format.
+ * Never returns if successful. Current kernel context is lost.
+ * The saved CS is mapped as follows:
+ * RING0 -> RING3 kernel mode.
+ * RING1 -> RING3 kernel mode.
+ * RING2 -> RING3 kernel mode.
+ * RING3 -> RING3 user mode.
+ * However RING0 indicates that the guest kernel should return to itself
+ * directly with
+ * orb $3,1*8(%rsp)
+ * iretq
+ * If flags contains VGCF_in_syscall:
+ * Restore RAX, RIP, RFLAGS, RSP.
+ * Discard R11, RCX, CS, SS.
+ * Otherwise:
+ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
+ * All other registers are saved on hypercall entry and restored to user.
+ */
+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL VGCF_in_syscall
+
+#ifndef __ASSEMBLY__
+
+struct iret_context {
+ /* Top of stack (%rsp at point of hypercall). */
+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+ /* Bottom of iret stack frame. */
+};
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
+#define __DECL_REG(name) union { \
+ uint64_t r ## name, e ## name; \
+ uint32_t _e ## name; \
+}
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
+#define __DECL_REG(name) uint64_t r ## name
+#endif
+
+struct cpu_user_regs {
+ uint64_t r15;
+ uint64_t r14;
+ uint64_t r13;
+ uint64_t r12;
+ __DECL_REG(bp);
+ __DECL_REG(bx);
+ uint64_t r11;
+ uint64_t r10;
+ uint64_t r9;
+ uint64_t r8;
+ __DECL_REG(ax);
+ __DECL_REG(cx);
+ __DECL_REG(dx);
+ __DECL_REG(si);
+ __DECL_REG(di);
+ uint32_t error_code; /* private */
+ uint32_t entry_vector; /* private */
+ __DECL_REG(ip);
+ uint16_t cs, _pad0[1];
+ uint8_t saved_upcall_mask;
+ uint8_t _pad1[3];
+ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
+ __DECL_REG(sp);
+ uint16_t ss, _pad2[3];
+ uint16_t es, _pad3[3];
+ uint16_t ds, _pad4[3];
+ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
+ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+#undef __DECL_REG
+
+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+typedef unsigned long xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/arch-x86/xen.h b/xen/public/arch-x86/xen.h
new file mode 100644
index 0000000..084348f
--- /dev/null
+++ b/xen/public/arch-x86/xen.h
@@ -0,0 +1,204 @@
+/******************************************************************************
+ * arch-x86/xen.h
+ *
+ * Guest OS interface to x86 Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#include "../xen.h"
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_H__
+
+/* Structural guest handles introduced in 0x00030201. */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
+#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
+#ifdef __XEN_TOOLS__
+#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
+#endif
+
+#if defined(__i386__)
+#include "xen-x86_32.h"
+#elif defined(__x86_64__)
+#include "xen-x86_64.h"
+#endif
+
+#ifndef __ASSEMBLY__
+typedef unsigned long xen_pfn_t;
+#define PRI_xen_pfn "lx"
+#endif
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
+ */
+#define FIRST_RESERVED_GDT_PAGE 14
+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
+
+/* Machine check support */
+#include "xen-mca.h"
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long xen_ulong_t;
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table().
+ * The privilege level specifies which modes may enter a trap via a software
+ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
+ * privilege levels as follows:
+ * Level == 0: No one may enter
+ * Level == 1: Kernel may enter
+ * Level == 2: Kernel may enter
+ * Level == 3: Everyone may enter
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
+struct trap_info {
+ uint8_t vector; /* exception vector */
+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
+ uint16_t cs; /* code selector */
+ unsigned long address; /* code offset */
+};
+typedef struct trap_info trap_info_t;
+DEFINE_XEN_GUEST_HANDLE(trap_info_t);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+struct vcpu_guest_context {
+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
+#define VGCF_I387_VALID (1<<0)
+#define VGCF_IN_KERNEL (1<<2)
+#define _VGCF_i387_valid 0
+#define VGCF_i387_valid (1<<_VGCF_i387_valid)
+#define _VGCF_in_kernel 2
+#define VGCF_in_kernel (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
+#define _VGCF_syscall_disables_events 4
+#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
+#define _VGCF_online 5
+#define VGCF_online (1<<_VGCF_online)
+ unsigned long flags; /* VGCF_* flags */
+ struct cpu_user_regs user_regs; /* User-level CPU registers */
+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+#ifdef __i386__
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+#else
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_eip;
+#ifdef __XEN__
+ union {
+ unsigned long syscall_callback_eip;
+ struct {
+ unsigned int event_callback_cs; /* compat CS of event cb */
+ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
+ };
+ };
+#else
+ unsigned long syscall_callback_eip;
+#endif
+#endif
+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+#ifdef __x86_64__
+ /* Segment base addresses. */
+ uint64_t fs_base;
+ uint64_t gs_base_kernel;
+ uint64_t gs_base_user;
+#endif
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+struct arch_shared_info {
+ unsigned long max_pfn; /* max pfn that appears in table */
+ /* Frame containing list of mfns containing list of mfns containing p2m. */
+ xen_pfn_t pfn_to_mfn_frame_list_list;
+ unsigned long nmi_reason;
+ uint64_t pad[32];
+};
+typedef struct arch_shared_info arch_shared_info_t;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+#ifdef __ASSEMBLY__
+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
+#else
+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
+#endif
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/arch-x86_32.h b/xen/public/arch-x86_32.h
new file mode 100644
index 0000000..45842b2
--- /dev/null
+++ b/xen/public/arch-x86_32.h
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * arch-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#include "arch-x86/xen.h"
diff --git a/xen/public/arch-x86_64.h b/xen/public/arch-x86_64.h
new file mode 100644
index 0000000..fbb2639
--- /dev/null
+++ b/xen/public/arch-x86_64.h
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * arch-x86_64.h
+ *
+ * Guest OS interface to x86 64-bit Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#include "arch-x86/xen.h"
diff --git a/xen/public/callback.h b/xen/public/callback.h
new file mode 100644
index 0000000..f4962f6
--- /dev/null
+++ b/xen/public/callback.h
@@ -0,0 +1,121 @@
+/******************************************************************************
+ * callback.h
+ *
+ * Register guest OS callbacks with Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Ian Campbell
+ */
+
+#ifndef __XEN_PUBLIC_CALLBACK_H__
+#define __XEN_PUBLIC_CALLBACK_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ * long callback_op(int cmd, void *extra_args)
+ * @cmd == CALLBACKOP_??? (callback operation).
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/* ia64, x86: Callback for event delivery. */
+#define CALLBACKTYPE_event 0
+
+/* x86: Failsafe callback when guest state cannot be restored by Xen. */
+#define CALLBACKTYPE_failsafe 1
+
+/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
+#define CALLBACKTYPE_syscall 2
+
+/*
+ * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
+ * feature is enabled. Do not use this callback type in new code.
+ */
+#define CALLBACKTYPE_sysenter_deprecated 3
+
+/* x86: Callback for NMI delivery. */
+#define CALLBACKTYPE_nmi 4
+
+/*
+ * x86: sysenter is only available as follows:
+ * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
+ * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
+ * ('32-on-32-on-64', '32-on-64-on-64')
+ * [nb. also 64-bit guest applications on Intel CPUs
+ * ('64-on-64-on-64'), but syscall is preferred]
+ */
+#define CALLBACKTYPE_sysenter 5
+
+/*
+ * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
+ * ('32-on-32-on-64', '32-on-64-on-64')
+ */
+#define CALLBACKTYPE_syscall32 7
+
+/*
+ * Disable event delivery during callback? This flag is ignored for event and
+ * NMI callbacks: event delivery is unconditionally disabled.
+ */
+#define _CALLBACKF_mask_events 0
+#define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events)
+
+/*
+ * Register a callback.
+ */
+#define CALLBACKOP_register 0
+struct callback_register {
+ uint16_t type;
+ uint16_t flags;
+ xen_callback_t address;
+};
+typedef struct callback_register callback_register_t;
+DEFINE_XEN_GUEST_HANDLE(callback_register_t);
+
+/*
+ * Unregister a callback.
+ *
+ * Not all callbacks can be unregistered. -EINVAL will be returned if
+ * you attempt to unregister such a callback.
+ */
+#define CALLBACKOP_unregister 1
+struct callback_unregister {
+ uint16_t type;
+ uint16_t _unused;
+};
+typedef struct callback_unregister callback_unregister_t;
+DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030207
+#undef CALLBACKTYPE_sysenter
+#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
+#endif
+
+#endif /* __XEN_PUBLIC_CALLBACK_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/dom0_ops.h b/xen/public/dom0_ops.h
new file mode 100644
index 0000000..5d2b324
--- /dev/null
+++ b/xen/public/dom0_ops.h
@@ -0,0 +1,120 @@
+/******************************************************************************
+ * dom0_ops.h
+ *
+ * Process command requests from domain-0 guest OS.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2002-2003, B Dragovic
+ * Copyright (c) 2002-2006, K Fraser
+ */
+
+#ifndef __XEN_PUBLIC_DOM0_OPS_H__
+#define __XEN_PUBLIC_DOM0_OPS_H__
+
+#include "xen.h"
+#include "platform.h"
+
+#if __XEN_INTERFACE_VERSION__ >= 0x00030204
+#error "dom0_ops.h is a compatibility interface only"
+#endif
+
+#define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION
+
+#define DOM0_SETTIME XENPF_settime
+#define dom0_settime xenpf_settime
+#define dom0_settime_t xenpf_settime_t
+
+#define DOM0_ADD_MEMTYPE XENPF_add_memtype
+#define dom0_add_memtype xenpf_add_memtype
+#define dom0_add_memtype_t xenpf_add_memtype_t
+
+#define DOM0_DEL_MEMTYPE XENPF_del_memtype
+#define dom0_del_memtype xenpf_del_memtype
+#define dom0_del_memtype_t xenpf_del_memtype_t
+
+#define DOM0_READ_MEMTYPE XENPF_read_memtype
+#define dom0_read_memtype xenpf_read_memtype
+#define dom0_read_memtype_t xenpf_read_memtype_t
+
+#define DOM0_MICROCODE XENPF_microcode_update
+#define dom0_microcode xenpf_microcode_update
+#define dom0_microcode_t xenpf_microcode_update_t
+
+#define DOM0_PLATFORM_QUIRK XENPF_platform_quirk
+#define dom0_platform_quirk xenpf_platform_quirk
+#define dom0_platform_quirk_t xenpf_platform_quirk_t
+
+typedef uint64_t cpumap_t;
+
+/* Unsupported legacy operation -- defined for API compatibility. */
+#define DOM0_MSR 15
+struct dom0_msr {
+ /* IN variables. */
+ uint32_t write;
+ cpumap_t cpu_mask;
+ uint32_t msr;
+ uint32_t in1;
+ uint32_t in2;
+ /* OUT variables. */
+ uint32_t out1;
+ uint32_t out2;
+};
+typedef struct dom0_msr dom0_msr_t;
+DEFINE_XEN_GUEST_HANDLE(dom0_msr_t);
+
+/* Unsupported legacy operation -- defined for API compatibility. */
+#define DOM0_PHYSICAL_MEMORY_MAP 40
+struct dom0_memory_map_entry {
+ uint64_t start, end;
+ uint32_t flags; /* reserved */
+ uint8_t is_ram;
+};
+typedef struct dom0_memory_map_entry dom0_memory_map_entry_t;
+DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t);
+
+struct dom0_op {
+ uint32_t cmd;
+ uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
+ union {
+ struct dom0_msr msr;
+ struct dom0_settime settime;
+ struct dom0_add_memtype add_memtype;
+ struct dom0_del_memtype del_memtype;
+ struct dom0_read_memtype read_memtype;
+ struct dom0_microcode microcode;
+ struct dom0_platform_quirk platform_quirk;
+ struct dom0_memory_map_entry physical_memory_map;
+ uint8_t pad[128];
+ } u;
+};
+typedef struct dom0_op dom0_op_t;
+DEFINE_XEN_GUEST_HANDLE(dom0_op_t);
+
+#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/domctl.h b/xen/public/domctl.h
new file mode 100644
index 0000000..b7075ac
--- /dev/null
+++ b/xen/public/domctl.h
@@ -0,0 +1,680 @@
+/******************************************************************************
+ * domctl.h
+ *
+ * Domain management operations. For use by node control stack.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2002-2003, B Dragovic
+ * Copyright (c) 2002-2006, K Fraser
+ */
+
+#ifndef __XEN_PUBLIC_DOMCTL_H__
+#define __XEN_PUBLIC_DOMCTL_H__
+
+#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
+#error "domctl operations are intended for use by node control tools only"
+#endif
+
+#include "xen.h"
+
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
+
+struct xenctl_cpumap {
+ XEN_GUEST_HANDLE_64(uint8) bitmap;
+ uint32_t nr_cpus;
+};
+
+/*
+ * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
+ * If it is specified as zero, an id is auto-allocated and returned.
+ */
+#define XEN_DOMCTL_createdomain 1
+struct xen_domctl_createdomain {
+ /* IN parameters */
+ uint32_t ssidref;
+ xen_domain_handle_t handle;
+ /* Is this an HVM guest (as opposed to a PV guest)? */
+#define _XEN_DOMCTL_CDF_hvm_guest 0
+#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
+ /* Use hardware-assisted paging if available? */
+#define _XEN_DOMCTL_CDF_hap 1
+#define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap)
+ uint32_t flags;
+};
+typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
+
+#define XEN_DOMCTL_destroydomain 2
+#define XEN_DOMCTL_pausedomain 3
+#define XEN_DOMCTL_unpausedomain 4
+#define XEN_DOMCTL_resumedomain 27
+
+#define XEN_DOMCTL_getdomaininfo 5
+struct xen_domctl_getdomaininfo {
+ /* OUT variables. */
+ domid_t domain; /* Also echoed in domctl.domain */
+ /* Domain is scheduled to die. */
+#define _XEN_DOMINF_dying 0
+#define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying)
+ /* Domain is an HVM guest (as opposed to a PV guest). */
+#define _XEN_DOMINF_hvm_guest 1
+#define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest)
+ /* The guest OS has shut down. */
+#define _XEN_DOMINF_shutdown 2
+#define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown)
+ /* Currently paused by control software. */
+#define _XEN_DOMINF_paused 3
+#define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused)
+ /* Currently blocked pending an event. */
+#define _XEN_DOMINF_blocked 4
+#define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked)
+ /* Domain is currently running. */
+#define _XEN_DOMINF_running 5
+#define XEN_DOMINF_running (1U<<_XEN_DOMINF_running)
+ /* Being debugged. */
+#define _XEN_DOMINF_debugged 6
+#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged)
+ /* CPU to which this domain is bound. */
+#define XEN_DOMINF_cpumask 255
+#define XEN_DOMINF_cpushift 8
+ /* XEN_DOMINF_shutdown guest-supplied code. */
+#define XEN_DOMINF_shutdownmask 255
+#define XEN_DOMINF_shutdownshift 16
+ uint32_t flags; /* XEN_DOMINF_* */
+ uint64_aligned_t tot_pages;
+ uint64_aligned_t max_pages;
+ uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
+ uint64_aligned_t cpu_time;
+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
+ uint32_t ssidref;
+ xen_domain_handle_t handle;
+};
+typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
+
+
+#define XEN_DOMCTL_getmemlist 6
+struct xen_domctl_getmemlist {
+ /* IN variables. */
+ /* Max entries to write to output buffer. */
+ uint64_aligned_t max_pfns;
+ /* Start index in guest's page list. */
+ uint64_aligned_t start_pfn;
+ XEN_GUEST_HANDLE_64(uint64) buffer;
+ /* OUT variables. */
+ uint64_aligned_t num_pfns;
+};
+typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
+
+
+#define XEN_DOMCTL_getpageframeinfo 7
+
+#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
+#define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28)
+#define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28)
+#define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28)
+#define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28)
+#define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28)
+#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
+#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
+#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
+#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
+
+struct xen_domctl_getpageframeinfo {
+ /* IN variables. */
+ uint64_aligned_t gmfn; /* GMFN to query */
+ /* OUT variables. */
+ /* Is the page PINNED to a type? */
+ uint32_t type; /* see above type defs */
+};
+typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
+
+
+#define XEN_DOMCTL_getpageframeinfo2 8
+struct xen_domctl_getpageframeinfo2 {
+ /* IN variables. */
+ uint64_aligned_t num;
+ /* IN/OUT variables. */
+ XEN_GUEST_HANDLE_64(uint32) array;
+};
+typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
+
+
+/*
+ * Control shadow pagetables operation
+ */
+#define XEN_DOMCTL_shadow_op 10
+
+/* Disable shadow mode. */
+#define XEN_DOMCTL_SHADOW_OP_OFF 0
+
+/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
+#define XEN_DOMCTL_SHADOW_OP_ENABLE 32
+
+/* Log-dirty bitmap operations. */
+ /* Return the bitmap and clean internal copy for next round. */
+#define XEN_DOMCTL_SHADOW_OP_CLEAN 11
+ /* Return the bitmap but do not modify internal copy. */
+#define XEN_DOMCTL_SHADOW_OP_PEEK 12
+
+/* Memory allocation accessors. */
+#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30
+#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31
+
+/* Legacy enable operations. */
+ /* Equiv. to ENABLE with no mode flags. */
+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1
+ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
+#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2
+ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
+#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3
+
+/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
+ /*
+ * Shadow pagetables are refcounted: guest does not use explicit mmu
+ * operations nor write-protect its pagetables.
+ */
+#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1)
+ /*
+ * Log pages in a bitmap as they are dirtied.
+ * Used for live relocation to determine which pages must be re-sent.
+ */
+#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
+ /*
+ * Automatically translate GPFNs into MFNs.
+ */
+#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
+ /*
+ * Xen does not steal virtual address space from the guest.
+ * Requires HVM support.
+ */
+#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4)
+
+struct xen_domctl_shadow_op_stats {
+ uint32_t fault_count;
+ uint32_t dirty_count;
+};
+typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
+
+struct xen_domctl_shadow_op {
+ /* IN variables. */
+ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */
+
+ /* OP_ENABLE */
+ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */
+
+ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
+ uint32_t mb; /* Shadow memory allocation in MB */
+
+ /* OP_PEEK / OP_CLEAN */
+ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
+ uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
+ struct xen_domctl_shadow_op_stats stats;
+};
+typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
+
+
+#define XEN_DOMCTL_max_mem 11
+struct xen_domctl_max_mem {
+ /* IN variables. */
+ uint64_aligned_t max_memkb;
+};
+typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
+
+
+#define XEN_DOMCTL_setvcpucontext 12
+#define XEN_DOMCTL_getvcpucontext 13
+struct xen_domctl_vcpucontext {
+ uint32_t vcpu; /* IN */
+ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
+};
+typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
+
+
+#define XEN_DOMCTL_getvcpuinfo 14
+struct xen_domctl_getvcpuinfo {
+ /* IN variables. */
+ uint32_t vcpu;
+ /* OUT variables. */
+ uint8_t online; /* currently online (not hotplugged)? */
+ uint8_t blocked; /* blocked waiting for an event? */
+ uint8_t running; /* currently scheduled on its CPU? */
+ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */
+ uint32_t cpu; /* current mapping */
+};
+typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
+
+
+/* Get/set which physical cpus a vcpu can execute on. */
+#define XEN_DOMCTL_setvcpuaffinity 9
+#define XEN_DOMCTL_getvcpuaffinity 25
+struct xen_domctl_vcpuaffinity {
+ uint32_t vcpu; /* IN */
+ struct xenctl_cpumap cpumap; /* IN/OUT */
+};
+typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
+
+
+#define XEN_DOMCTL_max_vcpus 15
+struct xen_domctl_max_vcpus {
+ uint32_t max; /* maximum number of vcpus */
+};
+typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
+
+
+#define XEN_DOMCTL_scheduler_op 16
+/* Scheduler types. */
+#define XEN_SCHEDULER_SEDF 4
+#define XEN_SCHEDULER_CREDIT 5
+/* Set or get info? */
+#define XEN_DOMCTL_SCHEDOP_putinfo 0
+#define XEN_DOMCTL_SCHEDOP_getinfo 1
+struct xen_domctl_scheduler_op {
+ uint32_t sched_id; /* XEN_SCHEDULER_* */
+ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
+ union {
+ struct xen_domctl_sched_sedf {
+ uint64_aligned_t period;
+ uint64_aligned_t slice;
+ uint64_aligned_t latency;
+ uint32_t extratime;
+ uint32_t weight;
+ } sedf;
+ struct xen_domctl_sched_credit {
+ uint16_t weight;
+ uint16_t cap;
+ } credit;
+ } u;
+};
+typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
+
+
+#define XEN_DOMCTL_setdomainhandle 17
+struct xen_domctl_setdomainhandle {
+ xen_domain_handle_t handle;
+};
+typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
+
+
+#define XEN_DOMCTL_setdebugging 18
+struct xen_domctl_setdebugging {
+ uint8_t enable;
+};
+typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
+
+
+#define XEN_DOMCTL_irq_permission 19
+struct xen_domctl_irq_permission {
+ uint8_t pirq;
+ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
+};
+typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
+
+
+#define XEN_DOMCTL_iomem_permission 20
+struct xen_domctl_iomem_permission {
+ uint64_aligned_t first_mfn;/* first page (physical page number) in range */
+ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
+ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
+};
+typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
+
+
+#define XEN_DOMCTL_ioport_permission 21
+struct xen_domctl_ioport_permission {
+ uint32_t first_port; /* first port int range */
+ uint32_t nr_ports; /* size of port range */
+ uint8_t allow_access; /* allow or deny access to range? */
+};
+typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
+
+
+#define XEN_DOMCTL_hypercall_init 22
+struct xen_domctl_hypercall_init {
+ uint64_aligned_t gmfn; /* GMFN to be initialised */
+};
+typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
+
+
+#define XEN_DOMCTL_arch_setup 23
+#define _XEN_DOMAINSETUP_hvm_guest 0
+#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
+#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
+#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
+#define _XEN_DOMAINSETUP_sioemu_guest 2
+#define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest)
+typedef struct xen_domctl_arch_setup {
+ uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
+#ifdef __ia64__
+ uint64_aligned_t bp; /* mpaddr of boot param area */
+ uint64_aligned_t maxmem; /* Highest memory address for MDT. */
+ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
+ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
+ int8_t vhpt_size_log2; /* Log2 of VHPT size. */
+#endif
+} xen_domctl_arch_setup_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
+
+
+#define XEN_DOMCTL_settimeoffset 24
+struct xen_domctl_settimeoffset {
+ int32_t time_offset_seconds; /* applied to domain wallclock time */
+};
+typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
+
+
+#define XEN_DOMCTL_gethvmcontext 33
+#define XEN_DOMCTL_sethvmcontext 34
+typedef struct xen_domctl_hvmcontext {
+ uint32_t size; /* IN/OUT: size of buffer / bytes filled */
+ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
+ * gethvmcontext with NULL
+ * buffer to get size req'd */
+} xen_domctl_hvmcontext_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
+
+
+#define XEN_DOMCTL_set_address_size 35
+#define XEN_DOMCTL_get_address_size 36
+typedef struct xen_domctl_address_size {
+ uint32_t size;
+} xen_domctl_address_size_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
+
+
+#define XEN_DOMCTL_real_mode_area 26
+struct xen_domctl_real_mode_area {
+ uint32_t log; /* log2 of Real Mode Area size */
+};
+typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
+
+
+#define XEN_DOMCTL_sendtrigger 28
+#define XEN_DOMCTL_SENDTRIGGER_NMI 0
+#define XEN_DOMCTL_SENDTRIGGER_RESET 1
+#define XEN_DOMCTL_SENDTRIGGER_INIT 2
+struct xen_domctl_sendtrigger {
+ uint32_t trigger; /* IN */
+ uint32_t vcpu; /* IN */
+};
+typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
+
+
+/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
+#define XEN_DOMCTL_assign_device 37
+#define XEN_DOMCTL_test_assign_device 45
+#define XEN_DOMCTL_deassign_device 47
+struct xen_domctl_assign_device {
+ uint32_t machine_bdf; /* machine PCI ID of assigned device */
+};
+typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
+
+/* Retrieve sibling devices infomation of machine_bdf */
+#define XEN_DOMCTL_get_device_group 50
+struct xen_domctl_get_device_group {
+ uint32_t machine_bdf; /* IN */
+ uint32_t max_sdevs; /* IN */
+ uint32_t num_sdevs; /* OUT */
+ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */
+};
+typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
+
+/* Pass-through interrupts: bind real irq -> hvm devfn. */
+#define XEN_DOMCTL_bind_pt_irq 38
+#define XEN_DOMCTL_unbind_pt_irq 48
+typedef enum pt_irq_type_e {
+ PT_IRQ_TYPE_PCI,
+ PT_IRQ_TYPE_ISA,
+ PT_IRQ_TYPE_MSI,
+} pt_irq_type_t;
+struct xen_domctl_bind_pt_irq {
+ uint32_t machine_irq;
+ pt_irq_type_t irq_type;
+ uint32_t hvm_domid;
+
+ union {
+ struct {
+ uint8_t isa_irq;
+ } isa;
+ struct {
+ uint8_t bus;
+ uint8_t device;
+ uint8_t intx;
+ } pci;
+ struct {
+ uint8_t gvec;
+ uint32_t gflags;
+ } msi;
+ } u;
+};
+typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
+
+
+/* Bind machine I/O address range -> HVM address range. */
+#define XEN_DOMCTL_memory_mapping 39
+#define DPCI_ADD_MAPPING 1
+#define DPCI_REMOVE_MAPPING 0
+struct xen_domctl_memory_mapping {
+ uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
+ uint64_aligned_t first_mfn; /* first page (machine page) in range */
+ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
+ uint32_t add_mapping; /* add or remove mapping */
+ uint32_t padding; /* padding for 64-bit aligned structure */
+};
+typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
+
+
+/* Bind machine I/O port range -> HVM I/O port range. */
+#define XEN_DOMCTL_ioport_mapping 40
+struct xen_domctl_ioport_mapping {
+ uint32_t first_gport; /* first guest IO port*/
+ uint32_t first_mport; /* first machine IO port */
+ uint32_t nr_ports; /* size of port range */
+ uint32_t add_mapping; /* add or remove mapping */
+};
+typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
+
+
+/*
+ * Pin caching type of RAM space for x86 HVM domU.
+ */
+#define XEN_DOMCTL_pin_mem_cacheattr 41
+/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
+#define XEN_DOMCTL_MEM_CACHEATTR_UC 0
+#define XEN_DOMCTL_MEM_CACHEATTR_WC 1
+#define XEN_DOMCTL_MEM_CACHEATTR_WT 4
+#define XEN_DOMCTL_MEM_CACHEATTR_WP 5
+#define XEN_DOMCTL_MEM_CACHEATTR_WB 6
+#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
+struct xen_domctl_pin_mem_cacheattr {
+ uint64_aligned_t start, end;
+ unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
+};
+typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
+
+
+#define XEN_DOMCTL_set_ext_vcpucontext 42
+#define XEN_DOMCTL_get_ext_vcpucontext 43
+struct xen_domctl_ext_vcpucontext {
+ /* IN: VCPU that this call applies to. */
+ uint32_t vcpu;
+ /*
+ * SET: Size of struct (IN)
+ * GET: Size of struct (OUT)
+ */
+ uint32_t size;
+#if defined(__i386__) || defined(__x86_64__)
+ /* SYSCALL from 32-bit mode and SYSENTER callback information. */
+ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */
+ uint64_aligned_t syscall32_callback_eip;
+ uint64_aligned_t sysenter_callback_eip;
+ uint16_t syscall32_callback_cs;
+ uint16_t sysenter_callback_cs;
+ uint8_t syscall32_disables_events;
+ uint8_t sysenter_disables_events;
+#endif
+};
+typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
+
+/*
+ * Set optimizaton features for a domain
+ */
+#define XEN_DOMCTL_set_opt_feature 44
+struct xen_domctl_set_opt_feature {
+#if defined(__ia64__)
+ struct xen_ia64_opt_feature optf;
+#else
+ /* Make struct non-empty: do not depend on this field name! */
+ uint64_t dummy;
+#endif
+};
+typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
+
+/*
+ * Set the target domain for a domain
+ */
+#define XEN_DOMCTL_set_target 46
+struct xen_domctl_set_target {
+ domid_t target;
+};
+typedef struct xen_domctl_set_target xen_domctl_set_target_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
+
+#if defined(__i386__) || defined(__x86_64__)
+# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
+# define XEN_DOMCTL_set_cpuid 49
+struct xen_domctl_cpuid {
+ unsigned int input[2];
+ unsigned int eax;
+ unsigned int ebx;
+ unsigned int ecx;
+ unsigned int edx;
+};
+typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
+#endif
+
+#define XEN_DOMCTL_subscribe 29
+struct xen_domctl_subscribe {
+ uint32_t port; /* IN */
+};
+typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
+
+/*
+ * Define the maximum machine address size which should be allocated
+ * to a guest.
+ */
+#define XEN_DOMCTL_set_machine_address_size 51
+#define XEN_DOMCTL_get_machine_address_size 52
+
+/*
+ * Do not inject spurious page faults into this domain.
+ */
+#define XEN_DOMCTL_suppress_spurious_page_faults 53
+
+struct xen_domctl {
+ uint32_t cmd;
+ uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
+ domid_t domain;
+ union {
+ struct xen_domctl_createdomain createdomain;
+ struct xen_domctl_getdomaininfo getdomaininfo;
+ struct xen_domctl_getmemlist getmemlist;
+ struct xen_domctl_getpageframeinfo getpageframeinfo;
+ struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
+ struct xen_domctl_vcpuaffinity vcpuaffinity;
+ struct xen_domctl_shadow_op shadow_op;
+ struct xen_domctl_max_mem max_mem;
+ struct xen_domctl_vcpucontext vcpucontext;
+ struct xen_domctl_getvcpuinfo getvcpuinfo;
+ struct xen_domctl_max_vcpus max_vcpus;
+ struct xen_domctl_scheduler_op scheduler_op;
+ struct xen_domctl_setdomainhandle setdomainhandle;
+ struct xen_domctl_setdebugging setdebugging;
+ struct xen_domctl_irq_permission irq_permission;
+ struct xen_domctl_iomem_permission iomem_permission;
+ struct xen_domctl_ioport_permission ioport_permission;
+ struct xen_domctl_hypercall_init hypercall_init;
+ struct xen_domctl_arch_setup arch_setup;
+ struct xen_domctl_settimeoffset settimeoffset;
+ struct xen_domctl_real_mode_area real_mode_area;
+ struct xen_domctl_hvmcontext hvmcontext;
+ struct xen_domctl_address_size address_size;
+ struct xen_domctl_sendtrigger sendtrigger;
+ struct xen_domctl_get_device_group get_device_group;
+ struct xen_domctl_assign_device assign_device;
+ struct xen_domctl_bind_pt_irq bind_pt_irq;
+ struct xen_domctl_memory_mapping memory_mapping;
+ struct xen_domctl_ioport_mapping ioport_mapping;
+ struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
+ struct xen_domctl_ext_vcpucontext ext_vcpucontext;
+ struct xen_domctl_set_opt_feature set_opt_feature;
+ struct xen_domctl_set_target set_target;
+ struct xen_domctl_subscribe subscribe;
+#if defined(__i386__) || defined(__x86_64__)
+ struct xen_domctl_cpuid cpuid;
+#endif
+ uint8_t pad[128];
+ } u;
+};
+typedef struct xen_domctl xen_domctl_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
+
+#endif /* __XEN_PUBLIC_DOMCTL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/elfnote.h b/xen/public/elfnote.h
new file mode 100644
index 0000000..77be41b
--- /dev/null
+++ b/xen/public/elfnote.h
@@ -0,0 +1,233 @@
+/******************************************************************************
+ * elfnote.h
+ *
+ * Definitions used for the Xen ELF notes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
+ */
+
+#ifndef __XEN_PUBLIC_ELFNOTE_H__
+#define __XEN_PUBLIC_ELFNOTE_H__
+
+/*
+ * The notes should live in a PT_NOTE segment and have "Xen" in the
+ * name field.
+ *
+ * Numeric types are either 4 or 8 bytes depending on the content of
+ * the desc field.
+ *
+ * LEGACY indicated the fields in the legacy __xen_guest string which
+ * this a note type replaces.
+ */
+
+/*
+ * NAME=VALUE pair (string).
+ */
+#define XEN_ELFNOTE_INFO 0
+
+/*
+ * The virtual address of the entry point (numeric).
+ *
+ * LEGACY: VIRT_ENTRY
+ */
+#define XEN_ELFNOTE_ENTRY 1
+
+/* The virtual address of the hypercall transfer page (numeric).
+ *
+ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
+ * number not a virtual address)
+ */
+#define XEN_ELFNOTE_HYPERCALL_PAGE 2
+
+/* The virtual address where the kernel image should be mapped (numeric).
+ *
+ * Defaults to 0.
+ *
+ * LEGACY: VIRT_BASE
+ */
+#define XEN_ELFNOTE_VIRT_BASE 3
+
+/*
+ * The offset of the ELF paddr field from the acutal required
+ * psuedo-physical address (numeric).
+ *
+ * This is used to maintain backwards compatibility with older kernels
+ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
+ * if not present.
+ *
+ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
+ */
+#define XEN_ELFNOTE_PADDR_OFFSET 4
+
+/*
+ * The version of Xen that we work with (string).
+ *
+ * LEGACY: XEN_VER
+ */
+#define XEN_ELFNOTE_XEN_VERSION 5
+
+/*
+ * The name of the guest operating system (string).
+ *
+ * LEGACY: GUEST_OS
+ */
+#define XEN_ELFNOTE_GUEST_OS 6
+
+/*
+ * The version of the guest operating system (string).
+ *
+ * LEGACY: GUEST_VER
+ */
+#define XEN_ELFNOTE_GUEST_VERSION 7
+
+/*
+ * The loader type (string).
+ *
+ * LEGACY: LOADER
+ */
+#define XEN_ELFNOTE_LOADER 8
+
+/*
+ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
+ * "bimodal").
+ *
+ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
+ * may be given as "yes,bimodal" which will cause older Xen to treat
+ * this kernel as PAE.
+ *
+ * LEGACY: PAE (n.b. The legacy interface included a provision to
+ * indicate 'extended-cr3' support allowing L3 page tables to be
+ * placed above 4G. It is assumed that any kernel new enough to use
+ * these ELF notes will include this and therefore "yes" here is
+ * equivalent to "yes[entended-cr3]" in the __xen_guest interface.
+ */
+#define XEN_ELFNOTE_PAE_MODE 9
+
+/*
+ * The features supported/required by this kernel (string).
+ *
+ * The string must consist of a list of feature names (as given in
+ * features.h, without the "XENFEAT_" prefix) separated by '|'
+ * characters. If a feature is required for the kernel to function
+ * then the feature name must be preceded by a '!' character.
+ *
+ * LEGACY: FEATURES
+ */
+#define XEN_ELFNOTE_FEATURES 10
+
+/*
+ * The kernel requires the symbol table to be loaded (string = "yes" or "no")
+ * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence
+ * of this string as a boolean flag rather than requiring "yes" or
+ * "no".
+ */
+#define XEN_ELFNOTE_BSD_SYMTAB 11
+
+/*
+ * The lowest address the hypervisor hole can begin at (numeric).
+ *
+ * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
+ * also indicates to the hypervisor that the kernel can deal with the
+ * hole starting at a higher address.
+ */
+#define XEN_ELFNOTE_HV_START_LOW 12
+
+/*
+ * List of maddr_t-sized mask/value pairs describing how to recognize
+ * (non-present) L1 page table entries carrying valid MFNs (numeric).
+ */
+#define XEN_ELFNOTE_L1_MFN_VALID 13
+
+/*
+ * Whether or not the guest supports cooperative suspend cancellation.
+ */
+#define XEN_ELFNOTE_SUSPEND_CANCEL 14
+
+/*
+ * The number of the highest elfnote defined.
+ */
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
+
+/*
+ * System information exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
+ * note in case of a system crash. This note will contain various
+ * information about the system, see xen/include/xen/elfcore.h.
+ */
+#define XEN_ELFNOTE_CRASH_INFO 0x1000001
+
+/*
+ * System registers exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
+ * note per cpu in case of a system crash. This note is architecture
+ * specific and will contain registers not saved in the "CORE" note.
+ * See xen/include/xen/elfcore.h for more information.
+ */
+#define XEN_ELFNOTE_CRASH_REGS 0x1000002
+
+
+/*
+ * xen dump-core none note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
+ * in its dump file to indicate that the file is xen dump-core
+ * file. This note doesn't have any other information.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
+
+/*
+ * xen dump-core header note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
+ * in its dump file.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
+
+/*
+ * xen dump-core xen version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
+ * in its dump file. It contains the xen version obtained via the
+ * XENVER hypercall.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
+
+/*
+ * xen dump-core format version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
+ * in its dump file. It contains a format version identifier.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
+
+#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/elfstructs.h b/xen/public/elfstructs.h
new file mode 100644
index 0000000..dc71e2d
--- /dev/null
+++ b/xen/public/elfstructs.h
@@ -0,0 +1,526 @@
+#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__
+#define __XEN_PUBLIC_ELFSTRUCTS_H__ 1
+/*
+ * Copyright (c) 1995, 1996 Erik Theisen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+typedef uint8_t Elf_Byte;
+
+typedef uint32_t Elf32_Addr; /* Unsigned program address */
+typedef uint32_t Elf32_Off; /* Unsigned file offset */
+typedef int32_t Elf32_Sword; /* Signed large integer */
+typedef uint32_t Elf32_Word; /* Unsigned large integer */
+typedef uint16_t Elf32_Half; /* Unsigned medium integer */
+
+typedef uint64_t Elf64_Addr;
+typedef uint64_t Elf64_Off;
+typedef int32_t Elf64_Shalf;
+
+typedef int32_t Elf64_Sword;
+typedef uint32_t Elf64_Word;
+
+typedef int64_t Elf64_Sxword;
+typedef uint64_t Elf64_Xword;
+
+typedef uint16_t Elf64_Half;
+
+/*
+ * e_ident[] identification indexes
+ * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
+ */
+#define EI_MAG0 0 /* file ID */
+#define EI_MAG1 1 /* file ID */
+#define EI_MAG2 2 /* file ID */
+#define EI_MAG3 3 /* file ID */
+#define EI_CLASS 4 /* file class */
+#define EI_DATA 5 /* data encoding */
+#define EI_VERSION 6 /* ELF header version */
+#define EI_OSABI 7 /* OS/ABI ID */
+#define EI_ABIVERSION 8 /* ABI version */
+#define EI_PAD 9 /* start of pad bytes */
+#define EI_NIDENT 16 /* Size of e_ident[] */
+
+/* e_ident[] magic number */
+#define ELFMAG0 0x7f /* e_ident[EI_MAG0] */
+#define ELFMAG1 'E' /* e_ident[EI_MAG1] */
+#define ELFMAG2 'L' /* e_ident[EI_MAG2] */
+#define ELFMAG3 'F' /* e_ident[EI_MAG3] */
+#define ELFMAG "\177ELF" /* magic */
+#define SELFMAG 4 /* size of magic */
+
+/* e_ident[] file class */
+#define ELFCLASSNONE 0 /* invalid */
+#define ELFCLASS32 1 /* 32-bit objs */
+#define ELFCLASS64 2 /* 64-bit objs */
+#define ELFCLASSNUM 3 /* number of classes */
+
+/* e_ident[] data encoding */
+#define ELFDATANONE 0 /* invalid */
+#define ELFDATA2LSB 1 /* Little-Endian */
+#define ELFDATA2MSB 2 /* Big-Endian */
+#define ELFDATANUM 3 /* number of data encode defines */
+
+/* e_ident[] Operating System/ABI */
+#define ELFOSABI_SYSV 0 /* UNIX System V ABI */
+#define ELFOSABI_HPUX 1 /* HP-UX operating system */
+#define ELFOSABI_NETBSD 2 /* NetBSD */
+#define ELFOSABI_LINUX 3 /* GNU/Linux */
+#define ELFOSABI_HURD 4 /* GNU/Hurd */
+#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
+#define ELFOSABI_SOLARIS 6 /* Solaris */
+#define ELFOSABI_MONTEREY 7 /* Monterey */
+#define ELFOSABI_IRIX 8 /* IRIX */
+#define ELFOSABI_FREEBSD 9 /* FreeBSD */
+#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
+#define ELFOSABI_MODESTO 11 /* Novell Modesto */
+#define ELFOSABI_OPENBSD 12 /* OpenBSD */
+#define ELFOSABI_ARM 97 /* ARM */
+#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
+
+/* e_ident */
+#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
+ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
+ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
+ (ehdr).e_ident[EI_MAG3] == ELFMAG3)
+
+/* ELF Header */
+typedef struct elfhdr {
+ unsigned char e_ident[EI_NIDENT]; /* ELF Identification */
+ Elf32_Half e_type; /* object file type */
+ Elf32_Half e_machine; /* machine */
+ Elf32_Word e_version; /* object file version */
+ Elf32_Addr e_entry; /* virtual entry point */
+ Elf32_Off e_phoff; /* program header table offset */
+ Elf32_Off e_shoff; /* section header table offset */
+ Elf32_Word e_flags; /* processor-specific flags */
+ Elf32_Half e_ehsize; /* ELF header size */
+ Elf32_Half e_phentsize; /* program header entry size */
+ Elf32_Half e_phnum; /* number of program header entries */
+ Elf32_Half e_shentsize; /* section header entry size */
+ Elf32_Half e_shnum; /* number of section header entries */
+ Elf32_Half e_shstrndx; /* section header table's "section
+ header string table" entry offset */
+} Elf32_Ehdr;
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT]; /* Id bytes */
+ Elf64_Half e_type; /* file type */
+ Elf64_Half e_machine; /* machine type */
+ Elf64_Word e_version; /* version number */
+ Elf64_Addr e_entry; /* entry point */
+ Elf64_Off e_phoff; /* Program hdr offset */
+ Elf64_Off e_shoff; /* Section hdr offset */
+ Elf64_Word e_flags; /* Processor flags */
+ Elf64_Half e_ehsize; /* sizeof ehdr */
+ Elf64_Half e_phentsize; /* Program header entry size */
+ Elf64_Half e_phnum; /* Number of program headers */
+ Elf64_Half e_shentsize; /* Section header entry size */
+ Elf64_Half e_shnum; /* Number of section headers */
+ Elf64_Half e_shstrndx; /* String table index */
+} Elf64_Ehdr;
+
+/* e_type */
+#define ET_NONE 0 /* No file type */
+#define ET_REL 1 /* relocatable file */
+#define ET_EXEC 2 /* executable file */
+#define ET_DYN 3 /* shared object file */
+#define ET_CORE 4 /* core file */
+#define ET_NUM 5 /* number of types */
+#define ET_LOPROC 0xff00 /* reserved range for processor */
+#define ET_HIPROC 0xffff /* specific e_type */
+
+/* e_machine */
+#define EM_NONE 0 /* No Machine */
+#define EM_M32 1 /* AT&T WE 32100 */
+#define EM_SPARC 2 /* SPARC */
+#define EM_386 3 /* Intel 80386 */
+#define EM_68K 4 /* Motorola 68000 */
+#define EM_88K 5 /* Motorola 88000 */
+#define EM_486 6 /* Intel 80486 - unused? */
+#define EM_860 7 /* Intel 80860 */
+#define EM_MIPS 8 /* MIPS R3000 Big-Endian only */
+/*
+ * Don't know if EM_MIPS_RS4_BE,
+ * EM_SPARC64, EM_PARISC,
+ * or EM_PPC are ABI compliant
+ */
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
+#define EM_SPARC64	11		/* SPARC v9 64-bit unofficial */
+#define EM_PARISC 15 /* HPPA */
+#define EM_SPARC32PLUS 18 /* Enhanced instruction set SPARC */
+#define EM_PPC 20 /* PowerPC */
+#define EM_PPC64 21 /* PowerPC 64-bit */
+#define EM_ARM 40 /* Advanced RISC Machines ARM */
+#define EM_ALPHA 41 /* DEC ALPHA */
+#define EM_SPARCV9 43 /* SPARC version 9 */
+#define EM_ALPHA_EXP 0x9026 /* DEC ALPHA */
+#define EM_IA_64 50 /* Intel Merced */
+#define EM_X86_64 62 /* AMD x86-64 architecture */
+#define EM_VAX 75 /* DEC VAX */
+
+/* Version */
+#define EV_NONE 0 /* Invalid */
+#define EV_CURRENT 1 /* Current */
+#define EV_NUM 2 /* number of versions */
+
+/* Section Header */
+typedef struct {
+ Elf32_Word sh_name; /* name - index into section header
+ string table section */
+ Elf32_Word sh_type; /* type */
+ Elf32_Word sh_flags; /* flags */
+ Elf32_Addr sh_addr; /* address */
+ Elf32_Off sh_offset; /* file offset */
+ Elf32_Word sh_size; /* section size */
+ Elf32_Word sh_link; /* section header table index link */
+ Elf32_Word sh_info; /* extra information */
+ Elf32_Word sh_addralign; /* address alignment */
+ Elf32_Word sh_entsize; /* section entry size */
+} Elf32_Shdr;
+
+typedef struct {
+ Elf64_Word sh_name; /* section name */
+ Elf64_Word sh_type; /* section type */
+ Elf64_Xword sh_flags; /* section flags */
+ Elf64_Addr sh_addr; /* virtual address */
+ Elf64_Off sh_offset; /* file offset */
+ Elf64_Xword sh_size; /* section size */
+ Elf64_Word sh_link; /* link to another */
+ Elf64_Word sh_info; /* misc info */
+ Elf64_Xword sh_addralign; /* memory alignment */
+ Elf64_Xword sh_entsize; /* table entry size */
+} Elf64_Shdr;
+
+/* Special Section Indexes */
+#define SHN_UNDEF 0 /* undefined */
+#define SHN_LORESERVE 0xff00 /* lower bounds of reserved indexes */
+#define SHN_LOPROC 0xff00 /* reserved range for processor */
+#define SHN_HIPROC 0xff1f /* specific section indexes */
+#define SHN_ABS 0xfff1 /* absolute value */
+#define SHN_COMMON 0xfff2 /* common symbol */
+#define SHN_HIRESERVE 0xffff /* upper bounds of reserved indexes */
+
+/* sh_type */
+#define SHT_NULL 0 /* inactive */
+#define SHT_PROGBITS 1 /* program defined information */
+#define SHT_SYMTAB 2 /* symbol table section */
+#define SHT_STRTAB 3 /* string table section */
+#define SHT_RELA 4 /* relocation section with addends*/
+#define SHT_HASH 5 /* symbol hash table section */
+#define SHT_DYNAMIC 6 /* dynamic section */
+#define SHT_NOTE 7 /* note section */
+#define SHT_NOBITS 8 /* no space section */
+#define SHT_REL		9		/* relocation section without addends */
+#define SHT_SHLIB 10 /* reserved - purpose unknown */
+#define SHT_DYNSYM 11 /* dynamic symbol table section */
+#define SHT_NUM 12 /* number of section types */
+#define SHT_LOPROC 0x70000000 /* reserved range for processor */
+#define SHT_HIPROC 0x7fffffff /* specific section header types */
+#define SHT_LOUSER 0x80000000 /* reserved range for application */
+#define SHT_HIUSER 0xffffffff /* specific indexes */
+
+/* Section names */
+#define ELF_BSS ".bss" /* uninitialized data */
+#define ELF_DATA ".data" /* initialized data */
+#define ELF_DEBUG ".debug" /* debug */
+#define ELF_DYNAMIC ".dynamic" /* dynamic linking information */
+#define ELF_DYNSTR ".dynstr" /* dynamic string table */
+#define ELF_DYNSYM ".dynsym" /* dynamic symbol table */
+#define ELF_FINI ".fini" /* termination code */
+#define ELF_GOT ".got" /* global offset table */
+#define ELF_HASH ".hash" /* symbol hash table */
+#define ELF_INIT ".init" /* initialization code */
+#define ELF_REL_DATA ".rel.data" /* relocation data */
+#define ELF_REL_FINI ".rel.fini" /* relocation termination code */
+#define ELF_REL_INIT ".rel.init" /* relocation initialization code */
+#define ELF_REL_DYN	".rel.dyn"	/* relocation dynamic link info */
+#define ELF_REL_RODATA ".rel.rodata" /* relocation read-only data */
+#define ELF_REL_TEXT ".rel.text" /* relocation code */
+#define ELF_RODATA ".rodata" /* read-only data */
+#define ELF_SHSTRTAB ".shstrtab" /* section header string table */
+#define ELF_STRTAB ".strtab" /* string table */
+#define ELF_SYMTAB ".symtab" /* symbol table */
+#define ELF_TEXT ".text" /* code */
+
+
+/* Section Attribute Flags - sh_flags */
+#define SHF_WRITE 0x1 /* Writable */
+#define SHF_ALLOC 0x2 /* occupies memory */
+#define SHF_EXECINSTR 0x4 /* executable */
+#define SHF_MASKPROC 0xf0000000 /* reserved bits for processor */
+ /* specific section attributes */
+
+/* Symbol Table Entry */
+typedef struct elf32_sym {
+ Elf32_Word st_name; /* name - index into string table */
+ Elf32_Addr st_value; /* symbol value */
+ Elf32_Word st_size; /* symbol size */
+ unsigned char st_info; /* type and binding */
+ unsigned char st_other; /* 0 - no defined meaning */
+ Elf32_Half st_shndx; /* section header index */
+} Elf32_Sym;
+
+typedef struct {
+ Elf64_Word st_name; /* Symbol name index in str table */
+ Elf_Byte st_info; /* type / binding attrs */
+ Elf_Byte st_other; /* unused */
+ Elf64_Half st_shndx; /* section index of symbol */
+ Elf64_Xword st_value; /* value of symbol */
+ Elf64_Xword st_size; /* size of symbol */
+} Elf64_Sym;
+
+/* Symbol table index */
+#define STN_UNDEF 0 /* undefined */
+
+/* Extract symbol info - st_info */
+#define ELF32_ST_BIND(x) ((x) >> 4)
+#define ELF32_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF32_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
+
+#define ELF64_ST_BIND(x) ((x) >> 4)
+#define ELF64_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF64_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
+
+/* Symbol Binding - ELF32_ST_BIND - st_info */
+#define STB_LOCAL 0 /* Local symbol */
+#define STB_GLOBAL 1 /* Global symbol */
+#define STB_WEAK 2 /* like global - lower precedence */
+#define STB_NUM 3 /* number of symbol bindings */
+#define STB_LOPROC 13 /* reserved range for processor */
+#define STB_HIPROC 15 /* specific symbol bindings */
+
+/* Symbol type - ELF32_ST_TYPE - st_info */
+#define STT_NOTYPE 0 /* not specified */
+#define STT_OBJECT 1 /* data object */
+#define STT_FUNC 2 /* function */
+#define STT_SECTION 3 /* section */
+#define STT_FILE 4 /* file */
+#define STT_NUM 5 /* number of symbol types */
+#define STT_LOPROC 13 /* reserved range for processor */
+#define STT_HIPROC 15 /* specific symbol types */
+
+/* Relocation entry with implicit addend */
+typedef struct {
+ Elf32_Addr r_offset; /* offset of relocation */
+ Elf32_Word r_info; /* symbol table index and type */
+} Elf32_Rel;
+
+/* Relocation entry with explicit addend */
+typedef struct {
+ Elf32_Addr r_offset; /* offset of relocation */
+ Elf32_Word r_info; /* symbol table index and type */
+ Elf32_Sword r_addend;
+} Elf32_Rela;
+
+/* Extract relocation info - r_info */
+#define ELF32_R_SYM(i) ((i) >> 8)
+#define ELF32_R_TYPE(i) ((unsigned char) (i))
+#define ELF32_R_INFO(s,t) (((s) << 8) + (unsigned char)(t))
+
+typedef struct {
+ Elf64_Xword r_offset; /* where to do it */
+ Elf64_Xword r_info; /* index & type of relocation */
+} Elf64_Rel;
+
+typedef struct {
+ Elf64_Xword r_offset; /* where to do it */
+ Elf64_Xword r_info; /* index & type of relocation */
+ Elf64_Sxword r_addend; /* adjustment value */
+} Elf64_Rela;
+
+#define ELF64_R_SYM(info) ((info) >> 32)
+#define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF)
+#define ELF64_R_INFO(s,t) (((s) << 32) + (uint32_t)(t))
+
+/* Program Header */
+typedef struct {
+ Elf32_Word p_type; /* segment type */
+ Elf32_Off p_offset; /* segment offset */
+ Elf32_Addr p_vaddr; /* virtual address of segment */
+ Elf32_Addr p_paddr; /* physical address - ignored? */
+ Elf32_Word p_filesz; /* number of bytes in file for seg. */
+ Elf32_Word p_memsz; /* number of bytes in mem. for seg. */
+ Elf32_Word p_flags; /* flags */
+ Elf32_Word p_align; /* memory alignment */
+} Elf32_Phdr;
+
+typedef struct {
+ Elf64_Word p_type; /* entry type */
+ Elf64_Word p_flags; /* flags */
+ Elf64_Off p_offset; /* offset */
+ Elf64_Addr p_vaddr; /* virtual address */
+ Elf64_Addr p_paddr; /* physical address */
+ Elf64_Xword p_filesz; /* file size */
+ Elf64_Xword p_memsz; /* memory size */
+ Elf64_Xword p_align; /* memory & file alignment */
+} Elf64_Phdr;
+
+/* Segment types - p_type */
+#define PT_NULL 0 /* unused */
+#define PT_LOAD 1 /* loadable segment */
+#define PT_DYNAMIC 2 /* dynamic linking section */
+#define PT_INTERP 3 /* the RTLD */
+#define PT_NOTE 4 /* auxiliary information */
+#define PT_SHLIB 5 /* reserved - purpose undefined */
+#define PT_PHDR 6 /* program header */
+#define PT_NUM 7 /* Number of segment types */
+#define PT_LOPROC 0x70000000 /* reserved range for processor */
+#define PT_HIPROC 0x7fffffff /* specific segment types */
+
+/* Segment flags - p_flags */
+#define PF_X 0x1 /* Executable */
+#define PF_W 0x2 /* Writable */
+#define PF_R 0x4 /* Readable */
+#define PF_MASKPROC 0xf0000000 /* reserved bits for processor */
+ /* specific segment flags */
+
+/* Dynamic structure */
+typedef struct {
+ Elf32_Sword d_tag; /* controls meaning of d_val */
+ union {
+ Elf32_Word d_val; /* Multiple meanings - see d_tag */
+ Elf32_Addr d_ptr; /* program virtual address */
+ } d_un;
+} Elf32_Dyn;
+
+typedef struct {
+ Elf64_Xword d_tag; /* controls meaning of d_val */
+ union {
+ Elf64_Addr d_ptr;
+ Elf64_Xword d_val;
+ } d_un;
+} Elf64_Dyn;
+
+/* Dynamic Array Tags - d_tag */
+#define DT_NULL 0 /* marks end of _DYNAMIC array */
+#define DT_NEEDED 1 /* string table offset of needed lib */
+#define DT_PLTRELSZ 2 /* size of relocation entries in PLT */
+#define DT_PLTGOT 3 /* address PLT/GOT */
+#define DT_HASH 4 /* address of symbol hash table */
+#define DT_STRTAB 5 /* address of string table */
+#define DT_SYMTAB 6 /* address of symbol table */
+#define DT_RELA 7 /* address of relocation table */
+#define DT_RELASZ 8 /* size of relocation table */
+#define DT_RELAENT 9 /* size of relocation entry */
+#define DT_STRSZ 10 /* size of string table */
+#define DT_SYMENT 11 /* size of symbol table entry */
+#define DT_INIT 12 /* address of initialization func. */
+#define DT_FINI 13 /* address of termination function */
+#define DT_SONAME 14 /* string table offset of shared obj */
+#define DT_RPATH 15 /* string table offset of library
+ search path */
+#define DT_SYMBOLIC 16 /* start sym search in shared obj. */
+#define DT_REL 17 /* address of rel. tbl. w addends */
+#define DT_RELSZ 18 /* size of DT_REL relocation table */
+#define DT_RELENT 19 /* size of DT_REL relocation entry */
+#define DT_PLTREL 20 /* PLT referenced relocation entry */
+#define DT_DEBUG	21		/* debugger */
+#define DT_TEXTREL 22 /* Allow rel. mod. to unwritable seg */
+#define DT_JMPREL 23 /* add. of PLT's relocation entries */
+#define DT_BIND_NOW 24 /* Bind now regardless of env setting */
+#define DT_NUM 25 /* Number used. */
+#define DT_LOPROC 0x70000000 /* reserved range for processor */
+#define DT_HIPROC 0x7fffffff /* specific dynamic array tags */
+
+/* Standard ELF hashing function */
+unsigned int elf_hash(const unsigned char *name);
+
+/*
+ * Note Definitions
+ */
+typedef struct {
+ Elf32_Word namesz;
+ Elf32_Word descsz;
+ Elf32_Word type;
+} Elf32_Note;
+
+typedef struct {
+ Elf64_Word namesz;
+ Elf64_Word descsz;
+ Elf64_Word type;
+} Elf64_Note;
+
+
+#if defined(ELFSIZE)
+#define CONCAT(x,y) __CONCAT(x,y)
+#define ELFNAME(x) CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x)))
+#define ELFNAME2(x,y) CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y))))
+#define ELFNAMEEND(x) CONCAT(x,CONCAT(_elf,ELFSIZE))
+#define ELFDEFNNAME(x) CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x)))
+#endif
+
+#if defined(ELFSIZE) && (ELFSIZE == 32)
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Phdr Elf32_Phdr
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Rel Elf32_Rel
+#define Elf_RelA Elf32_Rela
+#define Elf_Dyn Elf32_Dyn
+#define Elf_Word Elf32_Word
+#define Elf_Sword Elf32_Sword
+#define Elf_Addr Elf32_Addr
+#define Elf_Off Elf32_Off
+#define Elf_Nhdr Elf32_Nhdr
+#define Elf_Note Elf32_Note
+
+#define ELF_R_SYM ELF32_R_SYM
+#define ELF_R_TYPE ELF32_R_TYPE
+#define ELF_R_INFO ELF32_R_INFO
+#define ELFCLASS ELFCLASS32
+
+#define ELF_ST_BIND ELF32_ST_BIND
+#define ELF_ST_TYPE ELF32_ST_TYPE
+#define ELF_ST_INFO ELF32_ST_INFO
+
+#define AuxInfo Aux32Info
+#elif defined(ELFSIZE) && (ELFSIZE == 64)
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Phdr Elf64_Phdr
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Rel Elf64_Rel
+#define Elf_RelA Elf64_Rela
+#define Elf_Dyn Elf64_Dyn
+#define Elf_Word Elf64_Word
+#define Elf_Sword Elf64_Sword
+#define Elf_Addr Elf64_Addr
+#define Elf_Off Elf64_Off
+#define Elf_Nhdr Elf64_Nhdr
+#define Elf_Note Elf64_Note
+
+#define ELF_R_SYM ELF64_R_SYM
+#define ELF_R_TYPE ELF64_R_TYPE
+#define ELF_R_INFO ELF64_R_INFO
+#define ELFCLASS ELFCLASS64
+
+#define ELF_ST_BIND ELF64_ST_BIND
+#define ELF_ST_TYPE ELF64_ST_TYPE
+#define ELF_ST_INFO ELF64_ST_INFO
+
+#define AuxInfo Aux64Info
+#endif
+
+#endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */
diff --git a/xen/public/event_channel.h b/xen/public/event_channel.h
new file mode 100644
index 0000000..d35cce5
--- /dev/null
+++ b/xen/public/event_channel.h
@@ -0,0 +1,264 @@
+/******************************************************************************
+ * event_channel.h
+ *
+ * Event channels between domains.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
+
+/*
+ * Prototype for this hypercall is:
+ * int event_channel_op(int cmd, void *args)
+ * @cmd == EVTCHNOP_??? (event-channel operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+typedef uint32_t evtchn_port_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
+
+/*
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
+ * NOTES:
+ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ * 2. <rdom> may be DOMID_SELF, allowing loopback connections.
+ */
+#define EVTCHNOP_alloc_unbound 6
+struct evtchn_alloc_unbound {
+ /* IN parameters */
+ domid_t dom, remote_dom;
+ /* OUT parameters */
+ evtchn_port_t port;
+};
+typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
+
+/*
+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
+ * a port that is unbound and marked as accepting bindings from the calling
+ * domain. A fresh port is allocated in the calling domain and returned as
+ * <local_port>.
+ * NOTES:
+ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+#define EVTCHNOP_bind_interdomain 0
+struct evtchn_bind_interdomain {
+ /* IN parameters. */
+ domid_t remote_dom;
+ evtchn_port_t remote_port;
+ /* OUT parameters. */
+ evtchn_port_t local_port;
+};
+typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
+
+/*
+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
+ * vcpu.
+ * NOTES:
+ * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
+ * in xen.h for the classification of each VIRQ.
+ * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
+ * re-bound via EVTCHNOP_bind_vcpu.
+ * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
+ * The allocated event channel is bound to the specified vcpu and the
+ * binding cannot be changed.
+ */
+#define EVTCHNOP_bind_virq 1
+struct evtchn_bind_virq {
+ /* IN parameters. */
+ uint32_t virq;
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_virq evtchn_bind_virq_t;
+
+/*
+ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
+ * NOTES:
+ * 1. A physical IRQ may be bound to at most one event channel per domain.
+ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
+ */
+#define EVTCHNOP_bind_pirq 2
+struct evtchn_bind_pirq {
+ /* IN parameters. */
+ uint32_t pirq;
+#define BIND_PIRQ__WILL_SHARE 1
+ uint32_t flags; /* BIND_PIRQ__* */
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
+
+/*
+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
+ * NOTES:
+ * 1. The allocated event channel is bound to the specified vcpu. The binding
+ * may not be changed.
+ */
+#define EVTCHNOP_bind_ipi 7
+struct evtchn_bind_ipi {
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
+
+/*
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
+ */
+#define EVTCHNOP_close 3
+struct evtchn_close {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_close evtchn_close_t;
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <port>.
+ */
+#define EVTCHNOP_send 4
+struct evtchn_send {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_send evtchn_send_t;
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <dom, port>.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may obtain the status of an event
+ * channel for which <dom> is not DOMID_SELF.
+ */
+#define EVTCHNOP_status 5
+struct evtchn_status {
+ /* IN parameters */
+ domid_t dom;
+ evtchn_port_t port;
+ /* OUT parameters */
+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
+#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
+#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
+#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
+#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
+#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
+ uint32_t status;
+ uint32_t vcpu; /* VCPU to which this channel is bound. */
+ union {
+ struct {
+ domid_t dom;
+ } unbound; /* EVTCHNSTAT_unbound */
+ struct {
+ domid_t dom;
+ evtchn_port_t port;
+ } interdomain; /* EVTCHNSTAT_interdomain */
+ uint32_t pirq; /* EVTCHNSTAT_pirq */
+ uint32_t virq; /* EVTCHNSTAT_virq */
+ } u;
+};
+typedef struct evtchn_status evtchn_status_t;
+
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ * 1. IPI-bound channels always notify the vcpu specified at bind time.
+ * This binding cannot be changed.
+ * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
+ * This binding cannot be changed.
+ * 3. All other channels notify vcpu0 by default. This default is set when
+ * the channel is allocated (a port that is freed and subsequently reused
+ * has its binding reset to vcpu0).
+ */
+#define EVTCHNOP_bind_vcpu 8
+struct evtchn_bind_vcpu {
+ /* IN parameters. */
+ evtchn_port_t port;
+ uint32_t vcpu;
+};
+typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
+
+/*
+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
+ * a notification to the appropriate VCPU if an event is pending.
+ */
+#define EVTCHNOP_unmask 9
+struct evtchn_unmask {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_unmask evtchn_unmask_t;
+
+/*
+ * EVTCHNOP_reset: Close all event channels associated with specified domain.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
+ */
+#define EVTCHNOP_reset 10
+struct evtchn_reset {
+ /* IN parameters. */
+ domid_t dom;
+};
+typedef struct evtchn_reset evtchn_reset_t;
+
+/*
+ * Argument to event_channel_op_compat() hypercall. Superseded by new
+ * event_channel_op() hypercall since 0x00030202.
+ */
+struct evtchn_op {
+ uint32_t cmd; /* EVTCHNOP_* */
+ union {
+ struct evtchn_alloc_unbound alloc_unbound;
+ struct evtchn_bind_interdomain bind_interdomain;
+ struct evtchn_bind_virq bind_virq;
+ struct evtchn_bind_pirq bind_pirq;
+ struct evtchn_bind_ipi bind_ipi;
+ struct evtchn_close close;
+ struct evtchn_send send;
+ struct evtchn_status status;
+ struct evtchn_bind_vcpu bind_vcpu;
+ struct evtchn_unmask unmask;
+ } u;
+};
+typedef struct evtchn_op evtchn_op_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
+
+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/features.h b/xen/public/features.h
new file mode 100644
index 0000000..879131c
--- /dev/null
+++ b/xen/public/features.h
@@ -0,0 +1,83 @@
+/******************************************************************************
+ * features.h
+ *
+ * Feature flags, reported by XENVER_get_features.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_FEATURES_H__
+#define __XEN_PUBLIC_FEATURES_H__
+
+/*
+ * If set, the guest does not need to write-protect its pagetables, and can
+ * update them via direct writes.
+ */
+#define XENFEAT_writable_page_tables 0
+
+/*
+ * If set, the guest does not need to write-protect its segment descriptor
+ * tables, and can update them via direct writes.
+ */
+#define XENFEAT_writable_descriptor_tables 1
+
+/*
+ * If set, translation between the guest's 'pseudo-physical' address space
+ * and the host's machine address space are handled by the hypervisor. In this
+ * mode the guest does not need to perform phys-to/from-machine translations
+ * when performing page table operations.
+ */
+#define XENFEAT_auto_translated_physmap 2
+
+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
+#define XENFEAT_supervisor_mode_kernel 3
+
+/*
+ * If set, the guest does not need to allocate x86 PAE page directories
+ * below 4GB. This flag is usually implied by auto_translated_physmap.
+ */
+#define XENFEAT_pae_pgdir_above_4gb 4
+
+/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
+#define XENFEAT_mmu_pt_update_preserve_ad 5
+
+/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
+#define XENFEAT_highmem_assist 6
+
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits 7
+
+#define XENFEAT_NR_SUBMAPS 1
+
+#endif /* __XEN_PUBLIC_FEATURES_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/grant_table.h b/xen/public/grant_table.h
new file mode 100644
index 0000000..ad116e7
--- /dev/null
+++ b/xen/public/grant_table.h
@@ -0,0 +1,438 @@
+/******************************************************************************
+ * grant_table.h
+ *
+ * Interface for granting foreign access to page frames, and receiving
+ * page-ownership transfers.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
+#define __XEN_PUBLIC_GRANT_TABLE_H__
+
+
+/***********************************
+ * GRANT TABLE REPRESENTATION
+ */
+
+/* Some rough guidelines on accessing and updating grant-table entries
+ * in a concurrency-safe manner. For more information, Linux contains a
+ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
+ *
+ * NB. WMB is a no-op on current-generation x86 processors. However, a
+ * compiler barrier will still be required.
+ *
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ *
+ * Invalidating an unused GTF_permit_access entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ *
+ * Invalidating an in-use GTF_permit_access entry:
+ * This cannot be done directly. Request assistance from the domain controller
+ * which can set a timeout on the use of a grant entry and take necessary
+ * action. (NB. This is not yet implemented!).
+ *
+ * Invalidating an unused GTF_accept_transfer entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & GTF_transfer_committed). [*]
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
+ * The guest must /not/ modify the grant entry until the address of the
+ * transferred frame is written. It is safe for the guest to spin waiting
+ * for this to occur (detect by observing GTF_transfer_completed in
+ * ent->flags).
+ *
+ * Invalidating a committed GTF_accept_transfer entry:
+ * 1. Wait for (ent->flags & GTF_transfer_completed).
+ *
+ * Changing a GTF_permit_access from writable to read-only:
+ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
+ *
+ * Changing a GTF_permit_access from read-only to writable:
+ * Use SMP-safe bit-setting instruction.
+ */
+
+/*
+ * A grant table comprises a packed array of grant entries in one or more
+ * page frames shared between Xen and a guest.
+ * [XEN]: This field is written by Xen and read by the sharing guest.
+ * [GST]: This field is written by the guest and read by Xen.
+ */
+struct grant_entry {
+ /* GTF_xxx: various type and flag information. [XEN,GST] */
+ uint16_t flags;
+ /* The domain being granted foreign privileges. [GST] */
+ domid_t domid;
+ /*
+ * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
+ */
+ uint32_t frame;
+};
+typedef struct grant_entry grant_entry_t;
+
+/*
+ * Type of grant entry.
+ * GTF_invalid: This grant entry grants no privileges.
+ * GTF_permit_access: Allow @domid to map/access @frame.
+ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
+ * to this guest. Xen writes the page number to @frame.
+ */
+#define GTF_invalid (0U<<0)
+#define GTF_permit_access (1U<<0)
+#define GTF_accept_transfer (2U<<0)
+#define GTF_type_mask (3U<<0)
+
+/*
+ * Subflags for GTF_permit_access.
+ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
+ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
+ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
+ */
+#define _GTF_readonly (2)
+#define GTF_readonly (1U<<_GTF_readonly)
+#define _GTF_reading (3)
+#define GTF_reading (1U<<_GTF_reading)
+#define _GTF_writing (4)
+#define GTF_writing (1U<<_GTF_writing)
+#define _GTF_PWT (5)
+#define GTF_PWT (1U<<_GTF_PWT)
+#define _GTF_PCD (6)
+#define GTF_PCD (1U<<_GTF_PCD)
+#define _GTF_PAT (7)
+#define GTF_PAT (1U<<_GTF_PAT)
+
+/*
+ * Subflags for GTF_accept_transfer:
+ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
+ * to transferring ownership of a page frame. When a guest sees this flag
+ * it must /not/ modify the grant entry until GTF_transfer_completed is
+ * set by Xen.
+ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
+ * after reading GTF_transfer_committed. Xen will always write the frame
+ * address, followed by ORing this flag, in a timely manner.
+ */
+#define _GTF_transfer_committed (2)
+#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
+#define _GTF_transfer_completed (3)
+#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
+
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
+ */
+
+/*
+ * Reference to a grant entry in a specified domain's grant table.
+ */
+typedef uint32_t grant_ref_t;
+
+/*
+ * Handle to track a mapping created via a grant reference.
+ */
+typedef uint32_t grant_handle_t;
+
+/*
+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
+ * that must be presented later to destroy the mapping(s). On error, <handle>
+ * is a negative status code.
+ * NOTES:
+ * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
+ * via which I/O devices may access the granted frame.
+ * 2. If GNTMAP_host_map is specified then a mapping will be added at
+ * either a host virtual address in the current address space, or at
+ * a PTE at the specified machine address. The type of mapping to
+ * perform is selected through the GNTMAP_contains_pte flag, and the
+ * address is specified in <host_addr>.
+ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
+ * host mapping is destroyed by other means then it is *NOT* guaranteed
+ * to be accounted to the correct grant reference!
+ */
+#define GNTTABOP_map_grant_ref 0
+struct gnttab_map_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint32_t flags; /* GNTMAP_* */
+ grant_ref_t ref;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+ grant_handle_t handle;
+ uint64_t dev_bus_addr;
+};
+typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
+
+/*
+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
+ * field is ignored. If non-zero, they must refer to a device/host mapping
+ * that is tracked by <handle>
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+#define GNTTABOP_unmap_grant_ref 1
+struct gnttab_unmap_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t dev_bus_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+};
+typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
+
+/*
+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
+ * Only <nr_frames> addresses are written, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ * 3. Xen may not support more than a single grant-table page per domain.
+ */
+#define GNTTABOP_setup_table 2
+struct gnttab_setup_table {
+ /* IN parameters. */
+ domid_t dom;
+ uint32_t nr_frames;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+ XEN_GUEST_HANDLE(ulong) frame_list;
+};
+typedef struct gnttab_setup_table gnttab_setup_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
+
+/*
+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
+ * xen console. Debugging use only.
+ */
+#define GNTTABOP_dump_table 3
+struct gnttab_dump_table {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+};
+typedef struct gnttab_dump_table gnttab_dump_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
+
+/*
+ * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
+ * foreign domain has previously registered its interest in the transfer via
+ * <domid, ref>.
+ *
+ * Note that, even if the transfer fails, the specified page no longer belongs
+ * to the calling domain *unless* the error is GNTST_bad_page.
+ */
+#define GNTTABOP_transfer 4
+struct gnttab_transfer {
+ /* IN parameters. */
+ xen_pfn_t mfn;
+ domid_t domid;
+ grant_ref_t ref;
+ /* OUT parameters. */
+ int16_t status;
+};
+typedef struct gnttab_transfer gnttab_transfer_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
+
+
+/*
+ * GNTTABOP_copy: Hypervisor based copy
+ * source and destinations can be either MFNs or, for foreign domains,
+ * grant references. The foreign domain has to grant read/write access
+ * in its grant table.
+ *
+ * The flags specify what type source and destinations are (either MFN
+ * or grant reference).
+ *
+ * Note that this can also be used to copy data between two domains
+ * via a third party if the source and destination domains had previously
+ * grant appropriate access to their pages to the third party.
+ *
+ * source_offset specifies an offset in the source frame, dest_offset
+ * the offset in the target frame and len specifies the number of
+ * bytes to be copied.
+ */
+
+#define _GNTCOPY_source_gref (0)
+#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
+#define _GNTCOPY_dest_gref (1)
+#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
+
+#define GNTTABOP_copy 5
+typedef struct gnttab_copy {
+ /* IN parameters. */
+ struct {
+ union {
+ grant_ref_t ref;
+ xen_pfn_t gmfn;
+ } u;
+ domid_t domid;
+ uint16_t offset;
+ } source, dest;
+ uint16_t len;
+ uint16_t flags; /* GNTCOPY_* */
+ /* OUT parameters. */
+ int16_t status;
+} gnttab_copy_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
+
+/*
+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
+ * grant table.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+#define GNTTABOP_query_size 6
+struct gnttab_query_size {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ uint32_t nr_frames;
+ uint32_t max_nr_frames;
+ int16_t status; /* GNTST_* */
+};
+typedef struct gnttab_query_size gnttab_query_size_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
+
+/*
+ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
+ * tracked by <handle> but atomically replace the page table entry with one
+ * pointing to the machine address under <new_addr>. <new_addr> will be
+ * redirected to the null entry.
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+#define GNTTABOP_unmap_and_replace 7
+struct gnttab_unmap_and_replace {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t new_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+};
+typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
+
+
+/*
+ * Bitfield values for gnttab_map_grant_ref.flags.
+ */
+ /* Map the grant entry for access by I/O devices. */
+#define _GNTMAP_device_map (0)
+#define GNTMAP_device_map (1<<_GNTMAP_device_map)
+ /* Map the grant entry for access by host CPUs. */
+#define _GNTMAP_host_map (1)
+#define GNTMAP_host_map (1<<_GNTMAP_host_map)
+ /* Accesses to the granted frame will be restricted to read-only access. */
+#define _GNTMAP_readonly (2)
+#define GNTMAP_readonly (1<<_GNTMAP_readonly)
+ /*
+ * GNTMAP_host_map subflag:
+ * 0 => The host mapping is usable only by the guest OS.
+ * 1 => The host mapping is usable by guest OS + current application.
+ */
+#define _GNTMAP_application_map (3)
+#define GNTMAP_application_map (1<<_GNTMAP_application_map)
+
+ /*
+ * GNTMAP_contains_pte subflag:
+ * 0 => This map request contains a host virtual address.
+ * 1 => This map request contains the machine address of the PTE to update.
+ */
+#define _GNTMAP_contains_pte (4)
+#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
+
+/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0 (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
+/*
+ * Values for error status returns. All errors are -ve.
+ */
+#define GNTST_okay (0) /* Normal return. */
+#define GNTST_general_error (-1) /* General undefined error. */
+#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
+#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
+#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
+#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
+#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
+#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
+#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
+#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
+#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
+#define GNTST_address_too_big (-11) /* transfer page address too large. */
+
+#define GNTTABOP_error_msgs { \
+ "okay", \
+ "undefined error", \
+ "unrecognised domain id", \
+ "invalid grant reference", \
+ "invalid mapping handle", \
+ "invalid virtual address", \
+ "invalid device address", \
+ "no spare translation slot in the I/O MMU", \
+ "permission denied", \
+ "bad page", \
+ "copy arguments cross page boundary", \
+ "page address size too large" \
+}
+
+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/blkif.h b/xen/public/io/blkif.h
new file mode 100644
index 0000000..2380066
--- /dev/null
+++ b/xen/public/io/blkif.h
@@ -0,0 +1,141 @@
+/******************************************************************************
+ * blkif.h
+ *
+ * Unified block-device I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
+#define __XEN_PUBLIC_IO_BLKIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Front->back notifications: When enqueuing a new request, sending a
+ * notification can be made conditional on req_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: When enqueuing a new response, sending a
+ * notification can be made conditional on rsp_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ */
+
+#ifndef blkif_vdev_t
+#define blkif_vdev_t uint16_t
+#endif
+#define blkif_sector_t uint64_t
+
+/*
+ * REQUEST CODES.
+ */
+#define BLKIF_OP_READ 0
+#define BLKIF_OP_WRITE 1
+/*
+ * Recognised only if "feature-barrier" is present in backend xenbus info.
+ * The "feature-barrier" node contains a boolean indicating whether barrier
+ * requests are likely to succeed or fail. Either way, a barrier request
+ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
+ * the underlying block-device hardware. The boolean simply indicates whether
+ * or not it is worthwhile for the frontend to attempt barrier requests.
+ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
+ * create the "feature-barrier" node!
+ */
+#define BLKIF_OP_WRITE_BARRIER 2
+/*
+ * Recognised if "feature-flush-cache" is present in backend xenbus
+ * info. A flush will ask the underlying storage hardware to flush its
+ * non-volatile caches as appropriate. The "feature-flush-cache" node
+ * contains a boolean indicating whether flush requests are likely to
+ * succeed or fail. Either way, a flush request may fail at any time
+ * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
+ * block-device hardware. The boolean simply indicates whether or not it
+ * is worthwhile for the frontend to attempt flushes. If a backend does
+ * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
+ * "feature-flush-cache" node!
+ */
+#define BLKIF_OP_FLUSH_DISKCACHE 3
+
+/*
+ * Maximum scatter/gather segments per request.
+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
+ */
+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
+
+struct blkif_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+typedef struct blkif_request blkif_request_t;
+
+struct blkif_response {
+ uint64_t id; /* copied from request */
+ uint8_t operation; /* copied from request */
+ int16_t status; /* BLKIF_RSP_??? */
+};
+typedef struct blkif_response blkif_response_t;
+
+/*
+ * STATUS RETURN CODES.
+ */
+ /* Operation not supported (only happens on barrier writes). */
+#define BLKIF_RSP_EOPNOTSUPP -2
+ /* Operation failed for some unspecified reason (-EIO). */
+#define BLKIF_RSP_ERROR -1
+ /* Operation completed successfully. */
+#define BLKIF_RSP_OKAY 0
+
+/*
+ * Generate blkif ring structures and types.
+ */
+
+DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
+
+#define VDISK_CDROM 0x1
+#define VDISK_REMOVABLE 0x2
+#define VDISK_READONLY 0x4
+
+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/console.h b/xen/public/io/console.h
new file mode 100644
index 0000000..4b8c01a
--- /dev/null
+++ b/xen/public/io/console.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * console.h
+ *
+ * Console I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
+#define __XEN_PUBLIC_IO_CONSOLE_H__
+
+typedef uint32_t XENCONS_RING_IDX;
+
+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
+
+struct xencons_interface {
+ char in[1024];
+ char out[2048];
+ XENCONS_RING_IDX in_cons, in_prod;
+ XENCONS_RING_IDX out_cons, out_prod;
+};
+
+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/fbif.h b/xen/public/io/fbif.h
new file mode 100644
index 0000000..95377a0
--- /dev/null
+++ b/xen/public/io/fbif.h
@@ -0,0 +1,176 @@
+/*
+ * fbif.h -- Xen virtual frame buffer device
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_FBIF_H__
+#define __XEN_PUBLIC_IO_FBIF_H__
+
+/* Out events (frontend -> backend) */
+
+/*
+ * Out events may be sent only when requested by backend, and receipt
+ * of an unknown out event is an error.
+ */
+
+/* Event type 1 currently not used */
+/*
+ * Framebuffer update notification event
+ * Capable frontend sets feature-update in xenstore.
+ * Backend requests it by setting request-update in xenstore.
+ */
+#define XENFB_TYPE_UPDATE 2
+
+struct xenfb_update
+{
+ uint8_t type; /* XENFB_TYPE_UPDATE */
+ int32_t x; /* source x */
+ int32_t y; /* source y */
+ int32_t width; /* rect width */
+ int32_t height; /* rect height */
+};
+
+/*
+ * Framebuffer resize notification event
+ * Capable backend sets feature-resize in xenstore.
+ */
+#define XENFB_TYPE_RESIZE 3
+
+struct xenfb_resize
+{
+ uint8_t type; /* XENFB_TYPE_RESIZE */
+ int32_t width; /* width in pixels */
+ int32_t height; /* height in pixels */
+ int32_t stride; /* stride in bytes */
+ int32_t depth; /* depth in bits */
+ int32_t offset; /* offset of the framebuffer in bytes */
+};
+
+#define XENFB_OUT_EVENT_SIZE 40
+
+union xenfb_out_event
+{
+ uint8_t type;
+ struct xenfb_update update;
+ struct xenfb_resize resize;
+ char pad[XENFB_OUT_EVENT_SIZE];
+};
+
+/* In events (backend -> frontend) */
+
+/*
+ * Frontends should ignore unknown in events.
+ */
+
+/*
+ * Framebuffer refresh period advice
+ * Backend sends it to advise the frontend their preferred period of
+ * refresh. Frontends that keep the framebuffer constantly up-to-date
+ * just ignore it. Frontends that use the advice should immediately
+ * refresh the framebuffer (and send an update notification event if
+ * those have been requested), then use the update frequency to guide
+ * their periodical refreshes.
+ */
+#define XENFB_TYPE_REFRESH_PERIOD 1
+#define XENFB_NO_REFRESH 0
+
+struct xenfb_refresh_period
+{
+ uint8_t type; /* XENFB_TYPE_REFRESH_PERIOD */
+ uint32_t period; /* period of refresh, in ms,
+ * XENFB_NO_REFRESH if no refresh is needed */
+};
+
+#define XENFB_IN_EVENT_SIZE 40
+
+union xenfb_in_event
+{
+ uint8_t type;
+ struct xenfb_refresh_period refresh_period;
+ char pad[XENFB_IN_EVENT_SIZE];
+};
+
+/* shared page */
+
+#define XENFB_IN_RING_SIZE 1024
+#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
+#define XENFB_IN_RING_OFFS 1024
+#define XENFB_IN_RING(page) \
+ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
+#define XENFB_IN_RING_REF(page, idx) \
+ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
+
+#define XENFB_OUT_RING_SIZE 2048
+#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
+#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
+#define XENFB_OUT_RING(page) \
+ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
+#define XENFB_OUT_RING_REF(page, idx) \
+ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
+
+struct xenfb_page
+{
+ uint32_t in_cons, in_prod;
+ uint32_t out_cons, out_prod;
+
+ int32_t width; /* the width of the framebuffer (in pixels) */
+ int32_t height; /* the height of the framebuffer (in pixels) */
+ uint32_t line_length; /* the length of a row of pixels (in bytes) */
+ uint32_t mem_length; /* the length of the framebuffer (in bytes) */
+ uint8_t depth; /* the depth of a pixel (in bits) */
+
+ /*
+ * Framebuffer page directory
+ *
+ * Each directory page holds PAGE_SIZE / sizeof(*pd)
+ * framebuffer pages, and can thus map up to PAGE_SIZE *
+ * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
+ * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs
+ * 64 bit. 256 directories give enough room for a 512 Meg
+ * framebuffer with a max resolution of 12,800x10,240. Should
+ * be enough for a while with room leftover for expansion.
+ */
+ unsigned long pd[256];
+};
+
+/*
+ * Wart: xenkbd needs to know default resolution. Put it here until a
+ * better solution is found, but don't leak it to the backend.
+ */
+#ifdef __KERNEL__
+#define XENFB_WIDTH 800
+#define XENFB_HEIGHT 600
+#define XENFB_DEPTH 32
+#endif
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/fsif.h b/xen/public/io/fsif.h
new file mode 100644
index 0000000..04ef928
--- /dev/null
+++ b/xen/public/io/fsif.h
@@ -0,0 +1,191 @@
+/******************************************************************************
+ * fsif.h
+ *
+ * Interface to FS level split device drivers.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
+ */
+
+#ifndef __XEN_PUBLIC_IO_FSIF_H__
+#define __XEN_PUBLIC_IO_FSIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+#define REQ_FILE_OPEN 1
+#define REQ_FILE_CLOSE 2
+#define REQ_FILE_READ 3
+#define REQ_FILE_WRITE 4
+#define REQ_STAT 5
+#define REQ_FILE_TRUNCATE 6
+#define REQ_REMOVE 7
+#define REQ_RENAME 8
+#define REQ_CREATE 9
+#define REQ_DIR_LIST 10
+#define REQ_CHMOD 11
+#define REQ_FS_SPACE 12
+#define REQ_FILE_SYNC 13
+
+struct fsif_open_request {
+ grant_ref_t gref;
+};
+
+struct fsif_close_request {
+ uint32_t fd;
+};
+
+struct fsif_read_request {
+ uint32_t fd;
+ int32_t pad;
+ uint64_t len;
+ uint64_t offset;
+ grant_ref_t grefs[1]; /* Variable length */
+};
+
+struct fsif_write_request {
+ uint32_t fd;
+ int32_t pad;
+ uint64_t len;
+ uint64_t offset;
+ grant_ref_t grefs[1]; /* Variable length */
+};
+
+struct fsif_stat_request {
+ uint32_t fd;
+};
+
+/* This structure is a copy of some fields from stat structure, returned
+ * via the ring. */
+struct fsif_stat_response {
+ int32_t stat_mode;
+ uint32_t stat_uid;
+ uint32_t stat_gid;
+ int32_t stat_ret;
+ int64_t stat_size;
+ int64_t stat_atime;
+ int64_t stat_mtime;
+ int64_t stat_ctime;
+};
+
+struct fsif_truncate_request {
+ uint32_t fd;
+ int32_t pad;
+ int64_t length;
+};
+
+struct fsif_remove_request {
+ grant_ref_t gref;
+};
+
+struct fsif_rename_request {
+ uint16_t old_name_offset;
+ uint16_t new_name_offset;
+ grant_ref_t gref;
+};
+
+struct fsif_create_request {
+ int8_t directory;
+ int8_t pad;
+ int16_t pad2;
+ int32_t mode;
+ grant_ref_t gref;
+};
+
+struct fsif_list_request {
+ uint32_t offset;
+ grant_ref_t gref;
+};
+
+#define NR_FILES_SHIFT 0
+#define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */
+#define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT)
+#define ERROR_SIZE 32 /* 32 bits for the error mask */
+#define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT)
+#define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
+#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE)
+#define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT)
+
+struct fsif_chmod_request {
+ uint32_t fd;
+ int32_t mode;
+};
+
+struct fsif_space_request {
+ grant_ref_t gref;
+};
+
+struct fsif_sync_request {
+ uint32_t fd;
+};
+
+
+/* FS operation request */
+struct fsif_request {
+ uint8_t type; /* Type of the request */
+ uint8_t pad;
+ uint16_t id; /* Request ID, copied to the response */
+ uint32_t pad2;
+ union {
+ struct fsif_open_request fopen;
+ struct fsif_close_request fclose;
+ struct fsif_read_request fread;
+ struct fsif_write_request fwrite;
+ struct fsif_stat_request fstat;
+ struct fsif_truncate_request ftruncate;
+ struct fsif_remove_request fremove;
+ struct fsif_rename_request frename;
+ struct fsif_create_request fcreate;
+ struct fsif_list_request flist;
+ struct fsif_chmod_request fchmod;
+ struct fsif_space_request fspace;
+ struct fsif_sync_request fsync;
+ } u;
+};
+typedef struct fsif_request fsif_request_t;
+
+/* FS operation response */
+struct fsif_response {
+ uint16_t id;
+ uint16_t pad1;
+ uint32_t pad2;
+ union {
+ uint64_t ret_val;
+ struct fsif_stat_response fstat;
+ };
+};
+
+typedef struct fsif_response fsif_response_t;
+
+#define FSIF_RING_ENTRY_SIZE 64
+
+#define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \
+ sizeof(grant_ref_t) + 1)
+#define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \
+ sizeof(grant_ref_t) + 1)
+
+DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response);
+
+#define STATE_INITIALISED "init"
+#define STATE_READY "ready"
+
+
+
+#endif
diff --git a/xen/public/io/kbdif.h b/xen/public/io/kbdif.h
new file mode 100644
index 0000000..e1d66a5
--- /dev/null
+++ b/xen/public/io/kbdif.h
@@ -0,0 +1,132 @@
+/*
+ * kbdif.h -- Xen virtual keyboard/mouse
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_KBDIF_H__
+#define __XEN_PUBLIC_IO_KBDIF_H__
+
+/* In events (backend -> frontend) */
+
+/*
+ * Frontends should ignore unknown in events.
+ */
+
+/* Pointer movement event */
+#define XENKBD_TYPE_MOTION 1
+/* Event type 2 currently not used */
+/* Key event (includes pointer buttons) */
+#define XENKBD_TYPE_KEY 3
+/*
+ * Pointer position event
+ * Capable backend sets feature-abs-pointer in xenstore.
+ * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
+ * request-abs-update in xenstore.
+ */
+#define XENKBD_TYPE_POS 4
+
+struct xenkbd_motion
+{
+ uint8_t type; /* XENKBD_TYPE_MOTION */
+ int32_t rel_x; /* relative X motion */
+ int32_t rel_y; /* relative Y motion */
+ int32_t rel_z; /* relative Z motion (wheel) */
+};
+
+struct xenkbd_key
+{
+ uint8_t type; /* XENKBD_TYPE_KEY */
+ uint8_t pressed; /* 1 if pressed; 0 otherwise */
+ uint32_t keycode; /* KEY_* from linux/input.h */
+};
+
+struct xenkbd_position
+{
+ uint8_t type; /* XENKBD_TYPE_POS */
+ int32_t abs_x; /* absolute X position (in FB pixels) */
+ int32_t abs_y; /* absolute Y position (in FB pixels) */
+ int32_t rel_z; /* relative Z motion (wheel) */
+};
+
+#define XENKBD_IN_EVENT_SIZE 40
+
+union xenkbd_in_event
+{
+ uint8_t type;
+ struct xenkbd_motion motion;
+ struct xenkbd_key key;
+ struct xenkbd_position pos;
+ char pad[XENKBD_IN_EVENT_SIZE];
+};
+
+/* Out events (frontend -> backend) */
+
+/*
+ * Out events may be sent only when requested by backend, and receipt
+ * of an unknown out event is an error.
+ * No out events currently defined.
+ */
+
+#define XENKBD_OUT_EVENT_SIZE 40
+
+union xenkbd_out_event
+{
+ uint8_t type;
+ char pad[XENKBD_OUT_EVENT_SIZE];
+};
+
+/* shared page */
+
+#define XENKBD_IN_RING_SIZE 2048
+#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
+#define XENKBD_IN_RING_OFFS 1024
+#define XENKBD_IN_RING(page) \
+ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
+#define XENKBD_IN_RING_REF(page, idx) \
+ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
+
+#define XENKBD_OUT_RING_SIZE 1024
+#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
+#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
+#define XENKBD_OUT_RING(page) \
+ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
+#define XENKBD_OUT_RING_REF(page, idx) \
+ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
+
+struct xenkbd_page
+{
+ uint32_t in_cons, in_prod;
+ uint32_t out_cons, out_prod;
+};
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/netif.h b/xen/public/io/netif.h
new file mode 100644
index 0000000..fbb5c27
--- /dev/null
+++ b/xen/public/io/netif.h
@@ -0,0 +1,205 @@
+/******************************************************************************
+ * netif.h
+ *
+ * Unified network-device I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_NETIF_H__
+#define __XEN_PUBLIC_IO_NETIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Notifications after enqueuing any type of message should be conditional on
+ * the appropriate req_event or rsp_event field in the shared ring.
+ * If the client sends notification for rx requests then it should specify
+ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
+ * that it cannot safely queue packets (as it may not be kicked to send them).
+ */
+
+/*
+ * This is the 'wire' format for packets:
+ * Request 1: netif_tx_request -- NETTXF_* (any flags)
+ * [Request 2: netif_extra_info] (only if request 1 has NETTXF_extra_info)
+ * [Request 3: netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_FLAG_MORE)
+ * Request 4: netif_tx_request -- NETTXF_more_data
+ * Request 5: netif_tx_request -- NETTXF_more_data
+ * ...
+ * Request N: netif_tx_request -- 0
+ */
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETTXF_csum_blank (0)
+#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETTXF_data_validated (1)
+#define NETTXF_data_validated (1U<<_NETTXF_data_validated)
+
+/* Packet continues in the next request descriptor. */
+#define _NETTXF_more_data (2)
+#define NETTXF_more_data (1U<<_NETTXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETTXF_extra_info (3)
+#define NETTXF_extra_info (1U<<_NETTXF_extra_info)
+
+struct netif_tx_request {
+ grant_ref_t gref; /* Reference to buffer page */
+ uint16_t offset; /* Offset within buffer page */
+ uint16_t flags; /* NETTXF_* */
+ uint16_t id; /* Echoed in response message. */
+ uint16_t size; /* Packet size in bytes. */
+};
+typedef struct netif_tx_request netif_tx_request_t;
+
+/* Types of netif_extra_info descriptors. */
+#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
+#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MAX (4)
+
+/* netif_extra_info flags. */
+#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
+#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
+
+/* GSO types - only TCPv4 currently supported. */
+#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
+
+/*
+ * This structure needs to fit within both netif_tx_request and
+ * netif_rx_response for compatibility.
+ */
+struct netif_extra_info {
+ uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
+ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
+
+ union {
+ /*
+ * XEN_NETIF_EXTRA_TYPE_GSO:
+ */
+ struct {
+ /*
+ * Maximum payload size of each segment. For example, for TCP this
+ * is just the path MSS.
+ */
+ uint16_t size;
+
+ /*
+ * GSO type. This determines the protocol of the packet and any
+ * extra features required to segment the packet properly.
+ */
+ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
+
+ /* Future expansion. */
+ uint8_t pad;
+
+ /*
+ * GSO features. This specifies any extra GSO features required
+ * to process this packet, such as ECN support for TCPv4.
+ */
+ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
+ } gso;
+
+ /*
+ * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
+ * Backend advertises availability via 'feature-multicast-control'
+ * xenbus node containing value '1'.
+ * Frontend requests this feature by advertising
+ * 'request-multicast-control' xenbus node containing value '1'.
+ * If multicast control is requested then multicast flooding is
+ * disabled and the frontend must explicitly register its interest
+ * in multicast groups using dummy transmit requests containing
+ * MCAST_{ADD,DEL} extra-info fragments.
+ */
+ struct {
+ uint8_t addr[6]; /* Address to add/remove. */
+ } mcast;
+
+ uint16_t pad[3];
+ } u;
+};
+typedef struct netif_extra_info netif_extra_info_t;
+
+struct netif_tx_response {
+ uint16_t id;
+ int16_t status; /* NETIF_RSP_* */
+};
+typedef struct netif_tx_response netif_tx_response_t;
+
+struct netif_rx_request {
+ uint16_t id; /* Echoed in response message. */
+ grant_ref_t gref; /* Reference to incoming granted frame */
+};
+typedef struct netif_rx_request netif_rx_request_t;
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETRXF_data_validated (0)
+#define NETRXF_data_validated (1U<<_NETRXF_data_validated)
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETRXF_csum_blank (1)
+#define NETRXF_csum_blank (1U<<_NETRXF_csum_blank)
+
+/* Packet continues in the next request descriptor. */
+#define _NETRXF_more_data (2)
+#define NETRXF_more_data (1U<<_NETRXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETRXF_extra_info (3)
+#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
+
+struct netif_rx_response {
+ uint16_t id;
+ uint16_t offset; /* Offset in page of start of received packet */
+ uint16_t flags; /* NETRXF_* */
+ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+};
+typedef struct netif_rx_response netif_rx_response_t;
+
+/*
+ * Generate netif ring structures and types.
+ */
+
+DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
+DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
+
+#define NETIF_RSP_DROPPED -2
+#define NETIF_RSP_ERROR -1
+#define NETIF_RSP_OKAY 0
+/* No response: used for auxiliary requests (e.g., netif_extra_info). */
+#define NETIF_RSP_NULL 1
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/pciif.h b/xen/public/io/pciif.h
new file mode 100644
index 0000000..0a0ffcc
--- /dev/null
+++ b/xen/public/io/pciif.h
@@ -0,0 +1,101 @@
+/*
+ * PCI Backend/Frontend Common Data Structures & Macros
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#ifndef __XEN_PCI_COMMON_H__
+#define __XEN_PCI_COMMON_H__
+
+/* Be sure to bump this number if you change this file */
+#define XEN_PCI_MAGIC "7"
+
+/* xen_pci_sharedinfo flags */
+#define _XEN_PCIF_active (0)
+#define XEN_PCIF_active (1<<_XEN_PCIF_active)
+
+/* xen_pci_op commands */
+#define XEN_PCI_OP_conf_read (0)
+#define XEN_PCI_OP_conf_write (1)
+#define XEN_PCI_OP_enable_msi (2)
+#define XEN_PCI_OP_disable_msi (3)
+#define XEN_PCI_OP_enable_msix (4)
+#define XEN_PCI_OP_disable_msix (5)
+
+/* xen_pci_op error numbers */
+#define XEN_PCI_ERR_success (0)
+#define XEN_PCI_ERR_dev_not_found (-1)
+#define XEN_PCI_ERR_invalid_offset (-2)
+#define XEN_PCI_ERR_access_denied (-3)
+#define XEN_PCI_ERR_not_implemented (-4)
+/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
+#define XEN_PCI_ERR_op_failed (-5)
+
+/*
+ * it should be (PAGE_SIZE-sizeof(struct xen_pci_op))/sizeof(struct xen_msix_entry)
+ * Should not exceed 128
+ */
+#define SH_INFO_MAX_VEC 128
+
+struct xen_msix_entry {
+ uint16_t vector;
+ uint16_t entry;
+};
+struct xen_pci_op {
+ /* IN: what action to perform: XEN_PCI_OP_* */
+ uint32_t cmd;
+
+ /* OUT: will contain an error number (if any) from errno.h */
+ int32_t err;
+
+ /* IN: which device to touch */
+ uint32_t domain; /* PCI Domain/Segment */
+ uint32_t bus;
+ uint32_t devfn;
+
+ /* IN: which configuration registers to touch */
+ int32_t offset;
+ int32_t size;
+
+ /* IN/OUT: Contains the result after a READ or the value to WRITE */
+ uint32_t value;
+ /* IN: Contains extra info for this operation */
+ uint32_t info;
+ /* IN: param for msi-x */
+ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC];
+};
+
+struct xen_pci_sharedinfo {
+ /* flags - XEN_PCIF_* */
+ uint32_t flags;
+ struct xen_pci_op op;
+};
+
+#endif /* __XEN_PCI_COMMON_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/protocols.h b/xen/public/io/protocols.h
new file mode 100644
index 0000000..77bd1bd
--- /dev/null
+++ b/xen/public/io/protocols.h
@@ -0,0 +1,40 @@
+/******************************************************************************
+ * protocols.h
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PROTOCOLS_H__
+#define __XEN_PROTOCOLS_H__
+
+#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
+#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
+#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
+
+#if defined(__i386__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
+#elif defined(__x86_64__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
+#elif defined(__ia64__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
+#else
+# error arch fixup needed here
+#endif
+
+#endif
diff --git a/xen/public/io/ring.h b/xen/public/io/ring.h
new file mode 100644
index 0000000..8669564
--- /dev/null
+++ b/xen/public/io/ring.h
@@ -0,0 +1,313 @@
+/******************************************************************************
+ * ring.h
+ *
+ * Shared producer-consumer ring macros.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Tim Deegan and Andrew Warfield November 2004.
+ */
+
+#ifndef __XEN_PUBLIC_IO_RING_H__
+#define __XEN_PUBLIC_IO_RING_H__
+
+#include "../xen-compat.h"
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030208
+#define xen_mb() mb()
+#define xen_rmb() rmb()
+#define xen_wmb() wmb()
+#endif
+
+typedef unsigned int RING_IDX;
+
+/* Round a 32-bit unsigned constant down to the nearest power of two. */
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
+#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
+
+/*
+ * Calculate size of a shared ring, given the total available space for the
+ * ring and indexes (_sz), and the name tag of the request/response structure.
+ * A ring contains as many entries as will fit, rounded down to the nearest
+ * power of two (so we can mask with (size-1) to loop around).
+ */
+#define __CONST_RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+ sizeof(((struct _s##_sring *)0)->ring[0])))
+/*
+ * The same for passing in an actual pointer instead of a name tag.
+ */
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+
+/*
+ * Macros to make the correct C datatypes for a new kind of ring.
+ *
+ * To make a new ring datatype, you need to have two message structures,
+ * let's say request_t, and response_t already defined.
+ *
+ * In a header where you want the ring datatype declared, you then do:
+ *
+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
+ *
+ * These expand out to give you a set of types, as you can see below.
+ * The most important of these are:
+ *
+ * mytag_sring_t - The shared ring.
+ * mytag_front_ring_t - The 'front' half of the ring.
+ * mytag_back_ring_t - The 'back' half of the ring.
+ *
+ * To initialize a ring in your code you need to know the location and size
+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
+ * the front half:
+ *
+ * mytag_front_ring_t front_ring;
+ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ *
+ * Initializing the back follows similarly (note that only the front
+ * initializes the shared ring):
+ *
+ * mytag_back_ring_t back_ring;
+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ */
+
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
+ \
+/* Shared ring entry */ \
+union __name##_sring_entry { \
+ __req_t req; \
+ __rsp_t rsp; \
+}; \
+ \
+/* Shared ring page */ \
+struct __name##_sring { \
+ RING_IDX req_prod, req_event; \
+ RING_IDX rsp_prod, rsp_event; \
+ uint8_t pad[48]; \
+ union __name##_sring_entry ring[1]; /* variable-length */ \
+}; \
+ \
+/* "Front" end's private variables */ \
+struct __name##_front_ring { \
+ RING_IDX req_prod_pvt; \
+ RING_IDX rsp_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* "Back" end's private variables */ \
+struct __name##_back_ring { \
+ RING_IDX rsp_prod_pvt; \
+ RING_IDX req_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* Syntactic sugar */ \
+typedef struct __name##_sring __name##_sring_t; \
+typedef struct __name##_front_ring __name##_front_ring_t; \
+typedef struct __name##_back_ring __name##_back_ring_t
+
+/*
+ * Macros for manipulating rings.
+ *
+ * FRONT_RING_whatever works on the "front end" of a ring: here
+ * requests are pushed on to the ring and responses taken off it.
+ *
+ * BACK_RING_whatever works on the "back end" of a ring: here
+ * requests are taken off the ring and responses put on.
+ *
+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
+ * This is OK in 1-for-1 request-response situations where the
+ * requestor (front end) never has more than RING_SIZE()-1
+ * outstanding requests.
+ */
+
+/* Initialising empty rings */
+#define SHARED_RING_INIT(_s) do { \
+ (_s)->req_prod = (_s)->rsp_prod = 0; \
+ (_s)->req_event = (_s)->rsp_event = 1; \
+ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \
+} while(0)
+
+#define FRONT_RING_INIT(_r, _s, __size) do { \
+ (_r)->req_prod_pvt = 0; \
+ (_r)->rsp_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
+} while (0)
+
+#define BACK_RING_INIT(_r, _s, __size) do { \
+ (_r)->rsp_prod_pvt = 0; \
+ (_r)->req_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
+} while (0)
+
+/* Initialize to existing shared indexes -- for recovery */
+#define FRONT_RING_ATTACH(_r, _s, __size) do { \
+ (_r)->sring = (_s); \
+ (_r)->req_prod_pvt = (_s)->req_prod; \
+ (_r)->rsp_cons = (_s)->rsp_prod; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+} while (0)
+
+#define BACK_RING_ATTACH(_r, _s, __size) do { \
+ (_r)->sring = (_s); \
+ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
+ (_r)->req_cons = (_s)->req_prod; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+} while (0)
+
+/* How big is this ring? */
+#define RING_SIZE(_r) \
+ ((_r)->nr_ents)
+
+/* Number of free requests (for use on front side only). */
+#define RING_FREE_REQUESTS(_r) \
+ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
+
+/* Test if there is an empty slot available on the front ring.
+ * (This is only meaningful from the front. )
+ */
+#define RING_FULL(_r) \
+ (RING_FREE_REQUESTS(_r) == 0)
+
+/* Test if there are outstanding messages to be processed on a ring. */
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+ ((_r)->sring->rsp_prod - (_r)->rsp_cons)
+
+#ifdef __GNUC__
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
+ unsigned int rsp = RING_SIZE(_r) - \
+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
+ req < rsp ? req : rsp; \
+})
+#else
+/* Same as above, but without the nice GCC ({ ... }) syntax. */
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+ ((((_r)->sring->req_prod - (_r)->req_cons) < \
+ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
+ ((_r)->sring->req_prod - (_r)->req_cons) : \
+ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
+#endif
+
+/* Direct access to individual ring elements, by index. */
+#define RING_GET_REQUEST(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
+/* Loop termination condition: Would the specified index overflow the ring? */
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
+#define RING_PUSH_REQUESTS(_r) do { \
+ xen_wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+} while (0)
+
+#define RING_PUSH_RESPONSES(_r) do { \
+ xen_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
+} while (0)
+
+/*
+ * Notification hold-off (req_event and rsp_event):
+ *
+ * When queueing requests or responses on a shared ring, it may not always be
+ * necessary to notify the remote end. For example, if requests are in flight
+ * in a backend, the front may be able to queue further requests without
+ * notifying the back (if the back checks for new requests when it queues
+ * responses).
+ *
+ * When enqueuing requests or responses:
+ *
+ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
+ * is a boolean return value. True indicates that the receiver requires an
+ * asynchronous notification.
+ *
+ * After dequeuing requests or responses (before sleeping the connection):
+ *
+ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
+ * The second argument is a boolean return value. True indicates that there
+ * are pending messages on the ring (i.e., the connection should not be put
+ * to sleep).
+ *
+ * These macros will set the req_event/rsp_event field to trigger a
+ * notification on the very next message that is enqueued. If you want to
+ * create batches of work (i.e., only receive a notification after several
+ * messages have been enqueued) then you will need to create a customised
+ * version of the FINAL_CHECK macro in your own code, which sets the event
+ * field appropriately.
+ */
+
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->req_prod; \
+ RING_IDX __new = (_r)->req_prod_pvt; \
+ xen_wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = __new; \
+ xen_mb(); /* back sees new requests /before/ we check req_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
+ (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->rsp_prod; \
+ RING_IDX __new = (_r)->rsp_prod_pvt; \
+ xen_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = __new; \
+ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
+ (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->req_event = (_r)->req_cons + 1; \
+ xen_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
+ xen_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+} while (0)
+
+#endif /* __XEN_PUBLIC_IO_RING_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/tpmif.h b/xen/public/io/tpmif.h
new file mode 100644
index 0000000..02ccdab
--- /dev/null
+++ b/xen/public/io/tpmif.h
@@ -0,0 +1,77 @@
+/******************************************************************************
+ * tpmif.h
+ *
+ * TPM I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, IBM Corporation
+ *
+ * Author: Stefan Berger, stefanb@us.ibm.com
+ * Grant table support: Mahadevan Gomathisankaran
+ *
+ * This code has been derived from tools/libxc/xen/io/netif.h
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_TPMIF_H__
+#define __XEN_PUBLIC_IO_TPMIF_H__
+
+#include "../grant_table.h"
+
+struct tpmif_tx_request {
+ unsigned long addr; /* Machine address of packet. */
+ grant_ref_t ref; /* grant table access reference */
+ uint16_t unused;
+ uint16_t size; /* Packet size in bytes. */
+};
+typedef struct tpmif_tx_request tpmif_tx_request_t;
+
+/*
+ * The TPMIF_TX_RING_SIZE defines the number of pages the
+ * front-end and backend can exchange (= size of array).
+ */
+typedef uint32_t TPMIF_RING_IDX;
+
+#define TPMIF_TX_RING_SIZE 1
+
+/* This structure must fit in a memory page. */
+
+struct tpmif_ring {
+ struct tpmif_tx_request req;
+};
+typedef struct tpmif_ring tpmif_ring_t;
+
+struct tpmif_tx_interface {
+ struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
+};
+typedef struct tpmif_tx_interface tpmif_tx_interface_t;
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/xenbus.h b/xen/public/io/xenbus.h
new file mode 100644
index 0000000..4a053df
--- /dev/null
+++ b/xen/public/io/xenbus.h
@@ -0,0 +1,80 @@
+/*****************************************************************************
+ * xenbus.h
+ *
+ * Xenbus protocol details.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2005 XenSource Ltd.
+ */
+
+#ifndef _XEN_PUBLIC_IO_XENBUS_H
+#define _XEN_PUBLIC_IO_XENBUS_H
+
+/*
+ * The state of either end of the Xenbus, i.e. the current communication
+ * status of initialisation across the bus. States here imply nothing about
+ * the state of the connection between the driver and the kernel's device
+ * layers.
+ */
+enum xenbus_state {
+ XenbusStateUnknown = 0,
+
+ XenbusStateInitialising = 1,
+
+ /*
+ * InitWait: Finished early initialisation but waiting for information
+ * from the peer or hotplug scripts.
+ */
+ XenbusStateInitWait = 2,
+
+ /*
+ * Initialised: Waiting for a connection from the peer.
+ */
+ XenbusStateInitialised = 3,
+
+ XenbusStateConnected = 4,
+
+ /*
+ * Closing: The device is being closed due to an error or an unplug event.
+ */
+ XenbusStateClosing = 5,
+
+ XenbusStateClosed = 6,
+
+ /*
+ * Reconfiguring: The device is being reconfigured.
+ */
+ XenbusStateReconfiguring = 7,
+
+ XenbusStateReconfigured = 8
+};
+typedef enum xenbus_state XenbusState;
+
+#endif /* _XEN_PUBLIC_IO_XENBUS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/io/xs_wire.h b/xen/public/io/xs_wire.h
new file mode 100644
index 0000000..dd2d966
--- /dev/null
+++ b/xen/public/io/xs_wire.h
@@ -0,0 +1,132 @@
+/*
+ * Details of the "wire" protocol between Xen Store Daemon and client
+ * library or guest kernel.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2005 Rusty Russell IBM Corporation
+ */
+
+#ifndef _XS_WIRE_H
+#define _XS_WIRE_H
+
+enum xsd_sockmsg_type
+{
+ XS_DEBUG,
+ XS_DIRECTORY,
+ XS_READ,
+ XS_GET_PERMS,
+ XS_WATCH,
+ XS_UNWATCH,
+ XS_TRANSACTION_START,
+ XS_TRANSACTION_END,
+ XS_INTRODUCE,
+ XS_RELEASE,
+ XS_GET_DOMAIN_PATH,
+ XS_WRITE,
+ XS_MKDIR,
+ XS_RM,
+ XS_SET_PERMS,
+ XS_WATCH_EVENT,
+ XS_ERROR,
+ XS_IS_DOMAIN_INTRODUCED,
+ XS_RESUME,
+ XS_SET_TARGET
+};
+
+#define XS_WRITE_NONE "NONE"
+#define XS_WRITE_CREATE "CREATE"
+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
+
+#ifdef linux_specific
+/* We hand errors as strings, for portability. */
+struct xsd_errors
+{
+ int errnum;
+ const char *errstring;
+};
+#define XSD_ERROR(x) { x, #x }
+/* LINTED: static unused */
+static struct xsd_errors xsd_errors[]
+#if defined(__GNUC__)
+__attribute__((unused))
+#endif
+ = {
+ XSD_ERROR(EINVAL),
+ XSD_ERROR(EACCES),
+ XSD_ERROR(EEXIST),
+ XSD_ERROR(EISDIR),
+ XSD_ERROR(ENOENT),
+ XSD_ERROR(ENOMEM),
+ XSD_ERROR(ENOSPC),
+ XSD_ERROR(EIO),
+ XSD_ERROR(ENOTEMPTY),
+ XSD_ERROR(ENOSYS),
+ XSD_ERROR(EROFS),
+ XSD_ERROR(EBUSY),
+ XSD_ERROR(EAGAIN),
+ XSD_ERROR(EISCONN)
+};
+#endif
+
+struct xsd_sockmsg
+{
+ uint32_t type; /* XS_??? */
+ uint32_t req_id;/* Request identifier, echoed in daemon's response. */
+ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
+ uint32_t len; /* Length of data following this. */
+
+ /* Generally followed by nul-terminated string(s). */
+};
+
+enum xs_watch_type
+{
+ XS_WATCH_PATH = 0,
+ XS_WATCH_TOKEN
+};
+
+/* Inter-domain shared memory communications. */
+#define XENSTORE_RING_SIZE 1024
+typedef uint32_t XENSTORE_RING_IDX;
+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
+struct xenstore_domain_interface {
+ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
+ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
+ XENSTORE_RING_IDX req_cons, req_prod;
+ XENSTORE_RING_IDX rsp_cons, rsp_prod;
+};
+
+/* Violating this is very bad. See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
+/* Violating these just gets you an error back */
+#define XENSTORE_ABS_PATH_MAX 3072
+#define XENSTORE_REL_PATH_MAX 2048
+
+#endif /* _XS_WIRE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/kexec.h b/xen/public/kexec.h
new file mode 100644
index 0000000..fc19f2f
--- /dev/null
+++ b/xen/public/kexec.h
@@ -0,0 +1,189 @@
+/******************************************************************************
+ * kexec.h - Public portion
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Xen port written by:
+ * - Simon 'Horms' Horman <horms@verge.net.au>
+ * - Magnus Damm <magnus@valinux.co.jp>
+ */
+
+#ifndef _XEN_PUBLIC_KEXEC_H
+#define _XEN_PUBLIC_KEXEC_H
+
+
+/* This file describes the Kexec / Kdump hypercall interface for Xen.
+ *
+ * Kexec under vanilla Linux allows a user to reboot the physical machine
+ * into a new user-specified kernel. The Xen port extends this idea
+ * to allow rebooting of the machine from dom0. When kexec for dom0
+ * is used to reboot, both the hypervisor and the domains get replaced
+ * with some other kernel. It is possible to kexec between vanilla
+ * Linux and Xen and back again. Xen to Xen works well too.
+ *
+ * The hypercall interface for kexec can be divided into three main
+ * types of hypercall operations:
+ *
+ * 1) Range information:
+ * This is used by the dom0 kernel to ask the hypervisor about various
+ * address information. This information is needed to allow kexec-tools
+ * to fill in the ELF headers for /proc/vmcore properly.
+ *
+ * 2) Load and unload of images:
+ * There are no big surprises here, the kexec binary from kexec-tools
+ * runs in userspace in dom0. The tool loads/unloads data into the
+ * dom0 kernel such as new kernel, initramfs and hypervisor. When
+ * loaded the dom0 kernel performs a load hypercall operation, and
+ * before releasing all page references the dom0 kernel calls unload.
+ *
+ * 3) Kexec operation:
+ * This is used to start a previously loaded kernel.
+ */
+
+#include "xen.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+#define KEXEC_XEN_NO_PAGES 17
+#endif
+
+/*
+ * Prototype for this hypercall is:
+ * int kexec_op(int cmd, void *args)
+ * @cmd == KEXEC_CMD_...
+ * KEXEC operation to perform
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Kexec supports two types of operation:
+ * - kexec into a regular kernel, very similar to a standard reboot
+ * - KEXEC_TYPE_DEFAULT is used to specify this type
+ * - kexec into a special "crash kernel", aka kexec-on-panic
+ * - KEXEC_TYPE_CRASH is used to specify this type
+ * - parts of our system may be broken at kexec-on-panic time
+ * - the code should be kept as simple and self-contained as possible
+ */
+
+#define KEXEC_TYPE_DEFAULT 0
+#define KEXEC_TYPE_CRASH 1
+
+
+/* The kexec implementation for Xen allows the user to load two
+ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
+ * All data needed for a kexec reboot is kept in one xen_kexec_image_t
+ * per "instance". The data mainly consists of machine address lists to pages
+ * together with destination addresses. The data in xen_kexec_image_t
+ * is passed to the "code page" which is one page of code that performs
+ * the final relocations before jumping to the new kernel.
+ */
+
+typedef struct xen_kexec_image {
+#if defined(__i386__) || defined(__x86_64__)
+ unsigned long page_list[KEXEC_XEN_NO_PAGES];
+#endif
+#if defined(__ia64__)
+ unsigned long reboot_code_buffer;
+#endif
+ unsigned long indirection_page;
+ unsigned long start_address;
+} xen_kexec_image_t;
+
+/*
+ * Perform kexec having previously loaded a kexec or kdump kernel
+ * as appropriate.
+ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
+ */
+#define KEXEC_CMD_kexec 0
+typedef struct xen_kexec_exec {
+ int type;
+} xen_kexec_exec_t;
+
+/*
+ * Load/Unload kernel image for kexec or kdump.
+ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
+ * image == relocation information for kexec (ignored for unload) [in]
+ */
+#define KEXEC_CMD_kexec_load 1
+#define KEXEC_CMD_kexec_unload 2
+typedef struct xen_kexec_load {
+ int type;
+ xen_kexec_image_t image;
+} xen_kexec_load_t;
+
+#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */
+#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */
+#define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */
+#define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap
+ * Note that although this is adjacent
+ * to Xen it exists in a separate EFI
+ * region on ia64, and thus needs to be
+ * inserted into iomem_machine separately */
+#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of
+ * the ia64_boot_param */
+#define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of
+ * the EFI Memory Map */
+#define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */
+
+/*
+ * Find the address and size of certain memory areas
+ * range == KEXEC_RANGE_... [in]
+ * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in]
+ * size == number of bytes reserved in window [out]
+ * start == address of the first byte in the window [out]
+ */
+#define KEXEC_CMD_kexec_get_range 3
+typedef struct xen_kexec_range {
+ int range;
+ int nr;
+ unsigned long size;
+ unsigned long start;
+} xen_kexec_range_t;
+
+/* vmcoreinfo stuff */
+#define VMCOREINFO_BYTES (4096)
+#define VMCOREINFO_NOTE_NAME "VMCOREINFO_XEN"
+void arch_crash_save_vmcoreinfo(void);
+void vmcoreinfo_append_str(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+#define VMCOREINFO_PAGESIZE(value) \
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
+#define VMCOREINFO_SYMBOL(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SYMBOL_ALIAS(alias, name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #alias, (unsigned long)&name)
+#define VMCOREINFO_STRUCT_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%zu\n", #name, sizeof(struct name))
+#define VMCOREINFO_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_OFFSET_ALIAS(name, field, alias) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #alias, \
+ (unsigned long)offsetof(struct name, field))
+
+#endif /* _XEN_PUBLIC_KEXEC_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/libelf.h b/xen/public/libelf.h
new file mode 100644
index 0000000..d238330
--- /dev/null
+++ b/xen/public/libelf.h
@@ -0,0 +1,265 @@
+/******************************************************************************
+ * libelf.h
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XC_LIBELF__
+#define __XC_LIBELF__ 1
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
+#define XEN_ELF_LITTLE_ENDIAN
+#else
+#error define architectural endianness
+#endif
+
+#undef ELFSIZE
+#include "elfnote.h"
+#include "elfstructs.h"
+#include "features.h"
+
+/* ------------------------------------------------------------------------ */
+
+typedef union {
+ Elf32_Ehdr e32;
+ Elf64_Ehdr e64;
+} elf_ehdr;
+
+typedef union {
+ Elf32_Phdr e32;
+ Elf64_Phdr e64;
+} elf_phdr;
+
+typedef union {
+ Elf32_Shdr e32;
+ Elf64_Shdr e64;
+} elf_shdr;
+
+typedef union {
+ Elf32_Sym e32;
+ Elf64_Sym e64;
+} elf_sym;
+
+typedef union {
+ Elf32_Rel e32;
+ Elf64_Rel e64;
+} elf_rel;
+
+typedef union {
+ Elf32_Rela e32;
+ Elf64_Rela e64;
+} elf_rela;
+
+typedef union {
+ Elf32_Note e32;
+ Elf64_Note e64;
+} elf_note;
+
+struct elf_binary {
+ /* elf binary */
+ const char *image;
+ size_t size;
+ char class;
+ char data;
+
+ const elf_ehdr *ehdr;
+ const char *sec_strtab;
+ const elf_shdr *sym_tab;
+ const char *sym_strtab;
+
+ /* loaded to */
+ char *dest;
+ uint64_t pstart;
+ uint64_t pend;
+ uint64_t reloc_offset;
+
+ uint64_t bsd_symtab_pstart;
+ uint64_t bsd_symtab_pend;
+
+#ifndef __XEN__
+ /* misc */
+ FILE *log;
+#endif
+ int verbose;
+};
+
+/* ------------------------------------------------------------------------ */
+/* accessing elf header fields */
+
+#ifdef XEN_ELF_BIG_ENDIAN
+# define NATIVE_ELFDATA ELFDATA2MSB
+#else
+# define NATIVE_ELFDATA ELFDATA2LSB
+#endif
+
+#define elf_32bit(elf) (ELFCLASS32 == (elf)->class)
+#define elf_64bit(elf) (ELFCLASS64 == (elf)->class)
+#define elf_msb(elf) (ELFDATA2MSB == (elf)->data)
+#define elf_lsb(elf) (ELFDATA2LSB == (elf)->data)
+#define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data)
+
+#define elf_uval(elf, str, elem) \
+ ((ELFCLASS64 == (elf)->class) \
+ ? elf_access_unsigned((elf), (str), \
+ offsetof(typeof(*(str)),e64.elem), \
+ sizeof((str)->e64.elem)) \
+ : elf_access_unsigned((elf), (str), \
+ offsetof(typeof(*(str)),e32.elem), \
+ sizeof((str)->e32.elem)))
+
+#define elf_sval(elf, str, elem) \
+ ((ELFCLASS64 == (elf)->class) \
+ ? elf_access_signed((elf), (str), \
+ offsetof(typeof(*(str)),e64.elem), \
+ sizeof((str)->e64.elem)) \
+ : elf_access_signed((elf), (str), \
+ offsetof(typeof(*(str)),e32.elem), \
+ sizeof((str)->e32.elem)))
+
+#define elf_size(elf, str) \
+ ((ELFCLASS64 == (elf)->class) \
+ ? sizeof((str)->e64) : sizeof((str)->e32))
+
+uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
+ uint64_t offset, size_t size);
+int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
+ uint64_t offset, size_t size);
+
+uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
+
+/* ------------------------------------------------------------------------ */
+/* xc_libelf_tools.c */
+
+int elf_shdr_count(struct elf_binary *elf);
+int elf_phdr_count(struct elf_binary *elf);
+
+const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
+const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
+const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
+
+const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
+const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
+const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
+
+const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
+const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
+
+const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
+const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
+
+const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
+const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
+uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
+const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
+
+int elf_is_elfbinary(const void *image);
+int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
+
+/* ------------------------------------------------------------------------ */
+/* xc_libelf_loader.c */
+
+int elf_init(struct elf_binary *elf, const char *image, size_t size);
+#ifdef __XEN__
+void elf_set_verbose(struct elf_binary *elf);
+#else
+void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose);
+#endif
+
+void elf_parse_binary(struct elf_binary *elf);
+void elf_load_binary(struct elf_binary *elf);
+
+void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
+uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
+
+void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart); /* private */
+
+/* ------------------------------------------------------------------------ */
+/* xc_libelf_relocate.c */
+
+int elf_reloc(struct elf_binary *elf);
+
+/* ------------------------------------------------------------------------ */
+/* xc_libelf_dominfo.c */
+
+#define UNSET_ADDR ((uint64_t)-1)
+
+enum xen_elfnote_type {
+ XEN_ENT_NONE = 0,
+ XEN_ENT_LONG = 1,
+ XEN_ENT_STR = 2
+};
+
+struct xen_elfnote {
+ enum xen_elfnote_type type;
+ const char *name;
+ union {
+ const char *str;
+ uint64_t num;
+ } data;
+};
+
+struct elf_dom_parms {
+ /* raw */
+ const char *guest_info;
+ const void *elf_note_start;
+ const void *elf_note_end;
+ struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
+
+ /* parsed */
+ char guest_os[16];
+ char guest_ver[16];
+ char xen_ver[16];
+ char loader[16];
+ int pae;
+ int bsd_symtab;
+ uint64_t virt_base;
+ uint64_t virt_entry;
+ uint64_t virt_hypercall;
+ uint64_t virt_hv_start_low;
+ uint64_t elf_paddr_offset;
+ uint32_t f_supported[XENFEAT_NR_SUBMAPS];
+ uint32_t f_required[XENFEAT_NR_SUBMAPS];
+
+ /* calculated */
+ uint64_t virt_offset;
+ uint64_t virt_kstart;
+ uint64_t virt_kend;
+};
+
+static inline void elf_xen_feature_set(int nr, uint32_t * addr)
+{
+ addr[nr >> 5] |= 1 << (nr & 31);
+}
+static inline int elf_xen_feature_get(int nr, uint32_t * addr)
+{
+ return !!(addr[nr >> 5] & (1 << (nr & 31)));
+}
+
+int elf_xen_parse_features(const char *features,
+ uint32_t *supported,
+ uint32_t *required);
+int elf_xen_parse_note(struct elf_binary *elf,
+ struct elf_dom_parms *parms,
+ const elf_note *note);
+int elf_xen_parse_guest_info(struct elf_binary *elf,
+ struct elf_dom_parms *parms);
+int elf_xen_parse(struct elf_binary *elf,
+ struct elf_dom_parms *parms);
+
+#endif /* __XC_LIBELF__ */
diff --git a/xen/public/memory.h b/xen/public/memory.h
new file mode 100644
index 0000000..d7b9fff
--- /dev/null
+++ b/xen/public/memory.h
@@ -0,0 +1,312 @@
+/******************************************************************************
+ * memory.h
+ *
+ * Memory reservation and information.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_MEMORY_H__
+#define __XEN_PUBLIC_MEMORY_H__
+
+/*
+ * Increase or decrease the specified domain's memory reservation. Returns the
+ * number of extents successfully allocated or freed.
+ * arg == addr of struct xen_memory_reservation.
+ */
+#define XENMEM_increase_reservation 0
+#define XENMEM_decrease_reservation 1
+#define XENMEM_populate_physmap 6
+
+#if __XEN_INTERFACE_VERSION__ >= 0x00030209
+/*
+ * Maximum # bits addressable by the user of the allocated region (e.g., I/O
+ * devices often have a 32-bit limitation even in 64-bit systems). If zero
+ * then the user has no addressing restriction. This field is not used by
+ * XENMEM_decrease_reservation.
+ */
+#define XENMEMF_address_bits(x) (x)
+#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
+/* NUMA node to allocate from. */
+#define XENMEMF_node(x) (((x) + 1) << 8)
+#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
+#endif
+
+struct xen_memory_reservation {
+
+ /*
+ * XENMEM_increase_reservation:
+ * OUT: MFN (*not* GMFN) bases of extents that were allocated
+ * XENMEM_decrease_reservation:
+ * IN: GMFN bases of extents to free
+ * XENMEM_populate_physmap:
+ * IN: GPFN bases of extents to populate with memory
+ * OUT: GMFN bases of extents that were allocated
+ * (NB. This command also updates the mach_to_phys translation table)
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
+
+ /* Number of extents, and size/alignment of each (2^extent_order pages). */
+ xen_ulong_t nr_extents;
+ unsigned int extent_order;
+
+#if __XEN_INTERFACE_VERSION__ >= 0x00030209
+ /* XENMEMF flags. */
+ unsigned int mem_flags;
+#else
+ unsigned int address_bits;
+#endif
+
+ /*
+ * Domain whose reservation is being changed.
+ * Unprivileged domains can specify only DOMID_SELF.
+ */
+ domid_t domid;
+};
+typedef struct xen_memory_reservation xen_memory_reservation_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
+
+/*
+ * An atomic exchange of memory pages. If return code is zero then
+ * @out.extent_list provides GMFNs of the newly-allocated memory.
+ * Returns zero on complete success, otherwise a negative error code.
+ * On complete success then always @nr_exchanged == @in.nr_extents.
+ * On partial success @nr_exchanged indicates how much work was done.
+ */
+#define XENMEM_exchange 11
+struct xen_memory_exchange {
+ /*
+ * [IN] Details of memory extents to be exchanged (GMFN bases).
+ * Note that @in.address_bits is ignored and unused.
+ */
+ struct xen_memory_reservation in;
+
+ /*
+ * [IN/OUT] Details of new memory extents.
+ * We require that:
+ * 1. @in.domid == @out.domid
+ * 2. @in.nr_extents << @in.extent_order ==
+ * @out.nr_extents << @out.extent_order
+ * 3. @in.extent_start and @out.extent_start lists must not overlap
+ * 4. @out.extent_start lists GPFN bases to be populated
+ * 5. @out.extent_start is overwritten with allocated GMFN bases
+ */
+ struct xen_memory_reservation out;
+
+ /*
+ * [OUT] Number of input extents that were successfully exchanged:
+ * 1. The first @nr_exchanged input extents were successfully
+ * deallocated.
+ * 2. The corresponding first entries in the output extent list correctly
+ * indicate the GMFNs that were successfully exchanged.
+ * 3. All other input and output extents are untouched.
+ * 4. If not all input extents are exchanged then the return code of this
+ * command will be non-zero.
+ * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
+ */
+ xen_ulong_t nr_exchanged;
+};
+typedef struct xen_memory_exchange xen_memory_exchange_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
+
+/*
+ * Returns the maximum machine frame number of mapped RAM in this system.
+ * This command always succeeds (it never returns an error code).
+ * arg == NULL.
+ */
+#define XENMEM_maximum_ram_page 2
+
+/*
+ * Returns the current or maximum memory reservation, in pages, of the
+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
+ * arg == addr of domid_t.
+ */
+#define XENMEM_current_reservation 3
+#define XENMEM_maximum_reservation 4
+
+/*
+ * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
+ */
+#define XENMEM_maximum_gpfn 14
+
+/*
+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
+ * mapping table. Architectures which do not have a m2p table do not implement
+ * this command.
+ * arg == addr of xen_machphys_mfn_list_t.
+ */
+#define XENMEM_machphys_mfn_list 5
+struct xen_machphys_mfn_list {
+ /*
+ * Size of the 'extent_start' array. Fewer entries will be filled if the
+ * machphys table is smaller than max_extents * 2MB.
+ */
+ unsigned int max_extents;
+
+ /*
+ * Pointer to buffer to fill with list of extent starts. If there are
+ * any large discontiguities in the machine address space, 2MB gaps in
+ * the machphys table will be represented by an MFN base of zero.
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
+
+ /*
+ * Number of extents written to the above array. This will be smaller
+ * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
+ */
+ unsigned int nr_extents;
+};
+typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
+
+/*
+ * Returns the location in virtual address space of the machine_to_phys
+ * mapping table. Architectures which do not have a m2p table, or which do not
+ * map it by default into guest address space, do not implement this command.
+ * arg == addr of xen_machphys_mapping_t.
+ */
+#define XENMEM_machphys_mapping 12
+struct xen_machphys_mapping {
+ xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
+ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
+};
+typedef struct xen_machphys_mapping xen_machphys_mapping_t;
+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
+
+/*
+ * Sets the GPFN at which a particular page appears in the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_add_to_physmap_t.
+ */
+#define XENMEM_add_to_physmap 7
+struct xen_add_to_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* Source mapping space. */
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+#define XENMAPSPACE_mfn 2 /* usual MFN */
+ unsigned int space;
+
+ /* Index into source mapping space. */
+ xen_ulong_t idx;
+
+ /* GPFN where the source mapping page should appear. */
+ xen_pfn_t gpfn;
+};
+typedef struct xen_add_to_physmap xen_add_to_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
+
+/*
+ * Unmaps the page appearing at a particular GPFN from the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_remove_from_physmap_t.
+ */
+#define XENMEM_remove_from_physmap 15
+struct xen_remove_from_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* GPFN of the current mapping of the page. */
+ xen_pfn_t gpfn;
+};
+typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
+
+/*
+ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
+ * code on failure. This call only works for auto-translated guests.
+ */
+#define XENMEM_translate_gpfn_list 8
+struct xen_translate_gpfn_list {
+ /* Which domain to translate for? */
+ domid_t domid;
+
+ /* Length of list. */
+ xen_ulong_t nr_gpfns;
+
+ /* List of GPFNs to translate. */
+ XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
+
+ /*
+ * Output list to contain MFN translations. May be the same as the input
+ * list (in which case each input GPFN is overwritten with the output MFN).
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
+};
+typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
+DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
+
+/*
+ * Returns the pseudo-physical memory map as it was when the domain
+ * was started (specified by XENMEM_set_memory_map).
+ * arg == addr of xen_memory_map_t.
+ */
+#define XENMEM_memory_map 9
+struct xen_memory_map {
+ /*
+ * On call the number of entries which can be stored in buffer. On
+ * return the number of entries which have been stored in
+ * buffer.
+ */
+ unsigned int nr_entries;
+
+ /*
+ * Entries in the buffer are in the same format as returned by the
+ * BIOS INT 0x15 EAX=0xE820 call.
+ */
+ XEN_GUEST_HANDLE(void) buffer;
+};
+typedef struct xen_memory_map xen_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
+
+/*
+ * Returns the real physical memory map. Passes the same structure as
+ * XENMEM_memory_map.
+ * arg == addr of xen_memory_map_t.
+ */
+#define XENMEM_machine_memory_map 10
+
+/*
+ * Set the pseudo-physical memory map of a domain, as returned by
+ * XENMEM_memory_map.
+ * arg == addr of xen_foreign_memory_map_t.
+ */
+#define XENMEM_set_memory_map 13
+struct xen_foreign_memory_map {
+ domid_t domid;
+ struct xen_memory_map map;
+};
+typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
+
+#endif /* __XEN_PUBLIC_MEMORY_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/nmi.h b/xen/public/nmi.h
new file mode 100644
index 0000000..b2b8401
--- /dev/null
+++ b/xen/public/nmi.h
@@ -0,0 +1,78 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error 0
+#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
+ /* Parity error reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_parity_error 1
+#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown 2
+#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback 0
+struct xennmi_callback {
+ unsigned long handler_address;
+ unsigned long pad;
+};
+typedef struct xennmi_callback xennmi_callback_t;
+DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/physdev.h b/xen/public/physdev.h
new file mode 100644
index 0000000..8057277
--- /dev/null
+++ b/xen/public/physdev.h
@@ -0,0 +1,219 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_PHYSDEV_H__
+#define __XEN_PUBLIC_PHYSDEV_H__
+
+/*
+ * Prototype for this hypercall is:
+ * int physdev_op(int cmd, void *args)
+ * @cmd == PHYSDEVOP_??? (physdev operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Notify end-of-interrupt (EOI) for the specified IRQ.
+ * @arg == pointer to physdev_eoi structure.
+ */
+#define PHYSDEVOP_eoi 12
+struct physdev_eoi {
+ /* IN */
+ uint32_t irq;
+};
+typedef struct physdev_eoi physdev_eoi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
+
+/*
+ * Query the status of an IRQ line.
+ * @arg == pointer to physdev_irq_status_query structure.
+ */
+#define PHYSDEVOP_irq_status_query 5
+struct physdev_irq_status_query {
+ /* IN */
+ uint32_t irq;
+ /* OUT */
+ uint32_t flags; /* XENIRQSTAT_* */
+};
+typedef struct physdev_irq_status_query physdev_irq_status_query_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
+
+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
+#define _XENIRQSTAT_needs_eoi (0)
+#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
+
+/* IRQ shared by multiple guests? */
+#define _XENIRQSTAT_shared (1)
+#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
+
+/*
+ * Set the current VCPU's I/O privilege level.
+ * @arg == pointer to physdev_set_iopl structure.
+ */
+#define PHYSDEVOP_set_iopl 6
+struct physdev_set_iopl {
+ /* IN */
+ uint32_t iopl;
+};
+typedef struct physdev_set_iopl physdev_set_iopl_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
+
+/*
+ * Set the current VCPU's I/O-port permissions bitmap.
+ * @arg == pointer to physdev_set_iobitmap structure.
+ */
+#define PHYSDEVOP_set_iobitmap 7
+struct physdev_set_iobitmap {
+ /* IN */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
+ XEN_GUEST_HANDLE(uint8) bitmap;
+#else
+ uint8_t *bitmap;
+#endif
+ uint32_t nr_ports;
+};
+typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
+
+/*
+ * Read or write an IO-APIC register.
+ * @arg == pointer to physdev_apic structure.
+ */
+#define PHYSDEVOP_apic_read 8
+#define PHYSDEVOP_apic_write 9
+struct physdev_apic {
+ /* IN */
+ unsigned long apic_physbase;
+ uint32_t reg;
+ /* IN or OUT */
+ uint32_t value;
+};
+typedef struct physdev_apic physdev_apic_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
+
+/*
+ * Allocate or free a physical upcall vector for the specified IRQ line.
+ * @arg == pointer to physdev_irq structure.
+ */
+#define PHYSDEVOP_alloc_irq_vector 10
+#define PHYSDEVOP_free_irq_vector 11
+struct physdev_irq {
+ /* IN */
+ uint32_t irq;
+ /* IN or OUT */
+ uint32_t vector;
+};
+typedef struct physdev_irq physdev_irq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
+
+#define MAP_PIRQ_TYPE_MSI 0x0
+#define MAP_PIRQ_TYPE_GSI 0x1
+#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+
+#define PHYSDEVOP_map_pirq 13
+struct physdev_map_pirq {
+ domid_t domid;
+ /* IN */
+ int type;
+ /* IN */
+ int index;
+ /* IN or OUT */
+ int pirq;
+ /* IN */
+ int bus;
+ /* IN */
+ int devfn;
+ /* IN */
+ int entry_nr;
+ /* IN */
+ uint64_t table_base;
+};
+typedef struct physdev_map_pirq physdev_map_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
+
+#define PHYSDEVOP_unmap_pirq 14
+struct physdev_unmap_pirq {
+ domid_t domid;
+ /* IN */
+ int pirq;
+};
+
+typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
+
+#define PHYSDEVOP_manage_pci_add 15
+#define PHYSDEVOP_manage_pci_remove 16
+struct physdev_manage_pci {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+
+typedef struct physdev_manage_pci physdev_manage_pci_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
+
+/*
+ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
+ * hypercall since 0x00030202.
+ */
+struct physdev_op {
+ uint32_t cmd;
+ union {
+ struct physdev_irq_status_query irq_status_query;
+ struct physdev_set_iopl set_iopl;
+ struct physdev_set_iobitmap set_iobitmap;
+ struct physdev_apic apic_op;
+ struct physdev_irq irq_op;
+ } u;
+};
+typedef struct physdev_op physdev_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
+
+/*
+ * Notify that some PIRQ-bound event channels have been unmasked.
+ * ** This command is obsolete since interface version 0x00030202 and is **
+ * ** unsupported by newer versions of Xen. **
+ */
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+
+/*
+ * These all-capitals physdev operation names are superseded by the new names
+ * (defined above) since interface version 0x00030202.
+ */
+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
+#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
+#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+
+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/platform.h b/xen/public/platform.h
new file mode 100644
index 0000000..eee047b
--- /dev/null
+++ b/xen/public/platform.h
@@ -0,0 +1,346 @@
+/******************************************************************************
+ * platform.h
+ *
+ * Hardware platform operations. Intended for use by domain-0 kernel.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2002-2006, K Fraser
+ */
+
+#ifndef __XEN_PUBLIC_PLATFORM_H__
+#define __XEN_PUBLIC_PLATFORM_H__
+
+#include "xen.h"
+
+#define XENPF_INTERFACE_VERSION 0x03000001
+
+/*
+ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
+ * 1 January, 1970 if the current system time was <system_time>.
+ */
+#define XENPF_settime 17
+struct xenpf_settime {
+ /* IN variables. */
+ uint32_t secs;
+ uint32_t nsecs;
+ uint64_t system_time;
+};
+typedef struct xenpf_settime xenpf_settime_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
+
+/*
+ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
+ * On x86, @type is an architecture-defined MTRR memory type.
+ * On success, returns the MTRR that was used (@reg) and a handle that can
+ * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
+ * (x86-specific).
+ */
+#define XENPF_add_memtype 31
+struct xenpf_add_memtype {
+ /* IN variables. */
+ xen_pfn_t mfn;
+ uint64_t nr_mfns;
+ uint32_t type;
+ /* OUT variables. */
+ uint32_t handle;
+ uint32_t reg;
+};
+typedef struct xenpf_add_memtype xenpf_add_memtype_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
+
+/*
+ * Tear down an existing memory-range type. If @handle is remembered then it
+ * should be passed in to accurately tear down the correct setting (in case
+ * of overlapping memory regions with differing types). If it is not known
+ * then @handle should be set to zero. In all cases @reg must be set.
+ * (x86-specific).
+ */
+#define XENPF_del_memtype 32
+struct xenpf_del_memtype {
+ /* IN variables. */
+ uint32_t handle;
+ uint32_t reg;
+};
+typedef struct xenpf_del_memtype xenpf_del_memtype_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
+
+/* Read current type of an MTRR (x86-specific). */
+#define XENPF_read_memtype 33
+struct xenpf_read_memtype {
+ /* IN variables. */
+ uint32_t reg;
+ /* OUT variables. */
+ xen_pfn_t mfn;
+ uint64_t nr_mfns;
+ uint32_t type;
+};
+typedef struct xenpf_read_memtype xenpf_read_memtype_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
+
+#define XENPF_microcode_update 35
+struct xenpf_microcode_update {
+ /* IN variables. */
+ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */
+ uint32_t length; /* Length of microcode data. */
+};
+typedef struct xenpf_microcode_update xenpf_microcode_update_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
+
+#define XENPF_platform_quirk 39
+#define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */
+#define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */
+#define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */
+struct xenpf_platform_quirk {
+ /* IN variables. */
+ uint32_t quirk_id;
+};
+typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
+
+#define XENPF_firmware_info 50
+#define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */
+#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
+#define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */
+struct xenpf_firmware_info {
+ /* IN variables. */
+ uint32_t type;
+ uint32_t index;
+ /* OUT variables. */
+ union {
+ struct {
+ /* Int13, Fn48: Check Extensions Present. */
+ uint8_t device; /* %dl: bios device number */
+ uint8_t version; /* %ah: major version */
+ uint16_t interface_support; /* %cx: support bitmap */
+ /* Int13, Fn08: Legacy Get Device Parameters. */
+ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */
+ uint8_t legacy_max_head; /* %dh: max head # */
+ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */
+ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
+ /* NB. First uint16_t of buffer must be set to buffer size. */
+ XEN_GUEST_HANDLE(void) edd_params;
+ } disk_info; /* XEN_FW_DISK_INFO */
+ struct {
+ uint8_t device; /* bios device number */
+ uint32_t mbr_signature; /* offset 0x1b8 in mbr */
+ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
+ struct {
+ /* Int10, AX=4F15: Get EDID info. */
+ uint8_t capabilities;
+ uint8_t edid_transfer_time;
+ /* must refer to 128-byte buffer */
+ XEN_GUEST_HANDLE(uint8) edid;
+ } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
+ } u;
+};
+typedef struct xenpf_firmware_info xenpf_firmware_info_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
+
+#define XENPF_enter_acpi_sleep 51
+struct xenpf_enter_acpi_sleep {
+ /* IN variables */
+ uint16_t pm1a_cnt_val; /* PM1a control value. */
+ uint16_t pm1b_cnt_val; /* PM1b control value. */
+ uint32_t sleep_state; /* Which state to enter (Sn). */
+ uint32_t flags; /* Must be zero. */
+};
+typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t);
+
+#define XENPF_change_freq 52
+struct xenpf_change_freq {
+ /* IN variables */
+ uint32_t flags; /* Must be zero. */
+ uint32_t cpu; /* Physical cpu. */
+ uint64_t freq; /* New frequency (Hz). */
+};
+typedef struct xenpf_change_freq xenpf_change_freq_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t);
+
+/*
+ * Get idle times (nanoseconds since boot) for physical CPUs specified in the
+ * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is
+ * indexed by CPU number; only entries with the corresponding @cpumap_bitmap
+ * bit set are written to. On return, @cpumap_bitmap is modified so that any
+ * non-existent CPUs are cleared. Such CPUs have their @idletime array entry
+ * cleared.
+ */
+#define XENPF_getidletime 53
+struct xenpf_getidletime {
+ /* IN/OUT variables */
+ /* IN: CPUs to interrogate; OUT: subset of IN which are present */
+ XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
+ /* IN variables */
+ /* Size of cpumap bitmap. */
+ uint32_t cpumap_nr_cpus;
+ /* Must be indexable for every cpu in cpumap_bitmap. */
+ XEN_GUEST_HANDLE(uint64) idletime;
+ /* OUT variables */
+ /* System time when the idletime snapshots were taken. */
+ uint64_t now;
+};
+typedef struct xenpf_getidletime xenpf_getidletime_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t);
+
+#define XENPF_set_processor_pminfo 54
+
+/* ability bits */
+#define XEN_PROCESSOR_PM_CX 1
+#define XEN_PROCESSOR_PM_PX 2
+#define XEN_PROCESSOR_PM_TX 4
+
+/* cmd type */
+#define XEN_PM_CX 0
+#define XEN_PM_PX 1
+#define XEN_PM_TX 2
+
+/* Px sub info type */
+#define XEN_PX_PCT 1
+#define XEN_PX_PSS 2
+#define XEN_PX_PPC 4
+#define XEN_PX_PSD 8
+
+struct xen_power_register {
+ uint32_t space_id;
+ uint32_t bit_width;
+ uint32_t bit_offset;
+ uint32_t access_size;
+ uint64_t address;
+};
+
+struct xen_processor_csd {
+ uint32_t domain; /* domain number of one dependent group */
+ uint32_t coord_type; /* coordination type */
+ uint32_t num; /* number of processors in same domain */
+};
+typedef struct xen_processor_csd xen_processor_csd_t;
+DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t);
+
+struct xen_processor_cx {
+ struct xen_power_register reg; /* GAS for Cx trigger register */
+ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */
+ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */
+ uint32_t power; /* average power consumption (mW) */
+ uint32_t dpcnt; /* number of dependency entries */
+ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */
+};
+typedef struct xen_processor_cx xen_processor_cx_t;
+DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t);
+
+struct xen_processor_flags {
+ uint32_t bm_control:1;
+ uint32_t bm_check:1;
+ uint32_t has_cst:1;
+ uint32_t power_setup_done:1;
+ uint32_t bm_rld_set:1;
+};
+
+struct xen_processor_power {
+ uint32_t count; /* number of C state entries in array below */
+ struct xen_processor_flags flags; /* global flags of this processor */
+ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */
+};
+
+struct xen_pct_register {
+ uint8_t descriptor;
+ uint16_t length;
+ uint8_t space_id;
+ uint8_t bit_width;
+ uint8_t bit_offset;
+ uint8_t reserved;
+ uint64_t address;
+};
+
+struct xen_processor_px {
+ uint64_t core_frequency; /* megahertz */
+ uint64_t power; /* milliWatts */
+ uint64_t transition_latency; /* microseconds */
+ uint64_t bus_master_latency; /* microseconds */
+ uint64_t control; /* control value */
+ uint64_t status; /* success indicator */
+};
+typedef struct xen_processor_px xen_processor_px_t;
+DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t);
+
+struct xen_psd_package {
+ uint64_t num_entries;
+ uint64_t revision;
+ uint64_t domain;
+ uint64_t coord_type;
+ uint64_t num_processors;
+};
+
+struct xen_processor_performance {
+ uint32_t flags; /* flag for Px sub info type */
+ uint32_t platform_limit; /* Platform limitation on freq usage */
+ struct xen_pct_register control_register;
+ struct xen_pct_register status_register;
+ uint32_t state_count; /* total available performance states */
+ XEN_GUEST_HANDLE(xen_processor_px_t) states;
+ struct xen_psd_package domain_info;
+ uint32_t shared_type; /* coordination type of this processor */
+};
+typedef struct xen_processor_performance xen_processor_performance_t;
+DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t);
+
+struct xenpf_set_processor_pminfo {
+ /* IN variables */
+ uint32_t id; /* ACPI CPU ID */
+ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */
+ union {
+ struct xen_processor_power power;/* Cx: _CST/_CSD */
+ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */
+ };
+};
+typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t);
+
+struct xen_platform_op {
+ uint32_t cmd;
+ uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
+ union {
+ struct xenpf_settime settime;
+ struct xenpf_add_memtype add_memtype;
+ struct xenpf_del_memtype del_memtype;
+ struct xenpf_read_memtype read_memtype;
+ struct xenpf_microcode_update microcode;
+ struct xenpf_platform_quirk platform_quirk;
+ struct xenpf_firmware_info firmware_info;
+ struct xenpf_enter_acpi_sleep enter_acpi_sleep;
+ struct xenpf_change_freq change_freq;
+ struct xenpf_getidletime getidletime;
+ struct xenpf_set_processor_pminfo set_pminfo;
+ uint8_t pad[128];
+ } u;
+};
+typedef struct xen_platform_op xen_platform_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
+
+#endif /* __XEN_PUBLIC_PLATFORM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/sched.h b/xen/public/sched.h
new file mode 100644
index 0000000..2227a95
--- /dev/null
+++ b/xen/public/sched.h
@@ -0,0 +1,121 @@
+/******************************************************************************
+ * sched.h
+ *
+ * Scheduler state interactions
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_SCHED_H__
+#define __XEN_PUBLIC_SCHED_H__
+
+#include "event_channel.h"
+
+/*
+ * The prototype for this hypercall is:
+ * long sched_op(int cmd, void *arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == Operation-specific extra argument(s), as described below.
+ *
+ * Versions of Xen prior to 3.0.2 provided only the following legacy version
+ * of this hypercall, supporting only the commands yield, block and shutdown:
+ * long sched_op(int cmd, unsigned long arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
+ * == SHUTDOWN_* code (SCHEDOP_shutdown)
+ * This legacy version is available to new guests as sched_op_compat().
+ */
+
+/*
+ * Voluntarily yield the CPU.
+ * @arg == NULL.
+ */
+#define SCHEDOP_yield 0
+
+/*
+ * Block execution of this VCPU until an event is received for processing.
+ * If called with event upcalls masked, this operation will atomically
+ * reenable event delivery and check for pending events before blocking the
+ * VCPU. This avoids a "wakeup waiting" race.
+ * @arg == NULL.
+ */
+#define SCHEDOP_block 1
+
+/*
+ * Halt execution of this domain (all VCPUs) and notify the system controller.
+ * @arg == pointer to sched_shutdown structure.
+ */
+#define SCHEDOP_shutdown 2
+struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* */
+};
+typedef struct sched_shutdown sched_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
+
+/*
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll structure.
+ */
+#define SCHEDOP_poll 3
+struct sched_poll {
+ XEN_GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+};
+typedef struct sched_poll sched_poll_t;
+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
+
+/*
+ * Declare a shutdown for another domain. The main use of this function is
+ * in interpreting shutdown requests and reasons for fully-virtualized
+ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
+ * @arg == pointer to sched_remote_shutdown structure.
+ */
+#define SCHEDOP_remote_shutdown 4
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_xxx reason */
+};
+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
+ * software to determine the appropriate action. For the most part, Xen does
+ * not care about the shutdown code.
+ */
+#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
+#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
+#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+
+#endif /* __XEN_PUBLIC_SCHED_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/sysctl.h b/xen/public/sysctl.h
new file mode 100644
index 0000000..6b10954
--- /dev/null
+++ b/xen/public/sysctl.h
@@ -0,0 +1,308 @@
+/******************************************************************************
+ * sysctl.h
+ *
+ * System management operations. For use by node control stack.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2002-2006, K Fraser
+ */
+
+#ifndef __XEN_PUBLIC_SYSCTL_H__
+#define __XEN_PUBLIC_SYSCTL_H__
+
+#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
+#error "sysctl operations are intended for use by node control tools only"
+#endif
+
+#include "xen.h"
+#include "domctl.h"
+
+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000006
+
+/*
+ * Read console content from Xen buffer ring.
+ */
+#define XEN_SYSCTL_readconsole 1
+struct xen_sysctl_readconsole {
+ /* IN: Non-zero -> clear after reading. */
+ uint8_t clear;
+ /* IN: Non-zero -> start index specified by @index field. */
+ uint8_t incremental;
+ uint8_t pad0, pad1;
+ /*
+ * IN: Start index for consuming from ring buffer (if @incremental);
+ * OUT: End index after consuming from ring buffer.
+ */
+ uint32_t index;
+ /* IN: Virtual address to write console data. */
+ XEN_GUEST_HANDLE_64(char) buffer;
+ /* IN: Size of buffer; OUT: Bytes written to buffer. */
+ uint32_t count;
+};
+typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
+
+/* Get trace buffers machine base address */
+#define XEN_SYSCTL_tbuf_op 2
+struct xen_sysctl_tbuf_op {
+ /* IN variables */
+#define XEN_SYSCTL_TBUFOP_get_info 0
+#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
+#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
+#define XEN_SYSCTL_TBUFOP_set_size 3
+#define XEN_SYSCTL_TBUFOP_enable 4
+#define XEN_SYSCTL_TBUFOP_disable 5
+ uint32_t cmd;
+ /* IN/OUT variables */
+ struct xenctl_cpumap cpu_mask;
+ uint32_t evt_mask;
+ /* OUT variables */
+ uint64_aligned_t buffer_mfn;
+ uint32_t size;
+};
+typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
+
+/*
+ * Get physical information about the host machine
+ */
+#define XEN_SYSCTL_physinfo 3
+ /* (x86) The platform supports HVM guests. */
+#define _XEN_SYSCTL_PHYSCAP_hvm 0
+#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
+ /* (x86) The platform supports HVM-guest direct access to I/O devices. */
+#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
+#define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
+struct xen_sysctl_physinfo {
+ uint32_t threads_per_core;
+ uint32_t cores_per_socket;
+ uint32_t nr_cpus;
+ uint32_t nr_nodes;
+ uint32_t cpu_khz;
+ uint64_aligned_t total_pages;
+ uint64_aligned_t free_pages;
+ uint64_aligned_t scrub_pages;
+ uint32_t hw_cap[8];
+
+ /*
+ * IN: maximum addressable entry in the caller-provided cpu_to_node array.
+ * OUT: largest cpu identifier in the system.
+ * If OUT is greater than IN then the cpu_to_node array is truncated!
+ */
+ uint32_t max_cpu_id;
+ /*
+ * If not NULL, this array is filled with node identifier for each cpu.
+ * If a cpu has no node information (e.g., cpu not present) then the
+ * sentinel value ~0u is written.
+ * The size of this array is specified by the caller in @max_cpu_id.
+ * If the actual @max_cpu_id is smaller than the array then the trailing
+ * elements of the array will not be written by the sysctl.
+ */
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+
+ /* XEN_SYSCTL_PHYSCAP_??? */
+ uint32_t capabilities;
+};
+typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
+
+/*
+ * Get the ID of the current scheduler.
+ */
+#define XEN_SYSCTL_sched_id 4
+struct xen_sysctl_sched_id {
+ /* OUT variable */
+ uint32_t sched_id;
+};
+typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
+
+/* Interface for controlling Xen software performance counters. */
+#define XEN_SYSCTL_perfc_op 5
+/* Sub-operations: */
+#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */
+#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */
+struct xen_sysctl_perfc_desc {
+ char name[80]; /* name of perf counter */
+ uint32_t nr_vals; /* number of values for this counter */
+};
+typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
+typedef uint32_t xen_sysctl_perfc_val_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
+
+struct xen_sysctl_perfc_op {
+ /* IN variables. */
+ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */
+ /* OUT variables. */
+ uint32_t nr_counters; /* number of counters description */
+ uint32_t nr_vals; /* number of values */
+ /* counter information (or NULL) */
+ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
+ /* counter values (or NULL) */
+ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
+};
+typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
+
+#define XEN_SYSCTL_getdomaininfolist 6
+struct xen_sysctl_getdomaininfolist {
+ /* IN variables. */
+ domid_t first_domain;
+ uint32_t max_domains;
+ XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
+ /* OUT variables. */
+ uint32_t num_domains;
+};
+typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
+
+/* Inject debug keys into Xen. */
+#define XEN_SYSCTL_debug_keys 7
+struct xen_sysctl_debug_keys {
+ /* IN variables. */
+ XEN_GUEST_HANDLE_64(char) keys;
+ uint32_t nr_keys;
+};
+typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
+
+/* Get physical CPU information. */
+#define XEN_SYSCTL_getcpuinfo 8
+struct xen_sysctl_cpuinfo {
+ uint64_aligned_t idletime;
+};
+typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
+struct xen_sysctl_getcpuinfo {
+ /* IN variables. */
+ uint32_t max_cpus;
+ XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
+ /* OUT variables. */
+ uint32_t nr_cpus;
+};
+typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
+
+#define XEN_SYSCTL_availheap 9
+struct xen_sysctl_availheap {
+ /* IN variables. */
+ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
+ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
+ int32_t node; /* NUMA node of interest (-1 for all nodes). */
+ /* OUT variables. */
+ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
+};
+typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
+
+#define XEN_SYSCTL_get_pmstat 10
+struct pm_px_val {
+ uint64_aligned_t freq; /* Px core frequency */
+ uint64_aligned_t residency; /* Px residency time */
+ uint64_aligned_t count; /* Px transition count */
+};
+typedef struct pm_px_val pm_px_val_t;
+DEFINE_XEN_GUEST_HANDLE(pm_px_val_t);
+
+struct pm_px_stat {
+ uint8_t total; /* total Px states */
+ uint8_t usable; /* usable Px states */
+ uint8_t last; /* last Px state */
+ uint8_t cur; /* current Px state */
+ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
+ XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
+};
+typedef struct pm_px_stat pm_px_stat_t;
+DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
+
+struct pm_cx_stat {
+ uint32_t nr; /* entry nr in triggers & residencies, including C0 */
+ uint32_t last; /* last Cx state */
+ uint64_aligned_t idle_time; /* idle time from boot */
+ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */
+ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
+};
+
+struct xen_sysctl_get_pmstat {
+#define PMSTAT_CATEGORY_MASK 0xf0
+#define PMSTAT_PX 0x10
+#define PMSTAT_CX 0x20
+#define PMSTAT_get_max_px (PMSTAT_PX | 0x1)
+#define PMSTAT_get_pxstat (PMSTAT_PX | 0x2)
+#define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3)
+#define PMSTAT_get_max_cx (PMSTAT_CX | 0x1)
+#define PMSTAT_get_cxstat (PMSTAT_CX | 0x2)
+#define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3)
+ uint32_t type;
+ uint32_t cpuid;
+ union {
+ struct pm_px_stat getpx;
+ struct pm_cx_stat getcx;
+ /* other struct for tx, etc */
+ } u;
+};
+typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
+
+#define XEN_SYSCTL_cpu_hotplug 11
+struct xen_sysctl_cpu_hotplug {
+ /* IN variables */
+ uint32_t cpu; /* Physical cpu. */
+#define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0
+#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
+ uint32_t op; /* hotplug opcode */
+};
+typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
+
+
+struct xen_sysctl {
+ uint32_t cmd;
+ uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
+ union {
+ struct xen_sysctl_readconsole readconsole;
+ struct xen_sysctl_tbuf_op tbuf_op;
+ struct xen_sysctl_physinfo physinfo;
+ struct xen_sysctl_sched_id sched_id;
+ struct xen_sysctl_perfc_op perfc_op;
+ struct xen_sysctl_getdomaininfolist getdomaininfolist;
+ struct xen_sysctl_debug_keys debug_keys;
+ struct xen_sysctl_getcpuinfo getcpuinfo;
+ struct xen_sysctl_availheap availheap;
+ struct xen_sysctl_get_pmstat get_pmstat;
+ struct xen_sysctl_cpu_hotplug cpu_hotplug;
+ uint8_t pad[128];
+ } u;
+};
+typedef struct xen_sysctl xen_sysctl_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
+
+#endif /* __XEN_PUBLIC_SYSCTL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/trace.h b/xen/public/trace.h
new file mode 100644
index 0000000..0fc864d
--- /dev/null
+++ b/xen/public/trace.h
@@ -0,0 +1,206 @@
+/******************************************************************************
+ * include/public/trace.h
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Mark Williamson, (C) 2004 Intel Research Cambridge
+ * Copyright (C) 2005 Bin Ren
+ */
+
+#ifndef __XEN_PUBLIC_TRACE_H__
+#define __XEN_PUBLIC_TRACE_H__
+
+#define TRACE_EXTRA_MAX 7
+#define TRACE_EXTRA_SHIFT 28
+
+/* Trace classes */
+#define TRC_CLS_SHIFT 16
+#define TRC_GEN 0x0001f000 /* General trace */
+#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
+#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
+#define TRC_HVM 0x0008f000 /* Xen HVM trace */
+#define TRC_MEM 0x0010f000 /* Xen memory trace */
+#define TRC_PV 0x0020f000 /* Xen PV traces */
+#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */
+#define TRC_PM 0x0080f000 /* Xen power management trace */
+#define TRC_ALL 0x0ffff000
+#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
+#define TRC_HD_CYCLE_FLAG (1UL<<31)
+#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) )
+#define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX)
+
+/* Trace subclasses */
+#define TRC_SUBCLS_SHIFT 12
+
+/* trace subclasses for SVM */
+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+
+#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
+#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
+
+/* Trace events per class */
+#define TRC_LOST_RECORDS (TRC_GEN + 1)
+#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
+#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
+
+#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1)
+#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2)
+#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3)
+#define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4)
+#define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5)
+#define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6)
+#define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7)
+#define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8)
+#define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9)
+#define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10)
+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11)
+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12)
+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13)
+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
+
+#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
+#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+
+#define TRC_PV_HYPERCALL (TRC_PV + 1)
+#define TRC_PV_TRAP (TRC_PV + 3)
+#define TRC_PV_PAGE_FAULT (TRC_PV + 4)
+#define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5)
+#define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6)
+#define TRC_PV_EMULATE_4GB (TRC_PV + 7)
+#define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8)
+#define TRC_PV_PAGING_FIXUP (TRC_PV + 9)
+#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10)
+#define TRC_PV_PTWR_EMULATION (TRC_PV + 11)
+#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
+ /* Indicates that addresses in trace record are 64 bits */
+#define TRC_64_FLAG (0x100)
+
+#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1)
+#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2)
+#define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3)
+#define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4)
+#define TRC_SHADOW_MMIO (TRC_SHADOW + 5)
+#define TRC_SHADOW_FIXUP (TRC_SHADOW + 6)
+#define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7)
+#define TRC_SHADOW_EMULATE (TRC_SHADOW + 8)
+#define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9)
+#define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10)
+#define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11)
+#define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12)
+#define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13)
+#define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14)
+#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15)
+
+/* trace events per subclass */
+#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
+#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
+#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
+#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
+#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
+#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
+#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
+#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
+#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
+#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
+#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
+#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
+#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
+#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
+#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
+#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
+#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
+#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
+#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
+#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
+#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
+#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
+#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
+#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
+#define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16)
+#define TRC_HVM_IO_ASSIST64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x16)
+#define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17)
+#define TRC_HVM_MMIO_ASSIST64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x17)
+#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
+#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
+#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
+
+/* trace subclasses for power management */
+#define TRC_PM_FREQ 0x00801000 /* xen cpu freq events */
+#define TRC_PM_IDLE 0x00802000 /* xen cpu idle events */
+
+/* trace events per class */
+#define TRC_PM_FREQ_CHANGE (TRC_PM_FREQ + 0x01)
+#define TRC_PM_IDLE_ENTRY (TRC_PM_IDLE + 0x01)
+#define TRC_PM_IDLE_EXIT (TRC_PM_IDLE + 0x02)
+
+/* This structure represents a single trace buffer record. */
+struct t_rec {
+ uint32_t event:28;
+ uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */
+ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */
+ union {
+ struct {
+ uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */
+ uint32_t extra_u32[7]; /* event data items */
+ } cycles;
+ struct {
+ uint32_t extra_u32[7]; /* event data items */
+ } nocycles;
+ } u;
+};
+
+/*
+ * This structure contains the metadata for a single trace buffer. The head
+ * field indexes into an array of struct t_rec's.
+ */
+struct t_buf {
+ /* Assume the data buffer size is X. X is generally not a power of 2.
+ * CONS and PROD are incremented modulo (2*X):
+ * 0 <= cons < 2*X
+ * 0 <= prod < 2*X
+ * This is done because addition modulo X breaks at 2^32 when X is not a
+ * power of 2:
+ * (((2^32 - 1) % X) + 1) % X != (2^32) % X
+ */
+ uint32_t cons; /* Offset of next item to be consumed by control tools. */
+ uint32_t prod; /* Offset of next item to be produced by Xen. */
+ /* Records follow immediately after the meta-data header. */
+};
+
+#endif /* __XEN_PUBLIC_TRACE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
+
diff --git a/xen/public/vcpu.h b/xen/public/vcpu.h
new file mode 100644
index 0000000..ab65493
--- /dev/null
+++ b/xen/public/vcpu.h
@@ -0,0 +1,213 @@
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU initialisation, query, and hotplug.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+/*
+ * Prototype for this hypercall is:
+ * int vcpu_op(int cmd, int vcpuid, void *extra_args)
+ * @cmd == VCPUOP_??? (VCPU operation).
+ * @vcpuid == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Initialise a VCPU. Each VCPU can be initialised only once. A
+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
+ * state for the VCPU.
+ */
+#define VCPUOP_initialise 0
+
+/*
+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
+ * if the VCPU has not been initialised (VCPUOP_initialise).
+ */
+#define VCPUOP_up 1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ * 1. This operation may return, and VCPU_is_up may return false, before the
+ * VCPU stops running (i.e., the command is asynchronous). It is a good
+ * idea to ensure that the VCPU has entered a non-critical loop before
+ * bringing it down. Alternatively, this operation is guaranteed
+ * synchronous if invoked by the VCPU itself.
+ * 2. After a VCPU is initialised, there is currently no way to drop all its
+ * references to domain memory. Even a VCPU that is down still holds
+ * memory references via its pagetable base pointer and GDT. It is good
+ * practise to move a VCPU onto an 'idle' or default page table, LDT and
+ * GDT before bringing it down.
+ */
+#define VCPUOP_down 2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up 3
+
+/*
+ * Return information about the state and running time of a VCPU.
+ * @extra_arg == pointer to vcpu_runstate_info structure.
+ */
+#define VCPUOP_get_runstate_info 4
+struct vcpu_runstate_info {
+ /* VCPU's current state (RUNSTATE_*). */
+ int state;
+ /* When was current state entered (system time, ns)? */
+ uint64_t state_entry_time;
+ /*
+ * Time spent in each RUNSTATE_* (ns). The sum of these times is
+ * guaranteed not to drift from system time.
+ */
+ uint64_t time[4];
+};
+typedef struct vcpu_runstate_info vcpu_runstate_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
+
+/* VCPU is currently running on a physical CPU. */
+#define RUNSTATE_running 0
+
+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
+#define RUNSTATE_runnable 1
+
+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
+#define RUNSTATE_blocked 2
+
+/*
+ * VCPU is not runnable, but it is not blocked.
+ * This is a 'catch all' state for things like hotplug and pauses by the
+ * system administrator (or for critical sections in the hypervisor).
+ * RUNSTATE_blocked dominates this state (it is the preferred state).
+ */
+#define RUNSTATE_offline 3
+
+/*
+ * Register a shared memory area from which the guest may obtain its own
+ * runstate information without needing to execute a hypercall.
+ * Notes:
+ * 1. The registered address may be virtual or physical or guest handle,
+ * depending on the platform. Virtual address or guest handle should be
+ * registered on x86 systems.
+ * 2. Only one shared area may be registered per VCPU. The shared area is
+ * updated by the hypervisor each time the VCPU is scheduled. Thus
+ * runstate.state will always be RUNSTATE_running and
+ * runstate.state_entry_time will indicate the system time at which the
+ * VCPU was last scheduled to run.
+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
+ */
+#define VCPUOP_register_runstate_memory_area 5
+struct vcpu_register_runstate_memory_area {
+ union {
+ XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
+ struct vcpu_runstate_info *v;
+ uint64_t p;
+ } addr;
+};
+typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
+
+/*
+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
+ * which can be set via these commands. Periods smaller than one millisecond
+ * may not be supported.
+ */
+#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
+#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
+struct vcpu_set_periodic_timer {
+ uint64_t period_ns;
+};
+typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
+
+/*
+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
+ * timer which can be set via these commands.
+ */
+#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
+struct vcpu_set_singleshot_timer {
+ uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
+ uint32_t flags; /* VCPU_SSHOTTMR_??? */
+};
+typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
+
+/* Flags to VCPUOP_set_singleshot_timer. */
+ /* Require the timeout to be in the future (return -ETIME if it's passed). */
+#define _VCPU_SSHOTTMR_future (0)
+#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
+
+/*
+ * Register a memory location in the guest address space for the
+ * vcpu_info structure. This allows the guest to place the vcpu_info
+ * structure in a convenient place, such as in a per-cpu data area.
+ * The pointer need not be page aligned, but the structure must not
+ * cross a page boundary.
+ *
+ * This may be called only once per vcpu.
+ */
+#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
+struct vcpu_register_vcpu_info {
+ uint64_t mfn; /* mfn of page to place vcpu_info */
+ uint32_t offset; /* offset within page */
+ uint32_t rsvd; /* unused */
+};
+typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
+
+/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
+#define VCPUOP_send_nmi 11
+
+/*
+ * Get the physical ID information for a pinned vcpu's underlying physical
+ * processor. The physical ID information is architecture-specific.
+ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
+ * greater are reserved.
+ * This command returns -EINVAL if it is not a valid operation for this VCPU.
+ */
+#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
+struct vcpu_get_physid {
+ uint64_t phys_id;
+};
+typedef struct vcpu_get_physid vcpu_get_physid_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
+#define xen_vcpu_physid_to_x86_apicid(physid) \
+ ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
+#define xen_vcpu_physid_to_x86_acpiid(physid) \
+ ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/version.h b/xen/public/version.h
new file mode 100644
index 0000000..944ca62
--- /dev/null
+++ b/xen/public/version.h
@@ -0,0 +1,91 @@
+/******************************************************************************
+ * version.h
+ *
+ * Xen version, type, and compile information.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VERSION_H__
+#define __XEN_PUBLIC_VERSION_H__
+
+/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
+
+/* arg == NULL; returns major:minor (16:16). */
+#define XENVER_version 0
+
+/* arg == xen_extraversion_t. */
+#define XENVER_extraversion 1
+typedef char xen_extraversion_t[16];
+#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
+
+/* arg == xen_compile_info_t. */
+#define XENVER_compile_info 2
+struct xen_compile_info {
+ char compiler[64];
+ char compile_by[16];
+ char compile_domain[32];
+ char compile_date[32];
+};
+typedef struct xen_compile_info xen_compile_info_t;
+
+#define XENVER_capabilities 3
+typedef char xen_capabilities_info_t[1024];
+#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
+
+#define XENVER_changeset 4
+typedef char xen_changeset_info_t[64];
+#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
+
+#define XENVER_platform_parameters 5
+struct xen_platform_parameters {
+ unsigned long virt_start;
+};
+typedef struct xen_platform_parameters xen_platform_parameters_t;
+
+#define XENVER_get_features 6
+struct xen_feature_info {
+ unsigned int submap_idx; /* IN: which 32-bit submap to return */
+ uint32_t submap; /* OUT: 32-bit submap */
+};
+typedef struct xen_feature_info xen_feature_info_t;
+
+/* Declares the features reported by XENVER_get_features. */
+#include "features.h"
+
+/* arg == NULL; returns host memory page size. */
+#define XENVER_pagesize 7
+
+/* arg == xen_domain_handle_t. */
+#define XENVER_guest_handle 8
+
+#endif /* __XEN_PUBLIC_VERSION_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/xen-compat.h b/xen/public/xen-compat.h
new file mode 100644
index 0000000..329be07
--- /dev/null
+++ b/xen/public/xen-compat.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ * xen-compat.h
+ *
+ * Guest OS interface to Xen. Compatibility layer.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Christian Limpach
+ */
+
+#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
+#define __XEN_PUBLIC_XEN_COMPAT_H__
+
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Xen is built with matching headers and implements the latest interface. */
+#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
+#elif !defined(__XEN_INTERFACE_VERSION__)
+/* Guests which do not specify a version get the legacy interface. */
+#define __XEN_INTERFACE_VERSION__ 0x00000000
+#endif
+
+#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
+#error "These header files do not support the requested interface version."
+#endif
+
+#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
diff --git a/xen/public/xen.h b/xen/public/xen.h
new file mode 100644
index 0000000..084bb90
--- /dev/null
+++ b/xen/public/xen.h
@@ -0,0 +1,657 @@
+/******************************************************************************
+ * xen.h
+ *
+ * Guest OS interface to Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_XEN_H__
+#define __XEN_PUBLIC_XEN_H__
+
+#include <sys/types.h>
+
+#include "xen-compat.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+#include "arch-x86/xen.h"
+#elif defined(__ia64__)
+#include "arch-ia64.h"
+#else
+#error "Unsupported architecture"
+#endif
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+DEFINE_XEN_GUEST_HANDLE(char);
+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
+DEFINE_XEN_GUEST_HANDLE(int);
+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+DEFINE_XEN_GUEST_HANDLE(long);
+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_XEN_GUEST_HANDLE(void);
+
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+#endif
+
+/*
+ * HYPERCALLS
+ */
+
+#define __HYPERVISOR_set_trap_table 0
+#define __HYPERVISOR_mmu_update 1
+#define __HYPERVISOR_set_gdt 2
+#define __HYPERVISOR_stack_switch 3
+#define __HYPERVISOR_set_callbacks 4
+#define __HYPERVISOR_fpu_taskswitch 5
+#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
+#define __HYPERVISOR_platform_op 7
+#define __HYPERVISOR_set_debugreg 8
+#define __HYPERVISOR_get_debugreg 9
+#define __HYPERVISOR_update_descriptor 10
+#define __HYPERVISOR_memory_op 12
+#define __HYPERVISOR_multicall 13
+#define __HYPERVISOR_update_va_mapping 14
+#define __HYPERVISOR_set_timer_op 15
+#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
+#define __HYPERVISOR_xen_version 17
+#define __HYPERVISOR_console_io 18
+#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
+#define __HYPERVISOR_grant_table_op 20
+#define __HYPERVISOR_vm_assist 21
+#define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_iret 23 /* x86 only */
+#define __HYPERVISOR_vcpu_op 24
+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
+#define __HYPERVISOR_mmuext_op 26
+#define __HYPERVISOR_xsm_op 27
+#define __HYPERVISOR_nmi_op 28
+#define __HYPERVISOR_sched_op 29
+#define __HYPERVISOR_callback_op 30
+#define __HYPERVISOR_xenoprof_op 31
+#define __HYPERVISOR_event_channel_op 32
+#define __HYPERVISOR_physdev_op 33
+#define __HYPERVISOR_hvm_op 34
+#define __HYPERVISOR_sysctl 35
+#define __HYPERVISOR_domctl 36
+#define __HYPERVISOR_kexec_op 37
+
+/* Architecture-specific hypercall definitions. */
+#define __HYPERVISOR_arch_0 48
+#define __HYPERVISOR_arch_1 49
+#define __HYPERVISOR_arch_2 50
+#define __HYPERVISOR_arch_3 51
+#define __HYPERVISOR_arch_4 52
+#define __HYPERVISOR_arch_5 53
+#define __HYPERVISOR_arch_6 54
+#define __HYPERVISOR_arch_7 55
+
+/*
+ * HYPERCALL COMPATIBILITY.
+ */
+
+/* New sched_op hypercall introduced in 0x00030101. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030101
+#undef __HYPERVISOR_sched_op
+#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
+#endif
+
+/* New event-channel and physdev hypercalls introduced in 0x00030202. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030202
+#undef __HYPERVISOR_event_channel_op
+#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
+#undef __HYPERVISOR_physdev_op
+#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
+#endif
+
+/* New platform_op hypercall introduced in 0x00030204. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030204
+#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
+#endif
+
+/*
+ * VIRTUAL INTERRUPTS
+ *
+ * Virtual interrupts that a guest OS may receive from Xen.
+ *
+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
+ * The latter can be allocated only once per guest: they must initially be
+ * allocated to VCPU0 but can subsequently be re-bound.
+ */
+#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
+#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
+#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
+#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
+#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
+#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
+
+/* Architecture-specific VIRQ definitions. */
+#define VIRQ_ARCH_0 16
+#define VIRQ_ARCH_1 17
+#define VIRQ_ARCH_2 18
+#define VIRQ_ARCH_3 19
+#define VIRQ_ARCH_4 20
+#define VIRQ_ARCH_5 21
+#define VIRQ_ARCH_6 22
+#define VIRQ_ARCH_7 23
+
+#define NR_VIRQS 24
+
+/*
+ * MMU-UPDATE REQUESTS
+ *
+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ * ptr[1:0] specifies the appropriate MMU_* command.
+ *
+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
+ * Updates an entry in a page table. If updating an L1 table, and the new
+ * table entry is valid/present, the mapped frame must belong to the FD, if
+ * an FD has been specified. If attempting to map an I/O page then the
+ * caller assumes the privilege of the FD.
+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
+ * ptr[:2] -- Machine address of the page-table entry to modify.
+ * val -- Value to write.
+ *
+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
+ * Updates an entry in the machine->pseudo-physical mapping table.
+ * ptr[:2] -- Machine address within the frame whose mapping to modify.
+ * The frame must belong to the FD, if one is specified.
+ * val -- Value to write into the mapping entry.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
+ * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
+ * with those in @val.
+ */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+
+/*
+ * MMU EXTENDED OPERATIONS
+ *
+ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ *
+ * cmd: MMUEXT_(UN)PIN_*_TABLE
+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
+ * The frame must belong to the FD, if one is specified.
+ *
+ * cmd: MMUEXT_NEW_BASEPTR
+ * mfn: Machine frame number of new page-table base to install in MMU.
+ *
+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
+ * mfn: Machine frame number of new page-table base to install in MMU
+ * when in user space.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
+ * No additional arguments. Flushes local TLB.
+ *
+ * cmd: MMUEXT_INVLPG_LOCAL
+ * linear_addr: Linear address to be flushed from the local TLB.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_MULTI
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_INVLPG_MULTI
+ * linear_addr: Linear address to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_ALL
+ * No additional arguments. Flushes all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_INVLPG_ALL
+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE
+ * No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_SET_LDT
+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
+ * nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ */
+#define MMUEXT_PIN_L1_TABLE 0
+#define MMUEXT_PIN_L2_TABLE 1
+#define MMUEXT_PIN_L3_TABLE 2
+#define MMUEXT_PIN_L4_TABLE 3
+#define MMUEXT_UNPIN_TABLE 4
+#define MMUEXT_NEW_BASEPTR 5
+#define MMUEXT_TLB_FLUSH_LOCAL 6
+#define MMUEXT_INVLPG_LOCAL 7
+#define MMUEXT_TLB_FLUSH_MULTI 8
+#define MMUEXT_INVLPG_MULTI 9
+#define MMUEXT_TLB_FLUSH_ALL 10
+#define MMUEXT_INVLPG_ALL 11
+#define MMUEXT_FLUSH_CACHE 12
+#define MMUEXT_SET_LDT 13
+#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
+
+#ifndef __ASSEMBLY__
+struct mmuext_op {
+ unsigned int cmd;
+ union {
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE */
+ xen_pfn_t mfn;
+ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
+ unsigned long linear_addr;
+ } arg1;
+ union {
+ /* SET_LDT */
+ unsigned int nr_ents;
+ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
+ XEN_GUEST_HANDLE(void) vcpumask;
+#else
+ void *vcpumask;
+#endif
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
+ } arg2;
+};
+typedef struct mmuext_op mmuext_op_t;
+DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
+#endif
+
+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
+#define UVMF_NONE (0UL<<0) /* No flushing at all. */
+#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
+#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
+#define UVMF_FLUSHTYPE_MASK (3UL<<0)
+#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
+#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
+#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
+
+/*
+ * Commands to HYPERVISOR_console_io().
+ */
+#define CONSOLEIO_write 0
+#define CONSOLEIO_read 1
+
+/*
+ * Commands to HYPERVISOR_vm_assist().
+ */
+#define VMASST_CMD_enable 0
+#define VMASST_CMD_disable 1
+
+/* x86/32 guests: simulate full 4GB segment limits. */
+#define VMASST_TYPE_4gb_segments 0
+
+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
+#define VMASST_TYPE_4gb_segments_notify 1
+
+/*
+ * x86 guests: support writes to bottom-level PTEs.
+ * NB1. Page-directory entries cannot be written.
+ * NB2. Guest must continue to remove all writable mappings of PTEs.
+ */
+#define VMASST_TYPE_writable_pagetables 2
+
+/* x86/PAE guests: support PDPTs above 4GB. */
+#define VMASST_TYPE_pae_extended_cr3 3
+
+#define MAX_VMASST_TYPE 3
+
+#ifndef __ASSEMBLY__
+
+typedef uint16_t domid_t;
+
+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
+#define DOMID_FIRST_RESERVED (0x7FF0U)
+
+/* DOMID_SELF is used in certain contexts to refer to oneself. */
+#define DOMID_SELF (0x7FF0U)
+
+/*
+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
+ * is useful to ensure that no mappings to the OS's own heap are accidentally
+ * installed. (e.g., in Linux this could cause havoc as reference counts
+ * aren't adjusted on the I/O-mapping code path).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
+ * be specified by any calling domain.
+ */
+#define DOMID_IO (0x7FF1U)
+
+/*
+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
+ * Xen's heap space (e.g., the machine_to_phys table).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
+ * the caller is privileged.
+ */
+#define DOMID_XEN (0x7FF2U)
+
+/*
+ * Send an array of these to HYPERVISOR_mmu_update().
+ * NB. The fields are natural pointer/address size for this architecture.
+ */
+struct mmu_update {
+ uint64_t ptr; /* Machine address of PTE. */
+ uint64_t val; /* New contents of PTE. */
+};
+typedef struct mmu_update mmu_update_t;
+DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
+
+/*
+ * Send an array of these to HYPERVISOR_multicall().
+ * NB. The fields are natural register size for this architecture.
+ */
+struct multicall_entry {
+ unsigned long op, result;
+ unsigned long args[6];
+};
+typedef struct multicall_entry multicall_entry_t;
+DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
+
+/*
+ * Event channel endpoints per domain:
+ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
+ */
+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
+
+struct vcpu_time_info {
+ /*
+ * Updates to the following values are preceded and followed by an
+ * increment of 'version'. The guest can therefore detect updates by
+ * looking for changes to 'version'. If the least-significant bit of
+ * the version number is set then an update is in progress and the guest
+ * must wait to read a consistent set of values.
+ * The correct way to interact with the version number is similar to
+ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
+ */
+ uint32_t version;
+ uint32_t pad0;
+ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
+ uint64_t system_time; /* Time, in nanosecs, since boot. */
+ /*
+ * Current system time:
+ * system_time +
+ * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
+ * CPU frequency (Hz):
+ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
+ */
+ uint32_t tsc_to_system_mul;
+ int8_t tsc_shift;
+ int8_t pad1[3];
+}; /* 32 bytes */
+typedef struct vcpu_time_info vcpu_time_info_t;
+
+struct vcpu_info {
+ /*
+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+ * a pending notification for a particular VCPU. It is then cleared
+ * by the guest OS /before/ checking for pending work, thus avoiding
+ * a set-and-check race. Note that the mask is only accessed by Xen
+ * on the CPU that is currently hosting the VCPU. This means that the
+ * pending and mask flags can be updated by the guest without special
+ * synchronisation (i.e., no need for the x86 LOCK prefix).
+ * This may seem suboptimal because if the pending flag is set by
+ * a different CPU then an IPI may be scheduled even when the mask
+ * is set. However, note:
+ * 1. The task of 'interrupt holdoff' is covered by the per-event-
+ * channel mask bits. A 'noisy' event that is continually being
+ * triggered can be masked at source at this very precise
+ * granularity.
+ * 2. The main purpose of the per-VCPU mask is therefore to restrict
+ * reentrant execution: whether for concurrency control, or to
+ * prevent unbounded stack usage. Whatever the purpose, we expect
+ * that the mask will be asserted only for short periods at a time,
+ * and so the likelihood of a 'spurious' IPI is suitably small.
+ * The mask is read before making an event upcall to the guest: a
+ * non-zero mask therefore guarantees that the VCPU will not receive
+ * an upcall activation. The mask is cleared when the VCPU requests
+ * to block: this avoids wakeup-waiting races.
+ */
+ uint8_t evtchn_upcall_pending;
+ uint8_t evtchn_upcall_mask;
+ unsigned long evtchn_pending_sel;
+ struct arch_vcpu_info arch;
+ struct vcpu_time_info time;
+}; /* 64 bytes (x86) */
+#ifndef __XEN__
+typedef struct vcpu_info vcpu_info_t;
+#endif
+
+/*
+ * Xen/kernel shared data -- pointer provided in start_info.
+ *
+ * This structure is defined to be both smaller than a page, and the
+ * only data on the shared page, but may vary in actual size even within
+ * compatible Xen versions; guests should not rely on the size
+ * of this structure remaining constant.
+ */
+struct shared_info {
+ struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
+
+ /*
+ * A domain can create "event channels" on which it can send and receive
+ * asynchronous event notifications. There are three classes of event that
+ * are delivered by this mechanism:
+ * 1. Bi-directional inter- and intra-domain connections. Domains must
+ * arrange out-of-band to set up a connection (usually by allocating
+ * an unbound 'listener' port and advertising that via a storage service
+ * such as xenstore).
+ * 2. Physical interrupts. A domain with suitable hardware-access
+ * privileges can bind an event-channel port to a physical interrupt
+ * source.
+ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
+ * port to a virtual interrupt source, such as the virtual-timer
+ * device or the emergency console.
+ *
+ * Event channels are addressed by a "port index". Each channel is
+ * associated with two bits of information:
+ * 1. PENDING -- notifies the domain that there is a pending notification
+ * to be processed. This bit is cleared by the guest.
+ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
+ * will cause an asynchronous upcall to be scheduled. This bit is only
+ * updated by the guest. It is read-only within Xen. If a channel
+ * becomes pending while the channel is masked then the 'edge' is lost
+ * (i.e., when the channel is unmasked, the guest must manually handle
+ * pending notifications as no upcall will be scheduled by Xen).
+ *
+ * To expedite scanning of pending notifications, any 0->1 pending
+ * transition on an unmasked channel causes a corresponding bit in a
+ * per-vcpu selector word to be set. Each bit in the selector covers a
+ * 'C long' in the PENDING bitfield array.
+ */
+ unsigned long evtchn_pending[sizeof(unsigned long) * 8];
+ unsigned long evtchn_mask[sizeof(unsigned long) * 8];
+
+ /*
+ * Wallclock time: updated only by control software. Guests should base
+ * their gettimeofday() syscall on this wallclock-base value.
+ */
+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
+ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
+ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
+
+ struct arch_shared_info arch;
+
+};
+#ifndef __XEN__
+typedef struct shared_info shared_info_t;
+#endif
+
+/*
+ * Start-of-day memory layout:
+ * 1. The domain is started within contiguous virtual-memory region.
+ * 2. The contiguous region ends on an aligned 4MB boundary.
+ * 3. This is the order of bootstrap elements in the initial virtual region:
+ * a. relocated kernel image
+ * b. initial ram disk [mod_start, mod_len]
+ * c. list of allocated page frames [mfn_list, nr_pages]
+ * d. start_info_t structure [register ESI (x86)]
+ * e. bootstrap page tables [pt_base, CR3 (x86)]
+ * f. bootstrap stack [register ESP (x86)]
+ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
+ * 5. The initial ram disk may be omitted.
+ * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * layout for the domain. In particular, the bootstrap virtual-memory
+ * region is a 1:1 mapping to the first section of the pseudo-physical map.
+ * 7. All bootstrap elements are mapped read-writable for the guest OS. The
+ * only exception is the bootstrap page table, which is mapped read-only.
+ * 8. There is guaranteed to be at least 512kB padding after the final
+ * bootstrap element. If necessary, the bootstrap virtual region is
+ * extended by an extra 4MB to ensure this.
+ */
+
+#define MAX_GUEST_CMDLINE 1024
+struct start_info {
+ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
+ char magic[32]; /* "xen-<version>-<platform>". */
+ unsigned long nr_pages; /* Total pages allocated to this domain. */
+ unsigned long shared_info; /* MACHINE address of shared info struct. */
+ uint32_t flags; /* SIF_xxx flags. */
+ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
+ uint32_t store_evtchn; /* Event channel for store communication. */
+ union {
+ struct {
+ xen_pfn_t mfn; /* MACHINE page number of console page. */
+ uint32_t evtchn; /* Event channel for console page. */
+ } domU;
+ struct {
+ uint32_t info_off; /* Offset of console_info struct. */
+ uint32_t info_size; /* Size of console_info struct from start.*/
+ } dom0;
+ } console;
+ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
+ unsigned long pt_base; /* VIRTUAL address of page directory. */
+ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
+ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
+ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
+ int8_t cmd_line[MAX_GUEST_CMDLINE];
+
+ /* hackish, for multiboot compatibility */
+ unsigned mods_count;
+};
+typedef struct start_info start_info_t;
+
+/* New console union for dom0 introduced in 0x00030203. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030203
+#define console_mfn console.domU.mfn
+#define console_evtchn console.domU.evtchn
+#endif
+
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
+
+typedef struct dom0_vga_console_info {
+ uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
+#define XEN_VGATYPE_TEXT_MODE_3 0x03
+#define XEN_VGATYPE_VESA_LFB 0x23
+
+ union {
+ struct {
+ /* Font height, in pixels. */
+ uint16_t font_height;
+ /* Cursor location (column, row). */
+ uint16_t cursor_x, cursor_y;
+ /* Number of rows and columns (dimensions in characters). */
+ uint16_t rows, columns;
+ } text_mode_3;
+
+ struct {
+ /* Width and height, in pixels. */
+ uint16_t width, height;
+ /* Bytes per scan line. */
+ uint16_t bytes_per_line;
+ /* Bits per pixel. */
+ uint16_t bits_per_pixel;
+ /* LFB physical address, and size (in units of 64kB). */
+ uint32_t lfb_base;
+ uint32_t lfb_size;
+ /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
+ uint8_t red_pos, red_size;
+ uint8_t green_pos, green_size;
+ uint8_t blue_pos, blue_size;
+ uint8_t rsvd_pos, rsvd_size;
+#if __XEN_INTERFACE_VERSION__ >= 0x00030206
+ /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
+ uint32_t gbl_caps;
+ /* Mode attributes (offset 0x0, VESA command 0x4f01). */
+ uint16_t mode_attrs;
+#endif
+ } vesa_lfb;
+ } u;
+} dom0_vga_console_info_t;
+#define xen_vga_console_info dom0_vga_console_info
+#define xen_vga_console_info_t dom0_vga_console_info_t
+
+typedef uint8_t xen_domain_handle_t[16];
+
+/* Turn a plain number into a C unsigned long constant. */
+#define __mk_unsigned_long(x) x ## UL
+#define mk_unsigned_long(x) __mk_unsigned_long(x)
+
+__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
+__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
+__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
+__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
+
+#else /* __ASSEMBLY__ */
+
+/* In assembly code we cannot use C numeric constant suffixes. */
+#define mk_unsigned_long(x) x
+
+#endif /* !__ASSEMBLY__ */
+
+/* Default definitions for macros used by domctl/sysctl. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+#ifndef XEN_GUEST_HANDLE_64
+#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
+#endif
+#endif
+
+#endif /* __XEN_PUBLIC_XEN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/public/xencomm.h b/xen/public/xencomm.h
new file mode 100644
index 0000000..ac45e07
--- /dev/null
+++ b/xen/public/xencomm.h
@@ -0,0 +1,41 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) IBM Corp. 2006
+ */
+
+#ifndef _XEN_XENCOMM_H_
+#define _XEN_XENCOMM_H_
+
+/* A xencomm descriptor is a scatter/gather list containing physical
+ * addresses corresponding to a virtually contiguous memory area. The
+ * hypervisor translates these physical addresses to machine addresses to copy
+ * to and from the virtually contiguous area.
+ */
+
+#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
+#define XENCOMM_INVALID (~0UL)
+
+struct xencomm_desc {
+ uint32_t magic;
+ uint32_t nr_addrs; /* the number of entries in address[] */
+ uint64_t address[0];
+};
+
+#endif /* _XEN_XENCOMM_H_ */
diff --git a/xen/public/xenoprof.h b/xen/public/xenoprof.h
new file mode 100644
index 0000000..183078d
--- /dev/null
+++ b/xen/public/xenoprof.h
@@ -0,0 +1,138 @@
+/******************************************************************************
+ * xenoprof.h
+ *
+ * Interface for enabling system wide profiling based on hardware performance
+ * counters
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co.
+ * Written by Aravind Menon & Jose Renato Santos
+ */
+
+#ifndef __XEN_PUBLIC_XENOPROF_H__
+#define __XEN_PUBLIC_XENOPROF_H__
+
+#include "xen.h"
+
+/*
+ * Commands to HYPERVISOR_xenoprof_op().
+ */
+#define XENOPROF_init 0
+#define XENOPROF_reset_active_list 1
+#define XENOPROF_reset_passive_list 2
+#define XENOPROF_set_active 3
+#define XENOPROF_set_passive 4
+#define XENOPROF_reserve_counters 5
+#define XENOPROF_counter 6
+#define XENOPROF_setup_events 7
+#define XENOPROF_enable_virq 8
+#define XENOPROF_start 9
+#define XENOPROF_stop 10
+#define XENOPROF_disable_virq 11
+#define XENOPROF_release_counters 12
+#define XENOPROF_shutdown 13
+#define XENOPROF_get_buffer 14
+#define XENOPROF_set_backtrace 15
+#define XENOPROF_last_op 15
+
+#define MAX_OPROF_EVENTS 32
+#define MAX_OPROF_DOMAINS 25
+#define XENOPROF_CPU_TYPE_SIZE 64
+
+/* Xenoprof performance events (not Xen events) */
+struct event_log {
+ uint64_t eip;
+ uint8_t mode;
+ uint8_t event;
+};
+
+/* PC value that indicates a special code */
+#define XENOPROF_ESCAPE_CODE ~0UL
+/* Transient events for the xenoprof->oprofile cpu buf */
+#define XENOPROF_TRACE_BEGIN 1
+
+/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */
+struct xenoprof_buf {
+ uint32_t event_head;
+ uint32_t event_tail;
+ uint32_t event_size;
+ uint32_t vcpu_id;
+ uint64_t xen_samples;
+ uint64_t kernel_samples;
+ uint64_t user_samples;
+ uint64_t lost_samples;
+ struct event_log event_log[1];
+};
+#ifndef __XEN__
+typedef struct xenoprof_buf xenoprof_buf_t;
+DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
+#endif
+
+struct xenoprof_init {
+ int32_t num_events;
+ int32_t is_primary;
+ char cpu_type[XENOPROF_CPU_TYPE_SIZE];
+};
+typedef struct xenoprof_init xenoprof_init_t;
+DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
+
+struct xenoprof_get_buffer {
+ int32_t max_samples;
+ int32_t nbuf;
+ int32_t bufsize;
+ uint64_t buf_gmaddr;
+};
+typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
+DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
+
+struct xenoprof_counter {
+ uint32_t ind;
+ uint64_t count;
+ uint32_t enabled;
+ uint32_t event;
+ uint32_t hypervisor;
+ uint32_t kernel;
+ uint32_t user;
+ uint64_t unit_mask;
+};
+typedef struct xenoprof_counter xenoprof_counter_t;
+DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
+
+typedef struct xenoprof_passive {
+ uint16_t domain_id;
+ int32_t max_samples;
+ int32_t nbuf;
+ int32_t bufsize;
+ uint64_t buf_gmaddr;
+} xenoprof_passive_t;
+DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
+
+
+#endif /* __XEN_PUBLIC_XENOPROF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/ring.c b/xen/ring.c
new file mode 100644
index 0000000..91ce3ff
--- /dev/null
+++ b/xen/ring.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+#include "ring.h"
+
+/* dest is ring */
+void hyp_ring_store(void *dest, const void *src, size_t size, void *start, void *end)
+{
+ if (dest + size > end) {
+ size_t first_size = end - dest;
+ memcpy(dest, src, first_size);
+ src += first_size;
+ dest = start;
+ size -= first_size;
+ }
+ memcpy(dest, src, size);
+}
+
+/* src is ring */
+void hyp_ring_fetch(void *dest, const void *src, size_t size, void *start, void *end)
+{
+ if (src + size > end) {
+ size_t first_size = end - src;
+ memcpy(dest, src, first_size);
+ dest += first_size;
+ src = start;
+ size -= first_size;
+ }
+ memcpy(dest, src, size);
+}
+
+size_t hyp_ring_next_word(char **c, void *start, void *end)
+{
+ size_t n = 0;
+
+ while (**c) {
+ n++;
+ if (++(*c) == end)
+ *c = start;
+ }
+ (*c)++;
+
+ return n;
+}
diff --git a/xen/ring.h b/xen/ring.h
new file mode 100644
index 0000000..1ac8b37
--- /dev/null
+++ b/xen/ring.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_RING_H
+#define XEN_RING_H
+
+/* Free-running position in a power-of-two-sized ring.  */
+typedef uint32_t hyp_ring_pos_t;
+
+/* Index of POS within RING; assumes sizeof(ring) is a power of two.  */
+#define hyp_ring_idx(ring, pos) (((unsigned)(pos)) & (sizeof(ring)-1))
+/* Cell of RING at position POS.  */
+#define hyp_ring_cell(ring, pos) (ring)[hyp_ring_idx((ring), (pos))]
+/* True when producing one more byte would catch up with the consumer.  */
+#define hyp_ring_smash(ring, prod, cons) (hyp_ring_idx((ring), (prod) + 1) == \
+					hyp_ring_idx((ring), (cons)))
+/* Number of bytes the producer may still write.  */
+#define hyp_ring_available(ring, prod, cons) hyp_ring_idx((ring), (cons)-(prod)-1)
+
+void hyp_ring_store(void *dest, const void *src, size_t size, void *start, void *end);
+void hyp_ring_fetch(void *dest, const void *src, size_t size, void *start, void *end);
+size_t hyp_ring_next_word(char **c, void *start, void *end);
+
+#endif /* XEN_RING_H */
diff --git a/xen/store.c b/xen/store.c
new file mode 100644
index 0000000..5f5a902
--- /dev/null
+++ b/xen/store.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <mach/mig_support.h>
+#include <machine/pmap.h>
+#include <machine/ipl.h>
+#include <kern/kalloc.h>
+#include <stdarg.h>
+#include <string.h>
+#include <alloca.h>
+#include <xen/public/xen.h>
+#include <xen/public/io/xs_wire.h>
+#include <util/atoi.h>
+#include "store.h"
+#include "ring.h"
+#include "evt.h"
+#include "xen.h"
+
+/* TODO use events instead of just yielding */
+
+/* Hypervisor part */
+
+def_simple_lock_data(static, lock);
+
+static struct xenstore_domain_interface *store;
+
+struct store_req {
+ const char *data;
+ unsigned len;
+};
+
+/* Send a request */
+/* Queue one xenstore message of TYPE in transaction T, whose payload is
+ * the concatenation of the NR_REQS segments in REQ, on the shared request
+ * ring, then notify the store backend.  Yields until the ring has room.
+ * Caller must hold the store lock.  */
+static void store_put(hyp_store_transaction_t t, uint32_t type, struct store_req *req, unsigned nr_reqs) {
+	struct xsd_sockmsg head = {
+		.type = type,
+		.req_id = 0,
+		.tx_id = t,
+	};
+	unsigned totlen, len;
+	unsigned i;
+
+	/* Payload length goes in the header; total includes the header.  */
+	totlen = 0;
+	for (i = 0; i < nr_reqs; i++)
+		totlen += req[i].len;
+	head.len = totlen;
+	totlen += sizeof(head);
+
+	if (totlen > sizeof(store->req) - 1)
+		panic("too big store message %d, max %d", totlen, (int) sizeof(store->req));
+
+	/* Wait until the backend has consumed enough to make room.  */
+	while (hyp_ring_available(store->req, store->req_prod, store->req_cons) < totlen)
+		hyp_yield();
+
+	mb();
+	hyp_ring_store(&hyp_ring_cell(store->req, store->req_prod), &head, sizeof(head), store->req, store->req + sizeof(store->req));
+	len = sizeof(head);
+	for (i=0; i<nr_reqs; i++) {
+		hyp_ring_store(&hyp_ring_cell(store->req, store->req_prod + len), req[i].data, req[i].len, store->req, store->req + sizeof(store->req));
+		len += req[i].len;
+	}
+
+	/* Publish the data before moving the producer index.  */
+	wmb();
+	store->req_prod += totlen;
+	hyp_event_channel_send(boot_info.store_evtchn);
+}
+
+/* Error strings xenstore may return as a reply payload; store_put_wait
+ * matches short replies against this NULL-terminated table to detect
+ * error responses.  */
+static const char *errors[] = {
+	"EINVAL",
+	"EACCES",
+	"EEXIST",
+	"EISDIR",
+	"ENOENT",
+	"ENOMEM",
+	"ENOSPC",
+	"EIO",
+	"ENOTEMPTY",
+	"ENOSYS",
+	"EROFS",
+	"EBUSY",
+	"EAGAIN",
+	"EISCONN",
+	NULL,
+};
+
+/* Send a request and wait for a reply, whose header is put in head, and
+ * data is returned (beware, that's in the ring !)
+ * On error, returns NULL. Else takes the lock and return pointer on data and
+ * store_put_wait_end shall be called after reading it. */
+static struct xsd_sockmsg head;	/* header of the pending reply, valid until store_put_wait_end */
+const char *hyp_store_error;	/* last error string, points into errors[] */
+
+static void *store_put_wait(hyp_store_transaction_t t, uint32_t type, struct store_req *req, unsigned nr_reqs) {
+	unsigned len;
+	const char **error;
+	void *data;
+
+	simple_lock(&lock);
+	store_put(t, type, req, nr_reqs);
+again:
+	/* Wait for a complete reply header.  */
+	while (store->rsp_prod - store->rsp_cons < sizeof(head))
+		hyp_yield();
+	rmb();
+	hyp_ring_fetch(&head, &hyp_ring_cell(store->rsp, store->rsp_cons), sizeof(head), store->rsp, store->rsp + sizeof(store->rsp));
+	len = sizeof(head) + head.len;
+	/* Then wait for the whole payload.  */
+	while (store->rsp_prod - store->rsp_cons < len)
+		hyp_yield();
+	rmb();
+	if (head.type == XS_WATCH_EVENT) {
+		/* Spurious watch event, drop */
+		store->rsp_cons += sizeof(head) + head.len;
+		hyp_event_channel_send(boot_info.store_evtchn);
+		goto again;
+	}
+	data = &hyp_ring_cell(store->rsp, store->rsp_cons + sizeof(head));
+	/* Short replies may be error strings; match against the table.
+	 * 10 covers the longest entry ("ENOTEMPTY" + NUL).  */
+	if (head.len <= 10) {
+		char c[10];
+		hyp_ring_fetch(c, data, head.len, store->rsp, store->rsp + sizeof(store->rsp));
+		for (error = errors; *error; error++) {
+			if (head.len == strlen(*error) + 1 && !memcmp(*error, c, head.len)) {
+				/* Error reply: consume it, release the lock, fail.  */
+				hyp_store_error = *error;
+				store->rsp_cons += len;
+				hyp_event_channel_send(boot_info.store_evtchn);
+				simple_unlock(&lock);
+				return NULL;
+			}
+		}
+	}
+	return data;
+}
+
+/* Must be called after each store_put_wait. Releases lock. */
+/* Consumes the reply described by 'head' from the response ring and
+ * notifies the backend that space was freed.  */
+static void store_put_wait_end(void) {
+	mb();
+	store->rsp_cons += sizeof(head) + head.len;
+	hyp_event_channel_send(boot_info.store_evtchn);
+	simple_unlock(&lock);
+}
+
+/* Start a transaction. */
+/* Sends XS_TRANSACTION_START (empty path) and parses the decimal
+ * transaction id from the reply.  Panics on failure.  */
+hyp_store_transaction_t hyp_store_transaction_start(void) {
+	struct store_req req = {
+		.data = "",
+		.len = 1,
+	};
+	char *rep;
+	char *s;
+	int i;
+
+	rep = store_put_wait(0, XS_TRANSACTION_START, &req, 1);
+	if (!rep)
+		panic("couldn't start transaction (%s)", hyp_store_error);
+	/* Copy the id out of the ring before releasing it.  */
+	s = alloca(head.len);
+	hyp_ring_fetch(s, rep, head.len, store->rsp, store->rsp + sizeof(store->rsp));
+	mach_atoi((u_char*) s, &i);
+	if (i == MACH_ATOI_DEFAULT)
+		panic("bogus transaction id len %d '%s'", head.len, s);
+	store_put_wait_end();
+	return i;
+}
+
+/* Stop a transaction. */
+/* Commits transaction T ("T" = commit, per the xenstore protocol).
+ * Returns 1 on success, 0 on error (e.g. EAGAIN, transaction conflict).  */
+int hyp_store_transaction_stop(hyp_store_transaction_t t) {
+	struct store_req req = {
+		.data = "T",
+		.len = 2,
+	};
+	int ret = 1;
+	void *rep;
+	rep = store_put_wait(t, XS_TRANSACTION_END, &req, 1);
+	if (!rep)
+		return 0;
+	store_put_wait_end();
+	return ret;
+}
+
+/* List a directory: returns an array to file names, terminated by NULL. Free
+ * with kfree. */
+/* The path is the concatenation of the N vararg strings; only the last
+ * one's NUL terminator is sent.  Returns NULL on error with
+ * hyp_store_error set.  The returned block holds the pointer array
+ * followed by the string data itself.  */
+char **hyp_store_ls(hyp_store_transaction_t t, int n, ...) {
+	struct store_req req[n];
+	va_list listp;
+	int i;
+	char *rep;
+	char *c;
+	char **res, **rsp;
+
+	va_start (listp, n);
+	for (i = 0; i < n; i++) {
+		req[i].data = va_arg(listp, char *);
+		req[i].len = strlen(req[i].data);
+	}
+	/* Include the terminating NUL of the last path component.  */
+	req[n - 1].len++;
+	va_end (listp);
+
+	rep = store_put_wait(t, XS_DIRECTORY, req, n);
+	if (!rep)
+		return NULL;
+	/* First pass over the ring: count the NUL-separated names.  */
+	i = 0;
+	for ( c = rep, n = 0;
+		n < head.len;
+		n += hyp_ring_next_word(&c, store->rsp, store->rsp + sizeof(store->rsp)) + 1)
+		i++;
+	res = (void*) kalloc((i + 1) * sizeof(char*) + head.len);
+	if (!res)
+		hyp_store_error = "ENOMEM";
+	else {
+		/* Copy the names after the pointer array, then fill it in.  */
+		hyp_ring_fetch(res + (i + 1), rep, head.len, store->rsp, store->rsp + sizeof(store->rsp));
+		rsp = res;
+		for (c = (char*) (res + (i + 1)); i; i--, c += strlen(c) + 1)
+			*rsp++ = c;
+		*rsp = NULL;
+	}
+	store_put_wait_end();
+	return res;
+}
+
+/* Get the value of an entry, va version. */
+/* Path is the concatenation of N strings from LISTP.  Returns a freshly
+ * kalloc'd NUL-terminated copy of the value, or NULL on error with
+ * hyp_store_error set.  */
+static void *hyp_store_read_va(hyp_store_transaction_t t, int n, va_list listp) {
+	struct store_req req[n];
+	int i;
+	void *rep;
+	char *res;
+
+	for (i = 0; i < n; i++) {
+		req[i].data = va_arg(listp, char *);
+		req[i].len = strlen(req[i].data);
+	}
+	/* Include the terminating NUL of the last path component.  */
+	req[n - 1].len++;
+
+	rep = store_put_wait(t, XS_READ, req, n);
+	if (!rep)
+		return NULL;
+	res = (void*) kalloc(head.len + 1);
+	if (!res)
+		hyp_store_error = "ENOMEM";
+	else {
+		hyp_ring_fetch(res, rep, head.len, store->rsp, store->rsp + sizeof(store->rsp));
+		res[head.len] = 0;
+	}
+	store_put_wait_end();
+	return res;
+}
+
+/* Get the value of an entry. Free with kfree. */
+/* Varargs wrapper around hyp_store_read_va.  */
+void *hyp_store_read(hyp_store_transaction_t t, int n, ...) {
+	va_list listp;
+	char *res;
+
+	va_start(listp, n);
+	res = hyp_store_read_va(t, n, listp);
+	va_end(listp);
+	return res;
+}
+
+/* Get the integer value of an entry, -1 on error. */
+/* Reads the entry, parses it as a decimal integer (MACH_ATOI_DEFAULT is
+ * returned -- after a warning -- when the value is not numeric), and
+ * frees the intermediate string.  */
+int hyp_store_read_int(hyp_store_transaction_t t, int n, ...) {
+	va_list listp;
+	char *res;
+	int i;
+
+	va_start(listp, n);
+	res = hyp_store_read_va(t, n, listp);
+	va_end(listp);
+	if (!res)
+		return -1;
+	mach_atoi((u_char *) res, &i);
+	if (i == MACH_ATOI_DEFAULT)
+		printf("bogus integer '%s'\n", res);
+	kfree((vm_offset_t) res, strlen(res)+1);
+	return i;
+}
+
+/* Set the value of an entry. */
+/* Writes DATA at the path formed by the N vararg strings.  Returns a
+ * kalloc'd copy of the reply (usually "OK"), or NULL on error.  */
+char *hyp_store_write(hyp_store_transaction_t t, const char *data, int n, ...) {
+	struct store_req req[n + 1];
+	va_list listp;
+	int i;
+	void *rep;
+	char *res;
+
+	va_start (listp, n);
+	for (i = 0; i < n; i++) {
+		req[i].data = va_arg(listp, char *);
+		req[i].len = strlen(req[i].data);
+	}
+	/* Include the path's terminating NUL; the value follows unterminated.  */
+	req[n - 1].len++;
+	req[n].data = data;
+	req[n].len = strlen (data);
+	va_end (listp);
+
+	rep = store_put_wait (t, XS_WRITE, req, n + 1);
+	if (!rep)
+		return NULL;
+	res = (void*) kalloc(head.len + 1);
+	if (!res)
+		/* NOTE(review): sibling functions set hyp_store_error = "ENOMEM"
+		 * here; assigning NULL looks inconsistent -- confirm intent.  */
+		hyp_store_error = NULL;
+	else {
+		hyp_ring_fetch(res, rep, head.len, store->rsp, store->rsp + sizeof(store->rsp));
+		res[head.len] = 0;
+	}
+	store_put_wait_end();
+	return res;
+}
+
+/* Store event-channel interrupt handler: wake up any thread sleeping on
+ * the store event channel.  */
+static void hyp_store_handler(int unit)
+{
+	thread_wakeup(&boot_info.store_evtchn);
+}
+
+/* Map store's shared page. */
+/* Idempotent: subsequent calls return immediately once 'store' is set.
+ * Maps the page whose machine frame number the hypervisor passed in
+ * boot_info, and installs the event handler.  */
+void hyp_store_init(void)
+{
+	if (store)
+		return;
+	simple_lock_init(&lock);
+	store = (void*) mfn_to_kv(boot_info.store_mfn);
+#ifdef MACH_PV_PAGETABLES
+	/* With PV pagetables the page may be mapped read-only by default.  */
+	pmap_set_page_readwrite(store);
+#endif	/* MACH_PV_PAGETABLES */
+	/* SPL sched */
+	hyp_evt_handler(boot_info.store_evtchn, hyp_store_handler, 0, SPL7);
+}
diff --git a/xen/store.h b/xen/store.h
new file mode 100644
index 0000000..6bb78ea
--- /dev/null
+++ b/xen/store.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_STORE_H
+#define XEN_STORE_H
+#include <machine/xen.h>
+#include <xen/public/io/xenbus.h>
+
+typedef uint32_t hyp_store_transaction_t;
+
+/* XenbusState values as decimal strings, ready to be written to a
+ * device's "state" node in the store.  */
+#define hyp_store_state_unknown		"0"
+#define hyp_store_state_initializing	"1"
+#define hyp_store_state_init_wait	"2"
+#define hyp_store_state_initialized	"3"
+#define hyp_store_state_connected	"4"
+#define hyp_store_state_closing		"5"
+#define hyp_store_state_closed		"6"
+
+void hyp_store_init(void);
+
+extern const char *hyp_store_error;
+
+/* Start a transaction. */
+hyp_store_transaction_t hyp_store_transaction_start(void);
+/* Stop a transaction. Returns 1 if the transactions succeeded, 0 else. */
+int hyp_store_transaction_stop(hyp_store_transaction_t t);
+
+/* List a directory: returns an array to file names, terminated by NULL. Free
+ * with kfree. */
+char **hyp_store_ls(hyp_store_transaction_t t, int n, ...);
+
+/* Get the value of an entry. Free with kfree. */
+void *hyp_store_read(hyp_store_transaction_t t, int n, ...);
+/* Get the integer value of an entry, -1 on error. */
+int hyp_store_read_int(hyp_store_transaction_t t, int n, ...);
+/* Set the value of an entry. */
+char *hyp_store_write(hyp_store_transaction_t t, const char *data, int n, ...);
+
+#endif /* XEN_STORE_H */
diff --git a/xen/time.c b/xen/time.c
new file mode 100644
index 0000000..21791a5
--- /dev/null
+++ b/xen/time.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <mach/mach_types.h>
+#include <kern/mach_clock.h>
+#include <mach/xen.h>
+#include <machine/xen.h>
+#include <machine/spl.h>
+#include <machine/ipl.h>
+#include <mach/machine/eflags.h>
+#include <xen/evt.h>
+#include "xen.h"
+#include "time.h"
+#include "store.h"
+
+static uint64_t lastnsec;
+
+/* 2^64 nanoseconds ~= 500 years */
+/* System (since-boot) time in nanoseconds, derived from the TSC and the
+ * per-vcpu scaling factors the hypervisor publishes.  The version/rmb
+ * loop retries if the hypervisor updated the record mid-read (seqlock
+ * style).  */
+static uint64_t hyp_get_stime(void) {
+	uint32_t version;
+	uint64_t cpu_clock, last_cpu_clock, delta, system_time;
+	uint64_t delta_high, delta_low;
+	uint32_t mul;
+	int8_t shift;
+	volatile struct vcpu_time_info *time = &hyp_shared_info.vcpu_info[0].time;
+
+	do {
+		version = time->version;
+		rmb();
+		cpu_clock = hyp_cpu_clock();
+		last_cpu_clock = time->tsc_timestamp;
+		system_time = time->system_time;
+		mul = time->tsc_to_system_mul;
+		shift = time->tsc_shift;
+		rmb();
+	} while (version != time->version);
+
+	/* Scale the TSC delta by 2^shift, then by mul/2^32, done as a
+	 * 64x32-bit multiply split into two 32-bit halves.  */
+	delta = cpu_clock - last_cpu_clock;
+	if (shift < 0)
+		delta >>= -shift;
+	else
+		delta <<= shift;
+	delta_high = delta >> 32;
+	delta_low = (uint32_t) delta;
+	return system_time + ((delta_low * (uint64_t) mul) >> 32)
+		+ (delta_high * (uint64_t) mul);
+}
+
+/* Wall-clock time in nanoseconds: the boot-time wall clock published by
+ * the hypervisor (read under its version seqlock) plus system time.  */
+uint64_t hyp_get_time(void) {
+	uint32_t version;
+	uint32_t sec, nsec;
+
+	do {
+		version = hyp_shared_info.wc_version;
+		rmb();
+		sec = hyp_shared_info.wc_sec;
+		nsec = hyp_shared_info.wc_nsec;
+		rmb();
+	} while (version != hyp_shared_info.wc_version);
+
+	return sec*1000000000ULL + nsec + hyp_get_stime();
+}
+
+/* Timer virq handler: account the elapsed time since the last tick and
+ * re-arm a 10ms one-shot hypervisor timer.  */
+static void hypclock_intr(int unit, int old_ipl, void *ret_addr, struct i386_interrupt_state *regs) {
+	uint64_t nsec, delta;
+
+	/* Not initialized yet; clkstart sets lastnsec.  */
+	if (!lastnsec)
+		return;
+
+	nsec = hyp_get_stime();
+	if (nsec < lastnsec) {
+		/* Clock apparently went backwards: warn and clamp.  */
+		printf("warning: nsec 0x%08lx%08lx < lastnsec 0x%08lx%08lx\n",(unsigned long)(nsec>>32), (unsigned long)nsec, (unsigned long)(lastnsec>>32), (unsigned long)lastnsec);
+		nsec = lastnsec;
+	}
+	delta = nsec-lastnsec;
+
+	/* Account whole microseconds only; the sub-microsecond remainder is
+	 * carried over to the next tick.  */
+	lastnsec += (delta/1000)*1000;
+	hypclock_machine_intr(old_ipl, ret_addr, regs, delta);
+	/* 10ms tick rest */
+	hyp_do_set_timer_op(hyp_get_stime()+10*1000*1000);
+
+#if 0
+	char *c = hyp_store_read(0, 1, "control/shutdown");
+	if (c) {
+		static int go_down = 0;
+		if (!go_down) {
+			printf("uh oh, shutdown: %s\n", c);
+			go_down = 1;
+			/* TODO: somehow send startup_reboot notification to init */
+			if (!strcmp(c, "reboot")) {
+				/* this is just a reboot */
+			}
+		}
+	}
+#endif
+}
+
+/* Read the time-of-day "clock chip": seconds since the epoch, taken from
+ * the hypervisor wall clock.  Always succeeds (returns 0).  */
+int
+readtodc(uint64_t *tp)
+{
+	uint64_t t = hyp_get_time();
+	uint64_t n = t / 1000000000;
+
+	*tp = n;
+
+	return(0);
+}
+
+/* Set the time-of-day clock: a domU may not change the host clock, so
+ * this always fails.  */
+int
+writetodc(void)
+{
+	/* Not allowed in Xen */
+	return(-1);
+}
+
+/* Start the clock: bind the timer virq, deliver a first tick, record the
+ * time base, and arm the first 10ms one-shot timer.  */
+void
+clkstart(void)
+{
+	evtchn_port_t port = hyp_event_channel_bind_virq(VIRQ_TIMER, 0);
+	hyp_evt_handler(port, (interrupt_handler_fn)hypclock_intr, 0, SPLHI);
+
+	/* first clock tick */
+	clock_interrupt(0, 0, 0, 0);
+	/* Non-zero lastnsec also enables hypclock_intr's accounting.  */
+	lastnsec = hyp_get_stime();
+
+	/* 10ms tick rest */
+	hyp_do_set_timer_op(hyp_get_stime()+10*1000*1000);
+}
diff --git a/xen/time.h b/xen/time.h
new file mode 100644
index 0000000..cf28720
--- /dev/null
+++ b/xen/time.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2006-2009 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_TIME_H
+#define XEN_TIME_H
+
+#include <mach/mach_types.h>
+uint64_t hyp_get_time(void);
+
+#endif /* XEN_TIME_H */
diff --git a/xen/xen.c b/xen/xen.c
new file mode 100644
index 0000000..6d42447
--- /dev/null
+++ b/xen/xen.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2007-2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+#include <mach/xen.h>
+#include <machine/xen.h>
+#include <machine/ipl.h>
+#include <xen/block.h>
+#include <xen/console.h>
+#include <xen/grant.h>
+#include <xen/net.h>
+#include <xen/store.h>
+#include <xen/time.h>
+#include "xen.h"
+#include "evt.h"
+
+/* VIRQ_DEBUG handler: drop into panic so the debugger can take over.  */
+static void hyp_debug(void)
+{
+	panic("debug");
+}
+
+/* Early Xen initialization: grant tables, xenstore mapping, and the
+ * debug virq.  Called before threads are running.  */
+void hyp_init(void)
+{
+	hyp_grant_init();
+	hyp_store_init();
+	evtchn_port_t port = hyp_event_channel_bind_virq(VIRQ_DEBUG, 0);
+	hyp_evt_handler(port, (interrupt_handler_fn)hyp_debug, 0, SPL7);
+}
+
+/* Late device initialization: paravirtual block and network frontends.  */
+void hyp_dev_init(void)
+{
+	/* these depend on hyp_init() and working threads */
+	hyp_block_init();
+	hyp_net_init();
+}
+
+extern int int_mask[];
+/* Idle loop body: block in the hypervisor until an event arrives,
+ * masking and unmasking upcalls so that no event is lost between the
+ * pending-check and the block.  */
+void hyp_idle(void)
+{
+	int cpu = 0;
+	/* Mask upcalls while we test for pending events.  */
+	hyp_shared_info.vcpu_info[cpu].evtchn_upcall_mask = 0xff;
+	barrier();
+	/* Avoid blocking if there are pending events */
+	if (!hyp_shared_info.vcpu_info[cpu].evtchn_upcall_pending &&
+		!hyp_shared_info.evtchn_pending[cpu])
+		hyp_block();
+	while (1) {
+		/* Unmask, then re-check: anything that slipped in while
+		 * masked must be handled before returning to threads.  */
+		hyp_shared_info.vcpu_info[cpu].evtchn_upcall_mask = 0x00;
+		barrier();
+		if (!hyp_shared_info.vcpu_info[cpu].evtchn_upcall_pending &&
+			!hyp_shared_info.evtchn_pending[cpu])
+			/* Didn't miss any event, can return to threads. */
+			break;
+		hyp_shared_info.vcpu_info[cpu].evtchn_upcall_mask = 0xff;
+		hyp_c_callback(NULL,NULL);
+	}
+}
diff --git a/xen/xen.h b/xen/xen.h
new file mode 100644
index 0000000..3fd4028
--- /dev/null
+++ b/xen/xen.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2006-2009, 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_XEN_H
+#define XEN_XEN_H
+
+void hyp_init(void);
+void hyp_dev_init(void);
+void hyp_idle(void);
+void hyp_p2m_init(void);
+
+void hypclock_machine_intr(int old_ipl, void *ret_addr, struct i386_interrupt_state *regs, uint64_t delta);
+
+/* Register frame passed by the hypervisor to the failsafe callback
+ * (invoked e.g. on a fault while reloading segment registers).
+ * NOTE(review): field meanings inferred from names; the authoritative
+ * layout is the assembly entry stub -- confirm there.  */
+struct failsafe_callback_regs {
+	unsigned int ds;
+	unsigned int es;
+	unsigned int fs;
+	unsigned int gs;
+	unsigned int ip;
+	unsigned int cs_and_mask;
+	unsigned int flags;
+};
+
+void hyp_failsafe_c_callback(struct failsafe_callback_regs *regs);
+
+#endif /* XEN_XEN_H */